diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000000..50f99917e1 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,539 @@ +version: 2.1 + +#----------------------------------------------------------------------------# +# Commands are used as building blocks for jobs, which run through workflows # +#----------------------------------------------------------------------------# + +commands: + checkout: + steps: + - run: + name: checkout + command: | + # Copy of the upstream checkout command with the following modifications: + # 1. CIRCLE_REPOSITORY_URL is updated to use https rather than ssh + # 2. Removed ssh specific sections + + # Use https rather than ssh to clone public projects + CIRCLE_REPOSITORY_URL=${CIRCLE_REPOSITORY_URL/://} + CIRCLE_REPOSITORY_URL=${CIRCLE_REPOSITORY_URL/git@/https://} + + echo "Repository URL: ${CIRCLE_REPOSITORY_URL}" + + # Workaround old docker images with incorrect $HOME + # check https://github.com/docker/docker/issues/2968 for details + if [ "${HOME}" = "/" ] + then + export HOME=$(getent passwd $(id -un) | cut -d: -f6) + fi + + # Ensure ~ is expanded otherwise bash treats is as string literal + eval CIRCLE_WORKING_DIRECTORY=${CIRCLE_WORKING_DIRECTORY} + if [ -e ${CIRCLE_WORKING_DIRECTORY}/.git ] + then + cd ${CIRCLE_WORKING_DIRECTORY} + git remote set-url origin "$CIRCLE_REPOSITORY_URL" || true + else + mkdir -p ${CIRCLE_WORKING_DIRECTORY} + cd ${CIRCLE_WORKING_DIRECTORY} + git clone "$CIRCLE_REPOSITORY_URL" . + fi + + if [ -n "$CIRCLE_TAG" ] + then + git fetch --force origin "refs/tags/${CIRCLE_TAG}" + else + # By default "git fetch" only fetches refs/ + # Below ensures we also fetch PR refs + git config --add remote.origin.fetch "+refs/pull/*/head:refs/remotes/origin/pull/*" + git fetch --force --quiet origin + fi + + if [ -n "$CIRCLE_TAG" ] + then + git reset --hard "$CIRCLE_SHA1" + git checkout -q "$CIRCLE_TAG" + elif [ -n "$CIRCLE_BRANCH" ] + then + git reset --hard "$CIRCLE_SHA1" + git checkout -q -B "$CIRCLE_BRANCH" + fi + + git reset --hard "$CIRCLE_SHA1" + + # gofmt performs checks on the entire codebase to ensure everything is formated + # with the gofmt tool. + gofmt: + steps: + - checkout + - run: + name: Run gofmt + command: ./gofmt.sh + + # gogenerate validates that any generated code has been updated if needed. + gogenerate: + steps: + - checkout + - run: + name: Check generated code + command: ./gogenerate.sh + + # govet does govet checks in the entire codebase. + govet: + steps: + - checkout + - run: + name: Run govet + command: ./govet.sh + + # staticcheck runs staticcheck in the entire codebase. + staticcheck: + steps: + - checkout + - run: + name: Run staticcheck + command: ./staticcheck.sh + + # check_deprecations ensures a release is actually removing deprecated fields + # that were supposed to be discontinued in said release. + check_deprecations: + steps: + - run: + name: Run deprecation tests when on a tagged commit + command: | + if [ "$CIRCLE_TAG" != "" ]; then + # Negate the result so process exits with 1 if anything found + echo "Searching for \"action needed\" tags..." + ! egrep -irn -A 1 --include=*.go "Action.+needed.+in.+release:.+$CIRCLE_TAG" ./ + fi + + # install_golang installs golang, it's only used in horizon integration tests, + # other jobs use golang docker image. 
+ install_golang: + steps: + - run: + name: Download and install golang + command: | + sudo rm -rf /usr/local/go + wget https://dl.google.com/go/go1.17.linux-amd64.tar.gz + sudo tar -C /usr/local -xzf go1.17.linux-amd64.tar.gz + + # install_go_deps installs the go dependencies of the project. + install_go_deps: + steps: + - checkout + - restore_cache: + keys: + - go-mod-v1-{{ checksum "go.mod" }}-{{ checksum "go.sum" }} + - run: + name: Download dependencies + command: | + go mod download + - save_cache: + key: go-mod-v1-{{ checksum "go.mod" }}-{{ checksum "go.sum" }} + paths: + - /go/pkg/mod + + # check_go_deps validates that the dependencies are expected. + check_go_deps: + steps: + - checkout + - run: + name: Check dependencies + command: ./gomod.sh + + # install_stellar_core installs the latest unstable version of stellar core. + install_stellar_core: + parameters: + core-version: + type: string + default: "18.0.3-746.f3baea6.focal" + steps: + - run: + name: Install Stellar Core <<#parameters.core-version>> (version <>)<> + command: | + sudo wget -qO - https://apt.stellar.org/SDF.asc | APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=true sudo apt-key add - + sudo bash -c 'echo "deb https://apt.stellar.org focal unstable" > /etc/apt/sources.list.d/SDF-unstable.list' + sudo apt-get update && sudo apt-get install -y stellar-core<<#parameters.core-version>>=<><> + echo "using stellar core version $(stellar-core version)" + echo "export CAPTIVE_CORE_BIN=/usr/bin/stellar-core" >> $BASH_ENV + + check_ingest_state: + steps: + - run: + name: Getting latest checkpoint ledger + command: | + export LATEST_LEDGER=`curl -s http://history.stellar.org/prd/core-live/core_live_001/.well-known/stellar-history.json | jq -r '.currentLedger'` + echo $LATEST_LEDGER # For debug + echo "export LATEST_LEDGER=$LATEST_LEDGER" >> $BASH_ENV # Persist between steps + - run: + name: Dump state using ingest + command: go run -v ./exp/tools/dump-ledger-state/ + - run: + name: Init stellar-core DB + command: stellar-core --conf ./exp/tools/dump-ledger-state/stellar-core.cfg new-db + - run: + name: Catchup core + command: stellar-core --conf ./exp/tools/dump-ledger-state/stellar-core.cfg catchup $LATEST_LEDGER/1 + - run: + name: Dump stellar-core DB + command: ./exp/tools/dump-ledger-state/dump_core_db.sh + - run: + name: Compare state dumps + command: ./exp/tools/dump-ledger-state/diff_test.sh + + # test_packages performs tests on all packages of the monorepo. + test_packages: + steps: + - run: + name: Install dockerize + command: wget https://github.com/jwilder/dockerize/releases/download/$DOCKERIZE_VERSION/dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && tar -C /usr/local/bin -xzvf dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz && rm dockerize-linux-amd64-$DOCKERIZE_VERSION.tar.gz + environment: + DOCKERIZE_VERSION: v0.3.0 + - run: + name: Wait for postgres + command: | + dockerize -wait tcp://localhost:5432 -timeout 1m + - run: + name: Run package tests + environment: + # When running on Docker in Circle, Go thinks there are 36 CPUs + # which means the default number of parallel build processes will be 36 + # but using 36 build processes can lead to OOM errors + # because according to https://circleci.com/docs/2.0/configuration-reference/#resource_class , + # the default Docker container only has 2 CPUs available. + # That is why we explicitly specify -p=4 to reduce the number of parallel build processes + GOFLAGS: -p=4 + command: go test -race -coverprofile=coverage.txt -covermode=atomic ./... 
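The `GOFLAGS: -p=4` override in the `test_packages` command above exists because Go derives its default build/test parallelism from the host's CPU count, not from the CPU quota CircleCI actually grants the container. A minimal diagnostic sketch (hypothetical, not part of this repository) that prints the values Go would otherwise use inside such a container:

```go
// cpucheck prints the CPU figures Go picks up inside a container. On CircleCI's
// Docker executors runtime.NumCPU reports the host's CPUs (e.g. 36), while the
// default resource class only grants 2, which is why -p is pinned to 4 above.
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// NumCPU reflects the host machine, not the container's CPU quota.
	fmt.Println("NumCPU:    ", runtime.NumCPU())
	// GOMAXPROCS defaults to NumCPU; `go build` and `go test` also default -p to it,
	// so without GOFLAGS=-p=4 the build would start far too many parallel jobs.
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
}
```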
+ + # build_packages creates the project's artifacts. + build_packages: + steps: + - run: + name: Build release artifacts dependencies + command: | + apt-get update + apt-get install -y zip + - run: + name: Build release artifacts + command: go run ./support/scripts/build_release_artifacts/main.go + +#-----------------------------------------------------------------------------# +# Jobs use the commands to accomplish a given task, and run through workflows # +#-----------------------------------------------------------------------------# + +jobs: + # check_code_1_17 performs code checks using Go 1.17. + check_code_1_17: + working_directory: /go/src/github.com/stellar/go + docker: + - image: golang:1.17 + steps: + - install_go_deps + - check_go_deps + - gofmt + - gogenerate + - govet + - staticcheck + - build_packages + + # test_code_1_16 performs all package tests using Go 1.16. + test_code_1_16: + working_directory: /go/src/github.com/stellar/go + docker: + - image: golang:1.16 + environment: + GO111MODULE: "on" + PGHOST: localhost + PGPORT: 5432 + PGUSER: circleci + PGDATABASE: circle_test + MYSQL_HOST: 127.0.0.1 + MYSQL_PORT: 3306 + - image: circleci/postgres:9.6.5-alpine-ram + environment: + POSTGRES_USER: circleci + steps: + - install_go_deps + - test_packages + + # test_code_1_16 performs all package tests using Go 1.16 and Postgres 10. + test_code_1_16_postgres10: + working_directory: /go/src/github.com/stellar/go + docker: + - image: golang:1.16 + environment: + GO111MODULE: "on" + PGHOST: localhost + PGPORT: 5432 + PGUSER: circleci + PGDATABASE: circle_test + MYSQL_HOST: 127.0.0.1 + MYSQL_PORT: 3306 + - image: circleci/postgres:10-alpine-ram + environment: + POSTGRES_USER: circleci + - image: circleci/redis:5.0-alpine + steps: + - install_go_deps + - test_packages + + # test_code_1_17 performs all package tests using Go 1.17. + test_code_1_17: + working_directory: /go/src/github.com/stellar/go + docker: + - image: golang:1.17 + environment: + GO111MODULE: "on" + PGHOST: localhost + PGPORT: 5432 + PGUSER: circleci + PGDATABASE: circle_test + MYSQL_HOST: 127.0.0.1 + MYSQL_PORT: 3306 + - image: circleci/postgres:9.6.5-alpine-ram + environment: + POSTGRES_USER: circleci + steps: + - install_go_deps + - test_packages + + # test_code_1_17 performs all package tests using Go 1.17 and Postgres 10. + test_code_1_17_postgres10: + working_directory: /go/src/github.com/stellar/go + docker: + - image: golang:1.17 + environment: + GO111MODULE: "on" + PGHOST: localhost + PGPORT: 5432 + PGUSER: circleci + PGDATABASE: circle_test + MYSQL_HOST: 127.0.0.1 + MYSQL_PORT: 3306 + - image: circleci/postgres:10-alpine-ram + environment: + POSTGRES_USER: circleci + - image: circleci/redis:5.0-alpine + steps: + - install_go_deps + - test_packages + # publish_artifacts builds and uploads artifacts to any tagged commit. + # + # NOTE: this commands relies on an env var called GITHUB_TOKEN which is a + # GH OAUTH token with `repo` access. + publish_artifacts: + working_directory: /go/src/github.com/stellar/go + docker: + - image: golang:1.17 + steps: + - check_deprecations + - install_go_deps + - check_go_deps + - build_packages + - attach_workspace: + at: ./dist + - run: + name: "Publish release on GitHub" + command: | + if [ "$(ls -A ./dist)" ] + then + go get github.com/tcnksm/ghr@v0.14.0 + ghr -t ${GITHUB_TOKEN} -u ${CIRCLE_PROJECT_USERNAME} -r ${CIRCLE_PROJECT_REPONAME} -c ${CIRCLE_SHA1} ${CIRCLE_TAG} ./dist/ + else + echo "No files found in ./dist. No binaries to publish for ${CIRCLE_TAG}." 
+ fi + + publish_state_diff_docker_image: + docker: + - image: docker:18.04-git + steps: + - checkout + - setup_remote_docker + - run: + name: Build and Push Docker image + command: | + echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin + docker build --no-cache -f exp/tools/dump-ledger-state/Dockerfile --build-arg GITCOMMIT=$CIRCLE_SHA1 -t stellar/ledger-state-diff:$CIRCLE_SHA1 -t stellar/ledger-state-diff:latest . + docker push stellar/ledger-state-diff:$CIRCLE_SHA1 + docker push stellar/ledger-state-diff:latest + + publish_horizon_docker_image: + docker: + - image: docker:18.04-git + steps: + - checkout + - setup_remote_docker + - run: + name: Build and Push Docker image + command: | + echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin + # CIRCLE_TAG will be prefixed by "horizon-v", here we build the horizon docker image and tag it with stellar/horizon:$VERSION + # where version is CIRCLE_TAG without the "horizon-v" prefix + VERSION=${CIRCLE_TAG#horizon-v} + docker build -f services/horizon/docker/Dockerfile.dev -t stellar/horizon:$VERSION . + docker push stellar/horizon:$VERSION + + test_verify_range_docker_image: + parameters: + publish: + type: boolean + default: false + docker: + - image: docker:18.04-git + steps: + - checkout + - setup_remote_docker + - run: + name: Build, test<<#parameters.publish>> and Push<> the Verify Range Docker image + command: | + docker build -f services/horizon/docker/verify-range/Dockerfile -t stellar/horizon-verify-range services/horizon/docker/verify-range/ + # Any range should do for basic testing, this range was chosen pretty early in history so that it only takes a few mins to run + docker run -ti -e BRANCH=${CIRCLE_SHA1} -e FROM=10000063 -e TO=10000127 stellar/horizon-verify-range + <<#parameters.publish>> + echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin + docker push stellar/horizon-verify-range + <> + + + publish_latest_horizon_docker_image: + docker: + - image: docker:18.04-git + steps: + - checkout + - setup_remote_docker + - run: + name: Build and Push Docker image + command: | + echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin + docker build -f services/horizon/docker/Dockerfile.dev -t stellar/horizon:latest . + docker push stellar/horizon:latest + + publish_commit_hash_horizon_docker_image: + docker: + - image: docker:18.04-git + steps: + - checkout + - setup_remote_docker + - run: + name: Build and Push Docker image + command: | + echo "$DOCKER_PASS" | docker login -u "$DOCKER_USER" --password-stdin + TAG=$(git rev-parse --short HEAD) + docker build -f services/horizon/docker/Dockerfile.dev -t stellar/horizon:$TAG . + docker push stellar/horizon:$TAG + + # test_horizon_integration performs Horizon integration tests, it's using + # decicated vm machine to be able to start arbitrary docker containers. 
+ test_horizon_integration: + parameters: + enable-captive-core: + type: boolean + default: false + working_directory: ~/go/src/github.com/stellar/go + machine: + image: ubuntu-2004:202010-01 + steps: + - checkout + - run: + name: Setting env variables + command: echo "export HORIZON_INTEGRATION_TESTS=true" >> $BASH_ENV + - run: + name: Pull latest Stellar Core image + command: docker pull stellar/stellar-core + - install_golang + - run: + name: Start Horizon Postgres DB + command: docker run -d --env POSTGRES_HOST_AUTH_METHOD=trust -p 5432:5432 circleci/postgres:9.6.5-alpine + - when: + condition: <> + steps: + - install_stellar_core + - run: + name: Setting Captive Core env variables + command: echo "export HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE=true" >> $BASH_ENV + - run: + name: Run Horizon integration tests <<#parameters.enable-captive-core>>(With captive core)<> + # Currently all integration tests are in a single directory. + command: | + cd ~/go/src/github.com/stellar/go + go test -race -timeout 25m -v ./services/horizon/internal/integration/... + +#-------------------------------------------------------------------------# +# Workflows orchestrate jobs and make sure they run in the right sequence # +#-------------------------------------------------------------------------# + +workflows: + version: 2 + + check_code_and_test: + jobs: + - check_code_1_17 + - test_code_1_16 + - test_code_1_16_postgres10 + - test_code_1_17 + - test_code_1_17_postgres10 + # run the integration tests ... + # ... without captive core + - test_horizon_integration + # ... and with captive core + - test_horizon_integration: + name: test_horizon_integration_with_captive_core + enable-captive-core: true + - test_verify_range_docker_image: + filters: + # we use test_verify_range_docker_image with publish in master + branches: + ignore: master + - publish_state_diff_docker_image: + filters: + branches: + only: master + + build_and_deploy: + jobs: + - publish_artifacts: + filters: + tags: + ignore: snapshots + branches: + ignore: /.*/ + - publish_commit_hash_horizon_docker_image: + filters: + tags: + ignore: /.*/ + branches: + only: master + - publish_horizon_docker_image: + filters: + tags: + only: /^horizon-v.*/ + branches: + ignore: /.*/ + - hold: # <<< A job that will require manual approval in the CircleCI web application. + filters: + tags: + only: /^horizon-v.*/ + branches: + ignore: /.*/ + type: approval # <<< This key-value pair will set your workflow to a status of "On Hold" + requires: # We only run the "hold" job when publish_horizon_docker_image has succeeded + - publish_horizon_docker_image + - publish_latest_horizon_docker_image: + filters: + tags: + only: /^horizon-v.*/ + branches: + ignore: /.*/ + # Pushing stellar/horizon:latest to docker hub requires manual approval + requires: + - hold + - test_verify_range_docker_image: + name: test_and_publish_verify_range_docker_image + publish: true + filters: + tags: + ignore: /.*/ + branches: + only: master + diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..6c8a3a4e04 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +vendor +dist \ No newline at end of file diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000000..0c4a54c7ef --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,43 @@ + + +
+ PR Checklist + +### PR Structure + +* [ ] This PR has reasonably narrow scope (if not, break it down into smaller PRs). +* [ ] This PR avoids mixing refactoring changes with feature changes (split into two PRs + otherwise). +* [ ] This PR's title starts with name of package that is most changed in the PR, ex. + `services/friendbot`, or `all` or `doc` if the changes are broad or impact many + packages. + +### Thoroughness + +* [ ] This PR adds tests for the most critical parts of the new functionality or fixes. +* [ ] I've updated any docs ([developer docs](https://developers.stellar.org/api/), `.md` + files, etc... affected by this change). Take a look in the `docs` folder for a given service, + like [this one](https://github.com/stellar/go/tree/master/services/horizon/internal/docs). + +### Release planning + +* [ ] I've updated the relevant CHANGELOG ([here](services/horizon/CHANGELOG.md) for Horizon) if + needed with deprecations, added features, breaking changes, and DB schema changes. +* [ ] I've decided if this PR requires a new major/minor version according to + [semver](https://semver.org/), or if it's mainly a patch change. The PR is targeted at the next + release branch if it's not a patch change. +
+ +### What + +[TODO: Short statement about what is changing.] + +### Why + +[TODO: Why this change is being made. Include any context required to understand the why.] + +### Known limitations + +[TODO or N/A] diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000000..0e1cc42b02 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,38 @@ +name: "CodeQL" + +on: + push: + branches: + - 'master' + - 'release-*' + pull_request: + branches: + - 'master' + - 'release-*' + schedule: + - cron: '30 17 * * 1,2,3,4,5' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + + - name: Autobuild + uses: github/codeql-action/autobuild@v1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 0000000000..1fda24d2d2 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,86 @@ +name: Go + +on: + push: + branches: [master] + pull_request: + +jobs: + + check: + strategy: + matrix: + os: [ubuntu-latest] + go: [1.17] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + - run: ./gomod.sh + - run: ./gofmt.sh + - run: ./gogenerate.sh + - run: ./govet.sh + - run: ./staticcheck.sh + + build: + strategy: + matrix: + os: [ubuntu-latest] + go: [1.17, 1.16.7] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + - run: go build ./... + + test: + strategy: + matrix: + os: [ubuntu-latest] + go: [1.17, 1.16.7] + pg: [9.6.5, 10] + runs-on: ${{ matrix.os }} + services: + postgres: + image: postgres:${{ matrix.pg }} + env: + POSTGRES_USER: postgres + POSTGRES_DB: postgres + POSTGRES_PASSWORD: postgres + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + redis: + image: redis + options: >- + --health-cmd "redis-cli ping" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 6379:6379 + env: + PGHOST: localhost + PGPORT: 5432 + PGUSER: postgres + PGPASSWORD: postgres + PGDATABASE: postgres + REDIS_HOST: localhost + REDIS_PORT: 6379 + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: ${{ matrix.go }} + - run: go test -race -cover ./... 
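The `PG*` variables exported by the workflow above are the standard libpq environment variables, so Postgres drivers and the repository's own test helpers can generally pick them up directly. As an illustrative sketch only (the package and helper names below are invented for the example, not taken from the repo), a test could assemble a keyword/value connection string from them like this:

```go
package pgenv

import (
	"fmt"
	"os"
	"testing"
)

// getenv returns the environment variable's value, or a fallback that matches
// a typical local development setup when the variable is unset.
func getenv(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

// TestDSNFromEnv shows how the PGHOST/PGPORT/PGUSER/PGPASSWORD/PGDATABASE
// values set in the CI workflow translate into a libpq-style DSN.
func TestDSNFromEnv(t *testing.T) {
	dsn := fmt.Sprintf(
		"host=%s port=%s user=%s password=%s dbname=%s sslmode=disable",
		getenv("PGHOST", "localhost"),
		getenv("PGPORT", "5432"),
		getenv("PGUSER", "postgres"),
		getenv("PGPASSWORD", ""),
		getenv("PGDATABASE", "postgres"),
	)
	t.Logf("would connect with: %s", dsn)
}
```

Keeping the fallbacks aligned with the service definitions in the workflow lets the same test run unchanged both in CI and on a developer machine.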
diff --git a/.gitignore b/.gitignore index ada5bbb6d1..de59d9a0a1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,18 @@ .DS_Store /vendor /dist -/local-archive \ No newline at end of file +/local-archive +/.vscode/*.sql +/.vscode/settings.json +/.vscode/tasks.json.old +.idea +debug +.bundle +*.swp +*.crt +*.csr +*.key +*.prof +*.test +/captive-core-*/ +/services/horizon/internal/db2/schema/generated diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 5d3a353f0f..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,38 +0,0 @@ -language: go -services: -- mysql -- postgres -go: -- '1.5' -- '1.6' -- tip -env: -- GO15VENDOREXPERIMENT="1" -install: -- go get github.com/Masterminds/glide -before_script: -script: -- glide install -- go test -race $(glide novendor) -before_deploy: -- go run ./support/scripts/build_release_artifacts/main.go -matrix: - fast_finish: true - allow_failures: - - go: tip -notifications: - slack: - on_success: change - on_failure: always - secure: Uhx8H2RFNsJ9GknDItSwhMOl5K3HTGNjifJwkLUiRio4JH4PzrkHDOzCOpEzrboviVF+Rch9VsC7HchIY2uyqeB9UW63dCMPeaYSs7rfNCmW8bTcGydeVOe0HV18mpM/4yPy/gsw5Zq+eGq7+p9NZtwkOHeQ/YtoSFcWbQQYs3CPZpBUP95KW1fGTn8OIW6nKkVA0Kyr6aXJFZB9AHIg24R9l2uraHdjg5EUJJO/4yR0GWC1pLTfUCqUt9mXPeGSQOhhvR+pCKSljY3pB6oex2hKH04u3U1iRzOBuKMPmMrcyJDzvVRkCaCn+0LPGZ3vgDDcDdcqYxEk1a1n27wzjqYlDeH4xaYTZGpB18LiX6MAm84+iviL5JILIMQ4NcqVNB2L37zl1UW+BMQUSH6+zQknHkGcOImU984PEdrhCM2uvknvp0+IGXdpyv9WwztzxhJOz6PAE1aA3O+BI1xuSft6sGY07Z0Q9iKO/uCWIHbsfLso16kIubiHzwjSAz90tbCc8VcR4EFeVqEoWm9QwRP0v6pclWMhj9bq3Oye1gKrSCwWPXCg8mRFCCuXgr4dev7I+vKcx43oIddgcx6vIROMYsgX/0gInBrkNxZw3Gu1YUXe1fuNvmCZWSlaWrO+9XWnFItnLs2ypnQjlguKzxgqmfqR0eXmMYBQrH18sxU= -deploy: - provider: releases - skip_cleanup: true - api_key: - secure: q8H6HZWdaVF9lW2svzV/xdVZzF8SOBrnqGleAXEtI8l9bykq6gWBxSrU4BSma3KRsIBy/G5z6DNFGO0lRTPMLJ6j6WybvPruW1bOpBgPgaOD6Jz+jr62VNlRr4eWot+UZNTvuJMbR/3qqzMee5pIqIjI2kmDYD6VnOB0vAK3It/JCIc+VDDGzROMi9+nGIsAt7mZkU5kNzLDzaC+92IopTi7vXUGJ0cUDjKF1aiHKCFcWfkNUlTQoPnItpGNzBzGoTOX06LR9X+Uf9w651za+vZ7c0Ftd/Aujvv73KfMV5pIJehPCxI5x/PfEZ3AZwrDYC0theZ+NDWjHz/C2O56Ybi5V9TkKG3Yjy6eNVt7txhq1BY2A4EwvX8Lf8zMiG2VQ0YMNM1qUV6LDjqq4uCOeBjBjtgpqQXwMAtPKH+UF0Z8dj41+GfVOKD/Tv6r2Ue6XHYtk1pJVgWJwImnMbxNArEWWe2gt8ZL5vk21Q1VJN4bF2o2o94j/OjC/Nrawb9wgJN6tcXNEOYZGZPgoviJI5isBZy7SFzVXZpFgFEapZ7Ira3xa8bjGTmG+wTCWGkIRg1uX5EqbOqU1pP9lUPPknstfEi3w30ZR/4wnOrfXBh8fjfxZnTz6S32yolYxWKeSFyR6CvHvtZovAL7nEdEUb3DGSrfk+D6LcmzGqn78L4= - file: dist/* - file_glob: true - on: - repo: stellar/go - tags: true - go: '1.6' diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 0cebe53c3d..a7e67b450f 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -1,17 +1,21 @@ { // See https://go.microsoft.com/fwlink/?LinkId=733558 // for the documentation about the tasks.json format - "version": "0.1.0", + "version": "2.0.0", "command": "go", - "isShellCommand": true, - "showOutput": "always", - "suppressTaskName": true, "options": { "cwd": "${workspaceRoot}" }, - "tasks": [{ - "taskName": "test all", - "isWatching": false, - "args": ["test", "./..."] - }] -} \ No newline at end of file + "tasks": [ + { + "label": "go", + "type": "shell", + "command": "go", + "problemMatcher": [], + "group": { + "_id": "build", + "isDefault": false + } + } + ] +} diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..83038df502 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,22 @@ +# Changelog +This repository adheres to [Semantic Versioning](http://semver.org/). 
+
+This monorepo contains a number of projects, individually versioned and released. Please consult the relevant changelog:
+
+* `horizon server` ([changelog](./services/horizon/CHANGELOG.md))
+* `horizonclient` ([changelog](./clients/horizonclient/CHANGELOG.md))
+* `txnbuild` ([changelog](./txnbuild/CHANGELOG.md))
+* `bridge` ([changelog](./services/bridge/CHANGELOG.md))
+* `compliance` ([changelog](./services/compliance/CHANGELOG.md))
+* `federation` ([changelog](./services/federation/CHANGELOG.md))
+* `ticker` ([changelog](./services/ticker/CHANGELOG.md))
+* `keystore` (experimental) ([changelog](./services/keystore/CHANGELOG.md))
+* `stellar-vanity-gen` ([changelog](./tools/stellar-vanity-gen/CHANGELOG.md))
+* `stellar-sign` ([changelog](./tools/stellar-sign/CHANGELOG.md))
+* `stellar-archivist` ([changelog](./tools/stellar-archivist/CHANGELOG.md))
+* `stellar-hd-wallet` ([changelog](./tools/stellar-hd-wallet/CHANGELOG.md))
+
+If a project is pre-v1.0, breaking changes may happen for minor version
+bumps. A breaking change will be clearly noted in the corresponding changelog.
+
+Official project releases may be found here: https://github.com/stellar/go/releases
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 26ba4d1ad0..378e90e5e5 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,42 @@
 # How to contribute
-Please read the [Contribution Guide](https://github.com/stellar/docs/blob/master/CONTRIBUTING.md).
+πŸ‘πŸŽ‰ First off, thanks for taking the time to contribute! πŸŽ‰πŸ‘
-Then please [sign the Contributor License Agreement](https://docs.google.com/forms/d/1g7EF6PERciwn7zfmfke5Sir2n10yddGGSXyZsq98tVY/viewform?usp=send_form).
+Check out the [Stellar Contribution Guide](https://github.com/stellar/.github/blob/master/CONTRIBUTING.md), which applies to all Stellar projects.
+
+## Style guides
+
+### Git Commit Messages
+
+* Use the present tense ("Add feature" not "Added feature")
+* Use the imperative mood ("Move cursor to..." not "Moves cursor to...")
+
+### Issues
+
+* Issues and PR titles start with:
+  * The package name most affected, ex. `ingest: fix...`.
+  * Or, for services and tools, the service most affected, ex. `services/horizon: fix...`, `services/ticker: add...`.
+  * Or, multiple package names separated by a comma when the fix addresses multiple packages worth noting, ex. `services/horizon, services/friendbot: fix...`.
+  * Or, `all:` when changes or an issue are broad, ex. `all: update...`.
+  * Or, `doc:` when changes or an issue are isolated to non-code documentation not limited to a single package.
+* Label issues with `bug` if they're clearly a bug.
+* Label issues with `feature request` if they're a feature request.
+
+### Pull Requests
+
+* PR titles follow the same rules as described in the [Issues](#Issues) section above.
+* PRs must update the [CHANGELOG](CHANGELOG.md) with a small description of the change.
+* PRs are merged into master or a release branch using squash merge.
+* Carefully think about where your PR fits according to [semver](https://semver.org). Target it at master if it's only a patch change; if it contains breaking changes or significant feature additions, set the base branch to the next major or minor release.
+* Keep PR scope narrow. Expectation: 20 minutes to review, max.
+* Explicitly differentiate refactoring PRs and feature PRs. Refactoring PRs don't change functionality. They usually touch a lot more code, and are reviewed in less detail. Avoid refactoring in feature PRs.
+
+### Go Style Guide
+
+* Use `gofmt`, or preferably `goimports`, to format code.
+* Follow [Effective Go](https://golang.org/doc/effective_go.html) and [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+
+### Go Coding conventions
+
+- Always document exported package elements: vars, consts, funcs, types, etc.
+- Tests are better than no tests.
diff --git a/DEVELOPING.md b/DEVELOPING.md
new file mode 100644
index 0000000000..d799900517
--- /dev/null
+++ b/DEVELOPING.md
@@ -0,0 +1,115 @@
+# Developing
+
+Welcome to the Stellar Go monorepo. These instructions help launch πŸš€ you into making and testing code changes to this repository.
+
+For details about what's in this repository and how it is organized, read the [README.md](README.md).
+
+If you're aiming to submit a contribution, make sure to also read the [contributing guidelines](CONTRIBUTING.md).
+
+If you're making changes to Horizon, look for documentation in its [docs](services/horizon/internal/docs) directory for specific instructions.
+
+## Requirements
+To check out, build, and run most tests, these tools are required:
+- Git
+- [Go](https://golang.org/dl) (this repository is officially supported on the last two releases of Go)
+
+To run some tests, these tools are also required:
+- PostgreSQL 9.6+ server running locally, or set the [environment variables](https://www.postgresql.org/docs/9.6/libpq-envars.html) (e.g. `PGHOST`, etc.) for an alternative host.
+- MySQL 10.1+ server running locally.
+
+## Get the code
+
+Check the code out anywhere; using a `GOPATH` is not required.
+
+```
+git clone https://github.com/stellar/go
+```
+
+## Installing dependencies
+
+Dependencies are managed using [Modules](https://github.com/golang/go/wiki/Modules). Dependencies for the packages you are building will be installed automatically when running any Go command that requires them. If you need to pre-download all dependencies for the repository for offline development, run `go mod download`.
+
+See [Dependency management](#dependency-management) for more details.
+
+## Running tests
+
+```
+go test ./...
+```
+
+## Running services/tools
+
+```
+go run ./services/<service-name>
+```
+
+```
+go run ./tools/<tool-name>
+```
+
+## Dependency management
+
+Dependencies are managed using [Modules](https://github.com/golang/go/wiki/Modules) and are tracked in the repository across three files:
+- [go.mod](go.mod): Contains a list of direct dependencies, and some indirect dependencies (see [why](https://github.com/golang/go/wiki/Modules#why-does-go-mod-tidy-record-indirect-and-test-dependencies-in-my-gomod)).
+- [go.sum](go.sum): Contains hashes for dependencies that are used for verifying downloaded dependencies.
+- [go.list](go.list): A file that is unique to this Go repository, containing the output of `go list -m all`. It captures all direct and indirect dependencies and their versions used in builds and tests within this repository. This is not a lock file; instead it helps us track over time which versions are being used for builds and tests, and to see when that changes in PR diffs.
+
+### Adding new dependencies
+
+Add new dependencies by adding the import paths to the code. The next time you execute a Go command the tool will update the `go.mod` and `go.sum` files.
+
+To add a specific version of a dependency use `go get`:
+
+```
+go get <dependency>@<version>
+```
+
+Go module files track the minimum dependency version required, not the exact dependency version that will be used.
To validate the version of the dependency being used update the `go.list` file by running `go mod -m all > go.list`. + +Before opening a PR make sure to run these commands to tidy the module files: +- `go mod tidy` +- `go list -m all > go.list` + +### Updating a dependency + +Update an existing dependency by using `go get`: + +``` +go get @ +``` + +Go modules files track the minimum dependency required, not the exact dependency version that will be used. To validate the version of the dependency being used update the `go.list` file by running `go mod -m all > go.list`. + +Before opening a PR make sure to run these commands to tidy the module files: +``` +go mod tidy +go list -m all > go.list +``` + +### Removing a dependency + +Remove a dependency by removing all import paths from the code, then use the following commands to remove any unneeded direct or indirect dependencies: + +``` +go mod tidy +go list -m all > go.list +``` + +Note: `go list -m all` may show that the dependency is still being used. It will be possible that the dependency is still an indirect dependency. If it's important to understand why the dependency is still being used, use `go mod why /...` and `go mod graph | grep ` to understand which modules are importing it. + +### Reviewing changes in dependencies + +When updating or adding dependencies it's critical that we review what the +changes are in those dependencies that we are introducing into our builds. When +dependencies change the diff for the `go.list` file may be too complex to +understand. In those situations use the [golistcmp] tool to get a list of +changing modules, as well as GitHub links for easy access to diff review. + +``` +git checkout master +go list -m -json all > go.list.master +git checkout +golistcmp go.list.master <(go list -m -json all) +``` + +[golistcmp]: https://github.com/stellar/golistcmp diff --git a/Gemfile b/Gemfile index dff936d473..8d9bc26e2d 100644 --- a/Gemfile +++ b/Gemfile @@ -1,4 +1,6 @@ -gem 'xdrgen' +source 'https://rubygems.org' + +gem 'xdrgen', git:'https://github.com/stellar/xdrgen.git', ref: 'master' gem 'pry' gem 'octokit' gem 'netrc' diff --git a/Gemfile.lock b/Gemfile.lock index c44657712f..723d0fb05a 100644 --- a/Gemfile.lock +++ b/Gemfile.lock @@ -1,48 +1,72 @@ -PATH - remote: /Users/nullstyle/src/stellar/xdrgen +GIT + remote: https://github.com/stellar/xdrgen.git + revision: 7d53fb17bcb6155d916db09e48c880aae42f624d + ref: master specs: - xdrgen (0.0.1) - activesupport (~> 4) + xdrgen (0.1.1) + activesupport (~> 6) memoist (~> 0.11.0) slop (~> 3.4) treetop (~> 1.5.3) GEM + remote: https://rubygems.org/ specs: - activesupport (4.2.4) - i18n (~> 0.7) - json (~> 1.7, >= 1.7.7) - minitest (~> 5.1) - thread_safe (~> 0.3, >= 0.3.4) - tzinfo (~> 1.1) - addressable (2.3.8) - coderay (1.1.0) - faraday (0.9.1) + activesupport (6.1.4.1) + concurrent-ruby (~> 1.0, >= 1.0.2) + i18n (>= 1.6, < 2) + minitest (>= 5.1) + tzinfo (~> 2.0) + zeitwerk (~> 2.3) + addressable (2.8.0) + public_suffix (>= 2.0.2, < 5.0) + coderay (1.1.3) + concurrent-ruby (1.1.9) + faraday (1.8.0) + faraday-em_http (~> 1.0) + faraday-em_synchrony (~> 1.0) + faraday-excon (~> 1.1) + faraday-httpclient (~> 1.0.1) + faraday-net_http (~> 1.0) + faraday-net_http_persistent (~> 1.1) + faraday-patron (~> 1.0) + faraday-rack (~> 1.0) multipart-post (>= 1.2, < 3) - i18n (0.7.0) - json (1.8.3) + ruby2_keywords (>= 0.0.4) + faraday-em_http (1.0.0) + faraday-em_synchrony (1.0.0) + faraday-excon (1.1.0) + faraday-httpclient (1.0.1) + faraday-net_http (1.0.1) + 
faraday-net_http_persistent (1.2.0) + faraday-patron (1.0.0) + faraday-rack (1.0.0) + i18n (1.8.11) + concurrent-ruby (~> 1.0) memoist (0.11.0) - method_source (0.8.2) - minitest (5.8.0) - multipart-post (2.0.0) - netrc (0.10.3) - octokit (4.1.0) - sawyer (~> 0.6.0, >= 0.5.3) + method_source (1.0.0) + minitest (5.14.4) + multipart-post (2.1.1) + netrc (0.11.0) + octokit (4.21.0) + faraday (>= 0.9) + sawyer (~> 0.8.0, >= 0.5.3) polyglot (0.3.5) - pry (0.10.1) - coderay (~> 1.1.0) - method_source (~> 0.8.1) - slop (~> 3.4) - rake (10.4.2) - sawyer (0.6.0) - addressable (~> 2.3.5) - faraday (~> 0.8, < 0.10) + pry (0.14.1) + coderay (~> 1.1) + method_source (~> 1.0) + public_suffix (4.0.6) + rake (13.0.6) + ruby2_keywords (0.0.5) + sawyer (0.8.2) + addressable (>= 2.3.5) + faraday (> 0.8, < 2.0) slop (3.6.0) - thread_safe (0.3.5) treetop (1.5.3) polyglot (~> 0.3) - tzinfo (1.2.2) - thread_safe (~> 0.1) + tzinfo (2.0.4) + concurrent-ruby (~> 1.0) + zeitwerk (2.5.1) PLATFORMS ruby @@ -53,3 +77,6 @@ DEPENDENCIES pry rake xdrgen! + +BUNDLED WITH + 2.2.29 diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..f90c8bde43 --- /dev/null +++ b/Makefile @@ -0,0 +1,38 @@ +# Docker build targets use an optional "TAG" environment +# variable can be set to use custom tag name. For example: +# TAG=my-registry.example.com/keystore:dev make keystore +XDRS = xdr/Stellar-SCP.x \ +xdr/Stellar-ledger-entries.x \ +xdr/Stellar-ledger.x \ +xdr/Stellar-overlay.x \ +xdr/Stellar-transaction.x \ +xdr/Stellar-types.x + +.PHONY: xdr + +keystore: + $(MAKE) -C services/keystore/ docker-build + +ticker: + $(MAKE) -C services/ticker/ docker-build + +friendbot: + $(MAKE) -C services/friendbot/ docker-build + +webauth: + $(MAKE) -C exp/services/webauth/ docker-build + +recoverysigner: + $(MAKE) -C exp/services/recoverysigner/ docker-build + +regulated-assets-approval-server: + $(MAKE) -C services/regulated-assets-approval-server/ docker-build + +gxdr/xdr_generated.go: $(XDRS) + go run github.com/xdrpp/goxdr/cmd/goxdr -p gxdr -enum-comments -o $@ $(XDRS) + go fmt $@ + +xdr/xdr_generated.go: $(XDRS) Rakefile Gemfile.lock + bundle exec rake xdr:generate + +xdr: gxdr/xdr_generated.go xdr/xdr_generated.go diff --git a/README.md b/README.md index f1ca39624c..9090d715f8 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,37 @@ -# Stellar Go -[![Build Status](https://travis-ci.org/stellar/go.svg?branch=master)](https://travis-ci.org/stellar/go) -[![GoDoc](https://godoc.org/github.com/stellar/go?status.svg)](https://godoc.org/github.com/stellar/go) - -This repo is the home for all of the public go code produced by SDF. In addition to various tools and services, this repository is the SDK from which you may develop your own applications that integrate with the stellar network. +
+# Stellar Go Monorepo
+
+*Creating equitable access to the global financial system*
+
+[![Build Status](https://circleci.com/gh/stellar/go.svg?style=shield)](https://circleci.com/gh/stellar/go)
+[![GoDoc](https://godoc.org/github.com/stellar/go?status.svg)](https://godoc.org/github.com/stellar/go)
+[![Go Report Card](https://goreportcard.com/badge/github.com/stellar/go)](https://goreportcard.com/report/github.com/stellar/go)
+ +This repo is the home for all of the public Go code produced by the [Stellar Development Foundation]. + +This repo contains various tools and services that you can use and deploy, as well as the SDK you can use to develop applications that integrate with the Stellar network. + +## Package Index + +* [Horizon Server](services/horizon): Full-featured API server for Stellar network +* [Go Horizon SDK - horizonclient](clients/horizonclient): Client for Horizon server (queries and transaction submission) +* [Go Horizon SDK - txnbuild](txnbuild): Construct Stellar transactions and operations +* [Ticker](services/ticker): An API server that provides statistics about assets and markets on the Stellar network +* [Keystore](services/keystore): An API server that is used to store and manage encrypted keys for Stellar client applications +* Servers for Anchors & Financial Institutions + * [Compliance Server](services/compliance): Allows financial institutions to exchange KYC information + * [Federation Server](services/federation): Allows organizations to provide addresses for users (`jane*examplebank.com`) ## Dependencies -This repository depends upon a [number of external dependencies](./glide.yaml), and we use [Glide](https://glide.sh/) to manage them. Glide is used to populate the [vendor directory](http://glide.readthedocs.io/en/latest/vendor/), ensuring that builds are reproducible even as upstream dependencies are changed. Please see the [Glide](http://glide.sh/) website for installation instructions. +This repository is officially supported on the last two releases of Go. -When creating this project, we had to decide whether or not we committed our external dependencies to the repo. We decided that we would not, by default, do so. This lets us avoid the diff churn associated with updating dependencies while allowing an acceptable path to get reproducible builds. To do so, simply install glide and run `glide install` in your checkout of the code. We realize this is a judgement call; Please feel free to open an issue if you would like to make a case that we change this policy. +It depends on a [number of external dependencies](./go.mod), and uses Go [Modules](https://github.com/golang/go/wiki/Modules) to manage them. Running any `go` command will automatically download dependencies required for that operation. +You can choose to checkout this repository into a [GOPATH](https://github.com/golang/go/wiki/GOPATH) or into any directory. ## Directory Layout @@ -34,14 +56,29 @@ In addition to the packages described above, this repository contains various pa While much of the code in individual packages is organized based upon different developers' personal preferences, many of the packages follow a simple convention for organizing the declarations inside of a package that aim to aid in your ability to find code. -Every package should have a `main.go` file. This file contains the package documentation (unless a separate `doc.go` file is used), _all_ of the exported vars, consts, types and funcs for the package. It may also contain any unexported declarations that are not tied to any particular type. In addition to `main.go`, a package often has a single go source file for each type that has method declarations. This file uses the snake case form of the type name (for example `loggly_hook.go` should contain methods for the type `LogglyHook`). 
+In each package, there may be one or more of a set of common files:
+
+- *errors.go*: This file should contain declarations (both types and vars) for errors that are used by the package.
+- *example_test.go*: This file should contain example tests, as described at https://blog.golang.org/examples.
+- *main.go/internal.go* (**deprecated**): Older packages may have a `main.go` (public symbols) or `internal.go` (private symbols). These files contain, respectively, the exported and unexported vars, consts, types and funcs for the package. New packages do not follow this pattern, and instead follow the standard Go convention of co-locating structs and their methods in the same files.
+- *main.go* (**new convention**): If present, this file contains a `main` function as part of an executable `main` package.
 
-Each non-test file can have a test counterpart like normal, whose name ends with `_test.go`. Additionally, an `assertions_test.go` file should contain any custom assertions that are related to the package in some way. This allows a developer to include the package in their tests to gain access to assertions that make writing tests that involve the package more simple. Finally, a `helpers_test.go` file can contain test utility functions that are not necessarily custom assertions.
+In addition to the above files, a package often has files that contain code specific to one declared type. Such a file uses the snake case form of the type name (for example `loggly_hook.go` would correspond to the type `LogglyHook`). It should contain method declarations, interface implementation assertions and any other declarations that are tied solely to that type.
+
+Each non-test file can have a test counterpart like normal, whose name ends with `_test.go`. The common files described above also have their own test counterparts; for example, `internal_test.go` should contain tests that test unexported behavior and, more commonly, test helpers that are unexported.
 
 Generally, file contents are sorted by exported/unexported, then declaration type (ordered as consts, vars, types, then funcs), then finally alphabetically.
 
+### Test helpers
+
+Often, we provide test packages that aid in the creation of tests that interact with our other packages. For example, the `support/db` package has the `support/db/dbtest` package underneath it that contains elements that make it easier to test code that accesses a SQL database. We've found that this pattern of having a separate test package maximizes flexibility and simplifies package dependencies.
+
+### Contributing
+
+Contributions are welcome! See [CONTRIBUTING.md](CONTRIBUTING.md) for more details.
+
+### Developing
 
-## Coding conventions
+See [DEVELOPING.md](DEVELOPING.md) for helpful instructions for getting started developing code in this repository.
 
-- Always document exported package elements: vars, consts, funcs, types, etc.
-- Tests are better than no tests.
\ No newline at end of file
+[Stellar Development Foundation]: https://stellar.org
diff --git a/Rakefile b/Rakefile
index 6280cdc2f3..2cb03a9312 100644
--- a/Rakefile
+++ b/Rakefile
@@ -8,12 +8,12 @@ namespace :xdr do
   # Prior to launch, we should be separating our .x files into a separate
   # repo, and should be able to improve this integration.
HAYASHI_XDR = [ - "src/xdr/Stellar-types.x", - "src/xdr/Stellar-ledger-entries.x", - "src/xdr/Stellar-transaction.x", - "src/xdr/Stellar-ledger.x", - "src/xdr/Stellar-overlay.x", - "src/xdr/Stellar-SCP.x", + "src/xdr/Stellar-SCP.x", + "src/xdr/Stellar-ledger-entries.x", + "src/xdr/Stellar-ledger.x", + "src/xdr/Stellar-overlay.x", + "src/xdr/Stellar-transaction.x", + "src/xdr/Stellar-types.x" ] LOCAL_XDR_PATHS = HAYASHI_XDR.map{ |src| "xdr/" + File.basename(src) } @@ -40,7 +40,6 @@ namespace :xdr do require "pathname" require "xdrgen" require 'fileutils' - FileUtils.rm_f("xdr/xdr_generated.go") compilation = Xdrgen::Compilation.new( LOCAL_XDR_PATHS, @@ -49,6 +48,14 @@ namespace :xdr do language: :go ) compilation.compile + + xdr_generated = IO.read("xdr/xdr_generated.go") + IO.write("xdr/xdr_generated.go", <<~EOS) + //lint:file-ignore S1005 The issue should be fixed in xdrgen. Unfortunately, there's no way to ignore a single file in staticcheck. + //lint:file-ignore U1000 fmtTest is not needed anywhere, should be removed in xdrgen. + #{xdr_generated} + EOS + system("gofmt -w xdr/xdr_generated.go") end end diff --git a/address/main.go b/address/main.go index e4f61ff87b..7791cb509e 100644 --- a/address/main.go +++ b/address/main.go @@ -1,5 +1,5 @@ // Package address provides utility functions for working with stellar -// addresses. See https://www.stellar.org/developers/guides/concepts/federation. +// addresses. See https://developers.stellar.org/docs/glossary/federation/ // html#stellar-addresses for more on addresses. package address diff --git a/address/main_test.go b/address/main_test.go index ee59a0b2a9..449371e7eb 100644 --- a/address/main_test.go +++ b/address/main_test.go @@ -20,7 +20,7 @@ func TestNew(t *testing.T) { for _, c := range cases { actual := New(c.Name, c.Domain) - assert.Equal(t, actual, c.ExpectedAddress) + assert.Equal(t, c.ExpectedAddress, actual) } } @@ -43,10 +43,10 @@ func TestSplit(t *testing.T) { name, domain, err := Split(c.Address) if c.ExpectedError == nil { - assert.Equal(t, name, c.ExpectedName) - assert.Equal(t, domain, c.ExpectedDomain) + assert.Equal(t, c.ExpectedName, name) + assert.Equal(t, c.ExpectedDomain, domain) } else { - assert.Equal(t, errors.Cause(err), c.ExpectedError) + assert.Equal(t, c.ExpectedError, errors.Cause(err)) } } } diff --git a/amount/main.go b/amount/main.go index f3c1ec8158..b1b7bc1474 100644 --- a/amount/main.go +++ b/amount/main.go @@ -10,18 +10,34 @@ package amount import ( - "fmt" "math/big" + "regexp" "strconv" + "strings" + "github.com/stellar/go/support/errors" "github.com/stellar/go/xdr" ) -// One is the value of one whole unit of currency. Stellar uses 7 fixed digits -// for fractional values, thus One is 10 million (10^7) -const One = 10000000 +// One is the value of one whole unit of currency. Stellar uses 7 fixed digits +// for fractional values, thus One is 10 million (10^7). +const ( + One = 10000000 +) + +var ( + bigOne = big.NewRat(One, 1) + // validAmountSimple is a simple regular expression checking if a string looks like + // a number, more or less. The details will be checked in `math/big` internally. + // What we want to prevent is passing very big numbers like `1e9223372036854775807` + // to `big.Rat.SetString` triggering long calculations. + // Note: {1,20} because the biggest amount you can use in Stellar is: + // len("922337203685.4775807") = 20. 
+ validAmountSimple = regexp.MustCompile("^-?[.0-9]{1,20}$") + negativePositiveNumberOnly = regexp.MustCompile("^-?[0-9]+$") +) -// MustParse is the panicking version of Parse +// MustParse is the panicking version of Parse. func MustParse(v string) xdr.Int64 { ret, err := Parse(v) if err != nil { @@ -30,35 +46,80 @@ func MustParse(v string) xdr.Int64 { return ret } -// Parse parses the provided as a stellar "amount", i.e. A 64-bit signed integer +// Parse parses the provided as a stellar "amount", i.e. a 64-bit signed integer // that represents a decimal number with 7 digits of significance in the -// fractional portion of the number. +// fractional portion of the number, and returns a xdr.Int64. func Parse(v string) (xdr.Int64, error) { - var f, o, r big.Rat + i, err := ParseInt64(v) + if err != nil { + return xdr.Int64(0), err + } + return xdr.Int64(i), nil +} + +// ParseInt64 parses the provided as a stellar "amount", i.e. a 64-bit signed +// integer that represents a decimal number with 7 digits of significance in +// the fractional portion of the number. +func ParseInt64(v string) (int64, error) { + if !validAmountSimple.MatchString(v) { + return 0, errors.Errorf("invalid amount format: %s", v) + } - _, ok := f.SetString(v) - if !ok { - return xdr.Int64(0), fmt.Errorf("cannot parse amount: %s", v) + r := &big.Rat{} + if _, ok := r.SetString(v); !ok { + return 0, errors.Errorf("cannot parse amount: %s", v) } - o.SetInt64(One) - r.Mul(&f, &o) + r.Mul(r, bigOne) + if !r.IsInt() { + return 0, errors.Errorf("more than 7 significant digits: %s", v) + } - is := r.FloatString(0) - i, err := strconv.ParseInt(is, 10, 64) + i, err := strconv.ParseInt(r.FloatString(0), 10, 64) if err != nil { - return xdr.Int64(0), err + return 0, errors.Wrapf(err, "amount outside bounds of int64: %s", v) } - return xdr.Int64(i), nil + return i, nil } -// String returns an "amount string" from the provided raw value `v`. -func String(v xdr.Int64) string { - var f, o, r big.Rat +// IntStringToAmount converts string integer value and converts it to stellar +// "amount". In other words, it divides the given string integer value by 10^7 +// and returns the string representation of that number. +// It is safe to use with values exceeding int64 limits. +func IntStringToAmount(v string) (string, error) { + if !negativePositiveNumberOnly.MatchString(v) { + return "", errors.Errorf("invalid amount format: %s", v) + } + + negative := false + if v[0] == '-' { + negative = true + v = v[1:] + } + + l := len(v) + var r string + if l <= 7 { + r = "0." + strings.Repeat("0", 7-l) + v + } else { + r = v[0:l-7] + "." + v[l-7:l] + } - f.SetInt64(int64(v)) - o.SetInt64(One) - r.Quo(&f, &o) + if negative { + r = "-" + r + } + + return r, nil +} + +// String returns an "amount string" from the provided raw xdr.Int64 value `v`. +func String(v xdr.Int64) string { + return StringFromInt64(int64(v)) +} +// StringFromInt64 returns an "amount string" from the provided raw int64 value `v`. 
+func StringFromInt64(v int64) string { + r := big.NewRat(v, 1) + r.Quo(r, bigOne) return r.FloatString(7) } diff --git a/amount/main_test.go b/amount/main_test.go index 9120a8429e..736c88f82f 100644 --- a/amount/main_test.go +++ b/amount/main_test.go @@ -1,6 +1,8 @@ package amount_test import ( + "fmt" + "strings" "testing" "github.com/stellar/go/amount" @@ -8,19 +10,42 @@ import ( ) var Tests = []struct { - S string - I xdr.Int64 + S string + I xdr.Int64 + valid bool }{ - {"100.0000000", 1000000000}, - {"100.0000001", 1000000001}, - {"123.0000001", 1230000001}, + {"100.0000000", 1000000000, true}, + {"-100.0000000", -1000000000, true}, + {"100.0000001", 1000000001, true}, + {"123.0000001", 1230000001, true}, + {"123.00000001", 0, false}, + {"922337203685.4775807", 9223372036854775807, true}, + {"922337203685.4775808", 0, false}, + {"922337203686", 0, false}, + {"-922337203685.4775808", -9223372036854775808, true}, + {"-922337203685.4775809", 0, false}, + {"-922337203686", 0, false}, + {"1000000000000.0000000", 0, false}, + {"1000000000000", 0, false}, + {"-0.5000000", -5000000, true}, + {"0.5000000", 5000000, true}, + {"0.12345678", 0, false}, + // Expensive inputs: + {strings.Repeat("1", 1000000), 0, false}, + {"1E9223372036854775807", 0, false}, + {"1e9223372036854775807", 0, false}, + {"Inf", 0, false}, } func TestParse(t *testing.T) { for _, v := range Tests { o, err := amount.Parse(v.S) - if err != nil { - t.Errorf("Couldn't parse %s: %v+", v.S, err) + if !v.valid && err == nil { + t.Errorf("expected err for input %s", v.S) + continue + } + if v.valid && err != nil { + t.Errorf("couldn't parse %s: %v", v.S, err) continue } @@ -32,6 +57,10 @@ func TestParse(t *testing.T) { func TestString(t *testing.T) { for _, v := range Tests { + if !v.valid { + continue + } + o := amount.String(v.I) if o != v.S { @@ -39,3 +68,56 @@ func TestString(t *testing.T) { } } } + +func TestIntStringToAmount(t *testing.T) { + var testCases = []struct { + Output string + Input string + Valid bool + }{ + {"100.0000000", "1000000000", true}, + {"-100.0000000", "-1000000000", true}, + {"100.0000001", "1000000001", true}, + {"123.0000001", "1230000001", true}, + {"922337203685.4775807", "9223372036854775807", true}, + {"922337203685.4775808", "9223372036854775808", true}, + {"92233.7203686", "922337203686", true}, + {"-922337203685.4775808", "-9223372036854775808", true}, + {"-922337203685.4775809", "-9223372036854775809", true}, + {"-92233.7203686", "-922337203686", true}, + {"1000000000000.0000000", "10000000000000000000", true}, + {"0.0000000", "0", true}, + // Expensive inputs when using big.Rat: + {"10000000000000.0000000", "1" + strings.Repeat("0", 20), true}, + {"-10000000000000.0000000", "-1" + strings.Repeat("0", 20), true}, + {"1" + strings.Repeat("0", 1000-7) + ".0000000", "1" + strings.Repeat("0", 1000), true}, + {"1" + strings.Repeat("0", 1000000-7) + ".0000000", "1" + strings.Repeat("0", 1000000), true}, + // Invalid inputs + {"", "nan", false}, + {"", "", false}, + {"", "-", false}, + {"", "1E9223372036854775807", false}, + {"", "1e9223372036854775807", false}, + {"", "Inf", false}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s to %s (valid = %t)", tc.Input, tc.Output, tc.Valid), func(t *testing.T) { + o, err := amount.IntStringToAmount(tc.Input) + + if !tc.Valid && err == nil { + t.Errorf("expected err for input %s (output: %s)", tc.Input, tc.Output) + return + } + if tc.Valid && err != nil { + t.Errorf("couldn't parse %s: %v", tc.Input, err) + return + } + + if o != tc.Output { 
+ t.Errorf("%s converted to %s, not %s", tc.Input, o, tc.Output) + } + }) + } + +} diff --git a/benchmarks/xdr_test.go b/benchmarks/xdr_test.go new file mode 100644 index 0000000000..5f6e232d52 --- /dev/null +++ b/benchmarks/xdr_test.go @@ -0,0 +1,201 @@ +package benchmarks + +import ( + "bytes" + "encoding/base64" + "testing" + + xdr3 "github.com/stellar/go-xdr/xdr3" + "github.com/stellar/go/gxdr" + "github.com/stellar/go/xdr" + goxdr "github.com/xdrpp/goxdr/xdr" +) + +const input64 = "AAAAAgAAAACfHrX0tYB0gpXuJYTN9os06cdF62KAaqY9jid+777eyQAAC7gCM9czAAi/DQAAAAEAAAAAAAAAAAAAAABhga2dAAAAAAAAAAMAAAAAAAAADAAAAAAAAAABTU9CSQAAAAA8cTArnmXa4wEQJxDHOw5SwBaDVjBfAP5lRMNZkRtlZAAAAAAG42RBAAf7lQCYloAAAAAAMgbg0AAAAAAAAAADAAAAAU1PQkkAAAAAPHEwK55l2uMBECcQxzsOUsAWg1YwXwD+ZUTDWZEbZWQAAAAAAAAADkpyV7kAARBNABMS0AAAAAAyBuDRAAAAAAAAAAMAAAABTU9CSQAAAAA8cTArnmXa4wEQJxDHOw5SwBaDVjBfAP5lRMNZkRtlZAAAAAAAAAAclOSvewAIl5kAmJaAAAAAADIG4NIAAAAAAAAAAe++3skAAABAs2jt6+cyeyFvXVFphBcwt18GXnj7Jwa+hWQRyaBmPOSR2415GBi8XY3lC4m4aX9S322HvHjrxgQiar7KjgnQDw==" + +var input = func() []byte { + decoded, err := base64.StdEncoding.DecodeString(input64) + if err != nil { + panic(err) + } + return decoded +}() + +var xdrInput = func() xdr.TransactionEnvelope { + var te xdr.TransactionEnvelope + if err := te.UnmarshalBinary(input); err != nil { + panic(err) + } + return te +}() + +var gxdrInput = func() gxdr.TransactionEnvelope { + var te gxdr.TransactionEnvelope + // note goxdr will panic if there's a marshaling error. + te.XdrMarshal(&goxdr.XdrIn{In: bytes.NewReader(input)}, "") + return te +}() + +func BenchmarkXDRUnmarshalWithReflection(b *testing.B) { + var ( + r bytes.Reader + te xdr.TransactionEnvelope + ) + b.ReportAllocs() + for i := 0; i < b.N; i++ { + r.Reset(input) + _, _ = xdr3.Unmarshal(&r, &te) + } +} + +func BenchmarkXDRUnmarshal(b *testing.B) { + var te xdr.TransactionEnvelope + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _ = te.UnmarshalBinary(input) + } +} + +func BenchmarkGXDRUnmarshal(b *testing.B) { + var ( + te gxdr.TransactionEnvelope + r bytes.Reader + ) + b.ReportAllocs() + for i := 0; i < b.N; i++ { + r.Reset(input) + te.XdrMarshal(&goxdr.XdrIn{In: &r}, "") + } +} + +func BenchmarkXDRMarshalWithReflection(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _ = xdr3.Marshal(&bytes.Buffer{}, xdrInput) + } +} + +func BenchmarkXDRMarshal(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _ = xdrInput.MarshalBinary() + } +} + +func BenchmarkXDRMarshalWithEncodingBuffer(b *testing.B) { + b.ReportAllocs() + e := xdr.NewEncodingBuffer() + for i := 0; i < b.N; i++ { + _, _ = e.UnsafeMarshalBinary(xdrInput) + } +} + +func BenchmarkGXDRMarshal(b *testing.B) { + var output bytes.Buffer + b.ReportAllocs() + for i := 0; i < b.N; i++ { + output.Reset() + gxdrInput.XdrMarshal(&goxdr.XdrOut{Out: &output}, "") + } +} + +func BenchmarkXDRMarshalHex(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _ = xdr.MarshalHex(xdrInput) + } +} + +func BenchmarkXDRMarshalHexWithEncodingBuffer(b *testing.B) { + b.ReportAllocs() + e := xdr.NewEncodingBuffer() + for i := 0; i < b.N; i++ { + _, _ = e.MarshalHex(xdrInput) + } +} + +func BenchmarkXDRUnsafeMarshalHexWithEncodingBuffer(b *testing.B) { + b.ReportAllocs() + e := xdr.NewEncodingBuffer() + for i := 0; i < b.N; i++ { + _, _ = e.UnsafeMarshalHex(xdrInput) + } +} + +func BenchmarkXDRMarshalBase64(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + _, _ = xdr.MarshalBase64(xdrInput) + } +} + +func 
BenchmarkXDRMarshalBase64WithEncodingBuffer(b *testing.B) { + b.ReportAllocs() + e := xdr.NewEncodingBuffer() + for i := 0; i < b.N; i++ { + _, _ = e.MarshalBase64(xdrInput) + } +} + +func BenchmarkXDRUnsafeMarshalBase64WithEncodingBuffer(b *testing.B) { + b.ReportAllocs() + e := xdr.NewEncodingBuffer() + for i := 0; i < b.N; i++ { + _, _ = e.UnsafeMarshalBase64(xdrInput) + } +} + +var ledgerKeys = []xdr.LedgerKey{ + { + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.LedgerKeyAccount{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + }, + }, + { + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.LedgerKeyTrustLine{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB").ToTrustLineAsset(), + }, + }, + { + Type: xdr.LedgerEntryTypeOffer, + Offer: &xdr.LedgerKeyOffer{ + SellerId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + OfferId: xdr.Int64(3), + }, + }, + { + Type: xdr.LedgerEntryTypeData, + Data: &xdr.LedgerKeyData{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + DataName: "foobar", + }, + }, + { + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.LedgerKeyClaimableBalance{ + BalanceId: xdr.ClaimableBalanceId{ + Type: 0, + V0: &xdr.Hash{0xca, 0xfe, 0xba, 0xbe}, + }, + }, + }, + { + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LedgerKeyLiquidityPool{ + LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe}, + }, + }, +} + +func BenchmarkXDRMarshalCompress(b *testing.B) { + b.ReportAllocs() + e := xdr.NewEncodingBuffer() + for i := 0; i < b.N; i++ { + for _, lk := range ledgerKeys { + _, _ = e.LedgerKeyUnsafeMarshalBinaryCompress(lk) + } + } +} diff --git a/build/account_merge.go b/build/account_merge.go deleted file mode 100644 index c8b5cd581f..0000000000 --- a/build/account_merge.go +++ /dev/null @@ -1,52 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/xdr" -) - -// AccountMerge groups the creation of a new AccountMergeBuilder with a call to Mutate. -func AccountMerge(muts ...interface{}) (result AccountMergeBuilder) { - result.Mutate(muts...) - return -} - -// AccountMergeMutator is a interface that wraps the -// MutateAccountMerge operation. types may implement this interface to -// specify how they modify an xdr.AccountMergeBuilder object -type AccountMergeMutator interface { - MutateAccountMerge(*AccountMergeBuilder) error -} - -// AccountMergeBuilder represents a transaction that is being built. -type AccountMergeBuilder struct { - O xdr.Operation - Destination xdr.AccountId - Err error -} - -// Mutate applies the provided mutators to this builder's payment or operation. 
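// For context on how the builders deleted in this change were driven: each
// variadic constructor (AccountMerge, AllowTrust, ChangeTrust, ...) forwards
// its arguments to the builder's Mutate method, which type-switches on every
// mutator and records the first error it hits. The caller-side shape,
// condensed from the ExampleAccountMerge test that is deleted further down in
// this diff (seed and the destination address are that example's own values):
//
//	tx := Transaction(
//		SourceAccount{seed},
//		Sequence{1},
//		AccountMerge(
//			Destination{"GBDT3K42LOPSHNAEHEJ6AVPADIJ4MAR64QEKKW2LQPBSKLYD22KUEH4P"},
//		),
//	)
//	txe := tx.Sign(seed)
//	txeB64, _ := txe.Base64()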
-func (b *AccountMergeBuilder) Mutate(muts ...interface{}) { - for _, m := range muts { - var err error - switch mut := m.(type) { - case AccountMergeMutator: - err = mut.MutateAccountMerge(b) - case OperationMutator: - err = mut.MutateOperation(&b.O) - default: - err = errors.New("Mutator type not allowed") - } - - if err != nil { - b.Err = err - return - } - } -} - -// MutateAccountMerge for Destination sets the AccountMergeBuilder's Destination field -func (m Destination) MutateAccountMerge(o *AccountMergeBuilder) error { - return setAccountId(m.AddressOrSeed, &o.Destination) -} diff --git a/build/account_merge_test.go b/build/account_merge_test.go deleted file mode 100644 index 85f64157d4..0000000000 --- a/build/account_merge_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package build - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("AccountMergeBuilder Mutators", func() { - - var ( - subject AccountMergeBuilder - mut interface{} - - address = "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - bad = "foo" - ) - - JustBeforeEach(func() { - subject = AccountMergeBuilder{} - subject.Mutate(mut) - }) - - Describe("Destination", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = Destination{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.Destination.Equals(aid)).To(BeTrue()) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = Destination{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("SourceAccount", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = SourceAccount{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.O.SourceAccount.Equals(aid)).To(BeTrue()) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SourceAccount{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - // -}) diff --git a/build/allow_trust.go b/build/allow_trust.go deleted file mode 100644 index df08463320..0000000000 --- a/build/allow_trust.go +++ /dev/null @@ -1,80 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/xdr" -) - -// AllowTrust groups the creation of a new AllowTrustBuilder with a call to Mutate. -func AllowTrust(muts ...interface{}) (result AllowTrustBuilder) { - result.Mutate(muts...) - return -} - -// AllowTrustMutator is a interface that wraps the -// MutateAllowTrust operation. types may implement this interface to -// specify how they modify an xdr.AllowTrustOp object -type AllowTrustMutator interface { - MutateAllowTrust(*xdr.AllowTrustOp) error -} - -// AllowTrustBuilder represents a transaction that is being built. -type AllowTrustBuilder struct { - O xdr.Operation - AT xdr.AllowTrustOp - Err error -} - -// Mutate applies the provided mutators to this builder's payment or operation. 
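// For context: the AllowTrustAsset mutator handled a little further down pads
// the asset code into the fixed-size XDR field, choosing AssetCode4 for 1-4
// character codes and AssetCode12 for 5-12 characters; any other length is
// rejected. Condensed from the tests deleted below ("..." marks the remaining
// zero padding, and address stands for the trustor's account address):
//
//	AllowTrust(Trustor{address}, AllowTrustAsset{"USD"}, Authorize{true})
//	// -> AssetCode4  = [4]byte{'U', 'S', 'D', 0}
//	AllowTrust(Trustor{address}, AllowTrustAsset{"ABCDEF"}, Authorize{true})
//	// -> AssetCode12 = [12]byte{'A', 'B', 'C', 'D', 'E', 'F', 0, ...}
//	AllowTrust(AllowTrustAsset{""})
//	// -> Err: "Asset code length is invalid"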
-func (b *AllowTrustBuilder) Mutate(muts ...interface{}) { - for _, m := range muts { - var err error - switch mut := m.(type) { - case AllowTrustMutator: - err = mut.MutateAllowTrust(&b.AT) - case OperationMutator: - err = mut.MutateOperation(&b.O) - default: - err = errors.New("Mutator type not allowed") - } - - if err != nil { - b.Err = err - return - } - } -} - -// MutateAllowTrust for Authorize sets the AllowTrustOp's Authorize field -func (m Authorize) MutateAllowTrust(o *xdr.AllowTrustOp) error { - o.Authorize = m.Value - return nil -} - -// MutateAllowTrust for Asset sets the AllowTrustOp's Asset field -func (m AllowTrustAsset) MutateAllowTrust(o *xdr.AllowTrustOp) (err error) { - length := len(m.Code) - - switch { - case length >= 1 && length <= 4: - var code [4]byte - byteArray := []byte(m.Code) - copy(code[:], byteArray[0:length]) - o.Asset, err = xdr.NewAllowTrustOpAsset(xdr.AssetTypeAssetTypeCreditAlphanum4, code) - case length >= 5 && length <= 12: - var code [12]byte - byteArray := []byte(m.Code) - copy(code[:], byteArray[0:length]) - o.Asset, err = xdr.NewAllowTrustOpAsset(xdr.AssetTypeAssetTypeCreditAlphanum12, code) - default: - err = errors.New("Asset code length is invalid") - } - - return -} - -// MutateAllowTrust for Trustor sets the AllowTrustOp's Trustor field -func (m Trustor) MutateAllowTrust(o *xdr.AllowTrustOp) error { - return setAccountId(m.Address, &o.Trustor) -} diff --git a/build/allow_trust_test.go b/build/allow_trust_test.go deleted file mode 100644 index 8d1fe14e6a..0000000000 --- a/build/allow_trust_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package build - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("AllowTrustBuilder Mutators", func() { - - var ( - subject AllowTrustBuilder - mut interface{} - - address = "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - bad = "foo" - ) - - JustBeforeEach(func() { - subject = AllowTrustBuilder{} - subject.Mutate(mut) - }) - - Describe("Trustor", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = Trustor{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.AT.Trustor.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = Trustor{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("SourceAccount", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = SourceAccount{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.O.SourceAccount.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SourceAccount{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("AllowTrustAsset", func() { - Context("AssetTypeCreditAlphanum4", func() { - BeforeEach(func() { - mut = AllowTrustAsset{"USD"} - }) - - It("sets Asset properly", func() { - Expect(subject.AT.Asset.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum4)) - Expect(*subject.AT.Asset.AssetCode4).To(Equal([4]byte{'U', 'S', 'D', 0})) - 
Expect(subject.AT.Asset.AssetCode12).To(BeNil()) - }) - }) - - Context("AssetTypeCreditAlphanum12", func() { - BeforeEach(func() { - mut = AllowTrustAsset{"ABCDEF"} - }) - - It("sets Asset properly", func() { - Expect(subject.AT.Asset.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum12)) - Expect(subject.AT.Asset.AssetCode4).To(BeNil()) - Expect(*subject.AT.Asset.AssetCode12).To(Equal([12]byte{'A', 'B', 'C', 'D', 'E', 'F', 0, 0, 0, 0, 0, 0})) - }) - }) - - Context("asset code length invalid", func() { - Context("empty", func() { - BeforeEach(func() { - mut = AllowTrustAsset{""} - }) - - It("failed", func() { - Expect(subject.Err).To(MatchError("Asset code length is invalid")) - }) - }) - - Context("too long", func() { - BeforeEach(func() { - mut = AllowTrustAsset{"1234567890123"} - }) - - It("failed", func() { - Expect(subject.Err).To(MatchError("Asset code length is invalid")) - }) - }) - }) - }) - - Describe("Authorize", func() { - Context("when equal true", func() { - BeforeEach(func() { - mut = Authorize{true} - }) - - It("sets authorize flag properly", func() { - Expect(subject.AT.Authorize).To(Equal(true)) - }) - }) - - Context("when equal false", func() { - BeforeEach(func() { - subject.AT.Authorize = true - mut = Authorize{false} - }) - - It("sets authorize flag properly", func() { - Expect(subject.AT.Authorize).To(Equal(false)) - }) - }) - }) -}) diff --git a/build/change_trust.go b/build/change_trust.go deleted file mode 100644 index 72dddd9904..0000000000 --- a/build/change_trust.go +++ /dev/null @@ -1,101 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/amount" - "github.com/stellar/go/xdr" -) - -// ChangeTrust groups the creation of a new ChangeTrustBuilder with a call to Mutate. -func ChangeTrust(muts ...interface{}) (result ChangeTrustBuilder) { - result.Mutate(muts...) - return -} - -// ChangeTrustMutator is a interface that wraps the -// MutateChangeTrust operation. types may implement this interface to -// specify how they modify an xdr.ChangeTrustOp object -type ChangeTrustMutator interface { - MutateChangeTrust(*xdr.ChangeTrustOp) error -} - -// ChangeTrustBuilder represents a transaction that is being built. -type ChangeTrustBuilder struct { - O xdr.Operation - CT xdr.ChangeTrustOp - Err error -} - -// Mutate applies the provided mutators to this builder's payment or operation. 
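// For context: the Trust and RemoveTrust helpers defined a little further down
// wrap this builder. Trust defaults the trustline limit to MaxLimit
// (922337203685.4775807, i.e. math.MaxInt64 stroops) whenever no Limit mutator
// is supplied, and RemoveTrust is simply Trust with Limit("0"). Condensed from
// the change-trust examples deleted later in this diff (issuer stands for the
// issuing account's address):
//
//	Trust("USD", issuer, Limit("100.25")) // explicit limit
//	Trust("USD", issuer)                  // limit defaults to MaxLimit
//	RemoveTrust("USD", issuer)            // drops the trustline limit to 0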
-func (b *ChangeTrustBuilder) Mutate(muts ...interface{}) { - for _, m := range muts { - var err error - switch mut := m.(type) { - case ChangeTrustMutator: - err = mut.MutateChangeTrust(&b.CT) - case OperationMutator: - err = mut.MutateOperation(&b.O) - default: - err = errors.New("Mutator type not allowed") - } - - if err != nil { - b.Err = err - return - } - } -} - -// MutateChangeTrust for Asset sets the ChangeTrustOp's Line field -func (m Asset) MutateChangeTrust(o *xdr.ChangeTrustOp) (err error) { - if m.Native { - return errors.New("Native asset not allowed") - } - - o.Line, err = m.ToXdrObject() - return -} - -// MutateChangeTrust for Limit sets the ChangeTrustOp's Limit field -func (m Limit) MutateChangeTrust(o *xdr.ChangeTrustOp) (err error) { - o.Limit, err = amount.Parse(string(m)) - return -} - -// Trust is a helper that creates ChangeTrustBuilder -func Trust(code, issuer string, args ...interface{}) (result ChangeTrustBuilder) { - mutators := []interface{}{ - CreditAsset(code, issuer), - } - - limitSet := false - - for _, mut := range args { - mutators = append(mutators, mut) - _, isLimit := mut.(Limit) - if isLimit { - limitSet = true - } - } - - if !limitSet { - mutators = append(mutators, MaxLimit) - } - - return ChangeTrust(mutators...) -} - -// RemoveTrust is a helper that creates ChangeTrustBuilder -func RemoveTrust(code, issuer string, args ...interface{}) (result ChangeTrustBuilder) { - mutators := []interface{}{ - CreditAsset(code, issuer), - Limit("0"), - } - - for _, mut := range args { - mutators = append(mutators, mut) - } - - return ChangeTrust(mutators...) -} diff --git a/build/change_trust_test.go b/build/change_trust_test.go deleted file mode 100644 index 1893280c57..0000000000 --- a/build/change_trust_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package build - -import ( - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("ChangeTrustBuilder Mutators", func() { - - var ( - subject ChangeTrustBuilder - mut interface{} - - address = "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - bad = "foo" - ) - - JustBeforeEach(func() { - subject = ChangeTrustBuilder{} - subject.Mutate(mut) - }) - - Describe("SourceAccount", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = SourceAccount{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.O.SourceAccount.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SourceAccount{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("Line", func() { - Context("AssetTypeCreditAlphanum4", func() { - BeforeEach(func() { - mut = CreditAsset("USD", address) - }) - - It("sets Asset properly", func() { - Expect(subject.CT.Line.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum4)) - Expect(subject.CT.Line.AlphaNum4.AssetCode).To(Equal([4]byte{'U', 'S', 'D', 0})) - Expect(subject.CT.Line.AlphaNum12).To(BeNil()) - }) - }) - - Context("AssetTypeCreditAlphanum12", func() { - BeforeEach(func() { - mut = CreditAsset("ABCDEF", address) - }) - - It("sets Asset properly", func() { - Expect(subject.CT.Line.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum12)) - Expect(subject.CT.Line.AlphaNum4).To(BeNil()) - Expect(subject.CT.Line.AlphaNum12.AssetCode).To(Equal([12]byte{'A', 'B', 'C', 'D', 'E', 'F', 0, 0, 0, 0, 0, 0})) - }) - }) - - Context("asset invalid", func() { - Context("native", func() { - BeforeEach(func() { - mut = NativeAsset() - }) - - It("failed", func() { - Expect(subject.Err).To(MatchError("Native asset not allowed")) - }) - }) - - Context("empty", func() { - BeforeEach(func() { - mut = CreditAsset("", address) - }) - - It("failed", func() { - Expect(subject.Err).To(MatchError("Asset code length is invalid")) - }) - }) - - Context("too long", func() { - BeforeEach(func() { - mut = CreditAsset("1234567890123", address) - }) - - It("failed", func() { - Expect(subject.Err).To(MatchError("Asset code length is invalid")) - }) - }) - }) - - Context("issuer invalid", func() { - BeforeEach(func() { - mut = CreditAsset("USD", bad) - }) - - It("failed", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - }) - - Describe("Limit", func() { - Context("sets limit properly", func() { - BeforeEach(func() { - mut = Limit("20") - }) - - It("sets limit value properly", func() { - Expect(subject.CT.Limit).To(Equal(xdr.Int64(200000000))) - }) - }) - - Context("sets max limit properly", func() { - BeforeEach(func() { - mut = MaxLimit - }) - - It("sets limit value properly", func() { - Expect(subject.CT.Limit).To(Equal(xdr.Int64(9223372036854775807))) - }) - }) - }) -}) diff --git a/build/create_account.go b/build/create_account.go deleted file mode 100644 index 4afe96cd47..0000000000 --- a/build/create_account.go +++ /dev/null @@ -1,62 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/amount" - "github.com/stellar/go/xdr" -) - -// CreateAccount groups the creation of a new CreateAccountBuilder with a call -// to Mutate. -func CreateAccount(muts ...interface{}) (result CreateAccountBuilder) { - result.Mutate(muts...) 
- return -} - -// CreateAccountMutator is a interface that wraps the -// MutateCreateAccount operation. types may implement this interface to -// specify how they modify an xdr.PaymentOp object -type CreateAccountMutator interface { - MutateCreateAccount(*xdr.CreateAccountOp) error -} - -// CreateAccountBuilder helps to build CreateAccountOp structs. -type CreateAccountBuilder struct { - O xdr.Operation - CA xdr.CreateAccountOp - Err error -} - -// Mutate applies the provided mutators to this builder's payment or operation. -func (b *CreateAccountBuilder) Mutate(muts ...interface{}) { - for _, m := range muts { - var err error - switch mut := m.(type) { - case CreateAccountMutator: - err = mut.MutateCreateAccount(&b.CA) - case OperationMutator: - err = mut.MutateOperation(&b.O) - default: - err = errors.New("Mutator type not allowed") - } - - if err != nil { - b.Err = err - return - } - } -} - -// MutateCreateAccount for Destination sets the CreateAccountOp's Destination -// field -func (m Destination) MutateCreateAccount(o *xdr.CreateAccountOp) error { - return setAccountId(m.AddressOrSeed, &o.Destination) -} - -// MutateCreateAccount for NativeAmount sets the CreateAccountOp's -// StartingBalance field -func (m NativeAmount) MutateCreateAccount(o *xdr.CreateAccountOp) (err error) { - o.StartingBalance, err = amount.Parse(m.Amount) - return -} diff --git a/build/create_account_test.go b/build/create_account_test.go deleted file mode 100644 index e57b20906d..0000000000 --- a/build/create_account_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package build - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("CreateAccountBuilder Mutators", func() { - - var ( - subject CreateAccountBuilder - mut interface{} - - address = "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - bad = "foo" - ) - - JustBeforeEach(func() { - subject = CreateAccountBuilder{} - subject.Mutate(mut) - }) - - Describe("Destination", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = Destination{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.CA.Destination.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = Destination{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("SourceAccount", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = SourceAccount{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.O.SourceAccount.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SourceAccount{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("NativeAmount", func() { - BeforeEach(func() { mut = NativeAmount{"101"} }) - It("sets the starting balance properly", func() { - Expect(subject.CA.StartingBalance).To(Equal(xdr.Int64(1010000000))) - }) - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - }) -}) diff --git a/build/inflation.go b/build/inflation.go deleted file mode 100644 
index cc0c089007..0000000000 --- a/build/inflation.go +++ /dev/null @@ -1,37 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/xdr" -) - -// Inflation groups the creation of a new InflationBuilder with a call to Mutate. -func Inflation(muts ...interface{}) (result InflationBuilder) { - result.Mutate(muts...) - return -} - -// InflationBuilder represents an operation that is being built. -type InflationBuilder struct { - O xdr.Operation - Err error -} - -// Mutate applies the provided mutators to this builder's operation. -func (b *InflationBuilder) Mutate(muts ...interface{}) { - for _, m := range muts { - var err error - switch mut := m.(type) { - case OperationMutator: - err = mut.MutateOperation(&b.O) - default: - err = errors.New("Mutator type not allowed") - } - - if err != nil { - b.Err = err - return - } - } -} diff --git a/build/inflation_test.go b/build/inflation_test.go deleted file mode 100644 index 1c09902694..0000000000 --- a/build/inflation_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package build - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("InflationBuilder Mutators", func() { - - var ( - subject InflationBuilder - mut interface{} - - address = "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - bad = "foo" - ) - - JustBeforeEach(func() { - subject = InflationBuilder{} - subject.Mutate(mut) - }) - - Describe("SourceAccount", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = SourceAccount{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.O.SourceAccount.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SourceAccount{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) -}) diff --git a/build/main.go b/build/main.go deleted file mode 100644 index 7c2cd7f273..0000000000 --- a/build/main.go +++ /dev/null @@ -1,262 +0,0 @@ -// Package build provides a builder system for constructing various xdr -// structures used by the stellar network. -// -// At the core of this package is the *Builder and *Mutator types. A Builder -// object (ex. PaymentBuilder, TransactionBuilder) contain an underlying xdr -// struct that is being iteratively built by having zero or more Mutator structs -// applied to it. See ExampleTransactionBuilder in main_test.go for an example. -package build - -import ( - "errors" - "math" - - "github.com/stellar/go/amount" - "github.com/stellar/go/network" - "github.com/stellar/go/xdr" -) - -const ( - // MemoTextMaxLength represents the maximum number of bytes a valid memo of - // type "MEMO_TEXT" can be. - MemoTextMaxLength = 28 -) - -var ( - // PublicNetwork is a mutator that configures the transaction for submission - // to the main public stellar network. - PublicNetwork = Network{network.PublicNetworkPassphrase} - - // TestNetwork is a mutator that configures the transaction for submission - // to the test stellar network (often called testnet). - TestNetwork = Network{network.TestNetworkPassphrase} - - // DefaultNetwork is a mutator that configures the transaction for submission - // to the default stellar network. 
Integrators may change this value to - // another `Network` mutator if they would like to effect the default in a - // process-global manner. - DefaultNetwork = TestNetwork -) - -// Amount is a mutator capable of setting the amount -type Amount string - -// Asset is struct used in path_payment mutators -type Asset struct { - Code string - Issuer string - Native bool -} - -// ToXdrObject creates xdr.Asset object from build.Asset object -func (a Asset) ToXdrObject() (xdr.Asset, error) { - if a.Native { - return xdr.NewAsset(xdr.AssetTypeAssetTypeNative, nil) - } - - var issuer xdr.AccountId - err := setAccountId(a.Issuer, &issuer) - if err != nil { - return xdr.Asset{}, err - } - - length := len(a.Code) - switch { - case length >= 1 && length <= 4: - var codeArray [4]byte - byteArray := []byte(a.Code) - copy(codeArray[:], byteArray[0:length]) - asset := xdr.AssetAlphaNum4{codeArray, issuer} - return xdr.NewAsset(xdr.AssetTypeAssetTypeCreditAlphanum4, asset) - case length >= 5 && length <= 12: - var codeArray [12]byte - byteArray := []byte(a.Code) - copy(codeArray[:], byteArray[0:length]) - asset := xdr.AssetAlphaNum12{codeArray, issuer} - return xdr.NewAsset(xdr.AssetTypeAssetTypeCreditAlphanum12, asset) - default: - return xdr.Asset{}, errors.New("Asset code length is invalid") - } -} - -// AllowTrustAsset is a mutator capable of setting the asset on -// an operations that have one. -type AllowTrustAsset struct { - Code string -} - -// Authorize is a mutator capable of setting the `authorize` flag -type Authorize struct { - Value bool -} - -// AutoSequence loads the sequence to use for the transaction from an external -// provider. -type AutoSequence struct { - SequenceProvider -} - -// NativeAsset is a helper method to create native Asset object -func NativeAsset() Asset { - return Asset{Native: true} -} - -// CreditAsset is a helper method to create credit Asset object -func CreditAsset(code, issuer string) Asset { - return Asset{code, issuer, false} -} - -// CreditAmount is a mutator that configures a payment to be using credit -// asset and have the amount provided. -type CreditAmount struct { - Code string - Issuer string - Amount string -} - -// Defaults is a mutator that sets defaults -type Defaults struct{} - -// Destination is a mutator capable of setting the destination on -// an operations that have one. -type Destination struct { - AddressOrSeed string -} - -// InflationDest is a mutator capable of setting the inflation destination -type InflationDest string - -// HomeDomain is a mutator capable of setting home domain of the account -type HomeDomain string - -// MemoHash is a mutator that sets a memo on the mutated transaction of type -// MEMO_HASH. -type MemoHash struct { - Value xdr.Hash -} - -// Limit is a mutator that sets a limit on the change_trust operation -type Limit Amount - -// MasterWeight is a mutator that sets account's master weight -type MasterWeight uint32 - -// MaxLimit represents the maximum value that can be passed as trutline Limit -var MaxLimit = Limit(amount.String(math.MaxInt64)) - -// MemoID is a mutator that sets a memo on the mutated transaction of type -// MEMO_ID. -type MemoID struct { - Value uint64 -} - -// MemoReturn is a mutator that sets a memo on the mutated transaction of type -// MEMO_RETURN. -type MemoReturn struct { - Value xdr.Hash -} - -// MemoText is a mutator that sets a memo on the mutated transaction of type -// MEMO_TEXT. 
-type MemoText struct { - Value string -} - -// NativeAmount is a mutator that configures a payment to be using native -// currency and have the amount provided. -type NativeAmount struct { - Amount string -} - -// OfferID is a mutator that sets offer ID on offer operations -type OfferID uint64 - -// PayWithPath is a mutator that configures a path_payment's send asset and max amount -type PayWithPath struct { - Asset - MaxAmount string - Path []Asset -} - -// Through appends a new asset to the path -func (pathSend PayWithPath) Through(asset Asset) PayWithPath { - pathSend.Path = append(pathSend.Path, asset) - return pathSend -} - -// PayWith is a helper to create PayWithPath struct -func PayWith(sendAsset Asset, maxAmount string) PayWithPath { - return PayWithPath{ - Asset: sendAsset, - MaxAmount: maxAmount, - } -} - -// Price is a mutator that sets price on offer operations -type Price string - -// Rate is a mutator that sets selling/buying asset and price on offer operations -type Rate struct { - Selling Asset - Buying Asset - Price -} - -// Sequence is a mutator that sets the sequence number on a transaction -type Sequence struct { - Sequence uint64 -} - -// SequenceProvider is the interface that other packages may implement to be -// used with the `AutoSequence` mutator. -type SequenceProvider interface { - SequenceForAccount(aid string) (xdr.SequenceNumber, error) -} - -// Sign is a mutator that contributes a signature of the provided envelope's -// transaction with the configured key -type Sign struct { - Seed string -} - -// SetFlag is a mutator capable of setting account flags -type SetFlag int32 - -// ClearFlag is a mutator capable of clearing account flags -type ClearFlag int32 - -// Signer is a mutator capable of adding, updating and deleting account signer -type Signer struct { - PublicKey string - Weight uint32 -} - -// SourceAccount is a mutator capable of setting the source account on -// an xdr.Operation and an xdr.Transaction -type SourceAccount struct { - AddressOrSeed string -} - -// Thresholds is a mutator capable of setting account thresholds -type Thresholds struct { - Low *uint32 - Medium *uint32 - High *uint32 -} - -// Trustor is a mutator capable of setting the trustor on -// allow_trust operation. -type Trustor struct { - Address string -} - -// Network establishes the stellar network that a transaction should apply to. -// This modifier influences how a transaction is hashed for the purposes of signature generation. -type Network struct { - Passphrase string -} - -// ID returns the network ID derived from this struct's Passphrase -func (n *Network) ID() [32]byte { - return network.ID(n.Passphrase) -} diff --git a/build/main_test.go b/build/main_test.go deleted file mode 100644 index 9259ce03b6..0000000000 --- a/build/main_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package build - -import ( - "fmt" - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestBuild(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Package: github.com/stellar/go/build") -} - -// ExampleTransactionBuilder creates and signs a simple transaction, and then -// encodes it into a base64 string capable of being submitted to stellar-core. 
-// -// It uses the transaction builder system -func ExampleTransactionBuilder() { - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - Payment( - Destination{"GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA"}, - NativeAmount{"50"}, - ), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAALSRpLtCLv2eboZlEiHDSGR6Hb+zZL92fbSdNpObeE0EAAAAAAAAAAB3NZQAAAAAAAAAAARtDMfAAAABA2oIeQxoJl53RMRWFeLB865zcky39f2gf2PmUubCuJYccEePRSrTC8QQrMOgGwD8a6oe8dgltvezdDsmmXBPyBw== -} - -// ExamplePathPayment creates and signs a simple transaction with PathPayment operation, and then -// encodes it into a base64 string capable of being submitted to stellar-core. -func ExamplePathPayment() { - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - Payment( - Destination{"GBDT3K42LOPSHNAEHEJ6AVPADIJ4MAR64QEKKW2LQPBSKLYD22KUEH4P"}, - CreditAmount{"USD", "GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA", "50"}, - PayWith(CreditAsset("EUR", "GCPZJ3MJQ3GUGJSBL6R3MLYZS6FKVHG67BPAINMXL3NWNXR5S6XG657P"), "100"). - Through(Asset{Native: true}). - Through(CreditAsset("BTC", "GAHJZHVKFLATAATJH46C7OK2ZOVRD47GZBGQ7P6OCVF6RJDCEG5JMQBQ")), - ), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAIAAAABRVVSAAAAAACflO2Jhs1DJkFfo7YvGZeKqpze+F4ENZde22bePZeubwAAAAA7msoAAAAAAEc9q5pbnyO0BDkT4FXgGhPGAj7kCKVbS4PDJS8D1pVCAAAAAVVTRAAAAAAALSRpLtCLv2eboZlEiHDSGR6Hb+zZL92fbSdNpObeE0EAAAAAHc1lAAAAAAIAAAAAAAAAAUJUQwAAAAAADpyeqirBMAJpPzwvuVrLqxHz5shND7/OFUvopGIhupYAAAAAAAAAARtDMfAAAABA5xuIJu/KGKQRuDrdkzNsR4HjT6wX464SHZ/yvYwVb/AkAyyfeMLDNhgKbBxQMWc3Uo5fTst1UHldC+jYNeAhCQ== -} - -// ExampleSetOptions creates and signs a simple transaction with SetOptions operation, and then -// encodes it into a base64 string capable of being submitted to stellar-core. -func ExampleSetOptions() { - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - SetOptions( - InflationDest("GCT7S5BA6ZC7SV7GGEMEYJTWOBYTBOA7SC4JEYP7IAEDG7HQNIWKRJ4G"), - SetAuthRequired(), - SetAuthRevocable(), - SetAuthImmutable(), - ClearAuthRequired(), - ClearAuthRevocable(), - ClearAuthImmutable(), - MasterWeight(1), - SetThresholds(2, 3, 4), - HomeDomain("stellar.org"), - AddSigner("GC6DDGPXVWXD5V6XOWJ7VUTDYI7VKPV2RAJWBVBHR47OPV5NASUNHTJW", 5), - ), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAABAAAAAKf5dCD2RflX5jEYTCZ2cHEwuB+QuJJh/0AIM3zwaiyoAAAAAQAAAAcAAAABAAAABwAAAAEAAAABAAAAAQAAAAIAAAABAAAAAwAAAAEAAAAEAAAAAQAAAAtzdGVsbGFyLm9yZwAAAAABAAAAALwxmfetrj7X13WT+tJjwj9VPrqIE2DUJ48+59etBKjTAAAABQAAAAAAAAABG0Mx8AAAAECZF17pOfZcyc7YJXMyx++PMydIvL6g2yZcPDY8h4+tmlz+3rsE6uuX0R6xfgNnuMntvK4YMmaOvp4DvaZMMNoA -} - -// ExampleSetOptionsOperations creates and signs a simple transaction with many SetOptions operations, and then -// encodes it into a base64 string capable of being submitted to stellar-core. 
-func ExampleSetOptionsOperations() { - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - InflationDest("GCT7S5BA6ZC7SV7GGEMEYJTWOBYTBOA7SC4JEYP7IAEDG7HQNIWKRJ4G"), - SetAuthRequired(), - SetAuthRevocable(), - SetAuthImmutable(), - ClearAuthRequired(), - ClearAuthRevocable(), - ClearAuthImmutable(), - MasterWeight(1), - SetThresholds(2, 3, 4), - HomeDomain("stellar.org"), - RemoveSigner("GC6DDGPXVWXD5V6XOWJ7VUTDYI7VKPV2RAJWBVBHR47OPV5NASUNHTJW"), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAETAAAAAAAAAABAAAAAAAAAAAAAAALAAAAAAAAAAUAAAABAAAAAKf5dCD2RflX5jEYTCZ2cHEwuB+QuJJh/0AIM3zwaiyoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAABAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAEAAAADAAAAAQAAAAQAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAALc3RlbGxhci5vcmcAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAALwxmfetrj7X13WT+tJjwj9VPrqIE2DUJ48+59etBKjTAAAAAAAAAAAAAAABG0Mx8AAAAEAOXsLbFo3e8fpqyeZEHGP9o/IrQDQRyof+DA1EeUkvUGbNhy57xXcpMhZpRtwXThWBYx4za4q+TRrnoZQtezgN -} - -// ExampleChangeTrust creates and signs a simple transaction with ChangeTrust operation, and then -// encodes it into a base64 string capable of being submitted to stellar-core. -func ExampleChangeTrust() { - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - Trust("USD", "GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA", Limit("100.25")), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAAtJGku0Iu/Z5uhmUSIcNIZHodv7Nkv3Z9tJ02k5t4TQQAAAAA7wO+gAAAAAAAAAAEbQzHwAAAAQOIy19X38Y3jcFzvhDsmXu6iDzrzb4iwfS2NAq9GGAFiRJUGoFX85vKtlNcXzQppF4X8oIMNPEb74fuZE/N+GAE= -} - -// ExampleChangeTrustMaxLimit creates and signs a simple transaction with ChangeTrust operation (maximum limit), and then -// encodes it into a base64 string capable of being submitted to stellar-core. -func ExampleChangeTrustMaxLimit() { - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - Trust("USD", "GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA"), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAAtJGku0Iu/Z5uhmUSIcNIZHodv7Nkv3Z9tJ02k5t4TQX//////////AAAAAAAAAAEbQzHwAAAAQJQC6R3RqNaw5rOmaxqpAE0lD5onM/njn9I2RVlhtS2SGi2Z7xm65USYVWXTJFVqTCfTwwu+QXFcOuqgJjVtHAk= -} - -// ExampleRemoveTrust creates and signs a simple transaction with ChangeTrust operation (remove trust), and then -// encodes it into a base64 string capable of being submitted to stellar-core. 
-func ExampleRemoveTrust() { - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - operationSource := "GCVJCNUHSGKOTBBSXZJ7JJZNOSE2YDNGRLIDPMQDUEQWJQSE6QZSDPNU" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - RemoveTrust( - "USD", - "GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA", - SourceAccount{operationSource}, - ), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAQAAAACqkTaHkZTphDK+U/SnLXSJrA2mitA3sgOhIWTCRPQzIQAAAAYAAAABVVNEAAAAAAAtJGku0Iu/Z5uhmUSIcNIZHodv7Nkv3Z9tJ02k5t4TQQAAAAAAAAAAAAAAAAAAAAEbQzHwAAAAQD5FeGBEwJyeauK+WKfcxYBeKw62EtCqvC0p9Z+1cY32fKQ+5Jz9uE1LaDsHW5NurtStKcUTiG5j2qNDf1QpYgw= -} - -// ExampleManageOffer creates and signs a simple transaction with ManageOffer operations, and then -// encodes it into a base64 string capable of being submitted to stellar-core. -func ExampleManageOffer() { - rate := Rate{ - Selling: NativeAsset(), - Buying: CreditAsset("USD", "GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA"), - Price: Price("125.12"), - } - - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - CreateOffer(rate, "20"), - UpdateOffer(rate, "40", OfferID(2)), - DeleteOffer(rate, OfferID(1)), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAABLAAAAAAAAAABAAAAAAAAAAAAAAADAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAALSRpLtCLv2eboZlEiHDSGR6Hb+zZL92fbSdNpObeE0EAAAAAC+vCAAAADDgAAAAZAAAAAAAAAAAAAAAAAAAAAwAAAAAAAAABVVNEAAAAAAAtJGku0Iu/Z5uhmUSIcNIZHodv7Nkv3Z9tJ02k5t4TQQAAAAAX14QAAAAMOAAAABkAAAAAAAAAAgAAAAAAAAADAAAAAAAAAAFVU0QAAAAAAC0kaS7Qi79nm6GZRIhw0hkeh2/s2S/dn20nTaTm3hNBAAAAAAAAAAAAAAw4AAAAGQAAAAAAAAABAAAAAAAAAAEbQzHwAAAAQBfosk+t8qpULHP4ppNX2xVPih8lmnbHFZdeuxSP6pgpCCX05S7zZ4PsjVQY2nOnLru6mBTc1r8So+vxHs3FXAc= -} - -// ExampleCreatePassiveOffer creates and signs a simple transaction with CreatePassiveOffer operation, and then -// encodes it into a base64 string capable of being submitted to stellar-core. -func ExampleCreatePassiveOffer() { - rate := Rate{ - Selling: NativeAsset(), - Buying: CreditAsset("USD", "GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA"), - Price: Price("125.12"), - } - - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - CreatePassiveOffer(rate, "20"), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAQAAAAAAAAAAVVTRAAAAAAALSRpLtCLv2eboZlEiHDSGR6Hb+zZL92fbSdNpObeE0EAAAAAC+vCAAAADDgAAAAZAAAAAAAAAAEbQzHwAAAAQHv/1xLn+ArfIUoWjn3V0zVka6tulqMYx4zJZhGqdmTw8iCXY0ZtHS+y+7YGgR3vM1DpKOdvWTmhee+sCXIppQA= -} - -// ExampleAccountMerge creates and signs a simple transaction with AccountMerge operation, and then -// encodes it into a base64 string capable of being submitted to stellar-core. 
-func ExampleAccountMerge() { - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - AccountMerge( - Destination{"GBDT3K42LOPSHNAEHEJ6AVPADIJ4MAR64QEKKW2LQPBSKLYD22KUEH4P"}, - ), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAgAAAAARz2rmlufI7QEORPgVeAaE8YCPuQIpVtLg8MlLwPWlUIAAAAAAAAAARtDMfAAAABAh3qZrP5T9Xg0LdzwOLx/eA/B7bzj+8j+s9eXNuu7/Ldch7I6kW5iYz6Vfy32FVnKNtoykToB7nQY2o2vo1tqAw== -} - -// ExampleInflation creates and signs a simple transaction with Inflation operation, and then -// encodes it into a base64 string capable of being submitted to stellar-core. -func ExampleInflation() { - seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H" - tx := Transaction( - SourceAccount{seed}, - Sequence{1}, - Inflation(), - ) - - txe := tx.Sign(seed) - txeB64, _ := txe.Base64() - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAADZY/nWY0gx6beMpf4S8Ur0qHsjA8fbFtBzBx1cbQzHwAAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAAARtDMfAAAABAzzDG4V7KzynWY0ER/V4HH0WgDvl3hrIizDcKW3qEQY4Ib3yXufVvdbzsET/Dj5js5dgDkcYgikHwRCpqi/J8BQ== -} diff --git a/build/manage_data.go b/build/manage_data.go deleted file mode 100644 index ba61d0210a..0000000000 --- a/build/manage_data.go +++ /dev/null @@ -1,75 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/xdr" -) - -// ClearData removes a key/value pair associated with the source account -func ClearData(name string, muts ...interface{}) (result ManageDataBuilder) { - result.MD.DataName = xdr.String64(name) - result.MD.DataValue = nil - result.validateName() - result.Mutate(muts...) - return -} - -// SetData sets a key/value pair associated with the source account, updating it -// if one already exists. -func SetData(name string, value []byte, muts ...interface{}) (result ManageDataBuilder) { - result.MD.DataName = xdr.String64(name) - v := xdr.DataValue(value) - result.MD.DataValue = &v - result.validateName() - result.validateValue() - result.Mutate(muts...) - return -} - -// ManageDataBuilder helps to build ManageDataOp structs. -type ManageDataBuilder struct { - O xdr.Operation - MD xdr.ManageDataOp - Err error -} - -// Mutate applies the provided mutators to this builder's payment or operation. -func (b *ManageDataBuilder) Mutate(muts ...interface{}) { - for _, m := range muts { - var err error - switch mut := m.(type) { - case OperationMutator: - err = mut.MutateOperation(&b.O) - default: - err = errors.New("Mutator type not allowed") - } - - if err != nil { - b.Err = err - return - } - } -} - -func (b *ManageDataBuilder) validateName() { - if len(b.MD.DataName) > 64 { - b.Err = errors.New("Name too long: must be less than 64 bytes") - return - } - - if b.MD.DataName == "" { - b.Err = errors.New("Invalid name: empty string") - return - } -} - -func (b *ManageDataBuilder) validateValue() { - if *b.MD.DataValue == nil { - b.Err = errors.New("Invalid value: cannot set a nil value") - } - - if len(*b.MD.DataValue) > 64 { - b.Err = errors.New("Value too long: must be less than 64 bytes") - } -} diff --git a/build/manage_data_test.go b/build/manage_data_test.go deleted file mode 100644 index 04161d9543..0000000000 --- a/build/manage_data_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package build - -import ( - "strings" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("ClearData", func() { - var ( - subject ManageDataBuilder - name string - ) - - JustBeforeEach(func() { - subject = ClearData(name) - }) - - Context("Valid name", func() { - BeforeEach(func() { - name = "my data" - }) - - It("succeeds", func() { - Expect(subject.Err).ToNot(HaveOccurred()) - Expect(subject.MD.DataName).To(Equal(xdr.String64("my data"))) - Expect(subject.MD.DataValue).To(BeNil()) - }) - }) - - Context("Long key", func() { - BeforeEach(func() { name = strings.Repeat("a", 65) }) - - It("errors", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - - Context("empty key", func() { - BeforeEach(func() { name = "" }) - - It("errors", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) -}) - -var _ = Describe("SetData", func() { - var ( - subject ManageDataBuilder - name string - value []byte - ) - - JustBeforeEach(func() { - subject = SetData(name, value) - }) - - Context("Valid name and value", func() { - BeforeEach(func() { - name = "my data" - value = []byte{0xFF, 0xFF} - }) - - It("succeeds", func() { - Expect(subject.Err).ToNot(HaveOccurred()) - Expect(subject.MD.DataName).To(Equal(xdr.String64("my data"))) - Expect(*subject.MD.DataValue).To(Equal(xdr.DataValue([]byte{0xFF, 0xFF}))) - }) - }) - - Context("empty value", func() { - BeforeEach(func() { - name = "some name" - value = []byte{} - }) - - It("succeeds", func() { - Expect(subject.Err).ToNot(HaveOccurred()) - Expect(subject.MD.DataName).To(Equal(xdr.String64("some name"))) - }) - }) - - Context("Long key", func() { - BeforeEach(func() { - name = strings.Repeat("a", 65) - value = []byte{} - }) - - It("errors", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - - Context("empty key", func() { - BeforeEach(func() { - name = "" - value = []byte{} - }) - - It("errors", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - - Context("nil value", func() { - BeforeEach(func() { - name = "some name" - value = nil - }) - - It("errors", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - - Context("Long value", func() { - BeforeEach(func() { - name = "some name" - value = []byte(strings.Repeat("a", 65)) - }) - - It("errors", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) -}) - -var _ = Describe("ManageData Mutators", func() { - - var ( - subject ManageDataBuilder - mut interface{} - - address = "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - bad = "foo" - ) - - JustBeforeEach(func() { - subject = ManageDataBuilder{} - subject.Mutate(mut) - }) - - Describe("SourceAccount", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = SourceAccount{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.O.SourceAccount.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SourceAccount{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) -}) diff --git a/build/manage_offer.go b/build/manage_offer.go deleted file mode 100644 index d71beab5f3..0000000000 --- a/build/manage_offer.go +++ /dev/null @@ -1,133 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/amount" - "github.com/stellar/go/price" - "github.com/stellar/go/xdr" -) - -// CreateOffer creates a new offer 
-func CreateOffer(rate Rate, amount Amount) (result ManageOfferBuilder) { - return ManageOffer(false, rate, amount) -} - -// CreatePassiveOffer creates a new passive offer -func CreatePassiveOffer(rate Rate, amount Amount) (result ManageOfferBuilder) { - return ManageOffer(true, rate, amount) -} - -// UpdateOffer updates an existing offer -func UpdateOffer(rate Rate, amount Amount, offerID OfferID) (result ManageOfferBuilder) { - return ManageOffer(false, rate, amount, offerID) -} - -// DeleteOffer deletes an existing offer -func DeleteOffer(rate Rate, offerID OfferID) (result ManageOfferBuilder) { - return ManageOffer(false, rate, Amount("0"), offerID) -} - -// ManageOffer groups the creation of a new ManageOfferBuilder with a call to Mutate. -func ManageOffer(passiveOffer bool, muts ...interface{}) (result ManageOfferBuilder) { - result.PassiveOffer = passiveOffer - result.Mutate(muts...) - return -} - -// ManageOfferMutator is a interface that wraps the -// MutateManageOffer operation. types may implement this interface to -// specify how they modify an xdr.ManageOfferOp object -type ManageOfferMutator interface { - MutateManageOffer(interface{}) error -} - -// ManageOfferBuilder represents a transaction that is being built. -type ManageOfferBuilder struct { - PassiveOffer bool - O xdr.Operation - MO xdr.ManageOfferOp - PO xdr.CreatePassiveOfferOp - Err error -} - -// Mutate applies the provided mutators to this builder's offer or operation. -func (b *ManageOfferBuilder) Mutate(muts ...interface{}) { - for _, m := range muts { - var err error - switch mut := m.(type) { - case ManageOfferMutator: - if b.PassiveOffer { - err = mut.MutateManageOffer(&b.PO) - } else { - err = mut.MutateManageOffer(&b.MO) - } - case OperationMutator: - err = mut.MutateOperation(&b.O) - default: - err = errors.New("Mutator type not allowed") - } - - if err != nil { - b.Err = err - return - } - } -} - -// MutateManageOffer for Amount sets the ManageOfferOp's Amount field -func (m Amount) MutateManageOffer(o interface{}) (err error) { - switch o := o.(type) { - default: - err = errors.New("Unexpected operation type") - case *xdr.ManageOfferOp: - o.Amount, err = amount.Parse(string(m)) - case *xdr.CreatePassiveOfferOp: - o.Amount, err = amount.Parse(string(m)) - } - return -} - -// MutateManageOffer for OfferID sets the ManageOfferOp's OfferID field -func (m OfferID) MutateManageOffer(o interface{}) (err error) { - switch o := o.(type) { - default: - err = errors.New("Unexpected operation type") - case *xdr.ManageOfferOp: - o.OfferId = xdr.Uint64(m) - } - return -} - -// MutateManageOffer for Rate sets the ManageOfferOp's selling, buying and price fields -func (m Rate) MutateManageOffer(o interface{}) (err error) { - switch o := o.(type) { - default: - err = errors.New("Unexpected operation type") - case *xdr.ManageOfferOp: - o.Selling, err = m.Selling.ToXdrObject() - if err != nil { - return - } - - o.Buying, err = m.Buying.ToXdrObject() - if err != nil { - return - } - - o.Price, err = price.Parse(string(m.Price)) - case *xdr.CreatePassiveOfferOp: - o.Selling, err = m.Selling.ToXdrObject() - if err != nil { - return - } - - o.Buying, err = m.Buying.ToXdrObject() - if err != nil { - return - } - - o.Price, err = price.Parse(string(m.Price)) - } - return -} diff --git a/build/manage_offer_test.go b/build/manage_offer_test.go deleted file mode 100644 index cc14b1a307..0000000000 --- a/build/manage_offer_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package build - -import ( - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("ManageOffer", func() { - - Describe("ManageOfferBuilder", func() { - var ( - subject ManageOfferBuilder - mut interface{} - - address = "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - bad = "foo" - - rate = Rate{ - Selling: CreditAsset("EUR", "GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA"), - Buying: NativeAsset(), - Price: Price("41.265"), - } - ) - - JustBeforeEach(func() { - subject = ManageOfferBuilder{} - subject.Mutate(mut) - }) - - Describe("CreateOffer", func() { - Context("creates offer properly", func() { - It("sets values properly", func() { - builder := CreateOffer(rate, "20") - - Expect(builder.MO.Amount).To(Equal(xdr.Int64(200000000))) - - Expect(builder.MO.Selling.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum4)) - Expect(builder.MO.Selling.AlphaNum4.AssetCode).To(Equal([4]byte{'E', 'U', 'R', 0})) - var aid xdr.AccountId - aid.SetAddress(rate.Selling.Issuer) - Expect(builder.MO.Selling.AlphaNum4.Issuer.MustEd25519()).To(Equal(aid.MustEd25519())) - Expect(builder.MO.Selling.AlphaNum12).To(BeNil()) - - Expect(builder.MO.Buying.Type).To(Equal(xdr.AssetTypeAssetTypeNative)) - Expect(builder.MO.Buying.AlphaNum4).To(BeNil()) - Expect(builder.MO.Buying.AlphaNum12).To(BeNil()) - - Expect(builder.MO.Price.N).To(Equal(xdr.Int32(8253))) - Expect(builder.MO.Price.D).To(Equal(xdr.Int32(200))) - - Expect(builder.MO.OfferId).To(Equal(xdr.Uint64(0))) - }) - }) - }) - - Describe("UpdateOffer", func() { - Context("updates the offer properly", func() { - It("sets values properly", func() { - builder := UpdateOffer(rate, "100", 5) - - Expect(builder.MO.Amount).To(Equal(xdr.Int64(1000000000))) - - Expect(builder.MO.Selling.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum4)) - Expect(builder.MO.Selling.AlphaNum4.AssetCode).To(Equal([4]byte{'E', 'U', 'R', 0})) - var aid xdr.AccountId - aid.SetAddress(rate.Selling.Issuer) - Expect(builder.MO.Selling.AlphaNum4.Issuer.MustEd25519()).To(Equal(aid.MustEd25519())) - Expect(builder.MO.Selling.AlphaNum12).To(BeNil()) - - Expect(builder.MO.Buying.Type).To(Equal(xdr.AssetTypeAssetTypeNative)) - Expect(builder.MO.Buying.AlphaNum4).To(BeNil()) - Expect(builder.MO.Buying.AlphaNum12).To(BeNil()) - - Expect(builder.MO.Price.N).To(Equal(xdr.Int32(8253))) - Expect(builder.MO.Price.D).To(Equal(xdr.Int32(200))) - - Expect(builder.MO.OfferId).To(Equal(xdr.Uint64(5))) - }) - }) - }) - - Describe("DeleteOffer", func() { - Context("deletes the offer properly", func() { - It("sets values properly", func() { - builder := DeleteOffer(rate, 10) - - Expect(builder.MO.Amount).To(Equal(xdr.Int64(0))) - - Expect(builder.MO.Selling.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum4)) - Expect(builder.MO.Selling.AlphaNum4.AssetCode).To(Equal([4]byte{'E', 'U', 'R', 0})) - var aid xdr.AccountId - aid.SetAddress(rate.Selling.Issuer) - Expect(builder.MO.Selling.AlphaNum4.Issuer.MustEd25519()).To(Equal(aid.MustEd25519())) - Expect(builder.MO.Selling.AlphaNum12).To(BeNil()) - - Expect(builder.MO.Buying.Type).To(Equal(xdr.AssetTypeAssetTypeNative)) - Expect(builder.MO.Buying.AlphaNum4).To(BeNil()) - Expect(builder.MO.Buying.AlphaNum12).To(BeNil()) - - Expect(builder.MO.Price.N).To(Equal(xdr.Int32(8253))) - Expect(builder.MO.Price.D).To(Equal(xdr.Int32(200))) - - Expect(builder.MO.OfferId).To(Equal(xdr.Uint64(10))) - }) - }) - }) - - Describe("SourceAccount", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = SourceAccount{address} }) - - 
It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.O.SourceAccount.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SourceAccount{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - }) - - Describe("CreatePassiveOfferBuilder", func() { - var ( - subject ManageOfferBuilder - mut interface{} - - rate = Rate{ - Selling: CreditAsset("EUR", "GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA"), - Buying: NativeAsset(), - Price: Price("41.265"), - } - ) - - JustBeforeEach(func() { - subject = ManageOfferBuilder{} - subject.Mutate(mut) - }) - - Describe("CreatePassiveOffer", func() { - Context("creates offer properly", func() { - It("sets values properly", func() { - builder := CreatePassiveOffer(rate, "20") - - Expect(builder.PO.Amount).To(Equal(xdr.Int64(200000000))) - - Expect(builder.PO.Selling.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum4)) - Expect(builder.PO.Selling.AlphaNum4.AssetCode).To(Equal([4]byte{'E', 'U', 'R', 0})) - var aid xdr.AccountId - aid.SetAddress(rate.Selling.Issuer) - Expect(builder.PO.Selling.AlphaNum4.Issuer.MustEd25519()).To(Equal(aid.MustEd25519())) - Expect(builder.PO.Selling.AlphaNum12).To(BeNil()) - - Expect(builder.PO.Buying.Type).To(Equal(xdr.AssetTypeAssetTypeNative)) - Expect(builder.PO.Buying.AlphaNum4).To(BeNil()) - Expect(builder.PO.Buying.AlphaNum12).To(BeNil()) - - Expect(builder.PO.Price.N).To(Equal(xdr.Int32(8253))) - Expect(builder.PO.Price.D).To(Equal(xdr.Int32(200))) - }) - }) - }) - }) -}) diff --git a/build/operation.go b/build/operation.go deleted file mode 100644 index 672b0f26c3..0000000000 --- a/build/operation.go +++ /dev/null @@ -1,19 +0,0 @@ -package build - -import ( - "github.com/stellar/go/xdr" -) - -// OperationMutator is a interface that wraps the MutateOperation operation. -// types may implement this interface to specify how they modify an -// xdr.Operation object -type OperationMutator interface { - MutateOperation(*xdr.Operation) error -} - -// MutateOperation for SourceAccount sets the operation's SourceAccount -// to the pubilic key for the address provided -func (m SourceAccount) MutateOperation(o *xdr.Operation) error { - o.SourceAccount = &xdr.AccountId{} - return setAccountId(m.AddressOrSeed, o.SourceAccount) -} diff --git a/build/payment.go b/build/payment.go deleted file mode 100644 index 364a0a7af5..0000000000 --- a/build/payment.go +++ /dev/null @@ -1,156 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/amount" - "github.com/stellar/go/xdr" -) - -// Payment groups the creation of a new PaymentBuilder with a call to Mutate. -func Payment(muts ...interface{}) (result PaymentBuilder) { - result.Mutate(muts...) - return -} - -// PaymentMutator is a interface that wraps the -// MutatePayment operation. types may implement this interface to -// specify how they modify an xdr.PaymentOp object -type PaymentMutator interface { - MutatePayment(interface{}) error -} - -// PaymentBuilder represents a transaction that is being built. -type PaymentBuilder struct { - PathPayment bool - O xdr.Operation - P xdr.PaymentOp - PP xdr.PathPaymentOp - Err error -} - -// Mutate applies the provided mutators to this builder's payment or operation. 
-func (b *PaymentBuilder) Mutate(muts ...interface{}) { - for _, m := range muts { - if _, ok := m.(PayWithPath); ok { - b.PathPayment = true - break - } - } - - for _, m := range muts { - var err error - switch mut := m.(type) { - case PaymentMutator: - if b.PathPayment { - err = mut.MutatePayment(&b.PP) - } else { - err = mut.MutatePayment(&b.P) - } - case OperationMutator: - err = mut.MutateOperation(&b.O) - default: - err = errors.New("Mutator type not allowed") - } - - if err != nil { - b.Err = err - return - } - } -} - -// MutatePayment for Asset sets the PaymentOp's Asset field -func (m CreditAmount) MutatePayment(o interface{}) (err error) { - switch o := o.(type) { - default: - err = errors.New("Unexpected operation type") - case *xdr.PaymentOp: - o.Amount, err = amount.Parse(m.Amount) - if err != nil { - return - } - - o.Asset, err = createAlphaNumAsset(m.Code, m.Issuer) - case *xdr.PathPaymentOp: - o.DestAmount, err = amount.Parse(m.Amount) - if err != nil { - return - } - - o.DestAsset, err = createAlphaNumAsset(m.Code, m.Issuer) - } - return -} - -// MutatePayment for Destination sets the PaymentOp's Destination field -func (m Destination) MutatePayment(o interface{}) error { - switch o := o.(type) { - default: - return errors.New("Unexpected operation type") - case *xdr.PaymentOp: - return setAccountId(m.AddressOrSeed, &o.Destination) - case *xdr.PathPaymentOp: - return setAccountId(m.AddressOrSeed, &o.Destination) - } - return nil -} - -// MutatePayment for NativeAmount sets the PaymentOp's currency field to -// native and sets its amount to the provided integer -func (m NativeAmount) MutatePayment(o interface{}) (err error) { - switch o := o.(type) { - default: - err = errors.New("Unexpected operation type") - case *xdr.PaymentOp: - o.Amount, err = amount.Parse(m.Amount) - if err != nil { - return - } - - o.Asset, err = xdr.NewAsset(xdr.AssetTypeAssetTypeNative, nil) - case *xdr.PathPaymentOp: - o.DestAmount, err = amount.Parse(m.Amount) - if err != nil { - return - } - - o.DestAsset, err = xdr.NewAsset(xdr.AssetTypeAssetTypeNative, nil) - } - return -} - -// MutatePayment for PayWithPath sets the PathPaymentOp's SendAsset, -// SendMax and Path fields -func (m PayWithPath) MutatePayment(o interface{}) (err error) { - var pathPaymentOp *xdr.PathPaymentOp - var ok bool - if pathPaymentOp, ok = o.(*xdr.PathPaymentOp); !ok { - return errors.New("Unexpected operation type") - } - - // MaxAmount - pathPaymentOp.SendMax, err = amount.Parse(m.MaxAmount) - if err != nil { - return - } - - // Path - var path []xdr.Asset - var xdrAsset xdr.Asset - - for _, asset := range m.Path { - xdrAsset, err = asset.ToXdrObject() - if err != nil { - return err - } - - path = append(path, xdrAsset) - } - - pathPaymentOp.Path = path - - // Asset - pathPaymentOp.SendAsset, err = m.Asset.ToXdrObject() - return -} diff --git a/build/payment_test.go b/build/payment_test.go deleted file mode 100644 index c169948fcc..0000000000 --- a/build/payment_test.go +++ /dev/null @@ -1,284 +0,0 @@ -package build - -import ( - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("Payment Mutators", func() { - - var ( - subject PaymentBuilder - mut interface{} - - address = "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - bad = "foo" - ) - - Describe("Payment", func() { - JustBeforeEach(func() { - subject = PaymentBuilder{} - subject.Mutate(mut) - }) - - Describe("CreditAmount", func() { - Context("AlphaNum4", func() { - BeforeEach(func() { - mut = CreditAmount{"USD", address, "50.0"} - }) - It("sets the asset properly", func() { - Expect(subject.P.Amount).To(Equal(xdr.Int64(500000000))) - Expect(subject.P.Asset.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum4)) - Expect(subject.P.Asset.AlphaNum4.AssetCode).To(Equal([4]byte{'U', 'S', 'D', 0})) - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.P.Asset.AlphaNum4.Issuer.MustEd25519()).To(Equal(aid.MustEd25519())) - Expect(subject.P.Asset.AlphaNum12).To(BeNil()) - }) - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - }) - - Context("AlphaNum12", func() { - BeforeEach(func() { - mut = CreditAmount{"ABCDEF", address, "50.0"} - }) - It("sets the asset properly", func() { - Expect(subject.P.Amount).To(Equal(xdr.Int64(500000000))) - Expect(subject.P.Asset.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum12)) - Expect(subject.P.Asset.AlphaNum4).To(BeNil()) - Expect(subject.P.Asset.AlphaNum12.AssetCode).To(Equal([12]byte{'A', 'B', 'C', 'D', 'E', 'F', 0, 0, 0, 0, 0, 0})) - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.P.Asset.AlphaNum12.Issuer.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - }) - - Context("issuer invalid", func() { - BeforeEach(func() { - mut = CreditAmount{"USD", bad, "50.0"} - }) - - It("failed", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - - Context("amount invalid", func() { - BeforeEach(func() { - mut = CreditAmount{"ABCDEF", address, "test"} - }) - - It("failed", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - - Context("asset code length invalid", func() { - Context("empty", func() { - BeforeEach(func() { - mut = CreditAmount{"", address, "50.0"} - }) - - It("failed", func() { - Expect(subject.Err).To(MatchError("Asset code length is invalid")) - }) - }) - - Context("too long", func() { - BeforeEach(func() { - mut = CreditAmount{"1234567890123", address, "50.0"} - }) - - It("failed", func() { - Expect(subject.Err).To(MatchError("Asset code length is invalid")) - }) - }) - }) - }) - - Describe("Destination", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = Destination{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.P.Destination.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = Destination{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("SourceAccount", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = SourceAccount{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - 
Expect(subject.O.SourceAccount.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SourceAccount{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("NativeAmount", func() { - BeforeEach(func() { mut = NativeAmount{"101"} }) - It("sets the starting balance properly", func() { - Expect(subject.P.Asset.Type).To(Equal(xdr.AssetTypeAssetTypeNative)) - Expect(subject.P.Amount).To(Equal(xdr.Int64(1010000000))) - }) - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - }) - }) - - Describe("PathPayment", func() { - JustBeforeEach(func() { - subject = PaymentBuilder{} - subject.Mutate(PayWith(CreditAsset("EUR", "GCPZJ3MJQ3GUGJSBL6R3MLYZS6FKVHG67BPAINMXL3NWNXR5S6XG657P"), "100"). - Through(NativeAsset()). - Through(CreditAsset("BTC", "GAHJZHVKFLATAATJH46C7OK2ZOVRD47GZBGQ7P6OCVF6RJDCEG5JMQBQ"))) - subject.Mutate(mut) - }) - - Describe("Destination", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = Destination{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.PP.Destination.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = Destination{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("Destination: Asset and Amount", func() { - Context("native", func() { - BeforeEach(func() { - mut = NativeAmount{"50"} - }) - It("sets the fields properly", func() { - Expect(subject.PP.DestAmount).To(Equal(xdr.Int64(500000000))) - Expect(subject.PP.DestAsset.Type).To(Equal(xdr.AssetTypeAssetTypeNative)) - Expect(subject.PP.DestAsset.AlphaNum4).To(BeNil()) - Expect(subject.PP.DestAsset.AlphaNum12).To(BeNil()) - }) - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - }) - - Context("AlphaNum4", func() { - BeforeEach(func() { - mut = CreditAmount{"USD", address, "50"} - }) - It("sets the asset properly", func() { - Expect(subject.PP.DestAmount).To(Equal(xdr.Int64(500000000))) - Expect(subject.PP.DestAsset.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum4)) - Expect(subject.PP.DestAsset.AlphaNum4.AssetCode).To(Equal([4]byte{'U', 'S', 'D', 0})) - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.PP.DestAsset.AlphaNum4.Issuer.MustEd25519()).To(Equal(aid.MustEd25519())) - Expect(subject.PP.DestAsset.AlphaNum12).To(BeNil()) - }) - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - }) - - Context("AlphaNum12", func() { - BeforeEach(func() { - mut = CreditAmount{"ABCDEF", address, "50"} - }) - It("sets the asset properly", func() { - Expect(subject.PP.DestAmount).To(Equal(xdr.Int64(500000000))) - Expect(subject.PP.DestAsset.Type).To(Equal(xdr.AssetTypeAssetTypeCreditAlphanum12)) - Expect(subject.PP.DestAsset.AlphaNum4).To(BeNil()) - Expect(subject.PP.DestAsset.AlphaNum12.AssetCode).To(Equal([12]byte{'A', 'B', 'C', 'D', 'E', 'F', 0, 0, 0, 0, 0, 0})) - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.PP.DestAsset.AlphaNum12.Issuer.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - }) - - Context("issuer invalid", func() { - BeforeEach(func() { - mut = CreditAmount{"ABCDEF", bad, "50"} - }) - - 
It("failed", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - - Context("amount invalid", func() { - BeforeEach(func() { - mut = CreditAmount{"ABCDEF", address, "test"} - }) - - It("failed", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - - Context("asset code length invalid", func() { - Context("empty", func() { - BeforeEach(func() { - mut = CreditAmount{"", address, "50.0"} - }) - - It("failed", func() { - Expect(subject.Err).To(MatchError("Asset code length is invalid")) - }) - }) - - Context("too long", func() { - BeforeEach(func() { - mut = CreditAmount{"1234567890123", address, "50.0"} - }) - - It("failed", func() { - Expect(subject.Err).To(MatchError("Asset code length is invalid")) - }) - }) - }) - }) - }) -}) diff --git a/build/set_options.go b/build/set_options.go deleted file mode 100644 index 47df8d602b..0000000000 --- a/build/set_options.go +++ /dev/null @@ -1,250 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/xdr" -) - -// SetOptions groups the creation of a new SetOptions with a call to Mutate. -func SetOptions(muts ...interface{}) (result SetOptionsBuilder) { - result.Mutate(muts...) - return -} - -// SetOptionsMutator is a interface that wraps the -// MutateSetOptions operation. types may implement this interface to -// specify how they modify an xdr.SetOptionsOp object -type SetOptionsMutator interface { - MutateSetOptions(*xdr.SetOptionsOp) error -} - -// SetOptionsBuilder represents a transaction that is being built. -type SetOptionsBuilder struct { - O xdr.Operation - SO xdr.SetOptionsOp - Err error -} - -// Mutate applies the provided mutators to this builder's payment or operation. -func (b *SetOptionsBuilder) Mutate(muts ...interface{}) { - for _, m := range muts { - var err error - switch mut := m.(type) { - case SetOptionsMutator: - err = mut.MutateSetOptions(&b.SO) - case OperationMutator: - err = mut.MutateOperation(&b.O) - default: - err = errors.New("Mutator type not allowed") - } - - if err != nil { - b.Err = err - return - } - } -} - -// MutateSetOptions for HomeDomain sets the SetOptionsOp's HomeDomain field -func (m HomeDomain) MutateSetOptions(o *xdr.SetOptionsOp) (err error) { - if len(m) > 32 { - return errors.New("HomeDomain is too long") - } - - value := xdr.String32(m) - o.HomeDomain = &value - return -} - -// MutateTransaction for HomeDomain allows creating an operation using a single mutator -func (m HomeDomain) MutateTransaction(t *TransactionBuilder) error { - return mutateTransactionBuilder(t, m) -} - -// MutateSetOptions for InflationDest sets the SetOptionsOp's InflationDest field -func (m InflationDest) MutateSetOptions(o *xdr.SetOptionsOp) (err error) { - o.InflationDest = &xdr.AccountId{} - err = setAccountId(string(m), o.InflationDest) - return -} - -// MutateTransaction for InflationDest allows creating an operation using a single mutator -func (m InflationDest) MutateTransaction(t *TransactionBuilder) error { - return mutateTransactionBuilder(t, m) -} - -// MutateSetOptions for MasterWeight sets the SetOptionsOp's MasterWeight field -func (m MasterWeight) MutateSetOptions(o *xdr.SetOptionsOp) (err error) { - val := xdr.Uint32(m) - o.MasterWeight = &val - return -} - -// MutateTransaction for MasterWeight allows creating an operation using a single mutator -func (m MasterWeight) MutateTransaction(t *TransactionBuilder) error { - return mutateTransactionBuilder(t, m) -} - -// AddSigner creates Signer mutator that sets account's signer -func AddSigner(publicKey string, weight uint32) 
Signer { - return Signer{publicKey, weight} -} - -// RemoveSigner creates Signer mutator that removes account's signer -func RemoveSigner(publicKey string) Signer { - return Signer{publicKey, 0} -} - -// MutateSetOptions for Signer sets the SetOptionsOp's signer field -func (m Signer) MutateSetOptions(o *xdr.SetOptionsOp) (err error) { - var signer xdr.Signer - signer.Weight = xdr.Uint32(m.Weight) - err = setAccountId(m.PublicKey, &signer.PubKey) - o.Signer = &signer - return -} - -// MutateTransaction for Signer allows creating an operation using a single mutator -func (m Signer) MutateTransaction(t *TransactionBuilder) error { - return mutateTransactionBuilder(t, m) -} - -// SetThresholds creates Thresholds mutator -func SetThresholds(low, medium, high uint32) Thresholds { - return Thresholds{ - Low: &low, - Medium: &medium, - High: &high, - } -} - -// SetLowThreshold creates Thresholds mutator that sets account's low threshold -func SetLowThreshold(value uint32) Thresholds { - return Thresholds{Low: &value} -} - -// SetMediumThreshold creates Thresholds mutator that sets account's medium threshold -func SetMediumThreshold(value uint32) Thresholds { - return Thresholds{Medium: &value} -} - -// SetHighThreshold creates Thresholds mutator that sets account's high threshold -func SetHighThreshold(value uint32) Thresholds { - return Thresholds{High: &value} -} - -// MutateSetOptions for Thresholds sets the SetOptionsOp's thresholds fields -func (m Thresholds) MutateSetOptions(o *xdr.SetOptionsOp) (err error) { - if m.Low != nil { - val := xdr.Uint32(*m.Low) - o.LowThreshold = &val - } - - if m.Medium != nil { - val := xdr.Uint32(*m.Medium) - o.MedThreshold = &val - } - - if m.High != nil { - val := xdr.Uint32(*m.High) - o.HighThreshold = &val - } - - return -} - -// MutateTransaction for Thresholds allows creating an operation using a single mutator -func (m Thresholds) MutateTransaction(t *TransactionBuilder) error { - return mutateTransactionBuilder(t, m) -} - -// SetAuthRequired sets AuthRequiredFlag on SetOptions operation -func SetAuthRequired() SetFlag { - return SetFlag(xdr.AccountFlagsAuthRequiredFlag) -} - -// SetAuthRevocable sets AuthRevocableFlag on SetOptions operation -func SetAuthRevocable() SetFlag { - return SetFlag(xdr.AccountFlagsAuthRevocableFlag) -} - -// SetAuthImmutable sets AuthImmutableFlag on SetOptions operation -func SetAuthImmutable() SetFlag { - return SetFlag(xdr.AccountFlagsAuthImmutableFlag) -} - -// MutateSetOptions for SetFlag sets the SetOptionsOp's SetFlags field -func (m SetFlag) MutateSetOptions(o *xdr.SetOptionsOp) (err error) { - if !isFlagValid(xdr.AccountFlags(m)) { - return errors.New("Unknown flag in SetFlag mutator") - } - - var val xdr.Uint32 - if o.SetFlags == nil { - val = xdr.Uint32(m) - } else { - val = xdr.Uint32(m) | *o.SetFlags - } - o.SetFlags = &val - return -} - -// MutateTransaction for SetFlag allows creating an operation using a single mutator -func (m SetFlag) MutateTransaction(t *TransactionBuilder) error { - return mutateTransactionBuilder(t, m) -} - -// ClearAuthRequired clears AuthRequiredFlag on SetOptions operation -func ClearAuthRequired() ClearFlag { - return ClearFlag(xdr.AccountFlagsAuthRequiredFlag) -} - -// ClearAuthRevocable clears AuthRevocableFlag on SetOptions operation -func ClearAuthRevocable() ClearFlag { - return ClearFlag(xdr.AccountFlagsAuthRevocableFlag) -} - -// ClearAuthImmutable clears AuthImmutableFlag on SetOptions operation -func ClearAuthImmutable() ClearFlag { - return 
ClearFlag(xdr.AccountFlagsAuthImmutableFlag) -} - -// MutateSetOptions for ClearFlag sets the SetOptionsOp's ClearFlags field -func (m ClearFlag) MutateSetOptions(o *xdr.SetOptionsOp) (err error) { - if !isFlagValid(xdr.AccountFlags(m)) { - return errors.New("Unknown flag in SetFlag mutator") - } - - var val xdr.Uint32 - if o.ClearFlags == nil { - val = xdr.Uint32(m) - } else { - val = xdr.Uint32(m) | *o.ClearFlags - } - o.ClearFlags = &val - return -} - -// MutateTransaction for ClearFlag allows creating an operation using a single mutator -func (m ClearFlag) MutateTransaction(t *TransactionBuilder) error { - return mutateTransactionBuilder(t, m) -} - -func isFlagValid(flag xdr.AccountFlags) bool { - if flag != xdr.AccountFlagsAuthRequiredFlag && - flag != xdr.AccountFlagsAuthRevocableFlag && - flag != xdr.AccountFlagsAuthImmutableFlag { - return false - } - return true -} - -func mutateTransactionBuilder(t *TransactionBuilder, m SetOptionsMutator) error { - builder := SetOptions(m) - if builder.Err != nil { - return builder.Err - } - t.Mutate(builder) - return nil -} diff --git a/build/set_options_test.go b/build/set_options_test.go deleted file mode 100644 index 3bb17db05a..0000000000 --- a/build/set_options_test.go +++ /dev/null @@ -1,179 +0,0 @@ -package build - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("SetOptionsBuilder Mutators", func() { - - var ( - subject SetOptionsBuilder - mut interface{} - - address = "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - bad = "foo" - ) - - JustBeforeEach(func() { - subject = SetOptionsBuilder{} - subject.Mutate(mut) - }) - - Describe("InflationDest", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = InflationDest(address) }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.SO.InflationDest.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = InflationDest(bad) }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("Signer", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = Signer{address, 5} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the values", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.SO.Signer.PubKey.MustEd25519()).To(Equal(aid.MustEd25519())) - Expect(subject.SO.Signer.Weight).To(Equal(xdr.Uint32(5))) - }) - }) - - Context("using an invalid PubKey", func() { - BeforeEach(func() { mut = Signer{bad, 5} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("HomeDomain", func() { - Context("using a valid value", func() { - BeforeEach(func() { mut = HomeDomain("stellar.org") }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the HomeDomain to correct value", func() { - Expect(*subject.SO.HomeDomain).To(Equal(xdr.String32("stellar.org"))) - }) - }) - - Context("value too long", func() { - BeforeEach(func() { mut = HomeDomain("123456789012345678901234567890123") }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("SetFlag", func() { - Context("using a valid account flag", func() { - BeforeEach(func() { mut = 
SetFlag(1) }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the flag to the correct value", func() { - Expect(*subject.SO.SetFlags).To(Equal(xdr.Uint32(1))) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SetFlag(3) }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("ClearFlag", func() { - Context("using a valid account flag", func() { - BeforeEach(func() { mut = ClearFlag(1) }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the flag to the correct value", func() { - Expect(*subject.SO.ClearFlags).To(Equal(xdr.Uint32(1))) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = ClearFlag(3) }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("MasterWeight", func() { - Context("using a valid weight", func() { - BeforeEach(func() { mut = MasterWeight(1) }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the weight to the correct value", func() { - Expect(*subject.SO.MasterWeight).To(Equal(xdr.Uint32(1))) - }) - }) - }) - - Describe("Thresholds", func() { - Context("using a valid weight", func() { - BeforeEach(func() { - low := uint32(1) - med := uint32(2) - high := uint32(3) - mut = Thresholds{&low, &med, &high} - }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the thresholds to the correct value", func() { - Expect(*subject.SO.LowThreshold).To(Equal(xdr.Uint32(1))) - Expect(*subject.SO.MedThreshold).To(Equal(xdr.Uint32(2))) - Expect(*subject.SO.HighThreshold).To(Equal(xdr.Uint32(3))) - }) - }) - }) - - Describe("SourceAccount", func() { - Context("using a valid stellar address", func() { - BeforeEach(func() { mut = SourceAccount{address} }) - - It("succeeds", func() { - Expect(subject.Err).NotTo(HaveOccurred()) - }) - - It("sets the destination to the correct xdr.AccountId", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.O.SourceAccount.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("using an invalid value", func() { - BeforeEach(func() { mut = SourceAccount{bad} }) - It("failed", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) -}) diff --git a/build/testing.go b/build/testing.go deleted file mode 100644 index 7edfdd2b90..0000000000 --- a/build/testing.go +++ /dev/null @@ -1,28 +0,0 @@ -package build - -import ( - "fmt" - - "github.com/stellar/go/xdr" -) - -// MockSequenceProvider is a mock sequence provider. -type MockSequenceProvider struct { - Data map[string]xdr.SequenceNumber -} - -var _ SequenceProvider = &MockSequenceProvider{} - -// SequenceForAccount implements `SequenceProvider` -func (sp *MockSequenceProvider) SequenceForAccount( - accountID string, -) (xdr.SequenceNumber, error) { - - ret, ok := sp.Data[accountID] - - if !ok { - return 0, fmt.Errorf("No sequence for %s in mock", accountID) - } - - return ret, nil -} diff --git a/build/transaction.go b/build/transaction.go deleted file mode 100644 index ac8ee78202..0000000000 --- a/build/transaction.go +++ /dev/null @@ -1,305 +0,0 @@ -package build - -import ( - "bytes" - "encoding/hex" - "errors" - "fmt" - - "github.com/stellar/go/hash" - "github.com/stellar/go/xdr" -) - -// Transaction groups the creation of a new TransactionBuilder with a call -// to Mutate. 
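Before the constructor below, a brief sketch of the calling pattern it supported, assuming the removed `build` API; Defaults is applied automatically, so the fee comes out as 100 stroops per operation and the network falls back to DefaultNetwork:

    package main

    import (
        "fmt"

        "github.com/stellar/go/build"
    )

    func main() {
        tx := build.Transaction(
            build.SourceAccount{"GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ"},
            build.Sequence{1},
            build.Payment(
                build.Destination{"GAWSI2JO2CF36Z43UGMUJCDQ2IMR5B3P5TMS7XM7NUTU3JHG3YJUDQXA"},
                build.NativeAmount{"10"},
            ),
        )
        if tx.Err != nil {
            fmt.Println("building transaction failed:", tx.Err)
            return
        }
        // Defaults{} has already run: one operation, so the fee is 100.
        fmt.Println("fee:", tx.TX.Fee, "operations:", len(tx.TX.Operations))
    }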
-func Transaction(muts ...TransactionMutator) (result *TransactionBuilder) { - result = &TransactionBuilder{} - result.Mutate(muts...) - result.Mutate(Defaults{}) - return -} - -// TransactionMutator is a interface that wraps the -// MutateTransaction operation. types may implement this interface to -// specify how they modify an xdr.Transaction object -type TransactionMutator interface { - MutateTransaction(*TransactionBuilder) error -} - -// TransactionBuilder represents a Transaction that is being constructed. -type TransactionBuilder struct { - TX *xdr.Transaction - NetworkID [32]byte - Err error -} - -// Mutate applies the provided TransactionMutators to this builder's transaction -func (b *TransactionBuilder) Mutate(muts ...TransactionMutator) { - if b.TX == nil { - b.TX = &xdr.Transaction{} - } - - for _, m := range muts { - err := m.MutateTransaction(b) - if err != nil { - b.Err = err - return - } - } -} - -// Hash returns the hash of this builder's transaction. -func (b *TransactionBuilder) Hash() ([32]byte, error) { - var txBytes bytes.Buffer - - _, err := fmt.Fprintf(&txBytes, "%s", b.NetworkID) - if err != nil { - return [32]byte{}, err - } - - _, err = xdr.Marshal(&txBytes, xdr.EnvelopeTypeEnvelopeTypeTx) - if err != nil { - return [32]byte{}, err - } - - _, err = xdr.Marshal(&txBytes, b.TX) - if err != nil { - return [32]byte{}, err - } - - return hash.Hash(txBytes.Bytes()), nil -} - -// HashHex returns the hex-encoded hash of this builder's transaction -func (b *TransactionBuilder) HashHex() (string, error) { - hash, err := b.Hash() - if err != nil { - return "", err - } - - return hex.EncodeToString(hash[:]), nil -} - -// Sign returns an new TransactionEnvelopeBuilder using this builder's -// transaction as the basis and with signatures of that transaction from the -// provided Signers. -func (b *TransactionBuilder) Sign(signers ...string) (result TransactionEnvelopeBuilder) { - result.Mutate(b) - - for _, s := range signers { - result.Mutate(Sign{s}) - } - - return -} - -// ------------------------------------------------------------ -// -// Mutator implementations -// -// ------------------------------------------------------------ - -// MutateTransaction for AccountMergeBuilder causes the underylying Destination -// to be added to the operation list for the provided transaction -func (m AccountMergeBuilder) MutateTransaction(o *TransactionBuilder) error { - if m.Err != nil { - return m.Err - } - - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeAccountMerge, m.Destination) - o.TX.Operations = append(o.TX.Operations, m.O) - return m.Err -} - -// MutateTransaction for AllowTrustBuilder causes the underylying AllowTrustOp -// to be added to the operation list for the provided transaction -func (m AllowTrustBuilder) MutateTransaction(o *TransactionBuilder) error { - if m.Err != nil { - return m.Err - } - - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeAllowTrust, m.AT) - o.TX.Operations = append(o.TX.Operations, m.O) - return m.Err -} - -// MutateTransaction for AutoSequence loads the sequence and sets it on the tx. -// NOTE: this mutator assumes that the source account has already been set on -// the transaction and will error if that has not occurred. 
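The ordering requirement in the note above was easy to trip over; a small sketch of the intended usage, reusing the MockSequenceProvider from the (also removed) build/testing.go:

    package main

    import (
        "fmt"

        "github.com/stellar/go/build"
        "github.com/stellar/go/xdr"
    )

    func main() {
        provider := &build.MockSequenceProvider{
            Data: map[string]xdr.SequenceNumber{
                "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ": 2,
            },
        }

        // SourceAccount must be applied before AutoSequence; otherwise the
        // builder records "auto sequence used prior to setting source account".
        tx := build.Transaction(
            build.SourceAccount{"GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ"},
            build.AutoSequence{provider},
        )
        if tx.Err != nil {
            fmt.Println(tx.Err)
            return
        }
        fmt.Println("sequence:", tx.TX.SeqNum) // provider value + 1, i.e. 3
    }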
-func (m AutoSequence) MutateTransaction(o *TransactionBuilder) error { - source := o.TX.SourceAccount - - if source == (xdr.AccountId{}) { - return errors.New("auto sequence used prior to setting source account") - } - - seq, err := m.SequenceForAccount(source.Address()) - if err != nil { - return err - } - - o.TX.SeqNum = seq + 1 - return nil -} - -// MutateTransaction for ChangeTrustBuilder causes the underylying -// CreateAccountOp to be added to the operation list for the provided -// transaction -func (m ChangeTrustBuilder) MutateTransaction(o *TransactionBuilder) error { - if m.Err != nil { - return m.Err - } - - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeChangeTrust, m.CT) - o.TX.Operations = append(o.TX.Operations, m.O) - return m.Err -} - -// MutateTransaction for CreateAccountBuilder causes the underylying -// CreateAccountOp to be added to the operation list for the provided -// transaction -func (m CreateAccountBuilder) MutateTransaction(o *TransactionBuilder) error { - if m.Err != nil { - return m.Err - } - - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeCreateAccount, m.CA) - o.TX.Operations = append(o.TX.Operations, m.O) - return m.Err -} - -// MutateTransaction for Defaults sets reasonable defaults on the transaction being built -func (m Defaults) MutateTransaction(o *TransactionBuilder) error { - - if o.TX.Fee == 0 { - o.TX.Fee = xdr.Uint32(100 * len(o.TX.Operations)) - } - - if o.NetworkID == [32]byte{} { - o.NetworkID = DefaultNetwork.ID() - } - return nil -} - -// MutateTransaction for InflationBuilder causes the underylying -// InflationOp to be added to the operation list for the provided -// transaction -func (m InflationBuilder) MutateTransaction(o *TransactionBuilder) error { - if m.Err != nil { - return m.Err - } - - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeInflation, nil) - o.TX.Operations = append(o.TX.Operations, m.O) - return m.Err -} - -// MutateTransaction for ManageDataBuilder causes the underylying -// ManageData to be added to the operation list for the provided -// transaction -func (m ManageDataBuilder) MutateTransaction(o *TransactionBuilder) error { - if m.Err != nil { - return m.Err - } - - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeManageData, m.MD) - o.TX.Operations = append(o.TX.Operations, m.O) - return m.Err -} - -// MutateTransaction for ManageOfferBuilder causes the underylying -// ManageData to be added to the operation list for the provided -// transaction -func (m ManageOfferBuilder) MutateTransaction(o *TransactionBuilder) error { - if m.Err != nil { - return m.Err - } - - if m.PassiveOffer { - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeCreatePassiveOffer, m.PO) - o.TX.Operations = append(o.TX.Operations, m.O) - } else { - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeManageOffer, m.MO) - o.TX.Operations = append(o.TX.Operations, m.O) - } - return m.Err -} - -// MutateTransaction for MemoHash sets the memo. -func (m MemoHash) MutateTransaction(o *TransactionBuilder) (err error) { - o.TX.Memo, err = xdr.NewMemo(xdr.MemoTypeMemoHash, m.Value) - return -} - -// MutateTransaction for MemoID sets the memo. -func (m MemoID) MutateTransaction(o *TransactionBuilder) (err error) { - o.TX.Memo, err = xdr.NewMemo(xdr.MemoTypeMemoId, xdr.Uint64(m.Value)) - return -} - -// MutateTransaction for MemoReturn sets the memo. 
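Since the memo mutators cluster here, a short sketch of how they were attached to a transaction; the over-long text value is the same 29-byte string used in the tests later in this diff:

    package main

    import (
        "fmt"

        "github.com/stellar/go/build"
    )

    func main() {
        // Each Memo* mutator simply sets the transaction's memo field.
        tx := build.Transaction(build.MemoID{123})
        fmt.Println("memo type:", tx.TX.Memo.Type, "err:", tx.Err)

        // MemoText is capped at MemoTextMaxLength (28) bytes; longer values
        // surface as an error on the builder.
        tx = build.Transaction(build.MemoText{"12345678901234567890123456789"})
        fmt.Println("too-long memo err:", tx.Err)
    }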
-func (m MemoReturn) MutateTransaction(o *TransactionBuilder) (err error) { - o.TX.Memo, err = xdr.NewMemo(xdr.MemoTypeMemoReturn, m.Value) - return -} - -// MutateTransaction for MemoText sets the memo. -func (m MemoText) MutateTransaction(o *TransactionBuilder) (err error) { - - if len([]byte(m.Value)) > MemoTextMaxLength { - err = errors.New("Memo too long; over 28 bytes") - return - } - - o.TX.Memo, err = xdr.NewMemo(xdr.MemoTypeMemoText, m.Value) - return -} - -// MutateTransaction for Network sets the Network ID to use when signing this transaction -func (m Network) MutateTransaction(o *TransactionBuilder) error { - o.NetworkID = m.ID() - return nil -} - -// MutateTransaction for PaymentBuilder causes the underylying PaymentOp -// or PathPaymentOp to be added to the operation list for the provided transaction -func (m PaymentBuilder) MutateTransaction(o *TransactionBuilder) error { - if m.Err != nil { - return m.Err - } - - if m.PathPayment { - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypePathPayment, m.PP) - o.TX.Operations = append(o.TX.Operations, m.O) - return m.Err - } - - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypePayment, m.P) - o.TX.Operations = append(o.TX.Operations, m.O) - return m.Err -} - -// MutateTransaction for SetOptionsBuilder causes the underylying -// SetOptionsOp to be added to the operation list for the provided -// transaction -func (m SetOptionsBuilder) MutateTransaction(o *TransactionBuilder) error { - if m.Err != nil { - return m.Err - } - - m.O.Body, m.Err = xdr.NewOperationBody(xdr.OperationTypeSetOptions, m.SO) - o.TX.Operations = append(o.TX.Operations, m.O) - return m.Err -} - -// MutateTransaction for Sequence sets the SeqNum on the transaction. -func (m Sequence) MutateTransaction(o *TransactionBuilder) error { - o.TX.SeqNum = xdr.SequenceNumber(m.Sequence) - return nil -} - -// MutateTransaction for SourceAccount sets the transaction's SourceAccount -// to the pubilic key for the address provided -func (m SourceAccount) MutateTransaction(o *TransactionBuilder) error { - return setAccountId(m.AddressOrSeed, &o.TX.SourceAccount) -} diff --git a/build/transaction_envelope.go b/build/transaction_envelope.go deleted file mode 100644 index 57b10d9aa6..0000000000 --- a/build/transaction_envelope.go +++ /dev/null @@ -1,125 +0,0 @@ -package build - -import ( - "bytes" - "encoding/base64" - - "github.com/stellar/go/keypair" - "github.com/stellar/go/xdr" -) - -// TransactionEnvelopeMutator is a interface that wraps the -// MutateTransactionEnvelope operation. types may implement this interface to -// specify how they modify an xdr.TransactionEnvelope object -type TransactionEnvelopeMutator interface { - MutateTransactionEnvelope(*TransactionEnvelopeBuilder) error -} - -// TransactionEnvelopeBuilder helps you build a TransactionEnvelope -type TransactionEnvelopeBuilder struct { - E *xdr.TransactionEnvelope - Err error - - child *TransactionBuilder -} - -func (b *TransactionEnvelopeBuilder) Init() { - if b.E == nil { - b.E = &xdr.TransactionEnvelope{} - } - - if b.child == nil { - b.child = &TransactionBuilder{TX: &b.E.Tx} - } -} - -// Mutate applies the provided TransactionEnvelopeMutators to this builder's -// envelope -func (b *TransactionEnvelopeBuilder) Mutate(muts ...TransactionEnvelopeMutator) { - b.Init() - - for _, m := range muts { - err := m.MutateTransactionEnvelope(b) - if err != nil { - b.Err = err - return - } - } -} - -// MutateTX runs Mutate on the underlying transaction using the provided -// mutators. 
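To see how the envelope builder below was driven end to end, a sketch that signs and encodes a transaction with the seed that also appears in the envelope tests in this diff:

    package main

    import (
        "fmt"

        "github.com/stellar/go/build"
    )

    func main() {
        seed := "SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H"

        tx := build.Transaction(
            build.SourceAccount{seed},
            build.Sequence{1},
        )

        // Sign wraps the transaction in an envelope and appends one decorated
        // signature per seed; Base64 then XDR-encodes the whole envelope.
        env := tx.Sign(seed)
        if env.Err != nil {
            fmt.Println("signing failed:", env.Err)
            return
        }
        b64, err := env.Base64()
        if err != nil {
            fmt.Println("encoding failed:", err)
            return
        }
        fmt.Println(b64)
    }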
-func (b *TransactionEnvelopeBuilder) MutateTX(muts ...TransactionMutator) { - b.Init() - - if b.Err != nil { - return - } - - b.child.Mutate(muts...) - b.Err = b.child.Err -} - -// Bytes encodes the builder's underlying envelope to XDR -func (b *TransactionEnvelopeBuilder) Bytes() ([]byte, error) { - if b.Err != nil { - return nil, b.Err - } - - var txBytes bytes.Buffer - _, err := xdr.Marshal(&txBytes, b.E) - if err != nil { - return nil, err - } - - return txBytes.Bytes(), nil -} - -// Base64 returns a string which is the xdr-then-base64-encoded form -// of the builder's underlying transaction envelope -func (b *TransactionEnvelopeBuilder) Base64() (string, error) { - bs, err := b.Bytes() - return base64.StdEncoding.EncodeToString(bs), err -} - -// ------------------------------------------------------------ -// -// Mutator implementations -// -// ------------------------------------------------------------ - -// MutateTransactionEnvelope adds a signature to the provided envelope -func (m Sign) MutateTransactionEnvelope(txe *TransactionEnvelopeBuilder) error { - hash, err := txe.child.Hash() - - if err != nil { - return err - } - - kp, err := keypair.Parse(m.Seed) - if err != nil { - return err - } - - sig, err := kp.SignDecorated(hash[:]) - if err != nil { - return err - } - - txe.E.Signatures = append(txe.E.Signatures, sig) - return nil -} - -// MutateTransactionEnvelope for TransactionBuilder causes the underylying -// transaction to be set as the provided envelope's Tx field -func (m *TransactionBuilder) MutateTransactionEnvelope(txe *TransactionEnvelopeBuilder) error { - if m.Err != nil { - return m.Err - } - - txe.E.Tx = *m.TX - newChild := *m - txe.child = &newChild - m.TX = &txe.E.Tx - return nil -} diff --git a/build/transaction_envelope_test.go b/build/transaction_envelope_test.go deleted file mode 100644 index ce84af4406..0000000000 --- a/build/transaction_envelope_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package build - -import ( - "errors" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("TransactionEnvelope Mutators:", func() { - - var ( - subject TransactionEnvelopeBuilder - mut TransactionEnvelopeMutator - ) - - BeforeEach(func() { subject = TransactionEnvelopeBuilder{} }) - JustBeforeEach(func() { subject.Mutate(mut) }) - - Describe("TransactionBuilder", func() { - Context("that is valid", func() { - BeforeEach(func() { mut = Transaction(Sequence{10}) }) - It("succeeds", func() { Expect(subject.Err).NotTo(HaveOccurred()) }) - It("sets the TX", func() { Expect(subject.E.Tx.SeqNum).To(BeEquivalentTo(10)) }) - }) - - Context("with an error set on it", func() { - err := errors.New("busted!") - BeforeEach(func() { mut = &TransactionBuilder{Err: err} }) - It("propagates the error upwards", func() { Expect(subject.Err).To(Equal(err)) }) - }) - - }) - - Describe("Sign", func() { - Context("with a valid key", func() { - BeforeEach(func() { - subject.MutateTX(SourceAccount{"SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H"}) - mut = Sign{"SDOTALIMPAM2IV65IOZA7KZL7XWZI5BODFXTRVLIHLQZQCKK57PH5F3H"} - }) - - It("succeeds", func() { Expect(subject.Err).NotTo(HaveOccurred()) }) - It("adds a signature to the envelope", func() { - Expect(subject.E.Signatures).To(HaveLen(1)) - }) - }) - - Context("with an invalid key", func() { - BeforeEach(func() { mut = Sign{""} }) - It("fails", func() { - Expect(subject.Err).To(HaveOccurred()) - }) - }) - }) - -}) diff --git a/build/transaction_test.go b/build/transaction_test.go deleted file mode 100644 index d8b7bc7346..0000000000 --- a/build/transaction_test.go +++ /dev/null @@ -1,135 +0,0 @@ -package build - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("Transaction Mutators:", func() { - - var ( - subject *TransactionBuilder - mut TransactionMutator - ) - - BeforeEach(func() { subject = &TransactionBuilder{} }) - JustBeforeEach(func() { subject.Mutate(mut) }) - - Describe("Defaults", func() { - BeforeEach(func() { - subject.Mutate(Payment()) - mut = Defaults{} - }) - It("sets the fee", func() { Expect(subject.TX.Fee).To(BeEquivalentTo(100)) }) - It("sets the network id", func() { Expect(subject.NetworkID).To(Equal(DefaultNetwork.ID())) }) - - Context("on a transaction with 2 operations", func() { - BeforeEach(func() { subject.Mutate(Payment()) }) - It("sets the fee to 200", func() { Expect(subject.TX.Fee).To(BeEquivalentTo(200)) }) - }) - }) - - Describe("MemoHash", func() { - BeforeEach(func() { mut = MemoHash{[32]byte{0x01}} }) - It("sets a Hash memo on the transaction", func() { - Expect(subject.TX.Memo.Type).To(Equal(xdr.MemoTypeMemoHash)) - Expect(subject.TX.Memo.MustHash()).To(Equal(xdr.Hash([32]byte{0x01}))) - }) - }) - - Describe("MemoID", func() { - BeforeEach(func() { mut = MemoID{123} }) - It("sets an ID memo on the transaction", func() { - Expect(subject.TX.Memo.Type).To(Equal(xdr.MemoTypeMemoId)) - Expect(subject.TX.Memo.MustId()).To(Equal(xdr.Uint64(123))) - }) - }) - - Describe("MemoReturn", func() { - BeforeEach(func() { mut = MemoReturn{[32]byte{0x01}} }) - It("sets a Hash memo on the transaction", func() { - Expect(subject.TX.Memo.Type).To(Equal(xdr.MemoTypeMemoReturn)) - Expect(subject.TX.Memo.MustRetHash()).To(Equal(xdr.Hash([32]byte{0x01}))) - }) - }) - - Describe("MemoText", func() { - BeforeEach(func() { mut = MemoText{"hello"} }) - It("sets a TEXT memo on the transaction", func() { - Expect(subject.TX.Memo.Type).To(Equal(xdr.MemoTypeMemoText)) - 
Expect(subject.TX.Memo.MustText()).To(Equal("hello")) - }) - - Context("a string longer than 28 bytes", func() { - BeforeEach(func() { mut = MemoText{"12345678901234567890123456789"} }) - It("sets an error", func() { - Expect(subject.Err).ToNot(BeNil()) - }) - }) - }) - - Describe("AllowTrustBuilder", func() { - BeforeEach(func() { mut = AllowTrust() }) - It("adds itself to the tx's operations", func() { - Expect(subject.TX.Operations).To(HaveLen(1)) - }) - }) - - Describe("PaymentBuilder", func() { - BeforeEach(func() { mut = Payment() }) - It("adds itself to the tx's operations", func() { - Expect(subject.TX.Operations).To(HaveLen(1)) - }) - }) - - Describe("SourceAccount", func() { - Context("with a valid address", func() { - address := "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ" - BeforeEach(func() { mut = SourceAccount{address} }) - It("sets the AccountId correctly", func() { - var aid xdr.AccountId - aid.SetAddress(address) - Expect(subject.TX.SourceAccount.MustEd25519()).To(Equal(aid.MustEd25519())) - }) - }) - - Context("with bad address", func() { - BeforeEach(func() { mut = SourceAccount{"foo"} }) - It("fails", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - }) - - Describe("Sequence", func() { - BeforeEach(func() { mut = Sequence{12345} }) - It("succeeds", func() { Expect(subject.Err).NotTo(HaveOccurred()) }) - It("sets the sequence", func() { Expect(subject.TX.SeqNum).To(BeEquivalentTo(12345)) }) - }) - - Describe("AutoSequence", func() { - BeforeEach(func() { - mock := &MockSequenceProvider{ - Data: map[string]xdr.SequenceNumber{ - "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ": 2, - }, - } - - mut = AutoSequence{mock} - }) - - Context("with no source account set", func() { - It("fails", func() { Expect(subject.Err).To(HaveOccurred()) }) - }) - - Context("with a source account set", func() { - BeforeEach(func() { - subject.Mutate(SourceAccount{ - "GAXEMCEXBERNSRXOEKD4JAIKVECIXQCENHEBRVSPX2TTYZPMNEDSQCNQ", - }) - }) - - It("succeeds", func() { Expect(subject.Err).NotTo(HaveOccurred()) }) - It("sets the sequence", func() { Expect(subject.TX.SeqNum).To(BeEquivalentTo(3)) }) - }) - }) -}) diff --git a/build/util.go b/build/util.go deleted file mode 100644 index 4409fc0d4c..0000000000 --- a/build/util.go +++ /dev/null @@ -1,47 +0,0 @@ -package build - -import ( - "errors" - - "github.com/stellar/go/keypair" - "github.com/stellar/go/xdr" -) - -func setAccountId(addressOrSeed string, aid *xdr.AccountId) error { - kp, err := keypair.Parse(addressOrSeed) - if err != nil { - return err - } - - if aid == nil { - return errors.New("aid is nil in setAccountId") - } - - return aid.SetAddress(kp.Address()) -} - -func createAlphaNumAsset(code, issuerAccountId string) (xdr.Asset, error) { - var issuer xdr.AccountId - err := setAccountId(issuerAccountId, &issuer) - if err != nil { - return xdr.Asset{}, err - } - - length := len(code) - switch { - case length >= 1 && length <= 4: - var codeArray [4]byte - byteArray := []byte(code) - copy(codeArray[:], byteArray[0:length]) - asset := xdr.AssetAlphaNum4{codeArray, issuer} - return xdr.NewAsset(xdr.AssetTypeAssetTypeCreditAlphanum4, asset) - case length >= 5 && length <= 12: - var codeArray [12]byte - byteArray := []byte(code) - copy(codeArray[:], byteArray[0:length]) - asset := xdr.AssetAlphaNum12{codeArray, issuer} - return xdr.NewAsset(xdr.AssetTypeAssetTypeCreditAlphanum12, asset) - default: - return xdr.Asset{}, errors.New("Asset code length is invalid") - } -} diff --git a/clients/README.md 
b/clients/README.md index 59a6090418..216862fb8b 100644 --- a/clients/README.md +++ b/clients/README.md @@ -1,11 +1,16 @@ # Clients package -Packages contained by this package provide client libraries for accessing the ecosystem of stellar services. At present, it only contains a simple horizon client library, but in the future it will contain clients to interact with stellar-core, federation, the bridge server and more. +Packages here provide client libraries for accessing the ecosystem of Stellar services. -See [godoc](https://godoc.org/github.com/stellar/go/clients) for details about each package. +* `horizonclient` - programmatic client access to Horizon (use in conjunction with [txnbuild](../txnbuild)) +* `stellartoml` - parse Stellar.toml files from the internet +* `federation` - resolve federation addresses into stellar account IDs, suitable for use within a transaction +* `horizon` (DEPRECATED) - the original Horizon client, now superseded by `horizonclient` -## Adding new client packages +See [GoDoc](https://godoc.org/github.com/stellar/go/clients) for more details. + +## For developers: Adding new client packages Ideally, each one of our client packages will have commonalities in their API to ease the cost of learning each. It's recommended that we follow a pattern similar to the `net/http` package's client shape: -A type, `Client`, is the central type of any client package, and its methods should provide the bulk of the functionality for the package. A `DefaultClient` var is provided for consumers that don't need client-level customization of behavior. Each method on the `Client` type should have a corresponding func at the package level that proxies a call through to the default client. For example, `http.Get()` is the equivalent of `http.DefaultClient.Get()`. \ No newline at end of file +A type, `Client`, is the central type of any client package, and its methods should provide the bulk of the functionality for the package. A `DefaultClient` var is provided for consumers that don't need client-level customization of behavior. Each method on the `Client` type should have a corresponding func at the package level that proxies a call through to the default client. For example, `http.Get()` is the equivalent of `http.DefaultClient.Get()`. diff --git a/clients/federation/client.go b/clients/federation/client.go new file mode 100644 index 0000000000..ca5ca5c147 --- /dev/null +++ b/clients/federation/client.go @@ -0,0 +1,153 @@ +package federation + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "strings" + + "github.com/stellar/go/address" + proto "github.com/stellar/go/protocols/federation" + "github.com/stellar/go/support/errors" +) + +// LookupByAddress performs a federated lookup following the stellar +// federation protocol using the "name" type request. The provided address is +// used to resolve what server the request should be made against. NOTE: the +// "name" type is a holdover from the legacy stellar network's federation +// protocol. It is unfortunate.
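A short usage sketch for the new client, assuming the DefaultPublicNetClient declared in clients/federation/main.go later in this diff; the address is the one used in the package's tests:

    package main

    import (
        "fmt"

        "github.com/stellar/go/clients/federation"
    )

    func main() {
        // Resolves the stellar.toml of the domain part of the address, reads its
        // federation server entry, and issues a "name"-type query for the full
        // address.
        resp, err := federation.DefaultPublicNetClient.LookupByAddress("scott*stellar.org")
        if err != nil {
            fmt.Println("lookup failed:", err)
            return
        }
        fmt.Println("account:", resp.AccountID, "memo type:", resp.MemoType, "memo:", resp.Memo.String())
    }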
+func (c *Client) LookupByAddress(addy string) (*proto.NameResponse, error) { + _, domain, err := address.Split(addy) + if err != nil { + return nil, errors.Wrap(err, "parse address failed") + } + + fserv, err := c.getFederationServer(domain) + if err != nil { + return nil, errors.Wrap(err, "lookup federation server failed") + } + + qstr := url.Values{} + qstr.Add("type", "name") + qstr.Add("q", addy) + url := c.url(fserv, qstr) + + var resp proto.NameResponse + err = c.getJSON(url, &resp) + if err != nil { + return nil, errors.Wrap(err, "get federation failed") + } + + if resp.MemoType != "" && resp.Memo.String() == "" { + return nil, errors.New("Invalid federation response (memo)") + } + + return &resp, nil +} + +// LookupByAccountID performs a federated lookup following to the stellar +// federation protocol using the "id" type request. The provided strkey-encoded +// account id is used to resolve what server the request should be made against. +func (c *Client) LookupByAccountID(aid string) (*proto.IDResponse, error) { + + domain, err := c.Horizon.HomeDomainForAccount(aid) + if err != nil { + return nil, errors.Wrap(err, "get homedomain failed") + } + + if domain == "" { + return nil, errors.New("homedomain not set") + } + + fserv, err := c.getFederationServer(domain) + if err != nil { + return nil, errors.Wrap(err, "lookup federation server failed") + } + + qstr := url.Values{} + qstr.Add("type", "id") + qstr.Add("q", aid) + url := c.url(fserv, qstr) + + var resp proto.IDResponse + err = c.getJSON(url, &resp) + if err != nil { + return nil, errors.Wrap(err, "get federation failed") + } + + return &resp, nil +} + +// ForwardRequest performs a federated lookup following to the stellar +// federation protocol using the "forward" type request. +func (c *Client) ForwardRequest(domain string, fields url.Values) (*proto.NameResponse, error) { + fserv, err := c.getFederationServer(domain) + if err != nil { + return nil, errors.Wrap(err, "lookup federation server failed") + } + + fields.Add("type", "forward") + url := c.url(fserv, fields) + + var resp proto.NameResponse + err = c.getJSON(url, &resp) + if err != nil { + return nil, errors.Wrap(err, "get federation failed") + } + + if resp.MemoType != "" && resp.Memo.String() == "" { + return nil, errors.New("Invalid federation response (memo)") + } + + return &resp, nil +} + +func (c *Client) getFederationServer(domain string) (string, error) { + stoml, err := c.StellarTOML.GetStellarToml(domain) + if err != nil { + return "", errors.Wrap(err, "get stellar.toml failed") + } + + if stoml.FederationServer == "" { + return "", errors.New("stellar.toml is missing federation server info") + } + + if !c.AllowHTTP && !strings.HasPrefix(stoml.FederationServer, "https://") { + return "", errors.New("non-https federation server disallowed") + } + + return stoml.FederationServer, nil +} + +// getJSON populates `dest` with the contents at `url`, provided the request +// succeeds and the json can be successfully decoded. 
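The size guard in getJSON below is a reusable pattern; here is a standalone sketch of the same idea with a hypothetical 16-byte limit, unrelated to any real federation payload:

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        const maxSize = 16 // hypothetical limit, in bytes
        body := strings.NewReader(`{"account_id":"GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C"}`)

        // Decoding through a LimitedReader means an oversized body shows up as an
        // unexpected EOF with no read budget left, which is distinguishable from
        // ordinary malformed JSON.
        limited := &io.LimitedReader{R: body, N: maxSize}
        var dest map[string]interface{}
        err := json.NewDecoder(limited).Decode(&dest)
        if err == io.ErrUnexpectedEOF && limited.N == 0 {
            fmt.Printf("response exceeds %d byte limit\n", maxSize)
            return
        }
        if err != nil {
            fmt.Println("decode failed:", err)
            return
        }
        fmt.Println(dest)
    }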
+func (c *Client) getJSON(url string, dest interface{}) error { + hresp, err := c.HTTP.Get(url) + if err != nil { + return errors.Wrap(err, "http get errored") + } + + defer hresp.Body.Close() + + if !(hresp.StatusCode >= 200 && hresp.StatusCode < 300) { + return errors.Errorf("http get failed with (%d) status code", hresp.StatusCode) + } + + limitReader := io.LimitReader(hresp.Body, FederationResponseMaxSize) + + err = json.NewDecoder(limitReader).Decode(dest) + if err == io.ErrUnexpectedEOF && limitReader.(*io.LimitedReader).N == 0 { + return errors.Errorf("federation response exceeds %d bytes limit", FederationResponseMaxSize) + } + + if err != nil { + return errors.Wrap(err, "json decode errored") + } + + return nil +} + +func (c *Client) url(endpoint string, qstr url.Values) string { + return fmt.Sprintf("%s?%s", endpoint, qstr.Encode()) +} diff --git a/clients/federation/client_test.go b/clients/federation/client_test.go new file mode 100644 index 0000000000..c3ce075531 --- /dev/null +++ b/clients/federation/client_test.go @@ -0,0 +1,184 @@ +package federation + +import ( + "errors" + "net/http" + "net/url" + "strings" + "testing" + + hc "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/clients/stellartoml" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" +) + +func TestLookupByAddress(t *testing.T) { + hmock := httptest.NewClient() + tomlmock := &stellartoml.MockClient{} + c := &Client{StellarTOML: tomlmock, HTTP: hmock} + + // happy path - string integer + tomlmock.On("GetStellarToml", "stellar.org").Return(&stellartoml.Response{ + FederationServer: "https://stellar.org/federation", + }, nil) + hmock.On("GET", "https://stellar.org/federation"). + ReturnJSON(http.StatusOK, map[string]string{ + "stellar_address": "scott*stellar.org", + "account_id": "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C", + "memo_type": "id", + "memo": "123", + }) + resp, err := c.LookupByAddress("scott*stellar.org") + + if assert.NoError(t, err) { + assert.Equal(t, "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C", resp.AccountID) + assert.Equal(t, "id", resp.MemoType) + assert.Equal(t, "123", resp.Memo.String()) + } + + // happy path - integer + tomlmock.On("GetStellarToml", "stellar.org").Return(&stellartoml.Response{ + FederationServer: "https://stellar.org/federation", + }, nil) + hmock.On("GET", "https://stellar.org/federation"). + ReturnJSON(http.StatusOK, map[string]interface{}{ + "stellar_address": "scott*stellar.org", + "account_id": "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C", + "memo_type": "id", + "memo": 123, + }) + resp, err = c.LookupByAddress("scott*stellar.org") + + if assert.NoError(t, err) { + assert.Equal(t, "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C", resp.AccountID) + assert.Equal(t, "id", resp.MemoType) + assert.Equal(t, "123", resp.Memo.String()) + } + + // happy path - string + tomlmock.On("GetStellarToml", "stellar.org").Return(&stellartoml.Response{ + FederationServer: "https://stellar.org/federation", + }, nil) + hmock.On("GET", "https://stellar.org/federation"). 
+ ReturnJSON(http.StatusOK, map[string]interface{}{ + "stellar_address": "scott*stellar.org", + "account_id": "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C", + "memo_type": "text", + "memo": "testing", + }) + resp, err = c.LookupByAddress("scott*stellar.org") + + if assert.NoError(t, err) { + assert.Equal(t, "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C", resp.AccountID) + assert.Equal(t, "text", resp.MemoType) + assert.Equal(t, "testing", resp.Memo.String()) + } + + // response exceeds limit + tomlmock.On("GetStellarToml", "toobig.org").Return(&stellartoml.Response{ + FederationServer: "https://toobig.org/federation", + }, nil) + hmock.On("GET", "https://toobig.org/federation"). + ReturnJSON(http.StatusOK, map[string]string{ + "stellar_address": strings.Repeat("0", FederationResponseMaxSize) + "*stellar.org", + "account_id": "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C", + "memo_type": "id", + "memo": "123", + }) + _, err = c.LookupByAddress("response*toobig.org") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "federation response exceeds") + } + + // failed toml resolution + tomlmock.On("GetStellarToml", "missing.org").Return( + (*stellartoml.Response)(nil), + errors.New("toml failed"), + ) + _, err = c.LookupByAddress("scott*missing.org") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "toml failed") + } + + // 404 federation response + tomlmock.On("GetStellarToml", "404.org").Return(&stellartoml.Response{ + FederationServer: "https://404.org/federation", + }, nil) + hmock.On("GET", "https://404.org/federation").ReturnNotFound() + _, err = c.LookupByAddress("scott*404.org") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "failed with (404)") + } + + // connection error on federation response + tomlmock.On("GetStellarToml", "error.org").Return(&stellartoml.Response{ + FederationServer: "https://error.org/federation", + }, nil) + hmock.On("GET", "https://error.org/federation").ReturnError("kaboom!") + _, err = c.LookupByAddress("scott*error.org") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "kaboom!") + } +} + +func TestLookupByID(t *testing.T) { + horizonMock := &hc.MockClient{} + client := &Client{Horizon: horizonMock} + + horizonMock.On("HomeDomainForAccount", "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C"). + Return("", errors.New("homedomain not set")) + + // an account without a homedomain set fails + _, err := client.LookupByAccountID("GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C") + assert.Error(t, err) + assert.Equal(t, "get homedomain failed: homedomain not set", err.Error()) +} + +func TestForwardRequest(t *testing.T) { + hmock := httptest.NewClient() + tomlmock := &stellartoml.MockClient{} + c := &Client{StellarTOML: tomlmock, HTTP: hmock} + + // happy path - string integer + tomlmock.On("GetStellarToml", "stellar.org").Return(&stellartoml.Response{ + FederationServer: "https://stellar.org/federation", + }, nil) + hmock.On("GET", "https://stellar.org/federation"). 
+ ReturnJSON(http.StatusOK, map[string]string{ + "account_id": "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C", + "memo_type": "id", + "memo": "123", + }) + fields := url.Values{} + fields.Add("federation_type", "bank_account") + fields.Add("swift", "BOPBPHMM") + fields.Add("acct", "2382376") + resp, err := c.ForwardRequest("stellar.org", fields) + + if assert.NoError(t, err) { + assert.Equal(t, "GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C", resp.AccountID) + assert.Equal(t, "id", resp.MemoType) + assert.Equal(t, "123", resp.Memo.String()) + } +} + +func Test_url(t *testing.T) { + c := &Client{} + + // forward requests + qstr := url.Values{} + qstr.Add("type", "forward") + qstr.Add("federation_type", "bank_account") + qstr.Add("swift", "BOPBPHMM") + qstr.Add("acct", "2382376") + furl := c.url("https://stellar.org/federation", qstr) + assert.Equal(t, "https://stellar.org/federation?acct=2382376&federation_type=bank_account&swift=BOPBPHMM&type=forward", furl) + + // regression: ensure that query is properly URI encoded + qstr = url.Values{} + qstr.Add("type", "q") + qstr.Add("q", "scott+receiver1@stellar.org*stellar.org") + furl = c.url("", qstr) + assert.Equal(t, "?q=scott%2Breceiver1%40stellar.org%2Astellar.org&type=q", furl) +} diff --git a/clients/federation/main.go b/clients/federation/main.go new file mode 100644 index 0000000000..f1a30638da --- /dev/null +++ b/clients/federation/main.go @@ -0,0 +1,66 @@ +package federation + +import ( + "net/http" + "net/url" + + hc "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/clients/stellartoml" + proto "github.com/stellar/go/protocols/federation" +) + +// FederationResponseMaxSize is the maximum size of response from a federation server +const FederationResponseMaxSize = 100 * 1024 + +// DefaultTestNetClient is a default federation client for testnet +var DefaultTestNetClient = &Client{ + HTTP: http.DefaultClient, + Horizon: hc.DefaultTestNetClient, + StellarTOML: stellartoml.DefaultClient, +} + +// DefaultPublicNetClient is a default federation client for pubnet +var DefaultPublicNetClient = &Client{ + HTTP: http.DefaultClient, + Horizon: hc.DefaultPublicNetClient, + StellarTOML: stellartoml.DefaultClient, +} + +// Client represents a client that is capable of resolving a federation request +// using the internet. +type Client struct { + StellarTOML StellarTOML + HTTP HTTP + Horizon Horizon + AllowHTTP bool +} + +type ClientInterface interface { + LookupByAddress(addy string) (*proto.NameResponse, error) + LookupByAccountID(aid string) (*proto.IDResponse, error) + ForwardRequest(domain string, fields url.Values) (*proto.NameResponse, error) +} + +// Horizon represents a horizon client that can be consulted for data when +// needed as part of the federation protocol +type Horizon interface { + HomeDomainForAccount(aid string) (string, error) +} + +// HTTP represents the http client that a federation client uses to make http +// requests. +type HTTP interface { + Get(url string) (*http.Response, error) +} + +// StellarTOML represents a client that can resolve a given domain name to +// stellar.toml file. The response is used to find the federation server that a +// query should be made against. 
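These small interfaces (including StellarTOML just below) are what make the client easy to wire by hand; a sketch equivalent to DefaultPublicNetClient, where any field can be swapped for a custom or mock implementation:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/stellar/go/clients/federation"
        hc "github.com/stellar/go/clients/horizonclient"
        "github.com/stellar/go/clients/stellartoml"
    )

    func main() {
        client := &federation.Client{
            HTTP:        http.DefaultClient,
            Horizon:     hc.DefaultPublicNetClient,
            StellarTOML: stellartoml.DefaultClient,
        }

        resp, err := client.LookupByAccountID("GASTNVNLHVR3NFO3QACMHCJT3JUSIV4NBXDHDO4VTPDTNN65W3B2766C")
        if err != nil {
            // Fails unless the account has a home domain whose stellar.toml names
            // a federation server.
            fmt.Println("lookup failed:", err)
            return
        }
        fmt.Printf("%+v\n", resp)
    }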
+type StellarTOML interface { + GetStellarToml(domain string) (*stellartoml.Response, error) +} + +// confirm interface conformity +var _ StellarTOML = stellartoml.DefaultClient +var _ HTTP = http.DefaultClient +var _ ClientInterface = &Client{} diff --git a/clients/horizon/main.go b/clients/horizon/main.go deleted file mode 100644 index ed4f0fec48..0000000000 --- a/clients/horizon/main.go +++ /dev/null @@ -1,124 +0,0 @@ -// Package horizon provides client access to a horizon server, allowing an -// application to post transactions and lookup ledger information. -// -// Create an instance of `Client` to customize the server used, or alternatively -// use `DefaultTestNetClient` or `DefaultPublicNetClient` to access the SDF run -// horizon servers. -package horizon - -import ( - "encoding/json" - "net/http" - "net/url" - "strconv" - "sync" - - "github.com/stellar/go/xdr" -) - -// DefaultTestNetClient is a default client to connect to test network -var DefaultTestNetClient = &Client{URL: "https://horizon-testnet.stellar.org"} - -// DefaultPublicNetClient is a default client to connect to public network -var DefaultPublicNetClient = &Client{URL: "https://horizon.stellar.org"} - -// Error struct contains the problem returned by Horizon -type Error struct { - Response *http.Response - Problem Problem -} - -func (herror *Error) Error() string { - return "Horizon error" -} - -type HorizonHttpClient interface { - Get(url string) (resp *http.Response, err error) - PostForm(url string, data url.Values) (resp *http.Response, err error) -} - -// Client struct contains data required to connect to Horizon instance -type Client struct { - // URL of Horizon server to connect - URL string - // Will be populated with &http.Client when nil. If you want to configure your http.Client make sure Timeout is at least 10 seconds. - Client HorizonHttpClient - // clientInit initializes http client once - clientInit sync.Once -} - -// LoadAccount loads the account state from horizon. err can be either error -// object or horizon.Error object. -func (c *Client) LoadAccount(accountID string) (account Account, err error) { - c.initHttpClient() - resp, err := c.Client.Get(c.URL + "/accounts/" + accountID) - if err != nil { - return - } - - err = decodeResponse(resp, &account) - return -} - -// SequenceForAccount implements build.SequenceProvider -func (c *Client) SequenceForAccount( - accountID string, -) (xdr.SequenceNumber, error) { - - a, err := c.LoadAccount(accountID) - if err != nil { - return 0, err - } - - seq, err := strconv.ParseUint(a.Sequence, 10, 64) - if err != nil { - return 0, err - } - - return xdr.SequenceNumber(seq), nil -} - -// SubmitTransaction submits a transaction to the network. err can be either error object or horizon.Error object. 
-func (c *Client) SubmitTransaction(transactionEnvelopeXdr string) (response TransactionSuccess, err error) { - v := url.Values{} - v.Set("tx", transactionEnvelopeXdr) - - c.initHttpClient() - resp, err := c.Client.PostForm(c.URL+"/transactions", v) - if err != nil { - return - } - - err = decodeResponse(resp, &response) - return -} - -func (c *Client) initHttpClient() { - c.clientInit.Do(func() { - if c.Client == nil { - c.Client = &http.Client{} - } - }) -} - -func decodeResponse(resp *http.Response, object interface{}) (err error) { - defer resp.Body.Close() - decoder := json.NewDecoder(resp.Body) - - if !(resp.StatusCode >= 200 && resp.StatusCode < 300) { - horizonError := &Error{ - Response: resp, - } - decodeError := decoder.Decode(&horizonError.Problem) - if decodeError != nil { - return decodeError - } - return horizonError - } - - err = decoder.Decode(&object) - if err != nil { - return - } - return -} diff --git a/clients/horizon/main_test.go b/clients/horizon/main_test.go deleted file mode 100644 index 9669d2030a..0000000000 --- a/clients/horizon/main_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package horizon - -import ( - "bytes" - "errors" - "io/ioutil" - "net/http" - "net/url" - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/stellar/go/build" -) - -func TestHorizon(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Package: github.com/stellar/go/horizon") -} - -var _ build.SequenceProvider = TestHorizonClient - -var _ = Describe("Horizon", func() { - Describe("initHttpClient", func() { - It("does not run into race condition", func() { - // Race condition should be detected by race-detector: - // http://blog.golang.org/race-detector - init := func() { - DefaultTestNetClient.initHttpClient() - } - go init() - go init() - }) - }) - - Describe("LoadAccount", func() { - It("success response", func() { - TestHorizonClient.Client = &TestHttpClient{ - Response: http.Response{ - StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString(accountResponse)), - }, - } - - account, err := TestHorizonClient.LoadAccount("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") - Expect(err).To(BeNil()) - Expect(account.ID).To(Equal("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")) - Expect(account.PT).To(Equal("1")) - Expect(account.GetNativeBalance()).To(Equal("948522307.6146000")) - }) - - It("failure response", func() { - TestHorizonClient.Client = &TestHttpClient{ - Response: http.Response{ - StatusCode: 404, - Body: ioutil.NopCloser(bytes.NewBufferString(notFoundResponse)), - }, - } - - _, err := TestHorizonClient.LoadAccount("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") - Expect(err).NotTo(BeNil()) - Expect(err.Error()).To(Equal("Horizon error")) - horizonError, ok := err.(*Error) - Expect(ok).To(BeTrue()) - Expect(horizonError.Problem.Title).To(Equal("Resource Missing")) - }) - - It("connection error", func() { - TestHorizonClient.Client = &TestHttpClient{ - Error: errors.New("http.Client error"), - } - - _, err := TestHorizonClient.LoadAccount("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") - Expect(err).NotTo(BeNil()) - Expect(err.Error()).To(Equal("http.Client error")) - _, ok := err.(*Error) - Expect(ok).To(BeFalse()) - }) - }) - - Describe("SubmitTransaction", func() { - var tx = 
"AAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAZAAT3TUAAAAwAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABSU5SAAAAAAA0jDEZkBgx+hCc5IIv+z6CoaYTB8jRkIA6drZUv3YRlwAAAAFVU0QAAAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAAAX14QAAAAAKAAAAAQAAAAAAAAAAAAAAAAAAAAG/dhGXAAAAQLuStfImg0OeeGAQmvLkJSZ1MPSkCzCYNbGqX5oYNuuOqZ5SmWhEsC7uOD9ha4V7KengiwNlc0oMNqBVo22S7gk=" - - It("success response", func() { - TestHorizonClient.Client = &TestHttpClient{ - Response: http.Response{ - StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewBufferString(submitResponse)), - }, - } - - account, err := TestHorizonClient.SubmitTransaction(tx) - Expect(err).To(BeNil()) - Expect(account.Ledger).To(Equal(int32(3128812))) - }) - - It("failure response", func() { - TestHorizonClient.Client = &TestHttpClient{ - Response: http.Response{ - StatusCode: 400, - Body: ioutil.NopCloser(bytes.NewBufferString(transactionFailure)), - }, - } - - _, err := TestHorizonClient.SubmitTransaction(tx) - Expect(err).NotTo(BeNil()) - Expect(err.Error()).To(Equal("Horizon error")) - horizonError, ok := err.(*Error) - Expect(ok).To(BeTrue()) - Expect(horizonError.Problem.Title).To(Equal("Transaction Failed")) - }) - - It("connection error", func() { - TestHorizonClient.Client = &TestHttpClient{ - Error: errors.New("http.Client error"), - } - - _, err := TestHorizonClient.SubmitTransaction(tx) - Expect(err).NotTo(BeNil()) - Expect(err.Error()).To(Equal("http.Client error")) - _, ok := err.(*Error) - Expect(ok).To(BeFalse()) - }) - }) -}) - -var TestHorizonClient = &Client{ - Client: &TestHttpClient{}, -} - -type TestHttpClient struct { - Response http.Response - Error error -} - -func (tc *TestHttpClient) Get(url string) (*http.Response, error) { - return &tc.Response, tc.Error -} - -func (tc *TestHttpClient) PostForm(url string, data url.Values) (resp *http.Response, err error) { - return &tc.Response, tc.Error -} - -var accountResponse = `{ - "_links": { - "self": { - "href": "https://horizon-testnet.stellar.org/accounts/GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" - }, - "transactions": { - "href": "https://horizon-testnet.stellar.org/accounts/GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H/transactions{?cursor,limit,order}", - "templated": true - }, - "operations": { - "href": "https://horizon-testnet.stellar.org/accounts/GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H/operations{?cursor,limit,order}", - "templated": true - }, - "payments": { - "href": "https://horizon-testnet.stellar.org/accounts/GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H/payments{?cursor,limit,order}", - "templated": true - }, - "effects": { - "href": "https://horizon-testnet.stellar.org/accounts/GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H/effects{?cursor,limit,order}", - "templated": true - }, - "offers": { - "href": "https://horizon-testnet.stellar.org/accounts/GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H/Offers{?cursor,limit,order}", - "templated": true - } - }, - "id": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", - "paging_token": "1", - "account_id": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", - "sequence": "7384", - "subentry_count": 0, - "thresholds": { - "low_threshold": 0, - "med_threshold": 0, - "high_threshold": 0 - }, - "flags": { - "auth_required": false, - "auth_revocable": false - }, - "balances": [ - { - "balance": "948522307.6146000", - "asset_type": "native" - } - ], - "signers": [ - { - "public_key": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", - 
"weight": 1 - } - ] -}` - -var notFoundResponse = `{ - "type": "https://stellar.org/horizon-errors/not_found", - "title": "Resource Missing", - "status": 404, - "detail": "The resource at the url requested was not found. This is usually occurs for one of two reasons: The url requested is not valid, or no data in our database could be found with the parameters provided.", - "instance": "horizon-live-001/61KdRW8tKi-18408110" -}` - -var submitResponse = `{ - "_links": { - "transaction": { - "href": "https://horizon-testnet.stellar.org/transactions/ee14b93fcd31d4cfe835b941a0a8744e23a6677097db1fafe0552d8657bed940" - } - }, - "hash": "ee14b93fcd31d4cfe835b941a0a8744e23a6677097db1fafe0552d8657bed940", - "ledger": 3128812, - "envelope_xdr": "AAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAZAAT3TUAAAAwAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABSU5SAAAAAAA0jDEZkBgx+hCc5IIv+z6CoaYTB8jRkIA6drZUv3YRlwAAAAFVU0QAAAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAAAX14QAAAAAKAAAAAQAAAAAAAAAAAAAAAAAAAAG/dhGXAAAAQLuStfImg0OeeGAQmvLkJSZ1MPSkCzCYNbGqX5oYNuuOqZ5SmWhEsC7uOD9ha4V7KengiwNlc0oMNqBVo22S7gk=", - "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAAAAAAPEAAAABSU5SAAAAAAA0jDEZkBgx+hCc5IIv+z6CoaYTB8jRkIA6drZUv3YRlwAAAAFVU0QAAAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAAAX14QAAAAAKAAAAAQAAAAAAAAAAAAAAAA==", - "result_meta_xdr": "AAAAAAAAAAEAAAACAAAAAAAvoHwAAAACAAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAAAAAAPEAAAABSU5SAAAAAAA0jDEZkBgx+hCc5IIv+z6CoaYTB8jRkIA6drZUv3YRlwAAAAFVU0QAAAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAAAX14QAAAAAKAAAAAQAAAAAAAAAAAAAAAAAAAAEAL6B8AAAAAAAAAAA0jDEZkBgx+hCc5IIv+z6CoaYTB8jRkIA6drZUv3YRlwAAABZ9zvNAABPdNQAAADAAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==" -}` - -var transactionFailure = `{ - "type": "https://stellar.org/horizon-errors/transaction_failed", - "title": "Transaction Failed", - "status": 400, - "detail": "The transaction failed when submitted to the stellar network. The extras.result_codes field on this response contains further details. 
Descriptions of each code can be found at: https://www.stellar.org/developers/learn/concepts/list-of-operations.html", - "instance": "horizon-testnet-001.prd.stellar001.internal.stellar-ops.com/4elYz2fHhC-528285", - "extras": { - "envelope_xdr": "AAAAAKpmDL6Z4hvZmkTBkYpHftan4ogzTaO4XTB7joLgQnYYAAAAZAAAAAAABeoyAAAAAAAAAAEAAAAAAAAAAQAAAAAAAAABAAAAAD3sEVVGZGi/NoC3ta/8f/YZKMzyi9ZJpOi0H47x7IqYAAAAAAAAAAAF9eEAAAAAAAAAAAA=", - "result_codes": { - "transaction": "tx_no_source_account" - }, - "result_xdr": "AAAAAAAAAAD////4AAAAAA==" - } -}` diff --git a/clients/horizon/responses.go b/clients/horizon/responses.go deleted file mode 100644 index 0f3b785e66..0000000000 --- a/clients/horizon/responses.go +++ /dev/null @@ -1,91 +0,0 @@ -package horizon - -type Problem struct { - Type string `json:"type"` - Title string `json:"title"` - Status int `json:"status"` - Detail string `json:"detail,omitempty"` - Instance string `json:"instance,omitempty"` - Extras map[string]interface{} `json:"extras,omitempty"` -} - -type Account struct { - Links struct { - Self Link `json:"self"` - Transactions Link `json:"transactions"` - Operations Link `json:"operations"` - Payments Link `json:"payments"` - Effects Link `json:"effects"` - Offers Link `json:"offers"` - } `json:"_links"` - - HistoryAccount - Sequence string `json:"sequence"` - SubentryCount int32 `json:"subentry_count"` - InflationDestination string `json:"inflation_destination,omitempty"` - HomeDomain string `json:"home_domain,omitempty"` - Thresholds AccountThresholds `json:"thresholds"` - Flags AccountFlags `json:"flags"` - Balances []Balance `json:"balances"` - Signers []Signer `json:"signers"` -} - -func (a Account) GetNativeBalance() string { - for _, balance := range a.Balances { - if balance.Asset.Type == "native" { - return balance.Balance - } - } - - return "0" -} - -type AccountFlags struct { - AuthRequired bool `json:"auth_required"` - AuthRevocable bool `json:"auth_revocable"` -} - -type AccountThresholds struct { - LowThreshold byte `json:"low_threshold"` - MedThreshold byte `json:"med_threshold"` - HighThreshold byte `json:"high_threshold"` -} - -type Asset struct { - Type string `json:"asset_type"` - Code string `json:"asset_code,omitempty"` - Issuer string `json:"asset_issuer,omitempty"` -} - -type Balance struct { - Balance string `json:"balance"` - Limit string `json:"limit,omitempty"` - Asset -} - -type HistoryAccount struct { - ID string `json:"id"` - PT string `json:"paging_token"` - AccountID string `json:"account_id"` -} - -type Link struct { - Href string `json:"href"` - Templated bool `json:"templated,omitempty"` -} - -type TransactionSuccess struct { - Links struct { - Transaction Link `json:"transaction"` - } `json:"_links"` - Hash string `json:"hash"` - Ledger int32 `json:"ledger"` - Env string `json:"envelope_xdr"` - Result string `json:"result_xdr"` - Meta string `json:"result_meta_xdr"` -} - -type Signer struct { - PublicKey string `json:"public_key"` - Weight int32 `json:"weight"` -} diff --git a/clients/horizonclient/CHANGELOG.md b/clients/horizonclient/CHANGELOG.md new file mode 100644 index 0000000000..ea916df9f4 --- /dev/null +++ b/clients/horizonclient/CHANGELOG.md @@ -0,0 +1,268 @@ +# Changelog + +All notable changes to this project will be documented in this +file. This project adheres to [Semantic Versioning](http://semver.org/). 
+
+
+## [v9.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v9.0.0) - 2022-01-10
+
+None
+
+## [8.0.0-beta.0](https://github.com/stellar/go/releases/tag/horizonclient-v8.0.0-beta.0) - 2021-10-04
+
+**This release adds support for Protocol 18.**
+
+* The restriction that `Fund` can only be called on the DefaultTestNetClient has been removed. Any `horizonclient.Client` may now call `Fund`. Horizon instances that do not support funding will error with a resource-not-found error.
+* Change `AccountRequest` to accept `Sponsor` and `LiquidityPool` filters
+* Change `EffectRequest`, `TransactionRequest`, and `OperationRequest` to accept a `ForLiquidityPool` filter
+* Change `TradeRequest` to accept either a `ForLiquidityPool` filter or a `TradeType` filter
+* Add `LiquidityPoolsRequest` for getting details about liquidity pools, with an optional `Reserves` field to filter by pools' reserve asset(s).
+* Add `LiquidityPoolRequest` for getting details about a _specific_ liquidity pool via the `LiquidityPoolID` filter.
+
+
+## [v7.1.1](https://github.com/stellar/go/releases/tag/horizonclient-v7.1.1) - 2021-06-25
+
+* Added transaction and operation result codes to the `horizonclient.Error` string so the underlying cause of a string-only error can be seen at a glance.
+* Fix bug in transaction submission where requests with large transaction payloads fail with an HTTP 414 URI Too Long error ([#3643](https://github.com/stellar/go/pull/3643)).
+* Fix a data race in `Client.fixHorizonURL` ([#3690](https://github.com/stellar/go/pull/3690)).
+* Fix bug which occurs when parsing operations involving muxed accounts ([#3722](https://github.com/stellar/go/pull/3722)).
+
+## [v7.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v7.0.0) - 2021-05-15
+
+None
+
+## [v6.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v6.0.0) - 2021-02-22
+
+None
+
+## [v5.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v5.0.0) - 2020-11-12
+
+None
+
+## [v4.2.0](https://github.com/stellar/go/releases/tag/horizonclient-v4.2.0) - 2020-11-11
+
+None
+
+## [v4.1.0](https://github.com/stellar/go/releases/tag/horizonclient-v4.1.0) - 2020-10-16
+
+None
+
+## [v4.0.1](https://github.com/stellar/go/releases/tag/horizonclient-v4.0.1) - 2020-10-02
+
+None
+
+## [v4.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v4.0.0) - 2020-09-29
+
+Added new client methods and effects supporting [Protocol 14](https://github.com/stellar/go/issues/3035).
+
+* New client methods (see the sketch at the end of this section):
+  * `ClaimableBalances(req ClaimableBalanceRequest)` - returns details about available claimable balances, possibly filtered to a specific sponsor or other parameters.
+  * `ClaimableBalance(balanceID string)` - returns details about a *specific*, unique claimable balance.
+* New effects:
+  * `ClaimableBalance{Created,Updated,Removed}`
+  * `ClaimableBalanceSponsorship{Created,Updated,Removed}`
+  * `AccountSponsorship{Created,Updated,Removed}`
+  * `TrustlineSponsorship{Created,Updated,Removed}`
+  * `Data{Created,Updated,Removed}`
+  * `DataSponsorship{Created,Updated,Removed}`
+  * `SignerSponsorship{Created,Updated,Removed}`
+* Removed the JSON variant of `GET /metrics`, both in the server and client code. It now uses the Prometheus format by default.
+* Added `NextAccountsPage`.
+* Fixed the `Fund` function, which consistently errored.
+* Added support for Go 1.15.
+
+### Breaking changes
+
+* Dropped support for Go 1.13.
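+
+For example, the new claimable balance endpoints introduced above can be queried much like the other request types in this package. This is a minimal sketch: `ClaimableBalances` and the `Claimant` filter are the ones described in this section, and the account address is simply the example ID used elsewhere in this changelog.
+
+```go
+	client := horizonclient.DefaultPublicNetClient
+
+	// List claimable balances that the given account is eligible to claim.
+	cbr := horizonclient.ClaimableBalanceRequest{
+		Claimant: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU",
+	}
+	balances, err := client.ClaimableBalances(cbr)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Print(balances)
+```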
+
+## [v3.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v3.0.0) - 2020-04-28
+
+### Breaking changes
+
+- The types of the following fields in the `Transaction` struct have changed from `int32` to `int64`:
+  - `FeeCharged`
+  - `MaxFee`
+- The `TransactionSuccess` Horizon response has been removed. Instead, all submit transaction functions return a full Horizon `Transaction` response on success.
+- The `GetSequenceNumber()` and `IncrementSequenceNumber()` functions on the `Account` struct now return `int64` values instead of `xdr.SequenceNumber` values.
+
+### Add
+
+- Add `IsNotFoundError`.
+- Add `client.SubmitFeeBumpTransaction` and `client.SubmitFeeBumpTransactionWithOptions` for submitting [fee bump transactions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md). Note that fee bump transactions will only be accepted by Stellar Core once Protocol 13 is enabled.
+
+### Updates
+
+- To support [CAP0018](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md) Fine-Grained Control of Authorization:
+  - There is a new effect, `TrustlineAuthorizedToMaintainLiabilities`, which occurs when a trustline has been authorized to maintain liabilities.
+  - The `AuthorizeToMaintainLiabilities` boolean field was added to the `AllowTrust` operation struct.
+- These fields were added to the `Transaction` struct to support fee bump transactions:
+  - `FeeAccount` (the account which paid the transaction fee)
+  - `FeeBumpTransaction` (only present in Protocol 13 fee bump transactions)
+  - `InnerTransaction` (only present in Protocol 13 fee bump transactions)
+- `Transaction` has a new `MemoBytes` field which is populated when `MemoType` is equal to `text`. `MemoBytes` stores the base64 encoding of the memo bytes set in the transaction envelope.
+- Fixed a bug where `HorizonTimeOut` had misleading units of time:
+  - Removed `HorizonTimeOut` (seconds)
+  - Added `HorizonTimeout` (nanoseconds)
+
+### Remove
+
+- Dropped support for Go 1.12.
+
+## [v2.2.0](https://github.com/stellar/go/releases/tag/horizonclient-v2.2.0) - 2020-03-26
+
+### Added
+
+- Add `client.SubmitTransactionWithOptions` with support for [SEP0029](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0029.md).
+  If any operation in the submitted transaction is of type
+  `payment`, `pathPaymentStrictReceive`, `pathPaymentStrictSend`, or
+  `mergeAccount`, the SDK will load the destination account from Horizon and check whether
+  `config.memo_required` is set to `1` as defined in [SEP0029](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0029.md).
+
+  For performance reasons, you may choose to skip the check by setting `SkipMemoRequiredCheck` to `true`:
+
+  ```go
+  client.SubmitTransactionWithOptions(tx, horizonclient.SubmitTxOpts{SkipMemoRequiredCheck: true})
+  ```
+
+  Additionally, the check is skipped automatically if the transaction already includes a memo.
+
+### Changed
+
+- Change `client.SubmitTransaction` to always check whether a memo is required.
+  If you want to skip the check, call `client.SubmitTransactionWithOptions` instead.
+
+## [v2.1.0](https://github.com/stellar/go/releases/tag/horizonclient-v2.1.0) - 2020-02-24
+
+### Added
+
+- Add `client.StrictReceivePaths` and `client.StrictSendPaths` ([#2237](https://github.com/stellar/go/pull/2237)).
+ +`client.StrictReceivePaths`: + +```go + client := horizonclient.DefaultPublicNetClient + // Find paths for XLM->NGN + pr := horizonclient.PathsRequest{ + DestinationAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + DestinationAmount: "100", + DestinationAssetCode: "NGN", + DestinationAssetIssuer: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + DestinationAssetType: horizonclient.AssetType4, + SourceAccount: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + } + paths, err := client.StrictReceivePaths(pr) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(paths) +``` + +`client.StrictSendPaths`: + +```go + client := horizonclient.DefaultPublicNetClient + // Find paths for USD->EUR + pr := horizonclient.StrictSendPathsRequest{ + SourceAmount: "20", + SourceAssetCode: "USD", + SourceAssetIssuer: "GDUKMGUGDZQK6YHYA5Z6AY2G4XDSZPSZ3SW5UN3ARVMO6QSRDWP5YLEX", + SourceAssetType: horizonclient.AssetType4, + DestinationAssets: "EURT:GAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S", + } + paths, err := client.StrictSendPaths(pr) +``` + +- Add `client.OfferDetails` ([#2303](https://github.com/stellar/go/pull/2303)). + +```go + client := horizonclient.DefaultPublicNetClient + offer, err := client.OfferDetails("2") + if err != nil { + fmt.Println(err) + return + } + fmt.Print(offer) +``` + +- Add support to `client.Offers` for the filters: `Seller`, `Selling` and `Buying` ([#2230](https://github.com/stellar/go/pull/2230)). +```go + offerRequest = horizonclient.OfferRequest{ + Seller: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Selling: "COP:GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Buying: "EUR:GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Order: horizonclient.OrderDesc, + } + offers, err = client.Offers(offerRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(offers) +``` +- Add `client.Accounts` ([#2229](https://github.com/stellar/go/pull/2229)). + +This feature allows account retrieval filtering by signer or by a trustline to an asset. + +```go + client := horizonclient.DefaultPublicNetClient + accountID := "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU" + accountsRequest := horizonclient.AccountsRequest{Signer: accountID} + account, err := client.Accounts(accountsRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(account) +``` + +- Add `IsNotFoundError` ([#2197](https://github.com/stellar/go/pull/2197)). + +### Deprecated + +- Make `hProtocol.FeeStats` backwards compatible with Horizon `0.24.1` and `1.0` deprecating usage of `*_accepted_fee` ([#2290](https://github.com/stellar/go/pull/2290)). + +All the `_accepted_fee` fields were removed in Horizon 1.0, however we extended this version of the SDK to backfill the `FeeStat` struct using data from `MaxFee`. This is a temporary workaround and it will be removed in horizonclient 3.0. Please start using data from `FeeStat.MaxFee` instead. + + +## [v2.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v2.0.0) - 2020-01-13 + +- Add custom `UnmarshalJSON()` implementations to Horizon protocol structs so `int64` fields can be parsed as JSON numbers or JSON strings +- Remove deprecated `fee_paid field` from Transaction response +- Dropped support for Go 1.10, 1.11. + +## [v1.4.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.4.0) - 2019-08-09 + +- Add support for querying operation endpoint with `join` parameter [#1521](https://github.com/stellar/go/issues/1521). 
+- Add support for querying previous and next trade aggregations with `Client.NextTradeAggregationsPage` and `Client.PrevTradeAggregationsPage` methods. + + +## [v1.3.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.3.0) - 2019-07-08 + +- Transaction information returned by methods now contain new fields: `FeeCharged` and `MaxFee`. `FeePaid` is deprecated and will be removed in later versions. +- Improved unit test for `Client.FetchTimebounds` method. +- Added `Client.HomeDomainForAccount` helper method for retrieving the home domain of an account. + +## [v1.2.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.2.0) - 2019-05-16 + +- Added support for returning the previous and next set of pages for a horizon response; issue [#985](https://github.com/stellar/go/issues/985). +- Fixed bug reported in [#1254](https://github.com/stellar/go/issues/1254) that causes a panic when using horizonclient in goroutines. + + +## [v1.1.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.1.0) - 2019-05-02 + +### Added + +- `Client.Root()` method for querying the root endpoint of a horizon server. +- Support for returning concrete effect types[#1217](https://github.com/stellar/go/pull/1217) +- Fix when no HTTP client is provided + +### Changes + +- `Client.Fund()` now returns `TransactionSuccess` instead of a http response pointer. + +- Querying the effects endpoint now supports returning the concrete effect type for each effect. This is also supported in streaming mode. See the [docs](https://godoc.org/github.com/stellar/go/clients/horizonclient#Client.Effects) for examples. + +## [v1.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.0) - 2019-04-26 + + * Initial release diff --git a/clients/horizonclient/README.md b/clients/horizonclient/README.md new file mode 100644 index 0000000000..9435607e71 --- /dev/null +++ b/clients/horizonclient/README.md @@ -0,0 +1,54 @@ +# horizonclient + + +`horizonclient` is a [Stellar Go SDK](https://developers.stellar.org/api/) package that provides client access to a horizon server. It supports all endpoints exposed by the [horizon API](https://developers.stellar.org/api/introduction/). + +This project is maintained by the Stellar Development Foundation. + +## Getting Started +This library is aimed at developers building Go applications that interact with the [Stellar network](https://www.stellar.org/). It allows users to query the network and submit transactions to the network. The recommended transaction builder for Go programmers is [txnbuild](https://github.com/stellar/go/tree/master/txnbuild). Together, these two libraries provide a complete Stellar SDK. + +* The [horizonclient API reference](https://godoc.org/github.com/stellar/go/clients/horizonclient). +* The [txnbuild API reference](https://godoc.org/github.com/stellar/go/txnbuild). + +### Prerequisites +* Go (this repository is officially supported on the last two releases of Go) +* [Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies + +### Installing +* `go get github.com/stellar/go/clients/horizonclient` + +### Usage + +``` golang + ... + import hClient "github.com/stellar/go/clients/horizonclient" + ... 
+ + // Use the default pubnet client + client := hClient.DefaultPublicNetClient + + // Create an account request + accountRequest := hClient.AccountRequest{AccountID: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + + // Load the account detail from the network + account, err := client.AccountDetail(accountRequest) + if err != nil { + fmt.Println(err) + return + } + // Account contains information about the stellar account + fmt.Print(account) +``` +For more examples, refer to the [documentation](https://godoc.org/github.com/stellar/go/clients/horizonclient). + +## Running the tests +Run the unit tests from the package directory: `go test` + +## Contributing +Please read [Code of Conduct](https://github.com/stellar/.github/blob/master/CODE_OF_CONDUCT.md) to understand this project's communication rules. + +To submit improvements and fixes to this library, please see [CONTRIBUTING](../CONTRIBUTING.md). + +## License +This project is licensed under the Apache License - see the [LICENSE](../../LICENSE-APACHE.txt) file for details. diff --git a/clients/horizonclient/account_request.go b/clients/horizonclient/account_request.go new file mode 100644 index 0000000000..91c85e024a --- /dev/null +++ b/clients/horizonclient/account_request.go @@ -0,0 +1,59 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the AccountRequest struct. +// If only AccountID is present, then the endpoint for account details is returned. +// If both AccounId and DataKey are present, then the endpoint for getting account data is returned +func (ar AccountRequest) BuildURL() (endpoint string, err error) { + + nParams := countParams(ar.DataKey, ar.AccountID) + + if nParams >= 1 && ar.AccountID == "" { + err = errors.New("invalid request: too few parameters") + } + + if nParams <= 0 { + err = errors.New("invalid request: no parameters") + } + + if err != nil { + return endpoint, err + } + + if ar.DataKey != "" && ar.AccountID != "" { + endpoint = fmt.Sprintf( + "accounts/%s/data/%s", + ar.AccountID, + ar.DataKey, + ) + } else if ar.AccountID != "" { + endpoint = fmt.Sprintf( + "accounts/%s", + ar.AccountID, + ) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the account endpoint +func (ar AccountRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := ar.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/account_request_test.go b/clients/horizonclient/account_request_test.go new file mode 100644 index 0000000000..e90d026e2c --- /dev/null +++ b/clients/horizonclient/account_request_test.go @@ -0,0 +1,42 @@ +package horizonclient + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccountRequestBuildUrl(t *testing.T) { + ar := AccountRequest{} + _, err := ar.BuildURL() + + // error case: No parameters + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid request: no parameters") + } + + ar.DataKey = "test" + _, err = ar.BuildURL() + + // error case: few parameters for building account data endpoint + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid request: too few parameters") + } + + ar.DataKey = "" + ar.AccountID = 
"GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU" + endpoint, err := ar.BuildURL() + + // It should return valid account details endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", endpoint) + + ar.DataKey = "test" + ar.AccountID = "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU" + endpoint, err = ar.BuildURL() + + // It should return valid account data endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/data/test", endpoint) +} diff --git a/clients/horizonclient/accounts_request.go b/clients/horizonclient/accounts_request.go new file mode 100644 index 0000000000..d48f986850 --- /dev/null +++ b/clients/horizonclient/accounts_request.go @@ -0,0 +1,73 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the AccountsRequest struct. +// Either "Signer" or "Asset" fields should be set when retrieving Accounts. +// At the moment, you can't use both filters at the same time. +func (r AccountsRequest) BuildURL() (endpoint string, err error) { + + nParams := countParams(r.Signer, r.Asset, r.Sponsor, r.LiquidityPool) + + if nParams <= 0 { + err = errors.New("invalid request: no parameters - Signer, Asset, Sponsor, or LiquidityPool must be provided") + } + + if nParams > 1 { + err = errors.New("invalid request: too many parameters - Multiple filters provided, provide a single filter") + } + + if err != nil { + return endpoint, err + } + query := url.Values{} + switch { + case len(r.Signer) > 0: + query.Add("signer", r.Signer) + + case len(r.Asset) > 0: + query.Add("asset", r.Asset) + + case len(r.Sponsor) > 0: + query.Add("sponsor", r.Sponsor) + + case len(r.LiquidityPool) > 0: + query.Add("liquidity_pool", r.LiquidityPool) + } + + endpoint = fmt.Sprintf( + "accounts?%s", + query.Encode(), + ) + + if pageParams := addQueryParams(cursor(r.Cursor), limit(r.Limit), r.Order); len(pageParams) > 0 { + endpoint = fmt.Sprintf( + "%s&%s", + endpoint, + pageParams, + ) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the accounts endpoint +func (r AccountsRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := r.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/accounts_request_test.go b/clients/horizonclient/accounts_request_test.go new file mode 100644 index 0000000000..04323e5f0b --- /dev/null +++ b/clients/horizonclient/accounts_request_test.go @@ -0,0 +1,45 @@ +package horizonclient + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccountsRequestBuildUrl(t *testing.T) { + // error case: No parameters + _, err := AccountsRequest{}.BuildURL() + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid request: no parameters") + } + + // error case: too many parameters + _, err = AccountsRequest{ + Signer: "signer", + Asset: "asset", + }.BuildURL() + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid request: too many parameters") + } + + // signer + endpoint, err := AccountsRequest{Signer: "abcdef"}.BuildURL() + require.NoError(t, err) 
+ assert.Equal(t, "accounts?signer=abcdef", endpoint) + + // asset + endpoint, err = AccountsRequest{Asset: "abcdef"}.BuildURL() + require.NoError(t, err) + assert.Equal(t, "accounts?asset=abcdef", endpoint) + + // sponsor + endpoint, err = AccountsRequest{Sponsor: "abcdef"}.BuildURL() + require.NoError(t, err) + assert.Equal(t, "accounts?sponsor=abcdef", endpoint) + + // liquidity_pool + endpoint, err = AccountsRequest{LiquidityPool: "abcdef"}.BuildURL() + require.NoError(t, err) + assert.Equal(t, "accounts?liquidity_pool=abcdef", endpoint) +} diff --git a/clients/horizonclient/asset_request.go b/clients/horizonclient/asset_request.go new file mode 100644 index 0000000000..f0a8f3c522 --- /dev/null +++ b/clients/horizonclient/asset_request.go @@ -0,0 +1,40 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the AssetRequest struct. +// If no data is set, it defaults to the build the URL for all assets +func (ar AssetRequest) BuildURL() (endpoint string, err error) { + endpoint = "assets" + queryParams := addQueryParams(assetCode(ar.ForAssetCode), assetIssuer(ar.ForAssetIssuer), cursor(ar.Cursor), limit(ar.Limit), ar.Order) + if queryParams != "" { + endpoint = fmt.Sprintf( + "%s?%s", + endpoint, + queryParams, + ) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the assets endpoint +func (ar AssetRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := ar.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/asset_request_test.go b/clients/horizonclient/asset_request_test.go new file mode 100644 index 0000000000..b8d32cdb3f --- /dev/null +++ b/clients/horizonclient/asset_request_test.go @@ -0,0 +1,31 @@ +package horizonclient + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAssetRequestBuildUrl(t *testing.T) { + er := AssetRequest{} + endpoint, err := er.BuildURL() + + // It should return valid all assets endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "assets", endpoint) + + er = AssetRequest{ForAssetIssuer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + endpoint, err = er.BuildURL() + + // It should return valid assets endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "assets?asset_issuer=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", endpoint) + + er = AssetRequest{ForAssetIssuer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", ForAssetCode: "ABC", Order: OrderDesc} + endpoint, err = er.BuildURL() + + // It should return valid assets endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "assets?asset_code=ABC&asset_issuer=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU&order=desc", endpoint) +} diff --git a/clients/horizonclient/claimable_balance_request.go b/clients/horizonclient/claimable_balance_request.go new file mode 100644 index 0000000000..eb80c13249 --- /dev/null +++ b/clients/horizonclient/claimable_balance_request.go @@ -0,0 +1,53 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/support/errors" +) + +// Creates the URL to either request a specific claimable balance (CB) by 
ID, or +// request all CBs, possibly filtered by asset, claimant, or sponsor. +func (cbr ClaimableBalanceRequest) BuildURL() (endpoint string, err error) { + endpoint = "claimable_balances" + + // Only one filter parameter is allowed, and you can't mix an ID query and + // filters. + nParams := countParams(cbr.Asset, cbr.Claimant, cbr.Sponsor, cbr.ID) + if nParams > 1 { + return endpoint, errors.New("invalid request: too many parameters") + } + + if cbr.ID != "" { + endpoint = fmt.Sprintf("%s/%s", endpoint, cbr.ID) + } else { + queryParams := addQueryParams( + map[string]string{ + "claimant": cbr.Claimant, + "sponsor": cbr.Sponsor, + "asset": cbr.Asset, + }, + ) + + endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the claimable balances endpoint +func (cbr ClaimableBalanceRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := cbr.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/client.go b/clients/horizonclient/client.go new file mode 100644 index 0000000000..105c37fc7f --- /dev/null +++ b/clients/horizonclient/client.go @@ -0,0 +1,845 @@ +package horizonclient + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" + + "github.com/manucorporat/sse" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/support/errors" +) + +// sendRequest builds the URL for the given horizon request and sends the url to a horizon server +func (c *Client) sendRequest(hr HorizonRequest, resp interface{}) (err error) { + req, err := hr.HTTPRequest(c.fixHorizonURL()) + if err != nil { + return err + } + + return c.sendHTTPRequest(req, resp) +} + +// checkMemoRequired implements a memo required check as defined in +// https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0029.md +func (c *Client) checkMemoRequired(transaction *txnbuild.Transaction) error { + destinations := map[string]bool{} + + for i, op := range transaction.Operations() { + var destination string + + if err := op.Validate(); err != nil { + return err + } + + switch p := op.(type) { + case *txnbuild.Payment: + destination = p.Destination + case *txnbuild.PathPaymentStrictReceive: + destination = p.Destination + case *txnbuild.PathPaymentStrictSend: + destination = p.Destination + case *txnbuild.AccountMerge: + destination = p.Destination + default: + continue + } + + muxed, err := xdr.AddressToMuxedAccount(destination) + if err != nil { + return errors.Wrapf(err, "destination %v is not a valid address", destination) + } + // Skip destination addresses with a memo id because the address has a memo + // encoded within it + destinationHasMemoID := muxed.Type == xdr.CryptoKeyTypeKeyTypeMuxedEd25519 + + if destinations[destination] || destinationHasMemoID { + continue + } + destinations[destination] = true + + request := AccountRequest{ + AccountID: destination, + DataKey: "config.memo_required", + } + + data, err := c.AccountData(request) + if err != nil { + horizonError := GetError(err) + + if horizonError == nil || 
horizonError.Response.StatusCode != 404 { + return err + } + + continue + } + + if data.Value == accountRequiresMemo { + return errors.Wrap( + ErrAccountRequiresMemo, + fmt.Sprintf("operation[%d]", i), + ) + } + } + + return nil +} + +// sendGetRequest sends a HTTP GET request to a horizon server. +// It can be used for requests that do not implement the HorizonRequest interface. +func (c *Client) sendGetRequest(requestURL string, a interface{}) error { + req, err := http.NewRequest("GET", requestURL, nil) + if err != nil { + return errors.Wrap(err, "error creating HTTP request") + } + return c.sendHTTPRequest(req, a) +} + +func (c *Client) sendHTTPRequest(req *http.Request, a interface{}) error { + c.setClientAppHeaders(req) + c.setDefaultClient() + + if c.horizonTimeout == 0 { + c.horizonTimeout = HorizonTimeout + } + ctx, cancel := context.WithTimeout(context.Background(), c.horizonTimeout) + defer cancel() + + if resp, err := c.HTTP.Do(req.WithContext(ctx)); err != nil { + return err + } else { + return decodeResponse(resp, &a, c) + } +} + +// stream handles connections to endpoints that support streaming on a horizon server +func (c *Client) stream( + ctx context.Context, + streamURL string, + handler func(data []byte) error, +) error { + su, err := url.Parse(streamURL) + if err != nil { + return errors.Wrap(err, "error parsing stream url") + } + + query := su.Query() + if query.Get("cursor") == "" { + query.Set("cursor", "now") + } + + for { + // updates the url with new cursor + su.RawQuery = query.Encode() + req, err := http.NewRequest("GET", su.String(), nil) + if err != nil { + return errors.Wrap(err, "error creating HTTP request") + } + req.Header.Set("Accept", "text/event-stream") + c.setDefaultClient() + c.setClientAppHeaders(req) + + // We can use c.HTTP here because we set Timeout per request not on the client. See sendRequest() + resp, err := c.HTTP.Do(req) + if err != nil { + return errors.Wrap(err, "error sending HTTP request") + } + + // Expected statusCode are 200-299 + if !(resp.StatusCode >= 200 && resp.StatusCode < 300) { + return fmt.Errorf("got bad HTTP status code %d", resp.StatusCode) + } + defer resp.Body.Close() + + reader := bufio.NewReader(resp.Body) + + // Read events one by one. Break this loop when there is no more data to be + // read from resp.Body (io.EOF). + Events: + for { + // Read until empty line = event delimiter. The perfect solution would be to read + // as many bytes as possible and forward them to sse.Decode. However this + // requires much more complicated code. + // We could also write our own `sse` package that works fine with streams directly + // (github.com/manucorporat/sse is just using io/ioutils.ReadAll). + var buffer bytes.Buffer + nonEmptylinesRead := 0 + for { + // Check if ctx is not cancelled + select { + case <-ctx.Done(): + return nil + default: + // Continue + } + + line, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + // We catch EOF errors to handle two possible situations: + // - The last line before closing the stream was not empty. This should never + // happen in Horizon as it always sends an empty line after each event. + // - The stream was closed by the server/proxy because the connection was idle. + // + // In the former case, that (again) should never happen in Horizon, we need to + // check if there are any events we need to decode. We do this in the `if` + // statement below just in case if Horizon behaviour changes in a future. 
+ // + // From spec: + // > Once the end of the file is reached, the user agent must dispatch the + // > event one final time, as defined below. + if nonEmptylinesRead == 0 { + break Events + } + } else { + return errors.Wrap(err, "error reading line") + } + } + buffer.WriteString(line) + + if strings.TrimRight(line, "\n\r") == "" { + break + } + + nonEmptylinesRead++ + } + + events, err := sse.Decode(strings.NewReader(buffer.String())) + if err != nil { + return errors.Wrap(err, "error decoding event") + } + + // Right now len(events) should always be 1. This loop will be helpful after writing + // new SSE decoder that can handle io.Reader without using ioutils.ReadAll(). + for _, event := range events { + if event.Event != "message" { + continue + } + + // Update cursor with event ID + if event.Id != "" { + query.Set("cursor", event.Id) + } + + switch data := event.Data.(type) { + case string: + err = handler([]byte(data)) + err = errors.Wrap(err, "handler error") + case []byte: + err = handler(data) + err = errors.Wrap(err, "handler error") + default: + err = errors.New("invalid event.Data type") + } + if err != nil { + return err + } + } + } + } +} + +func (c *Client) setClientAppHeaders(req *http.Request) { + req.Header.Set("X-Client-Name", "go-stellar-sdk") + req.Header.Set("X-Client-Version", c.Version()) + req.Header.Set("X-App-Name", c.AppName) + req.Header.Set("X-App-Version", c.AppVersion) +} + +// setDefaultClient sets the default HTTP client when none is provided. +func (c *Client) setDefaultClient() { + if c.HTTP == nil { + c.HTTP = http.DefaultClient + } +} + +// fixHorizonURL strips all slashes(/) at the end of HorizonURL if any, then adds a single slash +func (c *Client) fixHorizonURL() string { + c.fixHorizonURLOnce.Do(func() { + c.HorizonURL = strings.TrimRight(c.HorizonURL, "/") + "/" + }) + return c.HorizonURL +} + +// SetHorizonTimeout allows users to set the timeout before a horizon request is cancelled. +// The timeout is specified as a time.Duration which is in nanoseconds. +func (c *Client) SetHorizonTimeout(t time.Duration) *Client { + c.horizonTimeout = t + return c +} + +// HorizonTimeout returns the current timeout for a horizon client +func (c *Client) HorizonTimeout() time.Duration { + return c.horizonTimeout +} + +// Accounts returns accounts who have a given signer or +// have a trustline to an asset. +// See https://developers.stellar.org/api/resources/accounts/ +func (c *Client) Accounts(request AccountsRequest) (accounts hProtocol.AccountsPage, err error) { + err = c.sendRequest(request, &accounts) + return +} + +// AccountDetail returns information for a single account. 
+// See https://developers.stellar.org/api/resources/accounts/single/ +func (c *Client) AccountDetail(request AccountRequest) (account hProtocol.Account, err error) { + if request.AccountID == "" { + err = errors.New("no account ID provided") + } + + if err != nil { + return + } + + err = c.sendRequest(request, &account) + return +} + +// AccountData returns a single data associated with a given account +// See https://developers.stellar.org/api/resources/accounts/data/ +func (c *Client) AccountData(request AccountRequest) (accountData hProtocol.AccountData, err error) { + if request.AccountID == "" || request.DataKey == "" { + err = errors.New("too few parameters") + } + + if err != nil { + return + } + + err = c.sendRequest(request, &accountData) + return +} + +// Effects returns effects (https://developers.stellar.org/api/resources/effects/) +// It can be used to return effects for an account, a ledger, an operation, a transaction and all effects on the network. +func (c *Client) Effects(request EffectRequest) (effects effects.EffectsPage, err error) { + err = c.sendRequest(request, &effects) + return +} + +// Assets returns asset information. +// See https://developers.stellar.org/api/resources/assets/list/ +func (c *Client) Assets(request AssetRequest) (assets hProtocol.AssetsPage, err error) { + err = c.sendRequest(request, &assets) + return +} + +// Ledgers returns information about all ledgers. +// See https://developers.stellar.org/api/resources/ledgers/list/ +func (c *Client) Ledgers(request LedgerRequest) (ledgers hProtocol.LedgersPage, err error) { + err = c.sendRequest(request, &ledgers) + return +} + +// LedgerDetail returns information about a particular ledger for a given sequence number +// See https://developers.stellar.org/api/resources/ledgers/single/ +func (c *Client) LedgerDetail(sequence uint32) (ledger hProtocol.Ledger, err error) { + if sequence == 0 { + err = errors.New("invalid sequence number provided") + } + + if err != nil { + return + } + + request := LedgerRequest{forSequence: sequence} + err = c.sendRequest(request, &ledger) + return +} + +// FeeStats returns information about fees in the last 5 ledgers. +// See https://developers.stellar.org/api/aggregations/fee-stats/ +func (c *Client) FeeStats() (feestats hProtocol.FeeStats, err error) { + request := feeStatsRequest{endpoint: "fee_stats"} + err = c.sendRequest(request, &feestats) + return +} + +// Offers returns information about offers made on the SDEX. +// See https://developers.stellar.org/api/resources/offers/list/ +func (c *Client) Offers(request OfferRequest) (offers hProtocol.OffersPage, err error) { + err = c.sendRequest(request, &offers) + return +} + +// OfferDetails returns information for a single offer. +// See https://developers.stellar.org/api/resources/offers/single/ +func (c *Client) OfferDetails(offerID string) (offer hProtocol.Offer, err error) { + if len(offerID) == 0 { + err = errors.New("no offer ID provided") + return + } + + if _, err = strconv.ParseInt(offerID, 10, 64); err != nil { + err = errors.New("invalid offer ID provided") + return + } + + err = c.sendRequest(OfferRequest{OfferID: offerID}, &offer) + return +} + +// Operations returns stellar operations (https://developers.stellar.org/api/resources/operations/list/) +// It can be used to return operations for an account, a ledger, a transaction and all operations on the network. 
+func (c *Client) Operations(request OperationRequest) (ops operations.OperationsPage, err error) { + err = c.sendRequest(request.SetOperationsEndpoint(), &ops) + return +} + +// OperationDetail returns a single stellar operation for a given operation id +// See https://developers.stellar.org/api/resources/operations/single/ +func (c *Client) OperationDetail(id string) (ops operations.Operation, err error) { + if id == "" { + return ops, errors.New("invalid operation id provided") + } + + request := OperationRequest{forOperationID: id, endpoint: "operations"} + + var record interface{} + + err = c.sendRequest(request, &record) + if err != nil { + return ops, errors.Wrap(err, "sending request to horizon") + } + + var baseRecord operations.Base + dataString, err := json.Marshal(record) + if err != nil { + return ops, errors.Wrap(err, "marshaling json") + } + if err = json.Unmarshal(dataString, &baseRecord); err != nil { + return ops, errors.Wrap(err, "unmarshaling json") + } + + ops, err = operations.UnmarshalOperation(baseRecord.GetTypeI(), dataString) + if err != nil { + return ops, errors.Wrap(err, "unmarshaling to the correct operation type") + } + return ops, nil +} + +// SubmitTransactionXDR submits a transaction represented as a base64 XDR string to the network. err can be either error object or horizon.Error object. +// See https://developers.stellar.org/api/resources/transactions/post/ +func (c *Client) SubmitTransactionXDR(transactionXdr string) (tx hProtocol.Transaction, + err error) { + request := submitRequest{endpoint: "transactions", transactionXdr: transactionXdr} + err = c.sendRequest(request, &tx) + return +} + +// SubmitFeeBumpTransaction submits a fee bump transaction to the network. err can be either an +// error object or a horizon.Error object. +// +// This function will always check if the destination account requires a memo in the transaction as +// defined in SEP0029: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0029.md +// +// If you want to skip this check, use SubmitTransactionWithOptions. +// +// See https://developers.stellar.org/api/resources/transactions/post/ +func (c *Client) SubmitFeeBumpTransaction(transaction *txnbuild.FeeBumpTransaction) (tx hProtocol.Transaction, err error) { + return c.SubmitFeeBumpTransactionWithOptions(transaction, SubmitTxOpts{}) +} + +// SubmitFeeBumpTransactionWithOptions submits a fee bump transaction to the network, allowing +// you to pass SubmitTxOpts. err can be either an error object or a horizon.Error object. +// +// See https://developers.stellar.org/api/resources/transactions/post/ +func (c *Client) SubmitFeeBumpTransactionWithOptions(transaction *txnbuild.FeeBumpTransaction, opts SubmitTxOpts) (tx hProtocol.Transaction, err error) { + // only check if memo is required if skip is false and the inner transaction + // doesn't have a memo. + if inner := transaction.InnerTransaction(); !opts.SkipMemoRequiredCheck && inner.Memo() == nil { + err = c.checkMemoRequired(inner) + if err != nil { + return + } + } + + txeBase64, err := transaction.Base64() + if err != nil { + err = errors.Wrap(err, "Unable to convert transaction object to base64 string") + return + } + + return c.SubmitTransactionXDR(txeBase64) +} + +// SubmitTransaction submits a transaction to the network. err can be either an +// error object or a horizon.Error object. 
+// +// This function will always check if the destination account requires a memo in the transaction as +// defined in SEP0029: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0029.md +// +// If you want to skip this check, use SubmitTransactionWithOptions. +// +// See https://developers.stellar.org/api/resources/transactions/post/ +func (c *Client) SubmitTransaction(transaction *txnbuild.Transaction) (tx hProtocol.Transaction, err error) { + return c.SubmitTransactionWithOptions(transaction, SubmitTxOpts{}) +} + +// SubmitTransactionWithOptions submits a transaction to the network, allowing +// you to pass SubmitTxOpts. err can be either an error object or a horizon.Error object. +// +// See https://developers.stellar.org/api/resources/transactions/post/ +func (c *Client) SubmitTransactionWithOptions(transaction *txnbuild.Transaction, opts SubmitTxOpts) (tx hProtocol.Transaction, err error) { + // only check if memo is required if skip is false and the transaction + // doesn't have a memo. + if !opts.SkipMemoRequiredCheck && transaction.Memo() == nil { + err = c.checkMemoRequired(transaction) + if err != nil { + return + } + } + + txeBase64, err := transaction.Base64() + if err != nil { + err = errors.Wrap(err, "Unable to convert transaction object to base64 string") + return + } + + return c.SubmitTransactionXDR(txeBase64) +} + +// Transactions returns stellar transactions (https://developers.stellar.org/api/resources/transactions/list/) +// It can be used to return transactions for an account, a ledger,and all transactions on the network. +func (c *Client) Transactions(request TransactionRequest) (txs hProtocol.TransactionsPage, err error) { + err = c.sendRequest(request, &txs) + return +} + +// TransactionDetail returns information about a particular transaction for a given transaction hash +// See https://developers.stellar.org/api/resources/transactions/single/ +func (c *Client) TransactionDetail(txHash string) (tx hProtocol.Transaction, err error) { + if txHash == "" { + return tx, errors.New("no transaction hash provided") + } + + request := TransactionRequest{forTransactionHash: txHash} + err = c.sendRequest(request, &tx) + return +} + +// OrderBook returns the orderbook for an asset pair (https://developers.stellar.org/api/aggregations/order-books/single/) +func (c *Client) OrderBook(request OrderBookRequest) (obs hProtocol.OrderBookSummary, err error) { + err = c.sendRequest(request, &obs) + return +} + +// Paths returns the available paths to make a strict receive path payment. See https://developers.stellar.org/api/aggregations/paths/strict-receive/ +// This function is an alias for `client.StrictReceivePaths` and will be deprecated, use `client.StrictReceivePaths` instead. +func (c *Client) Paths(request PathsRequest) (paths hProtocol.PathsPage, err error) { + paths, err = c.StrictReceivePaths(request) + return +} + +// StrictReceivePaths returns the available paths to make a strict receive path payment. See https://developers.stellar.org/api/aggregations/paths/strict-receive/ +func (c *Client) StrictReceivePaths(request PathsRequest) (paths hProtocol.PathsPage, err error) { + err = c.sendRequest(request, &paths) + return +} + +// StrictSendPaths returns the available paths to make a strict send path payment. 
See https://developers.stellar.org/api/aggregations/paths/strict-send/ +func (c *Client) StrictSendPaths(request StrictSendPathsRequest) (paths hProtocol.PathsPage, err error) { + err = c.sendRequest(request, &paths) + return +} + +// Payments returns stellar account_merge, create_account, path payment and payment operations. +// It can be used to return payments for an account, a ledger, a transaction and all payments on the network. +func (c *Client) Payments(request OperationRequest) (ops operations.OperationsPage, err error) { + err = c.sendRequest(request.SetPaymentsEndpoint(), &ops) + return +} + +// Trades returns stellar trades (https://developers.stellar.org/api/resources/trades/list/) +// It can be used to return trades for an account, an offer and all trades on the network. +func (c *Client) Trades(request TradeRequest) (tds hProtocol.TradesPage, err error) { + err = c.sendRequest(request, &tds) + return +} + +// Fund creates a new account funded from friendbot. It only works on test networks. See +// https://developers.stellar.org/docs/tutorials/create-account/ for more information. +func (c *Client) Fund(addr string) (tx hProtocol.Transaction, err error) { + friendbotURL := fmt.Sprintf("%sfriendbot?addr=%s", c.fixHorizonURL(), addr) + err = c.sendGetRequest(friendbotURL, &tx) + if IsNotFoundError(err) { + return tx, errors.Wrap(err, "funding is only available on test networks and may not be supported by "+c.fixHorizonURL()) + } + return +} + +// StreamTrades streams executed trades. It can be used to stream all trades, trades for an account and +// trades for an offer. Use context.WithCancel to stop streaming or context.Background() if you want +// to stream indefinitely. TradeHandler is a user-supplied function that is executed for each streamed trade received. +func (c *Client) StreamTrades(ctx context.Context, request TradeRequest, handler TradeHandler) (err error) { + err = request.StreamTrades(ctx, c, handler) + return +} + +// TradeAggregations returns stellar trade aggregations (https://developers.stellar.org/api/aggregations/trade-aggregations/list/) +func (c *Client) TradeAggregations(request TradeAggregationRequest) (tds hProtocol.TradeAggregationsPage, err error) { + err = c.sendRequest(request, &tds) + return +} + +// StreamTransactions streams processed transactions. It can be used to stream all transactions and +// transactions for an account. Use context.WithCancel to stop streaming or context.Background() +// if you want to stream indefinitely. TransactionHandler is a user-supplied function that is executed for each streamed transaction received. +func (c *Client) StreamTransactions(ctx context.Context, request TransactionRequest, handler TransactionHandler) error { + return request.StreamTransactions(ctx, c, handler) +} + +// StreamEffects streams horizon effects. It can be used to stream all effects or account specific effects. +// Use context.WithCancel to stop streaming or context.Background() if you want to stream indefinitely. +// EffectHandler is a user-supplied function that is executed for each streamed transaction received. +func (c *Client) StreamEffects(ctx context.Context, request EffectRequest, handler EffectHandler) error { + return request.StreamEffects(ctx, c, handler) +} + +// StreamOperations streams stellar operations. It can be used to stream all operations or operations +// for an account. Use context.WithCancel to stop streaming or context.Background() if you want to +// stream indefinitely. 
OperationHandler is a user-supplied function that is executed for each streamed +// operation received. +func (c *Client) StreamOperations(ctx context.Context, request OperationRequest, handler OperationHandler) error { + return request.SetOperationsEndpoint().StreamOperations(ctx, c, handler) +} + +// StreamPayments streams stellar payments. It can be used to stream all payments or payments +// for an account. Payments include create_account, payment, path_payment and account_merge operations. +// Use context.WithCancel to stop streaming or context.Background() if you want to +// stream indefinitely. OperationHandler is a user-supplied function that is executed for each streamed +// operation received. +func (c *Client) StreamPayments(ctx context.Context, request OperationRequest, handler OperationHandler) error { + return request.SetPaymentsEndpoint().StreamOperations(ctx, c, handler) +} + +// StreamOffers streams offers processed by the Stellar network for an account. Use context.WithCancel +// to stop streaming or context.Background() if you want to stream indefinitely. +// OfferHandler is a user-supplied function that is executed for each streamed offer received. +func (c *Client) StreamOffers(ctx context.Context, request OfferRequest, handler OfferHandler) error { + return request.StreamOffers(ctx, c, handler) +} + +// StreamLedgers streams stellar ledgers. It can be used to stream all ledgers. Use context.WithCancel +// to stop streaming or context.Background() if you want to stream indefinitely. +// LedgerHandler is a user-supplied function that is executed for each streamed ledger received. +func (c *Client) StreamLedgers(ctx context.Context, request LedgerRequest, handler LedgerHandler) error { + return request.StreamLedgers(ctx, c, handler) +} + +// StreamOrderBooks streams the orderbook for a given asset pair. Use context.WithCancel +// to stop streaming or context.Background() if you want to stream indefinitely. +// OrderBookHandler is a user-supplied function that is executed for each streamed order received. +func (c *Client) StreamOrderBooks(ctx context.Context, request OrderBookRequest, handler OrderBookHandler) error { + return request.StreamOrderBooks(ctx, c, handler) +} + +// FetchTimebounds provides timebounds for N seconds from now using the server time of the horizon instance. +// It defaults to localtime when the server time is not available. +// Note that this will generate your timebounds when you init the transaction, not when you build or submit +// the transaction! So give yourself enough time to get the transaction built and signed before submitting. +func (c *Client) FetchTimebounds(seconds int64) (txnbuild.Timebounds, error) { + serverURL, err := url.Parse(c.HorizonURL) + if err != nil { + return txnbuild.Timebounds{}, errors.Wrap(err, "unable to parse horizon url") + } + currentTime := currentServerTime(serverURL.Hostname(), c.clock.Now().UTC().Unix()) + if currentTime != 0 { + return txnbuild.NewTimebounds(0, currentTime+seconds), nil + } + + // return a timebounds based on local time if no server time has been recorded + // to do: query an endpoint to get the most current time. Implement this after we add retry logic to client. + return txnbuild.NewTimeout(seconds), nil +} + +// Root loads the root endpoint of horizon +func (c *Client) Root() (root hProtocol.Root, err error) { + err = c.sendGetRequest(c.fixHorizonURL(), &root) + return +} + +// Version returns the current version. 
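+// The value comes from the package-level version identifier.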
+func (c *Client) Version() string { + return version +} + +// NextAccountsPage returns the next page of accounts. +func (c *Client) NextAccountsPage(page hProtocol.AccountsPage) (accounts hProtocol.AccountsPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &accounts) + return +} + +// NextAssetsPage returns the next page of assets. +func (c *Client) NextAssetsPage(page hProtocol.AssetsPage) (assets hProtocol.AssetsPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &assets) + return +} + +// PrevAssetsPage returns the previous page of assets. +func (c *Client) PrevAssetsPage(page hProtocol.AssetsPage) (assets hProtocol.AssetsPage, err error) { + err = c.sendGetRequest(page.Links.Prev.Href, &assets) + return +} + +// NextLedgersPage returns the next page of ledgers. +func (c *Client) NextLedgersPage(page hProtocol.LedgersPage) (ledgers hProtocol.LedgersPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &ledgers) + return +} + +// PrevLedgersPage returns the previous page of ledgers. +func (c *Client) PrevLedgersPage(page hProtocol.LedgersPage) (ledgers hProtocol.LedgersPage, err error) { + err = c.sendGetRequest(page.Links.Prev.Href, &ledgers) + return +} + +// NextEffectsPage returns the next page of effects. +func (c *Client) NextEffectsPage(page effects.EffectsPage) (efp effects.EffectsPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &efp) + return +} + +// PrevEffectsPage returns the previous page of effects. +func (c *Client) PrevEffectsPage(page effects.EffectsPage) (efp effects.EffectsPage, err error) { + err = c.sendGetRequest(page.Links.Prev.Href, &efp) + return +} + +// NextTransactionsPage returns the next page of transactions. +func (c *Client) NextTransactionsPage(page hProtocol.TransactionsPage) (transactions hProtocol.TransactionsPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &transactions) + return +} + +// PrevTransactionsPage returns the previous page of transactions. +func (c *Client) PrevTransactionsPage(page hProtocol.TransactionsPage) (transactions hProtocol.TransactionsPage, err error) { + err = c.sendGetRequest(page.Links.Prev.Href, &transactions) + return +} + +// NextOperationsPage returns the next page of operations. +func (c *Client) NextOperationsPage(page operations.OperationsPage) (operations operations.OperationsPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &operations) + return +} + +// PrevOperationsPage returns the previous page of operations. +func (c *Client) PrevOperationsPage(page operations.OperationsPage) (operations operations.OperationsPage, err error) { + err = c.sendGetRequest(page.Links.Prev.Href, &operations) + return +} + +// NextPaymentsPage returns the next page of payments. +func (c *Client) NextPaymentsPage(page operations.OperationsPage) (operations.OperationsPage, error) { + return c.NextOperationsPage(page) +} + +// PrevPaymentsPage returns the previous page of payments. +func (c *Client) PrevPaymentsPage(page operations.OperationsPage) (operations.OperationsPage, error) { + return c.PrevOperationsPage(page) +} + +// NextOffersPage returns the next page of offers. +func (c *Client) NextOffersPage(page hProtocol.OffersPage) (offers hProtocol.OffersPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &offers) + return +} + +// PrevOffersPage returns the previous page of offers. 
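+// It follows the prev link (page.Links.Prev.Href) embedded in the supplied page.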
+func (c *Client) PrevOffersPage(page hProtocol.OffersPage) (offers hProtocol.OffersPage, err error) { + err = c.sendGetRequest(page.Links.Prev.Href, &offers) + return +} + +// NextTradesPage returns the next page of trades. +func (c *Client) NextTradesPage(page hProtocol.TradesPage) (trades hProtocol.TradesPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &trades) + return +} + +// PrevTradesPage returns the previous page of trades. +func (c *Client) PrevTradesPage(page hProtocol.TradesPage) (trades hProtocol.TradesPage, err error) { + err = c.sendGetRequest(page.Links.Prev.Href, &trades) + return +} + +// HomeDomainForAccount returns the home domain for a single account. +func (c *Client) HomeDomainForAccount(aid string) (string, error) { + if aid == "" { + return "", errors.New("no account ID provided") + } + + accountDetail, err := c.AccountDetail(AccountRequest{AccountID: aid}) + if err != nil { + return "", errors.Wrap(err, "get account detail failed") + } + + return accountDetail.HomeDomain, nil +} + +// NextTradeAggregationsPage returns the next page of trade aggregations from the current +// trade aggregations response. +func (c *Client) NextTradeAggregationsPage(page hProtocol.TradeAggregationsPage) (ta hProtocol.TradeAggregationsPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &ta) + return +} + +// PrevTradeAggregationsPage returns the previous page of trade aggregations from the current +// trade aggregations response. +func (c *Client) PrevTradeAggregationsPage(page hProtocol.TradeAggregationsPage) (ta hProtocol.TradeAggregationsPage, err error) { + err = c.sendGetRequest(page.Links.Prev.Href, &ta) + return +} + +// ClaimableBalances returns details about available claimable balances, +// possibly filtered to a specific sponsor or other parameters. +func (c *Client) ClaimableBalances(cbr ClaimableBalanceRequest) (cb hProtocol.ClaimableBalances, err error) { + err = c.sendRequest(cbr, &cb) + return +} + +// ClaimableBalance returns details about a *specific*, unique claimable balance. 
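+//
+// A minimal usage sketch, assuming a configured *Client named client and a
+// string named balanceID previously returned by ClaimableBalances:
+//
+//	cb, err := client.ClaimableBalance(balanceID)
+//	if err == nil {
+//		fmt.Print(cb)
+//	}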
+func (c *Client) ClaimableBalance(id string) (cb hProtocol.ClaimableBalance, err error) { + cbr := ClaimableBalanceRequest{ID: id} + err = c.sendRequest(cbr, &cb) + return +} + +func (c *Client) LiquidityPoolDetail(request LiquidityPoolRequest) (lp hProtocol.LiquidityPool, err error) { + err = c.sendRequest(request, &lp) + return +} + +func (c *Client) LiquidityPools(request LiquidityPoolsRequest) (lp hProtocol.LiquidityPoolsPage, err error) { + err = c.sendRequest(request, &lp) + return +} + +func (c *Client) NextLiquidityPoolsPage(page hProtocol.LiquidityPoolsPage) (lp hProtocol.LiquidityPoolsPage, err error) { + err = c.sendGetRequest(page.Links.Next.Href, &lp) + return +} + +func (c *Client) PrevLiquidityPoolsPage(page hProtocol.LiquidityPoolsPage) (lp hProtocol.LiquidityPoolsPage, err error) { + err = c.sendGetRequest(page.Links.Prev.Href, &lp) + return +} + +// ensure that the horizon client implements ClientInterface +var _ ClientInterface = &Client{} diff --git a/clients/horizonclient/client_fund_test.go b/clients/horizonclient/client_fund_test.go new file mode 100644 index 0000000000..528ba26f17 --- /dev/null +++ b/clients/horizonclient/client_fund_test.go @@ -0,0 +1,61 @@ +package horizonclient + +import ( + "testing" + + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" +) + +func TestFund(t *testing.T) { + friendbotFundResponse := `{ + "_links": { + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/94e42f65d3ff5f30669b6109c2ce3e82c0e592c52004e3b41bb30e24df33954e" + } + }, + "hash": "94e42f65d3ff5f30669b6109c2ce3e82c0e592c52004e3b41bb30e24df33954e", + "ledger": 8269, + "envelope_xdr": "AAAAAgAAAAD2Leuk4afNVCYqxbN03yPH6kgKe/o2yiOd3CQNkpkpQwABhqAAAAFSAAAACQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAAAAAAABW9+rbvt6YXwwXyFszptQFlfzzFMrWObLiJmBhOzNblAAAABdIdugAAAAAAAAAAAKSmSlDAAAAQHWNbXOoVQqH0YJRr8LAtpalV+NoXb8Tv/ETkPNv2NignhN8seUSde8m2HLNLHOo+5W34BXfxfBmDXgZn8yHkwSGVuCcAAAAQDQLh1UAxYZ27sIxyYgyYFo8IUbTiANWadUJUR7K0q1eY6Q5J/BFfNlf6UqLqJ5zd8uI3TXCaBNJDkiQc1ZLEg4=", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAgAAAAIAAAADAAAgTQAAAAAAAAAA9i3rpOGnzVQmKsWzdN8jx+pICnv6NsojndwkDZKZKUMAAAAAPDNbbAAAAVIAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAgTQAAAAAAAAAA9i3rpOGnzVQmKsWzdN8jx+pICnv6NsojndwkDZKZKUMAAAAAPDNbbAAAAVIAAAAJAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAACBMAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAFg09HQY/uMAAAA2wAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAACBNAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAFg07qH7ROMAAAA2wAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAACBNAAAAAAAAAABW9+rbvt6YXwwXyFszptQFlfzzFMrWObLiJmBhOzNblAAAABdIdugAAAAgTQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAA=" +}` + + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + hmock.On( + "GET", + "https://localhost/friendbot?addr=GBLPP2W3X3PJQXYMC7EFWM5G2QCZL7HTCTFNMONS4ITGAYJ3GNNZIQ4V", + ).ReturnString(200, friendbotFundResponse) + + tx, err := client.Fund("GBLPP2W3X3PJQXYMC7EFWM5G2QCZL7HTCTFNMONS4ITGAYJ3GNNZIQ4V") + assert.NoError(t, err) + assert.Equal(t, int32(8269), tx.Ledger) +} + +func TestFund_notSupported(t *testing.T) { + friendbotFundResponse := `{ + "type": "https://stellar.org/horizon-errors/not_found", + "title": "Resource Missing", + "status": 404, + "detail": "The resource at the url requested was not found. 
This usually occurs for one of two reasons: The url requested is not valid, or no data in our database could be found with the parameters provided." +}` + + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + hmock.On( + "GET", + "https://localhost/friendbot?addr=GBLPP2W3X3PJQXYMC7EFWM5G2QCZL7HTCTFNMONS4ITGAYJ3GNNZIQ4V", + ).ReturnString(404, friendbotFundResponse) + + _, err := client.Fund("GBLPP2W3X3PJQXYMC7EFWM5G2QCZL7HTCTFNMONS4ITGAYJ3GNNZIQ4V") + assert.EqualError(t, err, "funding is only available on test networks and may not be supported by https://localhost/: horizon error: \"Resource Missing\" - check horizon.Error.Problem for more information") +} diff --git a/clients/horizonclient/effect_request.go b/clients/horizonclient/effect_request.go new file mode 100644 index 0000000000..8d20f14e99 --- /dev/null +++ b/clients/horizonclient/effect_request.go @@ -0,0 +1,97 @@ +package horizonclient + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/support/errors" +) + +// EffectHandler is a function that is called when a new effect is received +type EffectHandler func(effects.Effect) + +// BuildURL creates the endpoint to be queried based on the data in the EffectRequest struct. +// If no data is set, it defaults to the build the URL for all effects +func (er EffectRequest) BuildURL() (endpoint string, err error) { + nParams := countParams(er.ForAccount, er.ForLedger, er.ForLiquidityPool, er.ForOperation, er.ForTransaction) + + if nParams > 1 { + return endpoint, errors.New("invalid request: too many parameters") + } + + endpoint = "effects" + + if er.ForAccount != "" { + endpoint = fmt.Sprintf("accounts/%s/effects", er.ForAccount) + } + + if er.ForLedger != "" { + endpoint = fmt.Sprintf("ledgers/%s/effects", er.ForLedger) + } + + if er.ForLiquidityPool != "" { + endpoint = fmt.Sprintf("liquidity_pools/%s/effects", er.ForLiquidityPool) + } + + if er.ForOperation != "" { + endpoint = fmt.Sprintf("operations/%s/effects", er.ForOperation) + } + + if er.ForTransaction != "" { + endpoint = fmt.Sprintf("transactions/%s/effects", er.ForTransaction) + } + + queryParams := addQueryParams(cursor(er.Cursor), limit(er.Limit), er.Order) + if queryParams != "" { + endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// StreamEffects streams horizon effects. It can be used to stream all effects or account specific effects. +// Use context.WithCancel to stop streaming or context.Background() if you want to stream indefinitely. +// EffectHandler is a user-supplied function that is executed for each streamed effect received. 
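+//
+// A minimal usage sketch, assuming a configured *Client named client and an
+// account ID string named account:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	err := EffectRequest{ForAccount: account}.StreamEffects(ctx, client,
+//		func(e effects.Effect) { fmt.Println(e.GetType()) })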
+func (er EffectRequest) StreamEffects(ctx context.Context, client *Client, handler EffectHandler) error { + endpoint, err := er.BuildURL() + if err != nil { + return errors.Wrap(err, "unable to build endpoint for effects request") + } + + url := fmt.Sprintf("%s%s", client.fixHorizonURL(), endpoint) + return client.stream(ctx, url, func(data []byte) error { + var baseEffect effects.Base + // unmarshal into the base effect type + if err = json.Unmarshal(data, &baseEffect); err != nil { + return errors.Wrap(err, "error unmarshaling data for effects request") + } + + // unmarshal into the concrete effect type + effs, err := effects.UnmarshalEffect(baseEffect.GetType(), data) + if err != nil { + return errors.Wrap(err, "unmarshaling to the correct effect type") + } + + handler(effs) + return nil + }) +} + +// HTTPRequest returns the http request for the effects endpoint +func (er EffectRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := er.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/effect_request_test.go b/clients/horizonclient/effect_request_test.go new file mode 100644 index 0000000000..27b0ac2b7b --- /dev/null +++ b/clients/horizonclient/effect_request_test.go @@ -0,0 +1,392 @@ +package horizonclient + +import ( + "context" + "testing" + + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEffectRequestBuildUrl(t *testing.T) { + er := EffectRequest{} + endpoint, err := er.BuildURL() + + // It should return valid all effects endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "effects", endpoint) + + er = EffectRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + endpoint, err = er.BuildURL() + + // It should return valid account effects endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/effects", endpoint) + + er = EffectRequest{ForLedger: "123"} + endpoint, err = er.BuildURL() + + // It should return valid ledger effects endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "ledgers/123/effects", endpoint) + + er = EffectRequest{ForLiquidityPool: "123"} + endpoint, err = er.BuildURL() + + // It should return valid liquidity pool effects endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "liquidity_pools/123/effects", endpoint) + + er = EffectRequest{ForOperation: "123"} + endpoint, err = er.BuildURL() + + // It should return valid operation effects endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "operations/123/effects", endpoint) + + er = EffectRequest{ForTransaction: "123"} + endpoint, err = er.BuildURL() + + // It should return valid transaction effects endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "transactions/123/effects", endpoint) + + er = EffectRequest{ForLedger: "123", ForOperation: "789"} + _, err = er.BuildURL() + + // error case: too many parameters for building any effect endpoint + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid request: too many parameters") + } + + er = EffectRequest{Cursor: "123456", Limit: 30, Order: OrderAsc} + endpoint, err = er.BuildURL() + // It should return valid all effects endpoint with query params and no errors + require.NoError(t, err) + assert.Equal(t, 
"effects?cursor=123456&limit=30&order=asc", endpoint) + +} + +func TestEffectRequestStreamEffects(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // All effects + effectRequest := EffectRequest{} + ctx, cancel := context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/effects?cursor=now", + ).ReturnString(200, effectStreamResponse) + + effectStream := make([]effects.Effect, 1) + err := client.StreamEffects(ctx, effectRequest, func(effect effects.Effect) { + effectStream[0] = effect + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, effectStream[0].GetType(), "account_credited") + } + + // Account effects + effectRequest = EffectRequest{ForAccount: "GBNZN27NAOHRJRCMHQF2ZN2F6TAPVEWKJIGZIRNKIADWIS2HDENIS6CI"} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/accounts/GBNZN27NAOHRJRCMHQF2ZN2F6TAPVEWKJIGZIRNKIADWIS2HDENIS6CI/effects?cursor=now", + ).ReturnString(200, effectStreamResponse) + + err = client.StreamEffects(ctx, effectRequest, func(effect effects.Effect) { + effectStream[0] = effect + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, effectStream[0].GetAccount(), "GBNZN27NAOHRJRCMHQF2ZN2F6TAPVEWKJIGZIRNKIADWIS2HDENIS6CI") + } + + // test error + effectRequest = EffectRequest{} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/effects?cursor=now", + ).ReturnString(500, effectStreamResponse) + + err = client.StreamEffects(ctx, effectRequest, func(effect effects.Effect) { + effectStream[0] = effect + cancel() + }) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "got bad HTTP status code 500") + } +} + +func TestNextEffectsPage(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // Account effects + effectRequest := EffectRequest{ForAccount: "GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD"} + + hmock.On( + "GET", + "https://localhost/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects", + ).ReturnString(200, firstEffectsPage) + + efp, err := client.Effects(effectRequest) + + if assert.NoError(t, err) { + assert.Len(t, efp.Embedded.Records, 2) + } + + hmock.On( + "GET", + "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=1557363731492865-3&limit=10&order=asc", + ).ReturnString(200, emptyEffectsPage) + + nextPage, err := client.NextEffectsPage(efp) + if assert.NoError(t, err) { + assert.Len(t, nextPage.Embedded.Records, 0) + } +} + +func TestSequenceBumpedNewSeq(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + effectRequest := EffectRequest{ForAccount: "GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD"} + testCases := []struct { + desc string + payload string + }{ + { + desc: "new_seq as a string", + payload: sequenceBumpedPage, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + hmock.On( + "GET", + "https://localhost/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects", + ).ReturnString(200, tc.payload) + + efp, err := client.Effects(effectRequest) + + if assert.NoError(t, err) { + assert.Len(t, efp.Embedded.Records, 1) + } + + effect, ok := efp.Embedded.Records[0].(effects.SequenceBumped) + assert.True(t, ok) + assert.Equal(t, 
int64(300000000000), effect.NewSeq) + + }) + } +} + +func TestTradeEffectOfferID(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + effectRequest := EffectRequest{ForAccount: "GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD"} + testCases := []struct { + desc string + payload string + }{ + { + desc: "offer_id as a string", + payload: tradeEffectPage, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + hmock.On( + "GET", + "https://localhost/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects", + ).ReturnString(200, tc.payload) + + efp, err := client.Effects(effectRequest) + + if assert.NoError(t, err) { + assert.Len(t, efp.Embedded.Records, 1) + } + + effect, ok := efp.Embedded.Records[0].(effects.Trade) + assert.True(t, ok) + assert.Equal(t, int64(127538672), effect.OfferID) + }) + } +} + +var effectStreamResponse = `data: {"_links":{"operation":{"href":"https://horizon-testnet.stellar.org/operations/2531135896703017"},"succeeds":{"href":"https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=2531135896703017-1"},"precedes":{"href":"https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=2531135896703017-1"}},"id":"0002531135896703017-0000000001","paging_token":"2531135896703017-1","account":"GBNZN27NAOHRJRCMHQF2ZN2F6TAPVEWKJIGZIRNKIADWIS2HDENIS6CI","type":"account_credited","type_i":2,"created_at":"2019-04-03T10:14:17Z","asset_type":"credit_alphanum4","asset_code":"qwop","asset_issuer":"GBM4HXXNDBWWQBXOL4QCTZIUQAP6XFUI3FPINUGUPBMULMTEHJPIKX6T","amount":"0.0460000"} +` + +var firstEffectsPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=1557363731492865-3&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=1557363731492865-1&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/1557363731492865" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1557363731492865-1" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1557363731492865-1" + } + }, + "id": "0001557363731492865-0000000001", + "paging_token": "1557363731492865-1", + "account": "GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD", + "type": "account_created", + "type_i": 0, + "created_at": "2019-05-16T07:13:25Z", + "starting_balance": "10000.0000000" + }, + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/1557363731492865" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1557363731492865-3" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1557363731492865-3" + } + }, + "id": "0001557363731492865-0000000003", + "paging_token": "1557363731492865-3", + "account": "GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD", + "type": "signer_created", + "type_i": 10, + "created_at": "2019-05-16T07:13:25Z", + "weight": 1, + "public_key": 
"GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD", + "key": "" + } + ] + } +}` + +var sequenceBumpedPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=1557363731492865-3&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=1557363731492865-1&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/249108107265" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=249108107265-1" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=249108107265-1" + } + }, + "id": "0000000249108107265-0000000001", + "paging_token": "249108107265-1", + "account": "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", + "type": "sequence_bumped", + "type_i": 43, + "created_at": "2019-06-03T16:36:24Z", + "new_seq": "300000000000" + } + ] + } + }` + +var tradeEffectPage = ` +{ + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/224209713045979100" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=224209713045979100-3" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=224209713045979100-3" + } + }, + "id": "2214209713045979100-0000000003", + "paging_token": "224209713045979100-3", + "account": "GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD", + "type": "trade", + "type_i": 33, + "created_at": "2019-11-01T23:05:58Z", + "seller": "GDUKMGUGDZQK6YHYA5Z6AY2G4XDSZPSZ3SW5UN3ARVMO6QSRDWP5YLEX", + "offer_id": "127538672", + "sold_amount": "14.5984123", + "sold_asset_type": "native", + "bought_amount": "1.0000000", + "bought_asset_type": "credit_alphanum4", + "bought_asset_code": "USD", + "bought_asset_issuer": "GDUKMGUGDZQK6YHYA5Z6AY2G4XDSZPSZ3SW5UN3ARVMO6QSRDWP5YLEX" + } + ] + } +} +` + +var emptyEffectsPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=1557363731492865-3&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=1557363731492865-3&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDIZFWLOTBWHTPODXCBH6XNXPFMSQFRVIDRP3JLEKQZN66G7NF3ANOD/effects?cursor=1557363731492865-3&limit=10&order=desc" + } + }, + "_embedded": { + "records": [] + } +}` diff --git a/clients/horizonclient/error.go b/clients/horizonclient/error.go new file mode 100644 index 0000000000..75beb54786 --- /dev/null +++ b/clients/horizonclient/error.go @@ -0,0 +1,92 @@ +package horizonclient + +import ( + "encoding/json" + "strings" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +func (herr Error) Error() string { + s := strings.Builder{} + s.WriteString(`horizon error: "`) + s.WriteString(herr.Problem.Title) + s.WriteString(`" `) + if rc, err := 
herr.ResultCodes(); err == nil { + s.WriteString(`(`) + resultCodes := append([]string{rc.TransactionCode}, rc.OperationCodes...) + s.WriteString(strings.Join(resultCodes, `, `)) + s.WriteString(`) `) + } + s.WriteString(`- check horizon.Error.Problem for more information`) + return s.String() +} + +// Envelope extracts the transaction envelope that triggered this error from the +// extra fields. +func (herr *Error) Envelope() (*xdr.TransactionEnvelope, error) { + b64, err := herr.EnvelopeXDR() + if err != nil { + return nil, err + } + + var result xdr.TransactionEnvelope + err = xdr.SafeUnmarshalBase64(b64, &result) + return &result, errors.Wrap(err, "xdr decode failed") +} + +// EnvelopeXDR returns the base 64 serialised string representation of the XDR envelope. +// This can be stored, or decoded in the Stellar Laboratory XDR viewer for example. +func (herr *Error) EnvelopeXDR() (string, error) { + raw, ok := herr.Problem.Extras["envelope_xdr"] + if !ok { + return "", ErrEnvelopeNotPopulated + } + + var b64 string + b64, ok = raw.(string) + if !ok { + return "", errors.New("type assertion failed") + } + + return b64, nil +} + +// ResultString extracts the transaction result as a string. +func (herr *Error) ResultString() (string, error) { + raw, ok := herr.Problem.Extras["result_xdr"] + if !ok { + return "", ErrResultNotPopulated + } + + b64, ok := raw.(string) + if !ok { + return "", errors.New("type assertion failed") + } + + return b64, nil +} + +// ResultCodes extracts a result code summary from the error, if possible. +func (herr *Error) ResultCodes() (*hProtocol.TransactionResultCodes, error) { + + raw, ok := herr.Problem.Extras["result_codes"] + if !ok { + return nil, ErrResultCodesNotPopulated + } + + // converts map to []byte + dataString, err := json.Marshal(raw) + if err != nil { + return nil, errors.Wrap(err, "marshaling failed") + } + + var result hProtocol.TransactionResultCodes + if err = json.Unmarshal(dataString, &result); err != nil { + return nil, errors.Wrap(err, "unmarshaling failed") + } + + return &result, nil +} diff --git a/clients/horizonclient/error_helpers.go b/clients/horizonclient/error_helpers.go new file mode 100644 index 0000000000..3dbd705ee0 --- /dev/null +++ b/clients/horizonclient/error_helpers.go @@ -0,0 +1,41 @@ +package horizonclient + +import "github.com/stellar/go/support/errors" + +// IsNotFoundError returns true if the error is a horizonclient.Error with +// a not_found problem indicating that the resource is not found on +// Horizon. +func IsNotFoundError(err error) bool { + var hErr *Error + + err = errors.Cause(err) + switch err := err.(type) { + case *Error: + hErr = err + case Error: + hErr = &err + } + + if hErr == nil { + return false + } + + return hErr.Problem.Type == "https://stellar.org/horizon-errors/not_found" +} + +// GetError returns an error that can be interpreted as a horizon-specific +// error. If err cannot be interpreted as a horizon-specific error, a nil error +// is returned. The caller should still check whether err is nil. 
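+//
+// A minimal usage sketch, assuming err was returned by a client call:
+//
+//	if hErr := GetError(err); hErr != nil {
+//		fmt.Println(hErr.Problem.Title)
+//	}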
+func GetError(err error) *Error { + var hErr *Error + + err = errors.Cause(err) + switch e := err.(type) { + case *Error: + hErr = e + case Error: + hErr = &e + } + + return hErr +} diff --git a/clients/horizonclient/error_helpers_test.go b/clients/horizonclient/error_helpers_test.go new file mode 100644 index 0000000000..42f0b3185f --- /dev/null +++ b/clients/horizonclient/error_helpers_test.go @@ -0,0 +1,231 @@ +package horizonclient + +import ( + "testing" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/problem" + "github.com/stretchr/testify/assert" +) + +func TestIsNotFoundError(t *testing.T) { + testCases := []struct { + desc string + err error + is bool + }{ + { + desc: "nil error", + err: nil, + is: false, + }, + { + desc: "another Go type of error", + err: errors.New("error"), + is: false, + }, + { + desc: "not found problem (pointer)", + err: &Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + is: true, + }, + { + desc: "wrapped not found problem (pointer)", + err: errors.Wrap(&Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, "wrap message"), + is: true, + }, + { + desc: "not found problem (not a pointer)", + err: Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + is: true, + }, + { + desc: "wrapped not found problem (not a pointer)", + err: errors.Wrap(Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, "wrap message"), + is: true, + }, + { + desc: "some other problem (pointer)", + err: &Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/server_error", + Title: "Server Error", + Status: 500, + }, + }, + is: false, + }, + { + desc: "wrapped some other problem (pointer)", + err: errors.Wrap(&Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/server_error", + Title: "Server Error", + Status: 500, + }, + }, "wrap message"), + is: false, + }, + { + desc: "some other problem (not a pointer)", + err: Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/server_error", + Title: "Server Error", + Status: 500, + }, + }, + is: false, + }, + { + desc: "wrapped some other problem (not a pointer)", + err: errors.Wrap(Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/server_error", + Title: "Server Error", + Status: 500, + }, + }, "wrap message"), + is: false, + }, + { + desc: "a nil *horizonclient.Error", + err: (*Error)(nil), + is: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + is := IsNotFoundError(tc.err) + assert.Equal(t, tc.is, is) + }) + } +} + +func TestGetError(t *testing.T) { + testCases := []struct { + desc string + err error + wantErr error + }{ + { + desc: "nil error", + err: nil, + wantErr: nil, + }, + { + desc: "another Go type of error", + err: errors.New("error"), + wantErr: nil, + }, + { + desc: "not found problem (pointer)", + err: &Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + wantErr: &Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + }, + { + desc: "wrapped not found problem 
(pointer)", + err: errors.Wrap(&Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, "wrap message"), + wantErr: &Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + }, + { + desc: "not found problem (not a pointer)", + err: Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + wantErr: &Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + }, + { + desc: "wrapped not found problem (not a pointer)", + err: errors.Wrap(Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, "wrap message"), + wantErr: &Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + }, + { + desc: "a nil *horizonclient.Error", + err: (*Error)(nil), + wantErr: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + gotErr := GetError(tc.err) + if tc.wantErr == nil { + assert.Nil(t, gotErr) + } else { + assert.Equal(t, tc.wantErr, gotErr) + } + }) + } +} diff --git a/clients/horizonclient/error_test.go b/clients/horizonclient/error_test.go new file mode 100644 index 0000000000..f7584bea16 --- /dev/null +++ b/clients/horizonclient/error_test.go @@ -0,0 +1,146 @@ +package horizonclient + +import ( + "testing" + + "github.com/stellar/go/support/render/problem" + "github.com/stretchr/testify/assert" +) + +func TestError_Error(t *testing.T) { + var herr Error + + // transaction failed happy path: with the appropriate extra fields + herr = Error{ + Problem: problem.P{ + Title: "Transaction Failed", + Type: "transaction_failed", + Extras: map[string]interface{}{ + "result_codes": map[string]interface{}{ + "transaction": "tx_failed", + "operations": []string{"op_underfunded", "op_already_exists"}, + }, + }, + }, + } + assert.Equal(t, `horizon error: "Transaction Failed" (tx_failed, op_underfunded, op_already_exists) - check horizon.Error.Problem for more information`, herr.Error()) + + // transaction failed sad path: missing result_codes extra + herr = Error{ + Problem: problem.P{ + Title: "Transaction Failed", + Type: "transaction_failed", + Extras: map[string]interface{}{}, + }, + } + assert.Equal(t, `horizon error: "Transaction Failed" - check horizon.Error.Problem for more information`, herr.Error()) + + // transaction failed sad path: unparseable result_codes extra + herr = Error{ + Problem: problem.P{ + Title: "Transaction Failed", + Type: "transaction_failed", + Extras: map[string]interface{}{ + "result_codes": "kaboom", + }, + }, + } + assert.Equal(t, `horizon error: "Transaction Failed" - check horizon.Error.Problem for more information`, herr.Error()) + + // non-transaction errors + herr = Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + } + assert.Equal(t, `horizon error: "Resource Missing" - check horizon.Error.Problem for more information`, herr.Error()) +} + +func TestError_ResultCodes(t *testing.T) { + var herr Error + + // happy path: transaction_failed with the appropriate extra fields + herr.Problem.Type = "transaction_failed" + herr.Problem.Extras = make(map[string]interface{}) + 
herr.Problem.Extras["result_codes"] = map[string]interface{}{ + "transaction": "tx_failed", + "operations": []string{"op_underfunded", "op_already_exists"}, + } + + trc, err := herr.ResultCodes() + if assert.NoError(t, err) { + assert.Equal(t, "tx_failed", trc.TransactionCode) + + if assert.Len(t, trc.OperationCodes, 2) { + assert.Equal(t, "op_underfunded", trc.OperationCodes[0]) + assert.Equal(t, "op_already_exists", trc.OperationCodes[1]) + } + } + + // sad path: missing result_codes extra + herr.Problem.Type = "transaction_failed" + herr.Problem.Extras = make(map[string]interface{}) + _, err = herr.ResultCodes() + assert.Equal(t, ErrResultCodesNotPopulated, err) + + // sad path: unparseable result_codes extra + herr.Problem.Type = "transaction_failed" + herr.Problem.Extras = make(map[string]interface{}) + herr.Problem.Extras["result_codes"] = "kaboom" + _, err = herr.ResultCodes() + assert.Error(t, err) +} + +func TestError_ResultString(t *testing.T) { + var herr Error + + // happy path: transaction_failed with the appropriate extra fields + herr.Problem.Type = "transaction_failed" + herr.Problem.Extras = make(map[string]interface{}) + herr.Problem.Extras["result_xdr"] = "AAAAAAAAAMj/////AAAAAgAAAAAAAAAA/////wAAAAAAAAAAAAAAAAAAAAA=" + + trs, err := herr.ResultString() + if assert.NoError(t, err) { + assert.Equal(t, "AAAAAAAAAMj/////AAAAAgAAAAAAAAAA/////wAAAAAAAAAAAAAAAAAAAAA=", trs) + } + + // sad path: missing result_xdr extra + herr.Problem.Type = "transaction_failed" + herr.Problem.Extras = make(map[string]interface{}) + _, err = herr.ResultString() + assert.Equal(t, ErrResultNotPopulated, err) + + // sad path: unparseable result_xdr extra + herr.Problem.Type = "transaction_failed" + herr.Problem.Extras = make(map[string]interface{}) + herr.Problem.Extras["result_xdr"] = 1234 + _, err = herr.ResultString() + assert.Error(t, err) +} + +func TestError_Envelope(t *testing.T) { + var herr Error + + // happy path: transaction_failed with the appropriate extra fields + herr.Problem.Type = "transaction_failed" + herr.Problem.Extras = make(map[string]interface{}) + herr.Problem.Extras["envelope_xdr"] = `AAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAZAAT3TUAAAAwAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABSU5SAAAAAAA0jDEZkBgx+hCc5IIv+z6CoaYTB8jRkIA6drZUv3YRlwAAAAFVU0QAAAAAADSMMRmQGDH6EJzkgi/7PoKhphMHyNGQgDp2tlS/dhGXAAAAAAX14QAAAAAKAAAAAQAAAAAAAAAAAAAAAAAAAAG/dhGXAAAAQLuStfImg0OeeGAQmvLkJSZ1MPSkCzCYNbGqX5oYNuuOqZ5SmWhEsC7uOD9ha4V7KengiwNlc0oMNqBVo22S7gk=` + + _, err := herr.Envelope() + assert.NoError(t, err) + + // sad path: missing envelope_xdr extra + herr.Problem.Extras = make(map[string]interface{}) + _, err = herr.Envelope() + assert.Equal(t, ErrEnvelopeNotPopulated, err) + + // sad path: unparseable envelope_xdr extra + herr.Problem.Extras = make(map[string]interface{}) + herr.Problem.Extras["envelope_xdr"] = "AAAAADSMMRmQGDH6EJzkgi" + _, err = herr.Envelope() + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "xdr decode") + } +} diff --git a/clients/horizonclient/examples_test.go b/clients/horizonclient/examples_test.go new file mode 100644 index 0000000000..146bba681e --- /dev/null +++ b/clients/horizonclient/examples_test.go @@ -0,0 +1,1395 @@ +package horizonclient_test + +import ( + "context" + "fmt" + "time" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/effects" + 
"github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/txnbuild" +) + +func ExampleClient_Accounts() { + client := horizonclient.DefaultPublicNetClient + accountsRequest := horizonclient.AccountsRequest{Signer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + + account, err := client.Accounts(accountsRequest) + if err != nil { + fmt.Println(err) + return + } + + fmt.Print(account) +} + +func ExampleClient_AccountDetail() { + client := horizonclient.DefaultPublicNetClient + accountRequest := horizonclient.AccountRequest{AccountID: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + + account, err := client.AccountDetail(accountRequest) + if err != nil { + fmt.Println(err) + return + } + + fmt.Print(account) +} + +func ExampleClient_Assets() { + client := horizonclient.DefaultPublicNetClient + // assets for asset issuer + assetRequest := horizonclient.AssetRequest{ForAssetIssuer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + asset, err := client.Assets(assetRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(asset) + + // all assets + assetRequest = horizonclient.AssetRequest{} + asset, err = client.Assets(assetRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(asset) +} + +func ExampleClient_Effects() { + client := horizonclient.DefaultPublicNetClient + // effects for an account + effectRequest := horizonclient.EffectRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + effect, err := client.Effects(effectRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(effect) + + // all effects + effectRequest = horizonclient.EffectRequest{} + effect, err = client.Effects(effectRequest) + if err != nil { + fmt.Println(err) + return + } + records := effect.Embedded.Records + if records[0].GetType() == "account_created" { + acc, ok := records[0].(effects.AccountCreated) + if ok { + fmt.Print(acc.Account) + fmt.Print(acc.StartingBalance) + } + } +} + +func ExampleClient_FeeStats() { + client := horizonclient.DefaultPublicNetClient + // horizon fees + fees, err := client.FeeStats() + if err != nil { + fmt.Println(err) + return + } + fmt.Print(fees) + +} + +func ExampleClient_Fund() { + client := horizonclient.DefaultTestNetClient + // fund an account + resp, err := client.Fund("GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU") + if err != nil { + fmt.Println(err) + return + } + fmt.Print(resp) +} + +func ExampleClient_LedgerDetail() { + client := horizonclient.DefaultPublicNetClient + // details for a ledger + sequence := uint32(12345) + ledger, err := client.LedgerDetail(sequence) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ledger) + +} + +func ExampleClient_NextAccountsPage() { + client := horizonclient.DefaultPublicNetClient + // accounts with signer + accountsRequest := horizonclient.AccountsRequest{Signer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Limit: 20} + accounts, err := client.Accounts(accountsRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("Page 1:") + for _, a := range accounts.Embedded.Records { + fmt.Println(a.ID) + } + + // next page + accounts2, err := client.NextAccountsPage(accounts) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("Page 2:") + for _, a := range accounts2.Embedded.Records { + fmt.Println(a.ID) + } +} + +func ExampleClient_NextAssetsPage() { + client := horizonclient.DefaultPublicNetClient + // assets for asset issuer + assetRequest := 
horizonclient.AssetRequest{ForAssetIssuer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Limit: 20} + asset, err := client.Assets(assetRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(asset) + + // all assets + assetRequest = horizonclient.AssetRequest{} + asset, err = client.Assets(assetRequest) + if err != nil { + fmt.Println(err) + return + } + + // next page + nextPage, err := client.NextAssetsPage(asset) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(nextPage) +} + +func ExampleClient_NextEffectsPage() { + client := horizonclient.DefaultPublicNetClient + // all effects + effectRequest := horizonclient.EffectRequest{Limit: 20} + efp, err := client.Effects(effectRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(efp) + + // get next pages. + recordsFound := false + if len(efp.Embedded.Records) > 0 { + recordsFound = true + } + page := efp + // get the next page of records if recordsFound is true + for recordsFound { + // next page + nextPage, err := client.NextEffectsPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = nextPage + if len(nextPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(nextPage) + } +} + +func ExampleClient_NextLedgersPage() { + client := horizonclient.DefaultPublicNetClient + // all ledgers + ledgerRequest := horizonclient.LedgerRequest{Limit: 20} + ledgers, err := client.Ledgers(ledgerRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ledgers) + + // get next pages. + recordsFound := false + if len(ledgers.Embedded.Records) > 0 { + recordsFound = true + } + page := ledgers + // get the next page of records if recordsFound is true + for recordsFound { + // next page + nextPage, err := client.NextLedgersPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = nextPage + if len(nextPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(nextPage) + } +} + +func ExampleClient_NextOffersPage() { + client := horizonclient.DefaultPublicNetClient + // all offers + offerRequest := horizonclient.OfferRequest{ForAccount: "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", Limit: 20} + offers, err := client.Offers(offerRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(offers) + + // get next pages. + recordsFound := false + if len(offers.Embedded.Records) > 0 { + recordsFound = true + } + page := offers + // get the next page of records if recordsFound is true + for recordsFound { + // next page + nextPage, err := client.NextOffersPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = nextPage + if len(nextPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(nextPage) + } +} +func ExampleClient_NextOperationsPage() { + client := horizonclient.DefaultPublicNetClient + // all operations + operationRequest := horizonclient.OperationRequest{Limit: 20} + ops, err := client.Operations(operationRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ops) + + // get next pages. 
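+	// The loop below keeps following the next link until Horizon returns a page
+	// with no embedded records, which marks the end of the collection.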
+ recordsFound := false + if len(ops.Embedded.Records) > 0 { + recordsFound = true + } + page := ops + // get the next page of records if recordsFound is true + for recordsFound { + // next page + nextPage, err := client.NextOperationsPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = nextPage + if len(nextPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(nextPage) + } +} + +func ExampleClient_NextTradeAggregationsPage() { + client := horizonclient.DefaultPublicNetClient + testTime := time.Unix(int64(1517521726), int64(0)) + // Find trade aggregations + ta := horizonclient.TradeAggregationRequest{ + StartTime: testTime, + EndTime: testTime, + Resolution: horizonclient.FiveMinuteResolution, + BaseAssetType: horizonclient.AssetTypeNative, + CounterAssetType: horizonclient.AssetType4, + CounterAssetCode: "SLT", + CounterAssetIssuer: "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + Order: horizonclient.OrderDesc, + } + tradeAggs, err := client.TradeAggregations(ta) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(tradeAggs) + + // get next pages. + recordsFound := false + if len(tradeAggs.Embedded.Records) > 0 { + recordsFound = true + } + page := tradeAggs + // get the next page of records if recordsFound is true + for recordsFound { + // next page + nextPage, err := client.NextTradeAggregationsPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = nextPage + if len(nextPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(nextPage) + } +} + +func ExampleClient_NextTradesPage() { + client := horizonclient.DefaultPublicNetClient + // all trades + tradeRequest := horizonclient.TradeRequest{Cursor: "123456", Limit: 30, Order: horizonclient.OrderAsc} + trades, err := client.Trades(tradeRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(trades) + + // get next pages. + recordsFound := false + if len(trades.Embedded.Records) > 0 { + recordsFound = true + } + page := trades + // get the next page of records if recordsFound is true + for recordsFound { + // next page + nextPage, err := client.NextTradesPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = nextPage + if len(nextPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(nextPage) + } +} + +func ExampleClient_NextTransactionsPage() { + client := horizonclient.DefaultPublicNetClient + // all transactions + transactionRequest := horizonclient.TransactionRequest{Limit: 20} + transactions, err := client.Transactions(transactionRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(transactions) + + // get next pages. 
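+	// NextTransactionsPage is called until an empty page of records comes back,
+	// signalling that there are no further transactions to fetch.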
+ recordsFound := false + if len(transactions.Embedded.Records) > 0 { + recordsFound = true + } + page := transactions + // get the next page of records if recordsFound is true + for recordsFound { + // next page + nextPage, err := client.NextTransactionsPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = nextPage + if len(nextPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(nextPage) + } +} + +func ExampleClient_OfferDetails() { + client := horizonclient.DefaultPublicNetClient + offer, err := client.OfferDetails("2") + if err != nil { + fmt.Println(err) + return + } + + fmt.Print(offer) +} + +func ExampleClient_Offers() { + client := horizonclient.DefaultPublicNetClient + offerRequest := horizonclient.OfferRequest{ + ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Cursor: "now", + Order: horizonclient.OrderDesc, + } + offers, err := client.Offers(offerRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(offers) + + offerRequest = horizonclient.OfferRequest{ + Seller: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Selling: "COP:GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Buying: "EUR:GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Order: horizonclient.OrderDesc, + } + + offers, err = client.Offers(offerRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(offers) +} + +func ExampleClient_OperationDetail() { + client := horizonclient.DefaultPublicNetClient + opID := "123456" + // operation details for an id + ops, err := client.OperationDetail(opID) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ops) +} + +func ExampleClient_Operations() { + client := horizonclient.DefaultPublicNetClient + // operations for an account + opRequest := horizonclient.OperationRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + ops, err := client.Operations(opRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ops) + + // all operations + opRequest = horizonclient.OperationRequest{Cursor: "now"} + ops, err = client.Operations(opRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ops) + records := ops.Embedded.Records + + for _, value := range records { + // prints the type + fmt.Print(value.GetType()) + // for example if the type is change_trust + c, ok := value.(operations.ChangeTrust) + if ok { + // access ChangeTrust fields + fmt.Print(c.Trustee) + } + + } +} + +func ExampleClient_OrderBook() { + client := horizonclient.DefaultPublicNetClient + // orderbook for an asset pair, e.g XLM/NGN + obRequest := horizonclient.OrderBookRequest{ + BuyingAssetType: horizonclient.AssetTypeNative, + SellingAssetCode: "USD", + SellingAssetType: horizonclient.AssetType4, + SellingAssetIssuer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + } + obs, err := client.OrderBook(obRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(obs) +} + +func ExampleClient_Paths() { + client := horizonclient.DefaultPublicNetClient + // Find paths for XLM->NGN + pr := horizonclient.PathsRequest{ + DestinationAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + DestinationAmount: "100", + DestinationAssetCode: "NGN", + DestinationAssetIssuer: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + DestinationAssetType: horizonclient.AssetType4, + SourceAccount: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + } + paths, err := 
client.StrictReceivePaths(pr) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(paths) +} + +func ExampleClient_StrictSendPaths() { + client := horizonclient.DefaultPublicNetClient + // Find paths for USD->EUR + pr := horizonclient.StrictSendPathsRequest{ + SourceAmount: "20", + SourceAssetCode: "USD", + SourceAssetIssuer: "GDUKMGUGDZQK6YHYA5Z6AY2G4XDSZPSZ3SW5UN3ARVMO6QSRDWP5YLEX", + SourceAssetType: horizonclient.AssetType4, + DestinationAssets: "EURT:GAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S", + } + paths, err := client.StrictSendPaths(pr) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(paths) +} + +func ExampleClient_Payments() { + client := horizonclient.DefaultPublicNetClient + // payments for an account + opRequest := horizonclient.OperationRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + ops, err := client.Payments(opRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ops) + + // all payments + opRequest = horizonclient.OperationRequest{Cursor: "now"} + ops, err = client.Payments(opRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ops) + records := ops.Embedded.Records + + for _, value := range records { + // prints the type + fmt.Print(value.GetType()) + // for example if the type is create_account + c, ok := value.(operations.CreateAccount) + if ok { + // access create_account fields + fmt.Print(c.StartingBalance) + } + + } +} + +func ExampleClient_PrevAssetsPage() { + client := horizonclient.DefaultPublicNetClient + // assets for asset issuer + assetRequest := horizonclient.AssetRequest{ForAssetIssuer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Limit: 20} + asset, err := client.Assets(assetRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(asset) + + // all assets + assetRequest = horizonclient.AssetRequest{} + asset, err = client.Assets(assetRequest) + if err != nil { + fmt.Println(err) + return + } + + // next page + prevPage, err := client.PrevAssetsPage(asset) + if err != nil { + fmt.Println(err) + return + } + fmt.Println(prevPage) +} + +func ExampleClient_PrevEffectsPage() { + client := horizonclient.DefaultPublicNetClient + // all effects + effectRequest := horizonclient.EffectRequest{Limit: 20} + efp, err := client.Effects(effectRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(efp) + + // get prev pages. + recordsFound := false + if len(efp.Embedded.Records) > 0 { + recordsFound = true + } + page := efp + // get the prev page of records if recordsFound is true + for recordsFound { + // prev page + prevPage, err := client.PrevEffectsPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = prevPage + if len(prevPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(prevPage) + } +} + +func ExampleClient_PrevLedgersPage() { + client := horizonclient.DefaultPublicNetClient + // all ledgers + ledgerRequest := horizonclient.LedgerRequest{Limit: 20} + ledgers, err := client.Ledgers(ledgerRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ledgers) + + // get prev pages. 
+ recordsFound := false + if len(ledgers.Embedded.Records) > 0 { + recordsFound = true + } + page := ledgers + // get the prev page of records if recordsFound is true + for recordsFound { + // prev page + prevPage, err := client.PrevLedgersPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = prevPage + if len(prevPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(prevPage) + } +} + +func ExampleClient_PrevOffersPage() { + client := horizonclient.DefaultPublicNetClient + // all offers + offerRequest := horizonclient.OfferRequest{ForAccount: "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", Limit: 20} + offers, err := client.Offers(offerRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(offers) + + // get prev pages. + recordsFound := false + if len(offers.Embedded.Records) > 0 { + recordsFound = true + } + page := offers + // get the prev page of records if recordsFound is true + for recordsFound { + // prev page + prevPage, err := client.PrevOffersPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = prevPage + if len(prevPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(prevPage) + } +} + +func ExampleClient_PrevOperationsPage() { + client := horizonclient.DefaultPublicNetClient + // all operations + operationRequest := horizonclient.OperationRequest{Limit: 20} + ops, err := client.Operations(operationRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(ops) + + // get prev pages. + recordsFound := false + if len(ops.Embedded.Records) > 0 { + recordsFound = true + } + page := ops + // get the prev page of records if recordsFound is true + for recordsFound { + // prev page + prevPage, err := client.PrevOperationsPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = prevPage + if len(prevPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(prevPage) + } +} + +func ExampleClient_PrevTradeAggregationsPage() { + client := horizonclient.DefaultPublicNetClient + testTime := time.Unix(int64(1517521726), int64(0)) + // Find trade aggregations + ta := horizonclient.TradeAggregationRequest{ + StartTime: testTime, + EndTime: testTime, + Resolution: horizonclient.FiveMinuteResolution, + BaseAssetType: horizonclient.AssetTypeNative, + CounterAssetType: horizonclient.AssetType4, + CounterAssetCode: "SLT", + CounterAssetIssuer: "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + Order: horizonclient.OrderDesc, + } + tradeAggs, err := client.TradeAggregations(ta) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(tradeAggs) + + // get prev pages. + recordsFound := false + if len(tradeAggs.Embedded.Records) > 0 { + recordsFound = true + } + page := tradeAggs + // get the prev page of records if recordsFound is true + for recordsFound { + // prev page + prevPage, err := client.PrevTradeAggregationsPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = prevPage + if len(prevPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(prevPage) + } +} + +func ExampleClient_PrevTradesPage() { + client := horizonclient.DefaultPublicNetClient + // all trades + tradeRequest := horizonclient.TradeRequest{Cursor: "123456", Limit: 30, Order: horizonclient.OrderAsc} + trades, err := client.Trades(tradeRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(trades) + + // get prev pages. 
+ recordsFound := false + if len(trades.Embedded.Records) > 0 { + recordsFound = true + } + page := trades + // get the prev page of records if recordsFound is true + for recordsFound { + // prev page + prevPage, err := client.PrevTradesPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = prevPage + if len(prevPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(prevPage) + } +} + +func ExampleClient_PrevTransactionsPage() { + client := horizonclient.DefaultPublicNetClient + // all transactions + transactionRequest := horizonclient.TransactionRequest{Limit: 20} + transactions, err := client.Transactions(transactionRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(transactions) + + // get prev pages. + recordsFound := false + if len(transactions.Embedded.Records) > 0 { + recordsFound = true + } + page := transactions + // get the prev page of records if recordsFound is true + for recordsFound { + // prev page + prevPage, err := client.PrevTransactionsPage(page) + if err != nil { + fmt.Println(err) + return + } + + page = prevPage + if len(prevPage.Embedded.Records) == 0 { + recordsFound = false + } + fmt.Println(prevPage) + } +} + +func ExampleClient_Root() { + client := horizonclient.DefaultTestNetClient + root, err := client.Root() + if err != nil { + fmt.Println(err) + return + } + fmt.Print(root) +} + +func ExampleClient_SetHorizonTimeout() { + client := horizonclient.DefaultTestNetClient + + // https://www.stellar.org/laboratory/#xdr-viewer?input=AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM%2BHm2GVuCcAAAAZAAABD0AAuV%2FAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAyTBGxOgfSApppsTnb%2FYRr6gOR8WT0LZNrhLh4y3FCgoAAAAXSHboAAAAAAAAAAABhlbgnAAAAEAivKe977CQCxMOKTuj%2BcWTFqc2OOJU8qGr9afrgu2zDmQaX5Q0cNshc3PiBwe0qw%2F%2BD%2FqJk5QqM5dYeSUGeDQP&type=TransactionEnvelope&network=test + txXdr := `AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAZAAABD0AAuV/AAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAyTBGxOgfSApppsTnb/YRr6gOR8WT0LZNrhLh4y3FCgoAAAAXSHboAAAAAAAAAAABhlbgnAAAAEAivKe977CQCxMOKTuj+cWTFqc2OOJU8qGr9afrgu2zDmQaX5Q0cNshc3PiBwe0qw/+D/qJk5QqM5dYeSUGeDQP` + + // test user timeout + client = client.SetHorizonTimeout(30 * time.Second) + resp, err := client.SubmitTransactionXDR(txXdr) + if err != nil { + fmt.Println(err) + return + } + + fmt.Print(resp) +} + +func ExampleClient_StreamEffects() { + client := horizonclient.DefaultTestNetClient + // all effects + effectRequest := horizonclient.EffectRequest{Cursor: "760209215489"} + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Stop streaming after 60 seconds. + time.Sleep(60 * time.Second) + cancel() + }() + + printHandler := func(e effects.Effect) { + fmt.Println(e) + } + err := client.StreamEffects(ctx, effectRequest, printHandler) + if err != nil { + fmt.Println(err) + } +} + +func ExampleClient_StreamLedgers() { + client := horizonclient.DefaultTestNetClient + // all ledgers from now + ledgerRequest := horizonclient.LedgerRequest{} + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Stop streaming after 60 seconds. 
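+		// Stream* calls block until the context is cancelled or an error
+		// occurs, so cancelling the context is the supported way to stop a
+		// stream.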
+ time.Sleep(60 * time.Second) + cancel() + }() + + printHandler := func(ledger hProtocol.Ledger) { + fmt.Println(ledger) + } + err := client.StreamLedgers(ctx, ledgerRequest, printHandler) + if err != nil { + fmt.Println(err) + } +} + +func ExampleClient_StreamOffers() { + client := horizonclient.DefaultTestNetClient + // offers for account + offerRequest := horizonclient.OfferRequest{ForAccount: "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", Cursor: "1"} + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Stop streaming after 60 seconds. + time.Sleep(60 * time.Second) + cancel() + }() + + printHandler := func(offer hProtocol.Offer) { + fmt.Println(offer) + } + err := client.StreamOffers(ctx, offerRequest, printHandler) + if err != nil { + fmt.Println(err) + } +} + +func ExampleClient_StreamOperations() { + client := horizonclient.DefaultTestNetClient + // operations for an account + opRequest := horizonclient.OperationRequest{ForAccount: "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", Cursor: "760209215489"} + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Stop streaming after 60 seconds. + time.Sleep(60 * time.Second) + cancel() + }() + + printHandler := func(op operations.Operation) { + fmt.Println(op) + } + err := client.StreamOperations(ctx, opRequest, printHandler) + if err != nil { + fmt.Println(err) + } +} + +func ExampleClient_StreamOrderBooks() { + client := horizonclient.DefaultTestNetClient + orderbookRequest := horizonclient.OrderBookRequest{ + SellingAssetType: horizonclient.AssetTypeNative, + BuyingAssetType: horizonclient.AssetType4, + BuyingAssetCode: "ABC", + BuyingAssetIssuer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + } + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Stop streaming after 60 seconds. + time.Sleep(60 * time.Second) + cancel() + }() + + printHandler := func(orderbook hProtocol.OrderBookSummary) { + fmt.Println(orderbook) + } + err := client.StreamOrderBooks(ctx, orderbookRequest, printHandler) + if err != nil { + fmt.Println(err) + } +} + +func ExampleClient_StreamPayments() { + client := horizonclient.DefaultTestNetClient + // all payments + opRequest := horizonclient.OperationRequest{Cursor: "760209215489"} + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Stop streaming after 60 seconds. + time.Sleep(60 * time.Second) + cancel() + }() + + printHandler := func(op operations.Operation) { + fmt.Println(op) + } + err := client.StreamPayments(ctx, opRequest, printHandler) + if err != nil { + fmt.Println(err) + } +} + +func ExampleClient_StreamTrades() { + client := horizonclient.DefaultTestNetClient + // all trades + tradeRequest := horizonclient.TradeRequest{Cursor: "760209215489"} + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Stop streaming after 60 seconds. + time.Sleep(60 * time.Second) + cancel() + }() + + printHandler := func(tr hProtocol.Trade) { + fmt.Println(tr) + } + err := client.StreamTrades(ctx, tradeRequest, printHandler) + + if err != nil { + fmt.Println(err) + } +} + +func ExampleClient_StreamTransactions() { + client := horizonclient.DefaultTestNetClient + // all transactions + transactionRequest := horizonclient.TransactionRequest{Cursor: "760209215489"} + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Stop streaming after 60 seconds. 
+ time.Sleep(60 * time.Second) + cancel() + }() + + printHandler := func(tr hProtocol.Transaction) { + fmt.Println(tr) + } + err := client.StreamTransactions(ctx, transactionRequest, printHandler) + if err != nil { + fmt.Println(err) + } +} + +func ExampleClient_SubmitFeeBumpTransaction() { + kp := keypair.MustParseFull("SDQQUZMIPUP5TSDWH3UJYAKUOP55IJ4KTBXTY7RCOMEFRQGYA6GIR3OD") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + if err != nil { + return + } + + op := txnbuild.Payment{ + Destination: kp.Address(), + Amount: "1", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []txnbuild.Operation{&op}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + if err != nil { + fmt.Println(err) + return + } + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + if err != nil { + fmt.Println(err) + return + } + + feeBumpKP := keypair.MustParseFull("SA5ZEFDVFZ52GRU7YUGR6EDPBNRU2WLA6IQFQ7S2IH2DG3VFV3DOMV2Q") + feeBumpTx, err := txnbuild.NewFeeBumpTransaction(txnbuild.FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: feeBumpKP.Address(), + BaseFee: txnbuild.MinBaseFee * 2, + }) + feeBumpTx, err = feeBumpTx.Sign(network.TestNetworkPassphrase, feeBumpKP) + if err != nil { + fmt.Println(err) + return + } + + result, err := client.SubmitFeeBumpTransaction(feeBumpTx) + if err != nil { + fmt.Println(err) + } + + fmt.Println(result) +} + +func ExampleClient_SubmitFeeBumpTransactionWithOptions() { + kp := keypair.MustParseFull("SDQQUZMIPUP5TSDWH3UJYAKUOP55IJ4KTBXTY7RCOMEFRQGYA6GIR3OD") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + if err != nil { + return + } + + op := txnbuild.Payment{ + Destination: kp.Address(), + Amount: "1", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []txnbuild.Operation{&op}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), // Use a real timeout in production! 
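+			// A bounded window is usually what you want outside of examples;
+			// assuming this version of txnbuild exposes NewTimeout, that would
+			// look like: Timebounds: txnbuild.NewTimeout(300) // ~5 minutes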
+ }, + ) + if err != nil { + fmt.Println(err) + return + } + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + if err != nil { + fmt.Println(err) + return + } + + feeBumpKP := keypair.MustParseFull("SA5ZEFDVFZ52GRU7YUGR6EDPBNRU2WLA6IQFQ7S2IH2DG3VFV3DOMV2Q") + feeBumpTx, err := txnbuild.NewFeeBumpTransaction(txnbuild.FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: feeBumpKP.Address(), + BaseFee: txnbuild.MinBaseFee * 2, + }) + feeBumpTx, err = feeBumpTx.Sign(network.TestNetworkPassphrase, feeBumpKP) + if err != nil { + fmt.Println(err) + return + } + + result, err := client.SubmitFeeBumpTransactionWithOptions( + feeBumpTx, + horizonclient.SubmitTxOpts{SkipMemoRequiredCheck: true}, + ) + if err != nil { + fmt.Println(err) + } + + fmt.Println(result) +} + +func ExampleClient_SubmitTransaction() { + kp := keypair.MustParseFull("SDQQUZMIPUP5TSDWH3UJYAKUOP55IJ4KTBXTY7RCOMEFRQGYA6GIR3OD") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + if err != nil { + return + } + + op := txnbuild.Payment{ + Destination: kp.Address(), + Amount: "1", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []txnbuild.Operation{&op}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + if err != nil { + fmt.Println(err) + return + } + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + if err != nil { + fmt.Println(err) + return + } + + result, err := client.SubmitTransaction(tx) + if err != nil { + fmt.Println(err) + } + + fmt.Println(result) +} + +func ExampleClient_SubmitTransactionWithOptions() { + kp := keypair.MustParseFull("SDQQUZMIPUP5TSDWH3UJYAKUOP55IJ4KTBXTY7RCOMEFRQGYA6GIR3OD") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + if err != nil { + return + } + + op := txnbuild.Payment{ + Destination: kp.Address(), + Amount: "1", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []txnbuild.Operation{&op}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), // Use a real timeout in production! 
+ }, + ) + if err != nil { + fmt.Println(err) + return + } + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + if err != nil { + fmt.Println(err) + return + } + + result, err := client.SubmitTransactionWithOptions(tx, horizonclient.SubmitTxOpts{SkipMemoRequiredCheck: true}) + if err != nil { + fmt.Println(err) + } + + fmt.Println(result) +} + +func ExampleClient_SubmitTransactionWithOptions_skip_memo_required_check() { + kp := keypair.MustParseFull("SDQQUZMIPUP5TSDWH3UJYAKUOP55IJ4KTBXTY7RCOMEFRQGYA6GIR3OD") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + if err != nil { + return + } + + op := txnbuild.Payment{ + Destination: kp.Address(), + Amount: "1", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []txnbuild.Operation{&op}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + if err != nil { + fmt.Println(err) + return + } + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + if err != nil { + fmt.Println(err) + return + } + + result, err := client.SubmitTransactionWithOptions(tx, horizonclient.SubmitTxOpts{ + SkipMemoRequiredCheck: true, + }) + if err != nil { + fmt.Println(err) + } + + fmt.Println(result) +} + +func ExampleClient_SubmitTransactionXDR() { + client := horizonclient.DefaultPublicNetClient + // https://www.stellar.org/laboratory/#xdr-viewer?input=AAAAAOoS%2F5V%2BBiCPXRiVcz8YsnkDdODufq%2Bg7xdqTdIXN8vyAAAE4gFiW0YAAALxAAAAAQAAAAAAAAAAAAAAAFyuBUcAAAABAAAABzIyMjgyNDUAAAAAAQAAAAEAAAAALhsY%2FFdAHXllTmb025DtCVBw06WDSQjq6I9NrCQHOV8AAAABAAAAAHT8zKV7bRQzuGTpk9AO3gjWJ9jVxBXTgguFORkxHVIKAAAAAAAAAAAAOnDwAAAAAAAAAAIkBzlfAAAAQPefqlsOvni6xX1g3AqddvOp1GOM88JYzayGZodbzTfV5toyhxZvL1ZggY3prFsvrereugEpj1kyPJ67z6gcRg0XN8vyAAAAQGwmoTssW49gaze8iQkz%2FUA2E2N%2BBOo%2B6v7YdOSsvIcZnMc37KmXH920nLosKpDLqkNChVztSZFcbVUlHhjbQgA%3D&type=TransactionEnvelope&network=public + txXdr := `AAAAAOoS/5V+BiCPXRiVcz8YsnkDdODufq+g7xdqTdIXN8vyAAAE4gFiW0YAAALxAAAAAQAAAAAAAAAAAAAAAFyuBUcAAAABAAAABzIyMjgyNDUAAAAAAQAAAAEAAAAALhsY/FdAHXllTmb025DtCVBw06WDSQjq6I9NrCQHOV8AAAABAAAAAHT8zKV7bRQzuGTpk9AO3gjWJ9jVxBXTgguFORkxHVIKAAAAAAAAAAAAOnDwAAAAAAAAAAIkBzlfAAAAQPefqlsOvni6xX1g3AqddvOp1GOM88JYzayGZodbzTfV5toyhxZvL1ZggY3prFsvrereugEpj1kyPJ67z6gcRg0XN8vyAAAAQGwmoTssW49gaze8iQkz/UA2E2N+BOo+6v7YdOSsvIcZnMc37KmXH920nLosKpDLqkNChVztSZFcbVUlHhjbQgA=` + + // submit transaction + resp, err := client.SubmitTransactionXDR(txXdr) + if err != nil { + fmt.Println(err) + return + } + + fmt.Print(resp) +} + +func ExampleClient_TradeAggregations() { + client := horizonclient.DefaultPublicNetClient + testTime := time.Unix(int64(1517521726), int64(0)) + // Find trade aggregations + ta := horizonclient.TradeAggregationRequest{ + StartTime: testTime, + EndTime: testTime, + Resolution: horizonclient.FiveMinuteResolution, + BaseAssetType: horizonclient.AssetTypeNative, + CounterAssetType: horizonclient.AssetType4, + CounterAssetCode: "SLT", + CounterAssetIssuer: "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + Order: horizonclient.OrderDesc, + } + tradeAggs, err := client.TradeAggregations(ta) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(tradeAggs) +} + +func ExampleClient_Trades() { + client := horizonclient.DefaultPublicNetClient + // Find all trades + tr := horizonclient.TradeRequest{Cursor: 
"123456", Limit: 30, Order: horizonclient.OrderAsc} + trades, err := client.Trades(tr) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(trades) +} + +func ExampleClient_Transactions() { + client := horizonclient.DefaultPublicNetClient + // transactions for an account + txRequest := horizonclient.TransactionRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + txs, err := client.Transactions(txRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(txs) + + // all transactions + txRequest = horizonclient.TransactionRequest{Cursor: "now", Order: horizonclient.OrderDesc} + txs, err = client.Transactions(txRequest) + if err != nil { + fmt.Println(err) + return + } + fmt.Print(txs) + records := txs.Embedded.Records + + for _, tx := range records { + fmt.Print(tx) + } +} diff --git a/clients/horizonclient/fee_stats_request.go b/clients/horizonclient/fee_stats_request.go new file mode 100644 index 0000000000..24c55a6483 --- /dev/null +++ b/clients/horizonclient/fee_stats_request.go @@ -0,0 +1,26 @@ +package horizonclient + +import ( + "github.com/stellar/go/support/errors" + "net/http" +) + +// BuildURL returns the url for getting fee stats about a running horizon instance +func (fr feeStatsRequest) BuildURL() (endpoint string, err error) { + endpoint = fr.endpoint + if endpoint == "" { + err = errors.New("invalid request: too few parameters") + } + + return +} + +// HTTPRequest returns the http request for the fee stats endpoint +func (fr feeStatsRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := fr.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/internal.go b/clients/horizonclient/internal.go new file mode 100644 index 0000000000..574b5b31ca --- /dev/null +++ b/clients/horizonclient/internal.go @@ -0,0 +1,151 @@ +package horizonclient + +import ( + "encoding/json" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/stellar/go/support/errors" +) + +// decodeResponse decodes the response from a request to a horizon server +func decodeResponse(resp *http.Response, object interface{}, hc *Client) (err error) { + defer resp.Body.Close() + decoder := json.NewDecoder(resp.Body) + + u, err := url.Parse(hc.HorizonURL) + if err != nil { + return errors.Errorf("unable to parse the provided horizon url: %s", hc.HorizonURL) + } + setCurrentServerTime(u.Hostname(), resp.Header["Date"], hc) + + if !(resp.StatusCode >= 200 && resp.StatusCode < 300) { + horizonError := &Error{ + Response: resp, + } + decodeError := decoder.Decode(&horizonError.Problem) + if decodeError != nil { + return errors.Wrap(decodeError, "error decoding horizon.Problem") + } + return horizonError + } + + err = decoder.Decode(&object) + if err != nil { + return errors.Wrap(err, "error decoding response") + } + return +} + +// countParams counts the number of parameters provided +func countParams(params ...interface{}) int { + counter := 0 + for _, param := range params { + switch param := param.(type) { + case string: + if param != "" { + counter++ + } + case int: + if param > 0 { + counter++ + } + case uint: + if param > 0 { + counter++ + } + case bool: + counter++ + default: + panic("Unknown parameter type") + } + + } + return counter +} + +// addQueryParams sets query parameters for a url +func addQueryParams(params ...interface{}) string { + query := url.Values{} + + for _, param := range params { + switch param := 
param.(type) {
+	case cursor:
+		if param != "" {
+			query.Add("cursor", string(param))
+		}
+	case Order:
+		if param != "" {
+			query.Add("order", string(param))
+		}
+	case limit:
+		if param != 0 {
+			query.Add("limit", strconv.Itoa(int(param)))
+		}
+	case assetCode:
+		if param != "" {
+			query.Add("asset_code", string(param))
+		}
+	case assetIssuer:
+		if param != "" {
+			query.Add("asset_issuer", string(param))
+		}
+	case includeFailed:
+		if param {
+			query.Add("include_failed", "true")
+		}
+	case join:
+		if param != "" {
+			query.Add("join", string(param))
+		}
+	case reserves:
+		if len(param) > 0 {
+			query.Add("reserves", strings.Join(param, ","))
+		}
+	case map[string]string:
+		for key, value := range param {
+			if value != "" {
+				query.Add(key, value)
+			}
+		}
+	default:
+		panic("Unknown parameter type")
+	}
+	}
+
+	return query.Encode()
+}
+
+// setCurrentServerTime saves the current time returned by a horizon server
+func setCurrentServerTime(host string, serverDate []string, hc *Client) {
+	if len(serverDate) == 0 {
+		return
+	}
+	st, err := time.Parse(time.RFC1123, serverDate[0])
+	if err != nil {
+		return
+	}
+	serverTimeMapMutex.Lock()
+	ServerTimeMap[host] = ServerTimeRecord{ServerTime: st.UTC().Unix(), LocalTimeRecorded: hc.clock.Now().UTC().Unix()}
+	serverTimeMapMutex.Unlock()
+}
+
+// currentServerTime returns the current server time for a given horizon server
+func currentServerTime(host string, currentTimeUTC int64) int64 {
+	serverTimeMapMutex.Lock()
+	st, ok := ServerTimeMap[host]
+	serverTimeMapMutex.Unlock()
+	if !ok {
+		return 0
+	}
+
+	// If it has been more than 5 minutes since the last recorded time, return 0 because the
+	// saved server time is stale.
+	if currentTimeUTC-st.LocalTimeRecorded > 60*5 {
+		return 0
+	}
+	return currentTimeUTC - st.LocalTimeRecorded + st.ServerTime
+}
diff --git a/clients/horizonclient/ledger_request.go b/clients/horizonclient/ledger_request.go
new file mode 100644
index 0000000000..cb9f85afb4
--- /dev/null
+++ b/clients/horizonclient/ledger_request.go
@@ -0,0 +1,77 @@
+package horizonclient
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	hProtocol "github.com/stellar/go/protocols/horizon"
+	"github.com/stellar/go/support/errors"
+)
+
+// BuildURL creates the endpoint to be queried based on the data in the LedgerRequest struct.
+// If no data is set, it defaults to building the URL for all ledgers
+func (lr LedgerRequest) BuildURL() (endpoint string, err error) {
+	endpoint = "ledgers"
+
+	if lr.forSequence != 0 {
+		endpoint = fmt.Sprintf(
+			"%s/%d",
+			endpoint,
+			lr.forSequence,
+		)
+	} else {
+		queryParams := addQueryParams(cursor(lr.Cursor), limit(lr.Limit), lr.Order)
+		if queryParams != "" {
+			endpoint = fmt.Sprintf(
+				"%s?%s",
+				endpoint,
+				queryParams,
+			)
+		}
+	}
+
+	_, err = url.Parse(endpoint)
+	if err != nil {
+		err = errors.Wrap(err, "failed to parse endpoint")
+	}
+
+	return endpoint, err
+}
+
+// HTTPRequest returns the http request for the ledger endpoint
+func (lr LedgerRequest) HTTPRequest(horizonURL string) (*http.Request, error) {
+	endpoint, err := lr.BuildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	return http.NewRequest("GET", horizonURL+endpoint, nil)
+}
+
+// LedgerHandler is a function that is called when a new ledger is received
+type LedgerHandler func(hProtocol.Ledger)
+
+// StreamLedgers streams stellar ledgers. It can be used to stream all ledgers. Use context.WithCancel
+// to stop streaming or context.Background() if you want to stream indefinitely.
+// LedgerHandler is a user-supplied function that is executed for each streamed ledger received. +func (lr LedgerRequest) StreamLedgers(ctx context.Context, client *Client, + handler LedgerHandler) (err error) { + endpoint, err := lr.BuildURL() + if err != nil { + return errors.Wrap(err, "unable to build endpoint for ledger request") + } + + url := fmt.Sprintf("%s%s", client.fixHorizonURL(), endpoint) + return client.stream(ctx, url, func(data []byte) error { + var ledger hProtocol.Ledger + err = json.Unmarshal(data, &ledger) + if err != nil { + return errors.Wrap(err, "error unmarshaling data for ledger request") + } + handler(ledger) + return nil + }) +} diff --git a/clients/horizonclient/ledger_request_test.go b/clients/horizonclient/ledger_request_test.go new file mode 100644 index 0000000000..19515e4882 --- /dev/null +++ b/clients/horizonclient/ledger_request_test.go @@ -0,0 +1,299 @@ +package horizonclient + +import ( + "context" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLedgerRequestBuildUrl(t *testing.T) { + lr := LedgerRequest{} + endpoint, err := lr.BuildURL() + + // It should return valid all ledgers endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "ledgers", endpoint) + + lr = LedgerRequest{forSequence: 123} + endpoint, err = lr.BuildURL() + + // It should return valid ledger detail endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "ledgers/123", endpoint) + + lr = LedgerRequest{forSequence: 123, Cursor: "now", Order: OrderDesc} + endpoint, err = lr.BuildURL() + + // It should return valid ledger detail endpoint, with no cursor or order + require.NoError(t, err) + assert.Equal(t, "ledgers/123", endpoint) + + lr = LedgerRequest{Cursor: "now", Order: OrderDesc} + endpoint, err = lr.BuildURL() + + // It should return valid ledgers endpoint, with cursor and order + require.NoError(t, err) + assert.Equal(t, "ledgers?cursor=now&order=desc", endpoint) +} + +func TestLedgerDetail(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // invalid parameters + var sequence uint32 + hmock.On( + "GET", + "https://localhost/ledgers/", + ).ReturnString(200, ledgerResponse) + + _, err := client.LedgerDetail(sequence) + // error case: invalid sequence + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid sequence number provided") + } + + // happy path + hmock.On( + "GET", + "https://localhost/ledgers/69859", + ).ReturnString(200, ledgerResponse) + + sequence = 69859 + ledger, err := client.LedgerDetail(sequence) + ftc := int32(1) + + if assert.NoError(t, err) { + assert.Equal(t, ledger.ID, "71a40c0581d8d7c1158e1d9368024c5f9fd70de17a8d277cdd96781590cc10fb") + assert.Equal(t, ledger.PT, "300042120331264") + assert.Equal(t, ledger.Sequence, int32(69859)) + assert.Equal(t, ledger.FailedTransactionCount, &ftc) + } + + // failure response + hmock.On( + "GET", + "https://localhost/ledgers/69859", + ).ReturnString(404, notFoundResponse) + + _, err = client.LedgerDetail(sequence) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Resource Missing") + } + + // connection error + hmock.On( + "GET", + "https://localhost/ledgers/69859", + ).ReturnError("http.Client error") + + _, 
err = client.LedgerDetail(sequence) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "http.Client error") + _, ok := err.(*Error) + assert.Equal(t, ok, false) + } +} + +func TestLedgerRequestStreamLedgers(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + ledgerRequest := LedgerRequest{Cursor: "1"} + ctx, cancel := context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/ledgers?cursor=1", + ).ReturnString(200, ledgerStreamResponse) + + ledgers := make([]hProtocol.Ledger, 1) + err := client.StreamLedgers(ctx, ledgerRequest, func(ledger hProtocol.Ledger) { + ledgers[0] = ledger + cancel() + + }) + + if assert.NoError(t, err) { + assert.Equal(t, ledgers[0].Sequence, int32(560339)) + } + + // test error + ledgerRequest = LedgerRequest{} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/ledgers?cursor=now", + ).ReturnString(500, ledgerStreamResponse) + + err = client.StreamLedgers(ctx, ledgerRequest, func(ledger hProtocol.Ledger) { + ledgers[0] = ledger + cancel() + + }) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "got bad HTTP status code 500") + + } +} + +func TestNextLedgersPage(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + ledgerRequest := LedgerRequest{Limit: 2} + + hmock.On( + "GET", + "https://localhost/ledgers?limit=2", + ).ReturnString(200, firstLedgersPage) + + ledgers, err := client.Ledgers(ledgerRequest) + + if assert.NoError(t, err) { + assert.Equal(t, len(ledgers.Embedded.Records), 2) + } + + hmock.On( + "GET", + "https://horizon-testnet.stellar.org/ledgers?cursor=1559012998905856&limit=2&order=desc", + ).ReturnString(200, emptyLedgersPage) + + nextPage, err := client.NextLedgersPage(ledgers) + if assert.NoError(t, err) { + assert.Equal(t, len(nextPage.Embedded.Records), 0) + } +} + +var ledgerStreamResponse = `data: 
{"_links":{"self":{"href":"https://horizon-testnet.stellar.org/ledgers/560339"},"transactions":{"href":"https://horizon-testnet.stellar.org/ledgers/560339/transactions{?cursor,limit,order}","templated":true},"operations":{"href":"https://horizon-testnet.stellar.org/ledgers/560339/operations{?cursor,limit,order}","templated":true},"payments":{"href":"https://horizon-testnet.stellar.org/ledgers/560339/payments{?cursor,limit,order}","templated":true},"effects":{"href":"https://horizon-testnet.stellar.org/ledgers/560339/effects{?cursor,limit,order}","templated":true}},"id":"66f4d95dab22dbc422585cc4b011716014e81df3599cee8db9c776cfc3a31e93","paging_token":"2406637679673344","hash":"66f4d95dab22dbc422585cc4b011716014e81df3599cee8db9c776cfc3a31e93","prev_hash":"6071f1e52a6bf37aba3f7437081577eafe69f78593c465fc5028c46a4746dda3","sequence":560339,"successful_transaction_count":5,"failed_transaction_count":1,"operation_count":44,"closed_at":"2019-04-01T16:47:05Z","total_coins":"100057227213.0436903","fee_pool":"57227816.6766542","base_fee_in_stroops":100,"base_reserve_in_stroops":5000000,"max_tx_set_size":100,"protocol_version":10,"header_xdr":"AAAACmBx8eUqa/N6uj90NwgVd+r+afeFk8Rl/FAoxGpHRt2jdIn+3X+/O3PFUUZ8Tgy4rfD1oNamR+9NMOCM2V6ndksAAAAAXKJAiQAAAAAAAAAAPyIIYU6Y37lve/MwZls1vmbgxgFdx93hdzOn6g8kHhQ1BS9aAKuXtApQoE3gKpjQ5ze0H9qUruyOUsbM776zXQAIjNMN4r8uJHCvJwACCHvk18POAAAAAwAAAAAAQZnVAAAAZABMS0AAAABkkiIcXkjaTtc9zTQBn0o72CUBe3u+2Mz7W6dgkvkYcJJle8JCNmXx5HcRlDSHJzzBShc8C3rQUIsIuJ93eoBMgHeYAzfholE8hjvrHrqoHq8jfPowxj1FGD6HaUPD1PHTcBXmf0U0cs2Ki0NBDDKNcwKC84nUPdumCkdAxSuEzn4AAAAA"} +` + +var firstLedgersPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/ledgers?cursor=1559021588840447&limit=2&order=desc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/ledgers?cursor=1559012998905856&limit=2&order=desc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/ledgers?cursor=1559017293873152&limit=2&order=asc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/ledgers/362987" + }, + "transactions": { + "href": "https://horizon-testnet.stellar.org/ledgers/362987/transactions{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/ledgers/362987/operations{?cursor,limit,order}", + "templated": true + }, + "payments": { + "href": "https://horizon-testnet.stellar.org/ledgers/362987/payments{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/ledgers/362987/effects{?cursor,limit,order}", + "templated": true + } + }, + "id": "e346ec9065a61c311e012989ac8368a14438cf716246045227b9133a9f7b527c", + "paging_token": "1559017293873152", + "hash": "e346ec9065a61c311e012989ac8368a14438cf716246045227b9133a9f7b527c", + "prev_hash": "018bf9ac8ee44d57982ca154b27056a9e0dbab85c074d5af696265876a539c95", + "sequence": 362987, + "successful_transaction_count": 5, + "failed_transaction_count": 0, + "operation_count": 102, + "closed_at": "2019-05-16T07:48:28Z", + "total_coins": "100286463748.0798442", + "fee_pool": "286463903.1537236", + "base_fee_in_stroops": 100, + "base_reserve_in_stroops": 5000000, + "max_tx_set_size": 150, + "protocol_version": 11, + "header_xdr": 
"AAAACwGL+ayO5E1XmCyhVLJwVqng26uFwHTVr2liZYdqU5yVfaWG9R/+ORte/mnEN9e19MjAXhEYCkpE55NUbhoTvQIAAAAAXN0VzAAAAAAAAAAAM+Y77/jsRfTQJd4Rc77eAxLFqf51z1hkL+lAmXdque/KKHfTioV3SodQAb9sTonc3y2WbQ+3SHIpzafWzw5ofAAFiesN6uQTCtgI6gAKLV+/4m5UAAAADwAAAAAAQdNFAAAAZABMS0AAAACWxYbq8s16ytvhOI1WVPxU7sZf8hLAl3EcsegF9EH/6uL1KqOTvsbW9+L7KRHuAFSBaOAkfdNlxX+010AN80K7r+TXs0UcgvyyUvvA0BuNTik2ZGi6lBdt9kg11m7anDbLAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + }, + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/ledgers/362986" + }, + "transactions": { + "href": "https://horizon-testnet.stellar.org/ledgers/362986/transactions{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/ledgers/362986/operations{?cursor,limit,order}", + "templated": true + }, + "payments": { + "href": "https://horizon-testnet.stellar.org/ledgers/362986/payments{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/ledgers/362986/effects{?cursor,limit,order}", + "templated": true + } + }, + "id": "018bf9ac8ee44d57982ca154b27056a9e0dbab85c074d5af696265876a539c95", + "paging_token": "1559012998905856", + "hash": "018bf9ac8ee44d57982ca154b27056a9e0dbab85c074d5af696265876a539c95", + "prev_hash": "e96448c838514bbf08d9a198d1d589f3d396aa57d85c85a170ac7b9c965fc102", + "sequence": 362986, + "successful_transaction_count": 2, + "failed_transaction_count": 1, + "operation_count": 21, + "closed_at": "2019-05-16T07:48:21Z", + "total_coins": "100286463748.0798442", + "fee_pool": "286463903.1527036", + "base_fee_in_stroops": 100, + "base_reserve_in_stroops": 5000000, + "max_tx_set_size": 150, + "protocol_version": 11, + "header_xdr": "AAAAC+lkSMg4UUu/CNmhmNHVifPTlqpX2FyFoXCse5yWX8ECCX4L/N934EuR74itfcXN38l7B/bfVKdjCE/RstuR6icAAAAAXN0VxQAAAAAAAAAAl4WBEEVHCpGjWQy8VWnCezcSbSgGi3rHCdppElGC9Rzs7mcFl393ae4zKfU586pM5divzZ1PgHW2yGNx2/uXMwAFieoN6uQTCtgI6gAKLV+/4kZ8AAAADwAAAAAAQdMVAAAAZABMS0AAAACWxYbq8s16ytvhOI1WVPxU7sZf8hLAl3EcsegF9EH/6uL1KqOTvsbW9+L7KRHuAFSBaOAkfdNlxX+010AN80K7r+TXs0UcgvyyUvvA0BuNTik2ZGi6lBdt9kg11m7anDbLAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + } + ] + } +}` + +var emptyLedgersPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/ledgers?cursor=1559012998905856&limit=2&order=desc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/ledgers?cursor=1559004408971264&limit=2&order=desc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/ledgers?cursor=1559008703938560&limit=2&order=asc" + } + }, + "_embedded": { + "records": [] + } +}` diff --git a/clients/horizonclient/liquidity_pool_request.go b/clients/horizonclient/liquidity_pool_request.go new file mode 100644 index 0000000000..3b8d94055d --- /dev/null +++ b/clients/horizonclient/liquidity_pool_request.go @@ -0,0 +1,49 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/support/errors" +) + +// LiquidityPoolRequest struct contains data for getting liquidity pool details from a horizon server. +type LiquidityPoolRequest struct { + LiquidityPoolID string +} + +// BuildURL creates the endpoint to be queried based on the data in the LiquidityPoolRequest struct. 
+// If no data is set, it defaults to the build the URL for all assets +func (r LiquidityPoolRequest) BuildURL() (endpoint string, err error) { + + nParams := countParams(r.LiquidityPoolID) + if nParams <= 0 { + err = errors.New("invalid request: no parameters") + } + if err != nil { + return endpoint, err + } + + endpoint = fmt.Sprintf( + "liquidity_pools/%s", + r.LiquidityPoolID, + ) + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the liquidity pool endpoint +func (r LiquidityPoolRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := r.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/liquidity_pool_request_test.go b/clients/horizonclient/liquidity_pool_request_test.go new file mode 100644 index 0000000000..02337b24a5 --- /dev/null +++ b/clients/horizonclient/liquidity_pool_request_test.go @@ -0,0 +1,86 @@ +package horizonclient + +import ( + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLiquidityPoolRequestBuildUrl(t *testing.T) { + // It should return valid liquidity_pool endpoint and no errors + endpoint, err := LiquidityPoolRequest{}.BuildURL() + assert.EqualError(t, err, "invalid request: no parameters") + assert.Equal(t, "", endpoint) + + // It should return valid liquidity_pool endpoint and no errors + endpoint, err = LiquidityPoolRequest{LiquidityPoolID: "abcdef"}.BuildURL() + require.NoError(t, err) + assert.Equal(t, "liquidity_pools/abcdef", endpoint) +} + +func TestLiquidityPoolDetailRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + request := LiquidityPoolRequest{LiquidityPoolID: "abcdef"} + + hmock.On( + "GET", + "https://localhost/liquidity_pools/abcdef", + ).ReturnString(200, liquidityPoolResponse) + + response, err := client.LiquidityPoolDetail(request) + if assert.NoError(t, err) { + assert.IsType(t, response, hProtocol.LiquidityPool{}) + assert.Equal(t, "abcdef", response.ID) + assert.Equal(t, uint32(30), response.FeeBP) + assert.Equal(t, uint64(300), response.TotalTrustlines) + assert.Equal(t, "5000.0000000", response.TotalShares) + } + + // failure response + request = LiquidityPoolRequest{LiquidityPoolID: "abcdef"} + + hmock.On( + "GET", + "https://localhost/liquidity_pools/abcdef", + ).ReturnString(400, badRequestResponse) + + _, err = client.LiquidityPoolDetail(request) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Bad Request") + } +} + +var liquidityPoolResponse = `{ + "_links": { + "self": { + "href": "https://horizon.stellar.org/liquidity_pools/abcdef" + } + }, + "id": "abcdef", + "paging_token": "abcdef", + "fee_bp": 30, + "type": "constant_product", + "total_trustlines": "300", + "total_shares": "5000.0000000", + "reserves": [ + { + "amount": "1000.0000005", + "asset": "EURT:GAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S" + }, + { + "amount": "2000.0000000", + "asset": "PHP:GBUQWP3BOUZX34TOND2QV7QQ7K7VJTG6VSE7WMLBTMDJLLAW7YKGU6EP" + } + ] +}` diff --git a/clients/horizonclient/liquidity_pools_request.go 
b/clients/horizonclient/liquidity_pools_request.go new file mode 100644 index 0000000000..fa6e73bf6a --- /dev/null +++ b/clients/horizonclient/liquidity_pools_request.go @@ -0,0 +1,55 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/support/errors" +) + +// LiquidityPoolsRequest struct contains data for getting pool details from a horizon server. +// If "Reserves" is not set, it returns all liquidity pools. +// The query parameters (Order, Cursor and Limit) are optional. All or none can be set. +type LiquidityPoolsRequest struct { + Cursor string + Limit uint + Order Order + Reserves []string +} + +// BuildURL creates the endpoint to be queried based on the data in the LiquidityPoolRequest struct. +// If no data is set, it defaults to the build the URL for all assets +func (r LiquidityPoolsRequest) BuildURL() (endpoint string, err error) { + endpoint = "liquidity_pools" + + if pageParams := addQueryParams( + cursor(r.Cursor), + limit(r.Limit), + r.Order, + reserves(r.Reserves), + ); len(pageParams) > 0 { + endpoint = fmt.Sprintf( + "%s?%s", + endpoint, + pageParams, + ) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the pool endpoint +func (r LiquidityPoolsRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := r.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/liquidity_pools_request_test.go b/clients/horizonclient/liquidity_pools_request_test.go new file mode 100644 index 0000000000..11c6be44fe --- /dev/null +++ b/clients/horizonclient/liquidity_pools_request_test.go @@ -0,0 +1,110 @@ +package horizonclient + +import ( + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLiquidityPoolsRequestBuildUrl(t *testing.T) { + // It should return valid liquidity_pools endpoint and no errors + endpoint, err := LiquidityPoolsRequest{}.BuildURL() + require.NoError(t, err) + assert.Equal(t, "liquidity_pools", endpoint) + + // It should return valid liquidity_pools endpoint and no errors + endpoint, err = LiquidityPoolsRequest{Order: OrderDesc}.BuildURL() + require.NoError(t, err) + assert.Equal(t, "liquidity_pools?order=desc", endpoint) + + // It should return valid liquidity_pools endpoint and no errors + endpoint, err = LiquidityPoolsRequest{Reserves: []string{ + "EURT:GAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S", + "PHP:GBUQWP3BOUZX34TOND2QV7QQ7K7VJTG6VSE7WMLBTMDJLLAW7YKGU6EP", + }}.BuildURL() + require.NoError(t, err) + assert.Equal(t, "liquidity_pools?reserves=EURT%3AGAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S%2CPHP%3AGBUQWP3BOUZX34TOND2QV7QQ7K7VJTG6VSE7WMLBTMDJLLAW7YKGU6EP", endpoint) +} + +func TestLiquidityPoolsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + request := LiquidityPoolsRequest{} + + hmock.On( + "GET", + "https://localhost/liquidity_pools", + ).ReturnString(200, liquidityPoolsResponse) + + response, err := client.LiquidityPools(request) + if assert.NoError(t, err) { + assert.IsType(t, response, hProtocol.LiquidityPoolsPage{}) + links := response.Links + assert.Equal(t, links.Self.Href, 
"https://horizon.stellar.org/liquidity_pools?limit=200\u0026order=asc") + + assert.Equal(t, links.Next.Href, "https://horizon.stellar.org/liquidity_pools?limit=200\u0026order=asc") + + record := response.Embedded.Records[0] + assert.IsType(t, record, hProtocol.LiquidityPool{}) + assert.Equal(t, "abcdef", record.ID) + assert.Equal(t, uint32(30), record.FeeBP) + assert.Equal(t, uint64(300), record.TotalTrustlines) + assert.Equal(t, "5000.0000000", record.TotalShares) + } + + // failure response + request = LiquidityPoolsRequest{} + + hmock.On( + "GET", + "https://localhost/liquidity_pools", + ).ReturnString(400, badRequestResponse) + + _, err = client.LiquidityPools(request) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Bad Request") + } +} + +var liquidityPoolsResponse = `{ + "_links": { + "self": { + "href": "https://horizon.stellar.org/liquidity_pools?limit=200\u0026order=asc" + }, + "next": { + "href": "https://horizon.stellar.org/liquidity_pools?limit=200\u0026order=asc" + } + }, + "_embedded": { + "records": [ + { + "id": "abcdef", + "paging_token": "abcdef", + "fee_bp": 30, + "type": "constant_product", + "total_trustlines": "300", + "total_shares": "5000.0000000", + "reserves": [ + { + "amount": "1000.0000005", + "asset": "EURT:GAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S" + }, + { + "amount": "2000.0000000", + "asset": "PHP:GBUQWP3BOUZX34TOND2QV7QQ7K7VJTG6VSE7WMLBTMDJLLAW7YKGU6EP" + } + ] + } + ] + } +}` diff --git a/clients/horizonclient/main.go b/clients/horizonclient/main.go new file mode 100644 index 0000000000..209b2a0fab --- /dev/null +++ b/clients/horizonclient/main.go @@ -0,0 +1,447 @@ +/* +Package horizonclient provides client access to a Horizon server, allowing an application to post transactions and look up ledger information. + +This library provides an interface to the Stellar Horizon service. It supports the building of Go applications on +top of the Stellar network (https://www.stellar.org/). Transactions may be constructed using the sister package to +this one, txnbuild (https://github.com/stellar/go/tree/master/txnbuild), and then submitted with this client to any +Horizon instance for processing onto the ledger. Together, these two libraries provide a complete Stellar SDK. 
+ +For more information and further examples, see https://github.com/stellar/go/blob/master/docs/reference/readme.md +*/ +package horizonclient + +import ( + "context" + "errors" + "net/http" + "net/url" + "sync" + "time" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/support/clock" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/txnbuild" +) + +// cursor represents `cursor` param in queries +type cursor string + +// limit represents `limit` param in queries +type limit uint + +// Order represents `order` param in queries +type Order string + +// assetCode represets `asset_code` param in queries +type assetCode string + +// assetIssuer represents `asset_issuer` param in queries +type assetIssuer string + +// includeFailed represents `include_failed` param in queries +type includeFailed bool + +// AssetType represents `asset_type` param in queries +type AssetType string + +// join represents `join` param in queries +type join string + +// reserves represents `reserves` param in queries +type reserves []string + +const ( + // OrderAsc represents an ascending order parameter + OrderAsc Order = "asc" + // OrderDesc represents an descending order parameter + OrderDesc Order = "desc" + // AssetType4 represents an asset type that is 4 characters long + AssetType4 AssetType = "credit_alphanum4" + // AssetType12 represents an asset type that is 12 characters long + AssetType12 AssetType = "credit_alphanum12" + // AssetTypeNative represents the asset type for Stellar Lumens (XLM) + AssetTypeNative AssetType = "native" + // accountRequiresMemo is the base64 encoding of "1". + // SEP 29 uses this value to define transaction memo requirements for incoming payments. + accountRequiresMemo = "MQ==" +) + +// Error struct contains the problem returned by Horizon +type Error struct { + Response *http.Response + Problem problem.P +} + +var ( + // ErrResultCodesNotPopulated is the error returned from a call to + // ResultCodes() against a `Problem` value that doesn't have the + // "result_codes" extra field populated when it is expected to be. + ErrResultCodesNotPopulated = errors.New("result_codes not populated") + + // ErrEnvelopeNotPopulated is the error returned from a call to + // Envelope() against a `Problem` value that doesn't have the + // "envelope_xdr" extra field populated when it is expected to be. + ErrEnvelopeNotPopulated = errors.New("envelope_xdr not populated") + + // ErrResultNotPopulated is the error returned from a call to + // Result() against a `Problem` value that doesn't have the + // "result_xdr" extra field populated when it is expected to be. + ErrResultNotPopulated = errors.New("result_xdr not populated") + + // ErrAccountRequiresMemo is the error returned from a call to checkMemoRequired + // when any of the destination accounts required a memo in the transaction. + ErrAccountRequiresMemo = errors.New("destination account requires a memo in the transaction") + + // HorizonTimeout is the default number of nanoseconds before a request to horizon times out. 
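+	// It can be adjusted per client, as the SetHorizonTimeout example shows:
+	//
+	//	client := horizonclient.DefaultTestNetClient
+	//	client = client.SetHorizonTimeout(30 * time.Second)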
+ HorizonTimeout = 60 * time.Second + + // MinuteResolution represents 1 minute used as `resolution` parameter in trade aggregation + MinuteResolution = time.Duration(1 * time.Minute) + + // FiveMinuteResolution represents 5 minutes used as `resolution` parameter in trade aggregation + FiveMinuteResolution = time.Duration(5 * time.Minute) + + // FifteenMinuteResolution represents 15 minutes used as `resolution` parameter in trade aggregation + FifteenMinuteResolution = time.Duration(15 * time.Minute) + + // HourResolution represents 1 hour used as `resolution` parameter in trade aggregation + HourResolution = time.Duration(1 * time.Hour) + + // DayResolution represents 1 day used as `resolution` parameter in trade aggregation + DayResolution = time.Duration(24 * time.Hour) + + // WeekResolution represents 1 week used as `resolution` parameter in trade aggregation + WeekResolution = time.Duration(168 * time.Hour) +) + +// HTTP represents the HTTP client that a horizon client uses to communicate +type HTTP interface { + Do(req *http.Request) (resp *http.Response, err error) + Get(url string) (resp *http.Response, err error) + PostForm(url string, data url.Values) (resp *http.Response, err error) +} + +// UniversalTimeHandler is a function that is called to return the UTC unix time in seconds. +// This handler is used when getting the time from a horizon server, which can be used to calculate +// transaction timebounds. +type UniversalTimeHandler func() int64 + +// Client struct contains data for creating a horizon client that connects to the stellar network. +type Client struct { + // URL of Horizon server to connect + HorizonURL string + fixHorizonURLOnce sync.Once + + // HTTP client to make requests with + HTTP HTTP + + // AppName is the name of the application using the horizonclient package + AppName string + + // AppVersion is the version of the application using the horizonclient package + AppVersion string + horizonTimeout time.Duration + + // clock is a Clock returning the current time. 
+ clock *clock.Clock +} + +// SubmitTxOpts represents the submit transaction options +type SubmitTxOpts struct { + SkipMemoRequiredCheck bool +} + +// ClientInterface contains methods implemented by the horizon client +type ClientInterface interface { + Accounts(request AccountsRequest) (hProtocol.AccountsPage, error) + AccountDetail(request AccountRequest) (hProtocol.Account, error) + AccountData(request AccountRequest) (hProtocol.AccountData, error) + Effects(request EffectRequest) (effects.EffectsPage, error) + Assets(request AssetRequest) (hProtocol.AssetsPage, error) + Ledgers(request LedgerRequest) (hProtocol.LedgersPage, error) + LedgerDetail(sequence uint32) (hProtocol.Ledger, error) + FeeStats() (hProtocol.FeeStats, error) + Offers(request OfferRequest) (hProtocol.OffersPage, error) + OfferDetails(offerID string) (offer hProtocol.Offer, err error) + Operations(request OperationRequest) (operations.OperationsPage, error) + OperationDetail(id string) (operations.Operation, error) + SubmitTransactionXDR(transactionXdr string) (hProtocol.Transaction, error) + SubmitFeeBumpTransactionWithOptions(transaction *txnbuild.FeeBumpTransaction, opts SubmitTxOpts) (hProtocol.Transaction, error) + SubmitTransactionWithOptions(transaction *txnbuild.Transaction, opts SubmitTxOpts) (hProtocol.Transaction, error) + SubmitFeeBumpTransaction(transaction *txnbuild.FeeBumpTransaction) (hProtocol.Transaction, error) + SubmitTransaction(transaction *txnbuild.Transaction) (hProtocol.Transaction, error) + Transactions(request TransactionRequest) (hProtocol.TransactionsPage, error) + TransactionDetail(txHash string) (hProtocol.Transaction, error) + OrderBook(request OrderBookRequest) (hProtocol.OrderBookSummary, error) + Paths(request PathsRequest) (hProtocol.PathsPage, error) + Payments(request OperationRequest) (operations.OperationsPage, error) + TradeAggregations(request TradeAggregationRequest) (hProtocol.TradeAggregationsPage, error) + Trades(request TradeRequest) (hProtocol.TradesPage, error) + Fund(addr string) (hProtocol.Transaction, error) + StreamTransactions(ctx context.Context, request TransactionRequest, handler TransactionHandler) error + StreamTrades(ctx context.Context, request TradeRequest, handler TradeHandler) error + StreamEffects(ctx context.Context, request EffectRequest, handler EffectHandler) error + StreamOperations(ctx context.Context, request OperationRequest, handler OperationHandler) error + StreamPayments(ctx context.Context, request OperationRequest, handler OperationHandler) error + StreamOffers(ctx context.Context, request OfferRequest, handler OfferHandler) error + StreamLedgers(ctx context.Context, request LedgerRequest, handler LedgerHandler) error + StreamOrderBooks(ctx context.Context, request OrderBookRequest, handler OrderBookHandler) error + Root() (hProtocol.Root, error) + NextAccountsPage(hProtocol.AccountsPage) (hProtocol.AccountsPage, error) + NextAssetsPage(hProtocol.AssetsPage) (hProtocol.AssetsPage, error) + PrevAssetsPage(hProtocol.AssetsPage) (hProtocol.AssetsPage, error) + NextLedgersPage(hProtocol.LedgersPage) (hProtocol.LedgersPage, error) + PrevLedgersPage(hProtocol.LedgersPage) (hProtocol.LedgersPage, error) + NextEffectsPage(effects.EffectsPage) (effects.EffectsPage, error) + PrevEffectsPage(effects.EffectsPage) (effects.EffectsPage, error) + NextTransactionsPage(hProtocol.TransactionsPage) (hProtocol.TransactionsPage, error) + PrevTransactionsPage(hProtocol.TransactionsPage) (hProtocol.TransactionsPage, error) + 
+	NextOperationsPage(operations.OperationsPage) (operations.OperationsPage, error)
+	PrevOperationsPage(operations.OperationsPage) (operations.OperationsPage, error)
+	NextPaymentsPage(operations.OperationsPage) (operations.OperationsPage, error)
+	PrevPaymentsPage(operations.OperationsPage) (operations.OperationsPage, error)
+	NextOffersPage(hProtocol.OffersPage) (hProtocol.OffersPage, error)
+	PrevOffersPage(hProtocol.OffersPage) (hProtocol.OffersPage, error)
+	NextTradesPage(hProtocol.TradesPage) (hProtocol.TradesPage, error)
+	PrevTradesPage(hProtocol.TradesPage) (hProtocol.TradesPage, error)
+	HomeDomainForAccount(aid string) (string, error)
+	NextTradeAggregationsPage(hProtocol.TradeAggregationsPage) (hProtocol.TradeAggregationsPage, error)
+	PrevTradeAggregationsPage(hProtocol.TradeAggregationsPage) (hProtocol.TradeAggregationsPage, error)
+	LiquidityPoolDetail(request LiquidityPoolRequest) (hProtocol.LiquidityPool, error)
+	LiquidityPools(request LiquidityPoolsRequest) (hProtocol.LiquidityPoolsPage, error)
+	NextLiquidityPoolsPage(hProtocol.LiquidityPoolsPage) (hProtocol.LiquidityPoolsPage, error)
+	PrevLiquidityPoolsPage(hProtocol.LiquidityPoolsPage) (hProtocol.LiquidityPoolsPage, error)
+}
+
+// DefaultTestNetClient is a default client to connect to test network.
+var DefaultTestNetClient = &Client{
+	HorizonURL: "https://horizon-testnet.stellar.org/",
+	HTTP: http.DefaultClient,
+	horizonTimeout: HorizonTimeout,
+}
+
+// DefaultPublicNetClient is a default client to connect to public network.
+var DefaultPublicNetClient = &Client{
+	HorizonURL: "https://horizon.stellar.org/",
+	HTTP: http.DefaultClient,
+	horizonTimeout: HorizonTimeout,
+}
+
+// HorizonRequest contains methods implemented by request structs for horizon endpoints.
+// Action needed in release: horizonclient-v8.0.0: remove BuildURL()
+type HorizonRequest interface {
+	BuildURL() (string, error)
+	HTTPRequest(horizonURL string) (*http.Request, error)
+}
+
+// AccountsRequest struct contains data for making requests to the accounts endpoint of a horizon server.
+// Either "Signer" or "Asset" fields should be set when retrieving Accounts.
+// At the moment, you can't use both filters at the same time.
+type AccountsRequest struct {
+	Signer string
+	Asset string
+	Sponsor string
+	LiquidityPool string
+	Order Order
+	Cursor string
+	Limit uint
+}
+
+// AccountRequest struct contains data for making requests to the show account endpoint of a horizon server.
+// "AccountID" and "DataKey" fields should both be set when retrieving AccountData.
+// When getting the AccountDetail, only "AccountID" needs to be set.
+type AccountRequest struct {
+	AccountID string
+	DataKey string
+}
+
+// EffectRequest struct contains data for getting effects from a horizon server.
+// "ForAccount", "ForLedger", "ForOperation" and "ForTransaction": Not more than one of these
+// can be set at a time. If none are set, the default is to return all effects.
+// The query parameters (Order, Cursor and Limit) are optional. All or none can be set.
+type EffectRequest struct {
+	ForAccount string
+	ForLedger string
+	ForLiquidityPool string
+	ForOperation string
+	ForTransaction string
+	Order Order
+	Cursor string
+	Limit uint
+}
+
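+// An illustrative sketch of the account request types above (addresses are
+// placeholders): AccountsRequest filters the accounts collection, while
+// AccountRequest targets a single account or one of its data entries.
+//
+//	accounts, err := client.Accounts(AccountsRequest{Signer: "G..."})
+//	account, err := client.AccountDetail(AccountRequest{AccountID: "G..."})
+//	data, err := client.AccountData(AccountRequest{AccountID: "G...", DataKey: "config.memo_required"})
+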
+// AssetRequest struct contains data for getting asset details from a horizon server.
+// If "ForAssetCode" and "ForAssetIssuer" are not set, it returns all assets.
+// The query parameters (Order, Cursor and Limit) are optional. All or none can be set.
+type AssetRequest struct {
+	ForAssetCode string
+	ForAssetIssuer string
+	Order Order
+	Cursor string
+	Limit uint
+}
+
+// LedgerRequest struct contains data for getting ledger details from a horizon server.
+// The query parameters (Order, Cursor and Limit) are optional. All or none can be set.
+type LedgerRequest struct {
+	Order Order
+	Cursor string
+	Limit uint
+	forSequence uint32
+}
+
+type feeStatsRequest struct {
+	endpoint string
+}
+
+// OfferRequest struct contains data for getting offers made by an account from a horizon server.
+// The query parameters (Order, Cursor and Limit) are optional. All or none can be set.
+type OfferRequest struct {
+	OfferID string
+	ForAccount string
+	Selling string
+	Seller string
+	Buying string
+	Order Order
+	Cursor string
+	Limit uint
+}
+
+// OperationRequest struct contains data for getting operation details from a horizon server.
+// "ForAccount", "ForLedger", "ForTransaction": Only one of these can be set at a time. If none
+// are provided, the default is to return all operations.
+// The query parameters (Order, Cursor, Limit and IncludeFailed) are optional. All or none can be set.
+type OperationRequest struct {
+	ForAccount string
+	ForClaimableBalance string
+	ForLedger uint
+	ForLiquidityPool string
+	ForTransaction string
+	forOperationID string
+	Order Order
+	Cursor string
+	Limit uint
+	IncludeFailed bool
+	Join string
+	endpoint string
+}
+
+type submitRequest struct {
+	endpoint string
+	transactionXdr string
+}
+
+// TransactionRequest struct contains data for getting transaction details from a horizon server.
+// "ForAccount", "ForClaimableBalance", "ForLedger": Only one of these can be set at a time.
+// If none are provided, the default is to return all transactions.
+// The query parameters (Order, Cursor, Limit and IncludeFailed) are optional. All or none can be set.
+type TransactionRequest struct {
+	ForAccount string
+	ForClaimableBalance string
+	ForLedger uint
+	ForLiquidityPool string
+	forTransactionHash string
+	Order Order
+	Cursor string
+	Limit uint
+	IncludeFailed bool
+}
+
+// OrderBookRequest struct contains data for getting the orderbook for an asset pair from a horizon server.
+// Limit is optional. All other parameters are required.
+type OrderBookRequest struct {
+	SellingAssetType AssetType
+	SellingAssetCode string
+	SellingAssetIssuer string
+	BuyingAssetType AssetType
+	BuyingAssetCode string
+	BuyingAssetIssuer string
+	Limit uint
+}
+
+// PathsRequest struct contains data for getting available strict receive path payments from a horizon server.
+// All the Destination related parameters are required and you need to include either
+// SourceAccount or SourceAssets.
+// See https://developers.stellar.org/api/aggregations/paths/strict-receive/
+type PathsRequest struct {
+	DestinationAccount string
+	DestinationAssetType AssetType
+	DestinationAssetCode string
+	DestinationAssetIssuer string
+	DestinationAmount string
+	SourceAccount string
+	SourceAssets string
+}
+
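+// An illustrative strict-receive query built from the struct above (all values
+// are placeholders): every Destination* field is filled and SourceAccount is
+// used as the source filter.
+//
+//	paths, err := client.Paths(PathsRequest{
+//		DestinationAccount:     "G...",
+//		DestinationAssetType:   AssetType4,
+//		DestinationAssetCode:   "USD",
+//		DestinationAssetIssuer: "G...",
+//		DestinationAmount:      "10",
+//		SourceAccount:          "G...",
+//	})
+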
+// StrictSendPathsRequest struct contains data for getting available strict send path payments from a horizon server.
+// All the Source related parameters are required and you need to include either
+// DestinationAccount or DestinationAssets.
+// See https://developers.stellar.org/api/aggregations/paths/strict-send/
+type StrictSendPathsRequest struct {
+	DestinationAccount string
+	DestinationAssets string
+	SourceAssetType AssetType
+	SourceAssetCode string
+	SourceAssetIssuer string
+	SourceAmount string
+}
+
+// TradeRequest struct contains data for getting trade details from a horizon server.
+// "ForAccount", "ForOfferID": Only one of these can be set at a time. If none are provided, the
+// default is to return all trades.
+// All other query parameters are optional. All or none can be set.
+type TradeRequest struct {
+	ForOfferID string
+	ForAccount string
+	ForLiquidityPool string
+	BaseAssetType AssetType
+	BaseAssetCode string
+	BaseAssetIssuer string
+	CounterAssetType AssetType
+	CounterAssetCode string
+	CounterAssetIssuer string
+	TradeType string
+	Order Order
+	Cursor string
+	Limit uint
+}
+
+// TradeAggregationRequest struct contains data for getting trade aggregations from a horizon server.
+// The query parameters (Order and Limit) are optional. All or none can be set.
+// All other parameters are required.
+type TradeAggregationRequest struct {
+	StartTime time.Time
+	EndTime time.Time
+	Resolution time.Duration
+	Offset time.Duration
+	BaseAssetType AssetType
+	BaseAssetCode string
+	BaseAssetIssuer string
+	CounterAssetType AssetType
+	CounterAssetCode string
+	CounterAssetIssuer string
+	Order Order
+	Limit uint
+}
+
+// ClaimableBalanceRequest contains data about claimable balances.
+// The filters are optional (all added except Asset)
+type ClaimableBalanceRequest struct {
+	ID string
+	Asset string
+	Sponsor string
+	Claimant string
+}
+
+// ServerTimeRecord contains data for the current unix time of a horizon server instance, and the local time when it was recorded.
+type ServerTimeRecord struct {
+	ServerTime int64
+	LocalTimeRecorded int64
+}
+
+// ServerTimeMap holds the ServerTimeRecord for different horizon instances.
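+// Records are keyed by horizon host and consulted when deriving transaction
+// timebounds from a server's reported time. An illustrative sketch, based on
+// the FetchTimebounds test below (100 is a seconds offset):
+//
+//	tb, err := client.FetchTimebounds(100)
+//	// tb.MaxTime is the recorded server time plus 100 seconds when a
+//	// ServerTimeRecord exists for the host; otherwise local time is used.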
+var ServerTimeMap = make(map[string]ServerTimeRecord) +var serverTimeMapMutex = &sync.Mutex{} diff --git a/clients/horizonclient/main_test.go b/clients/horizonclient/main_test.go new file mode 100644 index 0000000000..9bd0d6f5ea --- /dev/null +++ b/clients/horizonclient/main_test.go @@ -0,0 +1,2628 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/jarcoal/httpmock" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/support/clock" + "github.com/stellar/go/support/clock/clocktest" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httptest" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" + + "github.com/stretchr/testify/assert" +) + +func TestFixHTTP(t *testing.T) { + client := &Client{ + HorizonURL: "https://localhost/", + } + // No HTTP client is provided + assert.Nil(t, client.HTTP, "client HTTP is nil") + client.Root() + // When a request is made, default HTTP client is set + assert.IsType(t, client.HTTP, &http.Client{}) +} + +func TestCheckMemoRequired(t *testing.T) { + tt := assert.New(t) + + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + kp := keypair.MustParseFull("SA26PHIKZM6CXDGR472SSGUQQRYXM6S437ZNHZGRM6QA4FOPLLLFRGDX") + sourceAccount := txnbuild.NewSimpleAccount(kp.Address(), int64(0)) + + paymentMemoRequired := txnbuild.Payment{ + Destination: "GAYHAAKPAQLMGIJYMIWPDWCGUCQ5LAWY4Q7Q3IKSP57O7GUPD3NEOSEA", + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + paymentNoMemo := txnbuild.Payment{ + Destination: "GDWIRURRED6SQSZVQVVMK46PE2MOZEKHV6ZU54JG3NPVRDIF4XCXYYW4", + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + asset := txnbuild.CreditAsset{"ABCD", kp.Address()} + pathPaymentStrictSend := txnbuild.PathPaymentStrictSend{ + SendAsset: asset, + SendAmount: "10", + Destination: "GDYM6SBBGDF6ZDRM2SKGVIWM257Q4V63V3IYNDQQWPKNV4QDERS4YTLX", + DestAsset: txnbuild.NativeAsset{}, + DestMin: "1", + Path: []txnbuild.Asset{asset}, + } + + pathPaymentStrictReceive := txnbuild.PathPaymentStrictReceive{ + SendAsset: asset, + SendMax: "10", + Destination: "GD2JTIDP2JJKNIDXW4L6AU2RYFXZIUH3YFIS43PJT2467AP46CWBHSCN", + DestAsset: txnbuild.NativeAsset{}, + DestAmount: "1", + Path: []txnbuild.Asset{asset}, + } + + accountMerge := txnbuild.AccountMerge{ + Destination: "GBVZZ5XPHECNGA5SENAJP4C6ZJ7FGZ55ZZUCTFTHREZM73LKUGCQDRHR", + } + + testCases := []struct { + desc string + destination string + expected string + operations []txnbuild.Operation + mockNotFound bool + }{ + { + desc: "payment operation", + destination: "GAYHAAKPAQLMGIJYMIWPDWCGUCQ5LAWY4Q7Q3IKSP57O7GUPD3NEOSEA", + expected: "operation[0]: destination account requires a memo in the transaction", + operations: []txnbuild.Operation{ + &paymentMemoRequired, + &pathPaymentStrictReceive, + &pathPaymentStrictSend, + &accountMerge, + }, + }, + { + desc: "strict receive operation", + destination: "GD2JTIDP2JJKNIDXW4L6AU2RYFXZIUH3YFIS43PJT2467AP46CWBHSCN", + expected: "operation[1]: destination account requires a memo in the transaction", + operations: []txnbuild.Operation{ + &paymentNoMemo, + &pathPaymentStrictReceive, + }, + mockNotFound: true, + }, + { + desc: "strict send operation", + destination: "GDYM6SBBGDF6ZDRM2SKGVIWM257Q4V63V3IYNDQQWPKNV4QDERS4YTLX", + expected: 
"operation[1]: destination account requires a memo in the transaction", + operations: []txnbuild.Operation{ + &paymentNoMemo, + &pathPaymentStrictSend, + }, + mockNotFound: true, + }, + { + desc: "merge account operation", + destination: "GBVZZ5XPHECNGA5SENAJP4C6ZJ7FGZ55ZZUCTFTHREZM73LKUGCQDRHR", + expected: "operation[1]: destination account requires a memo in the transaction", + operations: []txnbuild.Operation{ + &paymentNoMemo, + &accountMerge, + }, + mockNotFound: true, + }, + { + desc: "two operations with same destination", + operations: []txnbuild.Operation{ + &paymentNoMemo, + &paymentNoMemo, + }, + mockNotFound: true, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: tc.operations, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 10), + }, + ) + tt.NoError(err) + + if len(tc.destination) > 0 { + hmock.On( + "GET", + fmt.Sprintf("https://localhost/accounts/%s/data/config.memo_required", tc.destination), + ).ReturnJSON(200, memoRequiredResponse) + } + + if tc.mockNotFound { + hmock.On( + "GET", + "https://localhost/accounts/GDWIRURRED6SQSZVQVVMK46PE2MOZEKHV6ZU54JG3NPVRDIF4XCXYYW4/data/config.memo_required", + ).ReturnString(404, notFoundResponse) + } + + err = client.checkMemoRequired(tx) + + if len(tc.expected) > 0 { + tt.Error(err) + tt.Contains(err.Error(), tc.expected) + tt.Equal(ErrAccountRequiresMemo, errors.Cause(err)) + } else { + tt.NoError(err) + } + }) + } +} + +func TestAccounts(t *testing.T) { + tt := assert.New(t) + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + accountRequest := AccountsRequest{} + _, err := client.Accounts(accountRequest) + if tt.Error(err) { + tt.Contains(err.Error(), "invalid request: no parameters") + } + + accountRequest = AccountsRequest{ + Signer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + Asset: "COP:GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + } + _, err = client.Accounts(accountRequest) + if tt.Error(err) { + tt.Contains(err.Error(), "invalid request: too many parameters") + } + + var accounts hProtocol.AccountsPage + + hmock.On( + "GET", + "https://localhost/accounts?signer=GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + ).ReturnString(200, accountsResponse) + + accountRequest = AccountsRequest{ + Signer: "GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + } + accounts, err = client.Accounts(accountRequest) + tt.NoError(err) + tt.Len(accounts.Embedded.Records, 1) + + hmock.On( + "GET", + "https://localhost/accounts?asset=COP%3AGAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + ).ReturnString(200, accountsResponse) + + accountRequest = AccountsRequest{ + Asset: "COP:GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + } + accounts, err = client.Accounts(accountRequest) + tt.NoError(err) + tt.Len(accounts.Embedded.Records, 1) + + hmock.On( + "GET", + "https://localhost/accounts?signer=GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP&cursor=GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H&limit=200&order=desc", + ).ReturnString(200, accountsResponse) + + accountRequest = AccountsRequest{ + Signer: "GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + Order: "desc", + Cursor: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + Limit: 200, + } + accounts, err = 
client.Accounts(accountRequest) + tt.NoError(err) + tt.Len(accounts.Embedded.Records, 1) + + // connection error + hmock.On( + "GET", + "https://localhost/accounts?signer=GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + ).ReturnError("http.Client error") + + accountRequest = AccountsRequest{ + Signer: "GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + } + accounts, err = client.Accounts(accountRequest) + if tt.Error(err) { + tt.Contains(err.Error(), "http.Client error") + _, ok := err.(*Error) + tt.Equal(ok, false) + } +} + +func TestAccountDetail(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // no parameters + accountRequest := AccountRequest{} + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + ).ReturnString(200, accountResponse) + + _, err := client.AccountDetail(accountRequest) + // error case: no account id + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "no account ID provided") + } + + // wrong parameters + accountRequest = AccountRequest{DataKey: "test"} + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + ).ReturnString(200, accountResponse) + + _, err = client.AccountDetail(accountRequest) + // error case: no account id + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "no account ID provided") + } + + accountRequest = AccountRequest{AccountID: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + + // happy path + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + ).ReturnString(200, accountResponse) + + account, err := client.AccountDetail(accountRequest) + + if assert.NoError(t, err) { + assert.Equal(t, account.ID, "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU") + assert.Equal(t, account.Signers[0].Key, "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU") + assert.Equal(t, account.Signers[0].Type, "ed25519_public_key") + assert.Equal(t, account.Data["test"], "dGVzdA==") + balance, balanceErr := account.GetNativeBalance() + assert.Nil(t, balanceErr) + assert.Equal(t, balance, "9999.9999900") + assert.NotNil(t, account.LastModifiedTime) + assert.Equal(t, "2019-03-05 13:23:50 +0000 UTC", account.LastModifiedTime.String()) + assert.Equal(t, uint32(103307), account.LastModifiedLedger) + } + + // failure response + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + ).ReturnString(404, notFoundResponse) + + account, err = client.AccountDetail(accountRequest) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Resource Missing") + } + + // connection error + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + ).ReturnError("http.Client error") + + _, err = client.AccountDetail(accountRequest) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "http.Client error") + _, ok := err.(*Error) + assert.Equal(t, ok, false) + } +} + +func TestAccountData(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // no parameters + accountRequest := AccountRequest{} + hmock.On( + "GET", + 
"https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/data/test", + ).ReturnString(200, accountResponse) + + _, err := client.AccountData(accountRequest) + // error case: few parameters + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "too few parameters") + } + + // wrong parameters + accountRequest = AccountRequest{AccountID: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/data/test", + ).ReturnString(200, accountResponse) + + _, err = client.AccountData(accountRequest) + // error case: few parameters + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "too few parameters") + } + + accountRequest = AccountRequest{AccountID: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", DataKey: "test"} + + // happy path + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/data/test", + ).ReturnString(200, accountData) + + data, err := client.AccountData(accountRequest) + if assert.NoError(t, err) { + assert.Equal(t, data.Value, "dGVzdA==") + } + +} + +func TestEffectsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + effectRequest := EffectRequest{} + + // all effects + hmock.On( + "GET", + "https://localhost/effects", + ).ReturnString(200, effectsResponse) + + effs, err := client.Effects(effectRequest) + if assert.NoError(t, err) { + assert.IsType(t, effs, effects.EffectsPage{}) + links := effs.Links + assert.Equal(t, links.Self.Href, "https://horizon-testnet.stellar.org/operations/43989725060534273/effects?cursor=&limit=10&order=asc") + + assert.Equal(t, links.Next.Href, "https://horizon-testnet.stellar.org/operations/43989725060534273/effects?cursor=43989725060534273-3&limit=10&order=asc") + + assert.Equal(t, links.Prev.Href, "https://horizon-testnet.stellar.org/operations/43989725060534273/effects?cursor=43989725060534273-1&limit=10&order=desc") + + adEffect := effs.Embedded.Records[0] + acEffect := effs.Embedded.Records[1] + arEffect := effs.Embedded.Records[2] + assert.IsType(t, adEffect, effects.AccountDebited{}) + assert.IsType(t, acEffect, effects.AccountCredited{}) + // account_removed effect does not have a struct. 
Defaults to effects.Base + assert.IsType(t, arEffect, effects.Base{}) + + c, ok := acEffect.(effects.AccountCredited) + assert.Equal(t, ok, true) + assert.Equal(t, c.ID, "0043989725060534273-0000000002") + assert.Equal(t, c.Amount, "9999.9999900") + assert.Equal(t, c.Account, "GBO7LQUWCC7M237TU2PAXVPOLLYNHYCYYFCLVMX3RBJCML4WA742X3UB") + assert.Equal(t, c.Asset.Type, "native") + } + + effectRequest = EffectRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/effects", + ).ReturnString(200, effectsResponse) + + effs, err = client.Effects(effectRequest) + if assert.NoError(t, err) { + assert.IsType(t, effs, effects.EffectsPage{}) + } + + // too many parameters + effectRequest = EffectRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", ForLedger: "123"} + hmock.On( + "GET", + "https://localhost/effects", + ).ReturnString(200, effectsResponse) + + _, err = client.Effects(effectRequest) + // error case + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "too many parameters") + } +} + +func TestAssetsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + assetRequest := AssetRequest{} + + // all assets + hmock.On( + "GET", + "https://localhost/assets", + ).ReturnString(200, assetsResponse) + + assets, err := client.Assets(assetRequest) + if assert.NoError(t, err) { + assert.IsType(t, assets, hProtocol.AssetsPage{}) + record := assets.Embedded.Records[0] + assert.Equal(t, record.Asset.Code, "ABC") + assert.Equal(t, record.Asset.Issuer, "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU") + assert.Equal(t, record.PT, "1") + assert.Equal(t, record.NumAccounts, int32(3)) + assert.Equal(t, record.Amount, "105.0000000") + assert.Equal(t, record.Flags.AuthRevocable, false) + assert.Equal(t, record.Flags.AuthRequired, true) + assert.Equal(t, record.Flags.AuthImmutable, false) + } + +} + +func TestFeeStats(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // happy path + hmock.On( + "GET", + "https://localhost/fee_stats", + ).ReturnString(200, feesResponse) + + fees, err := client.FeeStats() + + if assert.NoError(t, err) { + assert.Equal(t, uint32(22606298), fees.LastLedger) + assert.Equal(t, int64(100), fees.LastLedgerBaseFee) + assert.Equal(t, 0.97, fees.LedgerCapacityUsage) + assert.Equal(t, int64(130), fees.MaxFee.Min) + assert.Equal(t, int64(8000), fees.MaxFee.Max) + assert.Equal(t, int64(250), fees.MaxFee.Mode) + assert.Equal(t, int64(150), fees.MaxFee.P10) + assert.Equal(t, int64(200), fees.MaxFee.P20) + assert.Equal(t, int64(300), fees.MaxFee.P30) + assert.Equal(t, int64(400), fees.MaxFee.P40) + assert.Equal(t, int64(500), fees.MaxFee.P50) + assert.Equal(t, int64(1000), fees.MaxFee.P60) + assert.Equal(t, int64(2000), fees.MaxFee.P70) + assert.Equal(t, int64(3000), fees.MaxFee.P80) + assert.Equal(t, int64(4000), fees.MaxFee.P90) + assert.Equal(t, int64(5000), fees.MaxFee.P95) + assert.Equal(t, int64(8000), fees.MaxFee.P99) + + assert.Equal(t, int64(100), fees.FeeCharged.Min) + assert.Equal(t, int64(100), fees.FeeCharged.Max) + assert.Equal(t, int64(100), fees.FeeCharged.Mode) + assert.Equal(t, int64(100), fees.FeeCharged.P10) + assert.Equal(t, int64(100), fees.FeeCharged.P20) + assert.Equal(t, int64(100), fees.FeeCharged.P30) + assert.Equal(t, int64(100), 
fees.FeeCharged.P40) + assert.Equal(t, int64(100), fees.FeeCharged.P50) + assert.Equal(t, int64(100), fees.FeeCharged.P60) + assert.Equal(t, int64(100), fees.FeeCharged.P70) + assert.Equal(t, int64(100), fees.FeeCharged.P80) + assert.Equal(t, int64(100), fees.FeeCharged.P90) + assert.Equal(t, int64(100), fees.FeeCharged.P95) + assert.Equal(t, int64(100), fees.FeeCharged.P99) + } +} + +func TestOfferRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + offersRequest := OfferRequest{ForAccount: "GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F"} + + // account offers + hmock.On( + "GET", + "https://localhost/accounts/GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F/offers", + ).ReturnString(200, offersResponse) + + offers, err := client.Offers(offersRequest) + if assert.NoError(t, err) { + assert.IsType(t, offers, hProtocol.OffersPage{}) + record := offers.Embedded.Records[0] + assert.Equal(t, record.ID, int64(432323)) + assert.Equal(t, record.Seller, "GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F") + assert.Equal(t, record.PT, "432323") + assert.Equal(t, record.Selling.Code, "ABC") + assert.Equal(t, record.Amount, "1999979.8700000") + assert.Equal(t, record.LastModifiedLedger, int32(103307)) + } + + hmock.On( + "GET", + "https://localhost/offers?seller=GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + ).ReturnString(200, offersResponse) + + offersRequest = OfferRequest{ + Seller: "GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + } + + offers, err = client.Offers(offersRequest) + if assert.NoError(t, err) { + assert.Len(t, offers.Embedded.Records, 1) + } + + hmock.On( + "GET", + "https://localhost/offers?buying=COP%3AGDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + ).ReturnString(200, offersResponse) + + offersRequest = OfferRequest{ + Buying: "COP:GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + } + + offers, err = client.Offers(offersRequest) + if assert.NoError(t, err) { + assert.Len(t, offers.Embedded.Records, 1) + } + + hmock.On( + "GET", + "https://localhost/offers?selling=EUR%3AGDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + ).ReturnString(200, offersResponse) + + offersRequest = OfferRequest{ + Selling: "EUR:GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + } + + offers, err = client.Offers(offersRequest) + if assert.NoError(t, err) { + assert.Len(t, offers.Embedded.Records, 1) + } + + hmock.On( + "GET", + "https://localhost/offers?selling=EUR%3AGDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + ).ReturnString(200, offersResponse) + + offersRequest = OfferRequest{ + Selling: "EUR:GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + } + + offers, err = client.Offers(offersRequest) + if assert.NoError(t, err) { + assert.Len(t, offers.Embedded.Records, 1) + } + + hmock.On( + "GET", + "https://localhost/offers?buying=EUR%3AGDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F&seller=GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F&selling=EUR%3AGDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F&cursor=30&limit=20&order=desc", + ).ReturnString(200, offersResponse) + + offersRequest = OfferRequest{ + Seller: "GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + Buying: "EUR:GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + Selling: "EUR:GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + Order: "desc", + Limit: 20, + Cursor: "30", + } + + offers, err 
= client.Offers(offersRequest) + if assert.NoError(t, err) { + assert.Len(t, offers.Embedded.Records, 1) + } +} +func TestOfferDetailsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // account offers + hmock.On( + "GET", + "https://localhost/offers/5635", + ).ReturnString(200, offerResponse) + + record, err := client.OfferDetails("5635") + if assert.NoError(t, err) { + assert.IsType(t, record, hProtocol.Offer{}) + assert.Equal(t, record.ID, int64(5635)) + assert.Equal(t, record.Seller, "GD6UOZ3FGFI5L2X6F52YPJ6ICSW375BNBZIQC4PCLSEOO6SMX7CUS5MB") + assert.Equal(t, record.PT, "5635") + assert.Equal(t, record.Selling.Type, "native") + assert.Equal(t, record.Buying.Code, "AstroDollar") + assert.Equal(t, record.Buying.Issuer, "GDA2EHKPDEWZTAL6B66FO77HMOZL3RHZTIJO7KJJK5RQYSDUXEYMPJYY") + assert.Equal(t, record.Amount, "100.0000000") + assert.Equal(t, record.LastModifiedLedger, int32(356183)) + } + + _, err = client.OfferDetails("S6ES") + assert.Error(t, err) + assert.EqualError(t, err, "invalid offer ID provided") + + _, err = client.OfferDetails("") + assert.Error(t, err) + assert.EqualError(t, err, "no offer ID provided") +} + +func TestOperationsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + operationRequest := OperationRequest{Join: "transactions"} + + // all operations + hmock.On( + "GET", + "https://localhost/operations?join=transactions", + ).ReturnString(200, multipleOpsResponse) + + ops, err := client.Operations(operationRequest) + if assert.NoError(t, err) { + assert.IsType(t, ops, operations.OperationsPage{}) + links := ops.Links + assert.Equal(t, links.Self.Href, "https://horizon.stellar.org/transactions/b63307ef92bb253df13361a72095156d19fc0713798bc2e6c3bd9ee63cc3ca53/operations?cursor=&limit=10&order=asc") + + assert.Equal(t, links.Next.Href, "https://horizon.stellar.org/transactions/b63307ef92bb253df13361a72095156d19fc0713798bc2e6c3bd9ee63cc3ca53/operations?cursor=98447788659970049&limit=10&order=asc") + + assert.Equal(t, links.Prev.Href, "https://horizon.stellar.org/transactions/b63307ef92bb253df13361a72095156d19fc0713798bc2e6c3bd9ee63cc3ca53/operations?cursor=98447788659970049&limit=10&order=desc") + + paymentOp := ops.Embedded.Records[0] + mangageOfferOp := ops.Embedded.Records[1] + createAccountOp := ops.Embedded.Records[2] + assert.IsType(t, paymentOp, operations.Payment{}) + assert.IsType(t, mangageOfferOp, operations.ManageSellOffer{}) + assert.IsType(t, createAccountOp, operations.CreateAccount{}) + + c, ok := createAccountOp.(operations.CreateAccount) + assert.Equal(t, ok, true) + assert.Equal(t, c.ID, "98455906148208641") + assert.Equal(t, c.StartingBalance, "2.0000000") + assert.Equal(t, c.TransactionHash, "ade3c60f1b581e8744596673d95bffbdb8f68f199e0e2f7d63b7c3af9fd8d868") + } + + // all payments + hmock.On( + "GET", + "https://localhost/payments?join=transactions", + ).ReturnString(200, paymentsResponse) + + ops, err = client.Payments(operationRequest) + if assert.NoError(t, err) { + assert.IsType(t, ops, operations.OperationsPage{}) + links := ops.Links + assert.Equal(t, links.Self.Href, "https://horizon-testnet.stellar.org/payments?cursor=&limit=2&order=desc") + + assert.Equal(t, links.Next.Href, "https://horizon-testnet.stellar.org/payments?cursor=2024660468248577&limit=2&order=desc") + + assert.Equal(t, links.Prev.Href, 
"https://horizon-testnet.stellar.org/payments?cursor=2024660468256769&limit=2&order=asc") + + createAccountOp := ops.Embedded.Records[0] + paymentOp := ops.Embedded.Records[1] + + assert.IsType(t, paymentOp, operations.Payment{}) + assert.IsType(t, createAccountOp, operations.CreateAccount{}) + + p, ok := paymentOp.(operations.Payment) + assert.Equal(t, ok, true) + assert.Equal(t, p.ID, "2024660468248577") + assert.Equal(t, p.Amount, "177.0000000") + assert.Equal(t, p.TransactionHash, "87d7a29539e7902b14a6c720094856f74a77128ab332d8629432c5a176a9fe7b") + } + + // operations for account + operationRequest = OperationRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/operations", + ).ReturnString(200, multipleOpsResponse) + + ops, err = client.Operations(operationRequest) + if assert.NoError(t, err) { + assert.IsType(t, ops, operations.OperationsPage{}) + } + + // too many parameters + operationRequest = OperationRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", ForLedger: 123} + hmock.On( + "GET", + "https://localhost/operations", + ).ReturnString(200, multipleOpsResponse) + + _, err = client.Operations(operationRequest) + // error case + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "too many parameters") + } + + // operation detail + opID := "1103965508866049" + hmock.On( + "GET", + "https://localhost/operations/1103965508866049", + ).ReturnString(200, opsResponse) + + record, err := client.OperationDetail(opID) + if assert.NoError(t, err) { + assert.Equal(t, record.GetType(), "change_trust") + c, ok := record.(operations.ChangeTrust) + assert.Equal(t, ok, true) + assert.Equal(t, c.ID, "1103965508866049") + assert.Equal(t, c.TransactionSuccessful, true) + assert.Equal(t, c.TransactionHash, "93c2755ec61c8b01ac11daa4d8d7a012f56be172bdfcaf77a6efd683319ca96d") + assert.Equal(t, c.Asset.Code, "UAHd") + assert.Equal(t, c.Asset.Issuer, "GDDETPGV4OJVNBTB6GQICCPGH5DZRYYB7XQCSAZO2ZQH6HO7SWXHKKJN") + assert.Equal(t, c.Limit, "922337203685.4775807") + assert.Equal(t, c.Trustee, "GDDETPGV4OJVNBTB6GQICCPGH5DZRYYB7XQCSAZO2ZQH6HO7SWXHKKJN") + assert.Equal(t, c.Trustor, "GBMVGXJXJ7ZBHIWMXHKR6IVPDTYKHJPXC2DHZDPJBEZWZYAC7NKII7IB") + assert.Equal(t, c.Links.Self.Href, "https://horizon-testnet.stellar.org/operations/1103965508866049") + assert.Equal(t, c.Links.Effects.Href, "https://horizon-testnet.stellar.org/operations/1103965508866049/effects") + assert.Equal(t, c.Links.Transaction.Href, "https://horizon-testnet.stellar.org/transactions/93c2755ec61c8b01ac11daa4d8d7a012f56be172bdfcaf77a6efd683319ca96d") + + assert.Equal(t, c.Links.Succeeds.Href, "https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=1103965508866049") + + assert.Equal(t, c.Links.Precedes.Href, "https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=1103965508866049") + } + +} + +func TestSubmitTransactionXDRRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + txXdr := `AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAZAAABD0AAuV/AAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAyTBGxOgfSApppsTnb/YRr6gOR8WT0LZNrhLh4y3FCgoAAAAXSHboAAAAAAAAAAABhlbgnAAAAEAivKe977CQCxMOKTuj+cWTFqc2OOJU8qGr9afrgu2zDmQaX5Q0cNshc3PiBwe0qw/+D/qJk5QqM5dYeSUGeDQP` + + // failure response + hmock. + On("POST", "https://localhost/transactions"). 
+ ReturnString(400, transactionFailure) + + _, err := client.SubmitTransactionXDR(txXdr) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := errors.Cause(err).(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Transaction Failed") + } + + // connection error + hmock. + On("POST", "https://localhost/transactions"). + ReturnError("http.Client error") + + _, err = client.SubmitTransactionXDR(txXdr) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "http.Client error") + _, ok := err.(*Error) + assert.Equal(t, ok, false) + } + + // successful tx + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, txXdr) + return httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + resp, err := client.SubmitTransactionXDR(txXdr) + if assert.NoError(t, err) { + assert.IsType(t, resp, hProtocol.Transaction{}) + assert.Equal(t, resp.Links.Transaction.Href, "https://horizon-testnet.stellar.org/transactions/bcc7a97264dca0a51a63f7ea971b5e7458e334489673078bb2a34eb0cce910ca") + assert.Equal(t, resp.Hash, "bcc7a97264dca0a51a63f7ea971b5e7458e334489673078bb2a34eb0cce910ca") + assert.Equal(t, resp.Ledger, int32(354811)) + assert.Equal(t, resp.EnvelopeXdr, txXdr) + assert.Equal(t, resp.ResultXdr, "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=") + assert.Equal(t, resp.ResultMetaXdr, `AAAAAQAAAAIAAAADAAVp+wAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwACBP/TuycHAAABD0AAuV+AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAVp+wAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwACBP/TuycHAAABD0AAuV/AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMABWn7AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAIE/9O7JwcAAAEPQAC5X8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABWn7AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAIE+gGdbQcAAAEPQAC5X8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAABWn7AAAAAAAAAADJMEbE6B9ICmmmxOdv9hGvqA5HxZPQtk2uEuHjLcUKCgAAABdIdugAAAVp+wAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==`) + } +} + +func TestSubmitTransactionRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + kp := keypair.MustParseFull("SA26PHIKZM6CXDGR472SSGUQQRYXM6S437ZNHZGRM6QA4FOPLLLFRGDX") + sourceAccount := txnbuild.NewSimpleAccount(kp.Address(), int64(0)) + + payment := txnbuild.Payment{ + Destination: kp.Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&payment}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 10), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + assert.NoError(t, err) + + txXdr := "AAAAAgAAAAAFNPMlEPLB6oWPI/Zl1sBEXxwv93ChUnv7KQK9KxrTtgAAAGQAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAKAAAAAAAAAAEAAAAAAAAAAQAAAAAFNPMlEPLB6oWPI/Zl1sBEXxwv93ChUnv7KQK9KxrTtgAAAAAAAAAABfXhAAAAAAAAAAABKxrTtgAAAECmVMsI0W6JmfJNeLzgH+PseZA2AgYGZl8zaHgkOvhZw65Hj9OaCdw6yssG55qu7X2sauJAwfxaoTL4gwbmH94H" + // successful tx with config.memo_required not found + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, txXdr) + return 
httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnString(404, notFoundResponse) + + _, err = client.SubmitTransaction(tx) + assert.NoError(t, err) + + // memo required - does not submit transaction + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnJSON(200, memoRequiredResponse) + + _, err = client.SubmitTransaction(tx) + assert.Error(t, err) + assert.Equal(t, ErrAccountRequiresMemo, errors.Cause(err)) +} + +func TestSubmitTransactionRequestMuxedAccounts(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + kp := keypair.MustParseFull("SA26PHIKZM6CXDGR472SSGUQQRYXM6S437ZNHZGRM6QA4FOPLLLFRGDX") + accountID := xdr.MustAddress(kp.Address()) + mx := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabe, + Ed25519: *accountID.Ed25519, + }, + } + sourceAccount := txnbuild.NewSimpleAccount(mx.Address(), int64(0)) + + payment := txnbuild.Payment{ + Destination: kp.Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&payment}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 10), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + assert.NoError(t, err) + + txXdr := "AAAAAgAAAQAAAAAAyv66vgU08yUQ8sHqhY8j9mXWwERfHC/3cKFSe/spAr0rGtO2AAAAZAAAAAAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAoAAAAAAAAAAQAAAAAAAAABAAAAAAU08yUQ8sHqhY8j9mXWwERfHC/3cKFSe/spAr0rGtO2AAAAAAAAAAAF9eEAAAAAAAAAAAErGtO2AAAAQJvQkE9UVo/mfFBl/8ZPTzSUyVO4nvW0BYfnbowoBPEdRfLOLQz28v6sBKQc2b86NUfVHN5TQVo3+jH4nK9wVgk=" + // successful tx with config.memo_required not found + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, txXdr) + return httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnString(404, notFoundResponse) + + _, err = client.SubmitTransaction(tx) + assert.NoError(t, err) + + // memo required - does not submit transaction + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnJSON(200, memoRequiredResponse) + + _, err = client.SubmitTransaction(tx) + assert.Error(t, err) + assert.Equal(t, ErrAccountRequiresMemo, errors.Cause(err)) +} + +func TestSubmitFeeBumpTransaction(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + kp := keypair.MustParseFull("SA26PHIKZM6CXDGR472SSGUQQRYXM6S437ZNHZGRM6QA4FOPLLLFRGDX") + sourceAccount := txnbuild.NewSimpleAccount(kp.Address(), int64(0)) + + payment := txnbuild.Payment{ + Destination: kp.Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&payment}, + 
BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 10), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + assert.NoError(t, err) + + feeBumpKP := keypair.MustParseFull("SA5ZEFDVFZ52GRU7YUGR6EDPBNRU2WLA6IQFQ7S2IH2DG3VFV3DOMV2Q") + feeBumpTx, err := txnbuild.NewFeeBumpTransaction(txnbuild.FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: feeBumpKP.Address(), + BaseFee: txnbuild.MinBaseFee * 2, + }) + feeBumpTx, err = feeBumpTx.Sign(network.TestNetworkPassphrase, feeBumpKP) + feeBumpTxB64, err := feeBumpTx.Base64() + assert.NoError(t, err) + + // successful tx with config.memo_required not found + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, feeBumpTxB64) + return httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnString(404, notFoundResponse) + + _, err = client.SubmitFeeBumpTransaction(feeBumpTx) + assert.NoError(t, err) + + // memo required - does not submit transaction + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnJSON(200, memoRequiredResponse) + + _, err = client.SubmitFeeBumpTransaction(feeBumpTx) + assert.Error(t, err) + assert.Equal(t, ErrAccountRequiresMemo, errors.Cause(err)) +} + +func TestSubmitTransactionWithOptionsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + kp := keypair.MustParseFull("SA26PHIKZM6CXDGR472SSGUQQRYXM6S437ZNHZGRM6QA4FOPLLLFRGDX") + sourceAccount := txnbuild.NewSimpleAccount(kp.Address(), int64(0)) + + payment := txnbuild.Payment{ + Destination: kp.Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&payment}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 10), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + assert.NoError(t, err) + + hmock. + On("POST", "https://localhost/transactions"). + ReturnString(400, transactionFailure) + + _, err = client.SubmitTransactionWithOptions(tx, SubmitTxOpts{SkipMemoRequiredCheck: true}) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := errors.Cause(err).(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Transaction Failed") + } + + // connection error + hmock. + On("POST", "https://localhost/transactions"). 
+ ReturnError("http.Client error") + + _, err = client.SubmitTransactionWithOptions(tx, SubmitTxOpts{SkipMemoRequiredCheck: true}) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "http.Client error") + _, ok := err.(*Error) + assert.Equal(t, ok, false) + } + + txXdr := "AAAAAgAAAAAFNPMlEPLB6oWPI/Zl1sBEXxwv93ChUnv7KQK9KxrTtgAAAGQAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAKAAAAAAAAAAEAAAAAAAAAAQAAAAAFNPMlEPLB6oWPI/Zl1sBEXxwv93ChUnv7KQK9KxrTtgAAAAAAAAAABfXhAAAAAAAAAAABKxrTtgAAAECmVMsI0W6JmfJNeLzgH+PseZA2AgYGZl8zaHgkOvhZw65Hj9OaCdw6yssG55qu7X2sauJAwfxaoTL4gwbmH94H" + // successful tx + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, txXdr) + return httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + _, err = client.SubmitTransactionWithOptions(tx, SubmitTxOpts{SkipMemoRequiredCheck: true}) + assert.NoError(t, err) + + // successful tx with config.memo_required not found + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, txXdr) + return httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnString(404, notFoundResponse) + + _, err = client.SubmitTransactionWithOptions(tx, SubmitTxOpts{SkipMemoRequiredCheck: false}) + assert.NoError(t, err) + + // memo required - does not submit transaction + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnJSON(200, memoRequiredResponse) + + _, err = client.SubmitTransactionWithOptions(tx, SubmitTxOpts{SkipMemoRequiredCheck: false}) + assert.Error(t, err) + assert.Equal(t, ErrAccountRequiresMemo, errors.Cause(err)) + + // skips memo check if tx includes a memo + txXdr = "AAAAAgAAAAAFNPMlEPLB6oWPI/Zl1sBEXxwv93ChUnv7KQK9KxrTtgAAAGQAAAAAAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAKAAAAAQAAAApIZWxsb1dvcmxkAAAAAAABAAAAAAAAAAEAAAAABTTzJRDyweqFjyP2ZdbARF8cL/dwoVJ7+ykCvSsa07YAAAAAAAAAAAX14QAAAAAAAAAAASsa07YAAABA7rDHZ+HcBIQbWByMZL3aT231WuwjOhxvb0c1i3vPzArUCE+HdCIJXq6Mk/xdhJj6QEEJrg15uAxke3P3k2vWCw==" + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, txXdr) + return httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + tx, err = txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&payment}, + BaseFee: txnbuild.MinBaseFee, + Memo: txnbuild.MemoText("HelloWorld"), + Timebounds: txnbuild.NewTimebounds(0, 10), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + assert.NoError(t, err) + + _, err = client.SubmitTransactionWithOptions(tx, SubmitTxOpts{SkipMemoRequiredCheck: false}) + assert.NoError(t, err) +} + +func TestSubmitFeeBumpTransactionWithOptions(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + kp := keypair.MustParseFull("SA26PHIKZM6CXDGR472SSGUQQRYXM6S437ZNHZGRM6QA4FOPLLLFRGDX") + sourceAccount := txnbuild.NewSimpleAccount(kp.Address(), int64(0)) + + payment := txnbuild.Payment{ + Destination: 
kp.Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&payment}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 10), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + assert.NoError(t, err) + + feeBumpKP := keypair.MustParseFull("SA5ZEFDVFZ52GRU7YUGR6EDPBNRU2WLA6IQFQ7S2IH2DG3VFV3DOMV2Q") + feeBumpTx, err := txnbuild.NewFeeBumpTransaction(txnbuild.FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: feeBumpKP.Address(), + BaseFee: txnbuild.MinBaseFee * 2, + }) + feeBumpTx, err = feeBumpTx.Sign(network.TestNetworkPassphrase, feeBumpKP) + feeBumpTxB64, err := feeBumpTx.Base64() + assert.NoError(t, err) + + hmock. + On("POST", "https://localhost/transactions"). + ReturnString(400, transactionFailure) + + _, err = client.SubmitFeeBumpTransactionWithOptions(feeBumpTx, SubmitTxOpts{SkipMemoRequiredCheck: true}) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := errors.Cause(err).(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Transaction Failed") + } + + // connection error + hmock. + On("POST", "https://localhost/transactions"). + ReturnError("http.Client error") + + _, err = client.SubmitFeeBumpTransactionWithOptions(feeBumpTx, SubmitTxOpts{SkipMemoRequiredCheck: true}) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "http.Client error") + _, ok := err.(*Error) + assert.Equal(t, ok, false) + } + + // successful tx + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, feeBumpTxB64) + return httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + _, err = client.SubmitFeeBumpTransactionWithOptions(feeBumpTx, SubmitTxOpts{SkipMemoRequiredCheck: true}) + assert.NoError(t, err) + + // successful tx with config.memo_required not found + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, feeBumpTxB64) + return httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnString(404, notFoundResponse) + + _, err = client.SubmitFeeBumpTransactionWithOptions(feeBumpTx, SubmitTxOpts{SkipMemoRequiredCheck: false}) + assert.NoError(t, err) + + // memo required - does not submit transaction + hmock.On( + "GET", + "https://localhost/accounts/GACTJ4ZFCDZMD2UFR4R7MZOWYBCF6HBP65YKCUT37MUQFPJLDLJ3N5D2/data/config.memo_required", + ).ReturnJSON(200, memoRequiredResponse) + + _, err = client.SubmitFeeBumpTransactionWithOptions(feeBumpTx, SubmitTxOpts{SkipMemoRequiredCheck: false}) + assert.Error(t, err) + assert.Equal(t, ErrAccountRequiresMemo, errors.Cause(err)) + + tx, err = txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&payment}, + BaseFee: txnbuild.MinBaseFee, + Memo: txnbuild.MemoText("HelloWorld"), + Timebounds: txnbuild.NewTimebounds(0, 10), + }, + ) + assert.NoError(t, err) + + feeBumpKP = 
keypair.MustParseFull("SA5ZEFDVFZ52GRU7YUGR6EDPBNRU2WLA6IQFQ7S2IH2DG3VFV3DOMV2Q") + feeBumpTx, err = txnbuild.NewFeeBumpTransaction(txnbuild.FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: feeBumpKP.Address(), + BaseFee: txnbuild.MinBaseFee * 2, + }) + feeBumpTx, err = feeBumpTx.Sign(network.TestNetworkPassphrase, feeBumpKP) + feeBumpTxB64, err = feeBumpTx.Base64() + assert.NoError(t, err) + + // skips memo check if tx includes a memo + hmock.On( + "POST", + "https://localhost/transactions", + ).Return(func(request *http.Request) (*http.Response, error) { + val := request.FormValue("tx") + assert.Equal(t, val, feeBumpTxB64) + return httpmock.NewStringResponse(http.StatusOK, txSuccess), nil + }) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp) + assert.NoError(t, err) + + _, err = client.SubmitFeeBumpTransactionWithOptions(feeBumpTx, SubmitTxOpts{SkipMemoRequiredCheck: false}) + assert.NoError(t, err) +} + +func TestTransactionsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + transactionRequest := TransactionRequest{} + + // all transactions + hmock.On( + "GET", + "https://localhost/transactions", + ).ReturnString(200, txPageResponse) + + txs, err := client.Transactions(transactionRequest) + if assert.NoError(t, err) { + assert.IsType(t, txs, hProtocol.TransactionsPage{}) + links := txs.Links + assert.Equal(t, links.Self.Href, "https://horizon-testnet.stellar.org/transactions?cursor=&limit=10&order=desc") + + assert.Equal(t, links.Next.Href, "https://horizon-testnet.stellar.org/transactions?cursor=1881762611335168&limit=10&order=desc") + + assert.Equal(t, links.Prev.Href, "https://horizon-testnet.stellar.org/transactions?cursor=1881771201286144&limit=10&order=asc") + + tx := txs.Embedded.Records[0] + assert.IsType(t, tx, hProtocol.Transaction{}) + assert.Equal(t, tx.ID, "3274f131af56ecb6d8668acf6eb0b31b5f8faeca785cbce0a911a5a81308a599") + assert.Equal(t, tx.Ledger, int32(438134)) + assert.Equal(t, tx.FeeCharged, int64(100)) + assert.Equal(t, tx.MaxFee, int64(100)) + assert.Equal(t, tx.Hash, "3274f131af56ecb6d8668acf6eb0b31b5f8faeca785cbce0a911a5a81308a599") + } + + transactionRequest = TransactionRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/transactions", + ).ReturnString(200, txPageResponse) + + txs, err = client.Transactions(transactionRequest) + if assert.NoError(t, err) { + assert.IsType(t, txs, hProtocol.TransactionsPage{}) + } + + // too many parameters + transactionRequest = TransactionRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", ForLedger: 123} + hmock.On( + "GET", + "https://localhost/transactions", + ).ReturnString(200, txPageResponse) + + _, err = client.Transactions(transactionRequest) + // error case + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "too many parameters") + } + + // transaction detail + txHash := "5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c" + hmock.On( + "GET", + "https://localhost/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c", + ).ReturnString(200, txDetailResponse) + + record, err := client.TransactionDetail(txHash) + if assert.NoError(t, err) { + assert.Equal(t, record.ID, "5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c") + assert.Equal(t, record.Successful, true) + assert.Equal(t, record.Hash, 
"5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c") + assert.Equal(t, record.Memo, "2A1V6J5703G47XHY") + } +} + +func TestOrderBookRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + orderBookRequest := OrderBookRequest{BuyingAssetType: AssetTypeNative, SellingAssetCode: "USD", SellingAssetType: AssetType4, SellingAssetIssuer: "GBVOL67TMUQBGL4TZYNMY3ZQ5WGQYFPFD5VJRWXR72VA33VFNL225PL5"} + + // orderbook for XLM/USD + hmock.On( + "GET", + "https://localhost/order_book?buying_asset_type=native&selling_asset_code=USD&selling_asset_issuer=GBVOL67TMUQBGL4TZYNMY3ZQ5WGQYFPFD5VJRWXR72VA33VFNL225PL5&selling_asset_type=credit_alphanum4", + ).ReturnString(200, orderbookResponse) + + obs, err := client.OrderBook(orderBookRequest) + if assert.NoError(t, err) { + assert.IsType(t, obs, hProtocol.OrderBookSummary{}) + bids := obs.Bids + asks := obs.Asks + assert.Equal(t, bids[0].Price, "0.0000251") + assert.Equal(t, asks[0].Price, "0.0000256") + assert.Equal(t, obs.Selling.Type, "native") + assert.Equal(t, obs.Buying.Type, "credit_alphanum4") + } + + // failure response + orderBookRequest = OrderBookRequest{} + hmock.On( + "GET", + "https://localhost/order_book", + ).ReturnString(400, orderBookNotFound) + + _, err = client.OrderBook(orderBookRequest) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Invalid Order Book Parameters") + } + +} + +func TestFetchTimebounds(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + clock: &clock.Clock{ + Source: clocktest.FixedSource(time.Unix(1560947096, 0)), + }, + } + + // When no saved server time, return local time + st, err := client.FetchTimebounds(100) + if assert.NoError(t, err) { + assert.IsType(t, ServerTimeMap["localhost"], ServerTimeRecord{}) + assert.Equal(t, st.MinTime, int64(0)) + } + + // server time is saved on requests to horizon + header := http.Header{} + header.Add("Date", "Wed, 19 Jun 2019 12:24:56 GMT") //unix time: 1560947096 + hmock.On( + "GET", + "https://localhost/", + ).ReturnStringWithHeader(200, metricsResponse, header) + _, err = client.Root() + assert.NoError(t, err) + + // get saved server time + st, err = client.FetchTimebounds(100) + if assert.NoError(t, err) { + assert.IsType(t, ServerTimeMap["localhost"], ServerTimeRecord{}) + assert.Equal(t, st.MinTime, int64(0)) + // serverTime + 100seconds + assert.Equal(t, st.MaxTime, int64(1560947196)) + } + + // mock server time + newRecord := ServerTimeRecord{ServerTime: 100, LocalTimeRecorded: 1560947096} + ServerTimeMap["localhost"] = newRecord + st, err = client.FetchTimebounds(100) + assert.NoError(t, err) + assert.IsType(t, st, txnbuild.Timebounds{}) + assert.Equal(t, st.MinTime, int64(0)) + // time should be 200, serverTime + 100seconds + assert.Equal(t, st.MaxTime, int64(200)) +} + +func TestVersion(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + assert.Equal(t, "2.1.0", client.Version()) +} + +var accountsResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts?cursor=\u0026limit=10\u0026order=asc\u0026signer=GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP" + }, + "next": { + "href": 
"https://horizon-testnet.stellar.org/accounts?cursor=GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP\u0026limit=10\u0026order=asc\u0026signer=GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts?cursor=GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP\u0026limit=10\u0026order=desc\u0026signer=GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP" + }, + "transactions": { + "href": "https://horizon-testnet.stellar.org/accounts/GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP/transactions{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/accounts/GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP/operations{?cursor,limit,order}", + "templated": true + }, + "payments": { + "href": "https://horizon-testnet.stellar.org/accounts/GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP/payments{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/accounts/GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP/effects{?cursor,limit,order}", + "templated": true + }, + "offers": { + "href": "https://horizon-testnet.stellar.org/accounts/GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP/offers{?cursor,limit,order}", + "templated": true + }, + "trades": { + "href": "https://horizon-testnet.stellar.org/accounts/GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP/trades{?cursor,limit,order}", + "templated": true + }, + "data": { + "href": "https://horizon-testnet.stellar.org/accounts/GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP/data/{key}", + "templated": true + } + }, + "id": "GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + "account_id": "GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + "sequence": "47236050321450", + "subentry_count": 0, + "last_modified_ledger": 116787, + "thresholds": { + "low_threshold": 0, + "med_threshold": 0, + "high_threshold": 0 + }, + "flags": { + "auth_required": false, + "auth_revocable": false, + "auth_immutable": false + }, + "balances": [ + { + "balance": "100.8182300", + "buying_liabilities": "0.0000000", + "selling_liabilities": "0.0000000", + "asset_type": "native" + } + ], + "signers": [ + { + "weight": 1, + "key": "GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP", + "type": "ed25519_public_key" + } + ], + "data": {}, + "paging_token": "GAI3SO3S4E67HAUZPZ2D3VBFXY4AT6N7WQI7K5WFGRXWENTZJG2B6CYP" + } + ] + } +} +` + +var accountResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU" + }, + "transactions": { + "href": "https://horizon-testnet.stellar.org/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/transactions{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/operations{?cursor,limit,order}", + "templated": true + }, + "payments": { + "href": "https://horizon-testnet.stellar.org/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/payments{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": 
"https://horizon-testnet.stellar.org/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/effects{?cursor,limit,order}", + "templated": true + }, + "offers": { + "href": "https://horizon-testnet.stellar.org/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/offers{?cursor,limit,order}", + "templated": true + }, + "trades": { + "href": "https://horizon-testnet.stellar.org/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/trades{?cursor,limit,order}", + "templated": true + }, + "data": { + "href": "https://horizon-testnet.stellar.org/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/data/{key}", + "templated": true + } + }, + "id": "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + "paging_token": "1", + "account_id": "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + "sequence": "9865509814140929", + "subentry_count": 1, + "thresholds": { + "low_threshold": 0, + "med_threshold": 0, + "high_threshold": 0 + }, + "flags": { + "auth_required": false, + "auth_revocable": false, + "auth_immutable": false + }, + "balances": [ + { + "balance": "9999.9999900", + "buying_liabilities": "0.0000000", + "selling_liabilities": "0.0000000", + "asset_type": "native" + } + ], + "signers": [ + { + "public_key": "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + "weight": 1, + "key": "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + "type": "ed25519_public_key" + } + ], + "data": { + "test": "dGVzdA==" + }, + "last_modified_ledger": 103307, + "last_modified_time": "2019-03-05T13:23:50Z" +}` + +var memoRequiredResponse = map[string]string{ + "value": "MQ==", +} + +var notFoundResponse = `{ + "type": "https://stellar.org/horizon-errors/not_found", + "title": "Resource Missing", + "status": 404, + "detail": "The resource at the url requested was not found. 
This is usually occurs for one of two reasons: The url requested is not valid, or no data in our database could be found with the parameters provided.", + "instance": "horizon-live-001/61KdRW8tKi-18408110" +}` + +var accountData = `{ + "value": "dGVzdA==" +}` + +var effectsResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/43989725060534273/effects?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/operations/43989725060534273/effects?cursor=43989725060534273-3&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/operations/43989725060534273/effects?cursor=43989725060534273-1&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/43989725060534273" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=43989725060534273-1" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=43989725060534273-1" + } + }, + "id": "0043989725060534273-0000000001", + "paging_token": "43989725060534273-1", + "account": "GANHAS5OMPLKD6VYU4LK7MBHSHB2Q37ZHAYWOBJRUXGDHMPJF3XNT45Y", + "type": "account_debited", + "type_i": 3, + "created_at": "2018-07-27T21:00:12Z", + "asset_type": "native", + "amount": "9999.9999900" + }, + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/43989725060534273" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=43989725060534273-2" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=43989725060534273-2" + } + }, + "id": "0043989725060534273-0000000002", + "paging_token": "43989725060534273-2", + "account": "GBO7LQUWCC7M237TU2PAXVPOLLYNHYCYYFCLVMX3RBJCML4WA742X3UB", + "type": "account_credited", + "type_i": 2, + "created_at": "2018-07-27T21:00:12Z", + "asset_type": "native", + "amount": "9999.9999900" + }, + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/43989725060534273" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=43989725060534273-3" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=43989725060534273-3" + } + }, + "id": "0043989725060534273-0000000003", + "paging_token": "43989725060534273-3", + "account": "GANHAS5OMPLKD6VYU4LK7MBHSHB2Q37ZHAYWOBJRUXGDHMPJF3XNT45Y", + "type": "account_removed", + "type_i": 1, + "created_at": "2018-07-27T21:00:12Z" + } + ] + } +}` + +var assetsResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/assets?cursor=&limit=1&order=desc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/assets?cursor=ABC_GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU_credit_alphanum12&limit=1&order=desc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/assets?cursor=ABC_GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU_credit_alphanum12&limit=1&order=asc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "toml": { + "href": "" + } + }, + "asset_type": "credit_alphanum12", + "asset_code": "ABC", + "asset_issuer": "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + "paging_token": "1", + "amount": "105.0000000", + "num_accounts": 3, + "flags": { + "auth_required": true, + "auth_revocable": false, + "auth_immutable": false + } + } 
+ ] + } +}` + +var ledgerResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/ledgers/69859" + }, + "transactions": { + "href": "https://horizon-testnet.stellar.org/ledgers/69859/transactions{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/ledgers/69859/operations{?cursor,limit,order}", + "templated": true + }, + "payments": { + "href": "https://horizon-testnet.stellar.org/ledgers/69859/payments{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/ledgers/69859/effects{?cursor,limit,order}", + "templated": true + } + }, + "id": "71a40c0581d8d7c1158e1d9368024c5f9fd70de17a8d277cdd96781590cc10fb", + "paging_token": "300042120331264", + "hash": "71a40c0581d8d7c1158e1d9368024c5f9fd70de17a8d277cdd96781590cc10fb", + "prev_hash": "78979bed15463bfc3b0c1915acc6aec866565d360ba6565d26ffbb3dc484f18c", + "sequence": 69859, + "successful_transaction_count": 0, + "failed_transaction_count": 1, + "operation_count": 0, + "closed_at": "2019-03-03T13:38:16Z", + "total_coins": "100000000000.0000000", + "fee_pool": "10.7338093", + "base_fee_in_stroops": 100, + "base_reserve_in_stroops": 5000000, + "max_tx_set_size": 100, + "protocol_version": 10, + "header_xdr": "AAAACniXm+0VRjv8OwwZFazGrshmVl02C6ZWXSb/uz3EhPGMLuFhI0sVqAG57WnGMUKmOUk/J8TAktUB97VgrgEsZuEAAAAAXHvYyAAAAAAAAAAAcvWzXsmT72oXZ7QPC1nZLJei+lFwYRXF4FIz/PQguubMDKGRJrT/0ofTHlZjWAMWjABeGgup7zhfZkm0xrthCAABEOMN4Lazp2QAAAAAAAAGZdltAAAAAAAAAAAABOqvAAAAZABMS0AAAABk4Vse3u3dDM9UWfoH9ooQLLSXYEee8xiHu/k9p6YLlWR2KT4hYGehoHGmp04rhMRMAEp+GHE+KXv0UUxAPmmNmwGYK2HFCnl5a931YmTQYrHQzEeCHx+aI4+TLjTlFjMqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" +}` + +var metricsResponse = `{ + "_links": { + "self": { + "href": "/metrics" + } + }, + "goroutines": { + "value": 1893 + }, + "history.elder_ledger": { + "value": 1 + }, + "history.latest_ledger": { + "value": 22826153 + }, + "history.open_connections": { + "value": 27 + }, + "ingest.ledger_graph_only_ingestion": { + "15m.rate": 0, + "1m.rate": 0, + "5m.rate": 0, + "75%": 0, + "95%": 0, + "99%": 0, + "99.9%": 0, + "count": 0, + "max": 0, + "mean": 0, + "mean.rate": 0, + "median": 0, + "min": 10, + "stddev": 0 + }, + "ingest.ledger_ingestion": { + "15m.rate": 4.292383845297832, + "1m.rate": 1.6828538278349856, + "5m.rate": 3.7401206537727854, + "75%": 0.0918039395, + "95%": 0.11669889484999994, + "99%": 0.143023258, + "99.9%": 0.143023258, + "count": 36, + "max": 0.143023258, + "mean": 0.074862138, + "mean.rate": 0.48723881363424204, + "median": 0.0706217925, + "min": 0.03396778, + "stddev": 0.023001478 + }, + "ingest.state_verify": { + "15m.rate": 0, + "1m.rate": 0, + "5m.rate": 0, + "75%": 0, + "95%": 0, + "99%": 0, + "99.9%": 230.123456, + "count": 0, + "max": 0, + "mean": 0, + "mean.rate": 0, + "median": 0, + "min": 0, + "stddev": 0 + }, + "logging.debug": { + "15m.rate": 0, + "1m.rate": 0, + "5m.rate": 0, + "count": 0, + "mean.rate": 0 + }, + "logging.error": { + "15m.rate": 0, + "1m.rate": 0, + "5m.rate": 0, + "count": 0, + "mean.rate": 0 + }, + "logging.info": { + "15m.rate": 232.85916859415772, + "1m.rate": 242.7785273104503, + "5m.rate": 237.74161591995696, + "count": 133049195, + "mean.rate": 227.30356525388274 + }, + "logging.panic": { + "15m.rate": 0, + "1m.rate": 0, + "5m.rate": 0, + "count": 0, + "mean.rate": 0 + }, + "logging.warning": { + "15m.rate": 0.00002864686194423444, + "1m.rate": 4.5629799451093754e-41, + "5m.rate": 3.714334583072108e-10, + "count": 
6995, + "mean.rate": 0.011950421578867764 + }, + "requests.failed": { + "15m.rate": 46.27434280564861, + "1m.rate": 48.559342299629265, + "5m.rate": 47.132925275045295, + "count": 26002133, + "mean.rate": 44.42250383043155 + }, + "requests.succeeded": { + "15m.rate": 69.36681910982539, + "1m.rate": 72.38504433912904, + "5m.rate": 71.00293298710338, + "count": 39985482, + "mean.rate": 68.31190342961553 + }, + "requests.total": { + "15m.rate": 115.64116191547402, + "1m.rate": 120.94438663875829, + "5m.rate": 118.13585826214866, + "75%": 4628801.75, + "95%": 55000011530.4, + "99%": 55004856745.49, + "99.9%": 55023166974.193, + "count": 65987615, + "max": 55023405838, + "mean": 3513634813.836576, + "mean.rate": 112.73440653824905, + "median": 1937564.5, + "min": 20411, + "stddev": 13264750988.737148 + }, + "stellar_core.latest_ledger": { + "value": 22826156 + }, + "stellar_core.open_connections": { + "value": 94 + }, + "txsub.buffered": { + "value": 1 + }, + "txsub.failed": { + "15m.rate": 0.02479237361888591, + "1m.rate": 0.03262394685483348, + "5m.rate": 0.026600772194616953, + "count": 13977, + "mean.rate": 0.02387863835950965 + }, + "txsub.open": { + "value": 0 + }, + "txsub.succeeded": { + "15m.rate": 0.3684477520175787, + "1m.rate": 0.3620036969560598, + "5m.rate": 0.3669857018510689, + "count": 253242, + "mean.rate": 0.43264464015537746 + }, + "txsub.total": { + "15m.rate": 0.3932401256364647, + "1m.rate": 0.3946276438108932, + "5m.rate": 0.3935864740456858, + "75%": 30483683.25, + "95%": 88524119.3999999, + "99%": 320773244.6300014, + "99.9%": 1582447912.6680026, + "count": 267219, + "max": 1602906917, + "mean": 34469463.39785992, + "mean.rate": 0.45652327851684915, + "median": 18950996.5, + "min": 3156355, + "stddev": 79193338.90936844 + } +}` + +var feesResponse = `{ + "last_ledger": "22606298", + "last_ledger_base_fee": "100", + "ledger_capacity_usage": "0.97", + "max_fee": { + "min": "130", + "max": "8000", + "mode": "250", + "p10": "150", + "p20": "200", + "p30": "300", + "p40": "400", + "p50": "500", + "p60": "1000", + "p70": "2000", + "p80": "3000", + "p90": "4000", + "p95": "5000", + "p99": "8000" + }, + "fee_charged": { + "min": "100", + "max": "100", + "mode": "100", + "p10": "100", + "p20": "100", + "p30": "100", + "p40": "100", + "p50": "100", + "p60": "100", + "p70": "100", + "p80": "100", + "p90": "100", + "p95": "100", + "p99": "100" + } +}` + +var offersResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F/offers?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F/offers?cursor=432323&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F/offers?cursor=432323&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/432323" + }, + "offer_maker": { + "href": "https://horizon-testnet.stellar.org/accounts/GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F" + } + }, + "id": "432323", + "paging_token": "432323", + "seller": "GDOJCPYIB66RY4XNDLRRHQQXB27YLNNAGAYV5HMHEYNYY4KUNV5FDV2F", + "selling": { + "asset_type": "credit_alphanum4", + "asset_code": "ABC", + "asset_issuer": "GDP6QEA4A5CRNGUIGYHHFETDPNEESIZFW53RVISXGSALI7KXNUC4YBWD" + }, + "buying": { + "asset_type": "native" + }, + 
"amount": "1999979.8700000", + "price_r": { + "n": 100, + "d": 1 + }, + "price": "100.0000000", + "last_modified_ledger": 103307, + "last_modified_time": "2019-03-05T13:23:50Z" + } + ] + } +}` + +var offerResponse = ` +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/5635" + }, + "offer_maker": { + "href": "https://horizon-testnet.stellar.org/accounts/GD6UOZ3FGFI5L2X6F52YPJ6ICSW375BNBZIQC4PCLSEOO6SMX7CUS5MB" + } + }, + "id": "5635", + "paging_token": "5635", + "seller": "GD6UOZ3FGFI5L2X6F52YPJ6ICSW375BNBZIQC4PCLSEOO6SMX7CUS5MB", + "selling": { + "asset_type": "native" + }, + "buying": { + "asset_type": "credit_alphanum12", + "asset_code": "AstroDollar", + "asset_issuer": "GDA2EHKPDEWZTAL6B66FO77HMOZL3RHZTIJO7KJJK5RQYSDUXEYMPJYY" + }, + "amount": "100.0000000", + "price_r": { + "n": 10, + "d": 1 + }, + "price": "10.0000000", + "last_modified_ledger": 356183, + "last_modified_time": "2020-02-20T20:44:55Z" +} +` + +var multipleOpsResponse = `{ + "_links": { + "self": { + "href": "https://horizon.stellar.org/transactions/b63307ef92bb253df13361a72095156d19fc0713798bc2e6c3bd9ee63cc3ca53/operations?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon.stellar.org/transactions/b63307ef92bb253df13361a72095156d19fc0713798bc2e6c3bd9ee63cc3ca53/operations?cursor=98447788659970049&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon.stellar.org/transactions/b63307ef92bb253df13361a72095156d19fc0713798bc2e6c3bd9ee63cc3ca53/operations?cursor=98447788659970049&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon.stellar.org/operations/98447788659970049" + }, + "transaction": { + "href": "https://horizon.stellar.org/transactions/b63307ef92bb253df13361a72095156d19fc0713798bc2e6c3bd9ee63cc3ca53" + }, + "effects": { + "href": "https://horizon.stellar.org/operations/98447788659970049/effects" + }, + "succeeds": { + "href": "https://horizon.stellar.org/effects?order=desc&cursor=98447788659970049" + }, + "precedes": { + "href": "https://horizon.stellar.org/effects?order=asc&cursor=98447788659970049" + } + }, + "id": "98447788659970049", + "paging_token": "98447788659970049", + "transaction_successful": true, + "source_account": "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + "type": "payment", + "type_i": 1, + "created_at": "2019-03-14T09:44:26Z", + "transaction_hash": "b63307ef92bb253df13361a72095156d19fc0713798bc2e6c3bd9ee63cc3ca53", + "asset_type": "credit_alphanum4", + "asset_code": "DRA", + "asset_issuer": "GCJKSAQECBGSLPQWAU7ME4LVQVZ6IDCNUA5NVTPPCUWZWBN5UBFMXZ53", + "from": "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + "to": "GDTCW47BX2ELQ76KAZIA5Z6V4IEHUUD44ABJ66JTRZRMINEJY3OUCNEO", + "amount": "1.1200000" + }, + { + "_links": { + "self": { + "href": "https://horizon.stellar.org/operations/98448467264811009" + }, + "transaction": { + "href": "https://horizon.stellar.org/transactions/af68055329e570bf461f384e2cd40db023be32f1c38a756ba2db08b6baf66148" + }, + "effects": { + "href": "https://horizon.stellar.org/operations/98448467264811009/effects" + }, + "succeeds": { + "href": "https://horizon.stellar.org/effects?order=desc&cursor=98448467264811009" + }, + "precedes": { + "href": "https://horizon.stellar.org/effects?order=asc&cursor=98448467264811009" + } + }, + "id": "98448467264811009", + "paging_token": "98448467264811009", + "transaction_successful": true, + "source_account": "GDD7ABRF7BCK76W33RXDQG5Q3WXVSQYVLGEMXSOWRGZ6Z3G3M2EM2TCP", + "type": 
"manage_offer", + "type_i": 3, + "created_at": "2019-03-14T09:58:33Z", + "transaction_hash": "af68055329e570bf461f384e2cd40db023be32f1c38a756ba2db08b6baf66148", + "amount": "7775.0657728", + "price": "3.0058511", + "price_r": { + "n": 30058511, + "d": 10000000 + }, + "buying_asset_type": "native", + "selling_asset_type": "credit_alphanum4", + "selling_asset_code": "XRP", + "selling_asset_issuer": "GBVOL67TMUQBGL4TZYNMY3ZQ5WGQYFPFD5VJRWXR72VA33VFNL225PL5", + "offer_id": "73938565" + }, + { + "_links": { + "self": { + "href": "http://horizon-mon.stellar-ops.com/operations/98455906148208641" + }, + "transaction": { + "href": "http://horizon-mon.stellar-ops.com/transactions/ade3c60f1b581e8744596673d95bffbdb8f68f199e0e2f7d63b7c3af9fd8d868" + }, + "effects": { + "href": "http://horizon-mon.stellar-ops.com/operations/98455906148208641/effects" + }, + "succeeds": { + "href": "http://horizon-mon.stellar-ops.com/effects?order=desc\u0026cursor=98455906148208641" + }, + "precedes": { + "href": "http://horizon-mon.stellar-ops.com/effects?order=asc\u0026cursor=98455906148208641" + } + }, + "id": "98455906148208641", + "paging_token": "98455906148208641", + "transaction_successful": true, + "source_account": "GD7C4MQJDM3AHXKO2Z2OF7BK3FYL6QMNBGVEO4H2DHM65B7JMHD2IU2E", + "type": "create_account", + "type_i": 0, + "created_at": "2019-03-14T12:30:40Z", + "transaction_hash": "ade3c60f1b581e8744596673d95bffbdb8f68f199e0e2f7d63b7c3af9fd8d868", + "starting_balance": "2.0000000", + "funder": "GD7C4MQJDM3AHXKO2Z2OF7BK3FYL6QMNBGVEO4H2DHM65B7JMHD2IU2E", + "account": "GD6LCN37TNJZW3JF2R7N5EYGQGVWRPMSGQHR6RZD4X4NATEQLP7RFAMA" + } + ] + } +}` + +var opsResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/1103965508866049" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/93c2755ec61c8b01ac11daa4d8d7a012f56be172bdfcaf77a6efd683319ca96d" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/1103965508866049/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=1103965508866049" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=1103965508866049" + } + }, + "id": "1103965508866049", + "paging_token": "1103965508866049", + "transaction_successful": true, + "source_account": "GBMVGXJXJ7ZBHIWMXHKR6IVPDTYKHJPXC2DHZDPJBEZWZYAC7NKII7IB", + "type": "change_trust", + "type_i": 6, + "created_at": "2019-03-14T15:58:57Z", + "transaction_hash": "93c2755ec61c8b01ac11daa4d8d7a012f56be172bdfcaf77a6efd683319ca96d", + "asset_type": "credit_alphanum4", + "asset_code": "UAHd", + "asset_issuer": "GDDETPGV4OJVNBTB6GQICCPGH5DZRYYB7XQCSAZO2ZQH6HO7SWXHKKJN", + "limit": "922337203685.4775807", + "trustee": "GDDETPGV4OJVNBTB6GQICCPGH5DZRYYB7XQCSAZO2ZQH6HO7SWXHKKJN", + "trustor": "GBMVGXJXJ7ZBHIWMXHKR6IVPDTYKHJPXC2DHZDPJBEZWZYAC7NKII7IB" +}` + +var txSuccess = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GC3IMK2BSHNZZ4WAC3AXQYA7HQTZKUUDJ7UYSA2HTNCIX5S5A5NVD3FD" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/438134" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + 
"href": "https://horizon-testnet.stellar.org/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=1881771201282048" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=1881771201282048" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/bcc7a97264dca0a51a63f7ea971b5e7458e334489673078bb2a34eb0cce910ca" + } + }, + "id": "bcc7a97264dca0a51a63f7ea971b5e7458e334489673078bb2a34eb0cce910ca", + "hash": "bcc7a97264dca0a51a63f7ea971b5e7458e334489673078bb2a34eb0cce910ca", + "ledger": 354811, + "successful": true, + "created_at": "2019-03-25T10:27:53Z", + "source_account": "GC3IMK2BSHNZZ4WAC3AXQYA7HQTZKUUDJ7UYSA2HTNCIX5S5A5NVD3FD", + "source_account_sequence": "1881766906298369", + "fee_charged": 100, + "max_fee": 100, + "operation_count": 1, + "signatures": [ + "kOZumR7L/Pxnf2kSdhDC7qyTMRcp0+ymw+dU+4A/dRqqf387ER4pUhqFUsOc7ZrSW9iz+6N20G4mcp0IiT5fAg==" + ], + "envelope_xdr": "AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAZAAABD0AAuV/AAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAyTBGxOgfSApppsTnb/YRr6gOR8WT0LZNrhLh4y3FCgoAAAAXSHboAAAAAAAAAAABhlbgnAAAAEAivKe977CQCxMOKTuj+cWTFqc2OOJU8qGr9afrgu2zDmQaX5Q0cNshc3PiBwe0qw/+D/qJk5QqM5dYeSUGeDQP", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAVp+wAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwACBP/TuycHAAABD0AAuV+AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAVp+wAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwACBP/TuycHAAABD0AAuV/AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMABWn7AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAIE/9O7JwcAAAEPQAC5X8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABWn7AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAIE+gGdbQcAAAEPQAC5X8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAABWn7AAAAAAAAAADJMEbE6B9ICmmmxOdv9hGvqA5HxZPQtk2uEuHjLcUKCgAAABdIdugAAAVp+wAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==" +}` + +var transactionFailure = `{ + "type": "https://stellar.org/horizon-errors/transaction_failed", + "title": "Transaction Failed", + "status": 400, + "detail": "The transaction failed when submitted to the stellar network. The extras.result_codes field on this response contains further details. 
Descriptions of each code can be found at: https://developers.stellar.org/docs/start/list-of-operations/", + "instance": "horizon-testnet-001.prd.stellar001.internal.stellar-ops.com/4elYz2fHhC-528285", + "extras": { + "envelope_xdr": "AAAAAKpmDL6Z4hvZmkTBkYpHftan4ogzTaO4XTB7joLgQnYYAAAAZAAAAAAABeoyAAAAAAAAAAEAAAAAAAAAAQAAAAAAAAABAAAAAD3sEVVGZGi/NoC3ta/8f/YZKMzyi9ZJpOi0H47x7IqYAAAAAAAAAAAF9eEAAAAAAAAAAAA=", + "result_codes": { + "transaction": "tx_no_source_account" + }, + "result_xdr": "AAAAAAAAAAD////4AAAAAA==" + } +}` + +var txPageResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions?cursor=&limit=10&order=desc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/transactions?cursor=1881762611335168&limit=10&order=desc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/transactions?cursor=1881771201286144&limit=10&order=asc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/3274f131af56ecb6d8668acf6eb0b31b5f8faeca785cbce0a911a5a81308a599" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/438134" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/3274f131af56ecb6d8668acf6eb0b31b5f8faeca785cbce0a911a5a81308a599/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/3274f131af56ecb6d8668acf6eb0b31b5f8faeca785cbce0a911a5a81308a599/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=1881771201286144" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=1881771201286144" + } + }, + "id": "3274f131af56ecb6d8668acf6eb0b31b5f8faeca785cbce0a911a5a81308a599", + "paging_token": "1881771201286144", + "successful": true, + "hash": "3274f131af56ecb6d8668acf6eb0b31b5f8faeca785cbce0a911a5a81308a599", + "ledger": 438134, + "created_at": "2019-03-25T10:27:53Z", + "source_account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "source_account_sequence": "4660039787356", + "fee_charged": 100, + "max_fee": 100, + "operation_count": 1, + "envelope_xdr": "AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAZAAABD0ABCNcAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAzvbakxhsAWYE0gRDf2pfXaYUCnH8vEwyQiNOJYLmNRIAAAAXSHboAAAAAAAAAAABhlbgnAAAAEBw2qecm0C4q7xi8+43NjuExfspCtA1ki2Jq2lWuNSLArJ0qcOhz/HnszFppaCBHkFf/37557MbF4NbFZXlVv4P", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAavdgAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwAHtF2q1bgcAAABD0ABCNbAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAavdgAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwAHtF2q1bgcAAABD0ABCNcAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMABq92AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAe0XarVuBwAAAEPQAEI1wAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABq92AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAe0V9i3/hwAAAEPQAEI1wAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAABq92AAAAAAAAAADO9tqTGGwBZgTSBEN/al9dphQKcfy8TDJCI04lguY1EgAAABdIdugAAAavdgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "fee_meta_xdr": 
"AAAAAgAAAAMABq92AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAe0Y3zzcjUAAAEPQAEI1oAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABq92AAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAe0Y3zzchwAAAEPQAEI1oAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "none", + "signatures": [ + "cNqnnJtAuKu8YvPuNzY7hMX7KQrQNZItiatpVrjUiwKydKnDoc/x57MxaaWggR5BX/9++eezGxeDWxWV5Vb+Dw==" + ] + }, + { + "memo": "2A1V6J5703G47XHY", + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GC3IMK2BSHNZZ4WAC3AXQYA7HQTZKUUDJ7UYSA2HTNCIX5S5A5NVD3FD" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/438134" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=1881771201282048" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=1881771201282048" + } + }, + "id": "5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c", + "paging_token": "1881771201282048", + "successful": true, + "hash": "5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c", + "ledger": 438134, + "created_at": "2019-03-25T10:27:53Z", + "source_account": "GC3IMK2BSHNZZ4WAC3AXQYA7HQTZKUUDJ7UYSA2HTNCIX5S5A5NVD3FD", + "source_account_sequence": "1881766906298369", + "fee_charged": 100, + "max_fee": 100, + "operation_count": 1, + "envelope_xdr": "AAAAALaGK0GR25zywBbBeGAfPCeVUoNP6YkDR5tEi/ZdB1tRAAAAZAAGr3UAAAABAAAAAAAAAAEAAAAQMkExVjZKNTcwM0c0N1hIWQAAAAEAAAABAAAAALaGK0GR25zywBbBeGAfPCeVUoNP6YkDR5tEi/ZdB1tRAAAAAQAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAAAAAAACBo93AAAAAAAAAAABXQdbUQAAAECQ5m6ZHsv8/Gd/aRJ2EMLurJMxFynT7KbD51T7gD91Gqp/fzsRHilSGoVSw5ztmtJb2LP7o3bQbiZynQiJPl8C", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAavdgAAAAAAAAAAtoYrQZHbnPLAFsF4YB88J5VSg0/piQNHm0SL9l0HW1EAAAAXSHbnnAAGr3UAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAavdgAAAAAAAAAAtoYrQZHbnPLAFsF4YB88J5VSg0/piQNHm0SL9l0HW1EAAAAXSHbnnAAGr3UAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMABq9zAAAAAAAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAAUQ/z+cAABeBgAASuQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABq92AAAAAAAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAAcXjracAABeBgAASuQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMABq92AAAAAAAAAAC2hitBkduc8sAWwXhgHzwnlVKDT+mJA0ebRIv2XQdbUQAAABdIduecAAavdQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABq92AAAAAAAAAAC2hitBkduc8sAWwXhgHzwnlVKDT+mJA0ebRIv2XQdbUQAAABVB53CcAAavdQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMABq91AAAAAAAAAAC2hitBkduc8sAWwXhgHzwnlVKDT+mJA0ebRIv2XQdbUQAAABdIdugAAAavdQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABq92AAAAAAAAAAC2hitBkduc8sAWwXhgHzwnlVKDT+mJA0ebRIv2XQdbUQAAABdIduecAAavdQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "text", + "signatures": [ + 
"kOZumR7L/Pxnf2kSdhDC7qyTMRcp0+ymw+dU+4A/dRqqf387ER4pUhqFUsOc7ZrSW9iz+6N20G4mcp0IiT5fAg==" + ] + } + ] + } +}` + +var txDetailResponse = `{ + "memo": "2A1V6J5703G47XHY", + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GC3IMK2BSHNZZ4WAC3AXQYA7HQTZKUUDJ7UYSA2HTNCIX5S5A5NVD3FD" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/438134" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=1881771201282048" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=1881771201282048" + } + }, + "id": "5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c", + "paging_token": "1881771201282048", + "successful": true, + "hash": "5131aed266a639a6eb4802a92fba310454e711ded830ed899745b9e777d7110c", + "ledger": 438134, + "created_at": "2019-03-25T10:27:53Z", + "source_account": "GC3IMK2BSHNZZ4WAC3AXQYA7HQTZKUUDJ7UYSA2HTNCIX5S5A5NVD3FD", + "source_account_sequence": "1881766906298369", + "fee_charged": 100, + "max_fee": 100, + "operation_count": 1, + "envelope_xdr": "AAAAALaGK0GR25zywBbBeGAfPCeVUoNP6YkDR5tEi/ZdB1tRAAAAZAAGr3UAAAABAAAAAAAAAAEAAAAQMkExVjZKNTcwM0c0N1hIWQAAAAEAAAABAAAAALaGK0GR25zywBbBeGAfPCeVUoNP6YkDR5tEi/ZdB1tRAAAAAQAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAAAAAAACBo93AAAAAAAAAAABXQdbUQAAAECQ5m6ZHsv8/Gd/aRJ2EMLurJMxFynT7KbD51T7gD91Gqp/fzsRHilSGoVSw5ztmtJb2LP7o3bQbiZynQiJPl8C", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAavdgAAAAAAAAAAtoYrQZHbnPLAFsF4YB88J5VSg0/piQNHm0SL9l0HW1EAAAAXSHbnnAAGr3UAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAavdgAAAAAAAAAAtoYrQZHbnPLAFsF4YB88J5VSg0/piQNHm0SL9l0HW1EAAAAXSHbnnAAGr3UAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMABq9zAAAAAAAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAAUQ/z+cAABeBgAASuQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABq92AAAAAAAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAAcXjracAABeBgAASuQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMABq92AAAAAAAAAAC2hitBkduc8sAWwXhgHzwnlVKDT+mJA0ebRIv2XQdbUQAAABdIduecAAavdQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABq92AAAAAAAAAAC2hitBkduc8sAWwXhgHzwnlVKDT+mJA0ebRIv2XQdbUQAAABVB53CcAAavdQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMABq91AAAAAAAAAAC2hitBkduc8sAWwXhgHzwnlVKDT+mJA0ebRIv2XQdbUQAAABdIdugAAAavdQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABq92AAAAAAAAAAC2hitBkduc8sAWwXhgHzwnlVKDT+mJA0ebRIv2XQdbUQAAABdIduecAAavdQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "text", + "signatures": [ + "kOZumR7L/Pxnf2kSdhDC7qyTMRcp0+ymw+dU+4A/dRqqf387ER4pUhqFUsOc7ZrSW9iz+6N20G4mcp0IiT5fAg==" + ] +}` + +var orderbookResponse = `{ + "bids": [ + { + "price_r": { + "n": 48904, + "d": 1949839975 + }, + "price": "0.0000251", + "amount": "0.0841405" + }, + { + "price_r": { + "n": 273, + "d": 10917280 + }, + "price": "0.0000250", + "amount": 
"0.0005749" + } + ], + "asks": [ + { + "price_r": { + "n": 2, + "d": 78125 + }, + "price": "0.0000256", + "amount": "3354.7460938" + }, + { + "price_r": { + "n": 10178, + "d": 394234000 + }, + "price": "0.0000258", + "amount": "1.7314070" + } + ], + "base": { + "asset_type": "native" + }, + "counter": { + "asset_type": "credit_alphanum4", + "asset_code": "BTC", + "asset_issuer": "GBVOL67TMUQBGL4TZYNMY3ZQ5WGQYFPFD5VJRWXR72VA33VFNL225PL5" + } +}` + +var orderBookNotFound = `{ + "type": "https://stellar.org/horizon-errors/invalid_order_book", + "title": "Invalid Order Book Parameters", + "status": 400, + "detail": "The parameters that specify what order book to view are invalid in some way. Please ensure that your type parameters (selling_asset_type and buying_asset_type) are one the following valid values: native, credit_alphanum4, credit_alphanum12. Also ensure that you have specified selling_asset_code and selling_asset_issuer if selling_asset_type is not 'native', as well as buying_asset_code and buying_asset_issuer if buying_asset_type is not 'native'" +}` + +var paymentsResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/payments?cursor=&limit=2&order=desc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/payments?cursor=2024660468248577&limit=2&order=desc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/payments?cursor=2024660468256769&limit=2&order=asc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/2024660468256769" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/a0207513c372146bae8cdb299975047216cb1ffb393074b2015b39496e8767c2" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/2024660468256769/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2024660468256769" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2024660468256769" + } + }, + "id": "2024660468256769", + "paging_token": "2024660468256769", + "transaction_successful": true, + "source_account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "type": "create_account", + "type_i": 0, + "created_at": "2019-03-27T09:55:41Z", + "transaction_hash": "a0207513c372146bae8cdb299975047216cb1ffb393074b2015b39496e8767c2", + "starting_balance": "10000.0000000", + "funder": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "account": "GB4OHVQE7OZH4HLCHFNR7OHDMZVNKOJT3RCRAXRNGGCNUHFRVGUGKW36" + }, + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/2024660468248577" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/87d7a29539e7902b14a6c720094856f74a77128ab332d8629432c5a176a9fe7b" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/2024660468248577/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2024660468248577" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2024660468248577" + } + }, + "id": "2024660468248577", + "paging_token": "2024660468248577", + "transaction_successful": true, + "source_account": "GAL6CXEVI3Y4O4J3FIX3KCRF7HSUG5RW2IRQRUUFC6XHZOLNV3NU35TL", + "type": "payment", + "type_i": 1, + "created_at": "2019-03-27T09:55:41Z", + "transaction_hash": 
"87d7a29539e7902b14a6c720094856f74a77128ab332d8629432c5a176a9fe7b", + "asset_type": "native", + "from": "GAL6CXEVI3Y4O4J3FIX3KCRF7HSUG5RW2IRQRUUFC6XHZOLNV3NU35TL", + "to": "GDGEQS64ISS6Y2KDM5V67B6LXALJX4E7VE4MIA54NANSUX5MKGKBZM5G", + "amount": "177.0000000" + } + ] + } +}` diff --git a/clients/horizonclient/mocks.go b/clients/horizonclient/mocks.go new file mode 100644 index 0000000000..b65e9739a9 --- /dev/null +++ b/clients/horizonclient/mocks.go @@ -0,0 +1,353 @@ +package horizonclient + +import ( + "context" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/txnbuild" + "github.com/stretchr/testify/mock" +) + +// MockClient is a mockable horizon client. +type MockClient struct { + mock.Mock +} + +// Accounts is a mocking method +func (m *MockClient) Accounts(request AccountsRequest) (hProtocol.AccountsPage, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.AccountsPage), a.Error(1) +} + +// AccountDetail is a mocking method +func (m *MockClient) AccountDetail(request AccountRequest) (hProtocol.Account, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.Account), a.Error(1) +} + +// AccountData is a mocking method +func (m *MockClient) AccountData(request AccountRequest) (hProtocol.AccountData, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.AccountData), a.Error(1) +} + +// Effects is a mocking method +func (m *MockClient) Effects(request EffectRequest) (effects.EffectsPage, error) { + a := m.Called(request) + return a.Get(0).(effects.EffectsPage), a.Error(1) +} + +// Assets is a mocking method +func (m *MockClient) Assets(request AssetRequest) (hProtocol.AssetsPage, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.AssetsPage), a.Error(1) +} + +// Ledgers is a mocking method +func (m *MockClient) Ledgers(request LedgerRequest) (hProtocol.LedgersPage, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.LedgersPage), a.Error(1) +} + +// LedgerDetail is a mocking method +func (m *MockClient) LedgerDetail(sequence uint32) (hProtocol.Ledger, error) { + a := m.Called(sequence) + return a.Get(0).(hProtocol.Ledger), a.Error(1) +} + +// FeeStats is a mocking method +func (m *MockClient) FeeStats() (hProtocol.FeeStats, error) { + a := m.Called() + return a.Get(0).(hProtocol.FeeStats), a.Error(1) +} + +// Offers is a mocking method +func (m *MockClient) Offers(request OfferRequest) (hProtocol.OffersPage, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.OffersPage), a.Error(1) +} + +// OfferDetail is a mocking method +func (m *MockClient) OfferDetails(offerID string) (hProtocol.Offer, error) { + a := m.Called(offerID) + return a.Get(0).(hProtocol.Offer), a.Error(1) +} + +// Operations is a mocking method +func (m *MockClient) Operations(request OperationRequest) (operations.OperationsPage, error) { + a := m.Called(request) + return a.Get(0).(operations.OperationsPage), a.Error(1) +} + +// OperationDetail is a mocking method +func (m *MockClient) OperationDetail(id string) (operations.Operation, error) { + a := m.Called(id) + return a.Get(0).(operations.Operation), a.Error(1) +} + +// SubmitTransactionXDR is a mocking method +func (m *MockClient) SubmitTransactionXDR(transactionXdr string) (hProtocol.Transaction, error) { + a := m.Called(transactionXdr) + return a.Get(0).(hProtocol.Transaction), a.Error(1) +} + +// SubmitFeeBumpTransaction is a mocking method +func (m *MockClient) 
SubmitFeeBumpTransaction(transaction *txnbuild.FeeBumpTransaction) (hProtocol.Transaction, error) { + a := m.Called(transaction) + return a.Get(0).(hProtocol.Transaction), a.Error(1) +} + +// SubmitTransaction is a mocking method +func (m *MockClient) SubmitTransaction(transaction *txnbuild.Transaction) (hProtocol.Transaction, error) { + a := m.Called(transaction) + return a.Get(0).(hProtocol.Transaction), a.Error(1) +} + +// SubmitFeeBumpTransactionWithOptions is a mocking method +func (m *MockClient) SubmitFeeBumpTransactionWithOptions(transaction *txnbuild.FeeBumpTransaction, opts SubmitTxOpts) (hProtocol.Transaction, error) { + a := m.Called(transaction, opts) + return a.Get(0).(hProtocol.Transaction), a.Error(1) +} + +// SubmitTransactionWithOptions is a mocking method +func (m *MockClient) SubmitTransactionWithOptions(transaction *txnbuild.Transaction, opts SubmitTxOpts) (hProtocol.Transaction, error) { + a := m.Called(transaction, opts) + return a.Get(0).(hProtocol.Transaction), a.Error(1) +} + +// Transactions is a mocking method +func (m *MockClient) Transactions(request TransactionRequest) (hProtocol.TransactionsPage, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.TransactionsPage), a.Error(1) +} + +// TransactionDetail is a mocking method +func (m *MockClient) TransactionDetail(txHash string) (hProtocol.Transaction, error) { + a := m.Called(txHash) + return a.Get(0).(hProtocol.Transaction), a.Error(1) +} + +// OrderBook is a mocking method +func (m *MockClient) OrderBook(request OrderBookRequest) (hProtocol.OrderBookSummary, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.OrderBookSummary), a.Error(1) +} + +// Paths is a mocking method +func (m *MockClient) Paths(request PathsRequest) (hProtocol.PathsPage, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.PathsPage), a.Error(1) +} + +// Payments is a mocking method +func (m *MockClient) Payments(request OperationRequest) (operations.OperationsPage, error) { + a := m.Called(request) + return a.Get(0).(operations.OperationsPage), a.Error(1) +} + +// TradeAggregations is a mocking method +func (m *MockClient) TradeAggregations(request TradeAggregationRequest) (hProtocol.TradeAggregationsPage, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.TradeAggregationsPage), a.Error(1) +} + +// Trades is a mocking method +func (m *MockClient) Trades(request TradeRequest) (hProtocol.TradesPage, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.TradesPage), a.Error(1) +} + +// Fund is a mocking method +func (m *MockClient) Fund(addr string) (hProtocol.Transaction, error) { + a := m.Called(addr) + return a.Get(0).(hProtocol.Transaction), a.Error(1) +} + +// StreamTransactions is a mocking method +func (m *MockClient) StreamTransactions(ctx context.Context, request TransactionRequest, handler TransactionHandler) error { + return m.Called(ctx, request, handler).Error(0) +} + +// StreamTrades is a mocking method +func (m *MockClient) StreamTrades(ctx context.Context, request TradeRequest, handler TradeHandler) error { + return m.Called(ctx, request, handler).Error(0) +} + +// StreamEffects is a mocking method +func (m *MockClient) StreamEffects(ctx context.Context, request EffectRequest, handler EffectHandler) error { + return m.Called(ctx, request, handler).Error(0) +} + +// StreamOperations is a mocking method +func (m *MockClient) StreamOperations(ctx context.Context, request OperationRequest, handler OperationHandler) error { + return m.Called(ctx, request, handler).Error(0) 
+} + +// StreamPayments is a mocking method +func (m *MockClient) StreamPayments(ctx context.Context, request OperationRequest, handler OperationHandler) error { + return m.Called(ctx, request, handler).Error(0) +} + +// StreamOffers is a mocking method +func (m *MockClient) StreamOffers(ctx context.Context, request OfferRequest, handler OfferHandler) error { + return m.Called(ctx, request, handler).Error(0) +} + +// StreamLedgers is a mocking method +func (m *MockClient) StreamLedgers(ctx context.Context, request LedgerRequest, handler LedgerHandler) error { + return m.Called(ctx, request, handler).Error(0) +} + +// StreamOrderBooks is a mocking method +func (m *MockClient) StreamOrderBooks(ctx context.Context, request OrderBookRequest, handler OrderBookHandler) error { + return m.Called(ctx, request, handler).Error(0) +} + +// Root is a mocking method +func (m *MockClient) Root() (hProtocol.Root, error) { + a := m.Called() + return a.Get(0).(hProtocol.Root), a.Error(1) +} + +// NextAccountsPage is a mocking method +func (m *MockClient) NextAccountsPage(page hProtocol.AccountsPage) (hProtocol.AccountsPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.AccountsPage), a.Error(1) +} + +// NextAssetsPage is a mocking method +func (m *MockClient) NextAssetsPage(page hProtocol.AssetsPage) (hProtocol.AssetsPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.AssetsPage), a.Error(1) +} + +// PrevAssetsPage is a mocking method +func (m *MockClient) PrevAssetsPage(page hProtocol.AssetsPage) (hProtocol.AssetsPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.AssetsPage), a.Error(1) +} + +// NextLedgersPage is a mocking method +func (m *MockClient) NextLedgersPage(page hProtocol.LedgersPage) (hProtocol.LedgersPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.LedgersPage), a.Error(1) +} + +// PrevLedgersPage is a mocking method +func (m *MockClient) PrevLedgersPage(page hProtocol.LedgersPage) (hProtocol.LedgersPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.LedgersPage), a.Error(1) +} + +// NextEffectsPage is a mocking method +func (m *MockClient) NextEffectsPage(page effects.EffectsPage) (effects.EffectsPage, error) { + a := m.Called(page) + return a.Get(0).(effects.EffectsPage), a.Error(1) +} + +// PrevEffectsPage is a mocking method +func (m *MockClient) PrevEffectsPage(page effects.EffectsPage) (effects.EffectsPage, error) { + a := m.Called(page) + return a.Get(0).(effects.EffectsPage), a.Error(1) +} + +// NextTransactionsPage is a mocking method +func (m *MockClient) NextTransactionsPage(page hProtocol.TransactionsPage) (hProtocol.TransactionsPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.TransactionsPage), a.Error(1) +} + +// PrevTransactionsPage is a mocking method +func (m *MockClient) PrevTransactionsPage(page hProtocol.TransactionsPage) (hProtocol.TransactionsPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.TransactionsPage), a.Error(1) +} + +// NextOperationsPage is a mocking method +func (m *MockClient) NextOperationsPage(page operations.OperationsPage) (operations.OperationsPage, error) { + a := m.Called(page) + return a.Get(0).(operations.OperationsPage), a.Error(1) +} + +// PrevOperationsPage is a mocking method +func (m *MockClient) PrevOperationsPage(page operations.OperationsPage) (operations.OperationsPage, error) { + a := m.Called(page) + return a.Get(0).(operations.OperationsPage), a.Error(1) +} + +// NextPaymentsPage is a mocking method +func (m *MockClient) 
NextPaymentsPage(page operations.OperationsPage) (operations.OperationsPage, error) { + return m.NextOperationsPage(page) +} + +// PrevPaymentsPage is a mocking method +func (m *MockClient) PrevPaymentsPage(page operations.OperationsPage) (operations.OperationsPage, error) { + return m.PrevOperationsPage(page) +} + +// NextOffersPage is a mocking method +func (m *MockClient) NextOffersPage(page hProtocol.OffersPage) (hProtocol.OffersPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.OffersPage), a.Error(1) +} + +// PrevOffersPage is a mocking method +func (m *MockClient) PrevOffersPage(page hProtocol.OffersPage) (hProtocol.OffersPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.OffersPage), a.Error(1) +} + +// NextTradesPage is a mocking method +func (m *MockClient) NextTradesPage(page hProtocol.TradesPage) (hProtocol.TradesPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.TradesPage), a.Error(1) +} + +// PrevTradesPage is a mocking method +func (m *MockClient) PrevTradesPage(page hProtocol.TradesPage) (hProtocol.TradesPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.TradesPage), a.Error(1) +} + +// HomeDomainForAccount is a mocking method +func (m *MockClient) HomeDomainForAccount(aid string) (string, error) { + a := m.Called(aid) + return a.Get(0).(string), a.Error(1) +} + +// NextTradeAggregationsPage is a mocking method +func (m *MockClient) NextTradeAggregationsPage(page hProtocol.TradeAggregationsPage) (hProtocol.TradeAggregationsPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.TradeAggregationsPage), a.Error(1) +} + +// PrevTradeAggregationsPage is a mocking method +func (m *MockClient) PrevTradeAggregationsPage(page hProtocol.TradeAggregationsPage) (hProtocol.TradeAggregationsPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.TradeAggregationsPage), a.Error(1) +} + +func (m *MockClient) LiquidityPoolDetail(request LiquidityPoolRequest) (hProtocol.LiquidityPool, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.LiquidityPool), a.Error(1) +} + +func (m *MockClient) LiquidityPools(request LiquidityPoolsRequest) (hProtocol.LiquidityPoolsPage, error) { + a := m.Called(request) + return a.Get(0).(hProtocol.LiquidityPoolsPage), a.Error(1) +} + +func (m *MockClient) NextLiquidityPoolsPage(page hProtocol.LiquidityPoolsPage) (hProtocol.LiquidityPoolsPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.LiquidityPoolsPage), a.Error(1) +} + +func (m *MockClient) PrevLiquidityPoolsPage(page hProtocol.LiquidityPoolsPage) (hProtocol.LiquidityPoolsPage, error) { + a := m.Called(page) + return a.Get(0).(hProtocol.LiquidityPoolsPage), a.Error(1) +} + +// ensure that the MockClient implements ClientInterface +var _ ClientInterface = &MockClient{} diff --git a/clients/horizonclient/offer_request.go b/clients/horizonclient/offer_request.go new file mode 100644 index 0000000000..510576d0e8 --- /dev/null +++ b/clients/horizonclient/offer_request.go @@ -0,0 +1,87 @@ +package horizonclient + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the OfferRequest struct. 
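+//
+// For example (values taken from the accompanying tests), OfferRequest{OfferID: "12345"}
+// builds "offers/12345", and
+// OfferRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"}
+// builds "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/offers".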
+func (or OfferRequest) BuildURL() (endpoint string, err error) { + if len(or.OfferID) > 0 { + endpoint = fmt.Sprintf("offers/%s", or.OfferID) + } else { + // backwards compatibility support + if len(or.ForAccount) > 0 { + endpoint = fmt.Sprintf("accounts/%s/offers", or.ForAccount) + queryParams := addQueryParams(cursor(or.Cursor), limit(or.Limit), or.Order) + if queryParams != "" { + endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams) + } + } else { + query := url.Values{} + if len(or.Seller) > 0 { + query.Add("seller", or.Seller) + } + if len(or.Selling) > 0 { + query.Add("selling", or.Selling) + } + if len(or.Buying) > 0 { + query.Add("buying", or.Buying) + } + + endpoint = fmt.Sprintf("offers?%s", query.Encode()) + pageParams := addQueryParams(cursor(or.Cursor), limit(or.Limit), or.Order) + if pageParams != "" { + endpoint = fmt.Sprintf("%s&%s", endpoint, pageParams) + } + } + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the offers endpoint +func (or OfferRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := or.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} + +// OfferHandler is a function that is called when a new offer is received +type OfferHandler func(hProtocol.Offer) + +// StreamOffers streams offers processed by the Stellar network for an account. Use context.WithCancel +// to stop streaming or context.Background() if you want to stream indefinitely. +// OfferHandler is a user-supplied function that is executed for each streamed offer received. +func (or OfferRequest) StreamOffers(ctx context.Context, client *Client, handler OfferHandler) (err error) { + endpoint, err := or.BuildURL() + if err != nil { + return errors.Wrap(err, "unable to build endpoint for offers request") + } + + url := fmt.Sprintf("%s%s", client.fixHorizonURL(), endpoint) + + return client.stream(ctx, url, func(data []byte) error { + var offer hProtocol.Offer + err = json.Unmarshal(data, &offer) + if err != nil { + return errors.Wrap(err, "error unmarshaling data for offers request") + } + handler(offer) + return nil + }) +} diff --git a/clients/horizonclient/offer_request_test.go b/clients/horizonclient/offer_request_test.go new file mode 100644 index 0000000000..fcf303ed45 --- /dev/null +++ b/clients/horizonclient/offer_request_test.go @@ -0,0 +1,274 @@ +package horizonclient + +import ( + "context" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOfferRequestBuildUrl(t *testing.T) { + + er := OfferRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + endpoint, err := er.BuildURL() + + // It should return valid offers endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/offers", endpoint) + + er = OfferRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", Cursor: "now", Order: OrderDesc} + endpoint, err = er.BuildURL() + + // It should return valid offers endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/offers?cursor=now&order=desc", endpoint) + + er = OfferRequest{OfferID: "12345"} + endpoint, err 
= er.BuildURL() + + require.NoError(t, err) + assert.Equal(t, "offers/12345", endpoint) +} + +func TestNextOffersPage(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + offerRequest := OfferRequest{ForAccount: "GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG", Limit: 2} + + hmock.On( + "GET", + "https://localhost/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/offers?limit=2", + ).ReturnString(200, firstOffersPage) + + offers, err := client.Offers(offerRequest) + + if assert.NoError(t, err) { + assert.Equal(t, len(offers.Embedded.Records), 2) + } + + assert.Equal(t, int64(2946580), offers.Embedded.Records[0].ID) + + hmock.On( + "GET", + "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/offers?cursor=2946581&limit=2&order=asc", + ).ReturnString(200, emptyOffersPage) + + nextPage, err := client.NextOffersPage(offers) + if assert.NoError(t, err) { + assert.Equal(t, len(nextPage.Embedded.Records), 0) + } +} + +func TestOfferRequestStreamOffers(t *testing.T) { + + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // offers for account + orRequest := OfferRequest{ForAccount: "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C"} + ctx, cancel := context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/accounts/GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C/offers?cursor=now", + ).ReturnString(200, offerStreamResponse) + + offers := make([]hProtocol.Offer, 1) + err := client.StreamOffers(ctx, orRequest, func(offer hProtocol.Offer) { + offers[0] = offer + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, offers[0].Amount, "20.4266087") + assert.Equal(t, offers[0].Seller, "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C") + } + + // test error + orRequest = OfferRequest{ForAccount: "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C"} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/accounts/GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C/offers?cursor=now", + ).ReturnString(500, offerStreamResponse) + + offers = make([]hProtocol.Offer, 1) + err = client.StreamOffers(ctx, orRequest, func(offer hProtocol.Offer) { + cancel() + }) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "got bad HTTP status code 500") + } +} + +func TestStringOfferID(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + offerRequest := OfferRequest{ForAccount: "GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG", Limit: 1} + + hmock.On( + "GET", + "https://localhost/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/offers?limit=1", + ).ReturnString(200, stringOffersPage) + + offers, err := client.Offers(offerRequest) + + if assert.NoError(t, err) { + assert.Equal(t, len(offers.Embedded.Records), 1) + } + + assert.Equal(t, int64(2946580), offers.Embedded.Records[0].ID) +} + +var offerStreamResponse = `data: 
{"_links":{"self":{"href":"https://horizon-testnet.stellar.org/offers/5269100"},"offer_maker":{"href":"https://horizon-testnet.stellar.org/accounts/GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C"}},"id":"5269100","paging_token":"5269100","seller":"GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C","selling":{"asset_type":"credit_alphanum4","asset_code":"DSQ","asset_issuer":"GBDQPTQJDATT7Z7EO4COS4IMYXH44RDLLI6N6WIL5BZABGMUOVMLWMQF"},"buying":{"asset_type":"credit_alphanum4","asset_code":"XCS6","asset_issuer":"GBH2V47NOZRC56QAYCPV5JUBG5NVFJQF5AQTUNFNWNDHSWWTKH2MWR2L"},"amount":"20.4266087","price_r":{"n":24819,"d":10000000},"price":"0.0024819","last_modified_ledger":674449,"last_modified_time":"2019-04-08T11:56:41Z"} +` +var emptyOffersPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/offers?cursor=2946581&limit=2&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/offers?cursor=2946583&limit=2&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/offers?cursor=2946582&limit=2&order=desc" + } + }, + "_embedded": { + "records": [] + } +}` + +var firstOffersPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/offers?cursor=&limit=2&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/offers?cursor=2946581&limit=2&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/offers?cursor=2946580&limit=2&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/2946580" + }, + "offer_maker": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG" + } + }, + "id": "2946580", + "paging_token": "2946580", + "seller": "GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG", + "selling": { + "asset_type": "credit_alphanum4", + "asset_code": "HT", + "asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P" + }, + "buying": { + "asset_type": "credit_alphanum4", + "asset_code": "BTC", + "asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P" + }, + "amount": "33.7252478", + "price_r": { + "n": 15477, + "d": 43975000 + }, + "price": "0.0003519", + "last_modified_ledger": 363492, + "last_modified_time": "2019-05-16T08:35:22Z" + }, + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/2946581" + }, + "offer_maker": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG" + } + }, + "id": "2946581", + "paging_token": "2946581", + "seller": "GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG", + "selling": { + "asset_type": "credit_alphanum4", + "asset_code": "HT", + "asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P" + }, + "buying": { + "asset_type": "credit_alphanum4", + "asset_code": "BTC", + "asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P" + }, + "amount": "20.0242956", + "price_r": { + "n": 3157, + "d": 8795000 + }, + "price": "0.0003590", + 
"last_modified_ledger": 363492, + "last_modified_time": "2019-05-16T08:35:22Z" + } + ] + } +}` + +var stringOffersPage = `{ + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/2946580" + }, + "offer_maker": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG" + } + }, + "id": "2946580", + "paging_token": "2946580", + "seller": "GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG", + "selling": { + "asset_type": "credit_alphanum4", + "asset_code": "HT", + "asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P" + }, + "buying": { + "asset_type": "credit_alphanum4", + "asset_code": "BTC", + "asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P" + }, + "amount": "33.7252478", + "price_r": { + "n": 15477, + "d": 43975000 + }, + "price": "0.0003519", + "last_modified_ledger": 363492, + "last_modified_time": "2019-05-16T08:35:22Z" + } + ] + } +}` diff --git a/clients/horizonclient/operation_request.go b/clients/horizonclient/operation_request.go new file mode 100644 index 0000000000..cc1a4ed372 --- /dev/null +++ b/clients/horizonclient/operation_request.go @@ -0,0 +1,121 @@ +package horizonclient + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the OperationRequest struct. +// If no data is set, it defaults to the build the URL for all operations or all payments; depending on thevalue of `op.endpoint` +func (op OperationRequest) BuildURL() (endpoint string, err error) { + nParams := countParams(op.ForAccount, op.ForLedger, op.ForLiquidityPool, op.forOperationID, op.ForTransaction) + + if nParams > 1 { + return endpoint, errors.New("invalid request: too many parameters") + } + + if op.endpoint == "" { + return endpoint, errors.New("internal error, endpoint not set") + } + + endpoint = op.endpoint + if op.ForAccount != "" { + endpoint = fmt.Sprintf("accounts/%s/%s", op.ForAccount, op.endpoint) + } + if op.ForClaimableBalance != "" { + endpoint = fmt.Sprintf("claimable_balances/%s/%s", op.ForClaimableBalance, op.endpoint) + } + if op.ForLedger > 0 { + endpoint = fmt.Sprintf("ledgers/%d/%s", op.ForLedger, op.endpoint) + } + if op.ForLiquidityPool != "" { + endpoint = fmt.Sprintf("liquidity_pools/%s/%s", op.ForLiquidityPool, op.endpoint) + } + if op.forOperationID != "" { + endpoint = fmt.Sprintf("operations/%s", op.forOperationID) + } + if op.ForTransaction != "" { + endpoint = fmt.Sprintf("transactions/%s/%s", op.ForTransaction, op.endpoint) + } + + queryParams := addQueryParams(cursor(op.Cursor), limit(op.Limit), op.Order, + includeFailed(op.IncludeFailed), join(op.Join)) + if queryParams != "" { + endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the operations endpoint +func (op OperationRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := op.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} + +// setEndpoint sets the endpoint for the OperationRequest +func (op *OperationRequest) setEndpoint(endpoint string) *OperationRequest { + if endpoint == 
"payments" { + op.endpoint = endpoint + } else { + // default to operations + op.endpoint = "operations" + } + return op +} + +// SetPaymentsEndpoint is a helper function that sets the `endpoint` for OperationRequests to `payments` +func (op *OperationRequest) SetPaymentsEndpoint() *OperationRequest { + return op.setEndpoint("payments") +} + +// SetOperationsEndpoint is a helper function that sets the `endpoint` for OperationRequests to `operations` +func (op *OperationRequest) SetOperationsEndpoint() *OperationRequest { + return op.setEndpoint("operations") +} + +// OperationHandler is a function that is called when a new operation is received +type OperationHandler func(operations.Operation) + +// StreamOperations streams stellar operations. It can be used to stream all operations or operations +// for and account. Use context.WithCancel to stop streaming or context.Background() if you want to +// stream indefinitely. OperationHandler is a user-supplied function that is executed for each streamed +// operation received. +func (op OperationRequest) StreamOperations(ctx context.Context, client *Client, handler OperationHandler) error { + endpoint, err := op.BuildURL() + if err != nil { + return errors.Wrap(err, "unable to build endpoint for operation request") + } + + url := fmt.Sprintf("%s%s", client.fixHorizonURL(), endpoint) + return client.stream(ctx, url, func(data []byte) error { + var baseRecord operations.Base + + if err = json.Unmarshal(data, &baseRecord); err != nil { + return errors.Wrap(err, "error unmarshaling data for operation request") + } + + ops, err := operations.UnmarshalOperation(baseRecord.GetTypeI(), data) + if err != nil { + return errors.Wrap(err, "unmarshaling to the correct operation type") + } + + handler(ops) + return nil + }) +} diff --git a/clients/horizonclient/operation_request_test.go b/clients/horizonclient/operation_request_test.go new file mode 100644 index 0000000000..0bb2a5c1b4 --- /dev/null +++ b/clients/horizonclient/operation_request_test.go @@ -0,0 +1,516 @@ +package horizonclient + +import ( + "context" + "testing" + + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOperationRequestBuildUrl(t *testing.T) { + op := OperationRequest{endpoint: "operations"} + endpoint, err := op.BuildURL() + + // It should return valid all operations endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "operations", endpoint) + + op = OperationRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", endpoint: "operations"} + endpoint, err = op.BuildURL() + + // It should return valid account operations endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/operations", endpoint) + + op = OperationRequest{ForClaimableBalance: "00000000178826fbfe339e1f5c53417c6fedfe2c05e8bec14303143ec46b38981b09c3f9", endpoint: "operations"} + endpoint, err = op.BuildURL() + + // It should return valid account transactions endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "claimable_balances/00000000178826fbfe339e1f5c53417c6fedfe2c05e8bec14303143ec46b38981b09c3f9/operations", endpoint) + + op = OperationRequest{ForLedger: 123, endpoint: "operations"} + endpoint, err = op.BuildURL() + + // It should return valid ledger operations endpoint and no errors + require.NoError(t, err) + assert.Equal(t, 
"ledgers/123/operations", endpoint) + + op = OperationRequest{ForLiquidityPool: "123", endpoint: "operations"} + endpoint, err = op.BuildURL() + + // It should return valid liquidity pool effects operations and no errors + require.NoError(t, err) + assert.Equal(t, "liquidity_pools/123/operations", endpoint) + + op = OperationRequest{forOperationID: "123", endpoint: "operations"} + endpoint, err = op.BuildURL() + + // It should return valid operation operations endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "operations/123", endpoint) + + op = OperationRequest{ForTransaction: "123", endpoint: "payments"} + endpoint, err = op.BuildURL() + + // It should return valid transaction payments endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "transactions/123/payments", endpoint) + + op = OperationRequest{ForLedger: 123, forOperationID: "789", endpoint: "operations"} + _, err = op.BuildURL() + + // error case: too many parameters for building any operation endpoint + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid request: too many parameters") + } + + op = OperationRequest{Cursor: "123456", Limit: 30, Order: OrderAsc, endpoint: "operations", Join: "transactions"} + endpoint, err = op.BuildURL() + // It should return valid all operations endpoint with query params and no errors + require.NoError(t, err) + assert.Equal(t, "operations?cursor=123456&join=transactions&limit=30&order=asc", endpoint) + + op = OperationRequest{Cursor: "123456", Limit: 30, Order: OrderAsc, endpoint: "payments", Join: "transactions"} + endpoint, err = op.BuildURL() + // It should return valid all operations endpoint with query params and no errors + require.NoError(t, err) + assert.Equal(t, "payments?cursor=123456&join=transactions&limit=30&order=asc", endpoint) + + op = OperationRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", endpoint: "payments", Join: "transactions"} + endpoint, err = op.BuildURL() + // It should return valid all operations endpoint with query params and no errors + require.NoError(t, err) + assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/payments?join=transactions", endpoint) + + op = OperationRequest{forOperationID: "1234", endpoint: "payments", Join: "transactions"} + endpoint, err = op.BuildURL() + // It should return valid all operations endpoint with query params and no errors + require.NoError(t, err) + assert.Equal(t, "operations/1234?join=transactions", endpoint) +} + +func TestNextOperationsPage(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + operationRequest := OperationRequest{Limit: 2} + + hmock.On( + "GET", + "https://localhost/operations?limit=2", + ).ReturnString(200, firstOperationsPage) + + ops, err := client.Operations(operationRequest) + + if assert.NoError(t, err) { + assert.Equal(t, len(ops.Embedded.Records), 2) + } + + hmock.On( + "GET", + "https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc", + ).ReturnString(200, emptyOperationsPage) + + nextPage, err := client.NextOperationsPage(ops) + if assert.NoError(t, err) { + assert.Equal(t, len(nextPage.Embedded.Records), 0) + } +} + +func TestOperationRequestStreamOperations(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // All operations + operationRequest := OperationRequest{} + ctx, cancel := 
context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/operations?cursor=now", + ).ReturnString(200, operationStreamResponse) + + operationStream := make([]operations.Operation, 1) + err := client.StreamOperations(ctx, operationRequest, func(op operations.Operation) { + operationStream[0] = op + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, operationStream[0].GetType(), "create_account") + } + + // Account payments + operationRequest = OperationRequest{ForAccount: "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR"} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/accounts/GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR/payments?cursor=now", + ).ReturnString(200, operationStreamResponse) + + err = client.StreamPayments(ctx, operationRequest, func(op operations.Operation) { + operationStream[0] = op + cancel() + }) + + if assert.NoError(t, err) { + payment, ok := operationStream[0].(operations.CreateAccount) + assert.Equal(t, ok, true) + assert.Equal(t, payment.Funder, "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR") + } + + // test connection error + operationRequest = OperationRequest{} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/operations?cursor=now", + ).ReturnString(500, operationStreamResponse) + + err = client.StreamOperations(ctx, operationRequest, func(op operations.Operation) { + operationStream[0] = op + cancel() + }) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "got bad HTTP status code 500") + } +} + +func TestManageSellManageBuyOfferOfferID(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + testCases := []struct { + desc string + payload string + }{ + { + desc: "offer_id as a string", + payload: manageSellBuyOfferOperationsPage, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + hmock.On( + "GET", + "https://localhost/operations", + ).ReturnString(200, tc.payload) + operationRequest := OperationRequest{} + ops, err := client.Operations(operationRequest) + + if assert.NoError(t, err) { + assert.Equal(t, len(ops.Embedded.Records), 2) + } + + mso, ok := ops.Embedded.Records[0].(operations.ManageSellOffer) + assert.True(t, ok) + assert.Equal(t, int64(127538671), mso.OfferID) + + mbo, ok := ops.Embedded.Records[1].(operations.ManageBuyOffer) + assert.True(t, ok) + assert.Equal(t, int64(127538672), mbo.OfferID) + }) + } +} + +var operationStreamResponse = `data: 
{"_links":{"self":{"href":"https://horizon-testnet.stellar.org/operations/4934917427201"},"transaction":{"href":"https://horizon-testnet.stellar.org/transactions/1c1449106a54cccd8a2ec2094815ad9db30ae54c69c3309dd08d13fdb8c749de"},"effects":{"href":"https://horizon-testnet.stellar.org/operations/4934917427201/effects"},"succeeds":{"href":"https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=4934917427201"},"precedes":{"href":"https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=4934917427201"}},"id":"4934917427201","paging_token":"4934917427201","transaction_successful":true,"source_account":"GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR","type":"create_account","type_i":0,"created_at":"2019-02-27T11:32:39Z","transaction_hash":"1c1449106a54cccd8a2ec2094815ad9db30ae54c69c3309dd08d13fdb8c749de","starting_balance":"10000.0000000","funder":"GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR","account":"GDBLBBDIUULY3HGIKXNK6WVBISY7DCNCDA45EL7NTXWX5R4UZ26HGMGS"} +` + +var firstOperationsPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=&limit=2&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967681&limit=2&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/661424967681" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/749e4f8933221b9942ef38a02856803f379789ec8d971f1f60535db70135673e" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/661424967681/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=661424967681" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=661424967681" + } + }, + "id": "661424967681", + "paging_token": "661424967681", + "transaction_successful": true, + "source_account": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "type": "create_account", + "type_i": 0, + "created_at": "2019-04-24T09:16:14Z", + "transaction_hash": "749e4f8933221b9942ef38a02856803f379789ec8d971f1f60535db70135673e", + "starting_balance": "10000000000.0000000", + "funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR" + }, + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/661424967682" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/749e4f8933221b9942ef38a02856803f379789ec8d971f1f60535db70135673e" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/661424967682/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=661424967682" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=661424967682" + } + }, + "id": "661424967682", + "paging_token": "661424967682", + "transaction_successful": true, + "source_account": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "type": "create_account", + "type_i": 0, + "created_at": "2019-04-24T09:16:14Z", + "transaction_hash": "749e4f8933221b9942ef38a02856803f379789ec8d971f1f60535db70135673e", + "starting_balance": "10000.0000000", + "funder": 
"GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "account": "GDO34SQXVOSNODK7JCTAXLZUPSAF3JIH4ADQELVIKOQJUWQ3U4BMSCSH" + } + ] + } +}` + +var emptyOperationsPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967684&limit=2&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967683&limit=2&order=desc" + } + }, + "_embedded": { + "records": [] + } +}` + +var numberManageSellBuyOfferOperations = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967684&limit=2&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967683&limit=2&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/972702718365697" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/cfe9eba317025dd0cff111967a3709358153e9ee97472e67c17e42837dd50a52" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/972702718365697/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=972702718365697" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=972702718365697" + } + }, + "id": "972702718365697", + "paging_token": "972702718365697", + "transaction_successful": true, + "source_account": "GBPPEHGF322UNA62WHRHBCUBCVOIT3SLUY7U7XQEEISZ5B2JLZ3AYTDC", + "type": "manage_offer", + "type_i": 3, + "created_at": "2019-11-13T16:46:36Z", + "transaction_hash": "cfe9eba317025dd0cff111967a3709358153e9ee97472e67c17e42837dd50a52", + "amount": "1000.0000000", + "price": "0.1312531", + "price_r": { + "n": 265, + "d": 2019 + }, + "buying_asset_type": "credit_alphanum4", + "buying_asset_code": "BAT", + "buying_asset_issuer": "GBBJMSXCTLXVOYRL7SJ5ABLJ3GGCUFQXCFIXYUOHZZUDAZJKLXCO32AU", + "selling_asset_type": "native", + "offer_id": 127538671 + }, + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/158041911595009" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/8a4db87e4749130ba32924943c2f219de497fe2d4f3e074187c5d2159ca2d134" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/158041911595009/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=158041911595009" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=158041911595009" + } + }, + "id": "158041911595009", + "paging_token": "158041911595009", + "transaction_successful": true, + "source_account": "GBBXM7GVMXZMQWDEKSWGEW6GT6XMPBLEVEPLYWIQF3SRS43AIJVU3QES", + "type": "manage_buy_offer", + "type_i": 12, + "created_at": "2019-11-01T17:06:47Z", + "transaction_hash": "8a4db87e4749130ba32924943c2f219de497fe2d4f3e074187c5d2159ca2d134", + "amount": "1.0000000", + "price": "0.5000000", + "price_r": { + "n": 1, + "d": 2 + }, + "buying_asset_type": "credit_alphanum12", + "buying_asset_code": "MosaiRMBA", + "buying_asset_issuer": "GBBWA24VLGPVMMFMF2OJHW3QHFVSILK2UJSNTORRC6QHK6EPTUADAJFA", + "selling_asset_type": "native", + "offer_id": 127538672 + } + ] + } + 
}` + +var manageSellBuyOfferOperationsPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967682&limit=2&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967684&limit=2&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/operations?cursor=661424967683&limit=2&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/972702718365697" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/cfe9eba317025dd0cff111967a3709358153e9ee97472e67c17e42837dd50a52" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/972702718365697/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc\u0026cursor=972702718365697" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc\u0026cursor=972702718365697" + } + }, + "id": "972702718365697", + "paging_token": "972702718365697", + "transaction_successful": true, + "source_account": "GBPPEHGF322UNA62WHRHBCUBCVOIT3SLUY7U7XQEEISZ5B2JLZ3AYTDC", + "type": "manage_offer", + "type_i": 3, + "created_at": "2019-11-13T16:46:36Z", + "transaction_hash": "cfe9eba317025dd0cff111967a3709358153e9ee97472e67c17e42837dd50a52", + "amount": "1000.0000000", + "price": "0.1312531", + "price_r": { + "n": 265, + "d": 2019 + }, + "buying_asset_type": "credit_alphanum4", + "buying_asset_code": "BAT", + "buying_asset_issuer": "GBBJMSXCTLXVOYRL7SJ5ABLJ3GGCUFQXCFIXYUOHZZUDAZJKLXCO32AU", + "selling_asset_type": "native", + "offer_id": "127538671" + }, + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/158041911595009" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/8a4db87e4749130ba32924943c2f219de497fe2d4f3e074187c5d2159ca2d134" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/158041911595009/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=158041911595009" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=158041911595009" + } + }, + "id": "158041911595009", + "paging_token": "158041911595009", + "transaction_successful": true, + "source_account": "GBBXM7GVMXZMQWDEKSWGEW6GT6XMPBLEVEPLYWIQF3SRS43AIJVU3QES", + "type": "manage_buy_offer", + "type_i": 12, + "created_at": "2019-11-01T17:06:47Z", + "transaction_hash": "8a4db87e4749130ba32924943c2f219de497fe2d4f3e074187c5d2159ca2d134", + "amount": "1.0000000", + "price": "0.5000000", + "price_r": { + "n": 1, + "d": 2 + }, + "buying_asset_type": "credit_alphanum12", + "buying_asset_code": "MosaiRMBA", + "buying_asset_issuer": "GBBWA24VLGPVMMFMF2OJHW3QHFVSILK2UJSNTORRC6QHK6EPTUADAJFA", + "selling_asset_type": "native", + "offer_id": "127538672" + } + ] + } + }` diff --git a/clients/horizonclient/order_book_request.go b/clients/horizonclient/order_book_request.go new file mode 100644 index 0000000000..fee5547397 --- /dev/null +++ b/clients/horizonclient/order_book_request.go @@ -0,0 +1,73 @@ +package horizonclient + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the OrderBookRequest struct. 
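+//
+// Illustrative usage (a sketch only, not exercised by this change; the asset
+// issuer below is a placeholder account ID, and query parameters come back in
+// the sorted order produced by URL encoding):
+//
+//  obr := OrderBookRequest{
+//      SellingAssetType:  AssetTypeNative,
+//      BuyingAssetType:   AssetType4,
+//      BuyingAssetCode:   "ABC",
+//      BuyingAssetIssuer: "G...ISSUER",
+//  }
+//  endpoint, err := obr.BuildURL()
+//  // endpoint: "order_book?buying_asset_code=ABC&buying_asset_issuer=G...ISSUER&buying_asset_type=credit_alphanum4&selling_asset_type=native"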
+func (obr OrderBookRequest) BuildURL() (endpoint string, err error) { + endpoint = "order_book" + + // add the parameters to a map here so it is easier for addQueryParams to populate the parameter list + // We can't use assetCode and assetIssuer types here because the paremeter names are different + paramMap := make(map[string]string) + paramMap["selling_asset_type"] = string(obr.SellingAssetType) + paramMap["selling_asset_code"] = obr.SellingAssetCode + paramMap["selling_asset_issuer"] = obr.SellingAssetIssuer + paramMap["buying_asset_type"] = string(obr.BuyingAssetType) + paramMap["buying_asset_code"] = obr.BuyingAssetCode + paramMap["buying_asset_issuer"] = obr.BuyingAssetIssuer + + queryParams := addQueryParams(paramMap, limit(obr.Limit)) + if queryParams != "" { + endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the order book endpoint +func (obr OrderBookRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := obr.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} + +// OrderBookHandler is a function that is called when a new order summary is received +type OrderBookHandler func(hProtocol.OrderBookSummary) + +// StreamOrderBooks streams the orderbook for a given asset pair. Use context.WithCancel +// to stop streaming or context.Background() if you want to stream indefinitely. +// OrderBookHandler is a user-supplied function that is executed for each streamed order received. +func (obr OrderBookRequest) StreamOrderBooks(ctx context.Context, client *Client, handler OrderBookHandler) error { + endpoint, err := obr.BuildURL() + if err != nil { + return errors.Wrap(err, "unable to build endpoint for orderbook request") + } + + url := fmt.Sprintf("%s%s", client.fixHorizonURL(), endpoint) + return client.stream(ctx, url, func(data []byte) error { + var orderbook hProtocol.OrderBookSummary + err = json.Unmarshal(data, &orderbook) + if err != nil { + return errors.Wrap(err, "error unmarshaling data for orderbook request") + } + handler(orderbook) + return nil + }) +} diff --git a/clients/horizonclient/order_book_request_test.go b/clients/horizonclient/order_book_request_test.go new file mode 100644 index 0000000000..6c1f56401a --- /dev/null +++ b/clients/horizonclient/order_book_request_test.go @@ -0,0 +1,83 @@ +package horizonclient + +import ( + "context" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOrderBookRequestBuildUrl(t *testing.T) { + obr := OrderBookRequest{} + endpoint, err := obr.BuildURL() + + // It should return no errors and orderbook endpoint + // Horizon will return an error though because there are no parameters + require.NoError(t, err) + assert.Equal(t, "order_book", endpoint) + + obr = OrderBookRequest{SellingAssetType: AssetTypeNative, BuyingAssetType: AssetTypeNative} + endpoint, err = obr.BuildURL() + + // It should return valid assets endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "order_book?buying_asset_type=native&selling_asset_type=native", endpoint) + + obr = OrderBookRequest{SellingAssetType: AssetTypeNative, BuyingAssetType: AssetType4, BuyingAssetCode: "ABC", BuyingAssetIssuer: 
"GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + endpoint, err = obr.BuildURL() + + // It should return valid assets endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "order_book?buying_asset_code=ABC&buying_asset_issuer=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU&buying_asset_type=credit_alphanum4&selling_asset_type=native", endpoint) +} + +func TestOrderBookRequestStreamOrderBooks(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + orderbookRequest := OrderBookRequest{SellingAssetType: AssetTypeNative, BuyingAssetType: AssetType4, BuyingAssetCode: "ABC", BuyingAssetIssuer: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + ctx, cancel := context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/order_book?buying_asset_code=ABC&buying_asset_issuer=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU&buying_asset_type=credit_alphanum4&cursor=now&selling_asset_type=native", + ).ReturnString(200, orderbookStreamResponse) + + orderbooks := make([]hProtocol.OrderBookSummary, 1) + err := client.StreamOrderBooks(ctx, orderbookRequest, func(orderbook hProtocol.OrderBookSummary) { + orderbooks[0] = orderbook + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, orderbooks[0].Selling.Type, "native") + assert.Equal(t, orderbooks[0].Buying.Type, "credit_alphanum4") + assert.Equal(t, orderbooks[0].Buying.Code, "ABC") + assert.Equal(t, orderbooks[0].Buying.Issuer, "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU") + } + + // test error + orderbookRequest = OrderBookRequest{} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/order_book?cursor=now", + ).ReturnString(500, orderbookStreamResponse) + + err = client.StreamOrderBooks(ctx, orderbookRequest, func(orderbook hProtocol.OrderBookSummary) { + cancel() + }) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "got bad HTTP status code 500") + } +} + +var orderbookStreamResponse = `data: 
{"bids":[{"price_r":{"n":10000000,"d":416041},"price":"24.0360926","amount":"64.5477778"},{"price_r":{"n":1250000,"d":52009},"price":"24.0343018","amount":"69.0955580"},{"price_r":{"n":10000000,"d":416173},"price":"24.0284689","amount":"48.0957175"},{"price_r":{"n":10000000,"d":416293},"price":"24.0215425","amount":"85.2955923"},{"price_r":{"n":2000000,"d":83261},"price":"24.0208501","amount":"95.0060029"},{"price_r":{"n":10000000,"d":416359},"price":"24.0177347","amount":"21.0996208"},{"price_r":{"n":2000000,"d":83317},"price":"24.0047049","amount":"58.5071234"},{"price_r":{"n":5000000,"d":208313},"price":"24.0023426","amount":"2.6124606"},{"price_r":{"n":10000000,"d":416703},"price":"23.9979074","amount":"75.2954767"},{"price_r":{"n":10000000,"d":416799},"price":"23.9923800","amount":"90.8729460"},{"price_r":{"n":1250000,"d":52113},"price":"23.9863374","amount":"98.1852777"},{"price_r":{"n":10000000,"d":417043},"price":"23.9783428","amount":"87.1819093"},{"price_r":{"n":1250000,"d":52237},"price":"23.9293987","amount":"46.2976363"},{"price_r":{"n":10000000,"d":418173},"price":"23.9135477","amount":"30.5438228"},{"price_r":{"n":5000000,"d":209337},"price":"23.8849320","amount":"92.2168107"},{"price_r":{"n":1600,"d":67},"price":"23.8805970","amount":"34.1880836"},{"price_r":{"n":25000,"d":1047},"price":"23.8777459","amount":"1.5260053"},{"price_r":{"n":2500000,"d":104701},"price":"23.8775179","amount":"28.8883583"},{"price_r":{"n":10000000,"d":418889},"price":"23.8726727","amount":"32.5403317"},{"price_r":{"n":5000000,"d":209463},"price":"23.8705643","amount":"68.7506816"}],"asks":[{"price_r":{"n":60099621,"d":2500000},"price":"24.0398484","amount":"114240.9695894"},{"price_r":{"n":2000,"d":83},"price":"24.0963855","amount":"10.6240000"},{"price_r":{"n":243902439,"d":10000000},"price":"24.3902439","amount":"5098.5158704"},{"price_r":{"n":247581003,"d":10000000},"price":"24.7581003","amount":"48.7365083"},{"price_r":{"n":247622939,"d":10000000},"price":"24.7622939","amount":"85.4807258"},{"price_r":{"n":30954891,"d":1250000},"price":"24.7639128","amount":"73.3863524"},{"price_r":{"n":248116049,"d":10000000},"price":"24.8116049","amount":"10.8025861"},{"price_r":{"n":124071407,"d":5000000},"price":"24.8142814","amount":"40.5349552"},{"price_r":{"n":124089177,"d":5000000},"price":"24.8178354","amount":"98.5958629"},{"price_r":{"n":248207821,"d":10000000},"price":"24.8207821","amount":"35.9280393"},{"price_r":{"n":62052967,"d":2500000},"price":"24.8211868","amount":"27.1415841"},{"price_r":{"n":248326957,"d":10000000},"price":"24.8326957","amount":"64.7660814"},{"price_r":{"n":248453671,"d":10000000},"price":"24.8453671","amount":"52.3970380"},{"price_r":{"n":248913989,"d":10000000},"price":"24.8913989","amount":"98.5221362"},{"price_r":{"n":31129641,"d":1250000},"price":"24.9037128","amount":"40.6966868"},{"price_r":{"n":249076933,"d":10000000},"price":"24.9076933","amount":"86.4499134"},{"price_r":{"n":249136251,"d":10000000},"price":"24.9136251","amount":"53.6600249"},{"price_r":{"n":249189189,"d":10000000},"price":"24.9189189","amount":"76.1849984"},{"price_r":{"n":249391503,"d":10000000},"price":"24.9391503","amount":"35.8199766"},{"price_r":{"n":15590707,"d":625000},"price":"24.9451312","amount":"51.2253042"}],"base":{"asset_type":"native"},"counter":{"asset_type":"credit_alphanum4","asset_code":"ABC","asset_issuer":"GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"}} +` diff --git a/clients/horizonclient/paths_request.go b/clients/horizonclient/paths_request.go new file mode 
100644 index 0000000000..ab603b59ea --- /dev/null +++ b/clients/horizonclient/paths_request.go @@ -0,0 +1,47 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the PathsRequest struct. +func (pr PathsRequest) BuildURL() (endpoint string, err error) { + endpoint = "paths" + + // add the parameters to a map here so it is easier for addQueryParams to populate the parameter list + // We can't use assetCode and assetIssuer types here because the paremeter names are different + paramMap := make(map[string]string) + paramMap["destination_account"] = pr.DestinationAccount + paramMap["destination_asset_type"] = string(pr.DestinationAssetType) + paramMap["destination_asset_code"] = pr.DestinationAssetCode + paramMap["destination_asset_issuer"] = pr.DestinationAssetIssuer + paramMap["destination_amount"] = pr.DestinationAmount + paramMap["source_account"] = pr.SourceAccount + paramMap["source_assets"] = pr.SourceAssets + + queryParams := addQueryParams(paramMap) + if queryParams != "" { + endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the path payment endpoint +func (pr PathsRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := pr.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/paths_request_test.go b/clients/horizonclient/paths_request_test.go new file mode 100644 index 0000000000..6354407570 --- /dev/null +++ b/clients/horizonclient/paths_request_test.go @@ -0,0 +1,210 @@ +package horizonclient + +import ( + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPathsRequestBuildUrl(t *testing.T) { + pr := PathsRequest{} + endpoint, err := pr.BuildURL() + + // It should return no errors and orderbook endpoint + // Horizon will return an error though because there are no parameters + require.NoError(t, err) + assert.Equal(t, "paths", endpoint) + + pr = PathsRequest{ + DestinationAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + DestinationAmount: "100", + DestinationAssetCode: "NGN", + DestinationAssetIssuer: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + DestinationAssetType: AssetType4, + SourceAccount: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + SourceAssets: "COP:GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + } + + endpoint, err = pr.BuildURL() + + // It should return valid assets endpoint and no errors + require.NoError(t, err) + assert.Equal( + t, + "paths?destination_account=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU&destination_amount=100&destination_asset_code=NGN&destination_asset_issuer=GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM&destination_asset_type=credit_alphanum4&source_account=GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM&source_assets=COP%3AGDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + endpoint, + ) + +} + +func TestPathsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + pr 
:= PathsRequest{ + DestinationAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + DestinationAmount: "100", + DestinationAssetCode: "NGN", + DestinationAssetIssuer: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + DestinationAssetType: AssetType4, + SourceAccount: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + } + + // orderbook for XLM/USD + hmock.On( + "GET", + "https://localhost/paths?destination_account=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU&destination_amount=100&destination_asset_code=NGN&destination_asset_issuer=GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM&destination_asset_type=credit_alphanum4&source_account=GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + ).ReturnString(200, pathsResponse) + + paths, err := client.StrictReceivePaths(pr) + if assert.NoError(t, err) { + assert.IsType(t, paths, hProtocol.PathsPage{}) + record := paths.Embedded.Records[0] + assert.Equal(t, record.DestinationAmount, "20.0000000") + assert.Equal(t, record.DestinationAssetCode, "EUR") + assert.Equal(t, record.SourceAmount, "30.0000000") + } + + // failure response + pr = PathsRequest{} + hmock.On( + "GET", + "https://localhost/paths", + ).ReturnString(400, badRequestResponse) + + _, err = client.StrictReceivePaths(pr) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Bad Request") + } + +} + +func TestStrictReceivePathsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + pr := PathsRequest{ + DestinationAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + DestinationAmount: "100", + DestinationAssetCode: "NGN", + DestinationAssetIssuer: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + DestinationAssetType: AssetType4, + SourceAccount: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + } + + // orderbook for XLM/USD + hmock.On( + "GET", + "https://localhost/paths?destination_account=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU&destination_amount=100&destination_asset_code=NGN&destination_asset_issuer=GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM&destination_asset_type=credit_alphanum4&source_account=GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + ).ReturnString(200, pathsResponse) + + paths, err := client.StrictReceivePaths(pr) + if assert.NoError(t, err) { + assert.IsType(t, paths, hProtocol.PathsPage{}) + record := paths.Embedded.Records[0] + assert.Equal(t, record.DestinationAmount, "20.0000000") + assert.Equal(t, record.DestinationAssetCode, "EUR") + assert.Equal(t, record.SourceAmount, "30.0000000") + } + + // failure response + pr = PathsRequest{} + hmock.On( + "GET", + "https://localhost/paths", + ).ReturnString(400, badRequestResponse) + + _, err = client.StrictReceivePaths(pr) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Bad Request") + } + +} + +var badRequestResponse = `{ + "type": "https://stellar.org/horizon-errors/bad_request", + "title": "Bad Request", + "status": 400, + "detail": "The request you sent was invalid in some way", + "extras": { + "invalid_field": "destination_amount", + "reason": "Value must be positive" + } +}` + +var pathsResponse = `{ + "_embedded": 
{ + "records": [ + { + "destination_amount": "20.0000000", + "destination_asset_code": "EUR", + "destination_asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "destination_asset_type": "credit_alphanum4", + "path": [], + "source_amount": "30.0000000", + "source_asset_code": "USD", + "source_asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "source_asset_type": "credit_alphanum4" + }, + { + "destination_amount": "20.0000000", + "destination_asset_code": "EUR", + "destination_asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "destination_asset_type": "credit_alphanum4", + "path": [ + { + "asset_code": "1", + "asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "asset_type": "credit_alphanum4" + } + ], + "source_amount": "20.0000000", + "source_asset_code": "USD", + "source_asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "source_asset_type": "credit_alphanum4" + }, + { + "destination_amount": "20.0000000", + "destination_asset_code": "EUR", + "destination_asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "destination_asset_type": "credit_alphanum4", + "path": [ + { + "asset_code": "21", + "asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "asset_type": "credit_alphanum4" + }, + { + "asset_code": "22", + "asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "asset_type": "credit_alphanum4" + } + ], + "source_amount": "20.0000000", + "source_asset_code": "USD", + "source_asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "source_asset_type": "credit_alphanum4" + } + ] + }, + "_links": { + "self": { + "href": "/paths" + } + } +}` diff --git a/clients/horizonclient/root_test.go b/clients/horizonclient/root_test.go new file mode 100644 index 0000000000..6d593b0653 --- /dev/null +++ b/clients/horizonclient/root_test.go @@ -0,0 +1,104 @@ +package horizonclient + +import ( + "testing" + + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" +) + +func TestRoot(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // happy path + hmock.On( + "GET", + "https://localhost/", + ).ReturnString(200, rootResponse) + + root, err := client.Root() + if assert.NoError(t, err) { + assert.Equal(t, root.HorizonVersion, "0.17.6-unstable-bc999a67d0b2413d8abd76153a56733c7d517484") + assert.Equal(t, root.StellarCoreVersion, "stellar-core 11.0.0 (236f831521b6724c0ae63906416faa997ef27e19)") + assert.Equal(t, root.HorizonSequence, int32(84959)) + assert.Equal(t, root.NetworkPassphrase, "Test SDF Network ; September 2015") + } + + // failure response + hmock.On( + "GET", + "https://localhost/", + ).ReturnString(404, notFoundResponse) + + _, err = client.Root() + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Resource Missing") + } + + // connection error + hmock.On( + "GET", + "https://localhost/", + ).ReturnError("http.Client error") + + _, err = client.Root() + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "http.Client error") + _, ok := err.(*Error) + assert.Equal(t, ok, false) + } +} + +var rootResponse = `{ + "_links": { + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/{account_id}", + "templated": true + 
}, + "account_transactions": { + "href": "https://horizon-testnet.stellar.org/accounts/{account_id}/transactions{?cursor,limit,order}", + "templated": true + }, + "assets": { + "href": "https://horizon-testnet.stellar.org/assets{?asset_code,asset_issuer,cursor,limit,order}", + "templated": true + }, + "friendbot": { + "href": "https://friendbot.stellar.org/{?addr}", + "templated": true + }, + "metrics": { + "href": "https://horizon-testnet.stellar.org/metrics" + }, + "order_book": { + "href": "https://horizon-testnet.stellar.org/order_book{?selling_asset_type,selling_asset_code,selling_asset_issuer,buying_asset_type,buying_asset_code,buying_asset_issuer,limit}", + "templated": true + }, + "self": { + "href": "https://horizon-testnet.stellar.org/" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/{hash}", + "templated": true + }, + "transactions": { + "href": "https://horizon-testnet.stellar.org/transactions{?cursor,limit,order}", + "templated": true + } + }, + "horizon_version": "0.17.6-unstable-bc999a67d0b2413d8abd76153a56733c7d517484", + "core_version": "stellar-core 11.0.0 (236f831521b6724c0ae63906416faa997ef27e19)", + "history_latest_ledger": 84959, + "history_elder_ledger": 1, + "core_latest_ledger": 84959, + "network_passphrase": "Test SDF Network ; September 2015", + "current_protocol_version": 10, + "core_supported_protocol_version": 11 +}` diff --git a/clients/horizonclient/strict_send_paths_request.go b/clients/horizonclient/strict_send_paths_request.go new file mode 100644 index 0000000000..14a494b404 --- /dev/null +++ b/clients/horizonclient/strict_send_paths_request.go @@ -0,0 +1,46 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the PathsRequest struct. 
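+//
+// Illustrative usage of StrictSendPathsRequest (a sketch only, not exercised by
+// this change; the asset issuer below is a placeholder account ID, and values
+// such as ":" and "," in DestinationAssets are URL-encoded in the result):
+//
+//  pr := StrictSendPathsRequest{
+//      SourceAmount:      "100",
+//      SourceAssetCode:   "NGN",
+//      SourceAssetIssuer: "G...ISSUER",
+//      SourceAssetType:   AssetType4,
+//      DestinationAssets: "EURT:G...ISSUER,native",
+//  }
+//  endpoint, err := pr.BuildURL()
+//  // endpoint: "paths/strict-send?destination_assets=EURT%3AG...ISSUER%2Cnative&source_amount=100&source_asset_code=NGN&source_asset_issuer=G...ISSUER&source_asset_type=credit_alphanum4"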
+func (pr StrictSendPathsRequest) BuildURL() (endpoint string, err error) { + endpoint = "paths/strict-send" + + // add the parameters to a map here so it is easier for addQueryParams to populate the parameter list + // We can't use assetCode and assetIssuer types here because the parameter names are different + paramMap := make(map[string]string) + paramMap["destination_assets"] = pr.DestinationAssets + paramMap["destination_account"] = pr.DestinationAccount + paramMap["source_asset_type"] = string(pr.SourceAssetType) + paramMap["source_asset_code"] = pr.SourceAssetCode + paramMap["source_asset_issuer"] = pr.SourceAssetIssuer + paramMap["source_amount"] = pr.SourceAmount + + queryParams := addQueryParams(paramMap) + if queryParams != "" { + endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the strict send path payment endpoint +func (pr StrictSendPathsRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := pr.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/strict_send_paths_request_test.go b/clients/horizonclient/strict_send_paths_request_test.go new file mode 100644 index 0000000000..ca515536f3 --- /dev/null +++ b/clients/horizonclient/strict_send_paths_request_test.go @@ -0,0 +1,117 @@ +package horizonclient + +import ( + "testing" + + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStrictSendPathsRequestBuildUrl(t *testing.T) { + pr := StrictSendPathsRequest{} + endpoint, err := pr.BuildURL() + + // It should return no errors and paths endpoint + // Horizon will return an error though because there are no parameters + require.NoError(t, err) + assert.Equal(t, "paths/strict-send", endpoint) + + pr = StrictSendPathsRequest{ + SourceAmount: "100", + SourceAssetCode: "NGN", + SourceAssetIssuer: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + SourceAssetType: AssetType4, + DestinationAccount: "GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM", + } + + endpoint, err = pr.BuildURL() + + // It should return a valid endpoint and no errors + require.NoError(t, err) + assert.Equal( + t, + "paths/strict-send?destination_account=GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM&source_amount=100&source_asset_code=NGN&source_asset_issuer=GDZST3XVCDTUJ76ZAV2HA72KYQODXXZ5PTMAPZGDHZ6CS7RO7MGG3DBM&source_asset_type=credit_alphanum4", + endpoint, + ) + + pr = StrictSendPathsRequest{ + SourceAmount: "100", + SourceAssetCode: "USD", + SourceAssetIssuer: "GDUKMGUGDZQK6YHYA5Z6AY2G4XDSZPSZ3SW5UN3ARVMO6QSRDWP5YLEX", + SourceAssetType: AssetType4, + DestinationAssets: "EURT:GAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S,native", + } + + endpoint, err = pr.BuildURL() + + require.NoError(t, err) + assert.Equal( + t, + "paths/strict-send?destination_assets=EURT%3AGAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S%2Cnative&source_amount=100&source_asset_code=USD&source_asset_issuer=GDUKMGUGDZQK6YHYA5Z6AY2G4XDSZPSZ3SW5UN3ARVMO6QSRDWP5YLEX&source_asset_type=credit_alphanum4", + endpoint, + ) +} +func TestStrictSendPathsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + pr := 
StrictSendPathsRequest{ + SourceAmount: "20", + SourceAssetCode: "USD", + SourceAssetIssuer: "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + SourceAssetType: AssetType4, + DestinationAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", + } + + hmock.On( + "GET", + "https://localhost/paths/strict-send?destination_account=GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU&source_amount=20&source_asset_code=USD&source_asset_issuer=GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN&source_asset_type=credit_alphanum4", + ).ReturnString(200, pathsResponse) + + paths, err := client.StrictSendPaths(pr) + assert.NoError(t, err) + assert.Len(t, paths.Embedded.Records, 3) + + pr = StrictSendPathsRequest{ + SourceAmount: "20", + SourceAssetCode: "USD", + SourceAssetIssuer: "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + SourceAssetType: AssetType4, + DestinationAssets: "EUR:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + } + + hmock.On( + "GET", + "https://localhost/paths/strict-send?destination_assets=EUR%3AGDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN&source_amount=20&source_asset_code=USD&source_asset_issuer=GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN&source_asset_type=credit_alphanum4", + ).ReturnString(200, pathsResponse) + + paths, err = client.StrictSendPaths(pr) + assert.NoError(t, err) + assert.Len(t, paths.Embedded.Records, 3) + + pr = StrictSendPathsRequest{ + SourceAmount: "20", + SourceAssetCode: "USD", + SourceAssetIssuer: "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + SourceAssetType: AssetType4, + DestinationAssets: "EUR:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + DestinationAccount: "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + } + + hmock.On( + "GET", + "https://localhost/paths/strict-send?destination_account=GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN&destination_assets=EUR%3AGDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN&source_amount=20&source_asset_code=USD&source_asset_issuer=GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN&source_asset_type=credit_alphanum4", + ).ReturnString(400, badRequestResponse) + + _, err = client.StrictSendPaths(pr) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Bad Request") + } +} diff --git a/clients/horizonclient/submit_request.go b/clients/horizonclient/submit_request.go new file mode 100644 index 0000000000..673e48f29f --- /dev/null +++ b/clients/horizonclient/submit_request.go @@ -0,0 +1,35 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/stellar/go/support/errors" +) + +// BuildURL returns the url for submitting transactions to a running horizon instance +func (sr submitRequest) BuildURL() (endpoint string, err error) { + if sr.endpoint == "" || sr.transactionXdr == "" { + return endpoint, errors.New("invalid request: too few parameters") + } + + query := url.Values{} + query.Set("tx", sr.transactionXdr) + + endpoint = fmt.Sprintf("%s?%s", sr.endpoint, query.Encode()) + return endpoint, err +} + +// HTTPRequest returns the http request for submitting transactions to a running horizon instance +func (sr submitRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + form := url.Values{} + form.Set("tx", sr.transactionXdr) + request, err := http.NewRequest("POST", 
horizonURL+sr.endpoint, strings.NewReader(form.Encode())) + if err != nil { + return nil, err + } + request.Header.Add("Content-Type", "application/x-www-form-urlencoded") + return request, nil +} diff --git a/clients/horizonclient/submit_request_test.go b/clients/horizonclient/submit_request_test.go new file mode 100644 index 0000000000..9956fdce6e --- /dev/null +++ b/clients/horizonclient/submit_request_test.go @@ -0,0 +1,25 @@ +package horizonclient + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSubmitRequestBuildUrl(t *testing.T) { + sr := submitRequest{endpoint: "transactions", transactionXdr: "xyzabc"} + endpoint, err := sr.BuildURL() + + // It should return valid endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "transactions?tx=xyzabc", endpoint) + + sr = submitRequest{} + _, err = sr.BuildURL() + + // It should return errors + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid request: too few parameters") + } +} diff --git a/clients/horizonclient/trade_aggregation_request.go b/clients/horizonclient/trade_aggregation_request.go new file mode 100644 index 0000000000..acb13e9ffb --- /dev/null +++ b/clients/horizonclient/trade_aggregation_request.go @@ -0,0 +1,49 @@ +package horizonclient + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the TradeAggregationRequest struct. +func (ta TradeAggregationRequest) BuildURL() (endpoint string, err error) { + endpoint = "trade_aggregations" + // add the parameters for trade aggregations endpoint + paramMap := make(map[string]string) + paramMap["start_time"] = strconv.FormatInt((ta.StartTime.UnixNano() / 1e6), 10) + paramMap["end_time"] = strconv.FormatInt((ta.EndTime.UnixNano() / 1e6), 10) + paramMap["resolution"] = strconv.FormatInt((ta.Resolution.Nanoseconds() / 1e6), 10) + paramMap["offset"] = strconv.FormatInt((ta.Offset.Nanoseconds() / 1e6), 10) + paramMap["base_asset_type"] = string(ta.BaseAssetType) + paramMap["base_asset_code"] = ta.BaseAssetCode + paramMap["base_asset_issuer"] = ta.BaseAssetIssuer + paramMap["counter_asset_type"] = string(ta.CounterAssetType) + paramMap["counter_asset_code"] = ta.CounterAssetCode + paramMap["counter_asset_issuer"] = ta.CounterAssetIssuer + + queryParams := addQueryParams(paramMap, limit(ta.Limit), ta.Order) + if queryParams != "" { + endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams) + } + + _, err = url.Parse(endpoint) + if err != nil { + err = errors.Wrap(err, "failed to parse endpoint") + } + + return endpoint, err +} + +// HTTPRequest returns the http request for the trade aggregations endpoint +func (ta TradeAggregationRequest) HTTPRequest(horizonURL string) (*http.Request, error) { + endpoint, err := ta.BuildURL() + if err != nil { + return nil, err + } + + return http.NewRequest("GET", horizonURL+endpoint, nil) +} diff --git a/clients/horizonclient/trade_aggregation_request_test.go b/clients/horizonclient/trade_aggregation_request_test.go new file mode 100644 index 0000000000..9ea2adccca --- /dev/null +++ b/clients/horizonclient/trade_aggregation_request_test.go @@ -0,0 +1,400 @@ +package horizonclient + +import ( + "testing" + "time" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var testTime = 
time.Unix(int64(1517521726), int64(0)) + +func TestTradeAggregationRequestBuildUrl(t *testing.T) { + ta := TradeAggregationRequest{ + StartTime: testTime, + EndTime: testTime, + Resolution: HourResolution, + BaseAssetType: AssetTypeNative, + CounterAssetType: AssetType4, + CounterAssetCode: "SLT", + CounterAssetIssuer: "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + Order: OrderDesc, + } + endpoint, err := ta.BuildURL() + + // It should return valid trade aggregation endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "trade_aggregations?base_asset_type=native&counter_asset_code=SLT&counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&counter_asset_type=credit_alphanum4&end_time=1517521726000&offset=0&order=desc&resolution=3600000&start_time=1517521726000", endpoint) +} + +func TestTradeAggregationsRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + taRequest := TradeAggregationRequest{ + StartTime: testTime, + EndTime: testTime, + Resolution: DayResolution, + BaseAssetType: AssetTypeNative, + CounterAssetType: AssetType4, + CounterAssetCode: "SLT", + CounterAssetIssuer: "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + Order: OrderDesc, + } + + hmock.On( + "GET", + "https://localhost/trade_aggregations?base_asset_type=native&counter_asset_code=SLT&counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&counter_asset_type=credit_alphanum4&end_time=1517521726000&offset=0&order=desc&resolution=86400000&start_time=1517521726000", + ).ReturnString(200, tradeAggsResponse) + + tradeAggs, err := client.TradeAggregations(taRequest) + if assert.NoError(t, err) { + assert.IsType(t, tradeAggs, hProtocol.TradeAggregationsPage{}) + links := tradeAggs.Links + assert.Equal(t, links.Self.Href, "https://horizon.stellar.org/trade_aggregations?base_asset_type=native\u0026counter_asset_code=SLT\u0026counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP\u0026counter_asset_type=credit_alphanum4\u0026limit=200\u0026order=asc\u0026resolution=3600000\u0026start_time=1517521726000\u0026end_time=1517532526000") + + assert.Equal(t, links.Next.Href, "https://horizon.stellar.org/trade_aggregations?base_asset_type=native\u0026counter_asset_code=SLT\u0026counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP\u0026counter_asset_type=credit_alphanum4\u0026end_time=1517532526000\u0026limit=200\u0026order=asc\u0026resolution=3600000\u0026start_time=1517529600000") + + record := tradeAggs.Embedded.Records[0] + assert.IsType(t, record, hProtocol.TradeAggregation{}) + assert.Equal(t, record.Timestamp, int64(1517522400000)) + assert.Equal(t, record.TradeCount, int64(26)) + assert.Equal(t, record.BaseVolume, "27575.0201596") + assert.Equal(t, record.CounterVolume, "5085.6410385") + } + + // failure response + taRequest = TradeAggregationRequest{ + StartTime: testTime, + EndTime: testTime, + BaseAssetType: AssetTypeNative, + CounterAssetType: AssetType4, + CounterAssetCode: "SLT", + CounterAssetIssuer: "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + Order: OrderDesc, + } + + hmock.On( + "GET", + "https://localhost/trade_aggregations?base_asset_type=native&counter_asset_code=SLT&counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&counter_asset_type=credit_alphanum4&end_time=1517521726000&offset=0&order=desc&resolution=0&start_time=1517521726000", + ).ReturnString(400, 
badRequestResponse) + + _, err = client.TradeAggregations(taRequest) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "horizon error") + horizonError, ok := err.(*Error) + assert.Equal(t, ok, true) + assert.Equal(t, horizonError.Problem.Title, "Bad Request") + } +} + +func TestNextTradeAggregationsPage(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + taRequest := TradeAggregationRequest{ + StartTime: testTime, + EndTime: testTime, + Resolution: DayResolution, + BaseAssetType: AssetTypeNative, + CounterAssetType: AssetType4, + CounterAssetCode: "SLT", + CounterAssetIssuer: "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + Order: OrderDesc, + } + + hmock.On( + "GET", + "https://localhost/trade_aggregations?base_asset_type=native&counter_asset_code=SLT&counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&counter_asset_type=credit_alphanum4&end_time=1517521726000&offset=0&order=desc&resolution=86400000&start_time=1517521726000", + ).ReturnString(200, firstTradeAggsPage) + tradeAggs, err := client.TradeAggregations(taRequest) + + if assert.NoError(t, err) { + assert.Len(t, tradeAggs.Embedded.Records, 2) + } + + assert.Equal(t, int64(1565026860000), tradeAggs.Embedded.Records[0].Timestamp) + assert.Equal(t, int64(3), tradeAggs.Embedded.Records[0].TradeCount) + + hmock.On( + "GET", + "https://horizon-testnet.stellar.org/trade_aggregations?base_asset_code=USD&base_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&base_asset_type=credit_alphanum4&counter_asset_code=BTC&counter_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&counter_asset_type=credit_alphanum4&limit=2&resolution=60000&start_time=0", + ).ReturnString(200, emptyTradeAggsPage) + + nextPage, err := client.NextTradeAggregationsPage(tradeAggs) + if assert.NoError(t, err) { + assert.Equal(t, len(nextPage.Embedded.Records), 0) + } +} + +func TestPrevTradeAggregationsPage(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + taRequest := TradeAggregationRequest{ + StartTime: testTime, + EndTime: testTime, + Resolution: DayResolution, + BaseAssetType: AssetTypeNative, + CounterAssetType: AssetType4, + CounterAssetCode: "SLT", + CounterAssetIssuer: "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + Order: OrderDesc, + } + + hmock.On( + "GET", + "https://localhost/trade_aggregations?base_asset_type=native&counter_asset_code=SLT&counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&counter_asset_type=credit_alphanum4&end_time=1517521726000&offset=0&order=desc&resolution=86400000&start_time=1517521726000", + ).ReturnString(200, emptyTradeAggsPage) + tradeAggs, err := client.TradeAggregations(taRequest) + + if assert.NoError(t, err) { + assert.Equal(t, len(tradeAggs.Embedded.Records), 0) + } + + hmock.On( + "GET", + "https://horizon-testnet.stellar.org/trade_aggregations?base_asset_type=credit_alphanum4&base_asset_code=USD&base_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&counter_asset_type=credit_alphanum4&counter_asset_code=BTC&counter_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&start_time=1565132904&resolution=60000&limit=2", + ).ReturnString(200, firstTradeAggsPage) + + prevPage, err := client.PrevTradeAggregationsPage(tradeAggs) + if assert.NoError(t, err) { + assert.Equal(t, len(prevPage.Embedded.Records), 2) + } +} 
+ +func TestTradeAggregationsPageStringPayload(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + taRequest := TradeAggregationRequest{ + StartTime: testTime, + EndTime: testTime, + Resolution: DayResolution, + BaseAssetType: AssetTypeNative, + CounterAssetType: AssetType4, + CounterAssetCode: "SLT", + CounterAssetIssuer: "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + Order: OrderDesc, + } + + hmock.On( + "GET", + "https://localhost/trade_aggregations?base_asset_type=native&counter_asset_code=SLT&counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&counter_asset_type=credit_alphanum4&end_time=1517521726000&offset=0&order=desc&resolution=86400000&start_time=1517521726000", + ).ReturnString(200, stringTradeAggsPage) + tradeAggs, err := client.TradeAggregations(taRequest) + + if assert.NoError(t, err) { + assert.Len(t, tradeAggs.Embedded.Records, 1) + } + + assert.Equal(t, int64(1565026860000), tradeAggs.Embedded.Records[0].Timestamp) + assert.Equal(t, int64(3), tradeAggs.Embedded.Records[0].TradeCount) +} + +var tradeAggsResponse = `{ + "_links": { + "self": { + "href": "https://horizon.stellar.org/trade_aggregations?base_asset_type=native\u0026counter_asset_code=SLT\u0026counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP\u0026counter_asset_type=credit_alphanum4\u0026limit=200\u0026order=asc\u0026resolution=3600000\u0026start_time=1517521726000\u0026end_time=1517532526000" + }, + "next": { + "href": "https://horizon.stellar.org/trade_aggregations?base_asset_type=native\u0026counter_asset_code=SLT\u0026counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP\u0026counter_asset_type=credit_alphanum4\u0026end_time=1517532526000\u0026limit=200\u0026order=asc\u0026resolution=3600000\u0026start_time=1517529600000" + } + }, + "_embedded": { + "records": [ + { + "timestamp": "1517522400000", + "trade_count": "26", + "base_volume": "27575.0201596", + "counter_volume": "5085.6410385", + "avg": "0.1844293", + "high": "0.1915709", + "high_r": { + "N": 50, + "D": 261 + }, + "low": "0.1506024", + "low_r": { + "N": 25, + "D": 166 + }, + "open": "0.1724138", + "open_r": { + "N": 5, + "D": 29 + }, + "close": "0.1506024", + "close_r": { + "N": 25, + "D": 166 + } + }, + { + "timestamp": "1517526000000", + "trade_count": "15", + "base_volume": "3913.8224543", + "counter_volume": "719.4993608", + "avg": "0.1838355", + "high": "0.1960784", + "high_r": { + "N": 10, + "D": 51 + }, + "low": "0.1506024", + "low_r": { + "N": 25, + "D": 166 + }, + "open": "0.1869159", + "open_r": { + "N": 20, + "D": 107 + }, + "close": "0.1515152", + "close_r": { + "N": 5, + "D": 33 + } + } + ] + } +}` + +var firstTradeAggsPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/trade_aggregations?base_asset_type=credit_alphanum4&base_asset_code=USD&base_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&counter_asset_type=credit_alphanum4&counter_asset_code=BTC&counter_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&resolution=60000&limit=2" + }, + "next": { + "href": 
"https://horizon-testnet.stellar.org/trade_aggregations?base_asset_code=USD&base_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&base_asset_type=credit_alphanum4&counter_asset_code=BTC&counter_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&counter_asset_type=credit_alphanum4&limit=2&resolution=60000&start_time=0" + }, + "prev": { + "href": "" + } + }, + "_embedded": { + "records": [ + { + "timestamp": "1565026860000", + "trade_count": "3", + "base_volume": "23781.2128418", + "counter_volume": "2.0000000", + "avg": "0.0000841", + "high": "0.0000841", + "high_r": { + "N": 841, + "D": 10000000 + }, + "low": "0.0000841", + "low_r": { + "N": 841, + "D": 10000000 + }, + "open": "0.0000841", + "open_r": { + "N": 841, + "D": 10000000 + }, + "close": "0.0000841", + "close_r": { + "N": 841, + "D": 10000000 + } + }, + { + "timestamp": "1565026920000", + "trade_count": "1", + "base_volume": "11890.6052319", + "counter_volume": "0.9999999", + "avg": "0.0000841", + "high": "0.0000841", + "high_r": { + "N": 841, + "D": 10000000 + }, + "low": "0.0000841", + "low_r": { + "N": 841, + "D": 10000000 + }, + "open": "0.0000841", + "open_r": { + "N": 841, + "D": 10000000 + }, + "close": "0.0000841", + "close_r": { + "N": 841, + "D": 10000000 + } + } + ] + } +}` + +var emptyTradeAggsPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/trade_aggregations?base_asset_type=credit_alphanum4&base_asset_code=USD&base_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&counter_asset_type=credit_alphanum4&counter_asset_code=BTC&counter_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&resolution=60000&limit=2" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/trade_aggregations?base_asset_code=USD&base_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&base_asset_type=credit_alphanum4&counter_asset_code=BTC&counter_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&counter_asset_type=credit_alphanum4&limit=2&resolution=60000&start_time=0" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/trade_aggregations?base_asset_type=credit_alphanum4&base_asset_code=USD&base_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&counter_asset_type=credit_alphanum4&counter_asset_code=BTC&counter_asset_issuer=GDLEUZYDSFMWA5ZLQIOCYS7DMLYDKFS2KWJ5M3RQ3P3WS4L75ZTWKELP&start_time=1565132904&resolution=60000&limit=2" + } + }, + "_embedded": { + "records": [] + } +}` + +var stringTradeAggsPage = `{ + "_embedded": { + "records": [ + { + "timestamp": "1565026860000", + "trade_count": "3", + "base_volume": "23781.2128418", + "counter_volume": "2.0000000", + "avg": "0.0000841", + "high": "0.0000841", + "high_r": { + "N": 841, + "D": 10000000 + }, + "low": "0.0000841", + "low_r": { + "N": 841, + "D": 10000000 + }, + "open": "0.0000841", + "open_r": { + "N": 841, + "D": 10000000 + }, + "close": "0.0000841", + "close_r": { + "N": 841, + "D": 10000000 + } + } + ] + } +}` diff --git a/clients/horizonclient/trade_request.go b/clients/horizonclient/trade_request.go new file mode 100644 index 0000000000..5cbfab8914 --- /dev/null +++ b/clients/horizonclient/trade_request.go @@ -0,0 +1,105 @@ +package horizonclient + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the 
TradeRequest struct.
+// If no data is set, it defaults to building the URL for all trades
+func (tr TradeRequest) BuildURL() (endpoint string, err error) {
+	nParams := countParams(tr.ForAccount, tr.ForOfferID, tr.ForLiquidityPool)
+
+	if nParams > 1 {
+		return endpoint, errors.New("invalid request: too many parameters")
+	}
+
+	endpoint = "trades"
+	if tr.ForAccount != "" {
+		endpoint = fmt.Sprintf("accounts/%s/trades", tr.ForAccount)
+	}
+
+	// Note[Peter - 28/03/2019]: querying an "all trades" endpoint that has the query parameter
+	// for offer_id is the same as querying the URL for trades of a particular offer. The results
+	// returned will be the same. So, I am opting to build the endpoint for trades per offer when
+	// `ForOfferID` is set
+	if tr.ForOfferID != "" {
+		endpoint = fmt.Sprintf("offers/%s/trades", tr.ForOfferID)
+	}
+
+	if tr.ForLiquidityPool != "" {
+		endpoint = fmt.Sprintf("liquidity_pools/%s/trades", tr.ForLiquidityPool)
+	}
+
+	var queryParams string
+
+	if endpoint != "trades" {
+		queryParams = addQueryParams(cursor(tr.Cursor), limit(tr.Limit), tr.Order)
+	} else {
+		// add the parameters for the all trades endpoint
+		paramMap := make(map[string]string)
+		paramMap["base_asset_type"] = string(tr.BaseAssetType)
+		paramMap["base_asset_code"] = tr.BaseAssetCode
+		paramMap["base_asset_issuer"] = tr.BaseAssetIssuer
+		paramMap["counter_asset_type"] = string(tr.CounterAssetType)
+		paramMap["counter_asset_code"] = tr.CounterAssetCode
+		paramMap["counter_asset_issuer"] = tr.CounterAssetIssuer
+		paramMap["trade_type"] = tr.TradeType
+		paramMap["offer_id"] = tr.ForOfferID
+
+		queryParams = addQueryParams(paramMap, cursor(tr.Cursor), limit(tr.Limit), tr.Order)
+	}
+
+	if queryParams != "" {
+		endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams)
+	}
+
+	_, err = url.Parse(endpoint)
+	if err != nil {
+		err = errors.Wrap(err, "failed to parse endpoint")
+	}
+
+	return endpoint, err
+}
+
+// HTTPRequest returns the http request for the trades endpoint
+func (tr TradeRequest) HTTPRequest(horizonURL string) (*http.Request, error) {
+	endpoint, err := tr.BuildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	return http.NewRequest("GET", horizonURL+endpoint, nil)
+}
+
+// TradeHandler is a function that is called when a new trade is received
+type TradeHandler func(hProtocol.Trade)
+
+// StreamTrades streams executed trades. It can be used to stream all trades, trades for an account and
+// trades for an offer. Use context.WithCancel to stop streaming or context.Background() if you want
+// to stream indefinitely. TradeHandler is a user-supplied function that is executed for each streamed trade received.
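+//
+// A minimal usage sketch (illustrative only): it assumes client is an already
+// initialized *Client and streams trades for one account, printing two fields
+// of each trade that arrives.
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//
+//	tr := TradeRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"}
+//	err := tr.StreamTrades(ctx, client, func(t hProtocol.Trade) {
+//		fmt.Println(t.ID, t.BaseAmount) // call cancel() here to stop streaming
+//	})
+//	if err != nil {
+//		// handle the streaming error
+//	}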
+func (tr TradeRequest) StreamTrades(ctx context.Context, client *Client, + handler TradeHandler) (err error) { + endpoint, err := tr.BuildURL() + if err != nil { + return errors.Wrap(err, "unable to build endpoint") + } + + url := fmt.Sprintf("%s%s", client.fixHorizonURL(), endpoint) + + return client.stream(ctx, url, func(data []byte) error { + var trade hProtocol.Trade + err = json.Unmarshal(data, &trade) + if err != nil { + return errors.Wrap(err, "error unmarshaling data") + } + handler(trade) + return nil + }) +} diff --git a/clients/horizonclient/trade_request_test.go b/clients/horizonclient/trade_request_test.go new file mode 100644 index 0000000000..e56550eba1 --- /dev/null +++ b/clients/horizonclient/trade_request_test.go @@ -0,0 +1,450 @@ +package horizonclient + +import ( + "context" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTradeRequestBuildUrl(t *testing.T) { + tr := TradeRequest{} + endpoint, err := tr.BuildURL() + + // It should return valid all trades endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "trades", endpoint) + + tr = TradeRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + endpoint, err = tr.BuildURL() + + // It should return valid account trades endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/trades", endpoint) + + tr = TradeRequest{ForOfferID: "123"} + endpoint, err = tr.BuildURL() + + // It should return valid offer trades endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "offers/123/trades", endpoint) + + tr = TradeRequest{ForLiquidityPool: "123"} + endpoint, err = tr.BuildURL() + + // It should return valid liquidity pool trades endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "liquidity_pools/123/trades", endpoint) + + tr = TradeRequest{Cursor: "123"} + endpoint, err = tr.BuildURL() + + // It should return valid trades endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "trades?cursor=123", endpoint) + + // It should return valid trades endpoint and no errors for trade_type + tr = TradeRequest{TradeType: "orderbook"} + endpoint, err = tr.BuildURL() + require.NoError(t, err) + assert.Equal(t, "trades?trade_type=orderbook", endpoint) + + tr = TradeRequest{ForOfferID: "123", ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + _, err = tr.BuildURL() + + // error case: too many parameters for building any operation endpoint + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid request: too many parameters") + } + + tr = TradeRequest{Cursor: "123456", Limit: 30, Order: OrderAsc} + endpoint, err = tr.BuildURL() + // It should return valid all trades endpoint with query params and no errors + require.NoError(t, err) + assert.Equal(t, "trades?cursor=123456&limit=30&order=asc", endpoint) + +} + +func TestTradesRequest(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + tradeRequest := TradeRequest{} + + // all trades + hmock.On( + "GET", + "https://localhost/trades", + ).ReturnString(200, tradesResponse) + + trades, err := client.Trades(tradeRequest) + if assert.NoError(t, err) { + assert.IsType(t, trades, hProtocol.TradesPage{}) + links := trades.Links + assert.Equal(t, links.Self.Href, 
"https://horizon-testnet.stellar.org/trades?cursor=&limit=2&order=desc") + + assert.Equal(t, links.Next.Href, "https://horizon-testnet.stellar.org/trades?cursor=2099298409914407-0&limit=2&order=desc") + + assert.Equal(t, links.Prev.Href, "https://horizon-testnet.stellar.org/trades?cursor=2099319884746791-0&limit=2&order=asc") + + trade := trades.Embedded.Records[0] + assert.IsType(t, trade, hProtocol.Trade{}) + assert.Equal(t, trade.ID, "2099319884746791-0") + assert.Equal(t, trade.BaseAmount, "2.4104452") + assert.Equal(t, trade.CounterAmount, "0.0973412") + assert.Equal(t, trade.OfferID, "3698823") + assert.Equal(t, trade.BaseIsSeller, false) + } + + tradeRequest = TradeRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + hmock.On( + "GET", + "https://localhost/accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/trades", + ).ReturnString(200, tradesResponse) + + trades, err = client.Trades(tradeRequest) + if assert.NoError(t, err) { + assert.IsType(t, trades, hProtocol.TradesPage{}) + } + + // too many parameters + tradeRequest = TradeRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU", ForOfferID: "123"} + hmock.On( + "GET", + "https://localhost/trades", + ).ReturnString(200, "") + + _, err = client.Trades(tradeRequest) + // error case + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "too many parameters") + } +} + +func TestNextTradesPage(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + tradeRequest := TradeRequest{ForAccount: "GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG", Limit: 2} + + hmock.On( + "GET", + "https://localhost/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/trades?limit=2", + ).ReturnString(200, firstTradesPage) + + trades, err := client.Trades(tradeRequest) + + if assert.NoError(t, err) { + assert.Equal(t, len(trades.Embedded.Records), 2) + } + + hmock.On( + "GET", + "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/trades?cursor=45122926424065-0&limit=2&order=asc", + ).ReturnString(200, emptyTradesPage) + + nextPage, err := client.NextTradesPage(trades) + if assert.NoError(t, err) { + assert.Equal(t, len(nextPage.Embedded.Records), 0) + } +} + +func TestTradeRequestStreamTrades(t *testing.T) { + + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // all trades + trRequest := TradeRequest{} + ctx, cancel := context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/trades?cursor=now", + ).ReturnString(200, tradeStreamResponse) + + trades := make([]hProtocol.Trade, 1) + err := client.StreamTrades(ctx, trRequest, func(tr hProtocol.Trade) { + trades[0] = tr + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, trades[0].ID, "76909979385857-0") + assert.Equal(t, trades[0].OfferID, "494") + } + + // trades for accounts + trRequest = TradeRequest{ForAccount: "GCRHQBHX7JNBZE4HHPLNOAAYDRDVAGBJKJ4KPGHIID3CBGVALXBD6TVQ"} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/accounts/GCRHQBHX7JNBZE4HHPLNOAAYDRDVAGBJKJ4KPGHIID3CBGVALXBD6TVQ/trades?cursor=now", + ).ReturnString(200, tradeStreamResponse) + + trades = make([]hProtocol.Trade, 1) + err = client.StreamTrades(ctx, trRequest, func(tr hProtocol.Trade) { + trades[0] = tr + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, 
trades[0].ID, "76909979385857-0") + assert.Equal(t, trades[0].OfferID, "494") + } + + // trades for offers + trRequest = TradeRequest{ForOfferID: "494"} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/offers/494/trades?cursor=now", + ).ReturnString(200, tradeStreamResponse) + + trades = make([]hProtocol.Trade, 1) + err = client.StreamTrades(ctx, trRequest, func(tr hProtocol.Trade) { + trades[0] = tr + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, trades[0].ID, "76909979385857-0") + assert.Equal(t, trades[0].OfferID, "494") + } + + // test error + trRequest = TradeRequest{} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/trades?cursor=now", + ).ReturnString(500, tradeStreamResponse) + + trades = make([]hProtocol.Trade, 1) + err = client.StreamTrades(ctx, trRequest, func(tr hProtocol.Trade) { + cancel() + }) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "got bad HTTP status code 500") + } +} + +var tradesResponse = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/trades?cursor=&limit=2&order=desc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/trades?cursor=2099298409914407-0&limit=2&order=desc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/trades?cursor=2099319884746791-0&limit=2&order=asc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "" + }, + "base": { + "href": "https://horizon-testnet.stellar.org/accounts/GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C" + }, + "counter": { + "href": "https://horizon-testnet.stellar.org/accounts/GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC" + }, + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/2099319884746791" + } + }, + "id": "2099319884746791-0", + "paging_token": "2099319884746791-0", + "ledger_close_time": "2019-03-28T10:45:28Z", + "offer_id": "3698823", + "base_offer_id": "4613785338312134695", + "base_account": "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", + "base_amount": "2.4104452", + "base_asset_type": "credit_alphanum4", + "base_asset_code": "DSQ", + "base_asset_issuer": "GBDQPTQJDATT7Z7EO4COS4IMYXH44RDLLI6N6WIL5BZABGMUOVMLWMQF", + "counter_offer_id": "3698823", + "counter_account": "GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC", + "counter_amount": "0.0973412", + "counter_asset_type": "credit_alphanum4", + "counter_asset_code": "USD", + "counter_asset_issuer": "GAA4MFNZGUPJAVLWWG6G5XZJFZDHLKQNG3Q6KB24BAD6JHNNVXDCF4XG", + "base_is_seller": false, + "price": { + "n": 2000000, + "d": 49525693 + } + }, + { + "_links": { + "self": { + "href": "" + }, + "base": { + "href": "https://horizon-testnet.stellar.org/accounts/GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C" + }, + "counter": { + "href": "https://horizon-testnet.stellar.org/accounts/GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC" + }, + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/2099298409914407" + } + }, + "id": "2099298409914407-0", + "paging_token": "2099298409914407-0", + "ledger_close_time": "2019-03-28T10:45:02Z", + "offer_id": "3698823", + "base_offer_id": "4613785316837302311", + "base_account": "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", + "base_amount": "89.3535843", + "base_asset_type": "credit_alphanum4", + "base_asset_code": "DSQ", + "base_asset_issuer": 
"GBDQPTQJDATT7Z7EO4COS4IMYXH44RDLLI6N6WIL5BZABGMUOVMLWMQF", + "counter_offer_id": "3698823", + "counter_account": "GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC", + "counter_amount": "3.6083729", + "counter_asset_type": "credit_alphanum4", + "counter_asset_code": "USD", + "counter_asset_issuer": "GAA4MFNZGUPJAVLWWG6G5XZJFZDHLKQNG3Q6KB24BAD6JHNNVXDCF4XG", + "base_is_seller": false, + "price": { + "n": 2000000, + "d": 49525693 + } + } + ] + } +}` + +var tradeStreamResponse = `data: {"_links":{"self":{"href":""},"base":{"href":"https://horizon-testnet.stellar.org/accounts/GCRHQBHX7JNBZE4HHPLNOAAYDRDVAGBJKJ4KPGHIID3CBGVALXBD6TVQ"},"counter":{"href":"https://horizon-testnet.stellar.org/accounts/GAEETTPUI5CO3CSYXXM5CRX4FHLDWJ3KD6XRRJ3GJISWQSCYF5ALN6JC"},"operation":{"href":"https://horizon-testnet.stellar.org/operations/76909979385857"}},"id":"76909979385857-0","paging_token":"76909979385857-0","ledger_close_time":"2019-02-28T11:29:40Z","offer_id":"494","base_offer_id":"4611762928406773761","base_account":"GCRHQBHX7JNBZE4HHPLNOAAYDRDVAGBJKJ4KPGHIID3CBGVALXBD6TVQ","base_amount":"0.0000001","base_asset_type":"native","counter_offer_id":"494","counter_account":"GAEETTPUI5CO3CSYXXM5CRX4FHLDWJ3KD6XRRJ3GJISWQSCYF5ALN6JC","counter_amount":"0.0001000","counter_asset_type":"credit_alphanum4","counter_asset_code":"WTF","counter_asset_issuer":"GAQZKAGUAHCN4OHAMQVQ3PNA5DUHCQ3CEVOSOTPUAXHG3UHTRSSUFHUL","base_is_seller":false,"price":{"n":1000,"d":1}} +` + +var firstTradesPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/trades?cursor=&limit=2&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/trades?cursor=45122926424065-0&limit=2&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/trades?cursor=45097156620289-0&limit=2&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "" + }, + "base": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG" + }, + "counter": { + "href": "https://horizon-testnet.stellar.org/accounts/GBH77NK3ZP7RT52YZWGIU5Y6VTIJ52VXUSXDMQ7Z7VAAQO4U4QGGIROV" + }, + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/45097156620289" + } + }, + "id": "45097156620289-0", + "paging_token": "45097156620289-0", + "ledger_close_time": "2019-04-25T02:29:20Z", + "offer_id": "1219", + "base_offer_id": "928", + "base_account": "GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG", + "base_amount": "2.7922715", + "base_asset_type": "credit_alphanum4", + "base_asset_code": "HT", + "base_asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P", + "counter_offer_id": "1219", + "counter_account": "GBH77NK3ZP7RT52YZWGIU5Y6VTIJ52VXUSXDMQ7Z7VAAQO4U4QGGIROV", + "counter_amount": "0.0012000", + "counter_asset_type": "credit_alphanum4", + "counter_asset_code": "BTC", + "counter_asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P", + "base_is_seller": false, + "price": { + "n": 383, + "d": 891200 + } + }, + { + "_links": { + "self": { + "href": "" + }, + "base": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG" + }, + "counter": { + "href": 
"https://horizon-testnet.stellar.org/accounts/GBH77NK3ZP7RT52YZWGIU5Y6VTIJ52VXUSXDMQ7Z7VAAQO4U4QGGIROV" + }, + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/45122926424065" + } + }, + "id": "45122926424065-0", + "paging_token": "45122926424065-0", + "ledger_close_time": "2019-04-25T02:29:49Z", + "offer_id": "928", + "base_offer_id": "928", + "base_account": "GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG", + "base_amount": "2.7956854", + "base_asset_type": "credit_alphanum4", + "base_asset_code": "HT", + "base_asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P", + "counter_offer_id": "4611731141353811969", + "counter_account": "GBH77NK3ZP7RT52YZWGIU5Y6VTIJ52VXUSXDMQ7Z7VAAQO4U4QGGIROV", + "counter_amount": "0.0012000", + "counter_asset_type": "credit_alphanum4", + "counter_asset_code": "BTC", + "counter_asset_issuer": "GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P", + "base_is_seller": true, + "price": { + "n": 7973, + "d": 18575000 + } + } + ] + } +}` + +var emptyTradesPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/trades?cursor=45122926424065-0&limit=2&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/trades?cursor=59889023983617-0&limit=2&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GBZ5OD56VRTRQKMNADD6VUZUG3FCILMAMYQY5ZSC3AW3GBXNEPIK76IG/trades?cursor=45810121191425-0&limit=2&order=desc" + } + }, + "_embedded": { + "records": [] + } +}` diff --git a/clients/horizonclient/transaction_request.go b/clients/horizonclient/transaction_request.go new file mode 100644 index 0000000000..58f25458bd --- /dev/null +++ b/clients/horizonclient/transaction_request.go @@ -0,0 +1,87 @@ +package horizonclient + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" +) + +// BuildURL creates the endpoint to be queried based on the data in the TransactionRequest struct. 
+// If no data is set, it defaults to building the URL for all transactions
+func (tr TransactionRequest) BuildURL() (endpoint string, err error) {
+	nParams := countParams(tr.ForAccount, tr.ForLedger, tr.ForLiquidityPool, tr.forTransactionHash)
+
+	if nParams > 1 {
+		return endpoint, errors.New("invalid request: too many parameters")
+	}
+
+	endpoint = "transactions"
+	if tr.ForAccount != "" {
+		endpoint = fmt.Sprintf("accounts/%s/transactions", tr.ForAccount)
+	}
+	if tr.ForClaimableBalance != "" {
+		endpoint = fmt.Sprintf("claimable_balances/%s/transactions", tr.ForClaimableBalance)
+	}
+	if tr.ForLedger > 0 {
+		endpoint = fmt.Sprintf("ledgers/%d/transactions", tr.ForLedger)
+	}
+	if tr.ForLiquidityPool != "" {
+		endpoint = fmt.Sprintf("liquidity_pools/%s/transactions", tr.ForLiquidityPool)
+	}
+	if tr.forTransactionHash != "" {
+		endpoint = fmt.Sprintf("transactions/%s", tr.forTransactionHash)
+	}
+
+	queryParams := addQueryParams(cursor(tr.Cursor), limit(tr.Limit), tr.Order,
+		includeFailed(tr.IncludeFailed))
+	if queryParams != "" {
+		endpoint = fmt.Sprintf("%s?%s", endpoint, queryParams)
+	}
+
+	_, err = url.Parse(endpoint)
+	if err != nil {
+		err = errors.Wrap(err, "failed to parse endpoint")
+	}
+
+	return endpoint, err
+}
+
+// HTTPRequest returns the http request for the transactions endpoint
+func (tr TransactionRequest) HTTPRequest(horizonURL string) (*http.Request, error) {
+	endpoint, err := tr.BuildURL()
+	if err != nil {
+		return nil, err
+	}
+
+	return http.NewRequest("GET", horizonURL+endpoint, nil)
+}
+
+// TransactionHandler is a function that is called when a new transaction is received
+type TransactionHandler func(hProtocol.Transaction)
+
+// StreamTransactions streams executed transactions. It can be used to stream all transactions and
+// transactions for an account. Use context.WithCancel to stop streaming or context.Background() if you want
+// to stream indefinitely. TransactionHandler is a user-supplied function that is executed for each streamed transaction received.
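+//
+// A minimal usage sketch (illustrative only): it assumes client is an already
+// initialized *Client and streams transactions submitted by one account.
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//
+//	tr := TransactionRequest{ForAccount: "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR"}
+//	err := tr.StreamTransactions(ctx, client, func(tx hProtocol.Transaction) {
+//		fmt.Println(tx.Hash, tx.Account) // call cancel() here to stop streaming
+//	})
+//	if err != nil {
+//		// handle the streaming error
+//	}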
+func (tr TransactionRequest) StreamTransactions(ctx context.Context, client *Client, + handler TransactionHandler) (err error) { + endpoint, err := tr.BuildURL() + if err != nil { + return errors.Wrap(err, "unable to build endpoint") + } + + url := fmt.Sprintf("%s%s", client.fixHorizonURL(), endpoint) + + return client.stream(ctx, url, func(data []byte) error { + var transaction hProtocol.Transaction + err = json.Unmarshal(data, &transaction) + if err != nil { + return errors.Wrap(err, "error unmarshaling data") + } + handler(transaction) + return nil + }) +} diff --git a/clients/horizonclient/transaction_request_test.go b/clients/horizonclient/transaction_request_test.go new file mode 100644 index 0000000000..8d3c6c8cdc --- /dev/null +++ b/clients/horizonclient/transaction_request_test.go @@ -0,0 +1,297 @@ +package horizonclient + +import ( + "context" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTransactionRequestBuildUrl(t *testing.T) { + tr := TransactionRequest{} + endpoint, err := tr.BuildURL() + + // It should return valid all transactions endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "transactions", endpoint) + + tr = TransactionRequest{ForAccount: "GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU"} + endpoint, err = tr.BuildURL() + + // It should return valid account transactions endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "accounts/GCLWGQPMKXQSPF776IU33AH4PZNOOWNAWGGKVTBQMIC5IMKUNP3E6NVU/transactions", endpoint) + + tr = TransactionRequest{ForClaimableBalance: "00000000178826fbfe339e1f5c53417c6fedfe2c05e8bec14303143ec46b38981b09c3f9"} + endpoint, err = tr.BuildURL() + + // It should return valid account transactions endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "claimable_balances/00000000178826fbfe339e1f5c53417c6fedfe2c05e8bec14303143ec46b38981b09c3f9/transactions", endpoint) + + tr = TransactionRequest{ForLedger: 123} + endpoint, err = tr.BuildURL() + + // It should return valid ledger transactions endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "ledgers/123/transactions", endpoint) + + tr = TransactionRequest{ForLiquidityPool: "123"} + endpoint, err = tr.BuildURL() + + // It should return valid liquidity pool transactions endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "liquidity_pools/123/transactions", endpoint) + + tr = TransactionRequest{forTransactionHash: "123"} + endpoint, err = tr.BuildURL() + + // It should return valid operation transactions endpoint and no errors + require.NoError(t, err) + assert.Equal(t, "transactions/123", endpoint) + + tr = TransactionRequest{ForLedger: 123, forTransactionHash: "789"} + _, err = tr.BuildURL() + + // error case: too many parameters for building any operation endpoint + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "invalid request: too many parameters") + } + + tr = TransactionRequest{Cursor: "123456", Limit: 30, Order: OrderAsc, IncludeFailed: true} + endpoint, err = tr.BuildURL() + // It should return valid all transactions endpoint with query params and no errors + require.NoError(t, err) + assert.Equal(t, "transactions?cursor=123456&include_failed=true&limit=30&order=asc", endpoint) + +} + +func TestNextTransactionsPage(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + 
} + + transactionRequest := TransactionRequest{Limit: 2} + + hmock.On( + "GET", + "https://localhost/transactions?limit=2", + ).ReturnString(200, firstTransactionsPage) + + transactions, err := client.Transactions(transactionRequest) + + if assert.NoError(t, err) { + assert.Equal(t, len(transactions.Embedded.Records), 2) + } + + hmock.On( + "GET", + "https://horizon-testnet.stellar.org/transactions?cursor=1566052450312192&limit=2&order=desc", + ).ReturnString(200, emptyTransactionsPage) + + nextPage, err := client.NextTransactionsPage(transactions) + if assert.NoError(t, err) { + assert.Equal(t, len(nextPage.Embedded.Records), 0) + } +} + +func TestTransactionRequestStreamTransactions(t *testing.T) { + hmock := httptest.NewClient() + client := &Client{ + HorizonURL: "https://localhost/", + HTTP: hmock, + } + + // all transactions + trRequest := TransactionRequest{} + ctx, cancel := context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/transactions?cursor=now", + ).ReturnString(200, txStreamResponse) + + transactions := make([]hProtocol.Transaction, 1) + err := client.StreamTransactions(ctx, trRequest, func(tr hProtocol.Transaction) { + transactions[0] = tr + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, transactions[0].Hash, "1534f6507420c6871b557cc2fc800c29fb1ed1e012e694993ffe7a39c824056e") + assert.Equal(t, transactions[0].Account, "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR") + } + + // transactions for accounts + trRequest = TransactionRequest{ForAccount: "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR"} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/accounts/GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR/transactions?cursor=now", + ).ReturnString(200, txStreamResponse) + + transactions = make([]hProtocol.Transaction, 1) + err = client.StreamTransactions(ctx, trRequest, func(tr hProtocol.Transaction) { + transactions[0] = tr + cancel() + }) + + if assert.NoError(t, err) { + assert.Equal(t, transactions[0].Hash, "1534f6507420c6871b557cc2fc800c29fb1ed1e012e694993ffe7a39c824056e") + assert.Equal(t, transactions[0].Account, "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR") + } + + // test error + trRequest = TransactionRequest{} + ctx, cancel = context.WithCancel(context.Background()) + + hmock.On( + "GET", + "https://localhost/transactions?cursor=now", + ).ReturnString(500, txStreamResponse) + + transactions = make([]hProtocol.Transaction, 1) + err = client.StreamTransactions(ctx, trRequest, func(tr hProtocol.Transaction) { + cancel() + }) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "got bad HTTP status code 500") + } +} + +var txStreamResponse = `data: 
{"_links":{"self":{"href":"https://horizon-testnet.stellar.org/transactions/1534f6507420c6871b557cc2fc800c29fb1ed1e012e694993ffe7a39c824056e"},"account":{"href":"https://horizon-testnet.stellar.org/accounts/GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR"},"ledger":{"href":"https://horizon-testnet.stellar.org/ledgers/607387"},"operations":{"href":"https://horizon-testnet.stellar.org/transactions/1534f6507420c6871b557cc2fc800c29fb1ed1e012e694993ffe7a39c824056e/operations{?cursor,limit,order}","templated":true},"effects":{"href":"https://horizon-testnet.stellar.org/transactions/1534f6507420c6871b557cc2fc800c29fb1ed1e012e694993ffe7a39c824056e/effects{?cursor,limit,order}","templated":true},"precedes":{"href":"https://horizon-testnet.stellar.org/transactions?order=asc\u0026cursor=2608707301036032"},"succeeds":{"href":"https://horizon-testnet.stellar.org/transactions?order=desc\u0026cursor=2608707301036032"}},"id":"1534f6507420c6871b557cc2fc800c29fb1ed1e012e694993ffe7a39c824056e","paging_token":"2608707301036032","successful":true,"hash":"1534f6507420c6871b557cc2fc800c29fb1ed1e012e694993ffe7a39c824056e","ledger":607387,"created_at":"2019-04-04T12:07:03Z","source_account":"GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR","source_account_sequence":"4660039930473","max_fee":100,"fee_charged":100,"operation_count":1,"envelope_xdr":"AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAZAAABD0ABlJpAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAmLuzasXDMqsqgFK4xkbLxJLzmQQzkiCF2SnKPD+b1TsAAAAXSHboAAAAAAAAAAABhlbgnAAAAECqxhXduvtzs65keKuTzMtk76cts2WeVB2pZKYdlxlOb1EIbOpFhYizDSXVfQlAvvg18qV6oNRr7ls4nnEm2YIK","result_xdr":"AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=","result_meta_xdr":"AAAAAQAAAAIAAAADAAlEmwAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwBT3aiixBA2AAABD0ABlJoAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAlEmwAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwBT3aiixBA2AAABD0ABlJpAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMACUSbAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAFPdqKLEEDYAAAEPQAGUmkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACUSbAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAFPdotCmVjYAAAEPQAGUmkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAACUSbAAAAAAAAAACYu7NqxcMyqyqAUrjGRsvEkvOZBDOSIIXZKco8P5vVOwAAABdIdugAAAlEmwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==","fee_meta_xdr":"AAAAAgAAAAMACUSaAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAFPdqKLEEE8AAAEPQAGUmgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACUSbAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAFPdqKLEEDYAAAEPQAGUmgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==","memo_type":"none","signatures":["qsYV3br7c7OuZHirk8zLZO+nLbNlnlQdqWSmHZcZTm9RCGzqRYWIsw0l1X0JQL74NfKleqDUa+5bOJ5xJtmCCg=="]} +` + +var firstTransactionsPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions?cursor=&limit=2&order=desc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/transactions?cursor=1566052450312192&limit=2&order=desc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/transactions?cursor=1566052450316288&limit=2&order=asc" + } + }, + "_embedded": { + "records": [ + { + "memo": "3232096465", + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/a748158973896c2b0a4fc32a2ae1c96954e4a52e3385f942832a1852fce6d775" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GDRZVYB5QI6UFR4NR4RXQ3HR5IH4KL2ECR4IUZXGHOUMPGLN2OGCSAOK" + }, + "ledger": { + "href": 
"https://horizon-testnet.stellar.org/ledgers/364625" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/a748158973896c2b0a4fc32a2ae1c96954e4a52e3385f942832a1852fce6d775/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/a748158973896c2b0a4fc32a2ae1c96954e4a52e3385f942832a1852fce6d775/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=1566052450316288" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=1566052450316288" + } + }, + "id": "a748158973896c2b0a4fc32a2ae1c96954e4a52e3385f942832a1852fce6d775", + "paging_token": "1566052450316288", + "successful": true, + "hash": "a748158973896c2b0a4fc32a2ae1c96954e4a52e3385f942832a1852fce6d775", + "ledger": 364625, + "created_at": "2019-05-16T10:17:44Z", + "source_account": "GDRZVYB5QI6UFR4NR4RXQ3HR5IH4KL2ECR4IUZXGHOUMPGLN2OGCSAOK", + "source_account_sequence": "1566048155336705", + "max_fee": 100, + "fee_charged":100, + "operation_count": 1, + "envelope_xdr": "AAAAAOOa4D2CPULHjY8jeGzx6g/FL0QUeIpm5juox5lt04wpAAAAZAAFkFAAAAABAAAAAAAAAAEAAAAKMzIzMjA5NjQ2NQAAAAAAAQAAAAEAAAAA45rgPYI9QseNjyN4bPHqD8UvRBR4imbmO6jHmW3TjCkAAAABAAAAAE3j7m7lhZ39noA3ToXWDjJ9QuMmmp/1UaIg0chYzRSlAAAAAAAAAAJMTD+AAAAAAAAAAAFt04wpAAAAQAxFRWcepbQoisfiZ0PG7XhPIBl2ssiD9ymMVpsDyLoHyWXboJLaqibNbiPUHk/KEToTVg7G/JCZ06Mfj0daVAc=", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAWQUQAAAAAAAAAA45rgPYI9QseNjyN4bPHqD8UvRBR4imbmO6jHmW3TjCkAAAAXSHbnnAAFkFAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAWQUQAAAAAAAAAA45rgPYI9QseNjyN4bPHqD8UvRBR4imbmO6jHmW3TjCkAAAAXSHbnnAAFkFAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMABZBQAAAAAAAAAABN4+5u5YWd/Z6AN06F1g4yfULjJpqf9VGiINHIWM0UpQAAQkzJKzWcAAF79QAAP30AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZBRAAAAAAAAAABN4+5u5YWd/Z6AN06F1g4yfULjJpqf9VGiINHIWM0UpQAAQk8Vd3UcAAF79QAAP30AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMABZBRAAAAAAAAAADjmuA9gj1Cx42PI3hs8eoPxS9EFHiKZuY7qMeZbdOMKQAAABdIduecAAWQUAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZBRAAAAAAAAAADjmuA9gj1Cx42PI3hs8eoPxS9EFHiKZuY7qMeZbdOMKQAAABT8KqgcAAWQUAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMABZBQAAAAAAAAAADjmuA9gj1Cx42PI3hs8eoPxS9EFHiKZuY7qMeZbdOMKQAAABdIdugAAAWQUAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZBRAAAAAAAAAADjmuA9gj1Cx42PI3hs8eoPxS9EFHiKZuY7qMeZbdOMKQAAABdIduecAAWQUAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "text", + "signatures": [ + "DEVFZx6ltCiKx+JnQ8bteE8gGXayyIP3KYxWmwPIugfJZdugktqqJs1uI9QeT8oROhNWDsb8kJnTox+PR1pUBw==" + ] + }, + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/80af95a8aeb49bd19eeb2c89fbdd18c691fe80d1a0609fd20c8418fdde0ea943" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/364625" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/80af95a8aeb49bd19eeb2c89fbdd18c691fe80d1a0609fd20c8418fdde0ea943/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": 
"https://horizon-testnet.stellar.org/transactions/80af95a8aeb49bd19eeb2c89fbdd18c691fe80d1a0609fd20c8418fdde0ea943/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=1566052450312192" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=1566052450312192" + } + }, + "id": "80af95a8aeb49bd19eeb2c89fbdd18c691fe80d1a0609fd20c8418fdde0ea943", + "paging_token": "1566052450312192", + "successful": true, + "hash": "80af95a8aeb49bd19eeb2c89fbdd18c691fe80d1a0609fd20c8418fdde0ea943", + "ledger": 364625, + "created_at": "2019-05-16T10:17:44Z", + "source_account": "GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC", + "source_account_sequence": "132761734108361", + "max_fee": 100, + "fee_charged":100, + "operation_count": 50, + "envelope_xdr": "AAAAALDfsR6t6xf/io...", + "result_xdr": "AAAAALDfsR6t6xf/io...", + "fee_meta_xdr": "AAAAAgAAAAMABZBQAAAAAAAAAACw37EeresX/4qPmKCC2iLWK6/QFjp+Yh7R62zZgiexrQAAABdB/2doAAB4vwAAVMgAAACLAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZBRAAAAAAAAAACw37EeresX/4qPmKCC2iLWK6/QFjp+Yh7R62zZgiexrQAAABdB/1PgAAB4vwAAVMgAAACLAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "none", + "signatures": [ + "y3niLNdTDYEmLv9n13RAm58VBy0zTexh5IsbM/ajTDOA00ozphxymabRayRL8xHQZRWFka9kh+zlyLfnIB4JBw==" + ] + } + ] + } +}` + +var emptyTransactionsPage = `{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions?cursor=1566052450312192&limit=2&order=desc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/transactions?cursor=1566048155353088&limit=2&order=desc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/transactions?cursor=1566052450308096&limit=2&order=asc" + } + }, + "_embedded": { + "records": [] + } +}` diff --git a/clients/horizonclient/version.go b/clients/horizonclient/version.go new file mode 100644 index 0000000000..9735a00e56 --- /dev/null +++ b/clients/horizonclient/version.go @@ -0,0 +1,5 @@ +package horizonclient + +// version is the current version of the horizonclient. +// This is updated for every release. +const version = "2.1.0" diff --git a/clients/stellarcore/client.go b/clients/stellarcore/client.go new file mode 100644 index 0000000000..7fdb62515a --- /dev/null +++ b/clients/stellarcore/client.go @@ -0,0 +1,241 @@ +package stellarcore + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + proto "github.com/stellar/go/protocols/stellarcore" + "github.com/stellar/go/support/errors" +) + +// Client represents a client that is capable of communicating with a +// stellar-core server using HTTP +type Client struct { + // HTTP is the client to use when communicating with stellar-core. If nil, + // http.DefaultClient will be used. + HTTP HTTP + + // URL of Stellar Core server to connect. 
+ URL string +} + +// Upgrade upgrades the protocol version running on the stellar core instance +func (c *Client) Upgrade(ctx context.Context, version int) error { + queryParams := url.Values{} + queryParams.Add("mode", "set") + queryParams.Add("upgradetime", "1970-01-01T00:00:00Z") + queryParams.Add("protocolversion", strconv.Itoa(version)) + + req, err := c.simpleGet(ctx, "upgrades", queryParams) + if err != nil { + return errors.Wrap(err, "failed to create request") + } + + hresp, err := c.http().Do(req) + if err != nil { + return errors.Wrap(err, "http request errored") + } + defer hresp.Body.Close() + + if !(hresp.StatusCode >= 200 && hresp.StatusCode < 300) { + return errors.New("http request failed with non-200 status code") + } + + return nil +} + +// Info calls the `info` command on the connected stellar core and returns the +// provided response +func (c *Client) Info(ctx context.Context) (resp *proto.InfoResponse, err error) { + + req, err := c.simpleGet(ctx, "info", nil) + if err != nil { + err = errors.Wrap(err, "failed to create request") + return + } + + hresp, err := c.http().Do(req) + if err != nil { + err = errors.Wrap(err, "http request errored") + return + } + defer hresp.Body.Close() + + if !(hresp.StatusCode >= 200 && hresp.StatusCode < 300) { + err = errors.New("http request failed with non-200 status code") + return + } + + err = json.NewDecoder(hresp.Body).Decode(&resp) + + if err != nil { + err = errors.Wrap(err, "json decode failed") + return + } + + return +} + +// SetCursor calls the `setcursor` command on the connected stellar core +func (c *Client) SetCursor(ctx context.Context, id string, cursor int32) error { + req, err := c.simpleGet(ctx, "setcursor", url.Values{ + "id": []string{id}, + "cursor": []string{fmt.Sprintf("%d", cursor)}, + }) + + if err != nil { + return errors.Wrap(err, "failed to create request") + } + + hresp, err := c.http().Do(req) + if err != nil { + return errors.Wrap(err, "http request errored") + } + defer hresp.Body.Close() + + raw, err := ioutil.ReadAll(hresp.Body) + if err != nil { + return err + } + + body := strings.TrimSpace(string(raw)) + if body != SetCursorDone { + return errors.Errorf("failed to set cursor on stellar-core: %s", body) + } + + return nil +} + +// SubmitTransaction calls the `tx` command on the connected stellar core with the provided envelope +func (c *Client) SubmitTransaction(ctx context.Context, envelope string) (resp *proto.TXResponse, err error) { + + q := url.Values{} + q.Set("blob", envelope) + + req, err := c.simpleGet(ctx, "tx", q) + if err != nil { + err = errors.Wrap(err, "failed to create request") + return + } + + hresp, err := c.http().Do(req) + if err != nil { + err = errors.Wrap(err, "http request errored") + return + } + defer hresp.Body.Close() + + err = json.NewDecoder(hresp.Body).Decode(&resp) + + if err != nil { + err = errors.Wrap(err, "json decode failed") + return + } + + return +} + +// WaitForNetworkSync continually polls the connected stellar-core until it +// receives a response that indicated the node has synced with the network +func (c *Client) WaitForNetworkSync(ctx context.Context) error { + + for { + info, err := c.Info(ctx) + + if err != nil { + return errors.Wrap(err, "info request failed") + } + + if info.IsSynced() { + return nil + } + + // wait for next attempt or error if canceled while waiting + select { + case <-ctx.Done(): + return errors.New("canceled") + case <-time.After(5 * time.Second): + continue + } + } +} + +// ManualClose closes a ledger when Core is running 
in `MANUAL_CLOSE` mode +func (c *Client) ManualClose(ctx context.Context) (err error) { + + q := url.Values{} + + req, err := c.simpleGet(ctx, "manualclose", q) + if err != nil { + err = errors.Wrap(err, "failed to create request") + return + } + + hresp, err := c.http().Do(req) + if err != nil { + err = errors.Wrap(err, "http request errored") + return + } + defer hresp.Body.Close() + + if !(hresp.StatusCode >= 200 && hresp.StatusCode < 300) { + err = errors.New("http request failed with non-200 status code") + } + + // verify there wasn't an exception + resp := struct { + Exception string `json:"exception"` + }{} + if decErr := json.NewDecoder(hresp.Body).Decode(&resp); decErr != nil { + return + } + if resp.Exception != "" { + err = fmt.Errorf("exception in response: %s", resp.Exception) + return + } + + return +} + +func (c *Client) http() HTTP { + if c.HTTP == nil { + return http.DefaultClient + } + + return c.HTTP +} + +// simpleGet returns a new GET request to the connected stellar-core using the +// provided path and query values to construct the result. +func (c *Client) simpleGet( + ctx context.Context, + newPath string, + query url.Values, +) (*http.Request, error) { + + u, err := url.Parse(c.URL) + if err != nil { + return nil, errors.Wrap(err, "unparseable url") + } + + u.Path = path.Join(u.Path, newPath) + if query != nil { + u.RawQuery = query.Encode() + } + newURL := u.String() + + req, err := http.NewRequest(http.MethodGet, newURL, nil) + if err != nil { + return nil, errors.Wrap(err, "failed to create request") + } + + return req.WithContext(ctx), nil +} diff --git a/clients/stellarcore/client_test.go b/clients/stellarcore/client_test.go new file mode 100644 index 0000000000..90f4c1cc55 --- /dev/null +++ b/clients/stellarcore/client_test.go @@ -0,0 +1,56 @@ +package stellarcore + +import ( + "context" + "net/http" + "testing" + + proto "github.com/stellar/go/protocols/stellarcore" + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" +) + +func TestSubmitTransaction(t *testing.T) { + hmock := httptest.NewClient() + c := &Client{HTTP: hmock, URL: "http://localhost:11626"} + + // happy path - new transaction + hmock.On("GET", "http://localhost:11626/tx?blob=foo"). + ReturnJSON(http.StatusOK, proto.TXResponse{ + Status: proto.TXStatusPending, + }) + + resp, err := c.SubmitTransaction(context.Background(), "foo") + + if assert.NoError(t, err) { + assert.Equal(t, proto.TXStatusPending, resp.Status) + } +} + +func TestManualClose(t *testing.T) { + hmock := httptest.NewClient() + c := &Client{HTTP: hmock, URL: "http://localhost:11626"} + + // happy path - new transaction + hmock.On("GET", "http://localhost:11626/manualclose"). + ReturnString(http.StatusOK, "Manually triggered a ledger close with sequence number 7") + + err := c.ManualClose(context.Background()) + + assert.NoError(t, err) +} + +func TestManualClose_NotAvailable(t *testing.T) { + hmock := httptest.NewClient() + c := &Client{HTTP: hmock, URL: "http://localhost:11626"} + + // happy path - new transaction + hmock.On("GET", "http://localhost:11626/manualclose"). 
+ ReturnString(http.StatusOK, + `{"exception": "Set MANUAL_CLOSE=true"}`, + ) + + err := c.ManualClose(context.Background()) + + assert.EqualError(t, err, "exception in response: Set MANUAL_CLOSE=true") +} diff --git a/clients/stellarcore/main.go b/clients/stellarcore/main.go new file mode 100644 index 0000000000..c7dd97b00c --- /dev/null +++ b/clients/stellarcore/main.go @@ -0,0 +1,18 @@ +// Package stellarcore is a client library for communicating with an +// instance of stellar-core using through the server's HTTP port. +package stellarcore + +import "net/http" + +// SetCursorDone is the success message returned by stellar-core when a cursor +// update succeeds. +const SetCursorDone = "Done" + +// HTTP represents the http client that a stellarcore client uses to make http +// requests. +type HTTP interface { + Do(req *http.Request) (*http.Response, error) +} + +// confirm interface conformity +var _ HTTP = http.DefaultClient diff --git a/clients/stellarcore/main_test.go b/clients/stellarcore/main_test.go new file mode 100644 index 0000000000..110d0d5cec --- /dev/null +++ b/clients/stellarcore/main_test.go @@ -0,0 +1,18 @@ +package stellarcore + +import ( + "context" + "fmt" +) + +func ExampleClient_Info() { + client := &Client{URL: "http://localhost:11626"} + + info, err := client.Info(context.Background()) + + if err != nil { + panic(err) + } + + fmt.Printf("synced: %v", info.IsSynced()) +} diff --git a/clients/stellartoml/client.go b/clients/stellartoml/client.go new file mode 100644 index 0000000000..373f3d36f0 --- /dev/null +++ b/clients/stellartoml/client.go @@ -0,0 +1,70 @@ +package stellartoml + +import ( + "fmt" + "io" + "net/http" + + "github.com/BurntSushi/toml" + "github.com/stellar/go/address" + "github.com/stellar/go/support/errors" +) + +// GetStellarToml returns stellar.toml file for a given domain +func (c *Client) GetStellarToml(domain string) (resp *Response, err error) { + var hresp *http.Response + hresp, err = c.HTTP.Get(c.url(domain)) + if err != nil { + err = errors.Wrap(err, "http request errored") + return + } + defer hresp.Body.Close() + + if !(hresp.StatusCode >= 200 && hresp.StatusCode < 300) { + err = errors.New("http request failed with non-200 status code") + return + } + + limitReader := io.LimitReader(hresp.Body, StellarTomlMaxSize) + _, err = toml.DecodeReader(limitReader, &resp) + + // There is one corner case not handled here: response is exactly + // StellarTomlMaxSize long and is incorrect toml. 
Check discussion: + // https://github.com/stellar/go/pull/24#discussion_r89909696 + if err != nil && limitReader.(*io.LimitedReader).N == 0 { + err = errors.Errorf("stellar.toml response exceeds %d bytes limit", StellarTomlMaxSize) + return + } + + if err != nil { + err = errors.Wrap(err, "toml decode failed") + return + } + + return +} + +// GetStellarTomlByAddress returns stellar.toml file of a domain fetched from a +// given address +func (c *Client) GetStellarTomlByAddress(addr string) (*Response, error) { + _, domain, err := address.Split(addr) + if err != nil { + return nil, errors.Wrap(err, "parse address failed") + } + + return c.GetStellarToml(domain) +} + +// url returns the appropriate url to load for resolving domain's stellar.toml +// file +func (c *Client) url(domain string) string { + var scheme string + + if c.UseHTTP { + scheme = "http" + } else { + scheme = "https" + } + + return fmt.Sprintf("%s://%s%s", scheme, domain, WellKnownPath) +} diff --git a/clients/stellartoml/client_test.go b/clients/stellartoml/client_test.go new file mode 100644 index 0000000000..b93c1e44a0 --- /dev/null +++ b/clients/stellartoml/client_test.go @@ -0,0 +1,66 @@ +package stellartoml + +import ( + "strings" + "testing" + + "net/http" + + "github.com/stellar/go/support/http/httptest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestClientURL(t *testing.T) { + //HACK: we're testing an internal method rather than setting up a http client + //mock. + + c := &Client{UseHTTP: false} + assert.Equal(t, "https://stellar.org/.well-known/stellar.toml", c.url("stellar.org")) + + c = &Client{UseHTTP: true} + assert.Equal(t, "http://stellar.org/.well-known/stellar.toml", c.url("stellar.org")) +} + +func TestClient(t *testing.T) { + h := httptest.NewClient() + c := &Client{HTTP: h} + + // happy path + h. + On("GET", "https://stellar.org/.well-known/stellar.toml"). + ReturnString(http.StatusOK, + `FEDERATION_SERVER="https://localhost/federation"`, + ) + stoml, err := c.GetStellarToml("stellar.org") + require.NoError(t, err) + assert.Equal(t, "https://localhost/federation", stoml.FederationServer) + + // stellar.toml exceeds limit + h. + On("GET", "https://toobig.org/.well-known/stellar.toml"). + ReturnString(http.StatusOK, + `FEDERATION_SERVER="https://localhost/federation`+strings.Repeat("0", StellarTomlMaxSize)+`"`, + ) + stoml, err = c.GetStellarToml("toobig.org") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "stellar.toml response exceeds") + } + + // not found + h. + On("GET", "https://missing.org/.well-known/stellar.toml"). + ReturnNotFound() + stoml, err = c.GetStellarToml("missing.org") + assert.EqualError(t, err, "http request failed with non-200 status code") + + // invalid toml + h. + On("GET", "https://json.org/.well-known/stellar.toml"). + ReturnJSON(http.StatusOK, map[string]string{"hello": "world"}) + stoml, err = c.GetStellarToml("json.org") + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "toml decode failed") + } +} diff --git a/clients/stellartoml/main.go b/clients/stellartoml/main.go new file mode 100644 index 0000000000..e85555d803 --- /dev/null +++ b/clients/stellartoml/main.go @@ -0,0 +1,126 @@ +package stellartoml + +import "net/http" + +// StellarTomlMaxSize is the maximum size of stellar.toml file +const StellarTomlMaxSize = 100 * 1024 + +// WellKnownPath represents the url path at which the stellar.toml file should +// exist to conform to the federation protocol. 
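
The size cap in `GetStellarToml` above relies on `io.LimitReader`: when TOML decoding fails and the limited reader has been fully drained, the response is treated as oversized rather than merely malformed. A minimal standalone sketch of that pattern, with an illustrative size constant and sample TOML (names here are not part of the package):

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/BurntSushi/toml"
)

// maxSize mirrors the idea behind StellarTomlMaxSize; the value is illustrative.
const maxSize = 100 * 1024

type tomlResponse struct {
	FederationServer string `toml:"FEDERATION_SERVER"`
	SigningKey       string `toml:"SIGNING_KEY"`
}

// decodeCapped decodes at most maxSize bytes of TOML from r and reports an
// explicit error when the input hits the cap, as GetStellarToml does above.
func decodeCapped(r io.Reader) (*tomlResponse, error) {
	var resp tomlResponse
	limited := &io.LimitedReader{R: r, N: maxSize}
	_, err := toml.DecodeReader(limited, &resp)
	// If decoding failed and the limited reader is exhausted, the document was
	// most likely truncated by the cap rather than malformed.
	if err != nil && limited.N == 0 {
		return nil, fmt.Errorf("stellar.toml response exceeds %d bytes limit", maxSize)
	}
	if err != nil {
		return nil, err
	}
	return &resp, nil
}

func main() {
	body := `FEDERATION_SERVER="https://localhost/federation"`
	resp, err := decodeCapped(strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.FederationServer)
}
```
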
+const WellKnownPath = "/.well-known/stellar.toml" + +// HTTP represents the http client that a stellertoml resolver uses to make http +// requests. +type HTTP interface { + Get(url string) (*http.Response, error) +} + +// Client represents a client that is capable of resolving a Stellar.toml file +// using the internet. +type Client struct { + // HTTP is the http client used when resolving a Stellar.toml file + HTTP HTTP + + // UseHTTP forces the client to resolve against servers using plain HTTP. + // Useful for debugging. + UseHTTP bool +} + +type ClientInterface interface { + GetStellarToml(domain string) (*Response, error) + GetStellarTomlByAddress(addr string) (*Response, error) +} + +// DefaultClient is a default client using the default parameters +var DefaultClient = &Client{HTTP: http.DefaultClient} + +type Principal struct { + Name string `toml:"name"` + Email string `toml:"email"` + Keybase string `toml:"keybase"` + Telegram string `toml:"telegram"` + Twitter string `toml:"twitter"` + Github string `toml:"github"` + IdPhotoHash string `toml:"id_photo_hash"` + VerificationPhotoHash string `toml:"verification_photo_hash"` +} + +type Currency struct { + Code string `toml:"code"` + CodeTemplate string `toml:"code_template"` + Issuer string `toml:"issuer"` + Status string `toml:"status"` + DisplayDecimals int `toml:"display_decimals"` + Name string `toml:"name"` + Desc string `toml:"desc"` + Conditions string `toml:"conditions"` + Image string `toml:"image"` + FixedNumber int `toml:"fixed_number"` + MaxNumber int `toml:"max_number"` + IsUnlimited bool `toml:"is_unlimited"` + IsAssetAnchored bool `toml:"is_asset_anchored"` + AnchorAsset string `toml:"anchor_asset"` + RedemptionInstructions string `toml:"redemption_instructions"` + CollateralAddresses []string `toml:"collateral_addresses"` + CollateralAddressMessages []string `toml:"collateral_address_messages"` + CollateralAddressSignatures []string `toml:"collateral_address_signatures"` + Regulated string `toml:"regulated"` + ApprovalServer string `toml:"APPROVAL_SERVER"` + ApprovalCriteria string `toml:"APPROVAL_CRITERIA"` +} + +type Validator struct { + Alias string `toml:"ALIAS"` + DisplayName string `toml:"DISPLAY_NAME"` + PublicKey string `toml:"PUBLIC_KEY"` + Host string `toml:"HOST"` + History string `toml:"HISTORY"` +} + +// SEP-1 commit +// https://github.com/stellar/stellar-protocol/blob/f8993e36fa6b5b8bba1254c21c2174d250af4958/ecosystem/sep-0001.md +type Response struct { + Version string `toml:"VERSION"` + NetworkPassphrase string `toml:"NETWORK_PASSPHRASE"` + FederationServer string `toml:"FEDERATION_SERVER"` + AuthServer string `toml:"AUTH_SERVER"` + TransferServer string `toml:"TRANSFER_SERVER"` + TransferServer0024 string `toml:"TRANSFER_SERVER_0024"` + KycServer string `toml:"KYC_SERVER"` + WebAuthEndpoint string `toml:"WEB_AUTH_ENDPOINT"` + SigningKey string `toml:"SIGNING_KEY"` + HorizonUrl string `toml:"HORIZON_URL"` + Accounts []string `toml:"ACCOUNTS"` + UriRequestSigningKey string `toml:"URI_REQUEST_SIGNING_KEY"` + DirectPaymentServer string `toml:"DIRECT_PAYMENT_SERVER"` + OrgName string `toml:"ORG_NAME"` + OrgDba string `toml:"ORG_DBA"` + OrgUrl string `toml:"ORG_URL"` + OrgLogo string `toml:"ORG_LOGO"` + OrgDescription string `toml:"ORG_DESCRIPTION"` + OrgPhysicalAddress string `toml:"ORG_PHYSICAL_ADDRESS"` + OrgPhysicalAddressAttestation string `toml:"ORG_PHYSICAL_ADDRESS_ATTESTATION"` + OrgPhoneNumber string `toml:"ORG_PHONE_NUMBER"` + OrgPhoneNumberAttestation string `toml:"ORG_PHONE_NUMBER_ATTESTATION"` 
+ OrgKeybase string `toml:"ORG_KEYBASE"` + OrgTwitter string `toml:"ORG_TWITTER"` + OrgGithub string `toml:"ORG_GITHUB"` + OrgOfficialEmail string `toml:"ORG_OFFICIAL_EMAIL"` + OrgLicensingAuthority string `toml:"ORG_LICENSING_AUTHORITY"` + OrgLicenseType string `toml:"ORG_LICENSE_TYPE"` + OrgLicenseNumber string `toml:"ORG_LICENSE_NUMBER"` + Principals []Principal `toml:"PRINCIPALS"` + Currencies []Currency `toml:"CURRENCIES"` + Validators []Validator `toml:"VALIDATORS"` +} + +// GetStellarToml returns stellar.toml file for a given domain +func GetStellarToml(domain string) (*Response, error) { + return DefaultClient.GetStellarToml(domain) +} + +// GetStellarTomlByAddress returns stellar.toml file of a domain fetched from a +// given address +func GetStellarTomlByAddress(addr string) (*Response, error) { + return DefaultClient.GetStellarTomlByAddress(addr) +} diff --git a/clients/stellartoml/main_test.go b/clients/stellartoml/main_test.go new file mode 100644 index 0000000000..b5ba21e0d7 --- /dev/null +++ b/clients/stellartoml/main_test.go @@ -0,0 +1,11 @@ +package stellartoml + +import "log" + +// ExampleGetTOML gets the stellar.toml file for coins.asia +func ExampleClient_GetStellarToml() { + _, err := DefaultClient.GetStellarToml("coins.asia") + if err != nil { + log.Fatal(err) + } +} diff --git a/clients/stellartoml/mocks.go b/clients/stellartoml/mocks.go new file mode 100644 index 0000000000..14a132f5b8 --- /dev/null +++ b/clients/stellartoml/mocks.go @@ -0,0 +1,20 @@ +package stellartoml + +import "github.com/stretchr/testify/mock" + +// MockClient is a mockable stellartoml client. +type MockClient struct { + mock.Mock +} + +// GetStellarToml is a mocking a method +func (m *MockClient) GetStellarToml(domain string) (*Response, error) { + a := m.Called(domain) + return a.Get(0).(*Response), a.Error(1) +} + +// GetStellarTomlByAddress is a mocking a method +func (m *MockClient) GetStellarTomlByAddress(address string) (*Response, error) { + a := m.Called(address) + return a.Get(0).(*Response), a.Error(1) +} diff --git a/doc.go b/doc.go deleted file mode 100644 index 72631149bc..0000000000 --- a/doc.go +++ /dev/null @@ -1 +0,0 @@ -package stellargo diff --git a/docs/reference/examples.md b/docs/reference/examples.md new file mode 100644 index 0000000000..c8bb14cfb9 --- /dev/null +++ b/docs/reference/examples.md @@ -0,0 +1,84 @@ +--- +title: Basic Examples +--- + +## Create an account + +The first account on TestNet needs to be created by calling friendbot, a helper service that will create and fund the +provided account address. However, on the public network, you need an initial, funded account before you can create further accounts. Typically a wallet or exchange can create an initial account for you. + +In this TestNet example, we first get an account funded from friendbot, and then demonstrate the `create account` +operation to set up a second account. 
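
Note that the full listing below discards the result of `client.Fund`. Friendbot can fail or be temporarily unavailable, so production code should check that call. A minimal sketch of just the funding step, assuming `Fund` returns the funding transaction (with its `Hash`) and an error, which matches the `horizonclient` API used in the listing:

```go
package main

import (
	"log"

	"github.com/stellar/go/clients/horizonclient"
	"github.com/stellar/go/keypair"
)

func main() {
	pair, err := keypair.Random()
	if err != nil {
		log.Fatal(err)
	}

	client := horizonclient.DefaultTestNetClient

	// Checking the error avoids continuing with an account that was never created.
	tx, err := client.Fund(pair.Address())
	if err != nil {
		log.Fatal("friendbot funding failed: ", err)
	}
	log.Println("funded", pair.Address(), "in transaction", tx.Hash)
}
```

The complete example below then builds on the funded account.
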
+ +```go +package main + +import ( + "log" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/txnbuild" +) + +func main() { + // Generate a new randomly generated address + pair, err := keypair.Random() + if err != nil { + log.Fatal(err) + } + log.Println("Seed 0:", pair.Seed()) + log.Println("Address 0:", pair.Address()) + + // Create and fund the address on TestNet, using friendbot + client := horizonclient.DefaultTestNetClient + client.Fund(pair.Address()) + + // Get information about the account we just created + accountRequest := horizonclient.AccountRequest{AccountID: pair.Address()} + hAccount0, err := client.AccountDetail(accountRequest) + if err != nil { + log.Fatal(err) + } + + // Generate a second randomly generated address + kp1, err := keypair.Random() + if err != nil { + log.Fatal(err) + } + log.Println("Seed 1:", kp1.Seed()) + log.Println("Address 1:", kp1.Address()) + + // Construct the operation + createAccountOp := txnbuild.CreateAccount{ + Destination: kp1.Address(), + Amount: "10", + } + + // Construct the transaction that will carry the operation + txParams := txnbuild.TransactionParams{ + SourceAccount: &hAccount0, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&createAccountOp}, + Timebounds: txnbuild.NewTimeout(300), + BaseFee: 100, + } + tx, _ := txnbuild.NewTransaction(txParams) + + // Sign the transaction, and base 64 encode its XDR representation + signedTx, _ := tx.Sign(network.TestNetworkPassphrase, pair) + txeBase64, _ := signedTx.Base64() + log.Println("Transaction base64: ", txeBase64) + + // Submit the transaction + resp, err := client.SubmitTransactionXDR(txeBase64) + if err != nil { + hError := err.(*horizonclient.Error) + log.Fatal("Error submitting transaction:", hError.Problem) + } + + log.Println("\nTransaction response: ", resp) +} + +``` diff --git a/docs/reference/readme.md b/docs/reference/readme.md new file mode 100644 index 0000000000..13aef54d7a --- /dev/null +++ b/docs/reference/readme.md @@ -0,0 +1,18 @@ +--- +title: Overview +--- + +The Go SDK is a set of packages for interacting with most aspects of the Stellar ecosystem. The primary component is the Horizon SDK, which provides convenient access to Horizon services. There are also packages for other Stellar services such as [TOML support](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0001.md) and [federation](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0002.md). + +## Horizon SDK + +The Horizon SDK is composed of two complementary libraries: `txnbuild` + `horizonclient`. +The `txnbuild` ([source](https://github.com/stellar/go/tree/master/txnbuild), [docs](https://godoc.org/github.com/stellar/go/txnbuild)) package enables the construction, signing and encoding of Stellar [transactions](https://developers.stellar.org/docs/glossary/transactions/) and [operations](https://developers.stellar.org/docs/start/list-of-operations/) in Go. The `horizonclient` ([source](https://github.com/stellar/go/tree/master/clients/horizonclient), [docs](https://godoc.org/github.com/stellar/go/clients/horizonclient)) package provides a web client for interfacing with [Horizon](https://developers.stellar.org/docs/start/introduction/) server REST endpoints to retrieve ledger information, and to submit transactions built with `txnbuild`. 
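
A minimal sketch of using `horizonclient` on its own to look up a TestNet account; the account ID is a placeholder, and the printed fields are assumed to follow the `protocols/horizon` response types:

```go
package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/clients/horizonclient"
)

func main() {
	client := horizonclient.DefaultTestNetClient

	// Any funded TestNet account ID works here; this one is a placeholder.
	request := horizonclient.AccountRequest{AccountID: "GCWSJRG6YZSA374IY7LF53PIGTO6JD6BP5CNMUAVNWL3YYE636F3APML"}
	account, err := client.AccountDetail(request)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("sequence number:", account.Sequence)
	for _, balance := range account.Balances {
		fmt.Println(balance.Asset.Type, balance.Balance)
	}
}
```
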
+ +## List of major SDK packages + +- `horizonclient` ([source](https://github.com/stellar/go/tree/master/clients/horizonclient), [docs](https://godoc.org/github.com/stellar/go/clients/horizonclient)) - programmatic client access to Horizon +- `txnbuild` ([source](https://github.com/stellar/go/tree/master/txnbuild), [docs](https://godoc.org/github.com/stellar/go/txnbuild)) - construction, signing and encoding of Stellar transactions and operations +- `stellartoml` ([source](https://github.com/stellar/go/tree/master/clients/stellartoml), [docs](https://godoc.org/github.com/stellar/go/clients/stellartoml)) - parse [Stellar.toml](../../guides/concepts/stellar-toml.md) files from the internet +- `federation` ([source](https://godoc.org/github.com/stellar/go/clients/federation)) - resolve federation addresses into stellar account IDs, suitable for use within a transaction + diff --git a/exp/README.me b/exp/README.md similarity index 93% rename from exp/README.me rename to exp/README.md index 191696537c..846c939499 100644 --- a/exp/README.me +++ b/exp/README.md @@ -6,4 +6,4 @@ See [godoc](https://godoc.org/github.com/stellar/go/exp) for details about each ## Adding experimental services, clients, tools, etc. -Just as with the non-experimental portion of this repo, the `exp` package uses a well-defined package structure for a few common activities. An experimental service would go in `github.com/stellar/go/exp/services`, for example. \ No newline at end of file +Just as with the non-experimental portion of this repo, the `exp` package uses a well-defined package structure for a few common activities. An experimental service would go in `github.com/stellar/go/exp/services`, for example. diff --git a/exp/crypto/derivation/doc.go b/exp/crypto/derivation/doc.go new file mode 100644 index 0000000000..481afd1da0 --- /dev/null +++ b/exp/crypto/derivation/doc.go @@ -0,0 +1,3 @@ +// Package derivation provides functions for ed25519 key derivation as described in: +// https://github.com/satoshilabs/slips/blob/master/slip-0010.md +package derivation diff --git a/exp/crypto/derivation/main.go b/exp/crypto/derivation/main.go new file mode 100644 index 0000000000..89d0ea483c --- /dev/null +++ b/exp/crypto/derivation/main.go @@ -0,0 +1,142 @@ +package derivation + +import ( + "bytes" + "crypto/ed25519" + "crypto/hmac" + "crypto/sha512" + "encoding/binary" + "errors" + "regexp" + "strconv" + "strings" +) + +const ( + // StellarAccountPrefix is a prefix for Stellar key pairs derivation. + StellarAccountPrefix = "m/44'/148'" + // StellarPrimaryAccountPath is a derivation path of the primary account. + StellarPrimaryAccountPath = "m/44'/148'/0'" + // StellarAccountPathFormat is a path format used for Stellar key pair + // derivation as described in SEP-00XX. Use with `fmt.Sprintf` and `DeriveForPath`. + StellarAccountPathFormat = "m/44'/148'/%d'" + // FirstHardenedIndex is the index of the first hardened key. + FirstHardenedIndex = uint32(0x80000000) + // As in https://github.com/satoshilabs/slips/blob/master/slip-0010.md + seedModifier = "ed25519 seed" +) + +var ( + ErrInvalidPath = errors.New("Invalid derivation path") + ErrNoPublicDerivation = errors.New("No public derivation for ed25519") + + pathRegex = regexp.MustCompile(`^m(\/[0-9]+')+$`) +) + +type Key struct { + Key []byte + ChainCode []byte +} + +// DeriveForPath derives key for a path in BIP-44 format and a seed. +// Ed25119 derivation operated on hardened keys only. 
+func DeriveForPath(path string, seed []byte) (*Key, error) { + if !isValidPath(path) { + return nil, ErrInvalidPath + } + + key, err := NewMasterKey(seed) + if err != nil { + return nil, err + } + + segments := strings.Split(path, "/") + for _, segment := range segments[1:] { + i64, err := strconv.ParseUint(strings.TrimRight(segment, "'"), 10, 32) + if err != nil { + return nil, err + } + + // We operate on hardened keys + i := uint32(i64) + FirstHardenedIndex + key, err = key.Derive(i) + if err != nil { + return nil, err + } + } + + return key, nil +} + +// NewMasterKey generates a new master key from seed. +func NewMasterKey(seed []byte) (*Key, error) { + hmac := hmac.New(sha512.New, []byte(seedModifier)) + _, err := hmac.Write(seed) + if err != nil { + return nil, err + } + sum := hmac.Sum(nil) + key := &Key{ + Key: sum[:32], + ChainCode: sum[32:], + } + return key, nil +} + +func (k *Key) Derive(i uint32) (*Key, error) { + // no public derivation for ed25519 + if i < FirstHardenedIndex { + return nil, ErrNoPublicDerivation + } + + iBytes := make([]byte, 4) + binary.BigEndian.PutUint32(iBytes, i) + key := append([]byte{0x0}, k.Key...) + data := append(key, iBytes...) + + hmac := hmac.New(sha512.New, k.ChainCode) + _, err := hmac.Write(data) + if err != nil { + return nil, err + } + sum := hmac.Sum(nil) + newKey := &Key{ + Key: sum[:32], + ChainCode: sum[32:], + } + return newKey, nil +} + +// PublicKey returns public key for a derived private key. +func (k *Key) PublicKey() ([]byte, error) { + reader := bytes.NewReader(k.Key) + pub, _, err := ed25519.GenerateKey(reader) + if err != nil { + return nil, err + } + return pub[:], nil +} + +// RawSeed returns raw seed bytes +func (k *Key) RawSeed() [32]byte { + var rawSeed [32]byte + copy(rawSeed[:], k.Key[:]) + return rawSeed +} + +func isValidPath(path string) bool { + if !pathRegex.MatchString(path) { + return false + } + + // Check for overflows + segments := strings.Split(path, "/") + for _, segment := range segments[1:] { + _, err := strconv.ParseUint(strings.TrimRight(segment, "'"), 10, 32) + if err != nil { + return false + } + } + + return true +} diff --git a/exp/crypto/derivation/main_test.go b/exp/crypto/derivation/main_test.go new file mode 100644 index 0000000000..5dd2a5b558 --- /dev/null +++ b/exp/crypto/derivation/main_test.go @@ -0,0 +1,258 @@ +package derivation + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/stellar/go/keypair" + "github.com/stretchr/testify/assert" +) + +func ExampleDeriveFromPath() { + seed, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f") + key, err := DeriveForPath(StellarPrimaryAccountPath, seed) + if err != nil { + panic(err) + } + + kp, err := keypair.FromRawSeed(key.RawSeed()) + if err != nil { + panic(err) + } + + fmt.Println(kp.Seed()) + fmt.Println(kp.Address()) + + // Output: + // SB6VZS57IY25334Y6F6SPGFUNESWS7D2OSJHKDPIZ354BK3FN5GBTS6V + // GCWSJRG6YZSA374IY7LF53PIGTO6JD6BP5CNMUAVNWL3YYE636F3APML +} + +func ExampleDeriveMultipleKeys() { + seed, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f") + + for i := 0; i < 10; i++ { + path := fmt.Sprintf(StellarAccountPathFormat, i) + key, err := DeriveForPath(path, seed) + if err != nil { + panic(err) + } + + kp, err := keypair.FromRawSeed(key.RawSeed()) + if err != nil { + panic(err) + } + + fmt.Println(path, kp.Seed(), kp.Address()) + } + + // Output: + // m/44'/148'/0' SB6VZS57IY25334Y6F6SPGFUNESWS7D2OSJHKDPIZ354BK3FN5GBTS6V GCWSJRG6YZSA374IY7LF53PIGTO6JD6BP5CNMUAVNWL3YYE636F3APML + // m/44'/148'/1' 
SBQXELSCK4ES2WYYDS6664VIK6XCYKUNC3HE77MYNCEXFJ2XOC3NIMK2 GDGYXMH2GBB6E4Z4ZW4APZ7JQTEBNGDAVOWBYEQVSAHA27HXYPHLY5GO + // m/44'/148'/2' SBUTA7E22ZKLKLJCAR2XZLR5G3KK7QZX2JEUPLRXDTK5SNERHOMXAAY5 GBUKOZ5272DZQR5CT5H5OCA4FTSRYXO6N56VHLX3BR4QQKIGGMVL6JJV + // m/44'/148'/3' SASF5BSLMHFHFEWY4UVPGXIILDCCX7DZS33ONG4HPJNBACM77Y7QRSBZ GD6QC2W63E3LNLJZZVK3SN2D6TOYZERNAXUUQ4X4SLAE7P6MH5IH6CVI + // m/44'/148'/4' SATFB32TYAYSVWCJCIXLW4UWP7CJY7QLXD3YHYUF4XTZNJWWK5JRB2DI GDRU2QN7DZ4FD3MAR4UFN2KSAOOSBVU2QA5FHTF2FL62IFYKJGRLVNAR + // m/44'/148'/5' SCA3VR76COFO3QKGPX6XVGGXBIHUQKD4IUTLCDHRZTBNP76V5QKGUF2P GAHWMS7V5R3OR33X32V42JIAHWSCA5JW3XAPCHG3PEBSOPRNMVCN6KL3 + // m/44'/148'/6' SBGHZ2FLCWGXBIZFEZXZFOPOYWEWDFCWIFIQ6SXVYY7QGCQA5HBPDZY7 GC2OPUPPYPV3IE4X2V26FXSD744SZNDYIYAYOXH6S7FPLN2K4PMONLMJ + // m/44'/148'/7' SC4F5CX2D2SWUOV6ZESZRCB4CTKI5LNQJ4F46BVOLOENGEKUN77JMTO7 GAK43JBFVKWFEQDNM2JP46BEEN5F257F5YNJOMLBGCM7E5TBMVQOKATM + // m/44'/148'/8' SBERNO4ZLRNGB54OK4A75Q5MBLIB2J577W2GQXCIUWY2KALTB2XEUIBZ GC4L6437RLPEA5QAN2GH7FYPVLVE7FQSG2DN3UATERKMOULB44J7ABWD + // m/44'/148'/9' SCK6ZQ7F2P44HJ3DGVQA3AQJX7YRYGTKHY3D273AYZMPH3HVE3SB5VLP GDCRJ5F3WRZ47GHPAKLOO3WECAFBU2LRH4YUGIFLAKQTXC3MYC2GVYQU +} + +func ExampleDeriveMultipleKeysFaster() { + seed, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f") + mainKey, err := DeriveForPath(StellarAccountPrefix, seed) + if err != nil { + panic(err) + } + + for i := uint32(0); i < 10; i++ { + key, err := mainKey.Derive(FirstHardenedIndex + i) + if err != nil { + panic(err) + } + + kp, err := keypair.FromRawSeed(key.RawSeed()) + if err != nil { + panic(err) + } + + fmt.Println(fmt.Sprintf(StellarAccountPathFormat, i), kp.Seed(), kp.Address()) + } + + // Output: + // m/44'/148'/0' SB6VZS57IY25334Y6F6SPGFUNESWS7D2OSJHKDPIZ354BK3FN5GBTS6V GCWSJRG6YZSA374IY7LF53PIGTO6JD6BP5CNMUAVNWL3YYE636F3APML + // m/44'/148'/1' SBQXELSCK4ES2WYYDS6664VIK6XCYKUNC3HE77MYNCEXFJ2XOC3NIMK2 GDGYXMH2GBB6E4Z4ZW4APZ7JQTEBNGDAVOWBYEQVSAHA27HXYPHLY5GO + // m/44'/148'/2' SBUTA7E22ZKLKLJCAR2XZLR5G3KK7QZX2JEUPLRXDTK5SNERHOMXAAY5 GBUKOZ5272DZQR5CT5H5OCA4FTSRYXO6N56VHLX3BR4QQKIGGMVL6JJV + // m/44'/148'/3' SASF5BSLMHFHFEWY4UVPGXIILDCCX7DZS33ONG4HPJNBACM77Y7QRSBZ GD6QC2W63E3LNLJZZVK3SN2D6TOYZERNAXUUQ4X4SLAE7P6MH5IH6CVI + // m/44'/148'/4' SATFB32TYAYSVWCJCIXLW4UWP7CJY7QLXD3YHYUF4XTZNJWWK5JRB2DI GDRU2QN7DZ4FD3MAR4UFN2KSAOOSBVU2QA5FHTF2FL62IFYKJGRLVNAR + // m/44'/148'/5' SCA3VR76COFO3QKGPX6XVGGXBIHUQKD4IUTLCDHRZTBNP76V5QKGUF2P GAHWMS7V5R3OR33X32V42JIAHWSCA5JW3XAPCHG3PEBSOPRNMVCN6KL3 + // m/44'/148'/6' SBGHZ2FLCWGXBIZFEZXZFOPOYWEWDFCWIFIQ6SXVYY7QGCQA5HBPDZY7 GC2OPUPPYPV3IE4X2V26FXSD744SZNDYIYAYOXH6S7FPLN2K4PMONLMJ + // m/44'/148'/7' SC4F5CX2D2SWUOV6ZESZRCB4CTKI5LNQJ4F46BVOLOENGEKUN77JMTO7 GAK43JBFVKWFEQDNM2JP46BEEN5F257F5YNJOMLBGCM7E5TBMVQOKATM + // m/44'/148'/8' SBERNO4ZLRNGB54OK4A75Q5MBLIB2J577W2GQXCIUWY2KALTB2XEUIBZ GC4L6437RLPEA5QAN2GH7FYPVLVE7FQSG2DN3UATERKMOULB44J7ABWD + // m/44'/148'/9' SCK6ZQ7F2P44HJ3DGVQA3AQJX7YRYGTKHY3D273AYZMPH3HVE3SB5VLP GDCRJ5F3WRZ47GHPAKLOO3WECAFBU2LRH4YUGIFLAKQTXC3MYC2GVYQU +} + +func BenchmarkDerive(b *testing.B) { + seed, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f") + + for i := 0; i < b.N; i++ { + _, err := DeriveForPath(StellarPrimaryAccountPath, seed) + if err != nil { + panic(err) + } + } +} + +func BenchmarkDeriveFast(b *testing.B) { + seed, _ := hex.DecodeString("000102030405060708090a0b0c0d0e0f") + mainKey, err := DeriveForPath(StellarAccountPrefix, seed) + if err != nil { + panic(err) + } + + for i := 0; i < b.N; i++ { + _, err := mainKey.Derive(FirstHardenedIndex) + 
if err != nil { + panic(err) + } + } +} + +func TestIsValidPath(t *testing.T) { + assert.True(t, isValidPath("m/0'")) + assert.True(t, isValidPath("m/0'/100'")) + assert.True(t, isValidPath("m/0'/100'/200'")) + assert.True(t, isValidPath("m/0'/100'/200'/300'")) + + assert.False(t, isValidPath("foobar")) + assert.False(t, isValidPath("m")) // Master key only + assert.False(t, isValidPath("m/0")) // Missing ' + assert.False(t, isValidPath("m/0'/")) // Trailing slash + assert.False(t, isValidPath("m/0'/893478327492379497823'")) // Overflow +} + +// https://github.com/satoshilabs/slips/blob/master/slip-0010.md#test-vector-1-for-ed25519 +func TestDeriveVector1(t *testing.T) { + seed, err := hex.DecodeString("000102030405060708090a0b0c0d0e0f") + assert.NoError(t, err) + + key, err := NewMasterKey(seed) + assert.NoError(t, err) + assert.Equal(t, "2b4be7f19ee27bbf30c667b642d5f4aa69fd169872f8fc3059c08ebae2eb19e7", hex.EncodeToString(key.Key)) + assert.Equal(t, "90046a93de5380a72b5e45010748567d5ea02bbf6522f979e05c0d8d8ca9fffb", hex.EncodeToString(key.ChainCode)) + publicKey, err := key.PublicKey() + assert.NoError(t, err) + assert.Equal(t, "00a4b2856bfec510abab89753fac1ac0e1112364e7d250545963f135f2a33188ed", hex.EncodeToString(append([]byte{0x0}, publicKey...))) + + tests := []struct { + Path string + ChainCode string + PrivateKey string + PublicKey string + }{ + { + Path: "m/0'", + ChainCode: "8b59aa11380b624e81507a27fedda59fea6d0b779a778918a2fd3590e16e9c69", + PrivateKey: "68e0fe46dfb67e368c75379acec591dad19df3cde26e63b93a8e704f1dade7a3", + PublicKey: "008c8a13df77a28f3445213a0f432fde644acaa215fc72dcdf300d5efaa85d350c", + }, + { + Path: "m/0'/1'", + ChainCode: "a320425f77d1b5c2505a6b1b27382b37368ee640e3557c315416801243552f14", + PrivateKey: "b1d0bad404bf35da785a64ca1ac54b2617211d2777696fbffaf208f746ae84f2", + PublicKey: "001932a5270f335bed617d5b935c80aedb1a35bd9fc1e31acafd5372c30f5c1187", + }, + { + Path: "m/0'/1'/2'", + ChainCode: "2e69929e00b5ab250f49c3fb1c12f252de4fed2c1db88387094a0f8c4c9ccd6c", + PrivateKey: "92a5b23c0b8a99e37d07df3fb9966917f5d06e02ddbd909c7e184371463e9fc9", + PublicKey: "00ae98736566d30ed0e9d2f4486a64bc95740d89c7db33f52121f8ea8f76ff0fc1", + }, + { + Path: "m/0'/1'/2'/2'", + ChainCode: "8f6d87f93d750e0efccda017d662a1b31a266e4a6f5993b15f5c1f07f74dd5cc", + PrivateKey: "30d1dc7e5fc04c31219ab25a27ae00b50f6fd66622f6e9c913253d6511d1e662", + PublicKey: "008abae2d66361c879b900d204ad2cc4984fa2aa344dd7ddc46007329ac76c429c", + }, + { + Path: "m/0'/1'/2'/2'/1000000000'", + ChainCode: "68789923a0cac2cd5a29172a475fe9e0fb14cd6adb5ad98a3fa70333e7afa230", + PrivateKey: "8f94d394a8e8fd6b1bc2f3f49f5c47e385281d5c17e65324b0f62483e37e8793", + PublicKey: "003c24da049451555d51a7014a37337aa4e12d41e485abccfa46b47dfb2af54b7a", + }, + } + + for _, test := range tests { + key, err = DeriveForPath(test.Path, seed) + assert.NoError(t, err) + assert.Equal(t, test.PrivateKey, hex.EncodeToString(key.Key)) + assert.Equal(t, test.ChainCode, hex.EncodeToString(key.ChainCode)) + publicKey, err := key.PublicKey() + assert.NoError(t, err) + assert.Equal(t, test.PublicKey, hex.EncodeToString(append([]byte{0x0}, publicKey...))) + } +} + +// https://github.com/satoshilabs/slips/blob/master/slip-0010.md#test-vector-2-for-ed25519 +func TestDeriveVector2(t *testing.T) { + seed, err := hex.DecodeString("fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542") + assert.NoError(t, err) + + key, err := NewMasterKey(seed) + assert.NoError(t, err) + 
assert.Equal(t, "171cb88b1b3c1db25add599712e36245d75bc65a1a5c9e18d76f9f2b1eab4012", hex.EncodeToString(key.Key)) + assert.Equal(t, "ef70a74db9c3a5af931b5fe73ed8e1a53464133654fd55e7a66f8570b8e33c3b", hex.EncodeToString(key.ChainCode)) + publicKey, err := key.PublicKey() + assert.NoError(t, err) + assert.Equal(t, "008fe9693f8fa62a4305a140b9764c5ee01e455963744fe18204b4fb948249308a", hex.EncodeToString(append([]byte{0x0}, publicKey...))) + + tests := []struct { + Path string + ChainCode string + PrivateKey string + PublicKey string + }{ + { + Path: "m/0'", + ChainCode: "0b78a3226f915c082bf118f83618a618ab6dec793752624cbeb622acb562862d", + PrivateKey: "1559eb2bbec5790b0c65d8693e4d0875b1747f4970ae8b650486ed7470845635", + PublicKey: "0086fab68dcb57aa196c77c5f264f215a112c22a912c10d123b0d03c3c28ef1037", + }, + { + Path: "m/0'/2147483647'", + ChainCode: "138f0b2551bcafeca6ff2aa88ba8ed0ed8de070841f0c4ef0165df8181eaad7f", + PrivateKey: "ea4f5bfe8694d8bb74b7b59404632fd5968b774ed545e810de9c32a4fb4192f4", + PublicKey: "005ba3b9ac6e90e83effcd25ac4e58a1365a9e35a3d3ae5eb07b9e4d90bcf7506d", + }, + { + Path: "m/0'/2147483647'/1'", + ChainCode: "73bd9fff1cfbde33a1b846c27085f711c0fe2d66fd32e139d3ebc28e5a4a6b90", + PrivateKey: "3757c7577170179c7868353ada796c839135b3d30554bbb74a4b1e4a5a58505c", + PublicKey: "002e66aa57069c86cc18249aecf5cb5a9cebbfd6fadeab056254763874a9352b45", + }, + { + Path: "m/0'/2147483647'/1'/2147483646'", + ChainCode: "0902fe8a29f9140480a00ef244bd183e8a13288e4412d8389d140aac1794825a", + PrivateKey: "5837736c89570de861ebc173b1086da4f505d4adb387c6a1b1342d5e4ac9ec72", + PublicKey: "00e33c0f7d81d843c572275f287498e8d408654fdf0d1e065b84e2e6f157aab09b", + }, + { + Path: "m/0'/2147483647'/1'/2147483646'/2'", + ChainCode: "5d70af781f3a37b829f0d060924d5e960bdc02e85423494afc0b1a41bbe196d4", + PrivateKey: "551d333177df541ad876a60ea71f00447931c0a9da16f227c11ea080d7391b8d", + PublicKey: "0047150c75db263559a70d5778bf36abbab30fb061ad69f69ece61a72b0cfa4fc0", + }, + } + + for _, test := range tests { + key, err = DeriveForPath(test.Path, seed) + assert.NoError(t, err) + assert.Equal(t, test.PrivateKey, hex.EncodeToString(key.Key)) + assert.Equal(t, test.ChainCode, hex.EncodeToString(key.ChainCode)) + publicKey, err := key.PublicKey() + assert.NoError(t, err) + assert.Equal(t, test.PublicKey, hex.EncodeToString(append([]byte{0x0}, publicKey...))) + } +} diff --git a/exp/orderbook/batch.go b/exp/orderbook/batch.go new file mode 100644 index 0000000000..0497de001e --- /dev/null +++ b/exp/orderbook/batch.go @@ -0,0 +1,118 @@ +package orderbook + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +const ( + _ = iota + // the operationType enum values start at 1 because when constructing a + // orderBookOperation struct, the operationType field should always be specified + // explicitly. 
if the operationType enum values started at 0 then it would be + // possible to create a valid orderBookOperation struct without specifying + // the operationType field + addOfferOperationType = iota + removeOfferOperationType = iota + addLiquidityPoolOperationType = iota + removeLiquidityPoolOperationType = iota +) + +type orderBookOperation struct { + operationType int + offerID xdr.Int64 + offer *xdr.OfferEntry + liquidityPool *xdr.LiquidityPoolEntry +} + +type orderBookBatchedUpdates struct { + operations []orderBookOperation + orderbook *OrderBookGraph + committed bool +} + +// addOffer will queue an operation to add the given offer to the order book +func (tx *orderBookBatchedUpdates) addOffer(offer xdr.OfferEntry) *orderBookBatchedUpdates { + tx.operations = append(tx.operations, orderBookOperation{ + operationType: addOfferOperationType, + offerID: offer.OfferId, + offer: &offer, + }) + + return tx +} + +// addLiquidityPool will queue an operation to add the given liquidity pool to the order book graph +func (tx *orderBookBatchedUpdates) addLiquidityPool(pool xdr.LiquidityPoolEntry) *orderBookBatchedUpdates { + tx.operations = append(tx.operations, orderBookOperation{ + operationType: addLiquidityPoolOperationType, + liquidityPool: &pool, + }) + + return tx +} + +// removeOffer will queue an operation to remove the given offer from the order book +func (tx *orderBookBatchedUpdates) removeOffer(offerID xdr.Int64) *orderBookBatchedUpdates { + tx.operations = append(tx.operations, orderBookOperation{ + operationType: removeOfferOperationType, + offerID: offerID, + }) + + return tx +} + +// removeLiquidityPool will queue an operation to remove the given liquidity pool from the order book +func (tx *orderBookBatchedUpdates) removeLiquidityPool(pool xdr.LiquidityPoolEntry) *orderBookBatchedUpdates { + tx.operations = append(tx.operations, orderBookOperation{ + operationType: removeLiquidityPoolOperationType, + liquidityPool: &pool, + }) + + return tx +} + +// apply will attempt to apply all the updates in the batch to the order book +func (tx *orderBookBatchedUpdates) apply(ledger uint32) error { + tx.orderbook.lock.Lock() + defer tx.orderbook.lock.Unlock() + + if tx.committed { + // This should never happen + panic(errBatchAlreadyApplied) + } + tx.committed = true + + if tx.orderbook.lastLedger > 0 && ledger <= tx.orderbook.lastLedger { + return errUnexpectedLedger + } + + for _, operation := range tx.operations { + switch operation.operationType { + case addOfferOperationType: + if err := tx.orderbook.addOffer(*operation.offer); err != nil { + panic(errors.Wrap(err, "could not apply update in batch")) + } + case removeOfferOperationType: + if _, ok := tx.orderbook.tradingPairForOffer[operation.offerID]; !ok { + continue + } + if err := tx.orderbook.removeOffer(operation.offerID); err != nil { + panic(errors.Wrap(err, "could not apply update in batch")) + } + + case addLiquidityPoolOperationType: + tx.orderbook.addPool(*operation.liquidityPool) + + case removeLiquidityPoolOperationType: + tx.orderbook.removePool(*operation.liquidityPool) + + default: + panic(errors.New("invalid operation type")) + } + } + + tx.orderbook.lastLedger = ledger + + return nil +} diff --git a/exp/orderbook/edges.go b/exp/orderbook/edges.go new file mode 100644 index 0000000000..44a90f45fe --- /dev/null +++ b/exp/orderbook/edges.go @@ -0,0 +1,109 @@ +package orderbook + +import ( + "sort" + + "github.com/stellar/go/xdr" +) + +// edgeSet maintains a mapping of assets to a set of venues, which +// is composed of 
a sorted lists of offers and, optionally, a liquidity pool. +// The offers are sorted by ascending price (in terms of the buying asset). +type edgeSet []edge + +type edge struct { + key int32 + value Venues +} + +func (e edgeSet) find(key int32) int { + for i := 0; i < len(e); i++ { + if e[i].key == key { + return i + } + } + return -1 +} + +// addOffer will insert the given offer into the edge set +func (e edgeSet) addOffer(key int32, offer xdr.OfferEntry) edgeSet { + // The list of offers in a venue is sorted by cheapest to most expensive + // price to convert buyingAsset to sellingAsset + i := e.find(key) + if i < 0 { + return append(e, edge{key: key, value: Venues{offers: []xdr.OfferEntry{offer}}}) + } + + offers := e[i].value.offers + // find the smallest i such that Price of offers[i] > Price of offer + insertIndex := sort.Search(len(offers), func(j int) bool { + return offer.Price.Cheaper(offers[j].Price) + }) + + // then insert it into the slice (taken from Method 2 at + // https://github.com/golang/go/wiki/SliceTricks#insert). + offers = append(offers, xdr.OfferEntry{}) // add to end + copy(offers[insertIndex+1:], offers[insertIndex:]) // shift right by 1 + offers[insertIndex] = offer // insert + + e[i].value = Venues{offers: offers, pool: e[i].value.pool} + return e +} + +// addPool makes `pool` a viable venue at `key`. +func (e edgeSet) addPool(key int32, pool liquidityPool) edgeSet { + i := e.find(key) + if i < 0 { + return append(e, edge{key: key, value: Venues{pool: pool}}) + } + e[i].value.pool = pool + return e +} + +// removeOffer will delete the given offer from the edge set, returning whether +// or not the given offer was actually found. +func (e edgeSet) removeOffer(key int32, offerID xdr.Int64) (edgeSet, bool) { + i := e.find(key) + if i < 0 { + return e, false + } + + offers := e[i].value.offers + updatedOffers := offers + contains := false + for i, offer := range offers { + if offer.OfferId != offerID { + continue + } + + // remove the entry in the slice at this location (taken from + // https://github.com/golang/go/wiki/SliceTricks#cut). + updatedOffers = append(offers[:i], offers[i+1:]...) + contains = true + break + } + + if !contains { + return e, false + } + + if len(updatedOffers) == 0 && e[i].value.pool.Body.ConstantProduct == nil { + return append(e[:i], e[i+1:]...), true + } + e[i].value.offers = updatedOffers + return e, true +} + +func (e edgeSet) removePool(key int32) edgeSet { + i := e.find(key) + if i < 0 { + return e + } + + if len(e[i].value.offers) == 0 { + return append(e[:i], e[i+1:]...) 
+ } + + e[i].value = Venues{offers: e[i].value.offers} + return e +} diff --git a/exp/orderbook/graph.go b/exp/orderbook/graph.go new file mode 100644 index 0000000000..3ccb47d1a1 --- /dev/null +++ b/exp/orderbook/graph.go @@ -0,0 +1,929 @@ +package orderbook + +import ( + "context" + "fmt" + "sort" + "sync" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +var ( + errOfferNotPresent = errors.New("offer is not present in the order book graph") + errEmptyOffers = errors.New("offers is empty") + errAssetAmountIsZero = errors.New("current asset amount is 0") + errSoldTooMuch = errors.New("sold more than current balance") + errBatchAlreadyApplied = errors.New("cannot apply batched updates more than once") + errUnexpectedLedger = errors.New("cannot apply unexpected ledger") +) + +type sortByType string + +const ( + sortBySourceAsset sortByType = "source" + sortByDestinationAsset sortByType = "destination" +) + +// trading pair represents two assets that can be exchanged if an order is fulfilled +type tradingPair struct { + // buyingAsset corresponds to offer.Buying.String() from an xdr.OfferEntry + buyingAsset int32 + // sellingAsset corresponds to offer.Selling.String() from an xdr.OfferEntry + sellingAsset int32 +} + +// OBGraph is an interface for orderbook graphs +type OBGraph interface { + AddOffers(offer ...xdr.OfferEntry) + AddLiquidityPools(liquidityPool ...xdr.LiquidityPoolEntry) + Apply(ledger uint32) error + Discard() + Offers() []xdr.OfferEntry + LiquidityPools() []xdr.LiquidityPoolEntry + RemoveOffer(xdr.Int64) OBGraph + RemoveLiquidityPool(pool xdr.LiquidityPoolEntry) OBGraph + Verify() ([]xdr.OfferEntry, []xdr.LiquidityPoolEntry, error) + Clear() +} + +// OrderBookGraph is an in-memory graph representation of all the offers in the +// Stellar ledger. +type OrderBookGraph struct { + // idToAssetString maps an int32 asset id to its string representation. + // Every asset on the OrderBookGraph has an int32 id which indexes into idToAssetString. + // The asset integer ids are largely contiguous. When an asset is completely removed + // from the OrderBookGraph the integer id for that asset will be assigned to the next + // asset which is added to the OrderBookGraph. + idToAssetString []string + // assetStringToID maps an asset string to its int32 id. + assetStringToID map[string]int32 + // vacantIDs is a list of int32 asset ids which can be mapped to new assets. + // When a new asset is added to the OrderBookGraph we first check if there are + // any available vacantIDs, if so, we will assign the new asset to one of the vacantIDs. + // Otherwise, we will add a new entry to idToAssetString for the new asset. + vacantIDs []int32 + + // venuesForBuyingAsset maps an asset to all of its buying opportunities, + // which may be offers (sorted by price) or a liquidity pools. + venuesForBuyingAsset []edgeSet + // venuesForSellingAsset maps an asset to all of its *selling* opportunities, + // which may be offers (sorted by price) or a liquidity pools. + venuesForSellingAsset []edgeSet + // liquidityPools associates a particular asset pair (in "asset order", see + // xdr.Asset.LessThan) with a liquidity pool. + liquidityPools map[tradingPair]xdr.LiquidityPoolEntry + // tradingPairForOffer maps an offer ID to the assets which are being + // exchanged in the given offer. It's mostly used privately in order to + // associate specific offers with their respective edges in the graph. 
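
`edgeSet.addOffer` above keeps each venue's offers price-sorted using `sort.Search` followed by the SliceTricks shift-and-insert. A minimal standalone sketch of the same insertion pattern on plain integers (names are illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"sort"
)

// insertSorted inserts v into s, which must already be sorted ascending,
// using the same search-then-shift pattern as edgeSet.addOffer.
func insertSorted(s []int, v int) []int {
	// Find the smallest index i such that s[i] > v.
	i := sort.Search(len(s), func(j int) bool { return s[j] > v })

	s = append(s, 0)     // grow by one element
	copy(s[i+1:], s[i:]) // shift the tail right by one
	s[i] = v             // place the new value
	return s
}

func main() {
	prices := []int{1, 3, 7}
	prices = insertSorted(prices, 5)
	fmt.Println(prices) // [1 3 5 7]
}
```
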
+ tradingPairForOffer map[xdr.Int64]tradingPair + + // batchedUpdates is internal batch of updates to this graph. Users can + // create multiple batches using `Batch()` method but sometimes only one + // batch is enough. + batchedUpdates *orderBookBatchedUpdates + lock sync.RWMutex + // the orderbook graph is accurate up to lastLedger + lastLedger uint32 +} + +var _ OBGraph = (*OrderBookGraph)(nil) + +// NewOrderBookGraph constructs an empty OrderBookGraph +func NewOrderBookGraph() *OrderBookGraph { + graph := &OrderBookGraph{} + graph.Clear() + return graph +} + +// AddOffers will queue an operation to add the given offer(s) to the order book +// in the internal batch. +// +// You need to run Apply() to apply all enqueued operations. +func (graph *OrderBookGraph) AddOffers(offers ...xdr.OfferEntry) { + for _, offer := range offers { + graph.batchedUpdates.addOffer(offer) + } +} + +// AddLiquidityPools will queue an operation to add the given liquidity pool(s) +// to the order book graph in the internal batch. +// +// You need to run Apply() to apply all enqueued operations. +func (graph *OrderBookGraph) AddLiquidityPools(pools ...xdr.LiquidityPoolEntry) { + for _, lp := range pools { + graph.batchedUpdates.addLiquidityPool(lp) + } +} + +// RemoveOffer will queue an operation to remove the given offer from the order +// book in the internal batch. +// +// You need to run Apply() to apply all enqueued operations. +func (graph *OrderBookGraph) RemoveOffer(offerID xdr.Int64) OBGraph { + graph.batchedUpdates.removeOffer(offerID) + return graph +} + +// RemoveLiquidityPool will queue an operation to remove any liquidity pool (if +// any) that matches the given pool, based exclusively on the pool ID. +// +// You need to run Apply() to apply all enqueued operations. +func (graph *OrderBookGraph) RemoveLiquidityPool(pool xdr.LiquidityPoolEntry) OBGraph { + graph.batchedUpdates.removeLiquidityPool(pool) + return graph +} + +// Discard removes all operations which have been queued but not yet applied to the OrderBookGraph +func (graph *OrderBookGraph) Discard() { + graph.batchedUpdates = graph.batch() +} + +// Apply will attempt to apply all the updates in the internal batch to the order book. +// When Apply is successful, a new empty, instance of internal batch will be created. +func (graph *OrderBookGraph) Apply(ledger uint32) error { + err := graph.batchedUpdates.apply(ledger) + if err != nil { + return err + } + graph.batchedUpdates = graph.batch() + return nil +} + +// Offers returns a list of offers contained in the order book +func (graph *OrderBookGraph) Offers() []xdr.OfferEntry { + graph.lock.RLock() + defer graph.lock.RUnlock() + + var offers []xdr.OfferEntry + for _, edges := range graph.venuesForSellingAsset { + for _, edge := range edges { + offers = append(offers, edge.value.offers...) + } + } + + return offers +} + +// Verify checks the internal consistency of the OrderBookGraph data structures +// and returns all the offers and pools contained in the graph. 
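
The public API above is batch oriented: `AddOffers` and the other mutators only queue operations, and `Apply` commits the batch under the graph lock for a given ledger sequence. A minimal usage sketch, assuming the `xdr.MustNewNativeAsset` and `xdr.MustNewCreditAsset` helpers for constructing assets (the offer values and issuer are illustrative):

```go
package main

import (
	"fmt"

	"github.com/stellar/go/exp/orderbook"
	"github.com/stellar/go/xdr"
)

func main() {
	graph := orderbook.NewOrderBookGraph()

	native := xdr.MustNewNativeAsset()
	usd := xdr.MustNewCreditAsset("USD", "GCWSJRG6YZSA374IY7LF53PIGTO6JD6BP5CNMUAVNWL3YYE636F3APML")

	// AddOffers only queues the update; nothing is visible in the graph until
	// Apply commits the batch for a ledger sequence.
	graph.AddOffers(xdr.OfferEntry{
		OfferId: 1,
		Selling: usd,
		Buying:  native,
		Amount:  100,
		Price:   xdr.Price{N: 1, D: 2},
	})

	if err := graph.Apply(1); err != nil {
		panic(err)
	}

	fmt.Println("offers in graph:", len(graph.Offers()))
}
```
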
+func (graph *OrderBookGraph) Verify() ([]xdr.OfferEntry, []xdr.LiquidityPoolEntry, error) { + graph.lock.RLock() + defer graph.lock.RUnlock() + + var offers []xdr.OfferEntry + var pools []xdr.LiquidityPoolEntry + poolSet := map[xdr.PoolId]xdr.LiquidityPoolEntry{} + offerSet := map[xdr.Int64]xdr.OfferEntry{} + vacantSet := map[int32]bool{} + + if len(graph.venuesForSellingAsset) != len(graph.venuesForBuyingAsset) { + return nil, nil, fmt.Errorf( + "len(graph.venuesForSellingAsset) %v does not match len(graph.venuesForBuyingAsset) %v", + len(graph.venuesForSellingAsset), + len(graph.venuesForBuyingAsset), + ) + } + + if len(graph.venuesForSellingAsset) != len(graph.idToAssetString) { + return nil, nil, fmt.Errorf( + "len(graph.venuesForSellingAsset) %v does not match len(graph.idToAssetString) %v", + len(graph.venuesForSellingAsset), + len(graph.idToAssetString), + ) + } + + for sellingAsset, edges := range graph.venuesForSellingAsset { + sellingAssetString := graph.idToAssetString[sellingAsset] + if len(sellingAssetString) == 0 { + vacantSet[int32(sellingAsset)] = true + } + if len(sellingAssetString) == 0 && len(edges) == 0 { + continue + } + if len(sellingAssetString) == 0 && len(edges) > 0 { + return nil, nil, fmt.Errorf("found vacant id %v with non empty edges %v", sellingAsset, edges) + } + if id, ok := graph.assetStringToID[sellingAssetString]; !ok { + return nil, nil, fmt.Errorf( + "asset string %v is not in graph.assetStringToID", + sellingAssetString, + ) + } else if id != int32(sellingAsset) { + return nil, nil, fmt.Errorf( + "asset string %v maps to %v , expected %v", + sellingAssetString, + id, + sellingAsset, + ) + } + for _, edge := range edges { + buyingAssetString := graph.idToAssetString[edge.key] + for i, offer := range edge.value.offers { + if _, ok := offerSet[offer.OfferId]; ok { + return nil, nil, fmt.Errorf("offer %v is present more than once", offer.OfferId) + } + pair := graph.tradingPairForOffer[offer.OfferId] + if pair.sellingAsset != int32(sellingAsset) { + return nil, nil, fmt.Errorf( + "trading pair %v for offer %v does not match selling asset id %v", + pair, + offer.OfferId, + sellingAsset, + ) + } + if pair.buyingAsset != edge.key { + return nil, nil, fmt.Errorf( + "trading pair %v for offer %v does not match buying asset id %v", + pair, + offer.OfferId, + edge.key, + ) + } + if i == 0 { + if offer.Buying.String() != buyingAssetString { + return nil, nil, fmt.Errorf( + "offer buying asset %v does not match expected %v", + offer, + buyingAssetString, + ) + } + if offer.Selling.String() != sellingAssetString { + return nil, nil, fmt.Errorf( + "offer selling asset %v does not match expected %v", + offer, + sellingAssetString, + ) + } + } else { + if !offer.Buying.Equals(edge.value.offers[i-1].Buying) { + return nil, nil, fmt.Errorf( + "offer buying asset %v does not match expected %v", + offer, + buyingAssetString, + ) + } + if !offer.Selling.Equals(edge.value.offers[i-1].Selling) { + return nil, nil, fmt.Errorf( + "offer selling asset %v does not match expected %v", + offer, + sellingAssetString, + ) + } + } + offerSet[offer.OfferId] = offer + offers = append(offers, offer) + } + if edge.value.pool.Body.ConstantProduct != nil { + if edge.value.pool.assetA == int32(sellingAsset) { + if edge.value.pool.assetB != edge.key { + return nil, nil, fmt.Errorf( + "pool assetB %v does not match edge %v", + edge.value.pool.assetB, + edge.key, + ) + } + } else if edge.value.pool.assetB == int32(sellingAsset) { + if edge.value.pool.assetA != edge.key { + return nil, nil, 
fmt.Errorf( + "pool assetA %v does not match edge %v", + edge.value.pool.assetA, + edge.key, + ) + } + } else { + return nil, nil, fmt.Errorf( + "pool assets %v does not match sellingAsset %v", + edge.value.pool, + sellingAsset, + ) + } + + pair := tradingPair{ + buyingAsset: edge.value.pool.assetA, + sellingAsset: edge.value.pool.assetB, + } + assertPoolsEqual(edge.value.pool.LiquidityPoolEntry, graph.liquidityPools[pair]) + params := edge.value.pool.LiquidityPoolEntry.Body.ConstantProduct.Params + if assetA := params.AssetA.String(); graph.assetStringToID[assetA] != pair.buyingAsset { + return nil, nil, fmt.Errorf( + "pool asset A %v does not match asset id %v", + assetA, + pair.buyingAsset, + ) + } + if assetB := params.AssetB.String(); graph.assetStringToID[assetB] != pair.sellingAsset { + return nil, nil, fmt.Errorf( + "pool asset B %v does not match asset id %v", + assetB, + pair.sellingAsset, + ) + } + if _, ok := poolSet[edge.value.pool.LiquidityPoolId]; !ok { + poolSet[edge.value.pool.LiquidityPoolId] = edge.value.pool.LiquidityPoolEntry + pools = append(pools, edge.value.pool.LiquidityPoolEntry) + } + } + } + } + + if len(offerSet) != len(graph.tradingPairForOffer) { + return nil, nil, fmt.Errorf( + "expected number of offers %v to match trading pairs for offer size %v", + len(offerSet), + len(graph.tradingPairForOffer), + ) + } + + for buyingAsset, edges := range graph.venuesForBuyingAsset { + buyingAssetString := graph.idToAssetString[buyingAsset] + if len(buyingAssetString) == 0 && len(edges) == 0 { + continue + } + if len(buyingAssetString) == 0 && len(edges) > 0 { + return nil, nil, fmt.Errorf("found vacant id %v with non empty edges %v", buyingAssetString, edges) + } + for _, edge := range edges { + sellingAssetString := graph.idToAssetString[edge.key] + for i, offer := range edge.value.offers { + o, ok := offerSet[offer.OfferId] + if !ok { + return nil, nil, fmt.Errorf("expected offer %v to be present", offer.OfferId) + } + if err := assertOffersEqual(o, offer); err != nil { + return nil, nil, err + } + + if i == 0 { + if offer.Buying.String() != buyingAssetString { + return nil, nil, fmt.Errorf( + "offer buying asset %v does not match expected %v", + offer, + buyingAssetString, + ) + } + if offer.Selling.String() != sellingAssetString { + return nil, nil, fmt.Errorf( + "offer selling asset %v does not match expected %v", + offer, + sellingAssetString, + ) + } + } else { + if !offer.Buying.Equals(edge.value.offers[i-1].Buying) { + return nil, nil, fmt.Errorf( + "offer buying asset %v does not match expected %v", + offer, + buyingAssetString, + ) + } + if !offer.Selling.Equals(edge.value.offers[i-1].Selling) { + return nil, nil, fmt.Errorf( + "offer selling asset %v does not match expected %v", + offer, + sellingAssetString, + ) + } + } + delete(offerSet, offer.OfferId) + } + if edge.value.pool.Body.ConstantProduct != nil { + if edge.value.pool.assetA == int32(buyingAsset) { + if edge.value.pool.assetB != edge.key { + return nil, nil, fmt.Errorf( + "pool assetB %v does not match edge %v", + edge.value.pool.assetB, + edge.key, + ) + } + } else if edge.value.pool.assetB == int32(buyingAsset) { + if edge.value.pool.assetA != edge.key { + return nil, nil, fmt.Errorf( + "pool assetA %v does not match edge %v", + edge.value.pool.assetA, + edge.key, + ) + } + } else { + return nil, nil, fmt.Errorf( + "pool assets %v does not match sellingAsset %v", + edge.value.pool, + buyingAsset, + ) + } + + pair := tradingPair{ + buyingAsset: edge.value.pool.assetA, + sellingAsset: 
edge.value.pool.assetB, + } + assertPoolsEqual(edge.value.pool.LiquidityPoolEntry, graph.liquidityPools[pair]) + params := edge.value.pool.LiquidityPoolEntry.Body.ConstantProduct.Params + if assetA := params.AssetA.String(); graph.assetStringToID[assetA] != pair.buyingAsset { + return nil, nil, fmt.Errorf("pool asset A %v does not match asset id %v", assetA, pair.buyingAsset) + } + if assetB := params.AssetB.String(); graph.assetStringToID[assetB] != pair.sellingAsset { + return nil, nil, fmt.Errorf("pool asset B %v does not match asset id %v", assetB, pair.sellingAsset) + } + if _, ok := poolSet[edge.value.pool.LiquidityPoolId]; !ok { + return nil, nil, fmt.Errorf("expected pool %v to be present", edge.value.pool.LiquidityPoolId) + } + } + } + } + + if len(offerSet) != 0 { + return nil, nil, fmt.Errorf("expected all offers to be matched %v", offerSet) + } + + if len(graph.vacantIDs) != len(vacantSet) { + return nil, nil, fmt.Errorf("expected vacant ids %v to be match vacant set %v", graph.vacantIDs, vacantSet) + } + + for _, vacantID := range graph.vacantIDs { + if !vacantSet[vacantID] { + return nil, nil, fmt.Errorf("expected vacant ids %v to be match vacant set %v", graph.vacantIDs, vacantSet) + } + } + + return offers, pools, nil +} + +func assertOffersEqual(o xdr.OfferEntry, offer xdr.OfferEntry) error { + if o.Price != offer.Price { + return fmt.Errorf("expected offer price %v to match %v", o, offer) + } + if o.Amount != offer.Amount { + return fmt.Errorf("expected offer amount %v to match %v", o, offer) + } + if !o.Buying.Equals(offer.Buying) { + return fmt.Errorf("expected offer buying asset %v to match %v", o, offer) + } + if !o.Selling.Equals(offer.Selling) { + return fmt.Errorf("expected offer selling asset %v to match %v", o, offer) + } + return nil +} + +func assertPoolsEqual(p xdr.LiquidityPoolEntry, pool xdr.LiquidityPoolEntry) error { + if p.LiquidityPoolId != pool.LiquidityPoolId { + return fmt.Errorf("expected pool id %v to match %v", p, pool) + } + constantProductPool := p.Body.MustConstantProduct() + other := pool.Body.MustConstantProduct() + if !constantProductPool.Params.AssetA.Equals(other.Params.AssetA) { + return fmt.Errorf("expected pool asset a %v to match %v", p, pool) + } + if !constantProductPool.Params.AssetB.Equals(other.Params.AssetB) { + return fmt.Errorf("expected pool asset b %v to match %v", p, pool) + } + if constantProductPool.Params.Fee != other.Params.Fee { + return fmt.Errorf("expected pool fee %v to match %v", p, pool) + } + if constantProductPool.ReserveA != other.ReserveA { + return fmt.Errorf("expected pool reserveA %v to match %v", p, pool) + } + if constantProductPool.ReserveB != other.ReserveB { + return fmt.Errorf("expected pool reserveB %v to match %v", p, pool) + } + + return nil +} + +// LiquidityPools returns a list of unique liquidity pools contained in the +// order book graph +func (graph *OrderBookGraph) LiquidityPools() []xdr.LiquidityPoolEntry { + graph.lock.RLock() + defer graph.lock.RUnlock() + + pools := make([]xdr.LiquidityPoolEntry, 0, len(graph.liquidityPools)) + for _, pool := range graph.liquidityPools { + pools = append(pools, pool) + } + + return pools +} + +// Clear removes all offers from the graph. 
+func (graph *OrderBookGraph) Clear() { + graph.lock.Lock() + defer graph.lock.Unlock() + + graph.assetStringToID = map[string]int32{} + graph.idToAssetString = []string{} + graph.vacantIDs = []int32{} + graph.venuesForSellingAsset = []edgeSet{} + graph.venuesForBuyingAsset = []edgeSet{} + graph.tradingPairForOffer = map[xdr.Int64]tradingPair{} + graph.liquidityPools = map[tradingPair]xdr.LiquidityPoolEntry{} + graph.batchedUpdates = graph.batch() + graph.lastLedger = 0 +} + +// Batch creates a new batch of order book updates which can be applied +// on this graph +func (graph *OrderBookGraph) batch() *orderBookBatchedUpdates { + return &orderBookBatchedUpdates{ + operations: []orderBookOperation{}, + committed: false, + orderbook: graph, + } +} + +func (graph *OrderBookGraph) getOrCreateAssetID(asset xdr.Asset) int32 { + assetString := asset.String() + id, ok := graph.assetStringToID[assetString] + if ok { + return id + } + // before creating a new int32 asset id we will try to use + // a vacant id so that we can plug any empty cells in the + // idToAssetString array. + if len(graph.vacantIDs) > 0 { + id = graph.vacantIDs[len(graph.vacantIDs)-1] + graph.vacantIDs = graph.vacantIDs[:len(graph.vacantIDs)-1] + graph.idToAssetString[id] = assetString + } else { + // idToAssetString never decreases in length unless we call graph.Clear() + id = int32(len(graph.idToAssetString)) + // we assign id to asset + graph.idToAssetString = append(graph.idToAssetString, assetString) + graph.venuesForBuyingAsset = append(graph.venuesForBuyingAsset, nil) + graph.venuesForSellingAsset = append(graph.venuesForSellingAsset, nil) + } + + graph.assetStringToID[assetString] = id + return id +} + +func (graph *OrderBookGraph) maybeDeleteAsset(asset int32) { + buyingEdgesEmpty := len(graph.venuesForBuyingAsset[asset]) == 0 + sellingEdgesEmpty := len(graph.venuesForSellingAsset[asset]) == 0 + + if buyingEdgesEmpty && sellingEdgesEmpty { + delete(graph.assetStringToID, graph.idToAssetString[asset]) + // When removing an asset we do not resize the idToAssetString array. + // Instead, we allow the cell occupied by the id to be empty. + // The next time we will add an asset to the graph we will allocate the + // id to the new asset. + graph.idToAssetString[asset] = "" + graph.vacantIDs = append(graph.vacantIDs, asset) + } +} + +// addOffer inserts a given offer into the order book graph +func (graph *OrderBookGraph) addOffer(offer xdr.OfferEntry) error { + // If necessary, replace any existing offer with a new one. 
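	// An update is handled as remove-then-insert: the stale entry is dropped
	// from both venue tables so that, when the offer is re-added below, it is
	// placed according to its (possibly changed) price within each edge set.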
+ if _, contains := graph.tradingPairForOffer[offer.OfferId]; contains { + if err := graph.removeOffer(offer.OfferId); err != nil { + return errors.Wrap(err, "could not update offer in order book graph") + } + } + + buying := graph.getOrCreateAssetID(offer.Buying) + selling := graph.getOrCreateAssetID(offer.Selling) + + graph.tradingPairForOffer[offer.OfferId] = tradingPair{ + buyingAsset: buying, sellingAsset: selling, + } + + graph.venuesForSellingAsset[selling] = graph.venuesForSellingAsset[selling].addOffer(buying, offer) + graph.venuesForBuyingAsset[buying] = graph.venuesForBuyingAsset[buying].addOffer(selling, offer) + + return nil +} + +func (graph *OrderBookGraph) poolFromEntry(poolXDR xdr.LiquidityPoolEntry) liquidityPool { + aXDR, bXDR := getPoolAssets(poolXDR) + assetA, assetB := graph.getOrCreateAssetID(aXDR), graph.getOrCreateAssetID(bXDR) + return liquidityPool{ + LiquidityPoolEntry: poolXDR, + assetA: assetA, + assetB: assetB, + } +} + +// addPool sets the given pool as the venue for the given trading pair. +func (graph *OrderBookGraph) addPool(poolEntry xdr.LiquidityPoolEntry) { + // Liquidity pools have no concept of a "buying" or "selling" asset, + // so we create venues in both directions. + pool := graph.poolFromEntry(poolEntry) + graph.liquidityPools[tradingPair{ + buyingAsset: pool.assetA, + sellingAsset: pool.assetB, + }] = pool.LiquidityPoolEntry + + for _, table := range [][]edgeSet{ + graph.venuesForBuyingAsset, + graph.venuesForSellingAsset, + } { + table[pool.assetA] = table[pool.assetA].addPool(pool.assetB, pool) + table[pool.assetB] = table[pool.assetB].addPool(pool.assetA, pool) + } +} + +// removeOffer deletes a given offer from the order book graph +func (graph *OrderBookGraph) removeOffer(offerID xdr.Int64) error { + pair, ok := graph.tradingPairForOffer[offerID] + if !ok { + return errOfferNotPresent + } + delete(graph.tradingPairForOffer, offerID) + + if set, ok := graph.venuesForSellingAsset[pair.sellingAsset].removeOffer(pair.buyingAsset, offerID); !ok { + return errOfferNotPresent + } else { + graph.venuesForSellingAsset[pair.sellingAsset] = set + } + + if set, ok := graph.venuesForBuyingAsset[pair.buyingAsset].removeOffer(pair.sellingAsset, offerID); !ok { + return errOfferNotPresent + } else { + graph.venuesForBuyingAsset[pair.buyingAsset] = set + } + + graph.maybeDeleteAsset(pair.buyingAsset) + graph.maybeDeleteAsset(pair.sellingAsset) + return nil +} + +// removePool unsets the pool matching the given asset pair, if it exists. +func (graph *OrderBookGraph) removePool(poolXDR xdr.LiquidityPoolEntry) { + aXDR, bXDR := getPoolAssets(poolXDR) + assetA, assetB := graph.getOrCreateAssetID(aXDR), graph.getOrCreateAssetID(bXDR) + + for _, asset := range []int32{assetA, assetB} { + otherAsset := assetB + if asset == assetB { + otherAsset = assetA + } + + for _, table := range [][]edgeSet{ + graph.venuesForBuyingAsset, + graph.venuesForSellingAsset, + } { + table[asset] = table[asset].removePool(otherAsset) + } + } + + delete(graph.liquidityPools, tradingPair{assetA, assetB}) + graph.maybeDeleteAsset(assetA) + graph.maybeDeleteAsset(assetB) +} + +// IsEmpty returns true if the orderbook graph is not populated +func (graph *OrderBookGraph) IsEmpty() bool { + graph.lock.RLock() + defer graph.lock.RUnlock() + + return len(graph.liquidityPools) == 0 && len(graph.tradingPairForOffer) == 0 +} + +// FindPaths returns a list of payment paths originating from a source account +// and ending with a given destinaton asset and amount. 
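//
// A rough usage sketch (variable names and values below are illustrative):
//
//	paths, lastLedger, err := graph.FindPaths(
//		ctx,
//		3,               // maxPathLength
//		destAsset,       // destinationAsset
//		xdr.Int64(1000), // destinationAmount
//		&sourceAccount,  // sourceAccountID: offers from this account are ignored
//		sourceAssets,    // candidate source assets
//		sourceBalances,  // balances aligned with sourceAssets
//		true,            // validateSourceBalance
//		5,               // maxAssetsPerPath
//		true,            // includePools
//	)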
+func (graph *OrderBookGraph) FindPaths( + ctx context.Context, + maxPathLength int, + destinationAsset xdr.Asset, + destinationAmount xdr.Int64, + sourceAccountID *xdr.AccountId, + sourceAssets []xdr.Asset, + sourceAssetBalances []xdr.Int64, + validateSourceBalance bool, + maxAssetsPerPath int, + includePools bool, +) ([]Path, uint32, error) { + paths, lastLedger, err := graph.findPathsWithLock( + ctx, maxPathLength, destinationAsset, destinationAmount, sourceAccountID, sourceAssets, sourceAssetBalances, + validateSourceBalance, includePools, + ) + if err != nil { + return nil, lastLedger, errors.Wrap(err, "could not determine paths") + } + + paths, err = sortAndFilterPaths( + paths, + maxAssetsPerPath, + sortBySourceAsset, + ) + return paths, lastLedger, err +} + +func (graph *OrderBookGraph) findPathsWithLock( + ctx context.Context, + maxPathLength int, + destinationAsset xdr.Asset, + destinationAmount xdr.Int64, + sourceAccountID *xdr.AccountId, + sourceAssets []xdr.Asset, + sourceAssetBalances []xdr.Int64, + validateSourceBalance bool, + includePools bool, +) ([]Path, uint32, error) { + graph.lock.RLock() + defer graph.lock.RUnlock() + + destinationAssetString := destinationAsset.String() + sourceAssetsMap := make(map[int32]xdr.Int64, len(sourceAssets)) + for i, sourceAsset := range sourceAssets { + sourceAssetString := sourceAsset.String() + sourceAssetID, ok := graph.assetStringToID[sourceAssetString] + if !ok { + continue + } + sourceAssetsMap[sourceAssetID] = sourceAssetBalances[i] + } + destinationAssetID, ok := graph.assetStringToID[destinationAssetString] + if !ok || len(sourceAssetsMap) == 0 { + return []Path{}, graph.lastLedger, nil + } + searchState := &sellingGraphSearchState{ + graph: graph, + destinationAssetString: destinationAssetString, + destinationAssetAmount: destinationAmount, + ignoreOffersFrom: sourceAccountID, + targetAssets: sourceAssetsMap, + validateSourceBalance: validateSourceBalance, + paths: []Path{}, + includePools: includePools, + } + err := search( + ctx, + searchState, + maxPathLength, + destinationAssetID, + destinationAmount, + ) + return searchState.paths, graph.lastLedger, err +} + +type sortablePaths struct { + paths []Path + less func(paths []Path, i, j int) bool +} + +func (s sortablePaths) Swap(i, j int) { + s.paths[i], s.paths[j] = s.paths[j], s.paths[i] +} + +func (s sortablePaths) Less(i, j int) bool { + return s.less(s.paths, i, j) +} + +func (s sortablePaths) Len() int { + return len(s.paths) +} + +// FindFixedPaths returns a list of payment paths where the source and +// destination assets are fixed. +// +// All returned payment paths will start by spending `amountToSpend` of +// `sourceAsset` and will end with some positive balance of `destinationAsset`. +// +// `sourceAccountID` is optional, but if it's provided, then no offers created +// by `sourceAccountID` will be considered when evaluating payment paths. 
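//
// A rough usage sketch (variable names and values below are illustrative):
//
//	paths, lastLedger, err := graph.FindFixedPaths(
//		ctx,
//		3,                 // maxPathLength
//		sourceAsset,       // asset being spent
//		amountToSpend,     // fixed source amount
//		destinationAssets, // acceptable destination assets
//		5,                 // maxAssetsPerPath
//		true,              // includePools
//	)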
+func (graph *OrderBookGraph) FindFixedPaths( + ctx context.Context, + maxPathLength int, + sourceAsset xdr.Asset, + amountToSpend xdr.Int64, + destinationAssets []xdr.Asset, + maxAssetsPerPath int, + includePools bool, +) ([]Path, uint32, error) { + paths, lastLedger, err := graph.findFixedPathsWithLock( + ctx, maxPathLength, sourceAsset, amountToSpend, destinationAssets, includePools, + ) + if err != nil { + return nil, lastLedger, errors.Wrap(err, "could not determine paths") + } + + paths, err = sortAndFilterPaths( + paths, + maxAssetsPerPath, + sortByDestinationAsset, + ) + return paths, lastLedger, err +} + +func (graph *OrderBookGraph) findFixedPathsWithLock( + ctx context.Context, + maxPathLength int, + sourceAsset xdr.Asset, + amountToSpend xdr.Int64, + destinationAssets []xdr.Asset, + includePools bool, +) ([]Path, uint32, error) { + graph.lock.RLock() + defer graph.lock.RUnlock() + + target := make(map[int32]bool, len(destinationAssets)) + for _, destinationAsset := range destinationAssets { + destinationAssetString := destinationAsset.String() + destinationAssetID, ok := graph.assetStringToID[destinationAssetString] + if !ok { + continue + } + target[destinationAssetID] = true + } + + sourceAssetString := sourceAsset.String() + sourceAssetID, ok := graph.assetStringToID[sourceAssetString] + if !ok || len(target) == 0 { + return []Path{}, graph.lastLedger, nil + } + searchState := &buyingGraphSearchState{ + graph: graph, + sourceAssetString: sourceAssetString, + sourceAssetAmount: amountToSpend, + targetAssets: target, + paths: []Path{}, + includePools: includePools, + } + err := search( + ctx, + searchState, + maxPathLength, + sourceAssetID, + amountToSpend, + ) + return searchState.paths, graph.lastLedger, err +} + +// compareSourceAsset will group payment paths by `SourceAsset` +// paths which spend less `SourceAmount` will appear earlier in the sorting +// if there are multiple paths which spend the same `SourceAmount` then shorter payment paths +// will be prioritized +func compareSourceAsset(allPaths []Path, i, j int) bool { + if allPaths[i].SourceAsset == allPaths[j].SourceAsset { + if allPaths[i].SourceAmount == allPaths[j].SourceAmount { + return len(allPaths[i].InteriorNodes) < len(allPaths[j].InteriorNodes) + } + return allPaths[i].SourceAmount < allPaths[j].SourceAmount + } + return allPaths[i].SourceAsset < allPaths[j].SourceAsset +} + +// compareDestinationAsset will group payment paths by `DestinationAsset`. Paths +// which deliver a higher `DestinationAmount` will appear earlier in the +// sorting. If there are multiple paths which deliver the same +// `DestinationAmount`, then shorter payment paths will be prioritized. 
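//
// For example, of two hypothetical paths that both deliver EUR, the one
// delivering 4 units sorts before the one delivering 3; if the delivered
// amounts are equal, the path with fewer `InteriorNodes` comes first.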
+func compareDestinationAsset(allPaths []Path, i, j int) bool { + if allPaths[i].DestinationAsset == allPaths[j].DestinationAsset { + if allPaths[i].DestinationAmount == allPaths[j].DestinationAmount { + return len(allPaths[i].InteriorNodes) < len(allPaths[j].InteriorNodes) + } + return allPaths[i].DestinationAmount > allPaths[j].DestinationAmount + } + return allPaths[i].DestinationAsset < allPaths[j].DestinationAsset +} + +func sourceAssetEquals(p, otherPath Path) bool { + return p.SourceAsset == otherPath.SourceAsset +} + +func destinationAssetEquals(p, otherPath Path) bool { + return p.DestinationAsset == otherPath.DestinationAsset +} + +// sortAndFilterPaths sorts the given list of paths using `comparePaths` +// also, we limit the number of paths with the same asset to `maxPathsPerAsset` +func sortAndFilterPaths( + allPaths []Path, + maxPathsPerAsset int, + sortType sortByType, +) ([]Path, error) { + var comparePaths func([]Path, int, int) bool + var assetsEqual func(Path, Path) bool + + switch sortType { + case sortBySourceAsset: + comparePaths = compareSourceAsset + assetsEqual = sourceAssetEquals + case sortByDestinationAsset: + comparePaths = compareDestinationAsset + assetsEqual = destinationAssetEquals + default: + return nil, errors.New("invalid sort by type") + } + + sPaths := sortablePaths{ + paths: allPaths, + less: comparePaths, + } + sort.Sort(sPaths) + + filtered := make([]Path, 0, len(allPaths)) + countForAsset := 0 + for _, entry := range allPaths { + if len(filtered) == 0 || !assetsEqual(filtered[len(filtered)-1], entry) { + countForAsset = 1 + filtered = append(filtered, entry) + } else if countForAsset < maxPathsPerAsset { + countForAsset++ + filtered = append(filtered, entry) + } + } + + return filtered, nil +} diff --git a/exp/orderbook/graph_benchmark_test.go b/exp/orderbook/graph_benchmark_test.go new file mode 100644 index 0000000000..8588a2f36a --- /dev/null +++ b/exp/orderbook/graph_benchmark_test.go @@ -0,0 +1,275 @@ +package orderbook + +import ( + "context" + "flag" + "io/ioutil" + "math" + "math/rand" + "net/url" + "path/filepath" + "strings" + "testing" + + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +var ( + // offersFile should contain a list of offers + // each line in the offers file is the base 64 encoding of an offer entry xdr + offersFile = flag.String("offers", "", "offers file generated by the dump-orderbook tool") + poolsFile = flag.String("pools", "", "pools file generated by the dump-orderbook tool") +) + +// loadGraphFromFiles reads an offers and pools file generated by the dump-orderbook tool +// and returns an orderbook built from those offers / pools +func loadGraphFromFiles(offerFilePath, poolFilePath string) (*OrderBookGraph, error) { + graph := NewOrderBookGraph() + offerDumpBytes, err := ioutil.ReadFile(offerFilePath) + if err != nil { + return nil, errors.Wrap(err, "could not read offers file") + } + poolDumpBytes, err := ioutil.ReadFile(poolFilePath) + if err != nil { + return nil, errors.Wrap(err, "could not read pools file") + } + + for _, line := range strings.Split(string(offerDumpBytes), "\n") { + offer := xdr.OfferEntry{} + if err := xdr.SafeUnmarshalBase64(line, &offer); err != nil { + return nil, errors.Wrap(err, "could not base64 decode entry") + } + + graph.AddOffers(offer) + } + + for _, line := range strings.Split(string(poolDumpBytes), "\n") { + pool := xdr.LiquidityPoolEntry{} + if err := xdr.SafeUnmarshalBase64(line, &pool); err != nil { + return nil, 
errors.Wrap(err, "could not base64 decode entry") + } + + graph.AddLiquidityPools(pool) + } + + if err := graph.Apply(1); err != nil { + return nil, err + } + + return graph, nil +} + +type request struct { + src xdr.Asset + amt xdr.Int64 + dst []xdr.Asset +} + +func loadRequestsFromFile(filePath string) ([]request, error) { + var requests []request + rawBytes, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, errors.Wrap(err, "could not read file") + } + for _, line := range strings.Split(string(rawBytes), "\n") { + if line == "" { + continue + } + var parsed *url.URL + if parsed, err = url.Parse(line); err != nil { + return nil, errors.Wrap(err, "could not parse url") + } + var r request + if parsed.Query().Get("source_asset_type") == "native" { + r.src = xdr.MustNewNativeAsset() + } else { + r.src = xdr.MustNewCreditAsset( + parsed.Query().Get("source_asset_code"), + parsed.Query().Get("source_asset_issuer"), + ) + } + if r.amt, err = amount.Parse(parsed.Query().Get("source_amount")); err != nil { + return nil, errors.Wrap(err, "could not parse source amount") + } + for _, asset := range strings.Split(parsed.Query().Get("destination_assets"), ",") { + var parsedAsset xdr.Asset + if len(asset) == 0 { + continue + } else if asset == "native" { + parsedAsset = xdr.MustNewNativeAsset() + } else { + parts := strings.Split(asset, ":") + parsedAsset = xdr.MustNewCreditAsset(parts[0], parts[1]) + } + r.dst = append(r.dst, parsedAsset) + } + requests = append(requests, r) + } + + return requests, nil +} + +// BenchmarkVibrantPath benchmarks the most commonly requested path payment from the vibrant app. +func BenchmarkVibrantPath(b *testing.B) { + if *offersFile == "" { + b.Skip("missing offers file") + } + graph, err := loadGraphFromFiles(*offersFile, *poolsFile) + if err != nil { + b.Fatalf("could not read graph from file: %v", err) + } + + b.ResetTimer() + b.ReportAllocs() + // https://horizon.stellar.org/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=USDC&source_asset_issuer=GA5ZSEJYB37JRC5AVCIA5MOP4RHTM335X2KGX3IHOJAPP5RE34K4KZVN&source_amount=10&destination_assets=ARST%3AGCSAZVWXZKWS4XS223M5F54H2B6XPIIXZZGP7KEAIU6YSL5HDRGCI3DG + // Uncomment in order to get a detailed heap allocations profile + // runtime.MemProfileRate = 1 + for i := 0; i < b.N; i++ { + _, _, err := graph.FindFixedPaths( + context.Background(), + 3, + xdr.MustNewCreditAsset("USDC", "GA5ZSEJYB37JRC5AVCIA5MOP4RHTM335X2KGX3IHOJAPP5RE34K4KZVN"), + amount.MustParse("10"), + []xdr.Asset{ + xdr.MustNewCreditAsset("ARST", "GCSAZVWXZKWS4XS223M5F54H2B6XPIIXZZGP7KEAIU6YSL5HDRGCI3DG"), + }, + 5, + true, + ) + if err != nil { + b.Fatal("could not find path") + } + } +} + +// BenchmarkMultipleDestinationAssets benchmarks the path finding function +// on a request which has multiple destination assets. Most requests to the +// path finding endpoint only specify a single destination asset, so I +// wanted to have benchmark dedicated to this case because it could +// easily be overlooked. 
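//
// Like the other file-driven benchmarks in this file, it can be run roughly as
// follows (the dump paths are placeholders for files produced by the
// dump-orderbook tool):
//
//	go test -run '^$' -bench BenchmarkMultipleDestinationAssets ./exp/orderbook \
//		-args -offers=/tmp/offers.dump -pools=/tmp/pools.dump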
+func BenchmarkMultipleDestinationAssets(b *testing.B) { + if *offersFile == "" { + b.Skip("missing offers file") + } + graph, err := loadGraphFromFiles(*offersFile, *poolsFile) + if err != nil { + b.Fatalf("could not read graph from file: %v", err) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + _, _, err := graph.FindFixedPaths( + context.Background(), + 3, + xdr.MustNewCreditAsset("USDT", "GCQTGZQQ5G4PTM2GL7CDIFKUBIPEC52BROAQIAPW53XBRJVN6ZJVTG6V"), + amount.MustParse("554.2610400"), + []xdr.Asset{ + xdr.MustNewNativeAsset(), + xdr.MustNewCreditAsset("yXLM", "GARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55"), + xdr.MustNewCreditAsset("USDC", "GA5ZSEJYB37JRC5AVCIA5MOP4RHTM335X2KGX3IHOJAPP5RE34K4KZVN"), + xdr.MustNewCreditAsset("EURT", "GAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S"), + }, + 5, + true, + ) + if err != nil { + b.Fatal("could not find path") + } + } +} + +// BenchmarkTestData benchmarks the path finding function on a sample of 100 expensive path finding requests. +// The sample of requests was obtained from recent horizon production pubnet logs. +func BenchmarkTestData(b *testing.B) { + if *offersFile == "" { + b.Skip("missing offers file") + } + graph, err := loadGraphFromFiles(*offersFile, *poolsFile) + if err != nil { + b.Fatalf("could not read graph from file: %v", err) + } + + requests, err := loadRequestsFromFile(filepath.Join("testdata", "sample-requests")) + if err != nil { + b.Fatalf("could not read requests from file: %v", err) + } + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, req := range requests { + _, _, err := graph.FindFixedPaths( + context.Background(), + 3, + req.src, + req.amt, + req.dst, + 5, + true, + ) + if err != nil { + b.Fatal("could not find path") + } + } + } +} + +func BenchmarkSingleLiquidityPoolExchange(b *testing.B) { + graph := NewOrderBookGraph() + pool := graph.poolFromEntry(eurUsdLiquidityPool) + asset := graph.getOrCreateAssetID(usdAsset) + + b.ResetTimer() + b.ReportAllocs() + + b.Run("deposit", func(b *testing.B) { + makeTrade(pool, asset, tradeTypeDeposit, math.MaxInt64/2) + }) + b.Run("exchange", func(b *testing.B) { + makeTrade(pool, asset, tradeTypeExpectation, math.MaxInt64/2) + }) +} + +// Note: making these subtests with one randomness pool doesn't work, because +// the benchmark doesn't do enough runs on the parent test... 
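// Instead, each benchmark below pre-generates its b.N trade amounts before
// calling ResetTimer, keeping the cost of random number generation out of the
// measured loop.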
+ +func BenchmarkLiquidityPoolDeposits(b *testing.B) { + amounts := createRandomAmounts(b.N) + graph := NewOrderBookGraph() + pool := graph.poolFromEntry(eurUsdLiquidityPool) + asset := graph.getOrCreateAssetID(usdAsset) + + b.ResetTimer() + b.ReportAllocs() + + for _, amount := range amounts { + makeTrade(pool, asset, tradeTypeDeposit, amount) + } +} + +func BenchmarkLiquidityPoolExpectations(b *testing.B) { + amounts := createRandomAmounts(b.N) + graph := NewOrderBookGraph() + pool := graph.poolFromEntry(eurUsdLiquidityPool) + asset := graph.getOrCreateAssetID(usdAsset) + + b.ResetTimer() + b.ReportAllocs() + + for _, amount := range amounts { + makeTrade(pool, asset, tradeTypeExpectation, amount) + } +} + +func createRandomAmounts(quantity int) []xdr.Int64 { + amounts := make([]xdr.Int64, quantity) + for i, _ := range amounts { + amounts[i] = xdr.Int64(1 + rand.Int63n(math.MaxInt64-100)) + } + return amounts +} diff --git a/exp/orderbook/graph_test.go b/exp/orderbook/graph_test.go new file mode 100644 index 0000000000..203c923167 --- /dev/null +++ b/exp/orderbook/graph_test.go @@ -0,0 +1,2360 @@ +package orderbook + +import ( + "bytes" + "context" + "encoding" + "math" + "sort" + "testing" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/price" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +var ( + issuer, _ = xdr.NewAccountId(xdr.PublicKeyTypePublicKeyTypeEd25519, xdr.Uint256{}) + + nativeAsset = xdr.Asset{ + Type: xdr.AssetTypeAssetTypeNative, + } + + usdAsset = xdr.Asset{ + Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: [4]byte{'u', 's', 'd', 0}, + Issuer: issuer, + }, + } + + eurAsset = xdr.Asset{ + Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: [4]byte{'e', 'u', 'r', 0}, + Issuer: issuer, + }, + } + + chfAsset = xdr.Asset{ + Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: [4]byte{'c', 'h', 'f', 0}, + Issuer: issuer, + }, + } + + yenAsset = xdr.Asset{ + Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: [4]byte{'y', 'e', 'n', 0}, + Issuer: issuer, + }, + } + + fiftyCentsOffer = xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(1), + Buying: usdAsset, + Selling: nativeAsset, + Price: xdr.Price{ + N: 1, + D: 2, + }, + Amount: xdr.Int64(500), + } + quarterOffer = xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(2), + Buying: usdAsset, + Selling: nativeAsset, + Price: xdr.Price{ + N: 1, + D: 4, + }, + Amount: xdr.Int64(500), + } + dollarOffer = xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(3), + Buying: usdAsset, + Selling: nativeAsset, + Price: xdr.Price{ + N: 1, + D: 1, + }, + Amount: xdr.Int64(500), + } + + eurOffer = xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(4), + Buying: eurAsset, + Selling: nativeAsset, + Price: xdr.Price{ + N: 1, + D: 1, + }, + Amount: xdr.Int64(500), + } + twoEurOffer = xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(5), + Buying: eurAsset, + Selling: nativeAsset, + Price: xdr.Price{ + N: 2, + D: 1, + }, + Amount: xdr.Int64(500), + } + threeEurOffer = xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(6), + Buying: eurAsset, + Selling: nativeAsset, + Price: xdr.Price{ + N: 3, + D: 1, + }, + Amount: xdr.Int64(500), + } + + eurUsdLiquidityPool = makePool(eurAsset, usdAsset, 1000, 1000) + eurYenLiquidityPool = makePool(eurAsset, yenAsset, 1000, 1000) + usdChfLiquidityPool = makePool(chfAsset, usdAsset, 500, 1000) + 
nativeEurPool = makePool(xdr.MustNewNativeAsset(), eurAsset, 1500, 30) + nativeUsdPool = makePool(xdr.MustNewNativeAsset(), usdAsset, 120, 30) +) + +func assertBinaryMarshalerEquals(t *testing.T, a, b encoding.BinaryMarshaler) { + serializedA, err := a.MarshalBinary() + if !assert.NoError(t, err) { + t.FailNow() + } + + serializedB, err := b.MarshalBinary() + if !assert.NoError(t, err) { + t.FailNow() + } + + if !assert.Truef(t, bytes.Equal(serializedA, serializedB), + "expected lists to be equal but got %v %v", a, b) { + t.FailNow() + } +} + +func assertOfferListEquals(t *testing.T, a, b []xdr.OfferEntry) { + assert.Equalf(t, len(a), len(b), + "expected lists to have same length but got %v %v", a, b) + + for i := 0; i < len(a); i++ { + assertBinaryMarshalerEquals(t, a[i], b[i]) + } +} + +// assertGraphEquals ensures two graphs are identical +func assertGraphEquals(t *testing.T, a, b *OrderBookGraph) { + assert.Equalf(t, len(a.assetStringToID), len(b.assetStringToID), + "expected same # of asset string to id entries but got %v %v", + a.assetStringToID, b.assetStringToID) + + assert.Equalf(t, len(a.tradingPairForOffer), len(b.tradingPairForOffer), + "expected same # of trading pairs but got %v %v", a, b) + + assert.Equalf(t, len(a.liquidityPools), len(b.liquidityPools), + "expected same # of liquidity pools but got %v %v", a, b) + + for assetString, _ := range a.assetStringToID { + asset := a.assetStringToID[assetString] + otherAsset, ok := b.assetStringToID[assetString] + if !ok { + t.Fatalf("asset %v is not present in assetStringToID", assetString) + } + es := a.venuesForSellingAsset[asset] + other := b.venuesForSellingAsset[otherAsset] + + assertEdgeSetEquals(t, a, b, es, other, assetString) + + es = a.venuesForBuyingAsset[asset] + other = b.venuesForBuyingAsset[otherAsset] + + assert.Equalf(t, len(es), len(other), + "expected edge set for %v to have same length but got %v %v", + assetString, es, other) + + assertEdgeSetEquals(t, a, b, es, other, assetString) + } + + for offerID, pair := range a.tradingPairForOffer { + otherPair := b.tradingPairForOffer[offerID] + + assert.Equalf( + t, + a.idToAssetString[pair.buyingAsset], + b.idToAssetString[otherPair.buyingAsset], + "expected trading pair to match but got %v %v", pair, otherPair) + + assert.Equalf( + t, + a.idToAssetString[pair.sellingAsset], + b.idToAssetString[otherPair.sellingAsset], + "expected trading pair to match but got %v %v", pair, otherPair) + } + + for pair, pool := range a.liquidityPools { + otherPair := tradingPair{ + buyingAsset: b.assetStringToID[a.idToAssetString[pair.buyingAsset]], + sellingAsset: b.assetStringToID[a.idToAssetString[pair.sellingAsset]], + } + otherPool := b.liquidityPools[otherPair] + assert.Equalf(t, pool, otherPool, "expected pool to match but got %v %v", pool, otherPool) + } +} + +func assertEdgeSetEquals( + t *testing.T, a *OrderBookGraph, b *OrderBookGraph, + es edgeSet, other edgeSet, assetString string) { + assert.Equalf(t, len(es), len(other), + "expected edge set for %v to have same length but got %v %v", + assetString, es, other) + + for _, edge := range es { + venues := edge.value + otherVenues := findByAsset(b, other, a.idToAssetString[edge.key]) + + assert.Equalf(t, venues.pool.LiquidityPoolEntry, otherVenues.pool.LiquidityPoolEntry, + "expected pools for %v to be equal") + + assert.Equalf(t, len(venues.offers), len(otherVenues.offers), + "expected offers for %v to have same length but got %v %v", + edge.key, venues.offers, otherVenues.offers, + ) + + assertOfferListEquals(t, 
venues.offers, otherVenues.offers) + } +} + +func assertPathEquals(t *testing.T, a, b []Path) { + if !assert.Equalf(t, len(a), len(b), + "expected paths to have same length but got %v != %v", a, b) { + t.FailNow() + } + + for i := 0; i < len(a); i++ { + assert.Equalf(t, a[i].SourceAmount, b[i].SourceAmount, + "expected src amounts to be same got %v %v", a[i], b[i]) + + assert.Equalf(t, a[i].DestinationAmount, b[i].DestinationAmount, + "expected dest amounts to be same got %v %v", a[i], b[i]) + + assert.Equalf(t, a[i].DestinationAsset, b[i].DestinationAsset, + "expected dest assets to be same got %v %v", a[i], b[i]) + + assert.Equalf(t, a[i].SourceAsset, b[i].SourceAsset, + "expected source assets to be same got %v %v", a[i], b[i]) + + assert.Equalf(t, len(a[i].InteriorNodes), len(b[i].InteriorNodes), + "expected interior nodes have same length got %v %v", a[i], b[i]) + + for j := 0; j > len(a[i].InteriorNodes); j++ { + assert.Equalf(t, + a[i].InteriorNodes[j], b[i].InteriorNodes[j], + "expected interior nodes to be same got %v %v", a[i], b[i]) + } + } +} + +func findByAsset(g *OrderBookGraph, edges edgeSet, assetString string) Venues { + asset, ok := g.assetStringToID[assetString] + if !ok { + return Venues{} + } + i := edges.find(asset) + if i >= 0 { + return edges[i].value + } + return Venues{} +} + +func TestAddEdgeSet(t *testing.T) { + set := edgeSet{} + g := NewOrderBookGraph() + + set = set.addOffer(g.getOrCreateAssetID(dollarOffer.Buying), dollarOffer) + set = set.addOffer(g.getOrCreateAssetID(eurOffer.Buying), eurOffer) + set = set.addOffer(g.getOrCreateAssetID(twoEurOffer.Buying), twoEurOffer) + set = set.addOffer(g.getOrCreateAssetID(threeEurOffer.Buying), threeEurOffer) + set = set.addOffer(g.getOrCreateAssetID(quarterOffer.Buying), quarterOffer) + set = set.addOffer(g.getOrCreateAssetID(fiftyCentsOffer.Buying), fiftyCentsOffer) + set = set.addPool(g.getOrCreateAssetID(usdAsset), g.poolFromEntry(eurUsdLiquidityPool)) + set = set.addPool(g.getOrCreateAssetID(eurAsset), g.poolFromEntry(eurUsdLiquidityPool)) + + assert.Lenf(t, set, 2, "expected set to have 2 entries but got %v", set) + assert.Equal(t, findByAsset(g, set, usdAsset.String()).pool.LiquidityPoolEntry, eurUsdLiquidityPool) + assert.Equal(t, findByAsset(g, set, eurAsset.String()).pool.LiquidityPoolEntry, eurUsdLiquidityPool) + + assertOfferListEquals(t, findByAsset(g, set, usdAsset.String()).offers, []xdr.OfferEntry{ + quarterOffer, + fiftyCentsOffer, + dollarOffer, + }) + + assertOfferListEquals(t, findByAsset(g, set, eurAsset.String()).offers, []xdr.OfferEntry{ + eurOffer, + twoEurOffer, + threeEurOffer, + }) +} + +func TestRemoveEdgeSet(t *testing.T) { + set := edgeSet{} + g := NewOrderBookGraph() + + var found bool + set, found = set.removeOffer(g.getOrCreateAssetID(usdAsset), dollarOffer.OfferId) + assert.Falsef(t, found, "expected set to not contain asset but is %v", set) + + set = set.addOffer(g.getOrCreateAssetID(dollarOffer.Buying), dollarOffer) + set = set.addOffer(g.getOrCreateAssetID(eurOffer.Buying), eurOffer) + set = set.addOffer(g.getOrCreateAssetID(twoEurOffer.Buying), twoEurOffer) + set = set.addOffer(g.getOrCreateAssetID(threeEurOffer.Buying), threeEurOffer) + set = set.addOffer(g.getOrCreateAssetID(quarterOffer.Buying), quarterOffer) + set = set.addOffer(g.getOrCreateAssetID(fiftyCentsOffer.Buying), fiftyCentsOffer) + set = set.addPool(g.getOrCreateAssetID(usdAsset), g.poolFromEntry(eurUsdLiquidityPool)) + + set = set.removePool(g.getOrCreateAssetID(usdAsset)) + assert.Nil(t, findByAsset(g, set, 
usdAsset.String()).pool.Body.ConstantProduct) + + set, found = set.removeOffer(g.getOrCreateAssetID(usdAsset), dollarOffer.OfferId) + assert.Truef(t, found, "expected set to contain dollar offer but is %v", set) + set, found = set.removeOffer(g.getOrCreateAssetID(usdAsset), dollarOffer.OfferId) + assert.Falsef(t, found, "expected set to not contain dollar offer after deletion but is %v", set) + set, found = set.removeOffer(g.getOrCreateAssetID(eurAsset), threeEurOffer.OfferId) + assert.Truef(t, found, "expected set to contain three euro offer but is %v", set) + set, found = set.removeOffer(g.getOrCreateAssetID(eurAsset), eurOffer.OfferId) + assert.Truef(t, found, "expected set to contain euro offer but is %v", set) + set, found = set.removeOffer(g.getOrCreateAssetID(eurAsset), twoEurOffer.OfferId) + assert.Truef(t, found, "expected set to contain two euro offer but is %v", set) + set, found = set.removeOffer(g.getOrCreateAssetID(eurAsset), eurOffer.OfferId) + assert.Falsef(t, found, "expected set to not contain euro offer after deletion but is %v", set) + + assert.Lenf(t, set, 1, "%v", set) + + assertOfferListEquals(t, findByAsset(g, set, usdAsset.String()).offers, []xdr.OfferEntry{ + quarterOffer, + fiftyCentsOffer, + }) +} + +func TestApplyOutdatedLedger(t *testing.T) { + graph := NewOrderBookGraph() + if graph.lastLedger != 0 { + t.Fatalf("expected last ledger to be %v but got %v", 0, graph.lastLedger) + } + + graph.AddOffers(fiftyCentsOffer) + err := graph.Apply(2) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if graph.lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, graph.lastLedger) + } + + graph.AddOffers(eurOffer) + err = graph.Apply(1) + if err != errUnexpectedLedger { + t.Fatalf("expected error %v but got %v", errUnexpectedLedger, err) + } + if graph.lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, graph.lastLedger) + } + + graph.Discard() + + graph.AddOffers(eurOffer) + err = graph.Apply(2) + if err != errUnexpectedLedger { + t.Fatalf("expected error %v but got %v", errUnexpectedLedger, err) + } + if graph.lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, graph.lastLedger) + } + + graph.Discard() + + err = graph.Apply(4) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if graph.lastLedger != 4 { + t.Fatalf("expected last ledger to be %v but got %v", 4, graph.lastLedger) + } +} + +func TestAddOffersOrderBook(t *testing.T) { + graph := NewOrderBookGraph() + graph.AddOffers(dollarOffer, threeEurOffer, eurOffer, twoEurOffer, + quarterOffer, fiftyCentsOffer) + if !assert.NoError(t, graph.Apply(1)) || + !assert.EqualValues(t, 1, graph.lastLedger) { + t.FailNow() + } + + eurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(9), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 1, + D: 1, + }, + Amount: xdr.Int64(500), + } + otherEurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(10), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 2, + D: 1, + }, + Amount: xdr.Int64(500), + } + + usdEurOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(11), + Buying: usdAsset, + Selling: eurAsset, + Price: xdr.Price{ + N: 1, + D: 3, + }, + Amount: xdr.Int64(500), + } + + graph.AddOffers(eurUsdOffer, otherEurUsdOffer, usdEurOffer) + if !assert.NoError(t, graph.Apply(2)) || + !assert.EqualValues(t, 2, graph.lastLedger) { + t.FailNow() + } + + assetStringToID := map[string]int32{} + idToAssetString := 
[]string{} + for i, asset := range []xdr.Asset{ + nativeAsset, + usdAsset, + eurAsset, + } { + assetStringToID[asset.String()] = int32(i) + idToAssetString = append(idToAssetString, asset.String()) + } + + expectedGraph := &OrderBookGraph{ + assetStringToID: assetStringToID, + idToAssetString: idToAssetString, + venuesForSellingAsset: []edgeSet{ + { + { + assetStringToID[usdAsset.String()], + makeVenues(quarterOffer, fiftyCentsOffer, dollarOffer), + }, + { + assetStringToID[eurAsset.String()], + makeVenues(eurOffer, twoEurOffer, threeEurOffer), + }, + }, + { + { + assetStringToID[eurAsset.String()], + makeVenues(eurUsdOffer, otherEurUsdOffer), + }, + }, + { + { + assetStringToID[usdAsset.String()], + makeVenues(usdEurOffer), + }, + }, + }, + venuesForBuyingAsset: []edgeSet{ + {}, + { + { + assetStringToID[eurAsset.String()], + makeVenues(usdEurOffer), + }, + { + assetStringToID[nativeAsset.String()], + makeVenues(quarterOffer, fiftyCentsOffer, dollarOffer), + }, + }, + { + { + assetStringToID[usdAsset.String()], + makeVenues(eurUsdOffer, otherEurUsdOffer), + }, + { + assetStringToID[nativeAsset.String()], + makeVenues(eurOffer, twoEurOffer, threeEurOffer), + }, + }, + }, + tradingPairForOffer: map[xdr.Int64]tradingPair{ + quarterOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, nativeAsset), + fiftyCentsOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, nativeAsset), + dollarOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, nativeAsset), + eurOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, nativeAsset), + twoEurOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, nativeAsset), + threeEurOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, nativeAsset), + eurUsdOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, usdAsset), + otherEurUsdOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, usdAsset), + usdEurOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, eurAsset), + }, + } + + // adding the same orders multiple times should have no effect + graph.AddOffers(otherEurUsdOffer, usdEurOffer, dollarOffer, threeEurOffer) + assert.NoError(t, graph.Apply(3)) + assert.EqualValues(t, 3, graph.lastLedger) + + assertGraphEquals(t, expectedGraph, graph) +} + +func clonePool(entry xdr.LiquidityPoolEntry) xdr.LiquidityPoolEntry { + clone := entry + body := entry.Body.MustConstantProduct() + clone.Body.ConstantProduct = &body + return clone +} + +func setupGraphWithLiquidityPools(t *testing.T) (*OrderBookGraph, []xdr.LiquidityPoolEntry) { + graph := NewOrderBookGraph() + graph.AddLiquidityPools(nativeEurPool, nativeUsdPool) + if !assert.NoError(t, graph.Apply(1)) { + t.FailNow() + } + + expectedLiquidityPools := []xdr.LiquidityPoolEntry{nativeEurPool, nativeUsdPool} + return graph, expectedLiquidityPools +} + +func assertLiquidityPoolsEqual(t *testing.T, expectedLiquidityPools, liquidityPools []xdr.LiquidityPoolEntry) { + sort.Slice(liquidityPools, func(i, j int) bool { + return liquidityPools[i].Body.MustConstantProduct().Params.AssetB.String() < + liquidityPools[j].Body.MustConstantProduct().Params.AssetB.String() + }) + + if !assert.Equal(t, len(expectedLiquidityPools), len(liquidityPools)) { + t.FailNow() + } + + for i, expected := range expectedLiquidityPools { + liquidityPool := liquidityPools[i] + liquidityPoolBase64, err := xdr.MarshalBase64(liquidityPool) + assert.NoError(t, err) + + expectedBase64, err := xdr.MarshalBase64(expected) + assert.NoError(t, err) + + assert.Equalf(t, expectedBase64, liquidityPoolBase64, + "pool mismatch: %v 
!= %v", expected, liquidityPool) + } +} + +func TestAddLiquidityPools(t *testing.T) { + graph, expectedLiquidityPools := setupGraphWithLiquidityPools(t) + assertLiquidityPoolsEqual(t, expectedLiquidityPools, graph.LiquidityPools()) +} + +func TestUpdateLiquidityPools(t *testing.T) { + graph, expectedLiquidityPools := setupGraphWithLiquidityPools(t) + p0 := clonePool(expectedLiquidityPools[0]) + p1 := clonePool(expectedLiquidityPools[1]) + p0.Body.ConstantProduct.ReserveA += 100 + p1.Body.ConstantProduct.ReserveB -= 2 + expectedLiquidityPools[0] = p0 + expectedLiquidityPools[1] = p1 + + graph.AddLiquidityPools(expectedLiquidityPools[:2]...) + if !assert.NoError(t, graph.Apply(2)) { + t.FailNow() + } + + assertLiquidityPoolsEqual(t, expectedLiquidityPools, graph.LiquidityPools()) +} + +func TestRemoveLiquidityPools(t *testing.T) { + graph, expectedLiquidityPools := setupGraphWithLiquidityPools(t) + p0 := clonePool(expectedLiquidityPools[0]) + p0.Body.ConstantProduct.ReserveA += 100 + expectedLiquidityPools[0] = p0 + + graph.AddLiquidityPools(expectedLiquidityPools[0]) + graph.RemoveLiquidityPool(expectedLiquidityPools[1]) + + if !assert.NoError(t, graph.Apply(2)) { + t.FailNow() + } + + assertLiquidityPoolsEqual(t, expectedLiquidityPools[:1], graph.LiquidityPools()) +} + +func TestUpdateOfferOrderBook(t *testing.T) { + graph := NewOrderBookGraph() + + if !graph.IsEmpty() { + t.Fatal("expected graph to be empty") + } + + graph.AddOffers(dollarOffer, threeEurOffer, eurOffer, twoEurOffer, + quarterOffer, fiftyCentsOffer) + err := graph.Apply(1) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if graph.lastLedger != 1 { + t.Fatalf("expected last ledger to be %v but got %v", 1, graph.lastLedger) + } + + if graph.IsEmpty() { + t.Fatal("expected graph to not be empty") + } + + eurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(9), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 1, + D: 1, + }, + Amount: xdr.Int64(500), + } + otherEurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(10), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 2, + D: 1, + }, + Amount: xdr.Int64(500), + } + + usdEurOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(11), + Buying: usdAsset, + Selling: eurAsset, + Price: xdr.Price{ + N: 1, + D: 3, + }, + Amount: xdr.Int64(500), + } + + graph.AddOffers(eurUsdOffer, otherEurUsdOffer, usdEurOffer) + err = graph.Apply(2) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if graph.lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, graph.lastLedger) + } + + usdEurOffer.Price.N = 4 + usdEurOffer.Price.D = 1 + + otherEurUsdOffer.Price.N = 1 + otherEurUsdOffer.Price.D = 2 + + dollarOffer.Amount = 12 + + graph.AddOffers(usdEurOffer, otherEurUsdOffer, dollarOffer) + err = graph.Apply(3) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if graph.lastLedger != 3 { + t.Fatalf("expected last ledger to be %v but got %v", 3, graph.lastLedger) + } + + assetStringToID := map[string]int32{} + idToAssetString := []string{} + for i, asset := range []xdr.Asset{ + nativeAsset, + usdAsset, + eurAsset, + } { + assetStringToID[asset.String()] = int32(i) + idToAssetString = append(idToAssetString, asset.String()) + } + expectedGraph := &OrderBookGraph{ + idToAssetString: idToAssetString, + assetStringToID: assetStringToID, + venuesForSellingAsset: []edgeSet{ + { + { + assetStringToID[usdAsset.String()], + makeVenues(quarterOffer, fiftyCentsOffer, 
dollarOffer), + }, + { + assetStringToID[eurAsset.String()], + makeVenues(eurOffer, twoEurOffer, threeEurOffer), + }, + }, + { + { + assetStringToID[eurAsset.String()], + makeVenues(otherEurUsdOffer, eurUsdOffer), + }, + }, + { + { + assetStringToID[usdAsset.String()], + makeVenues(usdEurOffer), + }, + }, + }, + venuesForBuyingAsset: []edgeSet{ + {}, + { + { + assetStringToID[nativeAsset.String()], + makeVenues(quarterOffer, fiftyCentsOffer, dollarOffer), + }, + { + assetStringToID[eurAsset.String()], + makeVenues(usdEurOffer), + }, + }, + { + { + assetStringToID[nativeAsset.String()], + makeVenues(eurOffer, twoEurOffer, threeEurOffer), + }, + { + assetStringToID[usdAsset.String()], + makeVenues(otherEurUsdOffer, eurUsdOffer), + }, + }, + }, + tradingPairForOffer: map[xdr.Int64]tradingPair{ + quarterOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, nativeAsset), + fiftyCentsOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, nativeAsset), + dollarOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, nativeAsset), + eurOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, nativeAsset), + twoEurOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, nativeAsset), + threeEurOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, nativeAsset), + eurUsdOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, usdAsset), + otherEurUsdOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, usdAsset), + usdEurOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, eurAsset), + }, + } + + assertGraphEquals(t, expectedGraph, graph) +} + +func TestDiscard(t *testing.T) { + graph := NewOrderBookGraph() + + graph.AddOffers(dollarOffer, threeEurOffer, eurOffer, twoEurOffer, + quarterOffer, fiftyCentsOffer) + graph.Discard() + if graph.lastLedger != 0 { + t.Fatalf("expected last ledger to be %v but got %v", 0, graph.lastLedger) + } + + if err := graph.Apply(1); err != nil { + t.Fatalf("unexpected error %v", err) + } + if !graph.IsEmpty() { + t.Fatal("expected graph to be empty") + } + if graph.lastLedger != 1 { + t.Fatalf("expected last ledger to be %v but got %v", 1, graph.lastLedger) + } + + graph.AddOffers(dollarOffer) + err := graph.Apply(2) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if graph.IsEmpty() { + t.Fatal("expected graph to be not empty") + } + if graph.lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, graph.lastLedger) + } + + expectedOffers := []xdr.OfferEntry{dollarOffer} + assertOfferListEquals(t, graph.Offers(), expectedOffers) + + graph.AddOffers(threeEurOffer) + graph.Discard() + assertOfferListEquals(t, graph.Offers(), expectedOffers) +} + +func TestRemoveOfferOrderBook(t *testing.T) { + graph := NewOrderBookGraph() + + graph.AddOffers(dollarOffer, threeEurOffer, eurOffer, twoEurOffer, + quarterOffer, fiftyCentsOffer) + if !assert.NoError(t, graph.Apply(1)) || + !assert.EqualValues(t, 1, graph.lastLedger) { + t.FailNow() + } + + eurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(9), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 1, + D: 1, + }, + Amount: xdr.Int64(500), + } + otherEurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(10), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 2, + D: 1, + }, + Amount: xdr.Int64(500), + } + + usdEurOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(11), + Buying: usdAsset, + Selling: eurAsset, + Price: xdr.Price{ + N: 1, + D: 3, + }, + Amount: xdr.Int64(500), + } + + 
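	// The removals below are queued in the same batch as the additions, so the
	// graph produced by Apply(2) reflects the net result of both.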
graph.AddOffers(eurUsdOffer, otherEurUsdOffer, usdEurOffer) + graph.RemoveOffer(usdEurOffer.OfferId) + graph.RemoveOffer(otherEurUsdOffer.OfferId) + graph.RemoveOffer(dollarOffer.OfferId) + + if !assert.NoError(t, graph.Apply(2)) || + !assert.EqualValues(t, 2, graph.lastLedger) { + t.FailNow() + } + + assetStringToID := map[string]int32{} + idToAssetString := []string{} + for i, asset := range []xdr.Asset{ + nativeAsset, + usdAsset, + eurAsset, + } { + assetStringToID[asset.String()] = int32(i) + idToAssetString = append(idToAssetString, asset.String()) + } + expectedGraph := &OrderBookGraph{ + idToAssetString: idToAssetString, + assetStringToID: assetStringToID, + venuesForSellingAsset: []edgeSet{ + { + { + assetStringToID[usdAsset.String()], + makeVenues(quarterOffer, fiftyCentsOffer), + }, + { + assetStringToID[eurAsset.String()], + makeVenues(eurOffer, twoEurOffer, threeEurOffer), + }, + }, + { + { + assetStringToID[eurAsset.String()], + makeVenues(eurUsdOffer), + }, + }, + {}, + }, + venuesForBuyingAsset: []edgeSet{ + {}, + { + { + assetStringToID[nativeAsset.String()], + makeVenues(quarterOffer, fiftyCentsOffer), + }, + }, + { + { + assetStringToID[nativeAsset.String()], + makeVenues(eurOffer, twoEurOffer, threeEurOffer), + }, + { + assetStringToID[usdAsset.String()], + makeVenues(eurUsdOffer), + }, + }, + }, + tradingPairForOffer: map[xdr.Int64]tradingPair{ + quarterOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, nativeAsset), + fiftyCentsOffer.OfferId: makeTradingPair(assetStringToID, usdAsset, nativeAsset), + eurOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, nativeAsset), + twoEurOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, nativeAsset), + threeEurOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, nativeAsset), + eurUsdOffer.OfferId: makeTradingPair(assetStringToID, eurAsset, usdAsset), + }, + } + + assertGraphEquals(t, expectedGraph, graph) + + graph. + RemoveOffer(quarterOffer.OfferId). + RemoveOffer(fiftyCentsOffer.OfferId). + RemoveOffer(eurOffer.OfferId). + RemoveOffer(twoEurOffer.OfferId). + RemoveOffer(threeEurOffer.OfferId). 
+ RemoveOffer(eurUsdOffer.OfferId) + + assert.NoError(t, graph.Apply(3)) + assert.EqualValues(t, 3, graph.lastLedger) + + // Skip over offer ids which are not present in the graph + assert.NoError(t, graph.RemoveOffer(988888).Apply(4)) + + expectedGraph.Clear() + assertGraphEquals(t, expectedGraph, graph) + assert.True(t, graph.IsEmpty()) +} + +func TestConsumeOffersForSellingAsset(t *testing.T) { + kp := keypair.MustRandom() + ignoreOffersFrom := xdr.MustAddress(kp.Address()) + otherSellerTwoEurOffer := twoEurOffer + otherSellerTwoEurOffer.SellerId = ignoreOffersFrom + + denominatorZeroOffer := twoEurOffer + denominatorZeroOffer.Price.D = 0 + + overflowOffer := twoEurOffer + overflowOffer.Amount = math.MaxInt64 + overflowOffer.Price.N = math.MaxInt32 + overflowOffer.Price.D = 1 + + for _, testCase := range []struct { + name string + offers []xdr.OfferEntry + ignoreOffersFrom *xdr.AccountId + currentAssetAmount xdr.Int64 + result xdr.Int64 + err error + }{ + { + "offers must not be empty", + []xdr.OfferEntry{}, + &issuer, + 100, + 0, + errEmptyOffers, + }, + { + "currentAssetAmount must be positive", + []xdr.OfferEntry{eurOffer}, + &ignoreOffersFrom, + 0, + 0, + errAssetAmountIsZero, + }, + { + "ignore all offers", + []xdr.OfferEntry{eurOffer}, + &issuer, + 1, + -1, + nil, + }, + { + "offer denominator cannot be zero", + []xdr.OfferEntry{denominatorZeroOffer}, + &ignoreOffersFrom, + 10000, + 0, + price.ErrDivisionByZero, + }, + { + "ignore some offers", + []xdr.OfferEntry{eurOffer, otherSellerTwoEurOffer}, + &issuer, + 100, + 200, + nil, + }, + { + "ignore overflow offers", + []xdr.OfferEntry{overflowOffer}, + nil, + math.MaxInt64, + -1, + nil, + }, + { + "not enough offers to consume", + []xdr.OfferEntry{eurOffer, twoEurOffer}, + nil, + 1001, + -1, + nil, + }, + { + "consume all offers", + []xdr.OfferEntry{eurOffer, twoEurOffer, threeEurOffer}, + nil, + 1500, + 3000, + nil, + }, + { + "consume offer partially", + []xdr.OfferEntry{eurOffer, twoEurOffer}, + nil, + 2, + 2, + nil, + }, + { + "round up", + []xdr.OfferEntry{quarterOffer}, + nil, + 5, + 2, + nil, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + result, err := consumeOffersForSellingAsset( + testCase.offers, + testCase.ignoreOffersFrom, + testCase.currentAssetAmount, + 0, + ) + if err != testCase.err { + t.Fatalf("expected error %v but got %v", testCase.err, err) + } + if err == nil { + if result != testCase.result { + t.Fatalf("expected %v but got %v", testCase.result, result) + } + } + }) + } + +} + +func TestConsumeOffersForBuyingAsset(t *testing.T) { + kp := keypair.MustRandom() + ignoreOffersFrom := xdr.MustAddress(kp.Address()) + otherSellerTwoEurOffer := twoEurOffer + otherSellerTwoEurOffer.SellerId = ignoreOffersFrom + + denominatorZeroOffer := twoEurOffer + denominatorZeroOffer.Price.D = 0 + + overflowOffer := twoEurOffer + overflowOffer.Price.N = 1 + overflowOffer.Price.D = math.MaxInt32 + + for _, testCase := range []struct { + name string + offers []xdr.OfferEntry + currentAssetAmount xdr.Int64 + result xdr.Int64 + err error + }{ + { + "offers must not be empty", + []xdr.OfferEntry{}, + 100, + 0, + errEmptyOffers, + }, + { + "currentAssetAmount must be positive", + []xdr.OfferEntry{eurOffer}, + 0, + 0, + errAssetAmountIsZero, + }, + { + "offer denominator cannot be zero", + []xdr.OfferEntry{denominatorZeroOffer}, + 10000, + -1, + nil, + }, + { + "balance too low to consume offers", + []xdr.OfferEntry{twoEurOffer}, + 1, + -1, + nil, + }, + { + "not enough offers to consume", + []xdr.OfferEntry{eurOffer, 
twoEurOffer}, + 1502, + -1, + nil, + }, + { + "ignore overflow offers", + []xdr.OfferEntry{overflowOffer}, + math.MaxInt64, + -1, + nil, + }, + { + "consume all offers", + []xdr.OfferEntry{eurOffer, twoEurOffer, threeEurOffer}, + 3000, + 1500, + nil, + }, + { + "consume offer partially", + []xdr.OfferEntry{eurOffer, twoEurOffer}, + 2, + 2, + nil, + }, + { + "round down", + []xdr.OfferEntry{eurOffer, twoEurOffer}, + 1501, + 1000, + nil, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + result, err := consumeOffersForBuyingAsset( + testCase.offers, + testCase.currentAssetAmount, + ) + assert.Equal(t, testCase.err, err) + if err == nil { + assert.Equal(t, testCase.result, result) + } + }) + } + +} + +func TestSortAndFilterPathsBySourceAsset(t *testing.T) { + allPaths := []Path{ + { + SourceAmount: 3, + SourceAsset: eurAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + { + SourceAmount: 4, + SourceAsset: eurAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + { + SourceAmount: 1, + SourceAsset: usdAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + { + SourceAmount: 2, + SourceAsset: eurAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + { + SourceAmount: 2, + SourceAsset: eurAsset.String(), + InteriorNodes: []string{ + nativeAsset.String(), + }, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + { + SourceAmount: 10, + SourceAsset: nativeAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + } + sortedAndFiltered, err := sortAndFilterPaths( + allPaths, + 3, + sortBySourceAsset, + ) + assert.NoError(t, err) + + expectedPaths := []Path{ + { + SourceAmount: 2, + SourceAsset: eurAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + { + SourceAmount: 2, + SourceAsset: eurAsset.String(), + InteriorNodes: []string{ + nativeAsset.String(), + }, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + { + SourceAmount: 3, + SourceAsset: eurAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + { + SourceAmount: 1, + SourceAsset: usdAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + { + SourceAmount: 10, + SourceAsset: nativeAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 1000, + }, + } + + assertPathEquals(t, sortedAndFiltered, expectedPaths) +} + +func TestSortAndFilterPathsByDestinationAsset(t *testing.T) { + allPaths := []Path{ + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: eurAsset.String(), + DestinationAmount: 3, + }, + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: eurAsset.String(), + DestinationAmount: 4, + }, + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: usdAsset.String(), + DestinationAmount: 1, + }, + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: eurAsset.String(), + DestinationAmount: 2, + }, + { + SourceAmount: 1000, + SourceAsset: 
yenAsset.String(), + InteriorNodes: []string{ + nativeAsset.String(), + }, + DestinationAsset: eurAsset.String(), + DestinationAmount: 2, + }, + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 10, + }, + } + sortedAndFiltered, err := sortAndFilterPaths( + allPaths, + 3, + sortByDestinationAsset, + ) + assert.NoError(t, err) + + expectedPaths := []Path{ + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: eurAsset.String(), + DestinationAmount: 4, + }, + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: eurAsset.String(), + DestinationAmount: 3, + }, + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: eurAsset.String(), + DestinationAmount: 2, + }, + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: usdAsset.String(), + DestinationAmount: 1, + }, + { + SourceAmount: 1000, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 10, + }, + } + + assertPathEquals(t, sortedAndFiltered, expectedPaths) +} + +func TestFindPaths(t *testing.T) { + graph := NewOrderBookGraph() + + graph.AddOffers(dollarOffer, threeEurOffer, eurOffer, twoEurOffer, + quarterOffer, fiftyCentsOffer) + if !assert.NoError(t, graph.Apply(1)) { + t.FailNow() + } + + eurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(9), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 1, + D: 1, + }, + Amount: xdr.Int64(500), + } + otherEurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(10), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 2, + D: 1, + }, + Amount: xdr.Int64(500), + } + + usdEurOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(11), + Buying: usdAsset, + Selling: eurAsset, + Price: xdr.Price{ + N: 1, + D: 3, + }, + Amount: xdr.Int64(500), + } + + chfEurOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(12), + Buying: chfAsset, + Selling: eurAsset, + Price: xdr.Price{ + N: 1, + D: 2, + }, + Amount: xdr.Int64(500), + } + + yenChfOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(13), + Buying: yenAsset, + Selling: chfAsset, + Price: xdr.Price{ + N: 1, + D: 2, + }, + Amount: xdr.Int64(500), + } + + graph.AddOffers(eurUsdOffer, otherEurUsdOffer, usdEurOffer, chfEurOffer, yenChfOffer) + if !assert.NoError(t, graph.Apply(2)) { + t.FailNow() + } + + kp := keypair.MustRandom() + ignoreOffersFrom := xdr.MustAddress(kp.Address()) + + paths, lastLedger, err := graph.FindPaths( + context.TODO(), + 3, + nativeAsset, + 20, + &ignoreOffersFrom, + []xdr.Asset{ + yenAsset, + usdAsset, + }, + []xdr.Int64{ + 0, + 0, + }, + true, + 5, + true, + ) + assert.NoError(t, err) + assertPathEquals(t, paths, []Path{}) + assert.EqualValues(t, 2, lastLedger) + + paths, lastLedger, err = graph.FindPaths( + context.TODO(), + 3, + nativeAsset, + 20, + &ignoreOffersFrom, + []xdr.Asset{ + yenAsset, + usdAsset, + }, + []xdr.Int64{ + 100000, + 60000, + }, + true, + 5, + true, + ) + + expectedPaths := []Path{ + { + SourceAmount: 5, + SourceAsset: usdAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + 
eurAsset.String(), + chfAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + } + + assert.NoError(t, err) + assert.EqualValues(t, 2, lastLedger) + assertPathEquals(t, paths, expectedPaths) + + paths, lastLedger, err = graph.FindPaths( + context.TODO(), + 3, + nativeAsset, + 20, + &ignoreOffersFrom, + []xdr.Asset{ + yenAsset, + usdAsset, + }, + []xdr.Int64{ + 0, + 0, + }, + false, + 5, + true, + ) + assert.NoError(t, err) + assert.EqualValues(t, 2, lastLedger) + assertPathEquals(t, paths, expectedPaths) + + paths, lastLedger, err = graph.FindPaths( + context.TODO(), + 4, + nativeAsset, + 20, + &ignoreOffersFrom, + []xdr.Asset{ + yenAsset, + usdAsset, + }, + []xdr.Int64{ + 100000, + 60000, + }, + true, + 5, + true, + ) + + expectedPaths = []Path{ + { + SourceAmount: 5, + SourceAsset: usdAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + { + SourceAmount: 2, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + usdAsset.String(), + eurAsset.String(), + chfAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + eurAsset.String(), + chfAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + } + + assert.NoError(t, err) + assert.EqualValues(t, 2, lastLedger) + assertPathEquals(t, paths, expectedPaths) + + paths, lastLedger, err = graph.FindPaths( + context.TODO(), + 4, + nativeAsset, + 20, + &ignoreOffersFrom, + []xdr.Asset{ + yenAsset, + usdAsset, + nativeAsset, + }, + []xdr.Int64{ + 100000, + 60000, + 100000, + }, + true, + 5, + true, + ) + + expectedPaths = []Path{ + { + SourceAmount: 5, + SourceAsset: usdAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + { + SourceAmount: 2, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + usdAsset.String(), + eurAsset.String(), + chfAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + eurAsset.String(), + chfAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + // include the empty path where xlm is transferred without any + // conversions + { + SourceAmount: 20, + SourceAsset: nativeAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + } + + assert.NoError(t, err) + assert.EqualValues(t, 2, lastLedger) + assertPathEquals(t, paths, expectedPaths) + + t.Run("find paths starting from non-existent asset", func(t *testing.T) { + paths, lastLedger, err = graph.FindPaths( + context.TODO(), + 4, + xdr.MustNewCreditAsset("DNE", yenAsset.GetIssuer()), + 20, + &ignoreOffersFrom, + []xdr.Asset{ + yenAsset, + usdAsset, + }, + []xdr.Int64{ + 100000, + 60000, + }, + false, + 5, + true, + ) + assert.NoError(t, err) + assert.EqualValues(t, 2, lastLedger) + assert.Len(t, paths, 0) + }) + + t.Run("find paths ending at non-existent assets", func(t *testing.T) { + paths, lastLedger, err = graph.FindPaths( + context.TODO(), + 4, + usdAsset, + 20, + &ignoreOffersFrom, + []xdr.Asset{ + xdr.MustNewCreditAsset("DNE", yenAsset.GetIssuer()), + }, + []xdr.Int64{ + 1000000000, + }, + false, + 5, + true, + ) + assert.NoError(t, err) + assert.EqualValues(t, 2, lastLedger) + assert.Len(t, paths, 0) + }) 
+} + +func TestFindPathsStartingAt(t *testing.T) { + graph := NewOrderBookGraph() + + graph.AddOffers(dollarOffer, threeEurOffer, eurOffer, twoEurOffer, + quarterOffer, fiftyCentsOffer) + + err := graph.Apply(1) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + eurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(9), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 1, + D: 1, + }, + Amount: xdr.Int64(500), + } + otherEurUsdOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(10), + Buying: eurAsset, + Selling: usdAsset, + Price: xdr.Price{ + N: 2, + D: 1, + }, + Amount: xdr.Int64(500), + } + + usdEurOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(11), + Buying: usdAsset, + Selling: eurAsset, + Price: xdr.Price{ + N: 1, + D: 3, + }, + Amount: xdr.Int64(500), + } + + chfEurOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(12), + Buying: chfAsset, + Selling: eurAsset, + Price: xdr.Price{ + N: 1, + D: 2, + }, + Amount: xdr.Int64(500), + } + + yenChfOffer := xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(13), + Buying: yenAsset, + Selling: chfAsset, + Price: xdr.Price{ + N: 1, + D: 2, + }, + Amount: xdr.Int64(500), + } + + graph.AddOffers(eurUsdOffer, otherEurUsdOffer, usdEurOffer, chfEurOffer, yenChfOffer) + err = graph.Apply(2) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + paths, lastLedger, err := graph.FindFixedPaths( + context.TODO(), + 3, + usdAsset, + 5, + []xdr.Asset{nativeAsset}, + 5, + true, + ) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, lastLedger) + } + + expectedPaths := []Path{ + { + SourceAmount: 5, + SourceAsset: usdAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + } + + assertPathEquals(t, paths, expectedPaths) + + paths, lastLedger, err = graph.FindFixedPaths( + context.TODO(), + 2, + yenAsset, + 5, + []xdr.Asset{nativeAsset}, + 5, + true, + ) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, lastLedger) + } + + expectedPaths = []Path{} + + assertPathEquals(t, paths, expectedPaths) + + paths, lastLedger, err = graph.FindFixedPaths( + context.TODO(), + 3, + yenAsset, + 5, + []xdr.Asset{nativeAsset}, + 5, + true, + ) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, lastLedger) + } + + expectedPaths = []Path{ + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + chfAsset.String(), + eurAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + } + + assertPathEquals(t, paths, expectedPaths) + + paths, lastLedger, err = graph.FindFixedPaths( + context.TODO(), + 5, + yenAsset, + 5, + []xdr.Asset{nativeAsset}, + 5, + true, + ) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, lastLedger) + } + + expectedPaths = []Path{ + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + chfAsset.String(), + eurAsset.String(), + usdAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 80, + }, + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + chfAsset.String(), + 
eurAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + } + + assertPathEquals(t, paths, expectedPaths) + + paths, lastLedger, err = graph.FindFixedPaths( + context.TODO(), + 5, + yenAsset, + 5, + []xdr.Asset{nativeAsset, usdAsset, yenAsset}, + 5, + true, + ) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if lastLedger != 2 { + t.Fatalf("expected last ledger to be %v but got %v", 2, lastLedger) + } + + expectedPaths = []Path{ + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + chfAsset.String(), + eurAsset.String(), + }, + DestinationAsset: usdAsset.String(), + DestinationAmount: 20, + }, + // include the empty path where yen is transferred without any + // conversions + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{}, + DestinationAsset: yenAsset.String(), + DestinationAmount: 5, + }, + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + chfAsset.String(), + eurAsset.String(), + usdAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 80, + }, + { + SourceAmount: 5, + SourceAsset: yenAsset.String(), + InteriorNodes: []string{ + chfAsset.String(), + eurAsset.String(), + }, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 20, + }, + } + assertPathEquals(t, paths, expectedPaths) + + t.Run("find fixed paths starting from non-existent asset", func(t *testing.T) { + paths, lastLedger, err = graph.FindFixedPaths( + context.TODO(), + 5, + xdr.MustNewCreditAsset("DNE", yenAsset.GetIssuer()), + 5, + []xdr.Asset{nativeAsset, usdAsset}, + 5, + true, + ) + + assert.NoError(t, err) + assert.EqualValues(t, 2, lastLedger) + assert.Len(t, paths, 0) + }) + + t.Run("find fixed paths ending at non-existent assets", func(t *testing.T) { + paths, lastLedger, err = graph.FindFixedPaths( + context.TODO(), + 5, + usdAsset, + 5, + []xdr.Asset{xdr.MustNewCreditAsset("DNE", yenAsset.GetIssuer())}, + 5, + true, + ) + + assert.NoError(t, err) + assert.EqualValues(t, 2, lastLedger) + assert.Len(t, paths, 0) + }) +} + +func TestPathThroughLiquidityPools(t *testing.T) { + graph := NewOrderBookGraph() + graph.AddLiquidityPools(eurUsdLiquidityPool) + graph.AddLiquidityPools(eurYenLiquidityPool) + graph.AddLiquidityPools(usdChfLiquidityPool) + if !assert.NoErrorf(t, graph.Apply(1), "applying LPs to graph failed") { + t.FailNow() + } + + kp := keypair.MustRandom() + fakeSource := xdr.MustAddress(kp.Address()) + + t.Run("happy path", func(t *testing.T) { + paths, _, err := graph.FindPaths( + context.TODO(), + 5, // more than enough hops + yenAsset, // path should go USD -> EUR -> Yen + 100, // less than LP reserves for either pool + &fakeSource, // fake source account to ignore pools from + []xdr.Asset{usdAsset}, + []xdr.Int64{127}, // we have exactly the right amount of $ to trade + true, + 5, // irrelevant + true, + ) + + // The path should go USD -> EUR -> Yen, jumping through both liquidity + // pools. For a payout of 100 Yen from the EUR/Yen pool, we need to + // exchange 112 Euros. To get 112 EUR, we need to exchange 127 USD.
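+ // As a rough hand-check of the first hop (assuming the EUR/Yen pool, like
+ // the EUR/USD pool exercised in pools_test.go, holds 1000 units of each
+ // reserve), CAP-38's x = ceil[Xy / ((Y - y)(1 - F))] with a 30 bps fee
+ // gives ceil[1000*100 / (900 * 0.997)] = 112 EUR for 100 Yen; the
+ // 127 USD for 112 EUR step is checked directly in pools_test.go.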
+ expectedPaths := []Path{ + { + SourceAsset: usdAsset.String(), + SourceAmount: 127, + DestinationAsset: yenAsset.String(), + DestinationAmount: 100, + InteriorNodes: []string{eurAsset.String()}, + }, + } + + assert.NoError(t, err) + assertPathEquals(t, expectedPaths, paths) + }) + + t.Run("exclude pools", func(t *testing.T) { + paths, _, err := graph.FindPaths( + context.TODO(), + 5, // more than enough hops + yenAsset, // path should go USD -> EUR -> Yen + 100, // less than LP reserves for either pool + &fakeSource, // fake source account to ignore pools from + []xdr.Asset{usdAsset}, + []xdr.Int64{127}, // we have exactly the right amount of $ to trade + true, + 5, // irrelevant + false, + ) + + assert.NoError(t, err) + assert.Empty(t, paths) + }) + + t.Run("not enough source balance", func(t *testing.T) { + paths, _, err := graph.FindPaths(context.TODO(), + 5, yenAsset, 100, &fakeSource, []xdr.Asset{usdAsset}, + []xdr.Int64{126}, // the only change: we're short on balance now + true, 5, + true, + ) + + assert.NoError(t, err) + assertPathEquals(t, []Path{}, paths) + }) + + t.Run("more hops", func(t *testing.T) { + // The conversion rate is different this time: one more hop means + // one more exchange rate to deal with. + paths, _, err := graph.FindPaths(context.TODO(), + 5, + yenAsset, // different path: CHF -> USD -> EUR -> Yen + 100, + &fakeSource, + []xdr.Asset{chfAsset}, + []xdr.Int64{73}, + true, + 5, + true, + ) + + expectedPaths := []Path{{ + SourceAsset: chfAsset.String(), + SourceAmount: 73, + DestinationAsset: yenAsset.String(), + DestinationAmount: 100, + InteriorNodes: []string{usdAsset.String(), eurAsset.String()}, + }} + + assert.NoError(t, err) + assertPathEquals(t, expectedPaths, paths) + }) +} + +func TestInterleavedPaths(t *testing.T) { + graph := NewOrderBookGraph() + graph.AddLiquidityPools(nativeUsdPool, eurUsdLiquidityPool, usdChfLiquidityPool) + if !assert.NoError(t, graph.Apply(1)) { + t.FailNow() + } + + graph.AddOffers(xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(42), + Selling: nativeAsset, + Buying: eurAsset, + Amount: 100, + Price: xdr.Price{1, 1}, + }, xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(43), + Selling: chfAsset, + Buying: usdAsset, + Amount: 1, + Price: xdr.Price{1, 1}, + }) + if !assert.NoError(t, graph.Apply(2)) { + t.FailNow() + } + + kp := keypair.MustRandom() + fakeSource := xdr.MustAddress(kp.Address()) + + // The final graph looks like the following: + // + // - XLM: Offer 100 for 1 EUR each + // LP for USD, 50:1 + // + // - EUR: LP for USD, 1:1 + // + // - USD: LP for EUR, 1:1 + // LP for XLM, 1:4 + // LP for CHF, 2:1 + // + // - CHF: Offer 1 for 4 USD each + // LP for USD, 1:2 + + paths, _, err := graph.FindPaths(context.TODO(), + 5, + nativeAsset, + 100, + &fakeSource, + []xdr.Asset{chfAsset}, + []xdr.Int64{1000}, + true, + 5, + true, + ) + + // There should be two paths: one that consumes the EUR/XLM offers and one + // that goes through the USD/XLM liquidity pool.
+ // + // If we take up the offers, it's very efficient: + // 64 CHF for 166 USD for 142 EUR for 100 XLM + // + // If we only go through pools, it's less-so: + // 90 CHF for 152 USD for 100 XLM + expectedPaths := []Path{{ + SourceAsset: chfAsset.String(), + SourceAmount: 64, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 100, + InteriorNodes: []string{usdAsset.String(), eurAsset.String()}, + }, { + SourceAsset: chfAsset.String(), + SourceAmount: 90, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 100, + InteriorNodes: []string{usdAsset.String()}, + }} + + assert.NoError(t, err) + assertPathEquals(t, expectedPaths, paths) + + // If we ask for more than the offer can handle, though, it should only go + // through the LPs, not some sort of mix of the two: + paths, _, err = graph.FindPaths(context.TODO(), 5, + nativeAsset, 101, // only change: more than the offer has + &fakeSource, []xdr.Asset{chfAsset}, []xdr.Int64{1000}, + true, 5, true, + ) + + expectedPaths = []Path{{ + SourceAsset: chfAsset.String(), + SourceAmount: 96, + DestinationAsset: nativeAsset.String(), + DestinationAmount: 101, + InteriorNodes: []string{usdAsset.String()}, + }} + + assert.NoError(t, err) + assertPathEquals(t, expectedPaths, paths) + + t.Run("without pools", func(t *testing.T) { + paths, _, err = graph.FindPaths(context.TODO(), 5, + nativeAsset, 100, &fakeSource, + []xdr.Asset{chfAsset}, []xdr.Int64{1000}, true, 5, + false, // only change: no pools + ) + assert.NoError(t, err) + + onlyOffersGraph := NewOrderBookGraph() + onlyOffersGraph.AddOffers(graph.Offers()...) + if !assert.NoError(t, onlyOffersGraph.Apply(2)) { + t.FailNow() + } + expectedPaths, _, err = onlyOffersGraph.FindPaths(context.TODO(), 5, + nativeAsset, 100, &fakeSource, + []xdr.Asset{chfAsset}, []xdr.Int64{1000}, true, 5, + true, + ) + assert.NoError(t, err) + + assertPathEquals(t, expectedPaths, paths) + }) +} + +func TestInterleavedFixedPaths(t *testing.T) { + graph := NewOrderBookGraph() + graph.AddLiquidityPools(nativeUsdPool, nativeEurPool, + eurUsdLiquidityPool, usdChfLiquidityPool) + if !assert.NoErrorf(t, graph.Apply(1), "applying LPs to graph failed") { + t.FailNow() + } + graph.AddOffers(xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(42), + Selling: eurAsset, + Buying: nativeAsset, + Amount: 10, + Price: xdr.Price{1, 1}, + }, xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(43), + Selling: chfAsset, + Buying: usdAsset, + Amount: 1, + Price: xdr.Price{1, 1}, + }) + if !assert.NoErrorf(t, graph.Apply(2), "applying offers to graph failed") { + t.FailNow() + } + + paths, _, err := graph.FindFixedPaths(context.TODO(), + 5, + nativeAsset, + 1234, + []xdr.Asset{chfAsset}, + 5, + true, + ) + + expectedPaths := []Path{ + { + SourceAsset: nativeAsset.String(), + SourceAmount: 1234, + DestinationAsset: chfAsset.String(), + DestinationAmount: 13, + InteriorNodes: []string{usdAsset.String()}, + }, + } + + assert.NoError(t, err) + assertPathEquals(t, expectedPaths, paths) + + paths, _, err = graph.FindFixedPaths(context.TODO(), + 5, + nativeAsset, + 1234, + []xdr.Asset{chfAsset}, + 5, + false, + ) + assert.NoError(t, err) + + onlyOffersGraph := NewOrderBookGraph() + for _, offer := range graph.Offers() { + onlyOffersGraph.addOffer(offer) + } + if !assert.NoErrorf(t, onlyOffersGraph.Apply(2), "applying offers to graph failed") { + t.FailNow() + } + expectedPaths, _, err = onlyOffersGraph.FindFixedPaths(context.TODO(), + 5, + nativeAsset, + 1234, + []xdr.Asset{chfAsset}, + 5, + true, + ) + + 
assert.NoError(t, err) + assertPathEquals(t, expectedPaths, paths) +} + +func TestRepro(t *testing.T) { + // A reproduction of the bug report: + // https://github.com/stellar/go/issues/4014 + usdc := xdr.MustNewCreditAsset("USDC", "GAEB3HSAWRVILER6T5NMX5VAPTK4PPO2BAL37HR2EOUIK567GJFEO437") + eurt := xdr.MustNewCreditAsset("EURT", "GABHG6C7YL2WA2ZJSONPD6ZBWLPAWKYDPYMK6BQRFLZXPQE7IBSTMPNN") + + ybx := xdr.MustNewCreditAsset("YBX", "GCIWMQHPST7LQ7V4LHAF2UP6ZSDCFRYYP7IM4BBAFSBZMVTR3BB4OQZ5") + btc := xdr.MustNewCreditAsset("BTC", "GA2RETJWNREEUY4JHMZVXCE6EJG6MGBUEXK2QXXMNE5EYAQMG22XCXHA") + eth := xdr.MustNewCreditAsset("ETH", "GATPY6X6OYTXKNRKVP6LEMUUQKFDUW5P7HL4XI3KWRCY52RAWYJ5FLMC") + + usdcYbxPool := makePool(usdc, ybx, 115066115, 9133346) + eurtYbxPool := makePool(eurt, ybx, 871648100, 115067) + btcYbxPool := makePool(btc, ybx, 453280, 19884933) + ethYbxPool := makePool(eth, ybx, 900000, 10000000) + usdcForBtcOffer := xdr.OfferEntry{ + OfferId: 42, + Selling: usdc, + Buying: btc, + Amount: 1000000000000000, + Price: xdr.Price{N: 81, D: 5000000}, + } + + graph := NewOrderBookGraph() + graph.AddLiquidityPools(usdcYbxPool, eurtYbxPool, btcYbxPool, ethYbxPool) + graph.AddOffers(usdcForBtcOffer) + if !assert.NoError(t, graph.Apply(2)) { + t.FailNow() + } + + // get me 70000.0000000 USDC if I have some ETH + paths, _, err := graph.FindPaths(context.TODO(), 5, + usdc, 700000000000, nil, []xdr.Asset{eth}, []xdr.Int64{0}, + false, 5, true, + ) + + assert.NoError(t, err) + assertPathEquals(t, []Path{}, paths) + // can't, because BTC/YBX pool is too small +} + +func makeVenues(offers ...xdr.OfferEntry) Venues { + return Venues{offers: offers} +} + +func makeTradingPair(assetStringToID map[string]int32, buying, selling xdr.Asset) tradingPair { + return tradingPair{ + buyingAsset: assetStringToID[buying.String()], + sellingAsset: assetStringToID[selling.String()], + } +} + +func makePool(A, B xdr.Asset, a, b xdr.Int64) xdr.LiquidityPoolEntry { + if !A.LessThan(B) { + B, A = A, B + b, a = a, b + } + + poolId, _ := xdr.NewPoolId(A, B, xdr.LiquidityPoolFeeV18) + return xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolId, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: A, + AssetB: B, + Fee: xdr.LiquidityPoolFeeV18, + }, + ReserveA: a, + ReserveB: b, + TotalPoolShares: 123, + PoolSharesTrustLineCount: 456, + }, + }, + } +} + +func getCode(asset xdr.Asset) string { + code := asset.GetCode() + if code == "" { + return "xlm" + } + return code +} diff --git a/exp/orderbook/pools.go b/exp/orderbook/pools.go new file mode 100644 index 0000000000..766401771b --- /dev/null +++ b/exp/orderbook/pools.go @@ -0,0 +1,171 @@ +package orderbook + +import ( + "math" + + "github.com/holiman/uint256" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// There are two different exchanges that can be simulated: +// +// 1. You know how much you can *give* to the pool, and are curious about the +// resulting payout. We call this a "deposit", and you should pass +// tradeTypeDeposit. +// +// 2. You know how much you'd like to *receive* from the pool, and want to know +// how much to deposit to achieve this. We call this an "expectation", and you +// should pass tradeTypeExpectation. +const ( + tradeTypeDeposit = iota // deposit into pool, what's the payout? + tradeTypeExpectation = iota // expect payout, what to deposit? 
+) + +var ( + errPoolOverflows = errors.New("Liquidity pool overflows from this exchange") + errBadPoolType = errors.New("Unsupported liquidity pool: must be ConstantProduct") + errBadTradeType = errors.New("Unknown pool exchange type requested") + errBadAmount = errors.New("Exchange amount must be positive") +) + +// makeTrade simulates execution of an exchange with a liquidity pool. +// +// In (1), this returns the amount that would be paid out by the pool (in terms +// of the *other* asset) for depositing `amount` of `asset`. +// +// In (2), this returns the amount of `asset` you'd need to deposit to get +// `amount` of the *other* asset in return. +// +// Refer to https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md#pathpaymentstrictsendop-and-pathpaymentstrictreceiveop +// and the calculation functions (below) for details on the exchange algorithm. +// +// Warning: If you pass an asset that is NOT one of the pool reserves, the +// behavior of this function is undefined (for performance). +func makeTrade( + pool liquidityPool, + asset int32, + tradeType int, + amount xdr.Int64, +) (xdr.Int64, error) { + details, ok := pool.Body.GetConstantProduct() + if !ok { + return 0, errBadPoolType + } + + if amount <= 0 { + return 0, errBadAmount + } + + // determine which asset `amount` corresponds to + X, Y := details.ReserveA, details.ReserveB + if pool.assetA != asset { + X, Y = Y, X + } + + ok = false + var result xdr.Int64 + switch tradeType { + case tradeTypeDeposit: + result, ok = calculatePoolPayout(X, Y, amount, details.Params.Fee) + + case tradeTypeExpectation: + result, ok = calculatePoolExpectation(X, Y, amount, details.Params.Fee) + + default: + return 0, errBadTradeType + } + + if !ok { + // the error isn't strictly accurate (e.g. it could be div-by-0), but + // from the caller's perspective it's true enough + return 0, errPoolOverflows + } + return result, nil +} + +// calculatePoolPayout calculates the amount of `reserveB` disbursed from the +// pool for a `received` amount of `reserveA` . From CAP-38: +// +// y = floor[(1 - F) Yx / (X + x - Fx)] +// +// It returns false if the calculation overflows. +func calculatePoolPayout(reserveA, reserveB, received xdr.Int64, feeBips xdr.Int32) (xdr.Int64, bool) { + X, Y := uint256.NewInt(uint64(reserveA)), uint256.NewInt(uint64(reserveB)) + F, x := uint256.NewInt(uint64(feeBips)), uint256.NewInt(uint64(received)) + + // would this deposit overflow the reserve? + if received > math.MaxInt64-reserveA { + return 0, false + } + + // We do all of the math in bips, so it's all upscaled by this value. + maxBips := uint256.NewInt(10000) + f := new(uint256.Int).Sub(maxBips, F) // upscaled 1 - F + + // right half: X + (1 - F)x + denom := X.Mul(X, maxBips).Add(X, new(uint256.Int).Mul(x, f)) + if denom.IsZero() { // avoid div-by-zero panic + return 0, false + } + + // left half, a: (1 - F) Yx + numer := Y.Mul(Y, x).Mul(Y, f) + + // divide & check overflow + result := numer.Div(numer, denom) + + val := xdr.Int64(result.Uint64()) + return val, result.IsUint64() && val >= 0 +} + +// calculatePoolExpectation determines how much of `reserveA` you would need to +// put into a pool to get the `disbursed` amount of `reserveB`. +// +// x = ceil[Xy / ((Y - y)(1 - F))] +// +// It returns false if the calculation overflows. 
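+//
+// For example, with reserves X = 10000 and Y = 20000, a 30 bps fee, and a
+// desired payout y = 10000, this gives
+// x = ceil[10000*10000 / ((20000 - 10000) * 0.997)] = ceil[10030.09...] = 10031,
+// the same values exercised by the "30 bps fee actually charges 30 bps" case
+// in TestLiquidityPoolMath (pools_test.go).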
+func calculatePoolExpectation( + reserveA, reserveB, disbursed xdr.Int64, feeBips xdr.Int32, +) (xdr.Int64, bool) { + X, Y := uint256.NewInt(uint64(reserveA)), uint256.NewInt(uint64(reserveB)) + F, y := uint256.NewInt(uint64(feeBips)), uint256.NewInt(uint64(disbursed)) + + // sanity check: disbursing shouldn't underflow the reserve + if disbursed >= reserveB { + return 0, false + } + + // We do all of the math in bips, so it's all upscaled by this value. + maxBips := uint256.NewInt(10000) + f := new(uint256.Int).Sub(maxBips, F) // upscaled 1 - F + + denom := Y.Sub(Y, y).Mul(Y, f) // right half: (Y - y)(1 - F) + if denom.IsZero() { // avoid div-by-zero panic + return 0, false + } + + numer := X.Mul(X, y).Mul(X, maxBips) // left half: Xy + + result, rem := new(uint256.Int), new(uint256.Int) + result.Div(numer, denom) + rem.Mod(numer, denom) + + // hacky way to ceil(): if there's a remainder, add 1 + if !rem.IsZero() { + result.AddUint64(result, 1) + } + + val := xdr.Int64(result.Uint64()) + return val, result.IsUint64() && val >= 0 +} + +// getOtherAsset returns the other asset in the liquidity pool. Note that it + // doesn't check to make sure the passed in `asset` is actually part of the + // pool; behavior in that case is undefined. +func getOtherAsset(asset int32, pool liquidityPool) int32 { + if pool.assetA == asset { + return pool.assetB + } + return pool.assetA +} diff --git a/exp/orderbook/pools_test.go b/exp/orderbook/pools_test.go new file mode 100644 index 0000000000..67d56749d9 --- /dev/null +++ b/exp/orderbook/pools_test.go @@ -0,0 +1,324 @@ +package orderbook + +import ( + "math" + "math/big" + "math/rand" + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestLiquidityPoolExchanges(t *testing.T) { + graph := NewOrderBookGraph() + for _, poolEntry := range []xdr.LiquidityPoolEntry{ + eurUsdLiquidityPool, + eurYenLiquidityPool, + nativeUsdPool, + usdChfLiquidityPool, + } { + params := poolEntry.Body.MustConstantProduct().Params + graph.getOrCreateAssetID(params.AssetA) + graph.getOrCreateAssetID(params.AssetB) + } + t.Run("happy path", func(t *testing.T) { + for _, assetXDR := range []xdr.Asset{usdAsset, eurAsset} { + pool := graph.poolFromEntry(eurUsdLiquidityPool) + asset := graph.getOrCreateAssetID(assetXDR) + payout, err := makeTrade(pool, asset, tradeTypeDeposit, 500) + assert.NoError(t, err) + assert.EqualValues(t, 332, int64(payout)) + // reserves would now be: 668 of A, 1500 of B + // note pool object is unchanged so looping is safe + } + + for _, assetXDR := range []xdr.Asset{usdAsset, eurAsset} { + pool := graph.poolFromEntry(eurUsdLiquidityPool) + asset := graph.getOrCreateAssetID(assetXDR) + payout, err := makeTrade(pool, asset, tradeTypeExpectation, 332) + assert.NoError(t, err) + assert.EqualValues(t, 499, int64(payout)) + } + + // More sanity checks; if they fail, something was changed about how + // constant product liquidity pools work. + // + // We use these oddly-specific values because we rely on them again to + // validate paths in later tests.
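+ //
+ // For instance, the EUR/USD row below follows from the expectation
+ // formula with this pool's 1000/1000 reserves and a 30 bps fee:
+ // receiving 112 EUR requires depositing ceil[1000*112 / (888 * 0.997)]
+ // = 127 USD, the same 112/127 pair that the liquidity pool path tests
+ // rely on.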
+ testTable := []struct { + dstAsset xdr.Asset + pool xdr.LiquidityPoolEntry + expectedPayout xdr.Int64 + expectedInput xdr.Int64 + }{ + {yenAsset, eurYenLiquidityPool, 100, 112}, + {eurAsset, eurUsdLiquidityPool, 112, 127}, + {nativeAsset, nativeUsdPool, 5, 25}, + {usdAsset, usdChfLiquidityPool, 127, 342}, + {usdAsset, usdChfLiquidityPool, 27, 58}, + } + + for _, test := range testTable { + pool := graph.poolFromEntry(test.pool) + asset := graph.getOrCreateAssetID(test.dstAsset) + needed, err := makeTrade(pool, asset, tradeTypeExpectation, test.expectedPayout) + + assert.NoError(t, err) + assert.EqualValuesf(t, test.expectedInput, needed, + "expected exchange of %d %s -> %d %s, got %d", + test.expectedInput, graph.idToAssetString[getOtherAsset(asset, pool)], + test.expectedPayout, getCode(test.dstAsset), + needed) + } + }) + + t.Run("fail on bad exchange amounts", func(t *testing.T) { + badValues := []xdr.Int64{math.MaxInt64, math.MaxInt64 - 99, 0, -100} + for _, badValue := range badValues { + pool := graph.poolFromEntry(eurUsdLiquidityPool) + asset := graph.getOrCreateAssetID(usdAsset) + + _, err := makeTrade(pool, asset, tradeTypeDeposit, badValue) + assert.Error(t, err) + } + }) +} + +// TestLiquidityPoolMath is a robust suite of tests to ensure that the liquidity + // pool calculation functions are correct, taken from Stellar Core's tests here: + // + // https://github.com/stellar/stellar-core/blob/master/src/transactions/test/ExchangeTests.cpp#L948 +func TestLiquidityPoolMath(t *testing.T) { + iMax := xdr.Int64(math.MaxInt64) + send, recv := tradeTypeDeposit, tradeTypeExpectation + + t.Run("deposit edge cases", func(t *testing.T) { + // Sending deposit would overflow the reserve: + // low reserves but high deposit + assertPoolExchange(t, send, 100, iMax-100, 100, -1, 0, true, -1, 99) + assertPoolExchange(t, send, 100, iMax-99, 100, -1, 0, false, -1, -1) + + // high reserves but low deposit + assertPoolExchange(t, send, iMax-100, 100, iMax-100, -1, 0, true, -1, 99) + assertPoolExchange(t, send, iMax-99, 100, iMax-100, -1, 0, false, -1, -1) + + // fromPool = 0 + assertPoolExchange(t, send, 100, 2, 100, -1, 0, true, -1, 1) + assertPoolExchange(t, send, 100, 1, 100, -1, 0, false, -1, -1) + }) + + t.Run("disburse edge cases", func(t *testing.T) { + // Receiving maxReceiveFromPool would deplete the reserves entirely: + // low reserves and low maxReceive + assertPoolExchange(t, recv, 100, -1, 100, 99, 0, true, 9900, -1) + assertPoolExchange(t, recv, 100, -1, 100, 100, 0, false, -1, -1) + + // high reserves and high maxReceive + assertPoolExchange(t, recv, 100, -1, iMax/100, iMax/100-1, 0, true, iMax-107, -1) + assertPoolExchange(t, recv, 100, -1, iMax/100, iMax/100, 0, false, -1, -1) + + // If + // fromPool = k*(maxBps - feeBps) and + // reservesFromPool = fromPool + 1 + // then + // (reservesToPool * fromPool) / (maxBps - feeBps) + // = k * reservesToPool + // so if k = 101 and reservesToPool = iMax / 100 then B / C > iMax during division + assertPoolExchange(t, recv, iMax/100, -1, 101*10000+1, 101*10000, 0, false, -1, -1) + + // If + // fromPool = maxBps - feeBps and + // reservesFromPool = fromPool + 1 + // then + // toPool = maxBps * reservesToPool + // so if reservesToPool = iMax / 100 then the division overflows.
+ assertPoolExchange(t, recv, iMax/100, -1, 10000+1, 10000, 0, false, -1, -1) + + // Pool receives more than it has available reserves for + assertPoolExchange(t, recv, iMax-100, -1, iMax/2, 49, 0, true, 98, -1) + assertPoolExchange(t, recv, iMax-100, -1, iMax/2, 50, 0, false, -1, -1) + }) + + t.Run("No fees", func(t *testing.T) { + // Deposits + assertPoolExchange(t, send, 100, 100, 100, -1, 0, true, -1, 50) // work exactly + assertPoolExchange(t, send, 100, 50, 100, -1, 0, true, -1, 33) // require sending + assertPoolExchange(t, send, 100, 0, 100, -1, 0, false, -1, -1) // sending 0 + assertPoolExchange(t, send, 100, iMax-99, 100, -1, 0, false, -1, -1) // sending too much + + // Disburses + assertPoolExchange(t, recv, 100, -1, 100, 50, 0, true, 100, -1) // work exactly + assertPoolExchange(t, recv, 100, -1, 100, 33, 0, true, 50, -1) // require recving + assertPoolExchange(t, recv, 100, -1, 100, 0, 0, true, 0, -1) // receiving 0 + assertPoolExchange(t, recv, 100, -1, 100, 100, 0, false, -1, -1) // receiving too much + }) + + // These test cases look weird because they actually charge 31 bps instead + // of 30 bps. But this is expected, because you pay fees on the fees you + // provided: I want to send 10000 after fees, so I send 10030... but that + // doesn't work because 0.997 * 10030 = 9999.910 is too low. + t.Run("30 bps fee actually charges 30 bps", func(t *testing.T) { + + // With no fee, sending 10000 would receive 10000. So to receive + // 10000 we need to send ceil(10000 / 0.997) = 10031. + assertPoolExchange(t, send, 10000, 10031, 20000, -1, 30, true, -1, 10000) + + // With no fee, sending 10000 would receive 10000. So to send + // ceil(10000 / 0.997) = 10031 we need to receive 10000. + assertPoolExchange(t, recv, 10000, -1, 20000, 10000, 30, true, 10031, -1) + }) + + t.Run("Potential Internal Overflow", func(t *testing.T) { + + // Test for internal uint128 underflow/overflow in calculatePoolPayout() and calculatePoolExpectation() by providing + // input values which push the internal intermediate calculations to their maximums + + assertPoolExchange(t, send, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, false, 0, 0) + assertPoolExchange(t, send, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, false, 0, 0) + assertPoolExchange(t, recv, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, 0, false, 0, 0) + + // Check with reserveB < disbursed + assertPoolExchange(t, recv, math.MaxInt64, math.MaxInt64, 0, 1, 0, false, 0, 0) + + // Check with poolFeeBips > 10000 + assertPoolExchange(t, send, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, 10001, false, 0, 0) + assertPoolExchange(t, recv, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, 10010, false, 0, 0) + + assertPoolExchange(t, send, 92017260901926686, 9157376027422527, 4000000000000000000, 30, 1, false, 0, 0) + }) +} + +// assertPoolExchange validates that pool inputs match their expected outputs.
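+//
+// Callers typically pass -1 for whichever of deposited/disbursed (and of the
+// expected values) is not exercised by the chosen exchangeType:
+// tradeTypeDeposit runs calculatePoolPayout and checks the disbursed side,
+// while tradeTypeExpectation runs calculatePoolExpectation and checks the
+// deposited side.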
+func assertPoolExchange(t *testing.T, + exchangeType int, + reservesBeingDeposited, deposited xdr.Int64, + reservesBeingDisbursed, disbursed xdr.Int64, + poolFeeBips xdr.Int32, + expectedReturn bool, expectedDeposited, expectedDisbursed xdr.Int64, +) { + var ok bool + toPool, fromPool := xdr.Int64(-1), xdr.Int64(-1) + + switch exchangeType { + case tradeTypeDeposit: + fromPool, ok = calculatePoolPayout( + reservesBeingDeposited, reservesBeingDisbursed, + deposited, poolFeeBips) + + case tradeTypeExpectation: + toPool, ok = calculatePoolExpectation( + reservesBeingDeposited, reservesBeingDisbursed, + disbursed, poolFeeBips) + + default: + t.FailNow() + } + + if expectedReturn && assert.Equal(t, expectedReturn, ok, "wrong exchange success state") { + assert.EqualValues(t, expectedDisbursed, fromPool, "wrong payout") + assert.EqualValues(t, expectedDeposited, toPool, "wrong expectation") + } +} + +func TestCalculatePoolExpectations(t *testing.T) { + for i := 0; i < 1000000; i++ { + reserveA := xdr.Int64(rand.Int63()) + reserveB := xdr.Int64(rand.Int63()) + disbursed := xdr.Int64(rand.Int63()) + + result, ok := calculatePoolExpectationBig(reserveA, reserveB, disbursed, 30) + result1, ok1 := calculatePoolExpectation(reserveA, reserveB, disbursed, 30) + if assert.Equal(t, ok, ok1) { + assert.Equal(t, result, result1) + } + } +} + +func TestCalculatePoolPayout(t *testing.T) { + for i := 0; i < 1000000; i++ { + reserveA := xdr.Int64(rand.Int63()) + reserveB := xdr.Int64(rand.Int63()) + received := xdr.Int64(rand.Int63()) + + result, ok := calculatePoolPayoutBig(reserveA, reserveB, received, 30) + result1, ok1 := calculatePoolPayout(reserveA, reserveB, received, 30) + if assert.Equal(t, ok, ok1) { + assert.Equal(t, result, result1) + } + } +} + +// calculatePoolPayout calculates the amount of `reserveB` disbursed from the +// pool for a `received` amount of `reserveA` . From CAP-38: +// +// y = floor[(1 - F) Yx / (X + x - Fx)] +// +// It returns false if the calculation overflows. +func calculatePoolPayoutBig(reserveA, reserveB, received xdr.Int64, feeBips xdr.Int32) (xdr.Int64, bool) { + X, Y := big.NewInt(int64(reserveA)), big.NewInt(int64(reserveB)) + F, x := big.NewInt(int64(feeBips)), big.NewInt(int64(received)) + + // would this deposit overflow the reserve? + if received > math.MaxInt64-reserveA { + return 0, false + } + + // We do all of the math in bips, so it's all upscaled by this value. + maxBips := big.NewInt(10000) + f := new(big.Int).Sub(maxBips, F) // upscaled 1 - F + + // right half: X + (1 - F)x + denom := X.Mul(X, maxBips).Add(X, new(big.Int).Mul(x, f)) + if denom.Cmp(big.NewInt(0)) == 0 { // avoid div-by-zero panic + return 0, false + } + + // left half, a: (1 - F) Yx + numer := Y.Mul(Y, x).Mul(Y, f) + + // divide & check overflow + result := numer.Div(numer, denom) + + i := xdr.Int64(result.Int64()) + return i, result.IsInt64() && i >= 0 +} + +// calculatePoolExpectation determines how much of `reserveA` you would need to +// put into a pool to get the `disbursed` amount of `reserveB`. +// +// x = ceil[Xy / ((Y - y)(1 - F))] +// +// It returns false if the calculation overflows. 
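+//
+// calculatePoolExpectationBig mirrors calculatePoolExpectation from pools.go
+// but uses math/big instead of uint256; the randomized
+// TestCalculatePoolExpectations above compares the two implementations
+// against each other.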
+func calculatePoolExpectationBig( + reserveA, reserveB, disbursed xdr.Int64, feeBips xdr.Int32, +) (xdr.Int64, bool) { + X, Y := big.NewInt(int64(reserveA)), big.NewInt(int64(reserveB)) + F, y := big.NewInt(int64(feeBips)), big.NewInt(int64(disbursed)) + + // sanity check: disbursing shouldn't underflow the reserve + if disbursed >= reserveB { + return 0, false + } + + // We do all of the math in bips, so it's all upscaled by this value. + maxBips := big.NewInt(10000) + f := new(big.Int).Sub(maxBips, F) // upscaled 1 - F + + denom := Y.Sub(Y, y).Mul(Y, f) // right half: (Y - y)(1 - F) + if denom.Cmp(big.NewInt(0)) == 0 { // avoid div-by-zero panic + return 0, false + } + + numer := X.Mul(X, y).Mul(X, maxBips) // left half: Xy + + result, rem := new(big.Int), new(big.Int) + result.DivMod(numer, denom, rem) + + // hacky way to ceil(): if there's a remainder, add 1 + if rem.Cmp(big.NewInt(0)) > 0 { + result.Add(result, big.NewInt(1)) + } + + return xdr.Int64(result.Int64()), result.IsInt64() +} diff --git a/exp/orderbook/search.go b/exp/orderbook/search.go new file mode 100644 index 0000000000..0c2ed1469a --- /dev/null +++ b/exp/orderbook/search.go @@ -0,0 +1,576 @@ +package orderbook + +import ( + "context" + + "github.com/stellar/go/price" + "github.com/stellar/go/xdr" +) + +// Path represents a payment path from a source asset to some destination asset +type Path struct { + SourceAsset string + SourceAmount xdr.Int64 + DestinationAsset string + DestinationAmount xdr.Int64 + + InteriorNodes []string +} + +type liquidityPool struct { + xdr.LiquidityPoolEntry + assetA int32 + assetB int32 +} + +type Venues struct { + offers []xdr.OfferEntry + pool liquidityPool // can be empty, check body pointer +} + +type searchState interface { + // totalAssets returns the total number of assets in the search space. + totalAssets() int32 + + // considerPools returns true if we will consider liquidity pools in our path + // finding search. + considerPools() bool + + // isTerminalNode returns true if the current asset is a terminal node in our + // path finding search. + isTerminalNode(asset int32) bool + + // includePath returns true if the current path which ends at the given asset + // and produces the given amount satisfies our search criteria. + includePath( + currentAsset int32, + currentAssetAmount xdr.Int64, + ) bool + + // betterPathAmount returns true if alternativeAmount is better than currentAmount + // Given two paths (current path and alternative path) which lead to the same asset + // but possibly have different amounts of that asset, betterPathAmount will return + // true if the alternative path is better than the current path. + betterPathAmount(currentAmount, alternativeAmount xdr.Int64) bool + + // appendToPaths appends the current path to our result list. + appendToPaths( + path []int32, + currentAsset int32, + currentAssetAmount xdr.Int64, + ) + + // venues returns all possible trading opportunities for a particular asset. + // + // The result is grouped by the next asset hop, mapping to a sorted list of + // offers (by price) and a liquidity pool (if one exists for that trading + // pair). + venues(currentAsset int32) edgeSet + + // consumeOffers will consume the given set of offers to trade our + // current asset for a different asset. + consumeOffers( + currentAssetAmount xdr.Int64, + currentBestAmount xdr.Int64, + offers []xdr.OfferEntry, + ) (xdr.Int64, error) + + // consumePool will consume the given liquidity pool to trade our + // current asset for a different asset. 
+ consumePool( + pool liquidityPool, + currentAsset int32, + currentAssetAmount xdr.Int64, + ) (xdr.Int64, error) +} + +type pathNode struct { + asset int32 + prev *pathNode +} + +func (p *pathNode) contains(node int32) bool { + for cur := p; cur != nil; cur = cur.prev { + if cur.asset == node { + return true + } + } + return false +} + +func reversePath(path []int32) { + for i := len(path)/2 - 1; i >= 0; i-- { + opp := len(path) - 1 - i + path[i], path[opp] = path[opp], path[i] + } +} + +func (e *pathNode) path() []int32 { + // Initialize slice capacity to minimize allocations. + // 8 is the maximum path supported by stellar. + result := make([]int32, 0, 8) + for cur := e; cur != nil; cur = cur.prev { + result = append(result, cur.asset) + } + + reversePath(result) + return result +} + +func search( + ctx context.Context, + state searchState, + maxPathLength int, + sourceAsset int32, + sourceAssetAmount xdr.Int64, +) error { + totalAssets := state.totalAssets() + bestAmount := make([]xdr.Int64, totalAssets) + updateAmount := make([]xdr.Int64, totalAssets) + bestPath := make([]*pathNode, totalAssets) + updatePath := make([]*pathNode, totalAssets) + updatedAssets := make([]int32, 0, totalAssets) + // Used to minimize allocations + slab := make([]pathNode, 0, totalAssets) + bestAmount[sourceAsset] = sourceAssetAmount + updateAmount[sourceAsset] = sourceAssetAmount + bestPath[sourceAsset] = &pathNode{ + asset: sourceAsset, + prev: nil, + } + // Simple payments (e.g. payments where an asset is transferred from + // one account to another without any conversions into another asset) + // are also valid path payments. If the source asset is a valid + // destination asset we include the empty path in the response. + if state.includePath(sourceAsset, sourceAssetAmount) { + state.appendToPaths( + []int32{sourceAsset}, + sourceAsset, + sourceAssetAmount, + ) + } + + for i := 0; i < maxPathLength; i++ { + updatedAssets = updatedAssets[:0] + + for currentAsset := int32(0); currentAsset < totalAssets; currentAsset++ { + currentAmount := bestAmount[currentAsset] + if currentAmount == 0 { + continue + } + pathToCurrentAsset := bestPath[currentAsset] + edges := state.venues(currentAsset) + for j := 0; j < len(edges); j++ { + // Exit early if the context was cancelled. + if err := ctx.Err(); err != nil { + return err + } + nextAsset, venues := edges[j].key, edges[j].value + + // If we're on our last step ignore any edges which don't lead to + // our desired destination. This optimization will save us from + // doing wasted computation. + if i == maxPathLength-1 && !state.isTerminalNode(nextAsset) { + continue + } + + // Make sure we don't visit a node more than once. + if pathToCurrentAsset.contains(nextAsset) { + continue + } + + nextAssetAmount, err := processVenues(state, currentAsset, currentAmount, venues) + if err != nil { + return err + } + if nextAssetAmount <= 0 { + continue + } + + if state.betterPathAmount(updateAmount[nextAsset], nextAssetAmount) { + newEntry := updateAmount[nextAsset] == bestAmount[nextAsset] + updateAmount[nextAsset] = nextAssetAmount + + if newEntry { + updatedAssets = append(updatedAssets, nextAsset) + // By piggybacking on slice appending (which uses exponential allocation) + // we avoid allocating each node individually, which is much slower and + // puts more pressure on the garbage collector. 
+ slab = append(slab, pathNode{ + asset: nextAsset, + prev: pathToCurrentAsset, + }) + updatePath[nextAsset] = &slab[len(slab)-1] + } else { + updatePath[nextAsset].prev = pathToCurrentAsset + } + + // We could avoid this step until the last iteration, but we would + // like to include multiple paths in the response to give the user + // other options in case the best path is already consumed. + if state.includePath(nextAsset, nextAssetAmount) { + state.appendToPaths( + append(bestPath[currentAsset].path(), nextAsset), + nextAsset, + nextAssetAmount, + ) + } + } + } + } + + // Only update bestPath and bestAmount if we have more iterations left in + // the algorithm. This optimization will save us from doing wasted + // computation. + if i < maxPathLength-1 { + for _, asset := range updatedAssets { + bestPath[asset] = updatePath[asset] + bestAmount[asset] = updateAmount[asset] + } + } + } + + return nil +} + +// sellingGraphSearchState configures a DFS on the orderbook graph where only +// edges in `graph.edgesForSellingAsset` are traversed. +// +// The DFS maintains the following invariants: +// - no node is repeated +// - no offers are consumed from the `ignoreOffersFrom` account +// - each payment path must begin with an asset in `targetAssets` +// - also, the required source asset amount cannot exceed the balance in +// `targetAssets` +type sellingGraphSearchState struct { + graph *OrderBookGraph + destinationAssetString string + destinationAssetAmount xdr.Int64 + ignoreOffersFrom *xdr.AccountId + targetAssets map[int32]xdr.Int64 + validateSourceBalance bool + paths []Path + includePools bool +} + +func (state *sellingGraphSearchState) totalAssets() int32 { + return int32(len(state.graph.idToAssetString)) +} + +func (state *sellingGraphSearchState) isTerminalNode(currentAsset int32) bool { + _, ok := state.targetAssets[currentAsset] + return ok +} + +func (state *sellingGraphSearchState) includePath(currentAsset int32, currentAssetAmount xdr.Int64) bool { + targetAssetBalance, ok := state.targetAssets[currentAsset] + return ok && (!state.validateSourceBalance || targetAssetBalance >= currentAssetAmount) +} + +func (state *sellingGraphSearchState) betterPathAmount(currentAmount, alternativeAmount xdr.Int64) bool { + if currentAmount == 0 { + return true + } + if alternativeAmount == 0 { + return false + } + return alternativeAmount < currentAmount +} + +func assetIDsToAssetStrings(graph *OrderBookGraph, path []int32) []string { + result := make([]string, len(path)) + for i := 0; i < len(path); i++ { + result[i] = graph.idToAssetString[path[i]] + } + return result +} + +func (state *sellingGraphSearchState) appendToPaths( + path []int32, + currentAsset int32, + currentAssetAmount xdr.Int64, +) { + if len(path) > 2 { + path = path[1 : len(path)-1] + reversePath(path) + } else { + path = []int32{} + } + + state.paths = append(state.paths, Path{ + SourceAmount: currentAssetAmount, + SourceAsset: state.graph.idToAssetString[currentAsset], + InteriorNodes: assetIDsToAssetStrings(state.graph, path), + DestinationAsset: state.destinationAssetString, + DestinationAmount: state.destinationAssetAmount, + }) +} + +func (state *sellingGraphSearchState) venues(currentAsset int32) edgeSet { + return state.graph.venuesForSellingAsset[currentAsset] +} + +func (state *sellingGraphSearchState) consumeOffers( + currentAssetAmount xdr.Int64, + currentBestAmount xdr.Int64, + offers []xdr.OfferEntry, +) (xdr.Int64, error) { + nextAmount, err := consumeOffersForSellingAsset( + offers, state.ignoreOffersFrom, 
currentAssetAmount, currentBestAmount) + + return positiveMin(currentBestAmount, nextAmount), err +} + +func (state *sellingGraphSearchState) considerPools() bool { + return state.includePools +} + +func (state *sellingGraphSearchState) consumePool( + pool liquidityPool, + currentAsset int32, + currentAssetAmount xdr.Int64, +) (xdr.Int64, error) { + // How many of the previous hop do we need to get this amount? + return makeTrade(pool, getOtherAsset(currentAsset, pool), + tradeTypeExpectation, currentAssetAmount) +} + +// buyingGraphSearchState configures a DFS on the orderbook graph where only +// edges in `graph.edgesForBuyingAsset` are traversed. +// +// The DFS maintains the following invariants: +// - no node is repeated +// - no offers are consumed from the `ignoreOffersFrom` account +// - each payment path must terminate with an asset in `targetAssets` +// - each payment path must begin with `sourceAsset` +type buyingGraphSearchState struct { + graph *OrderBookGraph + sourceAssetString string + sourceAssetAmount xdr.Int64 + targetAssets map[int32]bool + paths []Path + includePools bool +} + +func (state *buyingGraphSearchState) totalAssets() int32 { + return int32(len(state.graph.idToAssetString)) +} + +func (state *buyingGraphSearchState) isTerminalNode(currentAsset int32) bool { + return state.targetAssets[currentAsset] +} + +func (state *buyingGraphSearchState) includePath(currentAsset int32, currentAssetAmount xdr.Int64) bool { + return state.targetAssets[currentAsset] +} + +func (state *buyingGraphSearchState) betterPathAmount(currentAmount, alternativeAmount xdr.Int64) bool { + return alternativeAmount > currentAmount +} + +func (state *buyingGraphSearchState) appendToPaths( + path []int32, + currentAsset int32, + currentAssetAmount xdr.Int64, +) { + if len(path) > 2 { + path = path[1 : len(path)-1] + } else { + path = []int32{} + } + + state.paths = append(state.paths, Path{ + SourceAmount: state.sourceAssetAmount, + SourceAsset: state.sourceAssetString, + InteriorNodes: assetIDsToAssetStrings(state.graph, path), + DestinationAsset: state.graph.idToAssetString[currentAsset], + DestinationAmount: currentAssetAmount, + }) +} + +func (state *buyingGraphSearchState) venues(currentAsset int32) edgeSet { + return state.graph.venuesForBuyingAsset[currentAsset] +} + +func (state *buyingGraphSearchState) consumeOffers( + currentAssetAmount xdr.Int64, + currentBestAmount xdr.Int64, + offers []xdr.OfferEntry, +) (xdr.Int64, error) { + nextAmount, err := consumeOffersForBuyingAsset(offers, currentAssetAmount) + + return max(nextAmount, currentBestAmount), err +} + +func (state *buyingGraphSearchState) considerPools() bool { + return state.includePools +} + +func (state *buyingGraphSearchState) consumePool( + pool liquidityPool, + currentAsset int32, + currentAssetAmount xdr.Int64, +) (xdr.Int64, error) { + return makeTrade(pool, currentAsset, tradeTypeDeposit, currentAssetAmount) +} + +func consumeOffersForSellingAsset( + offers []xdr.OfferEntry, + ignoreOffersFrom *xdr.AccountId, + currentAssetAmount xdr.Int64, + currentBestAmount xdr.Int64, +) (xdr.Int64, error) { + if len(offers) == 0 { + return 0, errEmptyOffers + } + + if currentAssetAmount == 0 { + return 0, errAssetAmountIsZero + } + + totalConsumed := xdr.Int64(0) + for i := 0; i < len(offers); i++ { + if ignoreOffersFrom != nil && ignoreOffersFrom.Equals(offers[i].SellerId) { + continue + } + + buyingUnitsFromOffer, sellingUnitsFromOffer, err := price.ConvertToBuyingUnits( + int64(offers[i].Amount), + int64(currentAssetAmount), 
+ int64(offers[i].Price.N), + int64(offers[i].Price.D), + ) + if err == price.ErrOverflow { + // skip paths which would result in overflow errors + // but still continue the path finding search + return -1, nil + } else if err != nil { + return -1, err + } + + totalConsumed += xdr.Int64(buyingUnitsFromOffer) + + // For sell-state, we are aiming to *minimize* the amount of the source + // assets we need to get to the destination, so if we exceed the best + // amount, it's time to bail. + // + // FIXME: Evaluate if this can work, and if it's actually performant. + // if totalConsumed >= currentBestAmount && currentBestAmount > 0 { + // return currentBestAmount, nil + // } + + currentAssetAmount -= xdr.Int64(sellingUnitsFromOffer) + + if currentAssetAmount == 0 { + return totalConsumed, nil + } + if currentAssetAmount < 0 { + return -1, errSoldTooMuch + } + } + + return -1, nil +} + +func consumeOffersForBuyingAsset( + offers []xdr.OfferEntry, + currentAssetAmount xdr.Int64, +) (xdr.Int64, error) { + if len(offers) == 0 { + return 0, errEmptyOffers + } + + if currentAssetAmount == 0 { + return 0, errAssetAmountIsZero + } + + totalConsumed := xdr.Int64(0) + for i := 0; i < len(offers); i++ { + n := int64(offers[i].Price.N) + d := int64(offers[i].Price.D) + + // check if we can spend all of currentAssetAmount on the current offer + // otherwise consume entire offer and move on to the next one + amountSold, err := price.MulFractionRoundDown(int64(currentAssetAmount), d, n) + if err == nil { + if amountSold == 0 { + // not enough of the buying asset to consume the offer + return -1, nil + } + if amountSold < 0 { + return -1, errSoldTooMuch + } + + amountSoldXDR := xdr.Int64(amountSold) + if amountSoldXDR <= offers[i].Amount { + totalConsumed += amountSoldXDR + return totalConsumed, nil + } + } else if err != price.ErrOverflow { + return -1, err + } + + buyingUnitsFromOffer, sellingUnitsFromOffer, err := price.ConvertToBuyingUnits( + int64(offers[i].Amount), + int64(offers[i].Amount), + n, + d, + ) + if err == price.ErrOverflow { + // skip paths which would result in overflow errors + // but still continue the path finding search + return -1, nil + } else if err != nil { + return -1, err + } + + totalConsumed += xdr.Int64(sellingUnitsFromOffer) + currentAssetAmount -= xdr.Int64(buyingUnitsFromOffer) + + if currentAssetAmount == 0 { + return totalConsumed, nil + } + if currentAssetAmount < 0 { + return -1, errSoldTooMuch + } + } + + return -1, nil +} + +func processVenues( + state searchState, + currentAsset int32, + currentAssetAmount xdr.Int64, + venues Venues, +) (xdr.Int64, error) { + if currentAssetAmount == 0 { + return 0, errAssetAmountIsZero + } + + // We evaluate the pool venue (if any) before offers, because pool exchange + // rates can only be evaluated with an amount. + poolAmount := xdr.Int64(0) + if pool := venues.pool; state.considerPools() && pool.Body.ConstantProduct != nil { + amount, err := state.consumePool(pool, currentAsset, currentAssetAmount) + if err == nil { + poolAmount = amount + } + // It's only a true error if the offers fail later, too + } + + if poolAmount == 0 && len(venues.offers) == 0 { + return -1, nil // not really an error + } + + // This will return the pool amount if the LP performs better. + nextAssetAmount, err := state.consumeOffers( + currentAssetAmount, poolAmount, venues.offers) + + // Only error out the offers if the LP trade didn't happen. 
+ if err != nil && poolAmount == 0 { + return 0, err + } + + return nextAssetAmount, nil +} diff --git a/exp/orderbook/testdata/sample-requests b/exp/orderbook/testdata/sample-requests new file mode 100644 index 0000000000..13562b8a07 --- /dev/null +++ b/exp/orderbook/testdata/sample-requests @@ -0,0 +1,100 @@ +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SHX&source_asset_issuer=GDSTRSHXHGJ7ZIVRBXEYE5Q74XUVCUSEKEBR7UCHEUUEK72N7I7KJ6JH&source_amount=145.0023490&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=672.0520437&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XXA&source_asset_issuer=GC4HS4CQCZULIOTGLLPGRAAMSBDLFRR6Y7HCUQG66LNQDISXKIXXADIM&source_amount=4.0777510&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=MOBI&source_asset_issuer=GA6HCMBLTZS5VYYBCATRBRZ3BZJMAFUDKYYF6AH6MVCMGWMRDNSWJPIH&source_amount=70.5089335&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=725.4998694&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=ETH&source_asset_issuer=GBDEVU63Y6NTHJQQZIKVTC23NWLQVP3WJ2RI2OTSJTNYOIGICST6DUXR&source_amount=0.0328678&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=LSP&source_asset_issuer=GAB7STHVD5BDH3EEYXPI3OM7PCS4V443PYB5FNT6CFGJVPDLMKDM24WK&source_amount=2732.7661658&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XXA&source_asset_issuer=GC4HS4CQCZULIOTGLLPGRAAMSBDLFRR6Y7HCUQG66LNQDISXKIXXADIM&source_amount=284.2488191&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=WHIPLASH&source_asset_issuer=GD433VZYCLHOQL2AZZGRXCDU7JGPMOY6NC732WQJJ7PN6JEWBYBWANQK&source_amount=16844.2802694&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0005368&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=18443.2722046&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=USDT&source_asset_issuer=GCQTGZQQ5G4PTM2GL7CDIFKUBIPEC52BROAQIAPW53XBRJVN6ZJVTG6V&source_amount=554.2610400&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XLMG&source_asset_issuer=GCVNN7O5JISPEYUTLK3JYGBDWCPDIHB4MTG4PMSJVIKJCR64NOXWI3YH&source_amount=551.7667572&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=2517.1781427&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 
+/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=34613.8515807&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=MOBI&source_asset_issuer=GA6HCMBLTZS5VYYBCATRBRZ3BZJMAFUDKYYF6AH6MVCMGWMRDNSWJPIH&source_amount=69.5684115&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=617.6194476&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=USDC&source_asset_issuer=GA5ZSEJYB37JRC5AVCIA5MOP4RHTM335X2KGX3IHOJAPP5RE34K4KZVN&source_amount=554.4740000&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=EURT&source_asset_issuer=GAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S&source_amount=421.2027000&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=622.6664621&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=GDOGE&source_asset_issuer=GBJQWD2FBFFPUMICLLBEWUIZNQ7UVLDMB7NCDID7Z6ORJ36VKJCYW3C2&source_amount=30339.8058252&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=646.0031783&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XRP&source_asset_issuer=GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P&source_amount=11.8089921&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=45625.5703196&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XLPG&source_asset_issuer=GDDRETFPCQIDWH3LNMIONXSSBWYLXZFSF3WY6UCCIF6NMTW2UKA3R4NX&source_amount=529.0280931&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=255734.3664103&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XXA&source_asset_issuer=GC4HS4CQCZULIOTGLLPGRAAMSBDLFRR6Y7HCUQG66LNQDISXKIXXADIM&source_amount=190.2044708&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XLPG&source_asset_issuer=GDDRETFPCQIDWH3LNMIONXSSBWYLXZFSF3WY6UCCIF6NMTW2UKA3R4NX&source_amount=31.0110564&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BRL&source_asset_issuer=GDVKY2GU2DRXWTBEYJJWSFXIGBZV6AZNBVVSUHEPZI54LIS6BA7DVVSP&source_amount=205.1160000&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 
+/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=4805.2769176&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=USDC&source_asset_issuer=GA5ZSEJYB37JRC5AVCIA5MOP4RHTM335X2KGX3IHOJAPP5RE34K4KZVN&source_amount=554.4740000&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SLT&source_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&source_amount=0.2565695&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=15180.5099129&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SLT&source_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&source_amount=0.2927787&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XXA&source_asset_issuer=GC4HS4CQCZULIOTGLLPGRAAMSBDLFRR6Y7HCUQG66LNQDISXKIXXADIM&source_amount=190.2044708&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=LRT&source_asset_issuer=GB7EFWTJFVSB3N6F5YJFWK3SHYJ7YI3O5RUUWLF2PWREPN4QNHLRWCAS&source_amount=4465.0830505&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=660.0137283&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=725.4998694&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=ETH&source_asset_issuer=GBDEVU63Y6NTHJQQZIKVTC23NWLQVP3WJ2RI2OTSJTNYOIGICST6DUXR&source_amount=0.0004502&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XXA&source_asset_issuer=GC4HS4CQCZULIOTGLLPGRAAMSBDLFRR6Y7HCUQG66LNQDISXKIXXADIM&source_amount=4.0732444&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=SDOGE&source_asset_issuer=GDKZXCRYKOFFSRI37TI4XQCUQJH544M5SRC2TNFTWIDEO4Z6MUZO2FDS&source_amount=2595.7844461&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=AFR&source_asset_issuer=GBX6YI45VU7WNAAKA3RBFDR3I3UKNFHTJPQ5F6KOOKSGYIAM4TRQN54W&source_amount=1805.1193184&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=725.4998694&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=679.3847492&destination_assets=native 
+/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0135772&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=SDOGE&source_asset_issuer=GDKZXCRYKOFFSRI37TI4XQCUQJH544M5SRC2TNFTWIDEO4Z6MUZO2FDS&source_amount=2694.8366929&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=255734.3664103&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=667.6191366&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XLPG&source_asset_issuer=GDDRETFPCQIDWH3LNMIONXSSBWYLXZFSF3WY6UCCIF6NMTW2UKA3R4NX&source_amount=0.3632401&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=USDT&source_asset_issuer=GCQTGZQQ5G4PTM2GL7CDIFKUBIPEC52BROAQIAPW53XBRJVN6ZJVTG6V&source_amount=43.3991714&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XRP&source_asset_issuer=GBVOL67TMUQBGL4TZYNMY3ZQ5WGQYFPFD5VJRWXR72VA33VFNL225PL5&source_amount=81.5476156&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=662.2604273&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0301716&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0015092&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XRP&source_asset_issuer=GCNSGHUCG5VMGLT5RIYYZSO7VQULQKAJ62QA33DBC5PPBSO57LFWVV6P&source_amount=0.8333333&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SLT&source_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&source_amount=0.2882062&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=69317.7186502&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=yXLM&source_asset_issuer=GARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55&source_amount=2.5000009&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0135830&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 
+/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0301716&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=20764.0407787&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SHIB&source_asset_issuer=GBNO52SQ5WBG3N7Y35HO7BOWNGEWLNJCJK4N4J6XBWLCHZ3AZHWOPRKF&source_amount=15586.0349127&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=20764.0407787&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=GTN&source_asset_issuer=GARFMAHQM4JDI55SK2FGEPLOZU7BTEODS3Y5QNT3VMQQIU3WV2HTBA46&source_amount=24.7839023&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=ETH&source_asset_issuer=GBDEVU63Y6NTHJQQZIKVTC23NWLQVP3WJ2RI2OTSJTNYOIGICST6DUXR&source_amount=0.1055100&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XLMG&source_asset_issuer=GCVNN7O5JISPEYUTLK3JYGBDWCPDIHB4MTG4PMSJVIKJCR64NOXWI3YH&source_amount=53736.5085759&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=MOBI&source_asset_issuer=GA6HCMBLTZS5VYYBCATRBRZ3BZJMAFUDKYYF6AH6MVCMGWMRDNSWJPIH&source_amount=12813.7329364&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=VELO&source_asset_issuer=GDM4RQUQQUVSKQA7S6EM7XBZP3FCGH4Q7CL6TABQ7B2BEJ5ERARM2M5M&source_amount=152.0833460&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SLT&source_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&source_amount=16.3311016&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=NORA&source_asset_issuer=GCWLS57RYB67I3LYXQECRVSRB62OVQPF3U2DCTDJLJ5ZETY6TFQ6VL2W&source_amount=58218.1624678&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0000336&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=USDT&source_asset_issuer=GCQTGZQQ5G4PTM2GL7CDIFKUBIPEC52BROAQIAPW53XBRJVN6ZJVTG6V&source_amount=1.3714755&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XLMG&source_asset_issuer=GCVNN7O5JISPEYUTLK3JYGBDWCPDIHB4MTG4PMSJVIKJCR64NOXWI3YH&source_amount=589.8034775&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SLT&source_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&source_amount=0.2927794&destination_assets=native 
+/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XLMG&source_asset_issuer=GCVNN7O5JISPEYUTLK3JYGBDWCPDIHB4MTG4PMSJVIKJCR64NOXWI3YH&source_amount=551.4016630&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=WHIPLASH&source_asset_issuer=GD433VZYCLHOQL2AZZGRXCDU7JGPMOY6NC732WQJJ7PN6JEWBYBWANQK&source_amount=243365.7821043&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SHX&source_asset_issuer=GDSTRSHXHGJ7ZIVRBXEYE5Q74XUVCUSEKEBR7UCHEUUEK72N7I7KJ6JH&source_amount=172470.4360779&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=32361.0416288&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=STD&source_asset_issuer=GABRD2ANVDM5XKMHIZ5K4JDAFLSAKAAHP3WR7DOPZJHALWE5C5UXSRYV&source_amount=35.7985914&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SLT&source_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&source_amount=0.1928011&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0301845&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0000339&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=TZS&source_asset_issuer=GA2MSSZKJOU6RNL3EJKH3S5TB5CDYTFQFWRYFGUJVIN5I6AOIRTLUHTO&source_amount=1739178.9999522&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0015043&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=DRA&source_asset_issuer=GCJKSAQECBGSLPQWAU7ME4LVQVZ6IDCNUA5NVTPPCUWZWBN5UBFMXZ53&source_amount=55710.2710579&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=ETH&source_asset_issuer=GBVOL67TMUQBGL4TZYNMY3ZQ5WGQYFPFD5VJRWXR72VA33VFNL225PL5&source_amount=0.0004920&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=TRUMPSARA&source_asset_issuer=GD6FAIEG7GN27ZIY4L6OATLRQYD5L5KLMUQWVPMZT553BR5UM7KLPI73&source_amount=4718048.3256848&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=BTC&source_asset_issuer=GAUTUYY2THLF7SGITDFMXJVYH3LHDSMGEAKSBU267M2K7A3W543CKUEF&source_amount=0.0034950&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 
+/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=LSP&source_asset_issuer=GAB7STHVD5BDH3EEYXPI3OM7PCS4V443PYB5FNT6CFGJVPDLMKDM24WK&source_amount=33.3556371&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XXA&source_asset_issuer=GC4HS4CQCZULIOTGLLPGRAAMSBDLFRR6Y7HCUQG66LNQDISXKIXXADIM&source_amount=4.1666667&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=EURT&source_asset_issuer=GAP5LETOV6YIE62YAM56STDANPRDO7ZFDBGSNHJQIYGGKSMOZAHOOS2S&source_amount=20554.5200000&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=USDT&source_asset_issuer=GCQTGZQQ5G4PTM2GL7CDIFKUBIPEC52BROAQIAPW53XBRJVN6ZJVTG6V&source_amount=10.9471021&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=KIN&source_asset_issuer=GBDEVU63Y6NTHJQQZIKVTC23NWLQVP3WJ2RI2OTSJTNYOIGICST6DUXR&source_amount=4184.4505816&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XLPG&source_asset_issuer=GDDRETFPCQIDWH3LNMIONXSSBWYLXZFSF3WY6UCCIF6NMTW2UKA3R4NX&source_amount=46.1544553&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=XXA&source_asset_issuer=GC4HS4CQCZULIOTGLLPGRAAMSBDLFRR6Y7HCUQG66LNQDISXKIXXADIM&source_amount=2.6881147&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SHX&source_asset_issuer=GDSTRSHXHGJ7ZIVRBXEYE5Q74XUVCUSEKEBR7UCHEUUEK72N7I7KJ6JH&source_amount=144.1815534&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=CREDIT&source_asset_issuer=GBAKUWF2HTJ325PH6VATZQ3UNTK2AGTATR43U52WQCYJ25JNSCF5OFUN&source_amount=8.8339238&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=XLMMega&source_asset_issuer=GAMH54QWYMJ2NACVBGNU7PPWWKIWE7L3QXGGGI65QO6ODJIDRAPZQAYF&source_amount=99.1510058&destination_assets=native%2CyXLM%3AGARDNV3Q7YGT4AKSDF25LT32YSCCW4EV22Y2TV3I2PU2MMXJTEDL5T55 +/paths/strict-send?source_asset_type=credit_alphanum12&source_asset_code=DOGET&source_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&source_amount=653.6204034&destination_assets=native +/paths/strict-send?source_asset_type=credit_alphanum4&source_asset_code=SLT&source_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&source_amount=0.2979613&destination_assets=native \ No newline at end of file diff --git a/exp/orderbook/utils.go b/exp/orderbook/utils.go new file mode 100644 index 0000000000..d1fad24a62 --- /dev/null +++ b/exp/orderbook/utils.go @@ -0,0 +1,31 @@ +package orderbook + +import ( + "github.com/stellar/go/xdr" +) + +// getPoolAssets retrieves string representations of a pool's reserves +func getPoolAssets(pool xdr.LiquidityPoolEntry) (xdr.Asset, xdr.Asset) { + params := pool.Body.MustConstantProduct().Params + return params.AssetA, params.AssetB +} + +func max(a, b xdr.Int64) xdr.Int64 { + if a < b { + return b + } + return a +} + +// positiveMin returns the smallest positive value possible +func positiveMin(a, b xdr.Int64) xdr.Int64 { + if b <= 0 { + return a + } + + if b < a || a <= 0 { + return b + } + + return a +} diff --git 
a/exp/services/captivecore/README.md b/exp/services/captivecore/README.md new file mode 100644 index 0000000000..3d621008fe --- /dev/null +++ b/exp/services/captivecore/README.md @@ -0,0 +1,97 @@ +# captivecore + +The Captive Stellar-Core Server allows you to run a dedicated Stellar-Core instance +for the purpose of ingestion. The server must be bundled with a Stellar Core binary. + +If you run Horizon with Captive Stellar-Core ingestion enabled Horizon will spawn a Stellar-Core +subprocess. Horizon's ingestion system will then stream ledgers from the subprocess via +a filesystem pipe. The disadvantage of running both Horizon and the Stellar-Core subprocess +on the same machine is it requires detailed per-process monitoring to be able to attribute +potential issues (like memory leaks) to a specific service. + +Now you can run Horizon and pair it with a remote Captive Stellar-Core instance. The +Captive Stellar-Core Server can run on a separate machine from Horizon. The server +will manage Stellar-Core as a subprocess and provide an HTTP API which Horizon +can use remotely to stream ledgers for the purpose of ingestion. + +Note that, currently, a single Captive Stellar-Core Server cannot be shared by +multiple Horizon instances. + +## API + +### `GET /latest-sequence` + +Fetches the latest ledger sequence available on the captive core instance. + +Response: + +```json +{ + "sequence": 12345 +} +``` + + +### `GET /ledger/` + +Fetches the ledger with the given sequence number from the captive core instance. + +Response: + + +```json +{ + "present": true, + "ledger": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" +} +``` + +### `POST /prepare-range` + +Preloads the given range of ledgers in the captive core instance. 
+ +Bounded request: +```json +{ + "from": 123, + "to": 150, + "bounded": true +} +``` + +Unbounded request: +```json +{ + "from": 123, + "bounded": false +} +``` + +Response: +```json +{ + "ledgerRange": {"from": 123, "bounded": false}, + "startTime": "2020-08-31T13:29:09Z", + "ready": true, + "readyDuration": 1000 +} +``` + +## Usage + +``` +$ captivecore --help +Run the Captive Stellar-Core Server + +Usage: + captivecore [flags] + +Flags: + --db-url Horizon Postgres URL (optional) used to lookup the ledger hash for sequence numbers + --stellar-core-binary-path Path to stellar core binary + --stellar-core-config-path Path to stellar core config file + --history-archive-urls Comma-separated list of stellar history archives to connect with + --log-level Minimum log severity (debug, info, warn, error) to log (default info) + --network-passphrase string Network passphrase of the Stellar network transactions should be signed for (NETWORK_PASSPHRASE) (default "Test SDF Network ; September 2015") + --port int Port to listen and serve on (PORT) (default 8000) +``` \ No newline at end of file diff --git a/exp/services/captivecore/internal/api.go b/exp/services/captivecore/internal/api.go new file mode 100644 index 0000000000..b39f7c0854 --- /dev/null +++ b/exp/services/captivecore/internal/api.go @@ -0,0 +1,186 @@ +package internal + +import ( + "context" + "sync" + "time" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" +) + +var ( + // ErrMissingPrepareRange is returned when attempting an operation without satisfying + // its PrepareRange dependency + ErrMissingPrepareRange = errors.New("PrepareRange must be called before any other operations") + // ErrMissingPrepareRange is returned when attempting an operation before PrepareRange has finished + // running + ErrPrepareRangeNotReady = errors.New("PrepareRange operation is not yet complete") +) + +type rangeRequest struct { + ledgerRange ledgerbackend.Range + startTime time.Time + readyDuration int + valid bool + ready bool + sync.Mutex +} + +// CaptiveCoreAPI manages a shared captive core subprocess and exposes an API for +// executing commands remotely on the captive core instance. +type CaptiveCoreAPI struct { + ctx context.Context + cancel context.CancelFunc + core ledgerbackend.LedgerBackend + activeRequest *rangeRequest + wg *sync.WaitGroup + log *log.Entry +} + +// NewCaptiveCoreAPI constructs a new CaptiveCoreAPI instance. +func NewCaptiveCoreAPI(core ledgerbackend.LedgerBackend, log *log.Entry) CaptiveCoreAPI { + ctx, cancel := context.WithCancel(context.Background()) + return CaptiveCoreAPI{ + ctx: ctx, + cancel: cancel, + core: core, + log: log, + activeRequest: &rangeRequest{}, + wg: &sync.WaitGroup{}, + } +} + +// Shutdown disables the PrepareRange endpoint and closes +// the captive core process. 
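+// It cancels the server context, waits for any in-flight PrepareRange
+// goroutine to return, and then closes the underlying ledger backend.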
+func (c *CaptiveCoreAPI) Shutdown() { + c.activeRequest.Lock() + c.cancel() + c.activeRequest.Unlock() + + c.wg.Wait() + c.core.Close() +} + +func (c *CaptiveCoreAPI) isShutdown() bool { + return c.ctx.Err() != nil +} + +func (c *CaptiveCoreAPI) startPrepareRange(ctx context.Context, ledgerRange ledgerbackend.Range) { + defer c.wg.Done() + + err := c.core.PrepareRange(ctx, ledgerRange) + + c.activeRequest.Lock() + defer c.activeRequest.Unlock() + if c.isShutdown() { + return + } + + if !c.activeRequest.valid || c.activeRequest.ledgerRange != ledgerRange { + c.log.WithFields(log.F{ + "requestedRange": c.activeRequest.ledgerRange, + "valid": c.activeRequest.valid, + "preparedRange": ledgerRange, + }).Warn("Prepared range does not match requested range") + return + } + + if c.activeRequest.ready { + c.log.WithField("preparedRange", ledgerRange).Warn("Prepared range already completed") + return + } + + if err != nil { + c.log.WithError(err).WithField("preparedRange", ledgerRange).Warn("Could not prepare range") + c.activeRequest.valid = false + c.activeRequest.ready = false + return + } + + c.activeRequest.ready = true + c.activeRequest.readyDuration = int(time.Since(c.activeRequest.startTime).Seconds()) +} + +// PrepareRange executes the PrepareRange operation on the captive core instance. +func (c *CaptiveCoreAPI) PrepareRange(ctx context.Context, ledgerRange ledgerbackend.Range) (ledgerbackend.PrepareRangeResponse, error) { + c.activeRequest.Lock() + defer c.activeRequest.Unlock() + if c.isShutdown() { + return ledgerbackend.PrepareRangeResponse{}, errors.New("Cannot prepare range when shut down") + } + + if !c.activeRequest.valid || !c.activeRequest.ledgerRange.Contains(ledgerRange) { + if c.activeRequest.valid { + c.log.WithFields(log.F{ + "activeRange": c.activeRequest.ledgerRange, + "requestedRange": ledgerRange, + }).Info("Requested range differs from previously requested range") + } + + c.activeRequest.ledgerRange = ledgerRange + c.activeRequest.startTime = time.Now() + c.activeRequest.ready = false + c.activeRequest.valid = true + + c.wg.Add(1) + go c.startPrepareRange(c.ctx, ledgerRange) + + return ledgerbackend.PrepareRangeResponse{ + LedgerRange: ledgerRange, + StartTime: c.activeRequest.startTime, + Ready: false, + ReadyDuration: 0, + }, nil + } + + return ledgerbackend.PrepareRangeResponse{ + LedgerRange: c.activeRequest.ledgerRange, + StartTime: c.activeRequest.startTime, + Ready: c.activeRequest.ready, + ReadyDuration: c.activeRequest.readyDuration, + }, nil +} + +// GetLatestLedgerSequence determines the latest ledger sequence available on the captive core instance. +func (c *CaptiveCoreAPI) GetLatestLedgerSequence(ctx context.Context) (ledgerbackend.LatestLedgerSequenceResponse, error) { + c.activeRequest.Lock() + defer c.activeRequest.Unlock() + + if !c.activeRequest.valid { + return ledgerbackend.LatestLedgerSequenceResponse{}, ErrMissingPrepareRange + } + if !c.activeRequest.ready { + return ledgerbackend.LatestLedgerSequenceResponse{}, ErrPrepareRangeNotReady + } + + seq, err := c.core.GetLatestLedgerSequence(ctx) + if err != nil { + c.activeRequest.valid = false + } + return ledgerbackend.LatestLedgerSequenceResponse{Sequence: seq}, err +} + +// GetLedger fetches the ledger with the given sequence number from the captive core instance. 
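+// If the call fails, the active range request is marked invalid, so callers
+// must issue a new PrepareRange before further queries.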
+func (c *CaptiveCoreAPI) GetLedger(ctx context.Context, sequence uint32) (ledgerbackend.LedgerResponse, error) { + c.activeRequest.Lock() + defer c.activeRequest.Unlock() + + if !c.activeRequest.valid { + return ledgerbackend.LedgerResponse{}, ErrMissingPrepareRange + } + if !c.activeRequest.ready { + return ledgerbackend.LedgerResponse{}, ErrPrepareRangeNotReady + } + + ledger, err := c.core.GetLedger(ctx, sequence) + if err != nil { + c.activeRequest.valid = false + } + // TODO: We are always true here now, so this changes the semantics of this + // call a bit. We need to change the client to long-poll this endpoint. + return ledgerbackend.LedgerResponse{ + Ledger: ledgerbackend.Base64Ledger(ledger), + }, err +} diff --git a/exp/services/captivecore/internal/api_test.go b/exp/services/captivecore/internal/api_test.go new file mode 100644 index 0000000000..b9c96bf8b4 --- /dev/null +++ b/exp/services/captivecore/internal/api_test.go @@ -0,0 +1,244 @@ +package internal + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +func TestAPITestSuite(t *testing.T) { + suite.Run(t, new(APITestSuite)) +} + +type APITestSuite struct { + suite.Suite + ctx context.Context + ledgerBackend *ledgerbackend.MockDatabaseBackend + api CaptiveCoreAPI +} + +func (s *APITestSuite) SetupTest() { + s.ctx = context.Background() + s.ledgerBackend = &ledgerbackend.MockDatabaseBackend{} + s.api = NewCaptiveCoreAPI(s.ledgerBackend, log.New()) +} + +func (s *APITestSuite) TearDownTest() { + s.ledgerBackend.AssertExpectations(s.T()) +} + +func (s *APITestSuite) TestLatestSeqActiveRequestInvalid() { + _, err := s.api.GetLatestLedgerSequence(s.ctx) + s.Assert().Equal(err, ErrMissingPrepareRange) +} + +func (s *APITestSuite) TestGetLedgerActiveRequestInvalid() { + _, err := s.api.GetLedger(s.ctx, 64) + s.Assert().Equal(err, ErrMissingPrepareRange) +} + +func (s *APITestSuite) runBeforeReady(prepareRangeErr error, f func()) { + waitChan := make(chan time.Time) + ledgerRange := ledgerbackend.UnboundedRange(63) + s.ledgerBackend.On("PrepareRange", mock.Anything, ledgerRange). + WaitUntil(waitChan). + Return(prepareRangeErr).Once() + + response, err := s.api.PrepareRange(s.ctx, ledgerRange) + s.Assert().NoError(err) + s.Assert().False(response.Ready) + s.Assert().Equal(response.LedgerRange, ledgerRange) + + f() + + close(waitChan) + s.api.wg.Wait() +} + +func (s *APITestSuite) TestLatestSeqActiveRequestNotReady() { + s.runBeforeReady(nil, func() { + _, err := s.api.GetLatestLedgerSequence(s.ctx) + s.Assert().Equal(err, ErrPrepareRangeNotReady) + }) +} + +func (s *APITestSuite) TestGetLedgerNotReady() { + s.runBeforeReady(nil, func() { + _, err := s.api.GetLedger(s.ctx, 64) + s.Assert().Equal(err, ErrPrepareRangeNotReady) + }) +} + +func (s *APITestSuite) waitUntilReady(ledgerRange ledgerbackend.Range) { + s.ledgerBackend.On("PrepareRange", mock.Anything, ledgerRange). 
+ Return(nil).Once() + + response, err := s.api.PrepareRange(s.ctx, ledgerRange) + s.Assert().NoError(err) + s.Assert().False(response.Ready) + s.Assert().Equal(response.LedgerRange, ledgerRange) + + s.api.wg.Wait() +} + +func (s *APITestSuite) TestLatestSeqError() { + s.waitUntilReady(ledgerbackend.UnboundedRange(63)) + + expectedErr := fmt.Errorf("test error") + s.ledgerBackend.On("GetLatestLedgerSequence", s.ctx).Return(uint32(0), expectedErr).Once() + + _, err := s.api.GetLatestLedgerSequence(s.ctx) + s.Assert().Equal(err, expectedErr) +} + +func (s *APITestSuite) TestGetLedgerError() { + s.waitUntilReady(ledgerbackend.UnboundedRange(63)) + + expectedErr := fmt.Errorf("test error") + s.ledgerBackend.On("GetLedger", s.ctx, uint32(64)). + Return(xdr.LedgerCloseMeta{}, expectedErr).Once() + + _, err := s.api.GetLedger(s.ctx, 64) + s.Assert().Equal(err, expectedErr) +} + +func (s *APITestSuite) TestLatestSeqSucceeds() { + s.waitUntilReady(ledgerbackend.UnboundedRange(63)) + + expectedSeq := uint32(100) + s.ledgerBackend.On("GetLatestLedgerSequence", s.ctx).Return(expectedSeq, nil).Once() + seq, err := s.api.GetLatestLedgerSequence(s.ctx) + s.Assert().NoError(err) + s.Assert().Equal(seq, ledgerbackend.LatestLedgerSequenceResponse{Sequence: expectedSeq}) +} + +func (s *APITestSuite) TestGetLedgerSucceeds() { + s.waitUntilReady(ledgerbackend.UnboundedRange(63)) + + expectedLedger := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 64, + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(64)). + Return(expectedLedger, nil).Once() + seq, err := s.api.GetLedger(s.ctx, 64) + + s.Assert().NoError(err) + s.Assert().Equal(seq, ledgerbackend.LedgerResponse{ + Ledger: ledgerbackend.Base64Ledger(expectedLedger), + }) +} + +func (s *APITestSuite) TestShutDownBeforePrepareRange() { + s.ledgerBackend.On("Close").Return(nil).Once() + s.api.Shutdown() + _, err := s.api.PrepareRange(s.ctx, ledgerbackend.UnboundedRange(63)) + s.Assert().EqualError(err, "Cannot prepare range when shut down") +} + +func (s *APITestSuite) TestShutDownDuringPrepareRange() { + s.runBeforeReady(nil, func() { + s.api.cancel() + }) + + s.Assert().False(s.api.activeRequest.ready) +} + +func (s *APITestSuite) TestPrepareRangeInvalidActiveRequest() { + s.runBeforeReady(nil, func() { + s.Assert().True(s.api.activeRequest.valid) + s.api.activeRequest.valid = false + }) + s.Assert().False(s.api.activeRequest.ready) + + s.api.activeRequest = &rangeRequest{} + + s.runBeforeReady(fmt.Errorf("with error"), func() { + s.Assert().True(s.api.activeRequest.valid) + s.api.activeRequest.valid = false + }) + s.Assert().False(s.api.activeRequest.ready) +} + +func (s *APITestSuite) TestPrepareRangeDoesNotMatchActiveRequestRange() { + s.runBeforeReady(nil, func() { + s.Assert().Equal(ledgerbackend.UnboundedRange(63), s.api.activeRequest.ledgerRange) + s.api.activeRequest.ledgerRange = ledgerbackend.UnboundedRange(1000) + }) + s.Assert().False(s.api.activeRequest.ready) + s.Assert().Equal(ledgerbackend.UnboundedRange(1000), s.api.activeRequest.ledgerRange) + + s.api.activeRequest = &rangeRequest{} + + s.runBeforeReady(fmt.Errorf("with error"), func() { + s.Assert().Equal(ledgerbackend.UnboundedRange(63), s.api.activeRequest.ledgerRange) + s.api.activeRequest.ledgerRange = ledgerbackend.UnboundedRange(10) + }) + s.Assert().False(s.api.activeRequest.ready) + s.Assert().Equal(ledgerbackend.UnboundedRange(10), s.api.activeRequest.ledgerRange) +} + +func (s 
*APITestSuite) TestPrepareRangeActiveRequestReady() { + s.runBeforeReady(nil, func() { + s.api.activeRequest.ready = true + }) + s.Assert().True(s.api.activeRequest.ready) + s.Assert().True(s.api.activeRequest.valid) + s.Assert().Equal(0, s.api.activeRequest.readyDuration) + + s.api.activeRequest = &rangeRequest{} + + s.runBeforeReady(fmt.Errorf("with error"), func() { + s.api.activeRequest.ready = true + }) + s.Assert().True(s.api.activeRequest.ready) + s.Assert().True(s.api.activeRequest.valid) + s.Assert().Equal(0, s.api.activeRequest.readyDuration) +} + +func (s *APITestSuite) TestPrepareRangeError() { + s.runBeforeReady(fmt.Errorf("with error"), func() { + s.Assert().False(s.api.activeRequest.ready) + s.Assert().True(s.api.activeRequest.valid) + }) + s.Assert().False(s.api.activeRequest.ready) + s.Assert().False(s.api.activeRequest.valid) + + s.api.activeRequest = &rangeRequest{} +} + +func (s *APITestSuite) TestRangeAlreadyPrepared() { + superSetRange := ledgerbackend.UnboundedRange(63) + s.waitUntilReady(superSetRange) + + for _, ledgerRange := range []ledgerbackend.Range{ + superSetRange, + ledgerbackend.UnboundedRange(100), + ledgerbackend.BoundedRange(63, 70), + } { + response, err := s.api.PrepareRange(s.ctx, ledgerRange) + s.Assert().NoError(err) + s.Assert().True(response.Ready) + s.Assert().Equal(superSetRange, response.LedgerRange) + } +} + +func (s *APITestSuite) TestNewPrepareRange() { + s.waitUntilReady(ledgerbackend.UnboundedRange(63)) + s.waitUntilReady(ledgerbackend.UnboundedRange(50)) + s.waitUntilReady(ledgerbackend.BoundedRange(45, 50)) + s.waitUntilReady(ledgerbackend.UnboundedRange(46)) +} diff --git a/exp/services/captivecore/internal/server.go b/exp/services/captivecore/internal/server.go new file mode 100644 index 0000000000..127444611f --- /dev/null +++ b/exp/services/captivecore/internal/server.go @@ -0,0 +1,89 @@ +package internal + +import ( + "context" + "encoding/json" + "net/http" + "time" + + "github.com/stellar/go/ingest/ledgerbackend" + supporthttp "github.com/stellar/go/support/http" + "github.com/stellar/go/support/http/httpdecode" + supportlog "github.com/stellar/go/support/log" +) + +func serializeResponse( + logger *supportlog.Entry, + w http.ResponseWriter, + r *http.Request, + response interface{}, + err error, +) { + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + if err := json.NewEncoder(w).Encode(response); err != nil { + logger.WithContext(r.Context()).WithError(err).Warn("could not serialize response") + w.WriteHeader(http.StatusInternalServerError) + } +} + +type GetLedgerRequest struct { + Sequence uint32 `path:"sequence"` +} + +// Handler returns an HTTP handler which exposes captive core operations via HTTP endpoints. +func Handler(api CaptiveCoreAPI) http.Handler { + mux := supporthttp.NewMux(api.log) + + mux.Get("/latest-sequence", func(w http.ResponseWriter, r *http.Request) { + response, err := api.GetLatestLedgerSequence(r.Context()) + serializeResponse(api.log, w, r, response, err) + }) + + mux.Get("/ledger/{sequence}", func(w http.ResponseWriter, r *http.Request) { + req := GetLedgerRequest{} + if err := httpdecode.Decode(r, &req); err != nil { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(err.Error())) + return + } + + // must be shorter than the RemoteCaptiveCore http client timeout. 
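+		// GetLedger may block until the requested ledger is available, so the
+		// call below runs in a goroutine and is raced against this timeout; on
+		// expiry the handler replies with 408 and the remote client is expected
+		// to retry.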
+ ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() // release timer + + var response ledgerbackend.LedgerResponse + var err error + done := make(chan struct{}) + go func() { + response, err = api.GetLedger(ctx, req.Sequence) + close(done) + }() + + select { + case <-ctx.Done(): + w.WriteHeader(http.StatusRequestTimeout) + case <-done: + serializeResponse(api.log, w, r, response, err) + } + }) + + mux.Post("/prepare-range", func(w http.ResponseWriter, r *http.Request) { + ledgerRange := ledgerbackend.Range{} + if err := json.NewDecoder(r.Body).Decode(&ledgerRange); err != nil { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(err.Error())) + return + } + + response, err := api.PrepareRange(r.Context(), ledgerRange) + serializeResponse(api.log, w, r, response, err) + }) + + return mux +} diff --git a/exp/services/captivecore/internal/server_test.go b/exp/services/captivecore/internal/server_test.go new file mode 100644 index 0000000000..86ad989191 --- /dev/null +++ b/exp/services/captivecore/internal/server_test.go @@ -0,0 +1,182 @@ +package internal + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +func TestServerTestSuite(t *testing.T) { + suite.Run(t, new(ServerTestSuite)) +} + +type ServerTestSuite struct { + suite.Suite + ctx context.Context + ledgerBackend *ledgerbackend.MockDatabaseBackend + api CaptiveCoreAPI + handler http.Handler + server *httptest.Server + client ledgerbackend.RemoteCaptiveStellarCore +} + +func (s *ServerTestSuite) SetupTest() { + s.ctx = context.Background() + s.ledgerBackend = &ledgerbackend.MockDatabaseBackend{} + s.api = NewCaptiveCoreAPI(s.ledgerBackend, log.New()) + s.handler = Handler(s.api) + s.server = httptest.NewServer(s.handler) + var err error + s.client, err = ledgerbackend.NewRemoteCaptive( + s.server.URL, + ledgerbackend.PrepareRangePollInterval(time.Millisecond), + ) + s.Assert().NoError(err) +} + +func (s *ServerTestSuite) TearDownTest() { + s.ledgerBackend.AssertExpectations(s.T()) + s.server.Close() + s.client.Close() +} + +func (s *ServerTestSuite) TestLatestSequence() { + s.api.activeRequest.valid = true + s.api.activeRequest.ready = true + + expectedSeq := uint32(100) + s.ledgerBackend.On("GetLatestLedgerSequence", mock.Anything).Return(expectedSeq, nil).Once() + + seq, err := s.client.GetLatestLedgerSequence(s.ctx) + s.Assert().NoError(err) + s.Assert().Equal(expectedSeq, seq) +} + +func (s *ServerTestSuite) TestLatestSequenceError() { + s.api.activeRequest.valid = true + s.api.activeRequest.ready = true + + s.ledgerBackend.On("GetLatestLedgerSequence", mock.Anything).Return(uint32(100), fmt.Errorf("test error")).Once() + + _, err := s.client.GetLatestLedgerSequence(s.ctx) + s.Assert().EqualError(err, "test error") +} + +func (s *ServerTestSuite) TestPrepareBoundedRange() { + ledgerRange := ledgerbackend.BoundedRange(10, 30) + s.ledgerBackend.On("PrepareRange", mock.Anything, ledgerRange). 
+ Return(nil).Once() + + s.Assert().NoError(s.client.PrepareRange(s.ctx, ledgerRange)) + s.Assert().True(s.api.activeRequest.ready) + + prepared, err := s.client.IsPrepared(s.ctx, ledgerRange) + s.Assert().NoError(err) + s.Assert().True(prepared) +} + +func (s *ServerTestSuite) TestPrepareUnboundedRange() { + ledgerRange := ledgerbackend.UnboundedRange(100) + s.ledgerBackend.On("PrepareRange", mock.Anything, ledgerRange). + Return(nil).Once() + + s.Assert().NoError(s.client.PrepareRange(s.ctx, ledgerRange)) + s.Assert().True(s.api.activeRequest.ready) + + prepared, err := s.client.IsPrepared(s.ctx, ledgerRange) + s.Assert().NoError(err) + s.Assert().True(prepared) +} + +func (s *ServerTestSuite) TestPrepareError() { + s.ledgerBackend.On("Close").Return(nil).Once() + s.api.Shutdown() + + s.Assert().EqualError( + s.client.PrepareRange(s.ctx, ledgerbackend.UnboundedRange(100)), + "Cannot prepare range when shut down", + ) +} + +func (s *ServerTestSuite) TestGetLedgerInvalidSequence() { + req := httptest.NewRequest("GET", "/ledger/abcdef", nil) + req = req.WithContext(s.ctx) + w := httptest.NewRecorder() + + s.handler.ServeHTTP(w, req) + + resp := w.Result() + body, err := ioutil.ReadAll(resp.Body) + s.Assert().NoError(err) + + s.Assert().Equal(http.StatusBadRequest, resp.StatusCode) + s.Assert().Equal("path params could not be parsed: schema: error converting value for \"sequence\"", string(body)) +} + +func (s *ServerTestSuite) TestGetLedgerError() { + s.api.activeRequest.valid = true + s.api.activeRequest.ready = true + + expectedErr := fmt.Errorf("test error") + s.ledgerBackend.On("GetLedger", mock.Anything, uint32(64)). + Return(xdr.LedgerCloseMeta{}, expectedErr).Once() + + _, err := s.client.GetLedger(s.ctx, 64) + s.Assert().EqualError(err, "test error") +} + +func (s *ServerTestSuite) TestGetLedgerSucceeds() { + s.api.activeRequest.valid = true + s.api.activeRequest.ready = true + + expectedLedger := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 64, + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", mock.Anything, uint32(64)). + Return(expectedLedger, nil).Once() + + ledger, err := s.client.GetLedger(s.ctx, 64) + s.Assert().NoError(err) + s.Assert().Equal(expectedLedger, ledger) +} + +func (s *ServerTestSuite) TestGetLedgerTakesAWhile() { + s.api.activeRequest.valid = true + s.api.activeRequest.ready = true + + expectedLedger := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 64, + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", mock.Anything, uint32(64)). + Run(func(mock.Arguments) { time.Sleep(6 * time.Second) }). + Return(xdr.LedgerCloseMeta{}, nil).Once() + s.ledgerBackend.On("GetLedger", mock.Anything, uint32(64)). 
+ Return(expectedLedger, nil).Once() + + ledger, err := s.client.GetLedger(s.ctx, 64) + s.Assert().NoError(err) + s.Assert().Equal(expectedLedger, ledger) +} diff --git a/exp/services/captivecore/main.go b/exp/services/captivecore/main.go new file mode 100644 index 0000000000..7fde277d69 --- /dev/null +++ b/exp/services/captivecore/main.go @@ -0,0 +1,184 @@ +package main + +import ( + "fmt" + "go/types" + "strings" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/spf13/viper" + + "github.com/stellar/go/exp/services/captivecore/internal" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/network" + "github.com/stellar/go/support/config" + "github.com/stellar/go/support/db" + supporthttp "github.com/stellar/go/support/http" + supportlog "github.com/stellar/go/support/log" +) + +func main() { + var port int + var networkPassphrase, binaryPath, configPath, dbURL string + var captiveCoreTomlParams ledgerbackend.CaptiveCoreTomlParams + var historyArchiveURLs []string + var checkpointFrequency uint32 + var logLevel logrus.Level + logger := supportlog.New() + + configOpts := config.ConfigOptions{ + { + Name: "port", + Usage: "Port to listen and serve on", + OptType: types.Int, + ConfigKey: &port, + FlagDefault: 8000, + Required: true, + }, + { + Name: "network-passphrase", + Usage: "Network passphrase of the Stellar network transactions should be signed for", + OptType: types.String, + ConfigKey: &networkPassphrase, + FlagDefault: network.TestNetworkPassphrase, + Required: true, + }, + &config.ConfigOption{ + Name: "stellar-core-binary-path", + OptType: types.String, + FlagDefault: "", + Required: true, + Usage: "path to stellar core binary", + ConfigKey: &binaryPath, + }, + &config.ConfigOption{ + Name: "captive-core-config-path", + OptType: types.String, + FlagDefault: "", + Required: true, + Usage: "path to additional configuration for the Stellar Core configuration file used by captive core. 
It must, at least, include enough details to define a quorum set", + ConfigKey: &configPath, + }, + &config.ConfigOption{ + Name: "history-archive-urls", + ConfigKey: &historyArchiveURLs, + OptType: types.String, + Required: true, + FlagDefault: "", + CustomSetValue: func(co *config.ConfigOption) error { + stringOfUrls := viper.GetString(co.Name) + urlStrings := strings.Split(stringOfUrls, ",") + + *(co.ConfigKey.(*[]string)) = urlStrings + return nil + }, + Usage: "comma-separated list of stellar history archives to connect with", + }, + &config.ConfigOption{ + Name: "log-level", + ConfigKey: &logLevel, + OptType: types.String, + FlagDefault: "info", + CustomSetValue: func(co *config.ConfigOption) error { + ll, err := logrus.ParseLevel(viper.GetString(co.Name)) + if err != nil { + return fmt.Errorf("Could not parse log-level: %v", viper.GetString(co.Name)) + } + *(co.ConfigKey.(*logrus.Level)) = ll + return nil + }, + Usage: "minimum log severity (debug, info, warn, error) to log", + }, + &config.ConfigOption{ + Name: "db-url", + EnvVar: "DATABASE_URL", + ConfigKey: &dbURL, + OptType: types.String, + Required: false, + Usage: "horizon postgres database to connect with", + }, + &config.ConfigOption{ + Name: "stellar-captive-core-http-port", + ConfigKey: &captiveCoreTomlParams.HTTPPort, + OptType: types.Uint, + CustomSetValue: config.SetOptionalUint, + Required: false, + FlagDefault: uint(11626), + Usage: "HTTP port for Captive Core to listen on (0 disables the HTTP server)", + }, + &config.ConfigOption{ + Name: "checkpoint-frequency", + ConfigKey: &checkpointFrequency, + OptType: types.Uint32, + FlagDefault: uint32(64), + Required: false, + Usage: "establishes how many ledgers exist between checkpoints, do NOT change this unless you really know what you are doing", + }, + } + cmd := &cobra.Command{ + Use: "captivecore", + Short: "Run the remote captive core server", + Run: func(_ *cobra.Command, _ []string) { + configOpts.Require() + configOpts.SetValues() + logger.SetLevel(logLevel) + + captiveCoreTomlParams.HistoryArchiveURLs = historyArchiveURLs + captiveCoreTomlParams.NetworkPassphrase = networkPassphrase + captiveCoreTomlParams.Strict = true + captiveCoreToml, err := ledgerbackend.NewCaptiveCoreTomlFromFile(configPath, captiveCoreTomlParams) + if err != nil { + logger.WithError(err).Fatal("Invalid captive core toml") + } + + captiveConfig := ledgerbackend.CaptiveCoreConfig{ + BinaryPath: binaryPath, + NetworkPassphrase: networkPassphrase, + HistoryArchiveURLs: historyArchiveURLs, + CheckpointFrequency: checkpointFrequency, + Log: logger.WithField("subservice", "stellar-core"), + Toml: captiveCoreToml, + } + + var dbConn *db.Session + if len(dbURL) > 0 { + dbConn, err = db.Open("postgres", dbURL) + if err != nil { + logger.WithError(err).Fatal("Could not create db connection instance") + } + captiveConfig.LedgerHashStore = ledgerbackend.NewHorizonDBLedgerHashStore(dbConn) + } + + core, err := ledgerbackend.NewCaptive(captiveConfig) + if err != nil { + logger.WithError(err).Fatal("Could not create captive core instance") + } + api := internal.NewCaptiveCoreAPI(core, logger.WithField("subservice", "api")) + + supporthttp.Run(supporthttp.Config{ + ListenAddr: fmt.Sprintf(":%d", port), + Handler: internal.Handler(api), + OnStarting: func() { + logger.Infof("Starting Captive Core server on %v", port) + }, + OnStopping: func() { + // TODO: Check this aborts in-progress requests instead of letting + // them finish, to preserve existing behaviour. 
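+					// Close the Horizon DB session (used for the ledger hash
+					// store) only after the captive core API has shut down.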
+					api.Shutdown()
+					if dbConn != nil {
+						dbConn.Close()
+					}
+				},
+			})
+		},
+	}
+
+	if err := configOpts.Init(cmd); err != nil {
+		logger.WithError(err).Fatal("could not parse config options")
+	}
+
+	if err := cmd.Execute(); err != nil {
+		logger.WithError(err).Fatal("could not run")
+	}
+}
diff --git a/exp/services/market-tracker/README.md b/exp/services/market-tracker/README.md
new file mode 100644
index 0000000000..5d9034993d
--- /dev/null
+++ b/exp/services/market-tracker/README.md
@@ -0,0 +1,16 @@
+# Stellar Market Tracker
+
+The Stellar Market Tracker allows you to monitor the spreads of any desired asset pairs, and makes them available for Prometheus to scrape.
+To use this project, you will need to provide the following:
+- A custom `config.json` listing the asset pairs to monitor. The format is displayed in `config_sample.json`.
+- Environment variables in a `.env` file: `STELLAR_EXPERT_AUTH_KEY` and `STELLAR_EXPERT_AUTH_VAL`, the authentication header for Stellar Expert; `RATES_API_KEY` and `RATES_API_VAL`, the key-value pair for the OpenExchangeRates API. Note that the exact format of these variables may change as we finalize internal deployment.
+
+## Running the project
+
+This project was built using Go (this repository is officially supported on the last two releases of Go) and [Go Modules](https://blog.golang.org/using-go-modules).
+
+1. From the monorepo root, navigate to the project: `cd exp/services/market-tracker`
+2. Create a `config.json` file with the asset pairs to monitor and the refresh interval. A sample file is checked in at `config_sample.json`.
+3. Build the project: `go build .`
+4. Run the project: `./market-tracker`
+5. Open `http://127.0.0.1:2112/metrics` to view the metrics. This is the endpoint Prometheus should scrape.
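+
+For reference, a minimal Prometheus scrape job for this endpoint could look like the sketch below (the job name and target address are illustrative; adjust them for your deployment):
+
+```yaml
+scrape_configs:
+  - job_name: "stellar-market-tracker"
+    static_configs:
+      # The tracker serves metrics on port 2112 at the default /metrics path.
+      - targets: ["127.0.0.1:2112"]
+```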
diff --git a/exp/services/market-tracker/calc.go b/exp/services/market-tracker/calc.go new file mode 100644 index 0000000000..d80268e03a --- /dev/null +++ b/exp/services/market-tracker/calc.go @@ -0,0 +1,117 @@ +package main + +import ( + "math" + + hProtocol "github.com/stellar/go/protocols/horizon" +) + +func calcSpreadPctForOrderBook(obStats hProtocol.OrderBookSummary) float64 { + highestBid := calcHighestBid(obStats.Bids) + lowestAsk := calcLowestAsk(obStats.Asks) + spread := calcSpread(highestBid, lowestAsk) + spreadPct := 100.0 * spread + return spreadPct +} + +func calcSpread(highestBid float64, lowestAsk float64) float64 { + if lowestAsk == 0 || highestBid == 0 || math.IsInf(highestBid, -1) || math.IsInf(lowestAsk, 1) { + return 0 + } + return (lowestAsk - highestBid) / lowestAsk +} + +func calcSpreadPctAtDepth(bids, asks []usdOrder, depth float64) float64 { + highestBid := calcBestOrderAtDepth(bids, depth) + lowestAsk := calcBestOrderAtDepth(asks, depth) + spread := calcSpread(highestBid, lowestAsk) + spreadPct := 100.0 * spread + return spreadPct +} + +func calcHighestBid(bids []hProtocol.PriceLevel) float64 { + highestBid := math.Inf(-1) + for _, bid := range bids { + currBid := float64(bid.PriceR.N) / float64(bid.PriceR.D) + if currBid > highestBid { + highestBid = currBid + } + } + return highestBid +} + +func calcLowestAsk(bids []hProtocol.PriceLevel) float64 { + lowestAsk := math.Inf(1) + for _, bid := range bids { + currBid := float64(bid.PriceR.N) / float64(bid.PriceR.D) + if currBid < lowestAsk { + lowestAsk = currBid + } + } + return lowestAsk +} + +func calcBestOrderAtDepth(orders []usdOrder, depth float64) float64 { + total := 0.0 + for _, order := range orders { + total += order.usdAmount + if total >= depth { + return order.usdPrice + } + } + + // We return 0.0 in case of insufficient depth. + return 0.0 +} + +func calcSlippageAtDepth(bids, asks []usdOrder, depth float64, isBid bool) float64 { + mp := calcMidPrice(bids, asks) + + var ap float64 + if isBid { + ap = calcAvgPriceAtDepth(bids, depth) + } else { + ap = calcAvgPriceAtDepth(asks, depth) + } + + // if insufficient liquidity, return 0. + if ap == 0 { + return 0 + } + + return 100. * math.Abs(ap-mp) / mp +} + +func calcMidPrice(bids, asks []usdOrder) float64 { + // note that this assumes bids and asks have already been sorted, + // which is done in the convertBids / convertAsks functions + highestBid := calcBestOrderAtDepth(bids, 0.) + lowestAsk := calcBestOrderAtDepth(asks, 0.) + midPrice := (highestBid + lowestAsk) / 2 + return midPrice +} + +func calcAvgPriceAtDepth(orders []usdOrder, depth float64) float64 { + // note that this assumes bids and asks have already been sorted, + // which is done in the convertBids / convertAsks functions + totalPrice := 0.0 + totalAmount := 0.0 + for i, order := range orders { + totalPrice += order.usdPrice + totalAmount += order.usdAmount + if totalAmount >= depth { + return totalPrice / float64(i+1) + } + } + return 0.0 +} + +func calcFairValuePct(sortedBids, sortedAsks []usdOrder, trueAssetUsdPrice float64) float64 { + if trueAssetUsdPrice == 0 { + return 0. + } + + dexAssetUsdPrice := calcMidPrice(sortedBids, sortedAsks) + fairValuePct := 100. 
* math.Abs(dexAssetUsdPrice-trueAssetUsdPrice) / trueAssetUsdPrice + return fairValuePct +} diff --git a/exp/services/market-tracker/calc_test.go b/exp/services/market-tracker/calc_test.go new file mode 100644 index 0000000000..590336af9f --- /dev/null +++ b/exp/services/market-tracker/calc_test.go @@ -0,0 +1,258 @@ +package main + +import ( + "math" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stretchr/testify/assert" +) + +var hLowOrder = hProtocol.PriceLevel{ + PriceR: hProtocol.Price{ + N: 4, + D: 2, + }, + Price: "2.0", + Amount: "50.0", +} + +var hHighOrder = hProtocol.PriceLevel{ + PriceR: hProtocol.Price{ + N: 5, + D: 2, + }, + Price: "2.5", + Amount: "100.0", +} + +var hOrders = []hProtocol.PriceLevel{hLowOrder, hHighOrder} + +var lowUsdOrder = usdOrder{ + xlmAmount: 50.0, + usdAmount: 5.0, + usdPrice: 20.0, +} + +var highUsdOrder = usdOrder{ + xlmAmount: 100.0, + usdAmount: 10.0, + usdPrice: 25.0, +} + +func TestCalcSpreadPctForOrderBook(t *testing.T) { + obStats := hProtocol.OrderBookSummary{ + Bids: hOrders, + Asks: hOrders, + } + wantPct := 100.0 * (2.0 - 2.5) / 2.0 + gotPct := calcSpreadPctForOrderBook(obStats) + assert.Equal(t, wantPct, gotPct) +} + +func TestCalcSpread(t *testing.T) { + // Test inputs that should result in 0 spread. + wantSpread := 0.0 + highestBid := -1.0 + lowestAsk := 0.0 + gotSpread := calcSpread(highestBid, lowestAsk) + assert.Equal(t, wantSpread, gotSpread) + + lowestAsk = -1.0 + highestBid = 0.0 + gotSpread = calcSpread(highestBid, lowestAsk) + assert.Equal(t, wantSpread, gotSpread) + + highestBid = math.Inf(-1) + gotSpread = calcSpread(highestBid, lowestAsk) + assert.Equal(t, wantSpread, gotSpread) + + highestBid = -1.0 + lowestAsk = math.Inf(1) + gotSpread = calcSpread(highestBid, lowestAsk) + assert.Equal(t, wantSpread, gotSpread) + + // Test the spread calculation. + lowestAsk = 10.0 + highestBid = 9.0 + wantSpread = (lowestAsk - highestBid) / lowestAsk + gotSpread = calcSpread(highestBid, lowestAsk) + assert.Equal(t, wantSpread, gotSpread) +} + +func TestCalcSpreadPctAtDepth(t *testing.T) { + bids := []usdOrder{ + usdOrder{ + xlmAmount: 100.0, + usdAmount: 10.0, + usdPrice: 10.0, + }, + } + asks := []usdOrder{lowUsdOrder, highUsdOrder} + + gotSpreadPct := calcSpreadPctAtDepth(bids, asks, 5.0) + assert.Equal(t, 50.0, gotSpreadPct) + + gotSpreadPct = calcSpreadPctAtDepth(bids, asks, 10.0) + assert.Equal(t, 60.0, gotSpreadPct) +} + +func TestCalcHighestBid(t *testing.T) { + // Test empty bids. + gotBid := calcHighestBid([]hProtocol.PriceLevel{}) + assert.Equal(t, math.Inf(-1), gotBid) + + // Test non-empty bids. + gotBid = calcHighestBid(hOrders) + assert.Equal(t, 2.5, gotBid) +} + +func TestCalcLowestAsk(t *testing.T) { + // Test empty asks. + asks := []hProtocol.PriceLevel{} + wantAsk := math.Inf(1) + gotAsk := calcLowestAsk(asks) + assert.Equal(t, wantAsk, gotAsk) + + // Test non-empty asks. 
+ wantAsk = 2.0 + gotAsk = calcLowestAsk(hOrders) + assert.Equal(t, wantAsk, gotAsk) +} + +func TestCalcBestOrderAtDepth(t *testing.T) { + bids := []usdOrder{highUsdOrder, lowUsdOrder} + gotBid := calcBestOrderAtDepth(bids, 5.0) + assert.Equal(t, 25.0, gotBid) + + gotBid = calcBestOrderAtDepth(bids, 15.0) + assert.Equal(t, 20.0, gotBid) + + gotBid = calcBestOrderAtDepth(bids, 25.0) + assert.Equal(t, 0.0, gotBid) +} + +func TestCalcSlippageAtDepth(t *testing.T) { + bids := []usdOrder{ + usdOrder{ + xlmAmount: 1., + usdAmount: 30., + usdPrice: 30., + }, + usdOrder{ + xlmAmount: 1., + usdAmount: 25., + usdPrice: 25., + }, + usdOrder{ + xlmAmount: 1., + usdAmount: 50., + usdPrice: 20., + }, + } + + asks := []usdOrder{ + usdOrder{ + xlmAmount: 5., + usdAmount: 100., + usdPrice: 20., + }, + usdOrder{ + xlmAmount: 4., + usdAmount: 100., + usdPrice: 25., + }, + usdOrder{ + xlmAmount: 4., + usdAmount: 120., + usdPrice: 30., + }, + } + + bs := calcSlippageAtDepth(bids, asks, 10., true) + assert.Equal(t, 20., bs) + as := calcSlippageAtDepth(bids, asks, 10., false) + assert.Equal(t, 20., as) + + bs = calcSlippageAtDepth(bids, asks, 50., true) + assert.Equal(t, 10., bs) + as = calcSlippageAtDepth(bids, asks, 50., false) + assert.Equal(t, 20., as) + + bs = calcSlippageAtDepth(bids, asks, 100., true) + assert.Equal(t, 0., bs) + as = calcSlippageAtDepth(bids, asks, 100., false) + assert.Equal(t, 20., as) + + bs = calcSlippageAtDepth(bids, asks, 1000., true) + assert.Equal(t, 0., bs) + as = calcSlippageAtDepth(bids, asks, 1000., false) + assert.Equal(t, 0., as) +} + +func TestCalcMidPrice(t *testing.T) { + bids := []usdOrder{highUsdOrder, lowUsdOrder} + asks := []usdOrder{lowUsdOrder, highUsdOrder} + mp := calcMidPrice(bids, asks) + assert.Equal(t, 22.5, mp) +} + +func TestCalcAvgPriceAtDepth(t *testing.T) { + orders := []usdOrder{highUsdOrder, lowUsdOrder} + ap := calcAvgPriceAtDepth(orders, 10.) + assert.Equal(t, 25., ap) + ap = calcAvgPriceAtDepth(orders, 15.) + assert.Equal(t, 22.5, ap) + ap = calcAvgPriceAtDepth(orders, 20.) + assert.Equal(t, 0., ap) +} + +func TestCalcFairValuePct(t *testing.T) { + bids := []usdOrder{ + usdOrder{ + xlmAmount: 1., + usdAmount: 30., + usdPrice: 30., + }, + usdOrder{ + xlmAmount: 1., + usdAmount: 25., + usdPrice: 25., + }, + usdOrder{ + xlmAmount: 1., + usdAmount: 50., + usdPrice: 20., + }, + } + + asks := []usdOrder{ + usdOrder{ + xlmAmount: 5., + usdAmount: 100., + usdPrice: 20., + }, + usdOrder{ + xlmAmount: 4., + usdAmount: 100., + usdPrice: 25., + }, + usdOrder{ + xlmAmount: 4., + usdAmount: 120., + usdPrice: 30., + }, + } + + trueAssetUsdPrice := 0. + pct := calcFairValuePct(bids, asks, trueAssetUsdPrice) + assert.Equal(t, 0., pct) + + trueAssetUsdPrice = 20. + pct = calcFairValuePct(bids, asks, trueAssetUsdPrice) + assert.Equal(t, 25., pct) + + trueAssetUsdPrice = 25. 
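+	// The mid price of these books is (30+20)/2 = 25, so a true USD price of 25 should give a 0% deviation.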
+ pct = calcFairValuePct(bids, asks, trueAssetUsdPrice) + assert.Equal(t, 0., pct) +} diff --git a/exp/services/market-tracker/client.go b/exp/services/market-tracker/client.go new file mode 100644 index 0000000000..6aa1daf99c --- /dev/null +++ b/exp/services/market-tracker/client.go @@ -0,0 +1,78 @@ +package main + +import ( + "math" + "time" + + hClient "github.com/stellar/go/clients/horizonclient" + hProtocol "github.com/stellar/go/protocols/horizon" +) + +type trackerClient struct { + horizon *hClient.Client +} + +func (tc *trackerClient) computeSpreadForTradePair(tp TradePair) (spread float64, err error) { + obStats, err := tc.getOrderBookForTradePair(tp) + if err != nil { + return 0.0, err + } + + highestBid := calcHighestBid(obStats.Bids) + lowestAsk := calcLowestAsk(obStats.Asks) + spread = calcSpread(highestBid, lowestAsk) + return +} + +func (tc *trackerClient) getOrderBookForTradePair(tp TradePair) (obStats hProtocol.OrderBookSummary, err error) { + req := hClient.OrderBookRequest{ + SellingAssetType: tp.SellingAsset.ProtocolAssetType, + SellingAssetCode: tp.SellingAsset.Code, + SellingAssetIssuer: tp.SellingAsset.IssuerAddress, + BuyingAssetType: tp.BuyingAsset.ProtocolAssetType, + BuyingAssetCode: tp.BuyingAsset.Code, + BuyingAssetIssuer: tp.BuyingAsset.IssuerAddress, + Limit: 200, + } + obStats, err = tc.horizon.OrderBook(req) + return +} + +func (tc *trackerClient) getAggTradesForTradePair(tp TradePair, start, end time.Time, res time.Duration) (taps []hProtocol.TradeAggregationsPage, err error) { + const maxLimit = 200 + req := hClient.TradeAggregationRequest{ + StartTime: start, + EndTime: end, + Resolution: res, + Offset: time.Duration(0), + BaseAssetType: tp.BuyingAsset.ProtocolAssetType, + BaseAssetCode: tp.BuyingAsset.Code, + BaseAssetIssuer: tp.BuyingAsset.IssuerAddress, + CounterAssetType: tp.SellingAsset.ProtocolAssetType, + CounterAssetCode: tp.SellingAsset.Code, + CounterAssetIssuer: tp.SellingAsset.IssuerAddress, + Limit: maxLimit, + } + + tap, err := tc.horizon.TradeAggregations(req) + if err != nil { + return + } + taps = append(taps, tap) + + // iterate through the remaining trade aggregations as needed, until error + // TODO: Check error for Horizon rate limiting, before adding volume metrics. 
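+	// The requested window spans (end-start)/res aggregation buckets; with at most maxLimit buckets per page,
+	// ceil(numRes/maxLimit) requests cover it, and the first request was already made above.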
+ counter := 1 + numRes := float64(end.Sub(start) / res) + numRequests := int(math.Ceil(numRes / maxLimit)) + for counter < numRequests { + tap, err = tc.horizon.NextTradeAggregationsPage(tap) + if err != nil { + return + } + + counter++ + taps = append(taps, tap) + } + return +} diff --git a/exp/services/market-tracker/config.go b/exp/services/market-tracker/config.go new file mode 100644 index 0000000000..885a7d7a3a --- /dev/null +++ b/exp/services/market-tracker/config.go @@ -0,0 +1,87 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + + hClient "github.com/stellar/go/clients/horizonclient" +) + +// Asset represents an asset on the Stellar network +type Asset struct { + ProtocolAssetType hClient.AssetType + AssetType string `json:"type"` + Code string `json:"code"` + IssuerAddress string `json:"issuerAddress"` + IssuerName string `json:"issuerName"` + Currency string `json:"currency"` +} + +func (a Asset) String() string { + if a.IssuerName != "" { + return fmt.Sprintf("%s:%s", a.Code, a.IssuerName) + } + return fmt.Sprintf("%s:%s", a.Code, a.IssuerAddress) +} + +// TradePair represents a trading pair on SDEX +type TradePair struct { + BuyingAsset Asset `json:"buyingAsset"` + SellingAsset Asset `json:"sellingAsset"` +} + +func (tp TradePair) String() string { + return fmt.Sprintf("%s / %s", tp.BuyingAsset, tp.SellingAsset) +} + +// Config represents the overall config of the application +type Config struct { + TradePairs []TradePair `json:"tradePairs"` + CheckIntervalSeconds int64 `json:"checkIntervalSeconds"` +} + +func computeAssetType(a *Asset) (err error) { + switch a.AssetType { + case "AssetType4": + a.ProtocolAssetType = hClient.AssetType4 + case "AssetType12": + a.ProtocolAssetType = hClient.AssetType12 + case "AssetTypeNative": + a.ProtocolAssetType = hClient.AssetTypeNative + default: + err = errors.New("unrecognized asset type") + } + return +} + +func check(err error) { + if err != nil { + panic(err) + } +} + +func loadConfig() Config { + configFile, err := os.Open("config_sample.json") + check(err) + + defer configFile.Close() + + byteValue, _ := ioutil.ReadAll(configFile) + + var config Config + err = json.Unmarshal(byteValue, &config) + check(err) + + for n := range config.TradePairs { + err = computeAssetType(&config.TradePairs[n].BuyingAsset) + check(err) + + computeAssetType(&config.TradePairs[n].SellingAsset) + check(err) + } + + return config +} diff --git a/exp/services/market-tracker/config_sample.json b/exp/services/market-tracker/config_sample.json new file mode 100644 index 0000000000..c69f2af312 --- /dev/null +++ b/exp/services/market-tracker/config_sample.json @@ -0,0 +1,19 @@ +{ + "tradePairs": [ + { + "buyingAsset": { + "code": "USD", + "issuerAddress": "GDUKMGUGDZQK6YHYA5Z6AY2G4XDSZPSZ3SW5UN3ARVMO6QSRDWP5YLEX", + "issuerName": "AnchorUSD", + "type": "AssetType4", + "currency": "USD" + }, + "sellingAsset": { + "code": "XLM", + "issuerAddress": "native", + "type": "AssetTypeNative" + } + } + ], + "checkIntervalSeconds": 10 +} \ No newline at end of file diff --git a/exp/services/market-tracker/go.mod b/exp/services/market-tracker/go.mod new file mode 100644 index 0000000000..775082f038 --- /dev/null +++ b/exp/services/market-tracker/go.mod @@ -0,0 +1,14 @@ +module github.com/stellar/market-tracker + +go 1.13 + +require ( + github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 // indirect + github.com/joho/godotenv v1.3.0 + github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect + 
github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2 // indirect + github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 + github.com/stellar/go v0.0.0-20211208234857-bf7909b45bd4 + github.com/stretchr/testify v1.7.0 + gopkg.in/matryer/try.v1 v1.0.0-20150601225556-312d2599e12e +) diff --git a/exp/services/market-tracker/go.sum b/exp/services/market-tracker/go.sum new file mode 100644 index 0000000000..3639d15a35 --- /dev/null +++ b/exp/services/market-tracker/go.sum @@ -0,0 +1,703 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.5.0/go.mod h1:c4nNYR1qdq7eaZ+jSc5fonrQN2k3M7sWATcYTiakjEo= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= 
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +firebase.google.com/go v3.12.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/adjust/goautoneg v0.0.0-20150426214442-d788f35a0315/go.mod h1:4U522XvlkqOY2AVBUM7ISHODDb6tdB+KAXfGaBDsWts= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f h1:zvClvFQwU++UpIUBGC8YmDlfhUrweEy1R1Fj1gu5iIM= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go v1.39.5/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= +github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/structs v1.0.0 h1:BrX964Rv5uQ3wwS+KRUAJCBBw5PQmgJfJ6v4yly5QwU= +github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955 h1:gmtGRvSexPU4B1T/yYo0sLOKzER1YT+b4kPxPpm0Ty4= +github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955/go.mod h1:vmp8DIyckQMXOPl0AQVHt+7n5h7Gb7hS6CUydiV8QeA= +github.com/getsentry/raven-go v0.0.0-20160805001729-c9d3cc542ad1/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/go-chi/chi v4.0.3+incompatible h1:gakN3pDJnzZN5jqFV2TEdF66rTfKeITyR8qu6ekICEY= +github.com/go-chi/chi v4.0.3+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-errors/errors v0.0.0-20150906023321-a41850380601 h1:jxTbmDuqQUTI6MscgbqB39vtxGfr2fi61nYIcFQUnlE= +github.com/go-errors/errors v0.0.0-20150906023321-a41850380601/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/packr v1.12.1/go.mod h1:H2dZhQFqHeZwr/5A/uGQkBp7xYuMGuzXFeKhYdcz5No= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5 h1:oERTZ1buOUYlpmKaqlO5fYmz8cZ1rYu5DieJzF4ZVmU= +github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/schema v1.1.0 h1:CamqUDOFUBqzrvxuz2vEwo8+SUdwsluFh7IlzJh30LY= +github.com/gorilla/schema v1.1.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= +github.com/graph-gophers/graphql-go v0.0.0-20190225005345-3e8838d4614c/go.mod h1:uJhtPXrcJLqyi0H5IuMFh+fgW+8cMMakK3Txrbk/WJE= +github.com/guregu/null v2.1.3-0.20151024101046-79c5bd36b615+incompatible/go.mod h1:ePGpQaN9cw0tj45IR5E5ehMvsFlLlQZAkkOXZurJ3NM= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jarcoal/httpmock v0.0.0-20161210151336-4442edb3db31 h1:Aw95BEvxJ3K6o9GGv5ppCd1P8hkeIeEJ30FO+OhOJpM= +github.com/jarcoal/httpmock v0.0.0-20161210151336-4442edb3db31/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v0.0.0-20161106143436-e3b7981a12dd h1:vQ0EEfHpdFUtNRj1ri25MUq5jb3Vma+kKhLyjeUTVow= +github.com/klauspost/compress v0.0.0-20161106143436-e3b7981a12dd/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20160302075316-09cded8978dc h1:WW8B7p7QBnFlqRVv/k6ro/S8Z7tCnYjJHcQNScx9YVs= +github.com/klauspost/cpuid v0.0.0-20160302075316-09cded8978dc/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/magiconair/properties v1.5.4/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/manucorporat/sse v0.0.0-20160126180136-ee05b128a739 h1:ykXz+pRRTibcSjG1yRhpdSHInF8yZY/mfn+Rz2Nd1rE= +github.com/manucorporat/sse v0.0.0-20160126180136-ee05b128a739/go.mod h1:zUx1mhth20V3VKgL5jbd1BSQcW4Fy6Qs4PZvQwRFwzM= +github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2 h1:JAEbJn3j/FrhdWA9jW8B5ajsLIjeuEHLi8xE4fk997o= +github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moul/http2curl v0.0.0-20161031194548-4e24498b31db h1:eZgFHVkk9uOTaOQLC6tgjkzdp7Ays8eEVecBcfHZlJQ= +github.com/moul/http2curl v0.0.0-20161031194548-4e24498b31db/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.9.0/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= +github.com/rubenv/sql-migrate v0.0.0-20190717103323-87ce952f7079/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= +github.com/segmentio/go-loggly v0.5.1-0.20171222203950-eb91657e62b2 h1:S4OC0+OBKz6mJnzuHioeEat74PuQ4Sgvbf8eus695sc= +github.com/segmentio/go-loggly v0.5.1-0.20171222203950-eb91657e62b2/go.mod h1:8zLRYR5npGjaOXgPSKat5+oOh+UHd8OdbS18iqX9F6Y= +github.com/sergi/go-diff v0.0.0-20161205080420-83532ca1c1ca h1:oR/RycYTFTVXzND5r4FdsvbnBn0HJXSVeNAnwaTXRwk= +github.com/sergi/go-diff v0.0.0-20161205080420-83532ca1c1ca/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod 
h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cobra v0.0.0-20160830174925-9c28e4bbd74e/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20161005214240-4bd69631f475/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v0.0.0-20150621231900-db7ff930a189/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/stellar/go v0.0.0-20211208234857-bf7909b45bd4 h1:D6Kj6vqT1cBP1hIAmnkeA4FyOzJfR+lRg8+SvFrD1vA= +github.com/stellar/go v0.0.0-20211208234857-bf7909b45bd4/go.mod h1:ZjEIAKldwfIdxI1N/vNX1TqUGwwcLWAa5XO4JdiAUIc= +github.com/stellar/go-xdr v0.0.0-20211103144802-8017fc4bdfee h1:fbVs0xmXpBvVS4GBeiRmAE3Le70ofAqFMch1GTiq/e8= +github.com/stellar/go-xdr v0.0.0-20211103144802-8017fc4bdfee/go.mod h1:yoxyU/M8nl9LKeWIoBrbDPQ7Cy+4jxRcWcOayZ4BMps= +github.com/stellar/throttled v2.2.3-0.20190823235211-89d75816f59d+incompatible/go.mod h1:7CJ23pXirXBJq45DqvO6clzTEGM/l1SfKrgrzLry8b4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= +github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tyler-smith/go-bip39 v0.0.0-20180618194314-52158e4697b8/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v0.0.0-20170109085056-0a7f0a797cd6 h1:s0IDmR1jFyWvOK7jVIuAsmHQaGkXUuTas8NXFUOwuAI= +github.com/valyala/fasthttp v0.0.0-20170109085056-0a7f0a797cd6/go.mod h1:+g/po7GqyG5E+1CNgquiIxJnsXEi5vwFn5weFujbO78= +github.com/xdrpp/goxdr v0.1.1 h1:E1B2c6E8eYhOVyd7yEpOyopzTPirUeF6mVOfXfGyJyc= +github.com/xdrpp/goxdr v0.1.1/go.mod h1:dXo1scL/l6s7iME1gxHWo2XCppbHEKZS7m/KyYWkNzA= +github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076 h1:KM4T3G70MiR+JtqplcYkNVoNz7pDwYaBxWBXQK804So= +github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c h1:XZWnr3bsDQWAZg4Ne+cPoXRPILrNlPNQfxBuwLl43is= +github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20161231055540-f06f290571ce h1:cVSRGH8cOveJNwFEEZLXtB+XMnRqKLjUP6V/ZFYQCXI= +github.com/xeipuuv/gojsonschema v0.0.0-20161231055540-f06f290571ce/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/yalp/jsonpath 
v0.0.0-20150812003900-31a79c7593bb h1:06WAhQa+mYv7BiOk13B/ywyTlkoE/S7uu6TBKU6FHnE= +github.com/yalp/jsonpath v0.0.0-20150812003900-31a79c7593bb/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v0.0.0-20170107030110-7b1b7adf999d h1:yJIizrfO599ot2kQ6Af1enICnwBD3XoxgX3MrMwot2M= +github.com/yudai/gojsondiff v0.0.0-20170107030110-7b1b7adf999d/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce h1:888GrqRxabUce7lj4OaoShPxodm3kXOMpSa85wdYzfY= +github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod 
h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net 
v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210223095934-7937bea0104d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod 
h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= 
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= 
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc 
v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0 h1:r5ptJ1tBxVAeqw4CrYWhXIMr0SybY3CDHuIbCg5CFVw= +gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0/go.mod h1:WtiW9ZA1LdaWqtQRo1VbIL/v4XZ8NDta+O/kSpGgVek= +gopkg.in/gorp.v1 v1.7.1/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/matryer/try.v1 v1.0.0-20150601225556-312d2599e12e h1:bJHzu9Qwc9wQRWJ/WVkJGAfs+riucl/tKAFNxf9pzqk= +gopkg.in/matryer/try.v1 v1.0.0-20150601225556-312d2599e12e/go.mod h1:tve0rTLdGlwnXF7iBO9rbAEyeXvuuPx0n4DvXS/Nw7o= +gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 
h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/tylerb/graceful.v1 v1.2.13/go.mod h1:yBhekWvR20ACXVObSSdD3u6S9DeSylanL2PAbAC/uJ8= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/exp/services/market-tracker/main.go b/exp/services/market-tracker/main.go new file mode 100644 index 0000000000..7a4348f7ae --- /dev/null +++ b/exp/services/market-tracker/main.go @@ -0,0 +1,31 @@ +package main + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" + + hClient "github.com/stellar/go/clients/horizonclient" +) + +type prometheusWatchedTP struct { + TradePair TradePair + Spread Spread + Volume Volume + Slippage Slippage + Orderbook Orderbook + FairValue FairValue +} + +var watchedTradePairs []prometheusWatchedTP + +func main() { + cfg := loadConfig() + c := trackerClient{hClient.DefaultPublicNetClient} + watchedTPs := configPrometheusWatchers(cfg.TradePairs) + trackSpreads(cfg, c, &watchedTPs) + trackVolumes(cfg, c, &watchedTPs) + + http.Handle("/metrics", promhttp.Handler()) + http.ListenAndServe(":2112", nil) +} diff --git a/exp/services/market-tracker/orders.go b/exp/services/market-tracker/orders.go new file mode 100644 index 0000000000..b4466d22da --- /dev/null +++ b/exp/services/market-tracker/orders.go @@ -0,0 +1,95 @@ +package main + +import ( + "sort" + "strconv" + + "github.com/prometheus/client_golang/prometheus" + hProtocol "github.com/stellar/go/protocols/horizon" +) + +// Orderbook tracks top-level orderbook statistics. +// Note that volume is denominated in USD for easiest viewing. +type Orderbook struct { + NumBids prometheus.Gauge + NumAsks prometheus.Gauge + BidBaseVolume prometheus.Gauge + BidUsdVolume prometheus.Gauge + AskBaseVolume prometheus.Gauge + AskUsdVolume prometheus.Gauge +} + +// usdOrder holds the USD representation of an XLM-based order on the DEX. +// This contains the amount of the asset in the order; its price in USD; and that amount in USD. 
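+// For illustration (arbitrary numbers): an order of 100 XLM with PriceR 4/2 and an XLM/USD price of 0.10
+// converts to usdPrice = (4/2)*0.10 = 0.20, usdAmount = 100*0.10 = 10, and baseAmount = usdAmount*baseUsdPrice;
+// see convertOrders below.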
+type usdOrder struct { + xlmAmount float64 + usdPrice float64 + usdAmount float64 + baseAmount float64 +} + +// convertBids converts a list of bids into dollar and base asset amounts and sorts them in decreasing price order. +func convertBids(bids []hProtocol.PriceLevel, xlmUsdPrice, baseUsdPrice float64) ([]usdOrder, error) { + convertedBids, err := convertOrders(bids, xlmUsdPrice, baseUsdPrice) + if err != nil { + return []usdOrder{}, err + } + + // sort in decreasing order + sort.Slice(convertedBids, func(i, j int) bool { + return convertedBids[i].usdPrice >= convertedBids[j].usdPrice + }) + + return convertedBids, nil +} + +func convertAsks(asks []hProtocol.PriceLevel, xlmUsdPrice, baseUsdPrice float64) ([]usdOrder, error) { + convertedAsks, err := convertOrders(asks, xlmUsdPrice, baseUsdPrice) + if err != nil { + return []usdOrder{}, err + } + + // sort in increasing order + sort.Slice(convertedAsks, func(i, j int) bool { + return convertedAsks[i].usdPrice <= convertedAsks[j].usdPrice + }) + return convertedAsks, nil +} + +func convertOrders(orders []hProtocol.PriceLevel, xlmUsdPrice, baseUsdPrice float64) ([]usdOrder, error) { + convertedOrders := []usdOrder{} + for _, order := range orders { + xlmAmt, err := strconv.ParseFloat(order.Amount, 64) + if err != nil { + return []usdOrder{}, err + } + + usdAmt := xlmAmt * xlmUsdPrice + usdPrice := float64(order.PriceR.N) / float64(order.PriceR.D) * xlmUsdPrice + baseAmt := usdAmt * baseUsdPrice + cOrder := usdOrder{ + xlmAmount: xlmAmt, + usdPrice: usdPrice, + usdAmount: usdAmt, + baseAmount: baseAmt, + } + + convertedOrders = append(convertedOrders, cOrder) + } + + return convertedOrders, nil +} + +func getOrdersUsdVolume(orders []usdOrder) (v float64) { + for _, o := range orders { + v += o.usdAmount + } + return +} + +func getOrdersBaseVolume(orders []usdOrder) (v float64) { + for _, o := range orders { + v += o.baseAmount + } + return +} diff --git a/exp/services/market-tracker/orders_test.go b/exp/services/market-tracker/orders_test.go new file mode 100644 index 0000000000..22a13774df --- /dev/null +++ b/exp/services/market-tracker/orders_test.go @@ -0,0 +1,74 @@ +package main + +import ( + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stretchr/testify/assert" +) + +var badAmtOrders = []hProtocol.PriceLevel{hProtocol.PriceLevel{ + PriceR: hProtocol.Price{ + N: 4, + D: 2, + }, + Price: "2.0", + Amount: "amount", +}} + +func TestConvertBids(t *testing.T) { + usdXlmPrice := 0.10 + basePrice := 0.10 + bids, err := convertBids(badAmtOrders, usdXlmPrice, basePrice) + assert.Error(t, err) + assert.Equal(t, 0, len(bids)) + + highBid := usdOrder{ + xlmAmount: 100.0, + usdAmount: 10.0, + baseAmount: 1.0, + usdPrice: 0.25, + } + + lowBid := usdOrder{ + xlmAmount: 50.0, + usdAmount: 5.0, + baseAmount: 0.5, + usdPrice: 0.2, + } + + bids, err = convertBids(hOrders, usdXlmPrice, basePrice) + assert.NoError(t, err) + assert.GreaterOrEqual(t, bids[0].usdPrice, bids[1].usdPrice) + assert.Equal(t, highBid, bids[0]) + assert.Equal(t, lowBid, bids[1]) +} + +func TestConvertAsks(t *testing.T) { + usdXlmPrice := 0.10 + basePrice := 0.10 + asks, err := convertAsks(badAmtOrders, usdXlmPrice, basePrice) + assert.Error(t, err) + assert.Equal(t, 0, len(asks)) + + lowAsk := usdOrder{ + xlmAmount: 50, + usdPrice: 0.2, + usdAmount: 5, + baseAmount: 0.5, + } + + highAsk := usdOrder{ + xlmAmount: 100, + usdPrice: 0.25, + usdAmount: 10, + baseAmount: 1, + } + + orders := []hProtocol.PriceLevel{hHighOrder, hLowOrder} + asks, err = 
convertAsks(orders, usdXlmPrice, basePrice) + assert.NoError(t, err) + assert.LessOrEqual(t, asks[0].usdPrice, asks[1].usdPrice) + assert.Equal(t, lowAsk, asks[0]) + assert.Equal(t, highAsk, asks[1]) +} diff --git a/exp/services/market-tracker/price.go b/exp/services/market-tracker/price.go new file mode 100644 index 0000000000..780e5d8f5d --- /dev/null +++ b/exp/services/market-tracker/price.go @@ -0,0 +1,270 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "strconv" + "strings" + "time" + + "github.com/joho/godotenv" + "gopkg.in/matryer/try.v1" +) + +const stelExURL = "https://api.stellar.expert/explorer/public/xlm-price" + +const ratesURL = "https://openexchangerates.org/api/latest.json" + +type cachedPrice struct { + price float64 + updated time.Time +} + +func mustCreateXlmPriceRequest() *http.Request { + numAttempts := 10 + var req *http.Request + err := try.Do(func(attempt int) (bool, error) { + var err error + req, err = createXlmPriceRequest() + if err != nil { + time.Sleep(time.Duration(attempt) * time.Second) + } + return attempt < numAttempts, err + }) + if err != nil { + // TODO: Add a fallback price API. + log.Fatal(err) + } + return req +} + +func createXlmPriceRequest() (*http.Request, error) { + err := godotenv.Load() + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", stelExURL, nil) + if err != nil { + return nil, err + } + + // TODO: Eliminate dependency on dotenv before monorepo conversion. + authKey := os.Getenv("STELLAR_EXPERT_AUTH_KEY") + authVal := os.Getenv("STELLAR_EXPERT_AUTH_VAL") + req.Header.Add(authKey, authVal) + + return req, nil +} + +func getLatestXlmPrice(req *http.Request) (float64, error) { + body, err := getPriceResponse(req) + if err != nil { + return 0.0, fmt.Errorf("got error from stellar expert price api: %s", err) + } + return parseStellarExpertLatestPrice(body) +} + +func getXlmPriceHistory(req *http.Request) ([]xlmPrice, error) { + body, err := getPriceResponse(req) + if err != nil { + return []xlmPrice{}, fmt.Errorf("got error from stellar expert price api: %s", err) + } + return parseStellarExpertPriceHistory(body) +} + +func getPriceResponse(req *http.Request) (string, error) { + client := &http.Client{} + + numAttempts := 10 + var resp *http.Response + err := try.Do(func(attempt int) (bool, error) { + + var err error + resp, err = client.Do(req) + if err != nil { + return attempt < numAttempts, err + } + + if resp.StatusCode != http.StatusOK { + time.Sleep(time.Duration(attempt) * time.Second) + err = fmt.Errorf("got status code %d", resp.StatusCode) + } + + return attempt < numAttempts, err + }) + + if err != nil { + return "", err + } + + defer resp.Body.Close() + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + body := string(bodyBytes) + return body, nil +} + +func parseStellarExpertPriceHistory(body string) ([]xlmPrice, error) { + // The Stellar Expert response has expected format: [[timestamp1,price1], [timestamp2,price2], ...] + // with the most recent timestamp and price first. We split that array to get strings of only "timestamp,price". + // We then split each of those strings and define a struct containing the timestamp and price. 
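+ // For example, a body of "[[1595000000,0.10],[1594990000,0.09]]" parses to
+ // []xlmPrice{{timestamp: 1595000000, price: 0.10}, {timestamp: 1594990000, price: 0.09}}
+ // (values here are illustrative only).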
+ if len(body) < 5 { + return []xlmPrice{}, fmt.Errorf("got ill-formed response body from stellar expert") + } + + body = body[2 : len(body)-2] + timePriceStrs := strings.Split(body, "],[") + + var xlmPrices []xlmPrice + for _, timePriceStr := range timePriceStrs { + timePrice := strings.Split(timePriceStr, ",") + if len(timePrice) != 2 { + return []xlmPrice{}, fmt.Errorf("got ill-formed time/price from stellar expert") + } + + ts, err := strconv.ParseInt(timePrice[0], 10, 64) + if err != nil { + return []xlmPrice{}, err + } + + p, err := strconv.ParseFloat(timePrice[1], 64) + if err != nil { + return []xlmPrice{}, err + } + + newXlmPrice := xlmPrice{ + timestamp: ts, + price: p, + } + xlmPrices = append(xlmPrices, newXlmPrice) + } + return xlmPrices, nil +} + +func parseStellarExpertLatestPrice(body string) (float64, error) { + // The Stellar Expert response has expected format: [[timestamp1,price1], [timestamp2,price2], ...] + // with the most recent timestamp and price first. + // We then split the remainder by ",". + // The first element will be the most recent timestamp, and the second will be the latest price. + // We format and return the most recent price. + lists := strings.Split(body, ",") + if len(lists) < 2 { + return 0.0, fmt.Errorf("mis-formed response from stellar expert") + } + + rawPriceStr := lists[1] + if len(rawPriceStr) < 2 { + return 0.0, fmt.Errorf("mis-formed price from stellar expert") + } + + priceStr := rawPriceStr[:len(rawPriceStr)-1] + price, err := strconv.ParseFloat(priceStr, 64) + if err != nil { + return 0.0, err + } + + return price, nil +} + +func mustCreateAssetPriceRequest() *http.Request { + numAttempts := 10 + var req *http.Request + err := try.Do(func(attempt int) (bool, error) { + var err error + req, err = createAssetPriceRequest() + if err != nil { + time.Sleep(time.Duration(attempt) * time.Second) + } + return attempt < numAttempts, err + }) + if err != nil { + // TODO: Add a fallback price API. + log.Fatal(err) + } + return req +} + +func createAssetPriceRequest() (*http.Request, error) { + err := godotenv.Load() + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", ratesURL, nil) + if err != nil { + return nil, err + } + + // TODO: Eliminate dependency on dotenv before monorepo conversion. + apiKey := os.Getenv("RATES_API_KEY") + apiVal := os.Getenv("RATES_API_VAL") + req.Header.Add(apiKey, apiVal) + return req, nil +} + +func getAssetUSDPrice(body, currency string) (float64, error) { + // The real asset price for USD will be 1 USD + if currency == "USD" { + return 1.0, nil + } else if currency == "" { + return 0.0, nil + } + + // we expect the body to contain a JSON response from the OpenExchangeRates API, + // including a "rates" field which maps currency code to USD rate. + // e.g., "USD": 1.0, "BRL": 5.2, etc. 
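+ // For example, a (hypothetical) body of `{"rates": {"USD": 1.0, "BRL": 5.2}}` with currency "BRL"
+ // returns 5.2; a missing "rates" field or an unknown currency code returns an error instead.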
+ m := make(map[string]interface{}) + json.Unmarshal([]byte(body), &m) + + rates := make(map[string]interface{}) + var ok bool + if rates, ok = m["rates"].(map[string]interface{}); !ok { + return 0.0, fmt.Errorf("could not get rates from api response") + } + + var rate float64 + if rate, ok = rates[currency].(float64); !ok { + return 0.0, fmt.Errorf("could not get rate for %s", currency) + } + + return rate, nil +} + +func updateAssetUsdPrice(currency string) (float64, error) { + assetReq, err := createAssetPriceRequest() + if err != nil { + return 0.0, fmt.Errorf("could not create asset price request: %s", err) + } + + assetMapStr, err := getPriceResponse(assetReq) + if err != nil { + return 0.0, fmt.Errorf("could not get asset price response from external api: %s", err) + } + + assetUsdPrice, err := getAssetUSDPrice(assetMapStr, currency) + if err != nil { + return 0.0, fmt.Errorf("could not parse asset price response from external api: %s", err) + } + + return assetUsdPrice, nil +} + +func createPriceCache(pairs []prometheusWatchedTP) map[string]cachedPrice { + pc := make(map[string]cachedPrice) + t := time.Now().Add(-2 * time.Hour) + for _, p := range pairs { + pc[p.TradePair.BuyingAsset.Currency] = cachedPrice{ + price: 0.0, + updated: t, + } + } + return pc +} diff --git a/exp/services/market-tracker/price_test.go b/exp/services/market-tracker/price_test.go new file mode 100644 index 0000000000..7c0c61869e --- /dev/null +++ b/exp/services/market-tracker/price_test.go @@ -0,0 +1,34 @@ +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCreateXlmPriceRequest(t *testing.T) { + req, err := createXlmPriceRequest() + assert.NoError(t, err) + assert.Equal(t, "GET", req.Method) + assert.Equal(t, stelExURL, req.URL.String()) +} + +func TestParseStellarExpertResponse(t *testing.T) { + body := "hello" + gotPrice, gotErr := parseStellarExpertLatestPrice(body) + assert.EqualError(t, gotErr, "mis-formed response from stellar expert") + + body = "hello," + gotPrice, gotErr = parseStellarExpertLatestPrice(body) + assert.EqualError(t, gotErr, "mis-formed price from stellar expert") + + body = "[[10001,hello]" + gotPrice, gotErr = parseStellarExpertLatestPrice(body) + assert.Error(t, gotErr) + + body = "[[100001,5.00],[100002,6.00]]" + wantPrice := 5.00 + gotPrice, gotErr = parseStellarExpertLatestPrice(body) + assert.NoError(t, gotErr) + assert.Equal(t, wantPrice, gotPrice) +} diff --git a/exp/services/market-tracker/spread.go b/exp/services/market-tracker/spread.go new file mode 100644 index 0000000000..50742bcbd4 --- /dev/null +++ b/exp/services/market-tracker/spread.go @@ -0,0 +1,125 @@ +package main + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// Spread tracks the percent spread at various market depths. +type Spread struct { + Top prometheus.Gauge + D100 prometheus.Gauge + D1K prometheus.Gauge + D5K prometheus.Gauge + D25K prometheus.Gauge + D50K prometheus.Gauge +} + +// Slippage tracks the bid and ask slippages at various market depths. 
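+// (Slippage at a given depth $D is read here as the percent difference between the best quote and the
+// average execution price for a $D order; the exact formula is in calcSlippageAtDepth, defined elsewhere
+// in this package, so this reading is an assumption.)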
+type Slippage struct { + BidD100 prometheus.Gauge + AskD100 prometheus.Gauge + BidD1K prometheus.Gauge + AskD1K prometheus.Gauge + BidD5K prometheus.Gauge + AskD5K prometheus.Gauge +} + +// FairValue tracks the reference price, +type FairValue struct { + Percent prometheus.Gauge + RefPrice prometheus.Gauge + DexPrice prometheus.Gauge +} + +func trackSpreads(cfg Config, c trackerClient, watchedTPsPtr *[]prometheusWatchedTP) { + watchedTPs := *watchedTPsPtr + priceCache := createPriceCache(watchedTPs) + req := mustCreateXlmPriceRequest() + go func() { + for { + xlmPrice, err := getLatestXlmPrice(req) + if err != nil { + fmt.Printf("error while getting latest price: %s", err) + } + + for i, wtp := range watchedTPs { + obStats, err := c.getOrderBookForTradePair(wtp.TradePair) + if err != nil { + fmt.Printf("error while getting orderbook stats for asset pair %s: %s", wtp.TradePair, err) + } + + spreadPct := calcSpreadPctForOrderBook(obStats) + if err != nil { + fmt.Printf("error while processing asset pair %s: %s", wtp.TradePair, err) + } + + watchedTPs[i].Spread.Top.Set(spreadPct) + + // we only compute spreads at various depths for xlm-based pairs, + // because our usd prices are in terms of xlm. + if wtp.TradePair.SellingAsset.Code != "XLM" { + continue + } + + trueAssetUsdPrice := 0.0 + currency := watchedTPs[i].TradePair.BuyingAsset.Currency + if priceCache[currency].updated.Before(time.Now().Add(-1 * time.Hour)) { + trueAssetUsdPrice, err = updateAssetUsdPrice(currency) + if err != nil { + fmt.Printf("error while getting asset price: %s\n", err) + return + } + + priceCache[currency] = cachedPrice{ + price: trueAssetUsdPrice, + updated: time.Now(), + } + } else { + trueAssetUsdPrice = priceCache[currency].price + } + + usdBids, err := convertBids(obStats.Bids, xlmPrice, trueAssetUsdPrice) + if err != nil { + fmt.Printf("error while converting bids to USD: %s", err) + continue + } + + usdAsks, err := convertAsks(obStats.Asks, xlmPrice, trueAssetUsdPrice) + if err != nil { + fmt.Printf("error while converting asks to USD: %s", err) + continue + } + + watchedTPs[i].FairValue.DexPrice.Set(calcMidPrice(usdBids, usdAsks)) + + watchedTPs[i].Orderbook.BidBaseVolume.Set(getOrdersBaseVolume(usdBids)) + watchedTPs[i].Orderbook.BidUsdVolume.Set(getOrdersUsdVolume(usdBids)) + watchedTPs[i].Orderbook.AskBaseVolume.Set(getOrdersBaseVolume(usdAsks)) + watchedTPs[i].Orderbook.AskUsdVolume.Set(getOrdersUsdVolume(usdAsks)) + watchedTPs[i].Orderbook.NumBids.Set(float64(len(usdBids))) + watchedTPs[i].Orderbook.NumAsks.Set(float64(len(usdAsks))) + + watchedTPs[i].Spread.D100.Set(calcSpreadPctAtDepth(usdBids, usdAsks, 100.)) + watchedTPs[i].Spread.D1K.Set(calcSpreadPctAtDepth(usdBids, usdAsks, 1000.)) + watchedTPs[i].Spread.D5K.Set(calcSpreadPctAtDepth(usdBids, usdAsks, 5000.)) + watchedTPs[i].Spread.D25K.Set(calcSpreadPctAtDepth(usdBids, usdAsks, 25000.)) + watchedTPs[i].Spread.D50K.Set(calcSpreadPctAtDepth(usdBids, usdAsks, 50000.)) + + watchedTPs[i].Slippage.BidD100.Set(calcSlippageAtDepth(usdBids, usdAsks, 100., true)) + watchedTPs[i].Slippage.AskD100.Set(calcSlippageAtDepth(usdBids, usdAsks, 100., false)) + watchedTPs[i].Slippage.BidD1K.Set(calcSlippageAtDepth(usdBids, usdAsks, 1000., true)) + watchedTPs[i].Slippage.AskD1K.Set(calcSlippageAtDepth(usdBids, usdAsks, 1000., false)) + watchedTPs[i].Slippage.BidD5K.Set(calcSlippageAtDepth(usdBids, usdAsks, 5000., true)) + watchedTPs[i].Slippage.AskD5K.Set(calcSlippageAtDepth(usdBids, usdAsks, 5000., false)) + + 
watchedTPs[i].FairValue.Percent.Set(calcFairValuePct(usdBids, usdAsks, trueAssetUsdPrice)) + watchedTPs[i].FairValue.RefPrice.Set(trueAssetUsdPrice) + } + + time.Sleep(time.Duration(cfg.CheckIntervalSeconds) * time.Second) + } + }() +} diff --git a/exp/services/market-tracker/volume.go b/exp/services/market-tracker/volume.go new file mode 100644 index 0000000000..df4367a27d --- /dev/null +++ b/exp/services/market-tracker/volume.go @@ -0,0 +1,433 @@ +package main + +import ( + "fmt" + "sort" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + hProtocol "github.com/stellar/go/protocols/horizon" +) + +// Volume stores volume of a various base pair in both XLM and USD. +// It also includes metadata about associated trades. +type Volume struct { + BaseVolumeBaseAsset prometheus.Gauge + BaseVolumeUsd prometheus.Gauge + CounterVolumeBaseAsset prometheus.Gauge + CounterVolumeUsd prometheus.Gauge + TradeCount prometheus.Gauge + TradeAvgAmt prometheus.Gauge +} + +type xlmPrice struct { + timestamp int64 + price float64 +} + +type volumeHist struct { + start int64 + end int64 + numTrades float64 + baseVolumeBaseAsset float64 + baseVolumeUsd float64 + counterVolumeBaseAsset float64 + counterVolumeUsd float64 +} + +func trackVolumes(cfg Config, c trackerClient, watchedTPsPtr *[]prometheusWatchedTP) { + watchedTPs := *watchedTPsPtr + volumeMap := initVolumes(cfg, c, watchedTPs) + + go func() { + updateVolume(cfg, c, watchedTPsPtr, volumeMap) + }() +} + +func initVolumes(cfg Config, c trackerClient, watchedTPs []prometheusWatchedTP) map[string][]volumeHist { + xlmReq := mustCreateXlmPriceRequest() + xlmPriceHist, err := getXlmPriceHistory(xlmReq) + if err != nil { + fmt.Printf("got error when getting xlm price history: %s\n", err) + } + + volumeHistMap := make(map[string][]volumeHist) + end := time.Now() + start := end.Add(time.Duration(-24 * time.Hour)) + res := 15 * 60 // resolution length, in seconds + cRes := time.Duration(res*1000) * time.Millisecond // horizon request must be in milliseconds + + for i, wtp := range watchedTPs { + // TODO: Calculate volume for assets with non-native counter. 
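+ // Skip pairs whose selling (counter) asset is an issued, non-native asset: the conversions
+ // below assume the counter side of the aggregation is native XLM (see the TODO above).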
+ if wtp.TradePair.SellingAsset.Code != "XLM" && wtp.TradePair.SellingAsset.IssuerAddress != "" { + continue + } + + currency := watchedTPs[i].TradePair.BuyingAsset.Currency + trueAssetUsdPrice, err := updateAssetUsdPrice(currency) + if err != nil { + fmt.Printf("error while getting asset price: %s\n", err) + return make(map[string][]volumeHist) + } + + taps, err := c.getAggTradesForTradePair(wtp.TradePair, start, end, cRes) + if err != nil { + fmt.Printf("got error getting agg trades for pair %s\n: %s", wtp.TradePair.String(), err) + } + + records := getAggRecords(taps) + volumeHist, err := constructVolumeHistory(records, xlmPriceHist, trueAssetUsdPrice, start, end, res) + if err != nil { + fmt.Printf("got error constructing volume history for pair %s\n: %s", wtp.TradePair.String(), err) + } + + volumeHistMap[wtp.TradePair.String()] = volumeHist + + day := end.Add(time.Duration(-24 * time.Hour)).Unix() + watchedTPs[i].Volume.BaseVolumeBaseAsset.Set(addBaseVolumeBaseAssetHistory(volumeHist, day)) + watchedTPs[i].Volume.BaseVolumeUsd.Set(addBaseVolumeUsdHistory(volumeHist, day)) + watchedTPs[i].Volume.CounterVolumeBaseAsset.Set(addCounterVolumeBaseHistory(volumeHist, day)) + watchedTPs[i].Volume.CounterVolumeUsd.Set(addCounterVolumeUsdHistory(volumeHist, day)) + watchedTPs[i].Volume.TradeCount.Set(addTradeCount(volumeHist, day)) + watchedTPs[i].Volume.TradeAvgAmt.Set(addTradeAvg(volumeHist, day)) + } + return volumeHistMap +} + +func updateVolume(cfg Config, c trackerClient, watchedTPsPtr *[]prometheusWatchedTP, volumeHistMap map[string][]volumeHist) { + req := mustCreateXlmPriceRequest() + historyUnit := time.Duration(15 * 60 * time.Second) // length of each individual unit of volume history + cRes := time.Duration(60*1000) * time.Millisecond // horizon client requests have a 1 minute resolution, in milliseconds + day := time.Duration(24 * 60 * 60 * time.Second) // number of seconds in a day + watchedTPs := *watchedTPsPtr + forLoopDuration := time.Duration(0) + + priceCache := createPriceCache(watchedTPs) + for { + time.Sleep(historyUnit - forLoopDuration) // wait before starting the update + + xlmUsdPrice, err := getLatestXlmPrice(req) + if err != nil { + fmt.Printf("error while getting latest price: %s", err) + } + + end := time.Now() + start := end.Add(-1 * historyUnit) + for i, wtp := range watchedTPs { + // TODO: Calculate volume for assets with non-native counter. 
+ if wtp.TradePair.SellingAsset.Code != "XLM" && wtp.TradePair.SellingAsset.IssuerAddress != "" { + continue + } + + trueAssetUsdPrice := 0.0 + currency := wtp.TradePair.BuyingAsset.Currency + if priceCache[currency].updated.Before(time.Now().Add(-1 * time.Hour)) { + trueAssetUsdPrice, err = updateAssetUsdPrice(currency) + if err != nil { + fmt.Printf("error while getting asset price: %s\n", err) + return + } + + priceCache[currency] = cachedPrice{ + price: trueAssetUsdPrice, + updated: time.Now(), + } + } else { + trueAssetUsdPrice = priceCache[currency].price + } + + tps := wtp.TradePair.String() + taps, err := c.getAggTradesForTradePair(wtp.TradePair, start, end, cRes) + if err != nil { + fmt.Printf("got error getting agg trades for pair %s\n: %s", tps, err) + return + } + + records := getAggRecords(taps) + sts := start.Unix() + ets := end.Unix() + counterVolume, err := totalRecordsCounterVolume(records, start, end) + if err != nil { + fmt.Printf("got error aggregating xlm volume for pair %s\n: %s", tps, err) + } + + baseVolume, err := totalRecordsBaseVolume(records, start, end) + if err != nil { + fmt.Printf("got error aggregating base volume for pair %s\n: %s", tps, err) + } + + numTrades, err := totalRecordsTradeCount(records, start, end) + if err != nil { + fmt.Printf("got error aggregating trade counts for pair %s\n: %s", tps, err) + } + + latestVolume := volumeHist{ + start: sts, + end: ets, + baseVolumeBaseAsset: baseVolume, + baseVolumeUsd: baseVolume / trueAssetUsdPrice, + counterVolumeBaseAsset: counterVolume * xlmUsdPrice * trueAssetUsdPrice, + counterVolumeUsd: counterVolume * xlmUsdPrice, + numTrades: numTrades, + } + + // get the volumes of the oldest history unit, for both the day and month + vh := volumeHistMap[tps] + oldestVolume := vh[int(day/historyUnit)] + + // remove the oldest volume, store the newest one + vh = vh[:len(vh)-1] + vh = append([]volumeHist{latestVolume}, vh...) + volumeHistMap[tps] = vh + + // update the volume metrics using the difference between the latest and oldest + // history units' volumes, as appropriate for that metric + watchedTPs[i].Volume.BaseVolumeBaseAsset.Add(latestVolume.baseVolumeBaseAsset - oldestVolume.baseVolumeBaseAsset) + watchedTPs[i].Volume.BaseVolumeUsd.Add(latestVolume.baseVolumeUsd - oldestVolume.baseVolumeUsd) + watchedTPs[i].Volume.CounterVolumeBaseAsset.Add(latestVolume.counterVolumeBaseAsset - oldestVolume.counterVolumeBaseAsset) + watchedTPs[i].Volume.CounterVolumeUsd.Add(latestVolume.counterVolumeUsd - oldestVolume.counterVolumeUsd) + watchedTPs[i].Volume.TradeCount.Add(latestVolume.numTrades - oldestVolume.numTrades) + watchedTPs[i].Volume.TradeAvgAmt.Add(latestVolume.counterVolumeUsd/latestVolume.numTrades - oldestVolume.counterVolumeUsd/oldestVolume.numTrades) + } + + forLoopDuration = time.Now().Sub(end) + } +} + +func getAggRecords(taps []hProtocol.TradeAggregationsPage) (records []hProtocol.TradeAggregation) { + for _, tap := range taps { + records = append(records, tap.Embedded.Records...) 
+ } + sort.Slice(records, func(i, j int) bool { return records[i].Timestamp > records[j].Timestamp }) + return +} + +func constructVolumeHistory(tas []hProtocol.TradeAggregation, xlmPrices []xlmPrice, assetPrice float64, start, end time.Time, res int) ([]volumeHist, error) { + if len(xlmPrices) < 2 { + return []volumeHist{}, fmt.Errorf("mis-formed xlm price history from stellar expert") + } + + volumeHistory := []volumeHist{} + priceIdx := -1 + recordIdx := 0 + currEnd := end + for currEnd.After(start) { + // find the weighted price for the current interval + cets := currEnd.Unix() + csts := cets - int64(res) + priceIdx = findTimestampPriceIndex(csts, xlmPrices, priceIdx) + + weightedXlmUsdPrice, err := calcWeightedPrice(csts, priceIdx, xlmPrices) + if err != nil { + return []volumeHist{}, err + } + + // find total volume of records in this interval + // TODO: This loop does not correctly include records before the start + // time. however, that should not happen, given that we define start before + // calling the horizon client. + currBaseVolume := 0.0 + currCounterVolume := 0.0 + currTradeCount := 0.0 + for recordIdx < len(tas) { + r := tas[recordIdx] + rts := r.Timestamp / 1000 + if rts < csts { + // if record is before timeframe, break + break + } else if rts > cets { + // if record is after timeframe, continue to next + // record, since this could be in range + recordIdx++ + continue + } else { + recordBaseVolume, err := strconv.ParseFloat(r.BaseVolume, 64) + if err != nil { + return []volumeHist{}, err + } + + recordCounterVolume, err := strconv.ParseFloat(r.CounterVolume, 64) + if err != nil { + return []volumeHist{}, err + } + + currBaseVolume += recordBaseVolume + currCounterVolume += recordCounterVolume + currTradeCount += float64(r.TradeCount) + recordIdx++ + } + } + + currVolume := volumeHist{ + start: csts, + end: cets, + numTrades: currTradeCount, + baseVolumeBaseAsset: currBaseVolume, + baseVolumeUsd: currBaseVolume / assetPrice, + counterVolumeBaseAsset: currCounterVolume * weightedXlmUsdPrice * assetPrice, + counterVolumeUsd: weightedXlmUsdPrice * currCounterVolume, + } + + currEnd = currEnd.Add(time.Duration(-1*res) * time.Second) + volumeHistory = append(volumeHistory, currVolume) + } + return volumeHistory, nil +} + +func addBaseVolumeBaseAssetHistory(history []volumeHist, end int64) (baseVolume float64) { + for _, vh := range history { + if vh.end < end { + break + } + baseVolume += vh.baseVolumeBaseAsset + } + return +} + +func addBaseVolumeUsdHistory(history []volumeHist, end int64) (baseVolume float64) { + for _, vh := range history { + if vh.end < end { + break + } + baseVolume += vh.baseVolumeUsd + } + return +} + +func addCounterVolumeBaseHistory(history []volumeHist, end int64) (counterVolume float64) { + for _, vh := range history { + if vh.end < end { + break + } + counterVolume += vh.counterVolumeBaseAsset + } + return +} + +func addCounterVolumeUsdHistory(history []volumeHist, end int64) (counterVolume float64) { + for _, vh := range history { + if vh.end < end { + break + } + counterVolume += vh.counterVolumeUsd + } + return +} + +func addTradeCount(history []volumeHist, end int64) (tradeCount float64) { + for _, vh := range history { + if vh.end < end { + break + } + tradeCount += float64(vh.numTrades) + } + return +} + +func addTradeAvg(history []volumeHist, end int64) (tradeAvg float64) { + totalAmt := 0. + totalTrades := 0. 
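+ // Note: if no history unit ends at or after `end`, totalTrades stays 0 and the
+ // division below yields NaN rather than 0.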
+ for _, vh := range history { + if vh.end < end { + break + } + totalAmt += vh.counterVolumeUsd + totalTrades += float64(vh.numTrades) + } + tradeAvg = totalAmt / totalTrades + return +} + +func totalRecordsCounterVolume(tas []hProtocol.TradeAggregation, start, end time.Time) (float64, error) { + tv := 0.0 + for _, ta := range tas { + ts := time.Unix(ta.Timestamp/1000, 0) // timestamps are milliseconds since epoch time + if ts.Before(start) || ts.After(end) { + return 0.0, fmt.Errorf("record at timestamp %v is out of time bounds %v to %v", ts, start, end) + } + + cv, err := strconv.ParseFloat(ta.CounterVolume, 64) + if err != nil { + return 0.0, err + } + + tv += cv + } + return tv, nil +} + +func totalRecordsBaseVolume(tas []hProtocol.TradeAggregation, start, end time.Time) (float64, error) { + tv := 0.0 + for _, ta := range tas { + ts := time.Unix(ta.Timestamp/1000, 0) // timestamps are milliseconds since epoch time + if ts.Before(start) || ts.After(end) { + return 0.0, fmt.Errorf("record at timestamp %v is out of time bounds %v to %v", ts, start, end) + } + + bv, err := strconv.ParseFloat(ta.BaseVolume, 64) + if err != nil { + return 0.0, err + } + + tv += bv + } + return tv, nil +} + +func totalRecordsTradeCount(tas []hProtocol.TradeAggregation, start, end time.Time) (float64, error) { + ttc := 0.0 + for _, ta := range tas { + ts := time.Unix(ta.Timestamp/1000, 0) // timestamps are milliseconds since epoch time + if ts.Before(start) || ts.After(end) { + return 0, fmt.Errorf("record at timestamp %v is out of time bounds %v to %v", ts, start, end) + } + + ttc += float64(ta.TradeCount) + } + return ttc, nil +} + +// findTimestampPriceIndex iterates through an array of timestamps and prices, and returns the +// index of the oldest such pair that is more recent than the given timestamp. +// This assumes those pairs are sorted by decreasing timestamp, (i.e. most recent first). +func findTimestampPriceIndex(timestamp int64, prices []xlmPrice, startIndex int) int { + index := startIndex + if index < 0 { + if timestamp > prices[0].timestamp { + return index + } + index = 0 + } + + for index < len(prices)-1 { + if prices[index].timestamp >= timestamp && timestamp >= prices[index+1].timestamp { + break + } + index++ + } + return index +} + +func calcWeightedPrice(timestamp int64, startIndex int, prices []xlmPrice) (float64, error) { + // we expect prices sorted in decreasing timestamp (i.e., most recent first) + // TODO: Use resolution to weight prices. 
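+ // Worked example using the ordering described above: with prices = [{ts: 200, price: 2.0}, {ts: 100, price: 1.0}]
+ // and startIndex = 0, a timestamp of 150 falls between the two entries and yields (2.0+1.0)/2 = 1.5.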
+ if startIndex < 0 { + if timestamp < prices[0].timestamp { + return 0.0, fmt.Errorf("update price index before calculating price") + } + return prices[0].price, nil + } else if startIndex >= len(prices)-1 { + if timestamp > prices[len(prices)-1].timestamp { + return 0.0, fmt.Errorf("update price index before calculating price") + } + return prices[len(prices)-1].price, nil + } + + if timestamp > prices[startIndex].timestamp || timestamp < prices[startIndex+1].timestamp { + return 0.0, fmt.Errorf("update price index before calculating price") + } + + avgPrice := (prices[startIndex].price + prices[startIndex+1].price) / 2 + return avgPrice, nil +} diff --git a/exp/services/market-tracker/volume_test.go b/exp/services/market-tracker/volume_test.go new file mode 100644 index 0000000000..2e60fa9ddd --- /dev/null +++ b/exp/services/market-tracker/volume_test.go @@ -0,0 +1,223 @@ +package main + +import ( + "testing" + "time" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stretchr/testify/assert" +) + +var pts int64 = 1594668800 +var res = 15 * 60 // 15 minutes, in seconds + +var xp1 = xlmPrice{ + timestamp: pts, + price: 1.00, +} + +var xp2 = xlmPrice{ + timestamp: pts - int64(res), + price: 2.00, +} + +var xp3 = xlmPrice{ + timestamp: pts - int64(2*res), + price: 3.00, +} + +var xp4 = xlmPrice{ + timestamp: pts - int64(3*res), + price: 4.00, +} + +var prices = []xlmPrice{xp1, xp2, xp3, xp4} + +func TestTotalRecordsBaseVolume(t *testing.T) { + res := 15 * 60 + ta1 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(res/2))} + ta1.BaseVolume = "100.0" + ta2 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(3*res/2))} + ta2.BaseVolume = "200.0" + ta3 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(5*res/2))} + ta3.BaseVolume = "300.0" + tas := []hProtocol.TradeAggregation{ta1, ta2, ta3} + + pt := time.Unix(pts, 0) + total, err := totalRecordsBaseVolume(tas, time.Unix(pts-int64(res*3), 0), pt) + assert.NoError(t, err) + assert.Equal(t, 600., total) + + total, err = totalRecordsBaseVolume(tas, time.Unix(pts-int64(res*2), 0), pt) + assert.Error(t, err) + assert.Equal(t, 0., total) +} + +func TestTotalRecordsCounterVolume(t *testing.T) { + res := 15 * 60 + ta1 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(res/2))} + ta1.CounterVolume = "100.0" + ta2 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(3*res/2))} + ta2.CounterVolume = "200.0" + ta3 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(5*res/2))} + ta3.CounterVolume = "300.0" + tas := []hProtocol.TradeAggregation{ta1, ta2, ta3} + + pt := time.Unix(pts, 0) + total, err := totalRecordsCounterVolume(tas, time.Unix(pts-int64(res*3), 0), pt) + assert.NoError(t, err) + assert.Equal(t, 600., total) + + total, err = totalRecordsCounterVolume(tas, time.Unix(pts-int64(res*2), 0), pt) + assert.Error(t, err) + assert.Equal(t, 0., total) +} + +func TestTotalRecordsTradeCount(t *testing.T) { + res := 15 * 60 + ta1 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(res/2))} + ta1.TradeCount = int64(100) + ta2 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(3*res/2))} + ta2.TradeCount = int64(200) + ta3 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(5*res/2))} + ta3.TradeCount = int64(300) + tas := []hProtocol.TradeAggregation{ta1, ta2, ta3} + + pt := time.Unix(pts, 0) + total, err := totalRecordsTradeCount(tas, time.Unix(pts-int64(res*3), 0), pt) + assert.NoError(t, err) + assert.Equal(t, 600., total) + + total, err = 
totalRecordsTradeCount(tas, time.Unix(pts-int64(res*2), 0), pt) + assert.Error(t, err) + assert.Equal(t, 0., total) +} + +func TestAddVolumeHistory(t *testing.T) { + // every 15 min over 30 days + numIntervals := 4 * 24 * 30 + vh := []volumeHist{} + i := 0 + for i < numIntervals { + s := pts - int64(i*15*60) + h := volumeHist{ + start: s, + end: s - 15*60, + baseVolumeBaseAsset: 100.0, + baseVolumeUsd: 10.0, + counterVolumeBaseAsset: 100.0, + counterVolumeUsd: 10.0, + } + vh = append(vh, h) + i++ + } + + // one day, in seconds + end := pts - 24*60*60 + baseBase := addBaseVolumeBaseAssetHistory(vh, end) + assert.Equal(t, 9600., baseBase) + baseUsd := addBaseVolumeUsdHistory(vh, end) + assert.Equal(t, 960., baseUsd) + counterBase := addCounterVolumeBaseHistory(vh, end) + assert.Equal(t, 9600., counterBase) + counterUsd := addCounterVolumeUsdHistory(vh, end) + assert.Equal(t, 960., counterUsd) +} + +func TestConstructVolumeHistory(t *testing.T) { + res := 15 * 60 + ta1 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(res/2))} + ta1.CounterVolume = "200.0" + ta1.BaseVolume = "100.0" + + ta2 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(3*res/2))} + ta2.CounterVolume = "400.0" + ta2.BaseVolume = "200.0" + + ta3 := hProtocol.TradeAggregation{Timestamp: 1000 * (pts - int64(5*res/2))} + ta3.CounterVolume = "600.0" + ta3.BaseVolume = "300.0" + + tas := []hProtocol.TradeAggregation{ta1, ta2, ta3} + start := time.Unix(pts-24*60*60, 0) + end := time.Unix(pts, 0) + + errPrices := []xlmPrice{} + assetUsdPrice := 10.0 + volumeHist, err := constructVolumeHistory(tas, errPrices, assetUsdPrice, start, end, res) + assert.Error(t, err) + assert.Equal(t, 0, len(volumeHist)) + + volumeHist, err = constructVolumeHistory(tas, prices, assetUsdPrice, start, end, res) + assert.NoError(t, err) + assert.Equal(t, 24*4, len(volumeHist)) + + assert.Equal(t, pts-int64(res), volumeHist[0].start) + assert.Equal(t, pts, volumeHist[0].end) + assert.Equal(t, 100.0, volumeHist[0].baseVolumeBaseAsset) + assert.Equal(t, 10.0, volumeHist[0].baseVolumeUsd) + assert.Equal(t, 3000.0, volumeHist[0].counterVolumeBaseAsset) + assert.Equal(t, 300.0, volumeHist[0].counterVolumeUsd) + + assert.Equal(t, pts-int64(2*res), volumeHist[1].start) + assert.Equal(t, pts-int64(res), volumeHist[1].end) + assert.Equal(t, 200.0, volumeHist[1].baseVolumeBaseAsset) + assert.Equal(t, 20.0, volumeHist[1].baseVolumeUsd) + assert.Equal(t, 10000.0, volumeHist[1].counterVolumeBaseAsset) + assert.Equal(t, 1000.0, volumeHist[1].counterVolumeUsd) + + assert.Equal(t, pts-int64(3*res), volumeHist[2].start) + assert.Equal(t, pts-int64(2*res), volumeHist[2].end) + assert.Equal(t, 300.0, volumeHist[2].baseVolumeBaseAsset) + assert.Equal(t, 30.0, volumeHist[2].baseVolumeUsd) + assert.Equal(t, 21000.0, volumeHist[2].counterVolumeBaseAsset) + assert.Equal(t, 2100.0, volumeHist[2].counterVolumeUsd) +} + +func TestFindTimestampPriceIndex(t *testing.T) { + idx := -1 + ts := pts + int64(res/2) + newIdx := findTimestampPriceIndex(ts, prices, idx) + assert.Equal(t, -1, newIdx) + + ts = pts - int64(res/2) + newIdx = findTimestampPriceIndex(ts, prices, idx) + assert.Equal(t, 0, newIdx) + + ts = pts - (int64(res) + 1) + newIdx = findTimestampPriceIndex(ts, prices, idx) + assert.Equal(t, 1, newIdx) + + ts = pts - (int64(2*res) + 1) + newIdx = findTimestampPriceIndex(ts, prices, idx) + assert.Equal(t, 2, newIdx) +} + +func TestCalcWeightedPrice(t *testing.T) { + idx := -1 + ts := pts - int64(res/2) + wp, err := calcWeightedPrice(ts, idx, prices) + 
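+ // With idx still -1, a timestamp older than the newest price means the
+ // caller has not yet advanced the index (see findTimestampPriceIndex), so
+ // calcWeightedPrice is expected to return an error here.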
assert.Error(t, err) + + ts = pts + int64(res/2) + wp, err = calcWeightedPrice(ts, idx, prices) + assert.NoError(t, err) + assert.Equal(t, 1., wp) + + idx = 0 + wp, err = calcWeightedPrice(ts, idx, prices) + assert.Error(t, err) + + ts = pts - int64(res/2) + wp, err = calcWeightedPrice(ts, idx, prices) + assert.NoError(t, err) + assert.Equal(t, 1.5, wp) + + idx = 4 + ts = pts - int64(res/2) + wp, err = calcWeightedPrice(ts, idx, prices) + assert.Error(t, err) + + ts = pts - int64(res*5) + wp, err = calcWeightedPrice(ts, idx, prices) +} diff --git a/exp/services/market-tracker/watchers.go b/exp/services/market-tracker/watchers.go new file mode 100644 index 0000000000..0832249d98 --- /dev/null +++ b/exp/services/market-tracker/watchers.go @@ -0,0 +1,157 @@ +package main + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +func configPrometheusWatchers(tps []TradePair) (watchedTPs []prometheusWatchedTP) { + for _, tp := range tps { + labels := prometheus.Labels{ + "tradePair": fmt.Sprintf("%s", tp), + "buyingAsset": fmt.Sprintf("%s", tp.BuyingAsset), + "sellingAsset": fmt.Sprintf("%s", tp.SellingAsset), + } + + pwtp := prometheusWatchedTP{ + TradePair: tp, + Spread: createSpread(labels), + Volume: createVolume(labels), + Slippage: createSlippage(labels), + Orderbook: createOrderbook(labels), + FairValue: createFairValue(labels), + } + watchedTPs = append(watchedTPs, pwtp) + } + return +} + +func createSpread(labels prometheus.Labels) Spread { + return Spread{ + Top: createSpreadGauge("", "", labels), + D100: createSpreadGauge("_100", "at depth $100", labels), + D1K: createSpreadGauge("_1K", "at depth $1000", labels), + D5K: createSpreadGauge("_5K", "at depth $5000", labels), + D25K: createSpreadGauge("_25K", "at depth $25,000", labels), + D50K: createSpreadGauge("_50K", "at depth $50,000", labels), + } +} + +func createSpreadGauge(depthShort, depthLong string, labels prometheus.Labels) prometheus.Gauge { + return promauto.NewGauge(prometheus.GaugeOpts{ + Name: fmt.Sprintf("stellar_market_tracker_spread%s", depthShort), + ConstLabels: labels, + Help: fmt.Sprintf("Percentage market spread %s", depthLong), + }) +} + +func createVolume(labels prometheus.Labels) Volume { + return Volume{ + BaseVolumeBaseAsset: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_volume_base_base", + ConstLabels: labels, + Help: "Base asset trading volume, in base asset, over last 1d", + }), + BaseVolumeUsd: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_volume_base_usd", + ConstLabels: labels, + Help: "Base asset trading volume, in USD, over last 1d", + }), + CounterVolumeBaseAsset: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_volume_counter_base", + ConstLabels: labels, + Help: "Counter asset trading volume, in base asset, over last 1d", + }), + CounterVolumeUsd: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_volume_counter_usd", + ConstLabels: labels, + Help: "Counter asset trading volume, in USD, over last 1d", + }), + TradeCount: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_tradecount", + ConstLabels: labels, + Help: "Number of trades over last 1d", + }), + TradeAvgAmt: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_tradeavg", + ConstLabels: labels, + Help: "Average trade amount over last 1d", + }), + } +} + +func createSlippage(labels prometheus.Labels) Slippage { + return 
Slippage{ + BidD100: createSlippageGauge("bid", "100", labels), + AskD100: createSlippageGauge("ask", "100", labels), + BidD1K: createSlippageGauge("bid", "1K", labels), + AskD1K: createSlippageGauge("ask", "1K", labels), + BidD5K: createSlippageGauge("bid", "5K", labels), + AskD5K: createSlippageGauge("ask", "5K", labels), + } +} + +func createSlippageGauge(orderType, depth string, labels prometheus.Labels) prometheus.Gauge { + return promauto.NewGauge(prometheus.GaugeOpts{ + Name: fmt.Sprintf("stellar_market_tracker_%s_slippage_%s", orderType, depth), + ConstLabels: labels, + Help: fmt.Sprintf("Slippage of %s at depth %s", orderType, depth), + }) +} + +func createOrderbook(labels prometheus.Labels) Orderbook { + return Orderbook{ + NumBids: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_numbids", + ConstLabels: labels, + Help: "Number of bids in the orderbook", + }), + NumAsks: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_numasks", + ConstLabels: labels, + Help: "Number of asks in the orderbook", + }), + BidBaseVolume: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_bidbasevol", + ConstLabels: labels, + Help: "Volume of bids in the orderbook in base currency", + }), + BidUsdVolume: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_bidusdvol", + ConstLabels: labels, + Help: "Volume of bids in the orderbook in USD", + }), + AskBaseVolume: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_askbasevol", + ConstLabels: labels, + Help: "Volume of asks in the orderbook in base", + }), + AskUsdVolume: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_askusdvol", + ConstLabels: labels, + Help: "Volume of asks in the orderbook in USD", + }), + } +} + +func createFairValue(labels prometheus.Labels) FairValue { + return FairValue{ + Percent: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_fmp", + ConstLabels: labels, + Help: "Pct difference of DEX value from fair market value", + }), + RefPrice: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_refprice", + ConstLabels: labels, + Help: "Reference price of real asset (in USD)", + }), + DexPrice: promauto.NewGauge(prometheus.GaugeOpts{ + Name: "stellar_market_tracker_price", + ConstLabels: labels, + Help: "Mid-market price on the DEX in USD", + }), + } +} diff --git a/exp/services/recoverysigner/Makefile b/exp/services/recoverysigner/Makefile new file mode 100644 index 0000000000..f5e357d356 --- /dev/null +++ b/exp/services/recoverysigner/Makefile @@ -0,0 +1,16 @@ +# Check if we need to prepend docker commands with sudo +SUDO := $(shell docker version >/dev/null 2>&1 || echo "sudo") + +# If TAG is not provided set default value +TAG ?= stellar/recoverysigner:$(shell git rev-parse --short HEAD)$(and $(shell git status -s),-dirty-$(shell id -u -n)) +# https://github.com/opencontainers/image-spec/blob/master/annotations.md +BUILD_DATE := $(shell date -u +%FT%TZ) + +docker-build: + cd ../../../ && \ + $(SUDO) docker build --pull --label org.opencontainers.image.created="$(BUILD_DATE)" \ + -f exp/services/recoverysigner/docker/Dockerfile -t $(TAG) . 
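+
+# Example invocation (the tag value here is illustrative; TAG defaults to the
+# git-derived value above):
+#   make docker-build TAG=stellar/recoverysigner:dev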
+ +docker-push: + cd ../../../ && \ + $(SUDO) docker push $(TAG) diff --git a/exp/services/recoverysigner/README-Firebase.md b/exp/services/recoverysigner/README-Firebase.md new file mode 100644 index 0000000000..d037755aab --- /dev/null +++ b/exp/services/recoverysigner/README-Firebase.md @@ -0,0 +1,24 @@ +# Recovery Signer: Firebase Setup + +This service uses Firebase to authenticate a user with an email address or +phone number. To configure a new Firebase project for use with recoverysigner +follow the steps below. These steps assume a default Firebase project setup. + +## Enable Phone Number Authentication + +1. Login to Firebase Console. +2. Click `Authentication` under `Develop` in the left sidebar. +3. Click `Sign-in method`. +4. Click `Phone`. +5. Toggle the feature to `Enable`. +6. Click `Save`. + +## Enable Email Address Authentication + +1. Login to Firebase Console. +2. Click `Authentication` under `Develop` in the left sidebar. +3. Click `Sign-in method`. +4. Click `Email/Password`. +5. Toggle the feature to `Enable`. +6. Toggle the `Email link (passwordless sign-in)`. +7. Click `Save`. diff --git a/exp/services/recoverysigner/README.md b/exp/services/recoverysigner/README.md new file mode 100644 index 0000000000..f450d67695 --- /dev/null +++ b/exp/services/recoverysigner/README.md @@ -0,0 +1,80 @@ +# Recovery Signer + +This is an incomplete and work-in-progress implementation of the [SEP-30] +Recovery Signer protocol v0.7.0. + +A Recovery Signer is a server that can help a user regain control of a Stellar +account if they have lost their secret key. A user registers their account with +a Recovery Signer by adding it as a signer, and informs the Recovery Signer +that any user proving access to a phone number or email address can have +transactions signed. A user who has registered their account with two or more +Recovery Signers can recover the account with their help. + +This implementation uses Firebase to authenticate a user with an email address +or phone number. To configure a Firebase project for use with recoverysigner +see [README-Firebase.md](README-Firebase.md). + +This implementation is not polished and is still experimental. +Running this implementation in production is not recommended. + +## Usage + +``` +$ recoverysigner --help +SEP-30 Recovery Signer server + +Usage: + recoverysigner [command] [flags] + recoverysigner [command] + +Available Commands: + db Run database operations + serve Run the SEP-30 Recovery Signer server + +Use "recoverysigner [command] --help" for more information about a command. 
+``` + +## Usage: serve + +``` +$ recoverysigner serve --help +Run the SEP-30 Recovery Signer server + +Usage: + recoverysigner serve [flags] + +Flags: + --admin-port int Port to listen and serve admin functionality including metrics (ADMIN_PORT) + --allowed-source-accounts string Stellar account(s) allowed as source accounts in transactions signed for all users in addition to the registered account comma separated (important: these accounts must never be registered accounts and must never have the signer configured that is a signing key used by this server) (ALLOWED_SOURCE_ACCOUNTS) + --db-max-open-conns int Database max open connections (DB_MAX_OPEN_CONNS) (default 20) + --db-url string Database URL (DB_URL) (default "postgres://localhost:5432/?sslmode=disable") + --firebase-project-id string Firebase project ID to use for validating Firebase JWTs (FIREBASE_PROJECT_ID) + --metrics-namespace string Namespace to use for metric names prefixed to metrics reported (METRICS_NAMESPACE) (default "recoverysigner") + --network-passphrase string Network passphrase of the Stellar network transactions should be signed for (NETWORK_PASSPHRASE) (default "Test SDF Network ; September 2015") + --port int Port to listen and serve on (PORT) (default 8000) + --sep10-jwks string JSON Web Key Set (JWKS) containing one or more keys used to validate SEP-10 JWTs (if the key is an asymmetric key that has separate public and private key, the JWK need only contain the public key) (if multiple keys are provided they will all attempt verification the key ID will be ignored although logged) (SEP10_JWKS) + --sep10-jwt-issuer string JWT issuer to verify is in the SEP-10 JWT iss field (not checked if empty) (SEP10_JWT_ISSUER) + --signing-key string Stellar signing key(s) used for signing transactions comma separated (first key is preferred signer) (will be deprecated with per-account keys in the future) (SIGNING_KEY) +``` + +## Usage: db + +``` +$ recoverysigner db --help +Run database operations + +Usage: + recoverysigner db [flags] + recoverysigner db [command] + +Available Commands: + migrate Run migrations on the database + +Flags: + --db-url string Database URL (DB_URL) (default "postgres://localhost:5432/?sslmode=disable") + +Use "recoverysigner db [command] --help" for more information about a command. 
+``` + +[SEP-30]: https://github.com/stellar/stellar-protocol/blob/3e05bb668f94793545588106af74699b8d6b02d6/ecosystem/sep-0030.md +[README-Firebase.md]: README-Firebase.md diff --git a/exp/services/recoverysigner/cmd/db.go b/exp/services/recoverysigner/cmd/db.go new file mode 100644 index 0000000000..88b3a99b86 --- /dev/null +++ b/exp/services/recoverysigner/cmd/db.go @@ -0,0 +1,113 @@ +package cmd + +import ( + "go/types" + "strconv" + "strings" + + migrate "github.com/rubenv/sql-migrate" + "github.com/spf13/cobra" + dbpkg "github.com/stellar/go/exp/services/recoverysigner/internal/db" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbmigrate" + "github.com/stellar/go/support/config" + supportlog "github.com/stellar/go/support/log" +) + +type DBCommand struct { + Logger *supportlog.Entry + DatabaseURL string +} + +func (c *DBCommand) Command() *cobra.Command { + configOpts := config.ConfigOptions{ + { + Name: "db-url", + Usage: "Database URL", + OptType: types.String, + ConfigKey: &c.DatabaseURL, + FlagDefault: "postgres://localhost:5432/?sslmode=disable", + Required: true, + }, + } + cmd := &cobra.Command{ + Use: "db", + Short: "Run database operations", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + configOpts.Require() + configOpts.SetValues() + }, + Run: func(cmd *cobra.Command, args []string) { + cmd.Help() + }, + } + configOpts.Init(cmd) + + migrateCmd := &cobra.Command{ + Use: "migrate [up|down] [count]", + Short: "Run migrations on the database", + Run: func(cmd *cobra.Command, args []string) { + c.Migrate(cmd, args) + }, + } + cmd.AddCommand(migrateCmd) + + return cmd +} + +func (c *DBCommand) Migrate(cmd *cobra.Command, args []string) { + db, err := dbpkg.Open(c.DatabaseURL) + if err != nil { + c.Logger.Errorf("Error opening database: %s", err.Error()) + return + } + + if len(args) < 1 { + cmd.Help() + return + } + dirStr := args[0] + + var dir migrate.MigrationDirection + switch dirStr { + case "down": + dir = migrate.Down + case "up": + dir = migrate.Up + default: + c.Logger.Errorf("Invalid migration direction, must be 'up' or 'down'.") + return + } + + var count int + if len(args) >= 2 { + count, err = strconv.Atoi(args[1]) + if err != nil { + c.Logger.Errorf("Invalid migration count, must be a number.") + return + } + if count < 1 { + c.Logger.Errorf("Invalid migration count, must be a number greater than zero.") + return + } + } + + migrations, err := dbmigrate.PlanMigration(db, dir, count) + if err != nil { + c.Logger.Errorf("Error planning migration: %s", err.Error()) + return + } + if len(migrations) > 0 { + c.Logger.Infof("Migrations to apply %s: %s", dirStr, strings.Join(migrations, ", ")) + } + + n, err := dbmigrate.Migrate(db, dir, count) + if err != nil { + c.Logger.Errorf("Error applying migrations: %s", err.Error()) + return + } + if n > 0 { + c.Logger.Infof("Successfully applied %d migrations %s.", n, dirStr) + } else { + c.Logger.Infof("No migrations applied %s.", dirStr) + } +} diff --git a/exp/services/recoverysigner/cmd/db_test.go b/exp/services/recoverysigner/cmd/db_test.go new file mode 100644 index 0000000000..028a120fce --- /dev/null +++ b/exp/services/recoverysigner/cmd/db_test.go @@ -0,0 +1,247 @@ +package cmd + +import ( + "testing" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + dbpkg "github.com/stellar/go/exp/services/recoverysigner/internal/db" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stellar/go/support/log" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +func TestDBCommand_Migrate_upDownAll(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + log := log.New() + + dbCommand := DBCommand{ + Logger: log, + DatabaseURL: db.DSN, + } + + // Migrate Up + { + logsGet := log.StartTest(logrus.InfoLevel) + + dbCommand.Migrate(&cobra.Command{}, []string{"up"}) + + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "20200309000000-initial-1.sql", + "20200309000001-initial-2.sql", + "20200311000000-create-accounts.sql", + "20200311000001-create-identities.sql", + "20200311000002-create-auth-methods.sql", + "20200320000000-create-accounts-audit.sql", + "20200320000001-create-identities-audit.sql", + "20200320000002-create-auth-methods-audit.sql", + } + assert.Equal(t, wantIDs, ids) + + logs := logsGet() + messages := []string{} + for _, l := range logs { + messages = append(messages, l.Message) + } + wantMessages := []string{ + "Migrations to apply up: 20200309000000-initial-1.sql, 20200309000001-initial-2.sql, 20200311000000-create-accounts.sql, 20200311000001-create-identities.sql, 20200311000002-create-auth-methods.sql, 20200320000000-create-accounts-audit.sql, 20200320000001-create-identities-audit.sql, 20200320000002-create-auth-methods-audit.sql", + "Successfully applied 8 migrations up.", + } + assert.Equal(t, wantMessages, messages) + } + + // Migrate Down + { + logsGet := log.StartTest(logrus.InfoLevel) + + dbCommand.Migrate(&cobra.Command{}, []string{"down"}) + + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + assert.Empty(t, ids) + + logs := logsGet() + messages := []string{} + for _, l := range logs { + messages = append(messages, l.Message) + } + wantMessages := []string{ + "Migrations to apply down: 20200320000002-create-auth-methods-audit.sql, 20200320000001-create-identities-audit.sql, 20200320000000-create-accounts-audit.sql, 20200311000002-create-auth-methods.sql, 20200311000001-create-identities.sql, 20200311000000-create-accounts.sql, 20200309000001-initial-2.sql, 20200309000000-initial-1.sql", + "Successfully applied 8 migrations down.", + } + assert.Equal(t, wantMessages, messages) + } +} + +func TestDBCommand_Migrate_upTwoDownOne(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + log := log.New() + + dbCommand := DBCommand{ + Logger: log, + DatabaseURL: db.DSN, + } + + // Migrate Up 2 + { + logsGet := log.StartTest(logrus.InfoLevel) + + dbCommand.Migrate(&cobra.Command{}, []string{"up", "2"}) + + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + ids := []string{} + err = session.Unsafe().Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "20200309000000-initial-1.sql", + "20200309000001-initial-2.sql", + } + assert.Equal(t, wantIDs, ids) + + logs := logsGet() + messages := []string{} + for _, l := range logs { + messages = append(messages, l.Message) + } + wantMessages := []string{ + "Migrations to apply up: 20200309000000-initial-1.sql, 20200309000001-initial-2.sql", + "Successfully applied 2 migrations up.", + } + assert.Equal(t, wantMessages, messages) + } + + // Migrate Down 1 + { + logsGet := log.StartTest(logrus.InfoLevel) + + dbCommand.Migrate(&cobra.Command{}, []string{"down", "1"}) + + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) 
+ ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "20200309000000-initial-1.sql", + } + assert.Equal(t, wantIDs, ids) + + logs := logsGet() + messages := []string{} + for _, l := range logs { + messages = append(messages, l.Message) + } + wantMessages := []string{ + "Migrations to apply down: 20200309000001-initial-2.sql", + "Successfully applied 1 migrations down.", + } + assert.Equal(t, wantMessages, messages) + } +} + +func TestDBCommand_Migrate_invalidDirection(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + log := log.New() + + dbCommand := DBCommand{ + Logger: log, + DatabaseURL: db.DSN, + } + + logsGet := log.StartTest(logrus.InfoLevel) + + dbCommand.Migrate(&cobra.Command{}, []string{"invalid"}) + + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + tables := []string{} + err = session.Select(&tables, `SELECT table_name FROM information_schema.tables WHERE table_schema='public'`) + require.NoError(t, err) + assert.Empty(t, tables) + + logs := logsGet() + messages := []string{} + for _, l := range logs { + messages = append(messages, l.Message) + } + wantMessages := []string{ + "Invalid migration direction, must be 'up' or 'down'.", + } + assert.Equal(t, wantMessages, messages) +} + +func TestDBCommand_Migrate_invalidCount(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + log := log.New() + + dbCommand := DBCommand{ + Logger: log, + DatabaseURL: db.DSN, + } + + logsGet := log.StartTest(logrus.InfoLevel) + + dbCommand.Migrate(&cobra.Command{}, []string{"down", "invalid"}) + dbCommand.Migrate(&cobra.Command{}, []string{"up", "invalid"}) + + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + tables := []string{} + err = session.Select(&tables, `SELECT table_name FROM information_schema.tables WHERE table_schema='public'`) + require.NoError(t, err) + assert.Empty(t, tables) + + logs := logsGet() + messages := []string{} + for _, l := range logs { + messages = append(messages, l.Message) + } + wantMessages := []string{ + "Invalid migration count, must be a number.", + "Invalid migration count, must be a number.", + } + assert.Equal(t, wantMessages, messages) +} + +func TestDBCommand_Migrate_zeroCount(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + log := log.New() + + dbCommand := DBCommand{ + Logger: log, + DatabaseURL: db.DSN, + } + + logsGet := log.StartTest(logrus.InfoLevel) + + dbCommand.Migrate(&cobra.Command{}, []string{"down", "0"}) + dbCommand.Migrate(&cobra.Command{}, []string{"up", "0"}) + + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + tables := []string{} + err = session.Select(&tables, `SELECT table_name FROM information_schema.tables WHERE table_schema='public'`) + require.NoError(t, err) + assert.Empty(t, tables) + + logs := logsGet() + messages := []string{} + for _, l := range logs { + messages = append(messages, l.Message) + } + wantMessages := []string{ + "Invalid migration count, must be a number greater than zero.", + "Invalid migration count, must be a number greater than zero.", + } + assert.Equal(t, wantMessages, messages) +} diff --git a/exp/services/recoverysigner/cmd/serve.go b/exp/services/recoverysigner/cmd/serve.go new file mode 100644 index 0000000000..4ee73ee812 --- /dev/null +++ b/exp/services/recoverysigner/cmd/serve.go @@ -0,0 +1,121 @@ +package cmd + +import ( + "go/types" + + "github.com/spf13/cobra" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve" + 
"github.com/stellar/go/network" + "github.com/stellar/go/support/config" + supportlog "github.com/stellar/go/support/log" +) + +type ServeCommand struct { + Logger *supportlog.Entry +} + +func (c *ServeCommand) Command() *cobra.Command { + opts := serve.Options{ + Logger: c.Logger, + } + configOpts := config.ConfigOptions{ + { + Name: "port", + Usage: "Port to listen and serve on", + OptType: types.Int, + ConfigKey: &opts.Port, + FlagDefault: 8000, + Required: true, + }, + { + Name: "db-url", + Usage: "Database URL", + OptType: types.String, + ConfigKey: &opts.DatabaseURL, + FlagDefault: "postgres://localhost:5432/?sslmode=disable", + Required: false, + }, + { + Name: "db-max-open-conns", + Usage: "Database max open connections", + OptType: types.Int, + ConfigKey: &opts.DatabaseMaxOpenConns, + FlagDefault: 20, + Required: false, + }, + { + Name: "network-passphrase", + Usage: "Network passphrase of the Stellar network transactions should be signed for", + OptType: types.String, + ConfigKey: &opts.NetworkPassphrase, + FlagDefault: network.TestNetworkPassphrase, + Required: true, + }, + { + Name: "signing-key", + Usage: "Stellar signing key(s) used for signing transactions comma separated (first key is preferred signer) (will be deprecated with per-account keys in the future)", + OptType: types.String, + ConfigKey: &opts.SigningKeys, + Required: true, + }, + { + Name: "sep10-jwks", + Usage: "JSON Web Key Set (JWKS) containing one or more keys used to validate SEP-10 JWTs (if the key is an asymmetric key that has separate public and private key, the JWK need only contain the public key) (if multiple keys are provided they will all attempt verification the key ID will be ignored although logged)", + OptType: types.String, + ConfigKey: &opts.SEP10JWKS, + Required: true, + }, + { + Name: "sep10-jwt-issuer", + Usage: "JWT issuer to verify is in the SEP-10 JWT iss field (not checked if empty)", + OptType: types.String, + ConfigKey: &opts.SEP10JWTIssuer, + Required: false, + }, + { + Name: "firebase-project-id", + Usage: "Firebase project ID to use for validating Firebase JWTs", + OptType: types.String, + ConfigKey: &opts.FirebaseProjectID, + Required: true, + }, + { + Name: "admin-port", + Usage: "Port to listen and serve admin functionality including metrics", + OptType: types.Int, + ConfigKey: &opts.AdminPort, + FlagDefault: 0, + Required: false, + }, + { + Name: "metrics-namespace", + Usage: "Namespace to use for metric names prefixed to metrics reported", + OptType: types.String, + ConfigKey: &opts.MetricsNamespace, + FlagDefault: "recoverysigner", + Required: false, + }, + { + Name: "allowed-source-accounts", + Usage: "Stellar account(s) allowed as source accounts in transactions signed for all users in addition to the registered account comma separated (important: these accounts must never be registered accounts and must never have the signer configured that is a signing key used by this server)", + OptType: types.String, + ConfigKey: &opts.AllowedSourceAccounts, + Required: false, + }, + } + cmd := &cobra.Command{ + Use: "serve", + Short: "Run the SEP-30 Recovery Signer server", + Run: func(_ *cobra.Command, _ []string) { + configOpts.Require() + configOpts.SetValues() + c.Run(opts) + }, + } + configOpts.Init(cmd) + return cmd +} + +func (c *ServeCommand) Run(opts serve.Options) { + serve.Serve(opts) +} diff --git a/exp/services/recoverysigner/docker/Dockerfile b/exp/services/recoverysigner/docker/Dockerfile new file mode 100644 index 0000000000..54a1e3df4f --- /dev/null +++ 
b/exp/services/recoverysigner/docker/Dockerfile @@ -0,0 +1,14 @@ +FROM golang:1.17 as build + +ADD . /src/recoverysigner +WORKDIR /src/recoverysigner +RUN go build -o /bin/recoverysigner ./exp/services/recoverysigner + + +FROM ubuntu:20.04 + +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates +COPY --from=build /bin/recoverysigner /app/ +EXPOSE 8000 +ENTRYPOINT ["/app/recoverysigner"] +CMD ["serve"] diff --git a/exp/services/recoverysigner/internal/account/account.go b/exp/services/recoverysigner/internal/account/account.go new file mode 100644 index 0000000000..d69fcc2560 --- /dev/null +++ b/exp/services/recoverysigner/internal/account/account.go @@ -0,0 +1,34 @@ +package account + +type Account struct { + Address string + Identities []Identity +} + +type Identity struct { + Role string + AuthMethods []AuthMethod +} + +type AuthMethodType string + +func (t AuthMethodType) Valid() bool { + return AuthMethodTypes[t] +} + +const ( + AuthMethodTypeAddress AuthMethodType = "stellar_address" + AuthMethodTypePhoneNumber AuthMethodType = "phone_number" + AuthMethodTypeEmail AuthMethodType = "email" +) + +var AuthMethodTypes = map[AuthMethodType]bool{ + AuthMethodTypeAddress: true, + AuthMethodTypePhoneNumber: true, + AuthMethodTypeEmail: true, +} + +type AuthMethod struct { + Type AuthMethodType + Value string +} diff --git a/exp/services/recoverysigner/internal/account/db_store.go b/exp/services/recoverysigner/internal/account/db_store.go new file mode 100644 index 0000000000..5419c07f5a --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store.go @@ -0,0 +1,9 @@ +package account + +import ( + "github.com/jmoiron/sqlx" +) + +type DBStore struct { + DB *sqlx.DB +} diff --git a/exp/services/recoverysigner/internal/account/db_store_add.go b/exp/services/recoverysigner/internal/account/db_store_add.go new file mode 100644 index 0000000000..b2687e0777 --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_add.go @@ -0,0 +1,55 @@ +package account + +import "github.com/lib/pq" + +func (s *DBStore) Add(a Account) error { + tx, err := s.DB.Beginx() + if err != nil { + return err + } + defer tx.Rollback() + + accountID := int64(0) + err = tx.Get(&accountID, ` + INSERT INTO accounts (address) + VALUES ($1) + RETURNING id + `, a.Address) + if err != nil { + // 23505 is the PostgreSQL error for Unique Violation. + // See https://www.postgresql.org/docs/9.2/errcodes-appendix.html. 
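+ // A unique violation on the address column means the account is already
+ // registered, so it is surfaced as ErrAlreadyExists rather than a generic
+ // database error. The deferred tx.Rollback() above closes the transaction
+ // on this and every other error path; it is effectively a no-op once Commit
+ // succeeds.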
+ if pqErr, ok := err.(*pq.Error); ok && pqErr.Code == "23505" { + return ErrAlreadyExists + } + return err + } + + for _, i := range a.Identities { + identityID := int64(0) + err = tx.Get(&identityID, ` + INSERT INTO identities (account_id, role) + VALUES ($1, $2) + RETURNING id + `, accountID, i.Role) + if err != nil { + return err + } + + for _, m := range i.AuthMethods { + _, err = tx.Exec(` + INSERT INTO auth_methods (account_id, identity_id, type_, value) + VALUES ($1, $2, $3, $4) + `, accountID, identityID, m.Type, m.Value) + if err != nil { + return err + } + } + } + + err = tx.Commit() + if err != nil { + return err + } + + return nil +} diff --git a/exp/services/recoverysigner/internal/account/db_store_add_test.go b/exp/services/recoverysigner/internal/account/db_store_add_test.go new file mode 100644 index 0000000000..9d554333a9 --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_add_test.go @@ -0,0 +1,190 @@ +package account + +import ( + "testing" + + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAdd(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + a := Account{ + Address: "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT", + Identities: []Identity{ + { + Role: "sender", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {Type: AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err := store.Add(a) + require.NoError(t, err) + + // Check the account row has been added. + { + type row struct { + ID int64 `db:"id"` + Address string `db:"address"` + } + rows := []row{} + err = session.Select(&rows, `SELECT id, address FROM accounts ORDER BY id`) + require.NoError(t, err) + wantRows := []row{ + { + ID: 1, + Address: "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT", + }, + } + assert.Equal(t, wantRows, rows) + } + + // Check the identity rows have been added. + { + type row struct { + AccountID int64 `db:"account_id"` + ID int64 `db:"id"` + Role string `db:"role"` + } + rows := []row{} + err = session.Select(&rows, `SELECT account_id, id, role FROM identities ORDER BY id`) + require.NoError(t, err) + wantRows := []row{ + { + AccountID: 1, + ID: 1, + Role: "sender", + }, + { + AccountID: 1, + ID: 2, + Role: "receiver", + }, + } + assert.Equal(t, wantRows, rows) + } + + // Check the auth method rows have been added. 
+ { + type row struct { + AccountID int64 `db:"account_id"` + IdentityID int64 `db:"identity_id"` + ID int64 `db:"id"` + Type string `db:"type_"` + Value string `db:"value"` + } + rows := []row{} + err = session.Select(&rows, `SELECT account_id, identity_id, id, type_, value FROM auth_methods ORDER BY id`) + require.NoError(t, err) + wantRows := []row{ + { + AccountID: 1, + IdentityID: 1, + ID: 1, + Type: "stellar_address", + Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6", + }, + { + AccountID: 1, + IdentityID: 1, + ID: 2, + Type: "phone_number", + Value: "+10000000000", + }, + { + AccountID: 1, + IdentityID: 1, + ID: 3, + Type: "email", + Value: "user1@example.com", + }, + { + AccountID: 1, + IdentityID: 2, + ID: 4, + Type: "stellar_address", + Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP", + }, + { + AccountID: 1, + IdentityID: 2, + ID: 5, + Type: "phone_number", + Value: "+20000000000", + }, + { + AccountID: 1, + IdentityID: 2, + ID: 6, + Type: "email", + Value: "user2@example.com", + }, + } + assert.Equal(t, wantRows, rows) + } +} + +func TestAdd_conflict(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + a := Account{ + Address: "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT", + } + + err := store.Add(a) + require.NoError(t, err) + + err = store.Add(a) + assert.Equal(t, ErrAlreadyExists, err) +} + +func TestAdd_conflict_properlyClosesDBConnections(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + session.SetMaxIdleConns(1) + session.SetMaxOpenConns(1) + + store := DBStore{ + DB: session, + } + + a := Account{ + Address: "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT", + } + + err := store.Add(a) + require.NoError(t, err) + + for range [2]int{} { + // If the database transaction is not being properly closed when + // returning an error, the execution will get stuck in the following + // line of code when the `Add` method tries to start a new DB + // transaction through `s.DB.Beginx()`: + err = store.Add(a) + require.Equal(t, ErrAlreadyExists, err) + } +} diff --git a/exp/services/recoverysigner/internal/account/db_store_count.go b/exp/services/recoverysigner/internal/account/db_store_count.go new file mode 100644 index 0000000000..483d9815dd --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_count.go @@ -0,0 +1,10 @@ +package account + +func (s *DBStore) Count() (int, error) { + count := int(0) + err := s.DB.Get(&count, `SELECT COUNT(*) FROM accounts`) + if err != nil { + return 0, err + } + return count, nil +} diff --git a/exp/services/recoverysigner/internal/account/db_store_count_test.go b/exp/services/recoverysigner/internal/account/db_store_count_test.go new file mode 100644 index 0000000000..9fa8ef90c5 --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_count_test.go @@ -0,0 +1,50 @@ +package account + +import ( + "testing" + + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCount(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + count, err := store.Count() + require.NoError(t, err) + assert.Equal(t, 0, count) + + a := Account{ + Address: "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT", + Identities: []Identity{ + { + Role: "sender", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: 
"GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {Type: AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err = store.Add(a) + require.NoError(t, err) + + count, err = store.Count() + require.NoError(t, err) + assert.Equal(t, 1, count) +} diff --git a/exp/services/recoverysigner/internal/account/db_store_delete.go b/exp/services/recoverysigner/internal/account/db_store_delete.go new file mode 100644 index 0000000000..0c5aa155fb --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_delete.go @@ -0,0 +1,22 @@ +package account + +func (s *DBStore) Delete(address string) error { + // Delete an account will delete the associated identities and auth methods because of the ON DELETE CASCADE references. + result, err := s.DB.Exec( + `DELETE FROM accounts + WHERE address = $1`, + address, + ) + if err != nil { + return err + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + if rowsAffected == 0 { + return ErrNotFound + } + + return nil +} diff --git a/exp/services/recoverysigner/internal/account/db_store_delete_test.go b/exp/services/recoverysigner/internal/account/db_store_delete_test.go new file mode 100644 index 0000000000..7f1f8cd6d4 --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_delete_test.go @@ -0,0 +1,243 @@ +package account + +import ( + "testing" + + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDelete(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + // Store account 1 + a1Address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + a1 := Account{ + Address: a1Address, + Identities: []Identity{ + { + Role: "sender", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {Type: AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err := store.Add(a1) + require.NoError(t, err) + + // Store account 2 + a2Address := "GDJ6ZE3SR6XBKF2ZDGNMWXF7TKZEEQZDSBVRLZXJ2HVOFIYMYQ7IAMMU" + a2 := Account{ + Address: a2Address, + Identities: []Identity{ + { + Role: "owner", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GAA5TI5BXVNJTA6UEDF7UTMA5FXHR2TFRCJ2G7QT6FJCJ7WD5ITIKQNE"}, + {Type: AuthMethodTypePhoneNumber, Value: "+30000000000"}, + {Type: AuthMethodTypeEmail, Value: "user3@example.com"}, + }, + }, + }, + } + err = store.Add(a2) + require.NoError(t, err) + + // Get account 1 to check it exists + a1Got, err := store.Get(a1Address) + require.NoError(t, err) + assert.Equal(t, a1, a1Got) + + // Get account 2 to check it exists + a2Got, err := store.Get(a2Address) + require.NoError(t, err) + assert.Equal(t, a2, a2Got) + + 
// Delete account 1 + err = store.Delete(a1Address) + require.NoError(t, err) + + // Get account 1 to check it no longer exists + _, err = store.Get(a1Address) + assert.Equal(t, ErrNotFound, err) + + // Get account 2 to check it was not deleted + a2Got, err = store.Get(a2Address) + require.NoError(t, err) + assert.Equal(t, a2, a2Got) + + // Check that account 1 is gone and account 2 remains + { + type row struct { + Address string `db:"address"` + } + rows := []row{} + err = session.Select(&rows, `SELECT address FROM accounts ORDER BY id`) + require.NoError(t, err) + wantRows := []row{ + {Address: a2Address}, + } + assert.ElementsMatch(t, wantRows, rows) + } + // Check that account 1 delete has been audited + { + type row struct { + AuditOp string `db:"audit_op"` + Address string `db:"address"` + } + rows := []row{} + err = session.Select(&rows, `SELECT audit_op, address FROM accounts_audit ORDER BY audit_id`) + require.NoError(t, err) + wantRows := []row{ + {AuditOp: "INSERT", Address: a1Address}, + {AuditOp: "INSERT", Address: a2Address}, + {AuditOp: "DELETE", Address: a1Address}, + } + assert.Equal(t, wantRows, rows) + } + + // Check that account 1's identities are gone and account 2's remain + { + type row struct { + Role string `db:"role"` + } + rows := []row{} + err = session.Select(&rows, `SELECT role FROM identities ORDER BY id`) + require.NoError(t, err) + wantRows := []row{ + // Identities for account 2 + {Role: "owner"}, + } + assert.ElementsMatch(t, wantRows, rows) + } + // Check that account 1's identity's deletes have been audited + { + type row struct { + AuditOp string `db:"audit_op"` + Role string `db:"role"` + } + rows := []row{} + err = session.Select(&rows, `SELECT audit_op, role FROM identities_audit ORDER BY audit_id`) + require.NoError(t, err) + wantRows := []row{ + {AuditOp: "INSERT", Role: "sender"}, + {AuditOp: "INSERT", Role: "receiver"}, + {AuditOp: "INSERT", Role: "owner"}, + {AuditOp: "DELETE", Role: "sender"}, + {AuditOp: "DELETE", Role: "receiver"}, + } + assert.Equal(t, wantRows, rows) + } + + // Check that account 1's auth methods are gone and account 2's remain + { + type row struct { + Value string `db:"value"` + } + rows := []row{} + err = session.Select(&rows, `SELECT value FROM auth_methods ORDER BY id`) + require.NoError(t, err) + wantRows := []row{ + // Auth methods for account 2 + {Value: "GAA5TI5BXVNJTA6UEDF7UTMA5FXHR2TFRCJ2G7QT6FJCJ7WD5ITIKQNE"}, + {Value: "+30000000000"}, + {Value: "user3@example.com"}, + } + assert.ElementsMatch(t, wantRows, rows) + } + // Check that account 1's auth methods's deletes have been audited + { + type row struct { + AuditOp string `db:"audit_op"` + Value string `db:"value"` + } + rows := []row{} + err = session.Select(&rows, `SELECT audit_op, value FROM auth_methods_audit ORDER BY audit_id`) + require.NoError(t, err) + wantRows := []row{ + {AuditOp: "INSERT", Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {AuditOp: "INSERT", Value: "+10000000000"}, + {AuditOp: "INSERT", Value: "user1@example.com"}, + {AuditOp: "INSERT", Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {AuditOp: "INSERT", Value: "+20000000000"}, + {AuditOp: "INSERT", Value: "user2@example.com"}, + {AuditOp: "INSERT", Value: "GAA5TI5BXVNJTA6UEDF7UTMA5FXHR2TFRCJ2G7QT6FJCJ7WD5ITIKQNE"}, + {AuditOp: "INSERT", Value: "+30000000000"}, + {AuditOp: "INSERT", Value: "user3@example.com"}, + {AuditOp: "DELETE", Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {AuditOp: "DELETE", Value: 
"+10000000000"}, + {AuditOp: "DELETE", Value: "user1@example.com"}, + {AuditOp: "DELETE", Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {AuditOp: "DELETE", Value: "+20000000000"}, + {AuditOp: "DELETE", Value: "user2@example.com"}, + } + assert.Equal(t, wantRows, rows) + } + + // Store account 3 (same address as account 1) + a3Address := a1Address + a3 := Account{ + Address: a3Address, + Identities: []Identity{ + { + Role: "sender", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {Type: AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err = store.Add(a3) + require.NoError(t, err) + + // Get account 3 to check it exists + a3Got, err := store.Get(a3Address) + require.NoError(t, err) + assert.Equal(t, a3, a3Got) + + // Get account 2 to check it exists + a2Got, err = store.Get(a2Address) + require.NoError(t, err) + assert.Equal(t, a2, a2Got) +} + +func TestDelete_notFound(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + + err := store.Delete(address) + assert.Equal(t, ErrNotFound, err) +} diff --git a/exp/services/recoverysigner/internal/account/db_store_find_with_identity.go b/exp/services/recoverysigner/internal/account/db_store_find_with_identity.go new file mode 100644 index 0000000000..6e70fd4d78 --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_find_with_identity.go @@ -0,0 +1,37 @@ +package account + +import "github.com/lib/pq" + +func (s *DBStore) FindWithIdentityAuthMethod(t AuthMethodType, value string) ([]Account, error) { + query := `SELECT account_id + FROM auth_methods + WHERE type_ = $1 + AND value = $2` + accountIDs := []int64{} + err := s.DB.Select(&accountIDs, query, t, value) + if err != nil { + return nil, err + } + + accounts, err := s.getAccounts( + `accounts.id = ANY($1::bigint[])`, + pq.Int64Array(accountIDs), + ) + if err != nil { + return []Account{}, err + } + + return accounts, nil +} + +func (s *DBStore) FindWithIdentityAddress(address string) ([]Account, error) { + return s.FindWithIdentityAuthMethod(AuthMethodTypeAddress, address) +} + +func (s *DBStore) FindWithIdentityEmail(email string) ([]Account, error) { + return s.FindWithIdentityAuthMethod(AuthMethodTypeEmail, email) +} + +func (s *DBStore) FindWithIdentityPhoneNumber(phoneNumber string) ([]Account, error) { + return s.FindWithIdentityAuthMethod(AuthMethodTypePhoneNumber, phoneNumber) +} diff --git a/exp/services/recoverysigner/internal/account/db_store_find_with_identity_test.go b/exp/services/recoverysigner/internal/account/db_store_find_with_identity_test.go new file mode 100644 index 0000000000..3793e35d49 --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_find_with_identity_test.go @@ -0,0 +1,133 @@ +package account + +import ( + "testing" + + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFindWithIdentityAuthMethod(t *testing.T) { + db := 
dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + // Register an account with two identities + a1 := Account{ + Address: "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT", + Identities: []Identity{ + { + Role: "sender", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {Type: AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err := store.Add(a1) + require.NoError(t, err) + + // Register an account with one identity that overlaps with identities in a1 + a2 := Account{ + Address: "GA3ADWA6QWC6D7VSUS4QZCPYC5SYJQGCBIVLIHO4P2WDGPJRJEQO3QNS", + Identities: []Identity{ + { + Role: "owner", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err = store.Add(a2) + require.NoError(t, err) + + // Check that the first account can be found by its sender auth methods + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypeAddress, "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6") + require.NoError(t, err) + assert.Equal(t, []Account{a1}, found) + } + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypePhoneNumber, "+10000000000") + require.NoError(t, err) + assert.Equal(t, []Account{a1}, found) + } + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypeEmail, "user1@example.com") + require.NoError(t, err) + assert.Equal(t, []Account{a1}, found) + } + + // Check that both accounts can be found by the receiver/owner auth methods + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypeAddress, "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP") + require.NoError(t, err) + assert.Equal(t, []Account{a1, a2}, found) + } + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypePhoneNumber, "+20000000000") + require.NoError(t, err) + assert.Equal(t, []Account{a1, a2}, found) + } + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypeEmail, "user2@example.com") + require.NoError(t, err) + assert.Equal(t, []Account{a1, a2}, found) + } + + // Check that accounts are not found by their own address + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypeAddress, "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT") + require.NoError(t, err) + assert.Empty(t, found) + } + + // Check that accounts are not found by an unrelated auth methods + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypeAddress, "GBNZT3ZY6QYLIZLHQRQCHJGBEVV4QLR2CAL3WCMAO52PJMPISIKMS7OQ") + require.NoError(t, err) + assert.Empty(t, found) + } + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypePhoneNumber, "+99999999999") + require.NoError(t, err) + assert.Empty(t, found) + } + { + found, err := store.FindWithIdentityAuthMethod(AuthMethodTypeEmail, "user9@example.com") + require.NoError(t, err) + assert.Empty(t, found) + } +} + +func TestFindWithIdentityAuthMethod_notFound(t *testing.T) { + db := dbtest.Open(t) + session := 
db.Open() + + store := DBStore{ + DB: session, + } + + address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + + accounts, err := store.FindWithIdentityAuthMethod(AuthMethodTypeAddress, address) + require.NoError(t, err) + assert.Empty(t, accounts) +} diff --git a/exp/services/recoverysigner/internal/account/db_store_get.go b/exp/services/recoverysigner/internal/account/db_store_get.go new file mode 100644 index 0000000000..6a1ef6a3ba --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_get.go @@ -0,0 +1,101 @@ +package account + +func (s *DBStore) Get(address string) (Account, error) { + accounts, err := s.getAccounts("accounts.address = $1", address) + if err != nil { + return Account{}, err + } + + // There should only ever be at most one account due to the database + // constraint where an address must be unique. + + if len(accounts) == 0 { + return Account{}, ErrNotFound + } + + return accounts[0], nil +} + +func (s *DBStore) getAccounts(where string, args ...interface{}) ([]Account, error) { + query := `SELECT + accounts.id AS account_id, + accounts.address AS account_address, + identities.id AS identity_id, + identities.role AS identity_role, + auth_methods.type_ AS auth_method_type, + auth_methods.value AS auth_method_value + FROM accounts + LEFT JOIN identities ON identities.account_id = accounts.id + LEFT JOIN auth_methods ON auth_methods.identity_id = identities.id + WHERE ` + where + ` + ORDER BY accounts.id, identities.id, auth_methods.id` + + rows, err := s.DB.Queryx(query, args...) + if err != nil { + return nil, err + } + + accounts := []Account{} + accountIndexByAccountID := map[int64]int{} + identityIndexByIdentityID := map[int64]int{} + + for rows.Next() { + var r struct { + AccountID int64 `db:"account_id"` + AccountAddress string `db:"account_address"` + IdentityID *int64 `db:"identity_id"` + IdentityRole *string `db:"identity_role"` + AuthMethodType *string `db:"auth_method_type"` + AuthMethodValue *string `db:"auth_method_value"` + } + err = rows.StructScan(&r) + if err != nil { + return nil, err + } + + accountIndex, ok := accountIndexByAccountID[r.AccountID] + if !ok { + a := Account{Address: r.AccountAddress} + accounts = append(accounts, a) + accountIndex = len(accounts) - 1 + accountIndexByAccountID[r.AccountID] = accountIndex + } + a := accounts[accountIndex] + + // IdentityID and IdentityRole will be nil if the LEFT JOIN results in + // an account row that joins to no identities. + if r.IdentityID != nil && r.IdentityRole != nil { + identityID := *r.IdentityID + identityRole := *r.IdentityRole + + identityIndex, ok := identityIndexByIdentityID[identityID] + if !ok { + i := Identity{Role: identityRole} + a.Identities = append(a.Identities, i) + identityIndex = len(a.Identities) - 1 + identityIndexByIdentityID[identityID] = identityIndex + } + i := a.Identities[identityIndex] + + // AuthMethodType and AuthMethodValue will be nil if the LEFT JOIN + // results in an account/identity row that joins to no auth + // methods. 
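+ // Note that `a` and `i` are copies of the slice elements, which is why the
+ // updated values are written back into a.Identities and accounts at the end
+ // of each loop iteration below.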
+ if r.AuthMethodType != nil && r.AuthMethodValue != nil { + authMethodType := *r.AuthMethodType + authMethodValue := *r.AuthMethodValue + + m := AuthMethod{ + Type: AuthMethodType(authMethodType), + Value: authMethodValue, + } + i.AuthMethods = append(i.AuthMethods, m) + } + + a.Identities[identityIndex] = i + } + + accounts[accountIndex] = a + } + + return accounts, nil +} diff --git a/exp/services/recoverysigner/internal/account/db_store_get_test.go b/exp/services/recoverysigner/internal/account/db_store_get_test.go new file mode 100644 index 0000000000..93b617dd94 --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_get_test.go @@ -0,0 +1,111 @@ +package account + +import ( + "testing" + + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGet(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + + // Store the account + a := Account{ + Address: address, + Identities: []Identity{ + { + Role: "sender", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {Type: AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err := store.Add(a) + require.NoError(t, err) + + // Reading the account out results in the same data. + aRoundTrip, err := store.Get(address) + require.NoError(t, err) + assert.Equal(t, a, aRoundTrip) +} + +func TestGet_noIdentities(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + + // Store the account + a := Account{ + Address: address, + } + err := store.Add(a) + require.NoError(t, err) + + // Reading the account out results in the same data. + aRoundTrip, err := store.Get(address) + require.NoError(t, err) + assert.Equal(t, a, aRoundTrip) +} + +func TestGet_noAuthMethods(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + + // Store the account + a := Account{ + Address: address, + Identities: []Identity{{Role: "owner"}}, + } + err := store.Add(a) + require.NoError(t, err) + + // Reading the account out results in the same data. 
+ aRoundTrip, err := store.Get(address) + require.NoError(t, err) + assert.Equal(t, a, aRoundTrip) +} + +func TestGet_notFound(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + + _, err := store.Get(address) + assert.Equal(t, ErrNotFound, err) +} diff --git a/exp/services/recoverysigner/internal/account/db_store_update.go b/exp/services/recoverysigner/internal/account/db_store_update.go new file mode 100644 index 0000000000..b2383ded3e --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_update.go @@ -0,0 +1,62 @@ +package account + +import ( + "database/sql" + + "github.com/lib/pq" +) + +func (s *DBStore) Update(a Account) error { + tx, err := s.DB.Beginx() + if err != nil { + return err + } + defer tx.Rollback() + + var accountID int64 + // Delete an identity will delete the associated auth methods because of the ON DELETE CASCADE reference. + // https://github.com/stellar/go/blob/b3e0a353a901ce0babad5b4953330e55f2c674a1/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000002-create-auth-methods.sql#L11 + err = tx.Get(&accountID, ` + WITH deleted_identities AS ( + DELETE FROM identities + USING accounts + WHERE identities.account_id = accounts.id AND accounts.address = $1 + RETURNING identities.account_id AS account_id + ) + SELECT DISTINCT account_id FROM deleted_identities + `, a.Address) + if err == sql.ErrNoRows { + return ErrNotFound + } + if err != nil { + return err + } + + for _, i := range a.Identities { + var authTypes, authValues pq.StringArray + for _, m := range i.AuthMethods { + authTypes = append(authTypes, string(m.Type)) + authValues = append(authValues, m.Value) + } + _, err = tx.Exec(` + WITH new_identity AS ( + INSERT INTO identities (account_id, role) + VALUES ($1, $2) + RETURNING account_id, id + ) + INSERT INTO auth_methods (account_id, identity_id, type_, value) + SELECT account_id, id, unnest($3::auth_method_type[]), unnest($4::text[]) + FROM new_identity + `, accountID, i.Role, authTypes, authValues) + if err != nil { + return err + } + } + + err = tx.Commit() + if err != nil { + return err + } + + return nil +} diff --git a/exp/services/recoverysigner/internal/account/db_store_update_test.go b/exp/services/recoverysigner/internal/account/db_store_update_test.go new file mode 100644 index 0000000000..ffb8116aaa --- /dev/null +++ b/exp/services/recoverysigner/internal/account/db_store_update_test.go @@ -0,0 +1,324 @@ +package account + +import ( + "testing" + + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUpdate(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + + // Store the account + a := Account{ + Address: address, + Identities: []Identity{ + { + Role: "sender", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {Type: AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: 
AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err := store.Add(a) + require.NoError(t, err) + + // Update the identities on the account + b := Account{ + Address: address, + Identities: []Identity{ + { + Role: "owner", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GAUZVLZUTB3SE4MTQ6CFAQXOCMMVCG6LCUZNPM4ZS5X3HZ4BZB4RJM2S"}, + {Type: AuthMethodTypePhoneNumber, Value: "+30000000000"}, + {Type: AuthMethodTypeEmail, Value: "user3@example.com"}, + }, + }, + }, + } + + err = store.Update(b) + require.NoError(t, err) + + updatedAcc, err := store.Get(address) + require.NoError(t, err) + assert.Equal(t, b, updatedAcc) + + // Check the account row has not been changed. + { + type row struct { + ID int64 `db:"id"` + Address string `db:"address"` + } + rows := []row{} + err = session.Select(&rows, `SELECT id, address FROM accounts`) + require.NoError(t, err) + wantRows := []row{ + { + ID: 1, + Address: "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT", + }, + } + assert.Equal(t, wantRows, rows) + } + + // Check the identity rows have been updated. + { + type row struct { + AccountID int64 `db:"account_id"` + ID int64 `db:"id"` + Role string `db:"role"` + } + rows := []row{} + err = session.Select(&rows, `SELECT account_id, id, role FROM identities`) + require.NoError(t, err) + wantRows := []row{ + { + AccountID: 1, + ID: 3, + Role: "owner", + }, + } + assert.Equal(t, wantRows, rows) + } + + // Check the auth method rows have been updated. + { + type row struct { + AccountID int64 `db:"account_id"` + IdentityID int64 `db:"identity_id"` + ID int64 `db:"id"` + Type string `db:"type_"` + Value string `db:"value"` + } + rows := []row{} + err = session.Select(&rows, `SELECT account_id, identity_id, id, type_, value FROM auth_methods ORDER BY id`) + require.NoError(t, err) + wantRows := []row{ + { + AccountID: 1, + IdentityID: 3, + ID: 7, + Type: "stellar_address", + Value: "GAUZVLZUTB3SE4MTQ6CFAQXOCMMVCG6LCUZNPM4ZS5X3HZ4BZB4RJM2S", + }, + { + AccountID: 1, + IdentityID: 3, + ID: 8, + Type: "phone_number", + Value: "+30000000000", + }, + { + AccountID: 1, + IdentityID: 3, + ID: 9, + Type: "email", + Value: "user3@example.com", + }, + } + assert.Equal(t, wantRows, rows) + } +} + +func TestUpdate_removeIdentities(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + + // Store the account + a := Account{ + Address: address, + Identities: []Identity{ + { + Role: "sender", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {Type: AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err := store.Add(a) + require.NoError(t, err) + + // Remove the identities on the account + b := Account{ + Address: address, + } + + err = store.Update(b) + require.NoError(t, err) + + updatedAcc, err := store.Get(address) + require.NoError(t, err) + assert.Equal(t, b, updatedAcc) + + { + type row struct { + AccountID int64 `db:"account_id"` + ID int64 `db:"id"` + Role string `db:"role"` + } + rows := []row{} + err = 
session.Select(&rows, `SELECT account_id, id, role FROM identities ORDER BY id`) + require.NoError(t, err) + assert.Equal(t, []row{}, rows) + } + { + type row struct { + AccountID int64 `db:"account_id"` + IdentityID int64 `db:"identity_id"` + ID int64 `db:"id"` + Type string `db:"type_"` + Value string `db:"value"` + } + rows := []row{} + err = session.Select(&rows, `SELECT account_id, identity_id, id, type_, value FROM auth_methods ORDER BY id`) + require.NoError(t, err) + assert.Equal(t, []row{}, rows) + } +} + +func TestUpdate_noAuthMethods(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + address := "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT" + + // Store the account + a := Account{ + Address: address, + Identities: []Identity{ + { + Role: "sender", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GD4NGMOTV4QOXWA6PGPIGVWZYMRCJAKLQJKZIP55C5DGB3GBHHET3YC6"}, + {Type: AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []AuthMethod{ + {Type: AuthMethodTypeAddress, Value: "GBJCOYGKIJYX3VUEOZ6GVMFP522UO4OEBI5KB5HHWZAZ2DEJTHS6VOHP"}, + {Type: AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + err := store.Add(a) + require.NoError(t, err) + + b := Account{ + Address: address, + Identities: []Identity{ + { + Role: "sender", + }, + { + Role: "receiver", + }, + }, + } + + err = store.Update(b) + require.NoError(t, err) + + updatedAcc, err := store.Get(address) + require.NoError(t, err) + assert.Equal(t, b, updatedAcc) + + // check there is no row in auth_methods + type row struct { + AccountID int64 `db:"account_id"` + IdentityID int64 `db:"identity_id"` + ID int64 `db:"id"` + Type string `db:"type_"` + Value string `db:"value"` + } + rows := []row{} + err = session.Select(&rows, `SELECT account_id, identity_id, id, type_, value FROM auth_methods`) + require.NoError(t, err) + assert.Equal(t, []row{}, rows) +} + +func TestUpdate_notFound(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + + store := DBStore{ + DB: session, + } + + a := Account{ + Address: "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT", + } + + err := store.Update(a) + assert.Equal(t, ErrNotFound, err) +} + +func TestUpdate_notFound_properlyClosesDBConnections(t *testing.T) { + db := dbtest.Open(t) + session := db.Open() + session.SetMaxIdleConns(1) + session.SetMaxOpenConns(1) + + store := DBStore{ + DB: session, + } + + a := Account{ + Address: "GCLLT3VG4F6EZAHZEBKWBWV5JGVPCVIKUCGTY3QEOAIZU5IJGMWCT2TT", + } + + for range [2]int{} { + // If the database transaction is not being properly closed when + // returning an error, the execution will get stuck in the following + // line of code when the `Update` method tries to start a new DB + // transaction through `s.DB.Beginx()`: + err := store.Update(a) + assert.Equal(t, ErrNotFound, err) + } +} diff --git a/exp/services/recoverysigner/internal/account/store.go b/exp/services/recoverysigner/internal/account/store.go new file mode 100644 index 0000000000..909c8e5c1e --- /dev/null +++ b/exp/services/recoverysigner/internal/account/store.go @@ -0,0 +1,17 @@ +package account + +import "errors" + +type Store interface { + Add(a Account) error + Delete(address string) error + Get(address string) (Account, error) + Update(a Account) error + FindWithIdentityAddress(address string) ([]Account, 
error) + FindWithIdentityPhoneNumber(phoneNumber string) ([]Account, error) + FindWithIdentityEmail(email string) ([]Account, error) + Count() (int, error) +} + +var ErrNotFound = errors.New("account not found") +var ErrAlreadyExists = errors.New("account already exists") diff --git a/exp/services/recoverysigner/internal/db/db.go b/exp/services/recoverysigner/internal/db/db.go new file mode 100644 index 0000000000..d0cab4db33 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/db.go @@ -0,0 +1,9 @@ +package db + +import ( + "github.com/jmoiron/sqlx" +) + +func Open(dataSourceName string) (*sqlx.DB, error) { + return sqlx.Open("postgres", dataSourceName) +} diff --git a/exp/services/recoverysigner/internal/db/db_audit_test.go b/exp/services/recoverysigner/internal/db/db_audit_test.go new file mode 100644 index 0000000000..aa28b89f04 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/db_audit_test.go @@ -0,0 +1,75 @@ +package db + +import ( + "testing" + + "github.com/jmoiron/sqlx" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestAuditTables confirms that the columns for audit tables are a superset of +// the columns in the tables they are auditing. +func TestAuditTables(t *testing.T) { + db := dbtest.Open(t) + conn, err := Open(db.DSN) + require.NoError(t, err) + + assertAuditTableCols(t, conn, "accounts", "accounts_audit") + assertAuditTableCols(t, conn, "identities", "identities_audit") + assertAuditTableCols(t, conn, "auth_methods", "auth_methods_audit") +} + +// assertAuditTableCols checks that the audit table for the given +// table has the same columns as the given table, as well as the header +// columns, that all the types and columns are as we expect. +func assertAuditTableCols(t *testing.T, db *sqlx.DB, tableName, auditTableName string) { + t.Run(tableName, func(t *testing.T) { + cols, err := tableCols(db, tableName) + require.NoError(t, err) + + wantAuditHeaderCols := []tableCol{ + {Name: "audit_id", DataType: "bigint", UDTName: "int8", IsNullable: "NO"}, + {Name: "audit_at", DataType: "timestamp with time zone", UDTName: "timestamptz", IsNullable: "NO"}, + {Name: "audit_user", DataType: "text", UDTName: "text", IsNullable: "NO"}, + {Name: "audit_op", DataType: "USER-DEFINED", UDTName: "audit_op", IsNullable: "NO"}, + } + wantAuditCols := append(append([]tableCol{}, wantAuditHeaderCols...), cols...) + + auditCols, err := tableCols(db, auditTableName) + require.NoError(t, err) + assert.Equal(t, wantAuditCols, auditCols) + }) +} + +// tableCol represents a column in a table with some of its information as +// defined by Postgres' standard information_schema table. +type tableCol struct { + Name string + DataType string + UDTName string + IsNullable string +} + +// tableCols returns the column names for the table. 
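+// Each column is reported with its name, data type, underlying UDT name, and
+// nullability, as described by information_schema.columns. For example
+// (illustrative only), the first column reported for the accounts table would
+// be:
+//
+//	tableCol{Name: "id", DataType: "bigint", UDTName: "int8", IsNullable: "NO"}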
+func tableCols(db *sqlx.DB, tableName string) ([]tableCol, error) { + cols := []tableCol{} + err := db.Select( + &cols, + `SELECT + column_name as Name, + data_type as DataType, + udt_name as UDTName, + is_nullable as IsNullable + FROM information_schema.columns + WHERE table_schema = 'public' + AND table_name = $1 + ORDER BY ordinal_position ASC;`, + tableName, + ) + if err != nil { + return nil, err + } + return cols, nil +} diff --git a/exp/services/recoverysigner/internal/db/db_test.go b/exp/services/recoverysigner/internal/db/db_test.go new file mode 100644 index 0000000000..91dba7d5b2 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/db_test.go @@ -0,0 +1,47 @@ +package db + +import ( + "fmt" + "net" + "regexp" + "testing" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOpen_openAndPingSucceeds(t *testing.T) { + db := dbtest.Postgres(t) + + sqlxDB, err := Open(db.DSN) + require.NoError(t, err) + assert.Equal(t, "postgres", sqlxDB.DriverName()) + + err = sqlxDB.Ping() + require.NoError(t, err) +} + +func TestOpen_openAndPingFails(t *testing.T) { + // Find an empty port + listener, err := net.Listen("tcp", ":0") + require.NoError(t, err) + port := listener.Addr().(*net.TCPAddr).Port + require.NoError(t, listener.Close()) + // Slight race here with other stuff on the system, which could claim this port. + + sqlxDB, err := Open(fmt.Sprintf("postgres://localhost:%d", port)) + require.NoError(t, err) + assert.Equal(t, "postgres", sqlxDB.DriverName()) + + err = sqlxDB.Ping() + require.Error(t, err) + require.Regexp( + t, + regexp.MustCompile( + // regex to support both ipv4 and ipv6, on the port we found. + fmt.Sprintf("dial tcp (127\\.0\\.0\\.1|\\[::1\\]):%d: connect: connection refused", port), + ), + err.Error(), + ) +} diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/dbmigrate.go b/exp/services/recoverysigner/internal/db/dbmigrate/dbmigrate.go new file mode 100644 index 0000000000..f5cbc5822b --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/dbmigrate.go @@ -0,0 +1,35 @@ +package dbmigrate + +import ( + "github.com/jmoiron/sqlx" + migrate "github.com/rubenv/sql-migrate" +) + +//go:generate go run github.com/kevinburke/go-bindata/go-bindata@v3.18.0+incompatible -nometadata -ignore .+\.(go|swp)$ -pkg dbmigrate -o dbmigrate_generated.go ./migrations + +var migrationSource = &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "migrations", +} + +// PlanMigration finds the migrations that would be applied if Migrate was to +// be run now. +func PlanMigration(db *sqlx.DB, dir migrate.MigrationDirection, count int) ([]string, error) { + migrations, _, err := migrate.PlanMigration(db.DB, db.DriverName(), migrationSource, dir, count) + if err != nil { + return nil, err + } + ids := make([]string, 0, len(migrations)) + for _, m := range migrations { + ids = append(ids, m.Id) + } + return ids, nil +} + +// Migrate runs all the migrations to get the database to the state described +// by the migration files in the direction specified. Count is the maximum +// number of migrations to apply or rollback. 
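+// A count of 0 applies or rolls back all of them. For example (illustrative
+// only), applying every pending migration:
+//
+//	n, err := dbmigrate.Migrate(db, migrate.Up, 0)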
+func Migrate(db *sqlx.DB, dir migrate.MigrationDirection, count int) (int, error) { + return migrate.ExecMax(db.DB, db.DriverName(), migrationSource, dir, count) +} diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/dbmigrate_generated.go b/exp/services/recoverysigner/internal/db/dbmigrate/dbmigrate_generated.go new file mode 100644 index 0000000000..0088292d53 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/dbmigrate_generated.go @@ -0,0 +1,434 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// migrations/20200309000000-initial-1.sql (162B) +// migrations/20200309000001-initial-2.sql (162B) +// migrations/20200311000000-create-accounts.sql (324B) +// migrations/20200311000001-create-identities.sql (389B) +// migrations/20200311000002-create-auth-methods.sql (716B) +// migrations/20200320000000-create-accounts-audit.sql (1.23kB) +// migrations/20200320000001-create-identities-audit.sql (1.166kB) +// migrations/20200320000002-create-auth-methods-audit.sql (1.192kB) + +package dbmigrate + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _migrations20200309000000Initial1Sql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\xcc\xd1\x0d\xc2\x30\x0c\x04\xd0\xff\x4c\x71\xff\x28\x4c\xc1\x08\x30\x80\x01\xa7\xb5\xd4\xda\x91\x6d\xa8\xb2\x3d\x8a\xf8\x40\x7c\xde\xdd\xd3\xd5\x8a\xeb\x2a\x81\x5d\x16\xa7\x14\x53\x34\xd9\x18\x12\x10\x4d\xd6\xd9\xd0\xb6\x0d\xf0\xde\x73\x80\xf4\x39\x27\x42\x13\x8f\x44\x24\x79\x8a\x2e\xe8\x26\x9a\x68\xe6\xa5\x56\xd8\xcb\x7f\x77\x81\x3b\x37\x73\xc6\xc1\x18\x9c\x58\xe9\xcd\x20\xc4\x63\xe5\x9d\xce\x65\xfa\xd3\x17\x33\x6e\xfd\x3f\x5f\xec\xd0\x52\x3e\x01\x00\x00\xff\xff\xd3\x79\x21\xda\xa2\x00\x00\x00") + +func migrations20200309000000Initial1SqlBytes() ([]byte, error) { + return bindataRead( + _migrations20200309000000Initial1Sql, + "migrations/20200309000000-initial-1.sql", + ) +} + +func migrations20200309000000Initial1Sql() (*asset, error) { + bytes, err := migrations20200309000000Initial1SqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20200309000000-initial-1.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd1, 0xd1, 0x21, 0xe9, 0x6d, 0xe0, 0xfe, 0xb4, 0x8b, 0x78, 0x2, 0xae, 0x5c, 0xd5, 0x8b, 0x41, 0xb8, 0x4b, 0xaa, 0x3a, 0xea, 0x69, 0xf, 0xf3, 0x2f, 0x6c, 0xae, 0x38, 0x46, 0xb, 0x2, 0xfc}} + return a, nil +} + +var 
_migrations20200309000001Initial2Sql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\xcc\xd1\x0d\xc2\x30\x0c\x04\xd0\xff\x4c\x71\xff\x28\x4c\xc1\x08\x30\x80\x01\xa7\xb5\xd4\xda\x91\x6d\xa8\xb2\x3d\x8a\xf8\x40\x7c\xde\xdd\xd3\xd5\x8a\xeb\x2a\x81\x5d\x16\xa7\x14\x53\x34\xd9\x18\x12\x10\x4d\xd6\xd9\xd0\xb6\x0d\xf0\xde\x73\x80\xf4\x39\x27\x42\x13\x8f\x44\x24\x79\x8a\x2e\xe8\x26\x9a\x68\xe6\xa5\x56\xd8\xcb\x7f\x77\x81\x3b\x37\x73\xc6\xc1\x18\x9c\x58\xe9\xcd\x20\xc4\x63\xe5\x9d\xce\x65\xfa\xd3\x17\x33\x6e\xfd\x3f\x5f\xec\xd0\x52\x3e\x01\x00\x00\xff\xff\xd3\x79\x21\xda\xa2\x00\x00\x00") + +func migrations20200309000001Initial2SqlBytes() ([]byte, error) { + return bindataRead( + _migrations20200309000001Initial2Sql, + "migrations/20200309000001-initial-2.sql", + ) +} + +func migrations20200309000001Initial2Sql() (*asset, error) { + bytes, err := migrations20200309000001Initial2SqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20200309000001-initial-2.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd1, 0xd1, 0x21, 0xe9, 0x6d, 0xe0, 0xfe, 0xb4, 0x8b, 0x78, 0x2, 0xae, 0x5c, 0xd5, 0x8b, 0x41, 0xb8, 0x4b, 0xaa, 0x3a, 0xea, 0x69, 0xf, 0xf3, 0x2f, 0x6c, 0xae, 0x38, 0x46, 0xb, 0x2, 0xfc}} + return a, nil +} + +var _migrations20200311000000CreateAccountsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x90\xc1\x4e\xc3\x30\x10\x44\xef\xfb\x15\x73\x4c\x44\xfb\x05\x3d\xb9\x78\x29\x16\x8e\x63\x9c\xb5\xd2\x70\x41\x56\x1c\xa1\x1e\x68\xab\x24\x15\xbf\x8f\x5a\x21\x1a\x71\xe1\xb8\x87\x99\xd9\xf7\xd6\x6b\x3c\x7c\x1e\x3e\xc6\x34\x0f\x88\x67\xa2\xc7\xc0\x4a\x18\xa2\xb6\x96\x91\xfa\xfe\x74\x39\xce\x13\x0a\x02\x0e\x19\x5b\xb3\x33\x4e\xe0\x6a\x81\x8b\xd6\xc2\x07\x53\xa9\xd0\xe1\x85\x3b\xec\xd8\x71\x50\xc2\x1a\xca\xb6\xaa\x6b\xa0\x1a\x18\xcd\x4e\x8c\x74\x2b\x22\xa0\x1f\x87\x34\x0f\xf9\x3d\xcd\x10\x53\x71\x23\xaa\xf2\x68\x8d\x3c\xdf\x4e\xbc\xd5\x8e\xef\xcd\x9a\x9f\x54\xb4\xd7\xa9\xb6\x28\x57\x04\x5c\xce\xf9\xbf\xf4\x6d\x25\xe5\x3c\x0e\xd3\x04\xe1\xfd\xfd\x51\x2a\x37\xbf\x64\xd1\x99\xd7\xc8\x30\x4e\xf3\x1e\xb5\x5b\x30\x46\xef\x39\x14\x3f\x05\xe5\x35\xb2\x94\xa3\x4f\x5f\x47\x22\x1d\x6a\xff\x47\xce\x86\xbe\x03\x00\x00\xff\xff\x35\x11\xef\x05\x44\x01\x00\x00") + +func migrations20200311000000CreateAccountsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20200311000000CreateAccountsSql, + "migrations/20200311000000-create-accounts.sql", + ) +} + +func migrations20200311000000CreateAccountsSql() (*asset, error) { + bytes, err := migrations20200311000000CreateAccountsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20200311000000-create-accounts.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1c, 0xd6, 0x9b, 0xfd, 0xbd, 0x53, 0x3b, 0xf0, 0x35, 0xd5, 0xf9, 0x9a, 0xde, 0x85, 0xe, 0xe3, 0xb4, 0x94, 0xa6, 0xe7, 0xf6, 0xaf, 0xf2, 0x54, 0xb2, 0x6f, 0xfd, 0xce, 0x61, 0xda, 0xb, 0x20}} + return a, nil +} + +var _migrations20200311000001CreateIdentitiesSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x90\xc1\x6a\x83\x40\x10\x86\xef\xfb\x14\xff\x51\x69\xf2\x04\x39\x6d\xdc\x49\xba\x74\x5d\x65\x1d\x31\xf6\x12\x44\x97\xb2\xd0\x6a\x30\x1b\xfa\xfa\xc5\x40\x6b\xa0\x85\x1e\x07\xfe\xf9\xbf\x99\x6f\xbb\xc5\xd3\x47\x78\x9b\xbb\xe8\x51\x5f\x84\xc8\x1c\x49\x26\xb0\xdc\x1b\x42\x18\xfc\x18\x43\x0c\xfe\x8a\x44\x00\x5d\xdf\x4f\xb7\x31\x9e\xc3\x80\xbd\x3e\x6a\xcb\xb0\x05\xc3\xd6\xc6\xc0\xd1\x81\x1c\xd9\x8c\xaa\xef\xd4\x15\x49\x18\x52\x14\x16\x8a\x0c\x31\x21\x93\x55\x26\x15\x6d\x04\xf0\x47\x41\xe9\x74\x2e\x5d\x8b\x17\x6a\x71\x24\x4b\x4e\x32\x29\x48\xd3\xc8\xb6\x82\xac\xa0\x15\x59\xd6\xdc\x6e\x84\x00\xfa\xd9\x77\xd1\x0f\xe7\x2e\x82\x75\x4e\x15\xcb\xbc\x44\xa3\xf9\xf9\x3e\xe2\xb5\xb0\xb4\x36\x2b\x3a\xc8\xda\x2c\xa8\x26\x49\x17\xfa\xed\x32\xfc\xb7\x7d\xa7\xcc\xd3\xbb\x07\xd3\x69\xbd\x52\xa4\xbb\x1f\x43\xda\x2a\x3a\x2d\xef\x3d\x4a\x5a\x0d\x2d\xc9\x47\xb7\x6a\xfa\x1c\x85\x50\xae\x28\x7f\xb9\xdd\x89\xaf\x00\x00\x00\xff\xff\xb1\x1a\x5c\x4b\x85\x01\x00\x00") + +func migrations20200311000001CreateIdentitiesSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20200311000001CreateIdentitiesSql, + "migrations/20200311000001-create-identities.sql", + ) +} + +func migrations20200311000001CreateIdentitiesSql() (*asset, error) { + bytes, err := migrations20200311000001CreateIdentitiesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20200311000001-create-identities.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xaa, 0xcb, 0x82, 0x87, 0xcb, 0xd7, 0x70, 0xb9, 0xd, 0x73, 0xfa, 0xbe, 0xc1, 0x8c, 0xc5, 0x4c, 0xa4, 0x3b, 0x72, 0xd8, 0x8e, 0x82, 0x13, 0xc2, 0x89, 0xfc, 0x9b, 0x48, 0x4a, 0x28, 0x34, 0x52}} + return a, nil +} + +var _migrations20200311000002CreateAuthMethodsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x92\xcf\xce\x9b\x30\x10\xc4\xef\x7e\x8a\xbd\x25\xa8\xf9\x9e\x80\x93\x83\x37\xa9\x55\x30\x08\x8c\x52\x7a\x41\x6e\x6c\x35\x48\xfc\x13\x98\xb6\x79\xfb\xca\x24\x69\x12\x25\xfa\x38\xb2\x8c\x7f\xb3\xda\x99\x8f\x0f\xf8\xd2\x54\xbf\x06\x65\x0d\xe4\x3d\x21\x41\x8a\x54\x22\xc8\x22\x41\x50\x93\x3d\x95\x8d\xb1\xa7\x4e\x97\xf6\xdc\x1b\xa0\x19\xa0\xc8\x23\x58\x13\x80\xd5\x68\x4d\x5d\xab\xa1\x54\x5a\x0f\x66\x1c\x57\x1b\x37\xec\x4f\x5d\x6b\xca\x76\x6a\x7e\x9a\xe1\x32\x31\x8d\xaa\xea\x15\xf1\xfc\x3b\x9b\x6e\xc3\x27\xf8\x38\x03\xd5\xf1\xd8\x4d\xad\x2d\x2b\x0d\x5b\xbe\xe7\x42\x82\x88\x25\x88\x3c\x0c\x21\xc5\x1d\xa6\x28\x02\xcc\x6e\xaa\x11\xd6\x95\xf6\x20\x16\xc0\x30\x44\x89\x10\xd0\x2c\xa0\x0c\x9d\x65\xa5\x4d\x6b\x2b\x7b\x5e\x20\x5d\x65\x95\xf9\x9c\xf5\x82\x48\x52\x1e\xd1\xb4\x80\x6f\x58\xc0\x1e\x05\xa6\x54\x22\x03\x1a\x1e\x68\x91\xb9\x0b\x71\x86\x42\x72\x59\x6c\x08\x01\x38\x0e\x46\x59\xa3\x4b\x65\x41\xf2\x08\x33\x49\xa3\x04\x0e\x5c\x7e\x9d\x3f\xe1\x47\x2c\xf0\x4e\x66\xb8\xa3\x79\xe8\xac\x0e\x6b\xcf\xb9\x4f\xbd\x5e\x7a\x3d\xbb\xb8\x70\xca\xd7\xb8\x6e\x60\x87\xfa\xad\xea\xc9\x80\x35\x7f\xed\xff\xf1\x63\x26\x5c\x30\xfc\xee\x4e\xf0\x1c\xcb\x3d\x13\xcf\x5f\x90\x3e\x5c\x7d\x51\x3b\xef\xbb\xb9\xec\xe4\x96\x78\x2c\x21\xeb\xfe\xb4\x84\xb0\x34\x4e\xde\x14\xc5\xbf\xfe\x78\xd7\x4e\x9f\xfc\x0b\x00\x00\xff\xff\x81\xdc\x93\xfc\xcc\x02\x00\x00") + +func migrations20200311000002CreateAuthMethodsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20200311000002CreateAuthMethodsSql, + "migrations/20200311000002-create-auth-methods.sql", + ) +} + +func migrations20200311000002CreateAuthMethodsSql() (*asset, error) { + bytes, err := 
migrations20200311000002CreateAuthMethodsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20200311000002-create-auth-methods.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x14, 0x3a, 0x39, 0x59, 0xd, 0x9, 0x9d, 0x68, 0x45, 0xbc, 0x3a, 0xdf, 0xf5, 0x8c, 0x4, 0x29, 0x60, 0xda, 0x7, 0xf9, 0x8b, 0xfa, 0x57, 0x49, 0xfa, 0x41, 0xb3, 0xb0, 0x94, 0x6d, 0xdd, 0x1e}} + return a, nil +} + +var _migrations20200320000000CreateAccountsAuditSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\xd1\x6e\xda\x30\x14\x7d\xf7\x57\x9c\x87\x4a\x85\x8d\xee\x03\x1a\xed\xc1\xe0\x9b\x60\xd5\xd8\x91\x73\x3d\x9a\xbd\x20\x04\x11\x42\x6a\x81\x41\xd0\x7e\x7f\x0a\x0e\x0d\x2d\xac\x6f\x93\xf6\x76\x62\xdf\x1c\x9f\x73\xef\xb1\x1f\x1e\xf0\xf5\x75\xbd\xda\xcf\xeb\x0a\x61\x27\xc4\xc8\x93\x64\x02\x97\x39\x61\x7e\x5c\xae\xeb\xd9\x76\x07\x59\x80\x6c\x98\xa0\x27\x80\x7b\x6d\x0b\xf2\x7c\x3f\x68\x70\xc8\x95\x64\x8a\x58\x91\x21\xa6\x7b\xd1\x4f\x3a\x16\x39\x34\x84\xf9\x62\xb1\x3d\x6e\xea\xc3\xec\xc4\x77\x22\x89\xcc\xeb\x25\x86\x3a\xd3\x96\x61\x1d\xc3\x06\x63\x90\x7b\x3d\x91\xbe\xc4\x13\x95\xc8\xc8\x92\x97\x4c\x0a\xd2\x4c\x65\x59\x34\x32\xb4\x22\xcb\x9a\xcb\xc1\x1b\xc9\xbc\x06\xeb\x09\x15\x2c\x27\x39\xa6\x9a\xc7\xa7\x4f\xfc\x74\x96\x3a\x5a\x45\xa9\x0c\xa6\x39\x67\xda\xeb\x77\xff\x1e\x0f\xd5\x1e\x4c\xcf\x7c\x5d\x19\x0a\xf2\x5d\xe1\x76\xd7\x81\x73\x69\xb3\x6b\xf4\x53\xe7\xef\xe4\xfc\xb2\x9f\x45\x3d\xaf\xab\xd7\x6a\x53\x0f\xab\xd5\x7a\x73\x6e\x4a\x1a\xec\x88\xb5\xb3\xd8\x57\x8b\xed\x7e\x39\x7b\xdf\x9e\x5e\x1f\x9e\x38\x78\x5b\x80\xbd\xce\x32\xf2\x8d\xed\xbb\xa1\x53\xe5\x9d\x00\x86\x94\x69\x2b\x00\x40\xa7\xe8\x71\x36\x73\x39\xbe\xbf\xcd\xa4\x0f\x1e\x53\xdc\x06\xe2\x1a\xb4\x65\xf7\x71\x04\x3f\xa4\x09\x54\xa0\xd7\x9a\x1d\xe0\x1a\x9c\xa8\x1f\x1f\xcf\xae\x07\xb0\x34\xfd\xf6\xa5\x9f\xb4\xe4\x51\x63\xb3\x18\x57\xc8\x14\xef\x04\xb5\xc1\xf8\x7f\x04\xb5\xe9\xfc\x97\x82\x9c\x51\xd7\x82\x9c\x51\xad\x20\xab\xa0\xd3\x06\x93\x55\x89\x88\x13\x85\x91\x36\x0b\x32\x23\xec\x5e\x76\xab\xc3\xaf\x97\xe4\x76\x80\x68\xb3\xec\x2e\x55\x1b\x8b\x9b\xf1\x11\x32\x65\xf2\x67\x67\xce\x23\x0e\xa2\x41\xb1\x03\x70\xb6\x0b\x2c\x90\x3a\x0f\x92\xa3\x31\xbc\x9b\x82\x9e\x69\x14\x98\x90\x7b\x37\x22\x15\x3c\xfd\x2d\xa2\x1f\x72\xae\xb6\xbf\x37\x42\x28\xef\xf2\xcf\xc5\x5d\x9e\x9d\xc4\xfa\xcf\x2f\x43\x5b\x74\xeb\x19\x39\x6f\x5d\xbe\x53\x89\xf8\x13\x00\x00\xff\xff\x6f\x9b\x27\x54\xce\x04\x00\x00") + +func migrations20200320000000CreateAccountsAuditSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20200320000000CreateAccountsAuditSql, + "migrations/20200320000000-create-accounts-audit.sql", + ) +} + +func migrations20200320000000CreateAccountsAuditSql() (*asset, error) { + bytes, err := migrations20200320000000CreateAccountsAuditSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20200320000000-create-accounts-audit.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa5, 0x83, 0x86, 0xf3, 0xa3, 0x90, 0x12, 0x71, 0x8b, 0x6b, 0x30, 0x68, 0x8e, 0xe6, 0xfa, 0x36, 0xdd, 0x1e, 0xc9, 0x1a, 0xd1, 0xa0, 0x4, 0x61, 0x4e, 0xb6, 0xd2, 0x99, 0xfc, 0xa6, 0xcd, 0xf8}} + return a, nil +} + +var _migrations20200320000001CreateIdentitiesAuditSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\xc1\x6e\xdb\x3a\x10\xbc\xf3\x2b\xe6\x10\x20\xf6\x7b\x4e\x3f\x20\x42\x0f\xb4\xb9\x92\x89\xd0\xa4\x40\x2d\xab\xa8\x17\xc3\xa8\x04\x43\x40\x22\xbb\xb6\x82\xfe\x7e\x21\x4b\xae\x9c\x38\x41\x4e\x05\x7a\x1b\x2d\x97\xab\x99\xdd\x59\xde\xdd\xe1\xff\xe7\x7a\x7b\xd8\xb4\x15\xc2\x5e\x88\x85\x27\xc9\x04\x96\x73\x43\xa8\xcb\xaa\x69\xeb\xb6\xae\x8e\xeb\xcd\x4b\x59\xb7\x98\x08\xe0\x84\xd6\x75\x89\xb9\x4e\xb4\x65\x58\xc7\xb0\xc1\x18\xa4\x5e\xaf\xa4\x2f\xf0\x40\x05\x12\xb2\xe4\x25\x93\x82\x34\xb9\x2c\x32\xc8\x0c\x5a\x91\x65\xcd\xc5\xec\x4f\x91\x4d\x0b\xd6\x2b\xca\x58\xae\x52\xe4\x9a\x97\xa7\x4f\x7c\x77\x96\xc6\xb2\x8a\x62\x19\x4c\xf7\x9f\x7c\x32\x1d\xef\xbe\x1c\xab\x03\x98\x1e\xf9\x3a\x33\x64\xe4\xc7\xc4\xdd\x7e\x04\xe7\xd4\xee\xd4\xe8\x87\x4b\x85\x62\x1a\x09\x71\xd9\x8d\xac\xdd\xb4\xd5\x73\xd5\xb4\xf3\x6a\x5b\x37\xe7\xc6\xc4\xc1\x2e\x58\x3b\x8b\x43\xf5\x63\x77\x28\xd7\x6f\x5b\x34\x99\xc2\x13\x07\x6f\x33\xb0\xd7\x49\x42\xbe\x93\x7e\x33\x77\xaa\xb8\x11\xc0\x9c\x12\x6d\x05\x00\xe8\x18\x13\x4e\xd6\x2e\xc5\x57\xdc\x6a\x9b\x91\xe7\xdb\x29\x78\x49\xfd\x31\xd0\xc7\xa0\x2d\xbb\xeb\x41\x7c\x93\x26\x50\x86\xc9\x20\x79\x86\x6b\x70\x2a\x7e\x7f\x7f\xd6\x3e\x83\xa5\xfc\xcb\x7f\xd3\x68\x28\xdf\xb3\xec\x82\x7d\x84\x4c\xf6\x8a\x52\x48\x95\x64\xfa\xa7\x28\x29\x32\xf4\xd7\x29\x39\xa3\xae\x29\x39\xa3\x06\x4a\x56\x41\xc7\x1d\x26\xab\x22\xd1\xcf\x15\x46\xda\x24\xc8\x84\xb0\x7f\xda\x6f\x8f\x3f\x9f\xa2\xf7\x8d\x44\x4d\x39\x2e\xd8\x60\x8e\x0f\x6c\x24\x64\xcc\xe4\xcf\xea\x9c\x47\x3f\x8e\x0e\xf5\x5d\x80\xb3\x97\xe6\x05\x62\xe7\x41\x72\xb1\x84\x77\x39\xe8\x91\x16\x81\x09\xa9\x77\x0b\x52\xc1\xd3\xc7\x76\x7d\xe3\x7a\xb5\xfb\xd5\x08\xa1\xbc\x4b\x3f\xa3\xf8\x9a\x41\xd4\xdf\xf9\x6c\x3d\x86\xb4\xf7\x9f\x97\x48\xfc\x0e\x00\x00\xff\xff\x12\x39\xe3\xeb\x8e\x04\x00\x00") + +func migrations20200320000001CreateIdentitiesAuditSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20200320000001CreateIdentitiesAuditSql, + "migrations/20200320000001-create-identities-audit.sql", + ) +} + +func migrations20200320000001CreateIdentitiesAuditSql() (*asset, error) { + bytes, err := migrations20200320000001CreateIdentitiesAuditSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20200320000001-create-identities-audit.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf2, 0x4f, 0x36, 0x8f, 0xf0, 0x74, 0xca, 0x86, 0x60, 0x21, 0xda, 0xb8, 0x3, 0x5e, 0xd1, 0x21, 0x6e, 0x2, 0xfb, 0x89, 0x56, 0x62, 0x6e, 0xd0, 0xc, 0x88, 0x3a, 0x60, 0x62, 0x6a, 0x91, 0x8b}} + return a, nil +} + +var _migrations20200320000002CreateAuthMethodsAuditSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x54\xcb\x6e\xdb\x30\x10\xbc\xf3\x2b\xe6\x10\x20\x76\xeb\xf4\x03\x22\xf4\x40\x9b\x2b\x99\x08\x4d\x0a\xd4\xb2\x8e\x7b\x11\x84\x4a\x70\x0c\xc4\x8f\xda\x32\xfa\xfb\x85\x2c\xb9\xaa\xe3\xa4\xb9\x15\xb9\x8d\x96\xcb\xe5\xec\xec\xac\xee\xee\xf0\x79\xbd\x5a\xee\x8b\xba\x42\xd8\x09\x31\xf1\x24\x99\xc0\x72\x6c\x08\xc5\xb1\x7e\xca\xd7\x55\xfd\xb4\x2d\x0f\x79\x71\x2c\x57\x35\x06\x02\x38\xa1\x7c\x55\x62\xac\x13\x6d\x19\xd6\x31\x6c\x30\x06\xa9\xd7\x33\xe9\x17\x78\xa0\x05\x12\xb2\xe4\x25\x93\x82\x34\x73\xb9\xc8\x20\x33\x68\x45\x96\x35\x2f\x46\x7f\x8a\x14\x35\x58\xcf\x28\x63\x39\x4b\x31\xd7\x3c\x3d\x7d\xe2\xbb\xb3\xd4\x97\x55\x14\xcb\x60\x9a\x77\xe6\x83\x61\x7f\xf7\x78\xa8\xf6\x60\x7a\xe4\xeb\xcc\x90\x91\xef\x13\xb7\xbb\x1e\x9c\x53\x9b\x53\xa3\x1f\x2e\x7b\x14\xc3\x48\x88\xbf\x15\xc9\xea\xa2\xae\xd6\xd5\xa6\x1e\x57\xcb\xd5\xe6\x2c\x4e\x1c\xec\x84\xb5\xb3\xd8\x57\x3f\xb6\xfb\x32\xbf\x96\x69\x30\x84\x27\x0e\xde\x66\x60\xaf\x93\x84\x7c\xd3\xfe\xcd\xd8\xa9\xc5\x8d\x00\xc6\x94\x68\x2b\x00\x40\xc7\x18\x70\x92\xbb\x14\x5f\x71\xab\x6d\x46\x9e\x6f\x87\xe0\x29\xb5\xc7\x40\x1b\x83\xb6\xec\x5e\x1b\xc7\x37\x69\x02\x65\x18\x74\x8d\x8f\x70\x0d\x4e\xe5\xef\xef\xcf\x0a\x8c\x60\x69\xfe\xe5\xd3\x30\xea\x1e\x68\x79\x36\xc1\x36\x42\x26\xbb\x20\x15\x52\x25\x99\x3e\x18\x29\x45\x86\xfe\x03\x29\x67\xd4\x35\x29\x67\x54\x47\xca\x2a\xe8\xb8\xc1\x64\x55\x24\xda\xe9\xc2\x48\x9b\x04\x99\x10\x76\xcf\xbb\xe5\xe1\xe7\x73\xf4\xba\xa1\x68\x53\xf6\xcb\xd6\x59\xe4\x4d\x3b\x09\x19\x33\xf9\x73\x87\xce\xa3\x1d\x4a\x83\x5a\x25\xe0\xec\xa5\x91\x81\xd8\x79\x90\x9c\x4c\xe1\xdd\x1c\xf4\x48\x93\xc0\x84\xd4\xbb\x09\xa9\xe0\xe9\x5f\xd6\x7d\xb1\x03\x6a\xfb\x6b\x23\x84\xf2\x2e\x7d\x9f\xe8\x4b\x1e\x51\x7b\xef\xfd\x85\xe9\x12\xdf\xfa\xed\x44\xe2\x77\x00\x00\x00\xff\xff\x06\xf2\x9a\x1b\xa8\x04\x00\x00") + +func migrations20200320000002CreateAuthMethodsAuditSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20200320000002CreateAuthMethodsAuditSql, + "migrations/20200320000002-create-auth-methods-audit.sql", + ) +} + +func migrations20200320000002CreateAuthMethodsAuditSql() (*asset, error) { + bytes, err := migrations20200320000002CreateAuthMethodsAuditSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20200320000002-create-auth-methods-audit.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfc, 0xf6, 0x44, 0xea, 0x65, 0x85, 0x7, 0x17, 0x47, 0x8c, 0x51, 0x36, 0xe4, 0x32, 0x27, 0x95, 0xc5, 0xb2, 0x49, 0x74, 0xff, 0x3d, 0xc3, 0x7c, 0x51, 0x12, 0xa1, 0x6a, 0x42, 0x94, 0x19, 0x65}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. 
+func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "migrations/20200309000000-initial-1.sql": migrations20200309000000Initial1Sql, + "migrations/20200309000001-initial-2.sql": migrations20200309000001Initial2Sql, + "migrations/20200311000000-create-accounts.sql": migrations20200311000000CreateAccountsSql, + "migrations/20200311000001-create-identities.sql": migrations20200311000001CreateIdentitiesSql, + "migrations/20200311000002-create-auth-methods.sql": migrations20200311000002CreateAuthMethodsSql, + "migrations/20200320000000-create-accounts-audit.sql": migrations20200320000000CreateAccountsAuditSql, + "migrations/20200320000001-create-identities-audit.sql": migrations20200320000001CreateIdentitiesAuditSql, + "migrations/20200320000002-create-auth-methods-audit.sql": migrations20200320000002CreateAuthMethodsAuditSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "migrations": &bintree{nil, map[string]*bintree{ + "20200309000000-initial-1.sql": &bintree{migrations20200309000000Initial1Sql, map[string]*bintree{}}, + "20200309000001-initial-2.sql": &bintree{migrations20200309000001Initial2Sql, map[string]*bintree{}}, + "20200311000000-create-accounts.sql": &bintree{migrations20200311000000CreateAccountsSql, map[string]*bintree{}}, + "20200311000001-create-identities.sql": &bintree{migrations20200311000001CreateIdentitiesSql, map[string]*bintree{}}, + "20200311000002-create-auth-methods.sql": &bintree{migrations20200311000002CreateAuthMethodsSql, map[string]*bintree{}}, + "20200320000000-create-accounts-audit.sql": &bintree{migrations20200320000000CreateAccountsAuditSql, map[string]*bintree{}}, + "20200320000001-create-identities-audit.sql": &bintree{migrations20200320000001CreateIdentitiesAuditSql, map[string]*bintree{}}, + "20200320000002-create-auth-methods-audit.sql": &bintree{migrations20200320000002CreateAuthMethodsAuditSql, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) 
+} diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/dbmigrate_test.go b/exp/services/recoverysigner/internal/db/dbmigrate/dbmigrate_test.go new file mode 100644 index 0000000000..790fc4aec2 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/dbmigrate_test.go @@ -0,0 +1,255 @@ +package dbmigrate + +import ( + "net/http" + "os" + "strings" + "testing" + + assetfs "github.com/elazarl/go-bindata-assetfs" + migrate "github.com/rubenv/sql-migrate" + "github.com/shurcooL/httpfs/filter" + dbpkg "github.com/stellar/go/exp/services/recoverysigner/internal/db" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + supportHttp "github.com/stellar/go/support/http" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGeneratedAssets(t *testing.T) { + localAssets := http.FileSystem(filter.Keep(http.Dir("."), func(path string, fi os.FileInfo) bool { + return fi.IsDir() || strings.HasSuffix(path, ".sql") + })) + generatedAssets := &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo} + + if !supportHttp.EqualFileSystems(localAssets, generatedAssets, "/") { + t.Fatalf("generated migrations does not match local migrations") + } +} + +func TestPlanMigration_upApplyOne(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + migrations, err := PlanMigration(session, migrate.Up, 1) + require.NoError(t, err) + wantMigrations := []string{"20200309000000-initial-1.sql"} + assert.Equal(t, wantMigrations, migrations) +} + +func TestPlanMigration_upApplyAll(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + migrations, err := PlanMigration(session, migrate.Up, 0) + require.NoError(t, err) + require.GreaterOrEqual(t, len(migrations), 2) + wantAtLeastMigrations := []string{ + "20200309000000-initial-1.sql", + "20200309000001-initial-2.sql", + } + assert.Equal(t, wantAtLeastMigrations, migrations[:2]) +} + +func TestPlanMigration_upApplyNone(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 0) + require.NoError(t, err) + require.Greater(t, n, 1) + + migrations, err := PlanMigration(session, migrate.Up, 0) + require.NoError(t, err) + require.Empty(t, migrations) +} + +func TestPlanMigration_downApplyOne(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + migrations, err := PlanMigration(session, migrate.Down, 1) + require.NoError(t, err) + wantMigrations := []string{"20200309000001-initial-2.sql"} + assert.Equal(t, wantMigrations, migrations) +} + +func TestPlanMigration_downApplyAll(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + migrations, err := PlanMigration(session, migrate.Down, 0) + require.NoError(t, err) + wantMigrations := []string{ + "20200309000001-initial-2.sql", + "20200309000000-initial-1.sql", + } + assert.Equal(t, wantMigrations, migrations) +} + +func TestPlanMigration_downApplyNone(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := 
Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 0) + require.NoError(t, err) + require.Equal(t, 2, n) + + migrations, err := PlanMigration(session, migrate.Down, 0) + require.NoError(t, err) + assert.Empty(t, migrations) +} + +func TestMigrate_upApplyOne(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 1) + require.NoError(t, err) + assert.Equal(t, 1, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "20200309000000-initial-1.sql", + } + assert.Equal(t, wantIDs, ids) +} + +func TestMigrate_upApplyAll(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 0) + require.NoError(t, err) + require.Greater(t, n, 1) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "20200309000000-initial-1.sql", + "20200309000001-initial-2.sql", + "20200311000000-create-accounts.sql", + "20200311000001-create-identities.sql", + "20200311000002-create-auth-methods.sql", + "20200320000000-create-accounts-audit.sql", + "20200320000001-create-identities-audit.sql", + "20200320000002-create-auth-methods-audit.sql", + } + assert.Equal(t, wantIDs, ids) +} + +func TestMigrate_upApplyNone(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 0) + require.NoError(t, err) + require.Greater(t, n, 1) + + n, err = Migrate(session, migrate.Up, 0) + require.NoError(t, err) + require.Zero(t, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "20200309000000-initial-1.sql", + "20200309000001-initial-2.sql", + "20200311000000-create-accounts.sql", + "20200311000001-create-identities.sql", + "20200311000002-create-auth-methods.sql", + "20200320000000-create-accounts-audit.sql", + "20200320000001-create-identities-audit.sql", + "20200320000002-create-auth-methods-audit.sql", + } + assert.Equal(t, wantIDs, ids) +} + +func TestMigrate_downApplyOne(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 1) + require.NoError(t, err) + require.Equal(t, 1, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "20200309000000-initial-1.sql", + } + assert.Equal(t, wantIDs, ids) +} + +func TestMigrate_downApplyAll(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 0) + require.NoError(t, err) + require.Equal(t, 2, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + assert.Empty(t, ids) +} + +func TestMigrate_downApplyNone(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, 
err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 0) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 0) + require.NoError(t, err) + require.Equal(t, 0, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + assert.Empty(t, ids) +} diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200309000000-initial-1.sql b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200309000000-initial-1.sql new file mode 100644 index 0000000000..21884dcac3 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200309000000-initial-1.sql @@ -0,0 +1,7 @@ +-- This migration file is intentionally empty and is a first starting point for +-- our migrations before we yet have a schema. + +-- +migrate Up + +-- +migrate Down + diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200309000001-initial-2.sql b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200309000001-initial-2.sql new file mode 100644 index 0000000000..21884dcac3 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200309000001-initial-2.sql @@ -0,0 +1,7 @@ +-- This migration file is intentionally empty and is a first starting point for +-- our migrations before we yet have a schema. + +-- +migrate Up + +-- +migrate Down + diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000000-create-accounts.sql b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000000-create-accounts.sql new file mode 100644 index 0000000000..f0f1a20938 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000000-create-accounts.sql @@ -0,0 +1,16 @@ +-- +migrate Up + +CREATE TABLE accounts ( + id BIGINT NOT NULL PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE, + + address TEXT NOT NULL +); + +CREATE UNIQUE INDEX ON accounts (UPPER(address)); + +-- +migrate Down + +DROP TABLE accounts; diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000001-create-identities.sql b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000001-create-identities.sql new file mode 100644 index 0000000000..bb5d0132a5 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000001-create-identities.sql @@ -0,0 +1,17 @@ +-- +migrate Up + +CREATE TABLE identities ( + account_id BIGINT NOT NULL REFERENCES accounts (id) ON DELETE CASCADE, + id BIGINT NOT NULL PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE, + + role TEXT NOT NULL +); + +CREATE INDEX ON identities (account_id); + +-- +migrate Down + +DROP TABLE identities; diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000002-create-auth-methods.sql b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000002-create-auth-methods.sql new file mode 100644 index 0000000000..aaef233eb5 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200311000002-create-auth-methods.sql @@ -0,0 +1,28 @@ +-- +migrate Up + +CREATE TYPE auth_method_type AS ENUM ( + 'stellar_address', + 'phone_number', + 'email' +); + 
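+-- Note: these enum values match the auth method type strings used by the
+-- account package ('stellar_address', 'phone_number', 'email').
+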
+CREATE TABLE auth_methods ( + account_id BIGINT NOT NULL REFERENCES accounts (id) ON DELETE CASCADE, + identity_id BIGINT NOT NULL REFERENCES identities (id) ON DELETE CASCADE, + id BIGINT NOT NULL PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + + created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP WITH TIME ZONE, + + type_ auth_method_type NOT NULL, + value text NOT NULL +); + +CREATE INDEX ON auth_methods (account_id); +CREATE INDEX ON auth_methods (identity_id); +CREATE INDEX ON auth_methods (type_, value); + +-- +migrate Down + +DROP TABLE auth_methods; +DROP TYPE auth_method_type; diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200320000000-create-accounts-audit.sql b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200320000000-create-accounts-audit.sql new file mode 100644 index 0000000000..3d1e5f3a5e --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200320000000-create-accounts-audit.sql @@ -0,0 +1,43 @@ +-- +migrate Up + +CREATE TYPE audit_op AS ENUM ( + 'INSERT', + 'UPDATE', + 'DELETE' +); + +CREATE TABLE accounts_audit ( + audit_id BIGINT NOT NULL PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + audit_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + audit_user TEXT NOT NULL DEFAULT USER, + audit_op audit_op NOT NULL, + LIKE accounts +); + +-- +migrate StatementBegin +CREATE FUNCTION record_accounts_audit() RETURNS TRIGGER AS $BODY$ + BEGIN + IF (TG_OP = 'INSERT') THEN + INSERT INTO accounts_audit VALUES (DEFAULT, DEFAULT, DEFAULT, TG_OP::audit_op, NEW.*); + RETURN NEW; + ELSIF (TG_OP = 'UPDATE') THEN + INSERT INTO accounts_audit VALUES (DEFAULT, DEFAULT, DEFAULT, TG_OP::audit_op, NEW.*); + RETURN NEW; + ELSIF (TG_OP = 'DELETE') THEN + INSERT INTO accounts_audit VALUES (DEFAULT, DEFAULT, DEFAULT, TG_OP::audit_op, OLD.*); + RETURN OLD; + END IF; + END; +$BODY$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +CREATE TRIGGER record_accounts_audit +AFTER INSERT OR UPDATE OR DELETE ON accounts + FOR EACH ROW EXECUTE PROCEDURE record_accounts_audit(); + +-- +migrate Down + +DROP TRIGGER record_accounts_audit ON accounts; +DROP FUNCTION record_accounts_audit; +DROP TABLE accounts_audit; +DROP TYPE audit_op; diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200320000001-create-identities-audit.sql b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200320000001-create-identities-audit.sql new file mode 100644 index 0000000000..6279c363a9 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200320000001-create-identities-audit.sql @@ -0,0 +1,36 @@ +-- +migrate Up + +CREATE TABLE identities_audit ( + audit_id BIGINT NOT NULL PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + audit_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + audit_user TEXT NOT NULL DEFAULT USER, + audit_op audit_op NOT NULL, + LIKE identities +); + +-- +migrate StatementBegin +CREATE FUNCTION record_identities_audit() RETURNS TRIGGER AS $BODY$ + BEGIN + IF (TG_OP = 'INSERT') THEN + INSERT INTO identities_audit VALUES (DEFAULT, DEFAULT, DEFAULT, TG_OP::audit_op, NEW.*); + RETURN NEW; + ELSIF (TG_OP = 'UPDATE') THEN + INSERT INTO identities_audit VALUES (DEFAULT, DEFAULT, DEFAULT, TG_OP::audit_op, NEW.*); + RETURN NEW; + ELSIF (TG_OP = 'DELETE') THEN + INSERT INTO identities_audit VALUES (DEFAULT, DEFAULT, DEFAULT, TG_OP::audit_op, OLD.*); + RETURN OLD; + END IF; + END; +$BODY$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +CREATE TRIGGER 
record_identities_audit +AFTER INSERT OR UPDATE OR DELETE ON identities + FOR EACH ROW EXECUTE PROCEDURE record_identities_audit(); + +-- +migrate Down + +DROP TRIGGER record_identities_audit ON identities; +DROP FUNCTION record_identities_audit; +DROP TABLE identities_audit; diff --git a/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200320000002-create-auth-methods-audit.sql b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200320000002-create-auth-methods-audit.sql new file mode 100644 index 0000000000..ffa1be81de --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbmigrate/migrations/20200320000002-create-auth-methods-audit.sql @@ -0,0 +1,36 @@ +-- +migrate Up + +CREATE TABLE auth_methods_audit ( + audit_id BIGINT NOT NULL PRIMARY KEY GENERATED ALWAYS AS IDENTITY, + audit_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(), + audit_user TEXT NOT NULL DEFAULT USER, + audit_op audit_op NOT NULL, + LIKE auth_methods +); + +-- +migrate StatementBegin +CREATE FUNCTION record_auth_methods_audit() RETURNS TRIGGER AS $BODY$ + BEGIN + IF (TG_OP = 'INSERT') THEN + INSERT INTO auth_methods_audit VALUES (DEFAULT, DEFAULT, DEFAULT, TG_OP::audit_op, NEW.*); + RETURN NEW; + ELSIF (TG_OP = 'UPDATE') THEN + INSERT INTO auth_methods_audit VALUES (DEFAULT, DEFAULT, DEFAULT, TG_OP::audit_op, NEW.*); + RETURN NEW; + ELSIF (TG_OP = 'DELETE') THEN + INSERT INTO auth_methods_audit VALUES (DEFAULT, DEFAULT, DEFAULT, TG_OP::audit_op, OLD.*); + RETURN OLD; + END IF; + END; +$BODY$ LANGUAGE plpgsql; +-- +migrate StatementEnd + +CREATE TRIGGER record_auth_methods_audit +AFTER INSERT OR UPDATE OR DELETE ON auth_methods + FOR EACH ROW EXECUTE PROCEDURE record_auth_methods_audit(); + +-- +migrate Down + +DROP TRIGGER record_auth_methods_audit ON auth_methods; +DROP FUNCTION record_auth_methods_audit; +DROP TABLE auth_methods_audit; diff --git a/exp/services/recoverysigner/internal/db/dbtest/dbtest.go b/exp/services/recoverysigner/internal/db/dbtest/dbtest.go new file mode 100644 index 0000000000..0e41f434cc --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbtest/dbtest.go @@ -0,0 +1,47 @@ +package dbtest + +import ( + "path" + "runtime" + "testing" + + migrate "github.com/rubenv/sql-migrate" + "github.com/stellar/go/support/db/dbtest" +) + +func OpenWithoutMigrations(t *testing.T) *dbtest.DB { + db := dbtest.Postgres(t) + + // Recoverysigner requires at least Postgres v10 because it uses IDENTITYs + // instead of SERIAL/BIGSERIAL, which are recommended against. + dbVersion := db.Version() + if dbVersion < 10 { + t.Skipf("Skipping test becuase Postgres v%d found, and Postgres v10+ required for this test.", dbVersion) + } + + return db +} + +func Open(t *testing.T) *dbtest.DB { + db := OpenWithoutMigrations(t) + + // Get the folder holding the migrations relative to this file. We cannot + // hardcode "../migrations" because Open is called from tests in multiple + // packages and tests are executed with the current working directory set + // to the package the test lives in. 
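+	// runtime.Caller(0) reports the path of this source file as recorded at
+	// build time, so joining "..", "dbmigrate", "migrations" onto its
+	// directory locates the migrations regardless of the caller's working
+	// directory.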
+ _, filename, _, _ := runtime.Caller(0) + migrationsDir := path.Join(path.Dir(filename), "..", "dbmigrate", "migrations") + + migrations := &migrate.FileMigrationSource{ + Dir: migrationsDir, + } + + conn := db.Open() + defer conn.Close() + + _, err := migrate.Exec(conn.DB, "postgres", migrations, migrate.Up) + if err != nil { + t.Fatal(err) + } + return db +} diff --git a/exp/services/recoverysigner/internal/db/dbtest/dbtest_test.go b/exp/services/recoverysigner/internal/db/dbtest/dbtest_test.go new file mode 100644 index 0000000000..95f6213b13 --- /dev/null +++ b/exp/services/recoverysigner/internal/db/dbtest/dbtest_test.go @@ -0,0 +1,18 @@ +package dbtest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOpen(t *testing.T) { + db := Open(t) + session := db.Open() + + count := 0 + err := session.Get(&count, `SELECT COUNT(*) FROM gorp_migrations`) + require.NoError(t, err) + assert.Greater(t, count, 0) +} diff --git a/exp/services/recoverysigner/internal/serve/account_delete.go b/exp/services/recoverysigner/internal/serve/account_delete.go new file mode 100644 index 0000000000..6349bd9503 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_delete.go @@ -0,0 +1,111 @@ +package serve + +import ( + "net/http" + + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/http/httpdecode" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" +) + +type accountDeleteHandler struct { + Logger *supportlog.Entry + SigningAddresses []*keypair.FromAddress + AccountStore account.Store +} + +type accountDeleteRequest struct { + Address *keypair.FromAddress `path:"address"` +} + +func (h accountDeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + claims, _ := auth.FromContext(ctx) + if claims.Address == "" && claims.PhoneNumber == "" && claims.Email == "" { + unauthorized.Render(w) + return + } + + req := accountDeleteRequest{} + err := httpdecode.Decode(r, &req) + if err != nil || req.Address == nil { + badRequest.Render(w) + return + } + + l := h.Logger.Ctx(ctx). + WithField("account", req.Address.Address()) + + l.Info("Request to delete account.") + + acc, err := h.AccountStore.Get(req.Address.Address()) + if err == account.ErrNotFound { + l.Info("Account not found.") + notFound.Render(w) + return + } else if err != nil { + l.Error(err) + serverError.Render(w) + return + } + + resp := accountResponse{ + Address: acc.Address, + } + for _, signingAddress := range h.SigningAddresses { + resp.Signers = append(resp.Signers, accountResponseSigner{ + Key: signingAddress.Address(), + }) + } + + // Authorized if authenticated as the account. + authorized := claims.Address == req.Address.Address() + l.Infof("Authorized with self: %v.", authorized) + + // Authorized if authenticated as an identity registered with the account. 
+ for _, i := range acc.Identities { + respIdentity := accountResponseIdentity{ + Role: i.Role, + } + for _, m := range i.AuthMethods { + if m.Value != "" && ((m.Type == account.AuthMethodTypeAddress && m.Value == claims.Address) || + (m.Type == account.AuthMethodTypePhoneNumber && m.Value == claims.PhoneNumber) || + (m.Type == account.AuthMethodTypeEmail && m.Value == claims.Email)) { + respIdentity.Authenticated = true + authorized = true + l.Infof("Authorized with %s.", m.Type) + break + } + } + + resp.Identities = append(resp.Identities, respIdentity) + } + + l.Infof("Authorized: %v.", authorized) + if !authorized { + notFound.Render(w) + return + } + + l.Info("Deleting account.") + + err = h.AccountStore.Delete(req.Address.Address()) + if err == account.ErrNotFound { + // It can happen if two authorized users are trying to delete the account at the same time. + l.Info("Account not found.") + notFound.Render(w) + return + } else if err != nil { + l.Error("Error deleting account:", err) + serverError.Render(w) + return + } + + l.Info("Deleted account.") + + httpjson.Render(w, resp, httpjson.JSON) +} diff --git a/exp/services/recoverysigner/internal/serve/account_delete_test.go b/exp/services/recoverysigner/internal/serve/account_delete_test.go new file mode 100644 index 0000000000..117d34b69b --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_delete_test.go @@ -0,0 +1,457 @@ +package serve + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/go-chi/chi" + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + supportlog "github.com/stellar/go/support/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test that when authenticated with an account, but deleting the wrong account, +// an error is returned. +func TestAccountDelete_authenticatedNotAuthorized(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + }, + }) + h := accountDeleteHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GCNPATZQVSFGGSAHR4T54WNELPHYEBTSKH4IIKUTC7CHPLG6EPPC4PJL"}) + r := httptest.NewRequest("DELETE", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Delete("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The resource at the url requested was not found." 
+}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountDelete_notAuthenticated(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + }, + }) + h := accountDeleteHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{}) + r := httptest.NewRequest("DELETE", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Delete("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusUnauthorized, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request could not be authenticated." +}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountDelete_notFound(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + }, + }) + h := accountDeleteHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}) + r := httptest.NewRequest("DELETE", "/GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Delete("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The resource at the url requested was not found." 
+}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountDelete_authenticatedByIdentityAddress(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountDeleteHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}) + r := httptest.NewRequest("DELETE", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Delete("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "sender", + "authenticated": true + }, + { + "role": "receiver" + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) + + _, err = s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.Error(t, err) + assert.Equal(t, account.ErrNotFound, err) +} + +func TestAccountDelete_authenticatedByAccountAddress(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountDeleteHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + r := httptest.NewRequest("DELETE", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Delete("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + 
assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "sender" + }, + { + "role": "receiver" + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) + + _, err = s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.Error(t, err) + assert.Equal(t, account.ErrNotFound, err) +} + +func TestAccountDelete_authenticatedByPhoneNumber(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountDeleteHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{PhoneNumber: "+10000000000"}) + r := httptest.NewRequest("DELETE", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Delete("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "sender" + }, + { + "role": "receiver", + "authenticated": true + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) + + _, err = s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.Error(t, err) + assert.Equal(t, account.ErrNotFound, err) +} + +func TestAccountDelete_authenticatedByEmail(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + h := accountDeleteHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: 
[]*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Email: "user1@example.com"}) + r := httptest.NewRequest("DELETE", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Delete("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "sender" + }, + { + "role": "receiver", + "authenticated": true + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) + + _, err = s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.Error(t, err) + assert.Equal(t, account.ErrNotFound, err) +} diff --git a/exp/services/recoverysigner/internal/serve/account_get.go b/exp/services/recoverysigner/internal/serve/account_get.go new file mode 100644 index 0000000000..615a555383 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_get.go @@ -0,0 +1,95 @@ +package serve + +import ( + "net/http" + + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/http/httpdecode" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" +) + +type accountGetHandler struct { + Logger *supportlog.Entry + SigningAddresses []*keypair.FromAddress + AccountStore account.Store +} + +type accountGetRequest struct { + Address *keypair.FromAddress `path:"address"` +} + +func (h accountGetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + claims, _ := auth.FromContext(ctx) + if claims.Address == "" && claims.PhoneNumber == "" && claims.Email == "" { + unauthorized.Render(w) + return + } + + req := accountGetRequest{} + err := httpdecode.Decode(r, &req) + if err != nil || req.Address == nil { + badRequest.Render(w) + return + } + + l := h.Logger.Ctx(ctx). + WithField("account", req.Address.Address()) + + l.Info("Request to get account.") + + acc, err := h.AccountStore.Get(req.Address.Address()) + if err == account.ErrNotFound { + l.Info("Account not found.") + notFound.Render(w) + return + } else if err != nil { + l.Error(err) + serverError.Render(w) + return + } + + resp := accountResponse{ + Address: acc.Address, + } + for _, signingAddress := range h.SigningAddresses { + resp.Signers = append(resp.Signers, accountResponseSigner{ + Key: signingAddress.Address(), + }) + } + + // Authorized if authenticated as the account. + authorized := claims.Address == req.Address.Address() + l.Infof("Authorized with self: %v.", authorized) + + // Authorized if authenticated as an identity registered with the account. 
+ for _, i := range acc.Identities { + respIdentity := accountResponseIdentity{ + Role: i.Role, + } + for _, m := range i.AuthMethods { + if m.Value != "" && ((m.Type == account.AuthMethodTypeAddress && m.Value == claims.Address) || + (m.Type == account.AuthMethodTypePhoneNumber && m.Value == claims.PhoneNumber) || + (m.Type == account.AuthMethodTypeEmail && m.Value == claims.Email)) { + respIdentity.Authenticated = true + authorized = true + l.Infof("Authorized with %s.", m.Type) + break + } + } + + resp.Identities = append(resp.Identities, respIdentity) + } + + l.Infof("Authorized: %v.", authorized) + if !authorized { + notFound.Render(w) + return + } + + httpjson.Render(w, resp, httpjson.JSON) +} diff --git a/exp/services/recoverysigner/internal/serve/account_get_test.go b/exp/services/recoverysigner/internal/serve/account_get_test.go new file mode 100644 index 0000000000..f6e2fa6e07 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_get_test.go @@ -0,0 +1,441 @@ +package serve + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/go-chi/chi" + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + supportlog "github.com/stellar/go/support/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test that when authenticated with an account, but querying the wrong account, +// an error is returned. +func TestAccountGet_authenticatedNotAuthorized(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + }, + }) + h := accountGetHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GCNPATZQVSFGGSAHR4T54WNELPHYEBTSKH4IIKUTC7CHPLG6EPPC4PJL"}) + r := httptest.NewRequest("GET", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Get("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The resource at the url requested was not found." 
+}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountGet_notAuthenticated(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + }, + }) + h := accountGetHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{}) + r := httptest.NewRequest("GET", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Get("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusUnauthorized, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request could not be authenticated." +}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountGet_notFound(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + }, + }) + h := accountGetHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}) + r := httptest.NewRequest("GET", "/GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Get("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The resource at the url requested was not found." 
+}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountGet_authenticatedByIdentityAddress(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountGetHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}) + r := httptest.NewRequest("GET", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Get("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "sender", + "authenticated": true + }, + { + "role": "receiver" + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountGet_authenticatedByAccountAddress(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+11000000000"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountGetHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + r := httptest.NewRequest("GET", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Get("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": 
"GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "sender" + }, + { + "role": "receiver" + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountGet_authenticatedByPhoneNumber(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountGetHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{PhoneNumber: "+10000000000"}) + r := httptest.NewRequest("GET", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Get("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "sender" + }, + { + "role": "receiver", + "authenticated": true + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountGet_authenticatedByEmail(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + h := accountGetHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Email: "user1@example.com"}) + r := httptest.NewRequest("GET", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + 
m.Get("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "sender" + }, + { + "role": "receiver", + "authenticated": true + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) +} diff --git a/exp/services/recoverysigner/internal/serve/account_list.go b/exp/services/recoverysigner/internal/serve/account_list.go new file mode 100644 index 0000000000..448fae5b27 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_list.go @@ -0,0 +1,179 @@ +package serve + +import ( + "net/http" + + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" +) + +type accountListHandler struct { + Logger *supportlog.Entry + SigningAddresses []*keypair.FromAddress + AccountStore account.Store +} + +type accountListResponse struct { + Accounts []accountResponse `json:"accounts"` +} + +func (h accountListHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + claims, _ := auth.FromContext(ctx) + if claims.Address == "" && claims.PhoneNumber == "" && claims.Email == "" { + unauthorized.Render(w) + return + } + + l := h.Logger.Ctx(ctx) + + l.Info("Request to get accounts.") + + resp := accountListResponse{ + Accounts: []accountResponse{}, + } + + // Find accounts matching the authenticated address. + if claims.Address != "" { + // Find an account that has that address. + acc, err := h.AccountStore.Get(claims.Address) + if err == account.ErrNotFound { + // Do nothing. + } else if err != nil { + l.Error(err) + serverError.Render(w) + return + } else { + accResp := accountResponse{ + Address: acc.Address, + } + for _, signingAddress := range h.SigningAddresses { + accResp.Signers = append(accResp.Signers, accountResponseSigner{ + Key: signingAddress.Address(), + }) + } + for _, i := range acc.Identities { + accRespIdentity := accountResponseIdentity{ + Role: i.Role, + } + accResp.Identities = append(accResp.Identities, accRespIdentity) + } + resp.Accounts = append(resp.Accounts, accResp) + l.WithField("account", acc.Address). + WithField("auth_method_type", account.AuthMethodTypeAddress). + Info("Found account with auth method type as self.") + } + + // Find accounts that have the address listed as an owner or other identity. 
+ accs, err := h.AccountStore.FindWithIdentityAddress(claims.Address) + if err != nil { + l.Error(err) + serverError.Render(w) + return + } + for _, acc := range accs { + accResp := accountResponse{ + Address: acc.Address, + } + for _, signingAddress := range h.SigningAddresses { + accResp.Signers = append(accResp.Signers, accountResponseSigner{ + Key: signingAddress.Address(), + }) + } + for _, i := range acc.Identities { + accRespIdentity := accountResponseIdentity{ + Role: i.Role, + } + for _, m := range i.AuthMethods { + if m.Type == account.AuthMethodTypeAddress && m.Value == claims.Address { + accRespIdentity.Authenticated = true + break + } + } + accResp.Identities = append(accResp.Identities, accRespIdentity) + } + resp.Accounts = append(resp.Accounts, accResp) + l.WithField("account", acc.Address). + WithField("auth_method_type", account.AuthMethodTypeAddress). + Info("Found account with auth method type as identity.") + } + } + + // Find accounts matching the authenticated phone number. + if claims.PhoneNumber != "" { + accs, err := h.AccountStore.FindWithIdentityPhoneNumber(claims.PhoneNumber) + if err != nil { + h.Logger.Error(err) + serverError.Render(w) + return + } + for _, acc := range accs { + accResp := accountResponse{ + Address: acc.Address, + } + for _, signingAddress := range h.SigningAddresses { + accResp.Signers = append(accResp.Signers, accountResponseSigner{ + Key: signingAddress.Address(), + }) + } + for _, i := range acc.Identities { + accRespIdentity := accountResponseIdentity{ + Role: i.Role, + } + for _, m := range i.AuthMethods { + if m.Type == account.AuthMethodTypePhoneNumber && m.Value == claims.PhoneNumber { + accRespIdentity.Authenticated = true + break + } + } + accResp.Identities = append(accResp.Identities, accRespIdentity) + } + resp.Accounts = append(resp.Accounts, accResp) + l.WithField("account", acc.Address). + WithField("auth_method_type", account.AuthMethodTypePhoneNumber). + Info("Found account with auth method type as identity.") + } + } + + // Find accounts matching the authenticated email. + if claims.Email != "" { + accs, err := h.AccountStore.FindWithIdentityEmail(claims.Email) + if err != nil { + h.Logger.Error(err) + serverError.Render(w) + return + } + for _, acc := range accs { + accResp := accountResponse{ + Address: acc.Address, + } + for _, signingAddress := range h.SigningAddresses { + accResp.Signers = append(accResp.Signers, accountResponseSigner{ + Key: signingAddress.Address(), + }) + } + for _, i := range acc.Identities { + accRespIdentity := accountResponseIdentity{ + Role: i.Role, + } + for _, m := range i.AuthMethods { + if m.Type == account.AuthMethodTypeEmail && m.Value == claims.Email { + accRespIdentity.Authenticated = true + break + } + } + accResp.Identities = append(accResp.Identities, accRespIdentity) + } + resp.Accounts = append(resp.Accounts, accResp) + l.WithField("account", acc.Address). + WithField("auth_method_type", account.AuthMethodTypeEmail). 
+ Info("Found account with auth method type as identity.") + } + } + + httpjson.Render(w, resp, httpjson.JSON) +} diff --git a/exp/services/recoverysigner/internal/serve/account_list_test.go b/exp/services/recoverysigner/internal/serve/account_list_test.go new file mode 100644 index 0000000000..c09f939c83 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_list_test.go @@ -0,0 +1,337 @@ +package serve + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + supportlog "github.com/stellar/go/support/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test that when authenticated with an account, but no matching accounts, +// empty list is returned. +func TestAccountList_authenticatedButNonePermitted(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + }) + s.Add(account.Account{ + Address: "GDU2CH4V3QYQB2BLMX45XQLVBEKSIN2EZLP37I6MZZ7NAR5U3TLZDQEY", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}, + }, + }, + }, + }) + s.Add(account.Account{ + Address: "GCS4CVAAX7MVUNHP24655TNHIJ4YFN7GW5V3RFDC2BXVVMVDTB3GYH5U", + Identities: []account.Identity{ + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}, + }, + }, + }, + }) + h := accountListHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GCNPATZQVSFGGSAHR4T54WNELPHYEBTSKH4IIKUTC7CHPLG6EPPC4PJL"}) + r := httptest.NewRequest("", "/", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "accounts": [] +}` + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountList_authenticatedByPhoneNumber(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + s.Add(account.Account{ + Address: "GDU2CH4V3QYQB2BLMX45XQLVBEKSIN2EZLP37I6MZZ7NAR5U3TLZDQEY", + Identities: []account.Identity{ + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + s.Add(account.Account{ + Address: "GCS4CVAAX7MVUNHP24655TNHIJ4YFN7GW5V3RFDC2BXVVMVDTB3GYH5U", + Identities: []account.Identity{ + { + 
Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+20000000000"}, + }, + }, + }, + }) + h := accountListHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{PhoneNumber: "+10000000000"}) + r := httptest.NewRequest("", "/", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "accounts": [ + { + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { "role": "sender", "authenticated": true } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] + }, + { + "address": "GDU2CH4V3QYQB2BLMX45XQLVBEKSIN2EZLP37I6MZZ7NAR5U3TLZDQEY", + "identities": [ + { "role": "receiver", "authenticated": true } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] + } + ] +}` + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountList_authenticatedByEmail(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + s.Add(account.Account{ + Address: "GDU2CH4V3QYQB2BLMX45XQLVBEKSIN2EZLP37I6MZZ7NAR5U3TLZDQEY", + Identities: []account.Identity{ + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + s.Add(account.Account{ + Address: "GCS4CVAAX7MVUNHP24655TNHIJ4YFN7GW5V3RFDC2BXVVMVDTB3GYH5U", + Identities: []account.Identity{ + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + }) + h := accountListHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Email: "user1@example.com"}) + r := httptest.NewRequest("", "/", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "accounts": [ + { + "address": 
"GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { "role": "sender", "authenticated": true } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] + }, + { + "address": "GDU2CH4V3QYQB2BLMX45XQLVBEKSIN2EZLP37I6MZZ7NAR5U3TLZDQEY", + "identities": [ + { "role": "receiver", "authenticated": true } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] + } + ] +}` + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountList_notAuthenticated(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + s.Add(account.Account{ + Address: "GDU2CH4V3QYQB2BLMX45XQLVBEKSIN2EZLP37I6MZZ7NAR5U3TLZDQEY", + Identities: []account.Identity{ + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + s.Add(account.Account{ + Address: "GCS4CVAAX7MVUNHP24655TNHIJ4YFN7GW5V3RFDC2BXVVMVDTB3GYH5U", + Identities: []account.Identity{ + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + }) + h := accountListHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{}) + r := httptest.NewRequest("", "/", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusUnauthorized, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request could not be authenticated." 
+}` + assert.JSONEq(t, wantBody, string(body)) +} diff --git a/exp/services/recoverysigner/internal/serve/account_post.go b/exp/services/recoverysigner/internal/serve/account_post.go new file mode 100644 index 0000000000..e189b4f5ab --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_post.go @@ -0,0 +1,156 @@ +package serve + +import ( + "net/http" + + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httpdecode" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" +) + +type accountPostHandler struct { + Logger *supportlog.Entry + SigningAddresses []*keypair.FromAddress + AccountStore account.Store +} + +type accountPostRequest struct { + Address *keypair.FromAddress `path:"address"` + Identities []accountPostRequestIdentity `json:"identities" form:"identities"` +} + +func (r accountPostRequest) Validate() error { + if len(r.Identities) == 0 { + return errors.Errorf("no identities provided but at least one is required") + } + for _, i := range r.Identities { + err := i.Validate() + if err != nil { + return err + } + } + return nil +} + +type accountPostRequestIdentity struct { + Role string `json:"role" form:"role"` + AuthMethods []accountPostRequestIdentityAuthMethod `json:"auth_methods" form:"auth_methods"` +} + +func (i accountPostRequestIdentity) Validate() error { + if i.Role == "" { + return errors.Errorf("role is not set but required") + } + if len(i.AuthMethods) == 0 { + return errors.Errorf("auth methods not provided for identity but required") + } + for _, am := range i.AuthMethods { + err := am.Validate() + if err != nil { + return err + } + } + return nil +} + +type accountPostRequestIdentityAuthMethod struct { + Type string `json:"type" form:"type"` + Value string `json:"value" form:"value"` +} + +func (am accountPostRequestIdentityAuthMethod) Validate() error { + if !account.AuthMethodType(am.Type).Valid() { + return errors.Errorf("auth method type %q unrecognized", am.Type) + } + // TODO: Validate auth method values: Stellar address, phone number and email. + return nil +} + +func (h accountPostHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + claims, _ := auth.FromContext(ctx) + if claims.Address == "" { + unauthorized.Render(w) + return + } + + req := accountPostRequest{} + err := httpdecode.Decode(r, &req) + if err != nil || req.Address == nil { + badRequest.Render(w) + return + } + + l := h.Logger.Ctx(ctx). + WithField("account", req.Address.Address()) + + l.Info("Request to register account.") + + if req.Address.Address() != claims.Address { + l.WithField("address", claims.Address). + Info("Not authorized as self, authorized as other address.") + unauthorized.Render(w) + return + } + + if req.Validate() != nil { + l.Info("Request validation failed.") + badRequest.Render(w) + return + } + + authMethodCount := 0 + acc := account.Account{ + Address: req.Address.Address(), + } + for _, i := range req.Identities { + accIdentity := account.Identity{ + Role: i.Role, + } + for _, m := range i.AuthMethods { + accIdentity.AuthMethods = append(accIdentity.AuthMethods, account.AuthMethod{ + Type: account.AuthMethodType(m.Type), + Value: m.Value, + }) + authMethodCount++ + } + acc.Identities = append(acc.Identities, accIdentity) + } + l = l. 
+ WithField("identities_count", len(acc.Identities)). + WithField("auth_methods_count", authMethodCount) + + err = h.AccountStore.Add(acc) + if err == account.ErrAlreadyExists { + l.Info("Account already registered.") + conflict.Render(w) + return + } else if err != nil { + l.Error(err) + serverError.Render(w) + return + } + + l.Info("Account registered.") + + resp := accountResponse{ + Address: acc.Address, + } + for _, signingAddress := range h.SigningAddresses { + resp.Signers = append(resp.Signers, accountResponseSigner{ + Key: signingAddress.Address(), + }) + } + for _, i := range acc.Identities { + respIdentity := accountResponseIdentity{ + Role: i.Role, + } + resp.Identities = append(resp.Identities, respIdentity) + } + httpjson.Render(w, resp, httpjson.JSON) +} diff --git a/exp/services/recoverysigner/internal/serve/account_post_test.go b/exp/services/recoverysigner/internal/serve/account_post_test.go new file mode 100644 index 0000000000..1dfdbaebe4 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_post_test.go @@ -0,0 +1,675 @@ +package serve + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/go-chi/chi" + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + supportlog "github.com/stellar/go/support/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccountPost_newWithRoleOwnerContentTypeJSON(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{ + "identities": [ + { + "role": "owner", + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "email", "value": "user1@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("POST", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { "role": "owner" } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: 
"GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "owner", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + } + assert.Equal(t, wantAcc, acc) +} + +func TestAccountPost_newWithRoleOwnerContentTypeForm(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + reqValues := url.Values{} + reqValues.Set("identities.0.role", "owner") + reqValues.Set("identities.0.auth_methods.0.type", "stellar_address") + reqValues.Set("identities.0.auth_methods.0.value", "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ") + reqValues.Set("identities.0.auth_methods.1.type", "phone_number") + reqValues.Set("identities.0.auth_methods.1.value", "+10000000000") + reqValues.Set("identities.0.auth_methods.2.type", "email") + reqValues.Set("identities.0.auth_methods.2.value", "user1@example.com") + req := reqValues.Encode() + t.Log("Request Body:", req) + r := httptest.NewRequest("POST", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { "role": "owner" } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "owner", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + } + assert.Equal(t, wantAcc, acc) +} + +func TestAccountPost_newWithRolesSenderReceiverContentTypeJSON(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + 
keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{ + "identities": [ + { + "role": "sender", + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "email", "value": "user1@example.com" } + ] + }, + { + "role": "receiver", + "auth_methods": [ + { "type": "stellar_address", "value": "GB5VOTKJ3IPGIYQBJ6GVJMUVEAIYGXZUJE4WYLPBJSHOTKLZTETBYOBI" }, + { "type": "phone_number", "value": "+20000000000" }, + { "type": "email", "value": "user2@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("POST", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { "role": "sender" }, + { "role": "receiver" } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GB5VOTKJ3IPGIYQBJ6GVJMUVEAIYGXZUJE4WYLPBJSHOTKLZTETBYOBI"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + assert.Equal(t, wantAcc, acc) +} + +func TestAccountPost_newWithRolesSenderReceiverContentTypeForm(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + reqValues := url.Values{} + reqValues.Set("identities.0.role", "sender") + reqValues.Set("identities.0.auth_methods.0.type", "stellar_address") + reqValues.Set("identities.0.auth_methods.0.value", "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ") + reqValues.Set("identities.0.auth_methods.1.type", 
"phone_number") + reqValues.Set("identities.0.auth_methods.1.value", "+10000000000") + reqValues.Set("identities.0.auth_methods.2.type", "email") + reqValues.Set("identities.0.auth_methods.2.value", "user1@example.com") + reqValues.Set("identities.1.role", "receiver") + reqValues.Set("identities.1.auth_methods.0.type", "stellar_address") + reqValues.Set("identities.1.auth_methods.0.value", "GB5VOTKJ3IPGIYQBJ6GVJMUVEAIYGXZUJE4WYLPBJSHOTKLZTETBYOBI") + reqValues.Set("identities.1.auth_methods.1.type", "phone_number") + reqValues.Set("identities.1.auth_methods.1.value", "+20000000000") + reqValues.Set("identities.1.auth_methods.2.type", "email") + reqValues.Set("identities.1.auth_methods.2.value", "user2@example.com") + req := reqValues.Encode() + t.Log("Request Body:", req) + r := httptest.NewRequest("POST", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { "role": "sender" }, + { "role": "receiver" } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GB5VOTKJ3IPGIYQBJ6GVJMUVEAIYGXZUJE4WYLPBJSHOTKLZTETBYOBI"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+20000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user2@example.com"}, + }, + }, + }, + } + assert.Equal(t, wantAcc, acc) +} + +func TestAccountPost_accountAddressInvalid(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + }) + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{}` + r := httptest.NewRequest("POST", "/ZDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := 
httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request was invalid in some way." +}` + assert.JSONEq(t, wantBody, string(body)) + + _, err = s.Get("ZDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + assert.Equal(t, account.ErrNotFound, err) +} + +func TestAccountPost_accountAlreadyExists(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + }) + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{ + "identities": [ + { + "role": "owner", + "auth_methods": [ + { "type": "phone_number", "value": "+10000000000" } + ] + } + ] +}` + r := httptest.NewRequest("POST", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + assert.Equal(t, http.StatusConflict, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request could not be completed because the resource already exists." +}` + assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + } + assert.Equal(t, wantAcc, acc) +} + +func TestAccountPost_identitiesNotProvided(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{}` + r := httptest.NewRequest("POST", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request was invalid in some way." 
+}` + assert.JSONEq(t, wantBody, string(body)) + + _, err = s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + assert.Equal(t, account.ErrNotFound, err) +} + +func TestAccountPost_roleNotProvided(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{ + "identities": [ + { + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "email", "value": "user1@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("POST", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request was invalid in some way." +}` + assert.JSONEq(t, wantBody, string(body)) + + _, err = s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + assert.Equal(t, account.ErrNotFound, err) +} + +func TestAccountPost_authMethodsNotProvided(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{ + "identities": [ + { + "role": "owner" + } + ] +}` + r := httptest.NewRequest("POST", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request was invalid in some way." 
+}` + assert.JSONEq(t, wantBody, string(body)) + + _, err = s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + assert.Equal(t, account.ErrNotFound, err) +} + +func TestAccountPost_authMethodTypeUnrecognized(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + }) + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{ + "identities": [ + { + "role": "owner", + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "wormhole_technology", "value": "galaxy5.earth3.asdfuaiosufd" }, + { "type": "email", "value": "user1@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("POST", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request was invalid in some way." +}` + assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + } + assert.Equal(t, wantAcc, acc) +} + +func TestAccountPost_notAuthenticatedForAccount(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountPostHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{}` + r := httptest.NewRequest("POST", "/GDUKTYDY3RDNTNOUFJ2GPL5PIZTMTRD5P2CT274SYH67Q5J3NYI7XKYB", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request could not be authenticated." 
+}` + assert.JSONEq(t, wantBody, string(body)) + + _, err = s.Get("GDUKTYDY3RDNTNOUFJ2GPL5PIZTMTRD5P2CT274SYH67Q5J3NYI7XKYB") + assert.Equal(t, account.ErrNotFound, err) +} diff --git a/exp/services/recoverysigner/internal/serve/account_put.go b/exp/services/recoverysigner/internal/serve/account_put.go new file mode 100644 index 0000000000..63bfe7165b --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_put.go @@ -0,0 +1,183 @@ +package serve + +import ( + "net/http" + + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httpdecode" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" +) + +type accountPutHandler struct { + Logger *supportlog.Entry + SigningAddresses []*keypair.FromAddress + AccountStore account.Store +} + +type accountPutRequest struct { + Address *keypair.FromAddress `path:"address"` + Identities []accountPutRequestIdentity `json:"identities" form:"identities"` +} + +func (r accountPutRequest) Validate() error { + if len(r.Identities) == 0 { + return errors.Errorf("no identities provided but at least one is required") + } + for _, i := range r.Identities { + err := i.Validate() + if err != nil { + return err + } + } + return nil +} + +type accountPutRequestIdentity struct { + Role string `json:"role" form:"role"` + AuthMethods []accountPutRequestIdentityAuthMethod `json:"auth_methods" form:"auth_methods"` +} + +func (i accountPutRequestIdentity) Validate() error { + if i.Role == "" { + return errors.Errorf("role is not set but required") + } + if len(i.AuthMethods) == 0 { + return errors.Errorf("auth methods not provided for identity but required") + } + for _, am := range i.AuthMethods { + err := am.Validate() + if err != nil { + return err + } + } + return nil +} + +type accountPutRequestIdentityAuthMethod struct { + Type string `json:"type" form:"type"` + Value string `json:"value" form:"value"` +} + +func (am accountPutRequestIdentityAuthMethod) Validate() error { + if !account.AuthMethodType(am.Type).Valid() { + return errors.Errorf("auth method type %q unrecognized", am.Type) + } + // TODO: Validate auth method values: Stellar address, phone number and email. + return nil +} + +func (h accountPutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + claims, _ := auth.FromContext(ctx) + if claims.Address == "" && claims.PhoneNumber == "" && claims.Email == "" { + unauthorized.Render(w) + return + } + + req := accountPutRequest{} + err := httpdecode.Decode(r, &req) + if err != nil || req.Address == nil { + badRequest.Render(w) + return + } + + l := h.Logger.Ctx(ctx). + WithField("account", req.Address.Address()) + + l.Info("Request to update account.") + + if req.Validate() != nil { + l.Info("Request validation failed.") + badRequest.Render(w) + return + } + + acc, err := h.AccountStore.Get(req.Address.Address()) + if err == account.ErrNotFound { + l.Info("Account not found.") + notFound.Render(w) + return + } else if err != nil { + l.Error(err) + serverError.Render(w) + return + } + + // Authorized if authenticated as the account. + authorized := claims.Address == req.Address.Address() + l.Infof("Authorized with self: %v.", authorized) + + // Authorized if authenticated as an identity registered with the account. 
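+	// A single matching auth method value (Stellar address, phone number, or
+	// email) on any of the account's identities is sufficient to authorize the
+	// request.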
+	for _, i := range acc.Identities {
+		for _, m := range i.AuthMethods {
+			if m.Value != "" && ((m.Type == account.AuthMethodTypeAddress && m.Value == claims.Address) ||
+				(m.Type == account.AuthMethodTypePhoneNumber && m.Value == claims.PhoneNumber) ||
+				(m.Type == account.AuthMethodTypeEmail && m.Value == claims.Email)) {
+				authorized = true
+				l.Infof("Authorized with %s.", m.Type)
+				break
+			}
+		}
+	}
+	if !authorized {
+		notFound.Render(w)
+		return
+	}
+
+	authMethodCount := 0
+	accWithNewIdentities := account.Account{
+		Address:    req.Address.Address(),
+		Identities: []account.Identity{},
+	}
+	for _, i := range req.Identities {
+		accIdentity := account.Identity{
+			Role: i.Role,
+		}
+		for _, m := range i.AuthMethods {
+			accIdentity.AuthMethods = append(accIdentity.AuthMethods, account.AuthMethod{
+				Type:  account.AuthMethodType(m.Type),
+				Value: m.Value,
+			})
+			authMethodCount++
+		}
+		accWithNewIdentities.Identities = append(accWithNewIdentities.Identities, accIdentity)
+	}
+	l = l.
+		WithField("identities_count", len(accWithNewIdentities.Identities)).
+		WithField("auth_methods_count", authMethodCount)
+
+	err = h.AccountStore.Update(accWithNewIdentities)
+	if err == account.ErrNotFound {
+		// This can happen if another authorized user deletes the account at the same time.
+		l.Info("Account not found.")
+		notFound.Render(w)
+		return
+	} else if err != nil {
+		h.Logger.Error(err)
+		serverError.Render(w)
+		return
+	}
+
+	l.Info("Account updated.")
+
+	resp := accountResponse{
+		Address: accWithNewIdentities.Address,
+	}
+	for _, signingAddress := range h.SigningAddresses {
+		resp.Signers = append(resp.Signers, accountResponseSigner{
+			Key: signingAddress.Address(),
+		})
+	}
+	for _, i := range accWithNewIdentities.Identities {
+		resp.Identities = append(resp.Identities, accountResponseIdentity{
+			Role: i.Role,
+		})
+	}
+
+	httpjson.Render(w, resp, httpjson.JSON)
+}
diff --git a/exp/services/recoverysigner/internal/serve/account_put_test.go b/exp/services/recoverysigner/internal/serve/account_put_test.go
new file mode 100644
index 0000000000..6f83940cc1
--- /dev/null
+++ b/exp/services/recoverysigner/internal/serve/account_put_test.go
@@ -0,0 +1,531 @@
+package serve
+
+import (
+	"context"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+
+	"github.com/go-chi/chi"
+	"github.com/stellar/go/exp/services/recoverysigner/internal/account"
+	"github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest"
+	"github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth"
+	"github.com/stellar/go/keypair"
+	supportlog "github.com/stellar/go/support/log"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAccountPut_authenticatedNotAuthorized(t *testing.T) {
+	s := &account.DBStore{DB: dbtest.Open(t).Open()}
+	s.Add(account.Account{
+		Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N",
+		Identities: []account.Identity{
+			{
+				Role: "sender",
+				AuthMethods: []account.AuthMethod{
+					{Type: account.AuthMethodTypePhoneNumber, Value: "+11000000000"},
+				},
+			},
+			{
+				Role: "receiver",
+				AuthMethods: []account.AuthMethod{
+					{Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"},
+				},
+			},
+		},
+	})
+	h := accountPutHandler{
+		Logger:       supportlog.DefaultLogger,
+		AccountStore: s,
+		SigningAddresses: []*keypair.FromAddress{
+			keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"),
+			keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"),
+		},
+ } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GCNPATZQVSFGGSAHR4T54WNELPHYEBTSKH4IIKUTC7CHPLG6EPPC4PJL"}) + req := `{ + "identities": [ + { + "role": "owner", + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "email", "value": "user1@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("PUT", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Put("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The resource at the url requested was not found." +}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountPut_notAuthenticated(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+11000000000"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountPutHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{}) + req := `{ + "identities": [ + { + "role": "owner", + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "email", "value": "user1@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("PUT", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Put("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusUnauthorized, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The request could not be authenticated." 
+}` + + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountPut_authenticatedByAccountAddress(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+11000000000"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountPutHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}) + req := `{ + "identities": [ + { + "role": "owner", + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "email", "value": "user1@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("PUT", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Put("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "owner" + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "owner", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + } + assert.Equal(t, wantAcc, acc) +} + +func TestAccountPut_authenticatedByIdentityAddress(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountPutHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ 
+ keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}) + req := `{ + "identities": [ + { + "role": "owner", + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "email", "value": "user1@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("PUT", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Put("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "owner" + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "owner", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + } + assert.Equal(t, wantAcc, acc) +} + +func TestAccountPut_authenticatedByPhoneNumber(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountPutHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{PhoneNumber: "+10000000000"}) + req := `{ + "identities": [ + { + "role": "owner", + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "email", "value": "user1@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("PUT", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = 
r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Put("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "owner" + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "owner", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + } + assert.Equal(t, wantAcc, acc) +} + +func TestAccountPut_authenticatedByEmail(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + h := accountPutHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningAddresses: []*keypair.FromAddress{ + keypair.MustParseAddress("GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE"), + keypair.MustParseAddress("GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS"), + }, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Email: "user1@example.com"}) + req := `{ + "identities": [ + { + "role": "owner", + "auth_methods": [ + { "type": "stellar_address", "value": "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ" }, + { "type": "phone_number", "value": "+10000000000" }, + { "type": "email", "value": "user1@example.com" } + ] + } + ] +}` + r := httptest.NewRequest("PUT", "/GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Put("/{address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "address": "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + "identities": [ + { + "role": "owner" + } + ], + "signers": [ + { + "key": "GCAPXRXSU7P6D353YGXMP6ROJIC744HO5OZCIWTXZQK2X757YU5KCHUE", + "added_at": "0001-01-01T00:00:00Z" + }, + { + "key": "GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", + "added_at": "0001-01-01T00:00:00Z" + } + ] +}` + + 
assert.JSONEq(t, wantBody, string(body)) + + acc, err := s.Get("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + require.NoError(t, err) + wantAcc := account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{ + { + Role: "owner", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeAddress, Value: "GBF3XFXGBGNQDN3HOSZ7NVRF6TJ2JOD5U6ELIWJOOEI6T5WKMQT2YSXQ"}, + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + } + assert.Equal(t, wantAcc, acc) +} diff --git a/exp/services/recoverysigner/internal/serve/account_response.go b/exp/services/recoverysigner/internal/serve/account_response.go new file mode 100644 index 0000000000..ae1ca55441 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_response.go @@ -0,0 +1,19 @@ +package serve + +import "time" + +type accountResponse struct { + Address string `json:"address"` + Identities []accountResponseIdentity `json:"identities"` + Signers []accountResponseSigner `json:"signers"` +} + +type accountResponseIdentity struct { + Role string `json:"role"` + Authenticated bool `json:"authenticated,omitempty"` +} + +type accountResponseSigner struct { + Key string `json:"key"` + AddedAt time.Time `json:"added_at"` +} diff --git a/exp/services/recoverysigner/internal/serve/account_sign.go b/exp/services/recoverysigner/internal/serve/account_sign.go new file mode 100644 index 0000000000..503cd6ba86 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_sign.go @@ -0,0 +1,184 @@ +package serve + +import ( + "net/http" + + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/http/httpdecode" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" + "github.com/stellar/go/txnbuild" +) + +type accountSignHandler struct { + Logger *supportlog.Entry + SigningKeys []*keypair.Full + NetworkPassphrase string + AccountStore account.Store + AllowedSourceAccounts []*keypair.FromAddress +} + +type accountSignRequest struct { + Address *keypair.FromAddress `path:"address"` + SigningAddress *keypair.FromAddress `path:"signing-address"` + Transaction string `json:"transaction" form:"transaction"` +} + +type accountSignResponse struct { + Signature string `json:"signature"` + NetworkPassphrase string `json:"network_passphrase"` +} + +func (h accountSignHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Check that the client is authenticated in some bare minimum way. + claims, _ := auth.FromContext(ctx) + if claims.Address == "" && claims.PhoneNumber == "" && claims.Email == "" { + unauthorized.Render(w) + return + } + + // Decode request. + req := accountSignRequest{} + err := httpdecode.Decode(r, &req) + if err != nil || req.Address == nil || req.SigningAddress == nil { + badRequest.Render(w) + return + } + + l := h.Logger.Ctx(ctx). 
+ WithField("account", req.Address.Address()) + if req.SigningAddress != nil { + l = l.WithField("signingaddress", req.SigningAddress.Address()) + } + + l.Info("Request to sign transaction.") + + var signingKey *keypair.Full + for _, sk := range h.SigningKeys { + if req.SigningAddress.Address() == sk.Address() { + signingKey = sk + break + } + } + if signingKey == nil { + l.Info("Signing key not found.") + notFound.Render(w) + return + } + + // Find the account that the request is for. + acc, err := h.AccountStore.Get(req.Address.Address()) + if err == account.ErrNotFound { + l.Info("Account not found.") + notFound.Render(w) + return + } else if err != nil { + l.Error(err) + serverError.Render(w) + return + } + + // Authorized if authenticated as the account. + authorized := claims.Address == req.Address.Address() + l.Infof("Authorized with self: %v.", authorized) + + // Authorized if authenticated as an identity registered with the account. + for _, i := range acc.Identities { + for _, m := range i.AuthMethods { + if m.Value != "" && ((m.Type == account.AuthMethodTypeAddress && m.Value == claims.Address) || + (m.Type == account.AuthMethodTypePhoneNumber && m.Value == claims.PhoneNumber) || + (m.Type == account.AuthMethodTypeEmail && m.Value == claims.Email)) { + authorized = true + l.Infof("Authorized with %s.", m.Type) + break + } + } + } + + l.Infof("Authorized: %v.", authorized) + if !authorized { + notFound.Render(w) + return + } + + // Decode the request transaction. + parsed, err := txnbuild.TransactionFromXDR(req.Transaction) + if err != nil { + l.WithField("transaction", req.Transaction). + Info("Parsing transaction failed.") + badRequest.Render(w) + return + } + tx, ok := parsed.Transaction() + if !ok { + l.Info("Transaction is not a simple transaction.") + badRequest.Render(w) + return + } + hashHex, err := tx.HashHex(h.NetworkPassphrase) + if err != nil { + l.Error("Error hashing transaction:", err) + serverError.Render(w) + return + } + + l = l.WithField("transaction_hash", hashHex) + + l.Info("Signing transaction.") + + // Check that the transaction's source account and any operations it + // contains references only to this account. + if tx.SourceAccount().AccountID != req.Address.Address() { + l.Info("Transaction's source account is not the account in the request.") + badRequest.Render(w) + return + } + for _, op := range tx.Operations() { + opSourceAccount := op.GetSourceAccount() + if opSourceAccount == "" { + continue + } + + if op.GetSourceAccount() != req.Address.Address() { + var opHasAllowedAccount bool + for _, sa := range h.AllowedSourceAccounts { + if sa.Address() == op.GetSourceAccount() { + opHasAllowedAccount = true + break + } + } + + if !opHasAllowedAccount { + l.Info("Operation's source account is not the account in the request and not any account that is configured to be allowed.") + badRequest.Render(w) + return + } + } + } + + // Sign the transaction. 
+ hash, err := tx.Hash(h.NetworkPassphrase) + if err != nil { + l.Error("Error hashing transaction:", err) + serverError.Render(w) + return + } + sig, err := signingKey.SignBase64(hash[:]) + if err != nil { + l.Error("Error signing transaction:", err) + serverError.Render(w) + return + } + + l.Info("Transaction signed.") + + resp := accountSignResponse{ + Signature: sig, + NetworkPassphrase: h.NetworkPassphrase, + } + httpjson.Render(w, resp, httpjson.JSON) +} diff --git a/exp/services/recoverysigner/internal/serve/account_sign_signing_address_test.go b/exp/services/recoverysigner/internal/serve/account_sign_signing_address_test.go new file mode 100644 index 0000000000..7246653230 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/account_sign_signing_address_test.go @@ -0,0 +1,1390 @@ +package serve + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/go-chi/chi" + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/txnbuild" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// Test that when the account does not exist it returns not found. +func TestAccountSign_signingAddressAuthenticatedButNotFound(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + req := `{ + "transaction": "" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The resource at the url requested was not found." +}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when the account exists but the authenticated client does not have +// permission to access it returns not found. 
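+// Responding with 404 rather than 401/403 means callers that lack access
+// cannot tell whether the account exists at all.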
+func TestAccountSign_signingAddressAccountAuthenticatedButNotPermitted(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + }) + s.Add(account.Account{ + Address: "GBLOP46WEVXWO5N75TDX7GXLYFQE3XLDT5NQ2VYIBEWWEMSZWR3AUISZ", + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GBLOP46WEVXWO5N75TDX7GXLYFQE3XLDT5NQ2VYIBEWWEMSZWR3AUISZ"}) + req := `{ + "transaction": "" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The resource at the url requested was not found." +}` + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountSign_signingAddressAccountAuthenticatedButInvalidAddress(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + r := httptest.NewRequest("POST", "/ZA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{"error": "The request was invalid in some way."}` + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountSign_signingAddressAccountAuthenticatedButEmptyAddress(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + 
keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + r := httptest.NewRequest("POST", "//sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", nil) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{"error": "The request was invalid in some way."}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when the account exists but the authenticated client does not have +// permission to access it returns not found. +func TestAccountSign_signingAddressPhoneNumberAuthenticatedButNotPermitted(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + s.Add(account.Account{ + Address: "GBLOP46WEVXWO5N75TDX7GXLYFQE3XLDT5NQ2VYIBEWWEMSZWR3AUISZ", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+20000000000"}, + }, + }, + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+20000000000"}, + }, + }, + }, + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{PhoneNumber: "+20000000000"}) + req := `{ + "transaction": "" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The resource at the url requested was not found." +}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when the account exists but the authenticated client does not have +// permission to access it returns not found. 
+func TestAccountSign_signingAddressEmailAuthenticatedButNotPermitted(t *testing.T) {
+	s := &account.DBStore{DB: dbtest.Open(t).Open()}
+	s.Add(account.Account{
+		Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4",
+		Identities: []account.Identity{
+			{
+				Role: "sender",
+				AuthMethods: []account.AuthMethod{
+					{Type: account.AuthMethodTypeEmail, Value: "user1@example.com"},
+				},
+			},
+			{
+				Role: "receiver",
+				AuthMethods: []account.AuthMethod{
+					{Type: account.AuthMethodTypeEmail, Value: "user1@example.com"},
+				},
+			},
+		},
+	})
+	s.Add(account.Account{
+		Address: "GBLOP46WEVXWO5N75TDX7GXLYFQE3XLDT5NQ2VYIBEWWEMSZWR3AUISZ",
+		Identities: []account.Identity{
+			{
+				Role: "sender",
+				AuthMethods: []account.AuthMethod{
+					{Type: account.AuthMethodTypeEmail, Value: "user2@example.com"},
+				},
+			},
+			{
+				Role: "receiver",
+				AuthMethods: []account.AuthMethod{
+					{Type: account.AuthMethodTypeEmail, Value: "user2@example.com"},
+				},
+			},
+		},
+	})
+	h := accountSignHandler{
+		Logger:       supportlog.DefaultLogger,
+		AccountStore: s,
+		SigningKeys: []*keypair.Full{
+			keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H
+			keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS
+		},
+		NetworkPassphrase: network.TestNetworkPassphrase,
+	}
+
+	ctx := context.Background()
+	// Authenticate as user2 by email; user2 is not an identity on the requested account.
+	ctx = auth.NewContext(ctx, auth.Auth{Email: "user2@example.com"})
+	req := `{
+	"transaction": ""
+}`
+	r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req))
+	r = r.WithContext(ctx)
+
+	w := httptest.NewRecorder()
+	m := chi.NewMux()
+	m.Post("/{address}/sign/{signing-address}", h.ServeHTTP)
+	m.ServeHTTP(w, r)
+	resp := w.Result()
+
+	require.Equal(t, http.StatusNotFound, resp.StatusCode)
+	assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))
+
+	body, err := ioutil.ReadAll(resp.Body)
+	require.NoError(t, err)
+
+	wantBody := `{
+	"error": "The resource at the url requested was not found."
+}`
+	assert.JSONEq(t, wantBody, string(body))
+}
+
+// Test that a signing address that is well formed but does not correspond to
+// any of the handler's signing keys results in not found.
+func TestAccountSign_signingAddressAccountAuthenticatedButSigningAddressInvalid(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GCF7VHXRMFMYODQTW7J4PFMHITSEP7XVYIM6X2AKSD7EKV3PMV2PCYHE", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusNotFound, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "error": "The resource at the url requested was not found." 
+}` + assert.JSONEq(t, wantBody, string(body)) +} + +func TestAccountSign_signingAddressAccountAuthenticatedOtherSignerSelected(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "signature": "8Tew6rTol9me8H9u7ezJXg6SEqzOr7cwSf1y9+Vri275XEDH9xWtZ2klTX2uUSPy56otUoIySPqsV3dUFs2kDA==", + "network_passphrase": "Test SDF Network ; September 2015" +}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when the source account of the transaction matches the account the +// request is for, that the transaction is signed and a signature is returned. +// The operation source account does not need to be set. 
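+//
+// A successful response carries the signature and the network passphrase, in
+// the shape asserted by the test below (signature value elided here):
+//
+//	{"signature": "<base64-encoded signature>", "network_passphrase": "Test SDF Network ; September 2015"}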
+func TestAccountSign_signingAddressAccountAuthenticatedTxSourceAccountValid(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "signature": "okp0ISR/hjU6ItsfXie6ArlQ3YWkBBqEAM5TJrthALdawV5DzcpuwBKi0QE/iBgoU7eY0hY3RPdxm8mXGNYfCQ==", + "network_passphrase": "Test SDF Network ; September 2015" +}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when the source account of the transaction and operation are both +// set to values that match the account the request is for, that the +// transaction is signed and a signature is returned. 
+func TestAccountSign_signingAddressAccountAuthenticatedTxAndOpSourceAccountValid(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + SourceAccount: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "signature": "MKAkl+R3VT5DJw6Qed8jO8ERD4RcQ4dJlN+UR2n7nT6AVBXnKBk0zqBZnDuB153zfTYmuA5kmsRiNr5terHVBg==", + "network_passphrase": "Test SDF Network ; September 2015" +}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when the source account of the transaction is not the account sign +// the request is calling sign on a bad request response is returned. 
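+// In other words, signing is refused with a 400 bad request when the
+// transaction's source account differs from the account in the request path.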
+func TestAccountSign_signingAddressAccountAuthenticatedTxSourceAccountInvalid(t *testing.T) {
+	s := &account.DBStore{DB: dbtest.Open(t).Open()}
+	s.Add(account.Account{
+		Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4",
+	})
+	h := accountSignHandler{
+		Logger:       supportlog.DefaultLogger,
+		AccountStore: s,
+		SigningKeys: []*keypair.Full{
+			keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H
+			keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS
+		},
+		NetworkPassphrase: network.TestNetworkPassphrase,
+	}
+
+	tx, err := txnbuild.NewTransaction(
+		txnbuild.TransactionParams{
+			SourceAccount:        &txnbuild.SimpleAccount{AccountID: "GA47G3ZQBUR5NF2ZECG774O3QGKFMAW75XLXSCDICFDDV5GKGRFGFSOI"},
+			IncrementSequenceNum: true,
+			Operations: []txnbuild.Operation{
+				&txnbuild.SetOptions{
+					SourceAccount: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4",
+					Signer: &txnbuild.Signer{
+						Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU",
+						Weight:  20,
+					},
+				},
+			},
+			BaseFee:    txnbuild.MinBaseFee,
+			Timebounds: txnbuild.NewTimebounds(0, 1),
+		},
+	)
+	require.NoError(t, err)
+	txEnc, err := tx.Base64()
+	require.NoError(t, err)
+	t.Log("Tx:", txEnc)
+
+	ctx := context.Background()
+	ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"})
+	req := `{
+	"transaction": "` + txEnc + `"
+}`
+	r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req))
+	r = r.WithContext(ctx)
+
+	w := httptest.NewRecorder()
+	m := chi.NewMux()
+	m.Post("/{address}/sign/{signing-address}", h.ServeHTTP)
+	m.ServeHTTP(w, r)
+	resp := w.Result()
+
+	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+	assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))
+
+	body, err := ioutil.ReadAll(resp.Body)
+	require.NoError(t, err)
+
+	wantBody := `{"error": "The request was invalid in some way."}`
+	assert.JSONEq(t, wantBody, string(body))
+}
+
+// Test that when the source account of the operation is not the account the
+// request is calling sign on, a bad request response is returned.
+func TestAccountSign_signingAddressAccountAuthenticatedOpSourceAccountInvalid(t *testing.T) {
+	s := &account.DBStore{DB: dbtest.Open(t).Open()}
+	s.Add(account.Account{
+		Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4",
+	})
+	h := accountSignHandler{
+		Logger:       supportlog.DefaultLogger,
+		AccountStore: s,
+		SigningKeys: []*keypair.Full{
+			keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H
+			keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS
+		},
+		NetworkPassphrase: network.TestNetworkPassphrase,
+	}
+
+	tx, err := txnbuild.NewTransaction(
+		txnbuild.TransactionParams{
+			SourceAccount:        &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"},
+			IncrementSequenceNum: true,
+			Operations: []txnbuild.Operation{
+				&txnbuild.SetOptions{
+					SourceAccount: "GA47G3ZQBUR5NF2ZECG774O3QGKFMAW75XLXSCDICFDDV5GKGRFGFSOI",
+					Signer: &txnbuild.Signer{
+						Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU",
+						Weight:  20,
+					},
+				},
+			},
+			BaseFee:    txnbuild.MinBaseFee,
+			Timebounds: txnbuild.NewTimebounds(0, 1),
+		},
+	)
+	require.NoError(t, err)
+	txEnc, err := tx.Base64()
+	require.NoError(t, err)
+	t.Log("Tx:", txEnc)
+
+	ctx := context.Background()
+	ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"})
+	req := `{
+	"transaction": "` + txEnc + `"
+}`
+	r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req))
+	r = r.WithContext(ctx)
+
+	w := httptest.NewRecorder()
+	m := chi.NewMux()
+	m.Post("/{address}/sign/{signing-address}", h.ServeHTTP)
+	m.ServeHTTP(w, r)
+	resp := w.Result()
+
+	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+	assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))
+
+	body, err := ioutil.ReadAll(resp.Body)
+	require.NoError(t, err)
+
+	wantBody := `{"error": "The request was invalid in some way."}`
+	assert.JSONEq(t, wantBody, string(body))
+}
+
+// Test that when the source accounts of both the operation and the transaction
+// are not the account the request is calling sign on, a bad request response
+// is returned.
+func TestAccountSign_signingAddressAccountAuthenticatedTxAndOpSourceAccountInvalid(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA47G3ZQBUR5NF2ZECG774O3QGKFMAW75XLXSCDICFDDV5GKGRFGFSOI"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + SourceAccount: "GA47G3ZQBUR5NF2ZECG774O3QGKFMAW75XLXSCDICFDDV5GKGRFGFSOI", + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{"error": "The request was invalid in some way."}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when authenticated with a phone number signing is possible. 
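+// The account below registers the phone number as an auth method of its
+// sender identity, and the request is authenticated with the same phone
+// number, so the handler signs the transaction.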
+func TestAccountSign_signingAddressPhoneNumberOwnerAuthenticated(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{PhoneNumber: "+10000000000"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "signature": "okp0ISR/hjU6ItsfXie6ArlQ3YWkBBqEAM5TJrthALdawV5DzcpuwBKi0QE/iBgoU7eY0hY3RPdxm8mXGNYfCQ==", + "network_passphrase": "Test SDF Network ; September 2015" +}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when authenticated with a phone number signing is possible. 
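+// Unlike the test above, the phone number is registered for the receiver
+// identity rather than the sender, and signing is still permitted.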
+func TestAccountSign_signingAddressPhoneNumberOtherAuthenticated(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + Identities: []account.Identity{ + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypePhoneNumber, Value: "+10000000000"}, + }, + }, + }, + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{PhoneNumber: "+10000000000"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "signature": "okp0ISR/hjU6ItsfXie6ArlQ3YWkBBqEAM5TJrthALdawV5DzcpuwBKi0QE/iBgoU7eY0hY3RPdxm8mXGNYfCQ==", + "network_passphrase": "Test SDF Network ; September 2015" +}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when authenticated with a email signing is possible. 
+func TestAccountSign_signingAddressEmailOwnerAuthenticated(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Email: "user1@example.com"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "signature": "okp0ISR/hjU6ItsfXie6ArlQ3YWkBBqEAM5TJrthALdawV5DzcpuwBKi0QE/iBgoU7eY0hY3RPdxm8mXGNYfCQ==", + "network_passphrase": "Test SDF Network ; September 2015" +}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when the source account of the operation is listed in the allowed +// source accounts a successful response is returned. 
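+// The handler below is configured with an AllowedSourceAccounts entry, and
+// that address is used as the source of the BeginSponsoringFutureReserves
+// operation, so the foreign operation source account does not cause the
+// request to be rejected.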
+func TestAccountSign_signingAddressEmailOwnerAuthenticatedOpSourceAccountIsAllowedSourceAccount(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + AllowedSourceAccounts: []*keypair.FromAddress{ + keypair.MustParseAddress("GDR3RJVOHYR5A4RSLZ7D3GOSTPBGD2FY7KJD7ZB7363ROOQHWYDVVULS"), + }, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.BeginSponsoringFutureReserves{ + SponsoredID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + SourceAccount: "GDR3RJVOHYR5A4RSLZ7D3GOSTPBGD2FY7KJD7ZB7363ROOQHWYDVVULS", + }, + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + &txnbuild.EndSponsoringFutureReserves{}, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Email: "user1@example.com"}) + req := fmt.Sprintf(`{"transaction": "%s"}`, txEnc) + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "signature": "Tpl/yZoKkahakaX4fSrdIeBLL2oi4uKegs5bLXFj5fG6Rcfe2D4EeSHcjJmmO2ZscuY8pX8+YPo70AvCtfw9Ag==", + "network_passphrase": "Test SDF Network ; September 2015" + }` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when the source account of the operation is not the account sign +// the request is calling sign on a bad request response is returned. 
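+// This is the same transaction as in the test above, but AllowedSourceAccounts
+// is empty, so the sponsoring operation's foreign source account results in a
+// 400 bad request.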
+func TestAccountSign_signingAddressEmailOwnerAuthenticatedOpSourceAccountInvalid(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + Identities: []account.Identity{ + { + Role: "sender", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + AllowedSourceAccounts: nil, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.BeginSponsoringFutureReserves{ + SponsoredID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + SourceAccount: "GDR3RJVOHYR5A4RSLZ7D3GOSTPBGD2FY7KJD7ZB7363ROOQHWYDVVULS", + }, + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + &txnbuild.EndSponsoringFutureReserves{}, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Email: "user1@example.com"}) + req := fmt.Sprintf(`{"transaction": "%s"}`, txEnc) + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{"error": "The request was invalid in some way."}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when authenticated with a email signing is possible. 
+func TestAccountSign_signingAddressEmailOtherAuthenticated(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + Identities: []account.Identity{ + { + Role: "receiver", + AuthMethods: []account.AuthMethod{ + {Type: account.AuthMethodTypeEmail, Value: "user1@example.com"}, + }, + }, + }, + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Email: "user1@example.com"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "signature": "okp0ISR/hjU6ItsfXie6ArlQ3YWkBBqEAM5TJrthALdawV5DzcpuwBKi0QE/iBgoU7eY0hY3RPdxm8mXGNYfCQ==", + "network_passphrase": "Test SDF Network ; September 2015" +}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that when the transaction cannot be parsed it errors. 
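+// The hard-coded value below does not decode to a valid transaction envelope,
+// so the handler responds with a 400 bad request.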
+func TestAccountSign_signingAddressCannotParseTransaction(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + txEnc := "AAAAADx2k+7TdIuhwctzxD5y0/w5ASkvTD68az9Nh2fWY+ShAAAAZAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAPHaT7tN0i6HBy3PEPnLT/DkBKS9MPrxrP02HZ9Zj5KEAAAAAAJiWgAAAAAAAAAA" + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + req := `{ + "transaction": "` + txEnc + `" +}` + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{"error": "The request was invalid in some way."}` + assert.JSONEq(t, wantBody, string(body)) +} + +// Test that the sign endpoint responds with an error when given a fee bump transaction. 
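+// Wrapping the usual inner transaction in a fee bump envelope results in a
+// 400 bad request; only regular transaction envelopes are accepted for
+// signing here.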
+func TestAccountSign_signingAddressRejectsFeeBumpTx(t *testing.T) {
+	s := &account.DBStore{DB: dbtest.Open(t).Open()}
+	s.Add(account.Account{
+		Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4",
+	})
+	h := accountSignHandler{
+		Logger:       supportlog.DefaultLogger,
+		AccountStore: s,
+		SigningKeys: []*keypair.Full{
+			keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H
+			keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS
+		},
+		NetworkPassphrase: network.TestNetworkPassphrase,
+	}
+
+	tx, err := txnbuild.NewTransaction(
+		txnbuild.TransactionParams{
+			SourceAccount:        &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"},
+			IncrementSequenceNum: true,
+			Operations: []txnbuild.Operation{
+				&txnbuild.SetOptions{
+					Signer: &txnbuild.Signer{
+						Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU",
+						Weight:  20,
+					},
+				},
+			},
+			BaseFee:    txnbuild.MinBaseFee,
+			Timebounds: txnbuild.NewTimebounds(0, 1),
+		},
+	)
+	require.NoError(t, err)
+
+	feeBumpTx, err := txnbuild.NewFeeBumpTransaction(
+		txnbuild.FeeBumpTransactionParams{
+			Inner:      tx,
+			FeeAccount: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU",
+			BaseFee:    2 * txnbuild.MinBaseFee,
+		},
+	)
+	require.NoError(t, err)
+
+	feeBumpTxB64, err := feeBumpTx.Base64()
+	require.NoError(t, err)
+	t.Log("Tx:", feeBumpTxB64)
+
+	ctx := context.Background()
+	ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"})
+	req := `{
+	"transaction": "` + feeBumpTxB64 + `"
+}`
+	r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req))
+	r = r.WithContext(ctx)
+
+	w := httptest.NewRecorder()
+	m := chi.NewMux()
+	m.Post("/{address}/sign/{signing-address}", h.ServeHTTP)
+	m.ServeHTTP(w, r)
+	resp := w.Result()
+
+	require.Equal(t, http.StatusBadRequest, resp.StatusCode)
+	assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type"))
+
+	body, err := ioutil.ReadAll(resp.Body)
+	require.NoError(t, err)
+
+	wantBody := `{"error": "The request was invalid in some way."}`
+	assert.JSONEq(t, wantBody, string(body))
+}
+
+// Test that the request can be made with form-encoded content instead of JSON.
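+// For example, with Content-Type: application/x-www-form-urlencoded the
+// request body takes the form below (value elided):
+//
+//	transaction=<URL-encoded base64 transaction XDR>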
+func TestAccountSign_signingAddressValidContentTypeForm(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4", + }) + h := accountSignHandler{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + SigningKeys: []*keypair.Full{ + keypair.MustParseFull("SBIB72S6JMTGJRC6LMKLC5XMHZ2IOHZSZH4SASTN47LECEEJ7QEB6EYK"), // GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H + keypair.MustParseFull("SBJGZKZ7LU2FQNEFBUOBW4LHCA5BOZCABIJTR7BQIFWQ3P763ZW7MYDD"), // GAPE22DOMALCH42VOR4S3HN6KIZZ643G7D3GNTYF4YOWWXP6UVRAF5JS + }, + NetworkPassphrase: network.TestNetworkPassphrase, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: "GD7CGJSJ5OBOU5KOP2UQDH3MPY75UTEY27HVV5XPSL2X6DJ2VGTOSXEU", + Weight: 20, + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimebounds(0, 1), + }, + ) + require.NoError(t, err) + txEnc, err := tx.Base64() + require.NoError(t, err) + t.Log("Tx:", txEnc) + + ctx := context.Background() + ctx = auth.NewContext(ctx, auth.Auth{Address: "GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4"}) + reqValues := url.Values{} + reqValues.Set("transaction", txEnc) + req := reqValues.Encode() + t.Log("Request Body:", req) + r := httptest.NewRequest("POST", "/GA6HNE7O2N2IXIOBZNZ4IPTS2P6DSAJJF5GD5PDLH5GYOZ6WMPSKCXD4/sign/GBOG4KF66M4AFRBUHOTJQJRO7BGGFCSGIICTI5BHXHKXCWV2C67QRN5H", strings.NewReader(req)) + r = r.WithContext(ctx) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + w := httptest.NewRecorder() + m := chi.NewMux() + m.Post("/{address}/sign/{signing-address}", h.ServeHTTP) + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "signature": "okp0ISR/hjU6ItsfXie6ArlQ3YWkBBqEAM5TJrthALdawV5DzcpuwBKi0QE/iBgoU7eY0hY3RPdxm8mXGNYfCQ==", + "network_passphrase": "Test SDF Network ; September 2015" +}` + assert.JSONEq(t, wantBody, string(body)) +} diff --git a/exp/services/recoverysigner/internal/serve/auth/auth.go b/exp/services/recoverysigner/internal/serve/auth/auth.go new file mode 100644 index 0000000000..e0d30f011a --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/auth/auth.go @@ -0,0 +1,33 @@ +package auth + +import ( + "context" +) + +type contextKey int + +const ( + authContextKey contextKey = iota +) + +// Auth holds a set of details that have been authenticated about a client. +type Auth struct { + Address string + PhoneNumber string + Email string +} + +// FromContext returns auth details that are stored in the context. +func FromContext(ctx context.Context) (Auth, bool) { + if a, ok := ctx.Value(authContextKey).(Auth); ok { + return a, true + } + return Auth{}, false +} + +// NewContext returns a new context that is a copy of the given context with +// the auth details set within. An Auth can be retrieved from the context using +// FromContext. 
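+//
+// For example, a minimal illustration of how the two functions in this file
+// are used together:
+//
+//	ctx = NewContext(ctx, Auth{Address: "G..."})
+//	a, ok := FromContext(ctx)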
+func NewContext(ctx context.Context, a Auth) context.Context { + return context.WithValue(ctx, authContextKey, a) +} diff --git a/exp/services/recoverysigner/internal/serve/auth/firebase.go b/exp/services/recoverysigner/internal/serve/auth/firebase.go new file mode 100644 index 0000000000..ed55e5a0dd --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/auth/firebase.go @@ -0,0 +1,86 @@ +package auth + +import ( + "context" + "net/http" + "strings" + + firebase "firebase.google.com/go" + firebaseauth "firebase.google.com/go/auth" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httpauthz" + "github.com/stellar/go/support/log" + "google.golang.org/api/option" +) + +type FirebaseTokenVerifier interface { + Verify(r *http.Request) (*firebaseauth.Token, bool) +} + +type FirebaseTokenVerifierFunc func(r *http.Request) (*firebaseauth.Token, bool) + +func (v FirebaseTokenVerifierFunc) Verify(r *http.Request) (*firebaseauth.Token, bool) { + return v(r) +} + +func FirebaseMiddleware(v FirebaseTokenVerifier) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if token, ok := v.Verify(r); ok { + ctx := r.Context() + auth, _ := FromContext(ctx) + auth.PhoneNumber, _ = token.Claims["phone_number"].(string) + if emailVerified, _ := token.Claims["email_verified"].(bool); emailVerified { + auth.Email, _ = token.Claims["email"].(string) + } + + authTypes := []string{} + if auth.PhoneNumber != "" { + authTypes = append(authTypes, "phone_number") + } + if auth.Email != "" { + authTypes = append(authTypes, "email") + } + log.Ctx(ctx). + WithField("auth_types", strings.Join(authTypes, ", ")). + Info("Firebase JWT verified.") + + ctx = NewContext(ctx, auth) + r = r.WithContext(ctx) + } + next.ServeHTTP(w, r) + }) + } +} + +func NewFirebaseAuthClient(firebaseProjectID string) (*firebaseauth.Client, error) { + credentialsJSON := `{"type":"service_account","project_id":"` + firebaseProjectID + `"}` + firebaseCredentials := option.WithCredentialsJSON([]byte(credentialsJSON)) + firebaseApp, err := firebase.NewApp(context.Background(), nil, firebaseCredentials) + if err != nil { + return nil, errors.Wrap(err, "instantiating firebase app") + } + firebaseAuthClient, err := firebaseApp.Auth(context.Background()) + if err != nil { + return nil, errors.Wrap(err, "instantiating firebase auth client") + } + return firebaseAuthClient, nil +} + +type FirebaseTokenVerifierLive struct { + AuthClient *firebaseauth.Client +} + +func (v FirebaseTokenVerifierLive) Verify(r *http.Request) (*firebaseauth.Token, bool) { + ctx := r.Context() + authHeader := r.Header.Get("Authorization") + tokenEncoded := httpauthz.ParseBearerToken(authHeader) + if tokenEncoded == "" { + return nil, false + } + token, err := v.AuthClient.VerifyIDToken(ctx, tokenEncoded) + if err != nil { + return nil, false + } + return token, true +} diff --git a/exp/services/recoverysigner/internal/serve/auth/firebase_test.go b/exp/services/recoverysigner/internal/serve/auth/firebase_test.go new file mode 100644 index 0000000000..95ca9de780 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/auth/firebase_test.go @@ -0,0 +1,243 @@ +package auth + +import ( + "net/http" + "net/http/httptest" + "testing" + + firebaseauth "firebase.google.com/go/auth" + "github.com/stretchr/testify/assert" +) + +// Test that if the token verifier says there is a Firebase token that contains +// a phone number claim, the claims stored 
in the context should contain it. +func TestFirebase_tokenWithPhoneNumber(t *testing.T) { + tokenVerifier := FirebaseTokenVerifierFunc(func(_ *http.Request) (*firebaseauth.Token, bool) { + token := &firebaseauth.Token{ + Claims: map[string]interface{}{ + "phone_number": "+10000000000", + }, + } + return token, true + }) + + claims := Auth{} + claimsOK := false + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + claims, claimsOK = FromContext(r.Context()) + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + FirebaseMiddleware(tokenVerifier)(next).ServeHTTP(w, r) + + wantClaims := Auth{ + PhoneNumber: "+10000000000", + } + assert.Equal(t, wantClaims, claims) + assert.Equal(t, true, claimsOK) +} + +// Test that if the token verifier says there is a Firebase token that contains +// an email and email_verified=true claim, the claims stored in the context +// should contain it. +func TestFirebase_tokenWithEmailVerifiedTrue(t *testing.T) { + tokenVerifier := FirebaseTokenVerifierFunc(func(_ *http.Request) (*firebaseauth.Token, bool) { + token := &firebaseauth.Token{ + Claims: map[string]interface{}{ + "email": "user@example.com", + "email_verified": true, + }, + } + return token, true + }) + + claims := Auth{} + claimsOK := false + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + claims, claimsOK = FromContext(r.Context()) + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + FirebaseMiddleware(tokenVerifier)(next).ServeHTTP(w, r) + + wantClaims := Auth{ + Email: "user@example.com", + } + assert.Equal(t, wantClaims, claims) + assert.Equal(t, true, claimsOK) +} + +// Test that if the token verifier says there is a Firebase token that contains +// an email claim and email_verified=false, the claims stored in the context +// should not contain the email. +func TestFirebase_tokenWithEmailVerifiedFalse(t *testing.T) { + tokenVerifier := FirebaseTokenVerifierFunc(func(_ *http.Request) (*firebaseauth.Token, bool) { + token := &firebaseauth.Token{ + Claims: map[string]interface{}{ + "email": "user@example.com", + "email_verified": false, + }, + } + return token, true + }) + + claims := Auth{} + claimsOK := false + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + claims, claimsOK = FromContext(r.Context()) + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + FirebaseMiddleware(tokenVerifier)(next).ServeHTTP(w, r) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) + assert.Equal(t, true, claimsOK) +} + +// Test that if the token verifier says there is a Firebase token that contains +// an email claim without email_verified, the claims stored in the context +// should not contain the email. 
+func TestFirebase_tokenWithEmail(t *testing.T) { + tokenVerifier := FirebaseTokenVerifierFunc(func(_ *http.Request) (*firebaseauth.Token, bool) { + token := &firebaseauth.Token{ + Claims: map[string]interface{}{ + "email": "user@example.com", + }, + } + return token, true + }) + + claims := Auth{} + claimsOK := false + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + claims, claimsOK = FromContext(r.Context()) + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + FirebaseMiddleware(tokenVerifier)(next).ServeHTTP(w, r) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) + assert.Equal(t, true, claimsOK) +} + +// Test that if the token verifier says there is a Firebase token that +// contains a phone number and an email claim, the claims stored in the +// context should contain both. +func TestFirebase_tokenWithPhoneNumberAndEmail(t *testing.T) { + tokenVerifier := FirebaseTokenVerifierFunc(func(_ *http.Request) (*firebaseauth.Token, bool) { + token := &firebaseauth.Token{ + Claims: map[string]interface{}{ + "phone_number": "+10000000000", + "email": "user@example.com", + "email_verified": true, + }, + } + return token, true + }) + + claims := Auth{} + claimsOK := false + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + claims, claimsOK = FromContext(r.Context()) + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + FirebaseMiddleware(tokenVerifier)(next).ServeHTTP(w, r) + + wantClaims := Auth{ + PhoneNumber: "+10000000000", + Email: "user@example.com", + } + assert.Equal(t, wantClaims, claims) + assert.Equal(t, true, claimsOK) +} + +// Test that if the token verifier says there is a Firebase token that contains +// a phone number or an email claim, and there are other claims fields filled +// in, the claims stored in the context should contain the merging of both. +func TestFirebase_tokenWithPhoneNumberAndEmailAppendsToOtherClaims(t *testing.T) { + tokenVerifier := FirebaseTokenVerifierFunc(func(_ *http.Request) (*firebaseauth.Token, bool) { + token := &firebaseauth.Token{ + Claims: map[string]interface{}{ + "phone_number": "+10000000000", + "email": "user@example.com", + "email_verified": true, + }, + } + return token, true + }) + + claims := Auth{} + claimsOK := false + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + claims, claimsOK = FromContext(r.Context()) + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + initialClaims := Auth{ + Address: "GCJYKECSRMIQX3KK62VPJ64NFNWV3EKJPUAXSXKKA7XSHN43VDHNKANO", + } + r = r.WithContext(NewContext(r.Context(), initialClaims)) + FirebaseMiddleware(tokenVerifier)(next).ServeHTTP(w, r) + + wantClaims := Auth{ + Address: "GCJYKECSRMIQX3KK62VPJ64NFNWV3EKJPUAXSXKKA7XSHN43VDHNKANO", + PhoneNumber: "+10000000000", + Email: "user@example.com", + } + assert.Equal(t, wantClaims, claims) + assert.Equal(t, true, claimsOK) +} + +// Test that if the token verifier says there is an empty Firebase token that +// does not have a phone number or email claim, the claims stored in the +// context should be empty. 
+func TestFirebase_tokenWithNone(t *testing.T) { + tokenVerifier := FirebaseTokenVerifierFunc(func(_ *http.Request) (*firebaseauth.Token, bool) { + return &firebaseauth.Token{}, true + }) + + claims := Auth{} + claimsOK := false + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + claims, claimsOK = FromContext(r.Context()) + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + FirebaseMiddleware(tokenVerifier)(next).ServeHTTP(w, r) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) + assert.Equal(t, true, claimsOK) +} + +// Test that if the token verifier says there is no Firebase token, the claims +// stored in the context should be empty. +func TestFirebase_noToken(t *testing.T) { + tokenVerifier := FirebaseTokenVerifierFunc(func(_ *http.Request) (*firebaseauth.Token, bool) { + return nil, false + }) + + claims := Auth{} + claimsOK := false + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + claims, claimsOK = FromContext(r.Context()) + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + FirebaseMiddleware(tokenVerifier)(next).ServeHTTP(w, r) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) + assert.Equal(t, false, claimsOK) +} diff --git a/exp/services/recoverysigner/internal/serve/auth/sep10.go b/exp/services/recoverysigner/internal/serve/auth/sep10.go new file mode 100644 index 0000000000..d7c6e94354 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/auth/sep10.go @@ -0,0 +1,89 @@ +package auth + +import ( + "errors" + "net/http" + "time" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/http/httpauthz" + "github.com/stellar/go/support/log" + "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" +) + +// SEP10Middleware provides middleware for handling an authentication SEP-10 JWT. +func SEP10Middleware(issuer string, ks jose.JSONWebKeySet) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if address, k, ok := sep10ClaimsFromRequest(r, issuer, ks); ok { + ctx := r.Context() + auth, _ := FromContext(ctx) + auth.Address = address + + log.Ctx(ctx). + WithField("jwkkid", k.KeyID). + WithField("address", address). 
+ Info("SEP-10 JWT verified.") + + ctx = NewContext(ctx, auth) + r = r.WithContext(ctx) + } + next.ServeHTTP(w, r) + }) + } +} + +type sep10JWTClaims struct { + jwt.Claims +} + +func (c sep10JWTClaims) Validate(issuer string) error { + if c.Claims.IssuedAt == nil { + return errors.New("validation failed, no issued at (iat) in token") + } + if c.Claims.Expiry == nil { + return errors.New("validation failed, no expiry (exp) in token") + } + expectedClaims := jwt.Expected{ + Issuer: issuer, + Time: time.Now(), + } + return c.Claims.Validate(expectedClaims) +} + +func sep10ClaimsFromRequest(r *http.Request, issuer string, ks jose.JSONWebKeySet) (address string, k jose.JSONWebKey, ok bool) { + authHeader := r.Header.Get("Authorization") + tokenEncoded := httpauthz.ParseBearerToken(authHeader) + if tokenEncoded == "" { + return "", jose.JSONWebKey{}, false + } + token, err := jwt.ParseSigned(tokenEncoded) + if err != nil { + return "", jose.JSONWebKey{}, false + } + tokenClaims := sep10JWTClaims{} + verified := false + verifiedWithKey := jose.JSONWebKey{} + for _, k := range ks.Keys { + err = token.Claims(k, &tokenClaims) + if err == nil { + verified = true + verifiedWithKey = k + break + } + } + if !verified { + return "", jose.JSONWebKey{}, false + } + err = tokenClaims.Validate(issuer) + if err != nil { + return "", jose.JSONWebKey{}, false + } + address = tokenClaims.Subject + _, err = keypair.ParseAddress(address) + if err != nil { + return "", jose.JSONWebKey{}, false + } + return address, verifiedWithKey, true +} diff --git a/exp/services/recoverysigner/internal/serve/auth/sep10_test.go b/exp/services/recoverysigner/internal/serve/auth/sep10_test.go new file mode 100644 index 0000000000..4238d24650 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/auth/sep10_test.go @@ -0,0 +1,616 @@ +package auth + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/golang-jwt/jwt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/square/go-jose.v2" +) + +func TestSEP10_addsAddressToClaimIfJWTValid(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, true, ok) + + wantClaims := Auth{ + Address: "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + } + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_addsAddressToClaimIfJWTValidMultipleJWKS(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + k2, err := 
ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + keys := []*ecdsa.PrivateKey{k1, k2} + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + {Key: &k2.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + for i, k := range keys { + t.Run(fmt.Sprintf("known key %d", i), func(t *testing.T) { + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, true, ok) + + wantClaims := Auth{ + Address: "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + } + assert.Equal(t, wantClaims, claims) + }) + } + t.Run("unknown key", func(t *testing.T) { + k3, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k3) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) + }) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTNotPresent(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTNoSignature(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SigningString() + require.NoError(t, 
err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTWrongAlg(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodNone, jwtClaims).SignedString(jwt.UnsafeAllowNoneSignatureType) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTInvalidSignature(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + k2, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k2) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTExpired(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": 1, + "exp": 1, + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + 
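+ // The token above carries iat and exp of 1 (one second after the Unix epoch), so it is long
+ // expired and the middleware is expected to leave the request context without Auth claims.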
assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTMissingIAT(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTMissingEXP(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTMissingSUB(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTHasSUBNotContainingGStrkey(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: 
[]jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "https://webauth.example.com", + "sub": "SBAZWVXOQ5LWT5PJSVOA62PVIYZIV3T3HQ3GFC2RUZ6K43QFNF5BLLDE", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTMissingISSButRequired(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_doesNotAddAddressToClaimIfJWTHasISSButUnexpectedValue(t *testing.T) { + issuer := "https://webauth.example.com" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "otherissuer", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, false, ok) + + wantClaims := Auth{} + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_addAddressToClaimIfJWTMissingISSButNotRequired(t *testing.T) { + issuer := "" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := 
httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, true, ok) + + wantClaims := Auth{ + Address: "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + } + assert.Equal(t, wantClaims, claims) +} + +func TestSEP10_addAddressToClaimIfJWTHasISSButNotRequired(t *testing.T) { + issuer := "" + + k1, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(t, err) + jwks := jose.JSONWebKeySet{ + Keys: []jose.JSONWebKey{ + {Key: &k1.PublicKey}, + }, + } + + ctx := context.Context(nil) + next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx = r.Context() + }) + middleware := SEP10Middleware(issuer, jwks) + handler := middleware(next) + + r := httptest.NewRequest("GET", "/", nil) + jwtClaims := jwt.MapClaims{ + "iss": "otherservice", + "sub": "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + "iat": time.Now().Unix(), + "exp": time.Now().Add(time.Hour).Unix(), + } + jwtToken, err := jwt.NewWithClaims(jwt.SigningMethodES256, jwtClaims).SignedString(k1) + require.NoError(t, err) + r.Header.Set("Authorization", "Bearer "+jwtToken) + handler.ServeHTTP(nil, r) + + assert.NotNil(t, ctx) + claims, ok := FromContext(ctx) + assert.Equal(t, true, ok) + + wantClaims := Auth{ + Address: "GDKABHI4LTLG7UCE6O7Y4D6REHJVS4DLXTVVXTE3BPRRLXPASHSOKG2D", + } + assert.Equal(t, wantClaims, claims) +} diff --git a/exp/services/recoverysigner/internal/serve/errors.go b/exp/services/recoverysigner/internal/serve/errors.go new file mode 100644 index 0000000000..b5fc370910 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/errors.go @@ -0,0 +1,49 @@ +package serve + +import ( + "net/http" + + "github.com/stellar/go/support/render/httpjson" +) + +var serverError = errorResponse{ + Status: http.StatusInternalServerError, + Error: "An error occurred while processing this request.", +} +var notFound = errorResponse{ + Status: http.StatusNotFound, + Error: "The resource at the url requested was not found.", +} +var methodNotAllowed = errorResponse{ + Status: http.StatusMethodNotAllowed, + Error: "The method is not allowed for resource at the url requested.", +} +var badRequest = errorResponse{ + Status: http.StatusBadRequest, + Error: "The request was invalid in some way.", +} +var conflict = errorResponse{ + Status: http.StatusConflict, + Error: "The request could not be completed because the resource already exists.", +} +var unauthorized = errorResponse{ + Status: http.StatusUnauthorized, + Error: "The request could not be authenticated.", +} + +type errorResponse struct { + Status int `json:"-"` + Error string `json:"error"` +} + +func (e errorResponse) Render(w http.ResponseWriter) { + httpjson.RenderStatus(w, e.Status, e, httpjson.JSON) +} + +type errorHandler struct { + Error errorResponse +} + +func (h errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.Error.Render(w) +} diff --git a/exp/services/recoverysigner/internal/serve/errors_test.go b/exp/services/recoverysigner/internal/serve/errors_test.go new file mode 100644 index 0000000000..d66c30b6c9 --- /dev/null +++ 
b/exp/services/recoverysigner/internal/serve/errors_test.go @@ -0,0 +1,33 @@ +package serve + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestErrorResponseRender(t *testing.T) { + w := httptest.NewRecorder() + serverError.Render(w) + resp := w.Result() + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.JSONEq(t, `{"error":"An error occurred while processing this request."}`, string(body)) +} + +func TestErrorHandler(t *testing.T) { + r := httptest.NewRequest("GET", "/404", nil) + w := httptest.NewRecorder() + handler := errorHandler{Error: notFound} + handler.ServeHTTP(w, r) + resp := w.Result() + assert.Equal(t, http.StatusNotFound, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.JSONEq(t, `{"error":"The resource at the url requested was not found."}`, string(body)) +} diff --git a/exp/services/recoverysigner/internal/serve/metrics_accounts_count.go b/exp/services/recoverysigner/internal/serve/metrics_accounts_count.go new file mode 100644 index 0000000000..cfdd52a7a7 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/metrics_accounts_count.go @@ -0,0 +1,29 @@ +package serve + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + supportlog "github.com/stellar/go/support/log" +) + +type metricAccountsCount struct { + Logger *supportlog.Entry + AccountStore account.Store +} + +func (m metricAccountsCount) NewCollector() prometheus.Collector { + opts := prometheus.GaugeOpts{ + Name: "accounts_count", + Help: "Number of active accounts.", + } + return prometheus.NewGaugeFunc(opts, m.gauge) +} + +func (m metricAccountsCount) gauge() float64 { + count, err := m.AccountStore.Count() + if err != nil { + m.Logger.Warnf("Error getting count from account store: %v", err) + return 0 + } + return float64(count) +} diff --git a/exp/services/recoverysigner/internal/serve/serve.go b/exp/services/recoverysigner/internal/serve/serve.go new file mode 100644 index 0000000000..88a4200377 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/serve.go @@ -0,0 +1,221 @@ +package serve + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + + firebaseauth "firebase.google.com/go/auth" + "github.com/go-chi/chi" + "github.com/prometheus/client_golang/prometheus" + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/db" + "github.com/stellar/go/exp/services/recoverysigner/internal/serve/auth" + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/errors" + supporthttp "github.com/stellar/go/support/http" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/health" + "gopkg.in/square/go-jose.v2" +) + +type Options struct { + Logger *supportlog.Entry + DatabaseURL string + DatabaseMaxOpenConns int + Port int + NetworkPassphrase string + SigningKeys string + SEP10JWKS string + SEP10JWTIssuer string + FirebaseProjectID string + + AdminPort int + MetricsNamespace string + + AllowedSourceAccounts string +} + +func Serve(opts Options) { + deps, err := getHandlerDeps(opts) + if err != nil { + opts.Logger.Fatalf("Error: %v", err) + return + } + + if opts.AdminPort != 0 { + adminDeps := adminDeps{ + Logger: opts.Logger, + MetricsGatherer: 
deps.MetricsRegistry, + } + go serveAdmin(opts, adminDeps) + } + + handler := handler(deps) + + addr := fmt.Sprintf(":%d", opts.Port) + supporthttp.Run(supporthttp.Config{ + ListenAddr: addr, + Handler: handler, + OnStarting: func() { + deps.Logger.Infof("Starting SEP-30 Recovery Signer server on %s", addr) + }, + }) +} + +type handlerDeps struct { + Logger *supportlog.Entry + NetworkPassphrase string + SigningKeys []*keypair.Full + SigningAddresses []*keypair.FromAddress + AccountStore account.Store + SEP10JWKS jose.JSONWebKeySet + SEP10JWTIssuer string + FirebaseAuthClient *firebaseauth.Client + MetricsRegistry *prometheus.Registry + AllowedSourceAccounts []*keypair.FromAddress +} + +func getHandlerDeps(opts Options) (handlerDeps, error) { + // TODO: Replace this signing key with randomly generating a unique signing + // key for each account so that it is not possible to identify which + // accounts are recoverable via a recovery signer. + signingKeys := []*keypair.Full{} + signingAddresses := []*keypair.FromAddress{} + for i, signingKeyStr := range strings.Split(opts.SigningKeys, ",") { + signingKey, err := keypair.ParseFull(signingKeyStr) + if err != nil { + return handlerDeps{}, errors.Wrap(err, "parsing signing key seed") + } + signingKeys = append(signingKeys, signingKey) + signingAddresses = append(signingAddresses, signingKey.FromAddress()) + opts.Logger.Info("Signing key ", i, ": ", signingKey.Address()) + } + + sep10JWKS := jose.JSONWebKeySet{} + err := json.Unmarshal([]byte(opts.SEP10JWKS), &sep10JWKS) + if err != nil { + return handlerDeps{}, errors.Wrap(err, "parsing SEP-10 JSON Web Key (JWK) Set") + } + if len(sep10JWKS.Keys) == 0 { + return handlerDeps{}, errors.New("no keys included in SEP-10 JSON Web Key (JWK) Set") + } + opts.Logger.Infof("SEP10 JWKS contains %d keys", len(sep10JWKS.Keys)) + + db, err := db.Open(opts.DatabaseURL) + if err != nil { + return handlerDeps{}, errors.Wrap(err, "error parsing database url") + } + db.SetMaxOpenConns(opts.DatabaseMaxOpenConns) + + err = db.Ping() + if err != nil { + opts.Logger.Warn("Error pinging to Database: ", err) + } + accountStore := &account.DBStore{DB: db} + + firebaseAuthClient, err := auth.NewFirebaseAuthClient(opts.FirebaseProjectID) + if err != nil { + return handlerDeps{}, errors.Wrap(err, "error setting up firebase auth client") + } + + metricsRegistry := prometheus.NewRegistry() + + err = metricsRegistry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) + if err != nil { + opts.Logger.Warn("Error registering metric for process: ", err) + } + err = metricsRegistry.Register(prometheus.NewGoCollector()) + if err != nil { + opts.Logger.Warn("Error registering metric for Go: ", err) + } + + metricsRegistryNamespaced := prometheus.Registerer(metricsRegistry) + if opts.MetricsNamespace != "" { + metricsRegistryNamespaced = prometheus.WrapRegistererWithPrefix(opts.MetricsNamespace+"_", metricsRegistry) + } + + err = metricsRegistryNamespaced.Register(metricAccountsCount{ + Logger: opts.Logger, + AccountStore: accountStore, + }.NewCollector()) + if err != nil { + opts.Logger.Warn("Error registering metric for accounts count: ", err) + } + + allowedSourceAccounts := []*keypair.FromAddress{} + for _, addressStr := range strings.Split(opts.AllowedSourceAccounts, ",") { + accountAddress, err := keypair.ParseAddress(addressStr) + if err != nil { + return handlerDeps{}, errors.Wrap(err, "parsing allowed source accounts") + } + allowedSourceAccounts = append(allowedSourceAccounts, accountAddress) + } + + 
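+ // The allowed source accounts option is assumed to be a comma-separated list of Stellar
+ // public keys (G... strkeys); with it parsed above, all handler dependencies are ready to assemble.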
deps := handlerDeps{ + Logger: opts.Logger, + NetworkPassphrase: opts.NetworkPassphrase, + SigningKeys: signingKeys, + SigningAddresses: signingAddresses, + AccountStore: accountStore, + SEP10JWKS: sep10JWKS, + SEP10JWTIssuer: opts.SEP10JWTIssuer, + FirebaseAuthClient: firebaseAuthClient, + MetricsRegistry: metricsRegistry, + AllowedSourceAccounts: allowedSourceAccounts, + } + + return deps, nil +} + +func handler(deps handlerDeps) http.Handler { + mux := supporthttp.NewAPIMux(deps.Logger) + + mux.NotFound(errorHandler{Error: notFound}.ServeHTTP) + mux.MethodNotAllowed(errorHandler{Error: methodNotAllowed}.ServeHTTP) + + mux.Get("/health", health.PassHandler{}.ServeHTTP) + mux.Route("/accounts", func(mux chi.Router) { + mux.Use(auth.SEP10Middleware(deps.SEP10JWTIssuer, deps.SEP10JWKS)) + mux.Use(auth.FirebaseMiddleware(auth.FirebaseTokenVerifierLive{AuthClient: deps.FirebaseAuthClient})) + mux.Get("/", accountListHandler{ + Logger: deps.Logger, + SigningAddresses: deps.SigningAddresses, + AccountStore: deps.AccountStore, + }.ServeHTTP) + mux.Route("/{address}", func(mux chi.Router) { + mux.Post("/", accountPostHandler{ + Logger: deps.Logger, + SigningAddresses: deps.SigningAddresses, + AccountStore: deps.AccountStore, + }.ServeHTTP) + mux.Put("/", accountPutHandler{ + Logger: deps.Logger, + SigningAddresses: deps.SigningAddresses, + AccountStore: deps.AccountStore, + }.ServeHTTP) + mux.Get("/", accountGetHandler{ + Logger: deps.Logger, + SigningAddresses: deps.SigningAddresses, + AccountStore: deps.AccountStore, + }.ServeHTTP) + mux.Delete("/", accountDeleteHandler{ + Logger: deps.Logger, + SigningAddresses: deps.SigningAddresses, + AccountStore: deps.AccountStore, + }.ServeHTTP) + signHandler := accountSignHandler{ + Logger: deps.Logger, + SigningKeys: deps.SigningKeys, + NetworkPassphrase: deps.NetworkPassphrase, + AccountStore: deps.AccountStore, + AllowedSourceAccounts: deps.AllowedSourceAccounts, + } + mux.Post("/sign", signHandler.ServeHTTP) + mux.Post("/sign/{signing-address}", signHandler.ServeHTTP) + }) + }) + + return mux +} diff --git a/exp/services/recoverysigner/internal/serve/serve_admin.go b/exp/services/recoverysigner/internal/serve/serve_admin.go new file mode 100644 index 0000000000..e2b59dab0e --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/serve_admin.go @@ -0,0 +1,35 @@ +package serve + +import ( + "fmt" + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + supporthttp "github.com/stellar/go/support/http" + supportlog "github.com/stellar/go/support/log" +) + +func serveAdmin(opts Options, deps adminDeps) { + adminHandler := adminHandler(deps) + + addr := fmt.Sprintf(":%d", opts.AdminPort) + supporthttp.Run(supporthttp.Config{ + ListenAddr: addr, + Handler: adminHandler, + OnStarting: func() { + deps.Logger.Infof("Starting admin port server on %s", addr) + }, + }) +} + +type adminDeps struct { + Logger *supportlog.Entry + MetricsGatherer prometheus.Gatherer +} + +func adminHandler(deps adminDeps) http.Handler { + mux := supporthttp.NewMux(deps.Logger) + mux.Handle("/metrics", promhttp.HandlerFor(deps.MetricsGatherer, promhttp.HandlerOpts{})) + return mux +} diff --git a/exp/services/recoverysigner/internal/serve/serve_admin_test.go b/exp/services/recoverysigner/internal/serve/serve_admin_test.go new file mode 100644 index 0000000000..c740e707f0 --- /dev/null +++ b/exp/services/recoverysigner/internal/serve/serve_admin_test.go @@ -0,0 +1,121 @@ +package serve + +import ( + 
"io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stellar/go/exp/services/recoverysigner/internal/account" + "github.com/stellar/go/exp/services/recoverysigner/internal/db/dbtest" + supportlog "github.com/stellar/go/support/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAdminHandler_metricsAccountsCountNone(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + + mr := prometheus.NewRegistry() + mr.MustRegister(metricAccountsCount{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + }.NewCollector()) + + deps := adminDeps{ + Logger: supportlog.DefaultLogger, + MetricsGatherer: mr, + } + h := adminHandler(deps) + + r := httptest.NewRequest("GET", "/metrics", nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `accounts_count 0` + assert.Contains(t, string(body), wantBody) +} + +func TestAdminHandler_metricsAccountsCountSome(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{{Role: "owner", AuthMethods: []account.AuthMethod{{Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}}}}, + }) + s.Add(account.Account{ + Address: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM", + Identities: []account.Identity{{Role: "owner", AuthMethods: []account.AuthMethod{{Type: account.AuthMethodTypeAddress, Value: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}}}}, + }) + + mr := prometheus.NewRegistry() + mr.MustRegister(metricAccountsCount{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + }.NewCollector()) + + deps := adminDeps{ + Logger: supportlog.DefaultLogger, + MetricsGatherer: mr, + } + + h := adminHandler(deps) + + r := httptest.NewRequest("GET", "/metrics", nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `accounts_count 2` + assert.Contains(t, string(body), wantBody) +} + +func TestAdminHandler_metricsAccountsCountSomeDeleted(t *testing.T) { + s := &account.DBStore{DB: dbtest.Open(t).Open()} + s.Add(account.Account{ + Address: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N", + Identities: []account.Identity{{Role: "owner", AuthMethods: []account.AuthMethod{{Type: account.AuthMethodTypeAddress, Value: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM"}}}}, + }) + s.Add(account.Account{ + Address: "GCGZ3CNBE47IWAA5YIKDZL2XYYLA2UKJPS55P5EJ4VOMLK523PF3G7EM", + Identities: []account.Identity{{Role: "owner", AuthMethods: []account.AuthMethod{{Type: account.AuthMethodTypeAddress, Value: "GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N"}}}}, + }) + s.Delete("GDIXCQJ2W2N6TAS6AYW4LW2EBV7XNRUCLNHQB37FARDEWBQXRWP47Q6N") + + mr := prometheus.NewRegistry() + mr.MustRegister(metricAccountsCount{ + Logger: supportlog.DefaultLogger, + AccountStore: s, + }.NewCollector()) + + deps := adminDeps{ + Logger: supportlog.DefaultLogger, + MetricsGatherer: mr, + } + + h := adminHandler(deps) + + r := httptest.NewRequest("GET", "/metrics", nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := 
w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `accounts_count 1` + assert.Contains(t, string(body), wantBody) +} diff --git a/exp/services/recoverysigner/main.go b/exp/services/recoverysigner/main.go new file mode 100644 index 0000000000..d8c7bb157d --- /dev/null +++ b/exp/services/recoverysigner/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/stellar/go/exp/services/recoverysigner/cmd" + supportlog "github.com/stellar/go/support/log" +) + +func main() { + logger := supportlog.New() + logger.SetLevel(logrus.TraceLevel) + + rootCmd := &cobra.Command{ + Use: "recoverysigner [command]", + Short: "SEP-30 Recovery Signer server", + Run: func(cmd *cobra.Command, args []string) { + cmd.Help() + }, + } + + rootCmd.AddCommand((&cmd.ServeCommand{Logger: logger}).Command()) + rootCmd.AddCommand((&cmd.DBCommand{Logger: logger}).Command()) + + err := rootCmd.Execute() + if err != nil { + logger.Fatal(err) + } +} diff --git a/exp/services/webauth/Makefile b/exp/services/webauth/Makefile new file mode 100644 index 0000000000..f29f06d53c --- /dev/null +++ b/exp/services/webauth/Makefile @@ -0,0 +1,16 @@ +# Check if we need to prepend docker commands with sudo +SUDO := $(shell docker version >/dev/null 2>&1 || echo "sudo") + +# If TAG is not provided set default value +TAG ?= stellar/webauth:$(shell git rev-parse --short HEAD)$(and $(shell git status -s),-dirty-$(shell id -u -n)) +# https://github.com/opencontainers/image-spec/blob/master/annotations.md +BUILD_DATE := $(shell date -u +%FT%TZ) + +docker-build: + cd ../../../ && \ + $(SUDO) docker build --pull --label org.opencontainers.image.created="$(BUILD_DATE)" \ + -f exp/services/webauth/docker/Dockerfile -t $(TAG) . + +docker-push: + cd ../../../ && \ + $(SUDO) docker push $(TAG) diff --git a/exp/services/webauth/README.md b/exp/services/webauth/README.md new file mode 100644 index 0000000000..da0a48450e --- /dev/null +++ b/exp/services/webauth/README.md @@ -0,0 +1,57 @@ +# webauth + +This is a [SEP-10] Web Authentication implementation based on SEP-10 v3.2.1 +that requires a user to prove they possess a signing key(s) that meets the high +threshold for an account, i.e. they have the ability to perform any high +threshold operation on the given account. If an account does not exist it may +be optionally verified using the account's master key. + +SEP-10 defines an endpoint for authenticating a user in possession of a Stellar +account using their Stellar account as credentials. This implementation is a +standalone microservice that implements the minimum requirements as defined by +the SEP-10 protocol and will be adapted as the protocol evolves. + +This implementation is not polished and is still experimental. +Running this implementation in production is not recommended. + +## Usage + +``` +$ webauth --help +SEP-10 Web Authentication Server + +Usage: + webauth [command] [flags] + webauth [command] + +Available Commands: + genjwk Generate a JSON Web Key (ECDSA/ES256) for JWT issuing + serve Run the SEP-10 Web Authentication server + +Use "webauth [command] --help" for more information about a command. 
+``` + +## Usage: Serve + +``` +$ webauth serve --help +Run the SEP-10 Web Authentication server + +Usage: + webauth serve [flags] + +Flags: + --allow-accounts-that-do-not-exist Allow accounts that do not exist (ALLOW_ACCOUNTS_THAT_DO_NOT_EXIST) + --auth-home-domain string Home domain(s) of the service(s) requiring SEP-10 authentication comma separated (first domain is the default domain) (AUTH_HOME_DOMAIN) + --challenge-expires-in int The time period in seconds after which the challenge transaction expires (CHALLENGE_EXPIRES_IN) (default 300) + --domain string Domain that this service is hosted at (DOMAIN) + --horizon-url string Horizon URL used for looking up account details (HORIZON_URL) (default "https://horizon-testnet.stellar.org/") + --jwk string JSON Web Key (JWK) used for signing JWTs (if the key is an asymmetric key that has separate public and private key, the JWK must contain the private key) (JWK) + --jwt-expires-in int The time period in seconds after which the JWT expires (JWT_EXPIRES_IN) (default 300) + --jwt-issuer string The issuer to set in the JWT iss claim (JWT_ISSUER) + --network-passphrase string Network passphrase of the Stellar network transactions should be signed for (NETWORK_PASSPHRASE) (default "Test SDF Network ; September 2015") + --port int Port to listen and serve on (PORT) (default 8000) + --signing-key string Stellar signing key(s) used for signing transactions comma separated (first key is used for signing, others used for verifying challenges) (SIGNING_KEY) +``` + +[SEP-10]: https://github.com/stellar/stellar-protocol/blob/28c636b4ef5074ca0c3d46bbe9bf0f3f38095233/ecosystem/sep-0010.md diff --git a/exp/services/webauth/cmd/genjwk.go b/exp/services/webauth/cmd/genjwk.go new file mode 100644 index 0000000000..aa0541d4df --- /dev/null +++ b/exp/services/webauth/cmd/genjwk.go @@ -0,0 +1,54 @@ +package cmd + +import ( + "encoding/json" + + "github.com/spf13/cobra" + "github.com/stellar/go/exp/support/jwtkey" + supportlog "github.com/stellar/go/support/log" + "gopkg.in/square/go-jose.v2" +) + +type GenJWKCommand struct { + Logger *supportlog.Entry +} + +func (c *GenJWKCommand) Command() *cobra.Command { + cmd := &cobra.Command{ + Use: "genjwk", + Short: "Generate a JSON Web Key (ECDSA/ES256) for JWT issuing", + Run: func(_ *cobra.Command, _ []string) { + c.Run() + }, + } + return cmd +} + +func (c *GenJWKCommand) Run() { + k, err := jwtkey.GenerateKey() + if err != nil { + c.Logger.Fatal(err) + } + + alg := jose.ES256 + + { + jwk := jose.JSONWebKey{Key: &k.PublicKey, Algorithm: string(alg)} + bytes, err := json.Marshal(jwk) + if err == nil { + c.Logger.Print("Public:", string(bytes)) + } else { + c.Logger.Print("Public:", err) + } + } + + { + jwk := jose.JSONWebKey{Key: k, Algorithm: string(alg)} + bytes, err := json.Marshal(jwk) + if err == nil { + c.Logger.Print("Private:", string(bytes)) + } else { + c.Logger.Print("Private:", err) + } + } +} diff --git a/exp/services/webauth/cmd/serve.go b/exp/services/webauth/cmd/serve.go new file mode 100644 index 0000000000..204edd9c45 --- /dev/null +++ b/exp/services/webauth/cmd/serve.go @@ -0,0 +1,123 @@ +package cmd + +import ( + "go/types" + + "github.com/spf13/cobra" + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/exp/services/webauth/internal/serve" + "github.com/stellar/go/network" + "github.com/stellar/go/support/config" + supportlog "github.com/stellar/go/support/log" +) + +type ServeCommand struct { + Logger *supportlog.Entry +} + +func (c *ServeCommand) Command() *cobra.Command { + 
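+ // For illustration, with hypothetical values: every option below is exposed as a CLI flag and,
+ // as assumed here, as the environment variable shown in parentheses in the help output (e.g. PORT,
+ // JWT_ISSUER). A minimal invocation covering the required options might look like:
+ //   webauth serve --domain webauth.example.com --auth-home-domain example.com \
+ //     --signing-key S... --jwk '{"kty":"EC","crv":"P-256",...}' --jwt-issuer https://webauth.example.com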
opts := serve.Options{ + Logger: c.Logger, + } + configOpts := config.ConfigOptions{ + { + Name: "port", + Usage: "Port to listen and serve on", + OptType: types.Int, + ConfigKey: &opts.Port, + FlagDefault: 8000, + Required: true, + }, + { + Name: "horizon-url", + Usage: "Horizon URL used for looking up account details", + OptType: types.String, + ConfigKey: &opts.HorizonURL, + FlagDefault: horizonclient.DefaultTestNetClient.HorizonURL, + Required: true, + }, + { + Name: "network-passphrase", + Usage: "Network passphrase of the Stellar network transactions should be signed for", + OptType: types.String, + ConfigKey: &opts.NetworkPassphrase, + FlagDefault: network.TestNetworkPassphrase, + Required: true, + }, + { + Name: "signing-key", + Usage: "Stellar signing key(s) used for signing transactions comma separated (first key is used for signing, others used for verifying challenges)", + OptType: types.String, + ConfigKey: &opts.SigningKeys, + Required: true, + }, + { + Name: "domain", + Usage: "Domain that this service is hosted at", + OptType: types.String, + ConfigKey: &opts.Domain, + Required: true, + }, + { + Name: "auth-home-domain", + Usage: "Home domain(s) of the service(s) requiring SEP-10 authentication comma separated (first domain is the default domain)", + OptType: types.String, + ConfigKey: &opts.AuthHomeDomains, + Required: true, + }, + { + Name: "challenge-expires-in", + Usage: "The time period in seconds after which the challenge transaction expires", + OptType: types.Int, + CustomSetValue: config.SetDuration, + ConfigKey: &opts.ChallengeExpiresIn, + FlagDefault: 300, + Required: true, + }, + { + Name: "jwk", + Usage: "JSON Web Key (JWK) used for signing JWTs (if the key is an asymmetric key that has separate public and private key, the JWK must contain the private key)", + OptType: types.String, + ConfigKey: &opts.JWK, + Required: true, + }, + { + Name: "jwt-issuer", + Usage: "The issuer to set in the JWT iss claim", + OptType: types.String, + ConfigKey: &opts.JWTIssuer, + Required: true, + }, + { + Name: "jwt-expires-in", + Usage: "The time period in seconds after which the JWT expires", + OptType: types.Int, + CustomSetValue: config.SetDuration, + ConfigKey: &opts.JWTExpiresIn, + FlagDefault: 300, + Required: true, + }, + { + Name: "allow-accounts-that-do-not-exist", + Usage: "Allow accounts that do not exist", + OptType: types.Bool, + ConfigKey: &opts.AllowAccountsThatDoNotExist, + FlagDefault: false, + }, + } + cmd := &cobra.Command{ + Use: "serve", + Short: "Run the SEP-10 Web Authentication server", + Run: func(_ *cobra.Command, _ []string) { + configOpts.Require() + configOpts.SetValues() + c.Run(opts) + }, + } + configOpts.Init(cmd) + return cmd +} + +func (c *ServeCommand) Run(opts serve.Options) { + serve.Serve(opts) +} diff --git a/exp/services/webauth/docker/Dockerfile b/exp/services/webauth/docker/Dockerfile new file mode 100644 index 0000000000..f7c497b582 --- /dev/null +++ b/exp/services/webauth/docker/Dockerfile @@ -0,0 +1,14 @@ +FROM golang:1.17 as build + +ADD . 
/src/webauth +WORKDIR /src/webauth +RUN go build -o /bin/webauth ./exp/services/webauth + + +FROM ubuntu:20.04 + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends ca-certificates +COPY --from=build /bin/webauth /app/ +EXPOSE 8000 +ENTRYPOINT ["/app/webauth"] +CMD ["serve"] diff --git a/exp/services/webauth/internal/serve/challenge.go b/exp/services/webauth/internal/serve/challenge.go new file mode 100644 index 0000000000..a01458215d --- /dev/null +++ b/exp/services/webauth/internal/serve/challenge.go @@ -0,0 +1,101 @@ +package serve + +import ( + "net/http" + "strings" + "time" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/strkey" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" + "github.com/stellar/go/txnbuild" +) + +// ChallengeHandler implements the SEP-10 challenge endpoint and handles +// requests for a new challenge transaction. +type challengeHandler struct { + Logger *supportlog.Entry + NetworkPassphrase string + SigningKey *keypair.Full + ChallengeExpiresIn time.Duration + Domain string + HomeDomains []string +} + +type challengeResponse struct { + Transaction string `json:"transaction"` + NetworkPassphrase string `json:"network_passphrase"` +} + +func (h challengeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + queryValues := r.URL.Query() + + account := queryValues.Get("account") + if !strkey.IsValidEd25519PublicKey(account) { + badRequest.Render(w) + return + } + + homeDomain := queryValues.Get("home_domain") + if homeDomain != "" { + // In some cases the full stop (period) character is used at the end of a FQDN. + homeDomain = strings.TrimSuffix(homeDomain, ".") + matched := false + for _, supportedDomain := range h.HomeDomains { + if homeDomain == supportedDomain { + matched = true + break + } + } + if !matched { + badRequest.Render(w) + return + } + } else { + homeDomain = h.HomeDomains[0] + } + + tx, err := txnbuild.BuildChallengeTx( + h.SigningKey.Seed(), + account, + h.Domain, + homeDomain, + h.NetworkPassphrase, + h.ChallengeExpiresIn, + ) + if err != nil { + h.Logger.Ctx(ctx).WithStack(err).Error(err) + serverError.Render(w) + return + } + + hash, err := tx.HashHex(h.NetworkPassphrase) + if err != nil { + h.Logger.Ctx(ctx).WithStack(err).Error(err) + serverError.Render(w) + return + } + + l := h.Logger.Ctx(ctx). + WithField("tx", hash). + WithField("account", account). + WithField("serversigner", h.SigningKey.Address()). 
+ WithField("homedomain", homeDomain) + + l.Info("Generated challenge transaction for account.") + + txeBase64, err := tx.Base64() + if err != nil { + h.Logger.Ctx(ctx).WithStack(err).Error(err) + serverError.Render(w) + return + } + + res := challengeResponse{ + Transaction: txeBase64, + NetworkPassphrase: h.NetworkPassphrase, + } + httpjson.Render(w, res, httpjson.JSON) +} diff --git a/exp/services/webauth/internal/serve/challenge_test.go b/exp/services/webauth/internal/serve/challenge_test.go new file mode 100644 index 0000000000..9391224763 --- /dev/null +++ b/exp/services/webauth/internal/serve/challenge_test.go @@ -0,0 +1,187 @@ +package serve + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestChallenge(t *testing.T) { + serverKey := keypair.MustRandom() + account := keypair.MustRandom() + + h := challengeHandler{ + Logger: supportlog.DefaultLogger, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningKey: serverKey, + ChallengeExpiresIn: time.Minute, + Domain: "webauthdomain", + HomeDomains: []string{"testdomain"}, + } + + r := httptest.NewRequest("GET", "/?account="+account.Address(), nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + res := struct { + Transaction string `json:"transaction"` + NetworkPassphrase string `json:"network_passphrase"` + }{} + err := json.NewDecoder(resp.Body).Decode(&res) + require.NoError(t, err) + + var tx xdr.TransactionEnvelope + err = xdr.SafeUnmarshalBase64(res.Transaction, &tx) + require.NoError(t, err) + + assert.Len(t, tx.Signatures(), 1) + sourceAccount := tx.SourceAccount().ToAccountId() + assert.Equal(t, serverKey.Address(), sourceAccount.Address()) + assert.Equal(t, tx.SeqNum(), int64(0)) + assert.Equal(t, time.Unix(int64(tx.TimeBounds().MaxTime), 0).Sub(time.Unix(int64(tx.TimeBounds().MinTime), 0)), time.Minute) + assert.Len(t, tx.Operations(), 2) + op0SourceAccount := tx.Operations()[0].SourceAccount.ToAccountId() + assert.Equal(t, account.Address(), op0SourceAccount.Address()) + assert.Equal(t, xdr.OperationTypeManageData, tx.Operations()[0].Body.Type) + assert.Regexp(t, "^testdomain auth", tx.Operations()[0].Body.ManageDataOp.DataName) + op1SourceAccount := tx.Operations()[1].SourceAccount.ToAccountId() + assert.Equal(t, sourceAccount.Address(), op1SourceAccount.Address()) + assert.Equal(t, xdr.OperationTypeManageData, tx.Operations()[1].Body.Type) + assert.Equal(t, "web_auth_domain", string(tx.Operations()[1].Body.ManageDataOp.DataName)) + assert.Equal(t, "webauthdomain", string(*tx.Operations()[1].Body.ManageDataOp.DataValue)) + + hash, err := network.HashTransactionInEnvelope(tx, res.NetworkPassphrase) + require.NoError(t, err) + assert.NoError(t, serverKey.FromAddress().Verify(hash[:], tx.Signatures()[0].Signature)) + + assert.Equal(t, network.TestNetworkPassphrase, res.NetworkPassphrase) +} + +func TestChallenge_anotherHomeDomain(t *testing.T) { + serverKey := keypair.MustRandom() + account := keypair.MustRandom() + anotherDomain := "anotherdomain" + + h := challengeHandler{ + Logger: supportlog.DefaultLogger, + NetworkPassphrase: network.TestNetworkPassphrase, + 
SigningKey: serverKey, + ChallengeExpiresIn: time.Minute, + Domain: "webauthdomain", + HomeDomains: []string{"testdomain", anotherDomain}, + } + + r := httptest.NewRequest("GET", fmt.Sprintf("/?account=%s&home_domain=%s", account.Address(), anotherDomain), nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + res := struct { + Transaction string `json:"transaction"` + NetworkPassphrase string `json:"network_passphrase"` + }{} + err := json.NewDecoder(resp.Body).Decode(&res) + require.NoError(t, err) + + var tx xdr.TransactionEnvelope + err = xdr.SafeUnmarshalBase64(res.Transaction, &tx) + require.NoError(t, err) + + assert.Len(t, tx.Signatures(), 1) + sourceAccount := tx.SourceAccount().ToAccountId() + assert.Equal(t, serverKey.Address(), sourceAccount.Address()) + assert.Equal(t, tx.SeqNum(), int64(0)) + assert.Equal(t, time.Unix(int64(tx.TimeBounds().MaxTime), 0).Sub(time.Unix(int64(tx.TimeBounds().MinTime), 0)), time.Minute) + assert.Len(t, tx.Operations(), 2) + op0SourceAccount := tx.Operations()[0].SourceAccount.ToAccountId() + assert.Equal(t, account.Address(), op0SourceAccount.Address()) + assert.Equal(t, xdr.OperationTypeManageData, tx.Operations()[0].Body.Type) + assert.Regexp(t, "^anotherdomain auth", tx.Operations()[0].Body.ManageDataOp.DataName) + op1SourceAccount := tx.Operations()[1].SourceAccount.ToAccountId() + assert.Equal(t, sourceAccount.Address(), op1SourceAccount.Address()) + assert.Equal(t, xdr.OperationTypeManageData, tx.Operations()[1].Body.Type) + assert.Equal(t, "web_auth_domain", string(tx.Operations()[1].Body.ManageDataOp.DataName)) + assert.Equal(t, "webauthdomain", string(*tx.Operations()[1].Body.ManageDataOp.DataValue)) + + hash, err := network.HashTransactionInEnvelope(tx, res.NetworkPassphrase) + require.NoError(t, err) + assert.NoError(t, serverKey.FromAddress().Verify(hash[:], tx.Signatures()[0].Signature)) + + assert.Equal(t, network.TestNetworkPassphrase, res.NetworkPassphrase) +} + +func TestChallenge_noAccount(t *testing.T) { + h := challengeHandler{ + SigningKey: keypair.MustRandom(), + } + + r := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.JSONEq(t, `{"error":"The request was invalid in some way."}`, string(body)) +} + +func TestChallenge_invalidAccount(t *testing.T) { + h := challengeHandler{ + SigningKey: keypair.MustRandom(), + } + + r := httptest.NewRequest("GET", "/?account=GREATACCOUNT", nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.JSONEq(t, `{"error":"The request was invalid in some way."}`, string(body)) +} + +func TestChallenge_invalidHomeDomain(t *testing.T) { + account := keypair.MustRandom() + anotherDomain := "anotherdomain" + + h := challengeHandler{ + SigningKey: keypair.MustRandom(), + HomeDomains: []string{"testdomain"}, + } + + r := httptest.NewRequest("GET", fmt.Sprintf("/?account=%s&home_domain=%s", account.Address(), anotherDomain), 
nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.JSONEq(t, `{"error":"The request was invalid in some way."}`, string(body)) +} diff --git a/exp/services/webauth/internal/serve/errors.go b/exp/services/webauth/internal/serve/errors.go new file mode 100644 index 0000000000..2cf61da23a --- /dev/null +++ b/exp/services/webauth/internal/serve/errors.go @@ -0,0 +1,45 @@ +package serve + +import ( + "net/http" + + "github.com/stellar/go/support/render/httpjson" +) + +var serverError = errorResponse{ + Status: http.StatusInternalServerError, + Error: "An error occurred while processing this request.", +} +var notFound = errorResponse{ + Status: http.StatusNotFound, + Error: "The resource at the url requested was not found.", +} +var methodNotAllowed = errorResponse{ + Status: http.StatusMethodNotAllowed, + Error: "The method is not allowed for resource at the url requested.", +} +var badRequest = errorResponse{ + Status: http.StatusBadRequest, + Error: "The request was invalid in some way.", +} +var unauthorized = errorResponse{ + Status: http.StatusUnauthorized, + Error: "The request could not be authenticated.", +} + +type errorResponse struct { + Status int `json:"-"` + Error string `json:"error"` +} + +func (e errorResponse) Render(w http.ResponseWriter) { + httpjson.RenderStatus(w, e.Status, e, httpjson.JSON) +} + +type errorHandler struct { + Error errorResponse +} + +func (h errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.Error.Render(w) +} diff --git a/exp/services/webauth/internal/serve/errors_test.go b/exp/services/webauth/internal/serve/errors_test.go new file mode 100644 index 0000000000..d66c30b6c9 --- /dev/null +++ b/exp/services/webauth/internal/serve/errors_test.go @@ -0,0 +1,33 @@ +package serve + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestErrorResponseRender(t *testing.T) { + w := httptest.NewRecorder() + serverError.Render(w) + resp := w.Result() + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.JSONEq(t, `{"error":"An error occurred while processing this request."}`, string(body)) +} + +func TestErrorHandler(t *testing.T) { + r := httptest.NewRequest("GET", "/404", nil) + w := httptest.NewRecorder() + handler := errorHandler{Error: notFound} + handler.ServeHTTP(w, r) + resp := w.Result() + assert.Equal(t, http.StatusNotFound, resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.JSONEq(t, `{"error":"The resource at the url requested was not found."}`, string(body)) +} diff --git a/exp/services/webauth/internal/serve/serve.go b/exp/services/webauth/internal/serve/serve.go new file mode 100644 index 0000000000..b6457064c6 --- /dev/null +++ b/exp/services/webauth/internal/serve/serve.go @@ -0,0 +1,121 @@ +package serve + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/errors" + supporthttp "github.com/stellar/go/support/http" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/health" + 
"gopkg.in/square/go-jose.v2" +) + +type Options struct { + Logger *supportlog.Entry + HorizonURL string + Port int + NetworkPassphrase string + SigningKeys string + Domain string + AuthHomeDomains string + ChallengeExpiresIn time.Duration + JWK string + JWTIssuer string + JWTExpiresIn time.Duration + AllowAccountsThatDoNotExist bool +} + +func Serve(opts Options) { + handler, err := handler(opts) + if err != nil { + opts.Logger.Fatalf("Error: %v", err) + return + } + + addr := fmt.Sprintf(":%d", opts.Port) + supporthttp.Run(supporthttp.Config{ + ListenAddr: addr, + Handler: handler, + OnStarting: func() { + opts.Logger.Info("Starting SEP-10 Web Authentication Server") + opts.Logger.Infof("Listening on %s", addr) + }, + }) +} + +func handler(opts Options) (http.Handler, error) { + signingKeys := []*keypair.Full{} + signingKeyStrs := strings.Split(opts.SigningKeys, ",") + signingAddresses := make([]*keypair.FromAddress, 0, len(signingKeyStrs)) + + for i, signingKeyStr := range signingKeyStrs { + signingKey, err := keypair.ParseFull(signingKeyStr) + if err != nil { + return nil, errors.Wrap(err, "parsing signing key seed") + } + signingKeys = append(signingKeys, signingKey) + signingAddresses = append(signingAddresses, signingKey.FromAddress()) + opts.Logger.Info("Signing key ", i, ": ", signingKey.Address()) + } + + homeDomains := strings.Split(opts.AuthHomeDomains, ",") + trimmedHomeDomains := make([]string, 0, len(homeDomains)) + for _, homeDomain := range homeDomains { + // In some cases the full stop (period) character is used at the end of a FQDN. + trimmedHomeDomains = append(trimmedHomeDomains, strings.TrimSuffix(homeDomain, ".")) + } + + jwk := jose.JSONWebKey{} + err := json.Unmarshal([]byte(opts.JWK), &jwk) + if err != nil { + return nil, errors.Wrap(err, "parsing JSON Web Key (JWK)") + } + if jwk.Algorithm == "" { + return nil, errors.New("algorithm (alg) field must be set") + } + + horizonTimeout := horizonclient.HorizonTimeout + httpClient := &http.Client{ + Timeout: horizonTimeout, + } + horizonClient := &horizonclient.Client{ + HorizonURL: opts.HorizonURL, + HTTP: httpClient, + } + horizonClient.SetHorizonTimeout(horizonTimeout) + + mux := supporthttp.NewAPIMux(opts.Logger) + + mux.NotFound(errorHandler{Error: notFound}.ServeHTTP) + mux.MethodNotAllowed(errorHandler{Error: methodNotAllowed}.ServeHTTP) + + mux.Get("/health", health.PassHandler{}.ServeHTTP) + mux.Get("/", challengeHandler{ + Logger: opts.Logger, + NetworkPassphrase: opts.NetworkPassphrase, + SigningKey: signingKeys[0], + ChallengeExpiresIn: opts.ChallengeExpiresIn, + Domain: opts.Domain, + HomeDomains: trimmedHomeDomains, + }.ServeHTTP) + mux.Post("/", tokenHandler{ + Logger: opts.Logger, + HorizonClient: horizonClient, + NetworkPassphrase: opts.NetworkPassphrase, + SigningAddresses: signingAddresses, + JWK: jwk, + JWTIssuer: opts.JWTIssuer, + JWTExpiresIn: opts.JWTExpiresIn, + AllowAccountsThatDoNotExist: opts.AllowAccountsThatDoNotExist, + Domain: opts.Domain, + HomeDomains: trimmedHomeDomains, + }.ServeHTTP) + + return mux, nil +} diff --git a/exp/services/webauth/internal/serve/token.go b/exp/services/webauth/internal/serve/token.go new file mode 100644 index 0000000000..56db5f269f --- /dev/null +++ b/exp/services/webauth/internal/serve/token.go @@ -0,0 +1,161 @@ +package serve + +import ( + "net/http" + "strings" + "time" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/http/httpdecode" + supportlog "github.com/stellar/go/support/log" + 
"github.com/stellar/go/support/render/httpjson" + "github.com/stellar/go/txnbuild" + "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" +) + +type tokenHandler struct { + Logger *supportlog.Entry + HorizonClient horizonclient.ClientInterface + NetworkPassphrase string + SigningAddresses []*keypair.FromAddress + JWK jose.JSONWebKey + JWTIssuer string + JWTExpiresIn time.Duration + AllowAccountsThatDoNotExist bool + Domain string + HomeDomains []string +} + +type tokenRequest struct { + Transaction string `json:"transaction" form:"transaction"` +} + +type tokenResponse struct { + Token string `json:"token"` +} + +func (h tokenHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + req := tokenRequest{} + + err := httpdecode.Decode(r, &req) + if err != nil { + badRequest.Render(w) + return + } + + var ( + tx *txnbuild.Transaction + clientAccountID string + signingAddress *keypair.FromAddress + homeDomain string + ) + for _, s := range h.SigningAddresses { + tx, clientAccountID, homeDomain, err = txnbuild.ReadChallengeTx(req.Transaction, s.Address(), h.NetworkPassphrase, h.Domain, h.HomeDomains) + if err == nil { + signingAddress = s + break + } + } + if signingAddress == nil { + badRequest.Render(w) + return + } + if homeDomain == "" { + badRequest.Render(w) + return + } + + hash, err := tx.HashHex(h.NetworkPassphrase) + if err != nil { + h.Logger.Ctx(ctx).WithStack(err).Error(err) + serverError.Render(w) + return + } + + l := h.Logger.Ctx(ctx). + WithField("tx", hash). + WithField("account", clientAccountID). + WithField("serversigner", signingAddress.Address()). + WithField("homedomain", homeDomain) + + l.Info("Start verifying challenge transaction.") + + var clientAccountExists bool + clientAccount, err := h.HorizonClient.AccountDetail(horizonclient.AccountRequest{AccountID: clientAccountID}) + switch { + case err == nil: + clientAccountExists = true + l.Infof("Account exists.") + case horizonclient.IsNotFoundError(err): + clientAccountExists = false + l.Infof("Account does not exist.") + default: + l.WithStack(err).Error(err) + serverError.Render(w) + return + } + + var signersVerified []string + if clientAccountExists { + requiredThreshold := txnbuild.Threshold(clientAccount.Thresholds.HighThreshold) + clientSignerSummary := clientAccount.SignerSummary() + signersVerified, err = txnbuild.VerifyChallengeTxThreshold(req.Transaction, signingAddress.Address(), h.NetworkPassphrase, h.Domain, h.HomeDomains, requiredThreshold, clientSignerSummary) + if err != nil { + l. + WithField("signersCount", len(clientSignerSummary)). + WithField("signaturesCount", len(tx.Signatures())). + WithField("requiredThreshold", requiredThreshold). + Info("Failed to verify with signers that do not meet threshold.") + unauthorized.Render(w) + return + } + } else { + if !h.AllowAccountsThatDoNotExist { + l.Infof("Failed to verify because accounts that do not exist are not allowed.") + unauthorized.Render(w) + return + } + signersVerified, err = txnbuild.VerifyChallengeTxSigners(req.Transaction, signingAddress.Address(), h.NetworkPassphrase, h.Domain, h.HomeDomains, clientAccountID) + if err != nil { + l.Infof("Failed to verify with account master key as signer.") + unauthorized.Render(w) + return + } + } + + l. + WithField("signers", strings.Join(signersVerified, ",")). 
+ Infof("Successfully verified challenge transaction.") + + jwsOptions := &jose.SignerOptions{} + jwsOptions.WithType("JWT") + jws, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.SignatureAlgorithm(h.JWK.Algorithm), Key: h.JWK.Key}, jwsOptions) + if err != nil { + l.WithStack(err).Error(err) + serverError.Render(w) + return + } + + issuedAt := time.Unix(tx.Timebounds().MinTime, 0) + claims := jwt.Claims{ + Issuer: h.JWTIssuer, + Subject: clientAccountID, + IssuedAt: jwt.NewNumericDate(issuedAt), + Expiry: jwt.NewNumericDate(issuedAt.Add(h.JWTExpiresIn)), + } + tokenStr, err := jwt.Signed(jws).Claims(claims).CompactSerialize() + if err != nil { + l.WithStack(err).Error(err) + serverError.Render(w) + return + } + + res := tokenResponse{ + Token: tokenStr, + } + httpjson.Render(w, res, httpjson.JSON) +} diff --git a/exp/services/webauth/internal/serve/token_test.go b/exp/services/webauth/internal/serve/token_test.go new file mode 100644 index 0000000000..ceb1c1e771 --- /dev/null +++ b/exp/services/webauth/internal/serve/token_test.go @@ -0,0 +1,1309 @@ +package serve + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "github.com/golang-jwt/jwt" + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/exp/support/jwtkey" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/protocols/horizon" + supportlog "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/txnbuild" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/square/go-jose.v2" +) + +func TestToken_formInputSuccess(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, account) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{ + Thresholds: horizon.AccountThresholds{ + LowThreshold: 1, + MedThreshold: 10, + HighThreshold: 100, + }, + Signers: []horizon.Signer{ + { + Key: account.Address(), + Weight: 100, + }, + }}, + nil, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := url.Values{} + body.Set("transaction", txSigned) + r := httptest.NewRequest("POST", "/", strings.NewReader(body.Encode())) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + res := struct { + Token string `json:"token"` + }{} + err = json.NewDecoder(resp.Body).Decode(&res) + require.NoError(t, err) + + t.Logf("JWT: %s", res.Token) + + token, err := jwt.Parse(res.Token, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return &jwtPrivateKey.PublicKey, nil + }) + require.NoError(t, err) + + claims := token.Claims.(jwt.MapClaims) + assert.Equal(t, "https://example.com", claims["iss"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, float64(tx.Timebounds().MinTime), claims["iat"]) + iat := time.Unix(int64(claims["iat"].(float64)), 0) + assert.Equal(t, float64(iat.Add(h.JWTExpiresIn).Unix()), claims["exp"]) +} + +func TestToken_formInputSuccess_jwtHeaderAndPayloadAreDeterministic(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, account) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{ + Thresholds: horizon.AccountThresholds{ + LowThreshold: 1, + MedThreshold: 10, + HighThreshold: 100, + }, + Signers: []horizon.Signer{ + { + Key: account.Address(), + Weight: 100, + }, + }}, + nil, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := url.Values{} + body.Set("transaction", txSigned) + r := httptest.NewRequest("POST", "/", strings.NewReader(body.Encode())) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + res1 := struct { + Token string `json:"token"` + }{} + err = json.NewDecoder(resp.Body).Decode(&res1) + require.NoError(t, err) + + t.Logf("JWT 1: %s", res1.Token) + + // let's replay the transaction to make sure the returned JWT remains the same + time.Sleep(time.Second) + r = httptest.NewRequest("POST", "/", strings.NewReader(body.Encode())) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + w = httptest.NewRecorder() + h.ServeHTTP(w, r) + resp = w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + res2 := struct { + Token string `json:"token"` + }{} + err = json.NewDecoder(resp.Body).Decode(&res2) + require.NoError(t, err) + + t.Logf("JWT 2: %s", res2.Token) + + jwtParts1 := strings.Split(res1.Token, ".") + require.Len(t, jwtParts1, 3) + jwtParts2 := strings.Split(res2.Token, ".") + require.Len(t, jwtParts2, 3) + require.Equal(t, jwtParts1[:2], jwtParts2[:2]) +} + +func TestToken_jsonInputSuccess(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, account) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{ + Thresholds: horizon.AccountThresholds{ + LowThreshold: 1, + MedThreshold: 10, + HighThreshold: 100, + }, + Signers: []horizon.Signer{ + { + Key: account.Address(), + Weight: 100, + }, + }}, + nil, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + res := struct { + Token string `json:"token"` + }{} + err = json.NewDecoder(resp.Body).Decode(&res) + require.NoError(t, err) + + t.Logf("JWT: %s", res.Token) + + token, err := jwt.Parse(res.Token, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return &jwtPrivateKey.PublicKey, nil + }) + require.NoError(t, err) + + claims := token.Claims.(jwt.MapClaims) + assert.Equal(t, "https://example.com", claims["iss"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, float64(tx.Timebounds().MinTime), claims["iat"]) + iat := time.Unix(int64(claims["iat"].(float64)), 0) + assert.Equal(t, float64(iat.Add(h.JWTExpiresIn).Unix()), claims["exp"]) +} + +// This test ensures that when multiple server keys are configured on the +// server that a challenge transaction is accepted if it was signed with either +// key, along with the accounts signing keys. +func TestToken_jsonInputValidRotatingServerSigners(t *testing.T) { + serverKeys := []*keypair.Full{keypair.MustRandom(), keypair.MustRandom()} + serverKeyAddresses := []*keypair.FromAddress{} + for i, serverKey := range serverKeys { + serverKeyAddress := serverKey.FromAddress() + serverKeyAddresses = append(serverKeyAddresses, serverKeyAddress) + t.Logf("Server signing key %d: %v", i, serverKeyAddress) + } + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + accountSigner1 := keypair.MustRandom() + t.Logf("Client account signer 1: %s", accountSigner1.Address()) + + accountSigner2 := keypair.MustRandom() + t.Logf("Client account signer 2: %s", accountSigner2.Address()) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{ + Thresholds: horizon.AccountThresholds{ + LowThreshold: 1, + MedThreshold: 10, + HighThreshold: 100, + }, + Signers: []horizon.Signer{ + { + Key: accountSigner1.Address(), + Weight: 40, + }, + { + Key: accountSigner2.Address(), + Weight: 60, + }, + }}, + nil, + ) + + domain := "webauth.example.com" + homeDomain := "example.com" + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: serverKeyAddresses, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + for i, serverKey := range serverKeys { + t.Run(fmt.Sprintf("signed with server key %d", i), func(t *testing.T) { + // Build challenge transaction using one server signing key + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + // Sign the challenge transaction with the accounts signers + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, accountSigner1, accountSigner2) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + // Post the signed challenge transaction back to the server's token endpoint + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + // Check that we get back an ok response + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + // Check that we get back the valid JWT token + res := struct { + Token string `json:"token"` + }{} + err = json.NewDecoder(resp.Body).Decode(&res) + require.NoError(t, err) + + t.Logf("JWT: %s", res.Token) + + token, err := jwt.Parse(res.Token, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return &jwtPrivateKey.PublicKey, nil + }) + require.NoError(t, err) + + claims := token.Claims.(jwt.MapClaims) + assert.Equal(t, "https://example.com", claims["iss"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, float64(tx.Timebounds().MinTime), claims["iat"]) + iat := time.Unix(int64(claims["iat"].(float64)), 0) + assert.Equal(t, float64(iat.Add(h.JWTExpiresIn).Unix()), claims["exp"]) + }) + } +} + +func TestToken_jsonInputValidMultipleSigners(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + accountSigner1 := keypair.MustRandom() + t.Logf("Client account signer 1: %s", accountSigner1.Address()) + + accountSigner2 := keypair.MustRandom() + t.Logf("Client account signer 2: %s", accountSigner2.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := 
txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, accountSigner1, accountSigner2) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). + Return( + horizon.Account{ + Thresholds: horizon.AccountThresholds{ + LowThreshold: 1, + MedThreshold: 10, + HighThreshold: 100, + }, + Signers: []horizon.Signer{ + { + Key: accountSigner1.Address(), + Weight: 40, + }, + { + Key: accountSigner2.Address(), + Weight: 60, + }, + }}, + nil, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + res := struct { + Token string `json:"token"` + }{} + err = json.NewDecoder(resp.Body).Decode(&res) + require.NoError(t, err) + + t.Logf("JWT: %s", res.Token) + + token, err := jwt.Parse(res.Token, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return &jwtPrivateKey.PublicKey, nil + }) + require.NoError(t, err) + + claims := token.Claims.(jwt.MapClaims) + assert.Equal(t, "https://example.com", claims["iss"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, float64(tx.Timebounds().MinTime), claims["iat"]) + iat := time.Unix(int64(claims["iat"].(float64)), 0) + assert.Equal(t, float64(iat.Add(h.JWTExpiresIn).Unix()), claims["exp"]) +} + +func TestToken_jsonInputNotEnoughWeight(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, account) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. 
+ On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). + Return( + horizon.Account{ + Thresholds: horizon.AccountThresholds{ + LowThreshold: 1, + MedThreshold: 10, + HighThreshold: 100, + }, + Signers: []horizon.Signer{ + { + Key: account.Address(), + Weight: 10, + }, + }}, + nil, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, 401, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + respBodyBytes, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + assert.JSONEq(t, `{"error":"The request could not be authenticated."}`, string(respBodyBytes)) +} + +func TestToken_jsonInputUnrecognizedSigner(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, account) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{ + Thresholds: horizon.AccountThresholds{ + LowThreshold: 1, + MedThreshold: 10, + HighThreshold: 100, + }, + Signers: []horizon.Signer{ + { + Key: keypair.MustRandom().Address(), + Weight: 100, + }, + }}, + nil, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, 401, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + respBodyBytes, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + assert.JSONEq(t, `{"error":"The request could not be authenticated."}`, string(respBodyBytes)) +} + +func TestToken_jsonInputAccountNotExistSuccess(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, account) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{}, + &horizonclient.Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + AllowAccountsThatDoNotExist: true, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + res := struct { + Token string `json:"token"` + }{} + err = json.NewDecoder(resp.Body).Decode(&res) + require.NoError(t, err) + + t.Logf("JWT: %s", res.Token) + + token, err := jwt.Parse(res.Token, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return &jwtPrivateKey.PublicKey, nil + }) + require.NoError(t, err) + + claims := token.Claims.(jwt.MapClaims) + assert.Equal(t, "https://example.com", claims["iss"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, float64(tx.Timebounds().MinTime), claims["iat"]) + iat := time.Unix(int64(claims["iat"].(float64)), 0) + assert.Equal(t, float64(iat.Add(h.JWTExpiresIn).Unix()), claims["exp"]) +} + +func TestToken_jsonInputAccountNotExistFail(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + otherSigner := keypair.MustRandom() + t.Logf("Other signer: %s", otherSigner.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, otherSigner) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{}, + &horizonclient.Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + AllowAccountsThatDoNotExist: true, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, 401, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + respBodyBytes, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + assert.JSONEq(t, `{"error":"The request could not be authenticated."}`, string(respBodyBytes)) +} + +func TestToken_jsonInputAccountNotExistNotAllowed(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, account) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{}, + &horizonclient.Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + AllowAccountsThatDoNotExist: false, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, 401, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + respBodyBytes, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + assert.JSONEq(t, `{"error":"The request could not be authenticated."}`, string(respBodyBytes)) +} + +func TestToken_jsonInputUnrecognizedServerSigner(t *testing.T) { + serverKey1 := keypair.MustRandom() + t.Logf("Server signing key 1: %s", serverKey1.Address()) + serverKey2 := keypair.MustRandom() + t.Logf("Server signing key 2: %s", serverKey2.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey1.Seed(), + account.Address(), + domain, + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, account) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{}, + &horizonclient.Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey2.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + AllowAccountsThatDoNotExist: false, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, 400, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + respBodyBytes, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + assert.JSONEq(t, `{"error":"The request was invalid in some way."}`, string(respBodyBytes)) +} + +func TestToken_jsonInputNoWebAuthDomainSuccess(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + now := time.Now().UTC() + txMinTimebounds := now.Unix() + txMaxTimebounds := now.Add(time.Second * 60).Unix() + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &txnbuild.SimpleAccount{AccountID: serverKey.Address()}, + IncrementSequenceNum: false, + Operations: []txnbuild.Operation{ + &txnbuild.ManageData{ + SourceAccount: account.Address(), + Name: homeDomain + " auth", + Value: []byte("ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg"), + }, + }, + BaseFee: txnbuild.MinBaseFee, + Memo: nil, + Timebounds: txnbuild.NewTimebounds(txMinTimebounds, txMaxTimebounds), + }, + ) + require.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKey) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, account) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{}, + &horizonclient.Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + AllowAccountsThatDoNotExist: true, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + res := struct { + Token string `json:"token"` + }{} + err = json.NewDecoder(resp.Body).Decode(&res) + require.NoError(t, err) + + t.Logf("JWT: %s", res.Token) + + token, err := jwt.Parse(res.Token, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodECDSA); !ok { + return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"]) + } + return &jwtPrivateKey.PublicKey, nil + }) + require.NoError(t, err) + + claims := token.Claims.(jwt.MapClaims) + assert.Equal(t, "https://example.com", claims["iss"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, account.Address(), claims["sub"]) + assert.Equal(t, float64(txMinTimebounds), claims["iat"]) + iat := time.Unix(int64(claims["iat"].(float64)), 0) + assert.Equal(t, float64(iat.Add(h.JWTExpiresIn).Unix()), claims["exp"]) +} + +func TestToken_jsonInputInvalidWebAuthDomainFail(t *testing.T) { + serverKey := keypair.MustRandom() + t.Logf("Server signing key: %s", serverKey.Address()) + + jwtPrivateKey, err := jwtkey.GenerateKey() + require.NoError(t, err) + jwk := jose.JSONWebKey{Key: jwtPrivateKey, Algorithm: string(jose.ES256)} + + account := keypair.MustRandom() + t.Logf("Client account: %s", account.Address()) + + otherSigner := keypair.MustRandom() + t.Logf("Other signer: %s", otherSigner.Address()) + + domain := "webauth.example.com" + homeDomain := "example.com" + tx, err := txnbuild.BuildChallengeTx( + serverKey.Seed(), + account.Address(), + "invalidwebauthdomain.example.com", + homeDomain, + network.TestNetworkPassphrase, + time.Minute, + ) + require.NoError(t, err) + + chTx, err := tx.Base64() + require.NoError(t, err) + t.Logf("Tx: %s", chTx) + + tx, err = tx.Sign(network.TestNetworkPassphrase, otherSigner) + require.NoError(t, err) + txSigned, err := tx.Base64() + require.NoError(t, err) + t.Logf("Signed: %s", txSigned) + + horizonClient := &horizonclient.MockClient{} + horizonClient. + On("AccountDetail", horizonclient.AccountRequest{AccountID: account.Address()}). 
+ Return( + horizon.Account{}, + &horizonclient.Error{ + Problem: problem.P{ + Type: "https://stellar.org/horizon-errors/not_found", + Title: "Resource Missing", + Status: 404, + }, + }, + ) + + h := tokenHandler{ + Logger: supportlog.DefaultLogger, + HorizonClient: horizonClient, + NetworkPassphrase: network.TestNetworkPassphrase, + SigningAddresses: []*keypair.FromAddress{serverKey.FromAddress()}, + JWK: jwk, + JWTIssuer: "https://example.com", + JWTExpiresIn: time.Minute, + AllowAccountsThatDoNotExist: true, + Domain: domain, + HomeDomains: []string{homeDomain}, + } + + body := struct { + Transaction string `json:"transaction"` + }{ + Transaction: txSigned, + } + bodyBytes, err := json.Marshal(body) + require.NoError(t, err) + r := httptest.NewRequest("POST", "/", bytes.NewReader(bodyBytes)) + r.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, 400, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + respBodyBytes, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + assert.JSONEq(t, `{"error":"The request was invalid in some way."}`, string(respBodyBytes)) +} diff --git a/exp/services/webauth/main.go b/exp/services/webauth/main.go new file mode 100644 index 0000000000..873db8f55e --- /dev/null +++ b/exp/services/webauth/main.go @@ -0,0 +1,29 @@ +package main + +import ( + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/stellar/go/exp/services/webauth/cmd" + supportlog "github.com/stellar/go/support/log" +) + +func main() { + logger := supportlog.New() + logger.SetLevel(logrus.TraceLevel) + + rootCmd := &cobra.Command{ + Use: "webauth [command]", + Short: "SEP-10 Web Authentication Server", + Run: func(cmd *cobra.Command, args []string) { + cmd.Help() + }, + } + + rootCmd.AddCommand((&cmd.ServeCommand{Logger: logger}).Command()) + rootCmd.AddCommand((&cmd.GenJWKCommand{Logger: logger}).Command()) + + err := rootCmd.Execute() + if err != nil { + logger.Fatal(err) + } +} diff --git a/exp/support/jwtkey/jwtkey.go b/exp/support/jwtkey/jwtkey.go new file mode 100644 index 0000000000..a97e985a28 --- /dev/null +++ b/exp/support/jwtkey/jwtkey.go @@ -0,0 +1,26 @@ +// Package jwtkey provides utility functions for generating, serializing and +// deserializing JWT ECDSA keys. +// +// TODO: Replace EC function usages with PKCS8 functions for supporting ECDSA +// and RSA keys instead of only supporting ECDSA. The fact this package only +// supports ECDSA is unnecessary. +package jwtkey + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + + "github.com/stellar/go/support/errors" +) + +// GenerateKey is a convenience function for generating an ECDSA key for use as +// a JWT key. It uses the P256 curve. To use other curves use the crypto/ecdsa +// package directly. 
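+//
+// A minimal usage sketch (the JWK wrapping mirrors how the webauth tests in
+// this change consume the generated key; variable names are illustrative):
+//
+//	key, err := jwtkey.GenerateKey()
+//	if err != nil {
+//		// handle error
+//	}
+//	jwk := jose.JSONWebKey{Key: key, Algorithm: string(jose.ES256)}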
+func GenerateKey() (*ecdsa.PrivateKey, error) { + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, errors.Wrap(err, "generating ECDSA key") + } + return k, nil +} diff --git a/exp/support/jwtkey/jwtkey_test.go b/exp/support/jwtkey/jwtkey_test.go new file mode 100644 index 0000000000..e2c0a92a71 --- /dev/null +++ b/exp/support/jwtkey/jwtkey_test.go @@ -0,0 +1,15 @@ +package jwtkey + +import ( + "crypto/elliptic" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGenerate(t *testing.T) { + key, err := GenerateKey() + require.NoError(t, err) + assert.Equal(t, elliptic.P256(), key.Curve) +} diff --git a/exp/support/pipeline/buffered_read_writer.go b/exp/support/pipeline/buffered_read_writer.go new file mode 100644 index 0000000000..22e5169097 --- /dev/null +++ b/exp/support/pipeline/buffered_read_writer.go @@ -0,0 +1,80 @@ +package pipeline + +import ( + "context" + "io" +) + +// bufferSize is a size of a buffered channel in BufferedReadWriter. +// This should be big enough to hold a short lag of items in a pipeline +// but small enough to not consume too much memory. +// In pipelines with no slow processors a buffered channel will be empty +// or almost empty most of the time. +const bufferSize = 50000 + +func (b *BufferedReadWriter) init() { + b.buffer = make(chan interface{}, bufferSize) +} + +func (b *BufferedReadWriter) close() { + b.writeCloseMutex.Lock() + defer b.writeCloseMutex.Unlock() + + close(b.buffer) + b.closed = true +} + +func (b *BufferedReadWriter) GetContext() context.Context { + return b.context +} + +func (b *BufferedReadWriter) Read() (interface{}, error) { + b.initOnce.Do(b.init) + + entry, more := <-b.buffer + if more { + b.readEntriesMutex.Lock() + b.readEntries++ + b.readEntriesMutex.Unlock() + return entry, nil + } + + return nil, io.EOF +} + +func (b *BufferedReadWriter) Write(entry interface{}) error { + b.initOnce.Do(b.init) + + b.writeCloseMutex.Lock() + defer b.writeCloseMutex.Unlock() + + if b.closed { + return io.ErrClosedPipe + } + + b.buffer <- entry + b.wroteEntries++ + return nil +} + +func (b *BufferedReadWriter) QueuedEntries() int { + b.initOnce.Do(b.init) + return len(b.buffer) +} + +// Close can be called in `Writer` and `Reader` context. +// +// In `Reader` it means that no more values will be read so writer can +// stop writing to a buffer (`io.ErrClosedPipe` will be returned for calls to +// `Write()`). +// +// In `Writer` it means that no more values will be written so reader +// should start returning `io.EOF` error after returning all queued values. 
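+//
+// A short sketch of the writer/reader hand-off (mirroring the behaviour
+// exercised by TestBuffer in this change; error handling omitted for brevity):
+//
+//	buf := &BufferedReadWriter{}
+//	go func() {
+//		buf.Write("entry")
+//		buf.Close() // reader drains the buffer, then receives io.EOF
+//	}()
+//	for {
+//		_, err := buf.Read()
+//		if err == io.EOF {
+//			break
+//		}
+//	}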
+func (b *BufferedReadWriter) Close() error { + b.initOnce.Do(b.init) + b.closeOnce.Do(b.close) + return nil +} + +var _ Reader = &BufferedReadWriter{} +var _ Writer = &BufferedReadWriter{} diff --git a/exp/support/pipeline/buffered_read_writer_test.go b/exp/support/pipeline/buffered_read_writer_test.go new file mode 100644 index 0000000000..8254059a2e --- /dev/null +++ b/exp/support/pipeline/buffered_read_writer_test.go @@ -0,0 +1,45 @@ +package pipeline + +import ( + "io" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBuffer(t *testing.T) { + buffer := &BufferedReadWriter{} + write := 20 + read := 0 + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + for { + _, err := buffer.Read() + if err != nil { + if err == io.EOF { + break + } else { + panic(err) + } + } + read++ + } + }() + + go func() { + defer wg.Done() + for i := 0; i < write; i++ { + buffer.Write("test") + } + buffer.Close() + }() + + wg.Wait() + + assert.Equal(t, 20, read) +} diff --git a/exp/support/pipeline/doc.go b/exp/support/pipeline/doc.go new file mode 100644 index 0000000000..0ba7674d76 --- /dev/null +++ b/exp/support/pipeline/doc.go @@ -0,0 +1,2 @@ +// TODO explain here how to write wrappers to use without casting from `interface{}`. +package pipeline diff --git a/exp/support/pipeline/main.go b/exp/support/pipeline/main.go new file mode 100644 index 0000000000..766b4548fb --- /dev/null +++ b/exp/support/pipeline/main.go @@ -0,0 +1,198 @@ +package pipeline + +import ( + "context" + "sync" + + "github.com/stellar/go/support/errors" +) + +// ErrShutdown is an error send to post-processing hook when pipeline has been +// shutdown. +var ErrShutdown = errors.New("Pipeline shutdown") + +// BufferedReadWriter implements Reader and Writer and acts +// like a pipe. All writes are queued in a buffered channel and are waiting +// to be consumed. +// +// Used internally by Pipeline but also helpful for testing. +type BufferedReadWriter struct { + initOnce sync.Once + + context context.Context + + // readEntriesMutex protects readEntries variable + readEntriesMutex sync.Mutex + readEntries int + + // writeCloseMutex protects from writing to a closed buffer + // and wroteEntries variable + writeCloseMutex sync.Mutex + wroteEntries int + + // closeOnce protects from closing buffer twice + closeOnce sync.Once + buffer chan interface{} + closed bool +} + +type multiWriter struct { + writers []Writer + + mutex sync.Mutex + closeAfter int + wroteEntries int +} + +type Pipeline struct { + root *PipelineNode + + preProcessingHooks []func(context.Context) (context.Context, error) + postProcessingHooks []func(context.Context, error) error + + // mutex protects internal fields that may be modified from + // multiple go routines. + mutex sync.Mutex + running bool + shutDown bool + cancelled bool + cancelFunc context.CancelFunc +} + +// PipelineInterface is an interface that defines common pipeline methods +// in structs that embed Pipeline. +type PipelineInterface interface { + SetRoot(rootProcessor *PipelineNode) + // AddPreProcessingHook adds a pre-processing hook function. Returned + // context.Context will be passed to the processors. If error is returned + // pipeline will not start processing data. 
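+	//
+	// A hook is a plain function; for example (a sketch based on the pipeline
+	// tests in this change) it can derive or annotate the context before
+	// processors run:
+	//
+	//	p.AddPreProcessingHook(func(ctx context.Context) (context.Context, error) {
+	//		// returning a non-nil error here aborts the pipeline run
+	//		return ctx, nil
+	//	})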
+ AddPreProcessingHook(hook func(context.Context) (context.Context, error)) + AddPostProcessingHook(hook func(context.Context, error) error) + Shutdown() + PrintStatus() +} + +var _ PipelineInterface = &Pipeline{} + +type PipelineNode struct { + // Remember to update reset() method if you ever add a new field to this struct! + Processor Processor + Children []*PipelineNode + + readEntries int + readsPerSecond int + queuedEntries int + wroteEntries int + writesPerSecond int +} + +// Reader interface placeholder +type Reader interface { + // GetContext returns context with values of the current reader. Can be + // helpful to provide data to structs that wrap `Reader`. + GetContext() context.Context + // Read should return next entry. If there are no more + // entries it should return `io.EOF` error. + Read() (interface{}, error) + // Close should be called when reading is finished. This is especially + // helpful when there are still some entries available so reader can stop + // streaming them. + Close() error +} + +// Writer interface placeholder +type Writer interface { + // Write is used to pass entry to the next processor. It can return + // `io.ErrClosedPipe` when the pipe between processors has been closed meaning + // that next processor does not need more data. In such situation the current + // processor can terminate as sending more entries to a `Writer` + // does not make sense (will not be read). + Write(interface{}) error + // Close should be called when there are no more entries + // to write. + Close() error +} + +// Processor defines methods required by the processing pipeline. +type Processor interface { + // Process is a main method of `Processor`. It receives `Reader` + // that contains object passed down the pipeline from the previous procesor. Writes to + // `Writer` will be passed to the next processor. WARNING! `Process` + // should **always** call `Close()` on `Writer` when no more object will be + // written and `Close()` on `Reader` when reading is finished. + // Data required by following processors (like aggregated data) should be saved in + // `Store`. Read `Store` godoc to understand how to use it. + // The first argument `ctx` is a context with cancel. Processor should monitor + // `ctx.Done()` channel and exit when it returns a value. This can happen when + // pipeline execution is interrupted, ex. due to an error. + // + // Given all information above `Process` should always look like this: + // + // func (p *Processor) Process(ctx context.Context, store *pipeline.Store, r Reader, w Writer) error { + // defer r.Close() + // defer w.Close() + // + // // Some pre code... + // + // for { + // entry, err := r.Read() + // if err != nil { + // if err == io.EOF { + // break + // } else { + // return errors.Wrap(err, "Error reading from Reader in [ProcessorName]") + // } + // } + // + // // Process entry... + // + // // Write to Writer if needed but exit if pipe is closed: + // err = w.Write(entry) + // if err != nil { + // if err == io.ErrClosedPipe { + // // Reader does not need more data + // return nil + // } + // return errors.Wrap(err, "Error writing to Writer in [ProcessorName]") + // } + // + // // Return errors if needed... + // + // // Exit when pipeline terminated due to an error in another processor... + // select { + // case <-ctx.Done(): + // return nil + // default: + // continue + // } + // } + // + // // Some post code... + // + // return nil + // } + Process(context.Context, *Store, Reader, Writer) error + // Returns processor name. 
Helpful for errors, debugging and reports.
+	Name() string
+	// Reset resets internal state of the processor. This is run by the pipeline
+	// every time before the pipeline starts running.
+	// It is extremely important to implement this method, otherwise internal
+	// state of the processor will be maintained between pipeline runs and may
+	// result in invalid data.
+	Reset()
+}
+
+// Store allows storing data connected to pipeline execution.
+// It exposes `Lock()` and `Unlock()` methods that must be called
+// when accessing the `Store` for both `Put` and `Get` calls.
+//
+// Example (incrementing a number):
+// s.Lock()
+// v := s.Get("value")
+// s.Put("value", v.(int)+1)
+// s.Unlock()
+type Store struct {
+	sync.Mutex
+	initOnce sync.Once
+	values   map[string]interface{}
+}
diff --git a/exp/support/pipeline/multi_writer.go b/exp/support/pipeline/multi_writer.go
new file mode 100644
index 0000000000..b34e1bc53e
--- /dev/null
+++ b/exp/support/pipeline/multi_writer.go
@@ -0,0 +1,67 @@
+package pipeline
+
+import (
+	"io"
+
+	"github.com/stellar/go/support/errors"
+)
+
+func (m *multiWriter) Write(entry interface{}) error {
+	m.mutex.Lock()
+	m.wroteEntries++
+	m.mutex.Unlock()
+
+	results := make(chan error, len(m.writers))
+
+	for _, w := range m.writers {
+		go func(w Writer) {
+			// We can keep sending entries even when io.ErrClosedPipe is returned
+			// as BufferedReadWriter will ignore them (won't add them to
+			// a channel).
+			results <- w.Write(entry)
+		}(w)
+	}
+
+	countClosedPipes := 0
+	for range m.writers {
+		err := <-results
+		if err != nil {
+			if err == io.ErrClosedPipe {
+				countClosedPipes++
+			} else {
+				return err
+			}
+		}
+	}
+
+	// When all pipes are closed return `io.ErrClosedPipe` because there are no
+	// active readers anymore.
+	if countClosedPipes == len(m.writers) {
+		return io.ErrClosedPipe
+	}
+
+	return nil
+}
+
+func (m *multiWriter) Close() error {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+
+	m.closeAfter--
+	if m.closeAfter > 0 {
+		return nil
+	} else if m.closeAfter < 0 {
+		return errors.New("Close() called more times than closeAfter")
+	}
+
+	for _, w := range m.writers {
+		err := w.Close()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+var _ Writer = &multiWriter{}
diff --git a/exp/support/pipeline/pipeline.go b/exp/support/pipeline/pipeline.go
new file mode 100644
index 0000000000..edefb14dd2
--- /dev/null
+++ b/exp/support/pipeline/pipeline.go
@@ -0,0 +1,269 @@
+package pipeline
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/stellar/go/support/errors"
+)
+
+func New(rootProcessor *PipelineNode) *Pipeline {
+	return &Pipeline{root: rootProcessor}
+}
+
+func Node(processor Processor) *PipelineNode {
+	return &PipelineNode{
+		Processor: processor,
+	}
+}
+
+func (p *Pipeline) PrintStatus() {
+	p.printNodeStatus(p.root, 0)
+}
+
+// AddPreProcessingHook adds a pre-processing hook. Context will be the main
+// reader context.
+func (p *Pipeline) AddPreProcessingHook(hook func(context.Context) (context.Context, error)) {
+	p.preProcessingHooks = append(p.preProcessingHooks, hook)
+}
+
+// AddPostProcessingHook adds a post-processing hook. Context will be the main
+// reader context; the error will be nil if processing was successful,
+// ErrShutdown if the pipeline was shut down, and non-nil otherwise.
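+//
+// For example (a sketch based on the pipeline tests in this change), a hook
+// can distinguish a shutdown from a genuine failure:
+//
+//	p.AddPostProcessingHook(func(ctx context.Context, err error) error {
+//		if err == ErrShutdown {
+//			// pipeline was stopped via Shutdown(), not a failure
+//			return nil
+//		}
+//		// inspect or log err here; a non-nil return is surfaced by Process
+//		return nil
+//	})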
+func (p *Pipeline) AddPostProcessingHook(hook func(context.Context, error) error) { + p.postProcessingHooks = append(p.postProcessingHooks, hook) +} + +func (p *Pipeline) printNodeStatus(node *PipelineNode, level int) { + fmt.Print(strings.Repeat(" ", level)) + + var wrRatio = float32(0) + if node.readEntries > 0 { + wrRatio = float32(node.wroteEntries) / float32(node.readEntries) + } + + icon := "" + if node.queuedEntries > bufferSize/10*9 { + icon = "⚠️ " + } + + fmt.Printf( + "β”” %s%s read=%d (queued=%d rps=%d) wrote=%d (w/r ratio = %1.5f)\n", + icon, + node.Processor.Name(), + node.readEntries, + node.queuedEntries, + node.readsPerSecond, + node.wroteEntries, + wrRatio, + ) + + for _, child := range node.Children { + p.printNodeStatus(child, level+1) + } +} + +func (p *Pipeline) SetRoot(rootProcessor *PipelineNode) { + p.root = rootProcessor +} + +// setRunning protects from processing more than once at a time. +func (p *Pipeline) setRunning(setRunning bool) error { + if setRunning { + if p.running { + panic("Cannot start processing, pipeline is running...") + } + + if p.shutDown { + return ErrShutdown + } + } + + p.running = setRunning + return nil +} + +// IsRunning returns true if pipeline is running +func (p *Pipeline) IsRunning() bool { + // Protects internal fields + p.mutex.Lock() + defer p.mutex.Unlock() + return p.running +} + +// reset resets internal state of the pipeline and all the nodes and processors. +func (p *Pipeline) reset() { + p.cancelled = false + p.resetNode(p.root) +} + +func (p *Pipeline) sendPreProcessingHooks(ctx context.Context) (context.Context, error) { + var err error + + for _, hook := range p.preProcessingHooks { + ctx, err = hook(ctx) + if err != nil { + return ctx, err + } + } + + return ctx, nil +} + +func (p *Pipeline) sendPostProcessingHooks(ctx context.Context, processingError error) error { + for _, hook := range p.postProcessingHooks { + err := hook(ctx, processingError) + if err != nil { + return err + } + } + + return nil +} + +// resetNode resets internal state of the pipeline node and internal processor and +// calls itself recursively on all of the children. +func (p *Pipeline) resetNode(node *PipelineNode) { + node.reset() + for _, child := range node.Children { + p.resetNode(child) + } +} + +// Process starts pipeline. Return channel will return if an error occured in +// any of the processors or any of the pipeline hooks. Will return ErrShutdown +// if the pipeline was shutdown. 
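+//
+// Typical call pattern (a sketch; reader is any Reader implementation, such as
+// the BufferedReadWriter above or the SimpleReader used in the tests):
+//
+//	if err := <-p.Process(reader); err != nil {
+//		// handle the error; it may be ErrShutdown if Shutdown() was called
+//	}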
+func (p *Pipeline) Process(reader Reader) <-chan error { + // Protects internal fields + p.mutex.Lock() + defer p.mutex.Unlock() + + errorChan := make(chan error, 1) + err := p.setRunning(true) + if err != nil { + errorChan <- err + return errorChan + } + p.reset() + + ctx, err := p.sendPreProcessingHooks(reader.GetContext()) + if err != nil { + p.setRunning(false) + errorChan <- errors.Wrap(err, "Error running pre-hook") + return errorChan + } + + ctx, p.cancelFunc = context.WithCancel(ctx) + return p.processStateNode(ctx, &Store{}, p.root, reader) +} + +func (p *Pipeline) processStateNode(ctx context.Context, store *Store, node *PipelineNode, reader Reader) <-chan error { + outputs := make([]Writer, len(node.Children)) + + for i := range outputs { + outputs[i] = &BufferedReadWriter{ + context: reader.GetContext(), + } + } + + var wg sync.WaitGroup + + writer := &multiWriter{ + writers: outputs, + closeAfter: 1, + } + + var processingError error + + wg.Add(1) + go func() { + defer wg.Done() + + err := node.Processor.Process(ctx, store, reader, writer) + if err != nil { + // Protects from cancelling twice and sending multiple errors to err channel + p.mutex.Lock() + defer p.mutex.Unlock() + + if p.cancelled { + return + } + + wrappedErr := errors.Wrap(err, fmt.Sprintf("Processor %s errored", node.Processor.Name())) + + p.cancelled = true + p.cancelFunc() + processingError = wrappedErr + } + }() + + for i, child := range node.Children { + wg.Add(1) + go func(i int, child *PipelineNode) { + defer wg.Done() + err := <-p.processStateNode(ctx, store, child, outputs[i].(*BufferedReadWriter)) + if err != nil { + processingError = err + } + }(i, child) + } + + errorChan := make(chan error, 1) + + go func() { + wg.Wait() + if node == p.root { + // If pipeline processing is finished run post-hooks and send error + // if not already sent. + var returnError error + var hookError error + + hookError = processingError + if hookError == nil && p.shutDown { + hookError = ErrShutdown + } + + err := p.sendPostProcessingHooks(reader.GetContext(), hookError) + if err != nil { + returnError = errors.Wrap(err, "Error running post-hook") + } else { + returnError = processingError + } + + if returnError == nil && p.shutDown { + returnError = ErrShutdown + } + + p.mutex.Lock() + p.setRunning(false) + p.mutex.Unlock() + + errorChan <- returnError + } else { + // For non-root node just send an error + errorChan <- processingError + } + }() + + return errorChan +} + +// Shutdown stops the processing. Please note that post-processing hooks will +// receive ErrShutdown when Shutdown() is called. +func (p *Pipeline) Shutdown() { + // Protects internal fields + p.mutex.Lock() + defer p.mutex.Unlock() + + if p.cancelled { + return + } + p.shutDown = true + p.cancelled = true + // It's possible that Shutdown will be called before first run. 
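+	// In that case cancelFunc has not been set yet (it is only assigned in
+	// Process), so guard the call to avoid invoking a nil function.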
+ if p.cancelFunc != nil { + p.cancelFunc() + } +} diff --git a/exp/support/pipeline/pipeline_node.go b/exp/support/pipeline/pipeline_node.go new file mode 100644 index 0000000000..70d51dbf12 --- /dev/null +++ b/exp/support/pipeline/pipeline_node.go @@ -0,0 +1,16 @@ +package pipeline + +func (p *PipelineNode) Pipe(children ...*PipelineNode) *PipelineNode { + p.Children = children + return p +} + +func (p *PipelineNode) reset() { + p.Processor.Reset() + + p.readEntries = 0 + p.readsPerSecond = 0 + p.queuedEntries = 0 + p.wroteEntries = 0 + p.writesPerSecond = 0 +} diff --git a/exp/support/pipeline/pipeline_test.go b/exp/support/pipeline/pipeline_test.go new file mode 100644 index 0000000000..b03c570b2a --- /dev/null +++ b/exp/support/pipeline/pipeline_test.go @@ -0,0 +1,230 @@ +package pipeline_test + +import ( + "context" + "io" + "sync" + "testing" + "time" + + "github.com/stellar/go/exp/support/pipeline" + "github.com/stellar/go/support/errors" + "github.com/stretchr/testify/assert" +) + +func TestPipelineCanBeProcessedAgain(t *testing.T) { + p := pipeline.New( + pipeline.Node(&NoOpProcessor{}), + ) + + assert.NoError(t, <-p.Process(&SimpleReader{CountObject: 10})) + assert.NoError(t, <-p.Process(&SimpleReader{CountObject: 20})) +} + +func TestCannotRunProcessOnRunningPipeline(t *testing.T) { + p := pipeline.New( + pipeline.Node(&NoOpProcessor{}), + ) + + go p.Process(&SimpleReader{}) + defer p.Shutdown() + time.Sleep(100 * time.Millisecond) + assert.Panics(t, func() { + p.Process(&SimpleReader{}) + }) +} + +func TestNoErrorsOnSuccess(t *testing.T) { + p := pipeline.New( + pipeline.Node(&NoOpProcessor{}), + ) + + assert.NoError(t, <-p.Process(&SimpleReader{CountObject: 10})) +} + +func TestErrorsOnFailure(t *testing.T) { + p := pipeline.New( + pipeline.Node(&NoOpProcessor{ReturnError: true}), + ) + + err := <-p.Process(&SimpleReader{CountObject: 10}) + assert.Error(t, err) + assert.Equal(t, "Processor NoOpProcessor errored: Test error", err.Error()) +} + +func TestHooksCalled(t *testing.T) { + p := pipeline.New( + pipeline.Node(&NoOpProcessor{}), + ) + + preHookCalled := false + p.AddPreProcessingHook(func(ctx context.Context) (context.Context, error) { + preHookCalled = true + return ctx, nil + }) + + postHookCalled := false + p.AddPostProcessingHook(func(ctx context.Context, err error) error { + postHookCalled = true + return nil + }) + + err := <-p.Process(&SimpleReader{CountObject: 10}) + assert.NoError(t, err) + assert.True(t, preHookCalled, "pre-hook not called") + assert.True(t, postHookCalled, "post-hook not called") +} + +func TestPostHooksCalledWithError(t *testing.T) { + p := pipeline.New( + pipeline.Node(&NoOpProcessor{ReturnError: true}), + ) + + errChan := make(chan error, 1) + + p.AddPostProcessingHook(func(ctx context.Context, err error) error { + errChan <- err + return nil + }) + + err := <-p.Process(&SimpleReader{CountObject: 10}) + assert.Error(t, err) + assert.Equal(t, "Processor NoOpProcessor errored: Test error", err.Error()) + + hookErr := <-errChan + assert.Error(t, hookErr) + assert.Equal(t, "Processor NoOpProcessor errored: Test error", hookErr.Error()) +} + +func TestProcessReturnsErrorWhenPostHooksErrors(t *testing.T) { + p := pipeline.New( + pipeline.Node(&NoOpProcessor{}), + ) + + p.AddPostProcessingHook(func(ctx context.Context, err error) error { + return errors.New("post-hook error") + }) + + err := <-p.Process(&SimpleReader{CountObject: 10}) + assert.Error(t, err) + assert.Equal(t, "Error running post-hook: post-hook error", err.Error()) +} + +func 
TestPostHookWhenShutDown(t *testing.T) { + done := make(chan bool) + p := pipeline.New( + pipeline.Node(&NoOpProcessor{}), + ) + + p.AddPostProcessingHook(func(ctx context.Context, err error) error { + assert.Equal(t, pipeline.ErrShutdown, err) + done <- true + return nil + }) + + go p.Process(&SimpleReader{}) + time.Sleep(100 * time.Millisecond) + p.Shutdown() + <-done +} + +func TestProcessShutdown(t *testing.T) { + done := make(chan bool) + p := pipeline.New( + pipeline.Node(&WaitForShutDownProcessor{}), + ) + + go func() { + err := <-p.Process(&SimpleReader{}) + assert.Equal(t, pipeline.ErrShutdown, err) + done <- true + }() + time.Sleep(100 * time.Millisecond) + p.Shutdown() + <-done + + // Calling it again should also return error (different code path) + err := <-p.Process(&SimpleReader{}) + assert.Equal(t, pipeline.ErrShutdown, err) +} + +// SimpleReader sends CountObject objects. If CountObject = 0 it +// streams infinite number of objects. +type SimpleReader struct { + sync.Mutex + CountObject int + + sent int +} + +func (r *SimpleReader) GetContext() context.Context { + return context.Background() +} + +func (r *SimpleReader) Read() (interface{}, error) { + r.Lock() + defer r.Unlock() + + if r.CountObject != 0 && r.sent == r.CountObject { + return nil, io.EOF + } + + r.sent++ + return "test", nil +} + +func (r *SimpleReader) Close() error { + return nil +} + +type NoOpProcessor struct { + ReturnError bool +} + +func (p *NoOpProcessor) Process(ctx context.Context, store *pipeline.Store, r pipeline.Reader, w pipeline.Writer) error { + defer r.Close() + defer w.Close() + + for { + _, err := r.Read() + if err != nil { + if err == io.EOF { + break + } else { + return errors.Wrap(err, "Error reading from Reader") + } + } + + if p.ReturnError { + return errors.New("Test error") + } + + select { + case <-ctx.Done(): + return nil + default: + continue + } + } + + return nil +} + +func (p *NoOpProcessor) Name() string { + return "NoOpProcessor" +} + +func (p *NoOpProcessor) Reset() {} + +type WaitForShutDownProcessor struct{} + +func (p *WaitForShutDownProcessor) Process(ctx context.Context, store *pipeline.Store, r pipeline.Reader, w pipeline.Writer) error { + <-ctx.Done() + return nil +} + +func (p *WaitForShutDownProcessor) Name() string { + return "WaitForShutDownProcessor" +} + +func (p *WaitForShutDownProcessor) Reset() {} diff --git a/exp/support/pipeline/store.go b/exp/support/pipeline/store.go new file mode 100644 index 0000000000..6a294f3fab --- /dev/null +++ b/exp/support/pipeline/store.go @@ -0,0 +1,15 @@ +package pipeline + +func (s *Store) init() { + s.values = make(map[string]interface{}) +} + +func (s *Store) Put(name string, value interface{}) { + s.initOnce.Do(s.init) + s.values[name] = value +} + +func (s *Store) Get(name string) interface{} { + s.initOnce.Do(s.init) + return s.values[name] +} diff --git a/exp/support/pipeline/store_test.go b/exp/support/pipeline/store_test.go new file mode 100644 index 0000000000..b1ae158bb8 --- /dev/null +++ b/exp/support/pipeline/store_test.go @@ -0,0 +1,22 @@ +package pipeline + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestStore(t *testing.T) { + var s Store + + s.Lock() + s.Put("value", 0) + s.Unlock() + + s.Lock() + v := s.Get("value") + s.Put("value", v.(int)+1) + s.Unlock() + + assert.Equal(t, 1, s.Get("value")) +} diff --git a/exp/tools/captive-core-start-tester/main.go b/exp/tools/captive-core-start-tester/main.go new file mode 100644 index 0000000000..7e44dea282 --- /dev/null +++ 
b/exp/tools/captive-core-start-tester/main.go @@ -0,0 +1,59 @@ +package main + +import ( + "context" + "fmt" + + "github.com/stellar/go/ingest/ledgerbackend" +) + +// This little app helped testing CaptiveStellarCore.runFromParams on a living +// Stellar-Core. Adding it to the repo because it can be useful in a future if +// Stellar-Core behaviour changes again. +// To make it work, run standalone network (RUN_STANDALONE=false to allow outside +// connections) and update paths below. +func main() { + // check(1) // err expected, cannot stream in captive core + checkLedgers := []uint32{2, 3, 62, 63, 64, 65, 126, 127, 128} + for _, ledger := range checkLedgers { + ok := check(ledger) + if !ok { + panic("test failed error") + } + } +} + +func check(ledger uint32) bool { + c, err := ledgerbackend.NewCaptive( + ledgerbackend.CaptiveCoreConfig{ + BinaryPath: "stellar-core", + NetworkPassphrase: "Standalone Network ; February 2017", + HistoryArchiveURLs: []string{"http://localhost:1570"}, + }, + ) + if err != nil { + panic(err) + } + defer c.Close() + + ctx := context.Background() + err = c.PrepareRange(ctx, ledgerbackend.UnboundedRange(ledger)) + if err != nil { + fmt.Println(err) + return false + } + + meta, err := c.GetLedger(ctx, ledger) + if err != nil { + fmt.Println(err) + return false + } + + if meta.LedgerSequence() != ledger { + fmt.Println("wrong ledger", meta.LedgerSequence()) + return false + } + + fmt.Println(ledger, "ok") + return true +} diff --git a/exp/tools/dump-ledger-state/Dockerfile b/exp/tools/dump-ledger-state/Dockerfile new file mode 100644 index 0000000000..422ca25762 --- /dev/null +++ b/exp/tools/dump-ledger-state/Dockerfile @@ -0,0 +1,45 @@ +FROM ubuntu:20.04 + +ENV STELLAR_CORE_VERSION=18.1.0-760.rc1.dc5f5a392.focal +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl wget gnupg apt-utils +RUN wget -qO - https://apt.stellar.org/SDF.asc | APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=true apt-key add - +RUN echo "deb https://apt.stellar.org focal stable" >/etc/apt/sources.list.d/SDF.list +RUN echo "deb https://apt.stellar.org focal unstable" >/etc/apt/sources.list.d/SDF-unstable.list +RUN apt-get update -y + +RUN apt-get install -y stellar-core=${STELLAR_CORE_VERSION} jq +RUN apt-get clean +RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ $(env -i bash -c '. /etc/os-release; echo $VERSION_CODENAME')-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list && \ + wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \ + apt-get update && \ + DEBIAN_FRONTEND="noninteractive" apt-get install -y postgresql-9.6 postgresql-contrib-9.6 postgresql-client-9.6 + +# Create a PostgreSQL role named `circleci` and then create a database `core` owned by the `circleci` role. +RUN su - postgres -c "/etc/init.d/postgresql start && psql --command \"CREATE USER circleci WITH SUPERUSER;\" && createdb -O circleci core" + +# Adjust PostgreSQL configuration so that remote connections to the +# database are possible. +RUN echo "host all all all trust" > /etc/postgresql/9.6/main/pg_hba.conf + +# And add `listen_addresses` to `/etc/postgresql/9.6/main/postgresql.conf` +RUN echo "listen_addresses='*'" >> /etc/postgresql/9.6/main/postgresql.conf + +RUN curl -sL https://storage.googleapis.com/golang/go1.16.5.linux-amd64.tar.gz | tar -C /usr/local -xz +RUN ln -s /usr/local/go/bin/go /usr/local/bin/go +WORKDIR /go/src/github.com/stellar/go +COPY go.mod go.sum ./ +RUN go mod download +COPY . 
./
+
+ENV PGPORT=5432
+ENV PGUSER=circleci
+ENV PGHOST=localhost
+
+WORKDIR /go/src/github.com/stellar/go/exp/tools/dump-ledger-state
+
+ARG GITCOMMIT
+ENV GITCOMMIT=${GITCOMMIT}
+
+ENTRYPOINT ["./docker-entrypoint.sh"]
diff --git a/exp/tools/dump-ledger-state/README.md b/exp/tools/dump-ledger-state/README.md
new file mode 100644
index 0000000000..17376bd17d
--- /dev/null
+++ b/exp/tools/dump-ledger-state/README.md
@@ -0,0 +1,15 @@
+# dump-ledger-state
+
+This tool dumps the ledger state from history archive buckets to six separate files:
+* accounts.csv
+* accountdata.csv
+* offers.csv
+* trustlines.csv
+* claimablebalances.csv
+* pools.csv
+
+Its primary use is to test `SingleLedgerStateReader`. The test script (`run_test.sh`):
+1. Runs `dump-ledger-state`.
+2. Syncs stellar-core to the same checkpoint: `stellar-core catchup [ledger]/1`.
+3. Dumps the stellar-core DB using the `dump_core_db.sh` script.
+4. Diffs the results using the `diff_test.sh` script.
diff --git a/exp/tools/dump-ledger-state/diff_test.sh b/exp/tools/dump-ledger-state/diff_test.sh
new file mode 100755
index 0000000000..69295b2a82
--- /dev/null
+++ b/exp/tools/dump-ledger-state/diff_test.sh
@@ -0,0 +1,36 @@
+ENTRIES=(accounts accountdata offers trustlines claimablebalances pools)
+
+echo "Sorting dump-ledger-state output files..."
+for i in "${ENTRIES[@]}"
+do
+  if test -f "${i}_sorted.csv"; then
+    echo "Skipping, ${i}_sorted.csv exists (remove if out of date to sort again)"
+    continue
+  fi
+  wc -l ${i}.csv
+  sort -S 500M -o ${i}_sorted.csv ${i}.csv
+done
+
+echo "Sorting stellar-core output files..."
+for i in "${ENTRIES[@]}"
+do
+  if test -f "${i}_core_sorted.csv"; then
+    echo "Skipping, ${i}_core_sorted.csv exists (remove if out of date to sort again)"
+    continue
+  fi
+  wc -l ${i}_core.csv
+  sort -S 500M -o ${i}_core_sorted.csv ${i}_core.csv
+done
+
+echo "Checking diffs..."
+for type in "${ENTRIES[@]}"
+do
+  diff -q ${type}_core_sorted.csv ${type}_sorted.csv
+  if [ "$?" -ne "0" ]
+  then
+    echo "ERROR: $type does NOT match";
+    exit -1
+  else
+    echo "$type OK";
+  fi
+done
diff --git a/exp/tools/dump-ledger-state/docker-entrypoint.sh b/exp/tools/dump-ledger-state/docker-entrypoint.sh
new file mode 100755
index 0000000000..f1451c2ad5
--- /dev/null
+++ b/exp/tools/dump-ledger-state/docker-entrypoint.sh
@@ -0,0 +1,39 @@
+#! /bin/bash
+set -e
+
+/etc/init.d/postgresql start
+
+while ! psql -U circleci -d core -h localhost -p 5432 -c 'select 1' >/dev/null 2>&1; do
+  echo "Waiting for postgres to be available..."
+  sleep 1
+done
+
+echo "using version $(stellar-core version)"
+
+if [ -z ${TESTNET+x} ]; then
+  stellar-core --conf ./stellar-core.cfg new-db
+else
+  stellar-core --conf ./stellar-core-testnet.cfg new-db
+fi
+
+if [ -z ${LATEST_LEDGER+x} ]; then
+  # Get latest ledger
+  echo "Getting latest checkpoint ledger..."
+  if [ -z ${TESTNET+x} ]; then
+    export LATEST_LEDGER=`curl -s http://history.stellar.org/prd/core-live/core_live_001/.well-known/stellar-history.json | jq -r '.currentLedger'`
+  else
+    export LATEST_LEDGER=`curl -s http://history.stellar.org/prd/core-testnet/core_testnet_001/.well-known/stellar-history.json | jq -r '.currentLedger'`
+  fi
+fi
+
+if [[ -z "${LATEST_LEDGER}" ]]; then
+  echo "could not obtain latest ledger"
+  exit 1
+fi
+
+echo "Latest ledger: $LATEST_LEDGER"
+
+if ! ./run_test.sh; then
+  echo "ingestion dump (git commit \`$GITCOMMIT\`) of ledger \`$LATEST_LEDGER\` does not match stellar core db."
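+  # Exit non-zero so the CI job fails loudly instead of passing on a mismatch.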
+  exit 1
+fi
\ No newline at end of file
diff --git a/exp/tools/dump-ledger-state/dump_core_db.sh b/exp/tools/dump-ledger-state/dump_core_db.sh
new file mode 100755
index 0000000000..ebd8871a47
--- /dev/null
+++ b/exp/tools/dump-ledger-state/dump_core_db.sh
@@ -0,0 +1,27 @@
+# Get state from the stellar-core DB; columns match the CSV printer.
+# FETCH_COUNT makes psql use a cursor-based method of fetching rows on CircleCI (less RAM usage):
+# https://dba.stackexchange.com/a/101510
+
+echo "Fetching accounts from stellar-core DB..."
+psql -d core -t -A -F"," --variable="FETCH_COUNT=10000" -c "select accountid, balance, seqnum, numsubentries, inflationdest, homedomain, thresholds, flags, COALESCE(extension, 'AAAAAA=='), signers, ledgerext from accounts" > accounts_core.csv
+rm accounts_core_sorted.csv || true # Remove if it exists, in case the original files are rebuilt
+
+echo "Fetching accountdata from stellar-core DB..."
+psql -d core -t -A -F"," --variable="FETCH_COUNT=10000" -c "select accountid, dataname, datavalue, COALESCE(extension, 'AAAAAA=='), ledgerext from accountdata" > accountdata_core.csv
+rm accountdata_core_sorted.csv || true # Remove if it exists, in case the original files are rebuilt
+
+echo "Fetching offers from stellar-core DB..."
+psql -d core -t -A -F"," --variable="FETCH_COUNT=10000" -c "select sellerid, offerid, sellingasset, buyingasset, amount, pricen, priced, flags, COALESCE(extension, 'AAAAAA=='), ledgerext from offers" > offers_core.csv
+rm offers_core_sorted.csv || true # Remove if it exists, in case the original files are rebuilt
+
+echo "Fetching trustlines from stellar-core DB..."
+psql -d core -t -A -F"," --variable="FETCH_COUNT=10000" -c "select ledgerentry from trustlines" > trustlines_core.csv
+rm trustlines_core_sorted.csv || true # Remove if it exists, in case the original files are rebuilt
+
+echo "Fetching claimable balances from stellar-core DB..."
+psql -d core -t -A -F"," --variable="FETCH_COUNT=10000" -c "select balanceid, ledgerentry from claimablebalance" > claimablebalances_core.csv
+rm claimablebalances_core_sorted.csv || true # Remove if it exists, in case the original files are rebuilt
+
+echo "Fetching liquidity pools from stellar-core DB..."
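+# Like trustlines above, liquidity pools are dumped as a single base64-encoded
+# LedgerEntry XDR column, matching what the ingest dump writes for them.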
+psql -d core -t -A -F"," --variable="FETCH_COUNT=10000" -c "select ledgerentry from liquiditypool" > pools_core.csv +rm pools_core_sorted.csv || true # Remove if exist in case original files are rebuilt \ No newline at end of file diff --git a/exp/tools/dump-ledger-state/main.go b/exp/tools/dump-ledger-state/main.go new file mode 100644 index 0000000000..ce3ef58f5e --- /dev/null +++ b/exp/tools/dump-ledger-state/main.go @@ -0,0 +1,358 @@ +package main + +import ( + "context" + "encoding/base64" + "encoding/csv" + "flag" + "fmt" + "io" + "os" + "runtime" + "strconv" + "time" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +// csvMap maintains a mapping from ledger entry type to csv file +type csvMap struct { + files map[xdr.LedgerEntryType]*os.File + writers map[xdr.LedgerEntryType]*csv.Writer +} + +// newCSVMap constructs an empty csvMap instance +func newCSVMap() csvMap { + return csvMap{ + files: map[xdr.LedgerEntryType]*os.File{}, + writers: map[xdr.LedgerEntryType]*csv.Writer{}, + } +} + +// put creates a new file with the given file name and links that file to the +// given ledger entry type +func (c csvMap) put(entryType xdr.LedgerEntryType, fileName string) error { + if _, ok := c.files[entryType]; ok { + return errors.Errorf("entry type %s is already present in the file set", fileName) + } + + file, err := os.Create(fileName) + if err != nil { + return errors.Wrapf(err, "could not open file %s", fileName) + } + + c.files[entryType] = file + c.writers[entryType] = csv.NewWriter(file) + + return nil +} + +// get returns a csv writer for the given ledger entry type if it exists in the mapping +func (c csvMap) get(entryType xdr.LedgerEntryType) (*csv.Writer, bool) { + writer, ok := c.writers[entryType] + return writer, ok +} + +// close will close all files contained in the mapping +func (c csvMap) close() { + for entryType, file := range c.files { + if err := file.Close(); err != nil { + log.WithField("type", entryType.String()).Warn("could not close csv file") + } + delete(c.files, entryType) + delete(c.writers, entryType) + } +} + +type csvProcessor struct { + files csvMap + changeStats *ingest.StatsChangeProcessor +} + +func (processor csvProcessor) ProcessChange(change ingest.Change) error { + csvWriter, ok := processor.files.get(change.Type) + if !ok { + return nil + } + if err := processor.changeStats.ProcessChange(context.Background(), change); err != nil { + return err + } + + legerExt, err := xdr.MarshalBase64(change.Post.Ext) + if err != nil { + return err + } + + switch change.Type { + case xdr.LedgerEntryTypeAccount: + account := change.Post.Data.MustAccount() + + inflationDest := "" + if account.InflationDest != nil { + inflationDest = account.InflationDest.Address() + } + + var signers string + if len(account.Signers) > 0 { + var err error + signers, err = xdr.MarshalBase64(account.Signers) + if err != nil { + return err + } + } + + accountExt, err := xdr.MarshalBase64(account.Ext) + if err != nil { + return err + } + + csvWriter.Write([]string{ + account.AccountId.Address(), + strconv.FormatInt(int64(account.Balance), 10), + strconv.FormatInt(int64(account.SeqNum), 10), + strconv.FormatInt(int64(account.NumSubEntries), 10), + inflationDest, + base64.StdEncoding.EncodeToString([]byte(account.HomeDomain)), + base64.StdEncoding.EncodeToString(account.Thresholds[:]), + strconv.FormatInt(int64(account.Flags), 10), + accountExt, + 
signers, + legerExt, + }) + case xdr.LedgerEntryTypeTrustline: + ledgerEntry, err := xdr.MarshalBase64(change.Post) + if err != nil { + return err + } + csvWriter.Write([]string{ + ledgerEntry, + }) + case xdr.LedgerEntryTypeOffer: + offer := change.Post.Data.MustOffer() + + selling, err := xdr.MarshalBase64(offer.Selling) + if err != nil { + return err + } + + buying, err := xdr.MarshalBase64(offer.Buying) + if err != nil { + return err + } + + offerExt, err := xdr.MarshalBase64(offer.Ext) + if err != nil { + return err + } + + csvWriter.Write([]string{ + offer.SellerId.Address(), + strconv.FormatInt(int64(offer.OfferId), 10), + selling, + buying, + strconv.FormatInt(int64(offer.Amount), 10), + strconv.FormatInt(int64(offer.Price.N), 10), + strconv.FormatInt(int64(offer.Price.D), 10), + strconv.FormatInt(int64(offer.Flags), 10), + offerExt, + legerExt, + }) + case xdr.LedgerEntryTypeData: + accountData := change.Post.Data.MustData() + accountDataExt, err := xdr.MarshalBase64(accountData.Ext) + if err != nil { + return err + } + + csvWriter.Write([]string{ + accountData.AccountId.Address(), + base64.StdEncoding.EncodeToString([]byte(accountData.DataName)), + base64.StdEncoding.EncodeToString(accountData.DataValue), + accountDataExt, + legerExt, + }) + case xdr.LedgerEntryTypeClaimableBalance: + claimableBalance := change.Post.Data.MustClaimableBalance() + + ledgerEntry, err := xdr.MarshalBase64(change.Post) + if err != nil { + return err + } + + balanceID, err := xdr.MarshalBase64(claimableBalance.BalanceId) + if err != nil { + return err + } + + csvWriter.Write([]string{ + balanceID, + ledgerEntry, + }) + case xdr.LedgerEntryTypeLiquidityPool: + ledgerEntry, err := xdr.MarshalBase64(change.Post) + if err != nil { + return err + } + csvWriter.Write([]string{ + ledgerEntry, + }) + default: + return errors.Errorf("Invalid LedgerEntryType: %d", change.Type) + } + + if err := csvWriter.Error(); err != nil { + return errors.Wrap(err, "Error during csv.Writer.Write") + } + + csvWriter.Flush() + + if err := csvWriter.Error(); err != nil { + return errors.Wrap(err, "Error during csv.Writer.Flush") + } + return nil +} + +func main() { + testnet := flag.Bool("testnet", false, "connect to the Stellar test network") + flag.Parse() + + archive, err := archive(*testnet) + if err != nil { + panic(err) + } + log.SetLevel(log.InfoLevel) + + files := newCSVMap() + defer files.close() + + for entryType, fileName := range map[xdr.LedgerEntryType]string{ + xdr.LedgerEntryTypeAccount: "./accounts.csv", + xdr.LedgerEntryTypeData: "./accountdata.csv", + xdr.LedgerEntryTypeOffer: "./offers.csv", + xdr.LedgerEntryTypeTrustline: "./trustlines.csv", + xdr.LedgerEntryTypeClaimableBalance: "./claimablebalances.csv", + xdr.LedgerEntryTypeLiquidityPool: "./pools.csv", + } { + if err = files.put(entryType, fileName); err != nil { + log.WithField("err", err). + WithField("file", fileName). + Fatal("cannot create csv file") + } + } + + ledgerSequenceString := os.Getenv("LATEST_LEDGER") + ledgerSequence, err := strconv.Atoi(ledgerSequenceString) + if err != nil { + log.WithField("ledger", ledgerSequenceString). + WithField("err", err). + Fatal("cannot parse latest ledger") + } + log.WithField("ledger", ledgerSequence). 
+ Info("Processing entries from History Archive Snapshot") + + changeReader, err := ingest.NewCheckpointChangeReader( + context.Background(), + archive, + uint32(ledgerSequence), + ) + if err != nil { + log.WithField("err", err).Fatal("cannot construct change reader") + } + defer changeReader.Close() + + changeStats := &ingest.StatsChangeProcessor{} + doneStats := printPipelineStats(changeStats) + changeProcessor := csvProcessor{files: files, changeStats: changeStats} + logFatalError := func(err error) { + log.WithField("err", err).Fatal("could not process all changes from HAS") + } + for { + change, err := changeReader.Read() + if err == io.EOF { + break + } + if err != nil { + logFatalError(errors.Wrap(err, "could not read transaction")) + } + + if err = changeProcessor.ProcessChange(change); err != nil { + logFatalError(errors.Wrap(err, "could not process change")) + } + } + + // Remove sorted files + sortedFiles := []string{ + "./accounts_sorted.csv", + "./accountdata_sorted.csv", + "./offers_sorted.csv", + "./trustlines_sorted.csv", + "./claimablebalances_sort.csv", + } + for _, file := range sortedFiles { + err := os.Remove(file) + // Ignore not exist errors + if err != nil && !os.IsNotExist(err) { + panic(err) + } + } + + doneStats <- true +} + +func archive(testnet bool) (*historyarchive.Archive, error) { + if testnet { + return historyarchive.Connect( + "https://history.stellar.org/prd/core-testnet/core_testnet_001", + historyarchive.ConnectOptions{}, + ) + } + + return historyarchive.Connect( + fmt.Sprintf("https://history.stellar.org/prd/core-live/core_live_001/"), + historyarchive.ConnectOptions{}, + ) +} + +func printPipelineStats(reporter *ingest.StatsChangeProcessor) chan<- bool { + startTime := time.Now() + done := make(chan bool) + ticker := time.NewTicker(10 * time.Second) + + go func() { + defer ticker.Stop() + + for { + var m runtime.MemStats + runtime.ReadMemStats(&m) + results := reporter.GetResults() + stats := log.F(results.Map()) + stats["Alloc"] = bToMb(m.Alloc) + stats["HeapAlloc"] = bToMb(m.HeapAlloc) + stats["Sys"] = bToMb(m.Sys) + stats["NumGC"] = m.NumGC + stats["Goroutines"] = runtime.NumGoroutine() + stats["NumCPU"] = runtime.NumCPU() + stats["Duration"] = time.Since(startTime) + + log.WithFields(stats).Info("Current Job Status") + + select { + case <-ticker.C: + continue + case <-done: + // Pipeline done + return + } + } + }() + + return done +} + +func bToMb(b uint64) uint64 { + return b / 1024 / 1024 +} diff --git a/exp/tools/dump-ledger-state/run_test.sh b/exp/tools/dump-ledger-state/run_test.sh new file mode 100755 index 0000000000..ef2b56356c --- /dev/null +++ b/exp/tools/dump-ledger-state/run_test.sh @@ -0,0 +1,39 @@ +#! /bin/bash +set -e + +if [ -z ${LATEST_LEDGER+x} ]; then + # Get latest ledger + echo "Getting latest checkpoint ledger..." + if [ -z ${TESTNET+x} ]; then + export LATEST_LEDGER=`curl -s http://history.stellar.org/prd/core-live/core_live_001/.well-known/stellar-history.json | jq -r '.currentLedger'` + else + export LATEST_LEDGER=`curl -s http://history.stellar.org/prd/core-testnet/core_testnet_001/.well-known/stellar-history.json | jq -r '.currentLedger'` + fi + echo "Latest ledger: $LATEST_LEDGER" +fi + +# Dump state using Golang +if [ -z ${TESTNET+x} ]; then + echo "Dumping pubnet state using ingest..." + go run ./main.go +else + echo "Dumping testnet state using ingest..." + go run ./main.go --testnet +fi +echo "State dumped..." 
+ +# Catchup core +if [ -z ${TESTNET+x} ]; then + echo "Catch up from pubnet" + stellar-core --conf ./stellar-core.cfg catchup $LATEST_LEDGER/1 +else + echo "Catch up from testnet" + stellar-core --conf ./stellar-core-testnet.cfg catchup $LATEST_LEDGER/1 +fi + +echo "Dumping state from stellar-core..." +./dump_core_db.sh +echo "State dumped..." + +echo "Comparing state dumps..." +./diff_test.sh diff --git a/exp/tools/dump-ledger-state/stellar-core-testnet.cfg b/exp/tools/dump-ledger-state/stellar-core-testnet.cfg new file mode 100644 index 0000000000..5162295622 --- /dev/null +++ b/exp/tools/dump-ledger-state/stellar-core-testnet.cfg @@ -0,0 +1,37 @@ +HTTP_PORT=11626 +PUBLIC_HTTP_PORT=true +LOG_FILE_PATH="" + +DATABASE="postgresql://dbname=core host=localhost user=circleci" +NETWORK_PASSPHRASE="Test SDF Network ; September 2015" +UNSAFE_QUORUM=true +FAILURE_SAFETY=1 +CATCHUP_RECENT=8640 + +[HISTORY.cache] +get="cp /opt/stellar/history-cache/{0} {1}" + +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="core-testnet1.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP" +ADDRESS="core-testnet2.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_3" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z" +ADDRESS="core-testnet3.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}" \ No newline at end of file diff --git a/exp/tools/dump-ledger-state/stellar-core.cfg b/exp/tools/dump-ledger-state/stellar-core.cfg new file mode 100644 index 0000000000..3d01c4ea19 --- /dev/null +++ b/exp/tools/dump-ledger-state/stellar-core.cfg @@ -0,0 +1,149 @@ +HTTP_PORT=11626 +PUBLIC_HTTP_PORT=true +LOG_FILE_PATH="" + +DATABASE="postgresql://dbname=core host=localhost user=circleci" +NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015" +CATCHUP_RECENT=1 + +[HISTORY.cache] +get="cp /opt/stellar/history-cache/{0} {1}" + +[[HOME_DOMAINS]] +HOME_DOMAIN="stellar.org" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="satoshipay.io" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="lobstr.co" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="www.coinqvest.com" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="keybase.io" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_1" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH" +ADDRESS="core-live-a.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_2" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK" +ADDRESS="core-live-b.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_3" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ" +ADDRESS="core-live-c.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_003/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_singapore" 
+HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GBJQUIXUO4XSNPAUT6ODLZUJRV2NPXYASKUBY4G5MYP3M47PCVI55MNT" +ADDRESS="stellar-sg-sin.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-sg-sin.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_iowa" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GAK6Z5UVGUVSEK6PEOCAYJISTT5EJBB34PN3NOLEQG2SUKXRVV2F6HZY" +ADDRESS="stellar-us-iowa.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-us-iowa.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_frankfurt" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GC5SXLNAM3C4NMGK2PXK4R34B5GNZ47FYQ24ZIBFDFOCU6D4KBN4POAE" +ADDRESS="stellar-de-fra.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-de-fra.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_1_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GCFONE23AB7Y6C5YZOMKUKGETPIAJA4QOYLS5VNS4JHBGKRZCPYHDLW7" +ADDRESS="v1.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-1-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_2_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GDXQB3OMMQ6MGG43PWFBZWBFKBBDUZIVSUDAZZTRAWQZKES2CDSE5HKJ" +ADDRESS="v2.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-2-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_3_north_america" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GD5QWEVV4GZZTQP46BRXV5CUMMMLP4JTGFD7FWYJJWRL54CELY6JGQ63" +ADDRESS="v3.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-3-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_4_asia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA7TEPCBDQKI7JQLQ34ZURRMK44DVYCIGVXQQWNSWAEQR6KB4FMCBT7J" +ADDRESS="v4.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-4-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_5_australia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA5STBMV6QDXFDGD62MEHLLHZTPDI77U3PFOD2SELU5RJDHQWBR5NNK7" +ADDRESS="v5.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-5-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_hong_kong" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GAZ437J46SCFPZEDLVGDMKZPLFO77XJ4QVAURSJVRZK2T5S7XUFHXI2Z" +ADDRESS="hongkong.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://hongkong.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_germany" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GD6SZQV3WEJUH352NTVLKEV2JM2RH266VPEM7EH5QLLI7ZZAALMLNUVN" +ADDRESS="germany.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://germany.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_finland" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GADLA6BJK6VK33EM2IDQM37L5KGVCY5MSHSHVJA4SCNGNUIEOTCR6J5T" +ADDRESS="finland.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://finland.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_io" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GCWJKM4EGTGJUVSWUJDPCQEOEP5LHSOFKSA4HALBTOO4T4H3HCHOM6UX" +ADDRESS="stellar0.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_1" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GDKWELGJURRKXECG3HHFHXMRX64YWQPUHKCVRESOX3E5PM6DM4YXLZJM" +ADDRESS="stellar1.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory1.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_2" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GA35T3723UP2XJLC2H7MNL6VMKZZIFL2VW7XHMFFJKKIA2FJCYTLKFBW" +ADDRESS="stellar2.keybase.io:11625" +HISTORY="curl -sf 
https://stellarhistory2.keybase.io/{0} -o {1}" \ No newline at end of file diff --git a/exp/tools/dump-orderbook/main.go b/exp/tools/dump-orderbook/main.go new file mode 100644 index 0000000000..eee12037b5 --- /dev/null +++ b/exp/tools/dump-orderbook/main.go @@ -0,0 +1,120 @@ +package main + +import ( + "bytes" + "context" + "flag" + "fmt" + "io" + "os" + "strings" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +// This program will dump all the offers from a history archive checkpoint. +// The offers dump can then be fed in to the path finding benchmarks in exp/orderbook/graph_benchmark_test.go +func main() { + testnet := flag.Bool("testnet", false, "connect to the Stellar test network") + checkpointLedger := flag.Int( + "checkpoint-ledger", + 0, + "checkpoint ledger sequence to ingest, if omitted will use latest checkpoint ledger.", + ) + offersOutput := flag.String("offers-output", "offers.dump", "output file which will be populated with offerss") + poolsOutput := flag.String("pools-output", "pools.dump", "output file which will be populated with pools") + + flag.Parse() + + archive, err := archive(*testnet) + if err != nil { + panic(err) + } + log.SetLevel(log.InfoLevel) + + sequence := uint32(*checkpointLedger) + if sequence == 0 { + var root historyarchive.HistoryArchiveState + root, err = archive.GetRootHAS() + if err != nil { + log.WithField("err", err).Fatal("could not fetch root has") + } + sequence = root.CurrentLedger + } + log.WithField("ledger", sequence). + Info("Processing entries from History Archive Snapshot") + + var changeReader ingest.ChangeReader + changeReader, err = ingest.NewCheckpointChangeReader( + context.Background(), + archive, + sequence, + ) + if err != nil { + log.WithField("err", err).Fatal("cannot construct change reader") + } + defer changeReader.Close() + var offersFile, poolsFile *os.File + + if offersFile, err = os.Create(*offersOutput); err != nil { + log.WithField("err", err).Fatal("could not create offers file") + } + if poolsFile, err = os.Create(*poolsOutput); err != nil { + log.WithField("err", err).Fatal("could not create pools file") + } + + var offerXDRs []string + var poolXDRs []string + + for { + var change ingest.Change + change, err = changeReader.Read() + if err == io.EOF { + break + } + if err != nil { + log.WithField("err", err).Fatal("could not read change") + } + + switch change.Type { + case xdr.LedgerEntryTypeOffer: + var serialized string + serialized, err = xdr.MarshalBase64(change.Post.Data.MustOffer()) + if err != nil { + log.WithField("err", err).Fatal("could not marshall offer") + } + offerXDRs = append(offerXDRs, serialized) + case xdr.LedgerEntryTypeLiquidityPool: + var serialized string + serialized, err = xdr.MarshalBase64(change.Post.Data.MustLiquidityPool()) + if err != nil { + log.WithField("err", err).Fatal("could not marshall liquidity pool") + } + poolXDRs = append(poolXDRs, serialized) + } + } + + if _, err = io.Copy(offersFile, bytes.NewBufferString(strings.Join(offerXDRs, "\n"))); err != nil { + log.WithField("err", err).Fatal("could not write offer dump file") + } + if _, err = io.Copy(poolsFile, bytes.NewBufferString(strings.Join(poolXDRs, "\n"))); err != nil { + log.WithField("err", err).Fatal("could not write pool dump file") + } +} + +func archive(testnet bool) (*historyarchive.Archive, error) { + if testnet { + return historyarchive.Connect( + "https://history.stellar.org/prd/core-testnet/core_testnet_001", 
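			// The zero-value ConnectOptions are enough here; the archive is read
			// over plain HTTPS with default settings.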
+ historyarchive.ConnectOptions{}, + ) + } + + return historyarchive.Connect( + fmt.Sprintf("https://history.stellar.org/prd/core-live/core_live_001/"), + historyarchive.ConnectOptions{}, + ) +} diff --git a/glide.lock b/glide.lock deleted file mode 100644 index 49f15a2116..0000000000 --- a/glide.lock +++ /dev/null @@ -1,209 +0,0 @@ -hash: 9c1652a73f35bbad40c76ccfd9a7460bdf9cbecf9a54bc849bbfde8dd5d52011 -updated: 2016-08-17T08:31:13.332086432-07:00 -imports: -- name: github.com/agl/ed25519 - version: 278e1ec8e8a6e017cd07577924d6766039146ced - subpackages: - - edwards25519 -- name: github.com/ajg/form - version: cc2954064ec9ea8d93917f0f87456e11d7b881ad -- name: github.com/asaskevich/govalidator - version: 593d64559f7600f29581a3ee42177f5dbded27a9 -- name: github.com/aws/aws-sdk-go - version: 35c21ff262580265c1d77095d6f712605fd0c3f4 - subpackages: - - aws - - aws/session - - service/s3 - - aws/awserr - - aws/credentials - - aws/client - - aws/corehandlers - - aws/credentials/stscreds - - aws/defaults - - aws/request - - private/endpoints - - aws/awsutil - - aws/client/metadata - - aws/signer/v4 - - private/protocol - - private/protocol/restxml - - private/waiter - - service/sts - - aws/credentials/ec2rolecreds - - aws/credentials/endpointcreds - - aws/ec2metadata - - private/protocol/rest - - private/protocol/query - - private/protocol/xml/xmlutil - - private/protocol/query/queryutil -- name: github.com/BurntSushi/toml - version: 99064174e013895bbd9b025c31100bd1d9b590ca -- name: github.com/davecgh/go-spew - version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d - subpackages: - - spew -- name: github.com/fatih/structs - version: dc3312cb1a4513a366c4c9e622ad55c32df12ed3 -- name: github.com/gavv/gojsondiff - version: 36046c6e558e7f854ebd3fd97d1e9812ebe8709b - subpackages: - - formatter -- name: github.com/gavv/monotime - version: 259cd7b345f5aa080eff16f21d7866ef7dea9528 -- name: github.com/getsentry/raven-go - version: c9d3cc542ad199f62c0264286be537f9bce6063c -- name: github.com/go-ini/ini - version: a2610b3a793cfa7fdf0b07038068af5ddc12aba1 -- name: github.com/go-sql-driver/mysql - version: 0b58b37b664c21f3010e836f1b931e1d0b0b0685 -- name: github.com/google/go-querystring - version: 9235644dd9e52eeae6fa48efd539fdc351a0af53 - subpackages: - - query -- name: github.com/howeyc/gopass - version: b63a7d07e65df376d14e2d72907a93d4847dffe4 -- name: github.com/imkira/go-interpol - version: b9781c93ae51c8b4fc3af1b1426909721af0a624 -- name: github.com/inconshreveable/mousetrap - version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -- name: github.com/jmespath/go-jmespath - version: bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -- name: github.com/jmoiron/sqlx - version: 7396209bbeada6a4fcc28aa9408f89b2e71cac39 - subpackages: - - reflectx -- name: github.com/klauspost/compress - version: 14eb9c4951195779ecfbec34431a976de7335b0a - subpackages: - - flate - - gzip - - zlib -- name: github.com/klauspost/cpuid - version: 09cded8978dc9e80714c4d85b0322337b0a1e5e0 -- name: github.com/klauspost/crc32 - version: 19b0b332c9e4516a6370a0456e6182c3b5036720 -- name: github.com/lann/builder - version: f22ce00fd9394014049dad11c244859432bd6820 -- name: github.com/lann/ps - version: 62de8c46ede02a7675c4c79c84883eb164cb71e3 -- name: github.com/lann/squirrel - version: caff2d522d550f0998629c85898ad35e8a603328 -- name: github.com/lib/pq - version: 80f8150043c80fb52dee6bc863a709cdac7ec8f8 - subpackages: - - oid -- name: github.com/mattn/go-sqlite3 - version: c3e9588849195eefa783417c3a53d092f6e93526 -- name: github.com/moul/http2curl - 
version: b1479103caacaa39319f75e7f57fc545287fca0d -- name: github.com/nullstyle/go-xdr - version: 8bf8a5d05c8612d4039a7f67ddce3d49ee61b398 - subpackages: - - xdr3 -- name: github.com/onsi/ginkgo - version: 120efcfd33906beedc53369a8659c41cc9190e81 - subpackages: - - extensions/table - - config - - internal/codelocation - - internal/failer - - internal/remote - - internal/suite - - internal/testingtproxy - - internal/writer - - reporters - - reporters/stenographer - - types - - internal/containernode - - internal/leafnodes - - internal/spec - - internal/specrunner -- name: github.com/onsi/gomega - version: 9ed8da19f2156b87a803a8fdf6d126f627a12db1 - subpackages: - - types - - internal/assertion - - internal/asyncassertion - - internal/testingtsupport - - matchers - - internal/oraclematcher - - format - - matchers/support/goraph/bipartitegraph - - matchers/support/goraph/edge - - matchers/support/goraph/node - - matchers/support/goraph/util -- name: github.com/pkg/errors - version: 01fa4104b9c248c8945d14d9f128454d5b28d595 -- name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d - subpackages: - - difflib -- name: github.com/rs/cors - version: a62a804a8a009876ca59105f7899938a1349f4b3 -- name: github.com/rs/xhandler - version: ed27b6fd65218132ee50cd95f38474a3d8a2cd12 -- name: github.com/segmentio/go-loggly - version: e78f6971ebca5835614673e2f5f6a47ca5f13501 -- name: github.com/sergi/go-diff - version: 97b2266dfe4bd4ea1b81a463322f04f8b724801e - subpackages: - - diffmatchpatch -- name: github.com/Sirupsen/logrus - version: a283a10442df8dc09befd873fab202bf8a253d6a -- name: github.com/spf13/cobra - version: 7c674d9e72017ed25f6d2b5e497a1368086b6a6f -- name: github.com/spf13/pflag - version: f676131e2660dc8cd88de99f7486d34aa8172635 -- name: github.com/stretchr/testify - version: d77da356e56a7428ad25149ca77381849a6a5232 - subpackages: - - assert - - require -- name: github.com/valyala/bytebufferpool - version: 8ebd0474e5a2f0a5c7a74ad2bf421a1d1a90264f -- name: github.com/valyala/fasthttp - version: 45697fe30a130ec6a54426a069c82f3abe76b63d - subpackages: - - fasthttputil -- name: github.com/visionmedia/go-debug - version: ff4a55a20a86994118644bbddc6a216da193cc13 -- name: github.com/xeipuuv/gojsonpointer - version: e0fe6f68307607d540ed8eac07a342c33fa1b54a -- name: github.com/xeipuuv/gojsonreference - version: e02fc20de94c78484cd5ffb007f8af96be030a45 -- name: github.com/xeipuuv/gojsonschema - version: 4f624f6197547606054e042e7903db103585e151 -- name: github.com/yalp/jsonpath - version: 31a79c7593bb93eb10b163650d4a3e6ca190e4dc -- name: github.com/yudai/golcs - version: d1c525dea8ce39ea9a783d33cf08932305373f2c -- name: goji.io - version: e355964ac565b94cf0fc7f218346626529125086 - subpackages: - - pat - - internal - - pattern -- name: golang.org/x/crypto - version: 595bbbd7f5f308415a544d3c55743c91427c8d99 - subpackages: - - ssh/terminal -- name: golang.org/x/net - version: 075e191f18186a8ff2becaf64478e30f4545cdad - subpackages: - - context - - http2 - - http2/hpack - - lex/httplex - - publicsuffix -- name: golang.org/x/sys - version: a646d33e2ee3172a661fc09bca23bb4889a41bc8 - subpackages: - - unix -- name: gopkg.in/gavv/httpexpect.v1 - version: 2493e01c9e420f0119fefd521c46b1168f33bbf3 -- name: gopkg.in/tylerb/graceful.v1 - version: c838c13b2beeea4f4f54496da96a3a6ae567c37a -testImports: -- name: gopkg.in/yaml.v2 - version: e4d366fc3c7938e2958e662b4258c7a89e1f0e3e diff --git a/glide.yaml b/glide.yaml deleted file mode 100644 index d093079b07..0000000000 --- a/glide.yaml +++ 
/dev/null @@ -1,29 +0,0 @@ -package: github.com/stellar/go -import: -- package: github.com/howeyc/gopass -- package: github.com/agl/ed25519 -- package: github.com/nullstyle/go-xdr -- package: github.com/pkg/errors - version: ^0.7.0 -- package: github.com/getsentry/raven-go -- package: github.com/segmentio/go-loggly -- package: golang.org/x/net/context -- package: golang.org/x/net/http2 -- package: github.com/stretchr/testify -- package: github.com/jmoiron/sqlx -- package: github.com/lann/squirrel -- package: github.com/lib/pq -- package: github.com/go-sql-driver/mysql -- package: github.com/mattn/go-sqlite3 -- package: gopkg.in/tylerb/graceful.v1 -- package: github.com/rs/cors -- package: goji.io -- package: github.com/asaskevich/govalidator -- package: github.com/BurntSushi/toml -- package: github.com/spf13/cobra -- package: gopkg.in/gavv/httpexpect.v1 -- package: github.com/onsi/ginkgo - version: master -- package: github.com/onsi/gomega - version: master -- package: github.com/aws/aws-sdk-go/aws \ No newline at end of file diff --git a/go.list b/go.list new file mode 100644 index 0000000000..a548f11d55 --- /dev/null +++ b/go.list @@ -0,0 +1,106 @@ +cloud.google.com/go v0.84.0 +cloud.google.com/go/firestore v1.5.0 +cloud.google.com/go/storage v1.10.0 +firebase.google.com/go v3.12.0+incompatible +github.com/BurntSushi/toml v0.3.1 +github.com/Masterminds/squirrel v1.5.0 +github.com/adjust/goautoneg v0.0.0-20150426214442-d788f35a0315 +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d +github.com/aws/aws-sdk-go v1.39.5 +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 +github.com/davecgh/go-spew v1.1.1 +github.com/elazarl/go-bindata-assetfs v1.0.0 +github.com/fatih/structs v1.0.0 +github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955 +github.com/getsentry/raven-go v0.0.0-20160805001729-c9d3cc542ad1 +github.com/go-chi/chi v4.0.3+incompatible +github.com/go-errors/errors v0.0.0-20150906023321-a41850380601 +github.com/golang-jwt/jwt v3.2.1+incompatible +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e +github.com/golang/protobuf v1.5.2 +github.com/google/go-cmp v0.5.6 +github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5 +github.com/google/uuid v1.2.0 +github.com/googleapis/gax-go/v2 v2.0.5 +github.com/gorilla/schema v1.1.0 +github.com/graph-gophers/graphql-go v0.0.0-20190225005345-3e8838d4614c +github.com/guregu/null v2.1.3-0.20151024101046-79c5bd36b615+incompatible +github.com/hashicorp/golang-lru v0.5.1 +github.com/holiman/uint256 v1.2.0 +github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c +github.com/hpcloud/tail v1.0.0 +github.com/imkira/go-interpol v1.1.0 +github.com/jarcoal/httpmock v0.0.0-20161210151336-4442edb3db31 +github.com/jmespath/go-jmespath v0.4.0 +github.com/jmoiron/sqlx v1.2.0 +github.com/klauspost/compress v0.0.0-20161106143436-e3b7981a12dd +github.com/klauspost/cpuid v0.0.0-20160302075316-09cded8978dc +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 +github.com/kr/pretty v0.1.0 +github.com/kr/text v0.1.0 +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 +github.com/lib/pq v1.2.0 +github.com/magiconair/properties v1.5.4 +github.com/manucorporat/sse v0.0.0-20160126180136-ee05b128a739 +github.com/matttproud/golang_protobuf_extensions v1.0.1 +github.com/mitchellh/go-homedir v1.1.0 +github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366 
+github.com/moul/http2curl v0.0.0-20161031194548-4e24498b31db +github.com/onsi/ginkgo v1.7.0 +github.com/onsi/gomega v1.4.3 +github.com/opentracing/opentracing-go v1.1.0 +github.com/pelletier/go-toml v1.9.0 +github.com/pkg/errors v0.9.1 +github.com/pmezard/go-difflib v1.0.0 +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 +github.com/prometheus/common v0.2.0 +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 +github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 +github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 +github.com/rubenv/sql-migrate v0.0.0-20190717103323-87ce952f7079 +github.com/segmentio/go-loggly v0.5.1-0.20171222203950-eb91657e62b2 +github.com/sergi/go-diff v0.0.0-20161205080420-83532ca1c1ca +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 +github.com/sirupsen/logrus v1.4.1 +github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94 +github.com/spf13/cobra v0.0.0-20160830174925-9c28e4bbd74e +github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431 +github.com/spf13/pflag v0.0.0-20161005214240-4bd69631f475 +github.com/spf13/viper v0.0.0-20150621231900-db7ff930a189 +github.com/stellar/go +github.com/stellar/go-xdr v0.0.0-20211103144802-8017fc4bdfee +github.com/stellar/throttled v2.2.3-0.20190823235211-89d75816f59d+incompatible +github.com/stretchr/objx v0.3.0 +github.com/stretchr/testify v1.7.0 +github.com/tyler-smith/go-bip39 v0.0.0-20180618194314-52158e4697b8 +github.com/valyala/bytebufferpool v1.0.0 +github.com/valyala/fasthttp v0.0.0-20170109085056-0a7f0a797cd6 +github.com/xdrpp/goxdr v0.1.1 +github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076 +github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c +github.com/xeipuuv/gojsonschema v0.0.0-20161231055540-f06f290571ce +github.com/yalp/jsonpath v0.0.0-20150812003900-31a79c7593bb +github.com/yudai/gojsondiff v0.0.0-20170107030110-7b1b7adf999d +github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce +go.opencensus.io v0.23.0 +golang.org/x/crypto v0.0.0-20211202192323-5770296d904e +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 +golang.org/x/text v0.3.6 +google.golang.org/api v0.50.0 +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 +google.golang.org/grpc v1.38.0 +google.golang.org/protobuf v1.26.0 +gopkg.in/fsnotify.v1 v1.4.7 +gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0 +gopkg.in/gorp.v1 v1.7.1 +gopkg.in/square/go-jose.v2 v2.4.1 +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 +gopkg.in/tylerb/graceful.v1 v1.2.13 +gopkg.in/yaml.v2 v2.2.8 +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c diff --git a/go.mod b/go.mod new file mode 100644 index 0000000000..d64fe9cf9f --- /dev/null +++ b/go.mod @@ -0,0 +1,83 @@ +module github.com/stellar/go + +go 1.16 + +require ( + cloud.google.com/go/firestore v1.5.0 // indirect + firebase.google.com/go v3.12.0+incompatible + github.com/BurntSushi/toml v0.3.1 + github.com/Masterminds/squirrel v1.5.0 + github.com/Microsoft/go-winio v0.4.14 + github.com/adjust/goautoneg v0.0.0-20150426214442-d788f35a0315 + github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f // indirect + github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d + github.com/aws/aws-sdk-go v1.39.5 + github.com/elazarl/go-bindata-assetfs v1.0.0 + 
github.com/fatih/structs v1.0.0 // indirect + github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955 // indirect + github.com/getsentry/raven-go v0.0.0-20160805001729-c9d3cc542ad1 + github.com/go-chi/chi v4.0.3+incompatible + github.com/go-errors/errors v0.0.0-20150906023321-a41850380601 + github.com/go-jet/jet/v2 v2.5.1-0.20211207130744-5ec19e1f5d86 + github.com/gobuffalo/packr v1.12.1 // indirect + github.com/golang-jwt/jwt v3.2.1+incompatible + github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5 // indirect + github.com/google/uuid v1.2.0 + github.com/gorilla/schema v1.1.0 + github.com/graph-gophers/graphql-go v0.0.0-20190225005345-3e8838d4614c + github.com/guregu/null v2.1.3-0.20151024101046-79c5bd36b615+incompatible + github.com/holiman/uint256 v1.2.0 + github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c + github.com/imkira/go-interpol v1.1.0 // indirect + github.com/jarcoal/httpmock v0.0.0-20161210151336-4442edb3db31 + github.com/jmoiron/sqlx v1.2.0 + github.com/klauspost/compress v0.0.0-20161106143436-e3b7981a12dd // indirect + github.com/klauspost/cpuid v0.0.0-20160302075316-09cded8978dc // indirect + github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 // indirect + github.com/lib/pq v1.7.0 + github.com/magiconair/properties v1.5.4 // indirect + github.com/manucorporat/sse v0.0.0-20160126180136-ee05b128a739 + github.com/mitchellh/go-homedir v1.1.0 + github.com/moul/http2curl v0.0.0-20161031194548-4e24498b31db // indirect + github.com/onsi/ginkgo v1.7.0 + github.com/onsi/gomega v1.4.3 + github.com/pelletier/go-toml v1.9.0 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/client_model v0.2.0 + github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 + github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 // indirect + github.com/rubenv/sql-migrate v0.0.0-20190717103323-87ce952f7079 + github.com/segmentio/go-loggly v0.5.1-0.20171222203950-eb91657e62b2 + github.com/sergi/go-diff v0.0.0-20161205080420-83532ca1c1ca // indirect + github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 + github.com/sirupsen/logrus v1.6.0 + github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94 // indirect + github.com/spf13/cobra v0.0.3 + github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431 // indirect + github.com/spf13/pflag v1.0.1 + github.com/spf13/viper v0.0.0-20150621231900-db7ff930a189 + github.com/stellar/go-xdr v0.0.0-20211103144802-8017fc4bdfee + github.com/stellar/throttled v2.2.3-0.20190823235211-89d75816f59d+incompatible + github.com/stretchr/objx v0.3.0 // indirect + github.com/stretchr/testify v1.7.0 + github.com/tyler-smith/go-bip39 v0.0.0-20180618194314-52158e4697b8 + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasthttp v0.0.0-20170109085056-0a7f0a797cd6 // indirect + github.com/xdrpp/goxdr v0.1.1 + github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076 // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c // indirect + github.com/xeipuuv/gojsonschema v0.0.0-20161231055540-f06f290571ce // indirect + github.com/yalp/jsonpath v0.0.0-20150812003900-31a79c7593bb // indirect + github.com/yudai/gojsondiff v0.0.0-20170107030110-7b1b7adf999d // indirect + github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce // indirect + github.com/yudai/pp v2.0.1+incompatible // indirect + github.com/ziutek/mymysql v1.5.4 // indirect + golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect + golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c // indirect + google.golang.org/api v0.50.0 + gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0 + gopkg.in/gorp.v1 v1.7.1 // indirect + gopkg.in/square/go-jose.v2 v2.4.1 + gopkg.in/tylerb/graceful.v1 v1.2.13 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000000..e2c9f58d82 --- /dev/null +++ b/go.sum @@ -0,0 +1,1074 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0 h1:hVhK90DwCdOAYGME/FJd9vNIZye9HBR6Yy3fu4js3N8= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.5.0 h1:4qNItsmc4GP6UOZPGemmHY4ZfPofVhcaKXsYw9wm9oA= +cloud.google.com/go/firestore v1.5.0/go.mod h1:c4nNYR1qdq7eaZ+jSc5fonrQN2k3M7sWATcYTiakjEo= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +firebase.google.com/go v3.12.0+incompatible h1:q70KCp/J0oOL8kJ8oV2j3646kV4TB8Y5IvxXC0WT1bo= +firebase.google.com/go v3.12.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8= +github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/adjust/goautoneg v0.0.0-20150426214442-d788f35a0315 h1:zje9aPr1kQ5nKwjO5MC0S/jehRtNrjfYuLfFRWZH6kY= +github.com/adjust/goautoneg v0.0.0-20150426214442-d788f35a0315/go.mod h1:4U522XvlkqOY2AVBUM7ISHODDb6tdB+KAXfGaBDsWts= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f h1:zvClvFQwU++UpIUBGC8YmDlfhUrweEy1R1Fj1gu5iIM= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf 
v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= +github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.39.5 h1:yoJEE1NJxbpZ3CtPxvOSFJ9ByxiXmBTKk8J+XU5ldtg= +github.com/aws/aws-sdk-go v1.39.5/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd 
v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk= +github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.0.0 h1:BrX964Rv5uQ3wwS+KRUAJCBBw5PQmgJfJ6v4yly5QwU= +github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955 h1:gmtGRvSexPU4B1T/yYo0sLOKzER1YT+b4kPxPpm0Ty4= +github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955/go.mod h1:vmp8DIyckQMXOPl0AQVHt+7n5h7Gb7hS6CUydiV8QeA= +github.com/getsentry/raven-go v0.0.0-20160805001729-c9d3cc542ad1 h1:qIqziX4EA/OBdmMgtaqdKBWWOZIfyXYClCoa56NgVEk= 
+github.com/getsentry/raven-go v0.0.0-20160805001729-c9d3cc542ad1/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-chi/chi v4.0.3+incompatible h1:gakN3pDJnzZN5jqFV2TEdF66rTfKeITyR8qu6ekICEY= +github.com/go-chi/chi v4.0.3+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-errors/errors v0.0.0-20150906023321-a41850380601 h1:jxTbmDuqQUTI6MscgbqB39vtxGfr2fi61nYIcFQUnlE= +github.com/go-errors/errors v0.0.0-20150906023321-a41850380601/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-jet/jet/v2 v2.5.1-0.20211207130744-5ec19e1f5d86 h1:B5Yh1Ur7SRcUQ1oz8QfH+nOVLfMmErUiZzRG7uwfVOc= +github.com/go-jet/jet/v2 v2.5.1-0.20211207130744-5ec19e1f5d86/go.mod h1:krPWOkiD0qbvTY73rWgSacs9u0s8KU+uRC2PeXFMWTw= +github.com/go-jet/jet/v2 v2.6.0 h1:yS896B4+SO97GpLFZE03qBF75acKNuHB6YL6dGIqYLA= +github.com/go-jet/jet/v2 v2.6.0/go.mod h1:krPWOkiD0qbvTY73rWgSacs9u0s8KU+uRC2PeXFMWTw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/packr v1.12.1 h1:+5u3rqgdhswdYXhrX6DHaO7BM4P8oxrbvgZm9H1cRI4= +github.com/gobuffalo/packr v1.12.1/go.mod h1:H2dZhQFqHeZwr/5A/uGQkBp7xYuMGuzXFeKhYdcz5No= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5 h1:oERTZ1buOUYlpmKaqlO5fYmz8cZ1rYu5DieJzF4ZVmU= +github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= 
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/schema v1.1.0 h1:CamqUDOFUBqzrvxuz2vEwo8+SUdwsluFh7IlzJh30LY= +github.com/gorilla/schema v1.1.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/graph-gophers/graphql-go v0.0.0-20190225005345-3e8838d4614c h1:YyFUsspLqAt3noyPCLz7EFK/o1LpC1j/6MjU0bSVOQ4= +github.com/graph-gophers/graphql-go v0.0.0-20190225005345-3e8838d4614c/go.mod h1:uJhtPXrcJLqyi0H5IuMFh+fgW+8cMMakK3Txrbk/WJE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/guregu/null v2.1.3-0.20151024101046-79c5bd36b615+incompatible h1:SZmF1M6CdAm4MmTPYYTG+x9EC8D3FOxUq9S4D37irQg= +github.com/guregu/null v2.1.3-0.20151024101046-79c5bd36b615+incompatible/go.mod h1:ePGpQaN9cw0tj45IR5E5ehMvsFlLlQZAkkOXZurJ3NM= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru 
v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= +github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:kQWxfPIHVLbgLzphqk3QUflDy9QdksZR4ygR807bpy0= +github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.8.1/go.mod h1:JV6m6b6jhjdmzchES0drzCcYcAHS1OPD5xu3OZ/lE2g= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod 
h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= +github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= +github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= +github.com/jackc/pgtype v1.7.0/go.mod h1:ZnHF+rMePVqDKaOfJVI4Q8IVvAQMryDlDkZnKOI75BE= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= +github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= +github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= +github.com/jackc/pgx/v4 v4.11.0/go.mod h1:i62xJgdrtVDsnL3U8ekyrQXEwGNTRoG7/8r+CIdYfcc= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jarcoal/httpmock v0.0.0-20161210151336-4442edb3db31 h1:Aw95BEvxJ3K6o9GGv5ppCd1P8hkeIeEJ30FO+OhOJpM= +github.com/jarcoal/httpmock v0.0.0-20161210151336-4442edb3db31/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= 
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v0.0.0-20161106143436-e3b7981a12dd h1:vQ0EEfHpdFUtNRj1ri25MUq5jb3Vma+kKhLyjeUTVow= +github.com/klauspost/compress v0.0.0-20161106143436-e3b7981a12dd/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20160302075316-09cded8978dc h1:WW8B7p7QBnFlqRVv/k6ro/S8Z7tCnYjJHcQNScx9YVs= +github.com/klauspost/cpuid v0.0.0-20160302075316-09cded8978dc/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6 h1:KAZ1BW2TCmT6PRihDPpocIy1QTtsAsrx6TneU/4+CMg= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 
h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.5.4 h1:5Y3GEEL4cWijFkb6jtcVs3lX2EWA1ZKq64qu9cd8W7s= +github.com/magiconair/properties v1.5.4/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/manucorporat/sse v0.0.0-20160126180136-ee05b128a739 h1:ykXz+pRRTibcSjG1yRhpdSHInF8yZY/mfn+Rz2Nd1rE= +github.com/manucorporat/sse v0.0.0-20160126180136-ee05b128a739/go.mod h1:zUx1mhth20V3VKgL5jbd1BSQcW4Fy6Qs4PZvQwRFwzM= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.8 h1:gDp86IdQsN/xWjIEmr9MF6o9mpksUgh0fu+9ByFxzIU= +github.com/mattn/go-sqlite3 v1.14.8/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir 
v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/moul/http2curl v0.0.0-20161031194548-4e24498b31db h1:eZgFHVkk9uOTaOQLC6tgjkzdp7Ays8eEVecBcfHZlJQ= +github.com/moul/http2curl v0.0.0-20161031194548-4e24498b31db/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.9.0 h1:NOd0BRdOKpPf0SxkL3HxSQOG7rNh+4kl6PHcBPFs7Q0= +github.com/pelletier/go-toml v1.9.0/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 h1:8DPul/X0IT/1TNMIxoKLwdemEOBBHDC/K4EB16Cw5WE= +github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20= +github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rubenv/sql-migrate v0.0.0-20190717103323-87ce952f7079 h1:xPeaaIHjF9j8jbYQ5xdvLnFp+lpmGYFG1uBPtXNBHno= +github.com/rubenv/sql-migrate v0.0.0-20190717103323-87ce952f7079/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/go-loggly v0.5.1-0.20171222203950-eb91657e62b2 h1:S4OC0+OBKz6mJnzuHioeEat74PuQ4Sgvbf8eus695sc= +github.com/segmentio/go-loggly v0.5.1-0.20171222203950-eb91657e62b2/go.mod h1:8zLRYR5npGjaOXgPSKat5+oOh+UHd8OdbS18iqX9F6Y= +github.com/sergi/go-diff v0.0.0-20161205080420-83532ca1c1ca h1:oR/RycYTFTVXzND5r4FdsvbnBn0HJXSVeNAnwaTXRwk= +github.com/sergi/go-diff v0.0.0-20161205080420-83532ca1c1ca/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal 
v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= +github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94 h1:JmfC365KywYwHB946TTiQWEb8kqPY+pybPLoGE9GgVk= +github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431 h1:XTHrT015sxHyJ5FnQ0AeemSspZWaDq7DoTRW0EVsDCE= +github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v0.0.0-20150621231900-db7ff930a189 h1:fvB1AFbBd6SfI9Rd0ooAJp8uLkZDbZaLFHi7ZnNP6uI= +github.com/spf13/viper v0.0.0-20150621231900-db7ff930a189/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/stellar/go-xdr v0.0.0-20211103144802-8017fc4bdfee h1:fbVs0xmXpBvVS4GBeiRmAE3Le70ofAqFMch1GTiq/e8= +github.com/stellar/go-xdr v0.0.0-20211103144802-8017fc4bdfee/go.mod h1:yoxyU/M8nl9LKeWIoBrbDPQ7Cy+4jxRcWcOayZ4BMps= +github.com/stellar/throttled v2.2.3-0.20190823235211-89d75816f59d+incompatible h1:jMXXAcz6xTarGDQ4VtVbtERogcmDQw4RaE85Cr9CgoQ= +github.com/stellar/throttled v2.2.3-0.20190823235211-89d75816f59d+incompatible/go.mod h1:7CJ23pXirXBJq45DqvO6clzTEGM/l1SfKrgrzLry8b4= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= 
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= +github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tyler-smith/go-bip39 v0.0.0-20180618194314-52158e4697b8 h1:g3yQGZK+G6dfF/mw/SOwsTMzUVkpT4hB8pHxpbTXkKw= +github.com/tyler-smith/go-bip39 v0.0.0-20180618194314-52158e4697b8/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v0.0.0-20170109085056-0a7f0a797cd6 h1:s0IDmR1jFyWvOK7jVIuAsmHQaGkXUuTas8NXFUOwuAI= +github.com/valyala/fasthttp v0.0.0-20170109085056-0a7f0a797cd6/go.mod h1:+g/po7GqyG5E+1CNgquiIxJnsXEi5vwFn5weFujbO78= +github.com/xdrpp/goxdr v0.1.1 h1:E1B2c6E8eYhOVyd7yEpOyopzTPirUeF6mVOfXfGyJyc= +github.com/xdrpp/goxdr v0.1.1/go.mod h1:dXo1scL/l6s7iME1gxHWo2XCppbHEKZS7m/KyYWkNzA= +github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076 h1:KM4T3G70MiR+JtqplcYkNVoNz7pDwYaBxWBXQK804So= +github.com/xeipuuv/gojsonpointer v0.0.0-20151027082146-e0fe6f683076/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c h1:XZWnr3bsDQWAZg4Ne+cPoXRPILrNlPNQfxBuwLl43is= +github.com/xeipuuv/gojsonreference v0.0.0-20150808065054-e02fc20de94c/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20161231055540-f06f290571ce h1:cVSRGH8cOveJNwFEEZLXtB+XMnRqKLjUP6V/ZFYQCXI= +github.com/xeipuuv/gojsonschema v0.0.0-20161231055540-f06f290571ce/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yalp/jsonpath v0.0.0-20150812003900-31a79c7593bb h1:06WAhQa+mYv7BiOk13B/ywyTlkoE/S7uu6TBKU6FHnE= +github.com/yalp/jsonpath v0.0.0-20150812003900-31a79c7593bb/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= +github.com/yudai/gojsondiff v0.0.0-20170107030110-7b1b7adf999d h1:yJIizrfO599ot2kQ6Af1enICnwBD3XoxgX3MrMwot2M= +github.com/yudai/gojsondiff v0.0.0-20170107030110-7b1b7adf999d/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce 
h1:888GrqRxabUce7lj4OaoShPxodm3kXOMpSa85wdYzfY= +github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8= +golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210223095934-7937bea0104d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4 h1:cVngSRcfgyZCzys3KYOpCFa+4dqX/Oub9tAq00ttGVs= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0 h1:LX7NFCFYOHzr7WHaYiRUpeipZe9o5L8T+2F4Z798VDw= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/6mezTw6oA14cmKC96FeUwL6A9bd4= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc 
v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0 h1:r5ptJ1tBxVAeqw4CrYWhXIMr0SybY3CDHuIbCg5CFVw= +gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0/go.mod h1:WtiW9ZA1LdaWqtQRo1VbIL/v4XZ8NDta+O/kSpGgVek= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gorp.v1 v1.7.1 h1:GBB9KrWRATQZh95HJyVGUZrWwOPswitEYEyqlK8JbAA= +gopkg.in/gorp.v1 v1.7.1/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.4.1 h1:H0TmLt7/KmzlrDOpa1F+zr0Tk90PbJYBfsVUmRLrf9Y= 
+gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/tylerb/graceful.v1 v1.2.13 h1:UWJlWJHZepntB0PJ9RTgW3X+zVLjfmWbx/V1X/V/XoA= +gopkg.in/tylerb/graceful.v1 v1.2.13/go.mod h1:yBhekWvR20ACXVObSSdD3u6S9DeSylanL2PAbAC/uJ8= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/gofmt.sh b/gofmt.sh new file mode 100755 index 0000000000..f51f2c931c --- /dev/null +++ b/gofmt.sh @@ -0,0 +1,11 @@ +#! /bin/bash +set -e + +printf "Running gofmt checks...\n" +OUTPUT=$(gofmt -d .) + +if [[ $OUTPUT ]]; then + printf "gofmt found unformatted files:\n\n" + echo "$OUTPUT" + exit 1 +fi diff --git a/gogenerate.sh b/gogenerate.sh new file mode 100755 index 0000000000..620437e0a1 --- /dev/null +++ b/gogenerate.sh @@ -0,0 +1,8 @@ +#! /bin/bash +set -e + +printf "Running go generate...\n" +go generate ./... + +printf "Checking for no diff...\n" +git diff --exit-code || (echo "Files changed after running go generate. Run go generate ./... locally and update generated files." 
&& exit 1)
diff --git a/golist.sh b/golist.sh
new file mode 100755
index 0000000000..740b390ab4
--- /dev/null
+++ b/golist.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+go list -f '{{with .Module}}{{.Path}} {{.Version}}{{end}}' all | LC_ALL=C sort -u
diff --git a/gomod.sh b/gomod.sh
new file mode 100755
index 0000000000..c894870623
--- /dev/null
+++ b/gomod.sh
@@ -0,0 +1,8 @@
+#! /bin/bash
+set -e
+
+go mod tidy
+git diff --exit-code -- go.mod || (echo "Go file go.mod is dirty, update the file with 'go mod tidy' locally." && exit 1)
+git diff --exit-code -- go.sum || (echo "Go file go.sum is dirty, update the file with 'go mod tidy' locally." && exit 1)
+diff -u go.list <(./golist.sh) || (echo "Go dependencies have changed, update the go.list file with './golist.sh > go.list' locally." && exit 1)
+go mod verify || (echo "One or more Go dependencies failed verification. Either a version is no longer available, or the author or someone else has modified the version so it no longer points to the same code." && exit 1)
diff --git a/govet.sh b/govet.sh
new file mode 100755
index 0000000000..5b074c5dfe
--- /dev/null
+++ b/govet.sh
@@ -0,0 +1,16 @@
+#! /bin/bash
+set -e
+
+printf "Running go vet...\n"
+go vet -all -composites=false -unreachable=false -tests=false ./...
+
+printf "Running go vet shadow...\n"
+command -v shadow >/dev/null 2>&1 || (
+  dir=$(mktemp -d)
+  pushd $dir
+  go mod init shadow
+  go get golang.org/x/tools/go/analysis/passes/shadow/cmd/shadow
+  popd
+)
+
+go vet -vettool=$(which shadow) ./...
diff --git a/gxdr/dump.go b/gxdr/dump.go
new file mode 100644
index 0000000000..0146a8e585
--- /dev/null
+++ b/gxdr/dump.go
@@ -0,0 +1,25 @@
+package gxdr
+
+import (
+	"bytes"
+	"encoding"
+
+	goxdr "github.com/xdrpp/goxdr/xdr"
+)
+
+// Dump serializes the given goxdr value into binary.
+func Dump(v goxdr.XdrType) []byte {
+	var buf bytes.Buffer
+	writer := goxdr.XdrOut{Out: &buf}
+	writer.Marshal("", v)
+	return buf.Bytes()
+}
+
+// Convert serializes the given goxdr value into another destination value
+// which supports binary unmarshalling.
+//
+// This function can be used to convert github.com/xdrpp/goxdr/xdr values into
+// equivalent https://github.com/stellar/go-xdr values.
+func Convert(src goxdr.XdrType, dest encoding.BinaryUnmarshaler) error {
+	return dest.UnmarshalBinary(Dump(src))
+}
diff --git a/gxdr/generate.go b/gxdr/generate.go
new file mode 100644
index 0000000000..446ffcf59e
--- /dev/null
+++ b/gxdr/generate.go
@@ -0,0 +1,4 @@
+package gxdr
+
+//go:generate rm -f xdr_generated.go
+//go:generate make -C ../ gxdr/xdr_generated.go
diff --git a/gxdr/xdr_generated.go b/gxdr/xdr_generated.go
new file mode 100644
index 0000000000..0b440002fd
--- /dev/null
+++ b/gxdr/xdr_generated.go
@@ -0,0 +1,18812 @@
+// Code generated by goxdr -p gxdr -enum-comments -o gxdr/xdr_generated.go xdr/Stellar-SCP.x xdr/Stellar-ledger-entries.x xdr/Stellar-ledger.x xdr/Stellar-overlay.x xdr/Stellar-transaction.x xdr/Stellar-types.x; DO NOT EDIT.
+
+package gxdr
+
+import "fmt"
+import "context"
+import . 
"github.com/xdrpp/goxdr/xdr" + +var _ XDR +var _ = fmt.Sprintf +var _ context.Context + +// +// Data types defined in XDR file +// + +type Value = []byte + +type SCPBallot struct { + // n + Counter Uint32 + // x + Value Value +} + +type SCPStatementType int32 + +const ( + SCP_ST_PREPARE SCPStatementType = 0 + SCP_ST_CONFIRM SCPStatementType = 1 + SCP_ST_EXTERNALIZE SCPStatementType = 2 + SCP_ST_NOMINATE SCPStatementType = 3 +) + +type SCPNomination struct { + // D + QuorumSetHash Hash + // X + Votes []Value + // Y + Accepted []Value +} + +type SCPStatement struct { + // v + NodeID NodeID + // i + SlotIndex Uint64 + Pledges XdrAnon_SCPStatement_Pledges +} +type XdrAnon_SCPStatement_Pledges struct { + // The union discriminant Type selects among the following arms: + // SCP_ST_PREPARE: + // Prepare() *XdrAnon_SCPStatement_Pledges_Prepare + // SCP_ST_CONFIRM: + // Confirm() *XdrAnon_SCPStatement_Pledges_Confirm + // SCP_ST_EXTERNALIZE: + // Externalize() *XdrAnon_SCPStatement_Pledges_Externalize + // SCP_ST_NOMINATE: + // Nominate() *SCPNomination + Type SCPStatementType + _u interface{} +} +type XdrAnon_SCPStatement_Pledges_Prepare struct { + // D + QuorumSetHash Hash + // b + Ballot SCPBallot + // p + Prepared *SCPBallot + // p' + PreparedPrime *SCPBallot + // c.n + NC Uint32 + // h.n + NH Uint32 +} +type XdrAnon_SCPStatement_Pledges_Confirm struct { + // b + Ballot SCPBallot + // p.n + NPrepared Uint32 + // c.n + NCommit Uint32 + // h.n + NH Uint32 + // D + QuorumSetHash Hash +} +type XdrAnon_SCPStatement_Pledges_Externalize struct { + // c + Commit SCPBallot + // h.n + NH Uint32 + // D used before EXTERNALIZE + CommitQuorumSetHash Hash +} + +type SCPEnvelope struct { + Statement SCPStatement + Signature Signature +} + +// supports things like: A,B,C,(D,E,F),(G,H,(I,J,K,L)) +// only allows 2 levels of nesting +type SCPQuorumSet struct { + Threshold Uint32 + Validators []NodeID + InnerSets []SCPQuorumSet +} + +type AccountID = PublicKey + +type Thresholds = [4]byte + +type String32 = string // bound 32 + +type String64 = string // bound 64 + +type SequenceNumber = Int64 + +type TimePoint = Uint64 + +type DataValue = []byte // bound 64 + +// SHA256(LiquidityPoolParameters) +type PoolID = Hash + +// 1-4 alphanumeric characters right-padded with 0 bytes +type AssetCode4 = [4]byte + +// 5-12 alphanumeric characters right-padded with 0 bytes +type AssetCode12 = [12]byte + +type AssetType int32 + +const ( + ASSET_TYPE_NATIVE AssetType = 0 + ASSET_TYPE_CREDIT_ALPHANUM4 AssetType = 1 + ASSET_TYPE_CREDIT_ALPHANUM12 AssetType = 2 + ASSET_TYPE_POOL_SHARE AssetType = 3 +) + +type AssetCode struct { + // The union discriminant Type selects among the following arms: + // ASSET_TYPE_CREDIT_ALPHANUM4: + // AssetCode4() *AssetCode4 + // ASSET_TYPE_CREDIT_ALPHANUM12: + // AssetCode12() *AssetCode12 + Type AssetType + _u interface{} +} + +type AlphaNum4 struct { + AssetCode AssetCode4 + Issuer AccountID +} + +type AlphaNum12 struct { + AssetCode AssetCode12 + Issuer AccountID +} + +type Asset struct { + // The union discriminant Type selects among the following arms: + // ASSET_TYPE_NATIVE: + // void + // ASSET_TYPE_CREDIT_ALPHANUM4: + // AlphaNum4() *AlphaNum4 + // ASSET_TYPE_CREDIT_ALPHANUM12: + // AlphaNum12() *AlphaNum12 + Type AssetType + _u interface{} +} + +// price in fractional representation +type Price struct { + // numerator + N Int32 + // denominator + D Int32 +} + +type Liabilities struct { + Buying Int64 + Selling Int64 +} + +// the 'Thresholds' type is packed uint8_t values +// defined by 
these indexes +type ThresholdIndexes int32 + +const ( + THRESHOLD_MASTER_WEIGHT ThresholdIndexes = 0 + THRESHOLD_LOW ThresholdIndexes = 1 + THRESHOLD_MED ThresholdIndexes = 2 + THRESHOLD_HIGH ThresholdIndexes = 3 +) + +type LedgerEntryType int32 + +const ( + ACCOUNT LedgerEntryType = 0 + TRUSTLINE LedgerEntryType = 1 + OFFER LedgerEntryType = 2 + DATA LedgerEntryType = 3 + CLAIMABLE_BALANCE LedgerEntryType = 4 + LIQUIDITY_POOL LedgerEntryType = 5 +) + +type Signer struct { + Key SignerKey + // really only need 1 byte + Weight Uint32 +} + +type AccountFlags int32 + +const ( + // Flags set on issuer accounts + // TrustLines are created with authorized set to "false" requiring + // the issuer to set it for each TrustLine + AUTH_REQUIRED_FLAG AccountFlags = AccountFlags(0x1) + // If set, the authorized flag in TrustLines can be cleared + // otherwise, authorization cannot be revoked + AUTH_REVOCABLE_FLAG AccountFlags = AccountFlags(0x2) + // Once set, causes all AUTH_* flags to be read-only + AUTH_IMMUTABLE_FLAG AccountFlags = AccountFlags(0x4) + // Trustlines are created with clawback enabled set to "true", + // and claimable balances created from those trustlines are created + // with clawback enabled set to "true" + AUTH_CLAWBACK_ENABLED_FLAG AccountFlags = AccountFlags(0x8) +) + +// mask for all valid flags +const MASK_ACCOUNT_FLAGS = 0x7 + +const MASK_ACCOUNT_FLAGS_V17 = 0xF + +// maximum number of signers +const MAX_SIGNERS = 20 + +type SponsorshipDescriptor = *AccountID + +type AccountEntryExtensionV2 struct { + NumSponsored Uint32 + NumSponsoring Uint32 + SignerSponsoringIDs []SponsorshipDescriptor // bound MAX_SIGNERS + Ext XdrAnon_AccountEntryExtensionV2_Ext +} +type XdrAnon_AccountEntryExtensionV2_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type AccountEntryExtensionV1 struct { + Liabilities Liabilities + Ext XdrAnon_AccountEntryExtensionV1_Ext +} +type XdrAnon_AccountEntryExtensionV1_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + // 2: + // V2() *AccountEntryExtensionV2 + V int32 + _u interface{} +} + +/* AccountEntry + + Main entry representing a user in Stellar. All transactions are + performed using an account. + + Other ledger entries created require an account. 
+ +*/ +type AccountEntry struct { + // master public key for this account + AccountID AccountID + // in stroops + Balance Int64 + // last sequence number used for this account + SeqNum SequenceNumber + // number of sub-entries this account has + NumSubEntries Uint32 + // drives the reserve + InflationDest *AccountID + // see AccountFlags + Flags Uint32 + // can be used for reverse federation and memo lookup + HomeDomain String32 + // fields used for signatures + // thresholds stores unsigned bytes: [weight of master|low|medium|high] + Thresholds Thresholds + // possible signers for this account + Signers []Signer // bound MAX_SIGNERS + Ext XdrAnon_AccountEntry_Ext +} + +// reserved for future use +type XdrAnon_AccountEntry_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + // 1: + // V1() *AccountEntryExtensionV1 + V int32 + _u interface{} +} + +type TrustLineFlags int32 + +const ( + // issuer has authorized account to perform transactions with its credit + AUTHORIZED_FLAG TrustLineFlags = 1 + // issuer has authorized account to maintain and reduce liabilities for its + // credit + AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG TrustLineFlags = 2 + // issuer has specified that it may clawback its credit, and that claimable + // balances created with its credit may also be clawed back + TRUSTLINE_CLAWBACK_ENABLED_FLAG TrustLineFlags = 4 +) + +// mask for all trustline flags +const MASK_TRUSTLINE_FLAGS = 1 + +const MASK_TRUSTLINE_FLAGS_V13 = 3 + +const MASK_TRUSTLINE_FLAGS_V17 = 7 + +type LiquidityPoolType int32 + +const ( + LIQUIDITY_POOL_CONSTANT_PRODUCT LiquidityPoolType = 0 +) + +type TrustLineAsset struct { + // The union discriminant Type selects among the following arms: + // ASSET_TYPE_NATIVE: + // void + // ASSET_TYPE_CREDIT_ALPHANUM4: + // AlphaNum4() *AlphaNum4 + // ASSET_TYPE_CREDIT_ALPHANUM12: + // AlphaNum12() *AlphaNum12 + // ASSET_TYPE_POOL_SHARE: + // LiquidityPoolID() *PoolID + Type AssetType + _u interface{} +} + +type TrustLineEntryExtensionV2 struct { + LiquidityPoolUseCount Int32 + Ext XdrAnon_TrustLineEntryExtensionV2_Ext +} +type XdrAnon_TrustLineEntryExtensionV2_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type TrustLineEntry struct { + // account this trustline belongs to + AccountID AccountID + // type of asset (with issuer) + Asset TrustLineAsset + // how much of this asset the user has. + Balance Int64 + // balance cannot be above this + Limit Int64 + // see TrustLineFlags + Flags Uint32 + Ext XdrAnon_TrustLineEntry_Ext +} + +// reserved for future use +type XdrAnon_TrustLineEntry_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + // 1: + // V1() *XdrAnon_TrustLineEntry_Ext_V1 + V int32 + _u interface{} +} +type XdrAnon_TrustLineEntry_Ext_V1 struct { + Liabilities Liabilities + Ext XdrAnon_TrustLineEntry_Ext_V1_Ext +} +type XdrAnon_TrustLineEntry_Ext_V1_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + // 2: + // V2() *TrustLineEntryExtensionV2 + V int32 + _u interface{} +} + +type OfferEntryFlags int32 + +const ( + // issuer has authorized account to perform transactions with its credit + PASSIVE_FLAG OfferEntryFlags = 1 +) + +// Mask for OfferEntry flags +const MASK_OFFERENTRY_FLAGS = 1 + +/* OfferEntry + An offer is the building block of the offer book, they are automatically + claimed by payments when the price set by the owner is met. 
+ + For example an Offer is selling 10A where 1A is priced at 1.5B + +*/ +type OfferEntry struct { + SellerID AccountID + OfferID Int64 + // A + Selling Asset + // B + Buying Asset + // amount of A + Amount Int64 + /* price for this offer: + price of A in terms of B + price=AmountB/AmountA=priceNumerator/priceDenominator + price is after fees + */ + Price Price + // see OfferEntryFlags + Flags Uint32 + Ext XdrAnon_OfferEntry_Ext +} + +// reserved for future use +type XdrAnon_OfferEntry_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +/* DataEntry + Data can be attached to accounts. +*/ +type DataEntry struct { + // account this data belongs to + AccountID AccountID + DataName String64 + DataValue DataValue + Ext XdrAnon_DataEntry_Ext +} + +// reserved for future use +type XdrAnon_DataEntry_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type ClaimPredicateType int32 + +const ( + CLAIM_PREDICATE_UNCONDITIONAL ClaimPredicateType = 0 + CLAIM_PREDICATE_AND ClaimPredicateType = 1 + CLAIM_PREDICATE_OR ClaimPredicateType = 2 + CLAIM_PREDICATE_NOT ClaimPredicateType = 3 + CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME ClaimPredicateType = 4 + CLAIM_PREDICATE_BEFORE_RELATIVE_TIME ClaimPredicateType = 5 +) + +type ClaimPredicate struct { + // The union discriminant Type selects among the following arms: + // CLAIM_PREDICATE_UNCONDITIONAL: + // void + // CLAIM_PREDICATE_AND: + // AndPredicates() *[]ClaimPredicate // bound 2 + // CLAIM_PREDICATE_OR: + // OrPredicates() *[]ClaimPredicate // bound 2 + // CLAIM_PREDICATE_NOT: + // NotPredicate() **ClaimPredicate + // CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: + // AbsBefore() *Int64 + // CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: + // RelBefore() *Int64 + Type ClaimPredicateType + _u interface{} +} + +type ClaimantType int32 + +const ( + CLAIMANT_TYPE_V0 ClaimantType = 0 +) + +type Claimant struct { + // The union discriminant Type selects among the following arms: + // CLAIMANT_TYPE_V0: + // V0() *XdrAnon_Claimant_V0 + Type ClaimantType + _u interface{} +} +type XdrAnon_Claimant_V0 struct { + // The account that can use this condition + Destination AccountID + // Claimable if predicate is true + Predicate ClaimPredicate +} + +type ClaimableBalanceIDType int32 + +const ( + CLAIMABLE_BALANCE_ID_TYPE_V0 ClaimableBalanceIDType = 0 +) + +type ClaimableBalanceID struct { + // The union discriminant Type selects among the following arms: + // CLAIMABLE_BALANCE_ID_TYPE_V0: + // V0() *Hash + Type ClaimableBalanceIDType + _u interface{} +} + +type ClaimableBalanceFlags int32 + +const ( + // If set, the issuer account of the asset held by the claimable balance may + // clawback the claimable balance + CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG ClaimableBalanceFlags = ClaimableBalanceFlags(0x1) +) + +const MASK_CLAIMABLE_BALANCE_FLAGS = 0x1 + +type ClaimableBalanceEntryExtensionV1 struct { + Ext XdrAnon_ClaimableBalanceEntryExtensionV1_Ext + // see ClaimableBalanceFlags + Flags Uint32 +} +type XdrAnon_ClaimableBalanceEntryExtensionV1_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type ClaimableBalanceEntry struct { + // Unique identifier for this ClaimableBalanceEntry + BalanceID ClaimableBalanceID + // List of claimants with associated predicate + Claimants []Claimant // bound 10 + // Any asset including native + Asset Asset + // Amount of asset + 
Amount Int64 + Ext XdrAnon_ClaimableBalanceEntry_Ext +} + +// reserved for future use +type XdrAnon_ClaimableBalanceEntry_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + // 1: + // V1() *ClaimableBalanceEntryExtensionV1 + V int32 + _u interface{} +} + +type LiquidityPoolConstantProductParameters struct { + // assetA < assetB + AssetA Asset + AssetB Asset + // Fee is in basis points, so the actual rate is (fee/100)% + Fee Int32 +} + +type LiquidityPoolEntry struct { + LiquidityPoolID PoolID + Body XdrAnon_LiquidityPoolEntry_Body +} +type XdrAnon_LiquidityPoolEntry_Body struct { + // The union discriminant Type selects among the following arms: + // LIQUIDITY_POOL_CONSTANT_PRODUCT: + // ConstantProduct() *XdrAnon_LiquidityPoolEntry_Body_ConstantProduct + Type LiquidityPoolType + _u interface{} +} +type XdrAnon_LiquidityPoolEntry_Body_ConstantProduct struct { + Params LiquidityPoolConstantProductParameters + // amount of A in the pool + ReserveA Int64 + // amount of B in the pool + ReserveB Int64 + // total number of pool shares issued + TotalPoolShares Int64 + // number of trust lines for the associated pool shares + PoolSharesTrustLineCount Int64 +} + +type LedgerEntryExtensionV1 struct { + SponsoringID SponsorshipDescriptor + Ext XdrAnon_LedgerEntryExtensionV1_Ext +} +type XdrAnon_LedgerEntryExtensionV1_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type LedgerEntry struct { + // ledger the LedgerEntry was last changed + LastModifiedLedgerSeq Uint32 + Data XdrAnon_LedgerEntry_Data + Ext XdrAnon_LedgerEntry_Ext +} +type XdrAnon_LedgerEntry_Data struct { + // The union discriminant Type selects among the following arms: + // ACCOUNT: + // Account() *AccountEntry + // TRUSTLINE: + // TrustLine() *TrustLineEntry + // OFFER: + // Offer() *OfferEntry + // DATA: + // Data() *DataEntry + // CLAIMABLE_BALANCE: + // ClaimableBalance() *ClaimableBalanceEntry + // LIQUIDITY_POOL: + // LiquidityPool() *LiquidityPoolEntry + Type LedgerEntryType + _u interface{} +} + +// reserved for future use +type XdrAnon_LedgerEntry_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + // 1: + // V1() *LedgerEntryExtensionV1 + V int32 + _u interface{} +} + +type LedgerKey struct { + // The union discriminant Type selects among the following arms: + // ACCOUNT: + // Account() *XdrAnon_LedgerKey_Account + // TRUSTLINE: + // TrustLine() *XdrAnon_LedgerKey_TrustLine + // OFFER: + // Offer() *XdrAnon_LedgerKey_Offer + // DATA: + // Data() *XdrAnon_LedgerKey_Data + // CLAIMABLE_BALANCE: + // ClaimableBalance() *XdrAnon_LedgerKey_ClaimableBalance + // LIQUIDITY_POOL: + // LiquidityPool() *XdrAnon_LedgerKey_LiquidityPool + Type LedgerEntryType + _u interface{} +} +type XdrAnon_LedgerKey_Account struct { + AccountID AccountID +} +type XdrAnon_LedgerKey_TrustLine struct { + AccountID AccountID + Asset TrustLineAsset +} +type XdrAnon_LedgerKey_Offer struct { + SellerID AccountID + OfferID Int64 +} +type XdrAnon_LedgerKey_Data struct { + AccountID AccountID + DataName String64 +} +type XdrAnon_LedgerKey_ClaimableBalance struct { + BalanceID ClaimableBalanceID +} +type XdrAnon_LedgerKey_LiquidityPool struct { + LiquidityPoolID PoolID +} + +// list of all envelope types used in the application +// those are prefixes used when building signatures for +// the respective envelopes +type EnvelopeType int32 + +const ( + ENVELOPE_TYPE_TX_V0 EnvelopeType = 0 + 
ENVELOPE_TYPE_SCP EnvelopeType = 1 + ENVELOPE_TYPE_TX EnvelopeType = 2 + ENVELOPE_TYPE_AUTH EnvelopeType = 3 + ENVELOPE_TYPE_SCPVALUE EnvelopeType = 4 + ENVELOPE_TYPE_TX_FEE_BUMP EnvelopeType = 5 + ENVELOPE_TYPE_OP_ID EnvelopeType = 6 + ENVELOPE_TYPE_POOL_REVOKE_OP_ID EnvelopeType = 7 +) + +type UpgradeType = []byte // bound 128 + +type StellarValueType int32 + +const ( + STELLAR_VALUE_BASIC StellarValueType = 0 + STELLAR_VALUE_SIGNED StellarValueType = 1 +) + +type LedgerCloseValueSignature struct { + // which node introduced the value + NodeID NodeID + // nodeID's signature + Signature Signature +} + +/* StellarValue is the value used by SCP to reach consensus on a given ledger + */ +type StellarValue struct { + // transaction set to apply to previous ledger + TxSetHash Hash + // network close time + CloseTime TimePoint + // upgrades to apply to the previous ledger (usually empty) + // this is a vector of encoded 'LedgerUpgrade' so that nodes can drop + // unknown steps during consensus if needed. + // see notes below on 'LedgerUpgrade' for more detail + // max size is dictated by number of upgrade types (+ room for future) + Upgrades []UpgradeType // bound 6 + Ext XdrAnon_StellarValue_Ext +} + +// reserved for future use +type XdrAnon_StellarValue_Ext struct { + // The union discriminant V selects among the following arms: + // STELLAR_VALUE_BASIC: + // void + // STELLAR_VALUE_SIGNED: + // LcValueSignature() *LedgerCloseValueSignature + V StellarValueType + _u interface{} +} + +const MASK_LEDGER_HEADER_FLAGS = 0x7 + +type LedgerHeaderFlags int32 + +const ( + DISABLE_LIQUIDITY_POOL_TRADING_FLAG LedgerHeaderFlags = LedgerHeaderFlags(0x1) + DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG LedgerHeaderFlags = LedgerHeaderFlags(0x2) + DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG LedgerHeaderFlags = LedgerHeaderFlags(0x4) +) + +type LedgerHeaderExtensionV1 struct { + // LedgerHeaderFlags + Flags Uint32 + Ext XdrAnon_LedgerHeaderExtensionV1_Ext +} +type XdrAnon_LedgerHeaderExtensionV1_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +/* The LedgerHeader is the highest level structure representing the + * state of a ledger, cryptographically linked to previous ledgers. + */ +type LedgerHeader struct { + // the protocol version of the ledger + LedgerVersion Uint32 + // hash of the previous ledger header + PreviousLedgerHash Hash + // what consensus agreed to + ScpValue StellarValue + // the TransactionResultSet that led to this ledger + TxSetResultHash Hash + // hash of the ledger state + BucketListHash Hash + // sequence number of this ledger + LedgerSeq Uint32 + // total number of stroops in existence. + TotalCoins Int64 + // fees burned since last inflation run + FeePool Int64 + // inflation sequence number + InflationSeq Uint32 + // last used global ID, used for generating objects + IdPool Uint64 + // base fee per operation in stroops + BaseFee Uint32 + // account base reserve in stroops + BaseReserve Uint32 + // maximum size a transaction set can be + MaxTxSetSize Uint32 + // hashes of ledgers in the past. 
allows you to jump back + SkipList [4]Hash + Ext XdrAnon_LedgerHeader_Ext +} + +// reserved for future use +type XdrAnon_LedgerHeader_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + // 1: + // V1() *LedgerHeaderExtensionV1 + V int32 + _u interface{} +} + +/* Ledger upgrades +note that the `upgrades` field from StellarValue is normalized such that +it only contains one entry per LedgerUpgradeType, and entries are sorted +in ascending order +*/ +type LedgerUpgradeType int32 + +const ( + LEDGER_UPGRADE_VERSION LedgerUpgradeType = 1 + LEDGER_UPGRADE_BASE_FEE LedgerUpgradeType = 2 + LEDGER_UPGRADE_MAX_TX_SET_SIZE LedgerUpgradeType = 3 + LEDGER_UPGRADE_BASE_RESERVE LedgerUpgradeType = 4 + LEDGER_UPGRADE_FLAGS LedgerUpgradeType = 5 +) + +type LedgerUpgrade struct { + // The union discriminant Type selects among the following arms: + // LEDGER_UPGRADE_VERSION: + // NewLedgerVersion() *Uint32 + // LEDGER_UPGRADE_BASE_FEE: + // NewBaseFee() *Uint32 + // LEDGER_UPGRADE_MAX_TX_SET_SIZE: + // NewMaxTxSetSize() *Uint32 + // LEDGER_UPGRADE_BASE_RESERVE: + // NewBaseReserve() *Uint32 + // LEDGER_UPGRADE_FLAGS: + // NewFlags() *Uint32 + Type LedgerUpgradeType + _u interface{} +} + +/* Entries used to define the bucket list */ +type BucketEntryType int32 + +const ( + // At-and-after protocol 11: bucket metadata, should come first. + METAENTRY BucketEntryType = -1 + // Before protocol 11: created-or-updated; + LIVEENTRY BucketEntryType = 0 + // At-and-after protocol 11: only updated. + DEADENTRY BucketEntryType = 1 + // At-and-after protocol 11: only created. + INITENTRY BucketEntryType = 2 +) + +type BucketMetadata struct { + // Indicates the protocol version used to create / merge this bucket. + LedgerVersion Uint32 + Ext XdrAnon_BucketMetadata_Ext +} + +// reserved for future use +type XdrAnon_BucketMetadata_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type BucketEntry struct { + // The union discriminant Type selects among the following arms: + // LIVEENTRY, INITENTRY: + // LiveEntry() *LedgerEntry + // DEADENTRY: + // DeadEntry() *LedgerKey + // METAENTRY: + // MetaEntry() *BucketMetadata + Type BucketEntryType + _u interface{} +} + +// Transaction sets are the unit used by SCP to decide on transitions +// between ledgers +type TransactionSet struct { + PreviousLedgerHash Hash + Txs []TransactionEnvelope +} + +type TransactionResultPair struct { + TransactionHash Hash + // result for the transaction + Result TransactionResult +} + +// TransactionResultSet is used to recover results between ledgers +type TransactionResultSet struct { + Results []TransactionResultPair +} + +type TransactionHistoryEntry struct { + LedgerSeq Uint32 + TxSet TransactionSet + Ext XdrAnon_TransactionHistoryEntry_Ext +} + +// reserved for future use +type XdrAnon_TransactionHistoryEntry_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type TransactionHistoryResultEntry struct { + LedgerSeq Uint32 + TxResultSet TransactionResultSet + Ext XdrAnon_TransactionHistoryResultEntry_Ext +} + +// reserved for future use +type XdrAnon_TransactionHistoryResultEntry_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type LedgerHeaderHistoryEntry struct { + Hash Hash + Header LedgerHeader + Ext XdrAnon_LedgerHeaderHistoryEntry_Ext +} + +// reserved for future 
use +type XdrAnon_LedgerHeaderHistoryEntry_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type LedgerSCPMessages struct { + LedgerSeq Uint32 + Messages []SCPEnvelope +} + +// note: ledgerMessages may refer to any quorumSets encountered +// in the file so far, not just the one from this entry +type SCPHistoryEntryV0 struct { + // additional quorum sets used by ledgerMessages + QuorumSets []SCPQuorumSet + LedgerMessages LedgerSCPMessages +} + +// SCP history file is an array of these +type SCPHistoryEntry struct { + // The union discriminant V selects among the following arms: + // 0: + // V0() *SCPHistoryEntryV0 + V int32 + _u interface{} +} + +type LedgerEntryChangeType int32 + +const ( + // entry was added to the ledger + LEDGER_ENTRY_CREATED LedgerEntryChangeType = 0 + // entry was modified in the ledger + LEDGER_ENTRY_UPDATED LedgerEntryChangeType = 1 + // entry was removed from the ledger + LEDGER_ENTRY_REMOVED LedgerEntryChangeType = 2 + // value of the entry + LEDGER_ENTRY_STATE LedgerEntryChangeType = 3 +) + +type LedgerEntryChange struct { + // The union discriminant Type selects among the following arms: + // LEDGER_ENTRY_CREATED: + // Created() *LedgerEntry + // LEDGER_ENTRY_UPDATED: + // Updated() *LedgerEntry + // LEDGER_ENTRY_REMOVED: + // Removed() *LedgerKey + // LEDGER_ENTRY_STATE: + // State() *LedgerEntry + Type LedgerEntryChangeType + _u interface{} +} + +type LedgerEntryChanges = []LedgerEntryChange + +type OperationMeta struct { + Changes LedgerEntryChanges +} + +type TransactionMetaV1 struct { + // tx level changes if any + TxChanges LedgerEntryChanges + // meta for each operation + Operations []OperationMeta +} + +type TransactionMetaV2 struct { + // tx level changes before operations + TxChangesBefore LedgerEntryChanges + // are applied if any + Operations []OperationMeta + // tx level changes after operations are + TxChangesAfter LedgerEntryChanges +} + +// this is the meta produced when applying transactions +// it does not include pre-apply updates such as fees +type TransactionMeta struct { + // The union discriminant V selects among the following arms: + // 0: + // Operations() *[]OperationMeta + // 1: + // V1() *TransactionMetaV1 + // 2: + // V2() *TransactionMetaV2 + V int32 + _u interface{} +} + +// This struct groups together changes on a per transaction basis +// note however that fees and transaction application are done in separate +// phases +type TransactionResultMeta struct { + Result TransactionResultPair + FeeProcessing LedgerEntryChanges + TxApplyProcessing TransactionMeta +} + +// this represents a single upgrade that was performed as part of a ledger +// upgrade +type UpgradeEntryMeta struct { + Upgrade LedgerUpgrade + Changes LedgerEntryChanges +} + +type LedgerCloseMetaV0 struct { + LedgerHeader LedgerHeaderHistoryEntry + // NB: txSet is sorted in "Hash order" + TxSet TransactionSet + // NB: transactions are sorted in apply order here + // fees for all transactions are processed first + // followed by applying transactions + TxProcessing []TransactionResultMeta + // upgrades are applied last + UpgradesProcessing []UpgradeEntryMeta + // other misc information attached to the ledger close + ScpInfo []SCPHistoryEntry +} + +type LedgerCloseMeta struct { + // The union discriminant V selects among the following arms: + // 0: + // V0() *LedgerCloseMetaV0 + V int32 + _u interface{} +} + +type ErrorCode int32 + +const ( + // Unspecific error + ERR_MISC ErrorCode = 0 + // 
Malformed data + ERR_DATA ErrorCode = 1 + // Misconfiguration error + ERR_CONF ErrorCode = 2 + // Authentication failure + ERR_AUTH ErrorCode = 3 + // System overloaded + ERR_LOAD ErrorCode = 4 +) + +type Error struct { + Code ErrorCode + Msg string // bound 100 +} + +type AuthCert struct { + Pubkey Curve25519Public + Expiration Uint64 + Sig Signature +} + +type Hello struct { + LedgerVersion Uint32 + OverlayVersion Uint32 + OverlayMinVersion Uint32 + NetworkID Hash + VersionStr string // bound 100 + ListeningPort int32 + PeerID NodeID + Cert AuthCert + Nonce Uint256 +} + +type Auth struct { + // Empty message, just to confirm + // establishment of MAC keys. + Unused int32 +} + +type IPAddrType int32 + +const ( + IPv4 IPAddrType = 0 + IPv6 IPAddrType = 1 +) + +type PeerAddress struct { + Ip XdrAnon_PeerAddress_Ip + Port Uint32 + NumFailures Uint32 +} +type XdrAnon_PeerAddress_Ip struct { + // The union discriminant Type selects among the following arms: + // IPv4: + // Ipv4() *[4]byte + // IPv6: + // Ipv6() *[16]byte + Type IPAddrType + _u interface{} +} + +type MessageType int32 + +const ( + ERROR_MSG MessageType = 0 + AUTH MessageType = 2 + DONT_HAVE MessageType = 3 + // gets a list of peers this guy knows about + GET_PEERS MessageType = 4 + PEERS MessageType = 5 + // gets a particular txset by hash + GET_TX_SET MessageType = 6 + TX_SET MessageType = 7 + // pass on a tx you have heard about + TRANSACTION MessageType = 8 + // SCP + GET_SCP_QUORUMSET MessageType = 9 + SCP_QUORUMSET MessageType = 10 + SCP_MESSAGE MessageType = 11 + GET_SCP_STATE MessageType = 12 + // new messages + HELLO MessageType = 13 + SURVEY_REQUEST MessageType = 14 + SURVEY_RESPONSE MessageType = 15 +) + +type DontHave struct { + Type MessageType + ReqHash Uint256 +} + +type SurveyMessageCommandType int32 + +const ( + SURVEY_TOPOLOGY SurveyMessageCommandType = 0 +) + +type SurveyRequestMessage struct { + SurveyorPeerID NodeID + SurveyedPeerID NodeID + LedgerNum Uint32 + EncryptionKey Curve25519Public + CommandType SurveyMessageCommandType +} + +type SignedSurveyRequestMessage struct { + RequestSignature Signature + Request SurveyRequestMessage +} + +type EncryptedBody = []byte // bound 64000 + +type SurveyResponseMessage struct { + SurveyorPeerID NodeID + SurveyedPeerID NodeID + LedgerNum Uint32 + CommandType SurveyMessageCommandType + EncryptedBody EncryptedBody +} + +type SignedSurveyResponseMessage struct { + ResponseSignature Signature + Response SurveyResponseMessage +} + +type PeerStats struct { + Id NodeID + VersionStr string // bound 100 + MessagesRead Uint64 + MessagesWritten Uint64 + BytesRead Uint64 + BytesWritten Uint64 + SecondsConnected Uint64 + UniqueFloodBytesRecv Uint64 + DuplicateFloodBytesRecv Uint64 + UniqueFetchBytesRecv Uint64 + DuplicateFetchBytesRecv Uint64 + UniqueFloodMessageRecv Uint64 + DuplicateFloodMessageRecv Uint64 + UniqueFetchMessageRecv Uint64 + DuplicateFetchMessageRecv Uint64 +} + +type PeerStatList = []PeerStats // bound 25 + +type TopologyResponseBody struct { + InboundPeers PeerStatList + OutboundPeers PeerStatList + TotalInboundPeerCount Uint32 + TotalOutboundPeerCount Uint32 +} + +type SurveyResponseBody struct { + // The union discriminant Type selects among the following arms: + // SURVEY_TOPOLOGY: + // TopologyResponseBody() *TopologyResponseBody + Type SurveyMessageCommandType + _u interface{} +} + +type StellarMessage struct { + // The union discriminant Type selects among the following arms: + // ERROR_MSG: + // Error() *Error + // HELLO: + // Hello() *Hello + // AUTH: + 
// Auth() *Auth + // DONT_HAVE: + // DontHave() *DontHave + // GET_PEERS: + // void + // PEERS: + // Peers() *[]PeerAddress // bound 100 + // GET_TX_SET: + // TxSetHash() *Uint256 + // TX_SET: + // TxSet() *TransactionSet + // TRANSACTION: + // Transaction() *TransactionEnvelope + // SURVEY_REQUEST: + // SignedSurveyRequestMessage() *SignedSurveyRequestMessage + // SURVEY_RESPONSE: + // SignedSurveyResponseMessage() *SignedSurveyResponseMessage + // GET_SCP_QUORUMSET: + // QSetHash() *Uint256 + // SCP_QUORUMSET: + // QSet() *SCPQuorumSet + // SCP_MESSAGE: + // Envelope() *SCPEnvelope + // GET_SCP_STATE: + // GetSCPLedgerSeq() *Uint32 + Type MessageType + _u interface{} +} + +type AuthenticatedMessage struct { + // The union discriminant V selects among the following arms: + // 0: + // V0() *XdrAnon_AuthenticatedMessage_V0 + V Uint32 + _u interface{} +} +type XdrAnon_AuthenticatedMessage_V0 struct { + Sequence Uint64 + Message StellarMessage + Mac HmacSha256Mac +} + +type LiquidityPoolParameters struct { + // The union discriminant Type selects among the following arms: + // LIQUIDITY_POOL_CONSTANT_PRODUCT: + // ConstantProduct() *LiquidityPoolConstantProductParameters + Type LiquidityPoolType + _u interface{} +} + +// Source or destination of a payment operation +type MuxedAccount struct { + // The union discriminant Type selects among the following arms: + // KEY_TYPE_ED25519: + // Ed25519() *Uint256 + // KEY_TYPE_MUXED_ED25519: + // Med25519() *XdrAnon_MuxedAccount_Med25519 + Type CryptoKeyType + _u interface{} +} +type XdrAnon_MuxedAccount_Med25519 struct { + Id Uint64 + Ed25519 Uint256 +} + +type DecoratedSignature struct { + // last 4 bytes of the public key, used as a hint + Hint SignatureHint + // actual signature + Signature Signature +} + +type OperationType int32 + +const ( + CREATE_ACCOUNT OperationType = 0 + PAYMENT OperationType = 1 + PATH_PAYMENT_STRICT_RECEIVE OperationType = 2 + MANAGE_SELL_OFFER OperationType = 3 + CREATE_PASSIVE_SELL_OFFER OperationType = 4 + SET_OPTIONS OperationType = 5 + CHANGE_TRUST OperationType = 6 + ALLOW_TRUST OperationType = 7 + ACCOUNT_MERGE OperationType = 8 + INFLATION OperationType = 9 + MANAGE_DATA OperationType = 10 + BUMP_SEQUENCE OperationType = 11 + MANAGE_BUY_OFFER OperationType = 12 + PATH_PAYMENT_STRICT_SEND OperationType = 13 + CREATE_CLAIMABLE_BALANCE OperationType = 14 + CLAIM_CLAIMABLE_BALANCE OperationType = 15 + BEGIN_SPONSORING_FUTURE_RESERVES OperationType = 16 + END_SPONSORING_FUTURE_RESERVES OperationType = 17 + REVOKE_SPONSORSHIP OperationType = 18 + CLAWBACK OperationType = 19 + CLAWBACK_CLAIMABLE_BALANCE OperationType = 20 + SET_TRUST_LINE_FLAGS OperationType = 21 + LIQUIDITY_POOL_DEPOSIT OperationType = 22 + LIQUIDITY_POOL_WITHDRAW OperationType = 23 +) + +/* CreateAccount +Creates and funds a new account with the specified starting balance. + +Threshold: med + +Result: CreateAccountResult + +*/ +type CreateAccountOp struct { + // account to create + Destination AccountID + // amount they end up with + StartingBalance Int64 +} + +/* Payment + + Send an amount in specified asset to a destination account. + + Threshold: med + + Result: PaymentResult +*/ +type PaymentOp struct { + // recipient of the payment + Destination MuxedAccount + // what they end up with + Asset Asset + // amount they end up with + Amount Int64 +} + +/* PathPaymentStrictReceive + +send an amount to a destination account through a path. +(up to sendMax, sendAsset) +(X0, Path[0]) .. 
(Xn, Path[n]) +(destAmount, destAsset) + +Threshold: med + +Result: PathPaymentStrictReceiveResult +*/ +type PathPaymentStrictReceiveOp struct { + // asset we pay with + SendAsset Asset + // the maximum amount of sendAsset to + SendMax Int64 + // recipient of the payment + Destination MuxedAccount + // what they end up with + DestAsset Asset + // amount they end up with + DestAmount Int64 + // additional hops it must go through to get there + Path []Asset // bound 5 +} + +/* PathPaymentStrictSend + +send an amount to a destination account through a path. +(sendMax, sendAsset) +(X0, Path[0]) .. (Xn, Path[n]) +(at least destAmount, destAsset) + +Threshold: med + +Result: PathPaymentStrictSendResult +*/ +type PathPaymentStrictSendOp struct { + // asset we pay with + SendAsset Asset + // amount of sendAsset to send (excluding fees) + SendAmount Int64 + // recipient of the payment + Destination MuxedAccount + // what they end up with + DestAsset Asset + // the minimum amount of dest asset to + DestMin Int64 + // additional hops it must go through to get there + Path []Asset // bound 5 +} + +/* Creates, updates or deletes an offer + +Threshold: med + +Result: ManageSellOfferResult + +*/ +type ManageSellOfferOp struct { + Selling Asset + Buying Asset + // amount being sold. if set to 0, delete the offer + Amount Int64 + // price of thing being sold in terms of what you are buying + Price Price + // 0=create a new offer, otherwise edit an existing offer + OfferID Int64 +} + +/* Creates, updates or deletes an offer with amount in terms of buying asset + +Threshold: med + +Result: ManageBuyOfferResult + +*/ +type ManageBuyOfferOp struct { + Selling Asset + Buying Asset + // amount being bought. if set to 0, delete the offer + BuyAmount Int64 + // price of thing being bought in terms of what you are + Price Price + // 0=create a new offer, otherwise edit an existing offer + OfferID Int64 +} + +/* Creates an offer that doesn't take offers of the same price + +Threshold: med + +Result: CreatePassiveSellOfferResult + +*/ +type CreatePassiveSellOfferOp struct { + // A + Selling Asset + // B + Buying Asset + // amount taker gets + Amount Int64 + // cost of A in terms of B + Price Price +} + +/* Set Account Options + + updates "AccountEntry" fields. 
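   Every field of the SetOptionsOp defined below is optional: a nil pointer
   leaves the corresponding account setting untouched. A minimal sketch of that
   convention, using simplified local types rather than the generated ones:

   package main

   import "fmt"

   // setOptions is a simplified stand-in for SetOptionsOp: each field is a
   // pointer, and nil means "leave this setting unchanged".
   type setOptions struct {
       homeDomain   *string
       masterWeight *uint32
       medThreshold *uint32
   }

   func apply(op setOptions) {
       if op.homeDomain != nil {
           fmt.Println("set home domain to", *op.homeDomain)
       }
       if op.masterWeight != nil {
           fmt.Println("set master weight to", *op.masterWeight)
       }
       if op.medThreshold == nil {
           fmt.Println("medium threshold left unchanged")
       }
   }

   func main() {
       domain := "example.com"
       weight := uint32(2)
       apply(setOptions{homeDomain: &domain, masterWeight: &weight})
   }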
+ note: updating thresholds or signers requires high threshold + + Threshold: med or high + + Result: SetOptionsResult +*/ +type SetOptionsOp struct { + // sets the inflation destination + InflationDest *AccountID + // which flags to clear + ClearFlags *Uint32 + // which flags to set + SetFlags *Uint32 + // account threshold manipulation + MasterWeight *Uint32 + LowThreshold *Uint32 + MedThreshold *Uint32 + HighThreshold *Uint32 + // sets the home domain + HomeDomain *String32 + // Add, update or remove a signer for the account + // signer is deleted if the weight is 0 + Signer *Signer +} + +type ChangeTrustAsset struct { + // The union discriminant Type selects among the following arms: + // ASSET_TYPE_NATIVE: + // void + // ASSET_TYPE_CREDIT_ALPHANUM4: + // AlphaNum4() *AlphaNum4 + // ASSET_TYPE_CREDIT_ALPHANUM12: + // AlphaNum12() *AlphaNum12 + // ASSET_TYPE_POOL_SHARE: + // LiquidityPool() *LiquidityPoolParameters + Type AssetType + _u interface{} +} + +/* Creates, updates or deletes a trust line + + Threshold: med + + Result: ChangeTrustResult + +*/ +type ChangeTrustOp struct { + Line ChangeTrustAsset + // if limit is set to 0, deletes the trust line + Limit Int64 +} + +/* Updates the "authorized" flag of an existing trust line + this is called by the issuer of the related asset. + + note that authorize can only be set (and not cleared) if + the issuer account does not have the AUTH_REVOCABLE_FLAG set + Threshold: low + + Result: AllowTrustResult +*/ +type AllowTrustOp struct { + Trustor AccountID + Asset AssetCode + // One of 0, AUTHORIZED_FLAG, or AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG + Authorize Uint32 +} + +/* ManageData + Adds, Updates, or Deletes a key value pair associated with a particular + account. + + Threshold: med + + Result: ManageDataResult +*/ +type ManageDataOp struct { + DataName String64 + // set to null to clear + DataValue *DataValue +} + +/* Bump Sequence + + increases the sequence to a given level + + Threshold: low + + Result: BumpSequenceResult +*/ +type BumpSequenceOp struct { + BumpTo SequenceNumber +} + +/* Creates a claimable balance entry + + Threshold: med + + Result: CreateClaimableBalanceResult +*/ +type CreateClaimableBalanceOp struct { + Asset Asset + Amount Int64 + Claimants []Claimant // bound 10 +} + +/* Claims a claimable balance entry + + Threshold: low + + Result: ClaimClaimableBalanceResult +*/ +type ClaimClaimableBalanceOp struct { + BalanceID ClaimableBalanceID +} + +/* BeginSponsoringFutureReserves + + Establishes the is-sponsoring-future-reserves-for relationship between + the source account and sponsoredID + + Threshold: med + + Result: BeginSponsoringFutureReservesResult +*/ +type BeginSponsoringFutureReservesOp struct { + SponsoredID AccountID +} + +/* RevokeSponsorship + + If source account is not sponsored or is sponsored by the owner of the + specified entry or sub-entry, then attempt to revoke the sponsorship. + If source account is sponsored, then attempt to transfer the sponsorship + to the sponsor of source account. 
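   A rough sketch of that decision rule, with a hypothetical helper and plain
   string identifiers standing in for account IDs:

   package main

   import "fmt"

   // revocationAction mirrors the rule in the comment above: if the source
   // account is unsponsored, or its sponsor is the owner of the entry being
   // revoked, the sponsorship is revoked outright; otherwise it is transferred
   // to the source account's own sponsor.
   func revocationAction(entryOwner string, sourceSponsor *string) string {
       if sourceSponsor == nil || *sourceSponsor == entryOwner {
           return "revoke"
       }
       return "transfer to " + *sourceSponsor
   }

   func main() {
       fmt.Println(revocationAction("owner-account", nil)) // revoke
       sponsor := "sponsor-account"
       fmt.Println(revocationAction("owner-account", &sponsor)) // transfer to sponsor-account
   }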
+ + Threshold: med + + Result: RevokeSponsorshipResult +*/ +type RevokeSponsorshipType int32 + +const ( + REVOKE_SPONSORSHIP_LEDGER_ENTRY RevokeSponsorshipType = 0 + REVOKE_SPONSORSHIP_SIGNER RevokeSponsorshipType = 1 +) + +type RevokeSponsorshipOp struct { + // The union discriminant Type selects among the following arms: + // REVOKE_SPONSORSHIP_LEDGER_ENTRY: + // LedgerKey() *LedgerKey + // REVOKE_SPONSORSHIP_SIGNER: + // Signer() *XdrAnon_RevokeSponsorshipOp_Signer + Type RevokeSponsorshipType + _u interface{} +} +type XdrAnon_RevokeSponsorshipOp_Signer struct { + AccountID AccountID + SignerKey SignerKey +} + +/* Claws back an amount of an asset from an account + + Threshold: med + + Result: ClawbackResult +*/ +type ClawbackOp struct { + Asset Asset + From MuxedAccount + Amount Int64 +} + +/* Claws back a claimable balance + + Threshold: med + + Result: ClawbackClaimableBalanceResult +*/ +type ClawbackClaimableBalanceOp struct { + BalanceID ClaimableBalanceID +} + +/* SetTrustLineFlagsOp + + Updates the flags of an existing trust line. + This is called by the issuer of the related asset. + + Threshold: low + + Result: SetTrustLineFlagsResult +*/ +type SetTrustLineFlagsOp struct { + Trustor AccountID + Asset Asset + // which flags to clear + ClearFlags Uint32 + // which flags to set + SetFlags Uint32 +} + +const LIQUIDITY_POOL_FEE_V18 = 30 + +/* Deposit assets into a liquidity pool + + Threshold: med + + Result: LiquidityPoolDepositResult +*/ +type LiquidityPoolDepositOp struct { + LiquidityPoolID PoolID + // maximum amount of first asset to deposit + MaxAmountA Int64 + // maximum amount of second asset to deposit + MaxAmountB Int64 + // minimum depositA/depositB + MinPrice Price + // maximum depositA/depositB + MaxPrice Price +} + +/* Withdraw assets from a liquidity pool + + Threshold: med + + Result: LiquidityPoolWithdrawResult +*/ +type LiquidityPoolWithdrawOp struct { + LiquidityPoolID PoolID + // amount of pool shares to withdraw + Amount Int64 + // minimum amount of first asset to withdraw + MinAmountA Int64 + // minimum amount of second asset to withdraw + MinAmountB Int64 +} + +/* An operation is the lowest unit of work that a transaction does */ +type Operation struct { + // sourceAccount is the account used to run the operation + // if not set, the runtime defaults to "sourceAccount" specified at + // the transaction level + SourceAccount *MuxedAccount + Body XdrAnon_Operation_Body +} +type XdrAnon_Operation_Body struct { + // The union discriminant Type selects among the following arms: + // CREATE_ACCOUNT: + // CreateAccountOp() *CreateAccountOp + // PAYMENT: + // PaymentOp() *PaymentOp + // PATH_PAYMENT_STRICT_RECEIVE: + // PathPaymentStrictReceiveOp() *PathPaymentStrictReceiveOp + // MANAGE_SELL_OFFER: + // ManageSellOfferOp() *ManageSellOfferOp + // CREATE_PASSIVE_SELL_OFFER: + // CreatePassiveSellOfferOp() *CreatePassiveSellOfferOp + // SET_OPTIONS: + // SetOptionsOp() *SetOptionsOp + // CHANGE_TRUST: + // ChangeTrustOp() *ChangeTrustOp + // ALLOW_TRUST: + // AllowTrustOp() *AllowTrustOp + // ACCOUNT_MERGE: + // Destination() *MuxedAccount + // INFLATION: + // void + // MANAGE_DATA: + // ManageDataOp() *ManageDataOp + // BUMP_SEQUENCE: + // BumpSequenceOp() *BumpSequenceOp + // MANAGE_BUY_OFFER: + // ManageBuyOfferOp() *ManageBuyOfferOp + // PATH_PAYMENT_STRICT_SEND: + // PathPaymentStrictSendOp() *PathPaymentStrictSendOp + // CREATE_CLAIMABLE_BALANCE: + // CreateClaimableBalanceOp() *CreateClaimableBalanceOp + // CLAIM_CLAIMABLE_BALANCE: + // ClaimClaimableBalanceOp() 
*ClaimClaimableBalanceOp + // BEGIN_SPONSORING_FUTURE_RESERVES: + // BeginSponsoringFutureReservesOp() *BeginSponsoringFutureReservesOp + // END_SPONSORING_FUTURE_RESERVES: + // void + // REVOKE_SPONSORSHIP: + // RevokeSponsorshipOp() *RevokeSponsorshipOp + // CLAWBACK: + // ClawbackOp() *ClawbackOp + // CLAWBACK_CLAIMABLE_BALANCE: + // ClawbackClaimableBalanceOp() *ClawbackClaimableBalanceOp + // SET_TRUST_LINE_FLAGS: + // SetTrustLineFlagsOp() *SetTrustLineFlagsOp + // LIQUIDITY_POOL_DEPOSIT: + // LiquidityPoolDepositOp() *LiquidityPoolDepositOp + // LIQUIDITY_POOL_WITHDRAW: + // LiquidityPoolWithdrawOp() *LiquidityPoolWithdrawOp + Type OperationType + _u interface{} +} + +type HashIDPreimage struct { + // The union discriminant Type selects among the following arms: + // ENVELOPE_TYPE_OP_ID: + // OperationID() *XdrAnon_HashIDPreimage_OperationID + // ENVELOPE_TYPE_POOL_REVOKE_OP_ID: + // RevokeID() *XdrAnon_HashIDPreimage_RevokeID + Type EnvelopeType + _u interface{} +} +type XdrAnon_HashIDPreimage_OperationID struct { + SourceAccount AccountID + SeqNum SequenceNumber + OpNum Uint32 +} +type XdrAnon_HashIDPreimage_RevokeID struct { + SourceAccount AccountID + SeqNum SequenceNumber + OpNum Uint32 + LiquidityPoolID PoolID + Asset Asset +} + +type MemoType int32 + +const ( + MEMO_NONE MemoType = 0 + MEMO_TEXT MemoType = 1 + MEMO_ID MemoType = 2 + MEMO_HASH MemoType = 3 + MEMO_RETURN MemoType = 4 +) + +type Memo struct { + // The union discriminant Type selects among the following arms: + // MEMO_NONE: + // void + // MEMO_TEXT: + // Text() *string // bound 28 + // MEMO_ID: + // Id() *Uint64 + // MEMO_HASH: + // Hash() *Hash + // MEMO_RETURN: + // RetHash() *Hash + Type MemoType + _u interface{} +} + +type TimeBounds struct { + MinTime TimePoint + // 0 here means no maxTime + MaxTime TimePoint +} + +// maximum number of operations per transaction +const MAX_OPS_PER_TX = 100 + +// TransactionV0 is a transaction with the AccountID discriminant stripped off, +// leaving a raw ed25519 public key to identify the source account. This is used +// for backwards compatibility starting from the protocol 12/13 boundary. If an +// "old-style" TransactionEnvelope containing a Transaction is parsed with this +// XDR definition, it will be parsed as a "new-style" TransactionEnvelope +// containing a TransactionV0. 
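   The TimeBounds defined above give a transaction's validity window against the
   ledger close time, with a MaxTime of zero meaning no upper bound. A
   self-contained sketch of that check, assuming plain Unix-time values:

   package main

   import "fmt"

   // withinTimeBounds mirrors the TimeBounds semantics: the close time must be
   // at least minTime, and maxTime == 0 means the range has no upper bound.
   // The range is inclusive on both ends.
   func withinTimeBounds(closeTime, minTime, maxTime uint64) bool {
       if closeTime < minTime {
           return false
       }
       return maxTime == 0 || closeTime <= maxTime
   }

   func main() {
       fmt.Println(withinTimeBounds(1700000000, 0, 0))          // true: unbounded
       fmt.Println(withinTimeBounds(1700000000, 0, 1600000000)) // false: expired
   }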
+type TransactionV0 struct { + SourceAccountEd25519 Uint256 + Fee Uint32 + SeqNum SequenceNumber + TimeBounds *TimeBounds + Memo Memo + Operations []Operation // bound MAX_OPS_PER_TX + Ext XdrAnon_TransactionV0_Ext +} +type XdrAnon_TransactionV0_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type TransactionV0Envelope struct { + Tx TransactionV0 + /* Each decorated signature is a signature over the SHA256 hash of + * a TransactionSignaturePayload */ + Signatures []DecoratedSignature // bound 20 +} + +/* a transaction is a container for a set of operations + - is executed by an account + - fees are collected from the account + - operations are executed in order as one ACID transaction + either all operations are applied or none are + if any returns a failing code +*/ +type Transaction struct { + // account used to run the transaction + SourceAccount MuxedAccount + // the fee the sourceAccount will pay + Fee Uint32 + // sequence number to consume in the account + SeqNum SequenceNumber + // validity range (inclusive) for the last ledger close time + TimeBounds *TimeBounds + Memo Memo + Operations []Operation // bound MAX_OPS_PER_TX + Ext XdrAnon_Transaction_Ext +} + +// reserved for future use +type XdrAnon_Transaction_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type TransactionV1Envelope struct { + Tx Transaction + /* Each decorated signature is a signature over the SHA256 hash of + * a TransactionSignaturePayload */ + Signatures []DecoratedSignature // bound 20 +} + +type FeeBumpTransaction struct { + FeeSource MuxedAccount + Fee Int64 + InnerTx XdrAnon_FeeBumpTransaction_InnerTx + Ext XdrAnon_FeeBumpTransaction_Ext +} +type XdrAnon_FeeBumpTransaction_InnerTx struct { + // The union discriminant Type selects among the following arms: + // ENVELOPE_TYPE_TX: + // V1() *TransactionV1Envelope + Type EnvelopeType + _u interface{} +} +type XdrAnon_FeeBumpTransaction_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type FeeBumpTransactionEnvelope struct { + Tx FeeBumpTransaction + /* Each decorated signature is a signature over the SHA256 hash of + * a TransactionSignaturePayload */ + Signatures []DecoratedSignature // bound 20 +} + +/* A TransactionEnvelope wraps a transaction with signatures. 
*/ +type TransactionEnvelope struct { + // The union discriminant Type selects among the following arms: + // ENVELOPE_TYPE_TX_V0: + // V0() *TransactionV0Envelope + // ENVELOPE_TYPE_TX: + // V1() *TransactionV1Envelope + // ENVELOPE_TYPE_TX_FEE_BUMP: + // FeeBump() *FeeBumpTransactionEnvelope + Type EnvelopeType + _u interface{} +} + +type TransactionSignaturePayload struct { + NetworkId Hash + TaggedTransaction XdrAnon_TransactionSignaturePayload_TaggedTransaction +} +type XdrAnon_TransactionSignaturePayload_TaggedTransaction struct { + // The union discriminant Type selects among the following arms: + // ENVELOPE_TYPE_TX: + // Tx() *Transaction + // ENVELOPE_TYPE_TX_FEE_BUMP: + // FeeBump() *FeeBumpTransaction + Type EnvelopeType + _u interface{} +} + +type ClaimAtomType int32 + +const ( + CLAIM_ATOM_TYPE_V0 ClaimAtomType = 0 + CLAIM_ATOM_TYPE_ORDER_BOOK ClaimAtomType = 1 + CLAIM_ATOM_TYPE_LIQUIDITY_POOL ClaimAtomType = 2 +) + +// ClaimOfferAtomV0 is a ClaimOfferAtom with the AccountID discriminant stripped +// off, leaving a raw ed25519 public key to identify the source account. This is +// used for backwards compatibility starting from the protocol 17/18 boundary. +// If an "old-style" ClaimOfferAtom is parsed with this XDR definition, it will +// be parsed as a "new-style" ClaimAtom containing a ClaimOfferAtomV0. +type ClaimOfferAtomV0 struct { + // emitted to identify the offer + SellerEd25519 Uint256 + OfferID Int64 + // amount and asset taken from the owner + AssetSold Asset + AmountSold Int64 + // amount and asset sent to the owner + AssetBought Asset + AmountBought Int64 +} + +type ClaimOfferAtom struct { + // emitted to identify the offer + SellerID AccountID + OfferID Int64 + // amount and asset taken from the owner + AssetSold Asset + AmountSold Int64 + // amount and asset sent to the owner + AssetBought Asset + AmountBought Int64 +} + +type ClaimLiquidityAtom struct { + LiquidityPoolID PoolID + // amount and asset taken from the pool + AssetSold Asset + AmountSold Int64 + // amount and asset sent to the pool + AssetBought Asset + AmountBought Int64 +} + +/* This result is used when offers are taken or liquidity is exchanged with a + liquidity pool during an operation +*/ +type ClaimAtom struct { + // The union discriminant Type selects among the following arms: + // CLAIM_ATOM_TYPE_V0: + // V0() *ClaimOfferAtomV0 + // CLAIM_ATOM_TYPE_ORDER_BOOK: + // OrderBook() *ClaimOfferAtom + // CLAIM_ATOM_TYPE_LIQUIDITY_POOL: + // LiquidityPool() *ClaimLiquidityAtom + Type ClaimAtomType + _u interface{} +} + +type CreateAccountResultCode int32 + +const ( + // account was created + CREATE_ACCOUNT_SUCCESS CreateAccountResultCode = 0 + // invalid destination + CREATE_ACCOUNT_MALFORMED CreateAccountResultCode = -1 + // not enough funds in source account + CREATE_ACCOUNT_UNDERFUNDED CreateAccountResultCode = -2 + // would create an account below the min reserve + CREATE_ACCOUNT_LOW_RESERVE CreateAccountResultCode = -3 + // account already exists + CREATE_ACCOUNT_ALREADY_EXIST CreateAccountResultCode = -4 +) + +type CreateAccountResult struct { + // The union discriminant Code selects among the following arms: + // CREATE_ACCOUNT_SUCCESS: + // void + // default: + // void + Code CreateAccountResultCode + _u interface{} +} + +type PaymentResultCode int32 + +const ( + // payment successfully completed + PAYMENT_SUCCESS PaymentResultCode = 0 + // bad input + PAYMENT_MALFORMED PaymentResultCode = -1 + // not enough funds in source account + PAYMENT_UNDERFUNDED PaymentResultCode = -2 + // no trust 
line on source account + PAYMENT_SRC_NO_TRUST PaymentResultCode = -3 + // source not authorized to transfer + PAYMENT_SRC_NOT_AUTHORIZED PaymentResultCode = -4 + // destination account does not exist + PAYMENT_NO_DESTINATION PaymentResultCode = -5 + // destination missing a trust line for asset + PAYMENT_NO_TRUST PaymentResultCode = -6 + // destination not authorized to hold asset + PAYMENT_NOT_AUTHORIZED PaymentResultCode = -7 + // destination would go above their limit + PAYMENT_LINE_FULL PaymentResultCode = -8 + // missing issuer on asset + PAYMENT_NO_ISSUER PaymentResultCode = -9 +) + +type PaymentResult struct { + // The union discriminant Code selects among the following arms: + // PAYMENT_SUCCESS: + // void + // default: + // void + Code PaymentResultCode + _u interface{} +} + +type PathPaymentStrictReceiveResultCode int32 + +const ( + // success + PATH_PAYMENT_STRICT_RECEIVE_SUCCESS PathPaymentStrictReceiveResultCode = 0 + // bad input + PATH_PAYMENT_STRICT_RECEIVE_MALFORMED PathPaymentStrictReceiveResultCode = -1 + // not enough funds in source account + PATH_PAYMENT_STRICT_RECEIVE_UNDERFUNDED PathPaymentStrictReceiveResultCode = -2 + // no trust line on source account + PATH_PAYMENT_STRICT_RECEIVE_SRC_NO_TRUST PathPaymentStrictReceiveResultCode = -3 + // source not authorized to transfer + PATH_PAYMENT_STRICT_RECEIVE_SRC_NOT_AUTHORIZED PathPaymentStrictReceiveResultCode = -4 + // destination account does not exist + PATH_PAYMENT_STRICT_RECEIVE_NO_DESTINATION PathPaymentStrictReceiveResultCode = -5 + // dest missing a trust line for asset + PATH_PAYMENT_STRICT_RECEIVE_NO_TRUST PathPaymentStrictReceiveResultCode = -6 + // dest not authorized to hold asset + PATH_PAYMENT_STRICT_RECEIVE_NOT_AUTHORIZED PathPaymentStrictReceiveResultCode = -7 + // dest would go above their limit + PATH_PAYMENT_STRICT_RECEIVE_LINE_FULL PathPaymentStrictReceiveResultCode = -8 + // missing issuer on one asset + PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER PathPaymentStrictReceiveResultCode = -9 + // not enough offers to satisfy path + PATH_PAYMENT_STRICT_RECEIVE_TOO_FEW_OFFERS PathPaymentStrictReceiveResultCode = -10 + // would cross one of its own offers + PATH_PAYMENT_STRICT_RECEIVE_OFFER_CROSS_SELF PathPaymentStrictReceiveResultCode = -11 + // could not satisfy sendmax + PATH_PAYMENT_STRICT_RECEIVE_OVER_SENDMAX PathPaymentStrictReceiveResultCode = -12 +) + +type SimplePaymentResult struct { + Destination AccountID + Asset Asset + Amount Int64 +} + +type PathPaymentStrictReceiveResult struct { + // The union discriminant Code selects among the following arms: + // PATH_PAYMENT_STRICT_RECEIVE_SUCCESS: + // Success() *XdrAnon_PathPaymentStrictReceiveResult_Success + // PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER: + // NoIssuer() *Asset + // default: + // void + Code PathPaymentStrictReceiveResultCode + _u interface{} +} +type XdrAnon_PathPaymentStrictReceiveResult_Success struct { + Offers []ClaimAtom + Last SimplePaymentResult +} + +type PathPaymentStrictSendResultCode int32 + +const ( + // success + PATH_PAYMENT_STRICT_SEND_SUCCESS PathPaymentStrictSendResultCode = 0 + // bad input + PATH_PAYMENT_STRICT_SEND_MALFORMED PathPaymentStrictSendResultCode = -1 + // not enough funds in source account + PATH_PAYMENT_STRICT_SEND_UNDERFUNDED PathPaymentStrictSendResultCode = -2 + // no trust line on source account + PATH_PAYMENT_STRICT_SEND_SRC_NO_TRUST PathPaymentStrictSendResultCode = -3 + // source not authorized to transfer + PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED PathPaymentStrictSendResultCode = -4 + // destination 
account does not exist + PATH_PAYMENT_STRICT_SEND_NO_DESTINATION PathPaymentStrictSendResultCode = -5 + // dest missing a trust line for asset + PATH_PAYMENT_STRICT_SEND_NO_TRUST PathPaymentStrictSendResultCode = -6 + // dest not authorized to hold asset + PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED PathPaymentStrictSendResultCode = -7 + // dest would go above their limit + PATH_PAYMENT_STRICT_SEND_LINE_FULL PathPaymentStrictSendResultCode = -8 + // missing issuer on one asset + PATH_PAYMENT_STRICT_SEND_NO_ISSUER PathPaymentStrictSendResultCode = -9 + // not enough offers to satisfy path + PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS PathPaymentStrictSendResultCode = -10 + // would cross one of its own offers + PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF PathPaymentStrictSendResultCode = -11 + // could not satisfy destMin + PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN PathPaymentStrictSendResultCode = -12 +) + +type PathPaymentStrictSendResult struct { + // The union discriminant Code selects among the following arms: + // PATH_PAYMENT_STRICT_SEND_SUCCESS: + // Success() *XdrAnon_PathPaymentStrictSendResult_Success + // PATH_PAYMENT_STRICT_SEND_NO_ISSUER: + // NoIssuer() *Asset + // default: + // void + Code PathPaymentStrictSendResultCode + _u interface{} +} +type XdrAnon_PathPaymentStrictSendResult_Success struct { + Offers []ClaimAtom + Last SimplePaymentResult +} + +type ManageSellOfferResultCode int32 + +const ( + // codes considered as "success" for the operation + MANAGE_SELL_OFFER_SUCCESS ManageSellOfferResultCode = 0 + // generated offer would be invalid + MANAGE_SELL_OFFER_MALFORMED ManageSellOfferResultCode = -1 + // no trust line for what we're selling + MANAGE_SELL_OFFER_SELL_NO_TRUST ManageSellOfferResultCode = -2 + // no trust line for what we're buying + MANAGE_SELL_OFFER_BUY_NO_TRUST ManageSellOfferResultCode = -3 + // not authorized to sell + MANAGE_SELL_OFFER_SELL_NOT_AUTHORIZED ManageSellOfferResultCode = -4 + // not authorized to buy + MANAGE_SELL_OFFER_BUY_NOT_AUTHORIZED ManageSellOfferResultCode = -5 + // can't receive more of what it's buying + MANAGE_SELL_OFFER_LINE_FULL ManageSellOfferResultCode = -6 + // doesn't hold what it's trying to sell + MANAGE_SELL_OFFER_UNDERFUNDED ManageSellOfferResultCode = -7 + // would cross an offer from the same user + MANAGE_SELL_OFFER_CROSS_SELF ManageSellOfferResultCode = -8 + // no issuer for what we're selling + MANAGE_SELL_OFFER_SELL_NO_ISSUER ManageSellOfferResultCode = -9 + // no issuer for what we're buying + MANAGE_SELL_OFFER_BUY_NO_ISSUER ManageSellOfferResultCode = -10 + // offerID does not match an existing offer + MANAGE_SELL_OFFER_NOT_FOUND ManageSellOfferResultCode = -11 + // not enough funds to create a new Offer + MANAGE_SELL_OFFER_LOW_RESERVE ManageSellOfferResultCode = -12 +) + +type ManageOfferEffect int32 + +const ( + MANAGE_OFFER_CREATED ManageOfferEffect = 0 + MANAGE_OFFER_UPDATED ManageOfferEffect = 1 + MANAGE_OFFER_DELETED ManageOfferEffect = 2 +) + +type ManageOfferSuccessResult struct { + // offers that got claimed while creating this offer + OffersClaimed []ClaimAtom + Offer XdrAnon_ManageOfferSuccessResult_Offer +} +type XdrAnon_ManageOfferSuccessResult_Offer struct { + // The union discriminant Effect selects among the following arms: + // MANAGE_OFFER_CREATED, MANAGE_OFFER_UPDATED: + // Offer() *OfferEntry + // default: + // void + Effect ManageOfferEffect + _u interface{} +} + +type ManageSellOfferResult struct { + // The union discriminant Code selects among the following arms: + // MANAGE_SELL_OFFER_SUCCESS: + // 
Success() *ManageOfferSuccessResult + // default: + // void + Code ManageSellOfferResultCode + _u interface{} +} + +type ManageBuyOfferResultCode int32 + +const ( + // codes considered as "success" for the operation + MANAGE_BUY_OFFER_SUCCESS ManageBuyOfferResultCode = 0 + // generated offer would be invalid + MANAGE_BUY_OFFER_MALFORMED ManageBuyOfferResultCode = -1 + // no trust line for what we're selling + MANAGE_BUY_OFFER_SELL_NO_TRUST ManageBuyOfferResultCode = -2 + // no trust line for what we're buying + MANAGE_BUY_OFFER_BUY_NO_TRUST ManageBuyOfferResultCode = -3 + // not authorized to sell + MANAGE_BUY_OFFER_SELL_NOT_AUTHORIZED ManageBuyOfferResultCode = -4 + // not authorized to buy + MANAGE_BUY_OFFER_BUY_NOT_AUTHORIZED ManageBuyOfferResultCode = -5 + // can't receive more of what it's buying + MANAGE_BUY_OFFER_LINE_FULL ManageBuyOfferResultCode = -6 + // doesn't hold what it's trying to sell + MANAGE_BUY_OFFER_UNDERFUNDED ManageBuyOfferResultCode = -7 + // would cross an offer from the same user + MANAGE_BUY_OFFER_CROSS_SELF ManageBuyOfferResultCode = -8 + // no issuer for what we're selling + MANAGE_BUY_OFFER_SELL_NO_ISSUER ManageBuyOfferResultCode = -9 + // no issuer for what we're buying + MANAGE_BUY_OFFER_BUY_NO_ISSUER ManageBuyOfferResultCode = -10 + // offerID does not match an existing offer + MANAGE_BUY_OFFER_NOT_FOUND ManageBuyOfferResultCode = -11 + // not enough funds to create a new Offer + MANAGE_BUY_OFFER_LOW_RESERVE ManageBuyOfferResultCode = -12 +) + +type ManageBuyOfferResult struct { + // The union discriminant Code selects among the following arms: + // MANAGE_BUY_OFFER_SUCCESS: + // Success() *ManageOfferSuccessResult + // default: + // void + Code ManageBuyOfferResultCode + _u interface{} +} + +type SetOptionsResultCode int32 + +const ( + // codes considered as "success" for the operation + SET_OPTIONS_SUCCESS SetOptionsResultCode = 0 + // not enough funds to add a signer + SET_OPTIONS_LOW_RESERVE SetOptionsResultCode = -1 + // max number of signers already reached + SET_OPTIONS_TOO_MANY_SIGNERS SetOptionsResultCode = -2 + // invalid combination of clear/set flags + SET_OPTIONS_BAD_FLAGS SetOptionsResultCode = -3 + // inflation account does not exist + SET_OPTIONS_INVALID_INFLATION SetOptionsResultCode = -4 + // can no longer change this option + SET_OPTIONS_CANT_CHANGE SetOptionsResultCode = -5 + // can't set an unknown flag + SET_OPTIONS_UNKNOWN_FLAG SetOptionsResultCode = -6 + // bad value for weight/threshold + SET_OPTIONS_THRESHOLD_OUT_OF_RANGE SetOptionsResultCode = -7 + // signer cannot be masterkey + SET_OPTIONS_BAD_SIGNER SetOptionsResultCode = -8 + // malformed home domain + SET_OPTIONS_INVALID_HOME_DOMAIN SetOptionsResultCode = -9 + // auth revocable is required for clawback + SET_OPTIONS_AUTH_REVOCABLE_REQUIRED SetOptionsResultCode = -10 +) + +type SetOptionsResult struct { + // The union discriminant Code selects among the following arms: + // SET_OPTIONS_SUCCESS: + // void + // default: + // void + Code SetOptionsResultCode + _u interface{} +} + +type ChangeTrustResultCode int32 + +const ( + // codes considered as "success" for the operation + CHANGE_TRUST_SUCCESS ChangeTrustResultCode = 0 + // bad input + CHANGE_TRUST_MALFORMED ChangeTrustResultCode = -1 + // could not find issuer + CHANGE_TRUST_NO_ISSUER ChangeTrustResultCode = -2 + // cannot drop limit below balance + CHANGE_TRUST_INVALID_LIMIT ChangeTrustResultCode = -3 + // not enough funds to create a new trust line, + CHANGE_TRUST_LOW_RESERVE ChangeTrustResultCode = -4 + // trusting self 
is not allowed + CHANGE_TRUST_SELF_NOT_ALLOWED ChangeTrustResultCode = -5 + // Asset trustline is missing for pool + CHANGE_TRUST_TRUST_LINE_MISSING ChangeTrustResultCode = -6 + // Asset trustline is still referenced in a pool + CHANGE_TRUST_CANNOT_DELETE ChangeTrustResultCode = -7 + // Asset trustline is deauthorized + CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES ChangeTrustResultCode = -8 +) + +type ChangeTrustResult struct { + // The union discriminant Code selects among the following arms: + // CHANGE_TRUST_SUCCESS: + // void + // default: + // void + Code ChangeTrustResultCode + _u interface{} +} + +type AllowTrustResultCode int32 + +const ( + // codes considered as "success" for the operation + ALLOW_TRUST_SUCCESS AllowTrustResultCode = 0 + // asset is not ASSET_TYPE_ALPHANUM + ALLOW_TRUST_MALFORMED AllowTrustResultCode = -1 + // trustor does not have a trustline + ALLOW_TRUST_NO_TRUST_LINE AllowTrustResultCode = -2 + // source account does not require trust + ALLOW_TRUST_TRUST_NOT_REQUIRED AllowTrustResultCode = -3 + // source account can't revoke trust, + ALLOW_TRUST_CANT_REVOKE AllowTrustResultCode = -4 + // trusting self is not allowed + ALLOW_TRUST_SELF_NOT_ALLOWED AllowTrustResultCode = -5 + // claimable balances can't be created + ALLOW_TRUST_LOW_RESERVE AllowTrustResultCode = -6 +) + +type AllowTrustResult struct { + // The union discriminant Code selects among the following arms: + // ALLOW_TRUST_SUCCESS: + // void + // default: + // void + Code AllowTrustResultCode + _u interface{} +} + +type AccountMergeResultCode int32 + +const ( + // codes considered as "success" for the operation + ACCOUNT_MERGE_SUCCESS AccountMergeResultCode = 0 + // can't merge onto itself + ACCOUNT_MERGE_MALFORMED AccountMergeResultCode = -1 + // destination does not exist + ACCOUNT_MERGE_NO_ACCOUNT AccountMergeResultCode = -2 + // source account has AUTH_IMMUTABLE set + ACCOUNT_MERGE_IMMUTABLE_SET AccountMergeResultCode = -3 + // account has trust lines/offers + ACCOUNT_MERGE_HAS_SUB_ENTRIES AccountMergeResultCode = -4 + // sequence number is over max allowed + ACCOUNT_MERGE_SEQNUM_TOO_FAR AccountMergeResultCode = -5 + // can't add source balance to + ACCOUNT_MERGE_DEST_FULL AccountMergeResultCode = -6 + // destination balance + ACCOUNT_MERGE_IS_SPONSOR AccountMergeResultCode = -7 +) + +type AccountMergeResult struct { + // The union discriminant Code selects among the following arms: + // ACCOUNT_MERGE_SUCCESS: + // SourceAccountBalance() *Int64 + // default: + // void + Code AccountMergeResultCode + _u interface{} +} + +type InflationResultCode int32 + +const ( + // codes considered as "success" for the operation + INFLATION_SUCCESS InflationResultCode = 0 + // codes considered as "failure" for the operation + INFLATION_NOT_TIME InflationResultCode = -1 +) + +type InflationPayout struct { + Destination AccountID + Amount Int64 +} + +type InflationResult struct { + // The union discriminant Code selects among the following arms: + // INFLATION_SUCCESS: + // Payouts() *[]InflationPayout + // default: + // void + Code InflationResultCode + _u interface{} +} + +type ManageDataResultCode int32 + +const ( + // codes considered as "success" for the operation + MANAGE_DATA_SUCCESS ManageDataResultCode = 0 + // The network hasn't moved to this protocol change yet + MANAGE_DATA_NOT_SUPPORTED_YET ManageDataResultCode = -1 + // Trying to remove a Data Entry that isn't there + MANAGE_DATA_NAME_NOT_FOUND ManageDataResultCode = -2 + // not enough funds to create a new Data Entry + MANAGE_DATA_LOW_RESERVE 
ManageDataResultCode = -3 + // Name not a valid string + MANAGE_DATA_INVALID_NAME ManageDataResultCode = -4 +) + +type ManageDataResult struct { + // The union discriminant Code selects among the following arms: + // MANAGE_DATA_SUCCESS: + // void + // default: + // void + Code ManageDataResultCode + _u interface{} +} + +type BumpSequenceResultCode int32 + +const ( + // codes considered as "success" for the operation + BUMP_SEQUENCE_SUCCESS BumpSequenceResultCode = 0 + // codes considered as "failure" for the operation + BUMP_SEQUENCE_BAD_SEQ BumpSequenceResultCode = -1 +) + +type BumpSequenceResult struct { + // The union discriminant Code selects among the following arms: + // BUMP_SEQUENCE_SUCCESS: + // void + // default: + // void + Code BumpSequenceResultCode + _u interface{} +} + +type CreateClaimableBalanceResultCode int32 + +const ( + CREATE_CLAIMABLE_BALANCE_SUCCESS CreateClaimableBalanceResultCode = 0 + CREATE_CLAIMABLE_BALANCE_MALFORMED CreateClaimableBalanceResultCode = -1 + CREATE_CLAIMABLE_BALANCE_LOW_RESERVE CreateClaimableBalanceResultCode = -2 + CREATE_CLAIMABLE_BALANCE_NO_TRUST CreateClaimableBalanceResultCode = -3 + CREATE_CLAIMABLE_BALANCE_NOT_AUTHORIZED CreateClaimableBalanceResultCode = -4 + CREATE_CLAIMABLE_BALANCE_UNDERFUNDED CreateClaimableBalanceResultCode = -5 +) + +type CreateClaimableBalanceResult struct { + // The union discriminant Code selects among the following arms: + // CREATE_CLAIMABLE_BALANCE_SUCCESS: + // BalanceID() *ClaimableBalanceID + // default: + // void + Code CreateClaimableBalanceResultCode + _u interface{} +} + +type ClaimClaimableBalanceResultCode int32 + +const ( + CLAIM_CLAIMABLE_BALANCE_SUCCESS ClaimClaimableBalanceResultCode = 0 + CLAIM_CLAIMABLE_BALANCE_DOES_NOT_EXIST ClaimClaimableBalanceResultCode = -1 + CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM ClaimClaimableBalanceResultCode = -2 + CLAIM_CLAIMABLE_BALANCE_LINE_FULL ClaimClaimableBalanceResultCode = -3 + CLAIM_CLAIMABLE_BALANCE_NO_TRUST ClaimClaimableBalanceResultCode = -4 + CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED ClaimClaimableBalanceResultCode = -5 +) + +type ClaimClaimableBalanceResult struct { + // The union discriminant Code selects among the following arms: + // CLAIM_CLAIMABLE_BALANCE_SUCCESS: + // void + // default: + // void + Code ClaimClaimableBalanceResultCode + _u interface{} +} + +type BeginSponsoringFutureReservesResultCode int32 + +const ( + // codes considered as "success" for the operation + BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS BeginSponsoringFutureReservesResultCode = 0 + // codes considered as "failure" for the operation + BEGIN_SPONSORING_FUTURE_RESERVES_MALFORMED BeginSponsoringFutureReservesResultCode = -1 + BEGIN_SPONSORING_FUTURE_RESERVES_ALREADY_SPONSORED BeginSponsoringFutureReservesResultCode = -2 + BEGIN_SPONSORING_FUTURE_RESERVES_RECURSIVE BeginSponsoringFutureReservesResultCode = -3 +) + +type BeginSponsoringFutureReservesResult struct { + // The union discriminant Code selects among the following arms: + // BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS: + // void + // default: + // void + Code BeginSponsoringFutureReservesResultCode + _u interface{} +} + +type EndSponsoringFutureReservesResultCode int32 + +const ( + // codes considered as "success" for the operation + END_SPONSORING_FUTURE_RESERVES_SUCCESS EndSponsoringFutureReservesResultCode = 0 + // codes considered as "failure" for the operation + END_SPONSORING_FUTURE_RESERVES_NOT_SPONSORED EndSponsoringFutureReservesResultCode = -1 +) + +type EndSponsoringFutureReservesResult struct { + // The union 
discriminant Code selects among the following arms: + // END_SPONSORING_FUTURE_RESERVES_SUCCESS: + // void + // default: + // void + Code EndSponsoringFutureReservesResultCode + _u interface{} +} + +type RevokeSponsorshipResultCode int32 + +const ( + // codes considered as "success" for the operation + REVOKE_SPONSORSHIP_SUCCESS RevokeSponsorshipResultCode = 0 + // codes considered as "failure" for the operation + REVOKE_SPONSORSHIP_DOES_NOT_EXIST RevokeSponsorshipResultCode = -1 + REVOKE_SPONSORSHIP_NOT_SPONSOR RevokeSponsorshipResultCode = -2 + REVOKE_SPONSORSHIP_LOW_RESERVE RevokeSponsorshipResultCode = -3 + REVOKE_SPONSORSHIP_ONLY_TRANSFERABLE RevokeSponsorshipResultCode = -4 + REVOKE_SPONSORSHIP_MALFORMED RevokeSponsorshipResultCode = -5 +) + +type RevokeSponsorshipResult struct { + // The union discriminant Code selects among the following arms: + // REVOKE_SPONSORSHIP_SUCCESS: + // void + // default: + // void + Code RevokeSponsorshipResultCode + _u interface{} +} + +type ClawbackResultCode int32 + +const ( + // codes considered as "success" for the operation + CLAWBACK_SUCCESS ClawbackResultCode = 0 + // codes considered as "failure" for the operation + CLAWBACK_MALFORMED ClawbackResultCode = -1 + CLAWBACK_NOT_CLAWBACK_ENABLED ClawbackResultCode = -2 + CLAWBACK_NO_TRUST ClawbackResultCode = -3 + CLAWBACK_UNDERFUNDED ClawbackResultCode = -4 +) + +type ClawbackResult struct { + // The union discriminant Code selects among the following arms: + // CLAWBACK_SUCCESS: + // void + // default: + // void + Code ClawbackResultCode + _u interface{} +} + +type ClawbackClaimableBalanceResultCode int32 + +const ( + // codes considered as "success" for the operation + CLAWBACK_CLAIMABLE_BALANCE_SUCCESS ClawbackClaimableBalanceResultCode = 0 + // codes considered as "failure" for the operation + CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST ClawbackClaimableBalanceResultCode = -1 + CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER ClawbackClaimableBalanceResultCode = -2 + CLAWBACK_CLAIMABLE_BALANCE_NOT_CLAWBACK_ENABLED ClawbackClaimableBalanceResultCode = -3 +) + +type ClawbackClaimableBalanceResult struct { + // The union discriminant Code selects among the following arms: + // CLAWBACK_CLAIMABLE_BALANCE_SUCCESS: + // void + // default: + // void + Code ClawbackClaimableBalanceResultCode + _u interface{} +} + +type SetTrustLineFlagsResultCode int32 + +const ( + // codes considered as "success" for the operation + SET_TRUST_LINE_FLAGS_SUCCESS SetTrustLineFlagsResultCode = 0 + // codes considered as "failure" for the operation + SET_TRUST_LINE_FLAGS_MALFORMED SetTrustLineFlagsResultCode = -1 + SET_TRUST_LINE_FLAGS_NO_TRUST_LINE SetTrustLineFlagsResultCode = -2 + SET_TRUST_LINE_FLAGS_CANT_REVOKE SetTrustLineFlagsResultCode = -3 + SET_TRUST_LINE_FLAGS_INVALID_STATE SetTrustLineFlagsResultCode = -4 + // claimable balances can't be created + SET_TRUST_LINE_FLAGS_LOW_RESERVE SetTrustLineFlagsResultCode = -5 +) + +type SetTrustLineFlagsResult struct { + // The union discriminant Code selects among the following arms: + // SET_TRUST_LINE_FLAGS_SUCCESS: + // void + // default: + // void + Code SetTrustLineFlagsResultCode + _u interface{} +} + +type LiquidityPoolDepositResultCode int32 + +const ( + // codes considered as "success" for the operation + LIQUIDITY_POOL_DEPOSIT_SUCCESS LiquidityPoolDepositResultCode = 0 + // bad input + LIQUIDITY_POOL_DEPOSIT_MALFORMED LiquidityPoolDepositResultCode = -1 + // no trust line for one of the + LIQUIDITY_POOL_DEPOSIT_NO_TRUST LiquidityPoolDepositResultCode = -2 + // not authorized 
for one of the + LIQUIDITY_POOL_DEPOSIT_NOT_AUTHORIZED LiquidityPoolDepositResultCode = -3 + // not enough balance for one of + LIQUIDITY_POOL_DEPOSIT_UNDERFUNDED LiquidityPoolDepositResultCode = -4 + // pool share trust line doesn't + LIQUIDITY_POOL_DEPOSIT_LINE_FULL LiquidityPoolDepositResultCode = -5 + // deposit price outside bounds + LIQUIDITY_POOL_DEPOSIT_BAD_PRICE LiquidityPoolDepositResultCode = -6 + // pool reserves are full + LIQUIDITY_POOL_DEPOSIT_POOL_FULL LiquidityPoolDepositResultCode = -7 +) + +type LiquidityPoolDepositResult struct { + // The union discriminant Code selects among the following arms: + // LIQUIDITY_POOL_DEPOSIT_SUCCESS: + // void + // default: + // void + Code LiquidityPoolDepositResultCode + _u interface{} +} + +type LiquidityPoolWithdrawResultCode int32 + +const ( + // codes considered as "success" for the operation + LIQUIDITY_POOL_WITHDRAW_SUCCESS LiquidityPoolWithdrawResultCode = 0 + // bad input + LIQUIDITY_POOL_WITHDRAW_MALFORMED LiquidityPoolWithdrawResultCode = -1 + // no trust line for one of the + LIQUIDITY_POOL_WITHDRAW_NO_TRUST LiquidityPoolWithdrawResultCode = -2 + // not enough balance of the + LIQUIDITY_POOL_WITHDRAW_UNDERFUNDED LiquidityPoolWithdrawResultCode = -3 + // would go above limit for one + LIQUIDITY_POOL_WITHDRAW_LINE_FULL LiquidityPoolWithdrawResultCode = -4 + // of the assets + LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM LiquidityPoolWithdrawResultCode = -5 +) + +type LiquidityPoolWithdrawResult struct { + // The union discriminant Code selects among the following arms: + // LIQUIDITY_POOL_WITHDRAW_SUCCESS: + // void + // default: + // void + Code LiquidityPoolWithdrawResultCode + _u interface{} +} + +/* High level Operation Result */ +type OperationResultCode int32 + +const ( + // inner object result is valid + OpINNER OperationResultCode = 0 + // too few valid signatures / wrong network + OpBAD_AUTH OperationResultCode = -1 + // source account was not found + OpNO_ACCOUNT OperationResultCode = -2 + // operation not supported at this time + OpNOT_SUPPORTED OperationResultCode = -3 + // max number of subentries already reached + OpTOO_MANY_SUBENTRIES OperationResultCode = -4 + // operation did too much work + OpEXCEEDED_WORK_LIMIT OperationResultCode = -5 + // account is sponsoring too many entries + OpTOO_MANY_SPONSORING OperationResultCode = -6 +) + +type OperationResult struct { + // The union discriminant Code selects among the following arms: + // OpINNER: + // Tr() *XdrAnon_OperationResult_Tr + // default: + // void + Code OperationResultCode + _u interface{} +} +type XdrAnon_OperationResult_Tr struct { + // The union discriminant Type selects among the following arms: + // CREATE_ACCOUNT: + // CreateAccountResult() *CreateAccountResult + // PAYMENT: + // PaymentResult() *PaymentResult + // PATH_PAYMENT_STRICT_RECEIVE: + // PathPaymentStrictReceiveResult() *PathPaymentStrictReceiveResult + // MANAGE_SELL_OFFER: + // ManageSellOfferResult() *ManageSellOfferResult + // CREATE_PASSIVE_SELL_OFFER: + // CreatePassiveSellOfferResult() *ManageSellOfferResult + // SET_OPTIONS: + // SetOptionsResult() *SetOptionsResult + // CHANGE_TRUST: + // ChangeTrustResult() *ChangeTrustResult + // ALLOW_TRUST: + // AllowTrustResult() *AllowTrustResult + // ACCOUNT_MERGE: + // AccountMergeResult() *AccountMergeResult + // INFLATION: + // InflationResult() *InflationResult + // MANAGE_DATA: + // ManageDataResult() *ManageDataResult + // BUMP_SEQUENCE: + // BumpSeqResult() *BumpSequenceResult + // MANAGE_BUY_OFFER: + // ManageBuyOfferResult() 
*ManageBuyOfferResult + // PATH_PAYMENT_STRICT_SEND: + // PathPaymentStrictSendResult() *PathPaymentStrictSendResult + // CREATE_CLAIMABLE_BALANCE: + // CreateClaimableBalanceResult() *CreateClaimableBalanceResult + // CLAIM_CLAIMABLE_BALANCE: + // ClaimClaimableBalanceResult() *ClaimClaimableBalanceResult + // BEGIN_SPONSORING_FUTURE_RESERVES: + // BeginSponsoringFutureReservesResult() *BeginSponsoringFutureReservesResult + // END_SPONSORING_FUTURE_RESERVES: + // EndSponsoringFutureReservesResult() *EndSponsoringFutureReservesResult + // REVOKE_SPONSORSHIP: + // RevokeSponsorshipResult() *RevokeSponsorshipResult + // CLAWBACK: + // ClawbackResult() *ClawbackResult + // CLAWBACK_CLAIMABLE_BALANCE: + // ClawbackClaimableBalanceResult() *ClawbackClaimableBalanceResult + // SET_TRUST_LINE_FLAGS: + // SetTrustLineFlagsResult() *SetTrustLineFlagsResult + // LIQUIDITY_POOL_DEPOSIT: + // LiquidityPoolDepositResult() *LiquidityPoolDepositResult + // LIQUIDITY_POOL_WITHDRAW: + // LiquidityPoolWithdrawResult() *LiquidityPoolWithdrawResult + Type OperationType + _u interface{} +} + +type TransactionResultCode int32 + +const ( + // fee bump inner transaction succeeded + TxFEE_BUMP_INNER_SUCCESS TransactionResultCode = 1 + // all operations succeeded + TxSUCCESS TransactionResultCode = 0 + // one of the operations failed (none were applied) + TxFAILED TransactionResultCode = -1 + // ledger closeTime before minTime + TxTOO_EARLY TransactionResultCode = -2 + // ledger closeTime after maxTime + TxTOO_LATE TransactionResultCode = -3 + // no operation was specified + TxMISSING_OPERATION TransactionResultCode = -4 + // sequence number does not match source account + TxBAD_SEQ TransactionResultCode = -5 + // too few valid signatures / wrong network + TxBAD_AUTH TransactionResultCode = -6 + // fee would bring account below reserve + TxINSUFFICIENT_BALANCE TransactionResultCode = -7 + // source account not found + TxNO_ACCOUNT TransactionResultCode = -8 + // fee is too small + TxINSUFFICIENT_FEE TransactionResultCode = -9 + // unused signatures attached to transaction + TxBAD_AUTH_EXTRA TransactionResultCode = -10 + // an unknown error occurred + TxINTERNAL_ERROR TransactionResultCode = -11 + // transaction type not supported + TxNOT_SUPPORTED TransactionResultCode = -12 + // fee bump inner transaction failed + TxFEE_BUMP_INNER_FAILED TransactionResultCode = -13 + // sponsorship not confirmed + TxBAD_SPONSORSHIP TransactionResultCode = -14 +) + +// InnerTransactionResult must be binary compatible with TransactionResult +// because it is be used to represent the result of a Transaction. +type InnerTransactionResult struct { + // Always 0. Here for binary compatibility. 
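+	// The fee actually charged is reported on the enclosing TransactionResult's FeeCharged field.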
+ FeeCharged Int64 + Result XdrAnon_InnerTransactionResult_Result + Ext XdrAnon_InnerTransactionResult_Ext +} +type XdrAnon_InnerTransactionResult_Result struct { + // The union discriminant Code selects among the following arms: + // TxSUCCESS, TxFAILED: + // Results() *[]OperationResult + // TxTOO_EARLY, TxTOO_LATE, TxMISSING_OPERATION, TxBAD_SEQ, TxBAD_AUTH, TxINSUFFICIENT_BALANCE, TxNO_ACCOUNT, TxINSUFFICIENT_FEE, TxBAD_AUTH_EXTRA, TxINTERNAL_ERROR, TxNOT_SUPPORTED, TxBAD_SPONSORSHIP: + // void + Code TransactionResultCode + _u interface{} +} + +// reserved for future use +type XdrAnon_InnerTransactionResult_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type InnerTransactionResultPair struct { + // hash of the inner transaction + TransactionHash Hash + // result for the inner transaction + Result InnerTransactionResult +} + +type TransactionResult struct { + // actual fee charged for the transaction + FeeCharged Int64 + Result XdrAnon_TransactionResult_Result + Ext XdrAnon_TransactionResult_Ext +} +type XdrAnon_TransactionResult_Result struct { + // The union discriminant Code selects among the following arms: + // TxFEE_BUMP_INNER_SUCCESS, TxFEE_BUMP_INNER_FAILED: + // InnerResultPair() *InnerTransactionResultPair + // TxSUCCESS, TxFAILED: + // Results() *[]OperationResult + // default: + // void + Code TransactionResultCode + _u interface{} +} + +// reserved for future use +type XdrAnon_TransactionResult_Ext struct { + // The union discriminant V selects among the following arms: + // 0: + // void + V int32 + _u interface{} +} + +type Hash = [32]byte + +type Uint256 = [32]byte + +type Uint32 = uint32 + +type Int32 = int32 + +type Uint64 = uint64 + +type Int64 = int64 + +type CryptoKeyType int32 + +const ( + KEY_TYPE_ED25519 CryptoKeyType = 0 + KEY_TYPE_PRE_AUTH_TX CryptoKeyType = 1 + KEY_TYPE_HASH_X CryptoKeyType = 2 + // MUXED enum values for supported type are derived from the enum values + // above by ORing them with 0x100 + KEY_TYPE_MUXED_ED25519 CryptoKeyType = CryptoKeyType(0x100) +) + +type PublicKeyType int32 + +const ( + PUBLIC_KEY_TYPE_ED25519 PublicKeyType = PublicKeyType(KEY_TYPE_ED25519) +) + +type SignerKeyType int32 + +const ( + SIGNER_KEY_TYPE_ED25519 SignerKeyType = SignerKeyType(KEY_TYPE_ED25519) + SIGNER_KEY_TYPE_PRE_AUTH_TX SignerKeyType = SignerKeyType(KEY_TYPE_PRE_AUTH_TX) + SIGNER_KEY_TYPE_HASH_X SignerKeyType = SignerKeyType(KEY_TYPE_HASH_X) +) + +type PublicKey struct { + // The union discriminant Type selects among the following arms: + // PUBLIC_KEY_TYPE_ED25519: + // Ed25519() *Uint256 + Type PublicKeyType + _u interface{} +} + +type SignerKey struct { + // The union discriminant Type selects among the following arms: + // SIGNER_KEY_TYPE_ED25519: + // Ed25519() *Uint256 + // SIGNER_KEY_TYPE_PRE_AUTH_TX: + // PreAuthTx() *Uint256 + // SIGNER_KEY_TYPE_HASH_X: + // HashX() *Uint256 + Type SignerKeyType + _u interface{} +} + +// variable size as the size depends on the signature scheme used +type Signature = []byte // bound 64 + +type SignatureHint = [4]byte + +type NodeID = PublicKey + +type Curve25519Secret struct { + Key [32]byte +} + +type Curve25519Public struct { + Key [32]byte +} + +type HmacSha256Key struct { + Key [32]byte +} + +type HmacSha256Mac struct { + Mac [32]byte +} + +// +// Helper types and generated marshaling functions +// + +type XdrType_Value struct { + XdrVecOpaque +} + +func XDR_Value(v *Value) XdrType_Value { + return XdrType_Value{XdrVecOpaque{v, 
0xffffffff}} +} +func (XdrType_Value) XdrTypeName() string { return "Value" } +func (v XdrType_Value) XdrUnwrap() XdrType { return v.XdrVecOpaque } + +type XdrType_SCPBallot = *SCPBallot + +func (v *SCPBallot) XdrPointer() interface{} { return v } +func (SCPBallot) XdrTypeName() string { return "SCPBallot" } +func (v SCPBallot) XdrValue() interface{} { return v } +func (v *SCPBallot) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SCPBallot) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%scounter", name), XDR_Uint32(&v.Counter)) + x.Marshal(x.Sprintf("%svalue", name), XDR_Value(&v.Value)) +} +func XDR_SCPBallot(v *SCPBallot) *SCPBallot { return v } + +var _XdrNames_SCPStatementType = map[int32]string{ + int32(SCP_ST_PREPARE): "SCP_ST_PREPARE", + int32(SCP_ST_CONFIRM): "SCP_ST_CONFIRM", + int32(SCP_ST_EXTERNALIZE): "SCP_ST_EXTERNALIZE", + int32(SCP_ST_NOMINATE): "SCP_ST_NOMINATE", +} +var _XdrValues_SCPStatementType = map[string]int32{ + "SCP_ST_PREPARE": int32(SCP_ST_PREPARE), + "SCP_ST_CONFIRM": int32(SCP_ST_CONFIRM), + "SCP_ST_EXTERNALIZE": int32(SCP_ST_EXTERNALIZE), + "SCP_ST_NOMINATE": int32(SCP_ST_NOMINATE), +} + +func (SCPStatementType) XdrEnumNames() map[int32]string { + return _XdrNames_SCPStatementType +} +func (v SCPStatementType) String() string { + if s, ok := _XdrNames_SCPStatementType[int32(v)]; ok { + return s + } + return fmt.Sprintf("SCPStatementType#%d", v) +} +func (v *SCPStatementType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_SCPStatementType[stok]; ok { + *v = SCPStatementType(val) + return nil + } else if stok == "SCPStatementType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid SCPStatementType.", stok)) + } +} +func (v SCPStatementType) GetU32() uint32 { return uint32(v) } +func (v *SCPStatementType) SetU32(n uint32) { *v = SCPStatementType(n) } +func (v *SCPStatementType) XdrPointer() interface{} { return v } +func (SCPStatementType) XdrTypeName() string { return "SCPStatementType" } +func (v SCPStatementType) XdrValue() interface{} { return v } +func (v *SCPStatementType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_SCPStatementType = *SCPStatementType + +func XDR_SCPStatementType(v *SCPStatementType) *SCPStatementType { return v } + +type _XdrVec_unbounded_Value []Value + +func (_XdrVec_unbounded_Value) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_Value) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_Value length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_Value length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_Value) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_Value) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]Value, 
int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_Value) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_Value(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_Value) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_Value) XdrTypeName() string { return "Value<>" } +func (v *_XdrVec_unbounded_Value) XdrPointer() interface{} { return (*[]Value)(v) } +func (v _XdrVec_unbounded_Value) XdrValue() interface{} { return ([]Value)(v) } +func (v *_XdrVec_unbounded_Value) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_SCPNomination = *SCPNomination + +func (v *SCPNomination) XdrPointer() interface{} { return v } +func (SCPNomination) XdrTypeName() string { return "SCPNomination" } +func (v SCPNomination) XdrValue() interface{} { return v } +func (v *SCPNomination) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SCPNomination) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%squorumSetHash", name), XDR_Hash(&v.QuorumSetHash)) + x.Marshal(x.Sprintf("%svotes", name), (*_XdrVec_unbounded_Value)(&v.Votes)) + x.Marshal(x.Sprintf("%saccepted", name), (*_XdrVec_unbounded_Value)(&v.Accepted)) +} +func XDR_SCPNomination(v *SCPNomination) *SCPNomination { return v } + +type _XdrPtr_SCPBallot struct { + p **SCPBallot +} +type _ptrflag_SCPBallot _XdrPtr_SCPBallot + +func (v _ptrflag_SCPBallot) String() string { + if *v.p == nil { + return "nil" + } + return "non-nil" +} +func (v _ptrflag_SCPBallot) Scan(ss fmt.ScanState, r rune) error { + tok, err := ss.Token(true, func(c rune) bool { + return c == '-' || (c >= 'a' && c <= 'z') + }) + if err != nil { + return err + } + switch string(tok) { + case "nil": + v.SetU32(0) + case "non-nil": + v.SetU32(1) + default: + return XdrError("SCPBallot flag should be \"nil\" or \"non-nil\"") + } + return nil +} +func (v _ptrflag_SCPBallot) GetU32() uint32 { + if *v.p == nil { + return 0 + } + return 1 +} +func (v _ptrflag_SCPBallot) SetU32(nv uint32) { + switch nv { + case 0: + *v.p = nil + case 1: + if *v.p == nil { + *v.p = new(SCPBallot) + } + default: + XdrPanic("*SCPBallot present flag value %d should be 0 or 1", nv) + } +} +func (_ptrflag_SCPBallot) XdrTypeName() string { return "SCPBallot?" 
} +func (v _ptrflag_SCPBallot) XdrPointer() interface{} { return nil } +func (v _ptrflag_SCPBallot) XdrValue() interface{} { return v.GetU32() != 0 } +func (v _ptrflag_SCPBallot) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _ptrflag_SCPBallot) XdrBound() uint32 { return 1 } +func (v _XdrPtr_SCPBallot) GetPresent() bool { return *v.p != nil } +func (v _XdrPtr_SCPBallot) SetPresent(present bool) { + if !present { + *v.p = nil + } else if *v.p == nil { + *v.p = new(SCPBallot) + } +} +func (v _XdrPtr_SCPBallot) XdrMarshalValue(x XDR, name string) { + if *v.p != nil { + XDR_SCPBallot(*v.p).XdrMarshal(x, name) + } +} +func (v _XdrPtr_SCPBallot) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _XdrPtr_SCPBallot) XdrRecurse(x XDR, name string) { + x.Marshal(name, _ptrflag_SCPBallot(v)) + v.XdrMarshalValue(x, name) +} +func (_XdrPtr_SCPBallot) XdrTypeName() string { return "SCPBallot*" } +func (v _XdrPtr_SCPBallot) XdrPointer() interface{} { return v.p } +func (v _XdrPtr_SCPBallot) XdrValue() interface{} { return *v.p } + +type XdrType_XdrAnon_SCPStatement_Pledges_Prepare = *XdrAnon_SCPStatement_Pledges_Prepare + +func (v *XdrAnon_SCPStatement_Pledges_Prepare) XdrPointer() interface{} { return v } +func (XdrAnon_SCPStatement_Pledges_Prepare) XdrTypeName() string { + return "XdrAnon_SCPStatement_Pledges_Prepare" +} +func (v XdrAnon_SCPStatement_Pledges_Prepare) XdrValue() interface{} { return v } +func (v *XdrAnon_SCPStatement_Pledges_Prepare) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_SCPStatement_Pledges_Prepare) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%squorumSetHash", name), XDR_Hash(&v.QuorumSetHash)) + x.Marshal(x.Sprintf("%sballot", name), XDR_SCPBallot(&v.Ballot)) + x.Marshal(x.Sprintf("%sprepared", name), _XdrPtr_SCPBallot{&v.Prepared}) + x.Marshal(x.Sprintf("%spreparedPrime", name), _XdrPtr_SCPBallot{&v.PreparedPrime}) + x.Marshal(x.Sprintf("%snC", name), XDR_Uint32(&v.NC)) + x.Marshal(x.Sprintf("%snH", name), XDR_Uint32(&v.NH)) +} +func XDR_XdrAnon_SCPStatement_Pledges_Prepare(v *XdrAnon_SCPStatement_Pledges_Prepare) *XdrAnon_SCPStatement_Pledges_Prepare { + return v +} + +type XdrType_XdrAnon_SCPStatement_Pledges_Confirm = *XdrAnon_SCPStatement_Pledges_Confirm + +func (v *XdrAnon_SCPStatement_Pledges_Confirm) XdrPointer() interface{} { return v } +func (XdrAnon_SCPStatement_Pledges_Confirm) XdrTypeName() string { + return "XdrAnon_SCPStatement_Pledges_Confirm" +} +func (v XdrAnon_SCPStatement_Pledges_Confirm) XdrValue() interface{} { return v } +func (v *XdrAnon_SCPStatement_Pledges_Confirm) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_SCPStatement_Pledges_Confirm) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sballot", name), XDR_SCPBallot(&v.Ballot)) + x.Marshal(x.Sprintf("%snPrepared", name), XDR_Uint32(&v.NPrepared)) + x.Marshal(x.Sprintf("%snCommit", name), XDR_Uint32(&v.NCommit)) + x.Marshal(x.Sprintf("%snH", name), XDR_Uint32(&v.NH)) + x.Marshal(x.Sprintf("%squorumSetHash", name), XDR_Hash(&v.QuorumSetHash)) +} +func XDR_XdrAnon_SCPStatement_Pledges_Confirm(v *XdrAnon_SCPStatement_Pledges_Confirm) *XdrAnon_SCPStatement_Pledges_Confirm { + return v +} + +type XdrType_XdrAnon_SCPStatement_Pledges_Externalize = *XdrAnon_SCPStatement_Pledges_Externalize + +func (v *XdrAnon_SCPStatement_Pledges_Externalize) XdrPointer() interface{} { return v } +func 
(XdrAnon_SCPStatement_Pledges_Externalize) XdrTypeName() string { + return "XdrAnon_SCPStatement_Pledges_Externalize" +} +func (v XdrAnon_SCPStatement_Pledges_Externalize) XdrValue() interface{} { return v } +func (v *XdrAnon_SCPStatement_Pledges_Externalize) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_SCPStatement_Pledges_Externalize) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%scommit", name), XDR_SCPBallot(&v.Commit)) + x.Marshal(x.Sprintf("%snH", name), XDR_Uint32(&v.NH)) + x.Marshal(x.Sprintf("%scommitQuorumSetHash", name), XDR_Hash(&v.CommitQuorumSetHash)) +} +func XDR_XdrAnon_SCPStatement_Pledges_Externalize(v *XdrAnon_SCPStatement_Pledges_Externalize) *XdrAnon_SCPStatement_Pledges_Externalize { + return v +} + +var _XdrTags_XdrAnon_SCPStatement_Pledges = map[int32]bool{ + XdrToI32(SCP_ST_PREPARE): true, + XdrToI32(SCP_ST_CONFIRM): true, + XdrToI32(SCP_ST_EXTERNALIZE): true, + XdrToI32(SCP_ST_NOMINATE): true, +} + +func (_ XdrAnon_SCPStatement_Pledges) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_SCPStatement_Pledges +} +func (u *XdrAnon_SCPStatement_Pledges) Prepare() *XdrAnon_SCPStatement_Pledges_Prepare { + switch u.Type { + case SCP_ST_PREPARE: + if v, ok := u._u.(*XdrAnon_SCPStatement_Pledges_Prepare); ok { + return v + } else { + var zero XdrAnon_SCPStatement_Pledges_Prepare + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_SCPStatement_Pledges.Prepare accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_SCPStatement_Pledges) Confirm() *XdrAnon_SCPStatement_Pledges_Confirm { + switch u.Type { + case SCP_ST_CONFIRM: + if v, ok := u._u.(*XdrAnon_SCPStatement_Pledges_Confirm); ok { + return v + } else { + var zero XdrAnon_SCPStatement_Pledges_Confirm + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_SCPStatement_Pledges.Confirm accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_SCPStatement_Pledges) Externalize() *XdrAnon_SCPStatement_Pledges_Externalize { + switch u.Type { + case SCP_ST_EXTERNALIZE: + if v, ok := u._u.(*XdrAnon_SCPStatement_Pledges_Externalize); ok { + return v + } else { + var zero XdrAnon_SCPStatement_Pledges_Externalize + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_SCPStatement_Pledges.Externalize accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_SCPStatement_Pledges) Nominate() *SCPNomination { + switch u.Type { + case SCP_ST_NOMINATE: + if v, ok := u._u.(*SCPNomination); ok { + return v + } else { + var zero SCPNomination + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_SCPStatement_Pledges.Nominate accessed when Type == %v", u.Type) + return nil + } +} +func (u XdrAnon_SCPStatement_Pledges) XdrValid() bool { + switch u.Type { + case SCP_ST_PREPARE, SCP_ST_CONFIRM, SCP_ST_EXTERNALIZE, SCP_ST_NOMINATE: + return true + } + return false +} +func (u *XdrAnon_SCPStatement_Pledges) XdrUnionTag() XdrNum32 { + return XDR_SCPStatementType(&u.Type) +} +func (u *XdrAnon_SCPStatement_Pledges) XdrUnionTagName() string { + return "Type" +} +func (u *XdrAnon_SCPStatement_Pledges) XdrUnionBody() XdrType { + switch u.Type { + case SCP_ST_PREPARE: + return XDR_XdrAnon_SCPStatement_Pledges_Prepare(u.Prepare()) + case SCP_ST_CONFIRM: + return XDR_XdrAnon_SCPStatement_Pledges_Confirm(u.Confirm()) + case SCP_ST_EXTERNALIZE: + return XDR_XdrAnon_SCPStatement_Pledges_Externalize(u.Externalize()) + case SCP_ST_NOMINATE: + return 
XDR_SCPNomination(u.Nominate()) + } + return nil +} +func (u *XdrAnon_SCPStatement_Pledges) XdrUnionBodyName() string { + switch u.Type { + case SCP_ST_PREPARE: + return "Prepare" + case SCP_ST_CONFIRM: + return "Confirm" + case SCP_ST_EXTERNALIZE: + return "Externalize" + case SCP_ST_NOMINATE: + return "Nominate" + } + return "" +} + +type XdrType_XdrAnon_SCPStatement_Pledges = *XdrAnon_SCPStatement_Pledges + +func (v *XdrAnon_SCPStatement_Pledges) XdrPointer() interface{} { return v } +func (XdrAnon_SCPStatement_Pledges) XdrTypeName() string { return "XdrAnon_SCPStatement_Pledges" } +func (v XdrAnon_SCPStatement_Pledges) XdrValue() interface{} { return v } +func (v *XdrAnon_SCPStatement_Pledges) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_SCPStatement_Pledges) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_SCPStatementType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case SCP_ST_PREPARE: + x.Marshal(x.Sprintf("%sprepare", name), XDR_XdrAnon_SCPStatement_Pledges_Prepare(u.Prepare())) + return + case SCP_ST_CONFIRM: + x.Marshal(x.Sprintf("%sconfirm", name), XDR_XdrAnon_SCPStatement_Pledges_Confirm(u.Confirm())) + return + case SCP_ST_EXTERNALIZE: + x.Marshal(x.Sprintf("%sexternalize", name), XDR_XdrAnon_SCPStatement_Pledges_Externalize(u.Externalize())) + return + case SCP_ST_NOMINATE: + x.Marshal(x.Sprintf("%snominate", name), XDR_SCPNomination(u.Nominate())) + return + } + XdrPanic("invalid Type (%v) in XdrAnon_SCPStatement_Pledges", u.Type) +} +func XDR_XdrAnon_SCPStatement_Pledges(v *XdrAnon_SCPStatement_Pledges) *XdrAnon_SCPStatement_Pledges { + return v +} + +type XdrType_SCPStatement = *SCPStatement + +func (v *SCPStatement) XdrPointer() interface{} { return v } +func (SCPStatement) XdrTypeName() string { return "SCPStatement" } +func (v SCPStatement) XdrValue() interface{} { return v } +func (v *SCPStatement) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SCPStatement) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%snodeID", name), XDR_NodeID(&v.NodeID)) + x.Marshal(x.Sprintf("%sslotIndex", name), XDR_Uint64(&v.SlotIndex)) + x.Marshal(x.Sprintf("%spledges", name), XDR_XdrAnon_SCPStatement_Pledges(&v.Pledges)) +} +func XDR_SCPStatement(v *SCPStatement) *SCPStatement { return v } + +type XdrType_SCPEnvelope = *SCPEnvelope + +func (v *SCPEnvelope) XdrPointer() interface{} { return v } +func (SCPEnvelope) XdrTypeName() string { return "SCPEnvelope" } +func (v SCPEnvelope) XdrValue() interface{} { return v } +func (v *SCPEnvelope) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SCPEnvelope) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sstatement", name), XDR_SCPStatement(&v.Statement)) + x.Marshal(x.Sprintf("%ssignature", name), XDR_Signature(&v.Signature)) +} +func XDR_SCPEnvelope(v *SCPEnvelope) *SCPEnvelope { return v } + +type _XdrVec_unbounded_NodeID []NodeID + +func (_XdrVec_unbounded_NodeID) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_NodeID) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_NodeID length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_NodeID length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_NodeID) 
GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_NodeID) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]NodeID, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_NodeID) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_NodeID(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_NodeID) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_NodeID) XdrTypeName() string { return "NodeID<>" } +func (v *_XdrVec_unbounded_NodeID) XdrPointer() interface{} { return (*[]NodeID)(v) } +func (v _XdrVec_unbounded_NodeID) XdrValue() interface{} { return ([]NodeID)(v) } +func (v *_XdrVec_unbounded_NodeID) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type _XdrVec_unbounded_SCPQuorumSet []SCPQuorumSet + +func (_XdrVec_unbounded_SCPQuorumSet) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_SCPQuorumSet) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_SCPQuorumSet length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_SCPQuorumSet length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_SCPQuorumSet) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_SCPQuorumSet) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]SCPQuorumSet, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_SCPQuorumSet) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_SCPQuorumSet(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_SCPQuorumSet) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_SCPQuorumSet) XdrTypeName() string { return "SCPQuorumSet<>" } +func (v *_XdrVec_unbounded_SCPQuorumSet) XdrPointer() interface{} { return (*[]SCPQuorumSet)(v) } +func (v _XdrVec_unbounded_SCPQuorumSet) XdrValue() interface{} { return ([]SCPQuorumSet)(v) } +func (v *_XdrVec_unbounded_SCPQuorumSet) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_SCPQuorumSet = *SCPQuorumSet + +func (v *SCPQuorumSet) XdrPointer() interface{} { return v } +func (SCPQuorumSet) 
XdrTypeName() string { return "SCPQuorumSet" } +func (v SCPQuorumSet) XdrValue() interface{} { return v } +func (v *SCPQuorumSet) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SCPQuorumSet) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sthreshold", name), XDR_Uint32(&v.Threshold)) + x.Marshal(x.Sprintf("%svalidators", name), (*_XdrVec_unbounded_NodeID)(&v.Validators)) + x.Marshal(x.Sprintf("%sinnerSets", name), (*_XdrVec_unbounded_SCPQuorumSet)(&v.InnerSets)) +} +func XDR_SCPQuorumSet(v *SCPQuorumSet) *SCPQuorumSet { return v } + +type XdrType_AccountID struct { + XdrType_PublicKey +} + +func XDR_AccountID(v *AccountID) XdrType_AccountID { + return XdrType_AccountID{XDR_PublicKey(v)} +} +func (XdrType_AccountID) XdrTypeName() string { return "AccountID" } +func (v XdrType_AccountID) XdrUnwrap() XdrType { return v.XdrType_PublicKey } + +type _XdrArray_4_opaque [4]byte + +func (v *_XdrArray_4_opaque) GetByteSlice() []byte { return v[:] } +func (v *_XdrArray_4_opaque) XdrTypeName() string { return "opaque[]" } +func (v *_XdrArray_4_opaque) XdrValue() interface{} { return v[:] } +func (v *_XdrArray_4_opaque) XdrPointer() interface{} { return (*[4]byte)(v) } +func (v *_XdrArray_4_opaque) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *_XdrArray_4_opaque) String() string { return fmt.Sprintf("%x", v[:]) } +func (v *_XdrArray_4_opaque) Scan(ss fmt.ScanState, c rune) error { + return XdrArrayOpaqueScan(v[:], ss, c) +} +func (_XdrArray_4_opaque) XdrArraySize() uint32 { + const bound uint32 = 4 // Force error if not const or doesn't fit + return bound +} + +type XdrType_Thresholds struct { + *_XdrArray_4_opaque +} + +func XDR_Thresholds(v *Thresholds) XdrType_Thresholds { + return XdrType_Thresholds{(*_XdrArray_4_opaque)(v)} +} +func (XdrType_Thresholds) XdrTypeName() string { return "Thresholds" } +func (v XdrType_Thresholds) XdrUnwrap() XdrType { return v._XdrArray_4_opaque } + +type XdrType_String32 struct { + XdrString +} + +func XDR_String32(v *String32) XdrType_String32 { + return XdrType_String32{XdrString{v, 32}} +} +func (XdrType_String32) XdrTypeName() string { return "String32" } +func (v XdrType_String32) XdrUnwrap() XdrType { return v.XdrString } + +type XdrType_String64 struct { + XdrString +} + +func XDR_String64(v *String64) XdrType_String64 { + return XdrType_String64{XdrString{v, 64}} +} +func (XdrType_String64) XdrTypeName() string { return "String64" } +func (v XdrType_String64) XdrUnwrap() XdrType { return v.XdrString } + +type XdrType_SequenceNumber struct { + XdrType_Int64 +} + +func XDR_SequenceNumber(v *SequenceNumber) XdrType_SequenceNumber { + return XdrType_SequenceNumber{XDR_Int64(v)} +} +func (XdrType_SequenceNumber) XdrTypeName() string { return "SequenceNumber" } +func (v XdrType_SequenceNumber) XdrUnwrap() XdrType { return v.XdrType_Int64 } + +type XdrType_TimePoint struct { + XdrType_Uint64 +} + +func XDR_TimePoint(v *TimePoint) XdrType_TimePoint { + return XdrType_TimePoint{XDR_Uint64(v)} +} +func (XdrType_TimePoint) XdrTypeName() string { return "TimePoint" } +func (v XdrType_TimePoint) XdrUnwrap() XdrType { return v.XdrType_Uint64 } + +type XdrType_DataValue struct { + XdrVecOpaque +} + +func XDR_DataValue(v *DataValue) XdrType_DataValue { + return XdrType_DataValue{XdrVecOpaque{v, 64}} +} +func (XdrType_DataValue) XdrTypeName() string { return "DataValue" } +func (v XdrType_DataValue) XdrUnwrap() XdrType { return v.XdrVecOpaque } + +type XdrType_PoolID struct { + 
XdrType_Hash +} + +func XDR_PoolID(v *PoolID) XdrType_PoolID { + return XdrType_PoolID{XDR_Hash(v)} +} +func (XdrType_PoolID) XdrTypeName() string { return "PoolID" } +func (v XdrType_PoolID) XdrUnwrap() XdrType { return v.XdrType_Hash } + +type XdrType_AssetCode4 struct { + *_XdrArray_4_opaque +} + +func XDR_AssetCode4(v *AssetCode4) XdrType_AssetCode4 { + return XdrType_AssetCode4{(*_XdrArray_4_opaque)(v)} +} +func (XdrType_AssetCode4) XdrTypeName() string { return "AssetCode4" } +func (v XdrType_AssetCode4) XdrUnwrap() XdrType { return v._XdrArray_4_opaque } + +type _XdrArray_12_opaque [12]byte + +func (v *_XdrArray_12_opaque) GetByteSlice() []byte { return v[:] } +func (v *_XdrArray_12_opaque) XdrTypeName() string { return "opaque[]" } +func (v *_XdrArray_12_opaque) XdrValue() interface{} { return v[:] } +func (v *_XdrArray_12_opaque) XdrPointer() interface{} { return (*[12]byte)(v) } +func (v *_XdrArray_12_opaque) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *_XdrArray_12_opaque) String() string { return fmt.Sprintf("%x", v[:]) } +func (v *_XdrArray_12_opaque) Scan(ss fmt.ScanState, c rune) error { + return XdrArrayOpaqueScan(v[:], ss, c) +} +func (_XdrArray_12_opaque) XdrArraySize() uint32 { + const bound uint32 = 12 // Force error if not const or doesn't fit + return bound +} + +type XdrType_AssetCode12 struct { + *_XdrArray_12_opaque +} + +func XDR_AssetCode12(v *AssetCode12) XdrType_AssetCode12 { + return XdrType_AssetCode12{(*_XdrArray_12_opaque)(v)} +} +func (XdrType_AssetCode12) XdrTypeName() string { return "AssetCode12" } +func (v XdrType_AssetCode12) XdrUnwrap() XdrType { return v._XdrArray_12_opaque } + +var _XdrNames_AssetType = map[int32]string{ + int32(ASSET_TYPE_NATIVE): "ASSET_TYPE_NATIVE", + int32(ASSET_TYPE_CREDIT_ALPHANUM4): "ASSET_TYPE_CREDIT_ALPHANUM4", + int32(ASSET_TYPE_CREDIT_ALPHANUM12): "ASSET_TYPE_CREDIT_ALPHANUM12", + int32(ASSET_TYPE_POOL_SHARE): "ASSET_TYPE_POOL_SHARE", +} +var _XdrValues_AssetType = map[string]int32{ + "ASSET_TYPE_NATIVE": int32(ASSET_TYPE_NATIVE), + "ASSET_TYPE_CREDIT_ALPHANUM4": int32(ASSET_TYPE_CREDIT_ALPHANUM4), + "ASSET_TYPE_CREDIT_ALPHANUM12": int32(ASSET_TYPE_CREDIT_ALPHANUM12), + "ASSET_TYPE_POOL_SHARE": int32(ASSET_TYPE_POOL_SHARE), +} + +func (AssetType) XdrEnumNames() map[int32]string { + return _XdrNames_AssetType +} +func (v AssetType) String() string { + if s, ok := _XdrNames_AssetType[int32(v)]; ok { + return s + } + return fmt.Sprintf("AssetType#%d", v) +} +func (v *AssetType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_AssetType[stok]; ok { + *v = AssetType(val) + return nil + } else if stok == "AssetType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid AssetType.", stok)) + } +} +func (v AssetType) GetU32() uint32 { return uint32(v) } +func (v *AssetType) SetU32(n uint32) { *v = AssetType(n) } +func (v *AssetType) XdrPointer() interface{} { return v } +func (AssetType) XdrTypeName() string { return "AssetType" } +func (v AssetType) XdrValue() interface{} { return v } +func (v *AssetType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_AssetType = *AssetType + +func XDR_AssetType(v *AssetType) *AssetType { return v } + +var _XdrTags_AssetCode = map[int32]bool{ + XdrToI32(ASSET_TYPE_CREDIT_ALPHANUM4): true, + XdrToI32(ASSET_TYPE_CREDIT_ALPHANUM12): true, +} + 
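+// Usage sketch for the AssetCode union accessors that follow: each arm has an
+// accessor that lazily allocates its value when the discriminant matches and
+// calls XdrPanic otherwise. The snippet is illustrative only; the variable
+// name ac and the "USDC" code are arbitrary examples, not part of this file.
+//
+//	var ac AssetCode
+//	ac.Type = ASSET_TYPE_CREDIT_ALPHANUM4
+//	copy(ac.AssetCode4()[:], "USDC") // matching accessor: allocates the 4-byte arm
+//	_ = ac.XdrValid()                // true, ALPHANUM4 is a valid tag
+//	// Calling ac.AssetCode12() here would panic, since Type selects the 4-byte arm.
+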
+func (_ AssetCode) XdrValidTags() map[int32]bool { + return _XdrTags_AssetCode +} +func (u *AssetCode) AssetCode4() *AssetCode4 { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM4: + if v, ok := u._u.(*AssetCode4); ok { + return v + } else { + var zero AssetCode4 + u._u = &zero + return &zero + } + default: + XdrPanic("AssetCode.AssetCode4 accessed when Type == %v", u.Type) + return nil + } +} +func (u *AssetCode) AssetCode12() *AssetCode12 { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM12: + if v, ok := u._u.(*AssetCode12); ok { + return v + } else { + var zero AssetCode12 + u._u = &zero + return &zero + } + default: + XdrPanic("AssetCode.AssetCode12 accessed when Type == %v", u.Type) + return nil + } +} +func (u AssetCode) XdrValid() bool { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM4, ASSET_TYPE_CREDIT_ALPHANUM12: + return true + } + return false +} +func (u *AssetCode) XdrUnionTag() XdrNum32 { + return XDR_AssetType(&u.Type) +} +func (u *AssetCode) XdrUnionTagName() string { + return "Type" +} +func (u *AssetCode) XdrUnionBody() XdrType { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM4: + return XDR_AssetCode4(u.AssetCode4()) + case ASSET_TYPE_CREDIT_ALPHANUM12: + return XDR_AssetCode12(u.AssetCode12()) + } + return nil +} +func (u *AssetCode) XdrUnionBodyName() string { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM4: + return "AssetCode4" + case ASSET_TYPE_CREDIT_ALPHANUM12: + return "AssetCode12" + } + return "" +} + +type XdrType_AssetCode = *AssetCode + +func (v *AssetCode) XdrPointer() interface{} { return v } +func (AssetCode) XdrTypeName() string { return "AssetCode" } +func (v AssetCode) XdrValue() interface{} { return v } +func (v *AssetCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *AssetCode) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_AssetType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM4: + x.Marshal(x.Sprintf("%sassetCode4", name), XDR_AssetCode4(u.AssetCode4())) + return + case ASSET_TYPE_CREDIT_ALPHANUM12: + x.Marshal(x.Sprintf("%sassetCode12", name), XDR_AssetCode12(u.AssetCode12())) + return + } + XdrPanic("invalid Type (%v) in AssetCode", u.Type) +} +func (v *AssetCode) XdrInitialize() { + var zero AssetType + switch zero { + case ASSET_TYPE_CREDIT_ALPHANUM4, ASSET_TYPE_CREDIT_ALPHANUM12: + default: + if v.Type == zero { + v.Type = ASSET_TYPE_CREDIT_ALPHANUM4 + } + } +} +func XDR_AssetCode(v *AssetCode) *AssetCode { return v } + +type XdrType_AlphaNum4 = *AlphaNum4 + +func (v *AlphaNum4) XdrPointer() interface{} { return v } +func (AlphaNum4) XdrTypeName() string { return "AlphaNum4" } +func (v AlphaNum4) XdrValue() interface{} { return v } +func (v *AlphaNum4) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *AlphaNum4) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sassetCode", name), XDR_AssetCode4(&v.AssetCode)) + x.Marshal(x.Sprintf("%sissuer", name), XDR_AccountID(&v.Issuer)) +} +func XDR_AlphaNum4(v *AlphaNum4) *AlphaNum4 { return v } + +type XdrType_AlphaNum12 = *AlphaNum12 + +func (v *AlphaNum12) XdrPointer() interface{} { return v } +func (AlphaNum12) XdrTypeName() string { return "AlphaNum12" } +func (v AlphaNum12) XdrValue() interface{} { return v } +func (v *AlphaNum12) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *AlphaNum12) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", 
name) + } + x.Marshal(x.Sprintf("%sassetCode", name), XDR_AssetCode12(&v.AssetCode)) + x.Marshal(x.Sprintf("%sissuer", name), XDR_AccountID(&v.Issuer)) +} +func XDR_AlphaNum12(v *AlphaNum12) *AlphaNum12 { return v } + +var _XdrTags_Asset = map[int32]bool{ + XdrToI32(ASSET_TYPE_NATIVE): true, + XdrToI32(ASSET_TYPE_CREDIT_ALPHANUM4): true, + XdrToI32(ASSET_TYPE_CREDIT_ALPHANUM12): true, +} + +func (_ Asset) XdrValidTags() map[int32]bool { + return _XdrTags_Asset +} +func (u *Asset) AlphaNum4() *AlphaNum4 { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM4: + if v, ok := u._u.(*AlphaNum4); ok { + return v + } else { + var zero AlphaNum4 + u._u = &zero + return &zero + } + default: + XdrPanic("Asset.AlphaNum4 accessed when Type == %v", u.Type) + return nil + } +} +func (u *Asset) AlphaNum12() *AlphaNum12 { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM12: + if v, ok := u._u.(*AlphaNum12); ok { + return v + } else { + var zero AlphaNum12 + u._u = &zero + return &zero + } + default: + XdrPanic("Asset.AlphaNum12 accessed when Type == %v", u.Type) + return nil + } +} +func (u Asset) XdrValid() bool { + switch u.Type { + case ASSET_TYPE_NATIVE, ASSET_TYPE_CREDIT_ALPHANUM4, ASSET_TYPE_CREDIT_ALPHANUM12: + return true + } + return false +} +func (u *Asset) XdrUnionTag() XdrNum32 { + return XDR_AssetType(&u.Type) +} +func (u *Asset) XdrUnionTagName() string { + return "Type" +} +func (u *Asset) XdrUnionBody() XdrType { + switch u.Type { + case ASSET_TYPE_NATIVE: + return nil + case ASSET_TYPE_CREDIT_ALPHANUM4: + return XDR_AlphaNum4(u.AlphaNum4()) + case ASSET_TYPE_CREDIT_ALPHANUM12: + return XDR_AlphaNum12(u.AlphaNum12()) + } + return nil +} +func (u *Asset) XdrUnionBodyName() string { + switch u.Type { + case ASSET_TYPE_NATIVE: + return "" + case ASSET_TYPE_CREDIT_ALPHANUM4: + return "AlphaNum4" + case ASSET_TYPE_CREDIT_ALPHANUM12: + return "AlphaNum12" + } + return "" +} + +type XdrType_Asset = *Asset + +func (v *Asset) XdrPointer() interface{} { return v } +func (Asset) XdrTypeName() string { return "Asset" } +func (v Asset) XdrValue() interface{} { return v } +func (v *Asset) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *Asset) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_AssetType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ASSET_TYPE_NATIVE: + return + case ASSET_TYPE_CREDIT_ALPHANUM4: + x.Marshal(x.Sprintf("%salphaNum4", name), XDR_AlphaNum4(u.AlphaNum4())) + return + case ASSET_TYPE_CREDIT_ALPHANUM12: + x.Marshal(x.Sprintf("%salphaNum12", name), XDR_AlphaNum12(u.AlphaNum12())) + return + } + XdrPanic("invalid Type (%v) in Asset", u.Type) +} +func XDR_Asset(v *Asset) *Asset { return v } + +type XdrType_Price = *Price + +func (v *Price) XdrPointer() interface{} { return v } +func (Price) XdrTypeName() string { return "Price" } +func (v Price) XdrValue() interface{} { return v } +func (v *Price) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *Price) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sn", name), XDR_Int32(&v.N)) + x.Marshal(x.Sprintf("%sd", name), XDR_Int32(&v.D)) +} +func XDR_Price(v *Price) *Price { return v } + +type XdrType_Liabilities = *Liabilities + +func (v *Liabilities) XdrPointer() interface{} { return v } +func (Liabilities) XdrTypeName() string { return "Liabilities" } +func (v Liabilities) XdrValue() interface{} { return v } +func (v *Liabilities) XdrMarshal(x XDR, name string) { 
x.Marshal(name, v) } +func (v *Liabilities) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sbuying", name), XDR_Int64(&v.Buying)) + x.Marshal(x.Sprintf("%sselling", name), XDR_Int64(&v.Selling)) +} +func XDR_Liabilities(v *Liabilities) *Liabilities { return v } + +var _XdrNames_ThresholdIndexes = map[int32]string{ + int32(THRESHOLD_MASTER_WEIGHT): "THRESHOLD_MASTER_WEIGHT", + int32(THRESHOLD_LOW): "THRESHOLD_LOW", + int32(THRESHOLD_MED): "THRESHOLD_MED", + int32(THRESHOLD_HIGH): "THRESHOLD_HIGH", +} +var _XdrValues_ThresholdIndexes = map[string]int32{ + "THRESHOLD_MASTER_WEIGHT": int32(THRESHOLD_MASTER_WEIGHT), + "THRESHOLD_LOW": int32(THRESHOLD_LOW), + "THRESHOLD_MED": int32(THRESHOLD_MED), + "THRESHOLD_HIGH": int32(THRESHOLD_HIGH), +} + +func (ThresholdIndexes) XdrEnumNames() map[int32]string { + return _XdrNames_ThresholdIndexes +} +func (v ThresholdIndexes) String() string { + if s, ok := _XdrNames_ThresholdIndexes[int32(v)]; ok { + return s + } + return fmt.Sprintf("ThresholdIndexes#%d", v) +} +func (v *ThresholdIndexes) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ThresholdIndexes[stok]; ok { + *v = ThresholdIndexes(val) + return nil + } else if stok == "ThresholdIndexes" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ThresholdIndexes.", stok)) + } +} +func (v ThresholdIndexes) GetU32() uint32 { return uint32(v) } +func (v *ThresholdIndexes) SetU32(n uint32) { *v = ThresholdIndexes(n) } +func (v *ThresholdIndexes) XdrPointer() interface{} { return v } +func (ThresholdIndexes) XdrTypeName() string { return "ThresholdIndexes" } +func (v ThresholdIndexes) XdrValue() interface{} { return v } +func (v *ThresholdIndexes) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ThresholdIndexes = *ThresholdIndexes + +func XDR_ThresholdIndexes(v *ThresholdIndexes) *ThresholdIndexes { return v } + +var _XdrNames_LedgerEntryType = map[int32]string{ + int32(ACCOUNT): "ACCOUNT", + int32(TRUSTLINE): "TRUSTLINE", + int32(OFFER): "OFFER", + int32(DATA): "DATA", + int32(CLAIMABLE_BALANCE): "CLAIMABLE_BALANCE", + int32(LIQUIDITY_POOL): "LIQUIDITY_POOL", +} +var _XdrValues_LedgerEntryType = map[string]int32{ + "ACCOUNT": int32(ACCOUNT), + "TRUSTLINE": int32(TRUSTLINE), + "OFFER": int32(OFFER), + "DATA": int32(DATA), + "CLAIMABLE_BALANCE": int32(CLAIMABLE_BALANCE), + "LIQUIDITY_POOL": int32(LIQUIDITY_POOL), +} + +func (LedgerEntryType) XdrEnumNames() map[int32]string { + return _XdrNames_LedgerEntryType +} +func (v LedgerEntryType) String() string { + if s, ok := _XdrNames_LedgerEntryType[int32(v)]; ok { + return s + } + return fmt.Sprintf("LedgerEntryType#%d", v) +} +func (v *LedgerEntryType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_LedgerEntryType[stok]; ok { + *v = LedgerEntryType(val) + return nil + } else if stok == "LedgerEntryType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid LedgerEntryType.", stok)) + } +} +func (v LedgerEntryType) GetU32() uint32 { return uint32(v) } +func (v *LedgerEntryType) SetU32(n uint32) { *v = LedgerEntryType(n) } +func (v *LedgerEntryType) 
XdrPointer() interface{} { return v } +func (LedgerEntryType) XdrTypeName() string { return "LedgerEntryType" } +func (v LedgerEntryType) XdrValue() interface{} { return v } +func (v *LedgerEntryType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LedgerEntryType = *LedgerEntryType + +func XDR_LedgerEntryType(v *LedgerEntryType) *LedgerEntryType { return v } + +type XdrType_Signer = *Signer + +func (v *Signer) XdrPointer() interface{} { return v } +func (Signer) XdrTypeName() string { return "Signer" } +func (v Signer) XdrValue() interface{} { return v } +func (v *Signer) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *Signer) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%skey", name), XDR_SignerKey(&v.Key)) + x.Marshal(x.Sprintf("%sweight", name), XDR_Uint32(&v.Weight)) +} +func XDR_Signer(v *Signer) *Signer { return v } + +var _XdrNames_AccountFlags = map[int32]string{ + int32(AUTH_REQUIRED_FLAG): "AUTH_REQUIRED_FLAG", + int32(AUTH_REVOCABLE_FLAG): "AUTH_REVOCABLE_FLAG", + int32(AUTH_IMMUTABLE_FLAG): "AUTH_IMMUTABLE_FLAG", + int32(AUTH_CLAWBACK_ENABLED_FLAG): "AUTH_CLAWBACK_ENABLED_FLAG", +} +var _XdrValues_AccountFlags = map[string]int32{ + "AUTH_REQUIRED_FLAG": int32(AUTH_REQUIRED_FLAG), + "AUTH_REVOCABLE_FLAG": int32(AUTH_REVOCABLE_FLAG), + "AUTH_IMMUTABLE_FLAG": int32(AUTH_IMMUTABLE_FLAG), + "AUTH_CLAWBACK_ENABLED_FLAG": int32(AUTH_CLAWBACK_ENABLED_FLAG), +} + +func (AccountFlags) XdrEnumNames() map[int32]string { + return _XdrNames_AccountFlags +} +func (v AccountFlags) String() string { + if s, ok := _XdrNames_AccountFlags[int32(v)]; ok { + return s + } + return fmt.Sprintf("AccountFlags#%d", v) +} +func (v *AccountFlags) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_AccountFlags[stok]; ok { + *v = AccountFlags(val) + return nil + } else if stok == "AccountFlags" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid AccountFlags.", stok)) + } +} +func (v AccountFlags) GetU32() uint32 { return uint32(v) } +func (v *AccountFlags) SetU32(n uint32) { *v = AccountFlags(n) } +func (v *AccountFlags) XdrPointer() interface{} { return v } +func (AccountFlags) XdrTypeName() string { return "AccountFlags" } +func (v AccountFlags) XdrValue() interface{} { return v } +func (v *AccountFlags) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_AccountFlags = *AccountFlags + +func XDR_AccountFlags(v *AccountFlags) *AccountFlags { return v } + +var _XdrComments_AccountFlags = map[int32]string{ + int32(AUTH_REQUIRED_FLAG): "Flags set on issuer accounts TrustLines are created with authorized set to \"false\" requiring the issuer to set it for each TrustLine", + int32(AUTH_REVOCABLE_FLAG): "If set, the authorized flag in TrustLines can be cleared otherwise, authorization cannot be revoked", + int32(AUTH_IMMUTABLE_FLAG): "Once set, causes all AUTH_* flags to be read-only", + int32(AUTH_CLAWBACK_ENABLED_FLAG): "Trustlines are created with clawback enabled set to \"true\", and claimable balances created from those trustlines are created with clawback enabled set to \"true\"", +} + +func (e AccountFlags) XdrEnumComments() map[int32]string { + return _XdrComments_AccountFlags +} +func (v *AccountFlags) XdrInitialize() { + switch AccountFlags(0) { + case AUTH_REQUIRED_FLAG, 
AUTH_REVOCABLE_FLAG, AUTH_IMMUTABLE_FLAG, AUTH_CLAWBACK_ENABLED_FLAG: + default: + if *v == AccountFlags(0) { + *v = AUTH_REQUIRED_FLAG + } + } +} + +type _XdrPtr_AccountID struct { + p **AccountID +} +type _ptrflag_AccountID _XdrPtr_AccountID + +func (v _ptrflag_AccountID) String() string { + if *v.p == nil { + return "nil" + } + return "non-nil" +} +func (v _ptrflag_AccountID) Scan(ss fmt.ScanState, r rune) error { + tok, err := ss.Token(true, func(c rune) bool { + return c == '-' || (c >= 'a' && c <= 'z') + }) + if err != nil { + return err + } + switch string(tok) { + case "nil": + v.SetU32(0) + case "non-nil": + v.SetU32(1) + default: + return XdrError("AccountID flag should be \"nil\" or \"non-nil\"") + } + return nil +} +func (v _ptrflag_AccountID) GetU32() uint32 { + if *v.p == nil { + return 0 + } + return 1 +} +func (v _ptrflag_AccountID) SetU32(nv uint32) { + switch nv { + case 0: + *v.p = nil + case 1: + if *v.p == nil { + *v.p = new(AccountID) + } + default: + XdrPanic("*AccountID present flag value %d should be 0 or 1", nv) + } +} +func (_ptrflag_AccountID) XdrTypeName() string { return "AccountID?" } +func (v _ptrflag_AccountID) XdrPointer() interface{} { return nil } +func (v _ptrflag_AccountID) XdrValue() interface{} { return v.GetU32() != 0 } +func (v _ptrflag_AccountID) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _ptrflag_AccountID) XdrBound() uint32 { return 1 } +func (v _XdrPtr_AccountID) GetPresent() bool { return *v.p != nil } +func (v _XdrPtr_AccountID) SetPresent(present bool) { + if !present { + *v.p = nil + } else if *v.p == nil { + *v.p = new(AccountID) + } +} +func (v _XdrPtr_AccountID) XdrMarshalValue(x XDR, name string) { + if *v.p != nil { + XDR_AccountID(*v.p).XdrMarshal(x, name) + } +} +func (v _XdrPtr_AccountID) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _XdrPtr_AccountID) XdrRecurse(x XDR, name string) { + x.Marshal(name, _ptrflag_AccountID(v)) + v.XdrMarshalValue(x, name) +} +func (_XdrPtr_AccountID) XdrTypeName() string { return "AccountID*" } +func (v _XdrPtr_AccountID) XdrPointer() interface{} { return v.p } +func (v _XdrPtr_AccountID) XdrValue() interface{} { return *v.p } + +type XdrType_SponsorshipDescriptor struct { + _XdrPtr_AccountID +} + +func XDR_SponsorshipDescriptor(v *SponsorshipDescriptor) XdrType_SponsorshipDescriptor { + return XdrType_SponsorshipDescriptor{_XdrPtr_AccountID{v}} +} +func (XdrType_SponsorshipDescriptor) XdrTypeName() string { return "SponsorshipDescriptor" } +func (v XdrType_SponsorshipDescriptor) XdrUnwrap() XdrType { return v._XdrPtr_AccountID } + +var _XdrTags_XdrAnon_AccountEntryExtensionV2_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_AccountEntryExtensionV2_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_AccountEntryExtensionV2_Ext +} +func (u XdrAnon_AccountEntryExtensionV2_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_AccountEntryExtensionV2_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_AccountEntryExtensionV2_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_AccountEntryExtensionV2_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_AccountEntryExtensionV2_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_AccountEntryExtensionV2_Ext = *XdrAnon_AccountEntryExtensionV2_Ext + +func (v *XdrAnon_AccountEntryExtensionV2_Ext) 
XdrPointer() interface{} { return v } +func (XdrAnon_AccountEntryExtensionV2_Ext) XdrTypeName() string { + return "XdrAnon_AccountEntryExtensionV2_Ext" +} +func (v XdrAnon_AccountEntryExtensionV2_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_AccountEntryExtensionV2_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_AccountEntryExtensionV2_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_AccountEntryExtensionV2_Ext", u.V) +} +func XDR_XdrAnon_AccountEntryExtensionV2_Ext(v *XdrAnon_AccountEntryExtensionV2_Ext) *XdrAnon_AccountEntryExtensionV2_Ext { + return v +} + +type _XdrVec_20_SponsorshipDescriptor []SponsorshipDescriptor + +func (_XdrVec_20_SponsorshipDescriptor) XdrBound() uint32 { + const bound uint32 = 20 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_20_SponsorshipDescriptor) XdrCheckLen(length uint32) { + if length > uint32(20) { + XdrPanic("_XdrVec_20_SponsorshipDescriptor length %d exceeds bound 20", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_20_SponsorshipDescriptor length %d exceeds max int", length) + } +} +func (v _XdrVec_20_SponsorshipDescriptor) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_20_SponsorshipDescriptor) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(20); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]SponsorshipDescriptor, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_20_SponsorshipDescriptor) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_SponsorshipDescriptor(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_20_SponsorshipDescriptor) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 20} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_20_SponsorshipDescriptor) XdrTypeName() string { return "SponsorshipDescriptor<>" } +func (v *_XdrVec_20_SponsorshipDescriptor) XdrPointer() interface{} { + return (*[]SponsorshipDescriptor)(v) +} +func (v _XdrVec_20_SponsorshipDescriptor) XdrValue() interface{} { return ([]SponsorshipDescriptor)(v) } +func (v *_XdrVec_20_SponsorshipDescriptor) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_AccountEntryExtensionV2 = *AccountEntryExtensionV2 + +func (v *AccountEntryExtensionV2) XdrPointer() interface{} { return v } +func (AccountEntryExtensionV2) XdrTypeName() string { return "AccountEntryExtensionV2" } +func (v AccountEntryExtensionV2) XdrValue() interface{} { return v } +func (v *AccountEntryExtensionV2) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *AccountEntryExtensionV2) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%snumSponsored", name), XDR_Uint32(&v.NumSponsored)) + x.Marshal(x.Sprintf("%snumSponsoring", name), XDR_Uint32(&v.NumSponsoring)) + 
x.Marshal(x.Sprintf("%ssignerSponsoringIDs", name), (*_XdrVec_20_SponsorshipDescriptor)(&v.SignerSponsoringIDs)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_AccountEntryExtensionV2_Ext(&v.Ext)) +} +func XDR_AccountEntryExtensionV2(v *AccountEntryExtensionV2) *AccountEntryExtensionV2 { return v } + +var _XdrTags_XdrAnon_AccountEntryExtensionV1_Ext = map[int32]bool{ + XdrToI32(0): true, + XdrToI32(2): true, +} + +func (_ XdrAnon_AccountEntryExtensionV1_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_AccountEntryExtensionV1_Ext +} +func (u *XdrAnon_AccountEntryExtensionV1_Ext) V2() *AccountEntryExtensionV2 { + switch u.V { + case 2: + if v, ok := u._u.(*AccountEntryExtensionV2); ok { + return v + } else { + var zero AccountEntryExtensionV2 + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_AccountEntryExtensionV1_Ext.V2 accessed when V == %v", u.V) + return nil + } +} +func (u XdrAnon_AccountEntryExtensionV1_Ext) XdrValid() bool { + switch u.V { + case 0, 2: + return true + } + return false +} +func (u *XdrAnon_AccountEntryExtensionV1_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_AccountEntryExtensionV1_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_AccountEntryExtensionV1_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + case 2: + return XDR_AccountEntryExtensionV2(u.V2()) + } + return nil +} +func (u *XdrAnon_AccountEntryExtensionV1_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + case 2: + return "V2" + } + return "" +} + +type XdrType_XdrAnon_AccountEntryExtensionV1_Ext = *XdrAnon_AccountEntryExtensionV1_Ext + +func (v *XdrAnon_AccountEntryExtensionV1_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_AccountEntryExtensionV1_Ext) XdrTypeName() string { + return "XdrAnon_AccountEntryExtensionV1_Ext" +} +func (v XdrAnon_AccountEntryExtensionV1_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_AccountEntryExtensionV1_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_AccountEntryExtensionV1_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + case 2: + x.Marshal(x.Sprintf("%sv2", name), XDR_AccountEntryExtensionV2(u.V2())) + return + } + XdrPanic("invalid V (%v) in XdrAnon_AccountEntryExtensionV1_Ext", u.V) +} +func XDR_XdrAnon_AccountEntryExtensionV1_Ext(v *XdrAnon_AccountEntryExtensionV1_Ext) *XdrAnon_AccountEntryExtensionV1_Ext { + return v +} + +type XdrType_AccountEntryExtensionV1 = *AccountEntryExtensionV1 + +func (v *AccountEntryExtensionV1) XdrPointer() interface{} { return v } +func (AccountEntryExtensionV1) XdrTypeName() string { return "AccountEntryExtensionV1" } +func (v AccountEntryExtensionV1) XdrValue() interface{} { return v } +func (v *AccountEntryExtensionV1) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *AccountEntryExtensionV1) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sliabilities", name), XDR_Liabilities(&v.Liabilities)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_AccountEntryExtensionV1_Ext(&v.Ext)) +} +func XDR_AccountEntryExtensionV1(v *AccountEntryExtensionV1) *AccountEntryExtensionV1 { return v } + +var _XdrTags_XdrAnon_AccountEntry_Ext = map[int32]bool{ + XdrToI32(0): true, + XdrToI32(1): true, +} + +func (_ XdrAnon_AccountEntry_Ext) XdrValidTags() map[int32]bool { + return 
_XdrTags_XdrAnon_AccountEntry_Ext +} +func (u *XdrAnon_AccountEntry_Ext) V1() *AccountEntryExtensionV1 { + switch u.V { + case 1: + if v, ok := u._u.(*AccountEntryExtensionV1); ok { + return v + } else { + var zero AccountEntryExtensionV1 + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_AccountEntry_Ext.V1 accessed when V == %v", u.V) + return nil + } +} +func (u XdrAnon_AccountEntry_Ext) XdrValid() bool { + switch u.V { + case 0, 1: + return true + } + return false +} +func (u *XdrAnon_AccountEntry_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_AccountEntry_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_AccountEntry_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + case 1: + return XDR_AccountEntryExtensionV1(u.V1()) + } + return nil +} +func (u *XdrAnon_AccountEntry_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + case 1: + return "V1" + } + return "" +} + +type XdrType_XdrAnon_AccountEntry_Ext = *XdrAnon_AccountEntry_Ext + +func (v *XdrAnon_AccountEntry_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_AccountEntry_Ext) XdrTypeName() string { return "XdrAnon_AccountEntry_Ext" } +func (v XdrAnon_AccountEntry_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_AccountEntry_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_AccountEntry_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + case 1: + x.Marshal(x.Sprintf("%sv1", name), XDR_AccountEntryExtensionV1(u.V1())) + return + } + XdrPanic("invalid V (%v) in XdrAnon_AccountEntry_Ext", u.V) +} +func XDR_XdrAnon_AccountEntry_Ext(v *XdrAnon_AccountEntry_Ext) *XdrAnon_AccountEntry_Ext { return v } + +type _XdrVec_20_Signer []Signer + +func (_XdrVec_20_Signer) XdrBound() uint32 { + const bound uint32 = 20 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_20_Signer) XdrCheckLen(length uint32) { + if length > uint32(20) { + XdrPanic("_XdrVec_20_Signer length %d exceeds bound 20", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_20_Signer length %d exceeds max int", length) + } +} +func (v _XdrVec_20_Signer) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_20_Signer) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(20); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]Signer, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_20_Signer) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_Signer(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_20_Signer) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 20} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_20_Signer) XdrTypeName() string { return "Signer<>" } +func (v *_XdrVec_20_Signer) XdrPointer() interface{} { return (*[]Signer)(v) } +func (v _XdrVec_20_Signer) XdrValue() interface{} { return ([]Signer)(v) } 
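// Editor's note: illustrative sketch only, not part of the generated patch. The
// bounded-vector wrapper above enforces the XDR bound of 20 while growing the
// underlying []Signer slice; only names already defined in this file are assumed.
//
//	var signers _XdrVec_20_Signer
//	signers.SetVecLen(3)    // grows the slice to len 3, within the bound
//	_ = signers.GetVecLen() // returns 3
//	// signers.SetVecLen(21) would XdrPanic: length 21 exceeds bound 20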
+func (v *_XdrVec_20_Signer) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_AccountEntry = *AccountEntry + +func (v *AccountEntry) XdrPointer() interface{} { return v } +func (AccountEntry) XdrTypeName() string { return "AccountEntry" } +func (v AccountEntry) XdrValue() interface{} { return v } +func (v *AccountEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *AccountEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%saccountID", name), XDR_AccountID(&v.AccountID)) + x.Marshal(x.Sprintf("%sbalance", name), XDR_Int64(&v.Balance)) + x.Marshal(x.Sprintf("%sseqNum", name), XDR_SequenceNumber(&v.SeqNum)) + x.Marshal(x.Sprintf("%snumSubEntries", name), XDR_Uint32(&v.NumSubEntries)) + x.Marshal(x.Sprintf("%sinflationDest", name), _XdrPtr_AccountID{&v.InflationDest}) + x.Marshal(x.Sprintf("%sflags", name), XDR_Uint32(&v.Flags)) + x.Marshal(x.Sprintf("%shomeDomain", name), XDR_String32(&v.HomeDomain)) + x.Marshal(x.Sprintf("%sthresholds", name), XDR_Thresholds(&v.Thresholds)) + x.Marshal(x.Sprintf("%ssigners", name), (*_XdrVec_20_Signer)(&v.Signers)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_AccountEntry_Ext(&v.Ext)) +} +func XDR_AccountEntry(v *AccountEntry) *AccountEntry { return v } + +var _XdrNames_TrustLineFlags = map[int32]string{ + int32(AUTHORIZED_FLAG): "AUTHORIZED_FLAG", + int32(AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG): "AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG", + int32(TRUSTLINE_CLAWBACK_ENABLED_FLAG): "TRUSTLINE_CLAWBACK_ENABLED_FLAG", +} +var _XdrValues_TrustLineFlags = map[string]int32{ + "AUTHORIZED_FLAG": int32(AUTHORIZED_FLAG), + "AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG": int32(AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG), + "TRUSTLINE_CLAWBACK_ENABLED_FLAG": int32(TRUSTLINE_CLAWBACK_ENABLED_FLAG), +} + +func (TrustLineFlags) XdrEnumNames() map[int32]string { + return _XdrNames_TrustLineFlags +} +func (v TrustLineFlags) String() string { + if s, ok := _XdrNames_TrustLineFlags[int32(v)]; ok { + return s + } + return fmt.Sprintf("TrustLineFlags#%d", v) +} +func (v *TrustLineFlags) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_TrustLineFlags[stok]; ok { + *v = TrustLineFlags(val) + return nil + } else if stok == "TrustLineFlags" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid TrustLineFlags.", stok)) + } +} +func (v TrustLineFlags) GetU32() uint32 { return uint32(v) } +func (v *TrustLineFlags) SetU32(n uint32) { *v = TrustLineFlags(n) } +func (v *TrustLineFlags) XdrPointer() interface{} { return v } +func (TrustLineFlags) XdrTypeName() string { return "TrustLineFlags" } +func (v TrustLineFlags) XdrValue() interface{} { return v } +func (v *TrustLineFlags) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_TrustLineFlags = *TrustLineFlags + +func XDR_TrustLineFlags(v *TrustLineFlags) *TrustLineFlags { return v } + +var _XdrComments_TrustLineFlags = map[int32]string{ + int32(AUTHORIZED_FLAG): "issuer has authorized account to perform transactions with its credit", + int32(AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG): "issuer has authorized account to maintain and reduce liabilities for its credit", + int32(TRUSTLINE_CLAWBACK_ENABLED_FLAG): "issuer has specified that it may clawback its credit, and that claimable balances created with its 
credit may also be clawed back", +} + +func (e TrustLineFlags) XdrEnumComments() map[int32]string { + return _XdrComments_TrustLineFlags +} +func (v *TrustLineFlags) XdrInitialize() { + switch TrustLineFlags(0) { + case AUTHORIZED_FLAG, AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG, TRUSTLINE_CLAWBACK_ENABLED_FLAG: + default: + if *v == TrustLineFlags(0) { + *v = AUTHORIZED_FLAG + } + } +} + +var _XdrNames_LiquidityPoolType = map[int32]string{ + int32(LIQUIDITY_POOL_CONSTANT_PRODUCT): "LIQUIDITY_POOL_CONSTANT_PRODUCT", +} +var _XdrValues_LiquidityPoolType = map[string]int32{ + "LIQUIDITY_POOL_CONSTANT_PRODUCT": int32(LIQUIDITY_POOL_CONSTANT_PRODUCT), +} + +func (LiquidityPoolType) XdrEnumNames() map[int32]string { + return _XdrNames_LiquidityPoolType +} +func (v LiquidityPoolType) String() string { + if s, ok := _XdrNames_LiquidityPoolType[int32(v)]; ok { + return s + } + return fmt.Sprintf("LiquidityPoolType#%d", v) +} +func (v *LiquidityPoolType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_LiquidityPoolType[stok]; ok { + *v = LiquidityPoolType(val) + return nil + } else if stok == "LiquidityPoolType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid LiquidityPoolType.", stok)) + } +} +func (v LiquidityPoolType) GetU32() uint32 { return uint32(v) } +func (v *LiquidityPoolType) SetU32(n uint32) { *v = LiquidityPoolType(n) } +func (v *LiquidityPoolType) XdrPointer() interface{} { return v } +func (LiquidityPoolType) XdrTypeName() string { return "LiquidityPoolType" } +func (v LiquidityPoolType) XdrValue() interface{} { return v } +func (v *LiquidityPoolType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LiquidityPoolType = *LiquidityPoolType + +func XDR_LiquidityPoolType(v *LiquidityPoolType) *LiquidityPoolType { return v } + +var _XdrTags_TrustLineAsset = map[int32]bool{ + XdrToI32(ASSET_TYPE_NATIVE): true, + XdrToI32(ASSET_TYPE_CREDIT_ALPHANUM4): true, + XdrToI32(ASSET_TYPE_CREDIT_ALPHANUM12): true, + XdrToI32(ASSET_TYPE_POOL_SHARE): true, +} + +func (_ TrustLineAsset) XdrValidTags() map[int32]bool { + return _XdrTags_TrustLineAsset +} +func (u *TrustLineAsset) AlphaNum4() *AlphaNum4 { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM4: + if v, ok := u._u.(*AlphaNum4); ok { + return v + } else { + var zero AlphaNum4 + u._u = &zero + return &zero + } + default: + XdrPanic("TrustLineAsset.AlphaNum4 accessed when Type == %v", u.Type) + return nil + } +} +func (u *TrustLineAsset) AlphaNum12() *AlphaNum12 { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM12: + if v, ok := u._u.(*AlphaNum12); ok { + return v + } else { + var zero AlphaNum12 + u._u = &zero + return &zero + } + default: + XdrPanic("TrustLineAsset.AlphaNum12 accessed when Type == %v", u.Type) + return nil + } +} +func (u *TrustLineAsset) LiquidityPoolID() *PoolID { + switch u.Type { + case ASSET_TYPE_POOL_SHARE: + if v, ok := u._u.(*PoolID); ok { + return v + } else { + var zero PoolID + u._u = &zero + return &zero + } + default: + XdrPanic("TrustLineAsset.LiquidityPoolID accessed when Type == %v", u.Type) + return nil + } +} +func (u TrustLineAsset) XdrValid() bool { + switch u.Type { + case ASSET_TYPE_NATIVE, ASSET_TYPE_CREDIT_ALPHANUM4, ASSET_TYPE_CREDIT_ALPHANUM12, ASSET_TYPE_POOL_SHARE: + return true + } + return false +} +func (u *TrustLineAsset) XdrUnionTag() XdrNum32 { + return 
XDR_AssetType(&u.Type) +} +func (u *TrustLineAsset) XdrUnionTagName() string { + return "Type" +} +func (u *TrustLineAsset) XdrUnionBody() XdrType { + switch u.Type { + case ASSET_TYPE_NATIVE: + return nil + case ASSET_TYPE_CREDIT_ALPHANUM4: + return XDR_AlphaNum4(u.AlphaNum4()) + case ASSET_TYPE_CREDIT_ALPHANUM12: + return XDR_AlphaNum12(u.AlphaNum12()) + case ASSET_TYPE_POOL_SHARE: + return XDR_PoolID(u.LiquidityPoolID()) + } + return nil +} +func (u *TrustLineAsset) XdrUnionBodyName() string { + switch u.Type { + case ASSET_TYPE_NATIVE: + return "" + case ASSET_TYPE_CREDIT_ALPHANUM4: + return "AlphaNum4" + case ASSET_TYPE_CREDIT_ALPHANUM12: + return "AlphaNum12" + case ASSET_TYPE_POOL_SHARE: + return "LiquidityPoolID" + } + return "" +} + +type XdrType_TrustLineAsset = *TrustLineAsset + +func (v *TrustLineAsset) XdrPointer() interface{} { return v } +func (TrustLineAsset) XdrTypeName() string { return "TrustLineAsset" } +func (v TrustLineAsset) XdrValue() interface{} { return v } +func (v *TrustLineAsset) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *TrustLineAsset) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_AssetType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ASSET_TYPE_NATIVE: + return + case ASSET_TYPE_CREDIT_ALPHANUM4: + x.Marshal(x.Sprintf("%salphaNum4", name), XDR_AlphaNum4(u.AlphaNum4())) + return + case ASSET_TYPE_CREDIT_ALPHANUM12: + x.Marshal(x.Sprintf("%salphaNum12", name), XDR_AlphaNum12(u.AlphaNum12())) + return + case ASSET_TYPE_POOL_SHARE: + x.Marshal(x.Sprintf("%sliquidityPoolID", name), XDR_PoolID(u.LiquidityPoolID())) + return + } + XdrPanic("invalid Type (%v) in TrustLineAsset", u.Type) +} +func XDR_TrustLineAsset(v *TrustLineAsset) *TrustLineAsset { return v } + +var _XdrTags_XdrAnon_TrustLineEntryExtensionV2_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_TrustLineEntryExtensionV2_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_TrustLineEntryExtensionV2_Ext +} +func (u XdrAnon_TrustLineEntryExtensionV2_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_TrustLineEntryExtensionV2_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_TrustLineEntryExtensionV2_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_TrustLineEntryExtensionV2_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_TrustLineEntryExtensionV2_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_TrustLineEntryExtensionV2_Ext = *XdrAnon_TrustLineEntryExtensionV2_Ext + +func (v *XdrAnon_TrustLineEntryExtensionV2_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_TrustLineEntryExtensionV2_Ext) XdrTypeName() string { + return "XdrAnon_TrustLineEntryExtensionV2_Ext" +} +func (v XdrAnon_TrustLineEntryExtensionV2_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_TrustLineEntryExtensionV2_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_TrustLineEntryExtensionV2_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_TrustLineEntryExtensionV2_Ext", u.V) +} +func XDR_XdrAnon_TrustLineEntryExtensionV2_Ext(v *XdrAnon_TrustLineEntryExtensionV2_Ext) 
*XdrAnon_TrustLineEntryExtensionV2_Ext { + return v +} + +type XdrType_TrustLineEntryExtensionV2 = *TrustLineEntryExtensionV2 + +func (v *TrustLineEntryExtensionV2) XdrPointer() interface{} { return v } +func (TrustLineEntryExtensionV2) XdrTypeName() string { return "TrustLineEntryExtensionV2" } +func (v TrustLineEntryExtensionV2) XdrValue() interface{} { return v } +func (v *TrustLineEntryExtensionV2) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TrustLineEntryExtensionV2) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sliquidityPoolUseCount", name), XDR_Int32(&v.LiquidityPoolUseCount)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_TrustLineEntryExtensionV2_Ext(&v.Ext)) +} +func XDR_TrustLineEntryExtensionV2(v *TrustLineEntryExtensionV2) *TrustLineEntryExtensionV2 { return v } + +var _XdrTags_XdrAnon_TrustLineEntry_Ext_V1_Ext = map[int32]bool{ + XdrToI32(0): true, + XdrToI32(2): true, +} + +func (_ XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_TrustLineEntry_Ext_V1_Ext +} +func (u *XdrAnon_TrustLineEntry_Ext_V1_Ext) V2() *TrustLineEntryExtensionV2 { + switch u.V { + case 2: + if v, ok := u._u.(*TrustLineEntryExtensionV2); ok { + return v + } else { + var zero TrustLineEntryExtensionV2 + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_TrustLineEntry_Ext_V1_Ext.V2 accessed when V == %v", u.V) + return nil + } +} +func (u XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrValid() bool { + switch u.V { + case 0, 2: + return true + } + return false +} +func (u *XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + case 2: + return XDR_TrustLineEntryExtensionV2(u.V2()) + } + return nil +} +func (u *XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + case 2: + return "V2" + } + return "" +} + +type XdrType_XdrAnon_TrustLineEntry_Ext_V1_Ext = *XdrAnon_TrustLineEntry_Ext_V1_Ext + +func (v *XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrTypeName() string { + return "XdrAnon_TrustLineEntry_Ext_V1_Ext" +} +func (v XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_TrustLineEntry_Ext_V1_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + case 2: + x.Marshal(x.Sprintf("%sv2", name), XDR_TrustLineEntryExtensionV2(u.V2())) + return + } + XdrPanic("invalid V (%v) in XdrAnon_TrustLineEntry_Ext_V1_Ext", u.V) +} +func XDR_XdrAnon_TrustLineEntry_Ext_V1_Ext(v *XdrAnon_TrustLineEntry_Ext_V1_Ext) *XdrAnon_TrustLineEntry_Ext_V1_Ext { + return v +} + +type XdrType_XdrAnon_TrustLineEntry_Ext_V1 = *XdrAnon_TrustLineEntry_Ext_V1 + +func (v *XdrAnon_TrustLineEntry_Ext_V1) XdrPointer() interface{} { return v } +func (XdrAnon_TrustLineEntry_Ext_V1) XdrTypeName() string { return "XdrAnon_TrustLineEntry_Ext_V1" } +func (v XdrAnon_TrustLineEntry_Ext_V1) XdrValue() interface{} { return v } +func (v *XdrAnon_TrustLineEntry_Ext_V1) XdrMarshal(x XDR, name string) { x.Marshal(name, v) 
} +func (v *XdrAnon_TrustLineEntry_Ext_V1) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sliabilities", name), XDR_Liabilities(&v.Liabilities)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_TrustLineEntry_Ext_V1_Ext(&v.Ext)) +} +func XDR_XdrAnon_TrustLineEntry_Ext_V1(v *XdrAnon_TrustLineEntry_Ext_V1) *XdrAnon_TrustLineEntry_Ext_V1 { + return v +} + +var _XdrTags_XdrAnon_TrustLineEntry_Ext = map[int32]bool{ + XdrToI32(0): true, + XdrToI32(1): true, +} + +func (_ XdrAnon_TrustLineEntry_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_TrustLineEntry_Ext +} +func (u *XdrAnon_TrustLineEntry_Ext) V1() *XdrAnon_TrustLineEntry_Ext_V1 { + switch u.V { + case 1: + if v, ok := u._u.(*XdrAnon_TrustLineEntry_Ext_V1); ok { + return v + } else { + var zero XdrAnon_TrustLineEntry_Ext_V1 + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_TrustLineEntry_Ext.V1 accessed when V == %v", u.V) + return nil + } +} +func (u XdrAnon_TrustLineEntry_Ext) XdrValid() bool { + switch u.V { + case 0, 1: + return true + } + return false +} +func (u *XdrAnon_TrustLineEntry_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_TrustLineEntry_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_TrustLineEntry_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + case 1: + return XDR_XdrAnon_TrustLineEntry_Ext_V1(u.V1()) + } + return nil +} +func (u *XdrAnon_TrustLineEntry_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + case 1: + return "V1" + } + return "" +} + +type XdrType_XdrAnon_TrustLineEntry_Ext = *XdrAnon_TrustLineEntry_Ext + +func (v *XdrAnon_TrustLineEntry_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_TrustLineEntry_Ext) XdrTypeName() string { return "XdrAnon_TrustLineEntry_Ext" } +func (v XdrAnon_TrustLineEntry_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_TrustLineEntry_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_TrustLineEntry_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + case 1: + x.Marshal(x.Sprintf("%sv1", name), XDR_XdrAnon_TrustLineEntry_Ext_V1(u.V1())) + return + } + XdrPanic("invalid V (%v) in XdrAnon_TrustLineEntry_Ext", u.V) +} +func XDR_XdrAnon_TrustLineEntry_Ext(v *XdrAnon_TrustLineEntry_Ext) *XdrAnon_TrustLineEntry_Ext { + return v +} + +type XdrType_TrustLineEntry = *TrustLineEntry + +func (v *TrustLineEntry) XdrPointer() interface{} { return v } +func (TrustLineEntry) XdrTypeName() string { return "TrustLineEntry" } +func (v TrustLineEntry) XdrValue() interface{} { return v } +func (v *TrustLineEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TrustLineEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%saccountID", name), XDR_AccountID(&v.AccountID)) + x.Marshal(x.Sprintf("%sasset", name), XDR_TrustLineAsset(&v.Asset)) + x.Marshal(x.Sprintf("%sbalance", name), XDR_Int64(&v.Balance)) + x.Marshal(x.Sprintf("%slimit", name), XDR_Int64(&v.Limit)) + x.Marshal(x.Sprintf("%sflags", name), XDR_Uint32(&v.Flags)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_TrustLineEntry_Ext(&v.Ext)) +} +func XDR_TrustLineEntry(v *TrustLineEntry) *TrustLineEntry { return v } + +var _XdrNames_OfferEntryFlags = map[int32]string{ + int32(PASSIVE_FLAG): "PASSIVE_FLAG", +} +var 
_XdrValues_OfferEntryFlags = map[string]int32{ + "PASSIVE_FLAG": int32(PASSIVE_FLAG), +} + +func (OfferEntryFlags) XdrEnumNames() map[int32]string { + return _XdrNames_OfferEntryFlags +} +func (v OfferEntryFlags) String() string { + if s, ok := _XdrNames_OfferEntryFlags[int32(v)]; ok { + return s + } + return fmt.Sprintf("OfferEntryFlags#%d", v) +} +func (v *OfferEntryFlags) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_OfferEntryFlags[stok]; ok { + *v = OfferEntryFlags(val) + return nil + } else if stok == "OfferEntryFlags" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid OfferEntryFlags.", stok)) + } +} +func (v OfferEntryFlags) GetU32() uint32 { return uint32(v) } +func (v *OfferEntryFlags) SetU32(n uint32) { *v = OfferEntryFlags(n) } +func (v *OfferEntryFlags) XdrPointer() interface{} { return v } +func (OfferEntryFlags) XdrTypeName() string { return "OfferEntryFlags" } +func (v OfferEntryFlags) XdrValue() interface{} { return v } +func (v *OfferEntryFlags) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_OfferEntryFlags = *OfferEntryFlags + +func XDR_OfferEntryFlags(v *OfferEntryFlags) *OfferEntryFlags { return v } + +var _XdrComments_OfferEntryFlags = map[int32]string{ + int32(PASSIVE_FLAG): "an offer with this flag will not act on and take a reverse offer of equal price", +} + +func (e OfferEntryFlags) XdrEnumComments() map[int32]string { + return _XdrComments_OfferEntryFlags +} +func (v *OfferEntryFlags) XdrInitialize() { + switch OfferEntryFlags(0) { + case PASSIVE_FLAG: + default: + if *v == OfferEntryFlags(0) { + *v = PASSIVE_FLAG + } + } +} + +var _XdrTags_XdrAnon_OfferEntry_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_OfferEntry_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_OfferEntry_Ext +} +func (u XdrAnon_OfferEntry_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_OfferEntry_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_OfferEntry_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_OfferEntry_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_OfferEntry_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_OfferEntry_Ext = *XdrAnon_OfferEntry_Ext + +func (v *XdrAnon_OfferEntry_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_OfferEntry_Ext) XdrTypeName() string { return "XdrAnon_OfferEntry_Ext" } +func (v XdrAnon_OfferEntry_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_OfferEntry_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_OfferEntry_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_OfferEntry_Ext", u.V) +} +func XDR_XdrAnon_OfferEntry_Ext(v *XdrAnon_OfferEntry_Ext) *XdrAnon_OfferEntry_Ext { return v } + +type XdrType_OfferEntry = *OfferEntry + +func (v *OfferEntry) XdrPointer() interface{} { return v } +func (OfferEntry) XdrTypeName() string { return "OfferEntry" } +func (v OfferEntry) XdrValue() interface{} { return v } +func (v *OfferEntry) XdrMarshal(x XDR, name string) { 
x.Marshal(name, v) } +func (v *OfferEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssellerID", name), XDR_AccountID(&v.SellerID)) + x.Marshal(x.Sprintf("%sofferID", name), XDR_Int64(&v.OfferID)) + x.Marshal(x.Sprintf("%sselling", name), XDR_Asset(&v.Selling)) + x.Marshal(x.Sprintf("%sbuying", name), XDR_Asset(&v.Buying)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) + x.Marshal(x.Sprintf("%sprice", name), XDR_Price(&v.Price)) + x.Marshal(x.Sprintf("%sflags", name), XDR_Uint32(&v.Flags)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_OfferEntry_Ext(&v.Ext)) +} +func XDR_OfferEntry(v *OfferEntry) *OfferEntry { return v } + +var _XdrTags_XdrAnon_DataEntry_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_DataEntry_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_DataEntry_Ext +} +func (u XdrAnon_DataEntry_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_DataEntry_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_DataEntry_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_DataEntry_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_DataEntry_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_DataEntry_Ext = *XdrAnon_DataEntry_Ext + +func (v *XdrAnon_DataEntry_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_DataEntry_Ext) XdrTypeName() string { return "XdrAnon_DataEntry_Ext" } +func (v XdrAnon_DataEntry_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_DataEntry_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_DataEntry_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_DataEntry_Ext", u.V) +} +func XDR_XdrAnon_DataEntry_Ext(v *XdrAnon_DataEntry_Ext) *XdrAnon_DataEntry_Ext { return v } + +type XdrType_DataEntry = *DataEntry + +func (v *DataEntry) XdrPointer() interface{} { return v } +func (DataEntry) XdrTypeName() string { return "DataEntry" } +func (v DataEntry) XdrValue() interface{} { return v } +func (v *DataEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *DataEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%saccountID", name), XDR_AccountID(&v.AccountID)) + x.Marshal(x.Sprintf("%sdataName", name), XDR_String64(&v.DataName)) + x.Marshal(x.Sprintf("%sdataValue", name), XDR_DataValue(&v.DataValue)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_DataEntry_Ext(&v.Ext)) +} +func XDR_DataEntry(v *DataEntry) *DataEntry { return v } + +var _XdrNames_ClaimPredicateType = map[int32]string{ + int32(CLAIM_PREDICATE_UNCONDITIONAL): "CLAIM_PREDICATE_UNCONDITIONAL", + int32(CLAIM_PREDICATE_AND): "CLAIM_PREDICATE_AND", + int32(CLAIM_PREDICATE_OR): "CLAIM_PREDICATE_OR", + int32(CLAIM_PREDICATE_NOT): "CLAIM_PREDICATE_NOT", + int32(CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME): "CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME", + int32(CLAIM_PREDICATE_BEFORE_RELATIVE_TIME): "CLAIM_PREDICATE_BEFORE_RELATIVE_TIME", +} +var _XdrValues_ClaimPredicateType = map[string]int32{ + "CLAIM_PREDICATE_UNCONDITIONAL": int32(CLAIM_PREDICATE_UNCONDITIONAL), + "CLAIM_PREDICATE_AND": int32(CLAIM_PREDICATE_AND), 
+ "CLAIM_PREDICATE_OR": int32(CLAIM_PREDICATE_OR), + "CLAIM_PREDICATE_NOT": int32(CLAIM_PREDICATE_NOT), + "CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME": int32(CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME), + "CLAIM_PREDICATE_BEFORE_RELATIVE_TIME": int32(CLAIM_PREDICATE_BEFORE_RELATIVE_TIME), +} + +func (ClaimPredicateType) XdrEnumNames() map[int32]string { + return _XdrNames_ClaimPredicateType +} +func (v ClaimPredicateType) String() string { + if s, ok := _XdrNames_ClaimPredicateType[int32(v)]; ok { + return s + } + return fmt.Sprintf("ClaimPredicateType#%d", v) +} +func (v *ClaimPredicateType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ClaimPredicateType[stok]; ok { + *v = ClaimPredicateType(val) + return nil + } else if stok == "ClaimPredicateType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ClaimPredicateType.", stok)) + } +} +func (v ClaimPredicateType) GetU32() uint32 { return uint32(v) } +func (v *ClaimPredicateType) SetU32(n uint32) { *v = ClaimPredicateType(n) } +func (v *ClaimPredicateType) XdrPointer() interface{} { return v } +func (ClaimPredicateType) XdrTypeName() string { return "ClaimPredicateType" } +func (v ClaimPredicateType) XdrValue() interface{} { return v } +func (v *ClaimPredicateType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ClaimPredicateType = *ClaimPredicateType + +func XDR_ClaimPredicateType(v *ClaimPredicateType) *ClaimPredicateType { return v } + +type _XdrVec_2_ClaimPredicate []ClaimPredicate + +func (_XdrVec_2_ClaimPredicate) XdrBound() uint32 { + const bound uint32 = 2 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_2_ClaimPredicate) XdrCheckLen(length uint32) { + if length > uint32(2) { + XdrPanic("_XdrVec_2_ClaimPredicate length %d exceeds bound 2", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_2_ClaimPredicate length %d exceeds max int", length) + } +} +func (v _XdrVec_2_ClaimPredicate) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_2_ClaimPredicate) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(2); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]ClaimPredicate, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_2_ClaimPredicate) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_ClaimPredicate(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_2_ClaimPredicate) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 2} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_2_ClaimPredicate) XdrTypeName() string { return "ClaimPredicate<>" } +func (v *_XdrVec_2_ClaimPredicate) XdrPointer() interface{} { return (*[]ClaimPredicate)(v) } +func (v _XdrVec_2_ClaimPredicate) XdrValue() interface{} { return ([]ClaimPredicate)(v) } +func (v *_XdrVec_2_ClaimPredicate) XdrMarshal(x XDR, name string) { x.Marshal(name, v) 
} + +type _XdrPtr_ClaimPredicate struct { + p **ClaimPredicate +} +type _ptrflag_ClaimPredicate _XdrPtr_ClaimPredicate + +func (v _ptrflag_ClaimPredicate) String() string { + if *v.p == nil { + return "nil" + } + return "non-nil" +} +func (v _ptrflag_ClaimPredicate) Scan(ss fmt.ScanState, r rune) error { + tok, err := ss.Token(true, func(c rune) bool { + return c == '-' || (c >= 'a' && c <= 'z') + }) + if err != nil { + return err + } + switch string(tok) { + case "nil": + v.SetU32(0) + case "non-nil": + v.SetU32(1) + default: + return XdrError("ClaimPredicate flag should be \"nil\" or \"non-nil\"") + } + return nil +} +func (v _ptrflag_ClaimPredicate) GetU32() uint32 { + if *v.p == nil { + return 0 + } + return 1 +} +func (v _ptrflag_ClaimPredicate) SetU32(nv uint32) { + switch nv { + case 0: + *v.p = nil + case 1: + if *v.p == nil { + *v.p = new(ClaimPredicate) + } + default: + XdrPanic("*ClaimPredicate present flag value %d should be 0 or 1", nv) + } +} +func (_ptrflag_ClaimPredicate) XdrTypeName() string { return "ClaimPredicate?" } +func (v _ptrflag_ClaimPredicate) XdrPointer() interface{} { return nil } +func (v _ptrflag_ClaimPredicate) XdrValue() interface{} { return v.GetU32() != 0 } +func (v _ptrflag_ClaimPredicate) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _ptrflag_ClaimPredicate) XdrBound() uint32 { return 1 } +func (v _XdrPtr_ClaimPredicate) GetPresent() bool { return *v.p != nil } +func (v _XdrPtr_ClaimPredicate) SetPresent(present bool) { + if !present { + *v.p = nil + } else if *v.p == nil { + *v.p = new(ClaimPredicate) + } +} +func (v _XdrPtr_ClaimPredicate) XdrMarshalValue(x XDR, name string) { + if *v.p != nil { + XDR_ClaimPredicate(*v.p).XdrMarshal(x, name) + } +} +func (v _XdrPtr_ClaimPredicate) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _XdrPtr_ClaimPredicate) XdrRecurse(x XDR, name string) { + x.Marshal(name, _ptrflag_ClaimPredicate(v)) + v.XdrMarshalValue(x, name) +} +func (_XdrPtr_ClaimPredicate) XdrTypeName() string { return "ClaimPredicate*" } +func (v _XdrPtr_ClaimPredicate) XdrPointer() interface{} { return v.p } +func (v _XdrPtr_ClaimPredicate) XdrValue() interface{} { return *v.p } + +var _XdrTags_ClaimPredicate = map[int32]bool{ + XdrToI32(CLAIM_PREDICATE_UNCONDITIONAL): true, + XdrToI32(CLAIM_PREDICATE_AND): true, + XdrToI32(CLAIM_PREDICATE_OR): true, + XdrToI32(CLAIM_PREDICATE_NOT): true, + XdrToI32(CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME): true, + XdrToI32(CLAIM_PREDICATE_BEFORE_RELATIVE_TIME): true, +} + +func (_ ClaimPredicate) XdrValidTags() map[int32]bool { + return _XdrTags_ClaimPredicate +} +func (u *ClaimPredicate) AndPredicates() *[]ClaimPredicate { + switch u.Type { + case CLAIM_PREDICATE_AND: + if v, ok := u._u.(*[]ClaimPredicate); ok { + return v + } else { + var zero []ClaimPredicate + u._u = &zero + return &zero + } + default: + XdrPanic("ClaimPredicate.AndPredicates accessed when Type == %v", u.Type) + return nil + } +} +func (u *ClaimPredicate) OrPredicates() *[]ClaimPredicate { + switch u.Type { + case CLAIM_PREDICATE_OR: + if v, ok := u._u.(*[]ClaimPredicate); ok { + return v + } else { + var zero []ClaimPredicate + u._u = &zero + return &zero + } + default: + XdrPanic("ClaimPredicate.OrPredicates accessed when Type == %v", u.Type) + return nil + } +} +func (u *ClaimPredicate) NotPredicate() **ClaimPredicate { + switch u.Type { + case CLAIM_PREDICATE_NOT: + if v, ok := u._u.(**ClaimPredicate); ok { + return v + } else { + var zero *ClaimPredicate + u._u = &zero + return &zero + } + default: + 
XdrPanic("ClaimPredicate.NotPredicate accessed when Type == %v", u.Type) + return nil + } +} + +// Predicate will be true if closeTime < absBefore +func (u *ClaimPredicate) AbsBefore() *Int64 { + switch u.Type { + case CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: + if v, ok := u._u.(*Int64); ok { + return v + } else { + var zero Int64 + u._u = &zero + return &zero + } + default: + XdrPanic("ClaimPredicate.AbsBefore accessed when Type == %v", u.Type) + return nil + } +} + +// Seconds since closeTime of the ledger in which the +func (u *ClaimPredicate) RelBefore() *Int64 { + switch u.Type { + case CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: + if v, ok := u._u.(*Int64); ok { + return v + } else { + var zero Int64 + u._u = &zero + return &zero + } + default: + XdrPanic("ClaimPredicate.RelBefore accessed when Type == %v", u.Type) + return nil + } +} +func (u ClaimPredicate) XdrValid() bool { + switch u.Type { + case CLAIM_PREDICATE_UNCONDITIONAL, CLAIM_PREDICATE_AND, CLAIM_PREDICATE_OR, CLAIM_PREDICATE_NOT, CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME, CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: + return true + } + return false +} +func (u *ClaimPredicate) XdrUnionTag() XdrNum32 { + return XDR_ClaimPredicateType(&u.Type) +} +func (u *ClaimPredicate) XdrUnionTagName() string { + return "Type" +} +func (u *ClaimPredicate) XdrUnionBody() XdrType { + switch u.Type { + case CLAIM_PREDICATE_UNCONDITIONAL: + return nil + case CLAIM_PREDICATE_AND: + return (*_XdrVec_2_ClaimPredicate)(u.AndPredicates()) + case CLAIM_PREDICATE_OR: + return (*_XdrVec_2_ClaimPredicate)(u.OrPredicates()) + case CLAIM_PREDICATE_NOT: + return _XdrPtr_ClaimPredicate{u.NotPredicate()} + case CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: + return XDR_Int64(u.AbsBefore()) + case CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: + return XDR_Int64(u.RelBefore()) + } + return nil +} +func (u *ClaimPredicate) XdrUnionBodyName() string { + switch u.Type { + case CLAIM_PREDICATE_UNCONDITIONAL: + return "" + case CLAIM_PREDICATE_AND: + return "AndPredicates" + case CLAIM_PREDICATE_OR: + return "OrPredicates" + case CLAIM_PREDICATE_NOT: + return "NotPredicate" + case CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: + return "AbsBefore" + case CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: + return "RelBefore" + } + return "" +} + +type XdrType_ClaimPredicate = *ClaimPredicate + +func (v *ClaimPredicate) XdrPointer() interface{} { return v } +func (ClaimPredicate) XdrTypeName() string { return "ClaimPredicate" } +func (v ClaimPredicate) XdrValue() interface{} { return v } +func (v *ClaimPredicate) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *ClaimPredicate) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ClaimPredicateType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case CLAIM_PREDICATE_UNCONDITIONAL: + return + case CLAIM_PREDICATE_AND: + x.Marshal(x.Sprintf("%sandPredicates", name), (*_XdrVec_2_ClaimPredicate)(u.AndPredicates())) + return + case CLAIM_PREDICATE_OR: + x.Marshal(x.Sprintf("%sorPredicates", name), (*_XdrVec_2_ClaimPredicate)(u.OrPredicates())) + return + case CLAIM_PREDICATE_NOT: + x.Marshal(x.Sprintf("%snotPredicate", name), _XdrPtr_ClaimPredicate{u.NotPredicate()}) + return + case CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: + x.Marshal(x.Sprintf("%sabsBefore", name), XDR_Int64(u.AbsBefore())) + return + case CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: + x.Marshal(x.Sprintf("%srelBefore", name), XDR_Int64(u.RelBefore())) + return + } + XdrPanic("invalid Type (%v) in ClaimPredicate", u.Type) +} +func 
XDR_ClaimPredicate(v *ClaimPredicate) *ClaimPredicate { return v } + +var _XdrNames_ClaimantType = map[int32]string{ + int32(CLAIMANT_TYPE_V0): "CLAIMANT_TYPE_V0", +} +var _XdrValues_ClaimantType = map[string]int32{ + "CLAIMANT_TYPE_V0": int32(CLAIMANT_TYPE_V0), +} + +func (ClaimantType) XdrEnumNames() map[int32]string { + return _XdrNames_ClaimantType +} +func (v ClaimantType) String() string { + if s, ok := _XdrNames_ClaimantType[int32(v)]; ok { + return s + } + return fmt.Sprintf("ClaimantType#%d", v) +} +func (v *ClaimantType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ClaimantType[stok]; ok { + *v = ClaimantType(val) + return nil + } else if stok == "ClaimantType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ClaimantType.", stok)) + } +} +func (v ClaimantType) GetU32() uint32 { return uint32(v) } +func (v *ClaimantType) SetU32(n uint32) { *v = ClaimantType(n) } +func (v *ClaimantType) XdrPointer() interface{} { return v } +func (ClaimantType) XdrTypeName() string { return "ClaimantType" } +func (v ClaimantType) XdrValue() interface{} { return v } +func (v *ClaimantType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ClaimantType = *ClaimantType + +func XDR_ClaimantType(v *ClaimantType) *ClaimantType { return v } + +type XdrType_XdrAnon_Claimant_V0 = *XdrAnon_Claimant_V0 + +func (v *XdrAnon_Claimant_V0) XdrPointer() interface{} { return v } +func (XdrAnon_Claimant_V0) XdrTypeName() string { return "XdrAnon_Claimant_V0" } +func (v XdrAnon_Claimant_V0) XdrValue() interface{} { return v } +func (v *XdrAnon_Claimant_V0) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_Claimant_V0) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sdestination", name), XDR_AccountID(&v.Destination)) + x.Marshal(x.Sprintf("%spredicate", name), XDR_ClaimPredicate(&v.Predicate)) +} +func XDR_XdrAnon_Claimant_V0(v *XdrAnon_Claimant_V0) *XdrAnon_Claimant_V0 { return v } + +var _XdrTags_Claimant = map[int32]bool{ + XdrToI32(CLAIMANT_TYPE_V0): true, +} + +func (_ Claimant) XdrValidTags() map[int32]bool { + return _XdrTags_Claimant +} +func (u *Claimant) V0() *XdrAnon_Claimant_V0 { + switch u.Type { + case CLAIMANT_TYPE_V0: + if v, ok := u._u.(*XdrAnon_Claimant_V0); ok { + return v + } else { + var zero XdrAnon_Claimant_V0 + u._u = &zero + return &zero + } + default: + XdrPanic("Claimant.V0 accessed when Type == %v", u.Type) + return nil + } +} +func (u Claimant) XdrValid() bool { + switch u.Type { + case CLAIMANT_TYPE_V0: + return true + } + return false +} +func (u *Claimant) XdrUnionTag() XdrNum32 { + return XDR_ClaimantType(&u.Type) +} +func (u *Claimant) XdrUnionTagName() string { + return "Type" +} +func (u *Claimant) XdrUnionBody() XdrType { + switch u.Type { + case CLAIMANT_TYPE_V0: + return XDR_XdrAnon_Claimant_V0(u.V0()) + } + return nil +} +func (u *Claimant) XdrUnionBodyName() string { + switch u.Type { + case CLAIMANT_TYPE_V0: + return "V0" + } + return "" +} + +type XdrType_Claimant = *Claimant + +func (v *Claimant) XdrPointer() interface{} { return v } +func (Claimant) XdrTypeName() string { return "Claimant" } +func (v Claimant) XdrValue() interface{} { return v } +func (v *Claimant) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *Claimant) XdrRecurse(x 
XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ClaimantType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case CLAIMANT_TYPE_V0: + x.Marshal(x.Sprintf("%sv0", name), XDR_XdrAnon_Claimant_V0(u.V0())) + return + } + XdrPanic("invalid Type (%v) in Claimant", u.Type) +} +func XDR_Claimant(v *Claimant) *Claimant { return v } + +var _XdrNames_ClaimableBalanceIDType = map[int32]string{ + int32(CLAIMABLE_BALANCE_ID_TYPE_V0): "CLAIMABLE_BALANCE_ID_TYPE_V0", +} +var _XdrValues_ClaimableBalanceIDType = map[string]int32{ + "CLAIMABLE_BALANCE_ID_TYPE_V0": int32(CLAIMABLE_BALANCE_ID_TYPE_V0), +} + +func (ClaimableBalanceIDType) XdrEnumNames() map[int32]string { + return _XdrNames_ClaimableBalanceIDType +} +func (v ClaimableBalanceIDType) String() string { + if s, ok := _XdrNames_ClaimableBalanceIDType[int32(v)]; ok { + return s + } + return fmt.Sprintf("ClaimableBalanceIDType#%d", v) +} +func (v *ClaimableBalanceIDType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ClaimableBalanceIDType[stok]; ok { + *v = ClaimableBalanceIDType(val) + return nil + } else if stok == "ClaimableBalanceIDType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ClaimableBalanceIDType.", stok)) + } +} +func (v ClaimableBalanceIDType) GetU32() uint32 { return uint32(v) } +func (v *ClaimableBalanceIDType) SetU32(n uint32) { *v = ClaimableBalanceIDType(n) } +func (v *ClaimableBalanceIDType) XdrPointer() interface{} { return v } +func (ClaimableBalanceIDType) XdrTypeName() string { return "ClaimableBalanceIDType" } +func (v ClaimableBalanceIDType) XdrValue() interface{} { return v } +func (v *ClaimableBalanceIDType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ClaimableBalanceIDType = *ClaimableBalanceIDType + +func XDR_ClaimableBalanceIDType(v *ClaimableBalanceIDType) *ClaimableBalanceIDType { return v } + +var _XdrTags_ClaimableBalanceID = map[int32]bool{ + XdrToI32(CLAIMABLE_BALANCE_ID_TYPE_V0): true, +} + +func (_ ClaimableBalanceID) XdrValidTags() map[int32]bool { + return _XdrTags_ClaimableBalanceID +} +func (u *ClaimableBalanceID) V0() *Hash { + switch u.Type { + case CLAIMABLE_BALANCE_ID_TYPE_V0: + if v, ok := u._u.(*Hash); ok { + return v + } else { + var zero Hash + u._u = &zero + return &zero + } + default: + XdrPanic("ClaimableBalanceID.V0 accessed when Type == %v", u.Type) + return nil + } +} +func (u ClaimableBalanceID) XdrValid() bool { + switch u.Type { + case CLAIMABLE_BALANCE_ID_TYPE_V0: + return true + } + return false +} +func (u *ClaimableBalanceID) XdrUnionTag() XdrNum32 { + return XDR_ClaimableBalanceIDType(&u.Type) +} +func (u *ClaimableBalanceID) XdrUnionTagName() string { + return "Type" +} +func (u *ClaimableBalanceID) XdrUnionBody() XdrType { + switch u.Type { + case CLAIMABLE_BALANCE_ID_TYPE_V0: + return XDR_Hash(u.V0()) + } + return nil +} +func (u *ClaimableBalanceID) XdrUnionBodyName() string { + switch u.Type { + case CLAIMABLE_BALANCE_ID_TYPE_V0: + return "V0" + } + return "" +} + +type XdrType_ClaimableBalanceID = *ClaimableBalanceID + +func (v *ClaimableBalanceID) XdrPointer() interface{} { return v } +func (ClaimableBalanceID) XdrTypeName() string { return "ClaimableBalanceID" } +func (v ClaimableBalanceID) XdrValue() interface{} { return v } +func (v *ClaimableBalanceID) XdrMarshal(x XDR, name 
string) { x.Marshal(name, v) } +func (u *ClaimableBalanceID) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ClaimableBalanceIDType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case CLAIMABLE_BALANCE_ID_TYPE_V0: + x.Marshal(x.Sprintf("%sv0", name), XDR_Hash(u.V0())) + return + } + XdrPanic("invalid Type (%v) in ClaimableBalanceID", u.Type) +} +func XDR_ClaimableBalanceID(v *ClaimableBalanceID) *ClaimableBalanceID { return v } + +var _XdrNames_ClaimableBalanceFlags = map[int32]string{ + int32(CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG): "CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG", +} +var _XdrValues_ClaimableBalanceFlags = map[string]int32{ + "CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG": int32(CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG), +} + +func (ClaimableBalanceFlags) XdrEnumNames() map[int32]string { + return _XdrNames_ClaimableBalanceFlags +} +func (v ClaimableBalanceFlags) String() string { + if s, ok := _XdrNames_ClaimableBalanceFlags[int32(v)]; ok { + return s + } + return fmt.Sprintf("ClaimableBalanceFlags#%d", v) +} +func (v *ClaimableBalanceFlags) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ClaimableBalanceFlags[stok]; ok { + *v = ClaimableBalanceFlags(val) + return nil + } else if stok == "ClaimableBalanceFlags" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ClaimableBalanceFlags.", stok)) + } +} +func (v ClaimableBalanceFlags) GetU32() uint32 { return uint32(v) } +func (v *ClaimableBalanceFlags) SetU32(n uint32) { *v = ClaimableBalanceFlags(n) } +func (v *ClaimableBalanceFlags) XdrPointer() interface{} { return v } +func (ClaimableBalanceFlags) XdrTypeName() string { return "ClaimableBalanceFlags" } +func (v ClaimableBalanceFlags) XdrValue() interface{} { return v } +func (v *ClaimableBalanceFlags) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ClaimableBalanceFlags = *ClaimableBalanceFlags + +func XDR_ClaimableBalanceFlags(v *ClaimableBalanceFlags) *ClaimableBalanceFlags { return v } + +var _XdrComments_ClaimableBalanceFlags = map[int32]string{ + int32(CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG): "If set, the issuer account of the asset held by the claimable balance may clawback the claimable balance", +} + +func (e ClaimableBalanceFlags) XdrEnumComments() map[int32]string { + return _XdrComments_ClaimableBalanceFlags +} +func (v *ClaimableBalanceFlags) XdrInitialize() { + switch ClaimableBalanceFlags(0) { + case CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG: + default: + if *v == ClaimableBalanceFlags(0) { + *v = CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG + } + } +} + +var _XdrTags_XdrAnon_ClaimableBalanceEntryExtensionV1_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_ClaimableBalanceEntryExtensionV1_Ext +} +func (u XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return 
nil +} +func (u *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_ClaimableBalanceEntryExtensionV1_Ext = *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext + +func (v *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrTypeName() string { + return "XdrAnon_ClaimableBalanceEntryExtensionV1_Ext" +} +func (v XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrMarshal(x XDR, name string) { + x.Marshal(name, v) +} +func (u *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_ClaimableBalanceEntryExtensionV1_Ext", u.V) +} +func XDR_XdrAnon_ClaimableBalanceEntryExtensionV1_Ext(v *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext) *XdrAnon_ClaimableBalanceEntryExtensionV1_Ext { + return v +} + +type XdrType_ClaimableBalanceEntryExtensionV1 = *ClaimableBalanceEntryExtensionV1 + +func (v *ClaimableBalanceEntryExtensionV1) XdrPointer() interface{} { return v } +func (ClaimableBalanceEntryExtensionV1) XdrTypeName() string { + return "ClaimableBalanceEntryExtensionV1" +} +func (v ClaimableBalanceEntryExtensionV1) XdrValue() interface{} { return v } +func (v *ClaimableBalanceEntryExtensionV1) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ClaimableBalanceEntryExtensionV1) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_ClaimableBalanceEntryExtensionV1_Ext(&v.Ext)) + x.Marshal(x.Sprintf("%sflags", name), XDR_Uint32(&v.Flags)) +} +func XDR_ClaimableBalanceEntryExtensionV1(v *ClaimableBalanceEntryExtensionV1) *ClaimableBalanceEntryExtensionV1 { + return v +} + +var _XdrTags_XdrAnon_ClaimableBalanceEntry_Ext = map[int32]bool{ + XdrToI32(0): true, + XdrToI32(1): true, +} + +func (_ XdrAnon_ClaimableBalanceEntry_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_ClaimableBalanceEntry_Ext +} +func (u *XdrAnon_ClaimableBalanceEntry_Ext) V1() *ClaimableBalanceEntryExtensionV1 { + switch u.V { + case 1: + if v, ok := u._u.(*ClaimableBalanceEntryExtensionV1); ok { + return v + } else { + var zero ClaimableBalanceEntryExtensionV1 + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_ClaimableBalanceEntry_Ext.V1 accessed when V == %v", u.V) + return nil + } +} +func (u XdrAnon_ClaimableBalanceEntry_Ext) XdrValid() bool { + switch u.V { + case 0, 1: + return true + } + return false +} +func (u *XdrAnon_ClaimableBalanceEntry_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_ClaimableBalanceEntry_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_ClaimableBalanceEntry_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + case 1: + return XDR_ClaimableBalanceEntryExtensionV1(u.V1()) + } + return nil +} +func (u *XdrAnon_ClaimableBalanceEntry_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + case 1: + return "V1" + } + return "" +} + +type XdrType_XdrAnon_ClaimableBalanceEntry_Ext = *XdrAnon_ClaimableBalanceEntry_Ext + +func (v *XdrAnon_ClaimableBalanceEntry_Ext) XdrPointer() interface{} { return v } +func 
(XdrAnon_ClaimableBalanceEntry_Ext) XdrTypeName() string { + return "XdrAnon_ClaimableBalanceEntry_Ext" +} +func (v XdrAnon_ClaimableBalanceEntry_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_ClaimableBalanceEntry_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_ClaimableBalanceEntry_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + case 1: + x.Marshal(x.Sprintf("%sv1", name), XDR_ClaimableBalanceEntryExtensionV1(u.V1())) + return + } + XdrPanic("invalid V (%v) in XdrAnon_ClaimableBalanceEntry_Ext", u.V) +} +func XDR_XdrAnon_ClaimableBalanceEntry_Ext(v *XdrAnon_ClaimableBalanceEntry_Ext) *XdrAnon_ClaimableBalanceEntry_Ext { + return v +} + +type _XdrVec_10_Claimant []Claimant + +func (_XdrVec_10_Claimant) XdrBound() uint32 { + const bound uint32 = 10 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_10_Claimant) XdrCheckLen(length uint32) { + if length > uint32(10) { + XdrPanic("_XdrVec_10_Claimant length %d exceeds bound 10", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_10_Claimant length %d exceeds max int", length) + } +} +func (v _XdrVec_10_Claimant) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_10_Claimant) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(10); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]Claimant, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_10_Claimant) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_Claimant(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_10_Claimant) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 10} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_10_Claimant) XdrTypeName() string { return "Claimant<>" } +func (v *_XdrVec_10_Claimant) XdrPointer() interface{} { return (*[]Claimant)(v) } +func (v _XdrVec_10_Claimant) XdrValue() interface{} { return ([]Claimant)(v) } +func (v *_XdrVec_10_Claimant) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ClaimableBalanceEntry = *ClaimableBalanceEntry + +func (v *ClaimableBalanceEntry) XdrPointer() interface{} { return v } +func (ClaimableBalanceEntry) XdrTypeName() string { return "ClaimableBalanceEntry" } +func (v ClaimableBalanceEntry) XdrValue() interface{} { return v } +func (v *ClaimableBalanceEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ClaimableBalanceEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sbalanceID", name), XDR_ClaimableBalanceID(&v.BalanceID)) + x.Marshal(x.Sprintf("%sclaimants", name), (*_XdrVec_10_Claimant)(&v.Claimants)) + x.Marshal(x.Sprintf("%sasset", name), XDR_Asset(&v.Asset)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_ClaimableBalanceEntry_Ext(&v.Ext)) +} +func XDR_ClaimableBalanceEntry(v 
*ClaimableBalanceEntry) *ClaimableBalanceEntry { return v } + +type XdrType_LiquidityPoolConstantProductParameters = *LiquidityPoolConstantProductParameters + +func (v *LiquidityPoolConstantProductParameters) XdrPointer() interface{} { return v } +func (LiquidityPoolConstantProductParameters) XdrTypeName() string { + return "LiquidityPoolConstantProductParameters" +} +func (v LiquidityPoolConstantProductParameters) XdrValue() interface{} { return v } +func (v *LiquidityPoolConstantProductParameters) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LiquidityPoolConstantProductParameters) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sassetA", name), XDR_Asset(&v.AssetA)) + x.Marshal(x.Sprintf("%sassetB", name), XDR_Asset(&v.AssetB)) + x.Marshal(x.Sprintf("%sfee", name), XDR_Int32(&v.Fee)) +} +func XDR_LiquidityPoolConstantProductParameters(v *LiquidityPoolConstantProductParameters) *LiquidityPoolConstantProductParameters { + return v +} + +type XdrType_XdrAnon_LiquidityPoolEntry_Body_ConstantProduct = *XdrAnon_LiquidityPoolEntry_Body_ConstantProduct + +func (v *XdrAnon_LiquidityPoolEntry_Body_ConstantProduct) XdrPointer() interface{} { return v } +func (XdrAnon_LiquidityPoolEntry_Body_ConstantProduct) XdrTypeName() string { + return "XdrAnon_LiquidityPoolEntry_Body_ConstantProduct" +} +func (v XdrAnon_LiquidityPoolEntry_Body_ConstantProduct) XdrValue() interface{} { return v } +func (v *XdrAnon_LiquidityPoolEntry_Body_ConstantProduct) XdrMarshal(x XDR, name string) { + x.Marshal(name, v) +} +func (v *XdrAnon_LiquidityPoolEntry_Body_ConstantProduct) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sparams", name), XDR_LiquidityPoolConstantProductParameters(&v.Params)) + x.Marshal(x.Sprintf("%sreserveA", name), XDR_Int64(&v.ReserveA)) + x.Marshal(x.Sprintf("%sreserveB", name), XDR_Int64(&v.ReserveB)) + x.Marshal(x.Sprintf("%stotalPoolShares", name), XDR_Int64(&v.TotalPoolShares)) + x.Marshal(x.Sprintf("%spoolSharesTrustLineCount", name), XDR_Int64(&v.PoolSharesTrustLineCount)) +} +func XDR_XdrAnon_LiquidityPoolEntry_Body_ConstantProduct(v *XdrAnon_LiquidityPoolEntry_Body_ConstantProduct) *XdrAnon_LiquidityPoolEntry_Body_ConstantProduct { + return v +} + +var _XdrTags_XdrAnon_LiquidityPoolEntry_Body = map[int32]bool{ + XdrToI32(LIQUIDITY_POOL_CONSTANT_PRODUCT): true, +} + +func (_ XdrAnon_LiquidityPoolEntry_Body) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_LiquidityPoolEntry_Body +} +func (u *XdrAnon_LiquidityPoolEntry_Body) ConstantProduct() *XdrAnon_LiquidityPoolEntry_Body_ConstantProduct { + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + if v, ok := u._u.(*XdrAnon_LiquidityPoolEntry_Body_ConstantProduct); ok { + return v + } else { + var zero XdrAnon_LiquidityPoolEntry_Body_ConstantProduct + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_LiquidityPoolEntry_Body.ConstantProduct accessed when Type == %v", u.Type) + return nil + } +} +func (u XdrAnon_LiquidityPoolEntry_Body) XdrValid() bool { + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + return true + } + return false +} +func (u *XdrAnon_LiquidityPoolEntry_Body) XdrUnionTag() XdrNum32 { + return XDR_LiquidityPoolType(&u.Type) +} +func (u *XdrAnon_LiquidityPoolEntry_Body) XdrUnionTagName() string { + return "Type" +} +func (u *XdrAnon_LiquidityPoolEntry_Body) XdrUnionBody() XdrType { + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + return 
XDR_XdrAnon_LiquidityPoolEntry_Body_ConstantProduct(u.ConstantProduct()) + } + return nil +} +func (u *XdrAnon_LiquidityPoolEntry_Body) XdrUnionBodyName() string { + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + return "ConstantProduct" + } + return "" +} + +type XdrType_XdrAnon_LiquidityPoolEntry_Body = *XdrAnon_LiquidityPoolEntry_Body + +func (v *XdrAnon_LiquidityPoolEntry_Body) XdrPointer() interface{} { return v } +func (XdrAnon_LiquidityPoolEntry_Body) XdrTypeName() string { return "XdrAnon_LiquidityPoolEntry_Body" } +func (v XdrAnon_LiquidityPoolEntry_Body) XdrValue() interface{} { return v } +func (v *XdrAnon_LiquidityPoolEntry_Body) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_LiquidityPoolEntry_Body) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_LiquidityPoolType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + x.Marshal(x.Sprintf("%sconstantProduct", name), XDR_XdrAnon_LiquidityPoolEntry_Body_ConstantProduct(u.ConstantProduct())) + return + } + XdrPanic("invalid Type (%v) in XdrAnon_LiquidityPoolEntry_Body", u.Type) +} +func XDR_XdrAnon_LiquidityPoolEntry_Body(v *XdrAnon_LiquidityPoolEntry_Body) *XdrAnon_LiquidityPoolEntry_Body { + return v +} + +type XdrType_LiquidityPoolEntry = *LiquidityPoolEntry + +func (v *LiquidityPoolEntry) XdrPointer() interface{} { return v } +func (LiquidityPoolEntry) XdrTypeName() string { return "LiquidityPoolEntry" } +func (v LiquidityPoolEntry) XdrValue() interface{} { return v } +func (v *LiquidityPoolEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LiquidityPoolEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sliquidityPoolID", name), XDR_PoolID(&v.LiquidityPoolID)) + x.Marshal(x.Sprintf("%sbody", name), XDR_XdrAnon_LiquidityPoolEntry_Body(&v.Body)) +} +func XDR_LiquidityPoolEntry(v *LiquidityPoolEntry) *LiquidityPoolEntry { return v } + +var _XdrTags_XdrAnon_LedgerEntryExtensionV1_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_LedgerEntryExtensionV1_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_LedgerEntryExtensionV1_Ext +} +func (u XdrAnon_LedgerEntryExtensionV1_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_LedgerEntryExtensionV1_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_LedgerEntryExtensionV1_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_LedgerEntryExtensionV1_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_LedgerEntryExtensionV1_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_LedgerEntryExtensionV1_Ext = *XdrAnon_LedgerEntryExtensionV1_Ext + +func (v *XdrAnon_LedgerEntryExtensionV1_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerEntryExtensionV1_Ext) XdrTypeName() string { + return "XdrAnon_LedgerEntryExtensionV1_Ext" +} +func (v XdrAnon_LedgerEntryExtensionV1_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerEntryExtensionV1_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_LedgerEntryExtensionV1_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + 
XdrPanic("invalid V (%v) in XdrAnon_LedgerEntryExtensionV1_Ext", u.V) +} +func XDR_XdrAnon_LedgerEntryExtensionV1_Ext(v *XdrAnon_LedgerEntryExtensionV1_Ext) *XdrAnon_LedgerEntryExtensionV1_Ext { + return v +} + +type XdrType_LedgerEntryExtensionV1 = *LedgerEntryExtensionV1 + +func (v *LedgerEntryExtensionV1) XdrPointer() interface{} { return v } +func (LedgerEntryExtensionV1) XdrTypeName() string { return "LedgerEntryExtensionV1" } +func (v LedgerEntryExtensionV1) XdrValue() interface{} { return v } +func (v *LedgerEntryExtensionV1) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LedgerEntryExtensionV1) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssponsoringID", name), XDR_SponsorshipDescriptor(&v.SponsoringID)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_LedgerEntryExtensionV1_Ext(&v.Ext)) +} +func XDR_LedgerEntryExtensionV1(v *LedgerEntryExtensionV1) *LedgerEntryExtensionV1 { return v } + +var _XdrTags_XdrAnon_LedgerEntry_Data = map[int32]bool{ + XdrToI32(ACCOUNT): true, + XdrToI32(TRUSTLINE): true, + XdrToI32(OFFER): true, + XdrToI32(DATA): true, + XdrToI32(CLAIMABLE_BALANCE): true, + XdrToI32(LIQUIDITY_POOL): true, +} + +func (_ XdrAnon_LedgerEntry_Data) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_LedgerEntry_Data +} +func (u *XdrAnon_LedgerEntry_Data) Account() *AccountEntry { + switch u.Type { + case ACCOUNT: + if v, ok := u._u.(*AccountEntry); ok { + return v + } else { + var zero AccountEntry + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_LedgerEntry_Data.Account accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_LedgerEntry_Data) TrustLine() *TrustLineEntry { + switch u.Type { + case TRUSTLINE: + if v, ok := u._u.(*TrustLineEntry); ok { + return v + } else { + var zero TrustLineEntry + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_LedgerEntry_Data.TrustLine accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_LedgerEntry_Data) Offer() *OfferEntry { + switch u.Type { + case OFFER: + if v, ok := u._u.(*OfferEntry); ok { + return v + } else { + var zero OfferEntry + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_LedgerEntry_Data.Offer accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_LedgerEntry_Data) Data() *DataEntry { + switch u.Type { + case DATA: + if v, ok := u._u.(*DataEntry); ok { + return v + } else { + var zero DataEntry + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_LedgerEntry_Data.Data accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_LedgerEntry_Data) ClaimableBalance() *ClaimableBalanceEntry { + switch u.Type { + case CLAIMABLE_BALANCE: + if v, ok := u._u.(*ClaimableBalanceEntry); ok { + return v + } else { + var zero ClaimableBalanceEntry + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_LedgerEntry_Data.ClaimableBalance accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_LedgerEntry_Data) LiquidityPool() *LiquidityPoolEntry { + switch u.Type { + case LIQUIDITY_POOL: + if v, ok := u._u.(*LiquidityPoolEntry); ok { + return v + } else { + var zero LiquidityPoolEntry + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_LedgerEntry_Data.LiquidityPool accessed when Type == %v", u.Type) + return nil + } +} +func (u XdrAnon_LedgerEntry_Data) XdrValid() bool { + switch u.Type { + case ACCOUNT, TRUSTLINE, OFFER, DATA, CLAIMABLE_BALANCE, LIQUIDITY_POOL: + return true + } + 
return false +} +func (u *XdrAnon_LedgerEntry_Data) XdrUnionTag() XdrNum32 { + return XDR_LedgerEntryType(&u.Type) +} +func (u *XdrAnon_LedgerEntry_Data) XdrUnionTagName() string { + return "Type" +} +func (u *XdrAnon_LedgerEntry_Data) XdrUnionBody() XdrType { + switch u.Type { + case ACCOUNT: + return XDR_AccountEntry(u.Account()) + case TRUSTLINE: + return XDR_TrustLineEntry(u.TrustLine()) + case OFFER: + return XDR_OfferEntry(u.Offer()) + case DATA: + return XDR_DataEntry(u.Data()) + case CLAIMABLE_BALANCE: + return XDR_ClaimableBalanceEntry(u.ClaimableBalance()) + case LIQUIDITY_POOL: + return XDR_LiquidityPoolEntry(u.LiquidityPool()) + } + return nil +} +func (u *XdrAnon_LedgerEntry_Data) XdrUnionBodyName() string { + switch u.Type { + case ACCOUNT: + return "Account" + case TRUSTLINE: + return "TrustLine" + case OFFER: + return "Offer" + case DATA: + return "Data" + case CLAIMABLE_BALANCE: + return "ClaimableBalance" + case LIQUIDITY_POOL: + return "LiquidityPool" + } + return "" +} + +type XdrType_XdrAnon_LedgerEntry_Data = *XdrAnon_LedgerEntry_Data + +func (v *XdrAnon_LedgerEntry_Data) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerEntry_Data) XdrTypeName() string { return "XdrAnon_LedgerEntry_Data" } +func (v XdrAnon_LedgerEntry_Data) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerEntry_Data) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_LedgerEntry_Data) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_LedgerEntryType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ACCOUNT: + x.Marshal(x.Sprintf("%saccount", name), XDR_AccountEntry(u.Account())) + return + case TRUSTLINE: + x.Marshal(x.Sprintf("%strustLine", name), XDR_TrustLineEntry(u.TrustLine())) + return + case OFFER: + x.Marshal(x.Sprintf("%soffer", name), XDR_OfferEntry(u.Offer())) + return + case DATA: + x.Marshal(x.Sprintf("%sdata", name), XDR_DataEntry(u.Data())) + return + case CLAIMABLE_BALANCE: + x.Marshal(x.Sprintf("%sclaimableBalance", name), XDR_ClaimableBalanceEntry(u.ClaimableBalance())) + return + case LIQUIDITY_POOL: + x.Marshal(x.Sprintf("%sliquidityPool", name), XDR_LiquidityPoolEntry(u.LiquidityPool())) + return + } + XdrPanic("invalid Type (%v) in XdrAnon_LedgerEntry_Data", u.Type) +} +func XDR_XdrAnon_LedgerEntry_Data(v *XdrAnon_LedgerEntry_Data) *XdrAnon_LedgerEntry_Data { return v } + +var _XdrTags_XdrAnon_LedgerEntry_Ext = map[int32]bool{ + XdrToI32(0): true, + XdrToI32(1): true, +} + +func (_ XdrAnon_LedgerEntry_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_LedgerEntry_Ext +} +func (u *XdrAnon_LedgerEntry_Ext) V1() *LedgerEntryExtensionV1 { + switch u.V { + case 1: + if v, ok := u._u.(*LedgerEntryExtensionV1); ok { + return v + } else { + var zero LedgerEntryExtensionV1 + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_LedgerEntry_Ext.V1 accessed when V == %v", u.V) + return nil + } +} +func (u XdrAnon_LedgerEntry_Ext) XdrValid() bool { + switch u.V { + case 0, 1: + return true + } + return false +} +func (u *XdrAnon_LedgerEntry_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_LedgerEntry_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_LedgerEntry_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + case 1: + return XDR_LedgerEntryExtensionV1(u.V1()) + } + return nil +} +func (u *XdrAnon_LedgerEntry_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + case 1: + return 
"V1" + } + return "" +} + +type XdrType_XdrAnon_LedgerEntry_Ext = *XdrAnon_LedgerEntry_Ext + +func (v *XdrAnon_LedgerEntry_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerEntry_Ext) XdrTypeName() string { return "XdrAnon_LedgerEntry_Ext" } +func (v XdrAnon_LedgerEntry_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerEntry_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_LedgerEntry_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + case 1: + x.Marshal(x.Sprintf("%sv1", name), XDR_LedgerEntryExtensionV1(u.V1())) + return + } + XdrPanic("invalid V (%v) in XdrAnon_LedgerEntry_Ext", u.V) +} +func XDR_XdrAnon_LedgerEntry_Ext(v *XdrAnon_LedgerEntry_Ext) *XdrAnon_LedgerEntry_Ext { return v } + +type XdrType_LedgerEntry = *LedgerEntry + +func (v *LedgerEntry) XdrPointer() interface{} { return v } +func (LedgerEntry) XdrTypeName() string { return "LedgerEntry" } +func (v LedgerEntry) XdrValue() interface{} { return v } +func (v *LedgerEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LedgerEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%slastModifiedLedgerSeq", name), XDR_Uint32(&v.LastModifiedLedgerSeq)) + x.Marshal(x.Sprintf("%sdata", name), XDR_XdrAnon_LedgerEntry_Data(&v.Data)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_LedgerEntry_Ext(&v.Ext)) +} +func XDR_LedgerEntry(v *LedgerEntry) *LedgerEntry { return v } + +type XdrType_XdrAnon_LedgerKey_Account = *XdrAnon_LedgerKey_Account + +func (v *XdrAnon_LedgerKey_Account) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerKey_Account) XdrTypeName() string { return "XdrAnon_LedgerKey_Account" } +func (v XdrAnon_LedgerKey_Account) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerKey_Account) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_LedgerKey_Account) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%saccountID", name), XDR_AccountID(&v.AccountID)) +} +func XDR_XdrAnon_LedgerKey_Account(v *XdrAnon_LedgerKey_Account) *XdrAnon_LedgerKey_Account { return v } + +type XdrType_XdrAnon_LedgerKey_TrustLine = *XdrAnon_LedgerKey_TrustLine + +func (v *XdrAnon_LedgerKey_TrustLine) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerKey_TrustLine) XdrTypeName() string { return "XdrAnon_LedgerKey_TrustLine" } +func (v XdrAnon_LedgerKey_TrustLine) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerKey_TrustLine) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_LedgerKey_TrustLine) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%saccountID", name), XDR_AccountID(&v.AccountID)) + x.Marshal(x.Sprintf("%sasset", name), XDR_TrustLineAsset(&v.Asset)) +} +func XDR_XdrAnon_LedgerKey_TrustLine(v *XdrAnon_LedgerKey_TrustLine) *XdrAnon_LedgerKey_TrustLine { + return v +} + +type XdrType_XdrAnon_LedgerKey_Offer = *XdrAnon_LedgerKey_Offer + +func (v *XdrAnon_LedgerKey_Offer) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerKey_Offer) XdrTypeName() string { return "XdrAnon_LedgerKey_Offer" } +func (v XdrAnon_LedgerKey_Offer) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerKey_Offer) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v 
*XdrAnon_LedgerKey_Offer) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssellerID", name), XDR_AccountID(&v.SellerID)) + x.Marshal(x.Sprintf("%sofferID", name), XDR_Int64(&v.OfferID)) +} +func XDR_XdrAnon_LedgerKey_Offer(v *XdrAnon_LedgerKey_Offer) *XdrAnon_LedgerKey_Offer { return v } + +type XdrType_XdrAnon_LedgerKey_Data = *XdrAnon_LedgerKey_Data + +func (v *XdrAnon_LedgerKey_Data) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerKey_Data) XdrTypeName() string { return "XdrAnon_LedgerKey_Data" } +func (v XdrAnon_LedgerKey_Data) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerKey_Data) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_LedgerKey_Data) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%saccountID", name), XDR_AccountID(&v.AccountID)) + x.Marshal(x.Sprintf("%sdataName", name), XDR_String64(&v.DataName)) +} +func XDR_XdrAnon_LedgerKey_Data(v *XdrAnon_LedgerKey_Data) *XdrAnon_LedgerKey_Data { return v } + +type XdrType_XdrAnon_LedgerKey_ClaimableBalance = *XdrAnon_LedgerKey_ClaimableBalance + +func (v *XdrAnon_LedgerKey_ClaimableBalance) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerKey_ClaimableBalance) XdrTypeName() string { + return "XdrAnon_LedgerKey_ClaimableBalance" +} +func (v XdrAnon_LedgerKey_ClaimableBalance) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerKey_ClaimableBalance) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_LedgerKey_ClaimableBalance) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sbalanceID", name), XDR_ClaimableBalanceID(&v.BalanceID)) +} +func XDR_XdrAnon_LedgerKey_ClaimableBalance(v *XdrAnon_LedgerKey_ClaimableBalance) *XdrAnon_LedgerKey_ClaimableBalance { + return v +} + +type XdrType_XdrAnon_LedgerKey_LiquidityPool = *XdrAnon_LedgerKey_LiquidityPool + +func (v *XdrAnon_LedgerKey_LiquidityPool) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerKey_LiquidityPool) XdrTypeName() string { return "XdrAnon_LedgerKey_LiquidityPool" } +func (v XdrAnon_LedgerKey_LiquidityPool) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerKey_LiquidityPool) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_LedgerKey_LiquidityPool) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sliquidityPoolID", name), XDR_PoolID(&v.LiquidityPoolID)) +} +func XDR_XdrAnon_LedgerKey_LiquidityPool(v *XdrAnon_LedgerKey_LiquidityPool) *XdrAnon_LedgerKey_LiquidityPool { + return v +} + +var _XdrTags_LedgerKey = map[int32]bool{ + XdrToI32(ACCOUNT): true, + XdrToI32(TRUSTLINE): true, + XdrToI32(OFFER): true, + XdrToI32(DATA): true, + XdrToI32(CLAIMABLE_BALANCE): true, + XdrToI32(LIQUIDITY_POOL): true, +} + +func (_ LedgerKey) XdrValidTags() map[int32]bool { + return _XdrTags_LedgerKey +} +func (u *LedgerKey) Account() *XdrAnon_LedgerKey_Account { + switch u.Type { + case ACCOUNT: + if v, ok := u._u.(*XdrAnon_LedgerKey_Account); ok { + return v + } else { + var zero XdrAnon_LedgerKey_Account + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerKey.Account accessed when Type == %v", u.Type) + return nil + } +} +func (u *LedgerKey) TrustLine() *XdrAnon_LedgerKey_TrustLine { + switch u.Type { + case TRUSTLINE: + if v, ok := u._u.(*XdrAnon_LedgerKey_TrustLine); ok { + return v + } else { + var zero 
XdrAnon_LedgerKey_TrustLine + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerKey.TrustLine accessed when Type == %v", u.Type) + return nil + } +} +func (u *LedgerKey) Offer() *XdrAnon_LedgerKey_Offer { + switch u.Type { + case OFFER: + if v, ok := u._u.(*XdrAnon_LedgerKey_Offer); ok { + return v + } else { + var zero XdrAnon_LedgerKey_Offer + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerKey.Offer accessed when Type == %v", u.Type) + return nil + } +} +func (u *LedgerKey) Data() *XdrAnon_LedgerKey_Data { + switch u.Type { + case DATA: + if v, ok := u._u.(*XdrAnon_LedgerKey_Data); ok { + return v + } else { + var zero XdrAnon_LedgerKey_Data + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerKey.Data accessed when Type == %v", u.Type) + return nil + } +} +func (u *LedgerKey) ClaimableBalance() *XdrAnon_LedgerKey_ClaimableBalance { + switch u.Type { + case CLAIMABLE_BALANCE: + if v, ok := u._u.(*XdrAnon_LedgerKey_ClaimableBalance); ok { + return v + } else { + var zero XdrAnon_LedgerKey_ClaimableBalance + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerKey.ClaimableBalance accessed when Type == %v", u.Type) + return nil + } +} +func (u *LedgerKey) LiquidityPool() *XdrAnon_LedgerKey_LiquidityPool { + switch u.Type { + case LIQUIDITY_POOL: + if v, ok := u._u.(*XdrAnon_LedgerKey_LiquidityPool); ok { + return v + } else { + var zero XdrAnon_LedgerKey_LiquidityPool + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerKey.LiquidityPool accessed when Type == %v", u.Type) + return nil + } +} +func (u LedgerKey) XdrValid() bool { + switch u.Type { + case ACCOUNT, TRUSTLINE, OFFER, DATA, CLAIMABLE_BALANCE, LIQUIDITY_POOL: + return true + } + return false +} +func (u *LedgerKey) XdrUnionTag() XdrNum32 { + return XDR_LedgerEntryType(&u.Type) +} +func (u *LedgerKey) XdrUnionTagName() string { + return "Type" +} +func (u *LedgerKey) XdrUnionBody() XdrType { + switch u.Type { + case ACCOUNT: + return XDR_XdrAnon_LedgerKey_Account(u.Account()) + case TRUSTLINE: + return XDR_XdrAnon_LedgerKey_TrustLine(u.TrustLine()) + case OFFER: + return XDR_XdrAnon_LedgerKey_Offer(u.Offer()) + case DATA: + return XDR_XdrAnon_LedgerKey_Data(u.Data()) + case CLAIMABLE_BALANCE: + return XDR_XdrAnon_LedgerKey_ClaimableBalance(u.ClaimableBalance()) + case LIQUIDITY_POOL: + return XDR_XdrAnon_LedgerKey_LiquidityPool(u.LiquidityPool()) + } + return nil +} +func (u *LedgerKey) XdrUnionBodyName() string { + switch u.Type { + case ACCOUNT: + return "Account" + case TRUSTLINE: + return "TrustLine" + case OFFER: + return "Offer" + case DATA: + return "Data" + case CLAIMABLE_BALANCE: + return "ClaimableBalance" + case LIQUIDITY_POOL: + return "LiquidityPool" + } + return "" +} + +type XdrType_LedgerKey = *LedgerKey + +func (v *LedgerKey) XdrPointer() interface{} { return v } +func (LedgerKey) XdrTypeName() string { return "LedgerKey" } +func (v LedgerKey) XdrValue() interface{} { return v } +func (v *LedgerKey) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *LedgerKey) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_LedgerEntryType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ACCOUNT: + x.Marshal(x.Sprintf("%saccount", name), XDR_XdrAnon_LedgerKey_Account(u.Account())) + return + case TRUSTLINE: + x.Marshal(x.Sprintf("%strustLine", name), XDR_XdrAnon_LedgerKey_TrustLine(u.TrustLine())) + return + case OFFER: + x.Marshal(x.Sprintf("%soffer", name), 
XDR_XdrAnon_LedgerKey_Offer(u.Offer())) + return + case DATA: + x.Marshal(x.Sprintf("%sdata", name), XDR_XdrAnon_LedgerKey_Data(u.Data())) + return + case CLAIMABLE_BALANCE: + x.Marshal(x.Sprintf("%sclaimableBalance", name), XDR_XdrAnon_LedgerKey_ClaimableBalance(u.ClaimableBalance())) + return + case LIQUIDITY_POOL: + x.Marshal(x.Sprintf("%sliquidityPool", name), XDR_XdrAnon_LedgerKey_LiquidityPool(u.LiquidityPool())) + return + } + XdrPanic("invalid Type (%v) in LedgerKey", u.Type) +} +func XDR_LedgerKey(v *LedgerKey) *LedgerKey { return v } + +var _XdrNames_EnvelopeType = map[int32]string{ + int32(ENVELOPE_TYPE_TX_V0): "ENVELOPE_TYPE_TX_V0", + int32(ENVELOPE_TYPE_SCP): "ENVELOPE_TYPE_SCP", + int32(ENVELOPE_TYPE_TX): "ENVELOPE_TYPE_TX", + int32(ENVELOPE_TYPE_AUTH): "ENVELOPE_TYPE_AUTH", + int32(ENVELOPE_TYPE_SCPVALUE): "ENVELOPE_TYPE_SCPVALUE", + int32(ENVELOPE_TYPE_TX_FEE_BUMP): "ENVELOPE_TYPE_TX_FEE_BUMP", + int32(ENVELOPE_TYPE_OP_ID): "ENVELOPE_TYPE_OP_ID", + int32(ENVELOPE_TYPE_POOL_REVOKE_OP_ID): "ENVELOPE_TYPE_POOL_REVOKE_OP_ID", +} +var _XdrValues_EnvelopeType = map[string]int32{ + "ENVELOPE_TYPE_TX_V0": int32(ENVELOPE_TYPE_TX_V0), + "ENVELOPE_TYPE_SCP": int32(ENVELOPE_TYPE_SCP), + "ENVELOPE_TYPE_TX": int32(ENVELOPE_TYPE_TX), + "ENVELOPE_TYPE_AUTH": int32(ENVELOPE_TYPE_AUTH), + "ENVELOPE_TYPE_SCPVALUE": int32(ENVELOPE_TYPE_SCPVALUE), + "ENVELOPE_TYPE_TX_FEE_BUMP": int32(ENVELOPE_TYPE_TX_FEE_BUMP), + "ENVELOPE_TYPE_OP_ID": int32(ENVELOPE_TYPE_OP_ID), + "ENVELOPE_TYPE_POOL_REVOKE_OP_ID": int32(ENVELOPE_TYPE_POOL_REVOKE_OP_ID), +} + +func (EnvelopeType) XdrEnumNames() map[int32]string { + return _XdrNames_EnvelopeType +} +func (v EnvelopeType) String() string { + if s, ok := _XdrNames_EnvelopeType[int32(v)]; ok { + return s + } + return fmt.Sprintf("EnvelopeType#%d", v) +} +func (v *EnvelopeType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_EnvelopeType[stok]; ok { + *v = EnvelopeType(val) + return nil + } else if stok == "EnvelopeType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid EnvelopeType.", stok)) + } +} +func (v EnvelopeType) GetU32() uint32 { return uint32(v) } +func (v *EnvelopeType) SetU32(n uint32) { *v = EnvelopeType(n) } +func (v *EnvelopeType) XdrPointer() interface{} { return v } +func (EnvelopeType) XdrTypeName() string { return "EnvelopeType" } +func (v EnvelopeType) XdrValue() interface{} { return v } +func (v *EnvelopeType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_EnvelopeType = *EnvelopeType + +func XDR_EnvelopeType(v *EnvelopeType) *EnvelopeType { return v } + +type XdrType_UpgradeType struct { + XdrVecOpaque +} + +func XDR_UpgradeType(v *UpgradeType) XdrType_UpgradeType { + return XdrType_UpgradeType{XdrVecOpaque{v, 128}} +} +func (XdrType_UpgradeType) XdrTypeName() string { return "UpgradeType" } +func (v XdrType_UpgradeType) XdrUnwrap() XdrType { return v.XdrVecOpaque } + +var _XdrNames_StellarValueType = map[int32]string{ + int32(STELLAR_VALUE_BASIC): "STELLAR_VALUE_BASIC", + int32(STELLAR_VALUE_SIGNED): "STELLAR_VALUE_SIGNED", +} +var _XdrValues_StellarValueType = map[string]int32{ + "STELLAR_VALUE_BASIC": int32(STELLAR_VALUE_BASIC), + "STELLAR_VALUE_SIGNED": int32(STELLAR_VALUE_SIGNED), +} + +func (StellarValueType) XdrEnumNames() map[int32]string { + return _XdrNames_StellarValueType +} +func (v 
StellarValueType) String() string { + if s, ok := _XdrNames_StellarValueType[int32(v)]; ok { + return s + } + return fmt.Sprintf("StellarValueType#%d", v) +} +func (v *StellarValueType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_StellarValueType[stok]; ok { + *v = StellarValueType(val) + return nil + } else if stok == "StellarValueType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid StellarValueType.", stok)) + } +} +func (v StellarValueType) GetU32() uint32 { return uint32(v) } +func (v *StellarValueType) SetU32(n uint32) { *v = StellarValueType(n) } +func (v *StellarValueType) XdrPointer() interface{} { return v } +func (StellarValueType) XdrTypeName() string { return "StellarValueType" } +func (v StellarValueType) XdrValue() interface{} { return v } +func (v *StellarValueType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_StellarValueType = *StellarValueType + +func XDR_StellarValueType(v *StellarValueType) *StellarValueType { return v } + +type XdrType_LedgerCloseValueSignature = *LedgerCloseValueSignature + +func (v *LedgerCloseValueSignature) XdrPointer() interface{} { return v } +func (LedgerCloseValueSignature) XdrTypeName() string { return "LedgerCloseValueSignature" } +func (v LedgerCloseValueSignature) XdrValue() interface{} { return v } +func (v *LedgerCloseValueSignature) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LedgerCloseValueSignature) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%snodeID", name), XDR_NodeID(&v.NodeID)) + x.Marshal(x.Sprintf("%ssignature", name), XDR_Signature(&v.Signature)) +} +func XDR_LedgerCloseValueSignature(v *LedgerCloseValueSignature) *LedgerCloseValueSignature { return v } + +var _XdrTags_XdrAnon_StellarValue_Ext = map[int32]bool{ + XdrToI32(STELLAR_VALUE_BASIC): true, + XdrToI32(STELLAR_VALUE_SIGNED): true, +} + +func (_ XdrAnon_StellarValue_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_StellarValue_Ext +} +func (u *XdrAnon_StellarValue_Ext) LcValueSignature() *LedgerCloseValueSignature { + switch u.V { + case STELLAR_VALUE_SIGNED: + if v, ok := u._u.(*LedgerCloseValueSignature); ok { + return v + } else { + var zero LedgerCloseValueSignature + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_StellarValue_Ext.LcValueSignature accessed when V == %v", u.V) + return nil + } +} +func (u XdrAnon_StellarValue_Ext) XdrValid() bool { + switch u.V { + case STELLAR_VALUE_BASIC, STELLAR_VALUE_SIGNED: + return true + } + return false +} +func (u *XdrAnon_StellarValue_Ext) XdrUnionTag() XdrNum32 { + return XDR_StellarValueType(&u.V) +} +func (u *XdrAnon_StellarValue_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_StellarValue_Ext) XdrUnionBody() XdrType { + switch u.V { + case STELLAR_VALUE_BASIC: + return nil + case STELLAR_VALUE_SIGNED: + return XDR_LedgerCloseValueSignature(u.LcValueSignature()) + } + return nil +} +func (u *XdrAnon_StellarValue_Ext) XdrUnionBodyName() string { + switch u.V { + case STELLAR_VALUE_BASIC: + return "" + case STELLAR_VALUE_SIGNED: + return "LcValueSignature" + } + return "" +} + +type XdrType_XdrAnon_StellarValue_Ext = *XdrAnon_StellarValue_Ext + +func (v *XdrAnon_StellarValue_Ext) XdrPointer() interface{} { return v } +func 
(XdrAnon_StellarValue_Ext) XdrTypeName() string { return "XdrAnon_StellarValue_Ext" } +func (v XdrAnon_StellarValue_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_StellarValue_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_StellarValue_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_StellarValueType(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case STELLAR_VALUE_BASIC: + return + case STELLAR_VALUE_SIGNED: + x.Marshal(x.Sprintf("%slcValueSignature", name), XDR_LedgerCloseValueSignature(u.LcValueSignature())) + return + } + XdrPanic("invalid V (%v) in XdrAnon_StellarValue_Ext", u.V) +} +func XDR_XdrAnon_StellarValue_Ext(v *XdrAnon_StellarValue_Ext) *XdrAnon_StellarValue_Ext { return v } + +type _XdrVec_6_UpgradeType []UpgradeType + +func (_XdrVec_6_UpgradeType) XdrBound() uint32 { + const bound uint32 = 6 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_6_UpgradeType) XdrCheckLen(length uint32) { + if length > uint32(6) { + XdrPanic("_XdrVec_6_UpgradeType length %d exceeds bound 6", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_6_UpgradeType length %d exceeds max int", length) + } +} +func (v _XdrVec_6_UpgradeType) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_6_UpgradeType) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(6); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]UpgradeType, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_6_UpgradeType) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_UpgradeType(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_6_UpgradeType) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 6} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_6_UpgradeType) XdrTypeName() string { return "UpgradeType<>" } +func (v *_XdrVec_6_UpgradeType) XdrPointer() interface{} { return (*[]UpgradeType)(v) } +func (v _XdrVec_6_UpgradeType) XdrValue() interface{} { return ([]UpgradeType)(v) } +func (v *_XdrVec_6_UpgradeType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_StellarValue = *StellarValue + +func (v *StellarValue) XdrPointer() interface{} { return v } +func (StellarValue) XdrTypeName() string { return "StellarValue" } +func (v StellarValue) XdrValue() interface{} { return v } +func (v *StellarValue) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *StellarValue) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%stxSetHash", name), XDR_Hash(&v.TxSetHash)) + x.Marshal(x.Sprintf("%scloseTime", name), XDR_TimePoint(&v.CloseTime)) + x.Marshal(x.Sprintf("%supgrades", name), (*_XdrVec_6_UpgradeType)(&v.Upgrades)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_StellarValue_Ext(&v.Ext)) +} +func XDR_StellarValue(v *StellarValue) *StellarValue { return v } + +var _XdrNames_LedgerHeaderFlags = map[int32]string{ + 
int32(DISABLE_LIQUIDITY_POOL_TRADING_FLAG): "DISABLE_LIQUIDITY_POOL_TRADING_FLAG", + int32(DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG): "DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG", + int32(DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG): "DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG", +} +var _XdrValues_LedgerHeaderFlags = map[string]int32{ + "DISABLE_LIQUIDITY_POOL_TRADING_FLAG": int32(DISABLE_LIQUIDITY_POOL_TRADING_FLAG), + "DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG": int32(DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG), + "DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG": int32(DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG), +} + +func (LedgerHeaderFlags) XdrEnumNames() map[int32]string { + return _XdrNames_LedgerHeaderFlags +} +func (v LedgerHeaderFlags) String() string { + if s, ok := _XdrNames_LedgerHeaderFlags[int32(v)]; ok { + return s + } + return fmt.Sprintf("LedgerHeaderFlags#%d", v) +} +func (v *LedgerHeaderFlags) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_LedgerHeaderFlags[stok]; ok { + *v = LedgerHeaderFlags(val) + return nil + } else if stok == "LedgerHeaderFlags" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid LedgerHeaderFlags.", stok)) + } +} +func (v LedgerHeaderFlags) GetU32() uint32 { return uint32(v) } +func (v *LedgerHeaderFlags) SetU32(n uint32) { *v = LedgerHeaderFlags(n) } +func (v *LedgerHeaderFlags) XdrPointer() interface{} { return v } +func (LedgerHeaderFlags) XdrTypeName() string { return "LedgerHeaderFlags" } +func (v LedgerHeaderFlags) XdrValue() interface{} { return v } +func (v *LedgerHeaderFlags) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LedgerHeaderFlags = *LedgerHeaderFlags + +func XDR_LedgerHeaderFlags(v *LedgerHeaderFlags) *LedgerHeaderFlags { return v } +func (v *LedgerHeaderFlags) XdrInitialize() { + switch LedgerHeaderFlags(0) { + case DISABLE_LIQUIDITY_POOL_TRADING_FLAG, DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG, DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG: + default: + if *v == LedgerHeaderFlags(0) { + *v = DISABLE_LIQUIDITY_POOL_TRADING_FLAG + } + } +} + +var _XdrTags_XdrAnon_LedgerHeaderExtensionV1_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_LedgerHeaderExtensionV1_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_LedgerHeaderExtensionV1_Ext +} +func (u XdrAnon_LedgerHeaderExtensionV1_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_LedgerHeaderExtensionV1_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_LedgerHeaderExtensionV1_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_LedgerHeaderExtensionV1_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_LedgerHeaderExtensionV1_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_LedgerHeaderExtensionV1_Ext = *XdrAnon_LedgerHeaderExtensionV1_Ext + +func (v *XdrAnon_LedgerHeaderExtensionV1_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerHeaderExtensionV1_Ext) XdrTypeName() string { + return "XdrAnon_LedgerHeaderExtensionV1_Ext" +} +func (v XdrAnon_LedgerHeaderExtensionV1_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerHeaderExtensionV1_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_LedgerHeaderExtensionV1_Ext) 
XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_LedgerHeaderExtensionV1_Ext", u.V) +} +func XDR_XdrAnon_LedgerHeaderExtensionV1_Ext(v *XdrAnon_LedgerHeaderExtensionV1_Ext) *XdrAnon_LedgerHeaderExtensionV1_Ext { + return v +} + +type XdrType_LedgerHeaderExtensionV1 = *LedgerHeaderExtensionV1 + +func (v *LedgerHeaderExtensionV1) XdrPointer() interface{} { return v } +func (LedgerHeaderExtensionV1) XdrTypeName() string { return "LedgerHeaderExtensionV1" } +func (v LedgerHeaderExtensionV1) XdrValue() interface{} { return v } +func (v *LedgerHeaderExtensionV1) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LedgerHeaderExtensionV1) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sflags", name), XDR_Uint32(&v.Flags)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_LedgerHeaderExtensionV1_Ext(&v.Ext)) +} +func XDR_LedgerHeaderExtensionV1(v *LedgerHeaderExtensionV1) *LedgerHeaderExtensionV1 { return v } + +var _XdrTags_XdrAnon_LedgerHeader_Ext = map[int32]bool{ + XdrToI32(0): true, + XdrToI32(1): true, +} + +func (_ XdrAnon_LedgerHeader_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_LedgerHeader_Ext +} +func (u *XdrAnon_LedgerHeader_Ext) V1() *LedgerHeaderExtensionV1 { + switch u.V { + case 1: + if v, ok := u._u.(*LedgerHeaderExtensionV1); ok { + return v + } else { + var zero LedgerHeaderExtensionV1 + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_LedgerHeader_Ext.V1 accessed when V == %v", u.V) + return nil + } +} +func (u XdrAnon_LedgerHeader_Ext) XdrValid() bool { + switch u.V { + case 0, 1: + return true + } + return false +} +func (u *XdrAnon_LedgerHeader_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_LedgerHeader_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_LedgerHeader_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + case 1: + return XDR_LedgerHeaderExtensionV1(u.V1()) + } + return nil +} +func (u *XdrAnon_LedgerHeader_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + case 1: + return "V1" + } + return "" +} + +type XdrType_XdrAnon_LedgerHeader_Ext = *XdrAnon_LedgerHeader_Ext + +func (v *XdrAnon_LedgerHeader_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerHeader_Ext) XdrTypeName() string { return "XdrAnon_LedgerHeader_Ext" } +func (v XdrAnon_LedgerHeader_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerHeader_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_LedgerHeader_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + case 1: + x.Marshal(x.Sprintf("%sv1", name), XDR_LedgerHeaderExtensionV1(u.V1())) + return + } + XdrPanic("invalid V (%v) in XdrAnon_LedgerHeader_Ext", u.V) +} +func XDR_XdrAnon_LedgerHeader_Ext(v *XdrAnon_LedgerHeader_Ext) *XdrAnon_LedgerHeader_Ext { return v } + +type _XdrArray_4_Hash [4]Hash + +func (_XdrArray_4_Hash) XdrArraySize() uint32 { + const bound uint32 = 4 // Force error if not const or doesn't fit + return bound +} +func (v *_XdrArray_4_Hash) XdrRecurse(x XDR, name string) { + for i := 0; i < len(*v); i++ { + XDR_Hash(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } +} +func (v 
*_XdrArray_4_Hash) XdrPointer() interface{} { return (*[4]Hash)(v) } +func (_XdrArray_4_Hash) XdrTypeName() string { return "Hash[]" } +func (v *_XdrArray_4_Hash) XdrValue() interface{} { return v[:] } +func (v *_XdrArray_4_Hash) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LedgerHeader = *LedgerHeader + +func (v *LedgerHeader) XdrPointer() interface{} { return v } +func (LedgerHeader) XdrTypeName() string { return "LedgerHeader" } +func (v LedgerHeader) XdrValue() interface{} { return v } +func (v *LedgerHeader) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LedgerHeader) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sledgerVersion", name), XDR_Uint32(&v.LedgerVersion)) + x.Marshal(x.Sprintf("%spreviousLedgerHash", name), XDR_Hash(&v.PreviousLedgerHash)) + x.Marshal(x.Sprintf("%sscpValue", name), XDR_StellarValue(&v.ScpValue)) + x.Marshal(x.Sprintf("%stxSetResultHash", name), XDR_Hash(&v.TxSetResultHash)) + x.Marshal(x.Sprintf("%sbucketListHash", name), XDR_Hash(&v.BucketListHash)) + x.Marshal(x.Sprintf("%sledgerSeq", name), XDR_Uint32(&v.LedgerSeq)) + x.Marshal(x.Sprintf("%stotalCoins", name), XDR_Int64(&v.TotalCoins)) + x.Marshal(x.Sprintf("%sfeePool", name), XDR_Int64(&v.FeePool)) + x.Marshal(x.Sprintf("%sinflationSeq", name), XDR_Uint32(&v.InflationSeq)) + x.Marshal(x.Sprintf("%sidPool", name), XDR_Uint64(&v.IdPool)) + x.Marshal(x.Sprintf("%sbaseFee", name), XDR_Uint32(&v.BaseFee)) + x.Marshal(x.Sprintf("%sbaseReserve", name), XDR_Uint32(&v.BaseReserve)) + x.Marshal(x.Sprintf("%smaxTxSetSize", name), XDR_Uint32(&v.MaxTxSetSize)) + x.Marshal(x.Sprintf("%sskipList", name), (*_XdrArray_4_Hash)(&v.SkipList)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_LedgerHeader_Ext(&v.Ext)) +} +func XDR_LedgerHeader(v *LedgerHeader) *LedgerHeader { return v } + +var _XdrNames_LedgerUpgradeType = map[int32]string{ + int32(LEDGER_UPGRADE_VERSION): "LEDGER_UPGRADE_VERSION", + int32(LEDGER_UPGRADE_BASE_FEE): "LEDGER_UPGRADE_BASE_FEE", + int32(LEDGER_UPGRADE_MAX_TX_SET_SIZE): "LEDGER_UPGRADE_MAX_TX_SET_SIZE", + int32(LEDGER_UPGRADE_BASE_RESERVE): "LEDGER_UPGRADE_BASE_RESERVE", + int32(LEDGER_UPGRADE_FLAGS): "LEDGER_UPGRADE_FLAGS", +} +var _XdrValues_LedgerUpgradeType = map[string]int32{ + "LEDGER_UPGRADE_VERSION": int32(LEDGER_UPGRADE_VERSION), + "LEDGER_UPGRADE_BASE_FEE": int32(LEDGER_UPGRADE_BASE_FEE), + "LEDGER_UPGRADE_MAX_TX_SET_SIZE": int32(LEDGER_UPGRADE_MAX_TX_SET_SIZE), + "LEDGER_UPGRADE_BASE_RESERVE": int32(LEDGER_UPGRADE_BASE_RESERVE), + "LEDGER_UPGRADE_FLAGS": int32(LEDGER_UPGRADE_FLAGS), +} + +func (LedgerUpgradeType) XdrEnumNames() map[int32]string { + return _XdrNames_LedgerUpgradeType +} +func (v LedgerUpgradeType) String() string { + if s, ok := _XdrNames_LedgerUpgradeType[int32(v)]; ok { + return s + } + return fmt.Sprintf("LedgerUpgradeType#%d", v) +} +func (v *LedgerUpgradeType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_LedgerUpgradeType[stok]; ok { + *v = LedgerUpgradeType(val) + return nil + } else if stok == "LedgerUpgradeType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid LedgerUpgradeType.", stok)) + } +} +func (v LedgerUpgradeType) GetU32() uint32 { return uint32(v) } +func (v *LedgerUpgradeType) SetU32(n uint32) { *v = LedgerUpgradeType(n) } +func 
(v *LedgerUpgradeType) XdrPointer() interface{} { return v } +func (LedgerUpgradeType) XdrTypeName() string { return "LedgerUpgradeType" } +func (v LedgerUpgradeType) XdrValue() interface{} { return v } +func (v *LedgerUpgradeType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LedgerUpgradeType = *LedgerUpgradeType + +func XDR_LedgerUpgradeType(v *LedgerUpgradeType) *LedgerUpgradeType { return v } +func (v *LedgerUpgradeType) XdrInitialize() { + switch LedgerUpgradeType(0) { + case LEDGER_UPGRADE_VERSION, LEDGER_UPGRADE_BASE_FEE, LEDGER_UPGRADE_MAX_TX_SET_SIZE, LEDGER_UPGRADE_BASE_RESERVE, LEDGER_UPGRADE_FLAGS: + default: + if *v == LedgerUpgradeType(0) { + *v = LEDGER_UPGRADE_VERSION + } + } +} + +var _XdrTags_LedgerUpgrade = map[int32]bool{ + XdrToI32(LEDGER_UPGRADE_VERSION): true, + XdrToI32(LEDGER_UPGRADE_BASE_FEE): true, + XdrToI32(LEDGER_UPGRADE_MAX_TX_SET_SIZE): true, + XdrToI32(LEDGER_UPGRADE_BASE_RESERVE): true, + XdrToI32(LEDGER_UPGRADE_FLAGS): true, +} + +func (_ LedgerUpgrade) XdrValidTags() map[int32]bool { + return _XdrTags_LedgerUpgrade +} + +// update ledgerVersion +func (u *LedgerUpgrade) NewLedgerVersion() *Uint32 { + switch u.Type { + case LEDGER_UPGRADE_VERSION: + if v, ok := u._u.(*Uint32); ok { + return v + } else { + var zero Uint32 + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerUpgrade.NewLedgerVersion accessed when Type == %v", u.Type) + return nil + } +} + +// update baseFee +func (u *LedgerUpgrade) NewBaseFee() *Uint32 { + switch u.Type { + case LEDGER_UPGRADE_BASE_FEE: + if v, ok := u._u.(*Uint32); ok { + return v + } else { + var zero Uint32 + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerUpgrade.NewBaseFee accessed when Type == %v", u.Type) + return nil + } +} + +// update maxTxSetSize +func (u *LedgerUpgrade) NewMaxTxSetSize() *Uint32 { + switch u.Type { + case LEDGER_UPGRADE_MAX_TX_SET_SIZE: + if v, ok := u._u.(*Uint32); ok { + return v + } else { + var zero Uint32 + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerUpgrade.NewMaxTxSetSize accessed when Type == %v", u.Type) + return nil + } +} + +// update baseReserve +func (u *LedgerUpgrade) NewBaseReserve() *Uint32 { + switch u.Type { + case LEDGER_UPGRADE_BASE_RESERVE: + if v, ok := u._u.(*Uint32); ok { + return v + } else { + var zero Uint32 + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerUpgrade.NewBaseReserve accessed when Type == %v", u.Type) + return nil + } +} + +// update flags +func (u *LedgerUpgrade) NewFlags() *Uint32 { + switch u.Type { + case LEDGER_UPGRADE_FLAGS: + if v, ok := u._u.(*Uint32); ok { + return v + } else { + var zero Uint32 + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerUpgrade.NewFlags accessed when Type == %v", u.Type) + return nil + } +} +func (u LedgerUpgrade) XdrValid() bool { + switch u.Type { + case LEDGER_UPGRADE_VERSION, LEDGER_UPGRADE_BASE_FEE, LEDGER_UPGRADE_MAX_TX_SET_SIZE, LEDGER_UPGRADE_BASE_RESERVE, LEDGER_UPGRADE_FLAGS: + return true + } + return false +} +func (u *LedgerUpgrade) XdrUnionTag() XdrNum32 { + return XDR_LedgerUpgradeType(&u.Type) +} +func (u *LedgerUpgrade) XdrUnionTagName() string { + return "Type" +} +func (u *LedgerUpgrade) XdrUnionBody() XdrType { + switch u.Type { + case LEDGER_UPGRADE_VERSION: + return XDR_Uint32(u.NewLedgerVersion()) + case LEDGER_UPGRADE_BASE_FEE: + return XDR_Uint32(u.NewBaseFee()) + case LEDGER_UPGRADE_MAX_TX_SET_SIZE: + return XDR_Uint32(u.NewMaxTxSetSize()) + case LEDGER_UPGRADE_BASE_RESERVE: + return 
XDR_Uint32(u.NewBaseReserve()) + case LEDGER_UPGRADE_FLAGS: + return XDR_Uint32(u.NewFlags()) + } + return nil +} +func (u *LedgerUpgrade) XdrUnionBodyName() string { + switch u.Type { + case LEDGER_UPGRADE_VERSION: + return "NewLedgerVersion" + case LEDGER_UPGRADE_BASE_FEE: + return "NewBaseFee" + case LEDGER_UPGRADE_MAX_TX_SET_SIZE: + return "NewMaxTxSetSize" + case LEDGER_UPGRADE_BASE_RESERVE: + return "NewBaseReserve" + case LEDGER_UPGRADE_FLAGS: + return "NewFlags" + } + return "" +} + +type XdrType_LedgerUpgrade = *LedgerUpgrade + +func (v *LedgerUpgrade) XdrPointer() interface{} { return v } +func (LedgerUpgrade) XdrTypeName() string { return "LedgerUpgrade" } +func (v LedgerUpgrade) XdrValue() interface{} { return v } +func (v *LedgerUpgrade) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *LedgerUpgrade) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_LedgerUpgradeType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case LEDGER_UPGRADE_VERSION: + x.Marshal(x.Sprintf("%snewLedgerVersion", name), XDR_Uint32(u.NewLedgerVersion())) + return + case LEDGER_UPGRADE_BASE_FEE: + x.Marshal(x.Sprintf("%snewBaseFee", name), XDR_Uint32(u.NewBaseFee())) + return + case LEDGER_UPGRADE_MAX_TX_SET_SIZE: + x.Marshal(x.Sprintf("%snewMaxTxSetSize", name), XDR_Uint32(u.NewMaxTxSetSize())) + return + case LEDGER_UPGRADE_BASE_RESERVE: + x.Marshal(x.Sprintf("%snewBaseReserve", name), XDR_Uint32(u.NewBaseReserve())) + return + case LEDGER_UPGRADE_FLAGS: + x.Marshal(x.Sprintf("%snewFlags", name), XDR_Uint32(u.NewFlags())) + return + } + XdrPanic("invalid Type (%v) in LedgerUpgrade", u.Type) +} +func (v *LedgerUpgrade) XdrInitialize() { + var zero LedgerUpgradeType + switch zero { + case LEDGER_UPGRADE_VERSION, LEDGER_UPGRADE_BASE_FEE, LEDGER_UPGRADE_MAX_TX_SET_SIZE, LEDGER_UPGRADE_BASE_RESERVE, LEDGER_UPGRADE_FLAGS: + default: + if v.Type == zero { + v.Type = LEDGER_UPGRADE_VERSION + } + } +} +func XDR_LedgerUpgrade(v *LedgerUpgrade) *LedgerUpgrade { return v } + +var _XdrNames_BucketEntryType = map[int32]string{ + int32(METAENTRY): "METAENTRY", + int32(LIVEENTRY): "LIVEENTRY", + int32(DEADENTRY): "DEADENTRY", + int32(INITENTRY): "INITENTRY", +} +var _XdrValues_BucketEntryType = map[string]int32{ + "METAENTRY": int32(METAENTRY), + "LIVEENTRY": int32(LIVEENTRY), + "DEADENTRY": int32(DEADENTRY), + "INITENTRY": int32(INITENTRY), +} + +func (BucketEntryType) XdrEnumNames() map[int32]string { + return _XdrNames_BucketEntryType +} +func (v BucketEntryType) String() string { + if s, ok := _XdrNames_BucketEntryType[int32(v)]; ok { + return s + } + return fmt.Sprintf("BucketEntryType#%d", v) +} +func (v *BucketEntryType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_BucketEntryType[stok]; ok { + *v = BucketEntryType(val) + return nil + } else if stok == "BucketEntryType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid BucketEntryType.", stok)) + } +} +func (v BucketEntryType) GetU32() uint32 { return uint32(v) } +func (v *BucketEntryType) SetU32(n uint32) { *v = BucketEntryType(n) } +func (v *BucketEntryType) XdrPointer() interface{} { return v } +func (BucketEntryType) XdrTypeName() string { return "BucketEntryType" } +func (v BucketEntryType) XdrValue() interface{} { return v } +func (v *BucketEntryType) 
XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_BucketEntryType = *BucketEntryType + +func XDR_BucketEntryType(v *BucketEntryType) *BucketEntryType { return v } + +var _XdrComments_BucketEntryType = map[int32]string{ + int32(METAENTRY): "At-and-after protocol 11: bucket metadata, should come first.", + int32(LIVEENTRY): "Before protocol 11: created-or-updated;", + int32(DEADENTRY): "At-and-after protocol 11: only updated.", + int32(INITENTRY): "At-and-after protocol 11: only created.", +} + +func (e BucketEntryType) XdrEnumComments() map[int32]string { + return _XdrComments_BucketEntryType +} + +var _XdrTags_XdrAnon_BucketMetadata_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_BucketMetadata_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_BucketMetadata_Ext +} +func (u XdrAnon_BucketMetadata_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_BucketMetadata_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_BucketMetadata_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_BucketMetadata_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_BucketMetadata_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_BucketMetadata_Ext = *XdrAnon_BucketMetadata_Ext + +func (v *XdrAnon_BucketMetadata_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_BucketMetadata_Ext) XdrTypeName() string { return "XdrAnon_BucketMetadata_Ext" } +func (v XdrAnon_BucketMetadata_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_BucketMetadata_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_BucketMetadata_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_BucketMetadata_Ext", u.V) +} +func XDR_XdrAnon_BucketMetadata_Ext(v *XdrAnon_BucketMetadata_Ext) *XdrAnon_BucketMetadata_Ext { + return v +} + +type XdrType_BucketMetadata = *BucketMetadata + +func (v *BucketMetadata) XdrPointer() interface{} { return v } +func (BucketMetadata) XdrTypeName() string { return "BucketMetadata" } +func (v BucketMetadata) XdrValue() interface{} { return v } +func (v *BucketMetadata) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *BucketMetadata) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sledgerVersion", name), XDR_Uint32(&v.LedgerVersion)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_BucketMetadata_Ext(&v.Ext)) +} +func XDR_BucketMetadata(v *BucketMetadata) *BucketMetadata { return v } + +var _XdrTags_BucketEntry = map[int32]bool{ + XdrToI32(LIVEENTRY): true, + XdrToI32(INITENTRY): true, + XdrToI32(DEADENTRY): true, + XdrToI32(METAENTRY): true, +} + +func (_ BucketEntry) XdrValidTags() map[int32]bool { + return _XdrTags_BucketEntry +} +func (u *BucketEntry) LiveEntry() *LedgerEntry { + switch u.Type { + case LIVEENTRY, INITENTRY: + if v, ok := u._u.(*LedgerEntry); ok { + return v + } else { + var zero LedgerEntry + u._u = &zero + return &zero + } + default: + XdrPanic("BucketEntry.LiveEntry accessed when Type == %v", u.Type) + return nil + } +} +func (u *BucketEntry) DeadEntry() *LedgerKey { + switch u.Type { + case DEADENTRY: + if v, ok := u._u.(*LedgerKey); ok { + return v + } else { 
+ var zero LedgerKey + u._u = &zero + return &zero + } + default: + XdrPanic("BucketEntry.DeadEntry accessed when Type == %v", u.Type) + return nil + } +} +func (u *BucketEntry) MetaEntry() *BucketMetadata { + switch u.Type { + case METAENTRY: + if v, ok := u._u.(*BucketMetadata); ok { + return v + } else { + var zero BucketMetadata + u._u = &zero + return &zero + } + default: + XdrPanic("BucketEntry.MetaEntry accessed when Type == %v", u.Type) + return nil + } +} +func (u BucketEntry) XdrValid() bool { + switch u.Type { + case LIVEENTRY, INITENTRY, DEADENTRY, METAENTRY: + return true + } + return false +} +func (u *BucketEntry) XdrUnionTag() XdrNum32 { + return XDR_BucketEntryType(&u.Type) +} +func (u *BucketEntry) XdrUnionTagName() string { + return "Type" +} +func (u *BucketEntry) XdrUnionBody() XdrType { + switch u.Type { + case LIVEENTRY, INITENTRY: + return XDR_LedgerEntry(u.LiveEntry()) + case DEADENTRY: + return XDR_LedgerKey(u.DeadEntry()) + case METAENTRY: + return XDR_BucketMetadata(u.MetaEntry()) + } + return nil +} +func (u *BucketEntry) XdrUnionBodyName() string { + switch u.Type { + case LIVEENTRY, INITENTRY: + return "LiveEntry" + case DEADENTRY: + return "DeadEntry" + case METAENTRY: + return "MetaEntry" + } + return "" +} + +type XdrType_BucketEntry = *BucketEntry + +func (v *BucketEntry) XdrPointer() interface{} { return v } +func (BucketEntry) XdrTypeName() string { return "BucketEntry" } +func (v BucketEntry) XdrValue() interface{} { return v } +func (v *BucketEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *BucketEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_BucketEntryType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case LIVEENTRY, INITENTRY: + x.Marshal(x.Sprintf("%sliveEntry", name), XDR_LedgerEntry(u.LiveEntry())) + return + case DEADENTRY: + x.Marshal(x.Sprintf("%sdeadEntry", name), XDR_LedgerKey(u.DeadEntry())) + return + case METAENTRY: + x.Marshal(x.Sprintf("%smetaEntry", name), XDR_BucketMetadata(u.MetaEntry())) + return + } + XdrPanic("invalid Type (%v) in BucketEntry", u.Type) +} +func XDR_BucketEntry(v *BucketEntry) *BucketEntry { return v } + +type _XdrVec_unbounded_TransactionEnvelope []TransactionEnvelope + +func (_XdrVec_unbounded_TransactionEnvelope) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_TransactionEnvelope) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_TransactionEnvelope length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_TransactionEnvelope length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_TransactionEnvelope) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_TransactionEnvelope) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]TransactionEnvelope, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_TransactionEnvelope) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + 
if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_TransactionEnvelope(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_TransactionEnvelope) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_TransactionEnvelope) XdrTypeName() string { return "TransactionEnvelope<>" } +func (v *_XdrVec_unbounded_TransactionEnvelope) XdrPointer() interface{} { + return (*[]TransactionEnvelope)(v) +} +func (v _XdrVec_unbounded_TransactionEnvelope) XdrValue() interface{} { + return ([]TransactionEnvelope)(v) +} +func (v *_XdrVec_unbounded_TransactionEnvelope) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_TransactionSet = *TransactionSet + +func (v *TransactionSet) XdrPointer() interface{} { return v } +func (TransactionSet) XdrTypeName() string { return "TransactionSet" } +func (v TransactionSet) XdrValue() interface{} { return v } +func (v *TransactionSet) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionSet) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%spreviousLedgerHash", name), XDR_Hash(&v.PreviousLedgerHash)) + x.Marshal(x.Sprintf("%stxs", name), (*_XdrVec_unbounded_TransactionEnvelope)(&v.Txs)) +} +func XDR_TransactionSet(v *TransactionSet) *TransactionSet { return v } + +type XdrType_TransactionResultPair = *TransactionResultPair + +func (v *TransactionResultPair) XdrPointer() interface{} { return v } +func (TransactionResultPair) XdrTypeName() string { return "TransactionResultPair" } +func (v TransactionResultPair) XdrValue() interface{} { return v } +func (v *TransactionResultPair) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionResultPair) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%stransactionHash", name), XDR_Hash(&v.TransactionHash)) + x.Marshal(x.Sprintf("%sresult", name), XDR_TransactionResult(&v.Result)) +} +func XDR_TransactionResultPair(v *TransactionResultPair) *TransactionResultPair { return v } + +type _XdrVec_unbounded_TransactionResultPair []TransactionResultPair + +func (_XdrVec_unbounded_TransactionResultPair) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_TransactionResultPair) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_TransactionResultPair length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_TransactionResultPair length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_TransactionResultPair) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_TransactionResultPair) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]TransactionResultPair, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_TransactionResultPair) XdrMarshalN(x XDR, name 
string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_TransactionResultPair(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_TransactionResultPair) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_TransactionResultPair) XdrTypeName() string { return "TransactionResultPair<>" } +func (v *_XdrVec_unbounded_TransactionResultPair) XdrPointer() interface{} { + return (*[]TransactionResultPair)(v) +} +func (v _XdrVec_unbounded_TransactionResultPair) XdrValue() interface{} { + return ([]TransactionResultPair)(v) +} +func (v *_XdrVec_unbounded_TransactionResultPair) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_TransactionResultSet = *TransactionResultSet + +func (v *TransactionResultSet) XdrPointer() interface{} { return v } +func (TransactionResultSet) XdrTypeName() string { return "TransactionResultSet" } +func (v TransactionResultSet) XdrValue() interface{} { return v } +func (v *TransactionResultSet) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionResultSet) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sresults", name), (*_XdrVec_unbounded_TransactionResultPair)(&v.Results)) +} +func XDR_TransactionResultSet(v *TransactionResultSet) *TransactionResultSet { return v } + +var _XdrTags_XdrAnon_TransactionHistoryEntry_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_TransactionHistoryEntry_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_TransactionHistoryEntry_Ext +} +func (u XdrAnon_TransactionHistoryEntry_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_TransactionHistoryEntry_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_TransactionHistoryEntry_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_TransactionHistoryEntry_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_TransactionHistoryEntry_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_TransactionHistoryEntry_Ext = *XdrAnon_TransactionHistoryEntry_Ext + +func (v *XdrAnon_TransactionHistoryEntry_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_TransactionHistoryEntry_Ext) XdrTypeName() string { + return "XdrAnon_TransactionHistoryEntry_Ext" +} +func (v XdrAnon_TransactionHistoryEntry_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_TransactionHistoryEntry_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_TransactionHistoryEntry_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_TransactionHistoryEntry_Ext", u.V) +} +func XDR_XdrAnon_TransactionHistoryEntry_Ext(v *XdrAnon_TransactionHistoryEntry_Ext) *XdrAnon_TransactionHistoryEntry_Ext { + return v +} + +type XdrType_TransactionHistoryEntry = *TransactionHistoryEntry + +func (v *TransactionHistoryEntry) XdrPointer() interface{} { return v } +func (TransactionHistoryEntry) XdrTypeName() string { return 
"TransactionHistoryEntry" } +func (v TransactionHistoryEntry) XdrValue() interface{} { return v } +func (v *TransactionHistoryEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionHistoryEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sledgerSeq", name), XDR_Uint32(&v.LedgerSeq)) + x.Marshal(x.Sprintf("%stxSet", name), XDR_TransactionSet(&v.TxSet)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_TransactionHistoryEntry_Ext(&v.Ext)) +} +func XDR_TransactionHistoryEntry(v *TransactionHistoryEntry) *TransactionHistoryEntry { return v } + +var _XdrTags_XdrAnon_TransactionHistoryResultEntry_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_TransactionHistoryResultEntry_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_TransactionHistoryResultEntry_Ext +} +func (u XdrAnon_TransactionHistoryResultEntry_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_TransactionHistoryResultEntry_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_TransactionHistoryResultEntry_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_TransactionHistoryResultEntry_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_TransactionHistoryResultEntry_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_TransactionHistoryResultEntry_Ext = *XdrAnon_TransactionHistoryResultEntry_Ext + +func (v *XdrAnon_TransactionHistoryResultEntry_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_TransactionHistoryResultEntry_Ext) XdrTypeName() string { + return "XdrAnon_TransactionHistoryResultEntry_Ext" +} +func (v XdrAnon_TransactionHistoryResultEntry_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_TransactionHistoryResultEntry_Ext) XdrMarshal(x XDR, name string) { + x.Marshal(name, v) +} +func (u *XdrAnon_TransactionHistoryResultEntry_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_TransactionHistoryResultEntry_Ext", u.V) +} +func XDR_XdrAnon_TransactionHistoryResultEntry_Ext(v *XdrAnon_TransactionHistoryResultEntry_Ext) *XdrAnon_TransactionHistoryResultEntry_Ext { + return v +} + +type XdrType_TransactionHistoryResultEntry = *TransactionHistoryResultEntry + +func (v *TransactionHistoryResultEntry) XdrPointer() interface{} { return v } +func (TransactionHistoryResultEntry) XdrTypeName() string { return "TransactionHistoryResultEntry" } +func (v TransactionHistoryResultEntry) XdrValue() interface{} { return v } +func (v *TransactionHistoryResultEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionHistoryResultEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sledgerSeq", name), XDR_Uint32(&v.LedgerSeq)) + x.Marshal(x.Sprintf("%stxResultSet", name), XDR_TransactionResultSet(&v.TxResultSet)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_TransactionHistoryResultEntry_Ext(&v.Ext)) +} +func XDR_TransactionHistoryResultEntry(v *TransactionHistoryResultEntry) *TransactionHistoryResultEntry { + return v +} + +var _XdrTags_XdrAnon_LedgerHeaderHistoryEntry_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ 
XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_LedgerHeaderHistoryEntry_Ext +} +func (u XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_LedgerHeaderHistoryEntry_Ext = *XdrAnon_LedgerHeaderHistoryEntry_Ext + +func (v *XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrTypeName() string { + return "XdrAnon_LedgerHeaderHistoryEntry_Ext" +} +func (v XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_LedgerHeaderHistoryEntry_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_LedgerHeaderHistoryEntry_Ext", u.V) +} +func XDR_XdrAnon_LedgerHeaderHistoryEntry_Ext(v *XdrAnon_LedgerHeaderHistoryEntry_Ext) *XdrAnon_LedgerHeaderHistoryEntry_Ext { + return v +} + +type XdrType_LedgerHeaderHistoryEntry = *LedgerHeaderHistoryEntry + +func (v *LedgerHeaderHistoryEntry) XdrPointer() interface{} { return v } +func (LedgerHeaderHistoryEntry) XdrTypeName() string { return "LedgerHeaderHistoryEntry" } +func (v LedgerHeaderHistoryEntry) XdrValue() interface{} { return v } +func (v *LedgerHeaderHistoryEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LedgerHeaderHistoryEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%shash", name), XDR_Hash(&v.Hash)) + x.Marshal(x.Sprintf("%sheader", name), XDR_LedgerHeader(&v.Header)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_LedgerHeaderHistoryEntry_Ext(&v.Ext)) +} +func XDR_LedgerHeaderHistoryEntry(v *LedgerHeaderHistoryEntry) *LedgerHeaderHistoryEntry { return v } + +type _XdrVec_unbounded_SCPEnvelope []SCPEnvelope + +func (_XdrVec_unbounded_SCPEnvelope) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_SCPEnvelope) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_SCPEnvelope length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_SCPEnvelope length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_SCPEnvelope) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_SCPEnvelope) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := 
make([]SCPEnvelope, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_SCPEnvelope) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_SCPEnvelope(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_SCPEnvelope) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_SCPEnvelope) XdrTypeName() string { return "SCPEnvelope<>" } +func (v *_XdrVec_unbounded_SCPEnvelope) XdrPointer() interface{} { return (*[]SCPEnvelope)(v) } +func (v _XdrVec_unbounded_SCPEnvelope) XdrValue() interface{} { return ([]SCPEnvelope)(v) } +func (v *_XdrVec_unbounded_SCPEnvelope) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LedgerSCPMessages = *LedgerSCPMessages + +func (v *LedgerSCPMessages) XdrPointer() interface{} { return v } +func (LedgerSCPMessages) XdrTypeName() string { return "LedgerSCPMessages" } +func (v LedgerSCPMessages) XdrValue() interface{} { return v } +func (v *LedgerSCPMessages) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LedgerSCPMessages) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sledgerSeq", name), XDR_Uint32(&v.LedgerSeq)) + x.Marshal(x.Sprintf("%smessages", name), (*_XdrVec_unbounded_SCPEnvelope)(&v.Messages)) +} +func XDR_LedgerSCPMessages(v *LedgerSCPMessages) *LedgerSCPMessages { return v } + +type XdrType_SCPHistoryEntryV0 = *SCPHistoryEntryV0 + +func (v *SCPHistoryEntryV0) XdrPointer() interface{} { return v } +func (SCPHistoryEntryV0) XdrTypeName() string { return "SCPHistoryEntryV0" } +func (v SCPHistoryEntryV0) XdrValue() interface{} { return v } +func (v *SCPHistoryEntryV0) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SCPHistoryEntryV0) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%squorumSets", name), (*_XdrVec_unbounded_SCPQuorumSet)(&v.QuorumSets)) + x.Marshal(x.Sprintf("%sledgerMessages", name), XDR_LedgerSCPMessages(&v.LedgerMessages)) +} +func XDR_SCPHistoryEntryV0(v *SCPHistoryEntryV0) *SCPHistoryEntryV0 { return v } + +var _XdrTags_SCPHistoryEntry = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ SCPHistoryEntry) XdrValidTags() map[int32]bool { + return _XdrTags_SCPHistoryEntry +} +func (u *SCPHistoryEntry) V0() *SCPHistoryEntryV0 { + switch u.V { + case 0: + if v, ok := u._u.(*SCPHistoryEntryV0); ok { + return v + } else { + var zero SCPHistoryEntryV0 + u._u = &zero + return &zero + } + default: + XdrPanic("SCPHistoryEntry.V0 accessed when V == %v", u.V) + return nil + } +} +func (u SCPHistoryEntry) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *SCPHistoryEntry) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *SCPHistoryEntry) XdrUnionTagName() string { + return "V" +} +func (u *SCPHistoryEntry) XdrUnionBody() XdrType { + switch u.V { + case 0: + return XDR_SCPHistoryEntryV0(u.V0()) + } + return nil +} +func (u *SCPHistoryEntry) XdrUnionBodyName() string { + switch u.V { + case 0: + return "V0" + } + return "" +} + +type XdrType_SCPHistoryEntry = *SCPHistoryEntry + +func (v *SCPHistoryEntry) XdrPointer() interface{} { return v } +func (SCPHistoryEntry) XdrTypeName() string { return 
"SCPHistoryEntry" } +func (v SCPHistoryEntry) XdrValue() interface{} { return v } +func (v *SCPHistoryEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *SCPHistoryEntry) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + x.Marshal(x.Sprintf("%sv0", name), XDR_SCPHistoryEntryV0(u.V0())) + return + } + XdrPanic("invalid V (%v) in SCPHistoryEntry", u.V) +} +func XDR_SCPHistoryEntry(v *SCPHistoryEntry) *SCPHistoryEntry { return v } + +var _XdrNames_LedgerEntryChangeType = map[int32]string{ + int32(LEDGER_ENTRY_CREATED): "LEDGER_ENTRY_CREATED", + int32(LEDGER_ENTRY_UPDATED): "LEDGER_ENTRY_UPDATED", + int32(LEDGER_ENTRY_REMOVED): "LEDGER_ENTRY_REMOVED", + int32(LEDGER_ENTRY_STATE): "LEDGER_ENTRY_STATE", +} +var _XdrValues_LedgerEntryChangeType = map[string]int32{ + "LEDGER_ENTRY_CREATED": int32(LEDGER_ENTRY_CREATED), + "LEDGER_ENTRY_UPDATED": int32(LEDGER_ENTRY_UPDATED), + "LEDGER_ENTRY_REMOVED": int32(LEDGER_ENTRY_REMOVED), + "LEDGER_ENTRY_STATE": int32(LEDGER_ENTRY_STATE), +} + +func (LedgerEntryChangeType) XdrEnumNames() map[int32]string { + return _XdrNames_LedgerEntryChangeType +} +func (v LedgerEntryChangeType) String() string { + if s, ok := _XdrNames_LedgerEntryChangeType[int32(v)]; ok { + return s + } + return fmt.Sprintf("LedgerEntryChangeType#%d", v) +} +func (v *LedgerEntryChangeType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_LedgerEntryChangeType[stok]; ok { + *v = LedgerEntryChangeType(val) + return nil + } else if stok == "LedgerEntryChangeType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid LedgerEntryChangeType.", stok)) + } +} +func (v LedgerEntryChangeType) GetU32() uint32 { return uint32(v) } +func (v *LedgerEntryChangeType) SetU32(n uint32) { *v = LedgerEntryChangeType(n) } +func (v *LedgerEntryChangeType) XdrPointer() interface{} { return v } +func (LedgerEntryChangeType) XdrTypeName() string { return "LedgerEntryChangeType" } +func (v LedgerEntryChangeType) XdrValue() interface{} { return v } +func (v *LedgerEntryChangeType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LedgerEntryChangeType = *LedgerEntryChangeType + +func XDR_LedgerEntryChangeType(v *LedgerEntryChangeType) *LedgerEntryChangeType { return v } + +var _XdrComments_LedgerEntryChangeType = map[int32]string{ + int32(LEDGER_ENTRY_CREATED): "entry was added to the ledger", + int32(LEDGER_ENTRY_UPDATED): "entry was modified in the ledger", + int32(LEDGER_ENTRY_REMOVED): "entry was removed from the ledger", + int32(LEDGER_ENTRY_STATE): "value of the entry", +} + +func (e LedgerEntryChangeType) XdrEnumComments() map[int32]string { + return _XdrComments_LedgerEntryChangeType +} + +var _XdrTags_LedgerEntryChange = map[int32]bool{ + XdrToI32(LEDGER_ENTRY_CREATED): true, + XdrToI32(LEDGER_ENTRY_UPDATED): true, + XdrToI32(LEDGER_ENTRY_REMOVED): true, + XdrToI32(LEDGER_ENTRY_STATE): true, +} + +func (_ LedgerEntryChange) XdrValidTags() map[int32]bool { + return _XdrTags_LedgerEntryChange +} +func (u *LedgerEntryChange) Created() *LedgerEntry { + switch u.Type { + case LEDGER_ENTRY_CREATED: + if v, ok := u._u.(*LedgerEntry); ok { + return v + } else { + var zero LedgerEntry + u._u = &zero + return &zero + } + default: + 
XdrPanic("LedgerEntryChange.Created accessed when Type == %v", u.Type) + return nil + } +} +func (u *LedgerEntryChange) Updated() *LedgerEntry { + switch u.Type { + case LEDGER_ENTRY_UPDATED: + if v, ok := u._u.(*LedgerEntry); ok { + return v + } else { + var zero LedgerEntry + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerEntryChange.Updated accessed when Type == %v", u.Type) + return nil + } +} +func (u *LedgerEntryChange) Removed() *LedgerKey { + switch u.Type { + case LEDGER_ENTRY_REMOVED: + if v, ok := u._u.(*LedgerKey); ok { + return v + } else { + var zero LedgerKey + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerEntryChange.Removed accessed when Type == %v", u.Type) + return nil + } +} +func (u *LedgerEntryChange) State() *LedgerEntry { + switch u.Type { + case LEDGER_ENTRY_STATE: + if v, ok := u._u.(*LedgerEntry); ok { + return v + } else { + var zero LedgerEntry + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerEntryChange.State accessed when Type == %v", u.Type) + return nil + } +} +func (u LedgerEntryChange) XdrValid() bool { + switch u.Type { + case LEDGER_ENTRY_CREATED, LEDGER_ENTRY_UPDATED, LEDGER_ENTRY_REMOVED, LEDGER_ENTRY_STATE: + return true + } + return false +} +func (u *LedgerEntryChange) XdrUnionTag() XdrNum32 { + return XDR_LedgerEntryChangeType(&u.Type) +} +func (u *LedgerEntryChange) XdrUnionTagName() string { + return "Type" +} +func (u *LedgerEntryChange) XdrUnionBody() XdrType { + switch u.Type { + case LEDGER_ENTRY_CREATED: + return XDR_LedgerEntry(u.Created()) + case LEDGER_ENTRY_UPDATED: + return XDR_LedgerEntry(u.Updated()) + case LEDGER_ENTRY_REMOVED: + return XDR_LedgerKey(u.Removed()) + case LEDGER_ENTRY_STATE: + return XDR_LedgerEntry(u.State()) + } + return nil +} +func (u *LedgerEntryChange) XdrUnionBodyName() string { + switch u.Type { + case LEDGER_ENTRY_CREATED: + return "Created" + case LEDGER_ENTRY_UPDATED: + return "Updated" + case LEDGER_ENTRY_REMOVED: + return "Removed" + case LEDGER_ENTRY_STATE: + return "State" + } + return "" +} + +type XdrType_LedgerEntryChange = *LedgerEntryChange + +func (v *LedgerEntryChange) XdrPointer() interface{} { return v } +func (LedgerEntryChange) XdrTypeName() string { return "LedgerEntryChange" } +func (v LedgerEntryChange) XdrValue() interface{} { return v } +func (v *LedgerEntryChange) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *LedgerEntryChange) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_LedgerEntryChangeType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case LEDGER_ENTRY_CREATED: + x.Marshal(x.Sprintf("%screated", name), XDR_LedgerEntry(u.Created())) + return + case LEDGER_ENTRY_UPDATED: + x.Marshal(x.Sprintf("%supdated", name), XDR_LedgerEntry(u.Updated())) + return + case LEDGER_ENTRY_REMOVED: + x.Marshal(x.Sprintf("%sremoved", name), XDR_LedgerKey(u.Removed())) + return + case LEDGER_ENTRY_STATE: + x.Marshal(x.Sprintf("%sstate", name), XDR_LedgerEntry(u.State())) + return + } + XdrPanic("invalid Type (%v) in LedgerEntryChange", u.Type) +} +func XDR_LedgerEntryChange(v *LedgerEntryChange) *LedgerEntryChange { return v } + +type _XdrVec_unbounded_LedgerEntryChange []LedgerEntryChange + +func (_XdrVec_unbounded_LedgerEntryChange) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_LedgerEntryChange) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + 
XdrPanic("_XdrVec_unbounded_LedgerEntryChange length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_LedgerEntryChange length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_LedgerEntryChange) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_LedgerEntryChange) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]LedgerEntryChange, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_LedgerEntryChange) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_LedgerEntryChange(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_LedgerEntryChange) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_LedgerEntryChange) XdrTypeName() string { return "LedgerEntryChange<>" } +func (v *_XdrVec_unbounded_LedgerEntryChange) XdrPointer() interface{} { + return (*[]LedgerEntryChange)(v) +} +func (v _XdrVec_unbounded_LedgerEntryChange) XdrValue() interface{} { return ([]LedgerEntryChange)(v) } +func (v *_XdrVec_unbounded_LedgerEntryChange) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LedgerEntryChanges struct { + *_XdrVec_unbounded_LedgerEntryChange +} + +func XDR_LedgerEntryChanges(v *LedgerEntryChanges) XdrType_LedgerEntryChanges { + return XdrType_LedgerEntryChanges{(*_XdrVec_unbounded_LedgerEntryChange)(v)} +} +func (XdrType_LedgerEntryChanges) XdrTypeName() string { return "LedgerEntryChanges" } +func (v XdrType_LedgerEntryChanges) XdrUnwrap() XdrType { return v._XdrVec_unbounded_LedgerEntryChange } + +type XdrType_OperationMeta = *OperationMeta + +func (v *OperationMeta) XdrPointer() interface{} { return v } +func (OperationMeta) XdrTypeName() string { return "OperationMeta" } +func (v OperationMeta) XdrValue() interface{} { return v } +func (v *OperationMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *OperationMeta) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%schanges", name), XDR_LedgerEntryChanges(&v.Changes)) +} +func XDR_OperationMeta(v *OperationMeta) *OperationMeta { return v } + +type _XdrVec_unbounded_OperationMeta []OperationMeta + +func (_XdrVec_unbounded_OperationMeta) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_OperationMeta) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_OperationMeta length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_OperationMeta length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_OperationMeta) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_OperationMeta) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + 
if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]OperationMeta, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_OperationMeta) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_OperationMeta(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_OperationMeta) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_OperationMeta) XdrTypeName() string { return "OperationMeta<>" } +func (v *_XdrVec_unbounded_OperationMeta) XdrPointer() interface{} { return (*[]OperationMeta)(v) } +func (v _XdrVec_unbounded_OperationMeta) XdrValue() interface{} { return ([]OperationMeta)(v) } +func (v *_XdrVec_unbounded_OperationMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_TransactionMetaV1 = *TransactionMetaV1 + +func (v *TransactionMetaV1) XdrPointer() interface{} { return v } +func (TransactionMetaV1) XdrTypeName() string { return "TransactionMetaV1" } +func (v TransactionMetaV1) XdrValue() interface{} { return v } +func (v *TransactionMetaV1) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionMetaV1) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%stxChanges", name), XDR_LedgerEntryChanges(&v.TxChanges)) + x.Marshal(x.Sprintf("%soperations", name), (*_XdrVec_unbounded_OperationMeta)(&v.Operations)) +} +func XDR_TransactionMetaV1(v *TransactionMetaV1) *TransactionMetaV1 { return v } + +type XdrType_TransactionMetaV2 = *TransactionMetaV2 + +func (v *TransactionMetaV2) XdrPointer() interface{} { return v } +func (TransactionMetaV2) XdrTypeName() string { return "TransactionMetaV2" } +func (v TransactionMetaV2) XdrValue() interface{} { return v } +func (v *TransactionMetaV2) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionMetaV2) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%stxChangesBefore", name), XDR_LedgerEntryChanges(&v.TxChangesBefore)) + x.Marshal(x.Sprintf("%soperations", name), (*_XdrVec_unbounded_OperationMeta)(&v.Operations)) + x.Marshal(x.Sprintf("%stxChangesAfter", name), XDR_LedgerEntryChanges(&v.TxChangesAfter)) +} +func XDR_TransactionMetaV2(v *TransactionMetaV2) *TransactionMetaV2 { return v } + +var _XdrTags_TransactionMeta = map[int32]bool{ + XdrToI32(0): true, + XdrToI32(1): true, + XdrToI32(2): true, +} + +func (_ TransactionMeta) XdrValidTags() map[int32]bool { + return _XdrTags_TransactionMeta +} +func (u *TransactionMeta) Operations() *[]OperationMeta { + switch u.V { + case 0: + if v, ok := u._u.(*[]OperationMeta); ok { + return v + } else { + var zero []OperationMeta + u._u = &zero + return &zero + } + default: + XdrPanic("TransactionMeta.Operations accessed when V == %v", u.V) + return nil + } +} +func (u *TransactionMeta) V1() *TransactionMetaV1 { + switch u.V { + case 1: + if v, ok := u._u.(*TransactionMetaV1); ok { + return v + } else { + var zero 
TransactionMetaV1 + u._u = &zero + return &zero + } + default: + XdrPanic("TransactionMeta.V1 accessed when V == %v", u.V) + return nil + } +} +func (u *TransactionMeta) V2() *TransactionMetaV2 { + switch u.V { + case 2: + if v, ok := u._u.(*TransactionMetaV2); ok { + return v + } else { + var zero TransactionMetaV2 + u._u = &zero + return &zero + } + default: + XdrPanic("TransactionMeta.V2 accessed when V == %v", u.V) + return nil + } +} +func (u TransactionMeta) XdrValid() bool { + switch u.V { + case 0, 1, 2: + return true + } + return false +} +func (u *TransactionMeta) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *TransactionMeta) XdrUnionTagName() string { + return "V" +} +func (u *TransactionMeta) XdrUnionBody() XdrType { + switch u.V { + case 0: + return (*_XdrVec_unbounded_OperationMeta)(u.Operations()) + case 1: + return XDR_TransactionMetaV1(u.V1()) + case 2: + return XDR_TransactionMetaV2(u.V2()) + } + return nil +} +func (u *TransactionMeta) XdrUnionBodyName() string { + switch u.V { + case 0: + return "Operations" + case 1: + return "V1" + case 2: + return "V2" + } + return "" +} + +type XdrType_TransactionMeta = *TransactionMeta + +func (v *TransactionMeta) XdrPointer() interface{} { return v } +func (TransactionMeta) XdrTypeName() string { return "TransactionMeta" } +func (v TransactionMeta) XdrValue() interface{} { return v } +func (v *TransactionMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *TransactionMeta) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + x.Marshal(x.Sprintf("%soperations", name), (*_XdrVec_unbounded_OperationMeta)(u.Operations())) + return + case 1: + x.Marshal(x.Sprintf("%sv1", name), XDR_TransactionMetaV1(u.V1())) + return + case 2: + x.Marshal(x.Sprintf("%sv2", name), XDR_TransactionMetaV2(u.V2())) + return + } + XdrPanic("invalid V (%v) in TransactionMeta", u.V) +} +func XDR_TransactionMeta(v *TransactionMeta) *TransactionMeta { return v } + +type XdrType_TransactionResultMeta = *TransactionResultMeta + +func (v *TransactionResultMeta) XdrPointer() interface{} { return v } +func (TransactionResultMeta) XdrTypeName() string { return "TransactionResultMeta" } +func (v TransactionResultMeta) XdrValue() interface{} { return v } +func (v *TransactionResultMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionResultMeta) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sresult", name), XDR_TransactionResultPair(&v.Result)) + x.Marshal(x.Sprintf("%sfeeProcessing", name), XDR_LedgerEntryChanges(&v.FeeProcessing)) + x.Marshal(x.Sprintf("%stxApplyProcessing", name), XDR_TransactionMeta(&v.TxApplyProcessing)) +} +func XDR_TransactionResultMeta(v *TransactionResultMeta) *TransactionResultMeta { return v } + +type XdrType_UpgradeEntryMeta = *UpgradeEntryMeta + +func (v *UpgradeEntryMeta) XdrPointer() interface{} { return v } +func (UpgradeEntryMeta) XdrTypeName() string { return "UpgradeEntryMeta" } +func (v UpgradeEntryMeta) XdrValue() interface{} { return v } +func (v *UpgradeEntryMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *UpgradeEntryMeta) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%supgrade", name), XDR_LedgerUpgrade(&v.Upgrade)) + x.Marshal(x.Sprintf("%schanges", name), XDR_LedgerEntryChanges(&v.Changes)) +} +func 
XDR_UpgradeEntryMeta(v *UpgradeEntryMeta) *UpgradeEntryMeta { return v } + +type _XdrVec_unbounded_TransactionResultMeta []TransactionResultMeta + +func (_XdrVec_unbounded_TransactionResultMeta) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_TransactionResultMeta) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_TransactionResultMeta length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_TransactionResultMeta length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_TransactionResultMeta) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_TransactionResultMeta) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]TransactionResultMeta, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_TransactionResultMeta) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_TransactionResultMeta(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_TransactionResultMeta) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_TransactionResultMeta) XdrTypeName() string { return "TransactionResultMeta<>" } +func (v *_XdrVec_unbounded_TransactionResultMeta) XdrPointer() interface{} { + return (*[]TransactionResultMeta)(v) +} +func (v _XdrVec_unbounded_TransactionResultMeta) XdrValue() interface{} { + return ([]TransactionResultMeta)(v) +} +func (v *_XdrVec_unbounded_TransactionResultMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type _XdrVec_unbounded_UpgradeEntryMeta []UpgradeEntryMeta + +func (_XdrVec_unbounded_UpgradeEntryMeta) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_UpgradeEntryMeta) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_UpgradeEntryMeta length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_UpgradeEntryMeta length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_UpgradeEntryMeta) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_UpgradeEntryMeta) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]UpgradeEntryMeta, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_UpgradeEntryMeta) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + 
for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_UpgradeEntryMeta(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_UpgradeEntryMeta) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_UpgradeEntryMeta) XdrTypeName() string { return "UpgradeEntryMeta<>" } +func (v *_XdrVec_unbounded_UpgradeEntryMeta) XdrPointer() interface{} { + return (*[]UpgradeEntryMeta)(v) +} +func (v _XdrVec_unbounded_UpgradeEntryMeta) XdrValue() interface{} { return ([]UpgradeEntryMeta)(v) } +func (v *_XdrVec_unbounded_UpgradeEntryMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type _XdrVec_unbounded_SCPHistoryEntry []SCPHistoryEntry + +func (_XdrVec_unbounded_SCPHistoryEntry) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_SCPHistoryEntry) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_SCPHistoryEntry length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_SCPHistoryEntry length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_SCPHistoryEntry) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_SCPHistoryEntry) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]SCPHistoryEntry, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_SCPHistoryEntry) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_SCPHistoryEntry(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_SCPHistoryEntry) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_SCPHistoryEntry) XdrTypeName() string { return "SCPHistoryEntry<>" } +func (v *_XdrVec_unbounded_SCPHistoryEntry) XdrPointer() interface{} { return (*[]SCPHistoryEntry)(v) } +func (v _XdrVec_unbounded_SCPHistoryEntry) XdrValue() interface{} { return ([]SCPHistoryEntry)(v) } +func (v *_XdrVec_unbounded_SCPHistoryEntry) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LedgerCloseMetaV0 = *LedgerCloseMetaV0 + +func (v *LedgerCloseMetaV0) XdrPointer() interface{} { return v } +func (LedgerCloseMetaV0) XdrTypeName() string { return "LedgerCloseMetaV0" } +func (v LedgerCloseMetaV0) XdrValue() interface{} { return v } +func (v *LedgerCloseMetaV0) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LedgerCloseMetaV0) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sledgerHeader", name), XDR_LedgerHeaderHistoryEntry(&v.LedgerHeader)) + x.Marshal(x.Sprintf("%stxSet", name), XDR_TransactionSet(&v.TxSet)) + 
x.Marshal(x.Sprintf("%stxProcessing", name), (*_XdrVec_unbounded_TransactionResultMeta)(&v.TxProcessing)) + x.Marshal(x.Sprintf("%supgradesProcessing", name), (*_XdrVec_unbounded_UpgradeEntryMeta)(&v.UpgradesProcessing)) + x.Marshal(x.Sprintf("%sscpInfo", name), (*_XdrVec_unbounded_SCPHistoryEntry)(&v.ScpInfo)) +} +func XDR_LedgerCloseMetaV0(v *LedgerCloseMetaV0) *LedgerCloseMetaV0 { return v } + +var _XdrTags_LedgerCloseMeta = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ LedgerCloseMeta) XdrValidTags() map[int32]bool { + return _XdrTags_LedgerCloseMeta +} +func (u *LedgerCloseMeta) V0() *LedgerCloseMetaV0 { + switch u.V { + case 0: + if v, ok := u._u.(*LedgerCloseMetaV0); ok { + return v + } else { + var zero LedgerCloseMetaV0 + u._u = &zero + return &zero + } + default: + XdrPanic("LedgerCloseMeta.V0 accessed when V == %v", u.V) + return nil + } +} +func (u LedgerCloseMeta) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *LedgerCloseMeta) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *LedgerCloseMeta) XdrUnionTagName() string { + return "V" +} +func (u *LedgerCloseMeta) XdrUnionBody() XdrType { + switch u.V { + case 0: + return XDR_LedgerCloseMetaV0(u.V0()) + } + return nil +} +func (u *LedgerCloseMeta) XdrUnionBodyName() string { + switch u.V { + case 0: + return "V0" + } + return "" +} + +type XdrType_LedgerCloseMeta = *LedgerCloseMeta + +func (v *LedgerCloseMeta) XdrPointer() interface{} { return v } +func (LedgerCloseMeta) XdrTypeName() string { return "LedgerCloseMeta" } +func (v LedgerCloseMeta) XdrValue() interface{} { return v } +func (v *LedgerCloseMeta) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *LedgerCloseMeta) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + x.Marshal(x.Sprintf("%sv0", name), XDR_LedgerCloseMetaV0(u.V0())) + return + } + XdrPanic("invalid V (%v) in LedgerCloseMeta", u.V) +} +func XDR_LedgerCloseMeta(v *LedgerCloseMeta) *LedgerCloseMeta { return v } + +var _XdrNames_ErrorCode = map[int32]string{ + int32(ERR_MISC): "ERR_MISC", + int32(ERR_DATA): "ERR_DATA", + int32(ERR_CONF): "ERR_CONF", + int32(ERR_AUTH): "ERR_AUTH", + int32(ERR_LOAD): "ERR_LOAD", +} +var _XdrValues_ErrorCode = map[string]int32{ + "ERR_MISC": int32(ERR_MISC), + "ERR_DATA": int32(ERR_DATA), + "ERR_CONF": int32(ERR_CONF), + "ERR_AUTH": int32(ERR_AUTH), + "ERR_LOAD": int32(ERR_LOAD), +} + +func (ErrorCode) XdrEnumNames() map[int32]string { + return _XdrNames_ErrorCode +} +func (v ErrorCode) String() string { + if s, ok := _XdrNames_ErrorCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("ErrorCode#%d", v) +} +func (v *ErrorCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ErrorCode[stok]; ok { + *v = ErrorCode(val) + return nil + } else if stok == "ErrorCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ErrorCode.", stok)) + } +} +func (v ErrorCode) GetU32() uint32 { return uint32(v) } +func (v *ErrorCode) SetU32(n uint32) { *v = ErrorCode(n) } +func (v *ErrorCode) XdrPointer() interface{} { return v } +func (ErrorCode) XdrTypeName() string { return "ErrorCode" } +func (v ErrorCode) XdrValue() interface{} { return v } +func (v *ErrorCode) XdrMarshal(x XDR, name 
string) { x.Marshal(name, v) } + +type XdrType_ErrorCode = *ErrorCode + +func XDR_ErrorCode(v *ErrorCode) *ErrorCode { return v } + +var _XdrComments_ErrorCode = map[int32]string{ + int32(ERR_MISC): "Unspecific error", + int32(ERR_DATA): "Malformed data", + int32(ERR_CONF): "Misconfiguration error", + int32(ERR_AUTH): "Authentication failure", + int32(ERR_LOAD): "System overloaded", +} + +func (e ErrorCode) XdrEnumComments() map[int32]string { + return _XdrComments_ErrorCode +} + +type XdrType_Error = *Error + +func (v *Error) XdrPointer() interface{} { return v } +func (Error) XdrTypeName() string { return "Error" } +func (v Error) XdrValue() interface{} { return v } +func (v *Error) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *Error) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%scode", name), XDR_ErrorCode(&v.Code)) + x.Marshal(x.Sprintf("%smsg", name), XdrString{&v.Msg, 100}) +} +func XDR_Error(v *Error) *Error { return v } + +type XdrType_AuthCert = *AuthCert + +func (v *AuthCert) XdrPointer() interface{} { return v } +func (AuthCert) XdrTypeName() string { return "AuthCert" } +func (v AuthCert) XdrValue() interface{} { return v } +func (v *AuthCert) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *AuthCert) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%spubkey", name), XDR_Curve25519Public(&v.Pubkey)) + x.Marshal(x.Sprintf("%sexpiration", name), XDR_Uint64(&v.Expiration)) + x.Marshal(x.Sprintf("%ssig", name), XDR_Signature(&v.Sig)) +} +func XDR_AuthCert(v *AuthCert) *AuthCert { return v } + +type XdrType_Hello = *Hello + +func (v *Hello) XdrPointer() interface{} { return v } +func (Hello) XdrTypeName() string { return "Hello" } +func (v Hello) XdrValue() interface{} { return v } +func (v *Hello) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *Hello) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sledgerVersion", name), XDR_Uint32(&v.LedgerVersion)) + x.Marshal(x.Sprintf("%soverlayVersion", name), XDR_Uint32(&v.OverlayVersion)) + x.Marshal(x.Sprintf("%soverlayMinVersion", name), XDR_Uint32(&v.OverlayMinVersion)) + x.Marshal(x.Sprintf("%snetworkID", name), XDR_Hash(&v.NetworkID)) + x.Marshal(x.Sprintf("%sversionStr", name), XdrString{&v.VersionStr, 100}) + x.Marshal(x.Sprintf("%slisteningPort", name), XDR_int32(&v.ListeningPort)) + x.Marshal(x.Sprintf("%speerID", name), XDR_NodeID(&v.PeerID)) + x.Marshal(x.Sprintf("%scert", name), XDR_AuthCert(&v.Cert)) + x.Marshal(x.Sprintf("%snonce", name), XDR_Uint256(&v.Nonce)) +} +func XDR_Hello(v *Hello) *Hello { return v } + +type XdrType_Auth = *Auth + +func (v *Auth) XdrPointer() interface{} { return v } +func (Auth) XdrTypeName() string { return "Auth" } +func (v Auth) XdrValue() interface{} { return v } +func (v *Auth) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *Auth) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sunused", name), XDR_int32(&v.Unused)) +} +func XDR_Auth(v *Auth) *Auth { return v } + +var _XdrNames_IPAddrType = map[int32]string{ + int32(IPv4): "IPv4", + int32(IPv6): "IPv6", +} +var _XdrValues_IPAddrType = map[string]int32{ + "IPv4": int32(IPv4), + "IPv6": int32(IPv6), +} + +func (IPAddrType) XdrEnumNames() map[int32]string { + return _XdrNames_IPAddrType +} +func (v IPAddrType) String() string { + if s, 
ok := _XdrNames_IPAddrType[int32(v)]; ok { + return s + } + return fmt.Sprintf("IPAddrType#%d", v) +} +func (v *IPAddrType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_IPAddrType[stok]; ok { + *v = IPAddrType(val) + return nil + } else if stok == "IPAddrType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid IPAddrType.", stok)) + } +} +func (v IPAddrType) GetU32() uint32 { return uint32(v) } +func (v *IPAddrType) SetU32(n uint32) { *v = IPAddrType(n) } +func (v *IPAddrType) XdrPointer() interface{} { return v } +func (IPAddrType) XdrTypeName() string { return "IPAddrType" } +func (v IPAddrType) XdrValue() interface{} { return v } +func (v *IPAddrType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_IPAddrType = *IPAddrType + +func XDR_IPAddrType(v *IPAddrType) *IPAddrType { return v } + +type _XdrArray_16_opaque [16]byte + +func (v *_XdrArray_16_opaque) GetByteSlice() []byte { return v[:] } +func (v *_XdrArray_16_opaque) XdrTypeName() string { return "opaque[]" } +func (v *_XdrArray_16_opaque) XdrValue() interface{} { return v[:] } +func (v *_XdrArray_16_opaque) XdrPointer() interface{} { return (*[16]byte)(v) } +func (v *_XdrArray_16_opaque) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *_XdrArray_16_opaque) String() string { return fmt.Sprintf("%x", v[:]) } +func (v *_XdrArray_16_opaque) Scan(ss fmt.ScanState, c rune) error { + return XdrArrayOpaqueScan(v[:], ss, c) +} +func (_XdrArray_16_opaque) XdrArraySize() uint32 { + const bound uint32 = 16 // Force error if not const or doesn't fit + return bound +} + +var _XdrTags_XdrAnon_PeerAddress_Ip = map[int32]bool{ + XdrToI32(IPv4): true, + XdrToI32(IPv6): true, +} + +func (_ XdrAnon_PeerAddress_Ip) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_PeerAddress_Ip +} +func (u *XdrAnon_PeerAddress_Ip) Ipv4() *[4]byte { + switch u.Type { + case IPv4: + if v, ok := u._u.(*[4]byte); ok { + return v + } else { + var zero [4]byte + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_PeerAddress_Ip.Ipv4 accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_PeerAddress_Ip) Ipv6() *[16]byte { + switch u.Type { + case IPv6: + if v, ok := u._u.(*[16]byte); ok { + return v + } else { + var zero [16]byte + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_PeerAddress_Ip.Ipv6 accessed when Type == %v", u.Type) + return nil + } +} +func (u XdrAnon_PeerAddress_Ip) XdrValid() bool { + switch u.Type { + case IPv4, IPv6: + return true + } + return false +} +func (u *XdrAnon_PeerAddress_Ip) XdrUnionTag() XdrNum32 { + return XDR_IPAddrType(&u.Type) +} +func (u *XdrAnon_PeerAddress_Ip) XdrUnionTagName() string { + return "Type" +} +func (u *XdrAnon_PeerAddress_Ip) XdrUnionBody() XdrType { + switch u.Type { + case IPv4: + return (*_XdrArray_4_opaque)(u.Ipv4()) + case IPv6: + return (*_XdrArray_16_opaque)(u.Ipv6()) + } + return nil +} +func (u *XdrAnon_PeerAddress_Ip) XdrUnionBodyName() string { + switch u.Type { + case IPv4: + return "Ipv4" + case IPv6: + return "Ipv6" + } + return "" +} + +type XdrType_XdrAnon_PeerAddress_Ip = *XdrAnon_PeerAddress_Ip + +func (v *XdrAnon_PeerAddress_Ip) XdrPointer() interface{} { return v } +func (XdrAnon_PeerAddress_Ip) XdrTypeName() string { return "XdrAnon_PeerAddress_Ip" } +func (v XdrAnon_PeerAddress_Ip) XdrValue() 
interface{} { return v } +func (v *XdrAnon_PeerAddress_Ip) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_PeerAddress_Ip) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_IPAddrType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case IPv4: + x.Marshal(x.Sprintf("%sipv4", name), (*_XdrArray_4_opaque)(u.Ipv4())) + return + case IPv6: + x.Marshal(x.Sprintf("%sipv6", name), (*_XdrArray_16_opaque)(u.Ipv6())) + return + } + XdrPanic("invalid Type (%v) in XdrAnon_PeerAddress_Ip", u.Type) +} +func XDR_XdrAnon_PeerAddress_Ip(v *XdrAnon_PeerAddress_Ip) *XdrAnon_PeerAddress_Ip { return v } + +type XdrType_PeerAddress = *PeerAddress + +func (v *PeerAddress) XdrPointer() interface{} { return v } +func (PeerAddress) XdrTypeName() string { return "PeerAddress" } +func (v PeerAddress) XdrValue() interface{} { return v } +func (v *PeerAddress) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *PeerAddress) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sip", name), XDR_XdrAnon_PeerAddress_Ip(&v.Ip)) + x.Marshal(x.Sprintf("%sport", name), XDR_Uint32(&v.Port)) + x.Marshal(x.Sprintf("%snumFailures", name), XDR_Uint32(&v.NumFailures)) +} +func XDR_PeerAddress(v *PeerAddress) *PeerAddress { return v } + +var _XdrNames_MessageType = map[int32]string{ + int32(ERROR_MSG): "ERROR_MSG", + int32(AUTH): "AUTH", + int32(DONT_HAVE): "DONT_HAVE", + int32(GET_PEERS): "GET_PEERS", + int32(PEERS): "PEERS", + int32(GET_TX_SET): "GET_TX_SET", + int32(TX_SET): "TX_SET", + int32(TRANSACTION): "TRANSACTION", + int32(GET_SCP_QUORUMSET): "GET_SCP_QUORUMSET", + int32(SCP_QUORUMSET): "SCP_QUORUMSET", + int32(SCP_MESSAGE): "SCP_MESSAGE", + int32(GET_SCP_STATE): "GET_SCP_STATE", + int32(HELLO): "HELLO", + int32(SURVEY_REQUEST): "SURVEY_REQUEST", + int32(SURVEY_RESPONSE): "SURVEY_RESPONSE", +} +var _XdrValues_MessageType = map[string]int32{ + "ERROR_MSG": int32(ERROR_MSG), + "AUTH": int32(AUTH), + "DONT_HAVE": int32(DONT_HAVE), + "GET_PEERS": int32(GET_PEERS), + "PEERS": int32(PEERS), + "GET_TX_SET": int32(GET_TX_SET), + "TX_SET": int32(TX_SET), + "TRANSACTION": int32(TRANSACTION), + "GET_SCP_QUORUMSET": int32(GET_SCP_QUORUMSET), + "SCP_QUORUMSET": int32(SCP_QUORUMSET), + "SCP_MESSAGE": int32(SCP_MESSAGE), + "GET_SCP_STATE": int32(GET_SCP_STATE), + "HELLO": int32(HELLO), + "SURVEY_REQUEST": int32(SURVEY_REQUEST), + "SURVEY_RESPONSE": int32(SURVEY_RESPONSE), +} + +func (MessageType) XdrEnumNames() map[int32]string { + return _XdrNames_MessageType +} +func (v MessageType) String() string { + if s, ok := _XdrNames_MessageType[int32(v)]; ok { + return s + } + return fmt.Sprintf("MessageType#%d", v) +} +func (v *MessageType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_MessageType[stok]; ok { + *v = MessageType(val) + return nil + } else if stok == "MessageType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid MessageType.", stok)) + } +} +func (v MessageType) GetU32() uint32 { return uint32(v) } +func (v *MessageType) SetU32(n uint32) { *v = MessageType(n) } +func (v *MessageType) XdrPointer() interface{} { return v } +func (MessageType) XdrTypeName() string { return "MessageType" } +func (v MessageType) XdrValue() interface{} { return v } +func (v 
*MessageType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_MessageType = *MessageType + +func XDR_MessageType(v *MessageType) *MessageType { return v } + +var _XdrComments_MessageType = map[int32]string{ + int32(GET_PEERS): "gets a list of peers this guy knows about", + int32(GET_TX_SET): "gets a particular txset by hash", + int32(TRANSACTION): "pass on a tx you have heard about", + int32(GET_SCP_QUORUMSET): "SCP", + int32(HELLO): "new messages", +} + +func (e MessageType) XdrEnumComments() map[int32]string { + return _XdrComments_MessageType +} + +type XdrType_DontHave = *DontHave + +func (v *DontHave) XdrPointer() interface{} { return v } +func (DontHave) XdrTypeName() string { return "DontHave" } +func (v DontHave) XdrValue() interface{} { return v } +func (v *DontHave) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *DontHave) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%stype", name), XDR_MessageType(&v.Type)) + x.Marshal(x.Sprintf("%sreqHash", name), XDR_Uint256(&v.ReqHash)) +} +func XDR_DontHave(v *DontHave) *DontHave { return v } + +var _XdrNames_SurveyMessageCommandType = map[int32]string{ + int32(SURVEY_TOPOLOGY): "SURVEY_TOPOLOGY", +} +var _XdrValues_SurveyMessageCommandType = map[string]int32{ + "SURVEY_TOPOLOGY": int32(SURVEY_TOPOLOGY), +} + +func (SurveyMessageCommandType) XdrEnumNames() map[int32]string { + return _XdrNames_SurveyMessageCommandType +} +func (v SurveyMessageCommandType) String() string { + if s, ok := _XdrNames_SurveyMessageCommandType[int32(v)]; ok { + return s + } + return fmt.Sprintf("SurveyMessageCommandType#%d", v) +} +func (v *SurveyMessageCommandType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_SurveyMessageCommandType[stok]; ok { + *v = SurveyMessageCommandType(val) + return nil + } else if stok == "SurveyMessageCommandType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid SurveyMessageCommandType.", stok)) + } +} +func (v SurveyMessageCommandType) GetU32() uint32 { return uint32(v) } +func (v *SurveyMessageCommandType) SetU32(n uint32) { *v = SurveyMessageCommandType(n) } +func (v *SurveyMessageCommandType) XdrPointer() interface{} { return v } +func (SurveyMessageCommandType) XdrTypeName() string { return "SurveyMessageCommandType" } +func (v SurveyMessageCommandType) XdrValue() interface{} { return v } +func (v *SurveyMessageCommandType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_SurveyMessageCommandType = *SurveyMessageCommandType + +func XDR_SurveyMessageCommandType(v *SurveyMessageCommandType) *SurveyMessageCommandType { return v } + +type XdrType_SurveyRequestMessage = *SurveyRequestMessage + +func (v *SurveyRequestMessage) XdrPointer() interface{} { return v } +func (SurveyRequestMessage) XdrTypeName() string { return "SurveyRequestMessage" } +func (v SurveyRequestMessage) XdrValue() interface{} { return v } +func (v *SurveyRequestMessage) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SurveyRequestMessage) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssurveyorPeerID", name), XDR_NodeID(&v.SurveyorPeerID)) + x.Marshal(x.Sprintf("%ssurveyedPeerID", name), XDR_NodeID(&v.SurveyedPeerID)) + 
x.Marshal(x.Sprintf("%sledgerNum", name), XDR_Uint32(&v.LedgerNum)) + x.Marshal(x.Sprintf("%sencryptionKey", name), XDR_Curve25519Public(&v.EncryptionKey)) + x.Marshal(x.Sprintf("%scommandType", name), XDR_SurveyMessageCommandType(&v.CommandType)) +} +func XDR_SurveyRequestMessage(v *SurveyRequestMessage) *SurveyRequestMessage { return v } + +type XdrType_SignedSurveyRequestMessage = *SignedSurveyRequestMessage + +func (v *SignedSurveyRequestMessage) XdrPointer() interface{} { return v } +func (SignedSurveyRequestMessage) XdrTypeName() string { return "SignedSurveyRequestMessage" } +func (v SignedSurveyRequestMessage) XdrValue() interface{} { return v } +func (v *SignedSurveyRequestMessage) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SignedSurveyRequestMessage) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%srequestSignature", name), XDR_Signature(&v.RequestSignature)) + x.Marshal(x.Sprintf("%srequest", name), XDR_SurveyRequestMessage(&v.Request)) +} +func XDR_SignedSurveyRequestMessage(v *SignedSurveyRequestMessage) *SignedSurveyRequestMessage { + return v +} + +type XdrType_EncryptedBody struct { + XdrVecOpaque +} + +func XDR_EncryptedBody(v *EncryptedBody) XdrType_EncryptedBody { + return XdrType_EncryptedBody{XdrVecOpaque{v, 64000}} +} +func (XdrType_EncryptedBody) XdrTypeName() string { return "EncryptedBody" } +func (v XdrType_EncryptedBody) XdrUnwrap() XdrType { return v.XdrVecOpaque } + +type XdrType_SurveyResponseMessage = *SurveyResponseMessage + +func (v *SurveyResponseMessage) XdrPointer() interface{} { return v } +func (SurveyResponseMessage) XdrTypeName() string { return "SurveyResponseMessage" } +func (v SurveyResponseMessage) XdrValue() interface{} { return v } +func (v *SurveyResponseMessage) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SurveyResponseMessage) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssurveyorPeerID", name), XDR_NodeID(&v.SurveyorPeerID)) + x.Marshal(x.Sprintf("%ssurveyedPeerID", name), XDR_NodeID(&v.SurveyedPeerID)) + x.Marshal(x.Sprintf("%sledgerNum", name), XDR_Uint32(&v.LedgerNum)) + x.Marshal(x.Sprintf("%scommandType", name), XDR_SurveyMessageCommandType(&v.CommandType)) + x.Marshal(x.Sprintf("%sencryptedBody", name), XDR_EncryptedBody(&v.EncryptedBody)) +} +func XDR_SurveyResponseMessage(v *SurveyResponseMessage) *SurveyResponseMessage { return v } + +type XdrType_SignedSurveyResponseMessage = *SignedSurveyResponseMessage + +func (v *SignedSurveyResponseMessage) XdrPointer() interface{} { return v } +func (SignedSurveyResponseMessage) XdrTypeName() string { return "SignedSurveyResponseMessage" } +func (v SignedSurveyResponseMessage) XdrValue() interface{} { return v } +func (v *SignedSurveyResponseMessage) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SignedSurveyResponseMessage) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sresponseSignature", name), XDR_Signature(&v.ResponseSignature)) + x.Marshal(x.Sprintf("%sresponse", name), XDR_SurveyResponseMessage(&v.Response)) +} +func XDR_SignedSurveyResponseMessage(v *SignedSurveyResponseMessage) *SignedSurveyResponseMessage { + return v +} + +type XdrType_PeerStats = *PeerStats + +func (v *PeerStats) XdrPointer() interface{} { return v } +func (PeerStats) XdrTypeName() string { return "PeerStats" } +func (v PeerStats) XdrValue() interface{} { return v } 
+func (v *PeerStats) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *PeerStats) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sid", name), XDR_NodeID(&v.Id)) + x.Marshal(x.Sprintf("%sversionStr", name), XdrString{&v.VersionStr, 100}) + x.Marshal(x.Sprintf("%smessagesRead", name), XDR_Uint64(&v.MessagesRead)) + x.Marshal(x.Sprintf("%smessagesWritten", name), XDR_Uint64(&v.MessagesWritten)) + x.Marshal(x.Sprintf("%sbytesRead", name), XDR_Uint64(&v.BytesRead)) + x.Marshal(x.Sprintf("%sbytesWritten", name), XDR_Uint64(&v.BytesWritten)) + x.Marshal(x.Sprintf("%ssecondsConnected", name), XDR_Uint64(&v.SecondsConnected)) + x.Marshal(x.Sprintf("%suniqueFloodBytesRecv", name), XDR_Uint64(&v.UniqueFloodBytesRecv)) + x.Marshal(x.Sprintf("%sduplicateFloodBytesRecv", name), XDR_Uint64(&v.DuplicateFloodBytesRecv)) + x.Marshal(x.Sprintf("%suniqueFetchBytesRecv", name), XDR_Uint64(&v.UniqueFetchBytesRecv)) + x.Marshal(x.Sprintf("%sduplicateFetchBytesRecv", name), XDR_Uint64(&v.DuplicateFetchBytesRecv)) + x.Marshal(x.Sprintf("%suniqueFloodMessageRecv", name), XDR_Uint64(&v.UniqueFloodMessageRecv)) + x.Marshal(x.Sprintf("%sduplicateFloodMessageRecv", name), XDR_Uint64(&v.DuplicateFloodMessageRecv)) + x.Marshal(x.Sprintf("%suniqueFetchMessageRecv", name), XDR_Uint64(&v.UniqueFetchMessageRecv)) + x.Marshal(x.Sprintf("%sduplicateFetchMessageRecv", name), XDR_Uint64(&v.DuplicateFetchMessageRecv)) +} +func XDR_PeerStats(v *PeerStats) *PeerStats { return v } + +type _XdrVec_25_PeerStats []PeerStats + +func (_XdrVec_25_PeerStats) XdrBound() uint32 { + const bound uint32 = 25 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_25_PeerStats) XdrCheckLen(length uint32) { + if length > uint32(25) { + XdrPanic("_XdrVec_25_PeerStats length %d exceeds bound 25", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_25_PeerStats length %d exceeds max int", length) + } +} +func (v _XdrVec_25_PeerStats) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_25_PeerStats) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(25); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]PeerStats, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_25_PeerStats) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_PeerStats(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_25_PeerStats) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 25} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_25_PeerStats) XdrTypeName() string { return "PeerStats<>" } +func (v *_XdrVec_25_PeerStats) XdrPointer() interface{} { return (*[]PeerStats)(v) } +func (v _XdrVec_25_PeerStats) XdrValue() interface{} { return ([]PeerStats)(v) } +func (v *_XdrVec_25_PeerStats) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_PeerStatList struct { + *_XdrVec_25_PeerStats +} + +func XDR_PeerStatList(v *PeerStatList) XdrType_PeerStatList { + return 
XdrType_PeerStatList{(*_XdrVec_25_PeerStats)(v)} +} +func (XdrType_PeerStatList) XdrTypeName() string { return "PeerStatList" } +func (v XdrType_PeerStatList) XdrUnwrap() XdrType { return v._XdrVec_25_PeerStats } + +type XdrType_TopologyResponseBody = *TopologyResponseBody + +func (v *TopologyResponseBody) XdrPointer() interface{} { return v } +func (TopologyResponseBody) XdrTypeName() string { return "TopologyResponseBody" } +func (v TopologyResponseBody) XdrValue() interface{} { return v } +func (v *TopologyResponseBody) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TopologyResponseBody) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sinboundPeers", name), XDR_PeerStatList(&v.InboundPeers)) + x.Marshal(x.Sprintf("%soutboundPeers", name), XDR_PeerStatList(&v.OutboundPeers)) + x.Marshal(x.Sprintf("%stotalInboundPeerCount", name), XDR_Uint32(&v.TotalInboundPeerCount)) + x.Marshal(x.Sprintf("%stotalOutboundPeerCount", name), XDR_Uint32(&v.TotalOutboundPeerCount)) +} +func XDR_TopologyResponseBody(v *TopologyResponseBody) *TopologyResponseBody { return v } + +var _XdrTags_SurveyResponseBody = map[int32]bool{ + XdrToI32(SURVEY_TOPOLOGY): true, +} + +func (_ SurveyResponseBody) XdrValidTags() map[int32]bool { + return _XdrTags_SurveyResponseBody +} +func (u *SurveyResponseBody) TopologyResponseBody() *TopologyResponseBody { + switch u.Type { + case SURVEY_TOPOLOGY: + if v, ok := u._u.(*TopologyResponseBody); ok { + return v + } else { + var zero TopologyResponseBody + u._u = &zero + return &zero + } + default: + XdrPanic("SurveyResponseBody.TopologyResponseBody accessed when Type == %v", u.Type) + return nil + } +} +func (u SurveyResponseBody) XdrValid() bool { + switch u.Type { + case SURVEY_TOPOLOGY: + return true + } + return false +} +func (u *SurveyResponseBody) XdrUnionTag() XdrNum32 { + return XDR_SurveyMessageCommandType(&u.Type) +} +func (u *SurveyResponseBody) XdrUnionTagName() string { + return "Type" +} +func (u *SurveyResponseBody) XdrUnionBody() XdrType { + switch u.Type { + case SURVEY_TOPOLOGY: + return XDR_TopologyResponseBody(u.TopologyResponseBody()) + } + return nil +} +func (u *SurveyResponseBody) XdrUnionBodyName() string { + switch u.Type { + case SURVEY_TOPOLOGY: + return "TopologyResponseBody" + } + return "" +} + +type XdrType_SurveyResponseBody = *SurveyResponseBody + +func (v *SurveyResponseBody) XdrPointer() interface{} { return v } +func (SurveyResponseBody) XdrTypeName() string { return "SurveyResponseBody" } +func (v SurveyResponseBody) XdrValue() interface{} { return v } +func (v *SurveyResponseBody) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *SurveyResponseBody) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_SurveyMessageCommandType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case SURVEY_TOPOLOGY: + x.Marshal(x.Sprintf("%stopologyResponseBody", name), XDR_TopologyResponseBody(u.TopologyResponseBody())) + return + } + XdrPanic("invalid Type (%v) in SurveyResponseBody", u.Type) +} +func XDR_SurveyResponseBody(v *SurveyResponseBody) *SurveyResponseBody { return v } + +type _XdrVec_100_PeerAddress []PeerAddress + +func (_XdrVec_100_PeerAddress) XdrBound() uint32 { + const bound uint32 = 100 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_100_PeerAddress) XdrCheckLen(length uint32) { + if length > uint32(100) { + XdrPanic("_XdrVec_100_PeerAddress length %d exceeds 
bound 100", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_100_PeerAddress length %d exceeds max int", length) + } +} +func (v _XdrVec_100_PeerAddress) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_100_PeerAddress) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(100); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]PeerAddress, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_100_PeerAddress) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_PeerAddress(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_100_PeerAddress) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 100} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_100_PeerAddress) XdrTypeName() string { return "PeerAddress<>" } +func (v *_XdrVec_100_PeerAddress) XdrPointer() interface{} { return (*[]PeerAddress)(v) } +func (v _XdrVec_100_PeerAddress) XdrValue() interface{} { return ([]PeerAddress)(v) } +func (v *_XdrVec_100_PeerAddress) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +var _XdrTags_StellarMessage = map[int32]bool{ + XdrToI32(ERROR_MSG): true, + XdrToI32(HELLO): true, + XdrToI32(AUTH): true, + XdrToI32(DONT_HAVE): true, + XdrToI32(GET_PEERS): true, + XdrToI32(PEERS): true, + XdrToI32(GET_TX_SET): true, + XdrToI32(TX_SET): true, + XdrToI32(TRANSACTION): true, + XdrToI32(SURVEY_REQUEST): true, + XdrToI32(SURVEY_RESPONSE): true, + XdrToI32(GET_SCP_QUORUMSET): true, + XdrToI32(SCP_QUORUMSET): true, + XdrToI32(SCP_MESSAGE): true, + XdrToI32(GET_SCP_STATE): true, +} + +func (_ StellarMessage) XdrValidTags() map[int32]bool { + return _XdrTags_StellarMessage +} +func (u *StellarMessage) Error() *Error { + switch u.Type { + case ERROR_MSG: + if v, ok := u._u.(*Error); ok { + return v + } else { + var zero Error + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.Error accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) Hello() *Hello { + switch u.Type { + case HELLO: + if v, ok := u._u.(*Hello); ok { + return v + } else { + var zero Hello + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.Hello accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) Auth() *Auth { + switch u.Type { + case AUTH: + if v, ok := u._u.(*Auth); ok { + return v + } else { + var zero Auth + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.Auth accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) DontHave() *DontHave { + switch u.Type { + case DONT_HAVE: + if v, ok := u._u.(*DontHave); ok { + return v + } else { + var zero DontHave + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.DontHave accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) Peers() *[]PeerAddress { + switch u.Type { + case PEERS: + if v, ok := u._u.(*[]PeerAddress); ok { + return v + } else { + var zero []PeerAddress + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.Peers 
accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) TxSetHash() *Uint256 { + switch u.Type { + case GET_TX_SET: + if v, ok := u._u.(*Uint256); ok { + return v + } else { + var zero Uint256 + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.TxSetHash accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) TxSet() *TransactionSet { + switch u.Type { + case TX_SET: + if v, ok := u._u.(*TransactionSet); ok { + return v + } else { + var zero TransactionSet + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.TxSet accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) Transaction() *TransactionEnvelope { + switch u.Type { + case TRANSACTION: + if v, ok := u._u.(*TransactionEnvelope); ok { + return v + } else { + var zero TransactionEnvelope + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.Transaction accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) SignedSurveyRequestMessage() *SignedSurveyRequestMessage { + switch u.Type { + case SURVEY_REQUEST: + if v, ok := u._u.(*SignedSurveyRequestMessage); ok { + return v + } else { + var zero SignedSurveyRequestMessage + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.SignedSurveyRequestMessage accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) SignedSurveyResponseMessage() *SignedSurveyResponseMessage { + switch u.Type { + case SURVEY_RESPONSE: + if v, ok := u._u.(*SignedSurveyResponseMessage); ok { + return v + } else { + var zero SignedSurveyResponseMessage + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.SignedSurveyResponseMessage accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) QSetHash() *Uint256 { + switch u.Type { + case GET_SCP_QUORUMSET: + if v, ok := u._u.(*Uint256); ok { + return v + } else { + var zero Uint256 + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.QSetHash accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) QSet() *SCPQuorumSet { + switch u.Type { + case SCP_QUORUMSET: + if v, ok := u._u.(*SCPQuorumSet); ok { + return v + } else { + var zero SCPQuorumSet + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.QSet accessed when Type == %v", u.Type) + return nil + } +} +func (u *StellarMessage) Envelope() *SCPEnvelope { + switch u.Type { + case SCP_MESSAGE: + if v, ok := u._u.(*SCPEnvelope); ok { + return v + } else { + var zero SCPEnvelope + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.Envelope accessed when Type == %v", u.Type) + return nil + } +} + +// ledger seq requested ; if 0, requests the latest +func (u *StellarMessage) GetSCPLedgerSeq() *Uint32 { + switch u.Type { + case GET_SCP_STATE: + if v, ok := u._u.(*Uint32); ok { + return v + } else { + var zero Uint32 + u._u = &zero + return &zero + } + default: + XdrPanic("StellarMessage.GetSCPLedgerSeq accessed when Type == %v", u.Type) + return nil + } +} +func (u StellarMessage) XdrValid() bool { + switch u.Type { + case ERROR_MSG, HELLO, AUTH, DONT_HAVE, GET_PEERS, PEERS, GET_TX_SET, TX_SET, TRANSACTION, SURVEY_REQUEST, SURVEY_RESPONSE, GET_SCP_QUORUMSET, SCP_QUORUMSET, SCP_MESSAGE, GET_SCP_STATE: + return true + } + return false +} +func (u *StellarMessage) XdrUnionTag() XdrNum32 { + return XDR_MessageType(&u.Type) +} +func (u *StellarMessage) XdrUnionTagName() string { + return "Type" +} +func (u 
*StellarMessage) XdrUnionBody() XdrType { + switch u.Type { + case ERROR_MSG: + return XDR_Error(u.Error()) + case HELLO: + return XDR_Hello(u.Hello()) + case AUTH: + return XDR_Auth(u.Auth()) + case DONT_HAVE: + return XDR_DontHave(u.DontHave()) + case GET_PEERS: + return nil + case PEERS: + return (*_XdrVec_100_PeerAddress)(u.Peers()) + case GET_TX_SET: + return XDR_Uint256(u.TxSetHash()) + case TX_SET: + return XDR_TransactionSet(u.TxSet()) + case TRANSACTION: + return XDR_TransactionEnvelope(u.Transaction()) + case SURVEY_REQUEST: + return XDR_SignedSurveyRequestMessage(u.SignedSurveyRequestMessage()) + case SURVEY_RESPONSE: + return XDR_SignedSurveyResponseMessage(u.SignedSurveyResponseMessage()) + case GET_SCP_QUORUMSET: + return XDR_Uint256(u.QSetHash()) + case SCP_QUORUMSET: + return XDR_SCPQuorumSet(u.QSet()) + case SCP_MESSAGE: + return XDR_SCPEnvelope(u.Envelope()) + case GET_SCP_STATE: + return XDR_Uint32(u.GetSCPLedgerSeq()) + } + return nil +} +func (u *StellarMessage) XdrUnionBodyName() string { + switch u.Type { + case ERROR_MSG: + return "Error" + case HELLO: + return "Hello" + case AUTH: + return "Auth" + case DONT_HAVE: + return "DontHave" + case GET_PEERS: + return "" + case PEERS: + return "Peers" + case GET_TX_SET: + return "TxSetHash" + case TX_SET: + return "TxSet" + case TRANSACTION: + return "Transaction" + case SURVEY_REQUEST: + return "SignedSurveyRequestMessage" + case SURVEY_RESPONSE: + return "SignedSurveyResponseMessage" + case GET_SCP_QUORUMSET: + return "QSetHash" + case SCP_QUORUMSET: + return "QSet" + case SCP_MESSAGE: + return "Envelope" + case GET_SCP_STATE: + return "GetSCPLedgerSeq" + } + return "" +} + +type XdrType_StellarMessage = *StellarMessage + +func (v *StellarMessage) XdrPointer() interface{} { return v } +func (StellarMessage) XdrTypeName() string { return "StellarMessage" } +func (v StellarMessage) XdrValue() interface{} { return v } +func (v *StellarMessage) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *StellarMessage) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_MessageType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ERROR_MSG: + x.Marshal(x.Sprintf("%serror", name), XDR_Error(u.Error())) + return + case HELLO: + x.Marshal(x.Sprintf("%shello", name), XDR_Hello(u.Hello())) + return + case AUTH: + x.Marshal(x.Sprintf("%sauth", name), XDR_Auth(u.Auth())) + return + case DONT_HAVE: + x.Marshal(x.Sprintf("%sdontHave", name), XDR_DontHave(u.DontHave())) + return + case GET_PEERS: + return + case PEERS: + x.Marshal(x.Sprintf("%speers", name), (*_XdrVec_100_PeerAddress)(u.Peers())) + return + case GET_TX_SET: + x.Marshal(x.Sprintf("%stxSetHash", name), XDR_Uint256(u.TxSetHash())) + return + case TX_SET: + x.Marshal(x.Sprintf("%stxSet", name), XDR_TransactionSet(u.TxSet())) + return + case TRANSACTION: + x.Marshal(x.Sprintf("%stransaction", name), XDR_TransactionEnvelope(u.Transaction())) + return + case SURVEY_REQUEST: + x.Marshal(x.Sprintf("%ssignedSurveyRequestMessage", name), XDR_SignedSurveyRequestMessage(u.SignedSurveyRequestMessage())) + return + case SURVEY_RESPONSE: + x.Marshal(x.Sprintf("%ssignedSurveyResponseMessage", name), XDR_SignedSurveyResponseMessage(u.SignedSurveyResponseMessage())) + return + case GET_SCP_QUORUMSET: + x.Marshal(x.Sprintf("%sqSetHash", name), XDR_Uint256(u.QSetHash())) + return + case SCP_QUORUMSET: + x.Marshal(x.Sprintf("%sqSet", name), XDR_SCPQuorumSet(u.QSet())) + return + case SCP_MESSAGE: + 
x.Marshal(x.Sprintf("%senvelope", name), XDR_SCPEnvelope(u.Envelope())) + return + case GET_SCP_STATE: + x.Marshal(x.Sprintf("%sgetSCPLedgerSeq", name), XDR_Uint32(u.GetSCPLedgerSeq())) + return + } + XdrPanic("invalid Type (%v) in StellarMessage", u.Type) +} +func XDR_StellarMessage(v *StellarMessage) *StellarMessage { return v } + +type XdrType_XdrAnon_AuthenticatedMessage_V0 = *XdrAnon_AuthenticatedMessage_V0 + +func (v *XdrAnon_AuthenticatedMessage_V0) XdrPointer() interface{} { return v } +func (XdrAnon_AuthenticatedMessage_V0) XdrTypeName() string { return "XdrAnon_AuthenticatedMessage_V0" } +func (v XdrAnon_AuthenticatedMessage_V0) XdrValue() interface{} { return v } +func (v *XdrAnon_AuthenticatedMessage_V0) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_AuthenticatedMessage_V0) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssequence", name), XDR_Uint64(&v.Sequence)) + x.Marshal(x.Sprintf("%smessage", name), XDR_StellarMessage(&v.Message)) + x.Marshal(x.Sprintf("%smac", name), XDR_HmacSha256Mac(&v.Mac)) +} +func XDR_XdrAnon_AuthenticatedMessage_V0(v *XdrAnon_AuthenticatedMessage_V0) *XdrAnon_AuthenticatedMessage_V0 { + return v +} + +var _XdrTags_AuthenticatedMessage = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ AuthenticatedMessage) XdrValidTags() map[int32]bool { + return _XdrTags_AuthenticatedMessage +} +func (u *AuthenticatedMessage) V0() *XdrAnon_AuthenticatedMessage_V0 { + switch u.V { + case 0: + if v, ok := u._u.(*XdrAnon_AuthenticatedMessage_V0); ok { + return v + } else { + var zero XdrAnon_AuthenticatedMessage_V0 + u._u = &zero + return &zero + } + default: + XdrPanic("AuthenticatedMessage.V0 accessed when V == %v", u.V) + return nil + } +} +func (u AuthenticatedMessage) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *AuthenticatedMessage) XdrUnionTag() XdrNum32 { + return XDR_Uint32(&u.V) +} +func (u *AuthenticatedMessage) XdrUnionTagName() string { + return "V" +} +func (u *AuthenticatedMessage) XdrUnionBody() XdrType { + switch u.V { + case 0: + return XDR_XdrAnon_AuthenticatedMessage_V0(u.V0()) + } + return nil +} +func (u *AuthenticatedMessage) XdrUnionBodyName() string { + switch u.V { + case 0: + return "V0" + } + return "" +} + +type XdrType_AuthenticatedMessage = *AuthenticatedMessage + +func (v *AuthenticatedMessage) XdrPointer() interface{} { return v } +func (AuthenticatedMessage) XdrTypeName() string { return "AuthenticatedMessage" } +func (v AuthenticatedMessage) XdrValue() interface{} { return v } +func (v *AuthenticatedMessage) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *AuthenticatedMessage) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_Uint32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + x.Marshal(x.Sprintf("%sv0", name), XDR_XdrAnon_AuthenticatedMessage_V0(u.V0())) + return + } + XdrPanic("invalid V (%v) in AuthenticatedMessage", u.V) +} +func XDR_AuthenticatedMessage(v *AuthenticatedMessage) *AuthenticatedMessage { return v } + +var _XdrTags_LiquidityPoolParameters = map[int32]bool{ + XdrToI32(LIQUIDITY_POOL_CONSTANT_PRODUCT): true, +} + +func (_ LiquidityPoolParameters) XdrValidTags() map[int32]bool { + return _XdrTags_LiquidityPoolParameters +} +func (u *LiquidityPoolParameters) ConstantProduct() *LiquidityPoolConstantProductParameters { + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + if v, ok := 
u._u.(*LiquidityPoolConstantProductParameters); ok { + return v + } else { + var zero LiquidityPoolConstantProductParameters + u._u = &zero + return &zero + } + default: + XdrPanic("LiquidityPoolParameters.ConstantProduct accessed when Type == %v", u.Type) + return nil + } +} +func (u LiquidityPoolParameters) XdrValid() bool { + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + return true + } + return false +} +func (u *LiquidityPoolParameters) XdrUnionTag() XdrNum32 { + return XDR_LiquidityPoolType(&u.Type) +} +func (u *LiquidityPoolParameters) XdrUnionTagName() string { + return "Type" +} +func (u *LiquidityPoolParameters) XdrUnionBody() XdrType { + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + return XDR_LiquidityPoolConstantProductParameters(u.ConstantProduct()) + } + return nil +} +func (u *LiquidityPoolParameters) XdrUnionBodyName() string { + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + return "ConstantProduct" + } + return "" +} + +type XdrType_LiquidityPoolParameters = *LiquidityPoolParameters + +func (v *LiquidityPoolParameters) XdrPointer() interface{} { return v } +func (LiquidityPoolParameters) XdrTypeName() string { return "LiquidityPoolParameters" } +func (v LiquidityPoolParameters) XdrValue() interface{} { return v } +func (v *LiquidityPoolParameters) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *LiquidityPoolParameters) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_LiquidityPoolType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + x.Marshal(x.Sprintf("%sconstantProduct", name), XDR_LiquidityPoolConstantProductParameters(u.ConstantProduct())) + return + } + XdrPanic("invalid Type (%v) in LiquidityPoolParameters", u.Type) +} +func XDR_LiquidityPoolParameters(v *LiquidityPoolParameters) *LiquidityPoolParameters { return v } + +type XdrType_XdrAnon_MuxedAccount_Med25519 = *XdrAnon_MuxedAccount_Med25519 + +func (v *XdrAnon_MuxedAccount_Med25519) XdrPointer() interface{} { return v } +func (XdrAnon_MuxedAccount_Med25519) XdrTypeName() string { return "XdrAnon_MuxedAccount_Med25519" } +func (v XdrAnon_MuxedAccount_Med25519) XdrValue() interface{} { return v } +func (v *XdrAnon_MuxedAccount_Med25519) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_MuxedAccount_Med25519) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sid", name), XDR_Uint64(&v.Id)) + x.Marshal(x.Sprintf("%sed25519", name), XDR_Uint256(&v.Ed25519)) +} +func XDR_XdrAnon_MuxedAccount_Med25519(v *XdrAnon_MuxedAccount_Med25519) *XdrAnon_MuxedAccount_Med25519 { + return v +} + +var _XdrTags_MuxedAccount = map[int32]bool{ + XdrToI32(KEY_TYPE_ED25519): true, + XdrToI32(KEY_TYPE_MUXED_ED25519): true, +} + +func (_ MuxedAccount) XdrValidTags() map[int32]bool { + return _XdrTags_MuxedAccount +} +func (u *MuxedAccount) Ed25519() *Uint256 { + switch u.Type { + case KEY_TYPE_ED25519: + if v, ok := u._u.(*Uint256); ok { + return v + } else { + var zero Uint256 + u._u = &zero + return &zero + } + default: + XdrPanic("MuxedAccount.Ed25519 accessed when Type == %v", u.Type) + return nil + } +} +func (u *MuxedAccount) Med25519() *XdrAnon_MuxedAccount_Med25519 { + switch u.Type { + case KEY_TYPE_MUXED_ED25519: + if v, ok := u._u.(*XdrAnon_MuxedAccount_Med25519); ok { + return v + } else { + var zero XdrAnon_MuxedAccount_Med25519 + u._u = &zero + return &zero + } + default: + 
XdrPanic("MuxedAccount.Med25519 accessed when Type == %v", u.Type) + return nil + } +} +func (u MuxedAccount) XdrValid() bool { + switch u.Type { + case KEY_TYPE_ED25519, KEY_TYPE_MUXED_ED25519: + return true + } + return false +} +func (u *MuxedAccount) XdrUnionTag() XdrNum32 { + return XDR_CryptoKeyType(&u.Type) +} +func (u *MuxedAccount) XdrUnionTagName() string { + return "Type" +} +func (u *MuxedAccount) XdrUnionBody() XdrType { + switch u.Type { + case KEY_TYPE_ED25519: + return XDR_Uint256(u.Ed25519()) + case KEY_TYPE_MUXED_ED25519: + return XDR_XdrAnon_MuxedAccount_Med25519(u.Med25519()) + } + return nil +} +func (u *MuxedAccount) XdrUnionBodyName() string { + switch u.Type { + case KEY_TYPE_ED25519: + return "Ed25519" + case KEY_TYPE_MUXED_ED25519: + return "Med25519" + } + return "" +} + +type XdrType_MuxedAccount = *MuxedAccount + +func (v *MuxedAccount) XdrPointer() interface{} { return v } +func (MuxedAccount) XdrTypeName() string { return "MuxedAccount" } +func (v MuxedAccount) XdrValue() interface{} { return v } +func (v *MuxedAccount) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *MuxedAccount) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_CryptoKeyType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case KEY_TYPE_ED25519: + x.Marshal(x.Sprintf("%sed25519", name), XDR_Uint256(u.Ed25519())) + return + case KEY_TYPE_MUXED_ED25519: + x.Marshal(x.Sprintf("%smed25519", name), XDR_XdrAnon_MuxedAccount_Med25519(u.Med25519())) + return + } + XdrPanic("invalid Type (%v) in MuxedAccount", u.Type) +} +func XDR_MuxedAccount(v *MuxedAccount) *MuxedAccount { return v } + +type XdrType_DecoratedSignature = *DecoratedSignature + +func (v *DecoratedSignature) XdrPointer() interface{} { return v } +func (DecoratedSignature) XdrTypeName() string { return "DecoratedSignature" } +func (v DecoratedSignature) XdrValue() interface{} { return v } +func (v *DecoratedSignature) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *DecoratedSignature) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%shint", name), XDR_SignatureHint(&v.Hint)) + x.Marshal(x.Sprintf("%ssignature", name), XDR_Signature(&v.Signature)) +} +func XDR_DecoratedSignature(v *DecoratedSignature) *DecoratedSignature { return v } + +var _XdrNames_OperationType = map[int32]string{ + int32(CREATE_ACCOUNT): "CREATE_ACCOUNT", + int32(PAYMENT): "PAYMENT", + int32(PATH_PAYMENT_STRICT_RECEIVE): "PATH_PAYMENT_STRICT_RECEIVE", + int32(MANAGE_SELL_OFFER): "MANAGE_SELL_OFFER", + int32(CREATE_PASSIVE_SELL_OFFER): "CREATE_PASSIVE_SELL_OFFER", + int32(SET_OPTIONS): "SET_OPTIONS", + int32(CHANGE_TRUST): "CHANGE_TRUST", + int32(ALLOW_TRUST): "ALLOW_TRUST", + int32(ACCOUNT_MERGE): "ACCOUNT_MERGE", + int32(INFLATION): "INFLATION", + int32(MANAGE_DATA): "MANAGE_DATA", + int32(BUMP_SEQUENCE): "BUMP_SEQUENCE", + int32(MANAGE_BUY_OFFER): "MANAGE_BUY_OFFER", + int32(PATH_PAYMENT_STRICT_SEND): "PATH_PAYMENT_STRICT_SEND", + int32(CREATE_CLAIMABLE_BALANCE): "CREATE_CLAIMABLE_BALANCE", + int32(CLAIM_CLAIMABLE_BALANCE): "CLAIM_CLAIMABLE_BALANCE", + int32(BEGIN_SPONSORING_FUTURE_RESERVES): "BEGIN_SPONSORING_FUTURE_RESERVES", + int32(END_SPONSORING_FUTURE_RESERVES): "END_SPONSORING_FUTURE_RESERVES", + int32(REVOKE_SPONSORSHIP): "REVOKE_SPONSORSHIP", + int32(CLAWBACK): "CLAWBACK", + int32(CLAWBACK_CLAIMABLE_BALANCE): "CLAWBACK_CLAIMABLE_BALANCE", + int32(SET_TRUST_LINE_FLAGS): "SET_TRUST_LINE_FLAGS", + 
int32(LIQUIDITY_POOL_DEPOSIT): "LIQUIDITY_POOL_DEPOSIT", + int32(LIQUIDITY_POOL_WITHDRAW): "LIQUIDITY_POOL_WITHDRAW", +} +var _XdrValues_OperationType = map[string]int32{ + "CREATE_ACCOUNT": int32(CREATE_ACCOUNT), + "PAYMENT": int32(PAYMENT), + "PATH_PAYMENT_STRICT_RECEIVE": int32(PATH_PAYMENT_STRICT_RECEIVE), + "MANAGE_SELL_OFFER": int32(MANAGE_SELL_OFFER), + "CREATE_PASSIVE_SELL_OFFER": int32(CREATE_PASSIVE_SELL_OFFER), + "SET_OPTIONS": int32(SET_OPTIONS), + "CHANGE_TRUST": int32(CHANGE_TRUST), + "ALLOW_TRUST": int32(ALLOW_TRUST), + "ACCOUNT_MERGE": int32(ACCOUNT_MERGE), + "INFLATION": int32(INFLATION), + "MANAGE_DATA": int32(MANAGE_DATA), + "BUMP_SEQUENCE": int32(BUMP_SEQUENCE), + "MANAGE_BUY_OFFER": int32(MANAGE_BUY_OFFER), + "PATH_PAYMENT_STRICT_SEND": int32(PATH_PAYMENT_STRICT_SEND), + "CREATE_CLAIMABLE_BALANCE": int32(CREATE_CLAIMABLE_BALANCE), + "CLAIM_CLAIMABLE_BALANCE": int32(CLAIM_CLAIMABLE_BALANCE), + "BEGIN_SPONSORING_FUTURE_RESERVES": int32(BEGIN_SPONSORING_FUTURE_RESERVES), + "END_SPONSORING_FUTURE_RESERVES": int32(END_SPONSORING_FUTURE_RESERVES), + "REVOKE_SPONSORSHIP": int32(REVOKE_SPONSORSHIP), + "CLAWBACK": int32(CLAWBACK), + "CLAWBACK_CLAIMABLE_BALANCE": int32(CLAWBACK_CLAIMABLE_BALANCE), + "SET_TRUST_LINE_FLAGS": int32(SET_TRUST_LINE_FLAGS), + "LIQUIDITY_POOL_DEPOSIT": int32(LIQUIDITY_POOL_DEPOSIT), + "LIQUIDITY_POOL_WITHDRAW": int32(LIQUIDITY_POOL_WITHDRAW), +} + +func (OperationType) XdrEnumNames() map[int32]string { + return _XdrNames_OperationType +} +func (v OperationType) String() string { + if s, ok := _XdrNames_OperationType[int32(v)]; ok { + return s + } + return fmt.Sprintf("OperationType#%d", v) +} +func (v *OperationType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_OperationType[stok]; ok { + *v = OperationType(val) + return nil + } else if stok == "OperationType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid OperationType.", stok)) + } +} +func (v OperationType) GetU32() uint32 { return uint32(v) } +func (v *OperationType) SetU32(n uint32) { *v = OperationType(n) } +func (v *OperationType) XdrPointer() interface{} { return v } +func (OperationType) XdrTypeName() string { return "OperationType" } +func (v OperationType) XdrValue() interface{} { return v } +func (v *OperationType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_OperationType = *OperationType + +func XDR_OperationType(v *OperationType) *OperationType { return v } + +type XdrType_CreateAccountOp = *CreateAccountOp + +func (v *CreateAccountOp) XdrPointer() interface{} { return v } +func (CreateAccountOp) XdrTypeName() string { return "CreateAccountOp" } +func (v CreateAccountOp) XdrValue() interface{} { return v } +func (v *CreateAccountOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *CreateAccountOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sdestination", name), XDR_AccountID(&v.Destination)) + x.Marshal(x.Sprintf("%sstartingBalance", name), XDR_Int64(&v.StartingBalance)) +} +func XDR_CreateAccountOp(v *CreateAccountOp) *CreateAccountOp { return v } + +type XdrType_PaymentOp = *PaymentOp + +func (v *PaymentOp) XdrPointer() interface{} { return v } +func (PaymentOp) XdrTypeName() string { return "PaymentOp" } +func (v PaymentOp) XdrValue() interface{} { return v } 
+func (v *PaymentOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *PaymentOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sdestination", name), XDR_MuxedAccount(&v.Destination)) + x.Marshal(x.Sprintf("%sasset", name), XDR_Asset(&v.Asset)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) +} +func XDR_PaymentOp(v *PaymentOp) *PaymentOp { return v } + +type _XdrVec_5_Asset []Asset + +func (_XdrVec_5_Asset) XdrBound() uint32 { + const bound uint32 = 5 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_5_Asset) XdrCheckLen(length uint32) { + if length > uint32(5) { + XdrPanic("_XdrVec_5_Asset length %d exceeds bound 5", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_5_Asset length %d exceeds max int", length) + } +} +func (v _XdrVec_5_Asset) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_5_Asset) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(5); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]Asset, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_5_Asset) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_Asset(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_5_Asset) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 5} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_5_Asset) XdrTypeName() string { return "Asset<>" } +func (v *_XdrVec_5_Asset) XdrPointer() interface{} { return (*[]Asset)(v) } +func (v _XdrVec_5_Asset) XdrValue() interface{} { return ([]Asset)(v) } +func (v *_XdrVec_5_Asset) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_PathPaymentStrictReceiveOp = *PathPaymentStrictReceiveOp + +func (v *PathPaymentStrictReceiveOp) XdrPointer() interface{} { return v } +func (PathPaymentStrictReceiveOp) XdrTypeName() string { return "PathPaymentStrictReceiveOp" } +func (v PathPaymentStrictReceiveOp) XdrValue() interface{} { return v } +func (v *PathPaymentStrictReceiveOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *PathPaymentStrictReceiveOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssendAsset", name), XDR_Asset(&v.SendAsset)) + x.Marshal(x.Sprintf("%ssendMax", name), XDR_Int64(&v.SendMax)) + x.Marshal(x.Sprintf("%sdestination", name), XDR_MuxedAccount(&v.Destination)) + x.Marshal(x.Sprintf("%sdestAsset", name), XDR_Asset(&v.DestAsset)) + x.Marshal(x.Sprintf("%sdestAmount", name), XDR_Int64(&v.DestAmount)) + x.Marshal(x.Sprintf("%spath", name), (*_XdrVec_5_Asset)(&v.Path)) +} +func XDR_PathPaymentStrictReceiveOp(v *PathPaymentStrictReceiveOp) *PathPaymentStrictReceiveOp { + return v +} + +type XdrType_PathPaymentStrictSendOp = *PathPaymentStrictSendOp + +func (v *PathPaymentStrictSendOp) XdrPointer() interface{} { return v } +func (PathPaymentStrictSendOp) XdrTypeName() string { return "PathPaymentStrictSendOp" } +func (v PathPaymentStrictSendOp) XdrValue() 
interface{} { return v } +func (v *PathPaymentStrictSendOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *PathPaymentStrictSendOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssendAsset", name), XDR_Asset(&v.SendAsset)) + x.Marshal(x.Sprintf("%ssendAmount", name), XDR_Int64(&v.SendAmount)) + x.Marshal(x.Sprintf("%sdestination", name), XDR_MuxedAccount(&v.Destination)) + x.Marshal(x.Sprintf("%sdestAsset", name), XDR_Asset(&v.DestAsset)) + x.Marshal(x.Sprintf("%sdestMin", name), XDR_Int64(&v.DestMin)) + x.Marshal(x.Sprintf("%spath", name), (*_XdrVec_5_Asset)(&v.Path)) +} +func XDR_PathPaymentStrictSendOp(v *PathPaymentStrictSendOp) *PathPaymentStrictSendOp { return v } + +type XdrType_ManageSellOfferOp = *ManageSellOfferOp + +func (v *ManageSellOfferOp) XdrPointer() interface{} { return v } +func (ManageSellOfferOp) XdrTypeName() string { return "ManageSellOfferOp" } +func (v ManageSellOfferOp) XdrValue() interface{} { return v } +func (v *ManageSellOfferOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ManageSellOfferOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sselling", name), XDR_Asset(&v.Selling)) + x.Marshal(x.Sprintf("%sbuying", name), XDR_Asset(&v.Buying)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) + x.Marshal(x.Sprintf("%sprice", name), XDR_Price(&v.Price)) + x.Marshal(x.Sprintf("%sofferID", name), XDR_Int64(&v.OfferID)) +} +func XDR_ManageSellOfferOp(v *ManageSellOfferOp) *ManageSellOfferOp { return v } + +type XdrType_ManageBuyOfferOp = *ManageBuyOfferOp + +func (v *ManageBuyOfferOp) XdrPointer() interface{} { return v } +func (ManageBuyOfferOp) XdrTypeName() string { return "ManageBuyOfferOp" } +func (v ManageBuyOfferOp) XdrValue() interface{} { return v } +func (v *ManageBuyOfferOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ManageBuyOfferOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sselling", name), XDR_Asset(&v.Selling)) + x.Marshal(x.Sprintf("%sbuying", name), XDR_Asset(&v.Buying)) + x.Marshal(x.Sprintf("%sbuyAmount", name), XDR_Int64(&v.BuyAmount)) + x.Marshal(x.Sprintf("%sprice", name), XDR_Price(&v.Price)) + x.Marshal(x.Sprintf("%sofferID", name), XDR_Int64(&v.OfferID)) +} +func XDR_ManageBuyOfferOp(v *ManageBuyOfferOp) *ManageBuyOfferOp { return v } + +type XdrType_CreatePassiveSellOfferOp = *CreatePassiveSellOfferOp + +func (v *CreatePassiveSellOfferOp) XdrPointer() interface{} { return v } +func (CreatePassiveSellOfferOp) XdrTypeName() string { return "CreatePassiveSellOfferOp" } +func (v CreatePassiveSellOfferOp) XdrValue() interface{} { return v } +func (v *CreatePassiveSellOfferOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *CreatePassiveSellOfferOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sselling", name), XDR_Asset(&v.Selling)) + x.Marshal(x.Sprintf("%sbuying", name), XDR_Asset(&v.Buying)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) + x.Marshal(x.Sprintf("%sprice", name), XDR_Price(&v.Price)) +} +func XDR_CreatePassiveSellOfferOp(v *CreatePassiveSellOfferOp) *CreatePassiveSellOfferOp { return v } + +type _XdrPtr_Uint32 struct { + p **Uint32 +} +type _ptrflag_Uint32 _XdrPtr_Uint32 + +func (v _ptrflag_Uint32) String() string { + if *v.p == nil { + return "nil" + } + return 
"non-nil" +} +func (v _ptrflag_Uint32) Scan(ss fmt.ScanState, r rune) error { + tok, err := ss.Token(true, func(c rune) bool { + return c == '-' || (c >= 'a' && c <= 'z') + }) + if err != nil { + return err + } + switch string(tok) { + case "nil": + v.SetU32(0) + case "non-nil": + v.SetU32(1) + default: + return XdrError("Uint32 flag should be \"nil\" or \"non-nil\"") + } + return nil +} +func (v _ptrflag_Uint32) GetU32() uint32 { + if *v.p == nil { + return 0 + } + return 1 +} +func (v _ptrflag_Uint32) SetU32(nv uint32) { + switch nv { + case 0: + *v.p = nil + case 1: + if *v.p == nil { + *v.p = new(Uint32) + } + default: + XdrPanic("*Uint32 present flag value %d should be 0 or 1", nv) + } +} +func (_ptrflag_Uint32) XdrTypeName() string { return "Uint32?" } +func (v _ptrflag_Uint32) XdrPointer() interface{} { return nil } +func (v _ptrflag_Uint32) XdrValue() interface{} { return v.GetU32() != 0 } +func (v _ptrflag_Uint32) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _ptrflag_Uint32) XdrBound() uint32 { return 1 } +func (v _XdrPtr_Uint32) GetPresent() bool { return *v.p != nil } +func (v _XdrPtr_Uint32) SetPresent(present bool) { + if !present { + *v.p = nil + } else if *v.p == nil { + *v.p = new(Uint32) + } +} +func (v _XdrPtr_Uint32) XdrMarshalValue(x XDR, name string) { + if *v.p != nil { + XDR_Uint32(*v.p).XdrMarshal(x, name) + } +} +func (v _XdrPtr_Uint32) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _XdrPtr_Uint32) XdrRecurse(x XDR, name string) { + x.Marshal(name, _ptrflag_Uint32(v)) + v.XdrMarshalValue(x, name) +} +func (_XdrPtr_Uint32) XdrTypeName() string { return "Uint32*" } +func (v _XdrPtr_Uint32) XdrPointer() interface{} { return v.p } +func (v _XdrPtr_Uint32) XdrValue() interface{} { return *v.p } + +type _XdrPtr_String32 struct { + p **String32 +} +type _ptrflag_String32 _XdrPtr_String32 + +func (v _ptrflag_String32) String() string { + if *v.p == nil { + return "nil" + } + return "non-nil" +} +func (v _ptrflag_String32) Scan(ss fmt.ScanState, r rune) error { + tok, err := ss.Token(true, func(c rune) bool { + return c == '-' || (c >= 'a' && c <= 'z') + }) + if err != nil { + return err + } + switch string(tok) { + case "nil": + v.SetU32(0) + case "non-nil": + v.SetU32(1) + default: + return XdrError("String32 flag should be \"nil\" or \"non-nil\"") + } + return nil +} +func (v _ptrflag_String32) GetU32() uint32 { + if *v.p == nil { + return 0 + } + return 1 +} +func (v _ptrflag_String32) SetU32(nv uint32) { + switch nv { + case 0: + *v.p = nil + case 1: + if *v.p == nil { + *v.p = new(String32) + } + default: + XdrPanic("*String32 present flag value %d should be 0 or 1", nv) + } +} +func (_ptrflag_String32) XdrTypeName() string { return "String32?" 
} +func (v _ptrflag_String32) XdrPointer() interface{} { return nil } +func (v _ptrflag_String32) XdrValue() interface{} { return v.GetU32() != 0 } +func (v _ptrflag_String32) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _ptrflag_String32) XdrBound() uint32 { return 1 } +func (v _XdrPtr_String32) GetPresent() bool { return *v.p != nil } +func (v _XdrPtr_String32) SetPresent(present bool) { + if !present { + *v.p = nil + } else if *v.p == nil { + *v.p = new(String32) + } +} +func (v _XdrPtr_String32) XdrMarshalValue(x XDR, name string) { + if *v.p != nil { + XDR_String32(*v.p).XdrMarshal(x, name) + } +} +func (v _XdrPtr_String32) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _XdrPtr_String32) XdrRecurse(x XDR, name string) { + x.Marshal(name, _ptrflag_String32(v)) + v.XdrMarshalValue(x, name) +} +func (_XdrPtr_String32) XdrTypeName() string { return "String32*" } +func (v _XdrPtr_String32) XdrPointer() interface{} { return v.p } +func (v _XdrPtr_String32) XdrValue() interface{} { return *v.p } + +type _XdrPtr_Signer struct { + p **Signer +} +type _ptrflag_Signer _XdrPtr_Signer + +func (v _ptrflag_Signer) String() string { + if *v.p == nil { + return "nil" + } + return "non-nil" +} +func (v _ptrflag_Signer) Scan(ss fmt.ScanState, r rune) error { + tok, err := ss.Token(true, func(c rune) bool { + return c == '-' || (c >= 'a' && c <= 'z') + }) + if err != nil { + return err + } + switch string(tok) { + case "nil": + v.SetU32(0) + case "non-nil": + v.SetU32(1) + default: + return XdrError("Signer flag should be \"nil\" or \"non-nil\"") + } + return nil +} +func (v _ptrflag_Signer) GetU32() uint32 { + if *v.p == nil { + return 0 + } + return 1 +} +func (v _ptrflag_Signer) SetU32(nv uint32) { + switch nv { + case 0: + *v.p = nil + case 1: + if *v.p == nil { + *v.p = new(Signer) + } + default: + XdrPanic("*Signer present flag value %d should be 0 or 1", nv) + } +} +func (_ptrflag_Signer) XdrTypeName() string { return "Signer?" 
} +func (v _ptrflag_Signer) XdrPointer() interface{} { return nil } +func (v _ptrflag_Signer) XdrValue() interface{} { return v.GetU32() != 0 } +func (v _ptrflag_Signer) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _ptrflag_Signer) XdrBound() uint32 { return 1 } +func (v _XdrPtr_Signer) GetPresent() bool { return *v.p != nil } +func (v _XdrPtr_Signer) SetPresent(present bool) { + if !present { + *v.p = nil + } else if *v.p == nil { + *v.p = new(Signer) + } +} +func (v _XdrPtr_Signer) XdrMarshalValue(x XDR, name string) { + if *v.p != nil { + XDR_Signer(*v.p).XdrMarshal(x, name) + } +} +func (v _XdrPtr_Signer) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _XdrPtr_Signer) XdrRecurse(x XDR, name string) { + x.Marshal(name, _ptrflag_Signer(v)) + v.XdrMarshalValue(x, name) +} +func (_XdrPtr_Signer) XdrTypeName() string { return "Signer*" } +func (v _XdrPtr_Signer) XdrPointer() interface{} { return v.p } +func (v _XdrPtr_Signer) XdrValue() interface{} { return *v.p } + +type XdrType_SetOptionsOp = *SetOptionsOp + +func (v *SetOptionsOp) XdrPointer() interface{} { return v } +func (SetOptionsOp) XdrTypeName() string { return "SetOptionsOp" } +func (v SetOptionsOp) XdrValue() interface{} { return v } +func (v *SetOptionsOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SetOptionsOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sinflationDest", name), _XdrPtr_AccountID{&v.InflationDest}) + x.Marshal(x.Sprintf("%sclearFlags", name), _XdrPtr_Uint32{&v.ClearFlags}) + x.Marshal(x.Sprintf("%ssetFlags", name), _XdrPtr_Uint32{&v.SetFlags}) + x.Marshal(x.Sprintf("%smasterWeight", name), _XdrPtr_Uint32{&v.MasterWeight}) + x.Marshal(x.Sprintf("%slowThreshold", name), _XdrPtr_Uint32{&v.LowThreshold}) + x.Marshal(x.Sprintf("%smedThreshold", name), _XdrPtr_Uint32{&v.MedThreshold}) + x.Marshal(x.Sprintf("%shighThreshold", name), _XdrPtr_Uint32{&v.HighThreshold}) + x.Marshal(x.Sprintf("%shomeDomain", name), _XdrPtr_String32{&v.HomeDomain}) + x.Marshal(x.Sprintf("%ssigner", name), _XdrPtr_Signer{&v.Signer}) +} +func XDR_SetOptionsOp(v *SetOptionsOp) *SetOptionsOp { return v } + +var _XdrTags_ChangeTrustAsset = map[int32]bool{ + XdrToI32(ASSET_TYPE_NATIVE): true, + XdrToI32(ASSET_TYPE_CREDIT_ALPHANUM4): true, + XdrToI32(ASSET_TYPE_CREDIT_ALPHANUM12): true, + XdrToI32(ASSET_TYPE_POOL_SHARE): true, +} + +func (_ ChangeTrustAsset) XdrValidTags() map[int32]bool { + return _XdrTags_ChangeTrustAsset +} +func (u *ChangeTrustAsset) AlphaNum4() *AlphaNum4 { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM4: + if v, ok := u._u.(*AlphaNum4); ok { + return v + } else { + var zero AlphaNum4 + u._u = &zero + return &zero + } + default: + XdrPanic("ChangeTrustAsset.AlphaNum4 accessed when Type == %v", u.Type) + return nil + } +} +func (u *ChangeTrustAsset) AlphaNum12() *AlphaNum12 { + switch u.Type { + case ASSET_TYPE_CREDIT_ALPHANUM12: + if v, ok := u._u.(*AlphaNum12); ok { + return v + } else { + var zero AlphaNum12 + u._u = &zero + return &zero + } + default: + XdrPanic("ChangeTrustAsset.AlphaNum12 accessed when Type == %v", u.Type) + return nil + } +} +func (u *ChangeTrustAsset) LiquidityPool() *LiquidityPoolParameters { + switch u.Type { + case ASSET_TYPE_POOL_SHARE: + if v, ok := u._u.(*LiquidityPoolParameters); ok { + return v + } else { + var zero LiquidityPoolParameters + u._u = &zero + return &zero + } + default: + XdrPanic("ChangeTrustAsset.LiquidityPool accessed when Type == %v", u.Type) + return 
nil + } +} +func (u ChangeTrustAsset) XdrValid() bool { + switch u.Type { + case ASSET_TYPE_NATIVE, ASSET_TYPE_CREDIT_ALPHANUM4, ASSET_TYPE_CREDIT_ALPHANUM12, ASSET_TYPE_POOL_SHARE: + return true + } + return false +} +func (u *ChangeTrustAsset) XdrUnionTag() XdrNum32 { + return XDR_AssetType(&u.Type) +} +func (u *ChangeTrustAsset) XdrUnionTagName() string { + return "Type" +} +func (u *ChangeTrustAsset) XdrUnionBody() XdrType { + switch u.Type { + case ASSET_TYPE_NATIVE: + return nil + case ASSET_TYPE_CREDIT_ALPHANUM4: + return XDR_AlphaNum4(u.AlphaNum4()) + case ASSET_TYPE_CREDIT_ALPHANUM12: + return XDR_AlphaNum12(u.AlphaNum12()) + case ASSET_TYPE_POOL_SHARE: + return XDR_LiquidityPoolParameters(u.LiquidityPool()) + } + return nil +} +func (u *ChangeTrustAsset) XdrUnionBodyName() string { + switch u.Type { + case ASSET_TYPE_NATIVE: + return "" + case ASSET_TYPE_CREDIT_ALPHANUM4: + return "AlphaNum4" + case ASSET_TYPE_CREDIT_ALPHANUM12: + return "AlphaNum12" + case ASSET_TYPE_POOL_SHARE: + return "LiquidityPool" + } + return "" +} + +type XdrType_ChangeTrustAsset = *ChangeTrustAsset + +func (v *ChangeTrustAsset) XdrPointer() interface{} { return v } +func (ChangeTrustAsset) XdrTypeName() string { return "ChangeTrustAsset" } +func (v ChangeTrustAsset) XdrValue() interface{} { return v } +func (v *ChangeTrustAsset) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *ChangeTrustAsset) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_AssetType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ASSET_TYPE_NATIVE: + return + case ASSET_TYPE_CREDIT_ALPHANUM4: + x.Marshal(x.Sprintf("%salphaNum4", name), XDR_AlphaNum4(u.AlphaNum4())) + return + case ASSET_TYPE_CREDIT_ALPHANUM12: + x.Marshal(x.Sprintf("%salphaNum12", name), XDR_AlphaNum12(u.AlphaNum12())) + return + case ASSET_TYPE_POOL_SHARE: + x.Marshal(x.Sprintf("%sliquidityPool", name), XDR_LiquidityPoolParameters(u.LiquidityPool())) + return + } + XdrPanic("invalid Type (%v) in ChangeTrustAsset", u.Type) +} +func XDR_ChangeTrustAsset(v *ChangeTrustAsset) *ChangeTrustAsset { return v } + +type XdrType_ChangeTrustOp = *ChangeTrustOp + +func (v *ChangeTrustOp) XdrPointer() interface{} { return v } +func (ChangeTrustOp) XdrTypeName() string { return "ChangeTrustOp" } +func (v ChangeTrustOp) XdrValue() interface{} { return v } +func (v *ChangeTrustOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ChangeTrustOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sline", name), XDR_ChangeTrustAsset(&v.Line)) + x.Marshal(x.Sprintf("%slimit", name), XDR_Int64(&v.Limit)) +} +func XDR_ChangeTrustOp(v *ChangeTrustOp) *ChangeTrustOp { return v } + +type XdrType_AllowTrustOp = *AllowTrustOp + +func (v *AllowTrustOp) XdrPointer() interface{} { return v } +func (AllowTrustOp) XdrTypeName() string { return "AllowTrustOp" } +func (v AllowTrustOp) XdrValue() interface{} { return v } +func (v *AllowTrustOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *AllowTrustOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%strustor", name), XDR_AccountID(&v.Trustor)) + x.Marshal(x.Sprintf("%sasset", name), XDR_AssetCode(&v.Asset)) + x.Marshal(x.Sprintf("%sauthorize", name), XDR_Uint32(&v.Authorize)) +} +func XDR_AllowTrustOp(v *AllowTrustOp) *AllowTrustOp { return v } + +type _XdrPtr_DataValue struct { + p **DataValue +} +type 
_ptrflag_DataValue _XdrPtr_DataValue + +func (v _ptrflag_DataValue) String() string { + if *v.p == nil { + return "nil" + } + return "non-nil" +} +func (v _ptrflag_DataValue) Scan(ss fmt.ScanState, r rune) error { + tok, err := ss.Token(true, func(c rune) bool { + return c == '-' || (c >= 'a' && c <= 'z') + }) + if err != nil { + return err + } + switch string(tok) { + case "nil": + v.SetU32(0) + case "non-nil": + v.SetU32(1) + default: + return XdrError("DataValue flag should be \"nil\" or \"non-nil\"") + } + return nil +} +func (v _ptrflag_DataValue) GetU32() uint32 { + if *v.p == nil { + return 0 + } + return 1 +} +func (v _ptrflag_DataValue) SetU32(nv uint32) { + switch nv { + case 0: + *v.p = nil + case 1: + if *v.p == nil { + *v.p = new(DataValue) + } + default: + XdrPanic("*DataValue present flag value %d should be 0 or 1", nv) + } +} +func (_ptrflag_DataValue) XdrTypeName() string { return "DataValue?" } +func (v _ptrflag_DataValue) XdrPointer() interface{} { return nil } +func (v _ptrflag_DataValue) XdrValue() interface{} { return v.GetU32() != 0 } +func (v _ptrflag_DataValue) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _ptrflag_DataValue) XdrBound() uint32 { return 1 } +func (v _XdrPtr_DataValue) GetPresent() bool { return *v.p != nil } +func (v _XdrPtr_DataValue) SetPresent(present bool) { + if !present { + *v.p = nil + } else if *v.p == nil { + *v.p = new(DataValue) + } +} +func (v _XdrPtr_DataValue) XdrMarshalValue(x XDR, name string) { + if *v.p != nil { + XDR_DataValue(*v.p).XdrMarshal(x, name) + } +} +func (v _XdrPtr_DataValue) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _XdrPtr_DataValue) XdrRecurse(x XDR, name string) { + x.Marshal(name, _ptrflag_DataValue(v)) + v.XdrMarshalValue(x, name) +} +func (_XdrPtr_DataValue) XdrTypeName() string { return "DataValue*" } +func (v _XdrPtr_DataValue) XdrPointer() interface{} { return v.p } +func (v _XdrPtr_DataValue) XdrValue() interface{} { return *v.p } + +type XdrType_ManageDataOp = *ManageDataOp + +func (v *ManageDataOp) XdrPointer() interface{} { return v } +func (ManageDataOp) XdrTypeName() string { return "ManageDataOp" } +func (v ManageDataOp) XdrValue() interface{} { return v } +func (v *ManageDataOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ManageDataOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sdataName", name), XDR_String64(&v.DataName)) + x.Marshal(x.Sprintf("%sdataValue", name), _XdrPtr_DataValue{&v.DataValue}) +} +func XDR_ManageDataOp(v *ManageDataOp) *ManageDataOp { return v } + +type XdrType_BumpSequenceOp = *BumpSequenceOp + +func (v *BumpSequenceOp) XdrPointer() interface{} { return v } +func (BumpSequenceOp) XdrTypeName() string { return "BumpSequenceOp" } +func (v BumpSequenceOp) XdrValue() interface{} { return v } +func (v *BumpSequenceOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *BumpSequenceOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sbumpTo", name), XDR_SequenceNumber(&v.BumpTo)) +} +func XDR_BumpSequenceOp(v *BumpSequenceOp) *BumpSequenceOp { return v } + +type XdrType_CreateClaimableBalanceOp = *CreateClaimableBalanceOp + +func (v *CreateClaimableBalanceOp) XdrPointer() interface{} { return v } +func (CreateClaimableBalanceOp) XdrTypeName() string { return "CreateClaimableBalanceOp" } +func (v CreateClaimableBalanceOp) XdrValue() interface{} { return v } +func (v 
*CreateClaimableBalanceOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *CreateClaimableBalanceOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sasset", name), XDR_Asset(&v.Asset)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) + x.Marshal(x.Sprintf("%sclaimants", name), (*_XdrVec_10_Claimant)(&v.Claimants)) +} +func XDR_CreateClaimableBalanceOp(v *CreateClaimableBalanceOp) *CreateClaimableBalanceOp { return v } + +type XdrType_ClaimClaimableBalanceOp = *ClaimClaimableBalanceOp + +func (v *ClaimClaimableBalanceOp) XdrPointer() interface{} { return v } +func (ClaimClaimableBalanceOp) XdrTypeName() string { return "ClaimClaimableBalanceOp" } +func (v ClaimClaimableBalanceOp) XdrValue() interface{} { return v } +func (v *ClaimClaimableBalanceOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ClaimClaimableBalanceOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sbalanceID", name), XDR_ClaimableBalanceID(&v.BalanceID)) +} +func XDR_ClaimClaimableBalanceOp(v *ClaimClaimableBalanceOp) *ClaimClaimableBalanceOp { return v } + +type XdrType_BeginSponsoringFutureReservesOp = *BeginSponsoringFutureReservesOp + +func (v *BeginSponsoringFutureReservesOp) XdrPointer() interface{} { return v } +func (BeginSponsoringFutureReservesOp) XdrTypeName() string { return "BeginSponsoringFutureReservesOp" } +func (v BeginSponsoringFutureReservesOp) XdrValue() interface{} { return v } +func (v *BeginSponsoringFutureReservesOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *BeginSponsoringFutureReservesOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssponsoredID", name), XDR_AccountID(&v.SponsoredID)) +} +func XDR_BeginSponsoringFutureReservesOp(v *BeginSponsoringFutureReservesOp) *BeginSponsoringFutureReservesOp { + return v +} + +var _XdrNames_RevokeSponsorshipType = map[int32]string{ + int32(REVOKE_SPONSORSHIP_LEDGER_ENTRY): "REVOKE_SPONSORSHIP_LEDGER_ENTRY", + int32(REVOKE_SPONSORSHIP_SIGNER): "REVOKE_SPONSORSHIP_SIGNER", +} +var _XdrValues_RevokeSponsorshipType = map[string]int32{ + "REVOKE_SPONSORSHIP_LEDGER_ENTRY": int32(REVOKE_SPONSORSHIP_LEDGER_ENTRY), + "REVOKE_SPONSORSHIP_SIGNER": int32(REVOKE_SPONSORSHIP_SIGNER), +} + +func (RevokeSponsorshipType) XdrEnumNames() map[int32]string { + return _XdrNames_RevokeSponsorshipType +} +func (v RevokeSponsorshipType) String() string { + if s, ok := _XdrNames_RevokeSponsorshipType[int32(v)]; ok { + return s + } + return fmt.Sprintf("RevokeSponsorshipType#%d", v) +} +func (v *RevokeSponsorshipType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_RevokeSponsorshipType[stok]; ok { + *v = RevokeSponsorshipType(val) + return nil + } else if stok == "RevokeSponsorshipType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid RevokeSponsorshipType.", stok)) + } +} +func (v RevokeSponsorshipType) GetU32() uint32 { return uint32(v) } +func (v *RevokeSponsorshipType) SetU32(n uint32) { *v = RevokeSponsorshipType(n) } +func (v *RevokeSponsorshipType) XdrPointer() interface{} { return v } +func (RevokeSponsorshipType) XdrTypeName() string { return "RevokeSponsorshipType" } +func (v RevokeSponsorshipType) 
XdrValue() interface{} { return v } +func (v *RevokeSponsorshipType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_RevokeSponsorshipType = *RevokeSponsorshipType + +func XDR_RevokeSponsorshipType(v *RevokeSponsorshipType) *RevokeSponsorshipType { return v } + +type XdrType_XdrAnon_RevokeSponsorshipOp_Signer = *XdrAnon_RevokeSponsorshipOp_Signer + +func (v *XdrAnon_RevokeSponsorshipOp_Signer) XdrPointer() interface{} { return v } +func (XdrAnon_RevokeSponsorshipOp_Signer) XdrTypeName() string { + return "XdrAnon_RevokeSponsorshipOp_Signer" +} +func (v XdrAnon_RevokeSponsorshipOp_Signer) XdrValue() interface{} { return v } +func (v *XdrAnon_RevokeSponsorshipOp_Signer) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_RevokeSponsorshipOp_Signer) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%saccountID", name), XDR_AccountID(&v.AccountID)) + x.Marshal(x.Sprintf("%ssignerKey", name), XDR_SignerKey(&v.SignerKey)) +} +func XDR_XdrAnon_RevokeSponsorshipOp_Signer(v *XdrAnon_RevokeSponsorshipOp_Signer) *XdrAnon_RevokeSponsorshipOp_Signer { + return v +} + +var _XdrTags_RevokeSponsorshipOp = map[int32]bool{ + XdrToI32(REVOKE_SPONSORSHIP_LEDGER_ENTRY): true, + XdrToI32(REVOKE_SPONSORSHIP_SIGNER): true, +} + +func (_ RevokeSponsorshipOp) XdrValidTags() map[int32]bool { + return _XdrTags_RevokeSponsorshipOp +} +func (u *RevokeSponsorshipOp) LedgerKey() *LedgerKey { + switch u.Type { + case REVOKE_SPONSORSHIP_LEDGER_ENTRY: + if v, ok := u._u.(*LedgerKey); ok { + return v + } else { + var zero LedgerKey + u._u = &zero + return &zero + } + default: + XdrPanic("RevokeSponsorshipOp.LedgerKey accessed when Type == %v", u.Type) + return nil + } +} +func (u *RevokeSponsorshipOp) Signer() *XdrAnon_RevokeSponsorshipOp_Signer { + switch u.Type { + case REVOKE_SPONSORSHIP_SIGNER: + if v, ok := u._u.(*XdrAnon_RevokeSponsorshipOp_Signer); ok { + return v + } else { + var zero XdrAnon_RevokeSponsorshipOp_Signer + u._u = &zero + return &zero + } + default: + XdrPanic("RevokeSponsorshipOp.Signer accessed when Type == %v", u.Type) + return nil + } +} +func (u RevokeSponsorshipOp) XdrValid() bool { + switch u.Type { + case REVOKE_SPONSORSHIP_LEDGER_ENTRY, REVOKE_SPONSORSHIP_SIGNER: + return true + } + return false +} +func (u *RevokeSponsorshipOp) XdrUnionTag() XdrNum32 { + return XDR_RevokeSponsorshipType(&u.Type) +} +func (u *RevokeSponsorshipOp) XdrUnionTagName() string { + return "Type" +} +func (u *RevokeSponsorshipOp) XdrUnionBody() XdrType { + switch u.Type { + case REVOKE_SPONSORSHIP_LEDGER_ENTRY: + return XDR_LedgerKey(u.LedgerKey()) + case REVOKE_SPONSORSHIP_SIGNER: + return XDR_XdrAnon_RevokeSponsorshipOp_Signer(u.Signer()) + } + return nil +} +func (u *RevokeSponsorshipOp) XdrUnionBodyName() string { + switch u.Type { + case REVOKE_SPONSORSHIP_LEDGER_ENTRY: + return "LedgerKey" + case REVOKE_SPONSORSHIP_SIGNER: + return "Signer" + } + return "" +} + +type XdrType_RevokeSponsorshipOp = *RevokeSponsorshipOp + +func (v *RevokeSponsorshipOp) XdrPointer() interface{} { return v } +func (RevokeSponsorshipOp) XdrTypeName() string { return "RevokeSponsorshipOp" } +func (v RevokeSponsorshipOp) XdrValue() interface{} { return v } +func (v *RevokeSponsorshipOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *RevokeSponsorshipOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_RevokeSponsorshipType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + 
switch u.Type { + case REVOKE_SPONSORSHIP_LEDGER_ENTRY: + x.Marshal(x.Sprintf("%sledgerKey", name), XDR_LedgerKey(u.LedgerKey())) + return + case REVOKE_SPONSORSHIP_SIGNER: + x.Marshal(x.Sprintf("%ssigner", name), XDR_XdrAnon_RevokeSponsorshipOp_Signer(u.Signer())) + return + } + XdrPanic("invalid Type (%v) in RevokeSponsorshipOp", u.Type) +} +func XDR_RevokeSponsorshipOp(v *RevokeSponsorshipOp) *RevokeSponsorshipOp { return v } + +type XdrType_ClawbackOp = *ClawbackOp + +func (v *ClawbackOp) XdrPointer() interface{} { return v } +func (ClawbackOp) XdrTypeName() string { return "ClawbackOp" } +func (v ClawbackOp) XdrValue() interface{} { return v } +func (v *ClawbackOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ClawbackOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sasset", name), XDR_Asset(&v.Asset)) + x.Marshal(x.Sprintf("%sfrom", name), XDR_MuxedAccount(&v.From)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) +} +func XDR_ClawbackOp(v *ClawbackOp) *ClawbackOp { return v } + +type XdrType_ClawbackClaimableBalanceOp = *ClawbackClaimableBalanceOp + +func (v *ClawbackClaimableBalanceOp) XdrPointer() interface{} { return v } +func (ClawbackClaimableBalanceOp) XdrTypeName() string { return "ClawbackClaimableBalanceOp" } +func (v ClawbackClaimableBalanceOp) XdrValue() interface{} { return v } +func (v *ClawbackClaimableBalanceOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ClawbackClaimableBalanceOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sbalanceID", name), XDR_ClaimableBalanceID(&v.BalanceID)) +} +func XDR_ClawbackClaimableBalanceOp(v *ClawbackClaimableBalanceOp) *ClawbackClaimableBalanceOp { + return v +} + +type XdrType_SetTrustLineFlagsOp = *SetTrustLineFlagsOp + +func (v *SetTrustLineFlagsOp) XdrPointer() interface{} { return v } +func (SetTrustLineFlagsOp) XdrTypeName() string { return "SetTrustLineFlagsOp" } +func (v SetTrustLineFlagsOp) XdrValue() interface{} { return v } +func (v *SetTrustLineFlagsOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SetTrustLineFlagsOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%strustor", name), XDR_AccountID(&v.Trustor)) + x.Marshal(x.Sprintf("%sasset", name), XDR_Asset(&v.Asset)) + x.Marshal(x.Sprintf("%sclearFlags", name), XDR_Uint32(&v.ClearFlags)) + x.Marshal(x.Sprintf("%ssetFlags", name), XDR_Uint32(&v.SetFlags)) +} +func XDR_SetTrustLineFlagsOp(v *SetTrustLineFlagsOp) *SetTrustLineFlagsOp { return v } + +type XdrType_LiquidityPoolDepositOp = *LiquidityPoolDepositOp + +func (v *LiquidityPoolDepositOp) XdrPointer() interface{} { return v } +func (LiquidityPoolDepositOp) XdrTypeName() string { return "LiquidityPoolDepositOp" } +func (v LiquidityPoolDepositOp) XdrValue() interface{} { return v } +func (v *LiquidityPoolDepositOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LiquidityPoolDepositOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sliquidityPoolID", name), XDR_PoolID(&v.LiquidityPoolID)) + x.Marshal(x.Sprintf("%smaxAmountA", name), XDR_Int64(&v.MaxAmountA)) + x.Marshal(x.Sprintf("%smaxAmountB", name), XDR_Int64(&v.MaxAmountB)) + x.Marshal(x.Sprintf("%sminPrice", name), XDR_Price(&v.MinPrice)) + x.Marshal(x.Sprintf("%smaxPrice", name), XDR_Price(&v.MaxPrice)) +} +func 
XDR_LiquidityPoolDepositOp(v *LiquidityPoolDepositOp) *LiquidityPoolDepositOp { return v } + +type XdrType_LiquidityPoolWithdrawOp = *LiquidityPoolWithdrawOp + +func (v *LiquidityPoolWithdrawOp) XdrPointer() interface{} { return v } +func (LiquidityPoolWithdrawOp) XdrTypeName() string { return "LiquidityPoolWithdrawOp" } +func (v LiquidityPoolWithdrawOp) XdrValue() interface{} { return v } +func (v *LiquidityPoolWithdrawOp) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *LiquidityPoolWithdrawOp) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sliquidityPoolID", name), XDR_PoolID(&v.LiquidityPoolID)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) + x.Marshal(x.Sprintf("%sminAmountA", name), XDR_Int64(&v.MinAmountA)) + x.Marshal(x.Sprintf("%sminAmountB", name), XDR_Int64(&v.MinAmountB)) +} +func XDR_LiquidityPoolWithdrawOp(v *LiquidityPoolWithdrawOp) *LiquidityPoolWithdrawOp { return v } + +var _XdrTags_XdrAnon_Operation_Body = map[int32]bool{ + XdrToI32(CREATE_ACCOUNT): true, + XdrToI32(PAYMENT): true, + XdrToI32(PATH_PAYMENT_STRICT_RECEIVE): true, + XdrToI32(MANAGE_SELL_OFFER): true, + XdrToI32(CREATE_PASSIVE_SELL_OFFER): true, + XdrToI32(SET_OPTIONS): true, + XdrToI32(CHANGE_TRUST): true, + XdrToI32(ALLOW_TRUST): true, + XdrToI32(ACCOUNT_MERGE): true, + XdrToI32(INFLATION): true, + XdrToI32(MANAGE_DATA): true, + XdrToI32(BUMP_SEQUENCE): true, + XdrToI32(MANAGE_BUY_OFFER): true, + XdrToI32(PATH_PAYMENT_STRICT_SEND): true, + XdrToI32(CREATE_CLAIMABLE_BALANCE): true, + XdrToI32(CLAIM_CLAIMABLE_BALANCE): true, + XdrToI32(BEGIN_SPONSORING_FUTURE_RESERVES): true, + XdrToI32(END_SPONSORING_FUTURE_RESERVES): true, + XdrToI32(REVOKE_SPONSORSHIP): true, + XdrToI32(CLAWBACK): true, + XdrToI32(CLAWBACK_CLAIMABLE_BALANCE): true, + XdrToI32(SET_TRUST_LINE_FLAGS): true, + XdrToI32(LIQUIDITY_POOL_DEPOSIT): true, + XdrToI32(LIQUIDITY_POOL_WITHDRAW): true, +} + +func (_ XdrAnon_Operation_Body) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_Operation_Body +} +func (u *XdrAnon_Operation_Body) CreateAccountOp() *CreateAccountOp { + switch u.Type { + case CREATE_ACCOUNT: + if v, ok := u._u.(*CreateAccountOp); ok { + return v + } else { + var zero CreateAccountOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.CreateAccountOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) PaymentOp() *PaymentOp { + switch u.Type { + case PAYMENT: + if v, ok := u._u.(*PaymentOp); ok { + return v + } else { + var zero PaymentOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.PaymentOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) PathPaymentStrictReceiveOp() *PathPaymentStrictReceiveOp { + switch u.Type { + case PATH_PAYMENT_STRICT_RECEIVE: + if v, ok := u._u.(*PathPaymentStrictReceiveOp); ok { + return v + } else { + var zero PathPaymentStrictReceiveOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.PathPaymentStrictReceiveOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) ManageSellOfferOp() *ManageSellOfferOp { + switch u.Type { + case MANAGE_SELL_OFFER: + if v, ok := u._u.(*ManageSellOfferOp); ok { + return v + } else { + var zero ManageSellOfferOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.ManageSellOfferOp accessed when Type == %v", u.Type) + return nil + } +} +func 
(u *XdrAnon_Operation_Body) CreatePassiveSellOfferOp() *CreatePassiveSellOfferOp { + switch u.Type { + case CREATE_PASSIVE_SELL_OFFER: + if v, ok := u._u.(*CreatePassiveSellOfferOp); ok { + return v + } else { + var zero CreatePassiveSellOfferOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.CreatePassiveSellOfferOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) SetOptionsOp() *SetOptionsOp { + switch u.Type { + case SET_OPTIONS: + if v, ok := u._u.(*SetOptionsOp); ok { + return v + } else { + var zero SetOptionsOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.SetOptionsOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) ChangeTrustOp() *ChangeTrustOp { + switch u.Type { + case CHANGE_TRUST: + if v, ok := u._u.(*ChangeTrustOp); ok { + return v + } else { + var zero ChangeTrustOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.ChangeTrustOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) AllowTrustOp() *AllowTrustOp { + switch u.Type { + case ALLOW_TRUST: + if v, ok := u._u.(*AllowTrustOp); ok { + return v + } else { + var zero AllowTrustOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.AllowTrustOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) Destination() *MuxedAccount { + switch u.Type { + case ACCOUNT_MERGE: + if v, ok := u._u.(*MuxedAccount); ok { + return v + } else { + var zero MuxedAccount + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.Destination accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) ManageDataOp() *ManageDataOp { + switch u.Type { + case MANAGE_DATA: + if v, ok := u._u.(*ManageDataOp); ok { + return v + } else { + var zero ManageDataOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.ManageDataOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) BumpSequenceOp() *BumpSequenceOp { + switch u.Type { + case BUMP_SEQUENCE: + if v, ok := u._u.(*BumpSequenceOp); ok { + return v + } else { + var zero BumpSequenceOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.BumpSequenceOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) ManageBuyOfferOp() *ManageBuyOfferOp { + switch u.Type { + case MANAGE_BUY_OFFER: + if v, ok := u._u.(*ManageBuyOfferOp); ok { + return v + } else { + var zero ManageBuyOfferOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.ManageBuyOfferOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) PathPaymentStrictSendOp() *PathPaymentStrictSendOp { + switch u.Type { + case PATH_PAYMENT_STRICT_SEND: + if v, ok := u._u.(*PathPaymentStrictSendOp); ok { + return v + } else { + var zero PathPaymentStrictSendOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.PathPaymentStrictSendOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) CreateClaimableBalanceOp() *CreateClaimableBalanceOp { + switch u.Type { + case CREATE_CLAIMABLE_BALANCE: + if v, ok := u._u.(*CreateClaimableBalanceOp); ok { + return v + } else { + var zero CreateClaimableBalanceOp + u._u = &zero + return &zero + } + default: + 
XdrPanic("XdrAnon_Operation_Body.CreateClaimableBalanceOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) ClaimClaimableBalanceOp() *ClaimClaimableBalanceOp { + switch u.Type { + case CLAIM_CLAIMABLE_BALANCE: + if v, ok := u._u.(*ClaimClaimableBalanceOp); ok { + return v + } else { + var zero ClaimClaimableBalanceOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.ClaimClaimableBalanceOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) BeginSponsoringFutureReservesOp() *BeginSponsoringFutureReservesOp { + switch u.Type { + case BEGIN_SPONSORING_FUTURE_RESERVES: + if v, ok := u._u.(*BeginSponsoringFutureReservesOp); ok { + return v + } else { + var zero BeginSponsoringFutureReservesOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.BeginSponsoringFutureReservesOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) RevokeSponsorshipOp() *RevokeSponsorshipOp { + switch u.Type { + case REVOKE_SPONSORSHIP: + if v, ok := u._u.(*RevokeSponsorshipOp); ok { + return v + } else { + var zero RevokeSponsorshipOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.RevokeSponsorshipOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) ClawbackOp() *ClawbackOp { + switch u.Type { + case CLAWBACK: + if v, ok := u._u.(*ClawbackOp); ok { + return v + } else { + var zero ClawbackOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.ClawbackOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) ClawbackClaimableBalanceOp() *ClawbackClaimableBalanceOp { + switch u.Type { + case CLAWBACK_CLAIMABLE_BALANCE: + if v, ok := u._u.(*ClawbackClaimableBalanceOp); ok { + return v + } else { + var zero ClawbackClaimableBalanceOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.ClawbackClaimableBalanceOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) SetTrustLineFlagsOp() *SetTrustLineFlagsOp { + switch u.Type { + case SET_TRUST_LINE_FLAGS: + if v, ok := u._u.(*SetTrustLineFlagsOp); ok { + return v + } else { + var zero SetTrustLineFlagsOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.SetTrustLineFlagsOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) LiquidityPoolDepositOp() *LiquidityPoolDepositOp { + switch u.Type { + case LIQUIDITY_POOL_DEPOSIT: + if v, ok := u._u.(*LiquidityPoolDepositOp); ok { + return v + } else { + var zero LiquidityPoolDepositOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.LiquidityPoolDepositOp accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_Operation_Body) LiquidityPoolWithdrawOp() *LiquidityPoolWithdrawOp { + switch u.Type { + case LIQUIDITY_POOL_WITHDRAW: + if v, ok := u._u.(*LiquidityPoolWithdrawOp); ok { + return v + } else { + var zero LiquidityPoolWithdrawOp + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_Operation_Body.LiquidityPoolWithdrawOp accessed when Type == %v", u.Type) + return nil + } +} +func (u XdrAnon_Operation_Body) XdrValid() bool { + switch u.Type { + case CREATE_ACCOUNT, PAYMENT, PATH_PAYMENT_STRICT_RECEIVE, MANAGE_SELL_OFFER, CREATE_PASSIVE_SELL_OFFER, SET_OPTIONS, CHANGE_TRUST, ALLOW_TRUST, ACCOUNT_MERGE, INFLATION, MANAGE_DATA, 
BUMP_SEQUENCE, MANAGE_BUY_OFFER, PATH_PAYMENT_STRICT_SEND, CREATE_CLAIMABLE_BALANCE, CLAIM_CLAIMABLE_BALANCE, BEGIN_SPONSORING_FUTURE_RESERVES, END_SPONSORING_FUTURE_RESERVES, REVOKE_SPONSORSHIP, CLAWBACK, CLAWBACK_CLAIMABLE_BALANCE, SET_TRUST_LINE_FLAGS, LIQUIDITY_POOL_DEPOSIT, LIQUIDITY_POOL_WITHDRAW: + return true + } + return false +} +func (u *XdrAnon_Operation_Body) XdrUnionTag() XdrNum32 { + return XDR_OperationType(&u.Type) +} +func (u *XdrAnon_Operation_Body) XdrUnionTagName() string { + return "Type" +} +func (u *XdrAnon_Operation_Body) XdrUnionBody() XdrType { + switch u.Type { + case CREATE_ACCOUNT: + return XDR_CreateAccountOp(u.CreateAccountOp()) + case PAYMENT: + return XDR_PaymentOp(u.PaymentOp()) + case PATH_PAYMENT_STRICT_RECEIVE: + return XDR_PathPaymentStrictReceiveOp(u.PathPaymentStrictReceiveOp()) + case MANAGE_SELL_OFFER: + return XDR_ManageSellOfferOp(u.ManageSellOfferOp()) + case CREATE_PASSIVE_SELL_OFFER: + return XDR_CreatePassiveSellOfferOp(u.CreatePassiveSellOfferOp()) + case SET_OPTIONS: + return XDR_SetOptionsOp(u.SetOptionsOp()) + case CHANGE_TRUST: + return XDR_ChangeTrustOp(u.ChangeTrustOp()) + case ALLOW_TRUST: + return XDR_AllowTrustOp(u.AllowTrustOp()) + case ACCOUNT_MERGE: + return XDR_MuxedAccount(u.Destination()) + case INFLATION: + return nil + case MANAGE_DATA: + return XDR_ManageDataOp(u.ManageDataOp()) + case BUMP_SEQUENCE: + return XDR_BumpSequenceOp(u.BumpSequenceOp()) + case MANAGE_BUY_OFFER: + return XDR_ManageBuyOfferOp(u.ManageBuyOfferOp()) + case PATH_PAYMENT_STRICT_SEND: + return XDR_PathPaymentStrictSendOp(u.PathPaymentStrictSendOp()) + case CREATE_CLAIMABLE_BALANCE: + return XDR_CreateClaimableBalanceOp(u.CreateClaimableBalanceOp()) + case CLAIM_CLAIMABLE_BALANCE: + return XDR_ClaimClaimableBalanceOp(u.ClaimClaimableBalanceOp()) + case BEGIN_SPONSORING_FUTURE_RESERVES: + return XDR_BeginSponsoringFutureReservesOp(u.BeginSponsoringFutureReservesOp()) + case END_SPONSORING_FUTURE_RESERVES: + return nil + case REVOKE_SPONSORSHIP: + return XDR_RevokeSponsorshipOp(u.RevokeSponsorshipOp()) + case CLAWBACK: + return XDR_ClawbackOp(u.ClawbackOp()) + case CLAWBACK_CLAIMABLE_BALANCE: + return XDR_ClawbackClaimableBalanceOp(u.ClawbackClaimableBalanceOp()) + case SET_TRUST_LINE_FLAGS: + return XDR_SetTrustLineFlagsOp(u.SetTrustLineFlagsOp()) + case LIQUIDITY_POOL_DEPOSIT: + return XDR_LiquidityPoolDepositOp(u.LiquidityPoolDepositOp()) + case LIQUIDITY_POOL_WITHDRAW: + return XDR_LiquidityPoolWithdrawOp(u.LiquidityPoolWithdrawOp()) + } + return nil +} +func (u *XdrAnon_Operation_Body) XdrUnionBodyName() string { + switch u.Type { + case CREATE_ACCOUNT: + return "CreateAccountOp" + case PAYMENT: + return "PaymentOp" + case PATH_PAYMENT_STRICT_RECEIVE: + return "PathPaymentStrictReceiveOp" + case MANAGE_SELL_OFFER: + return "ManageSellOfferOp" + case CREATE_PASSIVE_SELL_OFFER: + return "CreatePassiveSellOfferOp" + case SET_OPTIONS: + return "SetOptionsOp" + case CHANGE_TRUST: + return "ChangeTrustOp" + case ALLOW_TRUST: + return "AllowTrustOp" + case ACCOUNT_MERGE: + return "Destination" + case INFLATION: + return "" + case MANAGE_DATA: + return "ManageDataOp" + case BUMP_SEQUENCE: + return "BumpSequenceOp" + case MANAGE_BUY_OFFER: + return "ManageBuyOfferOp" + case PATH_PAYMENT_STRICT_SEND: + return "PathPaymentStrictSendOp" + case CREATE_CLAIMABLE_BALANCE: + return "CreateClaimableBalanceOp" + case CLAIM_CLAIMABLE_BALANCE: + return "ClaimClaimableBalanceOp" + case BEGIN_SPONSORING_FUTURE_RESERVES: + return "BeginSponsoringFutureReservesOp" + case 
END_SPONSORING_FUTURE_RESERVES: + return "" + case REVOKE_SPONSORSHIP: + return "RevokeSponsorshipOp" + case CLAWBACK: + return "ClawbackOp" + case CLAWBACK_CLAIMABLE_BALANCE: + return "ClawbackClaimableBalanceOp" + case SET_TRUST_LINE_FLAGS: + return "SetTrustLineFlagsOp" + case LIQUIDITY_POOL_DEPOSIT: + return "LiquidityPoolDepositOp" + case LIQUIDITY_POOL_WITHDRAW: + return "LiquidityPoolWithdrawOp" + } + return "" +} + +type XdrType_XdrAnon_Operation_Body = *XdrAnon_Operation_Body + +func (v *XdrAnon_Operation_Body) XdrPointer() interface{} { return v } +func (XdrAnon_Operation_Body) XdrTypeName() string { return "XdrAnon_Operation_Body" } +func (v XdrAnon_Operation_Body) XdrValue() interface{} { return v } +func (v *XdrAnon_Operation_Body) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_Operation_Body) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_OperationType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case CREATE_ACCOUNT: + x.Marshal(x.Sprintf("%screateAccountOp", name), XDR_CreateAccountOp(u.CreateAccountOp())) + return + case PAYMENT: + x.Marshal(x.Sprintf("%spaymentOp", name), XDR_PaymentOp(u.PaymentOp())) + return + case PATH_PAYMENT_STRICT_RECEIVE: + x.Marshal(x.Sprintf("%spathPaymentStrictReceiveOp", name), XDR_PathPaymentStrictReceiveOp(u.PathPaymentStrictReceiveOp())) + return + case MANAGE_SELL_OFFER: + x.Marshal(x.Sprintf("%smanageSellOfferOp", name), XDR_ManageSellOfferOp(u.ManageSellOfferOp())) + return + case CREATE_PASSIVE_SELL_OFFER: + x.Marshal(x.Sprintf("%screatePassiveSellOfferOp", name), XDR_CreatePassiveSellOfferOp(u.CreatePassiveSellOfferOp())) + return + case SET_OPTIONS: + x.Marshal(x.Sprintf("%ssetOptionsOp", name), XDR_SetOptionsOp(u.SetOptionsOp())) + return + case CHANGE_TRUST: + x.Marshal(x.Sprintf("%schangeTrustOp", name), XDR_ChangeTrustOp(u.ChangeTrustOp())) + return + case ALLOW_TRUST: + x.Marshal(x.Sprintf("%sallowTrustOp", name), XDR_AllowTrustOp(u.AllowTrustOp())) + return + case ACCOUNT_MERGE: + x.Marshal(x.Sprintf("%sdestination", name), XDR_MuxedAccount(u.Destination())) + return + case INFLATION: + return + case MANAGE_DATA: + x.Marshal(x.Sprintf("%smanageDataOp", name), XDR_ManageDataOp(u.ManageDataOp())) + return + case BUMP_SEQUENCE: + x.Marshal(x.Sprintf("%sbumpSequenceOp", name), XDR_BumpSequenceOp(u.BumpSequenceOp())) + return + case MANAGE_BUY_OFFER: + x.Marshal(x.Sprintf("%smanageBuyOfferOp", name), XDR_ManageBuyOfferOp(u.ManageBuyOfferOp())) + return + case PATH_PAYMENT_STRICT_SEND: + x.Marshal(x.Sprintf("%spathPaymentStrictSendOp", name), XDR_PathPaymentStrictSendOp(u.PathPaymentStrictSendOp())) + return + case CREATE_CLAIMABLE_BALANCE: + x.Marshal(x.Sprintf("%screateClaimableBalanceOp", name), XDR_CreateClaimableBalanceOp(u.CreateClaimableBalanceOp())) + return + case CLAIM_CLAIMABLE_BALANCE: + x.Marshal(x.Sprintf("%sclaimClaimableBalanceOp", name), XDR_ClaimClaimableBalanceOp(u.ClaimClaimableBalanceOp())) + return + case BEGIN_SPONSORING_FUTURE_RESERVES: + x.Marshal(x.Sprintf("%sbeginSponsoringFutureReservesOp", name), XDR_BeginSponsoringFutureReservesOp(u.BeginSponsoringFutureReservesOp())) + return + case END_SPONSORING_FUTURE_RESERVES: + return + case REVOKE_SPONSORSHIP: + x.Marshal(x.Sprintf("%srevokeSponsorshipOp", name), XDR_RevokeSponsorshipOp(u.RevokeSponsorshipOp())) + return + case CLAWBACK: + x.Marshal(x.Sprintf("%sclawbackOp", name), XDR_ClawbackOp(u.ClawbackOp())) + return + case CLAWBACK_CLAIMABLE_BALANCE: + 
x.Marshal(x.Sprintf("%sclawbackClaimableBalanceOp", name), XDR_ClawbackClaimableBalanceOp(u.ClawbackClaimableBalanceOp())) + return + case SET_TRUST_LINE_FLAGS: + x.Marshal(x.Sprintf("%ssetTrustLineFlagsOp", name), XDR_SetTrustLineFlagsOp(u.SetTrustLineFlagsOp())) + return + case LIQUIDITY_POOL_DEPOSIT: + x.Marshal(x.Sprintf("%sliquidityPoolDepositOp", name), XDR_LiquidityPoolDepositOp(u.LiquidityPoolDepositOp())) + return + case LIQUIDITY_POOL_WITHDRAW: + x.Marshal(x.Sprintf("%sliquidityPoolWithdrawOp", name), XDR_LiquidityPoolWithdrawOp(u.LiquidityPoolWithdrawOp())) + return + } + XdrPanic("invalid Type (%v) in XdrAnon_Operation_Body", u.Type) +} +func XDR_XdrAnon_Operation_Body(v *XdrAnon_Operation_Body) *XdrAnon_Operation_Body { return v } + +type _XdrPtr_MuxedAccount struct { + p **MuxedAccount +} +type _ptrflag_MuxedAccount _XdrPtr_MuxedAccount + +func (v _ptrflag_MuxedAccount) String() string { + if *v.p == nil { + return "nil" + } + return "non-nil" +} +func (v _ptrflag_MuxedAccount) Scan(ss fmt.ScanState, r rune) error { + tok, err := ss.Token(true, func(c rune) bool { + return c == '-' || (c >= 'a' && c <= 'z') + }) + if err != nil { + return err + } + switch string(tok) { + case "nil": + v.SetU32(0) + case "non-nil": + v.SetU32(1) + default: + return XdrError("MuxedAccount flag should be \"nil\" or \"non-nil\"") + } + return nil +} +func (v _ptrflag_MuxedAccount) GetU32() uint32 { + if *v.p == nil { + return 0 + } + return 1 +} +func (v _ptrflag_MuxedAccount) SetU32(nv uint32) { + switch nv { + case 0: + *v.p = nil + case 1: + if *v.p == nil { + *v.p = new(MuxedAccount) + } + default: + XdrPanic("*MuxedAccount present flag value %d should be 0 or 1", nv) + } +} +func (_ptrflag_MuxedAccount) XdrTypeName() string { return "MuxedAccount?" } +func (v _ptrflag_MuxedAccount) XdrPointer() interface{} { return nil } +func (v _ptrflag_MuxedAccount) XdrValue() interface{} { return v.GetU32() != 0 } +func (v _ptrflag_MuxedAccount) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _ptrflag_MuxedAccount) XdrBound() uint32 { return 1 } +func (v _XdrPtr_MuxedAccount) GetPresent() bool { return *v.p != nil } +func (v _XdrPtr_MuxedAccount) SetPresent(present bool) { + if !present { + *v.p = nil + } else if *v.p == nil { + *v.p = new(MuxedAccount) + } +} +func (v _XdrPtr_MuxedAccount) XdrMarshalValue(x XDR, name string) { + if *v.p != nil { + XDR_MuxedAccount(*v.p).XdrMarshal(x, name) + } +} +func (v _XdrPtr_MuxedAccount) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _XdrPtr_MuxedAccount) XdrRecurse(x XDR, name string) { + x.Marshal(name, _ptrflag_MuxedAccount(v)) + v.XdrMarshalValue(x, name) +} +func (_XdrPtr_MuxedAccount) XdrTypeName() string { return "MuxedAccount*" } +func (v _XdrPtr_MuxedAccount) XdrPointer() interface{} { return v.p } +func (v _XdrPtr_MuxedAccount) XdrValue() interface{} { return *v.p } + +type XdrType_Operation = *Operation + +func (v *Operation) XdrPointer() interface{} { return v } +func (Operation) XdrTypeName() string { return "Operation" } +func (v Operation) XdrValue() interface{} { return v } +func (v *Operation) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *Operation) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssourceAccount", name), _XdrPtr_MuxedAccount{&v.SourceAccount}) + x.Marshal(x.Sprintf("%sbody", name), XDR_XdrAnon_Operation_Body(&v.Body)) +} +func XDR_Operation(v *Operation) *Operation { return v } + +type 
XdrType_XdrAnon_HashIDPreimage_OperationID = *XdrAnon_HashIDPreimage_OperationID + +func (v *XdrAnon_HashIDPreimage_OperationID) XdrPointer() interface{} { return v } +func (XdrAnon_HashIDPreimage_OperationID) XdrTypeName() string { + return "XdrAnon_HashIDPreimage_OperationID" +} +func (v XdrAnon_HashIDPreimage_OperationID) XdrValue() interface{} { return v } +func (v *XdrAnon_HashIDPreimage_OperationID) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_HashIDPreimage_OperationID) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssourceAccount", name), XDR_AccountID(&v.SourceAccount)) + x.Marshal(x.Sprintf("%sseqNum", name), XDR_SequenceNumber(&v.SeqNum)) + x.Marshal(x.Sprintf("%sopNum", name), XDR_Uint32(&v.OpNum)) +} +func XDR_XdrAnon_HashIDPreimage_OperationID(v *XdrAnon_HashIDPreimage_OperationID) *XdrAnon_HashIDPreimage_OperationID { + return v +} + +type XdrType_XdrAnon_HashIDPreimage_RevokeID = *XdrAnon_HashIDPreimage_RevokeID + +func (v *XdrAnon_HashIDPreimage_RevokeID) XdrPointer() interface{} { return v } +func (XdrAnon_HashIDPreimage_RevokeID) XdrTypeName() string { return "XdrAnon_HashIDPreimage_RevokeID" } +func (v XdrAnon_HashIDPreimage_RevokeID) XdrValue() interface{} { return v } +func (v *XdrAnon_HashIDPreimage_RevokeID) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *XdrAnon_HashIDPreimage_RevokeID) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssourceAccount", name), XDR_AccountID(&v.SourceAccount)) + x.Marshal(x.Sprintf("%sseqNum", name), XDR_SequenceNumber(&v.SeqNum)) + x.Marshal(x.Sprintf("%sopNum", name), XDR_Uint32(&v.OpNum)) + x.Marshal(x.Sprintf("%sliquidityPoolID", name), XDR_PoolID(&v.LiquidityPoolID)) + x.Marshal(x.Sprintf("%sasset", name), XDR_Asset(&v.Asset)) +} +func XDR_XdrAnon_HashIDPreimage_RevokeID(v *XdrAnon_HashIDPreimage_RevokeID) *XdrAnon_HashIDPreimage_RevokeID { + return v +} + +var _XdrTags_HashIDPreimage = map[int32]bool{ + XdrToI32(ENVELOPE_TYPE_OP_ID): true, + XdrToI32(ENVELOPE_TYPE_POOL_REVOKE_OP_ID): true, +} + +func (_ HashIDPreimage) XdrValidTags() map[int32]bool { + return _XdrTags_HashIDPreimage +} +func (u *HashIDPreimage) OperationID() *XdrAnon_HashIDPreimage_OperationID { + switch u.Type { + case ENVELOPE_TYPE_OP_ID: + if v, ok := u._u.(*XdrAnon_HashIDPreimage_OperationID); ok { + return v + } else { + var zero XdrAnon_HashIDPreimage_OperationID + u._u = &zero + return &zero + } + default: + XdrPanic("HashIDPreimage.OperationID accessed when Type == %v", u.Type) + return nil + } +} +func (u *HashIDPreimage) RevokeID() *XdrAnon_HashIDPreimage_RevokeID { + switch u.Type { + case ENVELOPE_TYPE_POOL_REVOKE_OP_ID: + if v, ok := u._u.(*XdrAnon_HashIDPreimage_RevokeID); ok { + return v + } else { + var zero XdrAnon_HashIDPreimage_RevokeID + u._u = &zero + return &zero + } + default: + XdrPanic("HashIDPreimage.RevokeID accessed when Type == %v", u.Type) + return nil + } +} +func (u HashIDPreimage) XdrValid() bool { + switch u.Type { + case ENVELOPE_TYPE_OP_ID, ENVELOPE_TYPE_POOL_REVOKE_OP_ID: + return true + } + return false +} +func (u *HashIDPreimage) XdrUnionTag() XdrNum32 { + return XDR_EnvelopeType(&u.Type) +} +func (u *HashIDPreimage) XdrUnionTagName() string { + return "Type" +} +func (u *HashIDPreimage) XdrUnionBody() XdrType { + switch u.Type { + case ENVELOPE_TYPE_OP_ID: + return XDR_XdrAnon_HashIDPreimage_OperationID(u.OperationID()) + case 
ENVELOPE_TYPE_POOL_REVOKE_OP_ID: + return XDR_XdrAnon_HashIDPreimage_RevokeID(u.RevokeID()) + } + return nil +} +func (u *HashIDPreimage) XdrUnionBodyName() string { + switch u.Type { + case ENVELOPE_TYPE_OP_ID: + return "OperationID" + case ENVELOPE_TYPE_POOL_REVOKE_OP_ID: + return "RevokeID" + } + return "" +} + +type XdrType_HashIDPreimage = *HashIDPreimage + +func (v *HashIDPreimage) XdrPointer() interface{} { return v } +func (HashIDPreimage) XdrTypeName() string { return "HashIDPreimage" } +func (v HashIDPreimage) XdrValue() interface{} { return v } +func (v *HashIDPreimage) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *HashIDPreimage) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_EnvelopeType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ENVELOPE_TYPE_OP_ID: + x.Marshal(x.Sprintf("%soperationID", name), XDR_XdrAnon_HashIDPreimage_OperationID(u.OperationID())) + return + case ENVELOPE_TYPE_POOL_REVOKE_OP_ID: + x.Marshal(x.Sprintf("%srevokeID", name), XDR_XdrAnon_HashIDPreimage_RevokeID(u.RevokeID())) + return + } + XdrPanic("invalid Type (%v) in HashIDPreimage", u.Type) +} +func (v *HashIDPreimage) XdrInitialize() { + var zero EnvelopeType + switch zero { + case ENVELOPE_TYPE_OP_ID, ENVELOPE_TYPE_POOL_REVOKE_OP_ID: + default: + if v.Type == zero { + v.Type = ENVELOPE_TYPE_OP_ID + } + } +} +func XDR_HashIDPreimage(v *HashIDPreimage) *HashIDPreimage { return v } + +var _XdrNames_MemoType = map[int32]string{ + int32(MEMO_NONE): "MEMO_NONE", + int32(MEMO_TEXT): "MEMO_TEXT", + int32(MEMO_ID): "MEMO_ID", + int32(MEMO_HASH): "MEMO_HASH", + int32(MEMO_RETURN): "MEMO_RETURN", +} +var _XdrValues_MemoType = map[string]int32{ + "MEMO_NONE": int32(MEMO_NONE), + "MEMO_TEXT": int32(MEMO_TEXT), + "MEMO_ID": int32(MEMO_ID), + "MEMO_HASH": int32(MEMO_HASH), + "MEMO_RETURN": int32(MEMO_RETURN), +} + +func (MemoType) XdrEnumNames() map[int32]string { + return _XdrNames_MemoType +} +func (v MemoType) String() string { + if s, ok := _XdrNames_MemoType[int32(v)]; ok { + return s + } + return fmt.Sprintf("MemoType#%d", v) +} +func (v *MemoType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_MemoType[stok]; ok { + *v = MemoType(val) + return nil + } else if stok == "MemoType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid MemoType.", stok)) + } +} +func (v MemoType) GetU32() uint32 { return uint32(v) } +func (v *MemoType) SetU32(n uint32) { *v = MemoType(n) } +func (v *MemoType) XdrPointer() interface{} { return v } +func (MemoType) XdrTypeName() string { return "MemoType" } +func (v MemoType) XdrValue() interface{} { return v } +func (v *MemoType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_MemoType = *MemoType + +func XDR_MemoType(v *MemoType) *MemoType { return v } + +var _XdrTags_Memo = map[int32]bool{ + XdrToI32(MEMO_NONE): true, + XdrToI32(MEMO_TEXT): true, + XdrToI32(MEMO_ID): true, + XdrToI32(MEMO_HASH): true, + XdrToI32(MEMO_RETURN): true, +} + +func (_ Memo) XdrValidTags() map[int32]bool { + return _XdrTags_Memo +} +func (u *Memo) Text() *string { + switch u.Type { + case MEMO_TEXT: + if v, ok := u._u.(*string); ok { + return v + } else { + var zero string + u._u = &zero + return &zero + } + default: + XdrPanic("Memo.Text accessed when Type == %v", u.Type) + 
return nil + } +} +func (u *Memo) Id() *Uint64 { + switch u.Type { + case MEMO_ID: + if v, ok := u._u.(*Uint64); ok { + return v + } else { + var zero Uint64 + u._u = &zero + return &zero + } + default: + XdrPanic("Memo.Id accessed when Type == %v", u.Type) + return nil + } +} + +// the hash of what to pull from the content server +func (u *Memo) Hash() *Hash { + switch u.Type { + case MEMO_HASH: + if v, ok := u._u.(*Hash); ok { + return v + } else { + var zero Hash + u._u = &zero + return &zero + } + default: + XdrPanic("Memo.Hash accessed when Type == %v", u.Type) + return nil + } +} + +// the hash of the tx you are rejecting +func (u *Memo) RetHash() *Hash { + switch u.Type { + case MEMO_RETURN: + if v, ok := u._u.(*Hash); ok { + return v + } else { + var zero Hash + u._u = &zero + return &zero + } + default: + XdrPanic("Memo.RetHash accessed when Type == %v", u.Type) + return nil + } +} +func (u Memo) XdrValid() bool { + switch u.Type { + case MEMO_NONE, MEMO_TEXT, MEMO_ID, MEMO_HASH, MEMO_RETURN: + return true + } + return false +} +func (u *Memo) XdrUnionTag() XdrNum32 { + return XDR_MemoType(&u.Type) +} +func (u *Memo) XdrUnionTagName() string { + return "Type" +} +func (u *Memo) XdrUnionBody() XdrType { + switch u.Type { + case MEMO_NONE: + return nil + case MEMO_TEXT: + return XdrString{u.Text(), 28} + case MEMO_ID: + return XDR_Uint64(u.Id()) + case MEMO_HASH: + return XDR_Hash(u.Hash()) + case MEMO_RETURN: + return XDR_Hash(u.RetHash()) + } + return nil +} +func (u *Memo) XdrUnionBodyName() string { + switch u.Type { + case MEMO_NONE: + return "" + case MEMO_TEXT: + return "Text" + case MEMO_ID: + return "Id" + case MEMO_HASH: + return "Hash" + case MEMO_RETURN: + return "RetHash" + } + return "" +} + +type XdrType_Memo = *Memo + +func (v *Memo) XdrPointer() interface{} { return v } +func (Memo) XdrTypeName() string { return "Memo" } +func (v Memo) XdrValue() interface{} { return v } +func (v *Memo) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *Memo) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_MemoType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case MEMO_NONE: + return + case MEMO_TEXT: + x.Marshal(x.Sprintf("%stext", name), XdrString{u.Text(), 28}) + return + case MEMO_ID: + x.Marshal(x.Sprintf("%sid", name), XDR_Uint64(u.Id())) + return + case MEMO_HASH: + x.Marshal(x.Sprintf("%shash", name), XDR_Hash(u.Hash())) + return + case MEMO_RETURN: + x.Marshal(x.Sprintf("%sretHash", name), XDR_Hash(u.RetHash())) + return + } + XdrPanic("invalid Type (%v) in Memo", u.Type) +} +func XDR_Memo(v *Memo) *Memo { return v } + +type XdrType_TimeBounds = *TimeBounds + +func (v *TimeBounds) XdrPointer() interface{} { return v } +func (TimeBounds) XdrTypeName() string { return "TimeBounds" } +func (v TimeBounds) XdrValue() interface{} { return v } +func (v *TimeBounds) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TimeBounds) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sminTime", name), XDR_TimePoint(&v.MinTime)) + x.Marshal(x.Sprintf("%smaxTime", name), XDR_TimePoint(&v.MaxTime)) +} +func XDR_TimeBounds(v *TimeBounds) *TimeBounds { return v } + +var _XdrTags_XdrAnon_TransactionV0_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_TransactionV0_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_TransactionV0_Ext +} +func (u XdrAnon_TransactionV0_Ext) XdrValid() bool { + switch u.V { + case 0: + 
return true + } + return false +} +func (u *XdrAnon_TransactionV0_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_TransactionV0_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_TransactionV0_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_TransactionV0_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_TransactionV0_Ext = *XdrAnon_TransactionV0_Ext + +func (v *XdrAnon_TransactionV0_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_TransactionV0_Ext) XdrTypeName() string { return "XdrAnon_TransactionV0_Ext" } +func (v XdrAnon_TransactionV0_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_TransactionV0_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_TransactionV0_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_TransactionV0_Ext", u.V) +} +func XDR_XdrAnon_TransactionV0_Ext(v *XdrAnon_TransactionV0_Ext) *XdrAnon_TransactionV0_Ext { return v } + +type _XdrPtr_TimeBounds struct { + p **TimeBounds +} +type _ptrflag_TimeBounds _XdrPtr_TimeBounds + +func (v _ptrflag_TimeBounds) String() string { + if *v.p == nil { + return "nil" + } + return "non-nil" +} +func (v _ptrflag_TimeBounds) Scan(ss fmt.ScanState, r rune) error { + tok, err := ss.Token(true, func(c rune) bool { + return c == '-' || (c >= 'a' && c <= 'z') + }) + if err != nil { + return err + } + switch string(tok) { + case "nil": + v.SetU32(0) + case "non-nil": + v.SetU32(1) + default: + return XdrError("TimeBounds flag should be \"nil\" or \"non-nil\"") + } + return nil +} +func (v _ptrflag_TimeBounds) GetU32() uint32 { + if *v.p == nil { + return 0 + } + return 1 +} +func (v _ptrflag_TimeBounds) SetU32(nv uint32) { + switch nv { + case 0: + *v.p = nil + case 1: + if *v.p == nil { + *v.p = new(TimeBounds) + } + default: + XdrPanic("*TimeBounds present flag value %d should be 0 or 1", nv) + } +} +func (_ptrflag_TimeBounds) XdrTypeName() string { return "TimeBounds?" 
} +func (v _ptrflag_TimeBounds) XdrPointer() interface{} { return nil } +func (v _ptrflag_TimeBounds) XdrValue() interface{} { return v.GetU32() != 0 } +func (v _ptrflag_TimeBounds) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _ptrflag_TimeBounds) XdrBound() uint32 { return 1 } +func (v _XdrPtr_TimeBounds) GetPresent() bool { return *v.p != nil } +func (v _XdrPtr_TimeBounds) SetPresent(present bool) { + if !present { + *v.p = nil + } else if *v.p == nil { + *v.p = new(TimeBounds) + } +} +func (v _XdrPtr_TimeBounds) XdrMarshalValue(x XDR, name string) { + if *v.p != nil { + XDR_TimeBounds(*v.p).XdrMarshal(x, name) + } +} +func (v _XdrPtr_TimeBounds) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v _XdrPtr_TimeBounds) XdrRecurse(x XDR, name string) { + x.Marshal(name, _ptrflag_TimeBounds(v)) + v.XdrMarshalValue(x, name) +} +func (_XdrPtr_TimeBounds) XdrTypeName() string { return "TimeBounds*" } +func (v _XdrPtr_TimeBounds) XdrPointer() interface{} { return v.p } +func (v _XdrPtr_TimeBounds) XdrValue() interface{} { return *v.p } + +type _XdrVec_100_Operation []Operation + +func (_XdrVec_100_Operation) XdrBound() uint32 { + const bound uint32 = 100 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_100_Operation) XdrCheckLen(length uint32) { + if length > uint32(100) { + XdrPanic("_XdrVec_100_Operation length %d exceeds bound 100", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_100_Operation length %d exceeds max int", length) + } +} +func (v _XdrVec_100_Operation) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_100_Operation) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(100); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]Operation, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_100_Operation) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_Operation(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_100_Operation) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 100} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_100_Operation) XdrTypeName() string { return "Operation<>" } +func (v *_XdrVec_100_Operation) XdrPointer() interface{} { return (*[]Operation)(v) } +func (v _XdrVec_100_Operation) XdrValue() interface{} { return ([]Operation)(v) } +func (v *_XdrVec_100_Operation) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_TransactionV0 = *TransactionV0 + +func (v *TransactionV0) XdrPointer() interface{} { return v } +func (TransactionV0) XdrTypeName() string { return "TransactionV0" } +func (v TransactionV0) XdrValue() interface{} { return v } +func (v *TransactionV0) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionV0) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssourceAccountEd25519", name), XDR_Uint256(&v.SourceAccountEd25519)) + x.Marshal(x.Sprintf("%sfee", name), XDR_Uint32(&v.Fee)) + x.Marshal(x.Sprintf("%sseqNum", 
name), XDR_SequenceNumber(&v.SeqNum)) + x.Marshal(x.Sprintf("%stimeBounds", name), _XdrPtr_TimeBounds{&v.TimeBounds}) + x.Marshal(x.Sprintf("%smemo", name), XDR_Memo(&v.Memo)) + x.Marshal(x.Sprintf("%soperations", name), (*_XdrVec_100_Operation)(&v.Operations)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_TransactionV0_Ext(&v.Ext)) +} +func XDR_TransactionV0(v *TransactionV0) *TransactionV0 { return v } + +type _XdrVec_20_DecoratedSignature []DecoratedSignature + +func (_XdrVec_20_DecoratedSignature) XdrBound() uint32 { + const bound uint32 = 20 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_20_DecoratedSignature) XdrCheckLen(length uint32) { + if length > uint32(20) { + XdrPanic("_XdrVec_20_DecoratedSignature length %d exceeds bound 20", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_20_DecoratedSignature length %d exceeds max int", length) + } +} +func (v _XdrVec_20_DecoratedSignature) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_20_DecoratedSignature) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(20); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]DecoratedSignature, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_20_DecoratedSignature) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_DecoratedSignature(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_20_DecoratedSignature) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 20} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_20_DecoratedSignature) XdrTypeName() string { return "DecoratedSignature<>" } +func (v *_XdrVec_20_DecoratedSignature) XdrPointer() interface{} { return (*[]DecoratedSignature)(v) } +func (v _XdrVec_20_DecoratedSignature) XdrValue() interface{} { return ([]DecoratedSignature)(v) } +func (v *_XdrVec_20_DecoratedSignature) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_TransactionV0Envelope = *TransactionV0Envelope + +func (v *TransactionV0Envelope) XdrPointer() interface{} { return v } +func (TransactionV0Envelope) XdrTypeName() string { return "TransactionV0Envelope" } +func (v TransactionV0Envelope) XdrValue() interface{} { return v } +func (v *TransactionV0Envelope) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionV0Envelope) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%stx", name), XDR_TransactionV0(&v.Tx)) + x.Marshal(x.Sprintf("%ssignatures", name), (*_XdrVec_20_DecoratedSignature)(&v.Signatures)) +} +func XDR_TransactionV0Envelope(v *TransactionV0Envelope) *TransactionV0Envelope { return v } + +var _XdrTags_XdrAnon_Transaction_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_Transaction_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_Transaction_Ext +} +func (u XdrAnon_Transaction_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_Transaction_Ext) XdrUnionTag() XdrNum32 { + return 
XDR_int32(&u.V) +} +func (u *XdrAnon_Transaction_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_Transaction_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_Transaction_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_Transaction_Ext = *XdrAnon_Transaction_Ext + +func (v *XdrAnon_Transaction_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_Transaction_Ext) XdrTypeName() string { return "XdrAnon_Transaction_Ext" } +func (v XdrAnon_Transaction_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_Transaction_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_Transaction_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_Transaction_Ext", u.V) +} +func XDR_XdrAnon_Transaction_Ext(v *XdrAnon_Transaction_Ext) *XdrAnon_Transaction_Ext { return v } + +type XdrType_Transaction = *Transaction + +func (v *Transaction) XdrPointer() interface{} { return v } +func (Transaction) XdrTypeName() string { return "Transaction" } +func (v Transaction) XdrValue() interface{} { return v } +func (v *Transaction) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *Transaction) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssourceAccount", name), XDR_MuxedAccount(&v.SourceAccount)) + x.Marshal(x.Sprintf("%sfee", name), XDR_Uint32(&v.Fee)) + x.Marshal(x.Sprintf("%sseqNum", name), XDR_SequenceNumber(&v.SeqNum)) + x.Marshal(x.Sprintf("%stimeBounds", name), _XdrPtr_TimeBounds{&v.TimeBounds}) + x.Marshal(x.Sprintf("%smemo", name), XDR_Memo(&v.Memo)) + x.Marshal(x.Sprintf("%soperations", name), (*_XdrVec_100_Operation)(&v.Operations)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_Transaction_Ext(&v.Ext)) +} +func XDR_Transaction(v *Transaction) *Transaction { return v } + +type XdrType_TransactionV1Envelope = *TransactionV1Envelope + +func (v *TransactionV1Envelope) XdrPointer() interface{} { return v } +func (TransactionV1Envelope) XdrTypeName() string { return "TransactionV1Envelope" } +func (v TransactionV1Envelope) XdrValue() interface{} { return v } +func (v *TransactionV1Envelope) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionV1Envelope) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%stx", name), XDR_Transaction(&v.Tx)) + x.Marshal(x.Sprintf("%ssignatures", name), (*_XdrVec_20_DecoratedSignature)(&v.Signatures)) +} +func XDR_TransactionV1Envelope(v *TransactionV1Envelope) *TransactionV1Envelope { return v } + +var _XdrTags_XdrAnon_FeeBumpTransaction_InnerTx = map[int32]bool{ + XdrToI32(ENVELOPE_TYPE_TX): true, +} + +func (_ XdrAnon_FeeBumpTransaction_InnerTx) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_FeeBumpTransaction_InnerTx +} +func (u *XdrAnon_FeeBumpTransaction_InnerTx) V1() *TransactionV1Envelope { + switch u.Type { + case ENVELOPE_TYPE_TX: + if v, ok := u._u.(*TransactionV1Envelope); ok { + return v + } else { + var zero TransactionV1Envelope + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_FeeBumpTransaction_InnerTx.V1 accessed when Type == %v", u.Type) + return nil + } +} +func (u XdrAnon_FeeBumpTransaction_InnerTx) XdrValid() bool { + switch u.Type { + 
case ENVELOPE_TYPE_TX: + return true + } + return false +} +func (u *XdrAnon_FeeBumpTransaction_InnerTx) XdrUnionTag() XdrNum32 { + return XDR_EnvelopeType(&u.Type) +} +func (u *XdrAnon_FeeBumpTransaction_InnerTx) XdrUnionTagName() string { + return "Type" +} +func (u *XdrAnon_FeeBumpTransaction_InnerTx) XdrUnionBody() XdrType { + switch u.Type { + case ENVELOPE_TYPE_TX: + return XDR_TransactionV1Envelope(u.V1()) + } + return nil +} +func (u *XdrAnon_FeeBumpTransaction_InnerTx) XdrUnionBodyName() string { + switch u.Type { + case ENVELOPE_TYPE_TX: + return "V1" + } + return "" +} + +type XdrType_XdrAnon_FeeBumpTransaction_InnerTx = *XdrAnon_FeeBumpTransaction_InnerTx + +func (v *XdrAnon_FeeBumpTransaction_InnerTx) XdrPointer() interface{} { return v } +func (XdrAnon_FeeBumpTransaction_InnerTx) XdrTypeName() string { + return "XdrAnon_FeeBumpTransaction_InnerTx" +} +func (v XdrAnon_FeeBumpTransaction_InnerTx) XdrValue() interface{} { return v } +func (v *XdrAnon_FeeBumpTransaction_InnerTx) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_FeeBumpTransaction_InnerTx) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_EnvelopeType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ENVELOPE_TYPE_TX: + x.Marshal(x.Sprintf("%sv1", name), XDR_TransactionV1Envelope(u.V1())) + return + } + XdrPanic("invalid Type (%v) in XdrAnon_FeeBumpTransaction_InnerTx", u.Type) +} +func (v *XdrAnon_FeeBumpTransaction_InnerTx) XdrInitialize() { + var zero EnvelopeType + switch zero { + case ENVELOPE_TYPE_TX: + default: + if v.Type == zero { + v.Type = ENVELOPE_TYPE_TX + } + } +} +func XDR_XdrAnon_FeeBumpTransaction_InnerTx(v *XdrAnon_FeeBumpTransaction_InnerTx) *XdrAnon_FeeBumpTransaction_InnerTx { + return v +} + +var _XdrTags_XdrAnon_FeeBumpTransaction_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_FeeBumpTransaction_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_FeeBumpTransaction_Ext +} +func (u XdrAnon_FeeBumpTransaction_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_FeeBumpTransaction_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_FeeBumpTransaction_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_FeeBumpTransaction_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_FeeBumpTransaction_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_FeeBumpTransaction_Ext = *XdrAnon_FeeBumpTransaction_Ext + +func (v *XdrAnon_FeeBumpTransaction_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_FeeBumpTransaction_Ext) XdrTypeName() string { return "XdrAnon_FeeBumpTransaction_Ext" } +func (v XdrAnon_FeeBumpTransaction_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_FeeBumpTransaction_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_FeeBumpTransaction_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_FeeBumpTransaction_Ext", u.V) +} +func XDR_XdrAnon_FeeBumpTransaction_Ext(v *XdrAnon_FeeBumpTransaction_Ext) *XdrAnon_FeeBumpTransaction_Ext { + return v +} + +type XdrType_FeeBumpTransaction = *FeeBumpTransaction + +func (v *FeeBumpTransaction) XdrPointer() interface{} { return 
v } +func (FeeBumpTransaction) XdrTypeName() string { return "FeeBumpTransaction" } +func (v FeeBumpTransaction) XdrValue() interface{} { return v } +func (v *FeeBumpTransaction) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *FeeBumpTransaction) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sfeeSource", name), XDR_MuxedAccount(&v.FeeSource)) + x.Marshal(x.Sprintf("%sfee", name), XDR_Int64(&v.Fee)) + x.Marshal(x.Sprintf("%sinnerTx", name), XDR_XdrAnon_FeeBumpTransaction_InnerTx(&v.InnerTx)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_FeeBumpTransaction_Ext(&v.Ext)) +} +func XDR_FeeBumpTransaction(v *FeeBumpTransaction) *FeeBumpTransaction { return v } + +type XdrType_FeeBumpTransactionEnvelope = *FeeBumpTransactionEnvelope + +func (v *FeeBumpTransactionEnvelope) XdrPointer() interface{} { return v } +func (FeeBumpTransactionEnvelope) XdrTypeName() string { return "FeeBumpTransactionEnvelope" } +func (v FeeBumpTransactionEnvelope) XdrValue() interface{} { return v } +func (v *FeeBumpTransactionEnvelope) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *FeeBumpTransactionEnvelope) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%stx", name), XDR_FeeBumpTransaction(&v.Tx)) + x.Marshal(x.Sprintf("%ssignatures", name), (*_XdrVec_20_DecoratedSignature)(&v.Signatures)) +} +func XDR_FeeBumpTransactionEnvelope(v *FeeBumpTransactionEnvelope) *FeeBumpTransactionEnvelope { + return v +} + +var _XdrTags_TransactionEnvelope = map[int32]bool{ + XdrToI32(ENVELOPE_TYPE_TX_V0): true, + XdrToI32(ENVELOPE_TYPE_TX): true, + XdrToI32(ENVELOPE_TYPE_TX_FEE_BUMP): true, +} + +func (_ TransactionEnvelope) XdrValidTags() map[int32]bool { + return _XdrTags_TransactionEnvelope +} +func (u *TransactionEnvelope) V0() *TransactionV0Envelope { + switch u.Type { + case ENVELOPE_TYPE_TX_V0: + if v, ok := u._u.(*TransactionV0Envelope); ok { + return v + } else { + var zero TransactionV0Envelope + u._u = &zero + return &zero + } + default: + XdrPanic("TransactionEnvelope.V0 accessed when Type == %v", u.Type) + return nil + } +} +func (u *TransactionEnvelope) V1() *TransactionV1Envelope { + switch u.Type { + case ENVELOPE_TYPE_TX: + if v, ok := u._u.(*TransactionV1Envelope); ok { + return v + } else { + var zero TransactionV1Envelope + u._u = &zero + return &zero + } + default: + XdrPanic("TransactionEnvelope.V1 accessed when Type == %v", u.Type) + return nil + } +} +func (u *TransactionEnvelope) FeeBump() *FeeBumpTransactionEnvelope { + switch u.Type { + case ENVELOPE_TYPE_TX_FEE_BUMP: + if v, ok := u._u.(*FeeBumpTransactionEnvelope); ok { + return v + } else { + var zero FeeBumpTransactionEnvelope + u._u = &zero + return &zero + } + default: + XdrPanic("TransactionEnvelope.FeeBump accessed when Type == %v", u.Type) + return nil + } +} +func (u TransactionEnvelope) XdrValid() bool { + switch u.Type { + case ENVELOPE_TYPE_TX_V0, ENVELOPE_TYPE_TX, ENVELOPE_TYPE_TX_FEE_BUMP: + return true + } + return false +} +func (u *TransactionEnvelope) XdrUnionTag() XdrNum32 { + return XDR_EnvelopeType(&u.Type) +} +func (u *TransactionEnvelope) XdrUnionTagName() string { + return "Type" +} +func (u *TransactionEnvelope) XdrUnionBody() XdrType { + switch u.Type { + case ENVELOPE_TYPE_TX_V0: + return XDR_TransactionV0Envelope(u.V0()) + case ENVELOPE_TYPE_TX: + return XDR_TransactionV1Envelope(u.V1()) + case ENVELOPE_TYPE_TX_FEE_BUMP: + return 
XDR_FeeBumpTransactionEnvelope(u.FeeBump()) + } + return nil +} +func (u *TransactionEnvelope) XdrUnionBodyName() string { + switch u.Type { + case ENVELOPE_TYPE_TX_V0: + return "V0" + case ENVELOPE_TYPE_TX: + return "V1" + case ENVELOPE_TYPE_TX_FEE_BUMP: + return "FeeBump" + } + return "" +} + +type XdrType_TransactionEnvelope = *TransactionEnvelope + +func (v *TransactionEnvelope) XdrPointer() interface{} { return v } +func (TransactionEnvelope) XdrTypeName() string { return "TransactionEnvelope" } +func (v TransactionEnvelope) XdrValue() interface{} { return v } +func (v *TransactionEnvelope) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *TransactionEnvelope) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_EnvelopeType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ENVELOPE_TYPE_TX_V0: + x.Marshal(x.Sprintf("%sv0", name), XDR_TransactionV0Envelope(u.V0())) + return + case ENVELOPE_TYPE_TX: + x.Marshal(x.Sprintf("%sv1", name), XDR_TransactionV1Envelope(u.V1())) + return + case ENVELOPE_TYPE_TX_FEE_BUMP: + x.Marshal(x.Sprintf("%sfeeBump", name), XDR_FeeBumpTransactionEnvelope(u.FeeBump())) + return + } + XdrPanic("invalid Type (%v) in TransactionEnvelope", u.Type) +} +func XDR_TransactionEnvelope(v *TransactionEnvelope) *TransactionEnvelope { return v } + +var _XdrTags_XdrAnon_TransactionSignaturePayload_TaggedTransaction = map[int32]bool{ + XdrToI32(ENVELOPE_TYPE_TX): true, + XdrToI32(ENVELOPE_TYPE_TX_FEE_BUMP): true, +} + +func (_ XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_TransactionSignaturePayload_TaggedTransaction +} +func (u *XdrAnon_TransactionSignaturePayload_TaggedTransaction) Tx() *Transaction { + switch u.Type { + case ENVELOPE_TYPE_TX: + if v, ok := u._u.(*Transaction); ok { + return v + } else { + var zero Transaction + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_TransactionSignaturePayload_TaggedTransaction.Tx accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_TransactionSignaturePayload_TaggedTransaction) FeeBump() *FeeBumpTransaction { + switch u.Type { + case ENVELOPE_TYPE_TX_FEE_BUMP: + if v, ok := u._u.(*FeeBumpTransaction); ok { + return v + } else { + var zero FeeBumpTransaction + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_TransactionSignaturePayload_TaggedTransaction.FeeBump accessed when Type == %v", u.Type) + return nil + } +} +func (u XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrValid() bool { + switch u.Type { + case ENVELOPE_TYPE_TX, ENVELOPE_TYPE_TX_FEE_BUMP: + return true + } + return false +} +func (u *XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrUnionTag() XdrNum32 { + return XDR_EnvelopeType(&u.Type) +} +func (u *XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrUnionTagName() string { + return "Type" +} +func (u *XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrUnionBody() XdrType { + switch u.Type { + case ENVELOPE_TYPE_TX: + return XDR_Transaction(u.Tx()) + case ENVELOPE_TYPE_TX_FEE_BUMP: + return XDR_FeeBumpTransaction(u.FeeBump()) + } + return nil +} +func (u *XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrUnionBodyName() string { + switch u.Type { + case ENVELOPE_TYPE_TX: + return "Tx" + case ENVELOPE_TYPE_TX_FEE_BUMP: + return "FeeBump" + } + return "" +} + +type XdrType_XdrAnon_TransactionSignaturePayload_TaggedTransaction = *XdrAnon_TransactionSignaturePayload_TaggedTransaction 
+ +func (v *XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrPointer() interface{} { return v } +func (XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrTypeName() string { + return "XdrAnon_TransactionSignaturePayload_TaggedTransaction" +} +func (v XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrValue() interface{} { return v } +func (v *XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrMarshal(x XDR, name string) { + x.Marshal(name, v) +} +func (u *XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_EnvelopeType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case ENVELOPE_TYPE_TX: + x.Marshal(x.Sprintf("%stx", name), XDR_Transaction(u.Tx())) + return + case ENVELOPE_TYPE_TX_FEE_BUMP: + x.Marshal(x.Sprintf("%sfeeBump", name), XDR_FeeBumpTransaction(u.FeeBump())) + return + } + XdrPanic("invalid Type (%v) in XdrAnon_TransactionSignaturePayload_TaggedTransaction", u.Type) +} +func (v *XdrAnon_TransactionSignaturePayload_TaggedTransaction) XdrInitialize() { + var zero EnvelopeType + switch zero { + case ENVELOPE_TYPE_TX, ENVELOPE_TYPE_TX_FEE_BUMP: + default: + if v.Type == zero { + v.Type = ENVELOPE_TYPE_TX + } + } +} +func XDR_XdrAnon_TransactionSignaturePayload_TaggedTransaction(v *XdrAnon_TransactionSignaturePayload_TaggedTransaction) *XdrAnon_TransactionSignaturePayload_TaggedTransaction { + return v +} + +type XdrType_TransactionSignaturePayload = *TransactionSignaturePayload + +func (v *TransactionSignaturePayload) XdrPointer() interface{} { return v } +func (TransactionSignaturePayload) XdrTypeName() string { return "TransactionSignaturePayload" } +func (v TransactionSignaturePayload) XdrValue() interface{} { return v } +func (v *TransactionSignaturePayload) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionSignaturePayload) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%snetworkId", name), XDR_Hash(&v.NetworkId)) + x.Marshal(x.Sprintf("%staggedTransaction", name), XDR_XdrAnon_TransactionSignaturePayload_TaggedTransaction(&v.TaggedTransaction)) +} +func XDR_TransactionSignaturePayload(v *TransactionSignaturePayload) *TransactionSignaturePayload { + return v +} + +var _XdrNames_ClaimAtomType = map[int32]string{ + int32(CLAIM_ATOM_TYPE_V0): "CLAIM_ATOM_TYPE_V0", + int32(CLAIM_ATOM_TYPE_ORDER_BOOK): "CLAIM_ATOM_TYPE_ORDER_BOOK", + int32(CLAIM_ATOM_TYPE_LIQUIDITY_POOL): "CLAIM_ATOM_TYPE_LIQUIDITY_POOL", +} +var _XdrValues_ClaimAtomType = map[string]int32{ + "CLAIM_ATOM_TYPE_V0": int32(CLAIM_ATOM_TYPE_V0), + "CLAIM_ATOM_TYPE_ORDER_BOOK": int32(CLAIM_ATOM_TYPE_ORDER_BOOK), + "CLAIM_ATOM_TYPE_LIQUIDITY_POOL": int32(CLAIM_ATOM_TYPE_LIQUIDITY_POOL), +} + +func (ClaimAtomType) XdrEnumNames() map[int32]string { + return _XdrNames_ClaimAtomType +} +func (v ClaimAtomType) String() string { + if s, ok := _XdrNames_ClaimAtomType[int32(v)]; ok { + return s + } + return fmt.Sprintf("ClaimAtomType#%d", v) +} +func (v *ClaimAtomType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ClaimAtomType[stok]; ok { + *v = ClaimAtomType(val) + return nil + } else if stok == "ClaimAtomType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ClaimAtomType.", 
stok)) + } +} +func (v ClaimAtomType) GetU32() uint32 { return uint32(v) } +func (v *ClaimAtomType) SetU32(n uint32) { *v = ClaimAtomType(n) } +func (v *ClaimAtomType) XdrPointer() interface{} { return v } +func (ClaimAtomType) XdrTypeName() string { return "ClaimAtomType" } +func (v ClaimAtomType) XdrValue() interface{} { return v } +func (v *ClaimAtomType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ClaimAtomType = *ClaimAtomType + +func XDR_ClaimAtomType(v *ClaimAtomType) *ClaimAtomType { return v } + +type XdrType_ClaimOfferAtomV0 = *ClaimOfferAtomV0 + +func (v *ClaimOfferAtomV0) XdrPointer() interface{} { return v } +func (ClaimOfferAtomV0) XdrTypeName() string { return "ClaimOfferAtomV0" } +func (v ClaimOfferAtomV0) XdrValue() interface{} { return v } +func (v *ClaimOfferAtomV0) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ClaimOfferAtomV0) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssellerEd25519", name), XDR_Uint256(&v.SellerEd25519)) + x.Marshal(x.Sprintf("%sofferID", name), XDR_Int64(&v.OfferID)) + x.Marshal(x.Sprintf("%sassetSold", name), XDR_Asset(&v.AssetSold)) + x.Marshal(x.Sprintf("%samountSold", name), XDR_Int64(&v.AmountSold)) + x.Marshal(x.Sprintf("%sassetBought", name), XDR_Asset(&v.AssetBought)) + x.Marshal(x.Sprintf("%samountBought", name), XDR_Int64(&v.AmountBought)) +} +func XDR_ClaimOfferAtomV0(v *ClaimOfferAtomV0) *ClaimOfferAtomV0 { return v } + +type XdrType_ClaimOfferAtom = *ClaimOfferAtom + +func (v *ClaimOfferAtom) XdrPointer() interface{} { return v } +func (ClaimOfferAtom) XdrTypeName() string { return "ClaimOfferAtom" } +func (v ClaimOfferAtom) XdrValue() interface{} { return v } +func (v *ClaimOfferAtom) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ClaimOfferAtom) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%ssellerID", name), XDR_AccountID(&v.SellerID)) + x.Marshal(x.Sprintf("%sofferID", name), XDR_Int64(&v.OfferID)) + x.Marshal(x.Sprintf("%sassetSold", name), XDR_Asset(&v.AssetSold)) + x.Marshal(x.Sprintf("%samountSold", name), XDR_Int64(&v.AmountSold)) + x.Marshal(x.Sprintf("%sassetBought", name), XDR_Asset(&v.AssetBought)) + x.Marshal(x.Sprintf("%samountBought", name), XDR_Int64(&v.AmountBought)) +} +func XDR_ClaimOfferAtom(v *ClaimOfferAtom) *ClaimOfferAtom { return v } + +type XdrType_ClaimLiquidityAtom = *ClaimLiquidityAtom + +func (v *ClaimLiquidityAtom) XdrPointer() interface{} { return v } +func (ClaimLiquidityAtom) XdrTypeName() string { return "ClaimLiquidityAtom" } +func (v ClaimLiquidityAtom) XdrValue() interface{} { return v } +func (v *ClaimLiquidityAtom) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ClaimLiquidityAtom) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sliquidityPoolID", name), XDR_PoolID(&v.LiquidityPoolID)) + x.Marshal(x.Sprintf("%sassetSold", name), XDR_Asset(&v.AssetSold)) + x.Marshal(x.Sprintf("%samountSold", name), XDR_Int64(&v.AmountSold)) + x.Marshal(x.Sprintf("%sassetBought", name), XDR_Asset(&v.AssetBought)) + x.Marshal(x.Sprintf("%samountBought", name), XDR_Int64(&v.AmountBought)) +} +func XDR_ClaimLiquidityAtom(v *ClaimLiquidityAtom) *ClaimLiquidityAtom { return v } + +var _XdrTags_ClaimAtom = map[int32]bool{ + XdrToI32(CLAIM_ATOM_TYPE_V0): true, + XdrToI32(CLAIM_ATOM_TYPE_ORDER_BOOK): true, + XdrToI32(CLAIM_ATOM_TYPE_LIQUIDITY_POOL): true, 
+} + +func (_ ClaimAtom) XdrValidTags() map[int32]bool { + return _XdrTags_ClaimAtom +} +func (u *ClaimAtom) V0() *ClaimOfferAtomV0 { + switch u.Type { + case CLAIM_ATOM_TYPE_V0: + if v, ok := u._u.(*ClaimOfferAtomV0); ok { + return v + } else { + var zero ClaimOfferAtomV0 + u._u = &zero + return &zero + } + default: + XdrPanic("ClaimAtom.V0 accessed when Type == %v", u.Type) + return nil + } +} +func (u *ClaimAtom) OrderBook() *ClaimOfferAtom { + switch u.Type { + case CLAIM_ATOM_TYPE_ORDER_BOOK: + if v, ok := u._u.(*ClaimOfferAtom); ok { + return v + } else { + var zero ClaimOfferAtom + u._u = &zero + return &zero + } + default: + XdrPanic("ClaimAtom.OrderBook accessed when Type == %v", u.Type) + return nil + } +} +func (u *ClaimAtom) LiquidityPool() *ClaimLiquidityAtom { + switch u.Type { + case CLAIM_ATOM_TYPE_LIQUIDITY_POOL: + if v, ok := u._u.(*ClaimLiquidityAtom); ok { + return v + } else { + var zero ClaimLiquidityAtom + u._u = &zero + return &zero + } + default: + XdrPanic("ClaimAtom.LiquidityPool accessed when Type == %v", u.Type) + return nil + } +} +func (u ClaimAtom) XdrValid() bool { + switch u.Type { + case CLAIM_ATOM_TYPE_V0, CLAIM_ATOM_TYPE_ORDER_BOOK, CLAIM_ATOM_TYPE_LIQUIDITY_POOL: + return true + } + return false +} +func (u *ClaimAtom) XdrUnionTag() XdrNum32 { + return XDR_ClaimAtomType(&u.Type) +} +func (u *ClaimAtom) XdrUnionTagName() string { + return "Type" +} +func (u *ClaimAtom) XdrUnionBody() XdrType { + switch u.Type { + case CLAIM_ATOM_TYPE_V0: + return XDR_ClaimOfferAtomV0(u.V0()) + case CLAIM_ATOM_TYPE_ORDER_BOOK: + return XDR_ClaimOfferAtom(u.OrderBook()) + case CLAIM_ATOM_TYPE_LIQUIDITY_POOL: + return XDR_ClaimLiquidityAtom(u.LiquidityPool()) + } + return nil +} +func (u *ClaimAtom) XdrUnionBodyName() string { + switch u.Type { + case CLAIM_ATOM_TYPE_V0: + return "V0" + case CLAIM_ATOM_TYPE_ORDER_BOOK: + return "OrderBook" + case CLAIM_ATOM_TYPE_LIQUIDITY_POOL: + return "LiquidityPool" + } + return "" +} + +type XdrType_ClaimAtom = *ClaimAtom + +func (v *ClaimAtom) XdrPointer() interface{} { return v } +func (ClaimAtom) XdrTypeName() string { return "ClaimAtom" } +func (v ClaimAtom) XdrValue() interface{} { return v } +func (v *ClaimAtom) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *ClaimAtom) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ClaimAtomType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case CLAIM_ATOM_TYPE_V0: + x.Marshal(x.Sprintf("%sv0", name), XDR_ClaimOfferAtomV0(u.V0())) + return + case CLAIM_ATOM_TYPE_ORDER_BOOK: + x.Marshal(x.Sprintf("%sorderBook", name), XDR_ClaimOfferAtom(u.OrderBook())) + return + case CLAIM_ATOM_TYPE_LIQUIDITY_POOL: + x.Marshal(x.Sprintf("%sliquidityPool", name), XDR_ClaimLiquidityAtom(u.LiquidityPool())) + return + } + XdrPanic("invalid Type (%v) in ClaimAtom", u.Type) +} +func XDR_ClaimAtom(v *ClaimAtom) *ClaimAtom { return v } + +var _XdrNames_CreateAccountResultCode = map[int32]string{ + int32(CREATE_ACCOUNT_SUCCESS): "CREATE_ACCOUNT_SUCCESS", + int32(CREATE_ACCOUNT_MALFORMED): "CREATE_ACCOUNT_MALFORMED", + int32(CREATE_ACCOUNT_UNDERFUNDED): "CREATE_ACCOUNT_UNDERFUNDED", + int32(CREATE_ACCOUNT_LOW_RESERVE): "CREATE_ACCOUNT_LOW_RESERVE", + int32(CREATE_ACCOUNT_ALREADY_EXIST): "CREATE_ACCOUNT_ALREADY_EXIST", +} +var _XdrValues_CreateAccountResultCode = map[string]int32{ + "CREATE_ACCOUNT_SUCCESS": int32(CREATE_ACCOUNT_SUCCESS), + "CREATE_ACCOUNT_MALFORMED": int32(CREATE_ACCOUNT_MALFORMED), + "CREATE_ACCOUNT_UNDERFUNDED": 
int32(CREATE_ACCOUNT_UNDERFUNDED), + "CREATE_ACCOUNT_LOW_RESERVE": int32(CREATE_ACCOUNT_LOW_RESERVE), + "CREATE_ACCOUNT_ALREADY_EXIST": int32(CREATE_ACCOUNT_ALREADY_EXIST), +} + +func (CreateAccountResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_CreateAccountResultCode +} +func (v CreateAccountResultCode) String() string { + if s, ok := _XdrNames_CreateAccountResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("CreateAccountResultCode#%d", v) +} +func (v *CreateAccountResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_CreateAccountResultCode[stok]; ok { + *v = CreateAccountResultCode(val) + return nil + } else if stok == "CreateAccountResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid CreateAccountResultCode.", stok)) + } +} +func (v CreateAccountResultCode) GetU32() uint32 { return uint32(v) } +func (v *CreateAccountResultCode) SetU32(n uint32) { *v = CreateAccountResultCode(n) } +func (v *CreateAccountResultCode) XdrPointer() interface{} { return v } +func (CreateAccountResultCode) XdrTypeName() string { return "CreateAccountResultCode" } +func (v CreateAccountResultCode) XdrValue() interface{} { return v } +func (v *CreateAccountResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_CreateAccountResultCode = *CreateAccountResultCode + +func XDR_CreateAccountResultCode(v *CreateAccountResultCode) *CreateAccountResultCode { return v } + +var _XdrComments_CreateAccountResultCode = map[int32]string{ + int32(CREATE_ACCOUNT_SUCCESS): "account was created", + int32(CREATE_ACCOUNT_MALFORMED): "invalid destination", + int32(CREATE_ACCOUNT_UNDERFUNDED): "not enough funds in source account", + int32(CREATE_ACCOUNT_LOW_RESERVE): "would create an account below the min reserve", + int32(CREATE_ACCOUNT_ALREADY_EXIST): "account already exists", +} + +func (e CreateAccountResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_CreateAccountResultCode +} +func (_ CreateAccountResult) XdrValidTags() map[int32]bool { + return nil +} +func (u CreateAccountResult) XdrValid() bool { + return true +} +func (u *CreateAccountResult) XdrUnionTag() XdrNum32 { + return XDR_CreateAccountResultCode(&u.Code) +} +func (u *CreateAccountResult) XdrUnionTagName() string { + return "Code" +} +func (u *CreateAccountResult) XdrUnionBody() XdrType { + switch u.Code { + case CREATE_ACCOUNT_SUCCESS: + return nil + default: + return nil + } +} +func (u *CreateAccountResult) XdrUnionBodyName() string { + switch u.Code { + case CREATE_ACCOUNT_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_CreateAccountResult = *CreateAccountResult + +func (v *CreateAccountResult) XdrPointer() interface{} { return v } +func (CreateAccountResult) XdrTypeName() string { return "CreateAccountResult" } +func (v CreateAccountResult) XdrValue() interface{} { return v } +func (v *CreateAccountResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *CreateAccountResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_CreateAccountResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case CREATE_ACCOUNT_SUCCESS: + return + default: + return + } +} +func XDR_CreateAccountResult(v *CreateAccountResult) *CreateAccountResult { return v } + +var 
_XdrNames_PaymentResultCode = map[int32]string{ + int32(PAYMENT_SUCCESS): "PAYMENT_SUCCESS", + int32(PAYMENT_MALFORMED): "PAYMENT_MALFORMED", + int32(PAYMENT_UNDERFUNDED): "PAYMENT_UNDERFUNDED", + int32(PAYMENT_SRC_NO_TRUST): "PAYMENT_SRC_NO_TRUST", + int32(PAYMENT_SRC_NOT_AUTHORIZED): "PAYMENT_SRC_NOT_AUTHORIZED", + int32(PAYMENT_NO_DESTINATION): "PAYMENT_NO_DESTINATION", + int32(PAYMENT_NO_TRUST): "PAYMENT_NO_TRUST", + int32(PAYMENT_NOT_AUTHORIZED): "PAYMENT_NOT_AUTHORIZED", + int32(PAYMENT_LINE_FULL): "PAYMENT_LINE_FULL", + int32(PAYMENT_NO_ISSUER): "PAYMENT_NO_ISSUER", +} +var _XdrValues_PaymentResultCode = map[string]int32{ + "PAYMENT_SUCCESS": int32(PAYMENT_SUCCESS), + "PAYMENT_MALFORMED": int32(PAYMENT_MALFORMED), + "PAYMENT_UNDERFUNDED": int32(PAYMENT_UNDERFUNDED), + "PAYMENT_SRC_NO_TRUST": int32(PAYMENT_SRC_NO_TRUST), + "PAYMENT_SRC_NOT_AUTHORIZED": int32(PAYMENT_SRC_NOT_AUTHORIZED), + "PAYMENT_NO_DESTINATION": int32(PAYMENT_NO_DESTINATION), + "PAYMENT_NO_TRUST": int32(PAYMENT_NO_TRUST), + "PAYMENT_NOT_AUTHORIZED": int32(PAYMENT_NOT_AUTHORIZED), + "PAYMENT_LINE_FULL": int32(PAYMENT_LINE_FULL), + "PAYMENT_NO_ISSUER": int32(PAYMENT_NO_ISSUER), +} + +func (PaymentResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_PaymentResultCode +} +func (v PaymentResultCode) String() string { + if s, ok := _XdrNames_PaymentResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("PaymentResultCode#%d", v) +} +func (v *PaymentResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_PaymentResultCode[stok]; ok { + *v = PaymentResultCode(val) + return nil + } else if stok == "PaymentResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid PaymentResultCode.", stok)) + } +} +func (v PaymentResultCode) GetU32() uint32 { return uint32(v) } +func (v *PaymentResultCode) SetU32(n uint32) { *v = PaymentResultCode(n) } +func (v *PaymentResultCode) XdrPointer() interface{} { return v } +func (PaymentResultCode) XdrTypeName() string { return "PaymentResultCode" } +func (v PaymentResultCode) XdrValue() interface{} { return v } +func (v *PaymentResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_PaymentResultCode = *PaymentResultCode + +func XDR_PaymentResultCode(v *PaymentResultCode) *PaymentResultCode { return v } + +var _XdrComments_PaymentResultCode = map[int32]string{ + int32(PAYMENT_SUCCESS): "payment successfully completed", + int32(PAYMENT_MALFORMED): "bad input", + int32(PAYMENT_UNDERFUNDED): "not enough funds in source account", + int32(PAYMENT_SRC_NO_TRUST): "no trust line on source account", + int32(PAYMENT_SRC_NOT_AUTHORIZED): "source not authorized to transfer", + int32(PAYMENT_NO_DESTINATION): "destination account does not exist", + int32(PAYMENT_NO_TRUST): "destination missing a trust line for asset", + int32(PAYMENT_NOT_AUTHORIZED): "destination not authorized to hold asset", + int32(PAYMENT_LINE_FULL): "destination would go above their limit", + int32(PAYMENT_NO_ISSUER): "missing issuer on asset", +} + +func (e PaymentResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_PaymentResultCode +} +func (_ PaymentResult) XdrValidTags() map[int32]bool { + return nil +} +func (u PaymentResult) XdrValid() bool { + return true +} +func (u *PaymentResult) XdrUnionTag() XdrNum32 { + return 
XDR_PaymentResultCode(&u.Code) +} +func (u *PaymentResult) XdrUnionTagName() string { + return "Code" +} +func (u *PaymentResult) XdrUnionBody() XdrType { + switch u.Code { + case PAYMENT_SUCCESS: + return nil + default: + return nil + } +} +func (u *PaymentResult) XdrUnionBodyName() string { + switch u.Code { + case PAYMENT_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_PaymentResult = *PaymentResult + +func (v *PaymentResult) XdrPointer() interface{} { return v } +func (PaymentResult) XdrTypeName() string { return "PaymentResult" } +func (v PaymentResult) XdrValue() interface{} { return v } +func (v *PaymentResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *PaymentResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_PaymentResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case PAYMENT_SUCCESS: + return + default: + return + } +} +func XDR_PaymentResult(v *PaymentResult) *PaymentResult { return v } + +var _XdrNames_PathPaymentStrictReceiveResultCode = map[int32]string{ + int32(PATH_PAYMENT_STRICT_RECEIVE_SUCCESS): "PATH_PAYMENT_STRICT_RECEIVE_SUCCESS", + int32(PATH_PAYMENT_STRICT_RECEIVE_MALFORMED): "PATH_PAYMENT_STRICT_RECEIVE_MALFORMED", + int32(PATH_PAYMENT_STRICT_RECEIVE_UNDERFUNDED): "PATH_PAYMENT_STRICT_RECEIVE_UNDERFUNDED", + int32(PATH_PAYMENT_STRICT_RECEIVE_SRC_NO_TRUST): "PATH_PAYMENT_STRICT_RECEIVE_SRC_NO_TRUST", + int32(PATH_PAYMENT_STRICT_RECEIVE_SRC_NOT_AUTHORIZED): "PATH_PAYMENT_STRICT_RECEIVE_SRC_NOT_AUTHORIZED", + int32(PATH_PAYMENT_STRICT_RECEIVE_NO_DESTINATION): "PATH_PAYMENT_STRICT_RECEIVE_NO_DESTINATION", + int32(PATH_PAYMENT_STRICT_RECEIVE_NO_TRUST): "PATH_PAYMENT_STRICT_RECEIVE_NO_TRUST", + int32(PATH_PAYMENT_STRICT_RECEIVE_NOT_AUTHORIZED): "PATH_PAYMENT_STRICT_RECEIVE_NOT_AUTHORIZED", + int32(PATH_PAYMENT_STRICT_RECEIVE_LINE_FULL): "PATH_PAYMENT_STRICT_RECEIVE_LINE_FULL", + int32(PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER): "PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER", + int32(PATH_PAYMENT_STRICT_RECEIVE_TOO_FEW_OFFERS): "PATH_PAYMENT_STRICT_RECEIVE_TOO_FEW_OFFERS", + int32(PATH_PAYMENT_STRICT_RECEIVE_OFFER_CROSS_SELF): "PATH_PAYMENT_STRICT_RECEIVE_OFFER_CROSS_SELF", + int32(PATH_PAYMENT_STRICT_RECEIVE_OVER_SENDMAX): "PATH_PAYMENT_STRICT_RECEIVE_OVER_SENDMAX", +} +var _XdrValues_PathPaymentStrictReceiveResultCode = map[string]int32{ + "PATH_PAYMENT_STRICT_RECEIVE_SUCCESS": int32(PATH_PAYMENT_STRICT_RECEIVE_SUCCESS), + "PATH_PAYMENT_STRICT_RECEIVE_MALFORMED": int32(PATH_PAYMENT_STRICT_RECEIVE_MALFORMED), + "PATH_PAYMENT_STRICT_RECEIVE_UNDERFUNDED": int32(PATH_PAYMENT_STRICT_RECEIVE_UNDERFUNDED), + "PATH_PAYMENT_STRICT_RECEIVE_SRC_NO_TRUST": int32(PATH_PAYMENT_STRICT_RECEIVE_SRC_NO_TRUST), + "PATH_PAYMENT_STRICT_RECEIVE_SRC_NOT_AUTHORIZED": int32(PATH_PAYMENT_STRICT_RECEIVE_SRC_NOT_AUTHORIZED), + "PATH_PAYMENT_STRICT_RECEIVE_NO_DESTINATION": int32(PATH_PAYMENT_STRICT_RECEIVE_NO_DESTINATION), + "PATH_PAYMENT_STRICT_RECEIVE_NO_TRUST": int32(PATH_PAYMENT_STRICT_RECEIVE_NO_TRUST), + "PATH_PAYMENT_STRICT_RECEIVE_NOT_AUTHORIZED": int32(PATH_PAYMENT_STRICT_RECEIVE_NOT_AUTHORIZED), + "PATH_PAYMENT_STRICT_RECEIVE_LINE_FULL": int32(PATH_PAYMENT_STRICT_RECEIVE_LINE_FULL), + "PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER": int32(PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER), + "PATH_PAYMENT_STRICT_RECEIVE_TOO_FEW_OFFERS": int32(PATH_PAYMENT_STRICT_RECEIVE_TOO_FEW_OFFERS), + "PATH_PAYMENT_STRICT_RECEIVE_OFFER_CROSS_SELF": int32(PATH_PAYMENT_STRICT_RECEIVE_OFFER_CROSS_SELF), + 
"PATH_PAYMENT_STRICT_RECEIVE_OVER_SENDMAX": int32(PATH_PAYMENT_STRICT_RECEIVE_OVER_SENDMAX), +} + +func (PathPaymentStrictReceiveResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_PathPaymentStrictReceiveResultCode +} +func (v PathPaymentStrictReceiveResultCode) String() string { + if s, ok := _XdrNames_PathPaymentStrictReceiveResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("PathPaymentStrictReceiveResultCode#%d", v) +} +func (v *PathPaymentStrictReceiveResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_PathPaymentStrictReceiveResultCode[stok]; ok { + *v = PathPaymentStrictReceiveResultCode(val) + return nil + } else if stok == "PathPaymentStrictReceiveResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid PathPaymentStrictReceiveResultCode.", stok)) + } +} +func (v PathPaymentStrictReceiveResultCode) GetU32() uint32 { return uint32(v) } +func (v *PathPaymentStrictReceiveResultCode) SetU32(n uint32) { + *v = PathPaymentStrictReceiveResultCode(n) +} +func (v *PathPaymentStrictReceiveResultCode) XdrPointer() interface{} { return v } +func (PathPaymentStrictReceiveResultCode) XdrTypeName() string { + return "PathPaymentStrictReceiveResultCode" +} +func (v PathPaymentStrictReceiveResultCode) XdrValue() interface{} { return v } +func (v *PathPaymentStrictReceiveResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_PathPaymentStrictReceiveResultCode = *PathPaymentStrictReceiveResultCode + +func XDR_PathPaymentStrictReceiveResultCode(v *PathPaymentStrictReceiveResultCode) *PathPaymentStrictReceiveResultCode { + return v +} + +var _XdrComments_PathPaymentStrictReceiveResultCode = map[int32]string{ + int32(PATH_PAYMENT_STRICT_RECEIVE_SUCCESS): "success", + int32(PATH_PAYMENT_STRICT_RECEIVE_MALFORMED): "bad input", + int32(PATH_PAYMENT_STRICT_RECEIVE_UNDERFUNDED): "not enough funds in source account", + int32(PATH_PAYMENT_STRICT_RECEIVE_SRC_NO_TRUST): "no trust line on source account", + int32(PATH_PAYMENT_STRICT_RECEIVE_SRC_NOT_AUTHORIZED): "source not authorized to transfer", + int32(PATH_PAYMENT_STRICT_RECEIVE_NO_DESTINATION): "destination account does not exist", + int32(PATH_PAYMENT_STRICT_RECEIVE_NO_TRUST): "dest missing a trust line for asset", + int32(PATH_PAYMENT_STRICT_RECEIVE_NOT_AUTHORIZED): "dest not authorized to hold asset", + int32(PATH_PAYMENT_STRICT_RECEIVE_LINE_FULL): "dest would go above their limit", + int32(PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER): "missing issuer on one asset", + int32(PATH_PAYMENT_STRICT_RECEIVE_TOO_FEW_OFFERS): "not enough offers to satisfy path", + int32(PATH_PAYMENT_STRICT_RECEIVE_OFFER_CROSS_SELF): "would cross one of its own offers", + int32(PATH_PAYMENT_STRICT_RECEIVE_OVER_SENDMAX): "could not satisfy sendmax", +} + +func (e PathPaymentStrictReceiveResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_PathPaymentStrictReceiveResultCode +} + +type XdrType_SimplePaymentResult = *SimplePaymentResult + +func (v *SimplePaymentResult) XdrPointer() interface{} { return v } +func (SimplePaymentResult) XdrTypeName() string { return "SimplePaymentResult" } +func (v SimplePaymentResult) XdrValue() interface{} { return v } +func (v *SimplePaymentResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *SimplePaymentResult) XdrRecurse(x XDR, name 
string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sdestination", name), XDR_AccountID(&v.Destination)) + x.Marshal(x.Sprintf("%sasset", name), XDR_Asset(&v.Asset)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) +} +func XDR_SimplePaymentResult(v *SimplePaymentResult) *SimplePaymentResult { return v } + +type _XdrVec_unbounded_ClaimAtom []ClaimAtom + +func (_XdrVec_unbounded_ClaimAtom) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_ClaimAtom) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_ClaimAtom length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_ClaimAtom length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_ClaimAtom) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_ClaimAtom) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]ClaimAtom, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_ClaimAtom) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_ClaimAtom(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_ClaimAtom) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_ClaimAtom) XdrTypeName() string { return "ClaimAtom<>" } +func (v *_XdrVec_unbounded_ClaimAtom) XdrPointer() interface{} { return (*[]ClaimAtom)(v) } +func (v _XdrVec_unbounded_ClaimAtom) XdrValue() interface{} { return ([]ClaimAtom)(v) } +func (v *_XdrVec_unbounded_ClaimAtom) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_XdrAnon_PathPaymentStrictReceiveResult_Success = *XdrAnon_PathPaymentStrictReceiveResult_Success + +func (v *XdrAnon_PathPaymentStrictReceiveResult_Success) XdrPointer() interface{} { return v } +func (XdrAnon_PathPaymentStrictReceiveResult_Success) XdrTypeName() string { + return "XdrAnon_PathPaymentStrictReceiveResult_Success" +} +func (v XdrAnon_PathPaymentStrictReceiveResult_Success) XdrValue() interface{} { return v } +func (v *XdrAnon_PathPaymentStrictReceiveResult_Success) XdrMarshal(x XDR, name string) { + x.Marshal(name, v) +} +func (v *XdrAnon_PathPaymentStrictReceiveResult_Success) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%soffers", name), (*_XdrVec_unbounded_ClaimAtom)(&v.Offers)) + x.Marshal(x.Sprintf("%slast", name), XDR_SimplePaymentResult(&v.Last)) +} +func XDR_XdrAnon_PathPaymentStrictReceiveResult_Success(v *XdrAnon_PathPaymentStrictReceiveResult_Success) *XdrAnon_PathPaymentStrictReceiveResult_Success { + return v +} +func (_ PathPaymentStrictReceiveResult) XdrValidTags() map[int32]bool { + return nil +} +func (u *PathPaymentStrictReceiveResult) Success() *XdrAnon_PathPaymentStrictReceiveResult_Success { + 
switch u.Code { + case PATH_PAYMENT_STRICT_RECEIVE_SUCCESS: + if v, ok := u._u.(*XdrAnon_PathPaymentStrictReceiveResult_Success); ok { + return v + } else { + var zero XdrAnon_PathPaymentStrictReceiveResult_Success + u._u = &zero + return &zero + } + default: + XdrPanic("PathPaymentStrictReceiveResult.Success accessed when Code == %v", u.Code) + return nil + } +} + +// the asset that caused the error +func (u *PathPaymentStrictReceiveResult) NoIssuer() *Asset { + switch u.Code { + case PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER: + if v, ok := u._u.(*Asset); ok { + return v + } else { + var zero Asset + u._u = &zero + return &zero + } + default: + XdrPanic("PathPaymentStrictReceiveResult.NoIssuer accessed when Code == %v", u.Code) + return nil + } +} +func (u PathPaymentStrictReceiveResult) XdrValid() bool { + return true +} +func (u *PathPaymentStrictReceiveResult) XdrUnionTag() XdrNum32 { + return XDR_PathPaymentStrictReceiveResultCode(&u.Code) +} +func (u *PathPaymentStrictReceiveResult) XdrUnionTagName() string { + return "Code" +} +func (u *PathPaymentStrictReceiveResult) XdrUnionBody() XdrType { + switch u.Code { + case PATH_PAYMENT_STRICT_RECEIVE_SUCCESS: + return XDR_XdrAnon_PathPaymentStrictReceiveResult_Success(u.Success()) + case PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER: + return XDR_Asset(u.NoIssuer()) + default: + return nil + } +} +func (u *PathPaymentStrictReceiveResult) XdrUnionBodyName() string { + switch u.Code { + case PATH_PAYMENT_STRICT_RECEIVE_SUCCESS: + return "Success" + case PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER: + return "NoIssuer" + default: + return "" + } +} + +type XdrType_PathPaymentStrictReceiveResult = *PathPaymentStrictReceiveResult + +func (v *PathPaymentStrictReceiveResult) XdrPointer() interface{} { return v } +func (PathPaymentStrictReceiveResult) XdrTypeName() string { return "PathPaymentStrictReceiveResult" } +func (v PathPaymentStrictReceiveResult) XdrValue() interface{} { return v } +func (v *PathPaymentStrictReceiveResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *PathPaymentStrictReceiveResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_PathPaymentStrictReceiveResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case PATH_PAYMENT_STRICT_RECEIVE_SUCCESS: + x.Marshal(x.Sprintf("%ssuccess", name), XDR_XdrAnon_PathPaymentStrictReceiveResult_Success(u.Success())) + return + case PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER: + x.Marshal(x.Sprintf("%snoIssuer", name), XDR_Asset(u.NoIssuer())) + return + default: + return + } +} +func XDR_PathPaymentStrictReceiveResult(v *PathPaymentStrictReceiveResult) *PathPaymentStrictReceiveResult { + return v +} + +var _XdrNames_PathPaymentStrictSendResultCode = map[int32]string{ + int32(PATH_PAYMENT_STRICT_SEND_SUCCESS): "PATH_PAYMENT_STRICT_SEND_SUCCESS", + int32(PATH_PAYMENT_STRICT_SEND_MALFORMED): "PATH_PAYMENT_STRICT_SEND_MALFORMED", + int32(PATH_PAYMENT_STRICT_SEND_UNDERFUNDED): "PATH_PAYMENT_STRICT_SEND_UNDERFUNDED", + int32(PATH_PAYMENT_STRICT_SEND_SRC_NO_TRUST): "PATH_PAYMENT_STRICT_SEND_SRC_NO_TRUST", + int32(PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED): "PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED", + int32(PATH_PAYMENT_STRICT_SEND_NO_DESTINATION): "PATH_PAYMENT_STRICT_SEND_NO_DESTINATION", + int32(PATH_PAYMENT_STRICT_SEND_NO_TRUST): "PATH_PAYMENT_STRICT_SEND_NO_TRUST", + int32(PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED): "PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED", + int32(PATH_PAYMENT_STRICT_SEND_LINE_FULL): 
"PATH_PAYMENT_STRICT_SEND_LINE_FULL", + int32(PATH_PAYMENT_STRICT_SEND_NO_ISSUER): "PATH_PAYMENT_STRICT_SEND_NO_ISSUER", + int32(PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS): "PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS", + int32(PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF): "PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF", + int32(PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN): "PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN", +} +var _XdrValues_PathPaymentStrictSendResultCode = map[string]int32{ + "PATH_PAYMENT_STRICT_SEND_SUCCESS": int32(PATH_PAYMENT_STRICT_SEND_SUCCESS), + "PATH_PAYMENT_STRICT_SEND_MALFORMED": int32(PATH_PAYMENT_STRICT_SEND_MALFORMED), + "PATH_PAYMENT_STRICT_SEND_UNDERFUNDED": int32(PATH_PAYMENT_STRICT_SEND_UNDERFUNDED), + "PATH_PAYMENT_STRICT_SEND_SRC_NO_TRUST": int32(PATH_PAYMENT_STRICT_SEND_SRC_NO_TRUST), + "PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED": int32(PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED), + "PATH_PAYMENT_STRICT_SEND_NO_DESTINATION": int32(PATH_PAYMENT_STRICT_SEND_NO_DESTINATION), + "PATH_PAYMENT_STRICT_SEND_NO_TRUST": int32(PATH_PAYMENT_STRICT_SEND_NO_TRUST), + "PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED": int32(PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED), + "PATH_PAYMENT_STRICT_SEND_LINE_FULL": int32(PATH_PAYMENT_STRICT_SEND_LINE_FULL), + "PATH_PAYMENT_STRICT_SEND_NO_ISSUER": int32(PATH_PAYMENT_STRICT_SEND_NO_ISSUER), + "PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS": int32(PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS), + "PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF": int32(PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF), + "PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN": int32(PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN), +} + +func (PathPaymentStrictSendResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_PathPaymentStrictSendResultCode +} +func (v PathPaymentStrictSendResultCode) String() string { + if s, ok := _XdrNames_PathPaymentStrictSendResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("PathPaymentStrictSendResultCode#%d", v) +} +func (v *PathPaymentStrictSendResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_PathPaymentStrictSendResultCode[stok]; ok { + *v = PathPaymentStrictSendResultCode(val) + return nil + } else if stok == "PathPaymentStrictSendResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid PathPaymentStrictSendResultCode.", stok)) + } +} +func (v PathPaymentStrictSendResultCode) GetU32() uint32 { return uint32(v) } +func (v *PathPaymentStrictSendResultCode) SetU32(n uint32) { *v = PathPaymentStrictSendResultCode(n) } +func (v *PathPaymentStrictSendResultCode) XdrPointer() interface{} { return v } +func (PathPaymentStrictSendResultCode) XdrTypeName() string { return "PathPaymentStrictSendResultCode" } +func (v PathPaymentStrictSendResultCode) XdrValue() interface{} { return v } +func (v *PathPaymentStrictSendResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_PathPaymentStrictSendResultCode = *PathPaymentStrictSendResultCode + +func XDR_PathPaymentStrictSendResultCode(v *PathPaymentStrictSendResultCode) *PathPaymentStrictSendResultCode { + return v +} + +var _XdrComments_PathPaymentStrictSendResultCode = map[int32]string{ + int32(PATH_PAYMENT_STRICT_SEND_SUCCESS): "success", + int32(PATH_PAYMENT_STRICT_SEND_MALFORMED): "bad input", + int32(PATH_PAYMENT_STRICT_SEND_UNDERFUNDED): "not enough funds in source 
account", + int32(PATH_PAYMENT_STRICT_SEND_SRC_NO_TRUST): "no trust line on source account", + int32(PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED): "source not authorized to transfer", + int32(PATH_PAYMENT_STRICT_SEND_NO_DESTINATION): "destination account does not exist", + int32(PATH_PAYMENT_STRICT_SEND_NO_TRUST): "dest missing a trust line for asset", + int32(PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED): "dest not authorized to hold asset", + int32(PATH_PAYMENT_STRICT_SEND_LINE_FULL): "dest would go above their limit", + int32(PATH_PAYMENT_STRICT_SEND_NO_ISSUER): "missing issuer on one asset", + int32(PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS): "not enough offers to satisfy path", + int32(PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF): "would cross one of its own offers", + int32(PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN): "could not satisfy destMin", +} + +func (e PathPaymentStrictSendResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_PathPaymentStrictSendResultCode +} + +type XdrType_XdrAnon_PathPaymentStrictSendResult_Success = *XdrAnon_PathPaymentStrictSendResult_Success + +func (v *XdrAnon_PathPaymentStrictSendResult_Success) XdrPointer() interface{} { return v } +func (XdrAnon_PathPaymentStrictSendResult_Success) XdrTypeName() string { + return "XdrAnon_PathPaymentStrictSendResult_Success" +} +func (v XdrAnon_PathPaymentStrictSendResult_Success) XdrValue() interface{} { return v } +func (v *XdrAnon_PathPaymentStrictSendResult_Success) XdrMarshal(x XDR, name string) { + x.Marshal(name, v) +} +func (v *XdrAnon_PathPaymentStrictSendResult_Success) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%soffers", name), (*_XdrVec_unbounded_ClaimAtom)(&v.Offers)) + x.Marshal(x.Sprintf("%slast", name), XDR_SimplePaymentResult(&v.Last)) +} +func XDR_XdrAnon_PathPaymentStrictSendResult_Success(v *XdrAnon_PathPaymentStrictSendResult_Success) *XdrAnon_PathPaymentStrictSendResult_Success { + return v +} +func (_ PathPaymentStrictSendResult) XdrValidTags() map[int32]bool { + return nil +} +func (u *PathPaymentStrictSendResult) Success() *XdrAnon_PathPaymentStrictSendResult_Success { + switch u.Code { + case PATH_PAYMENT_STRICT_SEND_SUCCESS: + if v, ok := u._u.(*XdrAnon_PathPaymentStrictSendResult_Success); ok { + return v + } else { + var zero XdrAnon_PathPaymentStrictSendResult_Success + u._u = &zero + return &zero + } + default: + XdrPanic("PathPaymentStrictSendResult.Success accessed when Code == %v", u.Code) + return nil + } +} + +// the asset that caused the error +func (u *PathPaymentStrictSendResult) NoIssuer() *Asset { + switch u.Code { + case PATH_PAYMENT_STRICT_SEND_NO_ISSUER: + if v, ok := u._u.(*Asset); ok { + return v + } else { + var zero Asset + u._u = &zero + return &zero + } + default: + XdrPanic("PathPaymentStrictSendResult.NoIssuer accessed when Code == %v", u.Code) + return nil + } +} +func (u PathPaymentStrictSendResult) XdrValid() bool { + return true +} +func (u *PathPaymentStrictSendResult) XdrUnionTag() XdrNum32 { + return XDR_PathPaymentStrictSendResultCode(&u.Code) +} +func (u *PathPaymentStrictSendResult) XdrUnionTagName() string { + return "Code" +} +func (u *PathPaymentStrictSendResult) XdrUnionBody() XdrType { + switch u.Code { + case PATH_PAYMENT_STRICT_SEND_SUCCESS: + return XDR_XdrAnon_PathPaymentStrictSendResult_Success(u.Success()) + case PATH_PAYMENT_STRICT_SEND_NO_ISSUER: + return XDR_Asset(u.NoIssuer()) + default: + return nil + } +} +func (u *PathPaymentStrictSendResult) XdrUnionBodyName() 
string { + switch u.Code { + case PATH_PAYMENT_STRICT_SEND_SUCCESS: + return "Success" + case PATH_PAYMENT_STRICT_SEND_NO_ISSUER: + return "NoIssuer" + default: + return "" + } +} + +type XdrType_PathPaymentStrictSendResult = *PathPaymentStrictSendResult + +func (v *PathPaymentStrictSendResult) XdrPointer() interface{} { return v } +func (PathPaymentStrictSendResult) XdrTypeName() string { return "PathPaymentStrictSendResult" } +func (v PathPaymentStrictSendResult) XdrValue() interface{} { return v } +func (v *PathPaymentStrictSendResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *PathPaymentStrictSendResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_PathPaymentStrictSendResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case PATH_PAYMENT_STRICT_SEND_SUCCESS: + x.Marshal(x.Sprintf("%ssuccess", name), XDR_XdrAnon_PathPaymentStrictSendResult_Success(u.Success())) + return + case PATH_PAYMENT_STRICT_SEND_NO_ISSUER: + x.Marshal(x.Sprintf("%snoIssuer", name), XDR_Asset(u.NoIssuer())) + return + default: + return + } +} +func XDR_PathPaymentStrictSendResult(v *PathPaymentStrictSendResult) *PathPaymentStrictSendResult { + return v +} + +var _XdrNames_ManageSellOfferResultCode = map[int32]string{ + int32(MANAGE_SELL_OFFER_SUCCESS): "MANAGE_SELL_OFFER_SUCCESS", + int32(MANAGE_SELL_OFFER_MALFORMED): "MANAGE_SELL_OFFER_MALFORMED", + int32(MANAGE_SELL_OFFER_SELL_NO_TRUST): "MANAGE_SELL_OFFER_SELL_NO_TRUST", + int32(MANAGE_SELL_OFFER_BUY_NO_TRUST): "MANAGE_SELL_OFFER_BUY_NO_TRUST", + int32(MANAGE_SELL_OFFER_SELL_NOT_AUTHORIZED): "MANAGE_SELL_OFFER_SELL_NOT_AUTHORIZED", + int32(MANAGE_SELL_OFFER_BUY_NOT_AUTHORIZED): "MANAGE_SELL_OFFER_BUY_NOT_AUTHORIZED", + int32(MANAGE_SELL_OFFER_LINE_FULL): "MANAGE_SELL_OFFER_LINE_FULL", + int32(MANAGE_SELL_OFFER_UNDERFUNDED): "MANAGE_SELL_OFFER_UNDERFUNDED", + int32(MANAGE_SELL_OFFER_CROSS_SELF): "MANAGE_SELL_OFFER_CROSS_SELF", + int32(MANAGE_SELL_OFFER_SELL_NO_ISSUER): "MANAGE_SELL_OFFER_SELL_NO_ISSUER", + int32(MANAGE_SELL_OFFER_BUY_NO_ISSUER): "MANAGE_SELL_OFFER_BUY_NO_ISSUER", + int32(MANAGE_SELL_OFFER_NOT_FOUND): "MANAGE_SELL_OFFER_NOT_FOUND", + int32(MANAGE_SELL_OFFER_LOW_RESERVE): "MANAGE_SELL_OFFER_LOW_RESERVE", +} +var _XdrValues_ManageSellOfferResultCode = map[string]int32{ + "MANAGE_SELL_OFFER_SUCCESS": int32(MANAGE_SELL_OFFER_SUCCESS), + "MANAGE_SELL_OFFER_MALFORMED": int32(MANAGE_SELL_OFFER_MALFORMED), + "MANAGE_SELL_OFFER_SELL_NO_TRUST": int32(MANAGE_SELL_OFFER_SELL_NO_TRUST), + "MANAGE_SELL_OFFER_BUY_NO_TRUST": int32(MANAGE_SELL_OFFER_BUY_NO_TRUST), + "MANAGE_SELL_OFFER_SELL_NOT_AUTHORIZED": int32(MANAGE_SELL_OFFER_SELL_NOT_AUTHORIZED), + "MANAGE_SELL_OFFER_BUY_NOT_AUTHORIZED": int32(MANAGE_SELL_OFFER_BUY_NOT_AUTHORIZED), + "MANAGE_SELL_OFFER_LINE_FULL": int32(MANAGE_SELL_OFFER_LINE_FULL), + "MANAGE_SELL_OFFER_UNDERFUNDED": int32(MANAGE_SELL_OFFER_UNDERFUNDED), + "MANAGE_SELL_OFFER_CROSS_SELF": int32(MANAGE_SELL_OFFER_CROSS_SELF), + "MANAGE_SELL_OFFER_SELL_NO_ISSUER": int32(MANAGE_SELL_OFFER_SELL_NO_ISSUER), + "MANAGE_SELL_OFFER_BUY_NO_ISSUER": int32(MANAGE_SELL_OFFER_BUY_NO_ISSUER), + "MANAGE_SELL_OFFER_NOT_FOUND": int32(MANAGE_SELL_OFFER_NOT_FOUND), + "MANAGE_SELL_OFFER_LOW_RESERVE": int32(MANAGE_SELL_OFFER_LOW_RESERVE), +} + +func (ManageSellOfferResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_ManageSellOfferResultCode +} +func (v ManageSellOfferResultCode) String() string { + if s, ok := 
_XdrNames_ManageSellOfferResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("ManageSellOfferResultCode#%d", v) +} +func (v *ManageSellOfferResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ManageSellOfferResultCode[stok]; ok { + *v = ManageSellOfferResultCode(val) + return nil + } else if stok == "ManageSellOfferResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ManageSellOfferResultCode.", stok)) + } +} +func (v ManageSellOfferResultCode) GetU32() uint32 { return uint32(v) } +func (v *ManageSellOfferResultCode) SetU32(n uint32) { *v = ManageSellOfferResultCode(n) } +func (v *ManageSellOfferResultCode) XdrPointer() interface{} { return v } +func (ManageSellOfferResultCode) XdrTypeName() string { return "ManageSellOfferResultCode" } +func (v ManageSellOfferResultCode) XdrValue() interface{} { return v } +func (v *ManageSellOfferResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ManageSellOfferResultCode = *ManageSellOfferResultCode + +func XDR_ManageSellOfferResultCode(v *ManageSellOfferResultCode) *ManageSellOfferResultCode { return v } + +var _XdrComments_ManageSellOfferResultCode = map[int32]string{ + int32(MANAGE_SELL_OFFER_SUCCESS): "codes considered as \"success\" for the operation", + int32(MANAGE_SELL_OFFER_MALFORMED): "generated offer would be invalid", + int32(MANAGE_SELL_OFFER_SELL_NO_TRUST): "no trust line for what we're selling", + int32(MANAGE_SELL_OFFER_BUY_NO_TRUST): "no trust line for what we're buying", + int32(MANAGE_SELL_OFFER_SELL_NOT_AUTHORIZED): "not authorized to sell", + int32(MANAGE_SELL_OFFER_BUY_NOT_AUTHORIZED): "not authorized to buy", + int32(MANAGE_SELL_OFFER_LINE_FULL): "can't receive more of what it's buying", + int32(MANAGE_SELL_OFFER_UNDERFUNDED): "doesn't hold what it's trying to sell", + int32(MANAGE_SELL_OFFER_CROSS_SELF): "would cross an offer from the same user", + int32(MANAGE_SELL_OFFER_SELL_NO_ISSUER): "no issuer for what we're selling", + int32(MANAGE_SELL_OFFER_BUY_NO_ISSUER): "no issuer for what we're buying", + int32(MANAGE_SELL_OFFER_NOT_FOUND): "offerID does not match an existing offer", + int32(MANAGE_SELL_OFFER_LOW_RESERVE): "not enough funds to create a new Offer", +} + +func (e ManageSellOfferResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_ManageSellOfferResultCode +} + +var _XdrNames_ManageOfferEffect = map[int32]string{ + int32(MANAGE_OFFER_CREATED): "MANAGE_OFFER_CREATED", + int32(MANAGE_OFFER_UPDATED): "MANAGE_OFFER_UPDATED", + int32(MANAGE_OFFER_DELETED): "MANAGE_OFFER_DELETED", +} +var _XdrValues_ManageOfferEffect = map[string]int32{ + "MANAGE_OFFER_CREATED": int32(MANAGE_OFFER_CREATED), + "MANAGE_OFFER_UPDATED": int32(MANAGE_OFFER_UPDATED), + "MANAGE_OFFER_DELETED": int32(MANAGE_OFFER_DELETED), +} + +func (ManageOfferEffect) XdrEnumNames() map[int32]string { + return _XdrNames_ManageOfferEffect +} +func (v ManageOfferEffect) String() string { + if s, ok := _XdrNames_ManageOfferEffect[int32(v)]; ok { + return s + } + return fmt.Sprintf("ManageOfferEffect#%d", v) +} +func (v *ManageOfferEffect) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ManageOfferEffect[stok]; ok { + *v = ManageOfferEffect(val) + return nil + 
} else if stok == "ManageOfferEffect" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ManageOfferEffect.", stok)) + } +} +func (v ManageOfferEffect) GetU32() uint32 { return uint32(v) } +func (v *ManageOfferEffect) SetU32(n uint32) { *v = ManageOfferEffect(n) } +func (v *ManageOfferEffect) XdrPointer() interface{} { return v } +func (ManageOfferEffect) XdrTypeName() string { return "ManageOfferEffect" } +func (v ManageOfferEffect) XdrValue() interface{} { return v } +func (v *ManageOfferEffect) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ManageOfferEffect = *ManageOfferEffect + +func XDR_ManageOfferEffect(v *ManageOfferEffect) *ManageOfferEffect { return v } +func (_ XdrAnon_ManageOfferSuccessResult_Offer) XdrValidTags() map[int32]bool { + return nil +} +func (u *XdrAnon_ManageOfferSuccessResult_Offer) Offer() *OfferEntry { + switch u.Effect { + case MANAGE_OFFER_CREATED, MANAGE_OFFER_UPDATED: + if v, ok := u._u.(*OfferEntry); ok { + return v + } else { + var zero OfferEntry + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_ManageOfferSuccessResult_Offer.Offer accessed when Effect == %v", u.Effect) + return nil + } +} +func (u XdrAnon_ManageOfferSuccessResult_Offer) XdrValid() bool { + return true +} +func (u *XdrAnon_ManageOfferSuccessResult_Offer) XdrUnionTag() XdrNum32 { + return XDR_ManageOfferEffect(&u.Effect) +} +func (u *XdrAnon_ManageOfferSuccessResult_Offer) XdrUnionTagName() string { + return "Effect" +} +func (u *XdrAnon_ManageOfferSuccessResult_Offer) XdrUnionBody() XdrType { + switch u.Effect { + case MANAGE_OFFER_CREATED, MANAGE_OFFER_UPDATED: + return XDR_OfferEntry(u.Offer()) + default: + return nil + } +} +func (u *XdrAnon_ManageOfferSuccessResult_Offer) XdrUnionBodyName() string { + switch u.Effect { + case MANAGE_OFFER_CREATED, MANAGE_OFFER_UPDATED: + return "Offer" + default: + return "" + } +} + +type XdrType_XdrAnon_ManageOfferSuccessResult_Offer = *XdrAnon_ManageOfferSuccessResult_Offer + +func (v *XdrAnon_ManageOfferSuccessResult_Offer) XdrPointer() interface{} { return v } +func (XdrAnon_ManageOfferSuccessResult_Offer) XdrTypeName() string { + return "XdrAnon_ManageOfferSuccessResult_Offer" +} +func (v XdrAnon_ManageOfferSuccessResult_Offer) XdrValue() interface{} { return v } +func (v *XdrAnon_ManageOfferSuccessResult_Offer) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_ManageOfferSuccessResult_Offer) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ManageOfferEffect(&u.Effect).XdrMarshal(x, x.Sprintf("%seffect", name)) + switch u.Effect { + case MANAGE_OFFER_CREATED, MANAGE_OFFER_UPDATED: + x.Marshal(x.Sprintf("%soffer", name), XDR_OfferEntry(u.Offer())) + return + default: + return + } +} +func XDR_XdrAnon_ManageOfferSuccessResult_Offer(v *XdrAnon_ManageOfferSuccessResult_Offer) *XdrAnon_ManageOfferSuccessResult_Offer { + return v +} + +type XdrType_ManageOfferSuccessResult = *ManageOfferSuccessResult + +func (v *ManageOfferSuccessResult) XdrPointer() interface{} { return v } +func (ManageOfferSuccessResult) XdrTypeName() string { return "ManageOfferSuccessResult" } +func (v ManageOfferSuccessResult) XdrValue() interface{} { return v } +func (v *ManageOfferSuccessResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *ManageOfferSuccessResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + 
x.Marshal(x.Sprintf("%soffersClaimed", name), (*_XdrVec_unbounded_ClaimAtom)(&v.OffersClaimed)) + x.Marshal(x.Sprintf("%soffer", name), XDR_XdrAnon_ManageOfferSuccessResult_Offer(&v.Offer)) +} +func XDR_ManageOfferSuccessResult(v *ManageOfferSuccessResult) *ManageOfferSuccessResult { return v } +func (_ ManageSellOfferResult) XdrValidTags() map[int32]bool { + return nil +} +func (u *ManageSellOfferResult) Success() *ManageOfferSuccessResult { + switch u.Code { + case MANAGE_SELL_OFFER_SUCCESS: + if v, ok := u._u.(*ManageOfferSuccessResult); ok { + return v + } else { + var zero ManageOfferSuccessResult + u._u = &zero + return &zero + } + default: + XdrPanic("ManageSellOfferResult.Success accessed when Code == %v", u.Code) + return nil + } +} +func (u ManageSellOfferResult) XdrValid() bool { + return true +} +func (u *ManageSellOfferResult) XdrUnionTag() XdrNum32 { + return XDR_ManageSellOfferResultCode(&u.Code) +} +func (u *ManageSellOfferResult) XdrUnionTagName() string { + return "Code" +} +func (u *ManageSellOfferResult) XdrUnionBody() XdrType { + switch u.Code { + case MANAGE_SELL_OFFER_SUCCESS: + return XDR_ManageOfferSuccessResult(u.Success()) + default: + return nil + } +} +func (u *ManageSellOfferResult) XdrUnionBodyName() string { + switch u.Code { + case MANAGE_SELL_OFFER_SUCCESS: + return "Success" + default: + return "" + } +} + +type XdrType_ManageSellOfferResult = *ManageSellOfferResult + +func (v *ManageSellOfferResult) XdrPointer() interface{} { return v } +func (ManageSellOfferResult) XdrTypeName() string { return "ManageSellOfferResult" } +func (v ManageSellOfferResult) XdrValue() interface{} { return v } +func (v *ManageSellOfferResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *ManageSellOfferResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ManageSellOfferResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case MANAGE_SELL_OFFER_SUCCESS: + x.Marshal(x.Sprintf("%ssuccess", name), XDR_ManageOfferSuccessResult(u.Success())) + return + default: + return + } +} +func XDR_ManageSellOfferResult(v *ManageSellOfferResult) *ManageSellOfferResult { return v } + +var _XdrNames_ManageBuyOfferResultCode = map[int32]string{ + int32(MANAGE_BUY_OFFER_SUCCESS): "MANAGE_BUY_OFFER_SUCCESS", + int32(MANAGE_BUY_OFFER_MALFORMED): "MANAGE_BUY_OFFER_MALFORMED", + int32(MANAGE_BUY_OFFER_SELL_NO_TRUST): "MANAGE_BUY_OFFER_SELL_NO_TRUST", + int32(MANAGE_BUY_OFFER_BUY_NO_TRUST): "MANAGE_BUY_OFFER_BUY_NO_TRUST", + int32(MANAGE_BUY_OFFER_SELL_NOT_AUTHORIZED): "MANAGE_BUY_OFFER_SELL_NOT_AUTHORIZED", + int32(MANAGE_BUY_OFFER_BUY_NOT_AUTHORIZED): "MANAGE_BUY_OFFER_BUY_NOT_AUTHORIZED", + int32(MANAGE_BUY_OFFER_LINE_FULL): "MANAGE_BUY_OFFER_LINE_FULL", + int32(MANAGE_BUY_OFFER_UNDERFUNDED): "MANAGE_BUY_OFFER_UNDERFUNDED", + int32(MANAGE_BUY_OFFER_CROSS_SELF): "MANAGE_BUY_OFFER_CROSS_SELF", + int32(MANAGE_BUY_OFFER_SELL_NO_ISSUER): "MANAGE_BUY_OFFER_SELL_NO_ISSUER", + int32(MANAGE_BUY_OFFER_BUY_NO_ISSUER): "MANAGE_BUY_OFFER_BUY_NO_ISSUER", + int32(MANAGE_BUY_OFFER_NOT_FOUND): "MANAGE_BUY_OFFER_NOT_FOUND", + int32(MANAGE_BUY_OFFER_LOW_RESERVE): "MANAGE_BUY_OFFER_LOW_RESERVE", +} +var _XdrValues_ManageBuyOfferResultCode = map[string]int32{ + "MANAGE_BUY_OFFER_SUCCESS": int32(MANAGE_BUY_OFFER_SUCCESS), + "MANAGE_BUY_OFFER_MALFORMED": int32(MANAGE_BUY_OFFER_MALFORMED), + "MANAGE_BUY_OFFER_SELL_NO_TRUST": int32(MANAGE_BUY_OFFER_SELL_NO_TRUST), + "MANAGE_BUY_OFFER_BUY_NO_TRUST": 
int32(MANAGE_BUY_OFFER_BUY_NO_TRUST), + "MANAGE_BUY_OFFER_SELL_NOT_AUTHORIZED": int32(MANAGE_BUY_OFFER_SELL_NOT_AUTHORIZED), + "MANAGE_BUY_OFFER_BUY_NOT_AUTHORIZED": int32(MANAGE_BUY_OFFER_BUY_NOT_AUTHORIZED), + "MANAGE_BUY_OFFER_LINE_FULL": int32(MANAGE_BUY_OFFER_LINE_FULL), + "MANAGE_BUY_OFFER_UNDERFUNDED": int32(MANAGE_BUY_OFFER_UNDERFUNDED), + "MANAGE_BUY_OFFER_CROSS_SELF": int32(MANAGE_BUY_OFFER_CROSS_SELF), + "MANAGE_BUY_OFFER_SELL_NO_ISSUER": int32(MANAGE_BUY_OFFER_SELL_NO_ISSUER), + "MANAGE_BUY_OFFER_BUY_NO_ISSUER": int32(MANAGE_BUY_OFFER_BUY_NO_ISSUER), + "MANAGE_BUY_OFFER_NOT_FOUND": int32(MANAGE_BUY_OFFER_NOT_FOUND), + "MANAGE_BUY_OFFER_LOW_RESERVE": int32(MANAGE_BUY_OFFER_LOW_RESERVE), +} + +func (ManageBuyOfferResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_ManageBuyOfferResultCode +} +func (v ManageBuyOfferResultCode) String() string { + if s, ok := _XdrNames_ManageBuyOfferResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("ManageBuyOfferResultCode#%d", v) +} +func (v *ManageBuyOfferResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ManageBuyOfferResultCode[stok]; ok { + *v = ManageBuyOfferResultCode(val) + return nil + } else if stok == "ManageBuyOfferResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ManageBuyOfferResultCode.", stok)) + } +} +func (v ManageBuyOfferResultCode) GetU32() uint32 { return uint32(v) } +func (v *ManageBuyOfferResultCode) SetU32(n uint32) { *v = ManageBuyOfferResultCode(n) } +func (v *ManageBuyOfferResultCode) XdrPointer() interface{} { return v } +func (ManageBuyOfferResultCode) XdrTypeName() string { return "ManageBuyOfferResultCode" } +func (v ManageBuyOfferResultCode) XdrValue() interface{} { return v } +func (v *ManageBuyOfferResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ManageBuyOfferResultCode = *ManageBuyOfferResultCode + +func XDR_ManageBuyOfferResultCode(v *ManageBuyOfferResultCode) *ManageBuyOfferResultCode { return v } + +var _XdrComments_ManageBuyOfferResultCode = map[int32]string{ + int32(MANAGE_BUY_OFFER_SUCCESS): "codes considered as \"success\" for the operation", + int32(MANAGE_BUY_OFFER_MALFORMED): "generated offer would be invalid", + int32(MANAGE_BUY_OFFER_SELL_NO_TRUST): "no trust line for what we're selling", + int32(MANAGE_BUY_OFFER_BUY_NO_TRUST): "no trust line for what we're buying", + int32(MANAGE_BUY_OFFER_SELL_NOT_AUTHORIZED): "not authorized to sell", + int32(MANAGE_BUY_OFFER_BUY_NOT_AUTHORIZED): "not authorized to buy", + int32(MANAGE_BUY_OFFER_LINE_FULL): "can't receive more of what it's buying", + int32(MANAGE_BUY_OFFER_UNDERFUNDED): "doesn't hold what it's trying to sell", + int32(MANAGE_BUY_OFFER_CROSS_SELF): "would cross an offer from the same user", + int32(MANAGE_BUY_OFFER_SELL_NO_ISSUER): "no issuer for what we're selling", + int32(MANAGE_BUY_OFFER_BUY_NO_ISSUER): "no issuer for what we're buying", + int32(MANAGE_BUY_OFFER_NOT_FOUND): "offerID does not match an existing offer", + int32(MANAGE_BUY_OFFER_LOW_RESERVE): "not enough funds to create a new Offer", +} + +func (e ManageBuyOfferResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_ManageBuyOfferResultCode +} +func (_ ManageBuyOfferResult) XdrValidTags() map[int32]bool { + return nil +} +func (u *ManageBuyOfferResult) Success() 
*ManageOfferSuccessResult { + switch u.Code { + case MANAGE_BUY_OFFER_SUCCESS: + if v, ok := u._u.(*ManageOfferSuccessResult); ok { + return v + } else { + var zero ManageOfferSuccessResult + u._u = &zero + return &zero + } + default: + XdrPanic("ManageBuyOfferResult.Success accessed when Code == %v", u.Code) + return nil + } +} +func (u ManageBuyOfferResult) XdrValid() bool { + return true +} +func (u *ManageBuyOfferResult) XdrUnionTag() XdrNum32 { + return XDR_ManageBuyOfferResultCode(&u.Code) +} +func (u *ManageBuyOfferResult) XdrUnionTagName() string { + return "Code" +} +func (u *ManageBuyOfferResult) XdrUnionBody() XdrType { + switch u.Code { + case MANAGE_BUY_OFFER_SUCCESS: + return XDR_ManageOfferSuccessResult(u.Success()) + default: + return nil + } +} +func (u *ManageBuyOfferResult) XdrUnionBodyName() string { + switch u.Code { + case MANAGE_BUY_OFFER_SUCCESS: + return "Success" + default: + return "" + } +} + +type XdrType_ManageBuyOfferResult = *ManageBuyOfferResult + +func (v *ManageBuyOfferResult) XdrPointer() interface{} { return v } +func (ManageBuyOfferResult) XdrTypeName() string { return "ManageBuyOfferResult" } +func (v ManageBuyOfferResult) XdrValue() interface{} { return v } +func (v *ManageBuyOfferResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *ManageBuyOfferResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ManageBuyOfferResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case MANAGE_BUY_OFFER_SUCCESS: + x.Marshal(x.Sprintf("%ssuccess", name), XDR_ManageOfferSuccessResult(u.Success())) + return + default: + return + } +} +func XDR_ManageBuyOfferResult(v *ManageBuyOfferResult) *ManageBuyOfferResult { return v } + +var _XdrNames_SetOptionsResultCode = map[int32]string{ + int32(SET_OPTIONS_SUCCESS): "SET_OPTIONS_SUCCESS", + int32(SET_OPTIONS_LOW_RESERVE): "SET_OPTIONS_LOW_RESERVE", + int32(SET_OPTIONS_TOO_MANY_SIGNERS): "SET_OPTIONS_TOO_MANY_SIGNERS", + int32(SET_OPTIONS_BAD_FLAGS): "SET_OPTIONS_BAD_FLAGS", + int32(SET_OPTIONS_INVALID_INFLATION): "SET_OPTIONS_INVALID_INFLATION", + int32(SET_OPTIONS_CANT_CHANGE): "SET_OPTIONS_CANT_CHANGE", + int32(SET_OPTIONS_UNKNOWN_FLAG): "SET_OPTIONS_UNKNOWN_FLAG", + int32(SET_OPTIONS_THRESHOLD_OUT_OF_RANGE): "SET_OPTIONS_THRESHOLD_OUT_OF_RANGE", + int32(SET_OPTIONS_BAD_SIGNER): "SET_OPTIONS_BAD_SIGNER", + int32(SET_OPTIONS_INVALID_HOME_DOMAIN): "SET_OPTIONS_INVALID_HOME_DOMAIN", + int32(SET_OPTIONS_AUTH_REVOCABLE_REQUIRED): "SET_OPTIONS_AUTH_REVOCABLE_REQUIRED", +} +var _XdrValues_SetOptionsResultCode = map[string]int32{ + "SET_OPTIONS_SUCCESS": int32(SET_OPTIONS_SUCCESS), + "SET_OPTIONS_LOW_RESERVE": int32(SET_OPTIONS_LOW_RESERVE), + "SET_OPTIONS_TOO_MANY_SIGNERS": int32(SET_OPTIONS_TOO_MANY_SIGNERS), + "SET_OPTIONS_BAD_FLAGS": int32(SET_OPTIONS_BAD_FLAGS), + "SET_OPTIONS_INVALID_INFLATION": int32(SET_OPTIONS_INVALID_INFLATION), + "SET_OPTIONS_CANT_CHANGE": int32(SET_OPTIONS_CANT_CHANGE), + "SET_OPTIONS_UNKNOWN_FLAG": int32(SET_OPTIONS_UNKNOWN_FLAG), + "SET_OPTIONS_THRESHOLD_OUT_OF_RANGE": int32(SET_OPTIONS_THRESHOLD_OUT_OF_RANGE), + "SET_OPTIONS_BAD_SIGNER": int32(SET_OPTIONS_BAD_SIGNER), + "SET_OPTIONS_INVALID_HOME_DOMAIN": int32(SET_OPTIONS_INVALID_HOME_DOMAIN), + "SET_OPTIONS_AUTH_REVOCABLE_REQUIRED": int32(SET_OPTIONS_AUTH_REVOCABLE_REQUIRED), +} + +func (SetOptionsResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_SetOptionsResultCode +} +func (v SetOptionsResultCode) String() string { + if s, ok := 
_XdrNames_SetOptionsResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("SetOptionsResultCode#%d", v) +} +func (v *SetOptionsResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_SetOptionsResultCode[stok]; ok { + *v = SetOptionsResultCode(val) + return nil + } else if stok == "SetOptionsResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid SetOptionsResultCode.", stok)) + } +} +func (v SetOptionsResultCode) GetU32() uint32 { return uint32(v) } +func (v *SetOptionsResultCode) SetU32(n uint32) { *v = SetOptionsResultCode(n) } +func (v *SetOptionsResultCode) XdrPointer() interface{} { return v } +func (SetOptionsResultCode) XdrTypeName() string { return "SetOptionsResultCode" } +func (v SetOptionsResultCode) XdrValue() interface{} { return v } +func (v *SetOptionsResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_SetOptionsResultCode = *SetOptionsResultCode + +func XDR_SetOptionsResultCode(v *SetOptionsResultCode) *SetOptionsResultCode { return v } + +var _XdrComments_SetOptionsResultCode = map[int32]string{ + int32(SET_OPTIONS_SUCCESS): "codes considered as \"success\" for the operation", + int32(SET_OPTIONS_LOW_RESERVE): "not enough funds to add a signer", + int32(SET_OPTIONS_TOO_MANY_SIGNERS): "max number of signers already reached", + int32(SET_OPTIONS_BAD_FLAGS): "invalid combination of clear/set flags", + int32(SET_OPTIONS_INVALID_INFLATION): "inflation account does not exist", + int32(SET_OPTIONS_CANT_CHANGE): "can no longer change this option", + int32(SET_OPTIONS_UNKNOWN_FLAG): "can't set an unknown flag", + int32(SET_OPTIONS_THRESHOLD_OUT_OF_RANGE): "bad value for weight/threshold", + int32(SET_OPTIONS_BAD_SIGNER): "signer cannot be masterkey", + int32(SET_OPTIONS_INVALID_HOME_DOMAIN): "malformed home domain", + int32(SET_OPTIONS_AUTH_REVOCABLE_REQUIRED): "auth revocable is required for clawback", +} + +func (e SetOptionsResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_SetOptionsResultCode +} +func (_ SetOptionsResult) XdrValidTags() map[int32]bool { + return nil +} +func (u SetOptionsResult) XdrValid() bool { + return true +} +func (u *SetOptionsResult) XdrUnionTag() XdrNum32 { + return XDR_SetOptionsResultCode(&u.Code) +} +func (u *SetOptionsResult) XdrUnionTagName() string { + return "Code" +} +func (u *SetOptionsResult) XdrUnionBody() XdrType { + switch u.Code { + case SET_OPTIONS_SUCCESS: + return nil + default: + return nil + } +} +func (u *SetOptionsResult) XdrUnionBodyName() string { + switch u.Code { + case SET_OPTIONS_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_SetOptionsResult = *SetOptionsResult + +func (v *SetOptionsResult) XdrPointer() interface{} { return v } +func (SetOptionsResult) XdrTypeName() string { return "SetOptionsResult" } +func (v SetOptionsResult) XdrValue() interface{} { return v } +func (v *SetOptionsResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *SetOptionsResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_SetOptionsResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case SET_OPTIONS_SUCCESS: + return + default: + return + } +} +func XDR_SetOptionsResult(v *SetOptionsResult) *SetOptionsResult { return v } + +var 
_XdrNames_ChangeTrustResultCode = map[int32]string{ + int32(CHANGE_TRUST_SUCCESS): "CHANGE_TRUST_SUCCESS", + int32(CHANGE_TRUST_MALFORMED): "CHANGE_TRUST_MALFORMED", + int32(CHANGE_TRUST_NO_ISSUER): "CHANGE_TRUST_NO_ISSUER", + int32(CHANGE_TRUST_INVALID_LIMIT): "CHANGE_TRUST_INVALID_LIMIT", + int32(CHANGE_TRUST_LOW_RESERVE): "CHANGE_TRUST_LOW_RESERVE", + int32(CHANGE_TRUST_SELF_NOT_ALLOWED): "CHANGE_TRUST_SELF_NOT_ALLOWED", + int32(CHANGE_TRUST_TRUST_LINE_MISSING): "CHANGE_TRUST_TRUST_LINE_MISSING", + int32(CHANGE_TRUST_CANNOT_DELETE): "CHANGE_TRUST_CANNOT_DELETE", + int32(CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES): "CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES", +} +var _XdrValues_ChangeTrustResultCode = map[string]int32{ + "CHANGE_TRUST_SUCCESS": int32(CHANGE_TRUST_SUCCESS), + "CHANGE_TRUST_MALFORMED": int32(CHANGE_TRUST_MALFORMED), + "CHANGE_TRUST_NO_ISSUER": int32(CHANGE_TRUST_NO_ISSUER), + "CHANGE_TRUST_INVALID_LIMIT": int32(CHANGE_TRUST_INVALID_LIMIT), + "CHANGE_TRUST_LOW_RESERVE": int32(CHANGE_TRUST_LOW_RESERVE), + "CHANGE_TRUST_SELF_NOT_ALLOWED": int32(CHANGE_TRUST_SELF_NOT_ALLOWED), + "CHANGE_TRUST_TRUST_LINE_MISSING": int32(CHANGE_TRUST_TRUST_LINE_MISSING), + "CHANGE_TRUST_CANNOT_DELETE": int32(CHANGE_TRUST_CANNOT_DELETE), + "CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES": int32(CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES), +} + +func (ChangeTrustResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_ChangeTrustResultCode +} +func (v ChangeTrustResultCode) String() string { + if s, ok := _XdrNames_ChangeTrustResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("ChangeTrustResultCode#%d", v) +} +func (v *ChangeTrustResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ChangeTrustResultCode[stok]; ok { + *v = ChangeTrustResultCode(val) + return nil + } else if stok == "ChangeTrustResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ChangeTrustResultCode.", stok)) + } +} +func (v ChangeTrustResultCode) GetU32() uint32 { return uint32(v) } +func (v *ChangeTrustResultCode) SetU32(n uint32) { *v = ChangeTrustResultCode(n) } +func (v *ChangeTrustResultCode) XdrPointer() interface{} { return v } +func (ChangeTrustResultCode) XdrTypeName() string { return "ChangeTrustResultCode" } +func (v ChangeTrustResultCode) XdrValue() interface{} { return v } +func (v *ChangeTrustResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ChangeTrustResultCode = *ChangeTrustResultCode + +func XDR_ChangeTrustResultCode(v *ChangeTrustResultCode) *ChangeTrustResultCode { return v } + +var _XdrComments_ChangeTrustResultCode = map[int32]string{ + int32(CHANGE_TRUST_SUCCESS): "codes considered as \"success\" for the operation", + int32(CHANGE_TRUST_MALFORMED): "bad input", + int32(CHANGE_TRUST_NO_ISSUER): "could not find issuer", + int32(CHANGE_TRUST_INVALID_LIMIT): "cannot drop limit below balance", + int32(CHANGE_TRUST_LOW_RESERVE): "not enough funds to create a new trust line,", + int32(CHANGE_TRUST_SELF_NOT_ALLOWED): "trusting self is not allowed", + int32(CHANGE_TRUST_TRUST_LINE_MISSING): "Asset trustline is missing for pool", + int32(CHANGE_TRUST_CANNOT_DELETE): "Asset trustline is still referenced in a pool", + int32(CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES): "Asset trustline is deauthorized", +} + +func (e ChangeTrustResultCode) 
XdrEnumComments() map[int32]string { + return _XdrComments_ChangeTrustResultCode +} +func (_ ChangeTrustResult) XdrValidTags() map[int32]bool { + return nil +} +func (u ChangeTrustResult) XdrValid() bool { + return true +} +func (u *ChangeTrustResult) XdrUnionTag() XdrNum32 { + return XDR_ChangeTrustResultCode(&u.Code) +} +func (u *ChangeTrustResult) XdrUnionTagName() string { + return "Code" +} +func (u *ChangeTrustResult) XdrUnionBody() XdrType { + switch u.Code { + case CHANGE_TRUST_SUCCESS: + return nil + default: + return nil + } +} +func (u *ChangeTrustResult) XdrUnionBodyName() string { + switch u.Code { + case CHANGE_TRUST_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_ChangeTrustResult = *ChangeTrustResult + +func (v *ChangeTrustResult) XdrPointer() interface{} { return v } +func (ChangeTrustResult) XdrTypeName() string { return "ChangeTrustResult" } +func (v ChangeTrustResult) XdrValue() interface{} { return v } +func (v *ChangeTrustResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *ChangeTrustResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ChangeTrustResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case CHANGE_TRUST_SUCCESS: + return + default: + return + } +} +func XDR_ChangeTrustResult(v *ChangeTrustResult) *ChangeTrustResult { return v } + +var _XdrNames_AllowTrustResultCode = map[int32]string{ + int32(ALLOW_TRUST_SUCCESS): "ALLOW_TRUST_SUCCESS", + int32(ALLOW_TRUST_MALFORMED): "ALLOW_TRUST_MALFORMED", + int32(ALLOW_TRUST_NO_TRUST_LINE): "ALLOW_TRUST_NO_TRUST_LINE", + int32(ALLOW_TRUST_TRUST_NOT_REQUIRED): "ALLOW_TRUST_TRUST_NOT_REQUIRED", + int32(ALLOW_TRUST_CANT_REVOKE): "ALLOW_TRUST_CANT_REVOKE", + int32(ALLOW_TRUST_SELF_NOT_ALLOWED): "ALLOW_TRUST_SELF_NOT_ALLOWED", + int32(ALLOW_TRUST_LOW_RESERVE): "ALLOW_TRUST_LOW_RESERVE", +} +var _XdrValues_AllowTrustResultCode = map[string]int32{ + "ALLOW_TRUST_SUCCESS": int32(ALLOW_TRUST_SUCCESS), + "ALLOW_TRUST_MALFORMED": int32(ALLOW_TRUST_MALFORMED), + "ALLOW_TRUST_NO_TRUST_LINE": int32(ALLOW_TRUST_NO_TRUST_LINE), + "ALLOW_TRUST_TRUST_NOT_REQUIRED": int32(ALLOW_TRUST_TRUST_NOT_REQUIRED), + "ALLOW_TRUST_CANT_REVOKE": int32(ALLOW_TRUST_CANT_REVOKE), + "ALLOW_TRUST_SELF_NOT_ALLOWED": int32(ALLOW_TRUST_SELF_NOT_ALLOWED), + "ALLOW_TRUST_LOW_RESERVE": int32(ALLOW_TRUST_LOW_RESERVE), +} + +func (AllowTrustResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_AllowTrustResultCode +} +func (v AllowTrustResultCode) String() string { + if s, ok := _XdrNames_AllowTrustResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("AllowTrustResultCode#%d", v) +} +func (v *AllowTrustResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_AllowTrustResultCode[stok]; ok { + *v = AllowTrustResultCode(val) + return nil + } else if stok == "AllowTrustResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid AllowTrustResultCode.", stok)) + } +} +func (v AllowTrustResultCode) GetU32() uint32 { return uint32(v) } +func (v *AllowTrustResultCode) SetU32(n uint32) { *v = AllowTrustResultCode(n) } +func (v *AllowTrustResultCode) XdrPointer() interface{} { return v } +func (AllowTrustResultCode) XdrTypeName() string { return "AllowTrustResultCode" } +func (v AllowTrustResultCode) XdrValue() 
interface{} { return v } +func (v *AllowTrustResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_AllowTrustResultCode = *AllowTrustResultCode + +func XDR_AllowTrustResultCode(v *AllowTrustResultCode) *AllowTrustResultCode { return v } + +var _XdrComments_AllowTrustResultCode = map[int32]string{ + int32(ALLOW_TRUST_SUCCESS): "codes considered as \"success\" for the operation", + int32(ALLOW_TRUST_MALFORMED): "asset is not ASSET_TYPE_ALPHANUM", + int32(ALLOW_TRUST_NO_TRUST_LINE): "trustor does not have a trustline", + int32(ALLOW_TRUST_TRUST_NOT_REQUIRED): "source account does not require trust", + int32(ALLOW_TRUST_CANT_REVOKE): "source account can't revoke trust,", + int32(ALLOW_TRUST_SELF_NOT_ALLOWED): "trusting self is not allowed", + int32(ALLOW_TRUST_LOW_RESERVE): "claimable balances can't be created", +} + +func (e AllowTrustResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_AllowTrustResultCode +} +func (_ AllowTrustResult) XdrValidTags() map[int32]bool { + return nil +} +func (u AllowTrustResult) XdrValid() bool { + return true +} +func (u *AllowTrustResult) XdrUnionTag() XdrNum32 { + return XDR_AllowTrustResultCode(&u.Code) +} +func (u *AllowTrustResult) XdrUnionTagName() string { + return "Code" +} +func (u *AllowTrustResult) XdrUnionBody() XdrType { + switch u.Code { + case ALLOW_TRUST_SUCCESS: + return nil + default: + return nil + } +} +func (u *AllowTrustResult) XdrUnionBodyName() string { + switch u.Code { + case ALLOW_TRUST_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_AllowTrustResult = *AllowTrustResult + +func (v *AllowTrustResult) XdrPointer() interface{} { return v } +func (AllowTrustResult) XdrTypeName() string { return "AllowTrustResult" } +func (v AllowTrustResult) XdrValue() interface{} { return v } +func (v *AllowTrustResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *AllowTrustResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_AllowTrustResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case ALLOW_TRUST_SUCCESS: + return + default: + return + } +} +func XDR_AllowTrustResult(v *AllowTrustResult) *AllowTrustResult { return v } + +var _XdrNames_AccountMergeResultCode = map[int32]string{ + int32(ACCOUNT_MERGE_SUCCESS): "ACCOUNT_MERGE_SUCCESS", + int32(ACCOUNT_MERGE_MALFORMED): "ACCOUNT_MERGE_MALFORMED", + int32(ACCOUNT_MERGE_NO_ACCOUNT): "ACCOUNT_MERGE_NO_ACCOUNT", + int32(ACCOUNT_MERGE_IMMUTABLE_SET): "ACCOUNT_MERGE_IMMUTABLE_SET", + int32(ACCOUNT_MERGE_HAS_SUB_ENTRIES): "ACCOUNT_MERGE_HAS_SUB_ENTRIES", + int32(ACCOUNT_MERGE_SEQNUM_TOO_FAR): "ACCOUNT_MERGE_SEQNUM_TOO_FAR", + int32(ACCOUNT_MERGE_DEST_FULL): "ACCOUNT_MERGE_DEST_FULL", + int32(ACCOUNT_MERGE_IS_SPONSOR): "ACCOUNT_MERGE_IS_SPONSOR", +} +var _XdrValues_AccountMergeResultCode = map[string]int32{ + "ACCOUNT_MERGE_SUCCESS": int32(ACCOUNT_MERGE_SUCCESS), + "ACCOUNT_MERGE_MALFORMED": int32(ACCOUNT_MERGE_MALFORMED), + "ACCOUNT_MERGE_NO_ACCOUNT": int32(ACCOUNT_MERGE_NO_ACCOUNT), + "ACCOUNT_MERGE_IMMUTABLE_SET": int32(ACCOUNT_MERGE_IMMUTABLE_SET), + "ACCOUNT_MERGE_HAS_SUB_ENTRIES": int32(ACCOUNT_MERGE_HAS_SUB_ENTRIES), + "ACCOUNT_MERGE_SEQNUM_TOO_FAR": int32(ACCOUNT_MERGE_SEQNUM_TOO_FAR), + "ACCOUNT_MERGE_DEST_FULL": int32(ACCOUNT_MERGE_DEST_FULL), + "ACCOUNT_MERGE_IS_SPONSOR": int32(ACCOUNT_MERGE_IS_SPONSOR), +} + +func (AccountMergeResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_AccountMergeResultCode +} +func (v 
AccountMergeResultCode) String() string { + if s, ok := _XdrNames_AccountMergeResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("AccountMergeResultCode#%d", v) +} +func (v *AccountMergeResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_AccountMergeResultCode[stok]; ok { + *v = AccountMergeResultCode(val) + return nil + } else if stok == "AccountMergeResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid AccountMergeResultCode.", stok)) + } +} +func (v AccountMergeResultCode) GetU32() uint32 { return uint32(v) } +func (v *AccountMergeResultCode) SetU32(n uint32) { *v = AccountMergeResultCode(n) } +func (v *AccountMergeResultCode) XdrPointer() interface{} { return v } +func (AccountMergeResultCode) XdrTypeName() string { return "AccountMergeResultCode" } +func (v AccountMergeResultCode) XdrValue() interface{} { return v } +func (v *AccountMergeResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_AccountMergeResultCode = *AccountMergeResultCode + +func XDR_AccountMergeResultCode(v *AccountMergeResultCode) *AccountMergeResultCode { return v } + +var _XdrComments_AccountMergeResultCode = map[int32]string{ + int32(ACCOUNT_MERGE_SUCCESS): "codes considered as \"success\" for the operation", + int32(ACCOUNT_MERGE_MALFORMED): "can't merge onto itself", + int32(ACCOUNT_MERGE_NO_ACCOUNT): "destination does not exist", + int32(ACCOUNT_MERGE_IMMUTABLE_SET): "source account has AUTH_IMMUTABLE set", + int32(ACCOUNT_MERGE_HAS_SUB_ENTRIES): "account has trust lines/offers", + int32(ACCOUNT_MERGE_SEQNUM_TOO_FAR): "sequence number is over max allowed", + int32(ACCOUNT_MERGE_DEST_FULL): "can't add source balance to", + int32(ACCOUNT_MERGE_IS_SPONSOR): "destination balance", +} + +func (e AccountMergeResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_AccountMergeResultCode +} +func (_ AccountMergeResult) XdrValidTags() map[int32]bool { + return nil +} + +// how much got transferred from source account +func (u *AccountMergeResult) SourceAccountBalance() *Int64 { + switch u.Code { + case ACCOUNT_MERGE_SUCCESS: + if v, ok := u._u.(*Int64); ok { + return v + } else { + var zero Int64 + u._u = &zero + return &zero + } + default: + XdrPanic("AccountMergeResult.SourceAccountBalance accessed when Code == %v", u.Code) + return nil + } +} +func (u AccountMergeResult) XdrValid() bool { + return true +} +func (u *AccountMergeResult) XdrUnionTag() XdrNum32 { + return XDR_AccountMergeResultCode(&u.Code) +} +func (u *AccountMergeResult) XdrUnionTagName() string { + return "Code" +} +func (u *AccountMergeResult) XdrUnionBody() XdrType { + switch u.Code { + case ACCOUNT_MERGE_SUCCESS: + return XDR_Int64(u.SourceAccountBalance()) + default: + return nil + } +} +func (u *AccountMergeResult) XdrUnionBodyName() string { + switch u.Code { + case ACCOUNT_MERGE_SUCCESS: + return "SourceAccountBalance" + default: + return "" + } +} + +type XdrType_AccountMergeResult = *AccountMergeResult + +func (v *AccountMergeResult) XdrPointer() interface{} { return v } +func (AccountMergeResult) XdrTypeName() string { return "AccountMergeResult" } +func (v AccountMergeResult) XdrValue() interface{} { return v } +func (v *AccountMergeResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *AccountMergeResult) XdrRecurse(x XDR, name string) { 
+ if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_AccountMergeResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case ACCOUNT_MERGE_SUCCESS: + x.Marshal(x.Sprintf("%ssourceAccountBalance", name), XDR_Int64(u.SourceAccountBalance())) + return + default: + return + } +} +func XDR_AccountMergeResult(v *AccountMergeResult) *AccountMergeResult { return v } + +var _XdrNames_InflationResultCode = map[int32]string{ + int32(INFLATION_SUCCESS): "INFLATION_SUCCESS", + int32(INFLATION_NOT_TIME): "INFLATION_NOT_TIME", +} +var _XdrValues_InflationResultCode = map[string]int32{ + "INFLATION_SUCCESS": int32(INFLATION_SUCCESS), + "INFLATION_NOT_TIME": int32(INFLATION_NOT_TIME), +} + +func (InflationResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_InflationResultCode +} +func (v InflationResultCode) String() string { + if s, ok := _XdrNames_InflationResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("InflationResultCode#%d", v) +} +func (v *InflationResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_InflationResultCode[stok]; ok { + *v = InflationResultCode(val) + return nil + } else if stok == "InflationResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid InflationResultCode.", stok)) + } +} +func (v InflationResultCode) GetU32() uint32 { return uint32(v) } +func (v *InflationResultCode) SetU32(n uint32) { *v = InflationResultCode(n) } +func (v *InflationResultCode) XdrPointer() interface{} { return v } +func (InflationResultCode) XdrTypeName() string { return "InflationResultCode" } +func (v InflationResultCode) XdrValue() interface{} { return v } +func (v *InflationResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_InflationResultCode = *InflationResultCode + +func XDR_InflationResultCode(v *InflationResultCode) *InflationResultCode { return v } + +var _XdrComments_InflationResultCode = map[int32]string{ + int32(INFLATION_SUCCESS): "codes considered as \"success\" for the operation", + int32(INFLATION_NOT_TIME): "codes considered as \"failure\" for the operation", +} + +func (e InflationResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_InflationResultCode +} + +type XdrType_InflationPayout = *InflationPayout + +func (v *InflationPayout) XdrPointer() interface{} { return v } +func (InflationPayout) XdrTypeName() string { return "InflationPayout" } +func (v InflationPayout) XdrValue() interface{} { return v } +func (v *InflationPayout) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *InflationPayout) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sdestination", name), XDR_AccountID(&v.Destination)) + x.Marshal(x.Sprintf("%samount", name), XDR_Int64(&v.Amount)) +} +func XDR_InflationPayout(v *InflationPayout) *InflationPayout { return v } + +type _XdrVec_unbounded_InflationPayout []InflationPayout + +func (_XdrVec_unbounded_InflationPayout) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_InflationPayout) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_InflationPayout length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + 
XdrPanic("_XdrVec_unbounded_InflationPayout length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_InflationPayout) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_InflationPayout) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]InflationPayout, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_InflationPayout) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_InflationPayout(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_InflationPayout) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_InflationPayout) XdrTypeName() string { return "InflationPayout<>" } +func (v *_XdrVec_unbounded_InflationPayout) XdrPointer() interface{} { return (*[]InflationPayout)(v) } +func (v _XdrVec_unbounded_InflationPayout) XdrValue() interface{} { return ([]InflationPayout)(v) } +func (v *_XdrVec_unbounded_InflationPayout) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (_ InflationResult) XdrValidTags() map[int32]bool { + return nil +} +func (u *InflationResult) Payouts() *[]InflationPayout { + switch u.Code { + case INFLATION_SUCCESS: + if v, ok := u._u.(*[]InflationPayout); ok { + return v + } else { + var zero []InflationPayout + u._u = &zero + return &zero + } + default: + XdrPanic("InflationResult.Payouts accessed when Code == %v", u.Code) + return nil + } +} +func (u InflationResult) XdrValid() bool { + return true +} +func (u *InflationResult) XdrUnionTag() XdrNum32 { + return XDR_InflationResultCode(&u.Code) +} +func (u *InflationResult) XdrUnionTagName() string { + return "Code" +} +func (u *InflationResult) XdrUnionBody() XdrType { + switch u.Code { + case INFLATION_SUCCESS: + return (*_XdrVec_unbounded_InflationPayout)(u.Payouts()) + default: + return nil + } +} +func (u *InflationResult) XdrUnionBodyName() string { + switch u.Code { + case INFLATION_SUCCESS: + return "Payouts" + default: + return "" + } +} + +type XdrType_InflationResult = *InflationResult + +func (v *InflationResult) XdrPointer() interface{} { return v } +func (InflationResult) XdrTypeName() string { return "InflationResult" } +func (v InflationResult) XdrValue() interface{} { return v } +func (v *InflationResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *InflationResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_InflationResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case INFLATION_SUCCESS: + x.Marshal(x.Sprintf("%spayouts", name), (*_XdrVec_unbounded_InflationPayout)(u.Payouts())) + return + default: + return + } +} +func XDR_InflationResult(v *InflationResult) *InflationResult { return v } + +var _XdrNames_ManageDataResultCode = map[int32]string{ + int32(MANAGE_DATA_SUCCESS): "MANAGE_DATA_SUCCESS", + int32(MANAGE_DATA_NOT_SUPPORTED_YET): 
"MANAGE_DATA_NOT_SUPPORTED_YET", + int32(MANAGE_DATA_NAME_NOT_FOUND): "MANAGE_DATA_NAME_NOT_FOUND", + int32(MANAGE_DATA_LOW_RESERVE): "MANAGE_DATA_LOW_RESERVE", + int32(MANAGE_DATA_INVALID_NAME): "MANAGE_DATA_INVALID_NAME", +} +var _XdrValues_ManageDataResultCode = map[string]int32{ + "MANAGE_DATA_SUCCESS": int32(MANAGE_DATA_SUCCESS), + "MANAGE_DATA_NOT_SUPPORTED_YET": int32(MANAGE_DATA_NOT_SUPPORTED_YET), + "MANAGE_DATA_NAME_NOT_FOUND": int32(MANAGE_DATA_NAME_NOT_FOUND), + "MANAGE_DATA_LOW_RESERVE": int32(MANAGE_DATA_LOW_RESERVE), + "MANAGE_DATA_INVALID_NAME": int32(MANAGE_DATA_INVALID_NAME), +} + +func (ManageDataResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_ManageDataResultCode +} +func (v ManageDataResultCode) String() string { + if s, ok := _XdrNames_ManageDataResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("ManageDataResultCode#%d", v) +} +func (v *ManageDataResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ManageDataResultCode[stok]; ok { + *v = ManageDataResultCode(val) + return nil + } else if stok == "ManageDataResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ManageDataResultCode.", stok)) + } +} +func (v ManageDataResultCode) GetU32() uint32 { return uint32(v) } +func (v *ManageDataResultCode) SetU32(n uint32) { *v = ManageDataResultCode(n) } +func (v *ManageDataResultCode) XdrPointer() interface{} { return v } +func (ManageDataResultCode) XdrTypeName() string { return "ManageDataResultCode" } +func (v ManageDataResultCode) XdrValue() interface{} { return v } +func (v *ManageDataResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ManageDataResultCode = *ManageDataResultCode + +func XDR_ManageDataResultCode(v *ManageDataResultCode) *ManageDataResultCode { return v } + +var _XdrComments_ManageDataResultCode = map[int32]string{ + int32(MANAGE_DATA_SUCCESS): "codes considered as \"success\" for the operation", + int32(MANAGE_DATA_NOT_SUPPORTED_YET): "The network hasn't moved to this protocol change yet", + int32(MANAGE_DATA_NAME_NOT_FOUND): "Trying to remove a Data Entry that isn't there", + int32(MANAGE_DATA_LOW_RESERVE): "not enough funds to create a new Data Entry", + int32(MANAGE_DATA_INVALID_NAME): "Name not a valid string", +} + +func (e ManageDataResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_ManageDataResultCode +} +func (_ ManageDataResult) XdrValidTags() map[int32]bool { + return nil +} +func (u ManageDataResult) XdrValid() bool { + return true +} +func (u *ManageDataResult) XdrUnionTag() XdrNum32 { + return XDR_ManageDataResultCode(&u.Code) +} +func (u *ManageDataResult) XdrUnionTagName() string { + return "Code" +} +func (u *ManageDataResult) XdrUnionBody() XdrType { + switch u.Code { + case MANAGE_DATA_SUCCESS: + return nil + default: + return nil + } +} +func (u *ManageDataResult) XdrUnionBodyName() string { + switch u.Code { + case MANAGE_DATA_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_ManageDataResult = *ManageDataResult + +func (v *ManageDataResult) XdrPointer() interface{} { return v } +func (ManageDataResult) XdrTypeName() string { return "ManageDataResult" } +func (v ManageDataResult) XdrValue() interface{} { return v } +func (v *ManageDataResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u 
*ManageDataResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ManageDataResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case MANAGE_DATA_SUCCESS: + return + default: + return + } +} +func XDR_ManageDataResult(v *ManageDataResult) *ManageDataResult { return v } + +var _XdrNames_BumpSequenceResultCode = map[int32]string{ + int32(BUMP_SEQUENCE_SUCCESS): "BUMP_SEQUENCE_SUCCESS", + int32(BUMP_SEQUENCE_BAD_SEQ): "BUMP_SEQUENCE_BAD_SEQ", +} +var _XdrValues_BumpSequenceResultCode = map[string]int32{ + "BUMP_SEQUENCE_SUCCESS": int32(BUMP_SEQUENCE_SUCCESS), + "BUMP_SEQUENCE_BAD_SEQ": int32(BUMP_SEQUENCE_BAD_SEQ), +} + +func (BumpSequenceResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_BumpSequenceResultCode +} +func (v BumpSequenceResultCode) String() string { + if s, ok := _XdrNames_BumpSequenceResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("BumpSequenceResultCode#%d", v) +} +func (v *BumpSequenceResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_BumpSequenceResultCode[stok]; ok { + *v = BumpSequenceResultCode(val) + return nil + } else if stok == "BumpSequenceResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid BumpSequenceResultCode.", stok)) + } +} +func (v BumpSequenceResultCode) GetU32() uint32 { return uint32(v) } +func (v *BumpSequenceResultCode) SetU32(n uint32) { *v = BumpSequenceResultCode(n) } +func (v *BumpSequenceResultCode) XdrPointer() interface{} { return v } +func (BumpSequenceResultCode) XdrTypeName() string { return "BumpSequenceResultCode" } +func (v BumpSequenceResultCode) XdrValue() interface{} { return v } +func (v *BumpSequenceResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_BumpSequenceResultCode = *BumpSequenceResultCode + +func XDR_BumpSequenceResultCode(v *BumpSequenceResultCode) *BumpSequenceResultCode { return v } + +var _XdrComments_BumpSequenceResultCode = map[int32]string{ + int32(BUMP_SEQUENCE_SUCCESS): "codes considered as \"success\" for the operation", + int32(BUMP_SEQUENCE_BAD_SEQ): "codes considered as \"failure\" for the operation", +} + +func (e BumpSequenceResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_BumpSequenceResultCode +} +func (_ BumpSequenceResult) XdrValidTags() map[int32]bool { + return nil +} +func (u BumpSequenceResult) XdrValid() bool { + return true +} +func (u *BumpSequenceResult) XdrUnionTag() XdrNum32 { + return XDR_BumpSequenceResultCode(&u.Code) +} +func (u *BumpSequenceResult) XdrUnionTagName() string { + return "Code" +} +func (u *BumpSequenceResult) XdrUnionBody() XdrType { + switch u.Code { + case BUMP_SEQUENCE_SUCCESS: + return nil + default: + return nil + } +} +func (u *BumpSequenceResult) XdrUnionBodyName() string { + switch u.Code { + case BUMP_SEQUENCE_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_BumpSequenceResult = *BumpSequenceResult + +func (v *BumpSequenceResult) XdrPointer() interface{} { return v } +func (BumpSequenceResult) XdrTypeName() string { return "BumpSequenceResult" } +func (v BumpSequenceResult) XdrValue() interface{} { return v } +func (v *BumpSequenceResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *BumpSequenceResult) XdrRecurse(x XDR, name string) { + if name != "" 
{ + name = x.Sprintf("%s.", name) + } + XDR_BumpSequenceResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case BUMP_SEQUENCE_SUCCESS: + return + default: + return + } +} +func XDR_BumpSequenceResult(v *BumpSequenceResult) *BumpSequenceResult { return v } + +var _XdrNames_CreateClaimableBalanceResultCode = map[int32]string{ + int32(CREATE_CLAIMABLE_BALANCE_SUCCESS): "CREATE_CLAIMABLE_BALANCE_SUCCESS", + int32(CREATE_CLAIMABLE_BALANCE_MALFORMED): "CREATE_CLAIMABLE_BALANCE_MALFORMED", + int32(CREATE_CLAIMABLE_BALANCE_LOW_RESERVE): "CREATE_CLAIMABLE_BALANCE_LOW_RESERVE", + int32(CREATE_CLAIMABLE_BALANCE_NO_TRUST): "CREATE_CLAIMABLE_BALANCE_NO_TRUST", + int32(CREATE_CLAIMABLE_BALANCE_NOT_AUTHORIZED): "CREATE_CLAIMABLE_BALANCE_NOT_AUTHORIZED", + int32(CREATE_CLAIMABLE_BALANCE_UNDERFUNDED): "CREATE_CLAIMABLE_BALANCE_UNDERFUNDED", +} +var _XdrValues_CreateClaimableBalanceResultCode = map[string]int32{ + "CREATE_CLAIMABLE_BALANCE_SUCCESS": int32(CREATE_CLAIMABLE_BALANCE_SUCCESS), + "CREATE_CLAIMABLE_BALANCE_MALFORMED": int32(CREATE_CLAIMABLE_BALANCE_MALFORMED), + "CREATE_CLAIMABLE_BALANCE_LOW_RESERVE": int32(CREATE_CLAIMABLE_BALANCE_LOW_RESERVE), + "CREATE_CLAIMABLE_BALANCE_NO_TRUST": int32(CREATE_CLAIMABLE_BALANCE_NO_TRUST), + "CREATE_CLAIMABLE_BALANCE_NOT_AUTHORIZED": int32(CREATE_CLAIMABLE_BALANCE_NOT_AUTHORIZED), + "CREATE_CLAIMABLE_BALANCE_UNDERFUNDED": int32(CREATE_CLAIMABLE_BALANCE_UNDERFUNDED), +} + +func (CreateClaimableBalanceResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_CreateClaimableBalanceResultCode +} +func (v CreateClaimableBalanceResultCode) String() string { + if s, ok := _XdrNames_CreateClaimableBalanceResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("CreateClaimableBalanceResultCode#%d", v) +} +func (v *CreateClaimableBalanceResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_CreateClaimableBalanceResultCode[stok]; ok { + *v = CreateClaimableBalanceResultCode(val) + return nil + } else if stok == "CreateClaimableBalanceResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid CreateClaimableBalanceResultCode.", stok)) + } +} +func (v CreateClaimableBalanceResultCode) GetU32() uint32 { return uint32(v) } +func (v *CreateClaimableBalanceResultCode) SetU32(n uint32) { *v = CreateClaimableBalanceResultCode(n) } +func (v *CreateClaimableBalanceResultCode) XdrPointer() interface{} { return v } +func (CreateClaimableBalanceResultCode) XdrTypeName() string { + return "CreateClaimableBalanceResultCode" +} +func (v CreateClaimableBalanceResultCode) XdrValue() interface{} { return v } +func (v *CreateClaimableBalanceResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_CreateClaimableBalanceResultCode = *CreateClaimableBalanceResultCode + +func XDR_CreateClaimableBalanceResultCode(v *CreateClaimableBalanceResultCode) *CreateClaimableBalanceResultCode { + return v +} +func (_ CreateClaimableBalanceResult) XdrValidTags() map[int32]bool { + return nil +} +func (u *CreateClaimableBalanceResult) BalanceID() *ClaimableBalanceID { + switch u.Code { + case CREATE_CLAIMABLE_BALANCE_SUCCESS: + if v, ok := u._u.(*ClaimableBalanceID); ok { + return v + } else { + var zero ClaimableBalanceID + u._u = &zero + return &zero + } + default: + XdrPanic("CreateClaimableBalanceResult.BalanceID 
accessed when Code == %v", u.Code) + return nil + } +} +func (u CreateClaimableBalanceResult) XdrValid() bool { + return true +} +func (u *CreateClaimableBalanceResult) XdrUnionTag() XdrNum32 { + return XDR_CreateClaimableBalanceResultCode(&u.Code) +} +func (u *CreateClaimableBalanceResult) XdrUnionTagName() string { + return "Code" +} +func (u *CreateClaimableBalanceResult) XdrUnionBody() XdrType { + switch u.Code { + case CREATE_CLAIMABLE_BALANCE_SUCCESS: + return XDR_ClaimableBalanceID(u.BalanceID()) + default: + return nil + } +} +func (u *CreateClaimableBalanceResult) XdrUnionBodyName() string { + switch u.Code { + case CREATE_CLAIMABLE_BALANCE_SUCCESS: + return "BalanceID" + default: + return "" + } +} + +type XdrType_CreateClaimableBalanceResult = *CreateClaimableBalanceResult + +func (v *CreateClaimableBalanceResult) XdrPointer() interface{} { return v } +func (CreateClaimableBalanceResult) XdrTypeName() string { return "CreateClaimableBalanceResult" } +func (v CreateClaimableBalanceResult) XdrValue() interface{} { return v } +func (v *CreateClaimableBalanceResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *CreateClaimableBalanceResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_CreateClaimableBalanceResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case CREATE_CLAIMABLE_BALANCE_SUCCESS: + x.Marshal(x.Sprintf("%sbalanceID", name), XDR_ClaimableBalanceID(u.BalanceID())) + return + default: + return + } +} +func XDR_CreateClaimableBalanceResult(v *CreateClaimableBalanceResult) *CreateClaimableBalanceResult { + return v +} + +var _XdrNames_ClaimClaimableBalanceResultCode = map[int32]string{ + int32(CLAIM_CLAIMABLE_BALANCE_SUCCESS): "CLAIM_CLAIMABLE_BALANCE_SUCCESS", + int32(CLAIM_CLAIMABLE_BALANCE_DOES_NOT_EXIST): "CLAIM_CLAIMABLE_BALANCE_DOES_NOT_EXIST", + int32(CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM): "CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM", + int32(CLAIM_CLAIMABLE_BALANCE_LINE_FULL): "CLAIM_CLAIMABLE_BALANCE_LINE_FULL", + int32(CLAIM_CLAIMABLE_BALANCE_NO_TRUST): "CLAIM_CLAIMABLE_BALANCE_NO_TRUST", + int32(CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED): "CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED", +} +var _XdrValues_ClaimClaimableBalanceResultCode = map[string]int32{ + "CLAIM_CLAIMABLE_BALANCE_SUCCESS": int32(CLAIM_CLAIMABLE_BALANCE_SUCCESS), + "CLAIM_CLAIMABLE_BALANCE_DOES_NOT_EXIST": int32(CLAIM_CLAIMABLE_BALANCE_DOES_NOT_EXIST), + "CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM": int32(CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM), + "CLAIM_CLAIMABLE_BALANCE_LINE_FULL": int32(CLAIM_CLAIMABLE_BALANCE_LINE_FULL), + "CLAIM_CLAIMABLE_BALANCE_NO_TRUST": int32(CLAIM_CLAIMABLE_BALANCE_NO_TRUST), + "CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED": int32(CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED), +} + +func (ClaimClaimableBalanceResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_ClaimClaimableBalanceResultCode +} +func (v ClaimClaimableBalanceResultCode) String() string { + if s, ok := _XdrNames_ClaimClaimableBalanceResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("ClaimClaimableBalanceResultCode#%d", v) +} +func (v *ClaimClaimableBalanceResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ClaimClaimableBalanceResultCode[stok]; ok { + *v = ClaimClaimableBalanceResultCode(val) + return nil + } else if stok == "ClaimClaimableBalanceResultCode" { + if n, err := fmt.Fscanf(ss, 
"#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ClaimClaimableBalanceResultCode.", stok)) + } +} +func (v ClaimClaimableBalanceResultCode) GetU32() uint32 { return uint32(v) } +func (v *ClaimClaimableBalanceResultCode) SetU32(n uint32) { *v = ClaimClaimableBalanceResultCode(n) } +func (v *ClaimClaimableBalanceResultCode) XdrPointer() interface{} { return v } +func (ClaimClaimableBalanceResultCode) XdrTypeName() string { return "ClaimClaimableBalanceResultCode" } +func (v ClaimClaimableBalanceResultCode) XdrValue() interface{} { return v } +func (v *ClaimClaimableBalanceResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ClaimClaimableBalanceResultCode = *ClaimClaimableBalanceResultCode + +func XDR_ClaimClaimableBalanceResultCode(v *ClaimClaimableBalanceResultCode) *ClaimClaimableBalanceResultCode { + return v +} +func (_ ClaimClaimableBalanceResult) XdrValidTags() map[int32]bool { + return nil +} +func (u ClaimClaimableBalanceResult) XdrValid() bool { + return true +} +func (u *ClaimClaimableBalanceResult) XdrUnionTag() XdrNum32 { + return XDR_ClaimClaimableBalanceResultCode(&u.Code) +} +func (u *ClaimClaimableBalanceResult) XdrUnionTagName() string { + return "Code" +} +func (u *ClaimClaimableBalanceResult) XdrUnionBody() XdrType { + switch u.Code { + case CLAIM_CLAIMABLE_BALANCE_SUCCESS: + return nil + default: + return nil + } +} +func (u *ClaimClaimableBalanceResult) XdrUnionBodyName() string { + switch u.Code { + case CLAIM_CLAIMABLE_BALANCE_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_ClaimClaimableBalanceResult = *ClaimClaimableBalanceResult + +func (v *ClaimClaimableBalanceResult) XdrPointer() interface{} { return v } +func (ClaimClaimableBalanceResult) XdrTypeName() string { return "ClaimClaimableBalanceResult" } +func (v ClaimClaimableBalanceResult) XdrValue() interface{} { return v } +func (v *ClaimClaimableBalanceResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *ClaimClaimableBalanceResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ClaimClaimableBalanceResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case CLAIM_CLAIMABLE_BALANCE_SUCCESS: + return + default: + return + } +} +func XDR_ClaimClaimableBalanceResult(v *ClaimClaimableBalanceResult) *ClaimClaimableBalanceResult { + return v +} + +var _XdrNames_BeginSponsoringFutureReservesResultCode = map[int32]string{ + int32(BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS): "BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS", + int32(BEGIN_SPONSORING_FUTURE_RESERVES_MALFORMED): "BEGIN_SPONSORING_FUTURE_RESERVES_MALFORMED", + int32(BEGIN_SPONSORING_FUTURE_RESERVES_ALREADY_SPONSORED): "BEGIN_SPONSORING_FUTURE_RESERVES_ALREADY_SPONSORED", + int32(BEGIN_SPONSORING_FUTURE_RESERVES_RECURSIVE): "BEGIN_SPONSORING_FUTURE_RESERVES_RECURSIVE", +} +var _XdrValues_BeginSponsoringFutureReservesResultCode = map[string]int32{ + "BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS": int32(BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS), + "BEGIN_SPONSORING_FUTURE_RESERVES_MALFORMED": int32(BEGIN_SPONSORING_FUTURE_RESERVES_MALFORMED), + "BEGIN_SPONSORING_FUTURE_RESERVES_ALREADY_SPONSORED": int32(BEGIN_SPONSORING_FUTURE_RESERVES_ALREADY_SPONSORED), + "BEGIN_SPONSORING_FUTURE_RESERVES_RECURSIVE": int32(BEGIN_SPONSORING_FUTURE_RESERVES_RECURSIVE), +} + +func (BeginSponsoringFutureReservesResultCode) XdrEnumNames() map[int32]string { + return 
_XdrNames_BeginSponsoringFutureReservesResultCode +} +func (v BeginSponsoringFutureReservesResultCode) String() string { + if s, ok := _XdrNames_BeginSponsoringFutureReservesResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("BeginSponsoringFutureReservesResultCode#%d", v) +} +func (v *BeginSponsoringFutureReservesResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_BeginSponsoringFutureReservesResultCode[stok]; ok { + *v = BeginSponsoringFutureReservesResultCode(val) + return nil + } else if stok == "BeginSponsoringFutureReservesResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid BeginSponsoringFutureReservesResultCode.", stok)) + } +} +func (v BeginSponsoringFutureReservesResultCode) GetU32() uint32 { return uint32(v) } +func (v *BeginSponsoringFutureReservesResultCode) SetU32(n uint32) { + *v = BeginSponsoringFutureReservesResultCode(n) +} +func (v *BeginSponsoringFutureReservesResultCode) XdrPointer() interface{} { return v } +func (BeginSponsoringFutureReservesResultCode) XdrTypeName() string { + return "BeginSponsoringFutureReservesResultCode" +} +func (v BeginSponsoringFutureReservesResultCode) XdrValue() interface{} { return v } +func (v *BeginSponsoringFutureReservesResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_BeginSponsoringFutureReservesResultCode = *BeginSponsoringFutureReservesResultCode + +func XDR_BeginSponsoringFutureReservesResultCode(v *BeginSponsoringFutureReservesResultCode) *BeginSponsoringFutureReservesResultCode { + return v +} + +var _XdrComments_BeginSponsoringFutureReservesResultCode = map[int32]string{ + int32(BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS): "codes considered as \"success\" for the operation", + int32(BEGIN_SPONSORING_FUTURE_RESERVES_MALFORMED): "codes considered as \"failure\" for the operation", +} + +func (e BeginSponsoringFutureReservesResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_BeginSponsoringFutureReservesResultCode +} +func (_ BeginSponsoringFutureReservesResult) XdrValidTags() map[int32]bool { + return nil +} +func (u BeginSponsoringFutureReservesResult) XdrValid() bool { + return true +} +func (u *BeginSponsoringFutureReservesResult) XdrUnionTag() XdrNum32 { + return XDR_BeginSponsoringFutureReservesResultCode(&u.Code) +} +func (u *BeginSponsoringFutureReservesResult) XdrUnionTagName() string { + return "Code" +} +func (u *BeginSponsoringFutureReservesResult) XdrUnionBody() XdrType { + switch u.Code { + case BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS: + return nil + default: + return nil + } +} +func (u *BeginSponsoringFutureReservesResult) XdrUnionBodyName() string { + switch u.Code { + case BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_BeginSponsoringFutureReservesResult = *BeginSponsoringFutureReservesResult + +func (v *BeginSponsoringFutureReservesResult) XdrPointer() interface{} { return v } +func (BeginSponsoringFutureReservesResult) XdrTypeName() string { + return "BeginSponsoringFutureReservesResult" +} +func (v BeginSponsoringFutureReservesResult) XdrValue() interface{} { return v } +func (v *BeginSponsoringFutureReservesResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *BeginSponsoringFutureReservesResult) XdrRecurse(x XDR, name string) { + if name != "" { 
+ name = x.Sprintf("%s.", name) + } + XDR_BeginSponsoringFutureReservesResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS: + return + default: + return + } +} +func XDR_BeginSponsoringFutureReservesResult(v *BeginSponsoringFutureReservesResult) *BeginSponsoringFutureReservesResult { + return v +} + +var _XdrNames_EndSponsoringFutureReservesResultCode = map[int32]string{ + int32(END_SPONSORING_FUTURE_RESERVES_SUCCESS): "END_SPONSORING_FUTURE_RESERVES_SUCCESS", + int32(END_SPONSORING_FUTURE_RESERVES_NOT_SPONSORED): "END_SPONSORING_FUTURE_RESERVES_NOT_SPONSORED", +} +var _XdrValues_EndSponsoringFutureReservesResultCode = map[string]int32{ + "END_SPONSORING_FUTURE_RESERVES_SUCCESS": int32(END_SPONSORING_FUTURE_RESERVES_SUCCESS), + "END_SPONSORING_FUTURE_RESERVES_NOT_SPONSORED": int32(END_SPONSORING_FUTURE_RESERVES_NOT_SPONSORED), +} + +func (EndSponsoringFutureReservesResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_EndSponsoringFutureReservesResultCode +} +func (v EndSponsoringFutureReservesResultCode) String() string { + if s, ok := _XdrNames_EndSponsoringFutureReservesResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("EndSponsoringFutureReservesResultCode#%d", v) +} +func (v *EndSponsoringFutureReservesResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_EndSponsoringFutureReservesResultCode[stok]; ok { + *v = EndSponsoringFutureReservesResultCode(val) + return nil + } else if stok == "EndSponsoringFutureReservesResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid EndSponsoringFutureReservesResultCode.", stok)) + } +} +func (v EndSponsoringFutureReservesResultCode) GetU32() uint32 { return uint32(v) } +func (v *EndSponsoringFutureReservesResultCode) SetU32(n uint32) { + *v = EndSponsoringFutureReservesResultCode(n) +} +func (v *EndSponsoringFutureReservesResultCode) XdrPointer() interface{} { return v } +func (EndSponsoringFutureReservesResultCode) XdrTypeName() string { + return "EndSponsoringFutureReservesResultCode" +} +func (v EndSponsoringFutureReservesResultCode) XdrValue() interface{} { return v } +func (v *EndSponsoringFutureReservesResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_EndSponsoringFutureReservesResultCode = *EndSponsoringFutureReservesResultCode + +func XDR_EndSponsoringFutureReservesResultCode(v *EndSponsoringFutureReservesResultCode) *EndSponsoringFutureReservesResultCode { + return v +} + +var _XdrComments_EndSponsoringFutureReservesResultCode = map[int32]string{ + int32(END_SPONSORING_FUTURE_RESERVES_SUCCESS): "codes considered as \"success\" for the operation", + int32(END_SPONSORING_FUTURE_RESERVES_NOT_SPONSORED): "codes considered as \"failure\" for the operation", +} + +func (e EndSponsoringFutureReservesResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_EndSponsoringFutureReservesResultCode +} +func (_ EndSponsoringFutureReservesResult) XdrValidTags() map[int32]bool { + return nil +} +func (u EndSponsoringFutureReservesResult) XdrValid() bool { + return true +} +func (u *EndSponsoringFutureReservesResult) XdrUnionTag() XdrNum32 { + return XDR_EndSponsoringFutureReservesResultCode(&u.Code) +} +func (u *EndSponsoringFutureReservesResult) XdrUnionTagName() string { + return 
"Code" +} +func (u *EndSponsoringFutureReservesResult) XdrUnionBody() XdrType { + switch u.Code { + case END_SPONSORING_FUTURE_RESERVES_SUCCESS: + return nil + default: + return nil + } +} +func (u *EndSponsoringFutureReservesResult) XdrUnionBodyName() string { + switch u.Code { + case END_SPONSORING_FUTURE_RESERVES_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_EndSponsoringFutureReservesResult = *EndSponsoringFutureReservesResult + +func (v *EndSponsoringFutureReservesResult) XdrPointer() interface{} { return v } +func (EndSponsoringFutureReservesResult) XdrTypeName() string { + return "EndSponsoringFutureReservesResult" +} +func (v EndSponsoringFutureReservesResult) XdrValue() interface{} { return v } +func (v *EndSponsoringFutureReservesResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *EndSponsoringFutureReservesResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_EndSponsoringFutureReservesResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case END_SPONSORING_FUTURE_RESERVES_SUCCESS: + return + default: + return + } +} +func XDR_EndSponsoringFutureReservesResult(v *EndSponsoringFutureReservesResult) *EndSponsoringFutureReservesResult { + return v +} + +var _XdrNames_RevokeSponsorshipResultCode = map[int32]string{ + int32(REVOKE_SPONSORSHIP_SUCCESS): "REVOKE_SPONSORSHIP_SUCCESS", + int32(REVOKE_SPONSORSHIP_DOES_NOT_EXIST): "REVOKE_SPONSORSHIP_DOES_NOT_EXIST", + int32(REVOKE_SPONSORSHIP_NOT_SPONSOR): "REVOKE_SPONSORSHIP_NOT_SPONSOR", + int32(REVOKE_SPONSORSHIP_LOW_RESERVE): "REVOKE_SPONSORSHIP_LOW_RESERVE", + int32(REVOKE_SPONSORSHIP_ONLY_TRANSFERABLE): "REVOKE_SPONSORSHIP_ONLY_TRANSFERABLE", + int32(REVOKE_SPONSORSHIP_MALFORMED): "REVOKE_SPONSORSHIP_MALFORMED", +} +var _XdrValues_RevokeSponsorshipResultCode = map[string]int32{ + "REVOKE_SPONSORSHIP_SUCCESS": int32(REVOKE_SPONSORSHIP_SUCCESS), + "REVOKE_SPONSORSHIP_DOES_NOT_EXIST": int32(REVOKE_SPONSORSHIP_DOES_NOT_EXIST), + "REVOKE_SPONSORSHIP_NOT_SPONSOR": int32(REVOKE_SPONSORSHIP_NOT_SPONSOR), + "REVOKE_SPONSORSHIP_LOW_RESERVE": int32(REVOKE_SPONSORSHIP_LOW_RESERVE), + "REVOKE_SPONSORSHIP_ONLY_TRANSFERABLE": int32(REVOKE_SPONSORSHIP_ONLY_TRANSFERABLE), + "REVOKE_SPONSORSHIP_MALFORMED": int32(REVOKE_SPONSORSHIP_MALFORMED), +} + +func (RevokeSponsorshipResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_RevokeSponsorshipResultCode +} +func (v RevokeSponsorshipResultCode) String() string { + if s, ok := _XdrNames_RevokeSponsorshipResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("RevokeSponsorshipResultCode#%d", v) +} +func (v *RevokeSponsorshipResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_RevokeSponsorshipResultCode[stok]; ok { + *v = RevokeSponsorshipResultCode(val) + return nil + } else if stok == "RevokeSponsorshipResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid RevokeSponsorshipResultCode.", stok)) + } +} +func (v RevokeSponsorshipResultCode) GetU32() uint32 { return uint32(v) } +func (v *RevokeSponsorshipResultCode) SetU32(n uint32) { *v = RevokeSponsorshipResultCode(n) } +func (v *RevokeSponsorshipResultCode) XdrPointer() interface{} { return v } +func (RevokeSponsorshipResultCode) XdrTypeName() string { return "RevokeSponsorshipResultCode" } 
+func (v RevokeSponsorshipResultCode) XdrValue() interface{} { return v } +func (v *RevokeSponsorshipResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_RevokeSponsorshipResultCode = *RevokeSponsorshipResultCode + +func XDR_RevokeSponsorshipResultCode(v *RevokeSponsorshipResultCode) *RevokeSponsorshipResultCode { + return v +} + +var _XdrComments_RevokeSponsorshipResultCode = map[int32]string{ + int32(REVOKE_SPONSORSHIP_SUCCESS): "codes considered as \"success\" for the operation", + int32(REVOKE_SPONSORSHIP_DOES_NOT_EXIST): "codes considered as \"failure\" for the operation", +} + +func (e RevokeSponsorshipResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_RevokeSponsorshipResultCode +} +func (_ RevokeSponsorshipResult) XdrValidTags() map[int32]bool { + return nil +} +func (u RevokeSponsorshipResult) XdrValid() bool { + return true +} +func (u *RevokeSponsorshipResult) XdrUnionTag() XdrNum32 { + return XDR_RevokeSponsorshipResultCode(&u.Code) +} +func (u *RevokeSponsorshipResult) XdrUnionTagName() string { + return "Code" +} +func (u *RevokeSponsorshipResult) XdrUnionBody() XdrType { + switch u.Code { + case REVOKE_SPONSORSHIP_SUCCESS: + return nil + default: + return nil + } +} +func (u *RevokeSponsorshipResult) XdrUnionBodyName() string { + switch u.Code { + case REVOKE_SPONSORSHIP_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_RevokeSponsorshipResult = *RevokeSponsorshipResult + +func (v *RevokeSponsorshipResult) XdrPointer() interface{} { return v } +func (RevokeSponsorshipResult) XdrTypeName() string { return "RevokeSponsorshipResult" } +func (v RevokeSponsorshipResult) XdrValue() interface{} { return v } +func (v *RevokeSponsorshipResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *RevokeSponsorshipResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_RevokeSponsorshipResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case REVOKE_SPONSORSHIP_SUCCESS: + return + default: + return + } +} +func XDR_RevokeSponsorshipResult(v *RevokeSponsorshipResult) *RevokeSponsorshipResult { return v } + +var _XdrNames_ClawbackResultCode = map[int32]string{ + int32(CLAWBACK_SUCCESS): "CLAWBACK_SUCCESS", + int32(CLAWBACK_MALFORMED): "CLAWBACK_MALFORMED", + int32(CLAWBACK_NOT_CLAWBACK_ENABLED): "CLAWBACK_NOT_CLAWBACK_ENABLED", + int32(CLAWBACK_NO_TRUST): "CLAWBACK_NO_TRUST", + int32(CLAWBACK_UNDERFUNDED): "CLAWBACK_UNDERFUNDED", +} +var _XdrValues_ClawbackResultCode = map[string]int32{ + "CLAWBACK_SUCCESS": int32(CLAWBACK_SUCCESS), + "CLAWBACK_MALFORMED": int32(CLAWBACK_MALFORMED), + "CLAWBACK_NOT_CLAWBACK_ENABLED": int32(CLAWBACK_NOT_CLAWBACK_ENABLED), + "CLAWBACK_NO_TRUST": int32(CLAWBACK_NO_TRUST), + "CLAWBACK_UNDERFUNDED": int32(CLAWBACK_UNDERFUNDED), +} + +func (ClawbackResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_ClawbackResultCode +} +func (v ClawbackResultCode) String() string { + if s, ok := _XdrNames_ClawbackResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("ClawbackResultCode#%d", v) +} +func (v *ClawbackResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ClawbackResultCode[stok]; ok { + *v = ClawbackResultCode(val) + return nil + } else if stok == "ClawbackResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + 
return XdrError(fmt.Sprintf("%s is not a valid ClawbackResultCode.", stok)) + } +} +func (v ClawbackResultCode) GetU32() uint32 { return uint32(v) } +func (v *ClawbackResultCode) SetU32(n uint32) { *v = ClawbackResultCode(n) } +func (v *ClawbackResultCode) XdrPointer() interface{} { return v } +func (ClawbackResultCode) XdrTypeName() string { return "ClawbackResultCode" } +func (v ClawbackResultCode) XdrValue() interface{} { return v } +func (v *ClawbackResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ClawbackResultCode = *ClawbackResultCode + +func XDR_ClawbackResultCode(v *ClawbackResultCode) *ClawbackResultCode { return v } + +var _XdrComments_ClawbackResultCode = map[int32]string{ + int32(CLAWBACK_SUCCESS): "codes considered as \"success\" for the operation", + int32(CLAWBACK_MALFORMED): "codes considered as \"failure\" for the operation", +} + +func (e ClawbackResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_ClawbackResultCode +} +func (_ ClawbackResult) XdrValidTags() map[int32]bool { + return nil +} +func (u ClawbackResult) XdrValid() bool { + return true +} +func (u *ClawbackResult) XdrUnionTag() XdrNum32 { + return XDR_ClawbackResultCode(&u.Code) +} +func (u *ClawbackResult) XdrUnionTagName() string { + return "Code" +} +func (u *ClawbackResult) XdrUnionBody() XdrType { + switch u.Code { + case CLAWBACK_SUCCESS: + return nil + default: + return nil + } +} +func (u *ClawbackResult) XdrUnionBodyName() string { + switch u.Code { + case CLAWBACK_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_ClawbackResult = *ClawbackResult + +func (v *ClawbackResult) XdrPointer() interface{} { return v } +func (ClawbackResult) XdrTypeName() string { return "ClawbackResult" } +func (v ClawbackResult) XdrValue() interface{} { return v } +func (v *ClawbackResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *ClawbackResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ClawbackResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case CLAWBACK_SUCCESS: + return + default: + return + } +} +func XDR_ClawbackResult(v *ClawbackResult) *ClawbackResult { return v } + +var _XdrNames_ClawbackClaimableBalanceResultCode = map[int32]string{ + int32(CLAWBACK_CLAIMABLE_BALANCE_SUCCESS): "CLAWBACK_CLAIMABLE_BALANCE_SUCCESS", + int32(CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST): "CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST", + int32(CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER): "CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER", + int32(CLAWBACK_CLAIMABLE_BALANCE_NOT_CLAWBACK_ENABLED): "CLAWBACK_CLAIMABLE_BALANCE_NOT_CLAWBACK_ENABLED", +} +var _XdrValues_ClawbackClaimableBalanceResultCode = map[string]int32{ + "CLAWBACK_CLAIMABLE_BALANCE_SUCCESS": int32(CLAWBACK_CLAIMABLE_BALANCE_SUCCESS), + "CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST": int32(CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST), + "CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER": int32(CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER), + "CLAWBACK_CLAIMABLE_BALANCE_NOT_CLAWBACK_ENABLED": int32(CLAWBACK_CLAIMABLE_BALANCE_NOT_CLAWBACK_ENABLED), +} + +func (ClawbackClaimableBalanceResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_ClawbackClaimableBalanceResultCode +} +func (v ClawbackClaimableBalanceResultCode) String() string { + if s, ok := _XdrNames_ClawbackClaimableBalanceResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("ClawbackClaimableBalanceResultCode#%d", v) +} +func (v 
*ClawbackClaimableBalanceResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_ClawbackClaimableBalanceResultCode[stok]; ok { + *v = ClawbackClaimableBalanceResultCode(val) + return nil + } else if stok == "ClawbackClaimableBalanceResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid ClawbackClaimableBalanceResultCode.", stok)) + } +} +func (v ClawbackClaimableBalanceResultCode) GetU32() uint32 { return uint32(v) } +func (v *ClawbackClaimableBalanceResultCode) SetU32(n uint32) { + *v = ClawbackClaimableBalanceResultCode(n) +} +func (v *ClawbackClaimableBalanceResultCode) XdrPointer() interface{} { return v } +func (ClawbackClaimableBalanceResultCode) XdrTypeName() string { + return "ClawbackClaimableBalanceResultCode" +} +func (v ClawbackClaimableBalanceResultCode) XdrValue() interface{} { return v } +func (v *ClawbackClaimableBalanceResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_ClawbackClaimableBalanceResultCode = *ClawbackClaimableBalanceResultCode + +func XDR_ClawbackClaimableBalanceResultCode(v *ClawbackClaimableBalanceResultCode) *ClawbackClaimableBalanceResultCode { + return v +} + +var _XdrComments_ClawbackClaimableBalanceResultCode = map[int32]string{ + int32(CLAWBACK_CLAIMABLE_BALANCE_SUCCESS): "codes considered as \"success\" for the operation", + int32(CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST): "codes considered as \"failure\" for the operation", +} + +func (e ClawbackClaimableBalanceResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_ClawbackClaimableBalanceResultCode +} +func (_ ClawbackClaimableBalanceResult) XdrValidTags() map[int32]bool { + return nil +} +func (u ClawbackClaimableBalanceResult) XdrValid() bool { + return true +} +func (u *ClawbackClaimableBalanceResult) XdrUnionTag() XdrNum32 { + return XDR_ClawbackClaimableBalanceResultCode(&u.Code) +} +func (u *ClawbackClaimableBalanceResult) XdrUnionTagName() string { + return "Code" +} +func (u *ClawbackClaimableBalanceResult) XdrUnionBody() XdrType { + switch u.Code { + case CLAWBACK_CLAIMABLE_BALANCE_SUCCESS: + return nil + default: + return nil + } +} +func (u *ClawbackClaimableBalanceResult) XdrUnionBodyName() string { + switch u.Code { + case CLAWBACK_CLAIMABLE_BALANCE_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_ClawbackClaimableBalanceResult = *ClawbackClaimableBalanceResult + +func (v *ClawbackClaimableBalanceResult) XdrPointer() interface{} { return v } +func (ClawbackClaimableBalanceResult) XdrTypeName() string { return "ClawbackClaimableBalanceResult" } +func (v ClawbackClaimableBalanceResult) XdrValue() interface{} { return v } +func (v *ClawbackClaimableBalanceResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *ClawbackClaimableBalanceResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_ClawbackClaimableBalanceResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case CLAWBACK_CLAIMABLE_BALANCE_SUCCESS: + return + default: + return + } +} +func XDR_ClawbackClaimableBalanceResult(v *ClawbackClaimableBalanceResult) *ClawbackClaimableBalanceResult { + return v +} + +var _XdrNames_SetTrustLineFlagsResultCode = map[int32]string{ + int32(SET_TRUST_LINE_FLAGS_SUCCESS): "SET_TRUST_LINE_FLAGS_SUCCESS", + 
int32(SET_TRUST_LINE_FLAGS_MALFORMED): "SET_TRUST_LINE_FLAGS_MALFORMED", + int32(SET_TRUST_LINE_FLAGS_NO_TRUST_LINE): "SET_TRUST_LINE_FLAGS_NO_TRUST_LINE", + int32(SET_TRUST_LINE_FLAGS_CANT_REVOKE): "SET_TRUST_LINE_FLAGS_CANT_REVOKE", + int32(SET_TRUST_LINE_FLAGS_INVALID_STATE): "SET_TRUST_LINE_FLAGS_INVALID_STATE", + int32(SET_TRUST_LINE_FLAGS_LOW_RESERVE): "SET_TRUST_LINE_FLAGS_LOW_RESERVE", +} +var _XdrValues_SetTrustLineFlagsResultCode = map[string]int32{ + "SET_TRUST_LINE_FLAGS_SUCCESS": int32(SET_TRUST_LINE_FLAGS_SUCCESS), + "SET_TRUST_LINE_FLAGS_MALFORMED": int32(SET_TRUST_LINE_FLAGS_MALFORMED), + "SET_TRUST_LINE_FLAGS_NO_TRUST_LINE": int32(SET_TRUST_LINE_FLAGS_NO_TRUST_LINE), + "SET_TRUST_LINE_FLAGS_CANT_REVOKE": int32(SET_TRUST_LINE_FLAGS_CANT_REVOKE), + "SET_TRUST_LINE_FLAGS_INVALID_STATE": int32(SET_TRUST_LINE_FLAGS_INVALID_STATE), + "SET_TRUST_LINE_FLAGS_LOW_RESERVE": int32(SET_TRUST_LINE_FLAGS_LOW_RESERVE), +} + +func (SetTrustLineFlagsResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_SetTrustLineFlagsResultCode +} +func (v SetTrustLineFlagsResultCode) String() string { + if s, ok := _XdrNames_SetTrustLineFlagsResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("SetTrustLineFlagsResultCode#%d", v) +} +func (v *SetTrustLineFlagsResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_SetTrustLineFlagsResultCode[stok]; ok { + *v = SetTrustLineFlagsResultCode(val) + return nil + } else if stok == "SetTrustLineFlagsResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid SetTrustLineFlagsResultCode.", stok)) + } +} +func (v SetTrustLineFlagsResultCode) GetU32() uint32 { return uint32(v) } +func (v *SetTrustLineFlagsResultCode) SetU32(n uint32) { *v = SetTrustLineFlagsResultCode(n) } +func (v *SetTrustLineFlagsResultCode) XdrPointer() interface{} { return v } +func (SetTrustLineFlagsResultCode) XdrTypeName() string { return "SetTrustLineFlagsResultCode" } +func (v SetTrustLineFlagsResultCode) XdrValue() interface{} { return v } +func (v *SetTrustLineFlagsResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_SetTrustLineFlagsResultCode = *SetTrustLineFlagsResultCode + +func XDR_SetTrustLineFlagsResultCode(v *SetTrustLineFlagsResultCode) *SetTrustLineFlagsResultCode { + return v +} + +var _XdrComments_SetTrustLineFlagsResultCode = map[int32]string{ + int32(SET_TRUST_LINE_FLAGS_SUCCESS): "codes considered as \"success\" for the operation", + int32(SET_TRUST_LINE_FLAGS_MALFORMED): "codes considered as \"failure\" for the operation", + int32(SET_TRUST_LINE_FLAGS_LOW_RESERVE): "claimable balances can't be created", +} + +func (e SetTrustLineFlagsResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_SetTrustLineFlagsResultCode +} +func (_ SetTrustLineFlagsResult) XdrValidTags() map[int32]bool { + return nil +} +func (u SetTrustLineFlagsResult) XdrValid() bool { + return true +} +func (u *SetTrustLineFlagsResult) XdrUnionTag() XdrNum32 { + return XDR_SetTrustLineFlagsResultCode(&u.Code) +} +func (u *SetTrustLineFlagsResult) XdrUnionTagName() string { + return "Code" +} +func (u *SetTrustLineFlagsResult) XdrUnionBody() XdrType { + switch u.Code { + case SET_TRUST_LINE_FLAGS_SUCCESS: + return nil + default: + return nil + } +} +func (u *SetTrustLineFlagsResult) XdrUnionBodyName() string { + switch 
u.Code { + case SET_TRUST_LINE_FLAGS_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_SetTrustLineFlagsResult = *SetTrustLineFlagsResult + +func (v *SetTrustLineFlagsResult) XdrPointer() interface{} { return v } +func (SetTrustLineFlagsResult) XdrTypeName() string { return "SetTrustLineFlagsResult" } +func (v SetTrustLineFlagsResult) XdrValue() interface{} { return v } +func (v *SetTrustLineFlagsResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *SetTrustLineFlagsResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_SetTrustLineFlagsResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case SET_TRUST_LINE_FLAGS_SUCCESS: + return + default: + return + } +} +func XDR_SetTrustLineFlagsResult(v *SetTrustLineFlagsResult) *SetTrustLineFlagsResult { return v } + +var _XdrNames_LiquidityPoolDepositResultCode = map[int32]string{ + int32(LIQUIDITY_POOL_DEPOSIT_SUCCESS): "LIQUIDITY_POOL_DEPOSIT_SUCCESS", + int32(LIQUIDITY_POOL_DEPOSIT_MALFORMED): "LIQUIDITY_POOL_DEPOSIT_MALFORMED", + int32(LIQUIDITY_POOL_DEPOSIT_NO_TRUST): "LIQUIDITY_POOL_DEPOSIT_NO_TRUST", + int32(LIQUIDITY_POOL_DEPOSIT_NOT_AUTHORIZED): "LIQUIDITY_POOL_DEPOSIT_NOT_AUTHORIZED", + int32(LIQUIDITY_POOL_DEPOSIT_UNDERFUNDED): "LIQUIDITY_POOL_DEPOSIT_UNDERFUNDED", + int32(LIQUIDITY_POOL_DEPOSIT_LINE_FULL): "LIQUIDITY_POOL_DEPOSIT_LINE_FULL", + int32(LIQUIDITY_POOL_DEPOSIT_BAD_PRICE): "LIQUIDITY_POOL_DEPOSIT_BAD_PRICE", + int32(LIQUIDITY_POOL_DEPOSIT_POOL_FULL): "LIQUIDITY_POOL_DEPOSIT_POOL_FULL", +} +var _XdrValues_LiquidityPoolDepositResultCode = map[string]int32{ + "LIQUIDITY_POOL_DEPOSIT_SUCCESS": int32(LIQUIDITY_POOL_DEPOSIT_SUCCESS), + "LIQUIDITY_POOL_DEPOSIT_MALFORMED": int32(LIQUIDITY_POOL_DEPOSIT_MALFORMED), + "LIQUIDITY_POOL_DEPOSIT_NO_TRUST": int32(LIQUIDITY_POOL_DEPOSIT_NO_TRUST), + "LIQUIDITY_POOL_DEPOSIT_NOT_AUTHORIZED": int32(LIQUIDITY_POOL_DEPOSIT_NOT_AUTHORIZED), + "LIQUIDITY_POOL_DEPOSIT_UNDERFUNDED": int32(LIQUIDITY_POOL_DEPOSIT_UNDERFUNDED), + "LIQUIDITY_POOL_DEPOSIT_LINE_FULL": int32(LIQUIDITY_POOL_DEPOSIT_LINE_FULL), + "LIQUIDITY_POOL_DEPOSIT_BAD_PRICE": int32(LIQUIDITY_POOL_DEPOSIT_BAD_PRICE), + "LIQUIDITY_POOL_DEPOSIT_POOL_FULL": int32(LIQUIDITY_POOL_DEPOSIT_POOL_FULL), +} + +func (LiquidityPoolDepositResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_LiquidityPoolDepositResultCode +} +func (v LiquidityPoolDepositResultCode) String() string { + if s, ok := _XdrNames_LiquidityPoolDepositResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("LiquidityPoolDepositResultCode#%d", v) +} +func (v *LiquidityPoolDepositResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_LiquidityPoolDepositResultCode[stok]; ok { + *v = LiquidityPoolDepositResultCode(val) + return nil + } else if stok == "LiquidityPoolDepositResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid LiquidityPoolDepositResultCode.", stok)) + } +} +func (v LiquidityPoolDepositResultCode) GetU32() uint32 { return uint32(v) } +func (v *LiquidityPoolDepositResultCode) SetU32(n uint32) { *v = LiquidityPoolDepositResultCode(n) } +func (v *LiquidityPoolDepositResultCode) XdrPointer() interface{} { return v } +func (LiquidityPoolDepositResultCode) XdrTypeName() string { return "LiquidityPoolDepositResultCode" 
} +func (v LiquidityPoolDepositResultCode) XdrValue() interface{} { return v } +func (v *LiquidityPoolDepositResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LiquidityPoolDepositResultCode = *LiquidityPoolDepositResultCode + +func XDR_LiquidityPoolDepositResultCode(v *LiquidityPoolDepositResultCode) *LiquidityPoolDepositResultCode { + return v +} + +var _XdrComments_LiquidityPoolDepositResultCode = map[int32]string{ + int32(LIQUIDITY_POOL_DEPOSIT_SUCCESS): "codes considered as \"success\" for the operation", + int32(LIQUIDITY_POOL_DEPOSIT_MALFORMED): "bad input", + int32(LIQUIDITY_POOL_DEPOSIT_NO_TRUST): "no trust line for one of the", + int32(LIQUIDITY_POOL_DEPOSIT_NOT_AUTHORIZED): "not authorized for one of the", + int32(LIQUIDITY_POOL_DEPOSIT_UNDERFUNDED): "not enough balance for one of", + int32(LIQUIDITY_POOL_DEPOSIT_LINE_FULL): "pool share trust line doesn't", + int32(LIQUIDITY_POOL_DEPOSIT_BAD_PRICE): "deposit price outside bounds", + int32(LIQUIDITY_POOL_DEPOSIT_POOL_FULL): "pool reserves are full", +} + +func (e LiquidityPoolDepositResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_LiquidityPoolDepositResultCode +} +func (_ LiquidityPoolDepositResult) XdrValidTags() map[int32]bool { + return nil +} +func (u LiquidityPoolDepositResult) XdrValid() bool { + return true +} +func (u *LiquidityPoolDepositResult) XdrUnionTag() XdrNum32 { + return XDR_LiquidityPoolDepositResultCode(&u.Code) +} +func (u *LiquidityPoolDepositResult) XdrUnionTagName() string { + return "Code" +} +func (u *LiquidityPoolDepositResult) XdrUnionBody() XdrType { + switch u.Code { + case LIQUIDITY_POOL_DEPOSIT_SUCCESS: + return nil + default: + return nil + } +} +func (u *LiquidityPoolDepositResult) XdrUnionBodyName() string { + switch u.Code { + case LIQUIDITY_POOL_DEPOSIT_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_LiquidityPoolDepositResult = *LiquidityPoolDepositResult + +func (v *LiquidityPoolDepositResult) XdrPointer() interface{} { return v } +func (LiquidityPoolDepositResult) XdrTypeName() string { return "LiquidityPoolDepositResult" } +func (v LiquidityPoolDepositResult) XdrValue() interface{} { return v } +func (v *LiquidityPoolDepositResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *LiquidityPoolDepositResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_LiquidityPoolDepositResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case LIQUIDITY_POOL_DEPOSIT_SUCCESS: + return + default: + return + } +} +func XDR_LiquidityPoolDepositResult(v *LiquidityPoolDepositResult) *LiquidityPoolDepositResult { + return v +} + +var _XdrNames_LiquidityPoolWithdrawResultCode = map[int32]string{ + int32(LIQUIDITY_POOL_WITHDRAW_SUCCESS): "LIQUIDITY_POOL_WITHDRAW_SUCCESS", + int32(LIQUIDITY_POOL_WITHDRAW_MALFORMED): "LIQUIDITY_POOL_WITHDRAW_MALFORMED", + int32(LIQUIDITY_POOL_WITHDRAW_NO_TRUST): "LIQUIDITY_POOL_WITHDRAW_NO_TRUST", + int32(LIQUIDITY_POOL_WITHDRAW_UNDERFUNDED): "LIQUIDITY_POOL_WITHDRAW_UNDERFUNDED", + int32(LIQUIDITY_POOL_WITHDRAW_LINE_FULL): "LIQUIDITY_POOL_WITHDRAW_LINE_FULL", + int32(LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM): "LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM", +} +var _XdrValues_LiquidityPoolWithdrawResultCode = map[string]int32{ + "LIQUIDITY_POOL_WITHDRAW_SUCCESS": int32(LIQUIDITY_POOL_WITHDRAW_SUCCESS), + "LIQUIDITY_POOL_WITHDRAW_MALFORMED": int32(LIQUIDITY_POOL_WITHDRAW_MALFORMED), + "LIQUIDITY_POOL_WITHDRAW_NO_TRUST": 
int32(LIQUIDITY_POOL_WITHDRAW_NO_TRUST), + "LIQUIDITY_POOL_WITHDRAW_UNDERFUNDED": int32(LIQUIDITY_POOL_WITHDRAW_UNDERFUNDED), + "LIQUIDITY_POOL_WITHDRAW_LINE_FULL": int32(LIQUIDITY_POOL_WITHDRAW_LINE_FULL), + "LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM": int32(LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM), +} + +func (LiquidityPoolWithdrawResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_LiquidityPoolWithdrawResultCode +} +func (v LiquidityPoolWithdrawResultCode) String() string { + if s, ok := _XdrNames_LiquidityPoolWithdrawResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("LiquidityPoolWithdrawResultCode#%d", v) +} +func (v *LiquidityPoolWithdrawResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_LiquidityPoolWithdrawResultCode[stok]; ok { + *v = LiquidityPoolWithdrawResultCode(val) + return nil + } else if stok == "LiquidityPoolWithdrawResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid LiquidityPoolWithdrawResultCode.", stok)) + } +} +func (v LiquidityPoolWithdrawResultCode) GetU32() uint32 { return uint32(v) } +func (v *LiquidityPoolWithdrawResultCode) SetU32(n uint32) { *v = LiquidityPoolWithdrawResultCode(n) } +func (v *LiquidityPoolWithdrawResultCode) XdrPointer() interface{} { return v } +func (LiquidityPoolWithdrawResultCode) XdrTypeName() string { return "LiquidityPoolWithdrawResultCode" } +func (v LiquidityPoolWithdrawResultCode) XdrValue() interface{} { return v } +func (v *LiquidityPoolWithdrawResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_LiquidityPoolWithdrawResultCode = *LiquidityPoolWithdrawResultCode + +func XDR_LiquidityPoolWithdrawResultCode(v *LiquidityPoolWithdrawResultCode) *LiquidityPoolWithdrawResultCode { + return v +} + +var _XdrComments_LiquidityPoolWithdrawResultCode = map[int32]string{ + int32(LIQUIDITY_POOL_WITHDRAW_SUCCESS): "codes considered as \"success\" for the operation", + int32(LIQUIDITY_POOL_WITHDRAW_MALFORMED): "bad input", + int32(LIQUIDITY_POOL_WITHDRAW_NO_TRUST): "no trust line for one of the", + int32(LIQUIDITY_POOL_WITHDRAW_UNDERFUNDED): "not enough balance of the", + int32(LIQUIDITY_POOL_WITHDRAW_LINE_FULL): "would go above limit for one", + int32(LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM): "of the assets", +} + +func (e LiquidityPoolWithdrawResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_LiquidityPoolWithdrawResultCode +} +func (_ LiquidityPoolWithdrawResult) XdrValidTags() map[int32]bool { + return nil +} +func (u LiquidityPoolWithdrawResult) XdrValid() bool { + return true +} +func (u *LiquidityPoolWithdrawResult) XdrUnionTag() XdrNum32 { + return XDR_LiquidityPoolWithdrawResultCode(&u.Code) +} +func (u *LiquidityPoolWithdrawResult) XdrUnionTagName() string { + return "Code" +} +func (u *LiquidityPoolWithdrawResult) XdrUnionBody() XdrType { + switch u.Code { + case LIQUIDITY_POOL_WITHDRAW_SUCCESS: + return nil + default: + return nil + } +} +func (u *LiquidityPoolWithdrawResult) XdrUnionBodyName() string { + switch u.Code { + case LIQUIDITY_POOL_WITHDRAW_SUCCESS: + return "" + default: + return "" + } +} + +type XdrType_LiquidityPoolWithdrawResult = *LiquidityPoolWithdrawResult + +func (v *LiquidityPoolWithdrawResult) XdrPointer() interface{} { return v } +func (LiquidityPoolWithdrawResult) XdrTypeName() string { return 
"LiquidityPoolWithdrawResult" } +func (v LiquidityPoolWithdrawResult) XdrValue() interface{} { return v } +func (v *LiquidityPoolWithdrawResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *LiquidityPoolWithdrawResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_LiquidityPoolWithdrawResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case LIQUIDITY_POOL_WITHDRAW_SUCCESS: + return + default: + return + } +} +func XDR_LiquidityPoolWithdrawResult(v *LiquidityPoolWithdrawResult) *LiquidityPoolWithdrawResult { + return v +} + +var _XdrNames_OperationResultCode = map[int32]string{ + int32(OpINNER): "opINNER", + int32(OpBAD_AUTH): "opBAD_AUTH", + int32(OpNO_ACCOUNT): "opNO_ACCOUNT", + int32(OpNOT_SUPPORTED): "opNOT_SUPPORTED", + int32(OpTOO_MANY_SUBENTRIES): "opTOO_MANY_SUBENTRIES", + int32(OpEXCEEDED_WORK_LIMIT): "opEXCEEDED_WORK_LIMIT", + int32(OpTOO_MANY_SPONSORING): "opTOO_MANY_SPONSORING", +} +var _XdrValues_OperationResultCode = map[string]int32{ + "opINNER": int32(OpINNER), + "opBAD_AUTH": int32(OpBAD_AUTH), + "opNO_ACCOUNT": int32(OpNO_ACCOUNT), + "opNOT_SUPPORTED": int32(OpNOT_SUPPORTED), + "opTOO_MANY_SUBENTRIES": int32(OpTOO_MANY_SUBENTRIES), + "opEXCEEDED_WORK_LIMIT": int32(OpEXCEEDED_WORK_LIMIT), + "opTOO_MANY_SPONSORING": int32(OpTOO_MANY_SPONSORING), +} + +func (OperationResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_OperationResultCode +} +func (v OperationResultCode) String() string { + if s, ok := _XdrNames_OperationResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("OperationResultCode#%d", v) +} +func (v *OperationResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_OperationResultCode[stok]; ok { + *v = OperationResultCode(val) + return nil + } else if stok == "OperationResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid OperationResultCode.", stok)) + } +} +func (v OperationResultCode) GetU32() uint32 { return uint32(v) } +func (v *OperationResultCode) SetU32(n uint32) { *v = OperationResultCode(n) } +func (v *OperationResultCode) XdrPointer() interface{} { return v } +func (OperationResultCode) XdrTypeName() string { return "OperationResultCode" } +func (v OperationResultCode) XdrValue() interface{} { return v } +func (v *OperationResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_OperationResultCode = *OperationResultCode + +func XDR_OperationResultCode(v *OperationResultCode) *OperationResultCode { return v } + +var _XdrComments_OperationResultCode = map[int32]string{ + int32(OpINNER): "inner object result is valid", + int32(OpBAD_AUTH): "too few valid signatures / wrong network", + int32(OpNO_ACCOUNT): "source account was not found", + int32(OpNOT_SUPPORTED): "operation not supported at this time", + int32(OpTOO_MANY_SUBENTRIES): "max number of subentries already reached", + int32(OpEXCEEDED_WORK_LIMIT): "operation did too much work", + int32(OpTOO_MANY_SPONSORING): "account is sponsoring too many entries", +} + +func (e OperationResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_OperationResultCode +} + +var _XdrTags_XdrAnon_OperationResult_Tr = map[int32]bool{ + XdrToI32(CREATE_ACCOUNT): true, + XdrToI32(PAYMENT): true, + XdrToI32(PATH_PAYMENT_STRICT_RECEIVE): 
true, + XdrToI32(MANAGE_SELL_OFFER): true, + XdrToI32(CREATE_PASSIVE_SELL_OFFER): true, + XdrToI32(SET_OPTIONS): true, + XdrToI32(CHANGE_TRUST): true, + XdrToI32(ALLOW_TRUST): true, + XdrToI32(ACCOUNT_MERGE): true, + XdrToI32(INFLATION): true, + XdrToI32(MANAGE_DATA): true, + XdrToI32(BUMP_SEQUENCE): true, + XdrToI32(MANAGE_BUY_OFFER): true, + XdrToI32(PATH_PAYMENT_STRICT_SEND): true, + XdrToI32(CREATE_CLAIMABLE_BALANCE): true, + XdrToI32(CLAIM_CLAIMABLE_BALANCE): true, + XdrToI32(BEGIN_SPONSORING_FUTURE_RESERVES): true, + XdrToI32(END_SPONSORING_FUTURE_RESERVES): true, + XdrToI32(REVOKE_SPONSORSHIP): true, + XdrToI32(CLAWBACK): true, + XdrToI32(CLAWBACK_CLAIMABLE_BALANCE): true, + XdrToI32(SET_TRUST_LINE_FLAGS): true, + XdrToI32(LIQUIDITY_POOL_DEPOSIT): true, + XdrToI32(LIQUIDITY_POOL_WITHDRAW): true, +} + +func (_ XdrAnon_OperationResult_Tr) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_OperationResult_Tr +} +func (u *XdrAnon_OperationResult_Tr) CreateAccountResult() *CreateAccountResult { + switch u.Type { + case CREATE_ACCOUNT: + if v, ok := u._u.(*CreateAccountResult); ok { + return v + } else { + var zero CreateAccountResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.CreateAccountResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) PaymentResult() *PaymentResult { + switch u.Type { + case PAYMENT: + if v, ok := u._u.(*PaymentResult); ok { + return v + } else { + var zero PaymentResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.PaymentResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) PathPaymentStrictReceiveResult() *PathPaymentStrictReceiveResult { + switch u.Type { + case PATH_PAYMENT_STRICT_RECEIVE: + if v, ok := u._u.(*PathPaymentStrictReceiveResult); ok { + return v + } else { + var zero PathPaymentStrictReceiveResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.PathPaymentStrictReceiveResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) ManageSellOfferResult() *ManageSellOfferResult { + switch u.Type { + case MANAGE_SELL_OFFER: + if v, ok := u._u.(*ManageSellOfferResult); ok { + return v + } else { + var zero ManageSellOfferResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.ManageSellOfferResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) CreatePassiveSellOfferResult() *ManageSellOfferResult { + switch u.Type { + case CREATE_PASSIVE_SELL_OFFER: + if v, ok := u._u.(*ManageSellOfferResult); ok { + return v + } else { + var zero ManageSellOfferResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.CreatePassiveSellOfferResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) SetOptionsResult() *SetOptionsResult { + switch u.Type { + case SET_OPTIONS: + if v, ok := u._u.(*SetOptionsResult); ok { + return v + } else { + var zero SetOptionsResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.SetOptionsResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) ChangeTrustResult() *ChangeTrustResult { + switch u.Type { + case CHANGE_TRUST: + if v, ok := u._u.(*ChangeTrustResult); ok { + return v + } else { + var zero ChangeTrustResult + u._u = &zero + return &zero + } + 
default: + XdrPanic("XdrAnon_OperationResult_Tr.ChangeTrustResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) AllowTrustResult() *AllowTrustResult { + switch u.Type { + case ALLOW_TRUST: + if v, ok := u._u.(*AllowTrustResult); ok { + return v + } else { + var zero AllowTrustResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.AllowTrustResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) AccountMergeResult() *AccountMergeResult { + switch u.Type { + case ACCOUNT_MERGE: + if v, ok := u._u.(*AccountMergeResult); ok { + return v + } else { + var zero AccountMergeResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.AccountMergeResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) InflationResult() *InflationResult { + switch u.Type { + case INFLATION: + if v, ok := u._u.(*InflationResult); ok { + return v + } else { + var zero InflationResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.InflationResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) ManageDataResult() *ManageDataResult { + switch u.Type { + case MANAGE_DATA: + if v, ok := u._u.(*ManageDataResult); ok { + return v + } else { + var zero ManageDataResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.ManageDataResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) BumpSeqResult() *BumpSequenceResult { + switch u.Type { + case BUMP_SEQUENCE: + if v, ok := u._u.(*BumpSequenceResult); ok { + return v + } else { + var zero BumpSequenceResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.BumpSeqResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) ManageBuyOfferResult() *ManageBuyOfferResult { + switch u.Type { + case MANAGE_BUY_OFFER: + if v, ok := u._u.(*ManageBuyOfferResult); ok { + return v + } else { + var zero ManageBuyOfferResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.ManageBuyOfferResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) PathPaymentStrictSendResult() *PathPaymentStrictSendResult { + switch u.Type { + case PATH_PAYMENT_STRICT_SEND: + if v, ok := u._u.(*PathPaymentStrictSendResult); ok { + return v + } else { + var zero PathPaymentStrictSendResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.PathPaymentStrictSendResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) CreateClaimableBalanceResult() *CreateClaimableBalanceResult { + switch u.Type { + case CREATE_CLAIMABLE_BALANCE: + if v, ok := u._u.(*CreateClaimableBalanceResult); ok { + return v + } else { + var zero CreateClaimableBalanceResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.CreateClaimableBalanceResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) ClaimClaimableBalanceResult() *ClaimClaimableBalanceResult { + switch u.Type { + case CLAIM_CLAIMABLE_BALANCE: + if v, ok := u._u.(*ClaimClaimableBalanceResult); ok { + return v + } else { + var zero ClaimClaimableBalanceResult + u._u = &zero + return &zero + } + default: + 
XdrPanic("XdrAnon_OperationResult_Tr.ClaimClaimableBalanceResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) BeginSponsoringFutureReservesResult() *BeginSponsoringFutureReservesResult { + switch u.Type { + case BEGIN_SPONSORING_FUTURE_RESERVES: + if v, ok := u._u.(*BeginSponsoringFutureReservesResult); ok { + return v + } else { + var zero BeginSponsoringFutureReservesResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.BeginSponsoringFutureReservesResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) EndSponsoringFutureReservesResult() *EndSponsoringFutureReservesResult { + switch u.Type { + case END_SPONSORING_FUTURE_RESERVES: + if v, ok := u._u.(*EndSponsoringFutureReservesResult); ok { + return v + } else { + var zero EndSponsoringFutureReservesResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.EndSponsoringFutureReservesResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) RevokeSponsorshipResult() *RevokeSponsorshipResult { + switch u.Type { + case REVOKE_SPONSORSHIP: + if v, ok := u._u.(*RevokeSponsorshipResult); ok { + return v + } else { + var zero RevokeSponsorshipResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.RevokeSponsorshipResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) ClawbackResult() *ClawbackResult { + switch u.Type { + case CLAWBACK: + if v, ok := u._u.(*ClawbackResult); ok { + return v + } else { + var zero ClawbackResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.ClawbackResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) ClawbackClaimableBalanceResult() *ClawbackClaimableBalanceResult { + switch u.Type { + case CLAWBACK_CLAIMABLE_BALANCE: + if v, ok := u._u.(*ClawbackClaimableBalanceResult); ok { + return v + } else { + var zero ClawbackClaimableBalanceResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.ClawbackClaimableBalanceResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) SetTrustLineFlagsResult() *SetTrustLineFlagsResult { + switch u.Type { + case SET_TRUST_LINE_FLAGS: + if v, ok := u._u.(*SetTrustLineFlagsResult); ok { + return v + } else { + var zero SetTrustLineFlagsResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.SetTrustLineFlagsResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) LiquidityPoolDepositResult() *LiquidityPoolDepositResult { + switch u.Type { + case LIQUIDITY_POOL_DEPOSIT: + if v, ok := u._u.(*LiquidityPoolDepositResult); ok { + return v + } else { + var zero LiquidityPoolDepositResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.LiquidityPoolDepositResult accessed when Type == %v", u.Type) + return nil + } +} +func (u *XdrAnon_OperationResult_Tr) LiquidityPoolWithdrawResult() *LiquidityPoolWithdrawResult { + switch u.Type { + case LIQUIDITY_POOL_WITHDRAW: + if v, ok := u._u.(*LiquidityPoolWithdrawResult); ok { + return v + } else { + var zero LiquidityPoolWithdrawResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_OperationResult_Tr.LiquidityPoolWithdrawResult accessed when Type == %v", u.Type) + return nil + } +} 
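+
+// Illustrative note, a sketch rather than generator output: the accessor
+// methods above lazily allocate whichever union arm matches u.Type, so callers
+// are expected to set the tag first and then fill in the arm, e.g.:
+//
+//	var tr XdrAnon_OperationResult_Tr
+//	tr.Type = CLAWBACK
+//	tr.ClawbackResult().Code = CLAWBACK_SUCCESS
+//
+// Calling an accessor that does not correspond to the current Type (say
+// tr.PaymentResult() here) goes through XdrPanic instead of returning a
+// usable pointer.
+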
+func (u XdrAnon_OperationResult_Tr) XdrValid() bool { + switch u.Type { + case CREATE_ACCOUNT, PAYMENT, PATH_PAYMENT_STRICT_RECEIVE, MANAGE_SELL_OFFER, CREATE_PASSIVE_SELL_OFFER, SET_OPTIONS, CHANGE_TRUST, ALLOW_TRUST, ACCOUNT_MERGE, INFLATION, MANAGE_DATA, BUMP_SEQUENCE, MANAGE_BUY_OFFER, PATH_PAYMENT_STRICT_SEND, CREATE_CLAIMABLE_BALANCE, CLAIM_CLAIMABLE_BALANCE, BEGIN_SPONSORING_FUTURE_RESERVES, END_SPONSORING_FUTURE_RESERVES, REVOKE_SPONSORSHIP, CLAWBACK, CLAWBACK_CLAIMABLE_BALANCE, SET_TRUST_LINE_FLAGS, LIQUIDITY_POOL_DEPOSIT, LIQUIDITY_POOL_WITHDRAW: + return true + } + return false +} +func (u *XdrAnon_OperationResult_Tr) XdrUnionTag() XdrNum32 { + return XDR_OperationType(&u.Type) +} +func (u *XdrAnon_OperationResult_Tr) XdrUnionTagName() string { + return "Type" +} +func (u *XdrAnon_OperationResult_Tr) XdrUnionBody() XdrType { + switch u.Type { + case CREATE_ACCOUNT: + return XDR_CreateAccountResult(u.CreateAccountResult()) + case PAYMENT: + return XDR_PaymentResult(u.PaymentResult()) + case PATH_PAYMENT_STRICT_RECEIVE: + return XDR_PathPaymentStrictReceiveResult(u.PathPaymentStrictReceiveResult()) + case MANAGE_SELL_OFFER: + return XDR_ManageSellOfferResult(u.ManageSellOfferResult()) + case CREATE_PASSIVE_SELL_OFFER: + return XDR_ManageSellOfferResult(u.CreatePassiveSellOfferResult()) + case SET_OPTIONS: + return XDR_SetOptionsResult(u.SetOptionsResult()) + case CHANGE_TRUST: + return XDR_ChangeTrustResult(u.ChangeTrustResult()) + case ALLOW_TRUST: + return XDR_AllowTrustResult(u.AllowTrustResult()) + case ACCOUNT_MERGE: + return XDR_AccountMergeResult(u.AccountMergeResult()) + case INFLATION: + return XDR_InflationResult(u.InflationResult()) + case MANAGE_DATA: + return XDR_ManageDataResult(u.ManageDataResult()) + case BUMP_SEQUENCE: + return XDR_BumpSequenceResult(u.BumpSeqResult()) + case MANAGE_BUY_OFFER: + return XDR_ManageBuyOfferResult(u.ManageBuyOfferResult()) + case PATH_PAYMENT_STRICT_SEND: + return XDR_PathPaymentStrictSendResult(u.PathPaymentStrictSendResult()) + case CREATE_CLAIMABLE_BALANCE: + return XDR_CreateClaimableBalanceResult(u.CreateClaimableBalanceResult()) + case CLAIM_CLAIMABLE_BALANCE: + return XDR_ClaimClaimableBalanceResult(u.ClaimClaimableBalanceResult()) + case BEGIN_SPONSORING_FUTURE_RESERVES: + return XDR_BeginSponsoringFutureReservesResult(u.BeginSponsoringFutureReservesResult()) + case END_SPONSORING_FUTURE_RESERVES: + return XDR_EndSponsoringFutureReservesResult(u.EndSponsoringFutureReservesResult()) + case REVOKE_SPONSORSHIP: + return XDR_RevokeSponsorshipResult(u.RevokeSponsorshipResult()) + case CLAWBACK: + return XDR_ClawbackResult(u.ClawbackResult()) + case CLAWBACK_CLAIMABLE_BALANCE: + return XDR_ClawbackClaimableBalanceResult(u.ClawbackClaimableBalanceResult()) + case SET_TRUST_LINE_FLAGS: + return XDR_SetTrustLineFlagsResult(u.SetTrustLineFlagsResult()) + case LIQUIDITY_POOL_DEPOSIT: + return XDR_LiquidityPoolDepositResult(u.LiquidityPoolDepositResult()) + case LIQUIDITY_POOL_WITHDRAW: + return XDR_LiquidityPoolWithdrawResult(u.LiquidityPoolWithdrawResult()) + } + return nil +} +func (u *XdrAnon_OperationResult_Tr) XdrUnionBodyName() string { + switch u.Type { + case CREATE_ACCOUNT: + return "CreateAccountResult" + case PAYMENT: + return "PaymentResult" + case PATH_PAYMENT_STRICT_RECEIVE: + return "PathPaymentStrictReceiveResult" + case MANAGE_SELL_OFFER: + return "ManageSellOfferResult" + case CREATE_PASSIVE_SELL_OFFER: + return "CreatePassiveSellOfferResult" + case SET_OPTIONS: + return "SetOptionsResult" + case CHANGE_TRUST: + return 
"ChangeTrustResult" + case ALLOW_TRUST: + return "AllowTrustResult" + case ACCOUNT_MERGE: + return "AccountMergeResult" + case INFLATION: + return "InflationResult" + case MANAGE_DATA: + return "ManageDataResult" + case BUMP_SEQUENCE: + return "BumpSeqResult" + case MANAGE_BUY_OFFER: + return "ManageBuyOfferResult" + case PATH_PAYMENT_STRICT_SEND: + return "PathPaymentStrictSendResult" + case CREATE_CLAIMABLE_BALANCE: + return "CreateClaimableBalanceResult" + case CLAIM_CLAIMABLE_BALANCE: + return "ClaimClaimableBalanceResult" + case BEGIN_SPONSORING_FUTURE_RESERVES: + return "BeginSponsoringFutureReservesResult" + case END_SPONSORING_FUTURE_RESERVES: + return "EndSponsoringFutureReservesResult" + case REVOKE_SPONSORSHIP: + return "RevokeSponsorshipResult" + case CLAWBACK: + return "ClawbackResult" + case CLAWBACK_CLAIMABLE_BALANCE: + return "ClawbackClaimableBalanceResult" + case SET_TRUST_LINE_FLAGS: + return "SetTrustLineFlagsResult" + case LIQUIDITY_POOL_DEPOSIT: + return "LiquidityPoolDepositResult" + case LIQUIDITY_POOL_WITHDRAW: + return "LiquidityPoolWithdrawResult" + } + return "" +} + +type XdrType_XdrAnon_OperationResult_Tr = *XdrAnon_OperationResult_Tr + +func (v *XdrAnon_OperationResult_Tr) XdrPointer() interface{} { return v } +func (XdrAnon_OperationResult_Tr) XdrTypeName() string { return "XdrAnon_OperationResult_Tr" } +func (v XdrAnon_OperationResult_Tr) XdrValue() interface{} { return v } +func (v *XdrAnon_OperationResult_Tr) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_OperationResult_Tr) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_OperationType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case CREATE_ACCOUNT: + x.Marshal(x.Sprintf("%screateAccountResult", name), XDR_CreateAccountResult(u.CreateAccountResult())) + return + case PAYMENT: + x.Marshal(x.Sprintf("%spaymentResult", name), XDR_PaymentResult(u.PaymentResult())) + return + case PATH_PAYMENT_STRICT_RECEIVE: + x.Marshal(x.Sprintf("%spathPaymentStrictReceiveResult", name), XDR_PathPaymentStrictReceiveResult(u.PathPaymentStrictReceiveResult())) + return + case MANAGE_SELL_OFFER: + x.Marshal(x.Sprintf("%smanageSellOfferResult", name), XDR_ManageSellOfferResult(u.ManageSellOfferResult())) + return + case CREATE_PASSIVE_SELL_OFFER: + x.Marshal(x.Sprintf("%screatePassiveSellOfferResult", name), XDR_ManageSellOfferResult(u.CreatePassiveSellOfferResult())) + return + case SET_OPTIONS: + x.Marshal(x.Sprintf("%ssetOptionsResult", name), XDR_SetOptionsResult(u.SetOptionsResult())) + return + case CHANGE_TRUST: + x.Marshal(x.Sprintf("%schangeTrustResult", name), XDR_ChangeTrustResult(u.ChangeTrustResult())) + return + case ALLOW_TRUST: + x.Marshal(x.Sprintf("%sallowTrustResult", name), XDR_AllowTrustResult(u.AllowTrustResult())) + return + case ACCOUNT_MERGE: + x.Marshal(x.Sprintf("%saccountMergeResult", name), XDR_AccountMergeResult(u.AccountMergeResult())) + return + case INFLATION: + x.Marshal(x.Sprintf("%sinflationResult", name), XDR_InflationResult(u.InflationResult())) + return + case MANAGE_DATA: + x.Marshal(x.Sprintf("%smanageDataResult", name), XDR_ManageDataResult(u.ManageDataResult())) + return + case BUMP_SEQUENCE: + x.Marshal(x.Sprintf("%sbumpSeqResult", name), XDR_BumpSequenceResult(u.BumpSeqResult())) + return + case MANAGE_BUY_OFFER: + x.Marshal(x.Sprintf("%smanageBuyOfferResult", name), XDR_ManageBuyOfferResult(u.ManageBuyOfferResult())) + return + case PATH_PAYMENT_STRICT_SEND: + 
x.Marshal(x.Sprintf("%spathPaymentStrictSendResult", name), XDR_PathPaymentStrictSendResult(u.PathPaymentStrictSendResult())) + return + case CREATE_CLAIMABLE_BALANCE: + x.Marshal(x.Sprintf("%screateClaimableBalanceResult", name), XDR_CreateClaimableBalanceResult(u.CreateClaimableBalanceResult())) + return + case CLAIM_CLAIMABLE_BALANCE: + x.Marshal(x.Sprintf("%sclaimClaimableBalanceResult", name), XDR_ClaimClaimableBalanceResult(u.ClaimClaimableBalanceResult())) + return + case BEGIN_SPONSORING_FUTURE_RESERVES: + x.Marshal(x.Sprintf("%sbeginSponsoringFutureReservesResult", name), XDR_BeginSponsoringFutureReservesResult(u.BeginSponsoringFutureReservesResult())) + return + case END_SPONSORING_FUTURE_RESERVES: + x.Marshal(x.Sprintf("%sendSponsoringFutureReservesResult", name), XDR_EndSponsoringFutureReservesResult(u.EndSponsoringFutureReservesResult())) + return + case REVOKE_SPONSORSHIP: + x.Marshal(x.Sprintf("%srevokeSponsorshipResult", name), XDR_RevokeSponsorshipResult(u.RevokeSponsorshipResult())) + return + case CLAWBACK: + x.Marshal(x.Sprintf("%sclawbackResult", name), XDR_ClawbackResult(u.ClawbackResult())) + return + case CLAWBACK_CLAIMABLE_BALANCE: + x.Marshal(x.Sprintf("%sclawbackClaimableBalanceResult", name), XDR_ClawbackClaimableBalanceResult(u.ClawbackClaimableBalanceResult())) + return + case SET_TRUST_LINE_FLAGS: + x.Marshal(x.Sprintf("%ssetTrustLineFlagsResult", name), XDR_SetTrustLineFlagsResult(u.SetTrustLineFlagsResult())) + return + case LIQUIDITY_POOL_DEPOSIT: + x.Marshal(x.Sprintf("%sliquidityPoolDepositResult", name), XDR_LiquidityPoolDepositResult(u.LiquidityPoolDepositResult())) + return + case LIQUIDITY_POOL_WITHDRAW: + x.Marshal(x.Sprintf("%sliquidityPoolWithdrawResult", name), XDR_LiquidityPoolWithdrawResult(u.LiquidityPoolWithdrawResult())) + return + } + XdrPanic("invalid Type (%v) in XdrAnon_OperationResult_Tr", u.Type) +} +func XDR_XdrAnon_OperationResult_Tr(v *XdrAnon_OperationResult_Tr) *XdrAnon_OperationResult_Tr { + return v +} +func (_ OperationResult) XdrValidTags() map[int32]bool { + return nil +} +func (u *OperationResult) Tr() *XdrAnon_OperationResult_Tr { + switch u.Code { + case OpINNER: + if v, ok := u._u.(*XdrAnon_OperationResult_Tr); ok { + return v + } else { + var zero XdrAnon_OperationResult_Tr + u._u = &zero + return &zero + } + default: + XdrPanic("OperationResult.Tr accessed when Code == %v", u.Code) + return nil + } +} +func (u OperationResult) XdrValid() bool { + return true +} +func (u *OperationResult) XdrUnionTag() XdrNum32 { + return XDR_OperationResultCode(&u.Code) +} +func (u *OperationResult) XdrUnionTagName() string { + return "Code" +} +func (u *OperationResult) XdrUnionBody() XdrType { + switch u.Code { + case OpINNER: + return XDR_XdrAnon_OperationResult_Tr(u.Tr()) + default: + return nil + } +} +func (u *OperationResult) XdrUnionBodyName() string { + switch u.Code { + case OpINNER: + return "Tr" + default: + return "" + } +} + +type XdrType_OperationResult = *OperationResult + +func (v *OperationResult) XdrPointer() interface{} { return v } +func (OperationResult) XdrTypeName() string { return "OperationResult" } +func (v OperationResult) XdrValue() interface{} { return v } +func (v *OperationResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *OperationResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_OperationResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case OpINNER: + x.Marshal(x.Sprintf("%str", name), 
XDR_XdrAnon_OperationResult_Tr(u.Tr())) + return + default: + return + } +} +func XDR_OperationResult(v *OperationResult) *OperationResult { return v } + +var _XdrNames_TransactionResultCode = map[int32]string{ + int32(TxFEE_BUMP_INNER_SUCCESS): "txFEE_BUMP_INNER_SUCCESS", + int32(TxSUCCESS): "txSUCCESS", + int32(TxFAILED): "txFAILED", + int32(TxTOO_EARLY): "txTOO_EARLY", + int32(TxTOO_LATE): "txTOO_LATE", + int32(TxMISSING_OPERATION): "txMISSING_OPERATION", + int32(TxBAD_SEQ): "txBAD_SEQ", + int32(TxBAD_AUTH): "txBAD_AUTH", + int32(TxINSUFFICIENT_BALANCE): "txINSUFFICIENT_BALANCE", + int32(TxNO_ACCOUNT): "txNO_ACCOUNT", + int32(TxINSUFFICIENT_FEE): "txINSUFFICIENT_FEE", + int32(TxBAD_AUTH_EXTRA): "txBAD_AUTH_EXTRA", + int32(TxINTERNAL_ERROR): "txINTERNAL_ERROR", + int32(TxNOT_SUPPORTED): "txNOT_SUPPORTED", + int32(TxFEE_BUMP_INNER_FAILED): "txFEE_BUMP_INNER_FAILED", + int32(TxBAD_SPONSORSHIP): "txBAD_SPONSORSHIP", +} +var _XdrValues_TransactionResultCode = map[string]int32{ + "txFEE_BUMP_INNER_SUCCESS": int32(TxFEE_BUMP_INNER_SUCCESS), + "txSUCCESS": int32(TxSUCCESS), + "txFAILED": int32(TxFAILED), + "txTOO_EARLY": int32(TxTOO_EARLY), + "txTOO_LATE": int32(TxTOO_LATE), + "txMISSING_OPERATION": int32(TxMISSING_OPERATION), + "txBAD_SEQ": int32(TxBAD_SEQ), + "txBAD_AUTH": int32(TxBAD_AUTH), + "txINSUFFICIENT_BALANCE": int32(TxINSUFFICIENT_BALANCE), + "txNO_ACCOUNT": int32(TxNO_ACCOUNT), + "txINSUFFICIENT_FEE": int32(TxINSUFFICIENT_FEE), + "txBAD_AUTH_EXTRA": int32(TxBAD_AUTH_EXTRA), + "txINTERNAL_ERROR": int32(TxINTERNAL_ERROR), + "txNOT_SUPPORTED": int32(TxNOT_SUPPORTED), + "txFEE_BUMP_INNER_FAILED": int32(TxFEE_BUMP_INNER_FAILED), + "txBAD_SPONSORSHIP": int32(TxBAD_SPONSORSHIP), +} + +func (TransactionResultCode) XdrEnumNames() map[int32]string { + return _XdrNames_TransactionResultCode +} +func (v TransactionResultCode) String() string { + if s, ok := _XdrNames_TransactionResultCode[int32(v)]; ok { + return s + } + return fmt.Sprintf("TransactionResultCode#%d", v) +} +func (v *TransactionResultCode) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_TransactionResultCode[stok]; ok { + *v = TransactionResultCode(val) + return nil + } else if stok == "TransactionResultCode" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid TransactionResultCode.", stok)) + } +} +func (v TransactionResultCode) GetU32() uint32 { return uint32(v) } +func (v *TransactionResultCode) SetU32(n uint32) { *v = TransactionResultCode(n) } +func (v *TransactionResultCode) XdrPointer() interface{} { return v } +func (TransactionResultCode) XdrTypeName() string { return "TransactionResultCode" } +func (v TransactionResultCode) XdrValue() interface{} { return v } +func (v *TransactionResultCode) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_TransactionResultCode = *TransactionResultCode + +func XDR_TransactionResultCode(v *TransactionResultCode) *TransactionResultCode { return v } + +var _XdrComments_TransactionResultCode = map[int32]string{ + int32(TxFEE_BUMP_INNER_SUCCESS): "fee bump inner transaction succeeded", + int32(TxSUCCESS): "all operations succeeded", + int32(TxFAILED): "one of the operations failed (none were applied)", + int32(TxTOO_EARLY): "ledger closeTime before minTime", + int32(TxTOO_LATE): "ledger closeTime after maxTime", + int32(TxMISSING_OPERATION): "no operation 
was specified", + int32(TxBAD_SEQ): "sequence number does not match source account", + int32(TxBAD_AUTH): "too few valid signatures / wrong network", + int32(TxINSUFFICIENT_BALANCE): "fee would bring account below reserve", + int32(TxNO_ACCOUNT): "source account not found", + int32(TxINSUFFICIENT_FEE): "fee is too small", + int32(TxBAD_AUTH_EXTRA): "unused signatures attached to transaction", + int32(TxINTERNAL_ERROR): "an unknown error occurred", + int32(TxNOT_SUPPORTED): "transaction type not supported", + int32(TxFEE_BUMP_INNER_FAILED): "fee bump inner transaction failed", + int32(TxBAD_SPONSORSHIP): "sponsorship not confirmed", +} + +func (e TransactionResultCode) XdrEnumComments() map[int32]string { + return _XdrComments_TransactionResultCode +} + +type _XdrVec_unbounded_OperationResult []OperationResult + +func (_XdrVec_unbounded_OperationResult) XdrBound() uint32 { + const bound uint32 = 4294967295 // Force error if not const or doesn't fit + return bound +} +func (_XdrVec_unbounded_OperationResult) XdrCheckLen(length uint32) { + if length > uint32(4294967295) { + XdrPanic("_XdrVec_unbounded_OperationResult length %d exceeds bound 4294967295", length) + } else if int(length) < 0 { + XdrPanic("_XdrVec_unbounded_OperationResult length %d exceeds max int", length) + } +} +func (v _XdrVec_unbounded_OperationResult) GetVecLen() uint32 { return uint32(len(v)) } +func (v *_XdrVec_unbounded_OperationResult) SetVecLen(length uint32) { + v.XdrCheckLen(length) + if int(length) <= cap(*v) { + if int(length) != len(*v) { + *v = (*v)[:int(length)] + } + return + } + newcap := 2 * cap(*v) + if newcap < int(length) { // also catches overflow where 2*cap < 0 + newcap = int(length) + } else if bound := uint(4294967295); uint(newcap) > bound { + if int(bound) < 0 { + bound = ^uint(0) >> 1 + } + newcap = int(bound) + } + nv := make([]OperationResult, int(length), newcap) + copy(nv, *v) + *v = nv +} +func (v *_XdrVec_unbounded_OperationResult) XdrMarshalN(x XDR, name string, n uint32) { + v.XdrCheckLen(n) + for i := 0; i < int(n); i++ { + if i >= len(*v) { + v.SetVecLen(uint32(i + 1)) + } + XDR_OperationResult(&(*v)[i]).XdrMarshal(x, x.Sprintf("%s[%d]", name, i)) + } + if int(n) < len(*v) { + *v = (*v)[:int(n)] + } +} +func (v *_XdrVec_unbounded_OperationResult) XdrRecurse(x XDR, name string) { + size := XdrSize{Size: uint32(len(*v)), Bound: 4294967295} + x.Marshal(name, &size) + v.XdrMarshalN(x, name, size.Size) +} +func (_XdrVec_unbounded_OperationResult) XdrTypeName() string { return "OperationResult<>" } +func (v *_XdrVec_unbounded_OperationResult) XdrPointer() interface{} { return (*[]OperationResult)(v) } +func (v _XdrVec_unbounded_OperationResult) XdrValue() interface{} { return ([]OperationResult)(v) } +func (v *_XdrVec_unbounded_OperationResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +var _XdrTags_XdrAnon_InnerTransactionResult_Result = map[int32]bool{ + XdrToI32(TxSUCCESS): true, + XdrToI32(TxFAILED): true, + XdrToI32(TxTOO_EARLY): true, + XdrToI32(TxTOO_LATE): true, + XdrToI32(TxMISSING_OPERATION): true, + XdrToI32(TxBAD_SEQ): true, + XdrToI32(TxBAD_AUTH): true, + XdrToI32(TxINSUFFICIENT_BALANCE): true, + XdrToI32(TxNO_ACCOUNT): true, + XdrToI32(TxINSUFFICIENT_FEE): true, + XdrToI32(TxBAD_AUTH_EXTRA): true, + XdrToI32(TxINTERNAL_ERROR): true, + XdrToI32(TxNOT_SUPPORTED): true, + XdrToI32(TxBAD_SPONSORSHIP): true, +} + +func (_ XdrAnon_InnerTransactionResult_Result) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_InnerTransactionResult_Result +} +func (u 
*XdrAnon_InnerTransactionResult_Result) Results() *[]OperationResult { + switch u.Code { + case TxSUCCESS, TxFAILED: + if v, ok := u._u.(*[]OperationResult); ok { + return v + } else { + var zero []OperationResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_InnerTransactionResult_Result.Results accessed when Code == %v", u.Code) + return nil + } +} +func (u XdrAnon_InnerTransactionResult_Result) XdrValid() bool { + switch u.Code { + case TxSUCCESS, TxFAILED, TxTOO_EARLY, TxTOO_LATE, TxMISSING_OPERATION, TxBAD_SEQ, TxBAD_AUTH, TxINSUFFICIENT_BALANCE, TxNO_ACCOUNT, TxINSUFFICIENT_FEE, TxBAD_AUTH_EXTRA, TxINTERNAL_ERROR, TxNOT_SUPPORTED, TxBAD_SPONSORSHIP: + return true + } + return false +} +func (u *XdrAnon_InnerTransactionResult_Result) XdrUnionTag() XdrNum32 { + return XDR_TransactionResultCode(&u.Code) +} +func (u *XdrAnon_InnerTransactionResult_Result) XdrUnionTagName() string { + return "Code" +} +func (u *XdrAnon_InnerTransactionResult_Result) XdrUnionBody() XdrType { + switch u.Code { + case TxSUCCESS, TxFAILED: + return (*_XdrVec_unbounded_OperationResult)(u.Results()) + case TxTOO_EARLY, TxTOO_LATE, TxMISSING_OPERATION, TxBAD_SEQ, TxBAD_AUTH, TxINSUFFICIENT_BALANCE, TxNO_ACCOUNT, TxINSUFFICIENT_FEE, TxBAD_AUTH_EXTRA, TxINTERNAL_ERROR, TxNOT_SUPPORTED, TxBAD_SPONSORSHIP: + return nil + } + return nil +} +func (u *XdrAnon_InnerTransactionResult_Result) XdrUnionBodyName() string { + switch u.Code { + case TxSUCCESS, TxFAILED: + return "Results" + case TxTOO_EARLY, TxTOO_LATE, TxMISSING_OPERATION, TxBAD_SEQ, TxBAD_AUTH, TxINSUFFICIENT_BALANCE, TxNO_ACCOUNT, TxINSUFFICIENT_FEE, TxBAD_AUTH_EXTRA, TxINTERNAL_ERROR, TxNOT_SUPPORTED, TxBAD_SPONSORSHIP: + return "" + } + return "" +} + +type XdrType_XdrAnon_InnerTransactionResult_Result = *XdrAnon_InnerTransactionResult_Result + +func (v *XdrAnon_InnerTransactionResult_Result) XdrPointer() interface{} { return v } +func (XdrAnon_InnerTransactionResult_Result) XdrTypeName() string { + return "XdrAnon_InnerTransactionResult_Result" +} +func (v XdrAnon_InnerTransactionResult_Result) XdrValue() interface{} { return v } +func (v *XdrAnon_InnerTransactionResult_Result) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_InnerTransactionResult_Result) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_TransactionResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case TxSUCCESS, TxFAILED: + x.Marshal(x.Sprintf("%sresults", name), (*_XdrVec_unbounded_OperationResult)(u.Results())) + return + case TxTOO_EARLY, TxTOO_LATE, TxMISSING_OPERATION, TxBAD_SEQ, TxBAD_AUTH, TxINSUFFICIENT_BALANCE, TxNO_ACCOUNT, TxINSUFFICIENT_FEE, TxBAD_AUTH_EXTRA, TxINTERNAL_ERROR, TxNOT_SUPPORTED, TxBAD_SPONSORSHIP: + return + } + XdrPanic("invalid Code (%v) in XdrAnon_InnerTransactionResult_Result", u.Code) +} +func XDR_XdrAnon_InnerTransactionResult_Result(v *XdrAnon_InnerTransactionResult_Result) *XdrAnon_InnerTransactionResult_Result { + return v +} + +var _XdrTags_XdrAnon_InnerTransactionResult_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_InnerTransactionResult_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_InnerTransactionResult_Ext +} +func (u XdrAnon_InnerTransactionResult_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_InnerTransactionResult_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_InnerTransactionResult_Ext) XdrUnionTagName() 
string { + return "V" +} +func (u *XdrAnon_InnerTransactionResult_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_InnerTransactionResult_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_InnerTransactionResult_Ext = *XdrAnon_InnerTransactionResult_Ext + +func (v *XdrAnon_InnerTransactionResult_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_InnerTransactionResult_Ext) XdrTypeName() string { + return "XdrAnon_InnerTransactionResult_Ext" +} +func (v XdrAnon_InnerTransactionResult_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_InnerTransactionResult_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_InnerTransactionResult_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_InnerTransactionResult_Ext", u.V) +} +func XDR_XdrAnon_InnerTransactionResult_Ext(v *XdrAnon_InnerTransactionResult_Ext) *XdrAnon_InnerTransactionResult_Ext { + return v +} + +type XdrType_InnerTransactionResult = *InnerTransactionResult + +func (v *InnerTransactionResult) XdrPointer() interface{} { return v } +func (InnerTransactionResult) XdrTypeName() string { return "InnerTransactionResult" } +func (v InnerTransactionResult) XdrValue() interface{} { return v } +func (v *InnerTransactionResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *InnerTransactionResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sfeeCharged", name), XDR_Int64(&v.FeeCharged)) + x.Marshal(x.Sprintf("%sresult", name), XDR_XdrAnon_InnerTransactionResult_Result(&v.Result)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_InnerTransactionResult_Ext(&v.Ext)) +} +func XDR_InnerTransactionResult(v *InnerTransactionResult) *InnerTransactionResult { return v } + +type XdrType_InnerTransactionResultPair = *InnerTransactionResultPair + +func (v *InnerTransactionResultPair) XdrPointer() interface{} { return v } +func (InnerTransactionResultPair) XdrTypeName() string { return "InnerTransactionResultPair" } +func (v InnerTransactionResultPair) XdrValue() interface{} { return v } +func (v *InnerTransactionResultPair) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *InnerTransactionResultPair) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%stransactionHash", name), XDR_Hash(&v.TransactionHash)) + x.Marshal(x.Sprintf("%sresult", name), XDR_InnerTransactionResult(&v.Result)) +} +func XDR_InnerTransactionResultPair(v *InnerTransactionResultPair) *InnerTransactionResultPair { + return v +} +func (_ XdrAnon_TransactionResult_Result) XdrValidTags() map[int32]bool { + return nil +} +func (u *XdrAnon_TransactionResult_Result) InnerResultPair() *InnerTransactionResultPair { + switch u.Code { + case TxFEE_BUMP_INNER_SUCCESS, TxFEE_BUMP_INNER_FAILED: + if v, ok := u._u.(*InnerTransactionResultPair); ok { + return v + } else { + var zero InnerTransactionResultPair + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_TransactionResult_Result.InnerResultPair accessed when Code == %v", u.Code) + return nil + } +} +func (u *XdrAnon_TransactionResult_Result) Results() *[]OperationResult { + switch u.Code { + case TxSUCCESS, TxFAILED: + if v, ok := 
u._u.(*[]OperationResult); ok { + return v + } else { + var zero []OperationResult + u._u = &zero + return &zero + } + default: + XdrPanic("XdrAnon_TransactionResult_Result.Results accessed when Code == %v", u.Code) + return nil + } +} +func (u XdrAnon_TransactionResult_Result) XdrValid() bool { + return true +} +func (u *XdrAnon_TransactionResult_Result) XdrUnionTag() XdrNum32 { + return XDR_TransactionResultCode(&u.Code) +} +func (u *XdrAnon_TransactionResult_Result) XdrUnionTagName() string { + return "Code" +} +func (u *XdrAnon_TransactionResult_Result) XdrUnionBody() XdrType { + switch u.Code { + case TxFEE_BUMP_INNER_SUCCESS, TxFEE_BUMP_INNER_FAILED: + return XDR_InnerTransactionResultPair(u.InnerResultPair()) + case TxSUCCESS, TxFAILED: + return (*_XdrVec_unbounded_OperationResult)(u.Results()) + default: + return nil + } +} +func (u *XdrAnon_TransactionResult_Result) XdrUnionBodyName() string { + switch u.Code { + case TxFEE_BUMP_INNER_SUCCESS, TxFEE_BUMP_INNER_FAILED: + return "InnerResultPair" + case TxSUCCESS, TxFAILED: + return "Results" + default: + return "" + } +} + +type XdrType_XdrAnon_TransactionResult_Result = *XdrAnon_TransactionResult_Result + +func (v *XdrAnon_TransactionResult_Result) XdrPointer() interface{} { return v } +func (XdrAnon_TransactionResult_Result) XdrTypeName() string { + return "XdrAnon_TransactionResult_Result" +} +func (v XdrAnon_TransactionResult_Result) XdrValue() interface{} { return v } +func (v *XdrAnon_TransactionResult_Result) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_TransactionResult_Result) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_TransactionResultCode(&u.Code).XdrMarshal(x, x.Sprintf("%scode", name)) + switch u.Code { + case TxFEE_BUMP_INNER_SUCCESS, TxFEE_BUMP_INNER_FAILED: + x.Marshal(x.Sprintf("%sinnerResultPair", name), XDR_InnerTransactionResultPair(u.InnerResultPair())) + return + case TxSUCCESS, TxFAILED: + x.Marshal(x.Sprintf("%sresults", name), (*_XdrVec_unbounded_OperationResult)(u.Results())) + return + default: + return + } +} +func XDR_XdrAnon_TransactionResult_Result(v *XdrAnon_TransactionResult_Result) *XdrAnon_TransactionResult_Result { + return v +} + +var _XdrTags_XdrAnon_TransactionResult_Ext = map[int32]bool{ + XdrToI32(0): true, +} + +func (_ XdrAnon_TransactionResult_Ext) XdrValidTags() map[int32]bool { + return _XdrTags_XdrAnon_TransactionResult_Ext +} +func (u XdrAnon_TransactionResult_Ext) XdrValid() bool { + switch u.V { + case 0: + return true + } + return false +} +func (u *XdrAnon_TransactionResult_Ext) XdrUnionTag() XdrNum32 { + return XDR_int32(&u.V) +} +func (u *XdrAnon_TransactionResult_Ext) XdrUnionTagName() string { + return "V" +} +func (u *XdrAnon_TransactionResult_Ext) XdrUnionBody() XdrType { + switch u.V { + case 0: + return nil + } + return nil +} +func (u *XdrAnon_TransactionResult_Ext) XdrUnionBodyName() string { + switch u.V { + case 0: + return "" + } + return "" +} + +type XdrType_XdrAnon_TransactionResult_Ext = *XdrAnon_TransactionResult_Ext + +func (v *XdrAnon_TransactionResult_Ext) XdrPointer() interface{} { return v } +func (XdrAnon_TransactionResult_Ext) XdrTypeName() string { return "XdrAnon_TransactionResult_Ext" } +func (v XdrAnon_TransactionResult_Ext) XdrValue() interface{} { return v } +func (v *XdrAnon_TransactionResult_Ext) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *XdrAnon_TransactionResult_Ext) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + 
} + XDR_int32(&u.V).XdrMarshal(x, x.Sprintf("%sv", name)) + switch u.V { + case 0: + return + } + XdrPanic("invalid V (%v) in XdrAnon_TransactionResult_Ext", u.V) +} +func XDR_XdrAnon_TransactionResult_Ext(v *XdrAnon_TransactionResult_Ext) *XdrAnon_TransactionResult_Ext { + return v +} + +type XdrType_TransactionResult = *TransactionResult + +func (v *TransactionResult) XdrPointer() interface{} { return v } +func (TransactionResult) XdrTypeName() string { return "TransactionResult" } +func (v TransactionResult) XdrValue() interface{} { return v } +func (v *TransactionResult) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *TransactionResult) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%sfeeCharged", name), XDR_Int64(&v.FeeCharged)) + x.Marshal(x.Sprintf("%sresult", name), XDR_XdrAnon_TransactionResult_Result(&v.Result)) + x.Marshal(x.Sprintf("%sext", name), XDR_XdrAnon_TransactionResult_Ext(&v.Ext)) +} +func XDR_TransactionResult(v *TransactionResult) *TransactionResult { return v } + +type _XdrArray_32_opaque [32]byte + +func (v *_XdrArray_32_opaque) GetByteSlice() []byte { return v[:] } +func (v *_XdrArray_32_opaque) XdrTypeName() string { return "opaque[]" } +func (v *_XdrArray_32_opaque) XdrValue() interface{} { return v[:] } +func (v *_XdrArray_32_opaque) XdrPointer() interface{} { return (*[32]byte)(v) } +func (v *_XdrArray_32_opaque) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *_XdrArray_32_opaque) String() string { return fmt.Sprintf("%x", v[:]) } +func (v *_XdrArray_32_opaque) Scan(ss fmt.ScanState, c rune) error { + return XdrArrayOpaqueScan(v[:], ss, c) +} +func (_XdrArray_32_opaque) XdrArraySize() uint32 { + const bound uint32 = 32 // Force error if not const or doesn't fit + return bound +} + +type XdrType_Hash struct { + *_XdrArray_32_opaque +} + +func XDR_Hash(v *Hash) XdrType_Hash { + return XdrType_Hash{(*_XdrArray_32_opaque)(v)} +} +func (XdrType_Hash) XdrTypeName() string { return "Hash" } +func (v XdrType_Hash) XdrUnwrap() XdrType { return v._XdrArray_32_opaque } + +type XdrType_Uint256 struct { + *_XdrArray_32_opaque +} + +func XDR_Uint256(v *Uint256) XdrType_Uint256 { + return XdrType_Uint256{(*_XdrArray_32_opaque)(v)} +} +func (XdrType_Uint256) XdrTypeName() string { return "Uint256" } +func (v XdrType_Uint256) XdrUnwrap() XdrType { return v._XdrArray_32_opaque } + +type XdrType_Uint32 struct { + XdrType_uint32 +} + +func XDR_Uint32(v *Uint32) XdrType_Uint32 { + return XdrType_Uint32{XDR_uint32(v)} +} +func (XdrType_Uint32) XdrTypeName() string { return "Uint32" } +func (v XdrType_Uint32) XdrUnwrap() XdrType { return v.XdrType_uint32 } + +type XdrType_Int32 struct { + XdrType_int32 +} + +func XDR_Int32(v *Int32) XdrType_Int32 { + return XdrType_Int32{XDR_int32(v)} +} +func (XdrType_Int32) XdrTypeName() string { return "Int32" } +func (v XdrType_Int32) XdrUnwrap() XdrType { return v.XdrType_int32 } + +type XdrType_Uint64 struct { + XdrType_uint64 +} + +func XDR_Uint64(v *Uint64) XdrType_Uint64 { + return XdrType_Uint64{XDR_uint64(v)} +} +func (XdrType_Uint64) XdrTypeName() string { return "Uint64" } +func (v XdrType_Uint64) XdrUnwrap() XdrType { return v.XdrType_uint64 } + +type XdrType_Int64 struct { + XdrType_int64 +} + +func XDR_Int64(v *Int64) XdrType_Int64 { + return XdrType_Int64{XDR_int64(v)} +} +func (XdrType_Int64) XdrTypeName() string { return "Int64" } +func (v XdrType_Int64) XdrUnwrap() XdrType { return v.XdrType_int64 } + +var _XdrNames_CryptoKeyType = 
map[int32]string{ + int32(KEY_TYPE_ED25519): "KEY_TYPE_ED25519", + int32(KEY_TYPE_PRE_AUTH_TX): "KEY_TYPE_PRE_AUTH_TX", + int32(KEY_TYPE_HASH_X): "KEY_TYPE_HASH_X", + int32(KEY_TYPE_MUXED_ED25519): "KEY_TYPE_MUXED_ED25519", +} +var _XdrValues_CryptoKeyType = map[string]int32{ + "KEY_TYPE_ED25519": int32(KEY_TYPE_ED25519), + "KEY_TYPE_PRE_AUTH_TX": int32(KEY_TYPE_PRE_AUTH_TX), + "KEY_TYPE_HASH_X": int32(KEY_TYPE_HASH_X), + "KEY_TYPE_MUXED_ED25519": int32(KEY_TYPE_MUXED_ED25519), +} + +func (CryptoKeyType) XdrEnumNames() map[int32]string { + return _XdrNames_CryptoKeyType +} +func (v CryptoKeyType) String() string { + if s, ok := _XdrNames_CryptoKeyType[int32(v)]; ok { + return s + } + return fmt.Sprintf("CryptoKeyType#%d", v) +} +func (v *CryptoKeyType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_CryptoKeyType[stok]; ok { + *v = CryptoKeyType(val) + return nil + } else if stok == "CryptoKeyType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid CryptoKeyType.", stok)) + } +} +func (v CryptoKeyType) GetU32() uint32 { return uint32(v) } +func (v *CryptoKeyType) SetU32(n uint32) { *v = CryptoKeyType(n) } +func (v *CryptoKeyType) XdrPointer() interface{} { return v } +func (CryptoKeyType) XdrTypeName() string { return "CryptoKeyType" } +func (v CryptoKeyType) XdrValue() interface{} { return v } +func (v *CryptoKeyType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_CryptoKeyType = *CryptoKeyType + +func XDR_CryptoKeyType(v *CryptoKeyType) *CryptoKeyType { return v } + +var _XdrComments_CryptoKeyType = map[int32]string{ + int32(KEY_TYPE_MUXED_ED25519): "MUXED enum values for supported type are derived from the enum values above by ORing them with 0x100", +} + +func (e CryptoKeyType) XdrEnumComments() map[int32]string { + return _XdrComments_CryptoKeyType +} + +var _XdrNames_PublicKeyType = map[int32]string{ + int32(PUBLIC_KEY_TYPE_ED25519): "PUBLIC_KEY_TYPE_ED25519", +} +var _XdrValues_PublicKeyType = map[string]int32{ + "PUBLIC_KEY_TYPE_ED25519": int32(PUBLIC_KEY_TYPE_ED25519), +} + +func (PublicKeyType) XdrEnumNames() map[int32]string { + return _XdrNames_PublicKeyType +} +func (v PublicKeyType) String() string { + if s, ok := _XdrNames_PublicKeyType[int32(v)]; ok { + return s + } + return fmt.Sprintf("PublicKeyType#%d", v) +} +func (v *PublicKeyType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_PublicKeyType[stok]; ok { + *v = PublicKeyType(val) + return nil + } else if stok == "PublicKeyType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid PublicKeyType.", stok)) + } +} +func (v PublicKeyType) GetU32() uint32 { return uint32(v) } +func (v *PublicKeyType) SetU32(n uint32) { *v = PublicKeyType(n) } +func (v *PublicKeyType) XdrPointer() interface{} { return v } +func (PublicKeyType) XdrTypeName() string { return "PublicKeyType" } +func (v PublicKeyType) XdrValue() interface{} { return v } +func (v *PublicKeyType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_PublicKeyType = *PublicKeyType + +func XDR_PublicKeyType(v *PublicKeyType) *PublicKeyType { return v } + +var _XdrNames_SignerKeyType = map[int32]string{ + 
int32(SIGNER_KEY_TYPE_ED25519): "SIGNER_KEY_TYPE_ED25519", + int32(SIGNER_KEY_TYPE_PRE_AUTH_TX): "SIGNER_KEY_TYPE_PRE_AUTH_TX", + int32(SIGNER_KEY_TYPE_HASH_X): "SIGNER_KEY_TYPE_HASH_X", +} +var _XdrValues_SignerKeyType = map[string]int32{ + "SIGNER_KEY_TYPE_ED25519": int32(SIGNER_KEY_TYPE_ED25519), + "SIGNER_KEY_TYPE_PRE_AUTH_TX": int32(SIGNER_KEY_TYPE_PRE_AUTH_TX), + "SIGNER_KEY_TYPE_HASH_X": int32(SIGNER_KEY_TYPE_HASH_X), +} + +func (SignerKeyType) XdrEnumNames() map[int32]string { + return _XdrNames_SignerKeyType +} +func (v SignerKeyType) String() string { + if s, ok := _XdrNames_SignerKeyType[int32(v)]; ok { + return s + } + return fmt.Sprintf("SignerKeyType#%d", v) +} +func (v *SignerKeyType) Scan(ss fmt.ScanState, _ rune) error { + if tok, err := ss.Token(true, XdrSymChar); err != nil { + return err + } else { + stok := string(tok) + if val, ok := _XdrValues_SignerKeyType[stok]; ok { + *v = SignerKeyType(val) + return nil + } else if stok == "SignerKeyType" { + if n, err := fmt.Fscanf(ss, "#%d", (*int32)(v)); n == 1 && err == nil { + return nil + } + } + return XdrError(fmt.Sprintf("%s is not a valid SignerKeyType.", stok)) + } +} +func (v SignerKeyType) GetU32() uint32 { return uint32(v) } +func (v *SignerKeyType) SetU32(n uint32) { *v = SignerKeyType(n) } +func (v *SignerKeyType) XdrPointer() interface{} { return v } +func (SignerKeyType) XdrTypeName() string { return "SignerKeyType" } +func (v SignerKeyType) XdrValue() interface{} { return v } +func (v *SignerKeyType) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } + +type XdrType_SignerKeyType = *SignerKeyType + +func XDR_SignerKeyType(v *SignerKeyType) *SignerKeyType { return v } + +var _XdrTags_PublicKey = map[int32]bool{ + XdrToI32(PUBLIC_KEY_TYPE_ED25519): true, +} + +func (_ PublicKey) XdrValidTags() map[int32]bool { + return _XdrTags_PublicKey +} +func (u *PublicKey) Ed25519() *Uint256 { + switch u.Type { + case PUBLIC_KEY_TYPE_ED25519: + if v, ok := u._u.(*Uint256); ok { + return v + } else { + var zero Uint256 + u._u = &zero + return &zero + } + default: + XdrPanic("PublicKey.Ed25519 accessed when Type == %v", u.Type) + return nil + } +} +func (u PublicKey) XdrValid() bool { + switch u.Type { + case PUBLIC_KEY_TYPE_ED25519: + return true + } + return false +} +func (u *PublicKey) XdrUnionTag() XdrNum32 { + return XDR_PublicKeyType(&u.Type) +} +func (u *PublicKey) XdrUnionTagName() string { + return "Type" +} +func (u *PublicKey) XdrUnionBody() XdrType { + switch u.Type { + case PUBLIC_KEY_TYPE_ED25519: + return XDR_Uint256(u.Ed25519()) + } + return nil +} +func (u *PublicKey) XdrUnionBodyName() string { + switch u.Type { + case PUBLIC_KEY_TYPE_ED25519: + return "Ed25519" + } + return "" +} + +type XdrType_PublicKey = *PublicKey + +func (v *PublicKey) XdrPointer() interface{} { return v } +func (PublicKey) XdrTypeName() string { return "PublicKey" } +func (v PublicKey) XdrValue() interface{} { return v } +func (v *PublicKey) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *PublicKey) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_PublicKeyType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case PUBLIC_KEY_TYPE_ED25519: + x.Marshal(x.Sprintf("%sed25519", name), XDR_Uint256(u.Ed25519())) + return + } + XdrPanic("invalid Type (%v) in PublicKey", u.Type) +} +func XDR_PublicKey(v *PublicKey) *PublicKey { return v } + +var _XdrTags_SignerKey = map[int32]bool{ + XdrToI32(SIGNER_KEY_TYPE_ED25519): true, + 
XdrToI32(SIGNER_KEY_TYPE_PRE_AUTH_TX): true, + XdrToI32(SIGNER_KEY_TYPE_HASH_X): true, +} + +func (_ SignerKey) XdrValidTags() map[int32]bool { + return _XdrTags_SignerKey +} +func (u *SignerKey) Ed25519() *Uint256 { + switch u.Type { + case SIGNER_KEY_TYPE_ED25519: + if v, ok := u._u.(*Uint256); ok { + return v + } else { + var zero Uint256 + u._u = &zero + return &zero + } + default: + XdrPanic("SignerKey.Ed25519 accessed when Type == %v", u.Type) + return nil + } +} + +/* SHA-256 Hash of TransactionSignaturePayload structure */ +func (u *SignerKey) PreAuthTx() *Uint256 { + switch u.Type { + case SIGNER_KEY_TYPE_PRE_AUTH_TX: + if v, ok := u._u.(*Uint256); ok { + return v + } else { + var zero Uint256 + u._u = &zero + return &zero + } + default: + XdrPanic("SignerKey.PreAuthTx accessed when Type == %v", u.Type) + return nil + } +} + +/* Hash of random 256 bit preimage X */ +func (u *SignerKey) HashX() *Uint256 { + switch u.Type { + case SIGNER_KEY_TYPE_HASH_X: + if v, ok := u._u.(*Uint256); ok { + return v + } else { + var zero Uint256 + u._u = &zero + return &zero + } + default: + XdrPanic("SignerKey.HashX accessed when Type == %v", u.Type) + return nil + } +} +func (u SignerKey) XdrValid() bool { + switch u.Type { + case SIGNER_KEY_TYPE_ED25519, SIGNER_KEY_TYPE_PRE_AUTH_TX, SIGNER_KEY_TYPE_HASH_X: + return true + } + return false +} +func (u *SignerKey) XdrUnionTag() XdrNum32 { + return XDR_SignerKeyType(&u.Type) +} +func (u *SignerKey) XdrUnionTagName() string { + return "Type" +} +func (u *SignerKey) XdrUnionBody() XdrType { + switch u.Type { + case SIGNER_KEY_TYPE_ED25519: + return XDR_Uint256(u.Ed25519()) + case SIGNER_KEY_TYPE_PRE_AUTH_TX: + return XDR_Uint256(u.PreAuthTx()) + case SIGNER_KEY_TYPE_HASH_X: + return XDR_Uint256(u.HashX()) + } + return nil +} +func (u *SignerKey) XdrUnionBodyName() string { + switch u.Type { + case SIGNER_KEY_TYPE_ED25519: + return "Ed25519" + case SIGNER_KEY_TYPE_PRE_AUTH_TX: + return "PreAuthTx" + case SIGNER_KEY_TYPE_HASH_X: + return "HashX" + } + return "" +} + +type XdrType_SignerKey = *SignerKey + +func (v *SignerKey) XdrPointer() interface{} { return v } +func (SignerKey) XdrTypeName() string { return "SignerKey" } +func (v SignerKey) XdrValue() interface{} { return v } +func (v *SignerKey) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (u *SignerKey) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + XDR_SignerKeyType(&u.Type).XdrMarshal(x, x.Sprintf("%stype", name)) + switch u.Type { + case SIGNER_KEY_TYPE_ED25519: + x.Marshal(x.Sprintf("%sed25519", name), XDR_Uint256(u.Ed25519())) + return + case SIGNER_KEY_TYPE_PRE_AUTH_TX: + x.Marshal(x.Sprintf("%spreAuthTx", name), XDR_Uint256(u.PreAuthTx())) + return + case SIGNER_KEY_TYPE_HASH_X: + x.Marshal(x.Sprintf("%shashX", name), XDR_Uint256(u.HashX())) + return + } + XdrPanic("invalid Type (%v) in SignerKey", u.Type) +} +func XDR_SignerKey(v *SignerKey) *SignerKey { return v } + +type XdrType_Signature struct { + XdrVecOpaque +} + +func XDR_Signature(v *Signature) XdrType_Signature { + return XdrType_Signature{XdrVecOpaque{v, 64}} +} +func (XdrType_Signature) XdrTypeName() string { return "Signature" } +func (v XdrType_Signature) XdrUnwrap() XdrType { return v.XdrVecOpaque } + +type XdrType_SignatureHint struct { + *_XdrArray_4_opaque +} + +func XDR_SignatureHint(v *SignatureHint) XdrType_SignatureHint { + return XdrType_SignatureHint{(*_XdrArray_4_opaque)(v)} +} +func (XdrType_SignatureHint) XdrTypeName() string { return "SignatureHint" } +func 
(v XdrType_SignatureHint) XdrUnwrap() XdrType { return v._XdrArray_4_opaque } + +type XdrType_NodeID struct { + XdrType_PublicKey +} + +func XDR_NodeID(v *NodeID) XdrType_NodeID { + return XdrType_NodeID{XDR_PublicKey(v)} +} +func (XdrType_NodeID) XdrTypeName() string { return "NodeID" } +func (v XdrType_NodeID) XdrUnwrap() XdrType { return v.XdrType_PublicKey } + +type XdrType_Curve25519Secret = *Curve25519Secret + +func (v *Curve25519Secret) XdrPointer() interface{} { return v } +func (Curve25519Secret) XdrTypeName() string { return "Curve25519Secret" } +func (v Curve25519Secret) XdrValue() interface{} { return v } +func (v *Curve25519Secret) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *Curve25519Secret) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%skey", name), (*_XdrArray_32_opaque)(&v.Key)) +} +func XDR_Curve25519Secret(v *Curve25519Secret) *Curve25519Secret { return v } + +type XdrType_Curve25519Public = *Curve25519Public + +func (v *Curve25519Public) XdrPointer() interface{} { return v } +func (Curve25519Public) XdrTypeName() string { return "Curve25519Public" } +func (v Curve25519Public) XdrValue() interface{} { return v } +func (v *Curve25519Public) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *Curve25519Public) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%skey", name), (*_XdrArray_32_opaque)(&v.Key)) +} +func XDR_Curve25519Public(v *Curve25519Public) *Curve25519Public { return v } + +type XdrType_HmacSha256Key = *HmacSha256Key + +func (v *HmacSha256Key) XdrPointer() interface{} { return v } +func (HmacSha256Key) XdrTypeName() string { return "HmacSha256Key" } +func (v HmacSha256Key) XdrValue() interface{} { return v } +func (v *HmacSha256Key) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *HmacSha256Key) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%skey", name), (*_XdrArray_32_opaque)(&v.Key)) +} +func XDR_HmacSha256Key(v *HmacSha256Key) *HmacSha256Key { return v } + +type XdrType_HmacSha256Mac = *HmacSha256Mac + +func (v *HmacSha256Mac) XdrPointer() interface{} { return v } +func (HmacSha256Mac) XdrTypeName() string { return "HmacSha256Mac" } +func (v HmacSha256Mac) XdrValue() interface{} { return v } +func (v *HmacSha256Mac) XdrMarshal(x XDR, name string) { x.Marshal(name, v) } +func (v *HmacSha256Mac) XdrRecurse(x XDR, name string) { + if name != "" { + name = x.Sprintf("%s.", name) + } + x.Marshal(x.Sprintf("%smac", name), (*_XdrArray_32_opaque)(&v.Mac)) +} +func XDR_HmacSha256Mac(v *HmacSha256Mac) *HmacSha256Mac { return v } diff --git a/handlers/federation/handler.go b/handlers/federation/handler.go index 89b702adf5..3a7bcfd11e 100644 --- a/handlers/federation/handler.go +++ b/handlers/federation/handler.go @@ -4,9 +4,11 @@ import ( "encoding/json" "fmt" "net/http" + "net/url" "github.com/pkg/errors" "github.com/stellar/go/address" + proto "github.com/stellar/go/protocols/federation" "github.com/stellar/go/support/log" ) @@ -14,7 +16,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { typ := r.URL.Query().Get("type") q := r.URL.Query().Get("q") - if q == "" { + if typ != "forward" && q == "" { h.writeJSON(w, ErrorResponse{ Code: "invalid_request", Message: "q parameter is blank", @@ -24,9 +26,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { switch typ { case "name": - 
h.lookupByName(w, q) + h.lookupByName(w, r, q) case "id": - h.lookupByID(w, q) + h.lookupByID(w, r, q) + case "forward": + h.lookupByForward(w, r.URL.Query()) case "txid": h.failNotImplemented(w, "txid type queries are not supported") default: @@ -35,7 +39,6 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { Message: fmt.Sprintf("invalid type: '%s'", typ), }, http.StatusBadRequest) } - } func (h *Handler) failNotFound(w http.ResponseWriter) { @@ -52,7 +55,7 @@ func (h *Handler) failNotImplemented(w http.ResponseWriter, msg string) { }, http.StatusNotImplemented) } -func (h *Handler) lookupByID(w http.ResponseWriter, q string) { +func (h *Handler) lookupByID(w http.ResponseWriter, r *http.Request, q string) { rd, ok := h.Driver.(ReverseDriver) if !ok { @@ -62,7 +65,7 @@ func (h *Handler) lookupByID(w http.ResponseWriter, q string) { // TODO: validate that `q` is a strkey encoded address - rec, err := rd.LookupReverseRecord(q) + rec, err := rd.LookupReverseRecord(r.Context(), q) if err != nil { h.writeError(w, errors.Wrap(err, "lookup record")) return @@ -73,13 +76,12 @@ func (h *Handler) lookupByID(w http.ResponseWriter, q string) { return } - h.writeJSON(w, SuccessResponse{ - StellarAddress: address.New(rec.Name, rec.Domain), - AccountID: q, + h.writeJSON(w, proto.IDResponse{ + Address: address.New(rec.Name, rec.Domain), }, http.StatusOK) } -func (h *Handler) lookupByName(w http.ResponseWriter, q string) { +func (h *Handler) lookupByName(w http.ResponseWriter, r *http.Request, q string) { name, domain, err := address.Split(q) if err != nil { h.writeJSON(w, ErrorResponse{ @@ -89,9 +91,34 @@ func (h *Handler) lookupByName(w http.ResponseWriter, q string) { return } - rec, err := h.Driver.LookupRecord(name, domain) + rec, err := h.Driver.LookupRecord(r.Context(), name, domain) if err != nil { - h.writeError(w, errors.Wrap(err, "lookup record")) + h.writeError(w, errors.Wrap(err, "lookupByName")) + return + } + if rec == nil { + h.failNotFound(w) + return + } + + h.writeJSON(w, proto.NameResponse{ + AccountID: rec.AccountID, + Memo: proto.Memo{rec.Memo}, + MemoType: rec.MemoType, + }, http.StatusOK) +} + +func (h *Handler) lookupByForward(w http.ResponseWriter, query url.Values) { + fd, ok := h.Driver.(ForwardDriver) + + if !ok { + h.failNotImplemented(w, "forward type queries are not supported") + return + } + + rec, err := fd.LookupForwardingRecord(query) + if err != nil { + h.writeError(w, errors.Wrap(err, "lookupByForward")) return } if rec == nil { @@ -99,11 +126,10 @@ func (h *Handler) lookupByName(w http.ResponseWriter, q string) { return } - h.writeJSON(w, SuccessResponse{ - StellarAddress: q, - AccountID: rec.AccountID, - Memo: rec.Memo, - MemoType: rec.MemoType, + h.writeJSON(w, proto.NameResponse{ + AccountID: rec.AccountID, + Memo: proto.Memo{rec.Memo}, + MemoType: rec.MemoType, }, http.StatusOK) } @@ -129,6 +155,11 @@ func (h *Handler) writeJSON( } func (h *Handler) writeError(w http.ResponseWriter, err error) { - log.Error(err) - http.Error(w, "An internal error occurred", http.StatusInternalServerError) + switch err := errors.Cause(err).(type) { + case ErrorResponse: + h.writeJSON(w, err, err.StatusCode) + default: + log.Error(err) + http.Error(w, "An internal error occurred", http.StatusInternalServerError) + } } diff --git a/handlers/federation/handler_test.go b/handlers/federation/handler_test.go index 1168e1d008..377831f75b 100644 --- a/handlers/federation/handler_test.go +++ b/handlers/federation/handler_test.go @@ -1,7 +1,9 @@ package federation 
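// NOTE on the writeError change in handler.go above: because ErrorResponse now
// implements error and carries a StatusCode, a Driver can surface a typed error
// to the client instead of the generic 500. A minimal, hypothetical sketch of
// such a driver method (myDriver and the domain check are illustrative only):
//
//	func (d myDriver) LookupRecord(ctx context.Context, name, domain string) (*Record, error) {
//		if domain != "stellar.org" {
//			return nil, ErrorResponse{
//				StatusCode: http.StatusBadRequest,
//				Code:       "unsupported_domain",
//				Message:    "only stellar.org addresses are served here",
//			}
//		}
//		return nil, nil // not found
//	}
//
// The handler wraps the returned error, but writeError calls errors.Cause to
// unwrap it, so the ErrorResponse is written back with its own status code.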
import ( + "context" "net/http" + "net/url" "testing" "github.com/stellar/go/support/db/dbtest" @@ -9,7 +11,7 @@ import ( ) func TestHandler(t *testing.T) { - db := dbtest.Postgres().Load(` + db := dbtest.Postgres(t).Load(` CREATE TABLE people (id character varying, name character varying, domain character varying); INSERT INTO people (id, name, domain) VALUES ('GD2GJPL3UOK5LX7TWXOACK2ZPWPFSLBNKL3GTGH6BLBNISK4BGWMFBBG', 'scott', 'stellar.org'), @@ -17,10 +19,12 @@ func TestHandler(t *testing.T) { `) defer db.Close() - driver := &SQLDriver{ - DB: db.Open().DB, - Dialect: db.Dialect, - LookupRecordQuery: "SELECT id FROM people WHERE name = ? AND domain = ?", + driver := &ReverseSQLDriver{ + SQLDriver: SQLDriver{ + DB: db.Open().DB, + Dialect: db.Dialect, + LookupRecordQuery: "SELECT id FROM people WHERE name = ? AND domain = ?", + }, LookupReverseRecordQuery: "SELECT name, domain FROM people WHERE id = ?", } @@ -30,7 +34,7 @@ func TestHandler(t *testing.T) { server := httptest.NewServer(t, handler) defer server.Close() - // Good forward request + // Good name request server.GET("/federation"). WithQuery("type", "name"). WithQuery("q", "scott*stellar.org"). @@ -134,3 +138,116 @@ func TestHandler(t *testing.T) { ValueEqual("code", "invalid_request") } + +func TestNameHandler(t *testing.T) { + db := dbtest.Postgres(t).Load(` + CREATE TABLE people (id character varying, name character varying, domain character varying); + INSERT INTO people (id, name, domain) VALUES + ('GD2GJPL3UOK5LX7TWXOACK2ZPWPFSLBNKL3GTGH6BLBNISK4BGWMFBBG', 'scott', 'stellar.org'), + ('GCYMGWPZ6NC2U7SO6SMXOP5ZLXOEC5SYPKITDMVEONLCHFSCCQR2J4S3', 'bartek', 'stellar.org'); + `) + defer db.Close() + + driver := &SQLDriver{ + DB: db.Open().DB, + Dialect: db.Dialect, + LookupRecordQuery: "SELECT id FROM people WHERE name = ? AND domain = ?", + } + + defer driver.DB.Close() + + handler := &Handler{driver} + server := httptest.NewServer(t, handler) + defer server.Close() + + // Good name request + server.GET("/federation"). + WithQuery("type", "name"). + WithQuery("q", "scott*stellar.org"). + Expect(). + Status(http.StatusOK). + JSON().Object(). + ContainsKey("account_id"). + ValueEqual("account_id", "GD2GJPL3UOK5LX7TWXOACK2ZPWPFSLBNKL3GTGH6BLBNISK4BGWMFBBG") + + // Reverse request + server.GET("/federation"). + WithQuery("type", "id"). + WithQuery("q", "GA3R753JKGXU6ETHNY3U6PYIY7D6UUCXXDYBRF4XURNAGXW3CVGQH2ZA"). + Expect(). + Status(http.StatusNotImplemented). + JSON().Object(). + ContainsKey("code"). + ValueEqual("code", "not_implemented") +} + +type ForwardTestDriver struct{} + +func (fd ForwardTestDriver) LookupForwardingRecord(query url.Values) (*Record, error) { + if query.Get("acct") == "1234" { + return &Record{ + AccountID: "GD2GJPL3UOK5LX7TWXOACK2ZPWPFSLBNKL3GTGH6BLBNISK4BGWMFBBG", + MemoType: "id", + Memo: "1", + }, nil + } else if query.Get("acct") == "4321" { + return &Record{ + AccountID: "GD2GJPL3UOK5LX7TWXOACK2ZPWPFSLBNKL3GTGH6BLBNISK4BGWMFBBG", + MemoType: "text", + Memo: "test", + }, nil + } else { + return nil, nil + } +} + +func (fd ForwardTestDriver) LookupRecord(ctx context.Context, name string, domain string) (*Record, error) { + return nil, nil +} + +func TestForwardHandler(t *testing.T) { + handler := &Handler{ForwardTestDriver{}} + server := httptest.NewServer(t, handler) + defer server.Close() + + // Good forward request + server.GET("/federation"). + WithQuery("type", "forward"). + WithQuery("forward_type", "bank_account"). + WithQuery("acct", "1234"). + Expect(). + Status(http.StatusOK). 
+ JSON().Object(). + ContainsKey("account_id"). + ValueEqual("account_id", "GD2GJPL3UOK5LX7TWXOACK2ZPWPFSLBNKL3GTGH6BLBNISK4BGWMFBBG"). + ContainsKey("memo_type"). + ValueEqual("memo_type", "id"). + ContainsKey("memo"). + ValueEqual("memo", "1") + + // Good forward request + server.GET("/federation"). + WithQuery("type", "forward"). + WithQuery("forward_type", "bank_account"). + WithQuery("acct", "4321"). + Expect(). + Status(http.StatusOK). + JSON().Object(). + ContainsKey("account_id"). + ValueEqual("account_id", "GD2GJPL3UOK5LX7TWXOACK2ZPWPFSLBNKL3GTGH6BLBNISK4BGWMFBBG"). + ContainsKey("memo_type"). + ValueEqual("memo_type", "text"). + ContainsKey("memo"). + ValueEqual("memo", "test") + + // Not Found forward request + server.GET("/federation"). + WithQuery("type", "forward"). + WithQuery("forward_type", "bank_account"). + WithQuery("acct", "8888"). + Expect(). + Status(http.StatusNotFound). + JSON().Object(). + ContainsKey("code"). + ValueEqual("code", "not_found") +} diff --git a/handlers/federation/main.go b/handlers/federation/main.go index f11ce94106..00c192713a 100644 --- a/handlers/federation/main.go +++ b/handlers/federation/main.go @@ -11,7 +11,9 @@ package federation import ( + "context" "database/sql" + "net/url" "sync" "github.com/stellar/go/support/db" @@ -20,18 +22,24 @@ import ( // Driver represents a data source against which federation queries can be // executed. type Driver interface { - // LookupRecord is called when a handler receives a so-called "forward" + // LookupRecord is called when a handler receives a so-called "name" // federation request to lookup a `Record` using the provided stellar address. // An implementation should return a nil `*Record` value if the lookup // successfully executed but no result was found. - LookupRecord(name string, domain string) (*Record, error) + LookupRecord(ctx context.Context, name, domain string) (*Record, error) } // ErrorResponse represents the JSON response sent to a client when the request -// triggered an error. +// triggered an error. FederationDriver methods can return this as an error and +// it will be passed to end user. type ErrorResponse struct { - Code string `json:"code"` - Message string `json:"message"` + StatusCode int `json:"-"` + Code string `json:"code"` + Message string `json:"message"` +} + +func (response ErrorResponse) Error() string { + return response.Message } // Handler represents an http handler that can service http requests that @@ -58,7 +66,18 @@ type ReverseDriver interface { // federation request to lookup a `ReverseRecord` using the provided strkey // encoded accountID. An implementation should return a nil `*ReverseRecord` // value if the lookup successfully executed but no result was found. - LookupReverseRecord(accountID string) (*ReverseRecord, error) + LookupReverseRecord(ctx context.Context, accountID string) (*ReverseRecord, error) +} + +// ForwardDriver represents a data source against which forward queries can +// be executed. +type ForwardDriver interface { + // Forward is called when a handler receives a so-called "forward" + // federation request to lookup a `Record` using the provided data (ex. bank + // account number). + // An implementation should return a nil `*Record` value if the lookup + // successfully executed but no result was found. 
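// A minimal illustrative implementation sketch (the driver type and the
// "forward_type"/"acct" parameter names below mirror the tests and are not
// mandated by the interface):
//
//	type bankDriver struct{ accounts map[string]*Record }
//
//	func (d bankDriver) LookupForwardingRecord(query url.Values) (*Record, error) {
//		if query.Get("forward_type") != "bank_account" {
//			return nil, nil // unknown forward type: behaves as "not found"
//		}
//		return d.accounts[query.Get("acct")], nil // nil, nil when the account is absent
//	}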
+ LookupForwardingRecord(query url.Values) (*Record, error) } // ReverseRecord represents the result from performing a "Reverse federation" @@ -68,7 +87,20 @@ type ReverseRecord struct { Domain string `db:"domain"` } -// SQLDriver represents an implementation of `Driver` and `ReverseDriver` that +// ReverseSQLDriver provides a `ReverseDriver` implementation based upon a SQL +// Server. See `SQLDriver`, the forward only version, for more details. +type ReverseSQLDriver struct { + SQLDriver + + // LookupReverseRecordQuery is a SQL query used for performing "reverse" + // federation queries. This query should accomodate a single parameter, using + // "?" as the placeholder. This provided parameter will be a strkey-encoded + // stellar account id to lookup, such as + // "GDOP3VI4UA5LS7AMLJI66RJUXEQ4HX46WUXTRTJGI5IKDLNWUBOW3FUK". + LookupReverseRecordQuery string +} + +// SQLDriver represents an implementation of `Driver` that // provides a simple way to incorporate a SQL-backed federation handler into an // application. Note: this type is not designed for dynamic configuration // changes. Once a method is called on the struct the public fields of this @@ -87,22 +119,6 @@ type SQLDriver struct { // the placeholder. This provided parameters will be a name and domain LookupRecordQuery string - // LookupReverseRecordQuery is a SQL query used for performing "reverse" - // federation queries. This query should accomodate a single parameter, using - // "?" as the placeholder. This provided parameter will be a strkey-encoded - // stellar account id to lookup, such as - // "GDOP3VI4UA5LS7AMLJI66RJUXEQ4HX46WUXTRTJGI5IKDLNWUBOW3FUK". - LookupReverseRecordQuery string - init sync.Once - db *db.Repo -} - -// SuccessResponse represents the successful JSON response that will be -// delivered to a client. 
-type SuccessResponse struct { - StellarAddress string `json:"stellar_address"` - AccountID string `json:"account_id"` - MemoType string `json:"memo_type,omitempty"` - Memo string `json:"memo,omitempty"` + db *db.Session } diff --git a/handlers/federation/reverse_sql_driver.go b/handlers/federation/reverse_sql_driver.go new file mode 100644 index 0000000000..62059d513f --- /dev/null +++ b/handlers/federation/reverse_sql_driver.go @@ -0,0 +1,29 @@ +package federation + +import ( + "context" + + "github.com/stellar/go/support/errors" +) + +// LookupReverseRecord implements `ReverseDriver` by performing +// `drv.LookupReverseRecordQuery` against `drv.DB` using the provided parameter +func (drv *ReverseSQLDriver) LookupReverseRecord( + ctx context.Context, + accountid string, +) (*ReverseRecord, error) { + drv.initDB() + var result ReverseRecord + + err := drv.db.GetRaw(ctx, &result, drv.LookupReverseRecordQuery, accountid) + + if drv.db.NoRows(err) { + return nil, nil + } else if err != nil { + return nil, errors.Wrap(err, "db get") + } + + return &result, nil +} + +var _ ReverseDriver = &ReverseSQLDriver{} diff --git a/handlers/federation/sql_driver.go b/handlers/federation/sql_driver.go index bf13c5f042..bab79bc62b 100644 --- a/handlers/federation/sql_driver.go +++ b/handlers/federation/sql_driver.go @@ -1,34 +1,19 @@ package federation -import "github.com/stellar/go/support/db" -import "github.com/stellar/go/support/errors" +import ( + "context" + + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" +) // LookupRecord implements `Driver` by performing `drv.LookupRecordQuery` // against `drv.DB` using the provided parameters -func (drv *SQLDriver) LookupRecord(name, domain string) (*Record, error) { +func (drv *SQLDriver) LookupRecord(ctx context.Context, name, domain string) (*Record, error) { drv.initDB() var result Record - err := drv.db.GetRaw(&result, drv.LookupRecordQuery, name, domain) - - if drv.db.NoRows(err) { - return nil, nil - } else if err != nil { - return nil, errors.Wrap(err, "db get") - } - - return &result, nil -} - -// LookupReverseRecord implements `ReverseDriver` by performing -// `drv.LookupReverseRecordQuery` against `drv.DB` using the provided parameter -func (drv *SQLDriver) LookupReverseRecord( - accountid string, -) (*ReverseRecord, error) { - drv.initDB() - var result ReverseRecord - - err := drv.db.GetRaw(&result, drv.LookupReverseRecordQuery, accountid) + err := drv.db.GetRaw(ctx, &result, drv.LookupRecordQuery, name, domain) if drv.db.NoRows(err) { return nil, nil @@ -40,10 +25,13 @@ func (drv *SQLDriver) LookupReverseRecord( } var _ Driver = &SQLDriver{} -var _ ReverseDriver = &SQLDriver{} func (drv *SQLDriver) initDB() { drv.init.Do(func() { + if drv.Dialect == "" { + panic("no dialect specified") + } + drv.db = db.Wrap(drv.DB, drv.Dialect) }) } diff --git a/historyarchive/archive.go b/historyarchive/archive.go new file mode 100644 index 0000000000..9ddfb0f37a --- /dev/null +++ b/historyarchive/archive.go @@ -0,0 +1,435 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "path" + "regexp" + "strconv" + "strings" + "sync" + + log "github.com/sirupsen/logrus" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +const hexPrefixPat = "/[0-9a-f]{2}/[0-9a-f]{2}/[0-9a-f]{2}/" +const rootHASPath = ".well-known/stellar-history.json" + +type CommandOptions struct { + Concurrency int + Range Range + DryRun bool + Force bool + Verify bool + Thorough bool +} + +type ConnectOptions struct { + Context context.Context + // NetworkPassphrase defines the expected network of history archive. It is + // checked when getting HAS. If network passphrase does not match, error is + // returned. + NetworkPassphrase string + S3Region string + S3Endpoint string + UnsignedRequests bool + // CheckpointFrequency is the number of ledgers between checkpoints + // if unset, DefaultCheckpointFrequency will be used + CheckpointFrequency uint32 +} + +type Ledger struct { + Header xdr.LedgerHeaderHistoryEntry + Transaction xdr.TransactionHistoryEntry + TransactionResult xdr.TransactionHistoryResultEntry +} + +type ArchiveBackend interface { + Exists(path string) (bool, error) + Size(path string) (int64, error) + GetFile(path string) (io.ReadCloser, error) + PutFile(path string, in io.ReadCloser) error + ListFiles(path string) (chan string, chan error) + CanListFiles() bool +} + +type ArchiveInterface interface { + GetPathHAS(path string) (HistoryArchiveState, error) + PutPathHAS(path string, has HistoryArchiveState, opts *CommandOptions) error + BucketExists(bucket Hash) (bool, error) + BucketSize(bucket Hash) (int64, error) + CategoryCheckpointExists(cat string, chk uint32) (bool, error) + GetLedgerHeader(chk uint32) (xdr.LedgerHeaderHistoryEntry, error) + GetRootHAS() (HistoryArchiveState, error) + GetLedgers(start, end uint32) (map[uint32]*Ledger, error) + GetCheckpointHAS(chk uint32) (HistoryArchiveState, error) + PutCheckpointHAS(chk uint32, has HistoryArchiveState, opts *CommandOptions) error + PutRootHAS(has HistoryArchiveState, opts *CommandOptions) error + ListBucket(dp DirPrefix) (chan string, chan error) + ListAllBuckets() (chan string, chan error) + ListAllBucketHashes() (chan Hash, chan error) + ListCategoryCheckpoints(cat string, pth string) (chan uint32, chan error) + GetXdrStreamForHash(hash Hash) (*XdrStream, error) + GetXdrStream(pth string) (*XdrStream, error) + GetCheckpointManager() CheckpointManager +} + +var _ ArchiveInterface = &Archive{} + +type Archive struct { + networkPassphrase string + + mutex sync.Mutex + checkpointFiles map[string](map[uint32]bool) + allBuckets map[Hash]bool + referencedBuckets map[Hash]bool + + expectLedgerHashes map[uint32]Hash + actualLedgerHashes map[uint32]Hash + expectTxSetHashes map[uint32]Hash + actualTxSetHashes map[uint32]Hash + expectTxResultSetHashes map[uint32]Hash + actualTxResultSetHashes map[uint32]Hash + + invalidBuckets int + + invalidLedgers int + invalidTxSets int + invalidTxResultSets int + + checkpointManager CheckpointManager + + backend ArchiveBackend +} + +func (arch *Archive) GetCheckpointManager() CheckpointManager { + return arch.checkpointManager +} + +func (a *Archive) GetPathHAS(path string) (HistoryArchiveState, error) { + var has HistoryArchiveState + rdr, err := a.backend.GetFile(path) + if err != nil { + return has, err + } + defer rdr.Close() + dec := 
json.NewDecoder(rdr) + err = dec.Decode(&has) + if err != nil { + return has, err + } + + // Compare network passphrase only when non empty. The field was added in + // Stellar-Core 14.1.0. + if has.NetworkPassphrase != "" && a.networkPassphrase != "" && + has.NetworkPassphrase != a.networkPassphrase { + return has, errors.Errorf( + "Network passphrase does not match! expected=%s actual=%s", + a.networkPassphrase, + has.NetworkPassphrase, + ) + } + + return has, nil +} + +func (a *Archive) PutPathHAS(path string, has HistoryArchiveState, opts *CommandOptions) error { + exists, err := a.backend.Exists(path) + if err != nil { + return err + } + if exists && !opts.Force { + log.Printf("skipping existing " + path) + return nil + } + buf, err := json.MarshalIndent(has, "", " ") + if err != nil { + return err + } + return a.backend.PutFile(path, + ioutil.NopCloser(bytes.NewReader(buf))) +} + +func (a *Archive) BucketExists(bucket Hash) (bool, error) { + return a.backend.Exists(BucketPath(bucket)) +} + +func (a *Archive) BucketSize(bucket Hash) (int64, error) { + return a.backend.Size(BucketPath(bucket)) +} + +func (a *Archive) CategoryCheckpointExists(cat string, chk uint32) (bool, error) { + return a.backend.Exists(CategoryCheckpointPath(cat, chk)) +} + +func (a *Archive) GetLedgerHeader(ledger uint32) (xdr.LedgerHeaderHistoryEntry, error) { + checkpoint := ledger + if !a.checkpointManager.IsCheckpoint(checkpoint) { + checkpoint = a.checkpointManager.NextCheckpoint(ledger) + } + path := CategoryCheckpointPath("ledger", checkpoint) + xdrStream, err := a.GetXdrStream(path) + if err != nil { + return xdr.LedgerHeaderHistoryEntry{}, errors.Wrap(err, "error opening ledger stream") + } + defer xdrStream.Close() + + for { + var ledgerHeader xdr.LedgerHeaderHistoryEntry + err = xdrStream.ReadOne(&ledgerHeader) + if err != nil { + if err == io.EOF { + break + } + return ledgerHeader, errors.Wrap(err, "error reading from ledger stream") + } + + if uint32(ledgerHeader.Header.LedgerSeq) == ledger { + return ledgerHeader, nil + } + } + + return xdr.LedgerHeaderHistoryEntry{}, errors.New("ledger header not found in checkpoint") +} + +func (a *Archive) GetRootHAS() (HistoryArchiveState, error) { + return a.GetPathHAS(rootHASPath) +} + +func (a *Archive) GetLedgers(start, end uint32) (map[uint32]*Ledger, error) { + if start > end { + return nil, errors.Errorf("range is invalid, start: %d end: %d", start, end) + } + startCheckpoint := a.GetCheckpointManager().GetCheckpoint(start) + endCheckpoint := a.GetCheckpointManager().GetCheckpoint(end) + cache := map[uint32]*Ledger{} + for cur := startCheckpoint; cur <= endCheckpoint; cur += a.GetCheckpointManager().GetCheckpointFrequency() { + for _, category := range []string{"ledger", "transactions", "results"} { + if exists, err := a.CategoryCheckpointExists(category, cur); err != nil { + return nil, errors.Wrap(err, "could not check if category checkpoint exists") + } else if !exists { + return nil, errors.Errorf("checkpoint %d is not published", cur) + } + + if err := a.fetchCategory(cache, category, cur); err != nil { + return nil, errors.Wrap(err, "could not fetch category checkpoint") + } + } + } + + return cache, nil +} + +func (a *Archive) fetchCategory(cache map[uint32]*Ledger, category string, checkpointSequence uint32) error { + checkpointPath := CategoryCheckpointPath(category, checkpointSequence) + xdrStream, err := a.GetXdrStream(checkpointPath) + if err != nil { + return errors.Wrapf(err, "error opening %s stream", category) + } + defer xdrStream.Close() 
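// Read the checkpoint stream entry by entry below, merging each decoded object
// into the per-ledger cache keyed by ledger sequence; a Ledger entry is
// allocated the first time a sequence is seen, and io.EOF ends the loop.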
+ + for { + switch category { + case "ledger": + var object xdr.LedgerHeaderHistoryEntry + if err = xdrStream.ReadOne(&object); err == nil { + entry := cache[uint32(object.Header.LedgerSeq)] + if entry == nil { + entry = &Ledger{} + } + entry.Header = object + cache[uint32(object.Header.LedgerSeq)] = entry + } + case "transactions": + var object xdr.TransactionHistoryEntry + if err = xdrStream.ReadOne(&object); err == nil { + entry := cache[uint32(object.LedgerSeq)] + if entry == nil { + entry = &Ledger{} + } + entry.Transaction = object + cache[uint32(object.LedgerSeq)] = entry + } + case "results": + var object xdr.TransactionHistoryResultEntry + if err = xdrStream.ReadOne(&object); err == nil { + entry := cache[uint32(object.LedgerSeq)] + if entry == nil { + entry = &Ledger{} + } + entry.TransactionResult = object + cache[uint32(object.LedgerSeq)] = entry + } + default: + panic("unknown category") + } + + if err == io.EOF { + break + } else if err != nil { + return errors.Wrapf(err, "error reading from %s stream", category) + } + } + + return nil +} + +func (a *Archive) GetCheckpointHAS(chk uint32) (HistoryArchiveState, error) { + return a.GetPathHAS(CategoryCheckpointPath("history", chk)) +} + +func (a *Archive) PutCheckpointHAS(chk uint32, has HistoryArchiveState, opts *CommandOptions) error { + return a.PutPathHAS(CategoryCheckpointPath("history", chk), has, opts) +} + +func (a *Archive) PutRootHAS(has HistoryArchiveState, opts *CommandOptions) error { + force := opts.Force + opts.Force = true + e := a.PutPathHAS(rootHASPath, has, opts) + opts.Force = force + return e +} + +func (a *Archive) ListBucket(dp DirPrefix) (chan string, chan error) { + return a.backend.ListFiles(path.Join("bucket", dp.Path())) +} + +func (a *Archive) ListAllBuckets() (chan string, chan error) { + return a.backend.ListFiles("bucket") +} + +func (a *Archive) ListAllBucketHashes() (chan Hash, chan error) { + sch, errs := a.backend.ListFiles("bucket") + ch := make(chan Hash) + rx := regexp.MustCompile("bucket" + hexPrefixPat + "bucket-([0-9a-f]{64})\\.xdr\\.gz$") + errs = makeErrorPump(errs) + go func() { + for s := range sch { + m := rx.FindStringSubmatch(s) + if m != nil { + ch <- MustDecodeHash(m[1]) + } + } + close(ch) + }() + return ch, errs +} + +func (a *Archive) ListCategoryCheckpoints(cat string, pth string) (chan uint32, chan error) { + ext := categoryExt(cat) + rx := regexp.MustCompile(cat + hexPrefixPat + cat + + "-([0-9a-f]{8})\\." 
+ regexp.QuoteMeta(ext) + "$") + sch, errs := a.backend.ListFiles(path.Join(cat, pth)) + ch := make(chan uint32) + errs = makeErrorPump(errs) + + go func() { + for s := range sch { + m := rx.FindStringSubmatch(s) + if m != nil { + i, e := strconv.ParseUint(m[1], 16, 32) + if e == nil { + ch <- uint32(i) + } else { + errs <- errors.New("decoding checkpoint number in filename " + s) + } + } + } + close(ch) + }() + return ch, errs +} + +func (a *Archive) GetBucketPathForHash(hash Hash) string { + return fmt.Sprintf( + "bucket/%s/bucket-%s.xdr.gz", + HashPrefix(hash).Path(), + hash.String(), + ) +} + +func (a *Archive) GetXdrStreamForHash(hash Hash) (*XdrStream, error) { + return a.GetXdrStream(a.GetBucketPathForHash(hash)) +} + +func (a *Archive) GetXdrStream(pth string) (*XdrStream, error) { + if !strings.HasSuffix(pth, ".xdr.gz") { + return nil, errors.New("File has non-.xdr.gz suffix: " + pth) + } + rdr, err := a.backend.GetFile(pth) + if err != nil { + return nil, err + } + return NewXdrGzStream(rdr) +} + +func Connect(u string, opts ConnectOptions) (*Archive, error) { + arch := Archive{ + networkPassphrase: opts.NetworkPassphrase, + checkpointFiles: make(map[string](map[uint32]bool)), + allBuckets: make(map[Hash]bool), + referencedBuckets: make(map[Hash]bool), + expectLedgerHashes: make(map[uint32]Hash), + actualLedgerHashes: make(map[uint32]Hash), + expectTxSetHashes: make(map[uint32]Hash), + actualTxSetHashes: make(map[uint32]Hash), + expectTxResultSetHashes: make(map[uint32]Hash), + actualTxResultSetHashes: make(map[uint32]Hash), + checkpointManager: NewCheckpointManager(opts.CheckpointFrequency), + } + for _, cat := range Categories() { + arch.checkpointFiles[cat] = make(map[uint32]bool) + } + + if u == "" { + return &arch, errors.New("URL is empty") + } + + parsed, err := url.Parse(u) + if err != nil { + return &arch, err + } + + if opts.Context == nil { + opts.Context = context.Background() + } + + pth := parsed.Path + if parsed.Scheme == "s3" { + // Inside s3, all paths start _without_ the leading / + if len(pth) > 0 && pth[0] == '/' { + pth = pth[1:] + } + arch.backend, err = makeS3Backend(parsed.Host, pth, opts) + } else if parsed.Scheme == "file" { + pth = path.Join(parsed.Host, pth) + arch.backend = makeFsBackend(pth, opts) + } else if parsed.Scheme == "http" || parsed.Scheme == "https" { + arch.backend = makeHttpBackend(parsed, opts) + } else if parsed.Scheme == "mock" { + arch.backend = makeMockBackend(opts) + } else { + err = errors.New("unknown URL scheme: '" + parsed.Scheme + "'") + } + return &arch, err +} + +func MustConnect(u string, opts ConnectOptions) *Archive { + arch, err := Connect(u, opts) + if err != nil { + log.Fatal(err) + } + return arch +} diff --git a/historyarchive/archive_pool.go b/historyarchive/archive_pool.go new file mode 100644 index 0000000000..590988e483 --- /dev/null +++ b/historyarchive/archive_pool.go @@ -0,0 +1,137 @@ +// Copyright 2021 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "math/rand" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// A ArchivePool is just a collection of `ArchiveInterface`s so that we can +// distribute requests fairly throughout the pool. 
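// A short usage sketch (the archive URLs are placeholders; the passphrase is
// the public testnet one):
//
//	pool, err := NewArchivePool(
//		[]string{
//			"https://history.example.org/archive-1",
//			"https://history.example.org/archive-2",
//		},
//		ConnectOptions{NetworkPassphrase: "Test SDF Network ; September 2015"},
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	has, err := pool.GetRootHAS() // served by a randomly chosen pool member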
+type ArchivePool []ArchiveInterface + +// NewArchivePool tries connecting to each of the provided history archive URLs, +// returning a pool of valid archives. +// +// If none of the archives work, this returns the error message of the last +// failed archive. Note that the errors for each individual archive are hard to +// track if there's success overall. +func NewArchivePool(archiveURLs []string, config ConnectOptions) (ArchivePool, error) { + if len(archiveURLs) <= 0 { + return nil, errors.New("No history archives provided") + } + + var lastErr error = nil + + // Try connecting to all of the listed archives, but only store valid ones. + var validArchives ArchivePool + for _, url := range archiveURLs { + archive, err := Connect( + url, + ConnectOptions{ + NetworkPassphrase: config.NetworkPassphrase, + CheckpointFrequency: config.CheckpointFrequency, + Context: config.Context, + }, + ) + + if err != nil { + lastErr = errors.Wrapf(err, "Error connecting to history archive (%s)", url) + continue + } + + validArchives = append(validArchives, archive) + } + + if len(validArchives) == 0 { + return nil, lastErr + } + + return validArchives, nil +} + +// Ensure the pool conforms to the ArchiveInterface +var _ ArchiveInterface = ArchivePool{} + +// Below are the ArchiveInterface method implementations. + +func (pa ArchivePool) GetAnyArchive() ArchiveInterface { + return pa[rand.Intn(len(pa))] +} + +func (pa ArchivePool) GetPathHAS(path string) (HistoryArchiveState, error) { + return pa.GetAnyArchive().GetPathHAS(path) +} + +func (pa ArchivePool) PutPathHAS(path string, has HistoryArchiveState, opts *CommandOptions) error { + return pa.GetAnyArchive().PutPathHAS(path, has, opts) +} + +func (pa ArchivePool) BucketExists(bucket Hash) (bool, error) { + return pa.GetAnyArchive().BucketExists(bucket) +} + +func (pa ArchivePool) BucketSize(bucket Hash) (int64, error) { + return pa.GetAnyArchive().BucketSize(bucket) +} + +func (pa ArchivePool) CategoryCheckpointExists(cat string, chk uint32) (bool, error) { + return pa.GetAnyArchive().CategoryCheckpointExists(cat, chk) +} + +func (pa ArchivePool) GetLedgerHeader(chk uint32) (xdr.LedgerHeaderHistoryEntry, error) { + return pa.GetAnyArchive().GetLedgerHeader(chk) +} + +func (pa ArchivePool) GetRootHAS() (HistoryArchiveState, error) { + return pa.GetAnyArchive().GetRootHAS() +} + +func (pa ArchivePool) GetLedgers(start, end uint32) (map[uint32]*Ledger, error) { + return pa.GetAnyArchive().GetLedgers(start, end) +} + +func (pa ArchivePool) GetCheckpointHAS(chk uint32) (HistoryArchiveState, error) { + return pa.GetAnyArchive().GetCheckpointHAS(chk) +} + +func (pa ArchivePool) PutCheckpointHAS(chk uint32, has HistoryArchiveState, opts *CommandOptions) error { + return pa.GetAnyArchive().PutCheckpointHAS(chk, has, opts) +} + +func (pa ArchivePool) PutRootHAS(has HistoryArchiveState, opts *CommandOptions) error { + return pa.GetAnyArchive().PutRootHAS(has, opts) +} + +func (pa ArchivePool) ListBucket(dp DirPrefix) (chan string, chan error) { + return pa.GetAnyArchive().ListBucket(dp) +} + +func (pa ArchivePool) ListAllBuckets() (chan string, chan error) { + return pa.GetAnyArchive().ListAllBuckets() +} + +func (pa ArchivePool) ListAllBucketHashes() (chan Hash, chan error) { + return pa.GetAnyArchive().ListAllBucketHashes() +} + +func (pa ArchivePool) ListCategoryCheckpoints(cat string, pth string) (chan uint32, chan error) { + return pa.GetAnyArchive().ListCategoryCheckpoints(cat, pth) +} + +func (pa ArchivePool) GetXdrStreamForHash(hash Hash) (*XdrStream, 
error) { + return pa.GetAnyArchive().GetXdrStreamForHash(hash) +} + +func (pa ArchivePool) GetXdrStream(pth string) (*XdrStream, error) { + return pa.GetAnyArchive().GetXdrStream(pth) +} + +func (pa ArchivePool) GetCheckpointManager() CheckpointManager { + return pa.GetAnyArchive().GetCheckpointManager() +} diff --git a/historyarchive/archive_test.go b/historyarchive/archive_test.go new file mode 100644 index 0000000000..ae712e6d73 --- /dev/null +++ b/historyarchive/archive_test.go @@ -0,0 +1,585 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "bytes" + "compress/gzip" + "crypto/rand" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "math/big" + "os" + "strings" + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func GetTestS3Archive() *Archive { + mx := big.NewInt(0xffffffff) + r, e := rand.Int(rand.Reader, mx) + if e != nil { + panic(e) + } + bucket := fmt.Sprintf("s3://history-stg.stellar.org/dev/archivist/test-%s", r) + region := "eu-west-1" + if env_bucket := os.Getenv("ARCHIVIST_TEST_S3_BUCKET"); env_bucket != "" { + bucket = fmt.Sprintf(env_bucket+"/archivist/test-%s", r) + } + if env_region := os.Getenv("ARCHIVIST_TEST_S3_REGION"); env_region != "" { + region = env_region + } + return MustConnect(bucket, ConnectOptions{S3Region: region, CheckpointFrequency: 64}) +} + +func GetTestMockArchive() *Archive { + return MustConnect("mock://test", ConnectOptions{CheckpointFrequency: 64}) +} + +var tmpdirs []string + +func GetTestFileArchive() *Archive { + d, e := ioutil.TempDir("/tmp", "archivist") + if e != nil { + panic(e) + } + if tmpdirs == nil { + tmpdirs = []string{d} + } else { + tmpdirs = append(tmpdirs, d) + } + return MustConnect("file://"+d, ConnectOptions{CheckpointFrequency: 64}) +} + +func cleanup() { + for _, d := range tmpdirs { + os.RemoveAll(d) + } +} + +func GetTestArchive() *Archive { + ty := os.Getenv("ARCHIVIST_TEST_TYPE") + if ty == "file" { + return GetTestFileArchive() + } else if ty == "s3" { + return GetTestS3Archive() + } else { + return GetTestMockArchive() + } +} + +func (arch *Archive) AddRandomBucket() (Hash, error) { + var h Hash + buf := make([]byte, 1024) + _, e := rand.Read(buf) + if e != nil { + return h, e + } + h = sha256.Sum256(buf) + pth := BucketPath(h) + e = arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf))) + return h, e +} + +func (arch *Archive) AddRandomCheckpointFile(cat string, chk uint32) error { + buf := make([]byte, 1024) + _, e := rand.Read(buf) + if e != nil { + return e + } + pth := CategoryCheckpointPath(cat, chk) + return arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf))) +} + +func (arch *Archive) AddRandomCheckpoint(chk uint32) error { + opts := &CommandOptions{Force: true} + for _, cat := range Categories() { + if cat == "history" { + var has HistoryArchiveState + has.CurrentLedger = chk + for i := 0; i < NumLevels; i++ { + curr, e := arch.AddRandomBucket() + if e != nil { + return e + } + snap, e := arch.AddRandomBucket() + if e != nil { + return e + } + next, e := arch.AddRandomBucket() + if e != nil { + return e + } + has.CurrentBuckets[i].Curr = curr.String() + has.CurrentBuckets[i].Snap = snap.String() + has.CurrentBuckets[i].Next.Output = next.String() + } + arch.PutCheckpointHAS(chk, has, opts) + arch.PutRootHAS(has, opts) + } else { + 
arch.AddRandomCheckpointFile(cat, chk) + } + } + return nil +} + +func (arch *Archive) PopulateRandomRange(rng Range) error { + for chk := range rng.GenerateCheckpoints(arch.checkpointManager) { + if e := arch.AddRandomCheckpoint(chk); e != nil { + return e + } + } + return nil +} + +func (arch *Archive) PopulateRandomRangeWithGap(rng Range, gap uint32) error { + for chk := range rng.GenerateCheckpoints(arch.checkpointManager) { + if chk == gap { + continue + } + if e := arch.AddRandomCheckpoint(chk); e != nil { + return e + } + } + return nil +} + +func testRange() Range { + return Range{Low: 63, High: 0x3bf} +} + +func testOptions() *CommandOptions { + return &CommandOptions{Range: testRange(), Concurrency: 16} +} + +func GetRandomPopulatedArchive() *Archive { + a := GetTestArchive() + a.PopulateRandomRange(testRange()) + return a +} + +func GetRandomPopulatedArchiveWithGapAt(gap uint32) *Archive { + a := GetTestArchive() + a.PopulateRandomRangeWithGap(testRange(), gap) + return a +} + +func TestScan(t *testing.T) { + defer cleanup() + opts := testOptions() + GetRandomPopulatedArchive().Scan(opts) +} + +func TestScanSize(t *testing.T) { + defer cleanup() + opts := testOptions() + arch := GetRandomPopulatedArchive() + arch.Scan(opts) + assert.Equal(t, opts.Range.SizeInCheckPoints(arch.checkpointManager), + len(arch.checkpointFiles["history"])) +} + +func TestScanSizeSubrange(t *testing.T) { + defer cleanup() + opts := testOptions() + arch := GetRandomPopulatedArchive() + opts.Range.Low = arch.checkpointManager.NextCheckpoint(opts.Range.Low) + opts.Range.High = arch.checkpointManager.PrevCheckpoint(opts.Range.High) + arch.Scan(opts) + assert.Equal(t, opts.Range.SizeInCheckPoints(arch.checkpointManager), + len(arch.checkpointFiles["history"])) +} + +func TestScanSizeSubrangeFewBuckets(t *testing.T) { + defer cleanup() + opts := testOptions() + arch := GetRandomPopulatedArchive() + opts.Range.Low = 0x1ff + opts.Range.High = 0x1ff + arch.Scan(opts) + // We should only scan one checkpoint worth of buckets. + assert.Less(t, len(arch.allBuckets), 40) +} + +func TestScanSizeSubrangeAllBuckets(t *testing.T) { + defer cleanup() + opts := testOptions() + arch := GetRandomPopulatedArchive() + arch.Scan(opts) + // We should scan all checkpoints worth of buckets. 
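	// Rough sizing behind the bounds used in these two tests (a reviewer's
	// back-of-the-envelope, not part of the change): testRange() covers
	// checkpoints 0x3f..0x3bf, i.e. 15 checkpoints at frequency 64, and
	// AddRandomCheckpoint writes 3 random buckets (curr, snap, next.output)
	// for each of the NumLevels = 11 levels. A single checkpoint therefore
	// contributes 33 buckets (hence the < 40 bound above), while the full
	// range contributes roughly 15 * 33 = 495 (hence the > 300 bound below).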
+ assert.Less(t, 300, len(arch.allBuckets)) +} + +func countMissing(arch *Archive, opts *CommandOptions) int { + n := 0 + arch.Scan(opts) + for _, missing := range arch.CheckCheckpointFilesMissing(opts) { + n += len(missing) + } + n += len(arch.CheckBucketsMissing()) + return n +} + +func TestScanSlowMissing(t *testing.T) { + defer cleanup() + opts := testOptions() + arch := GetRandomPopulatedArchiveWithGapAt(0x1bf) + arch.ScanCheckpointsSlow(opts) + n := 0 + for _, missing := range arch.CheckCheckpointFilesMissing(opts) { + n += len(missing) + } + assert.Equal(t, 5, n) +} + +func TestMirror(t *testing.T) { + defer cleanup() + opts := testOptions() + src := GetRandomPopulatedArchive() + dst := GetTestArchive() + Mirror(src, dst, opts) + assert.Equal(t, 0, countMissing(dst, opts)) +} + +func copyFile(category string, checkpoint uint32, src *Archive, dst *Archive) { + pth := CategoryCheckpointPath(category, checkpoint) + rdr, err := src.backend.GetFile(pth) + if err != nil { + panic(err) + } + if err = dst.backend.PutFile(pth, rdr); err != nil { + panic(err) + } +} + +func TestMirrorThenRepair(t *testing.T) { + defer cleanup() + opts := testOptions() + src := GetRandomPopulatedArchive() + dst := GetTestArchive() + Mirror(src, dst, opts) + assert.Equal(t, 0, countMissing(dst, opts)) + bad := opts.Range.Low + uint32(opts.Range.SizeInCheckPoints(src.checkpointManager)/2) + src.AddRandomCheckpoint(bad) + copyFile("history", bad, src, dst) + assert.NotEqual(t, 0, countMissing(dst, opts)) + Repair(src, dst, opts) + assert.Equal(t, 0, countMissing(dst, opts)) +} + +func (a *Archive) MustGetRootHAS() HistoryArchiveState { + has, e := a.GetRootHAS() + if e != nil { + panic("failed to get root HAS") + } + return has +} + +func TestMirrorSubsetDoPointerUpdate(t *testing.T) { + defer cleanup() + opts := testOptions() + src := GetRandomPopulatedArchive() + dst := GetTestArchive() + Mirror(src, dst, opts) + oldHigh := opts.Range.High + assert.Equal(t, oldHigh, dst.MustGetRootHAS().CurrentLedger) + opts.Range.High = src.checkpointManager.NextCheckpoint(oldHigh) + src.AddRandomCheckpoint(opts.Range.High) + Mirror(src, dst, opts) + assert.Equal(t, opts.Range.High, dst.MustGetRootHAS().CurrentLedger) +} + +func TestMirrorSubsetNoPointerUpdate(t *testing.T) { + defer cleanup() + opts := testOptions() + src := GetRandomPopulatedArchive() + dst := GetTestArchive() + Mirror(src, dst, opts) + oldHigh := opts.Range.High + assert.Equal(t, oldHigh, dst.MustGetRootHAS().CurrentLedger) + src.AddRandomCheckpoint(src.checkpointManager.NextCheckpoint(oldHigh)) + opts.Range.Low = 0x7f + opts.Range.High = 0xff + Mirror(src, dst, opts) + assert.Equal(t, oldHigh, dst.MustGetRootHAS().CurrentLedger) +} + +func TestDryRunNoRepair(t *testing.T) { + defer cleanup() + opts := testOptions() + src := GetRandomPopulatedArchive() + dst := GetTestArchive() + Mirror(src, dst, opts) + assert.Equal(t, 0, countMissing(dst, opts)) + bad := opts.Range.Low + uint32(opts.Range.SizeInCheckPoints(src.checkpointManager)/2) + src.AddRandomCheckpoint(bad) + copyFile("history", bad, src, dst) + assert.NotEqual(t, 0, countMissing(dst, opts)) + opts.DryRun = true + Repair(src, dst, opts) + assert.NotEqual(t, 0, countMissing(dst, opts)) +} + +func TestNetworkPassphrase(t *testing.T) { + makeHASReader := func() io.ReadCloser { + return ioutil.NopCloser(strings.NewReader(` +{ + "version": 1, + "server": "v14.1.0rc2", + "currentLedger": 31883135, + "networkPassphrase": "Public Global Stellar Network ; September 2015" +}`)) + } + + makeHASReaderNoNetwork 
:= func() io.ReadCloser { + return ioutil.NopCloser(strings.NewReader(` +{ + "version": 1, + "server": "v14.1.0rc2", + "currentLedger": 31883135 +}`)) + } + + // No network passphrase set in options + archive := MustConnect("mock://test", ConnectOptions{CheckpointFrequency: 64}) + err := archive.backend.PutFile("has.json", makeHASReader()) + assert.NoError(t, err) + _, err = archive.GetPathHAS("has.json") + assert.NoError(t, err) + + // No network passphrase set in HAS + archive = MustConnect("mock://test", ConnectOptions{ + NetworkPassphrase: "Public Global Stellar Network ; September 2015", + CheckpointFrequency: 64, + }) + err = archive.backend.PutFile("has.json", makeHASReaderNoNetwork()) + assert.NoError(t, err) + _, err = archive.GetPathHAS("has.json") + assert.NoError(t, err) + + // Correct network passphrase set in options + archive = MustConnect("mock://test", ConnectOptions{ + NetworkPassphrase: "Public Global Stellar Network ; September 2015", + CheckpointFrequency: 64, + }) + err = archive.backend.PutFile("has.json", makeHASReader()) + assert.NoError(t, err) + _, err = archive.GetPathHAS("has.json") + assert.NoError(t, err) + + // Incorrect network passphrase set in options + archive = MustConnect("mock://test", ConnectOptions{ + NetworkPassphrase: "Test SDF Network ; September 2015", + CheckpointFrequency: 64, + }) + err = archive.backend.PutFile("has.json", makeHASReader()) + assert.NoError(t, err) + _, err = archive.GetPathHAS("has.json") + assert.EqualError(t, err, "Network passphrase does not match! expected=Test SDF Network ; September 2015 actual=Public Global Stellar Network ; September 2015") +} + +func TestXdrDecode(t *testing.T) { + + xdrbytes := []byte{ + + 0, 0, 0, 0, // entry type 0, liveentry + + 0, 32, 223, 100, // lastmodified 2154340 + + 0, 0, 0, 0, // entry type 0, account + + 0, 0, 0, 0, // key type 0 + 23, 140, 68, 253, // ed25519 key (32 bytes) + 184, 162, 186, 195, + 118, 239, 158, 210, + 100, 241, 174, 254, + 108, 110, 165, 140, + 75, 76, 83, 141, + 104, 212, 227, 80, + 1, 214, 157, 7, + + 0, 0, 0, 29, // 64bit balance: 125339976000 + 46, 216, 65, 64, + + 0, 0, 129, 170, // 64bit seqnum: 142567144423475 + 0, 0, 0, 51, + + 0, 0, 0, 1, // numsubentries: 1 + + 0, 0, 0, 1, // inflationdest type, populated + + 0, 0, 0, 0, // key type 0 + 87, 240, 19, 71, // ed25519 key (32 bytes) + 52, 91, 9, 62, + 213, 239, 178, 85, + 161, 119, 108, 251, + 168, 90, 76, 116, + 12, 48, 134, 248, + 115, 255, 117, 50, + 19, 18, 170, 203, + + 0, 0, 0, 0, // flags + + 0, 0, 0, 19, // homedomain: 19 bytes + 1 null padding + 99, 101, 110, 116, // "centaurus.xcoins.de" + 97, 117, 114, 117, + 115, 46, 120, 99, + 111, 105, 110, 115, + 46, 100, 101, 0, + + 1, 0, 0, 0, // thresholds + 0, 0, 0, 0, // signers (null) + + 0, 0, 0, 0, // entry.account.ext.v: 0 + + 0, 0, 0, 0, // entry.ext.v: 0 + } + + assert.Equal(t, len(xdrbytes), 152) + + var tmp xdr.BucketEntry + n, err := xdr.Unmarshal(bytes.NewReader(xdrbytes[:]), &tmp) + fmt.Printf("Decoded %d bytes\n", n) + if err != nil { + panic(err) + } + assert.Equal(t, len(xdrbytes), n) + + var out bytes.Buffer + n, err = xdr.Marshal(&out, &tmp) + fmt.Printf("Encoded %d bytes\n", n) + if err != nil { + panic(err) + } + + assert.Equal(t, out.Len(), n) + assert.Equal(t, out.Bytes(), xdrbytes) +} + +type xdrEntry interface { + MarshalBinary() ([]byte, error) +} + +func writeCategoryFile(t *testing.T, backend ArchiveBackend, path string, entries []xdrEntry) { + file := &bytes.Buffer{} + writer := gzip.NewWriter(file) + + for _, entry := range entries 
{ + assert.NoError(t, xdr.MarshalFramed(writer, entry)) + } + assert.NoError(t, writer.Close()) + + assert.NoError(t, backend.PutFile(path, ioutil.NopCloser(file))) +} + +func assertXdrEquals(t *testing.T, a, b xdrEntry) { + b64, err := xdr.MarshalBase64(a) + assert.NoError(t, err) + other, err := xdr.MarshalBase64(b) + assert.NoError(t, err) + assert.Equal(t, b64, other) +} + +func TestGetLedgers(t *testing.T) { + archive := GetTestMockArchive() + _, err := archive.GetLedgers(1000, 1002) + assert.EqualError(t, err, "checkpoint 1023 is not published") + + ledgerHeaders := []xdr.LedgerHeaderHistoryEntry{ + { + Hash: xdr.Hash{1}, + Header: xdr.LedgerHeader{ + LedgerSeq: 1000, + }, + }, + { + Hash: xdr.Hash{2}, + Header: xdr.LedgerHeader{ + LedgerSeq: 1001, + }, + }, + { + Hash: xdr.Hash{3}, + Header: xdr.LedgerHeader{ + LedgerSeq: 1002, + }, + }, + } + writeCategoryFile( + t, archive.backend, "ledger/00/00/03/ledger-000003ff.xdr.gz", + []xdrEntry{ledgerHeaders[0], ledgerHeaders[1], ledgerHeaders[2]}, + ) + + transactions := []xdr.TransactionHistoryEntry{ + { + LedgerSeq: 1000, + TxSet: xdr.TransactionSet{ + PreviousLedgerHash: xdr.Hash{10}, + }, + }, + { + LedgerSeq: 1001, + TxSet: xdr.TransactionSet{ + PreviousLedgerHash: xdr.Hash{11}, + }, + }, + { + LedgerSeq: 1002, + TxSet: xdr.TransactionSet{ + PreviousLedgerHash: xdr.Hash{12}, + }, + }, + } + writeCategoryFile( + t, archive.backend, "transactions/00/00/03/transactions-000003ff.xdr.gz", + []xdrEntry{transactions[0], transactions[1], transactions[2]}, + ) + + result := xdr.TransactionResult{Result: xdr.TransactionResultResult{Code: xdr.TransactionResultCodeTxBadSeq}} + results := []xdr.TransactionHistoryResultEntry{ + { + LedgerSeq: 1000, + TxResultSet: xdr.TransactionResultSet{ + Results: []xdr.TransactionResultPair{ + {TransactionHash: xdr.Hash{213}, Result: result}, + }, + }, + }, + { + LedgerSeq: 1001, + TxResultSet: xdr.TransactionResultSet{ + Results: []xdr.TransactionResultPair{ + {TransactionHash: xdr.Hash{198}, Result: result}, + }, + }, + }, + { + LedgerSeq: 1002, + TxResultSet: xdr.TransactionResultSet{ + Results: []xdr.TransactionResultPair{ + {TransactionHash: xdr.Hash{131}, Result: result}, + }, + }, + }, + } + writeCategoryFile( + t, archive.backend, "results/00/00/03/results-000003ff.xdr.gz", + []xdrEntry{results[0], results[1], results[2]}, + ) + + ledgers, err := archive.GetLedgers(1000, 1002) + assert.NoError(t, err) + assert.Len(t, ledgers, 3) + for i, seq := range []uint32{1000, 1001, 1002} { + ledger := ledgers[seq] + assertXdrEquals(t, ledgerHeaders[i], ledger.Header) + assertXdrEquals(t, transactions[i], ledger.Transaction) + assertXdrEquals(t, results[i], ledger.TransactionResult) + } +} diff --git a/tools/stellar-archivist/internal/dirprefix.go b/historyarchive/dirprefix.go similarity index 86% rename from tools/stellar-archivist/internal/dirprefix.go rename to historyarchive/dirprefix.go index 3e34f01d64..6030b0ffed 100644 --- a/tools/stellar-archivist/internal/dirprefix.go +++ b/historyarchive/dirprefix.go @@ -2,7 +2,7 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( "fmt" @@ -12,10 +12,10 @@ import ( type DirPrefix [3]uint8 func (d DirPrefix) Path() string { - return d.PathPrefix(len(d)) + return d.pathPrefix(len(d)) } -func (d DirPrefix) PathPrefix(n int) string { +func (d DirPrefix) pathPrefix(n int) string { tmp := []string{} for i, b := range d { if i > n { @@ -35,7 +35,7 @@ func CheckpointPrefix(seq uint32) DirPrefix { } func HashPrefix(h Hash) DirPrefix { - return DirPrefix{ h[0], h[1], h[2] } + return DirPrefix{h[0], h[1], h[2]} } // Returns an array of path prefixes to walk to enumerate all the @@ -48,7 +48,7 @@ func RangePaths(r Range) []string { for i, e := range lowpre { diff = i if highpre[i] != e { - break; + break } } // log.Printf("prefix %s and %s differ at point %d", @@ -56,7 +56,7 @@ func RangePaths(r Range) []string { tmp := lowpre for i := int(lowpre[diff]); i <= int(highpre[diff]); i++ { tmp[diff] = uint8(i) - res = append(res, tmp.PathPrefix(diff)) + res = append(res, tmp.pathPrefix(diff)) } return res } diff --git a/tools/stellar-archivist/internal/dirprefix_test.go b/historyarchive/dirprefix_test.go similarity index 72% rename from tools/stellar-archivist/internal/dirprefix_test.go rename to historyarchive/dirprefix_test.go index e35690ad82..5e4d9d6937 100644 --- a/tools/stellar-archivist/internal/dirprefix_test.go +++ b/historyarchive/dirprefix_test.go @@ -2,10 +2,11 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( "testing" + "github.com/stretchr/testify/assert" ) @@ -18,16 +19,16 @@ func TestDirPrefixPath(t *testing.T) { } func TestRangePaths(t *testing.T) { - r := Range{ Low:0x0010001f, High:0x0014001b, } - assert.Equal(t, RangePaths(r), []string{ + r := Range{Low: 0x0010001f, High: 0x0014001b} + assert.Equal(t, []string{ "00/10", "00/11", "00/12", "00/13", "00/14", - }) - r = Range{ Low:0x00100000, High:0x0010ff00, } + }, RangePaths(r)) + r = Range{Low: 0x00100000, High: 0x0010ff00} rps := RangePaths(r) - assert.Equal(t, rps[0], "00/10/00") - assert.Equal(t, rps[255], "00/10/ff") + assert.Equal(t, "00/10/00", rps[0]) + assert.Equal(t, "00/10/ff", rps[255]) } diff --git a/historyarchive/fs_archive.go b/historyarchive/fs_archive.go new file mode 100644 index 0000000000..3a241076b8 --- /dev/null +++ b/historyarchive/fs_archive.go @@ -0,0 +1,128 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "io" + "os" + "path" + "path/filepath" + + log "github.com/sirupsen/logrus" +) + +type FsArchiveBackend struct { + prefix string +} + +func (b *FsArchiveBackend) GetFile(pth string) (io.ReadCloser, error) { + return os.Open(path.Join(b.prefix, pth)) +} + +func (b *FsArchiveBackend) Exists(pth string) (bool, error) { + pth = path.Join(b.prefix, pth) + log.WithField("path", pth).Trace("fs: check exists") + _, err := os.Stat(pth) + if err != nil { + if os.IsNotExist(err) { + log.WithField("path", pth).WithField("exists", false).Trace("fs: check exists") + return false, nil + } else { + log.WithField("path", pth).WithError(err).Error("fs: check exists") + return false, err + } + } + log.WithField("path", pth).WithField("exists", true).Trace("fs: check exists") + return true, nil +} + +func (b *FsArchiveBackend) Size(pth string) (int64, error) { + pth = path.Join(b.prefix, pth) + log.WithField("path", pth).Trace("fs: get size") + fi, err := os.Stat(pth) + if err != nil { + if os.IsNotExist(err) { + log.WithField("path", pth).WithError(err).Warn("fs: get size") + return 0, nil + } else { + log.WithField("path", pth).WithError(err).Error("fs: get size") + return 0, err + } + } + log.WithField("path", pth).WithField("size", fi.Size()).Trace("fs: got size") + return fi.Size(), nil +} + +func (b *FsArchiveBackend) PutFile(pth string, in io.ReadCloser) error { + dir := path.Join(b.prefix, path.Dir(pth)) + log.WithField("path", pth).Trace("fs: put file") + exists, err := b.Exists(dir) + if err != nil { + log.WithField("path", pth).WithError(err).Error("fs: put file (check exists)") + return err + } + + if !exists { + if e := os.MkdirAll(dir, 0755); e != nil { + log.WithField("path", pth).WithError(err).Error("fs: put file (mkdir)") + return e + } + } + + pth = path.Join(b.prefix, pth) + out, e := os.Create(pth) + if e != nil { + log.WithField("path", pth).WithError(err).Error("fs: put file (create)") + return e + } + defer in.Close() + defer out.Close() + _, e = io.Copy(out, in) + if e != nil { + log.WithField("path", pth).WithError(err).Error("fs: put file (copy)") + } + return e +} + +func (b *FsArchiveBackend) ListFiles(pth string) (chan string, chan error) { + ch := make(chan string) + errs := make(chan error) + go func() { + log.WithField("path", pth).Trace("fs: list files") + exists, err := b.Exists(pth) + if err != nil { + errs <- err + return + } + if exists { + filepath.Walk(path.Join(b.prefix, pth), + func(p string, info os.FileInfo, err error) error { + if err != nil { + log.WithField("path", pth).WithError(err).Error("fs: list files (walk)") + errs <- err + return nil + } + if info != nil && !info.IsDir() { + log.WithField("found", p).Trace("fs: list files (walk)") + ch <- p + } + return nil + }) + } + close(ch) + close(errs) + }() + return ch, errs +} + +func (b *FsArchiveBackend) CanListFiles() bool { + return true +} + +func makeFsBackend(pth string, opts ConnectOptions) ArchiveBackend { + return &FsArchiveBackend{ + prefix: pth, + } +} diff --git a/tools/stellar-archivist/internal/hash.go b/historyarchive/hash.go similarity index 86% rename from tools/stellar-archivist/internal/hash.go rename to historyarchive/hash.go index c3462ea8c2..ef4727ab1b 100644 --- a/tools/stellar-archivist/internal/hash.go +++ b/historyarchive/hash.go @@ -2,12 +2,11 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( "crypto/sha256" "encoding/hex" - "errors" "fmt" ) @@ -20,11 +19,11 @@ func DecodeHash(s string) (Hash, error) { return h, err } if len(hs) != sha256.Size { - return h, errors.New(fmt.Sprintf("unexpected hash size: %d", len(hs))) + return h, fmt.Errorf("unexpected hash size: %d", len(hs)) } n := copy(h[:], hs) if n != sha256.Size { - return h, errors.New(fmt.Sprintf("copy() returned unexpected count: %d", n)) + return h, fmt.Errorf("copy() returned unexpected count: %d", n) } return h, nil } @@ -41,7 +40,6 @@ func MustDecodeHash(s string) Hash { return h } - func EmptyXdrArrayHash() Hash { // This is the hash of [0,0,0,0], the value wire-entry you get when you write an // XDR variable-length array with zero entries. This appears as a hash in a diff --git a/tools/stellar-archivist/internal/hash_test.go b/historyarchive/hash_test.go similarity index 95% rename from tools/stellar-archivist/internal/hash_test.go rename to historyarchive/hash_test.go index eb0eeb8f95..10e84b49a0 100644 --- a/tools/stellar-archivist/internal/hash_test.go +++ b/historyarchive/hash_test.go @@ -2,10 +2,11 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( "testing" + "github.com/stretchr/testify/assert" ) diff --git a/historyarchive/history_archive_state.go b/historyarchive/history_archive_state.go new file mode 100644 index 0000000000..d106803660 --- /dev/null +++ b/historyarchive/history_archive_state.go @@ -0,0 +1,116 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +const NumLevels = 11 + +type HistoryArchiveState struct { + Version int `json:"version"` + Server string `json:"server"` + CurrentLedger uint32 `json:"currentLedger"` + // NetworkPassphrase was added in Stellar-Core v14.1.0. Can be missing + // in HAS created by previous versions. 
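	// (Context from archive_test.go in this change: when both this field and
	// ConnectOptions.NetworkPassphrase are non-empty, GetPathHAS compares them
	// and returns a "Network passphrase does not match" error on disagreement;
	// if either value is empty, the check is skipped.)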
+ NetworkPassphrase string `json:"networkPassphrase"` + CurrentBuckets [NumLevels]struct { + Curr string `json:"curr"` + Snap string `json:"snap"` + Next struct { + State uint32 `json:"state"` + Output string `json:"output,omitempty"` + } `json:"next"` + } `json:"currentBuckets"` +} + +func (h *HistoryArchiveState) LevelSummary() (string, int, error) { + summ := "" + nz := 0 + for _, b := range h.CurrentBuckets { + state := '_' + for _, bs := range []string{ + b.Curr, b.Snap, b.Next.Output, + } { + // Ignore empty values + if bs == "" { + continue + } + + h, err := DecodeHash(bs) + if err != nil { + return summ, nz, err + } + + if !h.IsZero() { + state = '#' + } + } + if state != '_' { + nz += 1 + } + summ += string(state) + } + return summ, nz, nil +} + +func (h *HistoryArchiveState) Buckets() ([]Hash, error) { + r := []Hash{} + for _, b := range h.CurrentBuckets { + for _, bs := range []string{ + b.Curr, b.Snap, b.Next.Output, + } { + // Ignore empty values + if bs == "" { + continue + } + + h, err := DecodeHash(bs) + if err != nil { + return r, err + } + if !h.IsZero() { + r = append(r, h) + } + } + } + return r, nil +} + +// BucketListHash calculates the hash of bucket list in the HistoryArchiveState. +// This can be later compared with LedgerHeader.BucketListHash of the checkpoint +// ledger to ensure data in history archive has not been changed by a malicious +// actor. +// Warning: Ledger header should be fetched from a trusted (!) stellar-core +// instead of ex. history archives! +func (h *HistoryArchiveState) BucketListHash() (xdr.Hash, error) { + total := []byte{} + + for i, b := range h.CurrentBuckets { + curr, err := hex.DecodeString(b.Curr) + if err != nil { + return xdr.Hash{}, errors.Wrap(err, fmt.Sprintf("Error decoding hex of %d.curr", i)) + } + snap, err := hex.DecodeString(b.Snap) + if err != nil { + return xdr.Hash{}, errors.Wrap(err, fmt.Sprintf("Error decoding hex of %d.snap", i)) + } + both := append(curr, snap...) + bothHash := sha256.Sum256(both) + total = append(total, bothHash[:]...) + } + + return sha256.Sum256(total), nil +} + +func (h *HistoryArchiveState) Range() Range { + return Range{Low: 63, High: h.CurrentLedger} +} diff --git a/historyarchive/history_archive_state_test.go b/historyarchive/history_archive_state_test.go new file mode 100644 index 0000000000..c797cfc3cb --- /dev/null +++ b/historyarchive/history_archive_state_test.go @@ -0,0 +1,158 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "encoding/hex" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUnmarshalState(t *testing.T) { + var jsonBlob = []byte(`{ + "version": 1, + "server": "v0.4.0-34-g2f015f6", + "currentLedger": 2113919, + "currentBuckets": [ + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 1, + "output": "0000000000000000000000000000000000000000000000000000000000000000" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }`) + + var state HistoryArchiveState + err := json.Unmarshal(jsonBlob, &state) + if err != nil { + t.Error(err) + } else if state.CurrentLedger != 2113919 { + t.Error(state) + } +} + +func TestHashValidation(t *testing.T) { + // This is real bucket hash list for pubnet's ledger: 24088895 + // https://horizon.stellar.org/ledgers/24088895 + // http://history.stellar.org/prd/core-live/core_live_001/history/01/6f/91/history-016f913f.json + var jsonBlob = []byte(`{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 24088895, + "currentBuckets": [ + { + "curr": "2a4416e7f3e301c2fc1078dce0e1dd109b8ae6d3958942b91b447f24014a7b5c", + "next": { + "state": 0 + }, + "snap": "7ff95a98838dfd39a36858f15c8d503641560f02a52aa15335559e1183ce2ca1" + }, + { + "curr": "2c7e74c4c5555e41b39a5fc04e77e77852c35e7769e32b486e07a072b9b3177c", + "next": { + "state": 1, + "output": "7ff95a98838dfd39a36858f15c8d503641560f02a52aa15335559e1183ce2ca1" + }, + "snap": "5f0bc7d0bd9e8ed6530fc270339b7dd2fbcedf0d80235f5ef64daa90b84259f4" + }, + { + "curr": "068f2a1ece2817c98c0d21d5ac20817637c331df6793d0ff3e874e29da5d65b1", + "next": { + "state": 1, + "output": "e93d50365d74d8a8dc2ff7631dfb506b7e6b2245f7f46556d407e82f197a6c59" + }, + "snap": "875cbdf9ab03c488c075a36ee3ee1e02aef9d5fe9d253a2b1f99b92fe64598b8" + }, + { + "curr": "f413ff9d27e2cad12754ff84ca905f8c309ca7b68a6fbe8e9b01ecd18f5d3759", + "next": { + "state": 1, + "output": "ffbb6cd3a4170dbf547ab0783fea90c1a57a28e562db7bcd3a079374f1e63464" + }, + "snap": "5d198cdc5a2139d92fe74f6541a098c27aba61e8aee543a6a551814aae9adb5a" + }, + { + "curr": "1c6f9ec76b06aac2aac77e9a0da2224d120dc25c1cf10211ce33475db4d66f13", + "next": { + "state": 1, + "output": "6473d4a3ff5b6448fc6dfd279ef33bf0b1524d8361b758dbde49fc84691cadbe" + }, + "snap": "6dd30650a7c8cadad545561d732828cf55cefdf5f70c615fbdc33e01c647907b" + }, + { + "curr": "b3b3c9b54db9e08f3994a17a40e5be7583c546488e88523ebf8b444ee53f4aec", + "next": { + "state": 1, + "output": "ed452df8b803190b7a1cf07894c27c03415029272e9c4a3171e7f3ad6e67c90a" + }, + "snap": "7d84d34019975b37758278e858e86265323ddbb7b46f6d679433c93bbae693ee" + }, + { + "curr": "a6c20a247ed2afc2cea7f4dc5856efa61a51b4e4b0318877eebdf8ad47be83b7", + "next": { + "state": 1, + "output": "ce9a7c779d0873ff364a9abd20007bbf7e41646ac4662eb87f89a5c39b69f70d" + }, + "snap": "285ac930ee2bd358d3202666c545fd3b94ee973d1a0cd2569de357042ec12b3d" + }, + { + "curr": "2e779b37b97052a1141a65a92c4ca14a7bd28f7c2d646749b1d584f45d50fa7b", + "next": { + "state": 1, + "output": "e4dba3994ad576489880eee38db2d8c0f8889585e932b7192dd7af168d79b43f" + }, + "snap": 
"37094a837769dbae5783dca9831be463b895f1b07d1cd24e271966e10503fdfc" + }, + { + "curr": "48f435285dd96511d0822f7ae1a20e28c6c28019e385313713655fc76fe3bc03", + "next": { + "state": 1, + "output": "11f8c2f8e1cb0d47576f74d9e2fa838f5f3a37180907a24a85d0ad8b647862e4" + }, + "snap": "96e0d8bf7d7eb775299edf285b6324499a1a05122d95eed9289c6477cf6a01cb" + }, + { + "curr": "4100ad3b1085bd14d1c808ece3b38db97171532d0d11ed5edd57aff0e416e06a", + "next": { + "state": 1, + "output": "5f351041761b45f3e725f98bb8b6713873e30ab6c8aee56ba0823d357c7ebd0d" + }, + "snap": "23669fa3d310ca8ac8dbe9dcce7e4e4361b1c3334da1dda2fb6447a30c67422f" + }, + { + "curr": "14cc632ab181396418fc761503105047e3b63d0455d0a4e9480578129ea8e9dc", + "next": { + "state": 1, + "output": "a4811c9ba9505e421f0015e5fcfd9f5d204ae85b584766759e844ef85db10d47" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}`) + + var state HistoryArchiveState + err := json.Unmarshal(jsonBlob, &state) + require.NoError(t, err) + + expectedHash, err := hex.DecodeString("fc5fe47af3f5a9b18b278f2a7edbbc641e1934bf68131d9aa5ab7aebb4aa8aa3") + require.NoError(t, err) + + hash, err := state.BucketListHash() + require.NoError(t, err) + assert.Equal(t, expectedHash, hash[:]) +} diff --git a/historyarchive/http_archive.go b/historyarchive/http_archive.go new file mode 100644 index 0000000000..e1f5f8f49f --- /dev/null +++ b/historyarchive/http_archive.go @@ -0,0 +1,133 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "path" + + "github.com/stellar/go/support/errors" +) + +type HttpArchiveBackend struct { + ctx context.Context + client http.Client + base url.URL +} + +func checkResp(r *http.Response) error { + if r.StatusCode >= 200 && r.StatusCode < 400 { + return nil + } else { + return fmt.Errorf("Bad HTTP response '%s' for %s '%s'", + r.Status, r.Request.Method, r.Request.URL.String()) + } +} + +func (b *HttpArchiveBackend) GetFile(pth string) (io.ReadCloser, error) { + var derived url.URL = b.base + derived.Path = path.Join(derived.Path, pth) + req, err := http.NewRequest("GET", derived.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(b.ctx) + logReq(req) + resp, err := b.client.Do(req) + logResp(resp) + if err != nil { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return nil, err + } + err = checkResp(resp) + if err != nil { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return nil, err + } + return resp.Body, nil +} + +func (b *HttpArchiveBackend) Head(pth string) (*http.Response, error) { + var derived url.URL = b.base + derived.Path = path.Join(derived.Path, pth) + req, err := http.NewRequest("HEAD", derived.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(b.ctx) + logReq(req) + resp, err := b.client.Do(req) + logResp(resp) + if err != nil { + return nil, err + } + + if resp.Body != nil { + resp.Body.Close() + } + + return resp, nil +} + +func (b *HttpArchiveBackend) Exists(pth string) (bool, error) { + resp, err := b.Head(pth) + if err != nil { + return false, err + } + if resp.StatusCode >= 200 && resp.StatusCode < 400 { + return true, nil + } else if resp.StatusCode == http.StatusNotFound { + return false, nil + } else { + return false, errors.Errorf("Unkown status 
code=%d", resp.StatusCode) + } +} + +func (b *HttpArchiveBackend) Size(pth string) (int64, error) { + resp, err := b.Head(pth) + if err != nil { + return 0, err + } + if resp.StatusCode >= 200 && resp.StatusCode < 400 { + return resp.ContentLength, nil + } else if resp.StatusCode == http.StatusNotFound { + return 0, nil + } else { + return 0, errors.Errorf("Unkown status code=%d", resp.StatusCode) + } +} + +func (b *HttpArchiveBackend) PutFile(pth string, in io.ReadCloser) error { + in.Close() + return errors.New("PutFile not available over HTTP") +} + +func (b *HttpArchiveBackend) ListFiles(pth string) (chan string, chan error) { + ch := make(chan string) + er := make(chan error) + close(ch) + er <- errors.New("ListFiles not available over HTTP") + close(er) + return ch, er +} + +func (b *HttpArchiveBackend) CanListFiles() bool { + return false +} + +func makeHttpBackend(base *url.URL, opts ConnectOptions) ArchiveBackend { + return &HttpArchiveBackend{ + ctx: opts.Context, + base: *base, + } +} diff --git a/tools/stellar-archivist/internal/json.go b/historyarchive/json.go similarity index 59% rename from tools/stellar-archivist/internal/json.go rename to historyarchive/json.go index f2057f5006..73bcfa11f1 100644 --- a/tools/stellar-archivist/internal/json.go +++ b/historyarchive/json.go @@ -2,7 +2,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( "compress/gzip" @@ -17,12 +17,7 @@ import ( ) func DumpXdrAsJson(args []string) error { - var lhe xdr.LedgerHeaderHistoryEntry - var the xdr.TransactionHistoryEntry - var thre xdr.TransactionHistoryResultEntry - var bke xdr.BucketEntry - var scp xdr.ScpHistoryEntry - var tmp interface{} + var tmp xdr.DecoderFrom var rdr io.ReadCloser var err error @@ -40,23 +35,30 @@ func DumpXdrAsJson(args []string) error { } base := path.Base(arg) - if strings.HasPrefix(base, "bucket") { - tmp = &bke - } else if strings.HasPrefix(base, "ledger") { - tmp = &lhe - } else if strings.HasPrefix(base, "transactions") { - tmp = &the - } else if strings.HasPrefix(base, "results") { - tmp = &thre - } else if strings.HasPrefix(base, "scp") { - tmp = &scp - } else { - return fmt.Errorf("Error: unrecognized XDR file type %s", base) - } xr := NewXdrStream(rdr) n := 0 for { - if err = xr.ReadOne(&tmp); err != nil { + var lhe xdr.LedgerHeaderHistoryEntry + var the xdr.TransactionHistoryEntry + var thre xdr.TransactionHistoryResultEntry + var bke xdr.BucketEntry + var scp xdr.ScpHistoryEntry + + if strings.HasPrefix(base, "bucket") { + tmp = &bke + } else if strings.HasPrefix(base, "ledger") { + tmp = &lhe + } else if strings.HasPrefix(base, "transactions") { + tmp = &the + } else if strings.HasPrefix(base, "results") { + tmp = &thre + } else if strings.HasPrefix(base, "scp") { + tmp = &scp + } else { + return fmt.Errorf("Error: unrecognized XDR file type %s", base) + } + + if err = xr.ReadOne(tmp); err != nil { if err == io.EOF { break } else { diff --git a/historyarchive/log.go b/historyarchive/log.go new file mode 100644 index 0000000000..2251f8ac9e --- /dev/null +++ b/historyarchive/log.go @@ -0,0 +1,134 @@ +// Copyright 2019 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + log "github.com/sirupsen/logrus" + "github.com/stellar/go/xdr" + "io" + "time" +) + +func (has *HistoryArchiveState) GetChangedBuckets(arch *Archive, prevHas *HistoryArchiveState) (string, int, int64) { + var ( + nChangedBytes int64 + nChangedBuckets int + changedBuckets string + ) + + for i, b := range has.CurrentBuckets { + if prevHas.CurrentBuckets[i].Curr != b.Curr { + nChangedBuckets += 1 + changedBuckets += "#" + nChangedBytes += arch.MustGetBucketSize(MustDecodeHash(b.Curr)) + } else { + changedBuckets += "_" + } + if prevHas.CurrentBuckets[i].Snap != b.Snap { + nChangedBuckets += 1 + changedBuckets += "#" + nChangedBytes += arch.MustGetBucketSize(MustDecodeHash(b.Snap)) + } else { + changedBuckets += "_" + } + } + return changedBuckets, nChangedBuckets, nChangedBytes +} + +func (arch *Archive) MustGetBucketSize(hash Hash) int64 { + sz, err := arch.backend.Size(arch.GetBucketPathForHash(hash)) + if err != nil { + panic(err) + } + return sz +} + +func (arch *Archive) MustGetLedgerHeaderHistoryEntries(chk uint32) []xdr.LedgerHeaderHistoryEntry { + path := CategoryCheckpointPath("ledger", chk) + rdr, err := arch.GetXdrStream(path) + if err != nil { + panic(err) + } + defer rdr.Close() + var lhes []xdr.LedgerHeaderHistoryEntry + for { + lhe := xdr.LedgerHeaderHistoryEntry{} + if err = rdr.ReadOne(&lhe); err != nil { + if err == io.EOF { + break + } else { + panic(err) + } + } + lhes = append(lhes, lhe) + } + return lhes +} + +func (arch *Archive) MustGetTransactionHistoryEntries(chk uint32) []xdr.TransactionHistoryEntry { + path := CategoryCheckpointPath("transactions", chk) + rdr, err := arch.GetXdrStream(path) + if err != nil { + panic(err) + } + defer rdr.Close() + var thes []xdr.TransactionHistoryEntry + for { + the := xdr.TransactionHistoryEntry{} + if err = rdr.ReadOne(&the); err != nil { + if err == io.EOF { + break + } else { + panic(err) + } + } + thes = append(thes, the) + } + return thes +} + +func (arch *Archive) Log(opts *CommandOptions) error { + state, e := arch.GetRootHAS() + if e != nil { + return e + } + opts.Range = opts.Range.clamp(state.Range(), arch.checkpointManager) + + log.Printf("Log of checkpoint files in range: %s", opts.Range) + log.Printf("\n") + log.Printf("%10s | %10s | %20s | %5s | %s", + "ledger", "hex", "close time", "txs", "buckets changed") + + prevHas, err := arch.GetCheckpointHAS(arch.checkpointManager.PrevCheckpoint(opts.Range.Low)) + if err != nil { + return err + } + + for chk := range opts.Range.GenerateCheckpoints(arch.checkpointManager) { + has, err := arch.GetCheckpointHAS(chk) + if err != nil { + return err + } + + changedBuckets, nChangedBuckets, nChangedBytes := has.GetChangedBuckets(arch, &prevHas) + prevHas = has + + lhes := arch.MustGetLedgerHeaderHistoryEntries(chk) + lastlhe := lhes[len(lhes)-1] + closeTime := time.Unix(int64(lastlhe.Header.ScpValue.CloseTime), 0) + + nTxs := 0 + thes := arch.MustGetTransactionHistoryEntries(chk) + for _, tx := range thes { + nTxs += len(tx.TxSet.Txs) + } + + log.Printf("%10d | 0x%08x | %20s | %5d | %2d buckets, %10d bytes, %s", + has.CurrentLedger, has.CurrentLedger, + closeTime.UTC().Format(time.RFC3339), + nTxs, nChangedBuckets, nChangedBytes, changedBuckets) + } + return nil +} diff --git a/tools/stellar-archivist/internal/mirror.go b/historyarchive/mirror.go similarity index 50% rename from tools/stellar-archivist/internal/mirror.go rename to 
historyarchive/mirror.go index 05683cd3da..30c9badcd3 100644 --- a/tools/stellar-archivist/internal/mirror.go +++ b/historyarchive/mirror.go @@ -2,22 +2,25 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( - "log" "fmt" + "log" "sync" "sync/atomic" + + "github.com/stellar/go/support/errors" ) +// Mirror mirrors an archive, it assumes that the source and destination have the same checkpoint ledger frequency func Mirror(src *Archive, dst *Archive, opts *CommandOptions) error { rootHAS, e := src.GetRootHAS() if e != nil { return e } - opts.Range = opts.Range.Clamp(rootHAS.Range()) + opts.Range = opts.Range.clamp(rootHAS.Range(), src.checkpointManager) log.Printf("copying range %s\n", opts.Range) @@ -29,31 +32,36 @@ func Mirror(src *Archive, dst *Archive, opts *CommandOptions) error { var errs uint32 tick := makeTicker(func(ticks uint) { bucketFetchMutex.Lock() - sz := opts.Range.Size() + sz := opts.Range.SizeInCheckPoints(src.checkpointManager) log.Printf("Copied %d/%d checkpoints (%f%%), %d buckets", ticks, sz, - 100.0 * float64(ticks)/float64(sz), + 100.0*float64(ticks)/float64(sz), len(bucketFetch)) bucketFetchMutex.Unlock() }) - var wg sync.WaitGroup - checkpoints := opts.Range.Checkpoints() + checkpoints := opts.Range.GenerateCheckpoints(src.checkpointManager) wg.Add(opts.Concurrency) for i := 0; i < opts.Concurrency; i++ { go func() { for { - ix, ok := <- checkpoints + ix, ok := <-checkpoints if !ok { break } - has, e := src.GetCheckpointHAS(ix) - if e != nil { - atomic.AddUint32(&errs, noteError(e)) + has, err := src.GetCheckpointHAS(ix) + if err != nil { + atomic.AddUint32(&errs, noteError(err)) continue } - for _, bucket := range has.Buckets() { + + buckets, err := has.Buckets() + if err != nil { + panic(errors.Wrap(err, "error getting buckets")) + } + + for _, bucket := range buckets { alreadyFetching := false bucketFetchMutex.Lock() _, alreadyFetching = bucketFetch[bucket] @@ -63,18 +71,18 @@ func Mirror(src *Archive, dst *Archive, opts *CommandOptions) error { bucketFetchMutex.Unlock() if !alreadyFetching { pth := BucketPath(bucket) - e = copyPath(src, dst, pth, opts) - atomic.AddUint32(&errs, noteError(e)) + err = copyPath(src, dst, pth, opts) + atomic.AddUint32(&errs, noteError(err)) } } for _, cat := range Categories() { pth := CategoryCheckpointPath(cat, ix) - e = copyPath(src, dst, pth, opts) - if e != nil && !categoryRequired(cat) { + err = copyPath(src, dst, pth, opts) + if err != nil && !categoryRequired(cat) { continue } - atomic.AddUint32(&errs, noteError(e)) + atomic.AddUint32(&errs, noteError(err)) } tick <- true } @@ -83,11 +91,23 @@ func Mirror(src *Archive, dst *Archive, opts *CommandOptions) error { } wg.Wait() - log.Printf("Copied %d checkpoints, %d buckets", - opts.Range.Size(), len(bucketFetch)) + log.Printf("copied %d checkpoints, %d buckets, range %s", + opts.Range.SizeInCheckPoints(src.checkpointManager), len(bucketFetch), opts.Range) close(tick) - e = dst.PutRootHAS(rootHAS, opts) - errs += noteError(e) + if rootHAS.CurrentLedger == opts.Range.High { + log.Printf("updating destination archive current-ledger pointer to 0x%8.8x", + rootHAS.CurrentLedger) + e = dst.PutRootHAS(rootHAS, opts) + errs += noteError(e) + } else { + dstHAS, e := dst.GetRootHAS() + if e != nil { + errs += noteError(e) + } else { + log.Printf("leaving destination archive current-ledger pointer at 0x%8.8x", + dstHAS.CurrentLedger) + } + 
} if errs != 0 { return fmt.Errorf("%d errors while mirroring", errs) } diff --git a/tools/stellar-archivist/internal/mock_archive.go b/historyarchive/mock_archive.go similarity index 79% rename from tools/stellar-archivist/internal/mock_archive.go rename to historyarchive/mock_archive.go index 63e3aefeb7..fc8095bbb4 100644 --- a/tools/stellar-archivist/internal/mock_archive.go +++ b/historyarchive/mock_archive.go @@ -2,14 +2,14 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( "bytes" - "strings" + "errors" "io" "io/ioutil" - "errors" + "strings" "sync" ) @@ -18,11 +18,22 @@ type MockArchiveBackend struct { files map[string][]byte } -func (b *MockArchiveBackend) Exists(pth string) bool { +func (b *MockArchiveBackend) Exists(pth string) (bool, error) { b.mutex.Lock() defer b.mutex.Unlock() _, ok := b.files[pth] - return ok + return ok, nil +} + +func (b *MockArchiveBackend) Size(pth string) (int64, error) { + b.mutex.Lock() + defer b.mutex.Unlock() + f, ok := b.files[pth] + sz := int64(0) + if ok { + sz = int64(len(f)) + } + return sz, nil } func (b *MockArchiveBackend) GetFile(pth string) (io.ReadCloser, error) { @@ -53,7 +64,7 @@ func (b *MockArchiveBackend) ListFiles(pth string) (chan string, chan error) { ch := make(chan string) errs := make(chan error) files := make([]string, 0, len(b.files)) - for k, _ := range b.files { + for k := range b.files { files = append(files, k) } go func() { @@ -72,7 +83,7 @@ func (b *MockArchiveBackend) CanListFiles() bool { return true } -func MakeMockBackend(opts *ConnectOptions) ArchiveBackend { +func makeMockBackend(opts ConnectOptions) ArchiveBackend { b := new(MockArchiveBackend) b.files = make(map[string][]byte) return b diff --git a/historyarchive/mocks.go b/historyarchive/mocks.go new file mode 100644 index 0000000000..3952211cd3 --- /dev/null +++ b/historyarchive/mocks.go @@ -0,0 +1,105 @@ +package historyarchive + +import ( + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/mock" +) + +// MockArchive is a mockable archive. 
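A sketch of how the mock defined below is typically wired up with testify expectations in a consumer's test (the package name and expected values are made up for illustration):

	package consumer_test

	import (
		"testing"

		"github.com/stellar/go/historyarchive"
		"github.com/stretchr/testify/assert"
	)

	func TestConsumerReadsRootHAS(t *testing.T) {
		mockArchive := &historyarchive.MockArchive{}
		// The stubbed return values are type-asserted back out by the
		// hand-written methods below (a.Get(0).(HistoryArchiveState)).
		mockArchive.On("GetRootHAS").
			Return(historyarchive.HistoryArchiveState{CurrentLedger: 0xbf}, nil)

		has, err := mockArchive.GetRootHAS()
		assert.NoError(t, err)
		assert.Equal(t, uint32(0xbf), has.CurrentLedger)
		mockArchive.AssertExpectations(t)
	}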
+type MockArchive struct { + mock.Mock +} + +func (m *MockArchive) GetCheckpointManager() CheckpointManager { + a := m.Called() + return a.Get(0).(CheckpointManager) +} + +func (m *MockArchive) GetPathHAS(path string) (HistoryArchiveState, error) { + a := m.Called(path) + return a.Get(0).(HistoryArchiveState), a.Error(1) +} + +func (m *MockArchive) PutPathHAS(path string, has HistoryArchiveState, opts *CommandOptions) error { + a := m.Called(path, has, opts) + return a.Error(0) +} + +func (m *MockArchive) BucketExists(bucket Hash) (bool, error) { + a := m.Called(bucket) + return a.Get(0).(bool), a.Error(1) +} + +func (m *MockArchive) BucketSize(bucket Hash) (int64, error) { + a := m.Called(bucket) + return a.Get(0).(int64), a.Error(1) +} + +func (m *MockArchive) CategoryCheckpointExists(cat string, chk uint32) (bool, error) { + a := m.Called(cat, chk) + return a.Get(0).(bool), a.Error(1) +} + +func (m *MockArchive) GetLedgerHeader(chk uint32) (xdr.LedgerHeaderHistoryEntry, error) { + a := m.Called(chk) + return a.Get(0).(xdr.LedgerHeaderHistoryEntry), a.Error(1) +} + +func (m *MockArchive) GetLedgers(start, end uint32) (map[uint32]*Ledger, error) { + a := m.Called(start, end) + return a.Get(0).(map[uint32]*Ledger), a.Error(1) +} + +func (m *MockArchive) GetRootHAS() (HistoryArchiveState, error) { + a := m.Called() + return a.Get(0).(HistoryArchiveState), a.Error(1) +} + +func (m *MockArchive) GetCheckpointHAS(chk uint32) (HistoryArchiveState, error) { + a := m.Called(chk) + return a.Get(0).(HistoryArchiveState), a.Error(1) +} + +func (m *MockArchive) PutCheckpointHAS(chk uint32, has HistoryArchiveState, opts *CommandOptions) error { + a := m.Called(chk, has, opts) + return a.Error(0) +} + +func (m *MockArchive) PutRootHAS(has HistoryArchiveState, opts *CommandOptions) error { + a := m.Called(has, opts) + return a.Error(0) +} + +func (m *MockArchive) ListBucket(dp DirPrefix) (chan string, chan error) { + m.Called(dp) + panic("Returning channels not implemented") + return make(chan string), make(chan error) +} + +func (m *MockArchive) ListAllBuckets() (chan string, chan error) { + m.Called() + panic("Returning channels not implemented") + return make(chan string), make(chan error) +} + +func (m *MockArchive) ListAllBucketHashes() (chan Hash, chan error) { + m.Called() + panic("Returning channels not implemented") + return make(chan Hash), make(chan error) +} + +func (m *MockArchive) ListCategoryCheckpoints(cat string, pth string) (chan uint32, chan error) { + m.Called(cat, pth) + panic("Returning channels not implemented") + return make(chan uint32), make(chan error) +} + +func (m *MockArchive) GetXdrStreamForHash(hash Hash) (*XdrStream, error) { + a := m.Called(hash) + return a.Get(0).(*XdrStream), a.Error(1) +} + +func (m *MockArchive) GetXdrStream(pth string) (*XdrStream, error) { + a := m.Called(pth) + return a.Get(0).(*XdrStream), a.Error(1) +} diff --git a/historyarchive/range.go b/historyarchive/range.go new file mode 100644 index 0000000000..2610bf960e --- /dev/null +++ b/historyarchive/range.go @@ -0,0 +1,165 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "fmt" + "sort" + "strings" +) + +const DefaultCheckpointFrequency = uint32(64) + +type Range struct { + Low uint32 + High uint32 +} + +type CheckpointManager struct { + checkpointFreq uint32 +} + +// NewCheckpointManager creates a CheckpointManager based on a checkpoint frequency +// (the number of ledgers between ledger checkpoints). If checkpointFrequency is +// 0 DefaultCheckpointFrequency will be used. +func NewCheckpointManager(checkpointFrequency uint32) CheckpointManager { + if checkpointFrequency == 0 { + checkpointFrequency = DefaultCheckpointFrequency + } + return CheckpointManager{checkpointFrequency} +} + +func (c CheckpointManager) GetCheckpointFrequency() uint32 { + return c.checkpointFreq +} + +func (c CheckpointManager) IsCheckpoint(i uint32) bool { + return (i+1)%c.checkpointFreq == 0 +} + +func (c CheckpointManager) PrevCheckpoint(i uint32) uint32 { + freq := c.checkpointFreq + if i < freq { + return freq - 1 + } + return (((i + 1) / freq) * freq) - 1 +} + +func (c CheckpointManager) NextCheckpoint(i uint32) uint32 { + if i == 0 { + return c.checkpointFreq - 1 + } + freq := uint64(c.checkpointFreq) + v := uint64(i) + n := (((v + freq) / freq) * freq) - 1 + if n >= 0xffffffff { + return 0xffffffff + } + return uint32(n) +} + +// GetCheckPoint gets the checkpoint containing information about the given ledger sequence +func (c CheckpointManager) GetCheckpoint(i uint32) uint32 { + return c.NextCheckpoint(i) +} + +// GetCheckpointRange gets the range of the checkpoint containing information for the given ledger sequence +func (c CheckpointManager) GetCheckpointRange(i uint32) Range { + checkpoint := c.GetCheckpoint(i) + low := checkpoint - c.checkpointFreq + 1 + if low == 0 { + // ledger 0 does not exist + low++ + } + return Range{ + Low: low, + High: checkpoint, + } +} + +func (c CheckpointManager) MakeRange(low uint32, high uint32) Range { + if high < low { + high = low + } + return Range{ + Low: c.PrevCheckpoint(low), + High: c.NextCheckpoint(high), + } +} + +func (r Range) clamp(other Range, cManager CheckpointManager) Range { + low := r.Low + high := r.High + if low < other.Low { + low = other.Low + } + if high > other.High { + high = other.High + } + return cManager.MakeRange(low, high) +} + +func (r Range) String() string { + return fmt.Sprintf("[0x%8.8x, 0x%8.8x]", r.Low, r.High) +} + +func (r Range) GenerateCheckpoints(cManager CheckpointManager) chan uint32 { + ch := make(chan uint32) + go func() { + for i := uint64(r.Low); i <= uint64(r.High); i += uint64(cManager.checkpointFreq) { + ch <- uint32(i) + } + close(ch) + }() + return ch +} + +func (r Range) SizeInCheckPoints(cManager CheckpointManager) int { + return 1 + (int(r.High-r.Low) / int(cManager.checkpointFreq)) +} + +func (r Range) collapsedString() string { + if r.Low == r.High { + return fmt.Sprintf("0x%8.8x", r.Low) + } else { + return fmt.Sprintf("[0x%8.8x-0x%8.8x]", r.Low, r.High) + } +} + +func (r Range) InRange(sequence uint32) bool { + return sequence >= r.Low && sequence <= r.High +} + +type byUint32 []uint32 + +func (a byUint32) Len() int { return len(a) } +func (a byUint32) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byUint32) Less(i, j int) bool { return a[i] < a[j] } + +func fmtRangeList(vs []uint32, cManager CheckpointManager) string { + + sort.Sort(byUint32(vs)) + + s := make([]string, 0, 10) + var curr *Range + + for _, t := range vs { + if 
curr != nil { + if curr.High+cManager.checkpointFreq == t { + curr.High = t + continue + } else { + s = append(s, curr.collapsedString()) + curr = nil + } + } + curr = &Range{Low: t, High: t} + } + if curr != nil { + s = append(s, curr.collapsedString()) + } + + return strings.Join(s, ", ") +} diff --git a/historyarchive/range_test.go b/historyarchive/range_test.go new file mode 100644 index 0000000000..5cdfde5793 --- /dev/null +++ b/historyarchive/range_test.go @@ -0,0 +1,94 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func (r Range) allCheckpoints() []uint32 { + var s []uint32 + mgr := NewCheckpointManager(64) + for chk := range r.GenerateCheckpoints(mgr) { + s = append(s, chk) + } + return s +} + +func TestRangeSize(t *testing.T) { + mgr := NewCheckpointManager(64) + + assert.Equal(t, 1, + mgr.MakeRange(0x3f, 0x3f).SizeInCheckPoints(mgr)) + + assert.Equal(t, 2, + mgr.MakeRange(0x3f, 0x7f).SizeInCheckPoints(mgr)) + + assert.Equal(t, 2, + mgr.MakeRange(0, 100).SizeInCheckPoints(mgr)) + + assert.Equal(t, 4, + mgr.MakeRange(0xff3f, 0xffff).SizeInCheckPoints(mgr)) +} + +func TestRangeEnumeration(t *testing.T) { + + mgr := NewCheckpointManager(64) + + assert.Equal(t, + []uint32{0x3f, 0x7f}, + mgr.MakeRange(0x3f, 0x7f).allCheckpoints()) + + assert.Equal(t, + []uint32{0x3f}, + mgr.MakeRange(0x3f, 0x3f).allCheckpoints()) + + assert.Equal(t, + []uint32{0x3f}, + mgr.MakeRange(0, 0).allCheckpoints()) + + assert.Equal(t, + []uint32{0x3f, 0x7f}, + mgr.MakeRange(0, 0x40).allCheckpoints()) + + assert.Equal(t, + []uint32{0xff}, + mgr.MakeRange(0xff, 0x40).allCheckpoints()) +} + +func TestFmtRangeList(t *testing.T) { + + mgr := NewCheckpointManager(64) + + assert.Equal(t, + "", + fmtRangeList([]uint32{}, mgr)) + + assert.Equal(t, + "0x0000003f", + fmtRangeList([]uint32{0x3f}, mgr)) + + assert.Equal(t, + "[0x0000003f-0x0000007f]", + fmtRangeList([]uint32{0x3f, 0x7f}, mgr)) + + assert.Equal(t, + "[0x0000003f-0x000000bf]", + fmtRangeList([]uint32{0x3f, 0x7f, 0xbf}, mgr)) + + assert.Equal(t, + "[0x0000003f-0x0000007f], 0x000000ff", + fmtRangeList([]uint32{0x3f, 0x7f, 0xff}, mgr)) + + assert.Equal(t, + "[0x0000003f-0x0000007f], [0x000000ff-0x0000017f]", + fmtRangeList([]uint32{0x3f, 0x7f, 0xff, 0x13f, 0x17f}, mgr)) + + assert.Equal(t, + "[0x0000003f-0x0000007f], 0x000000ff, [0x0000017f-0x000001bf]", + fmtRangeList([]uint32{0x3f, 0x7f, 0xff, 0x17f, 0x1bf}, mgr)) +} diff --git a/tools/stellar-archivist/internal/repair.go b/historyarchive/repair.go similarity index 73% rename from tools/stellar-archivist/internal/repair.go rename to historyarchive/repair.go index f96b03ffe4..80bb873bcc 100644 --- a/tools/stellar-archivist/internal/repair.go +++ b/historyarchive/repair.go @@ -2,19 +2,21 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( - "log" "fmt" + log "github.com/sirupsen/logrus" ) +// Repair repairs a destination archive based on a source archive, it assumes that the source and destination have the +// same checkpoint ledger frequency func Repair(src *Archive, dst *Archive, opts *CommandOptions) error { state, e := dst.GetRootHAS() if e != nil { return e } - opts.Range = opts.Range.Clamp(state.Range()) + opts.Range = opts.Range.clamp(state.Range(), src.checkpointManager) log.Printf("Starting scan for repair") var errs uint32 @@ -27,8 +29,12 @@ func Repair(src *Archive, dst *Archive, opts *CommandOptions) error { for cat, missing := range missingCheckpointFiles { for _, chk := range missing { pth := CategoryCheckpointPath(cat, chk) - if !categoryRequired(cat) && !src.backend.Exists(pth) { - log.Printf("Skipping nonexistent, optional %s file %s", cat, pth) + exists, err := src.backend.Exists(pth) + if err != nil { + return err + } + if !categoryRequired(cat) && !exists { + log.Warnf("Skipping nonexistent, optional %s file %s", cat, pth) continue } log.Printf("Repairing %s", pth) @@ -50,7 +56,7 @@ func Repair(src *Archive, dst *Archive, opts *CommandOptions) error { log.Printf("Examining buckets referenced by checkpoints") missingBuckets := dst.CheckBucketsMissing() - for bkt, _ := range missingBuckets { + for bkt := range missingBuckets { pth := BucketPath(bkt) log.Printf("Repairing %s", pth) errs += noteError(copyPath(src, dst, pth, opts)) @@ -61,4 +67,3 @@ func Repair(src *Archive, dst *Archive, opts *CommandOptions) error { } return nil } - diff --git a/historyarchive/s3_archive.go b/historyarchive/s3_archive.go new file mode 100644 index 0000000000..3ed9d8eab9 --- /dev/null +++ b/historyarchive/s3_archive.go @@ -0,0 +1,220 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "bytes" + "context" + log "github.com/sirupsen/logrus" + "io" + "net/http" + "path" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/stellar/go/support/errors" +) + +type S3ArchiveBackend struct { + ctx context.Context + svc *s3.S3 + bucket string + prefix string + unsignedRequests bool +} + +func (b *S3ArchiveBackend) GetFile(pth string) (io.ReadCloser, error) { + key := path.Join(b.prefix, pth) + params := &s3.GetObjectInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(key), + } + + req, resp := b.svc.GetObjectRequest(params) + if b.unsignedRequests { + req.Handlers.Sign.Clear() // makes this request unsigned + } + req.SetContext(b.ctx) + logReq(req.HTTPRequest) + err := req.Send() + logResp(req.HTTPResponse) + if err != nil { + return nil, err + } + + return resp.Body, nil +} + +func (b *S3ArchiveBackend) Head(pth string) (*http.Response, error) { + key := path.Join(b.prefix, pth) + params := &s3.HeadObjectInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(key), + } + + req, _ := b.svc.HeadObjectRequest(params) + if b.unsignedRequests { + req.Handlers.Sign.Clear() // makes this request unsigned + } + req.SetContext(b.ctx) + logReq(req.HTTPRequest) + err := req.Send() + logResp(req.HTTPResponse) + + if req.HTTPResponse != nil && req.HTTPResponse.StatusCode == http.StatusNotFound { + // Lately the S3 SDK has started treating a 404 as generating a non-nil + // 'err', so we have to test for this _before_ we test 'err' for + // nil-ness. This is undocumented, as is the err.Code returned in that + // error ("NotFound"), and it's a breaking change from what it used to + // do, and not what one would expect, but who's counting? We'll just + // turn it _back_ into what it used to be: 404 as a non-erroneously + // received HTTP response. 
+ return req.HTTPResponse, nil + } + + if err != nil { + return nil, err + } + return req.HTTPResponse, nil +} + +func (b *S3ArchiveBackend) Exists(pth string) (bool, error) { + resp, err := b.Head(pth) + if err != nil { + return false, err + } + if resp.StatusCode >= 200 && resp.StatusCode < 400 { + return true, nil + } else if resp.StatusCode == http.StatusNotFound { + return false, nil + } else { + return false, errors.Errorf("Unkown status code=%d", resp.StatusCode) + } +} + +func (b *S3ArchiveBackend) Size(pth string) (int64, error) { + resp, err := b.Head(pth) + if err != nil { + return 0, err + } + if resp.StatusCode >= 200 && resp.StatusCode < 400 { + return resp.ContentLength, nil + } else if resp.StatusCode == http.StatusNotFound { + return 0, nil + } else { + return 0, errors.Errorf("Unkown status code=%d", resp.StatusCode) + } +} + +func (b *S3ArchiveBackend) PutFile(pth string, in io.ReadCloser) error { + var buf bytes.Buffer + _, err := buf.ReadFrom(in) + in.Close() + if err != nil { + return err + } + key := path.Join(b.prefix, pth) + params := &s3.PutObjectInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(key), + ACL: aws.String(s3.ObjectCannedACLPublicRead), + Body: bytes.NewReader(buf.Bytes()), + } + req, _ := b.svc.PutObjectRequest(params) + if b.unsignedRequests { + req.Handlers.Sign.Clear() // makes this request unsigned + } + req.SetContext(b.ctx) + logReq(req.HTTPRequest) + err = req.Send() + logResp(req.HTTPResponse) + + in.Close() + return err +} + +func (b *S3ArchiveBackend) ListFiles(pth string) (chan string, chan error) { + prefix := path.Join(b.prefix, pth) + ch := make(chan string) + errs := make(chan error) + + params := &s3.ListObjectsInput{ + Bucket: aws.String(b.bucket), + MaxKeys: aws.Int64(1000), + Prefix: aws.String(prefix), + } + req, resp := b.svc.ListObjectsRequest(params) + if b.unsignedRequests { + req.Handlers.Sign.Clear() // makes this request unsigned + } + req.SetContext(b.ctx) + logReq(req.HTTPRequest) + err := req.Send() + logResp(req.HTTPResponse) + if err != nil { + errs <- err + close(ch) + close(errs) + return ch, errs + } + go func() { + for { + for _, c := range resp.Contents { + params.Marker = c.Key + log.WithField("key", *c.Key).Trace("s3: ListFiles") + ch <- *c.Key + } + if *resp.IsTruncated { + req, resp = b.svc.ListObjectsRequest(params) + if b.unsignedRequests { + req.Handlers.Sign.Clear() // makes this request unsigned + } + req.SetContext(b.ctx) + logReq(req.HTTPRequest) + err := req.Send() + logResp(req.HTTPResponse) + if err != nil { + errs <- err + } + } else { + break + } + } + close(ch) + close(errs) + }() + return ch, errs +} + +func (b *S3ArchiveBackend) CanListFiles() bool { + return true +} + +func makeS3Backend(bucket string, prefix string, opts ConnectOptions) (ArchiveBackend, error) { + log.WithFields(log.Fields{"bucket": bucket, + "prefix": prefix, + "region": opts.S3Region, + "endpoint": opts.S3Endpoint}).Debug("s3: making backend") + cfg := &aws.Config{ + Region: aws.String(opts.S3Region), + Endpoint: aws.String(opts.S3Endpoint), + } + cfg = cfg.WithS3ForcePathStyle(true) + + sess, err := session.NewSession(cfg) + if err != nil { + return nil, err + } + + backend := S3ArchiveBackend{ + ctx: opts.Context, + svc: s3.New(sess), + bucket: bucket, + prefix: prefix, + unsignedRequests: opts.UnsignedRequests, + } + return &backend, nil +} diff --git a/tools/stellar-archivist/internal/scan.go b/historyarchive/scan.go similarity index 78% rename from tools/stellar-archivist/internal/scan.go rename to 
historyarchive/scan.go index 7b56568abb..d1406129c5 100644 --- a/tools/stellar-archivist/internal/scan.go +++ b/historyarchive/scan.go @@ -2,12 +2,12 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( "errors" "fmt" - "log" + log "github.com/sirupsen/logrus" "strings" "sync" "sync/atomic" @@ -28,7 +28,7 @@ func (arch *Archive) ScanCheckpoints(opts *CommandOptions) error { if e != nil { return e } - opts.Range = opts.Range.Clamp(state.Range()) + opts.Range = opts.Range.clamp(state.Range(), arch.checkpointManager) log.Printf("Scanning checkpoint files in range: %s", opts.Range) @@ -58,7 +58,7 @@ func (arch *Archive) ScanCheckpointsSlow(opts *CommandOptions) error { cats := Categories() go func() { for _, cat := range cats { - for chk := range opts.Range.Checkpoints() { + for chk := range opts.Range.GenerateCheckpoints(arch.checkpointManager) { req <- scanCheckpointSlowReq{category: cat, checkpoint: chk} } } @@ -72,7 +72,10 @@ func (arch *Archive) ScanCheckpointsSlow(opts *CommandOptions) error { if !ok { break } - exists := arch.CategoryCheckpointExists(r.category, r.checkpoint) + exists, err := arch.CategoryCheckpointExists(r.category, r.checkpoint) + if err != nil { + panic(err) + } tick <- true arch.NoteCheckpointFile(r.category, r.checkpoint, exists) if exists && opts.Verify { @@ -130,6 +133,9 @@ func (arch *Archive) ScanCheckpointsFast(opts *CommandOptions) error { } ch, es := arch.ListCategoryCheckpoints(r.category, r.pathprefix) for n := range ch { + if n < opts.Range.Low || n > opts.Range.High { + continue + } tick <- true arch.NoteCheckpointFile(r.category, n, true) if opts.Verify { @@ -155,10 +161,10 @@ func (arch *Archive) ScanCheckpointsFast(opts *CommandOptions) error { func (arch *Archive) Scan(opts *CommandOptions) error { e1 := arch.ScanCheckpoints(opts) - e2 := arch.ScanBuckets(opts) if e1 != nil { return e1 } + e2 := arch.ScanBuckets(opts) if e2 != nil { return e2 } @@ -194,11 +200,22 @@ func (arch *Archive) ScanBuckets(opts *CommandOptions) error { var errs uint32 - // First scan _all_ buckets if we can; if not, we'll do an exists-check - // on each bucket as we go. But this is faster when we can do it. + // First scan _all_ buckets if we can (and should -- if asked to look at the + // entire range); if not, we'll do an exists-check on each bucket as we + // go. But this is faster when we can do it. doList := arch.backend.CanListFiles() + has, err := arch.GetRootHAS() + if err == nil { + fullRange := arch.checkpointManager.MakeRange(0, has.CurrentLedger) + doList = doList && opts.Range.SizeInCheckPoints(arch.checkpointManager) == fullRange.SizeInCheckPoints(arch.checkpointManager) + } else { + log.Print("Error retrieving root archive state, possibly corrupt archive:", err) + log.Print("Continuing and will do an exists-check on each bucket as we go, this will be slower") + } if doList { errs += noteError(arch.ScanAllBuckets()) + } else { + log.Printf("Scanning buckets for %d checkpoints", opts.Range.SizeInCheckPoints(arch.checkpointManager)) } // Grab the set of checkpoints we have HASs for, to read references. 
@@ -237,14 +254,22 @@ func (arch *Archive) ScanBuckets(opts *CommandOptions) error { } has, e := arch.GetCheckpointHAS(ix) atomic.AddUint32(&errs, noteError(e)) - for _, bucket := range has.Buckets() { + buckets, err := has.Buckets() + if err != nil { + panic(err) + } + for _, bucket := range buckets { new := arch.NoteReferencedBucket(bucket) if !new { continue } if !doList || opts.Verify { - if arch.BucketExists(bucket) { + exists, err := arch.BucketExists(bucket) + if err != nil { + panic(err) + } + if exists { if !doList { arch.NoteExistingBucket(bucket) } @@ -323,8 +348,8 @@ func (arch *Archive) NoteExistingBucket(bucket Hash) { func (arch *Archive) NoteReferencedBucket(bucket Hash) bool { arch.mutex.Lock() defer arch.mutex.Unlock() - _, exists := arch.referencedBuckets[bucket] - if exists { + _, mapEntryExists := arch.referencedBuckets[bucket] + if mapEntryExists { return false } arch.referencedBuckets[bucket] = true @@ -337,9 +362,9 @@ func (arch *Archive) CheckCheckpointFilesMissing(opts *CommandOptions) map[strin missing := make(map[string][]uint32) for _, cat := range Categories() { missing[cat] = make([]uint32, 0) - for ix := range opts.Range.Checkpoints() { - _, ok := arch.checkpointFiles[cat][ix] - if !ok { + for ix := range opts.Range.GenerateCheckpoints(arch.checkpointManager) { + fileExists := arch.checkpointFiles[cat][ix] + if !fileExists { missing[cat] = append(missing[cat], ix) } } @@ -351,16 +376,16 @@ func (arch *Archive) CheckBucketsMissing() map[Hash]bool { arch.mutex.Lock() defer arch.mutex.Unlock() missing := make(map[Hash]bool) - for k, _ := range arch.referencedBuckets { - _, ok := arch.allBuckets[k] - if !ok { + for k := range arch.referencedBuckets { + bucketExists := arch.allBuckets[k] + if !bucketExists { missing[k] = true } } return missing } -func (arch *Archive) ReportMissing(opts *CommandOptions) error { +func (arch *Archive) ReportMissing(opts *CommandOptions) (bool, error) { log.Printf("Examining checkpoint files for gaps") missingCheckpointFiles := arch.CheckCheckpointFilesMissing(opts) @@ -370,12 +395,16 @@ func (arch *Archive) ReportMissing(opts *CommandOptions) error { missingCheckpoints := false for cat, missing := range missingCheckpointFiles { if !categoryRequired(cat) { + if len(missing) > 0 { + s := fmtRangeList(missing, arch.checkpointManager) + log.Warnf("Missing non-required %s (%d): %s", cat, len(missing), s) + } continue } if len(missing) != 0 { - s := fmtRangeList(missing) + s := fmtRangeList(missing, arch.checkpointManager) missingCheckpoints = true - log.Printf("Missing %s (%d): %s", cat, len(missing), s) + log.Errorf("Missing %s (%d): %s", cat, len(missing), s) } } @@ -383,13 +412,13 @@ func (arch *Archive) ReportMissing(opts *CommandOptions) error { log.Printf("No checkpoint files missing in range %s", opts.Range) } - for bucket, _ := range missingBuckets { - log.Printf("Missing bucket: %s", bucket) + for bucket := range missingBuckets { + log.Errorf("Missing bucket: %s", bucket) } if len(missingBuckets) == 0 { log.Printf("No missing buckets referenced in range %s", opts.Range) } - return nil + return missingCheckpoints || len(missingBuckets) != 0, nil } diff --git a/tools/stellar-archivist/internal/util.go b/historyarchive/util.go similarity index 76% rename from tools/stellar-archivist/internal/util.go rename to historyarchive/util.go index 84d7a6907b..b2a7c96778 100644 --- a/tools/stellar-archivist/internal/util.go +++ b/historyarchive/util.go @@ -2,14 +2,15 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( - "path" - "log" - "fmt" "bufio" + "fmt" + log "github.com/sirupsen/logrus" "io" + "net/http" + "path" ) func makeTicker(onTick func(uint)) chan bool { @@ -18,7 +19,7 @@ func makeTicker(onTick func(uint)) chan bool { var k uint = 0 for range tick { k++ - if k & 0xfff == 0 { + if k&0xfff == 0 { onTick(k) } } @@ -38,7 +39,11 @@ func copyPath(src *Archive, dst *Archive, pth string, opts *CommandOptions) erro log.Printf("dryrun skipping " + pth) return nil } - if dst.backend.Exists(pth) && !opts.Force { + exists, err := dst.backend.Exists(pth) + if err != nil { + return err + } + if exists && !opts.Force { log.Printf("skipping existing " + pth) return nil } @@ -52,7 +57,7 @@ func copyPath(src *Archive, dst *Archive, pth string, opts *CommandOptions) erro } func Categories() []string { - return []string{ "history", "ledger", "transactions", "results", "scp"} + return []string{"history", "ledger", "transactions", "results", "scp"} } func categoryExt(n string) string { @@ -78,7 +83,6 @@ func BucketPath(bucket Hash) string { return path.Join("bucket", pre.Path(), fmt.Sprintf("bucket-%s.xdr.gz", bucket)) } - // Make a goroutine that unconditionally pulls an error channel into // (unbounded) local memory, and feeds it to a downstream consumer. This is // slightly hacky, but the alternatives are to either send {val,err} pairs @@ -120,7 +124,7 @@ func makeErrorPump(in chan error) chan error { func noteError(e error) uint32 { if e != nil { - log.Printf("Error: " + e.Error()) + log.Errorf("Error: " + e.Error()) return 1 } return 0 @@ -133,3 +137,23 @@ func drainErrors(errs chan error) uint32 { } return count } + +func logReq(r *http.Request) { + if r == nil { + return + } + logFields := log.Fields{"method": r.Method, "url": r.URL.String()} + log.WithFields(logFields).Trace("http: Req") +} + +func logResp(r *http.Response) { + if r == nil || r.Request == nil { + return + } + logFields := log.Fields{"method": r.Request.Method, "status": r.Status, "url": r.Request.URL.String()} + if r.StatusCode >= 200 && r.StatusCode < 400 { + log.WithFields(logFields).Trace("http: OK") + } else { + log.WithFields(logFields).Warn("http: Bad") + } +} diff --git a/tools/stellar-archivist/internal/verify.go b/historyarchive/verify.go similarity index 90% rename from tools/stellar-archivist/internal/verify.go rename to historyarchive/verify.go index abf6be32fd..b7171c69c6 100644 --- a/tools/stellar-archivist/internal/verify.go +++ b/historyarchive/verify.go @@ -2,7 +2,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 -package archivist +package historyarchive import ( "bytes" @@ -11,9 +11,10 @@ import ( "fmt" "hash" "io" - "log" "sort" + log "github.com/sirupsen/logrus" + "github.com/stellar/go/xdr" ) @@ -30,22 +31,22 @@ import ( // Moreover, txsets (when sorted) are _not_ hashed by simply hashing the // XDR; they have a slightly-more-manual hashing process. 
-type ByHash struct { +type byHash struct { txe []xdr.TransactionEnvelope hsh []Hash } -func (h *ByHash) Len() int { return len(h.hsh) } -func (h *ByHash) Swap(i, j int) { +func (h *byHash) Len() int { return len(h.hsh) } +func (h *byHash) Swap(i, j int) { h.txe[i], h.txe[j] = h.txe[j], h.txe[i] h.hsh[i], h.hsh[j] = h.hsh[j], h.hsh[i] } -func (h *ByHash) Less(i, j int) bool { +func (h *byHash) Less(i, j int) bool { return bytes.Compare(h.hsh[i][:], h.hsh[j][:]) < 0 } func SortTxsForHash(txset *xdr.TransactionSet) error { - bh := &ByHash{ + bh := &byHash{ txe: txset.Txs, hsh: make([]Hash, len(txset.Txs)), } @@ -138,9 +139,9 @@ func (arch *Archive) VerifyCategoryCheckpoint(cat string, chk uint32) error { } defer rdr.Close() - var tmp interface{} - step := func() error { return nil } - reset := func() {} + var tmp xdr.DecoderFrom + var step func() error + var reset func() var lhe xdr.LedgerHeaderHistoryEntry var the xdr.TransactionHistoryEntry @@ -177,7 +178,7 @@ func (arch *Archive) VerifyCategoryCheckpoint(cat string, chk uint32) error { for { reset() - if err = rdr.ReadOne(&tmp); err != nil { + if err = rdr.ReadOne(tmp); err != nil { if err == io.EOF { break } else { @@ -228,7 +229,7 @@ func (arch *Archive) VerifyBucketEntries(h Hash) error { var entry xdr.BucketEntry err = rdr.ReadOne(&entry) if err == nil { - err2 := WriteFramedXdr(hsh, &entry) + err2 := xdr.MarshalFramed(hsh, &entry) if err2 != nil { return err2 } @@ -246,7 +247,7 @@ func reportValidity(ty string, nbad int, total int) { if nbad == 0 { log.Printf("Verified %d %ss have expected hashes", total, ty) } else { - log.Printf("Error: %d %ss (of %d checked) have unexpected hashes", nbad, ty, total) + log.Errorf("Error: %d %ss (of %d checked) have unexpected hashes", nbad, ty, total) } } @@ -260,7 +261,7 @@ func compareHashMaps(expect map[uint32]Hash, actual map[uint32]Hash, ty string, } if ahash != ehash { n++ - log.Printf("Error: mismatched hash on %s 0x%8.8x: expected %s, got %s", + log.Errorf("Error: mismatched hash on %s 0x%8.8x: expected %s, got %s", ty, eledger, ehash, ahash) } } @@ -268,16 +269,16 @@ func compareHashMaps(expect map[uint32]Hash, actual map[uint32]Hash, ty string, return n } -func (arch *Archive) ReportInvalid(opts *CommandOptions) error { +func (arch *Archive) ReportInvalid(opts *CommandOptions) (bool, error) { if !opts.Verify { - return nil + return false, nil } arch.mutex.Lock() defer arch.mutex.Unlock() lowest := uint32(0xffffffff) - for i, _ := range arch.expectLedgerHashes { + for i := range arch.expectLedgerHashes { if i < lowest { lowest = i } @@ -316,7 +317,7 @@ func (arch *Archive) ReportInvalid(opts *CommandOptions) error { totalInvalid += arch.invalidTxResultSets if totalInvalid != 0 { - return fmt.Errorf("Detected %d objects with unexpected hashes", totalInvalid) + return true, fmt.Errorf("Detected %d objects with unexpected hashes", totalInvalid) } - return nil + return false, nil } diff --git a/historyarchive/xdrstream.go b/historyarchive/xdrstream.go new file mode 100644 index 0000000000..e0d9745585 --- /dev/null +++ b/historyarchive/xdrstream.go @@ -0,0 +1,220 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/binary" + "fmt" + "hash" + "io" + "io/ioutil" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type XdrStream struct { + buf bytes.Buffer + gzipReader *countReader + rdr *countReader + rdr2 io.ReadCloser + sha256Hash hash.Hash + + validateHash bool + expectedHash [sha256.Size]byte + xdrDecoder *xdr.BytesDecoder +} + +type countReader struct { + io.ReadCloser + bytesRead int64 +} + +func (c *countReader) Read(p []byte) (int, error) { + n, err := c.ReadCloser.Read(p) + c.bytesRead += int64(n) + return n, err +} + +func newCountReader(r io.ReadCloser) *countReader { + return &countReader{ + r, 0, + } +} + +func NewXdrStream(in io.ReadCloser) *XdrStream { + // We write all we read from in to sha256Hash that can be later + // compared with `expectedHash` using SetExpectedHash and Close. + sha256Hash := sha256.New() + teeReader := io.TeeReader(in, sha256Hash) + return &XdrStream{ + rdr: newCountReader( + struct { + io.Reader + io.Closer + }{bufio.NewReader(teeReader), in}, + ), + sha256Hash: sha256Hash, + xdrDecoder: xdr.NewBytesDecoder(), + } +} + +func NewXdrGzStream(in io.ReadCloser) (*XdrStream, error) { + gzipCountReader := newCountReader(in) + rdr, err := gzip.NewReader(bufReadCloser(gzipCountReader)) + if err != nil { + in.Close() + return nil, err + } + + stream := NewXdrStream(rdr) + stream.rdr2 = in + stream.gzipReader = gzipCountReader + return stream, nil +} + +func HashXdr(x interface{}) (Hash, error) { + var msg bytes.Buffer + _, err := xdr.Marshal(&msg, x) + if err != nil { + var zero Hash + return zero, err + } + return Hash(sha256.Sum256(msg.Bytes())), nil +} + +// SetExpectedHash sets expected hash that will be checked in Close(). +// This (obviously) needs to be set before Close() is called. +func (x *XdrStream) SetExpectedHash(hash [sha256.Size]byte) { + x.validateHash = true + x.expectedHash = hash +} + +// ExpectedHash returns the expected hash and a boolean indicating if the +// expected hash was set +func (x *XdrStream) ExpectedHash() ([sha256.Size]byte, bool) { + return x.expectedHash, x.validateHash +} + +// Close closes all internal readers and checks if the expected hash +// (if set by SetExpectedHash) matches the actual hash of the stream. 
+func (x *XdrStream) Close() error { + if x.validateHash { + // Read all remaining data from rdr + _, err := io.Copy(ioutil.Discard, x.rdr) + if err != nil { + // close the internal readers to avoid memory leaks + x.closeReaders() + return errors.Wrap(err, "Error reading remaining bytes from rdr") + } + + actualHash := x.sha256Hash.Sum([]byte{}) + + if !bytes.Equal(x.expectedHash[:], actualHash[:]) { + // close the internal readers to avoid memory leaks + x.closeReaders() + return errors.New("Stream hash does not match expected hash!") + } + } + + return x.closeReaders() +} + +func (x *XdrStream) closeReaders() error { + var err error + + if x.rdr != nil { + if err2 := x.rdr.Close(); err2 != nil { + err = err2 + } + } + if x.rdr2 != nil { + if err2 := x.rdr2.Close(); err2 != nil { + err = err2 + } + } + if x.gzipReader != nil { + if err2 := x.gzipReader.Close(); err2 != nil { + err = err2 + } + } + + return err +} + +func (x *XdrStream) ReadOne(in xdr.DecoderFrom) error { + var nbytes uint32 + err := binary.Read(x.rdr, binary.BigEndian, &nbytes) + if err != nil { + x.rdr.Close() + if err == io.EOF { + // Do not wrap io.EOF + return err + } + return errors.Wrap(err, "binary.Read error") + } + nbytes &= 0x7fffffff + x.buf.Reset() + if nbytes == 0 { + x.rdr.Close() + return io.EOF + } + x.buf.Grow(int(nbytes)) + read, err := x.buf.ReadFrom(io.LimitReader(x.rdr, int64(nbytes))) + if err != nil { + x.rdr.Close() + return err + } + if read != int64(nbytes) { + x.rdr.Close() + return errors.New("Read wrong number of bytes from XDR") + } + + readi, err := x.xdrDecoder.DecodeBytes(in, x.buf.Bytes()) + if err != nil { + x.rdr.Close() + return err + } + if int64(readi) != int64(nbytes) { + return fmt.Errorf("Unmarshalled %d bytes from XDR, expected %d)", + readi, nbytes) + } + return nil +} + +// BytesRead returns the number of bytes read in the stream +func (x *XdrStream) BytesRead() int64 { + return x.rdr.bytesRead +} + +// GzipBytesRead returns the number of gzip bytes read in the stream. +// Returns -1 if underlying reader is not gzipped. +func (x *XdrStream) GzipBytesRead() int64 { + if x.gzipReader == nil { + return -1 + } + return x.gzipReader.bytesRead +} + +// Discard removes n bytes from the stream +func (x *XdrStream) Discard(n int64) (int64, error) { + return io.CopyN(ioutil.Discard, x.rdr, n) +} + +func CreateXdrStream(entries ...xdr.BucketEntry) *XdrStream { + b := &bytes.Buffer{} + for _, e := range entries { + err := xdr.MarshalFramed(b, e) + if err != nil { + panic(err) + } + } + + return NewXdrStream(ioutil.NopCloser(b)) +} diff --git a/historyarchive/xdrstream_test.go b/historyarchive/xdrstream_test.go new file mode 100644 index 0000000000..76323edb34 --- /dev/null +++ b/historyarchive/xdrstream_test.go @@ -0,0 +1,118 @@ +// Copyright 2016 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. 
See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package historyarchive + +import ( + "bytes" + "crypto/sha256" + "io" + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestXdrStreamHash(t *testing.T) { + bucketEntry := xdr.BucketEntry{ + Type: xdr.BucketEntryTypeLiveentry, + LiveEntry: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Balance: xdr.Int64(200000000), + }, + }, + }, + } + stream := CreateXdrStream(bucketEntry) + + // Stream hash should be equal sha256 hash of concatenation of: + // - uint32 representing the number of bytes of a structure, + // - xdr-encoded `BucketEntry` above. + b := &bytes.Buffer{} + err := xdr.MarshalFramed(b, bucketEntry) + require.NoError(t, err) + + expectedHash := sha256.Sum256(b.Bytes()) + stream.SetExpectedHash(expectedHash) + + var readBucketEntry xdr.BucketEntry + err = stream.ReadOne(&readBucketEntry) + require.NoError(t, err) + assert.Equal(t, bucketEntry, readBucketEntry) + + assert.Equal(t, int(stream.BytesRead()), b.Len()) + + assert.Equal(t, io.EOF, stream.ReadOne(&readBucketEntry)) + assert.Equal(t, int(stream.BytesRead()), b.Len()) + + assert.NoError(t, stream.Close()) +} + +func TestXdrStreamDiscard(t *testing.T) { + firstEntry := xdr.BucketEntry{ + Type: xdr.BucketEntryTypeLiveentry, + LiveEntry: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Balance: xdr.Int64(200000000), + }, + }, + }, + } + secondEntry := xdr.BucketEntry{ + Type: xdr.BucketEntryTypeLiveentry, + LiveEntry: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"), + Balance: xdr.Int64(100000000), + }, + }, + }, + } + + fullStream := CreateXdrStream(firstEntry, secondEntry) + b := &bytes.Buffer{} + require.NoError(t, xdr.MarshalFramed(b, firstEntry)) + require.NoError(t, xdr.MarshalFramed(b, secondEntry)) + expectedHash := sha256.Sum256(b.Bytes()) + fullStream.SetExpectedHash(expectedHash) + + discardStream := CreateXdrStream(firstEntry, secondEntry) + discardStream.SetExpectedHash(expectedHash) + + var readBucketEntry xdr.BucketEntry + require.NoError(t, fullStream.ReadOne(&readBucketEntry)) + assert.Equal(t, firstEntry, readBucketEntry) + + skipAmount := fullStream.BytesRead() + bytesRead, err := discardStream.Discard(skipAmount) + require.NoError(t, err) + assert.Equal(t, bytesRead, skipAmount) + + require.NoError(t, fullStream.ReadOne(&readBucketEntry)) + assert.Equal(t, secondEntry, readBucketEntry) + + require.NoError(t, discardStream.ReadOne(&readBucketEntry)) + assert.Equal(t, secondEntry, readBucketEntry) + + assert.Equal(t, int(fullStream.BytesRead()), b.Len()) + assert.Equal(t, fullStream.BytesRead(), discardStream.BytesRead()) + + assert.Equal(t, io.EOF, fullStream.ReadOne(&readBucketEntry)) + assert.Equal(t, io.EOF, discardStream.ReadOne(&readBucketEntry)) + + assert.Equal(t, int(fullStream.BytesRead()), b.Len()) + assert.Equal(t, fullStream.BytesRead(), discardStream.BytesRead()) + + assert.NoError(t, discardStream.Close()) + assert.NoError(t, fullStream.Close()) +} 
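As a quick orientation for the new `XdrStream` API above, a typical read loop looks roughly like the following sketch. This is not part of the change itself: the `readBucketEntries` helper, its signature, and the error handling are illustrative only, and it assumes it sits alongside the package so `NewXdrGzStream` is in scope.

```go
package historyarchive

import (
	"crypto/sha256"
	"io"

	"github.com/stellar/go/xdr"
)

// readBucketEntries is an illustrative helper, not part of this package: it
// decodes xdr.BucketEntry values from a gzipped XDR stream and, on Close,
// verifies the SHA-256 of the decompressed contents against expectedHash.
func readBucketEntries(in io.ReadCloser, expectedHash [sha256.Size]byte) ([]xdr.BucketEntry, error) {
	stream, err := NewXdrGzStream(in)
	if err != nil {
		return nil, err
	}
	stream.SetExpectedHash(expectedHash)

	var entries []xdr.BucketEntry
	for {
		var entry xdr.BucketEntry
		if err := stream.ReadOne(&entry); err != nil {
			if err == io.EOF {
				break
			}
			return nil, err
		}
		entries = append(entries, entry)
	}

	// Close drains the stream and checks the expected hash set above.
	return entries, stream.Close()
}
```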
diff --git a/ingest/CHANGELOG.md b/ingest/CHANGELOG.md new file mode 100644 index 0000000000..edd428a62f --- /dev/null +++ b/ingest/CHANGELOG.md @@ -0,0 +1,38 @@ +# Changelog + +All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/). + + +## Unreleased + +* Let filewatcher use binary hash instead of timestamp to detect core version update [4050](https://github.com/stellar/go/pull/4050) + +### New Features +* **Performance improvement**: the Captive Core backend now reuses bucket files whenever it finds existing ones in the corresponding `--captive-core-storage-path` (introduced in [v2.0](#v2.0.0)) rather than generating a one-time temporary sub-directory ([#3670](https://github.com/stellar/go/pull/3670)). Note that taking advantage of this feature requires [Stellar-Core v17.1.0](https://github.com/stellar/stellar-core/releases/tag/v17.1.0) or later. + +### Bug Fixes +* The Stellar Core runner now parses logs from its underlying subprocess better [#3746](https://github.com/stellar/go/pull/3746). + + +## v2.0.0 + +This release is related to the release of [Horizon v2.3.0](https://github.com/stellar/go/releases/tag/horizon-v2.3.0) and introduces some breaking changes to the `ingest` package for those building their own tools. + +### Breaking Changes +- Many APIs now require a `context.Context` parameter, allowing you to interact with the backends and control calls in a more finely-controlled manner. This includes the readers (`ChangeReader` et al.) as well as the backends themselves (`CaptiveStellarCore` et al.). + +- **`GetLedger()` always blocks** now, even for an `UnboundedRange`. + +- The `CaptiveCoreBackend` now requires an all-inclusive `CaptiveCoreToml` object to configure Captive Core rather than an assortment of individual parameters. This object can be built from a TOML file (see `NewCaptiveCoreTomlFromFile`) or from parameters (see `NewCaptiveCoreToml`) as was done before. + +- `LedgerTransaction.Meta` has been renamed to `UnsafeMeta` to highlight that users should be careful when interacting with it. + +- Remote Captive Core no longer includes the `present` field in the ledger response JSON. + +### New Features +- `NewLedgerChangeReaderFromLedgerCloseMeta` and `NewLedgerTransactionReaderFromLedgerCloseMeta` are new ways to construct readers from a particular single ledger. + +### Other Changes +- The remote Captive Core client timeout has doubled. + +- Captive Core now creates a temporary directory (`captive-core-...`) in the specified storage path (current directory by default) that it cleans it up on shutdown rather than in the OS's temp directory. diff --git a/ingest/README.md b/ingest/README.md new file mode 100644 index 0000000000..e07dd64ec8 --- /dev/null +++ b/ingest/README.md @@ -0,0 +1,405 @@ +# Ingestion Library +The `ingest` package provides primitives for building custom ingestion engines. + +Very often, developers need features that are outside of Horizon's scope. While it provides APIs for building the most common applications, it's not possible to add all possible features. That's why this package was created. + + + +# Architecture +From a high level, the ingestion library is broken down into a few modular components: + +``` + [ Processors ] + | + / \ + / \ + / \ + [Change] [Transaction] + | | + |---+---| | + Checkpoint Ledger Ledger + Change Change Transaction + Reader Reader Reader + + [ Ledger Backend ] + | + one of... 
+ | + --------|-----+------|----------| + | | | | + Captive Database Remote etc. + Core Captive + Core +``` + +This is described in a little more detail in [`doc.go`](./doc.go), its accompanying examples, the documentation within this package, and the rest of this tutorial. + + + +# Hello, World! +As is tradition, we'll start with a simplistic example that ingests a single ledger from the network. We're immediately faced with a decision, though: _What's the backend?_ We'll use a **Captive Stellar-Core backend** in this example because it requires (little-to-)no setup, but there are couple of alternatives available. You could also use: + + - a **database** (via `NewDatabaseBackend()`), which would ingest ledgers stored in a Stellar-Core database, or + + - a **remote Captive Core** instance (via `NewRemoteCaptive()`), which works much like Captive Core, but points to an instance that isn't (necessarily) running locally. + +With that in mind, here's a minimalist example of the ingestion library: + +```go +package main + +import ( + "context" + "fmt" + + backends "github.com/stellar/go/ingest/ledgerbackend" +) + +func main() { + ctx := context.Background() + backend, err := backends.NewCaptive(config) + panicIf(err) + defer backend.Close() + + // Prepare a single ledger to be ingested, + err = backend.PrepareRange(ctx, backends.BoundedRange(123456, 123456)) + panicIf(err) + + // then retrieve it: + ledger, err := backend.GetLedger(ctx, 123456) + panicIf(err) + + // Now `ledger` is a raw `xdr.LedgerCloseMeta` object containing the + // transactions contained within this ledger. + fmt.Printf("\nHello, Sequence %d.\n", ledger.LedgerSequence()) +} +``` + +_(The `panicIf` function is defined in the [footnotes](#footnotes); it's used here for error-checking brevity.)_ + +Notice that the mysterious `config` variable above isn't defined. This will be environment-specific and users should consult both the [Captive Core documentation](../../services/horizon/internal/docs/captive_core.md) and the [config docs](./ledgerbackend/captive_core_backend.go#L96-L125) directly for more details if they want to use this backend in production. For now, though, we'll have some hardcoded values for the SDF testnet: + +```go +networkPassphrase := "Test SDF Network ; September 2015" +captiveCoreToml, err := ledgerbackend.NewCaptiveCoreToml( + ledgerbackend.CaptiveCoreTomlParams{ + NetworkPassphrase: networkPassphrase, + HistoryArchiveURLs: []string{ + "https://history.stellar.org/prd/core-testnet/core_testnet_001", + }, + }) +panicIf(err) + +config := ledgerbackend.CaptiveCoreConfig{ + // Change these based on your environment: + BinaryPath: "/usr/bin/stellar-core", + NetworkPassphrase: networkPassphrase, + HistoryArchiveURLs: archiveURLs, + Toml: captiveCoreToml, +} +``` + +(Again, see the format of the stub file, etc. in the linked docs.) + +Running this should dump a ton of logs while Captive Core boots up, downloads a history archive, and ultimately pops up the ledger sequence number we ingested: + +``` +$ go run ./example.go +INFO[...] default: Config from /tmp/captive-stellar-core365405852/stellar-core.conf pid=20574 +INFO[...] default: RUN_STANDALONE enabled in configuration file - node will not function properly with most networks pid=20574 +INFO[...] default: Generated QUORUM_SET: { pid=20574 +INFO[...] "t" : 2, pid=20574 +INFO[...] "v" : [ "sdf_testnet_2", "sdf_testnet_3", "sdf_testnet_1" ] pid=20574 +INFO[...] } pid=20574 +INFO[...] 
default: Assigning calculated value of 1 to FAILURE_SAFETY pid=20574 +INFO[...] Database: Connecting to: sqlite3://:memory: pid=20574 +INFO[...] SCP: LocalNode::LocalNode@GCVAA qSet: 59d361 pid=20574 +INFO[...] default: * pid=20574 +INFO[...] default: * The database has been initialized pid=20574 +INFO[...] default: * pid=20574 +INFO[...] Database: Applying DB schema upgrade to version 13 pid=20574 +INFO[...] Database: Adding column 'ledgerext' to table 'accounts' pid=20574 +... +INFO[...] Ledger: Established genesis ledger, closing pid=20574 +INFO[...] Ledger: Root account seed: SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL4 pid=20574 +INFO[...] default: * pid=20574 +INFO[...] default: * The next launch will catchup from the network afresh. pid=20574 +INFO[...] default: * pid=20574 +INFO[...] default: Application destructing pid=20574 +INFO[...] default: Application destroyed pid=20574 +... +INFO[...] History: Starting catchup with configuration: pid=20574 +INFO[...] lastClosedLedger: 1 pid=20574 +INFO[...] toLedger: 123457 pid=20574 +INFO[...] count: 2 pid=20574 +INFO[...] History: Catching up to ledger 123457: Downloading state file history/00/01/e2/history-0001e27f.json for ledger 123519 pid=20574 +... +INFO[...] History: Catching up to ledger 123457: downloading and verifying buckets: 16/17 (94%) pid=20574 +INFO[...] History: Verifying bucket d4db982884941c0b82422996e26ae0778b4a85385ef657ffacee9b11adf72882 pid=20574 +INFO[...] History: Catching up to ledger 123457: Succeeded: download-verify-buckets : 17/17 children completed pid=20574 +INFO[...] History: Applying buckets pid=20574 +INFO[...] History: Catching up to ledger 123457: Applying buckets 0%. Currently on level 9 pid=20574 +... +INFO[...] Bucket: Bucket-apply: 158366 entries in 17.12MB/17.12MB in 17/17 files (100%) pid=20574 +INFO[...] History: Catching up to ledger 123457: Applying buckets 100%. Currently on level 0 pid=20574 +INFO[...] History: ApplyBuckets : done, restarting merges pid=20574 +INFO[...] History: Catching up to ledger 123457: Succeeded: download-verify-apply-buckets pid=20574 +INFO[...] History: Downloading, unzipping and applying transactions for checkpoint 123519 pid=20574 +INFO[...] History: Catching up to ledger 123457: Download & apply checkpoints: num checkpoints left to apply:1 (0% done) pid=20574 + +Hello, Ledger #123456. +``` + +There's obviously much, *much* more we can do with the ingestion library. Let's work through some more comprehensive examples. + + + +# **Example**: Ledger Statistics +In this section, we'll demonstrate how to combine a backend with a reader to actually learn something meaningful about the Stellar network. Again, we'll use a specific backend here (Captive Core, again), but the processing can be done with any of them. + +More specifically, we're going to analyze the ledgers and track some statistics about the success/failure of transactions and their relative operations using `LedgerTransactionReader`. While this is technically doable by manipulating the Horizon API and some fancy JSON parsing, it serves as a useful yet concise demonstration of the ingestion library's features. + + +## Preamble +Let's get the boilerplate out of the way first. Again, we presume `config` is some sensible Captive Core configuration. 
+ +```go +package main + +import ( + "context" + "fmt" + "io" + + "github.com/sirupsen/logrus" + "github.com/stellar/go/ingest" + backends "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/support/log" +) + +func statistics() { + ctx := context.Background() + // Only log errors from the backend to keep output cleaner. + lg := log.New() + lg.SetLevel(logrus.ErrorLevel) + config.Log = lg + + backend, err := backends.NewCaptive(config) + panicIf(err) + defer backend.Close() + + // ... +``` + +## Reading Transactions +Now, let's identify a range of ledgers we wish to process. For simplicity, let's work on the first 10,000 ledgers on the network. + +```go + // Prepare a range to be ingested: + var startingSeq uint32 = 2 // can't start with genesis ledger + var ledgersToRead uint32 = 10000 + + fmt.Printf("Preparing range (%d ledgers)...\n", ledgersToRead) + ledgerRange := backends.BoundedRange(startingSeq, startingSeq+ledgersToRead) + err = backend.PrepareRange(ctx, ledgerRange) + panicIf(err) +``` + +This part will take a bit of time as Captive Core (or whatever backend) processes these ledgers and prepares them for ingestion. + +Now, we'll actually use a `LedgerTransactionReader` object to use the backend and read the transactions ledger by ledger. It takes the backend, the network passphrase, and the ledger you'd like to process as parameters, giving you back an object that returns raw transaction objects row by row. + +```go + // These are the statistics that we're tracking. + var successfulTransactions, failedTransactions int + var operationsInSuccessful, operationsInFailed int + + for seq := startingSeq; seq <= startingSeq+ledgersToRead; seq++ { + fmt.Printf("Processed ledger %d...\r", seq) + + txReader, err := ingest.NewLedgerTransactionReader( + ctx, backend, config.NetworkPassphrase, seq, + ) + panicIf(err) + defer txReader.Close() +``` + +Each ledger likely has many transactions, so we nest in another loop to process them all: + +```go + // Read each transaction within the ledger, extract its operations, and + // accumulate the statistics we're interested in. + for { + tx, err := txReader.Read() + if err == io.EOF { + break + } + panicIf(err) + + envelope := tx.Envelope + operationCount := len(envelope.Operations()) + if tx.Result.Successful() { + successfulTransactions++ + operationsInSuccessful += operationCount + } else { + failedTransactions++ + operationsInFailed += operationCount + } + } + } // outer loop +``` + +And that's it! We can print the statistics out of interest: + +```go + fmt.Println("\nDone. Results:") + fmt.Printf(" - total transactions: %d\n", successfulTransactions+failedTransactions) + fmt.Printf(" - succeeded / failed: %d / %d\n", successfulTransactions, failedTransactions) + fmt.Printf(" - total operations: %d\n", operationsInSuccessful+operationsInFailed) + fmt.Printf(" - succeeded / failed: %d / %d\n", operationsInSuccessful, operationsInFailed) +} // end of main +``` + +As of this writing, the stats are as follows: + + Results: + - total transactions: 24159 + - succeeded / failed: 16037 / 8122 + - total operations: 33845 + - succeeded / failed: 25387 / 8458 + +The full, runnable example is available [here](./example_statistics.go). + + +# **Example**: Feature Popularity +In this example, we'll leverage the `CheckpointChangeReader` to determine the popularity of a feature introduced in [Protocol 15](https://www.stellar.org/blog/protocol-14-improvements): claimable balances. 
Specifically, we'll be investigating how many claimable balances were created in an arbitrary ledger range.
+
+Let's begin. As before, there's a bit of boilerplate necessary. There's only a single additional import necessary relative to the [previous Preamble](#preamble). Since we're working with checkpoint ledgers, history archives come into play:
+
+```go
+import "github.com/stellar/go/historyarchive"
+```
+
+This time, we don't need a `LedgerBackend` instance whatsoever. The ledger changes we want to process will be fed into the reader through a different means. In our example, the history archives have the ~droids~ ledgers that we are looking for.
+
+
+## History Archive Connections
+First things first: we need to establish a connection to a history archive.
+
+```go
+    // Open a history archive using our existing configuration details.
+    historyArchive, err := historyarchive.Connect(
+        config.HistoryArchiveURLs[0], // assumes a CaptiveCoreConfig
+        historyarchive.ConnectOptions{
+            NetworkPassphrase: config.NetworkPassphrase,
+            S3Region:          "us-west-1",
+            UnsignedRequests:  false,
+        },
+    )
+    panicIf(err)
+```
+
+## Tracking Changes
+Each history archive contains the current cumulative state of the entire network.
+
+Now we can use the history archive to actually read in all of the changes that have accumulated in the entire network by a particular checkpoint.
+
+```go
+    // First, we need to establish a safe fallback in case of any problems
+    // during the history archive download+processing, so we'll set a 30-second
+    // timeout.
+    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+    defer cancel()
+
+    reader, err := ingest.NewCheckpointChangeReader(ctx, historyArchive, 123455)
+    panicIf(err)
+```
+
+In our examples, we refer to the testnet, whose archives are much smaller. When using the pubnet, a 30 *minute* timeout may be more appropriate (depending on system specs): Horizon takes around 15-20 minutes to process pubnet history archives.
+
+By default, checkpoints occur every 64 ledgers (see `historyarchive.ConnectOptions` for changing this). More specifically, given ledger `n`, if `n+1 mod 64 == 0`, then `n` is a checkpoint ledger. Put another way, checkpoint ledgers are those with sequence `k*64 - 1` for `k = 1, 2, 3, ...`. This holds for the `n == 123455` used above.
+
+Since history archives store global cumulative state, our `ChangeReader` will report every entry as being "new", reading out a list of *all* ledger entries. We can then process them and establish how many claimable balances have been created in the testnet's lifetime:
+
+```go
+    entries, newCBs := 0, 0
+    for {
+        entry, err := reader.Read()
+        if err == io.EOF {
+            break
+        }
+        panicIf(err)
+
+        entries++
+
+        switch entry.Type {
+        case xdr.LedgerEntryTypeClaimableBalance:
+            newCBs++
+        // these are included for completeness of the demonstration
+        case xdr.LedgerEntryTypeAccount:
+        case xdr.LedgerEntryTypeData:
+        case xdr.LedgerEntryTypeTrustline:
+        case xdr.LedgerEntryTypeOffer:
+        default:
+            panic(fmt.Errorf("Unknown type: %+v", entry.Type))
+        }
+    }
+
+    fmt.Printf("%d/%d entries were claimable balances\n", newCBs, entries)
+} // end of main()
+```
+
+
+
+# Snippets
+This section outlines a brief collection of common things you may want to do with the library. To avoid boilerplate, we assume a very generic `backend` variable where necessary, holding one of the aforementioned `LedgerBackend` instances.
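+
+### Streaming ledgers with an unbounded range
+The bounded ranges used so far stop at a fixed sequence. To follow the network live instead, you can prepare an unbounded range and keep calling `GetLedger`, which blocks until the requested ledger is available. This is only a sketch: the starting sequence is arbitrary, and `backends` and `panicIf` are the alias and helper from the earlier examples.
+
+```go
+ctx := context.Background()
+
+var startSeq uint32 = 123456
+err := backend.PrepareRange(ctx, backends.UnboundedRange(startSeq))
+panicIf(err)
+
+for seq := startSeq; ; seq++ {
+    // Blocks until ledger `seq` has been ingested by the backend.
+    ledger, err := backend.GetLedger(ctx, seq)
+    panicIf(err)
+
+    fmt.Printf("Ingested ledger %d.\n", ledger.LedgerSequence())
+}
+```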
+
+
+### Controlling `LedgerBackend` log verbosity
+Certain backends (like Captive Core) can be very noisy; they will log to standard output by default at the "Info" level.
+
+You can suppress many logs by changing the level to only print warnings and errors:
+
+```go
+package main
+
+import (
+    ingest "github.com/stellar/go/ingest/ledgerbackend"
+    "github.com/stellar/go/support/log"
+    "github.com/sirupsen/logrus"
+)
+
+func main() {
+    lg := log.New()
+    lg.SetLevel(logrus.WarnLevel)
+    config.Log = lg // assume config is otherwise predefined
+
+    backend, err := ingest.NewCaptive(config) // (or other backend)
+    // ...
+}
+```
+
+Or even disable output entirely by redirecting to `ioutil.Discard`:
+
+```go
+lg.Entry.Logger.Out = ioutil.Discard
+```
+
+
+#### Footnotes
+
+ 1. The minimalist error handler (if `panic`king counts as "handling" an error) `panicIf` used throughout this tutorial is defined simply as:
+
+```go
+func panicIf(err error) {
+    if err != nil {
+        panic(err)
+    }
+}
+```
+
+ **Please don't use it in production code**; it's provided here for completeness, convenience, and brevity of examples.
+
+ 2. Since the Stellar testnet undergoes periodic resets, the example outputs from various sections (especially regarding network statistics) will not always be accurate.
+
+ 3. It's worth noting that even though the [second example](#example-feature-popularity) could *also* be done by using the `LedgerTransactionReader` and inspecting the individual operations, that'd be a bit redundant as far as examples go.
diff --git a/ingest/change.go b/ingest/change.go
new file mode 100644
index 0000000000..11877cde61
--- /dev/null
+++ b/ingest/change.go
@@ -0,0 +1,247 @@
+package ingest
+
+import (
+	"bytes"
+
+	"github.com/stellar/go/support/errors"
+	"github.com/stellar/go/xdr"
+)
+
+// Change is a developer-friendly representation of LedgerEntryChanges.
+// It also provides some helper functions to quickly check if a given
+// change has occurred in an entry.
+//
+// If an entry is created: Pre is nil and Post is not nil.
+// If an entry is updated: Pre is not nil and Post is not nil.
+// If an entry is removed: Pre is not nil and Post is nil.
+type Change struct {
+	Type xdr.LedgerEntryType
+	Pre  *xdr.LedgerEntry
+	Post *xdr.LedgerEntry
+}
+
+// GetChangesFromLedgerEntryChanges transforms LedgerEntryChanges to []Change.
+// Each `updated` and `removed` change is preceded by a `state` change, while
+// `created` changes stand alone, without `state`. The transformation we're
+// doing is to move each change (state/updated, state/removed or created) to
+// an array of pre/post pairs. Then:
+// - for created, pre is null and post is a new entry,
+// - for updated, pre is previous state and post is the current state,
+// - for removed, pre is previous state and post is null.
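+//
+// As an illustration (the entries A, A' and B here are hypothetical), the
+// sequence STATE(A), UPDATED(A'), CREATED(B) produces two Changes:
+// {Pre: &A, Post: &A'} and {Pre: nil, Post: &B}.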
+// +// stellar-core source: +// https://github.com/stellar/stellar-core/blob/e584b43/src/ledger/LedgerTxn.cpp#L582 +func GetChangesFromLedgerEntryChanges(ledgerEntryChanges xdr.LedgerEntryChanges) []Change { + changes := []Change{} + + for i, entryChange := range ledgerEntryChanges { + switch entryChange.Type { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + created := entryChange.MustCreated() + changes = append(changes, Change{ + Type: created.Data.Type, + Pre: nil, + Post: &created, + }) + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + state := ledgerEntryChanges[i-1].MustState() + updated := entryChange.MustUpdated() + changes = append(changes, Change{ + Type: state.Data.Type, + Pre: &state, + Post: &updated, + }) + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + state := ledgerEntryChanges[i-1].MustState() + changes = append(changes, Change{ + Type: state.Data.Type, + Pre: &state, + Post: nil, + }) + case xdr.LedgerEntryChangeTypeLedgerEntryState: + continue + default: + panic("Invalid LedgerEntryChangeType") + } + } + + return changes +} + +// LedgerEntryChangeType returns type in terms of LedgerEntryChangeType. +func (c *Change) LedgerEntryChangeType() xdr.LedgerEntryChangeType { + switch { + case c.Pre == nil && c.Post != nil: + return xdr.LedgerEntryChangeTypeLedgerEntryCreated + case c.Pre != nil && c.Post == nil: + return xdr.LedgerEntryChangeTypeLedgerEntryRemoved + case c.Pre != nil && c.Post != nil: + return xdr.LedgerEntryChangeTypeLedgerEntryUpdated + default: + panic("Invalid state of Change (Pre == nil && Post == nil)") + } +} + +// getLiquidityPool gets the most recent state of the LiquidityPool that exists or existed. +func (c *Change) getLiquidityPool() (*xdr.LiquidityPoolEntry, error) { + var entry *xdr.LiquidityPoolEntry + if c.Pre != nil { + entry = c.Pre.Data.LiquidityPool + } + if c.Post != nil { + entry = c.Post.Data.LiquidityPool + } + if entry == nil { + return &xdr.LiquidityPoolEntry{}, errors.New("this change does not include a liquidity pool") + } + return entry, nil +} + +// GetLiquidityPoolType returns the liquidity pool type. +func (c *Change) GetLiquidityPoolType() (xdr.LiquidityPoolType, error) { + lp, err := c.getLiquidityPool() + if err != nil { + return xdr.LiquidityPoolType(0), err + } + return lp.Body.Type, nil +} + +// AccountChangedExceptSigners returns true if account has changed WITHOUT +// checking the signers (except master key weight!). In other words, if the only +// change is connected to signers, this function will return false. +func (c *Change) AccountChangedExceptSigners() (bool, error) { + if c.Type != xdr.LedgerEntryTypeAccount { + panic("This should not be called on changes other than Account changes") + } + + // New account + if c.Pre == nil { + return true, nil + } + + // Account merged + // c.Pre != nil at this point. + if c.Post == nil { + return true, nil + } + + // c.Pre != nil && c.Post != nil at this point. + if c.Pre.LastModifiedLedgerSeq != c.Post.LastModifiedLedgerSeq { + return true, nil + } + + // Don't use short assignment statement (:=) to ensure variables below + // are not pointers (if `xdr` package changes in the future)! + var preAccountEntry, postAccountEntry xdr.AccountEntry + preAccountEntry = c.Pre.Data.MustAccount() + postAccountEntry = c.Post.Data.MustAccount() + + // preAccountEntry and postAccountEntry are copies so it's fine to + // modify them here, EXCEPT pointers inside them! 
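+
+	// Treat a missing (V0) extension as an empty V1 extension and, below, drop
+	// the signers, so the byte-for-byte comparison at the end ignores signer
+	// changes and representation-only differences in the extension.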
+	if preAccountEntry.Ext.V == 0 {
+		preAccountEntry.Ext.V = 1
+		preAccountEntry.Ext.V1 = &xdr.AccountEntryExtensionV1{
+			Liabilities: xdr.Liabilities{
+				Buying:  0,
+				Selling: 0,
+			},
+		}
+	}
+
+	preAccountEntry.Signers = nil
+
+	if postAccountEntry.Ext.V == 0 {
+		postAccountEntry.Ext.V = 1
+		postAccountEntry.Ext.V1 = &xdr.AccountEntryExtensionV1{
+			Liabilities: xdr.Liabilities{
+				Buying:  0,
+				Selling: 0,
+			},
+		}
+	}
+
+	postAccountEntry.Signers = nil
+
+	preBinary, err := preAccountEntry.MarshalBinary()
+	if err != nil {
+		return false, errors.Wrap(err, "Error running preAccountEntry.MarshalBinary")
+	}
+
+	postBinary, err := postAccountEntry.MarshalBinary()
+	if err != nil {
+		return false, errors.Wrap(err, "Error running postAccountEntry.MarshalBinary")
+	}
+
+	return !bytes.Equal(preBinary, postBinary), nil
+}
+
+// AccountSignersChanged returns true if account signers have changed.
+// Notice: this will return true on master key changes too!
+func (c *Change) AccountSignersChanged() bool {
+	if c.Type != xdr.LedgerEntryTypeAccount {
+		panic("This should not be called on changes other than Account changes")
+	}
+
+	// New account so new master key (which is also a signer)
+	if c.Pre == nil {
+		return true
+	}
+
+	// Account merged. The account being merged can still have signers.
+	// c.Pre != nil at this point.
+	if c.Post == nil {
+		return true
+	}
+
+	// c.Pre != nil && c.Post != nil at this point.
+	preAccountEntry := c.Pre.Data.MustAccount()
+	postAccountEntry := c.Post.Data.MustAccount()
+
+	preSigners := preAccountEntry.SignerSummary()
+	postSigners := postAccountEntry.SignerSummary()
+
+	if len(preSigners) != len(postSigners) {
+		return true
+	}
+
+	for postSigner, postWeight := range postSigners {
+		preWeight, exist := preSigners[postSigner]
+		if !exist {
+			return true
+		}
+
+		if preWeight != postWeight {
+			return true
+		}
+	}
+
+	preSignerSponsors := preAccountEntry.SignerSponsoringIDs()
+	postSignerSponsors := postAccountEntry.SignerSponsoringIDs()
+
+	if len(preSignerSponsors) != len(postSignerSponsors) {
+		return true
+	}
+
+	for i := 0; i < len(preSignerSponsors); i++ {
+		preSponsor := preSignerSponsors[i]
+		postSponsor := postSignerSponsors[i]
+
+		if preSponsor == nil && postSponsor != nil {
+			return true
+		} else if preSponsor != nil && postSponsor == nil {
+			return true
+		} else if preSponsor != nil && postSponsor != nil {
+			preSponsorAccountID := xdr.AccountId(*preSponsor)
+			preSponsorAddress := preSponsorAccountID.Address()
+
+			postSponsorAccountID := xdr.AccountId(*postSponsor)
+			postSponsorAddress := postSponsorAccountID.Address()
+
+			if preSponsorAddress != postSponsorAddress {
+				return true
+			}
+		}
+	}
+
+	return false
+}
diff --git a/ingest/change_compactor.go b/ingest/change_compactor.go
new file mode 100644
index 0000000000..dfac201269
--- /dev/null
+++ b/ingest/change_compactor.go
@@ -0,0 +1,235 @@
+package ingest
+
+import (
+	"encoding/base64"
+	"sync"
+
+	"github.com/stellar/go/support/errors"
+	"github.com/stellar/go/xdr"
+)
+
+// ChangeCompactor is a cache of ledger entry changes that squashes all
+// changes within a single ledger. By doing this, it decreases the number of
+// DB queries sent to the DB to update the current state of the ledger.
+// It has integrity checks built in, so e.g. removing an account that was
+// previously removed returns an error. In such a case verify.StateError is
+// returned.
+//
+// It applies changes to the cache using the following algorithm:
+//
+// 1. If the change is CREATED, it checks if any change connected to the given
+//    entry is already in the cache. If not, it adds the CREATED change.
+//    Otherwise, if the existing change is:
+//    a. CREATED, it returns an error because we can't add an entry that
+//       already exists.
+//    b. UPDATED, it returns an error because we can't add an entry that
+//       already exists.
+//    c. REMOVED, it means that due to previous transitions we want to remove
+//       this from the DB, which means it already exists in the DB, so we need
+//       to update the type of the change to UPDATED.
+// 2. If the change is UPDATED, it checks if any change connected to the given
+//    entry is already in the cache. If not, it adds the UPDATED change.
+//    Otherwise, if the existing change is:
+//    a. CREATED, it means that due to previous transitions we want to create
+//       this in the DB, which means it doesn't exist in the DB yet, so we
+//       update the entry but keep the CREATED type.
+//    b. UPDATED, we simply update it with the new value.
+//    c. REMOVED, it means that at this point in the ledger the entry is
+//       removed, so updating it returns an error.
+// 3. If the change is REMOVED, it checks if any change connected to the given
+//    entry is already in the cache. If not, it adds the REMOVED change.
+//    Otherwise, if the existing change is:
+//    a. CREATED, it means that due to previous transitions we want to create
+//       this in the DB, which means it doesn't exist in the DB yet. If it was
+//       created and removed in the same ledger it's a no-op, so we remove the
+//       entry from the cache.
+//    b. UPDATED, we simply update it to be a REMOVED change, because the
+//       UPDATED change means the entry exists in the DB.
+//    c. REMOVED, it returns an error because we can't remove an entry that
+//       was already removed.
+type ChangeCompactor struct {
+	// ledger key => Change
+	cache          map[string]Change
+	mutex          sync.Mutex
+	encodingBuffer *xdr.EncodingBuffer
+}
+
+// NewChangeCompactor returns a new ChangeCompactor.
+func NewChangeCompactor() *ChangeCompactor {
+	return &ChangeCompactor{
+		cache:          make(map[string]Change),
+		encodingBuffer: xdr.NewEncodingBuffer(),
+	}
+}
+
+// AddChange adds a change to ChangeCompactor. All changes are stored
+// in memory. To get the final, squashed changes call GetChanges.
+//
+// Please note that the current ledger capacity in pubnet (max 1000 ops/ledger)
+// makes ChangeCompactor safe to use in terms of memory usage. If the
+// cache takes too much memory, you can apply the changes returned by
+// GetChanges and create a new ChangeCompactor object to continue ingestion.
+func (c *ChangeCompactor) AddChange(change Change) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	switch {
+	case change.Pre == nil && change.Post != nil:
+		return c.addCreatedChange(change)
+	case change.Pre != nil && change.Post != nil:
+		return c.addUpdatedChange(change)
+	case change.Pre != nil && change.Post == nil:
+		return c.addRemovedChange(change)
+	default:
+		return errors.New("Unknown entry change state")
+	}
+}
+
+// addCreatedChange adds a change to the cache, but returns an error if a
+// created change is unexpected.
+func (c *ChangeCompactor) addCreatedChange(change Change) error { + // safe, since we later cast to string (causing a copy) + ledgerKey, err := c.encodingBuffer.UnsafeMarshalBinary(change.Post.LedgerKey()) + if err != nil { + return errors.Wrap(err, "Error MarshalBinary") + } + + ledgerKeyString := string(ledgerKey) + + existingChange, exist := c.cache[ledgerKeyString] + if !exist { + c.cache[ledgerKeyString] = change + return nil + } + + switch existingChange.LedgerEntryChangeType() { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + return NewStateError(errors.Errorf( + "can't create an entry that already exists (ledger key = %s)", + base64.StdEncoding.EncodeToString(ledgerKey), + )) + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + return NewStateError(errors.Errorf( + "can't create an entry that already exists (ledger key = %s)", + base64.StdEncoding.EncodeToString(ledgerKey), + )) + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + // If existing type is removed it means that this entry does exist + // in a DB so we update entry change. + c.cache[ledgerKeyString] = Change{ + Type: change.Post.LedgerKey().Type, + Pre: existingChange.Pre, + Post: change.Post, + } + default: + return errors.Errorf("Unknown LedgerEntryChangeType: %d", existingChange.LedgerEntryChangeType()) + } + + return nil +} + +// addUpdatedChange adds a change to the cache, but returns an error if update +// change is unexpected. +func (c *ChangeCompactor) addUpdatedChange(change Change) error { + // safe, since we later cast to string (causing a copy) + ledgerKey, err := c.encodingBuffer.UnsafeMarshalBinary(change.Post.LedgerKey()) + if err != nil { + return errors.Wrap(err, "Error MarshalBinary") + } + + ledgerKeyString := string(ledgerKey) + + existingChange, exist := c.cache[ledgerKeyString] + if !exist { + c.cache[ledgerKeyString] = change + return nil + } + + switch existingChange.LedgerEntryChangeType() { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + // If existing type is created it means that this entry does not + // exist in a DB so we update entry change. + c.cache[ledgerKeyString] = Change{ + Type: change.Post.LedgerKey().Type, + Pre: existingChange.Pre, // = nil + Post: change.Post, + } + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + c.cache[ledgerKeyString] = Change{ + Type: change.Post.LedgerKey().Type, + Pre: existingChange.Pre, + Post: change.Post, + } + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + return NewStateError(errors.Errorf( + "can't update an entry that was previously removed (ledger key = %s)", + base64.StdEncoding.EncodeToString(ledgerKey), + )) + default: + return errors.Errorf("Unknown LedgerEntryChangeType: %d", existingChange.Type) + } + + return nil +} + +// addRemovedChange adds a change to the cache, but returns an error if remove +// change is unexpected. +func (c *ChangeCompactor) addRemovedChange(change Change) error { + // safe, since we later cast to string (causing a copy) + ledgerKey, err := c.encodingBuffer.UnsafeMarshalBinary(change.Pre.LedgerKey()) + if err != nil { + return errors.Wrap(err, "Error MarshalBinary") + } + + ledgerKeyString := string(ledgerKey) + + existingChange, exist := c.cache[ledgerKeyString] + if !exist { + c.cache[ledgerKeyString] = change + return nil + } + + switch existingChange.LedgerEntryChangeType() { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + // If existing type is created it means that this will be no op. + // Entry was created and is now removed in a single ledger. 
+ delete(c.cache, ledgerKeyString) + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + c.cache[ledgerKeyString] = Change{ + Type: change.Pre.LedgerKey().Type, + Pre: existingChange.Pre, + Post: nil, + } + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + return NewStateError(errors.Errorf( + "can't remove an entry that was previously removed (ledger key = %s)", + base64.StdEncoding.EncodeToString(ledgerKey), + )) + default: + return errors.Errorf("Unknown LedgerEntryChangeType: %d", existingChange.Type) + } + + return nil +} + +// GetChanges returns a slice of Changes in the cache. The order of changes is +// random but each change is connected to a separate entry. +func (c *ChangeCompactor) GetChanges() []Change { + c.mutex.Lock() + defer c.mutex.Unlock() + + changes := make([]Change, 0, len(c.cache)) + + for _, entryChange := range c.cache { + changes = append(changes, entryChange) + } + + return changes +} + +// Size returns number of ledger entries in the cache. +func (c *ChangeCompactor) Size() int { + c.mutex.Lock() + defer c.mutex.Unlock() + return len(c.cache) +} diff --git a/ingest/change_compactor_test.go b/ingest/change_compactor_test.go new file mode 100644 index 0000000000..b861eb1352 --- /dev/null +++ b/ingest/change_compactor_test.go @@ -0,0 +1,398 @@ +package ingest + +import ( + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +func TestChangeCompactorExistingCreated(t *testing.T) { + suite.Run(t, new(TestChangeCompactorExistingCreatedSuite)) +} + +// TestChangeCompactorExistingCreatedSuite tests transitions from +// existing CREATED state in the cache. +type TestChangeCompactorExistingCreatedSuite struct { + suite.Suite + cache *ChangeCompactor +} + +func (s *TestChangeCompactorExistingCreatedSuite) SetupTest() { + s.cache = NewChangeCompactor() + + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 11, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + s.Assert().NoError(s.cache.AddChange(change)) + changes := s.cache.GetChanges() + s.Assert().Len(changes, 1) + s.Assert().Equal(changes[0].LedgerEntryChangeType(), xdr.LedgerEntryChangeTypeLedgerEntryCreated) +} + +func (s *TestChangeCompactorExistingCreatedSuite) TestChangeCreated() { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 12, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + s.Assert().EqualError( + s.cache.AddChange(change), + "can't create an entry that already exists (ledger key = AAAAAAAAAAC2LgFRDBZ3J52nLm30kq2iMgrO7dYzYAN3hvjtf1IHWg==)", + ) +} + +func (s *TestChangeCompactorExistingCreatedSuite) TestChangeUpdated() { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 11, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 12, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: 
xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + s.Assert().NoError(s.cache.AddChange(change)) + changes := s.cache.GetChanges() + s.Assert().Len(changes, 1) + s.Assert().Equal(changes[0].LedgerEntryChangeType(), xdr.LedgerEntryChangeTypeLedgerEntryCreated) +} + +func (s *TestChangeCompactorExistingCreatedSuite) TestChangeRemoved() { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 11, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: nil, + } + s.Assert().NoError(s.cache.AddChange(change)) + changes := s.cache.GetChanges() + s.Assert().Len(changes, 0) +} + +func TestLedgerEntryChangeCacheExistingUpdated(t *testing.T) { + suite.Run(t, new(TestChangeCompactorExistingUpdatedSuite)) +} + +// TestChangeCompactorExistingUpdatedSuite tests transitions from existing +// UPDATED state in the cache. +type TestChangeCompactorExistingUpdatedSuite struct { + suite.Suite + cache *ChangeCompactor +} + +func (s *TestChangeCompactorExistingUpdatedSuite) SetupTest() { + s.cache = NewChangeCompactor() + + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 11, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + s.Assert().NoError(s.cache.AddChange(change)) + changes := s.cache.GetChanges() + s.Assert().Len(changes, 1) + s.Assert().Equal(changes[0].LedgerEntryChangeType(), xdr.LedgerEntryChangeTypeLedgerEntryUpdated) +} + +func (s *TestChangeCompactorExistingUpdatedSuite) TestChangeCreated() { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 12, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + s.Assert().EqualError( + s.cache.AddChange(change), + "can't create an entry that already exists (ledger key = AAAAAAAAAAC2LgFRDBZ3J52nLm30kq2iMgrO7dYzYAN3hvjtf1IHWg==)", + ) +} + +func (s *TestChangeCompactorExistingUpdatedSuite) TestChangeUpdated() { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 11, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 12, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + s.Assert().NoError(s.cache.AddChange(change)) + changes := s.cache.GetChanges() + s.Assert().Len(changes, 1) + s.Assert().Equal(changes[0].LedgerEntryChangeType(), xdr.LedgerEntryChangeTypeLedgerEntryUpdated) + s.Assert().Equal(changes[0].Post.LastModifiedLedgerSeq, 
xdr.Uint32(12)) +} + +func (s *TestChangeCompactorExistingUpdatedSuite) TestChangeRemoved() { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 11, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: nil, + } + s.Assert().NoError(s.cache.AddChange(change)) + changes := s.cache.GetChanges() + s.Assert().Len(changes, 1) + s.Assert().Equal(changes[0].LedgerEntryChangeType(), xdr.LedgerEntryChangeTypeLedgerEntryRemoved) +} + +func TestChangeCompactorExistingRemoved(t *testing.T) { + suite.Run(t, new(TestChangeCompactorExistingRemovedSuite)) +} + +// TestChangeCompactorExistingRemovedSuite tests transitions from existing +// REMOVED state in the cache. +type TestChangeCompactorExistingRemovedSuite struct { + suite.Suite + cache *ChangeCompactor +} + +func (s *TestChangeCompactorExistingRemovedSuite) SetupTest() { + s.cache = NewChangeCompactor() + + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: nil, + } + s.Assert().NoError(s.cache.AddChange(change)) + changes := s.cache.GetChanges() + s.Assert().Len(changes, 1) + s.Assert().Equal(changes[0].LedgerEntryChangeType(), xdr.LedgerEntryChangeTypeLedgerEntryRemoved) +} + +func (s *TestChangeCompactorExistingRemovedSuite) TestChangeCreated() { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 12, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + s.Assert().NoError(s.cache.AddChange(change)) + changes := s.cache.GetChanges() + s.Assert().Len(changes, 1) + s.Assert().Equal(changes[0].LedgerEntryChangeType(), xdr.LedgerEntryChangeTypeLedgerEntryUpdated) + s.Assert().Equal(changes[0].Post.LastModifiedLedgerSeq, xdr.Uint32(12)) +} + +func (s *TestChangeCompactorExistingRemovedSuite) TestChangeUpdated() { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 12, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + s.Assert().EqualError( + s.cache.AddChange(change), + "can't update an entry that was previously removed (ledger key = AAAAAAAAAAC2LgFRDBZ3J52nLm30kq2iMgrO7dYzYAN3hvjtf1IHWg==)", + ) +} + +func (s *TestChangeCompactorExistingRemovedSuite) TestChangeRemoved() { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 11, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: nil, + } + 
s.Assert().EqualError( + s.cache.AddChange(change), + "can't remove an entry that was previously removed (ledger key = AAAAAAAAAAC2LgFRDBZ3J52nLm30kq2iMgrO7dYzYAN3hvjtf1IHWg==)", + ) +} + +// TestChangeCompactorSquashMultiplePayments simulates sending multiple payments +// between two accounts. Ledger cache should squash multiple changes into just +// two. +// +// GAJ2T6NQ6TDZRVRSNWM3JC7L3TG4H7UBCVK3GUHKP3TQ5NQ3LM4JGBTJ sends money +// GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML receives money +func TestChangeCompactorSquashMultiplePayments(t *testing.T) { + cache := NewChangeCompactor() + + for i := 1; i <= 1000; i++ { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAJ2T6NQ6TDZRVRSNWM3JC7L3TG4H7UBCVK3GUHKP3TQ5NQ3LM4JGBTJ"), + Balance: xdr.Int64(2000 - i + 1), + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 12, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAJ2T6NQ6TDZRVRSNWM3JC7L3TG4H7UBCVK3GUHKP3TQ5NQ3LM4JGBTJ"), + Balance: xdr.Int64(2000 - i), + }, + }, + }, + } + assert.NoError(t, cache.AddChange(change)) + + change = Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Balance: xdr.Int64(2000 + i - 1), + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 12, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Balance: xdr.Int64(2000 + i), + }, + }, + }, + } + assert.NoError(t, cache.AddChange(change)) + } + + changes := cache.GetChanges() + assert.Len(t, changes, 2) + for _, change := range changes { + assert.Equal(t, change.LedgerEntryChangeType(), xdr.LedgerEntryChangeTypeLedgerEntryUpdated) + account := change.Post.Data.MustAccount() + switch account.AccountId.Address() { + case "GAJ2T6NQ6TDZRVRSNWM3JC7L3TG4H7UBCVK3GUHKP3TQ5NQ3LM4JGBTJ": + assert.Equal(t, account.Balance, xdr.Int64(1000)) + case "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML": + assert.Equal(t, account.Balance, xdr.Int64(3000)) + default: + assert.Fail(t, "Invalid account") + } + } +} diff --git a/ingest/checkpoint_change_reader.go b/ingest/checkpoint_change_reader.go new file mode 100644 index 0000000000..723d830367 --- /dev/null +++ b/ingest/checkpoint_change_reader.go @@ -0,0 +1,551 @@ +package ingest + +import ( + "context" + "io" + "sync" + "time" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// readResult is the result of reading a bucket value +type readResult struct { + entryChange xdr.LedgerEntryChange + e error +} + +// CheckpointChangeReader is a ChangeReader which returns Changes from a history archive +// snapshot. The Changes produced by a CheckpointChangeReader reflect the state of the Stellar +// network at a particular checkpoint ledger sequence. 
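+//
+// Typical usage, as shown in the package examples, is to construct the reader
+// with NewCheckpointChangeReader, call Read in a loop until it returns io.EOF,
+// and then call Close.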
+type CheckpointChangeReader struct { + ctx context.Context + has *historyarchive.HistoryArchiveState + archive historyarchive.ArchiveInterface + tempStore tempSet + sequence uint32 + readChan chan readResult + streamOnce sync.Once + closeOnce sync.Once + done chan bool + + readBytesMutex sync.RWMutex + totalRead int64 + totalSize int64 + + encodingBuffer *xdr.EncodingBuffer + + // This should be set to true in tests only + disableBucketListHashValidation bool + sleep func(time.Duration) +} + +// Ensure CheckpointChangeReader implements ChangeReader +var _ ChangeReader = &CheckpointChangeReader{} + +// tempSet is an interface that must be implemented by stores that +// hold temporary set of objects for state reader. The implementation +// does not need to be thread-safe. +type tempSet interface { + Open() error + // Preload batch-loads keys into internal cache (if a store has any) to + // improve execution time by removing many round-trips. + Preload(keys []string) error + // Add adds key to the store. + Add(key string) error + // Exist returns value true if the value is found in the store. + // If the value has not been set, it should return false. + Exist(key string) (bool, error) + Close() error +} + +const ( + // maxStreamRetries defines how many times should we retry when there are errors in + // the xdr stream returned by GetXdrStreamForHash(). + maxStreamRetries = 3 + msrBufferSize = 50000 + + // preloadedEntries defines a number of bucket entries to preload from a + // bucket in a single run. This is done to allow preloading keys from + // temp set. + preloadedEntries = 20000 + + sleepDuration = time.Second +) + +// NewCheckpointChangeReader constructs a new CheckpointChangeReader instance. +// +// The ledger sequence must be a checkpoint ledger. By default (see +// `historyarchive.ConnectOptions.CheckpointFrequency` for configuring this), +// its next sequence number would have to be a multiple of 64, e.g. +// sequence=100031 is a checkpoint ledger, since: (100031+1) mod 64 == 0 +func NewCheckpointChangeReader( + ctx context.Context, + archive historyarchive.ArchiveInterface, + sequence uint32, +) (*CheckpointChangeReader, error) { + manager := archive.GetCheckpointManager() + + // The nth ledger is a checkpoint ledger iff: n+1 mod f == 0, where f is the + // checkpoint frequency (64 by default). 
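+	// For example, with the default frequency of 64, sequence 24123007 is a
+	// checkpoint ledger because (24123007+1) % 64 == 0, while 24123008 is not.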
+ if !manager.IsCheckpoint(sequence) { + return nil, errors.Errorf( + "%d is not a checkpoint ledger, try %d or %d "+ + "(in general, try n where n+1 mod %d == 0)", + sequence, manager.PrevCheckpoint(sequence), + manager.NextCheckpoint(sequence), + manager.GetCheckpointFrequency()) + } + + has, err := archive.GetCheckpointHAS(sequence) + if err != nil { + return nil, errors.Wrapf(err, "unable to get checkpoint HAS at ledger sequence %d", sequence) + } + + tempStore := &memoryTempSet{} + err = tempStore.Open() + if err != nil { + return nil, errors.Wrap(err, "unable to get open temp store") + } + + return &CheckpointChangeReader{ + ctx: ctx, + has: &has, + archive: archive, + tempStore: tempStore, + sequence: sequence, + readChan: make(chan readResult, msrBufferSize), + streamOnce: sync.Once{}, + closeOnce: sync.Once{}, + done: make(chan bool), + encodingBuffer: xdr.NewEncodingBuffer(), + sleep: time.Sleep, + }, nil +} + +func (r *CheckpointChangeReader) bucketExists(hash historyarchive.Hash) (bool, error) { + duration := sleepDuration + var exists bool + var err error + for attempts := 0; ; attempts++ { + exists, err = r.archive.BucketExists(hash) + if err == nil { + return exists, nil + } + if attempts >= maxStreamRetries { + break + } + r.sleep(duration) + duration *= 2 + } + return exists, err +} + +// streamBuckets is internal method that streams buckets from the given HAS. +// +// Buckets should be processed from oldest to newest, `snap` and then `curr` at +// each level. The correct value of ledger entry is the latest seen +// `INITENTRY`/`LIVEENTRY` except the case when there's a `DEADENTRY` later +// which removes the entry. +// +// We can implement trivial algorithm (processing from oldest to newest buckets) +// but it requires to keep map of all entries in memory and stream what's left +// when all buckets are processed. +// +// However, we can modify this algorithm to work from newest to oldest ledgers: +// +// 1. For each `INITENTRY`/`LIVEENTRY` we check if we've seen the key before +// (stored in `tempStore`). If the key hasn't been seen, we write that bucket +// entry to the stream and add it to the `tempStore` (we don't mark `INITENTRY`, +// see the inline comment or CAP-20). +// 2. For each `DEADENTRY` we keep track of removed bucket entries in +// `tempStore` map. +// +// In such algorithm we just need to store a set of keys that require much less space. +// The memory requirements will be lowered when CAP-0020 is live and older buckets are +// rewritten. Then, we will only need to keep track of `DEADENTRY`. 
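+//
+// For example, if a newer bucket contains a DEADENTRY for some key and an
+// older bucket still contains a LIVEENTRY for the same key, the key is already
+// present in `tempStore` when the older LIVEENTRY is reached, so the stale
+// entry is skipped instead of being streamed.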
+func (r *CheckpointChangeReader) streamBuckets() { + defer func() { + err := r.tempStore.Close() + if err != nil { + r.readChan <- r.error(errors.New("Error closing tempStore")) + } + + r.closeOnce.Do(r.close) + close(r.readChan) + }() + + var buckets []historyarchive.Hash + for i := 0; i < len(r.has.CurrentBuckets); i++ { + b := r.has.CurrentBuckets[i] + for _, hashString := range []string{b.Curr, b.Snap} { + hash, err := historyarchive.DecodeHash(hashString) + if err != nil { + r.readChan <- r.error(errors.Wrap(err, "Error decoding bucket hash")) + return + } + + if hash.IsZero() { + continue + } + + buckets = append(buckets, hash) + } + } + + for _, hash := range buckets { + exists, err := r.bucketExists(hash) + if err != nil { + r.readChan <- r.error( + errors.Wrapf(err, "error checking if bucket exists: %s", hash), + ) + return + } + + if !exists { + r.readChan <- r.error( + errors.Errorf("bucket hash does not exist: %s", hash), + ) + return + } + + size, err := r.archive.BucketSize(hash) + if err != nil { + r.readChan <- r.error( + errors.Wrapf(err, "error checking bucket size: %s", hash), + ) + return + } + + r.readBytesMutex.Lock() + r.totalSize += size + r.readBytesMutex.Unlock() + } + + for i, hash := range buckets { + oldestBucket := i == len(buckets)-1 + if shouldContinue := r.streamBucketContents(hash, oldestBucket); !shouldContinue { + break + } + } +} + +// readBucketEntry will attempt to read a bucket entry from `stream`. +// If any errors are encountered while reading from `stream`, readBucketEntry will +// retry the operation using a new *historyarchive.XdrStream. +// The total number of retries will not exceed `maxStreamRetries`. +func (r *CheckpointChangeReader) readBucketEntry(stream *historyarchive.XdrStream, hash historyarchive.Hash) ( + xdr.BucketEntry, + error, +) { + var entry xdr.BucketEntry + var err error + currentPosition := stream.BytesRead() + gzipCurrentPosition := stream.GzipBytesRead() + + for attempts := 0; ; attempts++ { + if r.ctx.Err() != nil { + err = r.ctx.Err() + break + } + if err == nil { + err = stream.ReadOne(&entry) + if err == nil || err == io.EOF { + r.readBytesMutex.Lock() + r.totalRead += stream.GzipBytesRead() - gzipCurrentPosition + r.readBytesMutex.Unlock() + break + } + } + if attempts >= maxStreamRetries { + break + } + + stream.Close() + + var retryStream *historyarchive.XdrStream + retryStream, err = r.newXDRStream(hash) + if err != nil { + err = errors.Wrap(err, "Error creating new xdr stream") + continue + } + + *stream = *retryStream + + _, err = stream.Discard(currentPosition) + if err != nil { + err = errors.Wrap(err, "Error discarding from xdr stream") + continue + } + } + + return entry, err +} + +func (r *CheckpointChangeReader) newXDRStream(hash historyarchive.Hash) ( + *historyarchive.XdrStream, + error, +) { + rdr, e := r.archive.GetXdrStreamForHash(hash) + if e == nil && !r.disableBucketListHashValidation { + // Calling SetExpectedHash will enable validation of the stream hash. If hashes + // don't match, rdr.Close() will return an error. 
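+		// Tests set disableBucketListHashValidation to true to skip this
+		// validation (see the field comment above).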
+ rdr.SetExpectedHash(hash) + } + + return rdr, e +} + +// streamBucketContents pushes value onto the read channel, returning false when the channel needs to be closed otherwise true +func (r *CheckpointChangeReader) streamBucketContents(hash historyarchive.Hash, oldestBucket bool) bool { + rdr, e := r.newXDRStream(hash) + if e != nil { + r.readChan <- r.error( + errors.Wrapf(e, "cannot get xdr stream for hash '%s'", hash.String()), + ) + return false + } + + defer func() { + err := rdr.Close() + if err != nil { + r.readChan <- r.error(errors.Wrap(err, "Error closing xdr stream")) + // Stop streaming from the rest of the files. + r.Close() + } + }() + + // bucketProtocolVersion is a protocol version read from METAENTRY or 0 when no METAENTRY. + // No METAENTRY means that bucket originates from before protocol version 11. + bucketProtocolVersion := uint32(0) + + n := -1 + var batch []xdr.BucketEntry + lastBatch := false + +LoopBucketEntry: + for { + // Preload entries for faster retrieve from temp store. + if len(batch) == 0 { + if lastBatch { + return true + } + + preloadKeys := []string{} + + for i := 0; i < preloadedEntries; i++ { + var entry xdr.BucketEntry + entry, e = r.readBucketEntry(rdr, hash) + if e != nil { + if e == io.EOF { + if len(batch) == 0 { + // No entries loaded for this batch, nothing more to process + return true + } + lastBatch = true + break + } + r.readChan <- r.error( + errors.Wrapf(e, "Error on XDR record %d of hash '%s'", n, hash.String()), + ) + return false + } + + batch = append(batch, entry) + + // Generate a key + var key xdr.LedgerKey + + switch entry.Type { + case xdr.BucketEntryTypeLiveentry, xdr.BucketEntryTypeInitentry: + liveEntry := entry.MustLiveEntry() + key = liveEntry.LedgerKey() + case xdr.BucketEntryTypeDeadentry: + key = entry.MustDeadEntry() + default: + // No ledger key associated with this entry, continue to the next one. + continue + } + + // We're using compressed keys here + // safe, since we are converting to string right away + keyBytes, e := r.encodingBuffer.LedgerKeyUnsafeMarshalBinaryCompress(key) + if e != nil { + r.readChan <- r.error( + errors.Wrapf(e, "Error marshaling XDR record %d of hash '%s'", n, hash.String()), + ) + return false + } + + h := string(keyBytes) + preloadKeys = append(preloadKeys, h) + } + + err := r.tempStore.Preload(preloadKeys) + if err != nil { + r.readChan <- r.error(errors.Wrap(err, "Error preloading keys")) + return false + } + } + + var entry xdr.BucketEntry + entry, batch = batch[0], batch[1:] + + n++ + + var key xdr.LedgerKey + + switch entry.Type { + case xdr.BucketEntryTypeMetaentry: + if n != 0 { + r.readChan <- r.error( + errors.Errorf( + "METAENTRY not the first entry (n=%d) in the bucket hash '%s'", + n, hash.String(), + ), + ) + return false + } + // We can't use MustMetaEntry() here. 
Check: + // https://github.com/golang/go/issues/32560 + bucketProtocolVersion = uint32(entry.MetaEntry.LedgerVersion) + continue LoopBucketEntry + case xdr.BucketEntryTypeLiveentry, xdr.BucketEntryTypeInitentry: + liveEntry := entry.MustLiveEntry() + key = liveEntry.LedgerKey() + case xdr.BucketEntryTypeDeadentry: + key = entry.MustDeadEntry() + default: + r.readChan <- r.error( + errors.Errorf("Unknown BucketEntryType=%d: %d@%s", entry.Type, n, hash.String()), + ) + return false + } + + // We're using compressed keys here + // Safe, since we are converting to string right away + keyBytes, e := r.encodingBuffer.LedgerKeyUnsafeMarshalBinaryCompress(key) + if e != nil { + r.readChan <- r.error( + errors.Wrapf( + e, "Error marshaling XDR record %d of hash '%s'", n, hash.String(), + ), + ) + return false + } + + h := string(keyBytes) + + switch entry.Type { + case xdr.BucketEntryTypeLiveentry, xdr.BucketEntryTypeInitentry: + if entry.Type == xdr.BucketEntryTypeInitentry && bucketProtocolVersion < 11 { + r.readChan <- r.error( + errors.Errorf("Read INITENTRY from version <11 bucket: %d@%s", n, hash.String()), + ) + return false + } + + seen, err := r.tempStore.Exist(h) + if err != nil { + r.readChan <- r.error(errors.Wrap(err, "Error reading from tempStore")) + return false + } + + if !seen { + // Return LEDGER_ENTRY_STATE changes only now. + liveEntry := entry.MustLiveEntry() + entryChange := xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &liveEntry, + } + r.readChan <- readResult{entryChange, nil} + + // We don't update `tempStore` for INITENTRY because CAP-20 says: + // > a bucket entry marked INITENTRY implies that either no entry + // > with the same ledger key exists in an older bucket, or else + // > that the (chronologically) preceding entry with the same ledger + // > key was DEADENTRY. + if entry.Type == xdr.BucketEntryTypeLiveentry { + // We skip adding entries from the last bucket to tempStore because: + // 1. Ledger keys are unique within a single bucket. + // 2. This is the last bucket we process so there's no need to track + // seen last entries in this bucket. + if oldestBucket { + continue + } + err := r.tempStore.Add(h) + if err != nil { + r.readChan <- r.error(errors.Wrap(err, "Error updating to tempStore")) + return false + } + } + } + case xdr.BucketEntryTypeDeadentry: + err := r.tempStore.Add(h) + if err != nil { + r.readChan <- r.error(errors.Wrap(err, "Error writing to tempStore")) + return false + } + default: + r.readChan <- r.error( + errors.Errorf("Unexpected entry type %d: %d@%s", entry.Type, n, hash.String()), + ) + return false + } + + select { + case <-r.done: + // Close() called: stop processing buckets. + return false + default: + continue + } + } + + panic("Shouldn't happen") +} + +// Read returns a new ledger entry change on each call, returning io.EOF when the stream ends. +func (r *CheckpointChangeReader) Read() (Change, error) { + r.streamOnce.Do(func() { + go r.streamBuckets() + }) + + // blocking call. 
anytime we consume from this channel, the background goroutine will stream in the next value + result, ok := <-r.readChan + if !ok { + // when channel is closed then return io.EOF + return Change{}, io.EOF + } + + if result.e != nil { + return Change{}, errors.Wrap(result.e, "Error while reading from buckets") + } + return Change{ + Type: result.entryChange.EntryType(), + Post: result.entryChange.State, + }, nil +} + +func (r *CheckpointChangeReader) error(err error) readResult { + return readResult{xdr.LedgerEntryChange{}, err} +} + +func (r *CheckpointChangeReader) close() { + close(r.done) +} + +// Progress returns progress reading all buckets in percents. +func (r *CheckpointChangeReader) Progress() float64 { + r.readBytesMutex.RLock() + defer r.readBytesMutex.RUnlock() + return float64(r.totalRead) / float64(r.totalSize) * 100 +} + +// Close should be called when reading is finished. +func (r *CheckpointChangeReader) Close() error { + r.closeOnce.Do(r.close) + return nil +} diff --git a/ingest/checkpoint_change_reader_test.go b/ingest/checkpoint_change_reader_test.go new file mode 100644 index 0000000000..6f580a9e0e --- /dev/null +++ b/ingest/checkpoint_change_reader_test.go @@ -0,0 +1,926 @@ +package ingest + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "sync" + "testing" + "time" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" +) + +func TestSingleLedgerStateReaderTestSuite(t *testing.T) { + suite.Run(t, new(SingleLedgerStateReaderTestSuite)) +} + +type SingleLedgerStateReaderTestSuite struct { + suite.Suite + mockArchive *historyarchive.MockArchive + reader *CheckpointChangeReader + has historyarchive.HistoryArchiveState + mockBucketExistsCall *mock.Call + mockBucketSizeCall *mock.Call +} + +func (s *SingleLedgerStateReaderTestSuite) SetupTest() { + s.mockArchive = &historyarchive.MockArchive{} + + err := json.Unmarshal([]byte(hasExample), &s.has) + s.Require().NoError(err) + + ledgerSeq := uint32(24123007) + + s.mockArchive. + On("GetCheckpointHAS", ledgerSeq). + Return(s.has, nil) + + // BucketExists should be called 21 times (11 levels, last without `snap`) + s.mockBucketExistsCall = s.mockArchive. + On("BucketExists", mock.AnythingOfType("historyarchive.Hash")). + Return(true, nil).Times(21) + + // BucketSize should be called 21 times (11 levels, last without `snap`) + s.mockBucketSizeCall = s.mockArchive. + On("BucketSize", mock.AnythingOfType("historyarchive.Hash")). + Return(int64(100), nil).Times(21) + + s.mockArchive. + On("GetCheckpointManager"). + Return(historyarchive.NewCheckpointManager( + historyarchive.DefaultCheckpointFrequency)) + + s.reader, err = NewCheckpointChangeReader( + context.Background(), + s.mockArchive, + ledgerSeq, + ) + s.Require().NotNil(s.reader) + s.Require().NoError(err) + s.Assert().Equal(ledgerSeq, s.reader.sequence) + + // Disable hash validation. We trust historyarchive.XdrStream tests here. + s.reader.disableBucketListHashValidation = true +} + +func (s *SingleLedgerStateReaderTestSuite) TearDownTest() { + s.mockArchive.AssertExpectations(s.T()) +} + +// TestSimple test reading buckets with a single live entry. 
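+// All other buckets are mocked as empty streams, so the reader is expected to
+// emit exactly one Change and then io.EOF.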
+func (s *SingleLedgerStateReaderTestSuite) TestSimple() { + curr1 := createXdrStream( + metaEntry(11), + entryAccount(xdr.BucketEntryTypeLiveentry, "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", 1), + ) + + nextBucket := s.getNextBucketChannel() + + // Return curr1 stream for the first bucket... + s.mockArchive. + On("GetXdrStreamForHash", <-nextBucket). + Return(curr1, nil).Once() + + // ...and empty streams for the rest of the buckets. + for hash := range nextBucket { + s.mockArchive. + On("GetXdrStreamForHash", hash). + Return(createXdrStream(), nil).Once() + } + + var e Change + var err error + e, err = s.reader.Read() + s.Require().NoError(err) + + id := e.Post.Data.MustAccount().AccountId + s.Assert().Equal("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", id.Address()) + + _, err = s.reader.Read() + s.Require().Equal(err, io.EOF) +} + +// TestRemoved test reading buckets with a single live entry that was removed. +func (s *SingleLedgerStateReaderTestSuite) TestRemoved() { + curr1 := createXdrStream( + entryAccount(xdr.BucketEntryTypeDeadentry, "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", 1), + ) + + snap1 := createXdrStream( + entryAccount(xdr.BucketEntryTypeLiveentry, "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", 1), + ) + + nextBucket := s.getNextBucketChannel() + + // Return curr1 and snap1 stream for the first two bucket... + s.mockArchive. + On("GetXdrStreamForHash", <-nextBucket). + Return(curr1, nil).Once() + + s.mockArchive. + On("GetXdrStreamForHash", <-nextBucket). + Return(snap1, nil).Once() + + // ...and empty streams for the rest of the buckets. + for hash := range nextBucket { + s.mockArchive. + On("GetXdrStreamForHash", hash). + Return(createXdrStream(), nil).Once() + } + + _, err := s.reader.Read() + s.Require().Equal(err, io.EOF) +} + +// TestConcurrentRead test concurrent reads for race conditions +func (s *SingleLedgerStateReaderTestSuite) TestConcurrentRead() { + curr1 := createXdrStream( + entryAccount(xdr.BucketEntryTypeDeadentry, "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", 1), + ) + + snap1 := createXdrStream( + entryAccount(xdr.BucketEntryTypeLiveentry, "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", 1), + entryAccount(xdr.BucketEntryTypeLiveentry, "GCMNSW2UZMSH3ZFRLWP6TW2TG4UX4HLSYO5HNIKUSFMLN2KFSF26JKWF", 1), + entryAccount(xdr.BucketEntryTypeLiveentry, "GB6IPC7LIOSRY26MXHQ3QJ32MTELYAA6YFIRBXZVVGTU7AOI4KUFOQ54", 1), + entryAccount(xdr.BucketEntryTypeLiveentry, "GCK45YKCFNIOICB4TWPCOPWLQYNUKCJVV7OMMHH55AB3DD67K4E54STO", 1), + ) + + nextBucket := s.getNextBucketChannel() + + // Return curr1 and snap1 stream for the first two bucket... + s.mockArchive. + On("GetXdrStreamForHash", <-nextBucket). + Return(curr1, nil).Once() + + s.mockArchive. + On("GetXdrStreamForHash", <-nextBucket). + Return(snap1, nil).Once() + + // ...and empty streams for the rest of the buckets. + for hash := range nextBucket { + s.mockArchive. + On("GetXdrStreamForHash", hash). 
+ Return(createXdrStream(), nil).Once() + } + + // 3 live entries + var wg sync.WaitGroup + + for i := 0; i < 3; i++ { + wg.Add(1) + go func() { + _, err := s.reader.Read() + s.Assert().Nil(err) + wg.Done() + }() + } + + wg.Wait() + + // Next call should return io.EOF + _, err := s.reader.Read() + s.Require().Equal(err, io.EOF) +} + +// TestEnsureLatestLiveEntry tests if a live entry overrides an older initentry +func (s *SingleLedgerStateReaderTestSuite) TestEnsureLatestLiveEntry() { + curr1 := createXdrStream( + metaEntry(11), + entryAccount(xdr.BucketEntryTypeLiveentry, "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", 1), + entryAccount(xdr.BucketEntryTypeInitentry, "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", 2), + ) + + nextBucket := s.getNextBucketChannel() + + // Return curr1 stream, rest won't be read due to an error + s.mockArchive. + On("GetXdrStreamForHash", <-nextBucket). + Return(curr1, nil).Once() + + // ...and empty streams for the rest of the buckets. + for hash := range nextBucket { + s.mockArchive. + On("GetXdrStreamForHash", hash). + Return(createXdrStream(), nil).Once() + } + + entry, err := s.reader.Read() + s.Require().Nil(err) + // Latest entry balance is 1 + s.Assert().Equal(xdr.Int64(1), entry.Post.Data.Account.Balance) + + _, err = s.reader.Read() + s.Require().Equal(err, io.EOF) +} + +// TestMalformedProtocol11Bucket tests a buggy protocol 11 bucket (meta not the first entry) +func (s *SingleLedgerStateReaderTestSuite) TestMalformedProtocol11Bucket() { + curr1 := createXdrStream( + entryAccount(xdr.BucketEntryTypeLiveentry, "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", 1), + metaEntry(11), + ) + + nextBucket := s.getNextBucketChannel() + + // Return curr1 stream, rest won't be read due to an error + s.mockArchive. + On("GetXdrStreamForHash", <-nextBucket). + Return(curr1, nil).Once() + + // Account entry + _, err := s.reader.Read() + s.Require().Nil(err) + + // Meta entry + _, err = s.reader.Read() + s.Require().NotNil(err) + s.Assert().Equal("Error while reading from buckets: METAENTRY not the first entry (n=1) in the bucket hash '517bea4c6627a688a8ce501febd8c562e737e3d86b29689d9956217640f3c74b'", err.Error()) +} + +// TestMalformedProtocol11BucketNoMeta tests a buggy protocol 11 bucket (no meta entry) +func (s *SingleLedgerStateReaderTestSuite) TestMalformedProtocol11BucketNoMeta() { + curr1 := createXdrStream( + entryAccount(xdr.BucketEntryTypeInitentry, "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", 1), + ) + + nextBucket := s.getNextBucketChannel() + + // Return curr1 stream, rest won't be read due to an error + s.mockArchive. + On("GetXdrStreamForHash", <-nextBucket). + Return(curr1, nil).Once() + + // Init entry without meta + _, err := s.reader.Read() + s.Require().NotNil(err) + s.Assert().Equal("Error while reading from buckets: Read INITENTRY from version <11 bucket: 0@517bea4c6627a688a8ce501febd8c562e737e3d86b29689d9956217640f3c74b", err.Error()) +} + +func TestBucketExistsTestSuite(t *testing.T) { + suite.Run(t, new(BucketExistsTestSuite)) +} + +type BucketExistsTestSuite struct { + suite.Suite + mockArchive *historyarchive.MockArchive + reader *CheckpointChangeReader + cancel context.CancelFunc + expectedSleeps []time.Duration +} + +func (s *BucketExistsTestSuite) SetupTest() { + s.mockArchive = &historyarchive.MockArchive{} + + ledgerSeq := uint32(24123007) + s.mockArchive. + On("GetCheckpointHAS", ledgerSeq). + Return(historyarchive.HistoryArchiveState{}, nil) + + s.mockArchive. 
+ On("GetCheckpointManager"). + Return(historyarchive.NewCheckpointManager( + historyarchive.DefaultCheckpointFrequency)) + + ctx, cancel := context.WithCancel(context.Background()) + var err error + s.reader, err = NewCheckpointChangeReader( + ctx, + s.mockArchive, + ledgerSeq, + ) + s.cancel = cancel + s.Require().NoError(err) + s.reader.sleep = func(d time.Duration) { + if len(s.expectedSleeps) == 0 { + s.Assert().Fail("unexpected call to sleep()") + return + } + s.Assert().Equal(s.expectedSleeps[0], d) + s.expectedSleeps = s.expectedSleeps[1:] + } +} + +func (s *BucketExistsTestSuite) TearDownTest() { + s.mockArchive.AssertExpectations(s.T()) +} + +func (s *BucketExistsTestSuite) testBucketExists( + numErrors int, expectedSleeps []time.Duration, +) { + for _, expected := range []bool{true, false} { + hash := historyarchive.Hash{1, 2, 3} + if numErrors > 0 { + s.mockArchive.On("BucketExists", hash). + Return(true, errors.New("transient error")).Times(numErrors) + } + s.mockArchive.On("BucketExists", hash). + Return(expected, nil).Once() + s.expectedSleeps = expectedSleeps + exists, err := s.reader.bucketExists(hash) + s.Assert().Equal(expected, exists) + s.Assert().NoError(err) + s.Assert().Empty(s.expectedSleeps) + } +} + +func (s *BucketExistsTestSuite) TestSucceedsFirstTime() { + s.testBucketExists(0, []time.Duration{}) +} + +func (s *BucketExistsTestSuite) TestSucceedsSecondTime() { + s.testBucketExists(1, []time.Duration{time.Second}) +} + +func (s *BucketExistsTestSuite) TestSucceedsThirdime() { + s.testBucketExists(2, []time.Duration{time.Second, 2 * time.Second}) +} + +func (s *BucketExistsTestSuite) TestFailsAfterThirdTime() { + hash := historyarchive.Hash{1, 2, 3} + s.mockArchive.On("BucketExists", hash). + Return(true, errors.New("transient error")).Times(4) + s.expectedSleeps = []time.Duration{ + time.Second, 2 * time.Second, 4 * time.Second, + } + _, err := s.reader.bucketExists(hash) + s.Assert().EqualError(err, "transient error") + s.Assert().Empty(s.expectedSleeps) +} + +func TestReadBucketEntryTestSuite(t *testing.T) { + suite.Run(t, new(ReadBucketEntryTestSuite)) +} + +type ReadBucketEntryTestSuite struct { + suite.Suite + mockArchive *historyarchive.MockArchive + reader *CheckpointChangeReader + cancel context.CancelFunc +} + +func (s *ReadBucketEntryTestSuite) SetupTest() { + s.mockArchive = &historyarchive.MockArchive{} + + ledgerSeq := uint32(24123007) + s.mockArchive. + On("GetCheckpointHAS", ledgerSeq). + Return(historyarchive.HistoryArchiveState{}, nil) + + s.mockArchive. + On("GetCheckpointManager"). + Return(historyarchive.NewCheckpointManager( + historyarchive.DefaultCheckpointFrequency)) + + ctx, cancel := context.WithCancel(context.Background()) + var err error + s.reader, err = NewCheckpointChangeReader( + ctx, + s.mockArchive, + ledgerSeq, + ) + s.cancel = cancel + s.Require().NoError(err) +} + +func (s *ReadBucketEntryTestSuite) TearDownTest() { + s.mockArchive.AssertExpectations(s.T()) +} + +func (s *ReadBucketEntryTestSuite) TestNewXDRStream() { + emptyHash := historyarchive.EmptyXdrArrayHash() + expectedStream := createXdrStream(metaEntry(1), metaEntry(2)) + + hash, ok := expectedStream.ExpectedHash() + s.Require().NotEqual(historyarchive.Hash(hash), emptyHash) + s.Require().False(ok) + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). 
+ Return(expectedStream, nil).Once() + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + s.Require().True(stream == expectedStream) + + hash, ok = stream.ExpectedHash() + s.Require().Equal(historyarchive.Hash(hash), emptyHash) + s.Require().True(ok) +} + +func (s *ReadBucketEntryTestSuite) TestReadAllEntries() { + emptyHash := historyarchive.EmptyXdrArrayHash() + firstEntry := metaEntry(1) + secondEntry := metaEntry(2) + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createXdrStream(firstEntry, secondEntry), nil).Once() + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + entry, err := s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, firstEntry) + + entry, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, secondEntry) + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().Equal(io.EOF, err) +} + +func (s *ReadBucketEntryTestSuite) TestFirstReadFailsWithContextError() { + emptyHash := historyarchive.EmptyXdrArrayHash() + firstEntry := metaEntry(1) + secondEntry := metaEntry(2) + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createXdrStream(firstEntry, secondEntry), nil).Once() + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + s.cancel() + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().Equal(context.Canceled, err) +} + +func (s *ReadBucketEntryTestSuite) TestSecondReadFailsWithContextError() { + emptyHash := historyarchive.EmptyXdrArrayHash() + firstEntry := metaEntry(1) + secondEntry := metaEntry(2) + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createXdrStream(firstEntry, secondEntry), nil).Once() + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + entry, err := s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, firstEntry) + s.cancel() + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().Equal(context.Canceled, err) +} + +func (s *ReadBucketEntryTestSuite) TestReadEntryAllRetriesFail() { + emptyHash := historyarchive.EmptyXdrArrayHash() + + for i := 0; i < 4; i++ { + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createInvalidXdrStream(nil), nil).Once() + } + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().EqualError(err, "Read wrong number of bytes from XDR") +} + +func (s *ReadBucketEntryTestSuite) TestReadEntryRetryIgnoresProtocolCloseError() { + emptyHash := historyarchive.EmptyXdrArrayHash() + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return( + createInvalidXdrStream(errors.New("stream error: stream ID 75; PROTOCOL_ERROR")), + nil, + ).Once() + + expectedEntry := metaEntry(1) + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). 
+ Return(createXdrStream(expectedEntry), nil).Once() + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + entry, err := s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, expectedEntry) + + hash, ok := stream.ExpectedHash() + s.Require().Equal(historyarchive.Hash(hash), emptyHash) + s.Require().True(ok) + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().Equal(err, io.EOF) +} + +func (s *ReadBucketEntryTestSuite) TestReadEntryRetryFailsToCreateNewStream() { + emptyHash := historyarchive.EmptyXdrArrayHash() + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createInvalidXdrStream(nil), nil).Once() + + var nilStream *historyarchive.XdrStream + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(nilStream, errors.New("cannot create new stream")).Times(3) + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().EqualError(err, "Error creating new xdr stream: cannot create new stream") +} + +func (s *ReadBucketEntryTestSuite) TestReadEntryRetrySucceedsAfterFailsToCreateNewStream() { + emptyHash := historyarchive.EmptyXdrArrayHash() + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createInvalidXdrStream(nil), nil).Once() + + var nilStream *historyarchive.XdrStream + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(nilStream, errors.New("cannot create new stream")).Once() + + firstEntry := metaEntry(1) + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createXdrStream(firstEntry), nil).Once() + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + entry, err := s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, firstEntry) + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().Equal(io.EOF, err) +} + +func (s *ReadBucketEntryTestSuite) TestReadEntryRetrySucceeds() { + emptyHash := historyarchive.EmptyXdrArrayHash() + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createInvalidXdrStream(nil), nil).Once() + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createInvalidXdrStream(nil), nil).Once() + + expectedEntry := metaEntry(1) + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createXdrStream(expectedEntry), nil).Once() + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + entry, err := s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, expectedEntry) + + hash, ok := stream.ExpectedHash() + s.Require().Equal(historyarchive.Hash(hash), emptyHash) + s.Require().True(ok) + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().Equal(err, io.EOF) +} + +func (s *ReadBucketEntryTestSuite) TestReadEntryRetrySucceedsWithDiscard() { + emptyHash := historyarchive.EmptyXdrArrayHash() + + firstEntry := metaEntry(1) + secondEntry := metaEntry(2) + + b := &bytes.Buffer{} + s.Require().NoError(xdr.MarshalFramed(b, firstEntry)) + writeInvalidFrame(b) + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(xdrStreamFromBuffer(b), nil).Once() + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). 
+ Return(createXdrStream(firstEntry, secondEntry), nil).Once() + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + entry, err := s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, firstEntry) + + entry, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, secondEntry) + + hash, ok := stream.ExpectedHash() + s.Require().Equal(historyarchive.Hash(hash), emptyHash) + s.Require().True(ok) + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().Equal(err, io.EOF) +} + +func (s *ReadBucketEntryTestSuite) TestReadEntryRetryFailsWithDiscardError() { + emptyHash := historyarchive.EmptyXdrArrayHash() + + firstEntry := metaEntry(1) + + b := &bytes.Buffer{} + s.Require().NoError(xdr.MarshalFramed(b, firstEntry)) + writeInvalidFrame(b) + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(xdrStreamFromBuffer(b), nil).Times(4) + + b = &bytes.Buffer{} + b.WriteString("a") + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + entry, err := s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, firstEntry) + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().EqualError(err, "Error discarding from xdr stream: EOF") +} + +func (s *ReadBucketEntryTestSuite) TestReadEntryRetrySucceedsAfterDiscardError() { + emptyHash := historyarchive.EmptyXdrArrayHash() + + firstEntry := metaEntry(1) + secondEntry := metaEntry(2) + + b := &bytes.Buffer{} + s.Require().NoError(xdr.MarshalFramed(b, firstEntry)) + writeInvalidFrame(b) + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(xdrStreamFromBuffer(b), nil).Once() + + b = &bytes.Buffer{} + b.WriteString("a") + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(xdrStreamFromBuffer(b), nil).Once() + + s.mockArchive. + On("GetXdrStreamForHash", emptyHash). + Return(createXdrStream(firstEntry, secondEntry), nil).Once() + + stream, err := s.reader.newXDRStream(emptyHash) + s.Require().NoError(err) + + entry, err := s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, firstEntry) + + entry, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().NoError(err) + s.Require().Equal(entry, secondEntry) + + _, err = s.reader.readBucketEntry(stream, emptyHash) + s.Require().Equal(io.EOF, err) +} + +func TestCheckpointLedgersTestSuite(t *testing.T) { + suite.Run(t, new(CheckpointLedgersTestSuite)) +} + +type CheckpointLedgersTestSuite struct { + suite.Suite +} + +// TestNonCheckpointLedger ensures that the reader errors on a non-checkpoint ledger +func (s *CheckpointLedgersTestSuite) TestNonCheckpointLedger() { + mockArchive := &historyarchive.MockArchive{} + ledgerSeq := uint32(123456) + + for _, freq := range []uint32{historyarchive.DefaultCheckpointFrequency, 5} { + mockArchive. + On("GetCheckpointManager"). 
+ Return(historyarchive.NewCheckpointManager(freq)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, err := NewCheckpointChangeReader(ctx, mockArchive, ledgerSeq) + s.Require().Error(err) + } +} + +func metaEntry(version uint32) xdr.BucketEntry { + return xdr.BucketEntry{ + Type: xdr.BucketEntryTypeMetaentry, + MetaEntry: &xdr.BucketMetadata{ + LedgerVersion: xdr.Uint32(version), + }, + } +} + +func entryAccount(t xdr.BucketEntryType, id string, balance uint32) xdr.BucketEntry { + switch t { + case xdr.BucketEntryTypeLiveentry, xdr.BucketEntryTypeInitentry: + return xdr.BucketEntry{ + Type: t, + LiveEntry: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress(id), + Balance: xdr.Int64(balance), + }, + }, + }, + } + case xdr.BucketEntryTypeDeadentry: + return xdr.BucketEntry{ + Type: xdr.BucketEntryTypeDeadentry, + DeadEntry: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.LedgerKeyAccount{xdr.MustAddress(id)}, + }, + } + default: + panic("Unkown entry type") + } +} + +type errCloser struct { + io.Reader + err error +} + +func (e errCloser) Close() error { return e.err } + +func createInvalidXdrStream(closeError error) *historyarchive.XdrStream { + b := &bytes.Buffer{} + writeInvalidFrame(b) + + return historyarchive.NewXdrStream(errCloser{b, closeError}) +} + +func writeInvalidFrame(b *bytes.Buffer) { + bufferSize := b.Len() + err := xdr.MarshalFramed(b, metaEntry(1)) + if err != nil { + panic(err) + } + frameSize := b.Len() - bufferSize + b.Truncate(bufferSize + frameSize/2) +} + +func createXdrStream(entries ...xdr.BucketEntry) *historyarchive.XdrStream { + b := &bytes.Buffer{} + for _, e := range entries { + err := xdr.MarshalFramed(b, e) + if err != nil { + panic(err) + } + } + + return xdrStreamFromBuffer(b) +} + +func xdrStreamFromBuffer(b *bytes.Buffer) *historyarchive.XdrStream { + return historyarchive.NewXdrStream(ioutil.NopCloser(b)) +} + +// getNextBucket is a helper that returns next bucket hash in the order of processing. +// This allows to write simpler test code that ensures that mocked calls are in a +// correct order. 
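+// The returned channel is buffered and already closed, so tests can receive
+// single hashes with <-nextBucket and then range over the remaining ones.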
+func (s *SingleLedgerStateReaderTestSuite) getNextBucketChannel() <-chan (historyarchive.Hash) { + // 11 levels with 2 buckets each = buffer of 22 + c := make(chan (historyarchive.Hash), 22) + + for i := 0; i < len(s.has.CurrentBuckets); i++ { + b := s.has.CurrentBuckets[i] + + curr := historyarchive.MustDecodeHash(b.Curr) + if !curr.IsZero() { + c <- curr + } + + snap := historyarchive.MustDecodeHash(b.Snap) + if !snap.IsZero() { + c <- snap + } + } + + close(c) + return c +} + +var hasExample = `{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 24123007, + "currentBuckets": [ + { + "curr": "517bea4c6627a688a8ce501febd8c562e737e3d86b29689d9956217640f3c74b", + "next": { + "state": 0 + }, + "snap": "75c8c5540a825da61e05ae23d0b0be9d29f2bdb8fdfa550a3f3496f030f62ffd" + }, + { + "curr": "5bca6165dbf6832ff4550e67d0e564eca56494acfc9b7fd46c740f4d66c74609", + "next": { + "state": 1, + "output": "75c8c5540a825da61e05ae23d0b0be9d29f2bdb8fdfa550a3f3496f030f62ffd" + }, + "snap": "b6bad6183a3394087aae3d05ed393c5dcb80e35ed557e2c8935cba855f20dfa5" + }, + { + "curr": "56b70bb56fcb27dd05759b00b07bc3c9dc7cc6dbfc9d409cfec2a41d9fd4a1e8", + "next": { + "state": 1, + "output": "cfa973ce4ba1fbdf3b5767e398a5b7b86e30461855d24b7b50dc499eb313b4d0" + }, + "snap": "974a089a6980bf25d8ad1a6a71370bac2663e9bb14702ba90b9db657464c0b3a" + }, + { + "curr": "16742c8e61a4dde3b35179bedbdd7c56e67d03a5faf8973a6094c57e430322df", + "next": { + "state": 1, + "output": "ef39804657a928139750e801c63d1d911532d7d126c80f151ba362f49147972e" + }, + "snap": "b415a283c5e33d8c425cbb003a86c780f73e8d2016fb5dcc6ba1477e551a2c66" + }, + { + "curr": "b081e1c075c9114a6c74cf87a0767ee877f02e88e18a8bf97b8f268ff120ad0d", + "next": { + "state": 1, + "output": "162b859558c7c51c6416f659dbd8d70236c75540196e5d7a5dee2a66744ebbf5" + }, + "snap": "66f8fb3f36bbe328bbbe14151951891d455ad0fba1d19d05531226c0909a84c7" + }, + { + "curr": "822b766e755e83d4ad08a38e86466f47452a2d7c4702295ebd3235332db76a05", + "next": { + "state": 1, + "output": "1c04dc66c3410efc535044f4250c02490627b549f99a8873e4857b2cec4d51c8" + }, + "snap": "163a49fa560761217710f6bbbf85179514aa7714d373337dde7f200f8d6c623a" + }, + { + "curr": "75b77814875529876258760ed6b6f37d81b5a39183812c684b9e3014bb6b8cf6", + "next": { + "state": 1, + "output": "444088f447eb7ea3d397e7098d57c4f63b66912d24c4a26a29bf1dde7a4fdecc" + }, + "snap": "35472156c463eaf62867c9b229b92e8192e2fe40cf86e269cab65fd0045c996f" + }, + { + "curr": "b331675d693bdb4456f409083a1b8cbadbcef977df765ba7d4ddd787800bdc84", + "next": { + "state": 1, + "output": "3d9627fa5ef81486688dc584f52445560a55496d3b961a7664b0e536655179bb" + }, + "snap": "5a7996730755a90ea5cbd2d726a982f3f3703c3db8bc2a2217bd496b9c0cf3d1" + }, + { + "curr": "11f8c2f8e1cb0d47576f74d9e2fa838f5f3a37180907a24a85d0ad8b647862e4", + "next": { + "state": 1, + "output": "6c0133dfd0411f9975c74d792911bb80fc1555830a943249cea6c2a80e5064d1" + }, + "snap": "48f435285dd96511d0822f7ae1a20e28c6c28019e385313713655fc76fe3bc03" + }, + { + "curr": "5f351041761b45f3e725f98bb8b6713873e30ab6c8aee56ba0823d357c7ebd0d", + "next": { + "state": 1, + "output": "264d3a93bc5fff47a968cc53f0f2f50297e5f9015300bbc357cfb8dec30899c6" + }, + "snap": "4100ad3b1085bd14d1c808ece3b38db97171532d0d11ed5edd57aff0e416e06a" + }, + { + "curr": "a4811c9ba9505e421f0015e5fcfd9f5d204ae85b584766759e844ef85db10d47", + "next": { + "state": 1, + "output": "be4ecc289998a40319be24662c88f161f5e78d4be846b083923614573aa17336" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}` diff --git 
a/ingest/doc.go b/ingest/doc.go new file mode 100644 index 0000000000..35e2436bab --- /dev/null +++ b/ingest/doc.go @@ -0,0 +1,44 @@ +/* + +Package ingest provides primitives for building custom ingestion engines. + +Very often developers need features that are outside of Horizon's scope. While +it provides APIs for building the most common apps, it's not possible to add all +possible features. This is why this package was created. + +Ledger Backend + +Ledger backends are sources of information about Stellar network ledgers. This +can be, for example: a Stellar-Core database, (possibly-remote) Captive +Stellar-Core instances, or History Archives. Please consult the "ledgerbackend" +package docs for more information about each backend. + +Warning: Ledger backends provide low-level xdr.LedgerCloseMeta that should not + be used directly unless the developer really understands this data + structure. Read on to understand how to use ledger backend in higher + level objects. + +Readers + +Readers are objects that wrap ledger backend and provide higher level, developer +friendly APIs for reading ledger data. + +Currently there are three types of readers: + * CheckpointChangeReader reads ledger entries from history buckets for a given + checkpoint ledger. Allow building state (all accounts, trust lines etc.) at + any checkpoint ledger. + * LedgerTransactionReader reads transactions for a given ledger sequence. + * LedgerChangeReader reads all changes to ledger entries created as a result of + transactions (fees and meta) and protocol upgrades in a given ledger. + +Warning: Readers stream BOTH successful and failed transactions; check +transactions status in your application if required. + +Tutorial + +Refer to the examples below for simple use cases, or check out the README (and +its corresponding tutorial/ subfolder) in the repository for a Getting Started +guide: https://github.com/stellar/go/blob/master/ingest/README.md + +*/ +package ingest diff --git a/ingest/doc_test.go b/ingest/doc_test.go new file mode 100644 index 0000000000..7ac0719df3 --- /dev/null +++ b/ingest/doc_test.go @@ -0,0 +1,142 @@ +package ingest + +import ( + "context" + "fmt" + "io" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/network" + "github.com/stellar/go/xdr" +) + +// Example_ledgerentrieshistoryarchive demonstrates how to stream all ledger +// entries live at specific checkpoint ledger from history archives. +func Example_ledgerentrieshistoryarchive() { + archiveURL := "http://history.stellar.org/prd/core-live/core_live_001" + + archive, err := historyarchive.Connect( + archiveURL, + historyarchive.ConnectOptions{Context: context.TODO()}, + ) + if err != nil { + panic(err) + } + + // Ledger must be a checkpoint ledger: (100031+1) mod 64 == 0. 
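+	// With the default checkpoint frequency of 64, checkpoints are ledgers
+	// 63, 127, 191, ...; for an arbitrary ledger L the most recent checkpoint
+	// is L - (L+1)%64.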
+ reader, err := NewCheckpointChangeReader(context.TODO(), archive, 100031) + if err != nil { + panic(err) + } + + var accounts, data, trustlines, offers int + for { + entry, err := reader.Read() + if err == io.EOF { + break + } + if err != nil { + panic(err) + } + + switch entry.Type { + case xdr.LedgerEntryTypeAccount: + accounts++ + case xdr.LedgerEntryTypeData: + data++ + case xdr.LedgerEntryTypeTrustline: + trustlines++ + case xdr.LedgerEntryTypeOffer: + offers++ + default: + panic("Unknown type") + } + } + + fmt.Println("accounts", accounts) + fmt.Println("data", data) + fmt.Println("trustlines", trustlines) + fmt.Println("offers", offers) +} + +// Example_changes demonstrates how to stream ledger entry changes +// for a specific ledger using captive stellar-core. Please note that transaction +// meta IS available when using this backend. +func Example_changes() { + ctx := context.Background() + archiveURL := "http://history.stellar.org/prd/core-live/core_live_001" + networkPassphrase := network.PublicNetworkPassphrase + + captiveCoreToml, err := ledgerbackend.NewCaptiveCoreToml(ledgerbackend.CaptiveCoreTomlParams{ + NetworkPassphrase: networkPassphrase, + HistoryArchiveURLs: []string{archiveURL}, + }) + if err != nil { + panic(err) + } + + // Requires Stellar-Core 13.2.0+ + backend, err := ledgerbackend.NewCaptive( + ledgerbackend.CaptiveCoreConfig{ + BinaryPath: "/bin/stellar-core", + NetworkPassphrase: networkPassphrase, + HistoryArchiveURLs: []string{archiveURL}, + Toml: captiveCoreToml, + }, + ) + if err != nil { + panic(err) + } + + sequence := uint32(3) + + err = backend.PrepareRange(ctx, ledgerbackend.SingleLedgerRange(sequence)) + if err != nil { + panic(err) + } + + changeReader, err := NewLedgerChangeReader(ctx, backend, networkPassphrase, sequence) + if err != nil { + panic(err) + } + + for { + change, err := changeReader.Read() + if err == io.EOF { + break + } + if err != nil { + panic(err) + } + + var action string + switch { + case change.Pre == nil && change.Post != nil: + action = "created" + case change.Pre != nil && change.Post != nil: + action = "updated" + case change.Pre != nil && change.Post == nil: + action = "removed" + } + + switch change.Type { + case xdr.LedgerEntryTypeAccount: + var accountEntry xdr.AccountEntry + if change.Pre != nil { + accountEntry = change.Pre.Data.MustAccount() + } else { + accountEntry = change.Post.Data.MustAccount() + } + fmt.Println("account", accountEntry.AccountId.Address(), action) + case xdr.LedgerEntryTypeData: + fmt.Println("data", action) + case xdr.LedgerEntryTypeTrustline: + fmt.Println("trustline", action) + case xdr.LedgerEntryTypeOffer: + fmt.Println("offer", action) + default: + panic("Unknown type") + } + } +} diff --git a/ingest/errors.go b/ingest/errors.go new file mode 100644 index 0000000000..334ba80317 --- /dev/null +++ b/ingest/errors.go @@ -0,0 +1,20 @@ +package ingest + +import ( + "github.com/stellar/go/support/errors" +) + +// ErrNotFound is returned when the requested ledger is not found +var ErrNotFound = errors.New("ledger not found") + +// StateError is a fatal error indicating that the Change stream +// produced a result which violates fundamental invariants (e.g. an account +// transferred more XLM than the account held in its balance). +type StateError struct { + error +} + +// NewStateError creates a new StateError. 
+func NewStateError(err error) StateError {
+	return StateError{err}
+}
diff --git a/ingest/genesis.go b/ingest/genesis.go
new file mode 100644
index 0000000000..ecc38d6243
--- /dev/null
+++ b/ingest/genesis.go
@@ -0,0 +1,31 @@
+package ingest
+
+import (
+	"github.com/stellar/go/amount"
+	"github.com/stellar/go/keypair"
+	"github.com/stellar/go/xdr"
+)
+
+// GenesisChange returns the Change occurring at the genesis ledger (ledgerseq = 1).
+func GenesisChange(networkPassPhrase string) Change {
+	masterKeyPair := keypair.Master(networkPassPhrase)
+
+	masterAccountEntry := xdr.LedgerEntry{
+		LastModifiedLedgerSeq: 1,
+		Data: xdr.LedgerEntryData{
+			Type: xdr.LedgerEntryTypeAccount,
+			Account: &xdr.AccountEntry{
+				AccountId: xdr.MustAddress(masterKeyPair.Address()),
+				// 100B
+				Balance:    amount.MustParse("100000000000"),
+				SeqNum:     0,
+				Thresholds: xdr.Thresholds{1, 0, 0, 0},
+			},
+		},
+	}
+
+	return Change{
+		Type: masterAccountEntry.Data.Type,
+		Post: &masterAccountEntry,
+	}
+}
diff --git a/ingest/genesis_test.go b/ingest/genesis_test.go
new file mode 100644
index 0000000000..47fe097fc9
--- /dev/null
+++ b/ingest/genesis_test.go
@@ -0,0 +1,19 @@
+package ingest
+
+import (
+	"testing"
+
+	"github.com/stellar/go/xdr"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGenesisLeaderStateReader(t *testing.T) {
+	change := GenesisChange("Public Global Stellar Network ; September 2015")
+	assert.Equal(t, xdr.LedgerEntryTypeAccount, change.Type)
+	assert.Equal(t, xdr.Uint32(1), change.Post.LastModifiedLedgerSeq)
+	account := change.Post.Data.MustAccount()
+	assert.Equal(t, "GAAZI4TCR3TY5OJHCTJC2A4QSY6CJWJH5IAJTGKIN2ER7LBNVKOCCWN7", account.AccountId.Address())
+	assert.Equal(t, xdr.SequenceNumber(0), account.SeqNum)
+	assert.Equal(t, xdr.Int64(1000000000000000000), account.Balance)
+	assert.Equal(t, xdr.Thresholds{1, 0, 0, 0}, account.Thresholds)
+}
diff --git a/ingest/ledger_change_reader.go b/ingest/ledger_change_reader.go
new file mode 100644
index 0000000000..e1aa3fb2ed
--- /dev/null
+++ b/ingest/ledger_change_reader.go
@@ -0,0 +1,144 @@
+package ingest
+
+import (
+	"context"
+	"io"
+
+	"github.com/stellar/go/ingest/ledgerbackend"
+	"github.com/stellar/go/xdr"
+)
+
+// ChangeReader provides convenient, streaming access to a sequence of Changes.
+type ChangeReader interface {
+	// Read should return the next `Change` in the ledger. If there are no more
+	// changes left it should return an `io.EOF` error.
+	Read() (Change, error)
+	// Close should be called when reading is finished. This is especially
+	// helpful when there are still some changes available so the reader can
+	// stop streaming them.
+	Close() error
+}
+
+// ledgerChangeReaderState defines possible states of LedgerChangeReader.
+type ledgerChangeReaderState int
+
+const (
+	// feeChangesState is active when LedgerChangeReader is reading fee changes.
+	feeChangesState ledgerChangeReaderState = iota
+	// metaChangesState is active when LedgerChangeReader is reading transaction meta changes.
+	metaChangesState
+	// upgradeChangesState is active when LedgerChangeReader is reading upgrade changes.
+ upgradeChangesState +) + +// LedgerChangeReader is a ChangeReader which returns Changes from Stellar Core +// for a single ledger +type LedgerChangeReader struct { + *LedgerTransactionReader + state ledgerChangeReaderState + pending []Change + pendingIndex int + upgradeIndex int +} + +// Ensure LedgerChangeReader implements ChangeReader +var _ ChangeReader = (*LedgerChangeReader)(nil) + +// NewLedgerChangeReader constructs a new LedgerChangeReader instance bound to the given ledger. +// Note that the returned LedgerChangeReader is not thread safe and should not be shared +// by multiple goroutines. +func NewLedgerChangeReader(ctx context.Context, backend ledgerbackend.LedgerBackend, networkPassphrase string, sequence uint32) (*LedgerChangeReader, error) { + transactionReader, err := NewLedgerTransactionReader(ctx, backend, networkPassphrase, sequence) + if err != nil { + return nil, err + } + + return &LedgerChangeReader{ + LedgerTransactionReader: transactionReader, + state: feeChangesState, + }, nil +} + +// NewLedgerChangeReaderFromLedgerCloseMeta constructs a new LedgerChangeReader instance bound to the given ledger. +// Note that the returned LedgerChangeReader is not thread safe and should not be shared +// by multiple goroutines. +func NewLedgerChangeReaderFromLedgerCloseMeta(networkPassphrase string, ledger xdr.LedgerCloseMeta) (*LedgerChangeReader, error) { + transactionReader, err := NewLedgerTransactionReaderFromLedgerCloseMeta(networkPassphrase, ledger) + if err != nil { + return nil, err + } + + return &LedgerChangeReader{ + LedgerTransactionReader: transactionReader, + state: feeChangesState, + }, nil +} + +// Read returns the next change in the stream. +// If there are no changes remaining io.EOF is returned as an error. +func (r *LedgerChangeReader) Read() (Change, error) { + // Changes within a ledger should be read in the following order: + // - fee changes of all transactions, + // - transaction meta changes of all transactions, + // - upgrade changes. + // Because a single transaction can introduce many changes we read all the + // changes from a single transaction and save them in r.pending. + // When Read() is called we stream pending changes first. We also call Read() + // recursively after adding some changes (what will return them from r.pending) + // to not duplicate the code. + if r.pendingIndex < len(r.pending) { + next := r.pending[r.pendingIndex] + r.pendingIndex++ + if r.pendingIndex == len(r.pending) { + r.pendingIndex = 0 + r.pending = r.pending[:0] + } + return next, nil + } + + switch r.state { + case feeChangesState, metaChangesState: + tx, err := r.LedgerTransactionReader.Read() + if err != nil { + if err == io.EOF { + // If done streaming fee changes rewind to stream meta changes + if r.state == feeChangesState { + r.LedgerTransactionReader.Rewind() + } + r.state++ + return r.Read() + } + return Change{}, err + } + + switch r.state { + case feeChangesState: + r.pending = append(r.pending, tx.GetFeeChanges()...) + case metaChangesState: + metaChanges, err := tx.GetChanges() + if err != nil { + return Change{}, err + } + r.pending = append(r.pending, metaChanges...) + } + return r.Read() + case upgradeChangesState: + // Get upgrade changes + if r.upgradeIndex < len(r.LedgerTransactionReader.ledgerCloseMeta.V0.UpgradesProcessing) { + changes := GetChangesFromLedgerEntryChanges( + r.LedgerTransactionReader.ledgerCloseMeta.V0.UpgradesProcessing[r.upgradeIndex].Changes, + ) + r.pending = append(r.pending, changes...) 
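+			// Advance to the next upgrade entry and recurse; the recursive Read()
+			// call drains r.pending first, so upgrade changes are streamed in order.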
+ r.upgradeIndex++ + return r.Read() + } + } + + return Change{}, io.EOF +} + +// Close should be called when reading is finished. +func (r *LedgerChangeReader) Close() error { + r.pending = nil + return r.LedgerTransactionReader.Close() +} diff --git a/ingest/ledger_change_reader_test.go b/ingest/ledger_change_reader_test.go new file mode 100644 index 0000000000..c461ef28c3 --- /dev/null +++ b/ingest/ledger_change_reader_test.go @@ -0,0 +1,323 @@ +package ingest + +import ( + "context" + "fmt" + "io" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/network" + "github.com/stellar/go/xdr" +) + +const ( + feeAddress = "GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A" + metaAddress = "GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX" + upgradeAddress = "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2" +) + +func TestNewLedgerChangeReaderFails(t *testing.T) { + ctx := context.Background() + mock := &ledgerbackend.MockDatabaseBackend{} + seq := uint32(123) + mock.On("GetLedger", ctx, seq).Return( + xdr.LedgerCloseMeta{}, + fmt.Errorf("ledger error"), + ).Once() + _, err := NewLedgerChangeReader(ctx, mock, network.TestNetworkPassphrase, seq) + assert.EqualError( + t, + err, + "error getting ledger from the backend: ledger error", + ) +} + +func TestNewLedgerChangeReaderSucceeds(t *testing.T) { + ctx := context.Background() + mock := &ledgerbackend.MockDatabaseBackend{} + seq := uint32(123) + + header := xdr.LedgerHeaderHistoryEntry{ + Hash: xdr.Hash{1, 2, 3}, + Header: xdr.LedgerHeader{ + LedgerVersion: 7, + }, + } + + mock.On("GetLedger", ctx, seq).Return( + xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: header, + }, + }, + nil, + ).Once() + + reader, err := NewLedgerChangeReader(ctx, mock, network.TestNetworkPassphrase, seq) + assert.NoError(t, err) + + assert.Equal(t, reader.GetHeader(), header) +} + +func buildChange(account string, balance int64) xdr.LedgerEntryChange { + return xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress(account), + Balance: xdr.Int64(balance), + }, + }, + }, + } +} + +type balanceEntry struct { + address string + balance int64 +} + +func parseChange(change Change) balanceEntry { + address := change.Post.Data.Account.AccountId.Address() + balance := int64(change.Post.Data.Account.Balance) + + return balanceEntry{address, balance} +} + +func assertChangesEqual( + t *testing.T, + ctx context.Context, + sequence uint32, + backend ledgerbackend.LedgerBackend, + expected []balanceEntry, +) { + reader, err := NewLedgerChangeReader(ctx, backend, network.TestNetworkPassphrase, sequence) + assert.NoError(t, err) + + changes := []balanceEntry{} + for { + change, err := reader.Read() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + changes = append(changes, parseChange(change)) + } + + assert.Equal(t, expected, changes) +} + +func TestLedgerChangeReaderOrder(t *testing.T) { + ctx := context.Background() + mock := &ledgerbackend.MockDatabaseBackend{} + seq := uint32(123) + + src := xdr.MustAddress("GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON") + firstTx := xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + Fee: 1, + SourceAccount: 
src.ToMuxedAccount(), + }, + }, + } + firstTxHash, err := network.HashTransactionInEnvelope(firstTx, network.TestNetworkPassphrase) + assert.NoError(t, err) + + src = xdr.MustAddress("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU") + secondTx := xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + Fee: 2, + SourceAccount: src.ToMuxedAccount(), + }, + }, + } + secondTxHash, err := network.HashTransactionInEnvelope(secondTx, network.TestNetworkPassphrase) + assert.NoError(t, err) + + ledger := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{Header: xdr.LedgerHeader{LedgerVersion: 10}}, + TxSet: xdr.TransactionSet{ + Txs: []xdr.TransactionEnvelope{ + secondTx, + firstTx, + }, + }, + TxProcessing: []xdr.TransactionResultMeta{ + { + Result: xdr.TransactionResultPair{TransactionHash: firstTxHash}, + FeeProcessing: xdr.LedgerEntryChanges{ + buildChange(feeAddress, 100), + buildChange(feeAddress, 200), + }, + TxApplyProcessing: xdr.TransactionMeta{ + V: 1, + V1: &xdr.TransactionMetaV1{ + Operations: []xdr.OperationMeta{ + { + Changes: xdr.LedgerEntryChanges{ + buildChange( + metaAddress, + 300, + ), + buildChange( + metaAddress, + 400, + ), + }, + }, + }, + }, + }, + }, + { + Result: xdr.TransactionResultPair{TransactionHash: secondTxHash}, + FeeProcessing: xdr.LedgerEntryChanges{ + buildChange(feeAddress, 300), + }, + TxApplyProcessing: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + TxChangesBefore: xdr.LedgerEntryChanges{ + buildChange(metaAddress, 600), + }, + Operations: []xdr.OperationMeta{ + { + Changes: xdr.LedgerEntryChanges{ + buildChange(metaAddress, 700), + }, + }, + }, + TxChangesAfter: xdr.LedgerEntryChanges{ + buildChange(metaAddress, 800), + buildChange(metaAddress, 900), + }, + }, + }, + }, + }, + UpgradesProcessing: []xdr.UpgradeEntryMeta{ + { + Changes: xdr.LedgerEntryChanges{ + buildChange(upgradeAddress, 2), + }, + }, + { + Changes: xdr.LedgerEntryChanges{ + buildChange(upgradeAddress, 3), + }, + }, + }, + }, + } + mock.On("GetLedger", ctx, seq).Return(ledger, nil).Once() + + assertChangesEqual(t, ctx, seq, mock, []balanceEntry{ + {feeAddress, 100}, + {feeAddress, 200}, + {feeAddress, 300}, + {metaAddress, 300}, + {metaAddress, 400}, + {metaAddress, 600}, + {metaAddress, 700}, + {metaAddress, 800}, + {metaAddress, 900}, + {upgradeAddress, 2}, + {upgradeAddress, 3}, + }) + mock.AssertExpectations(t) + + ledger.V0.LedgerHeader.Header.LedgerVersion = 8 + mock.On("GetLedger", ctx, seq).Return(ledger, nil).Once() + _, err = NewLedgerChangeReader(ctx, mock, network.TestNetworkPassphrase, seq) + assert.EqualError( + t, + err, + "error extracting transactions from ledger close meta: TransactionMeta.V=2 is required in protocol"+ + " version older than version 10. 
Please process ledgers again using the latest stellar-core version.", + ) + mock.AssertExpectations(t) + + ledger.V0.LedgerHeader.Header.LedgerVersion = 9 + ledger.V0.TxProcessing[0].FeeProcessing = xdr.LedgerEntryChanges{} + ledger.V0.TxProcessing[1].FeeProcessing = xdr.LedgerEntryChanges{} + mock.On("GetLedger", ctx, seq).Return(ledger, nil).Once() + + assertChangesEqual(t, ctx, seq, mock, []balanceEntry{ + {metaAddress, 300}, + {metaAddress, 400}, + {metaAddress, 600}, + {metaAddress, 700}, + {metaAddress, 800}, + {metaAddress, 900}, + {upgradeAddress, 2}, + {upgradeAddress, 3}, + }) + mock.AssertExpectations(t) + + ledger.V0.LedgerHeader.Header.LedgerVersion = 10 + ledger.V0.TxProcessing[0].FeeProcessing = xdr.LedgerEntryChanges{} + ledger.V0.TxProcessing[1].FeeProcessing = xdr.LedgerEntryChanges{} + mock.On("GetLedger", ctx, seq).Return(ledger, nil).Once() + + assertChangesEqual(t, ctx, seq, mock, []balanceEntry{ + {metaAddress, 300}, + {metaAddress, 400}, + {metaAddress, 600}, + {metaAddress, 700}, + {metaAddress, 800}, + {metaAddress, 900}, + {upgradeAddress, 2}, + {upgradeAddress, 3}, + }) + mock.AssertExpectations(t) + + ledger.V0.UpgradesProcessing = []xdr.UpgradeEntryMeta{ + { + Changes: xdr.LedgerEntryChanges{}, + }, + { + Changes: xdr.LedgerEntryChanges{}, + }, + } + mock.On("GetLedger", ctx, seq).Return(ledger, nil).Once() + + assertChangesEqual(t, ctx, seq, mock, []balanceEntry{ + {metaAddress, 300}, + {metaAddress, 400}, + {metaAddress, 600}, + {metaAddress, 700}, + {metaAddress, 800}, + {metaAddress, 900}, + }) + mock.AssertExpectations(t) + + ledger.V0.TxProcessing[0].TxApplyProcessing = xdr.TransactionMeta{ + V: 1, + V1: &xdr.TransactionMetaV1{ + Operations: []xdr.OperationMeta{}, + }, + } + ledger.V0.TxProcessing[1].TxApplyProcessing = xdr.TransactionMeta{ + V: 1, + V1: &xdr.TransactionMetaV1{ + Operations: []xdr.OperationMeta{}, + }, + } + mock.On("GetLedger", ctx, seq).Return(ledger, nil).Once() + + assertChangesEqual(t, ctx, seq, mock, []balanceEntry{}) + mock.AssertExpectations(t) +} diff --git a/ingest/ledger_transaction.go b/ingest/ledger_transaction.go new file mode 100644 index 0000000000..9410dcd0eb --- /dev/null +++ b/ingest/ledger_transaction.go @@ -0,0 +1,127 @@ +package ingest + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// LedgerTransaction represents the data for a single transaction within a ledger. +type LedgerTransaction struct { + Index uint32 + Envelope xdr.TransactionEnvelope + Result xdr.TransactionResultPair + // FeeChanges and UnsafeMeta are low level values, do not use them directly unless + // you know what you are doing. + // Use LedgerTransaction.GetChanges() for higher level access to ledger + // entry changes. + FeeChanges xdr.LedgerEntryChanges + UnsafeMeta xdr.TransactionMeta +} + +func (t *LedgerTransaction) txInternalError() bool { + return t.Result.Result.Result.Code == xdr.TransactionResultCodeTxInternalError +} + +// GetFeeChanges returns a developer friendly representation of LedgerEntryChanges +// connected to fees. +func (t *LedgerTransaction) GetFeeChanges() []Change { + return GetChangesFromLedgerEntryChanges(t.FeeChanges) +} + +// GetChanges returns a developer friendly representation of LedgerEntryChanges. +// It contains transaction changes and operation changes in that order. If the +// transaction failed with TxInternalError, operations and txChangesAfter are +// omitted. It doesn't support legacy TransactionMeta.V=0. 
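+//
+// Fee changes are not part of the result; read them separately with
+// GetFeeChanges. LedgerChangeReader relies on this split to stream fee changes
+// for all transactions before any transaction meta changes.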
+func (t *LedgerTransaction) GetChanges() ([]Change, error) { + var changes []Change + + // Transaction meta + switch t.UnsafeMeta.V { + case 0: + return changes, errors.New("TransactionMeta.V=0 not supported") + case 1: + v1Meta := t.UnsafeMeta.MustV1() + txChanges := GetChangesFromLedgerEntryChanges(v1Meta.TxChanges) + changes = append(changes, txChanges...) + + // Ignore operations meta if txInternalError https://github.com/stellar/go/issues/2111 + if t.txInternalError() { + return changes, nil + } + + for _, operationMeta := range v1Meta.Operations { + opChanges := GetChangesFromLedgerEntryChanges( + operationMeta.Changes, + ) + changes = append(changes, opChanges...) + } + + case 2: + v2Meta := t.UnsafeMeta.MustV2() + txChangesBefore := GetChangesFromLedgerEntryChanges(v2Meta.TxChangesBefore) + changes = append(changes, txChangesBefore...) + + // Ignore operations meta and txChangesAfter if txInternalError + // https://github.com/stellar/go/issues/2111 + if t.txInternalError() { + return changes, nil + } + + for _, operationMeta := range v2Meta.Operations { + opChanges := GetChangesFromLedgerEntryChanges( + operationMeta.Changes, + ) + changes = append(changes, opChanges...) + } + + txChangesAfter := GetChangesFromLedgerEntryChanges(v2Meta.TxChangesAfter) + changes = append(changes, txChangesAfter...) + default: + return changes, errors.New("Unsupported TransactionMeta version") + } + + return changes, nil +} + +// GetOperationChanges returns a developer friendly representation of LedgerEntryChanges. +// It contains only operation changes. +func (t *LedgerTransaction) GetOperationChanges(operationIndex uint32) ([]Change, error) { + changes := []Change{} + + // Transaction meta + switch t.UnsafeMeta.V { + case 0: + return changes, errors.New("TransactionMeta.V=0 not supported") + case 1: + // Ignore operations meta if txInternalError https://github.com/stellar/go/issues/2111 + if t.txInternalError() { + return changes, nil + } + + v1Meta := t.UnsafeMeta.MustV1() + changes = operationChanges(v1Meta.Operations, operationIndex) + case 2: + // Ignore operations meta if txInternalError https://github.com/stellar/go/issues/2111 + if t.txInternalError() { + return changes, nil + } + + v2Meta := t.UnsafeMeta.MustV2() + changes = operationChanges(v2Meta.Operations, operationIndex) + default: + return changes, errors.New("Unsupported TransactionMeta version") + } + + return changes, nil +} + +func operationChanges(ops []xdr.OperationMeta, index uint32) []Change { + if len(ops) == 0 || int(index) >= len(ops) { + return []Change{} + } + + operationMeta := ops[index] + return GetChangesFromLedgerEntryChanges( + operationMeta.Changes, + ) +} diff --git a/ingest/ledger_transaction_reader.go b/ingest/ledger_transaction_reader.go new file mode 100644 index 0000000000..ef68e67aa0 --- /dev/null +++ b/ingest/ledger_transaction_reader.go @@ -0,0 +1,115 @@ +package ingest + +import ( + "context" + "encoding/hex" + "io" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/network" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// LedgerTransactionReader reads transactions for a given ledger sequence from a backend. +// Use NewTransactionReader to create a new instance. +type LedgerTransactionReader struct { + ledgerCloseMeta xdr.LedgerCloseMeta + transactions []LedgerTransaction + readIdx int +} + +// NewLedgerTransactionReader creates a new TransactionReader instance. 
+// Note that TransactionReader is not thread safe and should not be shared by multiple goroutines.
+func NewLedgerTransactionReader(ctx context.Context, backend ledgerbackend.LedgerBackend, networkPassphrase string, sequence uint32) (*LedgerTransactionReader, error) {
+	ledgerCloseMeta, err := backend.GetLedger(ctx, sequence)
+	if err != nil {
+		return nil, errors.Wrap(err, "error getting ledger from the backend")
+	}
+
+	return NewLedgerTransactionReaderFromLedgerCloseMeta(networkPassphrase, ledgerCloseMeta)
+}
+
+// NewLedgerTransactionReaderFromLedgerCloseMeta creates a new TransactionReader instance from xdr.LedgerCloseMeta.
+// Note that TransactionReader is not thread safe and should not be shared by multiple goroutines.
+func NewLedgerTransactionReaderFromLedgerCloseMeta(networkPassphrase string, ledgerCloseMeta xdr.LedgerCloseMeta) (*LedgerTransactionReader, error) {
+	reader := &LedgerTransactionReader{ledgerCloseMeta: ledgerCloseMeta}
+	if err := reader.storeTransactions(ledgerCloseMeta, networkPassphrase); err != nil {
+		return nil, errors.Wrap(err, "error extracting transactions from ledger close meta")
+	}
+	return reader, nil
+}
+
+// GetSequence returns the sequence number of the ledger data stored by this object.
+func (reader *LedgerTransactionReader) GetSequence() uint32 {
+	return reader.ledgerCloseMeta.LedgerSequence()
+}
+
+// GetHeader returns the XDR Header data associated with the stored ledger.
+func (reader *LedgerTransactionReader) GetHeader() xdr.LedgerHeaderHistoryEntry {
+	return reader.ledgerCloseMeta.V0.LedgerHeader
+}
+
+// Read returns the next transaction in the ledger, ordered by tx number, each time
+// it is called. When there are no more transactions to return, an EOF error is returned.
+func (reader *LedgerTransactionReader) Read() (LedgerTransaction, error) {
+	if reader.readIdx < len(reader.transactions) {
+		reader.readIdx++
+		return reader.transactions[reader.readIdx-1], nil
+	}
+	return LedgerTransaction{}, io.EOF
+}
+
+// Rewind resets the reader back to the first transaction in the ledger.
+func (reader *LedgerTransactionReader) Rewind() {
+	reader.readIdx = 0
+}
+
+// storeTransactions maps the close meta data into a slice of LedgerTransaction structs, to provide
+// a per-transaction view of the data when Read() is called.
+func (reader *LedgerTransactionReader) storeTransactions(lcm xdr.LedgerCloseMeta, networkPassphrase string) error {
+	byHash := map[xdr.Hash]xdr.TransactionEnvelope{}
+	for i, tx := range lcm.V0.TxSet.Txs {
+		hash, err := network.HashTransactionInEnvelope(tx, networkPassphrase)
+		if err != nil {
+			return errors.Wrapf(err, "could not hash transaction %d in TxSet", i)
+		}
+		byHash[hash] = tx
+	}
+
+	for i := range lcm.V0.TxProcessing {
+		result := lcm.V0.TxProcessing[i].Result
+		envelope, ok := byHash[result.TransactionHash]
+		if !ok {
+			hexHash := hex.EncodeToString(result.TransactionHash[:])
+			return errors.Errorf("unknown tx hash in LedgerCloseMeta: %v", hexHash)
+		}
+
+		// We check the version only if FeeProcessing is non-empty because some backends
+		// (like HistoryArchiveBackend) do not return meta.
+		if lcm.V0.LedgerHeader.Header.LedgerVersion < 10 && lcm.V0.TxProcessing[i].TxApplyProcessing.V != 2 &&
+			len(lcm.V0.TxProcessing[i].FeeProcessing) > 0 {
+			return errors.New(
+				"TransactionMeta.V=2 is required in protocol version older than version 10. 
" + + "Please process ledgers again using the latest stellar-core version.", + ) + } + + reader.transactions = append(reader.transactions, LedgerTransaction{ + Index: uint32(i + 1), // Transactions start at '1' + Envelope: envelope, + Result: result, + UnsafeMeta: lcm.V0.TxProcessing[i].TxApplyProcessing, + FeeChanges: lcm.V0.TxProcessing[i].FeeProcessing, + }) + } + return nil +} + +// Close should be called when reading is finished. This is especially +// helpful when there are still some transactions available so reader can stop +// streaming them. +func (reader *LedgerTransactionReader) Close() error { + reader.transactions = nil + return nil +} diff --git a/ingest/ledger_transaction_test.go b/ingest/ledger_transaction_test.go new file mode 100644 index 0000000000..348b1d9688 --- /dev/null +++ b/ingest/ledger_transaction_test.go @@ -0,0 +1,963 @@ +package ingest + +import ( + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestChangeAccountChangedExceptSignersInvalidType(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeOffer, + } + + assert.Panics(t, func() { + change.AccountChangedExceptSigners() + }) +} + +func TestFeeMetaAndOperationsChangesSeparate(t *testing.T) { + tx := LedgerTransaction{ + FeeChanges: xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 100, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 200, + }, + }, + }, + }, + }, + UnsafeMeta: xdr.TransactionMeta{ + V: 1, + V1: &xdr.TransactionMetaV1{ + Operations: []xdr.OperationMeta{ + { + Changes: xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 300, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 400, + }, + }, + }, + }, + }, + }, + }, + }, + }} + + feeChanges := tx.GetFeeChanges() + assert.Len(t, feeChanges, 1) + assert.Equal(t, feeChanges[0].Pre.Data.MustAccount().Balance, xdr.Int64(100)) + assert.Equal(t, feeChanges[0].Post.Data.MustAccount().Balance, xdr.Int64(200)) + + metaChanges, err := tx.GetChanges() + assert.NoError(t, err) + assert.Len(t, metaChanges, 1) + assert.Equal(t, metaChanges[0].Pre.Data.MustAccount().Balance, xdr.Int64(300)) + assert.Equal(t, metaChanges[0].Post.Data.MustAccount().Balance, xdr.Int64(400)) + + operationChanges, err := tx.GetOperationChanges(0) + assert.NoError(t, err) + assert.Len(t, operationChanges, 1) + assert.Equal(t, operationChanges[0].Pre.Data.MustAccount().Balance, xdr.Int64(300)) + assert.Equal(t, 
operationChanges[0].Post.Data.MustAccount().Balance, xdr.Int64(400)) + + // Ignore operation meta if tx result is txInternalError + // https://github.com/stellar/go/issues/2111 + tx.Result.Result.Result.Code = xdr.TransactionResultCodeTxInternalError + metaChanges, err = tx.GetChanges() + assert.NoError(t, err) + assert.Len(t, metaChanges, 0) + + operationChanges, err = tx.GetOperationChanges(0) + assert.NoError(t, err) + assert.Len(t, operationChanges, 0) +} + +func TestFailedTransactionOperationChangesMeta(t *testing.T) { + testCases := []struct { + desc string + meta xdr.TransactionMeta + }{ + { + desc: "V0", + meta: xdr.TransactionMeta{ + Operations: &[]xdr.OperationMeta{}, + }, + }, + { + desc: "V1", + meta: xdr.TransactionMeta{ + V: 1, + V1: &xdr.TransactionMetaV1{}, + }, + }, + { + desc: "V2", + meta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tx := LedgerTransaction{ + Result: xdr.TransactionResultPair{ + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxFailed, + }, + }, + }, + UnsafeMeta: tc.meta, + } + + operationChanges, err := tx.GetOperationChanges(0) + if tx.UnsafeMeta.V == 0 { + assert.Error(t, err) + assert.EqualError(t, err, "TransactionMeta.V=0 not supported") + } else { + assert.NoError(t, err) + assert.Len(t, operationChanges, 0) + } + }) + } +} +func TestMetaV2Order(t *testing.T) { + tx := LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + TxChangesBefore: xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX"), + Balance: 100, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX"), + Balance: 200, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 100, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 200, + }, + }, + }, + }, + }, + Operations: []xdr.OperationMeta{ + { + Changes: xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 300, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: 
xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 400, + }, + }, + }, + }, + }, + }, + }, + TxChangesAfter: xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX"), + Balance: 300, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX"), + Balance: 400, + }, + }, + }, + }, + }, + }, + }} + + metaChanges, err := tx.GetChanges() + assert.NoError(t, err) + assert.Len(t, metaChanges, 4) + + change := metaChanges[0] + id := change.Pre.Data.MustAccount().AccountId + assert.Equal(t, id.Address(), "GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX") + assert.Equal(t, change.Pre.Data.MustAccount().Balance, xdr.Int64(100)) + assert.Equal(t, change.Post.Data.MustAccount().Balance, xdr.Int64(200)) + + change = metaChanges[1] + id = change.Pre.Data.MustAccount().AccountId + assert.Equal(t, id.Address(), "GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A") + assert.Equal(t, change.Pre.Data.MustAccount().Balance, xdr.Int64(100)) + assert.Equal(t, change.Post.Data.MustAccount().Balance, xdr.Int64(200)) + + change = metaChanges[2] + id = change.Pre.Data.MustAccount().AccountId + assert.Equal(t, id.Address(), "GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A") + assert.Equal(t, change.Pre.Data.MustAccount().Balance, xdr.Int64(300)) + assert.Equal(t, change.Post.Data.MustAccount().Balance, xdr.Int64(400)) + + change = metaChanges[3] + id = change.Pre.Data.MustAccount().AccountId + assert.Equal(t, id.Address(), "GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX") + assert.Equal(t, change.Pre.Data.MustAccount().Balance, xdr.Int64(300)) + assert.Equal(t, change.Post.Data.MustAccount().Balance, xdr.Int64(400)) + + operationChanges, err := tx.GetOperationChanges(0) + assert.NoError(t, err) + assert.Len(t, operationChanges, 1) + + // Ignore operations meta and txChangesAfter if txInternalError + // https://github.com/stellar/go/issues/2111 + tx.Result.Result.Result.Code = xdr.TransactionResultCodeTxInternalError + metaChanges, err = tx.GetChanges() + assert.NoError(t, err) + assert.Len(t, metaChanges, 2) + + change = metaChanges[0] + id = change.Pre.Data.MustAccount().AccountId + assert.Equal(t, id.Address(), "GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX") + assert.Equal(t, change.Pre.Data.MustAccount().Balance, xdr.Int64(100)) + assert.Equal(t, change.Post.Data.MustAccount().Balance, xdr.Int64(200)) + + change = metaChanges[1] + id = change.Pre.Data.MustAccount().AccountId + assert.Equal(t, id.Address(), "GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A") + assert.Equal(t, change.Pre.Data.MustAccount().Balance, xdr.Int64(100)) + assert.Equal(t, change.Post.Data.MustAccount().Balance, xdr.Int64(200)) + + operationChanges, err = tx.GetOperationChanges(0) + assert.NoError(t, err) + assert.Len(t, operationChanges, 0) + +} + +func TestMetaV0(t *testing.T) { + tx := LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 0, + }} + + _, err := tx.GetChanges() + assert.Error(t, err) + assert.EqualError(t, err, "TransactionMeta.V=0 
not supported") +} + +func TestChangeAccountChangedExceptSignersLastModifiedLedgerSeq(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 11, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + changed, err := change.AccountChangedExceptSigners() + assert.NoError(t, err) + assert.True(t, changed) +} + +func TestChangeAccountChangedExceptSignersNoPre(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + changed, err := change.AccountChangedExceptSigners() + assert.NoError(t, err) + assert.True(t, changed) +} + +func TestChangeAccountChangedExceptSignersNoPost(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Post: nil, + } + changed, err := change.AccountChangedExceptSigners() + assert.NoError(t, err) + assert.True(t, changed) +} + +func TestChangeAccountChangedExceptSignersMasterKeyRemoved(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + // Master weight = 1 + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + // Master weight = 0 + Thresholds: [4]byte{0, 1, 1, 1}, + }, + }, + }, + } + + changed, err := change.AccountChangedExceptSigners() + assert.NoError(t, err) + assert.True(t, changed) +} + +func TestChangeAccountChangedExceptSignersSignerChange(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 2, + }, + }, 
+ }, + }, + }, + } + + changed, err := change.AccountChangedExceptSigners() + assert.NoError(t, err) + assert.False(t, changed) +} + +func TestChangeAccountChangedExceptSignersNoChanges(t *testing.T) { + inflationDest := xdr.MustAddress("GBAH2GBLJB54JAROJ3FVO4ZTTJJI3XKOBTMJOZFUJ3UHYIVNJTLJUYFY") + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Balance: 1000, + SeqNum: 432894732, + NumSubEntries: 2, + InflationDest: &inflationDest, + Flags: 4, + HomeDomain: "stellar.org", + Thresholds: [4]byte{1, 1, 1, 1}, + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + Ext: xdr.AccountEntryExt{ + V: 1, + V1: &xdr.AccountEntryExtensionV1{ + Liabilities: xdr.Liabilities{ + Buying: 10, + Selling: 20, + }, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Balance: 1000, + SeqNum: 432894732, + NumSubEntries: 2, + InflationDest: &inflationDest, + Flags: 4, + HomeDomain: "stellar.org", + Thresholds: [4]byte{1, 1, 1, 1}, + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + Ext: xdr.AccountEntryExt{ + V: 1, + V1: &xdr.AccountEntryExtensionV1{ + Liabilities: xdr.Liabilities{ + Buying: 10, + Selling: 20, + }, + }, + }, + }, + }, + }, + } + + changed, err := change.AccountChangedExceptSigners() + assert.NoError(t, err) + assert.False(t, changed) + + // Make sure pre and post not modified + assert.NotNil(t, change.Pre.Data.Account.Signers) + assert.Len(t, change.Pre.Data.Account.Signers, 1) + + assert.NotNil(t, change.Post.Data.Account.Signers) + assert.Len(t, change.Post.Data.Account.Signers, 1) +} + +func TestChangeAccountSignersChangedInvalidType(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeOffer, + } + + assert.Panics(t, func() { + change.AccountSignersChanged() + }) +} + +func TestChangeAccountSignersChangedNoPre(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedNoPostMasterKey(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + // Master weight = 1 + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + Post: nil, + } + + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedNoPostNoMasterKey(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: 
&xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + // Master weight = 0 + Thresholds: [4]byte{0, 1, 1, 1}, + }, + }, + }, + Post: nil, + } + + // Account being merge can still have signers so they will be removed. + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedMasterKeyRemoved(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + // Master weight = 1 + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + // Master weight = 0 + Thresholds: [4]byte{0, 1, 1, 1}, + }, + }, + }, + } + + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedMasterKeyAdded(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + // Master weight = 0 + Thresholds: [4]byte{0, 1, 1, 1}, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + // Master weight = 1 + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + } + + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedSignerAdded(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{}, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + }, + }, + }, + } + + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedSignerRemoved(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + 
Signers: []xdr.Signer{}, + }, + }, + }, + } + + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedSignerWeightChanged(t *testing.T) { + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 2, + }, + }, + }, + }, + }, + } + + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedSponsorAdded(t *testing.T) { + sponsor, err := xdr.AddressToAccountId("GBADGWKHSUFOC4C7E3KXKINZSRX5KPHUWHH67UGJU77LEORGVLQ3BN3B") + assert.NoError(t, err) + + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + Ext: xdr.AccountEntryExt{ + V1: &xdr.AccountEntryExtensionV1{ + Ext: xdr.AccountEntryExtensionV1Ext{ + V2: &xdr.AccountEntryExtensionV2{ + SignerSponsoringIDs: []xdr.SponsorshipDescriptor{ + &sponsor, + }, + }, + }, + }, + }, + }, + }, + }, + } + + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedSponsorRemoved(t *testing.T) { + sponsor, err := xdr.AddressToAccountId("GBADGWKHSUFOC4C7E3KXKINZSRX5KPHUWHH67UGJU77LEORGVLQ3BN3B") + assert.NoError(t, err) + + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + Ext: xdr.AccountEntryExt{ + V1: &xdr.AccountEntryExtensionV1{ + Ext: xdr.AccountEntryExtensionV1Ext{ + V2: &xdr.AccountEntryExtensionV2{ + SignerSponsoringIDs: []xdr.SponsorshipDescriptor{ + &sponsor, + }, + }, + }, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: 
xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + }, + }, + }, + } + + assert.True(t, change.AccountSignersChanged()) +} + +func TestChangeAccountSignersChangedSponsorChanged(t *testing.T) { + sponsor, err := xdr.AddressToAccountId("GBADGWKHSUFOC4C7E3KXKINZSRX5KPHUWHH67UGJU77LEORGVLQ3BN3B") + assert.NoError(t, err) + + newSponsor, err := xdr.AddressToAccountId("GB2Y6D5QFDJSCR6GSBO5D2LOLGZI4RVPRGZSSPLIFWNJZ7SL73TOMXAQ") + assert.NoError(t, err) + + change := Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + Ext: xdr.AccountEntryExt{ + V1: &xdr.AccountEntryExtensionV1{ + Ext: xdr.AccountEntryExtensionV1Ext{ + V2: &xdr.AccountEntryExtensionV2{ + SignerSponsoringIDs: []xdr.SponsorshipDescriptor{ + &sponsor, + }, + }, + }, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 10, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Weight: 1, + }, + }, + Ext: xdr.AccountEntryExt{ + V1: &xdr.AccountEntryExtensionV1{ + Ext: xdr.AccountEntryExtensionV1Ext{ + V2: &xdr.AccountEntryExtensionV2{ + SignerSponsoringIDs: []xdr.SponsorshipDescriptor{ + &newSponsor, + }, + }, + }, + }, + }, + }, + }, + }, + } + + assert.True(t, change.AccountSignersChanged()) +} diff --git a/ingest/ledgerbackend/buffered_meta_pipe_reader.go b/ingest/ledgerbackend/buffered_meta_pipe_reader.go new file mode 100644 index 0000000000..303604768c --- /dev/null +++ b/ingest/ledgerbackend/buffered_meta_pipe_reader.go @@ -0,0 +1,133 @@ +package ledgerbackend + +import ( + "bufio" + "io" + "time" + + "github.com/pkg/errors" + xdr3 "github.com/stellar/go-xdr/xdr3" + + "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +const ( + // The constants below define sizes of metaPipeBufferSize (binary) and + // ledgerReadAheadBufferSize (in ledgers). In general: + // + // metaPipeBufferSize >= + // ledgerReadAheadBufferSize * max over networks (average ledger size in bytes) + // + // so that meta pipe buffer always have binary data that can be unmarshaled into + // ledger buffer. + // After checking a few latest ledgers in pubnet and testnet the average size + // is: 100,000 and 5,000 bytes respectively. + + // metaPipeBufferSize defines the meta pipe buffer size. We need at least + // a couple MB to ensure there are at least a few ledgers captive core can + // unmarshal into read-ahead buffer while waiting for client to finish + // processing previous ledgers. + metaPipeBufferSize = 10 * 1024 * 1024 + // ledgerReadAheadBufferSize defines the size (in ledgers) of read ahead + // buffer that stores unmarshalled ledgers. This is especially important in + // an online mode when GetLedger calls are not blocking. In such case, clients + // usually wait for a specific time duration before checking if the ledger is + // available. When catching up and small buffer this can increase the overall + // time because ledgers are not available. 
+	ledgerReadAheadBufferSize = 20
+)
+
+type metaResult struct {
+	*xdr.LedgerCloseMeta
+	err error
+}
+
+// bufferedLedgerMetaReader is responsible for buffering meta pipe data in a
+// fast and safe manner and unmarshaling it into XDR objects.
+//
+// It solves the following issues:
+//
+// * Decouples buffering from stellarCoreRunner so it can focus on running core.
+// * Decouples unmarshalling and buffering of LedgerCloseMeta's from CaptiveCore.
+// * By adding buffering it allows unmarshaling the ledgers available in Stellar-Core
+// while previous ledgers are being processed.
+// * Limits memory usage in case large ledgers are closed by the network.
+//
+// Internally, it keeps two buffers: a bufio.Reader with binary ledger data and
+// a buffered channel with unmarshaled xdr.LedgerCloseMeta objects ready for
+// processing. The first buffer removes the overhead of reading from
+// a file. The second buffer allows unmarshaling binary data into XDR objects
+// (which can be a bottleneck) while clients are processing previous ledgers.
+//
+// Finally, when a large ledger (larger than the binary buffer) is closed it waits
+// until the xdr.LedgerCloseMeta objects channel is empty. This prevents memory
+// exhaustion when the network closes a series of large ledgers.
+type bufferedLedgerMetaReader struct {
+	r       *bufio.Reader
+	c       chan metaResult
+	decoder *xdr3.Decoder
+}
+
+// newBufferedLedgerMetaReader creates a new meta reader that will shut down
+// when stellar-core terminates.
+func newBufferedLedgerMetaReader(reader io.Reader) *bufferedLedgerMetaReader {
+	r := bufio.NewReaderSize(reader, metaPipeBufferSize)
+	return &bufferedLedgerMetaReader{
+		c:       make(chan metaResult, ledgerReadAheadBufferSize),
+		r:       r,
+		decoder: xdr3.NewDecoder(r),
+	}
+}
+
+// readLedgerMetaFromPipe unmarshals the next ledger from the meta pipe.
+// It can block for two reasons:
+// * Meta pipe buffer is full so it will wait until it refills.
+// * The next ledger available in the buffer exceeds the meta pipe buffer size.
+// In such a case the method will block until the LedgerCloseMeta buffer is empty.
+func (b *bufferedLedgerMetaReader) readLedgerMetaFromPipe() (*xdr.LedgerCloseMeta, error) {
+	frameLength, err := xdr.ReadFrameLength(b.decoder)
+	if err != nil {
+		return nil, errors.Wrap(err, "error reading frame length")
+	}
+
+	for frameLength > metaPipeBufferSize && len(b.c) > 0 {
+		// Wait for LedgerCloseMeta buffer to be cleared to minimize memory usage.
+		<-time.After(time.Second)
+	}
+
+	var xlcm xdr.LedgerCloseMeta
+	_, err = xlcm.DecodeFrom(b.decoder)
+	if err != nil {
+		return nil, errors.Wrap(err, "unmarshalling framed LedgerCloseMeta")
+	}
+	return &xlcm, nil
+}
+
+func (b *bufferedLedgerMetaReader) getChannel() <-chan metaResult {
+	return b.c
+}
+
+// start starts a loop that reads binary ledger data into internal buffers.
+// The function returns when it encounters an error (including io.EOF).
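+// It is typically run in its own goroutine (e.g. `go reader.start()`), with
+// results consumed from the channel returned by getChannel().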
+func (b *bufferedLedgerMetaReader) start() {
+	printBufferOccupation := time.NewTicker(5 * time.Second)
+	defer printBufferOccupation.Stop()
+	defer close(b.c)
+
+	for {
+		select {
+		case <-printBufferOccupation.C:
+			log.Debug("captive core read-ahead buffer occupation:", len(b.c))
+		default:
+		}
+
+		meta, err := b.readLedgerMetaFromPipe()
+		if err != nil {
+			b.c <- metaResult{nil, err}
+			return
+		}
+
+		b.c <- metaResult{meta, nil}
+	}
+}
diff --git a/ingest/ledgerbackend/captive_core_backend.go b/ingest/ledgerbackend/captive_core_backend.go
new file mode 100644
index 0000000000..80890229f2
--- /dev/null
+++ b/ingest/ledgerbackend/captive_core_backend.go
@@ -0,0 +1,656 @@
+package ledgerbackend
+
+import (
+	"context"
+	"encoding/hex"
+	"os"
+	"sync"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+
+	"github.com/stellar/go/historyarchive"
+	"github.com/stellar/go/support/log"
+	"github.com/stellar/go/xdr"
+)
+
+// Ensure CaptiveStellarCore implements LedgerBackend
+var _ LedgerBackend = (*CaptiveStellarCore)(nil)
+
+func (c *CaptiveStellarCore) roundDownToFirstReplayAfterCheckpointStart(ledger uint32) uint32 {
+	r := c.checkpointManager.GetCheckpointRange(ledger)
+	if r.Low <= 1 {
+		// Stellar-Core doesn't stream ledger 1
+		return 2
+	}
+	// All other checkpoints start at the next multiple of 64
+	return r.Low
+}
+
+// CaptiveStellarCore is a ledger backend that starts an internal Stellar-Core
+// subprocess responsible for streaming ledger data. It provides better decoupling
+// than DatabaseBackend but requires some extra init time.
+//
+// It operates in two modes:
+// * When a BoundedRange is prepared it starts Stellar-Core in catchup mode that
+// replays ledgers in memory. This is very fast but requires Stellar-Core to
+// keep ledger state in RAM. It requires around 3GB of RAM as of August 2020.
+// * When an UnboundedRange is prepared it runs Stellar-Core catchup mode to
+// sync with the first ledger and then runs it in normal mode. This
+// requires the configAppendPath to be provided because a quorum set needs to
+// be selected.
+//
+// When running, CaptiveStellarCore will create a temporary folder to store
+// bucket files and other temporary files. The folder is removed when Close is
+// called.
+//
+// The communication is performed via a filesystem pipe which is created in a
+// temporary folder.
+//
+// Currently BoundedRange requires full trust in the history archive. This issue is
+// being fixed in Stellar-Core.
+//
+// While using BoundedRanges is straightforward there are a few gotchas connected
+// to UnboundedRanges:
+// * PrepareRange takes more time because all ledger entries must be stored on
+// disk instead of RAM.
+// * If GetLedger is not called frequently (every 5 sec. on average) the
+// Stellar-Core process can go out of sync with the network. This happens
+// because there is no buffering of the communication pipe and CaptiveStellarCore
+// has a very small internal buffer and Stellar-Core will not close the new
+// ledger if it's not read.
+//
+// Except for the Close function, CaptiveStellarCore is not thread-safe and should
+// not be accessed by multiple goroutines. Close is thread-safe and can be called
+// from another goroutine. Once Close is called it will interrupt and cancel any
+// pending operations.
+//
+// Requires Stellar-Core v13.2.0+.
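+//
+// A minimal usage sketch (assuming cfg is a valid CaptiveCoreConfig, ctx is a
+// context.Context, and error handling is elided):
+//
+//	backend, err := NewCaptive(cfg)
+//	defer backend.Close()
+//	err = backend.PrepareRange(ctx, BoundedRange(100, 200))
+//	meta, err := backend.GetLedger(ctx, 100)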
+type CaptiveStellarCore struct { + archive historyarchive.ArchiveInterface + checkpointManager historyarchive.CheckpointManager + ledgerHashStore TrustedLedgerHashStore + + // cancel is the CancelFunc for context which controls the lifetime of a CaptiveStellarCore instance. + // Once it is invoked CaptiveStellarCore will not be able to stream ledgers from Stellar Core or + // spawn new instances of Stellar Core. + cancel context.CancelFunc + + stellarCoreRunner stellarCoreRunnerInterface + // stellarCoreLock protects access to stellarCoreRunner. When the read lock + // is acquired stellarCoreRunner can be accessed. When the write lock is acquired + // stellarCoreRunner can be updated. + stellarCoreLock sync.RWMutex + + // For testing + stellarCoreRunnerFactory func(mode stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) + + // cachedMeta keeps that ledger data of the last fetched ledger. Updated in GetLedger(). + cachedMeta *xdr.LedgerCloseMeta + + prepared *Range // non-nil if any range is prepared + closed bool // False until the core is closed + nextLedger uint32 // next ledger expected, error w/ restart if not seen + lastLedger *uint32 // end of current segment if offline, nil if online + previousLedgerHash *string +} + +// CaptiveCoreConfig contains all the parameters required to create a CaptiveStellarCore instance +type CaptiveCoreConfig struct { + // BinaryPath is the file path to the Stellar Core binary + BinaryPath string + // NetworkPassphrase is the Stellar network passphrase used by captive core when connecting to the Stellar network + NetworkPassphrase string + // HistoryArchiveURLs are a list of history archive urls + HistoryArchiveURLs []string + Toml *CaptiveCoreToml + + // Optional fields + + // CheckpointFrequency is the number of ledgers between checkpoints + // if unset, DefaultCheckpointFrequency will be used + CheckpointFrequency uint32 + // LedgerHashStore is an optional store used to obtain hashes for ledger sequences from a trusted source + LedgerHashStore TrustedLedgerHashStore + // Log is an (optional) custom logger which will capture any output from the Stellar Core process. + // If Log is omitted then all output will be printed to stdout. + Log *log.Entry + // Context is the (optional) context which controls the lifetime of a CaptiveStellarCore instance. Once the context is done + // the CaptiveStellarCore instance will not be able to stream ledgers from Stellar Core or spawn new + // instances of Stellar Core. If Context is omitted CaptiveStellarCore will default to using context.Background. + Context context.Context + // StoragePath is the (optional) base path passed along to Core's + // BUCKET_DIR_PATH which specifies where various bucket data should be + // stored. We always append /captive-core to this directory, since we clean + // it up entirely on shutdown. + StoragePath string +} + +// NewCaptive returns a new CaptiveStellarCore instance. +func NewCaptive(config CaptiveCoreConfig) (*CaptiveStellarCore, error) { + // Here we set defaults in the config. 
Because config is not a pointer, this code should
+	// not mutate the original CaptiveCoreConfig instance that was passed into NewCaptive()
+
+	// Log Captive Core straight to stdout by default
+	if config.Log == nil {
+		config.Log = log.New()
+		config.Log.SetOutput(os.Stdout)
+		config.Log.SetLevel(logrus.InfoLevel)
+	}
+
+	parentCtx := config.Context
+	if parentCtx == nil {
+		parentCtx = context.Background()
+	}
+	var cancel context.CancelFunc
+	config.Context, cancel = context.WithCancel(parentCtx)
+
+	archivePool, err := historyarchive.NewArchivePool(
+		config.HistoryArchiveURLs,
+		historyarchive.ConnectOptions{
+			NetworkPassphrase:   config.NetworkPassphrase,
+			CheckpointFrequency: config.CheckpointFrequency,
+			Context:             config.Context,
+		},
+	)
+
+	if err != nil {
+		cancel()
+		return nil, errors.Wrap(err, "Error connecting to ALL history archives.")
+	}
+
+	c := &CaptiveStellarCore{
+		archive:           &archivePool,
+		ledgerHashStore:   config.LedgerHashStore,
+		cancel:            cancel,
+		checkpointManager: historyarchive.NewCheckpointManager(config.CheckpointFrequency),
+	}
+
+	c.stellarCoreRunnerFactory = func(mode stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) {
+		return newStellarCoreRunner(config, mode)
+	}
+	return c, nil
+}
+
+func (c *CaptiveStellarCore) getLatestCheckpointSequence() (uint32, error) {
+	has, err := c.archive.GetRootHAS()
+	if err != nil {
+		return 0, errors.Wrap(err, "error getting root HAS")
+	}
+
+	return has.CurrentLedger, nil
+}
+
+func (c *CaptiveStellarCore) openOfflineReplaySubprocess(from, to uint32) error {
+	latestCheckpointSequence, err := c.getLatestCheckpointSequence()
+	if err != nil {
+		return errors.Wrap(err, "error getting latest checkpoint sequence")
+	}
+
+	if from > latestCheckpointSequence {
+		return errors.Errorf(
+			"from sequence: %d is greater than max available in history archives: %d",
+			from,
+			latestCheckpointSequence,
+		)
+	}
+
+	if to > latestCheckpointSequence {
+		return errors.Errorf(
+			"to sequence: %d is greater than max available in history archives: %d",
+			to,
+			latestCheckpointSequence,
+		)
+	}
+
+	var runner stellarCoreRunnerInterface
+	if runner, err = c.stellarCoreRunnerFactory(stellarCoreRunnerModeOffline); err != nil {
+		return errors.Wrap(err, "error creating stellar-core runner")
+	} else {
+		// only assign c.stellarCoreRunner if runner is not nil to avoid nil interface check
+		// see https://golang.org/doc/faq#nil_error
+		c.stellarCoreRunner = runner
+	}
+
+	err = c.stellarCoreRunner.catchup(from, to)
+	if err != nil {
+		return errors.Wrap(err, "error running stellar-core")
+	}
+
+	// The next ledger should be the first ledger of the checkpoint containing
+	// the requested ledger
+	ran := BoundedRange(from, to)
+	c.prepared = &ran
+	c.nextLedger = c.roundDownToFirstReplayAfterCheckpointStart(from)
+	c.lastLedger = &to
+	c.previousLedgerHash = nil
+
+	return nil
+}
+
+func (c *CaptiveStellarCore) openOnlineReplaySubprocess(ctx context.Context, from uint32) error {
+	latestCheckpointSequence, err := c.getLatestCheckpointSequence()
+	if err != nil {
+		return errors.Wrap(err, "error getting latest checkpoint sequence")
+	}
+
+	// We don't allow starting online mode more than two checkpoints
+	// ahead of the latest checkpoint. Such requests are likely buggy.
+	// We should allow only one checkpoint here, but stellar-core sometimes
+	// takes up to a minute to update the root HAS.
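+	// As an illustration, with the default 64-ledger checkpoint frequency the
+	// window computed below is roughly 128 ledgers past the latest checkpoint.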
+	twoCheckPointsLength := (c.checkpointManager.GetCheckpoint(0) + 1) * 2
+	maxLedger := latestCheckpointSequence + twoCheckPointsLength
+	if from > maxLedger {
+		return errors.Errorf(
+			"trying to start online mode too far (latest checkpoint=%d), only two checkpoints in the future allowed",
+			latestCheckpointSequence,
+		)
+	}
+
+	var runner stellarCoreRunnerInterface
+	if runner, err = c.stellarCoreRunnerFactory(stellarCoreRunnerModeOnline); err != nil {
+		return errors.Wrap(err, "error creating stellar-core runner")
+	} else {
+		// only assign c.stellarCoreRunner if runner is not nil to avoid nil interface check
+		// see https://golang.org/doc/faq#nil_error
+		c.stellarCoreRunner = runner
+	}
+
+	runFrom, ledgerHash, err := c.runFromParams(ctx, from)
+	if err != nil {
+		return errors.Wrap(err, "error calculating ledger and hash for stellar-core run")
+	}
+
+	err = c.stellarCoreRunner.runFrom(runFrom, ledgerHash)
+	if err != nil {
+		return errors.Wrap(err, "error running stellar-core")
+	}
+
+	// In the online mode we update nextLedger after streaming the first ledger.
+	// This is to support versions before and after/including v17.1.0 that
+	// introduced minimal persistent DB.
+	c.nextLedger = 0
+	ran := UnboundedRange(from)
+	c.prepared = &ran
+	c.lastLedger = nil
+	c.previousLedgerHash = nil
+
+	return nil
+}
+
+// runFromParams receives a ledger sequence and calculates the required values to call stellar-core run with --start-ledger and --start-hash
+func (c *CaptiveStellarCore) runFromParams(ctx context.Context, from uint32) (runFrom uint32, ledgerHash string, err error) {
+	if from == 1 {
+		// Trying to start from 1 results in an error from Stellar-Core:
+		// Target ledger 1 is not newer than last closed ledger 1 - nothing to do
+		// TODO maybe we can fix it by generating 1st ledger meta
+		// like GenesisLedgerStateReader?
+		err = errors.New("CaptiveCore is unable to start from ledger 1, start from ledger 2")
+		return
+	}
+
+	if from <= 63 {
+		// The line below supports the special case of streaming ledger 2
+		// and works for all other ledgers <= 63 (fast-forward).
+		// We can't set from=2 because Stellar-Core will not allow starting from 1.
+		// To solve this we start from 3 and exploit the fact that Stellar-Core
+		// will stream data from 2 for the first checkpoint.
+		from = 3
+	}
+
+	runFrom = from - 1
+	if c.ledgerHashStore != nil {
+		var exists bool
+		ledgerHash, exists, err = c.ledgerHashStore.GetLedgerHash(ctx, runFrom)
+		if err != nil {
+			err = errors.Wrapf(err, "error trying to read ledger hash %d", runFrom)
+			return
+		}
+		if exists {
+			return
+		}
+	}
+
+	ledgerHeader, err2 := c.archive.GetLedgerHeader(from)
+	if err2 != nil {
+		err = errors.Wrapf(err2, "error trying to read ledger header %d from HAS", from)
+		return
+	}
+	ledgerHash = hex.EncodeToString(ledgerHeader.Header.PreviousLedgerHash[:])
+	return
+}
+
+// nextExpectedSequence returns nextLedger (if currently set) or the start of
+// the prepared range. Otherwise it returns 0.
+// This is done because `nextLedger` is 0 between the moment Stellar-Core is
+// started and streaming the first ledger (in such a case we return the first
+// ledger in the requested range).
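+// For example, after openOnlineReplaySubprocess prepares UnboundedRange(65) and
+// before the first ledger is streamed, nextExpectedSequence() returns 65.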
+func (c *CaptiveStellarCore) nextExpectedSequence() uint32 {
+	if c.nextLedger == 0 && c.prepared != nil {
+		return c.prepared.from
+	}
+	return c.nextLedger
+}
+
+func (c *CaptiveStellarCore) startPreparingRange(ctx context.Context, ledgerRange Range) (bool, error) {
+	c.stellarCoreLock.Lock()
+	defer c.stellarCoreLock.Unlock()
+
+	if c.isPrepared(ledgerRange) {
+		return true, nil
+	}
+
+	if c.stellarCoreRunner != nil {
+		if err := c.stellarCoreRunner.close(); err != nil {
+			return false, errors.Wrap(err, "error closing existing session")
+		}
+
+		// Make sure Stellar-Core is terminated before starting a new instance.
+		processExited, _ := c.stellarCoreRunner.getProcessExitError()
+		if !processExited {
+			return false, errors.New("the previous Stellar-Core instance is still running")
+		}
+	}
+
+	var err error
+	if ledgerRange.bounded {
+		err = c.openOfflineReplaySubprocess(ledgerRange.from, ledgerRange.to)
+	} else {
+		err = c.openOnlineReplaySubprocess(ctx, ledgerRange.from)
+	}
+	if err != nil {
+		return false, errors.Wrap(err, "opening subprocess")
+	}
+
+	return false, nil
+}
+
+// PrepareRange prepares the given range (including from and to) to be loaded.
+// The captive stellar-core backend needs to initialize Stellar-Core state to be
+// able to stream ledgers.
+// Stellar-Core mode depends on the provided ledgerRange:
+// * For BoundedRange it will start Stellar-Core in catchup mode.
+// * For UnboundedRange it will first catch up to the starting ledger and then run
+// it normally (including connecting to the Stellar network).
+// Please note that using a BoundedRange currently requires full trust in the
+// history archive. This issue is being fixed in Stellar-Core.
+func (c *CaptiveStellarCore) PrepareRange(ctx context.Context, ledgerRange Range) error {
+	if alreadyPrepared, err := c.startPreparingRange(ctx, ledgerRange); err != nil {
+		return errors.Wrap(err, "error starting prepare range")
+	} else if alreadyPrepared {
+		return nil
+	}
+
+	_, err := c.GetLedger(ctx, ledgerRange.from)
+	if err != nil {
+		return errors.Wrapf(err, "Error fast-forwarding to %d", ledgerRange.from)
+	}
+
+	return nil
+}
+
+// IsPrepared returns true if a given ledgerRange is prepared.
+func (c *CaptiveStellarCore) IsPrepared(ctx context.Context, ledgerRange Range) (bool, error) {
+	c.stellarCoreLock.RLock()
+	defer c.stellarCoreLock.RUnlock()
+
+	return c.isPrepared(ledgerRange), nil
+}
+
+func (c *CaptiveStellarCore) isPrepared(ledgerRange Range) bool {
+	if c.isClosed() {
+		return false
+	}
+
+	if c.stellarCoreRunner == nil {
+		return false
+	}
+	if c.closed {
+		return false
+	}
+	lastLedger := uint32(0)
+	if c.lastLedger != nil {
+		lastLedger = *c.lastLedger
+	}
+
+	cachedLedger := uint32(0)
+	if c.cachedMeta != nil {
+		cachedLedger = c.cachedMeta.LedgerSequence()
+	}
+
+	if c.prepared == nil {
+		return false
+	}
+
+	if lastLedger == 0 {
+		return c.nextExpectedSequence() <= ledgerRange.from || cachedLedger == ledgerRange.from
+	}
+
+	// From now on: lastLedger != 0 so current range is bounded
+
+	if ledgerRange.bounded {
+		return (c.nextExpectedSequence() <= ledgerRange.from || cachedLedger == ledgerRange.from) &&
+			lastLedger >= ledgerRange.to
+	}
+
+	// Requested range is unbounded but current one is bounded
+	return false
+}
+
+// GetLedger will block until the ledger is available in the backend
+// (even for UnboundedRange), then return its LedgerCloseMeta.
+//
+// Call PrepareRange first to instruct the backend which ledgers to fetch.
+// CaptiveStellarCore requires PrepareRange call first to initialize Stellar-Core. +// Requesting a ledger on non-prepared backend will return an error. +// +// Please note that requesting a ledger sequence far after current +// ledger will block the execution for a long time. +// +// Because ledger data is streamed from Stellar-Core sequentially, users should +// request sequences in a non-decreasing order. If the requested sequence number +// is less than the last requested sequence number, an error will be returned. +// +// This function behaves differently for bounded and unbounded ranges: +// * BoundedRange: After getting the last ledger in a range this method will +// also Close() the backend. +func (c *CaptiveStellarCore) GetLedger(ctx context.Context, sequence uint32) (xdr.LedgerCloseMeta, error) { + c.stellarCoreLock.RLock() + defer c.stellarCoreLock.RUnlock() + + if c.cachedMeta != nil && sequence == c.cachedMeta.LedgerSequence() { + // GetLedger can be called multiple times using the same sequence, ex. to create + // change and transaction readers. If we have this ledger buffered, let's return it. + return *c.cachedMeta, nil + } + + if c.isClosed() { + return xdr.LedgerCloseMeta{}, errors.New("stellar-core is no longer usable") + } + + if c.prepared == nil { + return xdr.LedgerCloseMeta{}, errors.New("session is not prepared, call PrepareRange first") + } + + if c.stellarCoreRunner == nil { + return xdr.LedgerCloseMeta{}, errors.New("stellar-core cannot be nil, call PrepareRange first") + } + if c.closed { + return xdr.LedgerCloseMeta{}, errors.New("stellar-core has an error, call PrepareRange first") + } + + if sequence < c.nextExpectedSequence() { + return xdr.LedgerCloseMeta{}, errors.Errorf( + "requested ledger %d is behind the captive core stream (expected=%d)", + sequence, + c.nextExpectedSequence(), + ) + } + + if c.lastLedger != nil && sequence > *c.lastLedger { + return xdr.LedgerCloseMeta{}, errors.Errorf( + "reading past bounded range (requested sequence=%d, last ledger in range=%d)", + sequence, + *c.lastLedger, + ) + } + + // Now loop along the range until we find the ledger we want. 
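+	// Each iteration either returns early (context cancelled, stream error, or
+	// the requested ledger reached) or consumes one buffered ledger and
+	// advances nextLedger via handleMetaPipeResult.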
+	for {
+		select {
+		case <-ctx.Done():
+			return xdr.LedgerCloseMeta{}, ctx.Err()
+		case result, ok := <-c.stellarCoreRunner.getMetaPipe():
+			found, ledger, err := c.handleMetaPipeResult(sequence, result, ok)
+			if found || err != nil {
+				return ledger, err
+			}
+		}
+	}
+}
+
+func (c *CaptiveStellarCore) handleMetaPipeResult(sequence uint32, result metaResult, ok bool) (bool, xdr.LedgerCloseMeta, error) {
+	if err := c.checkMetaPipeResult(result, ok); err != nil {
+		c.stellarCoreRunner.close()
+		return false, xdr.LedgerCloseMeta{}, err
+	}
+
+	seq := result.LedgerCloseMeta.LedgerSequence()
+	// If we got something unexpected, close and reset
+	if c.nextLedger != 0 && seq != c.nextLedger {
+		c.stellarCoreRunner.close()
+		return false, xdr.LedgerCloseMeta{}, errors.Errorf(
+			"unexpected ledger sequence (expected=%d actual=%d)",
+			c.nextLedger,
+			seq,
+		)
+	} else if c.nextLedger == 0 && seq > c.prepared.from {
+		// First streamed ledger is greater than prepared.from
+		c.stellarCoreRunner.close()
+		return false, xdr.LedgerCloseMeta{}, errors.Errorf(
+			"unexpected ledger sequence (expected=<=%d actual=%d)",
+			c.prepared.from,
+			seq,
+		)
+	}
+
+	newPreviousLedgerHash := result.LedgerCloseMeta.PreviousLedgerHash().HexString()
+	if c.previousLedgerHash != nil && *c.previousLedgerHash != newPreviousLedgerHash {
+		// We got something unexpected, close and reset
+		c.stellarCoreRunner.close()
+		return false, xdr.LedgerCloseMeta{}, errors.Errorf(
+			"unexpected previous ledger hash for ledger %d (expected=%s actual=%s)",
+			seq,
+			*c.previousLedgerHash,
+			newPreviousLedgerHash,
+		)
+	}
+
+	c.nextLedger = result.LedgerSequence() + 1
+	currentLedgerHash := result.LedgerCloseMeta.LedgerHash().HexString()
+	c.previousLedgerHash = &currentLedgerHash
+
+	// Update cache with the latest value because we incremented nextLedger.
+	c.cachedMeta = result.LedgerCloseMeta
+
+	if seq == sequence {
+		// If we got the _last_ ledger in a segment, close before returning.
+		if c.lastLedger != nil && *c.lastLedger == seq {
+			if err := c.stellarCoreRunner.close(); err != nil {
+				return false, xdr.LedgerCloseMeta{}, errors.Wrap(err, "error closing session")
+			}
+		}
+		return true, *c.cachedMeta, nil
+	}
+
+	return false, xdr.LedgerCloseMeta{}, nil
+}
+
+func (c *CaptiveStellarCore) checkMetaPipeResult(result metaResult, ok bool) error {
+	// There are 3 types of errors we check for:
+	// 1. User initiated shutdown by canceling the parent context or calling Close().
+	// 2. The stellar core process exited unexpectedly.
+	// 3. Some error was encountered while consuming the ledgers emitted by captive core (e.g. parsing invalid xdr)
+	if err := c.stellarCoreRunner.context().Err(); err != nil {
+		// Case 1 - User initiated shutdown by canceling the parent context or calling Close()
+		return err
+	}
+	if !ok || result.err != nil {
+		if result.err != nil {
+			// Case 3 - Some error was encountered while consuming the ledger stream emitted by captive core.
+			return result.err
+		} else if exited, err := c.stellarCoreRunner.getProcessExitError(); exited {
+			// Case 2 - The stellar core process exited unexpectedly
+			if err == nil {
+				return errors.Errorf("stellar core exited unexpectedly")
+			} else {
+				return errors.Wrap(err, "stellar core exited unexpectedly")
+			}
+		} else if !ok {
+			// This case should never happen because the ledger buffer channel can only be closed
+			// if the process exits or the context is cancelled.
+ // However, we add this check for the sake of completeness + return errors.Errorf("meta pipe closed unexpectedly") + } + } + return nil +} + +// GetLatestLedgerSequence returns the sequence of the latest ledger available +// in the backend. This method returns an error if not in a session (start with +// PrepareRange). +// +// Note that for UnboundedRange the returned sequence number is not necessarily +// the latest sequence closed by the network. It's always the last value available +// in the backend. +func (c *CaptiveStellarCore) GetLatestLedgerSequence(ctx context.Context) (uint32, error) { + c.stellarCoreLock.RLock() + defer c.stellarCoreLock.RUnlock() + + if c.isClosed() { + return 0, errors.New("stellar-core is no longer usable") + } + if c.prepared == nil { + return 0, errors.New("stellar-core must be prepared, call PrepareRange first") + } + if c.stellarCoreRunner == nil { + return 0, errors.New("stellar-core cannot be nil, call PrepareRange first") + } + if c.closed { + return 0, errors.New("stellar-core is closed, call PrepareRange first") + + } + if c.lastLedger == nil { + return c.nextExpectedSequence() - 1 + uint32(len(c.stellarCoreRunner.getMetaPipe())), nil + } + return *c.lastLedger, nil +} + +func (c *CaptiveStellarCore) isClosed() bool { + return c.closed +} + +// Close closes existing Stellar-Core process, streaming sessions and removes all +// temporary files. Note, once a CaptiveStellarCore instance is closed it can no longer be used and +// all subsequent calls to PrepareRange(), GetLedger(), etc will fail. +// Close is thread-safe and can be called from another go routine. +func (c *CaptiveStellarCore) Close() error { + c.stellarCoreLock.RLock() + defer c.stellarCoreLock.RUnlock() + + c.closed = true + + // after the CaptiveStellarCore context is canceled all subsequent calls to PrepareRange() will fail + c.cancel() + + // TODO: Sucks to ignore the error here, but no worse than it was before, + // so... 
+ if c.ledgerHashStore != nil { + c.ledgerHashStore.Close() + } + + if c.stellarCoreRunner != nil { + return c.stellarCoreRunner.close() + } + return nil +} diff --git a/ingest/ledgerbackend/captive_core_backend_test.go b/ingest/ledgerbackend/captive_core_backend_test.go new file mode 100644 index 0000000000..eda05cf20c --- /dev/null +++ b/ingest/ledgerbackend/captive_core_backend_test.go @@ -0,0 +1,1423 @@ +package ledgerbackend + +import ( + "context" + "encoding/hex" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/network" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// TODO: test frame decoding +// TODO: test from static base64-encoded data + +type stellarCoreRunnerMock struct { + mock.Mock +} + +func (m *stellarCoreRunnerMock) context() context.Context { + a := m.Called() + return a.Get(0).(context.Context) +} + +func (m *stellarCoreRunnerMock) catchup(from, to uint32) error { + a := m.Called(from, to) + return a.Error(0) +} + +func (m *stellarCoreRunnerMock) runFrom(from uint32, hash string) error { + a := m.Called(from, hash) + return a.Error(0) +} + +func (m *stellarCoreRunnerMock) getMetaPipe() <-chan metaResult { + a := m.Called() + return a.Get(0).(<-chan metaResult) +} + +func (m *stellarCoreRunnerMock) getProcessExitError() (bool, error) { + a := m.Called() + return a.Bool(0), a.Error(1) +} + +func (m *stellarCoreRunnerMock) close() error { + a := m.Called() + return a.Error(0) +} + +func buildLedgerCloseMeta(header testLedgerHeader) xdr.LedgerCloseMeta { + opResults := []xdr.OperationResult{} + opMeta := []xdr.OperationMeta{} + + tmpHash, _ := hex.DecodeString("cde54da3901f5b9c0331d24fbb06ac9c5c5de76de9fb2d4a7b86c09e46f11d8c") + var hash [32]byte + copy(hash[:], tmpHash) + + var ledgerHash [32]byte + if header.hash != "" { + tmpHash, err := hex.DecodeString(header.hash) + if err != nil { + panic(err) + } + copy(ledgerHash[:], tmpHash) + } + + var previousLedgerHash [32]byte + if header.hash != "" { + tmpHash, err := hex.DecodeString(header.previousLedgerHash) + if err != nil { + panic(err) + } + copy(previousLedgerHash[:], tmpHash) + } + + source := xdr.MustAddress("GAEJJMDDCRYF752PKIJICUVL7MROJBNXDV2ZB455T7BAFHU2LCLSE2LW") + return xdr.LedgerCloseMeta{ + V: 0, + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Hash: ledgerHash, + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(header.sequence), + PreviousLedgerHash: previousLedgerHash, + }, + }, + TxSet: xdr.TransactionSet{ + Txs: []xdr.TransactionEnvelope{ + { + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: source.ToMuxedAccount(), + Fee: xdr.Uint32(header.sequence), + }, + }, + }, + }, + }, + TxProcessing: []xdr.TransactionResultMeta{ + { + Result: xdr.TransactionResultPair{ + TransactionHash: xdr.Hash(hash), + Result: xdr.TransactionResult{ + FeeCharged: xdr.Int64(header.sequence), + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxSuccess, + Results: &opResults, + }, + }, + }, + TxApplyProcessing: xdr.TransactionMeta{ + Operations: &opMeta, + }, + }, + }, + }, + } + +} + +type testLedgerHeader struct { + sequence uint32 + hash string + previousLedgerHash string +} + +func TestCaptiveNew(t *testing.T) { + executablePath := "/etc/stellar-core" + networkPassphrase := network.PublicNetworkPassphrase + historyURLs := 
[]string{"http://history.stellar.org/prd/core-live/core_live_001"} + + captiveStellarCore, err := NewCaptive( + CaptiveCoreConfig{ + BinaryPath: executablePath, + NetworkPassphrase: networkPassphrase, + HistoryArchiveURLs: historyURLs, + }, + ) + + assert.NoError(t, err) + assert.Equal(t, uint32(0), captiveStellarCore.nextLedger) + assert.NotNil(t, captiveStellarCore.archive) +} + +func TestCaptivePrepareRange(t *testing.T) { + metaChan := make(chan metaResult, 100) + + // Core will actually start with the last checkpoint before the from ledger + // and then rewind to the `from` ledger. + for i := 64; i <= 100; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(100), uint32(200)).Return(nil).Once() + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + cancelCalled := false + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + cancel: context.CancelFunc(func() { + cancelCalled = true + }), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) + assert.NoError(t, err) + mockRunner.On("close").Return(nil).Once() + err = captiveBackend.Close() + assert.NoError(t, err) + assert.True(t, cancelCalled) + mockRunner.AssertExpectations(t) + mockArchive.AssertExpectations(t) +} + +func TestCaptivePrepareRangeCrash(t *testing.T) { + metaChan := make(chan metaResult) + close(metaChan) + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(100), uint32(200)).Return(nil).Once() + mockRunner.On("getProcessExitError").Return(true, errors.New("exit code -1")) + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("close").Return(nil).Once() + mockRunner.On("context").Return(ctx) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) + assert.EqualError(t, err, "Error fast-forwarding to 100: stellar core exited unexpectedly: exit code -1") + mockRunner.AssertExpectations(t) + mockArchive.AssertExpectations(t) +} + +func TestCaptivePrepareRangeTerminated(t *testing.T) { + metaChan := make(chan metaResult, 100) + + // Core will actually start with the last checkpoint before the from ledger + // and then rewind to the `from` ledger. 
+ for i := 64; i <= 100; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + close(metaChan) + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(100), uint32(200)).Return(nil).Once() + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) + assert.NoError(t, err) + mockRunner.AssertExpectations(t) + mockArchive.AssertExpectations(t) +} + +func TestCaptivePrepareRangeCloseNotFullyTerminated(t *testing.T) { + metaChan := make(chan metaResult, 100) + for i := 64; i <= 100; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx, cancel := context.WithCancel(context.Background()) + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(100), uint32(200)).Return(nil).Once() + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) + assert.NoError(t, err) + + // Simulates a long (but graceful) shutdown... + cancel() + + err = captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) + assert.NoError(t, err) + + mockRunner.AssertExpectations(t) + mockArchive.AssertExpectations(t) +} + +func TestCaptivePrepareRange_ErrClosingSession(t *testing.T) { + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("close").Return(fmt.Errorf("transient error")) + + captiveBackend := CaptiveStellarCore{ + nextLedger: 300, + stellarCoreRunner: mockRunner, + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) + assert.EqualError(t, err, "error starting prepare range: error closing existing session: transient error") + + err = captiveBackend.PrepareRange(ctx, UnboundedRange(64)) + assert.EqualError(t, err, "error starting prepare range: error closing existing session: transient error") + + mockRunner.AssertExpectations(t) +} + +func TestCaptivePrepareRange_ErrGettingRootHAS(t *testing.T) { + ctx := context.Background() + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). 
+ Return(historyarchive.HistoryArchiveState{}, errors.New("transient error")) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) + assert.EqualError(t, err, "error starting prepare range: opening subprocess: error getting latest checkpoint sequence: error getting root HAS: transient error") + + err = captiveBackend.PrepareRange(ctx, UnboundedRange(100)) + assert.EqualError(t, err, "error starting prepare range: opening subprocess: error getting latest checkpoint sequence: error getting root HAS: transient error") + + mockArchive.AssertExpectations(t) +} + +func TestCaptivePrepareRange_FromIsAheadOfRootHAS(t *testing.T) { + ctx := context.Background() + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(64), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 200)) + assert.EqualError(t, err, "error starting prepare range: opening subprocess: from sequence: 100 is greater than max available in history archives: 64") + + err = captiveBackend.PrepareRange(ctx, UnboundedRange(100)) + assert.EqualError(t, err, "error starting prepare range: opening subprocess: trying to start online mode too far (latest checkpoint=64), only two checkpoints in the future allowed") + + mockArchive.AssertExpectations(t) +} + +func TestCaptivePrepareRange_ToIsAheadOfRootHAS(t *testing.T) { + mockRunner := &stellarCoreRunnerMock{} + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(192), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(context.Background(), BoundedRange(100, 200)) + assert.EqualError(t, err, "error starting prepare range: opening subprocess: to sequence: 200 is greater than max available in history archives: 192") + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptivePrepareRange_ErrCatchup(t *testing.T) { + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(100), uint32(192)).Return(errors.New("transient error")).Once() + mockRunner.On("close").Return(nil).Once() + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). 
+ Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(192), + }, nil) + + ctx := context.Background() + cancelCalled := false + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + cancel: context.CancelFunc(func() { + cancelCalled = true + }), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(100, 192)) + assert.EqualError(t, err, "error starting prepare range: opening subprocess: error running stellar-core: transient error") + + // make sure we can Close without errors + assert.NoError(t, captiveBackend.Close()) + assert.True(t, cancelCalled) + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptivePrepareRangeUnboundedRange_ErrRunFrom(t *testing.T) { + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("runFrom", uint32(127), "0000000000000000000000000000000000000000000000000000000000000000").Return(errors.New("transient error")).Once() + mockRunner.On("close").Return(nil).Once() + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(127), + }, nil) + + mockArchive. + On("GetLedgerHeader", uint32(128)). + Return(xdr.LedgerHeaderHistoryEntry{}, nil) + + ctx := context.Background() + cancelCalled := false + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + cancel: context.CancelFunc(func() { + cancelCalled = true + }), + } + + err := captiveBackend.PrepareRange(ctx, UnboundedRange(128)) + assert.EqualError(t, err, "error starting prepare range: opening subprocess: error running stellar-core: transient error") + + // make sure we can Close without errors + assert.NoError(t, captiveBackend.Close()) + assert.True(t, cancelCalled) + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptivePrepareRangeUnboundedRange_ReuseSession(t *testing.T) { + metaChan := make(chan metaResult, 100) + + // Core will actually start with the last checkpoint before the from ledger + // and then rewind to the `from` ledger. + for i := 2; i <= 65; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("runFrom", uint32(64), "0000000000000000000000000000000000000000000000000000000000000000").Return(nil).Once() + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(129), + }, nil) + mockArchive. + On("GetLedgerHeader", uint32(65)). 
+ Return(xdr.LedgerHeaderHistoryEntry{}, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, UnboundedRange(65)) + assert.NoError(t, err) + + captiveBackend.nextLedger = 64 + err = captiveBackend.PrepareRange(ctx, UnboundedRange(65)) + assert.NoError(t, err) + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestGetLatestLedgerSequence(t *testing.T) { + metaChan := make(chan metaResult, 300) + + // Core will actually start with the last checkpoint before the `from` ledger + // and then rewind to the `from` ledger. + for i := 2; i <= 200; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("runFrom", uint32(63), "0000000000000000000000000000000000000000000000000000000000000000").Return(nil).Once() + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + mockArchive. + On("GetLedgerHeader", uint32(64)). + Return(xdr.LedgerHeaderHistoryEntry{}, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, UnboundedRange(64)) + assert.NoError(t, err) + + latest, err := captiveBackend.GetLatestLedgerSequence(ctx) + assert.NoError(t, err) + assert.Equal(t, uint32(200), latest) + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptiveGetLedger(t *testing.T) { + tt := assert.New(t) + metaChan := make(chan metaResult, 300) + + for i := 64; i <= 66; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). 
+ Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + // requires PrepareRange + _, err := captiveBackend.GetLedger(ctx, 64) + tt.EqualError(err, "session is not prepared, call PrepareRange first") + + ledgerRange := BoundedRange(65, 66) + tt.False(captiveBackend.isPrepared(ledgerRange), "core is not prepared until explicitly prepared") + tt.False(captiveBackend.isClosed()) + err = captiveBackend.PrepareRange(ctx, ledgerRange) + assert.NoError(t, err) + + tt.True(captiveBackend.isPrepared(ledgerRange)) + tt.False(captiveBackend.isClosed()) + + _, err = captiveBackend.GetLedger(ctx, 64) + tt.Error(err, "requested ledger 64 is behind the captive core stream (expected=66)") + + // reads value from buffer + meta, err := captiveBackend.GetLedger(ctx, 65) + tt.NoError(err) + tt.Equal(xdr.Uint32(65), meta.V0.LedgerHeader.Header.LedgerSeq) + + // reads value from cachedMeta + cachedMeta, err := captiveBackend.GetLedger(ctx, 65) + tt.NoError(err) + tt.Equal(meta, cachedMeta) + + // next sequence number didn't get consumed + tt.Equal(uint32(66), captiveBackend.nextLedger) + + mockRunner.On("close").Return(nil).Run(func(args mock.Arguments) { + cancel() + }).Once() + + _, err = captiveBackend.GetLedger(ctx, 66) + tt.NoError(err) + + tt.False(captiveBackend.isPrepared(ledgerRange)) + tt.False(captiveBackend.isClosed()) + _, err = captiveBackend.GetLedger(ctx, 66) + tt.NoError(err) + + // core is not closed unless it's explicitly closed + tt.False(captiveBackend.isClosed()) + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +// TestCaptiveGetLedgerCacheLatestLedger test the following case: +// 1. Prepare Unbounded range. +// 2. GetLedger that is still not in the buffer. +// 3. Get latest ledger in the buffer using GetLedger. +// +// Before 3d97762 this test failed because cachedMeta was only updated when +// the ledger with a requested sequence was reached while streaming meta. +// +// TODO: Not sure this test is really valid or worth it anymore, now that GetLedger is always blocking. +func TestCaptiveGetLedgerCacheLatestLedger(t *testing.T) { + tt := assert.New(t) + metaChan := make(chan metaResult, 300) + + for i := 2; i <= 67; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("runFrom", uint32(65), "0101010100000000000000000000000000000000000000000000000000000000").Return(nil).Once() + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + mockArchive. + On("GetLedgerHeader", uint32(66)). 
+ Return(xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + PreviousLedgerHash: xdr.Hash{1, 1, 1, 1}, + }, + }, nil).Once() + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, UnboundedRange(66)) + assert.NoError(t, err) + + // found, _, err := captiveBackend.GetLedger(ctx, 68) + // tt.NoError(err) + // tt.False(found) + // tt.Equal(uint32(67), captiveBackend.cachedMeta.LedgerSequence()) + // tt.Equal(uint32(68), captiveBackend.nextLedger) + + meta, err := captiveBackend.GetLedger(ctx, 67) + tt.NoError(err) + tt.Equal(uint32(67), meta.LedgerSequence()) + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptiveGetLedger_NextLedgerIsDifferentToLedgerFromBuffer(t *testing.T) { + metaChan := make(chan metaResult, 100) + + for i := 64; i <= 65; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(68)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + mockRunner.On("close").Return(nil) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(65, 66)) + assert.NoError(t, err) + + _, err = captiveBackend.GetLedger(ctx, 66) + assert.EqualError(t, err, "unexpected ledger sequence (expected=66 actual=68)") + + // TODO assertions should work - to be fixed in a separate PR. + // _, err = captiveBackend.GetLedger(ctx, 66) + // assert.EqualError(t, err, "session is closed, call PrepareRange first") + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptiveGetLedger_NextLedger0RangeFromIsSmallerThanLedgerFromBuffer(t *testing.T) { + metaChan := make(chan metaResult, 100) + + for i := 66; i <= 66; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("runFrom", uint32(64), mock.Anything).Return(nil) + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + mockRunner.On("close").Return(nil) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + mockArchive. + On("GetLedgerHeader", uint32(65)). 
+ Return(xdr.LedgerHeaderHistoryEntry{}, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, UnboundedRange(65)) + assert.EqualError(t, err, "Error fast-forwarding to 65: unexpected ledger sequence (expected=<=65 actual=66)") + + // TODO assertions should work - to be fixed in a separate PR. + // prepared, err := captiveBackend.IsPrepared(ctx, UnboundedRange(65)) + // assert.NoError(t, err) + // assert.False(t, prepared) + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptiveStellarCore_PrepareRangeAfterClose(t *testing.T) { + ctx := context.Background() + executablePath := "/etc/stellar-core" + networkPassphrase := network.PublicNetworkPassphrase + historyURLs := []string{"http://localhost"} + + captiveCoreToml, err := NewCaptiveCoreToml(CaptiveCoreTomlParams{}) + assert.NoError(t, err) + + captiveStellarCore, err := NewCaptive( + CaptiveCoreConfig{ + BinaryPath: executablePath, + NetworkPassphrase: networkPassphrase, + HistoryArchiveURLs: historyURLs, + Toml: captiveCoreToml, + }, + ) + assert.NoError(t, err) + + assert.NoError(t, captiveStellarCore.Close()) + + assert.EqualError( + t, + captiveStellarCore.PrepareRange(ctx, BoundedRange(65, 66)), + "error starting prepare range: opening subprocess: error getting latest checkpoint sequence: "+ + "error getting root HAS: Get \"http://localhost/.well-known/stellar-history.json\": context canceled", + ) + + // even if the request to fetch the latest checkpoint succeeds, we should fail at creating the subprocess + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + captiveStellarCore.archive = mockArchive + assert.EqualError( + t, + captiveStellarCore.PrepareRange(ctx, BoundedRange(65, 66)), + "error starting prepare range: opening subprocess: error running stellar-core: context canceled", + ) + mockArchive.AssertExpectations(t) +} + +func TestCaptiveGetLedger_ErrReadingMetaResult(t *testing.T) { + tt := assert.New(t) + metaChan := make(chan metaResult, 100) + + for i := 64; i <= 65; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + metaChan <- metaResult{ + err: fmt.Errorf("unmarshalling error"), + } + + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + ctx, cancel := context.WithCancel(ctx) + mockRunner.On("context").Return(ctx) + mockRunner.On("close").Return(nil).Run(func(args mock.Arguments) { + cancel() + }).Once() + + // even if the request to fetch the latest checkpoint succeeds, we should fail at creating the subprocess + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). 
+ Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(65, 66)) + assert.NoError(t, err) + + meta, err := captiveBackend.GetLedger(ctx, 65) + tt.NoError(err) + tt.Equal(xdr.Uint32(65), meta.V0.LedgerHeader.Header.LedgerSeq) + + tt.False(captiveBackend.isClosed()) + + // try reading from an empty buffer + _, err = captiveBackend.GetLedger(ctx, 66) + tt.EqualError(err, "unmarshalling error") + + // not closed even if there is an error getting ledger + tt.False(captiveBackend.isClosed()) + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptiveGetLedger_ErrClosingAfterLastLedger(t *testing.T) { + tt := assert.New(t) + metaChan := make(chan metaResult, 100) + + for i := 64; i <= 66; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + mockRunner.On("close").Return(fmt.Errorf("transient error")).Once() + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(65, 66)) + assert.NoError(t, err) + + _, err = captiveBackend.GetLedger(ctx, 66) + tt.EqualError(err, "error closing session: transient error") + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptiveAfterClose(t *testing.T) { + metaChan := make(chan metaResult, 100) + + for i := 64; i <= 66; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + mockRunner := &stellarCoreRunnerMock{} + ctx, cancel := context.WithCancel(context.Background()) + mockRunner.On("catchup", uint32(65), uint32(66)).Return(nil) + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + mockRunner.On("close").Return(nil).Once() + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). 
+ Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + cancel: cancel, + } + + boundedRange := BoundedRange(65, 66) + err := captiveBackend.PrepareRange(ctx, boundedRange) + assert.NoError(t, err) + + assert.NoError(t, captiveBackend.Close()) + assert.True(t, captiveBackend.isClosed()) + + _, err = captiveBackend.GetLedger(ctx, boundedRange.to) + assert.EqualError(t, err, "stellar-core is no longer usable") + + var prepared bool + prepared, err = captiveBackend.IsPrepared(ctx, boundedRange) + assert.False(t, prepared) + assert.NoError(t, err) + + _, err = captiveBackend.GetLatestLedgerSequence(ctx) + assert.EqualError(t, err, "stellar-core is no longer usable") + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestGetLedgerBoundsCheck(t *testing.T) { + metaChan := make(chan metaResult, 100) + + for i := 128; i <= 130; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(i)}) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + } + + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(128), uint32(130)).Return(nil).Once() + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(128, 130)) + assert.NoError(t, err) + + meta, err := captiveBackend.GetLedger(ctx, 128) + assert.NoError(t, err) + assert.Equal(t, uint32(128), meta.LedgerSequence()) + + prev := meta + meta, err = captiveBackend.GetLedger(ctx, 128) + assert.NoError(t, err) + assert.Equal(t, prev, meta) + + _, err = captiveBackend.GetLedger(ctx, 64) + assert.EqualError(t, err, "requested ledger 64 is behind the captive core stream (expected=129)") + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) +} + +func TestCaptiveGetLedgerTerminatedUnexpectedly(t *testing.T) { + ledger64 := buildLedgerCloseMeta(testLedgerHeader{sequence: uint32(64)}) + + for _, testCase := range []struct { + name string + ctx context.Context + ledgers []metaResult + processExited bool + processExitedError error + expectedError string + }{ + { + "stellar core exited unexpectedly without error", + context.Background(), + []metaResult{{LedgerCloseMeta: &ledger64}}, + true, + nil, + "stellar core exited unexpectedly", + }, + { + "stellar core exited unexpectedly with an error", + context.Background(), + []metaResult{{LedgerCloseMeta: &ledger64}}, + true, + fmt.Errorf("signal kill"), + "stellar core exited unexpectedly: signal kill", + }, + { + "stellar core exited unexpectedly without error and closed channel", + context.Background(), + []metaResult{{LedgerCloseMeta: &ledger64}}, + true, + nil, + "stellar core exited unexpectedly", + }, + { + "stellar core exited unexpectedly with an error and closed channel", + 
context.Background(), + []metaResult{{LedgerCloseMeta: &ledger64}}, + true, + fmt.Errorf("signal kill"), + "stellar core exited unexpectedly: signal kill", + }, + { + "meta pipe closed unexpectedly", + context.Background(), + []metaResult{{LedgerCloseMeta: &ledger64}}, + false, + nil, + "meta pipe closed unexpectedly", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + metaChan := make(chan metaResult, 100) + + for _, result := range testCase.ledgers { + metaChan <- result + } + close(metaChan) + + ctx := testCase.ctx + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("catchup", uint32(64), uint32(100)).Return(nil).Once() + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + mockRunner.On("getProcessExitError").Return(testCase.processExited, testCase.processExitedError) + mockRunner.On("close").Return(nil).Once() + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(200), + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, BoundedRange(64, 100)) + assert.NoError(t, err) + + meta, err := captiveBackend.GetLedger(ctx, 64) + assert.NoError(t, err) + assert.Equal(t, uint32(64), meta.LedgerSequence()) + + _, err = captiveBackend.GetLedger(ctx, 65) + assert.EqualError(t, err, testCase.expectedError) + + mockArchive.AssertExpectations(t) + mockRunner.AssertExpectations(t) + }) + } +} + +func TestCaptiveUseOfLedgerHashStore(t *testing.T) { + ctx := context.Background() + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetLedgerHeader", uint32(300)). + Return(xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + PreviousLedgerHash: xdr.Hash{1, 1, 1, 1}, + }, + }, nil) + + mockLedgerHashStore := &MockLedgerHashStore{} + mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(1049)). + Return("", false, fmt.Errorf("transient error")).Once() + mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(299)). + Return("", false, nil).Once() + mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(85)). + Return("cde", true, nil).Once() + mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(127)). + Return("ghi", true, nil).Once() + mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(2)). 
+ Return("mnb", true, nil).Once() + + cancelCalled := false + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + ledgerHashStore: mockLedgerHashStore, + checkpointManager: historyarchive.NewCheckpointManager(64), + cancel: context.CancelFunc(func() { + cancelCalled = true + }), + } + + runFrom, ledgerHash, err := captiveBackend.runFromParams(ctx, 24) + assert.NoError(t, err) + assert.Equal(t, uint32(2), runFrom) + assert.Equal(t, "mnb", ledgerHash) + + runFrom, ledgerHash, err = captiveBackend.runFromParams(ctx, 86) + assert.NoError(t, err) + assert.Equal(t, uint32(85), runFrom) + assert.Equal(t, "cde", ledgerHash) + + runFrom, ledgerHash, err = captiveBackend.runFromParams(ctx, 128) + assert.NoError(t, err) + assert.Equal(t, uint32(127), runFrom) + assert.Equal(t, "ghi", ledgerHash) + + _, _, err = captiveBackend.runFromParams(ctx, 1050) + assert.EqualError(t, err, "error trying to read ledger hash 1049: transient error") + + runFrom, ledgerHash, err = captiveBackend.runFromParams(ctx, 300) + assert.NoError(t, err) + assert.Equal(t, uint32(299), runFrom, "runFrom") + assert.Equal(t, "0101010100000000000000000000000000000000000000000000000000000000", ledgerHash) + + mockLedgerHashStore.On("Close").Return(nil).Once() + err = captiveBackend.Close() + assert.NoError(t, err) + assert.True(t, cancelCalled) + mockLedgerHashStore.AssertExpectations(t) + mockArchive.AssertExpectations(t) +} + +func TestCaptiveRunFromParams(t *testing.T) { + var tests = []struct { + from uint32 + runFrom uint32 + ledgerArchives uint32 + }{ + // Before and including 1st checkpoint: + {2, 2, 3}, + {3, 2, 3}, + {3, 2, 3}, + {4, 2, 3}, + {62, 2, 3}, + {63, 2, 3}, + + // Starting from 64 we go normal path: between 1st and 2nd checkpoint: + {64, 63, 64}, + {65, 64, 65}, + {66, 65, 66}, + {126, 125, 126}, + + // between 2nd and 3rd checkpoint... and so on. + {127, 126, 127}, + {128, 127, 128}, + {129, 128, 129}, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("from_%d", tc.from), func(t *testing.T) { + tt := assert.New(t) + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetLedgerHeader", uint32(tc.ledgerArchives)). 
+ Return(xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + PreviousLedgerHash: xdr.Hash{1, 1, 1, 1}, + }, + }, nil) + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + ctx := context.Background() + runFrom, ledgerHash, err := captiveBackend.runFromParams(ctx, tc.from) + tt.NoError(err) + tt.Equal(tc.runFrom, runFrom, "runFrom") + tt.Equal("0101010100000000000000000000000000000000000000000000000000000000", ledgerHash) + + mockArchive.AssertExpectations(t) + }) + } +} + +func TestCaptiveIsPrepared(t *testing.T) { + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("context").Return(context.Background()).Maybe() + + // c.prepared == nil + captiveBackend := CaptiveStellarCore{ + nextLedger: 0, + } + + result := captiveBackend.isPrepared(UnboundedRange(100)) + assert.False(t, result) + + // c.prepared != nil: + var tests = []struct { + nextLedger uint32 + lastLedger uint32 + cachedLedger uint32 + preparedRange Range + ledgerRange Range + result bool + }{ + // If nextLedger == 0, prepared range is checked + {0, 0, 0, UnboundedRange(100), UnboundedRange(100), true}, + {0, 0, 0, UnboundedRange(100), UnboundedRange(99), false}, + {0, 0, 0, UnboundedRange(100), BoundedRange(100, 200), true}, + + {100, 0, 0, UnboundedRange(99), UnboundedRange(101), true}, + {101, 0, 100, UnboundedRange(99), UnboundedRange(100), true}, + {100, 200, 0, BoundedRange(99, 200), UnboundedRange(100), false}, + + {100, 200, 0, BoundedRange(99, 200), BoundedRange(100, 200), true}, + {100, 200, 0, BoundedRange(99, 200), BoundedRange(100, 201), false}, + {100, 201, 0, BoundedRange(99, 201), BoundedRange(100, 200), true}, + {101, 200, 100, BoundedRange(99, 200), BoundedRange(100, 200), true}, + } + + for _, tc := range tests { + t.Run(fmt.Sprintf("next_%d_last_%d_cached_%d_range_%v", tc.nextLedger, tc.lastLedger, tc.cachedLedger, tc.ledgerRange), func(t *testing.T) { + captiveBackend := CaptiveStellarCore{ + stellarCoreRunner: mockRunner, + nextLedger: tc.nextLedger, + prepared: &tc.preparedRange, + } + if tc.lastLedger > 0 { + captiveBackend.lastLedger = &tc.lastLedger + } + if tc.cachedLedger > 0 { + meta := buildLedgerCloseMeta(testLedgerHeader{ + sequence: tc.cachedLedger, + }) + captiveBackend.cachedMeta = &meta + } + + result := captiveBackend.isPrepared(tc.ledgerRange) + assert.Equal(t, tc.result, result) + }) + } +} + +// TestCaptivePreviousLedgerCheck checks if previousLedgerHash is set in PrepareRange +// and then checked and updated in GetLedger. 
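+// The last ledger pushed to the meta stream (sequence 301) deliberately carries a mismatched previous ledger hash, so the final GetLedger call is expected to fail.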
+func TestCaptivePreviousLedgerCheck(t *testing.T) { + metaChan := make(chan metaResult, 200) + + h := 3 + for i := 192; i <= 300; i++ { + meta := buildLedgerCloseMeta(testLedgerHeader{ + sequence: uint32(i), + hash: fmt.Sprintf("%02x00000000000000000000000000000000000000000000000000000000000000", h), + previousLedgerHash: fmt.Sprintf("%02x00000000000000000000000000000000000000000000000000000000000000", h-1), + }) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + h++ + } + + { + // Write invalid hash + meta := buildLedgerCloseMeta(testLedgerHeader{ + sequence: 301, + hash: "0000000000000000000000000000000000000000000000000000000000000000", + previousLedgerHash: "0000000000000000000000000000000000000000000000000000000000000000", + }) + metaChan <- metaResult{ + LedgerCloseMeta: &meta, + } + + } + + ctx := context.Background() + mockRunner := &stellarCoreRunnerMock{} + mockRunner.On("runFrom", uint32(299), "0101010100000000000000000000000000000000000000000000000000000000").Return(nil).Once() + mockRunner.On("getMetaPipe").Return((<-chan metaResult)(metaChan)) + mockRunner.On("context").Return(ctx) + mockRunner.On("close").Return(nil).Once() + + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("GetRootHAS"). + Return(historyarchive.HistoryArchiveState{ + CurrentLedger: uint32(255), + }, nil) + mockArchive. + On("GetLedgerHeader", uint32(300)). + Return(xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + PreviousLedgerHash: xdr.Hash{1, 1, 1, 1}, + }, + }, nil).Once() + + mockLedgerHashStore := &MockLedgerHashStore{} + mockLedgerHashStore.On("GetLedgerHash", ctx, uint32(299)). + Return("", false, nil).Once() + + captiveBackend := CaptiveStellarCore{ + archive: mockArchive, + stellarCoreRunnerFactory: func(_ stellarCoreRunnerMode) (stellarCoreRunnerInterface, error) { + return mockRunner, nil + }, + ledgerHashStore: mockLedgerHashStore, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + err := captiveBackend.PrepareRange(ctx, UnboundedRange(300)) + assert.NoError(t, err) + + meta, err := captiveBackend.GetLedger(ctx, 300) + assert.NoError(t, err) + assert.NotNil(t, captiveBackend.previousLedgerHash) + assert.Equal(t, uint32(301), captiveBackend.nextLedger) + assert.Equal(t, meta.LedgerHash().HexString(), *captiveBackend.previousLedgerHash) + + _, err = captiveBackend.GetLedger(ctx, 301) + assert.EqualError(t, err, "unexpected previous ledger hash for ledger 301 (expected=6f00000000000000000000000000000000000000000000000000000000000000 actual=0000000000000000000000000000000000000000000000000000000000000000)") + + mockRunner.AssertExpectations(t) + mockArchive.AssertExpectations(t) + mockLedgerHashStore.AssertExpectations(t) +} diff --git a/ingest/ledgerbackend/database_backend.go b/ingest/ledgerbackend/database_backend.go new file mode 100644 index 0000000000..189cfcaf75 --- /dev/null +++ b/ingest/ledgerbackend/database_backend.go @@ -0,0 +1,240 @@ +package ledgerbackend + +import ( + "context" + "database/sql" + "sort" + "time" + + "github.com/stellar/go/network" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +const ( + latestLedgerSeqQuery = "select ledgerseq, closetime from ledgerheaders order by ledgerseq desc limit 1" + txHistoryQuery = "select txbody, txresult, txmeta, txindex from txhistory where ledgerseq = ? " + ledgerHeaderQuery = "select ledgerhash, data from ledgerheaders where ledgerseq = ? 
" + txFeeHistoryQuery = "select txchanges, txindex from txfeehistory where ledgerseq = ? " + upgradeHistoryQuery = "select ledgerseq, upgradeindex, upgrade, changes from upgradehistory where ledgerseq = ? order by upgradeindex asc" + orderBy = "order by txindex asc" + dbDriver = "postgres" +) + +// Ensure DatabaseBackend implements LedgerBackend +var _ LedgerBackend = (*DatabaseBackend)(nil) + +// DatabaseBackend implements a database data store. +type DatabaseBackend struct { + networkPassphrase string + session session +} + +func NewDatabaseBackend(dataSourceName, networkPassphrase string) (*DatabaseBackend, error) { + session, err := createSession(dataSourceName) + if err != nil { + return nil, err + } + + return NewDatabaseBackendFromSession(session, networkPassphrase) +} + +func NewDatabaseBackendFromSession(session db.SessionInterface, networkPassphrase string) (*DatabaseBackend, error) { + return &DatabaseBackend{ + session: session, + networkPassphrase: networkPassphrase, + }, nil +} + +func (dbb *DatabaseBackend) PrepareRange(ctx context.Context, ledgerRange Range) error { + _, err := dbb.GetLedger(ctx, ledgerRange.from) + if err != nil { + return errors.Wrap(err, "error getting ledger") + } + + if ledgerRange.bounded { + _, err := dbb.GetLedger(ctx, ledgerRange.to) + if err != nil { + return errors.Wrap(err, "error getting ledger") + } + } + + return nil +} + +// IsPrepared returns true if a given ledgerRange is prepared. +func (*DatabaseBackend) IsPrepared(ctx context.Context, ledgerRange Range) (bool, error) { + return true, nil +} + +// GetLatestLedgerSequence returns the most recent ledger sequence number present in the database. +func (dbb *DatabaseBackend) GetLatestLedgerSequence(ctx context.Context) (uint32, error) { + var ledger []ledgerHeader + err := dbb.session.SelectRaw(ctx, &ledger, latestLedgerSeqQuery) + if err != nil { + return 0, errors.Wrap(err, "couldn't select ledger sequence") + } + if len(ledger) == 0 { + return 0, errors.New("no ledgers exist in ledgerheaders table") + } + + return ledger[0].LedgerSeq, nil +} + +func sortByHash(transactions []xdr.TransactionEnvelope, passphrase string) error { + hashes := make([]xdr.Hash, len(transactions)) + txByHash := map[xdr.Hash]xdr.TransactionEnvelope{} + for i, tx := range transactions { + hash, err := network.HashTransactionInEnvelope(tx, passphrase) + if err != nil { + return errors.Wrap(err, "cannot hash transaction") + } + hashes[i] = hash + txByHash[hash] = tx + } + + sort.Slice(hashes, func(i, j int) bool { + a := hashes[i] + b := hashes[j] + for k := range a { + if a[k] < b[k] { + return true + } + if a[k] > b[k] { + return false + } + } + return false + }) + + for i, hash := range hashes { + transactions[i] = txByHash[hash] + } + return nil +} + +// GetLedger will block until the ledger is +// available in the backend (even for UnaboundedRange). +// Please note that requesting a ledger sequence far after current ledger will +// block the execution for a long time. +func (dbb *DatabaseBackend) GetLedger(ctx context.Context, sequence uint32) (xdr.LedgerCloseMeta, error) { + for { + exists, meta, err := dbb.getLedgerQuery(ctx, sequence) + if err != nil { + return xdr.LedgerCloseMeta{}, err + } + + if exists { + return meta, nil + } else { + time.Sleep(time.Second) + } + } +} + +// getLedgerQuery returns the LedgerCloseMeta for the given ledger sequence number. +// The first returned value is false when the ledger does not exist in the database. 
+func (dbb *DatabaseBackend) getLedgerQuery(ctx context.Context, sequence uint32) (bool, xdr.LedgerCloseMeta, error) { + lcm := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{}, + } + + // Query - ledgerheader + var lRow ledgerHeaderHistory + + err := dbb.session.GetRaw(ctx, &lRow, ledgerHeaderQuery, sequence) + // Return errors... + if err != nil { + switch err { + case sql.ErrNoRows: + // Ledger was not found + return false, xdr.LedgerCloseMeta{}, nil + default: + return false, xdr.LedgerCloseMeta{}, errors.Wrap(err, "Error getting ledger header") + } + } + + // ...otherwise store the header + lcm.V0.LedgerHeader = xdr.LedgerHeaderHistoryEntry{ + Hash: lRow.Hash, + Header: lRow.Header, + Ext: xdr.LedgerHeaderHistoryEntryExt{}, + } + + // Query - txhistory + var txhRows []txHistory + err = dbb.session.SelectRaw(ctx, &txhRows, txHistoryQuery+orderBy, sequence) + // Return errors... + if err != nil { + return false, lcm, errors.Wrap(err, "Error getting txHistory") + } + + // ...otherwise store the data + for i, tx := range txhRows { + // Sanity check index. Note that first TXIndex in a ledger is 1 + if i != int(tx.TXIndex)-1 { + return false, xdr.LedgerCloseMeta{}, errors.New("transactions read from DB history table are misordered") + } + + lcm.V0.TxSet.Txs = append(lcm.V0.TxSet.Txs, tx.TXBody) + lcm.V0.TxProcessing = append(lcm.V0.TxProcessing, xdr.TransactionResultMeta{ + Result: tx.TXResult, + TxApplyProcessing: tx.TXMeta, + }) + } + + if err = sortByHash(lcm.V0.TxSet.Txs, dbb.networkPassphrase); err != nil { + return false, xdr.LedgerCloseMeta{}, errors.Wrap(err, "could not sort txset") + } + + // Query - txfeehistory + var txfhRows []txFeeHistory + err = dbb.session.SelectRaw(ctx, &txfhRows, txFeeHistoryQuery+orderBy, sequence) + // Return errors... + if err != nil { + return false, lcm, errors.Wrap(err, "Error getting txFeeHistory") + } + + // ...otherwise store the data + for i, tx := range txfhRows { + // Sanity check index. Note that first TXIndex in a ledger is 1 + if i != int(tx.TXIndex)-1 { + return false, xdr.LedgerCloseMeta{}, errors.New("transactions read from DB fee history table are misordered") + } + lcm.V0.TxProcessing[i].FeeProcessing = tx.TXChanges + } + + // Query - upgradehistory + var upgradeHistoryRows []upgradeHistory + err = dbb.session.SelectRaw(ctx, &upgradeHistoryRows, upgradeHistoryQuery, sequence) + // Return errors... + if err != nil { + return false, lcm, errors.Wrap(err, "Error getting upgradeHistoryRows") + } + + // ...otherwise store the data + lcm.V0.UpgradesProcessing = make([]xdr.UpgradeEntryMeta, len(upgradeHistoryRows)) + for i, upgradeHistoryRow := range upgradeHistoryRows { + lcm.V0.UpgradesProcessing[i] = xdr.UpgradeEntryMeta{ + Upgrade: upgradeHistoryRow.Upgrade, + Changes: upgradeHistoryRow.Changes, + } + } + + return true, lcm, nil +} + +// CreateSession returns a new db.Session that connects to the given DB settings. +func createSession(dataSourceName string) (*db.Session, error) { + if dataSourceName == "" { + return nil, errors.New("missing DatabaseBackend.DataSourceName (e.g. \"postgres://stellar:postgres@localhost:8002/core\")") + } + + return db.Open(dbDriver, dataSourceName) +} + +// Close disconnects an active database session. 
+func (dbb *DatabaseBackend) Close() error { + return dbb.session.Close() +} diff --git a/ingest/ledgerbackend/file_watcher.go b/ingest/ledgerbackend/file_watcher.go new file mode 100644 index 0000000000..7b468d9378 --- /dev/null +++ b/ingest/ledgerbackend/file_watcher.go @@ -0,0 +1,112 @@ +package ledgerbackend + +import ( + "bytes" + "crypto/sha1" + "io" + "os" + "sync" + "time" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" +) + +type hash []byte + +func (h hash) Equals(other hash) bool { + return bytes.Equal(h, other) +} + +type fileWatcher struct { + pathToFile string + duration time.Duration + onChange func() + exit <-chan struct{} + log *log.Entry + hashFile func(string) (hash, error) + lastHash hash +} + +func hashFile(filename string) (hash, error) { + f, err := os.Open(filename) + if err != nil { + return hash{}, errors.Wrapf(err, "unable to open %v", f) + } + defer f.Close() + + h := sha1.New() + if _, err := io.Copy(h, f); err != nil { + return hash{}, errors.Wrapf(err, "unable to copy %v into buffer", f) + } + + return h.Sum(nil), nil +} + +func newFileWatcher(runner *stellarCoreRunner) (*fileWatcher, error) { + return newFileWatcherWithOptions(runner, hashFile, 10*time.Second) +} + +func newFileWatcherWithOptions( + runner *stellarCoreRunner, + hashFile func(string) (hash, error), + tickerDuration time.Duration, +) (*fileWatcher, error) { + hashResult, err := hashFile(runner.executablePath) + if err != nil { + return nil, errors.Wrap(err, "could not hash captive core binary") + } + + once := &sync.Once{} + return &fileWatcher{ + pathToFile: runner.executablePath, + duration: tickerDuration, + onChange: func() { + once.Do(func() { + runner.log.Warnf("detected new version of captive core binary %s , aborting session.", runner.executablePath) + if err := runner.close(); err != nil { + runner.log.Warnf("could not close captive core %v", err) + } + }) + }, + exit: runner.ctx.Done(), + log: runner.log, + hashFile: hashFile, + lastHash: hashResult, + }, nil +} + +func (f *fileWatcher) loop() { + ticker := time.NewTicker(f.duration) + + for { + select { + case <-f.exit: + ticker.Stop() + return + case <-ticker.C: + if f.fileChanged() { + f.onChange() + } + } + } +} + +func (f *fileWatcher) fileChanged() bool { + hashResult, err := f.hashFile(f.pathToFile) + if err != nil { + f.log.Warnf("could not hash contents of %s: %v", f.pathToFile, err) + return false + } + + if !f.lastHash.Equals(hashResult) { + f.log.Infof( + "detected update to %s. 
previous file hash was %v current hash is %v", + f.pathToFile, + f.lastHash, + hashResult, + ) + return true + } + return false +} diff --git a/ingest/ledgerbackend/file_watcher_test.go b/ingest/ledgerbackend/file_watcher_test.go new file mode 100644 index 0000000000..e46606d2e8 --- /dev/null +++ b/ingest/ledgerbackend/file_watcher_test.go @@ -0,0 +1,176 @@ +package ledgerbackend + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/stellar/go/support/log" + + "github.com/stretchr/testify/assert" +) + +type mockHash struct { + sync.Mutex + t *testing.T + expectedPath string + hashResult hash + err error + callCount int +} + +func (m *mockHash) setResponse(hashResult hash, err error) { + m.Lock() + defer m.Unlock() + m.hashResult = hashResult + m.err = err +} + +func (m *mockHash) getCallCount() int { + m.Lock() + defer m.Unlock() + return m.callCount +} + +func (m *mockHash) hashFile(fp string) (hash, error) { + m.Lock() + defer m.Unlock() + m.callCount++ + assert.Equal(m.t, m.expectedPath, fp) + return m.hashResult, m.err +} + +func createFWFixtures(t *testing.T) (*mockHash, *stellarCoreRunner, *fileWatcher) { + ms := &mockHash{ + hashResult: hash{}, + expectedPath: "/some/path", + t: t, + } + + captiveCoreToml, err := NewCaptiveCoreToml(CaptiveCoreTomlParams{}) + assert.NoError(t, err) + + runner, err := newStellarCoreRunner(CaptiveCoreConfig{ + BinaryPath: "/some/path", + HistoryArchiveURLs: []string{"http://localhost"}, + Log: log.New(), + Context: context.Background(), + Toml: captiveCoreToml, + }, stellarCoreRunnerModeOffline) + assert.NoError(t, err) + + fw, err := newFileWatcherWithOptions(runner, ms.hashFile, time.Millisecond) + assert.NoError(t, err) + assert.Equal(t, 1, ms.getCallCount()) + + return ms, runner, fw +} + +func TestNewFileWatcherError(t *testing.T) { + ms := &mockHash{ + hashResult: hash{}, + expectedPath: "/some/path", + t: t, + } + ms.setResponse(hash{}, fmt.Errorf("test error")) + + captiveCoreToml, err := NewCaptiveCoreToml(CaptiveCoreTomlParams{}) + assert.NoError(t, err) + + runner, err := newStellarCoreRunner(CaptiveCoreConfig{ + BinaryPath: "/some/path", + HistoryArchiveURLs: []string{"http://localhost"}, + Log: log.New(), + Context: context.Background(), + Toml: captiveCoreToml, + }, stellarCoreRunnerModeOffline) + assert.NoError(t, err) + + _, err = newFileWatcherWithOptions(runner, ms.hashFile, time.Millisecond) + assert.EqualError(t, err, "could not hash captive core binary: test error") + assert.Equal(t, 1, ms.getCallCount()) +} + +func TestFileChanged(t *testing.T) { + ms, _, fw := createFWFixtures(t) + + assert.False(t, fw.fileChanged()) + assert.False(t, fw.fileChanged()) + assert.Equal(t, 3, ms.getCallCount()) + + ms.setResponse(hash{}, fmt.Errorf("test error")) + assert.False(t, fw.fileChanged()) + assert.Equal(t, 4, ms.getCallCount()) + + ms.setResponse(ms.hashResult, nil) + assert.False(t, fw.fileChanged()) + assert.Equal(t, 5, ms.getCallCount()) + + ms.setResponse(hash{1}, nil) + assert.True(t, fw.fileChanged()) + assert.Equal(t, 6, ms.getCallCount()) +} + +func TestCloseRunnerBeforeFileWatcherLoop(t *testing.T) { + _, runner, fw := createFWFixtures(t) + + assert.NoError(t, runner.close()) + + // loop should exit almost immediately because the runner is closed + fw.loop() +} + +func TestCloseRunnerDuringFileWatcherLoop(t *testing.T) { + ms, runner, fw := createFWFixtures(t) + done := make(chan struct{}) + go func() { + fw.loop() + close(done) + }() + + // fw.loop will repeatedly check if the file has changed by calling 
hash. + // This test ensures that closing the runner will exit fw.loop so that the goroutine is not leaked. + + closedRunner := false + for { + select { + case <-done: + assert.True(t, closedRunner) + return + default: + if ms.getCallCount() > 20 { + runner.close() + closedRunner = true + } + } + } +} + +func TestFileChangesTriggerRunnerClose(t *testing.T) { + ms, runner, fw := createFWFixtures(t) + done := make(chan struct{}) + go func() { + fw.loop() + close(done) + }() + + // fw.loop will repeatedly check if the file has changed by calling hash + // This test ensures that modifying the file will trigger the closing of the runner. + modifiedFile := false + for { + select { + case <-done: + assert.True(t, modifiedFile) + // the runner is closed if and only if runner.ctx.Err() is non-nil + assert.Error(t, runner.ctx.Err()) + return + default: + if ms.getCallCount() > 20 { + ms.setResponse(hash{1}, nil) + modifiedFile = true + } + } + } +} diff --git a/ingest/ledgerbackend/hash_order_test.go b/ingest/ledgerbackend/hash_order_test.go new file mode 100644 index 0000000000..3de34d8662 --- /dev/null +++ b/ingest/ledgerbackend/hash_order_test.go @@ -0,0 +1,68 @@ +package ledgerbackend + +import ( + "github.com/stellar/go/network" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestHashOrder(t *testing.T) { + source := xdr.MustAddress("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU") + account := source.ToMuxedAccount() + original := []xdr.TransactionEnvelope{ + { + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: account, + SeqNum: 1, + }, + }, + }, + { + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: account, + SeqNum: 2, + }, + }, + }, + { + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: account, + SeqNum: 3, + }, + }, + }, + } + + sortByHash(original, network.TestNetworkPassphrase) + hashes := map[int]xdr.Hash{} + + for i, tx := range original { + var err error + hashes[i], err = network.HashTransactionInEnvelope(tx, network.TestNetworkPassphrase) + if err != nil { + assert.NoError(t, err) + } + } + + for i := range original { + if i == 0 { + continue + } + prev := hashes[i-1] + cur := hashes[i] + for j := range prev { + if !assert.True(t, prev[j] < cur[j]) { + break + } else { + break + } + } + } +} diff --git a/ingest/ledgerbackend/ledger_backend.go b/ingest/ledgerbackend/ledger_backend.go new file mode 100644 index 0000000000..572de2e183 --- /dev/null +++ b/ingest/ledgerbackend/ledger_backend.go @@ -0,0 +1,80 @@ +package ledgerbackend + +import ( + "context" + + "github.com/stellar/go/xdr" +) + +// LedgerBackend represents the interface to a ledger data store. +type LedgerBackend interface { + // GetLatestLedgerSequence returns the sequence of the latest ledger available + // in the backend. + GetLatestLedgerSequence(ctx context.Context) (sequence uint32, err error) + // GetLedger will block until the ledger is available. + GetLedger(ctx context.Context, sequence uint32) (xdr.LedgerCloseMeta, error) + // PrepareRange prepares the given range (including from and to) to be loaded. + // Some backends (like captive stellar-core) need to initalize data to be + // able to stream ledgers. Blocks until the first ledger is available. 
+ PrepareRange(ctx context.Context, ledgerRange Range) error + // IsPrepared returns true if a given ledgerRange is prepared. + IsPrepared(ctx context.Context, ledgerRange Range) (bool, error) + Close() error +} + +// session is the interface needed to access a persistent database session. +// TODO can't use this until we add Close() to the existing db.Session object +type session interface { + GetRaw(ctx context.Context, dest interface{}, query string, args ...interface{}) error + SelectRaw(ctx context.Context, dest interface{}, query string, args ...interface{}) error + Close() error +} + +// ledgerHeaderHistory is a helper struct used to unmarshall header fields from a stellar-core DB. +type ledgerHeaderHistory struct { + Hash xdr.Hash `db:"ledgerhash"` + Header xdr.LedgerHeader `db:"data"` +} + +// ledgerHeader holds a row of data from the stellar-core `ledgerheaders` table. +type ledgerHeader struct { + LedgerHash string `db:"ledgerhash"` + PrevHash string `db:"prevhash"` + BucketListHash string `db:"bucketlisthash"` + CloseTime int64 `db:"closetime"` + LedgerSeq uint32 `db:"ledgerseq"` + Data xdr.LedgerHeader `db:"data"` +} + +// txHistory holds a row of data from the stellar-core `txhistory` table. +type txHistory struct { + TXID string `db:"txid"` + LedgerSeq uint32 `db:"ledgerseq"` + TXIndex uint32 `db:"txindex"` + TXBody xdr.TransactionEnvelope `db:"txbody"` + TXResult xdr.TransactionResultPair `db:"txresult"` + TXMeta xdr.TransactionMeta `db:"txmeta"` +} + +// txFeeHistory holds a row of data from the stellar-core `txfeehistory` table. +type txFeeHistory struct { + TXID string `db:"txid"` + LedgerSeq uint32 `db:"ledgerseq"` + TXIndex uint32 `db:"txindex"` + TXChanges xdr.LedgerEntryChanges `db:"txchanges"` +} + +// scpHistory holds a row of data from the stellar-core `scphistory` table. +// type scpHistory struct { +// NodeID string `db:"nodeid"` +// LedgerSeq uint32 `db:"ledgerseq"` +// Envelope string `db:"envelope"` +// } + +// upgradeHistory holds a row of data from the stellar-core `upgradehistory` table. +type upgradeHistory struct { + LedgerSeq uint32 `db:"ledgerseq"` + UpgradeIndex uint32 `db:"upgradeindex"` + Upgrade xdr.LedgerUpgrade `db:"upgrade"` + Changes xdr.LedgerEntryChanges `db:"changes"` +} diff --git a/ingest/ledgerbackend/ledger_hash_store.go b/ingest/ledgerbackend/ledger_hash_store.go new file mode 100644 index 0000000000..be9ebf1bfc --- /dev/null +++ b/ingest/ledgerbackend/ledger_hash_store.go @@ -0,0 +1,62 @@ +package ledgerbackend + +import ( + "context" + + sq "github.com/Masterminds/squirrel" + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/support/db" +) + +// TrustedLedgerHashStore is used to query ledger data from a trusted source. +// The store should contain ledgers verified by Stellar-Core, do not use untrusted +// source like history archives. 
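+// Horizon's history_ledgers table is one such trusted source; see HorizonDBLedgerHashStore below.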
+type TrustedLedgerHashStore interface { + // GetLedgerHash returns the ledger hash for the given sequence number + GetLedgerHash(ctx context.Context, seq uint32) (string, bool, error) + Close() error +} + +// HorizonDBLedgerHashStore is a TrustedLedgerHashStore which uses horizon's db to look up ledger hashes +type HorizonDBLedgerHashStore struct { + session db.SessionInterface +} + +// NewHorizonDBLedgerHashStore constructs a new TrustedLedgerHashStore backed by the horizon db +func NewHorizonDBLedgerHashStore(session db.SessionInterface) TrustedLedgerHashStore { + return HorizonDBLedgerHashStore{session: session} +} + +// GetLedgerHash returns the ledger hash for the given sequence number +func (h HorizonDBLedgerHashStore) GetLedgerHash(ctx context.Context, seq uint32) (string, bool, error) { + sql := sq.Select("hl.ledger_hash").From("history_ledgers hl"). + Limit(1).Where("sequence = ?", seq) + + var hash string + err := h.session.Get(ctx, &hash, sql) + if h.session.NoRows(err) { + return hash, false, nil + } + return hash, true, err +} + +func (h HorizonDBLedgerHashStore) Close() error { + return h.session.Close() +} + +// MockLedgerHashStore is a mock implementation of TrustedLedgerHashStore +type MockLedgerHashStore struct { + mock.Mock +} + +// GetLedgerHash returns the ledger hash for the given sequence number +func (m *MockLedgerHashStore) GetLedgerHash(ctx context.Context, seq uint32) (string, bool, error) { + args := m.Called(ctx, seq) + return args.Get(0).(string), args.Get(1).(bool), args.Error(2) +} + +func (m *MockLedgerHashStore) Close() error { + args := m.Called() + return args.Error(0) +} diff --git a/ingest/ledgerbackend/mock_database_backend.go b/ingest/ledgerbackend/mock_database_backend.go new file mode 100644 index 0000000000..c5f85ecef7 --- /dev/null +++ b/ingest/ledgerbackend/mock_database_backend.go @@ -0,0 +1,40 @@ +package ledgerbackend + +import ( + "context" + + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/xdr" +) + +var _ LedgerBackend = (*MockDatabaseBackend)(nil) + +type MockDatabaseBackend struct { + mock.Mock +} + +func (m *MockDatabaseBackend) GetLatestLedgerSequence(ctx context.Context) (uint32, error) { + args := m.Called(ctx) + return args.Get(0).(uint32), args.Error(1) +} + +func (m *MockDatabaseBackend) PrepareRange(ctx context.Context, ledgerRange Range) error { + args := m.Called(ctx, ledgerRange) + return args.Error(0) +} + +func (m *MockDatabaseBackend) IsPrepared(ctx context.Context, ledgerRange Range) (bool, error) { + args := m.Called(ctx, ledgerRange) + return args.Bool(0), args.Error(1) +} + +func (m *MockDatabaseBackend) GetLedger(ctx context.Context, sequence uint32) (xdr.LedgerCloseMeta, error) { + args := m.Called(ctx, sequence) + return args.Get(0).(xdr.LedgerCloseMeta), args.Error(1) +} + +func (m *MockDatabaseBackend) Close() error { + args := m.Called() + return args.Error(0) +} diff --git a/ingest/ledgerbackend/range.go b/ingest/ledgerbackend/range.go new file mode 100644 index 0000000000..f0c80695a1 --- /dev/null +++ b/ingest/ledgerbackend/range.go @@ -0,0 +1,72 @@ +package ledgerbackend + +import ( + "encoding/json" + "fmt" +) + +// Range represents a range of ledger sequence numbers. 
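+// A Range is either bounded, covering [from, to], or unbounded, covering [from, latest).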
+type Range struct { + from uint32 + to uint32 + bounded bool +} + +type jsonRange struct { + From uint32 `json:"from"` + To uint32 `json:"to"` + Bounded bool `json:"bounded"` +} + +func (r *Range) UnmarshalJSON(b []byte) error { + var s jsonRange + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + r.from = s.From + r.to = s.To + r.bounded = s.Bounded + + return nil +} + +func (r Range) MarshalJSON() ([]byte, error) { + return json.Marshal(jsonRange{ + From: r.from, + To: r.to, + Bounded: r.bounded, + }) +} + +func (r Range) String() string { + if r.bounded { + return fmt.Sprintf("[%d,%d]", r.from, r.to) + } + return fmt.Sprintf("[%d,latest)", r.from) +} + +func (r Range) Contains(other Range) bool { + if r.bounded && !other.bounded { + return false + } + if r.bounded && other.bounded { + return r.from <= other.from && r.to >= other.to + } + return r.from <= other.from +} + +// SingleLedgerRange constructs a bounded range containing a single ledger. +func SingleLedgerRange(ledger uint32) Range { + return Range{from: ledger, to: ledger, bounded: true} +} + +// BoundedRange constructs a bounded range of ledgers with a fixed starting ledger and ending ledger. +func BoundedRange(from uint32, to uint32) Range { + return Range{from: from, to: to, bounded: true} +} + +// BoundedRange constructs a unbounded range of ledgers with a fixed starting ledger. +func UnboundedRange(from uint32) Range { + return Range{from: from, bounded: false} +} diff --git a/ingest/ledgerbackend/remote_captive_core.go b/ingest/ledgerbackend/remote_captive_core.go new file mode 100644 index 0000000000..18946a8d3c --- /dev/null +++ b/ingest/ledgerbackend/remote_captive_core.go @@ -0,0 +1,261 @@ +package ledgerbackend + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "path" + "strconv" + "sync" + "time" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// PrepareRangeResponse describes the status of the pending PrepareRange operation. +type PrepareRangeResponse struct { + LedgerRange Range `json:"ledgerRange"` + StartTime time.Time `json:"startTime"` + Ready bool `json:"ready"` + ReadyDuration int `json:"readyDuration"` +} + +// LatestLedgerSequenceResponse is the response for the GetLatestLedgerSequence command. +type LatestLedgerSequenceResponse struct { + Sequence uint32 `json:"sequence"` +} + +// LedgerResponse is the response for the GetLedger command. +type LedgerResponse struct { + Ledger Base64Ledger `json:"ledger"` +} + +// Base64Ledger extends xdr.LedgerCloseMeta with JSON encoding and decoding +type Base64Ledger xdr.LedgerCloseMeta + +func (r *Base64Ledger) UnmarshalJSON(b []byte) error { + var base64 string + if err := json.Unmarshal(b, &base64); err != nil { + return err + } + + var parsed xdr.LedgerCloseMeta + if err := xdr.SafeUnmarshalBase64(base64, &parsed); err != nil { + return err + } + *r = Base64Ledger(parsed) + + return nil +} + +func (r Base64Ledger) MarshalJSON() ([]byte, error) { + base64, err := xdr.MarshalBase64(xdr.LedgerCloseMeta(r)) + if err != nil { + return nil, err + } + return json.Marshal(base64) +} + +// RemoteCaptiveStellarCore is an http client for interacting with a remote captive core server. +type RemoteCaptiveStellarCore struct { + url *url.URL + client *http.Client + lock *sync.Mutex + prepareRangePollInterval time.Duration +} + +// RemoteCaptiveOption values can be passed into NewRemoteCaptive to customize a RemoteCaptiveStellarCore instance. 
+type RemoteCaptiveOption func(c *RemoteCaptiveStellarCore) + +// PrepareRangePollInterval configures how often the captive core server will be polled when blocking +// on the PrepareRange operation. +func PrepareRangePollInterval(d time.Duration) RemoteCaptiveOption { + return func(c *RemoteCaptiveStellarCore) { + c.prepareRangePollInterval = d + } +} + +// NewRemoteCaptive returns a new RemoteCaptiveStellarCore instance. +// +// Only the captiveCoreURL parameter is required. +func NewRemoteCaptive(captiveCoreURL string, options ...RemoteCaptiveOption) (RemoteCaptiveStellarCore, error) { + u, err := url.Parse(captiveCoreURL) + if err != nil { + return RemoteCaptiveStellarCore{}, errors.Wrap(err, "unparseable url") + } + + client := RemoteCaptiveStellarCore{ + prepareRangePollInterval: time.Second, + url: u, + client: &http.Client{Timeout: 10 * time.Second}, + lock: &sync.Mutex{}, + } + for _, option := range options { + option(&client) + } + return client, nil +} + +func decodeResponse(response *http.Response, payload interface{}) error { + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return errors.Wrap(err, "failed to read response body") + } + + return errors.New(string(body)) + } + + if err := json.NewDecoder(response.Body).Decode(payload); err != nil { + return errors.Wrap(err, "failed to decode json payload") + } + return nil +} + +// GetLatestLedgerSequence returns the sequence of the latest ledger available +// in the backend. This method returns an error if not in a session (start with +// PrepareRange). +// +// Note that for UnboundedRange the returned sequence number is not necessarily +// the latest sequence closed by the network. It's always the last value available +// in the backend. +func (c RemoteCaptiveStellarCore) GetLatestLedgerSequence(ctx context.Context) (sequence uint32, err error) { + // TODO: Have a context on this request so we can cancel all outstanding + // requests, not just PrepareRange. + u := *c.url + u.Path = path.Join(u.Path, "latest-sequence") + request, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return 0, errors.Wrap(err, "cannot construct http request") + } + + response, err := c.client.Do(request) + if err != nil { + return 0, errors.Wrap(err, "failed to execute request") + } + + var parsed LatestLedgerSequenceResponse + if err = decodeResponse(response, &parsed); err != nil { + return 0, err + } + + return parsed.Sequence, nil +} + +// Close cancels any pending PrepareRange requests. +func (c RemoteCaptiveStellarCore) Close() error { + return nil +} + +// PrepareRange prepares the given range (including from and to) to be loaded. +// Captive stellar-core backend needs to initalize Stellar-Core state to be +// able to stream ledgers. +// Stellar-Core mode depends on the provided ledgerRange: +// * For BoundedRange it will start Stellar-Core in catchup mode. +// * For UnboundedRange it will first catchup to starting ledger and then run +// it normally (including connecting to the Stellar network). +// Please note that using a BoundedRange, currently, requires a full-trust on +// history archive. This issue is being fixed in Stellar-Core. +func (c RemoteCaptiveStellarCore) PrepareRange(ctx context.Context, ledgerRange Range) error { + // TODO: removing createContext call here means we could technically have + // multiple prepareRange requests happening at the same time. Do we still + // need to enforce that? 
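+	// Poll IsPrepared on the configured interval until the remote backend reports the range as ready or the context is cancelled.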
+ + timer := time.NewTimer(c.prepareRangePollInterval) + defer timer.Stop() + + for { + ready, err := c.IsPrepared(ctx, ledgerRange) + if err != nil { + return err + } + if ready { + return nil + } + + select { + case <-ctx.Done(): + return errors.Wrap(ctx.Err(), "shutting down") + case <-timer.C: + timer.Reset(c.prepareRangePollInterval) + } + } +} + +// IsPrepared returns true if a given ledgerRange is prepared. +func (c RemoteCaptiveStellarCore) IsPrepared(ctx context.Context, ledgerRange Range) (bool, error) { + // TODO: Have some way to cancel all outstanding requests, not just + // PrepareRange. + u := *c.url + u.Path = path.Join(u.Path, "prepare-range") + rangeBytes, err := json.Marshal(ledgerRange) + if err != nil { + return false, errors.Wrap(err, "cannot serialize range") + } + body := bytes.NewReader(rangeBytes) + request, err := http.NewRequestWithContext(ctx, "POST", u.String(), body) + if err != nil { + return false, errors.Wrap(err, "cannot construct http request") + } + request.Header.Add("Content-Type", "application/json; charset=utf-8") + + var response *http.Response + response, err = c.client.Do(request) + if err != nil { + return false, errors.Wrap(err, "failed to execute request") + } + + var parsed PrepareRangeResponse + if err = decodeResponse(response, &parsed); err != nil { + return false, err + } + + return parsed.Ready, nil +} + +// GetLedger long-polls a remote stellar core backend, until the requested +// ledger is ready. + +// Call PrepareRange first to instruct the backend which ledgers to fetch. +// +// Requesting a ledger on non-prepared backend will return an error. +// +// Because data is streamed from Stellar-Core ledger after ledger user should +// request sequences in a non-decreasing order. If the requested sequence number +// is less than the last requested sequence number, an error will be returned. +func (c RemoteCaptiveStellarCore) GetLedger(ctx context.Context, sequence uint32) (xdr.LedgerCloseMeta, error) { + for { + // TODO: Have some way to cancel all outstanding requests, not just + // PrepareRange. + u := *c.url + u.Path = path.Join(u.Path, "ledger", strconv.FormatUint(uint64(sequence), 10)) + request, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return xdr.LedgerCloseMeta{}, errors.Wrap(err, "cannot construct http request") + } + + response, err := c.client.Do(request) + if err != nil { + return xdr.LedgerCloseMeta{}, errors.Wrap(err, "failed to execute request") + } + + if response.StatusCode == http.StatusRequestTimeout { + response.Body.Close() + // This request timed out. Retry. 
+ continue + } + + var parsed LedgerResponse + if err = decodeResponse(response, &parsed); err != nil { + return xdr.LedgerCloseMeta{}, err + } + + return xdr.LedgerCloseMeta(parsed.Ledger), nil + } +} diff --git a/ingest/ledgerbackend/remote_captive_core_test.go b/ingest/ledgerbackend/remote_captive_core_test.go new file mode 100644 index 0000000000..3a4b4d28a7 --- /dev/null +++ b/ingest/ledgerbackend/remote_captive_core_test.go @@ -0,0 +1,74 @@ +package ledgerbackend + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/stellar/go/xdr" +) + +func TestGetLedgerSucceeds(t *testing.T) { + expectedLedger := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 64, + }, + }, + }, + } + called := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called++ + json.NewEncoder(w).Encode(LedgerResponse{ + Ledger: Base64Ledger(expectedLedger), + }) + })) + defer server.Close() + + client, err := NewRemoteCaptive(server.URL) + require.NoError(t, err) + + ledger, err := client.GetLedger(context.Background(), 64) + require.NoError(t, err) + require.Equal(t, 1, called) + require.Equal(t, expectedLedger, ledger) +} + +func TestGetLedgerTakesAWhile(t *testing.T) { + expectedLedger := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 64, + }, + }, + }, + } + called := 0 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called++ + if called == 1 { + // TODO: Check this is what the server really does. + w.WriteHeader(http.StatusRequestTimeout) + return + } + json.NewEncoder(w).Encode(LedgerResponse{ + Ledger: Base64Ledger(expectedLedger), + }) + })) + defer server.Close() + + client, err := NewRemoteCaptive(server.URL) + require.NoError(t, err) + + ledger, err := client.GetLedger(context.Background(), 64) + require.NoError(t, err) + require.Equal(t, 2, called) + require.Equal(t, expectedLedger, ledger) +} diff --git a/ingest/ledgerbackend/stellar_core_runner.go b/ingest/ledgerbackend/stellar_core_runner.go new file mode 100644 index 0000000000..df9d8fc271 --- /dev/null +++ b/ingest/ledgerbackend/stellar_core_runner.go @@ -0,0 +1,482 @@ +package ledgerbackend + +import ( + "bufio" + "context" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/stellar/go/support/log" +) + +type stellarCoreRunnerInterface interface { + catchup(from, to uint32) error + runFrom(from uint32, hash string) error + getMetaPipe() <-chan metaResult + context() context.Context + getProcessExitError() (bool, error) + close() error +} + +type stellarCoreRunnerMode int + +const ( + stellarCoreRunnerModeOnline stellarCoreRunnerMode = iota + stellarCoreRunnerModeOffline +) + +// stellarCoreRunner uses a named pipe ( https://en.wikipedia.org/wiki/Named_pipe ) to stream ledgers directly +// from Stellar Core +type pipe struct { + // stellarCoreRunner will be reading ledgers emitted by Stellar Core from the pipe. + // After the Stellar Core process exits, stellarCoreRunner should eventually close the reader. + Reader io.ReadCloser + // stellarCoreRunner is responsible for closing the named pipe file after the Stellar Core process exits. 
+ // However, only the Stellar Core process will be writing to the pipe. stellarCoreRunner should not + // write anything to the named pipe file which is why the type of File is io.Closer. + File io.Closer +} + +type stellarCoreRunner struct { + executablePath string + + started bool + cmd *exec.Cmd + wg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc + ledgerBuffer *bufferedLedgerMetaReader + pipe pipe + mode stellarCoreRunnerMode + + lock sync.Mutex + processExited bool + processExitError error + + storagePath string + nonce string + + log *log.Entry +} + +func createRandomHexString(n int) string { + hex := []rune("abcdef1234567890") + b := make([]rune, n) + for i := range b { + b[i] = hex[rand.Intn(len(hex))] + } + return string(b) +} + +func newStellarCoreRunner(config CaptiveCoreConfig, mode stellarCoreRunnerMode) (*stellarCoreRunner, error) { + var fullStoragePath string + if runtime.GOOS == "windows" || mode == stellarCoreRunnerModeOffline { + // On Windows, first we ALWAYS append something to the base storage path, + // because we will delete the directory entirely when Horizon stops. We also + // add a random suffix in order to ensure that there aren't naming + // conflicts. + // This is done because it's impossible to send SIGINT on Windows so + // buckets can become corrupted. + // We also want to use random directories in offline mode (reingestion) + // because it's possible it's running multiple Stellar-Cores on a single + // machine. + fullStoragePath = path.Join(config.StoragePath, "captive-core-"+createRandomHexString(8)) + } else { + // Use the specified directory to store Captive Core's data: + // https://github.com/stellar/go/issues/3437 + // but be sure to re-use rather than replace it: + // https://github.com/stellar/go/issues/3631 + fullStoragePath = path.Join(config.StoragePath, "captive-core") + } + + info, err := os.Stat(fullStoragePath) + if os.IsNotExist(err) { + innerErr := os.MkdirAll(fullStoragePath, os.FileMode(int(0755))) // rwx|rx|rx + if innerErr != nil { + return nil, errors.Wrap(innerErr, fmt.Sprintf( + "failed to create storage directory (%s)", fullStoragePath)) + } + } else if !info.IsDir() { + return nil, errors.New(fmt.Sprintf("%s is not a directory", fullStoragePath)) + } else if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf( + "error accessing storage directory (%s)", fullStoragePath)) + } + + ctx, cancel := context.WithCancel(config.Context) + + runner := &stellarCoreRunner{ + executablePath: config.BinaryPath, + ctx: ctx, + cancel: cancel, + storagePath: fullStoragePath, + mode: mode, + nonce: fmt.Sprintf( + "captive-stellar-core-%x", + rand.New(rand.NewSource(time.Now().UnixNano())).Uint64(), + ), + log: config.Log, + } + + if conf, err := writeConf(config.Toml, mode, runner.getConfFileName()); err != nil { + return nil, errors.Wrap(err, "error writing configuration") + } else { + runner.log.Debugf("captive core config file contents:\n%s", conf) + } + + return runner, nil +} + +func writeConf(captiveCoreToml *CaptiveCoreToml, mode stellarCoreRunnerMode, location string) (string, error) { + text, err := generateConfig(captiveCoreToml, mode) + if err != nil { + return "", err + } + + return string(text), ioutil.WriteFile(location, text, 0644) +} + +func generateConfig(captiveCoreToml *CaptiveCoreToml, mode stellarCoreRunnerMode) ([]byte, error) { + if mode == stellarCoreRunnerModeOffline { + var err error + captiveCoreToml, err = captiveCoreToml.CatchupToml() + if err != nil { + return nil, errors.Wrap(err, "could not 
generate catch up config") + } + } + + if !captiveCoreToml.QuorumSetIsConfigured() { + return nil, errors.New("captive-core config file does not define any quorum set") + } + + text, err := captiveCoreToml.Marshal() + if err != nil { + return nil, errors.Wrap(err, "could not marshal captive core config") + } + return text, nil +} + +func (r *stellarCoreRunner) getConfFileName() string { + joinedPath := filepath.Join(r.storagePath, "stellar-core.conf") + + // Given that `storagePath` can be anything, we need the full, absolute path + // here so that everything Core needs is created under the storagePath + // subdirectory. + // + // If the path *can't* be absolutely resolved (bizarre), we can still try + // recovering by using the path the user specified directly. + path, err := filepath.Abs(joinedPath) + if err != nil { + r.log.Warnf("Failed to resolve %s as an absolute path: %s", joinedPath, err) + return joinedPath + } + return path +} + +func (r *stellarCoreRunner) getLogLineWriter() io.Writer { + rd, wr := io.Pipe() + br := bufio.NewReader(rd) + + // Strip timestamps from log lines from captive stellar-core. We emit our own. + dateRx := regexp.MustCompile(`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3} `) + go func() { + levelRx := regexp.MustCompile(`\[(\w+) ([A-Z]+)\] (.*)`) + for { + line, err := br.ReadString('\n') + if err != nil { + break + } + line = dateRx.ReplaceAllString(line, "") + line = strings.TrimSpace(line) + + if line == "" { + continue + } + + matches := levelRx.FindStringSubmatch(line) + if len(matches) >= 4 { + // Extract the substrings from the log entry and trim it + category, level := matches[1], matches[2] + line = matches[3] + + levelMapping := map[string]func(string, ...interface{}){ + "FATAL": r.log.Errorf, + "ERROR": r.log.Errorf, + "WARNING": r.log.Warnf, + "INFO": r.log.Infof, + "DEBUG": r.log.Debugf, + } + + writer := r.log.Infof + if f, ok := levelMapping[strings.ToUpper(level)]; ok { + writer = f + } + writer("%s: %s", category, line) + } else { + r.log.Info(line) + } + } + }() + return wr +} + +func (r *stellarCoreRunner) createCmd(params ...string) *exec.Cmd { + allParams := append([]string{"--conf", r.getConfFileName()}, params...) + cmd := exec.Command(r.executablePath, allParams...) 
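+	// Run Stellar Core from the storage directory and forward both of its output streams through the log line writer.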
+ cmd.Dir = r.storagePath + cmd.Stdout = r.getLogLineWriter() + cmd.Stderr = r.getLogLineWriter() + return cmd +} + +// context returns the context.Context instance associated with the running captive core instance +func (r *stellarCoreRunner) context() context.Context { + return r.ctx +} + +// catchup executes the catchup command on the captive core subprocess +func (r *stellarCoreRunner) catchup(from, to uint32) error { + r.lock.Lock() + defer r.lock.Unlock() + + // check if we have already been closed + if r.ctx.Err() != nil { + return r.ctx.Err() + } + + if r.started { + return errors.New("runner already started") + } + + rangeArg := fmt.Sprintf("%d/%d", to, to-from+1) + r.cmd = r.createCmd( + "catchup", rangeArg, + "--metadata-output-stream", r.getPipeName(), + "--in-memory", + ) + + var err error + r.pipe, err = r.start(r.cmd) + if err != nil { + r.closeLogLineWriters(r.cmd) + return errors.Wrap(err, "error starting `stellar-core catchup` subprocess") + } + + r.started = true + r.ledgerBuffer = newBufferedLedgerMetaReader(r.pipe.Reader) + go r.ledgerBuffer.start() + + if binaryWatcher, err := newFileWatcher(r); err != nil { + r.log.Warnf("could not create captive core binary watcher: %v", err) + } else { + go binaryWatcher.loop() + } + + r.wg.Add(1) + go r.handleExit() + + return nil +} + +// runFrom executes the run command with a starting ledger on the captive core subprocess +func (r *stellarCoreRunner) runFrom(from uint32, hash string) error { + r.lock.Lock() + defer r.lock.Unlock() + + // check if we have already been closed + if r.ctx.Err() != nil { + return r.ctx.Err() + } + + if r.started { + return errors.New("runner already started") + } + + r.cmd = r.createCmd( + "run", + "--in-memory", + "--start-at-ledger", fmt.Sprintf("%d", from), + "--start-at-hash", hash, + "--metadata-output-stream", r.getPipeName(), + ) + + var err error + r.pipe, err = r.start(r.cmd) + if err != nil { + r.closeLogLineWriters(r.cmd) + return errors.Wrap(err, "error starting `stellar-core run` subprocess") + } + + r.started = true + r.ledgerBuffer = newBufferedLedgerMetaReader(r.pipe.Reader) + go r.ledgerBuffer.start() + + if binaryWatcher, err := newFileWatcher(r); err != nil { + r.log.Warnf("could not create captive core binary watcher: %v", err) + } else { + go binaryWatcher.loop() + } + + r.wg.Add(1) + go r.handleExit() + + return nil +} + +func (r *stellarCoreRunner) handleExit() { + defer r.wg.Done() + + // Pattern recommended in: + // https://github.com/golang/go/blob/cacac8bdc5c93e7bc71df71981fdf32dded017bf/src/cmd/go/script_test.go#L1091-L1098 + var interrupt os.Signal = os.Interrupt + if runtime.GOOS == "windows" { + // Per https://golang.org/pkg/os/#Signal, β€œInterrupt is not implemented on + // Windows; using it with os.Process.Signal will return an error.” + // Fall back to Kill instead. + interrupt = os.Kill + } + + errc := make(chan error) + go func() { + select { + case errc <- nil: + return + case <-r.ctx.Done(): + } + + err := r.cmd.Process.Signal(interrupt) + if err == nil { + err = r.ctx.Err() // Report ctx.Err() as the reason we interrupted. + } else if err.Error() == "os: process already finished" { + errc <- nil + return + } + + timer := time.NewTimer(10 * time.Second) + select { + // Report ctx.Err() as the reason we interrupted the process... + case errc <- r.ctx.Err(): + timer.Stop() + return + // ...but after killDelay has elapsed, fall back to a stronger signal. + case <-timer.C: + } + + // Wait still hasn't returned. 
+ // Kill the process harder to make sure that it exits. + // + // Ignore any error: if cmd.Process has already terminated, we still + // want to send ctx.Err() (or the error from the Interrupt call) + // to properly attribute the signal that may have terminated it. + _ = r.cmd.Process.Kill() + + errc <- err + }() + + waitErr := r.cmd.Wait() + r.closeLogLineWriters(r.cmd) + + r.lock.Lock() + defer r.lock.Unlock() + + // By closing the pipe file we will send an EOF to the pipe reader used by ledgerBuffer. + // We need to do this operation with the lock to ensure that the processExitError is available + // when the ledgerBuffer channel is closed + if closeErr := r.pipe.File.Close(); closeErr != nil { + r.log.WithError(closeErr).Warn("could not close captive core write pipe") + } + + r.processExited = true + if interruptErr := <-errc; interruptErr != nil { + r.processExitError = interruptErr + } else { + r.processExitError = waitErr + } +} + +// closeLogLineWriters closes the go routines created by getLogLineWriter() +func (r *stellarCoreRunner) closeLogLineWriters(cmd *exec.Cmd) { + cmd.Stdout.(*io.PipeWriter).Close() + cmd.Stderr.(*io.PipeWriter).Close() +} + +// getMetaPipe returns a channel which contains ledgers streamed from the captive core subprocess +func (r *stellarCoreRunner) getMetaPipe() <-chan metaResult { + return r.ledgerBuffer.getChannel() +} + +// getProcessExitError returns an exit error (can be nil) of the process and a bool indicating +// if the process has exited yet +// getProcessExitError is thread safe +func (r *stellarCoreRunner) getProcessExitError() (bool, error) { + r.lock.Lock() + defer r.lock.Unlock() + return r.processExited, r.processExitError +} + +// close kills the captive core process if it is still running and performs +// the necessary cleanup on the resources associated with the captive core process +// close is both thread safe and idempotent +func (r *stellarCoreRunner) close() error { + r.lock.Lock() + started := r.started + storagePath := r.storagePath + + r.storagePath = "" + + // check if we have already closed + if storagePath == "" { + r.lock.Unlock() + return nil + } + + if !started { + // Update processExited if handleExit that updates it not even started + // (error before command run). + r.processExited = true + } + + r.cancel() + r.lock.Unlock() + + // only reap captive core sub process and related go routines if we've started + // otherwise, just cleanup the temp dir + if started { + // wait for the stellar core process to terminate + r.wg.Wait() + + // drain meta pipe channel to make sure the ledger buffer goroutine exits + for range r.getMetaPipe() { + + } + + // now it's safe to close the pipe reader + // because the ledger buffer is no longer reading from it + r.pipe.Reader.Close() + } + + if runtime.GOOS == "windows" || + (r.processExitError != nil && r.processExitError != context.Canceled) || + r.mode == stellarCoreRunnerModeOffline { + // It's impossible to send SIGINT on Windows so buckets can become + // corrupted. If we can't reuse it, then remove it. + // We also remove the storage path if there was an error terminating the + // process (files can be corrupted). + // We remove all files when reingesting to save disk space. 
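+ // Note: storagePath was captured before being reset under the lock above,
+ // so the removal below still targets the original directory.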
+ return os.RemoveAll(storagePath) + } + + return nil +} diff --git a/ingest/ledgerbackend/stellar_core_runner_posix.go b/ingest/ledgerbackend/stellar_core_runner_posix.go new file mode 100644 index 0000000000..2b0f2b4115 --- /dev/null +++ b/ingest/ledgerbackend/stellar_core_runner_posix.go @@ -0,0 +1,42 @@ +//go:build !windows +// +build !windows + +package ledgerbackend + +import ( + "os" + "os/exec" + + "github.com/pkg/errors" +) + +// Posix-specific methods for the StellarCoreRunner type. + +func (c *stellarCoreRunner) getPipeName() string { + // The exec.Cmd.ExtraFiles field carries *io.File values that are assigned + // to child process fds counting from 3, and we'll be passing exactly one + // fd: the write end of the anonymous pipe below. + return "fd:3" +} + +func (c *stellarCoreRunner) start(cmd *exec.Cmd) (pipe, error) { + // First make an anonymous pipe. + // Note io.File objects close-on-finalization. + readFile, writeFile, err := os.Pipe() + if err != nil { + return pipe{}, errors.Wrap(err, "error making a pipe") + } + p := pipe{Reader: readFile, File: writeFile} + + // Add the write-end to the set of inherited file handles. This is defined + // to be fd 3 on posix platforms. + cmd.ExtraFiles = []*os.File{writeFile} + err = cmd.Start() + if err != nil { + writeFile.Close() + readFile.Close() + return pipe{}, errors.Wrap(err, "error starting stellar-core") + } + + return p, nil +} diff --git a/ingest/ledgerbackend/stellar_core_runner_test.go b/ingest/ledgerbackend/stellar_core_runner_test.go new file mode 100644 index 0000000000..18e6b0656a --- /dev/null +++ b/ingest/ledgerbackend/stellar_core_runner_test.go @@ -0,0 +1,92 @@ +package ledgerbackend + +import ( + "context" + "os" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/support/log" +) + +func TestCloseBeforeStartOffline(t *testing.T) { + captiveCoreToml, err := NewCaptiveCoreToml(CaptiveCoreTomlParams{}) + assert.NoError(t, err) + + runner, err := newStellarCoreRunner(CaptiveCoreConfig{ + HistoryArchiveURLs: []string{"http://localhost"}, + Log: log.New(), + Context: context.Background(), + Toml: captiveCoreToml, + }, stellarCoreRunnerModeOffline) + assert.NoError(t, err) + + tempDir := runner.storagePath + info, err := os.Stat(tempDir) + assert.NoError(t, err) + assert.True(t, info.IsDir()) + + assert.NoError(t, runner.close()) + + // Directory cleaned up on shutdown when reingesting to save space + _, err = os.Stat(tempDir) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no such file or directory") +} + +func TestCloseBeforeStartOnline(t *testing.T) { + captiveCoreToml, err := NewCaptiveCoreToml(CaptiveCoreTomlParams{}) + assert.NoError(t, err) + + captiveCoreToml.AddExamplePubnetValidators() + + runner, err := newStellarCoreRunner(CaptiveCoreConfig{ + HistoryArchiveURLs: []string{"http://localhost"}, + Log: log.New(), + Context: context.Background(), + Toml: captiveCoreToml, + }, stellarCoreRunnerModeOnline) + assert.NoError(t, err) + + tempDir := runner.storagePath + info, err := os.Stat(tempDir) + assert.NoError(t, err) + assert.True(t, info.IsDir()) + + assert.NoError(t, runner.close()) + + // Directory no longer cleaned up on shutdown (perf. 
bump in v2.5.0) + _, err = os.Stat(tempDir) + assert.NoError(t, err) +} + +func TestCloseBeforeStartOnlineWithError(t *testing.T) { + captiveCoreToml, err := NewCaptiveCoreToml(CaptiveCoreTomlParams{}) + assert.NoError(t, err) + + captiveCoreToml.AddExamplePubnetValidators() + + runner, err := newStellarCoreRunner(CaptiveCoreConfig{ + HistoryArchiveURLs: []string{"http://localhost"}, + Log: log.New(), + Context: context.Background(), + Toml: captiveCoreToml, + }, stellarCoreRunnerModeOnline) + assert.NoError(t, err) + + runner.processExitError = errors.New("some error") + + tempDir := runner.storagePath + info, err := os.Stat(tempDir) + assert.NoError(t, err) + assert.True(t, info.IsDir()) + + assert.NoError(t, runner.close()) + + // Directory cleaned up on shutdown with error (potentially corrupted files) + _, err = os.Stat(tempDir) + assert.Error(t, err) + assert.Contains(t, err.Error(), "no such file or directory") +} diff --git a/ingest/ledgerbackend/stellar_core_runner_windows.go b/ingest/ledgerbackend/stellar_core_runner_windows.go new file mode 100644 index 0000000000..80932aaf50 --- /dev/null +++ b/ingest/ledgerbackend/stellar_core_runner_windows.go @@ -0,0 +1,41 @@ +//go:build windows +// +build windows + +package ledgerbackend + +import ( + "fmt" + "os/exec" + + "github.com/Microsoft/go-winio" +) + +// Windows-specific methods for the stellarCoreRunner type. + +func (c *stellarCoreRunner) getPipeName() string { + return fmt.Sprintf(`\\.\pipe\%s`, c.nonce) +} + +func (c *stellarCoreRunner) start(cmd *exec.Cmd) (pipe, error) { + // First set up the server pipe. + listener, err := winio.ListenPipe(c.getPipeName(), nil) + if err != nil { + return pipe{}, err + } + + // Then start the process. + err = cmd.Start() + if err != nil { + listener.Close() + return pipe{}, err + } + + // Then accept on the server end. 
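+ // Accept blocks until the stellar-core child connects to the named pipe it
+ // was handed via --metadata-output-stream (see getPipeName).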
+ connection, err := listener.Accept() + if err != nil { + listener.Close() + return pipe{}, err + } + + return pipe{Reader: connection, File: listener}, nil +} diff --git a/ingest/ledgerbackend/testdata/appendix-with-bucket-dir-path.cfg b/ingest/ledgerbackend/testdata/appendix-with-bucket-dir-path.cfg new file mode 100644 index 0000000000..9dc27bf8f6 --- /dev/null +++ b/ingest/ledgerbackend/testdata/appendix-with-bucket-dir-path.cfg @@ -0,0 +1,2 @@ +BUCKET_DIR_PATH="test-buckets" + diff --git a/ingest/ledgerbackend/testdata/appendix-with-fields.cfg b/ingest/ledgerbackend/testdata/appendix-with-fields.cfg new file mode 100644 index 0000000000..3a7d641878 --- /dev/null +++ b/ingest/ledgerbackend/testdata/appendix-with-fields.cfg @@ -0,0 +1,66 @@ +NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015" +FAILURE_SAFETY=2 +UNSAFE_QUORUM=false +PUBLIC_HTTP_PORT=true +RUN_STANDALONE=false +HTTP_PORT = 6789 +PEER_PORT = 12345 +LOG_FILE_PATH = "" + +NODE_NAMES=[ + "GA22N4YGO7IJDRF2SISA5KHULGYYKDXBQGYIWUVNMSNHF5G2DNBKP3M5 eliza", + "GCDENOCHA6TQL6DFC4FS54HIH7RP7XR7VZCQZFANMGLT2WXJ7D7KGV2P hal9000", + "GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ victor" +] + +[HISTORY] +get="curl http://mirror.history.stellar.org/{0} -o {1}" + +[HISTORY.stellar] +get="curl http://history.stellar.org/{0} -o {1}" +put="aws s3 cp {0} s3://history.stellar.org/{1}" + +[HISTORY.stellar---backup] +get="curl http://history.stellar.org/{0} -o {1}" +put="aws s3 cp [QUORUM_SET.1]" + + +#[QUORUM_SET--1] +#THRESHOLD_PERCENT=66 +#VALIDATORS=[ +# "GDQWITFJLZ5HT6JCOXYEVV5VFD6FTLAKJAUDKHAV3HKYGVJWA2DPYSQV A_from_above", +# "GANLKVE4WOTE75MJS6FQ73CL65TSPYYMFZKC4VDEZ45LGQRCATGAIGIA B_from_above", +# "GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ C_from_above" +#] + +[QUORUM_SET] +THRESHOLD_PERCENT=66 +VALIDATORS=[ + "GDQWITFJLZ5HT6JCOXYEVV5VFD6FTLAKJAUDKHAV3HKYGVJWA2DPYSQV A_from_above", + "GANLKVE4WOTE75MJS6FQ73CL65TSPYYMFZKC4VDEZ45LGQRCATGAIGIA B_from_above", + "GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ C_from_above" +] + +[QUORUM_SET.1] +THRESHOLD_PERCENT=67 +VALIDATORS=[ + "$self", + "GDXJAZZJ3H5MJGR6PDQX3JHRREAVYNCVM7FJYGLZJKEHQV2ZXEUO5SX2 E_from_above", + "GB6GK3WWTZYY2JXWM6C5LRKLQ2X7INQ7IYTSECCG3SMZFYOZNEZR4SO5 F_from_above" +] + +[QUORUM_SET.2] +THRESHOLD_PERCENT=100 +VALIDATORS=[ + "GCTAIXWDDBM3HBDHGSAOLY223QZHPS2EDROF7YUBB3GNYXLOCPV5PXUK G_from_above", + "GCJ6UBAOXNQFN3HGLCVQBWGEZO6IABSMNE2OCQC4FJAZXJA5AIE7WSPW H_from_above" +] + +[QUORUM_SET.2.1] +THRESHOLD_PERCENT=50 +VALIDATORS=[ + "GC4X65TQJVI3OWAS4DTA2EN2VNZ5ZRJD646H5WKEJHO5ZHURDRAX2OTH I_from_above", + "GAXSWUO4RBELRQT5WMDLIKTRIKC722GGXX2GIGEYQZDQDLOTINQ4DX6F J_from_above", + "GAWOEMG7DQDWHCFDTPJEBYWRKUUZTX2M2HLMNABM42G7C7IAPU54GL6X K_from_above", + "GDZAJNUUDJFKTZX3YWZSOAS4S4NGCJ5RQAY7JPYBG5CUFL3JZ5C3ECOH L_from_above" +] diff --git a/ingest/ledgerbackend/testdata/duplicate-home-domain.cfg b/ingest/ledgerbackend/testdata/duplicate-home-domain.cfg new file mode 100644 index 0000000000..d3fd5a77dc --- /dev/null +++ b/ingest/ledgerbackend/testdata/duplicate-home-domain.cfg @@ -0,0 +1,13 @@ +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="MEDIUM" + +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="MEDIUM" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" diff --git a/ingest/ledgerbackend/testdata/duplicate-validator.cfg b/ingest/ledgerbackend/testdata/duplicate-validator.cfg 
new file mode 100644 index 0000000000..10b916cc2c --- /dev/null +++ b/ingest/ledgerbackend/testdata/duplicate-validator.cfg @@ -0,0 +1,16 @@ +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="MEDIUM" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="pubnet.stellar.org" +PUBLIC_KEY="GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ" +ADDRESS="localhost:9834" +QUALITY="HIGH" \ No newline at end of file diff --git a/ingest/ledgerbackend/testdata/empty-home-domain-quality.cfg b/ingest/ledgerbackend/testdata/empty-home-domain-quality.cfg new file mode 100644 index 0000000000..f4e259fdcf --- /dev/null +++ b/ingest/ledgerbackend/testdata/empty-home-domain-quality.cfg @@ -0,0 +1,8 @@ +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" diff --git a/ingest/ledgerbackend/testdata/empty-home-domain.cfg b/ingest/ledgerbackend/testdata/empty-home-domain.cfg new file mode 100644 index 0000000000..c4ae874593 --- /dev/null +++ b/ingest/ledgerbackend/testdata/empty-home-domain.cfg @@ -0,0 +1,12 @@ +[[HOME_DOMAINS]] +QUALITY="MEDIUM" + +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="MEDIUM" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" diff --git a/ingest/ledgerbackend/testdata/expected-offline-core.cfg b/ingest/ledgerbackend/testdata/expected-offline-core.cfg new file mode 100644 index 0000000000..62aeeb6664 --- /dev/null +++ b/ingest/ledgerbackend/testdata/expected-offline-core.cfg @@ -0,0 +1,15 @@ +# Generated file, do not edit +FAILURE_SAFETY = 0 +HTTP_PORT = 0 +LOG_FILE_PATH = "" +NETWORK_PASSPHRASE = "Public Global Stellar Network ; September 2015" +PEER_PORT = 12345 +RUN_STANDALONE = true +UNSAFE_QUORUM = true + +[HISTORY.h0] + get = "curl -sf http://localhost:1170/{0} -o {1}" + +[QUORUM_SET] + THRESHOLD_PERCENT = 100 + VALIDATORS = ["GCZBOIAY4HLKAJVNJORXZOZRAY2BJDBZHKPBHZCRAIUR5IHC2UHBGCQR"] diff --git a/ingest/ledgerbackend/testdata/expected-offline-with-appendix-core.cfg b/ingest/ledgerbackend/testdata/expected-offline-with-appendix-core.cfg new file mode 100644 index 0000000000..124abc435b --- /dev/null +++ b/ingest/ledgerbackend/testdata/expected-offline-with-appendix-core.cfg @@ -0,0 +1,21 @@ +# Generated file, do not edit +FAILURE_SAFETY = 0 +HTTP_PORT = 0 +LOG_FILE_PATH = "" +NETWORK_PASSPHRASE = "Public Global Stellar Network ; September 2015" +PEER_PORT = 12345 +RUN_STANDALONE = true +UNSAFE_QUORUM = true + +[[HOME_DOMAINS]] + HOME_DOMAIN = "testnet.stellar.org" + QUALITY = "MEDIUM" + +[[VALIDATORS]] + ADDRESS = "localhost:123" + HOME_DOMAIN = "testnet.stellar.org" + NAME = "sdf_testnet_1" + PUBLIC_KEY = "GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" + +[HISTORY.h0] + get = "curl -sf http://localhost:1170/{0} -o {1}" diff --git a/ingest/ledgerbackend/testdata/expected-offline-with-extra-fields.cfg b/ingest/ledgerbackend/testdata/expected-offline-with-extra-fields.cfg new file mode 100644 index 0000000000..d7b41a0b81 --- /dev/null +++ b/ingest/ledgerbackend/testdata/expected-offline-with-extra-fields.cfg @@ -0,0 +1,36 @@ +# Generated file, do not edit +FAILURE_SAFETY = 0 +HTTP_PORT = 
0 +LOG_FILE_PATH = "" +NETWORK_PASSPHRASE = "Public Global Stellar Network ; September 2015" +NODE_NAMES = ["GA22N4YGO7IJDRF2SISA5KHULGYYKDXBQGYIWUVNMSNHF5G2DNBKP3M5 eliza", "GCDENOCHA6TQL6DFC4FS54HIH7RP7XR7VZCQZFANMGLT2WXJ7D7KGV2P hal9000", "GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ victor"] +PEER_PORT = 12345 +RUN_STANDALONE = true +UNSAFE_QUORUM = true + +[HISTORY] + get = "curl http://mirror.history.stellar.org/{0} -o {1}" + +[HISTORY.stellar] + get = "curl http://history.stellar.org/{0} -o {1}" + put = "aws s3 cp {0} s3://history.stellar.org/{1}" + +[HISTORY.stellar---backup] + get = "curl http://history.stellar.org/{0} -o {1}" + put = "aws s3 cp [QUORUM_SET.1]" + +[QUORUM_SET] + THRESHOLD_PERCENT = 66 + VALIDATORS = ["GDQWITFJLZ5HT6JCOXYEVV5VFD6FTLAKJAUDKHAV3HKYGVJWA2DPYSQV A_from_above", "GANLKVE4WOTE75MJS6FQ73CL65TSPYYMFZKC4VDEZ45LGQRCATGAIGIA B_from_above", "GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ C_from_above"] + +[QUORUM_SET.1] + THRESHOLD_PERCENT = 67 + VALIDATORS = ["$self", "GDXJAZZJ3H5MJGR6PDQX3JHRREAVYNCVM7FJYGLZJKEHQV2ZXEUO5SX2 E_from_above", "GB6GK3WWTZYY2JXWM6C5LRKLQ2X7INQ7IYTSECCG3SMZFYOZNEZR4SO5 F_from_above"] + +[QUORUM_SET.2] + THRESHOLD_PERCENT = 100 + VALIDATORS = ["GCTAIXWDDBM3HBDHGSAOLY223QZHPS2EDROF7YUBB3GNYXLOCPV5PXUK G_from_above", "GCJ6UBAOXNQFN3HGLCVQBWGEZO6IABSMNE2OCQC4FJAZXJA5AIE7WSPW H_from_above"] + +[QUORUM_SET.2.1] + THRESHOLD_PERCENT = 50 + VALIDATORS = ["GC4X65TQJVI3OWAS4DTA2EN2VNZ5ZRJD646H5WKEJHO5ZHURDRAX2OTH I_from_above", "GAXSWUO4RBELRQT5WMDLIKTRIKC722GGXX2GIGEYQZDQDLOTINQ4DX6F J_from_above", "GAWOEMG7DQDWHCFDTPJEBYWRKUUZTX2M2HLMNABM42G7C7IAPU54GL6X K_from_above", "GDZAJNUUDJFKTZX3YWZSOAS4S4NGCJ5RQAY7JPYBG5CUFL3JZ5C3ECOH L_from_above"] diff --git a/ingest/ledgerbackend/testdata/expected-offline-with-no-peer-port.cfg b/ingest/ledgerbackend/testdata/expected-offline-with-no-peer-port.cfg new file mode 100644 index 0000000000..9eca1ccad1 --- /dev/null +++ b/ingest/ledgerbackend/testdata/expected-offline-with-no-peer-port.cfg @@ -0,0 +1,14 @@ +# Generated file, do not edit +FAILURE_SAFETY = 0 +HTTP_PORT = 0 +LOG_FILE_PATH = "/var/stellar-core/test.log" +NETWORK_PASSPHRASE = "Public Global Stellar Network ; September 2015" +RUN_STANDALONE = true +UNSAFE_QUORUM = true + +[HISTORY.h0] + get = "curl -sf http://localhost:1170/{0} -o {1}" + +[QUORUM_SET] + THRESHOLD_PERCENT = 100 + VALIDATORS = ["GCZBOIAY4HLKAJVNJORXZOZRAY2BJDBZHKPBHZCRAIUR5IHC2UHBGCQR"] diff --git a/ingest/ledgerbackend/testdata/expected-online-core.cfg b/ingest/ledgerbackend/testdata/expected-online-core.cfg new file mode 100644 index 0000000000..57a5e7ff2c --- /dev/null +++ b/ingest/ledgerbackend/testdata/expected-online-core.cfg @@ -0,0 +1,19 @@ +# Generated file, do not edit +FAILURE_SAFETY = -1 +HTTP_PORT = 6789 +LOG_FILE_PATH = "" +NETWORK_PASSPHRASE = "Public Global Stellar Network ; September 2015" +PEER_PORT = 12345 + +[[HOME_DOMAINS]] + HOME_DOMAIN = "testnet.stellar.org" + QUALITY = "MEDIUM" + +[[VALIDATORS]] + ADDRESS = "localhost:123" + HOME_DOMAIN = "testnet.stellar.org" + NAME = "sdf_testnet_1" + PUBLIC_KEY = "GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" + +[HISTORY.h0] + get = "curl -sf http://localhost:1170/{0} -o {1}" diff --git a/ingest/ledgerbackend/testdata/expected-online-with-no-http-port.cfg b/ingest/ledgerbackend/testdata/expected-online-with-no-http-port.cfg new file mode 100644 index 0000000000..89e1762757 --- /dev/null +++ b/ingest/ledgerbackend/testdata/expected-online-with-no-http-port.cfg @@ -0,0 +1,19 @@ +# 
Generated file, do not edit +FAILURE_SAFETY = -1 +HTTP_PORT = 11626 +LOG_FILE_PATH = "" +NETWORK_PASSPHRASE = "Public Global Stellar Network ; September 2015" +PEER_PORT = 12345 + +[[HOME_DOMAINS]] + HOME_DOMAIN = "testnet.stellar.org" + QUALITY = "MEDIUM" + +[[VALIDATORS]] + ADDRESS = "localhost:123" + HOME_DOMAIN = "testnet.stellar.org" + NAME = "sdf_testnet_1" + PUBLIC_KEY = "GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" + +[HISTORY.h0] + get = "curl -sf http://localhost:1170/{0} -o {1}" diff --git a/ingest/ledgerbackend/testdata/expected-online-with-no-peer-port.cfg b/ingest/ledgerbackend/testdata/expected-online-with-no-peer-port.cfg new file mode 100644 index 0000000000..1b65c5f318 --- /dev/null +++ b/ingest/ledgerbackend/testdata/expected-online-with-no-peer-port.cfg @@ -0,0 +1,18 @@ +# Generated file, do not edit +FAILURE_SAFETY = -1 +HTTP_PORT = 6789 +LOG_FILE_PATH = "/var/stellar-core/test.log" +NETWORK_PASSPHRASE = "Public Global Stellar Network ; September 2015" + +[[HOME_DOMAINS]] + HOME_DOMAIN = "testnet.stellar.org" + QUALITY = "MEDIUM" + +[[VALIDATORS]] + ADDRESS = "localhost:123" + HOME_DOMAIN = "testnet.stellar.org" + NAME = "sdf_testnet_1" + PUBLIC_KEY = "GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" + +[HISTORY.h0] + get = "curl -sf http://localhost:1170/{0} -o {1}" diff --git a/ingest/ledgerbackend/testdata/invalid-captive-core-field.cfg b/ingest/ledgerbackend/testdata/invalid-captive-core-field.cfg new file mode 100644 index 0000000000..11cb9d83cc --- /dev/null +++ b/ingest/ledgerbackend/testdata/invalid-captive-core-field.cfg @@ -0,0 +1,12 @@ +# CATCHUP_RECENT is not supported by captive core +CATCHUP_RECENT=100 + +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="MEDIUM" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" diff --git a/ingest/ledgerbackend/testdata/invalid-home-domain-quality.cfg b/ingest/ledgerbackend/testdata/invalid-home-domain-quality.cfg new file mode 100644 index 0000000000..715e0286bc --- /dev/null +++ b/ingest/ledgerbackend/testdata/invalid-home-domain-quality.cfg @@ -0,0 +1,9 @@ +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="DECENT" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" diff --git a/ingest/ledgerbackend/testdata/sample-appendix.cfg b/ingest/ledgerbackend/testdata/sample-appendix.cfg new file mode 100644 index 0000000000..08791e06a2 --- /dev/null +++ b/ingest/ledgerbackend/testdata/sample-appendix.cfg @@ -0,0 +1,9 @@ +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="MEDIUM" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" diff --git a/ingest/ledgerbackend/testdata/validator-has-invalid-public-key.cfg b/ingest/ledgerbackend/testdata/validator-has-invalid-public-key.cfg new file mode 100644 index 0000000000..46fb13e6f9 --- /dev/null +++ b/ingest/ledgerbackend/testdata/validator-has-invalid-public-key.cfg @@ -0,0 +1,6 @@ +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet2.stellar.org" +PUBLIC_KEY="invalid-key" +ADDRESS="localhost:9834" +QUALITY="HIGH" \ No newline at end of file diff --git a/ingest/ledgerbackend/testdata/validator-has-invalid-quality.cfg 
b/ingest/ledgerbackend/testdata/validator-has-invalid-quality.cfg new file mode 100644 index 0000000000..863eba5d23 --- /dev/null +++ b/ingest/ledgerbackend/testdata/validator-has-invalid-quality.cfg @@ -0,0 +1,6 @@ +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet2.stellar.org" +PUBLIC_KEY="GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ" +ADDRESS="localhost:9834" +QUALITY="DECENT" \ No newline at end of file diff --git a/ingest/ledgerbackend/testdata/validator-missing-home-domain.cfg b/ingest/ledgerbackend/testdata/validator-missing-home-domain.cfg new file mode 100644 index 0000000000..95d7d95de4 --- /dev/null +++ b/ingest/ledgerbackend/testdata/validator-missing-home-domain.cfg @@ -0,0 +1,9 @@ +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="MEDIUM" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" +QUALITY="MEDIUM" \ No newline at end of file diff --git a/ingest/ledgerbackend/testdata/validator-missing-name.cfg b/ingest/ledgerbackend/testdata/validator-missing-name.cfg new file mode 100644 index 0000000000..d3ace191cb --- /dev/null +++ b/ingest/ledgerbackend/testdata/validator-missing-name.cfg @@ -0,0 +1,5 @@ +[[VALIDATORS]] +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" +QUALITY="MEDIUM" \ No newline at end of file diff --git a/ingest/ledgerbackend/testdata/validator-missing-public-key.cfg b/ingest/ledgerbackend/testdata/validator-missing-public-key.cfg new file mode 100644 index 0000000000..61d11b0628 --- /dev/null +++ b/ingest/ledgerbackend/testdata/validator-missing-public-key.cfg @@ -0,0 +1,8 @@ +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="MEDIUM" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +ADDRESS="localhost:123" \ No newline at end of file diff --git a/ingest/ledgerbackend/testdata/validator-missing-quality.cfg b/ingest/ledgerbackend/testdata/validator-missing-quality.cfg new file mode 100644 index 0000000000..eab0af956a --- /dev/null +++ b/ingest/ledgerbackend/testdata/validator-missing-quality.cfg @@ -0,0 +1,15 @@ +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="MEDIUM" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="localhost:123" + +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet2.stellar.org" +PUBLIC_KEY="GDV46EIEF57TDL4W27UFDAUVPDDCKJNVBYB3WIV2WYUYUG753FCFU6EJ" +ADDRESS="localhost:9834" diff --git a/ingest/ledgerbackend/toml.go b/ingest/ledgerbackend/toml.go new file mode 100644 index 0000000000..c6ac869a9f --- /dev/null +++ b/ingest/ledgerbackend/toml.go @@ -0,0 +1,553 @@ +package ledgerbackend + +import ( + "bytes" + "fmt" + "io/ioutil" + "regexp" + "strings" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" + + "github.com/pelletier/go-toml" +) + +const ( + defaultHTTPPort = 11626 + defaultFailureSafety = -1 + + // if LOG_FILE_PATH is omitted stellar core actually defaults to "stellar-core.log" + // however, we are overriding this default for captive core + defaultLogFilePath = "" // by default we disable logging to a file +) + +var validQuality = map[string]bool{ + "CRITICAL": true, + "HIGH": true, + "MEDIUM": true, + "LOW": true, +} + +// Validator represents a [[VALIDATORS]] entry in the captive core toml file. 
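+//
+// A typical entry (as in testdata/sample-appendix.cfg) looks like:
+//
+//	[[VALIDATORS]]
+//	NAME="sdf_testnet_1"
+//	HOME_DOMAIN="testnet.stellar.org"
+//	PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y"
+//	ADDRESS="localhost:123"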
+type Validator struct { + Name string `toml:"NAME"` + Quality string `toml:"QUALITY,omitempty"` + HomeDomain string `toml:"HOME_DOMAIN"` + PublicKey string `toml:"PUBLIC_KEY"` + Address string `toml:"ADDRESS,omitempty"` + History string `toml:"HISTORY,omitempty"` +} + +// HomeDomain represents a [[HOME_DOMAINS]] entry in the captive core toml file. +type HomeDomain struct { + HomeDomain string `toml:"HOME_DOMAIN"` + Quality string `toml:"QUALITY"` +} + +// History represents a [HISTORY] table in the captive core toml file. +type History struct { + Get string `toml:"get"` + // should we allow put and mkdir for captive core? + Put string `toml:"put,omitempty"` + Mkdir string `toml:"mkdir,omitempty"` +} + +// QuorumSet represents a [QUORUM_SET] table in the captive core toml file. +type QuorumSet struct { + ThresholdPercent int `toml:"THRESHOLD_PERCENT"` + Validators []string `toml:"VALIDATORS"` +} + +type captiveCoreTomlValues struct { + // we cannot omitempty because the empty string is a valid configuration for LOG_FILE_PATH + // and the default is stellar-core.log + LogFilePath string `toml:"LOG_FILE_PATH"` + BucketDirPath string `toml:"BUCKET_DIR_PATH,omitempty"` + // we cannot omitempty because 0 is a valid configuration for HTTP_PORT + // and the default is 11626 + HTTPPort uint `toml:"HTTP_PORT"` + PublicHTTPPort bool `toml:"PUBLIC_HTTP_PORT,omitempty"` + NodeNames []string `toml:"NODE_NAMES,omitempty"` + NetworkPassphrase string `toml:"NETWORK_PASSPHRASE,omitempty"` + PeerPort uint `toml:"PEER_PORT,omitempty"` + // we cannot omitempty because 0 is a valid configuration for FAILURE_SAFETY + // and the default is -1 + FailureSafety int `toml:"FAILURE_SAFETY"` + UnsafeQuorum bool `toml:"UNSAFE_QUORUM,omitempty"` + RunStandalone bool `toml:"RUN_STANDALONE,omitempty"` + ArtificiallyAccelerateTimeForTesting bool `toml:"ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING,omitempty"` + HomeDomains []HomeDomain `toml:"HOME_DOMAINS,omitempty"` + Validators []Validator `toml:"VALIDATORS,omitempty"` + HistoryEntries map[string]History `toml:"-"` + QuorumSetEntries map[string]QuorumSet `toml:"-"` +} + +// QuorumSetIsConfigured returns true if there is a quorum set defined in the configuration. +func (c *captiveCoreTomlValues) QuorumSetIsConfigured() bool { + return len(c.QuorumSetEntries) > 0 || len(c.Validators) > 0 +} + +// HistoryIsConfigured returns true if the history archive locations are configured. +func (c *captiveCoreTomlValues) HistoryIsConfigured() bool { + if len(c.HistoryEntries) > 0 { + return true + } + for _, v := range c.Validators { + if v.History != "" { + return true + } + } + return false +} + +type placeholders struct { + labels map[string]string + count int +} + +func (p *placeholders) newPlaceholder(key string) string { + if p.labels == nil { + p.labels = map[string]string{} + } + placeHolder := fmt.Sprintf("__placeholder_label_%d__", p.count) + p.count++ + p.labels[placeHolder] = key + return placeHolder +} + +func (p *placeholders) get(placeholder string) (string, bool) { + if p.labels == nil { + return "", false + } + val, ok := p.labels[placeholder] + return val, ok +} + +// CaptiveCoreToml represents a parsed captive core configuration. +type CaptiveCoreToml struct { + captiveCoreTomlValues + tree *toml.Tree + tablePlaceholders *placeholders +} + +// flattenTables will transform a given toml text by flattening all nested tables +// whose root can be found in `rootNames`. +// +// In the TOML spec dotted keys represents nesting. 
So we flatten the table key by replacing each table +// path with a placeholder. For example: +// +// text := `[QUORUM_SET.a.b.c] +// THRESHOLD_PERCENT=67 +// VALIDATORS=["a","b"]` +// flattenTables(text, []string{"QUORUM_SET"}) -> +// +// `[__placeholder_label_0__] +// THRESHOLD_PERCENT=67 +// VALIDATORS=["a","b"]` +func flattenTables(text string, rootNames []string) (string, *placeholders) { + orExpression := strings.Join(rootNames, "|") + re := regexp.MustCompile(`\[(` + orExpression + `)(\..+)?\]`) + + tablePlaceHolders := &placeholders{} + + flattened := re.ReplaceAllStringFunc(text, func(match string) string { + insideBrackets := match[1 : len(match)-1] + return "[" + tablePlaceHolders.newPlaceholder(insideBrackets) + "]" + }) + return flattened, tablePlaceHolders +} + +// unflattenTables is the inverse of flattenTables, it restores the +// text back to its original form by replacing all placeholders with their +// original values. +func unflattenTables(text string, tablePlaceHolders *placeholders) string { + re := regexp.MustCompile(`\[.*\]`) + + return re.ReplaceAllStringFunc(text, func(match string) string { + insideBrackets := match[1 : len(match)-1] + original, ok := tablePlaceHolders.get(insideBrackets) + if !ok { + return match + } + return "[" + original + "]" + }) +} + +// AddExamplePubnetQuorum adds example pubnet validators to toml file +func (c *CaptiveCoreToml) AddExamplePubnetValidators() { + c.captiveCoreTomlValues.Validators = []Validator{ + { + Name: "sdf_1", + HomeDomain: "stellar.org", + PublicKey: "GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH", + Address: "core-live-a.stellar.org:11625", + History: "curl -sf https://history.stellar.org/prd/core-live/core_live_001/{0} -o {1}", + }, + { + Name: "sdf_2", + HomeDomain: "stellar.org", + PublicKey: "GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK", + Address: "core-live-b.stellar.org:11625", + History: "curl -sf https://history.stellar.org/prd/core-live/core_live_002/{0} -o {1}", + }, + { + Name: "sdf_3", + HomeDomain: "stellar.org", + PublicKey: "GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ", + Address: "core-live-c.stellar.org:11625", + History: "curl -sf https://history.stellar.org/prd/core-live/core_live_003/{0} -o {1}", + }, + } +} + +// Marshal serializes the CaptiveCoreToml into a toml document. 
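+// Base values are encoded first, followed by any HISTORY and QUORUM_SET
+// tables; placeholder table names introduced during unmarshalling are
+// translated back to their original dotted keys via unflattenTables before
+// the bytes are returned.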
+func (c *CaptiveCoreToml) Marshal() ([]byte, error) { + var sb strings.Builder + sb.WriteString("# Generated file, do not edit\n") + encoder := toml.NewEncoder(&sb) + if err := encoder.Encode(c.captiveCoreTomlValues); err != nil { + return nil, errors.Wrap(err, "could not encode toml file") + } + + if len(c.HistoryEntries) > 0 { + if err := encoder.Encode(c.HistoryEntries); err != nil { + return nil, errors.Wrap(err, "could not encode history entries") + } + } + + if len(c.QuorumSetEntries) > 0 { + if err := encoder.Encode(c.QuorumSetEntries); err != nil { + return nil, errors.Wrap(err, "could not encode quorum set") + } + } + + return []byte(unflattenTables(sb.String(), c.tablePlaceholders)), nil +} + +func unmarshalTreeNode(t *toml.Tree, key string, dest interface{}) error { + tree, ok := t.Get(key).(*toml.Tree) + if !ok { + return fmt.Errorf("unexpected key %v", key) + } + return tree.Unmarshal(dest) +} + +func (c *CaptiveCoreToml) unmarshal(data []byte, strict bool) error { + quorumSetEntries := map[string]QuorumSet{} + historyEntries := map[string]History{} + // The toml library has trouble with nested tables so we need to flatten all nested + // QUORUM_SET and HISTORY tables as a workaround. + // In Marshal() we apply the inverse process to unflatten the nested tables. + flattened, tablePlaceholders := flattenTables(string(data), []string{"QUORUM_SET", "HISTORY"}) + + tree, err := toml.Load(flattened) + if err != nil { + return err + } + + for _, key := range tree.Keys() { + originalKey, ok := tablePlaceholders.get(key) + if !ok { + continue + } + + switch { + case strings.HasPrefix(originalKey, "QUORUM_SET"): + var qs QuorumSet + if err = unmarshalTreeNode(tree, key, &qs); err != nil { + return err + } + quorumSetEntries[key] = qs + case strings.HasPrefix(originalKey, "HISTORY"): + var h History + if err = unmarshalTreeNode(tree, key, &h); err != nil { + return err + } + historyEntries[key] = h + } + if err = tree.Delete(key); err != nil { + return err + } + } + + var body captiveCoreTomlValues + if withoutPlaceHolders, err := tree.Marshal(); err != nil { + return err + } else if err = toml.NewDecoder(bytes.NewReader(withoutPlaceHolders)).Strict(strict).Decode(&body); err != nil { + if message := err.Error(); strings.HasPrefix(message, "undecoded keys") { + return fmt.Errorf(strings.Replace( + message, + "undecoded keys", + "these fields are not supported by captive core", + 1, + )) + } + return err + } + + c.tree = tree + c.captiveCoreTomlValues = body + c.tablePlaceholders = tablePlaceholders + c.QuorumSetEntries = quorumSetEntries + c.HistoryEntries = historyEntries + return nil +} + +// CaptiveCoreTomlParams defines captive core configuration provided by Horizon flags. +type CaptiveCoreTomlParams struct { + // NetworkPassphrase is the Stellar network passphrase used by captive core when connecting to the Stellar network. + NetworkPassphrase string + // HistoryArchiveURLs are a list of history archive urls. + HistoryArchiveURLs []string + // HTTPPort is the TCP port to listen for requests (defaults to 0, which disables the HTTP server). + HTTPPort *uint + // PeerPort is the TCP port to bind to for connecting to the Stellar network + // (defaults to 11625). It may be useful for example when there's >1 Stellar-Core + // instance running on a machine. + PeerPort *uint + // LogPath is the (optional) path in which to store Core logs, passed along + // to Stellar Core's LOG_FILE_PATH. 
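+ // When nil and no LOG_FILE_PATH is present in the provided config file,
+ // file logging stays disabled (see defaultLogFilePath in setDefaults).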
+ LogPath *string + // Strict is a flag which, if enabled, rejects Stellar Core toml fields which are not supported by captive core. + Strict bool +} + +// NewCaptiveCoreTomlFromFile constructs a new CaptiveCoreToml instance by merging configuration +// from the toml file located at `configPath` and the configuration provided by `params`. +func NewCaptiveCoreTomlFromFile(configPath string, params CaptiveCoreTomlParams) (*CaptiveCoreToml, error) { + var captiveCoreToml CaptiveCoreToml + data, err := ioutil.ReadFile(configPath) + if err != nil { + return nil, errors.Wrap(err, "could not load toml path") + } + + if err = captiveCoreToml.unmarshal(data, params.Strict); err != nil { + return nil, errors.Wrap(err, "could not unmarshal captive core toml") + } + // disallow setting BUCKET_DIR_PATH through a file since it can cause multiple + // running captive-core instances to clash + if params.Strict && captiveCoreToml.BucketDirPath != "" { + return nil, errors.New("could not unmarshal captive core toml: setting BUCKET_DIR_PATH is disallowed, it can cause clashes between instances") + } + + if err = captiveCoreToml.validate(params); err != nil { + return nil, errors.Wrap(err, "invalid captive core toml") + } + + if len(captiveCoreToml.HistoryEntries) > 0 { + log.Warnf( + "Configuring captive core with history archive from %s instead of %v", + configPath, + params.HistoryArchiveURLs, + ) + } + + captiveCoreToml.setDefaults(params) + return &captiveCoreToml, nil +} + +// NewCaptiveCoreToml constructs a new CaptiveCoreToml instance based off +// the configuration in `params`. +func NewCaptiveCoreToml(params CaptiveCoreTomlParams) (*CaptiveCoreToml, error) { + var captiveCoreToml CaptiveCoreToml + var err error + + captiveCoreToml.tablePlaceholders = &placeholders{} + captiveCoreToml.tree, err = toml.TreeFromMap(map[string]interface{}{}) + if err != nil { + return nil, err + } + + captiveCoreToml.setDefaults(params) + return &captiveCoreToml, nil +} + +func (c *CaptiveCoreToml) clone() (*CaptiveCoreToml, error) { + data, err := c.Marshal() + if err != nil { + return nil, errors.Wrap(err, "could not marshal toml") + } + var cloned CaptiveCoreToml + if err = cloned.unmarshal(data, false); err != nil { + return nil, errors.Wrap(err, "could not unmarshal captive core toml") + } + return &cloned, nil +} + +// CatchupToml returns a new CaptiveCoreToml instance based off the existing +// instance with some modifications which are suitable for running +// the catchup command on captive core. +func (c *CaptiveCoreToml) CatchupToml() (*CaptiveCoreToml, error) { + offline, err := c.clone() + if err != nil { + return nil, errors.Wrap(err, "could not clone toml") + } + + offline.RunStandalone = true + offline.UnsafeQuorum = true + offline.PublicHTTPPort = false + offline.HTTPPort = 0 + offline.FailureSafety = 0 + + if !c.QuorumSetIsConfigured() { + // Add a fictional quorum -- necessary to convince core to start up; + // but not used at all for our purposes. Pubkey here is just random. 
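+ // The literal "QUORUM_SET" key is not a placeholder, so Marshal emits it
+ // verbatim as a [QUORUM_SET] table (cf. expected-offline-core.cfg).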
+ offline.QuorumSetEntries = map[string]QuorumSet{ + "QUORUM_SET": { + ThresholdPercent: 100, + Validators: []string{"GCZBOIAY4HLKAJVNJORXZOZRAY2BJDBZHKPBHZCRAIUR5IHC2UHBGCQR"}, + }, + } + } + return offline, nil +} + +func (c *CaptiveCoreToml) setDefaults(params CaptiveCoreTomlParams) { + if !c.tree.Has("NETWORK_PASSPHRASE") { + c.NetworkPassphrase = params.NetworkPassphrase + } + + if def := c.tree.Has("HTTP_PORT"); !def && params.HTTPPort != nil { + c.HTTPPort = *params.HTTPPort + } else if !def && params.HTTPPort == nil { + c.HTTPPort = defaultHTTPPort + } + + if def := c.tree.Has("PEER_PORT"); !def && params.PeerPort != nil { + c.PeerPort = *params.PeerPort + } + + if def := c.tree.Has("LOG_FILE_PATH"); !def && params.LogPath != nil { + c.LogFilePath = *params.LogPath + } else if !def && params.LogPath == nil { + c.LogFilePath = defaultLogFilePath + } + + if !c.tree.Has("FAILURE_SAFETY") { + c.FailureSafety = defaultFailureSafety + } + if !c.HistoryIsConfigured() { + c.HistoryEntries = map[string]History{} + for i, val := range params.HistoryArchiveURLs { + name := fmt.Sprintf("HISTORY.h%d", i) + c.HistoryEntries[c.tablePlaceholders.newPlaceholder(name)] = History{ + Get: fmt.Sprintf("curl -sf %s/{0} -o {1}", val), + } + } + } +} + +func (c *CaptiveCoreToml) validate(params CaptiveCoreTomlParams) error { + if def := c.tree.Has("NETWORK_PASSPHRASE"); def && c.NetworkPassphrase != params.NetworkPassphrase { + return fmt.Errorf( + "NETWORK_PASSPHRASE in captive core config file: %s does not match Horizon network-passphrase flag: %s", + c.NetworkPassphrase, + params.NetworkPassphrase, + ) + } + + if def := c.tree.Has("HTTP_PORT"); def && params.HTTPPort != nil && c.HTTPPort != *params.HTTPPort { + return fmt.Errorf( + "HTTP_PORT in captive core config file: %d does not match Horizon captive-core-http-port flag: %d", + c.HTTPPort, + *params.HTTPPort, + ) + } + + if def := c.tree.Has("PEER_PORT"); def && params.PeerPort != nil && c.PeerPort != *params.PeerPort { + return fmt.Errorf( + "PEER_PORT in captive core config file: %d does not match Horizon captive-core-peer-port flag: %d", + c.PeerPort, + *params.PeerPort, + ) + } + + if def := c.tree.Has("LOG_FILE_PATH"); def && params.LogPath != nil && c.LogFilePath != *params.LogPath { + return fmt.Errorf( + "LOG_FILE_PATH in captive core config file: %s does not match Horizon captive-core-log-path flag: %s", + c.LogFilePath, + *params.LogPath, + ) + } + + homeDomainSet := map[string]HomeDomain{} + for _, hd := range c.HomeDomains { + if _, ok := homeDomainSet[hd.HomeDomain]; ok { + return fmt.Errorf( + "found duplicate home domain in captive core configuration: %s", + hd.HomeDomain, + ) + } + if hd.HomeDomain == "" { + return fmt.Errorf( + "found invalid home domain entry which is missing a HOME_DOMAIN value", + ) + } + if hd.Quality == "" { + return fmt.Errorf( + "found invalid home domain entry which is missing a QUALITY value: %s", + hd.HomeDomain, + ) + } + if !validQuality[hd.Quality] { + return fmt.Errorf( + "found invalid home domain entry which has an invalid QUALITY value: %s", + hd.HomeDomain, + ) + } + homeDomainSet[hd.HomeDomain] = hd + } + + names := map[string]bool{} + for _, v := range c.Validators { + if names[v.Name] { + return fmt.Errorf( + "found duplicate validator in captive core configuration: %s", + v.Name, + ) + } + if v.Name == "" { + return fmt.Errorf( + "found invalid validator entry which is missing a NAME value: %s", + v.Name, + ) + } + if v.HomeDomain == "" { + return fmt.Errorf( + "found invalid validator entry 
which is missing a HOME_DOMAIN value: %s", + v.Name, + ) + } + if v.PublicKey == "" { + return fmt.Errorf( + "found invalid validator entry which is missing a PUBLIC_KEY value: %s", + v.Name, + ) + } + if _, err := xdr.AddressToAccountId(v.PublicKey); err != nil { + return fmt.Errorf( + "found invalid validator entry which has an invalid PUBLIC_KEY : %s", + v.Name, + ) + } + if v.Quality == "" { + if _, ok := homeDomainSet[v.HomeDomain]; !ok { + return fmt.Errorf( + "found invalid validator entry which is missing a QUALITY value: %s", + v.Name, + ) + } + } else if !validQuality[v.Quality] { + return fmt.Errorf( + "found invalid validator entry which has an invalid QUALITY value: %s", + v.Name, + ) + } + + names[v.Name] = true + } + + return nil +} diff --git a/ingest/ledgerbackend/toml_test.go b/ingest/ledgerbackend/toml_test.go new file mode 100644 index 0000000000..b8da4de03c --- /dev/null +++ b/ingest/ledgerbackend/toml_test.go @@ -0,0 +1,320 @@ +package ledgerbackend + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func newUint(v uint) *uint { + return &v +} + +func newString(s string) *string { + return &s +} + +func TestCaptiveCoreTomlValidation(t *testing.T) { + for _, testCase := range []struct { + name string + networkPassphrase string + appendPath string + httpPort *uint + peerPort *uint + logPath *string + expectedError string + }{ + { + name: "mismatching NETWORK_PASSPHRASE", + networkPassphrase: "bogus passphrase", + appendPath: filepath.Join("testdata", "appendix-with-fields.cfg"), + httpPort: newUint(6789), + peerPort: newUint(12345), + logPath: nil, + expectedError: "invalid captive core toml: NETWORK_PASSPHRASE in captive core config file: " + + "Public Global Stellar Network ; September 2015 does not match Horizon network-passphrase " + + "flag: bogus passphrase", + }, + { + name: "mismatching HTTP_PORT", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "appendix-with-fields.cfg"), + httpPort: newUint(1161), + peerPort: newUint(12345), + logPath: nil, + expectedError: "invalid captive core toml: HTTP_PORT in captive core config file: 6789 " + + "does not match Horizon captive-core-http-port flag: 1161", + }, + { + name: "mismatching PEER_PORT", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "appendix-with-fields.cfg"), + httpPort: newUint(6789), + peerPort: newUint(2346), + logPath: nil, + expectedError: "invalid captive core toml: PEER_PORT in captive core config file: 12345 " + + "does not match Horizon captive-core-peer-port flag: 2346", + }, + { + name: "mismatching LOG_FILE_PATH", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "appendix-with-fields.cfg"), + httpPort: newUint(6789), + peerPort: newUint(12345), + logPath: newString("/my/test/path"), + expectedError: "invalid captive core toml: LOG_FILE_PATH in captive core config file: " + + "does not match Horizon captive-core-log-path flag: /my/test/path", + }, + { + name: "duplicate HOME_DOMAIN entry", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "duplicate-home-domain.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found duplicate home domain in captive " + + "core configuration: testnet.stellar.org", + }, + { + name: "empty HOME_DOMAIN entry", + 
networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "empty-home-domain.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found invalid home domain entry which is " + + "missing a HOME_DOMAIN value", + }, + { + name: "HOME_DOMAIN with empty QUALITY", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "empty-home-domain-quality.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found invalid home domain entry which is " + + "missing a QUALITY value: testnet.stellar.org", + }, + { + name: "HOME_DOMAIN with invalid QUALITY", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "invalid-home-domain-quality.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found invalid home domain entry which has an " + + "invalid QUALITY value: testnet.stellar.org", + }, + { + name: "duplicate VALIDATOR entry", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "duplicate-validator.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found duplicate validator in captive core " + + "configuration: sdf_testnet_1", + }, + { + name: "VALIDATOR with invalid public key", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "validator-has-invalid-public-key.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found invalid validator entry which has an invalid " + + "PUBLIC_KEY : sdf_testnet_2", + }, + { + name: "VALIDATOR with invalid quality", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "validator-has-invalid-quality.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found invalid validator entry which has an invalid " + + "QUALITY value: sdf_testnet_2", + }, + { + name: "VALIDATOR without home domain", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "validator-missing-home-domain.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found invalid validator entry which is missing a " + + "HOME_DOMAIN value: sdf_testnet_1", + }, + { + name: "VALIDATOR without name", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "validator-missing-name.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found invalid validator entry which is missing " + + "a NAME value: ", + }, + { + name: "VALIDATOR without public key", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "validator-missing-public-key.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found invalid validator entry which is missing " + + "a PUBLIC_KEY value: sdf_testnet_1", + }, + { + name: "VALIDATOR without quality", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "validator-missing-quality.cfg"), + httpPort: nil, + 
peerPort: nil, + logPath: nil, + expectedError: "invalid captive core toml: found invalid validator entry which is missing " + + "a QUALITY value: sdf_testnet_2", + }, + { + name: "field not supported by captive core", + networkPassphrase: "Public Global Stellar Network ; September 2015", + appendPath: filepath.Join("testdata", "invalid-captive-core-field.cfg"), + httpPort: nil, + peerPort: nil, + logPath: nil, + expectedError: "could not unmarshal captive core toml: these fields are not supported by captive core: [\"CATCHUP_RECENT\"]", + }, + { + name: "unexpected BUCKET_DIR_PATH", + appendPath: filepath.Join("testdata", "appendix-with-bucket-dir-path.cfg"), + expectedError: "could not unmarshal captive core toml: setting BUCKET_DIR_PATH is disallowed, it can cause clashes between instances", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + params := CaptiveCoreTomlParams{ + NetworkPassphrase: testCase.networkPassphrase, + HistoryArchiveURLs: []string{"http://localhost:1170"}, + HTTPPort: testCase.httpPort, + PeerPort: testCase.peerPort, + LogPath: testCase.logPath, + Strict: true, + } + _, err := NewCaptiveCoreTomlFromFile(testCase.appendPath, params) + assert.EqualError(t, err, testCase.expectedError) + }) + } +} + +func TestGenerateConfig(t *testing.T) { + for _, testCase := range []struct { + name string + appendPath string + mode stellarCoreRunnerMode + expectedPath string + httpPort *uint + peerPort *uint + logPath *string + }{ + { + name: "offline config with no appendix", + mode: stellarCoreRunnerModeOffline, + appendPath: "", + expectedPath: filepath.Join("testdata", "expected-offline-core.cfg"), + httpPort: newUint(6789), + peerPort: newUint(12345), + logPath: nil, + }, + { + name: "offline config with no peer port", + mode: stellarCoreRunnerModeOffline, + appendPath: "", + expectedPath: filepath.Join("testdata", "expected-offline-with-no-peer-port.cfg"), + httpPort: newUint(6789), + peerPort: nil, + logPath: newString("/var/stellar-core/test.log"), + }, + { + name: "online config with appendix", + mode: stellarCoreRunnerModeOnline, + appendPath: filepath.Join("testdata", "sample-appendix.cfg"), + expectedPath: filepath.Join("testdata", "expected-online-core.cfg"), + httpPort: newUint(6789), + peerPort: newUint(12345), + logPath: nil, + }, + { + name: "online config with unsupported field in appendix", + mode: stellarCoreRunnerModeOnline, + appendPath: filepath.Join("testdata", "invalid-captive-core-field.cfg"), + expectedPath: filepath.Join("testdata", "expected-online-core.cfg"), + httpPort: newUint(6789), + peerPort: newUint(12345), + logPath: nil, + }, + { + name: "online config with no peer port", + mode: stellarCoreRunnerModeOnline, + appendPath: filepath.Join("testdata", "sample-appendix.cfg"), + expectedPath: filepath.Join("testdata", "expected-online-with-no-peer-port.cfg"), + httpPort: newUint(6789), + peerPort: nil, + logPath: newString("/var/stellar-core/test.log"), + }, + { + name: "online config with no http port", + mode: stellarCoreRunnerModeOnline, + appendPath: filepath.Join("testdata", "sample-appendix.cfg"), + expectedPath: filepath.Join("testdata", "expected-online-with-no-http-port.cfg"), + httpPort: nil, + peerPort: newUint(12345), + logPath: nil, + }, + { + name: "offline config with appendix", + mode: stellarCoreRunnerModeOffline, + appendPath: filepath.Join("testdata", "sample-appendix.cfg"), + expectedPath: filepath.Join("testdata", "expected-offline-with-appendix-core.cfg"), + httpPort: newUint(6789), + peerPort: newUint(12345), + logPath: 
nil, + }, + { + name: "offline config with extra fields in appendix", + mode: stellarCoreRunnerModeOffline, + appendPath: filepath.Join("testdata", "appendix-with-fields.cfg"), + expectedPath: filepath.Join("testdata", "expected-offline-with-extra-fields.cfg"), + httpPort: newUint(6789), + peerPort: newUint(12345), + logPath: nil, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + var err error + var captiveCoreToml *CaptiveCoreToml + params := CaptiveCoreTomlParams{ + NetworkPassphrase: "Public Global Stellar Network ; September 2015", + HistoryArchiveURLs: []string{"http://localhost:1170"}, + HTTPPort: testCase.httpPort, + PeerPort: testCase.peerPort, + LogPath: testCase.logPath, + Strict: false, + } + if testCase.appendPath != "" { + captiveCoreToml, err = NewCaptiveCoreTomlFromFile(testCase.appendPath, params) + } else { + captiveCoreToml, err = NewCaptiveCoreToml(params) + } + assert.NoError(t, err) + + configBytes, err := generateConfig(captiveCoreToml, testCase.mode) + assert.NoError(t, err) + + expectedByte, err := ioutil.ReadFile(testCase.expectedPath) + assert.NoError(t, err) + + assert.Equal(t, string(configBytes), string(expectedByte)) + }) + } +} diff --git a/ingest/memory_temp_set.go b/ingest/memory_temp_set.go new file mode 100644 index 0000000000..26096e57e6 --- /dev/null +++ b/ingest/memory_temp_set.go @@ -0,0 +1,38 @@ +package ingest + +// memoryTempSet is an in-memory implementation of TempSet interface. +// As of July 2019 this requires up to ~4GB of memory for pubnet ledger +// state processing. The internal structure is dereferenced after the +// store is closed. +type memoryTempSet struct { + m map[string]bool +} + +// Open initialize internals data structure. +func (s *memoryTempSet) Open() error { + s.m = make(map[string]bool) + return nil +} + +// Add adds a key to TempSet. +func (s *memoryTempSet) Add(key string) error { + s.m[key] = true + return nil +} + +// Preload does not do anything. This TempSet keeps everything in memory +// so no preloading needed. +func (s *memoryTempSet) Preload(keys []string) error { + return nil +} + +// Exist check if the key exists in a TempSet. +func (s *memoryTempSet) Exist(key string) (bool, error) { + return s.m[key], nil +} + +// Close removes reference to internal data structure. 
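+// After Close the internal map is nil; Open must be called again before the
+// set can be reused.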
+func (s *memoryTempSet) Close() error { + s.m = nil + return nil +} diff --git a/ingest/memory_temp_set_test.go b/ingest/memory_temp_set_test.go new file mode 100644 index 0000000000..8f56c0f86f --- /dev/null +++ b/ingest/memory_temp_set_test.go @@ -0,0 +1,38 @@ +package ingest + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMemoryTempSet(t *testing.T) { + s := memoryTempSet{} + assert.Nil(t, s.m) + err := s.Open() + assert.NoError(t, err) + assert.NotNil(t, s.m) + + err = s.Add("a") + assert.NoError(t, err) + + err = s.Add("b") + assert.NoError(t, err) + + v, err := s.Exist("a") + assert.NoError(t, err) + assert.True(t, v) + + v, err = s.Exist("b") + assert.NoError(t, err) + assert.True(t, v) + + // Get for not-set key should return false + v, err = s.Exist("c") + assert.NoError(t, err) + assert.False(t, v) + + err = s.Close() + assert.NoError(t, err) + assert.Nil(t, s.m) +} diff --git a/ingest/mock_change_reader.go b/ingest/mock_change_reader.go new file mode 100644 index 0000000000..c70d78d397 --- /dev/null +++ b/ingest/mock_change_reader.go @@ -0,0 +1,21 @@ +package ingest + +import ( + "github.com/stretchr/testify/mock" +) + +var _ ChangeReader = (*MockChangeReader)(nil) + +type MockChangeReader struct { + mock.Mock +} + +func (m *MockChangeReader) Read() (Change, error) { + args := m.Called() + return args.Get(0).(Change), args.Error(1) +} + +func (m *MockChangeReader) Close() error { + args := m.Called() + return args.Error(0) +} diff --git a/ingest/stats_change_processor.go b/ingest/stats_change_processor.go new file mode 100644 index 0000000000..d342bc91d5 --- /dev/null +++ b/ingest/stats_change_processor.go @@ -0,0 +1,133 @@ +package ingest + +import ( + "context" + + "github.com/stellar/go/xdr" +) + +// StatsChangeProcessor is a state processors that counts number of changes types +// and entry types. +type StatsChangeProcessor struct { + results StatsChangeProcessorResults +} + +// StatsChangeProcessorResults contains results after running StatsChangeProcessor. 
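+// Counters are grouped per ledger entry type (accounts, claimable balances,
+// data entries, offers, trust lines and liquidity pools), each split into
+// created/updated/removed buckets.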
+type StatsChangeProcessorResults struct { + AccountsCreated int64 + AccountsUpdated int64 + AccountsRemoved int64 + + ClaimableBalancesCreated int64 + ClaimableBalancesUpdated int64 + ClaimableBalancesRemoved int64 + + DataCreated int64 + DataUpdated int64 + DataRemoved int64 + + OffersCreated int64 + OffersUpdated int64 + OffersRemoved int64 + + TrustLinesCreated int64 + TrustLinesUpdated int64 + TrustLinesRemoved int64 + + LiquidityPoolsCreated int64 + LiquidityPoolsUpdated int64 + LiquidityPoolsRemoved int64 +} + +func (p *StatsChangeProcessor) ProcessChange(ctx context.Context, change Change) error { + switch change.Type { + case xdr.LedgerEntryTypeAccount: + switch change.LedgerEntryChangeType() { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + p.results.AccountsCreated++ + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + p.results.AccountsUpdated++ + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + p.results.AccountsRemoved++ + } + case xdr.LedgerEntryTypeClaimableBalance: + switch change.LedgerEntryChangeType() { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + p.results.ClaimableBalancesCreated++ + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + p.results.ClaimableBalancesUpdated++ + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + p.results.ClaimableBalancesRemoved++ + } + case xdr.LedgerEntryTypeData: + switch change.LedgerEntryChangeType() { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + p.results.DataCreated++ + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + p.results.DataUpdated++ + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + p.results.DataRemoved++ + } + case xdr.LedgerEntryTypeOffer: + switch change.LedgerEntryChangeType() { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + p.results.OffersCreated++ + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + p.results.OffersUpdated++ + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + p.results.OffersRemoved++ + } + case xdr.LedgerEntryTypeTrustline: + switch change.LedgerEntryChangeType() { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + p.results.TrustLinesCreated++ + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + p.results.TrustLinesUpdated++ + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + p.results.TrustLinesRemoved++ + } + case xdr.LedgerEntryTypeLiquidityPool: + switch change.LedgerEntryChangeType() { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + p.results.LiquidityPoolsCreated++ + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + p.results.LiquidityPoolsUpdated++ + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + p.results.LiquidityPoolsRemoved++ + } + } + + return nil +} + +func (p *StatsChangeProcessor) GetResults() StatsChangeProcessorResults { + return p.results +} + +func (stats *StatsChangeProcessorResults) Map() map[string]interface{} { + return map[string]interface{}{ + "stats_accounts_created": stats.AccountsCreated, + "stats_accounts_updated": stats.AccountsUpdated, + "stats_accounts_removed": stats.AccountsRemoved, + + "stats_claimable_balances_created": stats.ClaimableBalancesCreated, + "stats_claimable_balances_updated": stats.ClaimableBalancesUpdated, + "stats_claimable_balances_removed": stats.ClaimableBalancesRemoved, + + "stats_data_created": stats.DataCreated, + "stats_data_updated": stats.DataUpdated, + "stats_data_removed": stats.DataRemoved, + + "stats_offers_created": stats.OffersCreated, + "stats_offers_updated": stats.OffersUpdated, + "stats_offers_removed": stats.OffersRemoved, + + "stats_trust_lines_created": 
stats.TrustLinesCreated, + "stats_trust_lines_updated": stats.TrustLinesUpdated, + "stats_trust_lines_removed": stats.TrustLinesRemoved, + + "stats_liquidity_pools_created": stats.LiquidityPoolsCreated, + "stats_liquidity_pools_updated": stats.LiquidityPoolsUpdated, + "stats_liquidity_pools_removed": stats.LiquidityPoolsRemoved, + } +} diff --git a/ingest/stats_change_processor_test.go b/ingest/stats_change_processor_test.go new file mode 100644 index 0000000000..5eb3bacd52 --- /dev/null +++ b/ingest/stats_change_processor_test.go @@ -0,0 +1,148 @@ +package ingest + +import ( + "context" + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestStatsChangeProcessor(t *testing.T) { + ctx := context.Background() + processor := &StatsChangeProcessor{} + + // Created + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: nil, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeData, + Pre: nil, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: nil, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: nil, + Post: &xdr.LedgerEntry{}, + })) + + // Updated + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{}, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: &xdr.LedgerEntry{}, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeData, + Pre: &xdr.LedgerEntry{}, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: &xdr.LedgerEntry{}, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{}, + Post: &xdr.LedgerEntry{}, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: &xdr.LedgerEntry{}, + Post: &xdr.LedgerEntry{}, + })) + + // Removed + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{}, + Post: nil, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: &xdr.LedgerEntry{}, + Post: nil, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeData, + Pre: &xdr.LedgerEntry{}, + Post: nil, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: &xdr.LedgerEntry{}, + Post: nil, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{}, + Post: nil, + })) + + assert.NoError(t, processor.ProcessChange(ctx, Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: &xdr.LedgerEntry{}, + Post: nil, + })) + + results := processor.GetResults() + + 
assert.Equal(t, int64(1), results.AccountsCreated) + assert.Equal(t, int64(1), results.ClaimableBalancesCreated) + assert.Equal(t, int64(1), results.DataCreated) + assert.Equal(t, int64(1), results.OffersCreated) + assert.Equal(t, int64(1), results.TrustLinesCreated) + assert.Equal(t, int64(1), results.LiquidityPoolsCreated) + + assert.Equal(t, int64(1), results.AccountsUpdated) + assert.Equal(t, int64(1), results.ClaimableBalancesUpdated) + assert.Equal(t, int64(1), results.DataUpdated) + assert.Equal(t, int64(1), results.OffersUpdated) + assert.Equal(t, int64(1), results.TrustLinesUpdated) + assert.Equal(t, int64(1), results.LiquidityPoolsUpdated) + + assert.Equal(t, int64(1), results.AccountsRemoved) + assert.Equal(t, int64(1), results.ClaimableBalancesRemoved) + assert.Equal(t, int64(1), results.DataRemoved) + assert.Equal(t, int64(1), results.OffersRemoved) + assert.Equal(t, int64(1), results.TrustLinesRemoved) + assert.Equal(t, int64(1), results.LiquidityPoolsRemoved) +} diff --git a/ingest/tutorial/captive-core-stub.toml b/ingest/tutorial/captive-core-stub.toml new file mode 100644 index 0000000000..ba47bd2203 --- /dev/null +++ b/ingest/tutorial/captive-core-stub.toml @@ -0,0 +1,24 @@ +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="core-testnet1.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP" +ADDRESS="core-testnet2.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_3" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z" +ADDRESS="core-testnet3.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}" diff --git a/ingest/tutorial/example_claimables.go b/ingest/tutorial/example_claimables.go new file mode 100644 index 0000000000..161e538aae --- /dev/null +++ b/ingest/tutorial/example_claimables.go @@ -0,0 +1,68 @@ +package main + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/xdr" +) + +func claimables() { + // Open a history archive using our existing configuration details. + historyArchive, err := historyarchive.Connect( + config.HistoryArchiveURLs[0], + historyarchive.ConnectOptions{ + NetworkPassphrase: config.NetworkPassphrase, + S3Region: "us-west-1", + UnsignedRequests: false, + }, + ) + panicIf(err) + + // First, we need to establish a safe fallback in case of any problems + // during the history archive download+processing, so we'll set a 30-second + // timeout. + // + // NOTE: We're using the testnet here, whose archives are much smaller. For + // the pubnet, a 30 *minute* timeout may be more appropriate. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + // We pass 123455 because given a checkpoint frequency of 64 ledgers (the + // default in `ConnectOptions`, above), 123455+1 mod 64 == 0. Incompatible + // sequence numbers will likely result in 404 errors. 
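+	// To make that concrete: checkpoints land on sequences 63, 127, 191, ...,
+	// i.e. wherever (seq+1) % 64 == 0. Here 123455+1 = 123456 = 64*1929, so
+	// 123455 is a checkpoint ledger, while requesting 123456 itself would miss
+	// the boundary ((123456+1) % 64 == 1) and most likely 404.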
+ reader, err := ingest.NewCheckpointChangeReader(ctx, historyArchive, 123455) + panicIf(err) + + entries, newCBs := 0, 0 + for { + entry, err := reader.Read() + if err == io.EOF { + break + } + panicIf(err) + + entries++ + + switch entry.Type { + case xdr.LedgerEntryTypeClaimableBalance: + newCBs++ + // these are included for completeness of the demonstration + case xdr.LedgerEntryTypeAccount: + case xdr.LedgerEntryTypeData: + case xdr.LedgerEntryTypeTrustline: + case xdr.LedgerEntryTypeOffer: + default: + panic(fmt.Errorf("Unknown type: %+v", entry.Type)) + } + + fmt.Printf("Processed %d ledger entry changes...\r", entries) + } + + fmt.Println() + fmt.Printf("%d/%d created entries were claimable balances\n", newCBs, entries) +} diff --git a/ingest/tutorial/example_common.go b/ingest/tutorial/example_common.go new file mode 100644 index 0000000000..133ac02de6 --- /dev/null +++ b/ingest/tutorial/example_common.go @@ -0,0 +1,39 @@ +package main + +import ( + "fmt" + + "github.com/stellar/go/ingest/ledgerbackend" +) + +var ( + config = captiveCoreConfig() +) + +func captiveCoreConfig() ledgerbackend.CaptiveCoreConfig { + archiveURLs := []string{ + "https://history.stellar.org/prd/core-testnet/core_testnet_001", + "https://history.stellar.org/prd/core-testnet/core_testnet_002", + "https://history.stellar.org/prd/core-testnet/core_testnet_003", + } + networkPassphrase := "Test SDF Network ; September 2015" + captiveCoreToml, err := ledgerbackend.NewCaptiveCoreToml(ledgerbackend.CaptiveCoreTomlParams{ + NetworkPassphrase: networkPassphrase, + HistoryArchiveURLs: archiveURLs, + }) + panicIf(err) + + return ledgerbackend.CaptiveCoreConfig{ + // Change these based on your environment: + BinaryPath: "/usr/local/bin/stellar-core", + NetworkPassphrase: networkPassphrase, + HistoryArchiveURLs: archiveURLs, + Toml: captiveCoreToml, + } +} + +func panicIf(err error) { + if err != nil { + panic(fmt.Errorf("An error occurred, panicking: %s\n", err)) + } +} diff --git a/ingest/tutorial/example_hello.go b/ingest/tutorial/example_hello.go new file mode 100644 index 0000000000..44c9f409b2 --- /dev/null +++ b/ingest/tutorial/example_hello.go @@ -0,0 +1,27 @@ +package main + +import ( + "context" + "fmt" + + backends "github.com/stellar/go/ingest/ledgerbackend" +) + +func helloworld() { + ctx := context.Background() + backend, err := backends.NewCaptive(config) + panicIf(err) + defer backend.Close() + + // Prepare a single ledger to be ingested, + err = backend.PrepareRange(ctx, backends.BoundedRange(123456, 123456)) + panicIf(err) + + // then retrieve it: + ledger, err := backend.GetLedger(ctx, 123456) + panicIf(err) + + // Now `ledger` is a raw `xdr.LedgerCloseMeta` object containing the + // transactions contained within this ledger. 
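+	// (If you need the individual transactions rather than the raw XDR, wrap
+	// the same backend in an ingest.LedgerTransactionReader, as the statistics
+	// example in this tutorial does.)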
+ fmt.Printf("\nHello, Sequence %d.\n", ledger.LedgerSequence()) +} diff --git a/ingest/tutorial/example_main.go b/ingest/tutorial/example_main.go new file mode 100644 index 0000000000..2b7e401655 --- /dev/null +++ b/ingest/tutorial/example_main.go @@ -0,0 +1,7 @@ +package main + +func main() { + helloworld() + statistics() + claimables() +} diff --git a/ingest/tutorial/example_statistics.go b/ingest/tutorial/example_statistics.go new file mode 100644 index 0000000000..f6174fb1b0 --- /dev/null +++ b/ingest/tutorial/example_statistics.go @@ -0,0 +1,73 @@ +package main + +import ( + "context" + "fmt" + "io" + + "github.com/sirupsen/logrus" + "github.com/stellar/go/ingest" + backends "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/support/log" +) + +func statistics() { + ctx := context.Background() + // Only log errors from the backend to keep output cleaner. + lg := log.New() + lg.SetLevel(logrus.ErrorLevel) + config.Log = lg + + backend, err := backends.NewCaptive(config) + panicIf(err) + defer backend.Close() + + // Prepare a range to be ingested: + var startingSeq uint32 = 2 // can't start with genesis ledger + var ledgersToRead uint32 = 10000 + + fmt.Printf("Preparing range (%d ledgers)...\n", ledgersToRead) + ledgerRange := backends.BoundedRange(startingSeq, startingSeq+ledgersToRead) + err = backend.PrepareRange(ctx, ledgerRange) + panicIf(err) + + // These are the statistics that we're tracking. + var successfulTransactions, failedTransactions int + var operationsInSuccessful, operationsInFailed int + + for seq := startingSeq; seq <= startingSeq+ledgersToRead; seq++ { + fmt.Printf("Processed ledger %d...\r", seq) + + txReader, err := ingest.NewLedgerTransactionReader( + ctx, backend, config.NetworkPassphrase, seq, + ) + panicIf(err) + defer txReader.Close() + + // Read each transaction within the ledger, extract its operations, and + // accumulate the statistics we're interested in. + for { + tx, err := txReader.Read() + if err == io.EOF { + break + } + panicIf(err) + + envelope := tx.Envelope + operationCount := len(envelope.Operations()) + if tx.Result.Successful() { + successfulTransactions++ + operationsInSuccessful += operationCount + } else { + failedTransactions++ + operationsInFailed += operationCount + } + } + } + + fmt.Println("\nDone. 
Results:") + fmt.Printf(" - total transactions: %d\n", successfulTransactions+failedTransactions) + fmt.Printf(" - succeeded / failed: %d / %d\n", successfulTransactions, failedTransactions) + fmt.Printf(" - total operations: %d\n", operationsInSuccessful+operationsInFailed) + fmt.Printf(" - succeeded / failed: %d / %d\n", operationsInSuccessful, operationsInFailed) +} diff --git a/install-core.sh b/install-core.sh new file mode 100755 index 0000000000..145d120f04 --- /dev/null +++ b/install-core.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# CORE_VERSION="15.3.0-498.7a7f18c.xenial~SetTrustlineFlagsPR~buildtests" + +CORE_PACKAGE=stellar-core +if [[ "$CORE_VERSION" != "" ]]; then + CORE_PACKAGE="$CORE_PACKAGE=$CORE_VERSION" +fi +sudo wget -qO - https://apt.stellar.org/SDF.asc | APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=true sudo apt-key add - +sudo bash -c 'echo "deb https://apt.stellar.org xenial unstable" > /etc/apt/sources.list.d/SDF-unstable.list' +sudo apt-get update && sudo apt-get install -y "$CORE_PACKAGE" +echo "using stellar core version $(stellar-core version)" +echo "export CAPTIVE_CORE_BIN=/usr/bin/stellar-core" >> ~/.bashrc + diff --git a/integration.sh b/integration.sh new file mode 100755 index 0000000000..9ce0cb9b69 --- /dev/null +++ b/integration.sh @@ -0,0 +1,23 @@ +#! /bin/bash +set -e + +cd "$(dirname "${BASH_SOURCE[0]}")" + +export HORIZON_INTEGRATION_TESTS=true +export HORIZON_INTEGRATION_ENABLE_CAP_35=${HORIZON_INTEGRATION_ENABLE_CAP_35:-} +export HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE=${HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE:-} +export CAPTIVE_CORE_BIN=${CAPTIVE_CORE_BIN:-/usr/bin/stellar-core} +export TRACY_NO_INVARIANT_CHECK=1 # This fails on my dev vm. - Paul + +# launch postgres if it's not already. +if [[ "$(docker inspect integration_postgres -f '{{.State.Running}}')" != "true" ]]; then + docker rm -f integration_postgres || true; + docker run -d \ + --name integration_postgres \ + --platform linux/amd64 \ + --env POSTGRES_HOST_AUTH_METHOD=trust \ + -p 5432:5432 \ + circleci/postgres:9.6.5-alpine +fi + +exec go test -timeout 25m github.com/stellar/go/services/horizon/internal/integration/... "$@" diff --git a/keypair/benchmarks_test.go b/keypair/benchmarks_test.go new file mode 100644 index 0000000000..d464de3e11 --- /dev/null +++ b/keypair/benchmarks_test.go @@ -0,0 +1,159 @@ +package keypair_test + +import ( + "crypto/rand" + "testing" + + "github.com/stellar/go/keypair" + "github.com/stretchr/testify/require" +) + +func BenchmarkFromAddress_ParseAddress(b *testing.B) { + // Secret key for setting up the components. + sk := keypair.MustRandom() + address := sk.Address() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = keypair.ParseAddress(address) + } +} + +func BenchmarkFromAddress_FromAddress(b *testing.B) { + // Secret key for setting up the components. + sk := keypair.MustRandom() + + // Public key that'll be used during the benchmark. + pk := sk.FromAddress() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = pk.FromAddress() + } +} + +func BenchmarkFromAddress_Hint(b *testing.B) { + // Secret key for setting up the components. + sk := keypair.MustRandom() + + // Public key that'll be used during the benchmark. + pk := sk.FromAddress() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = pk.Hint() + } +} + +func BenchmarkFromAddress_Verify(b *testing.B) { + // Secret key for setting up the components. + sk := keypair.MustRandom() + + // Random input for creating a valid signature. 
+ input := [32]byte{} + _, err := rand.Read(input[:]) + require.NoError(b, err) + + // Valid signature to use for verification. + sig, err := sk.Sign(input[:]) + require.NoError(b, err) + + // Public key that'll be used during the benchmark. + pk := sk.FromAddress() + + // Double check that the function succeeds without error when run with these + // inputs to ensure the benchmark is a fair benchmark. + err = pk.Verify(input[:], sig) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = pk.Verify(input[:], sig) + } +} + +func BenchmarkFull_ParseFull(b *testing.B) { + // Secret key for setting up the components. + sk := keypair.MustRandom() + address := sk.Address() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = keypair.ParseFull(address) + } +} + +func BenchmarkFull_FromRawSeed(b *testing.B) { + rawSeed := [32]byte{} + _, err := rand.Read(rawSeed[:]) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = keypair.FromRawSeed(rawSeed) + } +} + +func BenchmarkFull_FromAddress(b *testing.B) { + // Secret key for setting up the components. + sk := keypair.MustRandom() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = sk.FromAddress() + } +} + +func BenchmarkFull_Hint(b *testing.B) { + // Secret key for setting up the components. + sk := keypair.MustRandom() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = sk.Hint() + } +} + +func BenchmarkFull_Verify(b *testing.B) { + // Secret key for setting up the components. + sk := keypair.MustRandom() + + // Random input for creating a valid signature. + input := [32]byte{} + _, err := rand.Read(input[:]) + require.NoError(b, err) + + // Valid signature to use for verification. + sig, err := sk.Sign(input[:]) + require.NoError(b, err) + + // Double check that the function succeeds without error when run with these + // inputs to ensure the benchmark is a fair benchmark. + err = sk.Verify(input[:], sig) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = sk.Verify(input[:], sig) + } +} + +func BenchmarkFull_Sign(b *testing.B) { + // Secret key for setting up the components. + sk := keypair.MustRandom() + + // Random input for creating a valid signature. + input := [32]byte{} + _, err := rand.Read(input[:]) + require.NoError(b, err) + + // Double check that the function succeeds without error when run with these + // inputs to ensure the benchmark is a fair benchmark. + _, err = sk.Sign(input[:]) + require.NoError(b, err) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = sk.Sign(input[:]) + } +} diff --git a/keypair/from_address.go b/keypair/from_address.go index 4295551edf..2c6782f347 100644 --- a/keypair/from_address.go +++ b/keypair/from_address.go @@ -1,7 +1,9 @@ package keypair import ( - "github.com/agl/ed25519" + "crypto/ed25519" + "encoding" + "github.com/stellar/go/strkey" "github.com/stellar/go/xdr" ) @@ -13,15 +15,41 @@ import ( // Some operations will panic otherwise. It's recommended that you create these // structs through the Parse() method. 
type FromAddress struct { - address string + address string + publicKey ed25519.PublicKey +} + +func newFromAddress(address string) (*FromAddress, error) { + payload, err := strkey.Decode(strkey.VersionByteAccountID, address) + if err != nil { + return nil, err + } + pub := ed25519.PublicKey(payload) + return &FromAddress{ + address: address, + publicKey: pub, + }, nil +} + +func newFromAddressWithPublicKey(address string, publicKey ed25519.PublicKey) *FromAddress { + return &FromAddress{ + address: address, + publicKey: publicKey, + } } func (kp *FromAddress) Address() string { return kp.address } +// FromAddress gets the address-only representation, or public key, of this +// keypair, which is itself. +func (kp *FromAddress) FromAddress() *FromAddress { + return kp +} + func (kp *FromAddress) Hint() (r [4]byte) { - copy(r[:], kp.publicKey()[28:]) + copy(r[:], kp.publicKey[28:]) return } @@ -29,11 +57,7 @@ func (kp *FromAddress) Verify(input []byte, sig []byte) error { if len(sig) != 64 { return ErrInvalidSignature } - - var asig [64]byte - copy(asig[:], sig[:]) - - if !ed25519.Verify(kp.publicKey(), input, &asig) { + if !ed25519.Verify(kp.publicKey, input, sig) { return ErrInvalidSignature } return nil @@ -43,15 +67,66 @@ func (kp *FromAddress) Sign(input []byte) ([]byte, error) { return nil, ErrCannotSign } +func (kp *FromAddress) SignBase64(input []byte) (string, error) { + return "", ErrCannotSign +} + func (kp *FromAddress) SignDecorated(input []byte) (xdr.DecoratedSignature, error) { return xdr.DecoratedSignature{}, ErrCannotSign } -func (kp *FromAddress) publicKey() *[32]byte { - bytes := strkey.MustDecode(strkey.VersionByteAccountID, kp.address) - var result [32]byte +func (kp *FromAddress) Equal(a *FromAddress) bool { + if kp == nil && a == nil { + return true + } + if kp == nil || a == nil { + return false + } + return kp.address == a.address +} - copy(result[:], bytes) +var ( + _ = encoding.TextMarshaler(&FromAddress{}) + _ = encoding.TextUnmarshaler(&FromAddress{}) +) + +func (kp *FromAddress) UnmarshalText(text []byte) error { + textKP, err := ParseAddress(string(text)) + if err != nil { + return err + } + *kp = *textKP + return nil +} + +func (kp *FromAddress) MarshalText() ([]byte, error) { + return []byte(kp.address), nil +} - return &result +var ( + _ = encoding.BinaryMarshaler(&FromAddress{}) + _ = encoding.BinaryUnmarshaler(&FromAddress{}) +) + +func (kp *FromAddress) UnmarshalBinary(b []byte) error { + accountID := xdr.AccountId{} + err := xdr.SafeUnmarshal(b, &accountID) + if err != nil { + return err + } + address := accountID.Address() + binKP, err := ParseAddress(address) + if err != nil { + return err + } + *kp = *binKP + return nil +} + +func (kp *FromAddress) MarshalBinary() ([]byte, error) { + accountID, err := xdr.AddressToAccountId(kp.address) + if err != nil { + return nil, err + } + return accountID.MarshalBinary() } diff --git a/keypair/from_address_test.go b/keypair/from_address_test.go index 620e570cdb..aad7cf9b62 100644 --- a/keypair/from_address_test.go +++ b/keypair/from_address_test.go @@ -1,15 +1,55 @@ package keypair import ( + "testing" + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/types" + "github.com/stretchr/testify/assert" ) +func TestFromAddress_Hint(t *testing.T) { + kp := MustParseAddress("GAYUB4KATGTUZEGUMJEOZDPPWM4MQLHCIKC4T55YSXHN234WI6BJMIY2") + assert.Equal(t, [4]byte{0x96, 0x47, 0x82, 0x96}, kp.Hint()) +} + +func TestFromAddress_Equal(t *testing.T) { + // A nil FromAddress. + var kp0 *FromAddress + + // A FromAddress with a value. + kp1 := MustParseAddress("GAYUB4KATGTUZEGUMJEOZDPPWM4MQLHCIKC4T55YSXHN234WI6BJMIY2") + + // Another FromAddress with a value. + kp2 := MustParseAddress("GD5II5W6KQTJPES32LL6VJK6PLOHMEKYUXJPLERXUKR3MCLM3TNFSIPW") + + // A nil FromAddress should be equal to a nil FromAddress. + assert.True(t, kp0.Equal(nil)) + + // A non-nil FromAddress is not equal to a nil KP with no type. + assert.False(t, kp1.Equal(nil)) + + // A non-nil FromAddress is not equal to a nil FromAddress. + assert.False(t, kp1.Equal(nil)) + + // A non-nil FromAddress is equal to itself. + assert.True(t, kp1.Equal(kp1)) + + // A non-nil FromAddress is equal to another FromAddress containing the same address. + assert.True(t, kp1.Equal(MustParseAddress(kp1.address))) + + // A non-nil FromAddress is not equal a non-nil FromAddress of different value. + assert.False(t, kp1.Equal(kp2)) + assert.False(t, kp2.Equal(kp1)) +} + var _ = Describe("keypair.FromAddress", func() { var subject KP JustBeforeEach(func() { - subject = &FromAddress{address} + subject = MustParse(address) }) ItBehavesLikeAKP(&subject) @@ -19,7 +59,12 @@ var _ = Describe("keypair.FromAddress", func() { _, err := subject.Sign(message) Expect(err).To(HaveOccurred()) }) - + }) + Describe("SignBase64()", func() { + It("fails", func() { + _, err := subject.SignBase64(message) + Expect(err).To(HaveOccurred()) + }) }) Describe("SignDecorated()", func() { It("fails", func() { @@ -27,4 +72,146 @@ var _ = Describe("keypair.FromAddress", func() { Expect(err).To(HaveOccurred()) }) }) + + Describe("MarshalText()", func() { + type Case struct { + Input *FromAddress + BytesCase types.GomegaMatcher + ErrCase types.GomegaMatcher + } + DescribeTable("MarshalText()", + func(c Case) { + bytes, err := c.Input.MarshalText() + Expect(bytes).To(c.BytesCase) + Expect(err).To(c.ErrCase) + }, + Entry("a valid address", Case{ + Input: MustParseAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + BytesCase: Equal([]byte("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")), + ErrCase: Not(HaveOccurred()), + }), + Entry("an empty address", Case{ + Input: &FromAddress{}, + BytesCase: Equal([]byte("")), + ErrCase: Not(HaveOccurred()), + }), + ) + }) + + Describe("UnmarshalText()", func() { + type Case struct { + Address *FromAddress + Input []byte + AddressCase types.GomegaMatcher + ErrCase types.GomegaMatcher + FuncCase types.GomegaMatcher + } + DescribeTable("UnmarshalText()", + func(c Case) { + f := func() { + err := c.Address.UnmarshalText(c.Input) + Expect(c.Address).To(c.AddressCase) + Expect(err).To(c.ErrCase) + } + Expect(f).To(c.FuncCase) + }, + Entry("a valid address into an empty FromAddress", Case{ + Address: &FromAddress{}, + Input: []byte("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + AddressCase: Equal(MustParseAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")), + ErrCase: Not(HaveOccurred()), + FuncCase: Not(Panic()), + }), + Entry("an invalid address into an empty FromAddress", Case{ + Address: &FromAddress{}, + Input: []byte("?BRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + AddressCase: 
Equal(&FromAddress{}), + ErrCase: HaveOccurred(), + FuncCase: Not(Panic()), + }), + Entry("a valid address into a nil FromAddress", Case{ + // This test case is included to indicate nil handling is not + // supported. Handling this case is unnecessary because the + // encoding packages in the stdlib protect against unmarshaling + // into nil objects when calling Unmarshal directly on a nil + // object and by allocating a new value in other cases. + Address: nil, + Input: []byte("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + AddressCase: Equal(MustParseAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")), + ErrCase: Not(HaveOccurred()), + FuncCase: Panic(), + }), + ) + }) + + Describe("MarshalBinary()", func() { + type Case struct { + Input *FromAddress + BytesCase types.GomegaMatcher + ErrCase types.GomegaMatcher + } + DescribeTable("MarshalBinary()", + func(c Case) { + bytes, err := c.Input.MarshalBinary() + Expect(bytes).To(c.BytesCase) + Expect(err).To(c.ErrCase) + }, + Entry("a valid address", Case{ + Input: MustParseAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + BytesCase: Equal([]byte{0, 0, 0, 0, 98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}), + ErrCase: Not(HaveOccurred()), + }), + Entry("an empty address", Case{ + Input: &FromAddress{}, + BytesCase: Equal([]byte("")), + ErrCase: HaveOccurred(), + }), + ) + }) + + Describe("UnmarshalBinary()", func() { + type Case struct { + Address *FromAddress + Input []byte + AddressCase types.GomegaMatcher + ErrCase types.GomegaMatcher + FuncCase types.GomegaMatcher + } + DescribeTable("UnmarshalBinary()", + func(c Case) { + f := func() { + err := c.Address.UnmarshalBinary(c.Input) + Expect(c.Address).To(c.AddressCase) + Expect(err).To(c.ErrCase) + } + Expect(f).To(c.FuncCase) + }, + Entry("a valid address into an empty FromAddress", Case{ + Address: &FromAddress{}, + Input: []byte{0, 0, 0, 0, 98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}, + AddressCase: Equal(MustParseAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")), + ErrCase: Not(HaveOccurred()), + FuncCase: Not(Panic()), + }), + Entry("an invalid address into an empty FromAddress", Case{ + Address: &FromAddress{}, + Input: []byte{0, 0, 0, 1, 98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}, + AddressCase: Equal(&FromAddress{}), + ErrCase: HaveOccurred(), + FuncCase: Not(Panic()), + }), + Entry("a valid address into a nil FromAddress", Case{ + // This test case is included to indicate nil handling is not + // supported. Handling this case is unnecessary because the + // encoding packages in the stdlib protect against unmarshaling + // into nil objects when calling Unmarshal directly on a nil + // object and by allocating a new value in other cases. 
+ Address: nil, + Input: []byte{0, 0, 0, 0, 98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}, + AddressCase: Equal(MustParseAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")), + ErrCase: Not(HaveOccurred()), + FuncCase: Panic(), + }), + ) + }) }) diff --git a/keypair/full.go b/keypair/full.go index 0141a3233e..ee05c302a1 100644 --- a/keypair/full.go +++ b/keypair/full.go @@ -2,22 +2,76 @@ package keypair import ( "bytes" + "crypto/ed25519" + "encoding/base64" - "github.com/agl/ed25519" "github.com/stellar/go/strkey" "github.com/stellar/go/xdr" ) type Full struct { - seed string + address string + seed string + publicKey ed25519.PublicKey + privateKey ed25519.PrivateKey +} + +func newFull(seed string) (*Full, error) { + rawSeed, err := strkey.Decode(strkey.VersionByteSeed, seed) + if err != nil { + return nil, err + } + reader := bytes.NewReader(rawSeed) + pub, priv, err := ed25519.GenerateKey(reader) + if err != nil { + return nil, err + } + address, err := strkey.Encode(strkey.VersionByteAccountID, pub) + if err != nil { + return nil, err + } + return &Full{ + address: address, + seed: seed, + publicKey: pub, + privateKey: priv, + }, nil +} + +func newFullFromRawSeed(rawSeed [32]byte) (*Full, error) { + seed, err := strkey.Encode(strkey.VersionByteSeed, rawSeed[:]) + if err != nil { + return nil, err + } + reader := bytes.NewReader(rawSeed[:]) + pub, priv, err := ed25519.GenerateKey(reader) + if err != nil { + return nil, err + } + address, err := strkey.Encode(strkey.VersionByteAccountID, pub) + if err != nil { + return nil, err + } + return &Full{ + address: address, + seed: seed, + publicKey: pub, + privateKey: priv, + }, nil } func (kp *Full) Address() string { - return strkey.MustEncode(strkey.VersionByteAccountID, kp.publicKey()[:]) + return kp.address +} + +// FromAddress gets the address-only representation, or public key, of this +// Full keypair. +func (kp *Full) FromAddress() *FromAddress { + return newFromAddressWithPublicKey(kp.address, kp.publicKey) } func (kp *Full) Hint() (r [4]byte) { - copy(r[:], kp.publicKey()[28:]) + copy(r[:], kp.publicKey[28:]) return } @@ -29,19 +83,24 @@ func (kp *Full) Verify(input []byte, sig []byte) error { if len(sig) != 64 { return ErrInvalidSignature } - - var asig [64]byte - copy(asig[:], sig[:]) - - if !ed25519.Verify(kp.publicKey(), input, &asig) { + if !ed25519.Verify(kp.publicKey, input, sig) { return ErrInvalidSignature } return nil } func (kp *Full) Sign(input []byte) ([]byte, error) { - _, priv := kp.keys() - return xdr.Signature(ed25519.Sign(priv, input)[:]), nil + return ed25519.Sign(kp.privateKey, input), nil +} + +// SignBase64 signs the input data and returns a base64 encoded string, the +// common format in which signatures are exchanged. 
+func (kp *Full) SignBase64(input []byte) (string, error) { + sig, err := kp.Sign(input) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(sig), nil } func (kp *Full) SignDecorated(input []byte) (xdr.DecoratedSignature, error) { @@ -56,20 +115,12 @@ func (kp *Full) SignDecorated(input []byte) (xdr.DecoratedSignature, error) { }, nil } -func (kp *Full) publicKey() *[32]byte { - pub, _ := kp.keys() - return pub -} - -func (kp *Full) keys() (*[32]byte, *[64]byte) { - reader := bytes.NewReader(kp.rawSeed()) - pub, priv, err := ed25519.GenerateKey(reader) - if err != nil { - panic(err) +func (kp *Full) Equal(f *Full) bool { + if kp == nil && f == nil { + return true } - return pub, priv -} - -func (kp *Full) rawSeed() []byte { - return strkey.MustDecode(strkey.VersionByteSeed, kp.seed) + if kp == nil || f == nil { + return false + } + return kp.seed == f.seed } diff --git a/keypair/full_test.go b/keypair/full_test.go index b7e2eeb423..68ba5b7bcf 100644 --- a/keypair/full_test.go +++ b/keypair/full_test.go @@ -2,16 +2,55 @@ package keypair import ( "encoding/hex" + "testing" + . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" . "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" ) +func TestFull_Hint(t *testing.T) { + kp := MustParseFull("SBFGFF27Y64ZUGFAIG5AMJGQODZZKV2YQKAVUUN4HNE24XZXD2OEUVUP") + assert.Equal(t, [4]byte{0x96, 0x47, 0x82, 0x96}, kp.Hint()) + assert.Equal(t, [4]byte{0x96, 0x47, 0x82, 0x96}, kp.FromAddress().Hint()) +} + +func TestFull_Equal(t *testing.T) { + // A nil Full. + var kp0 *Full + + // A Full with a value. + kp1 := MustParseFull("SBFGFF27Y64ZUGFAIG5AMJGQODZZKV2YQKAVUUN4HNE24XZXD2OEUVUP") + + // Another Full with a value. + kp2 := MustParseFull("SBPBTSQAIEA5HLWLVWA4TJ7RBKHCEERE2W2DZLB6AUUCEUIYWLJF2EUS") + + // A nil Full should be equal to a nil Full. + assert.True(t, kp0.Equal(nil)) + + // A non-nil Full is not equal to a nil KP with no type. + assert.False(t, kp1.Equal(nil)) + + // A non-nil Full is not equal to a nil Full. + assert.False(t, kp1.Equal(nil)) + + // A non-nil Full is equal to itself. + assert.True(t, kp1.Equal(kp1)) + + // A non-nil Full is equal to another Full containing the same address. + assert.True(t, kp1.Equal(MustParseFull(kp1.seed))) + + // A non-nil Full is not equal a non-nil Full of different value. 
+ assert.False(t, kp1.Equal(kp2)) + assert.False(t, kp2.Equal(kp1)) +} + var _ = Describe("keypair.Full", func() { var subject KP JustBeforeEach(func() { - subject = &Full{seed} + subject = MustParseFull(seed) }) ItBehavesLikeAKP(&subject) @@ -40,6 +79,24 @@ var _ = Describe("keypair.Full", func() { }), ) + DescribeTable("SignBase64()", + func(c SignCase) { + sig, err := subject.SignBase64([]byte(c.Message)) + + Expect(sig).To(Equal(c.Signature)) + Expect(err).To(BeNil()) + }, + + Entry("hello", SignCase{ + "hello", + "LnXMINUZERyqqt3fRku2UNLq8KXRjXRWk6FhAPKkk3vB3/qLCx9honaZbX7o3rLQ3Z7lEFVgd7At7BZ5LpFcCg==", + }), + Entry("this is a message", SignCase{ + "this is a message", + "e36Z09ZgpTkTBk1dqWq8+gxCKojx3KfxTNvSIEW1UAMOYPzRqthf0Iu3Ql2VymkMj2MjGJX2sN18DHNyJwkqAA==", + }), + ) + Describe("SignDecorated()", func() { It("returns the correct xdr struct", func() { sig, err := subject.SignDecorated(message) diff --git a/keypair/main.go b/keypair/main.go index 3529e27f54..be0a79ead5 100644 --- a/keypair/main.go +++ b/keypair/main.go @@ -34,9 +34,11 @@ const ( // KP is the main interface for this package type KP interface { Address() string + FromAddress() *FromAddress Hint() [4]byte Verify(input []byte, signature []byte) error Sign(input []byte) ([]byte, error) + SignBase64(input []byte) (string, error) SignDecorated(input []byte) (xdr.DecoratedSignature, error) } @@ -50,7 +52,6 @@ func Random() (*Full, error) { } kp, err := FromRawSeed(rawSeed) - if err != nil { return nil, err } @@ -59,9 +60,14 @@ func Random() (*Full, error) { } // Master returns the master keypair for a given network passphrase +// Deprecated: Use keypair.Root instead. func Master(networkPassphrase string) KP { - kp, err := FromRawSeed(network.ID(networkPassphrase)) + return Root(networkPassphrase) +} +// Root returns the root account keypair for a given network passphrase. +func Root(networkPassphrase string) *Full { + kp, err := FromRawSeed(network.ID(networkPassphrase)) if err != nil { panic(err) } @@ -73,31 +79,33 @@ func Master(networkPassphrase string) KP { // an address, or a seed. If the provided input is a seed, the resulting KP // will have signing capabilities. func Parse(addressOrSeed string) (KP, error) { - _, err := strkey.Decode(strkey.VersionByteAccountID, addressOrSeed) + addr, err := ParseAddress(addressOrSeed) if err == nil { - return &FromAddress{addressOrSeed}, nil + return addr, nil } if err != strkey.ErrInvalidVersionByte { return nil, err } - _, err = strkey.Decode(strkey.VersionByteSeed, addressOrSeed) - if err == nil { - return &Full{addressOrSeed}, nil - } + return ParseFull(addressOrSeed) +} - return nil, err +// ParseAddress constructs a new FromAddress keypair from the provided string, +// which should be an address. +func ParseAddress(address string) (*FromAddress, error) { + return newFromAddress(address) } -// FromRawSeed creates a new keypair from the provided raw ED25519 seed:w -func FromRawSeed(rawSeed [32]byte) (*Full, error) { - seed, err := strkey.Encode(strkey.VersionByteSeed, rawSeed[:]) - if err != nil { - return nil, err - } +// ParseFull constructs a new Full keypair from the provided string, which should +// be a seed. 
+func ParseFull(seed string) (*Full, error) { + return newFull(seed) +} - return &Full{seed}, nil +// FromRawSeed creates a new keypair from the provided raw ED25519 seed +func FromRawSeed(rawSeed [32]byte) (*Full, error) { + return newFullFromRawSeed(rawSeed) } // MustParse is the panic-on-fail version of Parse @@ -109,3 +117,33 @@ func MustParse(addressOrSeed string) KP { return kp } + +// MustParseAddress is the panic-on-fail version of ParseAddress +func MustParseAddress(address string) *FromAddress { + kp, err := ParseAddress(address) + if err != nil { + panic(err) + } + + return kp +} + +// MustParseFull is the panic-on-fail version of ParseFull +func MustParseFull(seed string) *Full { + kp, err := ParseFull(seed) + if err != nil { + panic(err) + } + + return kp +} + +// MustRandom is the panic-on-fail version of Random. +func MustRandom() *Full { + kp, err := Random() + if err != nil { + panic(err) + } + + return kp +} diff --git a/keypair/main_test.go b/keypair/main_test.go index 17b71cc3d1..35fadb5ae8 100644 --- a/keypair/main_test.go +++ b/keypair/main_test.go @@ -1,6 +1,10 @@ package keypair import ( + "crypto/ed25519" + "crypto/rand" + "errors" + "io" "testing" . "github.com/onsi/ginkgo" @@ -30,7 +34,6 @@ var ( ) func ItBehavesLikeAKP(subject *KP) { - // NOTE: subject will only be valid to dereference when inside am "It" // example. @@ -40,6 +43,13 @@ func ItBehavesLikeAKP(subject *KP) { }) }) + Describe("FromAddress()", func() { + It("returns an address-only representation, or public key, of this key", func() { + fromAddress := (*subject).FromAddress() + Expect(fromAddress.Address()).To(Equal(address)) + }) + }) + Describe("Hint()", func() { It("returns the correct hint", func() { Expect((*subject).Hint()).To(Equal(hint)) @@ -104,3 +114,242 @@ var _ = DescribeTable("keypair.Parse()", ErrCase: HaveOccurred(), }), ) + +type ParseFullCase struct { + Input string + FullCase types.GomegaMatcher + ErrCase types.GomegaMatcher +} + +var _ = DescribeTable("keypair.ParseFull()", + func(c ParseFullCase) { + kp, err := ParseFull(c.Input) + + Expect(kp).To(c.FullCase) + Expect(err).To(c.ErrCase) + }, + + Entry("a valid address", ParseFullCase{ + Input: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + FullCase: BeNil(), + ErrCase: HaveOccurred(), + }), + Entry("a corrupted address", ParseFullCase{ + Input: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7O32H", + FullCase: BeNil(), + ErrCase: HaveOccurred(), + }), + Entry("a valid seed", ParseFullCase{ + Input: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL4", + FullCase: Equal(&Full{ + address: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + seed: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL4", + publicKey: ed25519.PublicKey{98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}, + privateKey: ed25519.PrivateKey{206, 224, 48, 45, 89, 132, 77, 50, 189, 202, 145, 92, 130, 3, 221, 68, 179, 63, 187, 126, 220, 25, 5, 30, 163, 122, 190, 223, 40, 236, 212, 114, 98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}, + }), + ErrCase: BeNil(), + }), + Entry("a corrupted seed", ParseFullCase{ + Input: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL3", + FullCase: BeNil(), + ErrCase: HaveOccurred(), + }), + Entry("a blank string", ParseFullCase{ + Input: "", + FullCase: BeNil(), + ErrCase: HaveOccurred(), + }), +) + 
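+// sketchKeypairRoundTrip is an illustrative sketch (the function name and the
+// message bytes are arbitrary) of how the parse, sign, and verify helpers
+// introduced in this change fit together; the seed is the same test vector
+// used in the tables above.
+func sketchKeypairRoundTrip() error {
+	full := MustParseFull("SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL4")
+	addr := full.FromAddress()           // address-only (public) half of the pair
+	sig, err := full.Sign([]byte("msg")) // raw 64-byte ed25519 signature
+	if err != nil {
+		return err
+	}
+	return addr.Verify([]byte("msg"), sig) // nil means the signature checks out
+}
+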
+type MustParseFullCase struct { + Input string + FullCase types.GomegaMatcher + FuncCase types.GomegaMatcher +} + +var _ = DescribeTable("keypair.MustParseFull()", + func(c MustParseFullCase) { + f := func() { + kp := MustParseFull(c.Input) + Expect(kp).To(c.FullCase) + } + Expect(f).To(c.FuncCase) + }, + + Entry("a valid address", MustParseFullCase{ + Input: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + FullCase: BeNil(), + FuncCase: Panic(), + }), + Entry("a corrupted address", MustParseFullCase{ + Input: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7O32H", + FullCase: BeNil(), + FuncCase: Panic(), + }), + Entry("a valid seed", MustParseFullCase{ + Input: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL4", + FullCase: Equal(&Full{ + address: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + seed: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL4", + publicKey: ed25519.PublicKey{98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}, + privateKey: ed25519.PrivateKey{206, 224, 48, 45, 89, 132, 77, 50, 189, 202, 145, 92, 130, 3, 221, 68, 179, 63, 187, 126, 220, 25, 5, 30, 163, 122, 190, 223, 40, 236, 212, 114, 98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}, + }), + FuncCase: Not(Panic()), + }), + Entry("a corrupted seed", MustParseFullCase{ + Input: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL3", + FullCase: BeNil(), + FuncCase: Panic(), + }), + Entry("a blank string", MustParseFullCase{ + Input: "", + FullCase: BeNil(), + FuncCase: Panic(), + }), +) + +type ParseAddressCase struct { + Input string + AddressCase types.GomegaMatcher + ErrCase types.GomegaMatcher +} + +var _ = DescribeTable("keypair.ParseAddress()", + func(c ParseAddressCase) { + kp, err := ParseAddress(c.Input) + + Expect(kp).To(c.AddressCase) + Expect(err).To(c.ErrCase) + }, + + Entry("a valid address", ParseAddressCase{ + Input: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AddressCase: Equal(&FromAddress{ + address: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + publicKey: ed25519.PublicKey{98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}, + }), + ErrCase: BeNil(), + }), + Entry("a corrupted address", ParseAddressCase{ + Input: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7O32H", + AddressCase: BeNil(), + ErrCase: HaveOccurred(), + }), + Entry("a valid seed", ParseAddressCase{ + Input: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL4", + AddressCase: BeNil(), + ErrCase: HaveOccurred(), + }), + Entry("a corrupted seed", ParseAddressCase{ + Input: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL3", + AddressCase: BeNil(), + ErrCase: HaveOccurred(), + }), + Entry("a blank string", ParseAddressCase{ + Input: "", + AddressCase: BeNil(), + ErrCase: HaveOccurred(), + }), +) + +type MustParseAddressCase struct { + Input string + AddressCase types.GomegaMatcher + FuncCase types.GomegaMatcher +} + +var _ = DescribeTable("keypair.MustParseAddress()", + func(c MustParseAddressCase) { + f := func() { + kp := MustParseAddress(c.Input) + Expect(kp).To(c.AddressCase) + } + Expect(f).To(c.FuncCase) + }, + + Entry("a valid address", MustParseAddressCase{ + Input: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AddressCase: 
Equal(&FromAddress{ + address: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + publicKey: ed25519.PublicKey{98, 252, 29, 11, 208, 145, 178, 182, 28, 13, 214, 86, 52, 107, 42, 104, 215, 211, 71, 198, 242, 194, 200, 238, 109, 4, 71, 2, 86, 252, 5, 247}, + }), + FuncCase: Not(Panic()), + }), + Entry("a corrupted address", MustParseAddressCase{ + Input: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7O32H", + FuncCase: Panic(), + }), + Entry("a valid seed", MustParseAddressCase{ + Input: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL4", + FuncCase: Panic(), + }), + Entry("a corrupted seed", MustParseAddressCase{ + Input: "SDHOAMBNLGCE2MV5ZKIVZAQD3VCLGP53P3OBSBI6UN5L5XZI5TKHFQL3", + FuncCase: Panic(), + }), + Entry("a blank string", MustParseAddressCase{ + Input: "", + FuncCase: Panic(), + }), +) + +var _ = Describe("keypair.Random()", func() { + It("does not return the same value twice", func() { + seen := map[string]bool{} + for i := 0; i < 1000; i++ { + kp, err := Random() + Expect(err).To(BeNil()) + seed := kp.Seed() + Expect(seen).ToNot(ContainElement(seed)) + seen[seed] = true + } + }) +}) + +type errReader struct { + Err error +} + +func (r errReader) Read(_ []byte) (n int, err error) { + return 0, r.Err +} + +var _ = Describe("keypair.MustRandom()", func() { + It("does not return the same value twice", func() { + seen := map[string]bool{} + for i := 0; i < 1000; i++ { + kp := MustRandom() + seed := kp.Seed() + Expect(seen).ToNot(ContainElement(seed)) + seen[seed] = true + } + }) + + Describe("when error", func() { + var originalRandReader io.Reader + BeforeEach(func() { + originalRandReader = rand.Reader + rand.Reader = errReader{Err: errors.New("an error")} + }) + AfterEach(func() { + rand.Reader = originalRandReader + }) + It("panics", func() { + defer func() { + r := recover() + Expect(r).ToNot(BeNil()) + Expect(r).To(Equal(errors.New("an error"))) + }() + MustRandom() + }) + }) +}) + +var _ = Describe("keypair.Root()", func() { + It("returns the root key pair for the passphrase", func() { + networkPassphrase := "Standalone Network ; February 2017" + kp := Root(networkPassphrase) + seed := kp.Seed() + Expect(seed).To(Equal("SC5O7VZUXDJ6JBDSZ74DSERXL7W3Y5LTOAMRF7RQRL3TAGAPS7LUVG3L")) + }) +}) diff --git a/main_test.go b/main_test.go deleted file mode 100644 index 1aa9a2909d..0000000000 --- a/main_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package stellargo - -import ( - "bytes" - "encoding/base64" - "fmt" - "log" - "strings" - - b "github.com/stellar/go/build" - "github.com/stellar/go/hash" - "github.com/stellar/go/keypair" - "github.com/stellar/go/xdr" -) - -// ExampleDecodeTransaction shows the lowest-level process to decode a base64 -// envelope encoded in base64. -func ExampleDecodeTransaction() { - data := "AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAACgAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEAKZ7IPj/46PuWU6ZOtyMosctNAkXRNX9WCAI5RnfRk+AyxDLoDZP/9l3NvsxQtWj9juQOuoBlFLnWu8intgxQA" - - rawr := strings.NewReader(data) - b64r := base64.NewDecoder(base64.StdEncoding, rawr) - - var tx xdr.TransactionEnvelope - bytesRead, err := xdr.Unmarshal(b64r, &tx) - - fmt.Printf("read %d bytes\n", bytesRead) - - if err != nil { - log.Fatal(err) - } - - fmt.Printf("This tx has %d operations\n", len(tx.Tx.Operations)) - // Output: read 192 bytes - // This tx has 1 operations -} - -// ExampleBuildTransaction creates and signs a simple transaction using the -// build package. 
The build package is designed to make it easier and more -// intuitive to configure and sign a transaction. -func ExampleBuildTransaction() { - source := "SA26PHIKZM6CXDGR472SSGUQQRYXM6S437ZNHZGRM6QA4FOPLLLFRGDX" - tx := b.Transaction( - b.SourceAccount{source}, - b.Sequence{1}, - b.Payment( - b.Destination{"SBQHO2IMYKXAYJFCWGXC7YKLJD2EGDPSK3IUDHVJ6OOTTKLSCK6Z6POM"}, - b.NativeAmount{"50.0"}, - ), - ) - - txe := tx.Sign(source) - txeB64, err := txe.Base64() - - if err != nil { - panic(err) - } - - fmt.Printf("tx base64: %s", txeB64) -} - -// ExampleLowLevelTransaction creates and signs a simple transaction, and then -// encodes it into a hex string capable of being submitted to stellar-core. -// -// It uses the low-level xdr facilities to create the transaction. -func ExampleLowLevelTransaction() { - skp := keypair.MustParse("SA26PHIKZM6CXDGR472SSGUQQRYXM6S437ZNHZGRM6QA4FOPLLLFRGDX") - dkp := keypair.MustParse("SBQHO2IMYKXAYJFCWGXC7YKLJD2EGDPSK3IUDHVJ6OOTTKLSCK6Z6POM") - - asset, err := xdr.NewAsset(xdr.AssetTypeAssetTypeNative, nil) - if err != nil { - panic(err) - } - - var destination xdr.AccountId - err = destination.SetAddress(dkp.Address()) - if err != nil { - panic(err) - } - - op := xdr.PaymentOp{ - Destination: destination, - Asset: asset, - Amount: 50 * 10000000, - } - - memo, err := xdr.NewMemo(xdr.MemoTypeMemoNone, nil) - - var source xdr.AccountId - err = source.SetAddress(skp.Address()) - if err != nil { - panic(err) - } - - body, err := xdr.NewOperationBody(xdr.OperationTypePayment, op) - if err != nil { - panic(err) - } - - tx := xdr.Transaction{ - SourceAccount: source, - Fee: 10, - SeqNum: xdr.SequenceNumber(1), - Memo: memo, - Operations: []xdr.Operation{ - {Body: body}, - }, - } - - var txBytes bytes.Buffer - _, err = xdr.Marshal(&txBytes, tx) - if err != nil { - panic(err) - } - - txHash := hash.Hash(txBytes.Bytes()) - signature, err := skp.Sign(txHash[:]) - if err != nil { - panic(err) - } - - ds := xdr.DecoratedSignature{ - Hint: skp.Hint(), - Signature: xdr.Signature(signature[:]), - } - - txe := xdr.TransactionEnvelope{ - Tx: tx, - Signatures: []xdr.DecoratedSignature{ds}, - } - - var txeBytes bytes.Buffer - _, err = xdr.Marshal(&txeBytes, txe) - if err != nil { - panic(err) - } - txeB64 := base64.StdEncoding.EncodeToString(txeBytes.Bytes()) - - fmt.Printf("tx base64: %s", txeB64) - // Output: tx base64: AAAAAAU08yUQ8sHqhY8j9mXWwERfHC/3cKFSe/spAr0rGtO2AAAACgAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAA+fnTe7/v4whpBUx96oj92jfZPz7S00l3O2xeyeqWIA0AAAAAAAAAAB3NZQAAAAAAAAAAASsa07YAAABAieruUIGcQH6RlQ+prYflPFU3nED2NvWhtaC+tgnKsqgiKURK4xo/W7EgH0+I6aQok52awbE+ksOxEQ5MLJ9eAw== -} diff --git a/meta/bundle.go b/meta/bundle.go deleted file mode 100644 index c74a3a2a1c..0000000000 --- a/meta/bundle.go +++ /dev/null @@ -1,137 +0,0 @@ -package meta - -import ( - "errors" - "fmt" - "math" - - "github.com/stellar/go/xdr" -) - -// ErrMetaNotFound is returned when no meta that matches a provided filter can -// be found. -var ErrMetaNotFound = errors.New("meta: no changes found") - -// InitialState returns the initial state of the LedgerEntry identified by `key` -// just prior to the application of the transaction the produced `b`. Returns -// nil if the ledger entry did not exist prior to the bundle. 
-func (b *Bundle) InitialState(key xdr.LedgerKey) (*xdr.LedgerEntry, error) { - all := b.Changes(key) - - if len(all) == 0 { - return nil, ErrMetaNotFound - } - - first := all[0] - - if first.Type != xdr.LedgerEntryChangeTypeLedgerEntryState { - return nil, nil - } - - result := first.MustState() - - return &result, nil -} - -// Changes returns any changes within the bundle that apply to the entry -// identified by `key`. -func (b *Bundle) Changes(target xdr.LedgerKey) (ret []xdr.LedgerEntryChange) { - return b.changes(target, math.MaxInt32) -} - -// StateAfter returns the state of entry `key` after the application of the -// operation at `opidx` -func (b *Bundle) StateAfter(key xdr.LedgerKey, opidx int) (*xdr.LedgerEntry, error) { - all := b.changes(key, opidx) - - if len(all) == 0 { - return nil, ErrMetaNotFound - } - - change := all[len(all)-1] - - switch change.Type { - case xdr.LedgerEntryChangeTypeLedgerEntryCreated: - entry := change.MustCreated() - return &entry, nil - case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: - return nil, nil - case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: - entry := change.MustUpdated() - return &entry, nil - case xdr.LedgerEntryChangeTypeLedgerEntryState: - // scott: stellar-core should not emit a lone state entry, and we are - // retrieving changes from the end of the collection. If this situation - // occurs, it means that I didn't understand something correctly or there is - // a bug in stellar-core. - panic(fmt.Errorf("Unexpected 'state' entry")) - default: - panic(fmt.Errorf("Unknown change type: %v", change.Type)) - } -} - -// StateBefore returns the state of entry `key` just prior to the application of -// the operation at `opidx` -func (b *Bundle) StateBefore(key xdr.LedgerKey, opidx int) (*xdr.LedgerEntry, error) { - all := b.changes(key, opidx) - - if len(all) == 0 { - return nil, ErrMetaNotFound - } - - // If we only found one entry, that means it didn't exist prior to this - // operation - if len(all) == 1 { - return nil, nil - } - - change := all[len(all)-2] - - switch change.Type { - case xdr.LedgerEntryChangeTypeLedgerEntryCreated: - entry := change.MustCreated() - return &entry, nil - case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: - return nil, nil - case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: - entry := change.MustUpdated() - return &entry, nil - case xdr.LedgerEntryChangeTypeLedgerEntryState: - entry := change.MustState() - return &entry, nil - default: - panic(fmt.Errorf("Unknown change type: %v", change.Type)) - } -} - -// changes returns any changes within the bundle that apply to the entry -// identified by `key` that occurred at or before `maxOp`. -func (b *Bundle) changes(target xdr.LedgerKey, maxOp int) (ret []xdr.LedgerEntryChange) { - for _, change := range b.FeeMeta { - key := change.LedgerKey() - - if !key.Equals(target) { - continue - } - - ret = append(ret, change) - } - - for i, op := range b.TransactionMeta.MustOperations() { - if i > maxOp { - break - } - - for _, change := range op.Changes { - key := change.LedgerKey() - - if !key.Equals(target) { - continue - } - - ret = append(ret, change) - } - } - - return -} diff --git a/meta/bundle_test.go b/meta/bundle_test.go deleted file mode 100644 index df95adeebf..0000000000 --- a/meta/bundle_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package meta_test - -import ( - . "github.com/stellar/go/meta" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/stellar/go/xdr" -) - -var _ = Describe("meta.Bundle", func() { - var createAccount = bundle( - "AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", - "AAAAAAAAAAEAAAACAAAAAAAAAAIAAAAAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAADuaygAAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAAAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3DeC2s2vJNNQAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAA", - ) - - var removeTrustline = bundle( - "AAAAAgAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", - "AAAAAAAAAAEAAAADAAAAAQAAAAUAAAAAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAlQL4tQAAAACAAAAAwAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAQAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAAAAAAAAAAAlQL5AAAAAAAQAAAAAAAAAAAAAAAgAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7w==", - ) - - var updateTrustline = bundle( - "AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", - "AAAAAAAAAAEAAAACAAAAAwAAAAMAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAAAAAAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAQAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAAAAAAAAAAAlQL5AAAAAAAQAAAAAAAAAA", - ) - // var mergeAccount = nil //TODO - - var newAccount xdr.AccountId - var masterAccount xdr.AccountId - var nonexistantAccount xdr.AccountId - var gatewayAccount xdr.AccountId - - BeforeEach(func() { - err := newAccount.SetAddress("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU") - Expect(err).ToNot(HaveOccurred()) - err = masterAccount.SetAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") - Expect(err).ToNot(HaveOccurred()) - err = nonexistantAccount.SetAddress("GDGAWQZT2RALG2XBEESTMA7PHDASK4EZGXWGBCIHZRSGGLZOGZGV5JL3") - Expect(err).ToNot(HaveOccurred()) - err = gatewayAccount.SetAddress("GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4") - Expect(err).ToNot(HaveOccurred()) - }) - - Describe("InitialState", func() { - - It("errors when `key` does not appear in the bundle", func() { - _, err := createAccount.InitialState(nonexistantAccount.LedgerKey()) - Expect(err).To(MatchError("meta: no changes found")) - }) - - It("returns nil if `key` gets created within the bundle", func() { - found, err := createAccount.InitialState(newAccount.LedgerKey()) - Expect(err).ToNot(HaveOccurred()) - Expect(found).To(BeNil()) - }) - - It("returns the state if found", func() { - found, err := createAccount.InitialState(masterAccount.LedgerKey()) - Expect(err).ToNot(HaveOccurred()) - Expect(found).ToNot(BeNil()) - Expect(found.Data.Type).To(Equal(xdr.LedgerEntryTypeAccount)) - - account := found.Data.MustAccount().AccountId - 
Expect(account.Equals(masterAccount)).To(BeTrue()) - }) - }) - - Describe("StateAfter", func() { - It("returns newly created entries correctly", func() { - state, err := createAccount.StateAfter(newAccount.LedgerKey(), 0) - Expect(err).ToNot(HaveOccurred()) - Expect(state).ToNot(BeNil()) - - account := state.Data.MustAccount() - Expect(account.Balance).To(Equal(xdr.Int64(1000000000))) - }) - }) - - Describe("StateBefore", func() { - Context("Accounts", func() { - It("return nil when the account was created in the operation", func() { - state, err := createAccount.StateBefore(newAccount.LedgerKey(), 0) - Expect(err).ToNot(HaveOccurred()) - Expect(state).To(BeNil()) - }) - - It("passes a sanity test", func() { - before, err := createAccount.StateBefore(masterAccount.LedgerKey(), 0) - Expect(err).ToNot(HaveOccurred()) - Expect(before).ToNot(BeNil()) - after, err := createAccount.StateAfter(masterAccount.LedgerKey(), 0) - Expect(err).ToNot(HaveOccurred()) - Expect(after).ToNot(BeNil()) - Expect(before.Data.MustAccount().Balance).To(BeNumerically(">", after.Data.MustAccount().Balance)) - }) - }) - - Context("Trustlines", func() { - var tlkey xdr.LedgerKey - var line xdr.Asset - BeforeEach(func() { - line.SetCredit("USD", gatewayAccount) - tlkey.SetTrustline(newAccount, line) - }) - - It("properly returns the state of a trustlines that gets removed", func() { - before, err := removeTrustline.StateBefore(tlkey, 0) - Expect(err).ToNot(HaveOccurred()) - Expect(before).ToNot(BeNil()) - - tl := before.Data.MustTrustLine() - Expect(tl.Limit).To(Equal(xdr.Int64(40000000000))) - }) - - It("properly returns the state of a trustlines that gets removed", func() { - before, err := updateTrustline.StateBefore(tlkey, 0) - Expect(err).ToNot(HaveOccurred()) - Expect(before).ToNot(BeNil()) - tl := before.Data.MustTrustLine() - Expect(tl.Limit).To((BeNumerically(">", 40000000000))) - }) - }) - }) -}) - -func bundle(feeMeta, resultMeta string) (ret Bundle) { - err := xdr.SafeUnmarshalBase64(feeMeta, &ret.FeeMeta) - if err != nil { - panic(err) - } - err = xdr.SafeUnmarshalBase64(resultMeta, &ret.TransactionMeta) - if err != nil { - panic(err) - } - return -} diff --git a/meta/main.go b/meta/main.go deleted file mode 100644 index e40c38f75e..0000000000 --- a/meta/main.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package meta provides helpers for processing the metadata that is produced by -// stellar-core while processing transactions. -package meta - -import "github.com/stellar/go/xdr" - -// Bundle represents all of the metadata emitted from the application of a single -// stellar transaction; Both fee meta and result meta is included. -type Bundle struct { - FeeMeta xdr.LedgerEntryChanges - TransactionMeta xdr.TransactionMeta -} diff --git a/meta/meta_suite_test.go b/meta/meta_suite_test.go deleted file mode 100644 index c66388d1af..0000000000 --- a/meta/meta_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package meta_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "testing" -) - -func TestMeta(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Meta Suite") -} diff --git a/network/main.go b/network/main.go index a74c1e0a28..76c21d8c32 100644 --- a/network/main.go +++ b/network/main.go @@ -1,7 +1,15 @@ +// Package network contains functions that deal with stellar network passphrases +// and IDs. 
package network import ( + "bytes" + + "strings" + "github.com/stellar/go/hash" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" ) const ( @@ -11,6 +19,95 @@ const ( TestNetworkPassphrase = "Test SDF Network ; September 2015" ) +// ID returns the network ID derived from the provided passphrase. This value +// also happens to be the raw (i.e. not strkey encoded) secret key for the root +// account of the network. func ID(passphrase string) [32]byte { return hash.Hash([]byte(passphrase)) } + +// HashTransactionInEnvelope derives the network specific hash for the transaction +// contained in the provided envelope using the network identified by the supplied passphrase. +// The resulting hash is the value that can be signed by stellar secret key to +// authorize the transaction identified by the hash to stellar validators. +func HashTransactionInEnvelope(envelope xdr.TransactionEnvelope, passphrase string) ([32]byte, error) { + var hash [32]byte + var err error + switch envelope.Type { + case xdr.EnvelopeTypeEnvelopeTypeTx: + hash, err = HashTransaction(envelope.V1.Tx, passphrase) + case xdr.EnvelopeTypeEnvelopeTypeTxV0: + hash, err = HashTransactionV0(envelope.V0.Tx, passphrase) + case xdr.EnvelopeTypeEnvelopeTypeTxFeeBump: + hash, err = HashFeeBumpTransaction(envelope.FeeBump.Tx, passphrase) + default: + err = errors.New("invalid transaction type") + } + return hash, err +} + +// HashTransaction derives the network specific hash for the provided +// transaction using the network identified by the supplied passphrase. The +// resulting hash is the value that can be signed by stellar secret key to +// authorize the transaction identified by the hash to stellar validators. +func HashTransaction(tx xdr.Transaction, passphrase string) ([32]byte, error) { + taggedTx := xdr.TransactionSignaturePayloadTaggedTransaction{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + Tx: &tx, + } + return hashTx(taggedTx, passphrase) +} + +// HashFeeBumpTransaction derives the network specific hash for the provided +// fee bump transaction using the network identified by the supplied passphrase. The +// resulting hash is the value that can be signed by stellar secret key to +// authorize the transaction identified by the hash to stellar validators. +func HashFeeBumpTransaction(tx xdr.FeeBumpTransaction, passphrase string) ([32]byte, error) { + taggedTx := xdr.TransactionSignaturePayloadTaggedTransaction{ + Type: xdr.EnvelopeTypeEnvelopeTypeTxFeeBump, + FeeBump: &tx, + } + return hashTx(taggedTx, passphrase) +} + +// HashTransactionV0 derives the network specific hash for the provided +// legacy transaction using the network identified by the supplied passphrase. The +// resulting hash is the value that can be signed by stellar secret key to +// authorize the transaction identified by the hash to stellar validators. 
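To tie the hashing helpers above together, here is a minimal usage sketch written for this review (not part of the change). It assumes only the `network` and `xdr` APIs visible in this diff and reuses the base64 envelope from the test file below.

```go
package main

import (
	"encoding/hex"
	"fmt"
	"log"

	"github.com/stellar/go/network"
	"github.com/stellar/go/xdr"
)

func main() {
	// Base64-encoded envelope copied from the test case in this diff.
	envelopeB64 := "AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAACgAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEAKZ7IPj/46PuWU6ZOtyMosctNAkXRNX9WCAI5RnfRk+AyxDLoDZP/9l3NvsxQtWj9juQOuoBlFLnWu8intgxQA"

	var envelope xdr.TransactionEnvelope
	if err := xdr.SafeUnmarshalBase64(envelopeB64, &envelope); err != nil {
		log.Fatal(err)
	}

	// The network ID is simply the SHA-256 hash of the passphrase.
	id := network.ID(network.TestNetworkPassphrase)
	fmt.Println("network id:", hex.EncodeToString(id[:]))

	// HashTransactionInEnvelope dispatches to the right routine for the
	// envelope type (v0, v1 or fee bump); the result is what gets signed.
	txHash, err := network.HashTransactionInEnvelope(envelope, network.TestNetworkPassphrase)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("tx hash:", hex.EncodeToString(txHash[:]))
}
```

The same envelope hashes identically whether it goes through `HashTransactionV0` directly or through `HashTransactionInEnvelope`, which is what the test below asserts.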
+func HashTransactionV0(tx xdr.TransactionV0, passphrase string) ([32]byte, error) { + sa, err := xdr.NewMuxedAccount(xdr.CryptoKeyTypeKeyTypeEd25519, tx.SourceAccountEd25519) + if err != nil { + return [32]byte{}, err + } + v1Tx := xdr.Transaction{ + SourceAccount: sa, + Fee: tx.Fee, + Memo: tx.Memo, + Operations: tx.Operations, + SeqNum: tx.SeqNum, + TimeBounds: tx.TimeBounds, + } + return HashTransaction(v1Tx, passphrase) +} + +func hashTx( + tx xdr.TransactionSignaturePayloadTaggedTransaction, + passphrase string, +) ([32]byte, error) { + if strings.TrimSpace(passphrase) == "" { + return [32]byte{}, errors.New("empty network passphrase") + } + + var txBytes bytes.Buffer + payload := xdr.TransactionSignaturePayload{ + NetworkId: ID(passphrase), + TaggedTransaction: tx, + } + + _, err := xdr.Marshal(&txBytes, payload) + if err != nil { + return [32]byte{}, errors.Wrap(err, "marshal tx failed") + } + + return hash.Hash(txBytes.Bytes()), nil +} diff --git a/network/main_test.go b/network/main_test.go new file mode 100644 index 0000000000..265d6931eb --- /dev/null +++ b/network/main_test.go @@ -0,0 +1,99 @@ +package network + +import ( + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHashTransaction(t *testing.T) { + var txe xdr.TransactionEnvelope + err := xdr.SafeUnmarshalBase64("AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAACgAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEAKZ7IPj/46PuWU6ZOtyMosctNAkXRNX9WCAI5RnfRk+AyxDLoDZP/9l3NvsxQtWj9juQOuoBlFLnWu8intgxQA", &txe) + + require.NoError(t, err) + + expected := [32]byte{ + 0xc4, 0x92, 0xd8, 0x7c, 0x46, 0x42, 0x81, 0x5d, + 0xfb, 0x3c, 0x7d, 0xcc, 0xe0, 0x1a, 0xf4, 0xef, + 0xfd, 0x16, 0x2b, 0x03, 0x10, 0x64, 0x09, 0x8a, + 0x0d, 0x78, 0x6b, 0x6e, 0x0a, 0x00, 0xfd, 0x74, + } + actual, err := HashTransactionV0(txe.V0.Tx, TestNetworkPassphrase) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + + actual, err = HashTransactionInEnvelope(txe, TestNetworkPassphrase) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + + _, err = HashTransactionV0(txe.V0.Tx, "") + assert.Contains(t, err.Error(), "empty network passphrase") + _, err = HashTransactionInEnvelope(txe, "") + assert.Contains(t, err.Error(), "empty network passphrase") + + tx := xdr.Transaction{ + SourceAccount: txe.SourceAccount(), + Fee: xdr.Uint32(txe.Fee()), + Memo: txe.Memo(), + Operations: txe.Operations(), + SeqNum: xdr.SequenceNumber(txe.SeqNum()), + TimeBounds: txe.TimeBounds(), + } + actual, err = HashTransaction(tx, TestNetworkPassphrase) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + + txe.Type = xdr.EnvelopeTypeEnvelopeTypeTx + txe.V0 = nil + txe.V1 = &xdr.TransactionV1Envelope{ + Tx: tx, + } + actual, err = HashTransactionInEnvelope(txe, TestNetworkPassphrase) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + + // sadpath: empty passphrase + _, err = HashTransaction(tx, "") + assert.Contains(t, err.Error(), "empty network passphrase") + _, err = HashTransactionInEnvelope(txe, "") + assert.Contains(t, err.Error(), "empty network passphrase") + + sourceAID := xdr.MustAddress("GCLOMB72ODBFUGK4E2BK7VMR3RNZ5WSTMEOGNA2YUVHFR3WMH2XBAB6H") + feeBumpTx := xdr.FeeBumpTransaction{ + Fee: 123456, + FeeSource: sourceAID.ToMuxedAccount(), + InnerTx: xdr.FeeBumpTransactionInnerTx{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: tx, + Signatures: 
[]xdr.DecoratedSignature{}, + }, + }, + } + + expected = [32]uint8{ + 0x4d, 0x4c, 0xe9, 0x2, 0x63, 0x72, 0x27, 0xfb, + 0x8b, 0x52, 0x2a, 0xe4, 0x8c, 0xcd, 0xd6, 0x9d, + 0x32, 0x51, 0x72, 0x46, 0xd9, 0xfc, 0x23, 0xff, + 0x8b, 0x7a, 0x85, 0xdd, 0x4b, 0xbc, 0xef, 0x5f, + } + actual, err = HashFeeBumpTransaction(feeBumpTx, TestNetworkPassphrase) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + + txe.Type = xdr.EnvelopeTypeEnvelopeTypeTxFeeBump + txe.V1 = nil + txe.FeeBump = &xdr.FeeBumpTransactionEnvelope{ + Tx: feeBumpTx, + } + actual, err = HashTransactionInEnvelope(txe, TestNetworkPassphrase) + assert.NoError(t, err) + assert.Equal(t, expected, actual) + + _, err = HashFeeBumpTransaction(feeBumpTx, "") + assert.Contains(t, err.Error(), "empty network passphrase") + _, err = HashTransactionInEnvelope(txe, "") + assert.Contains(t, err.Error(), "empty network passphrase") +} diff --git a/price/main.go b/price/main.go index 9a5acf6827..7737c8dfa3 100644 --- a/price/main.go +++ b/price/main.go @@ -1,3 +1,7 @@ +// Package price implements functions to ease working with stellar price values. +// At present, prices are only used within the offer system, and are represented +// by a fraction whose numberator and denominator are both 32-bit signed +// integers. package price import ( @@ -5,17 +9,51 @@ import ( "fmt" "math" "math/big" + "math/bits" + "regexp" + "strconv" "github.com/stellar/go/xdr" ) -// Parse calculates and returns the best rational approximation of the given real number price. +var ( + // validAmountSimple is a simple regular expression checking if a string looks like + // a number, more or less. The details will be checked in `math/big` internally. + // What we want to prevent is passing very big numbers like `1e9223372036854775807` + // to `big.Rat.SetString` triggering long calculations. + // Note: {1,20} because the biggest amount you can use in Stellar is: + // len("922337203685.4775807") = 20. + validAmountSimple = regexp.MustCompile("^-?[.0-9]{1,20}$") + // ErrDivisionByZero is returned when a price operation would result in a division by 0 + ErrDivisionByZero = errors.New("division by 0") + // ErrOverflow is returned when a price operation would result in an integer overflow + ErrOverflow = errors.New("overflow") +) + +// Parse calculates and returns the best rational approximation of the given +// real number price while still keeping both the numerator and the denominator +// of the resulting value within the precision limits of a 32-bit signed +// integer.. func Parse(v string) (xdr.Price, error) { return continuedFraction(v) } -// continuedFraction calculates and returns the best rational approximation of the given real number. +// MustParse is like Parse except that it panics on errors. +func MustParse(v string) xdr.Price { + result, err := Parse(v) + if err != nil { + panic(err) + } + return result +} + +// continuedFraction calculates and returns the best rational approximation of +// the given real number. 
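As a quick illustration of the parsing helpers added above, here is a short sketch written for this review (not part of the change); it relies only on `Parse`/`MustParse` and on the expectations recorded in the test table further down.

```go
package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/price"
)

func main() {
	// Parse returns the best rational approximation whose numerator and
	// denominator both fit into signed 32-bit integers.
	p, err := price.Parse("0.85334384")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("0.85334384 ~= %d/%d\n", p.N, p.D) // 5333399/6250000, per the test table

	// Values that cannot be represented are rejected with an error.
	if _, err := price.Parse("2147483649"); err != nil {
		fmt.Println("rejected:", err)
	}

	// MustParse is the panicking convenience variant.
	fmt.Println(price.MustParse("5.5")) // {11 2}
}
```

Very long numeric strings never reach `big.Rat` at all: the `validAmountSimple` guard rejects them up front, which is what the new "expensive inputs" test cases exercise.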
func continuedFraction(price string) (xdrPrice xdr.Price, err error) { + if !validAmountSimple.MatchString(price) { + return xdrPrice, fmt.Errorf("invalid price format: %s", price) + } + number := &big.Rat{} maxInt32 := &big.Rat{} zero := &big.Rat{} @@ -83,3 +121,108 @@ func floor(n *big.Rat) *big.Rat { f.SetInt(z) return f } + +//StringFromFloat64 will format a float64 to decimal representation with 7 digits after the decimal point +func StringFromFloat64(v float64) string { + return strconv.FormatFloat(v, 'f', 7, 64) +} + +// ConvertToBuyingUnits uses special rounding logic to multiply the amount by +// the price and returns (buyingUnits, sellingUnits) that can be taken from the +// offer +// +// offerSellingBound = (offer.price.n > offer.price.d) +// ? offer.amount +// : ceil(floor(offer.amount * offer.price) / offer.price) +// pathPaymentAmountBought = min(offerSellingBound, pathPaymentBuyingBound) +// pathPaymentAmountSold = ceil(pathPaymentAmountBought * offer.price) +// +// offer.amount = amount selling +// offerSellingBound = roundingCorrectedOffer +// pathPaymentBuyingBound = needed +// pathPaymentAmountBought = what we are consuming from offer +// pathPaymentAmountSold = amount we are giving to the buyer +// +// Sell units = pathPaymentAmountSold and buy units = pathPaymentAmountBought +// +// this is how we do floor and ceiling in stellar-core: +// https://github.com/stellar/stellar-core/blob/9af27ef4e20b66f38ab148d52ba7904e74fe502f/src/util/types.cpp#L201 +func ConvertToBuyingUnits(sellingOfferAmount int64, sellingUnitsNeeded int64, pricen int64, priced int64) (int64, int64, error) { + var e error + // offerSellingBound + result := sellingOfferAmount + if pricen <= priced { + result, e = MulFractionRoundDown(sellingOfferAmount, pricen, priced) + if e != nil { + return 0, 0, e + } + result, e = mulFractionRoundUp(result, priced, pricen) + if e != nil { + return 0, 0, e + } + } + + // pathPaymentAmountBought + result = min(result, sellingUnitsNeeded) + sellingUnitsExtracted := result + + // pathPaymentAmountSold + result, e = mulFractionRoundUp(result, pricen, priced) + if e != nil { + return 0, 0, e + } + + return result, sellingUnitsExtracted, nil +} + +// MulFractionRoundDown sets x = (x * n) / d, which is a round-down operation +// see https://github.com/stellar/stellar-core/blob/9af27ef4e20b66f38ab148d52ba7904e74fe502f/src/util/types.cpp#L201 +func MulFractionRoundDown(x int64, n int64, d int64) (int64, error) { + if d == 0 { + return 0, ErrDivisionByZero + } + + hi, lo := bits.Mul64(uint64(x), uint64(n)) + + denominator := uint64(d) + if denominator <= hi { + return 0, ErrOverflow + } + q, _ := bits.Div64(hi, lo, denominator) + if q > math.MaxInt64 { + return 0, ErrOverflow + } + + return int64(q), nil +} + +// mulFractionRoundUp sets x = ((x * n) + d - 1) / d, which is a round-up operation +// see https://github.com/stellar/stellar-core/blob/9af27ef4e20b66f38ab148d52ba7904e74fe502f/src/util/types.cpp#L201 +func mulFractionRoundUp(x int64, n int64, d int64) (int64, error) { + if d == 0 { + return 0, ErrDivisionByZero + } + + hi, lo := bits.Mul64(uint64(x), uint64(n)) + lo, carry := bits.Add64(lo, uint64(d-1), 0) + hi += carry + + denominator := uint64(d) + if denominator <= hi { + return 0, ErrOverflow + } + q, _ := bits.Div64(hi, lo, denominator) + if q > math.MaxInt64 { + return 0, ErrOverflow + } + + return int64(q), nil +} + +// min impl for int64 +func min(x int64, y int64) int64 { + if x <= y { + return x + } + return y +} diff --git a/price/main_test.go 
b/price/main_test.go index 50347ab6e7..2b83c5d88c 100644 --- a/price/main_test.go +++ b/price/main_test.go @@ -1,57 +1,132 @@ -package price_test +package price import ( + "math" + "strings" "testing" - "github.com/stellar/go/price" "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" ) var Tests = []struct { S string P xdr.Price + V bool }{ - {"0.1", xdr.Price{1, 10}}, - {"0.01", xdr.Price{1, 100}}, - {"0.001", xdr.Price{1, 1000}}, - {"543.017930", xdr.Price{54301793, 100000}}, - {"319.69983", xdr.Price{31969983, 100000}}, - {"0.93", xdr.Price{93, 100}}, - {"0.5", xdr.Price{1, 2}}, - {"1.730", xdr.Price{173, 100}}, - {"0.85334384", xdr.Price{5333399, 6250000}}, - {"5.5", xdr.Price{11, 2}}, - {"2.72783", xdr.Price{272783, 100000}}, - {"638082.0", xdr.Price{638082, 1}}, - {"2.93850088", xdr.Price{36731261, 12500000}}, - {"58.04", xdr.Price{1451, 25}}, - {"41.265", xdr.Price{8253, 200}}, - {"5.1476", xdr.Price{12869, 2500}}, - {"95.14", xdr.Price{4757, 50}}, - {"0.74580", xdr.Price{3729, 5000}}, - {"4119.0", xdr.Price{4119, 1}}, + {"0.1", xdr.Price{1, 10}, true}, + {"0.01", xdr.Price{1, 100}, true}, + {"0.001", xdr.Price{1, 1000}, true}, + {"543.017930", xdr.Price{54301793, 100000}, true}, + {"319.69983", xdr.Price{31969983, 100000}, true}, + {"0.93", xdr.Price{93, 100}, true}, + {"0.5", xdr.Price{1, 2}, true}, + {"1.730", xdr.Price{173, 100}, true}, + {"0.85334384", xdr.Price{5333399, 6250000}, true}, + {"5.5", xdr.Price{11, 2}, true}, + {"2.72783", xdr.Price{272783, 100000}, true}, + {"638082.0", xdr.Price{638082, 1}, true}, + {"2.93850088", xdr.Price{36731261, 12500000}, true}, + {"58.04", xdr.Price{1451, 25}, true}, + {"41.265", xdr.Price{8253, 200}, true}, + {"5.1476", xdr.Price{12869, 2500}, true}, + {"95.14", xdr.Price{4757, 50}, true}, + {"0.74580", xdr.Price{3729, 5000}, true}, + {"4119.0", xdr.Price{4119, 1}, true}, + + // Expensive inputs: + {strings.Repeat("1", 22), xdr.Price{}, false}, + {strings.Repeat("1", 1000000), xdr.Price{}, false}, + {"0." 
+ strings.Repeat("1", 1000000), xdr.Price{}, false}, + {"1E9223372036854775807", xdr.Price{}, false}, + {"1e9223372036854775807", xdr.Price{}, false}, } func TestParse(t *testing.T) { for _, v := range Tests { - o, err := price.Parse(v.S) - if err != nil { + o, err := Parse(v.S) + if v.V && err != nil { t.Errorf("Couldn't parse %s: %v+", v.S, err) continue } + o, err = Parse(v.S) + if !v.V && err == nil { + t.Errorf("expected err for input %s", v.S) + continue + } + if o.N != v.P.N || o.D != v.P.D { t.Errorf("%s parsed to %d, not %d", v.S, o, v.P) } } - _, err := price.Parse("0.0000000003") + _, err := Parse("0.0000000003") if err == nil { t.Error("Expected error") } - _, err = price.Parse("2147483649") + _, err = Parse("2147483649") if err == nil { t.Error("Expected error") } } + +func TestStringFromFloat64(t *testing.T) { + + tests := map[float64]string{ + 0: "0.0000000", + 0.0000001: "0.0000001", + 1.0000001: "1.0000001", + 123: "123.0000000", + } + + for f, s := range tests { + assert.Equal(t, s, StringFromFloat64(f)) + } +} + +func TestConvertToBuyingUnits(t *testing.T) { + testCases := []struct { + sellingOfferAmount int64 + sellingUnitsNeeded int64 + pricen int64 + priced int64 + wantBuyingUnits int64 + wantSellingUnits int64 + }{ + {7, 2, 3, 7, 1, 2}, + {math.MaxInt64, 2, 3, 7, 1, 2}, + {20, 20, 1, 4, 5, 20}, + {20, 100, 1, 4, 5, 20}, + {20, 20, 7, 11, 13, 19}, + {20, 20, 11, 7, 32, 20}, + {20, 100, 7, 11, 13, 19}, + {20, 100, 11, 7, 32, 20}, + {1, 0, 3, 7, 0, 0}, + {1, 0, 7, 3, 0, 0}, + {math.MaxInt64, 0, 3, 7, 0, 0}, + } + for _, kase := range testCases { + t.Run(t.Name(), func(t *testing.T) { + buyingUnits, sellingUnits, e := ConvertToBuyingUnits(kase.sellingOfferAmount, kase.sellingUnitsNeeded, kase.pricen, kase.priced) + if !assert.Nil(t, e) { + return + } + assert.Equal(t, kase.wantBuyingUnits, buyingUnits) + assert.Equal(t, kase.wantSellingUnits, sellingUnits) + }) + } +} + +func TestMulFractionOverflow(t *testing.T) { + _, e := MulFractionRoundDown(math.MaxInt64/2+1, 2, 1) + if e != ErrOverflow { + t.Fatal("expected overflow error") + } + + _, e = mulFractionRoundUp(math.MaxInt64/2+1, 2, 1) + if e != ErrOverflow { + t.Fatal("expected overflow error") + } +} diff --git a/protocols/federation/main.go b/protocols/federation/main.go new file mode 100644 index 0000000000..ae4abf33ef --- /dev/null +++ b/protocols/federation/main.go @@ -0,0 +1,55 @@ +package federation + +import ( + "encoding/json" + "strconv" +) + +// NameResponse represents the result of a federation request +// for `name` and `forward` requests. +type NameResponse struct { + AccountID string `json:"account_id"` + MemoType string `json:"memo_type,omitempty"` + Memo Memo `json:"memo,omitempty"` +} + +// IDResponse represents the result of a federation request +// for `id` request. +type IDResponse struct { + Address string `json:"stellar_address"` +} + +// Memo value can be either integer or string in JSON. This struct +// allows marshaling and unmarshaling both types. +type Memo struct { + Value string +} + +func (m Memo) MarshalJSON() ([]byte, error) { + // Memo after marshalling should always be a string + value, err := json.Marshal(m.Value) + if err != nil { + return []byte{}, err + } + return value, nil +} + +func (m *Memo) UnmarshalJSON(value []byte) error { + // Try to unmarshal value into uint64. If that fails + // unmarshal into string. 
+ var uintValue uint64 + err := json.Unmarshal(value, &uintValue) + if err == nil { + m.Value = strconv.FormatUint(uintValue, 10) + return nil + } + err = json.Unmarshal(value, &m.Value) + if err != nil { + return err + } + return nil +} + +func (m *Memo) String() string { + return m.Value +} diff --git a/protocols/federation/main_test.go b/protocols/federation/main_test.go new file mode 100644 index 0000000000..9325ee06eb --- /dev/null +++ b/protocols/federation/main_test.go @@ -0,0 +1,50 @@ +package federation + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMarshal(t *testing.T) { + var m Memo + + m = Memo{"123"} + value, err := json.Marshal(m) + assert.NoError(t, err) + assert.Equal(t, `"123"`, string(value)) + + m = Memo{"Test"} + value, err = json.Marshal(m) + assert.NoError(t, err) + assert.Equal(t, `"Test"`, string(value)) + + resp := NameResponse{ + AccountID: "GCQ4MQ4ZOS6P6RON4HH6FNWNABCLZUCNBSDE3QXFZOX5VYJDDKRQDQOJ", + MemoType: "id", + Memo: Memo{"123"}, + } + value, err = json.Marshal(resp) + assert.NoError(t, err) + assert.Equal(t, `{"account_id":"GCQ4MQ4ZOS6P6RON4HH6FNWNABCLZUCNBSDE3QXFZOX5VYJDDKRQDQOJ","memo_type":"id","memo":"123"}`, string(value)) +} + +func TestUnmarshal(t *testing.T) { + var m Memo + + err := json.Unmarshal([]byte("123"), &m) + assert.NoError(t, err) + assert.Equal(t, "123", m.Value) + + err = json.Unmarshal([]byte(`"123"`), &m) + assert.NoError(t, err) + assert.Equal(t, "123", m.Value) + + err = json.Unmarshal([]byte(`"Test"`), &m) + assert.NoError(t, err) + assert.Equal(t, "Test", m.Value) + + err = json.Unmarshal([]byte("-123"), &m) + assert.Error(t, err) +} diff --git a/protocols/horizon/README.md b/protocols/horizon/README.md new file mode 100644 index 0000000000..c6f7ba2654 --- /dev/null +++ b/protocols/horizon/README.md @@ -0,0 +1,186 @@ +# Horizon Protocol Changelog + +Any changes to the Horizon Public API should be included in this doc. + +## SDK support + +We started tracking SDK support at version 0.12.3. Support for 0.12.3 means that SDK can correctly: + +* Send requests using all available query params / POST params / headers, +* Parse all fields in responses structs and headers. + +For each new version we will only track changes from the previous version. + +## Changes + +### Unreleased + +#### Changes + +* Operations responses may include a `transaction` field which represents the transaction that created the operation. + +### 0.15.0 + +#### SDKs with full support + +- [JS SDK 0.10.2](https://github.com/stellar/js-stellar-sdk/releases/tag/v0.10.2) +- [Java SDK 0.4.0](https://github.com/stellar/java-stellar-sdk/releases/tag/0.4.0) + +#### Changes + +* Assets stats are disabled by default. This can be changed using an environment variable (`ENABLE_ASSET_STATS=true`) or +CLI parameter (`--enable-asset-stats=true`). Please note that it has a negative impact on a DB and ingestion time. +* In ["Offers for Account"](https://developers.stellar.org/api/resources/accounts/offers/), +`last_modified_time` field endpoint can be `null` when ledger data is not available (has not been ingested yet). +* ["Trades for Offer"](https://developers.stellar.org/api/resources/offers/trades/) endpoint +will query for trades that match the given offer on either side of trades, rather than just the "sell" offer. +Offer IDs are now [synthetic](https://developers.stellar.org/api/resources/trades/). +* New `/operation_fee_stats` endpoint includes fee stats for the last 5 ledgers. 
+* ["Trades"](https://developers.stellar.org/api/resources/trades/list/) endpoint can now be streamed. +* In ["Trade Aggregations"](https://developers.stellar.org/api/aggregations/trade-aggregations/list/) endpoint, +`offset` parameter has been added. +* Account flags now display `auth_immutable` value. +* Rate limiting in streams has been changed to be more fair. Now 1 *credit* has to be *paid* every time there's a new ledger +instead of per request. + +| Resource | Changes | Go SDK 1 | JS SDK | Java SDK | +|:------------------------------------|:-----------------------------------------|:--------------------|:-------|:---------| +| `GET /assets` | Disabled by default. | + | 0.10.2 | 0.4.0 | +| `GET /accounts/{account_id}/offers` | `last_modified_time` field can be `null` | - | 0.10.2 | 0.4.0 | +| `GET /offers/{offer_id}/trades` | Query fields and syntetic IDs | - | 0.10.2 | 0.4.0 | +| `GET /trades` SSE | Can be streamed | - | - | 0.4.0 | +| `GET /operation_fee_stats` | New endpoint | - | - | 0.4.0 | +| `GET /trade_aggregations` | New `offset` parameter | - | - | 0.4.0 | +| `GET /accounts/{account_id}` | Displaying `auth_immutable` flag | - | 0.10.2 | 0.4.0 | + +### 0.14.0 + +#### SDKs with full support + +- [JS SDK 0.10.2](https://github.com/stellar/js-stellar-sdk/releases/tag/v0.10.2) +- [Java SDK 0.3.1](https://github.com/stellar/java-stellar-sdk/releases/tag/0.3.1) + +#### Changes + +* New [`bump_sequence`](https://www.stellar.org/developers/horizon/reference/resources/operation.html#bump-sequence) operation. +* New `sequence_bumped` effect. +* New fields in Account > Balances collection: `buying_liabilities` and `selling_liabilities`. +* Offer resource `last_modified` field removed, replaced by `last_modified_ledger` and `last_modified_time`. +* Trade aggregations endpoint accepts only specific time ranges now (1/5/15 minutes, 1 hour, 1 day, 1 week). +* Horizon now sends `Cache-Control: no-cache, no-store, max-age=0` HTTP header for all responses. + +| Resource | Changes | Go SDK 1 | JS SDK | Java SDK | +|:--------------------------------------------|:-------------------------------------------|:--------------------|:-------|:---------| +| `GET /accounts/{account_id}` | Liabilities fields in Balances collection. 
| + | 0.10.2 | 0.3.1 | +| `GET /accounts/{account_id}/effects` | `sequence_bumped` effect | - | 0.10.2 | 0.3.1 | +| `GET /accounts/{account_id}/effects` SSE | `sequence_bumped` effect | - | 0.10.2 | 0.3.1 | +| `GET /accounts/{account_id}/offers` | `last_modified` field removed | - | 0.10.2 | 0.3.1 | +| `GET /accounts/{account_id}/operations` | `bump_sequence` operation | - | 0.10.2 | 0.3.1 | +| `GET /accounts/{account_id}/operations` SSE | `bump_sequence` operation | - | 0.10.2 | 0.3.1 | +| `GET /effects` | `sequence_bumped` effect | - | 0.10.2 | 0.3.1 | +| `GET /effects` SSE | `sequence_bumped` effect | - | 0.10.2 | 0.3.1 | +| `GET /ledgers/{ledger_id}/operations` | `bump_sequence` operation | - | 0.10.2 | 0.3.1 | +| `GET /ledgers/{ledger_id}/operations` SSE | `bump_sequence` operation | - | 0.10.2 | 0.3.1 | +| `GET /ledgers/{ledger_id}/effects` | `sequence_bumped` effect | - | 0.10.2 | 0.3.1 | +| `GET /ledgers/{ledger_id}/effects` SSE | `sequence_bumped` effect | - | 0.10.2 | 0.3.1 | +| `GET /operations` | `bump_sequence` operation | - | 0.10.2 | 0.3.1 | +| `GET /operations` SSE | `bump_sequence` operation | - | 0.10.2 | 0.3.1 | +| `GET /operations/{op_id}` | `bump_sequence` operation | + | 0.10.2 | 0.3.1 | +| `GET /trades_aggregations` | Only specific time ranges allowed | + | 0.10.2 | 0.3.1 | +| `GET /transactions/{tx_id}/operations` | `bump_sequence` operation | - | 0.10.2 | 0.3.1 | +| `GET /transactions/{tx_id}/operations` SSE | `bump_sequence` operation | - | 0.10.2 | 0.3.1 | +| `GET /transactions/{tx_id}/effects` | `sequence_bumped` effect | - | 0.10.2 | 0.3.1 | +| `GET /transactions/{tx_id}/effects` SSE | `sequence_bumped` effect | - | 0.10.2 | 0.3.1 | + +### 0.13.0 + +#### SDKs with full support + +- [JS SDK 0.8.2](https://github.com/stellar/js-stellar-sdk/releases/tag/v0.8.2) +- [Java SDK 0.2.1](https://github.com/stellar/java-stellar-sdk/releases/tag/0.2.1) + +#### Changes + +- `amount` field in `/assets` is now a String (to support Stellar amounts larger than `int64`). +- Effect resource contains a new `created_at` field. 
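Because `amount` may exceed the range of an `int64`, clients that decode `/assets` responses themselves should keep the field as a string or parse it with arbitrary-precision types. A hypothetical sketch (the `assetStat` struct below is illustrative only, not an SDK type):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"math/big"
)

// assetStat is a trimmed-down, hypothetical view of an /assets record;
// only the amount field matters here.
type assetStat struct {
	Amount string `json:"amount"`
}

func main() {
	// An amount above MaxInt64/10^7 would overflow an int64 stroop count,
	// which is why the field is a string.
	raw := []byte(`{"amount":"1234567890123.4567890"}`)

	var stat assetStat
	if err := json.Unmarshal(raw, &stat); err != nil {
		log.Fatal(err)
	}

	// math/big handles any size safely.
	amount, ok := new(big.Rat).SetString(stat.Amount)
	if !ok {
		log.Fatalf("invalid amount: %q", stat.Amount)
	}
	fmt.Println("amount:", amount.FloatString(7))
}
```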
+ +| Resource | Changes | Go SDK 1 | JS SDK | Java SDK | +|:-----------------------------------------|:---------------------------------------------|:--------------------|:-------------------|:---------| +| `GET /assets` | `amount` can be larger than `MAX_INT64`/10^7 | + | 0.8.2 2 | 0.2.0 | +| `GET /ledgers/{ledger_id}/effects` | `created_at` field added | + | 0.8.2 2 | 0.2.1 | +| `GET /ledgers/{ledger_id}/effects` SSE | `created_at` field added | + | 0.8.2 2 | 0.2.1 | +| `GET /accounts/{account_id}/effects` | `created_at` field added | + | 0.8.2 2 | 0.2.1 | +| `GET /accounts/{account_id}/effects` SSE | `created_at` field added | + | 0.8.2 2 | 0.2.1 | +| `GET /transactions/{tx_id}/effects` | `created_at` field added | + | 0.8.2 2 | 0.2.1 | +| `GET /transactions/{tx_id}/effects` SSE | `created_at` field added | + | 0.8.2 2 | 0.2.1 | +| `GET /operations/{op_id}/effects` | `created_at` field added | + | 0.8.2 2 | 0.2.1 | +| `GET /operations/{op_id}/effects` SSE | `created_at` field added | + | 0.8.2 2 | 0.2.1 | +| `GET /effects` | `created_at` field added | + | 0.8.2 2 | 0.2.1 | +| `GET /effects` SSE | `created_at` field added | + | 0.8.2 2 | 0.2.1 | + +### 0.12.3 + +#### SDKs with full support + +- [JS SDK 0.8.2](https://github.com/stellar/js-stellar-sdk/releases/tag/v0.8.2) +- [Java SDK 0.2.1](https://github.com/stellar/java-stellar-sdk/releases/tag/0.2.1) + +#### Changes + +| Resource | Go SDK 1 | JS SDK | Java SDK | +|:----------------------------------------------|:-------------------------------|:-------|:--------------------------------------------------| +| `GET /` | +
(some `_links` missing) | - | 0.2.1 | +| `GET /metrics` | - | - | - | +| `GET /ledgers` | - | 0.8.2 | 0.2.0 | +| `GET /ledgers` SSE | + | 0.8.2 | 0.2.0 | +| `GET /ledgers/{ledger_id}` | - | 0.8.2 | 0.2.0 | +| `GET /ledgers/{ledger_id}/transactions` | - | 0.8.2 | 0.2.0 | +| `GET /ledgers/{ledger_id}/transactions` SSE | - | 0.8.2 | 0.2.0 | +| `GET /ledgers/{ledger_id}/operations` | - | 0.8.2 | 0.2.0 | +| `GET /ledgers/{ledger_id}/operations` SSE | - | 0.8.2 | 0.2.1 | +| `GET /ledgers/{ledger_id}/payments` | - | 0.8.2 | 0.2.0 | +| `GET /ledgers/{ledger_id}/payments` SSE | - | 0.8.2 | 0.2.0 | +| `GET /ledgers/{ledger_id}/effects` | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /ledgers/{ledger_id}/effects` SSE | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /accounts/{account_id}` | + | 0.8.2 | 0.2.0 | +| `GET /accounts/{account_id}/transactions` | - | 0.8.2 | 0.2.0 | +| `GET /accounts/{account_id}/transactions` SSE | - | 0.8.2 | 0.2.0 | +| `GET /accounts/{account_id}/operations` | - | 0.8.2 | 0.2.0 | +| `GET /accounts/{account_id}/operations` SSE | - | 0.8.2 | 0.2.1 | +| `GET /accounts/{account_id}/payments` | - | 0.8.2 | 0.2.0 | +| `GET /accounts/{account_id}/payments` SSE | - | 0.8.2 | 0.2.0 | +| `GET /accounts/{account_id}/effects` | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /accounts/{account_id}/effects` SSE | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /accounts/{account_id}/offers` | + | 0.8.2 | 0.2.0 | +| `GET /accounts/{account_id}/trades` | - | 0.8.2 | 0.2.1 | +| `GET /accounts/{account_id}/data/{key}` | - | - | - | +| `POST /transactions` | - | 0.8.2 | 0.2.0 | +| `GET /transactions` | + | 0.8.2 | 0.2.0 | +| `GET /transactions` SSE | + | 0.8.2 | 0.2.0 | +| `GET /transactions/{tx_id}` | + | 0.8.2 | 0.2.0 | +| `GET /transactions/{tx_id}/operations` | - | 0.8.2 | 0.2.0 | +| `GET /transactions/{tx_id}/operations` SSE | - | 0.8.2 | 0.2.1 | +| `GET /transactions/{tx_id}/payments` | - | 0.8.2 | 0.2.0 | +| `GET /transactions/{tx_id}/payments` SSE | - | 0.8.2 | 0.2.0 | +| `GET /transactions/{tx_id}/effects` | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /transactions/{tx_id}/effects` SSE | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /operations` | - | 0.8.2 | 0.2.0 | +| `GET /operations` SSE | - | 0.8.2 | 0.2.1 | +| `GET /operations/{op_id}` | - | 0.8.2 | 0.2.0 | +| `GET /operations/{op_id}/effects` | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /operations/{op_id}/effects` SSE | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /payments` | - | 0.8.2 | 0.2.0 | +| `GET /payments` SSE | + | 0.8.2 | 0.2.0 | +| `GET /effects` | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /effects` SSE | - | 0.8.2 | 0.2.0
(no support for data, inflation types) | +| `GET /trades` | + | 0.8.2 | 0.2.0
(no `price` field) | +| `GET /trades_aggregations` | + | 0.8.2 | 0.2.0 | +| `GET /offers/{offer_id}/trades` | - | 0.8.2 | 0.2.1 | +| `GET /order_book` | + | 0.8.2 | 0.2.0 | +| `GET /order_book` SSE | - | 0.8.2 | 0.2.1 | +| `GET /paths` | - | 0.8.2 | 0.2.0 | +| `GET /assets` | - | 0.8.2 | 0.2.0 | + +1 We don't do proper versioning for GO SDK yet. `+` means implemented in `master` branch. + +2 Native JSON support in JS, no changes needed. diff --git a/protocols/horizon/base/main.go b/protocols/horizon/base/main.go new file mode 100644 index 0000000000..a2f3cc5a3b --- /dev/null +++ b/protocols/horizon/base/main.go @@ -0,0 +1,34 @@ +package base + +type Price struct { + N int32 `json:"n"` + D int32 `json:"d"` +} + +type Asset struct { + Type string `json:"asset_type"` + Code string `json:"asset_code,omitempty"` + Issuer string `json:"asset_issuer,omitempty"` +} + +type AssetAmount struct { + // Asset may be empty when unknown (e.g. when used in the representation of operations whose transaction failed) + Asset string `json:"asset,omitempty"` + Amount string `json:"amount"` +} + +type LiquidityPoolOrAsset struct { + Asset + LiquidityPoolID string `json:"liquidity_pool_id,omitempty"` +} + +// Rehydratable values can be expanded in place by calling their Rehydrate +// method. This mechanism is intended to be used for populating resource +// structs from database structs when custom logic is needed, for example if a +// resource name has been changed but the underlying database record has not. +// This interface is especially useful to facilitate field deprecation: Add a +// new field to the response struct and implement this interface to copy the +// value from the old field to the new field. +type Rehydratable interface { + Rehydrate() error +} diff --git a/protocols/horizon/effects/main.go b/protocols/horizon/effects/main.go new file mode 100644 index 0000000000..5d04dff8c3 --- /dev/null +++ b/protocols/horizon/effects/main.go @@ -0,0 +1,968 @@ +package effects + +import ( + "encoding/json" + "time" + + "github.com/stellar/go/protocols/horizon/base" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +// Peter 30-04-2019: this is copied from the history package "github.com/stellar/go/services/horizon/internal/db2/history" +// Could not import this because internal package imports must share the same path prefix as the importer. +// Maybe this should be housed here and imported into internal packages? + +// EffectType is the numeric type for an effect +type EffectType int + +const ( + // account effects + + // EffectAccountCreated effects occur when a new account is created + EffectAccountCreated EffectType = 0 // from create_account + + // EffectAccountRemoved effects occur when one account is merged into another + EffectAccountRemoved EffectType = 1 // from merge_account + + // EffectAccountCredited effects occur when an account receives some currency + EffectAccountCredited EffectType = 2 // from create_account, payment, path_payment, merge_account + + // EffectAccountDebited effects occur when an account sends some currency + EffectAccountDebited EffectType = 3 // from create_account, payment, path_payment, create_account + + // EffectAccountThresholdsUpdated effects occur when an account changes its + // multisig thresholds. + EffectAccountThresholdsUpdated EffectType = 4 // from set_options + + // EffectAccountHomeDomainUpdated effects occur when an account changes its + // home domain. 
+ EffectAccountHomeDomainUpdated EffectType = 5 // from set_options + + // EffectAccountFlagsUpdated effects occur when an account changes its + // account flags, either clearing or setting. + EffectAccountFlagsUpdated EffectType = 6 // from set_options + + // unused + // EffectAccountInflationDestinationUpdated effects occur when an account changes its + // inflation destination. + EffectAccountInflationDestinationUpdated EffectType = 7 // from set_options + + // signer effects + + // EffectSignerCreated occurs when an account gains a signer + EffectSignerCreated EffectType = 10 // from set_options + + // EffectSignerRemoved occurs when an account loses a signer + EffectSignerRemoved EffectType = 11 // from set_options + + // EffectSignerUpdated occurs when an account changes the weight of one of its + // signers. + EffectSignerUpdated EffectType = 12 // from set_options + + // trustline effects + + // EffectTrustlineCreated occurs when an account trusts an anchor + EffectTrustlineCreated EffectType = 20 // from change_trust + + // EffectTrustlineRemoved occurs when an account removes struct by setting the + // limit of a trustline to 0 + EffectTrustlineRemoved EffectType = 21 // from change_trust + + // EffectTrustlineUpdated occurs when an account changes a trustline's limit + EffectTrustlineUpdated EffectType = 22 // from change_trust, allow_trust + + // Deprecated: use EffectTrustlineFlagsUpdated instead + // EffectTrustlineAuthorized occurs when an anchor has AUTH_REQUIRED flag set + // to true and it authorizes another account's trustline + EffectTrustlineAuthorized EffectType = 23 // from allow_trust + + // Deprecated: use EffectTrustlineFlagsUpdated instead + // EffectTrustlineDeauthorized occurs when an anchor revokes access to a asset + // it issues. + EffectTrustlineDeauthorized EffectType = 24 // from allow_trust + + // Deprecated: use EffectTrustlineFlagsUpdated instead + // EffectTrustlineAuthorizedToMaintainLiabilities occurs when an anchor has AUTH_REQUIRED flag set + // to true and it authorizes another account's trustline to maintain liabilities + EffectTrustlineAuthorizedToMaintainLiabilities EffectType = 25 // from allow_trust + + // EffectTrustlineFlagsUpdated effects occur when a TrustLine changes its + // flags, either clearing or setting. + EffectTrustlineFlagsUpdated EffectType = 26 // from set_trust_line flags + + // trading effects + + // unused + // EffectOfferCreated occurs when an account offers to trade an asset + // EffectOfferCreated EffectType = 30 // from manage_offer, creat_passive_offer + // EffectOfferRemoved occurs when an account removes an offer + // EffectOfferRemoved EffectType = 31 // from manage_offer, creat_passive_offer, path_payment + // EffectOfferUpdated occurs when an offer is updated by the offering account. + // EffectOfferUpdated EffectType = 32 // from manage_offer, creat_passive_offer, path_payment + + // EffectTrade occurs when a trade is initiated because of a path payment or + // offer operation. 
+ EffectTrade EffectType = 33 // from manage_offer, creat_passive_offer, path_payment + + // data effects + + // EffectDataCreated occurs when an account gets a new data field + EffectDataCreated EffectType = 40 // from manage_data + + // EffectDataRemoved occurs when an account removes a data field + EffectDataRemoved EffectType = 41 // from manage_data + + // EffectDataUpdated occurs when an account changes a data field's value + EffectDataUpdated EffectType = 42 // from manage_data + + // EffectSequenceBumped occurs when an account bumps their sequence number + EffectSequenceBumped EffectType = 43 // from bump_sequence + + // claimable balance effects + + // EffectClaimableBalanceCreated occurs when a claimable balance is created + EffectClaimableBalanceCreated EffectType = 50 // from create_claimable_balance + + // EffectClaimableBalanceClaimantCreated occurs when a claimable balance claimant is created + EffectClaimableBalanceClaimantCreated EffectType = 51 // from create_claimable_balance + + // EffectClaimableBalanceClaimed occurs when a claimable balance is claimed + EffectClaimableBalanceClaimed EffectType = 52 // from claim_claimable_balance + + // sponsorship effects + + // EffectAccountSponsorshipCreated occurs when an account ledger entry is sponsored + EffectAccountSponsorshipCreated EffectType = 60 // from create_account + + // EffectAccountSponsorshipUpdated occurs when the sponsoring of an account ledger entry is updated + EffectAccountSponsorshipUpdated EffectType = 61 // from revoke_sponsorship + + // EffectAccountSponsorshipRemoved occurs when the sponsorship of an account ledger entry is removed + EffectAccountSponsorshipRemoved EffectType = 62 // from revoke_sponsorship + + // EffectTrustlineSponsorshipCreated occurs when a trustline ledger entry is sponsored + EffectTrustlineSponsorshipCreated EffectType = 63 // from change_trust + + // EffectTrustlineSponsorshipUpdated occurs when the sponsoring of a trustline ledger entry is updated + EffectTrustlineSponsorshipUpdated EffectType = 64 // from revoke_sponsorship + + // EffectTrustlineSponsorshipRemoved occurs when the sponsorship of a trustline ledger entry is removed + EffectTrustlineSponsorshipRemoved EffectType = 65 // from revoke_sponsorship + + // EffectDataSponsorshipCreated occurs when a trustline ledger entry is sponsored + EffectDataSponsorshipCreated EffectType = 66 // from manage_data + + // EffectDataSponsorshipUpdated occurs when the sponsoring of a trustline ledger entry is updated + EffectDataSponsorshipUpdated EffectType = 67 // from revoke_sponsorship + + // EffectDataSponsorshipRemoved occurs when the sponsorship of a trustline ledger entry is removed + EffectDataSponsorshipRemoved EffectType = 68 // from revoke_sponsorship + + // EffectClaimableBalanceSponsorshipCreated occurs when a claimable balance ledger entry is sponsored + EffectClaimableBalanceSponsorshipCreated EffectType = 69 // from create_claimable_balance + + // EffectClaimableBalanceSponsorshipUpdated occurs when the sponsoring of a claimable balance ledger entry + // is updated + EffectClaimableBalanceSponsorshipUpdated EffectType = 70 // from revoke_sponsorship + + // EffectClaimableBalanceSponsorshipRemoved occurs when the sponsorship of a claimable balance ledger entry + // is removed + EffectClaimableBalanceSponsorshipRemoved EffectType = 71 // from claim_claimable_balance + + // EffectSignerSponsorshipCreated occurs when the sponsorship of a signer is created + EffectSignerSponsorshipCreated EffectType = 72 // from set_options + 
+ // EffectSignerSponsorshipUpdated occurs when the sponsorship of a signer is updated + EffectSignerSponsorshipUpdated EffectType = 73 // from revoke_sponsorship + + // EffectSignerSponsorshipRemoved occurs when the sponsorship of a signer is removed + EffectSignerSponsorshipRemoved EffectType = 74 // from revoke_sponsorship + + // EffectClaimableBalanceClawedBack occurs when a claimable balance is clawed back + EffectClaimableBalanceClawedBack EffectType = 80 // from clawback_claimable_balance + + // EffectLiquidityPoolDeposited occurs when a liquidity pool incurs a deposit + EffectLiquidityPoolDeposited EffectType = 90 // from liquidity_pool_deposit + + // EffectLiquidityPoolWithdrew occurs when a liquidity pool incurs a withdrawal + EffectLiquidityPoolWithdrew EffectType = 91 // from liquidity_pool_withdraw + + // EffectLiquidityPoolTrade occurs when a trade happens in a liquidity pool + EffectLiquidityPoolTrade EffectType = 92 + + // EffectLiquidityPoolCreated occurs when a liquidity pool is created + EffectLiquidityPoolCreated EffectType = 93 // from change_trust + + // EffectLiquidityPoolRemoved occurs when a liquidity pool is removed + EffectLiquidityPoolRemoved EffectType = 94 // from change_trust + + // EffectLiquidityPoolRevoked occurs when a liquidity pool is revoked + EffectLiquidityPoolRevoked EffectType = 95 // from change_trust_line_flags and allow_trust +) + +// Peter 30-04-2019: this is copied from the resourcadapter package +// "github.com/stellar/go/services/horizon/internal/resourceadapter" +// Could not import this because internal package imports must share the same path prefix as the importer. + +// EffectTypeNames stores a map of effect type ID and names +var EffectTypeNames = map[EffectType]string{ + EffectAccountCreated: "account_created", + EffectAccountRemoved: "account_removed", + EffectAccountCredited: "account_credited", + EffectAccountDebited: "account_debited", + EffectAccountThresholdsUpdated: "account_thresholds_updated", + EffectAccountHomeDomainUpdated: "account_home_domain_updated", + EffectAccountFlagsUpdated: "account_flags_updated", + EffectAccountInflationDestinationUpdated: "account_inflation_destination_updated", + EffectSignerCreated: "signer_created", + EffectSignerRemoved: "signer_removed", + EffectSignerUpdated: "signer_updated", + EffectTrustlineCreated: "trustline_created", + EffectTrustlineRemoved: "trustline_removed", + EffectTrustlineUpdated: "trustline_updated", + EffectTrustlineAuthorized: "trustline_authorized", + EffectTrustlineAuthorizedToMaintainLiabilities: "trustline_authorized_to_maintain_liabilities", + EffectTrustlineDeauthorized: "trustline_deauthorized", + EffectTrustlineFlagsUpdated: "trustline_flags_updated", + // unused + // EffectOfferCreated: "offer_created", + // EffectOfferRemoved: "offer_removed", + // EffectOfferUpdated: "offer_updated", + EffectTrade: "trade", + EffectDataCreated: "data_created", + EffectDataRemoved: "data_removed", + EffectDataUpdated: "data_updated", + EffectSequenceBumped: "sequence_bumped", + EffectClaimableBalanceCreated: "claimable_balance_created", + EffectClaimableBalanceClaimed: "claimable_balance_claimed", + EffectClaimableBalanceClaimantCreated: "claimable_balance_claimant_created", + EffectAccountSponsorshipCreated: "account_sponsorship_created", + EffectAccountSponsorshipUpdated: "account_sponsorship_updated", + EffectAccountSponsorshipRemoved: "account_sponsorship_removed", + EffectTrustlineSponsorshipCreated: "trustline_sponsorship_created", + EffectTrustlineSponsorshipUpdated: 
"trustline_sponsorship_updated", + EffectTrustlineSponsorshipRemoved: "trustline_sponsorship_removed", + EffectDataSponsorshipCreated: "data_sponsorship_created", + EffectDataSponsorshipUpdated: "data_sponsorship_updated", + EffectDataSponsorshipRemoved: "data_sponsorship_removed", + EffectClaimableBalanceSponsorshipCreated: "claimable_balance_sponsorship_created", + EffectClaimableBalanceSponsorshipUpdated: "claimable_balance_sponsorship_updated", + EffectClaimableBalanceSponsorshipRemoved: "claimable_balance_sponsorship_removed", + EffectSignerSponsorshipCreated: "signer_sponsorship_created", + EffectSignerSponsorshipUpdated: "signer_sponsorship_updated", + EffectSignerSponsorshipRemoved: "signer_sponsorship_removed", + EffectClaimableBalanceClawedBack: "claimable_balance_clawed_back", + EffectLiquidityPoolDeposited: "liquidity_pool_deposited", + EffectLiquidityPoolWithdrew: "liquidity_pool_withdrew", + EffectLiquidityPoolTrade: "liquidity_pool_trade", + EffectLiquidityPoolCreated: "liquidity_pool_created", + EffectLiquidityPoolRemoved: "liquidity_pool_removed", + EffectLiquidityPoolRevoked: "liquidity_pool_revoked", +} + +// Base provides the common structure for any effect resource effect. +type Base struct { + Links struct { + Operation hal.Link `json:"operation"` + Succeeds hal.Link `json:"succeeds"` + Precedes hal.Link `json:"precedes"` + } `json:"_links"` + + ID string `json:"id"` + PT string `json:"paging_token"` + Account string `json:"account"` + AccountMuxed string `json:"account_muxed,omitempty"` + AccountMuxedID uint64 `json:"account_muxed_id,omitempty,string"` + Type string `json:"type"` + TypeI int32 `json:"type_i"` + LedgerCloseTime time.Time `json:"created_at"` +} + +// PagingToken implements `hal.Pageable` and Effect +func (b Base) PagingToken() string { + return b.PT +} + +type AccountCreated struct { + Base + StartingBalance string `json:"starting_balance"` +} + +type AccountCredited struct { + Base + base.Asset + Amount string `json:"amount"` +} + +type AccountDebited struct { + Base + base.Asset + Amount string `json:"amount"` +} + +type AccountThresholdsUpdated struct { + Base + LowThreshold int32 `json:"low_threshold"` + MedThreshold int32 `json:"med_threshold"` + HighThreshold int32 `json:"high_threshold"` +} + +type AccountHomeDomainUpdated struct { + Base + HomeDomain string `json:"home_domain"` +} + +type AccountFlagsUpdated struct { + Base + AuthRequired *bool `json:"auth_required_flag,omitempty"` + AuthRevokable *bool `json:"auth_revokable_flag,omitempty"` +} + +type DataCreated struct { + Base + Name string `json:"name"` + Value string `json:"value"` +} + +type DataUpdated struct { + Base + Name string `json:"name"` + Value string `json:"value"` +} + +type DataRemoved struct { + Base + Name string `json:"name"` +} + +type SequenceBumped struct { + Base + NewSeq int64 `json:"new_seq,string"` +} + +type SignerCreated struct { + Base + Weight int32 `json:"weight"` + PublicKey string `json:"public_key"` + Key string `json:"key"` +} + +type SignerRemoved struct { + Base + Weight int32 `json:"weight"` + PublicKey string `json:"public_key"` + Key string `json:"key"` +} + +type SignerUpdated struct { + Base + Weight int32 `json:"weight"` + PublicKey string `json:"public_key"` + Key string `json:"key"` +} + +type TrustlineCreated struct { + Base + base.LiquidityPoolOrAsset + Limit string `json:"limit"` +} + +type TrustlineRemoved struct { + Base + base.LiquidityPoolOrAsset + Limit string `json:"limit"` +} + +type TrustlineUpdated struct { + Base + 
base.LiquidityPoolOrAsset + Limit string `json:"limit"` +} + +// Deprecated: use TrustlineFlagsUpdated instead +type TrustlineAuthorized struct { + Base + Trustor string `json:"trustor"` + AssetType string `json:"asset_type"` + AssetCode string `json:"asset_code,omitempty"` +} + +// Deprecated: use TrustlineFlagsUpdated instead +type TrustlineAuthorizedToMaintainLiabilities struct { + Base + Trustor string `json:"trustor"` + AssetType string `json:"asset_type"` + AssetCode string `json:"asset_code,omitempty"` +} + +// Deprecated: use TrustlineFlagsUpdated instead +type TrustlineDeauthorized struct { + Base + Trustor string `json:"trustor"` + AssetType string `json:"asset_type"` + AssetCode string `json:"asset_code,omitempty"` +} + +type Trade struct { + Base + Seller string `json:"seller"` + SellerMuxed string `json:"seller_muxed,omitempty"` + SellerMuxedID uint64 `json:"seller_muxed_id,omitempty,string"` + OfferID int64 `json:"offer_id,string"` + SoldAmount string `json:"sold_amount"` + SoldAssetType string `json:"sold_asset_type"` + SoldAssetCode string `json:"sold_asset_code,omitempty"` + SoldAssetIssuer string `json:"sold_asset_issuer,omitempty"` + BoughtAmount string `json:"bought_amount"` + BoughtAssetType string `json:"bought_asset_type"` + BoughtAssetCode string `json:"bought_asset_code,omitempty"` + BoughtAssetIssuer string `json:"bought_asset_issuer,omitempty"` +} + +type ClaimableBalanceCreated struct { + Base + Asset string `json:"asset"` + BalanceID string `json:"balance_id"` + Amount string `json:"amount"` +} + +type ClaimableBalanceClaimed struct { + Base + Asset string `json:"asset"` + BalanceID string `json:"balance_id"` + Amount string `json:"amount"` +} + +type ClaimableBalanceClaimantCreated struct { + Base + Asset string `json:"asset"` + BalanceID string `json:"balance_id"` + Amount string `json:"amount"` + Predicate xdr.ClaimPredicate `json:"predicate"` +} + +type AccountSponsorshipCreated struct { + Base + Sponsor string `json:"sponsor"` +} + +type AccountSponsorshipUpdated struct { + Base + FormerSponsor string `json:"former_sponsor"` + NewSponsor string `json:"new_sponsor"` +} + +type AccountSponsorshipRemoved struct { + Base + FormerSponsor string `json:"former_sponsor"` +} + +type TrustlineSponsorshipCreated struct { + Base + Type string `json:"asset_type"` + Asset string `json:"asset,omitempty"` + LiquidityPoolID string `json:"liquidity_pool_id,omitempty"` + Sponsor string `json:"sponsor"` +} + +type TrustlineSponsorshipUpdated struct { + Base + Type string `json:"asset_type"` + Asset string `json:"asset,omitempty"` + LiquidityPoolID string `json:"liquidity_pool_id,omitempty"` + FormerSponsor string `json:"former_sponsor"` + NewSponsor string `json:"new_sponsor"` +} + +type TrustlineSponsorshipRemoved struct { + Base + Type string `json:"asset_type"` + Asset string `json:"asset,omitempty"` + LiquidityPoolID string `json:"liquidity_pool_id,omitempty"` + FormerSponsor string `json:"former_sponsor"` +} + +type DataSponsorshipCreated struct { + Base + DataName string `json:"data_name"` + Sponsor string `json:"sponsor"` +} + +type DataSponsorshipUpdated struct { + Base + DataName string `json:"data_name"` + FormerSponsor string `json:"former_sponsor"` + NewSponsor string `json:"new_sponsor"` +} + +type DataSponsorshipRemoved struct { + Base + DataName string `json:"data_name"` + FormerSponsor string `json:"former_sponsor"` +} + +type ClaimableBalanceSponsorshipCreated struct { + Base + BalanceID string `json:"balance_id"` + Sponsor string `json:"sponsor"` +} + +type 
ClaimableBalanceSponsorshipUpdated struct { + Base + BalanceID string `json:"balance_id"` + FormerSponsor string `json:"former_sponsor"` + NewSponsor string `json:"new_sponsor"` +} + +type ClaimableBalanceSponsorshipRemoved struct { + Base + BalanceID string `json:"balance_id"` + FormerSponsor string `json:"former_sponsor"` +} + +type SignerSponsorshipCreated struct { + Base + Signer string `json:"signer"` + Sponsor string `json:"sponsor"` +} + +type SignerSponsorshipUpdated struct { + Base + Signer string `json:"signer"` + FormerSponsor string `json:"former_sponsor"` + NewSponsor string `json:"new_sponsor"` +} + +type SignerSponsorshipRemoved struct { + Base + Signer string `json:"signer"` + FormerSponsor string `json:"former_sponsor"` +} + +type ClaimableBalanceClawedBack struct { + Base + BalanceID string `json:"balance_id"` +} + +type TrustlineFlagsUpdated struct { + Base + base.Asset + Trustor string `json:"trustor"` + Authorized *bool `json:"authorized_flag,omitempty"` + AuthorizedToMaintainLiabilities *bool `json:"authorized_to_maintain_liabilites_flag,omitempty"` + ClawbackEnabled *bool `json:"clawback_enabled_flag,omitempty"` +} + +type LiquidityPool struct { + ID string `json:"id"` + FeeBP uint32 `json:"fee_bp"` + Type string `json:"type"` + TotalTrustlines uint64 `json:"total_trustlines,string"` + TotalShares string `json:"total_shares"` + Reserves []base.AssetAmount `json:"reserves"` +} + +type LiquidityPoolDeposited struct { + Base + LiquidityPool LiquidityPool `json:"liquidity_pool"` + ReservesDeposited []base.AssetAmount `json:"reserves_deposited"` + SharesReceived string `json:"shares_received"` +} + +type LiquidityPoolWithdrew struct { + Base + LiquidityPool LiquidityPool `json:"liquidity_pool"` + ReservesReceived []base.AssetAmount `json:"reserves_received"` + SharesRedeemed string `json:"shares_redeemed"` +} + +type LiquidityPoolTrade struct { + Base + LiquidityPool LiquidityPool `json:"liquidity_pool"` + Sold base.AssetAmount `json:"sold"` + Bought base.AssetAmount `json:"bought"` +} + +type LiquidityPoolCreated struct { + Base + LiquidityPool LiquidityPool `json:"liquidity_pool"` +} + +type LiquidityPoolRemoved struct { + Base + LiquidityPoolID string `json:"liquidity_pool_id"` +} + +type LiquidityPoolClaimableAssetAmount struct { + Asset string `json:"asset"` + Amount string `json:"amount"` + ClaimableBalanceID string `json:"claimable_balance_id"` +} + +type LiquidityPoolRevoked struct { + Base + LiquidityPool LiquidityPool `json:"liquidity_pool"` + ReservesRevoked []LiquidityPoolClaimableAssetAmount `json:"reserves_revoked"` + SharesRevoked string `json:"shares_revoked"` +} + +// Effect contains methods that are implemented by all effect types. +type Effect interface { + PagingToken() string + GetType() string + GetID() string + GetAccount() string +} + +// GetType implements Effect +func (b Base) GetType() string { + return b.Type +} + +// GetID implements Effect +func (b Base) GetID() string { + return b.ID +} + +// GetAccount implements Effect +func (b Base) GetAccount() string { + return b.Account +} + +// EffectsPage contains page of effects returned by Horizon. 
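The Effect interface above, together with EffectsPage and the UnmarshalEffect helper defined just below, is what callers use to work with mixed effect records. A minimal usage sketch follows; the payload is invented, and the AccountCreated and AccountDebited types (with their StartingBalance and Amount fields) are assumed from earlier in this file.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/stellar/go/protocols/horizon/effects"
)

func main() {
	// A trimmed-down effects page; field values are illustrative only.
	raw := []byte(`{
		"_links": {"self": {"href": "https://horizon.stellar.org/effects?limit=2"}},
		"_embedded": {"records": [
			{"id": "0000000012884905985-0000000001", "paging_token": "12884905985-1",
			 "account": "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3",
			 "type": "account_created", "starting_balance": "10000.0000000"},
			{"id": "0000000012884905985-0000000002", "paging_token": "12884905985-2",
			 "account": "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3",
			 "type": "account_debited", "asset_type": "native", "amount": "100.0000000"}
		]}
	}`)

	var page effects.EffectsPage
	if err := json.Unmarshal(raw, &page); err != nil {
		panic(err)
	}

	for _, record := range page.Embedded.Records {
		// Every record satisfies the Effect interface.
		fmt.Println(record.GetType(), record.GetID(), record.GetAccount())

		// Type-assert to the concrete struct when the extra fields are needed.
		if created, ok := record.(effects.AccountCreated); ok {
			fmt.Println("starting balance:", created.StartingBalance)
		}
	}
}

Because UnmarshalEffect stores value types in the returned Effect, type assertions use the concrete struct rather than a pointer.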
+type EffectsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []Effect + } `json:"_embedded"` +} + +// UnmarshalJSON is the custom unmarshal method for EffectsPage +func (effects *EffectsPage) UnmarshalJSON(data []byte) error { + var effectsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []interface{} + } `json:"_embedded"` + } + + if err := json.Unmarshal(data, &effectsPage); err != nil { + return err + } + + for _, j := range effectsPage.Embedded.Records { + var b Base + dataString, err := json.Marshal(j) + if err != nil { + return err + } + if err = json.Unmarshal(dataString, &b); err != nil { + return err + } + + ef, err := UnmarshalEffect(b.Type, dataString) + if err != nil { + return err + } + + effects.Embedded.Records = append(effects.Embedded.Records, ef) + } + + effects.Links = effectsPage.Links + return nil +} + +// UnmarshalEffect decodes responses to the correct effect struct +func UnmarshalEffect(effectType string, dataString []byte) (effects Effect, err error) { + switch effectType { + case EffectTypeNames[EffectAccountCreated]: + var effect AccountCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectAccountCredited]: + var effect AccountCredited + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectAccountDebited]: + var effect AccountDebited + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectAccountThresholdsUpdated]: + var effect AccountThresholdsUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectAccountHomeDomainUpdated]: + var effect AccountHomeDomainUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectAccountFlagsUpdated]: + var effect AccountFlagsUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectSequenceBumped]: + var effect SequenceBumped + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectSignerCreated]: + var effect SignerCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectSignerRemoved]: + var effect SignerRemoved + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectSignerUpdated]: + var effect SignerUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrustlineAuthorized]: + var effect TrustlineAuthorized + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrustlineAuthorizedToMaintainLiabilities]: + var effect TrustlineAuthorizedToMaintainLiabilities + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrustlineCreated]: + var effect TrustlineCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrustlineDeauthorized]: + var effect TrustlineDeauthorized + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case 
EffectTypeNames[EffectTrustlineFlagsUpdated]: + var effect TrustlineFlagsUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrustlineRemoved]: + var effect TrustlineRemoved + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrustlineUpdated]: + var effect TrustlineUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrade]: + var effect Trade + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectDataCreated]: + var effect DataCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectDataUpdated]: + var effect DataUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectDataRemoved]: + var effect DataRemoved + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectClaimableBalanceCreated]: + var effect ClaimableBalanceCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectClaimableBalanceClaimed]: + var effect ClaimableBalanceClaimed + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectClaimableBalanceClaimantCreated]: + var effect ClaimableBalanceClaimantCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectAccountSponsorshipCreated]: + var effect AccountSponsorshipCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectAccountSponsorshipUpdated]: + var effect AccountSponsorshipUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectAccountSponsorshipRemoved]: + var effect AccountSponsorshipRemoved + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrustlineSponsorshipCreated]: + var effect TrustlineSponsorshipCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrustlineSponsorshipUpdated]: + var effect TrustlineSponsorshipUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectTrustlineSponsorshipRemoved]: + var effect TrustlineSponsorshipRemoved + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectDataSponsorshipCreated]: + var effect DataSponsorshipCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectDataSponsorshipUpdated]: + var effect DataSponsorshipUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectDataSponsorshipRemoved]: + var effect DataSponsorshipRemoved + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectClaimableBalanceSponsorshipCreated]: + var effect ClaimableBalanceSponsorshipCreated + if err = 
json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectClaimableBalanceSponsorshipUpdated]: + var effect ClaimableBalanceSponsorshipUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectClaimableBalanceSponsorshipRemoved]: + var effect ClaimableBalanceSponsorshipRemoved + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectSignerSponsorshipCreated]: + var effect SignerSponsorshipCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectSignerSponsorshipUpdated]: + var effect SignerSponsorshipUpdated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectSignerSponsorshipRemoved]: + var effect SignerSponsorshipRemoved + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectClaimableBalanceClawedBack]: + var effect ClaimableBalanceClawedBack + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectLiquidityPoolDeposited]: + var effect LiquidityPoolDeposited + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectLiquidityPoolWithdrew]: + var effect LiquidityPoolWithdrew + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectLiquidityPoolTrade]: + var effect LiquidityPoolTrade + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectLiquidityPoolCreated]: + var effect LiquidityPoolCreated + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectLiquidityPoolRemoved]: + var effect LiquidityPoolRemoved + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + case EffectTypeNames[EffectLiquidityPoolRevoked]: + var effect LiquidityPoolRevoked + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + default: + var effect Base + if err = json.Unmarshal(dataString, &effect); err != nil { + return + } + effects = effect + } + return +} + +// interface implementations +var _ base.Rehydratable = &SignerCreated{} +var _ base.Rehydratable = &SignerRemoved{} +var _ base.Rehydratable = &SignerUpdated{} diff --git a/protocols/horizon/effects/signer_effects.go b/protocols/horizon/effects/signer_effects.go new file mode 100644 index 0000000000..5cdf52e945 --- /dev/null +++ b/protocols/horizon/effects/signer_effects.go @@ -0,0 +1,19 @@ +package effects + +// Rehydrate implements base.Rehydratable interface +func (sc *SignerCreated) Rehydrate() error { + sc.Key = sc.PublicKey + return nil +} + +// Rehydrate implements base.Rehydratable interface +func (sr *SignerRemoved) Rehydrate() error { + sr.Key = sr.PublicKey + return nil +} + +// Rehydrate implements base.Rehydratable interface +func (su *SignerUpdated) Rehydrate() error { + su.Key = su.PublicKey + return nil +} diff --git a/protocols/horizon/main.go b/protocols/horizon/main.go new file mode 100644 index 0000000000..41d4522e6c --- /dev/null +++ b/protocols/horizon/main.go @@ -0,0 +1,819 @@ +// Package horizon contains the type definitions for all of horizon's 
+// response resources. +package horizon + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "math" + "math/big" + "strconv" + "time" + + "github.com/stellar/go/protocols/horizon/base" + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +// KeyTypeNames maps from strkey version bytes into json string values to use in +// horizon responses. +var KeyTypeNames = map[strkey.VersionByte]string{ + strkey.VersionByteAccountID: "ed25519_public_key", + strkey.VersionByteSeed: "ed25519_secret_seed", + strkey.VersionByteHashX: "sha256_hash", + strkey.VersionByteHashTx: "preauth_tx", +} + +// Account is the summary of an account +type Account struct { + Links struct { + Self hal.Link `json:"self"` + Transactions hal.Link `json:"transactions"` + Operations hal.Link `json:"operations"` + Payments hal.Link `json:"payments"` + Effects hal.Link `json:"effects"` + Offers hal.Link `json:"offers"` + Trades hal.Link `json:"trades"` + Data hal.Link `json:"data"` + } `json:"_links"` + + ID string `json:"id"` + AccountID string `json:"account_id"` + Sequence string `json:"sequence"` + SubentryCount int32 `json:"subentry_count"` + InflationDestination string `json:"inflation_destination,omitempty"` + HomeDomain string `json:"home_domain,omitempty"` + LastModifiedLedger uint32 `json:"last_modified_ledger"` + LastModifiedTime *time.Time `json:"last_modified_time"` + Thresholds AccountThresholds `json:"thresholds"` + Flags AccountFlags `json:"flags"` + Balances []Balance `json:"balances"` + Signers []Signer `json:"signers"` + Data map[string]string `json:"data"` + NumSponsoring uint32 `json:"num_sponsoring"` + NumSponsored uint32 `json:"num_sponsored"` + Sponsor string `json:"sponsor,omitempty"` + PT string `json:"paging_token"` +} + +// PagingToken implementation for hal.Pageable +func (res Account) PagingToken() string { + return res.PT +} + +// GetAccountID returns the Stellar account ID. This is to satisfy the +// Account interface of txnbuild. +func (a Account) GetAccountID() string { + return a.AccountID +} + +// GetNativeBalance returns the native balance of the account +func (a Account) GetNativeBalance() (string, error) { + for _, balance := range a.Balances { + if balance.Asset.Type == "native" { + return balance.Balance, nil + } + } + + return "0", errors.New("account does not have a native balance") +} + +// GetCreditBalance returns the balance for given code and issuer +func (a Account) GetCreditBalance(code string, issuer string) string { + for _, balance := range a.Balances { + if balance.Asset.Code == code && balance.Asset.Issuer == issuer { + return balance.Balance + } + } + + return "0" +} + +// GetSequenceNumber returns the sequence number of the account, +// and returns it as a 64-bit integer. +func (a Account) GetSequenceNumber() (int64, error) { + seqNum, err := strconv.ParseInt(a.Sequence, 10, 64) + if err != nil { + return 0, errors.Wrap(err, "Failed to parse account sequence number") + } + + return seqNum, nil +} + +// IncrementSequenceNumber increments the internal record of the account's sequence +// number by 1. This is typically used after a transaction build so that the next +// transaction to be built will be valid. 
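A minimal sketch of the Account helpers declared above and just below (GetNativeBalance, GetSequenceNumber, IncrementSequenceNumber, GetData, SignerSummary). All field values are invented, the account ID is borrowed from the test fixtures later in this patch, and the base.Asset composite literal assumes the exported Type field that package base provides.

package main

import (
	"fmt"

	"github.com/stellar/go/protocols/horizon"
	"github.com/stellar/go/protocols/horizon/base"
)

func main() {
	account := horizon.Account{
		AccountID: "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3",
		Sequence:  "3002985298788353",
		Balances: []horizon.Balance{
			{Balance: "100.0000000", Asset: base.Asset{Type: "native"}},
		},
		Data:    map[string]string{"greeting": "aGVsbG8="}, // base64("hello")
		Signers: []horizon.Signer{{Key: "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3", Weight: 1, Type: "ed25519_public_key"}},
	}

	// Errors are elided for brevity; each helper returns one.
	native, _ := account.GetNativeBalance()      // "100.0000000"
	seq, _ := account.GetSequenceNumber()        // 3002985298788353
	next, _ := account.IncrementSequenceNumber() // 3002985298788354; account.Sequence is updated as well
	greeting, _ := account.GetData("greeting")   // []byte("hello")

	fmt.Println(native, seq, next, string(greeting), account.SignerSummary())
}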
+func (a *Account) IncrementSequenceNumber() (int64, error) { + seqNum, err := a.GetSequenceNumber() + if err != nil { + return 0, err + } + if seqNum == math.MaxInt64 { + return 0, fmt.Errorf("sequence cannot be increased, it already reached MaxInt64 (%d)", int64(math.MaxInt64)) + } + seqNum++ + a.Sequence = strconv.FormatInt(seqNum, 10) + return seqNum, nil +} + +// MustGetData returns the decoded value for a given key. If the key does +// not exist, an empty slice will be returned. If there is an error +// decoding a value, it will panic. +func (a *Account) MustGetData(key string) []byte { + bytes, err := a.GetData(key) + if err != nil { + panic(err) + } + return bytes +} + +// GetData returns the decoded value for a given key. If the key does +// not exist, an empty slice will be returned. +func (a *Account) GetData(key string) ([]byte, error) { + return base64.StdEncoding.DecodeString(a.Data[key]) +} + +// SignerSummary returns a map of signers' keys to weights. +func (a *Account) SignerSummary() map[string]int32 { + m := map[string]int32{} + for _, s := range a.Signers { + m[s.Key] = s.Weight + } + return m +} + +// AccountFlags represents the state of an account's flags +type AccountFlags struct { + AuthRequired bool `json:"auth_required"` + AuthRevocable bool `json:"auth_revocable"` + AuthImmutable bool `json:"auth_immutable"` + AuthClawbackEnabled bool `json:"auth_clawback_enabled"` +} + +// AccountThresholds represents an account's "thresholds", the numerical values +// needed to satisfy the authorization of a given operation. +type AccountThresholds struct { + LowThreshold byte `json:"low_threshold"` + MedThreshold byte `json:"med_threshold"` + HighThreshold byte `json:"high_threshold"` +} + +// Asset represents a single asset +type Asset base.Asset + +// AssetStat represents the statistics for a single Asset +type AssetStat struct { + Links struct { + Toml hal.Link `json:"toml"` + } `json:"_links"` + + base.Asset + PT string `json:"paging_token"` + // Action needed in release: horizon-v3.0.0: deprecated field + NumAccounts int32 `json:"num_accounts"` + NumClaimableBalances int32 `json:"num_claimable_balances"` + NumLiquidityPools int32 `json:"num_liquidity_pools"` + // Action needed in release: horizon-v3.0.0: deprecated field + Amount string `json:"amount"` + Accounts AssetStatAccounts `json:"accounts"` + ClaimableBalancesAmount string `json:"claimable_balances_amount"` + LiquidityPoolsAmount string `json:"liquidity_pools_amount"` + Balances AssetStatBalances `json:"balances"` + Flags AccountFlags `json:"flags"` +} + +// PagingToken implementation for hal.Pageable +func (res AssetStat) PagingToken() string { + return res.PT +} + +// AssetStatBalances represents the summarized balances for a single Asset +type AssetStatBalances struct { + Authorized string `json:"authorized"` + AuthorizedToMaintainLiabilities string `json:"authorized_to_maintain_liabilities"` + Unauthorized string `json:"unauthorized"` +} + +// AssetStatAccounts represents the summarized account numbers for a single Asset +type AssetStatAccounts struct { + Authorized int32 `json:"authorized"` + AuthorizedToMaintainLiabilities int32 `json:"authorized_to_maintain_liabilities"` + Unauthorized int32 `json:"unauthorized"` +} + +// Balance represents an account's holdings for either a single currency type or +// shares in a liquidity pool.
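Because the authorization fields of Balance are pointers with omitempty, a decoded record distinguishes "flag not reported" (native balances and pool shares) from an explicit false. A small sketch, assuming the asset_type/asset_code/asset_issuer JSON tags that base.Asset defines elsewhere in this module:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/stellar/go/protocols/horizon"
)

func main() {
	// Native balances carry no authorization flags, so the pointers stay nil.
	var native horizon.Balance
	if err := json.Unmarshal([]byte(`{"balance":"100.0000000","asset_type":"native"}`), &native); err != nil {
		panic(err)
	}

	// Credit balances report the flags explicitly.
	creditJSON := `{
		"balance": "25.0000000",
		"limit": "1000.0000000",
		"asset_type": "credit_alphanum4",
		"asset_code": "USD",
		"asset_issuer": "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3",
		"is_authorized": true
	}`
	var credit horizon.Balance
	if err := json.Unmarshal([]byte(creditJSON), &credit); err != nil {
		panic(err)
	}

	fmt.Println(native.IsAuthorized == nil) // true: the flag was not reported
	if credit.IsAuthorized != nil {
		fmt.Println("authorized:", *credit.IsAuthorized) // authorized: true
	}
}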
+type Balance struct { + Balance string `json:"balance"` + LiquidityPoolId string `json:"liquidity_pool_id,omitempty"` + Limit string `json:"limit,omitempty"` + BuyingLiabilities string `json:"buying_liabilities,omitempty"` + SellingLiabilities string `json:"selling_liabilities,omitempty"` + Sponsor string `json:"sponsor,omitempty"` + LastModifiedLedger uint32 `json:"last_modified_ledger,omitempty"` + IsAuthorized *bool `json:"is_authorized,omitempty"` + IsAuthorizedToMaintainLiabilities *bool `json:"is_authorized_to_maintain_liabilities,omitempty"` + IsClawbackEnabled *bool `json:"is_clawback_enabled,omitempty"` + base.Asset +} + +// Ledger represents a single closed ledger +type Ledger struct { + Links struct { + Self hal.Link `json:"self"` + Transactions hal.Link `json:"transactions"` + Operations hal.Link `json:"operations"` + Payments hal.Link `json:"payments"` + Effects hal.Link `json:"effects"` + } `json:"_links"` + ID string `json:"id"` + PT string `json:"paging_token"` + Hash string `json:"hash"` + PrevHash string `json:"prev_hash,omitempty"` + Sequence int32 `json:"sequence"` + SuccessfulTransactionCount int32 `json:"successful_transaction_count"` + FailedTransactionCount *int32 `json:"failed_transaction_count"` + OperationCount int32 `json:"operation_count"` + TxSetOperationCount *int32 `json:"tx_set_operation_count"` + ClosedAt time.Time `json:"closed_at"` + TotalCoins string `json:"total_coins"` + FeePool string `json:"fee_pool"` + BaseFee int32 `json:"base_fee_in_stroops"` + BaseReserve int32 `json:"base_reserve_in_stroops"` + MaxTxSetSize int32 `json:"max_tx_set_size"` + ProtocolVersion int32 `json:"protocol_version"` + HeaderXDR string `json:"header_xdr"` +} + +func (l Ledger) PagingToken() string { + return l.PT +} + +// Offer is the display form of an offer to trade currency. +type Offer struct { + Links struct { + Self hal.Link `json:"self"` + OfferMaker hal.Link `json:"offer_maker"` + } `json:"_links"` + + ID int64 `json:"id,string"` + PT string `json:"paging_token"` + Seller string `json:"seller"` + Selling Asset `json:"selling"` + Buying Asset `json:"buying"` + Amount string `json:"amount"` + PriceR Price `json:"price_r"` + Price string `json:"price"` + LastModifiedLedger int32 `json:"last_modified_ledger"` + LastModifiedTime *time.Time `json:"last_modified_time"` + Sponsor string `json:"sponsor,omitempty"` +} + +func (o Offer) PagingToken() string { + return o.PT +} + +// OrderBookSummary represents a snapshot summary of a given order book +type OrderBookSummary struct { + Bids []PriceLevel `json:"bids"` + Asks []PriceLevel `json:"asks"` + Selling Asset `json:"base"` + Buying Asset `json:"counter"` +} + +// Path represents a single payment path. 
+type Path struct { + SourceAssetType string `json:"source_asset_type"` + SourceAssetCode string `json:"source_asset_code,omitempty"` + SourceAssetIssuer string `json:"source_asset_issuer,omitempty"` + SourceAmount string `json:"source_amount"` + DestinationAssetType string `json:"destination_asset_type"` + DestinationAssetCode string `json:"destination_asset_code,omitempty"` + DestinationAssetIssuer string `json:"destination_asset_issuer,omitempty"` + DestinationAmount string `json:"destination_amount"` + Path []Asset `json:"path"` +} + +// stub implementation to satisfy pageable interface +func (p Path) PagingToken() string { + return "" +} + +// Price represents a price for an offer +type Price base.Price + +// PriceLevel represents an aggregation of offers that share a given price +type PriceLevel struct { + PriceR Price `json:"price_r"` + Price string `json:"price"` + Amount string `json:"amount"` +} + +// Root is the initial map of links into the api. +type Root struct { + Links struct { + Account hal.Link `json:"account"` + Accounts *hal.Link `json:"accounts,omitempty"` + AccountTransactions hal.Link `json:"account_transactions"` + ClaimableBalances *hal.Link `json:"claimable_balances"` + Assets hal.Link `json:"assets"` + Effects hal.Link `json:"effects"` + FeeStats hal.Link `json:"fee_stats"` + Friendbot *hal.Link `json:"friendbot,omitempty"` + Ledger hal.Link `json:"ledger"` + Ledgers hal.Link `json:"ledgers"` + LiquidityPools *hal.Link `json:"liquidity_pools"` + Offer *hal.Link `json:"offer,omitempty"` + Offers *hal.Link `json:"offers,omitempty"` + Operation hal.Link `json:"operation"` + Operations hal.Link `json:"operations"` + OrderBook hal.Link `json:"order_book"` + Payments hal.Link `json:"payments"` + Self hal.Link `json:"self"` + StrictReceivePaths *hal.Link `json:"strict_receive_paths"` + StrictSendPaths *hal.Link `json:"strict_send_paths"` + TradeAggregations hal.Link `json:"trade_aggregations"` + Trades hal.Link `json:"trades"` + Transaction hal.Link `json:"transaction"` + Transactions hal.Link `json:"transactions"` + } `json:"_links"` + + HorizonVersion string `json:"horizon_version"` + StellarCoreVersion string `json:"core_version"` + IngestSequence uint32 `json:"ingest_latest_ledger"` + HorizonSequence int32 `json:"history_latest_ledger"` + HorizonLatestClosedAt time.Time `json:"history_latest_ledger_closed_at"` + HistoryElderSequence int32 `json:"history_elder_ledger"` + CoreSequence int32 `json:"core_latest_ledger"` + NetworkPassphrase string `json:"network_passphrase"` + CurrentProtocolVersion int32 `json:"current_protocol_version"` + CoreSupportedProtocolVersion int32 `json:"core_supported_protocol_version"` +} + +// Signer represents one of an account's signers. 
+type Signer struct { + Weight int32 `json:"weight"` + Key string `json:"key"` + Type string `json:"type"` + Sponsor string `json:"sponsor,omitempty"` +} + +// TradePrice represents a price for a trade +type TradePrice struct { + N int64 `json:"n,string"` + D int64 `json:"d,string"` +} + +// String returns a string representation of the trade price +func (p TradePrice) String() string { + return big.NewRat(p.N, p.D).FloatString(7) +} + +// UnmarshalJSON implements a custom unmarshaler for TradePrice +// which can handle a numerator and denominator fields which can be a string or int +func (p *TradePrice) UnmarshalJSON(data []byte) error { + v := struct { + N json.Number `json:"n"` + D json.Number `json:"d"` + }{} + err := json.Unmarshal(data, &v) + if err != nil { + return err + } + + if v.N != "" { + p.N, err = v.N.Int64() + if err != nil { + return err + } + } + if v.D != "" { + p.D, err = v.D.Int64() + if err != nil { + return err + } + } + return nil +} + +// Trade represents a horizon digested trade +type Trade struct { + Links struct { + Self hal.Link `json:"self"` + Base hal.Link `json:"base"` + Counter hal.Link `json:"counter"` + Operation hal.Link `json:"operation"` + } `json:"_links"` + + ID string `json:"id"` + PT string `json:"paging_token"` + LedgerCloseTime time.Time `json:"ledger_close_time"` + OfferID string `json:"offer_id,omitempty"` + TradeType string `json:"trade_type"` + LiquidityPoolFeeBP uint32 `json:"liquidity_pool_fee_bp,omitempty"` + BaseLiquidityPoolID string `json:"base_liquidity_pool_id,omitempty"` + BaseOfferID string `json:"base_offer_id,omitempty"` + BaseAccount string `json:"base_account,omitempty"` + BaseAmount string `json:"base_amount"` + BaseAssetType string `json:"base_asset_type"` + BaseAssetCode string `json:"base_asset_code,omitempty"` + BaseAssetIssuer string `json:"base_asset_issuer,omitempty"` + CounterLiquidityPoolID string `json:"counter_liquidity_pool_id,omitempty"` + CounterOfferID string `json:"counter_offer_id,omitempty"` + CounterAccount string `json:"counter_account,omitempty"` + CounterAmount string `json:"counter_amount"` + CounterAssetType string `json:"counter_asset_type"` + CounterAssetCode string `json:"counter_asset_code,omitempty"` + CounterAssetIssuer string `json:"counter_asset_issuer,omitempty"` + BaseIsSeller bool `json:"base_is_seller"` + Price TradePrice `json:"price,omitempty"` +} + +// PagingToken implementation for hal.Pageable +func (res Trade) PagingToken() string { + return res.PT +} + +// TradeEffect represents a trade effect resource. 
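The custom unmarshaler above means TradePrice accepts the numerator and denominator either as JSON numbers or as strings, and String renders the ratio to seven decimal places. A quick sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/stellar/go/protocols/horizon"
)

func main() {
	var fromStrings, fromNumbers horizon.TradePrice

	// Both encodings decode to the same value.
	if err := json.Unmarshal([]byte(`{"n":"3","d":"7"}`), &fromStrings); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`{"n":3,"d":7}`), &fromNumbers); err != nil {
		panic(err)
	}

	fmt.Println(fromStrings == fromNumbers) // true
	fmt.Println(fromStrings.String())       // 0.4285714
}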
+type TradeEffect struct { + Links struct { + Self hal.Link `json:"self"` + Seller hal.Link `json:"seller"` + Buyer hal.Link `json:"buyer"` + Operation hal.Link `json:"operation"` + } `json:"_links"` + + ID string `json:"id"` + PT string `json:"paging_token"` + OfferID string `json:"offer_id"` + Seller string `json:"seller"` + SoldAmount string `json:"sold_amount"` + SoldAssetType string `json:"sold_asset_type"` + SoldAssetCode string `json:"sold_asset_code,omitempty"` + SoldAssetIssuer string `json:"sold_asset_issuer,omitempty"` + Buyer string `json:"buyer"` + BoughtAmount string `json:"bought_amount"` + BoughtAssetType string `json:"bought_asset_type"` + BoughtAssetCode string `json:"bought_asset_code,omitempty"` + BoughtAssetIssuer string `json:"bought_asset_issuer,omitempty"` + LedgerCloseTime time.Time `json:"created_at"` +} + +// TradeAggregation represents trade data aggregation over a period of time +type TradeAggregation struct { + Timestamp int64 `json:"timestamp,string"` + TradeCount int64 `json:"trade_count,string"` + BaseVolume string `json:"base_volume"` + CounterVolume string `json:"counter_volume"` + Average string `json:"avg"` + High string `json:"high"` + HighR TradePrice `json:"high_r"` + Low string `json:"low"` + LowR TradePrice `json:"low_r"` + Open string `json:"open"` + OpenR TradePrice `json:"open_r"` + Close string `json:"close"` + CloseR TradePrice `json:"close_r"` +} + +// PagingToken implementation for hal.Pageable. Not actually used +func (res TradeAggregation) PagingToken() string { + return strconv.FormatInt(res.Timestamp, 10) +} + +// Transaction represents a single, successful transaction +type Transaction struct { + Links struct { + Self hal.Link `json:"self"` + Account hal.Link `json:"account"` + Ledger hal.Link `json:"ledger"` + Operations hal.Link `json:"operations"` + Effects hal.Link `json:"effects"` + Precedes hal.Link `json:"precedes"` + Succeeds hal.Link `json:"succeeds"` + // Temporarily include Transaction as a link so that Transaction + // can be fully compatible with TransactionSuccess + // When TransactionSuccess is removed from the SDKs we can remove this HAL link + Transaction hal.Link `json:"transaction"` + } `json:"_links"` + ID string `json:"id"` + PT string `json:"paging_token"` + Successful bool `json:"successful"` + Hash string `json:"hash"` + Ledger int32 `json:"ledger"` + LedgerCloseTime time.Time `json:"created_at"` + Account string `json:"source_account"` + AccountMuxed string `json:"account_muxed,omitempty"` + AccountMuxedID uint64 `json:"account_muxed_id,omitempty,string"` + AccountSequence string `json:"source_account_sequence"` + FeeAccount string `json:"fee_account"` + FeeAccountMuxed string `json:"fee_account_muxed,omitempty"` + FeeAccountMuxedID uint64 `json:"fee_account_muxed_id,omitempty,string"` + FeeCharged int64 `json:"fee_charged,string"` + MaxFee int64 `json:"max_fee,string"` + OperationCount int32 `json:"operation_count"` + EnvelopeXdr string `json:"envelope_xdr"` + ResultXdr string `json:"result_xdr"` + ResultMetaXdr string `json:"result_meta_xdr"` + FeeMetaXdr string `json:"fee_meta_xdr"` + MemoType string `json:"memo_type"` + MemoBytes string `json:"memo_bytes,omitempty"` + Memo string `json:"memo,omitempty"` + Signatures []string `json:"signatures"` + ValidAfter string `json:"valid_after,omitempty"` + ValidBefore string `json:"valid_before,omitempty"` + FeeBumpTransaction *FeeBumpTransaction `json:"fee_bump_transaction,omitempty"` + InnerTransaction *InnerTransaction `json:"inner_transaction,omitempty"` +} + +// 
FeeBumpTransaction contains information about a fee bump transaction +type FeeBumpTransaction struct { + Hash string `json:"hash"` + Signatures []string `json:"signatures"` +} + +// InnerTransaction contains information about the inner transaction contained +// within a fee bump transaction +type InnerTransaction struct { + Hash string `json:"hash"` + Signatures []string `json:"signatures"` + MaxFee int64 `json:"max_fee,string"` +} + +// MarshalJSON implements a custom marshaler for Transaction. +// The memo field should be omitted if and only if the +// memo_type is "none". +func (t Transaction) MarshalJSON() ([]byte, error) { + type Alias Transaction + v := &struct { + Memo *string `json:"memo,omitempty"` + MemoBytes *string `json:"memo_bytes,omitempty"` + *Alias + }{ + Alias: (*Alias)(&t), + } + if t.MemoType != "none" { + v.Memo = &t.Memo + } + + if t.MemoType == "text" { + v.MemoBytes = &t.MemoBytes + } + + return json.Marshal(v) +} + +// UnmarshalJSON implements a custom unmarshaler for Transaction +// which can handle a max_fee field which can be a string or int +func (t *Transaction) UnmarshalJSON(data []byte) error { + type Alias Transaction // we define Alias to avoid infinite recursion when calling UnmarshalJSON() + v := &struct { + FeeCharged json.Number `json:"fee_charged"` + MaxFee json.Number `json:"max_fee"` + *Alias + }{ + Alias: (*Alias)(t), + } + err := json.Unmarshal(data, &v) + if err != nil { + return err + } + + if v.FeeCharged != "" { + t.FeeCharged, err = v.FeeCharged.Int64() + if err != nil { + return err + } + } + if v.MaxFee != "" { + t.MaxFee, err = v.MaxFee.Int64() + if err != nil { + return err + } + } + return nil +} + +// PagingToken implementation for hal.Pageable +func (t Transaction) PagingToken() string { + return t.PT +} + +// TransactionResultCodes represent a summary of result codes returned from +// a single xdr TransactionResult +type TransactionResultCodes struct { + TransactionCode string `json:"transaction"` + InnerTransactionCode string `json:"inner_transaction,omitempty"` + OperationCodes []string `json:"operations,omitempty"` +} + +// KeyTypeFromAddress converts the version byte of the provided strkey encoded +// value (for example an account id or a signer key) and returns the appropriate +// horizon-specific type name. +func KeyTypeFromAddress(address string) (string, error) { + vb, err := strkey.Version(address) + if err != nil { + return "", errors.Wrap(err, "invalid address") + } + + result, ok := KeyTypeNames[vb] + if !ok { + result = "unknown" + } + + return result, nil +} + +// MustKeyTypeFromAddress is the panicking variant of KeyTypeFromAddress. 
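KeyTypeFromAddress, and the Must variant defined just below, simply map a strkey version byte to the names in KeyTypeNames. A short sketch, reusing the account address that appears in the test fixtures later in this patch:

package main

import (
	"fmt"

	"github.com/stellar/go/protocols/horizon"
)

func main() {
	keyType, err := horizon.KeyTypeFromAddress("GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3")
	if err != nil {
		panic(err)
	}
	fmt.Println(keyType) // ed25519_public_key

	// MustKeyTypeFromAddress panics instead of returning the error.
	fmt.Println(horizon.MustKeyTypeFromAddress("GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3"))
}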
+func MustKeyTypeFromAddress(address string) string { + ret, err := KeyTypeFromAddress(address) + if err != nil { + panic(err) + } + + return ret +} + +// AccountData represents a single data object stored on by an account +type AccountData struct { + Value string `json:"value"` + Sponsor string `json:"sponsor,omitempty"` +} + +// AccountsPage returns a list of account records +type AccountsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []Account `json:"records"` + } `json:"_embedded"` +} + +// TradeAggregationsPage returns a list of aggregated trade records, aggregated by resolution +type TradeAggregationsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []TradeAggregation `json:"records"` + } `json:"_embedded"` +} + +// TradesPage returns a list of trade records +type TradesPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []Trade `json:"records"` + } `json:"_embedded"` +} + +// OffersPage returns a list of offers +type OffersPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []Offer `json:"records"` + } `json:"_embedded"` +} + +// AssetsPage contains page of assets returned by Horizon. +type AssetsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []AssetStat + } `json:"_embedded"` +} + +// LedgersPage contains page of ledger information returned by Horizon +type LedgersPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []Ledger + } `json:"_embedded"` +} + +type FeeDistribution struct { + Max int64 `json:"max,string"` + Min int64 `json:"min,string"` + Mode int64 `json:"mode,string"` + P10 int64 `json:"p10,string"` + P20 int64 `json:"p20,string"` + P30 int64 `json:"p30,string"` + P40 int64 `json:"p40,string"` + P50 int64 `json:"p50,string"` + P60 int64 `json:"p60,string"` + P70 int64 `json:"p70,string"` + P80 int64 `json:"p80,string"` + P90 int64 `json:"p90,string"` + P95 int64 `json:"p95,string"` + P99 int64 `json:"p99,string"` +} + +// FeeStats represents a response of fees from horizon +// To do: implement fee suggestions if agreement is reached in https://github.com/stellar/go/issues/926 +type FeeStats struct { + LastLedger uint32 `json:"last_ledger,string"` + LastLedgerBaseFee int64 `json:"last_ledger_base_fee,string"` + LedgerCapacityUsage float64 `json:"ledger_capacity_usage,string"` + + FeeCharged FeeDistribution `json:"fee_charged"` + MaxFee FeeDistribution `json:"max_fee"` +} + +// TransactionsPage contains records of transaction information returned by Horizon +type TransactionsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []Transaction + } `json:"_embedded"` +} + +// PathsPage contains records of payment paths found by horizon +type PathsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []Path + } `json:"_embedded"` +} + +// ClaimableBalanceFlags represents the state of a claimable balance's flags +type ClaimableBalanceFlags struct { + ClawbackEnabled bool `json:"clawback_enabled"` +} + +// ClaimableBalance represents a claimable balance +type ClaimableBalance struct { + Links struct { + Self hal.Link `json:"self"` + Transactions hal.Link `json:"transactions"` + Operations hal.Link `json:"operations"` + } `json:"_links"` + + BalanceID string `json:"id"` + Asset string `json:"asset"` + Amount string `json:"amount"` + Sponsor string `json:"sponsor,omitempty"` + LastModifiedLedger uint32 `json:"last_modified_ledger"` + LastModifiedTime *time.Time 
`json:"last_modified_time"` + Claimants []Claimant `json:"claimants"` + Flags ClaimableBalanceFlags `json:"flags"` + PT string `json:"paging_token"` +} + +type ClaimableBalances struct { + Links struct { + Self hal.Link `json:"self"` + } `json:"_links"` + + Embedded struct { + Records []ClaimableBalance `json:"records"` + } `json:"_embedded"` +} + +// PagingToken implementation for hal.Pageable +func (res ClaimableBalance) PagingToken() string { + return res.PT +} + +// Claimant represents a claimable balance claimant +type Claimant struct { + Destination string `json:"destination"` + Predicate xdr.ClaimPredicate `json:"predicate"` +} + +// LiquidityPool represents a liquidity pool +type LiquidityPool struct { + Links struct { + Self hal.Link `json:"self"` + Transactions hal.Link `json:"transactions"` + Operations hal.Link `json:"operations"` + } `json:"_links"` + + ID string `json:"id"` + PT string `json:"paging_token"` + FeeBP uint32 `json:"fee_bp"` + Type string `json:"type"` + TotalTrustlines uint64 `json:"total_trustlines,string"` + TotalShares string `json:"total_shares"` + Reserves []LiquidityPoolReserve `json:"reserves"` + LastModifiedLedger uint32 `json:"last_modified_ledger"` + LastModifiedTime *time.Time `json:"last_modified_time"` +} + +// PagingToken implementation for hal.Pageable +func (res LiquidityPool) PagingToken() string { + return res.PT +} + +// LiquidityPoolsPage returns a list of liquidity pool records +type LiquidityPoolsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []LiquidityPool `json:"records"` + } `json:"_embedded"` +} + +// LiquidityPoolReserve represents a liquidity pool asset reserve +type LiquidityPoolReserve struct { + Asset string `json:"asset"` + Amount string `json:"amount"` +} diff --git a/protocols/horizon/main_test.go b/protocols/horizon/main_test.go new file mode 100644 index 0000000000..17f5f2ba10 --- /dev/null +++ b/protocols/horizon/main_test.go @@ -0,0 +1,240 @@ +package horizon + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" +) + +// Account Tests +// An example account to be used in all the Account tests +var exampleAccount = Account{ + Data: map[string]string{ + "test": "aGVsbG8=", + "invalid": "a_*&^*", + }, + Sequence: "3002985298788353", +} + +func TestAccount_IncrementSequenceNumber(t *testing.T) { + seqNum, err := exampleAccount.IncrementSequenceNumber() + + assert.Nil(t, err) + assert.Equal(t, "3002985298788354", exampleAccount.Sequence, "sequence number string was incremented") + assert.Equal(t, int64(3002985298788354), seqNum, "incremented sequence number is correct value/type") +} + +func TestAccount_GetData(t *testing.T) { + decoded, err := exampleAccount.GetData("test") + assert.Nil(t, err) + assert.Equal(t, string(decoded), "hello", "returns decoded value when key exists") + + decoded, err = exampleAccount.GetData("test2") + assert.Nil(t, err) + assert.Equal(t, len(decoded), 0, "returns empty slice if key doesn't exist") + + _, err = exampleAccount.GetData("invalid") + assert.NotNil(t, err, "returns error slice if value is invalid") +} + +func TestAccount_MustGetData(t *testing.T) { + decoded := exampleAccount.MustGetData("test") + assert.Equal(t, string(decoded), "hello", "returns decoded value when the key exists") + + decoded = exampleAccount.MustGetData("test2") + assert.Equal(t, len(decoded), 0, "returns empty slice if key doesn't exist") + + assert.Panics(t, func() { exampleAccount.MustGetData("invalid") }, "panics on invalid input") +} + +// Transaction 
Tests +func TestTransactionJSONMarshal(t *testing.T) { + transaction := Transaction{ + ID: "12345", + MaxFee: 11, + FeeCharged: 10, + MemoType: "text", + Memo: "", + } + marshaledTransaction, marshalErr := json.Marshal(transaction) + assert.Nil(t, marshalErr) + var result Transaction + json.Unmarshal(marshaledTransaction, &result) + assert.Equal(t, result, transaction, "data matches original input") +} + +// Test that a typical friendbot fund response can unmarshal to the Transaction +// type. The horizonclient uses the Transaction type for friendbot responses +// also, but their response is a slimmed down version of the full transaction +// response. This test confirms there are no errors unmarshalling that slimmed +// down version. +func TestTransactionUnmarshalsFriendbotFund(t *testing.T) { + friendbotFundResponse := `{ + "_links": { + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/94e42f65d3ff5f30669b6109c2ce3e82c0e592c52004e3b41bb30e24df33954e" + } + }, + "hash": "94e42f65d3ff5f30669b6109c2ce3e82c0e592c52004e3b41bb30e24df33954e", + "ledger": 8269, + "envelope_xdr": "AAAAAgAAAAD2Leuk4afNVCYqxbN03yPH6kgKe/o2yiOd3CQNkpkpQwABhqAAAAFSAAAACQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAAAAAAABW9+rbvt6YXwwXyFszptQFlfzzFMrWObLiJmBhOzNblAAAABdIdugAAAAAAAAAAAKSmSlDAAAAQHWNbXOoVQqH0YJRr8LAtpalV+NoXb8Tv/ETkPNv2NignhN8seUSde8m2HLNLHOo+5W34BXfxfBmDXgZn8yHkwSGVuCcAAAAQDQLh1UAxYZ27sIxyYgyYFo8IUbTiANWadUJUR7K0q1eY6Q5J/BFfNlf6UqLqJ5zd8uI3TXCaBNJDkiQc1ZLEg4=", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAgAAAAIAAAADAAAgTQAAAAAAAAAA9i3rpOGnzVQmKsWzdN8jx+pICnv6NsojndwkDZKZKUMAAAAAPDNbbAAAAVIAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAgTQAAAAAAAAAA9i3rpOGnzVQmKsWzdN8jx+pICnv6NsojndwkDZKZKUMAAAAAPDNbbAAAAVIAAAAJAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAACBMAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAFg09HQY/uMAAAA2wAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAACBNAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAFg07qH7ROMAAAA2wAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAACBNAAAAAAAAAABW9+rbvt6YXwwXyFszptQFlfzzFMrWObLiJmBhOzNblAAAABdIdugAAAAgTQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAA=" +}` + transaction := Transaction{} + err := json.Unmarshal([]byte(friendbotFundResponse), &transaction) + assert.Nil(t, err) +} + +func TestTransactionEmptyMemoText(t *testing.T) { + transaction := Transaction{ + MemoType: "text", + Memo: "", + MemoBytes: "", + } + marshaledTransaction, marshalErr := json.Marshal(transaction) + assert.Nil(t, marshalErr) + var result struct { + Memo *string + MemoBytes *string `json:"memo_bytes"` + } + json.Unmarshal(marshaledTransaction, &result) + assert.NotNil(t, result.Memo, "memo field is present even if input memo was empty string") + assert.NotNil(t, result.MemoBytes, "memo_bytes field is present even if input memo was empty string") +} + +func TestTransactionMemoTypeNone(t *testing.T) { + transaction := Transaction{ + MemoType: "none", + } + marshaledTransaction, marshalErr := json.Marshal(transaction) + assert.Nil(t, marshalErr) + var result struct { + Memo *string + } + json.Unmarshal(marshaledTransaction, &result) + assert.Nil(t, result.Memo, "no memo field is present when memo input type was `none`") +} + +func TestTransactionUnmarshalJSON(t *testing.T) { + const feesAsInt64s = `{ + "memo": "MzUyODFmNThmZjkxMGNiMTVhYWQ1NjM2ZGIyNzUzZTA=", + "_links": { + "self": { + "href": 
"https://horizon.stellar.org/transactions/998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033" + }, + "account": { + "href": "https://horizon.stellar.org/accounts/GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3" + }, + "ledger": { + "href": "https://horizon.stellar.org/ledgers/29113108" + }, + "operations": { + "href": "https://horizon.stellar.org/transactions/998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon.stellar.org/transactions/998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon.stellar.org/transactions?order=asc\u0026cursor=125039846745419776" + }, + "succeeds": { + "href": "https://horizon.stellar.org/transactions?order=desc\u0026cursor=125039846745419776" + }, + "transaction": { + "href": "https://horizon.stellar.org/transactions/998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033" + } + }, + "id": "998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033", + "paging_token": "125039846745419776", + "successful": true, + "hash": "998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033", + "ledger": 29113108, + "created_at": "2020-04-10T17:03:18Z", + "source_account": "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3", + "source_account_sequence": "113942901088600162", + "fee_account": "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3", + "fee_charged": 3000000000, + "max_fee": 2500000000, + "operation_count": 1, + "envelope_xdr": "AAAAAGmBpPsDnlK0e194Og7IO5mFUc0deRAdxxha3Q+t4F77AAAAZAGUzncAEEBiAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAADMzUyODFmNThmZjkxMGNiMTVhYWQ1NjM2ZGIyNzUzZTAAAAABAAAAAQAAAADhZHiqD/Q3uSTgjYEWGVRfCCHYvFmeqJU12G9SkzJYEQAAAAEAAAAAP29uBulc9ouSoH62BRypPhD6zcLWoS5sj7CHf5SJ15MAAAABTk9ETAAAAAB1jYLXrFzNBOWCoPnZSHI3PJAhHtc1TrCaiPuZwSf5pgAAAAAAAAABAAAAAAAAAALw9Tl2AAAAQOknEHs7ZaPNVlXMU0uOtT+0TVo9kW/jDuNxN40FdJDic0p23V4lxOfPGCgQwBgTehqCIEzCMQ4LkbfzkdgkFAut4F77AAAAQKtFmT73srS8RHeQgWWia8mb+TrLCr1CJbK+MAKGdUnb4s4JBOKUjHhqQLrs7GCkJ3wOpgTbtW8VpwNedCJhFQ0=", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAbw7FAAAAAAAAAAAaYGk+wOeUrR7X3g6Dsg7mYVRzR15EB3HGFrdD63gXvsAAAAA0gRBOAGUzncAEEBhAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAbw7FAAAAAAAAAAAaYGk+wOeUrR7X3g6Dsg7mYVRzR15EB3HGFrdD63gXvsAAAAA0gRBOAGUzncAEEBiAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMBvDsUAAAAAQAAAAA/b24G6Vz2i5KgfrYFHKk+EPrNwtahLmyPsId/lInXkwAAAAFOT0RMAAAAAHWNgtesXM0E5YKg+dlIcjc8kCEe1zVOsJqI+5nBJ/mmAAAAAMUrlJIASptjhEiAAAAAAAEAAAAAAAAAAAAAAAEBvDsUAAAAAQAAAAA/b24G6Vz2i5KgfrYFHKk+EPrNwtahLmyPsId/lInXkwAAAAFOT0RMAAAAAHWNgtesXM0E5YKg+dlIcjc8kCEe1zVOsJqI+5nBJ/mmAAAAAMUrlJMASptjhEiAAAAAAAEAAAAAAAAAAAAAAAMBvDsUAAAAAQAAAADhZHiqD/Q3uSTgjYEWGVRfCCHYvFmeqJU12G9SkzJYEQAAAAFOT0RMAAAAAHWNgtesXM0E5YKg+dlIcjc8kCEe1zVOsJqI+5nBJ/mmAAAAKdrbU7B//////////wAAAAEAAAAAAAAAAAAAAAEBvDsUAAAAAQAAAADhZHiqD/Q3uSTgjYEWGVRfCCHYvFmeqJU12G9SkzJYEQAAAAFOT0RMAAAAAHWNgtesXM0E5YKg+dlIcjc8kCEe1zVOsJqI+5nBJ/mmAAAAKdrbU69//////////wAAAAEAAAAAAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMBvDsTAAAAAAAAAABpgaT7A55StHtfeDoOyDuZhVHNHXkQHccYWt0PreBe+wAAAADSBEGcAZTOdwAQQGEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEBvDsUAAAAAAAAAABpgaT7A55StHtfeDoOyDuZhVHNHXkQHccYWt0PreBe+wAAAADSBEE4AZTOdwAQQGEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "hash", + "signatures": [ + 
"6ScQeztlo81WVcxTS461P7RNWj2Rb+MO43E3jQV0kOJzSnbdXiXE588YKBDAGBN6GoIgTMIxDguRt/OR2CQUCw==", + "q0WZPveytLxEd5CBZaJryZv5OssKvUIlsr4wAoZ1SdvizgkE4pSMeGpAuuzsYKQnfA6mBNu1bxWnA150ImEVDQ==" + ], + "valid_after": "1970-01-01T00:00:00Z" + }` + + const feesAsStrings = `{ + "memo": "MzUyODFmNThmZjkxMGNiMTVhYWQ1NjM2ZGIyNzUzZTA=", + "_links": { + "self": { + "href": "https://horizon.stellar.org/transactions/998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033" + }, + "account": { + "href": "https://horizon.stellar.org/accounts/GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3" + }, + "ledger": { + "href": "https://horizon.stellar.org/ledgers/29113108" + }, + "operations": { + "href": "https://horizon.stellar.org/transactions/998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon.stellar.org/transactions/998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon.stellar.org/transactions?order=asc\u0026cursor=125039846745419776" + }, + "succeeds": { + "href": "https://horizon.stellar.org/transactions?order=desc\u0026cursor=125039846745419776" + }, + "transaction": { + "href": "https://horizon.stellar.org/transactions/998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033" + } + }, + "id": "998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033", + "paging_token": "125039846745419776", + "successful": true, + "hash": "998605ace4a0b89293cf729cf216405f29c1ce5d44d6a40232982a4bdccda033", + "ledger": 29113108, + "created_at": "2020-04-10T17:03:18Z", + "source_account": "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3", + "source_account_sequence": "113942901088600162", + "fee_account": "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3", + "fee_charged": "3000000000", + "max_fee": "2500000000", + "operation_count": 1, + "envelope_xdr": "AAAAAGmBpPsDnlK0e194Og7IO5mFUc0deRAdxxha3Q+t4F77AAAAZAGUzncAEEBiAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAADMzUyODFmNThmZjkxMGNiMTVhYWQ1NjM2ZGIyNzUzZTAAAAABAAAAAQAAAADhZHiqD/Q3uSTgjYEWGVRfCCHYvFmeqJU12G9SkzJYEQAAAAEAAAAAP29uBulc9ouSoH62BRypPhD6zcLWoS5sj7CHf5SJ15MAAAABTk9ETAAAAAB1jYLXrFzNBOWCoPnZSHI3PJAhHtc1TrCaiPuZwSf5pgAAAAAAAAABAAAAAAAAAALw9Tl2AAAAQOknEHs7ZaPNVlXMU0uOtT+0TVo9kW/jDuNxN40FdJDic0p23V4lxOfPGCgQwBgTehqCIEzCMQ4LkbfzkdgkFAut4F77AAAAQKtFmT73srS8RHeQgWWia8mb+TrLCr1CJbK+MAKGdUnb4s4JBOKUjHhqQLrs7GCkJ3wOpgTbtW8VpwNedCJhFQ0=", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAbw7FAAAAAAAAAAAaYGk+wOeUrR7X3g6Dsg7mYVRzR15EB3HGFrdD63gXvsAAAAA0gRBOAGUzncAEEBhAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAbw7FAAAAAAAAAAAaYGk+wOeUrR7X3g6Dsg7mYVRzR15EB3HGFrdD63gXvsAAAAA0gRBOAGUzncAEEBiAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMBvDsUAAAAAQAAAAA/b24G6Vz2i5KgfrYFHKk+EPrNwtahLmyPsId/lInXkwAAAAFOT0RMAAAAAHWNgtesXM0E5YKg+dlIcjc8kCEe1zVOsJqI+5nBJ/mmAAAAAMUrlJIASptjhEiAAAAAAAEAAAAAAAAAAAAAAAEBvDsUAAAAAQAAAAA/b24G6Vz2i5KgfrYFHKk+EPrNwtahLmyPsId/lInXkwAAAAFOT0RMAAAAAHWNgtesXM0E5YKg+dlIcjc8kCEe1zVOsJqI+5nBJ/mmAAAAAMUrlJMASptjhEiAAAAAAAEAAAAAAAAAAAAAAAMBvDsUAAAAAQAAAADhZHiqD/Q3uSTgjYEWGVRfCCHYvFmeqJU12G9SkzJYEQAAAAFOT0RMAAAAAHWNgtesXM0E5YKg+dlIcjc8kCEe1zVOsJqI+5nBJ/mmAAAAKdrbU7B//////////wAAAAEAAAAAAAAAAAAAAAEBvDsUAAAAAQAAAADhZHiqD/Q3uSTgjYEWGVRfCCHYvFmeqJU12G9SkzJYEQAAAAFOT0RMAAAAAHWNgtesXM0E5YKg+dlIcjc8kCEe1zVOsJqI+5nBJ/mmAAAAKdrbU69//////////wAAAAEAAAAAAAAAAA==", 
+ "fee_meta_xdr": "AAAAAgAAAAMBvDsTAAAAAAAAAABpgaT7A55StHtfeDoOyDuZhVHNHXkQHccYWt0PreBe+wAAAADSBEGcAZTOdwAQQGEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEBvDsUAAAAAAAAAABpgaT7A55StHtfeDoOyDuZhVHNHXkQHccYWt0PreBe+wAAAADSBEE4AZTOdwAQQGEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "hash", + "signatures": [ + "6ScQeztlo81WVcxTS461P7RNWj2Rb+MO43E3jQV0kOJzSnbdXiXE588YKBDAGBN6GoIgTMIxDguRt/OR2CQUCw==", + "q0WZPveytLxEd5CBZaJryZv5OssKvUIlsr4wAoZ1SdvizgkE4pSMeGpAuuzsYKQnfA6mBNu1bxWnA150ImEVDQ==" + ], + "valid_after": "1970-01-01T00:00:00Z" + }` + + var parsedFeesAsInts, parsedFeesAsStrings Transaction + assert.NoError(t, json.Unmarshal([]byte(feesAsInt64s), &parsedFeesAsInts)) + assert.NoError(t, json.Unmarshal([]byte(feesAsStrings), &parsedFeesAsStrings)) + assert.Equal(t, parsedFeesAsInts, parsedFeesAsStrings) + assert.Equal(t, int64(2500000000), parsedFeesAsInts.MaxFee) + assert.Equal(t, int64(3000000000), parsedFeesAsInts.FeeCharged) +} + +func TestTradeAggregation_PagingToken(t *testing.T) { + ta := TradeAggregation{Timestamp: 64} + assert.Equal(t, "64", ta.PagingToken()) +} diff --git a/protocols/horizon/operations/main.go b/protocols/horizon/operations/main.go new file mode 100644 index 0000000000..a9b912c45d --- /dev/null +++ b/protocols/horizon/operations/main.go @@ -0,0 +1,573 @@ +package operations + +import ( + "encoding/json" + "errors" + "time" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/base" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +// TypeNames maps from operation type to the string used to represent that type +// in horizon's JSON responses +var TypeNames = map[xdr.OperationType]string{ + xdr.OperationTypeCreateAccount: "create_account", + xdr.OperationTypePayment: "payment", + xdr.OperationTypePathPaymentStrictReceive: "path_payment_strict_receive", + xdr.OperationTypeManageSellOffer: "manage_sell_offer", + xdr.OperationTypeCreatePassiveSellOffer: "create_passive_sell_offer", + xdr.OperationTypeSetOptions: "set_options", + xdr.OperationTypeChangeTrust: "change_trust", + xdr.OperationTypeAllowTrust: "allow_trust", + xdr.OperationTypeAccountMerge: "account_merge", + xdr.OperationTypeInflation: "inflation", + xdr.OperationTypeManageData: "manage_data", + xdr.OperationTypeBumpSequence: "bump_sequence", + xdr.OperationTypeManageBuyOffer: "manage_buy_offer", + xdr.OperationTypePathPaymentStrictSend: "path_payment_strict_send", + xdr.OperationTypeCreateClaimableBalance: "create_claimable_balance", + xdr.OperationTypeClaimClaimableBalance: "claim_claimable_balance", + xdr.OperationTypeBeginSponsoringFutureReserves: "begin_sponsoring_future_reserves", + xdr.OperationTypeEndSponsoringFutureReserves: "end_sponsoring_future_reserves", + xdr.OperationTypeRevokeSponsorship: "revoke_sponsorship", + xdr.OperationTypeClawback: "clawback", + xdr.OperationTypeClawbackClaimableBalance: "clawback_claimable_balance", + xdr.OperationTypeSetTrustLineFlags: "set_trust_line_flags", + xdr.OperationTypeLiquidityPoolDeposit: "liquidity_pool_deposit", + xdr.OperationTypeLiquidityPoolWithdraw: "liquidity_pool_withdraw", +} + +// Base represents the common attributes of an operation resource +type Base struct { + Links struct { + Self hal.Link `json:"self"` + Transaction hal.Link `json:"transaction"` + Effects hal.Link `json:"effects"` + Succeeds hal.Link `json:"succeeds"` + Precedes hal.Link `json:"precedes"` + } `json:"_links"` + + ID string `json:"id"` + PT string `json:"paging_token"` + // 
TransactionSuccessful defines if this operation is part of + // successful transaction. + TransactionSuccessful bool `json:"transaction_successful"` + SourceAccount string `json:"source_account"` + SourceAccountMuxed string `json:"source_account_muxed,omitempty"` + SourceAccountMuxedID uint64 `json:"source_account_muxed_id,omitempty,string"` + Type string `json:"type"` + TypeI int32 `json:"type_i"` + LedgerCloseTime time.Time `json:"created_at"` + // TransactionHash is the hash of the transaction which created the operation + // Note that the Transaction field below is not always present in the Operation response. + // If the Transaction field is present TransactionHash is redundant since the same information + // is present in Transaction. But, if the Transaction field is nil then TransactionHash is useful. + // Transaction is non nil when the "join=transactions" parameter is present in the operations request + TransactionHash string `json:"transaction_hash"` + Transaction *horizon.Transaction `json:"transaction,omitempty"` + Sponsor string `json:"sponsor,omitempty"` +} + +// PagingToken implements hal.Pageable +func (base Base) PagingToken() string { + return base.PT +} + +// BumpSequence is the json resource representing a single operation whose type is +// BumpSequence. +type BumpSequence struct { + Base + BumpTo string `json:"bump_to"` +} + +// CreateAccount is the json resource representing a single operation whose type +// is CreateAccount. +type CreateAccount struct { + Base + StartingBalance string `json:"starting_balance"` + Funder string `json:"funder"` + FunderMuxed string `json:"funder_muxed,omitempty"` + FunderMuxedID uint64 `json:"funder_muxed_id,omitempty,string"` + Account string `json:"account"` +} + +// Payment is the json resource representing a single operation whose type is +// Payment. +type Payment struct { + Base + base.Asset + From string `json:"from"` + FromMuxed string `json:"from_muxed,omitempty"` + FromMuxedID uint64 `json:"from_muxed_id,omitempty,string"` + To string `json:"to"` + ToMuxed string `json:"to_muxed,omitempty"` + ToMuxedID uint64 `json:"to_muxed_id,omitempty,string"` + Amount string `json:"amount"` +} + +// PathPayment is the json resource representing a single operation whose type +// is PathPayment. +type PathPayment struct { + Payment + Path []base.Asset `json:"path"` + SourceAmount string `json:"source_amount"` + SourceMax string `json:"source_max"` + SourceAssetType string `json:"source_asset_type"` + SourceAssetCode string `json:"source_asset_code,omitempty"` + SourceAssetIssuer string `json:"source_asset_issuer,omitempty"` +} + +// PathPaymentStrictSend is the json resource representing a single operation whose type +// is PathPaymentStrictSend. +type PathPaymentStrictSend struct { + Payment + Path []base.Asset `json:"path"` + SourceAmount string `json:"source_amount"` + DestinationMin string `json:"destination_min"` + SourceAssetType string `json:"source_asset_type"` + SourceAssetCode string `json:"source_asset_code,omitempty"` + SourceAssetIssuer string `json:"source_asset_issuer,omitempty"` +} + +// ManageData represents a ManageData operation as it is serialized into json +// for the horizon API. +type ManageData struct { + Base + Name string `json:"name"` + Value string `json:"value"` +} + +// Offer is an embedded resource used in offer type operations. 
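TypeNames gives the JSON name Horizon uses for each XDR operation type, and every concrete operation embeds Base, so a caller that already knows a record's type can decode it directly. A minimal sketch; the payload is invented, and the asset_type tag is assumed from base.Asset defined elsewhere in this module:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/stellar/go/protocols/horizon/operations"
	"github.com/stellar/go/xdr"
)

func main() {
	// The name used for payment operations in Horizon's JSON responses.
	fmt.Println(operations.TypeNames[xdr.OperationTypePayment]) // payment

	// Decoding a record whose "type" field is already known to be "payment".
	raw := []byte(`{
		"id": "125039846745419777",
		"paging_token": "125039846745419777",
		"transaction_successful": true,
		"type": "payment",
		"asset_type": "native",
		"from": "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3",
		"to": "GBUYDJH3AOPFFND3L54DUDWIHOMYKUONDV4RAHOHDBNN2D5N4BPPWDQ3",
		"amount": "10.0000000"
	}`)

	var payment operations.Payment
	if err := json.Unmarshal(raw, &payment); err != nil {
		panic(err)
	}
	fmt.Println(payment.PagingToken(), payment.From, payment.To, payment.Amount)
}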
+type Offer struct { + Base + Amount string `json:"amount"` + Price string `json:"price"` + PriceR base.Price `json:"price_r"` + BuyingAssetType string `json:"buying_asset_type"` + BuyingAssetCode string `json:"buying_asset_code,omitempty"` + BuyingAssetIssuer string `json:"buying_asset_issuer,omitempty"` + SellingAssetType string `json:"selling_asset_type"` + SellingAssetCode string `json:"selling_asset_code,omitempty"` + SellingAssetIssuer string `json:"selling_asset_issuer,omitempty"` +} + +// CreatePassiveSellOffer is the json resource representing a single operation whose +// type is CreatePassiveSellOffer. +type CreatePassiveSellOffer struct { + Offer +} + +// ManageSellOffer is the json resource representing a single operation whose type +// is ManageSellOffer. +type ManageSellOffer struct { + Offer + OfferID int64 `json:"offer_id,string"` +} + +// ManageBuyOffer is the json resource representing a single operation whose type +// is ManageBuyOffer. +type ManageBuyOffer struct { + Offer + OfferID int64 `json:"offer_id,string"` +} + +// SetOptions is the json resource representing a single operation whose type is +// SetOptions. +type SetOptions struct { + Base + HomeDomain string `json:"home_domain,omitempty"` + InflationDest string `json:"inflation_dest,omitempty"` + + MasterKeyWeight *int `json:"master_key_weight,omitempty"` + SignerKey string `json:"signer_key,omitempty"` + SignerWeight *int `json:"signer_weight,omitempty"` + + SetFlags []int `json:"set_flags,omitempty"` + SetFlagsS []string `json:"set_flags_s,omitempty"` + ClearFlags []int `json:"clear_flags,omitempty"` + ClearFlagsS []string `json:"clear_flags_s,omitempty"` + + LowThreshold *int `json:"low_threshold,omitempty"` + MedThreshold *int `json:"med_threshold,omitempty"` + HighThreshold *int `json:"high_threshold,omitempty"` +} + +// ChangeTrust is the json resource representing a single operation whose type +// is ChangeTrust. +type ChangeTrust struct { + Base + base.LiquidityPoolOrAsset + Limit string `json:"limit"` + Trustee string `json:"trustee,omitempty"` + Trustor string `json:"trustor"` + TrustorMuxed string `json:"trustor_muxed,omitempty"` + TrustorMuxedID uint64 `json:"trustor_muxed_id,omitempty,string"` +} + +// Deprecated: use SetTrustLineFlags instead. +// AllowTrust is the json resource representing a single operation whose type is +// AllowTrust. +type AllowTrust struct { + Base + base.Asset + Trustee string `json:"trustee"` + TrusteeMuxed string `json:"trustee_muxed,omitempty"` + TrusteeMuxedID uint64 `json:"trustee_muxed_id,omitempty,string"` + Trustor string `json:"trustor"` + Authorize bool `json:"authorize"` + AuthorizeToMaintainLiabilities bool `json:"authorize_to_maintain_liabilities"` +} + +// AccountMerge is the json resource representing a single operation whose type +// is AccountMerge. +type AccountMerge struct { + Base + Account string `json:"account"` + AccountMuxed string `json:"account_muxed,omitempty"` + AccountMuxedID uint64 `json:"account_muxed_id,omitempty,string"` + Into string `json:"into"` + IntoMuxed string `json:"into_muxed,omitempty"` + IntoMuxedID uint64 `json:"into_muxed_id,omitempty,string"` +} + +// Inflation is the json resource representing a single operation whose type is +// Inflation. +type Inflation struct { + Base +} + +// CreateClaimableBalance is the json resource representing a single operation whose type is +// CreateClaimableBalance. 
+type CreateClaimableBalance struct { + Base + Asset string `json:"asset"` + Amount string `json:"amount"` + Claimants []horizon.Claimant `json:"claimants"` +} + +// ClaimClaimableBalance is the json resource representing a single operation whose type is +// ClaimClaimableBalance. +type ClaimClaimableBalance struct { + Base + BalanceID string `json:"balance_id"` + Claimant string `json:"claimant"` + ClaimantMuxed string `json:"claimant_muxed,omitempty"` + ClaimantMuxedID uint64 `json:"claimant_muxed_id,omitempty,string"` +} + +// BeginSponsoringFutureReserves is the json resource representing a single operation whose type is +// BeginSponsoringFutureReserves. +type BeginSponsoringFutureReserves struct { + Base + SponsoredID string `json:"sponsored_id"` +} + +// EndSponsoringFutureReserves is the json resource representing a single operation whose type is +// EndSponsoringFutureReserves. +type EndSponsoringFutureReserves struct { + Base + BeginSponsor string `json:"begin_sponsor,omitempty"` + BeginSponsorMuxed string `json:"begin_sponsor_muxed,omitempty"` + BeginSponsorMuxedID uint64 `json:"begin_sponsor_muxed_id,omitempty,string"` +} + +// RevokeSponsorship is the json resource representing a single operation whose type is +// RevokeSponsorship. +type RevokeSponsorship struct { + Base + AccountID *string `json:"account_id,omitempty"` + ClaimableBalanceID *string `json:"claimable_balance_id,omitempty"` + DataAccountID *string `json:"data_account_id,omitempty"` + DataName *string `json:"data_name,omitempty"` + OfferID *int64 `json:"offer_id,omitempty,string"` + TrustlineAccountID *string `json:"trustline_account_id,omitempty"` + TrustlineLiquidityPoolID *string `json:"trustline_liquidity_pool_id,omitempty"` + TrustlineAsset *string `json:"trustline_asset,omitempty"` + SignerAccountID *string `json:"signer_account_id,omitempty"` + SignerKey *string `json:"signer_key,omitempty"` +} + +// Clawback is the json resource representing a single operation whose type is +// Clawback. +type Clawback struct { + Base + base.Asset + From string `json:"from"` + FromMuxed string `json:"from_muxed,omitempty"` + FromMuxedID uint64 `json:"from_muxed_id,omitempty,string"` + Amount string `json:"amount"` +} + +// ClawbackClaimableBalance is the json resource representing a single operation whose type is +// ClawbackClaimableBalance. +type ClawbackClaimableBalance struct { + Base + BalanceID string `json:"balance_id"` +} + +// SetTrustLineFlags is the json resource representing a single operation whose type is +// SetTrustLineFlags. +type SetTrustLineFlags struct { + Base + base.Asset + Trustor string `json:"trustor"` + SetFlags []int `json:"set_flags,omitempty"` + SetFlagsS []string `json:"set_flags_s,omitempty"` + ClearFlags []int `json:"clear_flags,omitempty"` + ClearFlagsS []string `json:"clear_flags_s,omitempty"` +} + +// LiquidityPoolDeposit is the json resource representing a single operation whose type is +// LiquidityPoolDeposit. +type LiquidityPoolDeposit struct { + Base + LiquidityPoolID string `json:"liquidity_pool_id"` + ReservesMax []base.AssetAmount `json:"reserves_max"` + MinPrice string `json:"min_price"` + MinPriceR base.Price `json:"min_price_r"` + MaxPrice string `json:"max_price"` + MaxPriceR base.Price `json:"max_price_r"` + ReservesDeposited []base.AssetAmount `json:"reserves_deposited"` + SharesReceived string `json:"shares_received"` +} + +// LiquidityPoolWithdraw is the json resource representing a single operation whose type is +// LiquidityPoolWithdraw. 
+type LiquidityPoolWithdraw struct { + Base + LiquidityPoolID string `json:"liquidity_pool_id"` + ReservesMin []base.AssetAmount `json:"reserves_min"` + Shares string `json:"shares"` + ReservesReceived []base.AssetAmount `json:"reserves_received"` +} + +// Operation interface contains methods implemented by the operation types +type Operation interface { + PagingToken() string + GetType() string + GetID() string + GetTransactionHash() string + IsTransactionSuccessful() bool +} + +// GetType returns the type of operation +func (base Base) GetType() string { + return base.Type +} + +// GetTypeI returns the ID of type of operation +func (base Base) GetTypeI() int32 { + return base.TypeI +} + +func (base Base) GetID() string { + return base.ID +} + +func (base Base) GetTransactionHash() string { + return base.TransactionHash +} + +func (base Base) IsTransactionSuccessful() bool { + return base.TransactionSuccessful +} + +// OperationsPage is the json resource representing a page of operations. +// OperationsPage.Record can contain various operation types. +type OperationsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []Operation + } `json:"_embedded"` +} + +func (ops *OperationsPage) UnmarshalJSON(data []byte) error { + var opsPage struct { + Links hal.Links `json:"_links"` + Embedded struct { + Records []interface{} + } `json:"_embedded"` + } + + if err := json.Unmarshal(data, &opsPage); err != nil { + return err + } + + for _, j := range opsPage.Embedded.Records { + var b Base + dataString, err := json.Marshal(j) + if err != nil { + return err + } + if err = json.Unmarshal(dataString, &b); err != nil { + return err + } + + op, err := UnmarshalOperation(b.TypeI, dataString) + if err != nil { + return err + } + + ops.Embedded.Records = append(ops.Embedded.Records, op) + } + + ops.Links = opsPage.Links + return nil +} + +// UnmarshalOperation decodes responses to the correct operation struct +func UnmarshalOperation(operationTypeID int32, dataString []byte) (ops Operation, err error) { + switch xdr.OperationType(operationTypeID) { + case xdr.OperationTypeCreateAccount: + var op CreateAccount + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypePathPaymentStrictReceive: + var op PathPayment + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypePayment: + var op Payment + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeManageSellOffer: + var op ManageSellOffer + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeCreatePassiveSellOffer: + var op CreatePassiveSellOffer + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeSetOptions: + var op SetOptions + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeChangeTrust: + var op ChangeTrust + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeAllowTrust: + var op AllowTrust + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeAccountMerge: + var op AccountMerge + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeInflation: + var op Inflation + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeManageData: + 
var op ManageData + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeBumpSequence: + var op BumpSequence + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeManageBuyOffer: + var op ManageBuyOffer + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypePathPaymentStrictSend: + var op PathPaymentStrictSend + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeCreateClaimableBalance: + var op CreateClaimableBalance + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeClaimClaimableBalance: + var op ClaimClaimableBalance + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeBeginSponsoringFutureReserves: + var op BeginSponsoringFutureReserves + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeEndSponsoringFutureReserves: + var op EndSponsoringFutureReserves + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeRevokeSponsorship: + var op RevokeSponsorship + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeClawback: + var op Clawback + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeClawbackClaimableBalance: + var op ClawbackClaimableBalance + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeSetTrustLineFlags: + var op SetTrustLineFlags + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeLiquidityPoolDeposit: + var op LiquidityPoolDeposit + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + case xdr.OperationTypeLiquidityPoolWithdraw: + var op LiquidityPoolWithdraw + if err = json.Unmarshal(dataString, &op); err != nil { + return + } + ops = op + default: + err = errors.New("Invalid operation format, unable to unmarshal json response") + } + + return +} diff --git a/protocols/horizon/operations/main_test.go b/protocols/horizon/operations/main_test.go new file mode 100644 index 0000000000..b08a0d5f25 --- /dev/null +++ b/protocols/horizon/operations/main_test.go @@ -0,0 +1,32 @@ +package operations + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/xdr" +) + +func TestTypeNamesAllCovered(t *testing.T) { + for typ, s := range xdr.OperationTypeToStringMap { + _, ok := TypeNames[xdr.OperationType(typ)] + assert.True(t, ok, s) + } +} + +func TestUnmarshalOperationAllCovered(t *testing.T) { + mistmatchErr := errors.New("Invalid operation format, unable to unmarshal json response") + for typ, s := range xdr.OperationTypeToStringMap { + _, err := UnmarshalOperation(typ, []byte{}) + assert.Error(t, err, s) + // it should be a parsing error, not the default branch + assert.NotEqual(t, mistmatchErr, err, s) + } + + // make sure the check works for an unknown operation type + _, err := UnmarshalOperation(200000, []byte{}) + assert.Error(t, err) + assert.Equal(t, mistmatchErr, err) +} diff --git a/protocols/stellarcore/info_response.go b/protocols/stellarcore/info_response.go new file mode 100644 index 0000000000..248d0e8b9a --- /dev/null +++ b/protocols/stellarcore/info_response.go @@ -0,0 +1,34 @@ +package 
stellarcore + +// InfoResponse is the json response returned from stellar-core's /info +// endpoint. +type InfoResponse struct { + Info struct { + Build string `json:"build"` + Network string `json:"network"` + ProtocolVersion int `json:"protocol_version"` + State string `json:"state"` + Ledger LedgerInfo `json:"ledger"` + + // TODO: all the other fields + } +} + +// LedgerInfo is the part of the stellar-core's info json response. +// It's returned under `ledger` key +type LedgerInfo struct { + Age int `json:"age"` + BaseFee int `json:"baseFee"` + BaseReserve int `json:"baseReserve"` + CloseTime int `json:"closeTime"` + Hash string `json:"hash"` + MaxTxSetSize int `json:"maxTxSetSize"` + Num int `json:"num"` + Version int `json:"version"` +} + +// IsSynced returns a boolean indicating whether stellarcore is synced with the +// network. +func (resp *InfoResponse) IsSynced() bool { + return resp.Info.State == "Synced!" +} diff --git a/protocols/stellarcore/info_response_test.go b/protocols/stellarcore/info_response_test.go new file mode 100644 index 0000000000..461a2cfcfe --- /dev/null +++ b/protocols/stellarcore/info_response_test.go @@ -0,0 +1,75 @@ +package stellarcore + +import "testing" +import "encoding/json" +import "github.com/stretchr/testify/require" +import "github.com/stretchr/testify/assert" + +func TestInfoResponse_IsSynced(t *testing.T) { + cases := []struct { + Name string + JSON string + Expected bool + }{ + { + Name: "synced", + JSON: `{ + "info": { + "UNSAFE_QUORUM": "UNSAFE QUORUM ALLOWED", + "build": "v0.6.4", + "ledger": { + "age": 0, + "closeTime": 1512512956, + "hash": "a0035988ef68f225df4fb37b4639b8648c2d77dc4b3b1b0f5cd3bfa385fb4cc3", + "num": 5787995 + }, + "network": "Test SDF Network ; September 2015", + "numPeers": 6, + "protocol_version": 8, + "quorum": { + "5787994": { + "agree": 3, + "disagree": 0, + "fail_at": 2, + "hash": "273af2", + "missing": 0, + "phase": "EXTERNALIZE" + } + }, + "state": "Synced!" 
+ } + }`, + Expected: true, + }, + { + Name: "joining scp", + JSON: `{ + "info": { + "UNSAFE_QUORUM": "UNSAFE QUORUM ALLOWED", + "build": "v0.6.4", + "ledger": { + "age": 17, + "closeTime": 1512520421, + "hash": "263c1e575422e960cb1b51a38feac8f54947d1cd6ba8c7f1da5302b063ad7045", + "num": 5789919 + }, + "network": "Test SDF Network ; September 2015", + "numPeers": 0, + "protocol_version": 8, + "state": "Joining SCP" + } + }`, + Expected: false, + }, + } + + for _, kase := range cases { + t.Run(kase.Name, func(t *testing.T) { + var resp InfoResponse + err := json.Unmarshal([]byte(kase.JSON), &resp) + require.NoError(t, err) + + assert.True(t, kase.Expected == resp.IsSynced(), "sync state is unexpected") + }) + } +} diff --git a/protocols/stellarcore/tx_response.go b/protocols/stellarcore/tx_response.go new file mode 100644 index 0000000000..ee8556adc3 --- /dev/null +++ b/protocols/stellarcore/tx_response.go @@ -0,0 +1,33 @@ +package stellarcore + +const ( + // TXStatusError represents the status value returned by stellar-core when an error occurred from + // submitting a transaction + TXStatusError = "ERROR" + + // TXStatusPending represents the status value returned by stellar-core when a transaction has + // been accepted for processing + TXStatusPending = "PENDING" + + // TXStatusDuplicate represents the status value returned by stellar-core when a submitted + // transaction is a duplicate + TXStatusDuplicate = "DUPLICATE" + + // TXStatusTryAgainLater represents the status value returned by stellar-core when a submitted + // transaction was not included in the previous 4 ledgers and get banned for being added in the + // next few ledgers. + TXStatusTryAgainLater = "TRY_AGAIN_LATER" +) + +// TXResponse represents the response returned from a submission request sent to stellar-core's /tx +// endpoint +type TXResponse struct { + Exception string `json:"exception"` + Error string `json:"error"` + Status string `json:"status"` +} + +// IsException returns true if the response represents an exception response from stellar-core +func (resp *TXResponse) IsException() bool { + return resp.Exception != "" +} diff --git a/randxdr/generator.go b/randxdr/generator.go new file mode 100644 index 0000000000..8cf1b38ffc --- /dev/null +++ b/randxdr/generator.go @@ -0,0 +1,52 @@ +package randxdr + +import ( + "math/rand" + + goxdr "github.com/xdrpp/goxdr/xdr" +) + +// Generator generates random XDR values. +type Generator struct { + // MaxBytesSize configures the upper bound limit for variable length + // opaque data and variable length strings + // https://tools.ietf.org/html/rfc4506#section-4.10 + MaxBytesSize uint32 + // MaxVecLen configures the upper bound limit for variable length arrays + // https://tools.ietf.org/html/rfc4506#section-4.13 + MaxVecLen uint32 + // Source is the rand.Source which is used by the Generator to create + // random values + Source rand.Source +} + +const ( + // DefaultMaxBytesSize is the MaxBytesSize value in the Generator returned by NewGenerator() + DefaultMaxBytesSize = 1024 + // DefaultMaxVecLen is the MaxVecLen value in the Generator returned by NewGenerator() + DefaultMaxVecLen = 10 + // DefaultSeed is the seed for the Source value in the Generator returned by NewGenerator() + DefaultSeed = 99 +) + +// NewGenerator returns a new Generator instance configured with default settings. +// The returned Generator is deterministic but it is not thread-safe. 
+func NewGenerator() Generator { + return Generator{ + MaxBytesSize: DefaultMaxBytesSize, + MaxVecLen: DefaultMaxVecLen, + // rand.NewSource returns a source which is *not* safe for concurrent use + Source: rand.NewSource(DefaultSeed), + } +} + +// Next modifies the given shape and populates it with random value fields. +func (g Generator) Next(shape goxdr.XdrType, presets []Preset) { + marshaller := &randMarshaller{ + rand: rand.New(g.Source), + maxBytesSize: g.MaxBytesSize, + maxVecLen: g.MaxVecLen, + presets: presets, + } + marshaller.Marshal("", shape) +} diff --git a/randxdr/generator_test.go b/randxdr/generator_test.go new file mode 100644 index 0000000000..45ad9da2ea --- /dev/null +++ b/randxdr/generator_test.go @@ -0,0 +1,55 @@ +package randxdr + +import ( + "bytes" + "testing" + + "github.com/stellar/go/gxdr" + "github.com/stellar/go/xdr" + + "github.com/stretchr/testify/assert" +) + +func TestRandLedgerCloseMeta(t *testing.T) { + gen := NewGenerator() + for i := 0; i < 100; i++ { + // generate random ledgers + lcm := &xdr.LedgerCloseMeta{} + shape := &gxdr.LedgerCloseMeta{} + gen.Next( + shape, + []Preset{ + {IsNestedInnerSet, SetVecLen(0)}, + }, + ) + // check that the goxdr representation matches the go-xdr representation + assert.NoError(t, gxdr.Convert(shape, lcm)) + + lcmBytes, err := lcm.MarshalBinary() + assert.NoError(t, err) + + assert.True(t, bytes.Equal(gxdr.Dump(shape), lcmBytes)) + } +} + +func TestGeneratorIsDeterministic(t *testing.T) { + gen := NewGenerator() + shape := &gxdr.LedgerCloseMeta{} + gen.Next( + shape, + []Preset{ + {IsNestedInnerSet, SetVecLen(0)}, + }, + ) + + otherGen := NewGenerator() + otherShape := &gxdr.LedgerCloseMeta{} + otherGen.Next( + otherShape, + []Preset{ + {IsNestedInnerSet, SetVecLen(0)}, + }, + ) + + assert.True(t, bytes.Equal(gxdr.Dump(shape), gxdr.Dump(otherShape))) +} diff --git a/randxdr/marshaller.go b/randxdr/marshaller.go new file mode 100644 index 0000000000..b51526c9b5 --- /dev/null +++ b/randxdr/marshaller.go @@ -0,0 +1,128 @@ +package randxdr + +import ( + "fmt" + "math/rand" + "reflect" + "sort" + + goxdr "github.com/xdrpp/goxdr/xdr" +) + +type randMarshaller struct { + useTag bool + tag uint32 + rand *rand.Rand + maxBytesSize uint32 + maxVecLen uint32 + presets []Preset +} + +func (*randMarshaller) Sprintf(f string, args ...interface{}) string { + return fmt.Sprintf(f, args...) +} + +func (rm *randMarshaller) randomKey(m interface{}) int32 { + keys := reflect.ValueOf(m).MapKeys() + // the keys of a map in golang are returned in random order + // here we sort the keys to ensure the selection is + // deterministic for the same rand seed + sort.Slice(keys, func(i, j int) bool { + return keys[i].Int() < keys[j].Int() + }) + return int32(keys[rm.rand.Intn(len(keys))].Int()) +} + +func (rm *randMarshaller) applyPreset(field string, i goxdr.XdrType) bool { + for _, preset := range rm.presets { + if preset.Selector(field, i) { + switch goxdr.XdrBaseType(i).(type) { + case goxdr.XdrEnum, goxdr.XdrNum32: + rm.useTag = false + } + + preset.Setter(rm, field, i) + return true + } + } + return false +} + +// Marshal populates a given goxdr.XdrType with random values. +// +// Every complex goxdr.XdrType has functions like XdrRecurse() which +// allow you to visit subfields of the complex type. That is how +// randMarshaller is able to populate subfields of a complex type with +// random values. +// +// Note that randMarshaller is stateful because of how union types are handled. +// Therefore Marshal() should not be used concurrently. 
+func (rm *randMarshaller) Marshal(field string, i goxdr.XdrType) { + if rm.applyPreset(field, i) { + return + } + + switch t := goxdr.XdrBaseType(i).(type) { + case goxdr.XdrVarBytes: + bound := t.XdrBound() + if bound > rm.maxBytesSize { + bound = rm.maxBytesSize + } + bound++ + bs := make([]byte, rm.rand.Uint32()%bound) + rm.rand.Read(bs) + t.SetByteSlice(bs) + case goxdr.XdrBytes: + // t.GetByteSlice() returns the underlying byte slice + // rm.rand.Read() will fill that byte slice with random values + rm.rand.Read(t.GetByteSlice()) + case goxdr.XdrVec: + bound := t.XdrBound() + if bound > rm.maxVecLen { + bound = rm.maxVecLen + } + bound++ + vecLen := rm.rand.Uint32() % bound + t.SetVecLen(vecLen) + t.XdrMarshalN(rm, field, vecLen) + case goxdr.XdrPtr: + present := rm.rand.Uint32()&1 == 1 + t.SetPresent(present) + t.XdrMarshalValue(rm, field) + case *goxdr.XdrBool: + t.SetU32(rm.rand.Uint32() & 1) + case goxdr.XdrEnum: + if rm.useTag { + rm.useTag = false + t.SetU32(rm.tag) + } else { + t.SetU32(uint32(rm.randomKey(t.XdrEnumNames()))) + } + case goxdr.XdrNum32: + if rm.useTag { + rm.useTag = false + t.SetU32(rm.tag) + } else { + t.SetU32(rm.rand.Uint32()) + } + case goxdr.XdrNum64: + t.SetU64(rm.rand.Uint64()) + case goxdr.XdrUnion: + // If we have an XDR union we need to set the tag of the union. + // However, there is no SetTag() function in the goxdr.XdrUnion interface. + // We must rely on these two facts: + // * when XdrRecurse() is called on a union, the first field which will be marshalled is the tag field + // * the tag field can be one of two types: uint32 or enum + if m := t.XdrValidTags(); m != nil { + rm.tag = uint32(rm.randomKey(m)) + rm.useTag = true + // The next field the marshaller will visit is the tag field. + // Once the tag is set, we need to toggle rm.useTag to false. + } + t.XdrRecurse(rm, field) + case goxdr.XdrAggregate: + t.XdrRecurse(rm, field) + default: + panic(fmt.Sprintf("field %s has unexpected xdr type %v", field, t)) + } +} diff --git a/randxdr/presets.go b/randxdr/presets.go new file mode 100644 index 0000000000..b77b0ec55a --- /dev/null +++ b/randxdr/presets.go @@ -0,0 +1,140 @@ +package randxdr + +import ( + "math" + "regexp" + "strings" + + goxdr "github.com/xdrpp/goxdr/xdr" +) + +// Selector is function used to match fields of a goxdr.XdrType +type Selector func(string, goxdr.XdrType) bool + +// Setter is a function used to set field values for a goxdr.XdrType +type Setter func(*randMarshaller, string, goxdr.XdrType) + +// Preset can be used to restrict values for specific fields of a goxdr.XdrType. +type Preset struct { + Selector Selector + Setter Setter +} + +// FieldEquals returns a Selector which matches on a field name by equality +func FieldEquals(toMatch string) Selector { + return func(name string, xdrType goxdr.XdrType) bool { + return name == toMatch + } +} + +// FieldMatches returns a Selector which matches on a field name by regexp +func FieldMatches(r *regexp.Regexp) Selector { + return func(name string, xdrType goxdr.XdrType) bool { + return r.MatchString(name) + } +} + +// And is a Selector which returns true if the given pair of selectors +// match the field. 
+func And(a, b Selector) Selector { + return func(s string, xdrType goxdr.XdrType) bool { + return a(s, xdrType) && b(s, xdrType) + } +} + +// IsPtr is a Selector which matches on all XDR pointer fields +var IsPtr Selector = func(name string, xdrType goxdr.XdrType) bool { + _, ok := goxdr.XdrBaseType(xdrType).(goxdr.XdrPtr) + return ok +} + +// IsNestedInnerSet is a Selector which identifies nesting for the following xdr type: +// struct SCPQuorumSet +// { +// uint32 threshold; +// PublicKey validators<>; +// SCPQuorumSet innerSets<>; +// }; +// supports things like: A,B,C,(D,E,F),(G,H,(I,J,K,L)) +// only allows 2 levels of nesting +var IsNestedInnerSet Selector = func(name string, xdrType goxdr.XdrType) bool { + if strings.HasSuffix(name, ".innerSets") && strings.Count(name, ".innerSets[") > 0 { + _, ok := goxdr.XdrBaseType(xdrType).(goxdr.XdrVec) + return ok + } + return false +} + +// SetPtr is a Setter which sets the xdr pointer to null if present is false +func SetPtr(present bool) Setter { + return func(m *randMarshaller, name string, xdrType goxdr.XdrType) { + p := goxdr.XdrBaseType(xdrType).(goxdr.XdrPtr) + p.SetPresent(present) + p.XdrMarshalValue(m, name) + } +} + +// SetVecLen returns a Setter which sets the length of a variable length +// array ( https://tools.ietf.org/html/rfc4506#section-4.13 ) to a fixed value +func SetVecLen(vecLen uint32) Setter { + return func(x *randMarshaller, field string, xdrType goxdr.XdrType) { + v := goxdr.XdrBaseType(xdrType).(goxdr.XdrVec) + v.SetVecLen(vecLen) + v.XdrMarshalN(x, field, vecLen) + } +} + +// SetU32 returns a Setter which sets a uint32 XDR field to a fixed value +func SetU32(val uint32) Setter { + return func(x *randMarshaller, field string, xdrType goxdr.XdrType) { + f := goxdr.XdrBaseType(xdrType).(goxdr.XdrNum32) + f.SetU32(val) + } +} + +// SetPositiveNum64 returns a Setter which sets a uint64 XDR field to a random positive value +var SetPositiveNum64 Setter = func(x *randMarshaller, field string, xdrType goxdr.XdrType) { + f := goxdr.XdrBaseType(xdrType).(goxdr.XdrNum64) + f.SetU64(uint64(x.rand.Int63n(math.MaxInt64))) +} + +// SetPositiveNum32 returns a Setter which sets a uint32 XDR field to a random positive value +var SetPositiveNum32 Setter = func(x *randMarshaller, field string, xdrType goxdr.XdrType) { + + f := goxdr.XdrBaseType(xdrType).(goxdr.XdrNum32) + f.SetU32(uint32(x.rand.Int31n(math.MaxInt32))) +} + +const alphaNumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +// SetAssetCode returns a Setter which sets an asset code XDR field to a +// random alphanumeric string right-padded with 0 bytes +var SetAssetCode Setter = func(x *randMarshaller, field string, xdrType goxdr.XdrType) { + f := goxdr.XdrBaseType(xdrType).(goxdr.XdrBytes) + slice := f.GetByteSlice() + var end int + switch len(slice) { + case 4: + end = int(x.rand.Int31n(4)) + case 12: + end = int(4 + x.rand.Int31n(8)) + } + + for i := 0; i <= end; i++ { + slice[i] = alphaNumeric[x.rand.Int31n(int32(len(alphaNumeric)))] + } +} + +// SetPrintableASCII returns a Setter which sets a home domain string32 with a random +// printable ascii string +var SetPrintableASCII Setter = func(x *randMarshaller, field string, xdrType goxdr.XdrType) { + f := goxdr.XdrBaseType(xdrType).(goxdr.XdrString) + end := int(x.rand.Int31n(int32(f.Bound))) + var text []byte + for i := 0; i <= end; i++ { + // printable ascii range is from 32 - 127 + printableChar := byte(32 + x.rand.Int31n(95)) + text = append(text, printableChar) + } + 
f.SetString(string(text)) +} diff --git a/services/federation/CHANGELOG.md b/services/federation/CHANGELOG.md index 7476d1746b..14436e3920 100644 --- a/services/federation/CHANGELOG.md +++ b/services/federation/CHANGELOG.md @@ -6,6 +6,35 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). As this project is pre 1.0, breaking changes may happen for minor version bumps. A breaking change will get clearly notified in this log. +## Unreleased + +* Dropped support for Go 1.12. +* Dropped support for Go 1.13. +* Log User-Agent header in request logs. + +## [v0.3.0] - 2019-11-20 + +### Changed + +- BREAKING CHANGE: MySQL is no longer supported. To migrate your data to postgresql use any of the tools provided [here](https://wiki.postgresql.org/wiki/Converting_from_other_Databases_to_PostgreSQL#MySQL). +- Add `ReadTimeout` to HTTP server configuration to fix potential DoS vector. +- Dropped support for Go 1.10, 1.11. + +## [v0.2.1] - 2017-02-14 + +### Changed + +- BREAKING CHANGE: The `url` database configuration has been renamed to `dsn` to more accurately reflect its content. + +### Fixed + +- TLS support re-enabled. + +### Added + +- Reverse federation is now optional. +- Logging: http requests will be logged at the "Info" log level + ## [v0.2.0] - 2016-08-17 Initial release after import from https://github.com/stellar/federation diff --git a/services/federation/README.md b/services/federation/README.md index 55cb84fcec..49f2412d3d 100644 --- a/services/federation/README.md +++ b/services/federation/README.md @@ -1,7 +1,7 @@ # federation server -Go implementation of [Federation](https://www.stellar.org/developers/learn/concepts/federation.html) protocol server. This federation server is designed to be dropped in to your existing infrastructure. It can be configured to pull the data it needs out of your existing DB. +Go implementation of [Federation](https://developers.stellar.org/docs/glossary/federation/) protocol server. This federation server is designed to be dropped in to your existing infrastructure. It can be configured to pull the data it needs out of your existing DB. ## Downloading the server @@ -13,11 +13,15 @@ By default this server uses a config file named `federation.cfg` in the current * `port` - server listening port * `database` - * `type` - database type (sqlite3, mysql, postgres) - * `dsn` - The DSN (data source name) used to connect to the database connection. This value should be appropriate for the databse type chosen. + * `type` - database type (sqlite3, postgres) + * `dsn` - The DSN (data source name) used to connect to the database connection. This value should be appropriate for the database type chosen. + * for `postgres`: `postgres://user:password@host/dbname?sslmode=sslmode` ([more info](https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters)) * `queries` - * `federation` - Implementation dependent query to fetch federation results, should return either 1 or 3 columns. These columns should be labeled `id`,`memo`,`memo_type`. Memo and memo_type are optional - see [Federation](https://www.stellar.org/developers/learn/concepts/federation.html) docs for more detail). When executed, this query will be provided with two input parameters, the first will be the name portion of a stellar address and the second will be the domain portion of a stellar address. For example, a request for `scott*stellar.org` would trigger a query with two input parameters, `scott` and `stellar.org` respectively. 
- * `reverse-federation` - A SQL query to fetch reverse federation results that should return two columns, labeled `name` and `domain`. When executed, this query will be provided with one input parameter, a [stellar account ID](https://www.stellar.org/developers/guides/concepts/accounts.html#account-id) used to lookup the name and domain mapping. + * `federation` - Implementation dependent query to fetch federation results, should return either 1 or 3 columns. These columns should be labeled `id`,`memo`,`memo_type`. Memo and memo_type are optional - see [Federation](https://developers.stellar.org/docs/glossary/federation/) docs for more detail). When executed, this query will be provided with two input parameters, the first will be the name portion of a stellar address and the second will be the domain portion of a stellar address. For example, a request for `scott*stellar.org` would trigger a query with two input parameters, `scott` and `stellar.org` respectively. + * `reverse-federation` - A SQL query to fetch reverse federation results that should return two columns, labeled `name` and `domain`. When executed, this query will be provided with one input parameter, a [stellar account ID](https://developers.stellar.org/docs/glossary/accounts/#account-id) used to lookup the name and domain mapping. + + If reverse-lookup isn't supported (e.g. you have a single Stellar account for all users), leave this entry out. + * `tls` (only when running HTTPS server) * `certificate-file` - a file containing a certificate * `private-key-file` - a file containing a matching private key @@ -32,18 +36,18 @@ In this section you can find config examples for the two main ways of setting up ### #1: Every user has their own Stellar account -In case every user owns Stellar account you don't need `memo`. You can simply return `id` based on the username. Your `queries` section could look like this: +In the case that every user owns a Stellar account, you don't need `memo`. You can simply return `id` based on the username. Your `queries` section could look like this: ```toml port = 8000 [database] -type = "mysql" -url = "root:@/dbname" +type = "postgres" +dsn = "postgres://user:password@host/dbname?sslmode=sslmode" [queries] -federation = "SELECT account_id as id FROM Users WHERE username = ?" -reverse-federation = "SELECT username as name FROM Users WHERE account_id = $1" +federation = "SELECT account_id as id FROM Users WHERE username = ? AND domain = ?" +reverse-federation = "SELECT username as name, domain FROM Users WHERE account_id = ?" ``` @@ -51,18 +55,18 @@ reverse-federation = "SELECT username as name FROM Users WHERE account_id = $1" If you have a single Stellar account for all incoming transactions you need to use `memo` to check which internal account should receive the payment. -Let's say that your Stellar account ID is: `GAHG6B6QWTC3YNJIKJYUFGRMQNQNEGBALDYNZUEAPVCN2SGIKHTQIKPV` and every user has an `id` and `username` in your database. Then your `queries` section could look like this: +Let's say that your Stellar account ID is: `GD6WU64OEP5C4LRBH6NK3MHYIA2ADN6K6II6EXPNVUR3ERBXT4AN4ACD` and every user has an `id` and `username` in your database. 
Then your `queries` section could look like this: ```toml port = 8000 [database] -type = "mysql" -url = "root:@/dbname" +type = "postgres" +dsn = "postgres://user:password@host/dbname?sslmode=sslmode" [queries] federation = "SELECT username as memo, 'text' as memo_type, 'GD6WU64OEP5C4LRBH6NK3MHYIA2ADN6K6II6EXPNVUR3ERBXT4AN4ACD' as id FROM Users WHERE username = ? AND domain = ?" -reverse-federation = "SELECT username as name, domain FROM Users WHERE account_id = ?" +# No entry for `reverse-federation` since a reverse-lookup isn't possible ``` ## Providing federation for a single domain @@ -70,8 +74,8 @@ reverse-federation = "SELECT username as name, domain FROM Users WHERE account_i In the event that your organization only wants to offer federation for a single domain, a little bit of trickery can be used to configure your queries to satisfy this use case. For example, let's say you own `acme.org` and want to provide only results for that domain. The following example config illustrates: ```toml -federation = "SELECT username as memo, 'text' as memo_type, 'GD6WU64OEP5C4LRBH6NK3MHYIA2ADN6K6II6EXPNVUR3ERBXT4AN4ACD' as id FROM Users WHERE username = ? AND ? = 'acme.org" -reverse-federation = "SELECT username as name, 'acme.org' FROM Users WHERE account_id = ?" +federation = "SELECT username as memo, 'text' as memo_type, 'GD6WU64OEP5C4LRBH6NK3MHYIA2ADN6K6II6EXPNVUR3ERBXT4AN4ACD' as id FROM Users WHERE username = ? AND ? = 'acme.org'" +reverse-federation = "SELECT username as name, 'acme.org' as domain FROM Users WHERE account_id = ?" ``` Notice that SQL fragment `? = 'acme.org"` on the `federation` query: It ensures the incoming query is for the correct domain. Additionally, the `reverse-federation` query always returns `acme.org` for the domain. @@ -102,4 +106,4 @@ After successful completion, you should find `bin/federation` is present in your ``` go test -``` \ No newline at end of file +``` diff --git a/services/federation/federation.cfg b/services/federation/federation.cfg index e90ead3afd..78ecb0243e 100644 --- a/services/federation/federation.cfg +++ b/services/federation/federation.cfg @@ -2,7 +2,7 @@ port = 8000 [database] type = "postgres" -url = "postgres://localhost/federation_sample?sslmode=disable" +dsn = "postgres://localhost/federation_sample?sslmode=disable" [queries] federation = "SELECT id FROM people WHERE name = ? AND domain = ?" 
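For reference, a minimal client-side sketch of how a wallet could query a federation server configured as above (editorial example, not part of this change). It assumes the server listens on localhost:8000 as in the sample federation.cfg; the response fields (`stellar_address`, `account_id`, plus the optional `memo`/`memo_type`) follow the federation protocol.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

// federationResponse mirrors the JSON returned by a federation "name" lookup.
type federationResponse struct {
	StellarAddress string `json:"stellar_address"`
	AccountID      string `json:"account_id"`
	MemoType       string `json:"memo_type,omitempty"`
	Memo           string `json:"memo,omitempty"`
}

func main() {
	// Resolve scott*stellar.org against a locally running federation server.
	q := url.Values{"q": {"scott*stellar.org"}, "type": {"name"}}
	resp, err := http.Get("http://localhost:8000/federation?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var fr federationResponse
	if err := json.NewDecoder(resp.Body).Decode(&fr); err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %s (memo=%q, memo_type=%q)\n", fr.StellarAddress, fr.AccountID, fr.Memo, fr.MemoType)
}
```

With the single-account example config above, such a lookup would return the shared account ID with the username as a `text` memo; with the per-user config, it returns the user's own account ID and no memo.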
diff --git a/services/federation/main.go b/services/federation/main.go index 53e1c65e63..8898001a8b 100644 --- a/services/federation/main.go +++ b/services/federation/main.go @@ -4,10 +4,7 @@ import ( "fmt" "os" - "goji.io" - "goji.io/pat" - - "github.com/rs/cors" + "github.com/go-chi/chi" "github.com/spf13/cobra" "github.com/stellar/go/handlers/federation" "github.com/stellar/go/support/app" @@ -22,17 +19,14 @@ import ( type Config struct { Port int `valid:"required"` Database struct { - Type string `valid:"matches(^mysql|sqlite3|postgres$)"` - URL string `valid:"required"` + Type string `valid:"matches(^sqlite3|postgres$)"` + DSN string `valid:"required"` } `valid:"required"` Queries struct { Federation string `valid:"required"` - ReverseFederation string `toml:"reverse-federation" valid:"required"` + ReverseFederation string `toml:"reverse-federation" valid:"optional"` } `valid:"required"` - TLS struct { - CertificateFile string `toml:"certificate-file"` - PrivateKeyFile string `toml:"private-key-file"` - } `valid:"optional"` + TLS *config.TLS `valid:"optional"` } func main() { @@ -40,9 +34,9 @@ func main() { Use: "federation", Short: "stellar federation server", Long: ` -The stellar federation server let's you easily integrate the stellar federation -protocol with your organization. This is achieved by connecting the -application to your customer database and providing the appropriate queries in +The stellar federation server let's you easily integrate the stellar federation +protocol with your organization. This is achieved by connecting the +application to your customer database and providing the appropriate queries in the config file. `, Run: run, @@ -82,6 +76,7 @@ func run(cmd *cobra.Command, args []string) { http.Run(http.Config{ ListenAddr: addr, Handler: mux, + TLS: cfg.TLS, OnStarting: func() { log.Infof("starting federation server - %s", app.Version()) log.Infof("listening on %s", addr) @@ -94,7 +89,7 @@ func initDriver(cfg Config) (federation.Driver, error) { switch cfg.Database.Type { case "mysql": - dialect = "mysql" + return nil, errors.Errorf("Invalid db type: %s, mysql support is discontinued", cfg.Database.Type) case "postgres": dialect = "postgres" case "sqlite3": @@ -103,32 +98,42 @@ func initDriver(cfg Config) (federation.Driver, error) { return nil, errors.Errorf("Invalid db type: %s", cfg.Database.Type) } - repo, err := db.Open(dialect, cfg.Database.URL) + repo, err := db.Open(dialect, cfg.Database.DSN) if err != nil { return nil, errors.Wrap(err, "db open failed") } - return &federation.SQLDriver{ - DB: repo.DB.DB, // unwrap the repo to the bare *sql.DB instance, - LookupRecordQuery: cfg.Queries.Federation, + sqld := federation.SQLDriver{ + DB: repo.DB.DB, // unwrap the repo to the bare *sql.DB instance, + Dialect: dialect, + LookupRecordQuery: cfg.Queries.Federation, + } + + if cfg.Queries.ReverseFederation == "" { + return &sqld, nil + } + + rsqld := federation.ReverseSQLDriver{ + SQLDriver: federation.SQLDriver{ + DB: repo.DB.DB, + Dialect: dialect, + LookupRecordQuery: cfg.Queries.Federation, + }, LookupReverseRecordQuery: cfg.Queries.ReverseFederation, - }, nil + } + + return &rsqld, nil } -func initMux(driver federation.Driver) *goji.Mux { - mux := goji.NewMux() +func initMux(driver federation.Driver) *chi.Mux { + mux := http.NewAPIMux(log.DefaultLogger) - c := cors.New(cors.Options{ - AllowedOrigins: []string{"*"}, - AllowedHeaders: []string{"*"}, - AllowedMethods: []string{"GET"}, - }) - mux.Use(c.Handler) - - fed := &federation.Handler{driver} + fed := 
&federation.Handler{ + Driver: driver, + } - mux.Handle(pat.Get("/federation"), fed) - mux.Handle(pat.Get("/federation/"), fed) + mux.Get("/federation", fed.ServeHTTP) + mux.Get("/federation/", fed.ServeHTTP) return mux } diff --git a/services/federation/main_test.go b/services/federation/main_test.go new file mode 100644 index 0000000000..92594377a7 --- /dev/null +++ b/services/federation/main_test.go @@ -0,0 +1,39 @@ +package main + +import ( + "testing" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stellar/go/support/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInitDriver_dialect(t *testing.T) { + c := Config{} + + testCases := []struct { + dbType string + dbDSN string + wantErr error + }{ + {dbType: "", wantErr: errors.New("Invalid db type: ")}, + {dbType: "postgres", dbDSN: dbtest.Postgres(t).DSN, wantErr: nil}, + {dbType: "mysql", wantErr: errors.New("Invalid db type: mysql, mysql support is discontinued")}, + {dbType: "bogus", wantErr: errors.New("Invalid db type: bogus")}, + } + + for _, tc := range testCases { + t.Run(tc.dbType, func(t *testing.T) { + c.Database.Type = tc.dbType + c.Database.DSN = tc.dbDSN + _, err := initDriver(c) + if tc.wantErr == nil { + require.Nil(t, err) + } else { + require.NotNil(t, err) + assert.Equal(t, tc.wantErr.Error(), err.Error()) + } + }) + } +} diff --git a/services/friendbot/CHANGELOG.md b/services/friendbot/CHANGELOG.md new file mode 100644 index 0000000000..05d09ac778 --- /dev/null +++ b/services/friendbot/CHANGELOG.md @@ -0,0 +1,23 @@ +# Changelog + +All notable changes to this project will be documented in this +file. This project adheres to [Semantic Versioning](http://semver.org/). + +As this project is pre 1.0, breaking changes may happen for minor version +bumps. A breaking change will get clearly notified in this log. + +## Unreleased + +* Log User-Agent header in request logs. + +## [v0.0.2] - 2019-11-20 + +### Changed + +- Add `ReadTimeout` to HTTP server configuration to fix potential DoS vector. + +## [v0.0.1] - 2018-10-16 + +### Added + +- Extracted friendbot out of horizon diff --git a/services/friendbot/Makefile b/services/friendbot/Makefile new file mode 100644 index 0000000000..49ab73a5cd --- /dev/null +++ b/services/friendbot/Makefile @@ -0,0 +1,16 @@ +# Check if we need to prepend docker commands with sudo +SUDO := $(shell docker version >/dev/null 2>&1 || echo "sudo") + +# If TAG is not provided set default value +TAG ?= stellar/friendbot:$(shell git rev-parse --short HEAD)$(and $(shell git status -s),-dirty-$(shell id -u -n)) +# https://github.com/opencontainers/image-spec/blob/master/annotations.md +BUILD_DATE := $(shell date -u +%FT%TZ) + +docker-build: + cd ../../ && \ + $(SUDO) docker build --pull --label org.opencontainers.image.created="$(BUILD_DATE)" \ + -f services/friendbot/docker/Dockerfile -t $(TAG) . + +docker-push: + cd ../../ && \ + $(SUDO) docker push $(TAG) diff --git a/services/friendbot/README.md b/services/friendbot/README.md new file mode 100644 index 0000000000..56e894d54b --- /dev/null +++ b/services/friendbot/README.md @@ -0,0 +1,7 @@ +# Friendbot Service for the Stellar Test Network + +Friendbot helps users of the Stellar testnet by exposing a REST endpoint that creates & funds new accounts. + +Horizon needs to be started with the following command line param: --friendbot-url="http://localhost:8004/" +This will forward any query params received against /friendbot to the friendbot instance. 
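A minimal sketch of funding a new testnet account against a locally running friendbot (editorial example, not part of this change). The `addr` query parameter matches the handler added later in this change; the localhost:8004 URL and the root path are assumptions taken from the `--friendbot-url` example above.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Destination account to create and fund (example key taken from the friendbot tests).
	addr := "GDJIN6W6PLTPKLLM57UW65ZH4BITUXUMYQHIMAZFYXF45PZVAWDBI77Z"

	resp, err := http.Get("http://localhost:8004/?addr=" + url.QueryEscape(addr))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// On success the handler renders the Horizon transaction that funded the account.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```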
+The ideal setup for horizon is to proxy all requests to the /friendbot url to the friendbot service diff --git a/services/friendbot/docker/Dockerfile b/services/friendbot/docker/Dockerfile new file mode 100644 index 0000000000..953a006325 --- /dev/null +++ b/services/friendbot/docker/Dockerfile @@ -0,0 +1,12 @@ +FROM golang:1.17 as build + +ADD . /src/friendbot +WORKDIR /src/friendbot +RUN go build -o /bin/friendbot ./services/friendbot + +FROM ubuntu:20.04 + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends ca-certificates +COPY --from=build /bin/friendbot /app/ +EXPOSE 8004 +ENTRYPOINT ["/app/friendbot"] diff --git a/services/friendbot/friendbot.cfg b/services/friendbot/friendbot.cfg new file mode 100644 index 0000000000..da8f7aefcf --- /dev/null +++ b/services/friendbot/friendbot.cfg @@ -0,0 +1,10 @@ +port = 8000 +friendbot_secret = "SDANVB2UMNILW5UIMWO5BOZ4QYYEKQ34JNFCTDKTTLCBRG2ELDTNRGAM" +network_passphrase = "Test SDF Network ; September 2015" +horizon_url = "https://horizon-testnet.stellar.org" +starting_balance = "10000.00" +num_minions = 1000 +base_fee = 100000 +minion_batch_size = 50 +submit_tx_retries_allowed = 5 + diff --git a/services/friendbot/init_friendbot.go b/services/friendbot/init_friendbot.go new file mode 100644 index 0000000000..ec8d1051cc --- /dev/null +++ b/services/friendbot/init_friendbot.go @@ -0,0 +1,169 @@ +package main + +import ( + "fmt" + "log" + "net/http" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/services/friendbot/internal" + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/txnbuild" +) + +func initFriendbot( + friendbotSecret string, + networkPassphrase string, + horizonURL string, + startingBalance string, + numMinions int, + baseFee int64, + minionBatchSize int, + submitTxRetriesAllowed int, +) (*internal.Bot, error) { + if friendbotSecret == "" || networkPassphrase == "" || horizonURL == "" || startingBalance == "" || numMinions < 0 { + return nil, errors.New("invalid input param(s)") + } + + // Guarantee that friendbotSecret is a seed, if not blank. + strkey.MustDecode(strkey.VersionByteSeed, friendbotSecret) + + hclient := &horizonclient.Client{ + HorizonURL: horizonURL, + HTTP: http.DefaultClient, + AppName: "friendbot", + } + + botKP, err := keypair.Parse(friendbotSecret) + if err != nil { + return nil, errors.Wrap(err, "parsing bot keypair") + } + + // Casting from the interface type will work, since we + // already confirmed that friendbotSecret is a seed. 
+ botKeypair := botKP.(*keypair.Full) + botAccount := internal.Account{AccountID: botKeypair.Address()} + // set default values + minionBalance := "101.00" + if numMinions == 0 { + numMinions = 1000 + } + if minionBatchSize == 0 { + minionBatchSize = 50 + } + if submitTxRetriesAllowed == 0 { + submitTxRetriesAllowed = 5 + } + log.Printf("Found all valid params, now creating %d minions", numMinions) + minions, err := createMinionAccounts(botAccount, botKeypair, networkPassphrase, startingBalance, minionBalance, numMinions, minionBatchSize, submitTxRetriesAllowed, baseFee, hclient) + if err != nil && len(minions) == 0 { + return nil, errors.Wrap(err, "creating minion accounts") + } + log.Printf("Adding %d minions to friendbot", len(minions)) + return &internal.Bot{Minions: minions}, nil +} + +func createMinionAccounts(botAccount internal.Account, botKeypair *keypair.Full, networkPassphrase, newAccountBalance, minionBalance string, + numMinions, minionBatchSize, submitTxRetriesAllowed int, baseFee int64, hclient horizonclient.ClientInterface) ([]internal.Minion, error) { + + var minions []internal.Minion + numRemainingMinions := numMinions + // Allow retries to account for testnet congestion + currentSubmitTxRetry := 0 + + for numRemainingMinions > 0 { + var ( + newMinions []internal.Minion + ops []txnbuild.Operation + ) + // Refresh the sequence number before submitting a new transaction. + rerr := botAccount.RefreshSequenceNumber(hclient) + if rerr != nil { + return minions, errors.Wrap(rerr, "refreshing bot seqnum") + } + // The tx will create min(numRemainingMinions, minionBatchSize) Minion accounts. + numCreateMinions := minionBatchSize + if numRemainingMinions < minionBatchSize { + numCreateMinions = numRemainingMinions + } + log.Printf("Creating %d new minion accounts", numCreateMinions) + for i := 0; i < numCreateMinions; i++ { + minionKeypair, err := keypair.Random() + if err != nil { + return minions, errors.Wrap(err, "making keypair") + } + newMinions = append(newMinions, internal.Minion{ + Account: internal.Account{AccountID: minionKeypair.Address()}, + Keypair: minionKeypair, + BotAccount: botAccount, + BotKeypair: botKeypair, + Horizon: hclient, + Network: networkPassphrase, + StartingBalance: newAccountBalance, + SubmitTransaction: internal.SubmitTransaction, + CheckSequenceRefresh: internal.CheckSequenceRefresh, + BaseFee: baseFee, + }) + + ops = append(ops, &txnbuild.CreateAccount{ + Destination: minionKeypair.Address(), + Amount: minionBalance, + }) + } + + // Build and submit batched account creation tx. 
+ tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: botAccount, + IncrementSequenceNum: true, + Operations: ops, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimeout(300), + }, + ) + if err != nil { + return minions, errors.Wrap(err, "unable to build tx") + } + + tx, err = tx.Sign(networkPassphrase, botKeypair) + if err != nil { + return minions, errors.Wrap(err, "unable to sign tx") + } + + txe, err := tx.Base64() + if err != nil { + return minions, errors.Wrap(err, "unable to serialize tx") + } + + resp, err := hclient.SubmitTransactionXDR(txe) + if err != nil { + log.Printf("%+v\n", resp) + switch e := err.(type) { + case *horizonclient.Error: + problemString := fmt.Sprintf("Problem[Type=%s, Title=%s, Status=%d, Detail=%s, Extras=%v]", e.Problem.Type, e.Problem.Title, e.Problem.Status, e.Problem.Detail, e.Problem.Extras) + // If we hit an error here due to network congestion, try again until we hit max # of retries allowed + if e.Problem.Status == http.StatusGatewayTimeout { + err = errors.Wrap(errors.Wrap(e, problemString), "submitting create accounts tx") + if currentSubmitTxRetry >= submitTxRetriesAllowed { + return minions, errors.Wrap(err, fmt.Sprintf("after retrying %d times", currentSubmitTxRetry)) + } + log.Println(err) + log.Println("trying again to submit create accounts tx") + currentSubmitTxRetry += 1 + continue + } + return minions, errors.Wrap(errors.Wrap(e, problemString), "submitting create accounts tx") + } + return minions, errors.Wrap(err, "submitting create accounts tx") + } + currentSubmitTxRetry = 0 + + // Process successful create accounts tx. + numRemainingMinions -= numCreateMinions + minions = append(minions, newMinions...) + log.Printf("Submitted create accounts tx for %d minions successfully", numCreateMinions) + } + return minions, nil +} diff --git a/services/friendbot/init_friendbot_test.go b/services/friendbot/init_friendbot_test.go new file mode 100644 index 0000000000..3063304508 --- /dev/null +++ b/services/friendbot/init_friendbot_test.go @@ -0,0 +1,91 @@ +package main + +import ( + "net/http" + "testing" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/friendbot/internal" + "github.com/stellar/go/support/render/problem" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestInitFriendbot_createMinionAccounts_success(t *testing.T) { + + randSecretKey := "SDLNA2YUQSFIWVEB57M6D3OOCJHFVCVQZJ33LPA656KJESVRK5DQUZOH" + botKP, err := keypair.Parse(randSecretKey) + assert.NoError(t, err) + + botKeypair := botKP.(*keypair.Full) + botAccountID := botKeypair.Address() + botAccountMock := horizon.Account{ + AccountID: botAccountID, + Sequence: "1", + } + botAccount := internal.Account{AccountID: botAccountID, Sequence: 1} + + horizonClientMock := horizonclient.MockClient{} + horizonClientMock. + On("AccountDetail", horizonclient.AccountRequest{ + AccountID: botAccountID, + }). + Return(botAccountMock, nil) + horizonClientMock. + On("SubmitTransactionXDR", mock.Anything). 
+ Return(horizon.Transaction{}, nil) + + numMinion := 1000 + minionBatchSize := 50 + submitTxRetriesAllowed := 5 + createdMinions, err := createMinionAccounts(botAccount, botKeypair, "Test SDF Network ; September 2015", "10000", "101", numMinion, minionBatchSize, submitTxRetriesAllowed, 1000, &horizonClientMock) + assert.NoError(t, err) + + assert.Equal(t, 1000, len(createdMinions)) +} + +func TestInitFriendbot_createMinionAccounts_timeoutError(t *testing.T) { + randSecretKey := "SDLNA2YUQSFIWVEB57M6D3OOCJHFVCVQZJ33LPA656KJESVRK5DQUZOH" + botKP, err := keypair.Parse(randSecretKey) + assert.NoError(t, err) + + botKeypair := botKP.(*keypair.Full) + botAccountID := botKeypair.Address() + botAccountMock := horizon.Account{ + AccountID: botAccountID, + Sequence: "1", + } + botAccount := internal.Account{AccountID: botAccountID, Sequence: 1} + + horizonClientMock := horizonclient.MockClient{} + horizonClientMock. + On("AccountDetail", horizonclient.AccountRequest{ + AccountID: botAccountID, + }). + Return(botAccountMock, nil) + + // Successful on first 3 calls only, and then a timeout error occurs + horizonClientMock. + On("SubmitTransactionXDR", mock.Anything). + Return(horizon.Transaction{}, nil).Times(3) + hError := &horizonclient.Error{ + Problem: problem.P{ + Type: "timeout", + Title: "Timeout", + Status: http.StatusGatewayTimeout, + }, + } + horizonClientMock. + On("SubmitTransactionXDR", mock.Anything). + Return(horizon.Transaction{}, hError) + + numMinion := 1000 + minionBatchSize := 50 + submitTxRetriesAllowed := 5 + createdMinions, err := createMinionAccounts(botAccount, botKeypair, "Test SDF Network ; September 2015", "10000", "101", numMinion, minionBatchSize, submitTxRetriesAllowed, 1000, &horizonClientMock) + assert.Equal(t, 150, len(createdMinions)) + assert.Error(t, err) + assert.Contains(t, err.Error(), "after retrying 5 times: submitting create accounts tx:") +} diff --git a/services/friendbot/internal/account.go b/services/friendbot/internal/account.go new file mode 100644 index 0000000000..e65815db6f --- /dev/null +++ b/services/friendbot/internal/account.go @@ -0,0 +1,45 @@ +package internal + +import ( + "strconv" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/support/errors" +) + +// Account implements the `txnbuild.Account` interface. +type Account struct { + AccountID string + Sequence int64 +} + +// GetAccountID returns the Account ID. +func (a Account) GetAccountID() string { + return a.AccountID +} + +// IncrementSequenceNumber increments the internal record of the +// account's sequence number by 1. +func (a Account) IncrementSequenceNumber() (int64, error) { + a.Sequence++ + return a.Sequence, nil +} + +func (a Account) GetSequenceNumber() (int64, error) { + return a.Sequence, nil +} + +// RefreshSequenceNumber gets an Account's correct in-memory sequence number from Horizon. 
+func (a *Account) RefreshSequenceNumber(hclient horizonclient.ClientInterface) error { + accountRequest := horizonclient.AccountRequest{AccountID: a.GetAccountID()} + accountDetail, err := hclient.AccountDetail(accountRequest) + if err != nil { + return errors.Wrap(err, "getting account detail") + } + seq, err := strconv.ParseInt(accountDetail.Sequence, 10, 64) + if err != nil { + return errors.Wrap(err, "parsing account seqnum") + } + a.Sequence = seq + return nil +} diff --git a/services/friendbot/internal/friendbot.go b/services/friendbot/internal/friendbot.go new file mode 100644 index 0000000000..1ad9af63d2 --- /dev/null +++ b/services/friendbot/internal/friendbot.go @@ -0,0 +1,36 @@ +package internal + +import ( + "log" + "sync" + + hProtocol "github.com/stellar/go/protocols/horizon" +) + +// Bot represents the friendbot subsystem and primarily delegates work +// to its Minions. +type Bot struct { + Minions []Minion + nextMinionIndex int + indexMux sync.Mutex +} + +// SubmitResult is the result from the asynchronous tx submission. +type SubmitResult struct { + maybeTransactionSuccess *hProtocol.Transaction + maybeErr error +} + +// Pay funds the account at `destAddress`. +func (bot *Bot) Pay(destAddress string) (*hProtocol.Transaction, error) { + bot.indexMux.Lock() + log.Printf("Selecting minion at index %d of max length %d", bot.nextMinionIndex, len(bot.Minions)) + minion := bot.Minions[bot.nextMinionIndex] + bot.nextMinionIndex = (bot.nextMinionIndex + 1) % len(bot.Minions) + bot.indexMux.Unlock() + resultChan := make(chan SubmitResult) + go minion.Run(destAddress, resultChan) + maybeSubmitResult := <-resultChan + close(resultChan) + return maybeSubmitResult.maybeTransactionSuccess, maybeSubmitResult.maybeErr +} diff --git a/services/friendbot/internal/friendbot_handler.go b/services/friendbot/internal/friendbot_handler.go new file mode 100644 index 0000000000..16dd548dae --- /dev/null +++ b/services/friendbot/internal/friendbot_handler.go @@ -0,0 +1,58 @@ +package internal + +import ( + "net/http" + "net/url" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/problem" +) + +// FriendbotHandler causes an account at `Address` to be created. +type FriendbotHandler struct { + Friendbot *Bot +} + +// Handle is a method that implements http.HandlerFunc +func (handler *FriendbotHandler) Handle(w http.ResponseWriter, r *http.Request) { + accountExistsProblem := problem.BadRequest + accountExistsProblem.Detail = ErrAccountExists.Error() + problem.RegisterError(ErrAccountExists, accountExistsProblem) + + result, err := handler.doHandle(r) + if err != nil { + problem.Render(r.Context(), w, err) + return + } + + hal.Render(w, *result) +} + +// doHandle is just a convenience method that returns the object to be rendered +func (handler *FriendbotHandler) doHandle(r *http.Request) (*horizon.Transaction, error) { + err := r.ParseForm() + if err != nil { + p := problem.BadRequest + p.Detail = "Request parameters are not escaped or incorrectly formatted." 
+ return nil, &p + } + + address, err := handler.loadAddress(r) + if err != nil { + return nil, problem.MakeInvalidFieldProblem("addr", err) + } + return handler.Friendbot.Pay(address) +} + +func (handler *FriendbotHandler) loadAddress(r *http.Request) (string, error) { + address := r.Form.Get("addr") + unescaped, err := url.QueryUnescape(address) + if err != nil { + return unescaped, err + } + + _, err = strkey.Decode(strkey.VersionByteAccountID, unescaped) + return unescaped, err +} diff --git a/services/friendbot/internal/friendbot_test.go b/services/friendbot/internal/friendbot_test.go new file mode 100644 index 0000000000..a9f2864c83 --- /dev/null +++ b/services/friendbot/internal/friendbot_test.go @@ -0,0 +1,75 @@ +package internal + +import ( + "sync" + "testing" + + "github.com/stellar/go/txnbuild" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stretchr/testify/assert" +) + +func TestFriendbot_Pay(t *testing.T) { + mockSubmitTransaction := func(minion *Minion, hclient horizonclient.ClientInterface, tx string) (*hProtocol.Transaction, error) { + // Instead of submitting the tx, we emulate a success. + txSuccess := hProtocol.Transaction{EnvelopeXdr: tx, Successful: true} + return &txSuccess, nil + } + + // Public key: GD25B4QI6KWVDWXDW25CIM7EKR6A6PBSWE2RCNSAC4NJQDQJXZJYMMKR + botSeed := "SCWNLYELENPBXN46FHYXETT5LJCYBZD5VUQQVW4KZPHFO2YTQJUWT4D5" + botKeypair, err := keypair.Parse(botSeed) + if !assert.NoError(t, err) { + return + } + botAccount := Account{AccountID: botKeypair.Address()} + + // Public key: GD4AGPPDFFHKK3Z2X4XZDRXX6GZQKP4FMLVQ5T55NDEYGG3GIP7BQUHM + minionSeed := "SDTNSEERJPJFUE2LSDNYBFHYGVTPIWY7TU2IOJZQQGLWO2THTGB7NU5A" + minionKeypair, err := keypair.Parse(minionSeed) + if !assert.NoError(t, err) { + return + } + + minion := Minion{ + Account: Account{ + AccountID: minionKeypair.Address(), + Sequence: 1, + }, + Keypair: minionKeypair.(*keypair.Full), + BotAccount: botAccount, + BotKeypair: botKeypair.(*keypair.Full), + Network: "Test SDF Network ; September 2015", + StartingBalance: "10000.00", + SubmitTransaction: mockSubmitTransaction, + CheckSequenceRefresh: CheckSequenceRefresh, + BaseFee: txnbuild.MinBaseFee, + } + fb := &Bot{Minions: []Minion{minion}} + + recipientAddress := "GDJIN6W6PLTPKLLM57UW65ZH4BITUXUMYQHIMAZFYXF45PZVAWDBI77Z" + txSuccess, err := fb.Pay(recipientAddress) + if !assert.NoError(t, err) { + return + } + expectedTxn := "AAAAAgAAAAD4Az3jKU6lbzq/L5HG9/GzBT+FYusOz71oyYMbZkP+GAAAAGQAAAAAAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAAPXQ8gjyrVHa47a6JDPkVHwPPDKxNRE2QBcamA4JvlOGAAAAAAAAAADShvreeub1LWzv6W93J+BROl6MxA6GAyXFy86/NQWGFAAAABdIdugAAAAAAAAAAAJmQ/4YAAAAQDRLEljDVYALnTk9mDceQEd5PrjQyE3LUAjstIyTWH5t/TP909F66TgEfBFKMxSKF6fka7ZuPcSs40ix4AomEgoJvlOGAAAAQPSGs88OwXubz7UT6nFhvhF47EQfaOsmiIsOkjgzUrmBoypJQTmMMbgeix0kdbfHqS75+iefJpdXLNFDreGnxgE=" + assert.Equal(t, expectedTxn, txSuccess.EnvelopeXdr) + + // Don't assert on tx values below, since the completion order is unknown. 
+ var wg sync.WaitGroup + wg.Add(2) + go func() { + _, err := fb.Pay(recipientAddress) + assert.NoError(t, err) + wg.Done() + }() + go func() { + _, err := fb.Pay(recipientAddress) + assert.NoError(t, err) + wg.Done() + }() + wg.Wait() +} diff --git a/services/friendbot/internal/minion.go b/services/friendbot/internal/minion.go new file mode 100644 index 0000000000..826826728e --- /dev/null +++ b/services/friendbot/internal/minion.go @@ -0,0 +1,143 @@ +package internal + +import ( + "fmt" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/txnbuild" +) + +const createAccountAlreadyExistXDR = "AAAAAAAAAGT/////AAAAAQAAAAAAAAAA/////AAAAAA=" + +var ErrAccountExists error = errors.New(fmt.Sprintf("createAccountAlreadyExist (%s)", createAccountAlreadyExistXDR)) + +// Minion contains a Stellar channel account and Go channels to communicate with friendbot. +type Minion struct { + Account Account + Keypair *keypair.Full + BotAccount txnbuild.Account + BotKeypair *keypair.Full + Horizon horizonclient.ClientInterface + Network string + StartingBalance string + BaseFee int64 + + // Mockable functions + SubmitTransaction func(minion *Minion, hclient horizonclient.ClientInterface, tx string) (*hProtocol.Transaction, error) + CheckSequenceRefresh func(minion *Minion, hclient horizonclient.ClientInterface) error + + // Uninitialized. + forceRefreshSequence bool +} + +// Run reads a payment destination address and an output channel. It attempts +// to pay that address and submits the result to the channel. +func (minion *Minion) Run(destAddress string, resultChan chan SubmitResult) { + err := minion.CheckSequenceRefresh(minion, minion.Horizon) + if err != nil { + resultChan <- SubmitResult{ + maybeTransactionSuccess: nil, + maybeErr: errors.Wrap(err, "checking minion seq"), + } + return + } + txStr, err := minion.makeTx(destAddress) + if err != nil { + resultChan <- SubmitResult{ + maybeTransactionSuccess: nil, + maybeErr: errors.Wrap(err, "making payment tx"), + } + return + } + succ, err := minion.SubmitTransaction(minion, minion.Horizon, txStr) + resultChan <- SubmitResult{ + maybeTransactionSuccess: succ, + maybeErr: errors.Wrap(err, "submitting tx to minion"), + } +} + +// SubmitTransaction should be passed to the Minion. +func SubmitTransaction(minion *Minion, hclient horizonclient.ClientInterface, tx string) (*hProtocol.Transaction, error) { + result, err := hclient.SubmitTransactionXDR(tx) + if err != nil { + errStr := "submitting tx to horizon" + switch e := err.(type) { + case *horizonclient.Error: + minion.checkHandleBadSequence(e) + resStr, resErr := e.ResultString() + if resErr != nil { + errStr += ": error getting horizon error code: " + resErr.Error() + } else if resStr == createAccountAlreadyExistXDR { + return nil, errors.Wrap(ErrAccountExists, errStr) + } else { + errStr += ": horizon error string: " + resStr + } + return nil, errors.New(errStr) + } + return nil, errors.Wrap(err, errStr) + } + return &result, nil +} + +// CheckSequenceRefresh establishes the minion's initial sequence number, if needed. +// This should also be passed to the minion. 
+func CheckSequenceRefresh(minion *Minion, hclient horizonclient.ClientInterface) error { + if minion.Account.Sequence != 0 && !minion.forceRefreshSequence { + return nil + } + err := minion.Account.RefreshSequenceNumber(hclient) + if err != nil { + return errors.Wrap(err, "refreshing minion seqnum") + } + minion.forceRefreshSequence = false + return nil +} + +func (minion *Minion) checkHandleBadSequence(err *horizonclient.Error) { + resCode, e := err.ResultCodes() + isTxBadSeqCode := e == nil && resCode.TransactionCode == "tx_bad_seq" + if !isTxBadSeqCode { + return + } + minion.forceRefreshSequence = true +} + +func (minion *Minion) makeTx(destAddress string) (string, error) { + createAccountOp := txnbuild.CreateAccount{ + Destination: destAddress, + SourceAccount: minion.BotAccount.GetAccountID(), + Amount: minion.StartingBalance, + } + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: minion.Account, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&createAccountOp}, + BaseFee: minion.BaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + if err != nil { + return "", errors.Wrap(err, "unable to build tx") + } + + tx, err = tx.Sign(minion.Network, minion.Keypair, minion.BotKeypair) + if err != nil { + return "", errors.Wrap(err, "unable to sign tx") + } + + txe, err := tx.Base64() + if err != nil { + return "", errors.Wrap(err, "unable to serialize") + } + + // Increment the in-memory sequence number, since the tx will be submitted. + _, err = minion.Account.IncrementSequenceNumber() + if err != nil { + return "", errors.Wrap(err, "incrementing minion seq") + } + return txe, err +} diff --git a/services/friendbot/internal/minion_test.go b/services/friendbot/internal/minion_test.go new file mode 100644 index 0000000000..6099d4b3d6 --- /dev/null +++ b/services/friendbot/internal/minion_test.go @@ -0,0 +1,137 @@ +package internal + +import ( + "sync" + "testing" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/txnbuild" + "github.com/stretchr/testify/assert" +) + +// This test aims to reproduce the issue found on https://github.com/stellar/go/issues/2271 +// in which Minion.Run() will try to send multiple messages to a channel that gets closed +// immediately after receiving one message. 
+func TestMinion_NoChannelErrors(t *testing.T) { + mockSubmitTransaction := func(minion *Minion, hclient horizonclient.ClientInterface, tx string) (txn *hProtocol.Transaction, err error) { + return txn, nil + } + + mockCheckSequenceRefresh := func(minion *Minion, hclient horizonclient.ClientInterface) (err error) { + return errors.New("could not refresh sequence") + } + + // Public key: GD25B4QI6KWVDWXDW25CIM7EKR6A6PBSWE2RCNSAC4NJQDQJXZJYMMKR + botSeed := "SCWNLYELENPBXN46FHYXETT5LJCYBZD5VUQQVW4KZPHFO2YTQJUWT4D5" + botKeypair, err := keypair.Parse(botSeed) + if !assert.NoError(t, err) { + return + } + botAccount := Account{AccountID: botKeypair.Address()} + + // Public key: GD4AGPPDFFHKK3Z2X4XZDRXX6GZQKP4FMLVQ5T55NDEYGG3GIP7BQUHM + minionSeed := "SDTNSEERJPJFUE2LSDNYBFHYGVTPIWY7TU2IOJZQQGLWO2THTGB7NU5A" + minionKeypair, err := keypair.Parse(minionSeed) + if !assert.NoError(t, err) { + return + } + + minion := Minion{ + Account: Account{ + AccountID: minionKeypair.Address(), + Sequence: 1, + }, + Keypair: minionKeypair.(*keypair.Full), + BotAccount: botAccount, + BotKeypair: botKeypair.(*keypair.Full), + Network: "Test SDF Network ; September 2015", + StartingBalance: "10000.00", + SubmitTransaction: mockSubmitTransaction, + CheckSequenceRefresh: mockCheckSequenceRefresh, + BaseFee: txnbuild.MinBaseFee, + } + fb := &Bot{Minions: []Minion{minion}} + + recipientAddress := "GDJIN6W6PLTPKLLM57UW65ZH4BITUXUMYQHIMAZFYXF45PZVAWDBI77Z" + + // Prior to the bug fix, the following should consistently trigger a panic + // (send on closed channel) + numTests := 1000 + var wg sync.WaitGroup + wg.Add(numTests) + + for i := 0; i < numTests; i++ { + go func() { + fb.Pay(recipientAddress) + wg.Done() + }() + } + wg.Wait() +} + +func TestMinion_CorrectNumberOfTxSubmissions(t *testing.T) { + var ( + numTxSubmits int + mux sync.Mutex + ) + + mockSubmitTransaction := func(minion *Minion, hclient horizonclient.ClientInterface, tx string) (txn *hProtocol.Transaction, err error) { + mux.Lock() + numTxSubmits++ + mux.Unlock() + return txn, nil + } + + mockCheckSequenceRefresh := func(minion *Minion, hclient horizonclient.ClientInterface) (err error) { + return nil + } + + // Public key: GD25B4QI6KWVDWXDW25CIM7EKR6A6PBSWE2RCNSAC4NJQDQJXZJYMMKR + botSeed := "SCWNLYELENPBXN46FHYXETT5LJCYBZD5VUQQVW4KZPHFO2YTQJUWT4D5" + botKeypair, err := keypair.Parse(botSeed) + if !assert.NoError(t, err) { + return + } + botAccount := Account{AccountID: botKeypair.Address()} + + // Public key: GD4AGPPDFFHKK3Z2X4XZDRXX6GZQKP4FMLVQ5T55NDEYGG3GIP7BQUHM + minionSeed := "SDTNSEERJPJFUE2LSDNYBFHYGVTPIWY7TU2IOJZQQGLWO2THTGB7NU5A" + minionKeypair, err := keypair.Parse(minionSeed) + if !assert.NoError(t, err) { + return + } + + minion := Minion{ + Account: Account{ + AccountID: minionKeypair.Address(), + Sequence: 1, + }, + Keypair: minionKeypair.(*keypair.Full), + BotAccount: botAccount, + BotKeypair: botKeypair.(*keypair.Full), + Network: "Test SDF Network ; September 2015", + StartingBalance: "10000.00", + SubmitTransaction: mockSubmitTransaction, + CheckSequenceRefresh: mockCheckSequenceRefresh, + BaseFee: txnbuild.MinBaseFee, + } + fb := &Bot{Minions: []Minion{minion}} + + recipientAddress := "GDJIN6W6PLTPKLLM57UW65ZH4BITUXUMYQHIMAZFYXF45PZVAWDBI77Z" + + numTests := 1000 + var wg sync.WaitGroup + wg.Add(numTests) + + for i := 0; i < numTests; i++ { + go func() { + fb.Pay(recipientAddress) + wg.Done() + }() + } + wg.Wait() + assert.Equal(t, numTests, numTxSubmits) +} diff --git a/services/friendbot/loadtest/loadtest.go 
b/services/friendbot/loadtest/loadtest.go new file mode 100644 index 0000000000..e40409ee27 --- /dev/null +++ b/services/friendbot/loadtest/loadtest.go @@ -0,0 +1,80 @@ +package main + +import ( + "encoding/json" + "flag" + "log" + "net/http" + "net/url" + "time" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/errors" +) + +type maybeDuration struct { + maybeDuration time.Duration + maybeError error +} + +func main() { + // Friendbot must be running as a local server. Get Friendbot URL from CL. + fbURL := flag.String("url", "http://0.0.0.0:8000/", "URL of friendbot") + numRequests := flag.Int("requests", 500, "number of requests") + flag.Parse() + durationChannel := make(chan maybeDuration, *numRequests) + for i := 0; i < *numRequests; i++ { + kp, err := keypair.Random() + if err != nil { + panic(err) + } + address := kp.Address() + go makeFriendbotRequest(address, *fbURL, durationChannel) + + time.Sleep(time.Duration(500) * time.Millisecond) + } + durations := []maybeDuration{} + for i := 0; i < *numRequests; i++ { + durations = append(durations, <-durationChannel) + } + close(durationChannel) + log.Printf("Got %d times with average %s", *numRequests, mean(durations)) +} + +func makeFriendbotRequest(address, fbURL string, durationChannel chan maybeDuration) { + start := time.Now() + formData := url.Values{ + "addr": {address}, + } + resp, err := http.PostForm(fbURL, formData) + if err != nil { + log.Printf("Got post error: %s", err) + durationChannel <- maybeDuration{maybeError: errors.Wrap(err, "posting form")} + } + var result map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&result) + if err != nil { + log.Printf("Got decode error: %s", err) + durationChannel <- maybeDuration{maybeError: errors.Wrap(err, "decoding json")} + } + timeTrack(start, "makeFriendbotRequest", durationChannel) +} + +func timeTrack(start time.Time, name string, durationChannel chan maybeDuration) { + elapsed := time.Since(start) + log.Printf("%s took %s", name, elapsed) + durationChannel <- maybeDuration{maybeDuration: elapsed} +} + +func mean(durations []maybeDuration) time.Duration { + var total time.Duration + count := 0 + for _, d := range durations { + if d.maybeError != nil { + continue + } + total += d.maybeDuration + count++ + } + return total / time.Duration(count) +} diff --git a/services/friendbot/main.go b/services/friendbot/main.go new file mode 100644 index 0000000000..000f98fce3 --- /dev/null +++ b/services/friendbot/main.go @@ -0,0 +1,102 @@ +package main + +import ( + "database/sql" + "fmt" + stdhttp "net/http" + "os" + + "github.com/go-chi/chi" + "github.com/spf13/cobra" + "github.com/stellar/go/services/friendbot/internal" + "github.com/stellar/go/support/app" + "github.com/stellar/go/support/config" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/problem" +) + +// Config represents the configuration of a friendbot server +type Config struct { + Port int `toml:"port" valid:"required"` + FriendbotSecret string `toml:"friendbot_secret" valid:"required"` + NetworkPassphrase string `toml:"network_passphrase" valid:"required"` + HorizonURL string `toml:"horizon_url" valid:"required"` + StartingBalance string `toml:"starting_balance" valid:"required"` + TLS *config.TLS `valid:"optional"` + NumMinions int `toml:"num_minions" valid:"optional"` + BaseFee int64 `toml:"base_fee" valid:"optional"` + MinionBatchSize int `toml:"minion_batch_size" 
valid:"optional"` + SubmitTxRetriesAllowed int `toml:"submit_tx_retries_allowed" valid:"optional"` +} + +func main() { + + rootCmd := &cobra.Command{ + Use: "friendbot", + Short: "friendbot for the Stellar Test Network", + Long: "Client-facing API server for the friendbot service on the Stellar Test Network", + Run: run, + } + + rootCmd.PersistentFlags().String("conf", "./friendbot.cfg", "config file path") + rootCmd.Execute() +} + +func run(cmd *cobra.Command, args []string) { + var ( + cfg Config + cfgPath = cmd.PersistentFlags().Lookup("conf").Value.String() + ) + log.SetLevel(log.InfoLevel) + + err := config.Read(cfgPath, &cfg) + if err != nil { + switch cause := errors.Cause(err).(type) { + case *config.InvalidConfigError: + log.Error("config file: ", cause) + default: + log.Error(err) + } + os.Exit(1) + } + + fb, err := initFriendbot(cfg.FriendbotSecret, cfg.NetworkPassphrase, cfg.HorizonURL, cfg.StartingBalance, + cfg.NumMinions, cfg.BaseFee, cfg.MinionBatchSize, cfg.SubmitTxRetriesAllowed) + if err != nil { + log.Error(err) + os.Exit(1) + } + router := initRouter(fb) + registerProblems() + + addr := fmt.Sprintf("0.0.0.0:%d", cfg.Port) + + http.Run(http.Config{ + ListenAddr: addr, + Handler: router, + TLS: cfg.TLS, + OnStarting: func() { + log.Infof("starting friendbot server - %s", app.Version()) + log.Infof("listening on %s", addr) + }, + }) +} + +func initRouter(fb *internal.Bot) *chi.Mux { + mux := http.NewAPIMux(log.DefaultLogger) + + handler := &internal.FriendbotHandler{Friendbot: fb} + mux.Get("/", handler.Handle) + mux.Post("/", handler.Handle) + mux.NotFound(stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + problem.Render(r.Context(), w, problem.NotFound) + })) + + return mux +} + +func registerProblems() { + problem.RegisterError(sql.ErrNoRows, problem.NotFound) +} diff --git a/services/horizon/.gitignore b/services/horizon/.gitignore new file mode 100644 index 0000000000..49eaa485a1 --- /dev/null +++ b/services/horizon/.gitignore @@ -0,0 +1,22 @@ +.DS_Store +internal/docs/public +/.env +/.go +/tmp/ +/pkg +/bin +/dist +/vendor/pkg +/vendor/bin +/vendor/src +/.vscode/last.sql +/.vscode/temp.sql +/.vscode/* +*.bts +*.swp +*.swo + +/test.go +/tls/*.crt +/tls/*.key +/tls/*.csr diff --git a/services/horizon/.projections.json b/services/horizon/.projections.json new file mode 100644 index 0000000000..3b69acc5b8 --- /dev/null +++ b/services/horizon/.projections.json @@ -0,0 +1,4 @@ +{ + "*.go": {"alternate": "{}_test.go"}, + "*_test.go": {"alternate": "{}.go"} +} diff --git a/services/horizon/CHANGELOG.md b/services/horizon/CHANGELOG.md new file mode 100644 index 0000000000..d986ce323f --- /dev/null +++ b/services/horizon/CHANGELOG.md @@ -0,0 +1,1467 @@ +# Changelog + +All notable changes to this project will be documented in this +file. This project adheres to [Semantic Versioning](http://semver.org/). + +## Unreleased + +* Generate Http Status code of 499 for Client Disconnects, should propagate into `horizon_http_requests_duration_seconds_count` + metric key with status=499 label. ([4098](horizon_http_requests_duration_seconds_count)) +* Improve performance of `/trades?trade_type=liquidity_pool` requests. ([4149](https://github.com/stellar/go/pull/4149)) +* Added `absBeforeEpoch` to ClaimableBalance API Resources. It will contain the Unix epoch representation of absolute before date. ([4148](https://github.com/stellar/go/pull/4148)) + +### DB Schema Migration + +* DB migrations add a column and index to the `history_trades` table. 
This is a very large table, so the migration may take a long time (depending on your DB hardware). Please test the migration's execution time on a copy of your production DB first.
+
+## v2.12.1
+
+### Fixes
+* Fixes a critical vulnerability in the HTTP server of Go <= 1.17.4. An attacker can cause unbounded memory growth in a Go server accepting HTTP/2 requests.
+
+## v2.12.0
+
+### Features
+* Result codes for fee-bump transactions will now also include the inner result codes ([4081](https://github.com/stellar/go/pull/4081))
+
+### Performance improvements
+* XDR encoding/decoding pipelines have been optimized ([4069](https://github.com/stellar/go/pull/4069), [4068](https://github.com/stellar/go/pull/4068), [4073](https://github.com/stellar/go/pull/4073), [4064](https://github.com/stellar/go/pull/4064), [4071](https://github.com/stellar/go/pull/4071), [4075](https://github.com/stellar/go/pull/4075), [4077](https://github.com/stellar/go/pull/4077))
+
+* Path-finding on the `/paths` endpoint has been sped up significantly ([4091](https://github.com/stellar/go/pull/4091), [4096](https://github.com/stellar/go/pull/4096), [4102](https://github.com/stellar/go/pull/4102), [4105](https://github.com/stellar/go/pull/4105), [4113](https://github.com/stellar/go/pull/4113))
+
+* Unused database indices have been removed ([4085](https://github.com/stellar/go/pull/4085), [4089](https://github.com/stellar/go/pull/4089))
+
+### Fixes
+* Improves error parsing from Captive Core ([4066](https://github.com/stellar/go/pull/4066))
+
+* Prevent duplicate errors related to liquidity pool tables during repeated reingestion of the same range ([4114](https://github.com/stellar/go/pull/4114))
+
+* In the 2.11.0 release there was a bug introduced which made the `horizon db reingest range` command ignore optional parameters like `--parallel-workers`. This bug is now fixed so all optional command line flags are parsed correctly ([4127](https://github.com/stellar/go/pull/4127))
+
+## v2.11.0
+
+### Changes
+
+* Add a new horizon flag `--max-assets-per-path-request` (`15` by default) that sets the number of assets to consider for strict-send and strict-receive requests ([4046](https://github.com/stellar/go/pull/4046))
+* Add an endpoint `/liquidity_pools?account={account_id}` which returns the liquidity pools an account is participating in [4043](https://github.com/stellar/go/pull/4043)
+* Add a new horizon command `horizon db fill-gaps` which fills any gaps in history in the horizon db. The command takes optional start and end ledger parameters.
If the start and end ledgers are provided, then Horizon will only fill the gaps found within the given ledger range [4060](https://github.com/stellar/go/pull/4060)
+* Improve performance of `/liquidity_pools/{liquidity_pool_id}/effects` endpoint by optimizing the db query to fetch effects for a liquidity pool [4065](https://github.com/stellar/go/pull/4065)
+* Include the captive core binary in the `stellar/horizon` Docker image [4019](https://github.com/stellar/go/pull/4019)
+* Remove `--captive-core-reuse-storage-dir` horizon flag [4048](https://github.com/stellar/go/pull/4048)
+* Improve performance of XDR encoding which should also improve ingestion speeds [4063](https://github.com/stellar/go/pull/4063), [4056](https://github.com/stellar/go/pull/4056), [3957](https://github.com/stellar/go/pull/3957)
+* Improve detection of when the Stellar Core binary has been modified [4050](https://github.com/stellar/go/pull/4050)
+* The `horizon_ingest_state_verify_ledger_entries` metric was changed to a gauge [4054](https://github.com/stellar/go/pull/4054)
+
+## v2.10.0
+
+This is a minor release with no DB schema migrations or explicit state rebuild.
+
+### Changes
+
+* Use the correct asset when calculating liquidity pool disbursements ([4018](https://github.com/stellar/go/pull/4018))
+* Make sure Stellar-Core is not started before previous instance termination ([4020](https://github.com/stellar/go/pull/4020))
+* Add a new feature flag `--ingest-enable-extended-log-ledger-stats` (`false` by default) that enables extra ledger stats when logging ledger processing info ([4017](https://github.com/stellar/go/pull/4017))
+* Add a new command `horizon record-metrics` that records `:[ADMIN_PORT]/metrics` into a zip file for debugging purposes ([4023](https://github.com/stellar/go/pull/4023))
+* Expose the `Latest-Ledger` header to browser web pages ([3995](https://github.com/stellar/go/pull/3995))
+* Correct `horizon db reingest range` output command name when invoking `horizon db detect-gaps` ([4007](https://github.com/stellar/go/pull/4007))
+* Add new Prometheus metrics:
+  * `round_trip_time_seconds`: time required to run a `select 1` query in the DB ([4009](https://github.com/stellar/go/pull/4009))
+  * `state_verify_ledger_entries_count`: number of ledger entries downloaded from buckets in a single state verifier run ([4015](https://github.com/stellar/go/pull/4015))
+  * `ledger_fetch_duration_seconds`: duration of fetching ledgers from the ledger backend, sliding window = 10m ([4016](https://github.com/stellar/go/pull/4016))
+
+
+## v2.9.0
+
+**Upgrading to this version from <= v2.8.3 will trigger a state rebuild. During this process (which will take at least 10 minutes), Horizon will not ingest new ledgers.**
+
+**Protocol 18 support:** This release adds support for Protocol 18 ([CAP 38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md): Automated Market Makers).
+
+### DB Schema Migration
+
+* This release comes with a DB migration removing the `offer_id` field from the `history_trades` table and adding new tables related to AMM. It should not take more than 15 minutes to complete the migration.
+
+### Breaking changes
+
+* There are multiple breaking changes that will activate on the Protocol 18 upgrade. Please check the [Horizon Liquidity Pool API](https://docs.google.com/document/d/1pXL8kr1a2vfYSap9T67R-g72B_WWbaE1YsLMa04OgoU/edit) doc for more information. Please upgrade to the latest SDKs that are backward compatible.
+* The `--ingest` flag is set by default.
If `--captive-core-config-path` is not set, the config file is generated based on the network passphrase ([3783](https://github.com/stellar/go/pull/3783)).
+
+### Changes
+
+* **[CAP 38](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0038.md): Automated Market Makers support.** All the API changes have been outlined in the [Horizon Liquidity Pool API](https://docs.google.com/document/d/1pXL8kr1a2vfYSap9T67R-g72B_WWbaE1YsLMa04OgoU/edit) doc.
+* Update `/paths` endpoint to take liquidity pools into account when searching for possible routes between assets ([3818](https://github.com/stellar/go/pull/3818)).
+* Multiple performance improvements in `/paths`: [3816](https://github.com/stellar/go/pull/3816), [3965](https://github.com/stellar/go/pull/3965), [3933](https://github.com/stellar/go/pull/3933).
+* Requests to `/paths` are now cancelled, respecting the `--connection-timeout` flag value ([3081](https://github.com/stellar/go/pull/3081)).
+* Multiple performance improvements to state ingestion processors: [3945](https://github.com/stellar/go/pull/3945), [3956](https://github.com/stellar/go/pull/3956), [3963](https://github.com/stellar/go/pull/3963), [3953](https://github.com/stellar/go/pull/3953), [3944](https://github.com/stellar/go/pull/3944).
+* Add missing tx result codes in `txsub` ([3866](https://github.com/stellar/go/pull/3866)).
+* Add new metric `ProcessorsRunDurationSummary`; the old `ProcessorsRunDuration` is deprecated ([3940](https://github.com/stellar/go/pull/3940)).
+* Logs during state ingestion now display a `progress` value, which is a percentage progress indicator ([3946](https://github.com/stellar/go/pull/3946)).
+
+## v2.8.3
+**Upgrading to this version from <= v2.8.0 will trigger a state rebuild. During this process (which will take at least 10 minutes), Horizon will not ingest new ledgers.**
+
+### DB Schema Migration
+
+* This release comes with a small DB migration. It should not take more than a couple of minutes.
+
+### Scheduled Changes
+
+**In the 2.9.0 Horizon release, the `--ingest` flag will be set to `true` by default.**
+
+### Changes
+
+* Fix ingestion of fee bump transactions which have muxed source accounts ([3948](https://github.com/stellar/go/pull/3948)).
+* Add an index on trade aggregations, to improve ingestion performance ([3947](https://github.com/stellar/go/pull/3947)).
+
+## v2.8.2
+**Upgrading to this version from <= v2.8.0 will trigger a state rebuild. During this process (which will take at least 10 minutes), Horizon will not ingest new ledgers.**
+
+**In the 2.9.0 Horizon release, the `--ingest` flag will be set to `true` by default.**
+
+* Improve performance of `OffersProcessor`. This should speed up ingestion of the latest Stellar Public Network activity by up to 30%. Please note that this change does not improve reingestion speed because ledger entries are not processed during reingestion. ([3917](https://github.com/stellar/go/pull/3917))
+
+## v2.8.1
+**Upgrading to this version from <= v2.8.0 will trigger a state rebuild. During this process (which will take at least 10 minutes), Horizon will not ingest new ledgers.**
+
+**In the 2.9.0 Horizon release, the `--ingest` flag will be set to `true` by default.**
+
+* Fix bug in asset balance classification where clawback is enabled. ([3847](https://github.com/stellar/go/pull/3847))
+
+## v2.8.0
+**Upgrading to this version from <= v2.1.1 will trigger a state rebuild.
During this process (which will take at least 10 minutes), Horizon will not ingest new ledgers.**
+
+**In the 2.9.0 Horizon release, the `--ingest` flag will be set to `true` by default.**
+
+* Limit reap to 100k ledgers/second, to prevent excess CPU usage ([3823](https://github.com/stellar/go/pull/3823)).
+* Improve performance of path finding endpoints ([3818](https://github.com/stellar/go/pull/3818)).
+
+## v2.7.0
+**Upgrading to this version from <= v2.1.1 will trigger a state rebuild. During this process (which will take at least 10 minutes), Horizon will not ingest new ledgers.**
+
+**In the 2.9.0 Horizon release, the `--ingest` flag will be set to `true` by default.**
+
+* If `--captive-core-config-path` is not set, the config file is generated based on the network passphrase. ([3783](https://github.com/stellar/go/pull/3783))
+* Fix bug in the Horizon reap system (used by the `horizon db reap` command and when Horizon is configured with `--history-retention-count`) which could lead to partial deletions. ([3754](https://github.com/stellar/go/pull/3754))
+* Log debug messages from captive core at the appropriate log level. ([3746](https://github.com/stellar/go/pull/3746))
+* Add a feature flag `--captive-core-reuse-storage-path`/`CAPTIVE_CORE_REUSE_STORAGE_PATH` that will reuse Captive Core's storage path for bucket files when applicable for better performance. ([3750](https://github.com/stellar/go/pull/3750))
+
+* Add the ability to filter accounts by their participation in a particular liquidity pool ([3873](https://github.com/stellar/go/pull/3873)).
+
+### Update
+* Include pool shares in account balances ([3873](https://github.com/stellar/go/pull/3873)).
+
+## v2.6.1
+
+**Upgrading to this version from <= v2.1.1 will trigger a state rebuild. During this process (which will take at least 10 minutes), Horizon will not ingest new ledgers.**
+
+* Fix bug introduced in v2.6.0 ([#3737](https://github.com/stellar/go/pull/3737)), preventing usage of `horizon db migrate up/down/redo` commands. ([#3762](https://github.com/stellar/go/pull/3762))
+
+## v2.6.0
+
+**Upgrading to this version from <= v2.1.1 will trigger a state rebuild. During this process (which will take at least 10 minutes), Horizon will not ingest new ledgers.**
+
+* Precompute trade aggregations during ingestion to improve performance. Will rebuild the aggregations as part of the database migrations. ([3641](https://github.com/stellar/go/pull/3641) & [3760](https://github.com/stellar/go/pull/3760)).
+* Require `COUNT` param when running `horizon db migrate down COUNT` to prevent accidentally running all downward migrations. Add `horizon db migrate status` command. ([#3737](https://github.com/stellar/go/pull/3737))
+* Fix a bug in `fee_account_muxed` and `fee_account_muxed_id` fields (the fields were incorrectly populated with the source account details). ([3735](https://github.com/stellar/go/pull/3735))
+* Validate ledger range when calling `horizon db reingest range` so that we respond with an error when attempting to ingest ledgers which are not available in the history archives. ([3738](https://github.com/stellar/go/pull/3738))
+* Improve performance of transaction submission. ([3563](https://github.com/stellar/go/pull/3563))
+
+
+## v2.5.2
+
+**Upgrading to this version from <= v2.1.1 will trigger a state rebuild. During this process (which can take up to 20 minutes), Horizon will not ingest new ledgers.**
+
+* Fix a bug in the method unmarshaling payment operation details.
([#3722](https://github.com/stellar/go/pull/3722))
+
+## v2.5.1
+
+**Upgrading to this version from <= v2.1.1 will trigger a state rebuild. During this process (which can take up to 20 minutes), Horizon will not ingest new ledgers.**
+
+* Fix for Stellar-Core 17.1.0 bug that can potentially corrupt the Captive-Core storage dir.
+* All muxed ID fields are now represented as strings. This is to support JS that may not handle uint64 values in JSON responses properly.
+
+## v2.5.0
+
+**Upgrading to this version from <= v2.1.1 will trigger a state rebuild. During this process (which can take up to 20 minutes), Horizon will not ingest new ledgers.**
+
+* Add new command `horizon db detect-gaps`, which detects ingestion gaps in the database. The command prints out the `db reingest` commands to run in order to fill the gaps found ([3672](https://github.com/stellar/go/pull/3672)).
+* Performance improvement: Captive Core now reuses bucket files whenever it finds existing ones in the corresponding `--captive-core-storage-path` (introduced in [v2.1.0](#v2.1.0)) rather than generating a one-time temporary sub-directory ([3670](https://github.com/stellar/go/pull/3670)). **This feature requires Stellar-Core version 17.1 or later.**
+* Horizon now monitors the Stellar Core binary on disk (pointed to by `--stellar-core-binary-path`/`STELLAR_CORE_BINARY_PATH`) and restarts its Captive Core subprocess if it detects changes (i.e. a more recent file timestamp for the Stellar Core binary) ([3687](https://github.com/stellar/go/pull/3687)).
+* `POST /transactions` returns `503 Service Unavailable` instead of `504 Gateway Timeout` if the connected Stellar-Core is out of sync ([3653](https://github.com/stellar/go/pull/3653)).
+* Add protocol version metrics: `horizon_ingest_max_supported_protocol_version`, `horizon_ingest_captive_stellar_core_supported_protocol_version`, `horizon_stellar_core_supported_protocol_version` ([3634](https://github.com/stellar/go/pull/3634)).
+* Fixed crash in `horizon ingest verify-range` command ([3682](https://github.com/stellar/go/pull/3682)).
+* Handle replica conflict errors gracefully ([3674](https://github.com/stellar/go/pull/3674)).
+* Fix data race in request parameters handling ([3690](https://github.com/stellar/go/pull/3690)).
+* Fix bug where the configuration for `CAPTIVE_CORE_LOG_PATH`, `CAPTIVE_CORE_PEER_PORT`, and `CAPTIVE_CORE_HTTP_PORT` was ignored if it was configured via environment variables instead of command line arguments. ([3702](https://github.com/stellar/go/pull/3702)).
+* Error when setting `BUCKET_DIR_PATH` through `--captive-core-config-path` ([3707](https://github.com/stellar/go/pull/3707)).
+
+## v2.4.1
+
+**Upgrading to this version from <= v2.1.1 will trigger a state rebuild. During this process (which can take up to 20 minutes), Horizon will not ingest new ledgers.**
+
+### Bug Fixes
+* Fix bug in `horizon db reingest range` command, which would throw a duplicate entry conflict error from the DB. ([3661](https://github.com/stellar/go/pull/3661)).
+* Fix bug in DB metrics preventing Horizon from starting when read-only replica middleware is enabled. ([3668](https://github.com/stellar/go/pull/3668)).
+* Fix bug in the value of `route` in the logs for rate-limited requests (previously it was set to `undefined`). ([3658](https://github.com/stellar/go/pull/3658)).
+
+## v2.4.0
+
+**Upgrading to this version from <= v2.1.1 will trigger a state rebuild.
During this process (which can take up to 20 minutes), Horizon will not ingest new ledgers.** + +### DB State Migration + +* This release comes with a small DB schema change (new multiplexed-account-related columns are incorporated). It should not take more than five minutes to run due to new columns being NULL-able. + +### Deprecations + +* Deprecate `--captive-core-config-append-path` in favor of `--captive-core-config-path`. The difference between the two flags is that `--captive-core-config-path` will validate the configuration file to reject any fields which are not supported by captive core ([3629](https://github.com/stellar/go/pull/3629)). + +### New features + +* Add more in-depth Prometheus metrics (count & duration) for db queries. ([3597](https://github.com/stellar/go/pull/3597), [3605](https://github.com/stellar/go/pull/3605)) + +* HTTP request logs will now print the Origin header if Referer is not set. ([3599](https://github.com/stellar/go/pull/3599)) + +* Add Multiplexed Account details to API responses (additional `_muxed` and `_muxed_id` optional fields following what's described in [SEP 23](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0023.md#horizon-api-changes)): + * Transactions: `account_muxed`, `account_muxed_id`, `fee_account` and `fee_account_muxed`. + * Operations: `source_account_muxed`, `source_account_muxed_id` and additional fields depending on the operation (e.g. `from_muxed`, `from_muxed_id`, `to_muxed` and `to_muxed_id` for Payment operations) + * Effects: `account_muxed`, `account_muxed_id` and additional fields depending on the effect (e.g. `seller_muxed` and `seller_muxed_id` for the Trade effect). + +### Code Changes + +* Fix bug in `horizon db reingest range` command which required the `--ingest` flag to be set ([3625](https://github.com/stellar/go/pull/3625)). + +* Fix bug in causing database connections to be closed when the HTTP request was cancelled. ([3630](https://github.com/stellar/go/pull/3630)) + +## v2.3.0 + +**Upgrading to this version from <= v2.1.1 will trigger a state rebuild. During this process (which can take up to 20 minutes), Horizon will not ingest new ledgers.** + +### New features +* Introduces a flag (`--ro-database-url` / `RO_DATABASE_URL`) which allows setting a connection to a read-replica database. This flag makes Horizon take into account data propagation lag to the replica instance, adding retries if the data is out of sync ([3574](https://github.com/stellar/go/pull/3574)). + +### Code changes +* Improved test suite coverage and stability ([3560](https://github.com/stellar/go/pull/3560), [3562](https://github.com/stellar/go/pull/3562), [3551](https://github.com/stellar/go/pull/3551), and [3547](https://github.com/stellar/go/pull/3547)). + +* Improved session handling and timeouts ([3576](https://github.com/stellar/go/pull/3576), [3545](https://github.com/stellar/go/pull/3545), and [3567](https://github.com/stellar/go/pull/3567)). + +* Improved stability of Captive Core's configuration options. Specifically, it will now prefer either the command-line parameter (e.g. `--captive-core-peer-port` or its env-var equivalent) or the user-supplied append file (`--captive-core-append-path`) over Horizon's internal defaults. However, if a value is set in *both* the append file and at the command-line, an error will be thrown unless both values are equal ([3558](https://github.com/stellar/go/pull/3558)). + + +## v2.2.0 + +**Upgrading to this version will trigger state rebuild. 
During this process (which can take up to 20 minutes) it will not ingest new ledgers.**
+
+* Add `num_claimable_balances` and `claimable_balances_amount` fields to asset stat summaries at `/assets` ([3502](https://github.com/stellar/go/pull/3502)).
+* Improve ingestion reliability when running multiple Horizon ingestion instances ([3518](https://github.com/stellar/go/pull/3518)).
+
+## v2.1.1
+
+* When ingesting a backlog of ledgers, Horizon sometimes consumes ledgers faster than the rate at which Captive Core emits them. Previously this scenario caused failures in the ingestion system. That is now fixed ([3531](https://github.com/stellar/go/pull/3531)).
+
+## v2.1.0
+
+### DB State Migration
+
+* This release comes with an internal DB representation change: the `claimable_balances` table now represents the claimable balance identifiers as a hexadecimal string (as opposed to base64).
+
+**The migration will be performed by the ingestion system (through a state rebuild) and, thus, if some of your Horizon nodes are not ingestors (i.e. no `--ingestion` flag enabled) you may experience 500s in the `GET /claimable_balances/` requests until an ingestion node is upgraded. Also, it's worth noting that the rebuild process will take several minutes and no new ledgers will be ingested until the rebuild is finished.**
+
+### Breaking changes
+
+* Add a flag `--captive-core-storage-path`/`CAPTIVE_CORE_STORAGE_PATH` that allows users to control the storage location for Captive Core bucket data ([3479](https://github.com/stellar/go/pull/3479)).
+  - Previously, Horizon created a directory in `/tmp` to store Captive Core bucket data. Now, if the captive core storage path flag is not set, Horizon will default to using the current working directory.
+* Add a flag `--captive-core-log-path`/`CAPTIVE_CORE_LOG_PATH` that allows users to control the location of the logs emitted by Captive Core ([3472](https://github.com/stellar/go/pull/3472)). If you have a `LOG_FILE_PATH` entry in your Captive Core toml file, remove that entry and use the horizon flag instead.
+* `--stellar-core-db-url` / `STELLAR_CORE_DATABASE_URL` should only be configured if Horizon ingestion is enabled; otherwise Horizon will not start ([3477](https://github.com/stellar/go/pull/3477)).
+
+### New features
+
+* Add an endpoint which determines if Horizon is healthy enough to receive traffic ([3435](https://github.com/stellar/go/pull/3435)).
+* Sanitize route regular expressions for Prometheus metrics ([3459](https://github.com/stellar/go/pull/3459)).
+* Add asset stat summaries per trust-line flag category ([3454](https://github.com/stellar/go/pull/3454)).
+  - The `amount` and `num_accounts` fields in the `/assets` endpoint are deprecated. Fields will be removed in Horizon 3.0. You can find the same data under `balances.authorized` and `accounts.authorized`, respectively.
+* Add a flag `--captive-core-peer-port`/`CAPTIVE_CORE_PEER_PORT` that allows users to control which port the Captive Core subprocess will bind to for connecting to the Stellar swarm. ([3483](https://github.com/stellar/go/pull/3484)).
+* Add 2 new HTTP endpoints `GET claimable_balances/{id}/transactions` and `GET claimable_balances/{id}/operations`, which respectively return the transactions and operations related to a provided Claimable Balance Identifier `{id}`.
+* Add Stellar Protocol 16 support. This release comes with support for Protocol 16 ([CAP 35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md): asset clawback).
See [the downstream SDK issue template](https://gist.github.com/2opremio/89c4775104635382d51b6f5e41cbf6d5) for details on what changed on Horizon's side. For full details, please read [CAP 35](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0035.md). + + +## v2.0.0 + +### Before you upgrade + +Please read the [Captive Core](https://github.com/stellar/go/blob/master/services/horizon/internal/docs/captive_core.md) doc which contains new requirements and migration guide. + +### Captive Stellar-Core + +Introducing the stable release with Captive Stellar-Core mode enabled by default. Captive mode relaxes Horizon's operational requirements. It allows running Horizon without a fully fledged Core instance and, most importantly, without a Core database. More information about this new mode can be found in [Captive Core](https://github.com/stellar/go/blob/master/services/horizon/internal/docs/captive_core.md) doc. + +If you run into issues please check [Known Issues](https://github.com/stellar/go/blob/master/services/horizon/internal/docs/captive_core.md#known-issues) or [report an issue](https://github.com/stellar/go/issues/new/choose). Please ask questions in [Keybase](https://keybase.io/team/stellar.public) or [Stack Exchange](https://stellar.stackexchange.com/). + +### Breaking changes + +* There are new config params (below) required by Captive Stellar-Core. Please check the [Captive Core](https://github.com/stellar/go/blob/master/services/horizon/internal/docs/captive_core.md) guide for migration tips. + * `STELLAR_CORE_BINARY_PATH` - a path to Stellar-Core binary, + * `CAPTIVE_CORE_CONFIG_APPEND_PATH` - defines a path to a file to append to the Stellar Core configuration file used by captive core. +* The `expingest` command has been renamed to `ingest` since the ingestion system is not experimental anymore. +* Removed `--rate-limit-redis-key` and `--redis-url` configuration flags. + +## v2.0.0 Release Candidate + +**This is a release candidate: while SDF is confident that there are no critical bugs and release candidate is safe to use in production we encourage organizations to deploy it to production only after org-specific testing.** + +### Before you upgrade + +Please read the [Captive Core](https://github.com/stellar/go/blob/release-horizon-v2.0.0-beta/services/horizon/internal/docs/captive_core.md) doc which contains new requirements and migration guide. + +### Captive Stellar-Core + +Introducing the release candidate with Captive Stellar-Core mode enabled by default. Captive mode relaxes Horizon's operational requirements. It allows running Horizon without a fully fledged Core instance and, most importantly, without a Core database. More information about this new mode can be found in [Captive Core](https://github.com/stellar/go/blob/release-horizon-v2.0.0-beta/services/horizon/internal/docs/captive_core.md) doc. + +If you run into issues please check [Known Issues](https://github.com/stellar/go/blob/release-horizon-v2.0.0-beta/services/horizon/internal/docs/captive_core.md#known-issues) or [report an issue](https://github.com/stellar/go/issues/new/choose). Please ask questions in [Keybase](https://keybase.io/team/stellar.public) or [Stack Exchange](https://stellar.stackexchange.com/). + +### Breaking changes + +* The `expingest` command has been renamed to `ingest` since the ingestion system is not experimental anymore. +* Removed `--rate-limit-redis-key` and `--redis-url` configuration flags. + +## v1.14.0 + +* Fix bug `/fee_stats` endpoint. 
The endpoint was not including the additional base fee charge for fee bump transactions ([#3354](https://github.com/stellar/go/pull/3354)) +* Expose the timestamp of the most recently ingested ledger in the root resource response and in the `/metrics` response ([#3281](https://github.com/stellar/go/pull/3281)) +* Add `--checkpoint-frequency` flag to configure how many ledgers span a history archive checkpoint ([#3273](https://github.com/stellar/go/pull/3273)). This is useful in the context of creating standalone Stellar networks in [integration tests](internal/docs/captive_core.md#private-networks). + +## v1.13.1 + +**Upgrading to this version from version before v1.10.0 will trigger state rebuild. During this process (which can take several minutes) it will not ingest new ledgers.** + +* Fixed a bug in `/fee_stats` endpoint that could calculate invalid stats if fee bump transactions were included in the ledger ([#3326](https://github.com/stellar/go/pull/3326)) + +## v2.0.0 Beta + +**THIS IS A BETA RELEASE! DO NOT USE IN PRODUCTION. The release may contain critical bugs. It's not suitable for production use.** + +### Before you upgrade + +Please read the [Captive Core](https://github.com/stellar/go/blob/release-horizon-v2.0.0-beta/services/horizon/internal/docs/captive_core.md) doc which contains new requirements and migration guide. + +### Captive Stellar-Core + +Introducing the beta release with Captive Stellar-Core mode enabled by default. Captive mode relaxes Horizon's operational requirements. It allows running Horizon without a fully fledged Core instance and, most importantly, without a Core database. More information about this new mode can be found in [Captive Core](https://github.com/stellar/go/blob/release-horizon-v2.0.0-beta/services/horizon/internal/docs/captive_core.md) doc. + +This version may contain bugs. If you run into issues please check [Known Issues](https://github.com/stellar/go/blob/release-horizon-v2.0.0-beta/services/horizon/internal/docs/captive_core.md#known-issues) or [report an issue](https://github.com/stellar/go/issues/new/choose). Please ask questions in [Keybase](https://keybase.io/team/stellar.public) or [Stack Exchange](https://stellar.stackexchange.com/). + +## v1.13.0 + +**Upgrading to this version from version before v1.10.0 will trigger state rebuild. During this process (which can take several minutes) it will not ingest new ledgers.** + +* Improved performance of `OfferProcessor` ([#3249](https://github.com/stellar/go/pull/3249)). +* Improved speed of state verification startup time ([#3251](https://github.com/stellar/go/pull/3251)). +* Multiple Captive Core improvements and fixes ([#3237](https://github.com/stellar/go/pull/3237), [#3257](https://github.com/stellar/go/pull/3257), [#3260](https://github.com/stellar/go/pull/3260), [#3264](https://github.com/stellar/go/pull/3264), [#3262](https://github.com/stellar/go/pull/3262), [#3265](https://github.com/stellar/go/pull/3265), [#3269](https://github.com/stellar/go/pull/3269), [#3271](https://github.com/stellar/go/pull/3271), [#3270](https://github.com/stellar/go/pull/3270), [#3272](https://github.com/stellar/go/pull/3272)). 
+
+## v1.12.0
+
+* Add Prometheus metrics for the duration of ingestion processors ([#3224](https://github.com/stellar/go/pull/3224))
+* Many Captive Core improvements and fixes ([#3232](https://github.com/stellar/go/pull/3232), [#3223](https://github.com/stellar/go/pull/3223), [#3226](https://github.com/stellar/go/pull/3226), [#3203](https://github.com/stellar/go/pull/3203), [#3189](https://github.com/stellar/go/pull/3189), [#3187](https://github.com/stellar/go/pull/3187))
+
+## v1.11.1
+
+* Fix bug in parsing `db-url` parameter in `horizon db migrate` and `horizon db init` commands ([#3192](https://github.com/stellar/go/pull/3192)).
+
+## v1.11.0
+
+* The `service` field emitted in ingestion logs has been changed from `expingest` to `ingest` ([#3118](https://github.com/stellar/go/pull/3118)).
+* Ledger stats are now exported in `/metrics` in `horizon_ingest_ledger_stats_total` metric ([#3148](https://github.com/stellar/go/pull/3148)).
+* Stellar Core database URL is no longer required when running in captive mode ([#3150](https://github.com/stellar/go/pull/3150)).
+* xdr: Add a custom marshaller for claim predicate timestamp ([#3183](https://github.com/stellar/go/pull/3183)).
+
+## v1.10.1
+
+* Bump max supported protocol version to 15.
+
+## v1.10.0
+
+**After upgrading Horizon will rebuild its state. During this process (which can take several minutes) it will not ingest new ledgers.**
+
+* Fixed a bug that caused a fresh instance of Horizon to be unable to sync with testnet (Protocol 14) correctly. ([#3100](https://github.com/stellar/go/pull/3100))
+* Add Golang- and process-related metrics. ([#3103](https://github.com/stellar/go/pull/3103))
+* New `network_passphrase` field in History Archives (added in Stellar-Core 14.1.0) is now checked. Horizon will return an error if an incorrect archive is used. ([#3082](https://github.com/stellar/go/pull/3082))
+* Fixed a bug that caused some errors to be logged with `info` level instead of `error` level. ([#3094](https://github.com/stellar/go/pull/3094))
+* Fixed a bug in `/claimable_balances` that returned 500 error instead of 400 for some requests. ([#3088](https://github.com/stellar/go/pull/3088))
+* Print a friendly message when Horizon does not support the current Stellar protocol version. ([#3093](https://github.com/stellar/go/pull/3093))
+
+## v1.9.1
+
+* Fixed a bug that caused a fresh instance of Horizon to be unable to sync with testnet (Protocol 14) correctly. ([#3096](https://github.com/stellar/go/pull/3096))
+* Use underscore in JSON fields for claim predicate to make the API consistent. ([#3086](https://github.com/stellar/go/pull/3086))
+
+## v1.9.0
+
+This release adds support for the upcoming Protocol 14 upgrade. However, Horizon still maintains backwards compatibility with Protocol 13, which means it is still safe to run this release before Protocol 14 is deployed.
+
+**After upgrading Horizon will rebuild its state. During this process (which can take several minutes) it will not ingest new ledgers.**
+
+The two main features of Protocol 14 are [CAP 23 Claimable Balances](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md) and [CAP 33 Sponsored Reserves](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md).
+Claimable balances provide a mechanism for setting up a payment which can be claimed in the future. This allows you to make payments to accounts which are currently not able to accept them.
+Sponsored Reserves allows an account to pay the reserves on behalf of another account.
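+
+As a rough illustration of how a client might consume the new claimable balance endpoints described below, here is a minimal sketch using only the Go standard library. The Horizon URL, the `claimant` query parameter name, and the HAL `_embedded.records` envelope are assumptions; the decoded fields mirror the sample response shown later in this section:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// claimableBalance decodes only the fields this sketch prints; the full
+// resource is shown in the sample response later in this section.
+type claimableBalance struct {
+	ID     string `json:"id"`
+	Asset  string `json:"asset"`
+	Amount string `json:"amount"`
+}
+
+// claimableBalancesPage assumes Horizon's usual HAL collection envelope.
+type claimableBalancesPage struct {
+	Embedded struct {
+		Records []claimableBalance `json:"records"`
+	} `json:"_embedded"`
+}
+
+func main() {
+	// Hypothetical Horizon instance; the claimant account is taken from the
+	// sample response below and used purely for illustration.
+	horizonURL := "https://horizon-testnet.stellar.org"
+	claimant := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"
+
+	resp, err := http.Get(horizonURL + "/claimable_balances?claimant=" + claimant)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var page claimableBalancesPage
+	if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
+		panic(err)
+	}
+	for _, cb := range page.Embedded.Records {
+		fmt.Printf("balance %s: %s %s\n", cb.ID, cb.Amount, cb.Asset)
+	}
+}
+```
+
+The same pattern applies to `GET /claimable_balances/{id}`, which returns a single resource like the sample response below rather than a collection page.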
+ +In this release there is a new claimable balance resource which has a unique id, an asset (describing which asset can be claimed), an amount (the amount of the asset that can be claimed), and a list of claimants (an immutable list of accounts that could potentially claim the balance). +The `GET /claimable_balances/{id}` endpoint was added to Horizon's API to allow looking up a claimable balance by its id. See the sample response below: + +```json +{ + "_links": { + "self": { + "href": "/claimable_balances/000000000102030000000000000000000000000000000000000000000000000000000000" + } + }, + "id": "000000000102030000000000000000000000000000000000000000000000000000000000", + "asset": "native", + "amount": "10.0000000", + "sponsor": "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "last_modified_ledger": 123, + "claimants": [ + { + "destination": "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "predicate": { + "unconditional": true + } + } + ], + "paging_token": "123-000000000102030000000000000000000000000000000000000000000000000000000000" +} +``` + +There is also a `GET /claimable_balances` endpoint which searches for claimable balances by asset, sponsor, or claimant destination. + +To support [CAP 33 Sponsored Reserves](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md) we have added an optional `sponsor` attribute in the following Horizon resources: accounts, account signers, offers, trustlines, and claimable balances. +If the `sponsor` field is present it means that the account with id `sponsor` is paying for the reserves for the sponsored account / account signer / offer / trustline / claimable balance. We have also added an optional `sponsor` query parameter to the following endpoints: +* `GET /accounts` +* `GET /offers` +* `GET /claimable_balances` + +If the `sponsor` query param is provided, Horizon will search for objects sponsored by the given account id. + +## v1.8.2 + +* Fixed a bug which prevented Horizon from accepting TLS connections. + +## v1.8.1 + +* Fixed a bug in a code ingesting fee bump transactions. + +## v1.8.0 + +### Changes + +* Added new and changed existing metrics: + * `horizon_build_info` - contains build information in labels (`version` - Horizon version, `goversion` - Go runtime version), + * `horizon_ingest_enable` - equals `1` if ingestion system is running, `0` otherwise, + * `horizon_ingest_state_invalid` - equals `1` if state is invalid, `0` otherwise, + * `horizon_db_max_open_connections` - determines the maximum possible opened DB connections, + * `horizon_db_wait_duration_seconds_total` - changed the values to be in seconds instead of nanoseconds. +* Fixed a data race when shutting down the HTTP server. ([#2958](https://github.com/stellar/go/pull/2958)). +* Fixed emitting incorrect errors related to OrderBook Stream when shutting down the app. ([#2964](https://github.com/stellar/go/pull/2964)) + +### Experimental + +The previous implementation of Captive Stellar-Core streams meta stream using a filesystem pipe. This implies that both Horizon and Stellar-Core had to be deployed to the same server. One of the disadvantages of such requirement is a need for detailed per-process monitoring to be able to connect potential issues (like memory leaks) to the specific service. + +To solve this it's now possible to start a [`captivecore`](https://github.com/stellar/go/tree/master/exp/services/captivecore) on another machine and configure Horizon to use it in ingestion. 
This requires two config options to be set:
+* `ENABLE_CAPTIVE_CORE_INGESTION=true`,
+* `REMOTE_CAPTIVE_CORE_URL` - pointing to the `captivecore` server.
+
+## v1.7.1
+
+This patch release fixes a regression introduced in 1.7.0, breaking the `/offers` endpoint. Thus, we recommend upgrading as soon as possible.
+
+### Changes
+* Fix path parameter mismatch in `/offers` endpoint [#2927](https://github.com/stellar/go/pull/2927).
+
+## v1.7.0
+
+### DB schema migration (expected migration time: < 10 mins)
+  * Add a new multicolumn index to improve the `/trades` endpoint's performance [#2869](https://github.com/stellar/go/pull/2869).
+  * Add constraints on database columns which cannot hold negative values [#2827](https://github.com/stellar/go/pull/2827).
+
+### Changes
+* Update Go toolchain to 1.14.6 in order to fix [golang/go#34775](https://github.com/golang/go/issues/34775), which caused some database queries to be executed instead of rolled back.
+* Fix panic on missing command line arguments [#2872](https://github.com/stellar/go/pull/2872)
+* Fix race condition where submitting a transaction to Horizon can result in a bad sequence error even though Stellar Core accepted the transaction. [#2877](https://github.com/stellar/go/pull/2877)
+* Add new DB metrics ([#2844](https://github.com/stellar/go/pull/2844)):
+  * `db_in_use_connections` - number of opened DB connections in use (not idle),
+  * `db_wait_count` - number of connections waited for,
+  * `db_wait_duration` - total time blocked waiting for a new connection.
+
+## v1.6.0
+
+* Add `--parallel-workers` and `--parallel-job-size` to `horizon db reingest range`. `--parallel-workers` will parallelize reingestion using the supplied number of workers. ([#2724](https://github.com/stellar/go/pull/2724))
+* Remove Stellar Core's database dependency for non-ingesting instances of Horizon. ([#2759](https://github.com/stellar/go/pull/2759))
+  Horizon doesn't require access to a Stellar Core database if it is only serving HTTP requests; this allows the separation of front-end and ingesting instances.
+  The following config parameters were removed:
+  - `core-db-max-open-connections`
+  - `core-db-max-idle-connections`
+* HAL response population is implemented using the Go `strings` package instead of `regexp`, improving its performance. ([#2806](https://github.com/stellar/go/pull/2806))
+* Fix a bug in `POST /transactions` that could cause `tx_bad_seq` errors instead of processing a valid transaction. ([#2805](https://github.com/stellar/go/pull/2805))
+* The `--connection-timeout` param is ignored in `POST /transactions`. The requests sent to that endpoint will always time out after 30 seconds. ([#2818](https://github.com/stellar/go/pull/2818))
+
+### Experimental
+
+* Add experimental support for live ingestion using a Stellar Core subprocess instead of a persistent Stellar Core database.
+
+  Stellar-core now contains an experimental feature which allows replaying ledgers' metadata in memory. This feature starts paving the way to remove the dependency between Stellar Core's database and Horizon. Requires [Stellar Core v13.2.0](https://github.com/stellar/stellar-core/releases/tag/v13.2.0).
+
+  To try out this new experimental feature, you need to specify the following parameters when starting an ingesting Horizon instance:
+
+  - `--enable-captive-core-ingestion` or `ENABLE_CAPTIVE_CORE_INGESTION=true`.
+  - `--stellar-core-binary-path` or `STELLAR_CORE_BINARY_PATH`.
+
+## v1.5.0
+
+### Changes
+
+* Remove `--ingest-failed-transactions` flag.
From now on Horizon will always ingest failed transactions. WARNING: If your application is using the Horizon DB directly (not recommended!) remember that it will now also contain failed transactions. ([#2702](https://github.com/stellar/go/pull/2702)).
+* Add transaction set operation count to `history_ledger` ([#2690](https://github.com/stellar/go/pull/2690)).
+Extend ingestion to store the total number of operations in the transaction set and expose it in the ledger resource via `tx_set_operation_count`. This feature allows you to assess the used capacity of a transaction set.
+* Fix the `/metrics` endpoint ([#2717](https://github.com/stellar/go/pull/2717)).
+* Gracefully handle incorrect assets in the query parameters of GET `/offers` ([#2634](https://github.com/stellar/go/pull/2634)).
+* Fix a logging message in OrderBookStream ([#2699](https://github.com/stellar/go/pull/2699)).
+* Fix a data race in the root endpoint ([#2745](https://github.com/stellar/go/pull/2745)).
+
+### Experimental
+
+* Add experimental support for database reingestion using a Stellar Core subprocess instead of a persistent Stellar Core database ([#2695](https://github.com/stellar/go/pull/2695)).
+
+  [Stellar Core v12.3.0](https://github.com/stellar/stellar-core/releases/tag/v12.3.0) added an experimental feature which allows replaying ledgers' metadata in memory. This feature speeds up reingestion and starts paving the way to remove the dependency between Stellar Core's database and Horizon.
+
+  For now, this is only supported while running `horizon db reingest`. To try out this new experimental feature, you need to specify the following parameters:
+
+  - `--enable-captive-core-ingestion` or `ENABLE_CAPTIVE_CORE_INGESTION=true`.
+  - `--stellar-core-binary-path` or `STELLAR_CORE_BINARY_PATH`.
+
+### SDK Maintainers: action needed
+
+- Add the new field `tx_set_operation_count` to the `ledger` resource ([#2690](https://github.com/stellar/go/pull/2690)). This field can be a `number` or `null`.
+
+## v1.4.0
+
+* Drop support for MuxedAccounts strkeys (spec'ed in [SEP23](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0023.md)). SEP23 is still a draft and we don't want to encourage storing strkeys which may not be final.
+* Replace the `SequenceProvider` implementation with one which queries the Horizon DB for sequence numbers instead of the Stellar Core DB.
+* Use the Horizon DB instead of Horizon's in-memory order book graph to query order book details for the `/order_book` endpoint.
+* Remove the JSON variant of `GET /metrics`, both in the server and client code. It now uses the Prometheus format by default.
+* Decreased the memory usage of the initial state ingestion stage and the state verifier ([#2618](https://github.com/stellar/go/pull/2618)).
+* Remove the `--exp-ingest-in-memory-only` Horizon flag. The in-memory order book graph which powers the path finding endpoints is now updated using the Horizon DB instead of directly via ingestion ([#2630](https://github.com/stellar/go/pull/2630)).
+
+## v1.3.0
+
+### Breaking changes
+
+* The type for the following attributes has been changed from `int64` to `string` ([#2555](https://github.com/stellar/go/pull/2555)):
+  - Attribute `fee_charged` in [Transaction](https://www.stellar.org/developers/horizon/reference/resources/transaction.html) resource.
+  - Attribute `max_fee` in [Transaction](https://www.stellar.org/developers/horizon/reference/resources/transaction.html) resource.
+
+### Changes
+
+* Add `last_modified_time` to account responses.
`last_modified_time` is the closing time of the most recent ledger in which the account was modified ([#2528](https://github.com/stellar/go/pull/2528)).
+* Balances in the Account resource are now sorted by asset code and asset issuer ([#2516](https://github.com/stellar/go/pull/2516)).
+* The ingestion system now has its own dedicated DB connection pool ([#2560](https://github.com/stellar/go/pull/2560)).
+* New metrics have been added to `/metrics` ([#2537](https://github.com/stellar/go/pull/2537) and [#2553](https://github.com/stellar/go/pull/2553)):
+  - `ingest.local_latest_ledger`: a gauge with the local latest ledger,
+  - `txsub.v0`: a meter counting `v0` transactions in `POST /transaction`,
+  - `txsub.v1`: a meter counting `v1` transactions in `POST /transaction`,
+  - `txsub.feebump`: a meter counting `feebump` transactions in `POST /transaction`.
+* Fix a memory leak in the code responsible for streaming ([#2548](https://github.com/stellar/go/pull/2548), [#2575](https://github.com/stellar/go/pull/2575) and [#2576](https://github.com/stellar/go/pull/2576)).
+
+## v1.2.2
+
+* Fix a bug which occurred when ingesting ledgers containing both fee bump and normal transactions.
+
+## v1.2.1
+
+### Database migration notes
+
+This version removes two unused columns that could overflow in `CATCHUP_COMPLETE` deployments. If your Horizon database contains the entire public network history, you should upgrade to this version as soon as possible and run `horizon db migrate up`.
+
+### Changes
+
+* Remove `id` columns from `history_operation_participants` and `history_transaction_participants` to prevent possible integer overflow [#2532](https://github.com/stellar/go/pull/2532).
+
+## v1.2.0
+
+### Scheduled Breaking Changes
+
+* The type for the following attributes will be changed from `int64` to `string` in 1.3.0:
+  - Attribute `fee_charged` in [Transaction](https://www.stellar.org/developers/horizon/reference/resources/transaction.html) resource.
+  - Attribute `max_fee` in [Transaction](https://www.stellar.org/developers/horizon/reference/resources/transaction.html) resource.
+
+The changes are required by [CAP-15](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md).
+
+### Changes
+
+* Added support for [CAP-27](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) and [SEP-23](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0023.md) [#2491](https://github.com/stellar/go/pull/2491).
+* The XDR definition of a transaction memo is a string.
+However, XDR strings are actually binary blobs with no enforced encoding.
+It is possible to set the memo in a transaction envelope to a binary sequence which is not valid ASCII or Unicode.
+Previously, if you wanted to recover the original binary sequence for a transaction memo, you would have to decode the transaction's envelope.
+In this release, we have added a `memo_bytes` field to the Horizon transaction response for transactions with `memo_type` equal to `text`.
+`memo_bytes` stores the base64 encoding of the memo bytes set in the transaction envelope [#2485](https://github.com/stellar/go/pull/2485).
+
+## v1.1.0
+
+### **IMPORTANT**: Database migration
+
+This version includes a significant database migration which changes the column types of `fee_charged` and `max_fee` in the `history_transactions` table from `integer` to `bigint`.
This essential change paves the way for fee bump transactions ([CAP 15](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md)), a major improvement that will be released soon in Stellar Protocol 13.
+
+This migration will run for a long time, especially if you have a Horizon database with full history. For reference, it took around 8 hours and 42 minutes to complete this migration on an AWS db.r4.8xlarge instance with full transaction history.
+
+To execute the migration, run `horizon db migrate up` using the Horizon v1.1.0 binary.
+
+**Important Note**: Horizon should not be serving requests or ingesting while the migration is running. For service continuity, if you run a production Horizon deployment, it is recommended that you perform the migration on a second instance and then switch over.
+
+### Changes
+* DB: Remove unnecessary duplicate indexes: `index_history_transactions_on_id`, `index_history_ledgers_on_id`, `exp_asset_stats_by_code`, and `asset_by_code` ([#2419](https://github.com/stellar/go/pull/2419)).
+* DB: Remove the asset_stats table which is no longer necessary ([#2419](https://github.com/stellar/go/pull/2419)).
+* Validate transaction hash IDs as 64 lowercase hex chars. As such, wrongly-formatted parameters which used to cause 404 (`Not found`) errors will now cause 400 (`Bad request`) HTTP errors ([#2394](https://github.com/stellar/go/pull/2394)).
+* Fix ask and bid price levels of `GET /order_book` when encountering non-canonical price values. The `limit` parameter is now respected and levels are coalesced properly. Also, `price_r` is now in canonical form ([#2400](https://github.com/stellar/go/pull/2400)).
+* Added missing top-level HAL links to the `GET /` response ([#2407](https://github.com/stellar/go/pull/2407)).
+* Full transaction details are now included in the `POST /transactions` response. If you submit a transaction and it succeeds, the response will match the `GET /transactions/{hash}` response ([#2406](https://github.com/stellar/go/pull/2406)).
+* The following attributes are now included in the transaction resource:
+  * `fee_account` (the account which paid the transaction fee)
+  * `fee_bump_transaction` (only present in Protocol 13 fee bump transactions)
+  * `inner_transaction` (only present in Protocol 13 fee bump transactions) ([#2406](https://github.com/stellar/go/pull/2406)).
+* Add support for [CAP0018](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md): Fine-Grained Control of Authorization (Protocol 13) ([#2423](https://github.com/stellar/go/pull/2423)).
+  - Add `is_authorized_to_maintain_liabilities` to `Balance`.
+
+    "balances": [
+      {
+        "is_authorized": true,
+        "is_authorized_to_maintain_liabilities": true,
+        "balance": "27.1374422",
+        "limit": "922337203685.4775807",
+        "buying_liabilities": "0.0000000",
+        "selling_liabilities": "0.0000000",
+        "last_modified_ledger": 28893780,
+        "asset_type": "credit_alphanum4",
+        "asset_code": "USD",
+        "asset_issuer": "GBSTRUSD7IRX73RQZBL3RQUH6KS3O4NYFY3QCALDLZD77XMZOPWAVTUK"
+      },
+      {
+        "balance": "1.5000000",
+        "buying_liabilities": "0.0000000",
+        "selling_liabilities": "0.0000000",
+        "asset_type": "native"
+      }
+    ]
+    
+ - Add `authorize_to_maintain_liabilities` to `AllowTrust` operation. +
+    {
+      "id": "124042211741474817",
+      "paging_token": "124042211741474817",
+      "transaction_successful": true,
+      "source_account": "GBSTRUSD7IRX73RQZBL3RQUH6KS3O4NYFY3QCALDLZD77XMZOPWAVTUK",
+      "type": "allow_trust",
+      "type_i": 7,
+      "created_at": "2020-03-27T03:40:10Z",
+      "transaction_hash": "a77d4ee5346d55fb8026cdcdad6e4b5e0c440c96b4627e3727f4ccfa6d199e94",
+      "asset_type": "credit_alphanum4",
+      "asset_code": "USD",
+      "asset_issuer": "GBSTRUSD7IRX73RQZBL3RQUH6KS3O4NYFY3QCALDLZD77XMZOPWAVTUK",
+      "trustee": "GBSTRUSD7IRX73RQZBL3RQUH6KS3O4NYFY3QCALDLZD77XMZOPWAVTUK",
+      "trustor": "GA332TXN6BX2DYKGYB7FW5BWV2JLQKERNX4T7EUJT4MHWOW2TSGC2SPM",
+      "authorize": true,
+      "authorize_to_maintain_liabilities": true
+    }
+    
+ - Add effect `trustline_authorized_to_maintain_liabilities`. +
+    {
+      "id": "0124042211741474817-0000000001",
+      "paging_token": "124042211741474817-1",
+      "account": "GBSTRUSD7IRX73RQZBL3RQUH6KS3O4NYFY3QCALDLZD77XMZOPWAVTUK",
+      "type": "trustline_authorized_to_maintain_liabilities",
+      "type_i": 25,
+      "created_at": "2020-03-27T03:40:10Z",
+      "trustor": "GA332TXN6BX2DYKGYB7FW5BWV2JLQKERNX4T7EUJT4MHWOW2TSGC2SPM",
+      "asset_type": "credit_alphanum4",
+      "asset_code": "USD"
+    }
+    
+* It is no longer possible to use Redis as a mechanism for rate-limiting requests ([#2409](https://github.com/stellar/go/pull/2409)). + +* Make `GET /trades` generate an empty response instead of a 404 when no + trades are found. + +## v1.0.1 + +### Fixed +* Fix `horizon db reap` bug which caused the command to exit without deleting any history table rows ([#2336](https://github.com/stellar/go/pull/2336)). +* The horizon reap system now also deletes rows from `history_trades`. Previously, the reap system only deleted rows from `history_operation_participants`, `history_operations`, `history_transaction_participants`, `history_transactions`, `history_ledgers`, and `history_effects` ([#2336](https://github.com/stellar/go/pull/2336)). +* Fix deadlock when running `horizon db reingest range` ([#2373](https://github.com/stellar/go/pull/2373)). +* Fix signer update effects ([#2375](https://github.com/stellar/go/pull/2375)). +* Fix incorrect error in log when shutting down the system while `verifyState` is running ([#2366](https://github.com/stellar/go/pull/2366)). +* Expose date header to CORS clients ([#2316](https://github.com/stellar/go/pull/2316)). +* Fix inconsistent ledger view in `/accounts/{id}` when streaming ([#2344](https://github.com/stellar/go/pull/2344)). + +### Removed +* Dropped support for Go 1.12. ([#2346](https://github.com/stellar/go/pull/2346)). + +## v1.0.0 + +### Before you upgrade + +* If you were using the new ingestion in one of the previous versions of Horizon, you must first remove `ENABLE_EXPERIMENTAL_INGESTION` feature flag and restart all Horizon instances before deploying a new version. +* The init stage (state ingestion) for the public Stellar network requires around 1.5GB of RAM. This memory is released after the state ingestion. State ingestion is performed only once. Restarting the server will not trigger it unless Horizon has been upgraded to a newer version (with an updated ingestion pipeline). It's worth noting that the required memory will become smaller and smaller as more of the buckets in the history archive become [CAP-20](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0020.md) compatible. Some endpoints are **not available** during state ingestion. +* The CPU footprint of the new ingestion is modest. We were able to successfully run ingestion on an [AWS `c5.xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) instance. The init stage takes a few minutes on `c5.xlarge`. `c5.xlarge` is the equivalent of 4 vCPUs and 8GB of RAM. The definition of vCPU for the c5 large family in AWS is the following: +> The 2nd generation Intel Xeon Scalable Processors (Cascade Lake) or 1st generation Intel Xeon Platinum 8000 series (Skylake-SP) processor with a sustained all core Turbo frequency of up to 3.4GHz, and single core turbo frequency of up to 3.5 GHz. + +* The state data requires an additional 6GB DB disk space for the public Stellar network (as of February 2020). The disk usage will increase when the number of Stellar ledger entries increases. 
+ * `accounts_signers` table: 2340 MB + * `trust_lines` table: 2052 MB + * `accounts` table: 1545 MB + * `offers` table: 61 MB + * `accounts_data` table: 15 MB + * `exp_asset_stats` table: less than 1 MB +* A new environment variable (or command line flag) needs to be set so that Horizon can ingest state from the history archives: + * `HISTORY_ARCHIVE_URLS="archive1,archive2,archive3"` (if you don't have your own pubnet history archive, you can use one of SDF's archives, for example `https://history.stellar.org/prd/core-live/core_live_001`) +* Horizon serves the endpoints `/paths` and `/order_book` from an in-memory graph, which is only available on ingesting instances. If some of the instances in your cluster are not configured to ingest, you can configure your proxy server to route those endpoints to the ingesting instances. This is beyond the scope of this document - consult the relevant documentation for your proxy server. A better solution for this will be released in the next Horizon version. + +### New Ingestion System + +The most substantial element of this release is a full rewrite of Horizon's ledger ingestion engine, which enables some key features: + +* A set of important new endpoints (see below). Some of these were impossible under the previous ingestion architecture. +* An in-memory order book graph for rapid querying. +* The ability to run parallel ingestion over multiple Horizon hosts, improving service availability for production deployments. + +The new engine resolves multiple issues that were present in the old system. For example: + +* Horizon's coupling to Stellar-Core's database is dramatically reduced. +* Data inconsistency due to lag between endpoints is eliminated. +* Slow endpoints (path-finding for example) are now speedy. + +Finally, the rearchitecting makes new reliability features possible. An example is the new internal state verifier, which guarantees consistency between the local Horizon state and the public history archives. + +The [admin guide](https://github.com/stellar/go/blob/release-horizon-v0.25.0/services/horizon/internal/docs/admin.md) contains all the information needed to operate the new ingestion system. + +### Added + +- Add [/accounts](https://www.stellar.org/developers/horizon/reference/endpoints/accounts.html) endpoint, which allows filtering accounts that have a given signer or a trustline to an asset. +- Add [/offers](https://www.stellar.org/developers/horizon/reference/endpoints/offers.html) endpoint, which lists all offers on the network and allows filtering by seller account or by selling or buying asset. +- Add [/paths/strict-send](https://www.stellar.org/developers/horizon/reference/endpoints/path-finding-strict-send.html) endpoint, which enables discovery of optimal "strict send" paths between assets. +- Add [/paths/strict-receive](https://www.stellar.org/developers/horizon/reference/endpoints/path-finding-strict-receive.html) endpoint, which enables discovery of optimal "strict receive" paths between assets. +- Add the fields `max_fee` and `fee_charged` to [/fee_stats](https://www.stellar.org/developers/horizon/reference/endpoints/fee-stats.html). + +### Breaking changes + +### Changed + +- Change multiple operation types to their canonical names for [operation resources](https://www.stellar.org/developers/horizon/reference/resources/operation.html) ([#2134](https://github.com/stellar/go/pull/2134)). 
+- Change the type of the following fields from `number` to `string`: + + - Attribute `offer_id` in [manage buy offer](https://www.stellar.org/developers/horizon/reference/resources/operation.html#manage-buy-offer) and [manage sell offer](https://www.stellar.org/developers/horizon/reference/resources/operation.html#manage-sell-offer) operations. + - Attribute `offer_id` in `Trade` [effect](https://www.stellar.org/developers/horizon/reference/resources/effect.html#trading-effects). + - Attribute `id` in [Offer](https://www.stellar.org/developers/horizon/reference/resources/offer.html) resource. + - Attribute `timestamp` and `trade_count` in [Trade Aggregation](https://www.stellar.org/developers/horizon/reference/resources/trade_aggregation.html) resource. + + See [#1609](https://github.com/stellar/go/issues/1609), [#1909](https://github.com/stellar/go/pull/1909) and [#1912](https://github.com/stellar/go/issues/1912) for more details. + +### Removed + +- `/metrics` endpoint is no longer part of the public API. It is now served on `ADMIN_PORT/metrics`. `ADMIN_PORT` can be set using env variable or `--admin-port` CLI param. +- Remove the following fields from [/fee_stats](https://www.stellar.org/developers/horizon/reference/endpoints/fee-stats.html): + + - `min_accepted_fee` + - `mode_accepted_fee` + - `p10_accepted_fee` + - `p20_accepted_fee` + - `p30_accepted_fee` + - `p40_accepted_fee` + - `p50_accepted_fee` + - `p60_accepted_fee` + - `p70_accepted_fee` + - `p80_accepted_fee` + - `p90_accepted_fee` + - `p95_accepted_fee` + - `p99_accepted_fee` + +- Remove `fee_paid` field from [Transaction resource](https://www.stellar.org/developers/horizon/reference/resources/transaction.html) (Use `fee_charged` and `max_fee` fields instead - see [#1372](https://github.com/stellar/go/issues/1372)). + +## v0.24.1 + +* Add cache to improve performance of experimental ingestion system (#[2004](https://github.com/stellar/go/pull/2004)). +* Fix experimental ingestion bug where ledger changes were not applied in the correct order (#[2050](https://github.com/stellar/go/pull/2050)). +* Fix experimental ingestion bug where unique constraint errors are incurred when the ingestion system has to reingest state from history archive checkpoints (#[2055](https://github.com/stellar/go/pull/2055)). +* Fix experimental ingestion bug where a race condition during shutdown leads to a crash (#[2058](https://github.com/stellar/go/pull/2058)). + +## v0.24.0 + +* Add `fee_charged` and `max_fee` objects to `/fee_stats` endpoint ([#1964](https://github.com/stellar/go/pull/1964)). +* Experimental ledger header ingestion processor ([#1949](https://github.com/stellar/go/pull/1949)). +* Improved performance of asset stats processor ([#1987](https://github.com/stellar/go/pull/1987)). +* Provide mechanism for retrying XDR stream errors ([#1899](https://github.com/stellar/go/pull/1899)). +* Emit error level log after 3 failed attempts to validate state ([#1918](https://github.com/stellar/go/pull/1918)). +* Fixed out of bounds error in ledger backend reader ([#1914](https://github.com/stellar/go/pull/1914)). +* Fixed out of bounds error in URL params handler ([#1973](https://github.com/stellar/go/pull/1973)). +* Rename `OperationFeeStats` to `FeeStats` ([#1952](https://github.com/stellar/go/pull/1952)). +* All DB queries are now cancelled when request is cancelled/timeout. ([#1950](https://github.com/stellar/go/pull/1950)). +* Fixed multiple issues connected to graceful shutdown of Horizon. 
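+
+For consumers adjusting to the new `fee_charged` and `max_fee` objects listed in the v0.24.0 changes above, the sketch below shows one way to fetch `/fee_stats` and inspect them. It is illustrative only and not part of the release notes: the Horizon URL is just an example, and the response is decoded into a generic map so no assumptions are made about the exact sub-fields of the new objects.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// Example Horizon instance; substitute your own deployment.
+	resp, err := http.Get("https://horizon.stellar.org/fee_stats")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Decode into a generic map to avoid hard-coding the layout of the
+	// fee_charged and max_fee objects.
+	var stats map[string]interface{}
+	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
+		panic(err)
+	}
+	fmt.Println("fee_charged:", stats["fee_charged"])
+	fmt.Println("max_fee:", stats["max_fee"])
+}
+```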
+ +### Scheduled Breaking Changes + +* All `*_accepted_fee` fields in `/fee_stats` endpoint are deprecated. Fields will be removed in Horizon 0.25.0. + +Previously scheduled breaking changes reminders: + +* The following operation type names have been deprecated: `path_payment`, `manage_offer` and `create_passive_offer`. The names will be changed to: `path_payment_strict_receive`, `manage_sell_offer` and `create_passive_sell_offer` in 0.25.0. This has been previously scheduled for 0.22.0 release. +* `fee_paid` field on Transaction resource has been deprecated and will be removed in 0.25.0 (previously scheduled for 0.22.0). Please use new fields added in 0.18.0: `max_fee` that defines the maximum fee the source account is willing to pay and `fee_charged` that defines the fee that was actually paid for a transaction. See [CAP-0005](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0005.md) for more information. +* The type for the following attributes will be changed from `int64` to `string` in 0.25.0 (previously scheduled for 0.22.0): + - Attribute `offer_id` in [manage buy offer](https://www.stellar.org/developers/horizon/reference/resources/operation.html#manage-buy-offer) and [manage sell offer](https://www.stellar.org/developers/horizon/reference/resources/operation.html#manage-sell-offer) operations. + - Attribute `offer_id` in `Trade` effect. + - Attribute `id` in [Offer](https://www.stellar.org/developers/horizon/reference/resources/offer.html) resource. + - Attribute `timestamp` and `trade_count` in [Trade Aggregation](https://www.stellar.org/developers/horizon/reference/resources/trade_aggregation.html) resource. + +Check [Beta Testing New Ingestion System](https://github.com/stellar/go/blob/master/services/horizon/internal/expingest/BETA_TESTING.md) if you want to test the new ingestion system. + +## v0.23.1 + +* Add `ReadTimeout` to Horizon HTTP server configuration to fix potential DoS vector. + +## v0.23.0 + +* New features in experimental ingestion (to enable: set `--enable-experimental-ingestion` CLI param or `ENABLE_EXPERIMENTAL_INGESTION=true` env variable): + * All state-related endpoints (i.e. ledger entries) are now served from Horizon DB (except `/account/{account_id}`) + + * `/order_book` offers data is served from in-memory store ([#1761](https://github.com/stellar/go/pull/1761)) + + * Add `Latest-Ledger` header with the sequence number of the most recent ledger processed by the experimental ingestion system. Endpoints built on the experimental ingestion system will always respond with data which is consistent with the ledger in `Latest-Ledger` ([#1830](https://github.com/stellar/go/pull/1830)) + + * Add experimental support for filtering accounts who are trustees to an asset via `/accounts`. 
Example:\
+  `/accounts?asset=COP:GC2GFGZ5CZCFCDJSQF3YYEAYBOS3ZREXJSPU7LUJ7JU3LP3BQNHY7YKS`\
+  returns all accounts that have a trustline to the asset `COP` issued by account `GC2GFG...` ([#1835](https://github.com/stellar/go/pull/1835))
+
+  * Experimental "Accounts For Signers" endpoint now returns a full account resource ([#1876](https://github.com/stellar/go/issues/1875))
+* Prevent "`multiple response.WriteHeader calls`" errors when streaming ([#1870](https://github.com/stellar/go/issues/1870))
+* Fix an interpolation bug in `/fee_stats` ([#1857](https://github.com/stellar/go/pull/1857))
+* Fix a bug in `/paths/strict-send` where occasionally bad paths were returned ([#1863](https://github.com/stellar/go/pull/1863))
+
+## v0.22.2
+
+* Fixes a bug in the accounts for signer ingestion processor.
+
+## v0.22.1
+
+* Fixes a bug in the path payment ingestion code.
+
+## v0.22.0
+
+* Adds support for Stellar Protocol v12.
+
+### Scheduled Breaking Changes
+
+* The following operation type names have been deprecated: `path_payment`, `manage_offer` and `create_passive_offer`. The names will be changed to: `path_payment_strict_receive`, `manage_sell_offer` and `create_passive_sell_offer` in 0.25.0. This has been previously scheduled for the 0.22.0 release.
+* `fee_paid` field on Transaction resource has been deprecated and will be removed in 0.23.0 (previously scheduled for 0.22.0). Please use new fields added in 0.18.0: `max_fee` that defines the maximum fee the source account is willing to pay and `fee_charged` that defines the fee that was actually paid for a transaction. See [CAP-0005](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0005.md) for more information.
+* The type for the following attributes will be changed from `int64` to `string` in 0.23.0 (previously scheduled for 0.22.0):
+  - Attribute `offer_id` in [manage buy offer](https://www.stellar.org/developers/horizon/reference/resources/operation.html#manage-buy-offer) and [manage sell offer](https://www.stellar.org/developers/horizon/reference/resources/operation.html#manage-sell-offer) operations.
+  - Attribute `offer_id` in `Trade` effect.
+  - Attribute `id` in [Offer](https://www.stellar.org/developers/horizon/reference/resources/offer.html) resource.
+  - Attribute `timestamp` and `trade_count` in [Trade Aggregation](https://www.stellar.org/developers/horizon/reference/resources/trade_aggregation.html) resource.
+
+## v0.21.1
+
+* Fixes a bug in the initial schema migration file.
+
+## v0.21.0
+
+### Database migration notes
+
+This version adds a new index on a table used by the experimental ingestion system. If it has not been enabled, the migration will be instant. If you migrate from a previous version with the experimental ingestion system enabled, the database migration can take a couple of minutes.
+
+### Changes
+
+* `/paths/strict-send` can now accept a `destination_account` parameter. If `destination_account` is provided then the endpoint will return all payment paths which terminate with an asset held by `destination_account`. Note that the endpoint will accept `destination_account` or `destination_assets` but not both. `destination_assets` is a comma separated list of assets encoded as `native` or `code:issuer`.
+* `/paths/strict-receive` can now accept a `source_assets` parameter instead of the `source_account` parameter. If `source_assets` is provided the endpoint will return all payment paths originating from an asset in `source_assets`. Note that the endpoint will accept `source_account` or `source_assets` but not both.
`source_assets` is a comma separated list of assets encoded as `native` or `code:issuer`.
+* Add experimental support for `/offers`. To enable it, set the `--enable-experimental-ingestion` CLI param or `ENABLE_EXPERIMENTAL_INGESTION=true` env variable.
+* When experimental ingestion is enabled, a state verification routine is started every 64 ledgers to ensure the local state is the same as in the history buckets. This can be disabled by setting the `--ingest-disable-state-verification` CLI param or `INGEST_DISABLE_STATE_VERIFICATION` env variable.
+* Add a flag to apply pending migrations before running horizon. Previously, if there were pending migrations, you needed to run `horizon db migrate up` before running `horizon`. Those two steps can now be combined into one with the `--apply-migrations` flag (`APPLY_MIGRATIONS` env variable).
+* Improved the speed of state ingestion in the experimental ingestion system.
+* Fixed a bug in the "Signers for Account" (experimental) transaction meta ingestion code.
+* Fixed a performance issue in Effects-related endpoints.
+* Fixed a DoS vector in the Go HTTP/2 implementation.
+* Dropped support for Go 1.10, 1.11.
+
+Check [Beta Testing New Ingestion System](https://github.com/stellar/go/blob/master/services/horizon/internal/expingest/BETA_TESTING.md) if you want to test the new ingestion system.
+
+## v0.20.1
+
+* Add `--ingest-state-reader-temp-set` flag (`INGEST_STATE_READER_TEMP_SET` env variable) which defines the storage type used for temporary objects during state ingestion in the new ingestion system. The possible options are: `memory` (requires ~1.5GB RAM, fast) and `postgres` (stores data in a temporary table in Postgres, less RAM but slower).
+
+Check [Beta Testing New Ingestion System](https://github.com/stellar/go/blob/master/services/horizon/internal/expingest/BETA_TESTING.md) if you want to test the new ingestion system.
+
+## v0.20.0
+
+If you want to use experimental ingestion, skip this version and use v0.20.1 instead. v0.20.0 has a performance issue.
+
+### Changes
+
+* The experimental ingestion system is now run concurrently on all Horizon servers (with the feature flag set - see below). This improves ingestion availability.
+* Add experimental path finding endpoints which use an in-memory order book for improved performance. To enable the endpoints, set the `--enable-experimental-ingestion` CLI param or `ENABLE_EXPERIMENTAL_INGESTION=true` env variable. Note that the `enable-experimental-ingestion` flag enables both the new path finding endpoints and the accounts for signer endpoint. There are two path finding endpoints. `/paths/strict-send` returns payment paths where both the source and destination assets are fixed. This endpoint is able to answer questions like: "Get me the most EUR possible for my 10 USD." `/paths/strict-receive` is the other endpoint, which is an alias to the existing `/paths` endpoint.
+* The `--enable-accounts-for-signer` CLI param and `ENABLE_ACCOUNTS_FOR_SIGNER=true` env variable have been merged into the `--enable-experimental-ingestion` CLI param and `ENABLE_EXPERIMENTAL_INGESTION=true` env variable.
+* Add an experimental get offers by id endpoint `/offers/{id}` which uses the new ingestion system to fill up the offers table. To enable it, set the `--enable-experimental-ingestion` CLI param or `ENABLE_EXPERIMENTAL_INGESTION=true` env variable.
+
+Check [Beta Testing New Ingestion System](https://github.com/stellar/go/blob/master/services/horizon/internal/expingest/BETA_TESTING.md) if you want to test the new ingestion system.
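+
+As a rough illustration of the strict-send endpoint described above (this sketch is not part of the release notes): the `destination_assets` value follows the comma-separated `native` / `code:issuer` encoding documented in the v0.21.0 notes, the `COP` asset and issuer are borrowed from the `/accounts` example earlier in this changelog, and the `source_*` parameter names and the Horizon URL are assumptions made purely for the example.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Ask for payment paths that spend 10 COP and deliver the native asset.
+	// The source_* parameter names below are assumptions for illustration.
+	params := url.Values{}
+	params.Set("source_asset_type", "credit_alphanum4")
+	params.Set("source_asset_code", "COP")
+	params.Set("source_asset_issuer", "GC2GFGZ5CZCFCDJSQF3YYEAYBOS3ZREXJSPU7LUJ7JU3LP3BQNHY7YKS")
+	params.Set("source_amount", "10")
+	params.Set("destination_assets", "native")
+
+	// Example Horizon URL; point this at an ingesting instance.
+	resp, err := http.Get("https://horizon.example.org/paths/strict-send?" + params.Encode())
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, _ := io.ReadAll(resp.Body)
+	fmt.Println(string(body))
+}
+```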
+
+### Scheduled Breaking Changes
+
+* `fee_paid` field on Transaction resource has been deprecated and will be removed in 0.22.0. Please use new fields added in 0.18.0: `max_fee` that defines the maximum fee the source account is willing to pay and `fee_charged` that defines the fee that was actually paid for a transaction. See [CAP-0005](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0005.md) for more information. This change has been previously scheduled for the 0.19.0 release.
+* The following operation type names have been deprecated: `manage_offer` and `create_passive_offer`. The names will be changed to: `manage_sell_offer` and `create_passive_sell_offer` in 0.22.0. This has been previously scheduled for the 0.19.0 release.
+* The type for the following attributes will be changed from `int64` to `string` in 0.22.0:
+  - Attribute `offer_id` in [manage buy offer](https://www.stellar.org/developers/horizon/reference/resources/operation.html#manage-buy-offer) and [manage sell offer](https://www.stellar.org/developers/horizon/reference/resources/operation.html#manage-sell-offer) operations.
+  - Attribute `offer_id` in `Trade` effect.
+  - Attribute `id` in [Offer](https://www.stellar.org/developers/horizon/reference/resources/offer.html) resource.
+  - Attribute `timestamp` and `trade_count` in [Trade Aggregation](https://www.stellar.org/developers/horizon/reference/resources/trade_aggregation.html) resource.
+
+If you are an SDK maintainer, update your code to prepare for this change.
+
+## v0.19.0
+
+* Add `join` parameter to operations and payments endpoints. Currently, the only valid value for the parameter is `transactions`. If `join=transactions` is included in a request then the response will include a `transaction` field for each operation in the response.
+* Add experimental "Accounts For Signers" endpoint. To enable it, set the `--enable-accounts-for-signer` CLI param or `ENABLE_ACCOUNTS_FOR_SIGNER=true` env variable. Additionally, the new feature requires links to history archives: CLI: `--history-archive-urls="archive1,archive2,archive3"`, env variable: `HISTORY_ARCHIVE_URLS="archive1,archive2,archive3"`. This will expose the `/accounts` endpoint. This requires around 4GB of RAM for initial state ingestion.
+
+Check [Beta Testing New Ingestion System](https://github.com/stellar/go/blob/master/services/horizon/internal/expingest/BETA_TESTING.md) if you want to test the new ingestion system.
+
+## v0.18.1
+
+* Fixed `/fee_stats` to correctly calculate ledger capacity in protocol v11.
+* Fixed `horizon db clean` command to truncate all tables.
+
+## v0.18.0
+
+### Breaking changes
+
+* Horizon requires Postgres 9.5+.
+* Removed `paging_token` field from `/accounts/{id}` endpoint.
+* Removed `/operation_fee_stats` endpoint. Please use `/fee_stats`.
+
+### Deprecations
+
+* `fee_paid` field on Transaction resource has been deprecated and will be removed in 0.19.0. Two new fields have been added: `max_fee` that defines the maximum fee the source account is willing to pay and `fee_charged` that defines the fee that was actually paid for a transaction. See [CAP-0005](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0005.md) for more information.
+* The following operation type names have been deprecated: `manage_offer` and `create_passive_offer`. The names will be changed to: `manage_sell_offer` and `create_passive_sell_offer` in 0.19.0.
+
+### Changes
+
+* The following new config parameters were added.
When the old `max-db-connections` config parameter is set, it takes priority over the new params. Run `horizon help` for more information.
+  * `horizon-db-max-open-connections`,
+  * `horizon-db-max-idle-connections`,
+  * `core-db-max-open-connections`,
+  * `core-db-max-idle-connections`.
+* Fixed `fee_paid` value in Transaction resource (#1358).
+* Fix "int64: value out of range" errors in trade aggregations (#1319).
+* Improved `horizon db reingest range` command.
+
+## v0.17.6 - 2019-04-29
+
+* Fixed a bug in `/order_book` when the sum of amounts at a single price level exceeds `int64_max` (#1037).
+* Fixed a bug generating `ERROR` level log entries for bad requests (#1186).
+
+## v0.17.5 - 2019-04-24
+
+* Support for stellar-core [v11.0.0](https://github.com/stellar/stellar-core/releases/tag/v11.0.0).
+* Display trustline authorization state in the balances list.
+* Improved actions code.
+* Improved `horizon db reingest` command handling code.
+* Tracking app name and version that connects to Horizon (`X-App-Name`, `X-App-Version`).
+
+## v0.17.4 - 2019-03-14
+
+* Support for Stellar-Core 10.3.0 (new database schema v9).
+* Fix a bug in `horizon db reingest` command (no log output).
+* Multiple code improvements.
+
+## v0.17.3 - 2019-03-01
+
+* Fix a bug in `txsub` package that caused returning an invalid status when resubmitting old transactions (#969).
+
+## v0.17.2 - 2019-02-28
+
+* Critical bug fix.
+
+## v0.17.1 - 2019-02-28
+
+### Changes
+
+* Fixes a high severity error in the ingestion system.
+* Account detail endpoint (`/accounts/{id}`) includes `last_modified_ledger` field for the account and for each non-native asset balance.
+
+## v0.17.0 - 2019-02-26
+
+### Upgrade notes
+
+This release introduces ingestion of failed transactions. This feature is turned off by default. To turn it on, set the environment variable `INGEST_FAILED_TRANSACTIONS=true` or the CLI param `--ingest-failed-transactions=true`. Please note that ingesting failed transactions can double DB space requirements (especially important for full history deployments).
+
+### Database migration notes
+
+Previous versions work fine with the new schema, so you can migrate the database (`horizon db migrate up` using the new binary) without stopping the Horizon process. To reingest ledgers, run `horizon db reingest` using the Horizon 0.17.0 binary. You can take advantage of the new `horizon db reingest range` command for parallel reingestion.
+
+### Deprecations
+
+* `/operation_fee_stats` is deprecated in favour of `/fee_stats`. Will be removed in v0.18.0.
+
+### Breaking changes
+
+* Fields removed in this version:
+  * Root > `protocol_version`, use `current_protocol_version` and `core_supported_protocol_version`.
+  * Ledger > `transaction_count`, use `successful_transaction_count` and `failed_transaction_count`.
+  * Signer > `public_key`, use `key`.
+* This Horizon version no longer supports Core <10.0.0. Horizon can still ingest version <10 ledgers.
+* The error event name during streaming has been changed to `error` to follow the W3C specification.
+
+### Changes
+
+* Added ingestion of failed transactions (see Upgrade notes). Use the `include_failed=true` GET parameter to display failed transactions and operations in collection endpoints.
+* `/fee_stats` endpoint has been extended with fee percentiles and ledger capacity usage. Both are useful in transaction fee estimations.
+* Fixed a bug causing slice bounds out of range at `/account/{id}/offers` endpoint during streaming.
+* Added `horizon db reingest range X Y` that reingests ledgers between X and Y sequence numbers (closed interval).
+* Many code improvements.
+
+## v0.16.0 - 2019-02-04
+
+### Upgrade notes
+
+* Ledger > Admins need to reingest old ledgers because we introduced `successful_transaction_count` and `failed_transaction_count`.
+
+### Database migration notes
+
+Previous versions work fine with the Horizon 0.16.0 schema, so you can migrate the database (`horizon db migrate up`) without stopping the Horizon process. To reingest ledgers, run `horizon db reingest` using the Horizon 0.16.0 binary.
+
+### Deprecations
+
+* Root > `protocol_version` will be removed in v0.17.0. It is replaced by `current_protocol_version` and `core_supported_protocol_version`.
+* Ledger > `transaction_count` will be removed in v0.17.0.
+* Signer > `public_key` will be removed in v0.17.0.
+
+### Changes
+
+* Improved `horizon db migrate` script. It will now either succeed or show a detailed message explaining why it failed.
+* Fixed effects ingestion of circular payments.
+* Improved account query performance for payments and operations.
+* Added `successful_transaction_count` and `failed_transaction_count` to `ledger` resource.
+* Fixed the wrong protocol version displayed in `root` resource by adding `current_protocol_version` and `core_supported_protocol_version`.
+* Improved streaming for single objects. It won't send an event back if the current event is the same as the last event sent.
+* Fixed ingesting effects of empty trades. Empty trades will be ignored during ingestion.
+
+## v0.15.4 - 2019-01-17
+
+* Fixed multiple issues in the transaction submission subsystem.
+* Support for client fingerprint headers.
+* Fixed parameter checking in `horizon db backfill` command.
+
+## v0.15.3 - 2019-01-07
+
+* Fixed a bug in Horizon DB reaping code.
+* Fixed query checking code that generated `ERROR`-level log entries for invalid input.
+
+## v0.15.2 - 2018-12-13
+
+* Added `horizon db init-asset-stats` command to initialize `asset_stats` table. This command should be run once before starting ingestion if asset stats are enabled (`ENABLE_ASSET_STATS=true`).
+* Fixed `asset_stats` table to support longer `home_domain`s.
+* Fixed slow trades DB query.
+
+## v0.15.1 - 2018-11-09
+
+* Fixed a memory leak in the SSE stream code.
+
+## v0.15.0 - 2018-11-06
+
+DB migrations add new fields and indexes to the `history_trades` table. This is a very large table in `CATCHUP_COMPLETE` deployments, so the migration may take a long time (depending on your DB hardware). Please test the migration's execution time on a copy of your production DB first.
+
+This release contains several bug fixes and improvements:
+
+* The new `/operation_fee_stats` endpoint includes fee stats for the last 5 ledgers.
+* ["Trades"](https://www.stellar.org/developers/horizon/reference/endpoints/trades.html) endpoint can now be streamed.
+* In the ["Trade Aggregations"](https://www.stellar.org/developers/horizon/reference/endpoints/trade_aggregations.html) endpoint, an `offset` parameter has been added.
+* Path finding bugs have been fixed and the algorithm has been improved. Check [#719](https://github.com/stellar/go/pull/719) for more information.
+* Connections (including streams) are closed after the timeout defined using the `--connection-timeout` CLI param or `CONNECTION_TIMEOUT` environment variable. If Horizon is behind a load balancer with an idle timeout set, it is recommended to set this to a value a few seconds less than the idle timeout so streams can be properly closed by Horizon.
+* Streams have been improved to check for updates every N seconds, where N is defined by the `--sse-update-frequency` CLI param or `SSE_UPDATE_FREQUENCY` environment variable. If a new ledger has been closed in this period, new events will be sent to the stream. Previously, streams checked for new events every second, even when there were no new ledgers.
+* The rate limiting algorithm has been changed to [GCRA](https://brandur.org/rate-limiting#gcra).
+* Rate limiting in streams has been changed to be more fair. Now 1 *credit* has to be *paid* every time there's a new ledger, instead of per request.
+* Rate limiting can be disabled completely by setting the `--per-hour-rate-limit=0` CLI param or `PER_HOUR_RATE_LIMIT=0` environment variable.
+* Account flags now display the `auth_immutable` value.
+* Logs can be sent to a file. The destination file can be set using an environment variable (`LOG_FILE={file}`) or CLI parameter (`--log-file={file}`).
+
+### Breaking changes
+
+* Asset stats are disabled by default. This can be changed using an environment variable (`ENABLE_ASSET_STATS=true`) or CLI parameter (`--enable-asset-stats=true`). Please note that it has a negative impact on DB and ingestion time.
+* In ["Offers for Account"](https://www.stellar.org/developers/horizon/reference/endpoints/offers-for-account.html), the `last_modified_time` field can be `null` when ledger data is not available (has not been ingested yet).
+* ["Trades for Offer"](https://www.stellar.org/developers/horizon/reference/endpoints/trades-for-offer.html) endpoint will query for trades that match the given offer on either side of the trade, rather than just the "sell" offer. Offer IDs are now [synthetic](https://www.stellar.org/developers/horizon/reference/resources/trade.html#synthetic-offer-ids). You have to reingest history to update offer IDs.
+
+### Other bug fixes
+
+* `horizon db backfill` command has been fixed.
+* Fixed `remoteAddrIP` function to support IPv6.
+* Fixed `route` field in the logs when the request is rate limited.
+
+## v0.14.2 - 2018-09-27
+
+### Bug fixes
+
+* Fixed and improved `txsub` package (#695). This should resolve many issues connected to `Timeout` responses.
+* Improve stream error reporting (#680).
+* Checking `ingest.Cursor` errors in `Session` (#679).
+* Added account ID validation in `/account/{id}` endpoints (#684).
+
+## v0.14.1 - 2018-09-19
+
+This release contains several bug fixes:
+
+* Asset stats can cause high CPU usage on the stellar-core DB. If this slows down the database, it's now possible to turn off this feature by setting the `DISABLE_ASSET_STATS` feature flag. This can be set as an environment variable (`DISABLE_ASSET_STATS=true`) or CLI parameter (`--disable-asset-stats=true`).
+* Sometimes `/accounts/{id}/offers` returns a `500 Internal Server Error` response when ledger data is not available yet (for new ledgers) or no longer available (`CATCHUP_RECENT` deployments). It's possible to set the `ALLOW_EMPTY_LEDGER_DATA_RESPONSES` feature flag as an environment variable (`ALLOW_EMPTY_LEDGER_DATA_RESPONSES=true`) or CLI parameter (`--allow-empty-ledger-data-responses=true`). With the flag set to `true`, the "Offers for Account" endpoint will return `null` in the `last_modified_time` field when ledger data is not available, instead of a `500 Internal Server Error`.
+
+### Bug fixes
+
+* Feature flag to disable asset stats (#668).
+* Feature flag to allow null ledger data in responses (#672).
+* Fix empty memo field in JSON when memo_type is text (#635).
+* Improved logging: some bad requests no longer generate `ERROR` level log entries (#654).
+* The `/friendbot` endpoint is available only when `FriendbotURL` is set in the config.
+
+## v0.14.0 - 2018-09-06
+
+### Breaking changes
+
+* Offer resource `last_modified` field removed (see Bug Fixes section).
+* Trade aggregations endpoint accepts only specific time ranges now (1/5/15 minutes, 1 hour, 1 day, 1 week).
+* Horizon sends `Cache-Control: no-cache, no-store, max-age=0` HTTP header for all responses.
+
+### Deprecations
+
+* Account > Signers collection `public_key` field is deprecated, replaced by `key`.
+
+### Changes
+
+* Protocol V10 features:
+  * New `bump_sequence` operation (as in [CAP-0001](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0001.md)).
+    * New [`bump_sequence`](https://www.stellar.org/developers/horizon/reference/resources/operation.html#bump-sequence) operation.
+    * New `sequence_bumped` effect.
+    * Please check [CAP-0001](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0001.md) for new error codes for transaction submission.
+  * Offer liabilities (as in [CAP-0003](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0003.md)):
+    * `/accounts/{id}` resources contain new fields: `buying_liabilities` and `selling_liabilities` for each entry in `balances`.
+    * Please check [CAP-0003](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0003.md) for new error codes for transaction submission.
+* Added `source_amount` field to `path_payment` operations.
+* Added `account_credited` and `account_debited` effects for `path_payment` operations.
+* The Friendbot link in the Root endpoint is empty if it is not set in the configuration.
+* Improved `ingest` package logging.
+* Improved HTTP logging (`forwarded_ip`, `route` fields, `duration` is always in seconds).
+* The `LOGGLY_HOST` env variable has been replaced with `LOGGLY_TAG`, which adds a tag to every log event.
+* Dropped support for Go 1.8.
+
+### Bug fixes
+
+* New fields in the Offer resource, `last_modified_ledger` and `last_modified_time`, replace the buggy `last_modified` (#478).
+* Fixed pagination in Trades for account endpoint (#486).
+* Fixed a synchronization issue in `ingest` package (#603).
+* Fixed Order Book resource links in Root endpoint.
+* Fixed streaming in Offers for Account endpoint.
+
+## v0.13.3 - 2018-08-23
+
+### Bug fixes
+
+* Fixed large amounts rendering in `/assets`.
+
+## v0.13.2 - 2018-08-13
+
+### Bug fixes
+
+* Fixed a bug in `amount` and `price` packages triggering long calculations.
+
+## v0.13.1 - 2018-07-26
+
+### Bug fixes
+
+* Fixed a conversion bug when `timebounds.max_time` is set to `INT64_MAX`.
+
+## v0.13.0 - 2018-06-06
+
+### Breaking changes
+
+- `amount` field in `/assets` is now a String (to support Stellar amounts larger than `int64`).
+
+### Changes
+
+- Effect resource contains a new `created_at` field.
+- Horizon responses are compressed.
+- Ingestion errors have been improved.
+- `horizon rebase` command was improved.
+
+### Bug fixes
+
+- Horizon now returns `400 Bad Request` for negative `cursor` values.
+
+**Upgrade notes**
+
+DB migrations add new indexes on `history_trades`. This is a very large table, so the migration may take a long time (depending on your DB hardware). Please test the migration's execution time on a copy of your production DB first.
+
+## v0.12.3 - 2017-03-20
+
+### Bug fixes
+
+- Fix a service stutter caused by excessive `info` commands being issued from the root endpoint.
+
+
+## v0.12.2 - 2017-03-14
+
+This release is a bug fix release for v0.12.0 and v0.12.1. *Please see the upgrade notes below if you did not already migrate your db for v0.12.0*
+
+### Changes
+
+- Remove strict validation on the `resolution` parameter for the trade aggregations endpoint. We will add this feature back in the next major release.
+
+
+## v0.12.1 - 2017-03-13
+
+This release is a bug fix release for v0.12.0. *Please see the upgrade notes below if you did not already migrate your db for v0.12.0*
+
+### Bug fixes
+
+- Fixed an issue caused by un-migrated trade rows. (https://github.com/stellar/go/issues/357)
+- Command line flags are now usable for subcommands of horizon.
+
+
+## v0.12.0 - 2017-03-08
+
+Big release this time for horizon: We've made a number of breaking changes since v0.11.0 and have revised both our database schema and our data ingestion system. We recommend that you take a backup of your horizon database prior to upgrading, just in case.
+
+### Upgrade Notes
+
+Since this release changes both the schema and the data ingestion system, we recommend the following upgrade path to minimize downtime:
+
+1. Upgrade horizon binaries, but do not restart the service
+2. Run `horizon db migrate up` to migrate the db schema
+3. Run `horizon db reingest` in a background session to begin the data reingestion process
+4. Restart horizon
+
+### Added
+
+- Operation and payment resources were changed to add `transaction_hash` and `created_at` properties.
+- The ledger resource was changed to add a `header_xdr` property. Existing horizon installations should re-ingest all ledgers to populate the history database tables with the data. In future versions of horizon we will disallow null values in this column. Going forward, this change reduces the coupling of horizon to stellar-core, ensuring that horizon can re-import history even when the data is no longer stored within stellar-core's database.
+- Added the All Assets endpoint (`/assets`), which returns a list of all the assets in the system along with some stats per asset. The filters allow you to narrow down to any specific asset of interest.
+- Added the Trade Aggregations endpoint (`/trade_aggregations`), which allows for efficient gathering of historical trade data. This is done by dividing a given time range into segments and aggregating statistics for a given asset pair (`base`, `counter`) over each of these segments.
+
+### Bug fixes
+
+- Ingestion performance and stability have been improved.
+- Changes to an account's inflation destination no longer produce erroneous "signer_updated" effects. (https://github.com/stellar/horizon/issues/390)
+
+
+### Changed
+
+- BREAKING CHANGE: The `base_fee` property of the ledger resource has been renamed to `base_fee_in_stroops`.
+- BREAKING CHANGE: The `base_reserve` property of the ledger resource has been renamed to `base_reserve_in_stroops` and is now expressed in stroops (rather than lumens) and as a JSON number.
+- BREAKING CHANGE: The "Orderbook Trades" (`/orderbook/trades`) endpoint has been removed and replaced by the "All Trades" (`/trades`) endpoint.
+- BREAKING CHANGE: The Trade resource has been modified to generalize assets as (`base`, `counter`) pairs, rather than the previous (`sold`,`bought`) pairs.
+- Full reingestion (i.e. running `horizon db reingest`) now runs in reverse chronological order.
+
+### Removed
+
+- BREAKING CHANGE: Friendbot has been extracted to an external microservice.
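+
+To make the Trade Aggregations endpoint added in this release more concrete, here is a hedged sketch of how a query could be assembled. The (`base`, `counter`) asset pairing and time segmentation follow the description above; the exact parameter names, the millisecond-based resolution and time values, the example asset, and the Horizon URL are assumptions for illustration only and are not taken from these release notes.
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/url"
+)
+
+func main() {
+	// Aggregate trades of the native asset against an example COP asset
+	// over one-hour segments of an arbitrary example time range.
+	params := url.Values{}
+	params.Set("base_asset_type", "native")
+	params.Set("counter_asset_type", "credit_alphanum4")
+	params.Set("counter_asset_code", "COP")
+	params.Set("counter_asset_issuer", "GC2GFGZ5CZCFCDJSQF3YYEAYBOS3ZREXJSPU7LUJ7JU3LP3BQNHY7YKS")
+	params.Set("resolution", "3600000")       // one-hour segments, in milliseconds (assumed)
+	params.Set("start_time", "1512689100000") // arbitrary example range start
+	params.Set("end_time", "1512775500000")   // arbitrary example range end
+
+	// Print the request URL; send it to your own Horizon deployment.
+	fmt.Println("https://horizon.example.org/trade_aggregations?" + params.Encode())
+}
+```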
+
+
+## [v0.11.0] - 2017-08-15
+
+### Bug fixes
+
+- The ingestion system can now properly import envelopes that contain signatures that are zero-length strings.
+- BREAKING CHANGE: specifying a `limit` of `0` now triggers an error instead of interpreting the value to mean "use the default limit".
+- Requests that ask for more records than the maximum page size now trigger a bad request error, instead of an internal server error.
+- Upstream bug fixes to xdr decoding from `github.com/stellar/go`.
+
+### Changed
+
+- BREAKING CHANGE: The payments endpoint now includes `account_merge` operations in the response.
+- "Finished Request" log lines now include additional fields: `streaming`, `path`, `ip`, and `host`.
+- Responses now include a `Content-Disposition: inline` header.
+
+
+## [v0.10.1] - 2017-03-29
+
+### Fixed
+- Ingestion was fixed to protect against text memos that contain null bytes. While memos with null bytes are legal in stellar-core, PostgreSQL does not support such values in string columns. Horizon now strips those null bytes to fix the issue.
+
+## [v0.10.0] - 2017-03-20
+
+This is a fix release for v0.9.0 and v0.9.1
+
+
+### Added
+- Added `horizon db clear` helper command to clear previously ingested history.
+
+### Fixed
+
+- Embedded sql files for the database schema have been fixed again to be compatible with postgres 9.5. The configuration setting `row_security` has been removed from the dumped files.
+
+## [v0.9.1] - 2017-03-20
+
+### Fixed
+
+- Embedded sql files for the database schema have been fixed to be compatible with postgres 9.5. The configuration setting `idle_in_transaction_session_timeout` has been removed from the dumped files.
+
+## [v0.9.0] - 2017-03-20
+
+This release was retracted due to a bug discovered after release.
+
+### Added
+- Horizon now exposes the stellar network protocol in several places: It shows the currently reported protocol version (as returned by the stellar-core `info` command) on the root endpoint, and it reports the protocol version of each ledger resource.
+- Trade resources now include a `created_at` timestamp.
+
+### Fixed
+
+- BREAKING CHANGE: The reingestion process has been updated. Prior versions of horizon would enter a failed state when a gap between the imported history and the stellar-core database formed or when a previously imported ledger was no longer found in the stellar-core database. This usually occurs when running stellar-core with the `CATCHUP_RECENT` config option. With these changes, horizon will automatically trim "abandoned" ledgers: ledgers that are older than the core elder ledger.
+
+
+## [v0.8.0] - 2017-02-07
+
+### Added
+
+- account signer resources now contain a type specifying the type of the signer: `ed25519_public_key`, `sha256_hash`, and `preauth_tx` are the values used for the respective signer types.
+
+### Changed
+
+- The `public_key` field on signer effects and an account's signer summary has been renamed to `key` to reflect that new signer types are not necessarily specifying a public key anymore.
+
+### Deprecated
+
+- The `public_key` fields on account signers and signer effects are deprecated.
+
+## [v0.7.1] - 2017-01-12
+
+### Bug fixes
+
+- Trade resources now include "bought_amount" and "sold_amount" fields when being viewed through the "Orderbook Trades" endpoint.
+- Fixes #322: orderbook summaries with over 20 bids now return the correct price levels, starting with the closest to the spread.
+
+## [v0.7.0] - 2017-01-10
+
+### Added
+
+- The account resource now includes links to the account's trades and data values.
+
+### Bug fixes
+
+- Fixes the paging_token attribute of the account resource
+- Fixes race conditions in friendbot
+- Fixes #202: Add price and price_r to "manage_offer" operation resources
+- Fixes #318: order books for the native currency now filter correctly.
+
+## [v0.6.2] - 2016-08-18
+
+### Bug fixes
+
+- Fixes streaming (SSE) requests, which were broken in v0.6.0
+
+## [v0.6.1] - 2016-07-26
+
+### Bug fixes
+
+- Fixed an issue where accounts were not being properly returned when the history database had no record of the account.
+
+
+## [v0.6.0] - 2016-07-20
+
+This release contains the initial implementation of the "Abridged History System". It allows a horizon system to be operated without complete knowledge of the ledger's history. With this release, horizon will start ingesting data from the earliest point known to the connected stellar-core instance, rather than ledger 1 as it behaved previously. See the admin guide section titled "Ingesting stellar-core data" for more details.
+
+### Added
+
+- *Elder* ledgers have been introduced: An elder ledger is the oldest ledger known to a db. For example, the `core_elder_ledger` attribute on the root endpoint refers to the oldest known ledger stored in the connected stellar-core database.
+- Added the `history-retention-count` command line flag, used to specify the amount of historical data to keep in the history db. This is expressed as a number of ledgers; for example, a value of `362880` would retain roughly 6 weeks of data given an average of 10 seconds per ledger.
+- Added the `history-stale-threshold` command line flag to enable stale history protection. See the admin guide for more info.
+- Horizon now reports the last ledger ingested to stellar-core using the `setcursor` command.
+- Requests for data that precede the recorded window of history stored by horizon will receive a `410 Gone` http response to allow software to differentiate from other "not found" situations.
+- The new `db reap` command will manually trigger the deletion of unretained historical data.
+- A background process on the server now deletes unretained historical data once per hour.
+
+### Changed
+
+- BREAKING: When making a streaming request, a normal error response will be returned if an error occurs prior to sending the first event. Additionally, the initial http response and streaming preamble will not be sent until the first event is available.
+- BREAKING: `horizon_latest_ledger` has been renamed to `history_latest_ledger`.
+- Horizon no longer needs to begin the ingestion of historical data from ledger sequence 1.
+- Rows in the `history_accounts` table are no longer identified using the "Total Order ID" that other historical records use, but rather use a simple auto-incremented id.
+
+### Removed
+
+- The `/accounts` endpoint, which lets a consumer page through the entire set of accounts in the ledger, has been removed. The change from complete to abridged history in horizon makes the endpoint mostly useless, and after consulting with the community we have decided to remove it.
+
+## [v0.5.1] - 2016-04-28
+
+### Added
+
+- ManageData operation data is now rendered in the various operation endpoints.
+
+### Bug fixes
+
+- Transaction memos that contain utf-8 are now properly rendered in browsers by properly setting the charset of the http response.
+ +## [v0.5.0] - 2016-04-22 + +### Added + +- BREAKING: Horizon can now import data from stellar-core without the aid of the horizon-importer project. This process is now known as "ingestion", and is enabled by either setting the `INGEST` environment variable to "true" or specifying "--ingest" on the launch arguments for the horizon process. Only one process should be running in this mode for any given horizon database. +- Add `horizon db init`, used to install the latest bundled schema for the horizon database. +- Add `horizon db reingest` command, used to update outdated or corrupt horizon database information. Admins may now use `horizon db reingest outdated` to migrate any old data when updated horizon. +- Added `network_passphrase` field to root resource. +- Added `fee_meta_xdr` field to transaction resource. + +### Bug fixes +- Corrected casing on the "offers" link of an account resource. + +## [v0.4.0] - 2016-02-19 + +### Added + +- Add `horizon db migrate [up|down|redo]` commands, used for installing schema migrations. This work is in service of porting the horizon-importer project directly to horizon. +- Add support for TLS: specify `--tls-cert` and `--tls-key` to enable. +- Add support for HTTP/2. To enable, use TLS. + +### Removed + +- BREAKING CHANGE: Removed support for building on go versions lower than 1.6 + +## [v0.3.0] - 2016-01-29 + +### Changes + +- Fixed incorrect `source_amount` attribute on pathfinding responses. +- BREAKING CHANGE: Sequence numbers are now encoded as strings in JSON responses. +- Fixed broken link in the successful response to a posted transaction + +## [v0.2.0] - 2015-12-01 +### Changes + +- BREAKING CHANGE: the `address` field of a signer in the account resource has been renamed to `public_key`. +- BREAKING CHANGE: the `address` on the account resource has been renamed to `account_id`. + +## [v0.1.1] - 2015-12-01 + +### Added +- Github releases are created from tagged travis builds automatically + +[v0.11.0]: https://github.com/stellar/horizon/compare/v0.10.1...v0.11.0 +[v0.10.1]: https://github.com/stellar/horizon/compare/v0.10.0...v0.10.1 +[v0.10.0]: https://github.com/stellar/horizon/compare/v0.9.1...v0.10.0 +[v0.9.1]: https://github.com/stellar/horizon/compare/v0.9.0...v0.9.1 +[v0.9.0]: https://github.com/stellar/horizon/compare/v0.8.0...v0.9.0 +[v0.8.0]: https://github.com/stellar/horizon/compare/v0.7.1...v0.8.0 +[v0.7.1]: https://github.com/stellar/horizon/compare/v0.7.0...v0.7.1 +[v0.7.0]: https://github.com/stellar/horizon/compare/v0.6.2...v0.7.0 +[v0.6.2]: https://github.com/stellar/horizon/compare/v0.6.1...v0.6.2 +[v0.6.1]: https://github.com/stellar/horizon/compare/v0.6.0...v0.6.1 +[v0.6.0]: https://github.com/stellar/horizon/compare/v0.5.1...v0.6.0 +[v0.5.1]: https://github.com/stellar/horizon/compare/v0.5.0...v0.5.1 +[v0.5.0]: https://github.com/stellar/horizon/compare/v0.4.0...v0.5.0 +[v0.4.0]: https://github.com/stellar/horizon/compare/v0.3.0...v0.4.0 +[v0.3.0]: https://github.com/stellar/horizon/compare/v0.2.0...v0.3.0 +[v0.2.0]: https://github.com/stellar/horizon/compare/v0.1.1...v0.2.0 +[v0.1.1]: https://github.com/stellar/horizon/compare/v0.1.0...v0.1.1 diff --git a/services/horizon/CONTRIBUTING.md b/services/horizon/CONTRIBUTING.md new file mode 100644 index 0000000000..26ba4d1ad0 --- /dev/null +++ b/services/horizon/CONTRIBUTING.md @@ -0,0 +1,6 @@ +# How to contribute + +Please read the [Contribution Guide](https://github.com/stellar/docs/blob/master/CONTRIBUTING.md). 
+ +Then please [sign the Contributor License Agreement](https://docs.google.com/forms/d/1g7EF6PERciwn7zfmfke5Sir2n10yddGGSXyZsq98tVY/viewform?usp=send_form). + diff --git a/services/horizon/README.md b/services/horizon/README.md new file mode 100644 index 0000000000..cd6eabcbdd --- /dev/null +++ b/services/horizon/README.md @@ -0,0 +1,13 @@ +# Horizon +[![Build Status](https://circleci.com/gh/stellar/go.svg?style=shield)](https://circleci.com/gh/stellar/go) + +Horizon is the client facing API server for the [Stellar ecosystem](https://developers.stellar.org/docs/start/introduction/). It acts as the interface between [Stellar Core](https://developers.stellar.org/docs/run-core-node/) and applications that want to access the Stellar network. It allows you to submit transactions to the network, check the status of accounts, subscribe to event streams and more. + +## Try it out +See Horizon in action by running your own Stellar node as part of the Stellar [testnet](https://developers.stellar.org/docs/glossary/testnet/). With our Docker quick-start image, you can be running your own fully functional node in around 20 minutes. See the [Quickstart Guide](internal/docs/quickstart.md) to get up and running. + +## Run a production server +If you're an administrator planning to run a production instance of Horizon as part of the public Stellar network, check out the detailed [Administration Guide](internal/docs/admin.md). It covers installation, monitoring, error scenarios and more. + +## Contributing +As an open source project, development of Horizon is public, and you can help! We welcome new issue reports, documentation and bug fixes, and contributions that further the project roadmap. The [Development Guide](internal/docs/developing.md) will show you how to build Horizon, see what's going on behind the scenes, and set up an effective develop-test-push cycle so that you can get your work incorporated quickly. 
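+
+## Example requests
+
+As a quick, hedged illustration of the API described above (it assumes a Horizon instance listening on `localhost:8000`, the port exposed by the docker-compose setup in this repository, and uses a placeholder account address — substitute one you actually care about):
+
+```
+# Check the status of an account
+curl -s http://localhost:8000/accounts/GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH
+
+# Subscribe to new ledgers as a stream of server-sent events
+curl -s -H "Accept: text/event-stream" "http://localhost:8000/ledgers?cursor=now"
+```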
diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go new file mode 100644 index 0000000000..d4f88b9aa7 --- /dev/null +++ b/services/horizon/cmd/db.go @@ -0,0 +1,520 @@ +package cmd + +import ( + "context" + "database/sql" + "fmt" + "go/types" + "log" + "os" + "strconv" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/stellar/go/services/horizon/internal/db2/history" + + horizon "github.com/stellar/go/services/horizon/internal" + "github.com/stellar/go/services/horizon/internal/db2/schema" + "github.com/stellar/go/services/horizon/internal/ingest" + support "github.com/stellar/go/support/config" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + hlog "github.com/stellar/go/support/log" +) + +var dbCmd = &cobra.Command{ + Use: "db [command]", + Short: "commands to manage horizon's postgres db", +} + +var dbMigrateCmd = &cobra.Command{ + Use: "migrate [command]", + Short: "commands to run schema migrations on horizon's postgres db", +} + +func requireAndSetFlag(name string) error { + for _, flag := range flags { + if flag.Name == name { + flag.Require() + flag.SetValue() + return nil + } + } + return fmt.Errorf("could not find %s flag", name) +} + +var dbInitCmd = &cobra.Command{ + Use: "init", + Short: "install schema", + Long: "init initializes the postgres database used by horizon.", + RunE: func(cmd *cobra.Command, args []string) error { + if err := requireAndSetFlag(horizon.DatabaseURLFlagName); err != nil { + return err + } + + db, err := sql.Open("postgres", config.DatabaseURL) + if err != nil { + return err + } + + numMigrationsRun, err := schema.Migrate(db, schema.MigrateUp, 0) + if err != nil { + return err + } + + if numMigrationsRun == 0 { + log.Println("No migrations applied.") + } else { + log.Printf("Successfully applied %d migrations.\n", numMigrationsRun) + } + return nil + }, +} + +func migrate(dir schema.MigrateDir, count int) error { + dbConn, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return err + } + + numMigrationsRun, err := schema.Migrate(dbConn.DB.DB, dir, count) + if err != nil { + return err + } + + if numMigrationsRun == 0 { + log.Println("No migrations applied.") + } else { + log.Printf("Successfully applied %d migrations.\n", numMigrationsRun) + } + return nil +} + +var dbMigrateDownCmd = &cobra.Command{ + Use: "down COUNT", + Short: "run upwards db schema migrations", + Long: "performs a downards schema migration command", + RunE: func(cmd *cobra.Command, args []string) error { + if err := requireAndSetFlag(horizon.DatabaseURLFlagName); err != nil { + return err + } + + // Only allow invokations with 1 args. + if len(args) != 1 { + return ErrUsage{cmd} + } + + count, err := strconv.Atoi(args[0]) + if err != nil { + log.Println(err) + return ErrUsage{cmd} + } + + return migrate(schema.MigrateDown, count) + }, +} + +var dbMigrateRedoCmd = &cobra.Command{ + Use: "redo COUNT", + Short: "redo db schema migrations", + Long: "performs a redo schema migration command", + RunE: func(cmd *cobra.Command, args []string) error { + if err := requireAndSetFlag(horizon.DatabaseURLFlagName); err != nil { + return err + } + + // Only allow invokations with 1 args. 
+ if len(args) != 1 { + return ErrUsage{cmd} + } + + count, err := strconv.Atoi(args[0]) + if err != nil { + log.Println(err) + return ErrUsage{cmd} + } + + return migrate(schema.MigrateRedo, count) + }, +} + +var dbMigrateStatusCmd = &cobra.Command{ + Use: "status", + Short: "print current database migration status", + Long: "print current database migration status", + RunE: func(cmd *cobra.Command, args []string) error { + if err := requireAndSetFlag(horizon.DatabaseURLFlagName); err != nil { + return err + } + + // Only allow invokations with 0 args. + if len(args) != 0 { + fmt.Println(args) + return ErrUsage{cmd} + } + + dbConn, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return err + } + + status, err := schema.Status(dbConn.DB.DB) + if err != nil { + return err + } + + fmt.Println(status) + return nil + }, +} + +var dbMigrateUpCmd = &cobra.Command{ + Use: "up [COUNT]", + Short: "run upwards db schema migrations", + Long: "performs an upwards schema migration command", + RunE: func(cmd *cobra.Command, args []string) error { + if err := requireAndSetFlag(horizon.DatabaseURLFlagName); err != nil { + return err + } + + // Only allow invokations with 0-1 args. + if len(args) > 1 { + return ErrUsage{cmd} + } + + count := 0 + if len(args) == 1 { + var err error + count, err = strconv.Atoi(args[0]) + if err != nil { + log.Println(err) + return ErrUsage{cmd} + } + } + + return migrate(schema.MigrateUp, count) + }, +} + +var dbReapCmd = &cobra.Command{ + Use: "reap", + Short: "reaps (i.e. removes) any reapable history data", + Long: "reap removes any historical data that is earlier than the configured retention cutoff", + RunE: func(cmd *cobra.Command, args []string) error { + app, err := horizon.NewAppFromFlags(config, flags) + if err != nil { + return err + } + ctx := context.Background() + app.UpdateHorizonLedgerState(ctx) + return app.DeleteUnretainedHistory(ctx) + }, +} + +var dbReingestCmd = &cobra.Command{ + Use: "reingest", + Short: "reingest commands", + Long: "reingest ingests historical data for every ledger or ledgers specified by subcommand", + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Println("Use one of the subcomands...") + return ErrUsage{cmd} + }, +} + +var ( + reingestForce bool + parallelWorkers uint + parallelJobSize uint32 + retries uint + retryBackoffSeconds uint +) + +func ingestRangeCmdOpts() support.ConfigOptions { + return support.ConfigOptions{ + { + Name: "force", + ConfigKey: &reingestForce, + OptType: types.Bool, + Required: false, + FlagDefault: false, + Usage: "[optional] if this flag is set, horizon will be blocked " + + "from ingesting until the reingestion command completes (incompatible with --parallel-workers > 1)", + }, + { + Name: "parallel-workers", + ConfigKey: ¶llelWorkers, + OptType: types.Uint, + Required: false, + FlagDefault: uint(1), + Usage: "[optional] if this flag is set to > 1, horizon will parallelize reingestion using the supplied number of workers", + }, + { + Name: "parallel-job-size", + ConfigKey: ¶llelJobSize, + OptType: types.Uint32, + Required: false, + FlagDefault: uint32(100000), + Usage: "[optional] parallel workers will run jobs processing ledger batches of the supplied size", + }, + { + Name: "retries", + ConfigKey: &retries, + OptType: types.Uint, + Required: false, + FlagDefault: uint(0), + Usage: "[optional] number of reingest retries", + }, + { + Name: "retry-backoff-seconds", + ConfigKey: &retryBackoffSeconds, + OptType: types.Uint, + Required: false, + FlagDefault: uint(5), + Usage: 
"[optional] backoff seconds between reingest retries", + }, + } +} + +var dbReingestRangeCmdOpts = ingestRangeCmdOpts() +var dbReingestRangeCmd = &cobra.Command{ + Use: "range [Start sequence number] [End sequence number]", + Short: "reingests ledgers within a range", + Long: "reingests ledgers between X and Y sequence number (closed intervals)", + RunE: func(cmd *cobra.Command, args []string) error { + if err := dbReingestRangeCmdOpts.RequireE(); err != nil { + return err + } + if err := dbReingestRangeCmdOpts.SetValues(); err != nil { + return err + } + + if len(args) != 2 { + return ErrUsage{cmd} + } + + argsUInt32 := make([]uint32, 2) + for i, arg := range args { + if seq, err := strconv.ParseUint(arg, 10, 32); err != nil { + cmd.Usage() + return fmt.Errorf(`invalid sequence number "%s"`, arg) + } else { + argsUInt32[i] = uint32(seq) + } + } + + err := horizon.ApplyFlags(config, flags, horizon.ApplyOptions{RequireCaptiveCoreConfig: false, AlwaysIngest: true}) + if err != nil { + return err + } + return runDBReingestRange( + []history.LedgerRange{{StartSequence: argsUInt32[0], EndSequence: argsUInt32[1]}}, + reingestForce, + parallelWorkers, + *config, + ) + }, +} + +var dbFillGapsCmdOpts = ingestRangeCmdOpts() +var dbFillGapsCmd = &cobra.Command{ + Use: "fill-gaps [Start sequence number] [End sequence number]", + Short: "Ingests any gaps found in the horizon db", + Long: "Ingests any gaps found in the horizon db. The command takes an optional start and end parameters which restrict the range of ledgers ingested.", + RunE: func(cmd *cobra.Command, args []string) error { + if err := dbFillGapsCmdOpts.RequireE(); err != nil { + return err + } + if err := dbFillGapsCmdOpts.SetValues(); err != nil { + return err + } + + if len(args) != 0 && len(args) != 2 { + hlog.Errorf("Expected either 0 arguments or 2 but found %v arguments", len(args)) + return ErrUsage{cmd} + } + + var start, end uint64 + var withRange bool + if len(args) == 2 { + var err error + start, err = strconv.ParseUint(args[0], 10, 32) + if err != nil { + cmd.Usage() + return fmt.Errorf(`invalid sequence number "%s"`, args[0]) + } + end, err = strconv.ParseUint(args[1], 10, 32) + if err != nil { + cmd.Usage() + return fmt.Errorf(`invalid sequence number "%s"`, args[1]) + } + withRange = true + } + + err := horizon.ApplyFlags(config, flags, horizon.ApplyOptions{RequireCaptiveCoreConfig: false, AlwaysIngest: true}) + if err != nil { + return err + } + var gaps []history.LedgerRange + if withRange { + gaps, err = runDBDetectGapsInRange(*config, uint32(start), uint32(end)) + if err != nil { + return err + } + hlog.Infof("found gaps %v within range [%v, %v]", gaps, start, end) + } else { + gaps, err = runDBDetectGaps(*config) + if err != nil { + return err + } + hlog.Infof("found gaps %v", gaps) + } + + return runDBReingestRange(gaps, reingestForce, parallelWorkers, *config) + }, +} + +func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, parallelWorkers uint, config horizon.Config) error { + if reingestForce && parallelWorkers > 1 { + return errors.New("--force is incompatible with --parallel-workers > 1") + } + horizonSession, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return fmt.Errorf("cannot open Horizon DB: %v", err) + } + + ingestConfig := ingest.Config{ + NetworkPassphrase: config.NetworkPassphrase, + HistorySession: horizonSession, + HistoryArchiveURL: config.HistoryArchiveURLs[0], + CheckpointFrequency: config.CheckpointFrequency, + MaxReingestRetries: int(retries), + 
ReingestRetryBackoffSeconds: int(retryBackoffSeconds), + EnableCaptiveCore: config.EnableCaptiveCoreIngestion, + CaptiveCoreBinaryPath: config.CaptiveCoreBinaryPath, + RemoteCaptiveCoreURL: config.RemoteCaptiveCoreURL, + CaptiveCoreToml: config.CaptiveCoreToml, + CaptiveCoreStoragePath: config.CaptiveCoreStoragePath, + StellarCoreCursor: config.CursorName, + StellarCoreURL: config.StellarCoreURL, + } + + if !ingestConfig.EnableCaptiveCore { + if config.StellarCoreDatabaseURL == "" { + return fmt.Errorf("flag --%s cannot be empty", horizon.StellarCoreDBURLFlagName) + } + coreSession, dbErr := db.Open("postgres", config.StellarCoreDatabaseURL) + if dbErr != nil { + return fmt.Errorf("cannot open Core DB: %v", dbErr) + } + ingestConfig.CoreSession = coreSession + } + + if parallelWorkers > 1 { + system, systemErr := ingest.NewParallelSystems(ingestConfig, parallelWorkers) + if systemErr != nil { + return systemErr + } + + return system.ReingestRange( + ledgerRanges, + parallelJobSize, + ) + } + + system, systemErr := ingest.NewSystem(ingestConfig) + if systemErr != nil { + return systemErr + } + + err = system.ReingestRange(ledgerRanges, reingestForce) + if err != nil { + if _, ok := errors.Cause(err).(ingest.ErrReingestRangeConflict); ok { + return fmt.Errorf(`The range you have provided overlaps with Horizon's most recently ingested ledger. +It is not possible to run the reingest command on this range in parallel with +Horizon's ingestion system. +Either reduce the range so that it doesn't overlap with Horizon's ingestion system, +or, use the force flag to ensure that Horizon's ingestion system is blocked until +the reingest command completes.`) + } + + return err + } + hlog.Info("Range run successfully!") + return nil +} + +var dbDetectGapsCmd = &cobra.Command{ + Use: "detect-gaps", + Short: "detects ingestion gaps in Horizon's database", + Long: "detects ingestion gaps in Horizon's database and prints a list of reingest commands needed to fill the gaps", + RunE: func(cmd *cobra.Command, args []string) error { + if err := requireAndSetFlag(horizon.DatabaseURLFlagName); err != nil { + return err + } + + if len(args) != 0 { + return ErrUsage{cmd} + } + gaps, err := runDBDetectGaps(*config) + if err != nil { + return err + } + if len(gaps) == 0 { + hlog.Info("No gaps found") + return nil + } + fmt.Println("Horizon commands to run in order to fill in the gaps:") + cmdname := os.Args[0] + for _, g := range gaps { + fmt.Printf("%s db reingest range %d %d\n", cmdname, g.StartSequence, g.EndSequence) + } + return nil + }, +} + +func runDBDetectGaps(config horizon.Config) ([]history.LedgerRange, error) { + horizonSession, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return nil, err + } + q := &history.Q{horizonSession} + return q.GetLedgerGaps(context.Background()) +} + +func runDBDetectGapsInRange(config horizon.Config, start, end uint32) ([]history.LedgerRange, error) { + horizonSession, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return nil, err + } + q := &history.Q{horizonSession} + return q.GetLedgerGapsInRange(context.Background(), start, end) +} + +func init() { + if err := dbReingestRangeCmdOpts.Init(dbReingestRangeCmd); err != nil { + log.Fatal(err.Error()) + } + if err := dbFillGapsCmdOpts.Init(dbFillGapsCmd); err != nil { + log.Fatal(err.Error()) + } + + viper.BindPFlags(dbReingestRangeCmd.PersistentFlags()) + viper.BindPFlags(dbFillGapsCmd.PersistentFlags()) + + RootCmd.AddCommand(dbCmd) + dbCmd.AddCommand( + dbInitCmd, + dbMigrateCmd, + 
dbReapCmd, + dbReingestCmd, + dbDetectGapsCmd, + dbFillGapsCmd, + ) + dbMigrateCmd.AddCommand( + dbMigrateDownCmd, + dbMigrateRedoCmd, + dbMigrateStatusCmd, + dbMigrateUpCmd, + ) + dbReingestCmd.AddCommand(dbReingestRangeCmd) +} diff --git a/services/horizon/cmd/ingest.go b/services/horizon/cmd/ingest.go new file mode 100644 index 0000000000..2a7599921c --- /dev/null +++ b/services/horizon/cmd/ingest.go @@ -0,0 +1,349 @@ +package cmd + +import ( + "context" + "fmt" + "go/types" + "net/http" + _ "net/http/pprof" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "github.com/stellar/go/historyarchive" + horizon "github.com/stellar/go/services/horizon/internal" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest" + support "github.com/stellar/go/support/config" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/log" +) + +var ingestCmd = &cobra.Command{ + Use: "ingest", + Short: "ingestion related commands", +} + +var ingestVerifyFrom, ingestVerifyTo, ingestVerifyDebugServerPort uint32 +var ingestVerifyState bool + +var ingestVerifyRangeCmdOpts = []*support.ConfigOption{ + { + Name: "from", + ConfigKey: &ingestVerifyFrom, + OptType: types.Uint32, + Required: true, + FlagDefault: uint32(0), + Usage: "first ledger of the range to ingest", + }, + { + Name: "to", + ConfigKey: &ingestVerifyTo, + OptType: types.Uint32, + Required: true, + FlagDefault: uint32(0), + Usage: "last ledger of the range to ingest", + }, + { + Name: "verify-state", + ConfigKey: &ingestVerifyState, + OptType: types.Bool, + Required: false, + FlagDefault: false, + Usage: "[optional] verifies state at the last ledger of the range when true", + }, + { + Name: "debug-server-port", + ConfigKey: &ingestVerifyDebugServerPort, + OptType: types.Uint32, + Required: false, + FlagDefault: uint32(0), + Usage: "[optional] opens a net/http/pprof server at given port", + }, +} + +var ingestVerifyRangeCmd = &cobra.Command{ + Use: "verify-range", + Short: "runs ingestion pipeline within a range. warning! 
requires clean DB.", + Long: "runs ingestion pipeline between X and Y sequence number (inclusive)", + RunE: func(cmd *cobra.Command, args []string) error { + for _, co := range ingestVerifyRangeCmdOpts { + if err := co.RequireE(); err != nil { + return err + } + co.SetValue() + } + + if err := horizon.ApplyFlags(config, flags, horizon.ApplyOptions{RequireCaptiveCoreConfig: false, AlwaysIngest: true}); err != nil { + return err + } + + if ingestVerifyDebugServerPort != 0 { + go func() { + log.Infof("Starting debug server at: %d", ingestVerifyDebugServerPort) + err := http.ListenAndServe( + fmt.Sprintf("localhost:%d", ingestVerifyDebugServerPort), + nil, + ) + if err != nil { + log.Error(err) + } + }() + } + + horizonSession, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return fmt.Errorf("cannot open Horizon DB: %v", err) + } + mngr := historyarchive.NewCheckpointManager(config.CheckpointFrequency) + if !mngr.IsCheckpoint(ingestVerifyFrom) && ingestVerifyFrom != 1 { + return fmt.Errorf("`--from` must be a checkpoint ledger") + } + + if ingestVerifyState && !mngr.IsCheckpoint(ingestVerifyTo) { + return fmt.Errorf("`--to` must be a checkpoint ledger when `--verify-state` is set.") + } + + ingestConfig := ingest.Config{ + NetworkPassphrase: config.NetworkPassphrase, + HistorySession: horizonSession, + HistoryArchiveURL: config.HistoryArchiveURLs[0], + EnableCaptiveCore: config.EnableCaptiveCoreIngestion, + CaptiveCoreBinaryPath: config.CaptiveCoreBinaryPath, + RemoteCaptiveCoreURL: config.RemoteCaptiveCoreURL, + CheckpointFrequency: config.CheckpointFrequency, + CaptiveCoreToml: config.CaptiveCoreToml, + CaptiveCoreStoragePath: config.CaptiveCoreStoragePath, + } + + if !ingestConfig.EnableCaptiveCore { + if config.StellarCoreDatabaseURL == "" { + return fmt.Errorf("flag --%s cannot be empty", horizon.StellarCoreDBURLFlagName) + } + + coreSession, dbErr := db.Open("postgres", config.StellarCoreDatabaseURL) + if dbErr != nil { + return fmt.Errorf("cannot open Core DB: %v", dbErr) + } + ingestConfig.CoreSession = coreSession + } + + system, err := ingest.NewSystem(ingestConfig) + if err != nil { + return err + } + + err = system.VerifyRange( + ingestVerifyFrom, + ingestVerifyTo, + ingestVerifyState, + ) + if err != nil { + return err + } + + log.Info("Range run successfully!") + return nil + }, +} + +var stressTestNumTransactions, stressTestChangesPerTransaction int + +var stressTestCmdOpts = []*support.ConfigOption{ + { + Name: "transactions", + ConfigKey: &stressTestNumTransactions, + OptType: types.Int, + Required: false, + FlagDefault: int(1000), + Usage: "total number of transactions to ingest (at most 1000)", + }, + { + Name: "changes", + ConfigKey: &stressTestChangesPerTransaction, + OptType: types.Int, + Required: false, + FlagDefault: int(4000), + Usage: "changes per transaction to ingest (at most 4000)", + }, +} + +var ingestStressTestCmd = &cobra.Command{ + Use: "stress-test", + Short: "runs ingestion pipeline on a ledger with many changes. warning! requires clean DB.", + Long: "runs ingestion pipeline on a ledger with many changes. warning! 
requires clean DB.", + RunE: func(cmd *cobra.Command, args []string) error { + for _, co := range stressTestCmdOpts { + if err := co.RequireE(); err != nil { + return err + } + co.SetValue() + } + + if err := horizon.ApplyFlags(config, flags, horizon.ApplyOptions{RequireCaptiveCoreConfig: false, AlwaysIngest: true}); err != nil { + return err + } + + horizonSession, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return fmt.Errorf("cannot open Horizon DB: %v", err) + } + + if stressTestNumTransactions <= 0 { + return fmt.Errorf("`--transactions` must be positive") + } + + if stressTestChangesPerTransaction <= 0 { + return fmt.Errorf("`--changes` must be positive") + } + + ingestConfig := ingest.Config{ + NetworkPassphrase: config.NetworkPassphrase, + HistorySession: horizonSession, + HistoryArchiveURL: config.HistoryArchiveURLs[0], + EnableCaptiveCore: config.EnableCaptiveCoreIngestion, + } + + if config.EnableCaptiveCoreIngestion { + ingestConfig.CaptiveCoreBinaryPath = config.CaptiveCoreBinaryPath + ingestConfig.RemoteCaptiveCoreURL = config.RemoteCaptiveCoreURL + } else { + if config.StellarCoreDatabaseURL == "" { + return fmt.Errorf("flag --%s cannot be empty", horizon.StellarCoreDBURLFlagName) + } + + coreSession, dbErr := db.Open("postgres", config.StellarCoreDatabaseURL) + if dbErr != nil { + return fmt.Errorf("cannot open Core DB: %v", dbErr) + } + ingestConfig.CoreSession = coreSession + } + + system, err := ingest.NewSystem(ingestConfig) + if err != nil { + return err + } + + err = system.StressTest( + stressTestNumTransactions, + stressTestChangesPerTransaction, + ) + if err != nil { + return err + } + + log.Info("Stress test completed successfully!") + return nil + }, +} + +var ingestTriggerStateRebuildCmd = &cobra.Command{ + Use: "trigger-state-rebuild", + Short: "updates a database to trigger state rebuild, state will be rebuilt by a running Horizon instance, DO NOT RUN production DB, some endpoints will be unavailable until state is rebuilt", + RunE: func(cmd *cobra.Command, args []string) error { + ctx := context.Background() + if err := horizon.ApplyFlags(config, flags, horizon.ApplyOptions{RequireCaptiveCoreConfig: false, AlwaysIngest: true}); err != nil { + return err + } + + horizonSession, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return fmt.Errorf("cannot open Horizon DB: %v", err) + } + + historyQ := &history.Q{horizonSession} + if err := historyQ.UpdateIngestVersion(ctx, 0); err != nil { + return fmt.Errorf("cannot trigger state rebuild: %v", err) + } + + log.Info("Triggered state rebuild") + return nil + }, +} + +var ingestInitGenesisStateCmd = &cobra.Command{ + Use: "init-genesis-state", + Short: "ingests genesis state (ledger 1)", + RunE: func(cmd *cobra.Command, args []string) error { + ctx := context.Background() + if err := horizon.ApplyFlags(config, flags, horizon.ApplyOptions{RequireCaptiveCoreConfig: false, AlwaysIngest: true}); err != nil { + return err + } + + horizonSession, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return fmt.Errorf("cannot open Horizon DB: %v", err) + } + + historyQ := &history.Q{horizonSession} + + lastIngestedLedger, err := historyQ.GetLastLedgerIngestNonBlocking(ctx) + if err != nil { + return fmt.Errorf("cannot get last ledger value: %v", err) + } + + if lastIngestedLedger != 0 { + return fmt.Errorf("cannot run on non-empty DB") + } + + ingestConfig := ingest.Config{ + NetworkPassphrase: config.NetworkPassphrase, + HistorySession: horizonSession, + 
HistoryArchiveURL: config.HistoryArchiveURLs[0], + EnableCaptiveCore: config.EnableCaptiveCoreIngestion, + CheckpointFrequency: config.CheckpointFrequency, + } + + if config.EnableCaptiveCoreIngestion { + ingestConfig.CaptiveCoreBinaryPath = config.CaptiveCoreBinaryPath + } else { + if config.StellarCoreDatabaseURL == "" { + return fmt.Errorf("flag --%s cannot be empty", horizon.StellarCoreDBURLFlagName) + } + + coreSession, dbErr := db.Open("postgres", config.StellarCoreDatabaseURL) + if dbErr != nil { + return fmt.Errorf("cannot open Core DB: %v", dbErr) + } + ingestConfig.CoreSession = coreSession + } + + system, err := ingest.NewSystem(ingestConfig) + if err != nil { + return err + } + + err = system.BuildGenesisState() + if err != nil { + return err + } + + log.Info("Genesis ledger stat successfully ingested!") + return nil + }, +} + +func init() { + for _, co := range ingestVerifyRangeCmdOpts { + err := co.Init(ingestVerifyRangeCmd) + if err != nil { + log.Fatal(err.Error()) + } + } + + for _, co := range stressTestCmdOpts { + err := co.Init(ingestStressTestCmd) + if err != nil { + log.Fatal(err.Error()) + } + } + + viper.BindPFlags(ingestVerifyRangeCmd.PersistentFlags()) + + RootCmd.AddCommand(ingestCmd) + ingestCmd.AddCommand( + ingestVerifyRangeCmd, + ingestStressTestCmd, + ingestTriggerStateRebuildCmd, + ingestInitGenesisStateCmd, + ) +} diff --git a/services/horizon/cmd/record_metrics.go b/services/horizon/cmd/record_metrics.go new file mode 100644 index 0000000000..cd4f335da1 --- /dev/null +++ b/services/horizon/cmd/record_metrics.go @@ -0,0 +1,89 @@ +package cmd + +import ( + "archive/zip" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/spf13/cobra" + horizon "github.com/stellar/go/services/horizon/internal" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" +) + +var recordMetricsCmd = &cobra.Command{ + Use: "record-metrics", + Short: "records `/metrics` on admin port for debuging purposes", + Long: "", + RunE: func(cmd *cobra.Command, args []string) error { + if err := horizon.ApplyFlags(config, flags, horizon.ApplyOptions{}); err != nil { + return err + } + + const ( + timeFormat = "2006-01-02-15-04-05" + scrapeIntervalSeconds = 15 + scrapesCount = (60 / scrapeIntervalSeconds) * 10 // remember about rounding if change is required + ) + + client := &http.Client{ + Timeout: 5 * time.Second, + } + + outputFileName := fmt.Sprintf("./metrics-%s.zip", time.Now().Format(timeFormat)) + outputFile, err := os.Create(outputFileName) + if err != nil { + return err + } + + w := zip.NewWriter(outputFile) + defer w.Close() + + for i := 1; i <= scrapesCount; i++ { + log.Infof( + "Getting metrics %d/%d... ETA: %s", + i, + scrapesCount, + time.Duration(time.Duration(scrapeIntervalSeconds*(scrapesCount-i))*time.Second), + ) + + metricsResponse, err := client.Get(fmt.Sprintf("http://127.0.0.1:%d/metrics", config.AdminPort)) + if err != nil { + return errors.Wrap(err, "Error fetching metrics. Is admin server running?") + } + + if metricsResponse.StatusCode != http.StatusOK { + return errors.Errorf("Invalid status code: %d. Is admin server running?", metricsResponse.StatusCode) + } + + metricsFile, err := w.Create(time.Now().Format(timeFormat)) + if err != nil { + return err + } + + if _, err = io.Copy(metricsFile, metricsResponse.Body); err != nil { + return errors.Wrap(err, "Error reading response body. Is admin server running?") + } + + // Flush to keep memory usage log and save at least some records in case of errors later. 
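+			// (That is, flushing after each scrape pushes buffered data through to
+			// the output file, keeping memory usage low and retaining the scrapes
+			// collected so far if a later iteration fails.)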
+ err = w.Flush() + if err != nil { + return err + } + + if i < scrapesCount { + time.Sleep(scrapeIntervalSeconds * time.Second) + } + } + + log.Infof("Metrics recorded to %s!", outputFileName) + return nil + }, +} + +func init() { + RootCmd.AddCommand(recordMetricsCmd) +} diff --git a/services/horizon/cmd/root.go b/services/horizon/cmd/root.go new file mode 100644 index 0000000000..a387a8e90f --- /dev/null +++ b/services/horizon/cmd/root.go @@ -0,0 +1,55 @@ +package cmd + +import ( + "fmt" + stdLog "log" + + "github.com/spf13/cobra" + horizon "github.com/stellar/go/services/horizon/internal" +) + +var ( + config, flags = horizon.Flags() + + RootCmd = &cobra.Command{ + Use: "horizon", + Short: "client-facing api server for the Stellar network", + SilenceErrors: true, + SilenceUsage: true, + Long: "Client-facing API server for the Stellar network. It acts as the interface between Stellar Core and applications that want to access the Stellar network. It allows you to submit transactions to the network, check the status of accounts, subscribe to event streams and more.", + RunE: func(cmd *cobra.Command, args []string) error { + app, err := horizon.NewAppFromFlags(config, flags) + if err != nil { + return err + } + return app.Serve() + }, + } +) + +// ErrUsage indicates we should print the usage string and exit with code 1 +type ErrUsage struct { + cmd *cobra.Command +} + +func (e ErrUsage) Error() string { + return e.cmd.UsageString() +} + +// Indicates we want to exit with a specific error code without printing an error. +type ErrExitCode int + +func (e ErrExitCode) Error() string { + return fmt.Sprintf("exit code: %d", e) +} + +func init() { + err := flags.Init(RootCmd) + if err != nil { + stdLog.Fatal(err.Error()) + } +} + +func Execute() error { + return RootCmd.Execute() +} diff --git a/services/horizon/cmd/serve.go b/services/horizon/cmd/serve.go new file mode 100644 index 0000000000..8e06855d3c --- /dev/null +++ b/services/horizon/cmd/serve.go @@ -0,0 +1,23 @@ +package cmd + +import ( + "github.com/spf13/cobra" + horizon "github.com/stellar/go/services/horizon/internal" +) + +var serveCmd = &cobra.Command{ + Use: "serve", + Short: "run horizon server", + Long: "serve initializes then starts the horizon HTTP server", + RunE: func(cmd *cobra.Command, args []string) error { + app, err := horizon.NewAppFromFlags(config, flags) + if err != nil { + return err + } + return app.Serve() + }, +} + +func init() { + RootCmd.AddCommand(serveCmd) +} diff --git a/services/horizon/cmd/version.go b/services/horizon/cmd/version.go new file mode 100644 index 0000000000..d75e10a0f3 --- /dev/null +++ b/services/horizon/cmd/version.go @@ -0,0 +1,24 @@ +package cmd + +import ( + "fmt" + "runtime" + + "github.com/spf13/cobra" + apkg "github.com/stellar/go/support/app" +) + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "print horizon and Golang runtime version", + Long: "", + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Println(apkg.Version()) + fmt.Println(runtime.Version()) + return nil + }, +} + +func init() { + RootCmd.AddCommand(versionCmd) +} diff --git a/services/horizon/configs/captive-core-pubnet.cfg b/services/horizon/configs/captive-core-pubnet.cfg new file mode 100644 index 0000000000..2fcbdf96b6 --- /dev/null +++ b/services/horizon/configs/captive-core-pubnet.cfg @@ -0,0 +1,193 @@ +# WARNING! Do not use this config in production. Quorum sets should +# be carefully selected manually. 
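+# This file configures a captive stellar-core for the public network: it sets
+# the network passphrase and HTTP port, then defines the quorum set. Each
+# [[HOME_DOMAINS]] entry declares a trusted organization and its quality, and
+# each [[VALIDATORS]] entry lists one of that organization's validators with
+# its public key, peer address and history archive fetch command.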
+NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015" +HTTP_PORT=11626 + +[[HOME_DOMAINS]] +HOME_DOMAIN="stellar.org" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="satoshipay.io" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="lobstr.co" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="www.coinqvest.com" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="keybase.io" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="stellar.blockdaemon.com" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="wirexapp.com" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_1" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH" +ADDRESS="core-live-a.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_2" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK" +ADDRESS="core-live-b.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_3" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ" +ADDRESS="core-live-c.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_003/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_singapore" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GBJQUIXUO4XSNPAUT6ODLZUJRV2NPXYASKUBY4G5MYP3M47PCVI55MNT" +ADDRESS="stellar-sg-sin.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-sg-sin.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_iowa" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GAK6Z5UVGUVSEK6PEOCAYJISTT5EJBB34PN3NOLEQG2SUKXRVV2F6HZY" +ADDRESS="stellar-us-iowa.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-us-iowa.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_frankfurt" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GC5SXLNAM3C4NMGK2PXK4R34B5GNZ47FYQ24ZIBFDFOCU6D4KBN4POAE" +ADDRESS="stellar-de-fra.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-de-fra.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_1_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GCFONE23AB7Y6C5YZOMKUKGETPIAJA4QOYLS5VNS4JHBGKRZCPYHDLW7" +ADDRESS="v1.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-1-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_2_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GDXQB3OMMQ6MGG43PWFBZWBFKBBDUZIVSUDAZZTRAWQZKES2CDSE5HKJ" +ADDRESS="v2.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-2-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_3_north_america" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GD5QWEVV4GZZTQP46BRXV5CUMMMLP4JTGFD7FWYJJWRL54CELY6JGQ63" +ADDRESS="v3.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-3-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_4_asia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA7TEPCBDQKI7JQLQ34ZURRMK44DVYCIGVXQQWNSWAEQR6KB4FMCBT7J" +ADDRESS="v4.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-4-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_5_australia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA5STBMV6QDXFDGD62MEHLLHZTPDI77U3PFOD2SELU5RJDHQWBR5NNK7" +ADDRESS="v5.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-5-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_hong_kong" +HOME_DOMAIN="www.coinqvest.com" 
+PUBLIC_KEY="GAZ437J46SCFPZEDLVGDMKZPLFO77XJ4QVAURSJVRZK2T5S7XUFHXI2Z" +ADDRESS="hongkong.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://hongkong.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_germany" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GD6SZQV3WEJUH352NTVLKEV2JM2RH266VPEM7EH5QLLI7ZZAALMLNUVN" +ADDRESS="germany.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://germany.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_finland" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GADLA6BJK6VK33EM2IDQM37L5KGVCY5MSHSHVJA4SCNGNUIEOTCR6J5T" +ADDRESS="finland.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://finland.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_io" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GCWJKM4EGTGJUVSWUJDPCQEOEP5LHSOFKSA4HALBTOO4T4H3HCHOM6UX" +ADDRESS="stellar0.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_1" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GDKWELGJURRKXECG3HHFHXMRX64YWQPUHKCVRESOX3E5PM6DM4YXLZJM" +ADDRESS="stellar1.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory1.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_2" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GA35T3723UP2XJLC2H7MNL6VMKZZIFL2VW7XHMFFJKKIA2FJCYTLKFBW" +ADDRESS="stellar2.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory2.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_1" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAAV2GCVFLNN522ORUYFV33E76VPC22E72S75AQ6MBR5V45Z5DWVPWEU" +ADDRESS="stellar-full-validator1.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history1.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_2" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAVXB7SBJRYHSG6KSQHY74N7JAFRL4PFVZCNWW2ARI6ZEKNBJSMSKW7C" +ADDRESS="stellar-full-validator2.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history2.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_3" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAYXZ4PZ7P6QOX7EBHPIZXNWY4KCOBYWJCA4WKWRKC7XIUS3UJPT6EZ4" +ADDRESS="stellar-full-validator3.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history3.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="wirexUS" +ADDRESS="us.stellar.wirexapp.com" +HOME_DOMAIN="wirexapp.com" +PUBLIC_KEY="GDXUKFGG76WJC7ACEH3JUPLKM5N5S76QSMNDBONREUXPCZYVPOLFWXUS" +HISTORY="curl -sf http://wxhorizonusstga1.blob.core.windows.net/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="wirexUK" +ADDRESS="uk.stellar.wirexapp.com" +HOME_DOMAIN="wirexapp.com" +PUBLIC_KEY="GBBQQT3EIUSXRJC6TGUCGVA3FVPXVZLGG3OJYACWBEWYBHU46WJLWXEU" +HISTORY="curl -sf http://wxhorizonukstga1.blob.core.windows.net/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="wirexSG" +ADDRESS="sg.stellar.wirexapp.com" +HOME_DOMAIN="wirexapp.com" +PUBLIC_KEY="GAB3GZIE6XAYWXGZUDM4GMFFLJBFMLE2JDPUCWUZXMOMT3NHXDHEWXAS" +HISTORY="curl -sf http://wxhorizonasiastga1.blob.core.windows.net/history/{0} -o {1}" \ No newline at end of file diff --git a/services/horizon/configs/captive-core-testnet.cfg b/services/horizon/configs/captive-core-testnet.cfg new file mode 100644 index 0000000000..9abeecc8f5 --- /dev/null +++ b/services/horizon/configs/captive-core-testnet.cfg @@ -0,0 +1,28 @@ +NETWORK_PASSPHRASE="Test SDF Network ; September 2015" +UNSAFE_QUORUM=true +FAILURE_SAFETY=1 + +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_testnet_1" 
+HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="core-testnet1.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP" +ADDRESS="core-testnet2.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_3" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z" +ADDRESS="core-testnet3.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}" \ No newline at end of file diff --git a/services/horizon/docker/.gitignore b/services/horizon/docker/.gitignore new file mode 100644 index 0000000000..2eea525d88 --- /dev/null +++ b/services/horizon/docker/.gitignore @@ -0,0 +1 @@ +.env \ No newline at end of file diff --git a/services/horizon/docker/Dockerfile b/services/horizon/docker/Dockerfile new file mode 100644 index 0000000000..1b20729761 --- /dev/null +++ b/services/horizon/docker/Dockerfile @@ -0,0 +1,13 @@ +FROM ubuntu:focal + +ARG VERSION +ARG DEBIAN_FRONTEND=noninteractive + +RUN apt-get update && apt-get install -y wget apt-transport-https gnupg2 && \ + wget -qO /etc/apt/trusted.gpg.d/SDF.asc https://apt.stellar.org/SDF.asc && \ + echo "deb https://apt.stellar.org focal stable" | tee -a /etc/apt/sources.list.d/SDF.list && \ + apt-get update && apt-cache madison stellar-horizon stellar-core && apt-get install -y stellar-horizon=${VERSION} stellar-core && \ + apt-get clean && rm -rf /var/lib/apt/lists/* /var/log/*.log /var/log/*/*.log + +EXPOSE 8000 +ENTRYPOINT ["/usr/bin/stellar-horizon"] diff --git a/services/horizon/docker/Dockerfile.dev b/services/horizon/docker/Dockerfile.dev new file mode 100644 index 0000000000..2e50c298ba --- /dev/null +++ b/services/horizon/docker/Dockerfile.dev @@ -0,0 +1,28 @@ +FROM golang:1.17 AS builder + +WORKDIR /go/src/github.com/stellar/go +COPY go.mod go.sum ./ +RUN go mod download +COPY . 
./ +RUN go install github.com/stellar/go/services/horizon +RUN go install github.com/stellar/go/exp/services/captivecore + +FROM ubuntu:20.04 +ARG STELLAR_CORE_VERSION +ENV STELLAR_CORE_VERSION=${STELLAR_CORE_VERSION:-*} +ENV STELLAR_CORE_BINARY_PATH /usr/bin/stellar-core + +ENV DEBIAN_FRONTEND=noninteractive +# ca-certificates are required to make tls connections +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl wget gnupg apt-utils +RUN wget -qO - https://apt.stellar.org/SDF.asc | APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=true apt-key add - +RUN echo "deb https://apt.stellar.org focal stable" >/etc/apt/sources.list.d/SDF.list +RUN echo "deb https://apt.stellar.org focal unstable" >/etc/apt/sources.list.d/SDF-unstable.list +RUN apt-get update && apt-get install -y stellar-core=${STELLAR_CORE_VERSION} +RUN apt-get clean + +COPY --from=builder /go/bin/horizon ./ +COPY --from=builder /go/bin/captivecore ./ + +ENTRYPOINT ["./horizon"] + diff --git a/services/horizon/docker/Makefile b/services/horizon/docker/Makefile new file mode 100644 index 0000000000..2ae7ffd816 --- /dev/null +++ b/services/horizon/docker/Makefile @@ -0,0 +1,21 @@ +SUDO := $(shell docker version >/dev/null 2>&1 || echo "sudo") + +# https://github.com/opencontainers/image-spec/blob/master/annotations.md +BUILD_DATE := $(shell date -u +%FT%TZ) + +TAG ?= stellar/stellar-horizon:$(VERSION) + +docker-build: +ifndef VERSION + $(error VERSION environment variable must be set. For example VERSION=2.4.1-101 ) +endif + $(SUDO) docker build --pull \ + --label org.opencontainers.image.created="$(BUILD_DATE)" \ + --build-arg VERSION=$(VERSION) \ + -t $(TAG) . + +docker-push: +ifndef TAG + $(error Must set VERSION or TAG environment variable. For example VERSION=2.4.1-101 ) +endif + $(SUDO) docker push $(TAG) diff --git a/services/horizon/docker/README.md b/services/horizon/docker/README.md new file mode 100644 index 0000000000..04439bb8dd --- /dev/null +++ b/services/horizon/docker/README.md @@ -0,0 +1,97 @@ +# Overview + +Files related to docker and docker-compose +* `Dockerfile` and `Makefile` - used to build the official, package-based docker image for stellar-horizon +* `Dockerfile.dev` - used with docker-compose + +# Running Stellar with Docker Compose + +## Dependencies + +The only dependency you will need to install is [Docker](https://www.docker.com/products/docker-desktop). + +## Start script + +[start.sh](./start.sh) will setup the env file and run docker-compose to start the Stellar docker containers. Feel free to use this script, otherwise continue with the next two steps. + +The script takes one optional parameter which configures the Stellar network used by the docker containers. If no parameter is supplied, the containers will run on the Stellar test network. + +`./start.sh pubnet` will run the containers on the Stellar public network. + +`./start.sh standalone` will run the containers on a private standalone Stellar network. + +## Run docker-compose + +Run the following command to start all the Stellar docker containers: + +``` +docker-compose up -d --build +``` + +Horizon will be exposed on port 8000. Stellar Core will be exposed on port 11626. The Stellar Core postgres instance will be exposed on port 5641. +The Horizon postgres instance will be exposed on port 5432. + +## Swapping in a local service + +If you're developing a service locally you may want to run that service locally while also being able to interact with the other Stellar components running in Docker. 
You can do that by stopping the container corresponding to the service you're developing. + +For example, to run Horizon locally from source, you would perform the following steps: + +``` +# stop horizon in docker-compose +docker-compose stop horizon +``` + +Now you can run horizon locally in vscode using the following configuration: +``` + { + "name": "Launch", + "type": "go", + "request": "launch", + "mode": "debug", + "remotePath": "", + "port": 2345, + "host": "127.0.0.1", + "program": "${workspaceRoot}/services/horizon/main.go", + "env": { + "DATABASE_URL": "postgres://postgres@localhost:5432/horizon?sslmode=disable", + "STELLAR_CORE_DATABASE_URL": "postgres://postgres:mysecretpassword@localhost:5641/stellar?sslmode=disable", + "NETWORK_PASSPHRASE": "Test SDF Network ; September 2015", + "STELLAR_CORE_URL": "http://localhost:11626", + "INGEST": "true", + }, + "args": [] + } +``` + +Similarly, to run Stellar core locally from source and have it interact with Horizon in docker, all you need to do is run `docker-compose stop core` before running Stellar core from source. + +## Connecting to the Stellar Public Network + +By default, the Docker Compose file configures Stellar Core to connect to the Stellar test network. If you would like to run the docker containers on the +Stellar public network, run `docker-compose -f docker-compose.yml -f docker-compose.pubnet.yml up -d --build`. + +To run the containers on a private stand-alone network, run `docker-compose -f docker-compose.yml -f docker-compose.standalone.yml up -d --build`. +When you run Stellar Core on a private stand-alone network, an account will be created which will hold 100 billion Lumens. +The seed for the account will be emitted in the Stellar Core logs: + +``` +2020-04-22T18:39:19.248 GD5KD [Ledger INFO] Root account seed: SC5O7VZUXDJ6JBDSZ74DSERXL7W3Y5LTOAMRF7RQRL3TAGAPS7LUVG3L +``` + +When running Horizon on a private stand-alone network, Horizon will not start ingesting until Stellar Core creates its first history archive snapshot. Stellar Core creates snapshots every 64 ledgers, which means ingestion will be delayed until ledger 64. + +When you switch between different networks you will need to clear the Stellar Core and Stellar Horizon databases. You can wipe out the databases by running `docker-compose down --remove-orphans -v`. + +## Using a specific version of Stellar Core + +By default the Docker Compose file is configured to use version 18 of Protocol and Stellar Core. You want the Core version to be at same level as the version horizon repo expects for ingestion. You can specify optional environment variables from the command shell for stating version overrides for either the docker-compose or start.sh invocations. 
+ +PROTOCOL_VERSION=18 // the Stellar Protocol version number +CORE_IMAGE=stellar/stellar-core:18 // the docker hub image:tag +STELLAR_CORE_VERSION=18.1.1-779.ef0f44b44.focal // the apt deb package version from apt.stellar.org + +Example: + +Runs Stellar Protocol and Core version 18, for any mode of testnet,standalone,pubnet +```PROTOCOL_VERSION=18 CORE_IMAGE=stellar/stellar-core:18 STELLAR_CORE_VERSION=18.1.1-779.ef0f44b44.focal ./start.sh [standalone|pubnet]``` diff --git a/services/horizon/docker/captive-core-integration-tests.cfg b/services/horizon/docker/captive-core-integration-tests.cfg new file mode 100644 index 0000000000..ed8ac3ed73 --- /dev/null +++ b/services/horizon/docker/captive-core-integration-tests.cfg @@ -0,0 +1,13 @@ +PEER_PORT=11725 +ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING=true + +UNSAFE_QUORUM=true +FAILURE_SAFETY=0 + +[[VALIDATORS]] +NAME="local_core" +HOME_DOMAIN="core.local" +# From "SACJC372QBSSKJYTV5A7LWT4NXWHTQO6GHG4QDAVC2XDPX6CNNXFZ4JK" +PUBLIC_KEY="GD5KD2KEZJIGTC63IGW6UMUSMVUVG5IHG64HUTFWCHVZH2N2IBOQN7PS" +ADDRESS="localhost" +QUALITY="MEDIUM" diff --git a/services/horizon/docker/captive-core-pubnet.cfg b/services/horizon/docker/captive-core-pubnet.cfg new file mode 100644 index 0000000000..978e647b65 --- /dev/null +++ b/services/horizon/docker/captive-core-pubnet.cfg @@ -0,0 +1,192 @@ +PEER_PORT=11725 + +FAILURE_SAFETY=1 + +[[HOME_DOMAINS]] +HOME_DOMAIN="stellar.org" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="satoshipay.io" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="lobstr.co" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="www.coinqvest.com" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="keybase.io" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="stellar.blockdaemon.com" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="wirexapp.com" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_1" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH" +ADDRESS="core-live-a.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_2" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK" +ADDRESS="core-live-b.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_3" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ" +ADDRESS="core-live-c.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_003/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_singapore" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GBJQUIXUO4XSNPAUT6ODLZUJRV2NPXYASKUBY4G5MYP3M47PCVI55MNT" +ADDRESS="stellar-sg-sin.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-sg-sin.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_iowa" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GAK6Z5UVGUVSEK6PEOCAYJISTT5EJBB34PN3NOLEQG2SUKXRVV2F6HZY" +ADDRESS="stellar-us-iowa.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-us-iowa.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_frankfurt" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GC5SXLNAM3C4NMGK2PXK4R34B5GNZ47FYQ24ZIBFDFOCU6D4KBN4POAE" +ADDRESS="stellar-de-fra.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-de-fra.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_1_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GCFONE23AB7Y6C5YZOMKUKGETPIAJA4QOYLS5VNS4JHBGKRZCPYHDLW7" 
+ADDRESS="v1.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-1-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_2_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GDXQB3OMMQ6MGG43PWFBZWBFKBBDUZIVSUDAZZTRAWQZKES2CDSE5HKJ" +ADDRESS="v2.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-2-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_3_north_america" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GD5QWEVV4GZZTQP46BRXV5CUMMMLP4JTGFD7FWYJJWRL54CELY6JGQ63" +ADDRESS="v3.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-3-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_4_asia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA7TEPCBDQKI7JQLQ34ZURRMK44DVYCIGVXQQWNSWAEQR6KB4FMCBT7J" +ADDRESS="v4.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-4-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_5_australia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA5STBMV6QDXFDGD62MEHLLHZTPDI77U3PFOD2SELU5RJDHQWBR5NNK7" +ADDRESS="v5.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-5-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_hong_kong" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GAZ437J46SCFPZEDLVGDMKZPLFO77XJ4QVAURSJVRZK2T5S7XUFHXI2Z" +ADDRESS="hongkong.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://hongkong.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_germany" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GD6SZQV3WEJUH352NTVLKEV2JM2RH266VPEM7EH5QLLI7ZZAALMLNUVN" +ADDRESS="germany.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://germany.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_finland" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GADLA6BJK6VK33EM2IDQM37L5KGVCY5MSHSHVJA4SCNGNUIEOTCR6J5T" +ADDRESS="finland.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://finland.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_io" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GCWJKM4EGTGJUVSWUJDPCQEOEP5LHSOFKSA4HALBTOO4T4H3HCHOM6UX" +ADDRESS="stellar0.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_1" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GDKWELGJURRKXECG3HHFHXMRX64YWQPUHKCVRESOX3E5PM6DM4YXLZJM" +ADDRESS="stellar1.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory1.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_2" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GA35T3723UP2XJLC2H7MNL6VMKZZIFL2VW7XHMFFJKKIA2FJCYTLKFBW" +ADDRESS="stellar2.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory2.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_1" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAAV2GCVFLNN522ORUYFV33E76VPC22E72S75AQ6MBR5V45Z5DWVPWEU" +ADDRESS="stellar-full-validator1.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history1.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_2" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAVXB7SBJRYHSG6KSQHY74N7JAFRL4PFVZCNWW2ARI6ZEKNBJSMSKW7C" +ADDRESS="stellar-full-validator2.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history2.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_3" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAYXZ4PZ7P6QOX7EBHPIZXNWY4KCOBYWJCA4WKWRKC7XIUS3UJPT6EZ4" +ADDRESS="stellar-full-validator3.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history3.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] 
+NAME="wirexUS" +ADDRESS="us.stellar.wirexapp.com" +HOME_DOMAIN="wirexapp.com" +PUBLIC_KEY="GDXUKFGG76WJC7ACEH3JUPLKM5N5S76QSMNDBONREUXPCZYVPOLFWXUS" +HISTORY="curl -sf http://wxhorizonusstga1.blob.core.windows.net/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="wirexUK" +ADDRESS="uk.stellar.wirexapp.com" +HOME_DOMAIN="wirexapp.com" +PUBLIC_KEY="GBBQQT3EIUSXRJC6TGUCGVA3FVPXVZLGG3OJYACWBEWYBHU46WJLWXEU" +HISTORY="curl -sf http://wxhorizonukstga1.blob.core.windows.net/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="wirexSG" +ADDRESS="sg.stellar.wirexapp.com" +HOME_DOMAIN="wirexapp.com" +PUBLIC_KEY="GAB3GZIE6XAYWXGZUDM4GMFFLJBFMLE2JDPUCWUZXMOMT3NHXDHEWXAS" +HISTORY="curl -sf http://wxhorizonasiastga1.blob.core.windows.net/history/{0} -o {1}" \ No newline at end of file diff --git a/services/horizon/docker/captive-core-reingest-range-integration-tests.cfg b/services/horizon/docker/captive-core-reingest-range-integration-tests.cfg new file mode 100644 index 0000000000..4902cf8d15 --- /dev/null +++ b/services/horizon/docker/captive-core-reingest-range-integration-tests.cfg @@ -0,0 +1,9 @@ +ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING=true + +[[VALIDATORS]] +NAME="local_core" +HOME_DOMAIN="core.local" +# From "SACJC372QBSSKJYTV5A7LWT4NXWHTQO6GHG4QDAVC2XDPX6CNNXFZ4JK" +PUBLIC_KEY="GD5KD2KEZJIGTC63IGW6UMUSMVUVG5IHG64HUTFWCHVZH2N2IBOQN7PS" +ADDRESS="localhost" +QUALITY="MEDIUM" diff --git a/services/horizon/docker/captive-core-standalone.cfg b/services/horizon/docker/captive-core-standalone.cfg new file mode 100644 index 0000000000..d54b0ecae1 --- /dev/null +++ b/services/horizon/docker/captive-core-standalone.cfg @@ -0,0 +1,12 @@ +PEER_PORT=11725 + +UNSAFE_QUORUM=true +FAILURE_SAFETY=0 + +[[VALIDATORS]] +NAME="local_core" +HOME_DOMAIN="core.local" +# From "SACJC372QBSSKJYTV5A7LWT4NXWHTQO6GHG4QDAVC2XDPX6CNNXFZ4JK" +PUBLIC_KEY="GD5KD2KEZJIGTC63IGW6UMUSMVUVG5IHG64HUTFWCHVZH2N2IBOQN7PS" +ADDRESS="host.docker.internal" +QUALITY="MEDIUM" \ No newline at end of file diff --git a/services/horizon/docker/captive-core-testnet.cfg b/services/horizon/docker/captive-core-testnet.cfg new file mode 100644 index 0000000000..af327834d7 --- /dev/null +++ b/services/horizon/docker/captive-core-testnet.cfg @@ -0,0 +1,29 @@ +PEER_PORT=11725 + +UNSAFE_QUORUM=true +FAILURE_SAFETY=1 + +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="core-testnet1.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP" +ADDRESS="core-testnet2.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_3" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z" +ADDRESS="core-testnet3.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}" \ No newline at end of file diff --git a/services/horizon/docker/core-start.sh b/services/horizon/docker/core-start.sh new file mode 100755 index 0000000000..b0fadb6bb2 --- /dev/null +++ b/services/horizon/docker/core-start.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +set -e +set -x + +source /etc/profile +# work within the current docker working dir +if [ ! 
-f "./stellar-core.cfg" ]; then + cp /stellar-core.cfg ./ +fi + +echo "using config:" +cat stellar-core.cfg + +# initialize new db +stellar-core new-db + +if [ "$1" = "standalone" ]; then + # initialize for new history archive path, remove any pre-existing on same path from base image + rm -rf ./history + stellar-core new-hist vs + + # serve history archives to horizon on port 1570 + pushd ./history/vs/ + python3 -m http.server 1570 & + popd +fi + +exec stellar-core run diff --git a/services/horizon/docker/docker-compose.integration-tests.yml b/services/horizon/docker/docker-compose.integration-tests.yml new file mode 100644 index 0000000000..96ef94f12b --- /dev/null +++ b/services/horizon/docker/docker-compose.integration-tests.yml @@ -0,0 +1,34 @@ +version: '3' +services: + core-postgres: + image: postgres:9.6.17-alpine + restart: on-failure + environment: + - POSTGRES_PASSWORD=mysecretpassword + - POSTGRES_DB=stellar + ports: + - "5641:5641" + command: ["-p", "5641"] + core: + platform: linux/amd64 + # TODO replace with official SDF image when ready. Note that this: + # https://github.com/stellar/stellar-core/commit/31597b760f8e325fc84da0937adc373a78878ca9 + # breaks the tests. I reverted it before building temp docker image. + # Command used to build custom image: + # docker build -t bartekno/stellar-core:17.4.0-p18 --build-arg STELLAR_CORE_VERSION=17.3.1-679.c5f6349.focal~protocol18~buildtests --build-arg DISTRO=focal . + image: ${CORE_IMAGE:-stellar/stellar-core:18} + depends_on: + - core-postgres + restart: on-failure + environment: + - TRACY_NO_INVARIANT_CHECK=1 + ports: + - "11625:11625" + - "11626:11626" + # add extra port for history archive server + - "1570:1570" + entrypoint: /usr/bin/env + command: /start standalone + volumes: + - ./stellar-core-integration-tests.cfg:/stellar-core.cfg + - ./core-start.sh:/start diff --git a/services/horizon/docker/docker-compose.pubnet.yml b/services/horizon/docker/docker-compose.pubnet.yml new file mode 100644 index 0000000000..f6e380749d --- /dev/null +++ b/services/horizon/docker/docker-compose.pubnet.yml @@ -0,0 +1,10 @@ +version: '3' +services: + horizon: + platform: linux/amd64 + environment: + - HISTORY_ARCHIVE_URLS=https://history.stellar.org/prd/core-live/core_live_001 + - NETWORK_PASSPHRASE=Public Global Stellar Network ; September 2015 + - CAPTIVE_CORE_CONFIG_APPEND_PATH=/captive-core-pubnet.cfg + volumes: + - ./captive-core-pubnet.cfg:/captive-core-pubnet.cfg diff --git a/services/horizon/docker/docker-compose.standalone.yml b/services/horizon/docker/docker-compose.standalone.yml new file mode 100644 index 0000000000..43bd9ac9d7 --- /dev/null +++ b/services/horizon/docker/docker-compose.standalone.yml @@ -0,0 +1,52 @@ +version: '3' +services: + core-postgres: + image: postgres:9.6.17-alpine + restart: on-failure + environment: + - POSTGRES_PASSWORD=mysecretpassword + - POSTGRES_DB=stellar + ports: + - "5641:5641" + command: ["-p", "5641"] + volumes: + - "core-db-data:/var/lib/postgresql/data" + + core: + platform: linux/amd64 + image: ${CORE_IMAGE:-stellar/stellar-core:18} + depends_on: + - core-postgres + - core-upgrade + restart: on-failure + ports: + - "11625:11625" + - "11626:11626" + # add extra port for history archive server + - "1570:1570" + entrypoint: /usr/bin/env + command: /start standalone + volumes: + - ./stellar-core-standalone.cfg:/stellar-core.cfg + - ./core-start.sh:/start + extra_hosts: + - "host.docker.internal:host-gateway" + + horizon: + environment: + - HISTORY_ARCHIVE_URLS=http://host.docker.internal:1570 + 
- NETWORK_PASSPHRASE=Standalone Network ; February 2017 + - CAPTIVE_CORE_CONFIG_APPEND_PATH=/captive-core-standalone.cfg + volumes: + - ./captive-core-standalone.cfg:/captive-core-standalone.cfg + + # this container will invoke a request to upgrade stellar core to protocol 17 (by default) + core-upgrade: + restart: on-failure + image: curlimages/curl:7.69.1 + command: ["-v", "-f", "http://host.docker.internal:11626/upgrades?mode=set&upgradetime=1970-01-01T00:00:00Z&protocolversion=${PROTOCOL_VERSION:-18}"] + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + core-db-data: diff --git a/services/horizon/docker/docker-compose.yml b/services/horizon/docker/docker-compose.yml new file mode 100644 index 0000000000..40bced6677 --- /dev/null +++ b/services/horizon/docker/docker-compose.yml @@ -0,0 +1,42 @@ +version: '3' +services: + horizon-postgres: + image: postgres:9.6.17-alpine + restart: on-failure + environment: + - POSTGRES_HOST_AUTH_METHOD=trust + - POSTGRES_DB=horizon + ports: + - "5432:5432" + volumes: + - "horizon-db-data:/var/lib/postgresql/data" + + horizon: + platform: linux/amd64 + depends_on: + - horizon-postgres + build: + # set build context to the root directory of the go monorepo + context: ../../../ + args: + STELLAR_CORE_VERSION: ${STELLAR_CORE_VERSION:-} + dockerfile: services/horizon/docker/Dockerfile.dev + restart: on-failure + ports: + - "8000:8000" + - "11725:11725" + environment: + - DATABASE_URL=postgres://postgres@host.docker.internal:5432/horizon?sslmode=disable + - CAPTIVE_CORE_CONFIG_APPEND_PATH=/captive-core-testnet.cfg + - HISTORY_ARCHIVE_URLS=https://history.stellar.org/prd/core-testnet/core_testnet_001 + - NETWORK_PASSPHRASE=Test SDF Network ; September 2015 + - INGEST=true + - PER_HOUR_RATE_LIMIT=0 + volumes: + - ./captive-core-testnet.cfg:/captive-core-testnet.cfg + command: ["--apply-migrations"] + extra_hosts: + - "host.docker.internal:host-gateway" + +volumes: + horizon-db-data: diff --git a/services/horizon/docker/start.sh b/services/horizon/docker/start.sh new file mode 100755 index 0000000000..824fa19a0a --- /dev/null +++ b/services/horizon/docker/start.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -e + +NETWORK=${1:-testnet} + +case $NETWORK in + standalone) + DOCKER_FLAGS="-f docker-compose.yml -f docker-compose.standalone.yml" + echo "running on standalone network" + ;; + + pubnet) + DOCKER_FLAGS="-f docker-compose.yml -f docker-compose.pubnet.yml" + echo "running on public network" + ;; + + testnet) + echo "running on test network" + ;; + + *) + echo "$1 is not a supported option " + exit 1 + ;; +esac + +docker-compose $DOCKER_FLAGS up --build -d \ No newline at end of file diff --git a/services/horizon/docker/stellar-core-integration-tests.cfg b/services/horizon/docker/stellar-core-integration-tests.cfg new file mode 100644 index 0000000000..e27cfe14ed --- /dev/null +++ b/services/horizon/docker/stellar-core-integration-tests.cfg @@ -0,0 +1,24 @@ +ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING=true + +NETWORK_PASSPHRASE="Standalone Network ; February 2017" + +PEER_PORT=11625 +HTTP_PORT=11626 +PUBLIC_HTTP_PORT=true + +NODE_SEED="SACJC372QBSSKJYTV5A7LWT4NXWHTQO6GHG4QDAVC2XDPX6CNNXFZ4JK" + +NODE_IS_VALIDATOR=true +UNSAFE_QUORUM=true +FAILURE_SAFETY=0 + +DATABASE="postgresql://user=postgres password=mysecretpassword host=core-postgres port=5641 dbname=stellar" + +[QUORUM_SET] +THRESHOLD_PERCENT=100 +VALIDATORS=["GD5KD2KEZJIGTC63IGW6UMUSMVUVG5IHG64HUTFWCHVZH2N2IBOQN7PS"] + +[HISTORY.vs] +get="cp history/vs/{0} {1}" +put="cp {0} history/vs/{1}" 
+mkdir="mkdir -p history/vs/{0}" \ No newline at end of file diff --git a/services/horizon/docker/stellar-core-pubnet.cfg b/services/horizon/docker/stellar-core-pubnet.cfg new file mode 100644 index 0000000000..4efe7e46cd --- /dev/null +++ b/services/horizon/docker/stellar-core-pubnet.cfg @@ -0,0 +1,202 @@ +# simple configuration for a standalone test "network" +# see stellar-core_example.cfg for a description of the configuration parameters + +HTTP_PORT=11626 +PUBLIC_HTTP_PORT=true +LOG_FILE_PATH="" + +DATABASE="postgresql://user=postgres password=mysecretpassword host=host.docker.internal port=5641 dbname=stellar" +NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015" +CATCHUP_RECENT=100 + +[HISTORY.cache] +get="cp /opt/stellar/history-cache/{0} {1}" + +[[HOME_DOMAINS]] +HOME_DOMAIN="stellar.org" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="satoshipay.io" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="lobstr.co" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="www.coinqvest.com" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="keybase.io" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="stellar.blockdaemon.com" +QUALITY="HIGH" + +[[HOME_DOMAINS]] +HOME_DOMAIN="wirexapp.com" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_1" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH" +ADDRESS="core-live-a.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_2" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK" +ADDRESS="core-live-b.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_3" +HOME_DOMAIN="stellar.org" +PUBLIC_KEY="GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ" +ADDRESS="core-live-c.stellar.org:11625" +HISTORY="curl -sf https://history.stellar.org/prd/core-live/core_live_003/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_singapore" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GBJQUIXUO4XSNPAUT6ODLZUJRV2NPXYASKUBY4G5MYP3M47PCVI55MNT" +ADDRESS="stellar-sg-sin.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-sg-sin.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_iowa" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GAK6Z5UVGUVSEK6PEOCAYJISTT5EJBB34PN3NOLEQG2SUKXRVV2F6HZY" +ADDRESS="stellar-us-iowa.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-us-iowa.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="satoshipay_frankfurt" +HOME_DOMAIN="satoshipay.io" +PUBLIC_KEY="GC5SXLNAM3C4NMGK2PXK4R34B5GNZ47FYQ24ZIBFDFOCU6D4KBN4POAE" +ADDRESS="stellar-de-fra.satoshipay.io:11625" +HISTORY="curl -sf https://stellar-history-de-fra.satoshipay.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_1_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GCFONE23AB7Y6C5YZOMKUKGETPIAJA4QOYLS5VNS4JHBGKRZCPYHDLW7" +ADDRESS="v1.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-1-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_2_europe" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GDXQB3OMMQ6MGG43PWFBZWBFKBBDUZIVSUDAZZTRAWQZKES2CDSE5HKJ" +ADDRESS="v2.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-2-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_3_north_america" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GD5QWEVV4GZZTQP46BRXV5CUMMMLP4JTGFD7FWYJJWRL54CELY6JGQ63" +ADDRESS="v3.stellar.lobstr.co:11625" +HISTORY="curl -sf 
https://stellar-archive-3-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_4_asia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA7TEPCBDQKI7JQLQ34ZURRMK44DVYCIGVXQQWNSWAEQR6KB4FMCBT7J" +ADDRESS="v4.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-4-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="lobstr_5_australia" +HOME_DOMAIN="lobstr.co" +PUBLIC_KEY="GA5STBMV6QDXFDGD62MEHLLHZTPDI77U3PFOD2SELU5RJDHQWBR5NNK7" +ADDRESS="v5.stellar.lobstr.co:11625" +HISTORY="curl -sf https://stellar-archive-5-lobstr.s3.amazonaws.com/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_hong_kong" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GAZ437J46SCFPZEDLVGDMKZPLFO77XJ4QVAURSJVRZK2T5S7XUFHXI2Z" +ADDRESS="hongkong.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://hongkong.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_germany" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GD6SZQV3WEJUH352NTVLKEV2JM2RH266VPEM7EH5QLLI7ZZAALMLNUVN" +ADDRESS="germany.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://germany.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="coinqvest_finland" +HOME_DOMAIN="www.coinqvest.com" +PUBLIC_KEY="GADLA6BJK6VK33EM2IDQM37L5KGVCY5MSHSHVJA4SCNGNUIEOTCR6J5T" +ADDRESS="finland.stellar.coinqvest.com:11625" +HISTORY="curl -sf https://finland.stellar.coinqvest.com/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_io" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GCWJKM4EGTGJUVSWUJDPCQEOEP5LHSOFKSA4HALBTOO4T4H3HCHOM6UX" +ADDRESS="stellar0.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_1" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GDKWELGJURRKXECG3HHFHXMRX64YWQPUHKCVRESOX3E5PM6DM4YXLZJM" +ADDRESS="stellar1.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory1.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="keybase_2" +HOME_DOMAIN="keybase.io" +PUBLIC_KEY="GA35T3723UP2XJLC2H7MNL6VMKZZIFL2VW7XHMFFJKKIA2FJCYTLKFBW" +ADDRESS="stellar2.keybase.io:11625" +HISTORY="curl -sf https://stellarhistory2.keybase.io/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_1" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAAV2GCVFLNN522ORUYFV33E76VPC22E72S75AQ6MBR5V45Z5DWVPWEU" +ADDRESS="stellar-full-validator1.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history1.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_2" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAVXB7SBJRYHSG6KSQHY74N7JAFRL4PFVZCNWW2ARI6ZEKNBJSMSKW7C" +ADDRESS="stellar-full-validator2.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history2.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="Blockdaemon_Validator_3" +HOME_DOMAIN="stellar.blockdaemon.com" +PUBLIC_KEY="GAYXZ4PZ7P6QOX7EBHPIZXNWY4KCOBYWJCA4WKWRKC7XIUS3UJPT6EZ4" +ADDRESS="stellar-full-validator3.bdnodes.net" +HISTORY="curl -sf https://stellar-full-history3.bdnodes.net/{0} -o {1}" + +[[VALIDATORS]] +NAME="wirexUS" +ADDRESS="us.stellar.wirexapp.com" +HOME_DOMAIN="wirexapp.com" +PUBLIC_KEY="GDXUKFGG76WJC7ACEH3JUPLKM5N5S76QSMNDBONREUXPCZYVPOLFWXUS" +HISTORY="curl -sf http://wxhorizonusstga1.blob.core.windows.net/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="wirexUK" +ADDRESS="uk.stellar.wirexapp.com" +HOME_DOMAIN="wirexapp.com" +PUBLIC_KEY="GBBQQT3EIUSXRJC6TGUCGVA3FVPXVZLGG3OJYACWBEWYBHU46WJLWXEU" +HISTORY="curl -sf http://wxhorizonukstga1.blob.core.windows.net/history/{0} -o {1}" + +[[VALIDATORS]] +NAME="wirexSG" +ADDRESS="sg.stellar.wirexapp.com" +HOME_DOMAIN="wirexapp.com" 
+PUBLIC_KEY="GAB3GZIE6XAYWXGZUDM4GMFFLJBFMLE2JDPUCWUZXMOMT3NHXDHEWXAS" +HISTORY="curl -sf http://wxhorizonasiastga1.blob.core.windows.net/history/{0} -o {1}" \ No newline at end of file diff --git a/services/horizon/docker/stellar-core-standalone.cfg b/services/horizon/docker/stellar-core-standalone.cfg new file mode 100644 index 0000000000..a2b7e806c9 --- /dev/null +++ b/services/horizon/docker/stellar-core-standalone.cfg @@ -0,0 +1,25 @@ +# simple configuration for a standalone test "network" +# see stellar-core_example.cfg for a description of the configuration parameters + +NETWORK_PASSPHRASE="Standalone Network ; February 2017" + +PEER_PORT=11625 +HTTP_PORT=11626 +PUBLIC_HTTP_PORT=true + +NODE_SEED="SACJC372QBSSKJYTV5A7LWT4NXWHTQO6GHG4QDAVC2XDPX6CNNXFZ4JK" + +NODE_IS_VALIDATOR=true +UNSAFE_QUORUM=true +FAILURE_SAFETY=0 + +DATABASE="postgresql://user=postgres password=mysecretpassword host=host.docker.internal port=5641 dbname=stellar" + +[QUORUM_SET] +THRESHOLD_PERCENT=100 +VALIDATORS=["GD5KD2KEZJIGTC63IGW6UMUSMVUVG5IHG64HUTFWCHVZH2N2IBOQN7PS"] + +[HISTORY.vs] +get="cp history/vs/{0} {1}" +put="cp {0} history/vs/{1}" +mkdir="mkdir -p history/vs/{0}" \ No newline at end of file diff --git a/services/horizon/docker/stellar-core-testnet.cfg b/services/horizon/docker/stellar-core-testnet.cfg new file mode 100644 index 0000000000..cf8546a3e9 --- /dev/null +++ b/services/horizon/docker/stellar-core-testnet.cfg @@ -0,0 +1,41 @@ +# simple configuration for a standalone test "network" +# see stellar-core_example.cfg for a description of the configuration parameters + +HTTP_PORT=11626 +PUBLIC_HTTP_PORT=true +LOG_FILE_PATH="" + +NETWORK_PASSPHRASE="Test SDF Network ; September 2015" + +DATABASE="postgresql://user=postgres password=mysecretpassword host=host.docker.internal port=5641 dbname=stellar" +UNSAFE_QUORUM=true +FAILURE_SAFETY=1 +CATCHUP_RECENT=100 + +[HISTORY.cache] +get="cp /opt/stellar/history-cache/{0} {1}" + +[[HOME_DOMAINS]] +HOME_DOMAIN="testnet.stellar.org" +QUALITY="HIGH" + +[[VALIDATORS]] +NAME="sdf_testnet_1" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y" +ADDRESS="core-testnet1.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_001/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_2" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP" +ADDRESS="core-testnet2.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_002/{0} -o {1}" + +[[VALIDATORS]] +NAME="sdf_testnet_3" +HOME_DOMAIN="testnet.stellar.org" +PUBLIC_KEY="GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z" +ADDRESS="core-testnet3.stellar.org" +HISTORY="curl -sf http://history.stellar.org/prd/core-testnet/core_testnet_003/{0} -o {1}" diff --git a/services/horizon/docker/verify-range/Dockerfile b/services/horizon/docker/verify-range/Dockerfile new file mode 100644 index 0000000000..4cc335b664 --- /dev/null +++ b/services/horizon/docker/verify-range/Dockerfile @@ -0,0 +1,19 @@ +FROM ubuntu:20.04 + +MAINTAINER Bartek Nowotarski + +ARG STELLAR_CORE_VERSION +ENV STELLAR_CORE_VERSION=${STELLAR_CORE_VERSION:-*} +# to remove tzdata interactive flow +ENV DEBIAN_FRONTEND=noninteractive + +ADD dependencies / +RUN ["chmod", "+x", "dependencies"] +RUN /dependencies + +ADD stellar-core.cfg / + +ADD start / +RUN ["chmod", "+x", "start"] + +ENTRYPOINT ["/start"] diff --git a/services/horizon/docker/verify-range/README.md 
b/services/horizon/docker/verify-range/README.md
new file mode 100644
index 0000000000..fb55a0b9ab
--- /dev/null
+++ b/services/horizon/docker/verify-range/README.md
@@ -0,0 +1,44 @@
+# `stellar/expingest-verify-range`
+
+This Docker image allows running multiple instances of the `horizon ingest verify-range` command on a single machine or running it in [AWS Batch](https://aws.amazon.com/batch/).
+
+## Env variables
+
+### Running locally
+
+| Name     | Description                                             |
+|----------|---------------------------------------------------------|
+| `BRANCH` | Git branch to build (useful for testing PRs)            |
+| `FROM`   | First ledger of the range (must be a checkpoint ledger) |
+| `TO`     | Last ledger of the range (must be a checkpoint ledger)  |
+
+### Running in AWS Batch
+
+| Name                 | Description                                                          |
+|----------------------|----------------------------------------------------------------------|
+| `BRANCH`             | Git branch to build (useful for testing PRs)                         |
+| `BATCH_START_LEDGER` | First ledger of the AWS Batch Job; must be a checkpoint ledger or 1. |
+| `BATCH_SIZE`         | Size of the batch; must be a multiple of 64.                         |
+
+#### Example
+
+When you start 10 jobs with `BATCH_START_LEDGER=63` and `BATCH_SIZE=64`
+the jobs will run the following ranges:
+
+| `AWS_BATCH_JOB_ARRAY_INDEX` | `FROM` | `TO` |
+|-----------------------------|--------|------|
+| 0                           | 63     | 127  |
+| 1                           | 127    | 191  |
+| 2                           | 191    | 255  |
+| 3                           | 255    | 319  |
+
+## Tips when using AWS Batch
+
+* In "Job definition", set vCPUs to 2 and Memory to 4096. This corresponds to the `c5.large` instances Horizon should be using.
+* In "Compute environments":
+  * Set instance type to "c5.large".
+  * Set "Maximum vCPUs" to 2x the number of instances you want to start (because "c5.large" has 2 vCPUs). Ex. 10 vCPUs = 5 x "c5.large" instances.
+* Use spot instances! They are much cheaper, and the speed of testing will be the same in 99% of cases.
+* You need to publish the image if there are any changes in the `Dockerfile` or one of the scripts.
+* When batch processing is over, check that the instances have been terminated. Sometimes AWS doesn't terminate them.
+* Make sure the job timeout is set to a larger value if you verify larger ranges; the default is just 100 seconds.
diff --git a/services/horizon/docker/verify-range/dependencies b/services/horizon/docker/verify-range/dependencies
new file mode 100644
index 0000000000..5712727a4f
--- /dev/null
+++ b/services/horizon/docker/verify-range/dependencies
@@ -0,0 +1,29 @@
+#!
/usr/bin/env bash +set -e + +apt-get update +apt-get install -y curl git libpq-dev libsqlite3-dev libsasl2-dev postgresql-client postgresql postgresql-contrib sudo vim zlib1g-dev wget gnupg2 lsb-release +apt-get clean + +wget -qO - https://apt.stellar.org/SDF.asc | apt-key add - +echo "deb https://apt.stellar.org $(lsb_release -cs) stable" | sudo tee -a /etc/apt/sources.list.d/SDF.list +echo "deb https://apt.stellar.org $(lsb_release -cs) unstable" | sudo tee -a /etc/apt/sources.list.d/SDF-unstable.list +apt-get update +apt-get install -y stellar-core=${STELLAR_CORE_VERSION} + +wget https://dl.google.com/go/go1.17.linux-amd64.tar.gz +tar -C /usr/local -xzf go1.17.linux-amd64.tar.gz + +# configure postgres +service postgresql start +sudo -u postgres createdb horizon + +sudo -u postgres psql -c "ALTER USER postgres PASSWORD 'postgres';" + +git clone https://github.com/stellar/go.git stellar-go +cd stellar-go +# By default "git fetch" only fetches refs/ +# Below ensures we also fetch PR refs +git config --add remote.origin.fetch "+refs/pull/*/head:refs/remotes/origin/pull/*" +git fetch --force --quiet origin +/usr/local/go/bin/go build -v ./services/horizon diff --git a/services/horizon/docker/verify-range/start b/services/horizon/docker/verify-range/start new file mode 100644 index 0000000000..0ebc868873 --- /dev/null +++ b/services/horizon/docker/verify-range/start @@ -0,0 +1,148 @@ +#! /usr/bin/env bash +set -e + +service postgresql start + +# Calculate params for AWS Batch +if [ ! -z "$AWS_BATCH_JOB_ARRAY_INDEX" ]; then + # The batch should have three env variables: + # * BATCH_START_LEDGER - start ledger of the job, must be equal 1 or a + # checkpoint ledger (i + 1) % 64 == 0. + # * BATCH_SIZE - size of the batch in ledgers, must be multiple of 64! + # * BRANCH - git branch to build + # + # Ex: BATCH_START_LEDGER=63, BATCH_SIZE=64 will create the following ranges: + # AWS_BATCH_JOB_ARRAY_INDEX=0: [63, 127] + # AWS_BATCH_JOB_ARRAY_INDEX=1: [127, 191] + # AWS_BATCH_JOB_ARRAY_INDEX=2: [191, 255] + # AWS_BATCH_JOB_ARRAY_INDEX=3: [255, 319] + # ... 
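+  # In general (when BATCH_START_LEDGER > 1) the computation below is:
+  #   FROM = BATCH_SIZE * AWS_BATCH_JOB_ARRAY_INDEX + BATCH_START_LEDGER
+  #   TO   = FROM + BATCH_SIZE
+  # When BATCH_START_LEDGER is 1, both values are shifted down by 2 so that TO
+  # still lands on a checkpoint ledger, and FROM is clamped to 1 for the first
+  # job (array index 0).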
+ + if [ $BATCH_START_LEDGER -eq 1 ]; then + export FROM=`expr "$BATCH_SIZE" \* "$AWS_BATCH_JOB_ARRAY_INDEX" + "$BATCH_START_LEDGER" - "2"` + export TO=`expr "$BATCH_SIZE" \* "$AWS_BATCH_JOB_ARRAY_INDEX" + "$BATCH_START_LEDGER" + "$BATCH_SIZE" - "2"` + if [ $FROM -eq -1 ]; then + export FROM="1" + fi + else + export FROM=`expr "$BATCH_SIZE" \* "$AWS_BATCH_JOB_ARRAY_INDEX" + "$BATCH_START_LEDGER"` + export TO=`expr "$BATCH_SIZE" \* "$AWS_BATCH_JOB_ARRAY_INDEX" + "$BATCH_START_LEDGER" + "$BATCH_SIZE"` + fi +fi + +export LEDGER_COUNT=`expr "$TO" - "$FROM" + "1"` + +echo "FROM: $FROM TO: $TO" + +dump_horizon_db() { + echo "dumping history_effects" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -t -A -F"," --variable="FETCH_COUNT=100" -c "select history_effects.history_operation_id, history_effects.order, type, details, history_accounts.address, address_muxed from history_effects left join history_accounts on history_accounts.id = history_effects.history_account_id order by history_operation_id asc, \"order\" asc" > "${1}_effects" + echo "dumping history_ledgers" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -t -A -F"," --variable="FETCH_COUNT=100" -c "select sequence, ledger_hash, previous_ledger_hash, transaction_count, operation_count, closed_at, id, total_coins, fee_pool, base_fee, base_reserve, max_tx_set_size, protocol_version, ledger_header, successful_transaction_count, failed_transaction_count from history_ledgers order by sequence asc" > "${1}_ledgers" + echo "dumping history_operations" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -t -A -F"," --variable="FETCH_COUNT=100" -c "select * from history_operations order by id asc" > "${1}_operations" + echo "dumping history_operation_claimable_balances" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -t -A -F"," --variable="FETCH_COUNT=100" -c "select history_operation_id, history_claimable_balance_id from history_operation_claimable_balances left join history_claimable_balances on history_claimable_balances.id = history_operation_claimable_balances.history_claimable_balance_id order by history_operation_id asc, id asc" > "${1}_operation_claimable_balances" + echo "dumping history_operation_participants" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -t -A -F"," --variable="FETCH_COUNT=100" -c "select history_operation_id, address from history_operation_participants left join history_accounts on history_accounts.id = history_operation_participants.history_account_id order by history_operation_id asc, address asc" > "${1}_operation_participants" + echo "dumping history_trades" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -t -A -F"," --variable="FETCH_COUNT=100" -c "select history_trades.history_operation_id, history_trades.order, history_trades.ledger_closed_at, CASE WHEN history_trades.base_is_seller THEN history_trades.price_n ELSE history_trades.price_d END, CASE WHEN history_trades.base_is_seller THEN history_trades.price_d ELSE history_trades.price_n END, CASE WHEN history_trades.base_is_seller THEN history_trades.base_offer_id ELSE history_trades.counter_offer_id END, CASE WHEN history_trades.base_is_seller THEN history_trades.counter_offer_id ELSE history_trades.base_offer_id END, CASE WHEN history_trades.base_is_seller THEN baccount.address ELSE caccount.address END, CASE WHEN history_trades.base_is_seller THEN caccount.address ELSE baccount.address 
END, CASE WHEN history_trades.base_is_seller THEN basset.asset_type ELSE casset.asset_type END, CASE WHEN history_trades.base_is_seller THEN basset.asset_code ELSE casset.asset_code END, CASE WHEN history_trades.base_is_seller THEN basset.asset_issuer ELSE casset.asset_issuer END, CASE WHEN history_trades.base_is_seller THEN casset.asset_type ELSE basset.asset_type END, CASE WHEN history_trades.base_is_seller THEN casset.asset_code ELSE basset.asset_code END, CASE WHEN history_trades.base_is_seller THEN casset.asset_issuer ELSE basset.asset_issuer END from history_trades left join history_accounts baccount on baccount.id = history_trades.base_account_id left join history_accounts caccount on caccount.id = history_trades.counter_account_id left join history_assets basset on basset.id = history_trades.base_asset_id left join history_assets casset on casset.id = history_trades.counter_asset_id order by history_operation_id asc, \"order\" asc" > "${1}_trades" + echo "dumping history_transactions" + # Note: we skip `tx_meta` field here because it's a data structure (C++ unordered_map) which can be in different order + # in different Stellar-Core instances. The final fix should probably: unmarshal `tx_meta`, sort it, marshal and compare. + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -t -A -F"," --variable="FETCH_COUNT=100" -c "select transaction_hash, ledger_sequence, application_order, account, account_sequence, max_fee, operation_count, id, tx_envelope, tx_result, tx_fee_meta, signatures, memo_type, memo, time_bounds, successful, fee_charged, inner_transaction_hash, fee_account, inner_signatures, new_max_fee, account_muxed, fee_account_muxed from history_transactions order by id asc" > "${1}_transactions" + echo "dumping history_transaction_claimable_balances" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -t -A -F"," --variable="FETCH_COUNT=100" -c "select history_transaction_id, history_claimable_balance_id from history_transaction_claimable_balances left join history_claimable_balances on history_claimable_balances.id = history_transaction_claimable_balances.history_claimable_balance_id order by history_transaction_id, id" > "${1}_transaction_claimable_balances" + echo "dumping history_transaction_participants" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -t -A -F"," --variable="FETCH_COUNT=100" -c "select history_transaction_id, address from history_transaction_participants left join history_accounts on history_accounts.id = history_transaction_participants.history_account_id order by history_transaction_id, address" > "${1}_transaction_participants" +} + +# pubnet horizon config +export NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015" +export HISTORY_ARCHIVE_URLS="https://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-live/core_live_001" +export DATABASE_URL="postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" + +cd stellar-go +git pull origin +if [ ! 
-z "$BRANCH" ]; then + git checkout $BRANCH +fi +git log -1 --pretty=oneline + +function alter_tables_unlogged() { + # UNLOGGED for performance reasons (order is important because some tables reference others) + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE accounts SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE accounts_data SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE accounts_signers SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE claimable_balances SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE exp_asset_stats SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_trades SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_accounts SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_assets SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_claimable_balances SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_effects SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_ledgers SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_operation_claimable_balances SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_operation_participants SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_operations SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_transaction_claimable_balances SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_transaction_participants SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE history_transactions SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE offers SET UNLOGGED;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "ALTER TABLE trust_lines SET UNLOGGED;" +} + +/usr/local/go/bin/go build -v ./services/horizon +./horizon db migrate up +alter_tables_unlogged +./horizon ingest verify-range --from $FROM --to $TO --verify-state + +function compare() { + local expected="old_$1" + local actual="new_$1" + + # Files can be very large, leading to `diff` running out of memory. + # As a workaround, since files are expected to be identical, + # we compare the hashes first. + local hash=$(shasum -a 256 "$expected" | cut -f 1 -d ' ') + local check_command="$hash $actual" + + if ! ( echo "$check_command" | shasum -a 256 -c ); then + diff --speed-large-files "$expected" "$actual" + fi +} + +BASE_BRANCH=${BASE_BRANCH:-horizon-v2.0.0} + +if [ ! 
-z "$VERIFY_HISTORY" ]; then + dump_horizon_db "new_history" + + echo "Done dump_horizon_db new_history" + + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "DROP SCHEMA public CASCADE;" + psql "postgres://postgres:postgres@localhost:5432/horizon?sslmode=disable" -c "CREATE SCHEMA public;" + + git checkout "$BASE_BRANCH" + + /usr/local/go/bin/go build -v ./services/horizon + + ./horizon db migrate up + alter_tables_unlogged + REINGEST_FROM=$((FROM + 1)) # verify-range does not ingest starting ledger + ./horizon db reingest range $REINGEST_FROM $TO + + dump_horizon_db "old_history" + echo "Done dump_horizon_db old_history" + + compare history_effects + compare history_ledgers + compare history_operations + compare history_operation_claimable_balances + compare history_operation_participants + compare history_trades + compare history_transactions + compare history_transaction_claimable_balances + compare history_transaction_participants +fi + +echo "OK" diff --git a/services/horizon/docker/verify-range/stellar-core.cfg b/services/horizon/docker/verify-range/stellar-core.cfg new file mode 100644 index 0000000000..3dc6c70fb0 --- /dev/null +++ b/services/horizon/docker/verify-range/stellar-core.cfg @@ -0,0 +1,60 @@ +AUTOMATIC_MAINTENANCE_PERIOD=0 + +HTTP_PORT=11626 +PUBLIC_HTTP_PORT=true +LOG_FILE_PATH="" + +DATABASE="postgresql://dbname=core host=localhost user=postgres password=postgres" +NETWORK_PASSPHRASE="Public Global Stellar Network ; September 2015" +CATCHUP_RECENT=100 + +AUTOMATIC_MAINTENANCE_COUNT=0 + +NODE_NAMES=[ +"GAOO3LWBC4XF6VWRP5ESJ6IBHAISVJMSBTALHOQM2EZG7Q477UWA6L7U eno", +"GAXP5DW4CVCW2BJNPFGTWCEGZTJKTNWFQQBE5SCWNJIJ54BOHR3WQC3W moni", +"GBFZFQRGOPQC5OEAWO76NOY6LBRLUNH4I5QYPUYAK53QSQWVTQ2D4FT5 dzham", +"GDXWQCSKVYAJSUGR2HBYVFVR7NA7YWYSYK3XYKKFO553OQGOHAUP2PX2 jianing", +"GCJCSMSPIWKKPR7WEPIQG63PDF7JGGEENRC33OKVBSPUDIRL6ZZ5M7OO tempo.eu.com", +"GCCW4H2DKAC7YYW62H3ZBDRRE5KXRLYLI4T5QOSO6EAMUOE37ICSKKRJ sparrow_tw", +"GD5DJQDDBKGAYNEAXU562HYGOOSYAEOO6AS53PZXBOZGCP5M2OPGMZV3 fuxi.lab", +"GBGGNBZVYNMVLCWNQRO7ASU6XX2MRPITAGLASRWOWLB4ZIIPHMGNMC4I huang.lab", +"GDPJ4DPPFEIP2YTSQNOKT7NMLPKU2FFVOEIJMG36RCMBWBUR4GTXLL57 nezha.lab", +"GCDLFPQ76D6YUSCUECLKI3AFEVXFWVRY2RZH2YQNYII35FDECWUGV24T SnT.Lux", +"GBAR4OY6T6M4P344IF5II5DNWHVUJU7OLQPSMG2FWVJAFF642BX5E3GB telindus", +# non validating +"GCGB2S2KGYARPVIA37HYZXVRM2YZUEXA6S33ZU5BUDC6THSB62LZSTYH sdf_watcher1", +"GCM6QMP3DLRPTAZW2UZPCPX2LF3SXWXKPMP3GKFZBDSF3QZGV2G5QSTK sdf_watcher2", +"GABMKJM6I25XI4K7U6XWMULOUQIQ27BCTMLS6BYYSOWKTBUXVRJSXHYQ sdf_watcher3", +# seem down +"GB6REF5GOGGSEHZ3L2YK6K4T4KX3YDMWHDCPMV7MZJDLHBDNZXEPRBGM donovan", +"GBGR22MRCIVW2UZHFXMY5UIBJGPYABPQXQ5GGMNCSUM2KHE3N6CNH6G5 nelisky1", +"GA2DE5AQF32LU5OZ5OKAFGPA2DLW4H6JHPGYJUVTNS3W7N2YZCTQFFV6 nelisky2", +"GDJ73EX25GGUVMUBCK6DPSTJLYP3IC7I3H2URLXJQ5YP56BW756OUHIG w00kie", +"GAM7A32QZF5PJASRSGVFPAB36WWTHCBHO5CHG3WUFTUQPT7NZX3ONJU4 ptarasov" +] + +KNOWN_PEERS=[ +"core-live-a.stellar.org:11625", +"core-live-b.stellar.org:11625", +"core-live-c.stellar.org:11625", +"confucius.strllar.org", +"stellar1.bitventure.co", +"stellar.256kw.com"] + +UNSAFE_QUORUM=true + +[QUORUM_SET] +VALIDATORS=[ +"$sdf_watcher1","$sdf_watcher2","$sdf_watcher3" +] + +# Stellar.org history store +[HISTORY.sdf1] +get="curl -sf http://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-live/core_live_001/{0} -o {1}" + +[HISTORY.sdf2] +get="curl -sf http://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-live/core_live_002/{0} -o {1}" + +[HISTORY.sdf3] +get="curl 
-sf http://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-live/core_live_003/{0} -o {1}" diff --git a/services/horizon/internal/action_offers_test.go b/services/horizon/internal/action_offers_test.go new file mode 100644 index 0000000000..13458db9fe --- /dev/null +++ b/services/horizon/internal/action_offers_test.go @@ -0,0 +1,86 @@ +package horizon + +import ( + "context" + "encoding/json" + "testing" + "time" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/xdr" +) + +func TestOfferActions_Show(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + q := &history.Q{ht.HorizonSession()} + ctx := context.Background() + + err := q.UpdateLastLedgerIngest(ctx, 100) + ht.Assert.NoError(err) + err = q.UpdateIngestVersion(ctx, ingest.CurrentVersion) + ht.Assert.NoError(err) + + ledgerCloseTime := time.Now().Unix() + _, err = q.InsertLedger(ctx, xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 100, + ScpValue: xdr.StellarValue{ + CloseTime: xdr.TimePoint(ledgerCloseTime), + }, + }, + }, 0, 0, 0, 0, 0) + ht.Assert.NoError(err) + + issuer := xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + nativeAsset := xdr.MustNewNativeAsset() + usdAsset := xdr.MustNewCreditAsset("USD", issuer.Address()) + eurAsset := xdr.MustNewCreditAsset("EUR", issuer.Address()) + + eurOffer := history.Offer{ + SellerID: issuer.Address(), + OfferID: int64(4), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(500), + Pricen: int32(1), + Priced: int32(1), + Price: float64(1), + Flags: 1, + LastModifiedLedger: uint32(3), + } + usdOffer := history.Offer{ + SellerID: issuer.Address(), + OfferID: int64(6), + + BuyingAsset: usdAsset, + SellingAsset: eurAsset, + + Amount: int64(500), + Pricen: int32(1), + Priced: int32(1), + Price: float64(1), + Flags: 1, + LastModifiedLedger: uint32(4), + } + + err = q.UpsertOffers(ctx, []history.Offer{eurOffer, usdOffer}) + ht.Assert.NoError(err) + + w := ht.Get("/offers") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(2, w.Body) + } + + w = ht.Get("/offers/4") + if ht.Assert.Equal(200, w.Code) { + var response horizon.Offer + err = json.Unmarshal(w.Body.Bytes(), &response) + ht.Assert.NoError(err) + ht.Assert.Equal(int64(4), response.ID) + } +} diff --git a/services/horizon/internal/actions/account.go b/services/horizon/internal/actions/account.go new file mode 100644 index 0000000000..82856de3b7 --- /dev/null +++ b/services/horizon/internal/actions/account.go @@ -0,0 +1,324 @@ +package actions + +import ( + "context" + "net/http" + "strings" + + protocol "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +// AccountInfo returns the information about an account identified by addr. 
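+// It loads the account entry, its data entries, signers, and trustlines from
+// the history database, resolves the ledger in which the account was last
+// modified, and assembles them into a protocol.Account resource.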
+func AccountInfo(ctx context.Context, hq *history.Q, addr string) (*protocol.Account, error) { + var ( + record history.AccountEntry + data []history.Data + signers []history.AccountSigner + trustlines []history.TrustLine + resouce protocol.Account + ) + + record, err := hq.GetAccountByID(ctx, addr) + if err != nil { + return nil, errors.Wrap(err, "getting history account record") + } + + data, err = hq.GetAccountDataByAccountID(ctx, addr) + if err != nil { + return nil, errors.Wrap(err, "getting history account data") + } + + signers, err = hq.GetAccountSignersByAccountID(ctx, addr) + if err != nil { + return nil, errors.Wrap(err, "getting history signers") + } + + trustlines, err = hq.GetSortedTrustLinesByAccountID(ctx, addr) + if err != nil { + return nil, errors.Wrap(err, "getting history trustlines") + } + + ledger, err := getLedgerBySequence(ctx, hq, int32(record.LastModifiedLedger)) + if err != nil { + return nil, err + } + + err = resourceadapter.PopulateAccountEntry( + ctx, + &resouce, + record, + data, + signers, + trustlines, + ledger, + ) + if err != nil { + return nil, errors.Wrap(err, "populating account entry") + } + + return &resouce, nil +} + +// AccountsQuery query struct for accounts end-point +type AccountsQuery struct { + Signer string `schema:"signer" valid:"accountID,optional"` + Sponsor string `schema:"sponsor" valid:"accountID,optional"` + AssetFilter string `schema:"asset" valid:"asset,optional"` + LiquidityPool string `schema:"liquidity_pool" valid:"sha256,optional"` +} + +// URITemplate returns a rfc6570 URI template the query struct +func (q AccountsQuery) URITemplate() string { + return getURITemplate(&q, "accounts", true) +} + +var invalidAccountsParams = problem.P{ + Type: "invalid_accounts_params", + Title: "Invalid Accounts Parameters", + Status: http.StatusBadRequest, + Detail: "Exactly one filter is required. Please ensure that you are including a signer, sponsor, asset, or liquidity pool filter.", +} + +// Validate runs custom validations. +func (q AccountsQuery) Validate() error { + if q.AssetFilter == "native" { + return problem.MakeInvalidFieldProblem( + "asset", + errors.New("you can't filter by asset: native"), + ) + } + + numParams, err := countNonEmpty(q.Sponsor, q.Signer, q.Asset(), q.LiquidityPool) + if err != nil { + return errors.Wrap(err, "Could not count request params") + } + if numParams != 1 { + return invalidAccountsParams + } + + return nil +} + +// Asset returns an xdr.Asset representing the Asset we want to find the trustees by. +func (q AccountsQuery) Asset() *xdr.Asset { + if len(q.AssetFilter) == 0 { + return nil + } + + parts := strings.Split(q.AssetFilter, ":") + asset := xdr.MustNewCreditAsset(parts[0], parts[1]) + + return &asset +} + +// GetAccountsHandler is the action handler for the /accounts endpoint +type GetAccountsHandler struct { + LedgerState *ledger.State +} + +// GetResourcePage returns a page containing the account records that have +// `signer` as a signer, `sponsor` as a sponsor, a trustline to the given +// `asset`, or participate in a particular `liquidity_pool`. 
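+// Exactly one of the sponsor, signer, asset, or liquidity_pool filters is
+// expected (see AccountsQuery.Validate); the matching accounts are hydrated
+// with their signers, trustlines, data entries, and last-modified ledgers
+// before being returned as resources.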
+func (handler GetAccountsHandler) GetResourcePage( + w HeaderWriter, + r *http.Request, +) ([]hal.Pageable, error) { + ctx := r.Context() + pq, err := GetPageQuery(handler.LedgerState, r, DisableCursorValidation) + if err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + qp := AccountsQuery{} + err = getParams(&qp, r) + if err != nil { + return nil, err + } + + var records []history.AccountEntry + + if len(qp.Sponsor) > 0 { + records, err = historyQ.AccountsForSponsor(ctx, qp.Sponsor, pq) + if err != nil { + return nil, errors.Wrap(err, "loading account records") + } + } else if len(qp.Signer) > 0 { + records, err = historyQ.AccountEntriesForSigner(ctx, qp.Signer, pq) + if err != nil { + return nil, errors.Wrap(err, "loading account records") + } + } else if len(qp.LiquidityPool) > 0 { + records, err = historyQ.AccountsForLiquidityPool(ctx, qp.LiquidityPool, pq) + if err != nil { + return nil, errors.Wrap(err, "loading account records") + } + } else { + records, err = historyQ.AccountsForAsset(ctx, *qp.Asset(), pq) + if err != nil { + return nil, errors.Wrap(err, "loading account records") + } + } + + accounts := make([]hal.Pageable, 0, len(records)) + + if len(records) == 0 { + // early return + return accounts, nil + } + + accountIDs := make([]string, 0, len(records)) + for _, record := range records { + accountIDs = append(accountIDs, record.AccountID) + } + + signers, err := handler.loadSigners(ctx, historyQ, accountIDs) + if err != nil { + return nil, err + } + + trustlines, err := handler.loadTrustlines(ctx, historyQ, accountIDs) + if err != nil { + return nil, err + } + + data, err := handler.loadData(ctx, historyQ, accountIDs) + if err != nil { + return nil, err + } + + ledgerCache := history.LedgerCache{} + for _, record := range records { + ledgerCache.Queue(int32(record.LastModifiedLedger)) + } + + if err := ledgerCache.Load(ctx, historyQ); err != nil { + return nil, errors.Wrap(err, "failed to load ledger batch") + } + + for _, record := range records { + var res protocol.Account + s := signers[record.AccountID] + t := trustlines[record.AccountID] + d := data[record.AccountID] + var ledger *history.Ledger + if l, ok := ledgerCache.Records[int32(record.LastModifiedLedger)]; ok { + ledger = &l + } + resourceadapter.PopulateAccountEntry(ctx, &res, record, d, s, t, ledger) + + accounts = append(accounts, res) + } + + return accounts, nil +} + +func (handler GetAccountsHandler) loadData(ctx context.Context, historyQ *history.Q, accounts []string) (map[string][]history.Data, error) { + data := make(map[string][]history.Data) + + records, err := historyQ.GetAccountDataByAccountsID(ctx, accounts) + if err != nil { + return data, errors.Wrap(err, "loading account data records by accounts id") + } + + for _, record := range records { + data[record.AccountID] = append(data[record.AccountID], record) + } + + return data, nil +} + +func (handler GetAccountsHandler) loadTrustlines(ctx context.Context, historyQ *history.Q, accounts []string) (map[string][]history.TrustLine, error) { + trustLines := make(map[string][]history.TrustLine) + + records, err := historyQ.GetSortedTrustLinesByAccountIDs(ctx, accounts) + if err != nil { + return trustLines, errors.Wrap(err, "loading trustline records by accounts") + } + + for _, record := range records { + trustLines[record.AccountID] = append(trustLines[record.AccountID], record) + } + + return trustLines, nil +} + +func (handler GetAccountsHandler) loadSigners(ctx 
context.Context, historyQ *history.Q, accounts []string) (map[string][]history.AccountSigner, error) { + signers := make(map[string][]history.AccountSigner) + + records, err := historyQ.SignersForAccounts(ctx, accounts) + if err != nil { + return signers, errors.Wrap(err, "loading account signers by account") + } + + for _, record := range records { + signers[record.Account] = append(signers[record.Account], record) + } + + return signers, nil +} + +func getLedgerBySequence(ctx context.Context, hq *history.Q, sequence int32) (*history.Ledger, error) { + ledger := &history.Ledger{} + err := hq.LedgerBySequence(ctx, ledger, sequence) + switch { + case hq.NoRows(err): + return nil, nil + case err != nil: + return nil, err + default: + return ledger, nil + } +} + +// AccountByIDQuery query struct for accounts/{account_id} end-point +type AccountByIDQuery struct { + AccountID string `schema:"account_id" valid:"accountID,optional"` +} + +// GetAccountByIDHandler is the action handler for the /accounts/{account_id} endpoint +type GetAccountByIDHandler struct{} + +type Account protocol.Account + +func (a Account) Equals(other StreamableObjectResponse) bool { + otherAccount, ok := other.(Account) + if !ok { + return false + } + return a.ID == otherAccount.ID +} + +func (handler GetAccountByIDHandler) GetResource( + w HeaderWriter, + r *http.Request, +) (StreamableObjectResponse, error) { + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + qp := AccountByIDQuery{} + err = getParams(&qp, r) + if err != nil { + return nil, err + } + account, err := AccountInfo(r.Context(), historyQ, qp.AccountID) + if err != nil { + return Account{}, err + } + return Account(*account), nil +} diff --git a/services/horizon/internal/actions/account_data.go b/services/horizon/internal/actions/account_data.go new file mode 100644 index 0000000000..9b3e7e499c --- /dev/null +++ b/services/horizon/internal/actions/account_data.go @@ -0,0 +1,68 @@ +package actions + +import ( + "io" + "net/http" + + "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" +) + +// AccountDataQuery query struct for account data end-point +type AccountDataQuery struct { + AccountID string `schema:"account_id" valid:"accountID"` + Key string `schema:"key" valid:"length(1|64)"` +} + +type accountDataResponse struct { + Value string `json:"value"` + Sponsor string `json:"sponsor,omitempty"` +} + +func (adr accountDataResponse) Equals(other StreamableObjectResponse) bool { + other, ok := other.(accountDataResponse) + if !ok { + return false + } + return adr == other +} + +type GetAccountDataHandler struct{} + +func (handler GetAccountDataHandler) GetResource(w HeaderWriter, r *http.Request) (StreamableObjectResponse, error) { + data, err := loadAccountData(r) + if err != nil { + return nil, err + } + response := accountDataResponse{Value: data.Value.Base64()} + if data.Sponsor.Valid { + response.Sponsor = data.Sponsor.String + } + return response, nil +} + +func (handler GetAccountDataHandler) WriteRawResponse(w io.Writer, r *http.Request) error { + data, err := loadAccountData(r) + if err != nil { + return err + } + _, err = w.Write(data.Value) + return err +} + +func loadAccountData(r *http.Request) (history.Data, error) { + qp := AccountDataQuery{} + err := getParams(&qp, r) + if err != nil { + return history.Data{}, err + } + historyQ, err := context.HistoryQFromRequest(r) + if err != nil { + return history.Data{}, err + } + data, 
err := historyQ.GetAccountDataByName(r.Context(), qp.AccountID, qp.Key) + if err != nil { + return history.Data{}, err + } + return data, nil +} diff --git a/services/horizon/internal/actions/account_test.go b/services/horizon/internal/actions/account_test.go new file mode 100644 index 0000000000..2d062f17a2 --- /dev/null +++ b/services/horizon/internal/actions/account_test.go @@ -0,0 +1,722 @@ +package actions + +import ( + "net/http/httptest" + "testing" + "time" + + "github.com/guregu/null" + + "github.com/stretchr/testify/assert" + + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/base" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +var ( + trustLineIssuer = "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" + accountOne = "GABGMPEKKDWR2WFH5AJOZV5PDKLJEHGCR3Q24ALETWR5H3A7GI3YTS7V" + accountTwo = "GADTXHUTHIAESMMQ2ZWSTIIGBZRLHUCBLCHPLLUEIAWDEFRDC4SYDKOZ" + signer = "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU" + sponsor = "GCO26ZSBD63TKYX45H2C7D2WOFWOUSG5BMTNC3BG4QMXM3PAYI6WHKVZ" + usd = xdr.MustNewCreditAsset("USD", trustLineIssuer) + euro = xdr.MustNewCreditAsset("EUR", trustLineIssuer) + + account1 = history.AccountEntry{ + LastModifiedLedger: 1234, + AccountID: accountOne, + Balance: 20000, + SequenceNumber: 223456789, + NumSubEntries: 10, + Flags: 1, + HomeDomain: "stellar.org", + MasterWeight: 1, + ThresholdLow: 2, + ThresholdMedium: 3, + ThresholdHigh: 4, + BuyingLiabilities: 3, + SellingLiabilities: 3, + } + + account2 = history.AccountEntry{ + LastModifiedLedger: 1234, + AccountID: accountTwo, + Balance: 50000, + SequenceNumber: 648736, + NumSubEntries: 10, + Flags: 2, + HomeDomain: "meridian.stellar.org", + MasterWeight: 5, + ThresholdLow: 6, + ThresholdMedium: 7, + ThresholdHigh: 8, + BuyingLiabilities: 30, + SellingLiabilities: 40, + } + + account3 = history.AccountEntry{ + LastModifiedLedger: 1234, + AccountID: signer, + Balance: 50000, + SequenceNumber: 648736, + NumSubEntries: 10, + Flags: 2, + MasterWeight: 5, + ThresholdLow: 6, + ThresholdMedium: 7, + ThresholdHigh: 8, + BuyingLiabilities: 30, + SellingLiabilities: 40, + NumSponsored: 1, + NumSponsoring: 2, + Sponsor: null.StringFrom(sponsor), + } + + eurTrustLine = history.TrustLine{ + AccountID: accountOne, + AssetType: euro.Type, + AssetIssuer: trustLineIssuer, + AssetCode: "EUR", + Balance: 20000, + LedgerKey: "eur-trustline1", + Limit: 223456789, + LiquidityPoolID: "", + BuyingLiabilities: 3, + SellingLiabilities: 4, + Flags: 1, + LastModifiedLedger: 1234, + Sponsor: null.String{}, + } + + usdTrustLine = history.TrustLine{ + AccountID: accountTwo, + AssetType: usd.Type, + AssetIssuer: trustLineIssuer, + AssetCode: "USD", + Balance: 10000, + LedgerKey: "usd-trustline1", + Limit: 123456789, + LiquidityPoolID: "", + BuyingLiabilities: 1, + SellingLiabilities: 2, + Flags: 0, + LastModifiedLedger: 1234, + Sponsor: null.String{}, + } + + data1 = history.Data{ + AccountID: accountOne, + Name: "test data", + // This also tests if base64 encoding is working as 0 is invalid UTF-8 byte + Value: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + LastModifiedLedger: 1234, + } + + data2 = history.Data{ + AccountID: accountTwo, + Name: "test data2", + Value: []byte{10, 11, 12, 13, 14, 15, 16, 17, 18, 19}, + LastModifiedLedger: 1234, + Sponsor: 
null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + } + + accountSigners = []history.AccountSigner{ + { + Account: accountOne, + Signer: accountOne, + Weight: 1, + }, + { + Account: accountTwo, + Signer: accountTwo, + Weight: 1, + }, + { + Account: accountOne, + Signer: signer, + Weight: 1, + }, + { + Account: accountTwo, + Signer: signer, + Weight: 2, + }, + { + Account: signer, + Signer: signer, + Weight: 3, + }, + } +) + +func TestAccountInfo(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + + var thresholds xdr.Thresholds + tt.Assert.NoError( + xdr.SafeUnmarshalBase64("AQAAAA==", &thresholds), + ) + accountID := "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU" + accountEntry := history.AccountEntry{ + LastModifiedLedger: 4, + AccountID: accountID, + Balance: 9999999900, + SequenceNumber: 8589934593, + NumSubEntries: 1, + InflationDestination: "", + HomeDomain: "", + MasterWeight: thresholds[0], + ThresholdLow: thresholds[1], + ThresholdMedium: thresholds[2], + ThresholdHigh: thresholds[3], + Flags: 0, + } + err := q.UpsertAccounts(tt.Ctx, []history.AccountEntry{accountEntry}) + assert.NoError(t, err) + + tt.Assert.NoError(err) + + err = q.UpsertTrustLines(tt.Ctx, []history.TrustLine{ + { + AccountID: accountID, + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", + AssetCode: "USD", + Balance: 0, + LedgerKey: "test-usd-tl-1", + Limit: 9223372036854775807, + LiquidityPoolID: "", + BuyingLiabilities: 0, + SellingLiabilities: 0, + Flags: 1, + LastModifiedLedger: 6, + Sponsor: null.String{}, + }, + { + AccountID: accountID, + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", + AssetCode: "EUR", + Balance: 0, + LedgerKey: "test-eur-tl-1", + Limit: 9223372036854775807, + LiquidityPoolID: "", + BuyingLiabilities: 0, + SellingLiabilities: 0, + Flags: 1, + LastModifiedLedger: 1234, + Sponsor: null.String{}, + }, + }) + assert.NoError(t, err) + + ledgerFourCloseTime := time.Now().Unix() + _, err = q.InsertLedger(tt.Ctx, xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 4, + ScpValue: xdr.StellarValue{ + CloseTime: xdr.TimePoint(ledgerFourCloseTime), + }, + }, + }, 0, 0, 0, 0, 0) + assert.NoError(t, err) + + account, err := AccountInfo(tt.Ctx, &history.Q{tt.HorizonSession()}, accountID) + tt.Assert.NoError(err) + + tt.Assert.Equal("8589934593", account.Sequence) + tt.Assert.Equal(uint32(4), account.LastModifiedLedger) + tt.Assert.NotNil(account.LastModifiedTime) + tt.Assert.Equal(ledgerFourCloseTime, account.LastModifiedTime.Unix()) + tt.Assert.Len(account.Balances, 3) + + tt.Assert.Equal(account.Balances[0].Code, "EUR") + tt.Assert.Equal(account.Balances[1].Code, "USD") + tt.Assert.Equal( + "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", + account.Balances[1].Issuer, + ) + tt.Assert.NotEqual(uint32(0), account.Balances[1].LastModifiedLedger) + tt.Assert.Equal(account.Balances[2].Type, "native") + tt.Assert.Equal(uint32(0), account.Balances[2].LastModifiedLedger) + tt.Assert.Len(account.Signers, 1) + + // Regression: no trades link + tt.Assert.Contains(account.Links.Trades.Href, "/trades") + // Regression: no data link + tt.Assert.Contains(account.Links.Data.Href, "/data/{key}") + tt.Assert.True(account.Links.Data.Templated) + + // try to fetch account which does not exist + _, err = 
AccountInfo(tt.Ctx, &history.Q{tt.HorizonSession()}, "GDBAPLDCAEJV6LSEDFEAUDAVFYSNFRUYZ4X75YYJJMMX5KFVUOHX46SQ") + tt.Assert.True(q.NoRows(errors.Cause(err))) +} + +func TestGetAccountsHandlerPageNoResults(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &history.Q{tt.HorizonSession()} + handler := &GetAccountsHandler{} + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "signer": signer, + }, + map[string]string{}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 0) +} + +func TestGetAccountsHandlerPageResultsBySigner(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &history.Q{tt.HorizonSession()} + handler := &GetAccountsHandler{} + + err := q.UpsertAccounts(tt.Ctx, []history.AccountEntry{account1, account2, account3}) + assert.NoError(t, err) + + for _, row := range accountSigners { + q.CreateAccountSigner(tt.Ctx, row.Account, row.Signer, row.Weight, nil) + } + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "signer": signer, + }, + map[string]string{}, + q, + ), + ) + + tt.Assert.NoError(err) + tt.Assert.Equal(3, len(records)) + + want := map[string]bool{ + accountOne: true, + accountTwo: true, + signer: true, + } + + for _, row := range records { + result := row.(protocol.Account) + tt.Assert.True(want[result.AccountID]) + delete(want, result.AccountID) + } + + tt.Assert.Empty(want) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "signer": signer, + "cursor": accountOne, + }, + map[string]string{}, + q, + ), + ) + + tt.Assert.NoError(err) + tt.Assert.Equal(2, len(records)) + + want = map[string]bool{ + accountTwo: true, + signer: true, + } + + for _, row := range records { + result := row.(protocol.Account) + tt.Assert.True(want[result.AccountID]) + delete(want, result.AccountID) + } + + tt.Assert.Empty(want) +} + +func TestGetAccountsHandlerPageResultsBySponsor(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &history.Q{tt.HorizonSession()} + handler := &GetAccountsHandler{} + + err := q.UpsertAccounts(tt.Ctx, []history.AccountEntry{account1, account2, account3}) + assert.NoError(t, err) + + for _, row := range accountSigners { + q.CreateAccountSigner(tt.Ctx, row.Account, row.Signer, row.Weight, nil) + } + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "sponsor": sponsor, + }, + map[string]string{}, + q, + ), + ) + + tt.Assert.NoError(err) + tt.Assert.Equal(1, len(records)) + tt.Assert.Equal(signer, records[0].(protocol.Account).ID) +} + +func TestGetAccountsHandlerPageResultsByAsset(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &history.Q{tt.HorizonSession()} + handler := &GetAccountsHandler{} + + err := q.UpsertAccounts(tt.Ctx, []history.AccountEntry{account1, account2}) + assert.NoError(t, err) + ledgerCloseTime := time.Now().Unix() + _, err = q.InsertLedger(tt.Ctx, xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 1234, + ScpValue: xdr.StellarValue{ + CloseTime: xdr.TimePoint(ledgerCloseTime), + }, + }, + }, 0, 0, 0, 0, 0) + assert.NoError(t, err) + + for _, row := range accountSigners { + _, err = q.CreateAccountSigner(tt.Ctx, row.Account, row.Signer, row.Weight, nil) 
+ tt.Assert.NoError(err) + } + + err = q.UpsertAccountData(tt.Ctx, []history.Data{data1, data2}) + assert.NoError(t, err) + + var assetType, code, issuer string + usd.MustExtract(&assetType, &code, &issuer) + params := map[string]string{ + "asset": code + ":" + issuer, + } + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + params, + map[string]string{}, + q, + ), + ) + + tt.Assert.NoError(err) + tt.Assert.Equal(0, len(records)) + + err = q.UpsertTrustLines(tt.Ctx, []history.TrustLine{ + eurTrustLine, + usdTrustLine, + }) + assert.NoError(t, err) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + params, + map[string]string{}, + q, + ), + ) + + tt.Assert.NoError(err) + tt.Assert.Equal(1, len(records)) + result := records[0].(protocol.Account) + tt.Assert.Equal(accountTwo, result.AccountID) + tt.Assert.NotNil(result.LastModifiedTime) + tt.Assert.Equal(ledgerCloseTime, result.LastModifiedTime.Unix()) + tt.Assert.Len(result.Balances, 2) + tt.Assert.Len(result.Signers, 2) + + _, ok := result.Data[data2.Name] + tt.Assert.True(ok) +} + +func createLP(tt *test.T, q *history.Q) history.LiquidityPool { + lp := history.LiquidityPool{ + PoolID: "cafebabedeadbeef000000000000000000000000000000000000000000000000", + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: 34, + TrustlineCount: 52115, + ShareCount: 412241, + AssetReserves: []history.LiquidityPoolAssetReserve{ + { + Asset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Reserve: 450, + }, + { + Asset: xdr.MustNewNativeAsset(), + Reserve: 450, + }, + }, + LastModifiedLedger: 123, + } + + err := q.UpsertLiquidityPools(tt.Ctx, []history.LiquidityPool{lp}) + tt.Assert.NoError(err) + return lp +} + +func TestGetAccountsHandlerPageResultsByLiquidityPool(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &history.Q{tt.HorizonSession()} + handler := &GetAccountsHandler{} + + err := q.UpsertAccounts(tt.Ctx, []history.AccountEntry{account1, account2}) + assert.NoError(t, err) + + ledgerCloseTime := time.Now().Unix() + _, err = q.InsertLedger(tt.Ctx, xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 1234, + ScpValue: xdr.StellarValue{ + CloseTime: xdr.TimePoint(ledgerCloseTime), + }, + }, + }, 0, 0, 0, 0, 0) + assert.NoError(t, err) + var assetType, code, issuer string + usd.MustExtract(&assetType, &code, &issuer) + params := map[string]string{ + "liquidity_pool": "cafebabedeadbeef000000000000000000000000000000000000000000000000", + } + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + params, + map[string]string{}, + q, + ), + ) + + tt.Assert.NoError(err) + tt.Assert.Equal(0, len(records)) + + lp := createLP(tt, q) + err = q.UpsertTrustLines(tt.Ctx, []history.TrustLine{ + { + AccountID: account1.AccountID, + AssetType: xdr.AssetTypeAssetTypePoolShare, + Balance: 10, + LedgerKey: "pool-share-1", + Limit: 100, + LiquidityPoolID: lp.PoolID, + Flags: uint32(xdr.TrustLineFlagsAuthorizedFlag), + LastModifiedLedger: lp.LastModifiedLedger, + }, + }) + assert.NoError(t, err) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + params, + map[string]string{}, + q, + ), + ) + + tt.Assert.NoError(err) + tt.Assert.Equal(1, len(records)) + result := records[0].(protocol.Account) + tt.Assert.Equal(accountOne, result.AccountID) + tt.Assert.NotNil(result.LastModifiedTime) + 
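// Illustrative sketch, not part of the upstream patch: it shows how the "asset"
// filter value used by these account tests ("CODE:ISSUER") can be derived from an
// xdr.Asset, mirroring the usd.MustExtract call above. The standalone program and
// the fmt import are added only for illustration; the issuer address is the
// trustLineIssuer constant from this test file.
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	usd := xdr.MustNewCreditAsset("USD", "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")

	var assetType, code, issuer string
	usd.MustExtract(&assetType, &code, &issuer)

	// GetAccountsHandler expects the filter in "CODE:ISSUER" form.
	fmt.Println(code + ":" + issuer)
}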
tt.Assert.Equal(ledgerCloseTime, result.LastModifiedTime.Unix()) + tt.Assert.Len(result.Balances, 2) + tt.Assert.True(*result.Balances[0].IsAuthorized) + tt.Assert.True(*result.Balances[0].IsAuthorizedToMaintainLiabilities) + result.Balances[0].IsAuthorized = nil + result.Balances[0].IsAuthorizedToMaintainLiabilities = nil + tt.Assert.Equal( + protocol.Balance{ + Balance: "0.0000010", + LiquidityPoolId: "cafebabedeadbeef000000000000000000000000000000000000000000000000", + Limit: "0.0000100", + BuyingLiabilities: "", + SellingLiabilities: "", + Sponsor: "", + LastModifiedLedger: 123, + IsAuthorized: nil, + IsAuthorizedToMaintainLiabilities: nil, + IsClawbackEnabled: nil, + Asset: base.Asset{ + Type: "liquidity_pool_shares", + }, + }, + result.Balances[0], + ) +} + +func TestGetAccountsHandlerInvalidParams(t *testing.T) { + testCases := []struct { + desc string + params map[string]string + expectedInvalidField string + expectedErr string + isInvalidAccountsParams bool + }{ + { + desc: "empty filters", + isInvalidAccountsParams: true, + }, + { + desc: "signer and asset", + params: map[string]string{ + "signer": accountOne, + "asset": "USD" + ":" + accountOne, + }, + isInvalidAccountsParams: true, + }, + { + desc: "signer and liquidity pool", + params: map[string]string{ + "signer": accountOne, + "liquidity_pool": "48672641c88264272787837f5c306f5ce93be3c2c7df68a092fbea55f5f4aa1d", + }, + isInvalidAccountsParams: true, + }, + { + desc: "signer and sponsor", + params: map[string]string{ + "signer": accountOne, + "sponsor": accountTwo, + }, + isInvalidAccountsParams: true, + }, + { + desc: "asset and sponsor", + params: map[string]string{ + "asset": "USD" + ":" + accountOne, + "sponsor": accountTwo, + }, + isInvalidAccountsParams: true, + }, + { + desc: "asset and liquidity pool", + params: map[string]string{ + "asset": "USD" + ":" + accountOne, + "liquidity_pool": "48672641c88264272787837f5c306f5ce93be3c2c7df68a092fbea55f5f4aa1d", + }, + isInvalidAccountsParams: true, + }, + { + desc: "sponsor and liquidity pool", + params: map[string]string{ + "sponsor": accountTwo, + "liquidity_pool": "48672641c88264272787837f5c306f5ce93be3c2c7df68a092fbea55f5f4aa1d", + }, + isInvalidAccountsParams: true, + }, + { + desc: "filtering by native asset", + params: map[string]string{ + "asset": "native", + }, + expectedInvalidField: "asset", + expectedErr: "you can't filter by asset: native", + }, + { + desc: "invalid asset", + params: map[string]string{ + "asset_issuer": accountOne, + "asset": "USDCOP:someissuer", + }, + expectedInvalidField: "asset", + expectedErr: customTagsErrorMessages["asset"], + }, + { + desc: "invalid liquidity pool", + params: map[string]string{ + "liquidity_pool": "USDCOP:someissuer", + }, + expectedInvalidField: "liquidity_pool", + expectedErr: "USDCOP:someissuer does not validate as sha256", + }, + { + desc: "liquidity pool too short", + params: map[string]string{ + "liquidity_pool": "48672641c882642727", + }, + expectedInvalidField: "liquidity_pool", + expectedErr: "48672641c882642727 does not validate as sha256", + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + q := &history.Q{tt.HorizonSession()} + handler := &GetAccountsHandler{} + + _, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + tc.params, + map[string]string{}, + q, + ), + ) + tt.Assert.Error(err) + if tc.isInvalidAccountsParams { + tt.Assert.Equal(invalidAccountsParams, err) + } else { + if tt.Assert.IsType(&problem.P{}, 
err) { + p := err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal(tc.expectedInvalidField, p.Extras["invalid_field"]) + tt.Assert.Equal( + tc.expectedErr, + p.Extras["reason"], + ) + } + } + }) + } +} + +func TestAccountQueryURLTemplate(t *testing.T) { + tt := assert.New(t) + expected := "/accounts{?signer,sponsor,asset,liquidity_pool,cursor,limit,order}" + accountsQuery := AccountsQuery{} + tt.Equal(expected, accountsQuery.URITemplate()) +} diff --git a/services/horizon/internal/actions/asset.go b/services/horizon/internal/actions/asset.go new file mode 100644 index 0000000000..27235fb5eb --- /dev/null +++ b/services/horizon/internal/actions/asset.go @@ -0,0 +1,171 @@ +package actions + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +// AssetStatsHandler is the action handler for the /asset endpoint +type AssetStatsHandler struct { + LedgerState *ledger.State +} + +func (handler AssetStatsHandler) validateAssetParams(code, issuer string, pq db2.PageQuery) error { + if code != "" { + if !xdr.ValidAssetCode.MatchString(code) { + return problem.MakeInvalidFieldProblem( + "asset_code", + fmt.Errorf("%s is not a valid asset code", code), + ) + } + } + + if issuer != "" { + if _, err := xdr.AddressToAccountId(issuer); err != nil { + return problem.MakeInvalidFieldProblem( + "asset_issuer", + fmt.Errorf("%s is not a valid asset issuer", issuer), + ) + } + } + + if pq.Cursor != "" { + parts := strings.SplitN(pq.Cursor, "_", 3) + if len(parts) != 3 { + return problem.MakeInvalidFieldProblem( + "cursor", + errors.New("the cursor is not a valid paging_token"), + ) + } + + cursorCode, cursorIssuer, assetType := parts[0], parts[1], parts[2] + if !xdr.ValidAssetCode.MatchString(cursorCode) { + return problem.MakeInvalidFieldProblem( + "cursor", + fmt.Errorf("%s is not a valid asset code", cursorCode), + ) + } + + if _, err := xdr.AddressToAccountId(cursorIssuer); err != nil { + return problem.MakeInvalidFieldProblem( + "cursor", + fmt.Errorf("%s is not a valid asset issuer", cursorIssuer), + ) + } + + if _, ok := xdr.StringToAssetType[assetType]; !ok { + return problem.MakeInvalidFieldProblem( + "cursor", + fmt.Errorf("%s is not a valid asset type", assetType), + ) + } + + } + + return nil +} + +func (handler AssetStatsHandler) findIssuersForAssets( + ctx context.Context, + historyQ *history.Q, + assetStats []history.ExpAssetStat, +) (map[string]history.AccountEntry, error) { + issuerSet := map[string]bool{} + issuers := []string{} + for _, assetStat := range assetStats { + if issuerSet[assetStat.AssetIssuer] { + continue + } + issuerSet[assetStat.AssetIssuer] = true + issuers = append(issuers, assetStat.AssetIssuer) + } + + accountsByID := map[string]history.AccountEntry{} + accounts, err := historyQ.GetAccountsByIDs(ctx, issuers) + if err != nil { + return nil, err + } + for _, account := range accounts { + accountsByID[account.AccountID] = account + delete(issuerSet, account.AccountID) + } + + // Note it's possible that no accounts can be 
found for certain issuers. + // That can occur because an account can be removed when there are only empty trustlines + // pointing to it. We still continue to serve asset stats for such issuers. + + return accountsByID, nil +} + +// GetResourcePage returns a page of offers. +func (handler AssetStatsHandler) GetResourcePage( + w HeaderWriter, + r *http.Request, +) ([]hal.Pageable, error) { + ctx := r.Context() + + code, err := getString(r, "asset_code") + if err != nil { + return nil, err + } + + issuer, err := getString(r, "asset_issuer") + if err != nil { + return nil, err + } + + pq, err := GetPageQuery(handler.LedgerState, r, DisableCursorValidation) + if err != nil { + return nil, err + } + + if err = handler.validateAssetParams(code, issuer, pq); err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + assetStats, err := historyQ.GetAssetStats(ctx, code, issuer, pq) + if err != nil { + return nil, err + } + + issuerAccounts, err := handler.findIssuersForAssets(ctx, historyQ, assetStats) + if err != nil { + return nil, err + } + + var response []hal.Pageable + for _, record := range assetStats { + var assetStatResponse horizon.AssetStat + + err := resourceadapter.PopulateAssetStat( + ctx, + &assetStatResponse, + record, + issuerAccounts[record.AssetIssuer], + ) + if err != nil { + return nil, err + } + response = append(response, assetStatResponse) + } + + return response, nil +} diff --git a/services/horizon/internal/actions/asset_test.go b/services/horizon/internal/actions/asset_test.go new file mode 100644 index 0000000000..fcedf5985d --- /dev/null +++ b/services/horizon/internal/actions/asset_test.go @@ -0,0 +1,486 @@ +package actions + +import ( + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/base" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +func TestAssetStatsValidation(t *testing.T) { + handler := AssetStatsHandler{} + + for _, testCase := range []struct { + name string + queryParams map[string]string + expectedErrorField string + expectedError string + }{ + { + "invalid asset code", + map[string]string{ + "asset_code": "tooooooooolong", + }, + "asset_code", + "not a valid asset code", + }, + { + "invalid asset issuer", + map[string]string{ + "asset_issuer": "invalid", + }, + "asset_issuer", + "not a valid asset issuer", + }, + { + "cursor has too many underscores", + map[string]string{ + "cursor": "ABC_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum4_", + }, + "cursor", + "credit_alphanum4_ is not a valid asset type", + }, + { + "invalid cursor code", + map[string]string{ + "cursor": "tooooooooolong_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum12", + }, + "cursor", + "not a valid asset code", + }, + { + "invalid cursor issuer", + map[string]string{ + "cursor": "ABC_invalidissuer_credit_alphanum4", + }, + "cursor", + "not a valid asset issuer", + }, + { + "invalid cursor type", + map[string]string{ + "cursor": "ABC_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum123", + }, + "cursor", + "credit_alphanum123 is not a valid asset type", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + r := 
makeRequest(t, testCase.queryParams, map[string]string{}, nil) + _, err := handler.GetResourcePage(httptest.NewRecorder(), r) + if err == nil { + t.Fatalf("expected error %v but got %v", testCase.expectedError, err) + } + + problem := err.(*problem.P) + if field := problem.Extras["invalid_field"]; field != testCase.expectedErrorField { + t.Fatalf( + "expected error field %v but got %v", + testCase.expectedErrorField, + field, + ) + } + + reason := problem.Extras["reason"] + if !strings.Contains(reason.(string), testCase.expectedError) { + t.Fatalf("expected reason %v but got %v", testCase.expectedError, reason) + } + }) + } +} + +func TestAssetStats(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + handler := AssetStatsHandler{} + + issuer := history.AccountEntry{ + AccountID: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + Flags: uint32(xdr.AccountFlagsAuthRequiredFlag) | + uint32(xdr.AccountFlagsAuthImmutableFlag) | + uint32(xdr.AccountFlagsAuthClawbackEnabledFlag), + } + issuerFlags := horizon.AccountFlags{ + AuthRequired: true, + AuthImmutable: true, + AuthClawbackEnabled: true, + } + otherIssuer := history.AccountEntry{ + AccountID: "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + HomeDomain: "xim.com", + } + + usdAssetStat := history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: issuer.AccountID, + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + ClaimableBalances: 1, + LiquidityPools: 5, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "10", + LiquidityPools: "20", + }, + Amount: "1", + NumAccounts: 2, + } + usdAssetStatResponse := horizon.AssetStat{ + Accounts: horizon.AssetStatAccounts{ + Authorized: usdAssetStat.Accounts.Authorized, + AuthorizedToMaintainLiabilities: usdAssetStat.Accounts.AuthorizedToMaintainLiabilities, + Unauthorized: usdAssetStat.Accounts.Unauthorized, + }, + NumClaimableBalances: usdAssetStat.Accounts.ClaimableBalances, + NumLiquidityPools: usdAssetStat.Accounts.LiquidityPools, + Balances: horizon.AssetStatBalances{ + Authorized: "0.0000001", + AuthorizedToMaintainLiabilities: "0.0000002", + Unauthorized: "0.0000003", + }, + ClaimableBalancesAmount: "0.0000010", + LiquidityPoolsAmount: "0.0000020", + Amount: "0.0000001", + NumAccounts: usdAssetStat.NumAccounts, + Asset: base.Asset{ + Type: "credit_alphanum4", + Code: usdAssetStat.AssetCode, + Issuer: usdAssetStat.AssetIssuer, + }, + PT: usdAssetStat.PagingToken(), + Flags: issuerFlags, + } + + etherAssetStat := history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: issuer.AccountID, + AssetCode: "ETHER", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + AuthorizedToMaintainLiabilities: 2, + Unauthorized: 3, + ClaimableBalances: 0, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "23", + AuthorizedToMaintainLiabilities: "46", + Unauthorized: "92", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "23", + NumAccounts: 1, + } + etherAssetStatResponse := horizon.AssetStat{ + Accounts: horizon.AssetStatAccounts{ + Authorized: etherAssetStat.Accounts.Authorized, + AuthorizedToMaintainLiabilities: etherAssetStat.Accounts.AuthorizedToMaintainLiabilities, + Unauthorized: etherAssetStat.Accounts.Unauthorized, + }, + 
NumClaimableBalances: etherAssetStat.Accounts.ClaimableBalances, + Balances: horizon.AssetStatBalances{ + Authorized: "0.0000023", + AuthorizedToMaintainLiabilities: "0.0000046", + Unauthorized: "0.0000092", + }, + ClaimableBalancesAmount: "0.0000000", + LiquidityPoolsAmount: "0.0000000", + Amount: "0.0000023", + NumAccounts: etherAssetStat.NumAccounts, + Asset: base.Asset{ + Type: "credit_alphanum4", + Code: etherAssetStat.AssetCode, + Issuer: etherAssetStat.AssetIssuer, + }, + PT: etherAssetStat.PagingToken(), + Flags: issuerFlags, + } + + otherUSDAssetStat := history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: otherIssuer.AccountID, + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + ClaimableBalances: 0, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "1", + NumAccounts: 2, + } + otherUSDAssetStatResponse := horizon.AssetStat{ + Accounts: horizon.AssetStatAccounts{ + Authorized: otherUSDAssetStat.Accounts.Authorized, + AuthorizedToMaintainLiabilities: otherUSDAssetStat.Accounts.AuthorizedToMaintainLiabilities, + Unauthorized: otherUSDAssetStat.Accounts.Unauthorized, + }, + NumClaimableBalances: otherUSDAssetStat.Accounts.ClaimableBalances, + Balances: horizon.AssetStatBalances{ + Authorized: "0.0000001", + AuthorizedToMaintainLiabilities: "0.0000002", + Unauthorized: "0.0000003", + }, + ClaimableBalancesAmount: "0.0000000", + LiquidityPoolsAmount: "0.0000000", + Amount: "0.0000001", + NumAccounts: otherUSDAssetStat.NumAccounts, + Asset: base.Asset{ + Type: "credit_alphanum4", + Code: otherUSDAssetStat.AssetCode, + Issuer: otherUSDAssetStat.AssetIssuer, + }, + PT: otherUSDAssetStat.PagingToken(), + } + otherUSDAssetStatResponse.Links.Toml = hal.NewLink( + "https://" + otherIssuer.HomeDomain + "/.well-known/stellar.toml", + ) + + eurAssetStat := history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: otherIssuer.AccountID, + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 3, + AuthorizedToMaintainLiabilities: 4, + Unauthorized: 5, + ClaimableBalances: 0, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "111", + AuthorizedToMaintainLiabilities: "222", + Unauthorized: "333", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "111", + NumAccounts: 3, + } + eurAssetStatResponse := horizon.AssetStat{ + Accounts: horizon.AssetStatAccounts{ + Authorized: eurAssetStat.Accounts.Authorized, + AuthorizedToMaintainLiabilities: eurAssetStat.Accounts.AuthorizedToMaintainLiabilities, + Unauthorized: eurAssetStat.Accounts.Unauthorized, + }, + NumClaimableBalances: eurAssetStat.Accounts.ClaimableBalances, + Balances: horizon.AssetStatBalances{ + Authorized: "0.0000111", + AuthorizedToMaintainLiabilities: "0.0000222", + Unauthorized: "0.0000333", + }, + ClaimableBalancesAmount: "0.0000000", + LiquidityPoolsAmount: "0.0000000", + Amount: "0.0000111", + NumAccounts: eurAssetStat.NumAccounts, + Asset: base.Asset{ + Type: "credit_alphanum4", + Code: eurAssetStat.AssetCode, + Issuer: eurAssetStat.AssetIssuer, + }, + PT: eurAssetStat.PagingToken(), + } + eurAssetStatResponse.Links.Toml = hal.NewLink( + "https://" + otherIssuer.HomeDomain + "/.well-known/stellar.toml", + ) + + for _, assetStat := range []history.ExpAssetStat{ + etherAssetStat, + eurAssetStat, + 
otherUSDAssetStat, + usdAssetStat, + } { + numChanged, err := q.InsertAssetStat(tt.Ctx, assetStat) + tt.Assert.NoError(err) + tt.Assert.Equal(numChanged, int64(1)) + } + + for _, account := range []history.AccountEntry{ + issuer, + otherIssuer, + } { + accountEntry := history.AccountEntry{ + LastModifiedLedger: 100, + AccountID: account.AccountID, + Flags: account.Flags, + HomeDomain: account.HomeDomain, + } + + err := q.UpsertAccounts(tt.Ctx, []history.AccountEntry{accountEntry}) + tt.Assert.NoError(err) + } + + for _, testCase := range []struct { + name string + queryParams map[string]string + expected []horizon.AssetStat + }{ + { + "default parameters", + map[string]string{}, + []horizon.AssetStat{ + etherAssetStatResponse, + eurAssetStatResponse, + otherUSDAssetStatResponse, + usdAssetStatResponse, + }, + }, + { + "with cursor", + map[string]string{ + "cursor": etherAssetStatResponse.PagingToken(), + }, + []horizon.AssetStat{ + eurAssetStatResponse, + otherUSDAssetStatResponse, + usdAssetStatResponse, + }, + }, + { + "descending order", + map[string]string{"order": "desc"}, + []horizon.AssetStat{ + usdAssetStatResponse, + otherUSDAssetStatResponse, + eurAssetStatResponse, + etherAssetStatResponse, + }, + }, + { + "filter by asset code", + map[string]string{ + "asset_code": "USD", + }, + []horizon.AssetStat{ + otherUSDAssetStatResponse, + usdAssetStatResponse, + }, + }, + { + "filter by asset issuer", + map[string]string{ + "asset_issuer": issuer.AccountID, + }, + []horizon.AssetStat{ + etherAssetStatResponse, + usdAssetStatResponse, + }, + }, + { + "filter by both asset code and asset issuer", + map[string]string{ + "asset_code": "USD", + "asset_issuer": issuer.AccountID, + }, + []horizon.AssetStat{ + usdAssetStatResponse, + }, + }, + { + "filter produces empty set", + map[string]string{ + "asset_code": "XYZ", + "asset_issuer": issuer.AccountID, + }, + []horizon.AssetStat{}, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + r := makeRequest(t, testCase.queryParams, map[string]string{}, q) + results, err := handler.GetResourcePage(httptest.NewRecorder(), r) + assert.NoError(t, err) + + assert.Len(t, results, len(testCase.expected)) + for i, item := range results { + assetStat := item.(horizon.AssetStat) + assert.Equal(t, testCase.expected[i], assetStat) + } + }) + } +} + +func TestAssetStatsIssuerDoesNotExist(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + handler := AssetStatsHandler{} + + usdAssetStat := history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + ClaimableBalances: 0, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "0", + }, + Amount: "1", + NumAccounts: 2, + } + numChanged, err := q.InsertAssetStat(tt.Ctx, usdAssetStat) + tt.Assert.NoError(err) + tt.Assert.Equal(numChanged, int64(1)) + + r := makeRequest(t, map[string]string{}, map[string]string{}, q) + results, err := handler.GetResourcePage(httptest.NewRecorder(), r) + tt.Assert.NoError(err) + + expectedAssetStatResponse := horizon.AssetStat{ + Accounts: horizon.AssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + NumClaimableBalances: 0, + Balances: 
horizon.AssetStatBalances{
+			Authorized: "0.0000001",
+			AuthorizedToMaintainLiabilities: "0.0000002",
+			Unauthorized: "0.0000003",
+		},
+		ClaimableBalancesAmount: "0.0000000",
+		LiquidityPoolsAmount: "0.0000000",
+		Amount: "0.0000001",
+		NumAccounts: usdAssetStat.NumAccounts,
+		Asset: base.Asset{
+			Type: "credit_alphanum4",
+			Code: usdAssetStat.AssetCode,
+			Issuer: usdAssetStat.AssetIssuer,
+		},
+		PT: usdAssetStat.PagingToken(),
+	}
+
+	tt.Assert.Len(results, 1)
+	assetStat := results[0].(horizon.AssetStat)
+	tt.Assert.Equal(assetStat, expectedAssetStatResponse)
+}
diff --git a/services/horizon/internal/actions/claimable_balance.go b/services/horizon/internal/actions/claimable_balance.go
new file mode 100644
index 0000000000..dbfa02c298
--- /dev/null
+++ b/services/horizon/internal/actions/claimable_balance.go
@@ -0,0 +1,183 @@
+package actions
+
+import (
+	"context"
+	"net/http"
+	"strings"
+
+	"github.com/stellar/go/protocols/horizon"
+	protocol "github.com/stellar/go/protocols/horizon"
+	horizonContext "github.com/stellar/go/services/horizon/internal/context"
+	"github.com/stellar/go/services/horizon/internal/db2/history"
+	"github.com/stellar/go/services/horizon/internal/ledger"
+	"github.com/stellar/go/services/horizon/internal/resourceadapter"
+	"github.com/stellar/go/support/errors"
+	"github.com/stellar/go/support/render/hal"
+	"github.com/stellar/go/support/render/problem"
+	"github.com/stellar/go/xdr"
+)
+
+// GetClaimableBalanceByIDHandler is the action handler for all end-points returning a claimable balance.
+type GetClaimableBalanceByIDHandler struct{}
+
+// ClaimableBalanceQuery query struct for claimable_balances/{id} end-point
+type ClaimableBalanceQuery struct {
+	ID string `schema:"id" valid:"claimableBalanceID,required"`
+}
+
+// GetResource returns a single claimable balance.
+func (handler GetClaimableBalanceByIDHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + ctx := r.Context() + qp := ClaimableBalanceQuery{} + err := getParams(&qp, r) + if err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + cb, err := historyQ.FindClaimableBalanceByID(ctx, qp.ID) + if err != nil { + return nil, err + } + ledger := &history.Ledger{} + err = historyQ.LedgerBySequence(ctx, + ledger, + int32(cb.LastModifiedLedger), + ) + if historyQ.NoRows(err) { + ledger = nil + } else if err != nil { + return nil, errors.Wrap(err, "LedgerBySequence error") + } + + var resource protocol.ClaimableBalance + err = resourceadapter.PopulateClaimableBalance(ctx, &resource, cb, ledger) + if err != nil { + return nil, err + } + + return resource, nil +} + +// ClaimableBalancesQuery query struct for claimable_balances end-point +type ClaimableBalancesQuery struct { + AssetFilter string `schema:"asset" valid:"asset,optional"` + SponsorFilter string `schema:"sponsor" valid:"accountID,optional"` + ClaimantFilter string `schema:"claimant" valid:"accountID,optional"` +} + +func (q ClaimableBalancesQuery) asset() *xdr.Asset { + if len(q.AssetFilter) > 0 { + switch q.AssetFilter { + case "native": + asset := xdr.MustNewNativeAsset() + return &asset + default: + parts := strings.Split(q.AssetFilter, ":") + asset := xdr.MustNewCreditAsset(parts[0], parts[1]) + return &asset + } + } + return nil +} + +func (q ClaimableBalancesQuery) sponsor() *xdr.AccountId { + if q.SponsorFilter != "" { + return xdr.MustAddressPtr(q.SponsorFilter) + } + return nil +} + +func (q ClaimableBalancesQuery) claimant() *xdr.AccountId { + if q.ClaimantFilter != "" { + return xdr.MustAddressPtr(q.ClaimantFilter) + } + return nil +} + +// URITemplate returns a rfc6570 URI template the query struct +func (q ClaimableBalancesQuery) URITemplate() string { + return getURITemplate(&q, "claimable_balances", true) +} + +type GetClaimableBalancesHandler struct { + LedgerState *ledger.State +} + +// GetResourcePage returns a page of claimable balances. 
+func (handler GetClaimableBalancesHandler) GetResourcePage( + w HeaderWriter, + r *http.Request, +) ([]hal.Pageable, error) { + ctx := r.Context() + qp := ClaimableBalancesQuery{} + err := getParams(&qp, r) + if err != nil { + return nil, err + } + + pq, err := GetPageQuery(handler.LedgerState, r, DisableCursorValidation) + if err != nil { + return nil, err + } + + query := history.ClaimableBalancesQuery{ + PageQuery: pq, + Asset: qp.asset(), + Sponsor: qp.sponsor(), + Claimant: qp.claimant(), + } + + _, _, err = query.Cursor() + if err != nil { + return nil, problem.MakeInvalidFieldProblem( + "cursor", + errors.New("The first part should be a number higher than 0 and the second part should be a valid claimable balance ID"), + ) + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + claimableBalances, err := getClaimableBalancesPage(ctx, historyQ, query) + if err != nil { + return nil, err + } + + return claimableBalances, nil +} + +func getClaimableBalancesPage(ctx context.Context, historyQ *history.Q, query history.ClaimableBalancesQuery) ([]hal.Pageable, error) { + records, err := historyQ.GetClaimableBalances(ctx, query) + if err != nil { + return nil, err + } + + ledgerCache := history.LedgerCache{} + for _, record := range records { + ledgerCache.Queue(int32(record.LastModifiedLedger)) + } + if err := ledgerCache.Load(ctx, historyQ); err != nil { + return nil, errors.Wrap(err, "failed to load ledger batch") + } + + var claimableBalances []hal.Pageable + for _, record := range records { + var response horizon.ClaimableBalance + + var ledger *history.Ledger + if l, ok := ledgerCache.Records[int32(record.LastModifiedLedger)]; ok { + ledger = &l + } + + resourceadapter.PopulateClaimableBalance(ctx, &response, record, ledger) + claimableBalances = append(claimableBalances, response) + } + + return claimableBalances, nil +} diff --git a/services/horizon/internal/actions/claimable_balance_test.go b/services/horizon/internal/actions/claimable_balance_test.go new file mode 100644 index 0000000000..ad6bcd575a --- /dev/null +++ b/services/horizon/internal/actions/claimable_balance_test.go @@ -0,0 +1,539 @@ +package actions + +import ( + "net/http/httptest" + "testing" + + "github.com/guregu/null" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestGetClaimableBalanceByID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + + accountID := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + asset := xdr.MustNewCreditAsset("USD", accountID) + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + id, err := xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + cBalance := history.ClaimableBalance{ + BalanceID: id, + Claimants: []history.Claimant{ + { + Destination: accountID, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + Amount: 10, + LastModifiedLedger: 123, + } + + err = q.UpsertClaimableBalances(tt.Ctx, []history.ClaimableBalance{cBalance}) + tt.Assert.NoError(err) + + handler := 
GetClaimableBalanceByIDHandler{} + response, err := handler.GetResource(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{"id": id}, + q, + )) + tt.Assert.NoError(err) + + resource := response.(protocol.ClaimableBalance) + tt.Assert.Equal(id, resource.BalanceID) + + // try to fetch claimable balance which does not exist + balanceID = xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 1, 1}, + } + id, err = xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + _, err = handler.GetResource(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{"id": id}, + q, + )) + tt.Assert.Error(err) + tt.Assert.True(q.NoRows(errors.Cause(err))) + + // try to fetch a random invalid hex id + _, err = handler.GetResource(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{"id": "0000001112122"}, + q, + )) + tt.Assert.Error(err) + p := err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("id", p.Extras["invalid_field"]) + tt.Assert.Equal("0000001112122 does not validate as claimableBalanceID", p.Extras["reason"]) + + // try to fetch an empty id + _, err = handler.GetResource(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{"id": ""}, + q, + )) + tt.Assert.Error(err) + p = err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("id", p.Extras["invalid_field"]) + tt.Assert.Equal("non zero value required", p.Extras["reason"]) +} + +func buildClaimableBalance(tt *test.T, balanceIDHash xdr.Hash, accountID string, ledger int32, asset *xdr.Asset) history.ClaimableBalance { + balanceAsset := xdr.MustNewNativeAsset() + var sponsor null.String + if asset != nil { + balanceAsset = *asset + sponsor = null.StringFrom(accountID) + } + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &balanceIDHash, + } + id, err := xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + return history.ClaimableBalance{ + BalanceID: id, + Claimants: []history.Claimant{ + { + Destination: accountID, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: balanceAsset, + Amount: 10, + LastModifiedLedger: uint32(ledger), + Sponsor: sponsor, + } +} + +func TestGetClaimableBalances(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + + entriesMeta := []struct { + id xdr.Hash + accountID string + ledger int32 + asset *xdr.Asset + }{ + { + xdr.Hash{4, 0, 0}, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + 1235, + &usd, + }, + { + xdr.Hash{3, 0, 0}, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + 1235, + &euro, + }, + { + xdr.Hash{2, 0, 0}, + "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + 1234, + &usd, + }, + { + xdr.Hash{1, 0, 0}, + "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + 1233, + nil, + }, + } + + var hCBs []history.ClaimableBalance + + for _, e := range entriesMeta { + cb := buildClaimableBalance(tt, e.id, e.accountID, e.ledger, e.asset) + hCBs = append(hCBs, cb) + } + + err := q.UpsertClaimableBalances(tt.Ctx, hCBs) + tt.Assert.NoError(err) + + handler := GetClaimableBalancesHandler{} + response, err := handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{}, + q, + )) + tt.Assert.NoError(err) + 
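// Illustrative sketch, not part of the upstream patch: it shows how the hex
// claimable balance IDs used in these tests are produced from an
// xdr.ClaimableBalanceId via xdr.MarshalHex, the same call the test setup above
// uses. The standalone program and the fmt import are added only for illustration.
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	balanceID := xdr.ClaimableBalanceId{
		Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0,
		V0:   &xdr.Hash{1, 2, 3},
	}

	id, err := xdr.MarshalHex(balanceID)
	if err != nil {
		panic(err)
	}

	// Prints the same hex ID TestGetClaimableBalanceByID builds: an 8-character
	// type discriminant followed by the 64-character hash.
	fmt.Println(id)
}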
tt.Assert.Len(response, 4) + + // check response is sorted in ascending order + for entriesIndex, responseIndex := len(hCBs)-1, 0; entriesIndex >= 0; entriesIndex, responseIndex = entriesIndex-1, responseIndex+1 { + entry := hCBs[entriesIndex] + tt.Assert.Equal(entry.BalanceID, response[responseIndex].(protocol.ClaimableBalance).BalanceID) + } + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "cursor": response[3].(protocol.ClaimableBalance).PagingToken(), + }, + map[string]string{}, + q, + )) + tt.Assert.NoError(err) + tt.Assert.Len(response, 0) + + // test limit + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{"limit": "2"}, + map[string]string{}, + q, + )) + tt.Assert.NoError(err) + tt.Assert.Len(response, 2) + + // response should be the last 2 elements of entries sorted by ID + for entriesIndex, responseIndex := len(hCBs)-1, 0; entriesIndex >= 2; entriesIndex, responseIndex = entriesIndex-1, responseIndex+1 { + entry := hCBs[entriesIndex] + tt.Assert.Equal(entry.BalanceID, response[responseIndex].(protocol.ClaimableBalance).BalanceID) + } + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "limit": "2", + "cursor": response[1].(protocol.ClaimableBalance).PagingToken(), + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 2) + + // response should be the first 2 elements of entries sorted by ID + for entriesIndex, responseIndex := len(hCBs)-3, 0; entriesIndex >= 0; entriesIndex, responseIndex = entriesIndex-1, responseIndex+1 { + entry := hCBs[entriesIndex] + tt.Assert.Equal(entry.BalanceID, response[responseIndex].(protocol.ClaimableBalance).BalanceID) + } + + // next page should be 0, there are no new claimable balances ingested + lastIngestedCursor := response[1].(protocol.ClaimableBalance).PagingToken() + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "limit": "2", + "cursor": lastIngestedCursor, + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 0) + + // new claimable balances are ingest and one of them updated, they should appear in the next pages + cbToBeUpdated := hCBs[3] + cbToBeUpdated.Sponsor = null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML") + cbToBeUpdated.LastModifiedLedger = 1238 + q.UpsertClaimableBalances(tt.Ctx, []history.ClaimableBalance{cbToBeUpdated}) + + entriesMeta = []struct { + id xdr.Hash + accountID string + ledger int32 + asset *xdr.Asset + }{ + { + xdr.Hash{4, 4, 4}, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + 1236, + nil, + }, + { + xdr.Hash{1, 1, 1}, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + 1237, + nil, + }, + } + + hCBs = nil + for _, e := range entriesMeta { + entry := buildClaimableBalance(tt, e.id, e.accountID, e.ledger, e.asset) + hCBs = append(hCBs, entry) + } + + err = q.UpsertClaimableBalances(tt.Ctx, hCBs) + tt.Assert.NoError(err) + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 6) + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "limit": "2", + "cursor": lastIngestedCursor, + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 2) + + // 
response should be the first 2 elements of entries + for i, entry := range hCBs { + tt.Assert.Equal(entry.BalanceID, response[i].(protocol.ClaimableBalance).BalanceID) + } + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "limit": "2", + "cursor": response[1].(protocol.ClaimableBalance).PagingToken(), + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 1) + + tt.Assert.Equal(cbToBeUpdated.BalanceID, response[0].(protocol.ClaimableBalance).BalanceID) + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "limit": "2", + "cursor": response[0].(protocol.ClaimableBalance).PagingToken(), + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 0) + + // in descending order + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "limit": "2", + "order": "desc", + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 2) + + tt.Assert.Equal(cbToBeUpdated.BalanceID, response[0].(protocol.ClaimableBalance).BalanceID) + + tt.Assert.Equal(hCBs[1].BalanceID, response[1].(protocol.ClaimableBalance).BalanceID) + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "limit": "1", + "order": "desc", + "cursor": response[1].(protocol.ClaimableBalance).PagingToken(), + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 1) + + tt.Assert.Equal(hCBs[0].BalanceID, response[0].(protocol.ClaimableBalance).BalanceID) + + // filter by asset + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "asset": native.StringCanonical(), + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 3) + + for _, resource := range response { + tt.Assert.Equal( + native.StringCanonical(), + resource.(protocol.ClaimableBalance).Asset, + ) + } + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "asset": usd.StringCanonical(), + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 2) + + for _, resource := range response { + tt.Assert.Equal( + usd.StringCanonical(), + resource.(protocol.ClaimableBalance).Asset, + ) + } + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "asset": euro.StringCanonical(), + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 1) + + for _, resource := range response { + tt.Assert.Equal( + euro.StringCanonical(), + resource.(protocol.ClaimableBalance).Asset, + ) + } + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "sponsor": "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 3) + for _, resource := range response { + tt.Assert.Equal( + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + resource.(protocol.ClaimableBalance).Sponsor, + ) + } + + // filter by claimant + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "claimant": "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + 
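// Illustrative sketch, not part of the upstream patch: the claimant and sponsor
// filters exercised in this test are converted to *xdr.AccountId values by
// ClaimableBalancesQuery via xdr.MustAddressPtr (see claimable_balance.go earlier
// in this diff). The standalone program and the fmt import are added only for
// illustration.
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	claimant := xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML")

	// Round-trips back to the same G... strkey address.
	fmt.Println(claimant.Address())
}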
tt.Assert.Len(response, 4) + + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "claimant": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + map[string]string{}, + q, + )) + + tt.Assert.NoError(err) + tt.Assert.Len(response, 2) +} + +func TestCursorAndOrderValidation(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + + handler := GetClaimableBalancesHandler{} + _, err := handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "cursor": "-1-00000043d380c38a2f2cac46ab63674064c56fdce6b977fdef1a278ad50e1a7e6a5e18", + }, + map[string]string{}, + q, + )) + p := err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("cursor", p.Extras["invalid_field"]) + tt.Assert.Equal("The first part should be a number higher than 0 and the second part should be a valid claimable balance ID", p.Extras["reason"]) + + _, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "cursor": "1003529-00000043d380c38a2f2cac46ab63674064c56fdce6b977fdef1a278ad50e1a7e6a5e18", + }, + map[string]string{}, + q, + )) + p = err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("cursor", p.Extras["invalid_field"]) + tt.Assert.Equal("The first part should be a number higher than 0 and the second part should be a valid claimable balance ID", p.Extras["reason"]) + + _, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{ + "order": "arriba", + "cursor": "1003529-00000043d380c38a2f2cac46ab63674064c56fdce6b977fdef1a278ad50e1a7e6a5e18", + }, + map[string]string{}, + q, + )) + p = err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("order", p.Extras["invalid_field"]) + tt.Assert.Equal("order: invalid value", p.Extras["reason"]) +} + +func TestClaimableBalancesQueryURLTemplate(t *testing.T) { + tt := assert.New(t) + expected := "/claimable_balances{?asset,sponsor,claimant,cursor,limit,order}" + q := ClaimableBalancesQuery{} + tt.Equal(expected, q.URITemplate()) +} diff --git a/services/horizon/internal/actions/doc.go b/services/horizon/internal/actions/doc.go new file mode 100644 index 0000000000..7f33dbe9ea --- /dev/null +++ b/services/horizon/internal/actions/doc.go @@ -0,0 +1,4 @@ +// Package actions provides the infrastructure for defining and executing +// actions (code that is triggered in response to an client request) on horizon. +// At present it allows for defining actions that can respond using JSON or SSE. 
+package actions diff --git a/services/horizon/internal/actions/effects.go b/services/horizon/internal/actions/effects.go new file mode 100644 index 0000000000..a141067d25 --- /dev/null +++ b/services/horizon/internal/actions/effects.go @@ -0,0 +1,130 @@ +package actions + +import ( + "context" + "net/http" + + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/problem" +) + +// EffectsQuery query struct for effects end-points +type EffectsQuery struct { + AccountID string `schema:"account_id" valid:"accountID,optional"` + OperationID uint64 `schema:"op_id" valid:"-"` + LiquidityPoolID string `schema:"liquidity_pool_id" valid:"sha256,optional"` + TxHash string `schema:"tx_id" valid:"transactionHash,optional"` + LedgerID uint32 `schema:"ledger_id" valid:"-"` +} + +// Validate runs extra validations on query parameters +func (qp EffectsQuery) Validate() error { + count, err := countNonEmpty( + qp.AccountID, + qp.OperationID, + qp.LiquidityPoolID, + qp.TxHash, + qp.LedgerID, + ) + + if err != nil { + return problem.BadRequest + } + + if count > 1 { + return problem.MakeInvalidFieldProblem( + "filters", + errors.New("Use a single filter for effects, you can only use one of account_id, op_id, tx_id or ledger_id"), + ) + } + return nil +} + +type GetEffectsHandler struct { + LedgerState *ledger.State +} + +func (handler GetEffectsHandler) GetResourcePage(w HeaderWriter, r *http.Request) ([]hal.Pageable, error) { + pq, err := GetPageQuery(handler.LedgerState, r) + if err != nil { + return nil, err + } + + err = validateCursorWithinHistory(handler.LedgerState, pq) + if err != nil { + return nil, err + } + + qp := EffectsQuery{} + err = getParams(&qp, r) + if err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + records, err := loadEffectRecords(r.Context(), historyQ, qp, pq) + if err != nil { + return nil, errors.Wrap(err, "loading transaction records") + } + + ledgers, err := loadEffectLedgers(r.Context(), historyQ, records) + if err != nil { + return nil, errors.Wrap(err, "loading ledgers") + } + + var result []hal.Pageable + for _, record := range records { + effect, err := resourceadapter.NewEffect(r.Context(), record, ledgers[record.LedgerSequence()]) + if err != nil { + return nil, errors.Wrap(err, "could not create effect") + } + result = append(result, effect) + } + + return result, nil +} + +func loadEffectRecords(ctx context.Context, hq *history.Q, qp EffectsQuery, pq db2.PageQuery) ([]history.Effect, error) { + effects := hq.Effects() + + switch { + case qp.AccountID != "": + effects.ForAccount(ctx, qp.AccountID) + case qp.LiquidityPoolID != "": + effects.ForLiquidityPool(ctx, pq, qp.LiquidityPoolID) + case qp.OperationID > 0: + effects.ForOperation(int64(qp.OperationID)) + case qp.LedgerID > 0: + effects.ForLedger(ctx, int32(qp.LedgerID)) + case qp.TxHash != "": + effects.ForTransaction(ctx, qp.TxHash) + } + + var result []history.Effect + err := effects.Page(pq).Select(ctx, &result) + + return result, err +} + +func loadEffectLedgers(ctx context.Context, hq *history.Q, effects []history.Effect) 
(map[int32]history.Ledger, error) { + ledgers := &history.LedgerCache{} + + for _, e := range effects { + ledgers.Queue(e.LedgerSequence()) + } + + if err := ledgers.Load(ctx, hq); err != nil { + return nil, err + } + return ledgers.Records, nil +} diff --git a/services/horizon/internal/actions/effects_test.go b/services/horizon/internal/actions/effects_test.go new file mode 100644 index 0000000000..4df50af6cd --- /dev/null +++ b/services/horizon/internal/actions/effects_test.go @@ -0,0 +1,38 @@ +package actions + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/support/http/httptest" + "github.com/stellar/go/support/render/problem" +) + +func TestEffectsQuery_BadOperationID(t *testing.T) { + called := false + s := httptest.NewServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + qp := EffectsQuery{} + err := getParams(&qp, r) + assert.Error(t, err) + p, ok := err.(*problem.P) + if assert.True(t, ok) { + assert.Equal(t, 400, p.Status) + assert.NotNil(t, p.Extras) + assert.Equal(t, "op_id", p.Extras["invalid_field"]) + assert.Equal(t, "Operation ID must be an integer higher than 0", p.Extras["reason"]) + } + called = true + })) + defer s.Close() + + _, err := http.Get(s.URL + "/?op_id=-1") + assert.NoError(t, err) + assert.True(t, called) + + called = false + _, err = http.Get(s.URL + "/?op_id=foobar") + assert.NoError(t, err) + assert.True(t, called) +} diff --git a/services/horizon/internal/actions/fee_stats.go b/services/horizon/internal/actions/fee_stats.go new file mode 100644 index 0000000000..5a51d01c4b --- /dev/null +++ b/services/horizon/internal/actions/fee_stats.go @@ -0,0 +1,68 @@ +package actions + +import ( + "net/http" + "strconv" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/operationfeestats" +) + +// FeeStatsHandler is the action handler for the /fee_stats endpoint +type FeeStatsHandler struct { +} + +// GetResource fee stats resource +func (handler FeeStatsHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + feeStats := horizon.FeeStats{} + + cur, ok := operationfeestats.CurrentState() + feeStats.LastLedgerBaseFee = cur.LastBaseFee + feeStats.LastLedger = cur.LastLedger + + // LedgerCapacityUsage is the empty string when operationfeestats has not had its state set + if ok { + capacity, err := strconv.ParseFloat( + cur.LedgerCapacityUsage, + 64, + ) + if err != nil { + return nil, err + } + feeStats.LedgerCapacityUsage = capacity + } + + // FeeCharged + feeStats.FeeCharged.Max = cur.FeeChargedMax + feeStats.FeeCharged.Min = cur.FeeChargedMin + feeStats.FeeCharged.Mode = cur.FeeChargedMode + feeStats.FeeCharged.P10 = cur.FeeChargedP10 + feeStats.FeeCharged.P20 = cur.FeeChargedP20 + feeStats.FeeCharged.P30 = cur.FeeChargedP30 + feeStats.FeeCharged.P40 = cur.FeeChargedP40 + feeStats.FeeCharged.P50 = cur.FeeChargedP50 + feeStats.FeeCharged.P60 = cur.FeeChargedP60 + feeStats.FeeCharged.P70 = cur.FeeChargedP70 + feeStats.FeeCharged.P80 = cur.FeeChargedP80 + feeStats.FeeCharged.P90 = cur.FeeChargedP90 + feeStats.FeeCharged.P95 = cur.FeeChargedP95 + feeStats.FeeCharged.P99 = cur.FeeChargedP99 + + // MaxFee + feeStats.MaxFee.Max = cur.MaxFeeMax + feeStats.MaxFee.Min = cur.MaxFeeMin + feeStats.MaxFee.Mode = cur.MaxFeeMode + feeStats.MaxFee.P10 = cur.MaxFeeP10 + feeStats.MaxFee.P20 = cur.MaxFeeP20 + feeStats.MaxFee.P30 = cur.MaxFeeP30 + feeStats.MaxFee.P40 = cur.MaxFeeP40 + feeStats.MaxFee.P50 = cur.MaxFeeP50 + feeStats.MaxFee.P60 = 
cur.MaxFeeP60 + feeStats.MaxFee.P70 = cur.MaxFeeP70 + feeStats.MaxFee.P80 = cur.MaxFeeP80 + feeStats.MaxFee.P90 = cur.MaxFeeP90 + feeStats.MaxFee.P95 = cur.MaxFeeP95 + feeStats.MaxFee.P99 = cur.MaxFeeP99 + + return feeStats, nil +} diff --git a/services/horizon/internal/actions/helpers.go b/services/horizon/internal/actions/helpers.go new file mode 100644 index 0000000000..2cfe9b9738 --- /dev/null +++ b/services/horizon/internal/actions/helpers.go @@ -0,0 +1,618 @@ +package actions + +import ( + "context" + "encoding/hex" + "fmt" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "github.com/asaskevich/govalidator" + "github.com/go-chi/chi" + "github.com/gorilla/schema" + + "github.com/stellar/go/services/horizon/internal/assets" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/ledger" + hProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +// TODO: move these constants to urlparam.go as we should parse the params with http handlers +// in the upper level package. +const ( + // ParamCursor is a query string param name + ParamCursor = "cursor" + // ParamOrder is a query string param name + ParamOrder = "order" + // ParamLimit is a query string param name + ParamLimit = "limit" + // LastLedgerHeaderName is the header which is set on all endpoints + LastLedgerHeaderName = "Latest-Ledger" +) + +type Opt int + +const ( + // DisableCursorValidation disables cursor validation in GetPageQuery + DisableCursorValidation Opt = iota +) + +// HeaderWriter is an interface for setting HTTP response headers +type HeaderWriter interface { + Header() http.Header +} + +// SetLastLedgerHeader sets the Latest-Ledger header +func SetLastLedgerHeader(w HeaderWriter, lastLedger uint32) { + w.Header().Set(LastLedgerHeaderName, strconv.FormatUint(uint64(lastLedger), 10)) +} + +// getCursor retrieves a string from either the URLParams, form or query string. +// This method uses the priority (URLParams, Form, Query). +func getCursor(ledgerState *ledger.State, r *http.Request, name string) (string, error) { + cursor, err := getString(r, name) + + if err != nil { + return "", err + } + + if cursor == "now" { + tid := toid.AfterLedger(ledgerState.CurrentStatus().HistoryLatest) + cursor = tid.String() + } + + if lastEventID := r.Header.Get("Last-Event-ID"); lastEventID != "" { + cursor = lastEventID + } + + // In case cursor is negative value, return InvalidField error + cursorInt, err := strconv.Atoi(cursor) + if err == nil && cursorInt < 0 { + msg := fmt.Sprintf("the cursor %d is a negative number: ", cursorInt) + + return "", problem.MakeInvalidFieldProblem( + name, + errors.New(msg), + ) + } + + return cursor, nil +} + +func checkUTF8(name, value string) error { + if !utf8.ValidString(value) { + return problem.MakeInvalidFieldProblem(name, errors.New("invalid value")) + } + return nil +} + +// getStringFromURLParam retrieves a string from the URLParams. 
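+// The returned value is path-unescaped and checked for valid UTF-8; a param that
+// is not present yields ("", nil). A minimal usage sketch (the route and param
+// name below are illustrative, not part of this change):
+//
+//    // route: /ledgers/{ledger_id}, request: GET /ledgers/123
+//    value, err := getStringFromURLParam(r, "ledger_id")
+//    // value == "123", err == nil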
+func getStringFromURLParam(r *http.Request, name string) (string, error) {
+    fromURL, ok := getURLParam(r, name)
+    if ok {
+        ret, err := url.PathUnescape(fromURL)
+        if err != nil {
+            return "", problem.MakeInvalidFieldProblem(name, err)
+        }
+
+        if err := checkUTF8(name, ret); err != nil {
+            return "", err
+        }
+        return ret, nil
+    }
+
+    return "", nil
+}
+
+// getString retrieves a string from either the URLParams, form or query string.
+// This method uses the priority (URLParams, Form, Query).
+func getString(r *http.Request, name string) (string, error) {
+    fromURL, ok := getURLParam(r, name)
+    if ok {
+        ret, err := url.PathUnescape(fromURL)
+        if err != nil {
+            return "", problem.MakeInvalidFieldProblem(name, err)
+        }
+
+        if err := checkUTF8(name, ret); err != nil {
+            return "", err
+        }
+
+        return ret, nil
+    }
+
+    fromForm := r.FormValue(name)
+    if fromForm != "" {
+        if err := checkUTF8(name, fromForm); err != nil {
+            return "", err
+        }
+        return fromForm, nil
+    }
+
+    value := r.URL.Query().Get(name)
+    if err := checkUTF8(name, value); err != nil {
+        return "", err
+    }
+
+    return value, nil
+}
+
+// getLimit retrieves a uint64 limit from the action parameter of the given
+// name. Populates err if the value is not a valid limit. Uses the provided
+// default value if the limit parameter is a blank string.
+func getLimit(r *http.Request, name string, def uint64, max uint64) (uint64, error) {
+    limit, err := getString(r, name)
+
+    if err != nil {
+        return 0, err
+    }
+    if limit == "" {
+        return def, nil
+    }
+
+    asI64, err := strconv.ParseInt(limit, 10, 64)
+    if err != nil {
+        return 0, problem.MakeInvalidFieldProblem(name, errors.New("unparseable value"))
+    }
+
+    if asI64 <= 0 {
+        err = errors.New("invalid limit: non-positive value provided")
+    } else if asI64 > int64(max) {
+        err = errors.Errorf("invalid limit: value provided that is over limit max of %d", max)
+    }
+
+    if err != nil {
+        return 0, problem.MakeInvalidFieldProblem(name, err)
+    }
+
+    return uint64(asI64), nil
+}
+
+// GetPageQuery is a helper that returns a new db2.PageQuery struct initialized
+// using the request's cursor, order, and limit parameters.
+func GetPageQuery(ledgerState *ledger.State, r *http.Request, opts ...Opt) (db2.PageQuery, error) {
+    disableCursorValidation := false
+    for _, opt := range opts {
+        if opt == DisableCursorValidation {
+            disableCursorValidation = true
+        }
+    }
+
+    cursor, err := getCursor(ledgerState, r, ParamCursor)
+    if err != nil {
+        return db2.PageQuery{}, err
+    }
+    order, err := getString(r, ParamOrder)
+    if err != nil {
+        return db2.PageQuery{}, err
+    }
+    limit, err := getLimit(r, ParamLimit, db2.DefaultPageSize, db2.MaxPageSize)
+    if err != nil {
+        return db2.PageQuery{}, err
+    }
+
+    pageQuery, err := db2.NewPageQuery(cursor, !disableCursorValidation, order, limit)
+    if err != nil {
+        if invalidFieldError, ok := err.(*db2.InvalidFieldError); ok {
+            err = problem.MakeInvalidFieldProblem(
+                invalidFieldError.Name,
+                err,
+            )
+        } else {
+            err = problem.BadRequest
+        }
+
+        return db2.PageQuery{}, err
+    }
+
+    return pageQuery, nil
+}
+
+// GetTransactionID retrieves a transaction identifier by attempting to decode a hex-encoded,
+// 64-digit lowercase string at the provided name.
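+//
+// A short sketch of what is accepted (the valid hash is the one used in this
+// package's test fixtures):
+//
+//    GetTransactionID(r, "tx_id") // "aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf" -> returned as-is
+//                                 // uppercase or truncated input -> "invalid hash format" problem
+//                                 // empty value -> ("", nil), i.e. the filter is simply absent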
+func GetTransactionID(r *http.Request, name string) (string, error) {
+    value, err := getStringFromURLParam(r, name)
+    if err != nil {
+        return "", err
+    }
+
+    if value != "" {
+        if _, err = hex.DecodeString(value); err != nil || len(value) != 64 || strings.ToLower(value) != value {
+            return "", problem.MakeInvalidFieldProblem(name, errors.New("invalid hash format"))
+        }
+    }
+
+    return value, nil
+}
+
+// getAccountID retrieves an xdr.AccountID by attempting to decode a stellar
+// address at the provided name.
+func getAccountID(r *http.Request, name string) (xdr.AccountId, error) {
+    value, err := getString(r, name)
+    if err != nil {
+        return xdr.AccountId{}, err
+    }
+
+    result, err := xdr.AddressToAccountId(value)
+    if err != nil {
+        return result, problem.MakeInvalidFieldProblem(
+            name,
+            errors.New("invalid address"),
+        )
+    }
+
+    return result, nil
+}
+
+// getAssetType is a helper that returns an xdr.AssetType by reading a string
+func getAssetType(r *http.Request, name string) (xdr.AssetType, error) {
+    val, err := getString(r, name)
+    if err != nil {
+        return xdr.AssetTypeAssetTypeNative, nil
+    }
+
+    t, err := assets.Parse(val)
+    if err != nil {
+        return t, problem.MakeInvalidFieldProblem(
+            name,
+            err,
+        )
+    }
+
+    return t, nil
+}
+
+// getAsset decodes an asset from the request fields prefixed by `prefix`. To
+// succeed, three prefixed fields must be present: asset_type, asset_code, and
+// asset_issuer.
+func getAsset(r *http.Request, prefix string) (xdr.Asset, error) {
+    var value interface{}
+    t, err := getAssetType(r, prefix+"asset_type")
+    if err != nil {
+        return xdr.Asset{}, err
+    }
+
+    switch t {
+    case xdr.AssetTypeAssetTypeCreditAlphanum4:
+        a := xdr.AlphaNum4{}
+        a.Issuer, err = getAccountID(r, prefix+"asset_issuer")
+        if err != nil {
+            return xdr.Asset{}, err
+        }
+
+        var code string
+        code, err = getString(r, prefix+"asset_code")
+        if err != nil {
+            return xdr.Asset{}, err
+        }
+        if len(code) > len(a.AssetCode) {
+            err := problem.MakeInvalidFieldProblem(
+                prefix+"asset_code",
+                errors.New("code too long"),
+            )
+            return xdr.Asset{}, err
+        }
+
+        copy(a.AssetCode[:len(code)], []byte(code))
+        value = a
+    case xdr.AssetTypeAssetTypeCreditAlphanum12:
+        a := xdr.AlphaNum12{}
+        a.Issuer, err = getAccountID(r, prefix+"asset_issuer")
+        if err != nil {
+            return xdr.Asset{}, err
+        }
+
+        var code string
+        code, err = getString(r, prefix+"asset_code")
+        if err != nil {
+            return xdr.Asset{}, err
+        }
+        if len(code) > len(a.AssetCode) {
+            err := problem.MakeInvalidFieldProblem(
+                prefix+"asset_code",
+                errors.New("code too long"),
+            )
+            return xdr.Asset{}, err
+        }
+
+        copy(a.AssetCode[:len(code)], []byte(code))
+        value = a
+    }
+
+    result, err := xdr.NewAsset(t, value)
+    if err != nil {
+        panic(err)
+    }
+
+    return result, nil
+}
+
+// getURLParam returns the corresponding URL parameter value from the request
+// routing context and an additional boolean reflecting whether or not the
+// param was found. This is ported from Chi since the Chi version returns ""
+// for params not found. This is undesirable since "" is also a valid url param.
+// Ref: https://github.com/go-chi/chi/blob/d132b31857e5922a2cc7963f4fcfd8f46b3f2e97/context.go#L69
+func getURLParam(r *http.Request, key string) (string, bool) {
+    rctx := chi.RouteContext(r.Context())
+
+    if rctx == nil {
+        return "", false
+    }
+
+    // Return immediately if the number of Keys does not match the number of Values.
+    // This can happen when a named param is not specified.
+ // This is a bug in chi: https://github.com/go-chi/chi/issues/426 + if len(rctx.URLParams.Keys) != len(rctx.URLParams.Values) { + return "", false + } + + for k := len(rctx.URLParams.Keys) - 1; k >= 0; k-- { + if rctx.URLParams.Keys[k] == key { + return rctx.URLParams.Values[k], true + } + } + + return "", false +} + +// FullURL returns a URL containing the information regarding the original +// request stored in the context. +func FullURL(ctx context.Context) *url.URL { + url := horizonContext.BaseURL(ctx) + r := horizonContext.RequestFromContext(ctx) + if r != nil { + url.Path = r.URL.Path + url.RawQuery = r.URL.RawQuery + } + return url +} + +// Note from chi: it is a good idea to set a Decoder instance as a package +// global, because it caches meta-data about structs, and an instance can be +// shared safely: +var decoder = schema.NewDecoder() + +// getParams fills a struct with values read from a request's query parameters. +func getParams(dst interface{}, r *http.Request) error { + query := r.URL.Query() + + // Merge chi's URLParams with URL Query Params. Given + // `/accounts/{account_id}/transactions?foo=bar`, chi's URLParams will + // contain `account_id` and URL Query params will contain `foo`. + if rctx := chi.RouteContext(r.Context()); rctx != nil { + for _, key := range rctx.URLParams.Keys { + if key == "*" { + continue + } + param, _ := getURLParam(r, key) + query.Set(key, param) + } + } + + if err := decoder.Decode(dst, query); err != nil { + for k, e := range err.(schema.MultiError) { + return problem.NewProblemWithInvalidField( + problem.BadRequest, + k, + getSchemaErrorFieldMessage(k, e), + ) + } + } + + if _, err := govalidator.ValidateStruct(dst); err != nil { + field, message := getErrorFieldMessage(err) + err = problem.MakeInvalidFieldProblem( + getSchemaTag(dst, field), + errors.New(message), + ) + + return err + } + + if v, ok := dst.(Validateable); ok { + if err := v.Validate(); err != nil { + return err + } + } + + return nil +} + +func getSchemaTag(params interface{}, field string) string { + v := reflect.ValueOf(params).Elem() + qt := v.Type() + f, _ := qt.FieldByName(field) + return f.Tag.Get("schema") +} + +// getURIParams returns a list of query parameters for a given query struct +func getURIParams(query interface{}, paginated bool) []string { + params := getSchemaTags(reflect.ValueOf(query).Elem()) + if paginated { + pagingParams := []string{ + ParamCursor, + ParamLimit, + ParamOrder, + } + params = append(params, pagingParams...) + } + return params +} + +func getURITemplate(query interface{}, basePath string, paginated bool) string { + return "/" + basePath + "{?" + strings.Join(getURIParams(query, paginated), ",") + "}" +} + +func getSchemaTags(v reflect.Value) []string { + qt := v.Type() + fields := make([]string, 0, v.NumField()) + + for i := 0; i < qt.NumField(); i++ { + f := qt.Field(i) + // Query structs can have embedded query structs + if f.Type.Kind() == reflect.Struct { + fields = append(fields, getSchemaTags(v.Field(i))...) + } else { + tag, ok := f.Tag.Lookup("schema") + if ok { + fields = append(fields, tag) + } + } + } + + return fields +} + +// validateAssetParams runs multiple checks on an asset query parameter +func validateAssetParams(aType, code, issuer, prefix string) error { + // If asset type is not present but code or issuer are, then there is a + // missing parameter and the request is unprocessable. 
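+    // A few illustrative outcomes, assuming prefix "selling_" (a sketch, not an
+    // exhaustive list):
+    //
+    //    ("", "", "", "selling_")                        -> nil (no asset filter given)
+    //    ("", "USD", "", "selling_")                     -> invalid selling_asset_type (Missing parameter)
+    //    ("native", "USD", "", "selling_")               -> invalid selling_asset_code (native has no code)
+    //    ("credit_alphanum4", "USD", issuer, "selling_") -> nil for a well-formed issuer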
+ if len(aType) == 0 { + if len(code) > 0 || len(issuer) > 0 { + return problem.MakeInvalidFieldProblem( + prefix+"asset_type", + errors.New("Missing parameter"), + ) + } + + return nil + } + + t, err := assets.Parse(aType) + if err != nil { + return problem.MakeInvalidFieldProblem( + prefix+"asset_type", + err, + ) + } + + var validLen int + switch t { + case xdr.AssetTypeAssetTypeNative: + // If asset type is native, issuer or code should not be included in the + // request + switch { + case len(code) > 0: + return problem.MakeInvalidFieldProblem( + prefix+"asset_code", + errors.New("native asset does not have a code"), + ) + case len(issuer) > 0: + return problem.MakeInvalidFieldProblem( + prefix+"asset_issuer", + errors.New("native asset does not have an issuer"), + ) + } + + return nil + case xdr.AssetTypeAssetTypeCreditAlphanum4: + validLen = len(xdr.AlphaNum4{}.AssetCode) + case xdr.AssetTypeAssetTypeCreditAlphanum12: + validLen = len(xdr.AlphaNum12{}.AssetCode) + } + + codeLen := len(code) + if codeLen == 0 || codeLen > validLen { + return problem.MakeInvalidFieldProblem( + prefix+"asset_code", + errors.New("Asset code must be 1-12 alphanumeric characters"), + ) + } + + if len(issuer) == 0 { + return problem.MakeInvalidFieldProblem( + prefix+"asset_issuer", + errors.New("Missing parameter"), + ) + } + + return nil +} + +// validateCursorWithinHistory compares the requested page of data against the +// ledger state of the history database. In the event that the cursor is +// guaranteed to return no results, we return a 410 GONE http response. +func validateCursorWithinHistory(ledgerState *ledger.State, pq db2.PageQuery) error { + // an ascending query should never return a gone response: An ascending query + // prior to known history should return results at the beginning of history, + // and an ascending query beyond the end of history should not error out but + // rather return an empty page (allowing code that tracks the procession of + // some resource more easily). 
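+    // A rough sketch of the behaviour with an empty ledger.State (so the elder
+    // ledger toid is 0), mirroring TestValidateCursorWithinHistory:
+    //
+    //    order=asc,  any cursor  -> nil
+    //    order=desc, cursor "0"  -> &hProblem.BeforeHistory (0 <= elder toid)
+    //    order=desc, cursor "10" -> nil (10 > elder toid)
+    //    order=desc, unparseable cursor -> invalid "cursor" field problem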
+ if pq.Order != "desc" { + return nil + } + + var cursor int64 + var err error + + // Checking for the presence of "-" to see whether we should use CursorInt64 + // or CursorInt64Pair + if strings.Contains(pq.Cursor, "-") { + cursor, _, err = pq.CursorInt64Pair("-") + } else { + cursor, err = pq.CursorInt64() + } + + if err != nil { + return problem.MakeInvalidFieldProblem("cursor", errors.New("invalid value")) + } + + elder := toid.New(ledgerState.CurrentStatus().HistoryElder, 0, 0) + + if cursor <= elder.ToInt64() { + return &hProblem.BeforeHistory + } + + return nil +} + +func countNonEmpty(params ...interface{}) (int, error) { + count := 0 + + for _, param := range params { + switch param := param.(type) { + default: + return 0, errors.Errorf("unexpected type %T", param) + case int32: + if param != 0 { + count++ + } + case uint32: + if param != 0 { + count++ + } + case int64: + if param != 0 { + count++ + } + case uint64: + if param != 0 { + count++ + } + case string: + if param != "" { + count++ + } + case *xdr.Asset: + if param != nil { + count++ + } + } + } + + return count, nil +} + +func init() { + decoder.IgnoreUnknownKeys(true) +} diff --git a/services/horizon/internal/actions/helpers_test.go b/services/horizon/internal/actions/helpers_test.go new file mode 100644 index 0000000000..eb17e709af --- /dev/null +++ b/services/horizon/internal/actions/helpers_test.go @@ -0,0 +1,557 @@ +package actions + +import ( + "context" + "fmt" + "math" + "net/http" + "net/url" + "testing" + + "github.com/go-chi/chi" + "github.com/stretchr/testify/assert" + + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +func TestGetTransactionID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + r := makeFooBarTestActionRequest() + + txID, err := GetTransactionID(r, "valid_tx_id") + tt.Assert.NoError(err) + tt.Assert.Equal( + "aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf", + txID, + ) + + txID, err = GetTransactionID(r, "invalid_uppercase_tx_id") + tt.Assert.Error(err) + + txID, err = GetTransactionID(r, "invalid_too_short_tx_id") + tt.Assert.Error(err) +} + +func TestGetAssetType(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + r := makeFooBarTestActionRequest() + + ts, err := getAssetType(r, "native_asset_type") + if tt.Assert.NoError(err) { + tt.Assert.Equal(xdr.AssetTypeAssetTypeNative, ts) + } + + ts, err = getAssetType(r, "4_asset_type") + if tt.Assert.NoError(err) { + tt.Assert.Equal(xdr.AssetTypeAssetTypeCreditAlphanum4, ts) + } + + ts, err = getAssetType(r, "12_asset_type") + if tt.Assert.NoError(err) { + tt.Assert.Equal(xdr.AssetTypeAssetTypeCreditAlphanum12, ts) + } +} + +func TestGetCursor(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + + ledgerState := &ledger.State{} + // now uses the ledger state + r := makeTestActionRequest("/?cursor=now", nil) + cursor, err := getCursor(ledgerState, r, "cursor") + if tt.Assert.NoError(err) { + expected := toid.AfterLedger(ledgerState.CurrentStatus().HistoryLatest).String() + tt.Assert.Equal(expected, cursor) + } + + //Last-Event-ID overrides cursor + r = makeFooBarTestActionRequest() + 
r.Header.Set("Last-Event-ID", "from_header") + cursor, err = getCursor(ledgerState, r, "cursor") + if tt.Assert.NoError(err) { + tt.Assert.Equal("from_header", cursor) + } +} + +func TestValidateCursorWithinHistory(t *testing.T) { + tt := assert.New(t) + testCases := []struct { + cursor string + order string + valid bool + }{ + { + cursor: "10", + order: "desc", + valid: true, + }, + { + cursor: "10-1234", + order: "desc", + valid: true, + }, + { + cursor: "0", + order: "desc", + valid: false, + }, + { + cursor: "0-1234", + order: "desc", + valid: false, + }, + { + cursor: "10", + order: "asc", + valid: true, + }, + { + cursor: "10-1234", + order: "asc", + valid: true, + }, + { + cursor: "0", + order: "asc", + valid: true, + }, + { + cursor: "0-1234", + order: "asc", + valid: true, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("cursor: %s", tc.cursor), func(t *testing.T) { + pq, err := db2.NewPageQuery(tc.cursor, false, tc.order, 10) + tt.NoError(err) + err = validateCursorWithinHistory(&ledger.State{}, pq) + + if tc.valid { + tt.NoError(err) + } else { + tt.EqualError(err, "problem: before_history") + } + }) + } +} + +func TestActionGetLimit(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + + // happy path + r := makeFooBarTestActionRequest() + limit, err := getLimit(r, "limit", 5, 200) + if tt.Assert.NoError(err) { + tt.Assert.Equal(uint64(2), limit) + } + + r = makeTestActionRequest("/?limit=200", nil) + limit, err = getLimit(r, "limit", 5, 200) + if tt.Assert.NoError(err) { + tt.Assert.Equal(uint64(200), limit) + } + + // defaults + r = makeTestActionRequest("/", nil) + limit, err = getLimit(r, "limit", 5, 200) + if tt.Assert.NoError(err) { + tt.Assert.Equal(uint64(5), limit) + } + + r = makeTestActionRequest("/?limit=", nil) + limit, err = getLimit(r, "limit", 5, 200) + if tt.Assert.NoError(err) { + tt.Assert.Equal(uint64(5), limit) + } + + // invalids + r = makeTestActionRequest("/?limit=0", nil) + _, err = getLimit(r, "limit", 5, 200) + tt.Assert.Error(err) + + r = makeTestActionRequest("/?limit=-1", nil) + _, err = getLimit(r, "limit", 5, 200) + tt.Assert.Error(err) + + r = makeTestActionRequest("/?limit=201", nil) + _, err = getLimit(r, "limit", 5, 200) + tt.Assert.Error(err) +} + +func TestGetLimit(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + + // happy path + r := makeFooBarTestActionRequest() + limit, err := getLimit(r, "limit", 5, 200) + if tt.Assert.NoError(err) { + tt.Assert.Equal(uint64(2), limit) + } + + r = makeTestActionRequest("/?limit=200", nil) + limit, err = getLimit(r, "limit", 5, 200) + if tt.Assert.NoError(err) { + tt.Assert.Equal(uint64(200), limit) + } + + // defaults + r = makeTestActionRequest("/", nil) + limit, err = getLimit(r, "limit", 5, 200) + if tt.Assert.NoError(err) { + tt.Assert.Equal(uint64(5), limit) + } + + r = makeTestActionRequest("/?limit=", nil) + limit, err = getLimit(r, "limit", 5, 200) + if tt.Assert.NoError(err) { + tt.Assert.Equal(uint64(5), limit) + } + + // invalids + r = makeTestActionRequest("/?limit=0", nil) + _, err = getLimit(r, "limit", 5, 200) + tt.Assert.Error(err) + + r = makeTestActionRequest("/?limit=-1", nil) + _, err = getLimit(r, "limit", 5, 200) + tt.Assert.Error(err) + + r = makeTestActionRequest("/?limit=201", nil) + _, err = getLimit(r, "limit", 5, 200) + tt.Assert.Error(err) +} + +func TestActionGetPageQuery(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + r := makeFooBarTestActionRequest() + ledgerState := &ledger.State{} + + // happy path + pq, err := 
GetPageQuery(ledgerState, r) + tt.Assert.NoError(err) + tt.Assert.Equal("123456", pq.Cursor) + tt.Assert.Equal(uint64(2), pq.Limit) + tt.Assert.Equal("asc", pq.Order) + + // regression: GetPagQuery does not overwrite err + r = makeTestActionRequest("/?limit=foo", nil) + _, err = getLimit(r, "limit", 1, 200) + tt.Assert.Error(err) + _, err = GetPageQuery(ledgerState, r) + tt.Assert.Error(err) + + // regression: https://github.com/stellar/go/services/horizon/internal/issues/372 + // (limit of 0 turns into 10) + makeTestActionRequest("/?limit=0", nil) + _, err = GetPageQuery(ledgerState, r) + tt.Assert.Error(err) +} + +func TestGetPageQuery(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + r := makeFooBarTestActionRequest() + ledgerState := &ledger.State{} + + // happy path + pq, err := GetPageQuery(ledgerState, r) + tt.Assert.NoError(err) + tt.Assert.Equal("123456", pq.Cursor) + tt.Assert.Equal(uint64(2), pq.Limit) + tt.Assert.Equal("asc", pq.Order) + + // regression: GetPagQuery does not overwrite err + r = makeTestActionRequest("/?limit=foo", nil) + _, err = getLimit(r, "limit", 1, 200) + tt.Assert.Error(err) + _, err = GetPageQuery(ledgerState, r) + tt.Assert.Error(err) + + // regression: https://github.com/stellar/go/services/horizon/internal/issues/372 + // (limit of 0 turns into 10) + r = makeTestActionRequest("/?limit=0", nil) + _, err = GetPageQuery(ledgerState, r) + tt.Assert.Error(err) +} + +func TestGetString(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + r := makeFooBarTestActionRequest() + cursor, err := getString(r, "cursor") + tt.Assert.NoError(err) + tt.Assert.Equal("123456", cursor) + r.Form = url.Values{ + "cursor": {"goodbye"}, + } + cursor, err = getString(r, "cursor") + tt.Assert.NoError(err) + tt.Assert.Equal("goodbye", cursor) +} + +func TestPath(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + r := makeFooBarTestActionRequest() + + tt.Assert.Equal("/foo-bar/blah", r.URL.Path) +} + +func TestBaseGetURLParam(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + r := makeFooBarTestActionRequest() + + val, ok := getURLParam(r, "two") + tt.Assert.Equal("2", val) + tt.Assert.Equal(true, ok) + + // valid empty string + val, ok = getURLParam(r, "blank") + tt.Assert.Equal("", val) + tt.Assert.Equal(true, ok) + + // url param not found + val, ok = getURLParam(r, "foobarcursor") + tt.Assert.Equal("", val) + tt.Assert.Equal(false, ok) +} + +func TestGetURLParam(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + r := makeTestActionRequest("/accounts/{account_id}/operations?limit=100", nil) + + // simulates a request where the named param is not passed. + // Regression for https://github.com/stellar/go/issues/1965 + rctx := chi.RouteContext(r.Context()) + rctx.URLParams.Keys = []string{ + "account_id", + } + + val, ok := getURLParam(r, "account_id") + tt.Assert.Empty(val) + tt.Assert.False(ok) +} + +func TestFullURL(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + r := makeFooBarTestActionRequest() + + url := FullURL(r.Context()) + tt.Assert.Equal("http:///foo-bar/blah?limit=2&cursor=123456", url.String()) +} + +func TestGetParams(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + + type QueryParams struct { + SellingBuyingAssetQueryParams `valid:"-"` + Account string `schema:"account_id" valid:"accountID"` + } + + account := "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" + + // Simulate chi's URL params. 
The following would be equivalent to having a + // chi route like the following `/accounts/{account_id}` + urlParams := map[string]string{ + "account_id": account, + "selling_asset_type": "credit_alphanum4", + "selling_asset_code": "USD", + "selling_asset_issuer": account, + } + + r := makeTestActionRequest("/transactions?limit=2&cursor=123456&order=desc", urlParams) + qp := QueryParams{} + err := getParams(&qp, r) + + tt.Assert.NoError(err) + tt.Assert.Equal(account, qp.Account) + selling, err := qp.Selling() + tt.Assert.NoError(err) + tt.Assert.NotNil(selling) + tt.Assert.True(usd.Equals(*selling)) + + urlParams = map[string]string{ + "account_id": account, + "selling_asset_type": "native", + } + + r = makeTestActionRequest("/transactions?limit=2&cursor=123456&order=desc", urlParams) + qp = QueryParams{} + err = getParams(&qp, r) + + tt.Assert.NoError(err) + selling, err = qp.Selling() + tt.Assert.NoError(err) + tt.Assert.NotNil(selling) + tt.Assert.True(native.Equals(*selling)) + + urlParams = map[string]string{"account_id": "1"} + r = makeTestActionRequest("/transactions?limit=2&cursor=123456&order=desc", urlParams) + qp = QueryParams{} + err = getParams(&qp, r) + + if tt.Assert.IsType(&problem.P{}, err) { + p := err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("account_id", p.Extras["invalid_field"]) + tt.Assert.Equal( + "Account ID must start with `G` and contain 56 alphanum characters", + p.Extras["reason"], + ) + } + + // Test that we get the URL parameter properly + // when a query parameter with the same name is provided + urlParams = map[string]string{ + "account_id": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + } + r = makeTestActionRequest("/transactions?account_id=bar", urlParams) + err = getParams(&qp, r) + tt.Assert.NoError(err) + tt.Assert.Equal("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", qp.Account) + +} + +type ParamsValidator struct { + Account string `schema:"account_id" valid:"required"` +} + +func (pv ParamsValidator) Validate() error { + return problem.MakeInvalidFieldProblem( + "Name", + errors.New("Invalid"), + ) +} + +func TestGetParamsCustomValidator(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + + urlParams := map[string]string{"account_id": "1"} + r := makeTestActionRequest("/transactions", urlParams) + qp := ParamsValidator{} + err := getParams(&qp, r) + + if tt.Assert.IsType(&problem.P{}, err) { + p := err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("Name", p.Extras["invalid_field"]) + } +} + +func makeFooBarTestActionRequest() *http.Request { + return makeTestActionRequest("/foo-bar/blah?limit=2&cursor=123456", testURLParams()) +} + +func makeTestActionRequest(path string, body map[string]string) *http.Request { + rctx := chi.NewRouteContext() + for k, v := range body { + rctx.URLParams.Add(k, v) + } + + r, _ := http.NewRequest("GET", path, nil) + + ctx := context.WithValue(r.Context(), chi.RouteCtxKey, rctx) + return r.WithContext(context.WithValue(ctx, &horizonContext.RequestContextKey, r)) +} + +func testURLParams() map[string]string { + return map[string]string{ + "blank": "", + "minus_one": "-1", + "zero": "0", + "two": "2", + "twenty": "20", + "32min": fmt.Sprint(math.MinInt32), + "32max": fmt.Sprint(math.MaxInt32), + "64min": fmt.Sprint(math.MinInt64), + "64max": fmt.Sprint(math.MaxInt64), + "native_asset_type": "native", + "4_asset_type": "credit_alphanum4", + "4_asset_code": "USD", + "4_asset_issuer": 
"GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "12_asset_type": "credit_alphanum12", + "12_asset_code": "USD", + "12_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "long_4_asset_type": "credit_alphanum4", + "long_4_asset_code": "SPOOON", + "long_4_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "long_12_asset_type": "credit_alphanum12", + "long_12_asset_code": "OHMYGODITSSOLONG", + "long_12_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "valid_tx_id": "aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf", + "invalid_uppercase_tx_id": "AA168F12124B7C196C0ADAEE7C73A64D37F99428CACB59A91FF389626845E7CF", + "invalid_too_short_tx_id": "aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7", + } +} + +func makeRequest( + t *testing.T, + queryParams map[string]string, + routeParams map[string]string, + session db.SessionInterface, +) *http.Request { + request, err := http.NewRequest("GET", "/", nil) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + query := url.Values{} + for key, value := range queryParams { + query.Set(key, value) + } + request.URL.RawQuery = query.Encode() + + chiRouteContext := chi.NewRouteContext() + for key, value := range routeParams { + chiRouteContext.URLParams.Add(key, value) + } + ctx := context.WithValue( + context.WithValue(context.Background(), chi.RouteCtxKey, chiRouteContext), + &horizonContext.SessionContextKey, + session, + ) + + return request.WithContext(ctx) +} + +func TestGetURIParams(t *testing.T) { + tt := assert.New(t) + type QueryParams struct { + SellingBuyingAssetQueryParams `valid:"-"` + Account string `schema:"account_id" valid:"accountID"` + } + + expected := []string{ + "selling_asset_type", + "selling_asset_issuer", + "selling_asset_code", + "buying_asset_type", + "buying_asset_issuer", + "buying_asset_code", + "selling", + "buying", + "account_id", + } + + qp := QueryParams{} + tt.Equal(expected, getURIParams(&qp, false)) +} diff --git a/services/horizon/internal/actions/ledger.go b/services/horizon/internal/actions/ledger.go new file mode 100644 index 0000000000..37fddb5bd9 --- /dev/null +++ b/services/horizon/internal/actions/ledger.go @@ -0,0 +1,83 @@ +package actions + +import ( + "net/http" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/support/render/hal" +) + +type GetLedgersHandler struct { + LedgerState *ledger.State +} + +func (handler GetLedgersHandler) GetResourcePage(w HeaderWriter, r *http.Request) ([]hal.Pageable, error) { + pq, err := GetPageQuery(handler.LedgerState, r) + if err != nil { + return nil, err + } + + err = validateCursorWithinHistory(handler.LedgerState, pq) + if err != nil { + return nil, err + } + + historyQ, err := context.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + var records []history.Ledger + if err = historyQ.Ledgers().Page(pq).Select(r.Context(), &records); err != nil { + return nil, err + } + + var result []hal.Pageable + for _, record := range records { + var ledger horizon.Ledger + resourceadapter.PopulateLedger(r.Context(), &ledger, record) + if err != nil { + return nil, err + } + result = append(result, 
ledger)
+    }
+
+    return result, nil
+}
+
+// LedgerByIDQuery query struct for the ledger/{id} endpoint
+type LedgerByIDQuery struct {
+    LedgerID uint32 `schema:"ledger_id" valid:"-"`
+}
+
+type GetLedgerByIDHandler struct {
+    LedgerState *ledger.State
+}
+
+func (handler GetLedgerByIDHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) {
+    qp := LedgerByIDQuery{}
+    err := getParams(&qp, r)
+    if err != nil {
+        return nil, err
+    }
+    if int32(qp.LedgerID) < handler.LedgerState.CurrentStatus().HistoryElder {
+        return nil, problem.BeforeHistory
+    }
+    historyQ, err := context.HistoryQFromRequest(r)
+    if err != nil {
+        return nil, err
+    }
+    var ledger history.Ledger
+    err = historyQ.LedgerBySequence(r.Context(), &ledger, int32(qp.LedgerID))
+    if err != nil {
+        return nil, err
+    }
+    var result horizon.Ledger
+    resourceadapter.PopulateLedger(r.Context(), &result, ledger)
+    return result, nil
+}
diff --git a/services/horizon/internal/actions/liquidity_pool.go b/services/horizon/internal/actions/liquidity_pool.go
new file mode 100644
index 0000000000..bb1aedd882
--- /dev/null
+++ b/services/horizon/internal/actions/liquidity_pool.go
@@ -0,0 +1,170 @@
+package actions
+
+import (
+    "context"
+    "net/http"
+    "strings"
+
+    protocol "github.com/stellar/go/protocols/horizon"
+    horizonContext "github.com/stellar/go/services/horizon/internal/context"
+    "github.com/stellar/go/services/horizon/internal/db2/history"
+    "github.com/stellar/go/services/horizon/internal/ledger"
+    "github.com/stellar/go/services/horizon/internal/resourceadapter"
+    "github.com/stellar/go/support/errors"
+    "github.com/stellar/go/support/render/hal"
+    "github.com/stellar/go/support/render/problem"
+    "github.com/stellar/go/xdr"
+)
+
+// GetLiquidityPoolByIDHandler is the action handler for all end-points returning a liquidity pool.
+type GetLiquidityPoolByIDHandler struct{}
+
+// LiquidityPoolQuery query struct for the liquidity_pools/{id} endpoint
+type LiquidityPoolQuery struct {
+    ID string `schema:"liquidity_pool_id" valid:"sha256"`
+}
+
+// GetResource returns a single liquidity pool.
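+//
+// The id is validated as a 64-character lowercase hex (sha256) string by
+// getParams, so, as a sketch (behaviour exercised in the handler tests):
+//
+//    GET /liquidity_pools/{64-char hex id} -> the pool resource, or a no-rows error if unknown
+//    GET /liquidity_pools/0000001112122    -> 400 problem, invalid_field=liquidity_pool_id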
+func (handler GetLiquidityPoolByIDHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + ctx := r.Context() + qp := LiquidityPoolQuery{} + err := getParams(&qp, r) + if err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + cb, err := historyQ.FindLiquidityPoolByID(ctx, qp.ID) + if err != nil { + return nil, err + } + ledger := &history.Ledger{} + err = historyQ.LedgerBySequence(ctx, ledger, int32(cb.LastModifiedLedger)) + if historyQ.NoRows(err) { + ledger = nil + } else if err != nil { + return nil, errors.Wrap(err, "LedgerBySequence error") + } + + var resource protocol.LiquidityPool + err = resourceadapter.PopulateLiquidityPool(ctx, &resource, cb, ledger) + if err != nil { + return nil, err + } + + return resource, nil +} + +// LiquidityPoolsQuery query struct for liquidity_pools end-point +type LiquidityPoolsQuery struct { + Reserves string `schema:"reserves" valid:"optional"` + Account string `schema:"account" valid:"optional"` + + reserves []xdr.Asset +} + +// URITemplate returns a rfc6570 URI template the query struct +func (q LiquidityPoolsQuery) URITemplate() string { + return getURITemplate(&q, "liquidity_pools", true) +} + +// Validate validates and parses the query +func (q *LiquidityPoolsQuery) Validate() error { + assets := []xdr.Asset{} + reserves := strings.Split(q.Reserves, ",") + reservesErr := problem.MakeInvalidFieldProblem( + "reserves", + errors.New("Invalid reserves, should be comma-separated list of assets in canonical form"), + ) + for _, reserve := range reserves { + if reserve == "" { + continue + } + switch reserve { + case "native": + assets = append(assets, xdr.MustNewNativeAsset()) + default: + parts := strings.Split(reserve, ":") + if len(parts) != 2 { + return reservesErr + } + asset, err := xdr.NewCreditAsset(parts[0], parts[1]) + if err != nil { + return reservesErr + } + assets = append(assets, asset) + } + } + q.reserves = assets + return nil +} + +type GetLiquidityPoolsHandler struct { + LedgerState *ledger.State +} + +// GetResourcePage returns a page of liquidity pools. 
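+//
+// Results can be filtered by reserves (a comma-separated list of assets in
+// canonical form, as parsed by Validate above) and by a participating account,
+// for example (addresses taken from this package's tests):
+//
+//    GET /liquidity_pools?reserves=native,USD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H
+//    GET /liquidity_pools?account=GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2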
+func (handler GetLiquidityPoolsHandler) GetResourcePage(w HeaderWriter, r *http.Request) ([]hal.Pageable, error) { + ctx := r.Context() + qp := LiquidityPoolsQuery{} + err := getParams(&qp, r) + if err != nil { + return nil, err + } + + pq, err := GetPageQuery(handler.LedgerState, r, DisableCursorValidation) + if err != nil { + return nil, err + } + + query := history.LiquidityPoolsQuery{ + PageQuery: pq, + Account: qp.Account, + Assets: qp.reserves, + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + liquidityPools, err := handler.getLiquidityPoolsPage(ctx, historyQ, query) + if err != nil { + return nil, err + } + + return liquidityPools, nil +} + +func (handler GetLiquidityPoolsHandler) getLiquidityPoolsPage(ctx context.Context, historyQ *history.Q, query history.LiquidityPoolsQuery) ([]hal.Pageable, error) { + records, err := historyQ.GetLiquidityPools(ctx, query) + if err != nil { + return nil, err + } + + ledgerCache := history.LedgerCache{} + for _, record := range records { + ledgerCache.Queue(int32(record.LastModifiedLedger)) + } + if err := ledgerCache.Load(ctx, historyQ); err != nil { + return nil, errors.Wrap(err, "failed to load ledger batch") + } + + var liquidityPools []hal.Pageable + for _, record := range records { + var response protocol.LiquidityPool + + var ledger *history.Ledger + if l, ok := ledgerCache.Records[int32(record.LastModifiedLedger)]; ok { + ledger = &l + } + + resourceadapter.PopulateLiquidityPool(ctx, &response, record, ledger) + liquidityPools = append(liquidityPools, response) + } + + return liquidityPools, nil +} diff --git a/services/horizon/internal/actions/liquidity_pool_test.go b/services/horizon/internal/actions/liquidity_pool_test.go new file mode 100644 index 0000000000..1203db2137 --- /dev/null +++ b/services/horizon/internal/actions/liquidity_pool_test.go @@ -0,0 +1,170 @@ +package actions + +import ( + "fmt" + "net/http/httptest" + "testing" + + "github.com/stellar/go/keypair" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestGetLiquidityPoolByID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + + lp := history.MakeTestPool(xdr.MustNewNativeAsset(), 100, usdAsset, 200) + err := q.UpsertLiquidityPools(tt.Ctx, []history.LiquidityPool{lp}) + tt.Assert.NoError(err) + + handler := GetLiquidityPoolByIDHandler{} + response, err := handler.GetResource(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{"liquidity_pool_id": lp.PoolID}, + q, + )) + tt.Assert.NoError(err) + + resource := response.(protocol.LiquidityPool) + tt.Assert.Equal(lp.PoolID, resource.ID) + tt.Assert.Equal("constant_product", resource.Type) + tt.Assert.Equal(uint32(30), resource.FeeBP) + tt.Assert.Equal(uint64(12345), resource.TotalTrustlines) + tt.Assert.Equal("0.0067890", resource.TotalShares) + tt.Assert.Equal("native", resource.Reserves[0].Asset) + tt.Assert.Equal("0.0000100", resource.Reserves[0].Amount) + + tt.Assert.Equal(usdAsset.StringCanonical(), resource.Reserves[1].Asset) + tt.Assert.Equal("0.0000200", resource.Reserves[1].Amount) + + // try to fetch pool which does not exist + _, err = 
handler.GetResource(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{"liquidity_pool_id": "123816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"}, + q, + )) + tt.Assert.Error(err) + tt.Assert.True(q.NoRows(errors.Cause(err))) + + // try to fetch a random invalid hex id + _, err = handler.GetResource(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{"liquidity_pool_id": "0000001112122"}, + q, + )) + tt.Assert.Error(err) + p := err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("liquidity_pool_id", p.Extras["invalid_field"]) + tt.Assert.Equal("0000001112122 does not validate as sha256", p.Extras["reason"]) +} + +func TestGetLiquidityPools(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + + lp1 := history.MakeTestPool(nativeAsset, 100, usdAsset, 200) + lp2 := history.MakeTestPool(eurAsset, 300, usdAsset, 400) + err := q.UpsertLiquidityPools(tt.Ctx, []history.LiquidityPool{lp1, lp2}) + tt.Assert.NoError(err) + + handler := GetLiquidityPoolsHandler{} + response, err := handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{}, + map[string]string{}, + q, + )) + tt.Assert.NoError(err) + tt.Assert.Len(response, 2) + + resource := response[0].(protocol.LiquidityPool) + tt.Assert.Equal(lp1.PoolID, resource.ID) + tt.Assert.Equal("constant_product", resource.Type) + tt.Assert.Equal(uint32(30), resource.FeeBP) + tt.Assert.Equal(uint64(12345), resource.TotalTrustlines) + tt.Assert.Equal("0.0067890", resource.TotalShares) + + tt.Assert.Equal("native", resource.Reserves[0].Asset) + tt.Assert.Equal("0.0000100", resource.Reserves[0].Amount) + + tt.Assert.Equal(usdAsset.StringCanonical(), resource.Reserves[1].Asset) + tt.Assert.Equal("0.0000200", resource.Reserves[1].Amount) + + resource = response[1].(protocol.LiquidityPool) + tt.Assert.Equal(lp2.PoolID, resource.ID) + tt.Assert.Equal("constant_product", resource.Type) + tt.Assert.Equal(uint32(30), resource.FeeBP) + tt.Assert.Equal(uint64(12345), resource.TotalTrustlines) + tt.Assert.Equal("0.0067890", resource.TotalShares) + + tt.Assert.Equal(eurAsset.StringCanonical(), resource.Reserves[0].Asset) + tt.Assert.Equal("0.0000300", resource.Reserves[0].Amount) + + tt.Assert.Equal(usdAsset.StringCanonical(), resource.Reserves[1].Asset) + tt.Assert.Equal("0.0000400", resource.Reserves[1].Amount) + + t.Run("filtering by reserves", func(t *testing.T) { + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{"reserves": "native"}, + map[string]string{}, + q, + )) + assert.NoError(t, err) + assert.Len(t, response, 1) + }) + + t.Run("paging via cursor", func(t *testing.T) { + response, err = handler.GetResourcePage(httptest.NewRecorder(), makeRequest( + t, + map[string]string{"cursor": lp1.PoolID}, + map[string]string{}, + q, + )) + assert.NoError(t, err) + assert.Len(t, response, 1) + resource = response[0].(protocol.LiquidityPool) + assert.Equal(t, lp2.PoolID, resource.ID) + }) + + t.Run("filtering by participating account", func(t *testing.T) { + // we need to add trustlines to filter by account + accountId := keypair.MustRandom().Address() + assert.NoError(t, q.UpsertTrustLines(tt.Ctx, []history.TrustLine{ + history.MakeTestTrustline(accountId, nativeAsset, ""), + history.MakeTestTrustline(accountId, eurAsset, ""), + history.MakeTestTrustline(accountId, xdr.Asset{}, lp1.PoolID), + })) + + 
request := makeRequest(
+            t,
+            map[string]string{"account": accountId},
+            map[string]string{},
+            q,
+        )
+        assert.Contains(t, request.URL.String(), fmt.Sprintf("account=%s", accountId))
+
+        handler := GetLiquidityPoolsHandler{}
+        response, err := handler.GetResourcePage(httptest.NewRecorder(), request)
+        assert.NoError(t, err)
+        assert.Len(t, response, 1)
+
+        assert.IsType(t, protocol.LiquidityPool{}, response[0])
+        resource = response[0].(protocol.LiquidityPool)
+        assert.Equal(t, lp1.PoolID, resource.ID)
+    })
+}
diff --git a/services/horizon/internal/actions/main.go b/services/horizon/internal/actions/main.go
new file mode 100644
index 0000000000..efdca37256
--- /dev/null
+++ b/services/horizon/internal/actions/main.go
@@ -0,0 +1,7 @@
+package actions
+
+import "github.com/stellar/go/services/horizon/internal/corestate"
+
+type CoreStateGetter interface {
+    GetCoreState() corestate.State
+}
diff --git a/services/horizon/internal/actions/offer.go b/services/horizon/internal/actions/offer.go
new file mode 100644
index 0000000000..c5de593c4b
--- /dev/null
+++ b/services/horizon/internal/actions/offer.go
@@ -0,0 +1,213 @@
+package actions
+
+import (
+    "context"
+    "net/http"
+
+    "github.com/stellar/go/protocols/horizon"
+    horizonContext "github.com/stellar/go/services/horizon/internal/context"
+    "github.com/stellar/go/services/horizon/internal/db2/history"
+    "github.com/stellar/go/services/horizon/internal/ledger"
+    "github.com/stellar/go/services/horizon/internal/resourceadapter"
+    "github.com/stellar/go/support/errors"
+    "github.com/stellar/go/support/render/hal"
+)
+
+// OfferByIDQuery query struct for the offers/{id} endpoint
+type OfferByIDQuery struct {
+    OfferID uint64 `schema:"offer_id" valid:"-"`
+}
+
+// GetOfferByID is the action handler for the /offers/{id} endpoint
+type GetOfferByID struct{}
+
+// GetResource returns an offer by id.
+func (handler GetOfferByID) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) {
+    ctx := r.Context()
+
+    qp := OfferByIDQuery{}
+    if err := getParams(&qp, r); err != nil {
+        return nil, err
+    }
+
+    historyQ, err := horizonContext.HistoryQFromRequest(r)
+    if err != nil {
+        return nil, err
+    }
+
+    record, err := historyQ.GetOfferByID(r.Context(), int64(qp.OfferID))
+    if err != nil {
+        return nil, err
+    }
+
+    ledger := &history.Ledger{}
+    err = historyQ.LedgerBySequence(
+        r.Context(),
+        ledger,
+        int32(record.LastModifiedLedger),
+    )
+    if historyQ.NoRows(err) {
+        ledger = nil
+    } else if err != nil {
+        return nil, err
+    }
+
+    var offerResponse horizon.Offer
+    resourceadapter.PopulateOffer(ctx, &offerResponse, record, ledger)
+    return offerResponse, nil
+}
+
+// OffersQuery query struct for offers end-point
+type OffersQuery struct {
+    SellingBuyingAssetQueryParams `valid:"-"`
+    Seller  string `schema:"seller" valid:"accountID,optional"`
+    Sponsor string `schema:"sponsor" valid:"accountID,optional"`
+}
+
+// URITemplate returns a rfc6570 URI template for the query struct
+func (q OffersQuery) URITemplate() string {
+    // building this manually since we don't want to include all the params in SellingBuyingAssetQueryParams
+    return "/offers{?selling,buying,seller,sponsor,cursor,limit,order}"
+}
+
+// Validate runs custom validations.
+func (q OffersQuery) Validate() error {
+    return q.SellingBuyingAssetQueryParams.Validate()
+}
+
+// GetOffersHandler is the action handler for the /offers endpoint
+type GetOffersHandler struct {
+    LedgerState *ledger.State
+}
+
+// GetResourcePage returns a page of offers.
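+//
+// The handler accepts the filters exercised by its tests, roughly:
+//
+//    GET /offers?seller={account id}             -> offers sold by that account
+//    GET /offers?sponsor={account id}            -> offers sponsored by that account
+//    GET /offers?selling=EUR:{issuer account id} -> offers selling that canonical asset
+//    GET /offers?buying_asset_type=credit_alphanum4&buying_asset_code=USD&buying_asset_issuer={issuer account id}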
+func (handler GetOffersHandler) GetResourcePage( + w HeaderWriter, + r *http.Request, +) ([]hal.Pageable, error) { + ctx := r.Context() + qp := OffersQuery{} + err := getParams(&qp, r) + if err != nil { + return nil, err + } + + pq, err := GetPageQuery(handler.LedgerState, r) + if err != nil { + return nil, err + } + + selling, err := qp.Selling() + if err != nil { + return nil, err + } + buying, err := qp.Buying() + if err != nil { + return nil, err + } + + query := history.OffersQuery{ + PageQuery: pq, + SellerID: qp.Seller, + Sponsor: qp.Sponsor, + Selling: selling, + Buying: buying, + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + offers, err := getOffersPage(ctx, historyQ, query) + if err != nil { + return nil, err + } + + return offers, nil +} + +// AccountOffersQuery query struct for offers end-point +type AccountOffersQuery struct { + AccountID string `schema:"account_id" valid:"accountID,required"` +} + +// GetAccountOffersHandler is the action handler for the +// `/accounts/{account_id}/offers` endpoint when using experimental ingestion. +type GetAccountOffersHandler struct { + LedgerState *ledger.State +} + +func (handler GetAccountOffersHandler) parseOffersQuery(r *http.Request) (history.OffersQuery, error) { + pq, err := GetPageQuery(handler.LedgerState, r) + if err != nil { + return history.OffersQuery{}, err + } + + qp := AccountOffersQuery{} + if err = getParams(&qp, r); err != nil { + return history.OffersQuery{}, err + } + + query := history.OffersQuery{ + PageQuery: pq, + SellerID: qp.AccountID, + } + + return query, nil +} + +// GetResourcePage returns a page of offers for a given account. +func (handler GetAccountOffersHandler) GetResourcePage( + w HeaderWriter, + r *http.Request, +) ([]hal.Pageable, error) { + ctx := r.Context() + query, err := handler.parseOffersQuery(r) + if err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + offers, err := getOffersPage(ctx, historyQ, query) + if err != nil { + return nil, err + } + + return offers, nil +} + +func getOffersPage(ctx context.Context, historyQ *history.Q, query history.OffersQuery) ([]hal.Pageable, error) { + records, err := historyQ.GetOffers(ctx, query) + if err != nil { + return nil, err + } + + ledgerCache := history.LedgerCache{} + for _, record := range records { + ledgerCache.Queue(int32(record.LastModifiedLedger)) + } + + if err := ledgerCache.Load(ctx, historyQ); err != nil { + return nil, errors.Wrap(err, "failed to load ledger batch") + } + + var offers []hal.Pageable + for _, record := range records { + var offerResponse horizon.Offer + + var ledger *history.Ledger + if l, ok := ledgerCache.Records[int32(record.LastModifiedLedger)]; ok { + ledger = &l + } + + resourceadapter.PopulateOffer(ctx, &offerResponse, record, ledger) + offers = append(offers, offerResponse) + } + + return offers, nil +} diff --git a/services/horizon/internal/actions/offer_test.go b/services/horizon/internal/actions/offer_test.go new file mode 100644 index 0000000000..41663c65d5 --- /dev/null +++ b/services/horizon/internal/actions/offer_test.go @@ -0,0 +1,516 @@ +package actions + +import ( + "database/sql" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/guregu/null" + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + 
"github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +var ( + issuer = xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + seller = xdr.MustAddress("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2") + + nativeAsset = xdr.MustNewNativeAsset() + usdAsset = xdr.MustNewCreditAsset("USD", issuer.Address()) + eurAsset = xdr.MustNewCreditAsset("EUR", issuer.Address()) + + eurOffer = history.Offer{ + SellerID: issuer.Address(), + OfferID: int64(4), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(500), + Pricen: int32(1), + Priced: int32(1), + Price: float64(1), + Flags: 1, + LastModifiedLedger: uint32(3), + } + twoEurOffer = history.Offer{ + SellerID: seller.Address(), + OfferID: int64(5), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(500), + Pricen: int32(2), + Priced: int32(1), + Price: float64(2), + Flags: 2, + LastModifiedLedger: uint32(4), + Sponsor: null.StringFrom(sponsor), + } + usdOffer = history.Offer{ + SellerID: issuer.Address(), + OfferID: int64(6), + + BuyingAsset: usdAsset, + SellingAsset: eurAsset, + + Amount: int64(500), + Pricen: int32(1), + Priced: int32(1), + Price: float64(1), + Flags: 1, + LastModifiedLedger: uint32(4), + } +) + +func TestGetOfferByIDHandler(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &history.Q{tt.HorizonSession()} + handler := GetOfferByID{} + + ledgerCloseTime := time.Now().Unix() + _, err := q.InsertLedger(tt.Ctx, xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 3, + ScpValue: xdr.StellarValue{ + CloseTime: xdr.TimePoint(ledgerCloseTime), + }, + }, + }, 0, 0, 0, 0, 0) + tt.Assert.NoError(err) + + err = q.UpsertOffers(tt.Ctx, []history.Offer{eurOffer, usdOffer}) + tt.Assert.NoError(err) + + for _, testCase := range []struct { + name string + request *http.Request + expectedError func(error) + expectedOffer func(interface{}) + }{ + { + "offer id is invalid", + makeRequest( + t, map[string]string{}, map[string]string{"offer_id": "invalid"}, q, + ), + func(err error) { + tt.Assert.Error(err) + p := err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("offer_id", p.Extras["invalid_field"]) + tt.Assert.Equal("Offer ID must be an integer higher than 0", p.Extras["reason"]) + }, + func(response interface{}) { + tt.Assert.Nil(response) + }, + }, + { + "offer does not exist", + makeRequest( + t, map[string]string{}, map[string]string{"offer_id": "1234567"}, q, + ), + func(err error) { + tt.Assert.Equal(err, sql.ErrNoRows) + }, + func(response interface{}) { + tt.Assert.Nil(response) + }, + }, + { + "offer with ledger close time", + makeRequest( + t, map[string]string{}, map[string]string{"offer_id": "4"}, q, + ), + func(err error) { + tt.Assert.NoError(err) + }, + func(response interface{}) { + offer := response.(horizon.Offer) + tt.Assert.Equal(int64(eurOffer.OfferID), offer.ID) + tt.Assert.Equal("native", offer.Selling.Type) + tt.Assert.Equal("credit_alphanum4", offer.Buying.Type) + tt.Assert.Equal("EUR", offer.Buying.Code) + tt.Assert.Equal(issuer.Address(), offer.Seller) + tt.Assert.Equal(issuer.Address(), offer.Buying.Issuer) + tt.Assert.Equal(int32(3), offer.LastModifiedLedger) + tt.Assert.Equal(ledgerCloseTime, offer.LastModifiedTime.Unix()) + }, + }, + { + "offer without ledger close time", + makeRequest( + t, map[string]string{}, 
map[string]string{"offer_id": "6"}, q, + ), + func(err error) { + tt.Assert.NoError(err) + }, + func(response interface{}) { + offer := response.(horizon.Offer) + tt.Assert.Equal(int64(usdOffer.OfferID), offer.ID) + tt.Assert.Equal("credit_alphanum4", offer.Selling.Type) + tt.Assert.Equal("EUR", offer.Selling.Code) + tt.Assert.Equal("credit_alphanum4", offer.Buying.Type) + tt.Assert.Equal("USD", offer.Buying.Code) + tt.Assert.Equal(issuer.Address(), offer.Seller) + tt.Assert.Equal(issuer.Address(), offer.Selling.Issuer) + tt.Assert.Equal(issuer.Address(), offer.Buying.Issuer) + tt.Assert.Equal(int32(4), offer.LastModifiedLedger) + tt.Assert.Nil(offer.LastModifiedTime) + }, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + offer, err := handler.GetResource(httptest.NewRecorder(), testCase.request) + testCase.expectedError(err) + testCase.expectedOffer(offer) + }) + } +} + +func TestGetOffersHandler(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &history.Q{tt.HorizonSession()} + handler := GetOffersHandler{} + + ledgerCloseTime := time.Now().Unix() + _, err := q.InsertLedger(tt.Ctx, xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 3, + ScpValue: xdr.StellarValue{ + CloseTime: xdr.TimePoint(ledgerCloseTime), + }, + }, + }, 0, 0, 0, 0, 0) + tt.Assert.NoError(err) + + err = q.UpsertOffers(tt.Ctx, []history.Offer{eurOffer, twoEurOffer, usdOffer}) + tt.Assert.NoError(err) + + t.Run("No filter", func(t *testing.T) { + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 3) + + offers := pageableToOffers(t, records) + + tt.Assert.Equal(int64(eurOffer.OfferID), offers[0].ID) + tt.Assert.Equal("native", offers[0].Selling.Type) + tt.Assert.Equal("credit_alphanum4", offers[0].Buying.Type) + tt.Assert.Equal(issuer.Address(), offers[0].Seller) + tt.Assert.Equal(issuer.Address(), offers[0].Buying.Issuer) + tt.Assert.Equal(int32(3), offers[0].LastModifiedLedger) + tt.Assert.Equal(ledgerCloseTime, offers[0].LastModifiedTime.Unix()) + }) + + t.Run("Filter by seller", func(t *testing.T) { + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "seller": issuer.Address(), + }, + map[string]string{}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 2) + + offers := pageableToOffers(t, records) + for _, offer := range offers { + tt.Assert.Equal(issuer.Address(), offer.Seller) + } + + _, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "seller": "GCXEWJ6U4KPGTNTBY5HX4WQ2EEVPWV2QKXEYIQ32IDYIX", + }, + map[string]string{}, + q, + ), + ) + tt.Assert.Error(err) + tt.Assert.IsType(&problem.P{}, err) + p := err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("seller", p.Extras["invalid_field"]) + tt.Assert.Equal( + "Account ID must start with `G` and contain 56 alphanum characters", + p.Extras["reason"], + ) + }) + + t.Run("Filter by sponsor", func(t *testing.T) { + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "sponsor": sponsor, + }, + map[string]string{}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + offers := pageableToOffers(t, records) + tt.Assert.Equal(int64(twoEurOffer.OfferID), offers[0].ID) + + _, err = handler.GetResourcePage( + httptest.NewRecorder(), + 
makeRequest( + t, + map[string]string{ + "sponsor": "GCXEWJ6U4KPGTNTBY5HX4WQ2EEVPWV2QKXEYIQ32IDYIX", + }, + map[string]string{}, + q, + ), + ) + tt.Assert.Error(err) + tt.Assert.IsType(&problem.P{}, err) + p := err.(*problem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("sponsor", p.Extras["invalid_field"]) + tt.Assert.Equal( + "Account ID must start with `G` and contain 56 alphanum characters", + p.Extras["reason"], + ) + }) + + t.Run("Filter by selling asset", func(t *testing.T) { + asset := horizon.Asset{} + nativeAsset.Extract(&asset.Type, &asset.Code, &asset.Issuer) + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "selling_asset_type": asset.Type, + }, + map[string]string{}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 2) + + offers := pageableToOffers(t, records) + for _, offer := range offers { + tt.Assert.Equal(asset, offer.Selling) + } + + asset = horizon.Asset{} + eurAsset.Extract(&asset.Type, &asset.Code, &asset.Issuer) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "selling_asset_type": asset.Type, + "selling_asset_code": asset.Code, + "selling_asset_issuer": asset.Issuer, + }, + map[string]string{}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + offers = pageableToOffers(t, records) + tt.Assert.Equal(asset, offers[0].Selling) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "selling": asset.Code + ":" + asset.Issuer, + }, + map[string]string{}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + offers = pageableToOffers(t, records) + tt.Assert.Equal(asset, offers[0].Selling) + }) + + t.Run("Filter by buying asset", func(t *testing.T) { + asset := horizon.Asset{} + eurAsset.Extract(&asset.Type, &asset.Code, &asset.Issuer) + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "buying_asset_type": asset.Type, + "buying_asset_code": asset.Code, + "buying_asset_issuer": asset.Issuer, + }, + map[string]string{}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 2) + + offers := pageableToOffers(t, records) + for _, offer := range offers { + tt.Assert.Equal(asset, offer.Buying) + } + + asset = horizon.Asset{} + usdAsset.Extract(&asset.Type, &asset.Code, &asset.Issuer) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "buying_asset_type": asset.Type, + "buying_asset_code": asset.Code, + "buying_asset_issuer": asset.Issuer, + }, + map[string]string{}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + offers = pageableToOffers(t, records) + for _, offer := range offers { + tt.Assert.Equal(asset, offer.Buying) + } + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "buying": asset.Code + ":" + asset.Issuer, + }, + map[string]string{}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + offers = pageableToOffers(t, records) + for _, offer := range offers { + tt.Assert.Equal(asset, offer.Buying) + } + }) + + t.Run("Wrong buying query parameter", func(t *testing.T) { + asset := horizon.Asset{} + eurAsset.Extract(&asset.Type, &asset.Code, &asset.Issuer) + + _, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{ + "buying": 
`native\\u0026cursor=\\u0026limit=10\\u0026order=asc\\u0026selling=BTC:GAEDZ7BHMDYEMU6IJT3CTTGDUSLZWS5CQWZHGP4XUOIDG5ISH3AFAEK2`, + }, + map[string]string{}, + q, + ), + ) + tt.Assert.Error(err) + p, ok := err.(*problem.P) + if tt.Assert.True(ok) { + tt.Assert.Equal(400, p.Status) + tt.Assert.NotNil(p.Extras) + tt.Assert.Equal(p.Extras["invalid_field"], "buying") + tt.Assert.Equal(p.Extras["reason"], "Asset code length is invalid") + } + }) +} + +func TestGetAccountOffersHandler(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + handler := GetAccountOffersHandler{} + + err := q.UpsertOffers(tt.Ctx, []history.Offer{eurOffer, twoEurOffer, usdOffer}) + tt.Assert.NoError(err) + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{}, + map[string]string{"account_id": issuer.Address()}, + q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 2) + + offers := pageableToOffers(t, records) + + for _, offer := range offers { + tt.Assert.Equal(issuer.Address(), offer.Seller) + } + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, + map[string]string{}, + map[string]string{}, + q, + ), + ) + tt.Assert.Error(err) +} + +func pageableToOffers(t *testing.T, page []hal.Pageable) []horizon.Offer { + var offers []horizon.Offer + for _, entry := range page { + offers = append(offers, entry.(horizon.Offer)) + } + return offers +} + +func TestOffersQueryURLTemplate(t *testing.T) { + tt := assert.New(t) + expected := "/offers{?selling,buying,seller,sponsor,cursor,limit,order}" + offersQuery := OffersQuery{} + tt.Equal(expected, offersQuery.URITemplate()) +} diff --git a/services/horizon/internal/actions/operation.go b/services/horizon/internal/actions/operation.go new file mode 100644 index 0000000000..8a3451595d --- /dev/null +++ b/services/horizon/internal/actions/operation.go @@ -0,0 +1,227 @@ +package actions + +import ( + "context" + "fmt" + "net/http" + + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + supportProblem "github.com/stellar/go/support/render/problem" +) + +// Joinable query struct for join query parameter +type Joinable struct { + Join string `schema:"join" valid:"in(transactions)~Accepted values: transactions,optional"` +} + +// IncludeTransactions returns extra fields to include in the response +func (qp Joinable) IncludeTransactions() bool { + return qp.Join == "transactions" +} + +// OperationsQuery query struct for operations end-points +type OperationsQuery struct { + Joinable `valid:"optional"` + AccountID string `schema:"account_id" valid:"accountID,optional"` + ClaimableBalanceID string `schema:"claimable_balance_id" valid:"claimableBalanceID,optional"` + LiquidityPoolID string `schema:"liquidity_pool_id" valid:"sha256,optional"` + TransactionHash string `schema:"tx_id" valid:"transactionHash,optional"` + IncludeFailedTransactions bool `schema:"include_failed" valid:"-"` + LedgerID uint32 `schema:"ledger_id" valid:"-"` +} + +// Validate runs extra validations on query 
parameters
+func (qp OperationsQuery) Validate() error {
+	filters, err := countNonEmpty(
+		qp.AccountID,
+		qp.ClaimableBalanceID,
+		qp.LiquidityPoolID,
+		qp.LedgerID,
+		qp.TransactionHash,
+	)
+
+	if err != nil {
+		return supportProblem.BadRequest
+	}
+
+	if filters > 1 {
+		return supportProblem.MakeInvalidFieldProblem(
+			"filters",
+			errors.New("Use a single filter for operations, you can only use one of tx_id, account_id or ledger_id"),
+		)
+	}
+
+	return nil
+}
+
+// GetOperationsHandler is the action handler for all end-points returning a list of operations.
+type GetOperationsHandler struct {
+	LedgerState  *ledger.State
+	OnlyPayments bool
+}
+
+// GetResourcePage returns a page of operations.
+func (handler GetOperationsHandler) GetResourcePage(w HeaderWriter, r *http.Request) ([]hal.Pageable, error) {
+	ctx := r.Context()
+
+	pq, err := GetPageQuery(handler.LedgerState, r)
+	if err != nil {
+		return nil, err
+	}
+
+	err = validateCursorWithinHistory(handler.LedgerState, pq)
+	if err != nil {
+		return nil, err
+	}
+
+	qp := OperationsQuery{}
+	err = getParams(&qp, r)
+	if err != nil {
+		return nil, err
+	}
+
+	historyQ, err := horizonContext.HistoryQFromRequest(r)
+	if err != nil {
+		return nil, err
+	}
+
+	query := historyQ.Operations()
+
+	switch {
+	case qp.AccountID != "":
+		query.ForAccount(ctx, qp.AccountID)
+	case qp.ClaimableBalanceID != "":
+		query.ForClaimableBalance(ctx, qp.ClaimableBalanceID)
+	case qp.LiquidityPoolID != "":
+		query.ForLiquidityPool(ctx, qp.LiquidityPoolID)
+	case qp.LedgerID > 0:
+		query.ForLedger(ctx, int32(qp.LedgerID))
+	case qp.TransactionHash != "":
+		query.ForTransaction(ctx, qp.TransactionHash)
+	}
+	// When querying operations for a transaction, return both successful
+	// and failed operations. We assume that because the user is querying
+	// this specific transaction, they know its status.
+	if qp.TransactionHash != "" || qp.IncludeFailedTransactions {
+		query.IncludeFailed()
+	}
+
+	if qp.IncludeTransactions() {
+		query.IncludeTransactions()
+	}
+
+	if handler.OnlyPayments {
+		query.OnlyPayments()
+	}
+
+	ops, txs, err := query.Page(pq).Fetch(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return buildOperationsPage(ctx, historyQ, ops, txs, qp.IncludeTransactions())
+}
+
+// GetOperationByIDHandler is the action handler for end-points returning a single operation.
+type GetOperationByIDHandler struct {
+	LedgerState *ledger.State
+}
+
+// OperationQuery query struct for operation/id end-point
+type OperationQuery struct {
+	LedgerState *ledger.State `valid:"-"`
+	Joinable    `valid:"optional"`
+	ID          uint64 `schema:"id" valid:"-"`
+}
+
+// Validate runs extra validations on query parameters
+func (qp OperationQuery) Validate() error {
+	parsed := toid.Parse(int64(qp.ID))
+	if parsed.LedgerSequence < qp.LedgerState.CurrentStatus().HistoryElder {
+		return problem.BeforeHistory
+	}
+	return nil
+}
+
+// GetResource returns a single operation resource.
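// Illustrative sketch, not part of the upstream change: OperationsQuery.Validate above
// treats account_id, claimable_balance_id, liquidity_pool_id, ledger_id and tx_id as
// mutually exclusive, so /operations?tx_id=...&ledger_id=5 yields a bad_request problem
// on the "filters" field. A hypothetical client-side guard with the same behaviour:
package example

import "errors"

// checkExclusiveFilters mirrors the server-side rule: at most one filter may be set.
func checkExclusiveFilters(filters ...string) error {
	set := 0
	for _, f := range filters {
		if f != "" {
			set++
		}
	}
	if set > 1 {
		return errors.New("use a single filter for operations, e.g. one of tx_id, account_id or ledger_id")
	}
	return nil
}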
+func (handler GetOperationByIDHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + ctx := r.Context() + qp := OperationQuery{ + LedgerState: handler.LedgerState, + } + err := getParams(&qp, r) + if err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + op, tx, err := historyQ.OperationByID(ctx, qp.IncludeTransactions(), int64(qp.ID)) + if err != nil { + return nil, err + } + + var ledger history.Ledger + err = historyQ.LedgerBySequence(ctx, &ledger, op.LedgerSequence()) + if err != nil { + return nil, err + } + + return resourceadapter.NewOperation( + ctx, + op, + op.TransactionHash, + tx, + ledger, + ) +} + +func buildOperationsPage(ctx context.Context, historyQ *history.Q, operations []history.Operation, transactions []history.Transaction, includeTransactions bool) ([]hal.Pageable, error) { + ledgerCache := history.LedgerCache{} + for _, record := range operations { + ledgerCache.Queue(record.LedgerSequence()) + } + + if err := ledgerCache.Load(ctx, historyQ); err != nil { + return nil, errors.Wrap(err, "failed to load ledger batch") + } + + var response []hal.Pageable + for i, operationRecord := range operations { + ledger, found := ledgerCache.Records[operationRecord.LedgerSequence()] + if !found { + msg := fmt.Sprintf("could not find ledger data for sequence %d", operationRecord.LedgerSequence()) + return nil, errors.New(msg) + } + + var transactionRecord *history.Transaction + + if includeTransactions { + transactionRecord = &transactions[i] + } + + var res hal.Pageable + res, err := resourceadapter.NewOperation( + ctx, + operationRecord, + operationRecord.TransactionHash, + transactionRecord, + ledger, + ) + if err != nil { + return nil, err + } + response = append(response, res) + } + + return response, nil +} diff --git a/services/horizon/internal/actions/operation_test.go b/services/horizon/internal/actions/operation_test.go new file mode 100644 index 0000000000..9144f5004c --- /dev/null +++ b/services/horizon/internal/actions/operation_test.go @@ -0,0 +1,697 @@ +package actions + +import ( + "database/sql" + "fmt" + "net/http/httptest" + "testing" + "time" + + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/test" + supportProblem "github.com/stellar/go/support/render/problem" +) + +func TestGetOperationsWithoutFilter(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("base") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{} + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 4) +} + +func TestGetOperationsExclusiveFilters(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("base") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{} + + testCases := []struct { + desc string + query map[string]string + }{ + { + desc: "tx_id & ledger_id", + query: map[string]string{ + "tx_id": "1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc292", + "ledger_id": "1", + }, + }, + { + desc: "tx_id & account_id", + query: map[string]string{ + "tx_id": 
"1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc292", + "account_id": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + }, + }, + { + desc: "account_id & ledger_id", + query: map[string]string{ + "account_id": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "ledger_id": "1", + }, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + _, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, tc.query, map[string]string{}, q, + ), + ) + tt.Assert.IsType(&supportProblem.P{}, err) + p := err.(*supportProblem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("filters", p.Extras["invalid_field"]) + tt.Assert.Equal( + "Use a single filter for operations, you can only use one of tx_id, account_id or ledger_id", + p.Extras["reason"], + ) + }) + } + +} + +func TestGetOperationsByLiquidityPool(t *testing.T) { + +} + +func TestGetOperationsFilterByAccountID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("base") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{} + + testCases := []struct { + accountID string + expected int + }{ + { + accountID: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + expected: 3, + }, + { + accountID: "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + expected: 1, + }, + { + accountID: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + expected: 2, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("account %s operations", tc.accountID), func(t *testing.T) { + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "account_id": tc.accountID, + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, tc.expected) + }) + } +} + +func TestGetOperationsFilterByTxID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("base") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{} + + testCases := []struct { + desc string + transactionID string + expected int + expectedErr string + notFound bool + }{ + { + desc: "operations for 2374...6d4d", + transactionID: "2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d", + expected: 1, + }, + { + desc: "operations for 164a...33b6", + transactionID: "164a5064eba64f2cdbadb856bf3448485fc626247ada3ed39cddf0f6902133b6", + expected: 1, + }, + { + desc: "missing transaction", + transactionID: "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedErr: "sql: no rows in result set", + notFound: true, + }, + { + desc: "uppercase tx hash not accepted", + transactionID: "2374E99349B9EF7DBA9A5DB3339B78FDA8F34777B1AF33BA468AD5C0DF946D4D", + expectedErr: "Transaction hash must be a hex-encoded, lowercase SHA-256 hash", + }, + { + desc: "badly formated tx hash not accepted", + transactionID: "%00%1E4%5E%EF%BF%BD%EF%BF%BD%EF%BF%BDpVP%EF%BF%BDI&R%0BK%EF%BF%BD%1D%EF%BF%BD%EF%BF%BD=%EF%BF%BD%3F%23%EF%BF%BD%EF%BF%BDl%EF%BF%BD%1El%EF%BF%BD%EF%BF%BD", + expectedErr: "Transaction hash must be a hex-encoded, lowercase SHA-256 hash", + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf(tc.desc), func(t *testing.T) { + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "tx_id": tc.transactionID, + }, map[string]string{}, q, + ), + ) + + if tc.expectedErr == "" { + tt.Assert.NoError(err) + tt.Assert.Len(records, tc.expected) + } else { + if tc.notFound 
{ + tt.Assert.EqualError(err, tc.expectedErr) + } else { + tt.Assert.IsType(&supportProblem.P{}, err) + p := err.(*supportProblem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("tx_id", p.Extras["invalid_field"]) + tt.Assert.Equal( + tc.expectedErr, + p.Extras["reason"], + ) + } + } + }) + } +} + +func TestGetOperationsIncludeFailed(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("failed_transactions") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{} + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "limit": "200", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + + successful := 0 + failed := 0 + + for _, record := range records { + op := record.(operations.Operation) + if op.IsTransactionSuccessful() { + successful++ + } else { + failed++ + } + } + + tt.Assert.Equal(8, successful) + tt.Assert.Equal(0, failed) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "include_failed": "true", + "limit": "200", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + + successful = 0 + failed = 0 + + for _, record := range records { + op := record.(operations.Operation) + if op.IsTransactionSuccessful() { + successful++ + } else { + failed++ + } + } + + tt.Assert.Equal(8, successful) + tt.Assert.Equal(1, failed) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "tx_id": "aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + for _, record := range records { + op := record.(operations.Operation) + tt.Assert.False(op.IsTransactionSuccessful()) + } + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "tx_id": "56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + for _, record := range records { + op := record.(operations.Operation) + tt.Assert.True(op.IsTransactionSuccessful()) + } + + // NULL value + _, err = tt.HorizonSession().ExecRaw(tt.Ctx, + `UPDATE history_transactions SET successful = NULL WHERE transaction_hash = ?`, + "56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1", + ) + tt.Assert.NoError(err) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "tx_id": "56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + for _, record := range records { + op := record.(operations.Operation) + tt.Assert.True(op.IsTransactionSuccessful()) + } + + _, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "include_failed": "foo", + }, map[string]string{}, q, + ), + ) + tt.Assert.Error(err) + tt.Assert.IsType(&supportProblem.P{}, err) + p := err.(*supportProblem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("include_failed", p.Extras["invalid_field"]) + tt.Assert.Equal( + "Filter should be true or false", + p.Extras["reason"], + ) +} + +func TestGetOperationsFilterByLedgerID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("base") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{} + + 
testCases := []struct { + ledgerID string + expected int + expectedErr string + notFound bool + }{ + { + ledgerID: "1", + expected: 0, + }, + { + ledgerID: "2", + expected: 3, + }, + { + ledgerID: "3", + expected: 1, + }, + { + ledgerID: "10000", + expectedErr: "sql: no rows in result set", + notFound: true, + }, + { + ledgerID: "-1", + expectedErr: "Ledger ID must be an integer higher than 0", + }, + { + ledgerID: "one", + expectedErr: "Ledger ID must be an integer higher than 0", + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("ledger %s operations", tc.ledgerID), func(t *testing.T) { + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "ledger_id": tc.ledgerID, + }, map[string]string{}, q, + ), + ) + if tc.expectedErr == "" { + tt.Assert.NoError(err) + tt.Assert.Len(records, tc.expected) + } else { + if tc.notFound { + tt.Assert.EqualError(err, tc.expectedErr) + } else { + tt.Assert.IsType(&supportProblem.P{}, err) + p := err.(*supportProblem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("ledger_id", p.Extras["invalid_field"]) + tt.Assert.Equal( + tc.expectedErr, + p.Extras["reason"], + ) + } + } + }) + } +} + +func TestGetOperationsOnlyPayments(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("base") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{ + OnlyPayments: true, + } + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 4) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "ledger_id": "1", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 0) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "ledger_id": "3", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "account_id": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + tt.Scenario("pathed_payment") + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "tx_id": "b52f16ffb98c047e33b9c2ec30880330cde71f85b3443dae2c5cb86c7d4d8452", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 0) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "tx_id": "1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc292", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + record := records[0].(operations.PathPayment) + tt.Assert.Equal("10.0000000", record.SourceAmount) +} + +func TestOperation_CreatedAt(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("base") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{} + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "ledger_id": "3", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + + l := history.Ledger{} + tt.Assert.NoError(q.LedgerBySequence(tt.Ctx, &l, 3)) + + record := 
records[0].(operations.Payment) + + tt.Assert.WithinDuration(l.ClosedAt, record.LedgerCloseTime, 1*time.Second) +} +func TestGetOperationsPagination(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("base") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{ + LedgerState: &ledger.State{}, + } + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "order": "asc", + "limit": "1", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + descRecords, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "limit": "1", + "order": "desc", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.NotEqual(records, descRecords) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "order": "desc", + "cursor": "12884905985", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 3) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "order": "desc", + "cursor": "0", + }, map[string]string{}, q, + ), + ) + tt.Assert.Error(err) + tt.Assert.EqualError(err, "problem: before_history") +} + +func TestGetOperations_IncludeTransactions(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("failed_transactions") + + q := &history.Q{tt.HorizonSession()} + handler := GetOperationsHandler{} + + _, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "join": "accounts", + }, map[string]string{}, q, + ), + ) + tt.Assert.Error(err) + tt.Assert.IsType(&supportProblem.P{}, err) + p := err.(*supportProblem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("join", p.Extras["invalid_field"]) + tt.Assert.Equal( + "Accepted values: transactions", + p.Extras["reason"], + ) + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "join": "transactions", + "limit": "1", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + for _, record := range records { + op := record.(operations.CreateAccount) + tt.Assert.NotNil(op.Transaction) + } + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "limit": "1", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + for _, record := range records { + op := record.(operations.CreateAccount) + tt.Assert.Nil(op.Transaction) + } +} +func TestGetOperation(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + + handler := GetOperationByIDHandler{ + LedgerState: &ledger.State{}, + } + handler.LedgerState.SetStatus(tt.Scenario("base")) + + record, err := handler.GetResource( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, map[string]string{"id": "8589938689"}, tt.HorizonSession(), + ), + ) + tt.Assert.NoError(err) + op := record.(operations.Operation) + tt.Assert.Equal("8589938689", op.PagingToken()) + tt.Assert.Equal("2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d", op.GetTransactionHash()) + + _, err = handler.GetResource( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, map[string]string{"id": "9589938689"}, tt.HorizonSession(), + ), + ) + + tt.Assert.Equal(err, sql.ErrNoRows) + + _, err = handler.GetResource( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, 
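// Illustrative sketch, not part of the upstream change: operation IDs and paging
// cursors such as "8589938689" and "12884905985" above are total order IDs (TOIDs)
// that pack ledger sequence, transaction order and operation index into one int64,
// which is what lets OperationQuery.Validate compare the parsed ledger sequence
// against the oldest ledger in history. Assuming the usual TOID layout:
package example

import (
	"fmt"

	"github.com/stellar/go/services/horizon/internal/toid"
)

func ExampleParseCursor() {
	parsed := toid.Parse(8589938689) // 2<<32 | 1<<12 | 1
	fmt.Println(parsed.LedgerSequence)
	// Under that layout this prints 2: the operation lives in ledger 2.
}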
map[string]string{"id": "0"}, tt.HorizonSession(), + ), + ) + tt.Assert.Equal(err, problem.BeforeHistory) +} + +func TestOperation_IncludeTransaction(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + tt.Scenario("kahuna") + + handler := GetOperationByIDHandler{ + LedgerState: &ledger.State{}, + } + record, err := handler.GetResource( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, map[string]string{"id": "261993009153"}, tt.HorizonSession(), + ), + ) + + tt.Assert.NoError(err) + + op := record.(operations.BumpSequence) + tt.Assert.Nil(op.Transaction) + + record, err = handler.GetResource( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{"join": "transactions"}, map[string]string{"id": "261993009153"}, tt.HorizonSession(), + ), + ) + op = record.(operations.BumpSequence) + tt.Assert.NotNil(op.Transaction) + tt.Assert.Equal(op.TransactionHash, op.Transaction.ID) +} diff --git a/services/horizon/internal/actions/orderbook.go b/services/horizon/internal/actions/orderbook.go new file mode 100644 index 0000000000..c00d5ceae4 --- /dev/null +++ b/services/horizon/internal/actions/orderbook.go @@ -0,0 +1,119 @@ +package actions + +import ( + "net/http" + + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/support/render/problem" +) + +// StreamableObjectResponse is an interface for objects returned by streamable object endpoints +// A streamable object endpoint is an SSE endpoint which returns a single JSON object response +// instead of a page of items. +type StreamableObjectResponse interface { + Equals(other StreamableObjectResponse) bool +} + +// OrderBookResponse is the response for the /order_book endpoint +// OrderBookResponse implements StreamableObjectResponse +type OrderBookResponse struct { + protocol.OrderBookSummary +} + +func priceLevelsEqual(a, b []protocol.PriceLevel) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} + +// Equals returns true if the OrderBookResponse is equal to `other` +func (o OrderBookResponse) Equals(other StreamableObjectResponse) bool { + otherOrderBook, ok := other.(OrderBookResponse) + if !ok { + return false + } + return otherOrderBook.Selling == o.Selling && + otherOrderBook.Buying == o.Buying && + priceLevelsEqual(otherOrderBook.Bids, o.Bids) && + priceLevelsEqual(otherOrderBook.Asks, o.Asks) +} + +var invalidOrderBook = problem.P{ + Type: "invalid_order_book", + Title: "Invalid Order Book Parameters", + Status: http.StatusBadRequest, + Detail: "The parameters that specify what order book to view are invalid in some way. " + + "Please ensure that your type parameters (selling_asset_type and buying_asset_type) are one the " + + "following valid values: native, credit_alphanum4, credit_alphanum12. 
Also ensure that you " + + "have specified selling_asset_code and selling_asset_issuer if selling_asset_type is not 'native', as well " + + "as buying_asset_code and buying_asset_issuer if buying_asset_type is not 'native'", +} + +// GetOrderbookHandler is the action handler for the /order_book endpoint +type GetOrderbookHandler struct { +} + +func convertPriceLevels(src []history.PriceLevel) []protocol.PriceLevel { + result := make([]protocol.PriceLevel, len(src)) + for i, l := range src { + result[i] = protocol.PriceLevel{ + PriceR: protocol.Price{ + N: l.Pricen, + D: l.Priced, + }, + Price: l.Pricef, + Amount: l.Amount, + } + } + + return result +} + +// GetResource implements the /order_book endpoint +func (handler GetOrderbookHandler) GetResource(w HeaderWriter, r *http.Request) (StreamableObjectResponse, error) { + selling, err := getAsset(r, "selling_") + if err != nil { + return nil, invalidOrderBook + } + buying, err := getAsset(r, "buying_") + if err != nil { + return nil, invalidOrderBook + } + limit, err := getLimit(r, "limit", 20, 200) + if err != nil { + return nil, invalidOrderBook + } + + historyQ, err := context.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + summary, err := historyQ.GetOrderBookSummary(r.Context(), selling, buying, int(limit)) + if err != nil { + return nil, err + } + + var response OrderBookResponse + if err := resourceadapter.PopulateAsset(r.Context(), &response.Selling, selling); err != nil { + return nil, err + } + if err := resourceadapter.PopulateAsset(r.Context(), &response.Buying, buying); err != nil { + return nil, err + } + response.Bids = convertPriceLevels(summary.Bids) + response.Asks = convertPriceLevels(summary.Asks) + + return response, nil +} diff --git a/services/horizon/internal/actions/orderbook_test.go b/services/horizon/internal/actions/orderbook_test.go new file mode 100644 index 0000000000..b73191361a --- /dev/null +++ b/services/horizon/internal/actions/orderbook_test.go @@ -0,0 +1,669 @@ +package actions + +import ( + "database/sql" + "math" + "net/http/httptest" + "strconv" + "testing" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stretchr/testify/assert" + + protocol "github.com/stellar/go/protocols/horizon" +) + +type intObject int + +func (i intObject) Equals(other StreamableObjectResponse) bool { + return i == other.(intObject) +} + +func TestOrderBookResponseEquals(t *testing.T) { + for _, testCase := range []struct { + name string + response protocol.OrderBookSummary + other StreamableObjectResponse + expected bool + }{ + { + "empty orderbook summary", + protocol.OrderBookSummary{}, + OrderBookResponse{}, + true, + }, + { + "types don't match", + protocol.OrderBookSummary{}, + intObject(0), + false, + }, + { + "buying asset doesn't match", + protocol.OrderBookSummary{ + Buying: protocol.Asset{ + Type: "native", + }, + Selling: protocol.Asset{ + Type: "native", + }, + }, + OrderBookResponse{ + protocol.OrderBookSummary{ + Buying: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Selling: protocol.Asset{ + Type: "native", + }, + }, + }, + false, + }, + { + "selling asset doesn't match", + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "native", + }, + Buying: protocol.Asset{ + Type: "native", + }, + }, + OrderBookResponse{ + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: 
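// Illustrative sketch, not part of the upstream change: StreamableObjectResponse.Equals
// exists so a single-object SSE endpoint can skip pushing an event when the order book
// has not changed between polls. The interface is mirrored locally to keep the sketch
// self-contained; maybeSend and its parameters are hypothetical.
package example

// StreamableObjectResponse mirrors the interface declared in the actions package above.
type StreamableObjectResponse interface {
	Equals(other StreamableObjectResponse) bool
}

// maybeSend pushes next only if it differs from the last object sent and returns the
// new "last sent" value, suppressing duplicate order book events.
func maybeSend(lastSent, next StreamableObjectResponse, send func(StreamableObjectResponse)) StreamableObjectResponse {
	if lastSent != nil && lastSent.Equals(next) {
		return lastSent
	}
	send(next)
	return next
}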
"USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + }, + }, + false, + }, + { + "bid lengths don't match", + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Bids: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + }, + { + PriceR: protocol.Price{N: 1, D: 1}, + Price: "1.0", + Amount: "123", + }, + }, + }, + OrderBookResponse{ + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Bids: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + }, + }, + }, + }, + false, + }, + { + "ask lengths don't match", + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Asks: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + }, + { + PriceR: protocol.Price{N: 1, D: 1}, + Price: "1.0", + Amount: "123", + }, + }, + }, + OrderBookResponse{ + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Asks: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + }, + }, + }, + }, + false, + }, + { + "bids don't match", + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Bids: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + }, + { + PriceR: protocol.Price{N: 1, D: 1}, + Price: "1.0", + Amount: "123", + }, + }, + }, + OrderBookResponse{ + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Bids: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + }, + { + PriceR: protocol.Price{N: 2, D: 1}, + Price: "2.0", + Amount: "123", + }, + }, + }, + }, + false, + }, + { + "asks don't match", + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Asks: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + }, + { + PriceR: protocol.Price{N: 1, D: 1}, + Price: "1.0", + Amount: "123", + }, + }, + }, + OrderBookResponse{ + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Asks: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + 
}, + { + PriceR: protocol.Price{N: 1, D: 1}, + Price: "1.0", + Amount: "12", + }, + }, + }, + }, + false, + }, + { + "orderbook summaries match", + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Asks: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + }, + { + PriceR: protocol.Price{N: 1, D: 1}, + Price: "1.0", + Amount: "123", + }, + }, + Bids: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 3}, + Price: "0.33", + Amount: "13", + }, + }, + }, + OrderBookResponse{ + protocol.OrderBookSummary{ + Selling: protocol.Asset{ + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + }, + Buying: protocol.Asset{ + Type: "native", + }, + Bids: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 3}, + Price: "0.33", + Amount: "13", + }, + }, + Asks: []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: 1, D: 2}, + Price: "0.5", + Amount: "123", + }, + { + PriceR: protocol.Price{N: 1, D: 1}, + Price: "1.0", + Amount: "123", + }, + }, + }, + }, + true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + equals := (OrderBookResponse{testCase.response}).Equals(testCase.other) + if equals != testCase.expected { + t.Fatalf("expected %v but got %v", testCase.expected, equals) + } + }) + } +} + +func TestOrderbookGetResourceValidation(t *testing.T) { + handler := GetOrderbookHandler{} + + var eurAssetType, eurAssetCode, eurAssetIssuer string + if err := eurAsset.Extract(&eurAssetType, &eurAssetCode, &eurAssetIssuer); err != nil { + t.Fatalf("cound not extract eur asset: %v", err) + } + var usdAssetType, usdAssetCode, usdAssetIssuer string + if err := eurAsset.Extract(&usdAssetType, &usdAssetCode, &usdAssetIssuer); err != nil { + t.Fatalf("cound not extract usd asset: %v", err) + } + + for _, testCase := range []struct { + name string + queryParams map[string]string + }{ + { + "missing all params", + map[string]string{}, + }, + { + "missing buying asset", + map[string]string{ + "selling_asset_type": eurAssetType, + "selling_asset_code": eurAssetCode, + "selling_asset_issuer": eurAssetIssuer, + "limit": "25", + }, + }, + { + "missing selling asset", + map[string]string{ + "buying_asset_type": eurAssetType, + "buying_asset_code": eurAssetCode, + "buying_asset_issuer": eurAssetIssuer, + "limit": "25", + }, + }, + { + "invalid buying asset", + map[string]string{ + "buying_asset_type": "invalid", + "buying_asset_code": eurAssetCode, + "buying_asset_issuer": eurAssetIssuer, + "selling_asset_type": usdAssetType, + "selling_asset_code": usdAssetCode, + "selling_asset_issuer": usdAssetIssuer, + "limit": "25", + }, + }, + { + "invalid selling asset", + map[string]string{ + "buying_asset_type": eurAssetType, + "buying_asset_code": eurAssetCode, + "buying_asset_issuer": eurAssetIssuer, + "selling_asset_type": "invalid", + "selling_asset_code": usdAssetCode, + "selling_asset_issuer": usdAssetIssuer, + "limit": "25", + }, + }, + { + "limit is not a number", + map[string]string{ + "buying_asset_type": eurAssetType, + "buying_asset_code": eurAssetCode, + "buying_asset_issuer": eurAssetIssuer, + "selling_asset_type": usdAssetType, + "selling_asset_code": usdAssetCode, + "selling_asset_issuer": usdAssetIssuer, + "limit": "avcdef", + }, + }, + { + "limit is negative", + map[string]string{ + "buying_asset_type": 
eurAssetType, + "buying_asset_code": eurAssetCode, + "buying_asset_issuer": eurAssetIssuer, + "selling_asset_type": usdAssetType, + "selling_asset_code": usdAssetCode, + "selling_asset_issuer": usdAssetIssuer, + "limit": "-1", + }, + }, + { + "limit is too high", + map[string]string{ + "buying_asset_type": eurAssetType, + "buying_asset_code": eurAssetCode, + "buying_asset_issuer": eurAssetIssuer, + "selling_asset_type": usdAssetType, + "selling_asset_code": usdAssetCode, + "selling_asset_issuer": usdAssetIssuer, + "limit": "20000", + }, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + r := makeRequest(t, testCase.queryParams, map[string]string{}, nil) + w := httptest.NewRecorder() + _, err := handler.GetResource(w, r) + if err == nil || err.Error() != invalidOrderBook.Error() { + t.Fatalf("expected error %v but got %v", invalidOrderBook, err) + } + if lastLedger := w.Header().Get(LastLedgerHeaderName); lastLedger != "" { + t.Fatalf("expected last ledger to be not set but got %v", lastLedger) + } + }) + } +} + +func TestOrderbookGetResource(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + + var eurAssetType, eurAssetCode, eurAssetIssuer string + if err := eurAsset.Extract(&eurAssetType, &eurAssetCode, &eurAssetIssuer); err != nil { + t.Fatalf("cound not extract eur asset: %v", err) + } + + empty := OrderBookResponse{ + OrderBookSummary: protocol.OrderBookSummary{ + Bids: []protocol.PriceLevel{}, + Asks: []protocol.PriceLevel{}, + Selling: protocol.Asset{ + Type: "native", + }, + Buying: protocol.Asset{ + Type: eurAssetType, + Code: eurAssetCode, + Issuer: eurAssetIssuer, + }, + }, + } + + sellEurOffer := history.Offer{ + SellerID: seller.Address(), + OfferID: int64(15), + + BuyingAsset: nativeAsset, + SellingAsset: eurAsset, + + Amount: int64(500), + Pricen: int32(2), + Priced: int32(1), + Price: float64(2), + Flags: 2, + LastModifiedLedger: uint32(4), + } + + otherEurOffer := history.Offer{ + SellerID: seller.Address(), + OfferID: int64(16), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(math.MaxInt64), + Pricen: int32(2), + Priced: int32(1), + Price: float64(2), + Flags: 2, + LastModifiedLedger: uint32(1234), + } + + nonCanonicalPriceTwoEurOffer := history.Offer{ + SellerID: seller.Address(), + OfferID: int64(7), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(500), + Pricen: int32(2 * 15), + Priced: int32(1 * 15), + Price: float64(2), + Flags: 2, + LastModifiedLedger: uint32(4), + } + + threeEurOffer := history.Offer{ + SellerID: seller.Address(), + OfferID: int64(20), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(500), + Pricen: int32(3), + Priced: int32(1), + Price: float64(3), + Flags: 2, + LastModifiedLedger: uint32(1234), + } + + otherSellEurOffer := history.Offer{ + SellerID: seller.Address(), + OfferID: int64(17), + + BuyingAsset: nativeAsset, + SellingAsset: eurAsset, + + Amount: int64(500), + Pricen: int32(5), + Priced: int32(9), + Price: float64(5) / float64(9), + Flags: 2, + LastModifiedLedger: uint32(1234), + } + + offers := []history.Offer{ + twoEurOffer, + otherEurOffer, + nonCanonicalPriceTwoEurOffer, + threeEurOffer, + sellEurOffer, + otherSellEurOffer, + } + + assert.NoError(t, q.TruncateTables(tt.Ctx, []string{"offers"})) + assert.NoError(t, q.UpsertOffers(tt.Ctx, offers)) + + assert.NoError(t, q.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + })) + defer 
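// Illustrative sketch, not part of the upstream change: the offer Amount fields above
// are raw int64 stroops (1 XLM = 10^7 stroops), while the expected price levels below
// are rendered as 7-decimal strings. Assuming the github.com/stellar/go/amount
// package, the conversion can be checked directly:
package example

import (
	"fmt"

	"github.com/stellar/go/amount"
	"github.com/stellar/go/xdr"
)

func ExampleStroops() {
	fmt.Println(amount.String(xdr.Int64(500))) // prints "0.0000500", matching the levels below
}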
q.Rollback() + + fullResponse := empty + fullResponse.Asks = []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: int32(twoEurOffer.Pricen), D: int32(twoEurOffer.Priced)}, + Price: "2.0000000", + Amount: "922337203685.4776807", + }, + { + PriceR: protocol.Price{N: int32(threeEurOffer.Pricen), D: int32(threeEurOffer.Priced)}, + Price: "3.0000000", + Amount: "0.0000500", + }, + } + fullResponse.Bids = []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: int32(otherSellEurOffer.Priced), D: int32(otherSellEurOffer.Pricen)}, + Price: "1.8000000", + Amount: "0.0000500", + }, + { + PriceR: protocol.Price{N: int32(sellEurOffer.Priced), D: int32(sellEurOffer.Pricen)}, + Price: "0.5000000", + Amount: "0.0000500", + }, + } + + limitResponse := empty + limitResponse.Asks = []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: int32(twoEurOffer.Pricen), D: int32(twoEurOffer.Priced)}, + Price: "2.0000000", + Amount: "922337203685.4776807", + }, + } + limitResponse.Bids = []protocol.PriceLevel{ + { + PriceR: protocol.Price{N: int32(otherSellEurOffer.Priced), D: int32(otherSellEurOffer.Pricen)}, + Price: "1.8000000", + Amount: "0.0000500", + }, + } + + for _, testCase := range []struct { + name string + limit int + expected OrderBookResponse + }{ + + { + "full orderbook", + 10, + fullResponse, + }, + { + "limit request", + 1, + limitResponse, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + handler := GetOrderbookHandler{} + r := makeRequest( + t, + map[string]string{ + "buying_asset_type": eurAssetType, + "buying_asset_code": eurAssetCode, + "buying_asset_issuer": eurAssetIssuer, + "selling_asset_type": "native", + "limit": strconv.Itoa(testCase.limit), + }, + map[string]string{}, + q, + ) + w := httptest.NewRecorder() + response, err := handler.GetResource(w, r) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + if !response.Equals(testCase.expected) { + t.Fatalf("expected %v but got %v", testCase.expected, response) + } + }) + } +} diff --git a/services/horizon/internal/actions/path.go b/services/horizon/internal/actions/path.go new file mode 100644 index 0000000000..6c2e97fcd2 --- /dev/null +++ b/services/horizon/internal/actions/path.go @@ -0,0 +1,382 @@ +package actions + +import ( + "context" + "database/sql" + "fmt" + "net/http" + + "github.com/stellar/go/amount" + "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/paths" + horizonProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/services/horizon/internal/simplepath" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +// FindPathsHandler is the http handler for the find payment paths endpoint +type FindPathsHandler struct { + StaleThreshold uint + MaxPathLength uint + SetLastLedgerHeader bool + MaxAssetsParamLength int + PathFinder paths.Finder +} + +// StrictReceivePathsQuery query struct for paths/strict-send end-point +type StrictReceivePathsQuery struct { + SourceAssets string `schema:"source_assets" valid:"-"` + SourceAccount string `schema:"source_account" valid:"accountID,optional"` + DestinationAccount string `schema:"destination_account" valid:"accountID,optional"` + DestinationAssetType string 
`schema:"destination_asset_type" valid:"assetType"` + DestinationAssetIssuer string `schema:"destination_asset_issuer" valid:"accountID,optional"` + DestinationAssetCode string `schema:"destination_asset_code" valid:"-"` + DestinationAmount string `schema:"destination_amount" valid:"amount"` +} + +// Assets returns a list of xdr.Asset +func (q StrictReceivePathsQuery) Assets() ([]xdr.Asset, error) { + return xdr.BuildAssets(q.SourceAssets) +} + +// Amount returns source amount +func (q StrictReceivePathsQuery) Amount() xdr.Int64 { + parsed, err := amount.Parse(q.DestinationAmount) + if err != nil { + panic(err) + } + return parsed +} + +// DestinationAsset returns an xdr.Asset +func (q StrictReceivePathsQuery) DestinationAsset() xdr.Asset { + asset, err := xdr.BuildAsset( + q.DestinationAssetType, + q.DestinationAssetIssuer, + q.DestinationAssetCode, + ) + + if err != nil { + panic(err) + } + + return asset +} + +// URITemplate returns a rfc6570 URI template for the query struct +func (q StrictReceivePathsQuery) URITemplate() string { + return getURITemplate(&q, "paths/strict-receive", false) +} + +// Validate runs custom validations. +func (q StrictReceivePathsQuery) Validate() error { + if (len(q.SourceAccount) > 0) == (len(q.SourceAssets) > 0) { + return SourceAssetsOrSourceAccountProblem + } + + err := validateAssetParams( + q.DestinationAssetType, + q.DestinationAssetCode, + q.DestinationAssetIssuer, + "destination_", + ) + + if err != nil { + return err + } + + _, err = q.Assets() + + if err != nil { + return problem.MakeInvalidFieldProblem( + "source_assets", + err, + ) + } + + return nil +} + +// SourceAssetsOrSourceAccountProblem custom error where source assets or account is required +var SourceAssetsOrSourceAccountProblem = problem.P{ + Type: "bad_request", + Title: "Bad Request", + Status: http.StatusBadRequest, + Detail: "The request requires either a list of source assets or a source account. 
" + + "Both fields cannot be present.", +} + +// GetResource finds a list of strict receive paths +func (handler FindPathsHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + var err error + ctx := r.Context() + qp := StrictReceivePathsQuery{} + + if err = getParams(&qp, r); err != nil { + return nil, err + } + + query := paths.Query{} + query.DestinationAmount = qp.Amount() + sourceAccount := qp.SourceAccount + query.SourceAssets, _ = qp.Assets() + + if len(query.SourceAssets) > handler.MaxAssetsParamLength { + return nil, problem.MakeInvalidFieldProblem( + "source_assets", + fmt.Errorf("list of assets exceeds maximum length of %d", handler.MaxPathLength), + ) + } + query.DestinationAsset = qp.DestinationAsset() + if sourceAccount != "" { + sourceAccount := xdr.MustAddress(sourceAccount) + query.SourceAccount = &sourceAccount + query.ValidateSourceBalance = true + query.SourceAssets, query.SourceAssetBalances, err = assetsForAddress(r, query.SourceAccount.Address()) + if err != nil { + return nil, err + } + } else { + for range query.SourceAssets { + query.SourceAssetBalances = append(query.SourceAssetBalances, 0) + } + } + + records := []paths.Path{} + if len(query.SourceAssets) > 0 { + var lastIngestedLedger uint32 + records, lastIngestedLedger, err = handler.PathFinder.Find(ctx, query, handler.MaxPathLength) + if err == simplepath.ErrEmptyInMemoryOrderBook { + err = horizonProblem.StillIngesting + } + if err != nil { + return nil, err + } + + if handler.SetLastLedgerHeader { + // To make the Last-Ledger header consistent with the response content, + // we need to extract it from the ledger and not the DB. + // Thus, we overwrite the header if it was previously set. + SetLastLedgerHeader(w, lastIngestedLedger) + } + } + + return renderPaths(ctx, records) +} + +func renderPaths(ctx context.Context, records []paths.Path) (hal.BasePage, error) { + var page hal.BasePage + page.Init() + for _, p := range records { + var res horizon.Path + if err := resourceadapter.PopulatePath(ctx, &res, p); err != nil { + return hal.BasePage{}, err + } + page.Add(res) + } + return page, nil +} + +// FindFixedPathsHandler is the http handler for the find fixed payment paths endpoint +// Fixed payment paths are payment paths where both the source and destination asset are fixed +type FindFixedPathsHandler struct { + MaxPathLength uint + MaxAssetsParamLength int + SetLastLedgerHeader bool + PathFinder paths.Finder +} + +// DestinationAssetsOrDestinationAccountProblem custom error where destination asserts or accounts are required +var DestinationAssetsOrDestinationAccountProblem = problem.P{ + Type: "bad_request", + Title: "Bad Request", + Status: http.StatusBadRequest, + Detail: "The request requires either a list of destination assets or a destination account. 
" + + "Both fields cannot be present.", +} + +// FindFixedPathsQuery query struct for paths/strict-send end-point +type FindFixedPathsQuery struct { + DestinationAccount string `schema:"destination_account" valid:"accountID,optional"` + DestinationAssets string `schema:"destination_assets" valid:"-"` + SourceAssetType string `schema:"source_asset_type" valid:"assetType"` + SourceAssetIssuer string `schema:"source_asset_issuer" valid:"accountID,optional"` + SourceAssetCode string `schema:"source_asset_code" valid:"-"` + SourceAmount string `schema:"source_amount" valid:"amount"` +} + +// URITemplate returns a rfc6570 URI template for the query struct +func (q FindFixedPathsQuery) URITemplate() string { + return getURITemplate(&q, "paths/strict-send", false) +} + +// Validate runs custom validations. +func (q FindFixedPathsQuery) Validate() error { + if (len(q.DestinationAccount) > 0) == (len(q.DestinationAssets) > 0) { + return DestinationAssetsOrDestinationAccountProblem + } + + err := validateAssetParams( + q.SourceAssetType, + q.SourceAssetCode, + q.SourceAssetIssuer, + "source_", + ) + + if err != nil { + return err + } + + _, err = q.Assets() + + if err != nil { + return problem.MakeInvalidFieldProblem( + "destination_assets", + err, + ) + } + + return nil +} + +// Assets returns a list of xdr.Asset +func (q FindFixedPathsQuery) Assets() ([]xdr.Asset, error) { + return xdr.BuildAssets(q.DestinationAssets) +} + +// Amount returns source amount +func (q FindFixedPathsQuery) Amount() xdr.Int64 { + parsed, err := amount.Parse(q.SourceAmount) + if err != nil { + panic(err) + } + return parsed +} + +// SourceAsset returns an xdr.Asset +func (q FindFixedPathsQuery) SourceAsset() xdr.Asset { + asset, err := xdr.BuildAsset( + q.SourceAssetType, + q.SourceAssetIssuer, + q.SourceAssetCode, + ) + + if err != nil { + panic(err) + } + + return asset +} + +// GetResource returns a list of strict send paths +func (handler FindFixedPathsHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + var err error + ctx := r.Context() + qp := FindFixedPathsQuery{} + + if err = getParams(&qp, r); err != nil { + return nil, err + } + + destinationAccount := qp.DestinationAccount + destinationAssets, _ := qp.Assets() + + if len(destinationAssets) > handler.MaxAssetsParamLength { + return nil, problem.MakeInvalidFieldProblem( + "destination_assets", + fmt.Errorf("list of assets exceeds maximum length of %d", handler.MaxPathLength), + ) + } + + if destinationAccount != "" { + destinationAssets, _, err = assetsForAddress(r, destinationAccount) + if err != nil { + return nil, err + } + } + + sourceAsset := qp.SourceAsset() + amountToSpend := qp.Amount() + + records := []paths.Path{} + if len(destinationAssets) > 0 { + var lastIngestedLedger uint32 + records, lastIngestedLedger, err = handler.PathFinder.FindFixedPaths( + ctx, + sourceAsset, + amountToSpend, + destinationAssets, + handler.MaxPathLength, + ) + if err == simplepath.ErrEmptyInMemoryOrderBook { + err = horizonProblem.StillIngesting + } + if err != nil { + return nil, err + } + + if handler.SetLastLedgerHeader { + // To make the Last-Ledger header consistent with the response content, + // we need to extract it from the ledger and not the DB. + // Thus, we overwrite the header if it was previously set. 
+ SetLastLedgerHeader(w, lastIngestedLedger) + } + } + + return renderPaths(ctx, records) +} + +func assetsForAddress(r *http.Request, addy string) ([]xdr.Asset, []xdr.Int64, error) { + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, nil, errors.Wrap(err, "could not obtain historyQ from request") + } + if historyQ.SessionInterface.GetTx() == nil { + return nil, nil, errors.New("cannot be called outside of a transaction") + } + if opts := historyQ.SessionInterface.GetTxOptions(); opts == nil || !opts.ReadOnly || opts.Isolation != sql.LevelRepeatableRead { + return nil, nil, errors.New("should only be called in a repeatable read transaction") + } + + var account history.AccountEntry + account, err = historyQ.GetAccountByID(r.Context(), addy) + if historyQ.NoRows(err) { + return []xdr.Asset{}, []xdr.Int64{}, nil + } else if err != nil { + return nil, nil, errors.Wrap(err, "could not fetch account") + } + + var trustlines []history.TrustLine + trustlines, err = historyQ.GetSortedTrustLinesByAccountID(r.Context(), addy) + if err != nil { + return nil, nil, errors.Wrap(err, "could not fetch trustlines for account") + } + + var assets []xdr.Asset + var balances []xdr.Int64 + + for _, trustline := range trustlines { + // Ignore pool share assets because pool shares are not transferable and cannot be traded. + // Therefore, it doesn't make sense to send path payments where the source / destination assets are pool shares. + if trustline.AssetType == xdr.AssetTypeAssetTypePoolShare { + continue + } + var asset xdr.Asset + asset, err = xdr.NewCreditAsset(trustline.AssetCode, trustline.AssetIssuer) + if err != nil { + return nil, nil, errors.Wrap(err, "invalid trustline asset") + } + assets = append(assets, asset) + balances = append(balances, xdr.Int64(trustline.Balance)) + } + assets = append(assets, xdr.MustNewNativeAsset()) + balances = append(balances, xdr.Int64(account.Balance)) + + return assets, balances, nil +} diff --git a/services/horizon/internal/actions/path_test.go b/services/horizon/internal/actions/path_test.go new file mode 100644 index 0000000000..b481caf000 --- /dev/null +++ b/services/horizon/internal/actions/path_test.go @@ -0,0 +1,35 @@ +package actions + +import ( + "context" + "net/http" + "testing" + + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stretchr/testify/assert" +) + +func TestAssetsForAddressRequiresTransaction(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + + r := &http.Request{} + ctx := context.WithValue( + r.Context(), + &horizonContext.SessionContextKey, + q, + ) + + _, _, err := assetsForAddress(r.WithContext(ctx), "GCATOZ7YJV2FANQQLX47TIV6P7VMPJCEEJGQGR6X7TONPKBN3UCLKEIS") + assert.EqualError(t, err, "cannot be called outside of a transaction") + + assert.NoError(t, q.Begin()) + defer q.Rollback() + + _, _, err = assetsForAddress(r.WithContext(ctx), "GCATOZ7YJV2FANQQLX47TIV6P7VMPJCEEJGQGR6X7TONPKBN3UCLKEIS") + assert.EqualError(t, err, "should only be called in a repeatable read transaction") +} diff --git a/services/horizon/internal/actions/query_params.go b/services/horizon/internal/actions/query_params.go new file mode 100644 index 0000000000..738b75c43d --- /dev/null +++ b/services/horizon/internal/actions/query_params.go @@ -0,0 +1,144 @@ +package actions + +import ( + 
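// Illustrative sketch, not part of the upstream change: assetsForAddress above refuses
// to run outside a read-only, repeatable-read transaction so that the account row and
// its trust lines are read from a single consistent snapshot; the path_test above
// asserts the two error messages. These are the sql.TxOptions that satisfy its check:
package example

import "database/sql"

// pathFindingTxOptions is the transaction mode assetsForAddress requires callers to use.
var pathFindingTxOptions = &sql.TxOptions{
	Isolation: sql.LevelRepeatableRead,
	ReadOnly:  true,
}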
"fmt" + "strings" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +// SellingBuyingAssetQueryParams query struct for end-points requiring a selling +// and buying asset +type SellingBuyingAssetQueryParams struct { + SellingAssetType string `schema:"selling_asset_type" valid:"assetType,optional"` + SellingAssetIssuer string `schema:"selling_asset_issuer" valid:"accountID,optional"` + SellingAssetCode string `schema:"selling_asset_code" valid:"-"` + BuyingAssetType string `schema:"buying_asset_type" valid:"assetType,optional"` + BuyingAssetIssuer string `schema:"buying_asset_issuer" valid:"accountID,optional"` + BuyingAssetCode string `schema:"buying_asset_code" valid:"-"` + + // allow selling and buying using an asset's canonical representation. We + // are keeping the former selling_* and buying_* for backwards compatibility + // but it should not be documented. + SellingAsset string `schema:"selling" valid:"asset,optional"` + BuyingAsset string `schema:"buying" valid:"asset,optional"` +} + +// Validate runs custom validations buying and selling +func (q SellingBuyingAssetQueryParams) Validate() error { + ambiguousErr := "Ambiguous parameter, you can't include both `%[1]s` and `%[1]s_asset_type`. Remove all parameters of the form `%[1]s_`" + if len(q.SellingAssetType) > 0 && len(q.SellingAsset) > 0 { + return problem.MakeInvalidFieldProblem( + "selling_asset_type", + errors.New(fmt.Sprintf(ambiguousErr, "selling")), + ) + } + err := validateAssetParams(q.SellingAssetType, q.SellingAssetCode, q.SellingAssetIssuer, "selling_") + if err != nil { + return err + } + + if len(q.BuyingAssetType) > 0 && len(q.BuyingAsset) > 0 { + return problem.MakeInvalidFieldProblem( + "buying_asset_type", + errors.New(fmt.Sprintf(ambiguousErr, "buying")), + ) + } + err = validateAssetParams(q.BuyingAssetType, q.BuyingAssetCode, q.BuyingAssetIssuer, "buying_") + if err != nil { + return err + } + return nil +} + +// Selling returns an xdr.Asset representing the selling side of the offer. +func (q SellingBuyingAssetQueryParams) Selling() (*xdr.Asset, error) { + if len(q.SellingAsset) > 0 { + switch q.SellingAsset { + case "native": + asset := xdr.MustNewNativeAsset() + return &asset, nil + default: + parts := strings.Split(q.SellingAsset, ":") + if len(parts) != 2 { + return nil, problem.MakeInvalidFieldProblem( + "selling", + errors.New("missing colon"), + ) + } + asset, err := xdr.NewCreditAsset(parts[0], parts[1]) + if err != nil { + return nil, problem.MakeInvalidFieldProblem( + "selling", + err, + ) + } + return &asset, err + } + } + + if len(q.SellingAssetType) == 0 { + return nil, nil + } + + selling, err := xdr.BuildAsset( + q.SellingAssetType, + q.SellingAssetIssuer, + q.SellingAssetCode, + ) + + if err != nil { + p := problem.BadRequest + p.Extras = map[string]interface{}{"reason": fmt.Sprintf("bad selling asset: %s", err.Error())} + return nil, p + } + + return &selling, nil +} + +// Buying returns an *xdr.Asset representing the buying side of the offer. 
+func (q SellingBuyingAssetQueryParams) Buying() (*xdr.Asset, error) { + if len(q.BuyingAsset) > 0 { + switch q.BuyingAsset { + case "native": + asset := xdr.MustNewNativeAsset() + return &asset, nil + default: + parts := strings.Split(q.BuyingAsset, ":") + if len(parts) != 2 { + return nil, problem.MakeInvalidFieldProblem( + "buying", + errors.New("missing colon"), + ) + } + asset, err := xdr.NewCreditAsset(parts[0], parts[1]) + if err != nil { + return nil, problem.MakeInvalidFieldProblem( + "buying", + err, + ) + } + return &asset, err + } + } + + if len(q.BuyingAssetType) == 0 { + return nil, nil + } + + buying, err := xdr.BuildAsset( + q.BuyingAssetType, + q.BuyingAssetIssuer, + q.BuyingAssetCode, + ) + + if err != nil { + p := problem.BadRequest + p.Extras = map[string]interface{}{"reason": fmt.Sprintf("bad buying asset: %s", err.Error())} + return nil, p + } + + return &buying, nil +} diff --git a/services/horizon/internal/actions/query_params_test.go b/services/horizon/internal/actions/query_params_test.go new file mode 100644 index 0000000000..0f372885b5 --- /dev/null +++ b/services/horizon/internal/actions/query_params_test.go @@ -0,0 +1,330 @@ +package actions + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +var ( + native = xdr.MustNewNativeAsset() +) + +func TestSellingBuyingAssetQueryParams(t *testing.T) { + testCases := []struct { + desc string + urlParams map[string]string + expectedInvalidField string + expectedErr string + }{ + { + desc: "Invalid selling_asset_type", + urlParams: map[string]string{ + "selling_asset_type": "invalid", + }, + expectedInvalidField: "selling_asset_type", + expectedErr: "Asset type must be native, credit_alphanum4 or credit_alphanum12", + }, + { + desc: "Invalid buying_asset_type", + urlParams: map[string]string{ + "buying_asset_type": "invalid", + }, + expectedInvalidField: "buying_asset_type", + expectedErr: "Asset type must be native, credit_alphanum4 or credit_alphanum12", + }, { + desc: "Invalid selling_asset_code for credit_alphanum4", + urlParams: map[string]string{ + "selling_asset_type": "credit_alphanum4", + "selling_asset_code": "invalid", + "selling_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + expectedInvalidField: "selling_asset_code", + expectedErr: "Asset code must be 1-12 alphanumeric characters", + }, { + desc: "Invalid buying_asset_code for credit_alphanum4", + urlParams: map[string]string{ + "buying_asset_type": "credit_alphanum4", + "buying_asset_code": "invalid", + }, + expectedInvalidField: "buying_asset_code", + expectedErr: "Asset code must be 1-12 alphanumeric characters", + }, { + desc: "Empty selling_asset_code for credit_alphanum4", + urlParams: map[string]string{ + "selling_asset_type": "credit_alphanum4", + "selling_asset_code": "", + }, + expectedInvalidField: "selling_asset_code", + expectedErr: "Asset code must be 1-12 alphanumeric characters", + }, { + desc: "Empty buying_asset_code for credit_alphanum4", + urlParams: map[string]string{ + "buying_asset_type": "credit_alphanum4", + "buying_asset_code": "", + }, + expectedInvalidField: "buying_asset_code", + expectedErr: "Asset code must be 1-12 alphanumeric characters", + }, { + desc: "Empty selling_asset_code for credit_alphanum12", + urlParams: map[string]string{ + "selling_asset_type": "credit_alphanum12", + "selling_asset_code": "", + }, + expectedInvalidField: "selling_asset_code", + expectedErr: "Asset code must be 
1-12 alphanumeric characters", + }, { + desc: "Empty buying_asset_code for credit_alphanum12", + urlParams: map[string]string{ + "buying_asset_type": "credit_alphanum12", + "buying_asset_code": "", + }, + expectedInvalidField: "buying_asset_code", + expectedErr: "Asset code must be 1-12 alphanumeric characters", + }, { + desc: "Invalid selling_asset_code for credit_alphanum12", + urlParams: map[string]string{ + "selling_asset_type": "credit_alphanum12", + "selling_asset_code": "OHLOOOOOOOOOONG", + }, + expectedInvalidField: "selling_asset_code", + expectedErr: "Asset code must be 1-12 alphanumeric characters", + }, { + desc: "Invalid buying_asset_code for credit_alphanum12", + urlParams: map[string]string{ + "buying_asset_type": "credit_alphanum12", + "buying_asset_code": "OHLOOOOOOOOOONG", + }, + expectedInvalidField: "buying_asset_code", + expectedErr: "Asset code must be 1-12 alphanumeric characters", + }, { + desc: "Invalid selling_asset_issuer", + urlParams: map[string]string{ + "selling_asset_issuer": "GFOOO", + }, + expectedInvalidField: "selling_asset_issuer", + expectedErr: "Account ID must start with `G` and contain 56 alphanum characters", + }, { + desc: "Invalid buying_asset_issuer", + urlParams: map[string]string{ + "buying_asset_issuer": "GFOOO", + }, + expectedInvalidField: "buying_asset_issuer", + expectedErr: "Account ID must start with `G` and contain 56 alphanum characters", + }, { + desc: "Missing selling_asset_type", + urlParams: map[string]string{ + "selling_asset_code": "OHLOOOOOOOOOONG", + "selling_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + expectedInvalidField: "selling_asset_type", + expectedErr: "Missing parameter", + }, { + desc: "Missing buying_asset_type", + urlParams: map[string]string{ + "buying_asset_code": "OHLOOOOOOOOOONG", + "buying_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + expectedInvalidField: "buying_asset_type", + expectedErr: "Missing parameter", + }, { + desc: "Missing selling_asset_issuer", + urlParams: map[string]string{ + "selling_asset_type": "credit_alphanum4", + "selling_asset_code": "USD", + }, + expectedInvalidField: "selling_asset_issuer", + expectedErr: "Missing parameter", + }, { + desc: "Missing buying_asset_issuer", + urlParams: map[string]string{ + "buying_asset_type": "credit_alphanum4", + "buying_asset_code": "USD", + }, + expectedInvalidField: "buying_asset_issuer", + expectedErr: "Missing parameter", + }, { + desc: "Native with issued asset info: buying_asset_issuer", + urlParams: map[string]string{ + "buying_asset_type": "native", + "buying_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + expectedInvalidField: "buying_asset_issuer", + expectedErr: "native asset does not have an issuer", + }, { + desc: "Native with issued asset info: buying_asset_code", + urlParams: map[string]string{ + "buying_asset_type": "native", + "buying_asset_code": "USD", + }, + expectedInvalidField: "buying_asset_code", + expectedErr: "native asset does not have a code", + }, { + desc: "Native with issued asset info: selling_asset_issuer", + urlParams: map[string]string{ + "selling_asset_type": "native", + "selling_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + expectedInvalidField: "selling_asset_issuer", + expectedErr: "native asset does not have an issuer", + }, { + desc: "Native with issued asset info: selling_asset_code", + urlParams: map[string]string{ + "selling_asset_type": "native", + 
"selling_asset_code": "USD", + }, + expectedInvalidField: "selling_asset_code", + expectedErr: "native asset does not have a code", + }, { + desc: "Valid parameters", + urlParams: map[string]string{ + "buying_asset_type": "credit_alphanum4", + "buying_asset_code": "USD", + "buying_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "selling_asset_type": "credit_alphanum4", + "selling_asset_code": "EUR", + "selling_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + }, + { + desc: "Valid parameters with canonical representation", + urlParams: map[string]string{ + "buying": "USD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "selling": "EUR:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tt := assert.New(t) + r := makeTestActionRequest("/", tc.urlParams) + qp := SellingBuyingAssetQueryParams{} + err := getParams(&qp, r) + + if len(tc.expectedInvalidField) == 0 { + tt.NoError(err) + } else { + if tt.IsType(&problem.P{}, err) { + p := err.(*problem.P) + tt.Equal("bad_request", p.Type) + tt.Equal(tc.expectedInvalidField, p.Extras["invalid_field"]) + tt.Equal( + tc.expectedErr, + p.Extras["reason"], + ) + } + } + + }) + } +} + +func TestSellingBuyingAssetQueryParamsWithCanonicalRepresenation(t *testing.T) { + + testCases := []struct { + desc string + urlParams map[string]string + expectedSelling *xdr.Asset + expectedBuying *xdr.Asset + expectedInvalidField string + expectedErr string + }{ + { + desc: "selling native and buying issued asset", + urlParams: map[string]string{ + "buying": "native", + "selling": "EUR:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + expectedBuying: &native, + expectedSelling: &euro, + }, + { + desc: "selling issued and buying native asset", + urlParams: map[string]string{ + "buying": "USD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "selling": "native", + }, + expectedBuying: &usd, + expectedSelling: &native, + }, + { + desc: "selling and buying issued assets", + urlParams: map[string]string{ + "buying": "USD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "selling": "EUR:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + expectedBuying: &usd, + expectedSelling: &euro, + }, + { + desc: "new and old format for buying", + urlParams: map[string]string{ + "buying": "USD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "buying_asset_type": "credit_alphanum4", + "buying_asset_code": "USD", + "buying_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + expectedInvalidField: "buying_asset_type", + expectedErr: "Ambiguous parameter, you can't include both `buying` and `buying_asset_type`. Remove all parameters of the form `buying_`", + }, + { + desc: "new and old format for selling", + urlParams: map[string]string{ + "selling": "USD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "selling_asset_type": "credit_alphanum4", + "selling_asset_code": "USD", + "selling_asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, + expectedInvalidField: "selling_asset_type", + expectedErr: "Ambiguous parameter, you can't include both `selling` and `selling_asset_type`. 
Remove all parameters of the form `selling_`", + }, + { + desc: "invalid selling asset", + urlParams: map[string]string{ + "selling": "LOLUSD", + }, + expectedInvalidField: "selling", + expectedErr: "Asset must be the string \"native\" or a string of the form \"Code:IssuerAccountID\" for issued assets.", + }, + { + desc: "invalid buying asset", + urlParams: map[string]string{ + "buying": "LOLEUR:", + }, + expectedInvalidField: "buying", + expectedErr: "Asset must be the string \"native\" or a string of the form \"Code:IssuerAccountID\" for issued assets.", + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tt := assert.New(t) + r := makeTestActionRequest("/", tc.urlParams) + qp := SellingBuyingAssetQueryParams{} + err := getParams(&qp, r) + + if len(tc.expectedInvalidField) == 0 { + tt.NoError(err) + selling, sellingErr := qp.Selling() + tt.NoError(sellingErr) + buying, buyingErr := qp.Buying() + tt.NoError(buyingErr) + tt.Equal(tc.expectedBuying, buying) + tt.Equal(tc.expectedSelling, selling) + } else { + if tt.IsType(&problem.P{}, err) { + p := err.(*problem.P) + tt.Equal("bad_request", p.Type) + tt.Equal(tc.expectedInvalidField, p.Extras["invalid_field"]) + tt.Equal( + tc.expectedErr, + p.Extras["reason"], + ) + } + } + + }) + } +} diff --git a/services/horizon/internal/actions/root.go b/services/horizon/internal/actions/root.go new file mode 100644 index 0000000000..42bbc1b3b3 --- /dev/null +++ b/services/horizon/internal/actions/root.go @@ -0,0 +1,44 @@ +package actions + +import ( + "net/http" + "net/url" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/resourceadapter" +) + +type GetRootHandler struct { + LedgerState *ledger.State + CoreStateGetter + NetworkPassphrase string + FriendbotURL *url.URL + HorizonVersion string +} + +func (handler GetRootHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + var res horizon.Root + templates := map[string]string{ + "accounts": AccountsQuery{}.URITemplate(), + "claimableBalances": ClaimableBalancesQuery{}.URITemplate(), + "liquidityPools": LiquidityPoolsQuery{}.URITemplate(), + "offers": OffersQuery{}.URITemplate(), + "strictReceivePaths": StrictReceivePathsQuery{}.URITemplate(), + "strictSendPaths": FindFixedPathsQuery{}.URITemplate(), + } + coreState := handler.GetCoreState() + resourceadapter.PopulateRoot( + r.Context(), + &res, + handler.LedgerState.CurrentStatus(), + handler.HorizonVersion, + coreState.CoreVersion, + handler.NetworkPassphrase, + coreState.CurrentProtocolVersion, + coreState.CoreSupportedProtocolVersion, + handler.FriendbotURL, + templates, + ) + return res, nil +} diff --git a/services/horizon/internal/actions/submit_transaction.go b/services/horizon/internal/actions/submit_transaction.go new file mode 100644 index 0000000000..703412c801 --- /dev/null +++ b/services/horizon/internal/actions/submit_transaction.go @@ -0,0 +1,170 @@ +package actions + +import ( + "context" + "encoding/hex" + "mime" + "net/http" + + "github.com/stellar/go/network" + "github.com/stellar/go/protocols/horizon" + hProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/services/horizon/internal/txsub" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +type 
NetworkSubmitter interface { + Submit( + ctx context.Context, + rawTx string, + envelope xdr.TransactionEnvelope, + hash string) <-chan txsub.Result +} + +type SubmitTransactionHandler struct { + Submitter NetworkSubmitter + NetworkPassphrase string + CoreStateGetter +} + +type envelopeInfo struct { + hash string + raw string + parsed xdr.TransactionEnvelope +} + +func extractEnvelopeInfo(raw string, passphrase string) (envelopeInfo, error) { + result := envelopeInfo{raw: raw} + err := xdr.SafeUnmarshalBase64(raw, &result.parsed) + if err != nil { + return result, err + } + + var hash [32]byte + hash, err = network.HashTransactionInEnvelope(result.parsed, passphrase) + if err != nil { + return result, err + } + result.hash = hex.EncodeToString(hash[:]) + return result, nil +} + +func (handler SubmitTransactionHandler) validateBodyType(r *http.Request) error { + c := r.Header.Get("Content-Type") + if c == "" { + return nil + } + + mt, _, err := mime.ParseMediaType(c) + if err != nil { + return errors.Wrap(err, "Could not determine mime type") + } + + if mt != "application/x-www-form-urlencoded" && mt != "multipart/form-data" { + return &hProblem.UnsupportedMediaType + } + return nil +} + +func (handler SubmitTransactionHandler) response(r *http.Request, info envelopeInfo, result txsub.Result) (hal.Pageable, error) { + if result.Err == nil { + var resource horizon.Transaction + err := resourceadapter.PopulateTransaction( + r.Context(), + info.hash, + &resource, + result.Transaction, + ) + return resource, err + } + + if result.Err == txsub.ErrTimeout { + return nil, &hProblem.Timeout + } + + if result.Err == txsub.ErrCanceled { + return nil, &hProblem.ClientDisconnected + } + + switch err := result.Err.(type) { + case *txsub.FailedTransactionError: + rcr := horizon.TransactionResultCodes{} + resourceadapter.PopulateTransactionResultCodes( + r.Context(), + info.hash, + &rcr, + err, + ) + + return nil, &problem.P{ + Type: "transaction_failed", + Title: "Transaction Failed", + Status: http.StatusBadRequest, + Detail: "The transaction failed when submitted to the stellar network. " + + "The `extras.result_codes` field on this response contains further " + + "details. Descriptions of each code can be found at: " + + "https://developers.stellar.org/api/errors/http-status-codes/horizon-specific/transaction-failed/", + Extras: map[string]interface{}{ + "envelope_xdr": info.raw, + "result_xdr": err.ResultXDR, + "result_codes": rcr, + }, + } + } + + return nil, result.Err +} + +func (handler SubmitTransactionHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + if err := handler.validateBodyType(r); err != nil { + return nil, err + } + + raw, err := getString(r, "tx") + if err != nil { + return nil, err + } + + info, err := extractEnvelopeInfo(raw, handler.NetworkPassphrase) + if err != nil { + return nil, &problem.P{ + Type: "transaction_malformed", + Title: "Transaction Malformed", + Status: http.StatusBadRequest, + Detail: "Horizon could not decode the transaction envelope in this " + + "request. A transaction should be an XDR TransactionEnvelope struct " + + "encoded using base64. 
The envelope read from this request is " + + "echoed in the `extras.envelope_xdr` field of this response for your " + + "convenience.", + Extras: map[string]interface{}{ + "envelope_xdr": raw, + }, + } + } + + coreState := handler.GetCoreState() + if !coreState.Synced { + return nil, hProblem.StaleHistory + } + + submission := handler.Submitter.Submit( + r.Context(), + info.raw, + info.parsed, + info.hash, + ) + + select { + case result := <-submission: + return handler.response(r, info, result) + case <-r.Context().Done(): + if r.Context().Err() == context.Canceled { + return nil, hProblem.ClientDisconnected + } + return nil, hProblem.Timeout + } +} diff --git a/services/horizon/internal/actions/submit_transaction_test.go b/services/horizon/internal/actions/submit_transaction_test.go new file mode 100644 index 0000000000..72ccb5a297 --- /dev/null +++ b/services/horizon/internal/actions/submit_transaction_test.go @@ -0,0 +1,159 @@ +package actions + +import ( + "context" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "github.com/stellar/go/network" + "github.com/stellar/go/services/horizon/internal/corestate" + hProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/txsub" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestStellarCoreMalformedTx(t *testing.T) { + handler := SubmitTransactionHandler{} + + r := httptest.NewRequest("POST", "https://horizon.stellar.org/transactions", nil) + w := httptest.NewRecorder() + _, err := handler.GetResource(w, r) + assert.Error(t, err) + assert.Equal(t, http.StatusBadRequest, err.(*problem.P).Status) + assert.Equal(t, "Transaction Malformed", err.(*problem.P).Title) +} + +type coreStateGetterMock struct { + mock.Mock +} + +func (m *coreStateGetterMock) GetCoreState() corestate.State { + a := m.Called() + return a.Get(0).(corestate.State) +} + +type networkSubmitterMock struct { + mock.Mock +} + +func (m *networkSubmitterMock) Submit( + ctx context.Context, + rawTx string, + envelope xdr.TransactionEnvelope, + hash string) <-chan txsub.Result { + a := m.Called() + return a.Get(0).(chan txsub.Result) +} + +func TestStellarCoreNotSynced(t *testing.T) { + mock := &coreStateGetterMock{} + mock.On("GetCoreState").Return(corestate.State{ + Synced: false, + }) + + handler := SubmitTransactionHandler{ + NetworkPassphrase: network.PublicNetworkPassphrase, + CoreStateGetter: mock, + } + + form := url.Values{} + form.Set("tx", "AAAAAAGUcmKO5465JxTSLQOQljwk2SfqAJmZSG6JH6wtqpwhAAABLAAAAAAAAAABAAAAAAAAAAEAAAALaGVsbG8gd29ybGQAAAAAAwAAAAAAAAAAAAAAABbxCy3mLg3hiTqX4VUEEp60pFOrJNxYM1JtxXTwXhY2AAAAAAvrwgAAAAAAAAAAAQAAAAAW8Qst5i4N4Yk6l+FVBBKetKRTqyTcWDNSbcV08F4WNgAAAAAN4Lazj4x61AAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABLaqcIQAAAEBKwqWy3TaOxoGnfm9eUjfTRBvPf34dvDA0Nf+B8z4zBob90UXtuCqmQqwMCyH+okOI3c05br3khkH0yP4kCwcE") + + request, err := http.NewRequest( + "POST", + "https://horizon.stellar.org/transactions", + strings.NewReader(form.Encode()), + ) + require.NoError(t, err) + request.Header.Add("Content-Type", "application/x-www-form-urlencoded") + + w := httptest.NewRecorder() + _, err = handler.GetResource(w, request) + assert.Error(t, err) + assert.Equal(t, http.StatusServiceUnavailable, err.(problem.P).Status) + assert.Equal(t, "stale_history", err.(problem.P).Type) + 
assert.Equal(t, "Historical DB Is Too Stale", err.(problem.P).Title) +} + +func TestTimeoutSubmission(t *testing.T) { + mockSubmitChannel := make(chan txsub.Result) + + mock := &coreStateGetterMock{} + mock.On("GetCoreState").Return(corestate.State{ + Synced: true, + }) + + mockSubmitter := &networkSubmitterMock{} + mockSubmitter.On("Submit").Return(mockSubmitChannel) + + handler := SubmitTransactionHandler{ + Submitter: mockSubmitter, + NetworkPassphrase: network.PublicNetworkPassphrase, + CoreStateGetter: mock, + } + + form := url.Values{} + form.Set("tx", "AAAAAAGUcmKO5465JxTSLQOQljwk2SfqAJmZSG6JH6wtqpwhAAABLAAAAAAAAAABAAAAAAAAAAEAAAALaGVsbG8gd29ybGQAAAAAAwAAAAAAAAAAAAAAABbxCy3mLg3hiTqX4VUEEp60pFOrJNxYM1JtxXTwXhY2AAAAAAvrwgAAAAAAAAAAAQAAAAAW8Qst5i4N4Yk6l+FVBBKetKRTqyTcWDNSbcV08F4WNgAAAAAN4Lazj4x61AAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABLaqcIQAAAEBKwqWy3TaOxoGnfm9eUjfTRBvPf34dvDA0Nf+B8z4zBob90UXtuCqmQqwMCyH+okOI3c05br3khkH0yP4kCwcE") + + request, err := http.NewRequest( + "POST", + "https://horizon.stellar.org/transactions", + strings.NewReader(form.Encode()), + ) + + require.NoError(t, err) + request.Header.Add("Content-Type", "application/x-www-form-urlencoded") + ctx, cancel := context.WithTimeout(request.Context(), time.Duration(0)) + defer cancel() + request = request.WithContext(ctx) + + w := httptest.NewRecorder() + _, err = handler.GetResource(w, request) + assert.Error(t, err) + assert.Equal(t, hProblem.Timeout, err) +} + +func TestClientDisconnectSubmission(t *testing.T) { + mockSubmitChannel := make(chan txsub.Result) + + mock := &coreStateGetterMock{} + mock.On("GetCoreState").Return(corestate.State{ + Synced: true, + }) + + mockSubmitter := &networkSubmitterMock{} + mockSubmitter.On("Submit").Return(mockSubmitChannel) + + handler := SubmitTransactionHandler{ + Submitter: mockSubmitter, + NetworkPassphrase: network.PublicNetworkPassphrase, + CoreStateGetter: mock, + } + + form := url.Values{} + form.Set("tx", "AAAAAAGUcmKO5465JxTSLQOQljwk2SfqAJmZSG6JH6wtqpwhAAABLAAAAAAAAAABAAAAAAAAAAEAAAALaGVsbG8gd29ybGQAAAAAAwAAAAAAAAAAAAAAABbxCy3mLg3hiTqX4VUEEp60pFOrJNxYM1JtxXTwXhY2AAAAAAvrwgAAAAAAAAAAAQAAAAAW8Qst5i4N4Yk6l+FVBBKetKRTqyTcWDNSbcV08F4WNgAAAAAN4Lazj4x61AAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABLaqcIQAAAEBKwqWy3TaOxoGnfm9eUjfTRBvPf34dvDA0Nf+B8z4zBob90UXtuCqmQqwMCyH+okOI3c05br3khkH0yP4kCwcE") + + request, err := http.NewRequest( + "POST", + "https://horizon.stellar.org/transactions", + strings.NewReader(form.Encode()), + ) + + require.NoError(t, err) + request.Header.Add("Content-Type", "application/x-www-form-urlencoded") + ctx, cancel := context.WithCancel(request.Context()) + cancel() + request = request.WithContext(ctx) + + w := httptest.NewRecorder() + _, err = handler.GetResource(w, request) + assert.Equal(t, hProblem.ClientDisconnected, err) +} diff --git a/services/horizon/internal/actions/trade.go b/services/horizon/internal/actions/trade.go new file mode 100644 index 0000000000..d6bf415424 --- /dev/null +++ b/services/horizon/internal/actions/trade.go @@ -0,0 +1,467 @@ +package actions + +import ( + "context" + "fmt" + "net/http" + "strconv" + gTime "time" + + "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + 
"github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/support/time" + "github.com/stellar/go/xdr" +) + +// TradeAssetsQueryParams represents the base and counter assets on trade related end-points. +type TradeAssetsQueryParams struct { + BaseAssetType string `schema:"base_asset_type" valid:"assetType,optional"` + BaseAssetIssuer string `schema:"base_asset_issuer" valid:"accountID,optional"` + BaseAssetCode string `schema:"base_asset_code" valid:"-"` + CounterAssetType string `schema:"counter_asset_type" valid:"assetType,optional"` + CounterAssetIssuer string `schema:"counter_asset_issuer" valid:"accountID,optional"` + CounterAssetCode string `schema:"counter_asset_code" valid:"-"` +} + +// Base returns an xdr.Asset representing the base side of the trade. +func (q TradeAssetsQueryParams) Base() (*xdr.Asset, error) { + if len(q.BaseAssetType) == 0 { + return nil, nil + } + + base, err := xdr.BuildAsset( + q.BaseAssetType, + q.BaseAssetIssuer, + q.BaseAssetCode, + ) + + if err != nil { + return nil, problem.MakeInvalidFieldProblem( + "base_asset", + errors.New(fmt.Sprintf("invalid base_asset: %s", err.Error())), + ) + } + + return &base, nil +} + +// Counter returns an *xdr.Asset representing the counter asset side of the trade. +func (q TradeAssetsQueryParams) Counter() (*xdr.Asset, error) { + if len(q.CounterAssetType) == 0 { + return nil, nil + } + + counter, err := xdr.BuildAsset( + q.CounterAssetType, + q.CounterAssetIssuer, + q.CounterAssetCode, + ) + + if err != nil { + return nil, problem.MakeInvalidFieldProblem( + "counter_asset", + errors.New(fmt.Sprintf("invalid counter_asset: %s", err.Error())), + ) + } + + return &counter, nil +} + +// TradesQuery query struct for trades end-points +type TradesQuery struct { + AccountID string `schema:"account_id" valid:"accountID,optional"` + OfferID uint64 `schema:"offer_id" valid:"-"` + PoolID string `schema:"liquidity_pool_id" valid:"sha256,optional"` + TradeType string `schema:"trade_type" valid:"tradeType,optional"` + TradeAssetsQueryParams `valid:"optional"` +} + +// Validate runs custom validations base and counter +func (q TradesQuery) Validate() error { + base, err := q.Base() + if err != nil { + return err + } + counter, err := q.Counter() + if err != nil { + return err + } + + if (base != nil && counter == nil) || (base == nil && counter != nil) { + return problem.MakeInvalidFieldProblem( + "base_asset_type,counter_asset_type", + errors.New("this endpoint supports asset pairs but only one asset supplied"), + ) + } + + if base != nil && q.OfferID != 0 { + return problem.MakeInvalidFieldProblem( + "base_asset_type,counter_asset_type,offer_id", + errors.New("this endpoint does not support filtering by both asset pair and offer id "), + ) + } + + if base != nil && q.PoolID != "" { + return problem.MakeInvalidFieldProblem( + "base_asset_type,counter_asset_type,liquidity_pool_id", + errors.New("this endpoint does not support filtering by both asset pair and liquidity pool id "), + ) + } + + count, err := countNonEmpty( + q.AccountID, + q.OfferID, + q.PoolID, + ) + if err != nil { + return problem.BadRequest + } + + if count > 1 { + return problem.MakeInvalidFieldProblem( + "account_id,liquidity_pool_id,offer_id", + errors.New( + "Use a single filter for trades, you can only use one of account_id, liquidity_pool_id, offer_id", + ), + ) + } + + if q.OfferID != 0 && 
q.TradeType == history.LiquidityPoolTrades { + return problem.MakeInvalidFieldProblem( + "trade_type", + errors.Errorf("trade_type %s cannot be used with the offer_id filter", q.TradeType), + ) + } + + if q.PoolID != "" && q.TradeType == history.OrderbookTrades { + return problem.MakeInvalidFieldProblem( + "trade_type", + errors.Errorf("trade_type %s cannot be used with the liquidity_pool_id filter", q.TradeType), + ) + } + return nil +} + +// GetTradesHandler is the action handler for all end-points returning a list of trades. +type GetTradesHandler struct { + LedgerState *ledger.State + CoreStateGetter +} + +// GetResourcePage returns a page of trades. +func (handler GetTradesHandler) GetResourcePage(w HeaderWriter, r *http.Request) ([]hal.Pageable, error) { + ctx := r.Context() + + pq, err := GetPageQuery(handler.LedgerState, r) + if err != nil { + return nil, err + } + + err = validateCursorWithinHistory(handler.LedgerState, pq) + if err != nil { + return nil, err + } + + qp := TradesQuery{} + if err = getParams(&qp, r); err != nil { + return nil, err + } + if qp.TradeType == "" { + qp.TradeType = history.AllTrades + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + var records []history.Trade + var baseAsset, counterAsset *xdr.Asset + baseAsset, err = qp.Base() + if err != nil { + return nil, err + } + + if baseAsset != nil { + counterAsset, err = qp.Counter() + if err != nil { + return nil, err + } + + records, err = historyQ.GetTradesForAssets(ctx, pq, qp.AccountID, qp.TradeType, *baseAsset, *counterAsset) + } else if qp.OfferID != 0 { + records, err = historyQ.GetTradesForOffer(ctx, pq, int64(qp.OfferID)) + } else if qp.PoolID != "" { + records, err = historyQ.GetTradesForLiquidityPool(ctx, pq, qp.PoolID) + } else { + records, err = historyQ.GetTrades(ctx, pq, qp.AccountID, qp.TradeType) + } + if err != nil { + return nil, err + } + + var response []hal.Pageable + for _, record := range records { + var res horizon.Trade + resourceadapter.PopulateTrade(ctx, &res, record) + response = append(response, res) + } + + return response, nil +} + +// TradeAggregationsQuery query struct for trade_aggregations end-point +type TradeAggregationsQuery struct { + OffsetFilter uint64 `schema:"offset" valid:"-"` + StartTimeFilter time.Millis `schema:"start_time" valid:"-"` + EndTimeFilter time.Millis `schema:"end_time" valid:"-"` + ResolutionFilter uint64 `schema:"resolution" valid:"-"` + TradeAssetsQueryParams `valid:"optional"` +} + +// Validate runs validations on tradeAggregationsQuery +func (q TradeAggregationsQuery) Validate() error { + base, err := q.Base() + if err != nil { + return err + } + if base == nil { + return problem.MakeInvalidFieldProblem( + "base_asset_type", + errors.New("Missing required field"), + ) + } + counter, err := q.Counter() + if err != nil { + return err + } + if counter == nil { + return problem.MakeInvalidFieldProblem( + "counter_asset_type", + errors.New("Missing required field"), + ) + } + + //check if resolution is legal + resolutionDuration := gTime.Duration(q.ResolutionFilter) * gTime.Millisecond + if history.StrictResolutionFiltering { + if _, ok := history.AllowedResolutions[resolutionDuration]; !ok { + return problem.MakeInvalidFieldProblem( + "resolution", + errors.New("illegal or missing resolution. 
"+ + "allowed resolutions are: 1 minute (60000), 5 minutes (300000), 15 minutes (900000), 1 hour (3600000), "+ + "1 day (86400000) and 1 week (604800000)"), + ) + } + } + // check if offset is legal + offsetDuration := gTime.Duration(q.OffsetFilter) * gTime.Millisecond + if offsetDuration%gTime.Hour != 0 || offsetDuration >= gTime.Hour*24 || offsetDuration > resolutionDuration { + return problem.MakeInvalidFieldProblem( + "offset", + errors.New("illegal or missing offset. offset must be a multiple of an"+ + " hour, less than or equal to the resolution, and less than 24 hours"), + ) + } + + return nil +} + +// GetTradeAggregationsHandler is the action handler for trade_aggregations +type GetTradeAggregationsHandler struct { + LedgerState *ledger.State + CoreStateGetter +} + +// GetResource returns a page of trade aggregations +func (handler GetTradeAggregationsHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + ctx := r.Context() + pq, err := GetPageQuery(handler.LedgerState, r) + if err != nil { + return nil, err + } + err = validateCursorWithinHistory(handler.LedgerState, pq) + if err != nil { + return nil, err + } + qp := TradeAggregationsQuery{} + if err = getParams(&qp, r); err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + records, err := handler.fetchRecords(ctx, historyQ, qp, pq) + if err != nil { + return nil, err + } + var aggregations []hal.Pageable + for _, record := range records { + var res horizon.TradeAggregation + err = resourceadapter.PopulateTradeAggregation(ctx, &res, record) + if err != nil { + return nil, err + } + aggregations = append(aggregations, res) + } + + return handler.buildPage(r, aggregations) +} + +func (handler GetTradeAggregationsHandler) fetchRecords(ctx context.Context, historyQ *history.Q, qp TradeAggregationsQuery, pq db2.PageQuery) ([]history.TradeAggregation, error) { + baseAsset, err := qp.Base() + if err != nil { + return nil, err + } + + baseAssetID, err := historyQ.GetAssetID(ctx, *baseAsset) + if err != nil { + p := problem.BadRequest + if historyQ.NoRows(err) { + p = problem.NotFound + err = errors.New("not found") + } + return nil, problem.NewProblemWithInvalidField( + p, + "base_asset", + err, + ) + } + + counterAsset, err := qp.Counter() + if err != nil { + return nil, err + } + + counterAssetID, err := historyQ.GetAssetID(ctx, *counterAsset) + if err != nil { + p := problem.BadRequest + if historyQ.NoRows(err) { + p = problem.NotFound + err = errors.New("not found") + } + + return nil, problem.NewProblemWithInvalidField( + p, + "counter_asset", + err, + ) + } + + //initialize the query builder with required params + tradeAggregationsQ, err := historyQ.GetTradeAggregationsQ( + baseAssetID, + counterAssetID, + int64(qp.ResolutionFilter), + int64(qp.OffsetFilter), + pq, + ) + if err != nil { + return nil, err + } + + //set time range if supplied + if !qp.StartTimeFilter.IsNil() { + tradeAggregationsQ, err = tradeAggregationsQ.WithStartTime(qp.StartTimeFilter) + if err != nil { + return nil, problem.MakeInvalidFieldProblem( + "start_time", + errors.New( + "illegal start time. adjusted start time must "+ + "be less than the provided end time if the end time is greater than 0", + ), + ) + } + } + if !qp.EndTimeFilter.IsNil() { + tradeAggregationsQ, err = tradeAggregationsQ.WithEndTime(qp.EndTimeFilter) + if err != nil { + return nil, problem.MakeInvalidFieldProblem( + "end_time", + errors.New( + "illegal end time. 
adjusted end time "+ + "must be greater than the offset and greater than the provided start time", + ), + ) + } + } + + var records []history.TradeAggregation + err = historyQ.Select(ctx, &records, tradeAggregationsQ.GetSql()) + if err != nil { + return nil, err + } + return records, err +} + +// BuildPage builds a custom hal page for this handler +func (handler GetTradeAggregationsHandler) buildPage(r *http.Request, records []hal.Pageable) (hal.Page, error) { + ctx := r.Context() + pageQuery, err := GetPageQuery(handler.LedgerState, r, DisableCursorValidation) + if err != nil { + return hal.Page{}, err + } + qp := TradeAggregationsQuery{} + if err = getParams(&qp, r); err != nil { + return hal.Page{}, err + } + + page := hal.Page{ + Cursor: pageQuery.Cursor, + Order: pageQuery.Order, + Limit: pageQuery.Limit, + } + page.Init() + + for _, record := range records { + page.Add(record) + } + + newURL := FullURL(ctx) + q := newURL.Query() + + page.Links.Self = hal.NewLink(newURL.String()) + + //adjust time range for next page + if uint64(len(records)) == 0 { + page.Links.Next = page.Links.Self + } else { + lastRecord := records[len(records)-1] + + lastRecordTA, ok := lastRecord.(horizon.TradeAggregation) + if !ok { + panic(fmt.Sprintf("Unknown type: %T", lastRecord)) + } + timestamp := lastRecordTA.Timestamp + + if page.Order == "asc" { + newStartTime := timestamp + int64(qp.ResolutionFilter) + if newStartTime >= qp.EndTimeFilter.ToInt64() { + newStartTime = qp.EndTimeFilter.ToInt64() + } + q.Set("start_time", strconv.FormatInt(newStartTime, 10)) + newURL.RawQuery = q.Encode() + page.Links.Next = hal.NewLink(newURL.String()) + } else { //desc + newEndTime := timestamp + if newEndTime <= qp.StartTimeFilter.ToInt64() { + newEndTime = qp.StartTimeFilter.ToInt64() + } + q.Set("end_time", strconv.FormatInt(newEndTime, 10)) + newURL.RawQuery = q.Encode() + page.Links.Next = hal.NewLink(newURL.String()) + } + } + + return page, nil +} diff --git a/services/horizon/internal/actions/transaction.go b/services/horizon/internal/actions/transaction.go new file mode 100644 index 0000000000..9b527afb46 --- /dev/null +++ b/services/horizon/internal/actions/transaction.go @@ -0,0 +1,185 @@ +package actions + +import ( + "context" + "net/http" + + "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/resourceadapter" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + supportProblem "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +// TransactionQuery query struct for transactions/id end-point +type TransactionQuery struct { + TransactionHash string `schema:"tx_id" valid:"transactionHash,optional"` +} + +// GetTransactionByHashHandler is the action handler for the end-point returning a transaction. +type GetTransactionByHashHandler struct { +} + +// GetResource returns a transaction page. 
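+// The transaction is identified by the tx_id parameter, which may be either an outer (fee bump) transaction hash or an inner transaction hash.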
+func (handler GetTransactionByHashHandler) GetResource(w HeaderWriter, r *http.Request) (interface{}, error) { + ctx := r.Context() + qp := TransactionQuery{} + err := getParams(&qp, r) + if err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + var ( + record history.Transaction + resource horizon.Transaction + ) + + err = historyQ.TransactionByHash(ctx, &record, qp.TransactionHash) + if err != nil { + return resource, errors.Wrap(err, "loading transaction record") + } + + if err = resourceadapter.PopulateTransaction(ctx, qp.TransactionHash, &resource, record); err != nil { + return resource, errors.Wrap(err, "could not populate transaction") + } + return resource, nil +} + +// TransactionsQuery query struct for transactions end-points +type TransactionsQuery struct { + AccountID string `schema:"account_id" valid:"accountID,optional"` + ClaimableBalanceID string `schema:"claimable_balance_id" valid:"claimableBalanceID,optional"` + LiquidityPoolID string `schema:"liquidity_pool_id" valid:"sha256,optional"` + IncludeFailedTransactions bool `schema:"include_failed" valid:"-"` + LedgerID uint32 `schema:"ledger_id" valid:"-"` +} + +// Validate runs extra validations on query parameters +func (qp TransactionsQuery) Validate() error { + filters, err := countNonEmpty( + qp.AccountID, + qp.ClaimableBalanceID, + qp.LiquidityPoolID, + qp.LedgerID, + ) + + if err != nil { + return supportProblem.BadRequest + } + + if filters > 1 { + return supportProblem.MakeInvalidFieldProblem( + "filters", + errors.New("Use a single filter for transaction, you can only use one of account_id, claimable_balance_id or ledger_id"), + ) + } + + return nil +} + +// GetTransactionsHandler is the action handler for all end-points returning a list of transactions. +type GetTransactionsHandler struct { + LedgerState *ledger.State +} + +// GetResourcePage returns a page of transactions. +func (handler GetTransactionsHandler) GetResourcePage(w HeaderWriter, r *http.Request) ([]hal.Pageable, error) { + ctx := r.Context() + + pq, err := GetPageQuery(handler.LedgerState, r) + if err != nil { + return nil, err + } + + err = validateCursorWithinHistory(handler.LedgerState, pq) + if err != nil { + return nil, err + } + + qp := TransactionsQuery{} + err = getParams(&qp, r) + if err != nil { + return nil, err + } + + historyQ, err := horizonContext.HistoryQFromRequest(r) + if err != nil { + return nil, err + } + + records, err := loadTransactionRecords(ctx, historyQ, qp, pq) + if err != nil { + return nil, errors.Wrap(err, "loading transaction records") + } + + var response []hal.Pageable + + for _, record := range records { + var res horizon.Transaction + err = resourceadapter.PopulateTransaction(ctx, record.TransactionHash, &res, record) + if err != nil { + return nil, errors.Wrap(err, "could not populate transaction") + } + response = append(response, res) + } + + return response, nil +} + +// loadTransactionRecords returns a slice of transaction records of an +// account/ledger identified by accountID/ledgerID based on pq and +// includeFailedTx. 
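+// At most one of the account, claimable balance, liquidity pool, or ledger filters in qp is applied.
+// When include_failed is false, the result XDR of each returned transaction is decoded and re-checked so that corrupted (failed) rows surface as errors.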
+func loadTransactionRecords(ctx context.Context, hq *history.Q, qp TransactionsQuery, pq db2.PageQuery) ([]history.Transaction, error) { + var records []history.Transaction + + txs := hq.Transactions() + switch { + case qp.AccountID != "": + txs.ForAccount(ctx, qp.AccountID) + case qp.ClaimableBalanceID != "": + txs.ForClaimableBalance(ctx, qp.ClaimableBalanceID) + case qp.LiquidityPoolID != "": + txs.ForLiquidityPool(ctx, qp.LiquidityPoolID) + case qp.LedgerID > 0: + txs.ForLedger(ctx, int32(qp.LedgerID)) + } + + if qp.IncludeFailedTransactions { + txs.IncludeFailed() + } + + err := txs.Page(pq).Select(ctx, &records) + if err != nil { + return nil, errors.Wrap(err, "executing transaction records query") + } + + for _, t := range records { + if !qp.IncludeFailedTransactions { + if !t.Successful { + return nil, errors.Errorf("Corrupted data! `include_failed=false` but returned transaction is failed: %s", t.TransactionHash) + } + + var resultXDR xdr.TransactionResult + err = xdr.SafeUnmarshalBase64(t.TxResult, &resultXDR) + if err != nil { + return nil, errors.Wrap(err, "unmarshalling tx result") + } + + if !resultXDR.Successful() { + return nil, errors.Errorf("Corrupted data! `include_failed=false` but returned transaction is failed: %s %s", t.TransactionHash, t.TxResult) + } + } + } + + return records, nil +} diff --git a/services/horizon/internal/actions/transaction_test.go b/services/horizon/internal/actions/transaction_test.go new file mode 100644 index 0000000000..e029edef3a --- /dev/null +++ b/services/horizon/internal/actions/transaction_test.go @@ -0,0 +1,223 @@ +package actions + +import ( + "net/http/httptest" + "testing" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + supportProblem "github.com/stellar/go/support/render/problem" +) + +func TestGetTransactionsHandler(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + + q := &history.Q{tt.HorizonSession()} + handler := GetTransactionsHandler{} + + // filter by account + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "account_id": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "include_failed": "true", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 3) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "account_id": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "include_failed": "true", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "account_id": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + "include_failed": "true", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 2) + + // // filter by ledger + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "ledger_id": "1", + "include_failed": "true", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 0) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "ledger_id": "2", + "include_failed": "true", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 3) + + 
records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "ledger_id": "3", + "include_failed": "true", + }, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + + records, err = handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{ + "account_id": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "ledger_id": "3", + "include_failed": "true", + }, map[string]string{}, q, + ), + ) + tt.Assert.IsType(&supportProblem.P{}, err) + p := err.(*supportProblem.P) + tt.Assert.Equal("bad_request", p.Type) + tt.Assert.Equal("filters", p.Extras["invalid_field"]) + tt.Assert.Equal( + "Use a single filter for transaction, you can only use one of account_id, claimable_balance_id or ledger_id", + p.Extras["reason"], + ) +} + +func checkOuterHashResponse( + tt *test.T, + fixture history.FeeBumpFixture, + transactionResponse horizon.Transaction, +) { + tt.Assert.Equal(fixture.Transaction.Account, transactionResponse.Account) + tt.Assert.Equal(fixture.Transaction.AccountSequence, transactionResponse.AccountSequence) + tt.Assert.Equal(fixture.Transaction.FeeAccount.String, transactionResponse.FeeAccount) + tt.Assert.Equal(fixture.Transaction.FeeCharged, transactionResponse.FeeCharged) + tt.Assert.Equal(fixture.Transaction.TransactionHash, transactionResponse.ID) + tt.Assert.Equal(fixture.Transaction.MaxFee, transactionResponse.InnerTransaction.MaxFee) + tt.Assert.Equal( + []string(fixture.Transaction.InnerSignatures), + transactionResponse.InnerTransaction.Signatures, + ) + tt.Assert.Equal( + fixture.Transaction.InnerTransactionHash.String, + transactionResponse.InnerTransaction.Hash, + ) + tt.Assert.Equal(fixture.Transaction.NewMaxFee.Int64, transactionResponse.MaxFee) + tt.Assert.Equal(fixture.Transaction.Memo.String, transactionResponse.Memo) + tt.Assert.Equal(fixture.Transaction.MemoType, transactionResponse.MemoType) + tt.Assert.Equal(fixture.Transaction.OperationCount, transactionResponse.OperationCount) + tt.Assert.Equal( + []string(fixture.Transaction.Signatures), + transactionResponse.Signatures, + ) + tt.Assert.Equal(fixture.Transaction.Successful, transactionResponse.Successful) + tt.Assert.Equal(fixture.Transaction.TotalOrderID.PagingToken(), transactionResponse.PT) + tt.Assert.Equal(fixture.Transaction.TransactionHash, transactionResponse.Hash) + tt.Assert.Equal(fixture.Transaction.TxEnvelope, transactionResponse.EnvelopeXdr) + tt.Assert.Equal(fixture.Transaction.TxFeeMeta, transactionResponse.FeeMetaXdr) + tt.Assert.Equal(fixture.Transaction.TxMeta, transactionResponse.ResultMetaXdr) + tt.Assert.Equal(fixture.Transaction.TxResult, transactionResponse.ResultXdr) +} + +func TestFeeBumpTransactionPage(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + fixture := history.FeeBumpScenario(tt, q, true) + handler := GetTransactionsHandler{} + + records, err := handler.GetResourcePage( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, map[string]string{}, q, + ), + ) + tt.Assert.NoError(err) + tt.Assert.Len(records, 2) + + feeBumpResponse := records[0].(horizon.Transaction) + checkOuterHashResponse(tt, fixture, feeBumpResponse) + + normalTxResponse := records[1].(horizon.Transaction) + tt.Assert.Equal(fixture.NormalTransaction.TransactionHash, normalTxResponse.ID) +} + +func TestFeeBumpTransactionResource(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + 
test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{tt.HorizonSession()} + fixture := history.FeeBumpScenario(tt, q, true) + + handler := GetTransactionByHashHandler{} + resource, err := handler.GetResource( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, map[string]string{ + "tx_id": fixture.OuterHash, + }, q, + ), + ) + tt.Assert.NoError(err) + byOuterHash := resource.(horizon.Transaction) + checkOuterHashResponse(tt, fixture, byOuterHash) + + resource, err = handler.GetResource( + httptest.NewRecorder(), + makeRequest( + t, map[string]string{}, map[string]string{ + "tx_id": fixture.InnerHash, + }, q, + ), + ) + tt.Assert.NoError(err) + + byInnerHash := resource.(horizon.Transaction) + + tt.Assert.NotEqual(byOuterHash.Hash, byInnerHash.Hash) + tt.Assert.NotEqual(byOuterHash.ID, byInnerHash.ID) + tt.Assert.NotEqual(byOuterHash.Signatures, byInnerHash.Signatures) + + tt.Assert.Equal(fixture.InnerHash, byInnerHash.Hash) + tt.Assert.Equal(fixture.InnerHash, byInnerHash.ID) + tt.Assert.Equal( + []string(fixture.Transaction.InnerSignatures), + byInnerHash.Signatures, + ) + + byInnerHash.Hash = byOuterHash.Hash + byInnerHash.ID = byOuterHash.ID + byInnerHash.Signatures = byOuterHash.Signatures + byInnerHash.Links = byOuterHash.Links + tt.Assert.Equal(byOuterHash, byInnerHash) +} diff --git a/services/horizon/internal/actions/validators.go b/services/horizon/internal/actions/validators.go new file mode 100644 index 0000000000..4ac8ae8bcf --- /dev/null +++ b/services/horizon/internal/actions/validators.go @@ -0,0 +1,167 @@ +package actions + +import ( + "encoding/hex" + "github.com/stellar/go/services/horizon/internal/db2/history" + "strings" + + "github.com/asaskevich/govalidator" + "github.com/gorilla/schema" + + "github.com/stellar/go/amount" + "github.com/stellar/go/services/horizon/internal/assets" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// Validateable allow structs to define their own custom validations. 
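+// Query structs that implement it have Validate called by getParams in addition to the govalidator tag checks.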
+type Validateable interface { + Validate() error +} + +func init() { + govalidator.TagMap["accountID"] = isAccountID + govalidator.TagMap["amount"] = isAmount + govalidator.TagMap["assetType"] = isAssetType + govalidator.TagMap["asset"] = isAsset + govalidator.TagMap["claimableBalanceID"] = isClaimableBalanceID + govalidator.TagMap["transactionHash"] = isTransactionHash + govalidator.TagMap["sha256"] = govalidator.IsSHA256 + govalidator.TagMap["tradeType"] = isTradeType +} + +var customTagsErrorMessages = map[string]string{ + "accountID": "Account ID must start with `G` and contain 56 alphanum characters", + "amount": "Amount must be positive", + "asset": "Asset must be the string \"native\" or a string of the form \"Code:IssuerAccountID\" for issued assets.", + "assetType": "Asset type must be native, credit_alphanum4 or credit_alphanum12", + "bool": "Filter should be true or false", + "claimable_balance_id": "Claimable Balance ID must be the hex-encoded XDR representation of a Claimable Balance ID", + "ledger_id": "Ledger ID must be an integer higher than 0", + "offer_id": "Offer ID must be an integer higher than 0", + "op_id": "Operation ID must be an integer higher than 0", + "transactionHash": "Transaction hash must be a hex-encoded, lowercase SHA-256 hash", + "tradeType": "Trade type must be all, orderbook, or liquidity_pool", +} + +func isTradeType(tradeType string) bool { + return tradeType == history.AllTrades || + tradeType == history.OrderbookTrades || + tradeType == history.LiquidityPoolTrades +} + +// isAsset validates if string contains a valid SEP11 asset +func isAsset(assetString string) bool { + var asset xdr.Asset + + if strings.ToLower(assetString) == "native" { + if err := asset.SetNative(); err != nil { + return false + } + } else { + + parts := strings.Split(assetString, ":") + if len(parts) != 2 { + return false + } + + code := parts[0] + if !xdr.ValidAssetCode.MatchString(code) { + return false + } + + issuer, err := xdr.AddressToAccountId(parts[1]) + if err != nil { + return false + } + + if err := asset.SetCredit(code, issuer); err != nil { + return false + } + } + + return true +} + +func getSchemaErrorFieldMessage(field string, err error) error { + if customMessage, ok := customTagsErrorMessages[field]; ok { + return errors.New(customMessage) + } + + if ce, ok := err.(schema.ConversionError); ok { + customMessage, ok := customTagsErrorMessages[ce.Type.String()] + if ok { + return errors.New(customMessage) + } + } + + return err +} + +func getErrorFieldMessage(err error) (string, string) { + var field, message string + + switch err := err.(type) { + case govalidator.Error: + field = err.Name + validator := err.Validator + m, ok := customTagsErrorMessages[validator] + // Give priority to inline custom error messages. 
+ // CustomErrorMessageExists when the validator is defined like: + // `validatorName~custom message` + if !ok || err.CustomErrorMessageExists { + m = err.Err.Error() + } + message = m + case govalidator.Errors: + for _, item := range err.Errors() { + field, message = getErrorFieldMessage(item) + break + } + } + + return field, message +} + +func isAssetType(str string) bool { + if _, err := assets.Parse(str); err != nil { + return false + } + + return true +} + +func isAccountID(str string) bool { + if _, err := xdr.AddressToAccountId(str); err != nil { + return false + } + + return true +} + +func isTransactionHash(str string) bool { + decoded, err := hex.DecodeString(str) + if err != nil { + return false + } + + return len(decoded) == 32 && strings.ToLower(str) == str +} + +func isAmount(str string) bool { + parsed, err := amount.Parse(str) + switch { + case err != nil: + return false + case parsed <= 0: + return false + } + + return true +} + +func isClaimableBalanceID(str string) bool { + var cbID xdr.ClaimableBalanceId + err := xdr.SafeUnmarshalHex(str, &cbID) + return err == nil +} diff --git a/services/horizon/internal/actions/validators_test.go b/services/horizon/internal/actions/validators_test.go new file mode 100644 index 0000000000..1ddd6d763c --- /dev/null +++ b/services/horizon/internal/actions/validators_test.go @@ -0,0 +1,279 @@ +package actions + +import ( + "fmt" + "testing" + + "github.com/asaskevich/govalidator" + "github.com/stretchr/testify/assert" +) + +func TestAssetTypeValidator(t *testing.T) { + type Query struct { + AssetType string `valid:"assetType,optional"` + } + + for _, testCase := range []struct { + assetType string + valid bool + }{ + { + "native", + true, + }, + { + "credit_alphanum4", + true, + }, + { + "credit_alphanum12", + true, + }, + { + "", + true, + }, + { + "stellar_asset_type", + false, + }, + } { + t.Run(testCase.assetType, func(t *testing.T) { + tt := assert.New(t) + + q := Query{ + AssetType: testCase.assetType, + } + + result, err := govalidator.ValidateStruct(q) + if testCase.valid { + tt.NoError(err) + tt.True(result) + } else { + tt.Equal("AssetType: stellar_asset_type does not validate as assetType", err.Error()) + } + }) + } +} + +func TestAccountIDValidator(t *testing.T) { + type Query struct { + Account string `valid:"accountID,optional"` + } + + for _, testCase := range []struct { + name string + value string + expectedError string + }{ + { + "invalid stellar address", + "FON4WOTCFSASG3J6SGLLQZURDDUVNBQANAHEQJ3PBNDZ74X63UZWQPZW", + "Account: FON4WOTCFSASG3J6SGLLQZURDDUVNBQANAHEQJ3PBNDZ74X63UZWQPZW does not validate as accountID", + }, + { + "valid stellar address", + "GAN4WOTCFSASG3J6SGLLQZURDDUVNBQANAHEQJ3PBNDZ74X63UZWQPZW", + "", + }, + { + "empty stellar address should not be validated", + "", + "", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + tt := assert.New(t) + + q := Query{ + Account: testCase.value, + } + + result, err := govalidator.ValidateStruct(q) + if testCase.expectedError == "" { + tt.NoError(err) + tt.True(result) + } else { + tt.Equal(testCase.expectedError, err.Error()) + } + }) + } +} + +func TestAssetValidator(t *testing.T) { + type Query struct { + Asset string `valid:"asset"` + } + + for _, testCase := range []struct { + desc string + asset string + valid bool + }{ + { + "native", + "native", + true, + }, + { + "credit_alphanum4", + "USD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + true, + }, + { + "credit_alphanum12", + 
"SDFUSD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + true, + }, + { + "invalid credit_alphanum12", + "SDFUSDSDFUSDSDFUSD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + false, + }, + { + "invalid no issuer", + "FOO", + false, + }, + { + "invalid issuer", + "FOO:BAR", + false, + }, + { + "empty colon", + ":", + false, + }, + } { + t.Run(testCase.desc, func(t *testing.T) { + tt := assert.New(t) + + q := Query{ + Asset: testCase.asset, + } + + result, err := govalidator.ValidateStruct(q) + if testCase.valid { + tt.NoError(err) + tt.True(result) + } else { + tt.Error(err) + } + }) + } +} + +func TestAmountValidator(t *testing.T) { + type Query struct { + Amount string `valid:"amount"` + } + + for _, testCase := range []struct { + name string + value string + expectedError string + }{ + { + "valid", + "10", + "", + }, + { + "zero", + "0", + "Amount: 0 does not validate as amount", + }, + { + "negative", + "-1", + "Amount: -1 does not validate as amount", + }, + { + "non-number", + "one", + "Amount: one does not validate as amount", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + tt := assert.New(t) + + q := Query{ + Amount: testCase.value, + } + + result, err := govalidator.ValidateStruct(q) + if testCase.expectedError == "" { + tt.NoError(err) + tt.True(result) + } else { + tt.Equal(testCase.expectedError, err.Error()) + } + }) + } +} + +func TestTransactionHashValidator(t *testing.T) { + type Query struct { + TransactionHash string `valid:"transactionHash,optional"` + } + + for _, testCase := range []struct { + name string + value string + valid bool + }{ + { + "length 63", + "1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc29", + false, + }, + { + "length 66", + "1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc29222", + false, + }, + { + "uppercase hash", + "2374E99349B9EF7DBA9A5DB3339B78FDA8F34777B1AF33BA468AD5C0DF946D4D", + false, + }, + { + "badly formated tx hash", + "%00%1E4%5E%EF%BF%BD%EF%BF%BD%EF%BF%BDpVP%EF%BF%BDI&R%0BK%EF%BF%BD%1D%EF%BF%BD%EF%BF%BD=%EF%BF%BD%3F%23%EF%BF%BD%EF%BF%BDl%EF%BF%BD%1El%EF%BF%BD%EF%BF%BD", + false, + }, + { + "valid tx hash", + "2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d", + true, + }, + { + "empty transaction hash should not be validated", + "", + true, + }, + { + "0x prefixed hash", + "0x2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d", + false, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + tt := assert.New(t) + + q := Query{ + TransactionHash: testCase.value, + } + + result, err := govalidator.ValidateStruct(q) + if testCase.valid { + tt.NoError(err) + tt.True(result) + } else { + expected := fmt.Sprintf("TransactionHash: %s does not validate as transactionHash", testCase.value) + tt.Equal(expected, err.Error()) + } + }) + } +} diff --git a/services/horizon/internal/actions_account_test.go b/services/horizon/internal/actions_account_test.go new file mode 100644 index 0000000000..541300c3a6 --- /dev/null +++ b/services/horizon/internal/actions_account_test.go @@ -0,0 +1,33 @@ +package horizon + +import ( + "testing" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/xdr" +) + +func TestAccountActions_InvalidID(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + // Makes StateMiddleware happy + q := history.Q{ht.HorizonSession()} + err := q.UpdateLastLedgerIngest(ht.Ctx, 100) + ht.Assert.NoError(err) + err = 
q.UpdateIngestVersion(ht.Ctx, ingest.CurrentVersion) + ht.Assert.NoError(err) + _, err = q.InsertLedger(ht.Ctx, xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 100, + }, + }, 0, 0, 0, 0, 0) + ht.Assert.NoError(err) + + // existing account + w := ht.Get( + "/accounts/=cr%FF%98%CB%F3%AF%E72%D85%FE%28%15y%8Fz%C4Ng%CE%98h%02%2A:%B6%FF%B9%CF%92%88O%91%10d&S%7C%9Bi%D4%CFI%28%CFo", + ) + ht.Assert.Equal(400, w.Code) +} diff --git a/services/horizon/internal/actions_data_test.go b/services/horizon/internal/actions_data_test.go new file mode 100644 index 0000000000..3de82a915d --- /dev/null +++ b/services/horizon/internal/actions_data_test.go @@ -0,0 +1,109 @@ +package horizon + +import ( + "encoding/base64" + "encoding/json" + "testing" + + "github.com/guregu/null" + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" +) + +var ( + data1 = history.Data{ + LastModifiedLedger: 100, + AccountID: "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + Name: "name1", + // This also tests if base64 encoding is working as 0 is invalid UTF-8 byte + Value: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + + Sponsor: null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + } + + data2 = history.Data{ + LastModifiedLedger: 100, + AccountID: "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + Name: "name ", + Value: []byte("it got spaces!"), + } +) + +func TestDataActions_Show(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + test.ResetHorizonDB(t, ht.HorizonDB) + q := &history.Q{ht.HorizonSession()} + + // Makes StateMiddleware happy + err := q.UpdateLastLedgerIngest(ht.Ctx, 100) + ht.Assert.NoError(err) + err = q.UpdateIngestVersion(ht.Ctx, ingest.CurrentVersion) + ht.Assert.NoError(err) + _, err = q.InsertLedger(ht.Ctx, xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 100, + }, + }, 0, 0, 0, 0, 0) + ht.Assert.NoError(err) + + err = q.UpsertAccountData(ht.Ctx, []history.Data{data1, data2}) + assert.NoError(t, err) + + prefix := "/accounts/GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB" + result := map[string]string{} + + // json + w := ht.Get(prefix + "/data/name1") + if ht.Assert.Equal(200, w.Code) { + err := json.Unmarshal(w.Body.Bytes(), &result) + ht.Assert.NoError(err) + decoded, err := base64.StdEncoding.DecodeString(result["value"]) + ht.Assert.NoError(err) + ht.Assert.Equal([]byte(data1.Value), decoded) + ht.Assert.Equal("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", result["sponsor"]) + } + + // raw + w = ht.Get(prefix+"/data/name1", test.RequestHelperRaw) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.Equal([]byte(data1.Value), w.Body.Bytes()) + } + + result = map[string]string{} + // regression: https://github.com/stellar/horizon/issues/325 + // names with special characters do not work + w = ht.Get(prefix + "/data/name%20") + if ht.Assert.Equal(200, w.Code) { + err := json.Unmarshal(w.Body.Bytes(), &result) + ht.Assert.NoError(err) + + decoded, err := base64.StdEncoding.DecodeString(result["value"]) + ht.Assert.NoError(err) + ht.Assert.Equal([]byte(data2.Value), decoded) + ht.Assert.Equal("", result["sponsor"]) + } + + w = ht.Get(prefix+"/data/name%20", test.RequestHelperRaw) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.Equal("it got spaces!", w.Body.String()) + } + + // 
missing + w = ht.Get(prefix + "/data/missing") + ht.Assert.Equal(404, w.Code) + + w = ht.Get(prefix+"/data/missing", test.RequestHelperRaw) + ht.Assert.Equal(404, w.Code) + + // Too long + w = ht.Get(prefix+"/data/01234567890123456789012345678901234567890123456789012345678901234567890123456789", test.RequestHelperRaw) + if ht.Assert.Equal(400, w.Code) { + ht.Assert.Contains(w.Body.String(), "does not validate as length(1|64)") + } + +} diff --git a/services/horizon/internal/actions_effects_test.go b/services/horizon/internal/actions_effects_test.go new file mode 100644 index 0000000000..b4b07185bf --- /dev/null +++ b/services/horizon/internal/actions_effects_test.go @@ -0,0 +1,152 @@ +package horizon + +import ( + "testing" + + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestEffectActions_Index(t *testing.T) { + + t.Run("omnibus test", func(t *testing.T) { + + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + w := ht.Get("/effects?limit=20") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(11, w.Body) + } + + // test streaming, regression for https://github.com/stellar/go/services/horizon/internal/issues/147 + w = ht.Get("/effects?limit=2", test.RequestHelperStreaming) + ht.Assert.Equal(200, w.Code) + + // filtered by ledger + w = ht.Get("/ledgers/1/effects") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(0, w.Body) + } + + w = ht.Get("/ledgers/2/effects") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(9, w.Body) + } + + w = ht.Get("/ledgers/3/effects") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(2, w.Body) + } + + // Makes StateMiddleware happy + q := history.Q{ht.HorizonSession()} + err := q.UpdateLastLedgerIngest(ht.Ctx, 3) + ht.Assert.NoError(err) + err = q.UpdateIngestVersion(ht.Ctx, ingest.CurrentVersion) + ht.Assert.NoError(err) + + // checks if empty param returns 404 instead of all payments + w = ht.Get("/accounts//effects") + ht.Assert.NotEqual(404, w.Code) + + // filtered by account + w = ht.Get("/accounts/GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H/effects") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + + w = ht.Get("/accounts/GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2/effects") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(2, w.Body) + } + + w = ht.Get("/accounts/GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU/effects") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + + // filtered by transaction + w = ht.Get("/transactions/2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d/effects") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + // missing tx + w = ht.Get("/transactions/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/effects") + ht.Assert.Equal(404, w.Code) + // uppercase tx hash not accepted + w = ht.Get("/transactions/2374E99349B9EF7DBA9A5DB3339B78FDA8F34777B1AF33BA468AD5C0DF946D4D/effects") + ht.Assert.Equal(400, w.Code) + // badly formated tx hash not accepted + w = ht.Get("/transactions/%00%1E4%5E%EF%BF%BD%EF%BF%BD%EF%BF%BDpVP%EF%BF%BDI&R%0BK%EF%BF%BD%1D%EF%BF%BD%EF%BF%BD=%EF%BF%BD%3F%23%EF%BF%BD%EF%BF%BDl%EF%BF%BD%1El%EF%BF%BD%EF%BF%BD/effects") + ht.Assert.Equal(400, w.Code) + + w = ht.Get("/transactions/2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d/effects") + if 
ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + + // filtered by operation + w = ht.Get("/operations/8589938689/effects") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + + // Check extra params + w = ht.Get("/ledgers/100/effects?account_id=GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + ht.Assert.Equal(400, w.Code) + w = ht.Get("/accounts/GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU/effects?ledger_id=5") + ht.Assert.Equal(400, w.Code) + + // before history + ht.ReapHistory(1) + w = ht.Get("/effects?order=desc&cursor=8589938689-1") + ht.Assert.Equal(410, w.Code) + ht.T.T.Log(w.Body.String()) + }) + + t.Run("Effect resource props", func(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + // created_at + w := ht.Get("/ledgers/2/effects") + if ht.Assert.Equal(200, w.Code) { + var result []effects.Base + _ = ht.UnmarshalPage(w.Body, &result) + ht.Require.NotEmpty(result, "unexpected empty response") + + e1 := result[0] + + var ledger2 history.Ledger + err := ht.HorizonDB.Get(&ledger2, "SELECT * FROM history_ledgers WHERE sequence = 2") + ht.Require.NoError(err, "failed to load ledger") + + ht.Assert.Equal(ledger2.ClosedAt.UTC(), e1.LedgerCloseTime.UTC()) + } + }) +} + +func TestEffectsForFeeBumpTransaction(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + test.ResetHorizonDB(t, ht.HorizonDB) + q := &history.Q{ht.HorizonSession()} + fixture := history.FeeBumpScenario(ht.T, q, true) + + w := ht.Get("/transactions/" + fixture.OuterHash + "/effects") + ht.Assert.Equal(200, w.Code) + var byOuterHash []effects.Base + ht.UnmarshalPage(w.Body, &byOuterHash) + ht.Assert.Len(byOuterHash, 1) + + w = ht.Get("/transactions/" + fixture.InnerHash + "/effects") + ht.Assert.Equal(200, w.Code) + var byInnerHash []effects.Base + ht.UnmarshalPage(w.Body, &byInnerHash) + ht.Assert.Len(byInnerHash, 1) + + ht.Assert.Equal(byOuterHash, byInnerHash) +} diff --git a/services/horizon/internal/actions_ledger_test.go b/services/horizon/internal/actions_ledger_test.go new file mode 100644 index 0000000000..263cd95e37 --- /dev/null +++ b/services/horizon/internal/actions_ledger_test.go @@ -0,0 +1,75 @@ +package horizon + +import ( + "encoding/json" + "testing" + + "github.com/stellar/go/protocols/horizon" +) + +func TestLedgerActions_Index(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + // default params + w := ht.Get("/ledgers") + + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + + // with limit + w = ht.RH.Get("/ledgers?limit=1") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } +} + +func TestLedgerActions_Show(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + w := ht.Get("/ledgers/2") + ht.Assert.Equal(200, w.Code) + + var result horizon.Ledger + err := json.Unmarshal(w.Body.Bytes(), &result) + if ht.Assert.NoError(err) { + ht.Assert.Equal(int32(2), result.Sequence) + ht.Assert.NotEmpty(result.HeaderXDR) + ht.Assert.Equal(int32(3), result.SuccessfulTransactionCount) + ht.Assert.Equal(int32(0), *result.FailedTransactionCount) + ht.Assert.Nil(result.TxSetOperationCount) + } + + // There's no way to test previous versions of ingestion right now + // so let's manually update the state to look like version 14 of ingesiton + // only the latest gap is considered for determining the elder ledger + _, err = ht.HorizonDB.Exec(` + UPDATE history_ledgers SET successful_transaction_count = NULL, failed_transaction_count = NULL, 
tx_set_operation_count = 5 WHERE sequence = 2 + `) + ht.Require.NoError(err, "failed to update history_ledgers") + + w = ht.Get("/ledgers/2") + ht.Assert.Equal(200, w.Code) + + result = horizon.Ledger{} + err = json.Unmarshal(w.Body.Bytes(), &result) + if ht.Assert.NoError(err) { + ht.Assert.Equal(int32(2), result.Sequence) + ht.Assert.NotEmpty(result.HeaderXDR) + ht.Assert.Equal(int32(3), result.SuccessfulTransactionCount) + ht.Assert.Nil(result.FailedTransactionCount) + ht.Assert.Equal(int32(5), *result.TxSetOperationCount) + } + + // ledger higher than history + w = ht.Get("/ledgers/100") + ht.Assert.Equal(404, w.Code) + + // ledger that was reaped + ht.ReapHistory(1) + + w = ht.Get("/ledgers/1") + ht.Assert.Equal(410, w.Code) +} diff --git a/services/horizon/internal/actions_operation_fee_stats_test.go b/services/horizon/internal/actions_operation_fee_stats_test.go new file mode 100644 index 0000000000..c1556dee71 --- /dev/null +++ b/services/horizon/internal/actions_operation_fee_stats_test.go @@ -0,0 +1,375 @@ +package horizon + +import ( + "encoding/json" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" +) + +func TestOperationFeeTestsActions_Show(t *testing.T) { + testCases := []struct { + scenario string + lastbasefee int64 + maxFeeMax int + maxFeeMin int + maxFeeMode int + maxFeeP10 int + maxFeeP20 int + maxFeeP30 int + maxFeeP40 int + maxFeeP50 int + maxFeeP60 int + maxFeeP70 int + maxFeeP80 int + maxFeeP90 int + maxFeeP95 int + maxFeeP99 int + feeChargedMax int64 + feeChargedMin int64 + feeChargedMode int64 + feeChargedP10 int64 + feeChargedP20 int64 + feeChargedP30 int64 + feeChargedP40 int64 + feeChargedP50 int64 + feeChargedP60 int64 + feeChargedP70 int64 + feeChargedP80 int64 + feeChargedP90 int64 + feeChargedP95 int64 + feeChargedP99 int64 + ledgerCapacityUsage float64 + }{ + // happy path + { + scenario: "operation_fee_stats_1", + lastbasefee: 100, + maxFeeMax: 100, + maxFeeMin: 100, + maxFeeMode: 100, + maxFeeP10: 100, + maxFeeP20: 100, + maxFeeP30: 100, + maxFeeP40: 100, + maxFeeP50: 100, + maxFeeP60: 100, + maxFeeP70: 100, + maxFeeP80: 100, + maxFeeP90: 100, + maxFeeP95: 100, + maxFeeP99: 100, + feeChargedMax: 100, + feeChargedMin: 100, + feeChargedMode: 100, + feeChargedP10: 100, + feeChargedP20: 100, + feeChargedP30: 100, + feeChargedP40: 100, + feeChargedP50: 100, + feeChargedP60: 100, + feeChargedP70: 100, + feeChargedP80: 100, + feeChargedP90: 100, + feeChargedP95: 100, + feeChargedP99: 100, + ledgerCapacityUsage: 0.04, + }, + // no transactions in last 5 ledgers + { + scenario: "operation_fee_stats_2", + ledgerCapacityUsage: 0.00, + lastbasefee: 100, + maxFeeMax: 100, + maxFeeMin: 100, + maxFeeMode: 100, + maxFeeP10: 100, + maxFeeP20: 100, + maxFeeP30: 100, + maxFeeP40: 100, + maxFeeP50: 100, + maxFeeP60: 100, + maxFeeP70: 100, + maxFeeP80: 100, + maxFeeP90: 100, + maxFeeP95: 100, + maxFeeP99: 100, + feeChargedMax: 100, + feeChargedMin: 100, + feeChargedMode: 100, + feeChargedP10: 100, + feeChargedP20: 100, + feeChargedP30: 100, + feeChargedP40: 100, + feeChargedP50: 100, + feeChargedP60: 100, + feeChargedP70: 100, + feeChargedP80: 100, + feeChargedP90: 100, + feeChargedP95: 100, + feeChargedP99: 100, + }, + // transactions with varying fees + { + scenario: "operation_fee_stats_3", + ledgerCapacityUsage: 0.03, + lastbasefee: 100, + maxFeeMax: 400, + maxFeeMin: 200, + maxFeeMode: 400, + maxFeeP10: 200, + maxFeeP20: 300, + maxFeeP30: 400, + maxFeeP40: 400, + maxFeeP50: 400, + maxFeeP60: 400, + maxFeeP70: 400, + maxFeeP80: 400, + maxFeeP90: 400, + 
maxFeeP95: 400, + maxFeeP99: 400, + feeChargedMax: 100, + feeChargedMin: 100, + feeChargedMode: 100, + feeChargedP10: 100, + feeChargedP20: 100, + feeChargedP30: 100, + feeChargedP40: 100, + feeChargedP50: 100, + feeChargedP60: 100, + feeChargedP70: 100, + feeChargedP80: 100, + feeChargedP90: 100, + feeChargedP95: 100, + feeChargedP99: 100, + }, + } + + for _, kase := range testCases { + t.Run("/fee_stats", func(t *testing.T) { + ht := StartHTTPTest(t, kase.scenario) + defer ht.Finish() + + // Update max_tx_set_size on ledgers + _, err := ht.HorizonSession().ExecRaw(ht.Ctx, "UPDATE history_ledgers SET max_tx_set_size = 50") + ht.Require.NoError(err) + + ht.App.UpdateFeeStatsState(ht.Ctx) + + w := ht.Get("/fee_stats") + + if ht.Assert.Equal(200, w.Code) { + var result hProtocol.FeeStats + err := json.Unmarshal(w.Body.Bytes(), &result) + ht.Require.NoError(err) + ht.Assert.Equal(kase.lastbasefee, result.LastLedgerBaseFee, "base_fee") + ht.Assert.Equal(kase.ledgerCapacityUsage, result.LedgerCapacityUsage, "ledger_capacity_usage") + + ht.Assert.Equal(int64(kase.maxFeeMin), result.MaxFee.Min, "min") + ht.Assert.Equal(int64(kase.maxFeeMode), result.MaxFee.Mode, "mode") + ht.Assert.Equal(int64(kase.maxFeeP10), result.MaxFee.P10, "p10") + ht.Assert.Equal(int64(kase.maxFeeP20), result.MaxFee.P20, "p20") + ht.Assert.Equal(int64(kase.maxFeeP30), result.MaxFee.P30, "p30") + ht.Assert.Equal(int64(kase.maxFeeP40), result.MaxFee.P40, "p40") + ht.Assert.Equal(int64(kase.maxFeeP50), result.MaxFee.P50, "p50") + ht.Assert.Equal(int64(kase.maxFeeP60), result.MaxFee.P60, "p60") + ht.Assert.Equal(int64(kase.maxFeeP70), result.MaxFee.P70, "p70") + ht.Assert.Equal(int64(kase.maxFeeP80), result.MaxFee.P80, "p80") + ht.Assert.Equal(int64(kase.maxFeeP90), result.MaxFee.P90, "p90") + ht.Assert.Equal(int64(kase.maxFeeP95), result.MaxFee.P95, "p95") + ht.Assert.Equal(int64(kase.maxFeeP99), result.MaxFee.P99, "p99") + + ht.Assert.Equal(kase.feeChargedMax, result.FeeCharged.Max, "fee_charged_max") + ht.Assert.Equal(kase.feeChargedMin, result.FeeCharged.Min, "fee_charged_min") + ht.Assert.Equal(kase.feeChargedMode, result.FeeCharged.Mode, "fee_charged_mode") + ht.Assert.Equal(kase.feeChargedP10, result.FeeCharged.P10, "fee_charged_p10") + ht.Assert.Equal(kase.feeChargedP20, result.FeeCharged.P20, "fee_charged_p20") + ht.Assert.Equal(kase.feeChargedP30, result.FeeCharged.P30, "fee_charged_p30") + ht.Assert.Equal(kase.feeChargedP40, result.FeeCharged.P40, "fee_charged_p40") + ht.Assert.Equal(kase.feeChargedP50, result.FeeCharged.P50, "fee_charged_p50") + ht.Assert.Equal(kase.feeChargedP60, result.FeeCharged.P60, "fee_charged_p60") + ht.Assert.Equal(kase.feeChargedP70, result.FeeCharged.P70, "fee_charged_p70") + ht.Assert.Equal(kase.feeChargedP80, result.FeeCharged.P80, "fee_charged_p80") + ht.Assert.Equal(kase.feeChargedP90, result.FeeCharged.P90, "fee_charged_p90") + ht.Assert.Equal(kase.feeChargedP95, result.FeeCharged.P95, "fee_charged_p95") + ht.Assert.Equal(kase.feeChargedP99, result.FeeCharged.P99, "fee_charged_p99") + } + }) + } +} + +// TestOperationFeeTestsActions_ShowMultiOp tests fee stats in case transactions contain multiple operations. +// In such case, since protocol v11, we should use number of operations as the indicator of ledger capacity usage. 
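+// With operation_count doubled and max_tx_set_size unchanged (50), ledger_capacity_usage doubles (0.03 -> 0.06)
+// and the per-operation fee percentiles are halved relative to the single-operation scenario above.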
+func TestOperationFeeTestsActions_ShowMultiOp(t *testing.T) { + ht := StartHTTPTest(t, "operation_fee_stats_3") + defer ht.Finish() + + // Update max_tx_set_size on ledgers + _, err := ht.HorizonSession().ExecRaw(ht.Ctx, "UPDATE history_ledgers SET max_tx_set_size = 50") + ht.Require.NoError(err) + + // Update number of ops on each transaction + _, err = ht.HorizonSession().ExecRaw(ht.Ctx, "UPDATE history_transactions SET operation_count = operation_count * 2") + ht.Require.NoError(err) + + ht.App.UpdateFeeStatsState(ht.Ctx) + + w := ht.Get("/fee_stats") + + if ht.Assert.Equal(200, w.Code) { + var result hProtocol.FeeStats + err := json.Unmarshal(w.Body.Bytes(), &result) + ht.Require.NoError(err) + ht.Assert.Equal(int64(100), result.LastLedgerBaseFee, "base_fee") + ht.Assert.Equal(0.06, result.LedgerCapacityUsage, "ledger_capacity_usage") + + ht.Assert.Equal(int64(200), result.MaxFee.Max, "max_fee_max") + ht.Assert.Equal(int64(100), result.MaxFee.Min, "max_fee_min") + ht.Assert.Equal(int64(200), result.MaxFee.Mode, "max_fee_mode") + ht.Assert.Equal(int64(100), result.MaxFee.P10, "max_fee_p10") + ht.Assert.Equal(int64(150), result.MaxFee.P20, "max_fee_p20") + ht.Assert.Equal(int64(200), result.MaxFee.P30, "max_fee_p30") + ht.Assert.Equal(int64(200), result.MaxFee.P40, "max_fee_p40") + ht.Assert.Equal(int64(200), result.MaxFee.P50, "max_fee_p50") + ht.Assert.Equal(int64(200), result.MaxFee.P60, "max_fee_p60") + ht.Assert.Equal(int64(200), result.MaxFee.P70, "max_fee_p70") + ht.Assert.Equal(int64(200), result.MaxFee.P80, "max_fee_p80") + ht.Assert.Equal(int64(200), result.MaxFee.P90, "max_fee_p90") + ht.Assert.Equal(int64(200), result.MaxFee.P95, "max_fee_p95") + ht.Assert.Equal(int64(200), result.MaxFee.P99, "max_fee_p99") + + ht.Assert.Equal(int64(50), result.FeeCharged.Max, "fee_charged_max") + ht.Assert.Equal(int64(50), result.FeeCharged.Min, "fee_charged_min") + ht.Assert.Equal(int64(50), result.FeeCharged.Mode, "fee_charged_mode") + ht.Assert.Equal(int64(50), result.FeeCharged.P10, "fee_charged_p10") + ht.Assert.Equal(int64(50), result.FeeCharged.P20, "fee_charged_p20") + ht.Assert.Equal(int64(50), result.FeeCharged.P30, "fee_charged_p30") + ht.Assert.Equal(int64(50), result.FeeCharged.P40, "fee_charged_p40") + ht.Assert.Equal(int64(50), result.FeeCharged.P50, "fee_charged_p50") + ht.Assert.Equal(int64(50), result.FeeCharged.P60, "fee_charged_p60") + ht.Assert.Equal(int64(50), result.FeeCharged.P70, "fee_charged_p70") + ht.Assert.Equal(int64(50), result.FeeCharged.P80, "fee_charged_p80") + ht.Assert.Equal(int64(50), result.FeeCharged.P90, "fee_charged_p90") + ht.Assert.Equal(int64(50), result.FeeCharged.P95, "fee_charged_p95") + ht.Assert.Equal(int64(50), result.FeeCharged.P99, "fee_charged_p99") + } +} + +func TestEmptyFeeStats(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + w := ht.Get("/fee_stats") + ht.Assert.Equal(200, w.Code) + var result hProtocol.FeeStats + err := json.Unmarshal(w.Body.Bytes(), &result) + ht.Require.NoError(err) + ht.Assert.Equal(result, hProtocol.FeeStats{}) +} + +func TestOperationFeeTestsActions_NotInterpolating(t *testing.T) { + ht := StartHTTPTest(t, "operation_fee_stats_3") + defer ht.Finish() + + // Update max_tx_set_size on ledgers + _, err := ht.HorizonSession().ExecRaw(ht.Ctx, "UPDATE history_ledgers SET max_tx_set_size = 50") + ht.Require.NoError(err) + + // Update one tx to a huge fee + _, err = ht.HorizonSession().ExecRaw(ht.Ctx, "UPDATE history_transactions SET max_fee = 256000, operation_count = 16 WHERE 
transaction_hash = '6a349e7331e93a251367287e274fb1699abaf723bde37aebe96248c76fd3071a'") + ht.Require.NoError(err) + + ht.App.UpdateFeeStatsState(ht.Ctx) + + w := ht.Get("/fee_stats") + + if ht.Assert.Equal(200, w.Code) { + var result hProtocol.FeeStats + err := json.Unmarshal(w.Body.Bytes(), &result) + ht.Require.NoError(err) + ht.Assert.Equal(int64(100), result.LastLedgerBaseFee, "base_fee") + ht.Assert.Equal(0.09, result.LedgerCapacityUsage, "ledger_capacity_usage") + + ht.Assert.Equal(int64(16000), result.MaxFee.Max, "max_fee_max") + ht.Assert.Equal(int64(200), result.MaxFee.Min, "max_fee_min") + ht.Assert.Equal(int64(400), result.MaxFee.Mode, "max_fee_mode") + ht.Assert.Equal(int64(200), result.MaxFee.P10, "max_fee_p10") + ht.Assert.Equal(int64(300), result.MaxFee.P20, "max_fee_p20") + ht.Assert.Equal(int64(400), result.MaxFee.P30, "max_fee_p30") + ht.Assert.Equal(int64(400), result.MaxFee.P40, "max_fee_p40") + ht.Assert.Equal(int64(400), result.MaxFee.P50, "max_fee_p50") + ht.Assert.Equal(int64(400), result.MaxFee.P60, "max_fee_p60") + ht.Assert.Equal(int64(400), result.MaxFee.P70, "max_fee_p70") + ht.Assert.Equal(int64(400), result.MaxFee.P80, "max_fee_p80") + ht.Assert.Equal(int64(16000), result.MaxFee.P90, "max_fee_p90") + ht.Assert.Equal(int64(16000), result.MaxFee.P95, "max_fee_p95") + ht.Assert.Equal(int64(16000), result.MaxFee.P99, "max_fee_p99") + + ht.Assert.Equal(int64(100), result.FeeCharged.Max, "fee_charged_max") + ht.Assert.Equal(int64(6), result.FeeCharged.Min, "fee_charged_min") + ht.Assert.Equal(int64(100), result.FeeCharged.Mode, "fee_charged_mode") + ht.Assert.Equal(int64(6), result.FeeCharged.P10, "fee_charged_p10") + ht.Assert.Equal(int64(100), result.FeeCharged.P20, "fee_charged_p20") + ht.Assert.Equal(int64(100), result.FeeCharged.P30, "fee_charged_p30") + ht.Assert.Equal(int64(100), result.FeeCharged.P40, "fee_charged_p40") + ht.Assert.Equal(int64(100), result.FeeCharged.P50, "fee_charged_p50") + ht.Assert.Equal(int64(100), result.FeeCharged.P60, "fee_charged_p60") + ht.Assert.Equal(int64(100), result.FeeCharged.P70, "fee_charged_p70") + ht.Assert.Equal(int64(100), result.FeeCharged.P80, "fee_charged_p80") + ht.Assert.Equal(int64(100), result.FeeCharged.P90, "fee_charged_p90") + ht.Assert.Equal(int64(100), result.FeeCharged.P95, "fee_charged_p95") + ht.Assert.Equal(int64(100), result.FeeCharged.P99, "fee_charged_p99") + } +} + +func TestOperationFeeTestsActions_FeeBump(t *testing.T) { + ht := StartHTTPTest(t, "operation_fee_stats_3") + defer ht.Finish() + + // Update one tx to be a fee bump + result, err := ht.HorizonSession().ExecRaw(ht.Ctx, "UPDATE history_transactions SET max_fee = 10, new_max_fee = 1000, fee_charged = 200 WHERE transaction_hash = '6a349e7331e93a251367287e274fb1699abaf723bde37aebe96248c76fd3071a'") + ht.Require.NoError(err) + rowsAffected, err := result.RowsAffected() + ht.Require.NoError(err) + ht.Require.Equal(int64(1), rowsAffected) + + ht.App.UpdateFeeStatsState(ht.Ctx) + + w := ht.Get("/fee_stats") + + if ht.Assert.Equal(200, w.Code) { + var result hProtocol.FeeStats + err := json.Unmarshal(w.Body.Bytes(), &result) + ht.Require.NoError(err) + ht.Assert.Equal(int64(100), result.LastLedgerBaseFee, "base_fee") + + ht.Assert.Equal(int64(500), result.MaxFee.Max, "max_fee_max") + ht.Assert.Equal(int64(200), result.MaxFee.Min, "max_fee_min") + ht.Assert.Equal(int64(400), result.MaxFee.Mode, "max_fee_mode") + ht.Assert.Equal(int64(200), result.MaxFee.P10, "max_fee_p10") + ht.Assert.Equal(int64(300), result.MaxFee.P20, "max_fee_p20") + 
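+		// The remaining max_fee percentiles match the varying-fee scenario; only the top of the distribution
+		// (max, p90 and above) moves from 400 to 500 because of the bumped transaction's new_max_fee.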
ht.Assert.Equal(int64(400), result.MaxFee.P30, "max_fee_p30") + ht.Assert.Equal(int64(400), result.MaxFee.P40, "max_fee_p40") + ht.Assert.Equal(int64(400), result.MaxFee.P50, "max_fee_p50") + ht.Assert.Equal(int64(400), result.MaxFee.P60, "max_fee_p60") + ht.Assert.Equal(int64(400), result.MaxFee.P70, "max_fee_p70") + ht.Assert.Equal(int64(400), result.MaxFee.P80, "max_fee_p80") + ht.Assert.Equal(int64(500), result.MaxFee.P90, "max_fee_p90") + ht.Assert.Equal(int64(500), result.MaxFee.P95, "max_fee_p95") + ht.Assert.Equal(int64(500), result.MaxFee.P99, "max_fee_p99") + + ht.Assert.Equal(int64(100), result.FeeCharged.Max, "fee_charged_max") + ht.Assert.Equal(int64(100), result.FeeCharged.Min, "fee_charged_min") + ht.Assert.Equal(int64(100), result.FeeCharged.Mode, "fee_charged_mode") + ht.Assert.Equal(int64(100), result.FeeCharged.P10, "fee_charged_p10") + ht.Assert.Equal(int64(100), result.FeeCharged.P20, "fee_charged_p20") + ht.Assert.Equal(int64(100), result.FeeCharged.P30, "fee_charged_p30") + ht.Assert.Equal(int64(100), result.FeeCharged.P40, "fee_charged_p40") + ht.Assert.Equal(int64(100), result.FeeCharged.P50, "fee_charged_p50") + ht.Assert.Equal(int64(100), result.FeeCharged.P60, "fee_charged_p60") + ht.Assert.Equal(int64(100), result.FeeCharged.P70, "fee_charged_p70") + ht.Assert.Equal(int64(100), result.FeeCharged.P80, "fee_charged_p80") + ht.Assert.Equal(int64(100), result.FeeCharged.P90, "fee_charged_p90") + ht.Assert.Equal(int64(100), result.FeeCharged.P95, "fee_charged_p95") + ht.Assert.Equal(int64(100), result.FeeCharged.P99, "fee_charged_p99") + } +} diff --git a/services/horizon/internal/actions_operation_test.go b/services/horizon/internal/actions_operation_test.go new file mode 100644 index 0000000000..8398667862 --- /dev/null +++ b/services/horizon/internal/actions_operation_test.go @@ -0,0 +1,368 @@ +package horizon + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestOperationActions_Index(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + // no filter + w := ht.Get("/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(4, w.Body) + } + + // filtered by ledger sequence + w = ht.Get("/ledgers/1/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(0, w.Body) + } + + w = ht.Get("/ledgers/2/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + + w = ht.Get("/ledgers/3/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } + + // filtered by account + w = ht.Get("/accounts/GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + + w = ht.Get("/accounts/GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } + + w = ht.Get("/accounts/GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(2, w.Body) + } + + // filtered by claimable balance + w = ht.Get("/claimable_balances/00000000178826fbfe339e1f5c53417c6fedfe2c05e8bec14303143ec46b38981b09c3f9/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(2, w.Body) + } + + // filtered by 
transaction + w = ht.Get("/transactions/2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } + + w = ht.Get("/transactions/164a5064eba64f2cdbadb856bf3448485fc626247ada3ed39cddf0f6902133b6/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } + + // 400 for invalid tx hash + w = ht.Get("/transactions/ /operations") + ht.Assert.Equal(400, w.Code) + + w = ht.Get("/transactions/invalid/operations") + ht.Assert.Equal(400, w.Code) + + w = ht.Get("/transactions/1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc29/operations") + ht.Assert.Equal(400, w.Code) + + w = ht.Get("/transactions/1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc29222/operations") + ht.Assert.Equal(400, w.Code) + + // filtered by ledger + w = ht.Get("/ledgers/3/operations") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } + + // missing ledger + w = ht.Get("/ledgers/100/operations") + ht.Assert.Equal(404, w.Code) +} + +func TestOperationActions_Show_Failed(t *testing.T) { + ht := StartHTTPTest(t, "failed_transactions") + defer ht.Finish() + + // Should show successful transactions only + w := ht.Get("/operations?limit=200") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + successful := 0 + failed := 0 + + for _, op := range records { + if op.TransactionSuccessful { + successful++ + } else { + failed++ + } + } + + ht.Assert.Equal(8, successful) + ht.Assert.Equal(0, failed) + } + + // Should show all transactions: both successful and failed + w = ht.Get("/operations?limit=200&include_failed=true") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + successful := 0 + failed := 0 + + for _, op := range records { + if op.TransactionSuccessful { + successful++ + } else { + failed++ + } + } + + ht.Assert.Equal(8, successful) + ht.Assert.Equal(1, failed) + } + + w = ht.Get("/transactions/aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf/operations") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + ht.Assert.Equal(1, len(records)) + for _, op := range records { + ht.Assert.False(op.TransactionSuccessful) + ht.Assert.Equal("aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf", op.TransactionHash) + } + } + + w = ht.Get("/transactions/56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1/operations") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + ht.Assert.Equal(1, len(records)) + for _, op := range records { + ht.Assert.True(op.TransactionSuccessful) + } + } + + // NULL value + _, err := ht.HorizonSession().ExecRaw(ht.Ctx, + `UPDATE history_transactions SET successful = NULL WHERE transaction_hash = ?`, + "56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1", + ) + ht.Require.NoError(err) + + w = ht.Get("/transactions/56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1/operations") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + ht.Assert.Equal(1, len(records)) + for _, op := range records { + ht.Assert.True(op.TransactionSuccessful) + } + } +} + +func TestOperationActions_IncludeTransactions(t *testing.T) { + ht := StartHTTPTest(t, "failed_transactions") + defer ht.Finish() + + w := 
ht.Get("/operations?account_id=GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2") + ht.Assert.Equal(200, w.Code) + withoutTransactions := []operations.Base{} + ht.UnmarshalPage(w.Body, &withoutTransactions) + + w = ht.Get("/operations?account_id=GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2&join=transactions") + ht.Assert.Equal(200, w.Code) + withTransactions := []operations.Base{} + ht.UnmarshalPage(w.Body, &withTransactions) + + for i, operation := range withTransactions { + getTransaction := ht.Get("/transactions/" + operation.Transaction.ID) + ht.Assert.Equal(200, getTransaction.Code) + var getTransactionResponse horizon.Transaction + err := json.Unmarshal(getTransaction.Body.Bytes(), &getTransactionResponse) + + ht.Require.NoError(err, "failed to parse body") + tx := operation.Transaction + ht.Assert.Equal(*tx, getTransactionResponse) + + withTransactions[i].Transaction = nil + } + + ht.Assert.Equal(withoutTransactions, withTransactions) +} + +func TestOperationActions_Show(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + // exists + w := ht.Get("/operations/8589938689") + if ht.Assert.Equal(200, w.Code) { + var result operations.Base + err := json.Unmarshal(w.Body.Bytes(), &result) + ht.Require.NoError(err, "failed to parse body") + ht.Assert.Equal("8589938689", result.PT) + ht.Assert.Equal("2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d", result.TransactionHash) + } + + // doesn't exist + w = ht.Get("/operations/9589938689") + ht.Assert.Equal(404, w.Code) + + // before history + ht.ReapHistory(1) + w = ht.Get("/operations/8589938689") + ht.Assert.Equal(410, w.Code) +} + +func TestOperationActions_StreamRegression(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + // ensure that trying to stream ops from an account that doesn't exist + // fails before streaming the hello message. 
Regression test for #285 + w := ht.Get("/accounts/GAS2FZOQRFVHIDY35TUSBWFGCROPLWPZVFRN5JZEOUUVRGDRZGHPBLYZ/operations?limit=1", test.RequestHelperStreaming) + if ht.Assert.Equal(404, w.Code) { + ht.Assert.ProblemType(w.Body, "not_found") + } +} + +func TestOperation_CreatedAt(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + w := ht.Get("/ledgers/3/operations") + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + l := history.Ledger{} + hq := history.Q{SessionInterface: ht.HorizonSession()} + ht.Require.NoError(hq.LedgerBySequence(ht.Ctx, &l, 3)) + + ht.Assert.WithinDuration(l.ClosedAt, records[0].LedgerCloseTime, 1*time.Second) +} + +func TestOperation_BumpSequence(t *testing.T) { + ht := StartHTTPTest(t, "kahuna") + defer ht.Finish() + + w := ht.Get("/operations/261993009153") + if ht.Assert.Equal(200, w.Code) { + var result operations.BumpSequence + err := json.Unmarshal(w.Body.Bytes(), &result) + ht.Require.NoError(err, "failed to parse body") + ht.Assert.Equal("bump_sequence", result.Type) + ht.Assert.Equal("300000000003", result.BumpTo) + } +} + +func TestOperationEffect_BumpSequence(t *testing.T) { + ht := StartHTTPTest(t, "kahuna") + defer ht.Finish() + + w := ht.Get("/operations/249108107265/effects") + if ht.Assert.Equal(200, w.Code) { + var result []effects.SequenceBumped + ht.UnmarshalPage(w.Body, &result) + ht.Assert.Equal(int64(300000000000), result[0].NewSeq) + + data, err := json.Marshal(&result[0]) + ht.Assert.NoError(err) + effect := struct { + NewSeq string `json:"new_seq"` + }{} + + json.Unmarshal(data, &effect) + ht.Assert.Equal("300000000000", effect.NewSeq) + } +} +func TestOperationEffect_Trade(t *testing.T) { + ht := StartHTTPTest(t, "kahuna") + defer ht.Finish() + + w := ht.Get("/operations/103079219201/effects") + if ht.Assert.Equal(200, w.Code) { + var result []effects.Trade + ht.UnmarshalPage(w.Body, &result) + ht.Assert.Equal(int64(3), result[0].OfferID) + + data, err := json.Marshal(&result[0]) + ht.Assert.NoError(err) + effect := struct { + OfferID string `json:"offer_id"` + }{} + + json.Unmarshal(data, &effect) + ht.Assert.Equal("3", effect.OfferID) + } +} + +func TestOperation_IncludeTransaction(t *testing.T) { + ht := StartHTTPTest(t, "kahuna") + defer ht.Finish() + + withoutTransaction := ht.Get("/operations/261993009153") + ht.Assert.Equal(200, withoutTransaction.Code) + var responseWithoutTransaction operations.BumpSequence + err := json.Unmarshal(withoutTransaction.Body.Bytes(), &responseWithoutTransaction) + ht.Require.NoError(err, "failed to parse body") + ht.Assert.Nil(responseWithoutTransaction.Transaction) + + withTransaction := ht.Get("/operations/261993009153?join=transactions") + ht.Assert.Equal(200, withTransaction.Code) + var responseWithTransaction operations.BumpSequence + err = json.Unmarshal(withTransaction.Body.Bytes(), &responseWithTransaction) + ht.Require.NoError(err, "failed to parse body") + + transactionInOperationsResponse := *responseWithTransaction.Transaction + responseWithTransaction.Transaction = nil + ht.Assert.Equal(responseWithoutTransaction, responseWithTransaction) + + getTransaction := ht.Get("/transactions/" + transactionInOperationsResponse.ID) + ht.Assert.Equal(200, getTransaction.Code) + var getTransactionResponse horizon.Transaction + err = json.Unmarshal(getTransaction.Body.Bytes(), &getTransactionResponse) + ht.Require.NoError(err, "failed to parse body") + ht.Assert.Equal(transactionInOperationsResponse, getTransactionResponse) +} +func 
TestOperationActions_Show_Extra_TxID(t *testing.T) { + ht := StartHTTPTest(t, "failed_transactions") + defer ht.Finish() + + w := ht.Get("/accounts/GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON/operations?limit=200&tx_id=aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf") + + ht.Assert.Equal(400, w.Code) + payload := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("filters", payload["invalid_field"]) + ht.Assert.Equal( + "Use a single filter for operations, you can only use one of tx_id, account_id or ledger_id", + payload["reason"], + ) +} diff --git a/services/horizon/internal/actions_path_test.go b/services/horizon/internal/actions_path_test.go new file mode 100644 index 0000000000..2cd8c8b61b --- /dev/null +++ b/services/horizon/internal/actions_path_test.go @@ -0,0 +1,675 @@ +package horizon + +import ( + "context" + "database/sql" + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + "github.com/guregu/null" + + "github.com/go-chi/chi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/actions" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/httpx" + "github.com/stellar/go/services/horizon/internal/paths" + horizonProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/simplepath" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/xdr" +) + +func mockPathFindingClient( + tt *test.T, + finder paths.Finder, + maxAssetsParamLength int, + session *db.Session, +) test.RequestHelper { + router := chi.NewRouter() + findPaths := httpx.ObjectActionHandler{actions.FindPathsHandler{ + PathFinder: finder, + MaxAssetsParamLength: maxAssetsParamLength, + MaxPathLength: 3, + SetLastLedgerHeader: true, + }} + findFixedPaths := httpx.ObjectActionHandler{actions.FindFixedPathsHandler{ + PathFinder: finder, + MaxAssetsParamLength: maxAssetsParamLength, + MaxPathLength: 3, + SetLastLedgerHeader: true, + }} + + router.Use(func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + s := session.Clone() + s.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }) + defer s.Rollback() + + ctx := context.WithValue( + r.Context(), + &horizonContext.SessionContextKey, + s, + ) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + }) + + router.Group(func(r chi.Router) { + router.Method("GET", "/paths", findPaths) + router.Method("GET", "/paths/strict-receive", findPaths) + router.Method("GET", "/paths/strict-send", findFixedPaths) + }) + + return test.NewRequestHelper(router) +} + +func TestPathActionsStillIngesting(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + assertions := &test.Assertions{tt.Assert} + finder := paths.MockFinder{} + finder.On("Find", mock.Anything, mock.Anything, uint(3)). + Return([]paths.Path{}, uint32(0), simplepath.ErrEmptyInMemoryOrderBook).Times(2) + finder.On("FindFixedPaths", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return([]paths.Path{}, uint32(0), simplepath.ErrEmptyInMemoryOrderBook).Times(1) + + rh := mockPathFindingClient( + tt, + &finder, + 2, + tt.HorizonSession(), + ) + + var q = make(url.Values) + + q.Add( + "source_assets", + "native", + ) + q.Add( + "destination_asset_issuer", + "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + q.Add("destination_asset_type", "credit_alphanum4") + q.Add("destination_asset_code", "EUR") + q.Add("destination_amount", "10") + + for _, uri := range []string{"/paths", "/paths/strict-receive"} { + w := rh.Get(uri + "?" + q.Encode()) + assertions.Equal(horizonProblem.StillIngesting.Status, w.Code) + assertions.Problem(w.Body, horizonProblem.StillIngesting) + assertions.Equal("", w.Header().Get(actions.LastLedgerHeaderName)) + } + + q = make(url.Values) + + q.Add("destination_assets", "native") + q.Add("source_asset_issuer", "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN") + q.Add("source_asset_type", "credit_alphanum4") + q.Add("source_asset_code", "EUR") + q.Add("source_amount", "10") + + w := rh.Get("/paths/strict-send" + "?" + q.Encode()) + assertions.Equal(horizonProblem.StillIngesting.Status, w.Code) + assertions.Problem(w.Body, horizonProblem.StillIngesting) + assertions.Equal("", w.Header().Get(actions.LastLedgerHeaderName)) + + finder.AssertExpectations(t) +} + +func TestPathActionsStrictReceive(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + sourceAssets := []xdr.Asset{ + xdr.MustNewCreditAsset("AAA", "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN"), + xdr.MustNewCreditAsset("USD", "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN"), + xdr.MustNewNativeAsset(), + } + sourceAccount := "GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP" + + q := &history.Q{tt.HorizonSession()} + + account := history.AccountEntry{ + LastModifiedLedger: 1234, + AccountID: sourceAccount, + Balance: 20000, + SequenceNumber: 223456789, + NumSubEntries: 10, + Flags: 1, + MasterWeight: 1, + ThresholdLow: 2, + ThresholdMedium: 3, + ThresholdHigh: 4, + BuyingLiabilities: 3, + SellingLiabilities: 4, + } + + err := q.UpsertAccounts(tt.Ctx, []history.AccountEntry{account}) + assert.NoError(t, err) + + assetsByKeys := map[string]xdr.Asset{} + + for _, asset := range sourceAssets { + code := asset.String() + assetsByKeys[code] = asset + if code == "native" { + continue + } + + var assetType, assetCode, assetIssuer string + asset.MustExtract(&assetType, &assetCode, &assetIssuer) + + var lk xdr.LedgerKey + var lkStr string + assert.NoError(t, lk.SetTrustline(xdr.MustAddress(sourceAccount), asset.ToTrustLineAsset())) + lkStr, err = lk.MarshalBinaryBase64() + assert.NoError(t, err) + + err = q.UpsertTrustLines(tt.Ctx, []history.TrustLine{ + { + AccountID: sourceAccount, + AssetType: asset.Type, + AssetIssuer: assetIssuer, + AssetCode: assetCode, + Balance: 10000, + LedgerKey: lkStr, + Limit: 123456789, + LiquidityPoolID: "", + BuyingLiabilities: 1, + SellingLiabilities: 2, + Flags: 0, + LastModifiedLedger: 1234, + Sponsor: null.String{}, + }, + }) + assert.NoError(t, err) + } + tt.Assert.NoError(q.UpsertTrustLines(tt.Ctx, []history.TrustLine{ + { + AccountID: sourceAccount, + AssetType: xdr.AssetTypeAssetTypePoolShare, + Balance: 9876, + LedgerKey: "poolshareid1", + Limit: 123456789, + LiquidityPoolID: "lpid123", + Flags: 0, + LastModifiedLedger: 1234, + Sponsor: null.String{}, + }, + })) + + finder := paths.MockFinder{} + withSourceAssetsBalance := true + + finder.On("Find", 
mock.Anything, mock.Anything, uint(3)).Return([]paths.Path{}, uint32(1234), nil).Run(func(args mock.Arguments) { + query := args.Get(1).(paths.Query) + for _, asset := range query.SourceAssets { + var assetType, code, issuer string + + asset.MustExtract(&assetType, &code, &issuer) + if assetType == "native" { + tt.Assert.NotNil(assetsByKeys["native"]) + } else { + tt.Assert.NotNil(assetsByKeys[code]) + } + + } + tt.Assert.Equal(xdr.MustNewCreditAsset("EUR", "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN"), query.DestinationAsset) + tt.Assert.Equal(xdr.Int64(100000000), query.DestinationAmount) + + if withSourceAssetsBalance { + tt.Assert.Equal([]xdr.Int64{10000, 10000, 20000}, query.SourceAssetBalances) + tt.Assert.True(query.ValidateSourceBalance) + } else { + tt.Assert.Equal([]xdr.Int64{0, 0, 0}, query.SourceAssetBalances) + tt.Assert.False(query.ValidateSourceBalance) + } + + }).Times(4) + + rh := mockPathFindingClient( + tt, + &finder, + len(sourceAssets), + tt.HorizonSession(), + ) + + var withSourceAccount = make(url.Values) + withSourceAccount.Add( + "destination_account", + "GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V", + ) + withSourceAccount.Add( + "source_account", + sourceAccount, + ) + withSourceAccount.Add( + "destination_asset_issuer", + "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + withSourceAccount.Add("destination_asset_type", "credit_alphanum4") + withSourceAccount.Add("destination_asset_code", "EUR") + withSourceAccount.Add("destination_amount", "10") + + withSourceAssets, err := url.ParseQuery( + withSourceAccount.Encode(), + ) + tt.Assert.NoError(err) + withSourceAssets.Del("source_account") + withSourceAssets.Add("source_assets", assetsToURLParam(sourceAssets)) + + for _, uri := range []string{"/paths", "/paths/strict-receive"} { + w := rh.Get(uri + "?" + withSourceAccount.Encode()) + tt.Assert.Equal(http.StatusOK, w.Code) + tt.Assert.Equal("1234", w.Header().Get(actions.LastLedgerHeaderName)) + + withSourceAssetsBalance = false + w = rh.Get(uri + "?" + withSourceAssets.Encode()) + tt.Assert.Equal(http.StatusOK, w.Code) + tt.Assert.Equal("1234", w.Header().Get(actions.LastLedgerHeaderName)) + withSourceAssetsBalance = true + } + + finder.AssertExpectations(t) +} + +func TestPathActionsEmptySourceAcount(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + assertions := &test.Assertions{tt.Assert} + finder := paths.MockFinder{} + rh := mockPathFindingClient( + tt, + &finder, + 2, + tt.HorizonSession(), + ) + var q = make(url.Values) + + q.Add( + "destination_account", + "GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V", + ) + q.Add( + "source_account", + // there is no account associated with this address + "GD5PM5X7Q5MM54ERO2P5PXW3HD6HVZI5IRZGEDWS4OPFBGHNTF6XOWQO", + ) + q.Add( + "destination_asset_issuer", + "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + q.Add("destination_asset_type", "credit_alphanum4") + q.Add("destination_asset_code", "EUR") + q.Add("destination_amount", "10") + + for _, uri := range []string{"/paths", "/paths/strict-receive"} { + w := rh.Get(uri + "?" 
+ q.Encode()) + assertions.Equal(http.StatusOK, w.Code) + inMemoryResponse := []horizon.Path{} + tt.UnmarshalPage(w.Body, &inMemoryResponse) + assertions.Empty(inMemoryResponse) + tt.Assert.Equal("", w.Header().Get(actions.LastLedgerHeaderName)) + } +} + +func TestPathActionsSourceAssetsValidation(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + assertions := &test.Assertions{tt.Assert} + finder := paths.MockFinder{} + rh := mockPathFindingClient( + tt, + &finder, + 2, + tt.HorizonSession(), + ) + + missingSourceAccountAndAssets := make(url.Values) + missingSourceAccountAndAssets.Add( + "destination_asset_issuer", + "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + missingSourceAccountAndAssets.Add("destination_asset_type", "credit_alphanum4") + missingSourceAccountAndAssets.Add("destination_asset_code", "USD") + missingSourceAccountAndAssets.Add("destination_amount", "10") + + sourceAccountAndAssets, err := url.ParseQuery( + missingSourceAccountAndAssets.Encode(), + ) + tt.Assert.NoError(err) + sourceAccountAndAssets.Add( + "source_assets", + "EUR:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + sourceAccountAndAssets.Add( + "source_account", + "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + + tooManySourceAssets, err := url.ParseQuery( + missingSourceAccountAndAssets.Encode(), + ) + tt.Assert.NoError(err) + tooManySourceAssets.Add( + "source_assets", + "EUR:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN,"+ + "GBP:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN,"+ + "USD:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN,"+ + "SEK:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + + for _, testCase := range []struct { + name string + q url.Values + expectedProblem problem.P + }{ + { + "both destination asset and destination account are missing", + missingSourceAccountAndAssets, + actions.SourceAssetsOrSourceAccountProblem, + }, + { + "both destination asset and destination account are present", + sourceAccountAndAssets, + actions.SourceAssetsOrSourceAccountProblem, + }, + { + "too many assets in destination_assets", + tooManySourceAssets, + *problem.MakeInvalidFieldProblem( + "source_assets", + fmt.Errorf("list of assets exceeds maximum length of 3"), + ), + }, + } { + t.Run(testCase.name, func(t *testing.T) { + w := rh.Get("/paths/strict-receive?" 
+ testCase.q.Encode()) + assertions.Equal(testCase.expectedProblem.Status, w.Code) + assertions.Problem(w.Body, testCase.expectedProblem) + assertions.Equal("", w.Header().Get(actions.LastLedgerHeaderName)) + }) + } +} + +func TestPathActionsDestinationAssetsValidation(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + assertions := &test.Assertions{tt.Assert} + finder := paths.MockFinder{} + rh := mockPathFindingClient( + tt, + &finder, + 2, + tt.HorizonSession(), + ) + missingDestinationAccountAndAssets := make(url.Values) + missingDestinationAccountAndAssets.Add( + "source_asset_issuer", + "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + missingDestinationAccountAndAssets.Add( + "source_account", + "GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP", + ) + missingDestinationAccountAndAssets.Add("source_asset_type", "credit_alphanum4") + missingDestinationAccountAndAssets.Add("source_asset_code", "USD") + missingDestinationAccountAndAssets.Add("source_amount", "10") + + destinationAccountAndAssets, err := url.ParseQuery( + missingDestinationAccountAndAssets.Encode(), + ) + tt.Assert.NoError(err) + destinationAccountAndAssets.Add( + "destination_assets", + "EUR:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + destinationAccountAndAssets.Add( + "destination_account", + "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + + tooManyDestinationAssets, err := url.ParseQuery( + missingDestinationAccountAndAssets.Encode(), + ) + tt.Assert.NoError(err) + tooManyDestinationAssets.Add( + "destination_assets", + "EUR:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN,"+ + "GBP:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN,"+ + "USD:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN,"+ + "SEK:GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + + for _, testCase := range []struct { + name string + q url.Values + expectedProblem problem.P + }{ + { + "both destination asset and destination account are missing", + missingDestinationAccountAndAssets, + actions.DestinationAssetsOrDestinationAccountProblem, + }, + { + "both destination asset and destination account are present", + destinationAccountAndAssets, + actions.DestinationAssetsOrDestinationAccountProblem, + }, + { + "too many assets in destination_assets", + tooManyDestinationAssets, + *problem.MakeInvalidFieldProblem( + "destination_assets", + fmt.Errorf("list of assets exceeds maximum length of 3"), + ), + }, + } { + t.Run(testCase.name, func(t *testing.T) { + w := rh.Get("/paths/strict-send?" 
+ testCase.q.Encode()) + assertions.Equal(testCase.expectedProblem.Status, w.Code) + assertions.Problem(w.Body, testCase.expectedProblem) + assertions.Equal("", w.Header().Get(actions.LastLedgerHeaderName)) + }) + } +} + +func TestPathActionsStrictSend(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + assertions := &test.Assertions{tt.Assert} + historyQ := &history.Q{tt.HorizonSession()} + destinationAccount := "GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP" + destinationAssets := []xdr.Asset{ + xdr.MustNewCreditAsset("AAA", "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN"), + xdr.MustNewCreditAsset("USD", "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN"), + xdr.MustNewNativeAsset(), + } + + account := history.AccountEntry{ + LastModifiedLedger: 1234, + AccountID: destinationAccount, + Balance: 20000, + SequenceNumber: 223456789, + NumSubEntries: 10, + Flags: 1, + MasterWeight: 1, + ThresholdLow: 2, + ThresholdMedium: 3, + ThresholdHigh: 4, + BuyingLiabilities: 3, + SellingLiabilities: 4, + } + + err := historyQ.UpsertAccounts(tt.Ctx, []history.AccountEntry{account}) + assert.NoError(t, err) + + assetsByKeys := map[string]xdr.Asset{} + + for _, asset := range destinationAssets { + code := asset.String() + assetsByKeys[code] = asset + if code == "native" { + continue + } + + var assetType, assetCode, assetIssuer string + asset.MustExtract(&assetType, &assetCode, &assetIssuer) + + var lk xdr.LedgerKey + var lkStr string + assert.NoError(t, lk.SetTrustline(xdr.MustAddress(destinationAccount), asset.ToTrustLineAsset())) + lkStr, err = lk.MarshalBinaryBase64() + assert.NoError(t, err) + + err = historyQ.UpsertTrustLines(tt.Ctx, []history.TrustLine{ + { + AccountID: destinationAccount, + AssetType: asset.Type, + AssetIssuer: assetIssuer, + AssetCode: assetCode, + Balance: 10000, + LedgerKey: lkStr, + Limit: 123456789, + LiquidityPoolID: "", + BuyingLiabilities: 1, + SellingLiabilities: 2, + Flags: 0, + LastModifiedLedger: 1234, + Sponsor: null.String{}, + }, + }) + assert.NoError(t, err) + } + tt.Assert.NoError(historyQ.UpsertTrustLines(tt.Ctx, []history.TrustLine{ + { + AccountID: destinationAccount, + AssetType: xdr.AssetTypeAssetTypePoolShare, + Balance: 9876, + LedgerKey: "poolshareid1", + Limit: 123456789, + LiquidityPoolID: "lpid123", + Flags: 0, + LastModifiedLedger: 1234, + Sponsor: null.String{}, + }, + })) + + finder := paths.MockFinder{} + // withSourceAssetsBalance := true + sourceAsset := xdr.MustNewCreditAsset("USD", "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN") + + finder.On("FindFixedPaths", mock.Anything, sourceAsset, xdr.Int64(100000000), mock.Anything, uint(3)).Return([]paths.Path{}, uint32(1234), nil).Run(func(args mock.Arguments) { + destinationAssets := args.Get(3).([]xdr.Asset) + for _, asset := range destinationAssets { + var assetType, code, issuer string + + asset.MustExtract(&assetType, &code, &issuer) + if assetType == "native" { + tt.Assert.NotNil(assetsByKeys["native"]) + } else { + tt.Assert.NotNil(assetsByKeys[code]) + } + + } + }).Times(2) + + rh := mockPathFindingClient( + tt, + &finder, + len(destinationAssets), + tt.HorizonSession(), + ) + + var q = make(url.Values) + + q.Add( + "source_asset_issuer", + "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + ) + q.Add("source_asset_type", "credit_alphanum4") + q.Add("source_asset_code", "USD") + q.Add("source_amount", "10") + q.Add( + "destination_account", + destinationAccount, + ) + + w := 
rh.Get("/paths/strict-send?" + q.Encode()) + assertions.Equal(http.StatusOK, w.Code) + assertions.Equal("1234", w.Header().Get(actions.LastLedgerHeaderName)) + + q.Del("destination_account") + q.Add("destination_assets", assetsToURLParam(destinationAssets)) + w = rh.Get("/paths/strict-send?" + q.Encode()) + assertions.Equal(http.StatusOK, w.Code) + assertions.Equal("1234", w.Header().Get(actions.LastLedgerHeaderName)) + + finder.AssertExpectations(t) +} + +func assetsToURLParam(xdrAssets []xdr.Asset) string { + var assets []string + for _, xdrAsset := range xdrAssets { + var assetType, code, issuer string + xdrAsset.MustExtract(&assetType, &code, &issuer) + if assetType == "native" { + assets = append(assets, "native") + } else { + assets = append(assets, fmt.Sprintf("%s:%s", code, issuer)) + } + } + + return strings.Join(assets, ",") +} + +func TestFindFixedPathsQueryQueryURLTemplate(t *testing.T) { + tt := assert.New(t) + params := []string{ + "destination_account", + "destination_assets", + "source_asset_type", + "source_asset_issuer", + "source_asset_code", + "source_amount", + } + expected := "/paths/strict-send{?" + strings.Join(params, ",") + "}" + qp := actions.FindFixedPathsQuery{} + tt.Equal(expected, qp.URITemplate()) +} + +func TestStrictReceivePathsQueryURLTemplate(t *testing.T) { + tt := assert.New(t) + params := []string{ + "source_assets", + "source_account", + "destination_account", + "destination_asset_type", + "destination_asset_issuer", + "destination_asset_code", + "destination_amount", + } + expected := "/paths/strict-receive{?" + strings.Join(params, ",") + "}" + qp := actions.StrictReceivePathsQuery{} + tt.Equal(expected, qp.URITemplate()) +} diff --git a/services/horizon/internal/actions_payment_test.go b/services/horizon/internal/actions_payment_test.go new file mode 100644 index 0000000000..446fc02d67 --- /dev/null +++ b/services/horizon/internal/actions_payment_test.go @@ -0,0 +1,321 @@ +package horizon + +import ( + "encoding/json" + "testing" + "time" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest" +) + +// Moved to TestGetOperationsOnlyPayments +func TestPaymentActions(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + w := ht.Get("/payments") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(4, w.Body) + } + + // filtered by ledger + w = ht.Get("/ledgers/1/payments") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(0, w.Body) + } + + w = ht.Get("/ledgers/3/payments") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } + + // Makes StateMiddleware happy + initializeStateMiddleware := func() { + q := history.Q{ht.HorizonSession()} + err := q.UpdateLastLedgerIngest(ht.Ctx, 3) + ht.Assert.NoError(err) + err = q.UpdateIngestVersion(ht.Ctx, ingest.CurrentVersion) + ht.Assert.NoError(err) + } + initializeStateMiddleware() + + // checks if empty param returns 404 instead of all payments + w = ht.Get("/accounts//payments") + ht.Assert.NotEqual(404, w.Code) + + // filtered by account + w = ht.Get("/accounts/GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2/payments") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } + + // switch scenarios + ht.T.Scenario("pathed_payment") + + // filtered by transaction + w = ht.Get("/transactions/b52f16ffb98c047e33b9c2ec30880330cde71f85b3443dae2c5cb86c7d4d8452/payments") + if 
ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(0, w.Body) + } + // =========================================== + // The following scenarios are handled in the action test + // missing tx + w = ht.Get("/transactions/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff/payments") + ht.Assert.Equal(404, w.Code) + // uppercase tx hash not accepted + w = ht.Get("/transactions/2374E99349B9EF7DBA9A5DB3339B78FDA8F34777B1AF33BA468AD5C0DF946D4D/payments") + ht.Assert.Equal(400, w.Code) + // badly formated tx hash not accepted + w = ht.Get("/transactions/%00%1E4%5E%EF%BF%BD%EF%BF%BD%EF%BF%BDpVP%EF%BF%BDI&R%0BK%EF%BF%BD%1D%EF%BF%BD%EF%BF%BD=%EF%BF%BD%3F%23%EF%BF%BD%EF%BF%BDl%EF%BF%BD%1El%EF%BF%BD%EF%BF%BD/payments") + ht.Assert.Equal(400, w.Code) + // =========================================== + + // TODO: test at the routing level + // 400 for invalid tx hash + w = ht.Get("/transactions/ /payments") + ht.Assert.Equal(400, w.Code) + + // this is handled in operations test, invalid will not match as a valid tx_id. + w = ht.Get("/transactions/invalid/payments") + ht.Assert.Equal(400, w.Code) + + // This is already handled in operations test + w = ht.Get("/transactions/1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc29/payments") + ht.Assert.Equal(400, w.Code) + + // This is already handled in operations test + w = ht.Get("/transactions/1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc29222/payments") + ht.Assert.Equal(400, w.Code) + + w = ht.Get("/transactions/1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc292/payments") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + + // test for existence of source_amount in path payment details + var records []map[string]interface{} + ht.UnmarshalPage(w.Body, &records) + ht.Assert.Equal("10.0000000", records[0]["source_amount"]) + } + + initializeStateMiddleware() + + // This is tested in PageQueryTest + // Regression: negative cursor + w = ht.Get("/accounts/GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2/payments?cursor=-23667108046966785&order=asc&limit=100") + ht.Assert.Equal(400, w.Code) +} + +func TestPaymentActions_Includetransactions(t *testing.T) { + ht := StartHTTPTest(t, "base") + + defer ht.Finish() + + w := ht.Get("/payments") + ht.Assert.Equal(200, w.Code) + withoutTransactions := []operations.Base{} + ht.UnmarshalPage(w.Body, &withoutTransactions) + + w = ht.Get("/payments?join=transactions") + ht.Assert.Equal(200, w.Code) + withTransactions := []operations.Base{} + ht.UnmarshalPage(w.Body, &withTransactions) + + for i, operation := range withTransactions { + getTransaction := ht.Get("/transactions/" + operation.Transaction.ID) + ht.Assert.Equal(200, getTransaction.Code) + var getTransactionResponse horizon.Transaction + err := json.Unmarshal(getTransaction.Body.Bytes(), &getTransactionResponse) + + ht.Require.NoError(err, "failed to parse body") + tx := operation.Transaction + ht.Assert.Equal(*tx, getTransactionResponse) + + withTransactions[i].Transaction = nil + } + + ht.Assert.Equal(withoutTransactions, withTransactions) +} + +func TestPaymentActions_Show_Failed(t *testing.T) { + ht := StartHTTPTest(t, "failed_transactions") + defer ht.Finish() + + // Should show successful transactions only + w := ht.Get("/payments?limit=200") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + successful := 0 + failed := 0 + + for _, op := range records { + if op.TransactionSuccessful { + successful++ + } else { + 
failed++ + } + } + + ht.Assert.Equal(5, successful) + ht.Assert.Equal(0, failed) + } + + // Should show all transactions: both successful and failed + w = ht.Get("/payments?limit=200&include_failed=true") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + successful := 0 + failed := 0 + + for _, op := range records { + if op.TransactionSuccessful { + successful++ + } else { + failed++ + } + } + + ht.Assert.Equal(5, successful) + ht.Assert.Equal(1, failed) + } + + w = ht.Get("/transactions/aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf/payments") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + ht.Assert.Equal(1, len(records)) + for _, op := range records { + ht.Assert.False(op.TransactionSuccessful) + } + } + + w = ht.Get("/transactions/56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1/payments") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + ht.Assert.Equal(1, len(records)) + for _, op := range records { + ht.Assert.True(op.TransactionSuccessful) + } + } + + // NULL value + _, err := ht.HorizonSession().ExecRaw(ht.Ctx, + `UPDATE history_transactions SET successful = NULL WHERE transaction_hash = ?`, + "56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1", + ) + ht.Require.NoError(err) + + w = ht.Get("/transactions/56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1/payments") + + if ht.Assert.Equal(200, w.Code) { + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + ht.Assert.Equal(1, len(records)) + for _, op := range records { + ht.Assert.True(op.TransactionSuccessful) + } + } +} + +func TestPayment_CreatedAt(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + w := ht.Get("/ledgers/3/payments") + records := []operations.Base{} + ht.UnmarshalPage(w.Body, &records) + + l := history.Ledger{} + hq := history.Q{SessionInterface: ht.HorizonSession()} + ht.Require.NoError(hq.LedgerBySequence(ht.Ctx, &l, 3)) + + ht.Assert.WithinDuration(l.ClosedAt, records[0].LedgerCloseTime, 1*time.Second) +} + +func TestPaymentActions_Show_Extra_TxID(t *testing.T) { + ht := StartHTTPTest(t, "failed_transactions") + defer ht.Finish() + + w := ht.Get("/accounts/GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON/payments?limit=200&tx_id=aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf") + + ht.Assert.Equal(400, w.Code) + payload := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("filters", payload["invalid_field"]) + ht.Assert.Equal( + "Use a single filter for operations, you can only use one of tx_id, account_id or ledger_id", + payload["reason"], + ) +} + +func TestPaymentActionsPathPaymentStrictSend(t *testing.T) { + ht := StartHTTPTest(t, "paths_strict_send") + defer ht.Finish() + + w := ht.Get("/payments?order=desc&limit=100") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(11, w.Body) + + var records []map[string]interface{} + ht.UnmarshalPage(w.Body, &records) + + // Record #1 + ht.Assert.Equal("path_payment_strict_send", records[0]["type"]) + ht.Assert.Equal("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", records[0]["from"]) + ht.Assert.Equal("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", records[0]["to"]) + ht.Assert.Equal("15.8400000", records[0]["amount"]) + ht.Assert.Equal("EUR", records[0]["asset_code"]) + 
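+		// The assertions that follow finish the generic payment fields for record #1
+		// and then check the strict-send specific ones in the operation response:
+		// destination_min, source_amount, the source asset and the path.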
ht.Assert.Equal("GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", records[0]["asset_issuer"]) + ht.Assert.Equal("2.0000000", records[0]["destination_min"]) + ht.Assert.Equal("12.0000000", records[0]["source_amount"]) + ht.Assert.Equal("USD", records[0]["source_asset_code"]) + ht.Assert.Equal("GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", records[0]["source_asset_issuer"]) + ht.Assert.Equal([]interface{}{map[string]interface{}{"asset_type": "native"}}, records[0]["path"]) + + // Record #2 + ht.Assert.Equal("path_payment_strict_send", records[1]["type"]) + ht.Assert.Equal("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", records[1]["from"]) + ht.Assert.Equal("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", records[1]["to"]) + ht.Assert.Equal("13.0000000", records[1]["amount"]) + ht.Assert.Equal("EUR", records[1]["asset_code"]) + ht.Assert.Equal("GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", records[1]["asset_issuer"]) + ht.Assert.Equal("1.0000000", records[1]["destination_min"]) + ht.Assert.Equal("10.0000000", records[1]["source_amount"]) + ht.Assert.Equal("USD", records[1]["source_asset_code"]) + ht.Assert.Equal("GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", records[1]["source_asset_issuer"]) + ht.Assert.Equal([]interface{}{}, records[1]["path"]) + } + + // One failed: + w = ht.Get("/payments?order=desc&include_failed=true&limit=100") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(12, w.Body) + + var records []map[string]interface{} + ht.UnmarshalPage(w.Body, &records) + + ht.Assert.Equal("path_payment_strict_send", records[0]["type"]) + ht.Assert.Equal("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", records[0]["from"]) + ht.Assert.Equal("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", records[0]["to"]) + ht.Assert.Equal("0.0000000", records[0]["amount"]) // failed + ht.Assert.Equal("EUR", records[0]["asset_code"]) + ht.Assert.Equal("GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", records[0]["asset_issuer"]) + ht.Assert.Equal("100.0000000", records[0]["destination_min"]) + ht.Assert.Equal("13.0000000", records[0]["source_amount"]) + ht.Assert.Equal("USD", records[0]["source_asset_code"]) + ht.Assert.Equal("GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", records[0]["source_asset_issuer"]) + } +} diff --git a/services/horizon/internal/actions_root_test.go b/services/horizon/internal/actions_root_test.go new file mode 100644 index 0000000000..4f0586c255 --- /dev/null +++ b/services/horizon/internal/actions_root_test.go @@ -0,0 +1,109 @@ +package horizon + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stretchr/testify/assert" +) + +func TestRootAction(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + server := test.NewStaticMockServer(`{ + "info": { + "network": "test", + "build": "test-core", + "ledger": { + "version": 3, + "num": 64 + }, + "protocol_version": 4 + } + }`) + defer server.Close() + + ht.App.config.StellarCoreURL = server.URL + ht.App.config.NetworkPassphrase = "test" + assert.NoError(t, ht.App.UpdateStellarCoreInfo(ht.Ctx)) + ht.App.UpdateCoreLedgerState(ht.Ctx) + ht.App.UpdateHorizonLedgerState(ht.Ctx) + + w := ht.Get("/") + + if ht.Assert.Equal(200, w.Code) { + var actual horizon.Root + err := json.Unmarshal(w.Body.Bytes(), &actual) + ht.Require.NoError(err) + ht.Assert.Equal("devel", actual.HorizonVersion) + 
ht.Assert.Equal("test-core", actual.StellarCoreVersion) + ht.Assert.Equal(int32(4), actual.CoreSupportedProtocolVersion) + ht.Assert.Equal(int32(3), actual.CurrentProtocolVersion) + ht.Assert.Equal(int32(64), actual.CoreSequence) + + err = json.Unmarshal(w.Body.Bytes(), &actual) + ht.Require.NoError(err) + ht.Assert.Equal( + "http://localhost/accounts{?signer,sponsor,asset,liquidity_pool,cursor,limit,order}", + actual.Links.Accounts.Href, + ) + ht.Assert.Equal( + "http://localhost/offers{?selling,buying,seller,sponsor,cursor,limit,order}", + actual.Links.Offers.Href, + ) + + params := []string{ + "destination_account", + "destination_assets", + "source_asset_type", + "source_asset_issuer", + "source_asset_code", + "source_amount", + } + + ht.Assert.Equal( + "http://localhost/paths/strict-send{?"+strings.Join(params, ",")+"}", + actual.Links.StrictSendPaths.Href, + ) + + params = []string{ + "source_assets", + "source_account", + "destination_account", + "destination_asset_type", + "destination_asset_issuer", + "destination_asset_code", + "destination_amount", + } + + ht.Assert.Equal( + "http://localhost/paths/strict-receive{?"+strings.Join(params, ",")+"}", + actual.Links.StrictReceivePaths.Href, + ) + } +} + +func TestRootCoreClientInfoErrored(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + // an empty payload causes the core client to err + server := test.NewStaticMockServer(`{}`) + defer server.Close() + + ht.App.config.StellarCoreURL = server.URL + ht.App.UpdateCoreLedgerState(ht.Ctx) + + w := ht.Get("/") + + if ht.Assert.Equal(200, w.Code) { + var actual horizon.Root + err := json.Unmarshal(w.Body.Bytes(), &actual) + ht.Require.NoError(err) + ht.Assert.Equal(int32(18), actual.CurrentProtocolVersion) + } +} diff --git a/services/horizon/internal/actions_trade_test.go b/services/horizon/internal/actions_trade_test.go new file mode 100644 index 0000000000..c4871d4d42 --- /dev/null +++ b/services/horizon/internal/actions_trade_test.go @@ -0,0 +1,877 @@ +//lint:file-ignore U1001 Ignore all unused code, thinks the code is unused because of the test skips +package horizon + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + "strconv" + "strings" + "testing" + "time" + + "github.com/guregu/null" + "github.com/stellar/go/amount" + "github.com/stellar/go/keypair" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + stellarTime "github.com/stellar/go/support/time" + "github.com/stellar/go/xdr" +) + +func TestLiquidityPoolTrades(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + dbQ := &history.Q{ht.HorizonSession()} + fixtures := history.TradeScenario(ht.T, dbQ) + + for _, liquidityPoolID := range fixtures.LiquidityPools { + expected := fixtures.TradesByPool[liquidityPoolID] + var records []horizon.Trade + // All trades + w := ht.Get("/liquidity_pools/" + liquidityPoolID + "/trades") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(len(expected), w.Body) + ht.UnmarshalPage(w.Body, &records) + for i, row := range expected { + record := records[i] + assertResponseTradeEqualsDBTrade(ht, row, record) + } + } + } + + w := ht.Get("/liquidity_pools/" + fixtures.LiquidityPools[0] + "/trades?account_id=" + fixtures.Addresses[0]) + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("account_id,liquidity_pool_id,offer_id", extras["invalid_field"]) + ht.Assert.Equal("Use a single filter for trades, you can only use one of account_id, 
liquidity_pool_id, offer_id", extras["reason"]) + } + + w = ht.Get("/liquidity_pools/" + fixtures.LiquidityPools[0] + "/trades?offer_id=1") + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("account_id,liquidity_pool_id,offer_id", extras["invalid_field"]) + ht.Assert.Equal("Use a single filter for trades, you can only use one of account_id, liquidity_pool_id, offer_id", extras["reason"]) + } + + w = ht.Get("/liquidity_pools/" + fixtures.LiquidityPools[0] + "/trades?trade_type=orderbook") + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("trade_type", extras["invalid_field"]) + ht.Assert.Equal("trade_type orderbook cannot be used with the liquidity_pool_id filter", extras["reason"]) + } +} + +func TestOrderbookTrades(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + dbQ := &history.Q{ht.HorizonSession()} + fixtures := history.TradeScenario(ht.T, dbQ) + + for offerID, expected := range fixtures.TradesByOffer { + var records []horizon.Trade + // All trades + w := ht.Get("/offers/" + strconv.FormatInt(offerID, 10) + "/trades") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(len(expected), w.Body) + ht.UnmarshalPage(w.Body, &records) + for i, row := range expected { + record := records[i] + assertResponseTradeEqualsDBTrade(ht, row, record) + } + } + } + + w := ht.Get("/offers/1/trades?account_id=" + fixtures.Addresses[0]) + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("account_id,liquidity_pool_id,offer_id", extras["invalid_field"]) + ht.Assert.Equal("Use a single filter for trades, you can only use one of account_id, liquidity_pool_id, offer_id", extras["reason"]) + } + + w = ht.Get("/offers/1/trades?liquidity_pool_id=" + fixtures.LiquidityPools[0]) + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("account_id,liquidity_pool_id,offer_id", extras["invalid_field"]) + ht.Assert.Equal("Use a single filter for trades, you can only use one of account_id, liquidity_pool_id, offer_id", extras["reason"]) + } + + w = ht.Get("/offers/1/trades?trade_type=liquidity_pool") + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("trade_type", extras["invalid_field"]) + ht.Assert.Equal("trade_type liquidity_pool cannot be used with the offer_id filter", extras["reason"]) + } +} + +func TestAccountTrades(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + dbQ := &history.Q{ht.HorizonSession()} + fixtures := history.TradeScenario(ht.T, dbQ) + + for _, tradeType := range []string{"", history.AllTrades, history.OrderbookTrades, history.LiquidityPoolTrades} { + for accountAddress, expected := range fixtures.TradesByAccount { + var query string + if tradeType != "" { + expected = history.FilterTradesByType(expected, tradeType) + query = "?trade_type=" + tradeType + } + var records []horizon.Trade + // All trades + w := ht.Get("/accounts/" + accountAddress + "/trades" + query) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(len(expected), w.Body) + ht.UnmarshalPage(w.Body, &records) + for i, row := range expected { + record := records[i] + assertResponseTradeEqualsDBTrade(ht, row, record) + } + } + } + } + + w := ht.Get("/accounts/" + fixtures.Addresses[0] + "/trades?offer_id=1") + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("account_id,liquidity_pool_id,offer_id", extras["invalid_field"]) + 
ht.Assert.Equal("Use a single filter for trades, you can only use one of account_id, liquidity_pool_id, offer_id", extras["reason"]) + } + + w = ht.Get("/accounts/" + fixtures.Addresses[0] + "/trades?liquidity_pool_id=" + fixtures.LiquidityPools[0]) + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("account_id,liquidity_pool_id,offer_id", extras["invalid_field"]) + ht.Assert.Equal("Use a single filter for trades, you can only use one of account_id, liquidity_pool_id, offer_id", extras["reason"]) + } + + w = ht.Get("/accounts/" + fixtures.Addresses[0] + "/trades?trade_type=invalid") + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("trade_type", extras["invalid_field"]) + ht.Assert.Equal("Trade type must be all, orderbook, or liquidity_pool", extras["reason"]) + } +} + +func TestTrades(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + dbQ := &history.Q{ht.HorizonSession()} + fixtures := history.TradeScenario(ht.T, dbQ) + + for _, tradeType := range []string{"", history.AllTrades, history.OrderbookTrades, history.LiquidityPoolTrades} { + var query string + expected := fixtures.Trades + if tradeType != "" { + expected = history.FilterTradesByType(expected, tradeType) + query = "trade_type=" + tradeType + } + w := ht.Get("/trades?" + query) + var records []horizon.Trade + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(len(expected), w.Body) + ht.UnmarshalPage(w.Body, &records) + for i, row := range expected { + record := records[i] + assertResponseTradeEqualsDBTrade(ht, row, record) + } + } + + // reverseTrade order + w = ht.Get("/trades?order=desc&" + query) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(len(records), w.Body) + var reverseRecords []horizon.Trade + ht.UnmarshalPage(w.Body, &reverseRecords) + ht.Assert.Len(reverseRecords, len(records)) + + // ensure that ordering is indeed reversed + for i := 0; i < len(records); i++ { + ht.Assert.Equal(records[i], reverseRecords[len(reverseRecords)-1-i]) + } + } + } + + w := ht.Get("/trades?trade_type=invalid") + if ht.Assert.Equal(400, w.Code) { + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("trade_type", extras["invalid_field"]) + ht.Assert.Equal("Trade type must be all, orderbook, or liquidity_pool", extras["reason"]) + } +} + +func TestTradesForAssetPair(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + dbQ := &history.Q{ht.HorizonSession()} + fixtures := history.TradeScenario(ht.T, dbQ) + + q := make(url.Values) + q.Add("base_asset_type", fixtures.Trades[0].BaseAssetType) + q.Add("base_asset_code", fixtures.Trades[0].BaseAssetCode) + q.Add("base_asset_issuer", fixtures.Trades[0].BaseAssetIssuer) + q.Add("counter_asset_type", fixtures.Trades[0].CounterAssetType) + q.Add("counter_asset_code", fixtures.Trades[0].CounterAssetCode) + q.Add("counter_asset_issuer", fixtures.Trades[0].CounterAssetIssuer) + + reverseQ := make(url.Values) + reverseQ.Add("counter_asset_type", fixtures.Trades[0].BaseAssetType) + reverseQ.Add("counter_asset_code", fixtures.Trades[0].BaseAssetCode) + reverseQ.Add("counter_asset_issuer", fixtures.Trades[0].BaseAssetIssuer) + reverseQ.Add("base_asset_type", fixtures.Trades[0].CounterAssetType) + reverseQ.Add("base_asset_code", fixtures.Trades[0].CounterAssetCode) + reverseQ.Add("base_asset_issuer", fixtures.Trades[0].CounterAssetIssuer) + + baseAsset, err := xdr.BuildAsset( + fixtures.Trades[0].BaseAssetType, fixtures.Trades[0].BaseAssetIssuer, 
fixtures.Trades[0].BaseAssetCode, + ) + ht.Assert.NoError(err) + counterAsset, err := xdr.BuildAsset( + fixtures.Trades[0].CounterAssetType, fixtures.Trades[0].CounterAssetIssuer, fixtures.Trades[0].CounterAssetCode, + ) + ht.Assert.NoError(err) + + rows := fixtures.TradesByAssetPair(baseAsset, counterAsset) + + for _, tradeType := range []string{"", history.AllTrades, history.OrderbookTrades, history.LiquidityPoolTrades} { + expected := rows + if tradeType != "" { + expected = history.FilterTradesByType(expected, tradeType) + q.Set("trade_type", tradeType) + reverseQ.Set("trade_type", tradeType) + } + + w := ht.GetWithParams("/trades", q) + var tradesForPair []horizon.Trade + if ht.Assert.Equal(200, w.Code) { + ht.UnmarshalPage(w.Body, &tradesForPair) + + ht.Assert.Equal(len(expected), len(tradesForPair)) + for i, row := range expected { + assertResponseTradeEqualsDBTrade(ht, row, tradesForPair[i]) + } + } + + w = ht.GetWithParams("/trades", reverseQ) + if ht.Assert.Equal(200, w.Code) { + var trades []horizon.Trade + ht.UnmarshalPage(w.Body, &trades) + ht.Assert.Equal(len(tradesForPair), len(trades)) + + for i, expected := range tradesForPair { + ht.Assert.Equal(reverseTrade(expected), trades[i]) + } + } + } +} + +func reverseTrade(expected horizon.Trade) horizon.Trade { + expected.Links.Base, expected.Links.Counter = expected.Links.Counter, expected.Links.Base + expected.BaseIsSeller = !expected.BaseIsSeller + expected.BaseAssetCode, expected.CounterAssetCode = expected.CounterAssetCode, expected.BaseAssetCode + expected.BaseAssetIssuer, expected.CounterAssetIssuer = expected.CounterAssetIssuer, expected.BaseAssetIssuer + expected.BaseOfferID, expected.CounterOfferID = expected.CounterOfferID, expected.BaseOfferID + expected.BaseLiquidityPoolID, expected.CounterLiquidityPoolID = expected.CounterLiquidityPoolID, expected.BaseLiquidityPoolID + expected.BaseAssetType, expected.CounterAssetType = expected.CounterAssetType, expected.BaseAssetType + expected.BaseAccount, expected.CounterAccount = expected.CounterAccount, expected.BaseAccount + expected.BaseAmount, expected.CounterAmount = expected.CounterAmount, expected.BaseAmount + expected.Price.N, expected.Price.D = expected.Price.D, expected.Price.N + return expected +} + +func assertResponseTradeEqualsDBTrade(ht *HTTPT, row history.Trade, record horizon.Trade) { + ht.Assert.Equal(row.BaseAssetCode, record.BaseAssetCode) + ht.Assert.Equal(row.BaseAssetType, record.BaseAssetType) + ht.Assert.Equal(row.BaseAssetIssuer, record.BaseAssetIssuer) + if row.BaseOfferID.Valid { + ht.Assert.Equal(strconv.FormatInt(row.BaseOfferID.Int64, 10), record.BaseOfferID) + } else { + ht.Assert.Equal("", record.BaseOfferID) + } + ht.Assert.Equal(row.BaseAmount, int64(amount.MustParse(record.BaseAmount))) + ht.Assert.Equal(row.BaseLiquidityPoolID.String, record.BaseLiquidityPoolID) + ht.Assert.Equal(row.BaseAccount.String, record.BaseAccount) + ht.Assert.Equal(row.BaseLiquidityPoolID.String, record.BaseLiquidityPoolID) + ht.Assert.Equal(row.BaseIsSeller, record.BaseIsSeller) + + ht.Assert.Equal(row.CounterAssetCode, record.CounterAssetCode) + ht.Assert.Equal(row.CounterAssetType, record.CounterAssetType) + ht.Assert.Equal(row.CounterAssetIssuer, record.CounterAssetIssuer) + if row.CounterOfferID.Valid { + ht.Assert.Equal(strconv.FormatInt(row.CounterOfferID.Int64, 10), record.CounterOfferID) + } else { + ht.Assert.Equal("", record.CounterOfferID) + } + ht.Assert.Equal(row.CounterAmount, int64(amount.MustParse(record.CounterAmount))) + 
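+	// History rows store trade amounts as raw int64 values while the HTTP API
+	// renders them as 7-decimal strings, hence the amount.MustParse round trip
+	// in the BaseAmount and CounterAmount comparisons above.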
ht.Assert.Equal(row.CounterLiquidityPoolID.String, record.CounterLiquidityPoolID) + ht.Assert.Equal(row.CounterAccount.String, record.CounterAccount) + ht.Assert.Equal(row.CounterLiquidityPoolID.String, record.CounterLiquidityPoolID) + + ht.Assert.Equal(uint32(row.LiquidityPoolFee.Int64), record.LiquidityPoolFeeBP) + ht.Assert.Equal(row.PagingToken(), record.PagingToken()) + ht.Assert.Equal(row.LedgerCloseTime.Unix(), record.LedgerCloseTime.Unix()) + ht.Assert.Equal(row.PriceN.Int64, record.Price.N) + ht.Assert.Equal(row.PriceD.Int64, record.Price.D) + + switch row.Type { + case history.OrderbookTradeType: + ht.Assert.Equal(history.OrderbookTrades, record.TradeType) + case history.LiquidityPoolTradeType: + ht.Assert.Equal(history.LiquidityPoolTrades, record.TradeType) + default: + ht.Assert.Fail("invalid trade type %v", row.Type) + } +} + +// setAssetQuery adds an asset filter with a given prefix to a query +func setAssetQuery(q *url.Values, prefix string, asset xdr.Asset) { + var assetType, assetCode, assetFilter string + asset.Extract(&assetType, &assetCode, &assetFilter) + q.Add(prefix+"asset_type", assetType) + q.Add(prefix+"asset_code", assetCode) + q.Add(prefix+"asset_issuer", assetFilter) +} + +// unsetAssetQuery removes an asset filter with a given prefix from a query +func unsetAssetQuery(q *url.Values, prefix string) { + q.Del(prefix + "asset_type") + q.Del(prefix + "asset_code") + q.Del(prefix + "asset_issuer") +} + +//testPrice ensures that the price float string is equal to the rational price +func testPrice(t *HTTPT, priceStr string, priceR horizon.TradePrice) { + price, err := strconv.ParseFloat(priceStr, 64) + if t.Assert.NoError(err) { + t.Assert.Equal(price, float64(priceR.N)/float64(priceR.D)) + } +} + +func testTradeAggregationPrices(t *HTTPT, record horizon.TradeAggregation) { + testPrice(t, record.High, record.HighR) + testPrice(t, record.Low, record.LowR) + testPrice(t, record.Open, record.OpenR) + testPrice(t, record.Close, record.CloseR) +} + +const minute = int64(time.Minute / time.Millisecond) +const hour = int64(time.Hour / time.Millisecond) +const day = int64(24 * time.Hour / time.Millisecond) +const week = int64(7 * 24 * time.Hour / time.Millisecond) +const aggregationPath = "/trade_aggregations" + +func TestTradeActions_Aggregation(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + const numOfTrades = 10 + + //a realistic millis (since epoch) value to start the test from + //it represents a round hour and is bigger than a max int32 + const start = int64(1510693200000) + + dbQ := &history.Q{ht.HorizonSession()} + ass1, ass2, err := PopulateTestTrades(dbQ, start, numOfTrades, minute, 0) + ht.Require.NoError(err) + + //add other trades as noise, to ensure asset filtering is working + _, _, err = PopulateTestTrades(dbQ, start, numOfTrades, minute, numOfTrades) + ht.Require.NoError(err) + + var records []horizon.TradeAggregation + var record horizon.TradeAggregation + var nextLink string + + q := make(url.Values) + setAssetQuery(&q, "base_", ass1) + setAssetQuery(&q, "counter_", ass2) + + q.Add("start_time", strconv.FormatInt(start, 10)) + q.Add("end_time", strconv.FormatInt(start+hour, 10)) + q.Add("order", "asc") + + //test no resolution provided + w := ht.GetWithParams(aggregationPath, q) + ht.Assert.Equal(400, w.Code) + + //test illegal resolution + if history.StrictResolutionFiltering { + q.Add("resolution", strconv.FormatInt(hour/2, 10)) + w = ht.GetWithParams(aggregationPath, q) + ht.Assert.Equal(400, w.Code) + } + + //test one 
bucket for all trades + q.Set("resolution", strconv.FormatInt(hour, 10)) + w = ht.GetWithParams(aggregationPath, q) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + ht.UnmarshalPage(w.Body, &records) + record = records[0] //Save the single aggregation record for next test + testTradeAggregationPrices(ht, record) + ht.Assert.Equal("0.0005500", records[0].BaseVolume) + } + + //test reverseTrade one bucket - make sure values don't change + q.Set("order", "desc") + w = ht.GetWithParams(aggregationPath, q) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + ht.UnmarshalPage(w.Body, &records) + ht.Assert.Equal(record, records[0]) + } + + //Test bucket per trade + q.Set("order", "asc") + q.Set("resolution", strconv.FormatInt(minute, 10)) + w = ht.GetWithParams(aggregationPath, q) + if ht.Assert.Equal(200, w.Code) { + if ht.Assert.PageOf(numOfTrades, w.Body) { + //test that asset filters work + ht.UnmarshalPage(w.Body, &records) + ht.Assert.Equal(int64(1), records[0].TradeCount) + ht.Assert.Equal("0.0000100", records[0].BaseVolume) + ht.Assert.Equal("1.0000000", records[0].Average) + } + } + + //test partial range by modifying endTime to be one minute above half range. + //half of the results are expected + endTime := start + (numOfTrades/2)*minute + q.Set("end_time", strconv.FormatInt(endTime, 10)) + w = ht.GetWithParams(aggregationPath, q) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(numOfTrades/2, w.Body) + } + + //test that page limit works + limit := 3 + q.Add("limit", strconv.Itoa(limit)) + w = ht.GetWithParams(aggregationPath, q) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(limit, w.Body) + } + + //test that next page delivers the correct amount of records + w = ht.GetWithParams(aggregationPath, q) + nextLink = ht.UnmarshalNext(w.Body) + //make sure the next link is a full url and not just a path + ht.Assert.Equal(true, strings.HasPrefix(nextLink, "http://localhost")) + w = ht.Get(nextLink) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(numOfTrades/2-limit, w.Body) + ht.UnmarshalPage(w.Body, &records) + //test for expected value on timestamp of first record on next page + ht.Assert.Equal(start+int64(limit)*minute, records[0].Timestamp) + } + + //test direction (desc) + q.Set("order", "desc") + w = ht.GetWithParams(aggregationPath, q) + if ht.Assert.Equal(200, w.Code) { + if ht.Assert.PageOf(limit, w.Body) { + ht.UnmarshalPage(w.Body, &records) + ht.Assert.Equal(int64(start+(numOfTrades/2-1)*minute), records[0].Timestamp) + } + } + + //test next link desc + w = ht.GetWithParams(aggregationPath, q) + nextLink = ht.UnmarshalNext(w.Body) + w = ht.Get(nextLink) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(numOfTrades/2-limit, w.Body) + } + + //test next next link empty + w = ht.GetWithParams(aggregationPath, q) + nextLink = ht.UnmarshalNext(w.Body) + w = ht.Get(nextLink) + nextLink = ht.UnmarshalNext(w.Body) + w = ht.Get(nextLink) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(0, w.Body) + } + + //test non-existent base asset + foo := GetTestAsset("FOO") + + unsetAssetQuery(&q, "base_") + setAssetQuery(&q, "base_", foo) + + w = ht.GetWithParams(aggregationPath, q) + ht.Assert.Equal(404, w.Code) + + jsonErr := map[string]interface{}{} + err = json.Unmarshal(w.Body.Bytes(), &jsonErr) + ht.Assert.NoError(err) + ht.Assert.Equal(float64(404), jsonErr["status"]) + ht.Assert.Equal( + map[string]interface{}{ + "invalid_field": "base_asset", + "reason": "not found", + }, + jsonErr["extras"], + ) + + unsetAssetQuery(&q, 
"base_") + setAssetQuery(&q, "base_", ass1) + + //test non-existent counter asset + unsetAssetQuery(&q, "counter_") + setAssetQuery(&q, "counter_", foo) + + w = ht.GetWithParams(aggregationPath, q) + ht.Assert.Equal(404, w.Code) + + err = json.Unmarshal(w.Body.Bytes(), &jsonErr) + ht.Assert.NoError(err) + ht.Assert.Equal(float64(404), jsonErr["status"]) + ht.Assert.Equal( + map[string]interface{}{ + "invalid_field": "counter_asset", + "reason": "not found", + }, + jsonErr["extras"], + ) +} + +func TestTradeActions_AmountsExceedInt64(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + dbQ := &history.Q{ht.HorizonSession()} + + const start = int64(1510693200000) + + acc1 := GetTestAccount() + acc2 := GetTestAccount() + ass1 := GetTestAsset("euro") + ass2 := GetTestAsset("usd") + for i := 1; i <= 3; i++ { + timestamp := stellarTime.MillisFromInt64(start + (minute * int64(i-1))) + err := IngestTestTrade( + dbQ, ass1, ass2, acc1, acc2, int64(9131689504000000000), int64(9131689504000000000), timestamp, int64(i)) + ht.Require.NoError(err) + } + + var records []horizon.TradeAggregation + + q := make(url.Values) + setAssetQuery(&q, "base_", ass1) + setAssetQuery(&q, "counter_", ass2) + + q.Add("start_time", strconv.FormatInt(start, 10)) + q.Add("end_time", strconv.FormatInt(start+hour, 10)) + q.Add("order", "asc") + q.Set("resolution", strconv.FormatInt(hour, 10)) + + w := ht.GetWithParams(aggregationPath, q) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + ht.UnmarshalPage(w.Body, &records) + ht.Assert.Equal("2739506851200.0000000", records[0].BaseVolume) + ht.Assert.Equal("2739506851200.0000000", records[0].CounterVolume) + } +} + +func TestTradeActions_IndexRegressions(t *testing.T) { + t.Run("Assets Dont Exist trades - 404", func(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + var q = make(url.Values) + q.Add("base_asset_type", "credit_alphanum4") + q.Add("base_asset_code", "EUR") + q.Add("base_asset_issuer", "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG") + q.Add("counter_asset_type", "native") + + w := ht.Get("/trades?" 
+ q.Encode()) + + ht.Assert.Equal(404, w.Code) //This used to be 200 with length 0 + }) + + t.Run("Regression for nil prices: https://github.com/stellar/go/issues/357", func(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + dbQ := &history.Q{ht.HorizonSession()} + history.TradeScenario(ht.T, dbQ) + defer ht.Finish() + + w := ht.Get("/trades") + ht.Require.Equal(200, w.Code) + + _ = ht.HorizonDB.MustExec("UPDATE history_trades SET price_n = NULL, price_d = NULL") + w = ht.Get("/trades") + ht.Assert.Equal(200, w.Code, "nil-price trades failed") + }) +} + +// TestTradeActions_AggregationOrdering checks that open/close aggregation +// fields are correct for multiple trades that occur in the same ledger +// https://github.com/stellar/go/issues/215 +func TestTradeActions_AggregationOrdering(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + seller := GetTestAccount() + buyer := GetTestAccount() + ass1 := GetTestAsset("euro") + ass2 := GetTestAsset("usd") + + dbQ := &history.Q{ht.HorizonSession()} + IngestTestTrade(dbQ, ass1, ass2, seller, buyer, 1, 3, 0, 3) + IngestTestTrade(dbQ, ass1, ass2, seller, buyer, 1, 1, 0, 1) + IngestTestTrade(dbQ, ass1, ass2, seller, buyer, 1, 2, 0, 2) + + q := make(url.Values) + setAssetQuery(&q, "base_", ass1) + setAssetQuery(&q, "counter_", ass2) + + q.Add("start_time", "0") + q.Add("end_time", "60000") + q.Add("order", "asc") + q.Add("resolution", "60000") + + var records []horizon.TradeAggregation + w := ht.GetWithParams("/trade_aggregations", q) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + ht.UnmarshalPage(w.Body, &records) + ht.Assert.Equal("1.0000000", records[0].Open) + ht.Assert.Equal("3.0000000", records[0].Close) + } +} + +func TestTradeActions_AssetValidation(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + var q = make(url.Values) + q.Add("base_asset_type", "native") + + w := ht.GetWithParams("/trades", q) + ht.Assert.Equal(400, w.Code) + + extras := ht.UnmarshalExtras(w.Body) + ht.Assert.Equal("base_asset_type,counter_asset_type", extras["invalid_field"]) + ht.Assert.Equal("this endpoint supports asset pairs but only one asset supplied", extras["reason"]) +} + +func TestTradeActions_AggregationInvalidOffset(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + dbQ := &history.Q{ht.HorizonSession()} + ass1, ass2, err := PopulateTestTrades(dbQ, 0, 100, hour, 1) + ht.Require.NoError(err) + + q := make(url.Values) + setAssetQuery(&q, "base_", ass1) + setAssetQuery(&q, "counter_", ass2) + q.Add("order", "asc") + + testCases := []struct { + offset int64 + resolution int64 + startTime int64 + endTime int64 + }{ + {offset: minute, resolution: hour}, // Test invalid offset value that's not hour aligned + {offset: 25 * hour, resolution: week}, // Test invalid offset value that's greater than 24 hours + {offset: 3 * hour, resolution: hour}, // Test invalid offset value that's greater than the resolution + {offset: 3 * hour, startTime: 28 * hour, endTime: 26 * hour, resolution: day}, // Test invalid end time that's less than the start time + {offset: 3 * hour, startTime: 6 * hour, endTime: 26 * hour, resolution: day}, // Test invalid end time that's less than the offset-adjusted start time + {offset: 1 * hour, startTime: 5 * hour, endTime: 3 * hour, resolution: day}, // Test invalid end time that's less than the offset-adjusted start time + {offset: 3 * hour, endTime: 1 * hour, resolution: day}, // Test invalid end time that's less than the offset + 
{startTime: 3 * minute, endTime: 1 * minute, resolution: minute}, // Test invalid end time that's less than the start time (no offset) + } + + for _, tc := range testCases { + t.Run("Testing invalid offset parameters", func(t *testing.T) { + q.Add("offset", strconv.FormatInt(tc.offset, 10)) + q.Add("resolution", strconv.FormatInt(tc.resolution, 10)) + q.Add("start_time", strconv.FormatInt(tc.startTime, 10)) + if tc.endTime != 0 { + q.Add("end_time", strconv.FormatInt(tc.endTime, 10)) + } + w := ht.GetWithParams(aggregationPath, q) + ht.Assert.Equal(400, w.Code) + }) + } +} + +func TestTradeActions_AggregationOffset(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + dbQ := &history.Q{ht.HorizonSession()} + // One trade every hour + ass1, ass2, err := PopulateTestTrades(dbQ, 0, 100, hour, 1) + ht.Require.NoError(err) + + q := make(url.Values) + setAssetQuery(&q, "base_", ass1) + setAssetQuery(&q, "counter_", ass2) + q.Add("order", "asc") + + q.Set("resolution", strconv.FormatInt(day, 10)) + testCases := []struct { + offset int64 + startTime int64 + endTime int64 + expectedTimestamps []int64 + }{ + {offset: 2 * hour, expectedTimestamps: []int64{2 * hour, 26 * hour, 50 * hour, 74 * hour, 98 * hour}}, //Test with no start time + {offset: 1 * hour, startTime: 25 * hour, expectedTimestamps: []int64{25 * hour, 49 * hour, 73 * hour, 97 * hour}}, + {offset: 3 * hour, startTime: 10 * hour, expectedTimestamps: []int64{27 * hour, 51 * hour, 75 * hour, 99 * hour}}, + {offset: 6 * hour, startTime: 1 * hour, expectedTimestamps: []int64{6 * hour, 30 * hour, 54 * hour, 78 * hour}}, + {offset: 18 * hour, startTime: 30 * hour, expectedTimestamps: []int64{42 * hour, 66 * hour, 90 * hour}}, + {offset: 10 * hour, startTime: 35 * hour, expectedTimestamps: []int64{58 * hour, 82 * hour}}, + {offset: 18 * hour, startTime: 96 * hour, expectedTimestamps: []int64{}}, // No results since last timestamp is at 100 + {offset: 1 * hour, startTime: 5 * hour, endTime: 95 * hour, expectedTimestamps: []int64{25 * hour, 49 * hour}}, + {offset: 1 * hour, startTime: 5 * hour, endTime: 26 * hour, expectedTimestamps: []int64{}}, // end time and start time should both be at 25 hours + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("Testing trade aggregations bucket with offset %d (hour) start time %d (hour)", + tc.offset/hour, tc.startTime/hour), func(t *testing.T) { + q.Set("offset", strconv.FormatInt(tc.offset, 10)) + if tc.startTime != 0 { + q.Set("start_time", strconv.FormatInt(tc.startTime, 10)) + } + if tc.endTime != 0 { + q.Set("end_time", strconv.FormatInt(tc.endTime, 10)) + } + w := ht.GetWithParams(aggregationPath, q) + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(len(tc.expectedTimestamps), w.Body) + var records []horizon.TradeAggregation + ht.UnmarshalPage(w.Body, &records) + if len(records) > 0 { + for i, record := range records { + ht.Assert.Equal(tc.expectedTimestamps[i], record.Timestamp) + } + } + } + }) + } +} + +//GetTestAsset generates an issuer on the fly and creates a CreditAlphanum4 Asset with given code +func GetTestAsset(code string) xdr.Asset { + var codeBytes [4]byte + copy(codeBytes[:], []byte(code)) + ca4 := xdr.AlphaNum4{Issuer: GetTestAccount(), AssetCode: codeBytes} + return xdr.Asset{Type: xdr.AssetTypeAssetTypeCreditAlphanum4, AlphaNum4: &ca4, AlphaNum12: nil} +} + +//Get generates and returns an account on the fly +func GetTestAccount() xdr.AccountId { + var key xdr.Uint256 + kp, _ := keypair.Random() + copy(key[:], kp.Address()) + acc, _ := 
xdr.NewAccountId(xdr.PublicKeyTypePublicKeyTypeEd25519, key) + return acc +} + +func abs(a xdr.Int32) xdr.Int32 { + if a < 0 { + return -a + } + return a +} + +//IngestTestTrade mock ingests a trade +func IngestTestTrade( + q *history.Q, + assetSold xdr.Asset, + assetBought xdr.Asset, + seller xdr.AccountId, + buyer xdr.AccountId, + amountSold int64, + amountBought int64, + timestamp stellarTime.Millis, + opCounter int64) error { + + trade := xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeOrderBook, + OrderBook: &xdr.ClaimOfferAtom{ + AmountBought: xdr.Int64(amountBought), + SellerId: seller, + AmountSold: xdr.Int64(amountSold), + AssetBought: assetBought, + AssetSold: assetSold, + OfferId: 100, + }, + } + + price := xdr.Price{ + N: abs(xdr.Int32(amountBought)), + D: abs(xdr.Int32(amountSold)), + } + + ctx := context.Background() + accounts, err := q.CreateAccounts(ctx, []string{seller.Address(), buyer.Address()}, 2) + if err != nil { + return err + } + assets, err := q.CreateAssets(ctx, []xdr.Asset{assetBought, assetSold}, 2) + if err != nil { + return err + } + + batch := q.NewTradeBatchInsertBuilder(0) + batch.Add(ctx, history.InsertTrade{ + HistoryOperationID: opCounter, + Order: 0, + CounterAssetID: assets[assetBought.String()].ID, + CounterAccountID: null.IntFrom(accounts[buyer.Address()]), + CounterAmount: amountBought, + + BaseAssetID: assets[assetSold.String()].ID, + BaseAccountID: null.IntFrom(accounts[seller.Address()]), + BaseAmount: amountSold, + BaseOfferID: null.IntFrom(int64(trade.OfferId())), + BaseIsSeller: true, + PriceN: int64(price.N), + PriceD: int64(price.D), + LedgerCloseTime: timestamp.ToTime(), + + Type: history.OrderbookTradeType, + }) + err = batch.Exec(ctx) + if err != nil { + return err + } + + err = q.RebuildTradeAggregationTimes(context.Background(), timestamp, timestamp) + if err != nil { + return err + } + + return nil +} + +//PopulateTestTrades generates and ingests trades between two assets according to given parameters +func PopulateTestTrades( + q *history.Q, + startTs int64, + numOfTrades int, + delta int64, + opStart int64) (ass1 xdr.Asset, ass2 xdr.Asset, err error) { + + acc1 := GetTestAccount() + acc2 := GetTestAccount() + ass1 = GetTestAsset("euro") + ass2 = GetTestAsset("usd") + for i := 1; i <= numOfTrades; i++ { + timestamp := stellarTime.MillisFromInt64(startTs + (delta * int64(i-1))) + err = IngestTestTrade( + q, ass1, ass2, acc1, acc2, int64(i*100), int64(i*100)*int64(i), timestamp, opStart+int64(i)) + //tt.Assert.NoError(err) + if err != nil { + return + } + } + return +} diff --git a/services/horizon/internal/actions_transaction_test.go b/services/horizon/internal/actions_transaction_test.go new file mode 100644 index 0000000000..605a0a8f69 --- /dev/null +++ b/services/horizon/internal/actions_transaction_test.go @@ -0,0 +1,425 @@ +package horizon + +import ( + "encoding/json" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/corestate" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" +) + +func TestTransactionActions_Show(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + w := ht.Get("/transactions/2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d") + + if ht.Assert.Equal(200, w.Code) { + var actual horizon.Transaction + err := 
json.Unmarshal(w.Body.Bytes(), &actual) + ht.Require.NoError(err) + + ht.Assert.Equal( + "2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d", + actual.Hash, + ) + } + + // missing tx + w = ht.Get("/transactions/ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + ht.Assert.Equal(404, w.Code) + + // uppercase tx hash not accepted + w = ht.Get("/transactions/2374E99349B9EF7DBA9A5DB3339B78FDA8F34777B1AF33BA468AD5C0DF946D4D") + ht.Assert.Equal(400, w.Code) + + // badly formated tx hash not accepted + w = ht.Get("/transactions/%00%1E4%5E%EF%BF%BD%EF%BF%BD%EF%BF%BDpVP%EF%BF%BDI&R%0BK%EF%BF%BD%1D%EF%BF%BD%EF%BF%BD=%EF%BF%BD%3F%23%EF%BF%BD%EF%BF%BDl%EF%BF%BD%1El%EF%BF%BD%EF%BF%BD") + ht.Assert.Equal(400, w.Code) +} + +func TestTransactionActions_Show_Failed(t *testing.T) { + ht := StartHTTPTest(t, "failed_transactions") + defer ht.Finish() + + // Failed single + w := ht.Get("/transactions/aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf") + + if ht.Assert.Equal(200, w.Code) { + var actual horizon.Transaction + err := json.Unmarshal(w.Body.Bytes(), &actual) + ht.Require.NoError(err) + + ht.Assert.Equal( + "aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf", + actual.Hash, + ) + + ht.Assert.False(actual.Successful) + } + + // Successful single + w = ht.Get("/transactions/56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1") + + if ht.Assert.Equal(200, w.Code) { + var actual horizon.Transaction + err := json.Unmarshal(w.Body.Bytes(), &actual) + ht.Require.NoError(err) + + ht.Assert.Equal( + "56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1", + actual.Hash, + ) + + ht.Assert.True(actual.Successful) + } + + // Should show successful transactions only + w = ht.Get("/transactions?limit=200") + + if ht.Assert.Equal(200, w.Code) { + records := []horizon.Transaction{} + ht.UnmarshalPage(w.Body, &records) + + successful := 0 + failed := 0 + + for _, tx := range records { + if tx.Successful { + successful++ + } else { + failed++ + } + } + + ht.Assert.Equal(8, successful) + ht.Assert.Equal(0, failed) + } + + // Should show all transactions: both successful and failed + w = ht.Get("/transactions?limit=200&include_failed=true") + + if ht.Assert.Equal(200, w.Code) { + records := []horizon.Transaction{} + ht.UnmarshalPage(w.Body, &records) + + successful := 0 + failed := 0 + + for _, tx := range records { + if tx.Successful { + successful++ + } else { + failed++ + } + } + + ht.Assert.Equal(8, successful) + ht.Assert.Equal(1, failed) + } + + w = ht.Get("/transactions/aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf") + + if ht.Assert.Equal(200, w.Code) { + var actual horizon.Transaction + err := json.Unmarshal(w.Body.Bytes(), &actual) + ht.Require.NoError(err) + + ht.Assert.False(actual.Successful) + } + + w = ht.Get("/transactions/56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1") + + if ht.Assert.Equal(200, w.Code) { + var actual horizon.Transaction + err := json.Unmarshal(w.Body.Bytes(), &actual) + ht.Require.NoError(err) + + ht.Assert.True(actual.Successful) + } + + // NULL value + _, err := ht.HorizonSession().ExecRaw(ht.Ctx, + `UPDATE history_transactions SET successful = NULL WHERE transaction_hash = ?`, + "56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1", + ) + ht.Require.NoError(err) + + w = ht.Get("/transactions/56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1") + + if ht.Assert.Equal(200, w.Code) { + var actual horizon.Transaction + err := 
json.Unmarshal(w.Body.Bytes(), &actual) + ht.Require.NoError(err) + + ht.Assert.True(actual.Successful) + } +} + +func TestTransactionActions_Index(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + w := ht.Get("/transactions") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(4, w.Body) + } + + // filtered by ledger + w = ht.Get("/ledgers/1/transactions") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(0, w.Body) + } + + w = ht.Get("/ledgers/2/transactions") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + + w = ht.Get("/ledgers/3/transactions") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } + + // missing ledger + w = ht.Get("/ledgers/100/transactions") + ht.Assert.Equal(404, w.Code) + + // Makes StateMiddleware happy + q := history.Q{ht.HorizonSession()} + err := q.UpdateLastLedgerIngest(ht.Ctx, 100) + ht.Assert.NoError(err) + err = q.UpdateIngestVersion(ht.Ctx, ingest.CurrentVersion) + ht.Assert.NoError(err) + + // checks if empty param returns 404 instead of all payments + w = ht.Get("/accounts//transactions") + ht.Assert.NotEqual(404, w.Code) + + // filtering by account + w = ht.Get("/accounts/GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H/transactions") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(3, w.Body) + } + + w = ht.Get("/accounts/GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2/transactions") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(1, w.Body) + } + + w = ht.Get("/accounts/GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU/transactions") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(2, w.Body) + } + + // filtering by claimable balance + w = ht.Get("/claimable_balances/00000000178826fbfe339e1f5c53417c6fedfe2c05e8bec14303143ec46b38981b09c3f9/transactions") + if ht.Assert.Equal(200, w.Code) { + ht.Assert.PageOf(2, w.Body) + } + + // Check extra params + w = ht.Get("/ledgers/100/transactions?account_id=GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + ht.Assert.Equal(400, w.Code) + w = ht.Get("/ledgers/100/transactions?claimable_balance_id=00000000178826fbfe339e1f5c53417c6fedfe2c05e8bec14303143ec46b38981b09c3f9") + ht.Assert.Equal(400, w.Code) + w = ht.Get("/accounts/GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU/transactions?ledger_id=5") + ht.Assert.Equal(400, w.Code) + w = ht.Get("/accounts/GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU/transactions?cursor=limit=order=") + ht.Assert.Equal(400, w.Code) + + // regression: https://github.com/stellar/go/services/horizon/internal/issues/365 + w = ht.Get("/transactions?limit=200") + ht.Require.Equal(200, w.Code) + w = ht.Get("/transactions?limit=201") + ht.Assert.Equal(400, w.Code) + w = ht.Get("/transactions?limit=0") + ht.Assert.Equal(400, w.Code) + +} + +func TestTransactionActions_Post(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + // Pass Synced check + ht.App.coreState.SetState(corestate.State{Synced: true}) + + tx := xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTxV0, + V0: &xdr.TransactionV0Envelope{ + Tx: xdr.TransactionV0{ + SourceAccountEd25519: *xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H").Ed25519, + Fee: 100, + SeqNum: 1, + Operations: []xdr.Operation{ + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypeCreateAccount, + CreateAccountOp: &xdr.CreateAccountOp{ + Destination: xdr.MustAddress("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"), + StartingBalance: 1000000000, 
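+								// StartingBalance above is expressed in stroops: 1,000,000,000 stroops = 100 XLM.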
+ }, + }, + }, + }, + }, + Signatures: []xdr.DecoratedSignature{ + { + Hint: xdr.SignatureHint{86, 252, 5, 247}, + Signature: xdr.Signature{131, 206, 171, 228, 64, 20, 40, 52, 2, 98, 124, 244, 87, 14, 130, 225, 190, 220, 156, 79, 121, 69, 60, 36, 57, 214, 9, 29, 176, 81, 218, 4, 213, 176, 211, 148, 191, 86, 21, 180, 94, 9, 43, 208, 32, 79, 19, 131, 90, 21, 93, 138, 153, 203, 55, 103, 2, 230, 137, 190, 19, 70, 179, 11}, + }, + }, + }, + } + + txStr, err := xdr.MarshalBase64(tx) + assert.NoError(t, err) + form := url.Values{"tx": []string{txStr}} + + // existing transaction + w := ht.Post("/transactions", form) + ht.Assert.Equal(200, w.Code) +} + +func TestTransactionActions_PostSuccessful(t *testing.T) { + ht := StartHTTPTest(t, "failed_transactions") + defer ht.Finish() + + // Pass Synced check + ht.App.coreState.SetState(corestate.State{Synced: true}) + + destAID := xdr.MustAddress("GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON") + tx2 := xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTxV0, + V0: &xdr.TransactionV0Envelope{ + Tx: xdr.TransactionV0{ + SourceAccountEd25519: *xdr.MustAddress("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU").Ed25519, + Fee: 100, + SeqNum: 8589934593, + Operations: []xdr.Operation{ + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypePayment, + PaymentOp: &xdr.PaymentOp{ + Destination: destAID.ToMuxedAccount(), + Asset: xdr.Asset{ + Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: xdr.AssetCode4{85, 83, 68, 0}, + Issuer: xdr.MustAddress("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"), + }, + }, + Amount: 1000000000, + }, + }, + }, + }, + }, + Signatures: []xdr.DecoratedSignature{ + { + Hint: xdr.SignatureHint{174, 228, 190, 76}, + Signature: xdr.Signature{73, 202, 13, 176, 216, 188, 169, 9, 141, 130, 180, 106, 187, 225, 22, 89, 254, 24, 173, 62, 236, 12, 186, 131, 70, 190, 214, 24, 209, 69, 233, 68, 1, 238, 48, 154, 55, 170, 53, 196, 96, 218, 110, 2, 159, 187, 120, 2, 50, 115, 2, 192, 208, 35, 72, 151, 106, 17, 155, 160, 147, 200, 52, 12}, + }, + }, + }, + } + + txStr, err := xdr.MarshalBase64(tx2) + assert.NoError(t, err) + + // 56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1 + form := url.Values{"tx": []string{txStr}} + + w := ht.Post("/transactions", form) + ht.Assert.Equal(200, w.Code) + ht.Assert.Contains(string(w.Body.Bytes()), `"result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA="`) +} + +func TestTransactionActions_PostFailed(t *testing.T) { + ht := StartHTTPTest(t, "failed_transactions") + defer ht.Finish() + + // Pass Synced check + ht.App.coreState.SetState(corestate.State{Synced: true}) + + // aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf + form := url.Values{"tx": []string{"AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAB3NZQAAAAAAAAAAAFvFIhaAAAAQKcGS9OsVnVHCVIH04C9ZKzzKYBRdCmy+Jwmzld7QcALOxZUcAgkuGfoSdvXpH38mNvrqQiaMsSNmTJWYRzHvgo="}} + + w := ht.Post("/transactions", form) + ht.Assert.Equal(400, w.Code) + ht.Assert.Contains(string(w.Body.Bytes()), "op_underfunded") + ht.Assert.Contains(string(w.Body.Bytes()), `"result_xdr": "AAAAAAAAAGT/////AAAAAQAAAAAAAAAB/////gAAAAA="`) +} + +func TestPostFeeBumpTransaction(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + // Pass Synced check + ht.App.coreState.SetState(corestate.State{Synced: 
true}) + + test.ResetHorizonDB(t, ht.HorizonDB) + q := &history.Q{ht.HorizonSession()} + fixture := history.FeeBumpScenario(ht.T, q, true) + + form := url.Values{"tx": []string{fixture.Transaction.TxEnvelope}} + w := ht.Post("/transactions", form) + ht.Assert.Equal(200, w.Code) + var response horizon.Transaction + err := json.Unmarshal(w.Body.Bytes(), &response) + ht.Assert.NoError(err) + + ht.Assert.Equal(fixture.Transaction.TxResult, response.ResultXdr) + ht.Assert.Equal(fixture.Transaction.TxMeta, response.ResultMetaXdr) + ht.Assert.Equal(fixture.Transaction.TransactionHash, response.Hash) + ht.Assert.Equal(fixture.Transaction.TxEnvelope, response.EnvelopeXdr) + ht.Assert.Equal(fixture.Transaction.LedgerSequence, response.Ledger) + + innerTxEnvelope, err := xdr.MarshalBase64(fixture.Envelope.FeeBump.Tx.InnerTx.V1) + ht.Assert.NoError(err) + form = url.Values{"tx": []string{innerTxEnvelope}} + w = ht.Post("/transactions", form) + ht.Assert.Equal(200, w.Code) + err = json.Unmarshal(w.Body.Bytes(), &response) + ht.Assert.NoError(err) + + ht.Assert.Equal(fixture.Transaction.TxResult, response.ResultXdr) + ht.Assert.Equal(fixture.Transaction.TxMeta, response.ResultMetaXdr) + ht.Assert.Equal(fixture.InnerHash, response.Hash) + ht.Assert.Equal(fixture.Transaction.TxEnvelope, response.EnvelopeXdr) + ht.Assert.Equal(fixture.Transaction.LedgerSequence, response.Ledger) +} + +func TestPostFailedFeeBumpTransaction(t *testing.T) { + ht := StartHTTPTestWithoutScenario(t) + defer ht.Finish() + + // Pass Synced check + ht.App.coreState.SetState(corestate.State{Synced: true}) + + test.ResetHorizonDB(t, ht.HorizonDB) + q := &history.Q{ht.HorizonSession()} + fixture := history.FeeBumpScenario(ht.T, q, false) + + form := url.Values{"tx": []string{fixture.Transaction.TxEnvelope}} + w := ht.Post("/transactions", form) + ht.Assert.Equal(400, w.Code) + ht.Assert.Contains(string(w.Body.Bytes()), "tx_fee_bump_inner_failed") + ht.Assert.Contains(string(w.Body.Bytes()), "tx_bad_auth") + + innerTxEnvelope, err := xdr.MarshalBase64(fixture.Envelope.FeeBump.Tx.InnerTx.V1) + ht.Assert.NoError(err) + form = url.Values{"tx": []string{innerTxEnvelope}} + w = ht.Post("/transactions", form) + ht.Assert.Equal(400, w.Code) + ht.Assert.Contains(string(w.Body.Bytes()), "tx_bad_auth") + ht.Assert.NotContains(string(w.Body.Bytes()), "tx_fee_bump_inner_failed") +} diff --git a/services/horizon/internal/app.go b/services/horizon/internal/app.go new file mode 100644 index 0000000000..5a326821c1 --- /dev/null +++ b/services/horizon/internal/app.go @@ -0,0 +1,583 @@ +package horizon + +import ( + "context" + "database/sql" + "fmt" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stellar/go/clients/stellarcore" + "github.com/stellar/go/services/horizon/internal/corestate" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/httpx" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/logmetrics" + "github.com/stellar/go/services/horizon/internal/operationfeestats" + "github.com/stellar/go/services/horizon/internal/paths" + "github.com/stellar/go/services/horizon/internal/reap" + "github.com/stellar/go/services/horizon/internal/txsub" + "github.com/stellar/go/support/app" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + 
"github.com/stellar/go/support/log" +) + +// App represents the root of the state of a horizon instance. +type App struct { + done chan struct{} + doneOnce sync.Once + config Config + webServer *httpx.Server + historyQ *history.Q + primaryHistoryQ *history.Q + ctx context.Context + cancel func() + horizonVersion string + coreState corestate.Store + orderBookStream *ingest.OrderBookStream + submitter *txsub.System + paths paths.Finder + ingester ingest.System + reaper *reap.System + ticks *time.Ticker + ledgerState *ledger.State + + // metrics + prometheusRegistry *prometheus.Registry + buildInfoGauge *prometheus.GaugeVec + ingestingGauge prometheus.Gauge +} + +func (a *App) GetCoreState() corestate.State { + return a.coreState.Get() +} + +const tickerMaxFrequency = 1 * time.Second +const tickerMaxDuration = 5 * time.Second + +// NewApp constructs an new App instance from the provided config. +func NewApp(config Config) (*App, error) { + a := &App{ + config: config, + ledgerState: &ledger.State{}, + horizonVersion: app.Version(), + ticks: time.NewTicker(tickerMaxFrequency), + done: make(chan struct{}), + } + + if err := a.init(); err != nil { + return nil, err + } + return a, nil +} + +// Serve starts the horizon web server, binding it to a socket, setting up +// the shutdown signals. +func (a *App) Serve() error { + + log.Infof("Starting horizon on :%d (ingest: %v)", a.config.Port, a.config.Ingest) + + if a.config.AdminPort != 0 { + log.Infof("Starting internal server on :%d", a.config.AdminPort) + } + + go a.run() + go a.orderBookStream.Run(a.ctx) + + // WaitGroup for all go routines. Makes sure that DB is closed when + // all services gracefully shutdown. + var wg sync.WaitGroup + + if a.ingester != nil { + wg.Add(1) + go func() { + a.ingester.Run() + wg.Done() + }() + } + + if a.reaper != nil { + wg.Add(1) + go func() { + a.reaper.Run() + wg.Done() + }() + } + + // configure shutdown signal handler + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) + + if a.config.UsingDefaultPubnetConfig { + const warnMsg = "Horizon started using the default pubnet configuration. " + + "This is not safe! Please provide a custom --captive-core-config-path." + log.Warn(warnMsg) + go func() { + for { + select { + case <-time.After(time.Hour): + log.Warn(warnMsg) + case <-a.done: + return + } + } + }() + } + + go func() { + select { + case <-signalChan: + a.Close() + case <-a.done: + return + } + }() + + wg.Add(1) + go func() { + a.waitForDone() + wg.Done() + }() + + log.Infof("Starting to serve requests") + err := a.webServer.Serve() + if err != nil && err != http.ErrServerClosed { + return err + } + + wg.Wait() + a.CloseDB() + + log.Info("stopped") + return nil +} + +// Close cancels the app. It does not close DB connections - use App.CloseDB(). +func (a *App) Close() { + a.doneOnce.Do(func() { + close(a.done) + }) +} + +func (a *App) waitForDone() { + <-a.done + webShutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + a.webServer.Shutdown(webShutdownCtx) + a.cancel() + if a.ingester != nil { + a.ingester.Shutdown() + } + if a.reaper != nil { + a.reaper.Shutdown() + } + a.ticks.Stop() +} + +// CloseDB closes DB connections. When using during web server shut down make +// sure all requests are first properly finished to avoid "sql: database is +// closed" errors. 
+func (a *App) CloseDB() { + a.historyQ.SessionInterface.Close() +} + +// HistoryQ returns a helper object for performing sql queries against the +// history portion of horizon's database. +func (a *App) HistoryQ() *history.Q { + return a.historyQ +} + +// Ingestion returns the ingestion system associated with this Horizon instance +func (a *App) Ingestion() ingest.System { + return a.ingester +} + +// HorizonSession returns a new session that loads data from the horizon +// database. +func (a *App) HorizonSession() db.SessionInterface { + return a.historyQ.SessionInterface.Clone() +} + +func (a *App) Config() Config { + return a.config +} + +// UpdateCoreLedgerState triggers a refresh of Stellar-Core ledger state. +// This is done separately from Horizon ledger state update to prevent issues +// in case Stellar-Core query timeout. +func (a *App) UpdateCoreLedgerState(ctx context.Context) { + var next ledger.CoreStatus + + logErr := func(err error, msg string) { + log.WithStack(err).WithField("err", err.Error()).Error(msg) + } + + coreClient := &stellarcore.Client{ + HTTP: http.DefaultClient, + URL: a.config.StellarCoreURL, + } + + coreInfo, err := coreClient.Info(a.ctx) + if err != nil { + logErr(err, "failed to load the stellar-core info") + return + } + next.CoreLatest = int32(coreInfo.Info.Ledger.Num) + a.ledgerState.SetCoreStatus(next) +} + +// UpdateHorizonLedgerState triggers a refresh of Horizon ledger state. +// This is done separately from Core ledger state update to prevent issues +// in case Stellar-Core query timeout. +func (a *App) UpdateHorizonLedgerState(ctx context.Context) { + var next ledger.HorizonStatus + + logErr := func(err error, msg string) { + log.WithStack(err).WithField("err", err.Error()).Error(msg) + } + + var err error + next.HistoryLatest, next.HistoryLatestClosedAt, err = + a.HistoryQ().LatestLedgerSequenceClosedAt(ctx) + if err != nil { + logErr(err, "failed to load the latest known ledger state from history DB") + return + } + + err = a.HistoryQ().ElderLedger(ctx, &next.HistoryElder) + if err != nil { + logErr(err, "failed to load the oldest known ledger state from history DB") + return + } + + next.ExpHistoryLatest, err = a.HistoryQ().GetLastLedgerIngestNonBlocking(ctx) + if err != nil { + logErr(err, "failed to load the oldest known exp ledger state from history DB") + return + } + + a.ledgerState.SetHorizonStatus(next) +} + +// UpdateFeeStatsState triggers a refresh of several operation fee metrics. 
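+// Stats are recomputed only when a newer ledger has been ingested since the last
+// run; when the most recent ledgers contain no transactions, every percentile
+// falls back to the latest ledger's base fee.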
+func (a *App) UpdateFeeStatsState(ctx context.Context) { + var ( + next operationfeestats.State + latest history.LatestLedger + feeStats history.FeeStats + capacityStats history.LedgerCapacityUsageStats + ) + + logErr := func(err error, msg string) { + // If DB is empty ignore the error + if errors.Cause(err) == sql.ErrNoRows { + return + } + + log.WithStack(err).WithField("err", err.Error()).Error(msg) + } + + cur, ok := operationfeestats.CurrentState() + + err := a.HistoryQ().LatestLedgerBaseFeeAndSequence(ctx, &latest) + if err != nil { + logErr(err, "failed to load the latest known ledger's base fee and sequence number") + return + } + + // finish early if no new ledgers + if ok && cur.LastLedger == uint32(latest.Sequence) { + return + } + + next.LastBaseFee = int64(latest.BaseFee) + next.LastLedger = uint32(latest.Sequence) + + err = a.HistoryQ().FeeStats(ctx, latest.Sequence, &feeStats) + if err != nil { + logErr(err, "failed to load operation fee stats") + return + } + + err = a.HistoryQ().LedgerCapacityUsageStats(ctx, latest.Sequence, &capacityStats) + if err != nil { + logErr(err, "failed to load ledger capacity usage stats") + return + } + + next.LedgerCapacityUsage = capacityStats.CapacityUsage.String + + // if no transactions in last 5 ledgers, return + // latest ledger's base fee for all + if !feeStats.MaxFeeMode.Valid && !feeStats.MaxFeeMin.Valid { + // MaxFee + next.MaxFeeMax = next.LastBaseFee + next.MaxFeeMin = next.LastBaseFee + next.MaxFeeMode = next.LastBaseFee + next.MaxFeeP10 = next.LastBaseFee + next.MaxFeeP20 = next.LastBaseFee + next.MaxFeeP30 = next.LastBaseFee + next.MaxFeeP40 = next.LastBaseFee + next.MaxFeeP50 = next.LastBaseFee + next.MaxFeeP60 = next.LastBaseFee + next.MaxFeeP70 = next.LastBaseFee + next.MaxFeeP80 = next.LastBaseFee + next.MaxFeeP90 = next.LastBaseFee + next.MaxFeeP95 = next.LastBaseFee + next.MaxFeeP99 = next.LastBaseFee + + // FeeCharged + next.FeeChargedMax = next.LastBaseFee + next.FeeChargedMin = next.LastBaseFee + next.FeeChargedMode = next.LastBaseFee + next.FeeChargedP10 = next.LastBaseFee + next.FeeChargedP20 = next.LastBaseFee + next.FeeChargedP30 = next.LastBaseFee + next.FeeChargedP40 = next.LastBaseFee + next.FeeChargedP50 = next.LastBaseFee + next.FeeChargedP60 = next.LastBaseFee + next.FeeChargedP70 = next.LastBaseFee + next.FeeChargedP80 = next.LastBaseFee + next.FeeChargedP90 = next.LastBaseFee + next.FeeChargedP95 = next.LastBaseFee + next.FeeChargedP99 = next.LastBaseFee + + } else { + // MaxFee + next.MaxFeeMax = feeStats.MaxFeeMax.Int64 + next.MaxFeeMin = feeStats.MaxFeeMin.Int64 + next.MaxFeeMode = feeStats.MaxFeeMode.Int64 + next.MaxFeeP10 = feeStats.MaxFeeP10.Int64 + next.MaxFeeP20 = feeStats.MaxFeeP20.Int64 + next.MaxFeeP30 = feeStats.MaxFeeP30.Int64 + next.MaxFeeP40 = feeStats.MaxFeeP40.Int64 + next.MaxFeeP50 = feeStats.MaxFeeP50.Int64 + next.MaxFeeP60 = feeStats.MaxFeeP60.Int64 + next.MaxFeeP70 = feeStats.MaxFeeP70.Int64 + next.MaxFeeP80 = feeStats.MaxFeeP80.Int64 + next.MaxFeeP90 = feeStats.MaxFeeP90.Int64 + next.MaxFeeP95 = feeStats.MaxFeeP95.Int64 + next.MaxFeeP99 = feeStats.MaxFeeP99.Int64 + + // FeeCharged + next.FeeChargedMax = feeStats.FeeChargedMax.Int64 + next.FeeChargedMin = feeStats.FeeChargedMin.Int64 + next.FeeChargedMode = feeStats.FeeChargedMode.Int64 + next.FeeChargedP10 = feeStats.FeeChargedP10.Int64 + next.FeeChargedP20 = feeStats.FeeChargedP20.Int64 + next.FeeChargedP30 = feeStats.FeeChargedP30.Int64 + next.FeeChargedP40 = feeStats.FeeChargedP40.Int64 + next.FeeChargedP50 = 
feeStats.FeeChargedP50.Int64 + next.FeeChargedP60 = feeStats.FeeChargedP60.Int64 + next.FeeChargedP70 = feeStats.FeeChargedP70.Int64 + next.FeeChargedP80 = feeStats.FeeChargedP80.Int64 + next.FeeChargedP90 = feeStats.FeeChargedP90.Int64 + next.FeeChargedP95 = feeStats.FeeChargedP95.Int64 + next.FeeChargedP99 = feeStats.FeeChargedP99.Int64 + } + + operationfeestats.SetState(next) +} + +// UpdateStellarCoreInfo updates the value of CoreVersion, +// CurrentProtocolVersion, and CoreSupportedProtocolVersion from the Stellar +// core API. +// +// Warning: This method should only return an error if it is fatal. See usage +// in `App.Tick` +func (a *App) UpdateStellarCoreInfo(ctx context.Context) error { + if a.config.StellarCoreURL == "" { + return nil + } + + core := &stellarcore.Client{ + URL: a.config.StellarCoreURL, + } + + resp, err := core.Info(ctx) + if err != nil { + log.Warnf("could not load stellar-core info: %s", err) + return nil + } + + // Check if NetworkPassphrase is different, if so exit Horizon as it can break the + // state of the application. + if resp.Info.Network != a.config.NetworkPassphrase { + return fmt.Errorf( + "Network passphrase of stellar-core (%s) does not match Horizon configuration (%s). Exiting...", + resp.Info.Network, + a.config.NetworkPassphrase, + ) + } + + a.coreState.Set(resp) + return nil +} + +// DeleteUnretainedHistory forwards to the app's reaper. See +// `reap.DeleteUnretainedHistory` for details +func (a *App) DeleteUnretainedHistory(ctx context.Context) error { + return a.reaper.DeleteUnretainedHistory(ctx) +} + +// Tick triggers horizon to update all of it's background processes such as +// transaction submission, metrics, ingestion and reaping. +func (a *App) Tick(ctx context.Context) error { + var wg sync.WaitGroup + log.Debug("ticking app") + + // update ledger state, operation fee state, and stellar-core info in parallel + wg.Add(4) + var err error + go func() { a.UpdateCoreLedgerState(ctx); wg.Done() }() + go func() { a.UpdateHorizonLedgerState(ctx); wg.Done() }() + go func() { a.UpdateFeeStatsState(ctx); wg.Done() }() + go func() { err = a.UpdateStellarCoreInfo(ctx); wg.Done() }() + wg.Wait() + if err != nil { + return err + } + + wg.Add(1) + go func() { a.submitter.Tick(ctx); wg.Done() }() + wg.Wait() + + log.Debug("finished ticking app") + return ctx.Err() +} + +// Init initializes app, using the config to populate db connections and +// whatnot. 
+func (a *App) init() error { + // app-context + a.ctx, a.cancel = context.WithCancel(context.Background()) + + // log + log.DefaultLogger.SetLevel(a.config.LogLevel) + log.DefaultLogger.AddHook(logmetrics.DefaultMetrics) + + // sentry + initSentry(a) + + // loggly + initLogglyLog(a) + + // metrics and log.metrics + a.prometheusRegistry = prometheus.NewRegistry() + for _, meter := range *logmetrics.DefaultMetrics { + a.prometheusRegistry.MustRegister(meter) + } + + // stellarCoreInfo + a.UpdateStellarCoreInfo(a.ctx) + + // horizon-db and core-db + mustInitHorizonDB(a) + + if a.config.Ingest { + // ingester + initIngester(a) + } + initPathFinder(a) + + // txsub + initSubmissionSystem(a) + + // reaper + a.reaper = reap.New(a.config.HistoryRetentionCount, a.HorizonSession(), a.ledgerState) + + // go metrics + initGoMetrics(a) + + // process metrics + initProcessMetrics(a) + + // db-metrics + initDbMetrics(a) + + // ingest.metrics + initIngestMetrics(a) + + // txsub.metrics + initTxSubMetrics(a) + + routerConfig := httpx.RouterConfig{ + DBSession: a.historyQ.SessionInterface, + TxSubmitter: a.submitter, + RateQuota: a.config.RateQuota, + BehindCloudflare: a.config.BehindCloudflare, + BehindAWSLoadBalancer: a.config.BehindAWSLoadBalancer, + SSEUpdateFrequency: a.config.SSEUpdateFrequency, + StaleThreshold: a.config.StaleThreshold, + ConnectionTimeout: a.config.ConnectionTimeout, + NetworkPassphrase: a.config.NetworkPassphrase, + MaxPathLength: a.config.MaxPathLength, + MaxAssetsPerPathRequest: a.config.MaxAssetsPerPathRequest, + PathFinder: a.paths, + PrometheusRegistry: a.prometheusRegistry, + CoreGetter: a, + HorizonVersion: a.horizonVersion, + FriendbotURL: a.config.FriendbotURL, + HealthCheck: healthCheck{ + session: a.historyQ.SessionInterface, + ctx: a.ctx, + core: &stellarcore.Client{ + HTTP: &http.Client{Timeout: infoRequestTimeout}, + URL: a.config.StellarCoreURL, + }, + cache: newHealthCache(healthCacheTTL), + }, + } + + if a.primaryHistoryQ != nil { + routerConfig.PrimaryDBSession = a.primaryHistoryQ.SessionInterface + } + + var err error + config := httpx.ServerConfig{ + Port: uint16(a.config.Port), + AdminPort: uint16(a.config.AdminPort), + } + if a.config.TLSCert != "" && a.config.TLSKey != "" { + config.TLSConfig = &httpx.TLSConfig{ + CertPath: a.config.TLSCert, + KeyPath: a.config.TLSKey, + } + } + a.webServer, err = httpx.NewServer(config, routerConfig, a.ledgerState) + if err != nil { + return err + } + + // web.metrics + initWebMetrics(a) + + return nil +} + +// run is the function that runs in the background that triggers Tick each +// second +func (a *App) run() { + for { + select { + case <-a.ticks.C: + ctx, cancel := context.WithTimeout(a.ctx, tickerMaxDuration) + err := a.Tick(ctx) + if err != nil { + log.Warnf("error ticking app: %s", err) + } + cancel() // Release timer + case <-a.ctx.Done(): + log.Info("finished background ticker") + return + } + } +} diff --git a/services/horizon/internal/app_test.go b/services/horizon/internal/app_test.go new file mode 100644 index 0000000000..d6a2194d1d --- /dev/null +++ b/services/horizon/internal/app_test.go @@ -0,0 +1,78 @@ +package horizon + +import ( + "context" + "net/http" + "testing" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestGenericHTTPFeatures(t *testing.T) { + ht := StartHTTPTest(t, "base") + defer ht.Finish() + + // CORS + w := ht.Get("/") + if ht.Assert.Equal(200, w.Code) { + 
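+		// No Origin header was sent, so no Access-Control-Allow-Origin header is expected.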
		ht.Assert.Empty(w.HeaderMap.Get("Access-Control-Allow-Origin"))
+	}
+
+	w = ht.Get("/", func(r *http.Request) {
+		r.Header.Set("Origin", "somewhere.com")
+	})
+
+	if ht.Assert.Equal(200, w.Code) {
+		ht.Assert.Equal(
+			"somewhere.com",
+			w.HeaderMap.Get("Access-Control-Allow-Origin"),
+		)
+	}
+
+	// Trailing slash is stripped
+	w = ht.Get("/ledgers")
+	ht.Assert.Equal(200, w.Code)
+	w = ht.Get("/ledgers/")
+	ht.Assert.Equal(200, w.Code)
+}
+
+func TestMetrics(t *testing.T) {
+	ht := StartHTTPTest(t, "base")
+	defer ht.Finish()
+
+	adminRouterRH := test.NewRequestHelper(ht.App.webServer.Router.Internal)
+	w := adminRouterRH.Get("/metrics")
+	ht.Assert.Equal(200, w.Code)
+
+	hl := ht.App.ledgerState.Metrics.HistoryLatestLedgerCounter
+	hlc := ht.App.ledgerState.Metrics.HistoryLatestLedgerClosedAgoGauge
+	he := ht.App.ledgerState.Metrics.HistoryElderLedgerCounter
+	cl := ht.App.ledgerState.Metrics.CoreLatestLedgerCounter
+
+	ht.Require.EqualValues(3, getMetricValue(hl).GetCounter().GetValue())
+	ht.Require.Less(float64(1000), getMetricValue(hlc).GetGauge().GetValue())
+	ht.Require.EqualValues(1, getMetricValue(he).GetCounter().GetValue())
+	ht.Require.EqualValues(64, getMetricValue(cl).GetCounter().GetValue())
+}
+
+func TestTick(t *testing.T) {
+	ht := StartHTTPTest(t, "base")
+	defer ht.Finish()
+
+	// Just sanity-check that we return the context error...
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+	err := ht.App.Tick(ctx)
+	ht.Assert.EqualError(err, context.Canceled.Error())
+}
+
+func getMetricValue(metric prometheus.Metric) *dto.Metric {
+	value := &dto.Metric{}
+	err := metric.Write(value)
+	if err != nil {
+		panic(err)
+	}
+	return value
+}
diff --git a/services/horizon/internal/assets/main.go b/services/horizon/internal/assets/main.go
new file mode 100644
index 0000000000..6613f62bab
--- /dev/null
+++ b/services/horizon/internal/assets/main.go
@@ -0,0 +1,42 @@
+//Package assets is a simple helper package to help convert to/from xdr.AssetType values
+package assets
+
+import (
+	"github.com/go-errors/errors"
+	"github.com/stellar/go/xdr"
+)
+
+// ErrInvalidString gets returned when the string form of the asset type is invalid
+var ErrInvalidString = errors.New("invalid asset type: was not one of 'native', 'credit_alphanum4', 'credit_alphanum12', 'liquidity_pool_shares'")
+
+//ErrInvalidValue gets returned when the xdr.AssetType int value is not one of the valid enum values
+var ErrInvalidValue = errors.New("unknown asset type, cannot convert to string")
+
+// AssetTypeMap is the read-only (i.e. don't modify it) map from string names to
+// xdr.AssetType values
+var AssetTypeMap = map[string]xdr.AssetType{
+	"native":            xdr.AssetTypeAssetTypeNative,
+	"credit_alphanum4":  xdr.AssetTypeAssetTypeCreditAlphanum4,
+	"credit_alphanum12": xdr.AssetTypeAssetTypeCreditAlphanum12,
+}
+
+//Parse creates an asset from the provided string. See AssetTypeMap for valid strings for aType.
+func Parse(aType string) (result xdr.AssetType, err error) {
+	result, ok := AssetTypeMap[aType]
+	if !ok {
+		err = errors.New(ErrInvalidString)
+	}
+
+	return
+}
+
+//String returns the appropriate string representation of the provided xdr.AssetType.
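+//
+// Illustrative round trip (a sketch mirroring the tests in main_test.go):
+//
+//	at, _ := Parse("credit_alphanum4") // xdr.AssetTypeAssetTypeCreditAlphanum4, nil
+//	s, _ := String(at)                 // "credit_alphanum4", nil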
+func String(aType xdr.AssetType) (string, error) {
+	for s, v := range AssetTypeMap {
+		if v == aType {
+			return s, nil
+		}
+	}
+
+	return "", errors.New(ErrInvalidValue)
+}
diff --git a/services/horizon/internal/assets/main_test.go b/services/horizon/internal/assets/main_test.go
new file mode 100644
index 0000000000..ef10e5c1c5
--- /dev/null
+++ b/services/horizon/internal/assets/main_test.go
@@ -0,0 +1,59 @@
+package assets
+
+import (
+	"testing"
+
+	"github.com/go-errors/errors"
+	"github.com/stellar/go/xdr"
+	"github.com/stretchr/testify/assert"
+)
+
+// Tests for Parse
+func TestParseAssets(t *testing.T) {
+	var (
+		result xdr.AssetType
+		err    error
+	)
+
+	result, err = Parse("native")
+	assert.Equal(t, xdr.AssetTypeAssetTypeNative, result)
+
+	assert.Nil(t, err)
+
+	result, err = Parse("credit_alphanum4")
+	assert.Equal(t, xdr.AssetTypeAssetTypeCreditAlphanum4, result)
+	assert.Nil(t, err)
+
+	result, err = Parse("credit_alphanum12")
+	assert.Equal(t, xdr.AssetTypeAssetTypeCreditAlphanum12, result)
+	assert.Nil(t, err)
+
+	_, err = Parse("not_real")
+	assert.True(t, errors.Is(err, ErrInvalidString))
+
+	_, err = Parse("")
+	assert.True(t, errors.Is(err, ErrInvalidString))
+}
+
+// Tests for String
+func TestStringAssets(t *testing.T) {
+	var (
+		result string
+		err    error
+	)
+
+	result, err = String(xdr.AssetTypeAssetTypeNative)
+	assert.Equal(t, "native", result)
+	assert.Nil(t, err)
+
+	result, err = String(xdr.AssetTypeAssetTypeCreditAlphanum4)
+	assert.Equal(t, "credit_alphanum4", result)
+	assert.Nil(t, err)
+
+	result, err = String(xdr.AssetTypeAssetTypeCreditAlphanum12)
+	assert.Equal(t, "credit_alphanum12", result)
+	assert.Nil(t, err)
+
+	_, err = String(xdr.AssetType(15))
+	assert.True(t, errors.Is(err, ErrInvalidValue))
+}
diff --git a/services/horizon/internal/codes/main.go b/services/horizon/internal/codes/main.go
new file mode 100644
index 0000000000..4c8f8379ee
--- /dev/null
+++ b/services/horizon/internal/codes/main.go
@@ -0,0 +1,557 @@
+//Package codes is a helper package to help convert transaction and operation result codes
+//to the strings used in horizon.
+package codes
+
+import (
+	"github.com/go-errors/errors"
+	"github.com/stellar/go/xdr"
+)
+
+// ErrUnknownCode is returned when an unexpected value is provided to `String`
+var ErrUnknownCode = errors.New("Unknown result code")
+
+const (
+	// OpSuccess is the string code used to specify the operation was successful
+	OpSuccess = "op_success"
+	// OpMalformed is the string code used to specify the operation was malformed
+	// in some way.
+	OpMalformed = "op_malformed"
+	// OpUnderfunded is the string code used to specify the operation failed
+	// due to a lack of funds.
+	OpUnderfunded = "op_underfunded"
+	// OpLowReserve is the string code used to specify the operation failed
+	// because the account in question does not have enough balance to satisfy
+	// what their new minimum balance would be.
+	OpLowReserve = "op_low_reserve"
+	// OpLineFull occurs when a payment would cause a destination account to
+	// exceed their declared trust limit for the asset being sent.
+ OpLineFull = "op_line_full" + // OpNoIssuer occurs when a operation does not correctly specify an issuing + // asset + OpNoIssuer = "op_no_issuer" + // OpNoTrust occurs when there is no trust line to a given asset + OpNoTrust = "op_no_trust" + // OpNotAuthorized occurs when a trust line is not authorized + OpNotAuthorized = "op_not_authorized" + // OpDoesNotExist occurs when claimable balance or sponsorship does not exist + OpDoesNotExist = "op_does_not_exist" +) + +//String returns the appropriate string representation of the provided result code +func String(code interface{}) (string, error) { + switch code := code.(type) { + case xdr.TransactionResultCode: + switch code { + case xdr.TransactionResultCodeTxFeeBumpInnerSuccess: + return "tx_fee_bump_inner_success", nil + case xdr.TransactionResultCodeTxFeeBumpInnerFailed: + return "tx_fee_bump_inner_failed", nil + case xdr.TransactionResultCodeTxNotSupported: + return "tx_not_supported", nil + case xdr.TransactionResultCodeTxSuccess: + return "tx_success", nil + case xdr.TransactionResultCodeTxFailed: + return "tx_failed", nil + case xdr.TransactionResultCodeTxTooEarly: + return "tx_too_early", nil + case xdr.TransactionResultCodeTxTooLate: + return "tx_too_late", nil + case xdr.TransactionResultCodeTxMissingOperation: + return "tx_missing_operation", nil + case xdr.TransactionResultCodeTxBadSeq: + return "tx_bad_seq", nil + case xdr.TransactionResultCodeTxBadAuth: + return "tx_bad_auth", nil + case xdr.TransactionResultCodeTxInsufficientBalance: + return "tx_insufficient_balance", nil + case xdr.TransactionResultCodeTxNoAccount: + return "tx_no_source_account", nil + case xdr.TransactionResultCodeTxInsufficientFee: + return "tx_insufficient_fee", nil + case xdr.TransactionResultCodeTxBadAuthExtra: + return "tx_bad_auth_extra", nil + case xdr.TransactionResultCodeTxInternalError: + return "tx_internal_error", nil + case xdr.TransactionResultCodeTxBadSponsorship: + return "tx_bad_sponsorship", nil + } + case xdr.OperationResultCode: + switch code { + case xdr.OperationResultCodeOpInner: + return "op_inner", nil + case xdr.OperationResultCodeOpBadAuth: + return "op_bad_auth", nil + case xdr.OperationResultCodeOpNoAccount: + return "op_no_source_account", nil + case xdr.OperationResultCodeOpNotSupported: + return "op_not_supported", nil + case xdr.OperationResultCodeOpTooManySubentries: + return "op_too_many_subentries", nil + case xdr.OperationResultCodeOpExceededWorkLimit: + return "op_exceeded_work_limit", nil + } + case xdr.CreateAccountResultCode: + switch code { + case xdr.CreateAccountResultCodeCreateAccountSuccess: + return OpSuccess, nil + case xdr.CreateAccountResultCodeCreateAccountMalformed: + return OpMalformed, nil + case xdr.CreateAccountResultCodeCreateAccountUnderfunded: + return OpUnderfunded, nil + case xdr.CreateAccountResultCodeCreateAccountLowReserve: + return OpLowReserve, nil + case xdr.CreateAccountResultCodeCreateAccountAlreadyExist: + return "op_already_exists", nil + } + case xdr.PaymentResultCode: + switch code { + case xdr.PaymentResultCodePaymentSuccess: + return OpSuccess, nil + case xdr.PaymentResultCodePaymentMalformed: + return OpMalformed, nil + case xdr.PaymentResultCodePaymentUnderfunded: + return OpUnderfunded, nil + case xdr.PaymentResultCodePaymentSrcNoTrust: + return "op_src_no_trust", nil + case xdr.PaymentResultCodePaymentSrcNotAuthorized: + return "op_src_not_authorized", nil + case xdr.PaymentResultCodePaymentNoDestination: + return "op_no_destination", nil + case 
xdr.PaymentResultCodePaymentNoTrust: + return OpNoTrust, nil + case xdr.PaymentResultCodePaymentNotAuthorized: + return OpNotAuthorized, nil + case xdr.PaymentResultCodePaymentLineFull: + return OpLineFull, nil + case xdr.PaymentResultCodePaymentNoIssuer: + return OpNoIssuer, nil + } + case xdr.PathPaymentStrictReceiveResultCode: + switch code { + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess: + return OpSuccess, nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveMalformed: + return OpMalformed, nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveUnderfunded: + return OpUnderfunded, nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSrcNoTrust: + return "op_src_no_trust", nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSrcNotAuthorized: + return "op_src_not_authorized", nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoDestination: + return "op_no_destination", nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoTrust: + return OpNoTrust, nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNotAuthorized: + return OpNotAuthorized, nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveLineFull: + return OpLineFull, nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoIssuer: + return OpNoIssuer, nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveTooFewOffers: + return "op_too_few_offers", nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveOfferCrossSelf: + return "op_cross_self", nil + case xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveOverSendmax: + return "op_over_source_max", nil + } + case xdr.ManageBuyOfferResultCode: + switch code { + case xdr.ManageBuyOfferResultCodeManageBuyOfferSuccess: + return OpSuccess, nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferMalformed: + return OpMalformed, nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferSellNoTrust: + return "op_sell_no_trust", nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferBuyNoTrust: + return "op_buy_no_trust", nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferSellNotAuthorized: + return "sell_not_authorized", nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferBuyNotAuthorized: + return "buy_not_authorized", nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferLineFull: + return OpLineFull, nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferUnderfunded: + return OpUnderfunded, nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferCrossSelf: + return "op_cross_self", nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferSellNoIssuer: + return "op_sell_no_issuer", nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferBuyNoIssuer: + return "buy_no_issuer", nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferNotFound: + return "op_offer_not_found", nil + case xdr.ManageBuyOfferResultCodeManageBuyOfferLowReserve: + return OpLowReserve, nil + } + case xdr.ManageSellOfferResultCode: + switch code { + case xdr.ManageSellOfferResultCodeManageSellOfferSuccess: + return OpSuccess, nil + case xdr.ManageSellOfferResultCodeManageSellOfferMalformed: + return OpMalformed, nil + case xdr.ManageSellOfferResultCodeManageSellOfferSellNoTrust: + return "op_sell_no_trust", nil + case xdr.ManageSellOfferResultCodeManageSellOfferBuyNoTrust: + return "op_buy_no_trust", nil + case xdr.ManageSellOfferResultCodeManageSellOfferSellNotAuthorized: + return 
"sell_not_authorized", nil + case xdr.ManageSellOfferResultCodeManageSellOfferBuyNotAuthorized: + return "buy_not_authorized", nil + case xdr.ManageSellOfferResultCodeManageSellOfferLineFull: + return OpLineFull, nil + case xdr.ManageSellOfferResultCodeManageSellOfferUnderfunded: + return OpUnderfunded, nil + case xdr.ManageSellOfferResultCodeManageSellOfferCrossSelf: + return "op_cross_self", nil + case xdr.ManageSellOfferResultCodeManageSellOfferSellNoIssuer: + return "op_sell_no_issuer", nil + case xdr.ManageSellOfferResultCodeManageSellOfferBuyNoIssuer: + return "buy_no_issuer", nil + case xdr.ManageSellOfferResultCodeManageSellOfferNotFound: + return "op_offer_not_found", nil + case xdr.ManageSellOfferResultCodeManageSellOfferLowReserve: + return OpLowReserve, nil + } + case xdr.SetOptionsResultCode: + switch code { + case xdr.SetOptionsResultCodeSetOptionsSuccess: + return OpSuccess, nil + case xdr.SetOptionsResultCodeSetOptionsLowReserve: + return OpLowReserve, nil + case xdr.SetOptionsResultCodeSetOptionsTooManySigners: + return "op_too_many_signers", nil + case xdr.SetOptionsResultCodeSetOptionsBadFlags: + return "op_bad_flags", nil + case xdr.SetOptionsResultCodeSetOptionsInvalidInflation: + return "op_invalid_inflation", nil + case xdr.SetOptionsResultCodeSetOptionsCantChange: + return "op_cant_change", nil + case xdr.SetOptionsResultCodeSetOptionsUnknownFlag: + return "op_unknown_flag", nil + case xdr.SetOptionsResultCodeSetOptionsThresholdOutOfRange: + return "op_threshold_out_of_range", nil + case xdr.SetOptionsResultCodeSetOptionsBadSigner: + return "op_bad_signer", nil + case xdr.SetOptionsResultCodeSetOptionsInvalidHomeDomain: + return "op_invalid_home_domain", nil + case xdr.SetOptionsResultCodeSetOptionsAuthRevocableRequired: + return "op_auth_revocable_required", nil + } + case xdr.ChangeTrustResultCode: + switch code { + case xdr.ChangeTrustResultCodeChangeTrustSuccess: + return OpSuccess, nil + case xdr.ChangeTrustResultCodeChangeTrustMalformed: + return OpMalformed, nil + case xdr.ChangeTrustResultCodeChangeTrustNoIssuer: + return OpNoIssuer, nil + case xdr.ChangeTrustResultCodeChangeTrustInvalidLimit: + return "op_invalid_limit", nil + case xdr.ChangeTrustResultCodeChangeTrustLowReserve: + return OpLowReserve, nil + case xdr.ChangeTrustResultCodeChangeTrustSelfNotAllowed: + return "op_self_not_allowed", nil + case xdr.ChangeTrustResultCodeChangeTrustTrustLineMissing: + return "op_trust_line_missing", nil + case xdr.ChangeTrustResultCodeChangeTrustCannotDelete: + return "op_cannot_delete", nil + case xdr.ChangeTrustResultCodeChangeTrustNotAuthMaintainLiabilities: + return "op_not_aut_maintain_liabilities", nil + } + case xdr.AllowTrustResultCode: + switch code { + case xdr.AllowTrustResultCodeAllowTrustSuccess: + return OpSuccess, nil + case xdr.AllowTrustResultCodeAllowTrustMalformed: + return OpMalformed, nil + case xdr.AllowTrustResultCodeAllowTrustNoTrustLine: + return OpNoTrust, nil + case xdr.AllowTrustResultCodeAllowTrustTrustNotRequired: + return "op_not_required", nil + case xdr.AllowTrustResultCodeAllowTrustCantRevoke: + return "op_cant_revoke", nil + case xdr.AllowTrustResultCodeAllowTrustSelfNotAllowed: + return "op_self_not_allowed", nil + case xdr.AllowTrustResultCodeAllowTrustLowReserve: + return OpLowReserve, nil + } + case xdr.AccountMergeResultCode: + switch code { + case xdr.AccountMergeResultCodeAccountMergeSuccess: + return OpSuccess, nil + case xdr.AccountMergeResultCodeAccountMergeMalformed: + return OpMalformed, nil + case 
xdr.AccountMergeResultCodeAccountMergeNoAccount: + return "op_no_account", nil + case xdr.AccountMergeResultCodeAccountMergeImmutableSet: + return "op_immutable_set", nil + case xdr.AccountMergeResultCodeAccountMergeHasSubEntries: + return "op_has_sub_entries", nil + case xdr.AccountMergeResultCodeAccountMergeSeqnumTooFar: + return "op_seq_num_too_far", nil + case xdr.AccountMergeResultCodeAccountMergeDestFull: + return "op_dest_full", nil + case xdr.AccountMergeResultCodeAccountMergeIsSponsor: + return "op_is_sponsor", nil + } + case xdr.InflationResultCode: + switch code { + case xdr.InflationResultCodeInflationSuccess: + return OpSuccess, nil + case xdr.InflationResultCodeInflationNotTime: + return "op_not_time", nil + } + case xdr.ManageDataResultCode: + switch code { + case xdr.ManageDataResultCodeManageDataSuccess: + return OpSuccess, nil + case xdr.ManageDataResultCodeManageDataNotSupportedYet: + return "op_not_supported_yet", nil + case xdr.ManageDataResultCodeManageDataNameNotFound: + return "op_data_name_not_found", nil + case xdr.ManageDataResultCodeManageDataLowReserve: + return OpLowReserve, nil + case xdr.ManageDataResultCodeManageDataInvalidName: + return "op_data_invalid_name", nil + } + case xdr.BumpSequenceResultCode: + switch code { + case xdr.BumpSequenceResultCodeBumpSequenceSuccess: + return OpSuccess, nil + case xdr.BumpSequenceResultCodeBumpSequenceBadSeq: + return "op_bad_seq", nil + } + case xdr.PathPaymentStrictSendResultCode: + switch code { + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess: + return OpSuccess, nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendMalformed: + return OpMalformed, nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendUnderfunded: + return OpUnderfunded, nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendSrcNoTrust: + return "op_src_no_trust", nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendSrcNotAuthorized: + return "op_src_not_authorized", nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendNoDestination: + return "op_no_destination", nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendNoTrust: + return OpNoTrust, nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendNotAuthorized: + return OpNotAuthorized, nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendLineFull: + return OpLineFull, nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendNoIssuer: + return OpNoIssuer, nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendTooFewOffers: + return "op_too_few_offers", nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendOfferCrossSelf: + return "op_cross_self", nil + case xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendUnderDestmin: + return "op_under_dest_min", nil + } + case xdr.CreateClaimableBalanceResultCode: + switch code { + case xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess: + return OpSuccess, nil + case xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceMalformed: + return OpMalformed, nil + case xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceLowReserve: + return OpLowReserve, nil + case xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceNoTrust: + return OpNoTrust, nil + case xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceNotAuthorized: + return OpNotAuthorized, nil + case xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceUnderfunded: + return "op_underfunded", nil + } + 
case xdr.ClaimClaimableBalanceResultCode: + switch code { + case xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceSuccess: + return OpSuccess, nil + case xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceDoesNotExist: + return OpDoesNotExist, nil + case xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceCannotClaim: + return "op_cannot_claim", nil + case xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceLineFull: + return OpLineFull, nil + case xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceNoTrust: + return OpNoTrust, nil + case xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceNotAuthorized: + return OpNotAuthorized, nil + } + case xdr.BeginSponsoringFutureReservesResultCode: + switch code { + case xdr.BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesSuccess: + return OpSuccess, nil + case xdr.BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesMalformed: + return OpMalformed, nil + case xdr.BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesAlreadySponsored: + return "op_already_sponsored", nil + case xdr.BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesRecursive: + return "op_recursive", nil + } + case xdr.EndSponsoringFutureReservesResultCode: + switch code { + case xdr.EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesSuccess: + return OpSuccess, nil + case xdr.EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesNotSponsored: + return "op_not_sponsored", nil + } + case xdr.RevokeSponsorshipResultCode: + switch code { + case xdr.RevokeSponsorshipResultCodeRevokeSponsorshipSuccess: + return OpSuccess, nil + case xdr.RevokeSponsorshipResultCodeRevokeSponsorshipDoesNotExist: + return OpDoesNotExist, nil + case xdr.RevokeSponsorshipResultCodeRevokeSponsorshipNotSponsor: + return "op_not_sponsor", nil + case xdr.RevokeSponsorshipResultCodeRevokeSponsorshipLowReserve: + return OpLowReserve, nil + case xdr.RevokeSponsorshipResultCodeRevokeSponsorshipOnlyTransferable: + return "op_only_transferable", nil + case xdr.RevokeSponsorshipResultCodeRevokeSponsorshipMalformed: + return OpMalformed, nil + } + case xdr.ClawbackResultCode: + switch code { + case xdr.ClawbackResultCodeClawbackSuccess: + return OpSuccess, nil + case xdr.ClawbackResultCodeClawbackMalformed: + return OpMalformed, nil + case xdr.ClawbackResultCodeClawbackNotClawbackEnabled: + return "op_not_clawback_enabled", nil + case xdr.ClawbackResultCodeClawbackNoTrust: + return OpNoTrust, nil + case xdr.ClawbackResultCodeClawbackUnderfunded: + return OpUnderfunded, nil + } + case xdr.ClawbackClaimableBalanceResultCode: + switch code { + case xdr.ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceSuccess: + return OpSuccess, nil + case xdr.ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceDoesNotExist: + return OpDoesNotExist, nil + case xdr.ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceNotIssuer: + return OpNoIssuer, nil + case xdr.ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceNotClawbackEnabled: + return "op_not_clawback_enabled", nil + } + case xdr.SetTrustLineFlagsResultCode: + switch code { + case xdr.SetTrustLineFlagsResultCodeSetTrustLineFlagsSuccess: + return OpSuccess, nil + case xdr.SetTrustLineFlagsResultCodeSetTrustLineFlagsMalformed: + return OpMalformed, nil + case xdr.SetTrustLineFlagsResultCodeSetTrustLineFlagsNoTrustLine: + return OpNoTrust, nil + case xdr.SetTrustLineFlagsResultCodeSetTrustLineFlagsCantRevoke: + return "op_cant_revoke", nil + case 
xdr.SetTrustLineFlagsResultCodeSetTrustLineFlagsInvalidState: + return "op_invalid_state", nil + case xdr.SetTrustLineFlagsResultCodeSetTrustLineFlagsLowReserve: + return OpLowReserve, nil + } + case xdr.LiquidityPoolDepositResultCode: + switch code { + case xdr.LiquidityPoolDepositResultCodeLiquidityPoolDepositSuccess: + return OpSuccess, nil + case xdr.LiquidityPoolDepositResultCodeLiquidityPoolDepositMalformed: + return OpMalformed, nil + case xdr.LiquidityPoolDepositResultCodeLiquidityPoolDepositNoTrust: + return OpNoTrust, nil + case xdr.LiquidityPoolDepositResultCodeLiquidityPoolDepositNotAuthorized: + return OpNotAuthorized, nil + case xdr.LiquidityPoolDepositResultCodeLiquidityPoolDepositUnderfunded: + return OpUnderfunded, nil + case xdr.LiquidityPoolDepositResultCodeLiquidityPoolDepositLineFull: + return OpLineFull, nil + case xdr.LiquidityPoolDepositResultCodeLiquidityPoolDepositBadPrice: + return "op_bad_price", nil + case xdr.LiquidityPoolDepositResultCodeLiquidityPoolDepositPoolFull: + return "op_pool_full", nil + } + case xdr.LiquidityPoolWithdrawResultCode: + switch code { + case xdr.LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawSuccess: + return OpSuccess, nil + case xdr.LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawMalformed: + return OpMalformed, nil + case xdr.LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawNoTrust: + return OpNoTrust, nil + case xdr.LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawUnderfunded: + return OpUnderfunded, nil + case xdr.LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawLineFull: + return OpLineFull, nil + case xdr.LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawUnderMinimum: + return "op_under_minimum", nil + } + } + + return "", errors.New(ErrUnknownCode) +} + +// ForOperationResult returns the strong representation used by horizon for the +// error code `opr` +func ForOperationResult(opr xdr.OperationResult) (string, error) { + if opr.Code != xdr.OperationResultCodeOpInner { + return String(opr.Code) + } + + ir := opr.MustTr() + var ic interface{} + + switch ir.Type { + case xdr.OperationTypeCreateAccount: + ic = ir.MustCreateAccountResult().Code + case xdr.OperationTypePayment: + ic = ir.MustPaymentResult().Code + case xdr.OperationTypePathPaymentStrictReceive: + ic = ir.MustPathPaymentStrictReceiveResult().Code + case xdr.OperationTypeManageBuyOffer: + ic = ir.MustManageBuyOfferResult().Code + case xdr.OperationTypeManageSellOffer: + ic = ir.MustManageSellOfferResult().Code + case xdr.OperationTypeCreatePassiveSellOffer: + ic = ir.MustCreatePassiveSellOfferResult().Code + case xdr.OperationTypeSetOptions: + ic = ir.MustSetOptionsResult().Code + case xdr.OperationTypeChangeTrust: + ic = ir.MustChangeTrustResult().Code + case xdr.OperationTypeAllowTrust: + ic = ir.MustAllowTrustResult().Code + case xdr.OperationTypeAccountMerge: + ic = ir.MustAccountMergeResult().Code + case xdr.OperationTypeInflation: + ic = ir.MustInflationResult().Code + case xdr.OperationTypeManageData: + ic = ir.MustManageDataResult().Code + case xdr.OperationTypeBumpSequence: + ic = ir.MustBumpSeqResult().Code + case xdr.OperationTypePathPaymentStrictSend: + ic = ir.MustPathPaymentStrictSendResult().Code + case xdr.OperationTypeCreateClaimableBalance: + ic = ir.MustCreateClaimableBalanceResult().Code + case xdr.OperationTypeClaimClaimableBalance: + ic = ir.MustClaimClaimableBalanceResult().Code + case xdr.OperationTypeBeginSponsoringFutureReserves: + ic = ir.MustBeginSponsoringFutureReservesResult().Code + case 
xdr.OperationTypeEndSponsoringFutureReserves: + ic = ir.MustEndSponsoringFutureReservesResult().Code + case xdr.OperationTypeRevokeSponsorship: + ic = ir.MustRevokeSponsorshipResult().Code + case xdr.OperationTypeClawback: + ic = ir.MustClawbackResult().Code + case xdr.OperationTypeClawbackClaimableBalance: + ic = ir.MustClawbackClaimableBalanceResult().Code + case xdr.OperationTypeSetTrustLineFlags: + ic = ir.MustSetTrustLineFlagsResult().Code + case xdr.OperationTypeLiquidityPoolDeposit: + ic = ir.MustLiquidityPoolDepositResult().Code + case xdr.OperationTypeLiquidityPoolWithdraw: + ic = ir.MustLiquidityPoolWithdrawResult().Code + } + + return String(ic) +} diff --git a/services/horizon/internal/codes/main_test.go b/services/horizon/internal/codes/main_test.go new file mode 100644 index 0000000000..1a0ce3e601 --- /dev/null +++ b/services/horizon/internal/codes/main_test.go @@ -0,0 +1,131 @@ +package codes + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestForOperationResultCoversForAllOpTypes(t *testing.T) { + for typ, s := range xdr.OperationTypeToStringMap { + result := xdr.OperationResult{ + Code: xdr.OperationResultCodeOpInner, + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationType(typ), + }, + } + f := func() { + ForOperationResult(result) + } + // it must panic because the operation result is not set + assert.Panics(t, f, s) + } + + // Check if all operations' codes are covered + resultTypes := map[xdr.OperationType]reflect.Type{ + xdr.OperationTypeCreateAccount: reflect.TypeOf(xdr.CreateAccountResultCode(0)), + xdr.OperationTypePayment: reflect.TypeOf(xdr.PaymentResultCode(0)), + xdr.OperationTypePathPaymentStrictReceive: reflect.TypeOf(xdr.PathPaymentStrictReceiveResultCode(0)), + xdr.OperationTypeManageSellOffer: reflect.TypeOf(xdr.ManageSellOfferResultCode(0)), + xdr.OperationTypeCreatePassiveSellOffer: reflect.TypeOf(xdr.ManageSellOfferResultCode(0)), + xdr.OperationTypeSetOptions: reflect.TypeOf(xdr.SetOptionsResultCode(0)), + xdr.OperationTypeChangeTrust: reflect.TypeOf(xdr.ChangeTrustResultCode(0)), + xdr.OperationTypeAllowTrust: reflect.TypeOf(xdr.AllowTrustResultCode(0)), + xdr.OperationTypeAccountMerge: reflect.TypeOf(xdr.AccountMergeResultCode(0)), + xdr.OperationTypeInflation: reflect.TypeOf(xdr.InflationResultCode(0)), + xdr.OperationTypeManageData: reflect.TypeOf(xdr.ManageDataResultCode(0)), + xdr.OperationTypeBumpSequence: reflect.TypeOf(xdr.BumpSequenceResultCode(0)), + xdr.OperationTypeManageBuyOffer: reflect.TypeOf(xdr.ManageBuyOfferResultCode(0)), + xdr.OperationTypePathPaymentStrictSend: reflect.TypeOf(xdr.PathPaymentStrictSendResultCode(0)), + xdr.OperationTypeCreateClaimableBalance: reflect.TypeOf(xdr.CreateClaimableBalanceResultCode(0)), + xdr.OperationTypeClaimClaimableBalance: reflect.TypeOf(xdr.ClaimClaimableBalanceResultCode(0)), + xdr.OperationTypeBeginSponsoringFutureReserves: reflect.TypeOf(xdr.BeginSponsoringFutureReservesResultCode(0)), + xdr.OperationTypeEndSponsoringFutureReserves: reflect.TypeOf(xdr.EndSponsoringFutureReservesResultCode(0)), + xdr.OperationTypeRevokeSponsorship: reflect.TypeOf(xdr.RevokeSponsorshipResultCode(0)), + xdr.OperationTypeClawback: reflect.TypeOf(xdr.ClawbackResultCode(0)), + xdr.OperationTypeClawbackClaimableBalance: reflect.TypeOf(xdr.ClawbackClaimableBalanceResultCode(0)), + xdr.OperationTypeSetTrustLineFlags: reflect.TypeOf(xdr.SetTrustLineFlagsResultCode(0)), + xdr.OperationTypeLiquidityPoolDeposit: 
reflect.TypeOf(xdr.LiquidityPoolDepositResultCode(0)), + xdr.OperationTypeLiquidityPoolWithdraw: reflect.TypeOf(xdr.LiquidityPoolWithdrawResultCode(0)), + } + // If this is not equal it means one or more result struct is missing in resultTypes map. + assert.Equal(t, len(xdr.OperationTypeToStringMap), len(resultTypes)) + + type validEnum interface { + ValidEnum(v int32) bool + } + + for _, resultCode := range resultTypes { + integerCode := int32(0) + for { + // Create a new variable of result code type and set it to the current + // integer Code. + val := reflect.New(resultCode).Elem() + val.SetInt(int64(integerCode)) + + // Then check if integer value of the code is valid. If it's not, break. + // We exploit the fact that the code's integer values are a sequence: + // [0, -1, -2, ...]. + iValue := val.Interface() + valid := iValue.(validEnum).ValidEnum(integerCode) + if !valid { + break + } + + res, err := String(iValue) + if assert.NoError(t, err, fmt.Sprintf("type=%T code=%d not implemented", iValue, iValue)) { + // Ensure value is not empty even when implemented + assert.NotEmpty(t, res, fmt.Sprintf("type=%T code=%d empty", iValue, iValue)) + } + integerCode-- + } + } + + // make sure the check works for an unknown operation type + result := xdr.OperationResult{ + Code: xdr.OperationResultCodeOpInner, + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationType(200000), + }, + } + f := func() { + ForOperationResult(result) + } + // it doesn't panic because it doesn't branch out into the operation type + assert.NotPanics(t, f) +} + +func TestString(t *testing.T) { + tests := []struct { + Input interface{} + Expected string + Err error + }{ + {xdr.TransactionResultCodeTxSuccess, "tx_success", nil}, + {xdr.OperationResultCodeOpBadAuth, "op_bad_auth", nil}, + {xdr.CreateAccountResultCodeCreateAccountLowReserve, "op_low_reserve", nil}, + {xdr.PaymentResultCodePaymentSrcNoTrust, "op_src_no_trust", nil}, + {xdr.SetOptionsResultCodeSetOptionsAuthRevocableRequired, "op_auth_revocable_required", nil}, + {xdr.ClawbackResultCodeClawbackNotClawbackEnabled, "op_not_clawback_enabled", nil}, + {0, "", ErrUnknownCode}, + } + + for _, test := range tests { + actual, err := String(test.Input) + + if test.Err != nil { + assert.NotNil(t, err) + assert.Equal(t, test.Err.Error(), err.Error()) + } else { + assert.Nil(t, err) + assert.Equal(t, test.Expected, actual) + } + } +} + +//TODO: op_inner refers to inner result code +//TODO: non op_inner uses the outer result code +//TODO: one test for each operation type diff --git a/services/horizon/internal/config.go b/services/horizon/internal/config.go new file mode 100644 index 0000000000..b9e69d3b36 --- /dev/null +++ b/services/horizon/internal/config.go @@ -0,0 +1,98 @@ +package horizon + +import ( + "net/url" + "time" + + "github.com/stellar/go/ingest/ledgerbackend" + + "github.com/sirupsen/logrus" + "github.com/stellar/throttled" +) + +// Config is the configuration for horizon. It gets populated by the +// app's main function and is provided to NewApp. 
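+//
+// Illustrative minimal configuration (a sketch; the values are examples only and
+// most fields are omitted):
+//
+//	cfg := Config{
+//		DatabaseURL:        "postgres://localhost/horizon?sslmode=disable",
+//		StellarCoreURL:     "http://localhost:11626",
+//		HistoryArchiveURLs: []string{"https://history.stellar.org/prd/core-live/core_live_001"},
+//		NetworkPassphrase:  "Public Global Stellar Network ; September 2015",
+//		Port:               8000,
+//		Ingest:             true,
+//	}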
+type Config struct {
+	DatabaseURL        string
+	RoDatabaseURL      string
+	HistoryArchiveURLs []string
+	Port               uint
+	AdminPort          uint
+
+	EnableCaptiveCoreIngestion  bool
+	UsingDefaultPubnetConfig    bool
+	CaptiveCoreBinaryPath       string
+	RemoteCaptiveCoreURL        string
+	CaptiveCoreConfigPath       string
+	CaptiveCoreTomlParams       ledgerbackend.CaptiveCoreTomlParams
+	CaptiveCoreToml             *ledgerbackend.CaptiveCoreToml
+	CaptiveCoreStoragePath      string
+	CaptiveCoreReuseStoragePath bool
+
+	StellarCoreDatabaseURL string
+	StellarCoreURL         string
+
+	// MaxDBConnections has priority over the values below.
+	MaxDBConnections            int
+	HorizonDBMaxOpenConnections int
+	HorizonDBMaxIdleConnections int
+
+	SSEUpdateFrequency time.Duration
+	ConnectionTimeout  time.Duration
+	RateQuota          *throttled.RateQuota
+	FriendbotURL       *url.URL
+	LogLevel           logrus.Level
+	LogFile            string
+
+	// MaxPathLength is the maximum length of the path returned by `/paths` endpoint.
+	MaxPathLength uint
+	// MaxAssetsPerPathRequest is the maximum number of assets considered for `/paths/strict-send` and `/paths/strict-receive`
+	MaxAssetsPerPathRequest int
+	DisablePoolPathFinding  bool
+
+	NetworkPassphrase string
+	SentryDSN         string
+	LogglyToken       string
+	LogglyTag         string
+	// TLSCert is a path to a certificate file to use for horizon's TLS config
+	TLSCert string
+	// TLSKey is the path to a private key file to use for horizon's TLS config
+	TLSKey string
+	// Ingest toggles whether this horizon instance should run the data ingestion subsystem.
+	Ingest bool
+	// CursorName is the cursor used for ingesting from stellar-core.
+	// Setting multiple cursors in different Horizon instances allows multiple
+	// Horizons to ingest from the same stellar-core instance without cursor
+	// collisions.
+	CursorName string
+	// HistoryRetentionCount represents the minimum number of ledgers worth of
+	// history data to retain in the horizon database. For the purposes of
+	// determining a "retention duration", each ledger roughly corresponds to 10
+	// seconds of real time.
+	HistoryRetentionCount uint
+	// StaleThreshold represents the number of ledgers a history database may be
+	// out-of-date by before horizon begins to respond with an error to history
+	// requests.
+	StaleThreshold uint
+	// SkipCursorUpdate causes the ingestor to skip reporting the "last imported
+	// ledger" state to stellar-core.
+	SkipCursorUpdate bool
+	// IngestDisableStateVerification disables state verification
+	// `System.verifyState()` when set to `true`.
+	IngestDisableStateVerification bool
+	// IngestEnableExtendedLogLedgerStats enables extended ledger stats in
+	// logging.
+	IngestEnableExtendedLogLedgerStats bool
+	// ApplyMigrations will apply pending migrations to the horizon database
+	// before starting the horizon service
+	ApplyMigrations bool
+	// CheckpointFrequency establishes how many ledgers exist between checkpoints
+	CheckpointFrequency uint32
+	// BehindCloudflare determines if Horizon instance is behind Cloudflare. In
+	// such case http.Request.RemoteAddr will be replaced with Cloudflare header.
+	BehindCloudflare bool
+	// BehindAWSLoadBalancer determines if Horizon instance is behind AWS load
+	// balancers like ELB or ALB. In such case http.Request.RemoteAddr will be
+	// replaced with the last IP in X-Forwarded-For header.
+ BehindAWSLoadBalancer bool +} diff --git a/services/horizon/internal/context/context.go b/services/horizon/internal/context/context.go new file mode 100644 index 0000000000..eefac63a76 --- /dev/null +++ b/services/horizon/internal/context/context.go @@ -0,0 +1,63 @@ +package context + +import ( + "context" + "errors" + "net/http" + "net/url" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/db" +) + +type CtxKey string + +var RequestContextKey = CtxKey("request") +var SessionContextKey = CtxKey("session") + +func RequestFromContext(ctx context.Context) *http.Request { + found, _ := ctx.Value(&RequestContextKey).(*http.Request) + return found +} + +// requestContext returns a context representing the provided http action. +func RequestContext(ctx context.Context, w http.ResponseWriter, r *http.Request) context.Context { + if r == nil { + panic("Cannot bind nil *http.Request to context tree") + } + + return context.WithValue(ctx, &RequestContextKey, r) +} + +// BaseURL returns the "base" url for this request, defined as a url containing +// the Host and Scheme portions of the request uri. +func BaseURL(ctx context.Context) *url.URL { + r := RequestFromContext(ctx) + if r == nil { + return nil + } + + var scheme string + switch { + case r.Header.Get("X-Forwarded-Proto") != "": + scheme = r.Header.Get("X-Forwarded-Proto") + case r.TLS != nil: + scheme = "https" + default: + scheme = "http" + } + + return &url.URL{ + Scheme: scheme, + Host: r.Host, + } +} + +func HistoryQFromRequest(request *http.Request) (*history.Q, error) { + ctx := request.Context() + session, ok := ctx.Value(&SessionContextKey).(db.SessionInterface) + if !ok { + return nil, errors.New("missing session in request context") + } + return &history.Q{session}, nil +} diff --git a/services/horizon/internal/corestate/main.go b/services/horizon/internal/corestate/main.go new file mode 100644 index 0000000000..7bb0ba3e00 --- /dev/null +++ b/services/horizon/internal/corestate/main.go @@ -0,0 +1,75 @@ +package corestate + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stellar/go/protocols/stellarcore" +) + +type State struct { + Synced bool + CurrentProtocolVersion int32 + CoreSupportedProtocolVersion int32 + CoreVersion string +} + +type Store struct { + sync.RWMutex + state State + + // metrics + Metrics struct { + CoreSynced prometheus.GaugeFunc + CoreSupportedProtocolVersion prometheus.GaugeFunc + } +} + +func (c *Store) Set(resp *stellarcore.InfoResponse) { + c.Lock() + defer c.Unlock() + c.state.Synced = resp.IsSynced() + c.state.CoreVersion = resp.Info.Build + c.state.CurrentProtocolVersion = int32(resp.Info.Ledger.Version) + c.state.CoreSupportedProtocolVersion = int32(resp.Info.ProtocolVersion) +} + +func (c *Store) SetState(state State) { + c.Lock() + defer c.Unlock() + c.state = state +} + +func (c *Store) Get() State { + c.RLock() + defer c.RUnlock() + return c.state +} + +func (c *Store) RegisterMetrics(registry *prometheus.Registry) { + c.Metrics.CoreSynced = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "stellar_core", Name: "synced", + Help: "determines if Stellar-Core defined by --stellar-core-url is synced with the network", + }, + func() float64 { + if c.Get().Synced { + return 1 + } else { + return 0 + } + }, + ) + registry.MustRegister(c.Metrics.CoreSynced) + + c.Metrics.CoreSupportedProtocolVersion = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: 
"stellar_core", Name: "supported_protocol_version", + Help: "determines the supported version of the protocol by Stellar-Core defined by --stellar-core-url", + }, + func() float64 { + return float64(c.Get().CoreSupportedProtocolVersion) + }, + ) + registry.MustRegister(c.Metrics.CoreSupportedProtocolVersion) +} diff --git a/services/horizon/internal/db2/assets/asset_stat.go b/services/horizon/internal/db2/assets/asset_stat.go new file mode 100644 index 0000000000..c7a5e69eec --- /dev/null +++ b/services/horizon/internal/db2/assets/asset_stat.go @@ -0,0 +1,60 @@ +package assets + +import ( + sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/services/horizon/internal/db2" +) + +// PagingToken implementation for hal.Pageable +//func (res AssetStat) PagingToken() string { +// return res.PT +//} + +// AssetStatsQ is the query to fetch all assets in the system +type AssetStatsQ struct { + AssetCode *string + AssetIssuer *string + PageQuery *db2.PageQuery +} + +// GetSQL allows this query to be executed by the caller +func (q AssetStatsQ) GetSQL() (sq.SelectBuilder, error) { + sql := selectQuery + if q.AssetCode != nil && *q.AssetCode != "" { + sql = sql.Where("hist.asset_code = ?", *q.AssetCode) + } + if q.AssetIssuer != nil && *q.AssetIssuer != "" { + sql = sql.Where("hist.asset_issuer = ?", *q.AssetIssuer) + } + + var err error + if q.PageQuery != nil { + // cursor needs to work for descending case as well + cursor := q.PageQuery.Cursor + if q.PageQuery.Order == "desc" && cursor == "" { + cursor = "zzzzzzzzzzzzz" // 12 + 1 "z"s so it will always be greater than the _ delimiter since code is max 12 chars + } + + sql, err = q.PageQuery.ApplyToUsingCursor(sql, "concat(hist.asset_code, '_', hist.asset_issuer, '_', hist.asset_type)", cursor) + if err != nil { + return sql, err + } + } else { + sql = sql.OrderBy("sort_key ASC") + } + return sql, nil +} + +var selectQuery = sq. + Select( + "concat(hist.asset_code, '_', hist.asset_issuer, '_', hist.asset_type) as sort_key", + "hist.asset_type", + "hist.asset_code", + "hist.asset_issuer", + "stats.amount", + "stats.num_accounts", + "stats.flags", + "stats.toml", + ). + From("history_assets hist"). 
+ Join("asset_stats stats ON hist.id = stats.id") diff --git a/services/horizon/internal/db2/assets/asset_stat_test.go b/services/horizon/internal/db2/assets/asset_stat_test.go new file mode 100644 index 0000000000..c87edc19cb --- /dev/null +++ b/services/horizon/internal/db2/assets/asset_stat_test.go @@ -0,0 +1,107 @@ +package assets + +import ( + "context" + "strconv" + "testing" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" +) + +// AssetStatsR is the result from the AssetStatsQ query +type AssetStatsR struct { + SortKey string `db:"sort_key"` + Type string `db:"asset_type"` + Code string `db:"asset_code"` + Issuer string `db:"asset_issuer"` + Amount string `db:"amount"` + NumAccounts int32 `db:"num_accounts"` + Flags int8 `db:"flags"` + Toml string `db:"toml"` +} + +func TestAssetsStatsQExec(t *testing.T) { + item0 := AssetStatsR{ + SortKey: "BTC_GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4_credit_alphanum4", + Type: "credit_alphanum4", + Code: "BTC", + Issuer: "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", + Amount: "1009876000", + NumAccounts: 1, + Flags: 1, + Toml: "https://test.com/.well-known/stellar.toml", + } + + item1 := AssetStatsR{ + SortKey: "SCOT_GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU_credit_alphanum4", + Type: "credit_alphanum4", + Code: "SCOT", + Issuer: "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + Amount: "10000000000", + NumAccounts: 1, + Flags: 2, + Toml: "", + } + + item2 := AssetStatsR{ + SortKey: "USD_GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4_credit_alphanum4", + Type: "credit_alphanum4", + Code: "USD", + Issuer: "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", + Amount: "3000010434000", + NumAccounts: 2, + Flags: 1, + Toml: "https://test.com/.well-known/stellar.toml", + } + + testCases := []struct { + query AssetStatsQ + want []AssetStatsR + }{ + { + AssetStatsQ{}, + []AssetStatsR{item0, item1, item2}, + }, { + AssetStatsQ{ + PageQuery: &db2.PageQuery{ + Order: "asc", + Limit: 10, + }, + }, + []AssetStatsR{item0, item1, item2}, + }, { + AssetStatsQ{ + PageQuery: &db2.PageQuery{ + Order: "desc", + Limit: 10, + }, + }, + []AssetStatsR{item2, item1, item0}, + }, + } + + for i, kase := range testCases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + tt := test.Start(t) + tt.Scenario("ingest_asset_stats") + defer tt.Finish() + + sql, err := kase.query.GetSQL() + tt.Require.NoError(err) + + var results []AssetStatsR + err = history.Q{SessionInterface: tt.HorizonSession()}.Select(context.Background(), &results, sql) + tt.Require.NoError(err) + if !tt.Assert.Equal(3, len(results)) { + return + } + + tt.Assert.Equal(len(kase.want), len(results)) + for i := range kase.want { + tt.Assert.Equal(kase.want[i], results[i]) + } + }) + } +} diff --git a/services/horizon/internal/db2/history/account.go b/services/horizon/internal/db2/history/account.go new file mode 100644 index 0000000000..1b0c34da58 --- /dev/null +++ b/services/horizon/internal/db2/history/account.go @@ -0,0 +1,82 @@ +package history + +import ( + "context" + "sort" + + sq "github.com/Masterminds/squirrel" + + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" +) + +// AccountByAddress loads a row from `history_accounts`, by address +func (q *Q) AccountByAddress(ctx context.Context, dest interface{}, addy string) error { + sql := selectAccount.Limit(1).Where("ha.address = 
?", addy) + return q.Get(ctx, dest, sql) +} + +// AccountsByAddresses loads a rows from `history_accounts`, by addresses +func (q *Q) AccountsByAddresses(ctx context.Context, dest interface{}, addresses []string) error { + sql := selectAccount.Where(map[string]interface{}{ + "ha.address": addresses, // ha.address IN (...) + }) + return q.Select(ctx, dest, sql) +} + +// CreateAccounts creates rows in the history_accounts table for a given list of addresses. +// CreateAccounts returns a mapping of account address to its corresponding id in the history_accounts table +func (q *Q) CreateAccounts(ctx context.Context, addresses []string, batchSize int) (map[string]int64, error) { + builder := &db.BatchInsertBuilder{ + Table: q.GetTable("history_accounts"), + MaxBatchSize: batchSize, + Suffix: "ON CONFLICT (address) DO NOTHING", + } + + // sort assets before inserting rows into history_assets to prevent deadlocks on acquiring a ShareLock + // https://github.com/stellar/go/issues/2370 + sort.Strings(addresses) + var deduped []string + for i, address := range addresses { + if i > 0 && address == addresses[i-1] { + // skip duplicates + continue + } + deduped = append(deduped, address) + err := builder.Row(ctx, map[string]interface{}{ + "address": address, + }) + if err != nil { + return nil, errors.Wrap(err, "could not insert history_accounts row") + } + } + + err := builder.Exec(ctx) + if err != nil { + return nil, errors.Wrap(err, "could not exec asset insert builder") + } + + addressToID := map[string]int64{} + const selectBatchSize = 10000 + + for i := 0; i < len(deduped); i += selectBatchSize { + end := i + selectBatchSize + if end > len(deduped) { + end = len(deduped) + } + subset := deduped[i:end] + + var accounts []Account + if err := q.AccountsByAddresses(ctx, &accounts, subset); err != nil { + return nil, errors.Wrap(err, "could not select accounts") + } + + for _, account := range accounts { + addressToID[account.Address] = account.ID + } + } + + return addressToID, nil +} + +var selectAccount = sq.Select("ha.*").From("history_accounts ha") diff --git a/services/horizon/internal/db2/history/account_data.go b/services/horizon/internal/db2/history/account_data.go new file mode 100644 index 0000000000..cb3cede489 --- /dev/null +++ b/services/horizon/internal/db2/history/account_data.go @@ -0,0 +1,144 @@ +package history + +import ( + "context" + "encoding/base64" + + sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +func (q *Q) CountAccountsData(ctx context.Context) (int, error) { + sql := sq.Select("count(*)").From("accounts_data") + + var count int + if err := q.Get(ctx, &count, sql); err != nil { + return 0, errors.Wrap(err, "could not run select query") + } + + return count, nil +} + +// GetAccountDataByName loads account data for a given account ID and data name +func (q *Q) GetAccountDataByName(ctx context.Context, id, name string) (Data, error) { + var data Data + sql := selectAccountData.Where(sq.Eq{ + "account_id": id, + "name": name, + }).Limit(1) + err := q.Get(ctx, &data, sql) + return data, err +} + +// GetAccountDataByAccountID loads account data for a given account ID +func (q *Q) GetAccountDataByAccountID(ctx context.Context, id string) ([]Data, error) { + var data []Data + sql := selectAccountData.Where(sq.Eq{"account_id": id}) + err := q.Select(ctx, &data, sql) + return data, err +} + +// GetAccountDataByKeys loads a row from the `accounts_data` table, selected by multiple keys. 
+func (q *Q) GetAccountDataByKeys(ctx context.Context, keys []AccountDataKey) ([]Data, error) { + var data []Data + lkeys := make([]string, 0, len(keys)) + for _, key := range keys { + lkey, err := accountDataKeyToString(key) + if err != nil { + return nil, errors.Wrap(err, "Error running accountDataKeyToString") + } + lkeys = append(lkeys, lkey) + } + sql := selectAccountData.Where(map[string]interface{}{"accounts_data.ledger_key": lkeys}) + err := q.Select(ctx, &data, sql) + return data, err +} + +func accountDataKeyToString(key AccountDataKey) (string, error) { + var aid xdr.AccountId + err := aid.SetAddress(key.AccountID) + if err != nil { + return "", err + } + var ledgerKey xdr.LedgerKey + if err = ledgerKey.SetData(aid, key.DataName); err != nil { + return "", errors.Wrap(err, "Error running ledgerKey.SetData") + } + lKey, err := ledgerKey.MarshalBinary() + if err != nil { + return "", errors.Wrap(err, "Error running MarshalBinaryCompress") + } + + return base64.StdEncoding.EncodeToString(lKey), nil +} + +// UpsertAccountData upserts a batch of data in the account_Data table. +func (q *Q) UpsertAccountData(ctx context.Context, data []Data) error { + var ledgerKey, accountID, name, value, lastModifiedLedger, sponsor []interface{} + + for _, d := range data { + key, err := accountDataKeyToString(AccountDataKey{ + AccountID: d.AccountID, + DataName: d.Name, + }) + if err != nil { + return err + } + ledgerKey = append(ledgerKey, key) + accountID = append(accountID, d.AccountID) + name = append(name, d.Name) + value = append(value, d.Value) + lastModifiedLedger = append(lastModifiedLedger, d.LastModifiedLedger) + sponsor = append(sponsor, d.Sponsor) + } + + upsertFields := []upsertField{ + {"ledger_key", "character varying(150)", ledgerKey}, + {"account_id", "character varying(56)", accountID}, + {"name", "character varying(64)", name}, + {"value", "character varying(90)", value}, + {"last_modified_ledger", "integer", lastModifiedLedger}, + {"sponsor", "text", sponsor}, + } + + return q.upsertRows(ctx, "accounts_data", "ledger_key", upsertFields) +} + +// RemoveAccountData deletes a row in the accounts_data table. +// Returns number of rows affected and error. +func (q *Q) RemoveAccountData(ctx context.Context, keys []AccountDataKey) (int64, error) { + lkeys := make([]string, 0, len(keys)) + for _, key := range keys { + lkey, err := accountDataKeyToString(key) + if err != nil { + return 0, errors.Wrap(err, "Error running accountDataKeyToString") + } + lkeys = append(lkeys, lkey) + } + + sql := sq.Delete("accounts_data"). 
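+		// sq.Eq with a slice value renders as "ledger_key IN (...)", so every
+		// requested key is removed with a single DELETE statement.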
+ Where(sq.Eq{"ledger_key": lkeys}) + result, err := q.Exec(ctx, sql) + if err != nil { + return 0, err + } + + return result.RowsAffected() +} + +// GetAccountDataByAccountsID loads account data for a list of account ID +func (q *Q) GetAccountDataByAccountsID(ctx context.Context, id []string) ([]Data, error) { + var data []Data + sql := selectAccountData.Where(sq.Eq{"account_id": id}) + err := q.Select(ctx, &data, sql) + return data, err +} + +var selectAccountData = sq.Select(` + account_id, + name, + value, + last_modified_ledger, + sponsor +`).From("accounts_data") diff --git a/services/horizon/internal/db2/history/account_data_test.go b/services/horizon/internal/db2/history/account_data_test.go new file mode 100644 index 0000000000..1665d0ab5d --- /dev/null +++ b/services/horizon/internal/db2/history/account_data_test.go @@ -0,0 +1,182 @@ +package history + +import ( + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stretchr/testify/assert" +) + +var ( + data1 = Data{ + AccountID: "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + Name: "test data", + // This also tests if base64 encoding is working as 0 is invalid UTF-8 byte + Value: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + LastModifiedLedger: 1234, + } + + data2 = Data{ + AccountID: "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + Name: "test data2", + Value: []byte{10, 11, 12, 13, 14, 15, 16, 17, 18, 19}, + LastModifiedLedger: 1234, + Sponsor: null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + } +) + +func TestInsertAccountData(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccountData(tt.Ctx, []Data{data1}) + assert.NoError(t, err) + + err = q.UpsertAccountData(tt.Ctx, []Data{data2}) + assert.NoError(t, err) + + keys := []AccountDataKey{ + {AccountID: data1.AccountID, DataName: data1.Name}, + {AccountID: data2.AccountID, DataName: data2.Name}, + } + + datas, err := q.GetAccountDataByKeys(tt.Ctx, keys) + assert.NoError(t, err) + assert.Len(t, datas, 2) + + tt.Assert.Equal(data1.Name, datas[0].Name) + tt.Assert.Equal(data1.Value, datas[0].Value) + tt.Assert.True(datas[0].Sponsor.IsZero()) + + tt.Assert.Equal(data2.Name, datas[1].Name) + tt.Assert.Equal(data2.Value, datas[1].Value) + tt.Assert.Equal("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", datas[1].Sponsor.String) +} + +func TestUpdateAccountData(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccountData(tt.Ctx, []Data{data1}) + assert.NoError(t, err) + + modifiedData := data1 + value2 := make([]byte, len(modifiedData.Value)) + copy(value2, modifiedData.Value) + value2[0] = 1 + modifiedData.Value = value2 + + err = q.UpsertAccountData(tt.Ctx, []Data{modifiedData}) + assert.NoError(t, err) + + keys := []AccountDataKey{ + {AccountID: data1.AccountID, DataName: data1.Name}, + } + datas, err := q.GetAccountDataByKeys(tt.Ctx, keys) + assert.NoError(t, err) + assert.Len(t, datas, 1) + + tt.Assert.Equal(modifiedData.Name, datas[0].Name) + tt.Assert.Equal(modifiedData.Value, datas[0].Value) + tt.Assert.Equal(uint32(1234), datas[0].LastModifiedLedger) +} + +func TestRemoveAccountData(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccountData(tt.Ctx, []Data{data1}) + 
assert.NoError(t, err) + + key := AccountDataKey{AccountID: data1.AccountID, DataName: data1.Name} + rows, err := q.RemoveAccountData(tt.Ctx, []AccountDataKey{key}) + assert.NoError(t, err) + tt.Assert.Equal(int64(1), rows) + + datas, err := q.GetAccountDataByKeys(tt.Ctx, []AccountDataKey{key}) + assert.NoError(t, err) + assert.Len(t, datas, 0) + + // Doesn't exist anymore + rows, err = q.RemoveAccountData(tt.Ctx, []AccountDataKey{key}) + assert.NoError(t, err) + tt.Assert.Equal(int64(0), rows) +} + +func TestGetAccountDataByAccountsID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccountData(tt.Ctx, []Data{data1}) + assert.NoError(t, err) + err = q.UpsertAccountData(tt.Ctx, []Data{data2}) + assert.NoError(t, err) + + ids := []string{ + data1.AccountID, + data2.AccountID, + } + datas, err := q.GetAccountDataByAccountsID(tt.Ctx, ids) + assert.NoError(t, err) + assert.Len(t, datas, 2) + + tt.Assert.Equal(data1.Name, datas[0].Name) + tt.Assert.Equal(data1.Value, datas[0].Value) + + tt.Assert.Equal(data2.Name, datas[1].Name) + tt.Assert.Equal(data2.Value, datas[1].Value) +} + +func TestGetAccountDataByAccountID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccountData(tt.Ctx, []Data{data1}) + assert.NoError(t, err) + err = q.UpsertAccountData(tt.Ctx, []Data{data2}) + assert.NoError(t, err) + + records, err := q.GetAccountDataByAccountID(tt.Ctx, data1.AccountID) + assert.NoError(t, err) + assert.Len(t, records, 2) + + tt.Assert.Equal(data1.Name, records[0].Name) + tt.Assert.Equal(data1.Value, records[0].Value) + + tt.Assert.Equal(data2.Name, records[1].Name) + tt.Assert.Equal(data2.Value, records[1].Value) +} + +func TestGetAccountDataByName(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccountData(tt.Ctx, []Data{data1}) + assert.NoError(t, err) + err = q.UpsertAccountData(tt.Ctx, []Data{data2}) + assert.NoError(t, err) + + record, err := q.GetAccountDataByName(tt.Ctx, data1.AccountID, data1.Name) + assert.NoError(t, err) + tt.Assert.Equal(data1.Name, record.Name) + tt.Assert.Equal(data1.Value, record.Value) + + record, err = q.GetAccountDataByName(tt.Ctx, data1.AccountID, data2.Name) + assert.NoError(t, err) + tt.Assert.Equal(data2.Name, record.Name) + tt.Assert.Equal(data2.Value, record.Value) + +} diff --git a/services/horizon/internal/db2/history/account_data_value.go b/services/horizon/internal/db2/history/account_data_value.go new file mode 100644 index 0000000000..efcd8d319b --- /dev/null +++ b/services/horizon/internal/db2/history/account_data_value.go @@ -0,0 +1,30 @@ +package history + +import ( + "database/sql" + "database/sql/driver" + "encoding/base64" +) + +var _ driver.Valuer = (*AccountDataValue)(nil) +var _ sql.Scanner = (*AccountDataValue)(nil) + +// Scan base64 decodes into an []byte +func (t *AccountDataValue) Scan(src interface{}) error { + decoded, err := base64.StdEncoding.DecodeString(src.(string)) + if err != nil { + return err + } + + *t = decoded + return nil +} + +// Value implements driver.Valuer +func (value AccountDataValue) Value() (driver.Value, error) { + return driver.Value([]uint8(base64.StdEncoding.EncodeToString(value))), nil +} + +func (value AccountDataValue) Base64() string { + return base64.StdEncoding.EncodeToString(value) +} diff --git 
a/services/horizon/internal/db2/history/account_signers.go b/services/horizon/internal/db2/history/account_signers.go new file mode 100644 index 0000000000..32c6cdc583 --- /dev/null +++ b/services/horizon/internal/db2/history/account_signers.go @@ -0,0 +1,84 @@ +package history + +import ( + "context" + + sq "github.com/Masterminds/squirrel" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/support/errors" +) + +func (q *Q) GetAccountSignersByAccountID(ctx context.Context, id string) ([]AccountSigner, error) { + sql := selectAccountSigners. + Where(sq.Eq{"accounts_signers.account_id": id}). + OrderBy("accounts_signers.signer asc") + + var results []AccountSigner + if err := q.Select(ctx, &results, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return results, nil +} + +func (q *Q) SignersForAccounts(ctx context.Context, accounts []string) ([]AccountSigner, error) { + sql := selectAccountSigners. + Where(map[string]interface{}{"accounts_signers.account_id": accounts}) + + var results []AccountSigner + if err := q.Select(ctx, &results, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return results, nil +} + +// AccountsForSigner returns a list of `AccountSigner` rows for a given signer +func (q *Q) AccountsForSigner(ctx context.Context, signer string, page db2.PageQuery) ([]AccountSigner, error) { + sql := selectAccountSigners.Where("accounts_signers.signer = ?", signer) + sql, err := page.ApplyToUsingCursor(sql, "accounts_signers.account_id", page.Cursor) + if err != nil { + return nil, errors.Wrap(err, "could not apply query to page") + } + + var results []AccountSigner + if err := q.Select(ctx, &results, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return results, nil +} + +// CreateAccountSigner creates a row in the accounts_signers table. +// Returns number of rows affected and error. +func (q *Q) CreateAccountSigner(ctx context.Context, account, signer string, weight int32, sponsor *string) (int64, error) { + sql := sq.Insert("accounts_signers"). + Columns("account_id", "signer", "weight", "sponsor"). + Values(account, signer, weight, sponsor) + + result, err := q.Exec(ctx, sql) + if err != nil { + return 0, err + } + + return result.RowsAffected() +} + +// RemoveAccountSigner deletes a row in the accounts_signers table. +// Returns number of rows affected and error. 
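The ledger_key strings used throughout the accounts_data queries are produced by accountDataKeyToString in account_data.go above; a self-contained sketch of the same derivation, assuming this repository's xdr package is available (the helper name here is illustrative, the address comes from the test fixtures):

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/stellar/go/xdr"
)

// dataLedgerKey mirrors accountDataKeyToString: accounts_data.ledger_key
// stores the base64-encoded XDR LedgerKey of the data entry, i.e. the
// account ID plus the data name.
func dataLedgerKey(accountID, name string) (string, error) {
	var aid xdr.AccountId
	if err := aid.SetAddress(accountID); err != nil {
		return "", err
	}
	var key xdr.LedgerKey
	if err := key.SetData(aid, name); err != nil {
		return "", err
	}
	raw, err := key.MarshalBinary()
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(raw), nil
}

func main() {
	k, err := dataLedgerKey("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", "test data")
	if err != nil {
		panic(err)
	}
	fmt.Println(k) // base64 of the XDR-encoded data ledger key
}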
+func (q *Q) RemoveAccountSigner(ctx context.Context, account, signer string) (int64, error) { + sql := sq.Delete("accounts_signers").Where(sq.Eq{ + "account_id": account, + "signer": signer, + }) + + result, err := q.Exec(ctx, sql) + if err != nil { + return 0, err + } + + return result.RowsAffected() +} + +var selectAccountSigners = sq.Select("accounts_signers.*").From("accounts_signers") diff --git a/services/horizon/internal/db2/history/account_signers_batch_insert_builder.go b/services/horizon/internal/db2/history/account_signers_batch_insert_builder.go new file mode 100644 index 0000000000..fd1cab291c --- /dev/null +++ b/services/horizon/internal/db2/history/account_signers_batch_insert_builder.go @@ -0,0 +1,18 @@ +package history + +import ( + "context" +) + +func (i *accountSignersBatchInsertBuilder) Add(ctx context.Context, signer AccountSigner) error { + return i.builder.Row(ctx, map[string]interface{}{ + "account_id": signer.Account, + "signer": signer.Signer, + "weight": signer.Weight, + "sponsor": signer.Sponsor, + }) +} + +func (i *accountSignersBatchInsertBuilder) Exec(ctx context.Context) error { + return i.builder.Exec(ctx) +} diff --git a/services/horizon/internal/db2/history/account_signers_test.go b/services/horizon/internal/db2/history/account_signers_test.go new file mode 100644 index 0000000000..bf4183396b --- /dev/null +++ b/services/horizon/internal/db2/history/account_signers_test.go @@ -0,0 +1,194 @@ +package history + +import ( + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestQueryEmptyAccountSigners(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + signer := "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO0" + results, err := q.AccountsForSigner(tt.Ctx, signer, db2.PageQuery{Order: "asc", Limit: 10}) + tt.Assert.NoError(err) + tt.Assert.Len(results, 0) +} + +func TestInsertAccountSigner(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + account := "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2" + signer := "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4" + weight := int32(123) + rowsAffected, err := q.CreateAccountSigner(tt.Ctx, account, signer, weight, nil) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(1), rowsAffected) + + expected := AccountSigner{ + Account: account, + Signer: signer, + Weight: weight, + } + results, err := q.AccountsForSigner(tt.Ctx, signer, db2.PageQuery{Order: "asc", Limit: 10}) + tt.Assert.NoError(err) + tt.Assert.Len(results, 1) + tt.Assert.Equal(expected, results[0]) + + weight = 321 + _, err = q.CreateAccountSigner(tt.Ctx, account, signer, weight, nil) + tt.Assert.Error(err) + tt.Assert.EqualError(err, `exec failed: pq: duplicate key value violates unique constraint "accounts_signers_pkey"`) +} + +func TestInsertAccountSignerSponsor(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + account := "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2" + signer := "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4" + weight := int32(123) + sponsor := "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" + rowsAffected, err := q.CreateAccountSigner(tt.Ctx, account, signer, weight, &sponsor) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(1), rowsAffected) + + expected := 
AccountSigner{ + Account: account, + Signer: signer, + Weight: weight, + Sponsor: null.StringFrom(sponsor), + } + results, err := q.AccountsForSigner(tt.Ctx, signer, db2.PageQuery{Order: "asc", Limit: 10}) + tt.Assert.NoError(err) + tt.Assert.Len(results, 1) + tt.Assert.Equal(expected, results[0]) +} + +func TestMultipleAccountsForSigner(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + account := "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH1" + signer := "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO2" + weight := int32(123) + rowsAffected, err := q.CreateAccountSigner(tt.Ctx, account, signer, weight, nil) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(1), rowsAffected) + + anotherAccount := "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" + anotherWeight := int32(321) + rowsAffected, err = q.CreateAccountSigner(tt.Ctx, anotherAccount, signer, anotherWeight, nil) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(1), rowsAffected) + + expected := []AccountSigner{ + AccountSigner{ + Account: account, + Signer: signer, + Weight: weight, + }, + AccountSigner{ + Account: anotherAccount, + Signer: signer, + Weight: anotherWeight, + }, + } + results, err := q.AccountsForSigner(tt.Ctx, signer, db2.PageQuery{Order: "asc", Limit: 10}) + tt.Assert.NoError(err) + tt.Assert.Len(results, 2) + tt.Assert.Equal(expected, results) +} + +func TestRemoveNonExistantAccountSigner(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + account := "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH3" + signer := "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO5" + rowsAffected, err := q.RemoveAccountSigner(tt.Ctx, account, signer) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(0), rowsAffected) +} + +func TestRemoveAccountSigner(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + account := "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH6" + signer := "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO7" + weight := int32(123) + _, err := q.CreateAccountSigner(tt.Ctx, account, signer, weight, nil) + tt.Assert.NoError(err) + + expected := AccountSigner{ + Account: account, + Signer: signer, + Weight: weight, + } + results, err := q.AccountsForSigner(tt.Ctx, signer, db2.PageQuery{Order: "asc", Limit: 10}) + tt.Assert.NoError(err) + tt.Assert.Len(results, 1) + tt.Assert.Equal(expected, results[0]) + + rowsAffected, err := q.RemoveAccountSigner(tt.Ctx, account, signer) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(1), rowsAffected) + + results, err = q.AccountsForSigner(tt.Ctx, signer, db2.PageQuery{Order: "asc", Limit: 10}) + tt.Assert.NoError(err) + tt.Assert.Len(results, 0) +} + +func TestGetAccountSignersByAccountID(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + account := "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH6" + signer := "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO7" + weight := int32(123) + _, err := q.CreateAccountSigner(tt.Ctx, account, signer, weight, nil) + tt.Assert.NoError(err) + + signer2 := "GC2WJF6YWMAEHGGAK2UOMZCIOMH4RU7KY2CQEWZQJV2ZQJVXJ335ZSXG" + weight2 := int32(100) + sponsor := "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" + _, err = q.CreateAccountSigner(tt.Ctx, account, signer2, weight2, &sponsor) + tt.Assert.NoError(err) + + expected 
:= []AccountSigner{ + { + Account: account, + Signer: signer, + Weight: weight, + }, + { + Account: account, + Signer: signer2, + Weight: weight2, + Sponsor: null.StringFrom(sponsor), + }, + } + results, err := q.GetAccountSignersByAccountID(tt.Ctx, account) + tt.Assert.NoError(err) + tt.Assert.Len(results, 2) + tt.Assert.Equal(expected, results) +} diff --git a/services/horizon/internal/db2/history/account_test.go b/services/horizon/internal/db2/history/account_test.go new file mode 100644 index 0000000000..c74571c4b6 --- /dev/null +++ b/services/horizon/internal/db2/history/account_test.go @@ -0,0 +1,122 @@ +package history + +import ( + "sort" + "testing" + + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stretchr/testify/assert" +) + +func TestIsAuthRequired(t *testing.T) { + tt := assert.New(t) + + account := AccountEntry{Flags: 1} + tt.True(account.IsAuthRequired()) + + account = AccountEntry{Flags: 0} + tt.False(account.IsAuthRequired()) +} + +func TestIsAuthRevocable(t *testing.T) { + tt := assert.New(t) + + account := AccountEntry{Flags: 2} + tt.True(account.IsAuthRevocable()) + + account = AccountEntry{Flags: 1} + tt.False(account.IsAuthRevocable()) +} +func TestIsAuthImmutable(t *testing.T) { + tt := assert.New(t) + + account := AccountEntry{Flags: 4} + tt.True(account.IsAuthImmutable()) + + account = AccountEntry{Flags: 0} + tt.False(account.IsAuthImmutable()) +} + +func assertAccountsContainAddresses(tt *test.T, accounts map[string]int64, addresses []string) { + tt.Assert.Len(accounts, len(addresses)) + set := map[int64]bool{} + for _, address := range addresses { + accountID, ok := accounts[address] + tt.Assert.True(ok) + tt.Assert.False(set[accountID]) + set[accountID] = true + } +} + +func TestCreateAccountsSortedOrder(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + addresses := []string{ + "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "GCYVFGI3SEQJGBNQQG7YCMFWEYOHK3XPVOVPA6C566PXWN4SN7LILZSM", + "GBYSBDAJZMHL5AMD7QXQ3JEP3Q4GLKADWIJURAAHQALNAWD6Z5XF2RAC", + "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + } + accounts, err := q.CreateAccounts(tt.Ctx, addresses, 1) + tt.Assert.NoError(err) + + idToAddress := map[int64]string{} + sortedIDs := []int64{} + for address, id := range accounts { + idToAddress[id] = address + sortedIDs = append(sortedIDs, id) + } + + sort.Slice(sortedIDs, func(i, j int) bool { + return sortedIDs[i] < sortedIDs[j] + }) + sort.Strings(addresses) + + values := []string{} + for _, id := range sortedIDs { + values = append(values, idToAddress[id]) + } + + tt.Assert.Equal(addresses, values) +} + +func TestCreateAccounts(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + addresses := []string{ + "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + } + accounts, err := q.CreateAccounts(tt.Ctx, addresses, 1) + tt.Assert.NoError(err) + tt.Assert.Len(accounts, 2) + assertAccountsContainAddresses(tt, accounts, addresses) + + dupAccounts, err := q.CreateAccounts(tt.Ctx, []string{ + "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }, 2) + tt.Assert.NoError(err) + tt.Assert.Equal(accounts, dupAccounts) + + addresses = []string{ + "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + 
"GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "GCYVFGI3SEQJGBNQQG7YCMFWEYOHK3XPVOVPA6C566PXWN4SN7LILZSM", + "GBYSBDAJZMHL5AMD7QXQ3JEP3Q4GLKADWIJURAAHQALNAWD6Z5XF2RAC", + } + accounts, err = q.CreateAccounts(tt.Ctx, addresses, 1) + tt.Assert.NoError(err) + assertAccountsContainAddresses(tt, accounts, addresses) + for address, accountID := range dupAccounts { + id, ok := accounts[address] + tt.Assert.True(ok) + tt.Assert.Equal(id, accountID) + } +} diff --git a/services/horizon/internal/db2/history/accounts.go b/services/horizon/internal/db2/history/accounts.go new file mode 100644 index 0000000000..adfb92d6e4 --- /dev/null +++ b/services/horizon/internal/db2/history/accounts.go @@ -0,0 +1,304 @@ +package history + +import ( + "context" + + sq "github.com/Masterminds/squirrel" + jet "github.com/go-jet/jet/v2/postgres" + "github.com/go-jet/jet/v2/qrm" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" + + "github.com/stellar/go/services/horizon/internal/db2/schema/generated/db/horizon/public/table" + "github.com/stellar/go/support/log" +) + +// IsAuthRequired returns true if the account has the "AUTH_REQUIRED" option +// turned on. +func (account AccountEntry) IsAuthRequired() bool { + return xdr.AccountFlags(account.Flags).IsAuthRequired() +} + +// IsAuthRevocable returns true if the account has the "AUTH_REVOCABLE" option +// turned on. +func (account AccountEntry) IsAuthRevocable() bool { + return xdr.AccountFlags(account.Flags).IsAuthRevocable() +} + +// IsAuthImmutable returns true if the account has the "AUTH_IMMUTABLE" option +// turned on. +func (account AccountEntry) IsAuthImmutable() bool { + return xdr.AccountFlags(account.Flags).IsAuthImmutable() +} + +// IsAuthClawbackEnabled returns true if the account has the "AUTH_CLAWBACK_ENABLED" option +// turned on. +func (account AccountEntry) IsAuthClawbackEnabled() bool { + return xdr.AccountFlags(account.Flags).IsAuthClawbackEnabled() +} + +func (q *Q) CountAccounts(ctx context.Context) (int, error) { + sql := sq.Select("count(*)").From("accounts") + + var count int + if err := q.Get(ctx, &count, sql); err != nil { + return 0, errors.Wrap(err, "could not run select query") + } + + return count, nil +} + +func (q *Q) GetAccountByID(ctx context.Context, id string) (AccountEntry, error) { + var account AccountEntry + sql := selectAccounts.Where(sq.Eq{"account_id": id}) + err := q.Get(ctx, &account, sql) + return account, err +} + +func (q *Q) GetAccountsByIDs(ctx context.Context, ids []string) ([]AccountEntry, error) { + var accounts []AccountEntry + sql := selectAccounts.Where(map[string]interface{}{"accounts.account_id": ids}) + err := q.Select(ctx, &accounts, sql) + return accounts, err +} + +// UpsertAccounts upserts a batch of accounts in the accounts table. +// There's currently no limit of the number of accounts this method can +// accept other than 2GB limit of the query string length what should be enough +// for each ledger with the current limits. 
+func (q *Q) UpsertAccounts(ctx context.Context, accounts []AccountEntry) error { + var accountID, inflationDestination, homeDomain, balance, buyingLiabilities, + sellingLiabilities, sequenceNumber, numSubEntries, flags, lastModifiedLedger, + numSponsored, numSponsoring, masterWeight, thresholdLow, thresholdMedium, + thresholdHigh, sponsor []interface{} + + for _, account := range accounts { + accountID = append(accountID, account.AccountID) + balance = append(balance, account.Balance) + buyingLiabilities = append(buyingLiabilities, account.BuyingLiabilities) + sellingLiabilities = append(sellingLiabilities, account.SellingLiabilities) + sequenceNumber = append(sequenceNumber, account.SequenceNumber) + numSubEntries = append(numSubEntries, account.NumSubEntries) + inflationDestination = append(inflationDestination, account.InflationDestination) + homeDomain = append(homeDomain, account.HomeDomain) + flags = append(flags, account.Flags) + masterWeight = append(masterWeight, account.MasterWeight) + thresholdLow = append(thresholdLow, account.ThresholdLow) + thresholdMedium = append(thresholdMedium, account.ThresholdMedium) + thresholdHigh = append(thresholdHigh, account.ThresholdHigh) + lastModifiedLedger = append(lastModifiedLedger, account.LastModifiedLedger) + sponsor = append(sponsor, account.Sponsor) + numSponsored = append(numSponsored, account.NumSponsored) + numSponsoring = append(numSponsoring, account.NumSponsoring) + } + + upsertFields := []upsertField{ + {"account_id", "text", accountID}, + {"balance", "bigint", balance}, + {"buying_liabilities", "bigint", buyingLiabilities}, + {"selling_liabilities", "bigint", sellingLiabilities}, + {"sequence_number", "bigint", sequenceNumber}, + {"num_subentries", "int", numSubEntries}, + {"inflation_destination", "text", inflationDestination}, + {"flags", "int", flags}, + {"home_domain", "text", homeDomain}, + {"master_weight", "int", masterWeight}, + {"threshold_low", "int", thresholdLow}, + {"threshold_medium", "int", thresholdMedium}, + {"threshold_high", "int", thresholdHigh}, + {"last_modified_ledger", "int", lastModifiedLedger}, + {"sponsor", "text", sponsor}, + {"num_sponsored", "int", numSponsored}, + {"num_sponsoring", "int", numSponsoring}, + } + + return q.upsertRows(ctx, "accounts", "account_id", upsertFields) +} + +// RemoveAccounts deletes a row in the accounts table. +// Returns number of rows affected and error. +func (q *Q) RemoveAccounts(ctx context.Context, accountIDs []string) (int64, error) { + sql := sq.Delete("accounts").Where(sq.Eq{"account_id": accountIDs}) + result, err := q.Exec(ctx, sql) + if err != nil { + return 0, err + } + + return result.RowsAffected() +} + +// AccountsForAsset returns a list of `AccountEntry` rows who are trustee to an +// asset +func (q *Q) AccountsForAsset(ctx context.Context, asset xdr.Asset, page db2.PageQuery) ([]AccountEntry, error) { + var assetType, code, issuer string + asset.MustExtract(&assetType, &code, &issuer) + + sql := sq. + Select("accounts.*"). + From("accounts"). + Join("trust_lines ON accounts.account_id = trust_lines.account_id"). 
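+		// Filtering on the joined trust_lines columns (below) restricts the
+		// result to accounts holding a trustline for the requested asset.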
+ Where(map[string]interface{}{ + "trust_lines.asset_type": int32(asset.Type), + "trust_lines.asset_issuer": issuer, + "trust_lines.asset_code": code, + }) + + sql, err := page.ApplyToUsingCursor(sql, "trust_lines.account_id", page.Cursor) + if err != nil { + return nil, errors.Wrap(err, "could not apply query to page") + } + + var results []AccountEntry + if err := q.Select(ctx, &results, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return results, nil +} + +// AccountsForLiquidityPool returns a list of `AccountEntry` rows who are trustee to a +// liquidity pool share asset +func (q *Q) AccountsForLiquidityPool(ctx context.Context, poolID string, page db2.PageQuery) ([]AccountEntry, error) { + sql := sq. + Select("accounts.*"). + From("accounts"). + Join("trust_lines ON accounts.account_id = trust_lines.account_id"). + Where(map[string]interface{}{ + "trust_lines.liquidity_pool_id": poolID, + }) + + sql, err := page.ApplyToUsingCursor(sql, "trust_lines.account_id", page.Cursor) + if err != nil { + return nil, errors.Wrap(err, "could not apply query to page") + } + + var results []AccountEntry + if err := q.Select(ctx, &results, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return results, nil +} + +func selectBySponsor(table, sponsor string, page db2.PageQuery) (sq.SelectBuilder, error) { + sql := sq. + Select("account_id"). + From(table). + Where(map[string]interface{}{ + "sponsor": sponsor, + }) + + sql, err := page.ApplyToUsingCursor(sql, "account_id", page.Cursor) + if err != nil { + return sql, errors.Wrap(err, "could not apply query to page") + } + return sql, err +} + +func selectUnionBySponsor(tables []string, sponsor string, page db2.PageQuery) (sq.SelectBuilder, error) { + var selectIDs sq.SelectBuilder + for i, table := range tables { + sql, err := selectBySponsor(table, sponsor, page) + if err != nil { + return sql, errors.Wrap(err, "could not construct account id query") + } + sql = sql.Prefix("(").Suffix(")") + + if i == 0 { + selectIDs = sql + continue + } + + sqlStr, args, err := sql.ToSql() + if err != nil { + return sql, errors.Wrap(err, "could not construct account id query") + } + selectIDs = selectIDs.Suffix("UNION "+sqlStr, args...) + } + + return sq. + Select("accounts.*"). + FromSelect(selectIDs, "accountSet"). + Join("accounts ON accounts.account_id = accountSet.account_id"). + OrderBy("accounts.account_id " + page.Order). 
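+		// Each per-table subquery already has the page cursor applied; the outer
+		// ORDER BY and LIMIT put the merged UNION back in page order and trim it
+		// to a single page.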
+ Limit(page.Limit), nil +} + +// AccountsForSponsor return all the accounts where `sponsor`` is sponsoring the account entry or +// any of its subentries (trust lines, signers, data, or account entry) +func (q *Q) AccountsForSponsor(ctx context.Context, sponsor string, page db2.PageQuery) ([]AccountEntry, error) { + sql, err := selectUnionBySponsor( + []string{"accounts", "accounts_data", "accounts_signers", "trust_lines"}, + sponsor, + page, + ) + if err != nil { + return nil, errors.Wrap(err, "could not construct accounts query") + } + + var results []AccountEntry + if err := q.Select(ctx, &results, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return results, nil +} + +func currentDBConn(q *Q, tableName string) qrm.DB { + if q.GetTx() != nil { + return q.GetTx() + } else { + return q.GetTable(tableName).Session.DB + } +} + +// AccountEntriesForSigner returns a list of `AccountEntry` rows for a given signer +func (q *Q) AccountEntriesForSigner(ctx context.Context, signer string, page db2.PageQuery) ([]AccountEntry, error) { + var results []AccountEntry + + byId := table.AccountsSigners.Signer.EQ(jet.String(signer)) + sql := jet.SELECT(table.Accounts.AS("AccountEntry").AllColumns). + FROM(table.Accounts.AS("AccountEntry"). + INNER_JOIN(table.AccountsSigners, + table.Accounts.AS("AccountEntry").AccountID.EQ(table.AccountsSigners.AccountID))). + WHERE(byId) + + sql, err := page.ApplyToJetUsingCursor(sql, + table.AccountsSigners.AccountID, + table.AccountsSigners.AccountID.GT(jet.String(page.Cursor)), + table.AccountsSigners.AccountID.LT(jet.String(page.Cursor)), + byId, + ) + + if err != nil { + return nil, errors.Wrap(err, "could not apply query to page") + } + + query, args := sql.Sql() + log.Debugf("sql was :%v values: %v)", query, args) + + err = sql.Query(currentDBConn(q, table.Accounts.TableName()), &results) + + return results, err +} + +var selectAccounts = sq.Select(` + account_id, + balance, + buying_liabilities, + selling_liabilities, + sequence_number, + num_subentries, + inflation_destination, + flags, + home_domain, + master_weight, + threshold_low, + threshold_medium, + threshold_high, + last_modified_ledger, + sponsor, + num_sponsored, + num_sponsoring +`).From("accounts") diff --git a/services/horizon/internal/db2/history/accounts_test.go b/services/horizon/internal/db2/history/accounts_test.go new file mode 100644 index 0000000000..bbd3c17ca0 --- /dev/null +++ b/services/horizon/internal/db2/history/accounts_test.go @@ -0,0 +1,458 @@ +package history + +import ( + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +var ( + inflationDest = "GBUH7T6U36DAVEKECMKN5YEBQYZVRBPNSZAAKBCO6P5HBMDFSQMQL4Z4" + sponsor = "GCO26ZSBD63TKYX45H2C7D2WOFWOUSG5BMTNC3BG4QMXM3PAYI6WHKVZ" + + account1 = AccountEntry{ + LastModifiedLedger: 1234, + AccountID: "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + Balance: 20000, + SequenceNumber: 223456789, + NumSubEntries: 10, + InflationDestination: inflationDest, + Flags: 1, + HomeDomain: "stellar.org", + MasterWeight: 1, + ThresholdLow: 2, + ThresholdMedium: 3, + ThresholdHigh: 4, + BuyingLiabilities: 3, + SellingLiabilities: 4, + } + + account2 = AccountEntry{ + LastModifiedLedger: 1235, + AccountID: "GCT2NQM5KJJEF55NPMY444C6M6CA7T33HRNCMA6ZFBIIXKNCRO6J25K7", + Balance: 50000, + SequenceNumber: 648736, + NumSubEntries: 
10, + InflationDestination: inflationDest, + Flags: 2, + HomeDomain: "meridian.stellar.org", + MasterWeight: 5, + ThresholdLow: 6, + ThresholdMedium: 7, + ThresholdHigh: 8, + BuyingLiabilities: 30, + SellingLiabilities: 40, + NumSponsored: 1, + NumSponsoring: 2, + Sponsor: null.StringFrom(sponsor), + } + + account3 = AccountEntry{ + LastModifiedLedger: 1235, + AccountID: "GDPGOMFSP4IF7A4P7UBKA4UC4QTRLEHGBD6IMDIS3W3KBDNBFAQ7FXDY", + Balance: 50000, + SequenceNumber: 648736, + NumSubEntries: 10, + InflationDestination: inflationDest, + Flags: 2, + MasterWeight: 5, + ThresholdLow: 6, + ThresholdMedium: 7, + ThresholdHigh: 8, + BuyingLiabilities: 30, + SellingLiabilities: 40, + } +) + +func TestInsertAccount(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccounts(tt.Ctx, []AccountEntry{account1, account2}) + assert.NoError(t, err) + + accounts, err := q.GetAccountsByIDs(tt.Ctx, []string{ + "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + "GCT2NQM5KJJEF55NPMY444C6M6CA7T33HRNCMA6ZFBIIXKNCRO6J25K7", + }) + assert.NoError(t, err) + assert.Len(t, accounts, 2) + + assert.Equal(t, "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", accounts[0].AccountID) + assert.Equal(t, int64(20000), accounts[0].Balance) + assert.Equal(t, int64(223456789), accounts[0].SequenceNumber) + assert.Equal(t, uint32(10), accounts[0].NumSubEntries) + assert.Equal(t, "GBUH7T6U36DAVEKECMKN5YEBQYZVRBPNSZAAKBCO6P5HBMDFSQMQL4Z4", accounts[0].InflationDestination) + assert.Equal(t, uint32(1), accounts[0].Flags) + assert.Equal(t, "stellar.org", accounts[0].HomeDomain) + assert.Equal(t, byte(1), accounts[0].MasterWeight) + assert.Equal(t, byte(2), accounts[0].ThresholdLow) + assert.Equal(t, byte(3), accounts[0].ThresholdMedium) + assert.Equal(t, byte(4), accounts[0].ThresholdHigh) + assert.Equal(t, int64(3), accounts[0].BuyingLiabilities) + assert.Equal(t, int64(4), accounts[0].SellingLiabilities) + assert.Equal(t, uint32(0), accounts[0].NumSponsored) + assert.Equal(t, uint32(0), accounts[0].NumSponsoring) + assert.Equal(t, null.String{}, accounts[0].Sponsor) + + assert.Equal(t, uint32(1), accounts[1].NumSponsored) + assert.Equal(t, uint32(2), accounts[1].NumSponsoring) + assert.Equal(t, null.StringFrom(sponsor), accounts[1].Sponsor) +} + +func TestUpsertAccount(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + ledgerEntries := []AccountEntry{account1, account2} + err := q.UpsertAccounts(tt.Ctx, ledgerEntries) + assert.NoError(t, err) + + modifiedAccount := AccountEntry{ + LastModifiedLedger: 1234, + AccountID: "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + Balance: 32847893, + SequenceNumber: 223456789, + NumSubEntries: 10, + InflationDestination: inflationDest, + Flags: 1, + HomeDomain: "stellar.org", + MasterWeight: 1, + ThresholdLow: 2, + ThresholdMedium: 3, + ThresholdHigh: 4, + BuyingLiabilities: 3, + SellingLiabilities: 4, + } + + err = q.UpsertAccounts(tt.Ctx, []AccountEntry{modifiedAccount}) + assert.NoError(t, err) + + keys := []string{ + account1.AccountID, + account2.AccountID, + } + accounts, err := q.GetAccountsByIDs(tt.Ctx, keys) + assert.NoError(t, err) + assert.Len(t, accounts, 2) + + assert.Equal(t, uint32(1), accounts[0].NumSponsored) + assert.Equal(t, uint32(2), accounts[0].NumSponsoring) + assert.Equal(t, null.StringFrom(sponsor), accounts[0].Sponsor) + + assert.Equal(t, uint32(0), 
accounts[1].NumSponsored) + assert.Equal(t, uint32(0), accounts[1].NumSponsoring) + assert.Equal(t, null.String{}, accounts[1].Sponsor) + + accounts, err = q.GetAccountsByIDs(tt.Ctx, []string{account1.AccountID}) + assert.NoError(t, err) + assert.Len(t, accounts, 1) + + assert.Equal(t, modifiedAccount, accounts[0]) + assert.Equal(t, uint32(1234), accounts[0].LastModifiedLedger) + + accounts, err = q.GetAccountsByIDs(tt.Ctx, []string{account2.AccountID}) + assert.NoError(t, err) + assert.Len(t, accounts, 1) + + assert.Equal(t, account2, accounts[0]) + assert.Equal(t, uint32(1235), accounts[0].LastModifiedLedger) +} + +func TestRemoveAccount(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccounts(tt.Ctx, []AccountEntry{account1}) + assert.NoError(t, err) + + var rows int64 + rows, err = q.RemoveAccounts(tt.Ctx, []string{"GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"}) + assert.NoError(t, err) + assert.Equal(t, int64(1), rows) + + accounts, err := q.GetAccountsByIDs(tt.Ctx, []string{"GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"}) + assert.NoError(t, err) + assert.Len(t, accounts, 0) + + // Doesn't exist anymore + rows, err = q.RemoveAccounts(tt.Ctx, []string{"GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"}) + assert.NoError(t, err) + assert.Equal(t, int64(0), rows) +} + +func TestAccountsForAsset(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + eurTL := eurTrustLine + usdTL := usdTrustLine + psTL := poolShareTrustLine + + eurTL.AccountID = account1.AccountID + usdTL.AccountID = account2.AccountID + psTL.AccountID = account1.AccountID + + err := q.UpsertAccounts(tt.Ctx, []AccountEntry{account1, account2}) + assert.NoError(t, err) + + tt.Assert.NoError(q.UpsertTrustLines(tt.Ctx, []TrustLine{ + eurTL, + usdTL, + psTL, + })) + + pq := db2.PageQuery{ + Order: db2.OrderAscending, + Limit: db2.DefaultPageSize, + Cursor: "", + } + + accounts, err := q.AccountsForAsset( + tt.Ctx, + xdr.MustNewCreditAsset(eurTL.AssetCode, eurTL.AssetIssuer), + pq, + ) + assert.NoError(t, err) + tt.Assert.Len(accounts, 1) + tt.Assert.Equal(account1.AccountID, accounts[0].AccountID) + + accounts, err = q.AccountsForAsset( + tt.Ctx, + xdr.MustNewCreditAsset(usdTL.AssetCode, usdTL.AssetIssuer), + pq, + ) + assert.NoError(t, err) + tt.Assert.Len(accounts, 1) + tt.Assert.Equal(account2.AccountID, accounts[0].AccountID) + + pq.Cursor = account2.AccountID + accounts, err = q.AccountsForAsset( + tt.Ctx, + xdr.MustNewCreditAsset(usdTL.AssetCode, usdTL.AssetIssuer), + pq, + ) + assert.NoError(t, err) + tt.Assert.Len(accounts, 0) + + pq = db2.PageQuery{ + Order: db2.OrderDescending, + Limit: db2.DefaultPageSize, + Cursor: "", + } + + accounts, err = q.AccountsForAsset( + tt.Ctx, + xdr.MustNewCreditAsset(usdTL.AssetCode, eurTL.AssetIssuer), + pq, + ) + assert.NoError(t, err) + tt.Assert.Len(accounts, 1) +} + +func TestAccountsForLiquidityPool(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + eurTL := eurTrustLine + psTL := poolShareTrustLine + + eurTL.AccountID = account1.AccountID + psTL.AccountID = account1.AccountID + + err := q.UpsertAccounts(tt.Ctx, []AccountEntry{account1}) + assert.NoError(t, err) + + tt.Assert.NoError(q.UpsertTrustLines(tt.Ctx, []TrustLine{ + eurTL, + psTL, + })) + + pq := db2.PageQuery{ + Order: 
db2.OrderAscending, + Limit: db2.DefaultPageSize, + Cursor: "", + } + + accounts, err := q.AccountsForLiquidityPool( + tt.Ctx, + psTL.LiquidityPoolID, + pq, + ) + assert.NoError(t, err) + tt.Assert.Len(accounts, 1) + tt.Assert.Equal(account1.AccountID, accounts[0].AccountID) + + pq.Cursor = account1.AccountID + accounts, err = q.AccountsForLiquidityPool( + tt.Ctx, + psTL.LiquidityPoolID, + pq, + ) + assert.NoError(t, err) + tt.Assert.Len(accounts, 0) +} + +func TestAccountsForSponsor(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + eurTL := eurTrustLine + usdTL := usdTrustLine + + eurTL.AccountID = account1.AccountID + usdTL.AccountID = account2.AccountID + + err := q.UpsertAccounts(tt.Ctx, []AccountEntry{account1, account2, account3}) + assert.NoError(t, err) + + tt.Assert.NoError(q.UpsertTrustLines(tt.Ctx, []TrustLine{eurTL, usdTL})) + + _, err = q.CreateAccountSigner(tt.Ctx, account1.AccountID, account1.AccountID, 1, nil) + tt.Assert.NoError(err) + _, err = q.CreateAccountSigner(tt.Ctx, account2.AccountID, account2.AccountID, 1, nil) + tt.Assert.NoError(err) + _, err = q.CreateAccountSigner(tt.Ctx, account3.AccountID, account3.AccountID, 1, nil) + tt.Assert.NoError(err) + _, err = q.CreateAccountSigner(tt.Ctx, account1.AccountID, account3.AccountID, 1, nil) + tt.Assert.NoError(err) + _, err = q.CreateAccountSigner(tt.Ctx, account2.AccountID, account3.AccountID, 1, nil) + tt.Assert.NoError(err) + + pq := db2.PageQuery{ + Order: db2.OrderAscending, + Limit: db2.DefaultPageSize, + Cursor: "", + } + + accounts, err := q.AccountsForSponsor(tt.Ctx, sponsor, pq) + assert.NoError(t, err) + tt.Assert.Len(accounts, 2) + tt.Assert.Equal(account1.AccountID, accounts[0].AccountID) + tt.Assert.Equal(account2.AccountID, accounts[1].AccountID) +} + +func TestAccountEntriesForSigner(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + eurTL := eurTrustLine + usdTL := usdTrustLine + + eurTL.AccountID = account1.AccountID + usdTL.AccountID = account2.AccountID + + err := q.UpsertAccounts(tt.Ctx, []AccountEntry{account1, account2, account3}) + assert.NoError(t, err) + + tt.Assert.NoError(q.UpsertTrustLines(tt.Ctx, []TrustLine{eurTL, usdTL})) + + _, err = q.CreateAccountSigner(tt.Ctx, account1.AccountID, account1.AccountID, 1, nil) + tt.Assert.NoError(err) + _, err = q.CreateAccountSigner(tt.Ctx, account2.AccountID, account2.AccountID, 1, nil) + tt.Assert.NoError(err) + _, err = q.CreateAccountSigner(tt.Ctx, account3.AccountID, account3.AccountID, 1, nil) + tt.Assert.NoError(err) + _, err = q.CreateAccountSigner(tt.Ctx, account1.AccountID, account3.AccountID, 1, nil) + tt.Assert.NoError(err) + _, err = q.CreateAccountSigner(tt.Ctx, account2.AccountID, account3.AccountID, 1, nil) + tt.Assert.NoError(err) + + pq := db2.PageQuery{ + Order: db2.OrderAscending, + Limit: db2.DefaultPageSize, + Cursor: "", + } + + accounts, err := q.AccountEntriesForSigner(tt.Ctx, account1.AccountID, pq) + assert.NoError(t, err) + tt.Assert.Len(accounts, 1) + tt.Assert.Equal(account1.AccountID, accounts[0].AccountID) + + accounts, err = q.AccountEntriesForSigner(tt.Ctx, account2.AccountID, pq) + assert.NoError(t, err) + tt.Assert.Len(accounts, 1) + tt.Assert.Equal(account2.AccountID, accounts[0].AccountID) + + want := map[string]bool{ + account1.AccountID: true, + account2.AccountID: true, + account3.AccountID: true, + } + + accounts, err = 
q.AccountEntriesForSigner(tt.Ctx, account3.AccountID, pq) + assert.NoError(t, err) + tt.Assert.Len(accounts, 3) + + for _, account := range accounts { + tt.Assert.True(want[account.AccountID]) + delete(want, account.AccountID) + } + + tt.Assert.Len(want, 0) + + pq.Cursor = accounts[len(accounts)-1].AccountID + accounts, err = q.AccountEntriesForSigner(tt.Ctx, account3.AccountID, pq) + assert.NoError(t, err) + tt.Assert.Len(accounts, 0) + + pq.Order = "desc" + accounts, err = q.AccountEntriesForSigner(tt.Ctx, account3.AccountID, pq) + assert.NoError(t, err) + tt.Assert.Len(accounts, 2) + + pq = db2.PageQuery{ + Order: db2.OrderDescending, + Limit: db2.DefaultPageSize, + Cursor: "", + } + + accounts, err = q.AccountEntriesForSigner(tt.Ctx, account1.AccountID, pq) + assert.NoError(t, err) + tt.Assert.Len(accounts, 1) +} + +func TestGetAccountByID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccounts(tt.Ctx, []AccountEntry{account1}) + assert.NoError(t, err) + + resultAccount, err := q.GetAccountByID(tt.Ctx, account1.AccountID) + assert.NoError(t, err) + + assert.Equal(t, "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", resultAccount.AccountID) + assert.Equal(t, int64(20000), resultAccount.Balance) + assert.Equal(t, int64(223456789), resultAccount.SequenceNumber) + assert.Equal(t, uint32(10), resultAccount.NumSubEntries) + assert.Equal(t, "GBUH7T6U36DAVEKECMKN5YEBQYZVRBPNSZAAKBCO6P5HBMDFSQMQL4Z4", resultAccount.InflationDestination) + assert.Equal(t, uint32(1), resultAccount.Flags) + assert.Equal(t, "stellar.org", resultAccount.HomeDomain) + assert.Equal(t, byte(1), resultAccount.MasterWeight) + assert.Equal(t, byte(2), resultAccount.ThresholdLow) + assert.Equal(t, byte(3), resultAccount.ThresholdMedium) + assert.Equal(t, byte(4), resultAccount.ThresholdHigh) + assert.Equal(t, int64(3), resultAccount.BuyingLiabilities) + assert.Equal(t, int64(4), resultAccount.SellingLiabilities) +} diff --git a/services/horizon/internal/db2/history/asset.go b/services/horizon/internal/db2/history/asset.go new file mode 100644 index 0000000000..a3bac68435 --- /dev/null +++ b/services/horizon/internal/db2/history/asset.go @@ -0,0 +1,111 @@ +package history + +import ( + "context" + "sort" + + sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// GetAssetID fetches the id for an Asset +func (q *Q) GetAssetID(ctx context.Context, asset xdr.Asset) (id int64, err error) { + var ( + assetType string + assetCode string + assetIssuer string + ) + + err = asset.Extract(&assetType, &assetCode, &assetIssuer) + if err != nil { + return + } + + sql := sq.Select("id").From("history_assets").Limit(1).Where(sq.Eq{ + "asset_type": assetType, + "asset_code": assetCode, + "asset_issuer": assetIssuer}) + + err = q.Get(ctx, &id, sql) + return +} + +// CreateAssets creates rows in the history_assets table for a given list of assets. 
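GetAssetID and CreateAssets both key history_assets on the (type, code, issuer) triple extracted from an xdr.Asset; a small sketch of that extraction using the xdr helpers already exercised by the tests in this diff (the issuer address is reused from the fixtures):

package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	asset := xdr.MustNewCreditAsset("EUR", "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")

	var assetType, code, issuer string
	if err := asset.Extract(&assetType, &code, &issuer); err != nil {
		panic(err)
	}

	// The same (type, code, issuer) triple is what GetAssetID queries on and
	// what CreateAssets inserts into history_assets.
	fmt.Println(assetType, code, issuer) // credit_alphanum4 EUR GBRPYHIL...OX2H
}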
+func (q *Q) CreateAssets(ctx context.Context, assets []xdr.Asset, batchSize int) (map[string]Asset, error) { + searchStrings := make([]string, 0, len(assets)) + assetToKey := map[[3]string]string{} + + builder := &db.BatchInsertBuilder{ + Table: q.GetTable("history_assets"), + MaxBatchSize: batchSize, + Suffix: "ON CONFLICT (asset_code, asset_type, asset_issuer) DO NOTHING", + } + + // sort assets before inserting rows into history_assets to prevent deadlocks on acquiring a ShareLock + // https://github.com/stellar/go/issues/2370 + sort.Slice(assets, func(i, j int) bool { + return assets[i].String() < assets[j].String() + }) + for _, asset := range assets { + var assetType, assetCode, assetIssuer string + err := asset.Extract(&assetType, &assetCode, &assetIssuer) + if err != nil { + return nil, errors.Wrap(err, "could not extract asset details") + } + + assetTuple := [3]string{ + assetType, + assetCode, + assetIssuer, + } + if _, contains := assetToKey[assetTuple]; !contains { + searchStrings = append(searchStrings, assetType+"/"+assetCode+"/"+assetIssuer) + assetToKey[assetTuple] = asset.String() + + err = builder.Row(ctx, map[string]interface{}{ + "asset_type": assetType, + "asset_code": assetCode, + "asset_issuer": assetIssuer, + }) + if err != nil { + return nil, errors.Wrap(err, "could not insert history_assets row") + } + } + } + + err := builder.Exec(ctx) + if err != nil { + return nil, errors.Wrap(err, "could not exec asset insert builder") + } + assetMap := map[string]Asset{} + + const selectBatchSize = 1000 + var rows []Asset + for i := 0; i < len(searchStrings); i += selectBatchSize { + end := i + selectBatchSize + if end > len(searchStrings) { + end = len(searchStrings) + } + subset := searchStrings[i:end] + + err = q.Select(ctx, &rows, sq.Select("*").From("history_assets").Where(sq.Eq{ + "concat(asset_type, '/', asset_code, '/', asset_issuer)": subset, + })) + if err != nil { + return nil, errors.Wrap(err, "could not select assets") + } + + for _, row := range rows { + key := assetToKey[[3]string{ + row.Type, + row.Code, + row.Issuer, + }] + assetMap[key] = row + } + } + + return assetMap, nil +} diff --git a/services/horizon/internal/db2/history/asset_stats.go b/services/horizon/internal/db2/history/asset_stats.go new file mode 100644 index 0000000000..e24f3fe887 --- /dev/null +++ b/services/horizon/internal/db2/history/asset_stats.go @@ -0,0 +1,184 @@ +package history + +import ( + "context" + "fmt" + "strings" + + sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +func assetStatToMap(assetStat ExpAssetStat) map[string]interface{} { + return map[string]interface{}{ + "asset_type": assetStat.AssetType, + "asset_code": assetStat.AssetCode, + "asset_issuer": assetStat.AssetIssuer, + "accounts": assetStat.Accounts, + "balances": assetStat.Balances, + "amount": assetStat.Amount, + "num_accounts": assetStat.NumAccounts, + } +} + +func assetStatToPrimaryKeyMap(assetStat ExpAssetStat) map[string]interface{} { + return map[string]interface{}{ + "asset_type": assetStat.AssetType, + "asset_code": assetStat.AssetCode, + "asset_issuer": assetStat.AssetIssuer, + } +} + +// InsertAssetStats a set of asset stats into the exp_asset_stats +func (q *Q) InsertAssetStats(ctx context.Context, assetStats []ExpAssetStat, batchSize int) error { + builder := &db.BatchInsertBuilder{ + Table: q.GetTable("exp_asset_stats"), + MaxBatchSize: 
batchSize,
+	}
+
+	for _, assetStat := range assetStats {
+		if err := builder.Row(ctx, assetStatToMap(assetStat)); err != nil {
+			return errors.Wrap(err, "could not insert asset stat row")
+		}
+	}
+
+	if err := builder.Exec(ctx); err != nil {
+		return errors.Wrap(err, "could not exec asset stats insert builder")
+	}
+
+	return nil
+}
+
+// InsertAssetStat inserts a single asset stat row into the exp_asset_stats table.
+// Returns number of rows affected and error.
+func (q *Q) InsertAssetStat(ctx context.Context, assetStat ExpAssetStat) (int64, error) {
+	sql := sq.Insert("exp_asset_stats").SetMap(assetStatToMap(assetStat))
+	result, err := q.Exec(ctx, sql)
+	if err != nil {
+		return 0, err
+	}
+
+	return result.RowsAffected()
+}
+
+// UpdateAssetStat updates a row in the exp_asset_stats table.
+// Returns number of rows affected and error.
+func (q *Q) UpdateAssetStat(ctx context.Context, assetStat ExpAssetStat) (int64, error) {
+	sql := sq.Update("exp_asset_stats").
+		SetMap(assetStatToMap(assetStat)).
+		Where(assetStatToPrimaryKeyMap(assetStat))
+	result, err := q.Exec(ctx, sql)
+	if err != nil {
+		return 0, err
+	}
+
+	return result.RowsAffected()
+}
+
+// RemoveAssetStat removes a row in the exp_asset_stats table.
+func (q *Q) RemoveAssetStat(ctx context.Context, assetType xdr.AssetType, assetCode, assetIssuer string) (int64, error) {
+	sql := sq.Delete("exp_asset_stats").
+		Where(map[string]interface{}{
+			"asset_type":   assetType,
+			"asset_code":   assetCode,
+			"asset_issuer": assetIssuer,
+		})
+	result, err := q.Exec(ctx, sql)
+	if err != nil {
+		return 0, err
+	}
+
+	return result.RowsAffected()
+}
+
+// GetAssetStat returns a row in the exp_asset_stats table.
+func (q *Q) GetAssetStat(ctx context.Context, assetType xdr.AssetType, assetCode, assetIssuer string) (ExpAssetStat, error) {
+	sql := selectAssetStats.Where(map[string]interface{}{
+		"asset_type":   assetType,
+		"asset_code":   assetCode,
+		"asset_issuer": assetIssuer,
+	})
+	var assetStat ExpAssetStat
+	err := q.Get(ctx, &assetStat, sql)
+	return assetStat, err
+}
+
+func parseAssetStatsCursor(cursor string) (string, string, error) {
+	parts := strings.SplitN(cursor, "_", 3)
+	if len(parts) != 3 {
+		return "", "", fmt.Errorf("invalid asset stats cursor: %v", cursor)
+	}
+
+	code, issuer, assetType := parts[0], parts[1], parts[2]
+	var issuerAccount xdr.AccountId
+	var asset xdr.Asset
+
+	if err := issuerAccount.SetAddress(issuer); err != nil {
+		return "", "", errors.Wrap(
+			err,
+			fmt.Sprintf("invalid issuer in asset stats cursor: %v", cursor),
+		)
+	}
+
+	if err := asset.SetCredit(code, issuerAccount); err != nil {
+		return "", "", errors.Wrap(
+			err,
+			fmt.Sprintf("invalid asset stats cursor: %v", cursor),
+		)
+	}
+
+	if _, ok := xdr.StringToAssetType[assetType]; !ok {
+		return "", "", errors.Errorf("invalid asset type in asset stats cursor: %v", cursor)
+	}
+
+	return code, issuer, nil
+}
+
+// GetAssetStats returns a page of exp_asset_stats rows.
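+// The optional cursor has the form "<asset code>_<asset issuer>_<asset type>",
+// e.g. "USD_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4";
+// paging continues strictly past the (asset_code, asset_issuer) pair it encodes
+// in the requested order, while the asset type suffix is validated but not
+// otherwise used (see parseAssetStatsCursor above).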
+func (q *Q) GetAssetStats(ctx context.Context, assetCode, assetIssuer string, page db2.PageQuery) ([]ExpAssetStat, error) { + sql := selectAssetStats + filters := map[string]interface{}{} + if assetCode != "" { + filters["asset_code"] = assetCode + } + if assetIssuer != "" { + filters["asset_issuer"] = assetIssuer + } + + if len(filters) > 0 { + sql = sql.Where(filters) + } + + var cursorComparison, orderBy string + switch page.Order { + case "asc": + cursorComparison, orderBy = ">", "asc" + case "desc": + cursorComparison, orderBy = "<", "desc" + default: + return nil, fmt.Errorf("invalid page order %s", page.Order) + } + + if page.Cursor != "" { + cursorCode, cursorIssuer, err := parseAssetStatsCursor(page.Cursor) + if err != nil { + return nil, err + } + + sql = sql.Where("((asset_code, asset_issuer) "+cursorComparison+" (?,?))", cursorCode, cursorIssuer) + } + + sql = sql.OrderBy("(asset_code, asset_issuer) " + orderBy).Limit(page.Limit) + + var results []ExpAssetStat + if err := q.Select(ctx, &results, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return results, nil +} + +var selectAssetStats = sq.Select("exp_asset_stats.*").From("exp_asset_stats") diff --git a/services/horizon/internal/db2/history/asset_stats_test.go b/services/horizon/internal/db2/history/asset_stats_test.go new file mode 100644 index 0000000000..33a041fa7c --- /dev/null +++ b/services/horizon/internal/db2/history/asset_stats_test.go @@ -0,0 +1,774 @@ +package history + +import ( + "database/sql" + "testing" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" +) + +func TestInsertAssetStats(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + tt.Assert.NoError(q.InsertAssetStats(tt.Ctx, []ExpAssetStat{}, 1)) + + assetStats := []ExpAssetStat{ + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "USD", + Accounts: ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "4", + LiquidityPools: "5", + }, + Amount: "1", + NumAccounts: 2, + }, + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum12, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "ETHER", + Accounts: ExpAssetStatAccounts{ + Authorized: 1, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "23", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "4", + LiquidityPools: "5", + }, + Amount: "23", + NumAccounts: 1, + }, + } + tt.Assert.NoError(q.InsertAssetStats(tt.Ctx, assetStats, 1)) + + for _, assetStat := range assetStats { + got, err := q.GetAssetStat(tt.Ctx, assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + tt.Assert.NoError(err) + tt.Assert.Equal(got, assetStat) + } +} + +func TestInsertAssetStat(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + assetStats := []ExpAssetStat{ + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "USD", + Accounts: 
ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "4", + LiquidityPools: "5", + }, + Amount: "1", + NumAccounts: 2, + }, + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum12, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "ETHER", + Accounts: ExpAssetStatAccounts{ + Authorized: 1, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "23", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "4", + LiquidityPools: "5", + }, + Amount: "23", + NumAccounts: 1, + }, + } + + for _, assetStat := range assetStats { + numChanged, err := q.InsertAssetStat(tt.Ctx, assetStat) + tt.Assert.NoError(err) + tt.Assert.Equal(numChanged, int64(1)) + + got, err := q.GetAssetStat(tt.Ctx, assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + tt.Assert.NoError(err) + tt.Assert.Equal(got, assetStat) + } +} + +func TestInsertAssetStatAlreadyExistsError(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + assetStat := ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "USD", + Accounts: ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "4", + LiquidityPools: "5", + }, + Amount: "1", + NumAccounts: 2, + } + + numChanged, err := q.InsertAssetStat(tt.Ctx, assetStat) + tt.Assert.NoError(err) + tt.Assert.Equal(numChanged, int64(1)) + + numChanged, err = q.InsertAssetStat(tt.Ctx, assetStat) + tt.Assert.Error(err) + tt.Assert.Equal(numChanged, int64(0)) + + assetStat.NumAccounts = 4 + assetStat.Amount = "3" + numChanged, err = q.InsertAssetStat(tt.Ctx, assetStat) + tt.Assert.Error(err) + tt.Assert.Equal(numChanged, int64(0)) + + assetStat.NumAccounts = 2 + assetStat.Amount = "1" + got, err := q.GetAssetStat(tt.Ctx, assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + tt.Assert.NoError(err) + tt.Assert.Equal(got, assetStat) +} + +func TestUpdateAssetStatDoesNotExistsError(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + assetStat := ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "USD", + Accounts: ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "4", + LiquidityPools: "5", + }, + Amount: "1", + NumAccounts: 2, + } + + numChanged, err := q.UpdateAssetStat(tt.Ctx, assetStat) + tt.Assert.Nil(err) + tt.Assert.Equal(numChanged, int64(0)) + + _, err = q.GetAssetStat(tt.Ctx, assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + tt.Assert.Equal(err, sql.ErrNoRows) +} + +func TestUpdateStat(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &Q{tt.HorizonSession()} + + assetStat := ExpAssetStat{ + 
AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "USD", + Accounts: ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "4", + LiquidityPools: "5", + }, + Amount: "1", + NumAccounts: 2, + } + + numChanged, err := q.InsertAssetStat(tt.Ctx, assetStat) + tt.Assert.NoError(err) + tt.Assert.Equal(numChanged, int64(1)) + + got, err := q.GetAssetStat(tt.Ctx, assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + tt.Assert.NoError(err) + tt.Assert.Equal(got, assetStat) + + assetStat.NumAccounts = 50 + assetStat.Amount = "23" + + numChanged, err = q.UpdateAssetStat(tt.Ctx, assetStat) + tt.Assert.Nil(err) + tt.Assert.Equal(numChanged, int64(1)) + + got, err = q.GetAssetStat(tt.Ctx, assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + tt.Assert.NoError(err) + tt.Assert.Equal(got, assetStat) +} + +func TestGetAssetStatDoesNotExist(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + assetStat := ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "USD", + Accounts: ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "4", + LiquidityPools: "5", + }, + Amount: "1", + NumAccounts: 2, + } + + _, err := q.GetAssetStat(tt.Ctx, assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + tt.Assert.Equal(err, sql.ErrNoRows) +} + +func TestRemoveAssetStat(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &Q{tt.HorizonSession()} + + assetStat := ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "USD", + Accounts: ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "4", + LiquidityPools: "5", + }, + Amount: "1", + NumAccounts: 2, + } + + numChanged, err := q.RemoveAssetStat(tt.Ctx, + assetStat.AssetType, + assetStat.AssetCode, + assetStat.AssetIssuer, + ) + tt.Assert.Nil(err) + tt.Assert.Equal(numChanged, int64(0)) + + numChanged, err = q.InsertAssetStat(tt.Ctx, assetStat) + tt.Assert.NoError(err) + tt.Assert.Equal(numChanged, int64(1)) + + got, err := q.GetAssetStat(tt.Ctx, assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + tt.Assert.NoError(err) + tt.Assert.Equal(got, assetStat) + + numChanged, err = q.RemoveAssetStat(tt.Ctx, + assetStat.AssetType, + assetStat.AssetCode, + assetStat.AssetIssuer, + ) + tt.Assert.Nil(err) + tt.Assert.Equal(numChanged, int64(1)) + + _, err = q.GetAssetStat(tt.Ctx, assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + tt.Assert.Equal(err, sql.ErrNoRows) +} + +func TestGetAssetStatsCursorValidation(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &Q{tt.HorizonSession()} + + for _, testCase := range 
[]struct { + name string + cursor string + expectedError string + }{ + { + "cursor does not use underscore as serpator", + "usdc-GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "invalid asset stats cursor", + }, + { + "cursor has no underscore", + "usdc", + "invalid asset stats cursor", + }, + { + "cursor has too many underscores", + "usdc_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum4_", + "invalid asset type in asset stats cursor", + }, + { + "issuer in cursor is invalid", + "usd_abcdefghijklmnopqrstuv_credit_alphanum4", + "invalid issuer in asset stats cursor", + }, + { + "asset type in cursor is invalid", + "usd_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum", + "invalid asset type in asset stats cursor", + }, + { + "asset code in cursor is too long", + "abcdefghijklmnopqrstuv_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum12", + "invalid asset stats cursor", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + page := db2.PageQuery{ + Cursor: testCase.cursor, + Order: "asc", + Limit: 5, + } + results, err := q.GetAssetStats(tt.Ctx, "", "", page) + tt.Assert.Empty(results) + tt.Assert.NotNil(err) + tt.Assert.Contains(err.Error(), testCase.expectedError) + }) + } +} + +func TestGetAssetStatsOrderValidation(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &Q{tt.HorizonSession()} + + page := db2.PageQuery{ + Order: "invalid", + Limit: 5, + } + results, err := q.GetAssetStats(tt.Ctx, "", "", page) + tt.Assert.Empty(results) + tt.Assert.NotNil(err) + tt.Assert.Contains(err.Error(), "invalid page order") +} + +func reverseAssetStats(a []ExpAssetStat) { + for i := len(a)/2 - 1; i >= 0; i-- { + opp := len(a) - 1 - i + a[i], a[opp] = a[opp], a[i] + } +} + +func TestGetAssetStatsFiltersAndCursor(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &Q{tt.HorizonSession()} + + usdAssetStat := ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "USD", + Accounts: ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "1", + NumAccounts: 2, + } + etherAssetStat := ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum12, + AssetIssuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + AssetCode: "ETHER", + Accounts: ExpAssetStatAccounts{ + Authorized: 1, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "23", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "23", + NumAccounts: 1, + } + otherUSDAssetStat := ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + AssetCode: "USD", + Accounts: ExpAssetStatAccounts{ + Authorized: 2, + AuthorizedToMaintainLiabilities: 3, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "1", + NumAccounts: 2, + } + eurAssetStat := ExpAssetStat{ + AssetType: 
xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + AssetCode: "EUR", + Accounts: ExpAssetStatAccounts{ + Authorized: 3, + AuthorizedToMaintainLiabilities: 2, + Unauthorized: 4, + }, + Balances: ExpAssetStatBalances{ + Authorized: "111", + AuthorizedToMaintainLiabilities: "2", + Unauthorized: "3", + ClaimableBalances: "1", + LiquidityPools: "2", + }, + Amount: "111", + NumAccounts: 3, + } + assetStats := []ExpAssetStat{ + etherAssetStat, + eurAssetStat, + otherUSDAssetStat, + usdAssetStat, + } + for _, assetStat := range assetStats { + numChanged, err := q.InsertAssetStat(tt.Ctx, assetStat) + tt.Assert.NoError(err) + tt.Assert.Equal(numChanged, int64(1)) + } + + for _, testCase := range []struct { + name string + assetCode string + assetIssuer string + cursor string + order string + expected []ExpAssetStat + }{ + { + "no filter without cursor", + "", + "", + "", + "asc", + []ExpAssetStat{ + etherAssetStat, + eurAssetStat, + otherUSDAssetStat, + usdAssetStat, + }, + }, + { + "no filter with cursor", + "", + "", + "ABC_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "asc", + []ExpAssetStat{ + etherAssetStat, + eurAssetStat, + otherUSDAssetStat, + usdAssetStat, + }, + }, + { + "no filter with cursor descending", + "", + "", + "ZZZ_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum4", + "desc", + []ExpAssetStat{ + usdAssetStat, + otherUSDAssetStat, + eurAssetStat, + etherAssetStat, + }, + }, + { + "no filter with cursor and offset", + "", + "", + "ETHER_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum12", + "asc", + []ExpAssetStat{ + eurAssetStat, + otherUSDAssetStat, + usdAssetStat, + }, + }, + { + "no filter with cursor and offset descending", + "", + "", + "EUR_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "desc", + []ExpAssetStat{ + etherAssetStat, + }, + }, + { + "no filter with cursor and offset descending including eur", + "", + "", + "EUR_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum4", + "desc", + []ExpAssetStat{ + eurAssetStat, + etherAssetStat, + }, + }, + { + "filter on code without cursor", + "USD", + "", + "", + "asc", + []ExpAssetStat{ + otherUSDAssetStat, + usdAssetStat, + }, + }, + { + "filter on code with cursor", + "USD", + "", + "USD_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "asc", + []ExpAssetStat{ + usdAssetStat, + }, + }, + { + "filter on code with cursor descending", + "USD", + "", + "USD_GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H_credit_alphanum4", + "desc", + []ExpAssetStat{ + otherUSDAssetStat, + }, + }, + { + "filter on issuer without cursor", + "", + "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "", + "asc", + []ExpAssetStat{ + eurAssetStat, + otherUSDAssetStat, + }, + }, + { + "filter on issuer with cursor", + "", + "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "EUR_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "asc", + []ExpAssetStat{ + otherUSDAssetStat, + }, + }, + { + "filter on issuer with cursor descending", + "", + "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "USD_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "desc", + []ExpAssetStat{ + eurAssetStat, + }, + }, + { + "filter on non existant code without cursor", + "BTC", + "", + "", + "asc", + nil, + }, + { + "filter on non existant code with cursor", + 
"BTC", + "", + "BTC_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "asc", + nil, + }, + { + "filter on non existant issuer without cursor", + "", + "GAEIHD6U4WSBHJGA2HPWOQ3OQEFQ3Y7QZE2DR76YKZNKPW5YDLYW4UGF", + "", + "asc", + nil, + }, + { + "filter on non existant issuer with cursor", + "", + "GAEIHD6U4WSBHJGA2HPWOQ3OQEFQ3Y7QZE2DR76YKZNKPW5YDLYW4UGF", + "AAA_GAEIHD6U4WSBHJGA2HPWOQ3OQEFQ3Y7QZE2DR76YKZNKPW5YDLYW4UGF_credit_alphanum4", + "asc", + nil, + }, + { + "filter on non existant code and non existant issuer without cursor", + "BTC", + "GAEIHD6U4WSBHJGA2HPWOQ3OQEFQ3Y7QZE2DR76YKZNKPW5YDLYW4UGF", + "", + "asc", + nil, + }, + { + "filter on non existant code and non existant issuer with cursor", + "BTC", + "GAEIHD6U4WSBHJGA2HPWOQ3OQEFQ3Y7QZE2DR76YKZNKPW5YDLYW4UGF", + "AAA_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "asc", + nil, + }, + { + "filter on both code and issuer without cursor", + "USD", + "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "", + "asc", + []ExpAssetStat{ + otherUSDAssetStat, + }, + }, + { + "filter on both code and issuer with cursor", + "USD", + "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "USC_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "asc", + []ExpAssetStat{ + otherUSDAssetStat, + }, + }, + { + "filter on both code and issuer with cursor descending", + "USD", + "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "USE_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "desc", + []ExpAssetStat{ + otherUSDAssetStat, + }, + }, + { + "cursor negates filter", + "USD", + "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "USD_GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2_credit_alphanum4", + "asc", + nil, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + page := db2.PageQuery{ + Order: testCase.order, + Cursor: testCase.cursor, + Limit: 5, + } + results, err := q.GetAssetStats(tt.Ctx, testCase.assetCode, testCase.assetIssuer, page) + tt.Assert.NoError(err) + tt.Assert.Equal(testCase.expected, results) + + page.Limit = 1 + results, err = q.GetAssetStats(tt.Ctx, testCase.assetCode, testCase.assetIssuer, page) + tt.Assert.NoError(err) + if len(testCase.expected) == 0 { + tt.Assert.Equal(testCase.expected, results) + } else { + tt.Assert.Equal(testCase.expected[:1], results) + } + + if page.Cursor == "" { + page = page.Invert() + page.Limit = 5 + + results, err = q.GetAssetStats(tt.Ctx, testCase.assetCode, testCase.assetIssuer, page) + tt.Assert.NoError(err) + reverseAssetStats(results) + tt.Assert.Equal(testCase.expected, results) + } + }) + } +} diff --git a/services/horizon/internal/db2/history/asset_test.go b/services/horizon/internal/db2/history/asset_test.go new file mode 100644 index 0000000000..9289f44622 --- /dev/null +++ b/services/horizon/internal/db2/history/asset_test.go @@ -0,0 +1,126 @@ +package history + +import ( + "sort" + "testing" + + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" +) + +func TestCreateAssetsSortedOrder(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &Q{tt.HorizonSession()} + assets := []xdr.Asset{ + usdAsset, nativeAsset, eurAsset, + xdr.MustNewCreditAsset("CNY", issuer.Address()), + } + assetMap, err := q.CreateAssets(tt.Ctx, + assets, + 2, + ) + tt.Assert.NoError(err) + + idsToAsset := map[int64]string{} + sortedIDs := []int64{} + for assetString, 
asset := range assetMap { + idsToAsset[asset.ID] = assetString + sortedIDs = append(sortedIDs, asset.ID) + } + + sort.Slice(assets, func(i, j int) bool { + return assets[i].String() < assets[j].String() + }) + sort.Slice(sortedIDs, func(i, j int) bool { + return sortedIDs[i] < sortedIDs[j] + }) + + var assetStrings []string + for _, asset := range assets { + assetStrings = append(assetStrings, asset.String()) + } + + var values []string + for _, id := range sortedIDs { + values = append(values, idsToAsset[id]) + } + tt.Assert.Equal(assetStrings, values) +} + +func TestCreateAssets(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &Q{tt.HorizonSession()} + + // CreateAssets creates new rows + assets := []xdr.Asset{ + nativeAsset, eurAsset, + } + assetMap, err := q.CreateAssets(tt.Ctx, assets, 1) + tt.Assert.NoError(err) + tt.Assert.Len(assetMap, len(assets)) + + set := map[int64]bool{} + for _, asset := range assets { + row := assetMap[asset.String()] + + tt.Assert.False(set[row.ID]) + set[row.ID] = true + + var assetType, assetCode, assetIssuer string + asset.MustExtract(&assetType, &assetCode, &assetIssuer) + + tt.Assert.Equal(row.Type, assetType) + tt.Assert.Equal(row.Code, assetCode) + tt.Assert.Equal(row.Issuer, assetIssuer) + } + + // CreateAssets handles duplicates + assetMap, err = q.CreateAssets(tt.Ctx, + []xdr.Asset{ + nativeAsset, nativeAsset, eurAsset, eurAsset, + nativeAsset, nativeAsset, eurAsset, eurAsset, + }, + 2, + ) + tt.Assert.NoError(err) + tt.Assert.Len(assetMap, len(assets)) + + for _, asset := range assets { + row := assetMap[asset.String()] + + tt.Assert.True(set[row.ID]) + + var assetType, assetCode, assetIssuer string + asset.MustExtract(&assetType, &assetCode, &assetIssuer) + + tt.Assert.Equal(row.Type, assetType) + tt.Assert.Equal(row.Code, assetCode) + tt.Assert.Equal(row.Issuer, assetIssuer) + } + + // CreateAssets handles duplicates and new rows + assets = append(assets, usdAsset) + assetMap, err = q.CreateAssets(tt.Ctx, assets, 2) + tt.Assert.NoError(err) + tt.Assert.Len(assetMap, len(assets)) + + for _, asset := range assets { + row := assetMap[asset.String()] + + inSet := !asset.Equals(usdAsset) + tt.Assert.Equal(inSet, set[row.ID]) + + var assetType, assetCode, assetIssuer string + asset.MustExtract(&assetType, &assetCode, &assetIssuer) + + tt.Assert.Equal(row.Type, assetType) + tt.Assert.Equal(row.Code, assetCode) + tt.Assert.Equal(row.Issuer, assetIssuer) + } +} diff --git a/services/horizon/internal/db2/history/claimable_balances.go b/services/horizon/internal/db2/history/claimable_balances.go new file mode 100644 index 0000000000..f5eb663828 --- /dev/null +++ b/services/horizon/internal/db2/history/claimable_balances.go @@ -0,0 +1,243 @@ +package history + +import ( + "context" + "database/sql/driver" + "encoding/json" + "strconv" + "strings" + + sq "github.com/Masterminds/squirrel" + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// ClaimableBalancesQuery is a helper struct to configure queries to claimable balances +type ClaimableBalancesQuery struct { + PageQuery db2.PageQuery + Asset *xdr.Asset + Sponsor *xdr.AccountId + Claimant *xdr.AccountId +} + +// Cursor validates and returns the query page cursor +func (cbq ClaimableBalancesQuery) Cursor() (int64, string, error) { + p := cbq.PageQuery + var l int64 + var r string + var err error + + if p.Cursor != "" { + parts := 
strings.SplitN(p.Cursor, "-", 2) + if len(parts) != 2 { + return l, r, errors.New("Invalid cursor") + } + + l, err = strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return l, r, errors.Wrap(err, "Invalid cursor - first value should be higher than 0") + } + + var balanceID xdr.ClaimableBalanceId + if err = xdr.SafeUnmarshalHex(parts[1], &balanceID); err != nil { + return l, r, errors.Wrap(err, "Invalid cursor - second value should be a valid claimable balance id") + } + r = parts[1] + if l < 0 { + return l, r, errors.Wrap(err, "Invalid cursor - first value should be higher than 0") + } + } + + return l, r, nil +} + +// ApplyCursor applies cursor to the given sql. For performance reason the limit +// is not applied here. This allows us to hint the planner later to use the right +// indexes. +func (cbq ClaimableBalancesQuery) ApplyCursor(sql sq.SelectBuilder) (sq.SelectBuilder, error) { + p := cbq.PageQuery + l, r, err := cbq.Cursor() + if err != nil { + return sql, err + } + + switch p.Order { + case db2.OrderAscending: + if l > 0 && r != "" { + sql = sql. + Where(sq.Expr("(cb.last_modified_ledger, cb.id) > (?, ?)", l, r)) + } + sql = sql.OrderBy("cb.last_modified_ledger asc, cb.id asc") + case db2.OrderDescending: + if l > 0 && r != "" { + sql = sql. + Where(sq.Expr("(cb.last_modified_ledger, cb.id) < (?, ?)", l, r)) + } + + sql = sql.OrderBy("cb.last_modified_ledger desc, cb.id desc") + default: + return sql, errors.Errorf("invalid order: %s", p.Order) + } + + return sql, nil +} + +// ClaimableBalance is a row of data from the `claimable_balances` table. +type ClaimableBalance struct { + BalanceID string `db:"id"` + Claimants Claimants `db:"claimants"` + Asset xdr.Asset `db:"asset"` + Amount xdr.Int64 `db:"amount"` + Sponsor null.String `db:"sponsor"` + LastModifiedLedger uint32 `db:"last_modified_ledger"` + Flags uint32 `db:"flags"` +} + +type Claimants []Claimant + +func (c Claimants) Value() (driver.Value, error) { + return json.Marshal(c) +} + +func (c *Claimants) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.New("type assertion to []byte failed") + } + + return json.Unmarshal(b, &c) +} + +type Claimant struct { + Destination string `json:"destination"` + Predicate xdr.ClaimPredicate `json:"predicate"` +} + +// QClaimableBalances defines claimable-balance-related related queries. +type QClaimableBalances interface { + UpsertClaimableBalances(ctx context.Context, cb []ClaimableBalance) error + RemoveClaimableBalances(ctx context.Context, ids []string) (int64, error) + GetClaimableBalancesByID(ctx context.Context, ids []string) ([]ClaimableBalance, error) + CountClaimableBalances(ctx context.Context) (int, error) +} + +// CountClaimableBalances returns the total number of claimable balances in the DB +func (q *Q) CountClaimableBalances(ctx context.Context) (int, error) { + sql := sq.Select("count(*)").From("claimable_balances") + + var count int + if err := q.Get(ctx, &count, sql); err != nil { + return 0, errors.Wrap(err, "could not run select query") + } + + return count, nil +} + +// GetClaimableBalancesByID finds all claimable balances by ClaimableBalanceId +func (q *Q) GetClaimableBalancesByID(ctx context.Context, ids []string) ([]ClaimableBalance, error) { + var cBalances []ClaimableBalance + sql := selectClaimableBalances.Where(map[string]interface{}{"cb.id": ids}) + err := q.Select(ctx, &cBalances, sql) + return cBalances, err +} + +// UpsertClaimableBalances upserts a batch of claimable balances in the claimable_balances table. 
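+// Existing rows are updated in place (the upsert is keyed on the id column).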
+// There's currently no limit on the number of claimable balances this method can
+// accept other than the 2GB limit on the query string length, which should be
+// enough for each ledger with the current limits.
+func (q *Q) UpsertClaimableBalances(ctx context.Context, cbs []ClaimableBalance) error {
+	var id, claimants, asset, amount, sponsor, lastModifiedLedger, flags []interface{}
+
+	for _, cb := range cbs {
+		id = append(id, cb.BalanceID)
+		claimants = append(claimants, cb.Claimants)
+		asset = append(asset, cb.Asset)
+		amount = append(amount, cb.Amount)
+		sponsor = append(sponsor, cb.Sponsor)
+		lastModifiedLedger = append(lastModifiedLedger, cb.LastModifiedLedger)
+		flags = append(flags, cb.Flags)
+	}
+
+	upsertFields := []upsertField{
+		{"id", "text", id},
+		{"claimants", "jsonb", claimants},
+		{"asset", "text", asset},
+		{"amount", "bigint", amount},
+		{"sponsor", "text", sponsor},
+		{"last_modified_ledger", "integer", lastModifiedLedger},
+		{"flags", "int", flags},
+	}
+
+	return q.upsertRows(ctx, "claimable_balances", "id", upsertFields)
+}
+
+// RemoveClaimableBalances deletes rows from the claimable_balances table.
+// Returns number of rows affected and error.
+func (q *Q) RemoveClaimableBalances(ctx context.Context, ids []string) (int64, error) {
+	sql := sq.Delete("claimable_balances").
+		Where(sq.Eq{"id": ids})
+	result, err := q.Exec(ctx, sql)
+	if err != nil {
+		return 0, err
+	}
+
+	return result.RowsAffected()
+}
+
+// FindClaimableBalanceByID returns a claimable balance.
+func (q *Q) FindClaimableBalanceByID(ctx context.Context, balanceID string) (ClaimableBalance, error) {
+	var claimableBalance ClaimableBalance
+	sql := selectClaimableBalances.Limit(1).Where("cb.id = ?", balanceID)
+	err := q.Get(ctx, &claimableBalance, sql)
+	return claimableBalance, err
+}
+
+// GetClaimableBalances returns a page of claimable balances matching the
+// cursor, asset, sponsor and claimant filters in the given ClaimableBalancesQuery.
+func (q *Q) GetClaimableBalances(ctx context.Context, query ClaimableBalancesQuery) ([]ClaimableBalance, error) {
+	sql, err := query.ApplyCursor(selectClaimableBalances)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not apply query to page")
+	}
+
+	if query.Asset != nil {
+		sql = sql.Where("cb.asset = ?", query.Asset)
+	}
+
+	if query.Sponsor != nil {
+		sql = sql.Where("cb.sponsor = ?", query.Sponsor.Address())
+	}
+
+	if query.Claimant != nil {
+		sql = sql.
+			Where(`cb.claimants @> '[{"destination": "` + query.Claimant.Address() + `"}]'`)
+	}
+
+	// We need to use WITH syntax to force the query planner to use the right
+	// indexes; otherwise, when the limit is small, it will use an index scan
+	// which will be very slow once we have millions of records.
+	sql = sql.
+		Prefix("WITH cb AS (").
+ Suffix( + ") select "+claimableBalancesSelectStatement+" from cb LIMIT ?", + query.PageQuery.Limit, + ) + + var results []ClaimableBalance + if err := q.Select(ctx, &results, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return results, nil +} + +var claimableBalancesSelectStatement = "cb.id, " + + "cb.claimants, " + + "cb.asset, " + + "cb.amount, " + + "cb.sponsor, " + + "cb.last_modified_ledger, " + + "cb.flags" + +var selectClaimableBalances = sq.Select(claimableBalancesSelectStatement).From("claimable_balances cb") diff --git a/services/horizon/internal/db2/history/claimable_balances_test.go b/services/horizon/internal/db2/history/claimable_balances_test.go new file mode 100644 index 0000000000..e0d7de2e67 --- /dev/null +++ b/services/horizon/internal/db2/history/claimable_balances_test.go @@ -0,0 +1,292 @@ +package history + +import ( + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" +) + +func TestRemoveClaimableBalance(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + accountID := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + asset := xdr.MustNewCreditAsset("USD", accountID) + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + id, err := xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + cBalance := ClaimableBalance{ + BalanceID: id, + Claimants: []Claimant{ + { + Destination: accountID, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + LastModifiedLedger: 123, + Amount: 10, + } + + err = q.UpsertClaimableBalances(tt.Ctx, []ClaimableBalance{cBalance}) + tt.Assert.NoError(err) + + r, err := q.FindClaimableBalanceByID(tt.Ctx, id) + tt.Assert.NoError(err) + tt.Assert.NotNil(r) + + removed, err := q.RemoveClaimableBalances(tt.Ctx, []string{id}) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(1), removed) + + cbs := []ClaimableBalance{} + err = q.Select(tt.Ctx, &cbs, selectClaimableBalances) + + if tt.Assert.NoError(err) { + tt.Assert.Len(cbs, 0) + } +} + +func TestFindClaimableBalancesByDestination(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + dest1 := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + dest2 := "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" + + asset := xdr.MustNewCreditAsset("USD", dest1) + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + id, err := xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + cBalance := ClaimableBalance{ + BalanceID: id, + Claimants: []Claimant{ + { + Destination: dest1, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + LastModifiedLedger: 123, + Amount: 10, + } + + err = q.UpsertClaimableBalances(tt.Ctx, []ClaimableBalance{cBalance}) + tt.Assert.NoError(err) + + balanceID = xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{3, 2, 1}, + } + id, err = xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + cBalance = ClaimableBalance{ + BalanceID: id, + Claimants: []Claimant{ + { + Destination: dest1, + Predicate: 
xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + { + Destination: dest2, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + LastModifiedLedger: 123, + Amount: 10, + } + + err = q.UpsertClaimableBalances(tt.Ctx, []ClaimableBalance{cBalance}) + tt.Assert.NoError(err) + + query := ClaimableBalancesQuery{ + PageQuery: db2.MustPageQuery("", false, "", 10), + Claimant: xdr.MustAddressPtr(dest1), + } + + cbs, err := q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 2) + + for _, cb := range cbs { + tt.Assert.Equal(dest1, cb.Claimants[0].Destination) + } + + query.Claimant = xdr.MustAddressPtr(dest2) + cbs, err = q.GetClaimableBalances(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal(dest2, cbs[0].Claimants[1].Destination) +} + +func TestUpdateClaimableBalance(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + accountID := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + lastModifiedLedgerSeq := xdr.Uint32(123) + asset := xdr.MustNewCreditAsset("USD", accountID) + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + id, err := xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + cBalance := ClaimableBalance{ + BalanceID: id, + Claimants: []Claimant{ + { + Destination: accountID, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + LastModifiedLedger: 123, + Amount: 10, + } + + err = q.UpsertClaimableBalances(tt.Ctx, []ClaimableBalance{cBalance}) + tt.Assert.NoError(err) + + // add sponsor + cBalance2 := ClaimableBalance{ + BalanceID: id, + Claimants: []Claimant{ + { + Destination: accountID, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + LastModifiedLedger: 123 + 1, + Amount: 10, + Sponsor: null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + } + + err = q.UpsertClaimableBalances(tt.Ctx, []ClaimableBalance{cBalance2}) + tt.Assert.NoError(err) + + cbs := []ClaimableBalance{} + err = q.Select(tt.Ctx, &cbs, selectClaimableBalances) + tt.Assert.NoError(err) + tt.Assert.Len(cbs, 1) + tt.Assert.Equal("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", cbs[0].Sponsor.String) + tt.Assert.Equal(uint32(lastModifiedLedgerSeq+1), cbs[0].LastModifiedLedger) +} + +func TestFindClaimableBalance(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + accountID := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + asset := xdr.MustNewCreditAsset("USD", accountID) + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + id, err := xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + cBalance := ClaimableBalance{ + BalanceID: id, + Claimants: []Claimant{ + { + Destination: accountID, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + LastModifiedLedger: 123, + Amount: 10, + } + + err = q.UpsertClaimableBalances(tt.Ctx, []ClaimableBalance{cBalance}) + tt.Assert.NoError(err) + + cb, err := q.FindClaimableBalanceByID(tt.Ctx, id) + tt.Assert.NoError(err) + + 
tt.Assert.Equal(cBalance.BalanceID, cb.BalanceID) + tt.Assert.Equal(cBalance.Asset, cb.Asset) + tt.Assert.Equal(cBalance.Amount, cb.Amount) + + for i, hClaimant := range cb.Claimants { + tt.Assert.Equal(cBalance.Claimants[i].Destination, hClaimant.Destination) + tt.Assert.Equal(cBalance.Claimants[i].Predicate, hClaimant.Predicate) + } +} +func TestGetClaimableBalancesByID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + accountID := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + asset := xdr.MustNewCreditAsset("USD", accountID) + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + id, err := xdr.MarshalHex(balanceID) + tt.Assert.NoError(err) + cBalance := ClaimableBalance{ + BalanceID: id, + Claimants: []Claimant{ + { + Destination: accountID, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Asset: asset, + LastModifiedLedger: 123, + Amount: 10, + } + + err = q.UpsertClaimableBalances(tt.Ctx, []ClaimableBalance{cBalance}) + tt.Assert.NoError(err) + + r, err := q.GetClaimableBalancesByID(tt.Ctx, []string{id}) + tt.Assert.NoError(err) + tt.Assert.Len(r, 1) + + removed, err := q.RemoveClaimableBalances(tt.Ctx, []string{id}) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(1), removed) + + r, err = q.GetClaimableBalancesByID(tt.Ctx, []string{id}) + tt.Assert.NoError(err) + tt.Assert.Len(r, 0) +} diff --git a/services/horizon/internal/db2/history/effect.go b/services/horizon/internal/db2/history/effect.go new file mode 100644 index 0000000000..c7618b019f --- /dev/null +++ b/services/horizon/internal/db2/history/effect.go @@ -0,0 +1,254 @@ +package history + +import ( + "context" + "encoding/json" + "fmt" + "math" + + sq "github.com/Masterminds/squirrel" + + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" +) + +// UnmarshalDetails unmarshals the details of this effect into `dest` +func (r *Effect) UnmarshalDetails(dest interface{}) error { + if !r.DetailsString.Valid { + return nil + } + + err := errors.Wrap(json.Unmarshal([]byte(r.DetailsString.String), &dest), "unmarshal effect details failed") + if err == nil { + // In 2.9.0 a new `asset_type` was introduced to include liquidity + // pools. Instead of reingesting entire history, let's fill the + // `asset_type` here if it's empty. + // (I hate to convert to `protocol` types here but there's no other way + // without larger refactor.) 
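+		// Only the trustline sponsorship effects below store the asset in its
+		// canonical "CODE:ISSUER" form, so they are the only ones backfilled here;
+		// every other effect type is returned as unmarshalled.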
+		switch dest := dest.(type) {
+		case *effects.TrustlineSponsorshipCreated:
+			if dest.Type == "" {
+				dest.Type = getAssetTypeForCanonicalAsset(dest.Asset)
+			}
+		case *effects.TrustlineSponsorshipUpdated:
+			if dest.Type == "" {
+				dest.Type = getAssetTypeForCanonicalAsset(dest.Asset)
+			}
+		case *effects.TrustlineSponsorshipRemoved:
+			if dest.Type == "" {
+				dest.Type = getAssetTypeForCanonicalAsset(dest.Asset)
+			}
+		}
+	}
+	return err
+}
+
+// getAssetTypeForCanonicalAsset infers the asset type from the length of the
+// canonical "CODE:ISSUER" string: a code of up to 4 characters plus the
+// 56-character issuer address is at most 61 characters, so anything longer
+// must be a credit_alphanum12 asset.
+func getAssetTypeForCanonicalAsset(canonicalAsset string) string {
+	if len(canonicalAsset) <= 61 {
+		return "credit_alphanum4"
+	} else {
+		return "credit_alphanum12"
+	}
+}
+
+// ID returns a lexically ordered id for this effect record
+func (r *Effect) ID() string {
+	return fmt.Sprintf("%019d-%010d", r.HistoryOperationID, r.Order)
+}
+
+// LedgerSequence returns the ledger in which the effect occurred.
+func (r *Effect) LedgerSequence() int32 {
+	id := toid.Parse(r.HistoryOperationID)
+	return id.LedgerSequence
+}
+
+// PagingToken returns a cursor for this effect
+func (r *Effect) PagingToken() string {
+	return fmt.Sprintf("%d-%d", r.HistoryOperationID, r.Order)
+}
+
+// Effects provides a helper to filter rows from the `history_effects`
+// table with pre-defined filters. See `EffectsQ` methods for the
+// available filters.
+func (q *Q) Effects() *EffectsQ {
+	return &EffectsQ{
+		parent: q,
+		sql:    selectEffect,
+	}
+}
+
+// ForAccount filters the effects collection to a specific account
+func (q *EffectsQ) ForAccount(ctx context.Context, aid string) *EffectsQ {
+	var account Account
+	q.Err = q.parent.AccountByAddress(ctx, &account, aid)
+	if q.Err != nil {
+		return q
+	}
+
+	q.sql = q.sql.Where("heff.history_account_id = ?", account.ID)
+
+	return q
+}
+
+// ForLedger filters the query to only effects in a specific ledger,
+// specified by its sequence.
+func (q *EffectsQ) ForLedger(ctx context.Context, seq int32) *EffectsQ {
+	var ledger Ledger
+	q.Err = q.parent.LedgerBySequence(ctx, &ledger, seq)
+	if q.Err != nil {
+		return q
+	}
+
+	start := toid.ID{LedgerSequence: seq}
+	end := toid.ID{LedgerSequence: seq + 1}
+	q.sql = q.sql.Where(
+		"heff.history_operation_id >= ? AND heff.history_operation_id < ?",
+		start.ToInt64(),
+		end.ToInt64(),
+	)
+
+	return q
+}
+
+// ForOperation filters the query to only effects in a specific operation,
+// specified by its id.
+func (q *EffectsQ) ForOperation(id int64) *EffectsQ {
+	start := toid.Parse(id)
+	end := start
+	end.IncOperationOrder()
+	q.sql = q.sql.Where(
+		"heff.history_operation_id >= ? AND heff.history_operation_id < ?",
+		start.ToInt64(),
+		end.ToInt64(),
+	)
+
+	return q
+}
+
+// ForLiquidityPool filters the query to only effects in a specific liquidity pool,
+// specified by its id.
+func (q *EffectsQ) ForLiquidityPool(ctx context.Context, page db2.PageQuery, id string) *EffectsQ {
+	if q.Err != nil {
+		return q
+	}
+
+	op, _, err := page.CursorInt64Pair(db2.DefaultPairSep)
+	if err != nil {
+		q.Err = err
+		return q
+	}
+
+	query := `SELECT holp.history_operation_id
+	FROM history_operation_liquidity_pools holp
+	WHERE holp.history_liquidity_pool_id = (SELECT id FROM history_liquidity_pools WHERE liquidity_pool_id = ?)
+	`
+	switch page.Order {
+	case "asc":
+		query += "AND holp.history_operation_id >= ? ORDER BY holp.history_operation_id asc LIMIT ?"
+	case "desc":
+		query += "AND holp.history_operation_id <= ? ORDER BY holp.history_operation_id desc LIMIT ?"
+ default: + q.Err = errors.Errorf("invalid paging order: %s", page.Order) + return q + } + + var liquidityPoolOperationIDs []int64 + err = q.parent.SelectRaw(ctx, &liquidityPoolOperationIDs, query, id, op, page.Limit) + if err != nil { + q.Err = err + return q + } + + q.sql = q.sql.Where(map[string]interface{}{ + "heff.history_operation_id": liquidityPoolOperationIDs, + }) + return q +} + +// ForTransaction filters the query to only effects in a specific +// transaction, specified by the transactions's hex-encoded hash. +func (q *EffectsQ) ForTransaction(ctx context.Context, hash string) *EffectsQ { + var tx Transaction + q.Err = q.parent.TransactionByHash(ctx, &tx, hash) + if q.Err != nil { + return q + } + + start := toid.Parse(tx.ID) + end := start + end.TransactionOrder++ + q.sql = q.sql.Where( + "heff.history_operation_id >= ? AND heff.history_operation_id < ?", + start.ToInt64(), + end.ToInt64(), + ) + + return q +} + +// Page specifies the paging constraints for the query being built by `q`. +func (q *EffectsQ) Page(page db2.PageQuery) *EffectsQ { + if q.Err != nil { + return q + } + + op, idx, err := page.CursorInt64Pair(db2.DefaultPairSep) + if err != nil { + q.Err = err + return q + } + + if idx > math.MaxInt32 { + idx = math.MaxInt32 + } + + // NOTE: Remember to test the queries below with EXPLAIN / EXPLAIN ANALYZE + // before changing them. + // This condition is using multicolumn index and it's easy to write it in a way that + // DB will perform a full table scan. + switch page.Order { + case "asc": + q.sql = q.sql. + Where(`( + heff.history_operation_id >= ? + AND ( + heff.history_operation_id > ? OR + (heff.history_operation_id = ? AND heff.order > ?) + ))`, op, op, op, idx). + OrderBy("heff.history_operation_id asc, heff.order asc") + case "desc": + q.sql = q.sql. + Where(`( + heff.history_operation_id <= ? + AND ( + heff.history_operation_id < ? OR + (heff.history_operation_id = ? AND heff.order < ?) + ))`, op, op, op, idx). + OrderBy("heff.history_operation_id desc, heff.order desc") + } + + q.sql = q.sql.Limit(page.Limit) + return q +} + +// Select loads the results of the query specified by `q` into `dest`. +func (q *EffectsQ) Select(ctx context.Context, dest interface{}) error { + if q.Err != nil { + return q.Err + } + + q.Err = q.parent.Select(ctx, dest, q.sql) + return q.Err +} + +// QEffects defines history_effects related queries. +type QEffects interface { + QCreateAccountsHistory + NewEffectBatchInsertBuilder(maxBatchSize int) EffectBatchInsertBuilder +} + +var selectEffect = sq.Select("heff.*, hacc.address"). + From("history_effects heff"). 
+ LeftJoin("history_accounts hacc ON hacc.id = heff.history_account_id") diff --git a/services/horizon/internal/db2/history/effect_batch_insert_builder.go b/services/horizon/internal/db2/history/effect_batch_insert_builder.go new file mode 100644 index 0000000000..8b2522cf9e --- /dev/null +++ b/services/horizon/internal/db2/history/effect_batch_insert_builder.go @@ -0,0 +1,62 @@ +package history + +import ( + "context" + + "github.com/guregu/null" + "github.com/stellar/go/support/db" +) + +// EffectBatchInsertBuilder is used to insert effects into the +// history_effects table +type EffectBatchInsertBuilder interface { + Add( + ctx context.Context, + accountID int64, + muxedAccount null.String, + operationID int64, + order uint32, + effectType EffectType, + details []byte, + ) error + Exec(ctx context.Context) error +} + +// effectBatchInsertBuilder is a simple wrapper around db.BatchInsertBuilder +type effectBatchInsertBuilder struct { + builder db.BatchInsertBuilder +} + +// NewEffectBatchInsertBuilder constructs a new EffectBatchInsertBuilder instance +func (q *Q) NewEffectBatchInsertBuilder(maxBatchSize int) EffectBatchInsertBuilder { + return &effectBatchInsertBuilder{ + builder: db.BatchInsertBuilder{ + Table: q.GetTable("history_effects"), + MaxBatchSize: maxBatchSize, + }, + } +} + +// Add adds a effect to the batch +func (i *effectBatchInsertBuilder) Add( + ctx context.Context, + accountID int64, + muxedAccount null.String, + operationID int64, + order uint32, + effectType EffectType, + details []byte, +) error { + return i.builder.Row(ctx, map[string]interface{}{ + "history_account_id": accountID, + "address_muxed": muxedAccount, + "history_operation_id": operationID, + "\"order\"": order, + "type": effectType, + "details": details, + }) +} + +func (i *effectBatchInsertBuilder) Exec(ctx context.Context) error { + return i.builder.Exec(ctx) +} diff --git a/services/horizon/internal/db2/history/effect_batch_insert_builder_test.go b/services/horizon/internal/db2/history/effect_batch_insert_builder_test.go new file mode 100644 index 0000000000..fd9a01fa87 --- /dev/null +++ b/services/horizon/internal/db2/history/effect_batch_insert_builder_test.go @@ -0,0 +1,54 @@ +package history + +import ( + "encoding/json" + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/toid" +) + +func TestAddEffect(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + address := "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY" + muxedAddres := "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26" + accounIDs, err := q.CreateAccounts(tt.Ctx, []string{address}, 1) + tt.Assert.NoError(err) + + builder := q.NewEffectBatchInsertBuilder(2) + sequence := int32(56) + details, err := json.Marshal(map[string]string{ + "amount": "1000.0000000", + "asset_type": "native", + }) + + err = builder.Add(tt.Ctx, + accounIDs[address], + null.StringFrom(muxedAddres), + toid.New(sequence, 1, 1).ToInt64(), + 1, + 3, + details, + ) + tt.Assert.NoError(err) + + err = builder.Exec(tt.Ctx) + tt.Assert.NoError(err) + + effects := []Effect{} + tt.Assert.NoError(q.Effects().Select(tt.Ctx, &effects)) + tt.Assert.Len(effects, 1) + + effect := effects[0] + tt.Assert.Equal(address, effect.Account) + tt.Assert.Equal(muxedAddres, effect.AccountMuxed.String) + tt.Assert.Equal(int64(240518172673), effect.HistoryOperationID) + 
tt.Assert.Equal(int32(1), effect.Order) + tt.Assert.Equal(EffectType(3), effect.Type) + tt.Assert.Equal("{\"amount\": \"1000.0000000\", \"asset_type\": \"native\"}", effect.DetailsString.String) +} diff --git a/services/horizon/internal/db2/history/effect_test.go b/services/horizon/internal/db2/history/effect_test.go new file mode 100644 index 0000000000..4cb055d34f --- /dev/null +++ b/services/horizon/internal/db2/history/effect_test.go @@ -0,0 +1,187 @@ +package history + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/toid" +) + +func TestEffectsForLiquidityPool(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + // Insert Effect + address := "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY" + muxedAddres := "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26" + accountIDs, err := q.CreateAccounts(tt.Ctx, []string{address}, 1) + tt.Assert.NoError(err) + + builder := q.NewEffectBatchInsertBuilder(2) + sequence := int32(56) + details, err := json.Marshal(map[string]string{ + "amount": "1000.0000000", + "asset_type": "native", + }) + opID := toid.New(sequence, 1, 1).ToInt64() + err = builder.Add(tt.Ctx, + accountIDs[address], + null.StringFrom(muxedAddres), + opID, + 1, + 3, + details, + ) + tt.Assert.NoError(err) + + err = builder.Exec(tt.Ctx) + tt.Assert.NoError(err) + + // Insert Liquidity Pool history + liquidityPoolID := "abcde" + toInternalID, err := q.CreateHistoryLiquidityPools(tt.Ctx, []string{liquidityPoolID}, 2) + tt.Assert.NoError(err) + operationBuilder := q.NewOperationLiquidityPoolBatchInsertBuilder(2) + tt.Assert.NoError(err) + internalID, ok := toInternalID[liquidityPoolID] + tt.Assert.True(ok) + err = operationBuilder.Add(tt.Ctx, opID, internalID) + tt.Assert.NoError(err) + err = operationBuilder.Exec(tt.Ctx) + tt.Assert.NoError(err) + + var result []Effect + err = q.Effects().ForLiquidityPool(tt.Ctx, db2.PageQuery{ + Cursor: "0-0", + Order: "asc", + Limit: 10, + }, liquidityPoolID).Select(tt.Ctx, &result) + tt.Assert.NoError(err) + + tt.Assert.Len(result, 1) + tt.Assert.Equal(result[0].Account, address) + +} + +func TestEffectsForTrustlinesSponsorshipEmptyAssetType(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + address := "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY" + muxedAddres := "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26" + accountIDs, err := q.CreateAccounts(tt.Ctx, []string{address}, 1) + tt.Assert.NoError(err) + + builder := q.NewEffectBatchInsertBuilder(1) + sequence := int32(56) + tests := []struct { + effectType EffectType + details map[string]string + expectedAssetType string + }{ + { + EffectTrustlineSponsorshipCreated, + map[string]string{ + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + "credit_alphanum4", + }, + { + EffectTrustlineSponsorshipCreated, + map[string]string{ + "asset": "USDCE:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + "credit_alphanum12", + }, + { + 
EffectTrustlineSponsorshipUpdated, + map[string]string{ + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + "credit_alphanum4", + }, + { + EffectTrustlineSponsorshipUpdated, + map[string]string{ + "asset": "USDCE:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + "credit_alphanum12", + }, + { + EffectTrustlineSponsorshipRemoved, + map[string]string{ + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + "credit_alphanum4", + }, + { + EffectTrustlineSponsorshipRemoved, + map[string]string{ + "asset": "USDCE:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + "credit_alphanum12", + }, + } + opID := toid.New(sequence, 1, 1).ToInt64() + + for i, test := range tests { + var bytes []byte + bytes, err = json.Marshal(test.details) + tt.Require.NoError(err) + + err = builder.Add(tt.Ctx, + accountIDs[address], + null.StringFrom(muxedAddres), + opID, + uint32(i), + test.effectType, + bytes, + ) + tt.Require.NoError(err) + } + + err = builder.Exec(tt.Ctx) + tt.Require.NoError(err) + + var results []Effect + err = q.Effects().Select(tt.Ctx, &results) + tt.Require.NoError(err) + tt.Require.Len(results, len(tests)) + + for i, test := range tests { + switch test.effectType { + case EffectTrustlineSponsorshipCreated: + var eff effects.TrustlineSponsorshipCreated + err := results[i].UnmarshalDetails(&eff) + tt.Require.NoError(err) + tt.Assert.Equal(test.expectedAssetType, eff.Type) + case EffectTrustlineSponsorshipUpdated: + var eff effects.TrustlineSponsorshipUpdated + err := results[i].UnmarshalDetails(&eff) + tt.Require.NoError(err) + tt.Assert.Equal(test.expectedAssetType, eff.Type) + case EffectTrustlineSponsorshipRemoved: + var eff effects.TrustlineSponsorshipRemoved + err := results[i].UnmarshalDetails(&eff) + tt.Require.NoError(err) + tt.Assert.Equal(test.expectedAssetType, eff.Type) + default: + panic(fmt.Sprintf("Unknown type %v", test.effectType)) + } + } +} diff --git a/services/horizon/internal/db2/history/fee_bump_scenario.go b/services/horizon/internal/db2/history/fee_bump_scenario.go new file mode 100644 index 0000000000..fc2532028f --- /dev/null +++ b/services/horizon/internal/db2/history/fee_bump_scenario.go @@ -0,0 +1,354 @@ +package history + +import ( + "context" + "encoding/hex" + "encoding/json" + "testing" + "time" + + sq "github.com/Masterminds/squirrel" + "github.com/guregu/null" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/network" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func ledgerToMap(ledger Ledger) map[string]interface{} { + return map[string]interface{}{ + "importer_version": ledger.ImporterVersion, + "id": ledger.TotalOrderID.ID, + "sequence": ledger.Sequence, + "ledger_hash": ledger.LedgerHash, + "previous_ledger_hash": ledger.PreviousLedgerHash, + "total_coins": ledger.TotalCoins, + "fee_pool": ledger.FeePool, + "base_fee": ledger.BaseFee, + "base_reserve": ledger.BaseReserve, + "max_tx_set_size": ledger.MaxTxSetSize, + "closed_at": ledger.ClosedAt, + "created_at": ledger.CreatedAt, + "updated_at": ledger.UpdatedAt, + "transaction_count": 
ledger.SuccessfulTransactionCount, + "successful_transaction_count": ledger.SuccessfulTransactionCount, + "failed_transaction_count": ledger.FailedTransactionCount, + "operation_count": ledger.OperationCount, + "protocol_version": ledger.ProtocolVersion, + "ledger_header": ledger.LedgerHeaderXDR, + } +} + +type testTransaction struct { + index uint32 + envelopeXDR string + resultXDR string + feeChangesXDR string + metaXDR string + hash string +} + +func buildLedgerTransaction(t *testing.T, tx testTransaction) ingest.LedgerTransaction { + transaction := ingest.LedgerTransaction{ + Index: tx.index, + Envelope: xdr.TransactionEnvelope{}, + Result: xdr.TransactionResultPair{}, + FeeChanges: xdr.LedgerEntryChanges{}, + UnsafeMeta: xdr.TransactionMeta{}, + } + + tt := assert.New(t) + + err := xdr.SafeUnmarshalBase64(tx.envelopeXDR, &transaction.Envelope) + tt.NoError(err) + err = xdr.SafeUnmarshalBase64(tx.resultXDR, &transaction.Result.Result) + tt.NoError(err) + err = xdr.SafeUnmarshalBase64(tx.metaXDR, &transaction.UnsafeMeta) + tt.NoError(err) + err = xdr.SafeUnmarshalBase64(tx.feeChangesXDR, &transaction.FeeChanges) + tt.NoError(err) + + _, err = hex.Decode(transaction.Result.TransactionHash[:], []byte(tx.hash)) + tt.NoError(err) + + return transaction +} + +// FeeBumpFixture contains the data inserted into the database +// when running FeeBumpScenario +type FeeBumpFixture struct { + Ledger Ledger + Envelope xdr.TransactionEnvelope + Transaction Transaction + NormalTransaction Transaction + OuterHash string + InnerHash string +} + +// FeeBumpScenario creates a ledger containing a fee bump transaction, +// an operation, and an effect +func FeeBumpScenario(tt *test.T, q *Q, successful bool) FeeBumpFixture { + fixture := FeeBumpFixture{} + sequence := uint32(123) + fixture.Ledger = Ledger{ + Sequence: int32(sequence), + LedgerHash: "4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118", + PreviousLedgerHash: null.NewString("4b0b8bace3b2438b2404776ce57643966855487ba6384724a3c664c7aa4cd9e4", true), + TotalOrderID: TotalOrderID{toid.New(int32(69859), 0, 0).ToInt64()}, + ImporterVersion: 321, + TransactionCount: 12, + SuccessfulTransactionCount: new(int32), + FailedTransactionCount: new(int32), + OperationCount: 23, + TotalCoins: 23451, + FeePool: 213, + BaseReserve: 687, + MaxTxSetSize: 345, + ProtocolVersion: 12, + BaseFee: 100, + ClosedAt: time.Now().UTC().Truncate(time.Second), + LedgerHeaderXDR: null.NewString("temp", true), + } + *fixture.Ledger.SuccessfulTransactionCount = 1 + *fixture.Ledger.FailedTransactionCount = 0 + _, err := q.Exec(context.Background(), sq.Insert("history_ledgers").SetMap(ledgerToMap(fixture.Ledger))) + tt.Assert.NoError(err) + + fixture.Envelope = xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTxFeeBump, + FeeBump: &xdr.FeeBumpTransactionEnvelope{ + Tx: xdr.FeeBumpTransaction{ + FeeSource: xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeEd25519, + Ed25519: &xdr.Uint256{2, 2, 2}, + }, + Fee: 776, + InnerTx: xdr.FeeBumpTransactionInnerTx{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeEd25519, + Ed25519: &xdr.Uint256{ + 3, 3, 3, + }, + }, + Fee: 99, + Memo: xdr.Memo{ + Type: xdr.MemoTypeMemoNone, + }, + SeqNum: 97, + TimeBounds: &xdr.TimeBounds{ + MinTime: 2, + MaxTime: 4, + }, + Operations: []xdr.Operation{ + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypeBumpSequence, + BumpSequenceOp: &xdr.BumpSequenceOp{ + BumpTo: 
98, + }, + }, + }, + }, + }, + Signatures: []xdr.DecoratedSignature{ + { + Hint: xdr.SignatureHint{2, 2, 2, 2}, + Signature: xdr.Signature{20, 20, 20}, + }, + }, + }, + }, + }, + Signatures: []xdr.DecoratedSignature{ + { + Hint: xdr.SignatureHint{3, 3, 3, 3}, + Signature: xdr.Signature{30, 30, 30}, + }, + }, + }, + } + envelopeXDR, err := xdr.MarshalBase64(fixture.Envelope) + tt.Assert.NoError(err) + + innerHash, err := network.HashTransaction( + fixture.Envelope.FeeBump.Tx.InnerTx.V1.Tx, + "Test SDF Network ; September 2015", + ) + tt.Assert.NoError(err) + fixture.InnerHash = hex.EncodeToString(innerHash[:]) + + outerHash, err := network.HashFeeBumpTransaction( + fixture.Envelope.FeeBump.Tx, + "Test SDF Network ; September 2015", + ) + tt.Assert.NoError(err) + fixture.OuterHash = hex.EncodeToString(outerHash[:]) + + tt.Assert.NotEqual(fixture.InnerHash, fixture.OuterHash) + + resultPair := xdr.TransactionResultPair{ + TransactionHash: xdr.Hash(outerHash), + Result: xdr.TransactionResult{ + FeeCharged: 123, + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxFeeBumpInnerSuccess, + InnerResultPair: &xdr.InnerTransactionResultPair{ + TransactionHash: xdr.Hash(innerHash), + Result: xdr.InnerTransactionResult{ + Result: xdr.InnerTransactionResultResult{ + Code: xdr.TransactionResultCodeTxSuccess, + Results: &[]xdr.OperationResult{ + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeBumpSequence, + BumpSeqResult: &xdr.BumpSequenceResult{ + Code: xdr.BumpSequenceResultCodeBumpSequenceSuccess, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + if !successful { + resultPair.Result.Result.Code = xdr.TransactionResultCodeTxFeeBumpInnerFailed + resultPair.Result.Result.InnerResultPair.Result.Result.Code = xdr.TransactionResultCodeTxBadAuth + } + + resultXDR, err := xdr.MarshalBase64(resultPair.Result) + tt.Assert.NoError(err) + + feeBumpTransaction := buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: envelopeXDR, + resultXDR: resultXDR, + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: fixture.OuterHash, + }) + normalTransaction := buildLedgerTransaction(tt.T, testTransaction{ + index: 2, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAABAAAACXRlc3QgbWVtbwAAAAAAAAEAAAAAAAAACwEXUhsAAFfhAAAAAAAAAAA=", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "edba3051b2f2d9b713e8a08709d631eccb72c59864ff3c564c68792271bb24a7", + }) + ctx := context.Background() + insertBuilder := q.NewTransactionBatchInsertBuilder(2) + // include both fee bump and normal transaction in the same batch + // to make sure both kinds of transactions can be inserted using a single exec statement + tt.Assert.NoError(insertBuilder.Add(ctx, feeBumpTransaction, sequence)) + tt.Assert.NoError(insertBuilder.Add(ctx, normalTransaction, sequence)) + tt.Assert.NoError(insertBuilder.Exec(ctx)) + + account := fixture.Envelope.SourceAccount().ToAccountId() + feeBumpAccount := fixture.Envelope.FeeBumpAccount().ToAccountId() + + opBuilder := q.NewOperationBatchInsertBuilder(1) + details, err := json.Marshal(map[string]string{ + "bump_to": "98", + }) + tt.Assert.NoError(err) + + tt.Assert.NoError(opBuilder.Add( + ctx, + toid.New(fixture.Ledger.Sequence, 1, 1).ToInt64(), + toid.New(fixture.Ledger.Sequence, 1, 0).ToInt64(), + 1, + xdr.OperationTypeBumpSequence, + details, + account.Address(), + null.String{}, + 
)) + tt.Assert.NoError(opBuilder.Exec(ctx)) + + effectBuilder := q.NewEffectBatchInsertBuilder(2) + details, err = json.Marshal(map[string]interface{}{"new_seq": 98}) + tt.Assert.NoError(err) + + accounIDs, err := q.CreateAccounts(ctx, []string{account.Address()}, 1) + tt.Assert.NoError(err) + + err = effectBuilder.Add( + ctx, + accounIDs[account.Address()], + null.String{}, + toid.New(fixture.Ledger.Sequence, 1, 1).ToInt64(), + 1, + EffectSequenceBumped, + details, + ) + tt.Assert.NoError(err) + tt.Assert.NoError(effectBuilder.Exec(ctx)) + + fixture.Transaction = Transaction{ + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: fixture.OuterHash, + LedgerSequence: fixture.Ledger.Sequence, + ApplicationOrder: 1, + Account: account.Address(), + AccountSequence: "97", + MaxFee: int64(fixture.Envelope.Fee()), + FeeCharged: int64(resultPair.Result.FeeCharged), + OperationCount: 1, + TxEnvelope: envelopeXDR, + TxResult: resultXDR, + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + MemoType: "none", + Memo: null.NewString("", false), + TimeBounds: TimeBounds{Lower: null.IntFrom(2), Upper: null.IntFrom(4)}, + Signatures: signatures(fixture.Envelope.FeeBumpSignatures()), + InnerSignatures: signatures(fixture.Envelope.Signatures()), + Successful: successful, + NewMaxFee: null.IntFrom(int64(fixture.Envelope.FeeBumpFee())), + InnerTransactionHash: null.StringFrom(fixture.InnerHash), + FeeAccount: null.StringFrom(feeBumpAccount.Address()), + }, + } + + fixture.NormalTransaction = Transaction{ + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "edba3051b2f2d9b713e8a08709d631eccb72c59864ff3c564c68792271bb24a7", + LedgerSequence: fixture.Ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "78621794419880145", + MaxFee: 200, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAABAAAACXRlc3QgbWVtbwAAAAAAAAEAAAAAAAAACwEXUhsAAFfhAAAAAAAAAAA=", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + MemoType: "text", + Memo: null.NewString("test memo", true), + Successful: successful, + }, + } + + results, err := q.TransactionsByIDs(ctx, fixture.Transaction.ID, fixture.NormalTransaction.ID) + tt.Assert.NoError(err) + + fixture.Transaction.CreatedAt = results[fixture.Transaction.ID].CreatedAt + fixture.Transaction.UpdatedAt = results[fixture.Transaction.ID].UpdatedAt + fixture.Transaction.LedgerCloseTime = results[fixture.Transaction.ID].LedgerCloseTime + + fixture.NormalTransaction.CreatedAt = results[fixture.NormalTransaction.ID].CreatedAt + fixture.NormalTransaction.UpdatedAt = results[fixture.NormalTransaction.ID].UpdatedAt + fixture.NormalTransaction.LedgerCloseTime = results[fixture.NormalTransaction.ID].LedgerCloseTime + + return fixture +} diff --git a/services/horizon/internal/db2/history/history_claimable_balances.go b/services/horizon/internal/db2/history/history_claimable_balances.go new file mode 100644 index 0000000000..b67294f4a6 --- /dev/null +++ b/services/horizon/internal/db2/history/history_claimable_balances.go @@ -0,0 +1,154 @@ +package history + +import ( + "context" + "sort" + + sq "github.com/Masterminds/squirrel" + + "github.com/stellar/go/support/db" + 
"github.com/stellar/go/support/errors" +) + +// QHistoryClaimableBalances defines account related queries. +type QHistoryClaimableBalances interface { + CreateHistoryClaimableBalances(ctx context.Context, ids []string, batchSize int) (map[string]int64, error) + NewOperationClaimableBalanceBatchInsertBuilder(maxBatchSize int) OperationClaimableBalanceBatchInsertBuilder + NewTransactionClaimableBalanceBatchInsertBuilder(maxBatchSize int) TransactionClaimableBalanceBatchInsertBuilder +} + +// CreateHistoryClaimableBalances creates rows in the history_claimable_balances table for a given list of ids. +// CreateHistoryClaimableBalances returns a mapping of id to its corresponding internal id in the history_claimable_balances table +func (q *Q) CreateHistoryClaimableBalances(ctx context.Context, ids []string, batchSize int) (map[string]int64, error) { + builder := &db.BatchInsertBuilder{ + Table: q.GetTable("history_claimable_balances"), + MaxBatchSize: batchSize, + Suffix: "ON CONFLICT (claimable_balance_id) DO NOTHING", + } + + // sort before inserting to prevent deadlocks on acquiring a ShareLock + // https://github.com/stellar/go/issues/2370 + sort.Strings(ids) + for _, id := range ids { + err := builder.Row(ctx, map[string]interface{}{ + "claimable_balance_id": id, + }) + if err != nil { + return nil, errors.Wrap(err, "could not insert history_claimable_balances row") + } + } + + err := builder.Exec(ctx) + if err != nil { + return nil, errors.Wrap(err, "could not exec claimable balance insert builder") + } + + var cbs []HistoryClaimableBalance + toInternalID := map[string]int64{} + const selectBatchSize = 10000 + + for i := 0; i < len(ids); i += selectBatchSize { + end := i + selectBatchSize + if end > len(ids) { + end = len(ids) + } + subset := ids[i:end] + + cbs, err = q.ClaimableBalancesByIDs(ctx, subset) + if err != nil { + return nil, errors.Wrap(err, "could not select claimable balances") + } + + for _, cb := range cbs { + toInternalID[cb.BalanceID] = cb.InternalID + } + } + + return toInternalID, nil +} + +// HistoryClaimableBalance is a row of data from the `history_claimable_balances` table +type HistoryClaimableBalance struct { + BalanceID string `db:"claimable_balance_id"` + InternalID int64 `db:"id"` +} + +var selectHistoryClaimableBalance = sq.Select("hcb.*").From("history_claimable_balances hcb") + +// ClaimableBalancesByIDs loads rows from `history_claimable_balances`, by claimable_balance_id +func (q *Q) ClaimableBalancesByIDs(ctx context.Context, ids []string) (dest []HistoryClaimableBalance, err error) { + sql := selectHistoryClaimableBalance.Where(map[string]interface{}{ + "hcb.claimable_balance_id": ids, // hcb.claimable_balance_id IN (...) 
+	})
+	err = q.Select(ctx, &dest, sql)
+	return dest, err
+}
+
+// ClaimableBalanceByID loads a row from `history_claimable_balances`, by claimable_balance_id
+func (q *Q) ClaimableBalanceByID(ctx context.Context, id string) (dest HistoryClaimableBalance, err error) {
+	sql := selectHistoryClaimableBalance.Limit(1).Where("hcb.claimable_balance_id = ?", id)
+	err = q.Get(ctx, &dest, sql)
+	return dest, err
+}
+
+type OperationClaimableBalanceBatchInsertBuilder interface {
+	Add(ctx context.Context, operationID, internalID int64) error
+	Exec(ctx context.Context) error
+}
+
+type operationClaimableBalanceBatchInsertBuilder struct {
+	builder db.BatchInsertBuilder
+}
+
+func (q *Q) NewOperationClaimableBalanceBatchInsertBuilder(maxBatchSize int) OperationClaimableBalanceBatchInsertBuilder {
+	return &operationClaimableBalanceBatchInsertBuilder{
+		builder: db.BatchInsertBuilder{
+			Table:        q.GetTable("history_operation_claimable_balances"),
+			MaxBatchSize: maxBatchSize,
+		},
+	}
+}
+
+// Add adds a new operation claimable balance to the batch
+func (i *operationClaimableBalanceBatchInsertBuilder) Add(ctx context.Context, operationID, internalID int64) error {
+	return i.builder.Row(ctx, map[string]interface{}{
+		"history_operation_id":         operationID,
+		"history_claimable_balance_id": internalID,
+	})
+}
+
+// Exec flushes all pending operation claimable balances to the db
+func (i *operationClaimableBalanceBatchInsertBuilder) Exec(ctx context.Context) error {
+	return i.builder.Exec(ctx)
+}
+
+type TransactionClaimableBalanceBatchInsertBuilder interface {
+	Add(ctx context.Context, transactionID, internalID int64) error
+	Exec(ctx context.Context) error
+}
+
+type transactionClaimableBalanceBatchInsertBuilder struct {
+	builder db.BatchInsertBuilder
+}
+
+func (q *Q) NewTransactionClaimableBalanceBatchInsertBuilder(maxBatchSize int) TransactionClaimableBalanceBatchInsertBuilder {
+	return &transactionClaimableBalanceBatchInsertBuilder{
+		builder: db.BatchInsertBuilder{
+			Table:        q.GetTable("history_transaction_claimable_balances"),
+			MaxBatchSize: maxBatchSize,
+		},
+	}
+}
+
+// Add adds a new transaction claimable balance to the batch
+func (i *transactionClaimableBalanceBatchInsertBuilder) Add(ctx context.Context, transactionID, internalID int64) error {
+	return i.builder.Row(ctx, map[string]interface{}{
+		"history_transaction_id":       transactionID,
+		"history_claimable_balance_id": internalID,
+	})
+}
+
+// Exec flushes all pending transaction claimable balances to the db
+func (i *transactionClaimableBalanceBatchInsertBuilder) Exec(ctx context.Context) error {
+	return i.builder.Exec(ctx)
+}
diff --git a/services/horizon/internal/db2/history/history_liquidity_pools.go b/services/horizon/internal/db2/history/history_liquidity_pools.go
new file mode 100644
index 0000000000..0601d91be7
--- /dev/null
+++ b/services/horizon/internal/db2/history/history_liquidity_pools.go
@@ -0,0 +1,163 @@
+package history
+
+import (
+	"context"
+	"sort"
+
+	sq "github.com/Masterminds/squirrel"
+	"github.com/stellar/go/support/db"
+	"github.com/stellar/go/support/errors"
+)
+
+// QHistoryLiquidityPools defines liquidity pool related queries.
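+//
+// Typical ingestion flow (illustrative sketch only; the variable names are
+// assumptions, the methods are the ones declared below and mirror the usage
+// in effect_test.go):
+//
+//	ids, err := q.CreateHistoryLiquidityPools(ctx, poolIDs, batchSize)
+//	// handle err ...
+//	builder := q.NewOperationLiquidityPoolBatchInsertBuilder(batchSize)
+//	err = builder.Add(ctx, operationID, ids[poolID])
+//	// handle err, then flush the batch:
+//	err = builder.Exec(ctx)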
+type QHistoryLiquidityPools interface {
+	CreateHistoryLiquidityPools(ctx context.Context, poolIDs []string, batchSize int) (map[string]int64, error)
+	NewOperationLiquidityPoolBatchInsertBuilder(maxBatchSize int) OperationLiquidityPoolBatchInsertBuilder
+	NewTransactionLiquidityPoolBatchInsertBuilder(maxBatchSize int) TransactionLiquidityPoolBatchInsertBuilder
+}
+
+// CreateHistoryLiquidityPools creates rows in the history_liquidity_pools table for a given list of ids.
+// CreateHistoryLiquidityPools returns a mapping of id to its corresponding internal id in the history_liquidity_pools table
+func (q *Q) CreateHistoryLiquidityPools(ctx context.Context, poolIDs []string, batchSize int) (map[string]int64, error) {
+	if len(poolIDs) == 0 {
+		return nil, nil
+	}
+
+	builder := &db.BatchInsertBuilder{
+		Table:        q.GetTable("history_liquidity_pools"),
+		MaxBatchSize: batchSize,
+		Suffix:       "ON CONFLICT (liquidity_pool_id) DO NOTHING",
+	}
+
+	// sort before inserting to prevent deadlocks on acquiring a ShareLock
+	// https://github.com/stellar/go/issues/2370
+	sort.Strings(poolIDs)
+	var deduped []string
+	for i, id := range poolIDs {
+		if i > 0 && id == poolIDs[i-1] {
+			// skip duplicates
+			continue
+		}
+		deduped = append(deduped, id)
+		err := builder.Row(ctx, map[string]interface{}{
+			"liquidity_pool_id": id,
+		})
+		if err != nil {
+			return nil, errors.Wrap(err, "could not insert history_liquidity_pools row")
+		}
+	}
+
+	err := builder.Exec(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "could not exec liquidity pool insert builder")
+	}
+
+	var lps []HistoryLiquidityPool
+	toInternalID := map[string]int64{}
+	const selectBatchSize = 10000
+
+	for i := 0; i < len(deduped); i += selectBatchSize {
+		end := i + selectBatchSize
+		if end > len(deduped) {
+			end = len(deduped)
+		}
+		subset := deduped[i:end]
+
+		lps, err = q.LiquidityPoolsByIDs(ctx, subset)
+		if err != nil {
+			return nil, errors.Wrap(err, "could not select liquidity pools")
+		}
+
+		for _, lp := range lps {
+			toInternalID[lp.PoolID] = lp.InternalID
+		}
+	}
+
+	return toInternalID, nil
+}
+
+// HistoryLiquidityPool is a row of data from the `history_liquidity_pools` table
+type HistoryLiquidityPool struct {
+	PoolID     string `db:"liquidity_pool_id"`
+	InternalID int64  `db:"id"`
+}
+
+var selectHistoryLiquidityPool = sq.Select("hlp.*").From("history_liquidity_pools hlp")
+
+// LiquidityPoolsByIDs loads rows from `history_liquidity_pools`, by liquidity_pool_id
+func (q *Q) LiquidityPoolsByIDs(ctx context.Context, poolIDs []string) (dest []HistoryLiquidityPool, err error) {
+	sql := selectHistoryLiquidityPool.Where(map[string]interface{}{
+		"hlp.liquidity_pool_id": poolIDs, // hlp.liquidity_pool_id IN (...)
+	})
+	err = q.Select(ctx, &dest, sql)
+	return dest, err
+}
+
+// LiquidityPoolByID loads a row from `history_liquidity_pools`, by liquidity_pool_id
+func (q *Q) LiquidityPoolByID(ctx context.Context, poolID string) (dest HistoryLiquidityPool, err error) {
+	sql := selectHistoryLiquidityPool.Limit(1).Where("hlp.liquidity_pool_id = ?", poolID)
+	err = q.Get(ctx, &dest, sql)
+	return dest, err
+}
+
+type OperationLiquidityPoolBatchInsertBuilder interface {
+	Add(ctx context.Context, operationID, internalID int64) error
+	Exec(ctx context.Context) error
+}
+
+type operationLiquidityPoolBatchInsertBuilder struct {
+	builder db.BatchInsertBuilder
+}
+
+func (q *Q) NewOperationLiquidityPoolBatchInsertBuilder(maxBatchSize int) OperationLiquidityPoolBatchInsertBuilder {
+	return &operationLiquidityPoolBatchInsertBuilder{
+		builder: db.BatchInsertBuilder{
+			Table:        q.GetTable("history_operation_liquidity_pools"),
+			MaxBatchSize: maxBatchSize,
+		},
+	}
+}
+
+// Add adds a new operation liquidity pool to the batch
+func (i *operationLiquidityPoolBatchInsertBuilder) Add(ctx context.Context, operationID, internalID int64) error {
+	return i.builder.Row(ctx, map[string]interface{}{
+		"history_operation_id":      operationID,
+		"history_liquidity_pool_id": internalID,
+	})
+}
+
+// Exec flushes all pending operation liquidity pools to the db
+func (i *operationLiquidityPoolBatchInsertBuilder) Exec(ctx context.Context) error {
+	return i.builder.Exec(ctx)
+}
+
+type TransactionLiquidityPoolBatchInsertBuilder interface {
+	Add(ctx context.Context, transactionID, internalID int64) error
+	Exec(ctx context.Context) error
+}
+
+type transactionLiquidityPoolBatchInsertBuilder struct {
+	builder db.BatchInsertBuilder
+}
+
+func (q *Q) NewTransactionLiquidityPoolBatchInsertBuilder(maxBatchSize int) TransactionLiquidityPoolBatchInsertBuilder {
+	return &transactionLiquidityPoolBatchInsertBuilder{
+		builder: db.BatchInsertBuilder{
+			Table:        q.GetTable("history_transaction_liquidity_pools"),
+			MaxBatchSize: maxBatchSize,
+		},
+	}
+}
+
+// Add adds a new transaction liquidity pool to the batch
+func (i *transactionLiquidityPoolBatchInsertBuilder) Add(ctx context.Context, transactionID, internalID int64) error {
+	return i.builder.Row(ctx, map[string]interface{}{
+		"history_transaction_id":    transactionID,
+		"history_liquidity_pool_id": internalID,
+	})
+}
+
+// Exec flushes all pending transaction liquidity pools to the db
+func (i *transactionLiquidityPoolBatchInsertBuilder) Exec(ctx context.Context) error {
+	return i.builder.Exec(ctx)
+}
diff --git a/services/horizon/internal/db2/history/ingestion.go b/services/horizon/internal/db2/history/ingestion.go
new file mode 100644
index 0000000000..518dfd86c4
--- /dev/null
+++ b/services/horizon/internal/db2/history/ingestion.go
@@ -0,0 +1,23 @@
+package history
+
+import (
+	"context"
+)
+
+// TruncateIngestStateTables clears out ingestion state tables.
+// Ingestion state tables are horizon database tables populated by
+// the ingestion system using history archive snapshots.
+// Any horizon database tables which cannot be populated using
+// history archive snapshots will not be truncated.
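+//
+// Illustrative call site (sketch only; assumes the caller manages the
+// surrounding database transaction):
+//
+//	if err := q.TruncateIngestStateTables(ctx); err != nil {
+//		// handle the error before rebuilding state from a history archive snapshot
+//	}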
+func (q *Q) TruncateIngestStateTables(ctx context.Context) error {
+	return q.TruncateTables(ctx, []string{
+		"accounts",
+		"accounts_data",
+		"accounts_signers",
+		"claimable_balances",
+		"exp_asset_stats",
+		"liquidity_pools",
+		"offers",
+		"trust_lines",
+	})
+}
diff --git a/services/horizon/internal/db2/history/key_value.go b/services/horizon/internal/db2/history/key_value.go
new file mode 100644
index 0000000000..c41fec77d9
--- /dev/null
+++ b/services/horizon/internal/db2/history/key_value.go
@@ -0,0 +1,215 @@
+package history
+
+import (
+	"context"
+	"database/sql"
+	"strconv"
+
+	sq "github.com/Masterminds/squirrel"
+	"github.com/stellar/go/support/errors"
+)
+
+const (
+	ingestVersion = "exp_ingest_version"
+	// Distributed ingestion in Horizon relies on this key and it is part
+	// of migration files. If you need to update the key name remember
+	// to upgrade it in migration files too!
+	lastLedgerKey                   = "exp_ingest_last_ledger"
+	stateInvalid                    = "exp_state_invalid"
+	offerCompactionSequence         = "offer_compaction_sequence"
+	liquidityPoolCompactionSequence = "liquidity_pool_compaction_sequence"
+)
+
+// GetLastLedgerIngestNonBlocking works like GetLastLedgerIngest but
+// it does not lock the row and does not return an error if the value
+// has not been previously set.
+// This is used in status reporting (ex. in root resource of Horizon).
+func (q *Q) GetLastLedgerIngestNonBlocking(ctx context.Context) (uint32, error) {
+	lastIngestedLedger, err := q.getValueFromStore(ctx, lastLedgerKey, false)
+	if err != nil {
+		return 0, err
+	}
+
+	if lastIngestedLedger == "" {
+		return 0, nil
+	} else {
+		ledgerSequence, err := strconv.ParseUint(lastIngestedLedger, 10, 32)
+		if err != nil {
+			return 0, errors.Wrap(err, "Error converting lastIngestedLedger value")
+		}
+
+		return uint32(ledgerSequence), nil
+	}
+}
+
+// GetLastLedgerIngest returns the last ledger ingested by the ingest system
+// in Horizon. Returns an error if no value has been previously set.
+// It uses `SELECT ... FOR UPDATE`, which means it locks the row for all other
+// transactions. This behaviour is critical in distributed ingestion, so do not
+// change it unless you know what you are doing.
+// The value can be set using UpdateLastLedgerIngest.
+func (q *Q) GetLastLedgerIngest(ctx context.Context) (uint32, error) {
+	lastIngestedLedger, err := q.getValueFromStore(ctx, lastLedgerKey, true)
+	if err != nil {
+		return 0, err
+	}
+
+	if lastIngestedLedger == "" {
+		// This key should always be in a DB (is added in migrations). Otherwise
+		// locking won't work.
+		return 0, errors.Errorf("`%s` key cannot be found in the key value store", lastLedgerKey)
+	} else {
+		ledgerSequence, err := strconv.ParseUint(lastIngestedLedger, 10, 32)
+		if err != nil {
+			return 0, errors.Wrap(err, "Error converting lastIngestedLedger value")
+		}
+
+		return uint32(ledgerSequence), nil
+	}
+}
+
+// UpdateLastLedgerIngest updates the last ledger ingested by ingest system.
+// Can be read using GetLastLedgerIngest.
+func (q *Q) UpdateLastLedgerIngest(ctx context.Context, ledgerSequence uint32) error {
+	return q.updateValueInStore(
+		ctx,
+		lastLedgerKey,
+		strconv.FormatUint(uint64(ledgerSequence), 10),
+	)
+}
+
+// GetIngestVersion returns the ingestion version. Returns zero
+// if there is no value.
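+//
+// Illustrative read (sketch only):
+//
+//	version, err := q.GetIngestVersion(ctx)
+//	if err == nil && version == 0 {
+//		// no version has been stored yet
+//	}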
+func (q *Q) GetIngestVersion(ctx context.Context) (int, error) { + parsed, err := q.getIntValueFromStore(ctx, ingestVersion, 32) + if err != nil { + return 0, errors.Wrap(err, "Error converting sequence value") + } + return int(parsed), nil +} + +// UpdateIngestVersion updates the ingestion version. +func (q *Q) UpdateIngestVersion(ctx context.Context, version int) error { + return q.updateValueInStore( + ctx, + ingestVersion, + strconv.FormatUint(uint64(version), 10), + ) +} + +// GetExpStateInvalid returns true if the state was found to be invalid. +// Returns false otherwise. +func (q *Q) GetExpStateInvalid(ctx context.Context) (bool, error) { + invalid, err := q.getValueFromStore(ctx, stateInvalid, false) + if err != nil { + return false, err + } + + if invalid == "" { + return false, nil + } else { + val, err := strconv.ParseBool(invalid) + if err != nil { + return false, errors.Wrap(err, "Error converting invalid value") + } + + return val, nil + } +} + +// UpdateExpStateInvalid updates the state invalid value. +func (q *Q) UpdateExpStateInvalid(ctx context.Context, val bool) error { + return q.updateValueInStore( + ctx, + stateInvalid, + strconv.FormatBool(val), + ) +} + +// GetOfferCompactionSequence returns the sequence number corresponding to the +// last time the offers table was compacted. +func (q *Q) GetOfferCompactionSequence(ctx context.Context) (uint32, error) { + parsed, err := q.getIntValueFromStore(ctx, offerCompactionSequence, 32) + if err != nil { + return 0, errors.Wrap(err, "Error converting sequence value") + } + return uint32(parsed), nil +} + +// GetLiquidityPoolCompactionSequence returns the sequence number corresponding to the +// last time the liquidity pools table was compacted. +func (q *Q) GetLiquidityPoolCompactionSequence(ctx context.Context) (uint32, error) { + parsed, err := q.getIntValueFromStore(ctx, liquidityPoolCompactionSequence, 32) + if err != nil { + return 0, errors.Wrap(err, "Error converting sequence value") + } + + return uint32(parsed), nil +} + +func (q *Q) getIntValueFromStore(ctx context.Context, key string, bitSize int) (int64, error) { + sequence, err := q.getValueFromStore(ctx, key, false) + if err != nil { + return 0, err + } + + if sequence == "" { + return 0, nil + } + parsed, err := strconv.ParseInt(sequence, 10, bitSize) + if err != nil { + return 0, errors.Wrap(err, "Error converting value") + } + return parsed, nil +} + +// UpdateOfferCompactionSequence sets the sequence number corresponding to the +// last time the offers table was compacted. +func (q *Q) UpdateOfferCompactionSequence(ctx context.Context, sequence uint32) error { + return q.updateValueInStore( + ctx, + offerCompactionSequence, + strconv.FormatUint(uint64(sequence), 10), + ) +} + +func (q *Q) UpdateLiquidityPoolCompactionSequence(ctx context.Context, sequence uint32) error { + return q.updateValueInStore( + ctx, + liquidityPoolCompactionSequence, + strconv.FormatUint(uint64(sequence), 10), + ) +} + +// getValueFromStore returns a value for a given key from KV store. If value +// is not present in the key value store "" will be returned. +func (q *Q) getValueFromStore(ctx context.Context, key string, forUpdate bool) (string, error) { + query := sq.Select("key_value_store.value"). + From("key_value_store"). 
+ Where("key_value_store.key = ?", key) + + if forUpdate { + query = query.Suffix("FOR UPDATE") + } + + var value string + if err := q.Get(ctx, &value, query); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + return "", nil + } + return "", errors.Wrap(err, "could not get value") + } + + return value, nil +} + +// updateValueInStore updates a value for a given key in KV store +func (q *Q) updateValueInStore(ctx context.Context, key, value string) error { + query := sq.Insert("key_value_store"). + Columns("key", "value"). + Values(key, value). + Suffix("ON CONFLICT (key) DO UPDATE SET value=EXCLUDED.value") + + _, err := q.Exec(ctx, query) + return err +} diff --git a/services/horizon/internal/db2/history/ledger.go b/services/horizon/internal/db2/history/ledger.go new file mode 100644 index 0000000000..dea79aee65 --- /dev/null +++ b/services/horizon/internal/db2/history/ledger.go @@ -0,0 +1,285 @@ +package history + +import ( + "context" + "encoding/hex" + "fmt" + "sort" + "time" + + sq "github.com/Masterminds/squirrel" + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// LedgerBySequence loads the single ledger at `seq` into `dest` +func (q *Q) LedgerBySequence(ctx context.Context, dest interface{}, seq int32) error { + sql := selectLedger. + Limit(1). + Where("sequence = ?", seq) + + return q.Get(ctx, dest, sql) +} + +// Ledgers provides a helper to filter rows from the `history_ledgers` table +// with pre-defined filters. See `LedgersQ` methods for the available filters. +func (q *Q) Ledgers() *LedgersQ { + return &LedgersQ{ + parent: q, + sql: selectLedger, + } +} + +// LedgersBySequence loads the a set of ledgers identified by the sequences +// `seqs` into `dest`. +func (q *Q) LedgersBySequence(ctx context.Context, dest interface{}, seqs ...int32) error { + if len(seqs) == 0 { + return errors.New("no sequence arguments provided") + } + in := fmt.Sprintf("sequence IN (%s)", sq.Placeholders(len(seqs))) + + whereArgs := make([]interface{}, len(seqs)) + for i, s := range seqs { + whereArgs[i] = s + } + + sql := selectLedger.Where(in, whereArgs...) + + return q.Select(ctx, dest, sql) +} + +// LedgerCapacityUsageStats returns ledger capacity stats for the last 5 ledgers. +// Currently, we hard code the query to return the last 5 ledgers. +// TODO: make the number of ledgers configurable. +func (q *Q) LedgerCapacityUsageStats(ctx context.Context, currentSeq int32, dest *LedgerCapacityUsageStats) error { + const ledgers int32 = 5 + return q.GetRaw(ctx, dest, ` + SELECT ROUND(SUM(CAST(operation_count as decimal))/SUM(max_tx_set_size), 2) as ledger_capacity_usage FROM + (SELECT + hl.sequence, COALESCE(SUM(ht.operation_count), 0) as operation_count, hl.max_tx_set_size + FROM history_ledgers hl + LEFT JOIN history_transactions ht ON ht.ledger_sequence = hl.sequence + WHERE hl.sequence > $1 AND hl.sequence <= $2 + GROUP BY hl.sequence, hl.max_tx_set_size) as a + `, currentSeq-ledgers, currentSeq) +} + +// Page specifies the paging constraints for the query being built by `q`. +func (q *LedgersQ) Page(page db2.PageQuery) *LedgersQ { + if q.Err != nil { + return q + } + + q.sql, q.Err = page.ApplyTo(q.sql, "hl.id") + return q +} + +// Select loads the results of the query specified by `q` into `dest`. 
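+//
+// Illustrative usage of the query builder (sketch only; historyQ is an
+// assumed *Q and pq an assumed db2.PageQuery):
+//
+//	var ledgers []Ledger
+//	err := historyQ.Ledgers().Page(pq).Select(ctx, &ledgers)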
+func (q *LedgersQ) Select(ctx context.Context, dest interface{}) error { + if q.Err != nil { + return q.Err + } + + q.Err = q.parent.Select(ctx, dest, q.sql) + return q.Err +} + +// QLedgers defines ingestion ledger related queries. +type QLedgers interface { + InsertLedger( + ctx context.Context, + ledger xdr.LedgerHeaderHistoryEntry, + successTxsCount int, + failedTxsCount int, + opCount int, + txSetOpCount int, + ingestVersion int, + ) (int64, error) +} + +// InsertLedger creates a row in the history_ledgers table. +// Returns number of rows affected and error. +func (q *Q) InsertLedger(ctx context.Context, + ledger xdr.LedgerHeaderHistoryEntry, + successTxsCount int, + failedTxsCount int, + opCount int, + txSetOpCount int, + ingestVersion int, +) (int64, error) { + m, err := ledgerHeaderToMap( + ledger, + successTxsCount, + failedTxsCount, + opCount, + txSetOpCount, + ingestVersion, + ) + if err != nil { + return 0, err + } + + sql := sq.Insert("history_ledgers").SetMap(m) + result, err := q.Exec(ctx, sql) + if err != nil { + return 0, err + } + + return result.RowsAffected() +} + +// GetLedgerGaps obtains ingestion gaps in the history_ledgers table. +// Returns the gaps and error. +func (q *Q) GetLedgerGaps(ctx context.Context) ([]LedgerRange, error) { + var gaps []LedgerRange + query := ` + SELECT sequence + 1 AS start, + next_number - 1 AS end + FROM ( + SELECT sequence, + LEAD(sequence) OVER (ORDER BY sequence) AS next_number + FROM history_ledgers + ) number + WHERE sequence + 1 <> next_number;` + if err := q.SelectRaw(ctx, &gaps, query); err != nil { + return nil, err + } + sort.Slice(gaps, func(i, j int) bool { + return gaps[i].StartSequence < gaps[j].StartSequence + }) + return gaps, nil +} + +func max(a, b uint32) uint32 { + if a > b { + return a + } + return b +} + +func min(a, b uint32) uint32 { + if a > b { + return b + } + return a +} + +// GetLedgerGapsInRange obtains ingestion gaps in the history_ledgers table within the given range. +// Returns the gaps and error. 
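+//
+// For example, with only ledgers 4 through 7 ingested (sketch, mirroring
+// the cases exercised in ledger_test.go):
+//
+//	gaps, err := q.GetLedgerGapsInRange(ctx, 1, 10)
+//	// gaps == []LedgerRange{{1, 3}, {8, 10}}, err == nil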
+func (q *Q) GetLedgerGapsInRange(ctx context.Context, start, end uint32) ([]LedgerRange, error) { + var result []LedgerRange + var oldestLedger, latestLedger uint32 + + if err := q.ElderLedger(ctx, &oldestLedger); err != nil { + return nil, errors.Wrap(err, "Could not query elder ledger") + } else if oldestLedger == 0 { + return []LedgerRange{{ + StartSequence: start, + EndSequence: end, + }}, nil + } + + if err := q.LatestLedger(ctx, &latestLedger); err != nil { + return nil, errors.Wrap(err, "Could not query latest ledger") + } + + if start < oldestLedger { + result = append(result, LedgerRange{ + StartSequence: start, + EndSequence: min(end, oldestLedger-1), + }) + } + if end <= oldestLedger { + return result, nil + } + + gaps, err := q.GetLedgerGaps(ctx) + if err != nil { + return nil, err + } + + for _, gap := range gaps { + if gap.EndSequence < start { + continue + } + if gap.StartSequence > end { + break + } + result = append(result, LedgerRange{ + StartSequence: max(gap.StartSequence, start), + EndSequence: min(gap.EndSequence, end), + }) + } + + if latestLedger < end { + result = append(result, LedgerRange{ + StartSequence: max(latestLedger+1, start), + EndSequence: end, + }) + } + + return result, nil +} + +func ledgerHeaderToMap( + ledger xdr.LedgerHeaderHistoryEntry, + successTxsCount int, + failedTxsCount int, + opCount int, + txSetOpCount int, + importerVersion int, +) (map[string]interface{}, error) { + ledgerHeaderBase64, err := xdr.MarshalBase64(ledger.Header) + if err != nil { + return nil, err + } + closeTime := time.Unix(int64(ledger.Header.ScpValue.CloseTime), 0).UTC() + return map[string]interface{}{ + "importer_version": importerVersion, + "id": toid.New(int32(ledger.Header.LedgerSeq), 0, 0).ToInt64(), + "sequence": ledger.Header.LedgerSeq, + "ledger_hash": hex.EncodeToString(ledger.Hash[:]), + "previous_ledger_hash": null.NewString(hex.EncodeToString(ledger.Header.PreviousLedgerHash[:]), ledger.Header.LedgerSeq > 1), + "total_coins": ledger.Header.TotalCoins, + "fee_pool": ledger.Header.FeePool, + "base_fee": ledger.Header.BaseFee, + "base_reserve": ledger.Header.BaseReserve, + "max_tx_set_size": ledger.Header.MaxTxSetSize, + "closed_at": closeTime, + "created_at": time.Now().UTC(), + "updated_at": time.Now().UTC(), + "transaction_count": successTxsCount, + "successful_transaction_count": successTxsCount, + "failed_transaction_count": failedTxsCount, + "operation_count": opCount, + "tx_set_operation_count": txSetOpCount, + "protocol_version": ledger.Header.LedgerVersion, + "ledger_header": ledgerHeaderBase64, + }, nil +} + +var selectLedger = sq.Select( + "hl.id", + "hl.sequence", + "hl.importer_version", + "hl.ledger_hash", + "hl.previous_ledger_hash", + "hl.transaction_count", + "hl.successful_transaction_count", + "hl.failed_transaction_count", + "hl.operation_count", + "hl.tx_set_operation_count", + "hl.closed_at", + "hl.created_at", + "hl.updated_at", + "hl.total_coins", + "hl.fee_pool", + "hl.base_fee", + "hl.base_reserve", + "hl.max_tx_set_size", + "hl.protocol_version", + "hl.ledger_header", +).From("history_ledgers hl") diff --git a/services/horizon/internal/db2/history/ledger_cache.go b/services/horizon/internal/db2/history/ledger_cache.go new file mode 100644 index 0000000000..3318048ef7 --- /dev/null +++ b/services/horizon/internal/db2/history/ledger_cache.go @@ -0,0 +1,49 @@ +package history + +import ( + "context" + + "github.com/stellar/go/support/errors" +) + +// Queue adds `seq` to the load queue for the cache. 
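+//
+// Illustrative usage together with Load (sketch only, mirroring
+// ledger_cache_test.go):
+//
+//	var lc LedgerCache
+//	lc.Queue(2)
+//	lc.Queue(3)
+//	err := lc.Load(ctx, q)
+//	// on success, lc.Records[2] and lc.Records[3] are populated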
+func (lc *LedgerCache) Queue(seq int32) { + lc.lock.Lock() + + if lc.queued == nil { + lc.queued = map[int32]struct{}{} + } + + lc.queued[seq] = struct{}{} + lc.lock.Unlock() +} + +// Load loads a batch of ledgers identified by `sequences`, using `q`, +// and populates the cache with the results +func (lc *LedgerCache) Load(ctx context.Context, q *Q) error { + lc.lock.Lock() + defer lc.lock.Unlock() + + if len(lc.queued) == 0 { + return nil + } + + sequences := make([]int32, 0, len(lc.queued)) + for seq := range lc.queued { + sequences = append(sequences, seq) + } + + var ledgers []Ledger + err := q.LedgersBySequence(ctx, &ledgers, sequences...) + if err != nil { + return errors.Wrap(err, "failed to load ledger batch") + } + + lc.Records = map[int32]Ledger{} + for _, l := range ledgers { + lc.Records[l.Sequence] = l + } + + lc.queued = nil + return nil +} diff --git a/services/horizon/internal/db2/history/ledger_cache_test.go b/services/horizon/internal/db2/history/ledger_cache_test.go new file mode 100644 index 0000000000..17720a72fe --- /dev/null +++ b/services/horizon/internal/db2/history/ledger_cache_test.go @@ -0,0 +1,26 @@ +package history + +import ( + "testing" + + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestLedgerCache(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + var lc LedgerCache + lc.Queue(2) + lc.Queue(3) + + err := lc.Load(tt.Ctx, q) + + if tt.Assert.NoError(err) { + tt.Assert.Contains(lc.Records, int32(2)) + tt.Assert.Contains(lc.Records, int32(3)) + tt.Assert.NotContains(lc.Records, int32(1)) + } +} diff --git a/services/horizon/internal/db2/history/ledger_test.go b/services/horizon/internal/db2/history/ledger_test.go new file mode 100644 index 0000000000..0fc5280ed4 --- /dev/null +++ b/services/horizon/internal/db2/history/ledger_test.go @@ -0,0 +1,339 @@ +package history + +import ( + "context" + "database/sql" + "encoding/hex" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/guregu/null" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/xdr" +) + +func TestLedgerQueries(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + // Test LedgerBySequence + var l Ledger + err := q.LedgerBySequence(tt.Ctx, &l, 3) + tt.Assert.NoError(err) + + err = q.LedgerBySequence(tt.Ctx, &l, 100000) + tt.Assert.Equal(err, sql.ErrNoRows) + + // Test Ledgers() + ls := []Ledger{} + err = q.Ledgers().Select(tt.Ctx, &ls) + + if tt.Assert.NoError(err) { + tt.Assert.Len(ls, 3) + } + + // LedgersBySequence + err = q.LedgersBySequence(tt.Ctx, &ls, 1, 2, 3) + + if tt.Assert.NoError(err) { + tt.Assert.Len(ls, 3) + + foundSeqs := make([]int32, len(ls)) + for i := range ls { + foundSeqs[i] = ls[i].Sequence + } + + tt.Assert.Contains(foundSeqs, int32(1)) + tt.Assert.Contains(foundSeqs, int32(2)) + tt.Assert.Contains(foundSeqs, int32(3)) + } +} + +func TestInsertLedger(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + ledgerHashStore := ledgerbackend.NewHorizonDBLedgerHashStore(tt.HorizonSession()) + _, exists, err := ledgerHashStore.GetLedgerHash(tt.Ctx, 100) + tt.Assert.NoError(err) + tt.Assert.False(exists) + + expectedLedger := Ledger{ + Sequence: 69859, + LedgerHash: 
"4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118", + PreviousLedgerHash: null.NewString("4b0b8bace3b2438b2404776ce57643966855487ba6384724a3c664c7aa4cd9e4", true), + TotalOrderID: TotalOrderID{toid.New(int32(69859), 0, 0).ToInt64()}, + ImporterVersion: 123, + TransactionCount: 12, + SuccessfulTransactionCount: new(int32), + FailedTransactionCount: new(int32), + TxSetOperationCount: new(int32), + OperationCount: 23, + TotalCoins: 23451, + FeePool: 213, + BaseReserve: 687, + MaxTxSetSize: 345, + ProtocolVersion: 12, + BaseFee: 100, + ClosedAt: time.Now().UTC().Truncate(time.Second), + } + *expectedLedger.SuccessfulTransactionCount = 12 + *expectedLedger.FailedTransactionCount = 3 + *expectedLedger.TxSetOperationCount = 26 + + var ledgerHash, previousLedgerHash xdr.Hash + + written, err := hex.Decode(ledgerHash[:], []byte(expectedLedger.LedgerHash)) + tt.Assert.NoError(err) + tt.Assert.Equal(len(ledgerHash), written) + + written, err = hex.Decode(previousLedgerHash[:], []byte(expectedLedger.PreviousLedgerHash.String)) + tt.Assert.NoError(err) + tt.Assert.Equal(len(previousLedgerHash), written) + + ledgerEntry := xdr.LedgerHeaderHistoryEntry{ + Hash: ledgerHash, + Header: xdr.LedgerHeader{ + LedgerVersion: 12, + PreviousLedgerHash: previousLedgerHash, + LedgerSeq: xdr.Uint32(expectedLedger.Sequence), + TotalCoins: xdr.Int64(expectedLedger.TotalCoins), + FeePool: xdr.Int64(expectedLedger.FeePool), + BaseFee: xdr.Uint32(expectedLedger.BaseFee), + BaseReserve: xdr.Uint32(expectedLedger.BaseReserve), + MaxTxSetSize: xdr.Uint32(expectedLedger.MaxTxSetSize), + ScpValue: xdr.StellarValue{ + CloseTime: xdr.TimePoint(expectedLedger.ClosedAt.Unix()), + }, + }, + } + ledgerHeaderBase64, err := xdr.MarshalBase64(ledgerEntry.Header) + tt.Assert.NoError(err) + expectedLedger.LedgerHeaderXDR = null.NewString(ledgerHeaderBase64, true) + + rowsAffected, err := q.InsertLedger(tt.Ctx, + ledgerEntry, + 12, + 3, + 23, + 26, + int(expectedLedger.ImporterVersion), + ) + tt.Assert.NoError(err) + tt.Assert.Equal(rowsAffected, int64(1)) + + var ledgerFromDB Ledger + err = q.LedgerBySequence(tt.Ctx, &ledgerFromDB, 69859) + tt.Assert.NoError(err) + + expectedLedger.CreatedAt = ledgerFromDB.CreatedAt + expectedLedger.UpdatedAt = ledgerFromDB.UpdatedAt + tt.Assert.True(ledgerFromDB.CreatedAt.After(expectedLedger.ClosedAt)) + tt.Assert.True(ledgerFromDB.UpdatedAt.After(expectedLedger.ClosedAt)) + tt.Assert.True(ledgerFromDB.CreatedAt.Before(expectedLedger.ClosedAt.Add(time.Hour))) + tt.Assert.True(ledgerFromDB.UpdatedAt.Before(expectedLedger.ClosedAt.Add(time.Hour))) + + tt.Assert.True(expectedLedger.ClosedAt.Equal(ledgerFromDB.ClosedAt)) + expectedLedger.ClosedAt = ledgerFromDB.ClosedAt + + tt.Assert.Equal(expectedLedger, ledgerFromDB) + + hash, exists, err := ledgerHashStore.GetLedgerHash(tt.Ctx, uint32(expectedLedger.Sequence)) + tt.Assert.NoError(err) + tt.Assert.True(exists) + tt.Assert.Equal(expectedLedger.LedgerHash, hash) +} + +func insertLedgerWithSequence(tt *test.T, q *Q, seq uint32) { + // generate random hashes to avoid insert clashes due to UNIQUE constraints + var rnd = rand.New(rand.NewSource(time.Now().UnixNano())) + ledgerHashHex := fmt.Sprintf("%064x", rnd.Uint32()) + previousLedgerHashHex := fmt.Sprintf("%064x", rnd.Uint32()) + + expectedLedger := Ledger{ + Sequence: int32(seq), + LedgerHash: ledgerHashHex, + PreviousLedgerHash: null.NewString(previousLedgerHashHex, true), + TotalOrderID: TotalOrderID{toid.New(int32(69859), 0, 0).ToInt64()}, + ImporterVersion: 123, + TransactionCount: 12, 
+ SuccessfulTransactionCount: new(int32), + FailedTransactionCount: new(int32), + TxSetOperationCount: new(int32), + OperationCount: 23, + TotalCoins: 23451, + FeePool: 213, + BaseReserve: 687, + MaxTxSetSize: 345, + ProtocolVersion: 12, + BaseFee: 100, + ClosedAt: time.Now().UTC().Truncate(time.Second), + } + *expectedLedger.SuccessfulTransactionCount = 12 + *expectedLedger.FailedTransactionCount = 3 + *expectedLedger.TxSetOperationCount = 26 + + var ledgerHash, previousLedgerHash xdr.Hash + + written, err := hex.Decode(ledgerHash[:], []byte(expectedLedger.LedgerHash)) + tt.Assert.NoError(err) + tt.Assert.Equal(len(ledgerHash), written) + + written, err = hex.Decode(previousLedgerHash[:], []byte(expectedLedger.PreviousLedgerHash.String)) + tt.Assert.NoError(err) + tt.Assert.Equal(len(previousLedgerHash), written) + + ledgerEntry := xdr.LedgerHeaderHistoryEntry{ + Hash: ledgerHash, + Header: xdr.LedgerHeader{ + LedgerVersion: 12, + PreviousLedgerHash: previousLedgerHash, + LedgerSeq: xdr.Uint32(expectedLedger.Sequence), + TotalCoins: xdr.Int64(expectedLedger.TotalCoins), + FeePool: xdr.Int64(expectedLedger.FeePool), + BaseFee: xdr.Uint32(expectedLedger.BaseFee), + BaseReserve: xdr.Uint32(expectedLedger.BaseReserve), + MaxTxSetSize: xdr.Uint32(expectedLedger.MaxTxSetSize), + ScpValue: xdr.StellarValue{ + CloseTime: xdr.TimePoint(expectedLedger.ClosedAt.Unix()), + }, + }, + } + ledgerHeaderBase64, err := xdr.MarshalBase64(ledgerEntry.Header) + tt.Assert.NoError(err) + expectedLedger.LedgerHeaderXDR = null.NewString(ledgerHeaderBase64, true) + rowsAffected, err := q.InsertLedger(tt.Ctx, + ledgerEntry, + 12, + 3, + 23, + 26, + int(expectedLedger.ImporterVersion), + ) + tt.Assert.NoError(err) + tt.Assert.Equal(rowsAffected, int64(1)) +} + +func TestGetLedgerGaps(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &Q{tt.HorizonSession()} + + // The DB is empty, so there shouldn't be any gaps + gaps, err := q.GetLedgerGaps(context.Background()) + tt.Assert.NoError(err) + tt.Assert.Len(gaps, 0) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 1, 100) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 1, EndSequence: 100}}, gaps) + + // Lets insert a few gaps and make sure they are detected incrementally + insertLedgerWithSequence(tt, q, 4) + insertLedgerWithSequence(tt, q, 5) + insertLedgerWithSequence(tt, q, 6) + insertLedgerWithSequence(tt, q, 7) + + // since there is a single ledger cluster, there should still be no gaps + // (we don't start from ledger 0) + gaps, err = q.GetLedgerGaps(context.Background()) + tt.Assert.NoError(err) + tt.Assert.Len(gaps, 0) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 1, 2) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 1, EndSequence: 2}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 1, 3) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 1, EndSequence: 3}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 1, 6) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 1, EndSequence: 3}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 3, 5) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 3, EndSequence: 3}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 4, 6) + tt.Assert.NoError(err) + tt.Assert.Len(gaps, 0) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 4, 8) + 
tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 8, EndSequence: 8}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 4, 10) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 8, EndSequence: 10}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 8, 10) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 8, EndSequence: 10}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 9, 11) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 9, EndSequence: 11}}, gaps) + + var expectedGaps []LedgerRange + + insertLedgerWithSequence(tt, q, 99) + insertLedgerWithSequence(tt, q, 100) + insertLedgerWithSequence(tt, q, 101) + insertLedgerWithSequence(tt, q, 102) + + gaps, err = q.GetLedgerGaps(context.Background()) + tt.Assert.NoError(err) + expectedGaps = append(expectedGaps, LedgerRange{8, 98}) + tt.Assert.Equal(expectedGaps, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 10, 11) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 10, EndSequence: 11}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 4, 11) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 8, EndSequence: 11}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 1, 11) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 1, EndSequence: 3}, {StartSequence: 8, EndSequence: 11}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 10, 105) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 10, EndSequence: 98}, {StartSequence: 103, EndSequence: 105}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 100, 105) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 103, EndSequence: 105}}, gaps) + + gaps, err = q.GetLedgerGapsInRange(context.Background(), 105, 110) + tt.Assert.NoError(err) + tt.Assert.Equal([]LedgerRange{{StartSequence: 105, EndSequence: 110}}, gaps) + + // Yet another gap, this time to a single-ledger cluster + insertLedgerWithSequence(tt, q, 1000) + + gaps, err = q.GetLedgerGaps(context.Background()) + tt.Assert.NoError(err) + expectedGaps = append(expectedGaps, LedgerRange{103, 999}) + tt.Assert.Equal(expectedGaps, gaps) + + // Yet another gap, this time the gap only contains a ledger + insertLedgerWithSequence(tt, q, 1002) + gaps, err = q.GetLedgerGaps(context.Background()) + tt.Assert.NoError(err) + expectedGaps = append(expectedGaps, LedgerRange{1001, 1001}) + tt.Assert.Equal(expectedGaps, gaps) +} diff --git a/services/horizon/internal/db2/history/liquidity_pools.go b/services/horizon/internal/db2/history/liquidity_pools.go new file mode 100644 index 0000000000..0c79dc890c --- /dev/null +++ b/services/horizon/internal/db2/history/liquidity_pools.go @@ -0,0 +1,312 @@ +package history + +import ( + "context" + "database/sql/driver" + "encoding/json" + "fmt" + "strings" + + sq "github.com/Masterminds/squirrel" + "github.com/guregu/null" + "github.com/jmoiron/sqlx" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// LiquidityPoolsQuery is a helper struct to configure queries to liquidity pools +type LiquidityPoolsQuery struct { + PageQuery db2.PageQuery + Assets []xdr.Asset + Account string +} + +// LiquidityPool is a row of data from the `liquidity_pools`. 
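+// The AssetReserves field is persisted as a JSONB column via the Value and
+// Scan methods on LiquidityPoolAssetReserves below; reserve amounts are
+// string-encoded in JSON (see liquidityPoolAssetReserveJSON) to avoid
+// problems with pgx.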
+type LiquidityPool struct { + PoolID string `db:"id"` + Type xdr.LiquidityPoolType `db:"type"` + Fee uint32 `db:"fee"` + TrustlineCount uint64 `db:"trustline_count"` + ShareCount uint64 `db:"share_count"` + AssetReserves LiquidityPoolAssetReserves `db:"asset_reserves"` + LastModifiedLedger uint32 `db:"last_modified_ledger"` + Deleted bool `db:"deleted"` +} + +type LiquidityPoolAssetReserves []LiquidityPoolAssetReserve + +func (c LiquidityPoolAssetReserves) Value() (driver.Value, error) { + return json.Marshal(c) +} + +func (c *LiquidityPoolAssetReserves) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return errors.New("type assertion to []byte failed") + } + + return json.Unmarshal(b, &c) +} + +type LiquidityPoolAssetReserve struct { + Asset xdr.Asset + Reserve uint64 +} + +// liquidityPoolAssetReserveJSON is an intermediate representation to allow encoding assets as base64 when stored in the DB +type liquidityPoolAssetReserveJSON struct { + Asset string `json:"asset"` + Reserve uint64 `json:"reserve,string"` // use string-encoding to avoid problems with pgx https://github.com/jackc/pgx/issues/289 +} + +func (lpar LiquidityPoolAssetReserve) MarshalJSON() ([]byte, error) { + asset, err := xdr.MarshalBase64(lpar.Asset) + if err != nil { + return nil, err + } + return json.Marshal(liquidityPoolAssetReserveJSON{asset, lpar.Reserve}) +} + +func (lpar *LiquidityPoolAssetReserve) UnmarshalJSON(data []byte) error { + var lparJSON liquidityPoolAssetReserveJSON + if err := json.Unmarshal(data, &lparJSON); err != nil { + return err + } + var asset xdr.Asset + if err := xdr.SafeUnmarshalBase64(lparJSON.Asset, &asset); err != nil { + return err + } + lpar.Reserve = lparJSON.Reserve + lpar.Asset = asset + return nil +} + +// QLiquidityPools defines liquidity-pool-related queries. +type QLiquidityPools interface { + UpsertLiquidityPools(ctx context.Context, lps []LiquidityPool) error + GetLiquidityPoolsByID(ctx context.Context, poolIDs []string) ([]LiquidityPool, error) + StreamAllLiquidityPools(ctx context.Context, callback func(LiquidityPool) error) error + CountLiquidityPools(ctx context.Context) (int, error) + FindLiquidityPoolByID(ctx context.Context, liquidityPoolID string) (LiquidityPool, error) + GetUpdatedLiquidityPools(ctx context.Context, newerThanSequence uint32) ([]LiquidityPool, error) + CompactLiquidityPools(ctx context.Context, cutOffSequence uint32) (int64, error) +} + +// UpsertLiquidityPools upserts a batch of liquidity pools in the liquidity_pools table. +// There's currently no limit of the number of liquidity pools this method can +// accept other than 2GB limit of the query string length what should be enough +// for each ledger with the current limits. 
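A minimal usage sketch for the reserve (de)serialization defined above, written as if it lived in this history package; it is not part of the patch, and the JSON shown in the comment is only what the native asset from xdr.MustNewNativeAsset happens to encode to.

package history

import (
	"encoding/json"
	"fmt"

	"github.com/stellar/go/xdr"
)

// exampleReserveRoundTrip is an illustrative sketch, not part of the patch: it
// shows how a single reserve is stored in the jsonb column and read back.
func exampleReserveRoundTrip() error {
	reserve := LiquidityPoolAssetReserve{
		Asset:   xdr.MustNewNativeAsset(),
		Reserve: 450,
	}

	// MarshalJSON base64-encodes the XDR asset and writes the reserve as a
	// string (see the ",string" tag above), e.g. {"asset":"AAAAAA==","reserve":"450"}.
	encoded, err := json.Marshal(reserve)
	if err != nil {
		return err
	}
	fmt.Println(string(encoded))

	// UnmarshalJSON reverses both transformations.
	var decoded LiquidityPoolAssetReserve
	if err := json.Unmarshal(encoded, &decoded); err != nil {
		return err
	}
	fmt.Println(decoded.Reserve) // 450
	return nil
}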
+func (q *Q) UpsertLiquidityPools(ctx context.Context, lps []LiquidityPool) error { + var poolID, typ, fee, shareCount, trustlineCount, + assetReserves, lastModifiedLedger, deleted []interface{} + + for _, lp := range lps { + poolID = append(poolID, lp.PoolID) + typ = append(typ, lp.Type) + fee = append(fee, lp.Fee) + trustlineCount = append(trustlineCount, lp.TrustlineCount) + shareCount = append(shareCount, lp.ShareCount) + assetReserves = append(assetReserves, lp.AssetReserves) + lastModifiedLedger = append(lastModifiedLedger, lp.LastModifiedLedger) + deleted = append(deleted, lp.Deleted) + } + + upsertFields := []upsertField{ + {"id", "text", poolID}, + {"type", "smallint", typ}, + {"fee", "integer", fee}, + {"trustline_count", "bigint", trustlineCount}, + {"share_count", "bigint", shareCount}, + {"asset_reserves", "jsonb", assetReserves}, + {"last_modified_ledger", "integer", lastModifiedLedger}, + {"deleted", "boolean", deleted}, + } + + return q.upsertRows(ctx, "liquidity_pools", "id", upsertFields) +} + +// CountLiquidityPools returns the total number of liquidity pools in the DB +func (q *Q) CountLiquidityPools(ctx context.Context) (int, error) { + sql := sq.Select("count(*)").Where("deleted = ?", false).From("liquidity_pools") + + var count int + if err := q.Get(ctx, &count, sql); err != nil { + return 0, errors.Wrap(err, "could not run select query") + } + + return count, nil +} + +// GetLiquidityPoolsByID finds all liquidity pools by PoolId +func (q *Q) GetLiquidityPoolsByID(ctx context.Context, poolIDs []string) ([]LiquidityPool, error) { + var liquidityPools []LiquidityPool + sql := selectLiquidityPools.Where("deleted = ?", false). + Where(map[string]interface{}{"lp.id": poolIDs}) + err := q.Select(ctx, &liquidityPools, sql) + return liquidityPools, err +} + +// FindLiquidityPoolByID returns a liquidity pool. +func (q *Q) FindLiquidityPoolByID(ctx context.Context, liquidityPoolID string) (LiquidityPool, error) { + var lp LiquidityPool + sql := selectLiquidityPools.Limit(1).Where("deleted = ?", false).Where("lp.id = ?", liquidityPoolID) + err := q.Get(ctx, &lp, sql) + return lp, err +} + +// GetLiquidityPools finds all liquidity pools where accountID owns assets +func (q *Q) GetLiquidityPools(ctx context.Context, query LiquidityPoolsQuery) ([]LiquidityPool, error) { + if len(query.Account) > 0 && len(query.Assets) > 0 { + return nil, fmt.Errorf("this endpoint does not support filtering by both accountID and reserve assets.") + } + + sql, err := query.PageQuery.ApplyRawTo(selectLiquidityPools, "lp.id") + if err != nil { + return nil, errors.Wrap(err, "could not apply query to page") + } + if len(query.Account) > 0 { + sql = sql.LeftJoin("trust_lines ON id = liquidity_pool_id").Where("trust_lines.account_id = ?", query.Account) + } else if len(query.Assets) > 0 { + for _, asset := range query.Assets { + assetB64, err := xdr.MarshalBase64(asset) + if err != nil { + return nil, err + } + sql = sql. 
+ Where(`lp.asset_reserves @> '[{"asset": "` + assetB64 + `"}]'`) + } + } + sql = sql.Where("lp.deleted = ?", false) + + var results []LiquidityPool + if err := q.Select(ctx, &results, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return results, nil +} + +func (q *Q) StreamAllLiquidityPools(ctx context.Context, callback func(LiquidityPool) error) error { + var rows *sqlx.Rows + var err error + + if rows, err = q.Query(ctx, selectLiquidityPools.Where("deleted = ?", false)); err != nil { + return errors.Wrap(err, "could not run all liquidity pools select query") + } + + defer rows.Close() + liquidityPool := LiquidityPool{} + + for rows.Next() { + if err = rows.StructScan(&liquidityPool); err != nil { + return errors.Wrap(err, "could not scan row into liquidity pool struct") + } + if err = callback(liquidityPool); err != nil { + return err + } + } + + return rows.Err() +} + +// GetUpdatedLiquidityPools returns all liquidity pools created, updated, or deleted after the given ledger sequence. +func (q *Q) GetUpdatedLiquidityPools(ctx context.Context, newerThanSequence uint32) ([]LiquidityPool, error) { + var pools []LiquidityPool + err := q.Select(ctx, &pools, selectLiquidityPools.Where("lp.last_modified_ledger > ?", newerThanSequence)) + return pools, err +} + +// CompactLiquidityPools removes rows from the liquidity pools table which are marked for deletion. +func (q *Q) CompactLiquidityPools(ctx context.Context, cutOffSequence uint32) (int64, error) { + sql := sq.Delete("liquidity_pools"). + Where("deleted = ?", true). + Where("last_modified_ledger <= ?", cutOffSequence) + + result, err := q.Exec(ctx, sql) + if err != nil { + return 0, errors.Wrap(err, "cannot delete offer rows") + } + + if err = q.UpdateLiquidityPoolCompactionSequence(ctx, cutOffSequence); err != nil { + return 0, errors.Wrap(err, "cannot update liquidity pool compaction sequence") + } + + return result.RowsAffected() +} + +var liquidityPoolsSelectStatement = "lp.id, " + + "lp.type, " + + "lp.fee, " + + "lp.trustline_count, " + + "lp.share_count, " + + "lp.asset_reserves, " + + "lp.deleted, " + + "lp.last_modified_ledger" + +var selectLiquidityPools = sq.Select(liquidityPoolsSelectStatement).From("liquidity_pools lp") + +// MakeTestPool is a helper to make liquidity pools for testing purposes. It's +// public because it's used in other test suites. 
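The asset filter in GetLiquidityPools above relies on Postgres jsonb containment rather than a join. The standalone sketch below is not part of the patch; the base64 value is a stand-in for whatever xdr.MarshalBase64 returns, and it uses squirrel the same way the query builder above does, just to show the shape of the SQL that ends up being issued.

package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel"
)

func main() {
	// Hypothetical value: the base64-encoded XDR of some asset, standing in for
	// what xdr.MarshalBase64 produces in GetLiquidityPools.
	assetB64 := "AAAAAA=="

	query := sq.Select("lp.id", "lp.asset_reserves").
		From("liquidity_pools lp").
		Where(`lp.asset_reserves @> '[{"asset": "` + assetB64 + `"}]'`).
		Where("lp.deleted = ?", false)

	sql, args, err := query.ToSql()
	if err != nil {
		panic(err)
	}
	// The jsonb containment operator (@>) matches pools whose asset_reserves
	// array contains an element with that exact "asset" value.
	fmt.Println(sql)  // SELECT ... FROM liquidity_pools lp WHERE lp.asset_reserves @> '[...]' AND lp.deleted = ?
	fmt.Println(args) // [false]
}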
+func MakeTestPool(A xdr.Asset, a uint64, B xdr.Asset, b uint64) LiquidityPool { + if !A.LessThan(B) { + B, A = A, B + b, a = a, b + } + + poolId, _ := xdr.NewPoolId(A, B, xdr.LiquidityPoolFeeV18) + hexPoolId, _ := xdr.MarshalHex(poolId) + return LiquidityPool{ + PoolID: hexPoolId, + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: xdr.LiquidityPoolFeeV18, + TrustlineCount: 12345, + ShareCount: 67890, + AssetReserves: []LiquidityPoolAssetReserve{ + {Asset: A, Reserve: a}, + {Asset: B, Reserve: b}, + }, + LastModifiedLedger: 123, + } +} + +func MakeTestTrustline(account string, asset xdr.Asset, poolId string) TrustLine { + trustline := TrustLine{ + AccountID: account, + Balance: 1000, + AssetCode: "", + AssetIssuer: "", + LedgerKey: account + asset.StringCanonical() + poolId, // irrelevant, just needs to be unique + LiquidityPoolID: poolId, + Flags: 0, + LastModifiedLedger: 1234, + Sponsor: null.String{}, + } + + if poolId == "" { + trustline.AssetType = asset.Type + switch asset.Type { + case xdr.AssetTypeAssetTypeNative: + trustline.AssetCode = "native" + + case xdr.AssetTypeAssetTypeCreditAlphanum4: + fallthrough + case xdr.AssetTypeAssetTypeCreditAlphanum12: + trustline.AssetCode = strings.TrimRight(asset.GetCode(), "\x00") // no nulls in db string + trustline.AssetIssuer = asset.GetIssuer() + trustline.BuyingLiabilities = 1 + trustline.SellingLiabilities = 1 + + default: + panic("invalid asset type") + } + + trustline.Limit = trustline.Balance * 10 + trustline.BuyingLiabilities = 1 + trustline.SellingLiabilities = 2 + } else { + trustline.AssetType = xdr.AssetTypeAssetTypePoolShare + } + + return trustline +} diff --git a/services/horizon/internal/db2/history/liquidity_pools_test.go b/services/horizon/internal/db2/history/liquidity_pools_test.go new file mode 100644 index 0000000000..eb95e35a3e --- /dev/null +++ b/services/horizon/internal/db2/history/liquidity_pools_test.go @@ -0,0 +1,340 @@ +package history + +import ( + "testing" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" +) + +var ( + xlmAsset = xdr.MustNewNativeAsset() +) + +func TestFindLiquidityPool(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + lp := MakeTestPool(usdAsset, 450, xlmAsset, 450) + + err := q.UpsertLiquidityPools(tt.Ctx, []LiquidityPool{lp}) + tt.Assert.NoError(err) + + lpObtained, err := q.FindLiquidityPoolByID(tt.Ctx, lp.PoolID) + tt.Assert.NoError(err) + + tt.Assert.Equal(lp, lpObtained) +} + +func removeLiquidityPool(t *test.T, q *Q, lp LiquidityPool, sequence uint32) { + removed := lp + removed.Deleted = true + removed.LastModifiedLedger = sequence + err := q.UpsertLiquidityPools(t.Ctx, []LiquidityPool{removed}) + t.Assert.NoError(err) +} + +func TestRemoveLiquidityPool(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + lp := MakeTestPool(usdAsset, 450, xlmAsset, 450) + + err := q.UpsertLiquidityPools(tt.Ctx, []LiquidityPool{lp}) + tt.Assert.NoError(err) + + count, err := q.CountLiquidityPools(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(1, count) + + lpObtained, err := q.FindLiquidityPoolByID(tt.Ctx, lp.PoolID) + tt.Assert.NoError(err) + tt.Assert.NotNil(lpObtained) + + removeLiquidityPool(tt, q, lp, 200) + + _, err = q.FindLiquidityPoolByID(tt.Ctx, lp.PoolID) + tt.Assert.EqualError(err, "sql: no rows in result set") + + 
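+ // Removal is a soft delete: the row stays in liquidity_pools with deleted=true,
+ // so FindLiquidityPoolByID and CountLiquidityPools no longer see it, but the raw
+ // select below still returns the tombstoned row until compaction removes it.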
count, err = q.CountLiquidityPools(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(0, count) + + lps := []LiquidityPool{} + err = q.Select(tt.Ctx, &lps, selectLiquidityPools) + tt.Assert.NoError(err) + + tt.Assert.Len(lps, 1) + expected := lp + expected.Deleted = true + expected.LastModifiedLedger = 200 + tt.Assert.Equal(expected, lps[0]) + + lp.LastModifiedLedger = 250 + lp.Deleted = false + lp.ShareCount = 1 + lp.TrustlineCount = 2 + err = q.UpsertLiquidityPools(tt.Ctx, []LiquidityPool{lp}) + tt.Assert.NoError(err) + tt.Assert.NoError(err) + + lpObtained, err = q.FindLiquidityPoolByID(tt.Ctx, lp.PoolID) + tt.Assert.NoError(err) + tt.Assert.NotNil(lpObtained) + + tt.Assert.Equal(lp, lpObtained) +} + +func TestFindLiquidityPoolsByAssets(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + lp := MakeTestPool(usdAsset, 450, xlmAsset, 450) + + err := q.UpsertLiquidityPools(tt.Ctx, []LiquidityPool{lp}) + tt.Assert.NoError(err) + + // query by no asset + query := LiquidityPoolsQuery{ + PageQuery: db2.MustPageQuery("", false, "", 10), + } + + lps, err := q.GetLiquidityPools(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 1) + + pool := lps[0] + lps = nil + err = q.StreamAllLiquidityPools(tt.Ctx, func(liqudityPool LiquidityPool) error { + lps = append(lps, liqudityPool) + return nil + }) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 1) + tt.Assert.Equal(pool, lps[0]) + + // query by one asset + query = LiquidityPoolsQuery{ + PageQuery: db2.MustPageQuery("", false, "", 10), + Assets: []xdr.Asset{usdAsset}, + } + + lps, err = q.GetLiquidityPools(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 1) + + // query by two assets + query = LiquidityPoolsQuery{ + PageQuery: db2.MustPageQuery("", false, "", 10), + Assets: []xdr.Asset{usdAsset, xlmAsset}, + } + + lps, err = q.GetLiquidityPools(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 1) + + // query by an asset not present + query = LiquidityPoolsQuery{ + PageQuery: db2.MustPageQuery("", false, "", 10), + Assets: []xdr.Asset{eurAsset}, + } + + lps, err = q.GetLiquidityPools(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 0) + + removeLiquidityPool(tt, q, lp, 200) + + query = LiquidityPoolsQuery{ + PageQuery: db2.MustPageQuery("", false, "", 10), + } + + lps, err = q.GetLiquidityPools(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 0) + + // query by one asset + query = LiquidityPoolsQuery{ + PageQuery: db2.MustPageQuery("", false, "", 10), + Assets: []xdr.Asset{usdAsset}, + } + + lps, err = q.GetLiquidityPools(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 0) +} + +func TestLiquidityPoolCompaction(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + lp := MakeTestPool(usdAsset, 450, xlmAsset, 450) + + err := q.UpsertLiquidityPools(tt.Ctx, []LiquidityPool{lp}) + tt.Assert.NoError(err) + + compationSequence, err := q.GetLiquidityPoolCompactionSequence(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(uint32(0), compationSequence) + + rowsRemoved, err := q.CompactLiquidityPools(tt.Ctx, lp.LastModifiedLedger) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(0), rowsRemoved) + + compationSequence, err = q.GetLiquidityPoolCompactionSequence(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(lp.LastModifiedLedger, compationSequence) + + // query by no asset + query := LiquidityPoolsQuery{ + PageQuery: db2.MustPageQuery("", 
false, "", 10), + } + + lps, err := q.GetLiquidityPools(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 1) + + removeLiquidityPool(tt, q, lp, 200) + + lps, err = q.GetLiquidityPools(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 0) + + lps = nil + err = q.StreamAllLiquidityPools(tt.Ctx, func(liqudityPool LiquidityPool) error { + lps = append(lps, liqudityPool) + return nil + }) + + tt.Assert.NoError(err) + tt.Assert.Len(lps, 0) + + err = q.Select(tt.Ctx, &lps, selectLiquidityPools) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 1) + + rowsRemoved, err = q.CompactLiquidityPools(tt.Ctx, 199) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(0), rowsRemoved) + err = q.Select(tt.Ctx, &lps, selectLiquidityPools) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 1) + + rowsRemoved, err = q.CompactLiquidityPools(tt.Ctx, 200) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(1), rowsRemoved) + err = q.Select(tt.Ctx, &lps, selectLiquidityPools) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 0) +} + +func TestUpdateLiquidityPool(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + initialLP := MakeTestPool(usdAsset, 450, xlmAsset, 450) + err := q.UpsertLiquidityPools(tt.Ctx, []LiquidityPool{initialLP}) + tt.Assert.NoError(err) + + updatedLP := clonePool(initialLP) + updatedLP.TrustlineCount += 1 + updatedLP.ShareCount = 100000 + updatedLP.AssetReserves[0].Reserve = 500 + updatedLP.AssetReserves[1].Reserve = 600 + updatedLP.LastModifiedLedger += 1 + + err = q.UpsertLiquidityPools(tt.Ctx, []LiquidityPool{updatedLP}) + tt.Assert.NoError(err) + + lps := []LiquidityPool{} + err = q.Select(tt.Ctx, &lps, selectLiquidityPools) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 1) + lp := lps[0] + tt.Assert.Equal(updatedLP, lp) +} + +func TestGetLiquidityPoolsByID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + lp := MakeTestPool(usdAsset, 450, xlmAsset, 450) + + err := q.UpsertLiquidityPools(tt.Ctx, []LiquidityPool{lp}) + tt.Assert.NoError(err) + + r, err := q.GetLiquidityPoolsByID(tt.Ctx, []string{lp.PoolID}) + tt.Assert.NoError(err) + tt.Assert.Len(r, 1) + + removeLiquidityPool(tt, q, lp, 200) + + r, err = q.GetLiquidityPoolsByID(tt.Ctx, []string{lp.PoolID}) + tt.Assert.NoError(err) + tt.Assert.Len(r, 0) +} + +func clonePool(lp LiquidityPool) LiquidityPool { + assetReserveCopy := make([]LiquidityPoolAssetReserve, len(lp.AssetReserves)) + for i, reserve := range lp.AssetReserves { + assetReserveCopy[i] = LiquidityPoolAssetReserve{ + Asset: reserve.Asset, + Reserve: reserve.Reserve, + } + } + + return LiquidityPool{ + PoolID: lp.PoolID, + Type: lp.Type, + Fee: lp.Fee, + TrustlineCount: lp.TrustlineCount, + ShareCount: lp.ShareCount, + AssetReserves: assetReserveCopy, + LastModifiedLedger: lp.LastModifiedLedger, + } +} + +func TestLiquidityPoolByAccountId(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + pools := []LiquidityPool{ + MakeTestPool(usdAsset, 450, xlmAsset, 450), + MakeTestPool(eurAsset, 450, xlmAsset, 450), + } + err := q.UpsertLiquidityPools(tt.Ctx, pools) + tt.Assert.NoError(err) + + lines := []TrustLine{ + MakeTestTrustline(account1.AccountID, xlmAsset, ""), + MakeTestTrustline(account1.AccountID, eurAsset, ""), + MakeTestTrustline(account1.AccountID, usdAsset, ""), + MakeTestTrustline(account1.AccountID, xdr.Asset{}, pools[0].PoolID), + 
MakeTestTrustline(account1.AccountID, xdr.Asset{}, pools[1].PoolID), + } + tt.Assert.NoError(q.UpsertTrustLines(tt.Ctx, lines)) + + query := LiquidityPoolsQuery{ + PageQuery: db2.MustPageQuery("", false, "asc", 10), + Account: account1.AccountID, + } + + lps, err := q.GetLiquidityPools(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(lps, 2) +} diff --git a/services/horizon/internal/db2/history/main.go b/services/horizon/internal/db2/history/main.go new file mode 100644 index 0000000000..2ebf2d812d --- /dev/null +++ b/services/horizon/internal/db2/history/main.go @@ -0,0 +1,893 @@ +// Package history contains database record definitions useable for +// reading rows from a the history portion of horizon's database +package history + +import ( + "context" + "database/sql" + "database/sql/driver" + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + sq "github.com/Masterminds/squirrel" + "github.com/guregu/null" + "github.com/jmoiron/sqlx" + "github.com/lib/pq" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + strtime "github.com/stellar/go/support/time" + "github.com/stellar/go/xdr" +) + +const ( + // account effects + + // EffectAccountCreated effects occur when a new account is created + EffectAccountCreated EffectType = 0 // from create_account + + // EffectAccountRemoved effects occur when one account is merged into another + EffectAccountRemoved EffectType = 1 // from merge_account + + // EffectAccountCredited effects occur when an account receives some currency + EffectAccountCredited EffectType = 2 // from create_account, payment, path_payment, merge_account + + // EffectAccountDebited effects occur when an account sends some currency + EffectAccountDebited EffectType = 3 // from create_account, payment, path_payment, create_account + + // EffectAccountThresholdsUpdated effects occur when an account changes its + // multisig thresholds. + EffectAccountThresholdsUpdated EffectType = 4 // from set_options + + // EffectAccountHomeDomainUpdated effects occur when an account changes its + // home domain. + EffectAccountHomeDomainUpdated EffectType = 5 // from set_options + + // EffectAccountFlagsUpdated effects occur when an account changes its + // account flags, either clearing or setting. + EffectAccountFlagsUpdated EffectType = 6 // from set_options + + // EffectAccountInflationDestinationUpdated effects occur when an account changes its + // inflation destination. + EffectAccountInflationDestinationUpdated EffectType = 7 // from set_options + + // signer effects + + // EffectSignerCreated occurs when an account gains a signer + EffectSignerCreated EffectType = 10 // from set_options + + // EffectSignerRemoved occurs when an account loses a signer + EffectSignerRemoved EffectType = 11 // from set_options + + // EffectSignerUpdated occurs when an account changes the weight of one of its + // signers. 
+ EffectSignerUpdated EffectType = 12 // from set_options + + // trustline effects + + // EffectTrustlineCreated occurs when an account trusts an anchor + EffectTrustlineCreated EffectType = 20 // from change_trust + + // EffectTrustlineRemoved occurs when an account removes struct by setting the + // limit of a trustline to 0 + EffectTrustlineRemoved EffectType = 21 // from change_trust + + // EffectTrustlineUpdated occurs when an account changes a trustline's limit + EffectTrustlineUpdated EffectType = 22 // from change_trust, allow_trust + + // Deprecated: use EffectTrustlineFlagsUpdated instead. + // EffectTrustlineAuthorized occurs when an anchor has AUTH_REQUIRED flag set + // to true and it authorizes another account's trustline + EffectTrustlineAuthorized EffectType = 23 // from allow_trust + + // Deprecated: use EffectTrustlineFlagsUpdated instead. + // EffectTrustlineDeauthorized occurs when an anchor revokes access to a asset + // it issues. + EffectTrustlineDeauthorized EffectType = 24 // from allow_trust + + // Deprecated: use EffectTrustlineFlagsUpdated instead. + // EffectTrustlineAuthorizedToMaintainLiabilities occurs when an anchor has AUTH_REQUIRED flag set + // to true and it authorizes another account's trustline to maintain liabilities + EffectTrustlineAuthorizedToMaintainLiabilities EffectType = 25 // from allow_trust + + // EffectTrustlineFlagsUpdated effects occur when a TrustLine changes its + // flags, either clearing or setting. + EffectTrustlineFlagsUpdated EffectType = 26 // from set_trust_line flags + + // trading effects + + // unused + // EffectOfferCreated occurs when an account offers to trade an asset + // EffectOfferCreated EffectType = 30 // from manage_offer, creat_passive_offer + // EffectOfferRemoved occurs when an account removes an offer + // EffectOfferRemoved EffectType = 31 // from manage_offer, create_passive_offer, path_payment + // EffectOfferUpdated occurs when an offer is updated by the offering account. + // EffectOfferUpdated EffectType = 32 // from manage_offer, creat_passive_offer, path_payment + + // EffectTrade occurs when a trade is initiated because of a path payment or + // offer operation. 
+ EffectTrade EffectType = 33 // from manage_offer, creat_passive_offer, path_payment + + // data effects + + // EffectDataCreated occurs when an account gets a new data field + EffectDataCreated EffectType = 40 // from manage_data + + // EffectDataRemoved occurs when an account removes a data field + EffectDataRemoved EffectType = 41 // from manage_data + + // EffectDataUpdated occurs when an account changes a data field's value + EffectDataUpdated EffectType = 42 // from manage_data + + // EffectSequenceBumped occurs when an account bumps their sequence number + EffectSequenceBumped EffectType = 43 // from bump_sequence + + // claimable balance effects + + // EffectClaimableBalanceCreated occurs when a claimable balance is created + EffectClaimableBalanceCreated EffectType = 50 // from create_claimable_balance + + // EffectClaimableBalanceClaimantCreated occurs when a claimable balance claimant is created + EffectClaimableBalanceClaimantCreated EffectType = 51 // from create_claimable_balance + + // EffectClaimableBalanceClaimed occurs when a claimable balance is claimed + EffectClaimableBalanceClaimed EffectType = 52 // from claim_claimable_balance + + // sponsorship effects + + // EffectAccountSponsorshipCreated occurs when an account ledger entry is sponsored + EffectAccountSponsorshipCreated EffectType = 60 // from create_account + + // EffectAccountSponsorshipUpdated occurs when the sponsoring of an account ledger entry is updated + EffectAccountSponsorshipUpdated EffectType = 61 // from revoke_sponsorship + + // EffectAccountSponsorshipRemoved occurs when the sponsorship of an account ledger entry is removed + EffectAccountSponsorshipRemoved EffectType = 62 // from revoke_sponsorship + + // EffectTrustlineSponsorshipCreated occurs when a trustline ledger entry is sponsored + EffectTrustlineSponsorshipCreated EffectType = 63 // from change_trust + + // EffectTrustlineSponsorshipUpdated occurs when the sponsoring of a trustline ledger entry is updated + EffectTrustlineSponsorshipUpdated EffectType = 64 // from revoke_sponsorship + + // EffectTrustlineSponsorshipRemoved occurs when the sponsorship of a trustline ledger entry is removed + EffectTrustlineSponsorshipRemoved EffectType = 65 // from revoke_sponsorship + + // EffectDataSponsorshipCreated occurs when a trustline ledger entry is sponsored + EffectDataSponsorshipCreated EffectType = 66 // from manage_data + + // EffectDataSponsorshipUpdated occurs when the sponsoring of a trustline ledger entry is updated + EffectDataSponsorshipUpdated EffectType = 67 // from revoke_sponsorship + + // EffectDataSponsorshipRemoved occurs when the sponsorship of a trustline ledger entry is removed + EffectDataSponsorshipRemoved EffectType = 68 // from revoke_sponsorship + + // EffectClaimableBalanceSponsorshipCreated occurs when a claimable balance ledger entry is sponsored + EffectClaimableBalanceSponsorshipCreated EffectType = 69 // from create_claimable_balance + + // EffectClaimableBalanceSponsorshipUpdated occurs when the sponsoring of a claimable balance ledger entry + // is updated + EffectClaimableBalanceSponsorshipUpdated EffectType = 70 // from revoke_sponsorship + + // EffectClaimableBalanceSponsorshipRemoved occurs when the sponsorship of a claimable balance ledger entry + // is removed + EffectClaimableBalanceSponsorshipRemoved EffectType = 71 // from claim_claimable_balance + + // EffectSignerSponsorshipCreated occurs when the sponsorship of a signer is created + EffectSignerSponsorshipCreated EffectType = 72 // from set_options + 
+ // EffectSignerSponsorshipUpdated occurs when the sponsorship of a signer is updated + EffectSignerSponsorshipUpdated EffectType = 73 // from revoke_sponsorship + + // EffectSignerSponsorshipRemoved occurs when the sponsorship of a signer is removed + EffectSignerSponsorshipRemoved EffectType = 74 // from revoke_sponsorship + + // EffectClaimableBalanceClawedBack occurs when a claimable balance is clawed back + EffectClaimableBalanceClawedBack EffectType = 80 // from clawback_claimable_balance + + // EffectLiquidityPoolDeposited occurs when a liquidity pool incurs a deposit + EffectLiquidityPoolDeposited EffectType = 90 // from liquidity_pool_deposit + + // EffectLiquidityPoolWithdrew occurs when a liquidity pool incurs a withdrawal + EffectLiquidityPoolWithdrew EffectType = 91 // from liquidity_pool_withdraw + + // EffectLiquidityPoolTrade occurs when a trade happens in a liquidity pool + EffectLiquidityPoolTrade EffectType = 92 + + // EffectLiquidityPoolCreated occurs when a liquidity pool is created + EffectLiquidityPoolCreated EffectType = 93 // from change_trust + + // EffectLiquidityPoolRemoved occurs when a liquidity pool is removed + EffectLiquidityPoolRemoved EffectType = 94 // from change_trust + + // EffectLiquidityPoolRevoked occurs when a liquidity pool is revoked + EffectLiquidityPoolRevoked EffectType = 95 // from change_trust_line_flags and allow_trust +) + +// Account is a row of data from the `history_accounts` table +type Account struct { + ID int64 `db:"id"` + Address string `db:"address"` +} + +// AccountEntry is a row of data from the `account` table +type AccountEntry struct { + AccountID string `db:"account_id"` + Balance int64 `db:"balance"` + BuyingLiabilities int64 `db:"buying_liabilities"` + SellingLiabilities int64 `db:"selling_liabilities"` + SequenceNumber int64 `db:"sequence_number"` + NumSubEntries uint32 `db:"num_subentries"` + InflationDestination string `db:"inflation_destination"` + HomeDomain string `db:"home_domain"` + Flags uint32 `db:"flags"` + MasterWeight byte `db:"master_weight"` + ThresholdLow byte `db:"threshold_low"` + ThresholdMedium byte `db:"threshold_medium"` + ThresholdHigh byte `db:"threshold_high"` + LastModifiedLedger uint32 `db:"last_modified_ledger"` + Sponsor null.String `db:"sponsor"` + NumSponsored uint32 `db:"num_sponsored"` + NumSponsoring uint32 `db:"num_sponsoring"` +} + +type IngestionQ interface { + QAccounts + QAssetStats + QClaimableBalances + QHistoryClaimableBalances + QData + QEffects + QLedgers + QLiquidityPools + QHistoryLiquidityPools + QOffers + QOperations + // QParticipants + // Copy the small interfaces with shared methods directly, otherwise error: + // duplicate method CreateAccounts + NewTransactionParticipantsBatchInsertBuilder(maxBatchSize int) TransactionParticipantsBatchInsertBuilder + NewOperationParticipantBatchInsertBuilder(maxBatchSize int) OperationParticipantBatchInsertBuilder + QSigners + //QTrades + NewTradeBatchInsertBuilder(maxBatchSize int) TradeBatchInsertBuilder + RebuildTradeAggregationTimes(ctx context.Context, from, to strtime.Millis) error + RebuildTradeAggregationBuckets(ctx context.Context, fromLedger, toLedger uint32) error + CreateAssets(ctx context.Context, assets []xdr.Asset, batchSize int) (map[string]Asset, error) + QTransactions + QTrustLines + + Begin() error + BeginTx(*sql.TxOptions) error + Commit() error + CloneIngestionQ() IngestionQ + Rollback() error + GetTx() *sqlx.Tx + GetIngestVersion(context.Context) (int, error) + UpdateExpStateInvalid(context.Context, bool) error + 
UpdateIngestVersion(context.Context, int) error + GetExpStateInvalid(context.Context) (bool, error) + GetLatestHistoryLedger(context.Context) (uint32, error) + GetOfferCompactionSequence(context.Context) (uint32, error) + GetLiquidityPoolCompactionSequence(context.Context) (uint32, error) + TruncateIngestStateTables(context.Context) error + DeleteRangeAll(ctx context.Context, start, end int64) error +} + +// QAccounts defines account related queries. +type QAccounts interface { + GetAccountsByIDs(ctx context.Context, ids []string) ([]AccountEntry, error) + UpsertAccounts(ctx context.Context, accounts []AccountEntry) error + RemoveAccounts(ctx context.Context, accountIDs []string) (int64, error) +} + +// AccountSigner is a row of data from the `accounts_signers` table +type AccountSigner struct { + Account string `db:"account_id"` + Signer string `db:"signer"` + Weight int32 `db:"weight"` + Sponsor null.String `db:"sponsor"` +} + +type AccountSignersBatchInsertBuilder interface { + Add(ctx context.Context, signer AccountSigner) error + Exec(ctx context.Context) error +} + +// accountSignersBatchInsertBuilder is a simple wrapper around db.BatchInsertBuilder +type accountSignersBatchInsertBuilder struct { + builder db.BatchInsertBuilder +} + +// Data is a row of data from the `account_data` table +type Data struct { + AccountID string `db:"account_id"` + Name string `db:"name"` + Value AccountDataValue `db:"value"` + LastModifiedLedger uint32 `db:"last_modified_ledger"` + Sponsor null.String `db:"sponsor"` +} + +type AccountDataValue []byte + +type AccountDataKey struct { + AccountID string + DataName string +} + +// QData defines account data related queries. +type QData interface { + CountAccountsData(ctx context.Context) (int, error) + GetAccountDataByKeys(ctx context.Context, keys []AccountDataKey) ([]Data, error) + UpsertAccountData(ctx context.Context, data []Data) error + RemoveAccountData(ctx context.Context, keys []AccountDataKey) (int64, error) +} + +// Asset is a row of data from the `history_assets` table +type Asset struct { + ID int64 `db:"id"` + Type string `db:"asset_type"` + Code string `db:"asset_code"` + Issuer string `db:"asset_issuer"` +} + +// ExpAssetStat is a row in the exp_asset_stats table representing the stats per Asset +type ExpAssetStat struct { + AssetType xdr.AssetType `db:"asset_type"` + AssetCode string `db:"asset_code"` + AssetIssuer string `db:"asset_issuer"` + Accounts ExpAssetStatAccounts `db:"accounts"` + Balances ExpAssetStatBalances `db:"balances"` + Amount string `db:"amount"` + NumAccounts int32 `db:"num_accounts"` +} + +// PagingToken returns a cursor for this asset stat +func (e ExpAssetStat) PagingToken() string { + return fmt.Sprintf( + "%s_%s_%s", + e.AssetCode, + e.AssetIssuer, + xdr.AssetTypeToString[e.AssetType], + ) +} + +// ExpAssetStatAccounts represents the summarized acount numbers for a single Asset +type ExpAssetStatAccounts struct { + Authorized int32 `json:"authorized"` + AuthorizedToMaintainLiabilities int32 `json:"authorized_to_maintain_liabilities"` + ClaimableBalances int32 `json:"claimable_balances"` + LiquidityPools int32 `json:"liquidity_pools"` + Unauthorized int32 `json:"unauthorized"` +} + +func (e ExpAssetStatAccounts) Value() (driver.Value, error) { + return json.Marshal(e) +} + +func (e *ExpAssetStatAccounts) Scan(src interface{}) error { + source, ok := src.([]byte) + if !ok { + return errors.New("Type assertion .([]byte) failed.") + } + + return json.Unmarshal(source, &e) +} + +func (a ExpAssetStatAccounts) Add(b 
ExpAssetStatAccounts) ExpAssetStatAccounts { + return ExpAssetStatAccounts{ + Authorized: a.Authorized + b.Authorized, + AuthorizedToMaintainLiabilities: a.AuthorizedToMaintainLiabilities + b.AuthorizedToMaintainLiabilities, + ClaimableBalances: a.ClaimableBalances + b.ClaimableBalances, + LiquidityPools: a.LiquidityPools + b.LiquidityPools, + Unauthorized: a.Unauthorized + b.Unauthorized, + } +} + +func (a ExpAssetStatAccounts) IsZero() bool { + return a == ExpAssetStatAccounts{} +} + +// ExpAssetStatBalances represents the summarized balances for a single Asset +// Note: the string representation is in stroops! +type ExpAssetStatBalances struct { + Authorized string `json:"authorized"` + AuthorizedToMaintainLiabilities string `json:"authorized_to_maintain_liabilities"` + ClaimableBalances string `json:"claimable_balances"` + LiquidityPools string `json:"liquidity_pools"` + Unauthorized string `json:"unauthorized"` +} + +func (e ExpAssetStatBalances) Value() (driver.Value, error) { + return json.Marshal(e) +} + +func (e *ExpAssetStatBalances) Scan(src interface{}) error { + source, ok := src.([]byte) + if !ok { + return errors.New("Type assertion .([]byte) failed.") + } + + err := json.Unmarshal(source, &e) + if err != nil { + return err + } + + // Sets zero values for empty balances + if e.Authorized == "" { + e.Authorized = "0" + } + if e.AuthorizedToMaintainLiabilities == "" { + e.AuthorizedToMaintainLiabilities = "0" + } + if e.ClaimableBalances == "" { + e.ClaimableBalances = "0" + } + if e.LiquidityPools == "" { + e.LiquidityPools = "0" + } + if e.Unauthorized == "" { + e.Unauthorized = "0" + } + + return nil +} + +// QAssetStats defines exp_asset_stats related queries. +type QAssetStats interface { + InsertAssetStats(ctx context.Context, stats []ExpAssetStat, batchSize int) error + InsertAssetStat(ctx context.Context, stat ExpAssetStat) (int64, error) + UpdateAssetStat(ctx context.Context, stat ExpAssetStat) (int64, error) + GetAssetStat(ctx context.Context, assetType xdr.AssetType, assetCode, assetIssuer string) (ExpAssetStat, error) + RemoveAssetStat(ctx context.Context, assetType xdr.AssetType, assetCode, assetIssuer string) (int64, error) + GetAssetStats(ctx context.Context, assetCode, assetIssuer string, page db2.PageQuery) ([]ExpAssetStat, error) + CountTrustLines(ctx context.Context) (int, error) +} + +type QCreateAccountsHistory interface { + CreateAccounts(ctx context.Context, addresses []string, maxBatchSize int) (map[string]int64, error) +} + +// Effect is a row of data from the `history_effects` table +type Effect struct { + HistoryAccountID int64 `db:"history_account_id"` + Account string `db:"address"` + AccountMuxed null.String `db:"address_muxed"` + HistoryOperationID int64 `db:"history_operation_id"` + Order int32 `db:"order"` + Type EffectType `db:"type"` + DetailsString null.String `db:"details"` +} + +// TradeEffectDetails is a struct of data from `effects.DetailsString` +// when the effect type is trade +type TradeEffectDetails struct { + Seller string `json:"seller"` + SellerMuxed string `json:"seller_muxed,omitempty"` + SellerMuxedID uint64 `json:"seller_muxed_id,omitempty"` + OfferID int64 `json:"offer_id"` + SoldAmount string `json:"sold_amount"` + SoldAssetType string `json:"sold_asset_type"` + SoldAssetCode string `json:"sold_asset_code,omitempty"` + SoldAssetIssuer string `json:"sold_asset_issuer,omitempty"` + BoughtAmount string `json:"bought_amount"` + BoughtAssetType string `json:"bought_asset_type"` + BoughtAssetCode string 
`json:"bought_asset_code,omitempty"` + BoughtAssetIssuer string `json:"bought_asset_issuer,omitempty"` +} + +// SequenceBumped is a struct of data from `effects.DetailsString` +// when the effect type is sequence bumped. +type SequenceBumped struct { + NewSeq int64 `json:"new_seq"` +} + +// EffectsQ is a helper struct to aid in configuring queries that loads +// slices of Ledger structs. +type EffectsQ struct { + Err error + parent *Q + sql sq.SelectBuilder +} + +// EffectType is the numeric type for an effect, used as the `type` field in the +// `history_effects` table. +type EffectType int + +// FeeStats is a row of data from the min, mode, percentile aggregate functions over the +// `history_transactions` table. +type FeeStats struct { + FeeChargedMax null.Int `db:"fee_charged_max"` + FeeChargedMin null.Int `db:"fee_charged_min"` + FeeChargedMode null.Int `db:"fee_charged_mode"` + FeeChargedP10 null.Int `db:"fee_charged_p10"` + FeeChargedP20 null.Int `db:"fee_charged_p20"` + FeeChargedP30 null.Int `db:"fee_charged_p30"` + FeeChargedP40 null.Int `db:"fee_charged_p40"` + FeeChargedP50 null.Int `db:"fee_charged_p50"` + FeeChargedP60 null.Int `db:"fee_charged_p60"` + FeeChargedP70 null.Int `db:"fee_charged_p70"` + FeeChargedP80 null.Int `db:"fee_charged_p80"` + FeeChargedP90 null.Int `db:"fee_charged_p90"` + FeeChargedP95 null.Int `db:"fee_charged_p95"` + FeeChargedP99 null.Int `db:"fee_charged_p99"` + MaxFeeMax null.Int `db:"max_fee_max"` + MaxFeeMin null.Int `db:"max_fee_min"` + MaxFeeMode null.Int `db:"max_fee_mode"` + MaxFeeP10 null.Int `db:"max_fee_p10"` + MaxFeeP20 null.Int `db:"max_fee_p20"` + MaxFeeP30 null.Int `db:"max_fee_p30"` + MaxFeeP40 null.Int `db:"max_fee_p40"` + MaxFeeP50 null.Int `db:"max_fee_p50"` + MaxFeeP60 null.Int `db:"max_fee_p60"` + MaxFeeP70 null.Int `db:"max_fee_p70"` + MaxFeeP80 null.Int `db:"max_fee_p80"` + MaxFeeP90 null.Int `db:"max_fee_p90"` + MaxFeeP95 null.Int `db:"max_fee_p95"` + MaxFeeP99 null.Int `db:"max_fee_p99"` +} + +// LatestLedger represents a response from the raw LatestLedgerBaseFeeAndSequence +// query. +type LatestLedger struct { + BaseFee int32 `db:"base_fee"` + Sequence int32 `db:"sequence"` +} + +// Ledger is a row of data from the `history_ledgers` table +type Ledger struct { + TotalOrderID + Sequence int32 `db:"sequence"` + ImporterVersion int32 `db:"importer_version"` + LedgerHash string `db:"ledger_hash"` + PreviousLedgerHash null.String `db:"previous_ledger_hash"` + TransactionCount int32 `db:"transaction_count"` + SuccessfulTransactionCount *int32 `db:"successful_transaction_count"` + FailedTransactionCount *int32 `db:"failed_transaction_count"` + OperationCount int32 `db:"operation_count"` + TxSetOperationCount *int32 `db:"tx_set_operation_count"` + ClosedAt time.Time `db:"closed_at"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` + TotalCoins int64 `db:"total_coins"` + FeePool int64 `db:"fee_pool"` + BaseFee int32 `db:"base_fee"` + BaseReserve int32 `db:"base_reserve"` + MaxTxSetSize int32 `db:"max_tx_set_size"` + ProtocolVersion int32 `db:"protocol_version"` + LedgerHeaderXDR null.String `db:"ledger_header"` +} + +// LedgerCapacityUsageStats contains ledgers fullness stats. +type LedgerCapacityUsageStats struct { + CapacityUsage null.String `db:"ledger_capacity_usage"` +} + +// LedgerCache is a helper struct to load ledger data related to a batch of +// sequences. 
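A small illustrative helper, not part of the patch and with a name of my own, showing how the nullable transaction counters on the Ledger row above are usually dereferenced.

package history

// ledgerTxCounts is an illustrative sketch: the transaction counters on Ledger
// are pointers so that NULL columns scan cleanly, which is why callers
// dereference them defensively and fall back to zero.
func ledgerTxCounts(l Ledger) (successful, failed int32) {
	if l.SuccessfulTransactionCount != nil {
		successful = *l.SuccessfulTransactionCount
	}
	if l.FailedTransactionCount != nil {
		failed = *l.FailedTransactionCount
	}
	return successful, failed
}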
+type LedgerCache struct { + Records map[int32]Ledger + + lock sync.Mutex + queued map[int32]struct{} +} + +type LedgerRange struct { + StartSequence uint32 `db:"start"` + EndSequence uint32 `db:"end"` +} + +// LedgersQ is a helper struct to aid in configuring queries that loads +// slices of Ledger structs. +type LedgersQ struct { + Err error + parent *Q + sql sq.SelectBuilder +} + +// Operation is a row of data from the `history_operations` table +type Operation struct { + TotalOrderID + TransactionID int64 `db:"transaction_id"` + TransactionHash string `db:"transaction_hash"` + TxResult string `db:"tx_result"` + ApplicationOrder int32 `db:"application_order"` + Type xdr.OperationType `db:"type"` + DetailsString null.String `db:"details"` + SourceAccount string `db:"source_account"` + SourceAccountMuxed null.String `db:"source_account_muxed"` + TransactionSuccessful bool `db:"transaction_successful"` +} + +// ManageOffer is a struct of data from `operations.DetailsString` +// when the operation type is manage sell offer or manage buy offer +type ManageOffer struct { + OfferID int64 `json:"offer_id"` +} + +// upsertField is used in upsertRows function generating upsert query for +// different tables. +type upsertField struct { + name string + dbType string + objects []interface{} +} + +// Offer is row of data from the `offers` table from horizon DB +type Offer struct { + SellerID string `db:"seller_id"` + OfferID int64 `db:"offer_id"` + + SellingAsset xdr.Asset `db:"selling_asset"` + BuyingAsset xdr.Asset `db:"buying_asset"` + + Amount int64 `db:"amount"` + Pricen int32 `db:"pricen"` + Priced int32 `db:"priced"` + Price float64 `db:"price"` + Flags int32 `db:"flags"` + Deleted bool `db:"deleted"` + LastModifiedLedger uint32 `db:"last_modified_ledger"` + Sponsor null.String `db:"sponsor"` +} + +// OperationsQ is a helper struct to aid in configuring queries that loads +// slices of Operation structs. +type OperationsQ struct { + Err error + parent *Q + sql sq.SelectBuilder + opIdCol string + includeFailed bool + includeTransactions bool +} + +// Q is a helper struct on which to hang common_trades queries against a history +// portion of the horizon database. +type Q struct { + db.SessionInterface +} + +// QSigners defines signer related queries. +type QSigners interface { + GetLastLedgerIngestNonBlocking(ctx context.Context) (uint32, error) + GetLastLedgerIngest(ctx context.Context) (uint32, error) + UpdateLastLedgerIngest(ctx context.Context, ledgerSequence uint32) error + AccountsForSigner(ctx context.Context, signer string, page db2.PageQuery) ([]AccountSigner, error) + NewAccountSignersBatchInsertBuilder(maxBatchSize int) AccountSignersBatchInsertBuilder + CreateAccountSigner(ctx context.Context, account, signer string, weight int32, sponsor *string) (int64, error) + RemoveAccountSigner(ctx context.Context, account, signer string) (int64, error) + SignersForAccounts(ctx context.Context, accounts []string) ([]AccountSigner, error) + CountAccounts(ctx context.Context) (int, error) +} + +// OffersQuery is a helper struct to configure queries to offers +type OffersQuery struct { + PageQuery db2.PageQuery + SellerID string + Sponsor string + Selling *xdr.Asset + Buying *xdr.Asset +} + +// TotalOrderID represents the ID portion of rows that are identified by the +// "TotalOrderID". See total_order_id.go in the `db` package for details. 
+type TotalOrderID struct { + ID int64 `db:"id"` +} + +// Trade represents a trade from the trades table, joined with asset information from the assets table and account +// addresses from the accounts table +type Trade struct { + HistoryOperationID int64 `db:"history_operation_id"` + Order int32 `db:"order"` + LedgerCloseTime time.Time `db:"ledger_closed_at"` + BaseOfferID null.Int `db:"base_offer_id"` + BaseAccount null.String `db:"base_account"` + BaseAssetType string `db:"base_asset_type"` + BaseAssetCode string `db:"base_asset_code"` + BaseAssetIssuer string `db:"base_asset_issuer"` + BaseAmount int64 `db:"base_amount"` + BaseLiquidityPoolID null.String `db:"base_liquidity_pool_id"` + CounterOfferID null.Int `db:"counter_offer_id"` + CounterAccount null.String `db:"counter_account"` + CounterAssetType string `db:"counter_asset_type"` + CounterAssetCode string `db:"counter_asset_code"` + CounterAssetIssuer string `db:"counter_asset_issuer"` + CounterAmount int64 `db:"counter_amount"` + CounterLiquidityPoolID null.String `db:"counter_liquidity_pool_id"` + LiquidityPoolFee null.Int `db:"liquidity_pool_fee"` + BaseIsSeller bool `db:"base_is_seller"` + PriceN null.Int `db:"price_n"` + PriceD null.Int `db:"price_d"` + Type TradeType `db:"trade_type"` +} + +// Transaction is a row of data from the `history_transactions` table +type Transaction struct { + LedgerCloseTime time.Time `db:"ledger_close_time"` + TransactionWithoutLedger +} + +// TransactionsQ is a helper struct to aid in configuring queries that loads +// slices of transaction structs. +type TransactionsQ struct { + Err error + parent *Q + sql sq.SelectBuilder + includeFailed bool +} + +// TrustLine is row of data from the `trust_lines` table from horizon DB +type TrustLine struct { + AccountID string `db:"account_id"` + AssetType xdr.AssetType `db:"asset_type"` + AssetIssuer string `db:"asset_issuer"` + AssetCode string `db:"asset_code"` + Balance int64 `db:"balance"` + LedgerKey string `db:"ledger_key"` + Limit int64 `db:"trust_line_limit"` + LiquidityPoolID string `db:"liquidity_pool_id"` + BuyingLiabilities int64 `db:"buying_liabilities"` + SellingLiabilities int64 `db:"selling_liabilities"` + Flags uint32 `db:"flags"` + LastModifiedLedger uint32 `db:"last_modified_ledger"` + Sponsor null.String `db:"sponsor"` +} + +// QTrustLines defines trust lines related queries. +type QTrustLines interface { + GetTrustLinesByKeys(ctx context.Context, ledgerKeys []string) ([]TrustLine, error) + UpsertTrustLines(ctx context.Context, trustlines []TrustLine) error + RemoveTrustLines(ctx context.Context, ledgerKeys []string) (int64, error) +} + +func (q *Q) NewAccountSignersBatchInsertBuilder(maxBatchSize int) AccountSignersBatchInsertBuilder { + return &accountSignersBatchInsertBuilder{ + builder: db.BatchInsertBuilder{ + Table: q.GetTable("accounts_signers"), + MaxBatchSize: maxBatchSize, + }, + } +} + +// ElderLedger loads the oldest ledger known to the history database +func (q *Q) ElderLedger(ctx context.Context, dest interface{}) error { + return q.GetRaw(ctx, dest, `SELECT COALESCE(MIN(sequence), 0) FROM history_ledgers`) +} + +// GetLatestHistoryLedger loads the latest known ledger. Returns 0 if no ledgers in +// `history_ledgers` table. 
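ElderLedger and LatestLedger (just below) both scan a single COALESCEd value into whatever destination they are handed. A rough sketch of combining them follows; the helper name and signature are mine and it is not part of the patch.

package history

import "context"

// historyLedgerSpan is an illustrative sketch: it reports the oldest and newest
// ledger sequences known to the history database, both 0 when the
// history_ledgers table is empty.
func historyLedgerSpan(ctx context.Context, q *Q) (elder, latest int32, err error) {
	if err = q.ElderLedger(ctx, &elder); err != nil {
		return 0, 0, err
	}
	if err = q.LatestLedger(ctx, &latest); err != nil {
		return 0, 0, err
	}
	return elder, latest, nil
}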
+func (q *Q) GetLatestHistoryLedger(ctx context.Context) (uint32, error) { + var value uint32 + err := q.LatestLedger(ctx, &value) + return value, err +} + +// LatestLedger loads the latest known ledger +func (q *Q) LatestLedger(ctx context.Context, dest interface{}) error { + return q.GetRaw(ctx, dest, `SELECT COALESCE(MAX(sequence), 0) FROM history_ledgers`) +} + +// LatestLedgerSequenceClosedAt loads the latest known ledger sequence and close time, +// returns empty values if no ledgers in a DB. +func (q *Q) LatestLedgerSequenceClosedAt(ctx context.Context) (int32, time.Time, error) { + ledger := struct { + Sequence int32 `db:"sequence"` + ClosedAt time.Time `db:"closed_at"` + }{} + err := q.GetRaw(ctx, &ledger, `SELECT sequence, closed_at FROM history_ledgers ORDER BY sequence DESC LIMIT 1`) + if err == sql.ErrNoRows { + // Will return empty values + return ledger.Sequence, ledger.ClosedAt, nil + } + return ledger.Sequence, ledger.ClosedAt, err +} + +// LatestLedgerBaseFeeAndSequence loads the latest known ledger's base fee and +// sequence number. +func (q *Q) LatestLedgerBaseFeeAndSequence(ctx context.Context, dest interface{}) error { + return q.GetRaw(ctx, dest, ` + SELECT base_fee, sequence + FROM history_ledgers + WHERE sequence = (SELECT COALESCE(MAX(sequence), 0) FROM history_ledgers) + `) +} + +// CloneIngestionQ clones underlying db.Session and returns IngestionQ +func (q *Q) CloneIngestionQ() IngestionQ { + return &Q{q.Clone()} +} + +// DeleteRangeAll deletes a range of rows from all history tables between +// `start` and `end` (exclusive). +func (q *Q) DeleteRangeAll(ctx context.Context, start, end int64) error { + for table, column := range map[string]string{ + "history_effects": "history_operation_id", + "history_ledgers": "id", + "history_operation_claimable_balances": "history_operation_id", + "history_operation_participants": "history_operation_id", + "history_operation_liquidity_pools": "history_operation_id", + "history_operations": "id", + "history_trades": "history_operation_id", + "history_trades_60000": "open_ledger_toid", + "history_transaction_claimable_balances": "history_transaction_id", + "history_transaction_participants": "history_transaction_id", + "history_transaction_liquidity_pools": "history_transaction_id", + "history_transactions": "id", + } { + err := q.DeleteRange(ctx, start, end, table, column) + if err != nil { + return errors.Wrapf(err, "Error clearing %s", table) + } + } + return nil +} + +// upsertRows builds and executes an upsert query that allows very fast upserts +// to a given table. The final query is of form: +// +// WITH r AS +// (SELECT +// /* unnestPart */ +// unnest(?::type1[]), /* field1 */ +// unnest(?::type2[]), /* field2 */ +// ... +// ) +// INSERT INTO table ( +// /* insertFieldsPart */ +// field1, +// field2, +// ... +// ) +// SELECT * from r +// ON CONFLICT (conflictField) DO UPDATE SET +// /* onConflictPart */ +// field1 = excluded.field1, +// field2 = excluded.field2, +// ... 
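To make the template above concrete, the standalone sketch below assembles the same query text for a hypothetical id/share_count pair on liquidity_pools; it mirrors what upsertRows (below) builds before binding each placeholder to a pq.Array, and is illustrative rather than part of the patch.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical table and fields, mirroring the upsertField slices that
	// UpsertLiquidityPools and friends pass to upsertRows.
	table, conflictField := "liquidity_pools", "id"
	fields := []struct{ name, dbType string }{
		{"id", "text"},
		{"share_count", "bigint"},
	}

	var unnestPart, insertFieldsPart, onConflictPart []string
	for _, f := range fields {
		unnestPart = append(unnestPart, fmt.Sprintf("unnest(?::%s[]) /* %s */", f.dbType, f.name))
		insertFieldsPart = append(insertFieldsPart, f.name)
		onConflictPart = append(onConflictPart, fmt.Sprintf("%s = excluded.%s", f.name, f.name))
	}

	sql := "WITH r AS (SELECT " + strings.Join(unnestPart, ",") + ") " +
		"INSERT INTO " + table + " (" + strings.Join(insertFieldsPart, ",") + ") " +
		"SELECT * from r " +
		"ON CONFLICT (" + conflictField + ") DO UPDATE SET " + strings.Join(onConflictPart, ",")

	// Each ? is then bound to a pq.Array of column values, so a whole batch is
	// upserted in a single round trip.
	fmt.Println(sql)
}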
+func (q *Q) upsertRows(ctx context.Context, table string, conflictField string, fields []upsertField) error { + unnestPart := make([]string, 0, len(fields)) + insertFieldsPart := make([]string, 0, len(fields)) + onConflictPart := make([]string, 0, len(fields)) + pqArrays := make([]interface{}, 0, len(fields)) + + for _, field := range fields { + unnestPart = append( + unnestPart, + fmt.Sprintf("unnest(?::%s[]) /* %s */", field.dbType, field.name), + ) + insertFieldsPart = append( + insertFieldsPart, + field.name, + ) + onConflictPart = append( + onConflictPart, + fmt.Sprintf("%s = excluded.%s", field.name, field.name), + ) + pqArrays = append( + pqArrays, + pq.Array(field.objects), + ) + } + + sql := ` + WITH r AS + (SELECT ` + strings.Join(unnestPart, ",") + `) + INSERT INTO ` + table + ` + (` + strings.Join(insertFieldsPart, ",") + `) + SELECT * from r + ON CONFLICT (` + conflictField + `) DO UPDATE SET + ` + strings.Join(onConflictPart, ",") + + _, err := q.ExecRaw( + context.WithValue(ctx, &db.QueryTypeContextKey, db.UpsertQueryType), + sql, + pqArrays..., + ) + return err +} diff --git a/services/horizon/internal/db2/history/main_test.go b/services/horizon/internal/db2/history/main_test.go new file mode 100644 index 0000000000..c94154b463 --- /dev/null +++ b/services/horizon/internal/db2/history/main_test.go @@ -0,0 +1,68 @@ +package history + +import ( + "testing" + "time" + + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestLatestLedger(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + var seq int + err := q.LatestLedger(tt.Ctx, &seq) + + if tt.Assert.NoError(err) { + tt.Assert.Equal(3, seq) + } +} + +func TestLatestLedgerSequenceClosedAt(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + sequence, closedAt, err := q.LatestLedgerSequenceClosedAt(tt.Ctx) + if tt.Assert.NoError(err) { + tt.Assert.Equal(int32(3), sequence) + tt.Assert.Equal("2019-10-31T13:19:46Z", closedAt.Format(time.RFC3339)) + } + + test.ResetHorizonDB(t, tt.HorizonDB) + + sequence, closedAt, err = q.LatestLedgerSequenceClosedAt(tt.Ctx) + if tt.Assert.NoError(err) { + tt.Assert.Equal(int32(0), sequence) + tt.Assert.Equal("0001-01-01T00:00:00Z", closedAt.Format(time.RFC3339)) + } +} + +func TestGetLatestHistoryLedgerEmptyDB(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + value, err := q.GetLatestHistoryLedger(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(uint32(0), value) +} + +func TestElderLedger(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + var seq int + err := q.ElderLedger(tt.Ctx, &seq) + + if tt.Assert.NoError(err) { + tt.Assert.Equal(1, seq) + } +} diff --git a/services/horizon/internal/db2/history/mock_account_signers_batch_insert_builder.go b/services/horizon/internal/db2/history/mock_account_signers_batch_insert_builder.go new file mode 100644 index 0000000000..3f786f65ac --- /dev/null +++ b/services/horizon/internal/db2/history/mock_account_signers_batch_insert_builder.go @@ -0,0 +1,20 @@ +package history + +import ( + "context" + "github.com/stretchr/testify/mock" +) + +type MockAccountSignersBatchInsertBuilder struct { + mock.Mock +} + +func (m *MockAccountSignersBatchInsertBuilder) Add(ctx context.Context, signer AccountSigner) error { + a := m.Called(ctx, signer) + return a.Error(0) +} + +func (m 
*MockAccountSignersBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} diff --git a/services/horizon/internal/db2/history/mock_effect_batch_insert_builder.go b/services/horizon/internal/db2/history/mock_effect_batch_insert_builder.go new file mode 100644 index 0000000000..48ee96e306 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_effect_batch_insert_builder.go @@ -0,0 +1,39 @@ +package history + +import ( + "context" + + "github.com/guregu/null" + "github.com/stretchr/testify/mock" +) + +// MockEffectBatchInsertBuilder mock EffectBatchInsertBuilder +type MockEffectBatchInsertBuilder struct { + mock.Mock +} + +// Add mock +func (m *MockEffectBatchInsertBuilder) Add(ctx context.Context, + accountID int64, + muxedAccount null.String, + operationID int64, + order uint32, + effectType EffectType, + details []byte, +) error { + a := m.Called(ctx, + accountID, + muxedAccount, + operationID, + order, + effectType, + details, + ) + return a.Error(0) +} + +// Exec mock +func (m *MockEffectBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} diff --git a/services/horizon/internal/db2/history/mock_operation_participant_batch_insert_builder.go b/services/horizon/internal/db2/history/mock_operation_participant_batch_insert_builder.go new file mode 100644 index 0000000000..014763f989 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_operation_participant_batch_insert_builder.go @@ -0,0 +1,23 @@ +package history + +import ( + "context" + "github.com/stretchr/testify/mock" +) + +// MockOperationParticipantBatchInsertBuilder OperationParticipantBatchInsertBuilder mock +type MockOperationParticipantBatchInsertBuilder struct { + mock.Mock +} + +// Add mock +func (m *MockOperationParticipantBatchInsertBuilder) Add(ctx context.Context, operationID int64, accountID int64) error { + a := m.Called(ctx, operationID, accountID) + return a.Error(0) +} + +// Exec mock +func (m *MockOperationParticipantBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} diff --git a/services/horizon/internal/db2/history/mock_operations_batch_insert_builder.go b/services/horizon/internal/db2/history/mock_operations_batch_insert_builder.go new file mode 100644 index 0000000000..e57eb93db9 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_operations_batch_insert_builder.go @@ -0,0 +1,42 @@ +package history + +import ( + "context" + + "github.com/guregu/null" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/mock" +) + +// MockOperationsBatchInsertBuilder OperationsBatchInsertBuilder mock +type MockOperationsBatchInsertBuilder struct { + mock.Mock +} + +// Add mock +func (m *MockOperationsBatchInsertBuilder) Add(ctx context.Context, + id int64, + transactionID int64, + applicationOrder uint32, + operationType xdr.OperationType, + details []byte, + sourceAccount string, + sourceAccountMuxed null.String, +) error { + a := m.Called(ctx, + id, + transactionID, + applicationOrder, + operationType, + details, + sourceAccount, + sourceAccountMuxed, + ) + return a.Error(0) +} + +// Exec mock +func (m *MockOperationsBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} diff --git a/services/horizon/internal/db2/history/mock_q_accounts.go b/services/horizon/internal/db2/history/mock_q_accounts.go new file mode 100644 index 0000000000..99f793d147 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_accounts.go 
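The hand-written mocks in these files all follow the same testify pattern. A short sketch of exercising one of them follows; the test name and placeholder values are mine and it is not part of the patch.

package history

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestAccountSignersMockSketch is illustrative only: the expectations
// registered with On must match the method name and arguments that the mock's
// Called receives, otherwise testify fails the test.
func TestAccountSignersMockSketch(t *testing.T) {
	ctx := context.Background()
	// Placeholder values, not real Stellar addresses.
	signer := AccountSigner{Account: "account-1", Signer: "signer-1", Weight: 1}

	m := &MockAccountSignersBatchInsertBuilder{}
	m.On("Add", ctx, signer).Return(nil)
	m.On("Exec", ctx).Return(nil)

	assert.NoError(t, m.Add(ctx, signer))
	assert.NoError(t, m.Exec(ctx))
	m.AssertExpectations(t)
}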
@@ -0,0 +1,27 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" +) + +// MockQAccounts is a mock implementation of the QAccounts interface +type MockQAccounts struct { + mock.Mock +} + +func (m *MockQAccounts) GetAccountsByIDs(ctx context.Context, ids []string) ([]AccountEntry, error) { + a := m.Called(ctx, ids) + return a.Get(0).([]AccountEntry), a.Error(1) +} + +func (m *MockQAccounts) UpsertAccounts(ctx context.Context, accounts []AccountEntry) error { + a := m.Called(ctx, accounts) + return a.Error(0) +} + +func (m *MockQAccounts) RemoveAccounts(ctx context.Context, accountIDs []string) (int64, error) { + a := m.Called(ctx, accountIDs) + return a.Get(0).(int64), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_q_asset_stats.go b/services/horizon/internal/db2/history/mock_q_asset_stats.go new file mode 100644 index 0000000000..17334039e8 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_asset_stats.go @@ -0,0 +1,50 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/xdr" +) + +// MockQAssetStats is a mock implementation of the QAssetStats interface +type MockQAssetStats struct { + mock.Mock +} + +func (m *MockQAssetStats) InsertAssetStats(ctx context.Context, assetStats []ExpAssetStat, batchSize int) error { + a := m.Called(ctx, assetStats, batchSize) + return a.Error(0) +} + +func (m *MockQAssetStats) InsertAssetStat(ctx context.Context, assetStat ExpAssetStat) (int64, error) { + a := m.Called(ctx, assetStat) + return a.Get(0).(int64), a.Error(1) +} + +func (m *MockQAssetStats) UpdateAssetStat(ctx context.Context, assetStat ExpAssetStat) (int64, error) { + a := m.Called(ctx, assetStat) + return a.Get(0).(int64), a.Error(1) +} + +func (m *MockQAssetStats) GetAssetStat(ctx context.Context, assetType xdr.AssetType, assetCode, assetIssuer string) (ExpAssetStat, error) { + a := m.Called(ctx, assetType, assetCode, assetIssuer) + return a.Get(0).(ExpAssetStat), a.Error(1) +} + +func (m *MockQAssetStats) RemoveAssetStat(ctx context.Context, assetType xdr.AssetType, assetCode, assetIssuer string) (int64, error) { + a := m.Called(ctx, assetType, assetCode, assetIssuer) + return a.Get(0).(int64), a.Error(1) +} + +func (m *MockQAssetStats) GetAssetStats(ctx context.Context, assetCode, assetIssuer string, page db2.PageQuery) ([]ExpAssetStat, error) { + a := m.Called(ctx, assetCode, assetIssuer, page) + return a.Get(0).([]ExpAssetStat), a.Error(1) +} + +func (m *MockQAssetStats) CountTrustLines(ctx context.Context) (int, error) { + a := m.Called(ctx) + return a.Get(0).(int), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_q_claimable_balances.go b/services/horizon/internal/db2/history/mock_q_claimable_balances.go new file mode 100644 index 0000000000..9d1cdcdcb0 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_claimable_balances.go @@ -0,0 +1,32 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" +) + +// MockQClaimableBalances is a mock implementation of the QClaimableBalances interface +type MockQClaimableBalances struct { + mock.Mock +} + +func (m *MockQClaimableBalances) CountClaimableBalances(ctx context.Context) (int, error) { + a := m.Called(ctx) + return a.Get(0).(int), a.Error(1) +} + +func (m *MockQClaimableBalances) GetClaimableBalancesByID(ctx context.Context, ids []string) ([]ClaimableBalance, error) { + a := m.Called(ctx, ids) + return 
a.Get(0).([]ClaimableBalance), a.Error(1) +} + +func (m *MockQClaimableBalances) UpsertClaimableBalances(ctx context.Context, cbs []ClaimableBalance) error { + a := m.Called(ctx, cbs) + return a.Error(0) +} + +func (m *MockQClaimableBalances) RemoveClaimableBalances(ctx context.Context, ids []string) (int64, error) { + a := m.Called(ctx, ids) + return a.Get(0).(int64), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_q_data.go b/services/horizon/internal/db2/history/mock_q_data.go new file mode 100644 index 0000000000..3316aaa51b --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_data.go @@ -0,0 +1,32 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" +) + +// MockQData is a mock implementation of the QData interface +type MockQData struct { + mock.Mock +} + +func (m *MockQData) GetAccountDataByKeys(ctx context.Context, keys []AccountDataKey) ([]Data, error) { + a := m.Called(ctx) + return a.Get(0).([]Data), a.Error(1) +} + +func (m *MockQData) CountAccountsData(ctx context.Context) (int, error) { + a := m.Called(ctx) + return a.Get(0).(int), a.Error(1) +} + +func (m *MockQData) UpsertAccountData(ctx context.Context, data []Data) error { + a := m.Called(ctx, data) + return a.Error(0) +} + +func (m *MockQData) RemoveAccountData(ctx context.Context, keys []AccountDataKey) (int64, error) { + a := m.Called(ctx, keys) + return a.Get(0).(int64), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_q_effects.go b/services/horizon/internal/db2/history/mock_q_effects.go new file mode 100644 index 0000000000..d8bdd97765 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_effects.go @@ -0,0 +1,21 @@ +package history + +import ( + "context" + "github.com/stretchr/testify/mock" +) + +// MockQEffects is a mock implementation of the QEffects interface +type MockQEffects struct { + mock.Mock +} + +func (m *MockQEffects) NewEffectBatchInsertBuilder(maxBatchSize int) EffectBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(EffectBatchInsertBuilder) +} + +func (m *MockQEffects) CreateAccounts(ctx context.Context, addresses []string, maxBatchSize int) (map[string]int64, error) { + a := m.Called(ctx, addresses, maxBatchSize) + return a.Get(0).(map[string]int64), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_q_history_claimable_balances.go b/services/horizon/internal/db2/history/mock_q_history_claimable_balances.go new file mode 100644 index 0000000000..9b7fe1d3b6 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_history_claimable_balances.go @@ -0,0 +1,60 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" +) + +// MockQHistoryClaimableBalances is a mock implementation of the QHistoryClaimableBalances interface +type MockQHistoryClaimableBalances struct { + mock.Mock +} + +func (m *MockQHistoryClaimableBalances) CreateHistoryClaimableBalances(ctx context.Context, ids []string, maxBatchSize int) (map[string]int64, error) { + a := m.Called(ctx, ids, maxBatchSize) + return a.Get(0).(map[string]int64), a.Error(1) +} + +func (m *MockQHistoryClaimableBalances) NewTransactionClaimableBalanceBatchInsertBuilder(maxBatchSize int) TransactionClaimableBalanceBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(TransactionClaimableBalanceBatchInsertBuilder) +} + +// MockTransactionClaimableBalanceBatchInsertBuilder is a mock implementation of the +// TransactionClaimableBalanceBatchInsertBuilder interface +type 
MockTransactionClaimableBalanceBatchInsertBuilder struct { + mock.Mock +} + +func (m *MockTransactionClaimableBalanceBatchInsertBuilder) Add(ctx context.Context, transactionID, accountID int64) error { + a := m.Called(ctx, transactionID, accountID) + return a.Error(0) +} + +func (m *MockTransactionClaimableBalanceBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} + +// NewOperationClaimableBalanceBatchInsertBuilder mock +func (m *MockQHistoryClaimableBalances) NewOperationClaimableBalanceBatchInsertBuilder(maxBatchSize int) OperationClaimableBalanceBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(OperationClaimableBalanceBatchInsertBuilder) +} + +// MockOperationClaimableBalanceBatchInsertBuilder is a mock implementation of the +// OperationClaimableBalanceBatchInsertBuilder interface +type MockOperationClaimableBalanceBatchInsertBuilder struct { + mock.Mock +} + +func (m *MockOperationClaimableBalanceBatchInsertBuilder) Add(ctx context.Context, transactionID, accountID int64) error { + a := m.Called(ctx, transactionID, accountID) + return a.Error(0) +} + +func (m *MockOperationClaimableBalanceBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} diff --git a/services/horizon/internal/db2/history/mock_q_history_liquidity_pools.go b/services/horizon/internal/db2/history/mock_q_history_liquidity_pools.go new file mode 100644 index 0000000000..08f4920de2 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_history_liquidity_pools.go @@ -0,0 +1,60 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" +) + +// MockQHistoryLiquidityPools is a mock implementation of the QHistoryLiquidityPools interface +type MockQHistoryLiquidityPools struct { + mock.Mock +} + +func (m *MockQHistoryLiquidityPools) CreateHistoryLiquidityPools(ctx context.Context, poolIDs []string, maxBatchSize int) (map[string]int64, error) { + a := m.Called(ctx, poolIDs, maxBatchSize) + return a.Get(0).(map[string]int64), a.Error(1) +} + +func (m *MockQHistoryLiquidityPools) NewTransactionLiquidityPoolBatchInsertBuilder(maxBatchSize int) TransactionLiquidityPoolBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(TransactionLiquidityPoolBatchInsertBuilder) +} + +// MockTransactionLiquidityPoolBatchInsertBuilder is a mock implementation of the +// TransactionLiquidityPoolBatchInsertBuilder interface +type MockTransactionLiquidityPoolBatchInsertBuilder struct { + mock.Mock +} + +func (m *MockTransactionLiquidityPoolBatchInsertBuilder) Add(ctx context.Context, transactionID, accountID int64) error { + a := m.Called(ctx, transactionID, accountID) + return a.Error(0) +} + +func (m *MockTransactionLiquidityPoolBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} + +// NewOperationLiquidityPoolBatchInsertBuilder mock +func (m *MockQHistoryLiquidityPools) NewOperationLiquidityPoolBatchInsertBuilder(maxBatchSize int) OperationLiquidityPoolBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(OperationLiquidityPoolBatchInsertBuilder) +} + +// MockOperationLiquidityPoolBatchInsertBuilder is a mock implementation of the +// OperationLiquidityPoolBatchInsertBuilder interface +type MockOperationLiquidityPoolBatchInsertBuilder struct { + mock.Mock +} + +func (m *MockOperationLiquidityPoolBatchInsertBuilder) Add(ctx context.Context, transactionID, accountID int64) error { + a := m.Called(ctx, transactionID, accountID) + 
return a.Error(0) +} + +func (m *MockOperationLiquidityPoolBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} diff --git a/services/horizon/internal/db2/history/mock_q_ledgers.go b/services/horizon/internal/db2/history/mock_q_ledgers.go new file mode 100644 index 0000000000..16d3ef5524 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_ledgers.go @@ -0,0 +1,25 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/xdr" +) + +type MockQLedgers struct { + mock.Mock +} + +func (m *MockQLedgers) InsertLedger(ctx context.Context, + ledger xdr.LedgerHeaderHistoryEntry, + successTxsCount int, + failedTxsCount int, + opCount int, + txSetOpCount int, + ingestVersion int, +) (int64, error) { + a := m.Called(ctx, ledger, successTxsCount, failedTxsCount, opCount, txSetOpCount, ingestVersion) + return a.Get(0).(int64), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_q_liquidity_pools.go b/services/horizon/internal/db2/history/mock_q_liquidity_pools.go new file mode 100644 index 0000000000..7b64b24126 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_liquidity_pools.go @@ -0,0 +1,47 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" +) + +// MockQLiquidityPools is a mock implementation of the QLiquidityPools interface +type MockQLiquidityPools struct { + mock.Mock +} + +func (m *MockQLiquidityPools) UpsertLiquidityPools(ctx context.Context, lps []LiquidityPool) error { + a := m.Called(ctx, lps) + return a.Error(0) +} + +func (m *MockQLiquidityPools) CountLiquidityPools(ctx context.Context) (int, error) { + a := m.Called(ctx) + return a.Get(0).(int), a.Error(1) +} + +func (m *MockQLiquidityPools) GetLiquidityPoolsByID(ctx context.Context, poolIDs []string) ([]LiquidityPool, error) { + a := m.Called(ctx, poolIDs) + return a.Get(0).([]LiquidityPool), a.Error(1) +} + +func (m *MockQLiquidityPools) FindLiquidityPoolByID(ctx context.Context, liquidityPoolID string) (LiquidityPool, error) { + a := m.Called(ctx, liquidityPoolID) + return a.Get(0).(LiquidityPool), a.Error(1) +} + +func (m *MockQLiquidityPools) StreamAllLiquidityPools(ctx context.Context, callback func(LiquidityPool) error) error { + a := m.Called(ctx, callback) + return a.Error(0) +} + +func (m *MockQLiquidityPools) GetUpdatedLiquidityPools(ctx context.Context, sequence uint32) ([]LiquidityPool, error) { + a := m.Called(ctx, sequence) + return a.Get(0).([]LiquidityPool), a.Error(1) +} + +func (m *MockQLiquidityPools) CompactLiquidityPools(ctx context.Context, cutOffSequence uint32) (int64, error) { + a := m.Called(ctx, cutOffSequence) + return a.Get(0).(int64), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_q_offers.go b/services/horizon/internal/db2/history/mock_q_offers.go new file mode 100644 index 0000000000..0c4bc5e9bb --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_offers.go @@ -0,0 +1,42 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" +) + +// MockQOffers is a mock implementation of the QOffers interface +type MockQOffers struct { + mock.Mock +} + +func (m *MockQOffers) StreamAllOffers(ctx context.Context, callback func(Offer) error) error { + a := m.Called(ctx, callback) + return a.Error(0) +} + +func (m *MockQOffers) GetOffersByIDs(ctx context.Context, ids []int64) ([]Offer, error) { + a := m.Called(ctx, ids) + return a.Get(0).([]Offer), a.Error(1) +} + +func (m *MockQOffers) 
GetUpdatedOffers(ctx context.Context, newerThanSequence uint32) ([]Offer, error) { + a := m.Called(ctx, newerThanSequence) + return a.Get(0).([]Offer), a.Error(1) +} + +func (m *MockQOffers) CountOffers(ctx context.Context) (int, error) { + a := m.Called(ctx) + return a.Get(0).(int), a.Error(1) +} + +func (m *MockQOffers) UpsertOffers(ctx context.Context, rows []Offer) error { + a := m.Called(ctx, rows) + return a.Error(0) +} + +func (m *MockQOffers) CompactOffers(ctx context.Context, cutOffSequence uint32) (int64, error) { + a := m.Called(ctx, cutOffSequence) + return a.Get(0).(int64), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_q_operations.go b/services/horizon/internal/db2/history/mock_q_operations.go new file mode 100644 index 0000000000..08a97c6da9 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_operations.go @@ -0,0 +1,14 @@ +package history + +import "github.com/stretchr/testify/mock" + +// MockQOperations is a mock implementation of the QOperations interface +type MockQOperations struct { + mock.Mock +} + +// NewOperationBatchInsertBuilder mock +func (m *MockQOperations) NewOperationBatchInsertBuilder(maxBatchSize int) OperationBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(OperationBatchInsertBuilder) +} diff --git a/services/horizon/internal/db2/history/mock_q_participants.go b/services/horizon/internal/db2/history/mock_q_participants.go new file mode 100644 index 0000000000..9365e06db3 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_participants.go @@ -0,0 +1,43 @@ +package history + +import ( + "context" + "github.com/stretchr/testify/mock" +) + +// MockQParticipants is a mock implementation of the QParticipants interface +type MockQParticipants struct { + mock.Mock +} + +func (m *MockQParticipants) CreateAccounts(ctx context.Context, addresses []string, maxBatchSize int) (map[string]int64, error) { + a := m.Called(ctx, addresses, maxBatchSize) + return a.Get(0).(map[string]int64), a.Error(1) +} + +func (m *MockQParticipants) NewTransactionParticipantsBatchInsertBuilder(maxBatchSize int) TransactionParticipantsBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(TransactionParticipantsBatchInsertBuilder) +} + +// MockTransactionParticipantsBatchInsertBuilder is a mock implementation of the +// TransactionParticipantsBatchInsertBuilder interface +type MockTransactionParticipantsBatchInsertBuilder struct { + mock.Mock +} + +func (m *MockTransactionParticipantsBatchInsertBuilder) Add(ctx context.Context, transactionID, accountID int64) error { + a := m.Called(ctx, transactionID, accountID) + return a.Error(0) +} + +func (m *MockTransactionParticipantsBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} + +// NewOperationParticipantBatchInsertBuilder mock +func (m *MockQParticipants) NewOperationParticipantBatchInsertBuilder(maxBatchSize int) OperationParticipantBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(OperationParticipantBatchInsertBuilder) +} diff --git a/services/horizon/internal/db2/history/mock_q_signers.go b/services/horizon/internal/db2/history/mock_q_signers.go new file mode 100644 index 0000000000..81f56d9fd1 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_signers.go @@ -0,0 +1,58 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/services/horizon/internal/db2" +) + +type MockQSigners struct { + mock.Mock +} + +func (m 
*MockQSigners) GetLastLedgerIngestNonBlocking(ctx context.Context) (uint32, error) { + a := m.Called(ctx) + return a.Get(0).(uint32), a.Error(1) +} + +func (m *MockQSigners) GetLastLedgerIngest(ctx context.Context) (uint32, error) { + a := m.Called(ctx) + return a.Get(0).(uint32), a.Error(1) +} + +func (m *MockQSigners) UpdateLastLedgerIngest(ctx context.Context, ledgerSequence uint32) error { + a := m.Called(ctx, ledgerSequence) + return a.Error(0) +} + +func (m *MockQSigners) AccountsForSigner(ctx context.Context, signer string, page db2.PageQuery) ([]AccountSigner, error) { + a := m.Called(ctx, signer, page) + return a.Get(0).([]AccountSigner), a.Error(1) +} + +func (m *MockQSigners) NewAccountSignersBatchInsertBuilder(maxBatchSize int) AccountSignersBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(AccountSignersBatchInsertBuilder) +} + +func (m *MockQSigners) CreateAccountSigner(ctx context.Context, account, signer string, weight int32, sponsor *string) (int64, error) { + a := m.Called(ctx, account, signer, weight, sponsor) + return a.Get(0).(int64), a.Error(1) +} + +func (m *MockQSigners) RemoveAccountSigner(ctx context.Context, account, signer string) (int64, error) { + a := m.Called(ctx, account, signer) + return a.Get(0).(int64), a.Error(1) +} + +func (m *MockQSigners) SignersForAccounts(ctx context.Context, accounts []string) ([]AccountSigner, error) { + a := m.Called(ctx, accounts) + return a.Get(0).([]AccountSigner), a.Error(1) +} + +func (m *MockQSigners) CountAccounts(ctx context.Context) (int, error) { + a := m.Called(ctx) + return a.Get(0).(int), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_q_trades.go b/services/horizon/internal/db2/history/mock_q_trades.go new file mode 100644 index 0000000000..aa56d527ff --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_trades.go @@ -0,0 +1,52 @@ +package history + +import ( + "context" + + "github.com/stellar/go/xdr" + + "github.com/stretchr/testify/mock" +) + +type MockQTrades struct { + mock.Mock +} + +func (m *MockQTrades) CreateAccounts(ctx context.Context, addresses []string, maxBatchSize int) (map[string]int64, error) { + a := m.Called(ctx, addresses, maxBatchSize) + return a.Get(0).(map[string]int64), a.Error(1) +} + +func (m *MockQTrades) CreateAssets(ctx context.Context, assets []xdr.Asset, maxBatchSize int) (map[string]Asset, error) { + a := m.Called(ctx, assets, maxBatchSize) + return a.Get(0).(map[string]Asset), a.Error(1) +} + +func (m *MockQTrades) CreateHistoryLiquidityPools(ctx context.Context, poolIDs []string, maxBatchSize int) (map[string]int64, error) { + a := m.Called(ctx, poolIDs, maxBatchSize) + return a.Get(0).(map[string]int64), a.Error(1) +} + +func (m *MockQTrades) NewTradeBatchInsertBuilder(maxBatchSize int) TradeBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(TradeBatchInsertBuilder) +} + +func (m *MockQTrades) RebuildTradeAggregationBuckets(ctx context.Context, fromLedger, toLedger uint32) error { + a := m.Called(ctx, fromLedger, toLedger) + return a.Error(0) +} + +type MockTradeBatchInsertBuilder struct { + mock.Mock +} + +func (m *MockTradeBatchInsertBuilder) Add(ctx context.Context, entries ...InsertTrade) error { + a := m.Called(ctx, entries) + return a.Error(0) +} + +func (m *MockTradeBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} diff --git a/services/horizon/internal/db2/history/mock_q_transactions.go b/services/horizon/internal/db2/history/mock_q_transactions.go new 
file mode 100644 index 0000000000..6fdac71c0f --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_transactions.go @@ -0,0 +1,13 @@ +package history + +import "github.com/stretchr/testify/mock" + +// MockQTransactions is a mock implementation of the QTransactions interface +type MockQTransactions struct { + mock.Mock +} + +func (m *MockQTransactions) NewTransactionBatchInsertBuilder(maxBatchSize int) TransactionBatchInsertBuilder { + a := m.Called(maxBatchSize) + return a.Get(0).(TransactionBatchInsertBuilder) +} diff --git a/services/horizon/internal/db2/history/mock_q_trust_lines.go b/services/horizon/internal/db2/history/mock_q_trust_lines.go new file mode 100644 index 0000000000..f6b5b53017 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_q_trust_lines.go @@ -0,0 +1,27 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" +) + +// MockQTrustLines is a mock implementation of the QTrustLines interface +type MockQTrustLines struct { + mock.Mock +} + +func (m *MockQTrustLines) GetTrustLinesByKeys(ctx context.Context, keys []string) ([]TrustLine, error) { + a := m.Called(ctx, keys) + return a.Get(0).([]TrustLine), a.Error(1) +} + +func (m *MockQTrustLines) UpsertTrustLines(ctx context.Context, trustLines []TrustLine) error { + a := m.Called(ctx, trustLines) + return a.Error(0) +} + +func (m *MockQTrustLines) RemoveTrustLines(ctx context.Context, ledgerKeys []string) (int64, error) { + a := m.Called(ctx, ledgerKeys) + return a.Get(0).(int64), a.Error(1) +} diff --git a/services/horizon/internal/db2/history/mock_transactions_batch_insert_builder.go b/services/horizon/internal/db2/history/mock_transactions_batch_insert_builder.go new file mode 100644 index 0000000000..8e2608d553 --- /dev/null +++ b/services/horizon/internal/db2/history/mock_transactions_batch_insert_builder.go @@ -0,0 +1,23 @@ +package history + +import ( + "context" + + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/ingest" +) + +type MockTransactionsBatchInsertBuilder struct { + mock.Mock +} + +func (m *MockTransactionsBatchInsertBuilder) Add(ctx context.Context, transaction ingest.LedgerTransaction, sequence uint32) error { + a := m.Called(ctx, transaction, sequence) + return a.Error(0) +} + +func (m *MockTransactionsBatchInsertBuilder) Exec(ctx context.Context) error { + a := m.Called(ctx) + return a.Error(0) +} diff --git a/services/horizon/internal/db2/history/offers.go b/services/horizon/internal/db2/history/offers.go new file mode 100644 index 0000000000..1d10b1bcde --- /dev/null +++ b/services/horizon/internal/db2/history/offers.go @@ -0,0 +1,189 @@ +package history + +import ( + "context" + + sq "github.com/Masterminds/squirrel" + "github.com/jmoiron/sqlx" + + "github.com/stellar/go/support/errors" +) + +// QOffers defines offer related queries. 
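+// It is implemented by the Q type below and mocked by MockQOffers (added earlier in this change) for unit tests.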
+type QOffers interface { + StreamAllOffers(ctx context.Context, callback func(Offer) error) error + GetOffersByIDs(ctx context.Context, ids []int64) ([]Offer, error) + CountOffers(ctx context.Context) (int, error) + GetUpdatedOffers(ctx context.Context, newerThanSequence uint32) ([]Offer, error) + UpsertOffers(ctx context.Context, offers []Offer) error + CompactOffers(ctx context.Context, cutOffSequence uint32) (int64, error) +} + +func (q *Q) CountOffers(ctx context.Context) (int, error) { + sql := sq.Select("count(*)").Where("deleted = ?", false).From("offers") + + var count int + if err := q.Get(ctx, &count, sql); err != nil { + return 0, errors.Wrap(err, "could not run select query") + } + + return count, nil +} + +// GetOfferByID loads a row from the `offers` table, selected by offer id. +func (q *Q) GetOfferByID(ctx context.Context, id int64) (Offer, error) { + var offer Offer + sql := selectOffers.Where("deleted = ?", false). + Where("offers.offer_id = ?", id) + err := q.Get(ctx, &offer, sql) + return offer, err +} + +// GetOffersByIDs loads rows from the `offers` table, selected by multiple offer ids. +func (q *Q) GetOffersByIDs(ctx context.Context, ids []int64) ([]Offer, error) { + var offers []Offer + sql := selectOffers.Where("deleted = ?", false). + Where(map[string]interface{}{"offers.offer_id": ids}) + err := q.Select(ctx, &offers, sql) + return offers, err +} + +// GetOffers loads rows from `offers` by paging query. +func (q *Q) GetOffers(ctx context.Context, query OffersQuery) ([]Offer, error) { + sql := selectOffers.Where("deleted = ?", false) + sql, err := query.PageQuery.ApplyTo(sql, "offers.offer_id") + + if err != nil { + return nil, errors.Wrap(err, "could not apply query to page") + } + + if query.SellerID != "" { + sql = sql.Where("offers.seller_id = ?", query.SellerID) + } + + if query.Selling != nil { + sql = sql.Where("offers.selling_asset = ?", query.Selling) + } + + if query.Buying != nil { + sql = sql.Where("offers.buying_asset = ?", query.Buying) + } + + if query.Sponsor != "" { + sql = sql.Where("offers.sponsor = ?", query.Sponsor) + } + + var offers []Offer + if err := q.Select(ctx, &offers, sql); err != nil { + return nil, errors.Wrap(err, "could not run select query") + } + + return offers, nil +} + +// StreamAllOffers loads all non-deleted offers. +func (q *Q) StreamAllOffers(ctx context.Context, callback func(Offer) error) error { + var rows *sqlx.Rows + var err error + + if rows, err = q.Query(ctx, selectOffers.Where("deleted = ?", false)); err != nil { + return errors.Wrap(err, "could not run all offers select query") + } + + defer rows.Close() + offer := Offer{} + + for rows.Next() { + if err = rows.StructScan(&offer); err != nil { + return errors.Wrap(err, "could not scan row into offer struct") + } + + if err = callback(offer); err != nil { + return err + } + } + + return rows.Err() + +} + +// GetUpdatedOffers returns all offers created, updated, or deleted after the given ledger sequence. +func (q *Q) GetUpdatedOffers(ctx context.Context, newerThanSequence uint32) ([]Offer, error) { + var offers []Offer + err := q.Select(ctx, &offers, selectOffers.Where("offers.last_modified_ledger > ?", newerThanSequence)) + return offers, err +} + +// UpsertOffers upserts a batch of offers into the offers table. +// There is currently no limit on the number of offers this method can +// accept other than the 2GB limit on the query string length, which should be +// enough for each ledger under the current limits. 
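+// The offers are flattened into one slice per column and passed to upsertRows +// with offer_id as the conflict key.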
+func (q *Q) UpsertOffers(ctx context.Context, offers []Offer) error { + var sellerID, sellingAsset, buyingAsset, offerID, amount, priceN, priceD, + price, flags, lastModifiedLedger, deleted, sponsor []interface{} + + for _, offer := range offers { + sellerID = append(sellerID, offer.SellerID) + offerID = append(offerID, offer.OfferID) + sellingAsset = append(sellingAsset, offer.SellingAsset) + buyingAsset = append(buyingAsset, offer.BuyingAsset) + amount = append(amount, offer.Amount) + priceN = append(priceN, offer.Pricen) + priceD = append(priceD, offer.Priced) + price = append(price, offer.Price) + flags = append(flags, offer.Flags) + lastModifiedLedger = append(lastModifiedLedger, offer.LastModifiedLedger) + deleted = append(deleted, offer.Deleted) + sponsor = append(sponsor, offer.Sponsor) + } + + upsertFields := []upsertField{ + {"seller_id", "text", sellerID}, + {"offer_id", "bigint", offerID}, + {"selling_asset", "text", sellingAsset}, + {"buying_asset", "text", buyingAsset}, + {"amount", "bigint", amount}, + {"pricen", "integer", priceN}, + {"priced", "integer", priceD}, + {"price", "double precision", price}, + {"flags", "integer", flags}, + {"deleted", "bool", deleted}, + {"last_modified_ledger", "integer", lastModifiedLedger}, + {"sponsor", "text", sponsor}, + } + + return q.upsertRows(ctx, "offers", "offer_id", upsertFields) +} + +// CompactOffers removes rows from the offers table which are marked for deletion. +func (q *Q) CompactOffers(ctx context.Context, cutOffSequence uint32) (int64, error) { + sql := sq.Delete("offers"). + Where("deleted = ?", true). + Where("last_modified_ledger <= ?", cutOffSequence) + + result, err := q.Exec(ctx, sql) + if err != nil { + return 0, errors.Wrap(err, "cannot delete offer rows") + } + + if err = q.UpdateOfferCompactionSequence(ctx, cutOffSequence); err != nil { + return 0, errors.Wrap(err, "cannot update offer compaction sequence") + } + + return result.RowsAffected() +} + +var selectOffers = sq.Select(` + seller_id, + offer_id, + selling_asset, + buying_asset, + amount, + pricen, + priced, + price, + flags, + deleted, + last_modified_ledger, + sponsor +`).From("offers") diff --git a/services/horizon/internal/db2/history/offers_test.go b/services/horizon/internal/db2/history/offers_test.go new file mode 100644 index 0000000000..85951bfa1c --- /dev/null +++ b/services/horizon/internal/db2/history/offers_test.go @@ -0,0 +1,455 @@ +package history + +import ( + "strconv" + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" +) + +var ( + issuer = xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + twoEurOfferSeller = xdr.MustAddress("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2") + + nativeAsset = xdr.MustNewNativeAsset() + eurAsset = xdr.MustNewCreditAsset("EUR", issuer.Address()) + usdAsset = xdr.MustNewCreditAsset("USD", issuer.Address()) + + eurOffer = Offer{ + SellerID: issuer.Address(), + OfferID: int64(4), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(500), + Pricen: int32(1), + Priced: int32(1), + Price: float64(1), + Flags: 1, + LastModifiedLedger: uint32(1234), + Sponsor: null.StringFrom(sponsor), + } + twoEurOffer = Offer{ + SellerID: twoEurOfferSeller.Address(), + OfferID: int64(5), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(500), + Pricen: int32(2), + Priced: int32(1), + Price: float64(2), + Flags: 2, + 
LastModifiedLedger: uint32(1234), + } + threeEurOffer = Offer{ + SellerID: twoEurOfferSeller.Address(), + OfferID: int64(50), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(500), + Pricen: int32(3), + Priced: int32(1), + Price: float64(3), + Flags: 2, + LastModifiedLedger: uint32(1234), + } +) + +func insertOffer(tt *test.T, q *Q, offer Offer) error { + return q.UpsertOffers(tt.Ctx, []Offer{offer}) +} + +func TestGetOfferByID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := insertOffer(tt, q, eurOffer) + tt.Assert.NoError(err) + offer, err := q.GetOfferByID(tt.Ctx, eurOffer.OfferID) + tt.Assert.NoError(err) + tt.Assert.Equal(offer, eurOffer) +} + +func TestGetNonExistentOfferByID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + _, err := q.GetOfferByID(tt.Ctx, 12345) + tt.Assert.True(q.NoRows(err)) +} + +func TestQueryEmptyOffers(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + var offers []Offer + err := q.StreamAllOffers(tt.Ctx, func(offer Offer) error { + offers = append(offers, offer) + return nil + }) + + tt.Assert.NoError(err) + tt.Assert.Len(offers, 0) + + updated, err := q.GetUpdatedOffers(tt.Ctx, 0) + tt.Assert.NoError(err) + tt.Assert.Len(updated, 0) + + count, err := q.CountOffers(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(0, count) + + numRemoved, err := q.CompactOffers(tt.Ctx, 100) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(0), numRemoved) + seq, err := q.GetOfferCompactionSequence(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(uint32(100), seq) +} + +func TestInsertOffers(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := insertOffer(tt, q, eurOffer) + tt.Assert.NoError(err) + err = insertOffer(tt, q, twoEurOffer) + tt.Assert.NoError(err) + + var offers []Offer + err = q.StreamAllOffers(tt.Ctx, func(offer Offer) error { + offers = append(offers, offer) + return nil + }) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 2) + + offersByID := map[int64]Offer{ + offers[0].OfferID: offers[0], + offers[1].OfferID: offers[1], + } + + tt.Assert.Equal(offersByID[eurOffer.OfferID], eurOffer) + tt.Assert.Equal(offersByID[twoEurOffer.OfferID], twoEurOffer) + + count, err := q.CountOffers(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(2, count) + + numRemoved, err := q.CompactOffers(tt.Ctx, 12350) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(0), numRemoved) + seq, err := q.GetOfferCompactionSequence(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(uint32(12350), seq) + + afterCompactionCount, err := q.CountOffers(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(2, afterCompactionCount) + + var afterCompactionOffers []Offer + err = q.StreamAllOffers(tt.Ctx, func(offer Offer) error { + afterCompactionOffers = append(afterCompactionOffers, offer) + return nil + }) + tt.Assert.NoError(err) + tt.Assert.Len(afterCompactionOffers, 2) +} + +func TestUpdateOffer(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := insertOffer(tt, q, eurOffer) + tt.Assert.NoError(err) + + var offers []Offer + err = q.StreamAllOffers(tt.Ctx, func(offer Offer) error { + offers = append(offers, offer) + return nil + }) + 
tt.Assert.NoError(err) + tt.Assert.Len(offers, 1) + + updatedOffers, err := q.GetUpdatedOffers(tt.Ctx, 1233) + tt.Assert.NoError(err) + tt.Assert.Equal(offers, updatedOffers) + + updatedOffers, err = q.GetUpdatedOffers(tt.Ctx, 100) + tt.Assert.NoError(err) + tt.Assert.Equal(offers, updatedOffers) + + updatedOffers, err = q.GetUpdatedOffers(tt.Ctx, 1234) + tt.Assert.NoError(err) + tt.Assert.Len(updatedOffers, 0) + + tt.Assert.Equal(offers[0], eurOffer) + + modifiedEurOffer := eurOffer + modifiedEurOffer.Amount -= 10 + + err = q.UpsertOffers(tt.Ctx, []Offer{modifiedEurOffer}) + tt.Assert.NoError(err) + + offers = nil + err = q.StreamAllOffers(tt.Ctx, func(offer Offer) error { + offers = append(offers, offer) + return nil + }) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 1) + + updatedOffers, err = q.GetUpdatedOffers(tt.Ctx, 1233) + tt.Assert.NoError(err) + tt.Assert.Equal(offers, updatedOffers) + + updatedOffers, err = q.GetUpdatedOffers(tt.Ctx, 1235) + tt.Assert.NoError(err) + tt.Assert.Len(updatedOffers, 0) + + tt.Assert.Equal(offers[0], modifiedEurOffer) +} + +func TestRemoveOffer(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := insertOffer(tt, q, eurOffer) + tt.Assert.NoError(err) + var offers []Offer + err = q.StreamAllOffers(tt.Ctx, func(offer Offer) error { + offers = append(offers, offer) + return nil + }) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 1) + tt.Assert.Equal(offers[0], eurOffer) + + deletedOffer := eurOffer + deletedOffer.Deleted = true + deletedOffer.LastModifiedLedger = 1236 + err = q.UpsertOffers(tt.Ctx, []Offer{deletedOffer}) + tt.Assert.NoError(err) + expectedUpdates := offers + expectedUpdates[0].LastModifiedLedger = 1236 + expectedUpdates[0].Deleted = true + + offers = nil + err = q.StreamAllOffers(tt.Ctx, func(offer Offer) error { + offers = append(offers, offer) + return nil + }) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 0) + + offers, err = q.GetOffersByIDs(tt.Ctx, []int64{expectedUpdates[0].OfferID}) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 0) + + _, err = q.GetOfferByID(tt.Ctx, int64(expectedUpdates[0].OfferID)) + tt.Assert.True(q.NoRows(err)) + + updated, err := q.GetUpdatedOffers(tt.Ctx, 1234) + tt.Assert.NoError(err) + tt.Assert.Equal(expectedUpdates, updated) + + count, err := q.CompactOffers(tt.Ctx, 1235) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(0), count) + + updated, err = q.GetUpdatedOffers(tt.Ctx, 1234) + tt.Assert.NoError(err) + tt.Assert.Equal(expectedUpdates, updated) + + count, err = q.CompactOffers(tt.Ctx, 1236) + tt.Assert.NoError(err) + tt.Assert.Equal(int64(1), count) + seq, err := q.GetOfferCompactionSequence(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(uint32(1236), seq) + + updated, err = q.GetUpdatedOffers(tt.Ctx, 1234) + tt.Assert.NoError(err) + tt.Assert.Len(updated, 0) +} + +func TestGetOffers(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := insertOffer(tt, q, eurOffer) + tt.Assert.NoError(err) + err = insertOffer(tt, q, twoEurOffer) + tt.Assert.NoError(err) + + // check removed offers aren't included in GetOffer queries + err = insertOffer(tt, q, threeEurOffer) + tt.Assert.NoError(err) + deletedOffer := threeEurOffer + deletedOffer.Deleted = true + deletedOffer.LastModifiedLedger = 1235 + err = q.UpsertOffers(tt.Ctx, []Offer{deletedOffer}) + tt.Assert.NoError(err) + + pageQuery, err := db2.NewPageQuery("", false, "", 10) 
+ tt.Assert.NoError(err) + + t.Run("Filter by selling asset", func(t *testing.T) { + query := OffersQuery{ + PageQuery: pageQuery, + Selling: &usdAsset, + } + + offers, err := q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 0) + + query = OffersQuery{ + PageQuery: pageQuery, + Selling: &nativeAsset, + } + + offers, err = q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 2) + + for _, offer := range offers { + tt.Assert.Equal(nativeAsset, offer.SellingAsset) + } + }) + + t.Run("Filter by buying asset", func(t *testing.T) { + query := OffersQuery{ + PageQuery: pageQuery, + Buying: &eurAsset, + } + + offers, err := q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 2) + + for _, offer := range offers { + tt.Assert.Equal(eurAsset, offer.BuyingAsset) + } + + query = OffersQuery{ + PageQuery: pageQuery, + Buying: &usdAsset, + } + + offers, err = q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 0) + }) + + t.Run("Filter by seller", func(t *testing.T) { + sellerID := issuer.Address() + query := OffersQuery{ + PageQuery: pageQuery, + SellerID: sellerID, + } + + offers, err := q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 1) + + tt.Assert.Equal(offers[0], eurOffer) + }) + + t.Run("Filter by sponsor", func(t *testing.T) { + query := OffersQuery{ + PageQuery: pageQuery, + Sponsor: sponsor, + } + + offers, err := q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 1) + + tt.Assert.Equal(offers[0], eurOffer) + }) + + t.Run("PageQuery", func(t *testing.T) { + pageQuery, err := db2.NewPageQuery("", false, "", 10) + tt.Assert.NoError(err) + + query := OffersQuery{ + PageQuery: pageQuery, + } + + offers, err := q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 2) + + offersByID := map[int64]Offer{ + offers[0].OfferID: offers[0], + offers[1].OfferID: offers[1], + } + + tt.Assert.Equal(offersByID[eurOffer.OfferID], eurOffer) + tt.Assert.Equal(offersByID[twoEurOffer.OfferID], twoEurOffer) + + pageQuery, err = db2.NewPageQuery("", false, "asc", 1) + tt.Assert.NoError(err) + query = OffersQuery{ + PageQuery: pageQuery, + } + + offers, err = q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 1) + + tt.Assert.Equal(offers[0], eurOffer) + + pageQuery, err = db2.NewPageQuery("", false, "desc", 1) + tt.Assert.NoError(err) + query = OffersQuery{ + PageQuery: pageQuery, + } + + offers, err = q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 1) + + tt.Assert.Equal(offers[0], twoEurOffer) + + pageQuery, err = db2.NewPageQuery( + strconv.FormatInt(int64(eurOffer.OfferID), 10), + false, + "", + 10, + ) + tt.Assert.NoError(err) + query = OffersQuery{ + PageQuery: pageQuery, + } + + offers, err = q.GetOffers(tt.Ctx, query) + tt.Assert.NoError(err) + tt.Assert.Len(offers, 1) + + tt.Assert.Equal(offers[0], twoEurOffer) + }) +} diff --git a/services/horizon/internal/db2/history/operation.go b/services/horizon/internal/db2/history/operation.go new file mode 100644 index 0000000000..25b99e60fa --- /dev/null +++ b/services/horizon/internal/db2/history/operation.go @@ -0,0 +1,422 @@ +package history + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + jet "github.com/go-jet/jet/v2/postgres" + "github.com/stellar/go/services/horizon/internal/db2/schema/generated/db/horizon/public/table" + + sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/services/horizon/internal/db2" + 
"github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// LedgerSequence return the ledger in which the effect occurred. +func (r *Operation) LedgerSequence() int32 { + id := toid.Parse(r.ID) + return id.LedgerSequence +} + +// UnmarshalDetails unmarshals the details of this operation into `dest` +func (r *Operation) UnmarshalDetails(dest interface{}) error { + if !r.DetailsString.Valid { + return nil + } + preprocessedDetails, err := preprocessDetails(r.DetailsString.String) + if err != nil { + return errors.Wrap(err, "error in unmarshal") + } + err = json.Unmarshal(preprocessedDetails, &dest) + if err != nil { + return errors.Wrap(err, "error in unmarshal") + } + + return nil +} + +func preprocessDetails(details string) ([]byte, error) { + var dest map[string]interface{} + // Create a decoder using Number instead of float64 when decoding + // (so that decoding covers the full uint64 range) + decoder := json.NewDecoder(strings.NewReader(details)) + decoder.UseNumber() + if err := decoder.Decode(&dest); err != nil { + return nil, err + } + for k, v := range dest { + if strings.HasSuffix(k, "_muxed_id") { + if vNumber, ok := v.(json.Number); ok { + // transform it into a string so that _muxed_id unmarshalling works with `,string` tags + // see https://github.com/stellar/go/pull/3716#issuecomment-867057436 + dest[k] = vNumber.String() + } + } + } + return json.Marshal(dest) +} + +func generateFeePercentileProjections(percentiles []int, operationCount jet.IntegerExpression) []jet.Projection { + var projections []jet.Projection + for _, percentile := range percentiles { + //ceil(percentile_disc(0.{{ . }}) WITHIN GROUP (ORDER BY fee_charged/{{template "operation_count"}}))::bigint AS "fee_charged_p{{ . }}", + projection := jet.CAST( + jet.CEIL( + jet.CAST( + jet.PERCENTILE_DISC(jet.Float(float64(percentile) / 100)).WITHIN_GROUP( + jet.ORDER_BY(table.HistoryTransactions.FeeCharged.DIV(operationCount)))). + AS_DOUBLE())). + AS_BIGINT().AS(fmt.Sprintf("FeeStats.fee_charged_p%d", percentile)) + projections = append(projections, (projection)) + + //ceil(percentile_disc(0.{{ . }}) WITHIN GROUP (ORDER BY COALESCE(new_max_fee, max_fee)/{{template "operation_count"}}))::bigint AS "max_fee_p{{ . }}", + projection = jet.CAST( + jet.CEIL( + jet.CAST( + jet.PERCENTILE_DISC(jet.Float(float64(percentile) / 100)).WITHIN_GROUP( + jet.ORDER_BY(jet.IntExp( + jet.COALESCE(table.HistoryTransactions.NewMaxFee, table.HistoryTransactions.MaxFee)). + DIV(operationCount)))). + AS_DOUBLE())). + AS_BIGINT().AS(fmt.Sprintf("FeeStats.max_fee_p%d", percentile)) + projections = append(projections, (projection)) + } + return projections +} + +func generateFeeStatsQuery(percentiles []int, fromSeq int32, toSeq int32) jet.SelectStatement { + + //{{define "operation_count"}}(CASE WHEN new_max_fee IS NULL THEN operation_count ELSE operation_count + 1 END){{end}} + operationCount := jet.IntExp(jet.CASE().WHEN(table.HistoryTransactions.NewMaxFee.IS_NULL()).THEN(table.HistoryTransactions.OperationCount). 
+ ELSE(table.HistoryTransactions.OperationCount.ADD(jet.Int(1)))) + + projections := append(generateFeePercentileProjections(percentiles, operationCount), + jet.CAST(jet.CEIL(jet.FloatExp(jet.MAX(table.HistoryTransactions.FeeCharged.DIV(operationCount))))).AS_BIGINT().AS("FeeStats.fee_charged_max"), + jet.CAST(jet.CEIL(jet.FloatExp(jet.MIN(table.HistoryTransactions.FeeCharged.DIV(operationCount))))).AS_BIGINT().AS("FeeStats.fee_charged_min"), + jet.CAST(jet.CEIL(jet.CAST(jet.MODE().WITHIN_GROUP(jet.ORDER_BY(table.HistoryTransactions.FeeCharged.DIV(operationCount)))).AS_DOUBLE())).AS_BIGINT().AS("FeeStats.fee_charged_mode"), + jet.CAST(jet.MAX(jet.IntExp(jet.COALESCE(table.HistoryTransactions.NewMaxFee, table.HistoryTransactions.MaxFee)).DIV(operationCount))).AS_BIGINT().AS("FeeStats.max_fee_max"), + jet.CAST(jet.MIN(jet.IntExp(jet.COALESCE(table.HistoryTransactions.NewMaxFee, table.HistoryTransactions.MaxFee)).DIV(operationCount))).AS_BIGINT().AS("FeeStats.max_fee_min"), + jet.CAST(jet.CEIL(jet.CAST(jet.MODE().WITHIN_GROUP(jet.ORDER_BY(jet.IntExp(jet.COALESCE(table.HistoryTransactions.NewMaxFee, table.HistoryTransactions.MaxFee)).DIV(operationCount)))).AS_DOUBLE())).AS_BIGINT().AS("FeeStats.max_fee_mode")) + + sql := jet.SELECT(projections[0], projections[1:]...).FROM(table.HistoryTransactions).WHERE( + table.HistoryTransactions.LedgerSequence.GT(jet.Int32(fromSeq)).AND(table.HistoryTransactions.LedgerSequence.LT_EQ(jet.Int32(toSeq)))) + + return sql +} + +// FeeStats returns operation fee stats for the last 5 ledgers. +// Currently, we hard code the query to return the last 5 ledgers worth of transactions. +// TODO: make the number of ledgers configurable. +func (q *Q) FeeStats(ctx context.Context, currentSeq int32, dest *FeeStats) error { + percentiles := []int{10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 99} + sql := generateFeeStatsQuery(percentiles, currentSeq-5, currentSeq) + return sql.QueryContext(ctx, currentDBConn(q, table.HistoryTransactions.TableName()), dest) +} + +// Operations provides a helper to filter the operations table with pre-defined +// filters. See `OperationsQ` for the available filters. +func (q *Q) Operations() *OperationsQ { + query := &OperationsQ{ + parent: q, + opIdCol: "hop.id", + includeFailed: false, + includeTransactions: false, + sql: selectOperation, + } + + return query +} + +// OperationByID returns an Operation and optionally a Transaction given an operation id +func (q *Q) OperationByID(ctx context.Context, includeTransactions bool, id int64) (Operation, *Transaction, error) { + sql := selectOperation. + Limit(1). 
+ Where("hop.id = ?", id) + + var operation Operation + err := q.Get(ctx, &operation, sql) + if err != nil { + return operation, nil, err + } + + if includeTransactions { + var transaction Transaction + if err = q.TransactionByHash(ctx, &transaction, operation.TransactionHash); err != nil { + return operation, nil, err + } + + err = validateTransactionForOperation(transaction, operation) + if err != nil { + return operation, nil, err + } + + return operation, &transaction, err + } + return operation, nil, err +} + +// ForAccount filters the operations collection to a specific account +func (q *OperationsQ) ForAccount(ctx context.Context, aid string) *OperationsQ { + var account Account + q.Err = q.parent.AccountByAddress(ctx, &account, aid) + if q.Err != nil { + return q + } + + q.sql = q.sql.Join( + "history_operation_participants hopp ON "+ + "hopp.history_operation_id = hop.id", + ).Where("hopp.history_account_id = ?", account.ID) + + // in order to use history_operation_participants.hist_op_p_id index + q.opIdCol = "hopp.history_operation_id" + + return q +} + +// ForClaimableBalance filters the query to only operations pertaining to a +// claimable balance, specified by the claimable balance's hex-encoded id. +func (q *OperationsQ) ForClaimableBalance(ctx context.Context, cbID string) *OperationsQ { + var hCB HistoryClaimableBalance + hCB, q.Err = q.parent.ClaimableBalanceByID(ctx, cbID) + if q.Err != nil { + return q + } + + q.sql = q.sql.Join( + "history_operation_claimable_balances hocb ON "+ + "hocb.history_operation_id = hop.id", + ).Where("hocb.history_claimable_balance_id = ?", hCB.InternalID) + + // in order to use hocb.history_operation_id index + q.opIdCol = "hocb.history_operation_id" + + return q +} + +// ForLiquidityPools filters the query to only operations pertaining to a +// liquidity pool, specified by the liquidity pool id as an hex-encoded string. +func (q *OperationsQ) ForLiquidityPool(ctx context.Context, lpID string) *OperationsQ { + var hLP HistoryLiquidityPool + hLP, q.Err = q.parent.LiquidityPoolByID(ctx, lpID) + if q.Err != nil { + return q + } + + q.sql = q.sql.Join( + "history_operation_liquidity_pools holp ON "+ + "holp.history_operation_id = hop.id", + ).Where("holp.history_liquidity_pool_id = ?", hLP.InternalID) + + // in order to use holp.history_operation_id index + q.opIdCol = "holp.history_operation_id" + + return q +} + +// ForLedger filters the query to a only operations in a specific ledger, +// specified by its sequence. +func (q *OperationsQ) ForLedger(ctx context.Context, seq int32) *OperationsQ { + var ledger Ledger + q.Err = q.parent.LedgerBySequence(ctx, &ledger, seq) + if q.Err != nil { + return q + } + + start := toid.ID{LedgerSequence: seq} + end := toid.ID{LedgerSequence: seq + 1} + q.sql = q.sql.Where( + "hop.id >= ? AND hop.id < ?", + start.ToInt64(), + end.ToInt64(), + ) + + return q +} + +// ForTransaction filters the query to only operations in a specific +// transaction, specified by the transactions's hex-encoded hash. +func (q *OperationsQ) ForTransaction(ctx context.Context, hash string) *OperationsQ { + var tx Transaction + q.Err = q.parent.TransactionByHash(ctx, &tx, hash) + if q.Err != nil { + return q + } + + start := toid.Parse(tx.ID) + end := start + end.TransactionOrder++ + q.sql = q.sql.Where( + "hop.id >= ? 
AND hop.id < ?", + start.ToInt64(), + end.ToInt64(), + ) + + return q +} + +// OnlyPayments filters the query being built to only include operations that +// are in the "payment" class of operations: CreateAccountOps, Payments, and +// PathPayments. +func (q *OperationsQ) OnlyPayments() *OperationsQ { + q.sql = q.sql.Where(sq.Eq{"hop.type": []xdr.OperationType{ + xdr.OperationTypeCreateAccount, + xdr.OperationTypePayment, + xdr.OperationTypePathPaymentStrictReceive, + xdr.OperationTypePathPaymentStrictSend, + xdr.OperationTypeAccountMerge, + }}) + return q +} + +// IncludeFailed changes the query to include failed transactions. +func (q *OperationsQ) IncludeFailed() *OperationsQ { + q.includeFailed = true + return q +} + +// IncludeTransactions changes the query to fetch transaction data in addition to operation records. +func (q *OperationsQ) IncludeTransactions() *OperationsQ { + q.includeTransactions = true + return q +} + +// Page specifies the paging constraints for the query being built by `q`. +func (q *OperationsQ) Page(page db2.PageQuery) *OperationsQ { + if q.Err != nil { + return q + } + + q.sql, q.Err = page.ApplyTo(q.sql, q.opIdCol) + return q +} + +// Fetch returns results specified by a filtered operations query +func (q *OperationsQ) Fetch(ctx context.Context) ([]Operation, []Transaction, error) { + if q.Err != nil { + return nil, nil, q.Err + } + + if !q.includeFailed { + q.sql = q.sql. + Where("(ht.successful = true OR ht.successful IS NULL)") + } + + var operations []Operation + var transactions []Transaction + q.Err = q.parent.Select(ctx, &operations, q.sql) + if q.Err != nil { + return nil, nil, q.Err + } + set := map[int64]bool{} + transactionIDs := []int64{} + + for _, o := range operations { + var resultXDR xdr.TransactionResult + err := xdr.SafeUnmarshalBase64(o.TxResult, &resultXDR) + if err != nil { + return nil, nil, err + } + + if !set[o.TransactionID] { + set[o.TransactionID] = true + transactionIDs = append(transactionIDs, o.TransactionID) + } + + if !q.includeFailed { + if !o.TransactionSuccessful { + return nil, nil, errors.Errorf("Corrupted data! `include_failed=false` but returned transaction is failed: %s", o.TransactionHash) + } + + if !resultXDR.Successful() { + return nil, nil, errors.Errorf("Corrupted data! `include_failed=false` but returned transaction is failed: %s %s", o.TransactionHash, o.TxResult) + } + } + + // Check if `successful` equals resultXDR + if o.TransactionSuccessful && !resultXDR.Successful() { + return nil, nil, errors.Errorf("Corrupted data! `successful=true` but returned transaction is not success: %s %s", o.TransactionHash, o.TxResult) + } + + if !o.TransactionSuccessful && resultXDR.Successful() { + return nil, nil, errors.Errorf("Corrupted data! `successful=false` but returned transaction is success: %s %s", o.TransactionHash, o.TxResult) + } + } + + if q.includeTransactions && len(transactionIDs) > 0 { + transactionsByID, err := q.parent.TransactionsByIDs(ctx, transactionIDs...) 
+ if err != nil { + return nil, nil, err + } + for _, o := range operations { + transaction, ok := transactionsByID[o.TransactionID] + if !ok { + return nil, nil, errors.Errorf("transaction with id %v could not be found", o.TransactionID) + } + err = validateTransactionForOperation(transaction, o) + if err != nil { + return nil, nil, err + } + + transactions = append(transactions, transaction) + } + } + + return operations, transactions, nil +} + +func validateTransactionForOperation(transaction Transaction, operation Operation) error { + if transaction.ID != operation.TransactionID { + return errors.Errorf( + "transaction id %v does not match transaction id in operation %v", + transaction.ID, + operation.TransactionID, + ) + } + if transaction.TransactionHash != operation.TransactionHash { + return errors.Errorf( + "transaction hash %v does not match transaction hash in operation %v", + transaction.TransactionHash, + operation.TransactionHash, + ) + } + if transaction.TxResult != operation.TxResult { + return errors.Errorf( + "transaction result %v does not match transaction result in operation %v", + transaction.TxResult, + operation.TxResult, + ) + } + if transaction.Successful != operation.TransactionSuccessful { + return errors.Errorf( + "transaction successful flag %v does not match transaction successful flag in operation %v", + transaction.Successful, + operation.TransactionSuccessful, + ) + } + + return nil +} + +// QOperations defines history_operation related queries. +type QOperations interface { + NewOperationBatchInsertBuilder(maxBatchSize int) OperationBatchInsertBuilder +} + +var selectOperation = sq.Select( + "hop.id, " + + "hop.transaction_id, " + + "hop.application_order, " + + "hop.type, " + + "hop.details, " + + "hop.source_account, " + + "hop.source_account_muxed, " + + "ht.transaction_hash, " + + "ht.tx_result, " + + "COALESCE(ht.successful, true) as transaction_successful"). + From("history_operations hop"). 
+ LeftJoin("history_transactions ht ON ht.id = hop.transaction_id") diff --git a/services/horizon/internal/db2/history/operation_batch_insert_builder.go b/services/horizon/internal/db2/history/operation_batch_insert_builder.go new file mode 100644 index 0000000000..a3baee8863 --- /dev/null +++ b/services/horizon/internal/db2/history/operation_batch_insert_builder.go @@ -0,0 +1,67 @@ +package history + +import ( + "context" + + "github.com/guregu/null" + "github.com/stellar/go/support/db" + "github.com/stellar/go/xdr" +) + +// OperationBatchInsertBuilder is used to insert a transaction's operations into the +// history_operations table +type OperationBatchInsertBuilder interface { + Add( + ctx context.Context, + id int64, + transactionID int64, + applicationOrder uint32, + operationType xdr.OperationType, + details []byte, + sourceAccount string, + sourceAcccountMuxed null.String, + ) error + Exec(ctx context.Context) error +} + +// operationBatchInsertBuilder is a simple wrapper around db.BatchInsertBuilder +type operationBatchInsertBuilder struct { + builder db.BatchInsertBuilder +} + +// NewOperationBatchInsertBuilder constructs a new TransactionBatchInsertBuilder instance +func (q *Q) NewOperationBatchInsertBuilder(maxBatchSize int) OperationBatchInsertBuilder { + return &operationBatchInsertBuilder{ + builder: db.BatchInsertBuilder{ + Table: q.GetTable("history_operations"), + MaxBatchSize: maxBatchSize, + }, + } +} + +// Add adds a transaction's operations to the batch +func (i *operationBatchInsertBuilder) Add( + ctx context.Context, + id int64, + transactionID int64, + applicationOrder uint32, + operationType xdr.OperationType, + details []byte, + sourceAccount string, + sourceAccountMuxed null.String, +) error { + return i.builder.Row(ctx, map[string]interface{}{ + "id": id, + "transaction_id": transactionID, + "application_order": applicationOrder, + "type": operationType, + "details": details, + "source_account": sourceAccount, + "source_account_muxed": sourceAccountMuxed, + }) + +} + +func (i *operationBatchInsertBuilder) Exec(ctx context.Context) error { + return i.builder.Exec(ctx) +} diff --git a/services/horizon/internal/db2/history/operation_batch_insert_builder_test.go b/services/horizon/internal/db2/history/operation_batch_insert_builder_test.go new file mode 100644 index 0000000000..e5f2206f64 --- /dev/null +++ b/services/horizon/internal/db2/history/operation_batch_insert_builder_test.go @@ -0,0 +1,86 @@ +package history + +import ( + "encoding/json" + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/xdr" +) + +func TestAddOperation(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + txBatch := q.NewTransactionBatchInsertBuilder(0) + + builder := q.NewOperationBatchInsertBuilder(1) + + transactionHash := "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c" + transactionResult := "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=" + transaction := buildLedgerTransaction( + t, + testTransaction{ + index: 1, + envelopeXDR: "AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + resultXDR: transactionResult, + metaXDR: 
"AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: transactionHash, + }, + ) + + sequence := int32(56) + tt.Assert.NoError(txBatch.Add(tt.Ctx, transaction, uint32(sequence))) + tt.Assert.NoError(txBatch.Exec(tt.Ctx)) + + details, err := json.Marshal(map[string]string{ + "to": "GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", + "from": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "amount": "10.0000000", + "asset_type": "native", + }) + tt.Assert.NoError(err) + + sourceAccount := "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY" + sourceAccountMuxed := "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26" + err = builder.Add(tt.Ctx, + toid.New(sequence, 1, 1).ToInt64(), + toid.New(sequence, 1, 0).ToInt64(), + 1, + xdr.OperationTypePayment, + details, + sourceAccount, + null.StringFrom(sourceAccountMuxed), + ) + tt.Assert.NoError(err) + + err = builder.Exec(tt.Ctx) + tt.Assert.NoError(err) + + ops := []Operation{} + err = q.Select(tt.Ctx, &ops, selectOperation) + + if tt.Assert.NoError(err) { + tt.Assert.Len(ops, 1) + + op := ops[0] + tt.Assert.Equal(int64(240518172673), op.ID) + tt.Assert.Equal(int64(240518172672), op.TransactionID) + tt.Assert.Equal(transactionHash, op.TransactionHash) + tt.Assert.Equal(transactionResult, op.TxResult) + tt.Assert.Equal(int32(1), op.ApplicationOrder) + tt.Assert.Equal(xdr.OperationTypePayment, op.Type) + tt.Assert.Equal( + "{\"to\": \"GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y\", \"from\": \"GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY\", \"amount\": \"10.0000000\", \"asset_type\": \"native\"}", + op.DetailsString.String, + ) + tt.Assert.Equal(sourceAccount, op.SourceAccount) + tt.Assert.Equal(sourceAccountMuxed, op.SourceAccountMuxed.String) + tt.Assert.Equal(true, op.TransactionSuccessful) + } +} diff --git a/services/horizon/internal/db2/history/operation_participant_batch_insert_builder.go b/services/horizon/internal/db2/history/operation_participant_batch_insert_builder.go new file mode 100644 index 0000000000..6b2b3afb56 --- /dev/null +++ b/services/horizon/internal/db2/history/operation_participant_batch_insert_builder.go @@ -0,0 +1,49 @@ +package history + +import ( + "context" + + "github.com/stellar/go/support/db" +) + +// OperationParticipantBatchInsertBuilder is used to insert a transaction's operations into the +// history_operations table +type OperationParticipantBatchInsertBuilder interface { + Add( + ctx context.Context, + operationID int64, + accountID int64, + ) error + Exec(ctx context.Context) error +} + +// operationParticipantBatchInsertBuilder is a simple wrapper around db.BatchInsertBuilder +type operationParticipantBatchInsertBuilder struct { + builder db.BatchInsertBuilder +} + +// NewOperationParticipantBatchInsertBuilder constructs a new TransactionBatchInsertBuilder instance +func (q *Q) NewOperationParticipantBatchInsertBuilder(maxBatchSize int) OperationParticipantBatchInsertBuilder { + return 
&operationParticipantBatchInsertBuilder{ + builder: db.BatchInsertBuilder{ + Table: q.GetTable("history_operation_participants"), + MaxBatchSize: maxBatchSize, + }, + } +} + +// Add adds an operation participant to the batch +func (i *operationParticipantBatchInsertBuilder) Add( + ctx context.Context, + operationID int64, + accountID int64, +) error { + return i.builder.Row(ctx, map[string]interface{}{ + "history_operation_id": operationID, + "history_account_id": accountID, + }) +} + +func (i *operationParticipantBatchInsertBuilder) Exec(ctx context.Context) error { + return i.builder.Exec(ctx) +} diff --git a/services/horizon/internal/db2/history/operation_participant_batch_insert_builder_test.go b/services/horizon/internal/db2/history/operation_participant_batch_insert_builder_test.go new file mode 100644 index 0000000000..51a0c4800d --- /dev/null +++ b/services/horizon/internal/db2/history/operation_participant_batch_insert_builder_test.go @@ -0,0 +1,42 @@ +package history + +import ( + "testing" + + sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestAddOperationParticipants(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + builder := q.NewOperationParticipantBatchInsertBuilder(1) + err := builder.Add(tt.Ctx, 240518172673, 1) + tt.Assert.NoError(err) + + err = builder.Exec(tt.Ctx) + tt.Assert.NoError(err) + + type hop struct { + OperationID int64 `db:"history_operation_id"` + AccountID int64 `db:"history_account_id"` + } + + ops := []hop{} + err = q.Select(tt.Ctx, &ops, sq.Select( + "hopp.history_operation_id, "+ + "hopp.history_account_id"). + From("history_operation_participants hopp"), + ) + + if tt.Assert.NoError(err) { + tt.Assert.Len(ops, 1) + + op := ops[0] + tt.Assert.Equal(int64(240518172673), op.OperationID) + tt.Assert.Equal(int64(1), op.AccountID) + } +} diff --git a/services/horizon/internal/db2/history/operation_test.go b/services/horizon/internal/db2/history/operation_test.go new file mode 100644 index 0000000000..4163e02665 --- /dev/null +++ b/services/horizon/internal/db2/history/operation_test.go @@ -0,0 +1,502 @@ +package history + +import ( + "testing" + + sq "github.com/Masterminds/squirrel" + "github.com/guregu/null" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +func TestOperationQueries(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + // Test OperationByID + op, transaction, err := q.OperationByID(tt.Ctx, false, 8589938689) + if tt.Assert.NoError(err) { + tt.Assert.Equal(int64(8589938689), op.ID) + } + tt.Assert.Nil(transaction) + + // Test Operations() + ops, transactions, err := q.Operations(). + ForAccount(tt.Ctx, "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON"). 
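+		// Fetch returns the matching operations plus, only when IncludeTransactions() is chained, their transactions.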
+		Fetch(tt.Ctx)
+	if tt.Assert.NoError(err) {
+		tt.Assert.Len(ops, 2)
+	}
+	tt.Assert.Len(transactions, 0)
+
+	// ledger filter works
+	ops, transactions, err = q.Operations().ForLedger(tt.Ctx, 2).Fetch(tt.Ctx)
+	if tt.Assert.NoError(err) {
+		tt.Assert.Len(ops, 3)
+	}
+	tt.Assert.Len(transactions, 0)
+
+	// tx filter works
+	hash := "2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d"
+	ops, transactions, err = q.Operations().ForTransaction(tt.Ctx, hash).Fetch(tt.Ctx)
+	if tt.Assert.NoError(err) {
+		tt.Assert.Len(ops, 1)
+	}
+	tt.Assert.Len(transactions, 0)
+
+	// payment filter works
+	tt.Scenario("pathed_payment")
+	ops, transactions, err = q.Operations().OnlyPayments().Fetch(tt.Ctx)
+	if tt.Assert.NoError(err) {
+		tt.Assert.Len(ops, 10)
+	}
+	tt.Assert.Len(transactions, 0)
+
+	// payment filter includes account merges
+	tt.Scenario("account_merge")
+	ops, transactions, err = q.Operations().OnlyPayments().Fetch(tt.Ctx)
+	if tt.Assert.NoError(err) {
+		tt.Assert.Len(ops, 3)
+	}
+	tt.Assert.Len(transactions, 0)
+}
+
+func TestOperationByLiquidityPool(t *testing.T) {
+	tt := test.Start(t)
+	defer tt.Finish()
+	test.ResetHorizonDB(t, tt.HorizonDB)
+	q := &Q{tt.HorizonSession()}
+
+	txIndex := int32(1)
+	sequence := int32(56)
+	txID := toid.New(sequence, txIndex, 0).ToInt64()
+	opID1 := toid.New(sequence, txIndex, 1).ToInt64()
+	opID2 := toid.New(sequence, txIndex, 2).ToInt64()
+
+	// Insert a phony transaction
+	transactionBuilder := q.NewTransactionBatchInsertBuilder(2)
+	firstTransaction := buildLedgerTransaction(tt.T, testTransaction{
+		index:         uint32(txIndex),
+		envelopeXDR:   "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAAAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAAA",
+		resultXDR:     "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=",
+		feeChangesXDR: "AAAAAA==",
+		metaXDR:       "AAAAAQAAAAAAAAAA",
+		hash:          "19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba",
+	})
+	err := transactionBuilder.Add(tt.Ctx, firstTransaction, uint32(sequence))
+	tt.Assert.NoError(err)
+	err = transactionBuilder.Exec(tt.Ctx)
+	tt.Assert.NoError(err)
+
+	// Insert two phony operations
+	operationBuilder := q.NewOperationBatchInsertBuilder(2)
+	err = operationBuilder.Add(
+		tt.Ctx,
+		opID1,
+		txID,
+		1,
+		xdr.OperationTypeEndSponsoringFutureReserves,
+		[]byte("{}"),
+		"GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY",
+		null.String{},
+	)
+	tt.Assert.NoError(err)
+	err = operationBuilder.Exec(tt.Ctx)
+	tt.Assert.NoError(err)
+
+	err = operationBuilder.Add(
+		tt.Ctx,
+		opID2,
+		txID,
+		1,
+		xdr.OperationTypeEndSponsoringFutureReserves,
+		[]byte("{}"),
+		"GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY",
+		null.String{},
+	)
+	tt.Assert.NoError(err)
+	err = operationBuilder.Exec(tt.Ctx)
+	tt.Assert.NoError(err)
+
+	// Insert Liquidity Pool history
+	liquidityPoolID := "a2f38836a839de008cf1d782c81f45e1253cc5d3dad9110b872965484fec0a49"
+	toInternalID, err := q.CreateHistoryLiquidityPools(tt.Ctx, []string{liquidityPoolID}, 2)
+	tt.Assert.NoError(err)
+	lpOperationBuilder := q.NewOperationLiquidityPoolBatchInsertBuilder(3)
+	tt.Assert.NoError(err)
+	internalID, ok := toInternalID[liquidityPoolID]
+	tt.Assert.True(ok)
+	err = lpOperationBuilder.Add(tt.Ctx, opID1, internalID)
+	tt.Assert.NoError(err)
+	err = lpOperationBuilder.Add(tt.Ctx, opID2, internalID)
+	tt.Assert.NoError(err)
+	err = lpOperationBuilder.Exec(tt.Ctx)
+	tt.Assert.NoError(err)
+
+	// Check ascending order
+	pq := db2.PageQuery{
+		Cursor: "",
+		Order:  "asc",
+		Limit:
2, + } + ops, _, err := q.Operations().ForLiquidityPool(tt.Ctx, liquidityPoolID).Page(pq).Fetch(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Len(ops, 2) + tt.Assert.Equal(ops[0].ID, opID1) + tt.Assert.Equal(ops[1].ID, opID2) + + // Check descending order + pq.Order = "desc" + ops, _, err = q.Operations().ForLiquidityPool(tt.Ctx, liquidityPoolID).Page(pq).Fetch(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Len(ops, 2) + tt.Assert.Equal(ops[0].ID, opID2) + tt.Assert.Equal(ops[1].ID, opID1) +} + +func TestFeeStats(t *testing.T) { + log.DefaultLogger.SetLevel(log.DebugLevel) + tt := test.Start(t) + tt.Scenario("operation_fee_stats_1") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + feeStats := &FeeStats{} + + err := q.FeeStats(tt.Ctx, 7, feeStats) + tt.Assert.NoError(err) + tt.Assert.Equal(feeStats.FeeChargedMax, null.NewInt(100, true)) +} + +func TestOperationQueryBuilder(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + opsQ := q.Operations().ForAccount(tt.Ctx, "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON").Page(db2.PageQuery{Cursor: "8589938689", Order: "asc", Limit: 10}) + tt.Assert.NoError(opsQ.Err) + got, _, err := opsQ.sql.ToSql() + tt.Assert.NoError(err) + + // Operations for account queries will use hopp.history_operation_id in their predicates. + want := "SELECT hop.id, hop.transaction_id, hop.application_order, hop.type, hop.details, hop.source_account, hop.source_account_muxed, ht.transaction_hash, ht.tx_result, COALESCE(ht.successful, true) as transaction_successful FROM history_operations hop LEFT JOIN history_transactions ht ON ht.id = hop.transaction_id JOIN history_operation_participants hopp ON hopp.history_operation_id = hop.id WHERE hopp.history_account_id = ? AND hopp.history_operation_id > ? ORDER BY hopp.history_operation_id asc LIMIT 10" + tt.Assert.EqualValues(want, got) + + opsQ = q.Operations().ForLedger(tt.Ctx, 2).Page(db2.PageQuery{Cursor: "8589938689", Order: "asc", Limit: 10}) + tt.Assert.NoError(opsQ.Err) + got, _, err = opsQ.sql.ToSql() + tt.Assert.NoError(err) + + // Other operation queries will use hop.id in their predicates. + want = "SELECT hop.id, hop.transaction_id, hop.application_order, hop.type, hop.details, hop.source_account, hop.source_account_muxed, ht.transaction_hash, ht.tx_result, COALESCE(ht.successful, true) as transaction_successful FROM history_operations hop LEFT JOIN history_transactions ht ON ht.id = hop.transaction_id WHERE hop.id >= ? AND hop.id < ? AND hop.id > ? ORDER BY hop.id asc LIMIT 10" + tt.Assert.EqualValues(want, got) +} + +// TestOperationSuccessfulOnly tests if default query returns operations in +// successful transactions only. +// If it's not enclosed in brackets, it may return incorrect result when mixed +// with `ForAccount` or `ForLedger` filters. +func TestOperationSuccessfulOnly(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + q := &Q{tt.HorizonSession()} + query := q.Operations(). + ForAccount(tt.Ctx, "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2") + + operations, transactions, err := query.Fetch(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Len(transactions, 0) + + tt.Assert.Equal(3, len(operations)) + + for _, operation := range operations { + tt.Assert.True(operation.TransactionSuccessful) + } + + sql, _, err := query.sql.ToSql() + tt.Assert.NoError(err) + // Note: brackets around `(ht.successful = true OR ht.successful IS NULL)` are critical! 
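+	// Without them, AND binds tighter than OR and the IS NULL check would bypass the account filter.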
+ tt.Assert.Contains(sql, "WHERE hopp.history_account_id = ? AND (ht.successful = true OR ht.successful IS NULL)") +} + +// TestOperationIncludeFailed tests `IncludeFailed` method. +func TestOperationIncludeFailed(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + q := &Q{tt.HorizonSession()} + query := q.Operations(). + ForAccount(tt.Ctx, "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"). + IncludeFailed() + + operations, transactions, err := query.Fetch(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Len(transactions, 0) + + var failed, successful int + for _, operation := range operations { + if operation.TransactionSuccessful { + successful++ + } else { + failed++ + } + } + + tt.Assert.Equal(3, successful) + tt.Assert.Equal(1, failed) + + sql, _, err := query.sql.ToSql() + tt.Assert.NoError(err) + tt.Assert.Equal("SELECT hop.id, hop.transaction_id, hop.application_order, hop.type, hop.details, hop.source_account, hop.source_account_muxed, ht.transaction_hash, ht.tx_result, COALESCE(ht.successful, true) as transaction_successful FROM history_operations hop LEFT JOIN history_transactions ht ON ht.id = hop.transaction_id JOIN history_operation_participants hopp ON hopp.history_operation_id = hop.id WHERE hopp.history_account_id = ?", sql) +} + +// TestPaymentsSuccessfulOnly tests if default query returns payments in +// successful transactions only. +// If it's not enclosed in brackets, it may return incorrect result when mixed +// with `ForAccount` or `ForLedger` filters. +func TestPaymentsSuccessfulOnly(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + q := &Q{tt.HorizonSession()} + query := q.Operations(). + OnlyPayments(). + ForAccount(tt.Ctx, "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON") + + operations, transactions, err := query.Fetch(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Len(transactions, 0) + + tt.Assert.Equal(2, len(operations)) + + for _, operation := range operations { + tt.Assert.True(operation.TransactionSuccessful) + } + + sql, _, err := query.sql.ToSql() + tt.Assert.NoError(err) + // Note: brackets around `(ht.successful = true OR ht.successful IS NULL)` are critical! + tt.Assert.Contains(sql, "WHERE hop.type IN (?,?,?,?,?) AND hopp.history_account_id = ? AND (ht.successful = true OR ht.successful IS NULL)") +} + +// TestPaymentsIncludeFailed tests `IncludeFailed` method. +func TestPaymentsIncludeFailed(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + q := &Q{tt.HorizonSession()} + query := q.Operations(). + OnlyPayments(). + ForAccount(tt.Ctx, "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON"). 
+ IncludeFailed() + + operations, transactions, err := query.Fetch(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Len(transactions, 0) + + var failed, successful int + for _, operation := range operations { + if operation.TransactionSuccessful { + successful++ + } else { + failed++ + } + } + + tt.Assert.Equal(2, successful) + tt.Assert.Equal(1, failed) + + sql, _, err := query.sql.ToSql() + tt.Assert.NoError(err) + tt.Assert.Equal("SELECT hop.id, hop.transaction_id, hop.application_order, hop.type, hop.details, hop.source_account, hop.source_account_muxed, ht.transaction_hash, ht.tx_result, COALESCE(ht.successful, true) as transaction_successful FROM history_operations hop LEFT JOIN history_transactions ht ON ht.id = hop.transaction_id JOIN history_operation_participants hopp ON hopp.history_operation_id = hop.id WHERE hop.type IN (?,?,?,?,?) AND hopp.history_account_id = ?", sql) +} + +func TestExtraChecksOperationsTransactionSuccessfulTrueResultFalse(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + // successful `true` but tx result `false` + _, err := tt.HorizonDB.Exec( + `UPDATE history_transactions SET successful = true WHERE transaction_hash = 'aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf'`, + ) + tt.Require.NoError(err) + + q := &Q{tt.HorizonSession()} + query := q.Operations(). + ForAccount(tt.Ctx, "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"). + IncludeFailed() + + _, _, err = query.Fetch(tt.Ctx) + tt.Assert.Error(err) + tt.Assert.Contains(err.Error(), "Corrupted data! `successful=true` but returned transaction is not success") +} + +func TestExtraChecksOperationsTransactionSuccessfulFalseResultTrue(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + // successful `false` but tx result `true` + _, err := tt.HorizonDB.Exec( + `UPDATE history_transactions SET successful = false WHERE transaction_hash = 'a2dabf4e9d1642722602272e178a37c973c9177b957da86192a99b3e9f3a9aa4'`, + ) + tt.Require.NoError(err) + + q := &Q{tt.HorizonSession()} + query := q.Operations(). + ForAccount(tt.Ctx, "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON"). + IncludeFailed() + + _, _, err = query.Fetch(tt.Ctx) + tt.Assert.Error(err) + tt.Assert.Contains(err.Error(), "Corrupted data! `successful=false` but returned transaction is success") +} + +func assertOperationMatchesTransaction(tt *test.T, operation Operation, transaction Transaction) { + tt.Assert.Equal(operation.TransactionID, transaction.ID) + tt.Assert.Equal(operation.TransactionHash, transaction.TransactionHash) + tt.Assert.Equal(operation.TxResult, transaction.TxResult) + tt.Assert.Equal(operation.TransactionSuccessful, transaction.Successful) +} + +// TestOperationIncludeTransactions tests that transactions are included when fetching records from the db. +func TestOperationIncludeTransactions(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + accountID := "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2" + + q := &Q{tt.HorizonSession()} + query := q.Operations(). + IncludeTransactions(). 
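+		// IncludeTransactions makes Fetch also return one Transaction per operation, in matching order.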
+ ForAccount(tt.Ctx, accountID) + + operations, transactions, err := query.Fetch(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Len(transactions, 3) + tt.Assert.Len(transactions, len(operations)) + + for i := range operations { + operation := operations[i] + transaction := transactions[i] + assertOperationMatchesTransaction(tt, operation, transaction) + } + + withoutTransactionsQuery := (&Q{tt.HorizonSession()}).Operations(). + ForAccount(tt.Ctx, accountID) + + var expectedTransactions []Transaction + err = (&Q{tt.HorizonSession()}).Transactions().ForAccount(tt.Ctx, accountID).Select(tt.Ctx, &expectedTransactions) + tt.Assert.NoError(err) + + expectedOperations, _, err := withoutTransactionsQuery.Fetch(tt.Ctx) + tt.Assert.NoError(err) + + tt.Assert.Equal(operations, expectedOperations) + tt.Assert.Equal(transactions, expectedTransactions) + + op, transaction, err := q.OperationByID(tt.Ctx, true, expectedOperations[0].ID) + tt.Assert.NoError(err) + tt.Assert.Equal(op, expectedOperations[0]) + tt.Assert.Equal(*transaction, expectedTransactions[0]) + assertOperationMatchesTransaction(tt, op, *transaction) + + _, err = q.Exec(tt.Ctx, sq.Delete("history_transactions")) + tt.Assert.NoError(err) + _, _, err = q.OperationByID(tt.Ctx, true, 17179877377) + tt.Assert.Error(err) +} + +func TestValidateTransactionForOperation(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + selectTransactionCopy := selectTransaction + defer func() { + selectTransaction = selectTransactionCopy + tt.Finish() + }() + + selectTransaction = sq.Select( + "ht.transaction_hash, " + + "ht.tx_result, " + + "COALESCE(ht.successful, true) as successful"). + From("history_transactions ht") + + accountID := "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2" + + q := &Q{tt.HorizonSession()} + query := q.Operations(). + IncludeTransactions(). + ForAccount(tt.Ctx, accountID) + + _, _, err := query.Fetch(tt.Ctx) + tt.Assert.Error(err) + tt.Assert.EqualError(err, "transaction with id 17179877376 could not be found") + + _, _, err = q.OperationByID(tt.Ctx, true, 17179877377) + tt.Assert.Error(err) + tt.Assert.EqualError(err, "transaction id 0 does not match transaction id in operation 17179877376") + + selectTransaction = sq.Select( + "ht.id, " + + "ht.transaction_hash, " + + "COALESCE(ht.successful, true) as successful"). + From("history_transactions ht") + query = q.Operations(). + IncludeTransactions(). + ForAccount(tt.Ctx, accountID) + + _, _, err = query.Fetch(tt.Ctx) + tt.Assert.Error(err) + tt.Assert.EqualError(err, "transaction result does not match transaction result in operation AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=") + + _, _, err = q.OperationByID(tt.Ctx, true, 17179877377) + tt.Assert.Error(err) + tt.Assert.EqualError(err, "transaction result does not match transaction result in operation AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=") + + selectTransaction = sq.Select( + "ht.id, " + + "ht.tx_result, " + + "COALESCE(ht.successful, true) as successful"). + From("history_transactions ht") + query = q.Operations(). + IncludeTransactions(). 
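+		// With ht.transaction_hash omitted from selectTransaction above, validation fails on the hash mismatch asserted below.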
+ ForAccount(tt.Ctx, accountID) + + _, _, err = query.Fetch(tt.Ctx) + tt.Assert.Error(err) + tt.Assert.EqualError(err, "transaction hash does not match transaction hash in operation 1c454630267aa8767ec8c8e30450cea6ba660145e9c924abb75d7a6669b6c28a") + + _, _, err = q.OperationByID(tt.Ctx, true, 17179877377) + tt.Assert.Error(err) + tt.Assert.EqualError(err, "transaction hash does not match transaction hash in operation 1c454630267aa8767ec8c8e30450cea6ba660145e9c924abb75d7a6669b6c28a") + + selectTransaction = sq.Select( + "ht.id, " + + "ht.tx_result, " + + "ht.transaction_hash"). + From("history_transactions ht") + query = q.Operations(). + IncludeTransactions(). + ForAccount(tt.Ctx, accountID) + + _, _, err = query.Fetch(tt.Ctx) + tt.Assert.Error(err) + tt.Assert.EqualError(err, "transaction successful flag false does not match transaction successful flag in operation true") + + _, _, err = q.OperationByID(tt.Ctx, true, 17179877377) + tt.Assert.Error(err) + tt.Assert.EqualError(err, "transaction successful flag false does not match transaction successful flag in operation true") +} diff --git a/services/horizon/internal/db2/history/orderbook.go b/services/horizon/internal/db2/history/orderbook.go new file mode 100644 index 0000000000..3de321a4ac --- /dev/null +++ b/services/horizon/internal/db2/history/orderbook.go @@ -0,0 +1,159 @@ +package history + +import ( + "context" + "database/sql" + "math/big" + + "github.com/stellar/go/amount" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type priceLevel struct { + Type string `db:"type"` + Pricen int32 `db:"pricen"` + Priced int32 `db:"priced"` + Price float64 `db:"price"` +} + +type offerSummary struct { + Type string `db:"type"` + Amount string `db:"amount"` + Price float64 `db:"price"` +} + +// PriceLevel represents an aggregation of offers to trade at a certain +// price. +type PriceLevel struct { + Pricen int32 + Priced int32 + Pricef string + Amount string +} + +// OrderBookSummary is a summary of a set of offers for a given base and +// counter currency +type OrderBookSummary struct { + Asks []PriceLevel + Bids []PriceLevel +} + +// GetOrderBookSummary returns an OrderBookSummary for a given trading pair. +// GetOrderBookSummary should only be called in a repeatable read transaction. +func (q *Q) GetOrderBookSummary(ctx context.Context, sellingAsset, buyingAsset xdr.Asset, maxPriceLevels int) (OrderBookSummary, error) { + var result OrderBookSummary + + if tx := q.GetTx(); tx == nil { + return result, errors.New("cannot be called outside of a transaction") + } + if opts := q.GetTxOptions(); opts == nil || !opts.ReadOnly || opts.Isolation != sql.LevelRepeatableRead { + return result, errors.New("should only be called in a repeatable read transaction") + } + + selling, err := xdr.MarshalBase64(sellingAsset) + if err != nil { + return result, errors.Wrap(err, "cannot marshal selling asset") + } + buying, err := xdr.MarshalBase64(buyingAsset) + if err != nil { + return result, errors.Wrap(err, "cannot marshal Buying asset") + } + + var levels []priceLevel + // First, obtain the price fractions for each price level. + // In the next query, we'll sum the amounts for each price level. + // Finally, we will combine the results to produce a OrderBookSummary. 
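+	// Asks are offers selling the base asset; bids sell the counter asset and their prices are inverted below.
+	// DISTINCT ON (price) keeps a single row per price level.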
+	selectPriceLevels := `
+	(SELECT DISTINCT ON (price)
+		'ask' as type, pricen, priced, price
+		FROM offers
+		WHERE selling_asset = $1 AND buying_asset = $2 AND deleted = false
+		ORDER BY price ASC LIMIT $3)
+	UNION ALL
+	(SELECT DISTINCT ON (price)
+		'bid' as type, pricen, priced, price
+		FROM offers
+		WHERE selling_asset = $2 AND buying_asset = $1 AND deleted = false
+		ORDER BY price ASC LIMIT $3)
+	`
+
+	var offers []offerSummary
+	// The SUM() value in postgres has type decimal which means it will
+	// handle values that exceed max int64, so we don't need to worry about
+	// overflows.
+	selectOfferSummaries := `
+	(
+		SELECT
+			'ask' as type, co.price, SUM(co.amount) as amount
+		FROM offers co
+		WHERE selling_asset = $1 AND buying_asset = $2 AND deleted = false
+		GROUP BY co.price
+		ORDER BY co.price ASC
+		LIMIT $3
+	) UNION ALL (
+		SELECT
+			'bid' as type, co.price, SUM(co.amount) as amount
+		FROM offers co
+		WHERE selling_asset = $2 AND buying_asset = $1 AND deleted = false
+		GROUP BY co.price
+		ORDER BY co.price ASC
+		LIMIT $3
+	)
+	`
+	// Add explicit query type for prometheus metrics, since we use raw sql.
+	ctx = context.WithValue(ctx, &db.QueryTypeContextKey, db.SelectQueryType)
+	err = q.SelectRaw(ctx, &levels, selectPriceLevels, selling, buying, maxPriceLevels)
+	if err != nil {
+		return result, errors.Wrap(err, "cannot select price levels")
+	}
+
+	err = q.SelectRaw(ctx, &offers, selectOfferSummaries, selling, buying, maxPriceLevels)
+	if err != nil {
+		return result, errors.Wrap(err, "cannot select offer summaries")
+	}
+
+	// we don't expect there to be any inconsistency between levels and offers because
+	// this function should only be invoked in a repeatable read transaction
+	if len(levels) != len(offers) {
+		return result, errors.New("price levels length does not match summaries length")
+	}
+	for i, level := range levels {
+		sum := offers[i]
+		if level.Type != sum.Type {
+			return result, errors.New("price level type does not match offer summary type")
+		}
+		if level.Price != sum.Price {
+			return result, errors.New("price level price does not match offer summary price")
+		}
+		// use big.Rat to get reduced fractions
+		priceFraction := big.NewRat(int64(level.Pricen), int64(level.Priced))
+		if sum.Type == "bid" {
+			// only invert bids
+			if level.Pricen == 0 {
+				return result, errors.New("bid has price denominator equal to 0")
+			}
+			priceFraction = priceFraction.Inv(priceFraction)
+		}
+
+		entry := PriceLevel{
+			Pricef: priceFraction.FloatString(7),
+			Pricen: int32(priceFraction.Num().Int64()),
+			Priced: int32(priceFraction.Denom().Int64()),
+		}
+		entry.Amount, err = amount.IntStringToAmount(sum.Amount)
+		if err != nil {
+			return result, errors.Wrap(err, "could not determine summary amount")
+		}
+		if sum.Type == "ask" {
+			result.Asks = append(result.Asks, entry)
+		} else if sum.Type == "bid" {
+			result.Bids = append(result.Bids, entry)
+		} else {
+			return result, errors.New("invalid offer type")
+		}
+	}
+
+	return result, nil
+}
diff --git a/services/horizon/internal/db2/history/orderbook_test.go b/services/horizon/internal/db2/history/orderbook_test.go
new file mode 100644
index 0000000000..b265d50c86
--- /dev/null
+++ b/services/horizon/internal/db2/history/orderbook_test.go
@@ -0,0 +1,308 @@
+package history
+
+import (
+	"database/sql"
+	"math"
+	"testing"
+
+	"github.com/stellar/go/services/horizon/internal/test"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetOrderBookSummaryRequiresTransaction(t *testing.T) {
+	tt := test.Start(t)
+ defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + _, err := q.GetOrderBookSummary(tt.Ctx, nativeAsset, eurAsset, 10) + assert.EqualError(t, err, "cannot be called outside of a transaction") + + assert.NoError(t, q.Begin()) + defer q.Rollback() + + _, err = q.GetOrderBookSummary(tt.Ctx, nativeAsset, eurAsset, 10) + assert.EqualError(t, err, "should only be called in a repeatable read transaction") +} + +func TestGetOrderBookSummary(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + asksButNoBids := []Offer{twoEurOffer} + asksButNoBidsResponse := OrderBookSummary{ + Asks: []PriceLevel{ + { + Pricen: int32(twoEurOffer.Pricen), + Priced: int32(twoEurOffer.Priced), + Pricef: "2.0000000", + Amount: "0.0000500", + }, + }, + } + + sellEurOffer := Offer{ + SellerID: twoEurOfferSeller.Address(), + OfferID: int64(15), + + BuyingAsset: nativeAsset, + SellingAsset: eurAsset, + + Amount: int64(500), + Pricen: int32(2), + Priced: int32(1), + Price: float64(2), + Flags: 2, + LastModifiedLedger: uint32(1234), + } + + bidsButNoAsks := []Offer{sellEurOffer} + bidsButNoAsksResponse := OrderBookSummary{ + Bids: []PriceLevel{ + { + Pricen: int32(sellEurOffer.Priced), + Priced: int32(sellEurOffer.Pricen), + Pricef: "0.5000000", + Amount: "0.0000500", + }, + }, + } + + otherEurOffer := Offer{ + SellerID: twoEurOfferSeller.Address(), + OfferID: int64(6), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(math.MaxInt64), + Pricen: int32(2), + Priced: int32(1), + Price: float64(2), + Flags: 2, + LastModifiedLedger: uint32(1234), + } + + nonCanonicalPriceTwoEurOffer := Offer{ + SellerID: twoEurOfferSeller.Address(), + OfferID: int64(30), + + BuyingAsset: eurAsset, + SellingAsset: nativeAsset, + + Amount: int64(500), + Pricen: int32(2 * 15), + Priced: int32(1 * 15), + Price: float64(2), + Flags: 2, + LastModifiedLedger: uint32(1234), + } + + otherSellEurOffer := Offer{ + SellerID: twoEurOfferSeller.Address(), + OfferID: int64(17), + + BuyingAsset: nativeAsset, + SellingAsset: eurAsset, + + Amount: int64(500), + Pricen: int32(9), + Priced: int32(5), + Price: float64(9) / float64(5), + Flags: 2, + LastModifiedLedger: uint32(1234), + } + + fullOffers := []Offer{ + twoEurOffer, + otherEurOffer, + nonCanonicalPriceTwoEurOffer, + threeEurOffer, + sellEurOffer, + otherSellEurOffer, + } + + fullResponse := OrderBookSummary{ + Asks: []PriceLevel{ + { + Pricen: int32(twoEurOffer.Pricen), + Priced: int32(twoEurOffer.Priced), + Pricef: "2.0000000", + Amount: "922337203685.4776807", + }, + { + Pricen: int32(threeEurOffer.Pricen), + Priced: int32(threeEurOffer.Priced), + Pricef: "3.0000000", + Amount: "0.0000500", + }, + }, + Bids: []PriceLevel{ + { + Pricen: int32(otherSellEurOffer.Priced), + Priced: int32(otherSellEurOffer.Pricen), + Pricef: "0.5555556", + Amount: "0.0000500", + }, + { + Pricen: int32(sellEurOffer.Priced), + Priced: int32(sellEurOffer.Pricen), + Pricef: "0.5000000", + Amount: "0.0000500", + }, + }, + } + + limitResponse := OrderBookSummary{ + Asks: []PriceLevel{ + { + Pricen: int32(twoEurOffer.Pricen), + Priced: int32(twoEurOffer.Priced), + Pricef: "2.0000000", + Amount: "922337203685.4776807", + }, + }, + Bids: []PriceLevel{ + { + Pricen: int32(otherSellEurOffer.Priced), + Priced: int32(otherSellEurOffer.Pricen), + Pricef: "0.5555556", + Amount: "0.0000500", + }, + }, + } + + for _, testCase := range []struct { + name string + offers []Offer + limit int + expected 
OrderBookSummary + }{ + { + "empty orderbook", + []Offer{}, + 10, + OrderBookSummary{}, + }, + { + "orderbook with asks but no bids", + asksButNoBids, + 10, + asksButNoBidsResponse, + }, + { + "orderbook with bids but no asks", + bidsButNoAsks, + 10, + bidsButNoAsksResponse, + }, + { + "full orderbook", + fullOffers, + 10, + fullResponse, + }, + { + "limit summaries", + fullOffers, + 1, + limitResponse, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + assert.NoError(t, q.TruncateTables(tt.Ctx, []string{"offers"})) + assert.NoError(t, q.UpsertOffers(tt.Ctx, testCase.offers)) + + assert.NoError(t, q.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + })) + defer q.Rollback() + + result, err := q.GetOrderBookSummary(tt.Ctx, nativeAsset, eurAsset, testCase.limit) + assert.NoError(t, err) + assert.Equal(t, testCase.expected, result) + }) + } +} + +func TestGetOrderBookSummaryExcludesRemovedOffers(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + sellEurOffer := Offer{ + SellerID: twoEurOfferSeller.Address(), + OfferID: int64(15), + + BuyingAsset: nativeAsset, + SellingAsset: eurAsset, + + Amount: int64(500), + Pricen: int32(2), + Priced: int32(1), + Price: float64(2), + Flags: 2, + LastModifiedLedger: uint32(1234), + } + + offers := []Offer{ + twoEurOffer, + threeEurOffer, + sellEurOffer, + } + + assert.NoError(t, q.UpsertOffers(tt.Ctx, offers)) + + assert.NoError(t, q.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + })) + + result, err := q.GetOrderBookSummary(tt.Ctx, nativeAsset, eurAsset, 100) + assert.NoError(t, err) + assert.Len(t, result.Asks, 2) + assert.Len(t, result.Bids, 1) + + assert.NoError(t, q.Rollback()) + + var offersToDelete []Offer + for i, offer := range offers { + toDelete := offer + toDelete.Deleted = true + toDelete.LastModifiedLedger = uint32(i + 2) + offersToDelete = append(offersToDelete, toDelete) + } + assert.NoError(t, q.UpsertOffers(tt.Ctx, offersToDelete)) + + assert.NoError(t, q.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + })) + + result, err = q.GetOrderBookSummary(tt.Ctx, nativeAsset, eurAsset, 100) + assert.NoError(t, err) + assert.Len(t, result.Asks, 0) + assert.Len(t, result.Bids, 0) + + assert.NoError(t, q.Rollback()) + + count, err := q.CompactOffers(tt.Ctx, 1000) + assert.NoError(t, err) + assert.Equal(t, int64(len(offers)), count) + + assert.NoError(t, q.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + })) + + result, err = q.GetOrderBookSummary(tt.Ctx, nativeAsset, eurAsset, 100) + assert.NoError(t, err) + assert.Len(t, result.Asks, 0) + assert.Len(t, result.Bids, 0) + + assert.NoError(t, q.Rollback()) +} diff --git a/services/horizon/internal/db2/history/participants.go b/services/horizon/internal/db2/history/participants.go new file mode 100644 index 0000000000..658e877f78 --- /dev/null +++ b/services/horizon/internal/db2/history/participants.go @@ -0,0 +1,48 @@ +package history + +import ( + "context" + + "github.com/stellar/go/support/db" +) + +// QParticipants defines ingestion participant related queries. 
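+// It is intended to be satisfied by *Q, so ingestion code can depend on this narrow interface instead of the full *Q type.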
+type QParticipants interface { + QCreateAccountsHistory + NewTransactionParticipantsBatchInsertBuilder(maxBatchSize int) TransactionParticipantsBatchInsertBuilder + NewOperationParticipantBatchInsertBuilder(maxBatchSize int) OperationParticipantBatchInsertBuilder +} + +// TransactionParticipantsBatchInsertBuilder is used to insert transaction participants into the +// history_transaction_participants table +type TransactionParticipantsBatchInsertBuilder interface { + Add(ctx context.Context, transactionID, accountID int64) error + Exec(ctx context.Context) error +} + +type transactionParticipantsBatchInsertBuilder struct { + builder db.BatchInsertBuilder +} + +// NewTransactionParticipantsBatchInsertBuilder constructs a new TransactionParticipantsBatchInsertBuilder instance +func (q *Q) NewTransactionParticipantsBatchInsertBuilder(maxBatchSize int) TransactionParticipantsBatchInsertBuilder { + return &transactionParticipantsBatchInsertBuilder{ + builder: db.BatchInsertBuilder{ + Table: q.GetTable("history_transaction_participants"), + MaxBatchSize: maxBatchSize, + }, + } +} + +// Add adds a new transaction participant to the batch +func (i *transactionParticipantsBatchInsertBuilder) Add(ctx context.Context, transactionID, accountID int64) error { + return i.builder.Row(ctx, map[string]interface{}{ + "history_transaction_id": transactionID, + "history_account_id": accountID, + }) +} + +// Exec flushes all pending transaction participants to the db +func (i *transactionParticipantsBatchInsertBuilder) Exec(ctx context.Context) error { + return i.builder.Exec(ctx) +} diff --git a/services/horizon/internal/db2/history/participants_test.go b/services/horizon/internal/db2/history/participants_test.go new file mode 100644 index 0000000000..ee37f2b833 --- /dev/null +++ b/services/horizon/internal/db2/history/participants_test.go @@ -0,0 +1,58 @@ +package history + +import ( + "testing" + + sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/services/horizon/internal/test" +) + +type transactionParticipant struct { + TransactionID int64 `db:"history_transaction_id"` + AccountID int64 `db:"history_account_id"` +} + +func getTransactionParticipants(tt *test.T, q *Q) []transactionParticipant { + var participants []transactionParticipant + sql := sq.Select("history_transaction_id", "history_account_id"). + From("history_transaction_participants"). 
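+		// Order deterministically so the assertions below can compare against a fixed slice.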
+ OrderBy("(history_transaction_id, history_account_id) asc") + + err := q.Select(tt.Ctx, &participants, sql) + if err != nil { + tt.T.Fatal(err) + } + + return participants +} + +func TestTransactionParticipantsBatch(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + batch := q.NewTransactionParticipantsBatchInsertBuilder(0) + + transactionID := int64(1) + otherTransactionID := int64(2) + accountID := int64(100) + + for i := int64(0); i < 3; i++ { + tt.Assert.NoError(batch.Add(tt.Ctx, transactionID, accountID+i)) + } + + tt.Assert.NoError(batch.Add(tt.Ctx, otherTransactionID, accountID)) + tt.Assert.NoError(batch.Exec(tt.Ctx)) + + participants := getTransactionParticipants(tt, q) + tt.Assert.Equal( + []transactionParticipant{ + transactionParticipant{TransactionID: 1, AccountID: 100}, + transactionParticipant{TransactionID: 1, AccountID: 101}, + transactionParticipant{TransactionID: 1, AccountID: 102}, + transactionParticipant{TransactionID: 2, AccountID: 100}, + }, + participants, + ) +} diff --git a/services/horizon/internal/db2/history/sequence_provider.go b/services/horizon/internal/db2/history/sequence_provider.go new file mode 100644 index 0000000000..97f53e9253 --- /dev/null +++ b/services/horizon/internal/db2/history/sequence_provider.go @@ -0,0 +1,25 @@ +package history + +import ( + "context" + + sq "github.com/Masterminds/squirrel" + + "github.com/stellar/go/support/errors" +) + +func (q *Q) GetSequenceNumbers(ctx context.Context, addresses []string) (map[string]uint64, error) { + var accounts []AccountEntry + sql := sq.Select("account_id, sequence_number").From("accounts"). + Where(map[string]interface{}{"accounts.account_id": addresses}) + if err := q.Select(ctx, &accounts, sql); err != nil { + return nil, errors.Wrap(err, "could not query accounts") + } + + sequenceNumbers := map[string]uint64{} + for _, account := range accounts { + sequenceNumbers[account.AccountID] = uint64(account.SequenceNumber) + } + + return sequenceNumbers, nil +} diff --git a/services/horizon/internal/db2/history/sequence_provider_test.go b/services/horizon/internal/db2/history/sequence_provider_test.go new file mode 100644 index 0000000000..086c471be1 --- /dev/null +++ b/services/horizon/internal/db2/history/sequence_provider_test.go @@ -0,0 +1,43 @@ +package history + +import ( + "testing" + + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stretchr/testify/assert" +) + +func TestSequenceProviderEmptyDB(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + addresses := []string{ + "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + } + results, err := q.GetSequenceNumbers(tt.Ctx, addresses) + assert.NoError(t, err) + assert.Len(t, results, 0) +} + +func TestSequenceProviderGet(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertAccounts(tt.Ctx, []AccountEntry{account1, account2}) + assert.NoError(t, err) + + results, err := q.GetSequenceNumbers(tt.Ctx, []string{ + "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + "GCT2NQM5KJJEF55NPMY444C6M6CA7T33HRNCMA6ZFBIIXKNCRO6J25K7", + "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + }) + assert.NoError(t, err) + assert.Len(t, results, 2) + assert.Equal(t, uint64(account1.SequenceNumber), 
results["GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"]) + assert.Equal(t, uint64(account2.SequenceNumber), results["GCT2NQM5KJJEF55NPMY444C6M6CA7T33HRNCMA6ZFBIIXKNCRO6J25K7"]) +} diff --git a/services/horizon/internal/db2/history/total_order_id.go b/services/horizon/internal/db2/history/total_order_id.go new file mode 100644 index 0000000000..50bf6d7ba6 --- /dev/null +++ b/services/horizon/internal/db2/history/total_order_id.go @@ -0,0 +1,10 @@ +package history + +import ( + "fmt" +) + +// PagingToken returns a cursor for this record +func (r *TotalOrderID) PagingToken() string { + return fmt.Sprintf("%d", r.ID) +} diff --git a/services/horizon/internal/db2/history/trade.go b/services/horizon/internal/db2/history/trade.go new file mode 100644 index 0000000000..1d1048cfe0 --- /dev/null +++ b/services/horizon/internal/db2/history/trade.go @@ -0,0 +1,355 @@ +package history + +import ( + "context" + "fmt" + "math" + + sq "github.com/Masterminds/squirrel" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// PagingToken returns a cursor for this trade +func (r *Trade) PagingToken() string { + return fmt.Sprintf("%d-%d", r.HistoryOperationID, r.Order) +} + +// HasPrice returns true if the trade has non-null price data +func (r *Trade) HasPrice() bool { + return r.PriceN.Valid && r.PriceD.Valid +} + +const ( + AllTrades = "all" + OrderbookTrades = "orderbook" + LiquidityPoolTrades = "liquidity_pool" +) + +type tradesQuery struct { + baseAsset *xdr.Asset + counterAsset *xdr.Asset + tradeType string + account string + liquidityPool string + offer int64 +} + +func (q *Q) GetTrades( + ctx context.Context, page db2.PageQuery, account string, tradeType string, +) ([]Trade, error) { + return q.getTrades(ctx, page, tradesQuery{ + account: account, + tradeType: tradeType, + }) +} + +func (q *Q) GetTradesForOffer( + ctx context.Context, page db2.PageQuery, offerID int64, +) ([]Trade, error) { + return q.getTrades(ctx, page, tradesQuery{ + offer: offerID, + tradeType: AllTrades, + }) +} + +func (q *Q) GetTradesForLiquidityPool( + ctx context.Context, page db2.PageQuery, poolID string, +) ([]Trade, error) { + return q.getTrades(ctx, page, tradesQuery{ + liquidityPool: poolID, + tradeType: AllTrades, + }) +} + +func (q *Q) GetTradesForAssets( + ctx context.Context, page db2.PageQuery, account, tradeType string, baseAsset, counterAsset xdr.Asset, +) ([]Trade, error) { + return q.getTrades(ctx, page, tradesQuery{ + account: account, + baseAsset: &baseAsset, + counterAsset: &counterAsset, + tradeType: tradeType, + }) +} + +type historyTradesQuery struct { + baseAssetID int64 + counterAssetID int64 + accountID int64 + offerID int64 + poolID int64 + orderPreserved bool + tradeType string +} + +func (q *Q) getTrades(ctx context.Context, page db2.PageQuery, query tradesQuery) ([]Trade, error) { + // Add explicit query type for prometheus metrics, since we use raw sql. 
+ ctx = context.WithValue(ctx, &db.QueryTypeContextKey, db.SelectQueryType) + + internalTradesQuery, err := q.transformTradesQuery(ctx, query) + if err != nil { + return nil, errors.Wrap(err, "invalid trade query") + } + rawSQL, args, err := createTradesSQL(page, internalTradesQuery) + if err != nil { + return nil, errors.Wrap(err, "could not create trades sql query") + } + + var dest []Trade + if err = q.SelectRaw(ctx, &dest, rawSQL, args...); err != nil { + return nil, errors.Wrap(err, "could not select trades") + } + + return dest, nil +} + +func (q *Q) transformTradesQuery(ctx context.Context, query tradesQuery) (historyTradesQuery, error) { + internalQuery := historyTradesQuery{ + orderPreserved: true, + tradeType: query.tradeType, + offerID: query.offer, + } + + if query.account != "" { + var account Account + if err := q.AccountByAddress(ctx, &account, query.account); err != nil { + return internalQuery, errors.Wrap(err, "could not get account by address") + } + internalQuery.accountID = account.ID + } + + if query.baseAsset != nil { + var err error + internalQuery.baseAssetID, err = q.GetAssetID(ctx, *query.baseAsset) + if err != nil { + return internalQuery, errors.Wrap(err, "could not get base asset id") + } + + internalQuery.counterAssetID, err = q.GetAssetID(ctx, *query.counterAsset) + if err != nil { + return internalQuery, errors.Wrap(err, "could not get counter asset id") + } + internalQuery.orderPreserved, internalQuery.baseAssetID, internalQuery.counterAssetID = getCanonicalAssetOrder( + internalQuery.baseAssetID, internalQuery.counterAssetID, + ) + } + + if query.liquidityPool != "" { + historyPool, err := q.LiquidityPoolByID(ctx, query.liquidityPool) + if err != nil { + return internalQuery, errors.Wrap(err, "could not get pool id") + } + internalQuery.poolID = historyPool.InternalID + } + + return internalQuery, nil +} + +func createTradesSQL(page db2.PageQuery, query historyTradesQuery) (string, []interface{}, error) { + base := selectTradeFields + if !query.orderPreserved { + base = selectReverseTradeFields + } + sql := joinTradeAssets( + joinTradeLiquidityPools( + joinTradeAccounts( + base.From("history_trades htrd"), + "history_accounts", + ), + "history_liquidity_pools", + ), + "history_assets", + ) + + if query.baseAssetID != 0 { + sql = sql.Where(sq.Eq{"base_asset_id": query.baseAssetID, "counter_asset_id": query.counterAssetID}) + } + + switch query.tradeType { + case OrderbookTrades: + sql = sql.Where(sq.Eq{"htrd.trade_type": OrderbookTradeType}) + case LiquidityPoolTrades: + sql = sql.Where(sq.Eq{"htrd.trade_type": LiquidityPoolTradeType}) + case AllTrades: + default: + return "", nil, errors.Errorf("Invalid trade type: %v", query.tradeType) + } + + op, idx, err := page.CursorInt64Pair(db2.DefaultPairSep) + if err != nil { + return "", nil, errors.Wrap(err, "could not parse cursor") + } + + // constrain the second portion of the cursor pair to 32-bits + if idx > math.MaxInt32 { + idx = math.MaxInt32 + } + + if query.accountID != 0 || query.offerID != 0 || query.poolID != 0 { + // Construct UNION query + var firstSelect, secondSelect sq.SelectBuilder + switch { + case query.accountID != 0: + firstSelect = sql.Where("htrd.base_account_id = ?", query.accountID) + secondSelect = sql.Where("htrd.counter_account_id = ?", query.accountID) + case query.offerID != 0: + firstSelect = sql.Where("htrd.base_offer_id = ?", query.offerID) + secondSelect = sql.Where("htrd.counter_offer_id = ?", query.offerID) + case query.poolID != 0: + firstSelect = 
sql.Where("htrd.base_liquidity_pool_id = ?", query.poolID) + secondSelect = sql.Where("htrd.counter_liquidity_pool_id = ?", query.poolID) + } + + firstSelect = appendOrdering(firstSelect, op, idx, page.Order) + secondSelect = appendOrdering(secondSelect, op, idx, page.Order) + firstSQL, firstArgs, err := firstSelect.ToSql() + if err != nil { + return "", nil, errors.Wrap(err, "error building a firstSelect query") + } + secondSQL, secondArgs, err := secondSelect.ToSql() + if err != nil { + return "", nil, errors.Wrap(err, "error building a secondSelect query") + } + + rawSQL := fmt.Sprintf("(%s) UNION (%s) ", firstSQL, secondSQL) + args := append(firstArgs, secondArgs...) + // Order the final UNION: + switch page.Order { + case "asc": + rawSQL = rawSQL + `ORDER BY history_operation_id asc, "order" asc ` + case "desc": + rawSQL = rawSQL + `ORDER BY history_operation_id desc, "order" desc ` + default: + panic("Invalid order") + } + rawSQL = rawSQL + fmt.Sprintf("LIMIT %d", page.Limit) + return rawSQL, args, nil + } else { + sql = appendOrdering(sql, op, idx, page.Order) + sql = sql.Limit(page.Limit) + rawSQL, args, err := sql.ToSql() + if err != nil { + return "", nil, errors.Wrap(err, "error building sql query") + } + return rawSQL, args, nil + } +} + +func appendOrdering(sel sq.SelectBuilder, op, idx int64, order string) sq.SelectBuilder { + // NOTE: Remember to test the queries below with EXPLAIN / EXPLAIN ANALYZE + // before changing them. + // This condition is using multicolumn index and it's easy to write it in a way that + // DB will perform a full table scan. + switch order { + case "asc": + return sel. + Where(`( + htrd.history_operation_id >= ? + AND ( + htrd.history_operation_id > ? OR + (htrd.history_operation_id = ? AND htrd.order > ?) + ))`, op, op, op, idx). + OrderBy("htrd.history_operation_id asc, htrd.order asc") + case "desc": + return sel. + Where(`( + htrd.history_operation_id <= ? + AND ( + htrd.history_operation_id < ? OR + (htrd.history_operation_id = ? AND htrd.order < ?) + ))`, op, op, op, idx). + OrderBy("htrd.history_operation_id desc, htrd.order desc") + default: + panic("Invalid order") + } +} + +func joinTradeAccounts(selectBuilder sq.SelectBuilder, historyAccountsTable string) sq.SelectBuilder { + return selectBuilder. + LeftJoin(historyAccountsTable + " base_accounts ON base_account_id = base_accounts.id"). + LeftJoin(historyAccountsTable + " counter_accounts ON counter_account_id = counter_accounts.id") +} + +func joinTradeAssets(selectBuilder sq.SelectBuilder, historyAssetsTable string) sq.SelectBuilder { + return selectBuilder. + Join(historyAssetsTable + " base_assets ON base_asset_id = base_assets.id"). + Join(historyAssetsTable + " counter_assets ON counter_asset_id = counter_assets.id") +} + +func joinTradeLiquidityPools(selectBuilder sq.SelectBuilder, historyLiquidityPoolsTable string) sq.SelectBuilder { + return selectBuilder. + LeftJoin(historyLiquidityPoolsTable + " blp ON base_liquidity_pool_id = blp.id"). 
+ LeftJoin(historyLiquidityPoolsTable + " clp ON counter_liquidity_pool_id = clp.id") +} + +var selectTradeFields = sq.Select( + "history_operation_id", + "htrd.\"order\"", + "htrd.ledger_closed_at", + "htrd.base_offer_id", + "base_accounts.address as base_account", + "base_assets.asset_type as base_asset_type", + "base_assets.asset_code as base_asset_code", + "base_assets.asset_issuer as base_asset_issuer", + "blp.liquidity_pool_id as base_liquidity_pool_id", + "htrd.base_amount", + "htrd.counter_offer_id", + "counter_accounts.address as counter_account", + "counter_assets.asset_type as counter_asset_type", + "counter_assets.asset_code as counter_asset_code", + "counter_assets.asset_issuer as counter_asset_issuer", + "clp.liquidity_pool_id as counter_liquidity_pool_id", + "htrd.counter_amount", + "liquidity_pool_fee", + "htrd.base_is_seller", + "htrd.price_n", + "htrd.price_d", + "htrd.trade_type", +) + +var selectReverseTradeFields = sq.Select( + "history_operation_id", + "htrd.\"order\"", + "htrd.ledger_closed_at", + "htrd.counter_offer_id as base_offer_id", + "counter_accounts.address as base_account", + "counter_assets.asset_type as base_asset_type", + "counter_assets.asset_code as base_asset_code", + "counter_assets.asset_issuer as base_asset_issuer", + "clp.liquidity_pool_id as base_liquidity_pool_id", + "htrd.counter_amount as base_amount", + "htrd.base_offer_id as counter_offer_id", + "base_accounts.address as counter_account", + "base_assets.asset_type as counter_asset_type", + "base_assets.asset_code as counter_asset_code", + "base_assets.asset_issuer as counter_asset_issuer", + "blp.liquidity_pool_id as counter_liquidity_pool_id", + "htrd.base_amount as counter_amount", + "liquidity_pool_fee", + "NOT(htrd.base_is_seller) as base_is_seller", + "htrd.price_d as price_n", + "htrd.price_n as price_d", + "htrd.trade_type", +) + +func getCanonicalAssetOrder( + assetId1 int64, assetId2 int64, +) (orderPreserved bool, baseAssetId int64, counterAssetId int64) { + if assetId1 < assetId2 { + return true, assetId1, assetId2 + } else { + return false, assetId2, assetId1 + } +} + +type QTrades interface { + QCreateAccountsHistory + NewTradeBatchInsertBuilder(maxBatchSize int) TradeBatchInsertBuilder + RebuildTradeAggregationBuckets(ctx context.Context, fromledger, toLedger uint32) error + CreateAssets(ctx context.Context, assets []xdr.Asset, maxBatchSize int) (map[string]Asset, error) + CreateHistoryLiquidityPools(ctx context.Context, poolIDs []string, batchSize int) (map[string]int64, error) +} diff --git a/services/horizon/internal/db2/history/trade_aggregation.go b/services/horizon/internal/db2/history/trade_aggregation.go new file mode 100644 index 0000000000..20efbcd688 --- /dev/null +++ b/services/horizon/internal/db2/history/trade_aggregation.go @@ -0,0 +1,317 @@ +package history + +import ( + "context" + "fmt" + "time" + + sq "github.com/Masterminds/squirrel" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + strtime "github.com/stellar/go/support/time" +) + +// AllowedResolutions is the set of trade aggregation time windows allowed to be used as the +// `resolution` parameter. 
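+// Callers pass the resolution in milliseconds; it is converted to a time.Duration before the lookup.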
+var AllowedResolutions = map[time.Duration]struct{}{
+	time.Minute:        {}, //1 minute
+	time.Minute * 5:    {}, //5 minutes
+	time.Minute * 15:   {}, //15 minutes
+	time.Hour:          {}, //1 hour
+	time.Hour * 24:     {}, //day
+	time.Hour * 24 * 7: {}, //week
+}
+
+// StrictResolutionFiltering represents a simple feature flag to determine whether only
+// predetermined resolutions of trade aggregations are allowed.
+var StrictResolutionFiltering = true
+
+// TradeAggregation represents an aggregation of trades from the trades table
+type TradeAggregation struct {
+	Timestamp     int64   `db:"timestamp"`
+	TradeCount    int64   `db:"count"`
+	BaseVolume    string  `db:"base_volume"`
+	CounterVolume string  `db:"counter_volume"`
+	Average       float64 `db:"avg"`
+	HighN         int64   `db:"high_n"`
+	HighD         int64   `db:"high_d"`
+	LowN          int64   `db:"low_n"`
+	LowD          int64   `db:"low_d"`
+	OpenN         int64   `db:"open_n"`
+	OpenD         int64   `db:"open_d"`
+	CloseN        int64   `db:"close_n"`
+	CloseD        int64   `db:"close_d"`
+}
+
+// TradeAggregationsQ is a helper struct to aid in configuring queries to
+// bucket and aggregate trades
+type TradeAggregationsQ struct {
+	baseAssetID    int64
+	counterAssetID int64
+	resolution     int64
+	offset         int64
+	startTime      strtime.Millis
+	endTime        strtime.Millis
+	pagingParams   db2.PageQuery
+}
+
+// GetTradeAggregationsQ initializes a TradeAggregationsQ query builder based on the required parameters
+func (q Q) GetTradeAggregationsQ(baseAssetID int64, counterAssetID int64, resolution int64,
+	offset int64, pagingParams db2.PageQuery) (*TradeAggregationsQ, error) {
+
+	//convert resolution to a duration struct
+	resolutionDuration := time.Duration(resolution) * time.Millisecond
+	offsetDuration := time.Duration(offset) * time.Millisecond
+
+	//check if resolution is allowed
+	if StrictResolutionFiltering {
+		if _, ok := AllowedResolutions[resolutionDuration]; !ok {
+			return &TradeAggregationsQ{}, errors.New("resolution is not allowed")
+		}
+	}
+	// check if offset is allowed. Offset must be 1) a multiple of an hour, 2) no greater than the resolution, and 3)
+	// less than 24 hours
+	if offsetDuration%time.Hour != 0 || offsetDuration >= time.Hour*24 || offsetDuration > resolutionDuration {
+		return &TradeAggregationsQ{}, errors.New("offset is not allowed.")
+	}
+
+	return &TradeAggregationsQ{
+		baseAssetID:    baseAssetID,
+		counterAssetID: counterAssetID,
+		resolution:     resolution,
+		offset:         offset,
+		pagingParams:   pagingParams,
+	}, nil
+}
+
+// WithStartTime adds an optional lower time boundary filter to the trades being aggregated.
+func (q *TradeAggregationsQ) WithStartTime(startTime strtime.Millis) (*TradeAggregationsQ, error) {
+	offsetMillis := strtime.MillisFromInt64(q.offset)
+	var adjustedStartTime strtime.Millis
+	// Round up to offset if the provided start time is less than the offset.
+	if startTime < offsetMillis {
+		adjustedStartTime = offsetMillis
+	} else {
+		adjustedStartTime = (startTime - offsetMillis).RoundUp(q.resolution) + offsetMillis
+	}
+	if !q.endTime.IsNil() && adjustedStartTime > q.endTime {
+		return &TradeAggregationsQ{}, errors.New("start time is not allowed")
+	} else {
+		q.startTime = adjustedStartTime
+		return q, nil
+	}
+}
+
+// WithEndTime adds an optional upper time boundary filter to the trades being aggregated.
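+// The boundary is rounded down onto the resolution grid (shifted by the offset) so that a partial bucket is never returned.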
+func (q *TradeAggregationsQ) WithEndTime(endTime strtime.Millis) (*TradeAggregationsQ, error) { + // Round upper boundary down, to not deliver partial bucket + offsetMillis := strtime.MillisFromInt64(q.offset) + var adjustedEndTime strtime.Millis + // the end time isn't allowed to be less than the offset + if endTime < offsetMillis { + return &TradeAggregationsQ{}, errors.New("end time is not allowed") + } else { + adjustedEndTime = (endTime - offsetMillis).RoundDown(q.resolution) + offsetMillis + } + if adjustedEndTime < q.startTime { + return &TradeAggregationsQ{}, errors.New("end time is not allowed") + } else { + q.endTime = adjustedEndTime + return q, nil + } +} + +// GetSql generates a sql statement to aggregate Trades based on given parameters +func (q *TradeAggregationsQ) GetSql() sq.SelectBuilder { + var orderPreserved bool + orderPreserved, q.baseAssetID, q.counterAssetID = getCanonicalAssetOrder(q.baseAssetID, q.counterAssetID) + + var bucketSQL sq.SelectBuilder + if orderPreserved { + bucketSQL = bucketTrades(q.resolution, q.offset) + } else { + bucketSQL = reverseBucketTrades(q.resolution, q.offset) + } + + bucketSQL = bucketSQL.From("history_trades_60000"). + Where(sq.Eq{"base_asset_id": q.baseAssetID, "counter_asset_id": q.counterAssetID}) + + //adjust time range and apply time filters + bucketSQL = bucketSQL.Where(sq.GtOrEq{"timestamp": q.startTime}) + if !q.endTime.IsNil() { + bucketSQL = bucketSQL.Where(sq.Lt{"timestamp": q.endTime}) + } + + if q.resolution != 60000 { + //ensure open/close order for cases when multiple trades occur in the same ledger + bucketSQL = bucketSQL.OrderBy("timestamp ASC", "open_ledger_toid ASC") + // Do on-the-fly aggregation for higher resolutions. + bucketSQL = aggregate(bucketSQL) + } + + return bucketSQL. + Limit(q.pagingParams.Limit). + OrderBy("timestamp " + q.pagingParams.Order) +} + +// formatBucketTimestampSelect formats a sql select clause for a bucketed timestamp, based on given resolution +// and the offset. Given a time t, it gives it a timestamp defined by +// f(t) = ((t - offset)/resolution)*resolution + offset. +func formatBucketTimestampSelect(resolution int64, offset int64) string { + return fmt.Sprintf("((timestamp - %d) / %d) * %d + %d as timestamp", offset, resolution, resolution, offset) +} + +// bucketTrades generates a select statement to filter rows from the `history_trades` table in +// a compact form, with a timestamp rounded to resolution, preserving the stored base/counter order. +func bucketTrades(resolution int64, offset int64) sq.SelectBuilder { + return sq.Select( + formatBucketTimestampSelect(resolution, offset), + "count", + "base_volume", + "counter_volume", + "avg", + "high_n", + "high_d", + "low_n", + "low_d", + "open_n", + "open_d", + "close_n", + "close_d", + ) +} + +// reverseBucketTrades generates a select statement to filter rows from the `history_trades` table in +// a compact form, with a timestamp rounded to resolution and reversed base/counter.
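Before reverseBucketTrades below, here is the bucket arithmetic above made concrete: every trade timestamp is mapped with f(t) = ((t - offset)/resolution)*resolution + offset using integer division, and WithStartTime/WithEndTime snap the requested range up and down onto the same grid so only whole buckets are returned. A standalone sketch with made-up millisecond values:

package main

import "fmt"

// bucket mirrors formatBucketTimestampSelect: integer division snaps t onto
// the resolution grid shifted by offset.
func bucket(t, resolution, offset int64) int64 {
	return ((t-offset)/resolution)*resolution + offset
}

// roundUp mirrors how WithStartTime snaps a start time up to the next bucket
// boundary (for non-negative inputs).
func roundUp(t, resolution, offset int64) int64 {
	return ((t-offset+resolution-1)/resolution)*resolution + offset
}

func main() {
	const res = 3_600_000 // 1-hour buckets, in milliseconds
	const off = 7_200_000 // +2h offset

	fmt.Println(bucket(11_000_000, res, off)) // 10800000: the bucket this trade lands in
	fmt.Println(roundUp(9_000_000, res, off)) // 10800000: a requested start time is rounded up
	fmt.Println(bucket(12_000_000, res, off)) // 10800000: a requested end time is rounded down
}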
+func reverseBucketTrades(resolution int64, offset int64) sq.SelectBuilder { + return sq.Select( + formatBucketTimestampSelect(resolution, offset), + "count", + "base_volume as counter_volume", + "counter_volume as base_volume", + "(base_volume::numeric/counter_volume::numeric) as avg", + "low_n as high_d", + "low_d as high_n", + "high_n as low_d", + "high_d as low_n", + "open_n as open_d", + "open_d as open_n", + "close_n as close_d", + "close_d as close_n", + ) +} + +func aggregate(query sq.SelectBuilder) sq.SelectBuilder { + return sq.Select( + "timestamp", + "sum(\"count\") as count", + "sum(base_volume) as base_volume", + "sum(counter_volume) as counter_volume", + "sum(counter_volume::numeric)/sum(base_volume::numeric) as avg", + "(max_price(ARRAY[high_n, high_d]))[1] as high_n", + "(max_price(ARRAY[high_n, high_d]))[2] as high_d", + "(min_price(ARRAY[low_n, low_d]))[1] as low_n", + "(min_price(ARRAY[low_n, low_d]))[2] as low_d", + "(first(ARRAY[open_n, open_d]))[1] as open_n", + "(first(ARRAY[open_n, open_d]))[2] as open_d", + "(last(ARRAY[close_n, close_d]))[1] as close_n", + "(last(ARRAY[close_n, close_d]))[2] as close_d", + ).FromSelect(query, "htrd").GroupBy("timestamp") +} + +// RebuildTradeAggregationTimes rebuilds a specific set of trade aggregation +// buckets, (specified by start and end times) to ensure complete data in case +// of partial reingestion. +func (q Q) RebuildTradeAggregationTimes(ctx context.Context, from, to strtime.Millis) error { + from = from.RoundDown(60_000) + to = to.RoundDown(60_000) + // Clear out the old bucket values. + _, err := q.Exec(ctx, sq.Delete("history_trades_60000").Where( + sq.GtOrEq{"timestamp": from}, + ).Where( + sq.LtOrEq{"timestamp": to}, + )) + if err != nil { + return errors.Wrap(err, "could not rebuild trade aggregation bucket") + } + + // find all related trades + trades := sq.Select( + "to_millis(ledger_closed_at, 60000) as timestamp", + "history_operation_id", + "\"order\"", + "base_asset_id", + "base_amount", + "counter_asset_id", + "counter_amount", + "ARRAY[price_n, price_d] as price", + ).From("history_trades").Where( + sq.GtOrEq{"to_millis(ledger_closed_at, 60000)": from}, + ).Where( + sq.LtOrEq{"to_millis(ledger_closed_at, 60000)": to}, + ).OrderBy("base_asset_id", "counter_asset_id", "history_operation_id", "\"order\"") + + // figure out the new bucket values + rebuilt := sq.Select( + "timestamp", + "base_asset_id", + "counter_asset_id", + "count(*) as count", + "sum(base_amount) as base_volume", + "sum(counter_amount) as counter_volume", + "sum(counter_amount::numeric)/sum(base_amount::numeric) as avg", + "(max_price(price))[1] as high_n", + "(max_price(price))[2] as high_d", + "(min_price(price))[1] as low_n", + "(min_price(price))[2] as low_d", + "first(history_operation_id) as open_ledger_toid", + "(first(price))[1] as open_n", + "(first(price))[2] as open_d", + "last(history_operation_id) as close_ledger_toid", + "(last(price))[1] as close_n", + "(last(price))[2] as close_d", + ).FromSelect(trades, "trades").GroupBy("base_asset_id", "counter_asset_id", "timestamp") + + // Insert the new bucket values. + _, err = q.Exec(ctx, sq.Insert("history_trades_60000").Select(rebuilt)) + if err != nil { + return errors.Wrap(err, "could not rebuild trade aggregation bucket") + } + return nil +} + +// RebuildTradeAggregationBuckets rebuilds a specific set of trade aggregation +// buckets, (specified by start and end ledger seq) to ensure complete data in +// case of partial reingestion. 
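The function below, described by the comment above, converts an inclusive ledger range into a half-open TOID range: the lower bound is the smallest possible id in ledger fromSeq, and the upper bound is the smallest possible id in ledger toSeq+1, so everything belonging to toSeq is still included. A standalone sketch of that arithmetic; the packing (ledger sequence in the top 32 bits) is the usual Horizon TOID convention defined in the toid package, so treat it as an assumption here rather than something this diff specifies:

package main

import "fmt"

// toidFloor packs a ledger sequence into the top 32 bits of a 64-bit id, with
// the transaction and operation parts zeroed (assumed TOID layout).
func toidFloor(ledgerSeq int32) int64 {
	return int64(ledgerSeq) << 32
}

func main() {
	fromSeq, toSeq := int32(100), int32(105)
	lower := toidFloor(fromSeq)   // inclusive: smallest id that can appear in ledger 100
	upper := toidFloor(toSeq + 1) // exclusive: smallest id of ledger 106
	fmt.Printf("id >= %d AND id < %d\n", lower, upper)
}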
+func (q Q) RebuildTradeAggregationBuckets(ctx context.Context, fromSeq, toSeq uint32) error { + fromLedgerToid := toid.New(int32(fromSeq), 0, 0).ToInt64() + // toLedger should be inclusive here. + toLedgerToid := toid.New(int32(toSeq+1), 0, 0).ToInt64() + + // Get the affected timestamp buckets + timestamps := sq.Select( + "to_millis(closed_at, 60000)", + ).From("history_ledgers").Where( + sq.GtOrEq{"id": fromLedgerToid}, + ).Where( + sq.Lt{"id": toLedgerToid}, + ) + + // Get first bucket timestamp in the ledger range + var from strtime.Millis + err := q.Get(ctx, &from, timestamps.OrderBy("id").Limit(1)) + if err != nil { + return errors.Wrap(err, "could not rebuild trade aggregation bucket") + } + + // Get last bucket timestamp in the ledger range + var to strtime.Millis + err = q.Get(ctx, &to, timestamps.OrderBy("id DESC").Limit(1)) + if err != nil { + return errors.Wrap(err, "could not rebuild trade aggregation bucket") + } + + return q.RebuildTradeAggregationTimes(ctx, from, to) +} diff --git a/services/horizon/internal/db2/history/trade_batch_insert_builder.go b/services/horizon/internal/db2/history/trade_batch_insert_builder.go new file mode 100644 index 0000000000..e53b7f4194 --- /dev/null +++ b/services/horizon/internal/db2/history/trade_batch_insert_builder.go @@ -0,0 +1,91 @@ +package history + +import ( + "context" + "time" + + "github.com/guregu/null" + + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" +) + +// TradeType is an enum which indicates the type of trade +type TradeType int16 + +const ( + // OrderbookTradeType is a trade which exercises an offer on the orderbook. + OrderbookTradeType = TradeType(1) + // LiquidityPoolTradeType is a trade which exercises a liquidity pool. + LiquidityPoolTradeType = TradeType(2) +) + +// InsertTrade represents the arguments to TradeBatchInsertBuilder.Add() which is used to insert +// rows into the history_trades table +type InsertTrade struct { + HistoryOperationID int64 `db:"history_operation_id"` + Order int32 `db:"\"order\""` + LedgerCloseTime time.Time `db:"ledger_closed_at"` + + CounterAssetID int64 `db:"counter_asset_id"` + CounterAmount int64 `db:"counter_amount"` + CounterAccountID null.Int `db:"counter_account_id"` + CounterOfferID null.Int `db:"counter_offer_id"` + CounterLiquidityPoolID null.Int `db:"counter_liquidity_pool_id"` + + LiquidityPoolFee null.Int `db:"liquidity_pool_fee"` + + BaseAssetID int64 `db:"base_asset_id"` + BaseAmount int64 `db:"base_amount"` + BaseAccountID null.Int `db:"base_account_id"` + BaseOfferID null.Int `db:"base_offer_id"` + BaseLiquidityPoolID null.Int `db:"base_liquidity_pool_id"` + + BaseIsSeller bool `db:"base_is_seller"` + + Type TradeType `db:"trade_type"` + + PriceN int64 `db:"price_n"` + PriceD int64 `db:"price_d"` +} + +// TradeBatchInsertBuilder is used to insert trades into the +// history_trades table +type TradeBatchInsertBuilder interface { + Add(ctx context.Context, entries ...InsertTrade) error + Exec(ctx context.Context) error +} + +// tradeBatchInsertBuilder is a simple wrapper around db.BatchInsertBuilder +type tradeBatchInsertBuilder struct { + builder db.BatchInsertBuilder + q *Q +} + +// NewTradeBatchInsertBuilder constructs a new TradeBatchInsertBuilder instance +func (q *Q) NewTradeBatchInsertBuilder(maxBatchSize int) TradeBatchInsertBuilder { + return &tradeBatchInsertBuilder{ + builder: db.BatchInsertBuilder{ + Table: q.GetTable("history_trades"), + MaxBatchSize: maxBatchSize, + }, + q: q, + } +} + +// Exec flushes all outstanding trades to 
the database +func (i *tradeBatchInsertBuilder) Exec(ctx context.Context) error { + return i.builder.Exec(ctx) +} + +// Add adds a new trade to the batch +func (i *tradeBatchInsertBuilder) Add(ctx context.Context, entries ...InsertTrade) error { + for _, entry := range entries { + err := i.builder.RowStruct(ctx, entry) + if err != nil { + return errors.Wrap(err, "failed to add trade") + } + } + + return nil +} diff --git a/services/horizon/internal/db2/history/trade_scenario.go b/services/horizon/internal/db2/history/trade_scenario.go new file mode 100644 index 0000000000..5ba118dd83 --- /dev/null +++ b/services/horizon/internal/db2/history/trade_scenario.go @@ -0,0 +1,429 @@ +package history + +import ( + "encoding/hex" + "strings" + "time" + + "github.com/guregu/null" + + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/xdr" +) + +func createInsertTrades( + accountIDs, assetIDs, poolIDs []int64, ledger int32, +) []InsertTrade { + first := InsertTrade{ + HistoryOperationID: toid.New(ledger, 1, 1).ToInt64(), + Order: 1, + LedgerCloseTime: time.Unix(10000000, 0).UTC(), + CounterOfferID: null.IntFrom(32145), + BaseAccountID: null.IntFrom(accountIDs[0]), + CounterAccountID: null.IntFrom(accountIDs[1]), + BaseAssetID: assetIDs[0], + CounterAssetID: assetIDs[1], + BaseOfferID: null.IntFrom(214515), + BaseIsSeller: true, + BaseAmount: 7986, + CounterAmount: 896, + PriceN: 1, + PriceD: 3, + Type: OrderbookTradeType, + } + + second := first + second.CounterOfferID = null.Int{} + second.Order = 2 + + third := InsertTrade{ + HistoryOperationID: toid.New(ledger, 2, 1).ToInt64(), + Order: 1, + LedgerCloseTime: time.Unix(10000001, 0).UTC(), + CounterOfferID: null.IntFrom(2), + BaseAccountID: null.IntFrom(accountIDs[0]), + CounterAccountID: null.IntFrom(accountIDs[1]), + BaseAssetID: assetIDs[1], + CounterAssetID: assetIDs[2], + BaseOfferID: null.IntFrom(7), + BaseIsSeller: false, + BaseAmount: 123, + CounterAmount: 6, + PriceN: 1156, + PriceD: 3, + Type: OrderbookTradeType, + } + + fourth := InsertTrade{ + HistoryOperationID: toid.New(ledger, 2, 2).ToInt64(), + Order: 3, + LedgerCloseTime: time.Unix(10000001, 0).UTC(), + CounterAssetID: assetIDs[4], + CounterAmount: 675, + CounterAccountID: null.IntFrom(accountIDs[0]), + LiquidityPoolFee: null.IntFrom(xdr.LiquidityPoolFeeV18), + BaseAssetID: assetIDs[3], + BaseAmount: 981, + BaseLiquidityPoolID: null.IntFrom(poolIDs[0]), + BaseIsSeller: true, + PriceN: 675, + PriceD: 981, + Type: LiquidityPoolTradeType, + } + + fifth := InsertTrade{ + HistoryOperationID: toid.New(ledger, 3, 1).ToInt64(), + Order: 1, + LedgerCloseTime: time.Unix(10002000, 0).UTC(), + CounterAssetID: assetIDs[1], + CounterAmount: 300, + CounterAccountID: null.IntFrom(accountIDs[1]), + LiquidityPoolFee: null.IntFrom(xdr.LiquidityPoolFeeV18), + BaseAssetID: assetIDs[0], + BaseAmount: 200, + BaseLiquidityPoolID: null.IntFrom(poolIDs[1]), + BaseIsSeller: true, + PriceN: 43, + PriceD: 56, + Type: LiquidityPoolTradeType, + } + + return []InsertTrade{ + first, + second, + third, + fourth, + fifth, + } +} + +func createHistoryIDs( + tt *test.T, q *Q, accounts []string, assets []xdr.Asset, pools []string, +) ([]int64, []int64, []int64) { + addressToAccounts, err := q.CreateAccounts(tt.Ctx, accounts, len(accounts)) + tt.Assert.NoError(err) + + accountIDs := []int64{} + for _, account := range accounts { + accountIDs = append(accountIDs, addressToAccounts[account]) + } + + assetMap, err := q.CreateAssets(tt.Ctx, 
assets, len(assets)) + tt.Assert.NoError(err) + + assetIDs := []int64{} + for _, asset := range assets { + assetIDs = append(assetIDs, assetMap[asset.String()].ID) + } + + poolsMap, err := q.CreateHistoryLiquidityPools(tt.Ctx, pools, len(pools)) + tt.Assert.NoError(err) + poolIDs := []int64{} + for _, pool := range pools { + poolIDs = append(poolIDs, poolsMap[pool]) + } + + return accountIDs, assetIDs, poolIDs +} + +func buildIDtoAccountMapping(addresses []string, ids []int64) map[int64]xdr.AccountId { + idToAccount := map[int64]xdr.AccountId{} + for i, id := range ids { + account := xdr.MustAddress(addresses[i]) + idToAccount[id] = account + } + + return idToAccount +} + +func buildIDtoAssetMapping(assets []xdr.Asset, ids []int64) map[int64]xdr.Asset { + idToAsset := map[int64]xdr.Asset{} + for i, id := range ids { + idToAsset[id] = assets[i] + } + + return idToAsset +} + +// TradeFixtures contains the data inserted into the database +// when running TradeScenario +type TradeFixtures struct { + Addresses []string + Assets []xdr.Asset + Trades []Trade + LiquidityPools []string + TradesByAccount map[string][]Trade + TradesByAsset map[string][]Trade + TradesByPool map[string][]Trade + TradesByOffer map[int64][]Trade +} + +// TradesByAssetPair returns the trades which match a given trading pair +func (f TradeFixtures) TradesByAssetPair(a, b xdr.Asset) []Trade { + set := map[string]bool{} + var intersection []Trade + for _, trade := range f.TradesByAsset[a.String()] { + set[trade.PagingToken()] = true + } + + for _, trade := range f.TradesByAsset[b.String()] { + if set[trade.PagingToken()] { + intersection = append(intersection, trade) + } + } + return intersection +} + +// FilterTradesByType filters the given trades by type +func FilterTradesByType(trades []Trade, tradeType string) []Trade { + var result []Trade + for _, trade := range trades { + switch tradeType { + case AllTrades: + result = append(result, trade) + case OrderbookTrades: + if trade.BaseOfferID.Valid || trade.CounterOfferID.Valid { + result = append(result, trade) + } + case LiquidityPoolTrades: + if trade.BaseLiquidityPoolID.Valid || trade.CounterLiquidityPoolID.Valid { + result = append(result, trade) + } + } + } + return result +} + +// TradeScenario inserts trade rows into the Horizon DB +func TradeScenario(tt *test.T, q *Q) TradeFixtures { + builder := q.NewTradeBatchInsertBuilder(0) + + addresses := []string{ + "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD", + "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", + } + issuer := xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + nativeAsset := xdr.MustNewNativeAsset() + eurAsset := xdr.MustNewCreditAsset("EUR", issuer.Address()) + usdAsset := xdr.MustNewCreditAsset("USD", issuer.Address()) + + assets := []xdr.Asset{ + eurAsset, + usdAsset, + nativeAsset, + xdr.MustNewCreditAsset("JPY", addresses[0]), + xdr.MustNewCreditAsset("CHF", addresses[1]), + } + hash := [32]byte{1, 2, 3, 4, 5} + otherHash := [32]byte{6, 7, 8, 9, 10} + pools := []string{hex.EncodeToString(hash[:]), hex.EncodeToString(otherHash[:])} + accountIDs, assetIDs, poolIDs := createHistoryIDs( + tt, q, + addresses, + assets, + pools, + ) + + inserts := createInsertTrades(accountIDs, assetIDs, poolIDs, 3) + + tt.Assert.NoError( + builder.Add(tt.Ctx, inserts...), + ) + tt.Assert.NoError(builder.Exec(tt.Ctx)) + + idToAccount := buildIDtoAccountMapping(addresses, accountIDs) + idToAsset := buildIDtoAssetMapping(assets, assetIDs) + + firstSellerAccount := 
idToAccount[inserts[0].BaseAccountID.Int64] + firstBuyerAccount := idToAccount[inserts[0].CounterAccountID.Int64] + var firstSoldAssetType, firstSoldAssetCode, firstSoldAssetIssuer string + idToAsset[inserts[0].BaseAssetID].MustExtract( + &firstSoldAssetType, &firstSoldAssetCode, &firstSoldAssetIssuer, + ) + var firstBoughtAssetType, firstBoughtAssetCode, firstBoughtAssetIssuer string + idToAsset[inserts[0].CounterAssetID].MustExtract( + &firstBoughtAssetType, &firstBoughtAssetCode, &firstBoughtAssetIssuer, + ) + + secondSellerAccount := idToAccount[inserts[1].BaseAccountID.Int64] + secondBuyerAccount := idToAccount[inserts[1].CounterAccountID.Int64] + var secondSoldAssetType, secondSoldAssetCode, secondSoldAssetIssuer string + idToAsset[inserts[1].BaseAssetID].MustExtract( + &secondSoldAssetType, &secondSoldAssetCode, &secondSoldAssetIssuer, + ) + var secondBoughtAssetType, secondBoughtAssetCode, secondBoughtAssetIssuer string + idToAsset[inserts[1].CounterAssetID].MustExtract( + &secondBoughtAssetType, &secondBoughtAssetCode, &secondBoughtAssetIssuer, + ) + + thirdSellerAccount := idToAccount[inserts[2].BaseAccountID.Int64] + thirdBuyerAccount := idToAccount[inserts[2].CounterAccountID.Int64] + var thirdSoldAssetType, thirdSoldAssetCode, thirdSoldAssetIssuer string + idToAsset[inserts[2].BaseAssetID].MustExtract( + &thirdSoldAssetType, &thirdSoldAssetCode, &thirdSoldAssetIssuer, + ) + var thirdBoughtAssetType, thirdBoughtAssetCode, thirdBoughtAssetIssuer string + idToAsset[inserts[2].CounterAssetID].MustExtract( + &thirdBoughtAssetType, &thirdBoughtAssetCode, &thirdBoughtAssetIssuer, + ) + + var fourthSoldAssetType, fourthSoldAssetCode, fourthSoldAssetIssuer string + idToAsset[inserts[3].BaseAssetID].MustExtract( + &fourthSoldAssetType, &fourthSoldAssetCode, &fourthSoldAssetIssuer, + ) + var fourthBoughtAssetType, fourthBoughtAssetCode, fourthBoughtAssetIssuer string + idToAsset[inserts[3].CounterAssetID].MustExtract( + &fourthBoughtAssetType, &fourthBoughtAssetCode, &fourthBoughtAssetIssuer, + ) + + trades := []Trade{ + { + HistoryOperationID: inserts[0].HistoryOperationID, + Order: inserts[0].Order, + LedgerCloseTime: inserts[0].LedgerCloseTime, + BaseOfferID: inserts[0].BaseOfferID, + BaseAccount: null.StringFrom(firstSellerAccount.Address()), + BaseAssetType: firstSoldAssetType, + BaseAssetIssuer: firstSoldAssetIssuer, + BaseAssetCode: firstSoldAssetCode, + BaseAmount: inserts[0].BaseAmount, + CounterOfferID: inserts[0].CounterOfferID, + CounterAccount: null.StringFrom(firstBuyerAccount.Address()), + CounterAssetType: firstBoughtAssetType, + CounterAssetIssuer: firstBoughtAssetIssuer, + CounterAssetCode: firstBoughtAssetCode, + CounterAmount: inserts[0].CounterAmount, + BaseIsSeller: true, + PriceN: null.IntFrom(inserts[0].PriceN), + PriceD: null.IntFrom(inserts[0].PriceD), + Type: OrderbookTradeType, + }, + { + HistoryOperationID: inserts[1].HistoryOperationID, + Order: inserts[1].Order, + LedgerCloseTime: inserts[1].LedgerCloseTime, + BaseOfferID: inserts[1].BaseOfferID, + BaseAccount: null.StringFrom(secondSellerAccount.Address()), + BaseAssetType: secondSoldAssetType, + BaseAssetIssuer: secondSoldAssetIssuer, + BaseAssetCode: secondSoldAssetCode, + BaseAmount: inserts[1].BaseAmount, + CounterOfferID: null.Int{}, + CounterAccount: null.StringFrom(secondBuyerAccount.Address()), + CounterAssetType: secondBoughtAssetType, + CounterAssetCode: secondBoughtAssetCode, + CounterAssetIssuer: secondBoughtAssetIssuer, + CounterAmount: inserts[1].CounterAmount, + BaseIsSeller: true, + PriceN: 
null.IntFrom(inserts[1].PriceN), + PriceD: null.IntFrom(inserts[1].PriceD), + Type: OrderbookTradeType, + }, + { + HistoryOperationID: inserts[2].HistoryOperationID, + Order: inserts[2].Order, + LedgerCloseTime: inserts[2].LedgerCloseTime, + BaseOfferID: inserts[2].BaseOfferID, + BaseAccount: null.StringFrom(thirdSellerAccount.Address()), + BaseAssetType: thirdSoldAssetType, + BaseAssetCode: thirdSoldAssetCode, + BaseAssetIssuer: thirdSoldAssetIssuer, + BaseAmount: inserts[2].BaseAmount, + CounterOfferID: inserts[2].CounterOfferID, + CounterAccount: null.StringFrom(thirdBuyerAccount.Address()), + CounterAssetType: thirdBoughtAssetType, + CounterAssetCode: thirdBoughtAssetCode, + CounterAssetIssuer: thirdBoughtAssetIssuer, + CounterAmount: inserts[2].CounterAmount, + BaseIsSeller: false, + PriceN: null.IntFrom(inserts[2].PriceN), + PriceD: null.IntFrom(inserts[2].PriceD), + Type: OrderbookTradeType, + }, + { + HistoryOperationID: inserts[3].HistoryOperationID, + Order: inserts[3].Order, + LedgerCloseTime: inserts[3].LedgerCloseTime, + BaseOfferID: inserts[3].BaseOfferID, + BaseAssetType: fourthSoldAssetType, + BaseAssetCode: fourthSoldAssetCode, + BaseAssetIssuer: fourthSoldAssetIssuer, + BaseLiquidityPoolID: null.StringFrom(pools[0]), + BaseAmount: inserts[3].BaseAmount, + CounterOfferID: null.Int{}, + CounterAccount: null.StringFrom(thirdSellerAccount.Address()), + CounterAssetType: fourthBoughtAssetType, + CounterAssetCode: fourthBoughtAssetCode, + CounterAssetIssuer: fourthBoughtAssetIssuer, + CounterAmount: inserts[3].CounterAmount, + BaseIsSeller: inserts[3].BaseIsSeller, + LiquidityPoolFee: inserts[3].LiquidityPoolFee, + PriceN: null.IntFrom(inserts[3].PriceN), + PriceD: null.IntFrom(inserts[3].PriceD), + Type: LiquidityPoolTradeType, + }, + { + HistoryOperationID: inserts[4].HistoryOperationID, + Order: inserts[4].Order, + LedgerCloseTime: inserts[4].LedgerCloseTime, + BaseOfferID: inserts[4].BaseOfferID, + BaseAssetType: firstSoldAssetType, + BaseAssetIssuer: firstSoldAssetIssuer, + BaseAssetCode: firstSoldAssetCode, + BaseLiquidityPoolID: null.StringFrom(pools[1]), + BaseAmount: inserts[4].BaseAmount, + CounterOfferID: null.Int{}, + CounterAccount: null.StringFrom(thirdBuyerAccount.Address()), + CounterAssetType: firstBoughtAssetType, + CounterAssetIssuer: firstBoughtAssetIssuer, + CounterAssetCode: firstBoughtAssetCode, + CounterAmount: inserts[4].CounterAmount, + BaseIsSeller: inserts[4].BaseIsSeller, + LiquidityPoolFee: inserts[4].LiquidityPoolFee, + PriceN: null.IntFrom(inserts[4].PriceN), + PriceD: null.IntFrom(inserts[4].PriceD), + Type: LiquidityPoolTradeType, + }, + } + + fixtures := TradeFixtures{ + Addresses: addresses, + Assets: assets, + Trades: trades, + LiquidityPools: pools, + TradesByAccount: map[string][]Trade{}, + TradesByAsset: map[string][]Trade{}, + TradesByPool: map[string][]Trade{}, + TradesByOffer: map[int64][]Trade{}, + } + + for _, trade := range trades { + if trade.BaseAccount.Valid { + fixtures.TradesByAccount[trade.BaseAccount.String] = append(fixtures.TradesByAccount[trade.BaseAccount.String], trade) + } + if trade.CounterAccount.Valid { + fixtures.TradesByAccount[trade.CounterAccount.String] = append(fixtures.TradesByAccount[trade.CounterAccount.String], trade) + } + baseAsset := strings.Join([]string{trade.BaseAssetType, trade.BaseAssetCode, trade.BaseAssetIssuer}, "/") + fixtures.TradesByAsset[baseAsset] = append(fixtures.TradesByAsset[baseAsset], trade) + + counterAsset := strings.Join([]string{trade.CounterAssetType, trade.CounterAssetCode, 
trade.CounterAssetIssuer}, "/") + fixtures.TradesByAsset[counterAsset] = append(fixtures.TradesByAsset[counterAsset], trade) + + if trade.BaseLiquidityPoolID.Valid { + fixtures.TradesByPool[trade.BaseLiquidityPoolID.String] = append(fixtures.TradesByPool[trade.BaseLiquidityPoolID.String], trade) + } + if trade.CounterLiquidityPoolID.Valid { + fixtures.TradesByPool[trade.CounterLiquidityPoolID.String] = append(fixtures.TradesByPool[trade.CounterLiquidityPoolID.String], trade) + } + if trade.BaseOfferID.Valid { + fixtures.TradesByOffer[trade.BaseOfferID.Int64] = append(fixtures.TradesByOffer[trade.BaseOfferID.Int64], trade) + } + if trade.CounterOfferID.Valid { + fixtures.TradesByOffer[trade.CounterOfferID.Int64] = append(fixtures.TradesByOffer[trade.CounterOfferID.Int64], trade) + } + } + + return fixtures +} diff --git a/services/horizon/internal/db2/history/trade_test.go b/services/horizon/internal/db2/history/trade_test.go new file mode 100644 index 0000000000..e91a1f7b4f --- /dev/null +++ b/services/horizon/internal/db2/history/trade_test.go @@ -0,0 +1,242 @@ +package history + +import ( + "github.com/stellar/go/xdr" + "testing" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/test" +) + +var ( + ascPQ = db2.MustPageQuery("", false, "asc", 100) + descPQ = db2.MustPageQuery("", false, "desc", 100) +) + +func assertTradesAreEqual(tt *test.T, expected, rows []Trade) { + tt.Assert.Len(rows, len(expected)) + for i := 0; i < len(rows); i++ { + tt.Assert.Equal(expected[i].LedgerCloseTime.Unix(), rows[i].LedgerCloseTime.Unix()) + rows[i].LedgerCloseTime = expected[i].LedgerCloseTime + tt.Assert.Equal( + expected[i], + rows[i], + ) + } +} + +const allAccounts = "" + +func filterByAccount(trades []Trade, account string) []Trade { + var result []Trade + for _, trade := range trades { + if account == allAccounts || + (trade.BaseAccount.Valid && trade.BaseAccount.String == account) || + (trade.CounterAccount.Valid && trade.CounterAccount.String == account) { + result = append(result, trade) + } + } + return result +} + +func TestSelectTrades(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + fixtures := TradeScenario(tt, q) + + for _, account := range append([]string{allAccounts}, fixtures.Addresses...) { + for _, tradeType := range []string{AllTrades, OrderbookTrades, LiquidityPoolTrades} { + expected := filterByAccount(FilterTradesByType(fixtures.Trades, tradeType), account) + rows, err := q.GetTrades(tt.Ctx, ascPQ, account, tradeType) + tt.Assert.NoError(err) + + assertTradesAreEqual(tt, expected, rows) + + rows, err = q.GetTrades(tt.Ctx, descPQ, account, tradeType) + tt.Assert.NoError(err) + start, end := 0, len(rows)-1 + for start < end { + rows[start], rows[end] = rows[end], rows[start] + start++ + end-- + } + + assertTradesAreEqual(tt, expected, rows) + } + } +} + +func TestSelectTradesCursor(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + fixtures := TradeScenario(tt, q) + + for _, account := range append([]string{allAccounts}, fixtures.Addresses...) 
{ + for _, tradeType := range []string{AllTrades, OrderbookTrades, LiquidityPoolTrades} { + expected := filterByAccount(FilterTradesByType(fixtures.Trades, tradeType), account) + if len(expected) == 0 { + continue + } + + rows, err := q.GetTrades( + tt.Ctx, + db2.MustPageQuery(expected[0].PagingToken(), false, "asc", 100), + account, + tradeType, + ) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, rows, expected[1:]) + + if len(expected) == 1 { + continue + } + + rows, err = q.GetTrades( + tt.Ctx, + db2.MustPageQuery(expected[1].PagingToken(), false, "asc", 100), + account, + tradeType, + ) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, rows, expected[2:]) + } + } +} + +func TestTradesQueryForOffer(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + fixtures := TradeScenario(tt, q) + tt.Assert.NotEmpty(fixtures.TradesByOffer) + + for offer, expected := range fixtures.TradesByOffer { + trades, err := q.GetTradesForOffer(tt.Ctx, ascPQ, offer) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, expected, trades) + + trades, err = q.GetTradesForOffer( + tt.Ctx, + db2.MustPageQuery(expected[0].PagingToken(), false, "asc", 100), + offer, + ) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, expected[1:], trades) + } +} + +func TestTradesQueryForLiquidityPool(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + fixtures := TradeScenario(tt, q) + tt.Assert.NotEmpty(fixtures.TradesByPool) + + for poolID, expected := range fixtures.TradesByPool { + trades, err := q.GetTradesForLiquidityPool(tt.Ctx, ascPQ, poolID) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, expected, trades) + + trades, err = q.GetTradesForLiquidityPool( + tt.Ctx, + db2.MustPageQuery(expected[0].PagingToken(), false, "asc", 100), + poolID, + ) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, expected[1:], trades) + } +} + +func TestTradesForAssetPair(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + fixtures := TradeScenario(tt, q) + eurAsset := xdr.MustNewCreditAsset("EUR", issuer.Address()) + chfAsset := xdr.MustNewCreditAsset("CHF", "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU") + allTrades := fixtures.TradesByAssetPair(eurAsset, chfAsset) + + for _, account := range append([]string{allAccounts}, fixtures.Addresses...)
{ + for _, tradeType := range []string{AllTrades, OrderbookTrades, LiquidityPoolTrades} { + expected := filterByAccount(FilterTradesByType(allTrades, tradeType), account) + + trades, err := q.GetTradesForAssets(tt.Ctx, ascPQ, account, tradeType, chfAsset, eurAsset) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, expected, trades) + + if len(expected) == 0 { + continue + } + + trades, err = q.GetTradesForAssets( + tt.Ctx, + db2.MustPageQuery(expected[0].PagingToken(), false, "asc", 100), + account, + tradeType, + chfAsset, + eurAsset, + ) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, expected[1:], trades) + } + } +} + +func reverseTrade(expected Trade) Trade { + expected.BaseIsSeller = !expected.BaseIsSeller + expected.BaseAssetCode, expected.CounterAssetCode = expected.CounterAssetCode, expected.BaseAssetCode + expected.BaseAssetIssuer, expected.CounterAssetIssuer = expected.CounterAssetIssuer, expected.BaseAssetIssuer + expected.BaseOfferID, expected.CounterOfferID = expected.CounterOfferID, expected.BaseOfferID + expected.BaseLiquidityPoolID, expected.CounterLiquidityPoolID = expected.CounterLiquidityPoolID, expected.BaseLiquidityPoolID + expected.BaseAssetType, expected.CounterAssetType = expected.CounterAssetType, expected.BaseAssetType + expected.BaseAccount, expected.CounterAccount = expected.CounterAccount, expected.BaseAccount + expected.BaseAmount, expected.CounterAmount = expected.CounterAmount, expected.BaseAmount + expected.PriceN, expected.PriceD = expected.PriceD, expected.PriceN + return expected +} + +func TestTradesForReverseAssetPair(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + fixtures := TradeScenario(tt, q) + eurAsset := xdr.MustNewCreditAsset("EUR", issuer.Address()) + chfAsset := xdr.MustNewCreditAsset("CHF", "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU") + allTrades := fixtures.TradesByAssetPair(eurAsset, chfAsset) + + for _, account := range append([]string{allAccounts}, fixtures.Addresses...) { + for _, tradeType := range []string{AllTrades, OrderbookTrades, LiquidityPoolTrades} { + expected := filterByAccount(FilterTradesByType(allTrades, tradeType), account) + for i := range expected { + expected[i] = reverseTrade(expected[i]) + } + + trades, err := q.GetTradesForAssets(tt.Ctx, ascPQ, account, tradeType, eurAsset, chfAsset) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, expected, trades) + + if len(expected) == 0 { + continue + } + + trades, err = q.GetTradesForAssets( + tt.Ctx, + db2.MustPageQuery(expected[0].PagingToken(), false, "asc", 100), + account, + tradeType, + eurAsset, + chfAsset, + ) + tt.Assert.NoError(err) + assertTradesAreEqual(tt, expected[1:], trades) + } + } +} diff --git a/services/horizon/internal/db2/history/transaction.go b/services/horizon/internal/db2/history/transaction.go new file mode 100644 index 0000000000..101ba0a057 --- /dev/null +++ b/services/horizon/internal/db2/history/transaction.go @@ -0,0 +1,261 @@ +package history + +import ( + "context" + + sq "github.com/Masterminds/squirrel" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// TransactionByHash is a query that loads a single row from the +// `history_transactions` table based upon the provided hash. +func (q *Q) TransactionByHash(ctx context.Context, dest interface{}, hash string) error { + byHash := selectTransaction. 
+ Where("ht.transaction_hash = ?", hash) + byInnerHash := selectTransaction. + Where("ht.inner_transaction_hash = ?", hash) + + byInnerHashString, args, err := byInnerHash.ToSql() + if err != nil { + return errors.Wrap(err, "could not get string for inner hash sql query") + } + union := byHash.Suffix("UNION ALL "+byInnerHashString, args...) + + return q.Get(ctx, dest, union) +} + +// TransactionsByHashesSinceLedger fetches transactions from the `history_transactions` +// table which match the given hash since the given ledger sequence (for perf reasons). +func (q *Q) TransactionsByHashesSinceLedger(ctx context.Context, hashes []string, sinceLedgerSeq uint32) ([]Transaction, error) { + var dest []Transaction + byHash := selectTransaction. + Where(map[string]interface{}{"ht.transaction_hash": hashes}). + Where(sq.GtOrEq{"ht.ledger_sequence": sinceLedgerSeq}) + byInnerHash := selectTransaction. + Where(map[string]interface{}{"ht.inner_transaction_hash": hashes}). + Where(sq.GtOrEq{"ht.ledger_sequence": sinceLedgerSeq}) + + byInnerHashString, args, err := byInnerHash.ToSql() + if err != nil { + return nil, errors.Wrap(err, "could not get string for inner hash sql query") + } + union := byHash.Suffix("UNION ALL "+byInnerHashString, args...) + + err = q.Select(ctx, &dest, union) + if err != nil { + return nil, err + } + + return dest, nil +} + +// TransactionsByIDs fetches transactions from the `history_transactions` table +// which match the given ids +func (q *Q) TransactionsByIDs(ctx context.Context, ids ...int64) (map[int64]Transaction, error) { + if len(ids) == 0 { + return nil, errors.New("no id arguments provided") + } + + sql := selectTransaction.Where(map[string]interface{}{ + "ht.id": ids, + }) + + var transactions []Transaction + if err := q.Select(ctx, &transactions, sql); err != nil { + return nil, err + } + + byID := map[int64]Transaction{} + for _, transaction := range transactions { + byID[transaction.TotalOrderID.ID] = transaction + } + + return byID, nil +} + +// Transactions provides a helper to filter rows from the `history_transactions` +// table with pre-defined filters. See `TransactionsQ` methods for the +// available filters. +func (q *Q) Transactions() *TransactionsQ { + return &TransactionsQ{ + parent: q, + sql: selectTransaction, + includeFailed: false, + } +} + +// ForAccount filters the transactions collection to a specific account +func (q *TransactionsQ) ForAccount(ctx context.Context, aid string) *TransactionsQ { + var account Account + q.Err = q.parent.AccountByAddress(ctx, &account, aid) + if q.Err != nil { + return q + } + + q.sql = q.sql. + Join("history_transaction_participants htp ON htp.history_transaction_id = ht.id"). + Where("htp.history_account_id = ?", account.ID) + + return q +} + +// ForClaimableBalance filters the transactions collection to a specific claimable balance +func (q *TransactionsQ) ForClaimableBalance(ctx context.Context, cbID string) *TransactionsQ { + + var hCB HistoryClaimableBalance + hCB, q.Err = q.parent.ClaimableBalanceByID(ctx, cbID) + if q.Err != nil { + return q + } + + q.sql = q.sql. + Join("history_transaction_claimable_balances htcb ON htcb.history_transaction_id = ht.id"). 
+ Where("htcb.history_claimable_balance_id = ?", hCB.InternalID) + + return q +} + +// ForLiquidityPool filters the transactions collection to a specific liquidity pool +func (q *TransactionsQ) ForLiquidityPool(ctx context.Context, poolID string) *TransactionsQ { + + var hLP HistoryLiquidityPool + hLP, q.Err = q.parent.LiquidityPoolByID(ctx, poolID) + if q.Err != nil { + return q + } + + q.sql = q.sql. + Join("history_transaction_liquidity_pools htlp ON htlp.history_transaction_id = ht.id"). + Where("htlp.history_liquidity_pool_id = ?", hLP.InternalID) + + return q +} + +// ForLedger filters the query to a only transactions in a specific ledger, +// specified by its sequence. +func (q *TransactionsQ) ForLedger(ctx context.Context, seq int32) *TransactionsQ { + var ledger Ledger + q.Err = q.parent.LedgerBySequence(ctx, &ledger, seq) + if q.Err != nil { + return q + } + + start := toid.ID{LedgerSequence: seq} + end := toid.ID{LedgerSequence: seq + 1} + q.sql = q.sql.Where( + "ht.id >= ? AND ht.id < ?", + start.ToInt64(), + end.ToInt64(), + ) + + return q +} + +// IncludeFailed changes the query to include failed transactions. +func (q *TransactionsQ) IncludeFailed() *TransactionsQ { + q.includeFailed = true + return q +} + +// Page specifies the paging constraints for the query being built by `q`. +func (q *TransactionsQ) Page(page db2.PageQuery) *TransactionsQ { + if q.Err != nil { + return q + } + + q.sql, q.Err = page.ApplyTo(q.sql, "ht.id") + return q +} + +// Select loads the results of the query specified by `q` into `dest`. +func (q *TransactionsQ) Select(ctx context.Context, dest interface{}) error { + if q.Err != nil { + return q.Err + } + + if !q.includeFailed { + q.sql = q.sql. + Where("(ht.successful = true OR ht.successful IS NULL)") + } + + q.Err = q.parent.Select(ctx, dest, q.sql) + if q.Err != nil { + return q.Err + } + + transactions, ok := dest.(*[]Transaction) + if !ok { + return errors.New("dest is not *[]Transaction") + } + + for _, t := range *transactions { + var resultXDR xdr.TransactionResult + err := xdr.SafeUnmarshalBase64(t.TxResult, &resultXDR) + if err != nil { + return err + } + + if !q.includeFailed { + if !t.Successful { + return errors.Errorf("Corrupted data! `include_failed=false` but returned transaction is failed: %s", t.TransactionHash) + } + + if !resultXDR.Successful() { + return errors.Errorf("Corrupted data! `include_failed=false` but returned transaction is failed: %s %s", t.TransactionHash, t.TxResult) + } + } + + // Check if `successful` equals resultXDR + if t.Successful && !resultXDR.Successful() { + return errors.Errorf("Corrupted data! `successful=true` but returned transaction is not success: %s %s", t.TransactionHash, t.TxResult) + } + + if !t.Successful && resultXDR.Successful() { + return errors.Errorf("Corrupted data! `successful=false` but returned transaction is success: %s %s", t.TransactionHash, t.TxResult) + } + } + + return nil +} + +// QTransactions defines transaction related queries. +type QTransactions interface { + NewTransactionBatchInsertBuilder(maxBatchSize int) TransactionBatchInsertBuilder +} + +var selectTransaction = sq.Select( + "ht.id, " + + "ht.transaction_hash, " + + "ht.ledger_sequence, " + + "ht.application_order, " + + "ht.account, " + + "ht.account_muxed, " + + "ht.account_sequence, " + + "ht.max_fee, " + + // `fee_charged` is NULL by default, DB needs to be reingested + // to populate the value. If value is not present display `max_fee`. 
+ "COALESCE(ht.fee_charged, ht.max_fee) as fee_charged, " + + "ht.operation_count, " + + "ht.tx_envelope, " + + "ht.tx_result, " + + "ht.tx_meta, " + + "ht.tx_fee_meta, " + + "ht.created_at, " + + "ht.updated_at, " + + "COALESCE(ht.successful, true) as successful, " + + "ht.signatures, " + + "ht.memo_type, " + + "ht.memo, " + + "time_bounds, " + + "hl.closed_at AS ledger_close_time, " + + "ht.inner_transaction_hash, " + + "ht.fee_account, " + + "ht.fee_account_muxed, " + + "ht.new_max_fee, " + + "ht.inner_signatures"). + From("history_transactions ht"). + LeftJoin("history_ledgers hl ON ht.ledger_sequence = hl.sequence") diff --git a/services/horizon/internal/db2/history/transaction_batch_insert_builder.go b/services/horizon/internal/db2/history/transaction_batch_insert_builder.go new file mode 100644 index 0000000000..ddbe401222 --- /dev/null +++ b/services/horizon/internal/db2/history/transaction_batch_insert_builder.go @@ -0,0 +1,305 @@ +package history + +import ( + "context" + "database/sql/driver" + "encoding/base64" + "encoding/hex" + "fmt" + "math" + "strconv" + "strings" + "time" + + "github.com/guregu/null" + "github.com/lib/pq" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/services/horizon/internal/utf8" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// TransactionBatchInsertBuilder is used to insert transactions into the +// history_transactions table +type TransactionBatchInsertBuilder interface { + Add(ctx context.Context, transaction ingest.LedgerTransaction, sequence uint32) error + Exec(ctx context.Context) error +} + +// transactionBatchInsertBuilder is a simple wrapper around db.BatchInsertBuilder +type transactionBatchInsertBuilder struct { + encodingBuffer *xdr.EncodingBuffer + builder db.BatchInsertBuilder +} + +// NewTransactionBatchInsertBuilder constructs a new TransactionBatchInsertBuilder instance +func (q *Q) NewTransactionBatchInsertBuilder(maxBatchSize int) TransactionBatchInsertBuilder { + return &transactionBatchInsertBuilder{ + encodingBuffer: xdr.NewEncodingBuffer(), + builder: db.BatchInsertBuilder{ + Table: q.GetTable("history_transactions"), + MaxBatchSize: maxBatchSize, + }, + } +} + +// Add adds a new transaction to the batch +func (i *transactionBatchInsertBuilder) Add(ctx context.Context, transaction ingest.LedgerTransaction, sequence uint32) error { + row, err := i.transactionToRow(transaction, sequence) + if err != nil { + return err + } + + return i.builder.RowStruct(ctx, row) +} + +func (i *transactionBatchInsertBuilder) Exec(ctx context.Context) error { + return i.builder.Exec(ctx) +} + +// TimeBounds represents the time bounds of a Stellar transaction +type TimeBounds struct { + Null bool + Upper null.Int + Lower null.Int +} + +// Scan implements the database/sql Scanner interface. 
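The Scan method below, together with Value further down in this file, round-trips a transaction's time bounds through a Postgres int8range literal such as "[100,200)", with an open upper end when there is no maximum time. A standalone sketch of the parsing side, using only the standard library; the real method additionally handles NULL and []byte input, and the numeric values here are made up:

package main

import (
	"fmt"
	"strings"
)

// parseRange extracts the optional bounds from a literal like "[100,200)" or "[100,)".
func parseRange(lit string) (lower, upper string, err error) {
	lit = strings.TrimSpace(lit)
	if len(lit) < 3 {
		return "", "", fmt.Errorf("range is invalid: %q", lit)
	}
	inner := lit[1 : len(lit)-1] // drop the surrounding "[" and ")"
	parts := strings.Split(inner, ",")
	if len(parts) != 2 {
		return "", "", fmt.Errorf("expected two comma separated values: %q", lit)
	}
	return parts[0], parts[1], nil
}

func main() {
	fmt.Println(parseRange("[1633024800,1633028400)")) // both bounds present
	fmt.Println(parseRange("[1633024800,)"))           // no upper time bound
}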
+func (t *TimeBounds) Scan(src interface{}) error { + if src == nil { + *t = TimeBounds{Null: true} + return nil + } + + var rangeText string + switch src := src.(type) { + case string: + rangeText = src + case []byte: + rangeText = string(src) + default: + return errors.Errorf("cannot scan %T", src) + } + + rangeText = strings.TrimSpace(rangeText) + if len(rangeText) < 3 { + return errors.Errorf("range is invalid %s", rangeText) + } + inner := rangeText[1 : len(rangeText)-1] + parts := strings.Split(inner, ",") + if len(parts) != 2 { + return errors.Errorf("%s does not have 2 comma separated values", rangeText) + } + + lower, upper := parts[0], parts[1] + if len(lower) > 0 { + if err := t.Lower.Scan(lower); err != nil { + return errors.Wrap(err, "cannot parse lower bound") + } + } + if len(upper) > 0 { + if err := t.Upper.Scan(upper); err != nil { + return errors.Wrap(err, "cannot parse upper bound") + } + } + + return nil +} + +// Value implements the database/sql/driver Valuer interface. +func (t TimeBounds) Value() (driver.Value, error) { + if t.Null { + return nil, nil + } + + if !t.Upper.Valid { + return fmt.Sprintf("[%d,)", t.Lower.Int64), nil + } + + return fmt.Sprintf("[%d, %d)", t.Lower.Int64, t.Upper.Int64), nil +} + +func formatTimeBounds(transaction ingest.LedgerTransaction) TimeBounds { + timeBounds := transaction.Envelope.TimeBounds() + if timeBounds == nil { + return TimeBounds{Null: true} + } + + if timeBounds.MaxTime == 0 { + return TimeBounds{ + Lower: null.IntFrom(int64(timeBounds.MinTime)), + } + } + + maxTime := timeBounds.MaxTime + if maxTime > math.MaxInt64 { + maxTime = math.MaxInt64 + } + + return TimeBounds{ + Lower: null.IntFrom(int64(timeBounds.MinTime)), + Upper: null.IntFrom(int64(maxTime)), + } +} + +func signatures(xdrSignatures []xdr.DecoratedSignature) pq.StringArray { + signatures := make([]string, len(xdrSignatures)) + for i, sig := range xdrSignatures { + signatures[i] = base64.StdEncoding.EncodeToString(sig.Signature) + } + return signatures +} + +func memoType(transaction ingest.LedgerTransaction) string { + switch transaction.Envelope.Memo().Type { + case xdr.MemoTypeMemoNone: + return "none" + case xdr.MemoTypeMemoText: + return "text" + case xdr.MemoTypeMemoId: + return "id" + case xdr.MemoTypeMemoHash: + return "hash" + case xdr.MemoTypeMemoReturn: + return "return" + default: + panic(fmt.Errorf("invalid memo type: %v", transaction.Envelope.Memo().Type)) + } +} + +func memo(transaction ingest.LedgerTransaction) null.String { + var ( + value string + valid bool + ) + memo := transaction.Envelope.Memo() + switch memo.Type { + case xdr.MemoTypeMemoNone: + value, valid = "", false + case xdr.MemoTypeMemoText: + scrubbed := utf8.Scrub(memo.MustText()) + notnull := strings.Join(strings.Split(scrubbed, "\x00"), "") + value, valid = notnull, true + case xdr.MemoTypeMemoId: + value, valid = fmt.Sprintf("%d", memo.MustId()), true + case xdr.MemoTypeMemoHash: + hash := memo.MustHash() + value, valid = + base64.StdEncoding.EncodeToString(hash[:]), + true + case xdr.MemoTypeMemoReturn: + hash := memo.MustRetHash() + value, valid = + base64.StdEncoding.EncodeToString(hash[:]), + true + default: + panic(fmt.Errorf("invalid memo type: %v", memo.Type)) + } + + return null.NewString(value, valid) +} + +type TransactionWithoutLedger struct { + TotalOrderID + TransactionHash string `db:"transaction_hash"` + LedgerSequence int32 `db:"ledger_sequence"` + ApplicationOrder int32 `db:"application_order"` + Account string `db:"account"` + AccountMuxed null.String 
`db:"account_muxed"` + AccountSequence string `db:"account_sequence"` + MaxFee int64 `db:"max_fee"` + FeeCharged int64 `db:"fee_charged"` + OperationCount int32 `db:"operation_count"` + TxEnvelope string `db:"tx_envelope"` + TxResult string `db:"tx_result"` + TxMeta string `db:"tx_meta"` + TxFeeMeta string `db:"tx_fee_meta"` + Signatures pq.StringArray `db:"signatures"` + MemoType string `db:"memo_type"` + Memo null.String `db:"memo"` + TimeBounds TimeBounds `db:"time_bounds"` + CreatedAt time.Time `db:"created_at"` + UpdatedAt time.Time `db:"updated_at"` + Successful bool `db:"successful"` + FeeAccount null.String `db:"fee_account"` + FeeAccountMuxed null.String `db:"fee_account_muxed"` + InnerTransactionHash null.String `db:"inner_transaction_hash"` + NewMaxFee null.Int `db:"new_max_fee"` + InnerSignatures pq.StringArray `db:"inner_signatures"` +} + +func (i *transactionBatchInsertBuilder) transactionToRow(transaction ingest.LedgerTransaction, sequence uint32) (TransactionWithoutLedger, error) { + envelopeBase64, err := i.encodingBuffer.MarshalBase64(transaction.Envelope) + if err != nil { + return TransactionWithoutLedger{}, err + } + resultBase64, err := i.encodingBuffer.MarshalBase64(&transaction.Result.Result) + if err != nil { + return TransactionWithoutLedger{}, err + } + metaBase64, err := i.encodingBuffer.MarshalBase64(transaction.UnsafeMeta) + if err != nil { + return TransactionWithoutLedger{}, err + } + feeMetaBase64, err := i.encodingBuffer.MarshalBase64(transaction.FeeChanges) + if err != nil { + return TransactionWithoutLedger{}, err + } + + source := transaction.Envelope.SourceAccount() + account := source.ToAccountId() + var accountMuxed null.String + if source.Type == xdr.CryptoKeyTypeKeyTypeMuxedEd25519 { + accountMuxed = null.StringFrom(source.Address()) + } + t := TransactionWithoutLedger{ + TransactionHash: hex.EncodeToString(transaction.Result.TransactionHash[:]), + LedgerSequence: int32(sequence), + ApplicationOrder: int32(transaction.Index), + Account: account.Address(), + AccountMuxed: accountMuxed, + AccountSequence: strconv.FormatInt(transaction.Envelope.SeqNum(), 10), + MaxFee: int64(transaction.Envelope.Fee()), + FeeCharged: int64(transaction.Result.Result.FeeCharged), + OperationCount: int32(len(transaction.Envelope.Operations())), + TxEnvelope: envelopeBase64, + TxResult: resultBase64, + TxMeta: metaBase64, + TxFeeMeta: feeMetaBase64, + TimeBounds: formatTimeBounds(transaction), + MemoType: memoType(transaction), + Memo: memo(transaction), + CreatedAt: time.Now().UTC(), + UpdatedAt: time.Now().UTC(), + Successful: transaction.Result.Successful(), + } + t.TotalOrderID.ID = toid.New(int32(sequence), int32(transaction.Index), 0).ToInt64() + + if transaction.Envelope.IsFeeBump() { + innerHash := transaction.Result.InnerHash() + t.InnerTransactionHash = null.StringFrom(hex.EncodeToString(innerHash[:])) + feeBumpAccount := transaction.Envelope.FeeBumpAccount() + feeAccount := feeBumpAccount.ToAccountId() + var feeAccountMuxed null.String + if feeBumpAccount.Type == xdr.CryptoKeyTypeKeyTypeMuxedEd25519 { + feeAccountMuxed = null.StringFrom(feeBumpAccount.Address()) + } + t.FeeAccount = null.StringFrom(feeAccount.Address()) + t.FeeAccountMuxed = feeAccountMuxed + t.NewMaxFee = null.IntFrom(transaction.Envelope.FeeBumpFee()) + t.InnerSignatures = signatures(transaction.Envelope.Signatures()) + t.Signatures = signatures(transaction.Envelope.FeeBumpSignatures()) + } else { + t.InnerTransactionHash = null.StringFromPtr(nil) + t.FeeAccount = null.StringFromPtr(nil) + 
t.FeeAccountMuxed = null.StringFromPtr(nil) + t.NewMaxFee = null.IntFromPtr(nil) + t.InnerSignatures = nil + t.Signatures = signatures(transaction.Envelope.Signatures()) + } + + return t, nil +} diff --git a/services/horizon/internal/db2/history/transaction_batch_insert_builder_test.go b/services/horizon/internal/db2/history/transaction_batch_insert_builder_test.go new file mode 100644 index 0000000000..aaf623a545 --- /dev/null +++ b/services/horizon/internal/db2/history/transaction_batch_insert_builder_test.go @@ -0,0 +1,182 @@ +package history + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/xdr" +) + +func TestTransactionToMap_muxed(t *testing.T) { + innerSource := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 1, + Ed25519: xdr.Uint256{3, 2, 1}, + }, + } + innerAccountID := innerSource.ToAccountId() + feeSource := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 1, + Ed25519: xdr.Uint256{0, 1, 2}, + }, + } + feeSourceAccountID := feeSource.ToAccountId() + tx := ingest.LedgerTransaction{ + Index: 1, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTxFeeBump, + FeeBump: &xdr.FeeBumpTransactionEnvelope{ + Tx: xdr.FeeBumpTransaction{ + FeeSource: feeSource, + Fee: 200, + InnerTx: xdr.FeeBumpTransactionInnerTx{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: innerSource, + Operations: []xdr.Operation{ + { + SourceAccount: &innerSource, + Body: xdr.OperationBody{ + Type: xdr.OperationTypePayment, + PaymentOp: &xdr.PaymentOp{ + Destination: innerSource, + Asset: xdr.Asset{Type: xdr.AssetTypeAssetTypeNative}, + Amount: 100, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Result: xdr.TransactionResultPair{ + TransactionHash: xdr.Hash{1, 2, 3}, + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxFeeBumpInnerSuccess, + InnerResultPair: &xdr.InnerTransactionResultPair{ + TransactionHash: xdr.Hash{3, 2, 1}, + Result: xdr.InnerTransactionResult{ + Result: xdr.InnerTransactionResultResult{ + Results: &[]xdr.OperationResult{}, + }, + }, + }, + Results: &[]xdr.OperationResult{}, + }, + }, + }, + UnsafeMeta: xdr.TransactionMeta{ + V: 1, + Operations: &[]xdr.OperationMeta{}, + V1: &xdr.TransactionMetaV1{ + TxChanges: []xdr.LedgerEntryChange{}, + Operations: []xdr.OperationMeta{}, + }, + }, + } + b := &transactionBatchInsertBuilder{ + encodingBuffer: xdr.NewEncodingBuffer(), + } + row, err := b.transactionToRow(tx, 20) + assert.NoError(t, err) + + assert.Equal(t, innerAccountID.Address(), row.Account) + + assert.Equal(t, feeSourceAccountID.Address(), row.FeeAccount.String) + + assert.Equal(t, feeSource.Address(), row.FeeAccountMuxed.String) +} + +func TestTransactionToMap_SourceMuxedAndFeeSourceUnmuxed(t *testing.T) { + innerSource := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 1, + Ed25519: xdr.Uint256{3, 2, 1}, + }, + } + innerAccountID := innerSource.ToAccountId() + feeSource := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeEd25519, + Ed25519: &xdr.Uint256{0, 1, 2}, + } + tx := ingest.LedgerTransaction{ + Index: 1, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTxFeeBump, + FeeBump: &xdr.FeeBumpTransactionEnvelope{ + Tx: xdr.FeeBumpTransaction{ + FeeSource: feeSource, 
+ Fee: 200, + InnerTx: xdr.FeeBumpTransactionInnerTx{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: innerSource, + Operations: []xdr.Operation{ + { + SourceAccount: &innerSource, + Body: xdr.OperationBody{ + Type: xdr.OperationTypePayment, + PaymentOp: &xdr.PaymentOp{ + Destination: innerSource, + Asset: xdr.Asset{Type: xdr.AssetTypeAssetTypeNative}, + Amount: 100, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Result: xdr.TransactionResultPair{ + TransactionHash: xdr.Hash{1, 2, 3}, + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxFeeBumpInnerSuccess, + InnerResultPair: &xdr.InnerTransactionResultPair{ + TransactionHash: xdr.Hash{3, 2, 1}, + Result: xdr.InnerTransactionResult{ + Result: xdr.InnerTransactionResultResult{ + Results: &[]xdr.OperationResult{}, + }, + }, + }, + Results: &[]xdr.OperationResult{}, + }, + }, + }, + UnsafeMeta: xdr.TransactionMeta{ + V: 1, + Operations: &[]xdr.OperationMeta{}, + V1: &xdr.TransactionMetaV1{ + TxChanges: []xdr.LedgerEntryChange{}, + Operations: []xdr.OperationMeta{}, + }, + }, + } + b := &transactionBatchInsertBuilder{ + encodingBuffer: xdr.NewEncodingBuffer(), + } + row, err := b.transactionToRow(tx, 20) + assert.NoError(t, err) + + assert.Equal(t, innerAccountID.Address(), row.Account) + + assert.Equal(t, feeSource.Address(), row.FeeAccount.String) + + assert.False(t, row.FeeAccountMuxed.Valid) +} diff --git a/services/horizon/internal/db2/history/transaction_test.go b/services/horizon/internal/db2/history/transaction_test.go new file mode 100644 index 0000000000..ab34d817e3 --- /dev/null +++ b/services/horizon/internal/db2/history/transaction_test.go @@ -0,0 +1,824 @@ +package history + +import ( + "database/sql" + "testing" + "time" + + sq "github.com/Masterminds/squirrel" + "github.com/guregu/null" + "github.com/stellar/go/xdr" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/toid" +) + +func TestTransactionQueries(t *testing.T) { + tt := test.Start(t) + test.ResetHorizonDB(t, tt.HorizonDB) + tt.Scenario("base") + defer tt.Finish() + q := &Q{tt.HorizonSession()} + + // Test TransactionByHash + var tx Transaction + real := "2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d" + err := q.TransactionByHash(tt.Ctx, &tx, real) + tt.Assert.NoError(err) + + fake := "not_real" + err = q.TransactionByHash(tt.Ctx, &tx, fake) + tt.Assert.Equal(err, sql.ErrNoRows) +} + +func TestTransactionByLiquidityPool(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + txIndex := int32(1) + sequence := int32(56) + txID := toid.New(sequence, int32(1), 0).ToInt64() + + // Insert a phony ledger + ledgerCloseTime := time.Now().Unix() + _, err := q.InsertLedger(tt.Ctx, xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(sequence), + ScpValue: xdr.StellarValue{ + CloseTime: xdr.TimePoint(ledgerCloseTime), + }, + }, + }, 0, 0, 0, 0, 0) + tt.Assert.NoError(err) + + // Insert a phony transaction + transactionBuilder := q.NewTransactionBatchInsertBuilder(2) + firstTransaction := buildLedgerTransaction(tt.T, testTransaction{ + index: uint32(txIndex), + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAAAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAAA", + resultXDR: 
"AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba", + }) + err = transactionBuilder.Add(tt.Ctx, firstTransaction, uint32(sequence)) + tt.Assert.NoError(err) + err = transactionBuilder.Exec(tt.Ctx) + tt.Assert.NoError(err) + + // Insert Liquidity Pool history + liquidityPoolID := "a2f38836a839de008cf1d782c81f45e1253cc5d3dad9110b872965484fec0a49" + toInternalID, err := q.CreateHistoryLiquidityPools(tt.Ctx, []string{liquidityPoolID}, 2) + tt.Assert.NoError(err) + lpTransactionBuilder := q.NewTransactionLiquidityPoolBatchInsertBuilder(2) + tt.Assert.NoError(err) + internalID, ok := toInternalID[liquidityPoolID] + tt.Assert.True(ok) + err = lpTransactionBuilder.Add(tt.Ctx, txID, internalID) + tt.Assert.NoError(err) + err = lpTransactionBuilder.Exec(tt.Ctx) + tt.Assert.NoError(err) + + var records []Transaction + err = q.Transactions().ForLiquidityPool(tt.Ctx, liquidityPoolID).Select(tt.Ctx, &records) + tt.Assert.NoError(err) + tt.Assert.Len(records, 1) + +} + +// TestTransactionSuccessfulOnly tests if default query returns successful +// transactions only. +// If it's not enclosed in brackets, it may return incorrect result when mixed +// with `ForAccount` or `ForLedger` filters. +func TestTransactionSuccessfulOnly(t *testing.T) { + tt := test.Start(t) + test.ResetHorizonDB(t, tt.HorizonDB) + tt.Scenario("failed_transactions") + defer tt.Finish() + + var transactions []Transaction + + q := &Q{tt.HorizonSession()} + query := q.Transactions(). + ForAccount(tt.Ctx, "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2") + + err := query.Select(tt.Ctx, &transactions) + tt.Assert.NoError(err) + + tt.Assert.Equal(3, len(transactions)) + + for _, transaction := range transactions { + tt.Assert.True(transaction.Successful) + } + + sql, _, err := query.sql.ToSql() + tt.Assert.NoError(err) + // Note: brackets around `(ht.successful = true OR ht.successful IS NULL)` are critical! + tt.Assert.Contains(sql, "WHERE htp.history_account_id = ? AND (ht.successful = true OR ht.successful IS NULL)") +} + +// TestTransactionIncludeFailed tests `IncludeFailed` method. +func TestTransactionIncludeFailed(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + var transactions []Transaction + + q := &Q{tt.HorizonSession()} + query := q.Transactions(). + ForAccount(tt.Ctx, "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"). 
+ IncludeFailed() + + err := query.Select(tt.Ctx, &transactions) + tt.Assert.NoError(err) + + var failed, successful int + for _, transaction := range transactions { + if transaction.Successful { + successful++ + } else { + failed++ + } + } + + tt.Assert.Equal(3, successful) + tt.Assert.Equal(1, failed) + + sql, _, err := query.sql.ToSql() + tt.Assert.NoError(err) + tt.Assert.Equal("SELECT ht.id, ht.transaction_hash, ht.ledger_sequence, ht.application_order, ht.account, ht.account_muxed, ht.account_sequence, ht.max_fee, COALESCE(ht.fee_charged, ht.max_fee) as fee_charged, ht.operation_count, ht.tx_envelope, ht.tx_result, ht.tx_meta, ht.tx_fee_meta, ht.created_at, ht.updated_at, COALESCE(ht.successful, true) as successful, ht.signatures, ht.memo_type, ht.memo, time_bounds, hl.closed_at AS ledger_close_time, ht.inner_transaction_hash, ht.fee_account, ht.fee_account_muxed, ht.new_max_fee, ht.inner_signatures FROM history_transactions ht LEFT JOIN history_ledgers hl ON ht.ledger_sequence = hl.sequence JOIN history_transaction_participants htp ON htp.history_transaction_id = ht.id WHERE htp.history_account_id = ?", sql) +} + +func TestExtraChecksTransactionSuccessfulTrueResultFalse(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + // successful `true` but tx result `false` + _, err := tt.HorizonDB.Exec( + `UPDATE history_transactions SET successful = true WHERE transaction_hash = 'aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf'`, + ) + tt.Require.NoError(err) + + var transactions []Transaction + + q := &Q{tt.HorizonSession()} + query := q.Transactions(). + ForAccount(tt.Ctx, "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"). + IncludeFailed() + + err = query.Select(tt.Ctx, &transactions) + tt.Assert.Error(err) + tt.Assert.Contains(err.Error(), "Corrupted data! `successful=true` but returned transaction is not success") +} + +func TestExtraChecksTransactionSuccessfulFalseResultTrue(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + + // successful `false` but tx result `true` + _, err := tt.HorizonDB.Exec( + `UPDATE history_transactions SET successful = false WHERE transaction_hash = 'a2dabf4e9d1642722602272e178a37c973c9177b957da86192a99b3e9f3a9aa4'`, + ) + tt.Require.NoError(err) + + var transactions []Transaction + + q := &Q{tt.HorizonSession()} + query := q.Transactions(). + ForAccount(tt.Ctx, "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON"). + IncludeFailed() + + err = query.Select(tt.Ctx, &transactions) + tt.Assert.Error(err) + tt.Assert.Contains(err.Error(), "Corrupted data! 
`successful=false` but returned transaction is success") +} + +func TestInsertTransactionDoesNotAllowDuplicateIndex(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + sequence := uint32(123) + insertBuilder := q.NewTransactionBatchInsertBuilder(0) + + firstTransaction := buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAAAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAAA", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba", + }) + secondTransaction := buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAIAAAAAAAAAewAAAAEAAAAAAAAACwEXUhsAAFfhAAAAAAAAAAA=", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "7e2def20d5a21a56be2a457b648f702ee1af889d3df65790e92a05081e9fabf1", + }) + + tt.Assert.NoError(insertBuilder.Add(tt.Ctx, firstTransaction, sequence)) + tt.Assert.NoError(insertBuilder.Exec(tt.Ctx)) + + tt.Assert.NoError(insertBuilder.Add(tt.Ctx, secondTransaction, sequence)) + tt.Assert.EqualError( + insertBuilder.Exec(tt.Ctx), + "error adding values while inserting to history_transactions: "+ + "exec failed: pq: duplicate key value violates unique constraint "+ + "\"hs_transaction_by_id\"", + ) + + ledger := Ledger{ + Sequence: int32(sequence), + LedgerHash: "4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118", + PreviousLedgerHash: null.NewString("4b0b8bace3b2438b2404776ce57643966855487ba6384724a3c664c7aa4cd9e4", true), + TotalOrderID: TotalOrderID{toid.New(int32(69859), 0, 0).ToInt64()}, + ImporterVersion: 321, + TransactionCount: 12, + SuccessfulTransactionCount: new(int32), + FailedTransactionCount: new(int32), + OperationCount: 23, + TotalCoins: 23451, + FeePool: 213, + BaseReserve: 687, + MaxTxSetSize: 345, + ProtocolVersion: 12, + BaseFee: 100, + ClosedAt: time.Now().UTC().Truncate(time.Second), + LedgerHeaderXDR: null.NewString("temp", true), + } + *ledger.SuccessfulTransactionCount = 12 + *ledger.FailedTransactionCount = 3 + _, err := q.Exec(tt.Ctx, sq.Insert("history_ledgers").SetMap(ledgerToMap(ledger))) + tt.Assert.NoError(err) + + var transactions []Transaction + tt.Assert.NoError(q.Transactions().Select(tt.Ctx, &transactions)) + tt.Assert.Len(transactions, 1) + tt.Assert.Equal( + "19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba", + transactions[0].TransactionHash, + ) +} + +func TestInsertTransaction(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + sequence := uint32(123) + ledger := Ledger{ + Sequence: int32(sequence), + LedgerHash: "4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118", + PreviousLedgerHash: null.NewString("4b0b8bace3b2438b2404776ce57643966855487ba6384724a3c664c7aa4cd9e4", true), + TotalOrderID: TotalOrderID{toid.New(int32(69859), 0, 0).ToInt64()}, + ImporterVersion: 321, + TransactionCount: 12, + SuccessfulTransactionCount: new(int32), + FailedTransactionCount: new(int32), + OperationCount: 23, + TotalCoins: 23451, + FeePool: 213, + BaseReserve: 687, + MaxTxSetSize: 345, + ProtocolVersion: 12, 
+ BaseFee: 100, + ClosedAt: time.Now().UTC().Truncate(time.Second), + LedgerHeaderXDR: null.NewString("temp", true), + } + *ledger.SuccessfulTransactionCount = 12 + *ledger.FailedTransactionCount = 3 + _, err := q.Exec(tt.Ctx, sq.Insert("history_ledgers").SetMap(ledgerToMap(ledger))) + tt.Assert.NoError(err) + + insertBuilder := q.NewTransactionBatchInsertBuilder(0) + + success := true + + emptySignatures := []string{} + var nullSignatures []string + + nullTimeBounds := TimeBounds{Null: true} + + infiniteTimeBounds := TimeBounds{Lower: null.IntFrom(0)} + timeBoundWithMin := TimeBounds{Lower: null.IntFrom(1576195867)} + timeBoundWithMax := TimeBounds{Lower: null.IntFrom(0), Upper: null.IntFrom(1576195867)} + timeboundsWithMinAndMax := TimeBounds{Lower: null.IntFrom(1576095867), Upper: null.IntFrom(1576195867)} + + withMultipleSignatures := []string{ + "MID8kIOLP/yEymCyhU7A/YeVpnVTDzAqszWtv8c+/qAw542BaKWxCJxl/jsggY0mF+SR8X0bvWXvPBgyYcDZDw==", + "J0J8qTsKREW29GAmZMXXBTVkYKkGbOk1AUPUalbIiDdDjd8mpIIdMStqo9w+k5A8UKRTm/iO2V/riQ14CF9IAg==", + } + + withSingleSignature := []string{ + "MID8kIOLP/yEymCyhU7A/YeVpnVTDzAqszWtv8c+/qAw542BaKWxCJxl/jsggY0mF+SR8X0bvWXvPBgyYcDZDw==", + } + + for _, testCase := range []struct { + name string + toInsert ingest.LedgerTransaction + expected Transaction + }{ + { + "successful transaction without signatures", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAAAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAAA", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "78621794419880145", + MaxFee: 200, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAAAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAAA", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "none", + Memo: null.NewString("", false), + Successful: success, + TimeBounds: nullTimeBounds, + }, + }, + }, + { + "successful transaction with multiple signatures", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAAAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAACQmz0pAAAAEAwgPyQg4s//ITKYLKFTsD9h5WmdVMPMCqzNa2/xz7+oDDnjYFopbEInGX+OyCBjSYX5JHxfRu9Ze88GDJhwNkPto+xlgAAAEAnQnypOwpERbb0YCZkxdcFNWRgqQZs6TUBQ9RqVsiIN0ON3yakgh0xK2qj3D6TkDxQpFOb+I7ZX+uJDXgIX0gC", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: 
"19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "78621794419880145", + MaxFee: 200, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAAAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAACQmz0pAAAAEAwgPyQg4s//ITKYLKFTsD9h5WmdVMPMCqzNa2/xz7+oDDnjYFopbEInGX+OyCBjSYX5JHxfRu9Ze88GDJhwNkPto+xlgAAAEAnQnypOwpERbb0YCZkxdcFNWRgqQZs6TUBQ9RqVsiIN0ON3yakgh0xK2qj3D6TkDxQpFOb+I7ZX+uJDXgIX0gC", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: withMultipleSignatures, + InnerSignatures: nullSignatures, + MemoType: "none", + Memo: null.NewString("", false), + TimeBounds: nullTimeBounds, + Successful: success, + }, + }, + }, + { + "failed transaction", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAAAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAABQmz0pAAAAEAwgPyQg4s//ITKYLKFTsD9h5WmdVMPMCqzNa2/xz7+oDDnjYFopbEInGX+OyCBjSYX5JHxfRu9Ze88GDJhwNkP", + resultXDR: "AAAAAAAAAHv////6AAAAAA==", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "19aaa18db88605aedec04659fb45e06f240b022eb2d429e05133e4d53cd945ba", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "78621794419880145", + MaxFee: 200, + FeeCharged: 123, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAAAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAABQmz0pAAAAEAwgPyQg4s//ITKYLKFTsD9h5WmdVMPMCqzNa2/xz7+oDDnjYFopbEInGX+OyCBjSYX5JHxfRu9Ze88GDJhwNkP", + TxResult: "AAAAAAAAAHv////6AAAAAA==", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: withSingleSignature, + InnerSignatures: nullSignatures, + MemoType: "none", + Memo: null.NewString("", false), + TimeBounds: nullTimeBounds, + Successful: false, + }, + }, + }, + { + "transaction with 64 bit fee charged", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rlQL5AAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAABAAAACXRlc3QgbWVtbwAAAAAAAAEAAAAAAAAACwEXUhsAAFfhAAAAAAAAAAA=", + resultXDR: "AAAAAgAAAAAAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "edba3051b2f2d9b713e8a08709d631eccb72c59864ff3c564c68792271bb24a7", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "edba3051b2f2d9b713e8a08709d631eccb72c59864ff3c564c68792271bb24a7", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "78621794419880145", + // set max fee to a value larger than MAX_INT32 but less than or equal to MAX_UINT32 + MaxFee: 2500000000, + FeeCharged: int64(1 << 33), + OperationCount: 1, + TxEnvelope: 
"AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rlQL5AAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAABAAAACXRlc3QgbWVtbwAAAAAAAAEAAAAAAAAACwEXUhsAAFfhAAAAAAAAAAA=", + TxResult: "AAAAAgAAAAAAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "text", + Memo: null.NewString("test memo", true), + TimeBounds: infiniteTimeBounds, + Successful: success, + }, + }, + }, + { + "transaction with text memo", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAABAAAACXRlc3QgbWVtbwAAAAAAAAEAAAAAAAAACwEXUhsAAFfhAAAAAAAAAAA=", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "edba3051b2f2d9b713e8a08709d631eccb72c59864ff3c564c68792271bb24a7", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "edba3051b2f2d9b713e8a08709d631eccb72c59864ff3c564c68792271bb24a7", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "78621794419880145", + MaxFee: 200, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAABAAAACXRlc3QgbWVtbwAAAAAAAAEAAAAAAAAACwEXUhsAAFfhAAAAAAAAAAA=", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "text", + Memo: null.NewString("test memo", true), + TimeBounds: infiniteTimeBounds, + Successful: success, + }, + }, + }, + { + "transaction with id memo", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAIAAAAAAAAAewAAAAEAAAAAAAAACwEXUhsAAFfhAAAAAAAAAAA=", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "7e2def20d5a21a56be2a457b648f702ee1af889d3df65790e92a05081e9fabf1", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "7e2def20d5a21a56be2a457b648f702ee1af889d3df65790e92a05081e9fabf1", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "78621794419880145", + MaxFee: 200, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAAAAAAIAAAAAAAAAewAAAAEAAAAAAAAACwEXUhsAAFfhAAAAAAAAAAA=", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "id", + Memo: null.NewString("123", true), + TimeBounds: nullTimeBounds, + Successful: success, + }, + }, + }, + { + "transaction with hash memo", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: 
"AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAADfi3vINWiGla+KkV7ZI9wLuGviJ099leQ6SoFCB6fq/EAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAAA", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "8aba253c2adc4083f35830cec37d9c6226b757ab3a94f15a12d6c22973fd5f3f", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "8aba253c2adc4083f35830cec37d9c6226b757ab3a94f15a12d6c22973fd5f3f", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "78621794419880145", + MaxFee: 200, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAADfi3vINWiGla+KkV7ZI9wLuGviJ099leQ6SoFCB6fq/EAAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAAA", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "hash", + Memo: null.NewString("fi3vINWiGla+KkV7ZI9wLuGviJ099leQ6SoFCB6fq/E=", true), + TimeBounds: infiniteTimeBounds, + Successful: success, + }, + }, + }, + { + "transaction with return memo", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAEzdjArlILa/LNv7o7lo/qv5+fVVPNl0yPgZQWB6u+gL4AAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAAA", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "9e932a86cea43239aed62d8cd3b6b5ad2d8eb0a63247355e4ab44f2994209f2a", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "9e932a86cea43239aed62d8cd3b6b5ad2d8eb0a63247355e4ab44f2994209f2a", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "78621794419880145", + MaxFee: 200, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAEXUhsAADDRAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAEzdjArlILa/LNv7o7lo/qv5+fVVPNl0yPgZQWB6u+gL4AAAABAAAAAAAAAAsBF1IbAABX4QAAAAAAAAAA", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "return", + Memo: null.NewString("zdjArlILa/LNv7o7lo/qv5+fVVPNl0yPgZQWB6u+gL4=", true), + TimeBounds: infiniteTimeBounds, + Successful: success, + }, + }, + }, + { + "transaction with min time bound", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAZAAAAAAAAeJAAAAAAQAAAABd8tcbAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAALAAAAAAAS1ocAAAAAAAAAAA==", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "2a9ec3733989aa9a542ed6d0adbcc664915b1c3a72a019e6e23c2860f1ab342b", + }), + Transaction{ + LedgerCloseTime: 
ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "2a9ec3733989aa9a542ed6d0adbcc664915b1c3a72a019e6e23c2860f1ab342b", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "123456", + MaxFee: 100, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAZAAAAAAAAeJAAAAAAQAAAABd8tcbAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAALAAAAAAAS1ocAAAAAAAAAAA==", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "none", + Memo: null.NewString("", false), + TimeBounds: timeBoundWithMin, + Successful: success, + }, + }, + }, + { + "transaction with max time bound", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAZAAAAAAAAeJAAAAAAQAAAAAAAAAAAAAAAF3y1xsAAAAAAAAAAQAAAAAAAAALAAAAAAAS1ocAAAAAAAAAAA==", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "5858709ae02992431f98f7410be3d3586c5a83e9e7105d270ce1ddc2cf45358a", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "5858709ae02992431f98f7410be3d3586c5a83e9e7105d270ce1ddc2cf45358a", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "123456", + MaxFee: 100, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAZAAAAAAAAeJAAAAAAQAAAAAAAAAAAAAAAF3y1xsAAAAAAAAAAQAAAAAAAAALAAAAAAAS1ocAAAAAAAAAAA==", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "none", + Memo: null.NewString("", false), + TimeBounds: timeBoundWithMax, + Successful: success, + }, + }, + }, + { + "transaction with min and max time bound", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAZAAAAAAAAeJAAAAAAQAAAABd8VB7AAAAAF3y1xsAAAAAAAAAAQAAAAAAAAALAAAAAAAS1ocAAAAAAAAAAA==", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "7aa3419a833fb14e312ae47a98e565f668a72f23c39e0cf79f598d3d3e793b2d", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "7aa3419a833fb14e312ae47a98e565f668a72f23c39e0cf79f598d3d3e793b2d", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "123456", + MaxFee: 100, + FeeCharged: 300, + OperationCount: 1, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAZAAAAAAAAeJAAAAAAQAAAABd8VB7AAAAAF3y1xsAAAAAAAAAAQAAAAAAAAALAAAAAAAS1ocAAAAAAAAAAA==", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: 
"AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "none", + Memo: null.NewString("", false), + TimeBounds: timeboundsWithMinAndMax, + Successful: success, + }, + }, + }, + { + "transaction with multiple operations", + buildLedgerTransaction(tt.T, testTransaction{ + index: 1, + envelopeXDR: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAAAAAAAAeJAAAAAAAAAAAAAAAACAAAAAAAAAAsAAAAAABLWhwAAAAAAAAALAAAAAAAS1ogAAAAAAAAAAA==", + resultXDR: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + metaXDR: "AAAAAQAAAAAAAAAA", + hash: "6a3698a409141c6e45cb254aaaf94254c36a34323146a58214ce47b9f921174c", + }), + Transaction{ + LedgerCloseTime: ledger.ClosedAt, + TransactionWithoutLedger: TransactionWithoutLedger{ + TotalOrderID: TotalOrderID{528280981504}, + TransactionHash: "6a3698a409141c6e45cb254aaaf94254c36a34323146a58214ce47b9f921174c", + LedgerSequence: ledger.Sequence, + ApplicationOrder: 1, + Account: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + AccountSequence: "123456", + MaxFee: 200, + FeeCharged: 300, + OperationCount: 2, + TxEnvelope: "AAAAACiSTRmpH6bHC6Ekna5e82oiGY5vKDEEUgkq9CB//t+rAAAAyAAAAAAAAeJAAAAAAAAAAAAAAAACAAAAAAAAAAsAAAAAABLWhwAAAAAAAAALAAAAAAAS1ogAAAAAAAAAAA==", + TxResult: "AAAAAAAAASwAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + TxFeeMeta: "AAAAAA==", + TxMeta: "AAAAAQAAAAAAAAAA", + Signatures: emptySignatures, + InnerSignatures: nullSignatures, + MemoType: "none", + Memo: null.NewString("", false), + TimeBounds: nullTimeBounds, + Successful: success, + }, + }, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + tt.Assert.NoError(insertBuilder.Add(tt.Ctx, testCase.toInsert, sequence)) + tt.Assert.NoError(insertBuilder.Exec(tt.Ctx)) + + var transactions []Transaction + tt.Assert.NoError(q.Transactions().IncludeFailed().Select(tt.Ctx, &transactions)) + tt.Assert.Len(transactions, 1) + + transaction := transactions[0] + + // ignore created time and updated time + transaction.CreatedAt = testCase.expected.CreatedAt + transaction.UpdatedAt = testCase.expected.UpdatedAt + + // compare ClosedAt separately because reflect.DeepEqual does not handle time.Time + closedAt := transaction.LedgerCloseTime + transaction.LedgerCloseTime = testCase.expected.LedgerCloseTime + + tt.Assert.True(closedAt.Equal(testCase.expected.LedgerCloseTime)) + tt.Assert.Equal(transaction, testCase.expected) + + _, err = q.Exec(tt.Ctx, sq.Delete("history_transactions")) + tt.Assert.NoError(err) + }) + } +} + +func TestFetchFeeBumpTransaction(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + fixture := FeeBumpScenario(tt, q, true) + + var byOuterhash, byInnerHash Transaction + tt.Assert.NoError(q.TransactionByHash(tt.Ctx, &byOuterhash, fixture.OuterHash)) + tt.Assert.NoError(q.TransactionByHash(tt.Ctx, &byInnerHash, fixture.InnerHash)) + + tt.Assert.Equal(byOuterhash, byInnerHash) + tt.Assert.Equal(byOuterhash, fixture.Transaction) + + outerOps, outerTransactions, err := q.Operations().IncludeTransactions(). + ForTransaction(tt.Ctx, fixture.OuterHash).Fetch(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Len(outerTransactions, 1) + tt.Assert.Len(outerOps, 1) + + innerOps, innerTransactions, err := q.Operations().IncludeTransactions(). 
+ ForTransaction(tt.Ctx, fixture.InnerHash).Fetch(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Len(innerTransactions, 1) + tt.Assert.Equal(innerOps, outerOps) + + for _, tx := range append(outerTransactions, innerTransactions...) { + tt.Assert.True(byOuterhash.CreatedAt.Equal(tx.CreatedAt)) + tt.Assert.True(byOuterhash.LedgerCloseTime.Equal(tx.LedgerCloseTime)) + byOuterhash.CreatedAt = tx.CreatedAt + byOuterhash.LedgerCloseTime = tx.LedgerCloseTime + tt.Assert.Equal(byOuterhash, byInnerHash) + } + + var outerEffects, innerEffects []Effect + err = q.Effects().ForTransaction(tt.Ctx, fixture.OuterHash).Select(tt.Ctx, &outerEffects) + tt.Assert.NoError(err) + tt.Assert.Len(outerEffects, 1) + + err = q.Effects().ForTransaction(tt.Ctx, fixture.InnerHash).Select(tt.Ctx, &innerEffects) + tt.Assert.NoError(err) + tt.Assert.Equal(outerEffects, innerEffects) +} diff --git a/services/horizon/internal/db2/history/trust_lines.go b/services/horizon/internal/db2/history/trust_lines.go new file mode 100644 index 0000000000..9773f06fd8 --- /dev/null +++ b/services/horizon/internal/db2/history/trust_lines.go @@ -0,0 +1,132 @@ +package history + +import ( + "context" + + sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// IsAuthorized returns true if issuer has authorized account to perform +// transactions with its credit +func (trustLine TrustLine) IsAuthorized() bool { + return xdr.TrustLineFlags(trustLine.Flags).IsAuthorized() +} + +// IsAuthorizedToMaintainLiabilities returns true if issuer has authorized the account to maintain +// liabilities with its credit +func (trustLine TrustLine) IsAuthorizedToMaintainLiabilities() bool { + return xdr.TrustLineFlags(trustLine.Flags).IsAuthorizedToMaintainLiabilitiesFlag() +} + +// IsClawbackEnabled returns true if issuer has authorized the account to claw +// assets back +func (trustLine TrustLine) IsClawbackEnabled() bool { + return xdr.TrustLineFlags(trustLine.Flags).IsClawbackEnabledFlag() +} + +func (q *Q) CountTrustLines(ctx context.Context) (int, error) { + sql := sq.Select("count(*)").From("trust_lines") + + var count int + if err := q.Get(ctx, &count, sql); err != nil { + return 0, errors.Wrap(err, "could not run select query") + } + + return count, nil +} + +func (q *Q) GetSortedTrustLinesByAccountID(ctx context.Context, id string) ([]TrustLine, error) { + return q.GetSortedTrustLinesByAccountIDs(ctx, []string{id}) +} + +// GetTrustLinesByKeys loads a row from the `trust_lines` table, selected by multiple keys. +func (q *Q) GetTrustLinesByKeys(ctx context.Context, ledgerKeys []string) ([]TrustLine, error) { + var trustLines []TrustLine + sql := selectTrustLines.Where(map[string]interface{}{"trust_lines.ledger_key": ledgerKeys}) + err := q.Select(ctx, &trustLines, sql) + return trustLines, err +} + +// UpsertTrustLines upserts a batch of trust lines in the trust lines table. +// There's currently no limit of the number of trust lines this method can +// accept other than 2GB limit of the query string length what should be enough +// for each ledger with the current limits. 
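+// A minimal usage sketch (illustrative only; `q` is assumed to be an open *Q
+// session and the TrustLine values eurLine/usdLine are hypothetical):
+//
+//	lines := []TrustLine{eurLine, usdLine}
+//	if err := q.UpsertTrustLines(ctx, lines); err != nil {
+//		return errors.Wrap(err, "upserting trust lines")
+//	}
+//
+// Upserting an empty slice is a no-op, and rows conflict on ledger_key, so an
+// existing row with the same key is updated in place rather than duplicated.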
+func (q *Q) UpsertTrustLines(ctx context.Context, trustLines []TrustLine) error { + + var accountID, assetType, assetIssuer, assetCode, balance, ledgerKey, limit, liquidityPoolID, buyingLiabilities, + sellingLiabilites, flags, lastModifiedLedger, sponsor []interface{} + + for _, trustLine := range trustLines { + accountID = append(accountID, trustLine.AccountID) + assetCode = append(assetCode, trustLine.AssetCode) + assetIssuer = append(assetIssuer, trustLine.AssetIssuer) + assetType = append(assetType, trustLine.AssetType) + balance = append(balance, trustLine.Balance) + ledgerKey = append(ledgerKey, trustLine.LedgerKey) + limit = append(limit, trustLine.Limit) + liquidityPoolID = append(liquidityPoolID, trustLine.LiquidityPoolID) + buyingLiabilities = append(buyingLiabilities, trustLine.BuyingLiabilities) + sellingLiabilites = append(sellingLiabilites, trustLine.SellingLiabilities) + flags = append(flags, trustLine.Flags) + lastModifiedLedger = append(lastModifiedLedger, trustLine.LastModifiedLedger) + sponsor = append(sponsor, trustLine.Sponsor) + } + + upsertFields := []upsertField{ + {"account_id", "character varying(56)", accountID}, + {"asset_code", "character varying(12)", assetCode}, + {"asset_issuer", "character varying(56)", assetIssuer}, + {"asset_type", "int", assetType}, + {"balance", "bigint", balance}, + {"ledger_key", "character varying(150)", ledgerKey}, + {"trust_line_limit", "bigint", limit}, + {"liquidity_pool_id", "text", liquidityPoolID}, + {"buying_liabilities", "bigint", buyingLiabilities}, + {"selling_liabilities", "bigint", sellingLiabilites}, + {"flags", "int", flags}, + {"last_modified_ledger", "int", lastModifiedLedger}, + {"sponsor", "text", sponsor}, + } + + return q.upsertRows(ctx, "trust_lines", "ledger_key", upsertFields) +} + +// RemoveTrustLine deletes a row in the trust lines table. +// Returns number of rows affected and error. +func (q *Q) RemoveTrustLines(ctx context.Context, ledgerKeys []string) (int64, error) { + sql := sq.Delete("trust_lines"). + Where(map[string]interface{}{"ledger_key": ledgerKeys}) + result, err := q.Exec(ctx, sql) + if err != nil { + return 0, err + } + + return result.RowsAffected() +} + +// GetSortedTrustLinesByAccountIDs loads trust lines for a list of accounts ID, ordered by asset and issuer +func (q *Q) GetSortedTrustLinesByAccountIDs(ctx context.Context, id []string) ([]TrustLine, error) { + var data []TrustLine + sql := selectTrustLines.Where(sq.Eq{"account_id": id}). 
+ OrderBy("asset_code", "asset_issuer", "liquidity_pool_id") + err := q.Select(ctx, &data, sql) + return data, err +} + +var selectTrustLines = sq.Select(` + ledger_key, + account_id, + asset_type, + asset_issuer, + asset_code, + COALESCE(liquidity_pool_id, '') as liquidity_pool_id, + balance, + trust_line_limit, + buying_liabilities, + selling_liabilities, + flags, + last_modified_ledger, + sponsor +`).From("trust_lines") diff --git a/services/horizon/internal/db2/history/trust_lines_test.go b/services/horizon/internal/db2/history/trust_lines_test.go new file mode 100644 index 0000000000..3bc39d0e13 --- /dev/null +++ b/services/horizon/internal/db2/history/trust_lines_test.go @@ -0,0 +1,275 @@ +package history + +import ( + "testing" + + "github.com/guregu/null" + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" +) + +var ( + trustLineIssuer = "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H" + + eurTrustLine = TrustLine{ + AccountID: account1.AccountID, + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer, + AssetCode: "EUR", + Balance: 30000, + LedgerKey: "abcdef", + Limit: 223456789, + LiquidityPoolID: "", + BuyingLiabilities: 3, + SellingLiabilities: 4, + Flags: 1, + LastModifiedLedger: 1234, + Sponsor: null.StringFrom(sponsor), + } + + usdTrustLine = TrustLine{ + AccountID: "GCYVFGI3SEQJGBNQQG7YCMFWEYOHK3XPVOVPA6C566PXWN4SN7LILZSM", + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer, + AssetCode: "USD", + Balance: 10000, + LedgerKey: "jhkli", + Limit: 123456789, + LiquidityPoolID: "", + BuyingLiabilities: 1, + SellingLiabilities: 2, + Flags: 0, + LastModifiedLedger: 1235, + Sponsor: null.String{}, + } + + usdTrustLine2 = TrustLine{ + AccountID: "GBYSBDAJZMHL5AMD7QXQ3JEP3Q4GLKADWIJURAAHQALNAWD6Z5XF2RAC", + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer, + AssetCode: "USD", + Balance: 10000, + LedgerKey: "lkjpoi", + Limit: 123456789, + LiquidityPoolID: "", + BuyingLiabilities: 1, + SellingLiabilities: 2, + Flags: 0, + LastModifiedLedger: 1234, + Sponsor: null.String{}, + } + + poolShareTrustLine = TrustLine{ + AccountID: "GBB4JST32UWKOLGYYSCEYBHBCOFL2TGBHDVOMZP462ET4ZRD4ULA7S2L", + AssetType: xdr.AssetTypeAssetTypePoolShare, + Balance: 976, + LedgerKey: "mlmn908", + Limit: 87654, + LiquidityPoolID: "mpolnbv", + Flags: 1, + LastModifiedLedger: 1235, + Sponsor: null.String{}, + } +) + +func TestIsAuthorized(t *testing.T) { + tt := assert.New(t) + tl := TrustLine{ + Flags: 1, + } + tt.True(tl.IsAuthorized()) + + tl = TrustLine{ + Flags: 0, + } + tt.False(tl.IsAuthorized()) +} +func TestInsertTrustLine(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + tt.Assert.NoError(q.UpsertTrustLines(tt.Ctx, []TrustLine{eurTrustLine, usdTrustLine})) + + lines, err := q.GetTrustLinesByKeys(tt.Ctx, []string{eurTrustLine.LedgerKey, usdTrustLine.LedgerKey}) + tt.Assert.NoError(err) + tt.Assert.Len(lines, 2) + + tt.Assert.Equal(null.StringFrom(sponsor), lines[0].Sponsor) + tt.Assert.Equal([]TrustLine{eurTrustLine, usdTrustLine}, lines) +} + +func TestUpdateTrustLine(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + tt.Assert.NoError(q.UpsertTrustLines(tt.Ctx, []TrustLine{eurTrustLine})) + + lines, err := q.GetTrustLinesByKeys(tt.Ctx, []string{eurTrustLine.LedgerKey}) 
+ assert.NoError(t, err) + assert.Len(t, lines, 1) + assert.Equal(t, eurTrustLine, lines[0]) + + modifiedTrustLine := eurTrustLine + modifiedTrustLine.Balance = 30000 + modifiedTrustLine.Sponsor = null.String{} + + tt.Assert.NoError(q.UpsertTrustLines(tt.Ctx, []TrustLine{modifiedTrustLine})) + lines, err = q.GetTrustLinesByKeys(tt.Ctx, []string{eurTrustLine.LedgerKey}) + assert.NoError(t, err) + assert.Len(t, lines, 1) + assert.Equal(t, modifiedTrustLine, lines[0]) +} + +func TestUpsertTrustLines(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + // Upserting nothing is no op + err := q.UpsertTrustLines(tt.Ctx, []TrustLine{}) + assert.NoError(t, err) + + err = q.UpsertTrustLines(tt.Ctx, []TrustLine{eurTrustLine, usdTrustLine}) + assert.NoError(t, err) + + lines, err := q.GetTrustLinesByKeys(tt.Ctx, []string{eurTrustLine.LedgerKey}) + assert.NoError(t, err) + assert.Len(t, lines, 1) + + lines, err = q.GetTrustLinesByKeys(tt.Ctx, []string{usdTrustLine.LedgerKey}) + assert.NoError(t, err) + assert.Len(t, lines, 1) + + modifiedTrustLine := eurTrustLine + modifiedTrustLine.Balance = 30000 + + err = q.UpsertTrustLines(tt.Ctx, []TrustLine{modifiedTrustLine}) + assert.NoError(t, err) + + lines, err = q.GetTrustLinesByKeys(tt.Ctx, []string{eurTrustLine.LedgerKey}) + assert.NoError(t, err) + assert.Len(t, lines, 1) + + assert.Equal(t, modifiedTrustLine, lines[0]) + assert.Equal(t, uint32(1234), lines[0].LastModifiedLedger) + + lines, err = q.GetTrustLinesByKeys(tt.Ctx, []string{usdTrustLine.LedgerKey}) + assert.NoError(t, err) + assert.Len(t, lines, 1) + + assert.Equal(t, usdTrustLine, lines[0]) + assert.Equal(t, uint32(1235), lines[0].LastModifiedLedger) +} + +func TestRemoveTrustLine(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertTrustLines(tt.Ctx, []TrustLine{eurTrustLine}) + assert.NoError(t, err) + + rows, err := q.RemoveTrustLines(tt.Ctx, []string{eurTrustLine.LedgerKey}) + assert.NoError(t, err) + assert.Equal(t, int64(1), rows) + + lines, err := q.GetTrustLinesByKeys(tt.Ctx, []string{eurTrustLine.LedgerKey}) + assert.NoError(t, err) + assert.Len(t, lines, 0) + + // Doesn't exist anymore + rows, err = q.RemoveTrustLines(tt.Ctx, []string{eurTrustLine.LedgerKey}) + assert.NoError(t, err) + assert.Equal(t, int64(0), rows) +} + +func TestGetSortedTrustLinesByAccountsID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + err := q.UpsertTrustLines(tt.Ctx, []TrustLine{eurTrustLine, usdTrustLine, usdTrustLine2, poolShareTrustLine}) + assert.NoError(t, err) + + ids := []string{ + eurTrustLine.AccountID, + usdTrustLine.AccountID, + poolShareTrustLine.AccountID, + } + + records, err := q.GetSortedTrustLinesByAccountIDs(tt.Ctx, ids) + tt.Assert.NoError(err) + tt.Assert.Len(records, 3) + + m := map[string]TrustLine{ + eurTrustLine.AccountID: eurTrustLine, + usdTrustLine.AccountID: usdTrustLine, + poolShareTrustLine.AccountID: poolShareTrustLine, + } + + tt.Assert.Equal(poolShareTrustLine, records[0]) + delete(m, poolShareTrustLine.AccountID) + + lastAssetCode := "" + lastIssuer := records[1].AssetIssuer + for _, record := range records[1:] { + tt.Assert.LessOrEqual(lastAssetCode, record.AssetCode) + lastAssetCode = record.AssetCode + tt.Assert.LessOrEqual(lastIssuer, record.AssetIssuer) + lastIssuer = record.AssetIssuer + xtl, ok := 
m[record.AccountID] + tt.Assert.True(ok) + tt.Assert.Equal(record, xtl) + delete(m, record.AccountID) + } + + tt.Assert.Len(m, 0) +} + +func TestGetTrustLinesByAccountID(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + records, err := q.GetSortedTrustLinesByAccountID(tt.Ctx, eurTrustLine.AccountID) + tt.Assert.NoError(err) + tt.Assert.Empty(records) + + err = q.UpsertTrustLines(tt.Ctx, []TrustLine{eurTrustLine}) + tt.Assert.NoError(err) + + records, err = q.GetSortedTrustLinesByAccountID(tt.Ctx, eurTrustLine.AccountID) + tt.Assert.NoError(err) + + tt.Assert.Equal(eurTrustLine, records[0]) + +} + +func TestAssetsForAddress(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &Q{tt.HorizonSession()} + + ledgerEntries := []AccountEntry{account1} + + err := q.UpsertAccounts(tt.Ctx, ledgerEntries) + assert.NoError(t, err) + + err = q.UpsertTrustLines(tt.Ctx, []TrustLine{eurTrustLine}) + tt.Assert.NoError(err) + + records, err := q.GetSortedTrustLinesByAccountID(tt.Ctx, eurTrustLine.AccountID) + tt.Assert.NoError(err) + tt.Assert.Equal(eurTrustLine, records[0]) +} diff --git a/services/horizon/internal/db2/main.go b/services/horizon/internal/db2/main.go new file mode 100644 index 0000000000..5ec81eb0f3 --- /dev/null +++ b/services/horizon/internal/db2/main.go @@ -0,0 +1,11 @@ +// Package db2 is the replacement for db. It provides low level db connection +// and query capabilities. +package db2 + +// PageQuery represents a portion of a Query struct concerned with paging +// through a large dataset. +type PageQuery struct { + Cursor string + Order string + Limit uint64 +} diff --git a/services/horizon/internal/db2/page_query.go b/services/horizon/internal/db2/page_query.go new file mode 100644 index 0000000000..7113d1d683 --- /dev/null +++ b/services/horizon/internal/db2/page_query.go @@ -0,0 +1,284 @@ +package db2 + +import ( + "fmt" + "math" + "strconv" + "strings" + + sq "github.com/Masterminds/squirrel" + jet "github.com/go-jet/jet/v2/postgres" + + "github.com/stellar/go/support/errors" +) + +const ( + // DefaultPageSize is the default page size for db queries + DefaultPageSize = 10 + // MaxPageSize is the max page size for db queries + MaxPageSize = 200 + + // OrderAscending is used to indicate an ascending order in request params + OrderAscending = "asc" + + // OrderDescending is used to indicate an descending order in request params + OrderDescending = "desc" + + // DefaultPairSep is the default separator used to separate two numbers for CursorInt64Pair + DefaultPairSep = "-" +) + +var ( + // ErrInvalidOrder is an error that occurs when a user-provided order string + // is invalid + ErrInvalidOrder = &InvalidFieldError{"order"} + // ErrInvalidLimit is an error that occurs when a user-provided limit num + // is invalid + ErrInvalidLimit = &InvalidFieldError{"limit"} + // ErrInvalidCursor is an error that occurs when a user-provided cursor string + // is invalid + ErrInvalidCursor = &InvalidFieldError{"cursor"} +) + +type InvalidFieldError struct { + Name string +} + +func (e *InvalidFieldError) Error() string { + return fmt.Sprintf("%s: invalid value", e.Name) +} + +// ApplyTo returns a new SelectBuilder after applying the paging effects of +// `p` to `sql`. This method provides the default case for paging: int64 +// cursor-based paging by an id column. 
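+//
+// A minimal sketch of the default flow (illustrative; the builder, table and
+// column names below are hypothetical):
+//
+//	pq, err := NewPageQuery("", false, OrderAscending, DefaultPageSize)
+//	if err != nil {
+//		return err
+//	}
+//	query := sq.Select("*").From("history_ledgers hl")
+//	query, err = pq.ApplyTo(query, "hl.id")
+//
+// ApplyTo parses the cursor with CursorInt64 (0 for an empty ascending
+// cursor, math.MaxInt64 for an empty descending one) and delegates to
+// ApplyToUsingCursor, which applies the LIMIT, ORDER BY and the > / <
+// cursor condition.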
+func (p PageQuery) ApplyTo( + sql sq.SelectBuilder, + col string, +) (sq.SelectBuilder, error) { + cursor, err := p.CursorInt64() + if err != nil { + return sql, err + } + + return p.ApplyToUsingCursor(sql, col, cursor) +} + +// ApplyRawTo applies the raw PageQuery.Cursor cursor to the builder +func (p PageQuery) ApplyRawTo( + sql sq.SelectBuilder, + col string, +) (sq.SelectBuilder, error) { + return p.ApplyToUsingCursor(sql, col, p.Cursor) +} + +func applyCursor(cursor string, cursorClause jet.BoolExpression, whereClause jet.BoolExpression, sql jet.SelectStatement) jet.SelectStatement { + if cursor != "" { + return sql.WHERE(whereClause.AND(cursorClause)) + } + return sql +} + +func (p PageQuery) ApplyToJetUsingCursor( + sql jet.SelectStatement, + col jet.Column, + gt jet.BoolExpression, + lt jet.BoolExpression, + where jet.BoolExpression, +) (jet.SelectStatement, error) { + sql = sql.LIMIT(int64(p.Limit)) + + switch p.Order { + case "asc": + sql = sql.ORDER_BY(col.ASC()) + sql = applyCursor(p.Cursor, gt, where, sql) + case "desc": + sql = sql.ORDER_BY(col.DESC()) + sql = applyCursor(p.Cursor, lt, where, sql) + default: + return sql, errors.Errorf("invalid order: %s", p.Order) + } + + return sql, nil +} + +// ApplyToUsingCursor returns a new SelectBuilder after applying the paging effects of +// `p` to `sql`. This method allows any type of cursor by a single column +func (p PageQuery) ApplyToUsingCursor( + sql sq.SelectBuilder, + col string, + cursor interface{}, +) (sq.SelectBuilder, error) { + sql = sql.Limit(p.Limit) + + switch p.Order { + case "asc": + if cursor == "" { + sql = sql. + OrderBy(fmt.Sprintf("%s asc", col)) + } else { + sql = sql. + Where(fmt.Sprintf("%s > ?", col), cursor). + OrderBy(fmt.Sprintf("%s asc", col)) + } + case "desc": + if cursor == "" { + sql = sql. + OrderBy(fmt.Sprintf("%s desc", col)) + } else { + sql = sql. + Where(fmt.Sprintf("%s < ?", col), cursor). + OrderBy(fmt.Sprintf("%s desc", col)) + } + default: + return sql, errors.Errorf("invalid order: %s", p.Order) + } + + return sql, nil +} + +// Invert returns a new PageQuery whose order is reversed +func (p PageQuery) Invert() PageQuery { + switch p.Order { + case OrderAscending: + p.Order = OrderDescending + case OrderDescending: + p.Order = OrderAscending + } + + return p +} + +// CursorInt64 parses this query's Cursor string as an int64 +func (p PageQuery) CursorInt64() (int64, error) { + if p.Cursor == "" { + switch p.Order { + case OrderAscending: + return 0, nil + case OrderDescending: + return math.MaxInt64, nil + default: + return 0, ErrInvalidOrder + } + } + + i, err := strconv.ParseInt(p.Cursor, 10, 64) + + if err != nil { + return 0, ErrInvalidCursor + } + + if i < 0 { + return 0, ErrInvalidCursor + } + + return i, nil + +} + +// CursorInt64Pair parses this query's Cursor string as two int64s, separated by the provided separator +func (p PageQuery) CursorInt64Pair(sep string) (l int64, r int64, err error) { + if p.Cursor == "" { + switch p.Order { + case OrderAscending: + l = 0 + r = 0 + case OrderDescending: + l = math.MaxInt64 + r = math.MaxInt64 + default: + err = ErrInvalidOrder + } + return + } + + parts := strings.SplitN(p.Cursor, sep, 2) + + // In the event that the cursor is only a single number + // we use maxInt as the second element. This ensures that + // cursors containing a single element skip past all entries + // specified by the first element. 
+ // + // As an example, this behavior ensures that an effect cursor + // specified using only a ledger sequence will properly exclude + // all effects originated in the sequence provided. + if len(parts) != 2 { + // NOTE(scott): we work around a build issue here which cases an overflow + // when building for ARM. We assign the untyped constant explicitly to a + // int64. + var max int64 = math.MaxInt64 + parts = append(parts, fmt.Sprintf("%d", max)) + } + + l, err = strconv.ParseInt(parts[0], 10, 64) + if err != nil { + err = errors.Wrap(err, "first component unparseable") + return + } + + r, err = strconv.ParseInt(parts[1], 10, 64) + if err != nil { + err = errors.Wrap(err, "second component unparseable") + return + } + + if l < 0 || r < 0 { + err = ErrInvalidCursor + } + + return +} + +// NewPageQuery creates a new PageQuery struct, ensuring the order, limit, and +// cursor are set to the appropriate defaults and are valid. +func NewPageQuery( + cursor string, + validateCursor bool, + order string, + limit uint64, +) (result PageQuery, err error) { + + // Set order + switch order { + case "": + result.Order = OrderAscending + case OrderAscending, OrderDescending: + result.Order = order + default: + err = ErrInvalidOrder + return + } + + // Set cursor + result.Cursor = cursor + if validateCursor { + _, _, err = result.CursorInt64Pair(DefaultPairSep) + if err != nil { + err = ErrInvalidCursor + return + } + } + + // Set limit + switch { + case limit == 0: + err = ErrInvalidLimit + return + case limit > MaxPageSize: + err = ErrInvalidLimit + return + default: + result.Limit = limit + } + + return +} + +// MustPageQuery behaves as NewPageQuery, but panics upon error +func MustPageQuery(cursor string, validateCursor bool, order string, limit uint64) PageQuery { + r, err := NewPageQuery(cursor, validateCursor, order, limit) + if err != nil { + panic(err) + } + + return r +} diff --git a/services/horizon/internal/db2/page_query_test.go b/services/horizon/internal/db2/page_query_test.go new file mode 100644 index 0000000000..be8d1d96bf --- /dev/null +++ b/services/horizon/internal/db2/page_query_test.go @@ -0,0 +1,99 @@ +package db2 + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPageQuery(t *testing.T) { + assert := assert.New(t) + require := require.New(t) + + var p PageQuery + var err error + + p, err = NewPageQuery("10", true, "desc", 15) + require.NoError(err) + assert.Equal("10", p.Cursor) + assert.Equal("desc", p.Order) + assert.Equal(uint64(15), p.Limit) + + // Defaults + p, err = NewPageQuery("", true, "", 1) + require.NoError(err) + assert.Equal("asc", p.Order) + c, err := p.CursorInt64() + require.NoError(err) + assert.Equal(int64(0), c) + assert.Equal(uint64(1), p.Limit) + p, err = NewPageQuery("", true, "desc", 1) + require.NoError(err) + c, err = p.CursorInt64() + require.NoError(err) + assert.Equal(int64(9223372036854775807), c) + + // Max + p, err = NewPageQuery("", true, "", 200) + require.NoError(err) + + // Error states + _, err = NewPageQuery("", true, "foo", 1) + assert.Error(err) + _, err = NewPageQuery("", true, "", 0) + assert.Error(err) + _, err = NewPageQuery("", true, "", 201) + assert.Error(err) + +} + +func TestPageQuery_CursorInt64(t *testing.T) { + assertInstance := assert.New(t) + requireInstance := require.New(t) + + var p PageQuery + var err error + + p = MustPageQuery("1231-4456", false, "asc", 1) + l, r, err := p.CursorInt64Pair("-") + requireInstance.NoError(err) + 
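+ // The cursor "1231-4456" splits on the provided "-" separator into the pair (1231, 4456).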
assertInstance.Equal(int64(1231), l) + assertInstance.Equal(int64(4456), r) + + // Defaults + p = MustPageQuery("", false, "asc", 1) + l, r, err = p.CursorInt64Pair("-") + requireInstance.NoError(err) + assertInstance.Equal(int64(0), l) + assertInstance.Equal(int64(0), r) + p = MustPageQuery("", false, "desc", 1) + l, r, err = p.CursorInt64Pair("-") + requireInstance.NoError(err) + assertInstance.Equal(int64(math.MaxInt64), l) + assertInstance.Equal(int64(math.MaxInt64), r) + p = MustPageQuery("0", false, "", 1) + _, r, err = p.CursorInt64Pair("-") + requireInstance.NoError(err) + assertInstance.Equal(int64(math.MaxInt64), r) + + // Errors + p = MustPageQuery("123-foo", false, "", 1) + _, _, err = p.CursorInt64Pair("-") + assertInstance.Error(err) + p = MustPageQuery("foo-123", false, "", 1) + _, _, err = p.CursorInt64Pair("-") + assertInstance.Error(err) + p = MustPageQuery("-1:123", false, "", 1) + _, _, err = p.CursorInt64Pair("-") + assertInstance.Error(err) + p = MustPageQuery("111:-123", false, "", 1) + _, _, err = p.CursorInt64Pair("-") + assertInstance.Error(err) + + // Regression: -23667108046966785 + p = MustPageQuery("-23667108046966785", false, "asc", 100) + _, err = p.CursorInt64() + assertInstance.Error(err) +} diff --git a/services/horizon/internal/db2/schema/bindata.go b/services/horizon/internal/db2/schema/bindata.go new file mode 100644 index 0000000000..446a848a05 --- /dev/null +++ b/services/horizon/internal/db2/schema/bindata.go @@ -0,0 +1,1492 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// migrations/10_add_trades_price.sql (1.22kB) +// migrations/11_add_trades_account_index.sql (273B) +// migrations/12_asset_stats_amount_string.sql (197B) +// migrations/13_trade_offer_ids.sql (484B) +// migrations/14_fix_asset_toml_field.sql (156B) +// migrations/15_ledger_failed_txs.sql (333B) +// migrations/16_ingest_failed_transactions.sql (509B) +// migrations/17_transaction_fee_paid.sql (287B) +// migrations/18_account_for_signers.sql (481B) +// migrations/19_offers.sql (1.064kB) +// migrations/1_initial_schema.sql (9.977kB) +// migrations/20_account_for_signer_index.sql (140B) +// migrations/21_trades_remove_zero_amount_constraints.sql (765B) +// migrations/22_trust_lines.sql (955B) +// migrations/23_exp_asset_stats.sql (883B) +// migrations/24_accounts.sql (1.402kB) +// migrations/25_expingest_rename_columns.sql (641B) +// migrations/26_exp_history_ledgers.sql (209B) +// migrations/27_exp_history_transactions.sql (630B) +// migrations/28_exp_history_operations.sql (439B) +// migrations/29_exp_history_assets.sql (206B) +// migrations/2_index_participants_by_toid.sql (277B) +// migrations/30_exp_history_trades.sql (2.297kB) +// migrations/31_exp_history_effects.sql (209B) +// migrations/32_drop_exp_history_tables.sql (3.826kB) +// migrations/33_remove_unused.sql (860B) +// migrations/34_fee_bump_transactions.sql (863B) +// migrations/35_drop_participant_id.sql (306B) +// migrations/36_deleted_offers.sql (956B) +// migrations/37_add_tx_set_operation_count_to_ledgers.sql (176B) +// migrations/38_add_constraints.sql (7.33kB) +// migrations/39_claimable_balances.sql (750B) +// migrations/39_history_trades_indices.sql (183B) +// migrations/3_use_sequence_in_history_accounts.sql (447B) +// migrations/40_fix_inner_tx_max_fee_constraint.sql (392B) +// migrations/41_add_sponsor_to_state_tables.sql (800B) +// migrations/42_add_num_sponsored_and_num_sponsoring_to_accounts.sql (276B) +// migrations/43_add_claimable_balances_flags.sql (145B) +// 
migrations/44_asset_stat_accounts_and_balances.sql (439B) +// migrations/45_add_claimable_balances_history.sql (2.163kB) +// migrations/46_add_muxed_accounts.sql (465B) +// migrations/47_precompute_trade_aggregations.sql (1.687kB) +// migrations/48_rebuild_trade_aggregations.sql (1.243kB) +// migrations/49_add_brin_index_trade_aggregations.sql (206B) +// migrations/4_add_protocol_version.sql (188B) +// migrations/50_liquidity_pools.sql (3.876kB) +// migrations/51_remove_ht_unused_indexes.sql (321B) +// migrations/52_add_trade_type_index.sql (424B) +// migrations/5_create_trades_table.sql (1.1kB) +// migrations/6_create_assets_table.sql (366B) +// migrations/7_modify_trades_table.sql (2.303kB) +// migrations/8_add_aggregators.sql (907B) +// migrations/8_create_asset_stats_table.sql (441B) +// migrations/9_add_header_xdr.sql (161B) + +package schema + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _migrations10_add_trades_priceSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x53\x51\x8b\xda\x4c\x14\x7d\x9f\x5f\x71\x1e\xf2\xa0\x7c\xfa\x15\x7d\xb5\x5b\x18\x93\xdb\x34\x10\xa3\x9d\x99\xd0\x16\x11\x99\x35\x63\x76\xc0\x4c\x64\x92\xc5\xdd\x7f\x5f\xb2\xba\xae\x2d\xec\x62\x5b\x68\xdf\x4e\x6e\xce\x5c\xce\xb9\x87\x33\x1c\xe2\xbf\xca\x96\x5e\xb7\x06\xf9\x9e\xb1\xe1\x10\xbc\x28\xe0\x75\x6b\x6b\xa7\x77\xd8\x7b\xbb\x31\x68\x6b\xb4\x5e\x17\xa6\x41\xab\x6f\x77\x86\xf1\x54\x91\x80\xe2\xd3\x94\x70\x67\x9b\xb6\xf6\x8f\xeb\x13\x81\x47\xd1\xf1\xd1\xda\x61\x9a\xc4\x49\xa6\x26\xd7\xd1\x8b\x33\xbd\x13\xa1\xcb\xd2\x9b\xb2\x53\xb5\xbd\x77\x9b\x4e\x0c\xb6\xb5\xc7\xd6\xba\xc2\xba\x12\x95\x75\xb6\x3a\xcb\x3b\xdc\x19\x77\x82\xb6\x81\x37\x7b\x6f\x1a\xe3\x5a\x53\x40\x37\xd0\x0e\xda\x7b\xfd\x88\x7a\x8b\xf6\x50\xc3\xec\x4c\x65\x5c\xdb\x60\xe9\x06\xc5\x8a\x85\x82\xb8\x22\xcc\x05\x04\x2d\x52\x1e\x12\x3e\xe6\x59\xa8\x92\x79\x86\xfd\xfd\xed\xce\x6e\xfe\xaf\xac\x5b\x1f\x25\xea\xb2\x44\x0f\x59\x3e\x23\x91\x84\xcb\xd5\xe0\x05\xf6\x19\x20\x48\xe5\x22\x93\x2f\x43\xa4\x3c\x8b\x73\x1e\x13\xe4\xe7\x14\xc9\x6c\x96\x1f\x4f\x20\x95\x48\x42\x05\x2e\x11\x04\x90\x94\x52\xa8\xd0\x63\x40\xc8\x25\xe1\xcb\x27\xca\x10\x8c\x96\xa3\xd5\xbb\x60\xb4\x1c\xaf\xde\x07\xe3\x27\x3c\x5e\x8e\x57\x50\xc7\x9f\xa0\x54\x12\x82\x31\x28\x8b\xfa\x08\x82\x09\x7b\xb6\xc1\xe3\x58\x50\xdc\xa1\x93\xf8\x59\x92\xad\x17\x22\x09\x09\x3d\x26\x3b\x67\xb8\xc1\x22\x9f\xa6\x49\xf8\xa3\xaf\x01\x9b\x72\x49\xea\xdb\x82\x70\x73\xe1\x90\xc9\x9f\x47\xac\x7f\x5d\x40\xfa\xe1\xef\x05\xa4\x1f\xfe\x61\x40\x1f\xfe\x30\x20\xfe\xf5\xd5\x80\x2e\x7d\xfd\x72\x40\xe7\x5a\x47\xf5\xc1\xbd\x55\xc1\x48\xcc\x17\xcf\x95\x7d\xb3\xab\x17\xc4\x62\xc2\xd8\xd3\xe7\xef\x94\x05\xfd\xee\x92\x21\x8f\x68\xf2\xca\x92\x2b\x02\xbd\x58\xf2\x3d\x00\x00\xff\xff\x19\x48\x96\x1e\xc4\x04\x00\x00") + +func migrations10_add_trades_priceSqlBytes() ([]byte, error) { + return bindataRead( + _migrations10_add_trades_priceSql, + "migrations/10_add_trades_price.sql", + ) +} + +func migrations10_add_trades_priceSql() (*asset, error) { + bytes, err := migrations10_add_trades_priceSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/10_add_trades_price.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x85, 0x1e, 0x7f, 0x86, 0xc2, 0x2c, 0xc9, 0x91, 0xe2, 0x3f, 0x1a, 0xb2, 0x15, 0xea, 0x6c, 0x5, 0xfa, 0x1f, 0x99, 0xd2, 0xbb, 0x1, 0x5e, 0x75, 0x91, 0x8b, 0xb, 0x46, 0xa, 0x6, 0xbc, 0x61}} + return a, nil +} + +var _migrations11_add_trades_account_indexSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\x0e\x72\x75\x0c\x71\x55\xf0\xf4\x73\x71\x8d\x50\xc8\x28\x29\x4a\x89\x4f\xaa\x8c\x4f\x4a\x2c\x4e\x8d\x4f\x4c\x4e\xce\x2f\xcd\x2b\x51\xf0\xf7\x53\xc8\xc8\x2c\x2e\xc9\x2f\xaa\x8c\x2f\x29\x4a\x4c\x49\x2d\x56\x08\x0d\xf6\xf4\x73\x57\x70\x0a\x09\x72\x75\xd5\x40\x56\x1a\x9f\x99\xa2\x69\x8d\xdd\x44\xb0\x7c\x6a\x11\x91\x86\xa2\xa9\x86\x98\xcb\x85\xec\x74\x97\xfc\xf2\x3c\x2e\x2e\x97\x20\xff\x00\x3c\x4e\xb7\xc6\xa6\x00\xcd\x6c\x6b\x40\x00\x00\x00\xff\xff\x57\x79\x94\x68\x11\x01\x00\x00") + +func migrations11_add_trades_account_indexSqlBytes() ([]byte, error) { + return bindataRead( + _migrations11_add_trades_account_indexSql, + "migrations/11_add_trades_account_index.sql", + ) +} + +func migrations11_add_trades_account_indexSql() (*asset, error) { + bytes, err := migrations11_add_trades_account_indexSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: 
"migrations/11_add_trades_account_index.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x98, 0xa4, 0x60, 0x40, 0x6b, 0xa6, 0x5e, 0xcc, 0x67, 0xb3, 0x85, 0x82, 0xce, 0x39, 0xc6, 0xbb, 0x2c, 0xa7, 0x2e, 0xb6, 0x9a, 0xd, 0xba, 0x91, 0x28, 0x80, 0x77, 0x46, 0x8c, 0x67, 0x55, 0x9f}} + return a, nil +} + +var _migrations12_asset_stats_amount_stringSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x2e\x4e\x2d\x89\x2f\x2e\x49\x2c\x29\x86\x8a\x3b\xfb\xfb\x84\xfa\xfa\x29\x24\xe6\xe6\x97\xe6\x95\x28\x04\xbb\x86\x28\xb8\x38\x86\x38\x2a\x84\x44\x06\xb8\x2a\x24\x67\x24\x16\x25\x26\x97\xa4\x16\x29\x94\x25\x16\x55\x66\xe6\xa5\x5b\x73\x71\x21\x1b\xee\x92\x5f\x9e\x47\x81\xf1\x49\x99\xe9\x99\x79\x25\x0a\xa1\xc1\x9e\x7e\xee\x50\x15\x56\x56\x10\x41\x6b\x2e\x40\x00\x00\x00\xff\xff\x00\x82\x0f\xf1\xc5\x00\x00\x00") + +func migrations12_asset_stats_amount_stringSqlBytes() ([]byte, error) { + return bindataRead( + _migrations12_asset_stats_amount_stringSql, + "migrations/12_asset_stats_amount_string.sql", + ) +} + +func migrations12_asset_stats_amount_stringSql() (*asset, error) { + bytes, err := migrations12_asset_stats_amount_stringSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/12_asset_stats_amount_string.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3a, 0xd1, 0x4c, 0x37, 0xe7, 0xfd, 0xdb, 0x3a, 0xf2, 0x37, 0x9b, 0x8d, 0x77, 0x99, 0x61, 0x15, 0x10, 0x51, 0xe5, 0xe5, 0x7f, 0xec, 0x7e, 0x7, 0xe5, 0x18, 0x8a, 0xf2, 0xb4, 0x66, 0x17, 0x60}} + return a, nil +} + +var _migrations13_trade_offer_idsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x90\xc1\x8a\x83\x30\x14\x45\xf7\xef\x2b\xde\x72\x64\xf0\x0b\xb2\x8a\x26\x48\xc0\x89\x83\x13\x61\x76\x41\x6b\xac\x2e\x6a\x4a\x4c\x29\xfe\x7d\xa1\x9b\x1a\x6b\x75\x1d\xce\x79\x37\x27\x8e\xf1\xfb\x32\x9c\x5d\xed\x0d\x56\x57\x00\x9a\x2b\x5e\xa2\xa2\x49\xce\xb1\x1f\x26\x6f\xdd\xac\xbd\xab\x5b\x33\x21\x65\x0c\x9b\x7a\x32\xda\x76\x9d\x71\x7a\x68\x31\x11\x99\x90\x8a\x1c\x41\x27\x7b\x1b\xbd\x71\xef\x1c\xa4\x25\xa7\x8a\xa3\x90\x8c\xff\x63\xef\x5d\xab\x9b\x59\xbf\x6e\x60\x21\xd7\xbe\xea\x4f\xc8\x0c\x1b\xef\x8c\xf9\x0a\xc6\x44\x64\xdb\x16\x1c\x3f\x10\xae\x87\x46\x04\x60\x19\x88\xd9\xfb\x08\xc0\xca\xe2\xf7\xe3\x64\xb2\xf5\x1c\x78\xc9\x6e\xe4\x27\x9d\x16\x79\xf5\x23\xc3\xd8\xbb\x95\x97\xd4\xfa\x13\x04\x1e\x01\x00\x00\xff\xff\x6c\x7b\xed\xfa\xe4\x01\x00\x00") + +func migrations13_trade_offer_idsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations13_trade_offer_idsSql, + "migrations/13_trade_offer_ids.sql", + ) +} + +func migrations13_trade_offer_idsSql() (*asset, error) { + bytes, err := migrations13_trade_offer_idsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/13_trade_offer_ids.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x77, 0x71, 0x79, 0x45, 0x9b, 0x9e, 0x80, 0x36, 0x80, 0x5a, 0xc2, 0x1f, 0xb4, 0xba, 0xdd, 0x85, 0x65, 0xd0, 0x5c, 0x47, 0x47, 0xf, 0x4d, 0xc7, 0x9b, 0x92, 0x36, 0xd7, 0xe6, 0x57, 0x46, 0xa}} + return a, nil +} + +var _migrations14_fix_asset_toml_fieldSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\x2c\x2e\x4e\x2d\x89\x2f\x2e\x49\x2c\x29\x56\x80\x88\x3b\xfb\xfb\x84\xfa\xfa\x29\x94\xe4\xe7\xe6\x28\x84\x44\x06\xb8\x2a\x94\x25\x16\x25\x67\x24\x16\x69\x18\x99\x9a\x6a\x5a\x73\x71\x21\x9b\xe6\x92\x5f\x9e\x47\xb6\x79\x66\x26\x9a\xd6\x5c\x80\x00\x00\x00\xff\xff\xac\xf9\x96\x09\x9c\x00\x00\x00") + +func migrations14_fix_asset_toml_fieldSqlBytes() ([]byte, error) { + return bindataRead( + _migrations14_fix_asset_toml_fieldSql, + "migrations/14_fix_asset_toml_field.sql", + ) +} + +func migrations14_fix_asset_toml_fieldSql() (*asset, error) { + bytes, err := migrations14_fix_asset_toml_fieldSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/14_fix_asset_toml_field.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdf, 0x28, 0x86, 0x8, 0xa5, 0xec, 0x44, 0x7b, 0xf7, 0x88, 0x3b, 0x6d, 0xf6, 0x5c, 0x6a, 0x17, 0x92, 0xbc, 0xd2, 0x88, 0x94, 0xd4, 0x42, 0xb1, 0xc2, 0xac, 0x97, 0xb6, 0xd0, 0xeb, 0xd8, 0xa7}} + return a, nil +} + +var _migrations15_ledger_failed_txsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\xce\x41\xae\x82\x30\x10\x00\xd0\x7d\x4f\x31\xfb\x1f\x4e\xc0\xaa\xdf\xe2\x6a\x04\x43\xe8\x9a\x34\x65\xc0\x26\xd8\x9a\xce\x34\xc6\xdb\xbb\x65\x63\xf5\x02\x2f\xaf\x69\xe0\xef\x1e\xb6\xec\x84\xc0\x3e\x94\xd2\x38\x75\x23\x4c\xfa\x1f\x3b\xb8\x05\x96\x94\x5f\xf3\x4e\xcb\x46\x99\x41\x1b\x03\x5c\xbc\x27\xe6\xb5\xec\xb3\x64\x17\xd9\x79\x09\x29\xce\x3e\x95\x28\x10\xa2\xd0\x46\x19\x4c\x77\xd6\x16\x27\xe8\x2d\x62\xfb\x95\x5c\x5d\xd8\x69\xf9\x99\x53\xc7\xb2\x49\xcf\x58\x4f\x9b\x71\xb8\xc2\x69\x40\x7b\xe9\xab\xf9\xfa\xf3\xa8\x7c\xfa\xb6\xea\x1d\x00\x00\xff\xff\x1f\x77\x76\xa0\x4d\x01\x00\x00") + +func migrations15_ledger_failed_txsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations15_ledger_failed_txsSql, + "migrations/15_ledger_failed_txs.sql", + ) +} + +func migrations15_ledger_failed_txsSql() (*asset, error) { + bytes, err := migrations15_ledger_failed_txsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/15_ledger_failed_txs.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xeb, 0x7, 0xd7, 0xc9, 0x5d, 0xbe, 0xd2, 0x21, 0xc1, 0xb0, 0x20, 0xc8, 0x5f, 0x3a, 0xe9, 0x99, 0xba, 0x5d, 0x83, 0xb4, 0xe8, 0x9f, 0x2d, 0xc3, 0x9a, 0xe1, 0x46, 0xf2, 0xd1, 0x1b, 0x48, 0x7a}} + return a, nil +} + +var _migrations16_ingest_failed_transactionsSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x90\xcd\x6a\xeb\x30\x14\x84\xf7\x7a\x8a\xb9\xeb\x8b\x5d\xe8\x36\x64\xe1\xc6\xea\x0f\x38\x49\x71\x6c\xba\x0c\x8a\x75\xd4\x88\x5a\x3a\x41\x72\x5a\xfa\xf6\x05\xa7\x24\x36\x08\xba\x94\x98\xf9\xf4\x8d\xb2\x0c\xff\x9d\x7d\x0f\x6a\x20\xb4\x27\x21\xb2\x0c\xab\x23\x75\x1f\xd0\x87\xfb\xbb\xa3\x8d\x03\x87\xef\xbc\x09\xca\x47\xd5\x0d\x96\x7d\xbe\x3b\x77\x1d\xc5\x68\xce\x3d\x8c\xa5\x5e\xa3\x63\xe7\xc8\x0f\x30\x1c\xe0\x38\x10\xac\x37\x1c\x9c\x1a\xd3\xa2\xa8\x1a\x59\xa3\x29\x1e\x2a\x89\x5f\xdc\x7e\xb8\xe1\x22\x8a\xb2\x44\xbc\x31\x0f\xcc\x3d\x29\xbf\x18\x4d\xae\x66\x25\x7f\xf9\xf1\xa6\x26\xc7\x9f\x04\xa3\x6c\x4f\x1a\x33\x90\xf2\x1a\x7c\xa2\xa0\x2e\x47\x13\xd8\xa5\x72\xff\x44\x29\x2b\xd9\x48\x3c\xd6\xdb\xf5\x55\x69\x52\x6c\x77\x2f\x9b\xa7\xa4\xab\x00\xde\x9e\x65\x9d\x1e\x92\x5b\x8d\x65\x82\x97\x4f\x42\x7b\xab\x51\x6c\x66\x7b\x97\x30\xaa\x8f\xb4\x48\x5a\xcd\xf6\x5d\x9e\x4e\x55\xff\xfc\xe3\xb2\xde\xbe\x62\xb5\xad\xda\xf5\x66\x02\x58\x88\x9f\x00\x00\x00\xff\xff\xe4\xf5\x4c\xc4\xfd\x01\x00\x00") + +func migrations16_ingest_failed_transactionsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations16_ingest_failed_transactionsSql, + "migrations/16_ingest_failed_transactions.sql", + ) +} + +func migrations16_ingest_failed_transactionsSql() (*asset, error) { + bytes, err := migrations16_ingest_failed_transactionsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/16_ingest_failed_transactions.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf9, 0x24, 0xd, 0x8a, 0x56, 0x1d, 0x41, 0x6a, 0x4, 0x7b, 0xe1, 0x9f, 0xfb, 0x78, 0x2, 0xec, 0xe2, 0x98, 0xac, 0xef, 0xc7, 0xc0, 0x96, 0xd1, 0xbf, 0x8f, 0xc6, 0x16, 0xa7, 0x3c, 0x4a, 0x33}} + return a, nil +} + +var _migrations17_transaction_fee_paidSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\xce\x31\x0e\xc2\x20\x14\x80\xe1\x9d\x53\xbc\xdd\xf4\x04\x9d\x50\xd8\x68\x31\x84\xce\xe4\xa5\x7d\xa5\x0c\x85\x06\x48\xd4\xdb\x1b\x4d\x4d\xba\xa9\x07\xf8\xff\x7c\x4d\x03\xa7\x35\xf8\x8c\x95\x60\xd8\x18\xe3\xca\x4a\x03\x96\x9f\x95\x84\x25\x94\x9a\xf2\xc3\xd5\x8c\xb1\xe0\x58\x43\x8a\x05\xb8\x10\x30\x13\xb9\x71\xc1\xec\x69\x82\x10\x2b\x79\xca\xed\xf7\xd2\xc8\x9e\x77\x12\x2e\x5a\x0d\x5d\xff\x7e\x6c\x18\x26\xb0\x1a\x56\xbc\xbb\x99\xa8\x65\xec\xc8\x11\xe9\x16\x7f\x00\x09\xa3\xaf\xc7\xe9\x0e\xfb\x1b\xb4\x23\x5e\x9e\x8f\xad\x65\xcf\x00\x00\x00\xff\xff\x1a\x59\x3c\x98\x1f\x01\x00\x00") + +func migrations17_transaction_fee_paidSqlBytes() ([]byte, error) { + return bindataRead( + _migrations17_transaction_fee_paidSql, + "migrations/17_transaction_fee_paid.sql", + ) +} + +func migrations17_transaction_fee_paidSql() (*asset, error) { + bytes, err := migrations17_transaction_fee_paidSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/17_transaction_fee_paid.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x64, 0x93, 0x1c, 0x21, 0xf6, 0x63, 0xac, 0x5c, 0x8, 0x6b, 0xe9, 0x4f, 0xcb, 0xcb, 0xae, 0xdf, 0xa, 0x28, 0xa8, 0x6a, 0xfd, 0x61, 0x29, 0xc2, 0x60, 0xaf, 0xb7, 0x74, 0xfd, 0x0, 0x85, 0xb7}} + return a, nil +} + +var _migrations18_account_for_signersSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x91\xbf\x6e\xf2\x40\x10\xc4\x7b\x3f\xc5\x94\xb6\x3e\xdc\x7c\x0a\x69\xa8\x48\x70\x11\x85\x00\xb2\xa0\xa0\x42\x97\xcb\xc6\x5e\xd9\xbe\x23\x77\x6b\xac\x7b\xfb\x08\x5b\x46\x80\x44\xda\xd9\xdf\xfe\x99\x9d\x34\xc5\xbf\x86\x0b\xa7\x84\xb0\x3b\x46\xd1\x6b\x9e\xcd\xb7\x19\xb6\xf3\x97\x65\x06\xa5\xb5\x6d\x8d\xf8\x83\xe7\xc2\x90\xf3\x88\x23\x00\xa3\x0c\x5d\x2a\xa7\xb4\x90\xc3\x49\xb9\xc0\xa6\x88\x9f\x9f\x92\x49\x8f\x0c\x0d\x7f\x11\x1d\x71\x51\x0a\xd8\x08\x15\xe4\xb0\x5a\x6f\xb1\xda\x2d\x97\x43\x31\x4d\xd1\x11\x3a\xae\x6b\xfc\xb4\xe4\x02\x3e\xc3\x38\xd2\x5b\x48\xa9\x04\xec\xd1\x95\x17\x95\x3d\xa4\x24\x7c\xb3\xf3\x02\x16\x6a\xc0\xa6\x57\xb4\x6d\x8e\xd6\xb3\x10\x2a\x0a\xfd\xec\x4d\xfe\xf6\x31\xcf\xf7\x78\xcf\xf6\x88\x87\xf6\xc9\x68\x29\x89\x92\xd9\xdd\x0f\x2a\x0a\x87\x93\xaa\x5b\x3a\x78\xb1\x8e\xfa\x17\x54\x14\xce\x86\xce\xee\xe2\xff\xd3\x69\x72\x73\x7d\x0f\x3f\x2e\xdf\xac\xaf\x28\x0c\x2b\xaf\x63\x58\xd8\xce\x44\x8b\x7c\xbd\x79\x94\x82\x56\x5e\xab\x2f\x9a\x5d\x43\xf7\x67\x5e\x98\xdf\x00\x00\x00\xff\xff\xbf\x12\xc9\xd9\xe1\x01\x00\x00") + +func migrations18_account_for_signersSqlBytes() ([]byte, error) { + return bindataRead( + _migrations18_account_for_signersSql, + "migrations/18_account_for_signers.sql", + ) +} + +func migrations18_account_for_signersSql() (*asset, error) { + bytes, err := migrations18_account_for_signersSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/18_account_for_signers.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x37, 0xdc, 0xe4, 0xb4, 0xd5, 0xcc, 0x53, 0xd9, 0x42, 0x8c, 0x12, 0x37, 0xcf, 0x13, 0x2c, 0x47, 0xe, 0xc7, 0xba, 0xe5, 0xc2, 0x17, 0x73, 0xe2, 0xc, 0xd2, 0x4a, 0xb3, 0x62, 0x75, 0x54, 0x7f}} + return a, nil +} + +var _migrations19_offersSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x93\x41\x6f\xda\x4c\x10\x86\xef\xfe\x15\xef\x0d\xd0\x07\xd1\x77\x69\x2f\x39\x91\xd8\x4d\xac\x52\x3b\x32\xa6\x6d\x4e\xd6\x62\x8f\xcd\x88\x65\x17\xed\xae\x43\xdc\x5f\x5f\xad\x0d\xa8\x89\x80\xf3\xfb\xce\x73\x78\x66\x66\x36\xc3\x7f\x3b\x6e\x8c\x70\x84\xd5\x3e\x08\x1e\xb3\x68\x9e\x47\xc8\xe7\x0f\x8b\x08\xba\xae\xc9\x58\x8c\x03\x00\xb0\x24\x25\x19\xae\x50\x6e\x84\x11\xa5\x23\x83\x37\x61\x3a\x56\xcd\xf8\xcb\xd7\x09\x92\x34\x47\xb2\x5a\x2c\xa6\x7d\xb9\x9f\xe4\x0a\x6b\x6e\x58\x39\xbc\x64\xf1\x8f\x79\xf6\x8a\xef\xd1\xeb\xf4\x0c\x63\xd5\x08\x6b\xc9\xc1\xd1\xbb\xfb\x34\xbf\x6e\xbb\x5b\xb1\xd8\xe9\x56\xb9\x13\xfd\x63\xb6\x37\x5c\x92\x02\x2b\x47\x0d\x99\x4b\x61\x75\x2b\x44\xa5\xdb\xb5\x24\xec\x0d\x95\x6c\x59\xab\x4f\xa5\x5a\x8a\xc6\x5e\x01\x48\x61\x5d\xb1\xd3\x15\xd7\x4c\x55\x21\xa9\xf2\x95\x38\xc9\xcf\xb5\x60\x72\x7f\x56\x1c\x27\x61\xf4\xfb\xa8\xb8\x58\x77\xc5\xe0\x17\x69\x72\xd2\xbe\x5a\xc6\xc9\x13\x1e\xf2\x2c\x8a\xc6\x27\xf9\x93\xfb\x5b\xe3\xac\x9a\x62\x70\x76\x9d\x72\xd2\x7a\x9d\x34\xb8\xbf\x09\xfa\x67\x3d\xd7\x39\x17\x6d\x5c\xe6\x5d\xaa\x7a\x55\xb3\x19\x42\xb6\xce\xf0\xba\x75\xfd\xda\x1a\xb2\xce\xef\xc4\x90\x64\xb2\xd0\x0a\x02\x96\x55\x23\x09\x6f\x42\xb6\x04\xa9\xcb\x2d\x55\xa8\xb5\x41\xbb\xaf\x84\x63\xd5\x78\x0a\xfb\x62\xf8\x70\x87\x5f\x1b\x52\x78\xd6\x86\xff\x68\x05\xeb\x84\x71\x16\xa5\x24\x61\xe0\x36\x64\x08\x6c\xa1\xf4\x91\x65\x35\x0e\x84\xd2\x90\x7f\x0e\x76\x9e\xe3\x3b\x77\x88\xeb\xa1\x3d\xb2\x10\x28\xb5\xaa\x25\x97\x0e\xec\xb0\x23\xa1\xac\xcf\x8e\x04\xb6\x10\xd2\x90\xa8\xba\x23\xde\x6a\x4f\x39\xf8\x2b\x83\xd2\x6e\xc3\xaa\xb9\x0b\xe2\x64\x19\x65\xb9\x3f\x94\x14\x5b\xea\x8a\x7e\xb6\xb0\x4e\x1b\xc2\x78\x4b\xdd\x74\xa0\x4d\xfa\x1b\xfb\x39\x5f\xac\xa2\x25\xc6\x23\x7a\xdf\x17\x83\x90\xc1\xf4\x60\x6d\x34\xc5\xe8\xff\xd1\x50\x4d\x13\x3c\xa6\xc9\xb7\x45\xfc\x98\xf7\x9c\x09\xc2\xd4\x9f\xe2\x73\x9c\x3c\x0d\x72\xcf\xaf\x1f\xea\x83\x0a\x82\x30\x4b\x5f\x3e\xbe\x7e\x29\x6c\x29\x2a\xba\x0f\xfe\x06\x00\x00\xff\xff\x14\xd1\xb7\xec\x28\x04\x00\x00") + +func migrations19_offersSqlBytes() ([]byte, error) { + return bindataRead( + _migrations19_offersSql, + "migrations/19_offers.sql", + ) +} + +func migrations19_offersSql() (*asset, error) { + bytes, err := migrations19_offersSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/19_offers.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbd, 0xff, 0xef, 0xfe, 0xb0, 0x5b, 0xa3, 0x92, 0xac, 0x9d, 0x98, 0x6c, 0xd4, 0x90, 0x9c, 0xe9, 0xae, 0x89, 0xfc, 0x54, 0x2b, 0xe3, 0x33, 0x1f, 0x0, 0xd6, 0x24, 0x9a, 0x1, 0xe1, 0xa6, 0x39}} + return a, nil +} + +var _migrations1_initial_schemaSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x5a\x51\x6f\xdb\x36\x10\x7e\xf7\xaf\x38\xf4\xc5\x09\x16\x0f\x2d\x3a\x14\x9d\x8d\x14\x70\x13\x75\x35\xe6\xc8\xad\x63\xaf\x0d\x8a\x82\xa0\xa5\xb3\xcc\x45\x26\x55\x92\x4a\x9c\x0e\xfb\xef\x83\x64\xc9\x96\x25\x51\x92\x13\x3b\x7b\xb4\x78\xbc\xfb\x3e\x92\xf7\xdd\x89\x72\xa7\x03\xbf\x2c\x99\x27\xa9\x46\x98\x06\xad\x4e\xa7\xd5\xe9\xc0\x27\xa1\xb4\x27\xf1\xfa\xf3\x10\x5c\xaa\xe9\x8c\x2a\x04\x37\x5c\xc6\xc3\xad\x6b\x6b\x02\x4a\x53\x8d\x4b\xe4\x9a\x68\xb6\x44\x11\x6a\x38\x87\x97\xbd\x78\xc8\x17\xce\x6d\xf1\xa9\xe3\xb3\xc8\x1a\xb9\x23\x5c\xc6\x3d\x38\x87\xf6\x74\xf2\xe1\x6d\xbb\x97\xba\xe3\x2e\x95\x2e\x71\x04\x9f\x0b\xb9\x64\xdc\x23\x4a\x4b\xc6\x3d\x05\xe7\x20\x78\xe2\x63\x81\xce\x2d\x99\x87\xdc\xd1\x4c\x70\x32\x13\x2e\xc3\x68\x7c\x4e\x7d\x85\x3b\x61\x96\x8c\x93\x25\x2a\x45\xbd\xd8\xe0\x9e\x4a\xce\xb8\xd7\x4b\xb0\x23\x95\xce\x82\x04\x54\x2f\xe0\x1c\x82\x70\xe6\x33\xe7\x0c\x02\x8f\x38\x54\x53\x5f\xa4\x66\x2e\xce\x69\xe8\x6b\xa2\xe9\xcc\x47\x15\x50\x07\x23\xd0\xed\xdc\xe8\x3d\xd3\x0b\x22\x98\x9b\xc1\x91\xac\xa1\x4d\x97\xd8\x85\x05\x53\x5a\xc8\x07\x42\x1d\x47\x84\x5c\xab\x1e\x4c\x1e\x02\xec\xc2\xa4\xff\x7e\x68\xf5\xe0\xda\x59\xe0\x92\x76\x13\x14\x3d\x18\xdd\x73\x94\x5d\xe8\xf4\x60\xb2\x09\xdb\x8d\x57\xfd\x62\x6c\xf5\x27\xd6\x7a\x5e\xc1\x2b\x9c\xb4\x00\x00\x98\x0b\x33\xe6\x31\xae\xc1\x1e\x4d\xc0\x9e\x0e\x87\x67\xf1\x73\xea\xba\x12\x95\x02\x67\x41\x25\x75\x34\x4a\xb8\xa3\xf2\x81\x71\xef\xe4\xcd\x6f\xa7\xad\xd3\x5e\xab\x1c\x33\xce\xe7\xe8\x1c\x1a\x72\xe2\x34\x41\x9c\x23\x42\x4c\x0c\x52\x3b\x11\xa0\xa4\xf1\xee\x9b\x2c\x5f\x08\xe9\xa2\x7c\x01\x8c\x6b\xf4\x50\xe6\x46\xf5\x43\x80\x86\x21\x17\x35\x65\xbe\x82\xbf\x95\xe0\x33\xf3\xa2\xf8\xe8\x7a\x28\x0f\xbc\x28\x89\xd3\x64\x51\x14\xfe\x08\x91\x3b\x26\xa0\x6b\x63\xb2\xa0\x6a\x51\xbe\xa3\x39\xfb\x40\xe2\x1d\x13\xa1\x22\xb5\x13\x93\x35\x92\x94\x2b\xba\xce\xb1\x78\x57\x36\x38\x2e\xad\x0f\xfd\xe9\x70\x02\x2f\x73\x11\xb6\xbb\xd2\xcc\xde\xf1\x85\x42\x97\x50\x0d\x91\x4e\x28\x4d\x97\x01\x44\x89\x14\x29\x46\xf4\x04\x7e\x0a\x8e\xf9\x39\x12\xa9\xae\x9d\xb4\xb6\x0d\x03\xb7\xb1\xed\xe6\x1c\x25\x3f\x97\x81\x90\x1a\x25\xb9\x43\xa9\x98\xe0\x05\x2e\xaf\xf2\x27\x4a\x68\xea\x13\x47\x30\xae\xca\x0f\xe4\x1c\x91\x04\x42\xf8\xe5\xa3\x91\xb4\x92\x39\x9a\xf6\x3a\x1e\x96\xa8\x50\xde\x99\x4c\x96\x74\x45\xf4\x8a\x28\xd4\x44\xb1\x9f\x45\x2b\xf3\x51\xde\x6e\x5b\x40\xa5\x66\x0e\x0b\xe8\xc1\x15\xaa\x3c\xc6\x56\xaf\xca\x39\x35\x4f\xf7\x7a\x01\xd9\x97\x3f\x61\x2e\x51\xf8\x23\x5d\x86\x6b\xeb\xf3\xd4\xb2\x2f\x2a\x56\x22\x4b\x3e\xb5\x6e\x16\x23\x66\x70\x3d\xe9\x8f\x27\xf0\x65\x30\xf9\x08\xaf\xe2\x07\x03\xfb\x62\x6c\x5d\x59\xf6\x04\xde\xdf\x24\x8f\xec\x11\x5c\x0d\xec\xbf\xfa\xc3\xa9\xb5\xf9\xdd\xff\xba\xfd\x7d\xd1\xbf\xf8\x68\xc1\xab\x83\x10\x85\xd1\x17\xdb\xba\x84\xf7\x37\x35\x8c\xfb\xc3\x89\x35\xde\x93\xf0\xc6\x77\x8d\xf9\xaf\xcc\xad\xe5\x72\xac\x83\x5a\x57\x4c\xb3\xf2\x68\x2c\xb8\x41\xe0\x33\x67\xcd\x2b\xae\x47\x4f\x2c\x47\xeb\x47\x4a\x84\xd2\xc1\xf4\xa8\x1b\xb4\x3f\xd5\xa9\x76\xbb\xdb\x2d\x58\x34\x48\x8a\x2c\xbd\xe3\xc9\x82\x29\x4a\xbc\xf6\x06\x59\x28\x9b\x5b\xbe\x01\x4f\x11\x05\x13\xb2\xc3\xca\x42\x4d\x94\xe7\x12\x86\x3d\xc9\x3e\x51\x1a\x6a\xa2\x15\xc5\xc1\x34\xa1\x42\x1e\x32\x53\x8e\x77\x64\x53\x89\xc8\xe2\x6b\xdc\x8e\x25\x5d\x58\x4d\x93\xd7\x54\x41\xaa\xc5\xa0\xd4\x76\x1b\xda\xdc\xaf\x50\x63\x69\x36\xf5\x7a\xff\x4b\xb7\xa6\x57\x04\xf9\x1d\xfa\x22\x40\xd0\xb8\x2a\x48\xf5\x2a\xea\x9d\x42\x5f\x1b\x06\x97\xa8\xa9\x61\x28\x5a\x05\xd3\xb0\x62\x1e\xa7\x3a\x94\x58\xf6\x46\xf5\xfb\x9b\xd3\x6f\xdf\xb7\x2a\xfc\xcf\xbf\x65\x3a\xfc\xed\x7b\xbe\x89\xc3\xa5\x2
0\x71\x35\x28\x6a\xf6\xc6\x17\x17\x1c\x2b\x55\x7d\xeb\xab\xe8\x26\x61\xc6\x96\x48\x66\x22\xe4\xae\x8a\x76\xee\xad\xa4\xdc\xc3\x58\x0c\xb3\xc9\xc4\xdc\x34\x75\x92\xd8\x8d\xf2\x7d\x9d\x2e\x23\x7b\x58\x57\xdd\x61\x6d\x7f\x31\x1a\x4e\xaf\xec\x68\x4b\xa3\x17\xea\x94\x25\xc7\x95\xbe\xa3\xfe\x49\xbb\x51\x43\xd1\xee\x76\x25\x7a\x8e\x4f\x95\x2a\x28\xfa\xc1\x58\x18\x8b\xd5\x5e\x3c\x6a\xd4\xaf\x8a\x49\xcd\x52\x04\xb7\xf8\x90\x52\xbd\x18\xd9\xd7\x93\x71\x7f\x60\x57\xb0\x2d\x0a\xde\x9e\x1b\x18\x1f\xa5\xfe\xe5\x65\x26\x5a\x13\x8c\xf0\x69\x3c\xb8\xea\x8f\x6f\xe0\x4f\xeb\x06\x4e\x98\xbb\x7f\x0d\x3e\x22\x53\x53\xcc\x2a\xae\x95\x38\x6b\xd9\xce\x36\x0d\x4a\x4a\x69\x60\x5f\x5a\x5f\x1f\x51\xa8\xe2\x79\x19\x7f\x30\xb2\xcb\xcb\xd6\xf4\x7a\x60\xff\x01\x33\x2d\x11\xe1\x24\x31\x3e\x2b\xd4\x85\x32\xa4\x51\x79\x3b\x18\xcc\xb8\x56\x36\xc2\x98\xaf\xb0\x65\xd0\xd6\x05\xf5\x60\xe0\xd6\xee\x9a\xc1\xcb\xd5\xf2\xb3\x62\xd9\x2e\x3d\xe3\x04\xc9\xec\x61\x3d\xfe\x54\xd8\x53\x7b\xf0\x79\x9a\xa2\xcf\xf9\xce\x72\x48\xaf\xdd\x76\xe0\x97\xbd\x66\x9f\xa5\x37\x68\x26\xe4\x5b\x59\x3d\x24\x66\xe6\x36\x46\xbb\xed\xea\xcf\x4a\x2f\x0a\x6a\x18\x88\x80\x04\x47\x21\x91\x38\xce\xf2\x30\xd4\xbf\x47\xd1\x2a\xb2\xd9\xdc\xe8\xcd\x1e\x0e\x4e\x68\xd7\x77\x96\x53\x7a\x57\xb9\x43\xa2\x1c\x5e\x36\x7b\x8f\x82\xb1\x10\xa0\x59\xda\x96\xa0\x65\xdc\xc5\x15\xc9\xdf\xab\x13\xc1\x49\x72\x79\x7e\x50\xe8\xb5\xd1\xb2\x3c\x36\x97\xfc\xbb\xea\xbd\x36\xdc\x83\xc8\x81\x97\xbf\x2a\x50\x3d\xfc\xda\x2d\x48\x24\x20\xf2\x17\xf5\xc5\x87\x91\xf7\xca\x10\xb5\x02\x14\x19\xd5\xa0\x4e\x92\x23\x72\xb9\xb9\xe4\x3e\x06\xf4\xb2\x38\xb5\x49\xba\xb1\x6c\x4e\xe2\xa8\x67\x66\x27\xce\x63\x24\xc6\xec\x2e\x77\x8b\x7f\xe4\x2d\x28\x7c\x34\xa8\xe5\x92\x9b\xd0\x9c\x59\xe6\x1b\xce\xf3\xec\x4c\xf6\xa3\x51\x1d\xad\x8c\x6d\x73\x46\x65\x9f\xa7\x9e\x87\x5a\xe9\x87\xb1\x3a\x8e\x65\x93\x9a\x93\x4d\x3b\xc5\xe7\x21\xb8\xb9\xe8\xa9\x23\x65\xec\xfc\x77\x5d\x6f\xef\xc8\x8f\xae\x0d\xf9\x50\xa5\x5d\xd5\xbe\x0a\xb1\xeb\x74\xf7\x1e\xf9\x18\x12\x51\x15\xaf\x09\xa1\xdd\x19\xfb\x91\x3b\x52\xcd\x2c\x46\x69\x44\xa4\xac\x72\xc6\x4d\xb3\x5e\x1d\xa9\x1b\x4f\x1c\x1b\x1a\xc2\x47\xf6\xe3\xc5\x0d\x31\xef\x47\xb6\xfd\x3c\x7a\xba\x14\x83\x3d\xba\x13\xd6\x92\xba\xb8\xe9\x8d\xd2\x77\x49\x32\x13\xe2\xf6\x30\x07\xaa\x22\x40\x6d\x0b\x76\x72\x92\x7e\x17\xeb\xbc\x7b\x07\x6d\x25\x7c\x97\x50\xa5\x50\xc7\x47\xb1\xdd\xed\x6a\x5c\xe9\xd3\xd3\x33\x30\x1b\x3a\xc2\x6d\x66\xc8\x94\x0a\x51\x9a\x4d\x67\x22\xf4\x16\xba\x51\xf8\x1d\xd3\x6a\x00\x3b\xa6\x39\x08\xa7\xf0\xe5\xa3\x35\xb6\xd6\xf9\x04\xe7\xf0\xfa\x75\xbc\x7b\xad\xec\x7f\xb8\x2e\xc5\x3d\x6f\xb9\x52\x04\x10\xff\x71\xa9\xfc\x0c\x38\x54\x39\xd4\xc5\x5e\x8d\xe1\x6e\x96\x54\x4d\xca\x24\x7e\x23\xb3\xe6\x9e\xd3\x7a\x55\x65\x93\x1e\x95\x2a\x9b\xcd\x6b\x48\x6a\xf4\x5f\x00\x00\x00\xff\xff\xc5\x76\xc1\xee\xf9\x26\x00\x00") + +func migrations1_initial_schemaSqlBytes() ([]byte, error) { + return bindataRead( + _migrations1_initial_schemaSql, + "migrations/1_initial_schema.sql", + ) +} + +func migrations1_initial_schemaSql() (*asset, error) { + bytes, err := migrations1_initial_schemaSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/1_initial_schema.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0xdd, 0xc3, 0x1d, 0x39, 0xb7, 0x5e, 0x6a, 0x64, 0x2b, 0xd5, 0x66, 0x83, 0x61, 0x79, 0xf7, 0x59, 0xce, 0x51, 0x66, 0x3c, 0xd0, 0xf3, 0x14, 0x35, 0xcd, 0x21, 0xc0, 0xae, 0x24, 0x84, 0x35}} + return a, nil +} + +var _migrations20_account_for_signer_indexSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\x0e\x72\x75\x0c\x71\x55\xf0\xf4\x73\x71\x8d\x50\x28\xce\x4c\xcf\x4b\x2d\x2a\x8e\x4f\xaa\x8c\x4f\x4c\x4e\xce\x2f\xcd\x2b\x51\xf0\xf7\x53\x80\x32\x8b\xe3\xa1\xd2\x0a\xa1\xc1\x9e\x7e\xee\x0a\x4e\x21\x41\xae\xae\x1a\x50\x49\x4d\x6b\x2e\x2e\x64\xa3\x5d\xf2\xcb\xf3\xb8\xb8\x5c\x82\xfc\x03\x70\x1a\x6d\xcd\x05\x08\x00\x00\xff\xff\x10\xbc\xb1\xe1\x8c\x00\x00\x00") + +func migrations20_account_for_signer_indexSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20_account_for_signer_indexSql, + "migrations/20_account_for_signer_index.sql", + ) +} + +func migrations20_account_for_signer_indexSql() (*asset, error) { + bytes, err := migrations20_account_for_signer_indexSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20_account_for_signer_index.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7, 0xd5, 0x6a, 0x4f, 0x6a, 0x95, 0xe0, 0x16, 0xa8, 0x25, 0xd7, 0x4, 0xec, 0x85, 0xe9, 0x13, 0x33, 0x4c, 0x9a, 0xa8, 0x74, 0xf9, 0x63, 0x52, 0xa5, 0x18, 0xaa, 0xdb, 0x4, 0xb5, 0xf7, 0x6e}} + return a, nil +} + +var _migrations21_trades_remove_zero_amount_constraintsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\xc8\xc8\x2c\x2e\xc9\x2f\xaa\x8c\x2f\x29\x4a\x4c\x49\x2d\x56\x70\x09\xf2\x0f\x50\x70\xf6\xf7\x0b\x0e\x09\x72\xf4\xf4\x0b\x41\x93\x8e\x4f\x4a\x2c\x4e\x8d\x4f\xcc\xcd\x2f\xcd\x2b\x89\x4f\xce\x48\x4d\xce\xb6\xa6\xc0\xb0\x64\x90\x31\xa9\x45\x68\xe6\xe1\x33\xd0\xd1\xc5\x85\x14\xc7\x29\x38\x7b\xb8\x3a\x7b\x2b\x68\x20\x49\x28\xd8\xd9\x2a\x18\x68\xe2\x75\x35\x7e\x4b\xb0\x39\x1a\x66\x0f\xaa\x1c\xcc\x2a\x2e\xe4\xe0\x77\xc9\x2f\xcf\x1b\xe1\x11\x40\xbf\xf0\x07\xdb\x04\x08\x00\x00\xff\xff\xfc\x1d\x38\xf5\xfd\x02\x00\x00") + +func migrations21_trades_remove_zero_amount_constraintsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations21_trades_remove_zero_amount_constraintsSql, + "migrations/21_trades_remove_zero_amount_constraints.sql", + ) +} + +func migrations21_trades_remove_zero_amount_constraintsSql() (*asset, error) { + bytes, err := migrations21_trades_remove_zero_amount_constraintsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/21_trades_remove_zero_amount_constraints.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x24, 0x1a, 0x4b, 0x15, 0xd5, 0xce, 0x4, 0xe9, 0x43, 0x61, 0x69, 0xce, 0xed, 0x82, 0x11, 0x4e, 0xc7, 0x58, 0xef, 0x4a, 0x46, 0xef, 0x2a, 0x28, 0x13, 0x5e, 0x59, 0xf0, 0x69, 0x50, 0x45, 0x84}} + return a, nil +} + +var _migrations22_trust_linesSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x93\xc1\x6e\x9b\x40\x10\x86\xef\x3c\xc5\x1c\x6d\xd5\x54\x6d\xd5\xe4\xe2\x93\x5d\xa3\xca\x8a\x83\x23\x8a\xa5\xe6\xb4\x1a\x76\xc7\x64\xd4\x65\xd7\xdd\x59\x5a\xf1\xf6\x95\x89\x62\x93\x04\xa5\xce\x71\xc5\xc7\x3f\x2c\xdf\x3f\x69\x0a\x1f\x1a\xae\x03\x46\x82\xdd\x21\x49\xbe\x15\xd9\xa2\xcc\xa0\x5c\x2c\x37\x19\xc4\xd0\x4a\x54\x96\x1d\x09\x4c\x12\x00\x80\x34\x05\x4b\xa6\xa6\xa0\x7e\x51\x07\x2c\x80\xb0\xe9\xcf\x37\xd4\x41\x83\x41\x1e\xd0\x92\x81\x56\xd8\xd5\x70\xfb\x78\x5e\xb2\xc3\xd0\x3d\xbd\x8e\xce\x40\x85\x42\xd7\x5f\x53\x72\xda\x9b\x9e\x26\x03\xd1\x43\xe5\xbd\x44\x38\x50\xd8\xfb\x06\x9d\x26\xf0\x7b\x10\xdf\x10\xfc\x6e\x29\x30\xc9\xc7\x3e\x63\x30\x5f\x3f\x60\x40\x1d\x29\xc0\x1f\x0c\x1d\xbb\x7a\xf2\xf9\xea\xd3\x14\xf2\x6d\x09\xf9\x6e\xb3\x99\xf5\x3c\x6a\xed\x5b\x17\x15\x9b\x11\xfe\xea\xfa\x15\x2e\x42\x51\xc5\xee\x40\xc0\x2e\x8e\x3e\x64\x91\x96\xc2\x3b\xd2\x8e\xd7\x1c\xfb\xd8\x2f\x2f\xf1\x0a\x6d\x7f\xf1\x8a\xeb\xd7\xc3\xcf\x36\x94\xe5\x86\xe3\x38\x55\xb5\xc7\x6c\x65\x19\x2b\xb6\x1c\x99\x64\x9c\x13\xb2\xf6\x22\x70\x6f\xb1\x96\x91\x7f\x61\x51\xa2\x6a\xbc\xe1\x3d\x93\x51\x8f\x56\x60\x9d\x97\x2f\xb0\xbb\x62\x7d\xbb\x28\xee\xe1\x26\xbb\x87\xc9\xd9\xdd\x34\x99\xce\x4f\x6d\x5b\xe7\xab\xec\xe7\xb0\x6d\xaa\xea\xd4\xc0\xdb\x36\x7f\x56\xc5\xdd\x8f\x75\xfe\x1d\x96\x65\x91\x65\x93\x33\x35\x9d\xbf\x19\x77\x34\xda\x8b\x78\xd2\xf7\x56\xe8\xa9\x03\xb3\x81\xc1\xd9\x33\xfd\xff\x19\x77\xe9\x90\x53\x58\x32\xdc\xc4\x95\xff\xeb\x92\x64\x55\x6c\xef\x46\x36\x51\xa3\x68\x34\x34\xff\x17\x00\x00\xff\xff\x7a\x87\x20\x42\xbb\x03\x00\x00") + +func migrations22_trust_linesSqlBytes() ([]byte, error) { + return bindataRead( + _migrations22_trust_linesSql, + "migrations/22_trust_lines.sql", + ) +} + +func migrations22_trust_linesSql() (*asset, error) { + bytes, err := migrations22_trust_linesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/22_trust_lines.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe7, 0xd9, 0x90, 0x83, 0xc9, 0xb3, 0x1b, 0xc4, 0xe9, 0xc4, 0xbb, 0xcb, 0xb5, 0x92, 0x15, 0xaa, 0xef, 0x5d, 0x4e, 0xcf, 0x16, 0x6b, 0x49, 0xef, 0x85, 0x1a, 0xbf, 0xb6, 0x71, 0xb3, 0x92, 0x33}} + return a, nil +} + +var _migrations23_exp_asset_statsSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x93\x51\x6f\xd3\x30\x14\x85\xdf\xf3\x2b\xce\x63\x2b\xda\x49\x20\x81\x90\xfa\x14\xd6\x68\x54\x94\x74\x0a\x29\xda\x9e\x22\xd7\xb9\x6b\x2d\x1c\xc7\xf8\xde\xd2\xe5\xdf\xa3\x8c\x6c\xb8\xb4\x13\xbd\x4f\x96\xfc\xe9\x9c\x73\x8f\xe5\xe9\x14\x6f\x1a\xb3\x0d\x4a\x08\x6b\x9f\x4c\xa7\xa0\x47\x5f\x29\x66\x92\x8a\x45\x09\xc3\x30\xac\xf9\x41\x90\x1d\x81\x1e\x0d\x8b\x71\x5b\xc4\x80\xa8\x8d\x25\x3c\x98\xc0\x82\x9a\x1e\x8c\xa3\x1a\xc6\xe1\x63\xa5\x03\x29\xa1\x58\xac\x7a\x62\xaf\xf8\xa7\xfd\xe3\xa4\xc9\x0b\x64\xa7\xe4\x9c\xab\x6f\xfd\xde\x2a\xa1\x1a\x9b\x6e\x70\xf7\x14\x4c\x43\x4e\x94\x85\x71\x5b\x62\x31\xad\x03\x77\x2c\xd4\xe0\xb0\x33\x96\xf0\x9a\x46\xef\x37\xc8\x58\xda\x2a\xdd\x9d\x08\x5c\x61\xe5\x34\x5d\x60\x14\xc8\x5b\xa5\x89\x63\xb1\xe1\x4a\x76\xe4\x7a\xa7\x03\x41\x2b\x87\x40\x4d\xfb\xeb\x28\x53\x92\x5c\x17\x59\x5a\x66\x28\xd3\x4f\xcb\xec\x64\xeb\x51\x02\x60\xe0\xa5\xf3\x84\xa7\x59\xe4\x25\xf2\x55\x89\x7c\xbd\x5c\x4e\x22\x42\xb7\xf5\x40\x7c\x4f\x8b\xeb\xcf\x69\x31\x7a\xfb\x6e\x7c\x96\x34\xcc\x7b\x0a\x31\xf9\xfe\xc3\x09\xd9\xb4\x7b\x27\x78\x99\x32\xbb\x2b\x9f\xcf\xc7\xa4\xdb\x37\x95\xd2\xba\xc7\x79\xc8\x97\xdd\x64\xc5\x19\xf2\xb6\x58\x7c\x4d\x8b\x7b\x7c\xc9\xee\x47\x7f\x33\x4f\x8e\x52\x4d\xa2\x7d\xc7\xc9\x78\xf6\x52\xd1\x22\x9f\x67\x77\xff\x56\x54\x6d\xba\xe7\x6d\x56\xf9\x49\x7f\xeb\x6f\x8b\xfc\x06\x1b\x09\x44\x18\xc5\x26\xe3\xd9\x7f\x55\x87\x36\x2f\x53\xed\xe1\x3e\x6a\xfc\x7d\xe6\xed\xc1\x25\xf3\x62\x75\xfb\xca\xe3\x6a\xc5\x5a\xd5\x34\xfb\x1d\x00\x00\xff\xff\x1c\x2f\xe1\x06\x73\x03\x00\x00") + +func migrations23_exp_asset_statsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations23_exp_asset_statsSql, + "migrations/23_exp_asset_stats.sql", + ) +} + +func migrations23_exp_asset_statsSql() (*asset, error) { + bytes, err := migrations23_exp_asset_statsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/23_exp_asset_stats.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5f, 0x23, 0x96, 0xcb, 0x81, 0x52, 0xd5, 0xb, 0x3c, 0xd4, 0xb9, 0xd9, 0x24, 0xd3, 0x1a, 0x3d, 0x1a, 0xe0, 0xd2, 0x4, 0x40, 0xf5, 0x75, 0xe2, 0x1d, 0x26, 0xd3, 0x19, 0xcf, 0x70, 0xf, 0x36}} + return a, nil +} + +var _migrations24_accountsSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x94\x5d\x6f\xda\x4c\x10\x85\xef\xfd\x2b\xe6\x12\xf4\x86\x57\xfd\x48\x90\x2a\xae\xa0\x58\x15\x0a\x31\x29\x05\xa9\xb9\x5a\x8d\xbd\x83\x3d\xea\x7e\x24\xbb\xeb\x20\xfe\x7d\x65\x53\x62\xc7\x18\x25\xed\x9d\x57\x7e\xf6\xec\x9e\x99\x33\x3b\x1a\xc1\x7f\x9a\x73\x87\x81\x60\xfb\x18\x45\x5f\xd7\xf1\x74\x13\xc3\x66\x3a\x5b\xc6\x80\x59\x66\x4b\x13\x3c\x0c\x22\x00\x38\x2d\x05\x4b\xc8\x0a\x74\x98\x05\x72\xf0\x8c\xee\xc0\x26\x1f\xdc\x8c\x87\x90\xac\x36\x90\x6c\x97\xcb\xab\x1a\x4f\x51\xa1\xc9\x08\x52\xce\xd9\x84\xee\xcf\xb2\xda\x25\x14\x63\xca\x8a\x03\x93\xef\xe7\x3c\x29\xf5\x4e\xf0\xa9\x24\x93\x91\x30\xa5\x4e\xc9\xf5\x43\xa6\xd4\xc2\x97\x29\x99\xe0\x2a\xa1\x73\x80\xcd\x4e\x61\x60\x6b\x84\x24\x1f\xd8\xd4\xdf\xef\x72\xbb\x53\x98\xf7\x29\x16\x56\x93\x90\x56\x23\xf7\xe9\x7c\xfe\xd4\xd5\xd1\xe8\x03\x39\xb1\x27\xce\x8b\x00\x5e\x63\xe5\xbf\x2b\x1a\x0a\x47\xbe\xb0\x4a\x0a\x65\xf7\x6f\x43\x9a\x24\x97\xfa\x6d\xae\xe0\xbc\xb8\x44\x29\xf4\x41\x68\x2b\x79\xc7\x24\x85\x22\x99\x93\x83\x45\xb2\xe9\x60\xf7\xeb\xc5\xdd\x74\xfd\x00\xb7\xf1\x03\x0c\x9a\xc0\x0c\xa3\xe1\xe4\x25\x5c\x8b\x64\x1e\xff\x7c\x09\x97\xe8\xaf\xf9\x2a\x69\xe2\xb7\xfd\xb1\x48\xbe\xc1\x6c\xb3\x8e\xe3\x41\x2f\x3d\x9c\x5c\xd0\x6e\x57\xff\x92\x62\x8b\x69\x5d\xf2\xf5\x04\x08\x89\x01\xff\x8c\xc1\x68\x04\x47\xfb\xe2\x17\x1d\x80\x3d\x20\x2c\xeb\xf5\x2d\x1d\x40\xa3\xf3\x05\x2a\x92\x50\x7a\x36\x39\xdc\x1d\xd7\x33\x36\xe8\x0e\xa7\xed\x68\x24\xa4\xe8\x69\x7c\x3d\x22\x93\x59\x59\xd3\x24\x21\x58\x48\xad\xf5\x01\x1e\xc9\xed\xac\xae\xa7\xc7\xee\xc0\x5b\x4d\xf0\x54\x52\x95\xd9\xff\x8f\xcd\x68\xce\x3f\xcf\xd4\xc7\x9b\x0f\xdd\x50\xfd\xe5\xe4\x1a\xd4\xd4\x03\x8e\xaf\xbb\xe0\x33\xaa\xb2\x8f\xfc\xd2\xbe\x41\xe5\xb8\xe3\x76\x7c\x0d\xe9\x21\x90\xff\xe7\x64\x35\x05\x78\x95\xac\x6d\xb2\xf8\xbe\x3d\x0b\x41\xd5\x3b\xd1\x94\x40\xd4\xf6\x5a\x69\x38\x36\xb7\x1d\x89\x06\xbe\xaa\x8b\x51\x1d\xd1\x7e\x29\xe7\x76\x6f\xa2\x68\xbe\x5e\xdd\x77\x5f\xca\x0c\x7d\x86\x92\x26\x7d\x3f\x8f\xe7\x9c\x88\xdf\x01\x00\x00\xff\xff\xd2\xd6\x65\xae\x7a\x05\x00\x00") + +func migrations24_accountsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations24_accountsSql, + "migrations/24_accounts.sql", + ) +} + +func migrations24_accountsSql() (*asset, error) { + bytes, err := migrations24_accountsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/24_accounts.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa5, 0xf8, 0xf7, 0xeb, 0xe2, 0x3d, 0xda, 0xe, 0xc2, 0x78, 0x88, 0x16, 0x22, 0xbf, 0x22, 0xa8, 0x5a, 0x17, 0x72, 0xd9, 0xab, 0x56, 0xa8, 0x55, 0x5a, 0x3f, 0x47, 0xf6, 0x18, 0xfa, 0x43, 0xa7}} + return a, nil +} + +var _migrations25_expingest_rename_columnsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x90\xcd\xaa\x83\x30\x10\x85\xf7\x3e\xc5\xec\x2f\x3e\xc1\x5d\xd9\x36\x3b\x7f\x40\x74\x3d\x58\x8d\x61\xc0\xc6\xe2\x28\xa5\x6f\x5f\x4c\x0c\xa4\x76\xd1\xd8\xdd\x99\x09\x5f\x0e\xf3\xc5\x31\xfc\xdd\x48\x4d\xcd\x2c\xa1\xbe\x47\x51\x92\x56\xa2\x84\x2a\x39\xa5\x02\x9a\xb6\x1d\x17\x3d\x33\x32\x29\x2d\x27\x86\x52\xe4\x49\x26\xe0\x5c\xa4\x75\x96\xbb\x67\xa8\x0a\x17\x91\xba\xff\xf7\x2f\xc6\xbe\xff\x04\x59\x0e\x83\x9c\xa8\x5b\x49\x9b\x0d\xf8\x95\x33\x4b\x8b\x99\x18\x46\xad\x0d\xa4\x55\xc3\x2c\x67\xd7\x48\x5a\xa1\x59\x04\xf0\xd7\xe5\xe9\xe3\x76\x74\x74\xe4\xfb\xbb\x8c\x0f\xfd\x8b\x41\xb4\x37\x6d\x53\xb8\x41\xf4\x15\x86\x1b\x44\x4f\xe1\x11\x83\xb8\x57\x78\xcc\x20\xee\x14\x6e\xf4\x2b\x00\x00\xff\xff\x84\x7d\x6a\x64\x81\x02\x00\x00") + +func 
migrations25_expingest_rename_columnsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations25_expingest_rename_columnsSql, + "migrations/25_expingest_rename_columns.sql", + ) +} + +func migrations25_expingest_rename_columnsSql() (*asset, error) { + bytes, err := migrations25_expingest_rename_columnsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/25_expingest_rename_columns.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x80, 0x44, 0x81, 0x62, 0xeb, 0xcf, 0x82, 0xaa, 0x9, 0x14, 0x4c, 0xb6, 0xd2, 0x2c, 0x41, 0x2d, 0xf0, 0x34, 0x4a, 0x18, 0x5a, 0x95, 0x3e, 0x8d, 0x60, 0xbe, 0x5, 0x10, 0xf5, 0xc2, 0x9c, 0x3a}} + return a, nil +} + +var _migrations26_exp_history_ledgersSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\xce\xb1\x0a\xc2\x30\x10\x87\xf1\xfd\x9e\xe2\x3f\x2a\xd2\x27\xe8\x54\x6d\x06\xb1\xa0\x94\x3a\x97\x90\x9c\xf5\xa0\x5e\x4a\x92\x62\x7d\x7b\xc1\x49\x2a\xb8\x7e\xbf\xe5\x2b\x0a\xec\x1e\x32\x44\x9b\x19\xd7\x89\xe8\xd0\x9a\xaa\x33\xe8\xaa\x7d\x63\xc0\xcb\xd4\xdf\x25\xe5\x10\x5f\xfd\xc8\x7e\xe0\x98\xb0\x21\x00\x68\x8e\x27\x83\x15\x7d\x40\xd4\x8d\xb3\x17\x1d\xe0\xf9\x66\xe7\x31\xaf\xb3\x0b\x9a\x72\xb4\xa2\x3f\x22\xea\x79\xe1\x44\xdb\x92\xe8\x7b\xab\x0e\x4f\x25\xaa\xdb\xf3\xe5\xcf\x96\xb3\xc9\x59\xcf\x25\xbd\x03\x00\x00\xff\xff\xe9\x1c\x12\x26\xd1\x00\x00\x00") + +func migrations26_exp_history_ledgersSqlBytes() ([]byte, error) { + return bindataRead( + _migrations26_exp_history_ledgersSql, + "migrations/26_exp_history_ledgers.sql", + ) +} + +func migrations26_exp_history_ledgersSql() (*asset, error) { + bytes, err := migrations26_exp_history_ledgersSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/26_exp_history_ledgers.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb0, 0xf9, 0xc6, 0xfd, 0xcb, 0x11, 0xc3, 0xc0, 0xbf, 0xac, 0x33, 0x8a, 0xc5, 0x8d, 0x47, 0xfd, 0x59, 0xd1, 0x1d, 0x69, 0x17, 0xba, 0xc7, 0xbb, 0xe6, 0x40, 0x58, 0x32, 0x14, 0x97, 0x96, 0x76}} + return a, nil +} + +var _migrations27_exp_history_transactionsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x91\x41\x4a\xc6\x30\x10\x85\xf7\x39\xc5\x2c\x15\xe9\x09\xba\xaa\x36\x0b\xb1\xa0\x94\xba\x2e\xc3\x24\xd6\x81\x3a\x09\xc9\x14\xeb\xed\x05\xa1\x52\xb4\xb1\x1b\xf9\xb7\xf9\x5e\xe0\x9b\xf7\xaa\x0a\x6e\xde\x78\x4a\xa8\x1e\x9e\xa3\x31\x77\xbd\x6d\x06\x0b\x43\x73\xdb\x59\xf0\x6b\x1c\x5f\x39\x6b\x48\x1f\xa3\x26\x94\x8c\xa4\x1c\x24\xc3\x95\x01\x00\xe8\xee\x1f\x2c\x1c\xf1\x2f\xca\x42\xf3\xe2\x58\x26\x70\xfe\x05\x97\x59\x7f\x3e\x53\x90\xac\x09\x59\x7e\x11\x16\xe7\x57\x9f\xcd\x75\xfd\x87\x10\x12\x85\x45\xf4\x50\x66\x63\x17\x11\xd9\x5d\x3e\x46\x4c\xca\xc4\x11\x0b\x62\xa5\xec\x7f\x8a\x9a\xfd\xa6\x6d\x78\x17\x63\xda\xfe\xf1\xe9\x6c\x53\xc2\x4c\xe8\x7c\x5d\x4c\x7f\x17\x7e\x9a\x2c\x36\xb2\xfd\xfc\x0c\x00\x00\xff\xff\x48\x5d\x4e\x5b\x76\x02\x00\x00") + +func migrations27_exp_history_transactionsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations27_exp_history_transactionsSql, + "migrations/27_exp_history_transactions.sql", + ) +} + +func migrations27_exp_history_transactionsSql() (*asset, error) { + bytes, err := migrations27_exp_history_transactionsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/27_exp_history_transactions.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: 
info, digest: [32]uint8{0x13, 0xbf, 0x14, 0x32, 0x4a, 0xc, 0x39, 0x8a, 0xff, 0x9d, 0xaf, 0x3f, 0x8e, 0xb4, 0xe, 0xf2, 0x2b, 0x26, 0x67, 0xf6, 0xcd, 0x20, 0xe2, 0x5b, 0x98, 0x78, 0xc2, 0x10, 0xcc, 0x52, 0xd6, 0xc3}} + return a, nil +} + +var _migrations28_exp_history_operationsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x90\xc1\xaa\xc2\x30\x10\x45\xf7\xf9\x8a\x59\xbe\x87\xf4\x0b\xba\xaa\x36\x0b\xb1\xa0\x94\xba\x2e\x43\x12\xeb\x40\x9d\x84\x64\x8a\xf5\xef\x05\x17\x62\x2d\x54\x17\x6e\xe7\xdc\x81\x73\x6f\x96\xc1\xea\x42\x5d\x44\x71\x70\x0c\x4a\x6d\x6a\x5d\x34\x1a\x9a\x62\x5d\x69\x70\x63\x68\xcf\x94\xc4\xc7\x5b\xeb\x83\x8b\x28\xe4\x39\xc1\x9f\x02\x00\xa8\xb6\x3b\x0d\x73\xfa\x60\xc4\xa6\x1f\x2c\x71\x07\xd6\x9d\x70\xe8\xe5\xfd\x6c\x3c\x27\x89\x48\x3c\x23\xc4\xd6\x8d\x2e\xa9\xff\xfc\x1b\x99\x36\x60\x14\x32\x14\x90\x65\x59\x6c\x92\xfc\xa5\xe4\xeb\x82\xa5\xbf\xb2\x52\x65\xbd\x3f\x2c\x2f\x68\x30\x19\xb4\x2e\xff\x9c\x9d\x16\x7c\xfe\xdd\x03\x00\x00\xff\xff\x70\x31\x67\x84\xb7\x01\x00\x00") + +func migrations28_exp_history_operationsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations28_exp_history_operationsSql, + "migrations/28_exp_history_operations.sql", + ) +} + +func migrations28_exp_history_operationsSql() (*asset, error) { + bytes, err := migrations28_exp_history_operationsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/28_exp_history_operations.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x59, 0x2d, 0xc6, 0x70, 0x76, 0x72, 0xc5, 0xb1, 0xc6, 0xe7, 0xa0, 0xb2, 0xfc, 0x78, 0x71, 0xa, 0x43, 0x8e, 0x53, 0x9, 0x19, 0x10, 0xcb, 0x75, 0x3b, 0x66, 0x1, 0x57, 0x43, 0xd2, 0x8e, 0x89}} + return a, nil +} + +var _migrations29_exp_history_assetsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\xce\xb1\x0a\xc2\x40\x0c\x80\xe1\x3d\x4f\x91\x51\x91\x3e\x41\xa7\x6a\x6f\x10\x0b\x4a\xa9\x73\x09\x77\xb1\x06\x6a\xae\x5c\xae\x58\xdf\x5e\x70\x52\x8a\xeb\xff\x2f\x5f\x51\xe0\xee\x21\x43\xa2\xcc\x78\x9d\x00\x0e\xad\xab\x3a\x87\x5d\xb5\x6f\x1c\xf2\x32\xf5\x77\xb1\x1c\xd3\xab\x27\x33\xce\x86\x1b\x40\x44\x6c\x8e\x27\x87\xbf\xe7\xd3\x45\xfd\x38\x07\xd1\x01\x03\xdf\x68\x1e\x57\xd9\x47\xb5\x9c\x48\x74\x75\x44\x03\x2f\x6c\xb0\x2d\x01\xbe\x51\x75\x7c\x2a\x40\xdd\x9e\x2f\xff\x51\x9e\xcc\x53\xe0\x12\xde\x01\x00\x00\xff\xff\x17\x4e\x53\x21\xce\x00\x00\x00") + +func migrations29_exp_history_assetsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations29_exp_history_assetsSql, + "migrations/29_exp_history_assets.sql", + ) +} + +func migrations29_exp_history_assetsSql() (*asset, error) { + bytes, err := migrations29_exp_history_assetsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/29_exp_history_assets.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x25, 0xdf, 0x37, 0x6, 0x2c, 0x6e, 0x52, 0x9b, 0x16, 0x17, 0x9c, 0x29, 0x41, 0x41, 0xa6, 0x14, 0x59, 0xff, 0x29, 0xfa, 0x12, 0x2a, 0x8d, 0xce, 0x42, 0xad, 0xa6, 0x26, 0xef, 0x6f, 0x27, 0x17}} + return a, nil +} + +var _migrations2_index_participants_by_toidSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x8f\xb1\xca\xc2\x50\x0c\x46\xf7\x3c\x45\xc6\xff\x47\xfa\x04\x9d\xc4\x16\xe9\xd2\x4a\xb5\xe0\x76\x49\xdb\x8b\xcd\xe0\xcd\x25\x37\x20\x7d\x7b\x41\x07\x5b\xbb\xb8\x86\x8f\x73\x72\xb2\x0c\x77\x77\xbe\x29\x99\xc7\x2e\x02\x1c\xda\x72\x7f\x29\xb1\xaa\x8b\xf2\x8a\x93\x44\xd7\xcf\x6e\x12\x1e\xb1\xa9\x71\xe2\x64\xa2\xb3\x93\xe8\x95\x8c\x25\xb8\x48\x6a\x3c\x70\xa4\x60\x09\xbb\x73\x55\x1f\xb1\x37\xf5\x1e\xff\xb6\x5b\x1e\xff\xf3\x2f\xbc\xbd\xf1\xb6\xc6\x9b\x52\x48\x34\xfc\x28\x58\xae\x5f\x0a\x58\x26\x15\xf2\x08\x00\x45\xdb\x9c\xb6\x49\xf9\xea\xfe\xf9\x25\x87\x67\x00\x00\x00\xff\xff\x33\xec\x54\x7a\x15\x01\x00\x00") + +func migrations2_index_participants_by_toidSqlBytes() ([]byte, error) { + return bindataRead( + _migrations2_index_participants_by_toidSql, + "migrations/2_index_participants_by_toid.sql", + ) +} + +func migrations2_index_participants_by_toidSql() (*asset, error) { + bytes, err := migrations2_index_participants_by_toidSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/2_index_participants_by_toid.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdd, 0x9f, 0x5c, 0xe6, 0xd0, 0x43, 0x82, 0xa3, 0x8d, 0xb3, 0x64, 0xb1, 0x2, 0x4b, 0xe1, 0x96, 0x3, 0x92, 0xb3, 0xea, 0x3c, 0x2e, 0xb2, 0xad, 0x47, 0xcd, 0x92, 0x4c, 0x6c, 0x5c, 0x46, 0xfd}} + return a, nil +} + +var _migrations30_exp_history_tradesSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x96\x41\x8f\xda\x3c\x10\x86\xef\xf9\x15\xa3\x3d\x81\x3e\xf8\xd4\x73\xb7\xad\xb4\x65\xd3\x16\x2d\x0a\x2d\x0b\x52\x6f\x96\x63\x0f\xc4\xda\x60\x47\xf6\x44\xec\xf6\xd7\x57\x4e\x1a\x36\x09\x86\x0d\x6a\x39\xce\x8c\x9f\x79\xfd\x7a\x1c\x33\x9d\xc2\x7f\x7b\xb5\xb3\x9c\x10\x36\x45\x14\x4d\xa7\x70\x40\x10\x5c\x6b\x43\x20\x2c\xfa\x38\x3e\x17\x2c\x53\x8e\x8c\x7d\x61\x64\xb9\x44\x07\xdc\xbd\xaf\x6a\x67\xab\xf8\x6e\x1d\xc3\xfa\xee\xf3\x22\x0e\xd5\x8d\x7c\x91\xff\x2d\xe6\x0f\x31\x74\x93\x4d\x4a\x69\x91\x97\x52\xe9\x1d\x48\xdc\xf2\x32\xa7\x40\x46\x18\xed\xc8\x72\xa5\x43\x49\xa5\x25\x3e\xd7\xbc\xf1\x6d\x25\x2b\x45\xc1\x4b\x87\x40\x19\xf6\x9a\x02\xf1\x34\x47\xc8\xb8\x03\x8b\x5b\xb4\xa8\x05\xb6\xe9\x40\xe6\xb8\x82\x0b\x61\x4a\x1f\xe3\x5a\xbe\x06\x9d\xc3\x5a\x84\x8f\x1e\x10\xa4\x01\xef\xd5\x81\x6b\xf2\x8b\x85\x29\x5e\x80\x32\xe3\x3a\xd8\xff\x41\x69\x47\xc8\xe5\xc4\x2f\x69\x6a\x5f\x15\x50\x86\x1e\xd9\x76\xb0\xd3\xbd\x93\xa8\x14\xd4\x1b\x71\x51\xf4\xf6\x11\x78\xb7\x9a\xa0\x29\xd0\x72\x52\x46\x33\x25\x21\x55\x3b\xa5\x09\x92\xe5\x1a\x92\xcd\x62\x31\xa9\x2a\x6f\x8c\x95\x68\x6f\x40\x69\xc2\x1d\xda\x5e\x36\x47\xb9\x43\xcb\x44\x6e\x1c\x4a\xc6\x09\x48\xed\xd1\x11\xdf\x17\x70\x50\x94\x99\xb2\x8e\xc0\x2f\xa3\xb1\xb7\xd4\x6c\xb7\x68\xcf\xb6\x4d\xb9\xc3\x66\xcf\x81\x22\x58\xc5\x5f\xe2\x55\x9c\xcc\xe2\xc7\xa0\x4b\x23\x25\xc7\x6d\x90\xf7\xe8\x1a\x4c\xe5\x69\x1f\xb2\xf7\xe8\xb0\xdc\xaa\x2b\xda\x7f\xa2\xf8\xc8\xfa\x4b\xd1\x47\xce\x05\xdd\xd5\xc6\x94\x63\x0e\xf3\x1c\x2d\xa4\xc6\xe4\xc8\x75\x9d\x2b\xac\x12\xc8\xf4\x9f\x85\xed\x98\xec\xc4\x2a\x46\xef\x34\xbb\x0a\x82\xc9\xd9\x32\x79\x5c\xaf\xee\xe6\xc9\x3a\x30\xa6\xac\xe5\x38\x13\x19\x8a\x27\x98\x7d\x8b\x67\x0f\x30\x1a\xb5\xcf\xe2\xd3\x47\x78\x37\x1e\x0f\xe1\x85\x18\x8d\xbf\x1f\x4e\x2c\x1f\xc8\xec\x18\xdc\x6b\xd1\x73\xbf\x56\x1a\xf9\x0f\x52\x73\x45\xe7\xc9\x7d\xfc\xb3\xe6\x92\x95\x2c\x7d\x61\xed\xa1\x87\x65\x12\xba\xbe\x9b\xc7\x79\xf2\x15\x52\xb2\x88\x30\xea\x5d\x12\x0f\xbf\xcc\xae\x0e\x62\x30\xb9\x39\xb6\x8b\xdc\xde\xe4\x0f\x81\x9f\x5e\x96\x41\x1d\x06\x8b\xef\x8f\xdd\x4
5\xfa\x60\xea\xdb\xb4\xa6\x6f\x6e\xcc\x53\x59\x5c\xe5\x44\x33\x77\x67\xd9\x05\x57\x96\xf9\x4f\xe9\x15\xf4\xce\x94\x4f\x4e\x86\x7c\x72\xf2\xf5\x6e\xb5\xdf\x24\xf3\x1f\x9b\x53\x15\x4a\x0e\x69\x1c\x7a\x5d\x26\xcd\x4b\x72\x7e\x8f\x57\x6e\x2f\xa4\xbe\xfd\xff\xe5\xde\x1c\x74\x14\xdd\xaf\x96\xdf\xcf\xbf\x86\x82\x3b\xc1\x25\xde\x46\xbf\x03\x00\x00\xff\xff\xd4\x84\xb9\x21\xf9\x08\x00\x00") + +func migrations30_exp_history_tradesSqlBytes() ([]byte, error) { + return bindataRead( + _migrations30_exp_history_tradesSql, + "migrations/30_exp_history_trades.sql", + ) +} + +func migrations30_exp_history_tradesSql() (*asset, error) { + bytes, err := migrations30_exp_history_tradesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/30_exp_history_trades.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xaf, 0x32, 0xe, 0xc2, 0x9, 0x37, 0x62, 0x5f, 0x79, 0xfe, 0x3e, 0x11, 0x22, 0x66, 0x56, 0x36, 0x8, 0x0, 0x76, 0x70, 0xc3, 0x5c, 0x16, 0xe3, 0x2b, 0x73, 0x57, 0xe8, 0x38, 0x6c, 0x62, 0x1e}} + return a, nil +} + +var _migrations31_exp_history_effectsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\xce\xb1\xaa\xc2\x30\x14\x87\xf1\xfd\x3c\xc5\x7f\xbc\x17\xe9\x13\x74\xaa\x36\x83\x58\x50\x4a\x9d\x4b\x48\x4e\xeb\x81\x7a\x52\x92\x14\xeb\xdb\x0b\x4e\x62\xc1\xf5\xfb\x2d\x5f\x51\x60\x77\x97\x31\xda\xcc\xb8\xce\x44\x87\xd6\x54\x9d\x41\x57\xed\x1b\x03\x5e\xe7\xfe\x26\x29\x87\xf8\xec\x79\x18\xd8\xe5\x84\x3f\x02\x80\xe6\x78\x32\xf8\xa2\x37\x88\xba\x69\xf1\xa2\x23\x3c\x0f\x76\x99\x36\xd9\x05\x4d\x39\x5a\xd1\x8d\x88\x7a\x5e\x39\xd1\x7f\x49\xf4\xb9\x55\x87\x87\x12\xd5\xed\xf9\xf2\x63\xcb\xd9\xe4\xac\xe7\x92\x5e\x01\x00\x00\xff\xff\xaa\x4f\x70\x26\xd1\x00\x00\x00") + +func migrations31_exp_history_effectsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations31_exp_history_effectsSql, + "migrations/31_exp_history_effects.sql", + ) +} + +func migrations31_exp_history_effectsSql() (*asset, error) { + bytes, err := migrations31_exp_history_effectsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/31_exp_history_effects.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdf, 0x34, 0xf6, 0x6e, 0x86, 0x85, 0x4e, 0xfb, 0x74, 0xee, 0x7d, 0x4, 0xd2, 0xc7, 0x83, 0xae, 0x97, 0xab, 0xca, 0xe1, 0xb3, 0x7c, 0x25, 0xa8, 0x7f, 0x65, 0x2b, 0x1e, 0xdb, 0x88, 0x9b, 0xf}} + return a, nil +} + +var _migrations32_drop_exp_history_tablesSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x57\xd1\x6e\xda\x30\x14\x7d\xe7\x2b\xae\xfa\x04\x1a\x4c\x7b\x5e\xb7\x49\x1d\xcd\x36\x54\x14\x36\x0a\xd2\xde\x22\xc7\xbe\x10\xab\xc1\x8e\x6c\x23\xda\x7d\xfd\x94\xa4\x69\x1d\xc7\xa4\x66\x43\xbc\xfa\x5c\x9f\x7b\x7c\x7c\x7c\x03\x93\x09\xbc\xdb\xf1\xad\x22\x06\x61\x5d\x0c\x06\xb7\xcb\xc5\x4f\x58\xdd\x7c\x9d\x47\x80\x8f\x45\x92\x71\x6d\xa4\x7a\x4a\x70\xb3\x41\x6a\x34\x50\xa2\x29\x61\x78\x7d\xb4\xd0\x28\xc2\x30\xa0\x8e\x68\x8d\x21\x7c\xb2\x40\x45\x0c\x97\x22\x29\x88\x32\x9c\xf2\x82\x88\x93\xf6\x85\x69\x16\x9a\xd0\x7f\xe8\x42\x28\x95\xfb\xa0\x4a\xab\x47\x40\x75\x8e\x6c\x8b\xca\x2e\xb4\xef\xe9\x56\x1e\xc4\x60\x30\x5d\x46\x37\xab\xa8\x67\xf3\x70\x00\x00\x30\x9f\xdd\x45\xe0\x40\x15\xc0\x05\xcd\xf7\x8c\x8b\x2d\x30\xdc\x90\x7d\x6e\xdc\x65\x2a\x85\x36\x8a\x70\xd1\x41\xb8\x60\xf8\x88\x7a\x30\xba\xee\x91\xd1\x3a\xb1\x47\x8b\x8d\x5f\x44\xd0\xcb\x65\x79\xc4\x34\xd8\xa5\x9d\x69\xe7\xad\xdf\xa5\x56\xed\x45\x84\x5a\x8f\xc8\x23\xed\x15\xbd\xac\x98\x37\x3d\xf3\x57\x5e\x26\x63\xf5\x58\xf3\x25\xac\x42\xce\x29\xa2\x9c\x09\x07\x04\x4a\x84\x90\x06\xa8\xc2\x72\x36\x78\x66\x31\xd1\x1f\xab\xda\xbe\x3c\x96\x75\xc3\xb2\xc8\x17\x40\x86\xba\x81\x3c\xd2\x3b\x88\xad\xbe\x03\x36\x07\x98\x4c\x60\x54\x8f\xb5\x14\x29\xd9\x6b\x04\x93\xa1\xd3\x14\x0c\x49\x73\x84\x8c\x68\x50\xb8\x41\x85\x82\xa2\xcd\x0e\x46\x76\x1e\x30\x10\xc1\x5c\xcf\x27\x93\x6a\xf5\x80\xc0\x24\x94\x5e\x1d\x88\x30\xe5\x66\x2a\x8b\x27\x30\x99\xd4\x2d\xda\xf7\xc0\x85\x36\x48\xd8\xb8\xdc\xd2\xd4\xbe\x2a\x30\x19\x96\x94\xde\xd1\x52\xf6\xf1\xe4\xa1\x3a\x88\xee\x1f\x09\xf5\x15\x94\x6e\x75\x73\xcc\x19\xa4\x7c\xcb\x85\x81\x78\xb1\x82\x78\x3d\x9f\x8f\xab\xca\x2b\xa9\x18\xaa\x2b\xe0\xc2\xe0\x16\x95\x83\xd6\xd3\x3e\xa1\xb9\xd4\xc8\x12\x62\xc0\xf0\x1d\x6a\x43\x76\x05\x1c\xb8\xc9\xe4\xbe\x5e\x81\x3f\x52\xa0\xb3\x55\x6e\x36\xa8\x8e\xb6\x4d\x89\xc6\xe6\xcc\x9e\x22\x58\x46\xdf\xa2\x65\x14\x4f\xa3\x7b\xaf\x4b\x43\xce\x46\x36\x51\xe9\xd1\x29\x34\x95\xa7\x2e\xc9\xae\xa4\xf6\xcb\xad\xba\xa2\x3a\x8b\xe2\x17\xae\xff\x14\xfd\xc2\xd3\xa3\xbb\x3a\x18\xd7\x89\xc6\x3c\x47\x05\xa9\x94\x39\x12\x51\x63\x85\xe2\x14\x13\xf1\xbc\xd1\x5e\x63\xad\xb5\x8a\xc3\xb9\xcd\xb6\x02\x2f\x38\x5d\xc4\xf7\xab\xe5\xcd\x2c\x5e\x79\x62\x9a\x58\x8e\x27\x34\x43\xfa\x00\xd3\x1f\xd1\xf4\x0e\x86\x43\xfb\x2e\xbe\x7c\x86\x0f\xa3\x51\x08\x9f\x8f\xa3\xf1\xf7\x53\xc7\xf2\x40\xce\x96\xc1\x4e\x0b\xc7\xfd\x5a\x69\x3d\x54\x9f\x9f\xe8\x2c\xbe\x8d\x7e\xd7\xbc\x46\xb1\x24\x7d\x4a\xec\xd0\xc3\x22\xf6\x3d\xdf\xf5\xfd\x2c\xfe\x0e\xa9\x51\x88\x30\x74\x1e\x89\xf5\xd9\x38\xc2\x5d\x5d\x44\x30\x73\x73\x6d\xbd\xbc\x4e\xf2\x43\xc8\xbb\x8f\x25\xa8\x43\xb0\x78\x37\x76\xbd\xec\xc1\xac\x6f\xb3\x35\x7d\x73\x29\x1f\xf6\xc5\x49\x4e\x34\xb9\x3b\xca\x5d\x10\xae\x92\x72\x94\x9e\xc0\xde\x4a\xf9\xb8\x13\xf2\x71\x67\x7a\x5b\xed\xd7\xf1\xec\xd7\xba\xab\x82\xb3\x90\xc6\xbe\xaf\xcb\xb8\xf9\x92\x1c\x3f\xe3\x89\xc7\xeb\x51\x7f\xfc\xdf\xa5\xe7\x67\xd3\x33\x74\xc6\xdf\x4d\x7f\x03\x00\x00\xff\xff\xf1\x72\xe9\x29\xf2\x0e\x00\x00") + +func migrations32_drop_exp_history_tablesSqlBytes() ([]byte, error) { + return bindataRead( + _migrations32_drop_exp_history_tablesSql, + "migrations/32_drop_exp_history_tables.sql", + ) +} + +func migrations32_drop_exp_history_tablesSql() (*asset, error) { + bytes, err := migrations32_drop_exp_history_tablesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/32_drop_exp_history_tables.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1d, 0xf2, 0xbb, 0xf, 0x6d, 
0x53, 0xdf, 0x60, 0x63, 0x87, 0x3c, 0xe9, 0xdb, 0x18, 0x23, 0x51, 0x40, 0xa4, 0x38, 0xdf, 0xb2, 0x9a, 0xb, 0xa0, 0xe6, 0xbe, 0x91, 0x3b, 0x41, 0x53, 0x69, 0x9d}} + return a, nil +} + +var _migrations33_remove_unusedSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x92\x41\x6f\xe2\x30\x10\x85\xef\xf9\x15\x73\x04\xed\x72\x5b\xed\x85\x93\x49\x66\x59\xab\xc1\x50\xc7\xa9\xca\xc9\x32\x89\x4b\x23\x91\x18\xc5\x46\x85\x7f\x5f\x99\x50\xea\xd0\xb4\xc2\x27\xdb\xf3\xe9\xbd\x79\xa3\x99\x4c\xe0\x57\x5d\x6d\x5b\xe5\x34\xe4\xfb\x28\x4a\xf8\x72\x05\x82\xcc\x52\x04\x65\xad\x76\xd2\x3a\xe5\x2c\x14\xca\x16\xaa\xd4\xd3\x0b\x40\x59\x82\xcf\xa0\x8f\x7b\x19\x40\x72\x73\x92\x85\xb9\x85\xaa\xa6\xd4\x47\xf9\x5a\x59\x67\xda\x93\x74\xad\x6a\xac\x2a\x5c\x65\x1a\x2b\x4d\x23\xab\xf2\x27\x7a\xa7\xcb\xad\x6e\x07\xc1\xce\xf7\xd3\x31\xcc\x91\x98\xb7\x26\x8a\x62\x8e\x44\xe0\x40\x96\x51\x04\x00\x50\x95\xd0\x3b\x33\x3a\xa7\x4c\x74\xf7\x15\xa7\x0b\xc2\xd7\xf0\x80\x6b\xe0\xf8\x0f\x39\xb2\x18\x33\xf8\x68\xeb\x2c\x66\x61\xc9\x20\xc1\x14\x05\x42\x4c\xb2\x98\x24\xe8\x7f\xf2\x55\xe2\x5d\x39\x66\x82\xd3\x58\xfc\x3e\x7b\xa9\xda\x1c\x1a\x37\xec\xc5\x96\x02\x58\x9e\xa6\x1d\xd9\x1c\x6a\xa9\x8a\xc2\xe3\xd6\xbf\x29\x13\x38\x47\x3e\x40\xbe\xec\xd4\xd6\x06\xfd\x67\x0b\x92\xa6\x17\xd5\x3e\xe9\x4c\xbd\x0b\x93\x3e\x11\x1e\xff\x27\x7c\xf4\xf7\xcf\xf8\x4a\x46\xe3\xe9\x75\x62\x03\x03\xf6\xc9\x6e\xd2\xe7\x19\x65\x73\xd8\xb8\x56\x6b\x18\x75\xb0\x27\xbf\xe8\x7c\xb3\x24\x5e\xf1\xa6\x74\x87\x64\xce\xe8\x63\x8e\x77\x6e\x56\xd8\x74\x58\xed\xfb\x54\xe5\x7d\xfa\xbd\x5d\x0c\xa5\x2f\x85\x01\xd5\xf7\x00\x00\x00\xff\xff\x0e\x3f\x2c\x91\x5c\x03\x00\x00") + +func migrations33_remove_unusedSqlBytes() ([]byte, error) { + return bindataRead( + _migrations33_remove_unusedSql, + "migrations/33_remove_unused.sql", + ) +} + +func migrations33_remove_unusedSql() (*asset, error) { + bytes, err := migrations33_remove_unusedSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/33_remove_unused.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5d, 0x11, 0x54, 0x91, 0x3f, 0x65, 0x72, 0x25, 0x53, 0xa7, 0xcf, 0x33, 0xdf, 0x79, 0x48, 0x4a, 0xda, 0xa8, 0x5e, 0x86, 0xff, 0xf1, 0xd7, 0x69, 0xa2, 0xc, 0x85, 0x7f, 0xbf, 0x23, 0xc2, 0xd9}} + return a, nil +} + +var _migrations34_fee_bump_transactionsSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x92\xc1\x4f\xc2\x30\x18\xc5\xef\xfd\x2b\xbe\x23\x44\xb9\x19\x12\xb3\x13\xb2\x46\x49\x66\x47\xc6\x16\x35\xc6\x34\xdd\xfc\xe8\x7a\xa0\x33\x6d\x11\xf7\xdf\x1b\x98\x64\xc5\x95\xe0\xc1\x73\x7f\xdf\x7b\xaf\x2f\x6f\x32\x81\xab\x8d\x92\x46\x38\x84\xe2\x83\x90\x59\x92\xd3\x0c\xf2\xd9\x5d\x42\xa1\x56\xd6\x35\xa6\xe5\xce\x08\x6d\x45\xe5\x54\xa3\x2d\x01\x00\xe8\xa0\x79\x9a\x14\x8f\x0c\xd6\x88\xbc\xaa\x85\x91\xf8\x0e\xf9\xcb\x92\x42\xa9\xa4\xd2\xee\x7a\x48\x6e\xc4\x17\x5f\x23\x06\xa8\x38\x06\xa5\x35\x1a\xdf\x8a\xd7\xc2\xd6\xb0\x57\x16\x95\x43\x03\x9f\xc2\xb4\x4a\xcb\xd1\xf4\x66\xdc\x5f\xed\xcd\x45\x55\x35\x5b\xed\x2e\xa1\x9d\x81\x55\x52\x0b\xb7\x35\x68\x03\xfc\xed\x74\xfc\xfa\xd6\x5f\x68\xdc\xf1\x63\xe6\x2e\x6e\x44\xc8\x3c\xa3\xb3\x9c\xc2\x82\xc5\xf4\x19\xca\x96\x77\xb2\x87\xac\x29\x0b\x56\x06\xc5\x6a\xc1\xee\xa1\x74\x06\x11\x46\xe1\x7f\x8e\xe1\xe9\x81\x66\xf4\x5c\x0b\x8b\x15\xb0\x34\x07\x56\x24\x49\x34\x48\xe0\x77\xf0\xa7\x08\xde\xc1\xd1\xd7\xd7\x38\x31\x23\xfe\x40\xe2\x66\xa7\x09\x89\xb3\x74\x19\xfa\x7f\xf4\xeb\xc5\xd3\x8c\xfe\x65\x58\x4a\x3b\x94\x68\x2e\x2d\xeb\x04\x3b\x44\xfa\xa1\xc2\xe5\x0e\x39\x2f\xf8\x39\x91\x7e\x46\x43\xc2\x9b\x4d\xf4\x1d\x00\x00\xff\xff\x12\x9d\xf6\x27\x5f\x03\x00\x00") + +func migrations34_fee_bump_transactionsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations34_fee_bump_transactionsSql, + "migrations/34_fee_bump_transactions.sql", + ) +} + +func migrations34_fee_bump_transactionsSql() (*asset, error) { + bytes, err := migrations34_fee_bump_transactionsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/34_fee_bump_transactions.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd6, 0x2a, 0xe3, 0xb, 0x10, 0x90, 0x5a, 0x2b, 0x39, 0x76, 0x69, 0xe, 0x93, 0xcd, 0x5f, 0x61, 0xd1, 0x9a, 0xf2, 0xec, 0x95, 0x46, 0xc9, 0x79, 0x68, 0xd, 0xa9, 0x25, 0x4, 0xaf, 0x2a, 0x46}} + return a, nil +} + +var _migrations35_drop_participant_idSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\xce\x31\x0e\xc2\x30\x0c\x46\xe1\x3d\xa7\xf8\x77\xd4\x13\x30\x05\xd2\x2d\x50\x54\xb5\x73\x65\x95\xa8\x78\xc0\x89\x1c\x4b\x88\xdb\x33\xc2\x90\x01\x38\xc0\x7b\xfa\xba\x0e\xbb\x3b\x6f\x4a\x96\x30\x17\xe7\x7c\x9c\xfa\x11\x93\x3f\xc4\x1e\x37\xae\x96\xf5\xb9\xe4\x92\x94\x8c\xb3\x2c\x85\xd4\x78\xe5\x42\x62\xd5\x01\x40\x18\x87\x0b\x8e\x43\x9c\x4f\x67\xf0\x75\xdf\xee\x4d\x49\x2a\xad\x5f\x1e\x3e\x45\x21\x3f\xe4\x67\x93\x0f\xe1\x3d\x04\x8b\xa5\x2d\xe9\x3f\xb4\xf6\xe8\x15\x00\x00\xff\xff\xfa\x9c\x4a\x38\x32\x01\x00\x00") + +func migrations35_drop_participant_idSqlBytes() ([]byte, error) { + return bindataRead( + _migrations35_drop_participant_idSql, + "migrations/35_drop_participant_id.sql", + ) +} + +func migrations35_drop_participant_idSql() (*asset, error) { + bytes, err := migrations35_drop_participant_idSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/35_drop_participant_id.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xff, 0x7, 0x95, 0xf6, 0xca, 0xd3, 0x76, 0xfd, 0x68, 0x9b, 0xc5, 0x95, 0xb7, 0x43, 0x56, 0x2c, 0xbe, 0x33, 0x3d, 0xee, 0x33, 0xb7, 0xec, 0xf8, 0x18, 0xd6, 0x8a, 0xf3, 0x31, 0x37, 0x8f, 0x62}} + return a, nil +} + +var _migrations36_deleted_offersSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x92\x41\x6e\xb3\x30\x10\x85\xf7\x3e\xc5\x2c\x83\x7e\x72\x02\xf4\x2f\x48\xec\x54\x91\x08\x54\xd4\x48\xdd\x59\x50\x0f\xd4\x92\x03\x91\xed\x34\xca\xed\xab\x24\x40\x29\x85\x6e\xca\xd2\x1e\xcd\x37\xef\xcd\x9b\xf5\x1a\xfe\x1d\x55\x65\x72\x87\x90\x9d\x08\x09\x23\xce\x52\xe0\xe1\x26\x62\xd0\x94\x25\x1a\x0b\x21\xa5\x20\x51\xa3\x43\x09\x45\xd3\x68\xcc\x6b\xa0\x6c\x17\x66\x11\x87\x32\xd7\x16\x03\x42\xb6\x29\x0b\x39\x83\x7d\x4c\xd9\x2b\x14\x68\x9d\xb8\x37\x43\x12\x77\x94\xec\x65\x1f\x3f\xc1\x86\xa7\x8c\xc1\xca\xa2\xd6\xaa\xae\x44\x6e\x2d\x3a\x1f\x8a\xf3\x75\xf0\x6a\x67\xf9\x70\x32\xea\x0d\xbd\xe0\x3b\x5c\xab\x0f\x14\x2d\x73\x86\xde\x03\x74\x6e\x9d\x38\x36\x52\x95\x0a\xa5\xd0\x28\x2b\x34\x5e\x40\x08\x4d\x93\xe7\x16\xf7\xe8\x17\xc5\x55\xdc\x34\xa1\xf1\x47\x3f\x03\x5d\x5f\x85\xa1\xde\xb1\xf9\x31\x70\x5a\xe4\xea\x51\x14\x4a\xf6\x7e\xc7\x46\x67\x74\xfc\xc2\xfb\xb9\xc3\x79\xe6\xd0\xc2\x0c\x72\x32\x95\xdb\xfa\x86\x37\x43\x9b\x4b\x4d\x08\x65\x11\xe3\x0c\x76\x69\x72\xe8\x48\x97\x77\x34\xd8\xdf\xcd\x7f\x70\xe6\x8c\xcb\xaf\x7e\xe2\x5c\xef\x13\xb6\x49\x94\x1d\xe2\x6e\xfc\x5f\x33\x5a\x30\x9a\xc5\x12\xf1\x02\xf2\x19\x00\x00\xff\xff\xfa\xd2\x27\x51\xbc\x03\x00\x00") + +func migrations36_deleted_offersSqlBytes() ([]byte, error) { + return bindataRead( + _migrations36_deleted_offersSql, + "migrations/36_deleted_offers.sql", + ) +} + +func migrations36_deleted_offersSql() (*asset, error) { + bytes, err := migrations36_deleted_offersSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/36_deleted_offers.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8c, 0xf8, 0x3f, 0x36, 0xd9, 0x93, 0x75, 0x16, 0xab, 0x80, 0xc6, 0x1d, 0xc6, 0x4a, 0x4e, 0x0, 0x2, 0xa9, 0xac, 0x37, 0x1c, 0x6a, 0x4f, 0x53, 0xf0, 0xe8, 0x78, 0x41, 0xeb, 0xd2, 0x85, 0x3}} + return a, nil +} + +var _migrations37_add_tx_set_operation_count_to_ledgersSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\xcd\x31\xae\x02\x21\x10\x06\xe0\x9e\x53\xfc\xfd\xcb\x9e\x60\x2b\x9e\x60\x35\xee\x9a\x0d\xd4\x64\xa3\x13\x24\x51\xd8\x0c\x63\xd4\xdb\xdb\xda\xe8\x05\xbe\x6f\x18\xf0\x77\x2b\x59\x56\x65\xc4\xcd\x18\x4b\xc1\x2f\x08\xf6\x9f\x3c\x2e\xa5\x6b\x93\x57\xba\xf2\x39\xb3\x74\x58\xe7\xa0\xcf\xd4\x59\x53\xdb\x58\x56\x2d\xad\xa6\x53\xbb\x57\x45\xa9\xca\x99\x05\xce\xef\x6d\xa4\x80\x29\x12\x8d\xc6\x7c\xea\xae\x3d\xea\x6f\xdf\x2d\xf3\x11\xbb\x99\xe2\x61\xfa\xf2\x8c\xe6\x1d\x00\x00\xff\xff\x32\x43\x33\x1d\xb0\x00\x00\x00") + +func migrations37_add_tx_set_operation_count_to_ledgersSqlBytes() ([]byte, error) { + return bindataRead( + _migrations37_add_tx_set_operation_count_to_ledgersSql, + "migrations/37_add_tx_set_operation_count_to_ledgers.sql", + ) +} + +func migrations37_add_tx_set_operation_count_to_ledgersSql() (*asset, error) { + bytes, err := migrations37_add_tx_set_operation_count_to_ledgersSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/37_add_tx_set_operation_count_to_ledgers.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc2, 0xf1, 0x5, 0x98, 0x10, 0xbb, 0xd8, 0x0, 0x7d, 0xda, 0xff, 0x9a, 0x91, 0xc8, 0xea, 0x31, 0x96, 0xba, 0xfb, 0x44, 0x84, 0xbe, 0x41, 0xc3, 0xac, 0x15, 0x42, 0x4c, 0xc0, 0x1e, 0xeb, 0xe8}} + return a, nil +} + +var _migrations38_add_constraintsSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x58\x5d\x6f\xdb\x2c\x14\xbe\xef\xaf\xb0\x7a\xf5\xbe\x9a\x2a\xed\xbe\x5a\xa5\xac\xa9\xb4\x6a\x55\x3b\x75\xd9\x6e\x11\xb1\x8f\x13\x24\x0c\x1e\xe0\xa6\xdd\xaf\x9f\x92\xd8\x2e\x18\x03\x36\x66\xbb\xe4\xe0\xe7\x79\xcc\xc7\xf9\xe2\xea\x2a\xfb\x50\x91\x9d\xc0\x0a\xb2\x1f\xf5\xc5\xc5\xea\x61\x73\xf7\x9c\x6d\x56\x9f\x1f\xee\xb2\x3d\x91\x8a\x8b\x37\xa4\x04\x66\x12\xe7\x8a\x70\x26\xb3\xd5\x7a\x9d\xdd\x3e\x3d\x7e\xdf\x3c\xaf\xee\x1f\x37\xd9\x0b\xa6\xa4\x40\x14\x8a\x1d\x08\x24\xe1\x57\x03\x2c\x87\xec\xf6\xcb\xdd\xed\xd7\xec\xbf\xa1\xf9\x26\xfb\xf8\x7f\xf6\xf8\xb4\xc9\x7e\xae\x1e\xee\xd7\xd7\x91\x62\xb8\xae\x29\xc9\xf1\xf1\x0b\xc4\x45\x01\xa2\x93\xb3\x27\x6e\x3e\xa5\x51\xcc\x73\xde\x30\x65\xad\xcf\xb2\x27\xd2\xab\xf0\x2b\x2a\xa1\x97\xe9\x86\x69\xb6\x8f\xd7\x20\xce\x7b\x74\xfa\xf9\x4e\x64\x68\x4e\xb4\x94\x12\x00\xe5\x7b\x2c\x76\x50\x74\x4a\xba\x29\xcd\x92\x18\x1c\x86\x7b\xa6\x9b\x86\x22\x86\x0a\x2f\x4b\x10\xae\x73\xaf\xf4\x1d\x6a\x47\xde\x8d\xf1\xb1\xd5\x82\xe4\xc0\x3a\xb6\x76\xb4\x8c\xad\x30\xd8\x8a\x85\x6c\x06\x59\x3c\x17\xc5\x52\xa1\x8a\x17\xa4\x24\xd0\x85\x86\x3e\x22\x8c\xcd\x79\xcf\x47\x89\x46\x2a\x44\x09\x03\x87\xdc\x16\x53\xac\xf9\x64\x37\xf4\xfe\x7e\x90\xf4\xfd\x03\x44\x49\x45\xfa\x3b\x60\xd9\x97\xc9\x6c\x9b\x37\xc2\x76\x88\x12\xbc\x25\x94\x28\x02\xb2\x5f\x86\x3d\xb3\x4c\x4a\x02\xa5\x0e\xad\xb1\xa9\x65\x62\x69\xaf\x80\x16\x08\x0a\x97\xe2\xe9\xce\x22\xd3\xbb\x50\xc0\xbd\xa6\xf3\x9a\x7e\x86\x02\x8e\x36\x85\xd7\x48\x5c\x97\xa7\xd1\xa5\xcd\x3a\x4a\xdb\x47\xea\xbf\x90\x1f\x47\x05\xcf\x67\xe4\xbc\x57\x66\x4e\x9c\x95\xec\xbd\xcc\x5a\xdc\x37\x53\x95\x3d\x31\xe9\x38\xbc\x62\x29\xb3\xa2\x7f\x55\x5c\x61\x8a\x72\x4e\x58\xef\x7f\xba\x69\xb9\xc0\x31\xbd\xd6\x9c\x53\x3d\xdd\x9e\xc6\xcb\xa9\xb7\x58\x82\x9e\x64\xfb\x71\x22\x6a\x01\x12\xc4\x8b\x49\xdf\xd9\x96\x4b\x1c\xcb\x01\xf5\x8a\x24\x28\x24\xc9\x6f\xa3\xba\xd2\xcd\xcb\x85\x64\x93\xe7\x20\x65\xd9\x50\xf7\x1d\xf6\x7e\x93\xe0\x12\x60\x42\xc1\xe3\x42\xce\xf9\x69\x11\x01\xca\x12\x72\x95\x2e\xb4\xc1\x6b\x8d\xb0\x3c\x1d\x81\xc2\x2e\x5e\xd6\x54\x5d\x25\xde\xbb\x8e\x61\xf3\x6b\x74\x9f\x21\x49\x76\xcc\xb9\x71\x07\x20\xbb\x7d\xbf\x4d\xed\x68\x22\x71\x81\x15\xfe\x17\xf9\xb0\x5f\x70\xb2\x7a\x28\xc0\x98\xac\x4a\xf1\xeb\xa4\x2c\x51\xfc\x4a\x8b\xcf\x63\xce\xa2\xce\xf9\xf0\x78\x7d\xb7\xef\x2a\x43\xf3\x82\xc5\x1c\x7d\x40\x36\x5b\x60\x4a\x68\x3b\x36\xb0\x2e\xe0\xaf\xb0\x54\x20\x06\x9e\x61\x1a\x17\xb0\xab\xbd\x00\xb9\xe7\xb4\x40\x94\x1f\xfa\x8c\x68\x18\x93\xb0\x57\x50\x90\xa6\xb2\x05\x5a\x7b\x12\x8d\x3d\xd9\xed\x6d\x85\x93\xd5\x0e\x21\xfa\xcb\xcb\x9a\x1f\xd8\x84\xb7\x97\xf5\xf3\xd3\xb7\xe0\xe3\xcb\x84\x26\x7a\x9c\xc7\x2a\x0f\xe3\x99\x06\xef\x22\xd1\x44\x6d\x07\x1f\x8d\x1f\x94\x6f\xd1\x3c\xda\x93\x45\x34\x87\xf6\x22\x31\xfe\x04\xe1\xd8\xcb\xca\xfe\x73\x2f\xe0\xfc\xb0\x30\x17\x50\xcc\x05\xcc\xf8\x7e\x2c\xa2\x7a\xba\xfc\x71\x92\x36\x8f\xb9\x1b\xd1\x71\xd8\xb0\x61\x9f\x8b\xb7\x33\xdc\x5c\x86\x91\xd4\x35\x97\x62\xc2\x06\x0e\x7a\x4e\xcf\xc1\x21\xe6\xed\x56\x7d\x48\xf7\xe5\x77\x23\xdb\x48\x12\xea\x64\x27\x87\x24\x6f\x2d\xec\x3a\x01\x5f\x18\xf2\x63\xad\x0a\x39\x86\x64\x52\x18\x0a\xfc\xc7\x7b\x8f\x18\x03\xef\x9a\xc0\x18\x6c\xd7\xe5\x45\x63\xdb\x16\x2e\x06\x3f\x68\xce\x62\x28\x7c\x9d\x56\xd4\x56\x3a\x5a\xa7\x40\xab\x34\xdd\x3b\x86\xcd\x90\x23\x9d\x68\x9d\x4f\xa8\xd3\x19\x67\x38\xd7\x6e\xde\x66\x26\x3a\x1c\xf5\x15\xd3\x8c\x60\x1e\xc2\x04\x22\x71\x00\x1e\x0c\xc3\x01\xfc\xf8\xa2\x67\xfd\x80\x51\xf1\xcf\xc2\x9a\xc5\xfc\x2c\xa8\x51\xa8\xcf\x42\x1a\x45\x78\x24\xf2\x5c\x5d\x47\x82\x8f\x85\xf3\xf5\xc5\x9f\x00\x00\x00\xff\xff\x73\x20\xdd\xff\xa2\x1
c\x00\x00") + +func migrations38_add_constraintsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations38_add_constraintsSql, + "migrations/38_add_constraints.sql", + ) +} + +func migrations38_add_constraintsSql() (*asset, error) { + bytes, err := migrations38_add_constraintsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/38_add_constraints.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x12, 0x99, 0x8d, 0xfe, 0xb4, 0x31, 0xeb, 0xc7, 0x56, 0x68, 0x95, 0x4f, 0xa3, 0x3e, 0x5d, 0x18, 0xf, 0x6d, 0x90, 0xc3, 0x6d, 0xf4, 0xd7, 0x75, 0x9a, 0xfb, 0x7e, 0x63, 0x18, 0xb5, 0xc5, 0xc}} + return a, nil +} + +var _migrations39_claimable_balancesSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x92\x41\x4f\xbb\x40\x10\xc5\xef\x7c\x8a\x77\x84\xfc\x4b\xf2\x3f\x18\x2f\x4d\x4c\x5a\xd8\x28\x69\xa5\x0d\xd2\xa4\x3d\x6d\x16\x76\xc5\x31\xb0\x4b\xba\x6b\xd4\x6f\x6f\x0a\xad\xd5\x4a\xb5\x27\x12\xde\x9b\xdf\xce\xbc\x99\x30\xc4\xbf\x86\xaa\xad\x70\x0a\xab\xd6\xf3\xa2\x8c\x4d\x72\x86\x7c\x32\x9d\x33\x94\xb5\xa0\x46\x14\xb5\xe2\x85\xa8\x85\x2e\x95\x85\xef\x01\x00\x49\xe4\x6c\x9d\x23\x5d\xe4\x48\x57\xf3\xf9\x08\x61\x88\xe8\xe0\x9e\xf6\xe6\x24\x06\x69\x14\xc2\xaa\xeb\xab\xae\xaa\xc7\x69\x67\xf1\x6c\x8d\x2e\x8e\xd5\x9d\x2a\xac\x55\x0e\x4e\xbd\xb9\x53\xa1\x31\x2f\xda\xa1\xa0\x8a\xf4\x51\x43\x74\xc7\xa2\x19\xfc\xbd\x7a\x83\xff\x41\x6f\xb7\xad\xd1\xd6\x6c\xbb\x06\xfb\x3f\xb5\xb0\x8e\x37\x46\xd2\x23\x29\xc9\x6b\x25\x2b\xb5\x05\x69\xa7\x76\xdf\xef\x6f\x2d\xb3\xe4\x7e\x92\x6d\x30\x63\x1b\xf8\x24\x03\x2f\x18\x7f\x66\x92\xa4\x31\x5b\x0f\x64\xc2\x8b\x77\xde\x37\xbf\x48\x87\x22\x5b\x3d\x24\xe9\x2d\xa6\x79\xc6\x98\xdf\xf9\x82\xf1\x25\xc8\xc3\x1c\x7f\x43\xf7\xce\x33\xd8\x1f\xe0\xe3\x1a\x7e\x43\x57\xa4\xfd\x93\x85\xf1\x56\xb8\x27\x6e\x5a\x7b\xd9\x00\x43\xb1\x73\xa1\x25\x27\x79\xc1\x50\x43\xd5\x23\x90\xdc\x2d\xe4\xeb\xd1\xc6\xe6\x55\x7b\x5e\x9c\x2d\x96\xe7\x8f\xb6\x14\xb6\x14\x52\x8d\xbd\x8f\x00\x00\x00\xff\xff\x76\x45\x17\x78\xee\x02\x00\x00") + +func migrations39_claimable_balancesSqlBytes() ([]byte, error) { + return bindataRead( + _migrations39_claimable_balancesSql, + "migrations/39_claimable_balances.sql", + ) +} + +func migrations39_claimable_balancesSql() (*asset, error) { + bytes, err := migrations39_claimable_balancesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/39_claimable_balances.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x58, 0xfd, 0xd0, 0xdd, 0xbf, 0xf, 0xb8, 0x78, 0xe3, 0x2, 0xe6, 0xa8, 0x94, 0x11, 0x15, 0x24, 0x35, 0xe0, 0x9c, 0xc9, 0x34, 0x36, 0xa3, 0x38, 0xdb, 0xb7, 0x86, 0xd0, 0xf2, 0xbe, 0x77, 0x49}} + return a, nil +} + +var _migrations39_history_trades_indicesSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\xce\xb1\x0a\xc2\x40\x0c\x80\xe1\x3d\x4f\x11\x3a\x29\xda\x27\xe8\xa4\xf6\x90\x2e\xad\xd4\x16\xdc\x8e\xd3\x0b\x36\x83\xcd\x91\x8b\x88\x6f\x2f\x08\x42\x07\xd7\x1f\x7e\xf8\xca\x12\x37\x0f\xbe\x6b\x30\xc2\x31\x01\x1c\x7a\xb7\x1b\x1c\x36\x6d\xed\x2e\x38\x99\x46\x9f\x02\xab\x4f\x1c\xb1\x6b\x71\xe2\x6c\xa2\x6f\x6f\x1a\x22\x65\x1c\xcf\x4d\x7b\xc4\xfd\xd0\x3b\xb7\xba\x86\x4c\x3e\xe4\x4c\xe6\x39\x6e\xf1\x26\xcf\xd9\x48\x17\xe5\xf7\x4a\x22\x0d\xc6\x32\x7f\x6b\x21\x1a\x49\x8b\x75\x05\xb0\xa4\xd4\xf2\x9a\x01\xea\xbe\x3b\xfd\xa3\x54\xf0\x09\x00\x00\xff\xff\xb2\xf0\x3e\xfe\xb7\x00\x00\x00") + +func migrations39_history_trades_indicesSqlBytes() 
([]byte, error) { + return bindataRead( + _migrations39_history_trades_indicesSql, + "migrations/39_history_trades_indices.sql", + ) +} + +func migrations39_history_trades_indicesSql() (*asset, error) { + bytes, err := migrations39_history_trades_indicesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/39_history_trades_indices.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x50, 0x44, 0xfd, 0xa3, 0xc4, 0xc9, 0x68, 0x24, 0xb5, 0xf7, 0xea, 0xf3, 0x46, 0xb9, 0x2, 0xc1, 0x2d, 0xe7, 0xce, 0xd3, 0x42, 0x93, 0x2e, 0x87, 0x22, 0xca, 0xad, 0x97, 0xa9, 0xa, 0x6a, 0xe3}} + return a, nil +} + +var _migrations3_use_sequence_in_history_accountsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x91\x4d\x6b\xb3\x40\x14\x85\xf7\xf3\x2b\xce\x2e\xca\xfb\x66\x91\x6d\x5c\x4d\xc6\x1b\x22\x8c\x63\x3b\x5e\xdb\x64\x25\xa2\x43\x3a\x90\x6a\xeb\xd8\xaf\x7f\x5f\x48\xd3\x0f\x08\x6d\xa1\xcb\x73\x78\xe0\x39\xdc\x3b\x9f\xe3\xdf\xad\xdf\x8f\xcd\xe4\x50\xdd\x09\x65\x49\x32\xa1\xa4\xcb\x8a\x8c\x22\xdc\xf8\x30\x0d\xe3\x4b\xdd\xb4\xed\xf0\xd0\x4f\xa1\xf6\x5d\x1d\xdc\xbd\x00\x80\x92\xa5\x65\x5c\x67\xbc\xc1\xe2\x58\x64\x46\x59\xca\xc9\x30\x56\xbb\x53\x65\x0a\xe4\x99\xb9\x92\xba\xa2\x8f\x2c\xb7\x9f\x59\x49\xb5\x21\x2c\x12\x51\x92\x26\xc5\x08\x6e\x7a\x6c\x0e\xd1\xec\x1b\xef\xec\x3f\xa2\x13\x99\xcb\x6d\xe4\xbb\x18\x6b\x5b\xe4\x67\x33\xe3\x38\x11\x52\x33\x59\xb0\x5c\x69\x42\x61\xf4\xee\x0c\xc2\x1b\xa1\x0a\x5d\xe5\x06\xbe\x43\x49\x8c\x94\xd6\xb2\xd2\x8c\xde\x3d\xff\xbc\x64\xb9\x1c\xdd\xbe\x3d\x34\x21\xc4\x89\x10\x5f\xcf\x98\x0e\x4f\xfd\x1f\xec\xa9\x2d\x2e\xde\xf5\x89\x38\xa6\xdf\xde\x90\x88\xd7\x00\x00\x00\xff\xff\x55\xe2\xdd\x2c\xbf\x01\x00\x00") + +func migrations3_use_sequence_in_history_accountsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations3_use_sequence_in_history_accountsSql, + "migrations/3_use_sequence_in_history_accounts.sql", + ) +} + +func migrations3_use_sequence_in_history_accountsSql() (*asset, error) { + bytes, err := migrations3_use_sequence_in_history_accountsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/3_use_sequence_in_history_accounts.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x5, 0xcc, 0xcc, 0x51, 0xec, 0x12, 0x8, 0x85, 0x4d, 0x85, 0x25, 0x32, 0x18, 0x23, 0x54, 0xc5, 0x10, 0x42, 0x5b, 0x51, 0x28, 0xe4, 0x9, 0xa2, 0x56, 0xdc, 0xb5, 0xf8, 0x41, 0x1b, 0x28, 0x8}} + return a, nil +} + +var _migrations40_fix_inner_tx_max_fee_constraintSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x8f\xc1\x8b\x82\x40\x1c\x85\xef\xf3\x57\xbc\xe3\x2e\xcb\x80\x77\x71\x61\xd6\x11\x56\x12\x0d\x9b\xba\xca\x64\x63\x0e\xe4\x4f\x19\x27\xb3\xff\xbe\x4b\x41\x1d\xa2\x43\x1d\x1f\x3c\xf8\xbe\x8f\x73\xfc\x74\x76\xef\xb4\x37\x58\x0f\x8c\x89\x4c\x25\x25\x94\xf8\xcb\x12\xb4\x76\xf4\xbd\x3b\x57\xde\x69\x1a\x75\xed\x6d\x4f\x23\x64\x59\x2c\x11\x17\xf9\x4a\x95\x22\xcd\x15\x26\x7d\xb0\xbb\xaa\xd3\x73\xd5\x18\x13\x32\xce\x91\x12\x19\x07\x3f\xc3\x12\x1a\x63\xb0\x3d\x76\x03\x6a\x4d\x68\xf5\x64\x70\x7d\x46\xc1\x6b\x94\x90\xf2\x29\x09\xf1\x7f\x12\x2f\xf0\x75\x9b\xbf\x11\x82\x6f\xe4\x85\xc2\x46\x64\xa9\x0c\x19\xbb\x2f\x93\xfd\x89\xde\x6f\xfb\xac\xf0\xa3\xef\x25\x00\x00\xff\xff\x59\x2c\x3c\x0b\x88\x01\x00\x00") + +func migrations40_fix_inner_tx_max_fee_constraintSqlBytes() ([]byte, error) { + return bindataRead( + _migrations40_fix_inner_tx_max_fee_constraintSql, + 
"migrations/40_fix_inner_tx_max_fee_constraint.sql", + ) +} + +func migrations40_fix_inner_tx_max_fee_constraintSql() (*asset, error) { + bytes, err := migrations40_fix_inner_tx_max_fee_constraintSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/40_fix_inner_tx_max_fee_constraint.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9d, 0x58, 0xe3, 0x4c, 0x97, 0xa8, 0x17, 0x34, 0xca, 0x20, 0x47, 0x8c, 0x78, 0x9b, 0x81, 0x52, 0x3d, 0xde, 0xc8, 0xf1, 0x13, 0x7a, 0x46, 0xd1, 0xa0, 0x38, 0x43, 0x63, 0xcc, 0xc6, 0xd, 0xa5}} + return a, nil +} + +var _migrations41_add_sponsor_to_state_tablesSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x92\xd1\x4a\x87\x30\x14\xc6\xef\x7d\x8a\x73\x59\xc4\xff\x09\xbc\x9a\xed\x10\x82\x68\xac\x09\xde\x8d\x65\x2a\x42\x6d\xb2\x4d\xa2\xb7\x8f\x70\xab\x25\xad\xd6\xf5\x7e\xe7\xfc\xf6\x1d\xbe\xcb\x05\x6e\x5e\xd6\xc5\x48\x37\x41\xbf\x15\x05\x69\x38\x32\xe0\xa4\x6a\x10\xe4\x38\xea\x5d\x39\x0b\x84\x52\xb0\x9b\x56\x56\x1b\xe0\x38\xf0\xb2\xb8\x65\x48\x38\x42\xdd\x52\x1c\x3e\x39\xf1\xf8\x26\x02\xd6\xb5\x5f\xe3\xfd\x43\xdd\xde\x41\xc5\x19\xe2\x95\x7f\xbf\x2e\x7f\x56\x89\x27\xe9\x64\xb6\xef\x03\x4e\x48\x8f\x45\xff\x31\xdb\x75\x51\x93\xc9\x0f\xeb\xf9\x94\x3f\xac\xcb\xf8\x82\x33\xbb\x75\xe2\x79\x55\xd3\x9f\xf6\x08\x3d\x89\xe3\x25\x19\x4e\x3d\xcf\x19\x61\x0f\xea\x64\xf2\xa3\x09\x49\x5c\x28\xaa\x5f\x55\xa2\x52\x94\x75\xf7\xc1\x5c\xfe\x52\x85\x0c\x2e\x5c\x3a\x8d\xc6\xb7\x49\x53\x3e\xd7\x77\xe0\x3d\x00\x00\xff\xff\xb5\xbf\x3f\xcc\x20\x03\x00\x00") + +func migrations41_add_sponsor_to_state_tablesSqlBytes() ([]byte, error) { + return bindataRead( + _migrations41_add_sponsor_to_state_tablesSql, + "migrations/41_add_sponsor_to_state_tables.sql", + ) +} + +func migrations41_add_sponsor_to_state_tablesSql() (*asset, error) { + bytes, err := migrations41_add_sponsor_to_state_tablesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/41_add_sponsor_to_state_tables.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf8, 0xb2, 0xc6, 0x2e, 0x87, 0x75, 0x3, 0x0, 0x9c, 0x98, 0xc2, 0x26, 0x7, 0x99, 0x48, 0x2c, 0xcd, 0x44, 0xc5, 0x57, 0x0, 0x5c, 0xfe, 0xb9, 0x9, 0x1e, 0x2b, 0x55, 0xb5, 0xaa, 0x5, 0xab}} + return a, nil +} + +var _migrations42_add_num_sponsored_and_num_sponsoring_to_accountsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\xce\xb1\x0a\xc2\x30\x14\x85\xe1\xfd\x3e\xc5\x19\x15\x5b\xe8\x5e\x14\x62\x13\x11\x8c\x56\x4a\x3b\x4b\xa9\x21\x64\xe8\x4d\x69\x52\x7c\x7d\xd1\x49\xc1\x80\xf3\x39\xfc\x7c\x79\x8e\xcd\xe8\xec\xdc\x47\x83\x6e\x22\x12\xba\x55\x0d\x5a\xb1\xd7\x0a\xfd\x30\xf8\x85\x63\x20\x21\x25\xaa\x5a\x77\xe7\x0b\x78\x19\x6f\x61\xf2\x1c\xfc\x6c\xee\x70\x1c\x8d\x35\x33\xa4\x3a\x88\x4e\xb7\x28\x50\x1d\x55\x75\xc2\xea\xfb\xb6\xdb\xa2\x58\x67\x89\x8c\x63\xfb\x57\xe7\xf5\x7b\x87\x4a\xa2\x4f\xb5\xf4\x0f\x4e\xb8\x65\x53\x5f\x7f\xc2\xb3\xd4\xe4\xd8\x96\xf4\x0c\x00\x00\xff\xff\xb6\x54\xe4\xeb\x14\x01\x00\x00") + +func migrations42_add_num_sponsored_and_num_sponsoring_to_accountsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations42_add_num_sponsored_and_num_sponsoring_to_accountsSql, + "migrations/42_add_num_sponsored_and_num_sponsoring_to_accounts.sql", + ) +} + +func migrations42_add_num_sponsored_and_num_sponsoring_to_accountsSql() (*asset, error) { + bytes, err := 
migrations42_add_num_sponsored_and_num_sponsoring_to_accountsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/42_add_num_sponsored_and_num_sponsoring_to_accounts.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfd, 0x15, 0x84, 0x5c, 0x24, 0x15, 0x82, 0x45, 0x31, 0xe7, 0x89, 0x90, 0xd9, 0x7f, 0xb6, 0xf8, 0xc3, 0x21, 0xc9, 0xd3, 0xa9, 0x11, 0xee, 0x6d, 0x7a, 0x19, 0xcc, 0xba, 0xd7, 0x20, 0xbd, 0xdc}} + return a, nil +} + +var _migrations43_add_claimable_balances_flagsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\xe2\xe2\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x48\xce\x49\xcc\xcc\x4d\x4c\xca\x49\x8d\x4f\x4a\xcc\x49\xcc\x4b\x4e\x2d\x56\x70\x74\x71\x51\x48\xcb\x49\x4c\x2f\x56\xc8\xcc\x2b\x51\xf0\xf3\x0f\x51\xf0\x0b\xf5\xf1\x51\x70\x71\x75\x73\x0c\xf5\x09\x51\x30\xb0\xe6\xe2\x42\x36\xd2\x25\xbf\x3c\x8f\xa0\x99\x2e\x41\xfe\x01\x10\x43\xad\xb9\xb8\x00\x01\x00\x00\xff\xff\x88\xef\xa8\x21\x91\x00\x00\x00") + +func migrations43_add_claimable_balances_flagsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations43_add_claimable_balances_flagsSql, + "migrations/43_add_claimable_balances_flags.sql", + ) +} + +func migrations43_add_claimable_balances_flagsSql() (*asset, error) { + bytes, err := migrations43_add_claimable_balances_flagsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/43_add_claimable_balances_flags.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x91, 0x91, 0xc9, 0xa3, 0x4d, 0x66, 0x59, 0x6d, 0xbe, 0x30, 0xcb, 0x9d, 0x9b, 0x73, 0x1f, 0xc5, 0x98, 0xda, 0x39, 0x5c, 0x24, 0x99, 0x93, 0xa9, 0xd9, 0x47, 0xd3, 0x31, 0xf, 0x28, 0xc8, 0x12}} + return a, nil +} + +var _migrations44_asset_stat_accounts_and_balancesSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x90\x4f\x4f\xc2\x30\x18\xc6\xef\xef\xa7\x78\x6e\x48\x2c\x9f\x60\xf1\x30\xec\x2e\xa6\x6e\x84\x75\xe7\xe6\x5d\x69\x74\x84\xb5\x84\xb6\xd1\xf8\xe9\x8d\x44\x70\x60\xa2\x5c\xfb\xfc\xf9\xf5\x79\x17\x0b\xdc\x8f\xc3\xcb\x81\x93\x43\xb7\x27\x2a\x95\xae\xd6\xd0\xe5\x52\x55\x70\xef\x7b\xc3\x31\xba\x64\x62\xe2\x14\xa9\x94\x12\x8f\x8d\xea\x9e\x6b\xb0\xb5\x21\xfb\x14\xf1\xd4\x36\xf5\x52\x4c\xa5\x9e\x77\xec\xad\xfb\x96\x0a\xea\x56\xb2\xd4\xbf\xcb\x80\xb6\xd2\x04\xe0\xa7\xeb\x01\xdb\x18\x7c\x6f\xfa\x3c\xec\x36\x26\xf4\x5b\x67\xd3\xdd\x8c\x73\x7a\x0d\x87\xe1\xc3\x6d\x66\x02\x3e\x8f\xe6\xe4\x9f\x8b\x63\xfc\xcc\xbb\x21\xce\xe3\x57\x72\x5e\xfc\xb3\xf3\xa8\x5d\x2f\x6d\x2b\x8d\xba\xd1\xa8\x3b\xa5\xc4\xa5\xe7\xfc\x85\xa9\xa7\x20\x9a\x1e\x57\x86\x37\xff\x37\x56\xae\x9b\xd5\x35\x55\x5c\xbc\x9e\x38\x05\x7d\x06\x00\x00\xff\xff\x9e\x44\x07\x8e\xb7\x01\x00\x00") + +func migrations44_asset_stat_accounts_and_balancesSqlBytes() ([]byte, error) { + return bindataRead( + _migrations44_asset_stat_accounts_and_balancesSql, + "migrations/44_asset_stat_accounts_and_balances.sql", + ) +} + +func migrations44_asset_stat_accounts_and_balancesSql() (*asset, error) { + bytes, err := migrations44_asset_stat_accounts_and_balancesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/44_asset_stat_accounts_and_balances.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb0, 0x3b, 0xce, 0x82, 0x13, 0x91, 0xc0, 0xc2, 0x93, 0xb1, 0x17, 0xbc, 
0x57, 0x34, 0xdd, 0x98, 0xc8, 0x36, 0xee, 0x29, 0xd, 0x1e, 0x69, 0xb, 0x3f, 0x31, 0xbc, 0x41, 0xd4, 0x7d, 0xa5, 0xfd}} + return a, nil +} + +var _migrations45_add_claimable_balances_historySql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x95\x41\xb3\x9a\x30\x14\x85\xf7\xfc\x8a\x3b\x6e\x94\xa9\x2e\xdc\xca\x0a\x21\xad\xcc\x60\xa8\x98\xb4\x76\xc5\x04\xc8\xd8\xcc\x20\x58\xc8\xb4\xf6\xdf\x77\xea\x53\x11\x8d\x01\x7c\xce\x5b\x92\xb9\xb9\xe7\x9c\xfb\x85\x64\x32\x81\x4f\x3b\xb1\x2d\x99\xe4\x40\xf7\x86\xe1\x84\xc8\x26\x08\xd6\x68\x45\x11\x76\x10\xfc\x14\x95\x2c\xca\xbf\x51\x92\x31\xb1\x63\x71\xc6\xa3\x98\x65\x2c\x4f\x78\x15\x89\x34\xaa\xf8\x2f\x03\x00\x60\x4d\xec\x90\xc0\x77\x8f\x2c\x60\x7a\x5c\xf0\xb0\x13\xa2\x25\xc2\x04\xe6\x3f\x4e\x4b\x38\x80\xa5\x87\xbf\xd9\x3e\x45\x97\x6f\x7b\x53\x7f\x3b\xb6\xb3\x40\x30\xb5\x2e\x1e\x88\x3d\xf7\x75\x06\x60\x74\xdc\x27\x52\x88\xc5\x56\xe4\x12\x70\x40\x00\x53\xdf\x07\x17\x7d\xb6\xa9\x4f\x20\xe7\x07\xf9\x9b\x65\xa3\x61\x6b\x8a\xe1\x6c\x56\xf2\x6d\x92\xb1\xaa\x32\xc7\xc7\xb6\x77\xa5\x91\x48\x41\xf2\x43\x2d\x63\x98\xb5\x57\x8a\xbd\x15\x45\xe0\x61\x17\x6d\x60\x20\xf2\x94\x1f\x22\x8d\x68\x91\x47\x22\x1d\x40\x80\x75\xf1\xe8\xda\xc3\x5f\x20\x96\x25\xe7\x30\x12\xa9\x69\x3d\x2f\xa6\x0a\xd3\x4b\x5e\xd5\xc0\x7c\x84\xaa\xd8\xf3\x92\x49\xa1\xd2\x3d\x43\xbb\x2f\xbd\xc7\x38\x6e\x54\x2a\x79\xdc\xec\xe8\x4a\x44\x67\xf0\x8d\x4d\xd5\x98\x8e\x36\x50\x63\x4e\xca\x5c\x63\x6d\x88\x9a\xeb\x53\x5e\xaf\x95\x5e\x68\xfa\x21\x5c\x59\xb2\xbc\x62\x49\x37\xbc\xd7\xc5\x1f\x09\x58\x6f\x52\x85\xb8\x25\x96\x72\x5e\x37\xe9\xde\x85\xb9\xdd\x71\x53\xed\xc5\xe6\xff\xcf\xf5\xfa\x29\x70\x8b\x3f\xb9\x61\xb8\x61\xf0\xb5\xe7\x9d\x66\xf5\xde\xa4\xbc\x9b\xac\x93\x78\xdb\x1b\x70\xae\xeb\xfe\x5e\x59\xba\x58\x5d\x2e\x06\x5d\xc2\x5e\x3f\xab\x3a\xa3\xae\x85\xd6\x7b\xb7\x33\xaf\x73\xdf\xfb\x0c\xaa\x13\xe8\xdb\x58\xc6\xbf\x00\x00\x00\xff\xff\xc3\x12\xac\xa0\x73\x08\x00\x00") + +func migrations45_add_claimable_balances_historySqlBytes() ([]byte, error) { + return bindataRead( + _migrations45_add_claimable_balances_historySql, + "migrations/45_add_claimable_balances_history.sql", + ) +} + +func migrations45_add_claimable_balances_historySql() (*asset, error) { + bytes, err := migrations45_add_claimable_balances_historySqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/45_add_claimable_balances_history.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb5, 0xa0, 0x1c, 0x80, 0x4f, 0x5b, 0xf9, 0xa6, 0x5d, 0xfe, 0x14, 0xd4, 0xa9, 0xaa, 0xf3, 0x89, 0x10, 0x6e, 0x23, 0x33, 0x86, 0xa3, 0xab, 0x1a, 0x71, 0x69, 0xd4, 0x53, 0x2c, 0x4a, 0x1, 0xe4}} + return a, nil +} + +var _migrations46_add_muxed_accountsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x90\xbf\xce\x82\x40\x10\xc4\xfb\x7b\x8a\x2d\xbf\x2f\x42\x6b\x62\xa8\x30\x47\x77\x51\x43\xa0\x26\x9b\x63\x11\x0a\xee\xc8\xde\xe1\x9f\xb7\x37\x01\x13\x31\x22\x1a\xdb\xcd\xcc\xce\xfc\x26\x0c\x61\xd5\x36\x47\x46\x4f\x90\x77\x42\xc4\x2a\x4b\x52\xc8\xe2\xad\x4a\xa0\x6e\x9c\xb7\x7c\x2d\x3c\xa3\x71\xa8\x7d\x63\x8d\x83\x58\x4a\x40\xad\x6d\x6f\x7c\xd1\xf6\x17\x2a\xe1\x84\xac\x6b\xe4\xbf\xf5\xe6\x1f\x76\xb9\x52\xc1\xa0\xa9\x88\x8a\x65\x5d\x34\x1b\x66\x3b\x62\x7c\x44\x39\xdb\xb3\xfe\xed\x13\x55\x15\x69\x7f\x6f\x5c\x96\x4c\xce\xbd\xf5\x8b\xe9\x0e\xd2\x9e\xcd\x17\x4b\xc8\x74\x7f\x78\x9e\x22\x18\x6f\x2f\xe8\x1f\x49\x07\xdb\x1c\xea\x32\xd9\xd8\x60\x8a\x16\x89\x5b\x00\x00\x00\xff\xff\x1e\x83\x01\x2a\xd1\x01\x00\x00") + +func 
migrations46_add_muxed_accountsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations46_add_muxed_accountsSql, + "migrations/46_add_muxed_accounts.sql", + ) +} + +func migrations46_add_muxed_accountsSql() (*asset, error) { + bytes, err := migrations46_add_muxed_accountsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/46_add_muxed_accounts.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf2, 0x2e, 0xec, 0xdc, 0x13, 0x2a, 0x94, 0xfa, 0xba, 0x5c, 0xe7, 0x2d, 0x10, 0x39, 0x2b, 0xf3, 0x28, 0xd, 0x46, 0x7f, 0xf, 0xc3, 0x4c, 0xd1, 0x43, 0xff, 0x81, 0xe1, 0x68, 0xa, 0xef, 0xd1}} + return a, nil +} + +var _migrations47_precompute_trade_aggregationsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x54\xc1\x6e\xda\x40\x10\xbd\xef\x57\xbc\x03\x52\xec\x14\x24\x72\xe9\x01\x4e\x0e\x38\x08\x95\x98\xc8\x60\xa9\x39\x59\x8b\x3d\x35\xab\xd8\xbb\xee\x7a\x1c\xda\x7e\x7d\x05\x98\x90\x80\x69\x22\x35\x17\x1f\x3c\x6f\xde\xcc\xbc\x7d\x33\xbd\x1e\xbe\x14\x2a\xb3\x92\x09\x51\x09\x6d\xd8\x4a\x5d\xc9\x84\x95\xd1\x42\xf4\x7a\x18\x59\xda\xc6\x78\x4d\xd0\xb4\x01\xcb\x55\x4e\x62\x14\xfa\xde\xd2\xc7\xd2\xbb\x9d\xf9\x58\xab\x8a\x8d\xfd\x1d\xb3\x95\x29\x55\xf1\xd7\x7e\xbf\xdf\x87\x23\x00\x56\x05\x55\x2c\x8b\x12\x2b\x95\x29\xcd\x5b\x76\xe8\x3a\xcf\xbb\x02\x58\xc9\x8a\x62\x59\x55\xc4\xb1\x4a\xdb\x00\x89\xa9\x35\x93\x7d\x1f\x03\xa5\x99\x32\xb2\xe7\xf4\xcf\x26\xaf\x0b\x82\xae\x0b\xb2\x2a\x69\x65\xff\x07\x44\x3e\x67\xad\xff\xd7\x2a\x5b\xc7\xfa\x72\x28\x6d\x0d\xe5\x66\x73\x21\x69\x1b\x69\xcf\x31\x25\xe9\x38\xa7\x34\x23\x1b\xb3\x69\x97\x60\x87\x69\x27\xde\x85\xda\x99\x93\xdc\x54\xf4\x1e\xf5\x1e\xd4\xce\xbd\x8f\xb5\x91\x0b\xe0\x21\x9c\xde\x7b\xe1\x23\xbe\xf9\x8f\xce\x9b\x77\xee\x9e\xbd\x6a\xf7\xe8\x12\x57\xb8\xc3\x9d\xe5\x5e\x0c\xb9\x60\xc9\x54\x90\xe6\x5b\xca\x94\x3e\xb8\x6e\x1e\x22\xf4\x1f\x66\xde\xc8\xc7\x5d\x14\x8c\x96\xd3\x79\x00\x36\x71\xa1\xf2\x5c\x55\x0e\xbf\xb2\xdd\x46\xf1\xda\xd4\xfb\x3f\xf8\x63\x34\x75\xc1\xb6\x3e\x0e\x34\xf6\xef\xbc\x68\xb6\xc4\x8d\x2b\x80\xd0\x5f\x46\x61\xb0\x38\x08\xe1\x2d\xd0\xe9\x08\xe0\xd6\x9f\x4c\x03\x01\x1c\x00\x48\xd5\xb3\x93\xc8\x8a\x1d\x87\x7e\xb1\x95\x09\x3b\x54\x9a\x64\x8d\x1f\xd6\x14\x60\x17\xd7\xb8\xd9\x2e\x80\x0b\x59\x35\x5c\xee\xbe\xac\x7b\xbd\xfd\x0e\x05\xe0\x07\xe3\xa1\xe8\x74\x30\xf3\x82\x49\xe4\x4d\x7c\x94\x79\x99\x55\x3f\x73\x4c\xef\xef\xa3\xdd\x52\x0d\xdb\x75\xf0\x75\xfa\xf9\x0a\x7d\xae\x3c\xaf\xea\x0c\x06\x2f\x85\x1a\x05\xfe\x7f\xf8\x66\xc2\x69\x30\xf6\xbf\x63\x34\x0f\x46\x51\x18\xfa\xc1\x72\xf6\x88\x35\xdb\x34\x96\x59\x16\xaf\xea\xe4\x89\x38\xce\x8d\x79\xaa\x4b\xcc\x83\x93\x03\x25\x80\x68\x31\x0d\x26\x58\xb1\x25\x82\x73\x6c\xb8\x59\x87\x9d\xb5\xd3\x58\x72\x17\x57\xbb\x63\x76\x35\x18\x34\x92\xb8\x5b\x87\x7e\xa0\x85\xb3\xc5\x3d\xeb\xa2\x39\x93\x6f\x3a\x39\xcd\x3a\x5d\x87\xb1\xd9\x68\x21\xc6\xe1\xfc\xa1\x29\x7e\xb1\xde\xb0\x15\xf5\x46\x98\x06\x72\xf9\x84\x37\x80\x36\x03\x5d\xb4\xcf\x41\xa6\x8f\xe6\x9e\x2c\xe7\x31\xfd\x6f\x00\x00\x00\xff\xff\x80\x13\x6d\x32\x97\x06\x00\x00") + +func migrations47_precompute_trade_aggregationsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations47_precompute_trade_aggregationsSql, + "migrations/47_precompute_trade_aggregations.sql", + ) +} + +func migrations47_precompute_trade_aggregationsSql() (*asset, error) { + bytes, err := migrations47_precompute_trade_aggregationsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/47_precompute_trade_aggregations.sql", 
size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd, 0x28, 0x13, 0x8, 0x93, 0xfc, 0x96, 0xf2, 0x6c, 0xab, 0x2, 0x54, 0x19, 0x7b, 0xdb, 0x48, 0x0, 0x21, 0xa, 0xef, 0x6d, 0xf0, 0xd1, 0x9f, 0xee, 0x8d, 0x91, 0xd8, 0x65, 0xe1, 0x1d, 0xd5}} + return a, nil +} + +var _migrations48_rebuild_trade_aggregationsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x94\x5f\x6f\xda\x30\x14\xc5\xdf\xfd\x29\xae\xfa\x94\x74\xd0\x31\x1e\x26\xad\x7d\xa2\x6d\xb6\x55\xea\x60\x0a\x41\x53\x55\x4d\x96\x49\x2e\xc4\xaa\xff\x20\xfb\xa6\xb4\xdf\x7e\xc2\x21\x09\xd0\xac\x7d\x41\x8e\xfd\xf3\xb9\xbe\x87\x63\x0f\x87\xf0\x49\xcb\xb5\x13\x84\xb0\xd8\x30\x36\x1c\xc2\xb5\xc8\x9f\x56\x52\x29\xa0\x12\x81\xc4\x52\x21\x6c\x25\x95\x80\x2f\xd2\x93\x34\x6b\x28\x04\x89\x0b\xc8\x4a\xe9\x81\xc4\x13\x7a\x10\x4b\x5b\x11\x7c\x03\x2d\x4d\x45\xe8\x2f\xd8\x9f\xbb\xec\x27\x90\x13\x05\x7a\x98\xcc\x21\x62\x00\xf3\xe4\x3e\xb9\xc9\x18\x00\x00\x59\xae\xa5\x52\xd2\x47\x0a\x8b\x35\x3a\x9e\x2b\xeb\xb1\xe0\x82\x06\xf0\x75\x34\x1a\x8d\x62\x10\x1e\x48\x6a\xf4\x24\xf4\x66\x10\x36\x95\xd2\x93\x75\xaf\xdc\x6e\xd0\x09\x92\xd6\x70\x59\xd4\x2b\x67\xd6\x15\xe8\xce\xea\x8f\xa5\xf0\xc8\x85\xf7\x48\xed\x7a\x3d\xa5\x6d\x65\xa8\x9e\xc8\x77\x43\x74\x27\x58\x3b\x7b\x40\x4e\xd2\x74\xf2\xf0\xb8\x71\x32\x47\x6e\x06\x50\x0f\x8a\xbf\xbb\xe3\x85\x31\x03\xf8\x9e\xce\x7e\xb5\x87\xab\x5b\x66\x00\xb3\xf4\x36\x49\xe1\xfa\xe1\xe4\x3c\x6f\x4b\xf7\xf7\xd5\xf4\xc4\xe2\x01\x38\x5c\x56\x52\xd1\xae\xe6\xa9\x8f\xc7\x0e\xf5\xb4\xfe\x4e\xa7\xd1\x79\x70\x39\xef\x9a\xf5\x95\x8e\x0e\xbc\x0a\xcb\xe1\xfb\xd9\xaa\x4a\x63\x07\x1d\x3b\xd5\xc9\xa0\xfb\x00\xbd\xbc\x34\x95\x46\x27\xf3\xf8\xf3\x49\xb1\x6e\x65\xa7\x26\x9e\xd7\xb5\x44\xa4\xc5\x0b\x0f\x4e\x47\xe1\x37\x8e\x1f\xbf\x04\xf7\x4b\xb9\x2e\xb9\xf9\x2f\x34\xee\xa0\xa2\x81\xa4\xe9\x55\x52\x76\xdb\x09\xbd\x61\xc6\x2d\xb3\xd7\x59\x49\xe7\x29\xea\xfb\xd3\xc2\xc9\xed\x06\x0d\xdf\xa7\x9a\x6c\x63\x78\x54\xef\x3a\x2e\x1c\x50\xd3\x0b\x8c\x3b\x60\xaf\xa0\xc4\x7b\x55\xc3\xfd\xe9\x29\x1b\x76\x1d\x57\xad\x51\xd3\x07\x8c\x0f\x80\xa2\x49\x76\x9b\xe8\x1f\xe9\x6c\xf1\x1b\x96\xaf\x1f\x27\xba\x8d\x25\x8b\x19\xc0\xdd\x74\x9e\xa4\x19\xdc\x4d\xb3\xd9\xc9\x35\xe1\xe1\xae\x87\x50\x37\xb1\x86\x73\x58\x39\xab\x9b\xcc\x33\x80\xf8\x2a\xbc\x49\xed\x1b\x75\x6b\xb7\x86\xb1\x2c\x5d\x4c\x6f\x26\x59\x02\xd9\xe4\xfa\x3e\xe9\xd5\xbd\x62\xff\x02\x00\x00\xff\xff\x0e\xad\x7c\x8e\xdb\x04\x00\x00") + +func migrations48_rebuild_trade_aggregationsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations48_rebuild_trade_aggregationsSql, + "migrations/48_rebuild_trade_aggregations.sql", + ) +} + +func migrations48_rebuild_trade_aggregationsSql() (*asset, error) { + bytes, err := migrations48_rebuild_trade_aggregationsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/48_rebuild_trade_aggregations.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xed, 0xea, 0xa6, 0x74, 0x5b, 0x14, 0xa9, 0x38, 0x4d, 0x4f, 0x90, 0xe7, 0xeb, 0x65, 0x8b, 0xe3, 0xbf, 0x89, 0x12, 0x2f, 0x4b, 0x35, 0xa5, 0x6e, 0x9f, 0xb1, 0x60, 0x92, 0xc7, 0x7, 0x39, 0x7d}} + return a, nil +} + +var _migrations49_add_brin_index_trade_aggregationsSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\xce\xb1\xaa\xc2\x30\x14\xc6\xf1\xfd\x3c\xc5\x19\xef\x45\x0a\x99\x5c\x3a\x49\x9b\x4a\x40\x12\x49\x53\xa8\x53\x88\xb6\xb4\x19\x92\x94\xe4\x80\xf8\xf6\x22\x88\xb8\xb9\x7f\x1f\xbf\x7f\x55\xe1\x2e\xf8\x25\x3b\x9a\x71\xd8\x30\x26\xca\x2e\x16\x77\x23\x9f\x22\x40\xa3\xf9\xc1\x70\x14\xb2\xe5\x23\x36\x4a\x36\x83\xd6\x5c\x9a\xd3\x05\x45\x87\x52\x19\xe4\xa3\xe8\x4d\x8f\x2b\xe5\xc9\xba\x65\xb1\xe4\xc3\x5c\xc8\x85\xcd\x5e\xb3\x8f\xa8\x24\xae\xbe\x50\xca\x0f\x4b\xd9\x4d\x73\xb1\x7b\xc6\x18\xc3\xa1\x17\xf2\x88\xaf\xc9\xdf\xe7\xf1\x5f\x03\x7c\xd7\xb4\xe9\x1e\x01\x5a\xad\xce\x6f\x5f\x74\x3f\xb8\x1a\x9e\x01\x00\x00\xff\xff\xc9\x09\x7e\x00\xce\x00\x00\x00") + +func migrations49_add_brin_index_trade_aggregationsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations49_add_brin_index_trade_aggregationsSql, + "migrations/49_add_brin_index_trade_aggregations.sql", + ) +} + +func migrations49_add_brin_index_trade_aggregationsSql() (*asset, error) { + bytes, err := migrations49_add_brin_index_trade_aggregationsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/49_add_brin_index_trade_aggregations.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe7, 0xae, 0xcb, 0x90, 0xec, 0x3f, 0xea, 0x57, 0xa2, 0x2c, 0x2c, 0xe, 0xd2, 0x60, 0x1, 0xbe, 0xae, 0xa6, 0xe4, 0x2f, 0xcb, 0xa8, 0xac, 0x44, 0x63, 0x67, 0x5c, 0x63, 0x28, 0x15, 0xbd, 0x2f}} + return a, nil +} + +var _migrations4_add_protocol_versionSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\xcd\xb1\x0a\xc2\x30\x10\x06\xe0\x3d\x4f\xf1\xef\x52\x70\xef\x14\x4d\x9d\xce\x44\x4a\x32\x38\x15\xd1\xa3\x06\x6a\xae\x5c\x82\xe2\xdb\xbb\xba\x88\x4f\xf0\x75\x1d\x36\x8f\x3c\xeb\xa5\x31\xd2\x6a\x2c\xc5\x61\x44\xb4\x3b\x1a\x10\x3c\x9d\x71\xcf\xb5\x89\xbe\xa7\x85\x6f\x33\x6b\x85\x01\xac\x73\xd8\x07\x4a\x47\x8f\x55\xa5\xc9\x55\x96\xe9\xc9\x5a\xb3\x14\xe4\xd2\x78\x66\x85\x1b\x0e\x36\x51\xc4\x16\x3e\x44\xf8\x44\xd4\x1b\xf3\x6d\x39\x79\x95\xff\x9a\x1b\xc3\xe9\x97\xd5\x9b\x4f\x00\x00\x00\xff\xff\x83\xbb\x30\x2e\xbc\x00\x00\x00") + +func migrations4_add_protocol_versionSqlBytes() ([]byte, error) { + return bindataRead( + _migrations4_add_protocol_versionSql, + "migrations/4_add_protocol_version.sql", + ) +} + +func migrations4_add_protocol_versionSql() (*asset, error) { + bytes, err := migrations4_add_protocol_versionSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/4_add_protocol_version.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x26, 0x7f, 0xb6, 0x87, 0x30, 0xa5, 0x8c, 0xee, 0x55, 0xbb, 0x12, 0x6, 0x1b, 0xee, 0xfc, 0x6a, 0xa0, 0x71, 0x60, 0xcc, 0xf7, 0x36, 0x56, 0xb3, 0x39, 0x1f, 0x1a, 0xd2, 0x6, 0xe4, 0x58, 0x8e}} + return a, nil +} + +var _migrations50_liquidity_poolsSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x56\x5d\x8f\xda\x38\x14\x7d\x9f\x5f\x71\xdf\x1a\xb4\x83\xd4\xbe\x16\x6d\xa5\x0c\x71\x77\xa2\x42\x98\x86\x64\xdb\x79\xb2\x0c\xbe\x80\x77\x33\x31\x8d\xcd\x2c\xfc\xfb\x55\x12\x42\x83\x63\x43\x8a\x46\xe5\x8d\xf8\x7e\x9c\x73\xcf\xf1\xc7\x70\x08\x7f\xbc\x88\x75\xc1\x34\x42\xba\xbd\xbb\x1b\xc7\xc4\x4f\x08\x24\xfe\xc3\x84\x40\x26\x7e\xec\x04\x17\xfa\x40\xb7\x52\x66\x0a\xbc\x3b\x00\x00\xc1\x41\xe3\x5e\x43\x34\x4b\x20\x4a\x27\x93\x7b\x18\x0e\x61\x83\xfb\x21\xe6\x4b\xc9\x91\xc3\x93\x94\x59\x18\x54\xb1\xfa\xb0\x45\x50\x2f\x2c\xcb\x44\xde\xca\xa8\xd6\x56\x88\x20\x72\x8d\x6b\x2c\x8c\x15\x5d\xec\x94\xce\x44\x8e\x74\x29\x77\xb9\x86\x85\x58\xb7\xd3\x61\xfc\x48\xc6\x5f\xc0\x33\xc3\x3e\xc1\xfb\x41\x5d\x40\x6d\x58\xe1\x4a\x0e\xc8\x67\x3f\x9d\x24\xf0\xbe\x2e\xe3\xb5\x63\x3f\xfd\x79\x2a\xc1\x94\x42\x4d\x0b\x54\x58\xbc\xa2\x82\x7f\x94\xcc\x17\x06\xcc\x8c\x29\x4d\x5f\x24\x17\x2b\x81\x9c\x66\xc8\x4b\x26\x76\x46\x1c\x33\xd4\xc8\x61\x21\x65\x86\x2c\xef\xa2\x59\xb1\x4c\x61\x1d\xfb\x14\x87\x53\x3f\x7e\x86\x2f\xe4\x19\x3c\xc1\x07\x77\x83\xd1\x49\x97\x30\x0a\xc8\x77\x53\x17\xba\x38\x50\x03\xee\x2c\xea\x88\x97\xce\xc3\xe8\x2f\x58\x8b\xdc\xb3\x51\xa3\x5b\xa6\x37\x54\x6e\xd5\x60\x64\xf6\x7a\x45\x6a\xd6\x72\x96\x7f\x48\x62\x42\xc0\x3b\xd2\xbd\xb7\x8e\xa8\x45\x67\x4e\xbe\xa6\x24\x1a\x13\xd8\x08\xa5\x65\x71\x30\x1b\x51\xc1\xa9\xc2\x1f\xd5\x58\xe6\x89\x1f\x27\xf0\x2d\x4c\x1e\xe1\x43\xf5\x21\x8c\xc6\x31\x99\x92\x28\x81\x87\xe7\xe3\xa7\x68\x06\xd3\x30\xfa\xdb\x9f\xa4\xe4\xf4\xdf\xff\xfe\xf3\xff\xd8\x1f\x3f\x12\xf8\x30\x32\x7c\xee\xe8\xfe\xd3\xef\x2e\x0f\xe5\xb8\xd7\xaf\x2c\xf3\xde\x5d\xc6\xff\xee\xe3\xc7\x02\xd7\xcb\x8c\x29\x75\x74\xd7\x79\x1c\x35\xb7\x54\x5b\xf2\x34\x0a\xbf\xa6\x8d\x1a\x22\xe7\xb8\xa7\xae\x6e\x32\x2f\x2b\xcd\x22\x27\xa1\x5a\xa4\x85\x2e\x10\x2b\x6b\x8d\x6e\x6b\xd2\x45\xdf\xb7\x67\x27\x73\xe0\xd2\x42\x6e\xb1\x60\x5a\x74\xba\x35\xaa\x74\xe3\xba\x3a\xdd\x9f\x45\x76\x51\x1b\xe1\x3d\xa7\xee\x44\x56\xcf\x5f\xb5\x87\xe1\x66\x71\x36\x16\x2b\x99\x7b\x37\x72\x73\x93\xfe\x02\xc0\xb3\x16\x6f\x83\xd4\xa9\xa1\x2e\x58\xae\xd8\xb2\x87\x8a\xed\xc8\xdf\xa5\xe3\x05\x74\x16\x25\x2f\x71\xb1\x4e\xc8\xa0\x74\xb3\x9a\x57\x60\x1a\x6d\xde\x0e\x71\x39\x44\x7f\x92\x90\xf8\x28\x6a\x75\xdb\xd2\xf2\xba\x55\xe0\x07\x81\xe3\x08\x33\xa8\xb4\x92\xca\x5b\xca\x7a\x70\xb4\x0b\xb7\x6e\x11\xfb\x59\x11\xc4\xb3\xa7\x63\xed\x8d\x2e\x78\x59\x54\xae\x56\x58\x8c\x3a\x2b\xd5\x8d\x8e\x05\xcd\xa4\xfc\x77\xb7\x35\xc8\xb4\x28\x73\x54\x50\xe5\x56\x75\xa8\xe0\xb5\xdb\x1c\xbf\xba\xc8\x82\x29\xa4\x6c\x59\xb5\x28\x59\x54\xf9\xe7\x6e\xbd\x98\xdf\x60\xbb\xad\x44\x10\xd4\x00\x5c\x1b\xe1\x6a\xf6\x69\x34\xb7\x16\x30\x12\x8f\x2f\x39\xf3\x99\xd2\x08\xe4\x00\x7b\x6e\x55\x6e\xa8\x6f\x4f\x32\xb7\x4a\xd3\xc2\xcd\xe8\x72\x17\x67\x5e\xe9\xb5\xf6\xdb\x38\x90\xff\xe5\x56\xf7\x39\x2b\x74\x1d\xe9\x1c\x45\xe9\x6b\x32\x21\x09\x81\xcf\xf1\x6c\x6a\xe2\xfd\xf6\x48\x62\x02\x9e\xc5\x33\xe1\xbc\x72\xcb\x00\x66\x31\x78\xa6\x27\x9b\xc5\x1e\xce\xef\xca\x79\xd1\x01\x55\x8e\x93\xf7\xf5\x54\xfb\x10\x6e\xdb\x36\x73\x92\xfc\xca\xc6\x33\x87\xd4\x3f\x3d\x08\x4e\x07\xc4\x71\x97\x58\xed\x7e\x7e\xea\x80\xcc\xfb\xb8\xaf\x7e\x90\xbb\xdd\x5d\x35\xbe\xe2\xe4\x06\x9c\x71\x48\x5e\x3b\x80\x47\xce\x33\xde\xe6\x8c\xda\xaa\xe5\xc2\xe5\x87\xf3\x92\xa9\x25\xe3\xd8\x04\xf7\x7c\xe6\xdb\x6b\xbb\x1f\x26\x46\x97\xfe\x6f\x0f\x6b\xa2\x3b\xe8\xff\x00\x00\x00\xff\xff\xf9\xb4\x97\x15\x24\x0f\x00\x00") + +func migrations50_liquidity_poolsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations50_liquidity_poolsSql, + "migrations/50_liquidity_pools.sql", + ) +} + 
+func migrations50_liquidity_poolsSql() (*asset, error) { + bytes, err := migrations50_liquidity_poolsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/50_liquidity_pools.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd0, 0xf1, 0xc5, 0xdb, 0x7d, 0x7e, 0x61, 0xa4, 0x45, 0xee, 0x3f, 0x8, 0x28, 0x27, 0x29, 0x46, 0x4a, 0xc5, 0x5c, 0x20, 0xc, 0xdb, 0x8b, 0x6b, 0xa2, 0xe1, 0xd8, 0xd3, 0x25, 0xe5, 0x41, 0xfd}} + return a, nil +} + +var _migrations51_remove_ht_unused_indexesSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\xce\xbf\x4a\xc7\x30\x14\xc5\xf1\x3d\x4f\x71\x46\x8b\xe6\x09\x3a\x89\x8d\x1a\x28\xa9\x34\x2d\x76\x2b\x69\x7a\xd5\x0c\x26\x9a\x3f\x48\xdf\x5e\x90\x62\x0b\x22\xfc\xb6\x0c\xdf\xf3\xc9\xe5\x1c\xd7\xef\xee\x35\x9a\x4c\x18\x3f\x18\xe3\x1c\xc5\x97\x44\x2b\x9c\x5f\x9d\xa5\xc4\x9a\xbe\x7b\x82\x54\x8d\x98\x20\xef\x21\x26\xa9\x07\x8d\x65\x9b\x8d\xb5\xa1\xf8\x5c\xff\x1b\xbc\x10\x1d\xd1\x8f\xfc\xfb\x53\x13\xbe\x3c\xbb\xeb\xc5\xed\x20\xf6\xe5\x01\xa2\x53\x78\x73\x29\x87\xb8\xcd\x39\x1a\x9f\x8c\xcd\x2e\xf8\x84\x51\x4b\xf5\x80\x25\x47\x22\x5c\xed\xf1\x0d\xf6\xc7\x9c\xe8\xb3\x90\xb7\x54\xd5\x7f\xe0\xd3\x21\x97\xe1\xa7\x41\x85\xe7\x47\xd1\x0b\x9c\x0d\xa9\xa1\xba\x01\x6a\x6c\xdb\x9a\x7d\x07\x00\x00\xff\xff\x08\x04\xf4\x75\x41\x01\x00\x00") + +func migrations51_remove_ht_unused_indexesSqlBytes() ([]byte, error) { + return bindataRead( + _migrations51_remove_ht_unused_indexesSql, + "migrations/51_remove_ht_unused_indexes.sql", + ) +} + +func migrations51_remove_ht_unused_indexesSql() (*asset, error) { + bytes, err := migrations51_remove_ht_unused_indexesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/51_remove_ht_unused_indexes.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe6, 0x27, 0xd4, 0x40, 0x1, 0x69, 0x1, 0x90, 0xeb, 0x92, 0x8a, 0x93, 0xd6, 0xa3, 0x5c, 0x67, 0x1, 0x4f, 0x15, 0xf9, 0x98, 0xcc, 0x62, 0xd6, 0xa7, 0x2e, 0x2a, 0xae, 0x88, 0xd3, 0x31, 0x79}} + return a, nil +} + +var _migrations52_add_trade_type_indexSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x90\x41\x4b\xc3\x30\x1c\xc5\xef\xf9\x14\x8f\x9d\x1c\x6e\xa0\x5e\x8b\x42\xb7\xfc\x75\xc5\xd2\x8e\x34\x45\x6f\x21\x33\xc1\x05\xba\xa6\xa6\x19\xd2\x6f\x2f\x4e\x91\xa0\xb8\x5b\x0e\xbf\xbc\xdf\xfb\xbf\xe5\x12\x97\x07\xf7\x1a\x74\xb4\x68\x07\xc6\xf2\x52\x92\x80\xcc\x57\x25\x61\xef\xc6\xe8\xc3\xa4\x62\xd0\xc6\x8e\xc8\x39\xc7\xe9\xa9\xe2\x34\x58\x8c\x07\xdd\x75\xae\x8f\xe0\x74\x9f\xb7\xa5\xc4\x35\xd6\x1b\x5a\x3f\x5e\x24\xcc\x1d\xae\xe6\x19\x6b\xb7\x3c\x97\x7f\xe2\x1a\x92\x69\xdc\x2d\x6e\xf0\xb4\x21\x41\xd8\xe9\xd1\xaa\xce\xbd\x1d\x9d\x71\x71\x52\x83\xf7\x9d\x72\x06\x45\x83\xaa\x96\xa8\xda\xb2\x44\x2d\xf0\xe2\x8f\x7d\xb4\xe1\x3c\x98\xb1\xb5\xa0\x4f\x77\x51\x71\x7a\xc6\x3e\x06\xa3\x76\xdf\x0d\xbe\xb4\x75\xf5\xbb\x57\xdb\x14\xd5\x03\x56\x52\x10\x25\xa7\x2c\x7e\x30\x3f\xd8\xa0\xa3\xf3\xbd\x72\x66\x81\x99\x0f\xc6\x86\xd9\x3c\x63\x2c\x9d\x92\xfb\xf7\x9e\x31\x2e\xea\xed\xbf\xea\xec\xdc\xd6\xa7\x9f\x29\xfb\x11\x00\x00\xff\xff\x80\x41\xbf\x62\xa8\x01\x00\x00") + +func migrations52_add_trade_type_indexSqlBytes() ([]byte, error) { + return bindataRead( + _migrations52_add_trade_type_indexSql, + "migrations/52_add_trade_type_index.sql", + ) +} + +func migrations52_add_trade_type_indexSql() (*asset, error) { + bytes, err := migrations52_add_trade_type_indexSqlBytes() + if err != nil { + return nil, 
err + } + + info := bindataFileInfo{name: "migrations/52_add_trade_type_index.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa7, 0xbc, 0x9b, 0x94, 0xfb, 0x5b, 0x77, 0x33, 0x5b, 0x67, 0x5a, 0x64, 0x6e, 0x6c, 0x5f, 0xee, 0xe0, 0xd2, 0xcd, 0x61, 0xd6, 0x29, 0x8c, 0x74, 0xd0, 0xec, 0xaf, 0x32, 0x38, 0xb6, 0xd9, 0x71}} + return a, nil +} + +var _migrations5_create_trades_tableSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x94\x51\x6f\xaa\x40\x10\x85\xdf\xf9\x15\x13\x9f\x30\x17\x93\x7b\x6f\x5a\x5f\x4c\x9a\x58\x25\xad\xa9\xc1\xd6\x4a\xd2\x37\xb2\xb0\x23\x6c\xa2\x2c\x99\x1d\xda\xf0\xef\x1b\x68\x69\x10\x57\xad\xaf\x9c\x39\x67\x38\xbb\x5f\x76\x34\x82\x3f\x7b\x95\x92\x60\x84\xb0\x70\x66\x6b\x7f\xba\xf1\x61\x33\xbd\x5f\xfa\x90\x29\xc3\x9a\xaa\x88\x49\x48\x34\xe0\x3a\x00\xf0\xf3\x51\x17\x48\x82\x95\xce\x23\x25\x21\x56\xa9\xca\x19\x82\xd5\x06\x82\x70\xb9\xf4\x9a\xc9\x81\x26\x89\x34\x00\x95\x33\xa6\x48\x1d\xb5\x91\xf5\x76\x8b\x64\x35\x37\xb2\xc1\xdd\xee\x84\x5e\xcb\x71\x59\x9d\x75\xeb\x9d\x8c\x84\x31\xc8\x11\x57\x05\x42\x92\x09\x12\x09\x23\xc1\xbb\xa0\x4a\xe5\xa9\x3b\xbe\x19\xf6\x22\x3b\x1e\x65\x4c\x89\x64\x71\xdd\x8e\xcf\xb8\x12\x2d\x6d\x9b\xfe\xfd\xb7\x7b\xf6\xba\xcc\xb9\xff\xff\x30\x7b\xf4\x67\x4f\xe0\x76\x47\xee\xe0\xef\xf0\xbb\x57\xac\xcb\x34\xe3\x6b\x9b\x1d\xb8\xae\xe8\x76\xe0\xfb\x75\xbb\xd6\x75\xb6\xdf\xe1\x50\xdd\xd0\x19\x4e\x9c\x96\xbf\x30\x58\xbc\x84\x3e\x2c\x82\xb9\xff\x06\x19\x93\x8c\x0a\x25\x61\x15\xf4\x91\x0c\x5f\x17\xc1\x03\xc4\x4c\x88\xe0\xda\xc8\xf4\x5a\x0a\x3b\xe1\x9d\xd4\xb8\x8a\x1a\x0c\x2f\x45\xb7\xac\xda\x52\xea\x90\xfa\xb6\x2e\x65\xf4\x90\xf4\xfa\xe4\x78\xc7\x00\x9e\x5a\xf7\x75\x78\x97\x16\x1e\xb1\xe2\x1d\x5f\xa8\x67\x63\xa3\x5e\xdb\x7d\x17\xe6\xfa\x23\x77\xe6\xeb\xd5\xb3\xfd\x5d\x48\x84\x49\x84\xc4\x89\xf3\x19\x00\x00\xff\xff\x79\x87\x24\x6b\x4c\x04\x00\x00") + +func migrations5_create_trades_tableSqlBytes() ([]byte, error) { + return bindataRead( + _migrations5_create_trades_tableSql, + "migrations/5_create_trades_table.sql", + ) +} + +func migrations5_create_trades_tableSql() (*asset, error) { + bytes, err := migrations5_create_trades_tableSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/5_create_trades_table.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4f, 0x51, 0x6e, 0x49, 0x10, 0xd6, 0xf1, 0x48, 0xc6, 0x8d, 0xe5, 0xbe, 0x2, 0x94, 0xba, 0x20, 0x37, 0x7b, 0x10, 0x8b, 0x84, 0x7, 0xac, 0x1b, 0xb4, 0xac, 0xc3, 0x6d, 0xbc, 0x54, 0x81, 0xe3}} + return a, nil +} + +var _migrations6_create_assets_tableSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x90\x3d\x4f\xc3\x30\x18\x84\x77\xff\x8a\x1b\x1d\x91\x0e\x20\xe8\x92\xc9\x34\x16\x58\x18\xa7\xb8\x31\xa2\x53\xe5\x26\x16\x78\x80\x54\xb6\x11\xca\xbf\x47\xaa\x28\xf9\x50\xe6\x7b\xf4\xbc\xef\xdd\x6a\x85\xab\x4f\xff\x1e\x6c\x72\x30\x27\xb2\xd1\x9c\xd5\x1c\x35\xbb\x97\x1c\x1f\x3e\xa6\x2e\xf4\x07\x1b\xa3\x4b\x11\x94\x00\x80\x6f\xb1\xe3\x5a\x30\x89\xad\x16\xcf\x4c\xef\xf1\xc4\xf7\xc8\xcf\xd9\x19\x3c\xa4\xfe\xe4\xf0\xca\xf4\xe6\x91\x69\xba\xbe\xcd\xa0\xaa\x1a\xca\x48\x39\x86\x9a\xae\x1d\xa0\xeb\x9b\x65\xc8\xc7\xf8\xed\xc2\x3f\x76\xb7\x9e\x63\x46\x89\x17\xc3\xe9\xa0\xcc\x47\x3f\xe4\x13\x4b\x46\xb2\x82\x5c\xfa\x09\x55\xf2\xb7\xbf\xf8\xd8\x5f\xee\x54\x6a\x5e\xd9\xec\x84\x7a\xc0\x31\x05\xe7\x40\x27\xb6\x82\x90\xf1\x74\x65\xf7\xf3\x45\x4a\x5d\x6d\x97\xa7\x6b\x6c\x6c\x6c\xeb\x8a\xdf\x00\x00\x00\xff\xff\xfb\x53\x3e\x81\x6e\x01\x00\x00") + +func migrations6_create_assets_tableSqlBytes() ([]byte, error) { + return bindataRead( + _migrations6_create_assets_tableSql, + "migrations/6_create_assets_table.sql", + ) +} + +func migrations6_create_assets_tableSql() (*asset, error) { + bytes, err := migrations6_create_assets_tableSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/6_create_assets_table.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb1, 0x3f, 0x47, 0xea, 0x8a, 0x5d, 0x60, 0xc8, 0x90, 0x36, 0xc8, 0x4f, 0x68, 0xc5, 0xd3, 0xa0, 0xcd, 0xae, 0x5a, 0xc3, 0x75, 0xbd, 0xb4, 0xbb, 0x51, 0xf2, 0x68, 0x54, 0x79, 0xac, 0xa2, 0x8a}} + return a, nil +} + +var _migrations7_modify_trades_tableSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x54\x4d\x8f\xda\x30\x14\xbc\xe7\x57\x3c\xed\x29\x51\xc3\xaa\xad\xda\xbd\x6c\x55\x09\x58\x97\x46\x65\xc3\x36\x04\xa9\xb7\xc8\x89\xdf\x06\xab\xc1\x8e\x6c\xa7\x88\x7f\x5f\x05\x08\xcd\x27\xb0\xbb\x87\x5e\x93\x99\x79\x6f\xec\xf1\x8c\x46\xf0\x6e\xc3\x53\x45\x0d\xc2\x2a\xb7\x46\x23\x60\x4a\xe6\x60\xd6\x08\x32\x63\x60\x14\x65\xa8\xc1\xd0\x38\xc3\x5b\xc8\x0b\x03\x14\x04\x6e\x41\x0a\x04\x2e\x20\xcf\x68\x82\xd6\x43\xb0\x78\x82\x70\x3c\x99\x13\x58\x73\x6d\xa4\xda\x45\x07\xde\xbd\x35\x0d\xc8\x38\x24\xbd\x3f\xc1\xb6\x00\xe0\xf4\x51\xe6\xa8\xa8\xe1\x52\x44\x9c\xc1\xc4\x9b\x79\x7e\x08\xfe\x22\x04\x7f\x35\x9f\xbb\x7b\xe4\x8d\x54\x0c\xd5\x0d\x78\x7e\x48\x66\x24\x68\xfd\xcd\x90\xa5\xa8\xa2\x24\x93\x1a\x59\x44\x0d\x84\xde\x23\x59\x86\xe3\xc7\xa7\x16\x50\x3e\x3f\xa3\x1a\x1c\x12\x53\x8d\x11\x4d\x12\x59\x08\xd3\x03\x82\x80\x7c\x23\x01\xf1\xa7\x64\x79\xda\xfc\x88\xd6\x36\x67\x4e\x5d\x44\x6b\xbc\x5a\xa2\xc4\x76\x04\x36\xa5\x6c\x87\x3e\xfd\x4e\xa6\x3f\xc0\xae\x43\xbe\xc2\xfb\x23\x71\xbf\x09\xaa\x37\x3b\x38\xe9\xbc\xc1\xc4\x49\xe3\xac\x8f\x16\xea\x9f\x95\xbd\x41\xae\x23\x8d\x59\x86\x0a\x26\x8b\xc5\x9c\x8c\xfd\xc3\xbf\x3d\xd7\x6e\x1e\xf3\x97\xce\xd2\x8e\xe5\xdc\x5b\x55\x04\x57\xbe\xf7\x73\x45\xc0\xf3\x1f\xc8\x2f\x58\x1b\xc5\xa2\x9c\x33\x58\xf8\xed\x54\xae\x96\x9e\x3f\x83\xd8\x28\x44\xb0\xfb\xc2\xe9\x56\x41\x74\x4e\xf1\xae\x8b\x52\xae\x22\xc3\x37\x18\x65\x52\xfe\x2e\xf2\xc1\x09\x93\x30\x20\xa4\x69\xc1\xed\x38\x70\x3b\xb1\xee\x1d\x5a\xd1\xae\x1a\xd9\x39\xa5\x3e\xc5\xeb\x1d\x5c\xb5\x60\xbc\x8b\xf6\xcf\xee\xd2\x79\x57\x6f\xb3\xbc\x37\xab\x5e\x4d\x0f\x72\x2b\x1a\xe5\x24\x70\x8b\xaa\xea\x25\x85\x5c\x68\x53\xe2\xaa\xde\x92\x02\x6f\x87\x7b\x09\x12\xaa\x13\xca\xf0\xd5\xfd\x14\xf3\x94\x0b\x33\xd0\x4f\x5c\x18\x4c\x51\x0d\xd5\x4e\x2f\xf7\x10\xf2\xc1\xdf\x71\xb1\x3b\x47\x96\x19\x3b\x5e\xa7\xd9\xe5\x08\xc9\x9a\x2a\x9a\x18\x54\xf0\x87\xaa\x1
d\x17\xa9\x7d\xf7\xc9\x19\xe6\x70\xad\x0b\x54\x3d\xac\xcf\x77\x67\x58\x89\x64\x7d\x93\x3e\x7c\xec\xe7\x1c\x5e\x77\x6b\xfd\xaa\x03\xea\x90\x5a\x01\xc8\x22\x5d\x9b\x97\x1a\x6b\xb0\x5e\x60\xad\xc1\xbb\xda\x5c\xc5\x3a\x6b\xaf\x09\x2a\x0d\xfe\x87\x62\x7a\xc5\x13\x6c\x8b\x94\x1a\xe5\x55\x5d\x92\x68\xe5\xd1\x6d\xc7\xc6\xed\xa6\x6f\x60\xda\xe1\xe4\x2e\xcd\xeb\x04\xc5\xed\xde\xa6\xdb\x17\x0c\xe7\xfe\x6f\x00\x00\x00\xff\xff\x2a\xff\xe8\x4a\xff\x08\x00\x00") + +func migrations7_modify_trades_tableSqlBytes() ([]byte, error) { + return bindataRead( + _migrations7_modify_trades_tableSql, + "migrations/7_modify_trades_table.sql", + ) +} + +func migrations7_modify_trades_tableSql() (*asset, error) { + bytes, err := migrations7_modify_trades_tableSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/7_modify_trades_table.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb1, 0x2e, 0xba, 0x36, 0x5e, 0x3a, 0x3f, 0x3a, 0x8f, 0xe4, 0xfd, 0xc6, 0xb8, 0xeb, 0xbf, 0xda, 0x2b, 0xc6, 0xcd, 0xe3, 0xb5, 0x9a, 0x78, 0xf9, 0x9c, 0x2d, 0xcf, 0xe7, 0xb1, 0x6e, 0xa0, 0x3e}} + return a, nil +} + +var _migrations8_add_aggregatorsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x92\x31\x6f\xdb\x30\x14\x84\x77\xfe\x8a\x1b\x34\xd8\xa8\x65\xa3\x1d\x1b\x78\xa0\x65\x5a\x10\x40\x2b\xae\x48\x0d\x99\x02\x26\x61\x64\xa1\x32\xa5\x92\xcf\x30\xfc\xef\x0b\xaa\x4d\x6c\xb4\x05\x1a\x14\xcd\x46\x1c\xf8\x0e\x77\xdf\x7b\x69\x8a\x0f\x87\xb6\xf1\x86\x2c\xea\x81\xb1\x34\xc5\x9e\x68\x08\x9f\x17\x8b\x53\xfb\xb5\x9d\x0f\x7d\xa0\xc6\xdb\xf0\xad\x9b\xf7\xbe\x19\xb5\xc5\xa6\xf5\x81\x16\x9d\x09\x74\x3f\x31\x4d\xe3\x6d\x63\xc8\x4e\xe3\x68\xe6\x6d\x34\x32\x78\x3e\xba\x47\x6a\x7b\x07\xda\x1b\x82\xe9\x4e\xe6\x1c\xe0\x2d\x1d\xbd\x0b\xa0\xbd\xc5\x73\xf4\x80\xeb\x5d\x5a\xd6\x52\xa2\x25\x7b\x60\x59\x25\xb8\x16\xd8\xd4\x65\xa6\x8b\xdb\x12\xc3\xf1\xa1\x6b\x1f\xe7\xe3\xd7\x7b\xd3\x34\x98\xc0\xb8\xb3\xed\xec\xc1\x3a\x9a\x5d\xbd\x31\x65\x40\x25\x74\x5d\x95\xea\x5a\x96\xbc\xcc\x6b\x9e\x0b\xa8\x2f\x12\xc5\x76\x5b\x6b\xbe\x92\x02\x4a\x57\x45\xa6\xc1\x15\x92\x04\x4a\x48\x91\x69\x24\x1f\x91\x24\x37\x63\x7f\xee\x9e\x62\x44\x87\x93\x37\x03\x8c\xc3\x6b\x47\x18\xdf\x1f\xdd\x13\x5a\x7a\xc9\xca\xf3\xbc\x12\x79\x7c\xfd\x0c\xbb\x29\x2a\xa5\x31\x61\x2a\xb6\xc0\x12\xbb\x7a\x25\x8b\xec\xd2\x61\xc6\x56\x5c\x09\x7d\xb7\x13\x58\x82\x97\x77\x42\x8a\xad\x28\xf5\x8c\xa9\xdf\x34\x36\xfd\x91\xe7\xed\x50\xe3\x4a\xde\xc6\x74\x5c\xde\x7b\x23\xfd\xf4\x7f\x90\x4a\x3e\x12\x0d\xb1\x3e\x00\x2c\x7f\x2d\x31\x63\x0f\x26\x58\x3a\x0f\x16\xcb\xeb\x3a\x2c\x8c\xda\x38\x72\x91\x5f\xb0\xbe\x9e\xfd\xba\x3f\x39\xb6\xae\x6e\x77\xff\x74\x79\xc8\xb8\xca\xf8\x5a\xdc\xfc\xd9\xe2\x02\xfa\xaf\x06\xdf\x03\x00\x00\xff\xff\x7e\x17\x8e\x03\x8b\x03\x00\x00") + +func migrations8_add_aggregatorsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations8_add_aggregatorsSql, + "migrations/8_add_aggregators.sql", + ) +} + +func migrations8_add_aggregatorsSql() (*asset, error) { + bytes, err := migrations8_add_aggregatorsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/8_add_aggregators.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xae, 0xba, 0x87, 0x6f, 0x41, 0xf3, 0xf6, 0x28, 0x25, 0xc2, 0x19, 0xdf, 0x41, 0x9a, 0x4b, 0xf3, 0x8, 0x37, 0x29, 0x2b, 0x92, 0x12, 0x9f, 0xb5, 0x9f, 0x9d, 0x50, 0x82, 0x6, 0xa5, 0xbb, 0xf6}} + return a, nil +} + +var 
_migrations8_create_asset_stats_tableSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x6c\x91\xc1\x6e\xf2\x30\x10\x84\xef\x79\x8a\x3d\x82\xfe\x9f\x5b\xd5\x0b\x27\x63\x6f\xa9\x55\xe3\x44\x1b\xa7\x2a\xa7\xc8\x24\x2e\x8d\x44\x92\x2a\x36\xaa\x78\xfb\xca\x84\x56\xa1\x8a\x4f\xb6\xf5\x69\x66\x67\x67\xb5\x82\x7f\x6d\x73\x1c\x6c\x70\x50\x7c\x26\x9c\x90\x19\x04\xc3\x36\x0a\xc1\x7a\xef\x42\xe9\x83\x0d\x1e\x16\x09\x00\x40\x53\xc3\xdd\xd9\xc8\xad\xd4\x66\xbc\x67\x24\x77\x8c\xf6\xf0\x82\x7b\x20\x7c\x42\x42\xcd\x31\x87\x8f\xc6\x87\x7e\xb8\x94\x57\x31\x0f\xa9\x06\x81\x0a\x0d\x02\x67\x39\x67\x02\xe3\x4f\x91\x89\xe8\x4a\x98\x1b\x92\xdc\xfc\xbf\x7a\xd9\xb6\x3f\x77\x61\xde\x4b\xa7\x06\x74\xa1\xd4\x48\x76\xe7\xb6\xb4\x55\x15\x71\x1f\xdf\x52\x1b\xdc\x22\xcd\x90\xef\x27\x7b\xf4\x93\xf9\xf3\x1d\x53\xea\xa6\x7a\x4f\x86\xbe\x3d\x4d\x93\xbe\x32\xe2\xcf\x8c\x16\x8f\x0f\xcb\x5f\x32\x59\xae\x93\x9f\x8d\x49\x2d\xf0\xed\xb6\xb1\xc3\xa5\xac\xfa\xda\xc5\x64\x7f\xd2\x17\xb9\xd4\x5b\x38\x84\xc1\x39\x58\x8c\x70\x24\xa3\xce\xb4\x08\xd1\x7f\x75\x89\xa0\x34\x9b\x29\xa2\xb2\xbe\xb2\xb5\x5b\x7f\x07\x00\x00\xff\xff\xa9\x7e\x10\x6f\xb9\x01\x00\x00") + +func migrations8_create_asset_stats_tableSqlBytes() ([]byte, error) { + return bindataRead( + _migrations8_create_asset_stats_tableSql, + "migrations/8_create_asset_stats_table.sql", + ) +} + +func migrations8_create_asset_stats_tableSql() (*asset, error) { + bytes, err := migrations8_create_asset_stats_tableSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/8_create_asset_stats_table.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2d, 0x21, 0x5, 0xfc, 0x3f, 0xe0, 0xe1, 0xe6, 0x50, 0x2c, 0x25, 0xc0, 0x23, 0x87, 0xa3, 0x99, 0xad, 0xc8, 0xc1, 0x67, 0xca, 0x65, 0xc0, 0x91, 0xf6, 0x5c, 0x29, 0xab, 0x78, 0xbe, 0xe4, 0x5e}} + return a, nil +} + +var _migrations9_add_header_xdrSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\xf0\xf7\xf3\x89\x54\xc8\xc8\x2c\x2e\xc9\x2f\xaa\x8c\xcf\x49\x4d\x49\x4f\x2d\x2a\x56\x70\x74\x71\x51\x70\xf6\xf7\x09\xf5\xf5\x53\x80\x08\xc5\x67\xa4\x26\xa6\xa4\x16\x29\x94\xa4\x56\x94\x28\xf8\x85\xfa\xf8\x58\x73\x71\x21\x1b\xea\x92\x5f\x9e\x47\xd8\x58\x97\x20\xff\x00\xac\xe6\x5a\x03\x02\x00\x00\xff\xff\xe1\xe4\xef\x11\xa1\x00\x00\x00") + +func migrations9_add_header_xdrSqlBytes() ([]byte, error) { + return bindataRead( + _migrations9_add_header_xdrSql, + "migrations/9_add_header_xdr.sql", + ) +} + +func migrations9_add_header_xdrSql() (*asset, error) { + bytes, err := migrations9_add_header_xdrSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/9_add_header_xdr.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x44, 0xe5, 0x20, 0x47, 0xc8, 0x66, 0xd0, 0x16, 0xfa, 0xeb, 0xe, 0xba, 0x80, 0xbd, 0xc3, 0xa6, 0x0, 0x9e, 0xc, 0xb5, 0x45, 0xb9, 0x78, 0x26, 0x8, 0xef, 0x94, 0x23, 0xbe, 0x85, 0x2c, 0xe4}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't be read: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't be read: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't be read: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. 
+var _bindata = map[string]func() (*asset, error){ + "migrations/10_add_trades_price.sql": migrations10_add_trades_priceSql, + "migrations/11_add_trades_account_index.sql": migrations11_add_trades_account_indexSql, + "migrations/12_asset_stats_amount_string.sql": migrations12_asset_stats_amount_stringSql, + "migrations/13_trade_offer_ids.sql": migrations13_trade_offer_idsSql, + "migrations/14_fix_asset_toml_field.sql": migrations14_fix_asset_toml_fieldSql, + "migrations/15_ledger_failed_txs.sql": migrations15_ledger_failed_txsSql, + "migrations/16_ingest_failed_transactions.sql": migrations16_ingest_failed_transactionsSql, + "migrations/17_transaction_fee_paid.sql": migrations17_transaction_fee_paidSql, + "migrations/18_account_for_signers.sql": migrations18_account_for_signersSql, + "migrations/19_offers.sql": migrations19_offersSql, + "migrations/1_initial_schema.sql": migrations1_initial_schemaSql, + "migrations/20_account_for_signer_index.sql": migrations20_account_for_signer_indexSql, + "migrations/21_trades_remove_zero_amount_constraints.sql": migrations21_trades_remove_zero_amount_constraintsSql, + "migrations/22_trust_lines.sql": migrations22_trust_linesSql, + "migrations/23_exp_asset_stats.sql": migrations23_exp_asset_statsSql, + "migrations/24_accounts.sql": migrations24_accountsSql, + "migrations/25_expingest_rename_columns.sql": migrations25_expingest_rename_columnsSql, + "migrations/26_exp_history_ledgers.sql": migrations26_exp_history_ledgersSql, + "migrations/27_exp_history_transactions.sql": migrations27_exp_history_transactionsSql, + "migrations/28_exp_history_operations.sql": migrations28_exp_history_operationsSql, + "migrations/29_exp_history_assets.sql": migrations29_exp_history_assetsSql, + "migrations/2_index_participants_by_toid.sql": migrations2_index_participants_by_toidSql, + "migrations/30_exp_history_trades.sql": migrations30_exp_history_tradesSql, + "migrations/31_exp_history_effects.sql": migrations31_exp_history_effectsSql, + "migrations/32_drop_exp_history_tables.sql": migrations32_drop_exp_history_tablesSql, + "migrations/33_remove_unused.sql": migrations33_remove_unusedSql, + "migrations/34_fee_bump_transactions.sql": migrations34_fee_bump_transactionsSql, + "migrations/35_drop_participant_id.sql": migrations35_drop_participant_idSql, + "migrations/36_deleted_offers.sql": migrations36_deleted_offersSql, + "migrations/37_add_tx_set_operation_count_to_ledgers.sql": migrations37_add_tx_set_operation_count_to_ledgersSql, + "migrations/38_add_constraints.sql": migrations38_add_constraintsSql, + "migrations/39_claimable_balances.sql": migrations39_claimable_balancesSql, + "migrations/39_history_trades_indices.sql": migrations39_history_trades_indicesSql, + "migrations/3_use_sequence_in_history_accounts.sql": migrations3_use_sequence_in_history_accountsSql, + "migrations/40_fix_inner_tx_max_fee_constraint.sql": migrations40_fix_inner_tx_max_fee_constraintSql, + "migrations/41_add_sponsor_to_state_tables.sql": migrations41_add_sponsor_to_state_tablesSql, + "migrations/42_add_num_sponsored_and_num_sponsoring_to_accounts.sql": migrations42_add_num_sponsored_and_num_sponsoring_to_accountsSql, + "migrations/43_add_claimable_balances_flags.sql": migrations43_add_claimable_balances_flagsSql, + "migrations/44_asset_stat_accounts_and_balances.sql": migrations44_asset_stat_accounts_and_balancesSql, + "migrations/45_add_claimable_balances_history.sql": migrations45_add_claimable_balances_historySql, + "migrations/46_add_muxed_accounts.sql": 
migrations46_add_muxed_accountsSql, + "migrations/47_precompute_trade_aggregations.sql": migrations47_precompute_trade_aggregationsSql, + "migrations/48_rebuild_trade_aggregations.sql": migrations48_rebuild_trade_aggregationsSql, + "migrations/49_add_brin_index_trade_aggregations.sql": migrations49_add_brin_index_trade_aggregationsSql, + "migrations/4_add_protocol_version.sql": migrations4_add_protocol_versionSql, + "migrations/50_liquidity_pools.sql": migrations50_liquidity_poolsSql, + "migrations/51_remove_ht_unused_indexes.sql": migrations51_remove_ht_unused_indexesSql, + "migrations/52_add_trade_type_index.sql": migrations52_add_trade_type_indexSql, + "migrations/5_create_trades_table.sql": migrations5_create_trades_tableSql, + "migrations/6_create_assets_table.sql": migrations6_create_assets_tableSql, + "migrations/7_modify_trades_table.sql": migrations7_modify_trades_tableSql, + "migrations/8_add_aggregators.sql": migrations8_add_aggregatorsSql, + "migrations/8_create_asset_stats_table.sql": migrations8_create_asset_stats_tableSql, + "migrations/9_add_header_xdr.sql": migrations9_add_header_xdrSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "migrations": &bintree{nil, map[string]*bintree{ + "10_add_trades_price.sql": &bintree{migrations10_add_trades_priceSql, map[string]*bintree{}}, + "11_add_trades_account_index.sql": &bintree{migrations11_add_trades_account_indexSql, map[string]*bintree{}}, + "12_asset_stats_amount_string.sql": &bintree{migrations12_asset_stats_amount_stringSql, map[string]*bintree{}}, + "13_trade_offer_ids.sql": &bintree{migrations13_trade_offer_idsSql, map[string]*bintree{}}, + "14_fix_asset_toml_field.sql": &bintree{migrations14_fix_asset_toml_fieldSql, map[string]*bintree{}}, + "15_ledger_failed_txs.sql": &bintree{migrations15_ledger_failed_txsSql, map[string]*bintree{}}, + "16_ingest_failed_transactions.sql": &bintree{migrations16_ingest_failed_transactionsSql, map[string]*bintree{}}, + "17_transaction_fee_paid.sql": &bintree{migrations17_transaction_fee_paidSql, map[string]*bintree{}}, + "18_account_for_signers.sql": &bintree{migrations18_account_for_signersSql, map[string]*bintree{}}, + "19_offers.sql": &bintree{migrations19_offersSql, map[string]*bintree{}}, + "1_initial_schema.sql": &bintree{migrations1_initial_schemaSql, map[string]*bintree{}}, + "20_account_for_signer_index.sql": 
&bintree{migrations20_account_for_signer_indexSql, map[string]*bintree{}}, + "21_trades_remove_zero_amount_constraints.sql": &bintree{migrations21_trades_remove_zero_amount_constraintsSql, map[string]*bintree{}}, + "22_trust_lines.sql": &bintree{migrations22_trust_linesSql, map[string]*bintree{}}, + "23_exp_asset_stats.sql": &bintree{migrations23_exp_asset_statsSql, map[string]*bintree{}}, + "24_accounts.sql": &bintree{migrations24_accountsSql, map[string]*bintree{}}, + "25_expingest_rename_columns.sql": &bintree{migrations25_expingest_rename_columnsSql, map[string]*bintree{}}, + "26_exp_history_ledgers.sql": &bintree{migrations26_exp_history_ledgersSql, map[string]*bintree{}}, + "27_exp_history_transactions.sql": &bintree{migrations27_exp_history_transactionsSql, map[string]*bintree{}}, + "28_exp_history_operations.sql": &bintree{migrations28_exp_history_operationsSql, map[string]*bintree{}}, + "29_exp_history_assets.sql": &bintree{migrations29_exp_history_assetsSql, map[string]*bintree{}}, + "2_index_participants_by_toid.sql": &bintree{migrations2_index_participants_by_toidSql, map[string]*bintree{}}, + "30_exp_history_trades.sql": &bintree{migrations30_exp_history_tradesSql, map[string]*bintree{}}, + "31_exp_history_effects.sql": &bintree{migrations31_exp_history_effectsSql, map[string]*bintree{}}, + "32_drop_exp_history_tables.sql": &bintree{migrations32_drop_exp_history_tablesSql, map[string]*bintree{}}, + "33_remove_unused.sql": &bintree{migrations33_remove_unusedSql, map[string]*bintree{}}, + "34_fee_bump_transactions.sql": &bintree{migrations34_fee_bump_transactionsSql, map[string]*bintree{}}, + "35_drop_participant_id.sql": &bintree{migrations35_drop_participant_idSql, map[string]*bintree{}}, + "36_deleted_offers.sql": &bintree{migrations36_deleted_offersSql, map[string]*bintree{}}, + "37_add_tx_set_operation_count_to_ledgers.sql": &bintree{migrations37_add_tx_set_operation_count_to_ledgersSql, map[string]*bintree{}}, + "38_add_constraints.sql": &bintree{migrations38_add_constraintsSql, map[string]*bintree{}}, + "39_claimable_balances.sql": &bintree{migrations39_claimable_balancesSql, map[string]*bintree{}}, + "39_history_trades_indices.sql": &bintree{migrations39_history_trades_indicesSql, map[string]*bintree{}}, + "3_use_sequence_in_history_accounts.sql": &bintree{migrations3_use_sequence_in_history_accountsSql, map[string]*bintree{}}, + "40_fix_inner_tx_max_fee_constraint.sql": &bintree{migrations40_fix_inner_tx_max_fee_constraintSql, map[string]*bintree{}}, + "41_add_sponsor_to_state_tables.sql": &bintree{migrations41_add_sponsor_to_state_tablesSql, map[string]*bintree{}}, + "42_add_num_sponsored_and_num_sponsoring_to_accounts.sql": &bintree{migrations42_add_num_sponsored_and_num_sponsoring_to_accountsSql, map[string]*bintree{}}, + "43_add_claimable_balances_flags.sql": &bintree{migrations43_add_claimable_balances_flagsSql, map[string]*bintree{}}, + "44_asset_stat_accounts_and_balances.sql": &bintree{migrations44_asset_stat_accounts_and_balancesSql, map[string]*bintree{}}, + "45_add_claimable_balances_history.sql": &bintree{migrations45_add_claimable_balances_historySql, map[string]*bintree{}}, + "46_add_muxed_accounts.sql": &bintree{migrations46_add_muxed_accountsSql, map[string]*bintree{}}, + "47_precompute_trade_aggregations.sql": &bintree{migrations47_precompute_trade_aggregationsSql, map[string]*bintree{}}, + "48_rebuild_trade_aggregations.sql": &bintree{migrations48_rebuild_trade_aggregationsSql, map[string]*bintree{}}, + "49_add_brin_index_trade_aggregations.sql": 
&bintree{migrations49_add_brin_index_trade_aggregationsSql, map[string]*bintree{}}, + "4_add_protocol_version.sql": &bintree{migrations4_add_protocol_versionSql, map[string]*bintree{}}, + "50_liquidity_pools.sql": &bintree{migrations50_liquidity_poolsSql, map[string]*bintree{}}, + "51_remove_ht_unused_indexes.sql": &bintree{migrations51_remove_ht_unused_indexesSql, map[string]*bintree{}}, + "52_add_trade_type_index.sql": &bintree{migrations52_add_trade_type_indexSql, map[string]*bintree{}}, + "5_create_trades_table.sql": &bintree{migrations5_create_trades_tableSql, map[string]*bintree{}}, + "6_create_assets_table.sql": &bintree{migrations6_create_assets_tableSql, map[string]*bintree{}}, + "7_modify_trades_table.sql": &bintree{migrations7_modify_trades_tableSql, map[string]*bintree{}}, + "8_add_aggregators.sql": &bintree{migrations8_add_aggregatorsSql, map[string]*bintree{}}, + "8_create_asset_stats_table.sql": &bintree{migrations8_create_asset_stats_tableSql, map[string]*bintree{}}, + "9_add_header_xdr.sql": &bintree{migrations9_add_header_xdrSql, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) +} diff --git a/services/horizon/internal/db2/schema/main.go b/services/horizon/internal/db2/schema/main.go new file mode 100644 index 0000000000..d8a50b37c7 --- /dev/null +++ b/services/horizon/internal/db2/schema/main.go @@ -0,0 +1,180 @@ +package schema + +import ( + "bytes" + "database/sql" + "errors" + "fmt" + stdLog "log" + "text/tabwriter" + "time" + + migrate "github.com/rubenv/sql-migrate" +) + +//go:generate jet -dsn=postgres://postgres@localhost/horizon?sslmode=disable -path=./generated/db +//go:generate go run github.com/kevinburke/go-bindata/go-bindata@v3.18.0+incompatible -nometadata -pkg schema -o bindata.go migrations/ + +// MigrateDir represents a direction in which to perform schema migrations. +type MigrateDir string + +const ( + // MigrateUp causes migrations to be run in the "up" direction. + MigrateUp MigrateDir = "up" + // MigrateDown causes migrations to be run in the "down" direction. + MigrateDown MigrateDir = "down" + // MigrateRedo causes migrations to be run down, then up + MigrateRedo MigrateDir = "redo" +) + +// Migrations represents all of the schema migration for horizon +var Migrations migrate.MigrationSource = &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "migrations", +} + +// Migrate performs schema migration. 
Migrations can occur in one of three +// ways: +// +// - up: migrations are performed from the currently installed version upwards. +// If count is 0, all unapplied migrations will be run. +// +// - down: migrations are performed from the current version downward. If count +// is 0, all applied migrations will be run in a downward direction. +// +// - redo: migrations are first run downward `count` times, and then run +// upward back to the current version at the start of the process. If count is +// 0, a count of 1 will be assumed. +func Migrate(db *sql.DB, dir MigrateDir, count int) (int, error) { + switch dir { + case MigrateUp: + return migrate.ExecMax(db, "postgres", Migrations, migrate.Up, count) + case MigrateDown: + return migrate.ExecMax(db, "postgres", Migrations, migrate.Down, count) + case MigrateRedo: + + if count == 0 { + count = 1 + } + + down, err := migrate.ExecMax(db, "postgres", Migrations, migrate.Down, count) + if err != nil { + return down, err + } + + return migrate.ExecMax(db, "postgres", Migrations, migrate.Up, down) + default: + return 0, errors.New("Invalid migration direction") + } +} + +// GetMigrationsUp returns a list of names of any migrations needed in the +// "up" direction (more recent schema versions). +func GetMigrationsUp(dbUrl string) (migrationIds []string) { + // Get a DB handle + db, dbErr := sql.Open("postgres", dbUrl) + if dbErr != nil { + stdLog.Fatal(dbErr) + } + defer db.Close() + + // Get the possible migrations + possibleMigrations, _, migrateErr := migrate.PlanMigration(db, "postgres", Migrations, migrate.Up, 0) + if migrateErr != nil { + stdLog.Fatal(migrateErr) + } + + // Extract a list of the possible migration names + for _, m := range possibleMigrations { + migrationIds = append(migrationIds, m.Id) + } + + return migrationIds +} + +// GetNumMigrationsDown returns the number of migrations to apply in the +// "down" direction to return to the older schema version expected by this +// version of Horizon. To keep the code simple, it does not provide a list of +// migration names. +func GetNumMigrationsDown(dbUrl string) (nMigrations int) { + // Get a DB handle + db, dbErr := sql.Open("postgres", dbUrl) + if dbErr != nil { + stdLog.Fatal(dbErr) + } + defer db.Close() + + // Get the set of migrations recorded in the database + migrationRecords, recordErr := migrate.GetMigrationRecords(db, "postgres") + if recordErr != nil { + stdLog.Fatal(recordErr) + } + + // Get the list of migrations needed by this version of Horizon + allNeededMigrations, _, migrateErr := migrate.PlanMigration(db, "postgres", Migrations, migrate.Down, 0) + if migrateErr != nil { + stdLog.Fatal(migrateErr) + } + + // Return the size difference between the two sets of migrations + return len(migrationRecords) - len(allNeededMigrations) +} + +// Status returns information about the current status of db migrations: which +// ones are pending, and when past ones were applied.
+// +// From: https://github.com/rubenv/sql-migrate/blob/master/sql-migrate/command_status.go +func Status(db *sql.DB) (string, error) { + buffer := &bytes.Buffer{} + migrations, err := Migrations.FindMigrations() + if err != nil { + return "", err + } + + records, err := migrate.GetMigrationRecords(db, "postgres") + if err != nil { + return "", err + } + + table := tabwriter.NewWriter(buffer, 60, 8, 0, '\t', 0) + fmt.Fprintln(table, "Migration\tApplied") + + rows := make(map[string]*statusRow) + + for _, m := range migrations { + rows[m.Id] = &statusRow{ + Id: m.Id, + Migrated: false, + } + } + + for _, r := range records { + if rows[r.Id] == nil { + return "", fmt.Errorf("Could not find migration file: %v", r.Id) + } + + rows[r.Id].Migrated = true + rows[r.Id].AppliedAt = r.AppliedAt + } + + for _, m := range migrations { + if rows[m.Id] != nil && rows[m.Id].Migrated { + fmt.Fprintf(table, "%s\t%s\n", m.Id, rows[m.Id].AppliedAt.String()) + } else { + fmt.Fprintf(table, "%s\tno\n", m.Id) + } + } + + if err := table.Flush(); err != nil { + return "", err + } + + return buffer.String(), nil +} + +type statusRow struct { + Id string + Migrated bool + AppliedAt time.Time +} diff --git a/services/horizon/internal/db2/schema/main_test.go b/services/horizon/internal/db2/schema/main_test.go new file mode 100644 index 0000000000..41ee4928f8 --- /dev/null +++ b/services/horizon/internal/db2/schema/main_test.go @@ -0,0 +1,35 @@ +package schema + +import ( + "net/http" + "testing" + + assetfs "github.com/elazarl/go-bindata-assetfs" + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/support/db/dbtest" + supportHttp "github.com/stellar/go/support/http" +) + +func TestInit(t *testing.T) { + tdb := dbtest.Postgres(t) + defer tdb.Close() + db := tdb.Open() + + defer db.Close() + + // make sure migrating in both directions works + + _, err := Migrate(db.DB, MigrateUp, 0) + assert.NoError(t, err) + + _, err = Migrate(db.DB, MigrateDown, 0) + assert.NoError(t, err) +} + +func TestGeneratedAssets(t *testing.T) { + generatedAssets := &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo} + if !supportHttp.EqualFileSystems(http.Dir("."), generatedAssets, "migrations") { + t.Fatalf("generated migrations does not match local migrations") + } +} diff --git a/services/horizon/internal/db2/schema/migrations/10_add_trades_price.sql b/services/horizon/internal/db2/schema/migrations/10_add_trades_price.sql new file mode 100644 index 0000000000..60bafb9417 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/10_add_trades_price.sql @@ -0,0 +1,34 @@ +-- +migrate Up + +-- Add rational price to trades table +ALTER TABLE history_trades ADD price_n BIGINT; +ALTER TABLE history_trades ADD price_d BIGINT; + +-- aggregate function for finding minimal price when price is represented as an array of two elements [n,d] +CREATE OR REPLACE FUNCTION public.min_price_agg ( NUMERIC[], NUMERIC[]) + RETURNS NUMERIC[] LANGUAGE SQL IMMUTABLE STRICT AS $$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $$; + +CREATE AGGREGATE public.MIN_PRICE ( +SFUNC = PUBLIC.min_price_agg, +BASETYPE = NUMERIC[], +STYPE = NUMERIC[] +); + +-- aggregate function for finding maximal price when price is represented as an array of two elements [n,d] +CREATE OR REPLACE FUNCTION public.max_price_agg ( NUMERIC[], NUMERIC[]) + RETURNS NUMERIC[] LANGUAGE SQL IMMUTABLE STRICT AS $$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $$; + +CREATE AGGREGATE public.MAX_PRICE ( +SFUNC = 
PUBLIC.max_price_agg, +BASETYPE = NUMERIC[], +STYPE = NUMERIC[] +); + +-- +migrate Down +ALTER TABLE history_trades DROP price_n; +ALTER TABLE history_trades DROP price_d; + +DROP FUNCTION public.min_price_agg ( NUMERIC[], NUMERIC[] ) CASCADE; +DROP FUNCTION public.max_price_agg ( NUMERIC[], NUMERIC[] ) CASCADE; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/11_add_trades_account_index.sql b/services/horizon/internal/db2/schema/migrations/11_add_trades_account_index.sql new file mode 100644 index 0000000000..acd9b5c707 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/11_add_trades_account_index.sql @@ -0,0 +1,9 @@ +-- +migrate Up + +CREATE INDEX htrd_by_base_account ON history_trades USING BTREE(base_account_id); +CREATE INDEX htrd_by_counter_account ON history_trades USING BTREE(counter_account_id); + +-- +migrate Down + +DROP INDEX htrd_by_base_account; +DROP INDEX htrd_by_counter_account; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/12_asset_stats_amount_string.sql b/services/horizon/internal/db2/schema/migrations/12_asset_stats_amount_string.sql new file mode 100644 index 0000000000..d990e0c5f7 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/12_asset_stats_amount_string.sql @@ -0,0 +1,7 @@ +-- +migrate Up +ALTER TABLE asset_stats +ALTER COLUMN amount SET DATA TYPE character varying; + +-- +migrate Down +ALTER TABLE asset_stats +ALTER COLUMN amount SET DATA TYPE bigint USING amount::bigint; diff --git a/services/horizon/internal/db2/schema/migrations/13_trade_offer_ids.sql b/services/horizon/internal/db2/schema/migrations/13_trade_offer_ids.sql new file mode 100644 index 0000000000..47d88232f0 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/13_trade_offer_ids.sql @@ -0,0 +1,15 @@ +-- +migrate Up + +ALTER TABLE history_trades ADD base_offer_id BIGINT; +ALTER TABLE history_trades ADD counter_offer_id BIGINT; + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree(base_offer_id); +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree(counter_offer_id); + +-- +migrate Down + +DROP INDEX htrd_by_base_offer; +DROP INDEX htrd_by_counter_offer; + +ALTER TABLE history_trades DROP COLUMN base_offer_id; +ALTER TABLE history_trades DROP COLUMN counter_offer_id; diff --git a/services/horizon/internal/db2/schema/migrations/14_fix_asset_toml_field.sql b/services/horizon/internal/db2/schema/migrations/14_fix_asset_toml_field.sql new file mode 100644 index 0000000000..4c6272eb88 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/14_fix_asset_toml_field.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +ALTER TABLE asset_stats ALTER COLUMN toml TYPE varchar(255); + +-- +migrate Down + +ALTER TABLE asset_stats ALTER COLUMN toml TYPE varchar(64); diff --git a/services/horizon/internal/db2/schema/migrations/15_ledger_failed_txs.sql b/services/horizon/internal/db2/schema/migrations/15_ledger_failed_txs.sql new file mode 100644 index 0000000000..0abb617c3d --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/15_ledger_failed_txs.sql @@ -0,0 +1,9 @@ +-- +migrate Up + +ALTER TABLE history_ledgers ADD successful_transaction_count integer DEFAULT NULL; +ALTER TABLE history_ledgers ADD failed_transaction_count integer DEFAULT NULL; + +-- +migrate Down + +ALTER TABLE history_ledgers DROP COLUMN successful_transaction_count; +ALTER TABLE history_ledgers DROP COLUMN failed_transaction_count; diff --git 
a/services/horizon/internal/db2/schema/migrations/16_ingest_failed_transactions.sql b/services/horizon/internal/db2/schema/migrations/16_ingest_failed_transactions.sql new file mode 100644 index 0000000000..25d82c5e42 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/16_ingest_failed_transactions.sql @@ -0,0 +1,12 @@ +-- +migrate Up + +-- Check db2/history.Transaction.Successful field comment for more information. +ALTER TABLE history_transactions ADD successful boolean; + +-- +migrate Down + +-- Remove failed transactions and operations from failed transactions! +DELETE FROM history_operations USING history_transactions + WHERE history_transactions.id = history_operations.transaction_id AND successful = false; +DELETE FROM history_transactions WHERE successful = false; +ALTER TABLE history_transactions DROP COLUMN successful; diff --git a/services/horizon/internal/db2/schema/migrations/17_transaction_fee_paid.sql b/services/horizon/internal/db2/schema/migrations/17_transaction_fee_paid.sql new file mode 100644 index 0000000000..d258b487bb --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/17_transaction_fee_paid.sql @@ -0,0 +1,9 @@ +-- +migrate Up + +ALTER TABLE history_transactions ADD fee_charged integer; +ALTER TABLE history_transactions RENAME COLUMN fee_paid TO max_fee; + +-- +migrate Down + +ALTER TABLE history_transactions DROP COLUMN fee_charged; +ALTER TABLE history_transactions RENAME COLUMN max_fee TO fee_paid; diff --git a/services/horizon/internal/db2/schema/migrations/18_account_for_signers.sql b/services/horizon/internal/db2/schema/migrations/18_account_for_signers.sql new file mode 100644 index 0000000000..53ec719864 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/18_account_for_signers.sql @@ -0,0 +1,19 @@ +-- +migrate Up + +CREATE TABLE accounts_signers ( + account character varying(64), + signer character varying(64), + weight integer NOT NULL, + -- we will query by signer so that is why signer is the first item in the composite key + PRIMARY KEY (signer, account) +); + +CREATE TABLE key_value_store ( + key varchar(255) NOT NULL, + value varchar(255) NOT NULL, + PRIMARY KEY (key) +); + +-- +migrate Down +DROP TABLE accounts_signers cascade; +DROP TABLE key_value_store cascade; diff --git a/services/horizon/internal/db2/schema/migrations/19_offers.sql b/services/horizon/internal/db2/schema/migrations/19_offers.sql new file mode 100644 index 0000000000..dbf07d637a --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/19_offers.sql @@ -0,0 +1,31 @@ +-- +migrate Up + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint PRIMARY KEY, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + last_modified_ledger INT NOT NULL +); + +CREATE INDEX offers_by_seller ON offers USING BTREE(sellerid); +CREATE INDEX offers_by_selling_asset ON offers USING BTREE(sellingasset); +CREATE INDEX offers_by_buying_asset ON offers USING BTREE(buyingasset); +CREATE INDEX offers_by_last_modified_ledger ON offers USING BTREE(last_modified_ledger); + +-- Distributed ingestion relies on a single value locked for updating +-- in a DB. When Horizon starts clear there is no value so we create it +-- here. If there's a conflict it means the value is already there so +-- we do nothing. 
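+-- (The ingesting node is expected to take a row-level lock on this value,
+-- e.g. with SELECT ... FOR UPDATE, so that only one instance ingests at a
+-- time; the locking itself happens in the Go ingestion code, not in this
+-- migration.)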
+INSERT INTO key_value_store (key, value) + VALUES ('exp_ingest_last_ledger', '0') + ON CONFLICT (key) DO NOTHING; + +-- +migrate Down + +DROP TABLE offers cascade; diff --git a/services/horizon/internal/db2/schema/migrations/1_initial_schema.sql b/services/horizon/internal/db2/schema/migrations/1_initial_schema.sql new file mode 100644 index 0000000000..9198c16103 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/1_initial_schema.sql @@ -0,0 +1,362 @@ +-- +migrate Up +-- +-- PostgreSQL database dump +-- + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: -; Tablespace: +-- + +CREATE TABLE history_accounts ( + id bigint NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: -; Tablespace: +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: -; Tablespace: +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: -; Tablespace: +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: -; Tablespace: +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: -; Tablespace: +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: 
history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: -; Tablespace: +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + fee_paid integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range +); + +-- +-- Name: id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Name: history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: -; Tablespace: +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON 
history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + +-- +-- Name: index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: -; Tablespace: +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + + +-- +migrate Down +drop table history_transactions cascade; +drop table history_transaction_participants cascade; +drop table history_operations cascade; +drop table history_operation_participants cascade; +drop table history_ledgers cascade; +drop table history_effects cascade; +drop table history_accounts cascade; \ No newline at end of 
file diff --git a/services/horizon/internal/db2/schema/migrations/20_account_for_signer_index.sql b/services/horizon/internal/db2/schema/migrations/20_account_for_signer_index.sql new file mode 100644 index 0000000000..f6637a00ab --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/20_account_for_signer_index.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +CREATE INDEX signers_by_account ON accounts_signers USING BTREE(account); + +-- +migrate Down + +DROP INDEX signers_by_account; diff --git a/services/horizon/internal/db2/schema/migrations/21_trades_remove_zero_amount_constraints.sql b/services/horizon/internal/db2/schema/migrations/21_trades_remove_zero_amount_constraints.sql new file mode 100644 index 0000000000..cbe97eaf35 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/21_trades_remove_zero_amount_constraints.sql @@ -0,0 +1,15 @@ +-- +migrate Up + +ALTER TABLE history_trades DROP CONSTRAINT history_trades_base_amount_check; +ALTER TABLE history_trades DROP CONSTRAINT history_trades_counter_amount_check; + +ALTER TABLE history_trades ADD CONSTRAINT history_trades_base_amount_check CHECK (base_amount >= 0); +ALTER TABLE history_trades ADD CONSTRAINT history_trades_counter_amount_check CHECK (counter_amount >= 0); + +-- +migrate Down + +ALTER TABLE history_trades DROP CONSTRAINT history_trades_base_amount_check; +ALTER TABLE history_trades DROP CONSTRAINT history_trades_counter_amount_check; + +ALTER TABLE history_trades ADD CONSTRAINT history_trades_base_amount_check CHECK (base_amount > 0); +ALTER TABLE history_trades ADD CONSTRAINT history_trades_counter_amount_check CHECK (counter_amount > 0); diff --git a/services/horizon/internal/db2/schema/migrations/22_trust_lines.sql b/services/horizon/internal/db2/schema/migrations/22_trust_lines.sql new file mode 100644 index 0000000000..df7701665c --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/22_trust_lines.sql @@ -0,0 +1,26 @@ +-- +migrate Up + +CREATE TABLE trust_lines ( + -- ledger_key is a LedgerKey marshaled using MarshalBinary + -- and base64-encoded, used to boost performance of some queries. + ledger_key character varying(150) NOT NULL, + account_id character varying(56) NOT NULL, + asset_type int NOT NULL, + asset_issuer character varying(56) NOT NULL, + asset_code character varying(12) NOT NULL, + balance bigint NOT NULL, + trust_line_limit bigint NOT NULL, + buying_liabilities bigint NOT NULL, + selling_liabilities bigint NOT NULL, + flags int NOT NULL, + last_modified_ledger INT NOT NULL, + PRIMARY KEY (ledger_key) +); + +CREATE INDEX trust_lines_by_account_id ON trust_lines USING BTREE(account_id); +CREATE INDEX trust_lines_by_type_code_issuer ON trust_lines USING BTREE(asset_type, asset_code, asset_issuer); +CREATE INDEX trust_lines_by_issuer ON trust_lines USING BTREE(asset_issuer); + +-- +migrate Down + +DROP TABLE trust_lines cascade; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/23_exp_asset_stats.sql b/services/horizon/internal/db2/schema/migrations/23_exp_asset_stats.sql new file mode 100644 index 0000000000..01a41fbd94 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/23_exp_asset_stats.sql @@ -0,0 +1,20 @@ +-- +migrate Up +-- exp_asset_stats is like the existing asset_stats table first defined in 8_create_asset_stats_table.sql +-- except that exp_asset_stats is populated by the experimental ingestion system while asset_stats is populated +-- by the legacy ingestion system. 
Once the experimental ingestion system replaces the legacy system then +-- we can remove asset_stats + +CREATE TABLE exp_asset_stats ( + asset_type INT NOT NULL, + asset_code VARCHAR(12) NOT NULL, + asset_issuer VARCHAR(56) NOT NULL, + amount TEXT NOT NULL, + num_accounts INTEGER NOT NULL, + PRIMARY KEY(asset_code, asset_issuer, asset_type) +); + +CREATE INDEX exp_asset_stats_by_issuer ON exp_asset_stats USING btree (asset_issuer); +CREATE INDEX exp_asset_stats_by_code ON exp_asset_stats USING btree (asset_code); + +-- +migrate Down +DROP TABLE exp_asset_stats cascade; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/24_accounts.sql b/services/horizon/internal/db2/schema/migrations/24_accounts.sql new file mode 100644 index 0000000000..aa678a0b96 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/24_accounts.sql @@ -0,0 +1,40 @@ +-- +migrate Up + +CREATE TABLE accounts ( + account_id character varying(56) NOT NULL, + balance bigint NOT NULL, + buying_liabilities bigint NOT NULL, + selling_liabilities bigint NOT NULL, + sequence_number bigint NOT NULL, + num_subentries int NOT NULL, + inflation_destination character varying(56) NOT NULL, + flags int NOT NULL, + home_domain character varying(32) NOT NULL, + master_weight smallint NOT NULL, + threshold_low smallint NOT NULL, + threshold_medium smallint NOT NULL, + threshold_high smallint NOT NULL, + last_modified_ledger INT NOT NULL, + PRIMARY KEY (account_id) +); + +CREATE INDEX accounts_inflation_destination ON accounts USING BTREE(inflation_destination); +CREATE INDEX accounts_home_domain ON accounts USING BTREE(home_domain); + +CREATE TABLE accounts_data ( + -- ledger_key is a LedgerKey marshaled using MarshalBinary + -- and base64-encoded, used to boost performance of some queries.
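+    -- (Roughly: the Go ingestion code stores base64(ledgerKey.MarshalBinary())
+    -- here, so rows can be matched without decoding XDR; exact helper names
+    -- may differ.)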
+ ledger_key character varying(150) NOT NULL, + account_id character varying(56) NOT NULL, + name character varying(64) NOT NULL, + value character varying(90) NOT NULL, -- base64-encoded 64 bytes + last_modified_ledger INT NOT NULL, + PRIMARY KEY (ledger_key) +); + +CREATE UNIQUE INDEX accounts_data_account_id_name ON accounts_data USING BTREE(account_id, name); + +-- +migrate Down + +DROP TABLE accounts cascade; +DROP TABLE accounts_data cascade; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/25_expingest_rename_columns.sql b/services/horizon/internal/db2/schema/migrations/25_expingest_rename_columns.sql new file mode 100644 index 0000000000..a0026d2c6b --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/25_expingest_rename_columns.sql @@ -0,0 +1,17 @@ +-- +migrate Up + +ALTER TABLE accounts_signers RENAME COLUMN account TO account_id; + +ALTER TABLE offers RENAME COLUMN sellerid TO seller_id; +ALTER TABLE offers RENAME COLUMN offerid TO offer_id; +ALTER TABLE offers RENAME COLUMN sellingasset TO selling_asset; +ALTER TABLE offers RENAME COLUMN buyingasset TO buying_asset; + +-- +migrate Down + +ALTER TABLE accounts_signers RENAME COLUMN account_id TO account; + +ALTER TABLE offers RENAME COLUMN seller_id TO sellerid; +ALTER TABLE offers RENAME COLUMN offer_id TO offerid; +ALTER TABLE offers RENAME COLUMN selling_asset TO sellingasset; +ALTER TABLE offers RENAME COLUMN buying_asset TO buyingasset; diff --git a/services/horizon/internal/db2/schema/migrations/26_exp_history_ledgers.sql b/services/horizon/internal/db2/schema/migrations/26_exp_history_ledgers.sql new file mode 100644 index 0000000000..fe33a75239 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/26_exp_history_ledgers.sql @@ -0,0 +1,12 @@ +-- +migrate Up + +CREATE TABLE exp_history_ledgers ( + LIKE history_ledgers + including defaults + including constraints + including indexes +); + +-- +migrate Down + +DROP TABLE exp_history_ledgers cascade; diff --git a/services/horizon/internal/db2/schema/migrations/27_exp_history_transactions.sql b/services/horizon/internal/db2/schema/migrations/27_exp_history_transactions.sql new file mode 100644 index 0000000000..436de7b54a --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/27_exp_history_transactions.sql @@ -0,0 +1,31 @@ +-- +migrate Up + +CREATE TABLE exp_history_transactions ( + LIKE history_transactions + including defaults + including constraints + including indexes +); + +CREATE TABLE exp_history_accounts ( + LIKE history_accounts + including defaults + including constraints + including indexes +); + +CREATE TABLE exp_history_transaction_participants ( + LIKE history_transaction_participants + including defaults + including constraints + including indexes +); + + +-- +migrate Down + +DROP TABLE exp_history_transactions cascade; + +DROP TABLE exp_history_accounts cascade; + +DROP TABLE exp_history_transaction_participants cascade; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/28_exp_history_operations.sql b/services/horizon/internal/db2/schema/migrations/28_exp_history_operations.sql new file mode 100644 index 0000000000..92eba3fea8 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/28_exp_history_operations.sql @@ -0,0 +1,21 @@ +-- +migrate Up + +CREATE TABLE exp_history_operations ( + LIKE history_operations + including defaults + including constraints + including indexes +); + +CREATE TABLE exp_history_operation_participants ( 
+ LIKE history_operation_participants + including defaults + including constraints + including indexes +); + +-- +migrate Down + +DROP TABLE exp_history_operations cascade; + +DROP TABLE exp_history_operation_participants cascade; diff --git a/services/horizon/internal/db2/schema/migrations/29_exp_history_assets.sql b/services/horizon/internal/db2/schema/migrations/29_exp_history_assets.sql new file mode 100644 index 0000000000..a775890967 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/29_exp_history_assets.sql @@ -0,0 +1,12 @@ +-- +migrate Up + +CREATE TABLE exp_history_assets ( + LIKE history_assets + including defaults + including constraints + including indexes +); + +-- +migrate Down + +DROP TABLE exp_history_assets cascade; diff --git a/services/horizon/internal/db2/schema/migrations/2_index_participants_by_toid.sql b/services/horizon/internal/db2/schema/migrations/2_index_participants_by_toid.sql new file mode 100644 index 0000000000..7b7067b50a --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/2_index_participants_by_toid.sql @@ -0,0 +1,9 @@ +-- +migrate Up + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + +-- +migrate Down + +DROP INDEX hop_by_hoid; +DROP INDEX htp_by_htid; diff --git a/services/horizon/internal/db2/schema/migrations/30_exp_history_trades.sql b/services/horizon/internal/db2/schema/migrations/30_exp_history_trades.sql new file mode 100644 index 0000000000..1b743fc680 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/30_exp_history_trades.sql @@ -0,0 +1,58 @@ +-- +migrate Up + +-- we cannot create exp_history_trades as: + +-- CREATE TABLE exp_history_trades ( +-- LIKE history_trades +-- including defaults +-- including constraints +-- including indexes +-- ); + +-- because the history_trades table has reference constraints to history_accounts and history_assets +-- and we do not want to copy those constraints. 
instead, we want to reference the +-- exp_history_accounts and exp_history_assets tables + +CREATE TABLE exp_history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL REFERENCES exp_history_accounts(id), + base_asset_id bigint NOT NULL REFERENCES exp_history_assets(id), + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL REFERENCES exp_history_accounts(id), + counter_asset_id bigint NOT NULL REFERENCES exp_history_assets(id), + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT exp_history_trades_base_amount_check CHECK ((base_amount >= 0)), + CONSTRAINT exp_history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT exp_history_trades_counter_amount_check CHECK ((counter_amount >= 0)) +); + + +CREATE INDEX exp_htrd_by_base_account ON exp_history_trades USING btree (base_account_id); + +CREATE INDEX exp_htrd_by_base_offer ON exp_history_trades USING btree (base_offer_id); + +CREATE INDEX exp_htrd_by_counter_account ON exp_history_trades USING btree (counter_account_id); + +CREATE INDEX exp_htrd_by_counter_offer ON exp_history_trades USING btree (counter_offer_id); + +CREATE INDEX exp_htrd_by_offer ON exp_history_trades USING btree (offer_id); + +CREATE INDEX exp_htrd_counter_lookup ON exp_history_trades USING btree (counter_asset_id); + +CREATE INDEX exp_htrd_pair_time_lookup ON exp_history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + +CREATE UNIQUE INDEX exp_htrd_pid ON exp_history_trades USING btree (history_operation_id, "order"); + +CREATE INDEX exp_htrd_time_lookup ON exp_history_trades USING btree (ledger_closed_at); + +-- +migrate Down + +DROP TABLE exp_history_trades cascade; diff --git a/services/horizon/internal/db2/schema/migrations/31_exp_history_effects.sql b/services/horizon/internal/db2/schema/migrations/31_exp_history_effects.sql new file mode 100644 index 0000000000..80a08d10ea --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/31_exp_history_effects.sql @@ -0,0 +1,12 @@ +-- +migrate Up + +CREATE TABLE exp_history_effects ( + LIKE history_effects + including defaults + including constraints + including indexes +); + +-- +migrate Down + +DROP TABLE exp_history_effects cascade; diff --git a/services/horizon/internal/db2/schema/migrations/32_drop_exp_history_tables.sql b/services/horizon/internal/db2/schema/migrations/32_drop_exp_history_tables.sql new file mode 100644 index 0000000000..25394fbdfb --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/32_drop_exp_history_tables.sql @@ -0,0 +1,131 @@ +-- +migrate Up + +DROP TABLE exp_history_effects cascade; + +DROP TABLE exp_history_trades cascade; + +DROP TABLE exp_history_assets cascade; + +DROP TABLE exp_history_operation_participants cascade; + +DROP TABLE exp_history_operations cascade; + +DROP TABLE exp_history_transaction_participants cascade; + +DROP TABLE exp_history_accounts cascade; + +DROP TABLE exp_history_transactions cascade; + +DROP TABLE exp_history_ledgers cascade; + +-- +migrate Down + +CREATE TABLE exp_history_ledgers ( + LIKE history_ledgers + including defaults + including constraints + including indexes +); + +CREATE TABLE exp_history_transactions ( + LIKE history_transactions + including defaults + including constraints + including indexes +); + +CREATE TABLE 
exp_history_accounts ( + LIKE history_accounts + including defaults + including constraints + including indexes +); + +CREATE TABLE exp_history_transaction_participants ( + LIKE history_transaction_participants + including defaults + including constraints + including indexes +); + +CREATE TABLE exp_history_operations ( + LIKE history_operations + including defaults + including constraints + including indexes +); + +CREATE TABLE exp_history_operation_participants ( + LIKE history_operation_participants + including defaults + including constraints + including indexes +); + +CREATE TABLE exp_history_assets ( + LIKE history_assets + including defaults + including constraints + including indexes +); + + +-- we cannot create exp_history_trades as: + +-- CREATE TABLE exp_history_trades ( +-- LIKE history_trades +-- including defaults +-- including constraints +-- including indexes +-- ); + +-- because the history_trades table has reference constraints to history_accounts and history_assets +-- and we do not want to copy those constraints. instead, we want to reference the +-- exp_history_accounts and exp_history_assets tables + +CREATE TABLE exp_history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL REFERENCES exp_history_accounts(id), + base_asset_id bigint NOT NULL REFERENCES exp_history_assets(id), + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL REFERENCES exp_history_accounts(id), + counter_asset_id bigint NOT NULL REFERENCES exp_history_assets(id), + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT exp_history_trades_base_amount_check CHECK ((base_amount >= 0)), + CONSTRAINT exp_history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT exp_history_trades_counter_amount_check CHECK ((counter_amount >= 0)) +); + + +CREATE INDEX exp_htrd_by_base_account ON exp_history_trades USING btree (base_account_id); + +CREATE INDEX exp_htrd_by_base_offer ON exp_history_trades USING btree (base_offer_id); + +CREATE INDEX exp_htrd_by_counter_account ON exp_history_trades USING btree (counter_account_id); + +CREATE INDEX exp_htrd_by_counter_offer ON exp_history_trades USING btree (counter_offer_id); + +CREATE INDEX exp_htrd_by_offer ON exp_history_trades USING btree (offer_id); + +CREATE INDEX exp_htrd_counter_lookup ON exp_history_trades USING btree (counter_asset_id); + +CREATE INDEX exp_htrd_pair_time_lookup ON exp_history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + +CREATE UNIQUE INDEX exp_htrd_pid ON exp_history_trades USING btree (history_operation_id, "order"); + +CREATE INDEX exp_htrd_time_lookup ON exp_history_trades USING btree (ledger_closed_at); + +CREATE TABLE exp_history_effects ( + LIKE history_effects + including defaults + including constraints + including indexes +); diff --git a/services/horizon/internal/db2/schema/migrations/33_remove_unused.sql b/services/horizon/internal/db2/schema/migrations/33_remove_unused.sql new file mode 100644 index 0000000000..fa353359e5 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/33_remove_unused.sql @@ -0,0 +1,29 @@ +-- +migrate Up + +DROP TABLE asset_stats cascade; + +DROP INDEX exp_asset_stats_by_code; + +DROP INDEX index_history_transactions_on_id; + +DROP INDEX index_history_ledgers_on_id; + +DROP INDEX 
asset_by_code; + +-- +migrate Down + +CREATE TABLE asset_stats ( + id BIGINT PRIMARY KEY REFERENCES history_assets ON DELETE CASCADE ON UPDATE RESTRICT, + amount BIGINT NOT NULL, + num_accounts INTEGER NOT NULL, + flags SMALLINT NOT NULL, + toml VARCHAR(64) NOT NULL +); + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + +CREATE INDEX exp_asset_stats_by_code ON exp_asset_stats USING btree (asset_code); + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); diff --git a/services/horizon/internal/db2/schema/migrations/34_fee_bump_transactions.sql b/services/horizon/internal/db2/schema/migrations/34_fee_bump_transactions.sql new file mode 100644 index 0000000000..01380abea5 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/34_fee_bump_transactions.sql @@ -0,0 +1,25 @@ +-- +migrate Up + +ALTER TABLE history_transactions + ALTER COLUMN fee_charged TYPE bigint, + ALTER COLUMN max_fee TYPE bigint, + ADD inner_transaction_hash character varying(64), + ADD fee_account character varying(64), + ADD inner_signatures character varying(96)[], + ADD new_max_fee bigint; + +CREATE INDEX by_inner_hash ON history_transactions USING btree (inner_transaction_hash) WHERE inner_transaction_hash IS NOT NULL; +CREATE INDEX by_fee_account ON history_transactions USING btree (fee_account) WHERE fee_account IS NOT NULL; + +-- +migrate Down + +DROP INDEX by_inner_hash; +DROP INDEX by_fee_account; + +ALTER TABLE history_transactions + ALTER COLUMN fee_charged TYPE integer, + ALTER COLUMN max_fee TYPE integer, + DROP COLUMN inner_transaction_hash, + DROP COLUMN fee_account, + DROP COLUMN inner_signatures, + DROP COLUMN new_max_fee; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/35_drop_participant_id.sql b/services/horizon/internal/db2/schema/migrations/35_drop_participant_id.sql new file mode 100644 index 0000000000..e6f440b54b --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/35_drop_participant_id.sql @@ -0,0 +1,15 @@ +-- +migrate Up + +ALTER TABLE history_operation_participants + DROP COLUMN id; + +ALTER TABLE history_transaction_participants + DROP COLUMN id; + +-- +migrate Down + +ALTER TABLE history_operation_participants + ADD COLUMN id integer; + +ALTER TABLE history_transaction_participants + ADD COLUMN id integer; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/36_deleted_offers.sql b/services/horizon/internal/db2/schema/migrations/36_deleted_offers.sql new file mode 100644 index 0000000000..86a09b73b1 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/36_deleted_offers.sql @@ -0,0 +1,24 @@ +-- +migrate Up + +ALTER TABLE offers ADD deleted boolean DEFAULT false; + +CREATE INDEX best_offer ON offers USING BTREE (selling_asset, buying_asset, deleted, price); +CREATE INDEX live_offers ON offers USING BTREE (deleted, last_modified_ledger); + +DROP INDEX offers_by_seller, offers_by_selling_asset, offers_by_buying_asset; + +CREATE INDEX offers_by_seller ON offers USING BTREE(seller_id, deleted); +CREATE INDEX offers_by_selling_asset ON offers USING BTREE(selling_asset, deleted); +CREATE INDEX offers_by_buying_asset ON offers USING BTREE(buying_asset, deleted); + +-- +migrate Down + +DELETE FROM offers where deleted = true; + +DROP INDEX offers_by_seller, offers_by_selling_asset, offers_by_buying_asset; + +ALTER TABLE offers DROP 
COLUMN deleted; + +CREATE INDEX offers_by_seller ON offers USING BTREE(seller_id); +CREATE INDEX offers_by_selling_asset ON offers USING BTREE(selling_asset); +CREATE INDEX offers_by_buying_asset ON offers USING BTREE(buying_asset); diff --git a/services/horizon/internal/db2/schema/migrations/37_add_tx_set_operation_count_to_ledgers.sql b/services/horizon/internal/db2/schema/migrations/37_add_tx_set_operation_count_to_ledgers.sql new file mode 100644 index 0000000000..84353e7ef1 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/37_add_tx_set_operation_count_to_ledgers.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +ALTER TABLE history_ledgers ADD tx_set_operation_count integer DEFAULT NULL; + +-- +migrate Down + +ALTER TABLE history_ledgers DROP COLUMN tx_set_operation_count; diff --git a/services/horizon/internal/db2/schema/migrations/38_add_constraints.sql b/services/horizon/internal/db2/schema/migrations/38_add_constraints.sql new file mode 100644 index 0000000000..a768263961 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/38_add_constraints.sql @@ -0,0 +1,115 @@ +-- +migrate Up + +ALTER TABLE history_transactions ADD CONSTRAINT valid_ledger_sequence CHECK (ledger_sequence > 0) NOT VALID; +ALTER TABLE history_transactions ADD CONSTRAINT valid_application_order CHECK (application_order >= 0) NOT VALID; +ALTER TABLE history_transactions ADD CONSTRAINT valid_account_sequence CHECK (account_sequence >= 0) NOT VALID; +ALTER TABLE history_transactions ADD CONSTRAINT valid_max_fee CHECK (max_fee > 0) NOT VALID; +ALTER TABLE history_transactions ADD CONSTRAINT valid_operation_count CHECK (operation_count >= 0) NOT VALID; +ALTER TABLE history_transactions ADD CONSTRAINT valid_fee_charged CHECK (fee_charged > 0) NOT VALID; +ALTER TABLE history_transactions ADD CONSTRAINT valid_new_max_fee CHECK (new_max_fee > 0) NOT VALID; + +ALTER TABLE offers ADD CONSTRAINT valid_amount CHECK (amount >= 0) NOT VALID; +ALTER TABLE offers ADD CONSTRAINT valid_pricen CHECK (pricen >= 0) NOT VALID; +ALTER TABLE offers ADD CONSTRAINT valid_priced CHECK (priced >= 0) NOT VALID; +ALTER TABLE offers ADD CONSTRAINT valid_price CHECK (price >= 0) NOT VALID; +ALTER TABLE offers ADD CONSTRAINT valid_last_modified_ledger CHECK (last_modified_ledger > 0) NOT VALID; + +ALTER TABLE trust_lines ADD CONSTRAINT valid_balance CHECK (balance >= 0) NOT VALID; +ALTER TABLE trust_lines ADD CONSTRAINT valid_trust_line_limit CHECK (trust_line_limit >= 0) NOT VALID; +ALTER TABLE trust_lines ADD CONSTRAINT valid_buying_liabilities CHECK (buying_liabilities >= 0) NOT VALID; +ALTER TABLE trust_lines ADD CONSTRAINT valid_selling_liabilities CHECK (selling_liabilities >= 0) NOT VALID; +ALTER TABLE trust_lines ADD CONSTRAINT valid_last_modified_ledger CHECK (last_modified_ledger > 0) NOT VALID; + +ALTER TABLE history_trades ADD CONSTRAINT valid_price_n CHECK (price_n >= 0) NOT VALID; +ALTER TABLE history_trades ADD CONSTRAINT valid_price_d CHECK (price_d >= 0) NOT VALID; +ALTER TABLE history_trades ADD CONSTRAINT valid_order CHECK ("order" >= 0) NOT VALID; + +ALTER TABLE history_operations ADD CONSTRAINT valid_application_order CHECK (application_order >= 0) NOT VALID; + +ALTER TABLE history_ledgers ADD CONSTRAINT valid_sequence CHECK (sequence > 0) NOT VALID; +ALTER TABLE history_ledgers ADD CONSTRAINT valid_transaction_count CHECK (transaction_count >= 0) NOT VALID; +ALTER TABLE history_ledgers ADD CONSTRAINT valid_operation_count CHECK (operation_count >= 0) NOT VALID; +ALTER TABLE history_ledgers ADD CONSTRAINT 
valid_total_coins CHECK (total_coins >= 0) NOT VALID; +ALTER TABLE history_ledgers ADD CONSTRAINT valid_fee_pool CHECK (fee_pool >= 0) NOT VALID; +ALTER TABLE history_ledgers ADD CONSTRAINT valid_base_fee CHECK (base_fee >= 0) NOT VALID; +ALTER TABLE history_ledgers ADD CONSTRAINT valid_base_reserve CHECK (base_reserve >= 0) NOT VALID; +ALTER TABLE history_ledgers ADD CONSTRAINT valid_max_tx_set_size CHECK (max_tx_set_size >= 0) NOT VALID; +ALTER TABLE history_ledgers ADD CONSTRAINT valid_successful_transaction_count CHECK (successful_transaction_count >= 0) NOT VALID; +ALTER TABLE history_ledgers ADD CONSTRAINT valid_failed_transaction_count CHECK (failed_transaction_count >= 0) NOT VALID; + +ALTER TABLE history_effects ADD CONSTRAINT valid_order CHECK ("order" >= 0) NOT VALID; + +ALTER TABLE exp_asset_stats ADD CONSTRAINT valid_num_accounts CHECK (num_accounts >= 0) NOT VALID; + +ALTER TABLE accounts_signers ADD CONSTRAINT valid_weight CHECK (weight >= 0) NOT VALID; + +ALTER TABLE accounts_data ADD CONSTRAINT valid_last_modified_ledger CHECK (last_modified_ledger > 0) NOT VALID; + +ALTER TABLE accounts ADD CONSTRAINT valid_balance CHECK (balance >= 0) NOT VALID; +ALTER TABLE accounts ADD CONSTRAINT valid_buying_liabilities CHECK (buying_liabilities >= 0) NOT VALID; +ALTER TABLE accounts ADD CONSTRAINT valid_selling_liabilities CHECK (selling_liabilities >= 0) NOT VALID; +ALTER TABLE accounts ADD CONSTRAINT valid_last_modified_ledger CHECK (last_modified_ledger > 0) NOT VALID; +ALTER TABLE accounts ADD CONSTRAINT valid_sequence_number CHECK (sequence_number >= 0) NOT VALID; +ALTER TABLE accounts ADD CONSTRAINT valid_num_subentries CHECK (num_subentries >= 0) NOT VALID; +ALTER TABLE accounts ADD CONSTRAINT valid_master_weight CHECK (master_weight >= 0) NOT VALID; +ALTER TABLE accounts ADD CONSTRAINT valid_threshold_low CHECK (threshold_low >= 0) NOT VALID; +ALTER TABLE accounts ADD CONSTRAINT valid_threshold_medium CHECK (threshold_medium >= 0) NOT VALID; +ALTER TABLE accounts ADD CONSTRAINT valid_threshold_high CHECK (threshold_high >= 0) NOT VALID; + +-- +migrate Down + +ALTER TABLE history_transactions DROP CONSTRAINT valid_ledger_sequence; +ALTER TABLE history_transactions DROP CONSTRAINT valid_application_order; +ALTER TABLE history_transactions DROP CONSTRAINT valid_account_sequence; +ALTER TABLE history_transactions DROP CONSTRAINT valid_max_fee; +ALTER TABLE history_transactions DROP CONSTRAINT valid_operation_count; +ALTER TABLE history_transactions DROP CONSTRAINT valid_fee_charged; +ALTER TABLE history_transactions DROP CONSTRAINT valid_new_max_fee; + +ALTER TABLE offers DROP CONSTRAINT valid_amount; +ALTER TABLE offers DROP CONSTRAINT valid_pricen; +ALTER TABLE offers DROP CONSTRAINT valid_priced; +ALTER TABLE offers DROP CONSTRAINT valid_price; +ALTER TABLE offers DROP CONSTRAINT valid_last_modified_ledger; + +ALTER TABLE trust_lines DROP CONSTRAINT valid_balance; +ALTER TABLE trust_lines DROP CONSTRAINT valid_trust_line_limit; +ALTER TABLE trust_lines DROP CONSTRAINT valid_buying_liabilities; +ALTER TABLE trust_lines DROP CONSTRAINT valid_selling_liabilities; +ALTER TABLE trust_lines DROP CONSTRAINT valid_last_modified_ledger; + +ALTER TABLE history_trades DROP CONSTRAINT valid_price_n; +ALTER TABLE history_trades DROP CONSTRAINT valid_price_d; +ALTER TABLE history_trades DROP CONSTRAINT valid_order; + +ALTER TABLE history_operations DROP CONSTRAINT valid_application_order; + +ALTER TABLE history_ledgers DROP CONSTRAINT valid_sequence; +ALTER TABLE history_ledgers DROP 
CONSTRAINT valid_transaction_count; +ALTER TABLE history_ledgers DROP CONSTRAINT valid_operation_count; +ALTER TABLE history_ledgers DROP CONSTRAINT valid_total_coins; +ALTER TABLE history_ledgers DROP CONSTRAINT valid_fee_pool; +ALTER TABLE history_ledgers DROP CONSTRAINT valid_base_fee; +ALTER TABLE history_ledgers DROP CONSTRAINT valid_base_reserve; +ALTER TABLE history_ledgers DROP CONSTRAINT valid_max_tx_set_size; +ALTER TABLE history_ledgers DROP CONSTRAINT valid_successful_transaction_count; +ALTER TABLE history_ledgers DROP CONSTRAINT valid_failed_transaction_count; + +ALTER TABLE history_effects DROP CONSTRAINT valid_order; + +ALTER TABLE exp_asset_stats DROP CONSTRAINT valid_num_accounts; + +ALTER TABLE accounts_signers DROP CONSTRAINT valid_weight; + +ALTER TABLE accounts_data DROP CONSTRAINT valid_last_modified_ledger; + +ALTER TABLE accounts DROP CONSTRAINT valid_balance; +ALTER TABLE accounts DROP CONSTRAINT valid_buying_liabilities; +ALTER TABLE accounts DROP CONSTRAINT valid_selling_liabilities; +ALTER TABLE accounts DROP CONSTRAINT valid_last_modified_ledger; +ALTER TABLE accounts DROP CONSTRAINT valid_sequence_number; +ALTER TABLE accounts DROP CONSTRAINT valid_num_subentries; +ALTER TABLE accounts DROP CONSTRAINT valid_master_weight; +ALTER TABLE accounts DROP CONSTRAINT valid_threshold_low; +ALTER TABLE accounts DROP CONSTRAINT valid_threshold_medium; +ALTER TABLE accounts DROP CONSTRAINT valid_threshold_high; diff --git a/services/horizon/internal/db2/schema/migrations/39_claimable_balances.sql b/services/horizon/internal/db2/schema/migrations/39_claimable_balances.sql new file mode 100644 index 0000000000..ba9b90766d --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/39_claimable_balances.sql @@ -0,0 +1,20 @@ +-- +migrate Up + +CREATE TABLE claimable_balances ( + id TEXT NOT NULL, -- ClaimableBalanceID in base64 + claimants jsonb NOT NULL, + asset text NOT NULL, + amount bigint NOT NULL CHECK (amount > 0), + sponsor TEXT, + last_modified_ledger integer NOT NULL, + PRIMARY KEY (id) +); + +CREATE INDEX claimable_balances_by_asset ON claimable_balances USING BTREE(asset); +CREATE INDEX claimable_balances_by_sponsor ON claimable_balances USING BTREE(sponsor); +CREATE INDEX claimabable_balances_by_claimants ON claimable_balances USING gin(claimants jsonb_path_ops); +CREATE INDEX claimable_balances_by_last_modified_ledger_and_id ON claimable_balances USING BTREE(last_modified_ledger, id); + +-- +migrate Down + +DROP TABLE claimable_balances cascade; diff --git a/services/horizon/internal/db2/schema/migrations/39_history_trades_indices.sql b/services/horizon/internal/db2/schema/migrations/39_history_trades_indices.sql new file mode 100644 index 0000000000..93ed26f601 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/39_history_trades_indices.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +CREATE INDEX htrd_pair_pid ON history_trades USING BTREE(base_asset_id, counter_asset_id, history_operation_id, "order"); + +-- +migrate Down + +DROP INDEX htrd_pair_pid; diff --git a/services/horizon/internal/db2/schema/migrations/3_use_sequence_in_history_accounts.sql b/services/horizon/internal/db2/schema/migrations/3_use_sequence_in_history_accounts.sql new file mode 100644 index 0000000000..331bd971da --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/3_use_sequence_in_history_accounts.sql @@ -0,0 +1,13 @@ +-- +migrate Up +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; +SELECT 
setval('history_accounts_id_seq', (SELECT MAX(id) FROM history_accounts)); +ALTER TABLE ONLY history_accounts ALTER COLUMN id SET DEFAULT nextval('history_accounts_id_seq'::regclass); + +-- +migrate Down +ALTER TABLE ONLY history_accounts ALTER COLUMN id DROP DEFAULT; +DROP SEQUENCE history_accounts_id_seq; diff --git a/services/horizon/internal/db2/schema/migrations/40_fix_inner_tx_max_fee_constraint.sql b/services/horizon/internal/db2/schema/migrations/40_fix_inner_tx_max_fee_constraint.sql new file mode 100644 index 0000000000..0b6fd5b11a --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/40_fix_inner_tx_max_fee_constraint.sql @@ -0,0 +1,10 @@ +-- +migrate Up + +ALTER TABLE history_transactions DROP CONSTRAINT valid_max_fee; +-- Inner tx in fee bump can have max_fee=0 +ALTER TABLE history_transactions ADD CONSTRAINT valid_max_fee CHECK (max_fee >= 0) NOT VALID; + +-- +migrate Down + +ALTER TABLE history_transactions DROP CONSTRAINT valid_max_fee; +ALTER TABLE history_transactions ADD CONSTRAINT valid_max_fee CHECK (max_fee > 0) NOT VALID; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/41_add_sponsor_to_state_tables.sql b/services/horizon/internal/db2/schema/migrations/41_add_sponsor_to_state_tables.sql new file mode 100644 index 0000000000..b756892c85 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/41_add_sponsor_to_state_tables.sql @@ -0,0 +1,24 @@ +-- +migrate Up + +ALTER TABLE accounts ADD sponsor TEXT; +CREATE INDEX accounts_by_sponsor ON accounts USING BTREE(sponsor); + +ALTER TABLE accounts_data ADD sponsor TEXT; +CREATE INDEX accounts_data_by_sponsor ON accounts_data USING BTREE(sponsor); + +ALTER TABLE accounts_signers ADD sponsor TEXT; +CREATE INDEX accounts_signers_by_sponsor ON accounts_signers USING BTREE(sponsor); + +ALTER TABLE trust_lines ADD sponsor TEXT; +CREATE INDEX trust_lines_by_sponsor ON trust_lines USING BTREE(sponsor); + +ALTER TABLE offers ADD sponsor TEXT; +CREATE INDEX offers_by_sponsor ON offers USING BTREE(sponsor); + +-- +migrate Down + +ALTER TABLE accounts DROP sponsor; +ALTER TABLE accounts_data DROP sponsor; +ALTER TABLE accounts_signers DROP sponsor; +ALTER TABLE trust_lines DROP sponsor; +ALTER TABLE offers DROP sponsor; diff --git a/services/horizon/internal/db2/schema/migrations/42_add_num_sponsored_and_num_sponsoring_to_accounts.sql b/services/horizon/internal/db2/schema/migrations/42_add_num_sponsored_and_num_sponsoring_to_accounts.sql new file mode 100644 index 0000000000..15c04eb333 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/42_add_num_sponsored_and_num_sponsoring_to_accounts.sql @@ -0,0 +1,11 @@ +-- +migrate Up + +ALTER TABLE accounts +ADD COLUMN num_sponsored integer DEFAULT 0 CHECK (num_sponsored >= 0), +ADD COLUMN num_sponsoring integer DEFAULT 0 CHECK (num_sponsoring >= 0); + +-- +migrate Down + +ALTER TABLE accounts +DROP COLUMN num_sponsored, +DROP COLUMN num_sponsoring; diff --git a/services/horizon/internal/db2/schema/migrations/43_add_claimable_balances_flags.sql b/services/horizon/internal/db2/schema/migrations/43_add_claimable_balances_flags.sql new file mode 100644 index 0000000000..54a6a31c3c --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/43_add_claimable_balances_flags.sql @@ -0,0 +1,9 @@ +-- +migrate Up + + +ALTER TABLE claimable_balances ADD flags int NOT NULL DEFAULT 0; + +-- +migrate Down + +ALTER TABLE claimable_balances DROP flags; + diff --git 
a/services/horizon/internal/db2/schema/migrations/44_asset_stat_accounts_and_balances.sql b/services/horizon/internal/db2/schema/migrations/44_asset_stat_accounts_and_balances.sql new file mode 100644 index 0000000000..93beade74b --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/44_asset_stat_accounts_and_balances.sql @@ -0,0 +1,19 @@ +-- +migrate Up + +ALTER TABLE exp_asset_stats +ADD COLUMN accounts JSONB, +ADD COLUMN balances JSONB; +UPDATE exp_asset_stats + SET + accounts = jsonb_build_object('authorized', num_accounts), + balances = jsonb_build_object('authorized', amount); + +ALTER TABLE exp_asset_stats +ALTER COLUMN accounts SET NOT NULL, +ALTER COLUMN balances SET NOT NULL; + +-- +migrate Down + +ALTER TABLE exp_asset_stats +DROP COLUMN accounts, +DROP COLUMN balances; diff --git a/services/horizon/internal/db2/schema/migrations/45_add_claimable_balances_history.sql b/services/horizon/internal/db2/schema/migrations/45_add_claimable_balances_history.sql new file mode 100644 index 0000000000..35476e9950 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/45_add_claimable_balances_history.sql @@ -0,0 +1,51 @@ +-- +migrate Up + +CREATE SEQUENCE history_claimable_balances_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + +CREATE TABLE history_claimable_balances ( + id bigint NOT NULL DEFAULT nextval('history_claimable_balances_id_seq'::regclass), + claimable_balance_id text NOT NULL +); + +CREATE UNIQUE INDEX "index_history_claimable_balances_on_id" ON history_claimable_balances USING btree (id); +CREATE UNIQUE INDEX "index_history_claimable_balances_on_claimable_balance_id" ON history_claimable_balances USING btree (claimable_balance_id); + +CREATE TABLE history_operation_claimable_balances ( + history_operation_id bigint NOT NULL, + history_claimable_balance_id bigint NOT NULL +); + +CREATE UNIQUE INDEX "index_history_operation_claimable_balances_on_ids" ON history_operation_claimable_balances USING btree (history_operation_id , history_claimable_balance_id); +CREATE INDEX "index_history_operation_claimable_balances_on_operation_id" ON history_operation_claimable_balances USING btree (history_operation_id); + +CREATE TABLE history_transaction_claimable_balances ( + history_transaction_id bigint NOT NULL, + history_claimable_balance_id bigint NOT NULL +); + +CREATE UNIQUE INDEX "index_history_transaction_claimable_balances_on_ids" ON history_transaction_claimable_balances USING btree (history_transaction_id , history_claimable_balance_id); +CREATE INDEX "index_history_transaction_claimable_balances_on_transaction_id" ON history_transaction_claimable_balances USING btree (history_transaction_id); + +-- +migrate Down + +DROP INDEX "index_history_claimable_balances_on_id"; +DROP INDEX "index_history_claimable_balances_on_claimable_balance_id"; + +DROP TABLE history_claimable_balances; + +DROP SEQUENCE history_claimable_balances_id_seq; + +DROP INDEX "index_history_operation_claimable_balances_on_ids"; +DROP INDEX "index_history_operation_claimable_balances_on_operation_id"; + +DROP TABLE history_operation_claimable_balances; + +DROP INDEX "index_history_transaction_claimable_balances_on_ids"; +DROP INDEX "index_history_transaction_claimable_balances_on_transaction_id"; + +DROP TABLE history_transaction_claimable_balances; diff --git a/services/horizon/internal/db2/schema/migrations/46_add_muxed_accounts.sql b/services/horizon/internal/db2/schema/migrations/46_add_muxed_accounts.sql new file mode 100644 index 
0000000000..3cc03750c7 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/46_add_muxed_accounts.sql @@ -0,0 +1,11 @@ +-- +migrate Up + +ALTER TABLE history_transactions ADD account_muxed varchar(69) NULL, ADD fee_account_muxed varchar(69) NULL; +ALTER TABLE history_operations ADD source_account_muxed varchar(69) NULL; +ALTER TABLE history_effects ADD address_muxed varchar(69) NULL; + +-- +migrate Down + +ALTER TABLE history_transactions DROP account_muxed, DROP fee_account_muxed; +ALTER TABLE history_operations DROP source_account_muxed; +ALTER TABLE history_effects DROP address_muxed; diff --git a/services/horizon/internal/db2/schema/migrations/47_precompute_trade_aggregations.sql b/services/horizon/internal/db2/schema/migrations/47_precompute_trade_aggregations.sql new file mode 100644 index 0000000000..f25da4489e --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/47_precompute_trade_aggregations.sql @@ -0,0 +1,55 @@ +-- +migrate Up notransaction + +-- Create the new table +CREATE TABLE history_trades_60000 ( + timestamp bigint not null, + base_asset_id bigint not null, + counter_asset_id bigint not null, + count integer not null, + base_volume numeric not null, + counter_volume numeric not null, + avg numeric not null, + high_n numeric not null, + high_d numeric not null, + low_n numeric not null, + low_d numeric not null, + open_ledger_toid bigint not null, + open_n numeric not null, + open_d numeric not null, + close_ledger_toid bigint not null, + close_n numeric not null, + close_d numeric not null, + + PRIMARY KEY(base_asset_id, counter_asset_id, timestamp) +); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION to_millis(t timestamp without time zone, trun numeric DEFAULT 1) + RETURNS bigint AS $$ + BEGIN + RETURN div(cast((extract(epoch from t) * 1000 ) as bigint), trun)*trun; + END; +$$ LANGUAGE plpgsql IMMUTABLE; +-- +migrate StatementEnd + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION to_millis(t timestamp with time zone, trun numeric DEFAULT 1) + RETURNS bigint AS $$ + BEGIN + RETURN to_millis(t::timestamp, trun); + END; +$$ LANGUAGE plpgsql IMMUTABLE; +-- +migrate StatementEnd + +CREATE INDEX CONCURRENTLY htrd_agg_bucket_lookup ON history_trades + USING btree (to_millis(ledger_closed_at, '60000'::numeric)); + +CREATE INDEX CONCURRENTLY htrd_agg_open_ledger_toid ON history_trades_60000 USING btree (open_ledger_toid); + +-- +migrate Down + +DROP INDEX htrd_agg_open_ledger_toid; +DROP INDEX htrd_agg_bucket_lookup; +DROP TABLE history_trades_60000; +DROP FUNCTION to_millis(timestamp with time zone, numeric); +DROP FUNCTION to_millis(timestamp without time zone, numeric); diff --git a/services/horizon/internal/db2/schema/migrations/48_rebuild_trade_aggregations.sql b/services/horizon/internal/db2/schema/migrations/48_rebuild_trade_aggregations.sql new file mode 100644 index 0000000000..4ef6b6b6fb --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/48_rebuild_trade_aggregations.sql @@ -0,0 +1,44 @@ +-- +migrate Up + +-- Backfill the table with existing data. This takes about 9 minutes. 
+WITH trades AS ( + SELECT + to_millis(ledger_closed_at, 60000) as timestamp, + history_operation_id, + "order", + base_asset_id, + base_amount, + counter_asset_id, + counter_amount, + ARRAY[price_n, price_d] as price + FROM history_trades + ORDER BY base_asset_id, counter_asset_id, history_operation_id, "order" +), rebuilt as ( + SELECT + timestamp, + base_asset_id, + counter_asset_id, + count(*) as count, + sum(base_amount) as base_volume, + sum(counter_amount) as counter_volume, + sum(counter_amount::numeric)/sum(base_amount::numeric) as avg, + (max_price(price))[1] as high_n, + (max_price(price))[2] as high_d, + (min_price(price))[1] as low_n, + (min_price(price))[2] as low_d, + first(history_operation_id) as open_ledger_toid, + (first(price))[1] as open_n, + (first(price))[2] as open_d, + last(history_operation_id) as close_ledger_toid, + (last(price))[1] as close_n, + (last(price))[2] as close_d + FROM trades + GROUP by base_asset_id, counter_asset_id, timestamp +) + INSERT INTO history_trades_60000 ( + SELECT * from rebuilt + ); + +-- +migrate Down + +TRUNCATE TABLE history_trades_60000; diff --git a/services/horizon/internal/db2/schema/migrations/49_add_brin_index_trade_aggregations.sql b/services/horizon/internal/db2/schema/migrations/49_add_brin_index_trade_aggregations.sql new file mode 100644 index 0000000000..b073fc9d25 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/49_add_brin_index_trade_aggregations.sql @@ -0,0 +1,7 @@ +-- +migrate Up notransaction + +CREATE INDEX CONCURRENTLY IF NOT EXISTS htrd_agg_timestamp_brin ON history_trades_60000 USING brin(timestamp); + +-- +migrate Down + +DROP INDEX IF EXISTS htrd_agg_timestamp_brin; diff --git a/services/horizon/internal/db2/schema/migrations/4_add_protocol_version.sql b/services/horizon/internal/db2/schema/migrations/4_add_protocol_version.sql new file mode 100644 index 0000000000..bc745ffa0d --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/4_add_protocol_version.sql @@ -0,0 +1,6 @@ +-- +migrate Up +ALTER TABLE ONLY history_ledgers + ADD COLUMN protocol_version integer DEFAULT 0 NOT NULL; + +-- +migrate Down +ALTER TABLE ONLY history_ledgers DROP COLUMN protocol_version; diff --git a/services/horizon/internal/db2/schema/migrations/50_liquidity_pools.sql b/services/horizon/internal/db2/schema/migrations/50_liquidity_pools.sql new file mode 100644 index 0000000000..9686b7160b --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/50_liquidity_pools.sql @@ -0,0 +1,94 @@ +-- +migrate Up + +CREATE TABLE liquidity_pools ( + id text NOT NULL, -- hex-encoded PoolID + type smallint NOT NULL, + fee integer NOT NULL, + trustline_count bigint NOT NULL CHECK (trustline_count > 0), + share_count bigint NOT NULL DEFAULT 0 CHECK(share_count >= 0), + asset_reserves jsonb NOT NULL, + last_modified_ledger integer NOT NULL, + deleted boolean NOT NULL DEFAULT false, + PRIMARY KEY (id) +); + +CREATE INDEX liquidity_pools_by_asset_reserves ON liquidity_pools USING gin(asset_reserves jsonb_path_ops); +CREATE INDEX live_liquidity_pools ON liquidity_pools USING BTREE (deleted, last_modified_ledger); + +CREATE SEQUENCE history_liquidity_pools_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + +CREATE TABLE history_liquidity_pools ( + id bigint NOT NULL DEFAULT nextval('history_liquidity_pools_id_seq'::regclass), + liquidity_pool_id text NOT NULL +); + +CREATE UNIQUE INDEX index_history_liquidity_pools_on_id ON history_liquidity_pools USING btree (id); +CREATE UNIQUE INDEX 
index_history_liquidity_pools_on_liquidity_pool_id ON history_liquidity_pools USING btree (liquidity_pool_id); + +CREATE TABLE history_operation_liquidity_pools ( + history_operation_id bigint NOT NULL, + history_liquidity_pool_id bigint NOT NULL +); + +CREATE UNIQUE INDEX index_history_operation_liquidity_pools_on_ids ON history_operation_liquidity_pools USING btree (history_operation_id , history_liquidity_pool_id); +CREATE INDEX index_history_operation_liquidity_pools_on_operation_id ON history_operation_liquidity_pools USING btree (history_operation_id); + +CREATE TABLE history_transaction_liquidity_pools ( + history_transaction_id bigint NOT NULL, + history_liquidity_pool_id bigint NOT NULL +); + +CREATE UNIQUE INDEX index_history_transaction_liquidity_pools_on_ids ON history_transaction_liquidity_pools USING btree (history_transaction_id , history_liquidity_pool_id); +CREATE INDEX index_history_transaction_liquidity_pools_on_transaction_id ON history_transaction_liquidity_pools USING btree (history_transaction_id); + +ALTER TABLE trust_lines ADD liquidity_pool_id text; +CREATE INDEX trust_lines_by_liquidity_pool_id ON trust_lines USING BTREE(liquidity_pool_id); + +DROP INDEX htrd_by_offer; +DROP INDEX htrd_counter_lookup; + +ALTER TABLE history_trades DROP offer_id, + ALTER base_account_id DROP NOT NULL, + ALTER counter_account_id DROP NOT NULL, + ADD base_liquidity_pool_id bigint, + ADD counter_liquidity_pool_id bigint, + ADD liquidity_pool_fee int; + +CREATE INDEX htrd_by_base_liquidity_pool_id ON history_trades USING BTREE(base_liquidity_pool_id); +CREATE INDEX htrd_by_counter_liquidity_pool_id ON history_trades USING BTREE(counter_liquidity_pool_id); + +-- +migrate Down + +DROP INDEX htrd_by_counter_liquidity_pool_id; +DROP INDEX htrd_by_base_liquidity_pool_id; + +DELETE FROM history_trades WHERE (counter_account_id IS NULL) OR (base_account_id IS NULL); + +ALTER TABLE history_trades DROP liquidity_pool_fee, + DROP counter_liquidity_pool_id, + DROP base_liquidity_pool_id, + ALTER counter_account_id SET NOT NULL, + ALTER base_account_id SET NOT NULL, + ADD offer_id bigint; + +CREATE INDEX htrd_counter_lookup on history_trades USING BTREE(counter_asset_id); +CREATE INDEX htrd_by_offer ON history_trades USING BTREE(offer_id); + +DROP INDEX trust_lines_by_liquidity_pool_id; +ALTER TABLE trust_lines DROP liquidity_pool_id; + +DROP TABLE history_liquidity_pools cascade; + +DROP SEQUENCE history_liquidity_pools_id_seq; + +DROP TABLE history_operation_liquidity_pools cascade; + +DROP TABLE history_transaction_liquidity_pools cascade; + +DROP TABLE liquidity_pools cascade; + diff --git a/services/horizon/internal/db2/schema/migrations/51_remove_ht_unused_indexes.sql b/services/horizon/internal/db2/schema/migrations/51_remove_ht_unused_indexes.sql new file mode 100644 index 0000000000..69e5b3bcfc --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/51_remove_ht_unused_indexes.sql @@ -0,0 +1,10 @@ +-- +migrate Up + +-- unused indices +DROP INDEX IF EXISTS by_account; +DROP INDEX IF EXISTS by_fee_account; + + +-- +migrate Down +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); +CREATE INDEX by_fee_account ON history_transactions USING btree (fee_account) WHERE fee_account IS NOT NULL; diff --git a/services/horizon/internal/db2/schema/migrations/52_add_trade_type_index.sql b/services/horizon/internal/db2/schema/migrations/52_add_trade_type_index.sql new file mode 100644 index 0000000000..0abcd95c6f --- /dev/null +++ 
b/services/horizon/internal/db2/schema/migrations/52_add_trade_type_index.sql @@ -0,0 +1,10 @@ +-- +migrate Up + +ALTER TABLE history_trades ADD trade_type smallint DEFAULT 1 CHECK(trade_type > 0); +UPDATE history_trades SET trade_type = 2 WHERE base_liquidity_pool_id IS NOT NULL OR counter_liquidity_pool_id IS NOT NULL; +CREATE INDEX htrd_by_trade_type ON history_trades USING BTREE(trade_type, history_operation_id, "order"); + +-- +migrate Down + +DROP INDEX htrd_by_trade_type; +ALTER TABLE history_trades DROP trade_type; diff --git a/services/horizon/internal/db2/schema/migrations/5_create_trades_table.sql b/services/horizon/internal/db2/schema/migrations/5_create_trades_table.sql new file mode 100644 index 0000000000..a376c55ef6 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/5_create_trades_table.sql @@ -0,0 +1,31 @@ +-- +migrate Up +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + + offer_id bigint NOT NULL, + + seller_id bigint NOT NULL, + buyer_id bigint NOT NULL, + + sold_asset_type character varying(64) NOT NULL, + sold_asset_issuer character varying(56) NOT NULL, + sold_asset_code character varying(12) NOT NULL, + sold_amount bigint NOT NULL CHECK (sold_amount > 0), + + bought_asset_type character varying(64) NOT NULL, + bought_asset_issuer character varying(56) NOT NULL, + bought_asset_code character varying(12) NOT NULL, + bought_amount bigint NOT NULL CHECK (bought_amount > 0) +); + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + +CREATE INDEX htr_by_sold ON history_trades USING btree (sold_asset_type, sold_asset_code, sold_asset_issuer); + +CREATE INDEX htr_by_bought ON history_trades USING btree (bought_asset_type, bought_asset_code, bought_asset_issuer); + +-- +migrate Down +DROP TABLE history_trades cascade; diff --git a/services/horizon/internal/db2/schema/migrations/6_create_assets_table.sql b/services/horizon/internal/db2/schema/migrations/6_create_assets_table.sql new file mode 100644 index 0000000000..d4dcd57817 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/6_create_assets_table.sql @@ -0,0 +1,13 @@ +-- +migrate Up +CREATE TABLE history_assets ( + id SERIAL PRIMARY KEY , + asset_type VARCHAR(64) NOT NULL, + asset_code VARCHAR(12) NOT NULL, + asset_issuer VARCHAR(56) NOT NULL, + UNIQUE(asset_code, asset_type, asset_issuer) +); + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + +-- +migrate Down +DROP TABLE history_assets cascade; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/7_modify_trades_table.sql b/services/horizon/internal/db2/schema/migrations/7_modify_trades_table.sql new file mode 100644 index 0000000000..cc9c3c319a --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/7_modify_trades_table.sql @@ -0,0 +1,48 @@ +-- +migrate Up +-- drop the old trades table. 
put a new one in place +DROP TABLE history_trades; +CREATE TABLE history_trades ( + history_operation_id BIGINT NOT NULL, + "order" INTEGER NOT NULL, + ledger_closed_at TIMESTAMP NOT NULL, + offer_id BIGINT NOT NULL, + base_account_id BIGINT NOT NULL REFERENCES history_accounts(id), + base_asset_id BIGINT NOT NULL REFERENCES history_assets(id), + base_amount BIGINT NOT NULL CHECK (base_amount > 0), + counter_account_id BIGINT NOT NULL REFERENCES history_accounts(id), + counter_asset_id BIGINT NOT NULL REFERENCES history_assets(id), + counter_amount BIGINT NOT NULL CHECK (counter_amount > 0), + base_is_seller BOOLEAN, + CHECK(base_asset_id < counter_asset_id) +); + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); +CREATE INDEX htrd_pair_time_lookup ON history_trades USING BTREE(base_asset_id, counter_asset_id, ledger_closed_at); +CREATE INDEX htrd_counter_lookup ON history_trades USING BTREE(counter_asset_id); +CREATE INDEX htrd_time_lookup ON history_trades USING BTREE(ledger_closed_at); +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +migrate Down +-- drop the newer table. reinstate the old one. +DROP TABLE history_trades cascade; +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + offer_id bigint NOT NULL, + seller_id bigint NOT NULL, + buyer_id bigint NOT NULL, + sold_asset_type character varying(64) NOT NULL, + sold_asset_issuer character varying(56) NOT NULL, + sold_asset_code character varying(12) NOT NULL, + sold_amount bigint NOT NULL CHECK (sold_amount > 0), + bought_asset_type character varying(64) NOT NULL, + bought_asset_issuer character varying(56) NOT NULL, + bought_asset_code character varying(12) NOT NULL, + bought_amount bigint NOT NULL CHECK (bought_amount > 0) +); + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); +CREATE INDEX htr_by_sold ON history_trades USING btree (sold_asset_type, sold_asset_code, sold_asset_issuer); +CREATE INDEX htr_by_bought ON history_trades USING btree (bought_asset_type, bought_asset_code, bought_asset_issuer); \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/8_add_aggregators.sql b/services/horizon/internal/db2/schema/migrations/8_add_aggregators.sql new file mode 100644 index 0000000000..79189c9279 --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/8_add_aggregators.sql @@ -0,0 +1,28 @@ +-- +migrate Up + +-- https://wiki.postgresql.org/wiki/First/last_(aggregate) +-- Create a function that always returns the first non-NULL item +CREATE FUNCTION public.first_agg ( anyelement, anyelement ) + RETURNS anyelement LANGUAGE SQL IMMUTABLE STRICT AS $$ SELECT $1 $$; + +-- And then wrap an aggregate around it +CREATE AGGREGATE public.FIRST ( +SFUNC = PUBLIC.first_agg, +BASETYPE = ANYELEMENT, +STYPE = ANYELEMENT +); + +-- Create a function that always returns the last non-NULL item +CREATE FUNCTION public.last_agg ( anyelement, anyelement ) + RETURNS anyelement LANGUAGE SQL IMMUTABLE STRICT AS $$ SELECT $2 $$; + +-- And then wrap an aggregate around it +CREATE AGGREGATE public.LAST ( +sfunc = public.last_agg, +basetype = anyelement, +stype = anyelement +); + +-- +migrate Down +DROP FUNCTION public.first_agg ( anyelement, anyelement ) CASCADE; +DROP FUNCTION public.last_agg (anyelement, anyelement ) CASCADE; \ No newline at end of file diff --git 
a/services/horizon/internal/db2/schema/migrations/8_create_asset_stats_table.sql b/services/horizon/internal/db2/schema/migrations/8_create_asset_stats_table.sql new file mode 100644 index 0000000000..05a3fc75ae --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/8_create_asset_stats_table.sql @@ -0,0 +1,13 @@ +-- +migrate Up +CREATE TABLE asset_stats ( + id BIGINT PRIMARY KEY REFERENCES history_assets ON DELETE CASCADE ON UPDATE RESTRICT, + amount BIGINT NOT NULL, + num_accounts INTEGER NOT NULL, + flags SMALLINT NOT NULL, + toml VARCHAR(64) NOT NULL +); + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + +-- +migrate Down +DROP TABLE asset_stats cascade; \ No newline at end of file diff --git a/services/horizon/internal/db2/schema/migrations/9_add_header_xdr.sql b/services/horizon/internal/db2/schema/migrations/9_add_header_xdr.sql new file mode 100644 index 0000000000..0ff73c0aea --- /dev/null +++ b/services/horizon/internal/db2/schema/migrations/9_add_header_xdr.sql @@ -0,0 +1,5 @@ +-- +migrate Up +ALTER TABLE ONLY history_ledgers ADD COLUMN ledger_header text NULL; + +-- +migrate Down +ALTER TABLE ONLY history_ledgers DROP COLUMN ledger_header; \ No newline at end of file diff --git a/services/horizon/internal/docs/HorizonWithCaptiveCore.png b/services/horizon/internal/docs/HorizonWithCaptiveCore.png new file mode 100644 index 0000000000..ef0e65aa1c Binary files /dev/null and b/services/horizon/internal/docs/HorizonWithCaptiveCore.png differ diff --git a/services/horizon/internal/docs/HorizonWithRemoteCaptiveCore.png b/services/horizon/internal/docs/HorizonWithRemoteCaptiveCore.png new file mode 100644 index 0000000000..ffa685cfc5 Binary files /dev/null and b/services/horizon/internal/docs/HorizonWithRemoteCaptiveCore.png differ diff --git a/services/horizon/internal/docs/HorizonWithoutCaptiveCore.png b/services/horizon/internal/docs/HorizonWithoutCaptiveCore.png new file mode 100644 index 0000000000..3e9cec413d Binary files /dev/null and b/services/horizon/internal/docs/HorizonWithoutCaptiveCore.png differ diff --git a/services/horizon/internal/docs/admin.md b/services/horizon/internal/docs/admin.md new file mode 100644 index 0000000000..f9585490c5 --- /dev/null +++ b/services/horizon/internal/docs/admin.md @@ -0,0 +1,6 @@ +--- +title: Horizon Administration Guide +replacement: https://developers.stellar.org/docs/run-api-server/ +--- + +Please refer to the [Developers' Documentation](https://developers.stellar.org/docs/run-api-server/) for the current deployment guide. diff --git a/services/horizon/internal/docs/captive_core.md b/services/horizon/internal/docs/captive_core.md new file mode 100644 index 0000000000..d455ff8fc5 --- /dev/null +++ b/services/horizon/internal/docs/captive_core.md @@ -0,0 +1,6 @@ +--- +title: Using Captive Core with Horizon +replacement: https://developers.stellar.org/docs/run-api-server/migrating/ +--- + +Please refer to the [Developers' Documentation](https://developers.stellar.org/docs/run-api-server/migrating/) for the current Captive Core migration guide. diff --git a/services/horizon/internal/docs/developing.md b/services/horizon/internal/docs/developing.md new file mode 100644 index 0000000000..12f27eb674 --- /dev/null +++ b/services/horizon/internal/docs/developing.md @@ -0,0 +1,107 @@ +--- +title: Horizon Development Guide +--- +## Horizon Development Guide + +This document describes how to build Horizon from source, so that you can test and edit the code locally to develop bug fixes and new features. 
+ +If you are just starting with Horizon and want to try it out, consider the [Quickstart Guide](quickstart.md) instead. For information about administrating a Horizon instance in production, check out the [Administration Guide](admin.md). + +## Building Horizon +Building Horizon requires the following developer tools: + +- A [Unix-like](https://en.wikipedia.org/wiki/Unix-like) operating system with the common core commands (cp, tar, mkdir, bash, etc.) +- Go (this repository is officially supported on the last two releases of Go) +- [git](https://git-scm.com/) (to check out Horizon's source code) +- [mercurial](https://www.mercurial-scm.org/) (needed for `go-dep`) + +1. Set your [GOPATH](https://github.com/golang/go/wiki/GOPATH) environment variable, if you haven't already. The default `GOPATH` is `$HOME/go`. When building any Go package or application the binaries will be installed by default to `$GOPATH/bin`. +2. Clone the code into any directory you prefer: + ``` + git clone https://github.com/stellar/go + ``` + Or if you prefer to develop inside `GOPATH` check it out to `$GOPATH/src/github.com/stellar/go`: + ``` + git clone https://github.com/stellar/go $GOPATH/src/github.com/stellar/go + ``` + If developing inside `GOPATH` set the `GO111MODULE=on` environment variable to turn on Modules for managing dependencies. See the repository [README](../../../../README.md#dependencies) for more information. +3. Change to the directory where the repository is checked out. e.g. `cd go`, or if developing inside the `GOPATH`, `cd $GOPATH/src/github.com/stellar/go`. +4. Compile the Horizon binary: `go install ./services/horizon`. You should see the resulting `horizon` executable in `$GOPATH/bin`. +5. Add Go binaries to your PATH in your `bashrc` or equivalent, for easy access: `export PATH=${GOPATH//://bin:}/bin:$PATH` + +Open a new terminal. Confirm everything worked by running `horizon --help` successfully. You should see an informative message listing the command line options supported by Horizon. + +## Set up Horizon's database +Horizon uses a Postgres database backend to store test fixtures and record information ingested from an associated Stellar Core. To set this up: +1. Install [PostgreSQL](https://www.postgresql.org/). +2. Run `createdb horizon_dev` to initialise an empty database for Horizon's use. +3. Run `horizon db init --db-url postgres://localhost/horizon_dev` to install Horizon's database schema. + +### Database problems? +1. Depending on your installation's defaults, you may need to configure a Postgres DB user with appropriate permissions for Horizon to access the database you created. Refer to the [Postgres documentation](https://www.postgresql.org/docs/current/sql-createuser.html) for details. Note: Remember to restart the Postgres server after making any changes to `pg_hba.conf` (the Postgres configuration file), or your changes won't take effect! +2. Make sure you pass the appropriate database name and user (and port, if using something non-standard) to Horizon using `--db-url`. One way is to use a Postgres URI with the following form: `postgres://USERNAME:PASSWORD@localhost:PORT/DB_NAME`. +3. If you get the error `connect failed: pq: SSL is not enabled on the server`, add `?sslmode=disable` to the end of the Postgres URI to allow connecting without SSL. +If you get the error `zsh: no matches found: postgres://localhost/horizon_dev?sslmode=disable`, wrap the url with single quotes `horizon db init --db-url 'postgres://localhost/horizon_dev?sslmode=disable'` +4. 
If your server is responding strangely, and you've exhausted all other options, reboot the machine. On some systems `service postgresql restart` or equivalent may not fully reset the state of the server. + +## Run tests +At this point you should be able to run Horizon's unit tests: +```bash +cd $GOPATH/src/github.com/stellar/go/services/horizon +go test ./... +``` + +## Set up Stellar Core +Horizon provides an API to the Stellar network. It does this by ingesting data from an associated `stellar-core` instance. Thus, running a full Horizon instance requires a `stellar-core` instance to be configured, up to date with the network state, and accessible to Horizon. Horizon accesses `stellar-core` through both an HTTP endpoint and by connecting directly to the `stellar-core` Postgres database. + +The simplest way to set up Stellar Core is using the [Stellar Quickstart Docker Image](https://github.com/stellar/docker-stellar-core-horizon). This is a Docker container that provides both `stellar-core` and `horizon`, pre-configured for testing. + +1. Install [Docker](https://www.docker.com/get-started). +2. Verify your Docker installation works: `docker run hello-world` +3. Create a local directory that the container can use to record state. This is helpful because it can take a few minutes to sync a new `stellar-core` with enough data for testing, and because it allows you to inspect and modify the configuration if needed. Here, we create a directory called `stellar` to use as the persistent volume: `cd $HOME; mkdir stellar` +4. Download and run the Stellar Quickstart container: + +```bash +docker run --rm -it -p "8000:8000" -p "11626:11626" -p "11625:11625" -p "8002:5432" -v $HOME/stellar:/opt/stellar --name stellar stellar/quickstart --testnet +``` + +In this example we run the container in interactive mode. We map the container's Horizon HTTP port (`8000`), the `stellar-core` HTTP port (`11626`), and the `stellar-core` peer node port (`11625`) from the container to the corresponding ports on `localhost`. Importantly, we map the container's `postgresql` port (`5432`) to a custom port (`8002`) on `localhost`, so that it doesn't clash with our local Postgres install. +The `-v` option mounts the `stellar` directory for use by the container. See the [Quickstart Image documentation](https://github.com/stellar/docker-stellar-core-horizon) for a detailed explanation of these options. + +5. The container is running both a `stellar-core` and a `horizon` instance. Log in to the container and stop Horizon: +```bash +docker exec -it stellar /bin/bash +supervisorctl +stop horizon +``` + +## Check Stellar Core status +Stellar Core takes some time to synchronise with the rest of the network. The default configuration will pull roughly a couple of days' worth of ledgers, and may take 15 - 30 minutes to catch up. Logs are stored in the container at `/var/log/supervisor`. You can check the progress by monitoring logs with `supervisorctl`: +```bash +docker exec -it stellar /bin/bash +supervisorctl tail -f stellar-core +``` + +You can also check status by looking at the HTTP endpoint, e.g. by visiting http://localhost:11626 in your browser. + +## Connect Horizon to Stellar Core +You can connect Horizon to `stellar-core` at any time, but Horizon will not begin ingesting data until `stellar-core` has completed its catch-up process.
+ +Now run your development version of Horizon (which is outside of the container), pointing it at the `stellar-core` running inside the container: + +```bash +horizon --db-url="postgres://localhost/horizon_dev" --stellar-core-db-url="postgres://stellar:postgres@localhost:8002/core" --stellar-core-url="http://localhost:11626" --port 8001 --network-passphrase "Test SDF Network ; September 2015" --ingest +``` + +If all is well, you should see ingest logs written to standard out. You can test your Horizon instance with a query like: http://localhost:8001/transactions?limit=10&order=asc. Use the [Stellar Laboratory](https://www.stellar.org/laboratory/) to craft other queries to try out, +and read about the available endpoints and see examples in the [Horizon API reference](https://www.stellar.org/developers/horizon/reference/). + +## The development cycle +Congratulations! You can now run the full development cycle to build and test your code. +1. Write code + tests +2. Run tests +3. Compile Horizon: `go install github.com/stellar/go/services/horizon` +4. Run Horizon (pointing at your running `stellar-core`) +5. Try Horizon queries + +Check out the [Stellar Contributing Guide](https://github.com/stellar/docs/blob/master/CONTRIBUTING.md) to see how to contribute your work to the Stellar repositories. Once you've got something that works, open a pull request, linking to the issue that you are resolving with your contribution. We'll get back to you as quickly as we can. diff --git a/services/horizon/internal/docs/notes_for_developers.md b/services/horizon/internal/docs/notes_for_developers.md new file mode 100644 index 0000000000..215b334956 --- /dev/null +++ b/services/horizon/internal/docs/notes_for_developers.md @@ -0,0 +1,144 @@ +## Notes for Developers + +This document contains additional information related to the development of Horizon. For a detailed discussion of how to build and develop against Horizon, see the [Horizon development guide](developing.md). + +- [Initial set up](#setup) +- [Regenerating generated code](#regen) +- [Running tests](#tests) +- [Logging](#logging) +- [Adding migrations](#migrations) + + +--- +## Initial set up +Compile and install Horizon as described in the [Horizon development guide](developing.md). + +## Regenerating generated code + +Horizon uses a Go tool you'll need to install: +1. [go-bindata](https://github.com/kevinburke/go-bindata) is used to bundle test data + +After the above is installed, run `go generate github.com/stellar/go/services/horizon/...`. + +### Example recipe + +Here's an example of a recipe file with comments: +```rb +# Define two test accounts +account :scott, Stellar::KeyPair.from_seed("SBZWG33UOQQCAIBAEAQCAIBAEAQCAIBAEAQCAIBAEAQCAIBAEAQCAPSA") +account :bartek, Stellar::KeyPair.from_seed("SBRGC4TUMVVSAIBAEAQCAIBAEAQCAIBAEAQCAIBAEAQCAIBAEAQCBDHV") + +# use_manual_close causes scc to run a process with MANUAL_CLOSE=true +use_manual_close + +# Create 2 accounts `scott` and `bartek` +create_account :scott, :master, 100 +create_account :bartek, :master, 100 + +# Close ledger +close_ledger + +# Send 5 XLM from `scott` to `bartek` +payment :scott, :bartek, [:native, 5] +``` + +You can find more recipes in [`scc` examples](https://github.com/stellar/stellar_core_commander/tree/84d5ffb97202ecc3a0ed34a739c98e69536c0c2c/examples). + +Scenarios are in [horizon test scenarios](https://github.com/stellar/go/tree/master/services/horizon/internal/test/scenarios). They are +used by many different integration tests.
+ +### Deprecated Scenario sql files + +1. Scenario .sql files are located in services/horizon/internal/test/scenarios and have been used in unit and integration tests; however, they are deprecated and are not meant to be used or included in new development. They were manually maintained, have not been updated with more recent db schema changes, and are not associated with db migrations. + + +## Running Tests + +Run all the Go monorepo tests like so (assuming you are at stellar/go, or run from stellar/go/services/horizon for just the Horizon subset): + +```bash +go test ./... +``` + +or run individual Horizon tests like so, providing the expected arguments: + +```bash +go test github.com/stellar/go/services/horizon/... +``` + +## Logging + +All logging infrastructure is in the `github.com/stellar/go/tree/master/services/horizon/internal/log` package. This package provides "level-based" logging: Each logging statement has a severity, one of "Debug", "Info", "Warn", "Error" or "Panic". The Horizon server has a configured level "filter", specified either using the `--log-level` command line flag or the `LOG_LEVEL` environment variable. When a logging statement is executed, the statement's declared severity is checked against the filter, and the statement is only emitted if its severity is equal to or higher than the filter. + +In addition, the logging subsystem has support for fields: Arbitrary key-value pairs that will be associated with an entry to allow for filtering and additional contextual information. + +### Making logging statements + +Assuming that you've imported the log package, making a simple logging call is just: + +```go + +log.Info("my log line") +log.Infof("I take a %s", "format string") + +``` + +Adding fields to a statement happens with a call to `WithField` or `WithFields`: + +```go +log.WithField("pid", 1234).Warn("i'm scared") + +log.WithFields(log.F{ + "some_field": 123, + "second_field": "hello", +}).Debug("here") +``` + +The return value from `WithField` or `WithFields` is a `*log.Entry`, which you can save to emit multiple logging +statements that all share the same field. For example, the action system for Horizon attaches a log entry to `action.Log` on every request that can be used to emit log entries that have the request's id attached as a field. + +### Logging and Context + +The logging package provides the root logger at `log.DefaultLogger`, and the package-level funcs such as `log.Info` operate against the default logger. However, often it is important to include request-specific fields in a logging statement that are not available in the local scope. For example, it is useful to include an http request's id in every log statement that is emitted by code running on behalf of the request. This allows for easier debugging, as an operator can filter the log stream to a specific request id and not have to wade through the entirety of the log. + +Unfortunately, it is not prudent to thread an `*http.Request` parameter to every downstream subroutine and so we need another way to make that information available. The idiomatic way to do this in Go is with a context parameter, as described [on the Go blog](https://blog.golang.org/context). The logging package provides a func to bind a logger to a context using `log.Set` and allows you to retrieve a bound logger using `log.Ctx(ctx)`. Functions that need to log on behalf of a server request should take a context parameter.
+ +Here's an example of using context: + +```go + +// create a new sublogger +sub := log.WithField("val", 1) + +// bind it to a context +ctx := log.Set(context.Background(), sub) + +log.Info("no fields on this statement") +log.Ctx(ctx).Info("This statement will use the sub logger") + +``` + +### Logging Best Practices + +It's recommended that you try to avoid contextual information in your logging messages. Instead, use fields to establish context and use a static string for your message. This practice allows Horizon operators to more easily filter log lines to provide better insight into the health of the server. Let's take an example: + +```go +// BAD +log.Infof("running initializer: %s", i.Name) + +// GOOD +log.WithField("init_name", i.Name).Info("running initializer") +``` + +With the "good" form of the logging example above, an operator can filter on both the message and the initializer name independently. This gets more powerful when multiple fields are combined, allowing for all sorts of slicing and dicing. + + +## Enabling TLS on your local workstation + +Horizon supports HTTP/2 when served using TLS. To enable TLS on your local workstation, you must generate a certificate and configure Horizon to use it. We've written a helper script at `tls/regen.sh` to make this simple. Run the script from your terminal, and simply choose all the default options. This will create two files: `tls/server.crt` and `tls/server.key`. + +Now you must configure Horizon to use them: You can simply add `--tls-cert tls/server.crt --tls-key tls/server.key` to your command line invocations of Horizon, or you may specify the `TLS_CERT` and `TLS_KEY` environment variables. + +## Adding migrations +1. Add your migration to `services/horizon/internal/db2/schema/migrations/` using the same naming convention as the other migrations. +2. After creating your migration, run `bash services/horizon/internal/scripts/rebuild_schema.bash`; this script will create all the autogenerated code for migrations.
diff --git a/services/horizon/internal/docs/plans/images/adapters.png b/services/horizon/internal/docs/plans/images/adapters.png new file mode 100644 index 0000000000..807cd46242 Binary files /dev/null and b/services/horizon/internal/docs/plans/images/adapters.png differ diff --git a/services/horizon/internal/docs/plans/images/historyarchive.png b/services/horizon/internal/docs/plans/images/historyarchive.png new file mode 100644 index 0000000000..e28f0df415 Binary files /dev/null and b/services/horizon/internal/docs/plans/images/historyarchive.png differ diff --git a/services/horizon/internal/docs/plans/images/io.png b/services/horizon/internal/docs/plans/images/io.png new file mode 100644 index 0000000000..5f49556fb4 Binary files /dev/null and b/services/horizon/internal/docs/plans/images/io.png differ diff --git a/services/horizon/internal/docs/plans/images/ledgerbackend.png b/services/horizon/internal/docs/plans/images/ledgerbackend.png new file mode 100644 index 0000000000..39990c77da Binary files /dev/null and b/services/horizon/internal/docs/plans/images/ledgerbackend.png differ diff --git a/services/horizon/internal/docs/plans/images/pipeline.png b/services/horizon/internal/docs/plans/images/pipeline.png new file mode 100644 index 0000000000..98e535d452 Binary files /dev/null and b/services/horizon/internal/docs/plans/images/pipeline.png differ diff --git a/services/horizon/internal/docs/plans/images/system.png b/services/horizon/internal/docs/plans/images/system.png new file mode 100644 index 0000000000..3a26c414d4 Binary files /dev/null and b/services/horizon/internal/docs/plans/images/system.png differ diff --git a/services/horizon/internal/docs/plans/new_horizon_ingest.md b/services/horizon/internal/docs/plans/new_horizon_ingest.md new file mode 100644 index 0000000000..615d671948 --- /dev/null +++ b/services/horizon/internal/docs/plans/new_horizon_ingest.md @@ -0,0 +1,181 @@ +# New Horizon Ingest + +This describes the goals, design, and implementation plan for the new Horizon ingestion system. + +## Project Goals + +- Handle need for Horizon to re-ingest, catch up after outage, or fill gaps +- No more stellar-core DB access from Horizon +- Full history ingestion of Stellar Public Network ledger shouldn’t take longer than 24h on x1.32xlarge AWS machine +- Horizon maintains own state of the ledger. Order books, at least, need to be kept in-memory to allow fast pathfinding. +- Multi-writer ingestion is provided for re-ingestion (speed) and for ledger stream (high availability) +- Ingestion is a collection of micro-services (trade aggregations, trades, txn history, etc…) +- Support plugins, so 3rd parties can implement custom ingestion schemes and easily plug them in +- Can run as standalone process, separate from Horizon +- Has clear API for building clients on top of it + +## Design + +### Inputs + +The ingestion system will read data from two sources: + +1. A History Archive [1](https://www.stellar.org/developers/stellar-core/software/admin.html#history-archives),[2](https://github.com/stellar/stellar-core/blob/master/docs/history.md), which is generated by `stellar-core` and provides a complete copy of a recent [ledger](https://www.stellar.org/developers/guides/concepts/ledger.html) as well as a history of `TransactionResult` XDR objects, which in turn contain [operation results](https://github.com/stellar/stellar-core/blob/master/src/xdr/Stellar-transaction.x#L382-L834). +2. 
A full ledger reader, which provides random and up-to-date access to the transaction sets & [`TransactionMeta`](https://github.com/stellar/stellar-core/blob/master/src/xdr/Stellar-ledger.x#L271-L318) in each ledger close. `TransactionMeta` info is essential for keeping a view of the ledger state up to date, as it describes the changes to ledger state that result from each operation. + +#### History Archive Reader + +The ingestion system needs to provide a full copy of current ledger state so that Horizon and other consumers of the ingestion system have no need to access the `stellar-core` database. The history archive reader allows ingestion to read the (near) current ledger state from a history archive. The advantage of this approach is that ingestion puts no load on the `stellar-core` database to pull in ledger state, since it reads entirely from a history archive, which is often stored in S3 or a separate file system. + +For context, a `stellar-core` can be configured to write out a history archive, which stores snapshots of the ledger state every 64 ledgers (approximately every 5 minutes). We envision that an administrator will run a `stellar-core`, configure it to write out a history archive, and then point the ingestion system at that history archive. This has two advantages: + +1. The ingestion does not put load on any external service (like SDF's history archives) +2. The administrator does not need to trust third party history archives + +Typically, the ingestion system will only access the history archive on startup to get a full copy of ledger state, and will then keep that copy of ledger state up to date using data from the separate ledger transaction set backend. However, the ingestion system could access older snapshots to construct a history of ledger state, or it could periodically re-ingest the full ledger state to detect any errors that accumulate over time from updating the ledger state. + +The history archive reader supports multiple backends to handle different ways that a history archive can be stored: + +1. S3 backend +2. HTTP backend +3. File backend + +UML Class diagram + +![History Archive Reader Class Diagram](images/historyarchive.png) + +Example of reading a history archive using `stellar-core`: + +```sh +wget http://history.stellar.org/prd/core-live/core_live_001/results/01/4d/f7/results-014df7ff.xdr.gz +gunzip results-014df7ff.xdr.gz +~/src/stellar-core/src/stellar-core dump-xdr results-014df7ff.xdr | head -n 40 +``` + +#### Full Ledger Reader + +The ingestion system needs a way to keep the current ledger state up to date, as well as a way to construct a history of events (typically transactions, operations, and effects) that have occurred over time on the Stellar network. The full ledger reader provides this ability. Specifically, it allows the ingestion system to access a stream of transaction metadata that encode state changes that happen as a result of every transaction. This information is missing from the history archives, and is also updated after every ledger close (vs. after every 64 in the history archives). The full ledger reader also has access to transaction sets for each ledger. + +Here's a summary of the features provided by the full ledger reader vs. 
the history archive reader:
+
+| Reader | txn resultsets | ledger state snapshots | transaction metadata | near-realtime (updates at every ledger close) |
+| --- |:---:|:---:|:---:|:---:|
+| history archive | X | X | | |
+| full ledger | X | | X | X |
+
+The long-term plan for the full ledger reader is for `stellar-core` to write transaction metadata out to an S3 bucket, which will allow the following:
+
+1. Reading transaction metadata without creating load on `stellar-core`'s database
+2. Fast, parallel access to historical transaction metadata (allowing fast `CATCHUP_COMPLETE` ingestion)
+3. Multiple `stellar-core`s writing the latest update to the same S3 object. This allows redundancy: one `stellar-core` can fail, and the stream of S3 objects will continue uninterrupted.
+
+However, this requires a change to `stellar-core`, which is estimated to happen in Q3 2019. Until then, the ingestion system will read from the `txfeehistory` and [`txhistory`](https://github.com/stellar/stellar-core/blob/master/src/transactions/TransactionFrame.cpp#L683) tables in the `stellar-core` database as it does currently. Unfortunately, we won't get any of the benefits listed above until the change is made to `stellar-core`.
+
+The full ledger reader will support multiple backends:
+
+1. `stellar-core` database reader (this will be implemented first, and is exactly what happens now)
+2. File backend
+3. S3 backend
+
+UML Class diagram
+
+![Full Ledger Reader Class Diagram](images/ledgerbackend.png)
+
+### Data Transformation
+
+The ingestion system has a data transformation pipeline between its inputs (full ledger backend + history archive backend) and its outputs.
+
+#### Input Adapters
+
+The first step is to adapt the format of the data flowing from the two input backends into a format and interface that is easy for the ingestion system to use, both for random access and for accessing the latest data.
+
+UML Class diagram
+
+![Input Adapters Class Diagram](images/adapters.png)
+
+Notes:
+
+- The `HistoryArchiveAdapter` supports both reading a ledger transaction result set via `GetLedger()` and reading ledger state via `GetState()`.
+- Both adapters support `GetLatestLedgerSequence()`, which allows a consumer to look up the most recent ledger information in the backend.
+- The adapters, rather than returning state and ledgers as objects stored in memory, return them as `ReadCloser` objects. This is because the ledger state or a particular transaction state may not fit in memory, and must be processed as a stream.
+
+Both the `ReadCloser` and `WriteCloser` interfaces include a `Close()` method. When `Close()` is called on a `ReadCloser`, it tells the reader that no more data is needed and it should stop streaming, e.g. close buffered channels, close network connections, etc. When `Close()` is called on a `WriteCloser`, it means that no more data will be written to it. This is especially helpful when the writer is writing to a pipe, so the reader on the other end knows that no more objects will be streamed and can return `EOF`.
+
+The `ReadCloser` structs come from the `ingest/io` package, shown in the UML class diagram below:
+
+![IO package](images/io.png)
+
+#### Ingestion Pipeline
+
+At the center of ingestion is a `Pipeline`, which is initialized with a series of processors for each kind of data that ingestion handles (ledger state, a full ledger update, and an archive ledger update). Each processor implements a function that reads data from a `ReadCloser`, processes/filters it, and writes it to a `WriteCloser`.
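+
+To make the processor contract more concrete, here is a minimal Go sketch of such a function. The interface and type names below are illustrative only and are not taken from the real `ingest/io` or `ingest/pipeline` packages, which may define them differently; the point is the read-filter-write loop and the role of `Close()`:
+
+```go
+package example
+
+import (
+    "io"
+
+    "github.com/stellar/go/xdr"
+)
+
+// LedgerEntryReadCloser streams ledger entries and returns io.EOF when the
+// stream is exhausted. Close tells the producer to stop streaming early.
+type LedgerEntryReadCloser interface {
+    Read() (xdr.LedgerEntry, error)
+    Close() error
+}
+
+// LedgerEntryWriteCloser receives processed entries. Close signals that no
+// more entries will be written.
+type LedgerEntryWriteCloser interface {
+    Write(xdr.LedgerEntry) error
+    Close() error
+}
+
+// AccountsOnlyProcessor is a hypothetical filter processor: it forwards
+// account entries and drops everything else.
+type AccountsOnlyProcessor struct{}
+
+func (AccountsOnlyProcessor) ProcessState(r LedgerEntryReadCloser, w LedgerEntryWriteCloser) error {
+    defer r.Close()
+    // Closing the writer lets the downstream reader return EOF.
+    defer w.Close()
+
+    for {
+        entry, err := r.Read()
+        if err == io.EOF {
+            return nil
+        }
+        if err != nil {
+            return err
+        }
+        if entry.Data.Type != xdr.LedgerEntryTypeAccount {
+            continue // filtered out
+        }
+        if err := w.Write(entry); err != nil {
+            return err
+        }
+    }
+}
+```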
+
+A few example processors:
+
+- A processor that passes on only information about a certain account
+- A processor that only looks at certain kinds of events, such as offers being placed
+- A processor sending a mobile notification for incoming payments
+- A processor saving data to a database or in-memory store
+
+See the rough UML diagram:
+
+![Filter package](images/pipeline.png)
+
+Notes:
+
+- The processors form a tree and are processed from root to leaves.
+- Processing does not change the type of the data, but it can remove or change fields in the data.
+- Processors can write/read artifacts (e.g. average balances) in a `Store` that is shared between all processors in a pipeline. `Store` is ephemeral; its contents are removed when pipeline processing is done.
+
+### Tying it all together
+
+The ingestion system can run as a stand-alone process or as part of a larger process like Horizon. It can handle three different kinds of sessions:
+
+- `IngestRangeSession`: ingest data over a range of ledgers. Used for ingesting historical data in parallel.
+- `LiveSession`: ingest the latest ledgers as soon as they close. This is the standard operating mode for a live Horizon instance.
+- `CheckpointsOnlySession`: ingest only history archive checkpoint data.
+
+Sessions are responsible for coordinating pipelines and ensuring data is in sync. When processors write to a single storage type, keeping data in sync is trivial. For example, for Postgres, processors can share a single transaction object and commit when processing is done. However, there's a significant challenge keeping data in sync when processors are writing to different storage types: any read operation that reads across stores, or across data updated by multiple `Process`es, is at risk of reading inconsistent values. `System` or `Session` objects (TBD) should be responsible for keeping the different stores in sync. For example, they can append to every query sent to a store the latest ledger sequence that was successfully saved in all stores. This probably requires stores to keep data for the two (2) latest ledgers.
+
+![ingest package](images/system.png)
+
+## Open questions
+
+There are several open questions, and the design above isn't comprehensive. A few questions below:
+
+- What is the story around ingestion plugins? How do developers use the ingestion system in their own projects?
+- How do we enable ingestion via multiple languages?
+- Will we split Horizon into an ingestion process and an API server process, or keep a single process?
+- Should a stand-alone ingestion process expose an RPC server or a more standard REST API?
+  - What's the exact API that an ingestion server exposes?
+- Where is parallel ingestion logic handled?
+- How exactly do we organize `Store`s, `ProcessingPipeline`, and `Process`es?
+- How do we keep reads from multiple stores consistent?
+- How do we make the `Store`s and `Process`es in `ingest/stores` reusable?
+
+## Implementation Plan
+
+This gives an overview of how we could move Horizon over to using the new ingestion system.
+
+### As we implement components
+
+We can create a command-line tool that computes basic stats and exercises all the components of ingestion to prove that they work together. For example, the tool can compute the total number of accounts every 5 minutes, or compute the number of transactions per second.
+
+### The proof of concept
+
+We can implement [accounts for signer](https://github.com/stellar/go/issues/432) using the new ingestion system. It's a good candidate because it's something we urgently need, and it's a new feature, so we don't risk breaking anything existing if there's an issue with the system.
+
+### Chunks that can be broken off and implemented separately
+
+- The projects below depend on the `io` package interfaces, which should be pretty minimal to add.
+- History Archive Backend and History Archive Adapter (Nikhil is already on this)
+  - command-line tool takes a history archive file and prints out basic stats like # of accounts and transactions per second
+- `DatabaseBackend` implementation of `LedgerBackend` and `LedgerBackendAdapter`
+  - command-line tool takes a database URI and computes stats like # of transactions in the latest ledger (it runs live and updates with each new ledger in the `DatabaseBackend`)
+- `ingest/pipeline`:
+  - command-line tool that implements a few demo processors and, given a DB URL and/or a history archive file location, streams out the data that passes the filter
+
+Once the above are all done, put together a basic ingestion server as laid out in the top-level `ingest` package.
+
+At this point, we'll have a full end-to-end implementation! Next, we start filling in the extra features we didn't need for the proof of concept. Once it's reasonably stable, we can release Horizon with the new `accounts for signer` feature to get some real-world testing.
+
+### Porting the rest of Horizon over
+
+Once the proof of concept is functional, tables/features can be ported over one by one. Features that currently depend on `stellar-core` tables can be moved to equivalent tables maintained by Horizon. I imagine most features will still use postgres, but a few, such as pathfinding, will need in-memory storage for speed. If pathfinding can maintain a copy of order books [in memory](https://github.com/stellar/go/issues/849), it should run orders of magnitude faster. Horizon should be split out into a microservices architecture, with each feature implemented using a different `StateInitProcess` or `LedgerProcess` object.
diff --git a/services/horizon/internal/docs/quickstart.md b/services/horizon/internal/docs/quickstart.md
new file mode 100644
index 0000000000..0d7541d717
--- /dev/null
+++ b/services/horizon/internal/docs/quickstart.md
@@ -0,0 +1,42 @@
+---
+title: Horizon Quickstart
+replacement: https://developers.stellar.org/docs/run-api-server/quickstart/
+---
+## Horizon Quickstart
+This document describes how to quickly set up a **test** Stellar Core + Horizon node that you can play around with to get a feel for how a Stellar node operates. **This configuration is not secure!** It is **not** intended as a guide for production administration.
+
+For detailed information about running Horizon and Stellar Core safely in production, see the [Horizon Administration Guide](admin.md) and the [Stellar Core Administration Guide](https://www.stellar.org/developers/stellar-core/software/admin.html).
+
+If you're ready to roll up your sleeves and dig into the code, check out the [Developer Guide](developing.md).
+
+### Install and run the Quickstart Docker Image
+The fastest way to get up and running is using the [Stellar Quickstart Docker Image](https://github.com/stellar/docker-stellar-core-horizon). This is a Docker container that provides both `stellar-core` and `horizon`, pre-configured for testing.
+
+1. Install [Docker](https://www.docker.com/get-started).
+2. Verify your Docker installation works: `docker run hello-world`
+3. Create a local directory that the container can use to record state. This is helpful because it can take a few minutes to sync a new `stellar-core` with enough data for testing, and because it allows you to inspect and modify the configuration if needed. Here, we create a directory called `stellar` to use as the persistent volume:
+`cd $HOME; mkdir stellar`
+4. Download and run the Stellar Quickstart container, mounting the `stellar` directory you just created as the container's persistent volume:
+
+```bash
+docker run --rm -it -p "8000:8000" -p "11626:11626" -p "11625:11625" -p "8002:5432" -v $HOME/stellar:/opt/stellar --name stellar stellar/quickstart --testnet
+```
+
+You can check on Stellar Core's status by browsing to http://localhost:11626.
+
+You can check out your Horizon instance by browsing to http://localhost:8000.
+
+You can tail logs within the container to see what's going on behind the scenes:
+```bash
+docker exec -it stellar /bin/bash
+supervisorctl tail -f stellar-core
+supervisorctl tail -f horizon stderr
+```
+
+On a modern laptop this test setup takes about 15 minutes to synchronise with the last couple of days of testnet ledgers. At that point Horizon will be available for querying.
+
+See the [Quickstart Docker Image](https://github.com/stellar/docker-stellar-core-horizon) documentation for more details and alternative ways to run the container.
+
+You can test your Horizon instance with a query like: http://localhost:8000/transactions?cursor=&limit=10&order=asc. Use the [Stellar Laboratory](https://www.stellar.org/laboratory/) to craft other queries to try out,
+and read about the available endpoints and see examples in the [Horizon API reference](https://www.stellar.org/developers/horizon/reference/).
+
diff --git a/services/horizon/internal/docs/readme.md b/services/horizon/internal/docs/readme.md
new file mode 100644
index 0000000000..3ec0774e79
--- /dev/null
+++ b/services/horizon/internal/docs/readme.md
@@ -0,0 +1,25 @@
+---
+title: Horizon
+replacement: https://developers.stellar.org/docs/run-api-server/
+---
+
+Horizon is the server for the client-facing API of the Stellar ecosystem. It acts as the interface between [stellar-core](https://www.stellar.org/developers/software/#stellar-core) and applications that want to access the Stellar network. It allows you to submit transactions to the network, check the status of accounts, subscribe to event streams, etc. See [an overview of the Stellar ecosystem](https://www.stellar.org/developers/guides/) for more details.
+
+You can interact directly with Horizon via curl or a web browser, but SDF also provides a [JavaScript SDK](https://www.stellar.org/developers/js-stellar-sdk/reference/) for clients to use to interact with Horizon.
+
+SDF runs an instance of Horizon that is connected to the test net: [https://horizon-testnet.stellar.org/](https://horizon-testnet.stellar.org/).
+
+## Libraries
+
+SDF maintained libraries:
+- [JavaScript](https://github.com/stellar/js-stellar-sdk) +- [Go](https://github.com/stellar/go/tree/master/clients/horizonclient) +- [Java](https://github.com/stellar/java-stellar-sdk) + +Community maintained libraries for interacting with Horizon in other languages:
+- [Python](https://github.com/StellarCN/py-stellar-base) +- [C# .NET Core 2.x](https://github.com/elucidsoft/dotnetcore-stellar-sdk) +- [Ruby](https://github.com/astroband/ruby-stellar-sdk) +- [iOS and macOS](https://github.com/Soneso/stellar-ios-mac-sdk) +- [Scala SDK](https://github.com/synesso/scala-stellar-sdk) +- [C++ SDK](https://github.com/bnogalm/StellarQtSDK) diff --git a/services/horizon/internal/docs/reference/endpoints/accounts-single.md b/services/horizon/internal/docs/reference/endpoints/accounts-single.md new file mode 100644 index 0000000000..ca0ccfdcf9 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/accounts-single.md @@ -0,0 +1,164 @@ +--- +title: Account Details +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=accounts&endpoint=single +replacement: https://developers.stellar.org/api/resources/accounts/single/ +--- + +Returns information and links relating to a single [account](../resources/account.md). + +The balances section in the returned JSON will also list all the +[trustlines](https://www.stellar.org/developers/learn/concepts/assets.html) this account +established. Note this will only return trustlines that have the necessary authorization to work. +Meaning if an account `A` trusts another account `B` that has the +[authorization required](https://www.stellar.org/developers/guides/concepts/accounts.html#flags) +flag set, the trustline won't show up until account `B` +[allows](https://www.stellar.org/developers/guides/concepts/list-of-operations.html#allow-trust) +account `A` to hold its assets. + +## Request + +``` +GET /accounts/{account} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `account` | required, string | Account ID | GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.accounts() + .accountId("GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB") + .call() + .then(function (accountResult) { + console.log(accountResult); + }) + .catch(function (err) { + console.error(err); + }) +``` + +## Response + +This endpoint responds with the details of a single account for a given ID. See [account resource](../resources/account.md) for reference. 
+ +### Example Response +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB" + }, + "transactions": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/transactions{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/operations{?cursor,limit,order}", + "templated": true + }, + "payments": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/payments{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/effects{?cursor,limit,order}", + "templated": true + }, + "offers": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/offers{?cursor,limit,order}", + "templated": true + }, + "trades": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/trades{?cursor,limit,order}", + "templated": true + }, + "data": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/data/{key}", + "templated": true + } + }, + "id": "GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB", + "paging_token": "", + "account_id": "GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB", + "sequence": 7275146318446606, + "last_modified_ledger": 22379074, + "subentry_count": 4, + "thresholds": { + "low_threshold": 0, + "med_threshold": 0, + "high_threshold": 0 + }, + "flags": { + "auth_required": false, + "auth_revocable": false, + "auth_immutable": false + }, + "balances": [ + { + "balance": "1000000.0000000", + "limit": "922337203685.4775807", + "buying_liabilities": "0.0000000", + "selling_liabilities": "0.0000000", + "last_modified_ledger": 632070, + "is_authorized": true, + "asset_type": "credit_alphanum4", + "asset_code": "FOO", + "asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR" + }, + { + "balance": "10000.0000000", + "buying_liabilities": "0.0000000", + "selling_liabilities": "0.0000000", + "asset_type": "native" + } + ], + "signers": [ + { + "public_key": "GDLEPBJBC2VSKJCLJB264F2WDK63X4NKOG774A3QWVH2U6PERGDPUCS4", + "weight": 1, + "key": "GDLEPBJBC2VSKJCLJB264F2WDK63X4NKOG774A3QWVH2U6PERGDPUCS4", + "type": "ed25519_public_key" + }, + { + "public_key": "GBPOFUJUHOFTZHMZ63H5GE6NX5KVKQRD6N3I2E5AL3T2UG7HSLPLXN2K", + "weight": 1, + "key": "GBPOFUJUHOFTZHMZ63H5GE6NX5KVKQRD6N3I2E5AL3T2UG7HSLPLXN2K", + "type": "sha256_hash" + }, + { + "public_key": "GDUDIN23QQTB23Q3Q6GUL6I7CEAQY4CWCFVRXFWPF4UJAQO47SPUFCXG", + "weight": 1, + "key": "GDUDIN23QQTB23Q3Q6GUL6I7CEAQY4CWCFVRXFWPF4UJAQO47SPUFCXG", + "type": "preauth_tx" + }, + { + "public_key": "GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB", + "weight": 1, + "key": "GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB", + "type": "ed25519_public_key" + } + ], + "data": { + "best_friend": "c3Ryb29weQ==" + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no account whose ID matches the `account` argument. 
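+
+Note that the values in the `data` map are base64-encoded. A quick Go sketch for decoding the `best_friend` entry from the example response above:
+
+```go
+package main
+
+import (
+    "encoding/base64"
+    "fmt"
+    "log"
+)
+
+func main() {
+    // Value of the "best_friend" data entry from the example response.
+    encoded := "c3Ryb29weQ=="
+
+    raw, err := base64.StdEncoding.DecodeString(encoded)
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Println(string(raw)) // prints: stroopy
+}
+```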
diff --git a/services/horizon/internal/docs/reference/endpoints/accounts.md b/services/horizon/internal/docs/reference/endpoints/accounts.md new file mode 100644 index 0000000000..29bfd2bd40 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/accounts.md @@ -0,0 +1,177 @@ +--- +title: Accounts +replacement: https://developers.stellar.org/api/resources/accounts/ +--- + +This endpoint allows filtering accounts who have a given `signer` or have a trustline to an `asset`. The result is a list of [accounts](../resources/account.md). + +To find all accounts who are trustees to an asset, pass the query parameter `asset` using the canonical representation for an issued assets which is `Code:IssuerAccountID`. Read more about canonical representation of assets in [SEP-0011](https://github.com/stellar/stellar-protocol/blob/0c675fb3a482183dcf0f5db79c12685acf82a95c/ecosystem/sep-0011.md#values). + +### Notes +- The default behavior when filtering by `asset` is to return accounts with `authorized` and `unauthorized` trustlines. + +## Request + +``` +GET /accounts{?signer,asset,cursor,limit,order} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `?signer` | optional, string | Account ID | GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB | +| `?asset` | optional, string | An issued asset represented as "Code:IssuerAccountID". | `USD:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V,native` | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. | `GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. | `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/accounts?signer=GBPOFUJUHOFTZHMZ63H5GE6NX5KVKQRD6N3I2E5AL3T2UG7HSLPLXN2K" +``` + + + + + + + + + + + + + + + + + +## Response + +This endpoint responds with the details of all accounts matching the filters. See [account resource](../resources/account.md) for reference. 
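+
+For Go applications, the same filtered query can be issued with the [Go SDK](https://github.com/stellar/go/tree/master/clients/horizonclient). Below is a minimal sketch, assuming the `horizonclient` package's `AccountsRequest`/`Accounts` API (which may differ between SDK versions):
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    "github.com/stellar/go/clients/horizonclient"
+)
+
+func main() {
+    client := horizonclient.DefaultTestNetClient
+
+    // Filter accounts that have the given signer. Filtering by asset works
+    // the same way via the Asset field ("Code:IssuerAccountID").
+    request := horizonclient.AccountsRequest{
+        Signer: "GBPOFUJUHOFTZHMZ63H5GE6NX5KVKQRD6N3I2E5AL3T2UG7HSLPLXN2K",
+    }
+
+    page, err := client.Accounts(request)
+    if err != nil {
+        log.Fatal(err)
+    }
+    for _, account := range page.Embedded.Records {
+        fmt.Println(account.AccountID)
+    }
+}
+```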
+ +### Example Response +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts?cursor=\u0026limit=10\u0026order=asc\u0026signer=GBPOFUJUHOFTZHMZ63H5GE6NX5KVKQRD6N3I2E5AL3T2UG7HSLPLXN2K" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts?cursor=GDRREYWHQWJDICNH4SAH4TT2JRBYRPTDYIMLK4UWBDT3X3ZVVYT6I4UQ\u0026limit=10\u0026order=asc\u0026signer=GBPOFUJUHOFTZHMZ63H5GE6NX5KVKQRD6N3I2E5AL3T2UG7HSLPLXN2K" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts?cursor=GDRREYWHQWJDICNH4SAH4TT2JRBYRPTDYIMLK4UWBDT3X3ZVVYT6I4UQ\u0026limit=10\u0026order=desc\u0026signer=GBPOFUJUHOFTZHMZ63H5GE6NX5KVKQRD6N3I2E5AL3T2UG7HSLPLXN2K" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB" + }, + "transactions": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/transactions{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/operations{?cursor,limit,order}", + "templated": true + }, + "payments": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/payments{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/effects{?cursor,limit,order}", + "templated": true + }, + "offers": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/offers{?cursor,limit,order}", + "templated": true + }, + "trades": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/trades{?cursor,limit,order}", + "templated": true + }, + "data": { + "href": "https://horizon-testnet.stellar.org/accounts/GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB/data/{key}", + "templated": true + } + }, + "id": "GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB", + "paging_token": "", + "account_id": "GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB", + "sequence": 7275146318446606, + "last_modified_ledger": 22379074, + "subentry_count": 4, + "thresholds": { + "low_threshold": 0, + "med_threshold": 0, + "high_threshold": 0 + }, + "flags": { + "auth_required": false, + "auth_revocable": false, + "auth_immutable": false + }, + "balances": [ + { + "balance": "1000000.0000000", + "limit": "922337203685.4775807", + "buying_liabilities": "0.0000000", + "selling_liabilities": "0.0000000", + "last_modified_ledger": 632070, + "asset_type": "credit_alphanum4", + "asset_code": "FOO", + "asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR", + "is_authorized": true + }, + { + "balance": "10000.0000000", + "buying_liabilities": "0.0000000", + "selling_liabilities": "0.0000000", + "asset_type": "native" + } + ], + "signers": [ + { + "public_key": "GDLEPBJBC2VSKJCLJB264F2WDK63X4NKOG774A3QWVH2U6PERGDPUCS4", + "weight": 1, + "key": "GDLEPBJBC2VSKJCLJB264F2WDK63X4NKOG774A3QWVH2U6PERGDPUCS4", + "type": "ed25519_public_key" + }, + { + "public_key": "GBPOFUJUHOFTZHMZ63H5GE6NX5KVKQRD6N3I2E5AL3T2UG7HSLPLXN2K", + "weight": 1, + "key": "GBPOFUJUHOFTZHMZ63H5GE6NX5KVKQRD6N3I2E5AL3T2UG7HSLPLXN2K", + "type": "sha256_hash" + }, + { + "public_key": 
"GDUDIN23QQTB23Q3Q6GUL6I7CEAQY4CWCFVRXFWPF4UJAQO47SPUFCXG", + "weight": 1, + "key": "GDUDIN23QQTB23Q3Q6GUL6I7CEAQY4CWCFVRXFWPF4UJAQO47SPUFCXG", + "type": "preauth_tx" + }, + { + "public_key": "GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB", + "weight": 1, + "key": "GD42RQNXTRIW6YR3E2HXV5T2AI27LBRHOERV2JIYNFMXOBA234SWLQQB", + "type": "ed25519_public_key" + } + ], + "data": { + "best_friend": "c3Ryb29weQ==" + } + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). diff --git a/services/horizon/internal/docs/reference/endpoints/assets-all.md b/services/horizon/internal/docs/reference/endpoints/assets-all.md new file mode 100644 index 0000000000..c5281a6b32 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/assets-all.md @@ -0,0 +1,159 @@ +--- +title: All Assets +clientData: + laboratoryUrl: +replacement: https://developers.stellar.org/api/resources/assets/ +--- + +This endpoint represents all [assets](../resources/asset.md). +It will give you all the assets in the system along with various statistics about each. + +## Request + +``` +GET /assets{?asset_code,asset_issuer,cursor,limit,order} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `?asset_code` | optional, string, default _null_ | Code of the Asset to filter by | `USD` | +| `?asset_issuer` | optional, string, default _null_ | Issuer of the Asset to filter by | `GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36` | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. | `1` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc", ordered by asset_code then by asset_issuer. | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. | `200` | + +### curl Example Request + +```sh +# Retrieve the 200 assets, ordered alphabetically: +curl "https://horizon-testnet.stellar.org/assets?limit=200" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.assets() + .call() + .then(function (result) { + console.log(result.records); + }) + .catch(function (err) { + console.log(err) + }) +``` + +## Response + +If called normally this endpoint responds with a [page](../resources/page.md) of assets. 
+ +### Example Response + +```json +{ + "_links": { + "self": { + "href": "/assets?order=asc\u0026limit=10\u0026cursor=" + }, + "next": { + "href": "/assets?order=asc\u0026limit=10\u0026cursor=3" + }, + "prev": { + "href": "/assets?order=desc\u0026limit=10\u0026cursor=1" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "toml": { + "href": "https://www.stellar.org/.well-known/stellar.toml" + } + }, + "asset_type": "credit_alphanum12", + "asset_code": "BANANA", + "asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "paging_token": "BANANA_GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN_credit_alphanum4", + "accounts": { + "authorized": 2126, + "authorized_to_maintain_liabilities": 32, + "unauthorized": 5, + "claimable_balances": 18 + }, + "balances": { + "authorized": "10000.0000000", + "authorized_to_maintain_liabilities": "3000.0000000", + "unauthorized": "4000.0000000", + "claimable_balances": "2380.0000000" + }, + "flags": { + "auth_required": true, + "auth_revocable": false + } + }, + { + "_links": { + "toml": { + "href": "https://www.stellar.org/.well-known/stellar.toml" + } + }, + "asset_type": "credit_alphanum4", + "asset_code": "BTC", + "asset_issuer": "GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG", + "paging_token": "BTC_GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG_credit_alphanum4", + "accounts": { + "authorized": 32, + "authorized_to_maintain_liabilities": 124, + "unauthorized": 6, + "claimable_balances": 18 + }, + "balances": { + "authorized": "5000.0000000", + "authorized_to_maintain_liabilities": "8000.0000000", + "unauthorized": "2000.0000000", + "claimable_balances": "1200.0000000" + }, + "flags": { + "auth_required": false, + "auth_revocable": false + } + }, + { + "_links": { + "toml": { + "href": "https://www.stellar.org/.well-known/stellar.toml" + } + }, + "asset_type": "credit_alphanum4", + "asset_code": "USD", + "asset_issuer": "GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG", + "paging_token": "USD_GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG_credit_alphanum4", + "accounts": { + "authorized": 91547871, + "authorized_to_maintain_liabilities": 45773935, + "unauthorized": 22886967, + "claimable_balances": 11443483 + }, + "balances": { + "authorized": "1000000000.0000000", + "authorized_to_maintain_liabilities": "500000000.0000000", + "unauthorized": "250000000.0000000", + "claimable_balances": "12500000.0000000" + }, + "flags": { + "auth_required": false, + "auth_revocable": false + } + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). diff --git a/services/horizon/internal/docs/reference/endpoints/data-for-account.md b/services/horizon/internal/docs/reference/endpoints/data-for-account.md new file mode 100644 index 0000000000..98ff017623 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/data-for-account.md @@ -0,0 +1,63 @@ +--- +title: Data for Account +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=data&endpoint=for_account +replacement: https://developers.stellar.org/api/resources/accounts/data/ +--- + +This endpoint represents a single [data](../resources/data.md) associated with a given [account](../resources/account.md). 
+ +## Request + +``` +GET /accounts/{account}/data/{key} +``` + +### Arguments + +| name | notes | description | example | +| ------ | ------- | ----------- | ------- | +| `key`| required, string | Key name | `user-id`| + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/accounts/GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36/data/user-id" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.accounts() + .accountId("GAKLBGHNHFQ3BMUYG5KU4BEWO6EYQHZHAXEWC33W34PH2RBHZDSQBD75") + .call() + .then(function (account) { + return account.data({key: 'user-id'}) + }) + .then(function(dataValue) { + console.log(dataValue) + }) + .catch(function (err) { + console.log(err) + }) +``` + +## Response + +This endpoint responds with a value of the data field for the given account. See [data resource](../resources/data.md) for reference. + +### Example Response + +```json +{ + "value": "MTAw" +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no account whose ID matches the `account` argument or there is no data field with a given key. diff --git a/services/horizon/internal/docs/reference/endpoints/effects-all.md b/services/horizon/internal/docs/reference/endpoints/effects-all.md new file mode 100644 index 0000000000..3150ed6fd7 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/effects-all.md @@ -0,0 +1,133 @@ +--- +title: All Effects +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=effects&endpoint=all +replacement: https://developers.stellar.org/api/resources/effects/list/ +--- + +This endpoint represents all [effects](../resources/effect.md). + +This endpoint can also be used in [streaming](../streaming.md) mode so it is possible to use it to listen for new effects as transactions happen in the Stellar network. +If called in streaming mode Horizon will start at the earliest known effect unless a `cursor` is set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only stream effects created since your request time. + +## Request + +``` +GET /effects{?cursor,limit,order} +``` + +## Arguments + +| name | notes | description | example | +| ------ | ------- | ----------- | ------- | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream object created since your request time. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. 
| `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/effects" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.effects() + .call() + .then(function (effectResults) { + //page 1 + console.log(effectResults.records) + }) + .catch(function (err) { + console.log(err) + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var effectHandler = function (effectResponse) { + console.log(effectResponse); +}; + +var es = server.effects() + .cursor('now') + .stream({ + onmessage: effectHandler + }) +``` + +## Response + +The list of effects. + +### Example Response + +```json +{ + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "/operations/279172878337" + }, + "precedes": { + "href": "/effects?cursor=279172878337-1\u0026order=asc" + }, + "succeeds": { + "href": "/effects?cursor=279172878337-1\u0026order=desc" + } + }, + "account": "GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K", + "paging_token": "279172878337-1", + "starting_balance": "10000000.0", + "type_i": 0, + "type": "account_created" + }, + { + "_links": { + "operation": { + "href": "/operations/279172878337" + }, + "precedes": { + "href": "/effects?cursor=279172878337-2\u0026order=asc" + }, + "succeeds": { + "href": "/effects?cursor=279172878337-2\u0026order=desc" + } + }, + "account": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "amount": "10000000.0", + "asset_type": "native", + "paging_token": "279172878337-2", + "type_i": 3, + "type": "account_debited" + } + ] + }, + "_links": { + "next": { + "href": "/effects?order=asc\u0026limit=2\u0026cursor=279172878337-2" + }, + "prev": { + "href": "/effects?order=desc\u0026limit=2\u0026cursor=279172878337-1" + }, + "self": { + "href": "/effects?order=asc\u0026limit=2\u0026cursor=" + } + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there are no effects for the given account. diff --git a/services/horizon/internal/docs/reference/endpoints/effects-for-account.md b/services/horizon/internal/docs/reference/endpoints/effects-for-account.md new file mode 100644 index 0000000000..b60fbdacd8 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/effects-for-account.md @@ -0,0 +1,124 @@ +--- +title: Effects for Account +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=effects&endpoint=for_account +replacement: https://developers.stellar.org/api/resources/accounts/effects/ +--- + +This endpoint represents all [effects](../resources/effect.md) that changed a given +[account](../resources/account.md). It will return relevant effects from the creation of the +account to the current ledger. + +This endpoint can also be used in [streaming](../streaming.md) mode so it is possible to use it to +listen for new effects as transactions happen in the Stellar network. +If called in streaming mode Horizon will start at the earliest known effect unless a `cursor` is +set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only +stream effects created since your request time. 
+ +## Request + +``` +GET /accounts/{account}/effects{?cursor,limit,order} +``` + +## Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `account` | required, string | Account ID | `GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36` | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream object created since your request time. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. | `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/accounts/GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36/effects?limit=1" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.effects() + .forAccount("GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36") + .call() + .then(function (effectResults) { + // page 1 + console.log(effectResults.records) + }) + .catch(function (err) { + console.log(err) + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var effectHandler = function (effectResponse) { + console.log(effectResponse); +}; + +var es = server.effects() + .forAccount("GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36") + .cursor('now') + .stream({ + onmessage: effectHandler + }) +``` + +## Response + +The list of effects. + +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36/effects?cursor=&limit=1&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36/effects?cursor=1919197546291201-1&limit=1&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36/effects?cursor=1919197546291201-1&limit=1&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1919197546291201-1" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1919197546291201-1" + } + }, + "id": "0001919197546291201-0000000001", + "paging_token": "1919197546291201-1", + "account": "GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36", + "type": "account_created", + "type_i": 0, + "created_at": "2019-03-25T22:43:38Z", + "starting_balance": "10000.0000000" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there are no effects for the given account. 
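+
+The streaming mode described above is delivered as Server-Sent Events (SSE) over plain HTTP, so it can also be consumed without an SDK. Below is a rough Go sketch using only the standard library; the payload handling is illustrative (a production client should also handle reconnects), and the exact keep-alive messages Horizon sends are an implementation detail:
+
+```go
+package main
+
+import (
+    "bufio"
+    "fmt"
+    "log"
+    "net/http"
+    "strings"
+)
+
+func main() {
+    // Stream new effects for an account, starting from "now".
+    url := "https://horizon-testnet.stellar.org/accounts/GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36/effects?cursor=now"
+
+    req, err := http.NewRequest("GET", url, nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+    // Asking for text/event-stream puts Horizon into streaming mode.
+    req.Header.Set("Accept", "text/event-stream")
+
+    resp, err := http.DefaultClient.Do(req)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer resp.Body.Close()
+
+    scanner := bufio.NewScanner(resp.Body)
+    scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) // allow large events
+    for scanner.Scan() {
+        line := scanner.Text()
+        // SSE payloads arrive on "data:" lines; skip anything that isn't a
+        // JSON object (e.g. keep-alive or hello messages).
+        if !strings.HasPrefix(line, "data:") {
+            continue
+        }
+        payload := strings.TrimSpace(strings.TrimPrefix(line, "data:"))
+        if !strings.HasPrefix(payload, "{") {
+            continue
+        }
+        fmt.Println(payload) // raw effect JSON
+    }
+    if err := scanner.Err(); err != nil {
+        log.Fatal(err)
+    }
+}
+```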
diff --git a/services/horizon/internal/docs/reference/endpoints/effects-for-ledger.md b/services/horizon/internal/docs/reference/endpoints/effects-for-ledger.md new file mode 100644 index 0000000000..a40f49e1f2 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/effects-for-ledger.md @@ -0,0 +1,143 @@ +--- +title: Effects for Ledger +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=effects&endpoint=for_ledger +--- + +Effects are the specific ways that the ledger was changed by any operation. + +This endpoint represents all [effects](../resources/effect.md) that occurred in the given [ledger](../resources/ledger.md). + +## Request + +``` +GET /ledgers/{sequence}/effects{?cursor,limit,order} +``` + +## Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `sequence` | required, number | Ledger Sequence Number | `680777` | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. | `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/ledgers/680777/effects?limit=1" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.effects() + .forLedger("680777") + .call() + .then(function (effectResults) { + //page 1 + console.log(effectResults.records) + }) + .catch(function (err) { + console.log(err) + }) + +``` + +## Response + +This endpoint responds with a list of effects that occurred in the ledger. See [effect resource](../resources/effect.md) for reference. 
+ +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/ledgers/680777/effects?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/ledgers/680777/effects?cursor=2923914950873089-3&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/ledgers/680777/effects?cursor=2923914950873089-1&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/2923914950873089" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2923914950873089-1" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2923914950873089-1" + } + }, + "id": "0002923914950873089-0000000001", + "paging_token": "2923914950873089-1", + "account": "GC4ALQ3GTT5BTHTOULHCJGAT4P3MUSPLU4OEE74BAVIJ6K443O6RVLRT", + "type": "account_created", + "type_i": 0, + "created_at": "2019-04-08T20:47:22Z", + "starting_balance": "10000.0000000" + }, + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/2923914950873089" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2923914950873089-2" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2923914950873089-2" + } + }, + "id": "0002923914950873089-0000000002", + "paging_token": "2923914950873089-2", + "account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "type": "account_debited", + "type_i": 3, + "created_at": "2019-04-08T20:47:22Z", + "asset_type": "native", + "amount": "10000.0000000" + }, + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/2923914950873089" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2923914950873089-3" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2923914950873089-3" + } + }, + "id": "0002923914950873089-0000000003", + "paging_token": "2923914950873089-3", + "account": "GC4ALQ3GTT5BTHTOULHCJGAT4P3MUSPLU4OEE74BAVIJ6K443O6RVLRT", + "type": "signer_created", + "type_i": 10, + "created_at": "2019-04-08T20:47:22Z", + "weight": 1, + "public_key": "GC4ALQ3GTT5BTHTOULHCJGAT4P3MUSPLU4OEE74BAVIJ6K443O6RVLRT", + "key": "" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there are no effects for a given ledger. diff --git a/services/horizon/internal/docs/reference/endpoints/effects-for-operation.md b/services/horizon/internal/docs/reference/endpoints/effects-for-operation.md new file mode 100644 index 0000000000..dd6b880442 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/effects-for-operation.md @@ -0,0 +1,142 @@ +--- +title: Effects for Operation +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=effects&endpoint=for_operation +replacement: https://developers.stellar.org/api/resources/operations/effects/ +--- + +This endpoint represents all [effects](../resources/effect.md) that occurred as a result of a given [operation](../resources/operation.md). 
+ +## Request + +``` +GET /operations/{id}/effects{?cursor,limit,order} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `id` | required, number | An operation ID | `1919197546291201` | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. | `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/operations/1919197546291201/effects" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.effects() + .forOperation("1919197546291201") + .call() + .then(function (effectResults) { + // page 1 + console.log(effectResults.records) + }) + .catch(function (err) { + console.log(err) + }) + +``` + +## Response + +This endpoint responds with a list of effects on the ledger as a result of a given operation. See [effect resource](../resources/effect.md) for reference. + +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201/effects?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201/effects?cursor=1919197546291201-3&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201/effects?cursor=1919197546291201-1&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1919197546291201-1" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1919197546291201-1" + } + }, + "id": "0001919197546291201-0000000001", + "paging_token": "1919197546291201-1", + "account": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF", + "type": "account_created", + "type_i": 0, + "created_at": "2019-03-25T22:43:38Z", + "starting_balance": "10000.0000000" + }, + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1919197546291201-2" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1919197546291201-2" + } + }, + "id": "0001919197546291201-0000000002", + "paging_token": "1919197546291201-2", + "account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "type": "account_debited", + "type_i": 3, + "created_at": "2019-03-25T22:43:38Z", + "asset_type": "native", + "amount": "10000.0000000" + }, + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1919197546291201-3" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1919197546291201-3" + } + }, + "id": "0001919197546291201-0000000003", + "paging_token": "1919197546291201-3", + "account": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF", + "type": 
"signer_created", + "type_i": 10, + "created_at": "2019-03-25T22:43:38Z", + "weight": 1, + "public_key": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF", + "key": "" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` errors will be returned if there are no effects for operation whose ID matches the `id` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/effects-for-transaction.md b/services/horizon/internal/docs/reference/endpoints/effects-for-transaction.md new file mode 100644 index 0000000000..8bdc302d1b --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/effects-for-transaction.md @@ -0,0 +1,142 @@ +--- +title: Effects for Transaction +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=effects&endpoint=for_transaction +replacement: https://developers.stellar.org/api/resources/transactions/effects/ +--- + +This endpoint represents all [effects](../resources/effect.md) that occurred as a result of a given [transaction](../resources/transaction.md). + +## Request + +``` +GET /transactions/{hash}/effects{?cursor,limit,order} +``` + +## Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `hash` | required, string | A transaction hash, hex-encoded, lowercase. | `7e2050abc676003efc3eaadd623c927f753b7a6c37f50864bf284f4e1510d088` | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. | `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/transactions/7e2050abc676003efc3eaadd623c927f753b7a6c37f50864bf284f4e1510d088/effects?limit=1" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.effects() + .forTransaction("7e2050abc676003efc3eaadd623c927f753b7a6c37f50864bf284f4e1510d088") + .call() + .then(function (effectResults) { + //page 1 + console.log(effectResults.records) + }) + .catch(function (err) { + console.log(err) + }) + +``` + +## Response + +This endpoint responds with a list of effects on the ledger as a result of a given transaction. See [effect resource](../resources/effect.md) for reference. 
+ +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/7e2050abc676003efc3eaadd623c927f753b7a6c37f50864bf284f4e1510d088/effects?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/transactions/7e2050abc676003efc3eaadd623c927f753b7a6c37f50864bf284f4e1510d088/effects?cursor=1919197546291201-3&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/transactions/7e2050abc676003efc3eaadd623c927f753b7a6c37f50864bf284f4e1510d088/effects?cursor=1919197546291201-1&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1919197546291201-1" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1919197546291201-1" + } + }, + "id": "0001919197546291201-0000000001", + "paging_token": "1919197546291201-1", + "account": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF", + "type": "account_created", + "type_i": 0, + "created_at": "2019-03-25T22:43:38Z", + "starting_balance": "10000.0000000" + }, + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1919197546291201-2" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1919197546291201-2" + } + }, + "id": "0001919197546291201-0000000002", + "paging_token": "1919197546291201-2", + "account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "type": "account_debited", + "type_i": 3, + "created_at": "2019-03-25T22:43:38Z", + "asset_type": "native", + "amount": "10000.0000000" + }, + { + "_links": { + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1919197546291201-3" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1919197546291201-3" + } + }, + "id": "0001919197546291201-0000000003", + "paging_token": "1919197546291201-3", + "account": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF", + "type": "signer_created", + "type_i": 10, + "created_at": "2019-03-25T22:43:38Z", + "weight": 1, + "public_key": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF", + "key": "" + } + ] + } +} +``` + +## Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there are no effects for transaction whose hash matches the `hash` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/fee-stats.md b/services/horizon/internal/docs/reference/endpoints/fee-stats.md new file mode 100644 index 0000000000..1f8be226fc --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/fee-stats.md @@ -0,0 +1,124 @@ +--- +title: Fee Stats +clientData: + laboratoryUrl: +replacement: https://developers.stellar.org/api/aggregations/fee-stats/ +--- + +This endpoint gives useful information about per-operation fee stats in the last 5 ledgers. It can be used to +predict a fee set on the transaction that will be submitted to the network. 
+ +## Request + +``` +GET /fee_stats +``` + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/fee_stats" +``` + +## Response + +Response contains the following fields: + +| Field | | +| - | - | +| last_ledger | Last ledger sequence number | +| last_ledger_base_fee | Base fee as defined in the last ledger | +| ledger_capacity_usage | Average capacity usage over the last 5 ledgers. (0 is no usage, 1.0 is completely full ledgers) | +| fee_charged | fee charged object | +| max_fee | max fee object | + +### Fee Charged Object + +Information about the fee charged for transactions in the last 5 ledgers. + +| Field | | +| - | - | +| min | Minimum fee charged over the last 5 ledgers. | +| mode | Mode fee charged over the last 5 ledgers. | +| p10 | 10th percentile fee charged over the last 5 ledgers. | +| p20 | 20th percentile fee charged over the last 5 ledgers. | +| p30 | 30th percentile fee charged over the last 5 ledgers. | +| p40 | 40th percentile fee charged over the last 5 ledgers. | +| p50 | 50th percentile fee charged over the last 5 ledgers. | +| p60 | 60th percentile fee charged over the last 5 ledgers. | +| p70 | 70th percentile fee charged over the last 5 ledgers. | +| p80 | 80th percentile fee charged over the last 5 ledgers. | +| p90 | 90th percentile fee charged over the last 5 ledgers. | +| p95 | 95th percentile fee charged over the last 5 ledgers. | +| p99 | 99th percentile fee charged over the last 5 ledgers. | + +Note: The difference between `fee_charged` and `max_fee` is that the former +represents the actual fee paid for the transaction while `max_fee` represents +the maximum bid the transaction creator was willing to pay for the transaction. + +### Max Fee Object + +Information about max fee bid for transactions over the last 5 ledgers. + +| Field | | +| - | - | +| min | Minimum (lowest) value of the maximum fee bid over the last 5 ledgers. | +| mode | Mode max fee over the last 5 ledgers. | +| p10 | 10th percentile max fee over the last 5 ledgers. | +| p20 | 20th percentile max fee over the last 5 ledgers. | +| p30 | 30th percentile max fee over the last 5 ledgers. | +| p40 | 40th percentile max fee over the last 5 ledgers. | +| p50 | 50th percentile max fee over the last 5 ledgers. | +| p60 | 60th percentile max fee over the last 5 ledgers. | +| p70 | 70th percentile max fee over the last 5 ledgers. | +| p80 | 80th percentile max fee over the last 5 ledgers. | +| p90 | 90th percentile max fee over the last 5 ledgers. | +| p95 | 95th percentile max fee over the last 5 ledgers. | +| p99 | 99th percentile max fee over the last 5 ledgers. | + + +### Example Response + +```json +{ + "last_ledger": "22606298", + "last_ledger_base_fee": "100", + "ledger_capacity_usage": "0.97", + "fee_charged": { + "max": "100", + "min": "100", + "mode": "100", + "p10": "100", + "p20": "100", + "p30": "100", + "p40": "100", + "p50": "100", + "p60": "100", + "p70": "100", + "p80": "100", + "p90": "100", + "p95": "100", + "p99": "100" + }, + "max_fee": { + "max": "100000", + "min": "100", + "mode": "100", + "p10": "100", + "p20": "100", + "p30": "100", + "p40": "100", + "p50": "100", + "p60": "100", + "p70": "100", + "p80": "100", + "p90": "15000", + "p95": "100000", + "p99": "100000" + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). 
diff --git a/services/horizon/internal/docs/reference/endpoints/ledgers-all.md b/services/horizon/internal/docs/reference/endpoints/ledgers-all.md
new file mode 100644
index 0000000000..242c0df697
--- /dev/null
+++ b/services/horizon/internal/docs/reference/endpoints/ledgers-all.md
@@ -0,0 +1,205 @@
+---
+title: All Ledgers
+clientData:
+  laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=ledgers&endpoint=all
+replacement: https://developers.stellar.org/api/resources/ledgers/
+---
+
+This endpoint represents all [ledgers](../resources/ledger.md).
+This endpoint can also be used in [streaming](../streaming.md) mode so it is possible to use it to get notifications as ledgers are closed by the Stellar network.
+If called in streaming mode Horizon will start at the earliest known ledger unless a `cursor` is set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only stream ledgers created since your request time.
+
+## Request
+
+```
+GET /ledgers{?cursor,limit,order}
+```
+
+### Arguments
+
+| name | notes | description | example |
+| ---- | ----- | ----------- | ------- |
+| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream objects created since your request time. | `12884905984` |
+| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` |
+| `?limit` | optional, number, default: `10` | Maximum number of records to return. | `200` |
+
+### curl Example Request
+
+```sh
+# Retrieve the 200 most recent ledgers, newest first
+curl "https://horizon-testnet.stellar.org/ledgers?limit=200&order=desc"
+```
+
+### JavaScript Example Request
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+server.ledgers()
+  .call()
+  .then(function (ledgerResult) {
+    // page 1
+    console.log(ledgerResult.records)
+    return ledgerResult.next()
+  })
+  .then(function (ledgerResult) {
+    // page 2
+    console.log(ledgerResult.records)
+  })
+  .catch(function(err) {
+    console.log(err)
+  })
+```
+
+
+### JavaScript Streaming Example
+
+```javascript
+var StellarSdk = require('stellar-sdk')
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+var ledgerHandler = function (ledgerResponse) {
+  console.log(ledgerResponse);
+};
+
+var es = server.ledgers()
+  .cursor('now')
+  .stream({
+    onmessage: ledgerHandler
+})
+```
+
+## Response
+
+This endpoint responds with a list of ledgers. See [ledger resource](../resources/ledger.md) for reference.
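+
+When streaming, it is usually worth registering an `onerror` handler as well; in recent versions of the JavaScript `stellar-sdk`, `stream()` returns a function that closes the underlying connection when called. A minimal sketch based on the streaming example above:
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+var seen = 0;
+var closeStream = server.ledgers()
+  .cursor('now')
+  .stream({
+    onmessage: function (ledger) {
+      console.log(ledger.sequence, ledger.closed_at);
+      seen += 1;
+      if (seen >= 5) {
+        closeStream(); // stop listening after five ledgers
+      }
+    },
+    onerror: function (err) {
+      console.log('stream error', err);
+    }
+  });
+```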
+ +### Example Response + +```json +{ + "_embedded": { + "records": [ + { + "_links": { + "effects": { + "href": "/ledgers/1/effects/{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "/ledgers/1/operations/{?cursor,limit,order}", + "templated": true + }, + "self": { + "href": "/ledgers/1" + }, + "transactions": { + "href": "/ledgers/1/transactions/{?cursor,limit,order}", + "templated": true + } + }, + "id": "e8e10918f9c000c73119abe54cf089f59f9015cc93c49ccf00b5e8b9afb6e6b1", + "paging_token": "4294967296", + "hash": "e8e10918f9c000c73119abe54cf089f59f9015cc93c49ccf00b5e8b9afb6e6b1", + "sequence": 1, + "transaction_count": 0, + "successful_transaction_count": 0, + "failed_transaction_count": 0, + "operation_count": 0, + "tx_set_operation_count": 0, + "closed_at": "1970-01-01T00:00:00Z", + "total_coins": "100000000000.0000000", + "fee_pool": "0.0000000", + "base_fee_in_stroops": 100, + "base_reserve_in_stroops": 100000000, + "max_tx_set_size": 50 + }, + { + "_links": { + "effects": { + "href": "/ledgers/2/effects/{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "/ledgers/2/operations/{?cursor,limit,order}", + "templated": true + }, + "self": { + "href": "/ledgers/2" + }, + "transactions": { + "href": "/ledgers/2/transactions/{?cursor,limit,order}", + "templated": true + } + }, + "id": "e12e5809ab8c59d8256e691cb48a024dd43960bc15902d9661cd627962b2bc71", + "paging_token": "8589934592", + "hash": "e12e5809ab8c59d8256e691cb48a024dd43960bc15902d9661cd627962b2bc71", + "prev_hash": "e8e10918f9c000c73119abe54cf089f59f9015cc93c49ccf00b5e8b9afb6e6b1", + "sequence": 2, + "transaction_count": 0, + "successful_transaction_count": 0, + "failed_transaction_count": 0, + "operation_count": 0, + "closed_at": "2015-07-16T23:49:00Z", + "total_coins": "100000000000.0000000", + "fee_pool": "0.0000000", + "base_fee_in_stroops": 100, + "base_reserve_in_stroops": 100000000, + "max_tx_set_size": 100 + } + ] + }, + "_links": { + "next": { + "href": "/ledgers?order=asc&limit=2&cursor=8589934592" + }, + "prev": { + "href": "/ledgers?order=desc&limit=2&cursor=4294967296" + }, + "self": { + "href": "/ledgers?order=asc&limit=2&cursor=" + } + } +} +``` + +### Example Streaming Event + +```json +{ + "_links": { + "effects": { + "href": "/ledgers/69859/effects/{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "/ledgers/69859/operations/{?cursor,limit,order}", + "templated": true + }, + "self": { + "href": "/ledgers/69859" + }, + "transactions": { + "href": "/ledgers/69859/transactions/{?cursor,limit,order}", + "templated": true + } + }, + "id": "4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118", + "paging_token": "300042120331264", + "hash": "4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118", + "prev_hash": "4b0b8bace3b2438b2404776ce57643966855487ba6384724a3c664c7aa4cd9e4", + "sequence": 69859, + "transaction_count": 0, + "successful_transaction_count": 0, + "failed_transaction_count": 0, + "operation_count": 0, + "closed_at": "2015-07-20T15:51:52Z", + "total_coins": "100000000000.0000000", + "fee_pool": "0.0025600", + "base_fee_in_stroops": 100, + "base_reserve_in_stroops": "100000000", + "max_tx_set_size": 50 +} +``` + +## Errors + +- The [standard errors](../errors.md#standard-errors). 
diff --git a/services/horizon/internal/docs/reference/endpoints/ledgers-single.md b/services/horizon/internal/docs/reference/endpoints/ledgers-single.md new file mode 100644 index 0000000000..f16c9485d8 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/ledgers-single.md @@ -0,0 +1,92 @@ +--- +title: Ledger Details +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=ledgers&endpoint=single +replacement: https://developers.stellar.org/api/resources/ledgers/single/ +--- + +The ledger details endpoint provides information on a single [ledger](../resources/ledger.md). + +## Request + +``` +GET /ledgers/{sequence} +``` + +### Arguments + +| name | notes | description | example | +| ------ | ------- | ----------- | ------- | +| `sequence` | required, number | Ledger Sequence | `69859` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/ledgers/69859" +``` + +### JavaScript Example Request + +```js +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.ledgers() + .ledger('69858') + .call() + .then(function(ledgerResult) { + console.log(ledgerResult) + }) + .catch(function(err) { + console.log(err) + }) + +``` +## Response + +This endpoint responds with a single Ledger. See [ledger resource](../resources/ledger.md) for reference. + +### Example Response + +```json +{ + "_links": { + "effects": { + "href": "/ledgers/69859/effects/{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "/ledgers/69859/operations/{?cursor,limit,order}", + "templated": true + }, + "self": { + "href": "/ledgers/69859" + }, + "transactions": { + "href": "/ledgers/69859/transactions/{?cursor,limit,order}", + "templated": true + } + }, + "id": "4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118", + "paging_token": "300042120331264", + "hash": "4db1e4f145e9ee75162040d26284795e0697e2e84084624e7c6c723ebbf80118", + "prev_hash": "4b0b8bace3b2438b2404776ce57643966855487ba6384724a3c664c7aa4cd9e4", + "sequence": 69859, + "transaction_count": 0, + "successful_transaction_count": 0, + "failed_transaction_count": 0, + "operation_count": 0, + "tx_set_operation_count": 0, + "closed_at": "2015-07-20T15:51:52Z", + "total_coins": "100000000000.0000000", + "fee_pool": "0.0025600", + "base_fee_in_stroops": 100, + "base_reserve_in_stroops": 100000000, + "max_tx_set_size": 50 +} +``` + +## Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no ledger whose sequence number matches the `sequence` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/metrics.md b/services/horizon/internal/docs/reference/endpoints/metrics.md new file mode 100644 index 0000000000..c0d1fba62b --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/metrics.md @@ -0,0 +1,126 @@ +--- +title: Metrics +--- + +The metrics endpoint returns a host of [Prometheus](https://prometheus.io/) metrics for monitoring the health of the underlying Horizon process. + +There is an [official Grafana Dashboard](https://grafana.com/grafana/dashboards/13793) to easily visualize those metrics. + +Since Horizon 1.0.0 this endpoint is not part of the public API. It's available in the internal server (listening on the internal port set via `ADMIN_PORT` env variable or `--admin-port` CLI param). 
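+
+For monitoring scripts that cannot scrape the endpoint with Prometheus directly, the metrics can also be read ad hoc. A minimal sketch, assuming Node 18+ (global `fetch`) and a local Horizon started with `ADMIN_PORT=9090`; exact metric names vary between Horizon versions:
+
+```javascript
+// Print every non-comment metric line that mentions the given pattern.
+async function grepMetrics(pattern) {
+  var response = await fetch('http://localhost:9090/metrics');
+  var text = await response.text();
+  return text.split('\n').filter(function (line) {
+    return line.length > 0 && line[0] !== '#' && line.indexOf(pattern) !== -1;
+  });
+}
+
+grepMetrics('latest_ledger').then(function (lines) {
+  lines.forEach(function (line) { console.log(line); });
+});
+```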
+
+## Request
+
+```
+GET /metrics
+```
+
+### curl Example Request
+
+Assuming a local Horizon instance is running with an admin port of 9090 (i.e. `ADMIN_PORT=9090` env variable or `--admin-port=9090`):
+
+```sh
+curl "http://localhost:9090/metrics"
+```
+
+
+## Response
+
+The `/metrics` endpoint returns a [Prometheus text-formatted](https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format) response. It is meant to be scraped by Prometheus.
+
+Below, each section of related data points is grouped together and annotated (***note**: this endpoint returns ALL of this data in one response*).
+
+
+#### Goroutines
+
+Horizon utilizes Go's built-in concurrency primitives ([goroutines](https://gobyexample.com/goroutines) and [channels](https://gobyexample.com/channels)). The `goroutine` metric monitors the number of goroutines currently running in this Horizon process.
+
+
+#### History
+
+Horizon maintains its own database (Postgres), a verbose and user-friendly account of activity on the Stellar network.
+
+| Metric | Description |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| history.elder_ledger | The sequence number of the oldest ledger recorded in Horizon's database. |
+| history.latest_ledger | The sequence number of the youngest (most recent) ledger recorded in Horizon's database. |
+| history.open_connections | The number of open connections to the Horizon database. |
+
+
+#### Ingester
+
+Ingester represents metrics specific to Horizon's [ingestion](https://github.com/stellar/go/blob/master/services/horizon/internal/docs/reference/admin.md#ingesting-stellar-core-data) process, i.e. the process by which Horizon consumes transaction results from a connected Stellar Core instance.
+
+| Metric | Description |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| ingester.clear_ledger | The count and rate of clearing (per ledger) for this Horizon process. |
+| ingester.ingest_ledger | The count and rate of ingestion (per ledger) for this Horizon process. |
+
+These metrics contain useful [sub metrics](#sub-metrics).
+
+
+#### Logging
+
+Horizon utilizes the standard `debug`, `error`, etc. levels of logging. This metric outputs stats for each level of log message produced, useful for high-level monitoring of "is my Horizon instance functioning properly?" In order of increasing severity:
+
+* logging.debug
+* logging.info
+* logging.warning
+* logging.error
+* logging.panic
+
+These metrics contain useful [sub metrics](#sub-metrics).
+
+#### Requests
+
+Requests represent an overview of Horizon's incoming traffic.
+
+These metrics contain useful [sub metrics](#sub-metrics).
+
+| Metric | Description |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| requests.failed | Failed requests are those that return a status code in [400, 600). |
+| requests.succeeded | Successful requests are those that return a status code in [200, 400). |
+| requests.total | Total number of received requests. |
+
+#### Stellar Core
+
+As noted above, Horizon relies on Stellar Core to stay in sync with the Stellar network. These metrics are specific to the underlying Stellar Core instance.
+
+| Metric | Description |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| stellar_core.latest_ledger | The sequence number of the latest (most recent) ledger recorded in Stellar Core's database. |
+| stellar_core.open_connections | The number of open connections to the Stellar Core postgres database. |
+
+#### Transaction Submission
+
+Horizon does not submit transactions directly to the Stellar network. Instead, it sequences transactions and sends the base64-encoded, XDR-serialized blob to its connected Stellar Core instance.
+
+##### Horizon Transaction Sequencing and Submission
+
+The following is a simplified version of the transaction submission process that glosses over the finer details. To dive deeper, check out the [source code](https://github.com/stellar/go/tree/master/services/horizon/internal/txsub).
+
+Horizon's sequencing mechanism consists of a [manager](https://github.com/stellar/go/blob/master/services/horizon/internal/txsub/sequence/manager.go) that keeps track of [submission queues](https://github.com/stellar/go/blob/master/services/horizon/internal/txsub/sequence/queue.go) for a set of addresses. A submission queue is a priority queue, prioritized by minimum transaction sequence number, that holds a set of pending transactions for an account. A pending transaction is represented as an object with a sequence number and a channel. Periodically, this queue is updated: finished transactions are popped off, and a success/failure response is sent down each transaction's channel.
+
+These metrics contain useful [sub metrics](#sub-metrics).
+
+
+| Metric | Description |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| txsub.buffered | The count of submissions buffered behind this Horizon's submission queue. |
+| txsub.failed | The rate of failed transactions that have been submitted to this Horizon. |
+| txsub.open | The count of "open" submissions, i.e. submissions whose transactions haven't yet been confirmed as successful or failed. |
+| txsub.succeeded | The rate of successful transactions that have been submitted to this Horizon. |
+| txsub.total | Both the rate and count of all transactions submitted to this Horizon. |
+
+### Sub Metrics
+
+Various sub metrics related to a given metric's performance.
+
+| Metric | Description |
+| ---------------- | ------------------------------------------------------------------------------------------------------------------------------ |
+| `1m.rate`, `5min.rate`, etc. | The moving average rate of events per second over the given time window (1 minute, 5 minutes, and so on). |
+| `75%`, `95%`, etc. | Metric values at the given percentiles. |
+| `count` | Sum total of a given metric's recorded values. |
+| `max`, `mean`, etc. | Common summary statistics. |
+
diff --git a/services/horizon/internal/docs/reference/endpoints/offer-details.md b/services/horizon/internal/docs/reference/endpoints/offer-details.md
new file mode 100644
index 0000000000..978d88f5f4
--- /dev/null
+++ b/services/horizon/internal/docs/reference/endpoints/offer-details.md
@@ -0,0 +1,71 @@
+---
+title: Offer Details
+replacement: https://developers.stellar.org/api/resources/offers/
+---
+
+Returns information and links relating to a single [offer](../resources/offer.md).
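+
+A single offer record can also be loaded from a script by requesting its URL directly, as an alternative to the curl example further down. A minimal sketch, assuming Node 18+ with a global `fetch`:
+
+```javascript
+// Fetch one offer record and print its price and remaining amount.
+async function getOffer(offerId) {
+  var response = await fetch('https://horizon-testnet.stellar.org/offers/' + offerId);
+  if (!response.ok) {
+    throw new Error('Horizon returned ' + response.status);
+  }
+  return response.json();
+}
+
+getOffer('1347876')
+  .then(function (offer) {
+    console.log(offer.seller, 'sells', offer.amount, 'at price', offer.price);
+  })
+  .catch(function (err) {
+    console.log(err);
+  });
+```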
+ +## Request + +``` +GET /offers/{offer} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `offer` | required, string | Offer ID | `126628073` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/offers/1347876" +``` + + + +## Response + +This endpoint responds with the details of a single offer for a given ID. See [offer resource](../resources/offer.md) for reference. + +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/1347876" + }, + "offer_maker": { + "href": "https://horizon-testnet.stellar.org/accounts/GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C" + } + }, + "id": "1347876", + "paging_token": "1347876", + "seller": "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", + "selling": { + "asset_type": "credit_alphanum4", + "asset_code": "DSQ", + "asset_issuer": "GBDQPTQJDATT7Z7EO4COS4IMYXH44RDLLI6N6WIL5BZABGMUOVMLWMQF" + }, + "buying": { + "asset_type": "credit_alphanum4", + "asset_code": "USD", + "asset_issuer": "GAA4MFNZGUPJAVLWWG6G5XZJFZDHLKQNG3Q6KB24BAD6JHNNVXDCF4XG" + }, + "amount": "60.4544008", + "price_r": { + "n": 84293, + "d": 2000000 + }, + "price": "0.0421465", + "last_modified_ledger": 1429506, + "last_modified_time": "2019-10-29T22:08:23Z" +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no offer whose ID matches the `offer` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/offers-for-account.md b/services/horizon/internal/docs/reference/endpoints/offers-for-account.md new file mode 100644 index 0000000000..906e302b6a --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/offers-for-account.md @@ -0,0 +1,131 @@ +--- +title: Offers for Account +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=offers&endpoint=for_account +replacement: https://developers.stellar.org/api/resources/accounts/offers/ +--- + +People on the Stellar network can make [offers](../resources/offer.md) to buy or sell assets. This +endpoint represents all the offers a particular account makes. + +This endpoint can also be used in [streaming](../streaming.md) mode so it is possible to use it to +listen as offers are processed in the Stellar network. If called in streaming mode Horizon will +start at the earliest known offer unless a `cursor` is set. In that case it will start from the +`cursor`. You can also set `cursor` value to `now` to only stream offers created since your request +time. + +## Request + +``` +GET /accounts/{account}/offers{?cursor,limit,order} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `account` | required, string | Account ID | `GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF` | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. 
| `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/accounts/GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF/offers" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.offers('accounts', 'GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF') + .call() + .then(function (offerResult) { + console.log(offerResult); + }) + .catch(function (err) { + console.error(err); + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var offerHandler = function (offerResponse) { + console.log(offerResponse); +}; + +var es = server.offers('accounts', 'GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF') + .cursor('now') + .stream({ + onmessage: offerHandler + }) +``` + +## Response + +The list of offers. + +**Note:** a response of 200 with an empty records array may either mean there are no offers for +`account_id` or `account_id` does not exist. + +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF/offers?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF/offers?cursor=5443256&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF/offers?cursor=5443256&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/5443256" + }, + "offer_maker": { + "href": "https://horizon-testnet.stellar.org/accounts/GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF" + } + }, + "id": "5443256", + "paging_token": "5443256", + "seller": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF", + "selling": { + "asset_type": "native" + }, + "buying": { + "asset_type": "credit_alphanum4", + "asset_code": "FOO", + "asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR" + }, + "amount": "10.0000000", + "price_r": { + "n": 1, + "d": 1 + }, + "price": "1.0000000", + "last_modified_ledger": 694974, + "last_modified_time": "2019-04-09T17:14:22Z" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). diff --git a/services/horizon/internal/docs/reference/endpoints/offers.md b/services/horizon/internal/docs/reference/endpoints/offers.md new file mode 100644 index 0000000000..6f8273487e --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/offers.md @@ -0,0 +1,122 @@ +--- +title: Offers +replacement: https://developers.stellar.org/api/resources/offers/list/ +--- + +People on the Stellar network can make [offers](../resources/offer.md) to buy or sell assets. This +endpoint represents all the current offers, allowing filtering by `seller`, `selling_asset` or `buying_asset`. 
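+
+For example, the open offers of one seller that are buying a particular asset can be listed by combining the query parameters documented below. A minimal sketch, assuming Node 18+ with a global `fetch`:
+
+```javascript
+// List offers created by one account that are buying a given asset.
+async function offersBuying(seller, assetCode, assetIssuer) {
+  var params = new URLSearchParams({
+    seller: seller,
+    buying: assetCode + ':' + assetIssuer,
+    limit: '10',
+    order: 'desc'
+  });
+  var response = await fetch('https://horizon-testnet.stellar.org/offers?' + params);
+  var body = await response.json();
+  return body._embedded.records;
+}
+
+offersBuying(
+  'GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF',
+  'FOO',
+  'GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR'
+).then(function (records) {
+  records.forEach(function (offer) {
+    console.log(offer.id, offer.amount, offer.price);
+  });
+});
+```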
+ +## Request + +``` +GET /offers{?selling_asset_type,selling_asset_issuer,selling_asset_code,buying_asset_type,buying_asset_issuer,buying_asset_code,seller,cursor,limit,order} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `?seller` | optional, string | Account ID of the offer creator | `GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36` | +| `?selling` | optional, string | Asset being sold | `native` or `EUR:GD6VWBXI6NY3AOOR55RLVQ4MNIDSXE5JSAVXUTF35FRRI72LYPI3WL6Z` | +| `?buying` | optional, string | Asset being bought | `native` or `USD:GD6VWBXI6NY3AOOR55RLVQ4MNIDSXE5JSAVXUTF35FRRI72LYPI3WL6Z` | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. | `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/offers{?selling_asset_type,selling_asset_issuer,selling_asset_code,buying_asset_type,buying_asset_issuer,buying_asset_code,seller,cursor,limit,order}" +``` + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Response + +The list of offers. + +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/offers?cursor=5443256&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/offers?cursor=5443256&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/5443256" + }, + "offer_maker": { + "href": "https://horizon-testnet.stellar.org/" + } + }, + "id": "5443256", + "paging_token": "5443256", + "seller": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF", + "selling": { + "asset_type": "native" + }, + "buying": { + "asset_type": "credit_alphanum4", + "asset_code": "FOO", + "asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR" + }, + "amount": "10.0000000", + "price_r": { + "n": 1, + "d": 1 + }, + "price": "1.0000000", + "last_modified_ledger": 694974, + "last_modified_time": "2019-04-09T17:14:22Z" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). diff --git a/services/horizon/internal/docs/reference/endpoints/operations-all.md b/services/horizon/internal/docs/reference/endpoints/operations-all.md new file mode 100644 index 0000000000..c44f9f4fc6 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/operations-all.md @@ -0,0 +1,192 @@ +--- +title: All Operations +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=operations&endpoint=all +replacement: https://developers.stellar.org/api/resources/operations/ +--- + +This endpoint represents [operations](../resources/operation.md) that are part of successfully validated [transactions](../resources/transaction.md). +Please note that this endpoint returns operations that are part of failed transactions if `include_failed` parameter is `true` +and Horizon is ingesting failed transactions. +This endpoint can also be used in [streaming](../streaming.md) mode so it is possible to use it to listen as operations are processed in the Stellar network. 
+If called in streaming mode Horizon will start at the earliest known operation unless a `cursor` is set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only stream operations created since your request time. + +## Request + +``` +GET /operations{?cursor,limit,order,include_failed} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream object created since your request time. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. | `200` | +| `?include_failed` | optional, bool, default: `false` | Set to `true` to include operations of failed transactions in results. | `true` | +| `?join` | optional, string, default: _null_ | Set to `transactions` to include the transactions which created each of the operations in the response. | `transactions` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/operations?limit=200&order=desc" +``` + +### JavaScript Example Request + +```js +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.operations() + .call() + .then(function (operationsResult) { + //page 1 + console.log(operationsResult.records) + return operationsResult.next() + }) + .then(function (operationsResult) { + //page 2 + console.log(operationsResult.records) + }) + .catch(function (err) { + console.log(err) + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var operationHandler = function (operationResponse) { + console.log(operationResponse); +}; + +var es = server.operations() + .cursor('now') + .stream({ + onmessage: operationHandler + }) +``` + +## Response + +This endpoint responds with a list of operations. See [operation resource](../resources/operation.md) for reference. 
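+
+When `include_failed=true` is used, each returned record's `transaction_successful` flag indicates whether the operation was actually applied. A minimal sketch, assuming a `stellar-sdk` version whose operations call builder exposes `includeFailed`:
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+server.operations()
+  .includeFailed(true)
+  .order('desc')
+  .limit(20)
+  .call()
+  .then(function (operationsResult) {
+    operationsResult.records.forEach(function (op) {
+      var status = op.transaction_successful ? 'applied' : 'failed';
+      console.log(op.id, op.type, status);
+    });
+  })
+  .catch(function (err) {
+    console.log(err);
+  });
+```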
+ +### Example Response + +```json +{ + "_embedded": { + "records": [ + { + "_links": { + "effects": { + "href": "/operations/77309415424/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=77309415424&order=asc" + }, + "self": { + "href": "/operations/77309415424" + }, + "succeeds": { + "href": "/operations?cursor=77309415424&order=desc" + }, + "transactions": { + "href": "/transactions/77309415424" + } + }, + "account": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "funder": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "id": 77309415424, + "paging_token": "77309415424", + "starting_balance": "1000.0000000", + "transaction_successful": true, + "type_i": 0, + "type": "create_account" + }, + { + "_links": { + "effects": { + "href": "/operations/463856472064/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=463856472064&order=asc" + }, + "self": { + "href": "/operations/463856472064" + }, + "succeeds": { + "href": "/operations?cursor=463856472064&order=desc" + }, + "transactions": { + "href": "/transactions/463856472064" + } + }, + "account": "GC2ADYAIPKYQRGGUFYBV2ODJ54PY6VZUPKNCWWNX2C7FCJYKU4ZZNKVL", + "funder": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "id": 463856472064, + "paging_token": "463856472064", + "starting_balance": "1000.0000000", + "transaction_successful": true, + "type_i": 0, + "type": "create_account" + } + ] + }, + "_links": { + "next": { + "href": "/operations?order=asc&limit=2&cursor=463856472064" + }, + "prev": { + "href": "/operations?order=desc&limit=2&cursor=77309415424" + }, + "self": { + "href": "/operations?order=asc&limit=2&cursor=" + } + } +} +``` + +### Example Streaming Event + +```json +{ + "_links": { + "effects": { + "href": "/operations/77309415424/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=77309415424&order=asc" + }, + "self": { + "href": "/operations/77309415424" + }, + "succeeds": { + "href": "/operations?cursor=77309415424&order=desc" + }, + "transactions": { + "href": "/transactions/77309415424" + } + }, + "account": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "funder": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "id": 77309415424, + "paging_token": "77309415424", + "starting_balance": "1000.0000000", + "transaction_successful": true, + "type_i": 0, + "type": "create_account" +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). diff --git a/services/horizon/internal/docs/reference/endpoints/operations-for-account.md b/services/horizon/internal/docs/reference/endpoints/operations-for-account.md new file mode 100644 index 0000000000..0b44988134 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/operations-for-account.md @@ -0,0 +1,162 @@ +--- +title: Operations for Account +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=operations&endpoint=for_account +replacement: https://developers.stellar.org/api/resources/accounts/offers/ +--- + +This endpoint represents successful [operations](../resources/operation.md) that were included in valid [transactions](../resources/transaction.md) that affected a particular [account](../resources/account.md). + +This endpoint can also be used in [streaming](../streaming.md) mode so it is possible to use it to listen for new operations that affect a given account as they happen. 
+If called in streaming mode Horizon will start at the earliest known operation unless a `cursor` is set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only stream operations created since your request time. + +## Request + +``` +GET /accounts/{account}/operations{?cursor,limit,order,include_failed} +``` + +### Arguments + +| name | notes | description | example | +| ------ | ------- | ----------- | ------- | +| `account`| required, string | Account ID | `GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36`| +| `?cursor`| optional, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream object created since your request time. | `12884905984` | +| `?order` | optional, string, default `asc`| The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. | `200` +| `?include_failed` | optional, bool, default: `false` | Set to `true` to include operations of failed transactions in results. | `true` | | +| `?join` | optional, string, default: _null_ | Set to `transactions` to include the transactions which created each of the operations in the response. | `transactions` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/accounts/GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36/operations" +``` + +### JavaScript Example Request + +```js +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.operations() + .forAccount("GAKLBGHNHFQ3BMUYG5KU4BEWO6EYQHZHAXEWC33W34PH2RBHZDSQBD75") + .call() + .then(function (operationsResult) { + console.log(operationsResult.records) + }) + .catch(function (err) { + console.log(err) + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var operationHandler = function (operationResponse) { + console.log(operationResponse); +}; + +var es = server.operations() + .forAccount("GAKLBGHNHFQ3BMUYG5KU4BEWO6EYQHZHAXEWC33W34PH2RBHZDSQBD75") + .cursor('now') + .stream({ + onmessage: operationHandler + }) +``` + +## Response + +This endpoint responds with a list of operations that affected the given account. See [operation resource](../resources/operation.md) for reference. 
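+
+Once loaded, the records can be filtered client-side; the sketch below keeps only payments received by the account (the payment fields follow the operation examples used elsewhere in these pages):
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+var accountId = 'GAKLBGHNHFQ3BMUYG5KU4BEWO6EYQHZHAXEWC33W34PH2RBHZDSQBD75';
+
+server.operations()
+  .forAccount(accountId)
+  .limit(50)
+  .call()
+  .then(function (operationsResult) {
+    var received = operationsResult.records.filter(function (op) {
+      return op.type === 'payment' && op.to === accountId;
+    });
+    received.forEach(function (op) {
+      var asset = op.asset_type === 'native' ? 'XLM' : op.asset_code;
+      console.log(op.from, 'sent', op.amount, asset);
+    });
+  })
+  .catch(function (err) {
+    console.log(err);
+  });
+```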
+ +### Example Response + +```json +{ + "_embedded": { + "records": [ + { + "_links": { + "effects": { + "href": "/operations/46316927324160/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=46316927324160&order=asc" + }, + "self": { + "href": "/operations/46316927324160" + }, + "succeeds": { + "href": "/operations?cursor=46316927324160&order=desc" + }, + "transactions": { + "href": "/transactions/46316927324160" + } + }, + "account": "GBBM6BKZPEHWYO3E3YKREDPQXMS4VK35YLNU7NFBRI26RAN7GI5POFBB", + "funder": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "id": 46316927324160, + "paging_token": "46316927324160", + "starting_balance": 1e+09, + "transaction_successful": true, + "type_i": 0, + "type": "create_account" + } + ] + }, + "_links": { + "next": { + "href": "/accounts/GBBM6BKZPEHWYO3E3YKREDPQXMS4VK35YLNU7NFBRI26RAN7GI5POFBB/operations?order=asc&limit=10&cursor=46316927324160" + }, + "prev": { + "href": "/accounts/GBBM6BKZPEHWYO3E3YKREDPQXMS4VK35YLNU7NFBRI26RAN7GI5POFBB/operations?order=desc&limit=10&cursor=46316927324160" + }, + "self": { + "href": "/accounts/GBBM6BKZPEHWYO3E3YKREDPQXMS4VK35YLNU7NFBRI26RAN7GI5POFBB/operations?order=asc&limit=10&cursor=" + } + } +} +``` + +### Example Streaming Event + +```json +{ + "_links": { + "effects": { + "href": "/operations/77309415424/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=77309415424&order=asc" + }, + "self": { + "href": "/operations/77309415424" + }, + "succeeds": { + "href": "/operations?cursor=77309415424&order=desc" + }, + "transactions": { + "href": "/transactions/77309415424" + } + }, + "account": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "funder": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "id": 77309415424, + "paging_token": "77309415424", + "starting_balance": "1000.0000000", + "transaction_successful": true, + "type_i": 0, + "type": "create_account" +} +``` + + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no account whose ID matches the `account` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/operations-for-ledger.md b/services/horizon/internal/docs/reference/endpoints/operations-for-ledger.md new file mode 100644 index 0000000000..85155a5726 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/operations-for-ledger.md @@ -0,0 +1,140 @@ +--- +title: Operations for Ledger +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=operations&endpoint=for_ledger +replacement: https://developers.stellar.org/api/resources/ledgers/operations/ +--- + +This endpoint returns successful [operations](../resources/operation.md) that occurred in a given [ledger](../resources/ledger.md). + +## Request + +``` +GET /ledgers/{sequence}/operations{?cursor,limit,order,include_failed} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `sequence` | required, number | Ledger Sequence | `681637` | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. 
| `200` | +| `?include_failed` | optional, bool, default: `false` | Set to `true` to include operations of failed transactions in results. | `true` | +| `?join` | optional, string, default: _null_ | Set to `transactions` to include the transactions which created each of the operations in the response. | `transactions` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/ledgers/681637/operations?limit=1" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.operations() + .forLedger("681637") + .call() + .then(function (operationsResult) { + console.log(operationsResult.records); + }) + .catch(function (err) { + console.log(err) + }) +``` + +## Response + +This endpoint responds with a list of operations in a given ledger. See [operation resource](../resources/operation.md) for reference. + +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/ledgers/681637/operations?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/ledgers/681637/operations?cursor=2927608622751745&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/ledgers/681637/operations?cursor=2927608622747649&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/2927608622747649" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/2927608622747649/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2927608622747649" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2927608622747649" + } + }, + "id": "2927608622747649", + "paging_token": "2927608622747649", + "transaction_successful": true, + "source_account": "GCGXZPH2QNKJP4GI2J77EFQQUMP3NYY4PCUZ4UPKHR2XYBKRUYKQ2DS6", + "type": "payment", + "type_i": 1, + "created_at": "2019-04-08T21:59:27Z", + "transaction_hash": "4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a", + "asset_type": "native", + "from": "GCGXZPH2QNKJP4GI2J77EFQQUMP3NYY4PCUZ4UPKHR2XYBKRUYKQ2DS6", + "to": "GDGEQS64ISS6Y2KDM5V67B6LXALJX4E7VE4MIA54NANSUX5MKGKBZM5G", + "amount": "404.0000000" + }, + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/2927608622751745" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/fdabcee816bd439dd1d20bcb0abab5aa939c15cca5fccc1db060ba6096a5e0ed" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/2927608622751745/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2927608622751745" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2927608622751745" + } + }, + "id": "2927608622751745", + "paging_token": "2927608622751745", + "transaction_successful": true, + "source_account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "type": "create_account", + "type_i": 0, + "created_at": "2019-04-08T21:59:27Z", + "transaction_hash": "fdabcee816bd439dd1d20bcb0abab5aa939c15cca5fccc1db060ba6096a5e0ed", + "starting_balance": "10000.0000000", + 
"funder": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "account": "GCD5UL3DHC5TQRQVJKFTM66CLFTHGULOQ2HEAXNSA2JWUGBCT36BP55F" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no ledger whose ID matches the `id` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/operations-for-transaction.md b/services/horizon/internal/docs/reference/endpoints/operations-for-transaction.md new file mode 100644 index 0000000000..14ef13f850 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/operations-for-transaction.md @@ -0,0 +1,115 @@ +--- +title: Operations for Transaction +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=operations&endpoint=for_transaction +replacement: https://developers.stellar.org/api/resources/transactions/operations/ +--- + +This endpoint represents successful [operations](../resources/operation.md) that are part of a given [transaction](../resources/transaction.md). + +### Warning - failed transactions + +The "Operations for Transaction" endpoint returns a list of operations in a successful or failed +transaction. Make sure to always check the operation status in this endpoint using +`transaction_successful` field! + +## Request + +``` +GET /transactions/{hash}/operations{?cursor,limit,order} +``` + +## Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `hash` | required, string | A transaction hash, hex-encoded, lowercase. | `4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a` | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. | `200` | +| `?join` | optional, string, default: _null_ | Set to `transactions` to include the transactions which created each of the operations in the response. | `transactions` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/transactions/4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a/operations?limit=1" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.operations() + .forTransaction("4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a") + .call() + .then(function (operationsResult) { + console.log(operationsResult.records); + }) + .catch(function (err) { + console.log(err) + }) +``` + +## Response + +This endpoint responds with a list of operations that are part of a given transaction. See [operation resource](../resources/operation.md) for reference. 
+ +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a/operations?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/transactions/4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a/operations?cursor=2927608622747649&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/transactions/4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a/operations?cursor=2927608622747649&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/2927608622747649" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/2927608622747649/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2927608622747649" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2927608622747649" + } + }, + "id": "2927608622747649", + "paging_token": "2927608622747649", + "transaction_successful": true, + "source_account": "GCGXZPH2QNKJP4GI2J77EFQQUMP3NYY4PCUZ4UPKHR2XYBKRUYKQ2DS6", + "type": "payment", + "type_i": 1, + "created_at": "2019-04-08T21:59:27Z", + "transaction_hash": "4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a", + "asset_type": "native", + "from": "GCGXZPH2QNKJP4GI2J77EFQQUMP3NYY4PCUZ4UPKHR2XYBKRUYKQ2DS6", + "to": "GDGEQS64ISS6Y2KDM5V67B6LXALJX4E7VE4MIA54NANSUX5MKGKBZM5G", + "amount": "404.0000000" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no account whose ID matches the `hash` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/operations-single.md b/services/horizon/internal/docs/reference/endpoints/operations-single.md new file mode 100644 index 0000000000..2693cd4d41 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/operations-single.md @@ -0,0 +1,97 @@ +--- +title: Operation Details +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=operations&endpoint=single +replacement: https://developers.stellar.org/api/resources/operations/single/ +--- + +The operation details endpoint provides information on a single +[operation](../resources/operation.md). The operation ID provided in the `id` argument specifies +which operation to load. + +### Warning - failed transactions + +Operations can be part of successful or failed transactions (failed transactions are also included +in Stellar ledger). Always check operation status using `transaction_successful` field! + +## Request + +``` +GET /operations/{id} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `id` | required, number | An operation ID. | 2927608622747649 | +| `?join` | optional, string, default: _null_ | Set to `transactions` to include the transactions which created each of the operations in the response. 
| `transactions` | + +### curl Example Request + +```sh +curl https://horizon-testnet.stellar.org/operations/2927608622747649 +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.operations() + .operation('2927608622747649') + .call() + .then(function (operationsResult) { + console.log(operationsResult) + }) + .catch(function (err) { + console.log(err) + }) +``` + +## Response + +This endpoint responds with a single Operation. See [operation resource](../resources/operation.md) for reference. + +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/2927608622747649" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/2927608622747649/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2927608622747649" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2927608622747649" + } + }, + "id": "2927608622747649", + "paging_token": "2927608622747649", + "transaction_successful": true, + "source_account": "GCGXZPH2QNKJP4GI2J77EFQQUMP3NYY4PCUZ4UPKHR2XYBKRUYKQ2DS6", + "type": "payment", + "type_i": 1, + "created_at": "2019-04-08T21:59:27Z", + "transaction_hash": "4a3365180521e16b478d9f0c9198b97a9434fc9cb07b34f83ecc32fc54d0ca8a", + "asset_type": "native", + "from": "GCGXZPH2QNKJP4GI2J77EFQQUMP3NYY4PCUZ4UPKHR2XYBKRUYKQ2DS6", + "to": "GDGEQS64ISS6Y2KDM5V67B6LXALJX4E7VE4MIA54NANSUX5MKGKBZM5G", + "amount": "404.0000000" +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if the + there is no operation that matches the ID argument, i.e. the operation does not exist. diff --git a/services/horizon/internal/docs/reference/endpoints/orderbook-details.md b/services/horizon/internal/docs/reference/endpoints/orderbook-details.md new file mode 100644 index 0000000000..f408aef7df --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/orderbook-details.md @@ -0,0 +1,118 @@ +--- +title: Orderbook Details +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=order_book&endpoint=details +replacement: https://developers.stellar.org/api/aggregations/order-books/ +--- + +People on the Stellar network can make [offers](../resources/offer.md) to buy or sell assets. +These offers are summarized by the assets being bought and sold in +[orderbooks](../resources/orderbook.md). + +Horizon will return, for each orderbook, a summary of the orderbook and the bids and asks +associated with that orderbook. + +This endpoint can also be used in [streaming](../streaming.md) mode so it is possible to use it to +listen as offers are processed in the Stellar network. If called in streaming mode Horizon will +start at the earliest known offer unless a `cursor` is set. In that case it will start from the +`cursor`. You can also set `cursor` value to `now` to only stream offers created since your request +time. 
+ +## Request + +``` +GET /order_book?selling_asset_type={selling_asset_type}&selling_asset_code={selling_asset_code}&selling_asset_issuer={selling_asset_issuer}&buying_asset_type={buying_asset_type}&buying_asset_code={buying_asset_code}&buying_asset_issuer={buying_asset_issuer}&limit={limit} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `selling_asset_type` | required, string | Type of the Asset being sold | `native` | +| `selling_asset_code` | optional, string | Code of the Asset being sold | `USD` | +| `selling_asset_issuer` | optional, string | Account ID of the issuer of the Asset being sold | `GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36` | +| `buying_asset_type` | required, string | Type of the Asset being bought | `credit_alphanum4` | +| `buying_asset_code` | optional, string | Code of the Asset being bought | `BTC` | +| `buying_asset_issuer` | optional, string | Account ID of the issuer of the Asset being bought | `GD6VWBXI6NY3AOOR55RLVQ4MNIDSXE5JSAVXUTF35FRRI72LYPI3WL6Z` | +| `limit` | optional, string | Limit the number of items returned | `20` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/order_book?selling_asset_type=native&buying_asset_type=credit_alphanum4&buying_asset_code=FOO&buying_asset_issuer=GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG&limit=20" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.orderbook(new StellarSdk.Asset.native(), new StellarSdk.Asset('FOO', 'GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG')) + .call() + .then(function(resp) { + console.log(resp); + }) + .catch(function(err) { + console.log(err); + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var orderbookHandler = function (orderbookResponse) { + console.log(orderbookResponse); +}; + +var es = server.orderbook(new StellarSdk.Asset.native(), new StellarSdk.Asset('FOO', 'GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG')) + .cursor('now') + .stream({ + onmessage: orderbookHandler + }) +``` + +## Response + +The summary of the orderbook and its bids and asks. + +## Example Response +```json +{ + "bids": [ + { + "price_r": { + "n": 100000000, + "d": 12953367 + }, + "price": "7.7200005", + "amount": "12.0000000" + } + ], + "asks": [ + { + "price_r": { + "n": 194, + "d": 25 + }, + "price": "7.7600000", + "amount": "238.4804125" + } + ], + "base": { + "asset_type": "native" + }, + "counter": { + "asset_type": "credit_alphanum4", + "asset_code": "FOO", + "asset_issuer": "GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG" + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). 
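+
+As a usage note, the best bid, best ask and spread can be read directly from this response, assuming the usual Horizon ordering where the first entry on each side of the book is the best price. A minimal sketch:
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+server.orderbook(
+    StellarSdk.Asset.native(),
+    new StellarSdk.Asset('FOO', 'GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG')
+  )
+  .call()
+  .then(function (orderbook) {
+    if (orderbook.bids.length === 0 || orderbook.asks.length === 0) {
+      console.log('one side of the book is empty');
+      return;
+    }
+    var bestBid = Number(orderbook.bids[0].price);
+    var bestAsk = Number(orderbook.asks[0].price);
+    console.log('best bid:', bestBid, 'best ask:', bestAsk, 'spread:', bestAsk - bestBid);
+  })
+  .catch(function (err) {
+    console.log(err);
+  });
+```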
diff --git a/services/horizon/internal/docs/reference/endpoints/path-finding-strict-receive.md b/services/horizon/internal/docs/reference/endpoints/path-finding-strict-receive.md new file mode 100644 index 0000000000..8e8444878d --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/path-finding-strict-receive.md @@ -0,0 +1,103 @@ +--- +title: Strict Receive Payment Paths +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=paths&endpoint=all +replacement: https://developers.stellar.org/api/aggregations/paths/strict-receive/ +--- + +The Stellar Network allows payments to be made across assets through _path payments_. A path +payment specifies a series of assets to route a payment through, from source asset (the asset +debited from the payer) to destination asset (the asset credited to the payee). + +A [Path Payment Strict Receive](../../../guides/concepts/list-of-operations.html#path-payment-strict-receive) allows a user to specify the *amount of the asset received*. The amount sent varies based on offers in the order books. If you would like to search for a path specifying the amount to be sent, use the [Find Payment Paths (Strict Send)](./path-finding-strict-send.html). + +A strict receive path search is specified using: + +- The source account id or source assets. +- The asset and amount that the destination account should receive. + +As part of the search, horizon will load a list of assets available to the source account id and +will find any payment paths from those source assets to the desired destination asset. The search's +amount parameter will be used to determine if a given path can satisfy a payment of the +desired amount. + +## Request + +``` +GET /paths/strict-receive?source_account={sa}&destination_asset_type={at}&destination_asset_code={ac}&destination_asset_issuer={di}&destination_amount={amount}&destination_account={da} +``` + +## Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `?source_account` | string | The sender's account id. Any returned path must use an asset that the sender has a trustline to. | `GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP` | +| `?source_assets` | string | A comma separated list of assets. Any returned path must use an asset included in this list | `USD:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V,native` | +| `?destination_account` | string | The destination account that any returned path should use | `GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V` | +| `?destination_asset_type` | string | The type of the destination asset | `credit_alphanum4` | +| `?destination_asset_code` | required if `destination_asset_type` is not `native`, string | The destination asset code, if destination_asset_type is not "native" | `USD` | +| `?destination_asset_issuer` | required if `destination_asset_type` is not `native`, string | The issuer for the destination asset, if destination_asset_type is not "native" | `GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V` | +| `?destination_amount` | string | The amount, denominated in the destination asset, that any returned path should be able to satisfy | `10.1` | + +The endpoint will not allow requests which provide both a `source_account` and a `source_assets` parameter. All requests must provide one or the other. +The assets in `source_assets` are expected to be encoded using the following format: + +XLM should be represented as `"native"`. 
Issued assets should be represented as `"Code:IssuerAccountID"`. `"Code"` must consist of alphanumeric ASCII characters.
+
+
+### curl Example Request
+
+```sh
+curl "https://horizon-testnet.stellar.org/paths/strict-receive?destination_account=GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V&source_account=GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP&destination_asset_type=native&destination_amount=20"
+```
+
+### JavaScript Example Request
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+var sourceAccount = "GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP";
+var destinationAsset = StellarSdk.Asset.native();
+var destinationAmount = "20";
+
+server.paths(sourceAccount, destinationAsset, destinationAmount)
+  .call()
+  .then(function (pathResult) {
+    console.log(pathResult.records);
+  })
+  .catch(function (err) {
+    console.log(err)
+  })
+```
+
+## Response
+
+This endpoint responds with a page of path resources. See [path resource](../resources/path.md) for reference.
+
+### Example Response
+
+```json
+{
+  "_embedded": {
+    "records": [
+      {
+        "source_asset_type": "credit_alphanum4",
+        "source_asset_code": "FOO",
+        "source_asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR",
+        "source_amount": "100.0000000",
+        "destination_asset_type": "credit_alphanum4",
+        "destination_asset_code": "FOO",
+        "destination_asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR",
+        "destination_amount": "100.0000000",
+        "path": []
+      }
+    ]
+  }
+}
+```
+
+## Possible Errors
+
+- The [standard errors](../errors.md#Standard-Errors).
+- [not_found](../errors/not-found.md): A `not_found` error will be returned if no paths could be found to fulfill this payment request
diff --git a/services/horizon/internal/docs/reference/endpoints/path-finding-strict-send.md b/services/horizon/internal/docs/reference/endpoints/path-finding-strict-send.md
new file mode 100644
index 0000000000..ff2ad9d444
--- /dev/null
+++ b/services/horizon/internal/docs/reference/endpoints/path-finding-strict-send.md
@@ -0,0 +1,105 @@
+---
+title: Strict Send Payment Paths
+clientData:
+  laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=paths&endpoint=all
+replacement: https://developers.stellar.org/api/aggregations/paths/strict-send/
+---
+
+The Stellar Network allows payments to be made across assets through _path payments_. A path
+payment specifies a series of assets to route a payment through, from source asset (the asset
+debited from the payer) to destination asset (the asset credited to the payee).
+
+A [Path Payment Strict Send](../../../guides/concepts/list-of-operations.html#path-payment-strict-send) allows a user to specify the amount of the asset to send. The amount received will vary based on offers in the order books.
+
+
+A path payment strict send search is specified using:
+
+- The destination account id or destination assets.
+- The source asset.
+- The source amount.
+
+As part of the search, horizon will load a list of assets available to the destination account id or use the destination assets passed in the request, and will find any payment paths from the source asset to those destination assets. The source amount parameter will be used to determine if a given path can satisfy a payment of the desired amount.
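+
+For illustration, the sketch below drives the search from a destination account instead of an
+explicit asset list. It assumes that the stellar-sdk `strictSendPaths` builder (used in the
+JavaScript example further down) also accepts a destination account id in place of an array of
+assets; the account id is the example value from the arguments table below:
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+// Send exactly 5 XLM and let Horizon search for paths into any asset the
+// destination account can receive (assumed SDK behaviour, see note above).
+var sourceAsset = StellarSdk.Asset.native();
+var sourceAmount = "5";
+var destinationAccount = "GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V";
+
+server.strictSendPaths(sourceAsset, sourceAmount, destinationAccount)
+  .call()
+  .then(function (pathResult) {
+    console.log(pathResult.records);
+  })
+  .catch(function (err) {
+    console.log(err);
+  });
+```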
+ +## Request + +``` +https://horizon-testnet.stellar.org/paths/strict-send?&source_amount={sa}&source_asset_type={at}&source_asset_code={ac}&source_asset_issuer={ai}&destination_account={da} +``` + +## Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `?source_amount` | string | The amount, denominated in the source asset, that any returned path should be able to satisfy | `10.1` | +| `?source_asset_type` | string | The type of the source asset | `credit_alphanum4` | +| `?source_asset_code` | string, required if `source_asset_type` is not `native`, string | The source asset code, if source_asset_type is not "native" | `USD` | +| `?source_asset_issuer` | string, required if `source_asset_type` is not `native`, string | The issuer for the source asset, if source_asset_type is not "native" | `GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V` | +| `?destination_account` | string optional | The destination account that any returned path should use | `GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V` | +| `?destination_assets` | string optional | A comma separated list of assets. Any returned path must use an asset included in this list | `USD:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V,native` | + +The endpoint will not allow requests which provide both a `destination_account` and `destination_assets` parameter. All requests must provide one or the other. +The assets in `destination_assets` are expected to be encoded using the following format: + +XLM should be represented as `"native"`. Issued assets should be represented as `"Code:IssuerAccountID"`. `"Code"` must consist of alphanumeric ASCII characters. + + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/paths/strict-send?&source_amount=10&source_asset_type=native&destination_assets=MXN:GC2GFGZ5CZCFCDJSQF3YYEAYBOS3ZREXJSPU7LUJ7JU3LP3BQNHY7YKS" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var sourceAsset = StellarSdk.Asset.native(); +var sourceAmount = "20"; +var destinationAsset = new StellarSdk.Asset( + 'USD', + 'GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN' +) + + +server.strictSendPaths(sourceAsset, sourceAmount, [destinationAsset]) + .call() + .then(function (pathResult) { + console.log(pathResult.records); + }) + .catch(function (err) { + console.log(err) + }) +``` + +## Response + +This endpoint responds with a page of path resources. See [path resource](../resources/path.md) for reference. + +### Example Response + +```json +{ + "_embedded": { + "records": [ + { + "source_asset_type": "credit_alphanum4", + "source_asset_code": "FOO", + "source_asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR", + "source_amount": "100.0000000", + "destination_asset_type": "credit_alphanum4", + "destination_asset_code": "FOO", + "destination_asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR", + "destination_amount": "100.0000000", + "path": [] + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). 
+- [not_found](../errors/not-found.md): A `not_found` error will be returned if no paths could be found to fulfill this payment request
diff --git a/services/horizon/internal/docs/reference/endpoints/path-finding.md b/services/horizon/internal/docs/reference/endpoints/path-finding.md
new file mode 100644
index 0000000000..356ec40f0f
--- /dev/null
+++ b/services/horizon/internal/docs/reference/endpoints/path-finding.md
@@ -0,0 +1,106 @@
+---
+title: Find Payment Paths
+clientData:
+  laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=paths&endpoint=all
+replacement: https://developers.stellar.org/api/aggregations/paths/
+---
+
+**Note**: This endpoint will be deprecated; use [/paths/strict-receive](./path-finding-strict-receive.md) instead. There is no difference between the two endpoints: `/paths` is an alias for `/paths/strict-receive`.
+
+
+The Stellar Network allows payments to be made across assets through _path payments_. A path
+payment specifies a series of assets to route a payment through, from source asset (the asset
+debited from the payer) to destination asset (the asset credited to the payee).
+
+A path search is specified using:
+
+- The destination account id
+- The source account id
+- The asset and amount that the destination account should receive
+
+As part of the search, horizon will load a list of assets available to the source account id and
+will find any payment paths from those source assets to the desired destination asset. The search's
+amount parameter will be used to determine if a given path can satisfy a payment of the
+desired amount.
+
+## Request
+
+```
+GET /paths?destination_account={da}&source_account={sa}&destination_asset_type={at}&destination_asset_code={ac}&destination_asset_issuer={di}&destination_amount={amount}
+```
+
+## Arguments
+
+| name | notes | description | example |
+| ---- | ----- | ----------- | ------- |
+| `?destination_account` | string | The destination account that any returned path should use | `GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V` |
+| `?destination_asset_type` | string | The type of the destination asset | `credit_alphanum4` |
+| `?destination_asset_code` | string | The destination asset code, if destination_asset_type is not "native" | `USD` |
+| `?destination_asset_issuer` | string | The issuer for the destination, if destination_asset_type is not "native" | `GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V` |
+| `?destination_amount` | string | The amount, denominated in the destination asset, that any returned path should be able to satisfy | `10.1` |
+| `?source_account` | string | The sender's account id. Any returned path must use a source that the sender can hold | `GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP` |
+| `?source_assets` | string | A comma separated list of assets. Any returned path must use a source included in this list | `USD:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V,native` |
+
+The endpoint will not allow requests which provide both a `source_account` and a `source_assets` parameter. All requests must provide one or the other.
+The assets in `source_assets` are expected to be encoded using the following format:
+
+The native asset should be represented as `"native"`. Issued assets should be represented as `"Code:IssuerAccountID"`. `"Code"` must consist of alphanumeric ASCII characters.
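+
+As an illustration of that encoding, the following sketch builds the equivalent request URL by
+hand, using `source_assets` in place of `source_account` (the account ids are the example values
+from the table above):
+
+```javascript
+// Encode the source asset list as described above: "native" for the native
+// asset, "Code:IssuerAccountID" for issued assets, joined with commas.
+var sourceAssets = [
+  'USD:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V',
+  'native'
+].join(',');
+
+var url = 'https://horizon-testnet.stellar.org/paths' +
+  '?source_assets=' + encodeURIComponent(sourceAssets) +
+  '&destination_account=GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V' +
+  '&destination_asset_type=native' +
+  '&destination_amount=20';
+
+console.log(url);
+```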
+ + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/paths?destination_account=GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V&source_account=GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP&destination_asset_type=native&destination_amount=20" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var source_account = "GARSFJNXJIHO6ULUBK3DBYKVSIZE7SC72S5DYBCHU7DKL22UXKVD7MXP"; +var destination_account = "GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V"; +var destination_asset = StellarSdk.Asset.native(); +var destination_amount = "20"; + +server.paths(source_account, destination_account, destination_asset, destination_amount) + .call() + .then(function (pathResult) { + console.log(pathResult.records); + }) + .catch(function (err) { + console.log(err) + }) +``` + +## Response + +This endpoint responds with a page of path resources. See [path resource](../resources/path.md) for reference. + +### Example Response + +```json +{ + "_embedded": { + "records": [ + { + "source_asset_type": "credit_alphanum4", + "source_asset_code": "FOO", + "source_asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR", + "source_amount": "100.0000000", + "destination_asset_type": "credit_alphanum4", + "destination_asset_code": "FOO", + "destination_asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR", + "destination_amount": "100.0000000", + "path": [] + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if no paths could be found to fulfill this payment request diff --git a/services/horizon/internal/docs/reference/endpoints/payments-all.md b/services/horizon/internal/docs/reference/endpoints/payments-all.md new file mode 100644 index 0000000000..4df10b3c81 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/payments-all.md @@ -0,0 +1,198 @@ +--- +title: All Payments +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=payments&endpoint=all +--- + +This endpoint represents all payment-related [operations](../resources/operation.md) that are part +of validated [transactions](../resources/transaction.md). This endpoint can also be used in +[streaming](../streaming.md) mode so it is possible to use it to listen for new payments as they +get made in the Stellar network. + +If called in streaming mode Horizon will start at the earliest known payment unless a `cursor` is +set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only +stream payments created since your request time. + +The operations that can be returned in by this endpoint are: +- `create_account` +- `payment` +- `path_payment` +- `account_merge` + +## Request + +``` +GET /payments{?cursor,limit,order,include_failed} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream object created since your request time. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. 
| `200` | +| `?include_failed` | optional, bool, default: `false` | Set to `true` to include payments of failed transactions in results. | `true` | +| `?join` | optional, string, default: _null_ | Set to `transactions` to include the transactions which created each of the payments in the response. | `transactions` | + +### curl Example Request + +```sh +# Retrieve the first 200 payments, ordered chronologically. +curl "https://horizon-testnet.stellar.org/payments?limit=200" +``` + +```sh +# Retrieve a page of payments to occur immediately before the transaction +# specified by the paging token "1234". +curl "https://horizon-testnet.stellar.org/payments?cursor=1234&order=desc" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.payments() + .call() + .then(function (paymentResults) { + console.log(paymentResults.records) + }) + .catch(function (err) { + console.log(err) + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var paymentHandler = function (paymentResponse) { + console.log(paymentResponse); +}; + +var es = server.payments() + .cursor('now') + .stream({ + onmessage: paymentHandler + }) +``` + +## Response + +This endpoint responds with a list of payments. See [operation resource](../resources/operation.md) for more information about operations (and payment operations). + +### Example Response + +```json +{ + "_embedded": { + "records": [ + { + "_links": { + "effects": { + "href": "/operations/77309415424/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=77309415424&order=asc" + }, + "self": { + "href": "/operations/77309415424" + }, + "succeeds": { + "href": "/operations?cursor=77309415424&order=desc" + }, + "transactions": { + "href": "/transactions/77309415424" + } + }, + "account": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "funder": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "id": 77309415424, + "paging_token": "77309415424", + "starting_balance": 1e+14, + "type_i": 0, + "type": "create_account" + }, + { + "_links": { + "effects": { + "href": "/operations/463856472064/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=463856472064&order=asc" + }, + "self": { + "href": "/operations/463856472064" + }, + "succeeds": { + "href": "/operations?cursor=463856472064&order=desc" + }, + "transactions": { + "href": "/transactions/463856472064" + } + }, + "account": "GC2ADYAIPKYQRGGUFYBV2ODJ54PY6VZUPKNCWWNX2C7FCJYKU4ZZNKVL", + "funder": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "id": 463856472064, + "paging_token": "463856472064", + "starting_balance": 1e+09, + "type_i": 0, + "type": "create_account" + } + ] + }, + "_links": { + "next": { + "href": "?order=asc&limit=2&cursor=463856472064" + }, + "prev": { + "href": "?order=desc&limit=2&cursor=77309415424" + }, + "self": { + "href": "?order=asc&limit=2&cursor=" + } + } +} +``` + +### Example Streaming Event + +```json +{ + "_links": { + "effects": { + "href": "/operations/77309415424/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=77309415424&order=asc" + }, + "self": { + "href": "/operations/77309415424" + }, + "succeeds": { + "href": 
"/operations?cursor=77309415424&order=desc" + }, + "transactions": { + "href": "/transactions/77309415424" + } + }, + "account": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "funder": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "id": 77309415424, + "paging_token": "77309415424", + "starting_balance": 1e+14, + "type_i": 0, + "type": "create_account" +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). diff --git a/services/horizon/internal/docs/reference/endpoints/payments-for-account.md b/services/horizon/internal/docs/reference/endpoints/payments-for-account.md new file mode 100644 index 0000000000..3bd6bd4bb6 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/payments-for-account.md @@ -0,0 +1,168 @@ +--- +title: Payments for Account +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=payments&endpoint=for_account +replacement: https://developers.stellar.org/api/resources/accounts/payments/ +--- + +This endpoint responds with a collection of payment-related operations where the given +[account](../resources/account.md) was either the sender or receiver. + +This endpoint can also be used in [streaming](../streaming.md) mode so it is possible to use it to +listen for new payments to or from an account as they get made in the Stellar network. +If called in streaming mode Horizon will start at the earliest known payment unless a `cursor` is +set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only +stream payments created since your request time. + +The operations that can be returned in by this endpoint are: +- `create_account` +- `payment` +- `path_payment` +- `account_merge` + +## Request + +``` +GET /accounts/{id}/payments{?cursor,limit,order} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `id` | required, string | The account id of the account used to constrain results. | `GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ` | +| `?cursor` | optional, default _null_ | A payment paging token specifying from where to begin results. When streaming this can be set to `now` to stream object created since your request time. | `8589934592` | +| `?limit` | optional, number, default `10` | Specifies the count of records at most to return. | `200` | +| `?order` | optional, string, default `asc` | Specifies order of returned results. `asc` means older payments first, `desc` mean newer payments first. | `desc` | +| `?include_failed` | optional, bool, default: `false` | Set to `true` to include payments of failed transactions in results. | `true` | +| `?join` | optional, string, default: _null_ | Set to `transactions` to include the transactions which created each of the payments in the response. | `transactions` | + +### curl Example Request + +```bash +# Retrieve the 25 latest payments for a specific account. 
+curl "https://horizon-testnet.stellar.org/accounts/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments?limit=25&order=desc" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.payments() + .forAccount("GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ") + .call() + .then(function (accountResult) { + console.log(accountResult); + }) + .catch(function (err) { + console.error(err); + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var paymentHandler = function (paymentResponse) { + console.log(paymentResponse); +}; + +var es = server.payments() + .forAccount("GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ") + .cursor('now') + .stream({ + onmessage: paymentHandler + }) +``` + +## Response + +This endpoint responds with a [page](../resources/page.md) of [payment operations](../resources/operation.md). + +### Example Response + +```json +{"_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "/operations/12884905984" + }, + "transaction": { + "href": "/transaction/6391dd190f15f7d1665ba53c63842e368f485651a53d8d852ed442a446d1c69a" + }, + "precedes": { + "href": "/accounts/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments?cursor=12884905984&order=asc{?limit}", + "templated": true + }, + "succeeds": { + "href": "/accounts/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments?cursor=12884905984&order=desc{?limit}", + "templated": true + } + }, + "id": 12884905984, + "paging_token": "12884905984", + "type_i": 0, + "type": "payment", + "sender": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "receiver": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + "asset": { + "code": "XLM" + }, + "amount": 1000000000, + "amount_f": 100.00 + } + ] +}, +"_links": { + "next": { + "href": "/accounts/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments?cursor=12884905984&order=asc{?limit}", + "templated": true + }, + "self": { + "href": "/accounts/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments" + } +} +} +``` + +### Example Streaming Event + +```json +{ + "_links": { + "effects": { + "href": "/operations/77309415424/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=77309415424&order=asc" + }, + "self": { + "href": "/operations/77309415424" + }, + "succeeds": { + "href": "/operations?cursor=77309415424&order=desc" + }, + "transactions": { + "href": "/transactions/77309415424" + } + }, + "account": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "funder": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "id": 77309415424, + "paging_token": "77309415424", + "starting_balance": 1e+14, + "type_i": 0, + "type": "create_account" +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). 
diff --git a/services/horizon/internal/docs/reference/endpoints/payments-for-ledger.md b/services/horizon/internal/docs/reference/endpoints/payments-for-ledger.md
new file mode 100644
index 0000000000..52cc95ffb6
--- /dev/null
+++ b/services/horizon/internal/docs/reference/endpoints/payments-for-ledger.md
@@ -0,0 +1,119 @@
+---
+title: Payments for Ledger
+clientData:
+  laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=payments&endpoint=for_ledger
+replacement: https://developers.stellar.org/api/resources/ledgers/payments/
+---
+
+This endpoint represents all payment-related [operations](../resources/operation.md) that are part
+of valid [transactions](../resources/transaction.md) in a given [ledger](../resources/ledger.md).
+
+The operations that can be returned by this endpoint are:
+- `create_account`
+- `payment`
+- `path_payment`
+- `account_merge`
+
+## Request
+
+```
+GET /ledgers/{id}/payments{?cursor,limit,order,include_failed}
+```
+
+### Arguments
+
+| name | notes | description | example |
+| ------ | ------- | ----------- | ------- |
+| `id` | required, number | Ledger ID | `696960` |
+| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` |
+| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` |
+| `?limit` | optional, number, default `10` | Maximum number of records to return. | `200` |
+| `?include_failed` | optional, bool, default: `false` | Set to `true` to include payments of failed transactions in results. | `true` |
+| `?join` | optional, string, default: _null_ | Set to `transactions` to include the transactions which created each of the payments in the response. | `transactions` |
+
+### curl Example Request
+
+```sh
+curl "https://horizon-testnet.stellar.org/ledgers/696960/payments?limit=1"
+```
+
+### JavaScript Example Request
+
+```javascript
+var StellarSdk = require('stellar-sdk')
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+server.payments()
+  .forLedger("696960")
+  .call()
+  .then(function (paymentResult) {
+    console.log(paymentResult)
+  })
+  .catch(function (err) {
+    console.log(err)
+  })
+```
+
+## Response
+
+This endpoint responds with a list of payment operations in a given ledger. See [operation
+resource](../resources/operation.md) for more information about operations (and payment
+operations).
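+
+If the request set `include_failed=true`, payments from failed transactions can appear here as
+well; the `transaction_successful` flag on each record (shown in the example below) tells them
+apart. A minimal sketch of filtering on that flag:
+
+```javascript
+// Keep only payments whose enclosing transaction actually succeeded.
+function successfulPayments(records) {
+  return records.filter(function (op) {
+    return op.transaction_successful;
+  });
+}
+```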
+ +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/ledgers/696960/payments?cursor=&limit=1&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/ledgers/696960/payments?cursor=2993420406628353&limit=1&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/ledgers/696960/payments?cursor=2993420406628353&limit=1&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/2993420406628353" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/2993420406628353/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2993420406628353" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2993420406628353" + } + }, + "id": "2993420406628353", + "paging_token": "2993420406628353", + "transaction_successful": true, + "source_account": "GAYB4GWPX2HUWR5QE7YX77QY6TSNFZIJZTYX2TDRW6YX6332BGD5SEAK", + "type": "payment", + "type_i": 1, + "created_at": "2019-04-09T20:00:54Z", + "transaction_hash": "f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5", + "asset_type": "native", + "from": "GAYB4GWPX2HUWR5QE7YX77QY6TSNFZIJZTYX2TDRW6YX6332BGD5SEAK", + "to": "GDGEQS64ISS6Y2KDM5V67B6LXALJX4E7VE4MIA54NANSUX5MKGKBZM5G", + "amount": "293.0000000" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no ledger whose ID matches the `id` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/payments-for-transaction.md b/services/horizon/internal/docs/reference/endpoints/payments-for-transaction.md new file mode 100644 index 0000000000..29b557eb4c --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/payments-for-transaction.md @@ -0,0 +1,124 @@ +--- +title: Payments for Transaction +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=payments&endpoint=for_transaction +--- + +This endpoint represents all payment-related [operations](../resources/operation.md) that are part +of a given [transaction](../resources/transaction.md). + +The operations that can be returned in by this endpoint are: +- `create_account` +- `payment` +- `path_payment` +- `account_merge` + +### Warning - failed transactions + +"Payments for Transaction" endpoint returns list of payments of successful or failed transactions +(that are also included in Stellar ledger). Always check the payment status in this endpoint using +`transaction_successful` field! + +## Request + +``` +GET /transactions/{hash}/payments{?cursor,limit,order} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `hash` | required, string | A transaction hash, hex-encoded, lowercase. | `f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5` | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. 
| `200` | +| `?join` | optional, string, default: _null_ | Set to `transactions` to include the transactions which created each of the payments in the response. | `transactions` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/transactions/f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5/payments" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.payments() + .forTransaction("f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5") + .call() + .then(function (paymentResult) { + console.log(paymentResult.records); + }) + .catch(function (err) { + console.log(err); + }) +``` + +## Response + +This endpoint responds with a list of payments operations that are part of a given transaction. See +[operation resource](../resources/operation.md) for more information about operations (and payment +operations). + +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5/payments?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/transactions/f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5/payments?cursor=2993420406628353&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/transactions/f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5/payments?cursor=2993420406628353&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/2993420406628353" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/2993420406628353/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2993420406628353" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2993420406628353" + } + }, + "id": "2993420406628353", + "paging_token": "2993420406628353", + "transaction_successful": true, + "source_account": "GAYB4GWPX2HUWR5QE7YX77QY6TSNFZIJZTYX2TDRW6YX6332BGD5SEAK", + "type": "payment", + "type_i": 1, + "created_at": "2019-04-09T20:00:54Z", + "transaction_hash": "f65278b36875d170e865853838da400515f59ca23836f072e8d62cac18b803e5", + "asset_type": "native", + "from": "GAYB4GWPX2HUWR5QE7YX77QY6TSNFZIJZTYX2TDRW6YX6332BGD5SEAK", + "to": "GDGEQS64ISS6Y2KDM5V67B6LXALJX4E7VE4MIA54NANSUX5MKGKBZM5G", + "amount": "293.0000000" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no + transaction whose ID matches the `hash` argument. 
diff --git a/services/horizon/internal/docs/reference/endpoints/trade_aggregations.md b/services/horizon/internal/docs/reference/endpoints/trade_aggregations.md new file mode 100644 index 0000000000..47a4c3fef8 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/trade_aggregations.md @@ -0,0 +1,155 @@ +--- +title: Trade Aggregations +replacement: https://developers.stellar.org/api/aggregations/trade-aggregations/ +--- + +Trade Aggregations are catered specifically for developers of trading clients. They facilitate +efficient gathering of historical trade data. This is done by dividing a given time range into +segments and aggregating statistics, for a given asset pair (`base`, `counter`) over each of these +segments. + +The duration of the segments is specified with the `resolution` parameter. The start and end of the +time range are given by `startTime` and `endTime` respectively, which are both rounded to the +nearest multiple of `resolution` since epoch. + +The individual segments are also aligned with multiples of `resolution` since epoch. If you want to +change this alignment, the segments can be offset by specifying the `offset` parameter. + + +## Request + +``` +GET /trade_aggregations?base_asset_type={base_asset_type}&base_asset_code={base_asset_code}&base_asset_issuer={base_asset_issuer}&counter_asset_type={counter_asset_type}&counter_asset_code={counter_asset_code}&counter_asset_issuer={counter_asset_issuer} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `start_time` | long | lower time boundary represented as millis since epoch | 1512689100000 | +| `end_time` | long | upper time boundary represented as millis since epoch | 1512775500000 | +| `resolution` | long | segment duration as millis. *Supported values are 1 minute (60000), 5 minutes (300000), 15 minutes (900000), 1 hour (3600000), 1 day (86400000) and 1 week (604800000).* | 300000 | +| `offset` | long | segments can be offset using this parameter. Expressed in milliseconds. Can only be used if the resolution is greater than 1 hour. *Value must be in whole hours, less than the provided resolution, and less than 24 hours.* | 3600000 (1 hour) | +| `base_asset_type` | string | Type of base asset | `native` | +| `base_asset_code` | string | Code of base asset, not required if type is `native` | `USD` | +| `base_asset_issuer` | string | Issuer of base asset, not required if type is `native` | 'GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36' | +| `counter_asset_type` | string | Type of counter asset | `credit_alphanum4` | +| `counter_asset_code` | string | Code of counter asset, not required if type is `native` | `BTC` | +| `counter_asset_issuer` | string | Issuer of counter asset, not required if type is `native` | 'GATEMHCCKCY67ZUCKTROYN24ZYT5GK4EQZ65JJLDHKHRUZI3EUEKMTCH' | +| `?order` | optional, string, default `asc` | The order, in terms of timeline, in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. 
| `200` | + +### curl Example Request +```sh +curl https://horizon.stellar.org/trade_aggregations?base_asset_type=native&counter_asset_code=SLT&counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&counter_asset_type=credit_alphanum4&limit=200&order=asc&resolution=3600000&start_time=1517521726000&end_time=1517532526000 +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon.stellar.org'); + +var base = new StellarSdk.Asset.native(); +var counter = new StellarSdk.Asset("SLT", "GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP"); +var startTime = 1517521726000; +var endTime = 1517532526000; +var resolution = 3600000; +var offset = 0; + +server.tradeAggregation(base, counter, startTime, endTime, resolution, offset) + .call() + .then(function (tradeAggregation) { + console.log(tradeAggregation); + }) + .catch(function (err) { + console.log(err); + }) +``` + +## Response + +A list of collected trade aggregations. + +Note +- Segments that fit into the time range but have 0 trades in them, will not be included. +- Partial segments, in the beginning and end of the time range, will not be included. Thus if your + start time is noon Wednesday, your end time is noon Thursday, and your resolution is one day, you + will not receive back any data. Instead, you would want to either start at midnight Wednesday and + midnight Thursday, or shorten the resolution interval to better cover your time frame. + +### Example Response +```json +{ + "_links": { + "self": { + "href": "https://horizon.stellar.org/trade_aggregations?base_asset_type=native\u0026counter_asset_code=SLT\u0026counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP\u0026counter_asset_type=credit_alphanum4\u0026limit=200\u0026order=asc\u0026resolution=3600000\u0026start_time=1517521726000\u0026end_time=1517532526000" + }, + "next": { + "href": "https://horizon.stellar.org/trade_aggregations?base_asset_type=native\u0026counter_asset_code=SLT\u0026counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP\u0026counter_asset_type=credit_alphanum4\u0026end_time=1517532526000\u0026limit=200\u0026order=asc\u0026resolution=3600000\u0026start_time=1517529600000" + } + }, + "_embedded": { + "records": [ + { + "timestamp": 1517522400000, + "trade_count": 26, + "base_volume": "27575.0201596", + "counter_volume": "5085.6410385", + "avg": "0.1844293", + "high": "0.1915709", + "high_r": { + "N": 50, + "D": 261 + }, + "low": "0.1506024", + "low_r": { + "N": 25, + "D": 166 + }, + "open": "0.1724138", + "open_r": { + "N": 5, + "D": 29 + }, + "close": "0.1506024", + "close_r": { + "N": 25, + "D": 166 + } + }, + { + "timestamp": 1517526000000, + "trade_count": 15, + "base_volume": "3913.8224543", + "counter_volume": "719.4993608", + "avg": "0.1838355", + "high": "0.1960784", + "high_r": { + "N": 10, + "D": 51 + }, + "low": "0.1506024", + "low_r": { + "N": 25, + "D": 166 + }, + "open": "0.1869159", + "open_r": { + "N": 20, + "D": 107 + }, + "close": "0.1515152", + "close_r": { + "N": 5, + "D": 33 + } + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). 
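+
+Following the note above about partial segments, one client-side way to avoid them is to align the
+requested range to whole `resolution` buckets before calling the endpoint. This is a minimal
+sketch of that arithmetic; the helper name is ours and not part of any SDK:
+
+```javascript
+// Round start_time up and end_time down to multiples of resolution (plus the
+// optional offset) so only whole segments fall inside the requested range.
+function alignRange(startTime, endTime, resolution, offset) {
+  offset = offset || 0;
+  var alignedStart = Math.ceil((startTime - offset) / resolution) * resolution + offset;
+  var alignedEnd = Math.floor((endTime - offset) / resolution) * resolution + offset;
+  return { start_time: alignedStart, end_time: alignedEnd };
+}
+
+// With the 1 hour resolution used above:
+// alignRange(1517521726000, 1517532526000, 3600000)
+//   => { start_time: 1517522400000, end_time: 1517529600000 }
+```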
diff --git a/services/horizon/internal/docs/reference/endpoints/trades-for-account.md b/services/horizon/internal/docs/reference/endpoints/trades-for-account.md new file mode 100644 index 0000000000..1290ac68ab --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/trades-for-account.md @@ -0,0 +1,140 @@ +--- +title: Trades for Account +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=trades&endpoint=for_account +replacement: https://developers.stellar.org/api/resources/accounts/trades/ +--- + +This endpoint represents all [trades](../resources/trade.md) that affect a given [account](../resources/account.md). + +This endpoint can also be used in [streaming](../streaming.md) mode, making it possible to listen for new trades that affect the given account as they occur on the Stellar network. +If called in streaming mode Horizon will start at the earliest known trade unless a `cursor` is set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only stream trades created since your request time. + +## Request + +``` +GET /accounts/{account_id}/trades{?cursor,limit,order} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `account_id` | required, string | ID of an account | GBYTR4MC5JAX4ALGUBJD7EIKZVM7CUGWKXIUJMRSMK573XH2O7VAK3SR | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream object created since your request time. | 12884905984 | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. | `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/accounts/GBYTR4MC5JAX4ALGUBJD7EIKZVM7CUGWKXIUJMRSMK573XH2O7VAK3SR/trades?limit=1" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.trades() + .forAccount("GBYTR4MC5JAX4ALGUBJD7EIKZVM7CUGWKXIUJMRSMK573XH2O7VAK3SR") + .call() + .then(function (accountResult) { + console.log(accountResult); + }) + .catch(function (err) { + console.error(err); + }) +``` + + +## Response + +This endpoint responds with a list of trades that changed a given account's state. See the [trade resource](../resources/trade.md) for reference. 
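+
+Since each trade record names both sides, a consumer usually wants to know which side the queried
+account was on and at what price. This is a minimal sketch over the `records` array of a response
+like the one below:
+
+```javascript
+// Report, for each trade, whether the given account was the base or the
+// counter party, plus the price in counter units per base unit (price.n/price.d).
+function describeTrades(records, accountId) {
+  return records.map(function (trade) {
+    return {
+      id: trade.id,
+      side: trade.base_account === accountId ? 'base' : 'counter',
+      price: trade.price.n / trade.price.d
+    };
+  });
+}
+```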
+ +### Example Response +```json +{ + "_links": { + "self": { + "href": "/accounts/GBYTR4MC5JAX4ALGUBJD7EIKZVM7CUGWKXIUJMRSMK573XH2O7VAK3SR/trades?cursor=\u0026limit=1\u0026order=asc" + }, + "next": { + "href": "/accounts/GBYTR4MC5JAX4ALGUBJD7EIKZVM7CUGWKXIUJMRSMK573XH2O7VAK3SR/trades?cursor=940258535411713-0\u0026limit=1\u0026order=asc" + }, + "prev": { + "href": "/accounts/GBYTR4MC5JAX4ALGUBJD7EIKZVM7CUGWKXIUJMRSMK573XH2O7VAK3SR/trades?cursor=940258535411713-0\u0026limit=1\u0026order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "" + }, + "base": { + "href": "/accounts/GBYTR4MC5JAX4ALGUBJD7EIKZVM7CUGWKXIUJMRSMK573XH2O7VAK3SR" + }, + "counter": { + "href": "/accounts/GBOOAYCAJIN7YCUUAHEQJJARNQMRUP4P2WXVO6P4KAMAB27NGA3CYTZU" + }, + "operation": { + "href": "/operations/940258535411713" + } + }, + "id": "940258535411713-0", + "paging_token": "940258535411713-0", + "ledger_close_time": "2017-03-30T13:20:41Z", + "offer_id": "8", + "base_offer_id": "8", + "base_account": "GBYTR4MC5JAX4ALGUBJD7EIKZVM7CUGWKXIUJMRSMK573XH2O7VAK3SR", + "base_amount": "1.0000000", + "base_asset_type": "credit_alphanum4", + "base_asset_code": "BTC", + "base_asset_issuer": "GB6FN4C7ZLWKENAOZDLZOQHNIOK4RDMV6EKLR53LWCHEBR6LVXOEKDZH", + "counter_offer_id": "4611686044197195777", + "counter_account": "GBOOAYCAJIN7YCUUAHEQJJARNQMRUP4P2WXVO6P4KAMAB27NGA3CYTZU", + "counter_amount": "1.0000000", + "counter_asset_type": "native", + "base_is_seller": true, + "price": { + "n": 1, + "d": 1 + } + } + ] + } +} +``` + +## Example Streaming Event +``` +{ + _links: + { self: { href: '' }, + base: { href: '/accounts/GDICGE2CFCNM3ZWRUVOWDJB2RAO667UE7WOSJJ2Z3IMISUA7CJZCE3KO' }, + counter: { href: '/accounts/GBILENMVJPVPEPXUPUPRBUEAME5OUQWAHIGZAX7TQX65NIQW3G3DGUYX' }, + operation: { href: '/operations/47274327069954049' } }, + id: '47274327069954049-0', + paging_token: '47274327069954049-0', + ledger_close_time: '2018-09-12T00:00:34Z', + offer_id: '711437', + base_account: 'GDICGE2CFCNM3ZWRUVOWDJB2RAO667UE7WOSJJ2Z3IMISUA7CJZCE3KO', + base_amount: '13.0000000', + base_asset_type: 'native', + counter_account: 'GBILENMVJPVPEPXUPUPRBUEAME5OUQWAHIGZAX7TQX65NIQW3G3DGUYX', + counter_amount: '13.0000000', + counter_asset_type: 'credit_alphanum4', + counter_asset_code: 'CNY', + counter_asset_issuer: 'GAREELUB43IRHWEASCFBLKHURCGMHE5IF6XSE7EXDLACYHGRHM43RFOX', + base_is_seller: true, + price: { n: 1, d: 1 } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no account whose ID matches the `account_id` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/trades-for-offer.md b/services/horizon/internal/docs/reference/endpoints/trades-for-offer.md new file mode 100644 index 0000000000..e6533e1cc1 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/trades-for-offer.md @@ -0,0 +1,139 @@ +--- +title: Trades for Offer +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=trades&endpoint=for_offer +--- + +This endpoint represents all [trades](../resources/trade.md) for a given [offer](../resources/offer.md). + +This endpoint can also be used in [streaming](../streaming.md) mode, making it possible to listen for new trades for the given offer as they occur on the Stellar network. +If called in streaming mode Horizon will start at the earliest known trade unless a `cursor` is set. In that case it will start from the `cursor`. 
You can also set `cursor` value to `now` to only stream trades created since your request time. +## Request + +``` +GET /offers/{offer_id}/trades{?cursor,limit,order} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `offer_id` | required, number | ID of an offer | 323223 | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. | 12884905984 | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. | `200` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/offers/323223/trades" +``` + +### JavaScript Example Request + +```js +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.trades() + .forOffer(323223) + .call() + .then(function (tradesResult) { + console.log(tradesResult); + }) + .catch(function (err) { + console.error(err); + }) +``` + + +## Response + +This endpoint responds with a list of trades that consumed a given offer. See the [trade resource](../resources/trade.md) for reference. + +### Example Response +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/323223/trades?cursor=\u0026limit=10\u0026order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/offers/323223/trades?cursor=35789107779080193-0\u0026limit=10\u0026order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/offers/323223/trades?cursor=35789107779080193-0\u0026limit=10\u0026order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "" + }, + "base": { + "href": "https://horizon-testnet.stellar.org/accounts/GDRCFIQAUEFUQ6GXF5DPRO2M77E4UB7RW7EWI2FTKOW7CWYKZCHSI75K" + }, + "counter": { + "href": "https://horizon-testnet.stellar.org/accounts/GCUD7CBKTQI4D7ZR7IKHMGXZKKVABML7XFBHV4AIYBOEN5UQFZ5DSPPT" + }, + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/35789107779080193" + } + }, + "id": "35789107779080193-0", + "paging_token": "35789107779080193-0", + "ledger_close_time": "2018-04-08T05:58:37Z", + "base_offer_id": "323223", + "base_account": "GDRCFIQAUEFUQ6GXF5DPRO2M77E4UB7RW7EWI2FTKOW7CWYKZCHSI75K", + "base_amount": "912.6607285", + "base_asset_type": "native", + "counter_offer_id": "4611686044197195777", + "counter_account": "GCUD7CBKTQI4D7ZR7IKHMGXZKKVABML7XFBHV4AIYBOEN5UQFZ5DSPPT", + "counter_amount": "16.5200719", + "counter_asset_type": "credit_alphanum4", + "counter_asset_code": "CM10", + "counter_asset_issuer": "GBUJJAYHS64L4RDHPLURQJUKSHHPINSAYXYVMWPEF4LECHDKB2EFMKBX", + "base_is_seller": true, + "price": { + "n": 18101, + "d": 1000000 + } + } + ] + } +} +``` + +## Example Streaming Event +```cgo +{ _links: + { self: { href: '' }, + base: + { href: '/accounts/GDJNMHET4DTS7HUHU7IG5DB274OSMHUYA7TRRKOD6ZABHPUW5YWJ4SUD' }, + counter: + { href: '/accounts/GCALYDRCCJEUPMV24TAX2N2N3IBX7NUUYZNM7I5FQS5GIEQ4A7EVKUOP' }, + operation: { href: '/operations/47261068505915393' } }, + id: '47261068505915393-0', + paging_token: '47261068505915393-0', + ledger_close_time: '2018-09-11T19:42:04Z', + offer_id: '734529', + base_account: 'GDJNMHET4DTS7HUHU7IG5DB274OSMHUYA7TRRKOD6ZABHPUW5YWJ4SUD', + base_amount: '0.0175999', + base_asset_type: 'credit_alphanum4', + base_asset_code: 'BOC', + base_asset_issuer: 
'GCTS32RGWRH6RJM62UVZ4UT5ZN5L6B2D3LPGO6Z2NM2EOGVQA7TA6SKO', + counter_account: 'GCALYDRCCJEUPMV24TAX2N2N3IBX7NUUYZNM7I5FQS5GIEQ4A7EVKUOP', + counter_amount: '0.0199998', + counter_asset_type: 'credit_alphanum4', + counter_asset_code: 'ABC', + counter_asset_issuer: 'GCTS32RGWRH6RJM62UVZ4UT5ZN5L6B2D3LPGO6Z2NM2EOGVQA7TA6SKO', + base_is_seller: true, + price: { n: 2840909, d: 2500000 } +} +``` +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no offer whose ID matches the `offer_id` argument. diff --git a/services/horizon/internal/docs/reference/endpoints/trades.md b/services/horizon/internal/docs/reference/endpoints/trades.md new file mode 100644 index 0000000000..ab7760f3e9 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/trades.md @@ -0,0 +1,178 @@ +--- +title: Trades +replacement: https://developers.stellar.org/api/resources/trades/ +--- + +People on the Stellar network can make [offers](../resources/offer.md) to buy or sell assets. When +an offer is fully or partially fulfilled, a [trade](../resources/trade.md) happens. + +Trades can be filtered for a specific orderbook, defined by an asset pair: `base` and `counter`. + +This endpoint can also be used in [streaming](../streaming.md) mode, making it possible to listen +for new trades as they occur on the Stellar network. + +If called in streaming mode Horizon will start at the earliest known trade unless a `cursor` is +set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to only +stream trades created since your request time. + +## Request + +``` +GET /trades?base_asset_type={base_asset_type}&base_asset_code={base_asset_code}&base_asset_issuer={base_asset_issuer}&counter_asset_type={counter_asset_type}&counter_asset_code={counter_asset_code}&counter_asset_issuer={counter_asset_issuer} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `base_asset_type` | optional, string | Type of base asset | `native` | +| `base_asset_code` | optional, string | Code of base asset, not required if type is `native` | `USD` | +| `base_asset_issuer` | optional, string | Issuer of base asset, not required if type is `native` | 'GA2HGBJIJKI6O4XEM7CZWY5PS6GKSXL6D34ERAJYQSPYA6X6AI7HYW36' | +| `counter_asset_type` | optional, string | Type of counter asset | `credit_alphanum4` | +| `counter_asset_code` | optional, string | Code of counter asset, not required if type is `native` | `BTC` | +| `counter_asset_issuer` | optional, string | Issuer of counter asset, not required if type is `native` | 'GD6VWBXI6NY3AOOR55RLVQ4MNIDSXE5JSAVXUTF35FRRI72LYPI3WL6Z' | +| `offer_id` | optional, string | filter for by a specific offer id | `283606` | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order, in terms of timeline, in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. 
| `200` | + +### curl Example Request +```sh +curl https://horizon.stellar.org/trades?base_asset_type=native&counter_asset_code=SLT&counter_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP&counter_asset_type=credit_alphanum4&limit=2&order=desc +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.trades() + .call() + .then(function (tradesResult) { + console.log(tradesResult.records); + }) + .catch(function (err) { + console.log(err) + }) +``` + +### JavaScript Example Streaming Request + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var tradesHandler = function (tradeResponse) { + console.log(tradeResponse); +}; + +var es = server.trades() + .cursor('now') + .stream({ + onmessage: tradesHandler +}) +``` + +## Response + +The list of trades. `base` and `counter` in the records will match the asset pair filter order. If an asset pair is not specified, the order is arbitrary. + +### Example Response +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/trades?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/trades?cursor=6025839120434-0&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/trades?cursor=6012954218535-0&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "" + }, + "base": { + "href": "https://horizon-testnet.stellar.org/accounts/GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C" + }, + "counter": { + "href": "https://horizon-testnet.stellar.org/accounts/GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC" + }, + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/6012954218535" + } + }, + "id": "6012954218535-0", + "paging_token": "6012954218535-0", + "ledger_close_time": "2019-02-27T11:54:53Z", + "offer_id": "37", + "base_offer_id": "4611692031381606439", + "base_account": "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", + "base_amount": "25.6687300", + "base_asset_type": "credit_alphanum4", + "base_asset_code": "DSQ", + "base_asset_issuer": "GBDQPTQJDATT7Z7EO4COS4IMYXH44RDLLI6N6WIL5BZABGMUOVMLWMQF", + "counter_offer_id": "37", + "counter_account": "GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC", + "counter_amount": "1.0265563", + "counter_asset_type": "credit_alphanum4", + "counter_asset_code": "USD", + "counter_asset_issuer": "GAA4MFNZGUPJAVLWWG6G5XZJFZDHLKQNG3Q6KB24BAD6JHNNVXDCF4XG", + "base_is_seller": false, + "price": { + "n": 10000000, + "d": 250046977 + } + }, + { + "_links": { + "self": { + "href": "" + }, + "base": { + "href": "https://horizon-testnet.stellar.org/accounts/GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C" + }, + "counter": { + "href": "https://horizon-testnet.stellar.org/accounts/GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC" + }, + "operation": { + "href": "https://horizon-testnet.stellar.org/operations/6025839120385" + } + }, + "id": "6025839120385-0", + "paging_token": "6025839120385-0", + "ledger_close_time": "2019-02-27T11:55:09Z", + "offer_id": "1", + "base_offer_id": "4611692044266508289", + "base_account": "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", + "base_amount": "1434.4442973", + "base_asset_type": "credit_alphanum4", + "base_asset_code": "DSQ", + 
"base_asset_issuer": "GBDQPTQJDATT7Z7EO4COS4IMYXH44RDLLI6N6WIL5BZABGMUOVMLWMQF", + "counter_offer_id": "1", + "counter_account": "GCYN7MI6VXVRP74KR6MKBAW2ELLCXL6QCY5H4YQ62HVWZWMCE6Y232UC", + "counter_amount": "0.5622050", + "counter_asset_type": "credit_alphanum4", + "counter_asset_code": "SXRT", + "counter_asset_issuer": "GAIOQ3UYK5NYIZY5ZFAG4JBN4O37NAVFKZM5YDYEB6YEFBZSZ5KDCUFO", + "base_is_seller": false, + "price": { + "n": 642706, + "d": 1639839483 + } + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). diff --git a/services/horizon/internal/docs/reference/endpoints/transactions-all.md b/services/horizon/internal/docs/reference/endpoints/transactions-all.md new file mode 100644 index 0000000000..aac65c4950 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/transactions-all.md @@ -0,0 +1,189 @@ +--- +title: All Transactions +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=transactions&endpoint=all +replacement: https://developers.stellar.org/api/resources/transactions/single/ +--- + +This endpoint represents all successful [transactions](../resources/transaction.md). +Please note that this endpoint returns failed transactions that are included in the ledger if +`include_failed` parameter is `true` and Horizon is ingesting failed transactions. +This endpoint can also be used in [streaming](../streaming.md) mode. This makes it possible to use +it to listen for new transactions as they get made in the Stellar network. If called in streaming +mode Horizon will start at the earliest known transaction unless a `cursor` is set. In that case it +will start from the `cursor`. You can also set `cursor` value to `now` to only stream transaction +created since your request time. + +## Request + +``` +GET /transactions{?cursor,limit,order,include_failed} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream object created since your request time. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. | `200` | +| `?include_failed` | optional, bool, default: `false` | Set to `true` to include failed transactions in results. 
| `true` | + +### curl Example Request + +```sh +# Retrieve the 200 latest transactions, ordered chronologically: +curl "https://horizon-testnet.stellar.org/transactions?limit=200&order=desc" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.transactions() + .call() + .then(function (transactionResult) { + //page 1 + console.log(transactionResult.records); + return transactionResult.next(); + }) + .then(function (transactionResult) { + console.log(transactionResult.records); + }) + .catch(function (err) { + console.log(err) + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var txHandler = function (txResponse) { + console.log(txResponse); +}; + +var es = server.transactions() + .cursor('now') + .stream({ + onmessage: txHandler + }) +``` + +## Response + +If called normally this endpoint responds with a [page](../resources/page.md) of transactions. +If called in streaming mode the transaction resources are returned individually. +See [transaction resource](../resources/transaction.md) for reference. + +### Example Response + +```json +{ + "_embedded": { + "records": [ + { + "_links": { + "account": { + "href": "/accounts/GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K" + }, + "effects": { + "href": "/transactions/fa78cb43d72171fdb2c6376be12d57daa787b1fa1a9fdd0e9453e1f41ee5f15a/effects{?cursor,limit,order}", + "templated": true + }, + "ledger": { + "href": "/ledgers/146970" + }, + "operations": { + "href": "/transactions/fa78cb43d72171fdb2c6376be12d57daa787b1fa1a9fdd0e9453e1f41ee5f15a/operations{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/transactions?cursor=631231343497216\u0026order=asc" + }, + "self": { + "href": "/transactions/fa78cb43d72171fdb2c6376be12d57daa787b1fa1a9fdd0e9453e1f41ee5f15a" + }, + "succeeds": { + "href": "/transactions?cursor=631231343497216\u0026order=desc" + } + }, + "id": "fa78cb43d72171fdb2c6376be12d57daa787b1fa1a9fdd0e9453e1f41ee5f15a", + "paging_token": "631231343497216", + "successful": true, + "hash": "fa78cb43d72171fdb2c6376be12d57daa787b1fa1a9fdd0e9453e1f41ee5f15a", + "ledger": 146970, + "created_at": "2015-09-24T10:07:09Z", + "account": "GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K", + "account_sequence": 279172874343, + "max_fee": 100, + "fee_charged": 100, + "operation_count": 1, + "envelope_xdr": "AAAAAGXNhLrhGtltTwCpmqlarh7s1DB2hIkbP//jgzn4Fos/AAAACgAAAEEAAABnAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA2ddmTOFAgr21Crs2RXRGLhiAKxicZb/IERyEZL/Y2kUAAAAXSHboAAAAAAAAAAAB+BaLPwAAAECDEEZmzbgBr5fc3mfJsCjWPDtL6H8/vf16me121CC09ONyWJZnw0PUvp4qusmRwC6ZKfLDdk8F3Rq41s+yOgQD", + "result_xdr": "AAAAAAAAAAoAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAAAAAAEAAAACAAAAAAACPhoAAAAAAAAAANnXZkzhQIK9tQq7NkV0Ri4YgCsYnGW/yBEchGS/2NpFAAAAF0h26AAAAj4aAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAQACPhoAAAAAAAAAAGXNhLrhGtltTwCpmqlarh7s1DB2hIkbP//jgzn4Fos/AABT8kS2c/oAAABBAAAAZwAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAA" + }, + { + "_links": { + "account": { + "href": "/accounts/GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K" + }, + "effects": { + "href": "/transactions/90ad6cfc9b0911bdbf202cace78ae7ecf50989c424288670dadb69bf8237c1b3/effects{?cursor,limit,order}", + "templated": true + }, + "ledger": { + "href": "/ledgers/144798" + }, + 
"operations": { + "href": "/transactions/90ad6cfc9b0911bdbf202cace78ae7ecf50989c424288670dadb69bf8237c1b3/operations{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/transactions?cursor=621902674530304\u0026order=asc" + }, + "self": { + "href": "/transactions/90ad6cfc9b0911bdbf202cace78ae7ecf50989c424288670dadb69bf8237c1b3" + }, + "succeeds": { + "href": "/transactions?cursor=621902674530304\u0026order=desc" + } + }, + "id": "90ad6cfc9b0911bdbf202cace78ae7ecf50989c424288670dadb69bf8237c1b3", + "paging_token": "621902674530304", + "successful": false, + "hash": "90ad6cfc9b0911bdbf202cace78ae7ecf50989c424288670dadb69bf8237c1b3", + "ledger": 144798, + "created_at": "2015-09-24T07:49:38Z", + "account": "GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K", + "account_sequence": 279172874342, + "max_fee": 100, + "fee_charged": 100, + "operation_count": 1, + "envelope_xdr": "AAAAAGXNhLrhGtltTwCpmqlarh7s1DB2hIkbP//jgzn4Fos/AAAACgAAAEEAAABmAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAMPT7P7buwqnMueFS4NV10vE2q3C/mcAy4jx03/RdSGsAAAAXSHboAAAAAAAAAAAB+BaLPwAAAEBPWWMNSWyPBbQlhRheXyvAFDVx1rnf68fdDOUHPdDIkHdUczBpzvCjpdgwhQ2NYOX5ga1ZgOIWLy789YNnuIcL", + "result_xdr": "AAAAAAAAAAoAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAAAAAAEAAAACAAAAAAACNZ4AAAAAAAAAADD0+z+27sKpzLnhUuDVddLxNqtwv5nAMuI8dN/0XUhrAAAAF0h26AAAAjWeAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAQACNZ4AAAAAAAAAAGXNhLrhGtltTwCpmqlarh7s1DB2hIkbP//jgzn4Fos/AABUCY0tXAQAAABBAAAAZgAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAA" + } + ] + }, + "_links": { + "next": { + "href": "/transactions?order=desc\u0026limit=2\u0026cursor=621902674530304" + }, + "prev": { + "href": "/transactions?order=asc\u0026limit=2\u0026cursor=631231343497216" + }, + "self": { + "href": "/transactions?order=desc\u0026limit=2\u0026cursor=" + } + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#standard-errors). diff --git a/services/horizon/internal/docs/reference/endpoints/transactions-create.md b/services/horizon/internal/docs/reference/endpoints/transactions-create.md new file mode 100644 index 0000000000..1801bb5c12 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/transactions-create.md @@ -0,0 +1,132 @@ +--- +title: Post Transaction +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=transactions&endpoint=create +replacement: https://developers.stellar.org/api/resources/transactions/post/ +--- + +Posts a new [transaction](../resources/transaction.md) to the Stellar Network. +Note that creating a valid transaction and signing it properly is the +responsibility of your client library. + +Transaction submission and the subsequent validation and inclusion into the +Stellar Network's ledger is a [complicated and asynchronous +process](https://www.stellar.org/developers/learn/concepts/transactions.html#life-cycle). +To reduce the complexity, horizon manages these asynchronous processes for the +client and will wait to hear results from the Stellar Network before returning +an HTTP response to a client. + +Transaction submission to horizon aims to be +[idempotent](https://en.wikipedia.org/wiki/Idempotence#Computer_science_meaning): +a client can submit a given transaction to horizon more than once and horizon +will behave the same each time. If the transaction has already been +successfully applied to the ledger, horizon will simply return the saved result +and not attempt to submit the transaction again. 
Only in cases where a
+transaction's status is unknown (and thus still has a chance of being included
+in a ledger) will a resubmission to the network occur.
+
+See [building transactions](https://www.stellar.org/developers/js-stellar-base/reference/building-transactions.html) for information about constructing transactions in JavaScript.
+
+### Timeout
+
+If you are encountering this error it means that either:
+
+* Horizon has not received a confirmation from the Core server that the transaction you are trying to submit to the network was included in a ledger in a timely manner, or
+* Horizon has not sent a response to a reverse proxy within the specified time.
+
+The former case may happen because there was no room for your transaction in three consecutive ledgers; in that case, Stellar Core removes the transaction from its queue. To solve this you can either:
+
+* Keep resubmitting the same transaction (with the same sequence number) and wait until it is finally included in a new ledger, or
+* Increase the [fee](../../../guides/concepts/fees.html).
+
+## Request
+
+```
+POST /transactions
+```
+
+### Arguments
+
+| name | loc | notes | example | description |
+| ---- | ---- | -------- | ---------------------- | ----------- |
+| `tx` | body | required | `AAAAAO`....`f4yDBA==` | Base64 representation of the transaction envelope [XDR](../xdr.md) |
+
+
+### curl Example Request
+
+```sh
+curl -X POST \
+  -F "tx=AAAAAOo1QK/3upA74NLkdq4Io3DQAQZPi4TVhuDnvCYQTKIVAAAACgAAH8AAAAABAAAAAAAAAAAAAAABAAAAAQAAAADqNUCv97qQO+DS5HauCKNw0AEGT4uE1Ybg57wmEEyiFQAAAAEAAAAAZc2EuuEa2W1PAKmaqVquHuzUMHaEiRs//+ODOfgWiz8AAAAAAAAAAAAAA+gAAAAAAAAAARBMohUAAABAPnnZL8uPlS+c/AM02r4EbxnZuXmP6pQHvSGmxdOb0SzyfDB2jUKjDtL+NC7zcMIyw4NjTa9Ebp4lvONEf4yDBA==" \
+  "https://horizon-testnet.stellar.org/transactions"
+```
+
+## Response
+
+A successful response (i.e. any response with a successful HTTP response code)
+indicates that the transaction was successful and has been included in the
+ledger.
+
+If the transaction failed or errored, an error response will be returned; see the errors section below.
+
+### Attributes
+
+The response will include all fields from the [transaction resource](../resources/transaction.md).
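+
+As an informal sketch (not part of the reference examples above), the following JavaScript
+snippet shows one way to build, sign, and submit a transaction with `stellar-sdk` and then read
+the `successful` flag from this endpoint's response. It assumes a recent `stellar-sdk` where
+`TransactionBuilder` accepts `fee` and `networkPassphrase` options; the secret seed and
+destination address are placeholders.
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+// Placeholder secret seed -- replace with a funded testnet account.
+var sourceKeys = StellarSdk.Keypair.fromSecret('SB...REPLACE_ME');
+
+server.loadAccount(sourceKeys.publicKey())
+  .then(function (account) {
+    var tx = new StellarSdk.TransactionBuilder(account, {
+      fee: StellarSdk.BASE_FEE,
+      networkPassphrase: StellarSdk.Networks.TESTNET
+    })
+      .addOperation(StellarSdk.Operation.payment({
+        destination: 'G...DESTINATION_PLACEHOLDER',
+        asset: StellarSdk.Asset.native(),
+        amount: '10'
+      }))
+      .setTimeout(30)
+      .build();
+    tx.sign(sourceKeys);
+    // submitTransaction posts the base64 envelope to POST /transactions.
+    return server.submitTransaction(tx);
+  })
+  .then(function (response) {
+    console.log('successful:', response.successful, 'hash:', response.hash);
+  })
+  .catch(function (err) {
+    console.log(err);
+  });
+```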
+ +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/697121" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=2994111896358912" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=2994111896358912" + } + }, + "id": "264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c", + "paging_token": "2994111896358912", + "successful": true, + "hash": "264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c", + "ledger": 697121, + "created_at": "2019-04-09T20:14:25Z", + "source_account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "fee_account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "source_account_sequence": "4660039994869", + "fee_charged": 100, + "max_fee": 100, + "operation_count": 1, + "envelope_xdr": "AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAZAAABD0AB031AAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAFIMRkFZ9gZifhRSlklQpsz/9P04Earv0dzS3MkIM1cYAAAAXSHboAAAAAAAAAAABhlbgnAAAAEA+biIjrDy8yi+SvhFElIdWGBRYlDscnSSHkPchePy2JYDJn4wvJYDBumXI7/NmttUey3+cGWbBFfnnWh1H5EoD", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAqjIQAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwBOLmYhGq/IAAABD0AB030AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAqjIQAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwBOLmYhGq/IAAABD0AB031AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMACqMhAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uZiEar8gAAAEPQAHTfUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uYE789cgAAAEPQAHTfUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAACqMhAAAAAAAAAAAUgxGQVn2BmJ+FFKWSVCmzP/0/TgRqu/R3NLcyQgzVxgAAABdIdugAAAqjIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMACqMgAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uZiEar+EAAAEPQAHTfQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uZiEar8gAAAEPQAHTfQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "none", + "signatures": [ + "Pm4iI6w8vMovkr4RRJSHVhgUWJQ7HJ0kh5D3IXj8tiWAyZ+MLyWAwbplyO/zZrbVHst/nBlmwRX551odR+RKAw==" + ] +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard_Errors). +- [transaction_failed](../errors/transaction-failed.md): The transaction failed and could not be applied to the ledger. +- [transaction_malformed](../errors/transaction-malformed.md): The transaction could not be decoded and was not submitted to the network. +- [timeout](../errors/timeout.md): No response from the Core server in a timely manner. Please check "Timeout" section above. 
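+
+When submission fails with one of the errors above, the problem document described in the errors
+section is returned. A rough JavaScript sketch of inspecting it, assuming the SDK surfaces the
+Horizon problem document on `err.response.data` (as recent `stellar-sdk` versions do):
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+// `tx` is assumed to be an already built and signed Transaction object.
+function submitWithDiagnostics(tx) {
+  return server.submitTransaction(tx).catch(function (err) {
+    var problem = err.response && err.response.data;
+    if (problem && problem.extras && problem.extras.result_codes) {
+      // e.g. "tx_bad_seq", "tx_insufficient_fee", ...
+      console.log('transaction result code:', problem.extras.result_codes.transaction);
+      console.log('operation result codes:', problem.extras.result_codes.operations);
+    } else if (problem && problem.status === 504) {
+      // Timeout: it is safe to resubmit the identical signed envelope
+      // (same sequence number) until it is included or permanently fails.
+      console.log('timeout, consider resubmitting the same transaction');
+    }
+    throw err;
+  });
+}
+```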
diff --git a/services/horizon/internal/docs/reference/endpoints/transactions-for-account.md b/services/horizon/internal/docs/reference/endpoints/transactions-for-account.md new file mode 100644 index 0000000000..375c281698 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/transactions-for-account.md @@ -0,0 +1,166 @@ +--- +title: Transactions for Account +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=transactions&endpoint=for_account +replacement: https://developers.stellar.org/api/resources/accounts/transactions/ +--- + +This endpoint represents successful [transactions](../resources/transaction.md) that affected a +given [account](../resources/account.md). This endpoint can also be used in +[streaming](../streaming.md) mode so it is possible to use it to listen for new transactions that +affect a given account as they get made in the Stellar network. + +If called in streaming mode Horizon will start at the earliest known transaction unless a `cursor` +is set. In that case it will start from the `cursor`. You can also set `cursor` value to `now` to +only stream transaction created since your request time. + +## Request + +``` +GET /accounts/{account_id}/transactions{?cursor,limit,order,include_failed} +``` + +### Arguments + +| name | notes | description | example | +| ---- | ----- | ----------- | ------- | +| `account_id` | required, string | ID of an account | GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K | +| `?cursor` | optional, any, default _null_ | A paging token, specifying where to start returning records from. When streaming this can be set to `now` to stream object created since your request time. | 12884905984 | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default: `10` | Maximum number of records to return. | `200` | +| `?include_failed` | optional, bool, default: `false` | Set to `true` to include failed transactions in results. | `true` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/accounts/GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K/transactions?limit=1" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.transactions() + .forAccount("GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K") + .call() + .then(function (accountResult) { + console.log(accountResult); + }) + .catch(function (err) { + console.error(err); + }) +``` + +### JavaScript Streaming Example + +```javascript +var StellarSdk = require('stellar-sdk') +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +var txHandler = function (txResponse) { + console.log(txResponse); +}; + +var es = server.transactions() + .forAccount("GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K") + .cursor('now') + .stream({ + onmessage: txHandler + }) +``` + +## Response + +This endpoint responds with a list of transactions that changed a given account's state. See +[transaction resource](../resources/transaction.md) for reference. 
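+
+The pages returned here follow the standard paging rules, so the full history can be walked with
+the `next()` helper on each response. A minimal sketch of that, assuming the usual `stellar-sdk`
+paging behaviour (the account ID is the one used in the examples above):
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+// Walk pages until an empty page is returned.
+function walk(page) {
+  if (page.records.length === 0) {
+    return;
+  }
+  page.records.forEach(function (tx) {
+    console.log(tx.id, tx.successful);
+  });
+  return page.next().then(walk);
+}
+
+server.transactions()
+  .forAccount("GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K")
+  .limit(50)
+  .call()
+  .then(walk)
+  .catch(function (err) {
+    console.log(err);
+  });
+```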
+ +### Example Response +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF/payments?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/accounts/GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF/payments?cursor=2714719978786817&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/accounts/GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF/payments?cursor=1919197546291201&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/7e2050abc676003efc3eaadd623c927f753b7a6c37f50864bf284f4e1510d088" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/1919197546291201/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=1919197546291201" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=1919197546291201" + } + }, + "id": "1919197546291201", + "paging_token": "1919197546291201", + "transaction_successful": true, + "source_account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "type": "create_account", + "type_i": 0, + "created_at": "2019-03-25T22:43:38Z", + "transaction_hash": "7e2050abc676003efc3eaadd623c927f753b7a6c37f50864bf284f4e1510d088", + "starting_balance": "10000.0000000", + "funder": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "account": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF" + }, + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/operations/2714719978786817" + }, + "transaction": { + "href": "https://horizon-testnet.stellar.org/transactions/7cea6abe90654578b42ee696e823187d89d91daa157a1077b542ee7c77413ce3" + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/operations/2714719978786817/effects" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/effects?order=desc&cursor=2714719978786817" + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/effects?order=asc&cursor=2714719978786817" + } + }, + "id": "2714719978786817", + "paging_token": "2714719978786817", + "transaction_successful": true, + "source_account": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR", + "type": "payment", + "type_i": 1, + "created_at": "2019-04-05T23:07:42Z", + "transaction_hash": "7cea6abe90654578b42ee696e823187d89d91daa157a1077b542ee7c77413ce3", + "asset_type": "credit_alphanum4", + "asset_code": "FOO", + "asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR", + "from": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR", + "to": "GBYUUJHG6F4EPJGNLERINATVQLNDOFRUD7SGJZ26YZLG5PAYLG7XUSGF", + "amount": "1000000.0000000" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no account whose ID matches the `account_id` argument. 
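+
+As a complement to the streaming example above, which starts at `now`, the sketch below resumes a
+stream from a previously stored `paging_token`, so no transactions are missed between runs. This
+is an informal illustration; the `loadCursor`/`saveCursor` helpers are placeholders for whatever
+persistence you use.
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+// Placeholder persistence helpers -- swap in a real store (file, database, ...).
+var lastCursor = null;
+function loadCursor() { return lastCursor || 'now'; }
+function saveCursor(token) { lastCursor = token; }
+
+var es = server.transactions()
+  .forAccount("GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K")
+  .cursor(loadCursor())
+  .stream({
+    onmessage: function (tx) {
+      console.log(tx.id);
+      saveCursor(tx.paging_token); // resume point for the next run
+    },
+    onerror: function (err) {
+      console.log(err);
+    }
+  });
+```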
diff --git a/services/horizon/internal/docs/reference/endpoints/transactions-for-ledger.md b/services/horizon/internal/docs/reference/endpoints/transactions-for-ledger.md new file mode 100644 index 0000000000..7f1f5e4693 --- /dev/null +++ b/services/horizon/internal/docs/reference/endpoints/transactions-for-ledger.md @@ -0,0 +1,222 @@ +--- +title: Transactions for Ledger +clientData: + laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=transactions&endpoint=for_ledger +replacement: https://developers.stellar.org/api/resources/ledgers/transactions/ +--- + +This endpoint represents successful [transactions](../resources/transaction.md) in a given [ledger](../resources/ledger.md). + +## Request + +``` +GET /ledgers/{id}/transactions{?cursor,limit,order,include_failed} +``` + +### Arguments + +| name | notes | description | example | +| ------ | ------- | ----------- | ------- | +| `id` | required, number | Ledger ID | `697121` | +| `?cursor` | optional, default _null_ | A paging token, specifying where to start returning records from. | `12884905984` | +| `?order` | optional, string, default `asc` | The order in which to return rows, "asc" or "desc". | `asc` | +| `?limit` | optional, number, default `10` | Maximum number of records to return. | `200` | +| `?include_failed` | optional, bool, default: `false` | Set to `true` to include failed transactions in results. | `true` | + +### curl Example Request + +```sh +curl "https://horizon-testnet.stellar.org/ledgers/697121/transactions?limit=1" +``` + +### JavaScript Example Request + +```javascript +var StellarSdk = require('stellar-sdk'); +var server = new StellarSdk.Server('https://horizon-testnet.stellar.org'); + +server.transactions() + .forLedger("697121") + .limit("1") + .call() + .then(function (accountResults) { + console.log(accountResults.records) + }) + .catch(function (err) { + console.log(err) + }) +``` + +## Response + +This endpoint responds with a list of transactions in a given ledger. See [transaction +resource](../resources/transaction.md) for reference. 
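+
+Because this endpoint accepts `include_failed`, failed transactions can be examined alongside
+successful ones. A rough sketch of tallying them for one ledger is shown below; it assumes the
+SDK's `includeFailed` helper is available (otherwise the `include_failed` query parameter can be
+set on the URL directly):
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+server.transactions()
+  .forLedger("697121")
+  .includeFailed(true) // ask Horizon to return failed transactions as well
+  .limit(200)
+  .call()
+  .then(function (page) {
+    var failed = page.records.filter(function (tx) { return !tx.successful; });
+    console.log(page.records.length + ' transactions, ' + failed.length + ' failed');
+  })
+  .catch(function (err) {
+    console.log(err);
+  });
+```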
+ +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/ledgers/697121/transactions?cursor=&limit=10&order=asc" + }, + "next": { + "href": "https://horizon-testnet.stellar.org/ledgers/697121/transactions?cursor=2994111896367104&limit=10&order=asc" + }, + "prev": { + "href": "https://horizon-testnet.stellar.org/ledgers/697121/transactions?cursor=2994111896358912&limit=10&order=desc" + } + }, + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/697121" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=2994111896358912" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=2994111896358912" + } + }, + "id": "264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c", + "paging_token": "2994111896358912", + "successful": true, + "hash": "264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c", + "ledger": 697121, + "created_at": "2019-04-09T20:14:25Z", + "source_account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "source_account_sequence": "4660039994869", + "fee_charged": 100, + "max_fee": 100, + "operation_count": 1, + "envelope_xdr": "AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAZAAABD0AB031AAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAFIMRkFZ9gZifhRSlklQpsz/9P04Earv0dzS3MkIM1cYAAAAXSHboAAAAAAAAAAABhlbgnAAAAEA+biIjrDy8yi+SvhFElIdWGBRYlDscnSSHkPchePy2JYDJn4wvJYDBumXI7/NmttUey3+cGWbBFfnnWh1H5EoD", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAqjIQAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwBOLmYhGq/IAAABD0AB030AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAqjIQAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwBOLmYhGq/IAAABD0AB031AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMACqMhAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uZiEar8gAAAEPQAHTfUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uYE789cgAAAEPQAHTfUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAACqMhAAAAAAAAAAAUgxGQVn2BmJ+FFKWSVCmzP/0/TgRqu/R3NLcyQgzVxgAAABdIdugAAAqjIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMACqMgAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uZiEar+EAAAEPQAHTfQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uZiEar8gAAAEPQAHTfQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "none", + "signatures": [ + "Pm4iI6w8vMovkr4RRJSHVhgUWJQ7HJ0kh5D3IXj8tiWAyZ+MLyWAwbplyO/zZrbVHst/nBlmwRX551odR+RKAw==" + ] + }, + { + "memo": "2A1V6J5703G47XHY", + "_links": { + "self": { + "href": 
"https://horizon-testnet.stellar.org/transactions/f175108e5c64619705b112a99fa32884dfa0511d9a8986aade87905b08eabe5b" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GAZ4A54KE6MTMXYEPM7T3IDLZWGNCCKB5ME422NZ3MAMTHWWP37RPEBW" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/697121" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/f175108e5c64619705b112a99fa32884dfa0511d9a8986aade87905b08eabe5b/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/f175108e5c64619705b112a99fa32884dfa0511d9a8986aade87905b08eabe5b/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=2994111896363008" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=2994111896363008" + } + }, + "id": "f175108e5c64619705b112a99fa32884dfa0511d9a8986aade87905b08eabe5b", + "paging_token": "2994111896363008", + "successful": true, + "hash": "f175108e5c64619705b112a99fa32884dfa0511d9a8986aade87905b08eabe5b", + "ledger": 697121, + "created_at": "2019-04-09T20:14:25Z", + "source_account": "GAZ4A54KE6MTMXYEPM7T3IDLZWGNCCKB5ME422NZ3MAMTHWWP37RPEBW", + "source_account_sequence": "2994107601387521", + "fee_charged": 100, + "max_fee": 100, + "operation_count": 1, + "envelope_xdr": "AAAAADPAd4onmTZfBHs/PaBrzYzRCUHrCc1pudsAyZ7Wfv8XAAAAZAAKoyAAAAABAAAAAAAAAAEAAAAQMkExVjZKNTcwM0c0N1hIWQAAAAEAAAABAAAAADPAd4onmTZfBHs/PaBrzYzRCUHrCc1pudsAyZ7Wfv8XAAAAAQAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAAAAAAAAhFKDAAAAAAAAAAAB1n7/FwAAAEBJdXuYg13Glzx1RinVCXd/cc1usrhU/0f5HFZ7lyIR8kS3T6PRrW78TQDNqXz+ukUiPwlB1A8MqxoW/SAL5FIB", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAqjIQAAAAAAAAAAM8B3iieZNl8Eez89oGvNjNEJQesJzWm52wDJntZ+/xcAAAAXSHbnnAAKoyAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAqjIQAAAAAAAAAAM8B3iieZNl8Eez89oGvNjNEJQesJzWm52wDJntZ+/xcAAAAXSHbnnAAKoyAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMACqMgAAAAAAAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAANViducAABeBgAAoRQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAAPZ3F6cAABeBgAAoRQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMACqMhAAAAAAAAAAAzwHeKJ5k2XwR7Pz2ga82M0QlB6wnNabnbAMme1n7/FwAAABdIduecAAqjIAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAAzwHeKJ5k2XwR7Pz2ga82M0QlB6wnNabnbAMme1n7/FwAAABbEJGScAAqjIAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMACqMgAAAAAAAAAAAzwHeKJ5k2XwR7Pz2ga82M0QlB6wnNabnbAMme1n7/FwAAABdIdugAAAqjIAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAAzwHeKJ5k2XwR7Pz2ga82M0QlB6wnNabnbAMme1n7/FwAAABdIduecAAqjIAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "text", + "signatures": [ + "SXV7mINdxpc8dUYp1Ql3f3HNbrK4VP9H+RxWe5ciEfJEt0+j0a1u/E0Azal8/rpFIj8JQdQPDKsaFv0gC+RSAQ==" + ] + }, + { + "memo": "WHALE", + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/83b6ebf4b3aec5b36cab14ae0f438a23487746857903a9e0bb002564b4641e25" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GABRMXDIJCTDSMPC67J64NSAMWRSYXVCXYTXVFC73DTHBKELHNKWANXP" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/697121" + }, + "operations": { + "href": 
"https://horizon-testnet.stellar.org/transactions/83b6ebf4b3aec5b36cab14ae0f438a23487746857903a9e0bb002564b4641e25/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/83b6ebf4b3aec5b36cab14ae0f438a23487746857903a9e0bb002564b4641e25/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=2994111896367104" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=2994111896367104" + } + }, + "id": "83b6ebf4b3aec5b36cab14ae0f438a23487746857903a9e0bb002564b4641e25", + "paging_token": "2994111896367104", + "successful": true, + "hash": "83b6ebf4b3aec5b36cab14ae0f438a23487746857903a9e0bb002564b4641e25", + "ledger": 697121, + "created_at": "2019-04-09T20:14:25Z", + "source_account": "GABRMXDIJCTDSMPC67J64NSAMWRSYXVCXYTXVFC73DTHBKELHNKWANXP", + "source_account_sequence": "122518237256298", + "fee_charged": 100, + "max_fee": 100, + "operation_count": 1, + "envelope_xdr": "AAAAAAMWXGhIpjkx4vfT7jZAZaMsXqK+J3qUX9jmcKiLO1VgAAAAZAAAb24AAppqAAAAAQAAAAAAAAAAAAAAAFys/kkAAAABAAAABVdIQUxFAAAAAAAAAQAAAAAAAAAAAAAAAKrN4k6edFMb0WEyPzEEjWUAji0pvvALw+BAH4OnekA5AAAAAAcnDgAAAAAAAAAAAYs7VWAAAABAYd9uIm+TjIcAjTU90YJoNg/r+6PU3Uss7ewUb1w3yMa+HyoSvDq8sDz/SYmDBH7F+0ACIeBF4kkVEKVBJMh0AQ==", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAqjIQAAAAAAAAAAAxZcaEimOTHi99PuNkBloyxeor4nepRf2OZwqIs7VWAAJBMYWVFGqAAAb24AApppAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAqjIQAAAAAAAAAAAxZcaEimOTHi99PuNkBloyxeor4nepRf2OZwqIs7VWAAJBMYWVFGqAAAb24AAppqAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMACqMhAAAAAAAAAAADFlxoSKY5MeL30+42QGWjLF6ivid6lF/Y5nCoiztVYAAkExhZUUaoAABvbgACmmoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAADFlxoSKY5MeL30+42QGWjLF6ivid6lF/Y5nCoiztVYAAkExhSKjioAABvbgACmmoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAACqMhAAAAAAAAAACqzeJOnnRTG9FhMj8xBI1lAI4tKb7wC8PgQB+Dp3pAOQAAAAAHJw4AAAqjIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMACqMfAAAAAAAAAAADFlxoSKY5MeL30+42QGWjLF6ivid6lF/Y5nCoiztVYAAkExhZUUcMAABvbgACmmkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAADFlxoSKY5MeL30+42QGWjLF6ivid6lF/Y5nCoiztVYAAkExhZUUaoAABvbgACmmkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "text", + "signatures": [ + "Yd9uIm+TjIcAjTU90YJoNg/r+6PU3Uss7ewUb1w3yMa+HyoSvDq8sDz/SYmDBH7F+0ACIeBF4kkVEKVBJMh0AQ==" + ], + "valid_after": "1970-01-01T00:00:00Z", + "valid_before": "2019-04-09T20:19:21Z" + } + ] + } +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no ledgers whose sequence matches the `id` argument. 
diff --git a/services/horizon/internal/docs/reference/endpoints/transactions-single.md b/services/horizon/internal/docs/reference/endpoints/transactions-single.md
new file mode 100644
index 0000000000..99aa29e376
--- /dev/null
+++ b/services/horizon/internal/docs/reference/endpoints/transactions-single.md
@@ -0,0 +1,112 @@
+---
+title: Transaction Details
+clientData:
+  laboratoryUrl: https://www.stellar.org/laboratory/#explorer?resource=transactions&endpoint=single
+replacement: https://developers.stellar.org/api/resources/transactions/single/
+---
+
+The transaction details endpoint provides information on a single
+[transaction](../resources/transaction.md). The transaction hash provided in the `hash` argument
+specifies which transaction to load.
+
+### Warning - failed transactions
+
+A transaction can either succeed or fail (failed transactions are also included in the Stellar
+ledger). Always check its status using the `successful` field!
+
+## Request
+
+```
+GET /transactions/{hash}
+```
+
+### Arguments
+
+| name | notes | description | example |
+| ------ | ------- | ----------- | ------- |
+| `hash` | required, string | A transaction hash, hex-encoded, lowercase. | 264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c |
+
+### curl Example Request
+
+```sh
+curl "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c"
+```
+
+### JavaScript Example Request
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+server.transactions()
+  .transaction("264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c")
+  .call()
+  .then(function (transactionResult) {
+    console.log(transactionResult)
+  })
+  .catch(function (err) {
+    console.log(err)
+  })
+```
+
+## Response
+
+This endpoint responds with a single transaction. See [transaction resource](../resources/transaction.md) for reference.
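+
+To reinforce the warning above, the informal sketch below checks the `successful` flag and then
+decodes the `result_xdr` field for further inspection. It assumes the SDK exposes the generated
+XDR classes under `StellarSdk.xdr`; if yours does not, the raw base64 fields can still be decoded
+with the standalone `stellar-base` package.
+
+```javascript
+var StellarSdk = require('stellar-sdk');
+var server = new StellarSdk.Server('https://horizon-testnet.stellar.org');
+
+server.transactions()
+  .transaction("264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c")
+  .call()
+  .then(function (tx) {
+    if (!tx.successful) {
+      console.log('transaction failed; inspect result_xdr for the reason');
+    }
+    // Decode the base64 TransactionResult XDR into a structured object.
+    var result = StellarSdk.xdr.TransactionResult.fromXDR(tx.result_xdr, 'base64');
+    console.log('result code:', result.result().switch().name);
+  })
+  .catch(function (err) {
+    console.log(err);
+  });
+```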
+ +### Example Response + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/697121" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=2994111896358912" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=2994111896358912" + } + }, + "id": "264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c", + "paging_token": "2994111896358912", + "successful": true, + "hash": "264226cb06af3b86299031884175155e67a02e0a8ad0b3ab3a88b409a8c09d5c", + "ledger": 697121, + "created_at": "2019-04-09T20:14:25Z", + "source_account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "fee_account": "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", + "source_account_sequence": "4660039994869", + "fee_charged": 100, + "max_fee": 100, + "operation_count": 1, + "envelope_xdr": "AAAAABB90WssODNIgi6BHveqzxTRmIpvAFRyVNM+Hm2GVuCcAAAAZAAABD0AB031AAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAFIMRkFZ9gZifhRSlklQpsz/9P04Earv0dzS3MkIM1cYAAAAXSHboAAAAAAAAAAABhlbgnAAAAEA+biIjrDy8yi+SvhFElIdWGBRYlDscnSSHkPchePy2JYDJn4wvJYDBumXI7/NmttUey3+cGWbBFfnnWh1H5EoD", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADAAqjIQAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwBOLmYhGq/IAAABD0AB030AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAqjIQAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwBOLmYhGq/IAAABD0AB031AAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMACqMhAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uZiEar8gAAAEPQAHTfUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uYE789cgAAAEPQAHTfUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAACqMhAAAAAAAAAAAUgxGQVn2BmJ+FFKWSVCmzP/0/TgRqu/R3NLcyQgzVxgAAABdIdugAAAqjIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMACqMgAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uZiEar+EAAAEPQAHTfQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACqMhAAAAAAAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAE4uZiEar8gAAAEPQAHTfQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "none", + "signatures": [ + "Pm4iI6w8vMovkr4RRJSHVhgUWJQ7HJ0kh5D3IXj8tiWAyZ+MLyWAwbplyO/zZrbVHst/nBlmwRX551odR+RKAw==" + ] +} +``` + +## Possible Errors + +- The [standard errors](../errors.md#Standard-Errors). +- [not_found](../errors/not-found.md): A `not_found` error will be returned if there is no + transaction whose ID matches the `hash` argument. 
diff --git a/services/horizon/internal/docs/reference/errors.md b/services/horizon/internal/docs/reference/errors.md new file mode 100644 index 0000000000..f95a922a4e --- /dev/null +++ b/services/horizon/internal/docs/reference/errors.md @@ -0,0 +1,32 @@ +--- +title: Errors +replacement: https://developers.stellar.org/api/errors/ +--- + +In the event that an error occurs while processing a request to horizon, an +**error** response will be returned to the client. This error response will +contain information detailing why the request couldn't complete successfully. + +Like HAL for successful responses, horizon uses a standard to specify how we +communicate errors to the client. Specifically, horizon uses the [Problem +Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) draft specification. The specification is short, so we recommend +you read it. In summary, when an error occurs on the server we respond with a +json document with the following attributes: + +| name | type | description | +| -------- | ------ | --------------------------------------------------------------------------------------------------------------------------------------------------------- | +| type | url | The identifier for the error, expressed as a url. Visiting the url in a web browser will redirect you to the additional documentation for the problem. | +| title | string | A short title describing the error. | +| status | number | An HTTP status code that maps to the error. An error that is triggered due to client input will be in the 400-499 range of status code, for example. | +| detail | string | A longer description of the error meant the further explain the error to developers. | +| instance | string | A token that uniquely identifies this request. Allows server administrators to correlate a client report with server log files | + + +## Standard Errors + +There are a set of errors that can occur in any request to horizon which we +call **standard errors**. These errors are: + +- [Server Error](../reference/errors/server-error.md) +- [Rate Limit Exceeded](../reference/errors/rate-limit-exceeded.md) +- [Forbidden](../reference/errors/forbidden.md) diff --git a/services/horizon/internal/docs/reference/errors/bad-request.md b/services/horizon/internal/docs/reference/errors/bad-request.md new file mode 100644 index 0000000000..56cf369136 --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/bad-request.md @@ -0,0 +1,44 @@ +--- +title: Bad Request +replacement: https://developers.stellar.org/api/errors/http-status-codes/standard/ +--- + +If Horizon cannot understand a request due to invalid parameters, it will return a `bad_request` +error. This is analogous to the +[HTTP 400 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). + +If you are encountering this error, check the `invalid_field` attribute on the `extras` object to +see what field is triggering the error. + +## Attributes + +As with all errors Horizon returns, `bad_request` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. 
| +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. | + +## Example + +```shell +$ curl -X GET "https://horizon-testnet.stellar.org/ledgers?limit=invalidlimit" +{ + "type": "https://stellar.org/horizon-errors/bad_request", + "title": "Bad Request", + "status": 400, + "detail": "The request you sent was invalid in some way", + "extras": { + "invalid_field": "limit", + "reason": "unparseable value" + } +} +``` + +## Related + +- [Malformed Transaction](./transaction-malformed.md) diff --git a/services/horizon/internal/docs/reference/errors/before-history.md b/services/horizon/internal/docs/reference/errors/before-history.md new file mode 100644 index 0000000000..2e1dd7cc68 --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/before-history.md @@ -0,0 +1,41 @@ +--- +title: Before History +replacement: https://developers.stellar.org/api/errors/http-status-codes/horizon-specific/ +--- + +A horizon server may be configured to only keep a portion of the stellar network's history stored +within its database. This error will be returned when a client requests a piece of information +(such as a page of transactions or a single operation) that the server can positively identify as +falling outside the range of recorded history. + +This error returns a +[HTTP 410 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). + +## Attributes + +As with all errors Horizon returns, `before_history` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. | +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. | + +## Example + +```shell +$ curl -X GET "https://horizon-testnet.stellar.org/transactions?cursor=1&order=desc" +{ + "type": "https://stellar.org/horizon-errors/before_history", + "title": "Data Requested Is Before Recorded History", + "status": 410, + "detail": "This horizon instance is configured to only track a portion of the stellar network's latest history. This request is asking for results prior to the recorded history known to this horizon instance." +} +``` + +## Related + +- [Not Found](./not-found.md) diff --git a/services/horizon/internal/docs/reference/errors/not-acceptable.md b/services/horizon/internal/docs/reference/errors/not-acceptable.md new file mode 100644 index 0000000000..d58305492a --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/not-acceptable.md @@ -0,0 +1,42 @@ +--- +title: Not Acceptable +replacement: https://developers.stellar.org/api/errors/http-status-codes/standard/ +--- + +When your client only accepts certain formats of data from Horizon and Horizon cannot fulfill that +request, Horizon will return a `not_acceptable` error. This is analogous to a +[HTTP 406 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). + +For example, if your client only accepts an XML response (`Accept: application/xml`), Horizon will +respond with a `not_acceptable` error. 
+ +If you are encountering this error, please check to make sure the criteria for content you’ll +accept is correct. + +## Attributes + +As with all errors Horizon returns, `not_acceptable` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. | +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. | + +## Example + +```shell +$ curl -X GET -H "Accept: application/xml" "https://horizon-testnet.stellar.org/accounts/GALWEV6GY73RJ255JC7XUOZ2L7WZ5JJDTKATB2MUK7F3S67DVT2A6R5G" +{ + "type": "https://stellar.org/horizon-errors/not_acceptable", + "title": "An acceptable response content-type could not be provided for this request", + "status": 406 +} +``` + +## Related + +- [Not Found](./not-found.md) diff --git a/services/horizon/internal/docs/reference/errors/not-found.md b/services/horizon/internal/docs/reference/errors/not-found.md new file mode 100644 index 0000000000..f431b5eef2 --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/not-found.md @@ -0,0 +1,45 @@ +--- +title: Not Found +replacement: https://developers.stellar.org/api/errors/http-status-codes/standard/ +--- + +When Horizon can't find whatever data you are requesting, it will return a `not_found` error. This +is similar to a +[HTTP 404 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes) error +response. + +Incorrect URL path parameters or missing data are the common reasons for this error. If you +navigate using a link from a valid response, you should never receive this error message. + +## Attributes + +As with all errors Horizon returns, `not_found` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. | +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. 
| + +## Example + +```shell +$ curl -X GET "https://horizon-testnet.stellar.org/accounts/accountthatdoesntexist" +{ + "type": "https://stellar.org/horizon-errors/bad_request", + "title": "Bad Request", + "status": 400, + "detail": "The request you sent was invalid in some way", + "extras": { + "invalid_field": "account_id", + "reason": "invalid address" + } +} +``` + +## Related + +- [Not Acceptable](./not-acceptable.md) diff --git a/services/horizon/internal/docs/reference/errors/not-implemented.md b/services/horizon/internal/docs/reference/errors/not-implemented.md new file mode 100644 index 0000000000..41d1852ede --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/not-implemented.md @@ -0,0 +1,41 @@ +--- +title: Not Implemented +replacement: https://developers.stellar.org/api/errors/http-status-codes/standard/ +--- + +If your [request method](http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html) is not supported by +Horizon, Horizon will return a `not_implemented` error. Likewise, if functionality that is intended +but does not exist (thus reserving the endpoint for future use), it will also return a +`not_implemented` error. This is analogous to a +[HTTP 501 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). + +If you are encountering this error, Horizon does not have the functionality you are requesting. + +## Attributes + +As with all errors Horizon returns, `not_implemented` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. | +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. | + +## Example + +```shell +$ curl -X GET "https://horizon-testnet.stellar.org/offers/1234" +{ + "type": "https://stellar.org/horizon-errors/not_implemented", + "title": "Resource Not Yet Implemented", + "status": 404, + "detail": "While the requested URL is expected to eventually point to a valid resource, the work to implement the resource has not yet been completed." +} +``` + +## Related + +- [Server Error](./server-error.md) diff --git a/services/horizon/internal/docs/reference/errors/rate-limit-exceeded.md b/services/horizon/internal/docs/reference/errors/rate-limit-exceeded.md new file mode 100644 index 0000000000..1a6399466e --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/rate-limit-exceeded.md @@ -0,0 +1,41 @@ +--- +title: Rate Limit Exceeded +replacement: https://developers.stellar.org/api/errors/http-status-codes/standard/ +--- + +When a single user makes too many requests to Horizon in a one hour time frame, Horizon returns a +`rate_limit_exceeded` error. By default, Horizon allows 3600 requests per hour -- an average of one +request per second. This is analogous to a +[HTTP 429 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). + +If you are encountering this error, please reduce your request speed. Here are some strategies for +doing so: +* For collection endpoints, try specifying larger page sizes. +* Try streaming responses to watch for new data instead of pulling data every time. 
+* Cache immutable data, such as transaction details, locally. + +See the [Rate Limiting Guide](../../reference/rate-limiting.md) for more info. + +## Attributes + +As with all errors Horizon returns, `rate_limit_exceeded` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. | +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. | + +## Example + +```json +{ + "type": "https://stellar.org/horizon-errors/rate_limit_exceeded", + "title": "Rate Limit Exceeded", + "status": 429, + "details": "The rate limit for the requesting IP address is over its alloted limit. The allowed limit and requests left per time period are communicated to clients via the http response headers 'X-RateLimit-*' headers." +} +``` diff --git a/services/horizon/internal/docs/reference/errors/server-error.md b/services/horizon/internal/docs/reference/errors/server-error.md new file mode 100644 index 0000000000..7fe88bef91 --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/server-error.md @@ -0,0 +1,48 @@ +--- +title: Internal Server Error +replacement: https://developers.stellar.org/api/errors/http-status-codes/standard/ +--- + +If there's an internal error within Horizon, Horizon will return a +`server_error` response. This response is a catch-all, and can refer to many +possible errors in the Horizon server: a configuration mistake, a database +connection error, etc. This is analogous to a +[HTTP 500 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). + +Horizon does not expose information such as stack traces or raw error messages +to a client, as doing so may reveal sensitive configuration data such as secret +keys. If you are encountering this error on a server you control, please check the +Horizon log files for more details. The logs should contain detailed +information to help you discover the root issue. + +If you are encountering this error on the public Stellar infrastructure, please +report an error on [Horizon's issue tracker](https://github.com/stellar/go/issues) +and include as much information about the request that triggered the response +as you can (especially the time of the request). + +## Attributes + +As with all errors Horizon returns, `server_error` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. | +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. | + +## Examples +```json +{ + "type": "https://stellar.org/horizon-errors/server_error", + "title": "Internal Server Error", + "status": 500, + "details": "An error occurred while processing this request. 
This is usually due to a bug within the server software. Trying this request again may succeed if the bug is transient, otherwise please report this issue to the issue tracker at: https://github.com/stellar/go/issues. Please include this response in your issue." +} +``` + +## Related + +- [Not Implemented](./not-implemented.md) diff --git a/services/horizon/internal/docs/reference/errors/stale-history.md b/services/horizon/internal/docs/reference/errors/stale-history.md new file mode 100644 index 0000000000..041ef11723 --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/stale-history.md @@ -0,0 +1,39 @@ +--- +title: Stale History +replacement: https://developers.stellar.org/api/errors/http-status-codes/horizon-specific/ +--- + +A horizon server may be configured to reject historical requests when the history is known to be +further out of date than the configured threshold. In such cases, this error is returned. To +resolve this error (provided you are the horizon instance's operator) please ensure that the +ingestion system is running correctly and importing new ledgers. This error returns a +[HTTP 503 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). + +## Attributes + +As with all errors Horizon returns, `stale_history` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. | +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. | + +## Example + +```json +{ + + "type": "https://stellar.org/horizon-errors/stale_history", + "title": "Historical DB Is Too Stale", + "status": 503, + "detail": "This horizon instance is configured to reject client requests when it can determine that the history database is lagging too far behind the connected instance of stellar-core. If you operate this server, please ensure that the ingestion system is properly running." +} +``` + +## Related + +- [Internal Server Error](./server-error.md) diff --git a/services/horizon/internal/docs/reference/errors/timeout.md b/services/horizon/internal/docs/reference/errors/timeout.md new file mode 100644 index 0000000000..dca8cf4d64 --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/timeout.md @@ -0,0 +1,49 @@ +--- +title: Timeout +replacement: https://developers.stellar.org/api/errors/http-status-codes/horizon-specific/ +--- + +If you are encountering this error it means that either: + +* Horizon has not received a confirmation from the Stellar Core server that the transaction you are + trying to submit to the network was included in a ledger in a timely manner. +* Horizon has not sent a response to a reverse-proxy before a specified amount of time has elapsed. + +The former case may happen because there was no room for your transaction for 3 consecutive +ledgers. This is because Stellar Core removes each submitted transaction from a queue. To solve +this you can: + +* Keep resubmitting the same transaction (with the same sequence number) and wait until it finally + is added to a new ledger. 
+* Increase the [fee](../../../guides/concepts/fees.md) in order to prioritize the transaction. + +This error returns a +[HTTP 504 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). + +## Attributes + +As with all errors Horizon returns, `timeout` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. | +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. | + +## Example +```json +{ + "type": "https://stellar.org/horizon-errors/timeout", + "title": "Timeout", + "status": 504, + "detail": "Your request timed out before completing. Please try your request again. If you are submitting a transaction make sure you are sending exactly the same transaction (with the same sequence number)." +} +``` + +## Related + +- [Not Acceptable](./not-acceptable.md) +- [Transaction Failed](./transaction-failed.md) diff --git a/services/horizon/internal/docs/reference/errors/transaction-failed.md b/services/horizon/internal/docs/reference/errors/transaction-failed.md new file mode 100644 index 0000000000..fa84178d3a --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/transaction-failed.md @@ -0,0 +1,83 @@ +--- +title: Transaction Failed +replacement: https://developers.stellar.org/api/errors/http-status-codes/horizon-specific/ +--- + +The `transaction_failed` error occurs when a client submits a transaction that was well-formed but +was not included into the ledger due to some other failure. For example, a transaction may fail if: + +- The source account for transaction cannot pay the minimum fee. +- The sequence number is incorrect. +- One of the contained operations has failed such as a payment operation that overdraws on the + paying account. + +In almost every case, this error indicates that the transaction submitted in the initial request +will never succeed. There is one exception: a transaction that fails with the `tx_bad_seq` result +code (as expressed in the `result_code` field of the error) may become valid in the future if the +sequence number it used was too high. + +This error returns a +[HTTP 400 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). + +## Attributes + +As with all errors Horizon returns, `transaction_failed` follows the +[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00) +draft specification guide and thus has the following attributes: + +| Attribute | Type | Description | +| ----------- | ------ | ------------------------------------------------------------------------------- | +| `type` | URL | The identifier for the error. This is a URL that can be visited in the browser.| +| `title` | String | A short title describing the error. | +| `status` | Number | An HTTP status code that maps to the error. | +| `detail` | String | A more detailed description of the error. 
| + +In addition, the following additional data is provided in the `extras` field of the error: + +| Attribute | Type | Description | +|----------------------------|--------|-----------------------------------------------------------------------------------------------------------------------------| +| `envelope_xdr` | String | A base64-encoded representation of the TransactionEnvelope XDR whose failure triggered this response. | +| `result_xdr` | String | A base64-encoded representation of the TransactionResult XDR returned by stellar-core when submitting this transaction. | +| `result_codes.transaction` | String | The transaction result code returned by Stellar Core. | +| `result_codes.operations` | Array | An array of strings, representing the operation result codes for each operation in the submitted transaction, if available. | + + +## Examples + +### No Source Account +```json +{ + "type": "https://stellar.org/horizon-errors/transaction_failed", + "title": "Transaction Failed", + "status": 400, + "detail": "The transaction failed when submitted to the stellar network. The `extras.result_codes` field on this response contains further details. Descriptions of each code can be found at: https://www.stellar.org/developers/learn/concepts/list-of-operations.html", + "extras": { + "envelope_xdr": "AAAAANNVpdQ9vctZdAJ67sFmNe1KDzaj51dAdkW3vKKM51H3AAAAZAAAAABJlgLSAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAA01Wl1D29y1l0AnruwWY17UoPNqPnV0B2Rbe8ooznUfcAAAAAAAAAAAL68IAAAAAAAAAAAA==", + "result_codes": { + "transaction": "tx_no_source_account" + }, + "result_xdr": "AAAAAAAAAAD////4AAAAAA==" + } +} +``` + +### Bad Authentication +```json +{ + "type": "https://stellar.org/horizon-errors/transaction_failed", + "title": "Transaction Failed", + "status": 400, + "detail": "The transaction failed when submitted to the stellar network. The `extras.result_codes` field on this response contains further details. Descriptions of each code can be found at: https://www.stellar.org/developers/learn/concepts/list-of-operations.html", + "extras": { + "envelope_xdr": "AAAAAPORy3CoX6ox2ilbeiVjBA5WlpCSZRcjZ7VE9Wf4QVk7AAAAZAAAQz0AAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAA85HLcKhfqjHaKVt6JWMEDlaWkJJlFyNntUT1Z/hBWTsAAAAAAAAAAAL68IAAAAAAAAAAARN17BEAAABAA9Ad7OKc7y60NT/JuobaHOfmuq8KbZqcV6G/es94u9yT84fi0aI7tJsFMOyy8cZ4meY3Nn908OU+KfRWV40UCw==", + "result_codes": { + "transaction": "tx_bad_auth" + }, + "result_xdr": "AAAAAAAAAGT////6AAAAAA==" + } +} +``` + +## Related + +- [Transaction Malformed](./transaction-malformed.md) diff --git a/services/horizon/internal/docs/reference/errors/transaction-malformed.md b/services/horizon/internal/docs/reference/errors/transaction-malformed.md new file mode 100644 index 0000000000..24983047c0 --- /dev/null +++ b/services/horizon/internal/docs/reference/errors/transaction-malformed.md @@ -0,0 +1,53 @@ +--- +title: Transaction Malformed +replacement: https://developers.stellar.org/api/errors/http-status-codes/horizon-specific/ +--- + +When you submit a malformed transaction to Horizon, Horizon will return a `transaction_malformed` +error. There are many ways in which a transaction could be malformed, including: + +- You submitted an empty string. +- Your base64-encoded string is invalid. +- Your [XDR](../xdr.md) structure is invalid. +- You have leftover bytes in your [XDR](../xdr.md) structure. + +If you are encountering this error, please check the contents of the transaction you are +submitting. This error returns a +[HTTP 400 Error](https://developer.mozilla.org/en-US/docs/Web/HTTP/Response_codes). 
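+
+Most of these conditions can be checked on the client before a transaction is ever submitted. The snippet below is a minimal sketch using only Go's standard library (the helper name `checkEnvelope` is illustrative, not part of any SDK); it catches the empty-string and invalid-base64 cases, while validating the XDR structure itself would require an XDR library such as the `xdr` package in this repository.
+
+```go
+package main
+
+import (
+  "encoding/base64"
+  "fmt"
+)
+
+// checkEnvelope performs the cheap client-side checks that catch the most
+// common causes of a transaction_malformed response: an empty string and a
+// payload that is not valid base64. Validating the XDR structure itself
+// requires an XDR library (for example the xdr package in this repository).
+func checkEnvelope(envelope string) error {
+  if envelope == "" {
+    return fmt.Errorf("envelope is empty")
+  }
+  if _, err := base64.StdEncoding.DecodeString(envelope); err != nil {
+    return fmt.Errorf("envelope is not valid base64: %w", err)
+  }
+  return nil
+}
+
+func main() {
+  // A deliberately broken envelope (not valid base64).
+  if err := checkEnvelope("not-an-envelope!"); err != nil {
+    fmt.Println("refusing to submit:", err)
+  }
+}
+```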
+
+## Attributes
+
+As with all errors Horizon returns, `transaction_malformed` follows the
+[Problem Details for HTTP APIs](https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00)
+draft specification guide and thus has the following attributes:
+
+| Attribute   | Type   | Description                                                                       |
+| ----------- | ------ | ------------------------------------------------------------------------------- |
+| `type`      | URL    | The identifier for the error. This is a URL that can be visited in the browser.  |
+| `title`     | String | A short title describing the error.                                               |
+| `status`    | Number | An HTTP status code that maps to the error.                                       |
+| `detail`    | String | A more detailed description of the error.                                         |
+
+The following additional data is provided in the `extras` field of the error:
+
+| Attribute      | Type   | Description                                        |
+|----------------|--------|----------------------------------------------------|
+| `envelope_xdr` | String | The submitted data that was malformed in some way. |
+
+## Example
+
+```json
+{
+  "type": "https://stellar.org/horizon-errors/transaction_malformed",
+  "title": "Transaction Malformed",
+  "status": 400,
+  "detail": "Horizon could not decode the transaction envelope in this request. A transaction should be an XDR TransactionEnvelope struct encoded using base64. The envelope read from this request is echoed in the `extras.envelope_xdr` field of this response for your convenience.",
+  "extras": {
+    "envelope_xdr": "BBBBBPORy3CoX6ox2ilbeiVjBA5WlpCSZRcjZ7VE9Wf4QVk7AAAAZAAAQz0AAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAA85HLcKhfqjHaKVt6JWMEDlaWkJJlFyNntUT1Z/hBWTsAAAAAAAAAAAL68IAAAAAAAAAAARN17BEAAABAA9Ad7OKc7y60NT/JuobaHOfmuq8KbZqcV6G/es94u9yT84fi0aI7tJsFMOyy8cZ4meY3Nn908OU+KfRWV40UCw=="
+  }
+}
+```
+
+## Related
+
+- [Bad Request](./bad-request.md)
diff --git a/services/horizon/internal/docs/reference/paging.md b/services/horizon/internal/docs/reference/paging.md
new file mode 100644
index 0000000000..d2c8db0dda
--- /dev/null
+++ b/services/horizon/internal/docs/reference/paging.md
@@ -0,0 +1,12 @@
+---
+title: Paging
+replacement: https://developers.stellar.org/api/introduction/pagination/
+---
+
+The Stellar network contains a lot of data and it would be infeasible to return it all at once. The paging system allows
+a user to request a "page" of data containing only a limited number of results. The user can then use the paging system
+to request results adjacent to the current page, picking up where the previous page left off.
+
+Read about the [page resource](../reference/resources/page.md) for information on the paging system's usage and representation.
+
+
diff --git a/services/horizon/internal/docs/reference/rate-limiting.md b/services/horizon/internal/docs/reference/rate-limiting.md
new file mode 100644
index 0000000000..642412fbed
--- /dev/null
+++ b/services/horizon/internal/docs/reference/rate-limiting.md
@@ -0,0 +1,27 @@
+---
+title: Rate Limiting
+replacement: https://developers.stellar.org/api/introduction/rate-limiting/
+---
+
+In order to provide service stability, Horizon limits the number of requests a
+client (single IP) can perform within a one hour window. By default this is set to 3600
+requests per hour, an average of one request per second. When streaming, every
+update of the stream (which happens every time there is a new ledger) is also
+counted. For example, if there were 12 new ledgers in a minute, 12 requests would be
+subtracted from the limit.
+
+Horizon uses the [GCRA](https://brandur.org/rate-limiting#gcra) algorithm.
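+
+A client can stay within this budget by watching for HTTP 429 responses and the advisory headers described in the next section. The following is a minimal sketch using Go's standard `net/http` package; the helper name `getWithBackoff` and the single-retry policy are illustrative assumptions, not part of any SDK.
+
+```go
+package main
+
+import (
+  "fmt"
+  "net/http"
+  "strconv"
+  "time"
+)
+
+// getWithBackoff issues a GET request and, if Horizon answers with
+// HTTP 429 (rate_limit_exceeded), waits for the period advertised in
+// the Retry-After header before trying once more.
+func getWithBackoff(client *http.Client, url string) (*http.Response, error) {
+  resp, err := client.Get(url)
+  if err != nil {
+    return nil, err
+  }
+  if resp.StatusCode != http.StatusTooManyRequests {
+    return resp, nil
+  }
+  // Throttled: respect the advisory headers before retrying.
+  wait := 1 * time.Second
+  if s := resp.Header.Get("Retry-After"); s != "" {
+    if secs, err := strconv.Atoi(s); err == nil {
+      wait = time.Duration(secs) * time.Second
+    }
+  }
+  resp.Body.Close()
+  fmt.Println("rate limited, retrying in", wait)
+  time.Sleep(wait)
+  return client.Get(url)
+}
+
+func main() {
+  resp, err := getWithBackoff(http.DefaultClient, "https://horizon-testnet.stellar.org/ledgers?limit=1")
+  if err != nil {
+    panic(err)
+  }
+  defer resp.Body.Close()
+  fmt.Println("X-RateLimit-Remaining:", resp.Header.Get("X-RateLimit-Remaining"))
+}
+```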
+
+## Response headers for rate limiting
+
+Every response from Horizon sets advisory headers to inform clients of their
+standing with the rate limiting system:
+
+| Header                  | Description                                                                   |
+| ----------------------- | ----------------------------------------------------------------------------- |
+| `X-RateLimit-Limit`     | The maximum number of requests that the current client can make in one hour. |
+| `X-RateLimit-Remaining` | The number of remaining requests for the current window.                      |
+| `X-RateLimit-Reset`     | Seconds until a new window starts.                                             |
+
+In addition, a `Retry-After` header will be set when the current client is being
+throttled.
diff --git a/services/horizon/internal/docs/reference/readme.md b/services/horizon/internal/docs/reference/readme.md
new file mode 100644
index 0000000000..0e4f4cbc32
--- /dev/null
+++ b/services/horizon/internal/docs/reference/readme.md
@@ -0,0 +1,26 @@
+---
+title: Overview
+---
+
+Horizon is an API server for the Stellar ecosystem. It acts as the interface between [stellar-core](https://github.com/stellar/stellar-core) and applications that want to access the Stellar network. It allows you to submit transactions to the network, check the status of accounts, subscribe to event streams, etc. See [an overview of the Stellar ecosystem](https://www.stellar.org/developers/guides/) for details of where Horizon fits in.
+
+Horizon provides a RESTful API to allow client applications to interact with the Stellar network. You can communicate with Horizon using cURL or just your web browser. However, if you're building a client application, you'll likely want to use a Stellar SDK in the language of your client.
+SDF provides a [JavaScript SDK](https://www.stellar.org/developers/js-stellar-sdk/reference/index.html) for clients to use when interacting with Horizon.
+
+SDF runs an instance of Horizon that is connected to the test net: [https://horizon-testnet.stellar.org/](https://horizon-testnet.stellar.org/) and one that is connected to the public Stellar network:
+[https://horizon.stellar.org/](https://horizon.stellar.org/).
+
+## Libraries
+
+SDF maintained libraries:
+- [JavaScript](https://github.com/stellar/js-stellar-sdk) +- [Go](https://github.com/stellar/go/tree/master/clients/horizonclient) +- [Java](https://github.com/stellar/java-stellar-sdk) + +Community maintained libraries for interacting with Horizon in other languages:
+- [Python](https://github.com/StellarCN/py-stellar-base) +- [C# .NET Core 2.x](https://github.com/elucidsoft/dotnetcore-stellar-sdk) +- [Ruby](https://github.com/astroband/ruby-stellar-sdk) +- [iOS and macOS](https://github.com/Soneso/stellar-ios-mac-sdk) +- [Scala SDK](https://github.com/synesso/scala-stellar-sdk) +- [C++ SDK](https://github.com/bnogalm/StellarQtSDK) diff --git a/services/horizon/internal/docs/reference/resources/account.md b/services/horizon/internal/docs/reference/resources/account.md new file mode 100644 index 0000000000..8e90e78f77 --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/account.md @@ -0,0 +1,199 @@ +--- +title: Account +replacement: https://developers.stellar.org/api/resources/accounts/ +--- + +In the Stellar network, users interact using **accounts** which can be controlled by a corresponding keypair that can authorize transactions. One can create a new account with the [Create Account](./operation.md#create-account) operation. + +To learn more about the concept of accounts in the Stellar network, take a look at the [Stellar account concept guide](https://www.stellar.org/developers/learn/concepts/accounts.html). + +When horizon returns information about an account it uses the following format: + +## Attributes +| Attribute | Type | Description | +|----------------|------------------|------------------------------------------------------------------------------------------------------------------------ | +| id | string | The canonical id of this account, suitable for use as the :id parameter for url templates that require an account's ID. | +| account_id | string | The account's public key encoded into a base32 string representation. | +| sequence | number | The current sequence number that can be used when submitting a transaction from this account. | +| subentry_count | number | The number of [account subentries](https://www.stellar.org/developers/guides/concepts/ledger.html#ledger-entries). | +| balances | array of objects | An array of the native asset or credits this account holds. | +| thresholds | object | An object of account thresholds. | +| flags | object | The flags denote the enabling/disabling of certain asset issuer privileges. | +| signers | array of objects | An array of [account signers](https://www.stellar.org/developers/guides/concepts/multi-sig.html#additional-signing-keys) with their weights. | +| data | object | An array of account [data](./data.md) fields. | + +### Signer Object +| Attribute | Type | Description | +|------------|--------|------------------------------------------------------------------------------------------------------------------| +| weight | number | The numerical weight of a signer, necessary to determine whether a transaction meets the threshold requirements. | +| key | string | Different depending on the type of the signer. | +| type | string | See below. | + +### Possible Signer Types +| Type | Description | +|--------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ed25519_public_key | A normal Stellar public key. | +| sha256_hash | The SHA256 hash of some arbitrary `x`. Adding a signature of this type allows anyone who knows `x` to sign a transaction from this account. *Note: Once this transaction is broadcast, `x` will be known publicly.* | +| preauth_tx | The hash of a pre-authorized transaction. 
This signer is automatically removed from the account when a matching transaction is properly applied. | +### Balances Object +| Attribute | Type | | +|---------------------|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| balance | string | How much of an asset is owned. | +| buying_liabilities | string | The total amount of an asset offered to buy aggregated over all offers owned by this account. | +| selling_liabilities | string | The total amount of an asset offered to sell aggregated over all offers owned by this account. | +| limit | optional, number | The maximum amount of an asset that this account is willing to accept (this is specified when an account opens a trustline). | +| asset_type | string | Either native, credit_alphanum4, or credit_alphanum12. | +| asset_code | optional, string | The code for the asset. | +| asset_issuer | optional, string | The stellar address of the given asset's issuer. | +| is_authorized | optional, bool | The trustline status for an `auth_required` asset. If true, the issuer of the asset has granted the account permission to send, receive, buy, or sell the asset. If false, the issuer has not, so the account cannot send, receive, buy, or sell the asset. | + +### Flag Object +| Attribute | Type | | +|----------------|------|--------------------------------------------------------------------------------------------------------------------------------| +| auth_immutable | bool | With this setting, none of the following authorization flags can be changed. | +| auth_required | bool | With this setting, an anchor must approve anyone who wants to hold its asset. | +| auth_revocable | bool | With this setting, an anchor can set the authorize flag of an existing trustline to freeze the assets held by an asset holder. | + +### Threshold Object +| Attribute | Type | | +|----------------|--------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| low_threshold | number | The weight required for a valid transaction including the [Allow Trust][allow_trust] and [Bump Sequence][bump_seq] operations. | +| med_threshold | number | The weight required for a valid transaction including the [Create Account][create_acc], [Payment][payment], [Path Payment Strict Send][path_payment_send], [Path Payment Strict Receive][path_payment_receive], [Manage Buy Offer][manage_buy_offer], [Manage Sell Offer][manage_sell_offer], [Create Passive Sell Offer][passive_sell_offer], [Change Trust][change_trust], [Inflation][inflation], and [Manage Data][manage_data] operations. | +| high_threshold | number | The weight required for a valid transaction including the [Account Merge][account_merge] and [Set Options]() operations. 
| + +[account_merge]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#account-merge +[allow_trust]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#allow-trust +[bump_seq]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#bump-sequence +[change_trust]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#change-trust +[create_acc]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#create-account +[inflation]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#inflation +[manage_data]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#manage-data +[manage_buy_offer]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#manage-buy-offer +[manage_sell_offer]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#manage-sell-offer +[passive_sell_offer]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#create-passive-sell-offer +[path_payment_receive]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#path-payment-strict-receive +[path_payment_send]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#path-payment-strict-send +[payment]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#payment +[set_options]: https://www.stellar.org/developers/guides/concepts/list-of-operations.html#set-options + +## Links +| rel | Example | Description | `templated` | +|--------------|---------------------------------------------------------------------------------------------------------|--------------------------------------------------------------|-------------| +| data | `/accounts/GAOEWNUEKXKNGB2AAOX6S6FEP6QKCFTU7KJH647XTXQXTMOAUATX2VF5/data/{key}` | [Data fields](./data.md) related to this account | true | +| effects | `/accounts/GAOEWNUEKXKNGB2AAOX6S6FEP6QKCFTU7KJH647XTXQXTMOAUATX2VF5/effects/{?cursor,limit,order}` | The [effects](./effect.md) related to this account | true | +| offers | `/accounts/GAOEWNUEKXKNGB2AAOX6S6FEP6QKCFTU7KJH647XTXQXTMOAUATX2VF5/offers/{?cursor,limit,order}` | The [offers](./offer.md) related to this account | true | +| operations | `/accounts/GAOEWNUEKXKNGB2AAOX6S6FEP6QKCFTU7KJH647XTXQXTMOAUATX2VF5/operations/{?cursor,limit,order}` | The [operations](./operation.md) related to this account | true | +| payments | `/accounts/GAOEWNUEKXKNGB2AAOX6S6FEP6QKCFTU7KJH647XTXQXTMOAUATX2VF5/payments/{?cursor,limit,order}` | The [payments](./payment.md) related to this account | true | +| trades | `/accounts/GAOEWNUEKXKNGB2AAOX6S6FEP6QKCFTU7KJH647XTXQXTMOAUATX2VF5/trades/{?cursor,limit,order}` | The [trades](./trade.md) related to this account | true | +| transactions | `/accounts/GAOEWNUEKXKNGB2AAOX6S6FEP6QKCFTU7KJH647XTXQXTMOAUATX2VF5/transactions/{?cursor,limit,order}` | The [transactions](./transaction.md) related to this account | true | + +## Example + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/accounts/GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW" + }, + "transactions": { + "href": "https://horizon-testnet.stellar.org/accounts/GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW/transactions{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/accounts/GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW/operations{?cursor,limit,order}", + 
"templated": true + }, + "payments": { + "href": "https://horizon-testnet.stellar.org/accounts/GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW/payments{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/accounts/GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW/effects{?cursor,limit,order}", + "templated": true + }, + "offers": { + "href": "https://horizon-testnet.stellar.org/accounts/GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW/offers{?cursor,limit,order}", + "templated": true + }, + "trades": { + "href": "https://horizon-testnet.stellar.org/accounts/GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW/trades{?cursor,limit,order}", + "templated": true + }, + "data": { + "href": "https://horizon-testnet.stellar.org/accounts/GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW/data/{key}", + "templated": true + } + }, + "id": "GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW", + "paging_token": "", + "account_id": "GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW", + "sequence": "43692723777044483", + "subentry_count": 3, + "thresholds": { + "low_threshold": 0, + "med_threshold": 0, + "high_threshold": 0 + }, + "flags": { + "auth_required": false, + "auth_revocable": false, + "auth_immutable": false + }, + "balances": [ + { + "balance": "1000000.0000000", + "limit": "922337203685.4775807", + "buying_liabilities": "0.0000000", + "selling_liabilities": "0.0000000", + "last_modified_ledger": 632070, + "asset_type": "credit_alphanum4", + "asset_code": "FOO", + "asset_issuer": "GAGLYFZJMN5HEULSTH5CIGPOPAVUYPG5YSWIYDJMAPIECYEBPM2TA3QR" + }, + { + "balance": "10000.0000000", + "buying_liabilities": "0.0000000", + "selling_liabilities": "0.0000000", + "asset_type": "native" + } + ], + "signers": [ + { + "public_key": "GDLEPBJBC2VSKJCLJB264F2WDK63X4NKOG774A3QWVH2U6PERGDPUCS4", + "weight": 1, + "key": "GDLEPBJBC2VSKJCLJB264F2WDK63X4NKOG774A3QWVH2U6PERGDPUCS4", + "type": "ed25519_public_key" + }, + { + "public_key": "XCPNCUKYDHPMMH6TMHK73K5VP5A6ZTQ2L7Q74JR3TDANNFB3TMRS5OKG", + "weight": 1, + "key": "XCPNCUKYDHPMMH6TMHK73K5VP5A6ZTQ2L7Q74JR3TDANNFB3TMRS5OKG", + "type": "sha256_hash" + }, + { + "public_key": "TABGGIW6EXOVOSNJ2O27U2DUX7RWHSRBGOKQLGYDTOXPANEX6LXBX7O7", + "weight": 1, + "key": "TABGGIW6EXOVOSNJ2O27U2DUX7RWHSRBGOKQLGYDTOXPANEX6LXBX7O7", + "type": "preauth_tx" + }, + { + "public_key": "GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW", + "weight": 1, + "key": "GBWRID7MPYUDBTNQPEHUN4XOBVVDPJOHYXAVW3UTOD2RG7BDAY6O3PHW", + "type": "ed25519_public_key" + } + ], + "data": {} +} +``` + +## Endpoints +| Resource | Type | Resource URI Template | +|------------------------------------------------------------------|------------|--------------------------------------| +| [Account Details](../endpoints/accounts-single.md) | Single | `/accounts/:id` | +| [Account Data](../endpoints/data-for-account.md) | Single | `/accounts/:id/data/:key` | +| [Account Transactions](../endpoints/transactions-for-account.md) | Collection | `/accounts/:account_id/transactions` | +| [Account Operations](../endpoints/operations-for-account.md) | Collection | `/accounts/:account_id/operations` | +| [Account Payments](../endpoints/payments-for-account.md) | Collection | `/accounts/:account_id/payments` | +| [Account Effects](../endpoints/effects-for-account.md) | Collection | `/accounts/:account_id/effects` | +| [Account Offers](../endpoints/offers-for-account.md) | Collection | `/accounts/:account_id/offers` | 
diff --git a/services/horizon/internal/docs/reference/resources/asset.md b/services/horizon/internal/docs/reference/resources/asset.md
new file mode 100644
index 0000000000..730b0de73a
--- /dev/null
+++ b/services/horizon/internal/docs/reference/resources/asset.md
@@ -0,0 +1,72 @@
+---
+title: Asset
+replacement: https://developers.stellar.org/api/resources/assets/
+---
+
+**Assets** are the units that are traded on the Stellar Network.
+
+An asset consists of a type, code, and issuer.
+
+To learn more about the concept of assets in the Stellar network, take a look at the [Stellar assets concept guide](https://www.stellar.org/developers/guides/concepts/assets.html).
+
+## Attributes
+
+| Attribute        | Type   |                                                                                                                                  |
+| ---------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------ |
+| asset_type       | string | The type of this asset: "credit_alphanum4" or "credit_alphanum12".                                                              |
+| asset_code       | string | The code of this asset.                                                                                                          |
+| asset_issuer     | string | The issuer of this asset.                                                                                                        |
+| accounts         | object | The number of accounts and claimable balances holding this asset. Accounts are summarized by each state of the trust line flags. |
+| balances         | object | The number of units of credit issued, summarized by each state of the trust line flags, or if they are in a claimable balance.  |
+| flags            | object | The flags denote the enabling/disabling of certain asset issuer privileges.                                                      |
+| paging_token     | string | A [paging token](./page.md) suitable for use as the `cursor` parameter to transaction collection resources.                     |
+
+#### Flag Object
+| Attribute        | Type   |                                                                                                                                  |
+| ---------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------ |
+| auth_immutable   | bool   | With this setting, none of the following authorization flags can be changed.                                                    |
+| auth_required    | bool   | With this setting, an anchor must approve anyone who wants to hold its asset.                                                    |
+| auth_revocable   | bool   | With this setting, an anchor can set the authorize flag of an existing trustline to freeze the assets held by an asset holder. 
| + +## Links +| rel | Example | Description +|--------------|---------------------------------------------------------------------------------------------------|------------------------------------------------------------ +| toml | `https://www.stellar.org/.well-known/stellar.toml`| Link to the TOML file for this issuer | + +## Example + +```json +{ + "_links": { + "toml": { + "href": "https://www.stellar.org/.well-known/stellar.toml" + } + }, + "asset_type": "credit_alphanum4", + "asset_code": "USD", + "asset_issuer": "GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG", + "paging_token": "USD_GBAUUA74H4XOQYRSOW2RZUA4QL5PB37U3JS5NE3RTB2ELJVMIF5RLMAG_credit_alphanum4", + "accounts": { + "authorized": 91547871, + "authorized_to_maintain_liabilities": 45773935, + "unauthorized": 22886967, + "claimable_balances": 11443483 + }, + "balances": { + "authorized": "100.0000000", + "authorized_to_maintain_liabilities": "50.0000000", + "unauthorized": "25.0000000", + "claimable_balances": "12.5000000" + }, + "flags": { + "auth_required": false, + "auth_revocable": false + } +} +``` + +## Endpoints + +| Resource | Type | Resource URI Template | +| ---------------------------------------- | ---------- | ---------------------------- | +| [All Assets](../endpoints/assets-all.md) | Collection | `/assets` (`GET`) | diff --git a/services/horizon/internal/docs/reference/resources/data.md b/services/horizon/internal/docs/reference/resources/data.md new file mode 100644 index 0000000000..fb79d26d7b --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/data.md @@ -0,0 +1,22 @@ +--- +title: Data +replacement: https://developers.stellar.org/api/resources/accounts/data/ +--- + +Each account in Stellar network can contain multiple key/value pairs associated with it. Horizon can be used to retrieve value of each data key. + +When horizon returns information about a single account data key it uses the following format: + +## Attributes + +| Attribute | Type | | +| --- | --- | --- | +| value | base64-encoded string | The base64-encoded value for the key | + +## Example + +```json +{ + "value": "MTAw" +} +``` diff --git a/services/horizon/internal/docs/reference/resources/effect.md b/services/horizon/internal/docs/reference/resources/effect.md new file mode 100644 index 0000000000..8eb2c29452 --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/effect.md @@ -0,0 +1,132 @@ +--- +title: Effect +replacement: https://developers.stellar.org/api/resources/effects/ +--- + +A successful operation will yield zero or more **effects**. These effects +represent specific changes that occur in the ledger, but are not necessarily +directly reflected in the [ledger](https://www.stellar.org/developers/learn/concepts/ledger.html) or [history](https://github.com/stellar/stellar-core/blob/master/docs/history.md), as [transactions](https://www.stellar.org/developers/learn/concepts/transactions.html) and [operations](https://www.stellar.org/developers/learn/concepts/operations.html) are. 
+ +## Effect types + +We can distinguish 6 effect groups: +- Account effects +- Signer effects +- Trustline effects +- Trading effects +- Data effects +- Misc effects + +### Account effects + +| Type | Operation | +|---------------------------------------|------------------------------------------------------| +| Account Created | create_account | +| Account Removed | merge_account | +| Account Credited | create_account, payment, path_payment, merge_account | +| Account Debited | create_account, payment, path_payment, merge_account | +| Account Thresholds Updated | set_options | +| Account Home Domain Updated | set_options | +| Account Flags Updated | set_options | +| Account Inflation Destination Updated | set_options | + +### Signer effects + +| Type | Operation | +|----------------|-------------| +| Signer Created | set_options | +| Signer Removed | set_options | +| Signer Updated | set_options | + +### Trustline effects + +| Type | Operation | +|------------------------|---------------------------| +| Trustline Created | change_trust | +| Trustline Removed | change_trust | +| Trustline Updated | change_trust, allow_trust | +| Trustline Authorized | allow_trust | +| Trustline Deauthorized | allow_trust | + +### Trading effects + +| Type | Operation | +|---------------|------------------------------------------------------------------------------| +| Offer Created | manage_buy_offer, manage_sell_offer, create_passive_sell_offer | +| Offer Removed | manage_buy_offer, manage_sell_offer, create_passive_sell_offer, path_payment | +| Offer Updated | manage_buy_offer, manage_sell_offer, create_passive_sell_offer, path_payment | +| Trade | manage_buy_offer, manage_sell_offer, create_passive_sell_offer, path_payment | +### Data effects + +| Type | Operation | +|--------------|-------------| +| Data Created | manage_data | +| Data Removed | manage_data | +| Data Updated | manage_data | +### Misc effects + +| Type | Operation | +|-----------------|---------------| +| Sequence Bumped | bump_sequence | + +## Attributes + +Attributes depend on effect type. 
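+
+Because the attributes vary, a client typically decodes the fields shared by all effects first and only then branches on `type` to read type-specific data. The sketch below is a minimal illustration using Go's standard library and the account effects endpoint listed at the end of this page; the account ID is taken from the example below, and the `account_created` case is just one possible branch.
+
+```go
+package main
+
+import (
+  "encoding/json"
+  "fmt"
+  "net/http"
+)
+
+// effect carries the fields shared by every effect type. Records are kept as
+// raw JSON so type-specific attributes can be decoded after branching on Type.
+type effect struct {
+  Type        string `json:"type"`
+  TypeI       int    `json:"type_i"`
+  Account     string `json:"account"`
+  PagingToken string `json:"paging_token"`
+}
+
+type effectsPage struct {
+  Embedded struct {
+    Records []json.RawMessage `json:"records"`
+  } `json:"_embedded"`
+}
+
+func main() {
+  // Illustrative account ID from the example below; replace with your own.
+  url := "https://horizon-testnet.stellar.org/accounts/GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K/effects?limit=5&order=desc"
+  resp, err := http.Get(url)
+  if err != nil {
+    panic(err)
+  }
+  defer resp.Body.Close()
+
+  var page effectsPage
+  if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
+    panic(err)
+  }
+  for _, raw := range page.Embedded.Records {
+    var e effect
+    if err := json.Unmarshal(raw, &e); err != nil {
+      panic(err)
+    }
+    switch e.Type {
+    case "account_created":
+      // Only this effect type carries starting_balance, so decode it on demand.
+      var extra struct {
+        StartingBalance string `json:"starting_balance"`
+      }
+      if err := json.Unmarshal(raw, &extra); err == nil {
+        fmt.Println(e.Account, "created with starting balance", extra.StartingBalance)
+      }
+    default:
+      fmt.Println(e.Account, e.Type, "type_i:", e.TypeI)
+    }
+  }
+}
+```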
+ +## Links + +| rel | Example | Relation | +|-----------|---------------------------------------------------------------|-----------------------------------| +| self | `/effects?order=asc\u0026limit=1` | | +| prev | `/effects?order=desc\u0026limit=1\u0026cursor=141733924865-1` | | +| next | `/effects?order=asc\u0026limit=1\u0026cursor=141733924865-1` | | +| operation | `/operations/141733924865` | Operation that created the effect | + +## Example + +```json +{ + "_embedded": { + "records": [ + { + "_links": { + "operation": { + "href": "/operations/141733924865" + }, + "precedes": { + "href": "/effects?cursor=141733924865-1\u0026order=asc" + }, + "succeeds": { + "href": "/effects?cursor=141733924865-1\u0026order=desc" + } + }, + "account": "GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K", + "paging_token": "141733924865-1", + "starting_balance": "10000000.0", + "type_i": 0, + "type": "account_created" + } + ] + }, + "_links": { + "next": { + "href": "/effects?order=asc\u0026limit=1\u0026cursor=141733924865-1" + }, + "prev": { + "href": "/effects?order=desc\u0026limit=1\u0026cursor=141733924865-1" + }, + "self": { + "href": "/effects?order=asc\u0026limit=1\u0026cursor=" + } + } +} +``` + +## Endpoints + +| Resource | Type | Resource URI Template | +|--------------------------------------------------------------------------------------------------------------------------------------------|------------|---------------------------------| +| [All Effects](https://github.com/stellar/go/blob/master/services/horizon/internal/docs/reference/endpoints/effects-all.md) | Collection | `/effects` | +| [Operation Effects](https://github.com/stellar/go/blob/master/services/horizon/internal/docs/reference/endpoints/effects-for-operation.md) | Collection | `/operations/:id/effects` | +| [Account Effects](https://github.com/stellar/go/blob/master/services/horizon/internal/docs/reference/endpoints/effects-for-account.md) | Collection | `/accounts/:account_id/effects` | +| [Ledger Effects](https://github.com/stellar/go/blob/master/services/horizon/internal/docs/reference/endpoints/effects-for-ledger.md) | Collection | `/ledgers/:ledger_id/effects` | diff --git a/services/horizon/internal/docs/reference/resources/ledger.md b/services/horizon/internal/docs/reference/resources/ledger.md new file mode 100644 index 0000000000..23632cc1da --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/ledger.md @@ -0,0 +1,100 @@ +--- +title: Ledger +replacement: https://developers.stellar.org/api/resources/ledgers/ +--- + +A **ledger** resource contains information about a given ledger. + +To learn more about the concept of ledgers in the Stellar network, take a look at the [Stellar ledger concept guide](https://www.stellar.org/developers/learn/concepts/ledger.html). + +## Attributes + +| Attribute | Type | | +|------------------------------|--------|------------------------------------------------------------------------------------------------------------------------------| +| id | string | The id is a unique identifier for this ledger. | +| paging_token | number | A [paging token](./page.md) suitable for use as a `cursor` parameter. | +| hash | string | A hex-encoded, lowercase SHA-256 hash of the ledger's [XDR](../../learn/xdr.md)-encoded form. | +| prev_hash | string | The hash of the ledger that chronologically came before this one. | +| sequence | number | Sequence number of this ledger, suitable for use as the as the :id parameter for url templates that require a ledger number. 
| +| successful_transaction_count | number | The number of successful transactions in this ledger. | +| failed_transaction_count | number | The number of failed transactions in this ledger. | +| operation_count | number | The number of operations applied in this ledger. | +| tx_set_operation_count | number | The number of operations in this ledger. This number includes operations from failed and successful transactions. | +| closed_at | string | An [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) formatted string of when this ledger was closed. | +| total_coins | string | The total number of lumens in circulation. | +| fee_pool | string | The sum of all transaction fees *(in lumens)* since the last inflation operation. They are redistributed during [inflation]. | +| base_fee | number | The [fee] the network charges per operation in a transaction. | +| base_reserve | string | The [reserve][fee] the network uses when calculating an account's minimum balance. | +| max_tx_set_size | number | The maximum number of transactions validators have agreed to process in a given ledger. | +| protocol_version | number | The protocol version that the stellar network was running when this ledger was committed. | +| header_xdr | string | A base64 encoded string of the raw `LedgerHeader` xdr struct for this ledger. | +| base_fee_in_stroops | number | The [fee] the network charges per operation in a transaction. Expressed in stroops. | +| base_reserve_in_stroops | number | The [reserve][fee] the network uses when calculating an account's minimum balance. Expressed in stroops. | + +## Links +| | Example | Relation | templated | +|--------------|---------------------------------------------------|---------------------------------|-----------| +| self | `/ledgers/500` | | | +| effects | `/ledgers/500/effects/{?cursor,limit,order}` | The effects in this transaction | true | +| operations | `/ledgers/500/operations/{?cursor,limit,order}` | The operations in this ledger | true | +| transactions | `/ledgers/500/transactions/{?cursor,limit,order}` | The transactions in this ledger | true | + + +## Example + +```json +{ + "_links": { + "effects": { + "href": "/ledgers/500/effects/{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "/ledgers/500/operations/{?cursor,limit,order}", + "templated": true + }, + "self": { + "href": "/ledgers/500" + }, + "transactions": { + "href": "/ledgers/500/transactions/{?cursor,limit,order}", + "templated": true + } + }, + "id": "689f00d4824b8e69330bf4ad7eb10092ff2f8fdb76d4668a41eebb9469ef7f30", + "paging_token": "2147483648000", + "hash": "689f00d4824b8e69330bf4ad7eb10092ff2f8fdb76d4668a41eebb9469ef7f30", + "prev_hash": "b608e110c7cc58200c912140f121af50dc5ef407aabd53b76e1741080aca1cf0", + "sequence": 500, + "transaction_count": 0, + "successful_transaction_count": 0, + "failed_transaction_count": 0, + "operation_count": 0, + "tx_set_operation_count": 0, + "closed_at": "2015-07-09T21:39:28Z", + "total_coins": "100000000000.0000000", + "fee_pool": "0.0025600", + "base_fee": 100, + "base_reserve": "10.0000000", + "max_tx_set_size": 50, + "protocol_version": 8, + "header_xdr": "...", + "base_fee_in_stroops": 100, + "base_reserve_in_stroops": 100000000 +} +``` + +## Endpoints +| Resource | Type | Resource URI Template | +|-------------------------|------------|------------------------------------| +| [All ledgers](../endpoints/ledgers-all.md) | Collection | `/ledgers` | +| [Single Ledger](../endpoints/ledgers-single.md) | Single | `/ledgers/:id` | +| [Ledger 
Transactions](../endpoints/transactions-for-ledger.md) | Collection | `/ledgers/:ledger_id/transactions` | +| [Ledger Operations](../endpoints/operations-for-ledger.md) | Collection | `/ledgers/:ledger_id/operations` | +| [Ledger Payments](../endpoints/payments-for-ledger.md) | Collection | `/ledgers/:ledger_id/payments` | +| [Ledger Effects](../endpoints/effects-for-ledger.md) | Collection | `/ledgers/:ledger_id/effects` | + + + +[inflation]: https://www.stellar.org/developers/learn/concepts/inflation.html +[fee]: https://www.stellar.org/developers/learn/concepts/fees.html diff --git a/services/horizon/internal/docs/reference/resources/offer.md b/services/horizon/internal/docs/reference/resources/offer.md new file mode 100644 index 0000000000..14c2a8cb96 --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/offer.md @@ -0,0 +1,81 @@ +--- +title: Offer +replacement: https://developers.stellar.org/api/resources/offers/ +--- + +Accounts on the Stellar network can make [offers](http://stellar.org/developers/learn/concepts/exchange.html) to buy or sell assets. Users can create offers with the [Manage Offer](http://stellar.org/developers/learn/concepts/list-of-operations.html) operation. + +Horizon only returns offers that belong to a particular account. When it does, it uses the following format: + +## Attributes +| Attribute | Type | | +|----------------------|-------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------| +| id | string | The ID of this offer. | +| paging_token | string | A [paging token](./page.md) suitable for use as a `cursor` parameter. | +| seller | string | Account id of the account making this offer. | +| selling | [Asset](http://stellar.org/developers/learn/concepts/assets.html) | The Asset this offer wants to sell. | +| buying | [Asset](http://stellar.org/developers/learn/concepts/assets.html) | The Asset this offer wants to buy. | +| amount | string | The amount of `selling` the account making this offer is willing to sell. | +| price_r | object | An object of a number numerator and number denominator that represent the buy and sell price of the currencies on offer. | +| price | string | How many units of `buying` it takes to get 1 unit of `selling`. A number representing the decimal form of `price_r`. | +| last_modified_ledger | integer | sequence number for the latest ledger in which this offer was modified. | +| last_modified_time | string | An ISO 8601 formatted string of last modification time. | + +#### Price_r Object +Price_r is a more precise representation of a bid/ask offer. + +| Attribute | Type | | +|-----------|--------|------------------| +| n | number | The numerator. | +| d | number | The denominator. | + +Thus to get price you would take n / d. + + + +## Links +| rel | Example | Description | `templated` | +|--------|------------------------------------------|---------------------------------------------------------|-------------| +| seller | `/accounts/{seller}?cursor,limit,order}` | Link to details about the account that made this offer. 
| true | + +## Example + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/offers/2611" + }, + "offer_maker": { + "href": "https://horizon-testnet.stellar.org/accounts/GDG3NOK5YI7A4FCBHE6SKI4L65R7UPRBZUZVBT44IBTQBWGUSTJDDKBQ" + } + }, + "id": "2611", + "paging_token": "2611", + "seller": "GDG3NOK5YI7A4FCBHE6SKI4L65R7UPRBZUZVBT44IBTQBWGUSTJDDKBQ", + "selling": { + "asset_type": "credit_alphanum12", + "asset_code": "USD", + "asset_issuer": "GCL3BJDFYQ2KAV7ARC4YCTERNJFOBOBQXSG556TX4YMOPKGEDV5K6LCQ" + }, + "buying": { + "asset_type": "native" + }, + "amount": "1.0000000", + "price_r": { + "n": 1463518003, + "d": 25041627 + }, + "price": "58.4434072", + "last_modified_ledger": 196458, + "last_modified_time": "2020-02-10T18:51:42Z" +} +``` + +## Endpoints + +| Resource | Type | Resource URI Template | +|------------------------------------------------------|------------|--------------------------------| +| [Offers](../endpoints/offers.md) | Collection | `/offers` | +| [Account Offers](../endpoints/offers-for-account.md) | Collection | `/accounts/:account_id/offers` | +| [Offers Details](../endpoints/offer-details.md) | Single | `/offers/:offer_id` | diff --git a/services/horizon/internal/docs/reference/resources/operation.md b/services/horizon/internal/docs/reference/resources/operation.md new file mode 100644 index 0000000000..887bd277c2 --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/operation.md @@ -0,0 +1,840 @@ +--- +title: Operation +replacement: https://developers.stellar.org/api/resources/operations/ +--- + +[Operations](https://www.stellar.org/developers/learn/concepts/operations.html) are objects that represent a desired change to the ledger: payments, +offers to exchange currency, changes made to account options, etc. Operations +are submitted to the Stellar network grouped in a [Transaction](./transaction.md). + +To learn more about the concept of operations in the Stellar network, take a look at the [Stellar operations concept guide](https://www.stellar.org/developers/learn/concepts/operations.html). + +## Operation Types + +| type | type_i | description | +|---------------------------------------------------------|--------|------------------------------------------------------------------------------------------------------------| +| [CREATE_ACCOUNT](#create-account) | 0 | Creates a new account in Stellar network. | +| [PAYMENT](#payment) | 1 | Sends a simple payment between two accounts in Stellar network. | +| [PATH_PAYMENT_STRICT_RECEIVE](#path-payment) | 2 | Sends a path payment strict receive between two accounts in the Stellar network. | +| [PATH_PAYMENT_STRICT_SEND](#path-payment-strict-send) | 13 | Sends a path payment strict send between two accounts in the Stellar network. | +| [MANAGE_SELL_OFFER](#manage-sell-offer) | 3 | Creates, updates or deletes a sell offer in the Stellar network. | +| [MANAGE_BUY_OFFER](#manage-buy-offer) | 12 | Creates, updates or deletes a buy offer in the Stellar network. | +| [CREATE_PASSIVE_SELL_OFFER](#create-passive-sell-offer) | 4 | Creates an offer that won't consume a counter offer that exactly matches this offer. | +| [SET_OPTIONS](#set-options) | 5 | Sets account options (inflation destination, adding signers, etc.) | +| [CHANGE_TRUST](#change-trust) | 6 | Creates, updates or deletes a trust line. | +| [ALLOW_TRUST](#allow-trust) | 7 | Updates the "authorized" flag of an existing trust line this is called by the issuer of the related asset. 
| +| [ACCOUNT_MERGE](#account-merge) | 8 | Deletes account and transfers remaining balance to destination account. | +| [INFLATION](#inflation) | 9 | Runs inflation. | +| [MANAGE_DATA](#manage-data) | 10 | Set, modify or delete a Data Entry (name/value pair) for an account. | +| [BUMP_SEQUENCE](#bump-sequence) | 11 | Bumps forward the sequence number of an account. | + + +Every operation type shares a set of common attributes and links, some operations also contain +additional attributes and links specific to that operation type. + + + +## Common Attributes + +| | Type | | +|------------------------|--------|-----------------------------------------------------------------------------------------------------------------------------| +| id | number | The canonical id of this operation, suitable for use as the :id parameter for url templates that require an operation's ID. | +| paging_token | any | A [paging token](./page.md) suitable for use as a `cursor` parameter. | +| transaction_successful | bool | Indicates if this operation is part of successful transaction. | +| type | string | A string representation of the type of operation. | +| type_i | number | Specifies the type of operation, See "Types" section below for reference. | + +## Common Links + +| | Relation | +|-------------|---------------------------------------------------------------------------| +| self | Relative link to the current operation | +| succeeds | Relative link to the list of operations succeeding the current operation. | +| precedes | Relative link to the list of operations preceding the current operation. | +| effects | The effects this operation triggered | +| transaction | The transaction this operation is part of | + + +Each operation type will have a different set of attributes, in addition to the +common attributes listed above. + + +### Create Account + +Create Account operation represents a new account creation. + +#### Attributes + +| Field | Type | Description | +|------------------|--------|------------------------------------| +| account | string | A new account that was funded. | +| funder | string | Account that funded a new account. | +| starting_balance | string | Amount the account was funded. | + + +#### Example +```json +{ + "_links": { + "effects": { + "href": "/operations/402494270214144/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=402494270214144&order=asc" + }, + "self": { + "href": "/operations/402494270214144" + }, + "succeeds": { + "href": "/operations?cursor=402494270214144&order=desc" + }, + "transactions": { + "href": "/transactions/402494270214144" + } + }, + "account": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "funder": "GBIA4FH6TV64KSPDAJCNUQSM7PFL4ILGUVJDPCLUOPJ7ONMKBBVUQHRO", + "id": "402494270214144", + "paging_token": "402494270214144", + "starting_balance": "10000.0", + "type_i": 0, + "type": "create_account" +} +``` + + +### Payment + +A payment operation represents a payment from one account to another. This payment +can be either a simple native asset payment or a fiat asset payment. + +#### Attributes + +| Field | Type | Description | +|--------------|--------|----------------------------------------------| +| from | string | Sender of a payment. | +| to | string | Destination of a payment. | +| asset_type | string | Asset type (native / alphanum4 / alphanum12) | +| asset_code | string | Code of the destination asset. | +| asset_issuer | string | Asset issuer. | +| amount | string | Amount sent. 
| + +#### Links + +| | Example | Relation | +|----------|--------------------------------------------------------------------|-------------------| +| sender | /accounts/GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2 | Sending account | +| receiver | /accounts/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ | Receiving account | + +#### Example + +```json +{ + "_links": { + "effects": { + "href": "/operations/58402965295104/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=58402965295104&order=asc" + }, + "self": { + "href": "/operations/58402965295104" + }, + "succeeds": { + "href": "/operations?cursor=58402965295104&order=desc" + }, + "transactions": { + "href": "/transactions/58402965295104" + } + }, + "amount": "200.0", + "asset_type": "native", + "from": "GAKLBGHNHFQ3BMUYG5KU4BEWO6EYQHZHAXEWC33W34PH2RBHZDSQBD75", + "id": "58402965295104", + "paging_token": "58402965295104", + "to": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "transaction_successful": true, + "type_i": 1, + "type": "payment" +} +``` + + +### Path Payment Strict Receive + +A path payment strict receive operation represents a payment from one account to another through a path. This type of payment starts as one type of asset and ends as another type of asset. There can be other assets that are traded into and out of along the path. + + +#### Attributes + +| Field | Type | Description | +|---------------------|-------------------------------|-----------------------------------------------------------------------------| +| from | string | Sender of a payment. | +| to | string | Destination of a payment. | +| asset_code | string | Code of the destination asset. | +| asset_issuer | string | Destination asset issuer. | +| asset_type | string | Destination asset type (native / alphanum4 / alphanum12) | +| amount | string | Amount received. | +| source_asset_code | string | Code of the source asset. | +| source_asset_issuer | string | Source asset issuer. | +| source_asset_type | string | Source asset type (native / alphanum4 / alphanum12) | +| source_max | string | Max send amount. | +| source_amount | string | Amount sent. | +| path | array of [Assets](./asset.md) | Additional hops the operation went through to get to the destination asset. 
| + +#### Example + +```json +{ + "_links": { + "effects": { + "href": "/operations/25769807873/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=25769807873\u0026order=asc" + }, + "self": { + "href": "/operations/25769807873" + }, + "succeeds": { + "href": "/operations?cursor=25769807873\u0026order=desc" + }, + "transaction": { + "href": "/transactions/25769807872" + } + }, + "amount": "10.0", + "asset_code": "EUR", + "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", + "asset_type": "credit_alphanum4", + "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + "id": "25769807873", + "paging_token": "25769807873", + "source_asset_code": "USD", + "source_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", + "source_asset_type": "credit_alphanum4", + "source_amount": "10.0", + "source_max": "10.0", + "to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "transaction_successful": true, + "type_i": 2, + "type": "path_payment_strict_receive" +} +``` + + +### Path Payment Strict Send + +A path payment strict send operation represents a payment from one account to another through a path. This type of payment starts as one type of asset and ends as another type of asset. There can be other assets that are traded into and out of along the path. + +Unlike [path payment strict receive](#path-payment), this operation sends precisely the source amount, ensuring that the destination account receives at least the minimum specified amount (the amount received will vary based on offers in the order books). + + +#### Attributes + +| Field | Type | Description | +|---------------------|-------------------------------|-----------------------------------------------------------------------------| +| from | string | Sender of a payment. | +| to | string | Destination of a payment. | +| asset_type | string | Destination asset type (native / alphanum4 / alphanum12) | +| asset_code | string | Code of the destination asset. | +| asset_issuer | string | Destination asset issuer. | +| amount | string | Amount received. | +| source_asset_type | string | Source asset type (native / alphanum4 / alphanum12) | +| source_asset_code | string | Source asset code. | +| source_asset_issuer | string | Source asset issuer. | +| source_amount | string | Amount sent. | +| destination_min | string | The minimum amount of destination asset expected to be received. | +| path | array of [Assets](./asset.md) | Additional hops the operation went through to get to the destination asset. 
| + + +#### Example + +```json +{ + "_links": { + "self": { + "href": "/operations/120903307907612673" + }, + "transaction": { + "href": "/transactions/f60f32eff7f1dd0649cfe2986955d12f6ff45288357fe1526600642ea1b418aa" + }, + "effects": { + "href": "/operations/120903307907612673/effects" + }, + "succeeds": { + "href": "/effects?order=desc&cursor=120903307907612673" + }, + "precedes": { + "href": "/effects?order=asc&cursor=120903307907612673" + } + }, + "id": "120903307907612673", + "paging_token": "120903307907612673", + "transaction_successful": true, + "source_account": "GCXVEEBWI4YMRK6AFJQSEUBYDQL4PZ24ECAPJE2ZIAPIQZLZIBAX3LIF", + "type": "path_payment_strict_send", + "type_i": 13, + "created_at": "2020-02-09T20:32:53Z", + "transaction_hash": "f60f32eff7f1dd0649cfe2986955d12f6ff45288357fe1526600642ea1b418aa", + "asset_type": "native", + "from": "GCXVEEBWI4YMRK6AFJQSEUBYDQL4PZ24ECAPJE2ZIAPIQZLZIBAX3LIF", + "to": "GCXVEEBWI4YMRK6AFJQSEUBYDQL4PZ24ECAPJE2ZIAPIQZLZIBAX3LIF", + "amount": "0.0859000", + "path": [ + + ], + "source_amount": "1000.0000000", + "destination_min": "0.0859000", + "source_asset_type": "credit_alphanum4", + "source_asset_code": "KIN", + "source_asset_issuer": "GBDEVU63Y6NTHJQQZIKVTC23NWLQVP3WJ2RI2OTSJTNYOIGICST6DUXR" +} +``` + + +### Manage Sell Offer + +A "Manage Sell Offer" operation can create, update or delete a sell +offer to trade assets in the Stellar network. +It specifies an issuer, a price and amount of a given asset to +buy or sell. + +When this operation is applied to the ledger, trades can potentially be executed if +this offer crosses others that already exist in the ledger. + +In the event that there are not enough crossing orders to fill the order completely +a new "Offer" object will be created in the ledger. As other accounts make +offers or payments, this offer can potentially be filled. + +#### Sell Offer vs. Buy Offer + +A [sell offer](#manage-sell-offer) specifies a certain amount of the `selling` asset that should be sold in exchange for the maximum quantity of the `buying` asset. It additionally only crosses offers where the price is higher than `price`. + +A [buy offer](#manage-buy-offer) specifies a certain amount of the `buying` asset that should be bought in exchange for the minimum quantity of the `selling` asset. It additionally only crosses offers where the price is lower than `price`. + +Both will fill only partially (or not at all) if there are few (or no) offers that cross them. + +#### Attributes + +| Field | Type | Description | +|----------------------|--------|---------------------------------------------------------| +| offer_id | string | Offer ID. | +| amount | string | Amount of asset to be sold. | +| buying_asset_code | string | The code of asset to buy. | +| buying_asset_issuer | string | The issuer of asset to buy. | +| buying_asset_type | string | Type of asset to buy (native / alphanum4 / alphanum12) | +| price | string | Price of selling_asset in units of buying_asset | +| price_r | Object | n: price numerator, d: price denominator | +| selling_asset_code | string | The code of asset to sell. | +| selling_asset_issuer | string | The issuer of asset to sell. 
| +| selling_asset_type | string | Type of asset to sell (native / alphanum4 / alphanum12) | + +#### Example + +```json +{ + "_links": { + "effects": { + "href": "/operations/592323234762753/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=592323234762753\u0026order=asc" + }, + "self": { + "href": "/operations/592323234762753" + }, + "succeeds": { + "href": "/operations?cursor=592323234762753\u0026order=desc" + }, + "transaction": { + "href": "/transactions/592323234762752" + } + }, + "amount": "100.0", + "buying_asset_code": "CHP", + "buying_asset_issuer": "GAC2ZUXVI5266NMMGDPBMXHH4BTZKJ7MMTGXRZGX2R5YLMFRYLJ7U5EA", + "buying_asset_type": "credit_alphanum4", + "id": "592323234762753", + "offer_id": "8", + "paging_token": "592323234762753", + "price": "2.0", + "price_r": { + "d": 1, + "n": 2 + }, + "selling_asset_code": "YEN", + "selling_asset_issuer": "GDVXG2FMFFSUMMMBIUEMWPZAIU2FNCH7QNGJMWRXRD6K5FZK5KJS4DDR", + "selling_asset_type": "credit_alphanum4", + "transaction_successful": true, + "type_i": 3, + "type": "manage_sell_offer" +} +``` + + +### Manage Buy Offer + +A "Manage Buy Offer" operation can create, update or delete a buy +offer to trade assets in the Stellar network. +It specifies an issuer, a price and amount of a given asset to +buy or sell. + +When this operation is applied to the ledger, trades can potentially be executed if +this offer crosses others that already exist in the ledger. + +In the event that there are not enough crossing orders to fill the order completely +a new "Offer" object will be created in the ledger. As other accounts make +offers or payments, this offer can potentially be filled. + +#### Attributes + +| Field | Type | Description | +|----------------------|--------|---------------------------------------------------------------| +| offer_id | string | Offer ID. | +| buy_amount | string | Amount of asset to be bought. | +| buying_asset_code | string | The code of asset to buy. | +| buying_asset_issuer | string | The issuer of asset to buy. | +| buying_asset_type | string | Type of asset to buy (native / alphanum4 / alphanum12) | +| price | string | Price of thing being bought in terms of what you are selling. | +| price_r | Object | n: price numerator, d: price denominator | +| selling_asset_code | string | The code of asset to sell. | +| selling_asset_issuer | string | The issuer of asset to sell. 
| +| selling_asset_type | string | Type of asset to sell (native / alphanum4 / alphanum12) | +#### Example + +```json +{ + "_links": { + "effects": { + "href": "/operations/592323234762753/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=592323234762753\u0026order=asc" + }, + "self": { + "href": "/operations/592323234762753" + }, + "succeeds": { + "href": "/operations?cursor=592323234762753\u0026order=desc" + }, + "transaction": { + "href": "/transactions/592323234762752" + } + }, + "amount": "100.0", + "buying_asset_code": "CHP", + "buying_asset_issuer": "GAC2ZUXVI5266NMMGDPBMXHH4BTZKJ7MMTGXRZGX2R5YLMFRYLJ7U5EA", + "buying_asset_type": "credit_alphanum4", + "id": "592323234762753", + "offer_id": "8", + "paging_token": "592323234762753", + "price": "2.0", + "price_r": { + "d": 1, + "n": 2 + }, + "selling_asset_code": "YEN", + "selling_asset_issuer": "GDVXG2FMFFSUMMMBIUEMWPZAIU2FNCH7QNGJMWRXRD6K5FZK5KJS4DDR", + "selling_asset_type": "credit_alphanum4", + "transaction_successful": true, + "type_i": 12, + "type": "manage_buy_offer" +} +``` + + +### Create Passive Sell Offer + +β€œCreate Passive Sell Offer” operation creates an offer that won't consume a counter offer that exactly matches this offer. This is useful for offers just used as 1:1 exchanges for path payments. Use Manage Sell Offer to manage this offer after using this operation to create it. + +#### Attributes + +As in [Manage Sell Offer](#manage-sell-offer) operation. + +#### Example + +```json +{ + "_links": { + "effects": { + "href": "/operations/1127729562914817/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=1127729562914817\u0026order=asc" + }, + "self": { + "href": "/operations/1127729562914817" + }, + "succeeds": { + "href": "/operations?cursor=1127729562914817\u0026order=desc" + }, + "transaction": { + "href": "/transactions/1127729562914816" + } + }, + "amount": "11.27827", + "buying_asset_code": "USD", + "buying_asset_issuer": "GDS5JW5E6DRSSN5XK4LW7E6VUMFKKE2HU5WCOVFTO7P2RP7OXVCBLJ3Y", + "buying_asset_type": "credit_alphanum4", + "id": "1127729562914817", + "offer_id": "9", + "paging_token": "1127729562914817", + "price": "1.0", + "price_r": { + "d": 1, + "n": 1 + }, + "selling_asset_type": "native", + "transaction_successful": true, + "type_i": 4, + "type": "create_passive_sell_offer" +} +``` + + + +### Set Options + +Use β€œSet Options” operation to set following options to your account: +* Set/clear account flags: + * AUTH_REQUIRED_FLAG (0x1) - if set, TrustLines are created with authorized set to `false` requiring the issuer to set it for each TrustLine. + * AUTH_REVOCABLE_FLAG (0x2) - if set, the authorized flag in TrustLines can be cleared. Otherwise, authorization cannot be revoked. +* Set the account’s inflation destination. +* Add new signers to the account. +* Set home domain. + + +#### Attributes + +| Field | Type | Description | +|-------------------|--------|------------------------------------------------------------------------------| +| signer_key | string | The public key of the new signer. | +| signer_weight | int | The weight of the new signer (1-255). | +| master_key_weight | int | The weight of the master key (1-255). | +| low_threshold | int | The sum weight for the low threshold. | +| med_threshold | int | The sum weight for the medium threshold. | +| high_threshold | int | The sum weight for the high threshold. 
|
+| home_domain | string | The home domain used for reverse federation lookup |
+| set_flags | array | The array of numeric values of flags that have been set in this operation |
+| set_flags_s | array | The array of string values of flags that have been set in this operation |
+| clear_flags | array | The array of numeric values of flags that have been cleared in this operation |
+| clear_flags_s | array | The array of string values of flags that have been cleared in this operation |
+
+
+#### Example
+
+```json
+{
+  "_links": {
+    "effects": {
+      "href": "/operations/696867033714691/effects{?cursor,limit,order}",
+      "templated": true
+    },
+    "precedes": {
+      "href": "/operations?cursor=696867033714691\u0026order=asc"
+    },
+    "self": {
+      "href": "/operations/696867033714691"
+    },
+    "succeeds": {
+      "href": "/operations?cursor=696867033714691\u0026order=desc"
+    },
+    "transaction": {
+      "href": "/transactions/696867033714688"
+    }
+  },
+  "high_threshold": 3,
+  "home_domain": "stellar.org",
+  "id": "696867033714691",
+  "low_threshold": 0,
+  "med_threshold": 3,
+  "paging_token": "696867033714691",
+  "set_flags": [
+    1
+  ],
+  "set_flags_s": [
+    "auth_required_flag"
+  ],
+  "transaction_successful": true,
+  "type_i": 5,
+  "type": "set_options"
+}
+```
+
+
+### Change Trust
+
+Use the "Change Trust" operation to create, update, or delete a trust line from the source account to another account. The issuer being trusted and the asset code are in the given Asset object.
+
+#### Attributes
+
+| Field | Type | Description |
+|--------------|--------|----------------------------------------------|
+| asset_code | string | Asset code. |
+| asset_issuer | string | Asset issuer. |
+| asset_type | string | Asset type (native / alphanum4 / alphanum12) |
+| trustee | string | Trustee account. |
+| trustor | string | Trustor account. |
+| limit | string | The limit for the asset. |
+
+#### Example
+
+```json
+{
+  "_links": {
+    "effects": {
+      "href": "/operations/574731048718337/effects{?cursor,limit,order}",
+      "templated": true
+    },
+    "precedes": {
+      "href": "/operations?cursor=574731048718337\u0026order=asc"
+    },
+    "self": {
+      "href": "/operations/574731048718337"
+    },
+    "succeeds": {
+      "href": "/operations?cursor=574731048718337\u0026order=desc"
+    },
+    "transaction": {
+      "href": "/transactions/574731048718336"
+    }
+  },
+  "asset_code": "CHP",
+  "asset_issuer": "GAC2ZUXVI5266NMMGDPBMXHH4BTZKJ7MMTGXRZGX2R5YLMFRYLJ7U5EA",
+  "asset_type": "credit_alphanum4",
+  "id": "574731048718337",
+  "limit": "5.0",
+  "paging_token": "574731048718337",
+  "trustee": "GAC2ZUXVI5266NMMGDPBMXHH4BTZKJ7MMTGXRZGX2R5YLMFRYLJ7U5EA",
+  "trustor": "GDVXG2FMFFSUMMMBIUEMWPZAIU2FNCH7QNGJMWRXRD6K5FZK5KJS4DDR",
+  "transaction_successful": true,
+  "type_i": 6,
+  "type": "change_trust"
+}
+```
+
+
+### Allow Trust
+
+Updates the "authorized" flag of an existing trust line. This operation is called by the issuer of the asset.
+
+Heads up! Unless the issuing account has `AUTH_REVOCABLE_FLAG` set, the "authorized" flag can only be set and never cleared.
+
+#### Attributes
+
+| Field | Type | Description |
+|--------------|--------|---------------------------------------------------------|
+| asset_code | string | Asset code. |
+| asset_issuer | string | Asset issuer. |
+| asset_type | string | Asset type (native / alphanum4 / alphanum12) |
+| authorize | bool | `true` when allowing trust, `false` when revoking trust |
+| trustee | string | Trustee account. |
+| trustor | string | Trustor account.
| + +#### Example + +```json +{ + "_links": { + "effects": { + "href": "/operations/34359742465/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=34359742465\u0026order=asc" + }, + "self": { + "href": "/operations/34359742465" + }, + "succeeds": { + "href": "/operations?cursor=34359742465\u0026order=desc" + }, + "transaction": { + "href": "/transactions/34359742464" + } + }, + "asset_code": "USD", + "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", + "asset_type": "credit_alphanum4", + "authorize": true, + "id": "34359742465", + "paging_token": "34359742465", + "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", + "trustor": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", + "transaction_successful": true, + "type_i": 7, + "type": "allow_trust" +} +``` + + +### Account Merge + +Removes the account and transfers all remaining XLM to the destination account. + +#### Attributes + +| Field | Type | Description | +|-------|--------|-------------------------------------------------------------| +| into | string | Account ID where funds of deleted account were transferred. | + +#### Example +```json +{ + "_links": { + "effects": { + "href": "/operations/799357838299137/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=799357838299137\u0026order=asc" + }, + "self": { + "href": "/operations/799357838299137" + }, + "succeeds": { + "href": "/operations?cursor=799357838299137\u0026order=desc" + }, + "transaction": { + "href": "/transactions/799357838299136" + } + }, + "account": "GBCR5OVQ54S2EKHLBZMK6VYMTXZHXN3T45Y6PRX4PX4FXDMJJGY4FD42", + "id": "799357838299137", + "into": "GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K", + "paging_token": "799357838299137", + "transaction_successful": true, + "type_i": 8, + "type": "account_merge" +} +``` + + +### Inflation + +Runs inflation. + +#### Example + +```json +{ + "_links": { + "effects": { + "href": "/operations/12884914177/effects/{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "/operations?cursor=12884914177\u0026order=asc" + }, + "self": { + "href": "/operations/12884914177" + }, + "succeeds": { + "href": "/operations?cursor=12884914177\u0026order=desc" + }, + "transaction": { + "href": "/transactions/12884914176" + } + }, + "id": "12884914177", + "paging_token": "12884914177", + "transaction_successful": true, + "type_i": 9, + "type": "inflation" +} +``` + + +### Manage Data + +Set, modify or delete a Data Entry (name/value pair) for an account. + +#### Example + +```json +{ + "_links": { + "self": { + "href": "/operations/5250180907536385" + }, + "transaction": { + "href": "/transactions/e0710d3e410fe6b1ba77fcfec9e3789e94ff29b2424f1f4bf51e530dbbdf221c" + }, + "effects": { + "href": "/operations/5250180907536385/effects" + }, + "succeeds": { + "href": "/effects?order=desc&cursor=5250180907536385" + }, + "precedes": { + "href": "/effects?order=asc&cursor=5250180907536385" + } + }, + "id": "5250180907536385", + "paging_token": "5250180907536385", + "source_account": "GCGG3CIRBG2TTBR4HYZJ7JLDRFKZIYOAHFXRWLU62CA2QN52P2SUQNPJ", + "type": "manage_data", + "type_i": 10, + "transaction_successful": true, + "name": "lang", + "value": "aW5kb25lc2lhbg==" +} +``` + + +### Bump Sequence + +Bumps forward the sequence number of the source account of the operation, allowing it to invalidate any transactions with a smaller sequence number. 
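+
+Purely as an illustration (this is not part of the Horizon response format), such an operation can be built client-side with the `stellar-base` library that the tutorial later in this document also uses; the `bumpTo` value below is a placeholder.
+
+```js
+var StellarBase = require("stellar-base");
+
+// Illustrative sketch only: build a bump_sequence operation.
+// bumpTo is the desired sequence number, passed as a string; the value here is a placeholder.
+var bumpOp = StellarBase.Operation.bumpSequence({
+  bumpTo: "1273737228"
+});
+
+// Like any other operation it can be added to a TransactionBuilder;
+// here we simply print its XDR form.
+console.log(bumpOp.toXDR().toString("base64"));
+```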
+ +#### Attributes + +| Field | Type | Description | +|--------|--------|-------------------------------------------------------------------| +| bumpTo | number | Desired value for the operation’s source account sequence number. | + +#### Example +```json +{ + "_links": { + "self": { + "href": "/operations/1743756726273" + }, + "transaction": { + "href": "/transactions/328436a8dffaf6ca33c08a93279234c7d3eaf1c028804152614187dc76b7168d" + }, + "effects": { + "href": "/operations/1743756726273/effects" + }, + "succeeds": { + "href": "/effects?order=desc&cursor=1743756726273" + }, + "precedes": { + "href": "/effects?order=asc&cursor=1743756726273" + } + }, + "id": "1743756726273", + "paging_token": "1743756726273", + "source_account": "GBHPJ3VMVT3X7Y6HIIAPK7YPTZCF3CWO4557BKGX2GVO4O7EZHIBELLH", + "type": "bump_sequence", + "type_i": 11, + "transaction_hash": "328436a8dffaf6ca33c08a93279234c7d3eaf1c028804152614187dc76b7168d", + "bump_to": "1273737228" +} +``` + +## Endpoints + +| Resource | Type | Resource URI Template | +|----------------------------------------------------|------------|-------------------------------------------------| +| [All Operations](../endpoints/operations-all.md) | Collection | `/operations` | +| [Operations Details](../endpoints/operations-single.md) | Single | `/operations/:id` | +| [Ledger Operations](../endpoints/operations-for-ledger.md) | Collection | `/ledgers/{id}/operations{?cursor,limit,order}` | +| [Account Operations](../endpoints/operations-for-account.md) | Collection | `/accounts/:account_id/operations` | +| [Account Payments](../endpoints/payments-for-account.md) | Collection | `/accounts/:account_id/payments` | diff --git a/services/horizon/internal/docs/reference/resources/orderbook.md b/services/horizon/internal/docs/reference/resources/orderbook.md new file mode 100644 index 0000000000..c7e1e78b30 --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/orderbook.md @@ -0,0 +1,50 @@ +--- +title: Orderbook +replacement: https://developers.stellar.org/api/aggregations/order-books/ +--- + +[Orderbooks](https://www.stellar.org/developers/learn/concepts/exchange.html) are collections of offers for each issuer and currency pairs. Let's say you wanted to exchange EUR issued by a particular bank for BTC issued by a particular exchange. You would look at the orderbook and see who is buying `foo_bank/EUR` and selling `baz_exchange/BTC` and at what prices. + +## Attributes +| Attribute | Type | | +|--------------|------------------|------------------------------------------------------------------------------------------------------------------------| +| bids | object | Array of {`price_r`, `price`, `amount`} objects (see [offers](./offer.md)). These represent prices and amounts accounts are willing to buy for the given `selling` and `buying` pair. | +| asks | object | Array of {`price_r`, `price`, `amount`} objects (see [offers](./offer.md)). 
These represent prices and amounts accounts are willing to sell for the given `selling` and `buying` pair.| +| base | [Asset](http://stellar.org/developers/learn/concepts/assets.html) | The Asset this offer wants to sell.| +| counter | [Asset](http://stellar.org/developers/learn/concepts/assets.html) | The Asset this offer wants to buy.| + +#### Bid Object +| Attribute | Type | | +| ---------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------ | +| price_r | object | An object of a number numerator and number denominator that represents the bid price. | +| price | string | The bid price of the asset. A number representing the decimal form of price_r | +| amount | string | The amount of asset bid offer. | + +#### Ask Object +| Attribute | Type | | +| ---------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------ | +| price_r | object | An object of a number numerator and number denominator that represents the ask price. | +| price | string | The ask price of the asset. A number representing the decimal form of price_r | +| amount | string | The amount of asset ask offer. | + +#### Price_r Object +Price_r is a more precise representation of a bid/ask offer. + +| Attribute | Type | | +| ---------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------ | +| n | number | The numerator. | +| d | number | The denominator. | + +Thus to get price you would take n / d. + +## Links + +This resource has no links. + + +## Endpoints + +| Resource | Type | Resource URI Template | +|--------------------------|------------|--------------------------------------| +| [Orderbook Details](../endpoints/orderbook-details.md) | Single | `/orderbook?{orderbook_params}` | +| [Trades](../endpoints/trades.md) | Collection | `/trades?{orderbook_params}` | diff --git a/services/horizon/internal/docs/reference/resources/page.md b/services/horizon/internal/docs/reference/resources/page.md new file mode 100644 index 0000000000..6c90db63bf --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/page.md @@ -0,0 +1,92 @@ +--- +title: Page +replacement: https://developers.stellar.org/api/introduction/pagination/page-arguments/ +--- + +Pages represent a subset of a larger collection of objects. +As an example, it would be unfeasible to provide the +[All Transactions](../endpoints/transactions-all.md) endpoint without paging. Over time there +will be millions of transactions in the Stellar network's ledger and returning +them all over a single request would be unfeasible. + +## Attributes + +A page itself exposes no attributes. It is merely a container for embedded +records and some links to aid in iterating the entire collection the page is +part of. + +## Cursor +A `cursor` is a number that points to a specific location in a collection of resources. + +The `cursor` attribute itself is an opaque value meaning that users should not try to parse it. + +## Embedded Resources + +A page contains an embedded set of `records`, regardless of the contained resource. + +## Links + +A page provides a couple of links to ease in iteration. 
+ +| | Example | Relation | +| ---- | ------------------------------------------------------ | ---------------------------- | +| self | `/transactions` | | +| prev | `/transactions?cursor=12884905984&order=desc&limit=10` | The previous page of results | +| next | `/transactions?cursor=12884905984&order=asc&limit=10` | The next page of results | + +## Example + +```json +{ + "_embedded": { + "records": [ + { + "_links": { + "self": { + "href": "/operations/12884905984" + }, + "transaction": { + "href": "/transaction/6391dd190f15f7d1665ba53c63842e368f485651a53d8d852ed442a446d1c69a" + }, + "precedes": { + "href": "/account/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments?cursor=12884905984&order=asc{?limit}", + "templated": true + }, + "succeeds": { + "href": "/account/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments?cursor=12884905984&order=desc{?limit}", + "templated": true + } + }, + "id": 12884905984, + "paging_token": "12884905984", + "type_i": 0, + "type": "payment", + "sender": "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + "receiver": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", + "asset": { + "code": "XLM" + }, + "amount": 1000000000, + "amount_f": 100.00 + } + ] + }, + "_links": { + "next": { + "href": "/account/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments?cursor=12884905984&order=asc&limit=100" + }, + "prev": { + "href": "/account/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments?cursor=12884905984&order=desc&limit=100" + }, + "self": { + "href": "/account/GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ/payments?limit=100" + } + } +} + +``` + +## Endpoints + +Any endpoint that provides a collection of resources will represent them as pages. + diff --git a/services/horizon/internal/docs/reference/resources/path.md b/services/horizon/internal/docs/reference/resources/path.md new file mode 100644 index 0000000000..20c87b2bc9 --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/path.md @@ -0,0 +1,54 @@ +--- +title: Payment Path +replacement: https://developers.stellar.org/api/aggregations/paths/ +--- + +A **path** resource contains information about a payment path. A path can be used by code to populate necessary fields on path payment operation, such as `path` and `sendMax`. + + +## Attributes +| Attribute | Type | | +|--------------------------|------------------|--------------------------------------------------------------------------------------------------------------------------------| +| path | array of objects | An array of assets that represents the intermediary assets this path hops through | +| source_amount | string | An estimated cost for making a payment of destination_amount on this path. 
Suitable for use in a path payments `sendMax` field | +| destination_amount | string | The destination amount specified in the search that found this path | +| destination_asset_type | string | The type for the destination asset specified in the search that found this path | +| destination_asset_code | optional, string | The code for the destination asset specified in the search that found this path | +| destination_asset_issuer | optional, string | The issuer for the destination asset specified in the search that found this path | +| source_asset_type | string | The type for the source asset specified in the search that found this path | +| source_asset_code | optional, string | The code for the source asset specified in the search that found this path | +| source_asset_issuer | optional, string | The issuer for the source asset specified in the search that found this path | + +#### Asset Object +| Attribute | Type | | +|--------------|------------------|------------------------------------------------------------------------------------------------------------------------ +| asset_code | optional, string | The code for the asset. | +| asset_type | string | Either native, credit_alphanum4, or credit_alphanum12. | +| asset_issuer | optional, string | The stellar address of the given asset's issuer. | + +## Example + +```json +{ + "destination_amount": "20.0000000", + "destination_asset_code": "EUR", + "destination_asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "destination_asset_type": "credit_alphanum4", + "path": [ + { + "asset_code": "1", + "asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "asset_type": "credit_alphanum4" + } + ], + "source_amount": "20.0000000", + "source_asset_code": "USD", + "source_asset_issuer": "GDSBCQO34HWPGUGQSP3QBFEXVTSR2PW46UIGTHVWGWJGQKH3AFNHXHXN", + "source_asset_type": "credit_alphanum4" +} +``` + +## Endpoints +| Resource | Type | Resource URI Template | +|------------------------------------------|------------|-----------------------| +| [Find Payment Paths](../endpoints/path-finding.md) | Collection | `/paths` | diff --git a/services/horizon/internal/docs/reference/resources/trade.md b/services/horizon/internal/docs/reference/resources/trade.md new file mode 100644 index 0000000000..08434dcd63 --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/trade.md @@ -0,0 +1,66 @@ +--- +title: Trade +--- + +A trade represents a fulfilled offer. For example, let's say that there exists an offer to sell 9 `foo_bank/EUR` for 3 `baz_exchange/BTC` and you make an offer to buy 3 `foo_bank/EUR` for 1 `baz_exchange/BTC`. Since your offer and the existing one cross, a trade happens. After the trade completes: + +- you are 3 `foo_bank/EUR` richer and 1 `baz_exchange/BTC` poorer +- the maker of the other offer is 1 `baz_exchange/BTC` richer and 3 `foo_bank/EUR` poorer +- your offer is completely fulfilled and no longer exists +- the other offer is partially fulfilled and becomes an offer to sell 6 `foo_bank/EUR` for 2 `baz_exchange/BTC`. The price of that offer doesn't change, but the amount does. + +Trades can also be caused by successful [path payments](https://www.stellar.org/developers/learn/concepts/exchange.html), because path payments involve fulfilling offers. + +Payments are one-way in that afterwards, the source account has a smaller balance and the destination account of the payment has a bigger one. Trades are two-way; both accounts increase and decrease their balances. 
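+
+To make the bookkeeping in the example above concrete, here is a minimal JavaScript sketch (not Horizon code) of that partial fill: the traded amounts follow from the maker's price, the remaining amount shrinks, and the price itself does not change.
+
+```js
+// Minimal sketch of the partial fill from the example above (not Horizon code).
+// The maker sells 9 foo_bank/EUR for 3 baz_exchange/BTC, i.e. a price of 1/3 BTC per EUR.
+var makerOffer = { sellingAmount: 9, price: { n: 1, d: 3 } }; // EUR for sale, BTC per EUR
+var takerBuyAmount = 3;                                       // EUR the taker wants to buy
+
+var eurTraded = Math.min(makerOffer.sellingAmount, takerBuyAmount);    // 3 EUR
+var btcTraded = (eurTraded * makerOffer.price.n) / makerOffer.price.d; // 1 BTC
+
+makerOffer.sellingAmount -= eurTraded; // 6 EUR remain for sale
+// The price {n: 1, d: 3} is untouched: a partial fill changes the amount, not the price.
+console.log(makerOffer, { eurTraded: eurTraded, btcTraded: btcTraded });
+```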
+
+A trade occurs between two parties, `base` and `counter`. Which party is the base and which is the counter is either arbitrary or determined by the calling query.
+
+## Attributes
+| Attribute | Type | |
+|--------------|------------------|------------------------------------------------------------------------------------------------------------------------|
+| id | string | The ID of this trade. |
+| paging_token | string | A [paging token](./page.md) suitable for use as a `cursor` parameter.|
+| ledger_close_time | string | An ISO 8601 formatted string of when the ledger with this trade was closed.|
+| offer_id | string | DEPRECATED. The sell offer ID. |
+| base_account | string | base party of this trade|
+| base_offer_id | string | the base offer ID. If this offer was immediately and fully consumed, this will be a synthetic ID. |
+| base_amount | string | amount of base asset that was moved from `base_account` to `counter_account`|
+| base_asset_type | string | type of base asset|
+| base_asset_code | string | code of base asset|
+| base_asset_issuer | string | issuer of base asset|
+| counter_offer_id | string | the counter offer ID. If this offer was immediately and fully consumed, this will be a synthetic ID. |
+| counter_account | string | counter party of this trade|
+| counter_amount | string | amount of counter asset that was moved from `counter_account` to `base_account`|
+| counter_asset_type | string | type of counter asset|
+| counter_asset_code | string | code of counter asset|
+| counter_asset_issuer | string | issuer of counter asset|
+| price | object | original offer price, expressed as a rational number. example: {n:7, d:3} |
+| base_is_seller | boolean | indicates which party of the trade made the sell offer|
+
+#### Price Object
+Price is a precise representation of a bid/ask offer.
+
+| Attribute | Type | |
+| ---------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------ |
+| n | number | The numerator. |
+| d | number | The denominator. |
+
+Thus to get price you would take n / d.
+
+#### Synthetic Offer Ids
+Offer IDs in the Horizon trade resource (base_offer_id, counter_offer_id) are synthetic and don't always reflect the respective stellar-core offer IDs. This is because stellar-core does not assign offer IDs when an offer gets filled immediately. In these cases, Horizon synthetically generates an offer ID for the buying offer, based on the total order ID of the offer operation. This allows wallets to aggregate historical trades based on offer IDs without adding special handling for edge cases. The exact encoding can be found [here](https://github.com/stellar/go/blob/master/services/horizon/internal/db2/history/synt_offer_id.go).
+
+## Links
+
+| rel | Example | Description | `templated` |
+|--------------|---------------------------------------------------------------------------------------------------|------------------------------------------------------------|-------------|
+| base | `/accounts/{base_account}` | Link to details about the base account| true |
+| counter | `/accounts/{counter_account}` | Link to details about the counter account | true |
+| operation | `/operation/{operation_id}` | Link to the operation of the assets bought and sold.
| true | + +## Endpoints + +| Resource | Type | Resource URI Template | +|--------------------------|------------|--------------------------------------| +| [Trades](../endpoints/trades.md) | Collection | `/trades` | +| [Account Trades](../endpoints/trades-for-account.md) | Collection | `/accounts/:account_id/trades` | diff --git a/services/horizon/internal/docs/reference/resources/trade_aggregation.md b/services/horizon/internal/docs/reference/resources/trade_aggregation.md new file mode 100644 index 0000000000..d1019e7f0c --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/trade_aggregation.md @@ -0,0 +1,39 @@ +--- +title: Trade Aggregation +replacement: https://developers.stellar.org/api/aggregations/trade-aggregations/ +--- + +A Trade Aggregation represents aggregated statistics on an asset pair (`base` and `counter`) for a specific time period. + +## Attributes +| Attribute | Type | | +|--------------|------------------|------------------------------------------------------------------------------------------------------------------------| +| timestamp | string | start time for this trade_aggregation. Represented as milliseconds since epoch.| +| trade_count | int | total number of trades aggregated.| +| base_volume | string | total volume of `base` asset.| +| counter_volume | string | total volume of `counter` asset.| +| avg | string | weighted average price of `counter` asset in terms of `base` asset.| +| high | string | highest price for this time period.| +| high_r | object | highest price for this time period as a rational number.| +| low | string | lowest price for this time period.| +| low_r | object | lowest price for this time period as a rational number.| +| open | string | price as seen on first trade aggregated.| +| open_r | object | price as seen on first trade aggregated as a rational number.| +| close | string | price as seen on last trade aggregated.| +| close_r | object | price as seen on last trade aggregated as a rational number.| + +#### Price_r Object +Price_r (high_r, low_r, open_r, close_r) is a more precise representation of a bid/ask offer. + +| Attribute | Type | | +| ---------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------ | +| n | number | The numerator. | +| d | number | The denominator. | + +Thus to get price you would take n / d. + +## Endpoints + +| Resource | Type | Resource URI Template | +|--------------------------|------------|--------------------------------------| +| [Trade Aggregations](../endpoints/trade_aggregations.md) | Collection | `/trade_aggregations?{orderbook_params}` | diff --git a/services/horizon/internal/docs/reference/resources/transaction.md b/services/horizon/internal/docs/reference/resources/transaction.md new file mode 100644 index 0000000000..ac4c5b488b --- /dev/null +++ b/services/horizon/internal/docs/reference/resources/transaction.md @@ -0,0 +1,119 @@ +--- +title: Transaction +replacement: https://developers.stellar.org/api/resources/transactions/ +--- + +**Transactions** are the basic unit of change in the Stellar Network. + +A transaction is a grouping of [operations](./operation.md). + +To learn more about the concept of transactions in the Stellar network, take a look at the [Stellar transactions concept guide](https://www.stellar.org/developers/learn/concepts/transactions.html). 
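+
+As a quick, non-normative sketch of consuming this resource, the snippet below fetches a single transaction record from Horizon and prints a few of the attributes documented below. It assumes Node 18+ for the global `fetch`; the hash is the one used in the example later on this page.
+
+```js
+// Non-normative sketch: fetch one transaction record from Horizon and read a few fields.
+// Assumes Node 18+ (global fetch). The hash below is the one from the example on this page.
+var horizon = "https://horizon-testnet.stellar.org";
+var hash = "cb9a25394acb6fe0d1d9bdea5afc01cafe2c6fde59a96ddceb2564a65780a81f";
+
+fetch(horizon + "/transactions/" + hash)
+  .then(function (resp) { return resp.json(); })
+  .then(function (tx) {
+    console.log("ledger:", tx.ledger);
+    console.log("operation_count:", tx.operation_count);
+    console.log("fee_charged:", tx.fee_charged);
+  });
+```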
+
+## Attributes
+
+| Attribute | Type | |
+|-------------------------|--------------------------|--------------------------------------------------------------------------------------------------------------------------------|
+| id | string | The canonical id of this transaction, suitable for use as the :id parameter for url templates that require a transaction's ID. |
+| paging_token | string | A [paging token](./page.md) suitable for use as the `cursor` parameter to transaction collection resources. |
+| successful | bool | Indicates if this transaction was successful or not. |
+| hash | string | A hex-encoded, lowercase SHA-256 hash of the transaction's [XDR](../../learn/xdr.md)-encoded form. |
+| ledger | number | Sequence number of the ledger in which this transaction was applied. |
+| created_at | ISO8601 string | |
+| fee_account | string | The account which paid for the transaction fees |
+| source_account | string | |
+| source_account_sequence | string | |
+| max_fee | number | The maximum fee the fee account was willing to pay. |
+| fee_charged | number | The fee paid by the fee account of this transaction when the transaction was applied to the ledger. |
+| operation_count | number | The number of operations that are contained within this transaction. |
+| envelope_xdr | string | A base64 encoded string of the raw `TransactionEnvelope` xdr struct for this transaction |
+| result_xdr | string | A base64 encoded string of the raw `TransactionResult` xdr struct for this transaction |
+| result_meta_xdr | string | A base64 encoded string of the raw `TransactionMeta` xdr struct for this transaction |
+| fee_meta_xdr | string | A base64 encoded string of the raw `LedgerEntryChanges` xdr struct produced by taking fees for this transaction. |
+| memo_type | string | The type of memo set in the transaction. Possible values are `none`, `text`, `id`, `hash`, and `return`. |
+| memo | string | The string representation of the memo set in the transaction. When `memo_type` is `id`, the `memo` is a decimal string representation of an unsigned 64 bit integer. When `memo_type` is `hash` or `return`, the `memo` is a base64 encoded string. When `memo_type` is `text`, the `memo` is a unicode string. However, if the original memo byte sequence in the transaction XDR is not valid unicode, Horizon will replace any invalid byte sequences with the utf-8 replacement character. Note this field is only present when `memo_type` is not `none`. |
+| memo_bytes | string | A base64 encoded string of the memo bytes set in the transaction's xdr envelope. Note this field is only present when `memo_type` is `text`. |
+| signatures | string[] | An array of signatures used to sign this transaction |
+| valid_after | RFC3339 date-time string | |
+| valid_before | RFC3339 date-time string | |
+| fee_bump_transaction | object | This object is only present if the transaction is a fee bump transaction or is wrapped by a fee bump transaction. The object has two fields: `hash` (the hash of the fee bump transaction) and `signatures` (the signatures present in the fee bump transaction envelope) |
+| inner_transaction | object | This object is only present if the transaction is a fee bump transaction or is wrapped by a fee bump transaction.
The object has three fields: `hash` (the hash of the inner transaction wrapped by the fee bump transaction), `max_fee` (the max fee set in the inner transaction), and `signatures` (the signatures present in the inner transaction envelope) | + +## Links + +| rel | Example | Description | +|------------|------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------| +| self | `https://horizon-testnet.stellar.org/transactions/cb9a25394acb6fe0d1d9bdea5afc01cafe2c6fde59a96ddceb2564a65780a81f` | | +| account | `https://horizon-testnet.stellar.org/accounts/GCDLRUXOD6KA53G5ILL435TZAISNLPS4EKIHSOVY3MVD3DVJ333NO4DT` | The source [account](../endpoints/accounts-single.md) for this transaction. | +| ledger | `https://horizon-testnet.stellar.org/ledgers/2352988` | The [ledger](../endpoints/ledgers-single.md) in which this transaction was applied. | +| operations | `https://horizon-testnet.stellar.org/transactions/cb9a25394acb6fe0d1d9bdea5afc01cafe2c6fde59a96ddceb2564a65780a81f/operations{?cursor,limit,order}"` | [Operations](../endpoints/operations-for-transaction.md) included in this transaction. | +| effects | `https://horizon-testnet.stellar.org/transactions/cb9a25394acb6fe0d1d9bdea5afc01cafe2c6fde59a96ddceb2564a65780a81f/effects{?cursor,limit,order}"` | [Effects](../endpoints/effects-for-transaction.md) which resulted by operations in this transaction. | +| precedes | `https://horizon-testnet.stellar.org/transactions?order=asc&cursor=10106006507900928` | A collection of transactions that occur after this transaction. | +| succeeds | `https://horizon-testnet.stellar.org/transactions?order=desc&cursor=10106006507900928` | A collection of transactions that occur before this transaction. 
| + +## Example + +```json +{ + "_links": { + "self": { + "href": "https://horizon-testnet.stellar.org/transactions/cb9a25394acb6fe0d1d9bdea5afc01cafe2c6fde59a96ddceb2564a65780a81f" + }, + "account": { + "href": "https://horizon-testnet.stellar.org/accounts/GCDLRUXOD6KA53G5ILL435TZAISNLPS4EKIHSOVY3MVD3DVJ333NO4DT" + }, + "ledger": { + "href": "https://horizon-testnet.stellar.org/ledgers/2352988" + }, + "operations": { + "href": "https://horizon-testnet.stellar.org/transactions/cb9a25394acb6fe0d1d9bdea5afc01cafe2c6fde59a96ddceb2564a65780a81f/operations{?cursor,limit,order}", + "templated": true + }, + "effects": { + "href": "https://horizon-testnet.stellar.org/transactions/cb9a25394acb6fe0d1d9bdea5afc01cafe2c6fde59a96ddceb2564a65780a81f/effects{?cursor,limit,order}", + "templated": true + }, + "precedes": { + "href": "https://horizon-testnet.stellar.org/transactions?order=asc&cursor=10106006507900928" + }, + "succeeds": { + "href": "https://horizon-testnet.stellar.org/transactions?order=desc&cursor=10106006507900928" + } + }, + "id": "cb9a25394acb6fe0d1d9bdea5afc01cafe2c6fde59a96ddceb2564a65780a81f", + "paging_token": "10106006507900928", + "successful": true, + "hash": "cb9a25394acb6fe0d1d9bdea5afc01cafe2c6fde59a96ddceb2564a65780a81f", + "ledger": 2352988, + "created_at": "2019-02-21T21:44:13Z", + "source_account": "GCDLRUXOD6KA53G5ILL435TZAISNLPS4EKIHSOVY3MVD3DVJ333NO4DT", + "fee_account": "GCDLRUXOD6KA53G5ILL435TZAISNLPS4EKIHSOVY3MVD3DVJ333NO4DT", + "source_account_sequence": "10105916313567234", + "max_fee": 100, + "fee_charged":100, + "operation_count": 1, + "envelope_xdr": "AAAAAIa40u4flA7s3ULXzfZ5AiTVvlwikHk6uNsqPY6p3vbXAAAAZAAj50cAAAACAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAABAAAAB2Fmc2RmYXMAAAAAAQAAAAAAAAABAAAAAIa40u4flA7s3ULXzfZ5AiTVvlwikHk6uNsqPY6p3vbXAAAAAAAAAAEqBfIAAAAAAAAAAAGp3vbXAAAAQKElK3CoNo1f8fWIGeJm98lw2AaFiyVVFhx3uFok0XVW3MHV9MubtEhfA+n1iLPrxmzHtHfmZsumWk+sOEQlSwI=", + "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + "result_meta_xdr": "AAAAAQAAAAIAAAADACPnXAAAAAAAAAAAhrjS7h+UDuzdQtfN9nkCJNW+XCKQeTq42yo9jqne9tcAAAAXSHbnOAAj50cAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABACPnXAAAAAAAAAAAhrjS7h+UDuzdQtfN9nkCJNW+XCKQeTq42yo9jqne9tcAAAAXSHbnOAAj50cAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + "fee_meta_xdr": "AAAAAgAAAAMAI+dTAAAAAAAAAACGuNLuH5QO7N1C1832eQIk1b5cIpB5OrjbKj2Oqd721wAAABdIduecACPnRwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAI+dcAAAAAAAAAACGuNLuH5QO7N1C1832eQIk1b5cIpB5OrjbKj2Oqd721wAAABdIduc4ACPnRwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + "memo_type": "text", + "memo": "afsdfas", + "valid_after": "1970-01-01T00:00:00Z", + "signatures": [ + "oSUrcKg2jV/x9YgZ4mb3yXDYBoWLJVUWHHe4WiTRdVbcwdX0y5u0SF8D6fWIs+vGbMe0d+Zmy6ZaT6w4RCVLAg==" + ] +} +``` + +## Endpoints + +| Resource | Type | Resource URI Template | +|--------------------------------------------------------|------------|--------------------------------------| +| [All Transactions](../endpoints/transactions-all.md) | Collection | `/transactions` (`GET`) | +| [Post Transaction](../endpoints/transactions-create.md) | Action | `/transactions` (`POST`) | +| [Transaction Details](../endpoints/transactions-single.md) | Single | `/transactions/:id` | +| [Account Transactions](../endpoints/transactions-for-account.md) | Collection | `/accounts/:account_id/transactions` | +| [Ledger Transactions](../endpoints/transactions-for-ledger.md) | Collection | `/ledgers/:ledger_id/transactions` | + + +## Submitting transactions +To submit a new transaction to Stellar 
network, it must first be built and signed locally. Then you can submit a base64-encoded representation of your transaction's [XDR](../xdr.md) to the `/transactions` endpoint. Read more about submitting transactions in the [Post Transaction](../endpoints/transactions-create.md) doc.
diff --git a/services/horizon/internal/docs/reference/responses.md b/services/horizon/internal/docs/reference/responses.md
new file mode 100644
index 0000000000..151e11ca57
--- /dev/null
+++ b/services/horizon/internal/docs/reference/responses.md
@@ -0,0 +1,75 @@
+---
+title: Response Format
+replacement: https://developers.stellar.org/api/introduction/response-format/
+---
+
+Rather than using a fully custom way of representing the resources we expose in
+Horizon, we use [HAL](http://stateless.co/hal_specification.html). HAL is a
+hypermedia format in JSON that remains simple while giving us a couple of
+benefits such as simpler client integration for several languages. See [this
+wiki page](https://github.com/mikekelly/hal_specification/wiki/Libraries) for a
+list of libraries.
+
+## Attributes, Links, Embedded Resources
+
+At its simplest, a HAL response is just a JSON object with a couple of reserved
+property names: `_links` is used for expressing links and `_embedded` is used
+for bundling other HAL objects with the response. Other than links and embedded
+objects, **HAL is just JSON**.
+
+### Links
+
+HAL is a hypermedia format, like HTML, in that it has a mechanism to express
+links between documents. Let's look at a simple example:
+
+```json
+{
+  "_links": {
+    "self": {
+      "href": "/ledgers/1"
+    },
+    "transactions": {
+      "href": "/ledgers/1/transactions{?cursor,limit,order}",
+      "templated": true
+    }
+  },
+  "id": "43cf4db3741a7d6c2322e7b646320ce9d7b099a0b3501734dcf70e74a8a4e637",
+  "hash": "43cf4db3741a7d6c2322e7b646320ce9d7b099a0b3501734dcf70e74a8a4e637",
+  "prev_hash": "",
+  "sequence": 1,
+  "transaction_count": 0,
+  "operation_count": 0,
+  "closed_at": "0001-01-01T00:00:00Z",
+  "total_coins": "100000000000.0000000",
+  "fee_pool": "0.0000000",
+  "base_fee_in_stroops": 100,
+  "base_reserve_in_stroops": 100000000,
+  "max_tx_set_size": 50
+}
+```
+
+The above response is for the genesis ledger of the Stellar test network, and
+the links in the `_links` attribute provide links to other relevant resources in
+Horizon. Notice the object beneath the `transactions` key. The key of each
+link specifies that link's relation to the current resource, and in this case
+`transactions` means "Transactions that occurred in this ledger". Logically,
+you should expect that resource to respond with a collection of transactions
+with all of the results having a `ledger_sequence` attribute equal to 1.
+
+The `transactions` link is also _templated_, which means that the `href`
+attribute of the link is actually a URI template, as specified by [RFC
+6570](https://tools.ietf.org/html/rfc6570). We use URI templates to show you
+what parameters a given resource can take. You must evaluate the template to a
+valid URI before navigating to it.
+
+## Pages
+
+Pages represent a subset of a larger collection of objects.
+As an example, it would be unfeasible to provide the
+[All Transactions](../reference/endpoints/transactions-all.md) endpoint without paging.
+Over time there will be millions of transactions in the Stellar network's ledger
+and returning them all in a single request would be impractical.
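+
+As a rough, non-normative sketch, a client can page through any collection by repeatedly following the `next` link each page embeds in `_links`. Node 18+ is assumed for the global `fetch`, and the starting URL and page count are arbitrary.
+
+```js
+// Rough sketch: walk a paged collection by following each page's "next" link.
+// Assumes Node 18+ (global fetch); the starting URL and page count are arbitrary.
+var start = "https://horizon-testnet.stellar.org/transactions?limit=10&order=asc";
+
+async function walkPages(url, maxPages) {
+  for (var i = 0; i < maxPages; i++) {
+    var page = await (await fetch(url)).json();
+    page._embedded.records.forEach(function (tx) {
+      console.log(tx.paging_token, tx.hash);
+    });
+    // The href may be relative on some deployments; resolve it against the current URL.
+    url = new URL(page._links.next.href, url).toString();
+  }
+}
+
+walkPages(start, 3);
+```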
+
+Read more about paging in the following docs:
+- [Page](../reference/resources/page.md)
+- [Paging](./paging.md)
diff --git a/services/horizon/internal/docs/reference/streaming.md b/services/horizon/internal/docs/reference/streaming.md
new file mode 100644
index 0000000000..4cc4aee979
--- /dev/null
+++ b/services/horizon/internal/docs/reference/streaming.md
@@ -0,0 +1,20 @@
+---
+title: Streaming
+replacement: https://developers.stellar.org/api/introduction/streaming/
+---
+
+## Streaming
+
+Certain endpoints in Horizon can be called in streaming mode using Server-Sent Events. This mode will keep the connection to Horizon open and Horizon will continue to return responses as ledgers close. All parameters for the endpoints that allow this mode are the same. A caller initiates this mode by setting the `Accept: text/event-stream` HTTP header when making the request.
+You can read an example of using the streaming mode in the [Follow Received Payments](./tutorials/follow-received-payments.md) tutorial.
+
+Endpoints that currently support streaming:
+* [Account](./endpoints/accounts-single.md)
+* [Effects](./endpoints/effects-all.md)
+* [Ledgers](./endpoints/ledgers-all.md)
+* [Offers](./endpoints/offers-for-account.md)
+* [Operations](./endpoints/operations-all.md)
+* [Orderbook](./endpoints/orderbook-details.md)
+* [Payments](./endpoints/payments-all.md)
+* [Transactions](./endpoints/transactions-all.md)
+* [Trades](./endpoints/trades.md)
diff --git a/services/horizon/internal/docs/reference/tutorials/follow-received-payments.md b/services/horizon/internal/docs/reference/tutorials/follow-received-payments.md
new file mode 100644
index 0000000000..667384ae68
--- /dev/null
+++ b/services/horizon/internal/docs/reference/tutorials/follow-received-payments.md
@@ -0,0 +1,213 @@
+---
+title: Follow Received Payments
+---
+
+This tutorial shows how easy it is to use Horizon to watch for incoming payments on an [account](../../reference/resources/account.md)
+using JavaScript and `EventSource`. We will eschew using [`js-stellar-sdk`](https://github.com/stellar/js-stellar-sdk), the
+high-level helper library, to show that it is possible for you to perform this
+task on your own, with whatever programming language you would like to use.
+
+This tutorial assumes that you:
+
+- Have node.js installed locally on your machine.
+- Have curl installed locally on your machine.
+- Are running on Linux, OS X, or any other system that has access to a bash-like
+  shell.
+- Are familiar with launching and running commands in a terminal.
+
+In this tutorial we will learn:
+
+- How to create a new account.
+- How to fund your account using friendbot.
+- How to follow payments to your account using curl and EventSource.
+
+## Project Skeleton
+
+Let's get started by building our project skeleton:
+
+```bash
+$ mkdir follow_tutorial
+$ cd follow_tutorial
+$ npm install --save stellar-base
+$ npm install --save eventsource
+```
+
+This should have created a `package.json` in the `follow_tutorial` directory.
+You can check that everything went well by running the following command:
+
+```bash
+$ node -e "require('stellar-base')"
+```
+
+Everything was successful if no output is generated from the above command. Now
+let's write a script to create a new account.
+ +## Creating an account + +Create a new file named `make_account.js` and paste the following text into it: + +```javascript +var Keypair = require("stellar-base").Keypair; + +var newAccount = Keypair.random(); + +console.log("New key pair created!"); +console.log(" Account ID: " + newAccount.publicKey()); +console.log(" Secret: " + newAccount.secret()); +``` + +Save the file and run it: + +```bash +$ node make_account.js +New key pair created! + Account ID: GB7JFK56QXQ4DVJRNPDBXABNG3IVKIXWWJJRJICHRU22Z5R5PI65GAK3 + Secret: SCU36VV2OYTUMDSSU4EIVX4UUHY3XC7N44VL4IJ26IOG6HVNC7DY5UJO +$ +``` + +Before our account can do anything it must be funded. Indeed, before an account +is funded it does not truly exist! + +## Funding your account + +The Stellar test network provides the Friendbot, a tool that developers +can use to get testnet lumens for testing purposes. To fund your account, simply +execute the following curl command: + +```bash +$ curl "https://friendbot.stellar.org/?addr=GB7JFK56QXQ4DVJRNPDBXABNG3IVKIXWWJJRJICHRU22Z5R5PI65GAK3" +``` + +Don't forget to replace the account id above with your own. If the request +succeeds, you should see a response like: + +```json +{ + "hash": "ed9e96e136915103f5d8978cbb2036628e811f2c59c4c3d88534444cf504e360", + "result": "received", + "submission_result": "000000000000000a0000000000000001000000000000000000000000" +} +``` + +After a few seconds, the Stellar network will perform consensus, close the +ledger, and your account will have been created. Next up we will write a command +that watches for new payments to your account and outputs a message to the +terminal. + +## Following payments using `curl` + +To follow new payments connected to your account you simply need to send `Accept: text/event-stream` header to the [/payments](../../reference/endpoints/payments-all.md) endpoint. + +```bash +$ curl -H "Accept: text/event-stream" "https://horizon-testnet.stellar.org/accounts/GB7JFK56QXQ4DVJRNPDBXABNG3IVKIXWWJJRJICHRU22Z5R5PI65GAK3/payments" +``` + +As a result you will see something like: + +```bash +retry: 1000 +event: open +data: "hello" + +id: 713226564145153 +data: {"_links":{"effects":{"href":"/operations/713226564145153/effects/{?cursor,limit,order}","templated":true}, + "precedes":{"href":"/operations?cursor=713226564145153\u0026order=asc"}, + "self":{"href":"/operations/713226564145153"}, + "succeeds":{"href":"/operations?cursor=713226564145153\u0026order=desc"}, + "transactions":{"href":"/transactions/713226564145152"}}, + "account":"GB7JFK56QXQ4DVJRNPDBXABNG3IVKIXWWJJRJICHRU22Z5R5PI65GAK3", + "funder":"GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K", + "id":713226564145153, + "paging_token":"713226564145153", + "starting_balance":"10000", + "type_i":0, + "type":"create_account"} +``` + +Every time you receive a new payment you will get a new row of data. Payments is not the only endpoint that supports streaming. You can also stream transactions [/transactions](../../reference/endpoints/transactions-all.md) and operations [/operations](../../reference/endpoints/operations-all.md). + +## Following payments using `EventStream` + +> **Warning!** `EventSource` object does not reconnect for certain error types so it can stop working. +> If you need a reliable streaming connection please use our [SDK](https://github.com/stellar/js-stellar-sdk). + +Another way to follow payments is writing a simple JS script that will stream payments and print them to console. 
Create `stream_payments.js` file and paste the following code into it: + +```js +var EventSource = require('eventsource'); +var es = new EventSource('https://horizon-testnet.stellar.org/accounts/GB7JFK56QXQ4DVJRNPDBXABNG3IVKIXWWJJRJICHRU22Z5R5PI65GAK3/payments'); +es.onmessage = function(message) { + var result = message.data ? JSON.parse(message.data) : message; + console.log('New payment:'); + console.log(result); +}; +es.onerror = function(error) { + console.log('An error occurred!'); +} +``` +Now, run our script: `node stream_payments.js`. You should see following output: +```bash +New payment: +{ _links: + { effects: + { href: '/operations/713226564145153/effects/{?cursor,limit,order}', + templated: true }, + precedes: { href: '/operations?cursor=713226564145153&order=asc' }, + self: { href: '/operations/713226564145153' }, + succeeds: { href: '/operations?cursor=713226564145153&order=desc' }, + transactions: { href: '/transactions/713226564145152' } }, + account: 'GB7JFK56QXQ4DVJRNPDBXABNG3IVKIXWWJJRJICHRU22Z5R5PI65GAK3', + funder: 'GBS43BF24ENNS3KPACUZVKK2VYPOZVBQO2CISGZ777RYGOPYC2FT6S3K', + id: 713226564145153, + paging_token: '713226564145153', + starting_balance: '10000', + type_i: 0, + type: 'create_account' } +``` + +## Testing it out + +We now know how to get a stream of transactions to an account. Let's check if our solution actually works and if new payments appear. Let's watch as we send a payment ([`create_account` operation](../../../guides/concepts/list-of-operations.html#create-account)) from our account to another account. + +We use the `create_account` operation because we are sending payment to a new, unfunded account. If we were sending payment to an account that is already funded, we would use the [`payment` operation](../../../guides/concepts/list-of-operations.html#payment). + +First, let's check our account sequence number so we can create a payment transaction. To do this we send a request to horizon: + +```bash +$ curl "https://horizon-testnet.stellar.org/accounts/GB7JFK56QXQ4DVJRNPDBXABNG3IVKIXWWJJRJICHRU22Z5R5PI65GAK3" +``` + +Sequence number can be found under the `sequence` field. The current sequence number is `713226564141056`. Save this value somewhere. + +Now, create `make_payment.js` file and paste the following code into it: + +```js +var StellarBase = require("stellar-base"); +StellarBase.Network.useTestNetwork(); + +var keypair = StellarBase.Keypair.fromSecret('SCU36VV2OYTUMDSSU4EIVX4UUHY3XC7N44VL4IJ26IOG6HVNC7DY5UJO'); +var account = new StellarBase.Account(keypair.publicKey(), "713226564141056"); + +var amount = "100"; +var transaction = new StellarBase.TransactionBuilder(account) + .addOperation(StellarBase.Operation.createAccount({ + destination: StellarBase.Keypair.random().publicKey(), + startingBalance: amount + })) + .build(); + +transaction.sign(keypair); + +console.log(transaction.toEnvelope().toXDR().toString("base64")); +``` + +After running this script you should see a signed transaction blob. To submit this transaction we send it to horizon or stellar-core. But before we do, let's open a new console and start our previous script by `node stream_payments.js`. 
+ +Now to send a transaction just use horizon: + +```bash +curl -H "Content-Type: application/json" -X POST -d '{"tx":"AAAAAH6Sq76F4cHVMWvGG4AtNtFVIvayUxSgR401rPY9ej3TAAAD6AACiK0AAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAKc1j3y10+nI+sxuXlmFz71JS35mp/RcPCP45Gw0obdAAAAAAAAAAAAExLQAAAAAAAAAAAT16PdMAAABAsJTBC5N5B9Q/9+ZKS7qkMd/wZHWlP6uCCFLzeD+JWT60/VgGFCpzQhZmMg2k4Vg+AwKJTwko3d7Jt3Y6WhjLCg=="}' "https://horizon-testnet.stellar.org/transactions" +``` + +You should see a new payment in a window running `stream_payments.js` script. diff --git a/services/horizon/internal/docs/reference/xdr.md b/services/horizon/internal/docs/reference/xdr.md new file mode 100644 index 0000000000..c80063394c --- /dev/null +++ b/services/horizon/internal/docs/reference/xdr.md @@ -0,0 +1,26 @@ +--- +title: XDR +replacement: https://developers.stellar.org/api/introduction/xdr/ +--- + +**XDR**, also known as _External Data Representation_, is used extensively in +the Stellar Network, especially in the core protocol. The ledger, transactions, results, +history, and even the messages passed between computers running stellar-core +are encoded using XDR. + +XDR is specified in [RFC 4506](http://tools.ietf.org/html/rfc4506.html). + +Since XDR is a binary format and not known as widely as JSON for example, we try +to hide most of it from Horizon. Instead, we opt to interpret the XDR for you +and present the values as JSON attributes. That said, we also expose the XDR +to you so you can get access to the raw, canonical data. + +In general, Horizon will encode the XDR structures in base64 so that they can be +transmitted within a json body. You should decode the base64 string +into a byte stream, then decode the XDR into an in-memory data structure. + +## .X files + +Data structures in XDR are specified in an _interface definition file_ (IDL). +The IDL files used for the Stellar Network are available +[on GitHub](https://github.com/stellar/stellar-core/tree/master/src/xdr). diff --git a/services/horizon/internal/errors/main.go b/services/horizon/internal/errors/main.go new file mode 100644 index 0000000000..7b83029591 --- /dev/null +++ b/services/horizon/internal/errors/main.go @@ -0,0 +1,37 @@ +package errors + +import ( + "fmt" + "net/http" + + "github.com/getsentry/raven-go" + "github.com/go-errors/errors" +) + +// FromPanic extracts the err from the result of a recover() call. +func FromPanic(rec interface{}) error { + err, ok := rec.(error) + if !ok { + err = fmt.Errorf("%s", rec) + } + + return errors.Wrap(err, 4) +} + +// ReportToSentry reports err to the configured sentry server. Optionally, +// specifying a non-nil `r` will include information in the report about the +// current http request. 
+func ReportToSentry(err error, r *http.Request) { + st := raven.NewStacktrace(4, 3, []string{"github.org/stellar"}) + exc := raven.NewException(err, st) + + var packet *raven.Packet + if r != nil { + h := raven.NewHttp(r) + packet = raven.NewPacket(err.Error(), exc, h) + } else { + packet = raven.NewPacket(err.Error(), exc) + } + + raven.Capture(packet, nil) +} diff --git a/services/horizon/internal/flags.go b/services/horizon/internal/flags.go new file mode 100644 index 0000000000..902db90e3a --- /dev/null +++ b/services/horizon/internal/flags.go @@ -0,0 +1,691 @@ +package horizon + +import ( + "fmt" + "go/types" + stdLog "log" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/spf13/viper" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/network" + "github.com/stellar/go/services/horizon/internal/db2/schema" + apkg "github.com/stellar/go/support/app" + support "github.com/stellar/go/support/config" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/log" + "github.com/stellar/throttled" +) + +const ( + // DatabaseURLFlagName is the command line flag for configuring the Horizon postgres URL + DatabaseURLFlagName = "db-url" + // StellarCoreDBURLFlagName is the command line flag for configuring the postgres Stellar Core URL + StellarCoreDBURLFlagName = "stellar-core-db-url" + // StellarCoreURLFlagName is the command line flag for configuring the URL fore Stellar Core HTTP endpoint + StellarCoreURLFlagName = "stellar-core-url" + // StellarCoreBinaryPathName is the command line flag for configuring the path to the stellar core binary + StellarCoreBinaryPathName = "stellar-core-binary-path" + // captiveCoreConfigAppendPathName is the command line flag for configuring the path to the captive core additional configuration + // Note captiveCoreConfigAppendPathName is deprecated in favor of CaptiveCoreConfigPathName + captiveCoreConfigAppendPathName = "captive-core-config-append-path" + // CaptiveCoreConfigPathName is the command line flag for configuring the path to the captive core configuration file + CaptiveCoreConfigPathName = "captive-core-config-path" + + captiveCoreMigrationHint = "If you are migrating from Horizon 1.x.y, start with the Migration Guide here: https://developers.stellar.org/docs/run-api-server/migrating/" +) + +// validateBothOrNeither ensures that both options are provided, if either is provided. +func validateBothOrNeither(option1, option2 string) error { + arg1, arg2 := viper.GetString(option1), viper.GetString(option2) + if arg1 != "" && arg2 == "" { + return fmt.Errorf("Invalid config: %s = %s, but corresponding option %s is not configured", option1, arg1, option2) + } + if arg1 == "" && arg2 != "" { + return fmt.Errorf("Invalid config: %s = %s, but corresponding option %s is not configured", option2, arg2, option1) + } + return nil +} + +func applyMigrations(config Config) error { + dbConn, err := db.Open("postgres", config.DatabaseURL) + if err != nil { + return fmt.Errorf("could not connect to horizon db: %v", err) + } + defer dbConn.Close() + + numMigrations, err := schema.Migrate(dbConn.DB.DB, schema.MigrateUp, 0) + if err != nil { + return fmt.Errorf("could not apply migrations: %v", err) + } + if numMigrations > 0 { + stdLog.Printf("successfully applied %v horizon migrations\n", numMigrations) + } + return nil +} + +// checkMigrations looks for necessary database migrations and fails with a descriptive error if migrations are needed. 
+func checkMigrations(config Config) error { + migrationsToApplyUp := schema.GetMigrationsUp(config.DatabaseURL) + if len(migrationsToApplyUp) > 0 { + return fmt.Errorf( + `There are %v migrations to apply in the "up" direction. +The necessary migrations are: %v +A database migration is required to run this version (%v) of Horizon. Run "horizon db migrate up" to update your DB. Consult the Changelog (https://github.com/stellar/go/blob/master/services/horizon/CHANGELOG.md) for more information.`, + len(migrationsToApplyUp), + migrationsToApplyUp, + apkg.Version(), + ) + } + + nMigrationsDown := schema.GetNumMigrationsDown(config.DatabaseURL) + if nMigrationsDown > 0 { + return fmt.Errorf( + `A database migration DOWN to an earlier version of the schema is required to run this version (%v) of Horizon. Consult the Changelog (https://github.com/stellar/go/blob/master/services/horizon/CHANGELOG.md) for more information. +In order to migrate the database DOWN, using the HIGHEST version number of Horizon you have installed (not this binary), run "horizon db migrate down %v".`, + apkg.Version(), + nMigrationsDown, + ) + } + return nil +} + +// Flags returns a Config instance and a list of commandline flags which modify the Config instance +func Flags() (*Config, support.ConfigOptions) { + config := &Config{} + + // flags defines the complete flag configuration for horizon. + // Add a new entry here to connect a new field in the horizon.Config struct + var flags = support.ConfigOptions{ + &support.ConfigOption{ + Name: DatabaseURLFlagName, + EnvVar: "DATABASE_URL", + ConfigKey: &config.DatabaseURL, + OptType: types.String, + Required: true, + Usage: "horizon postgres database to connect with", + }, + &support.ConfigOption{ + Name: "ro-database-url", + ConfigKey: &config.RoDatabaseURL, + OptType: types.String, + Required: false, + Usage: "horizon postgres read-replica to connect with, when set it will return stale history error when replica is behind primary", + }, + &support.ConfigOption{ + Name: StellarCoreBinaryPathName, + OptType: types.String, + FlagDefault: "", + Required: false, + Usage: "path to stellar core binary (--remote-captive-core-url has higher precedence). If captive core is enabled, look for the stellar-core binary in $PATH by default.", + ConfigKey: &config.CaptiveCoreBinaryPath, + }, + &support.ConfigOption{ + Name: "remote-captive-core-url", + OptType: types.String, + FlagDefault: "", + Required: false, + Usage: "url to access the remote captive core server", + ConfigKey: &config.RemoteCaptiveCoreURL, + }, + &support.ConfigOption{ + Name: captiveCoreConfigAppendPathName, + OptType: types.String, + FlagDefault: "", + Required: false, + Usage: "DEPRECATED in favor of " + CaptiveCoreConfigPathName, + CustomSetValue: func(opt *support.ConfigOption) error { + if val := viper.GetString(opt.Name); val != "" { + if viper.GetString(CaptiveCoreConfigPathName) != "" { + stdLog.Printf( + "both --%s and --%s are set. %s is deprecated so %s will be used instead", + captiveCoreConfigAppendPathName, + CaptiveCoreConfigPathName, + captiveCoreConfigAppendPathName, + CaptiveCoreConfigPathName, + ) + } else { + config.CaptiveCoreConfigPath = val + } + } + return nil + }, + }, + &support.ConfigOption{ + Name: CaptiveCoreConfigPathName, + OptType: types.String, + FlagDefault: "", + Required: false, + Usage: "path to the configuration file used by captive core. It must, at least, include enough details to define a quorum set. 
Any fields in the configuration file which are not supported by captive core will be rejected.", + CustomSetValue: func(opt *support.ConfigOption) error { + if val := viper.GetString(opt.Name); val != "" { + config.CaptiveCoreConfigPath = val + config.CaptiveCoreTomlParams.Strict = true + } + return nil + }, + }, + &support.ConfigOption{ + Name: "enable-captive-core-ingestion", + OptType: types.Bool, + FlagDefault: true, + Required: false, + Usage: "causes Horizon to ingest from a Captive Stellar Core process instead of a persistent Stellar Core database", + ConfigKey: &config.EnableCaptiveCoreIngestion, + }, + &support.ConfigOption{ + Name: "captive-core-http-port", + OptType: types.Uint, + CustomSetValue: support.SetOptionalUint, + Required: false, + FlagDefault: uint(0), + Usage: "HTTP port for Captive Core to listen on (0 disables the HTTP server)", + ConfigKey: &config.CaptiveCoreTomlParams.HTTPPort, + }, + &support.ConfigOption{ + Name: "captive-core-storage-path", + OptType: types.String, + CustomSetValue: func(opt *support.ConfigOption) error { + existingValue := viper.GetString(opt.Name) + if existingValue == "" || existingValue == "." { + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("Unable to determine the current directory: %s", err) + } + existingValue = cwd + } + *opt.ConfigKey.(*string) = existingValue + return nil + }, + Required: false, + Usage: "Storage location for Captive Core bucket data", + ConfigKey: &config.CaptiveCoreStoragePath, + }, + &support.ConfigOption{ + Name: "captive-core-peer-port", + OptType: types.Uint, + FlagDefault: uint(0), + CustomSetValue: support.SetOptionalUint, + Required: false, + Usage: "port for Captive Core to bind to for connecting to the Stellar swarm (0 uses Stellar Core's default)", + ConfigKey: &config.CaptiveCoreTomlParams.PeerPort, + }, + &support.ConfigOption{ + Name: StellarCoreDBURLFlagName, + EnvVar: "STELLAR_CORE_DATABASE_URL", + ConfigKey: &config.StellarCoreDatabaseURL, + OptType: types.String, + Required: false, + Usage: "stellar-core postgres database to connect with", + }, + &support.ConfigOption{ + Name: StellarCoreURLFlagName, + ConfigKey: &config.StellarCoreURL, + OptType: types.String, + Usage: "stellar-core to connect with (for http commands). If unset and the local Captive core is enabled, it will use http://localhost:", + }, + &support.ConfigOption{ + Name: "history-archive-urls", + ConfigKey: &config.HistoryArchiveURLs, + OptType: types.String, + Required: false, + FlagDefault: "", + CustomSetValue: func(co *support.ConfigOption) error { + stringOfUrls := viper.GetString(co.Name) + urlStrings := strings.Split(stringOfUrls, ",") + *(co.ConfigKey.(*[]string)) = urlStrings + return nil + }, + Usage: "comma-separated list of stellar history archives to connect with", + }, + &support.ConfigOption{ + Name: "port", + ConfigKey: &config.Port, + OptType: types.Uint, + FlagDefault: uint(8000), + Usage: "tcp port to listen on for http requests", + }, + &support.ConfigOption{ + Name: "admin-port", + ConfigKey: &config.AdminPort, + OptType: types.Uint, + FlagDefault: uint(0), + Usage: "WARNING: this should not be accessible from the Internet and does not use TLS, tcp port to listen on for admin http requests, 0 (default) disables the admin server", + }, + &support.ConfigOption{ + Name: "max-db-connections", + ConfigKey: &config.MaxDBConnections, + OptType: types.Int, + FlagDefault: 0, + Usage: "when set has a priority over horizon-db-max-open-connections, horizon-db-max-idle-connections. 
max horizon database open connections may need to be increased when responses are slow but DB CPU is normal", + }, + &support.ConfigOption{ + Name: "horizon-db-max-open-connections", + ConfigKey: &config.HorizonDBMaxOpenConnections, + OptType: types.Int, + FlagDefault: 20, + Usage: "max horizon database open connections. may need to be increased when responses are slow but DB CPU is normal", + }, + &support.ConfigOption{ + Name: "horizon-db-max-idle-connections", + ConfigKey: &config.HorizonDBMaxIdleConnections, + OptType: types.Int, + FlagDefault: 20, + Usage: "max horizon database idle connections. may need to be set to the same value as horizon-db-max-open-connections when responses are slow and DB CPU is normal, because it may indicate that a lot of time is spent closing/opening idle connections. This can happen in case of high variance in number of requests. must be equal or lower than max open connections", + }, + &support.ConfigOption{ + Name: "sse-update-frequency", + ConfigKey: &config.SSEUpdateFrequency, + OptType: types.Int, + FlagDefault: 5, + CustomSetValue: support.SetDuration, + Usage: "defines how often streams should check if there's a new ledger (in seconds), may need to increase in case of big number of streams", + }, + &support.ConfigOption{ + Name: "connection-timeout", + ConfigKey: &config.ConnectionTimeout, + OptType: types.Int, + FlagDefault: 55, + CustomSetValue: support.SetDuration, + Usage: "defines the timeout of connection after which 504 response will be sent or stream will be closed, if Horizon is behind a load balancer with idle connection timeout, this should be set to a few seconds less that idle timeout, does not apply to POST /transactions", + }, + &support.ConfigOption{ + Name: "per-hour-rate-limit", + ConfigKey: &config.RateQuota, + OptType: types.Int, + FlagDefault: 3600, + CustomSetValue: func(co *support.ConfigOption) error { + var rateLimit *throttled.RateQuota = nil + perHourRateLimit := viper.GetInt(co.Name) + if perHourRateLimit != 0 { + rateLimit = &throttled.RateQuota{ + MaxRate: throttled.PerHour(perHourRateLimit), + MaxBurst: 100, + } + *(co.ConfigKey.(**throttled.RateQuota)) = rateLimit + } + return nil + }, + Usage: "max count of requests allowed in a one hour period, by remote ip address", + }, + &support.ConfigOption{ + Name: "friendbot-url", + ConfigKey: &config.FriendbotURL, + OptType: types.String, + CustomSetValue: support.SetURL, + Usage: "friendbot service to redirect to", + }, + &support.ConfigOption{ + Name: "log-level", + ConfigKey: &config.LogLevel, + OptType: types.String, + FlagDefault: "info", + CustomSetValue: func(co *support.ConfigOption) error { + ll, err := logrus.ParseLevel(viper.GetString(co.Name)) + if err != nil { + return fmt.Errorf("Could not parse log-level: %v", viper.GetString(co.Name)) + } + *(co.ConfigKey.(*logrus.Level)) = ll + return nil + }, + Usage: "minimum log severity (debug, info, warn, error) to log", + }, + &support.ConfigOption{ + Name: "log-file", + ConfigKey: &config.LogFile, + OptType: types.String, + Usage: "name of the file where logs will be saved (leave empty to send logs to stdout)", + }, + &support.ConfigOption{ + Name: "captive-core-log-path", + ConfigKey: &config.CaptiveCoreTomlParams.LogPath, + OptType: types.String, + CustomSetValue: support.SetOptionalString, + Required: false, + Usage: "name of the path for Core logs (leave empty to log w/ Horizon only)", + }, + &support.ConfigOption{ + Name: "max-path-length", + ConfigKey: &config.MaxPathLength, + OptType: types.Uint, + FlagDefault: 
uint(3), + Usage: "the maximum number of assets on the path in `/paths` endpoint, warning: increasing this value will increase /paths response time", + }, + &support.ConfigOption{ + Name: "max-assets-per-path-request", + ConfigKey: &config.MaxAssetsPerPathRequest, + OptType: types.Int, + FlagDefault: int(15), + Usage: "the maximum number of assets in '/paths/strict-send' and '/paths/strict-recieve' endpoints", + }, + &support.ConfigOption{ + Name: "disable-pool-path-finding", + ConfigKey: &config.DisablePoolPathFinding, + OptType: types.Bool, + FlagDefault: false, + Required: false, + Usage: "excludes liquidity pools from consideration in the `/paths` endpoint", + }, + &support.ConfigOption{ + Name: "network-passphrase", + ConfigKey: &config.NetworkPassphrase, + OptType: types.String, + Required: true, + Usage: "Override the network passphrase", + }, + &support.ConfigOption{ + Name: "sentry-dsn", + ConfigKey: &config.SentryDSN, + OptType: types.String, + Usage: "Sentry URL to which panics and errors should be reported", + }, + &support.ConfigOption{ + Name: "loggly-token", + ConfigKey: &config.LogglyToken, + OptType: types.String, + Usage: "Loggly token, used to configure log forwarding to loggly", + }, + &support.ConfigOption{ + Name: "loggly-tag", + ConfigKey: &config.LogglyTag, + OptType: types.String, + FlagDefault: "horizon", + Usage: "Tag to be added to every loggly log event", + }, + &support.ConfigOption{ + Name: "tls-cert", + ConfigKey: &config.TLSCert, + OptType: types.String, + Usage: "TLS certificate file to use for securing connections to horizon", + }, + &support.ConfigOption{ + Name: "tls-key", + ConfigKey: &config.TLSKey, + OptType: types.String, + Usage: "TLS private key file to use for securing connections to horizon", + }, + &support.ConfigOption{ + Name: "ingest", + ConfigKey: &config.Ingest, + OptType: types.Bool, + FlagDefault: true, + Usage: "causes this horizon process to ingest data from stellar-core into horizon's db", + }, + &support.ConfigOption{ + Name: "cursor-name", + EnvVar: "CURSOR_NAME", + ConfigKey: &config.CursorName, + OptType: types.String, + FlagDefault: "HORIZON", + Usage: "ingestor cursor used by horizon to ingest from stellar core. must be uppercase and unique for each horizon instance ingesting from that core instance.", + }, + &support.ConfigOption{ + Name: "history-retention-count", + ConfigKey: &config.HistoryRetentionCount, + OptType: types.Uint, + FlagDefault: uint(0), + Usage: "the minimum number of ledgers to maintain within horizon's history tables. 
0 signifies an unlimited number of ledgers will be retained", + }, + &support.ConfigOption{ + Name: "history-stale-threshold", + ConfigKey: &config.StaleThreshold, + OptType: types.Uint, + FlagDefault: uint(0), + Usage: "the maximum number of ledgers the history db is allowed to be out of date from the connected stellar-core db before horizon considers history stale", + }, + &support.ConfigOption{ + Name: "skip-cursor-update", + ConfigKey: &config.SkipCursorUpdate, + OptType: types.Bool, + FlagDefault: false, + Usage: "causes the ingester to skip reporting the last imported ledger state to stellar-core", + }, + &support.ConfigOption{ + Name: "ingest-disable-state-verification", + ConfigKey: &config.IngestDisableStateVerification, + OptType: types.Bool, + FlagDefault: false, + Usage: "ingestion system runs a verification routing to compare state in local database with history buckets, this can be disabled however it's not recommended", + }, + &support.ConfigOption{ + Name: "ingest-enable-extended-log-ledger-stats", + ConfigKey: &config.IngestEnableExtendedLogLedgerStats, + OptType: types.Bool, + FlagDefault: false, + Usage: "enables extended ledger stats in the log (ledger entry changes and operations stats)", + }, + &support.ConfigOption{ + Name: "apply-migrations", + ConfigKey: &config.ApplyMigrations, + OptType: types.Bool, + FlagDefault: false, + Required: false, + Usage: "applies pending migrations before starting horizon", + }, + &support.ConfigOption{ + Name: "checkpoint-frequency", + ConfigKey: &config.CheckpointFrequency, + OptType: types.Uint32, + FlagDefault: uint32(64), + Required: false, + Usage: "establishes how many ledgers exist between checkpoints, do NOT change this unless you really know what you are doing", + }, + &support.ConfigOption{ + Name: "behind-cloudflare", + ConfigKey: &config.BehindCloudflare, + OptType: types.Bool, + FlagDefault: false, + Required: false, + Usage: "determines if Horizon instance is behind Cloudflare, in such case client IP in the logs will be replaced with Cloudflare header (cannot be used with --behind-aws-load-balancer)", + }, + &support.ConfigOption{ + Name: "behind-aws-load-balancer", + ConfigKey: &config.BehindAWSLoadBalancer, + OptType: types.Bool, + FlagDefault: false, + Required: false, + Usage: "determines if Horizon instance is behind AWS load balances like ELB or ALB, in such case client IP in the logs will be replaced with the last IP in X-Forwarded-For header (cannot be used with --behind-cloudflare)", + }, + } + + return config, flags +} + +// NewAppFromFlags constructs a new Horizon App from the given command line flags +func NewAppFromFlags(config *Config, flags support.ConfigOptions) (*App, error) { + err := ApplyFlags(config, flags, ApplyOptions{RequireCaptiveCoreConfig: true, AlwaysIngest: false}) + if err != nil { + return nil, err + } + // Validate app-specific arguments + if config.StellarCoreURL == "" { + return nil, fmt.Errorf("flag --%s cannot be empty", StellarCoreURLFlagName) + } + if config.Ingest && !config.EnableCaptiveCoreIngestion && config.StellarCoreDatabaseURL == "" { + return nil, fmt.Errorf("flag --%s cannot be empty", StellarCoreDBURLFlagName) + } + + log.Infof("Initializing horizon...") + app, err := NewApp(*config) + if err != nil { + return nil, fmt.Errorf("cannot initialize app: %s", err) + } + return app, nil +} + +type ApplyOptions struct { + AlwaysIngest bool + RequireCaptiveCoreConfig bool +} + +// ApplyFlags applies the command line flags on the given Config instance +func ApplyFlags(config 
*Config, flags support.ConfigOptions, options ApplyOptions) error { + // Verify required options and load the config struct + if err := flags.RequireE(); err != nil { + return err + } + if err := flags.SetValues(); err != nil { + return err + } + + // Validate options that should be provided together + if err := validateBothOrNeither("tls-cert", "tls-key"); err != nil { + return err + } + + if options.AlwaysIngest { + config.Ingest = true + } + + if config.Ingest { + // Migrations should be checked as early as possible. Apply and check + // only on ingesting instances which are required to have write-access + // to the DB. + if config.ApplyMigrations { + stdLog.Println("Applying DB migrations...") + if err := applyMigrations(*config); err != nil { + return err + } + } + stdLog.Println("Checking DB migrations...") + if err := checkMigrations(*config); err != nil { + return err + } + + // config.HistoryArchiveURLs contains a single empty value when empty so using + // viper.GetString is easier. + if len(config.HistoryArchiveURLs) == 0 { + return fmt.Errorf("--history-archive-urls must be set when --ingest is set") + } + + if config.EnableCaptiveCoreIngestion { + stdLog.Println("Preparing captive core...") + + binaryPath := viper.GetString(StellarCoreBinaryPathName) + + // If the user didn't specify a Stellar Core binary, we can check the + // $PATH and possibly fill it in for them. + if binaryPath == "" { + if result, err := exec.LookPath("stellar-core"); err == nil { + binaryPath = result + viper.Set(StellarCoreBinaryPathName, binaryPath) + config.CaptiveCoreBinaryPath = binaryPath + } + } + + // NOTE: If both of these are set (regardless of user- or PATH-supplied + // defaults for the binary path), the Remote Captive Core URL + // takes precedence. + if binaryPath == "" && config.RemoteCaptiveCoreURL == "" { + return fmt.Errorf("Invalid config: captive core requires that either --%s or --remote-captive-core-url is set. %s", + StellarCoreBinaryPathName, captiveCoreMigrationHint) + } + + if config.RemoteCaptiveCoreURL == "" && (binaryPath == "" || config.CaptiveCoreConfigPath == "") { + if options.RequireCaptiveCoreConfig { + var err error + errorMessage := fmt.Errorf( + "Invalid config: captive core requires that both --%s and --%s are set. %s", + StellarCoreBinaryPathName, CaptiveCoreConfigPathName, captiveCoreMigrationHint, + ) + + var configFileName string + // Default config files will be located along the binary in the release archive. 
+ switch config.NetworkPassphrase { + case network.TestNetworkPassphrase: + configFileName = "captive-core-testnet.cfg" + config.HistoryArchiveURLs = []string{"https://history.stellar.org/prd/core-testnet/core_testnet_001/"} + case network.PublicNetworkPassphrase: + configFileName = "captive-core-pubnet.cfg" + config.HistoryArchiveURLs = []string{"https://history.stellar.org/prd/core-live/core_live_001/"} + config.UsingDefaultPubnetConfig = true + default: + return errorMessage + } + + executablePath, err := os.Executable() + if err != nil { + return errorMessage + } + + config.CaptiveCoreConfigPath = filepath.Join(filepath.Dir(executablePath), configFileName) + if _, err = os.Stat(config.CaptiveCoreConfigPath); os.IsNotExist(err) { + return errorMessage + } + + config.CaptiveCoreTomlParams.NetworkPassphrase = config.NetworkPassphrase + config.CaptiveCoreToml, err = ledgerbackend.NewCaptiveCoreTomlFromFile(config.CaptiveCoreConfigPath, config.CaptiveCoreTomlParams) + if err != nil { + return fmt.Errorf("Invalid captive core toml file %v", err) + } + } else { + var err error + config.CaptiveCoreTomlParams.HistoryArchiveURLs = config.HistoryArchiveURLs + config.CaptiveCoreTomlParams.NetworkPassphrase = config.NetworkPassphrase + config.CaptiveCoreToml, err = ledgerbackend.NewCaptiveCoreToml(config.CaptiveCoreTomlParams) + if err != nil { + return fmt.Errorf("Invalid captive core toml file %v", err) + } + } + } else if config.RemoteCaptiveCoreURL == "" { + var err error + config.CaptiveCoreTomlParams.HistoryArchiveURLs = config.HistoryArchiveURLs + config.CaptiveCoreTomlParams.NetworkPassphrase = config.NetworkPassphrase + config.CaptiveCoreToml, err = ledgerbackend.NewCaptiveCoreTomlFromFile(config.CaptiveCoreConfigPath, config.CaptiveCoreTomlParams) + if err != nil { + return fmt.Errorf("Invalid captive core toml file %v", err) + } + } + + // If we don't supply an explicit core URL and we are running a local + // captive core process with the http port enabled, point to it. + if config.StellarCoreURL == "" && config.RemoteCaptiveCoreURL == "" && config.CaptiveCoreToml.HTTPPort != 0 { + config.StellarCoreURL = fmt.Sprintf("http://localhost:%d", config.CaptiveCoreToml.HTTPPort) + viper.Set(StellarCoreURLFlagName, config.StellarCoreURL) + } + } + } else { + if config.EnableCaptiveCoreIngestion && (config.CaptiveCoreBinaryPath != "" || config.CaptiveCoreConfigPath != "") { + captiveCoreConfigFlag := captiveCoreConfigAppendPathName + if viper.GetString(CaptiveCoreConfigPathName) != "" { + captiveCoreConfigFlag = CaptiveCoreConfigPathName + } + return fmt.Errorf("Invalid config: one or more captive core params passed (--%s or --%s) but --ingest not set. "+captiveCoreMigrationHint, + StellarCoreBinaryPathName, captiveCoreConfigFlag) + } + if config.StellarCoreDatabaseURL != "" { + return fmt.Errorf("Invalid config: --%s passed but --ingest not set. ", StellarCoreDBURLFlagName) + } + } + + // Configure log file + if config.LogFile != "" { + logFile, err := os.OpenFile(config.LogFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err == nil { + log.DefaultLogger.SetOutput(logFile) + } else { + return fmt.Errorf("Failed to open file to log: %s", err) + } + } + + // Configure log level + log.DefaultLogger.SetLevel(config.LogLevel) + + // Configure DB params. When config.MaxDBConnections is set, set other + // DB params to that value for backward compatibility. 
+ if config.MaxDBConnections != 0 { + config.HorizonDBMaxOpenConnections = config.MaxDBConnections + config.HorizonDBMaxIdleConnections = config.MaxDBConnections + } + + if config.BehindCloudflare && config.BehindAWSLoadBalancer { + return fmt.Errorf("Invalid config: Only one option of --behind-cloudflare and --behind-aws-load-balancer is allowed. If Horizon is behind both, use --behind-cloudflare only.") + } + + return nil +} diff --git a/services/horizon/internal/hchi/context.go b/services/horizon/internal/hchi/context.go new file mode 100644 index 0000000000..566dbb4f52 --- /dev/null +++ b/services/horizon/internal/hchi/context.go @@ -0,0 +1,40 @@ +// Package hchi provides functions to support embedded and retrieving +// a request id from a go context tree +package hchi + +import ( + "context" + + "github.com/go-chi/chi/middleware" +) + +// key is an unexported type for keys defined in this package. +// This prevents collisions with keys defined in other packages. +type key int + +const ( + reqidKey key = iota +) + +// WithRequestID sets the reqid in a new context and returns that context. +func WithRequestID(ctx context.Context, reqid string) context.Context { + return context.WithValue(ctx, reqidKey, reqid) +} + +// WithChiRequestID gets the request id from the chi middleware, sets in a new +// context and returns the context. +func WithChiRequestID(ctx context.Context) context.Context { + reqid := middleware.GetReqID(ctx) + return WithRequestID(ctx, reqid) +} + +// RequestID returns the request id carried in the context, if any. It returns +// "" if no request id has been set or the context is nil. +func RequestID(ctx context.Context) string { + if ctx == nil { + return "" + } + + val, _ := ctx.Value(reqidKey).(string) + return val +} diff --git a/services/horizon/internal/hchi/context_test.go b/services/horizon/internal/hchi/context_test.go new file mode 100644 index 0000000000..fa6f2b07ef --- /dev/null +++ b/services/horizon/internal/hchi/context_test.go @@ -0,0 +1,32 @@ +package hchi + +import ( + "context" + "testing" + + "github.com/go-chi/chi/middleware" + "github.com/stretchr/testify/assert" +) + +func TestRequestContext(t *testing.T) { + ctx := WithRequestID(context.Background(), "2") + assert.Equal(t, "2", ctx.Value(reqidKey)) + + ctx2 := WithRequestID(ctx, "3") + assert.Equal(t, "3", ctx2.Value(reqidKey)) + assert.Equal(t, "2", ctx.Value(reqidKey)) +} + +func TestRequestContextFromCHI(t *testing.T) { + ctx := context.WithValue(context.Background(), middleware.RequestIDKey, "foobar") + ctx2 := WithChiRequestID(ctx) + assert.Equal(t, "foobar", RequestID(ctx2)) +} + +func TestRequestFromContext(t *testing.T) { + ctx := WithRequestID(context.Background(), "2") + ctx2 := WithRequestID(ctx, "3") + assert.Equal(t, "", RequestID(context.Background())) + assert.Equal(t, "2", RequestID(ctx)) + assert.Equal(t, "3", RequestID(ctx2)) +} diff --git a/services/horizon/internal/health.go b/services/horizon/internal/health.go new file mode 100644 index 0000000000..c41e4c8007 --- /dev/null +++ b/services/horizon/internal/health.go @@ -0,0 +1,96 @@ +package horizon + +import ( + "context" + "encoding/json" + "net/http" + "sync" + "time" + + "github.com/stellar/go/protocols/stellarcore" + "github.com/stellar/go/support/clock" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/log" +) + +const ( + dbPingTimeout = 5 * time.Second + infoRequestTimeout = 5 * time.Second + healthCacheTTL = 500 * time.Millisecond +) + +var healthLogger = log.WithField("service", "healthCheck") + 
+type stellarCoreClient interface { + Info(ctx context.Context) (*stellarcore.InfoResponse, error) +} + +type healthCache struct { + response healthResponse + lastUpdate time.Time + ttl time.Duration + clock clock.Clock + lock sync.Mutex +} + +func (h *healthCache) get(runCheck func() healthResponse) healthResponse { + h.lock.Lock() + defer h.lock.Unlock() + + if h.clock.Now().Sub(h.lastUpdate) > h.ttl { + h.response = runCheck() + h.lastUpdate = h.clock.Now() + } + + return h.response +} + +func newHealthCache(ttl time.Duration) *healthCache { + return &healthCache{ttl: ttl} +} + +type healthCheck struct { + session db.SessionInterface + ctx context.Context + core stellarCoreClient + cache *healthCache +} + +type healthResponse struct { + DatabaseConnected bool `json:"database_connected"` + CoreUp bool `json:"core_up"` + CoreSynced bool `json:"core_synced"` +} + +func (h healthCheck) runCheck() healthResponse { + response := healthResponse{ + DatabaseConnected: true, + CoreUp: true, + CoreSynced: true, + } + if err := h.session.Ping(h.ctx, dbPingTimeout); err != nil { + healthLogger.Warnf("could not ping db: %s", err) + response.DatabaseConnected = false + } + if resp, err := h.core.Info(h.ctx); err != nil { + healthLogger.Warnf("request to stellar core failed: %s", err) + response.CoreUp = false + response.CoreSynced = false + } else { + response.CoreSynced = resp.IsSynced() + } + + return response +} + +func (h healthCheck) ServeHTTP(w http.ResponseWriter, r *http.Request) { + response := h.cache.get(h.runCheck) + + if !response.DatabaseConnected || !response.CoreSynced || !response.CoreUp { + w.WriteHeader(http.StatusServiceUnavailable) + } + + if err := json.NewEncoder(w).Encode(response); err != nil { + healthLogger.Warnf("could not write response: %s", err) + } +} diff --git a/services/horizon/internal/health_test.go b/services/horizon/internal/health_test.go new file mode 100644 index 0000000000..c235f6c8ce --- /dev/null +++ b/services/horizon/internal/health_test.go @@ -0,0 +1,213 @@ +package horizon + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/stellar/go/protocols/stellarcore" + "github.com/stellar/go/support/clock" + "github.com/stellar/go/support/clock/clocktest" + "github.com/stellar/go/support/db" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +var _ stellarCoreClient = (*mockStellarCore)(nil) + +type mockStellarCore struct { + mock.Mock +} + +func (m *mockStellarCore) Info(ctx context.Context) (*stellarcore.InfoResponse, error) { + args := m.Called(ctx) + return args.Get(0).(*stellarcore.InfoResponse), args.Error(1) +} + +func TestHealthCheck(t *testing.T) { + synced := &stellarcore.InfoResponse{} + synced.Info.State = "Synced!" 
+ notSynced := &stellarcore.InfoResponse{} + notSynced.Info.State = "Catching up" + + for _, tc := range []struct { + name string + pingErr error + coreErr error + coreResponse *stellarcore.InfoResponse + expectedStatus int + expectedResponse healthResponse + }{ + { + "healthy", + nil, + nil, + synced, + http.StatusOK, + healthResponse{ + DatabaseConnected: true, + CoreUp: true, + CoreSynced: true, + }, + }, + { + "db down", + fmt.Errorf("database is down"), + nil, + synced, + http.StatusServiceUnavailable, + healthResponse{ + DatabaseConnected: false, + CoreUp: true, + CoreSynced: true, + }, + }, + { + "stellar core not synced", + nil, + nil, + notSynced, + http.StatusServiceUnavailable, + healthResponse{ + DatabaseConnected: true, + CoreUp: true, + CoreSynced: false, + }, + }, + { + "stellar core down", + nil, + fmt.Errorf("stellar core is down"), + nil, + http.StatusServiceUnavailable, + healthResponse{ + DatabaseConnected: true, + CoreUp: false, + CoreSynced: false, + }, + }, + { + "stellar core and db down", + fmt.Errorf("database is down"), + fmt.Errorf("stellar core is down"), + nil, + http.StatusServiceUnavailable, + healthResponse{ + DatabaseConnected: false, + CoreUp: false, + CoreSynced: false, + }, + }, + { + "stellar core not synced and db down", + fmt.Errorf("database is down"), + nil, + notSynced, + http.StatusServiceUnavailable, + healthResponse{ + DatabaseConnected: false, + CoreUp: true, + CoreSynced: false, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + session := &db.MockSession{} + session.On("Ping", ctx, dbPingTimeout).Return(tc.pingErr).Once() + core := &mockStellarCore{} + core.On("Info", ctx).Return(tc.coreResponse, tc.coreErr).Once() + + h := healthCheck{ + session: session, + ctx: ctx, + core: core, + cache: newHealthCache(healthCacheTTL), + } + + w := httptest.NewRecorder() + h.ServeHTTP(w, nil) + assert.Equal(t, tc.expectedStatus, w.Code) + + var response healthResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, tc.expectedResponse, response) + + session.AssertExpectations(t) + core.AssertExpectations(t) + }) + } +} + +func TestHealthCheckCache(t *testing.T) { + cachedResponse := healthResponse{ + DatabaseConnected: false, + CoreUp: true, + CoreSynced: false, + } + h := healthCheck{ + session: nil, + ctx: context.Background(), + core: nil, + cache: &healthCache{ + response: cachedResponse, + lastUpdate: time.Unix(0, 0), + ttl: 5 * time.Second, + lock: sync.Mutex{}, + }, + } + + for _, timestamp := range []time.Time{time.Unix(1, 0), time.Unix(4, 0)} { + h.cache.clock = clock.Clock{ + Source: clocktest.FixedSource(timestamp), + } + w := httptest.NewRecorder() + h.ServeHTTP(w, nil) + assert.Equal(t, http.StatusServiceUnavailable, w.Code) + + var response healthResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, cachedResponse, response) + assert.Equal(t, cachedResponse, h.cache.response) + assert.True(t, h.cache.lastUpdate.Equal(time.Unix(0, 0))) + } + + ctx := context.Background() + session := &db.MockSession{} + session.On("Ping", ctx, dbPingTimeout).Return(nil).Once() + core := &mockStellarCore{} + core.On("Info", h.ctx).Return(&stellarcore.InfoResponse{}, fmt.Errorf("core err")).Once() + h.session = session + h.core = core + updatedResponse := healthResponse{ + DatabaseConnected: true, + CoreUp: false, + CoreSynced: false, + } + for _, timestamp := range []time.Time{time.Unix(6, 0), time.Unix(7, 0)} { + h.cache.clock = 
clock.Clock{ + Source: clocktest.FixedSource(timestamp), + } + w := httptest.NewRecorder() + h.ServeHTTP(w, nil) + assert.Equal(t, http.StatusServiceUnavailable, w.Code) + + var response healthResponse + err := json.Unmarshal(w.Body.Bytes(), &response) + assert.NoError(t, err) + assert.Equal(t, updatedResponse, response) + assert.Equal(t, updatedResponse, h.cache.response) + assert.True(t, h.cache.lastUpdate.Equal(time.Unix(6, 0))) + } + + session.AssertExpectations(t) + core.AssertExpectations(t) +} diff --git a/services/horizon/internal/helpers_test.go b/services/horizon/internal/helpers_test.go new file mode 100644 index 0000000000..023b49017b --- /dev/null +++ b/services/horizon/internal/helpers_test.go @@ -0,0 +1,38 @@ +package horizon + +import ( + "log" + "time" + + "github.com/stellar/throttled" + + "github.com/stellar/go/network" + "github.com/stellar/go/services/horizon/internal/test" + supportLog "github.com/stellar/go/support/log" +) + +func NewTestApp() *App { + app, err := NewApp(NewTestConfig()) + if err != nil { + log.Fatal("cannot create app", err) + } + return app +} + +func NewTestConfig() Config { + return Config{ + DatabaseURL: test.DatabaseURL(), + StellarCoreDatabaseURL: test.StellarCoreDatabaseURL(), + RateQuota: &throttled.RateQuota{ + MaxRate: throttled.PerHour(1000), + MaxBurst: 100, + }, + ConnectionTimeout: 55 * time.Second, // Default + LogLevel: supportLog.InfoLevel, + NetworkPassphrase: network.TestNetworkPassphrase, + } +} + +func NewRequestHelper(app *App) test.RequestHelper { + return test.NewRequestHelper(app.webServer.Router.Mux) +} diff --git a/services/horizon/internal/httpt_test.go b/services/horizon/internal/httpt_test.go new file mode 100644 index 0000000000..a0ff96f521 --- /dev/null +++ b/services/horizon/internal/httpt_test.go @@ -0,0 +1,109 @@ +package horizon + +import ( + "context" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/stellar/go/services/horizon/internal/test" +) + +type HTTPT struct { + Assert *test.Assertions + App *App + RH test.RequestHelper + coreServer *test.StaticMockServer + *test.T +} + +func startHTTPTest(t *testing.T, scenario string) *HTTPT { + ret := &HTTPT{T: test.Start(t)} + if scenario == "" { + test.ResetHorizonDB(t, ret.HorizonDB) + } else { + ret.Scenario(scenario) + } + ret.App = NewTestApp() + ret.RH = test.NewRequestHelper(ret.App.webServer.Router.Mux) + ret.Assert = &test.Assertions{ret.T.Assert} + + ret.coreServer = test.NewStaticMockServer(`{ + "info": { + "network": "test", + "build": "test-core", + "ledger": { + "version": 18, + "num": 64 + }, + "protocol_version": 18, + "network": "Test SDF Network ; September 2015" + } + }`) + + ret.App.config.StellarCoreURL = ret.coreServer.URL + ret.App.UpdateCoreLedgerState(context.Background()) + ret.App.UpdateStellarCoreInfo(context.Background()) + ret.App.UpdateHorizonLedgerState(context.Background()) + + return ret +} + +// StartHTTPTest is a helper function to setup a new test that will make http +// requests. Pair it with a deferred call to FinishHTTPTest. 
+func StartHTTPTest(t *testing.T, scenario string) *HTTPT { + if scenario == "" { + t.Fatal("scenario cannot be empty string") + } + return startHTTPTest(t, scenario) +} + +// StartHTTPTestWithoutScenario is like StartHTTPTest except it does not use +// a sql scenario +func StartHTTPTestWithoutScenario(t *testing.T) *HTTPT { + return startHTTPTest(t, "") +} + +// Get delegates to the test's request helper +func (ht *HTTPT) Get( + path string, + fn ...func(*http.Request), +) *httptest.ResponseRecorder { + return ht.RH.Get(path, fn...) +} + +// GetWithParams delegates to the test's request helper and encodes along with the query params +func (ht *HTTPT) GetWithParams( + path string, + queryParams url.Values, + fn ...func(*http.Request), +) *httptest.ResponseRecorder { + return ht.RH.Get(path+"?"+queryParams.Encode(), fn...) +} + +// Finish closes the test app and finishes the test +func (ht *HTTPT) Finish() { + ht.T.Finish() + ht.App.Close() + ht.coreServer.Close() +} + +// Post delegates to the test's request helper +func (ht *HTTPT) Post( + path string, + form url.Values, + mods ...func(*http.Request), +) *httptest.ResponseRecorder { + return ht.RH.Post(path, form, mods...) +} + +// ReapHistory causes the test server to run `DeleteUnretainedHistory`, after +// setting the retention count to the provided number. +func (ht *HTTPT) ReapHistory(retention uint) { + ht.App.reaper.RetentionCount = retention + err := ht.App.DeleteUnretainedHistory(context.Background()) + ht.Require.NoError(err) + ht.App.UpdateCoreLedgerState(context.Background()) + ht.App.UpdateHorizonLedgerState(context.Background()) +} diff --git a/services/horizon/internal/httpx/handler.go b/services/horizon/internal/httpx/handler.go new file mode 100644 index 0000000000..39fd3c15f5 --- /dev/null +++ b/services/horizon/internal/httpx/handler.go @@ -0,0 +1,330 @@ +package httpx + +import ( + "database/sql" + "io" + "net/http" + + "github.com/stellar/go/services/horizon/internal/actions" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/render" + hProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/render/sse" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/render/httpjson" + "github.com/stellar/go/support/render/problem" +) + +type objectAction interface { + GetResource( + w actions.HeaderWriter, + r *http.Request, + ) (interface{}, error) +} + +type ObjectActionHandler struct { + Action objectAction +} + +func (handler ObjectActionHandler) ServeHTTP( + w http.ResponseWriter, + r *http.Request, +) { + switch render.Negotiate(r) { + case render.MimeHal, render.MimeJSON: + response, err := handler.Action.GetResource(w, r) + if err != nil { + problem.Render(r.Context(), w, err) + return + } + + httpjson.Render( + w, + response, + httpjson.HALJSON, + ) + return + } + + problem.Render(r.Context(), w, hProblem.NotAcceptable) +} + +const defaultObjectStreamLimit = 10 + +type streamableObjectAction interface { + GetResource( + w actions.HeaderWriter, + r *http.Request, + ) (actions.StreamableObjectResponse, error) +} + +type streamableObjectActionHandler struct { + action streamableObjectAction + streamHandler sse.StreamHandler + limit int +} + +func (handler streamableObjectActionHandler) ServeHTTP( + w http.ResponseWriter, + 
r *http.Request, +) { + switch render.Negotiate(r) { + case render.MimeHal, render.MimeJSON: + response, err := handler.action.GetResource(w, r) + if err != nil { + problem.Render(r.Context(), w, err) + return + } + + httpjson.Render( + w, + response, + httpjson.HALJSON, + ) + return + case render.MimeEventStream: + handler.renderStream(w, r) + return + } + + problem.Render(r.Context(), w, hProblem.NotAcceptable) +} + +func repeatableReadStream( + r *http.Request, + generateEvents sse.GenerateEventsFunc, +) sse.GenerateEventsFunc { + var session db.SessionInterface + if val := r.Context().Value(&horizonContext.SessionContextKey); val != nil { + session = val.(db.SessionInterface) + } + + return func() ([]sse.Event, error) { + if session != nil { + err := session.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }) + if err != nil { + return nil, errors.Wrap(err, "Error starting repeatable read transaction") + } + defer session.Rollback() + } + + return generateEvents() + } +} + +func (handler streamableObjectActionHandler) renderStream( + w http.ResponseWriter, + r *http.Request, +) { + var lastResponse actions.StreamableObjectResponse + limit := handler.limit + if limit == 0 { + limit = defaultObjectStreamLimit + } + + handler.streamHandler.ServeStream( + w, + r, + limit, + repeatableReadStream(r, func() ([]sse.Event, error) { + response, err := handler.action.GetResource(w, r) + if err != nil { + return nil, err + } + + if lastResponse == nil || !lastResponse.Equals(response) { + lastResponse = response + return []sse.Event{{Data: response}}, nil + } + return []sse.Event{}, nil + }), + ) +} + +type pageAction interface { + GetResourcePage(w actions.HeaderWriter, r *http.Request) ([]hal.Pageable, error) +} + +type pageActionHandler struct { + action pageAction + streamable bool + streamHandler sse.StreamHandler + repeatableRead bool + ledgerState *ledger.State +} + +func restPageHandler(ledgerState *ledger.State, action pageAction) pageActionHandler { + return pageActionHandler{action: action, ledgerState: ledgerState} +} + +// streamableStatePageHandler creates a streamable page handler than generates +// events within a REPEATABLE READ transaction. +func streamableStatePageHandler( + ledgerState *ledger.State, + action pageAction, + streamHandler sse.StreamHandler, +) pageActionHandler { + return pageActionHandler{ + action: action, + ledgerState: ledgerState, + streamable: true, + streamHandler: streamHandler, + repeatableRead: true, + } +} + +// streamableStatePageHandler creates a streamable page handler than generates +// events without starting a REPEATABLE READ transaction. +func streamableHistoryPageHandler( + ledgerState *ledger.State, + action pageAction, + streamHandler sse.StreamHandler, +) pageActionHandler { + return pageActionHandler{ + action: action, + ledgerState: ledgerState, + streamable: true, + streamHandler: streamHandler, + repeatableRead: false, + } +} + +func (handler pageActionHandler) renderPage(w http.ResponseWriter, r *http.Request) { + records, err := handler.action.GetResourcePage(w, r) + if err != nil { + problem.Render(r.Context(), w, err) + return + } + + page, err := buildPage(handler.ledgerState, r, records) + + if err != nil { + problem.Render(r.Context(), w, err) + return + } + + httpjson.Render( + w, + page, + httpjson.HALJSON, + ) +} + +func (handler pageActionHandler) renderStream(w http.ResponseWriter, r *http.Request) { + // Use pq to Get SSE limit. 
+ pq, err := actions.GetPageQuery(handler.ledgerState, r) + if err != nil { + problem.Render(r.Context(), w, err) + return + } + + var generateEvents sse.GenerateEventsFunc = func() ([]sse.Event, error) { + records, err := handler.action.GetResourcePage(w, r) + if err != nil { + return nil, err + } + + events := make([]sse.Event, 0, len(records)) + for _, record := range records { + events = append(events, sse.Event{ID: record.PagingToken(), Data: record}) + } + + if len(events) > 0 { + // Update the cursor for the next call to GetObject, getCursor + // will use Last-Event-ID if present. This feels kind of hacky, + // but otherwise, we'll have to edit r.URL, which is also a + // hack. + r.Header.Set("Last-Event-ID", events[len(events)-1].ID) + } else if len(r.Header.Get("Last-Event-ID")) == 0 { + // If there are no records and Last-Event-ID has not been set, + // use the cursor from pq as the Last-Event-ID, otherwise, we'll + // keep using `now` which will always resolve to the next + // ledger. + r.Header.Set("Last-Event-ID", pq.Cursor) + } + + return events, nil + } + + if handler.repeatableRead { + generateEvents = repeatableReadStream(r, generateEvents) + } + + handler.streamHandler.ServeStream( + w, + r, + int(pq.Limit), + generateEvents, + ) +} + +func (handler pageActionHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + switch render.Negotiate(r) { + case render.MimeHal, render.MimeJSON: + handler.renderPage(w, r) + return + case render.MimeEventStream: + if handler.streamable { + handler.renderStream(w, r) + return + } + } + + problem.Render(r.Context(), w, hProblem.NotAcceptable) +} + +func buildPage(ledgerState *ledger.State, r *http.Request, records []hal.Pageable) (hal.Page, error) { + // Always DisableCursorValidation - we can assume it's valid since the + // validation is done in GetResourcePage. 
+ pageQuery, err := actions.GetPageQuery(ledgerState, r, actions.DisableCursorValidation) + if err != nil { + return hal.Page{}, err + } + + ctx := r.Context() + + page := hal.Page{ + Cursor: pageQuery.Cursor, + Order: pageQuery.Order, + Limit: pageQuery.Limit, + } + page.Init() + + for _, record := range records { + page.Add(record) + } + + page.FullURL = actions.FullURL(ctx) + page.PopulateLinks() + + return page, nil +} + +type rawAction interface { + WriteRawResponse(w io.Writer, r *http.Request) error +} + +func HandleRaw(action rawAction) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if err := action.WriteRawResponse(w, r); err != nil { + problem.Render(r.Context(), w, err) + } + } +} + +func WrapRaw(next http.Handler, action rawAction) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch render.Negotiate(r) { + case render.MimeRaw: + HandleRaw(action).ServeHTTP(w, r) + default: + next.ServeHTTP(w, r) + } + }) +} diff --git a/services/horizon/internal/httpx/middleware.go b/services/horizon/internal/httpx/middleware.go new file mode 100644 index 0000000000..5c93295472 --- /dev/null +++ b/services/horizon/internal/httpx/middleware.go @@ -0,0 +1,435 @@ +package httpx + +import ( + "context" + "database/sql" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" + "github.com/prometheus/client_golang/prometheus" + + "github.com/stellar/go/services/horizon/internal/actions" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/errors" + "github.com/stellar/go/services/horizon/internal/hchi" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/render" + hProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/support/db" + supportErrors "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/problem" +) + +// requestCacheHeadersMiddleware adds caching headers to each response. +func requestCacheHeadersMiddleware(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Before changing this read Stack Overflow answer about staled request + // in older versions of Chrome: + // https://stackoverflow.com/questions/27513994/chrome-stalls-when-making-multiple-requests-to-same-resource + w.Header().Set("Cache-Control", "no-cache, no-store, max-age=0") + h.ServeHTTP(w, r) + }) +} + +func contextMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ctx = hchi.WithChiRequestID(ctx) + ctx = horizonContext.RequestContext(ctx, w, r) + + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +const ( + clientNameHeader = "X-Client-Name" + clientVersionHeader = "X-Client-Version" + appNameHeader = "X-App-Name" + appVersionHeader = "X-App-Version" +) + +func newWrapResponseWriter(w http.ResponseWriter, r *http.Request) middleware.WrapResponseWriter { + mw, ok := w.(middleware.WrapResponseWriter) + if !ok { + mw = middleware.NewWrapResponseWriter(w, r.ProtoMajor) + } + + return mw +} + +// loggerMiddleware logs http requests and resposnes to the logging subsytem of horizon. 
+func loggerMiddleware(serverMetrics *ServerMetrics) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + mw := newWrapResponseWriter(w, r) + + logger := log.WithField("req", middleware.GetReqID(ctx)) + ctx = log.Set(ctx, logger) + + // Checking `Accept` header from user request because if the streaming connection + // is reset before sending the first event no Content-Type header is sent in a response. + acceptHeader := r.Header.Get("Accept") + streaming := strings.Contains(acceptHeader, render.MimeEventStream) + + then := time.Now() + next.ServeHTTP(mw, r.WithContext(ctx)) + duration := time.Since(then) + logEndOfRequest(ctx, r, serverMetrics.RequestDurationSummary, duration, mw, streaming) + }) + } +} + +// timeoutMiddleware ensures the request is terminated after the given timeout +func timeoutMiddleware(timeout time.Duration) func(next http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + mw := newWrapResponseWriter(w, r) + ctx, cancel := context.WithTimeout(r.Context(), timeout) + defer func() { + cancel() + if ctx.Err() == context.DeadlineExceeded { + if mw.Status() == 0 { + // only write the header if it hasn't been written yet + mw.WriteHeader(http.StatusGatewayTimeout) + } + } + }() + + // txsub has a custom timeout + if r.Method != http.MethodPost { + r = r.WithContext(ctx) + } + next.ServeHTTP(mw, r) + } + return http.HandlerFunc(fn) + } +} + +// getClientData gets client data (name or version) from header or GET parameter +// (useful when not possible to set headers, like in EventStream). +func getClientData(r *http.Request, headerName string) string { + value := r.Header.Get(headerName) + if value != "" { + return value + } + + value = r.URL.Query().Get(headerName) + if value == "" { + value = "undefined" + } + + return value +} + +var routeRegexp = regexp.MustCompile("{([^:}]*):[^}]*}") + +// https://prometheus.io/docs/instrumenting/exposition_formats/ +// label_value can be any sequence of UTF-8 characters, but the backslash (\), +// double-quote ("), and line feed (\n) characters have to be escaped as \\, +// \", and \n, respectively. +func sanitizeMetricRoute(routePattern string) string { + route := routeRegexp.ReplaceAllString(routePattern, "{$1}") + route = strings.ReplaceAll(route, "\\", "\\\\") + route = strings.ReplaceAll(route, "\"", "\\\"") + route = strings.ReplaceAll(route, "\n", "\\n") + if route == "" { + // Can be empty when request did not reach the final route (ex. blocked by + // a middleware). 
More info: https://github.com/go-chi/chi/issues/270 + return "undefined" + } + return route +} + +// Author: https://github.com/rliebz +// From: https://github.com/go-chi/chi/issues/270#issuecomment-479184559 +// https://github.com/go-chi/chi/blob/master/LICENSE +func getRoutePattern(r *http.Request) string { + rctx := chi.RouteContext(r.Context()) + if pattern := rctx.RoutePattern(); pattern != "" { + // Pattern is already available + return pattern + } + + routePath := r.URL.Path + if r.URL.RawPath != "" { + routePath = r.URL.RawPath + } + + tctx := chi.NewRouteContext() + if !rctx.Routes.Match(tctx, r.Method, routePath) { + return "" + } + + // tctx has the updated pattern, since Match mutates it + return tctx.RoutePattern() +} + +func logEndOfRequest(ctx context.Context, r *http.Request, requestDurationSummary *prometheus.SummaryVec, duration time.Duration, mw middleware.WrapResponseWriter, streaming bool) { + route := sanitizeMetricRoute(getRoutePattern(r)) + + referer := r.Referer() + if referer == "" { + referer = r.Header.Get("Origin") + } + if referer == "" { + referer = "undefined" + } + + log.Ctx(ctx).WithFields(log.F{ + "bytes": mw.BytesWritten(), + "client_name": getClientData(r, clientNameHeader), + "client_version": getClientData(r, clientVersionHeader), + "app_name": getClientData(r, appNameHeader), + "app_version": getClientData(r, appVersionHeader), + "duration": duration.Seconds(), + "x_forwarder_for": r.Header.Get("X-Forwarded-For"), + "host": r.Host, + "ip": remoteAddrIP(r), + "ip_port": r.RemoteAddr, + "method": r.Method, + "path": r.URL.String(), + "route": route, + "status": mw.Status(), + "streaming": streaming, + "referer": referer, + }).Info("Finished request") + + requestDurationSummary.With(prometheus.Labels{ + "status": strconv.FormatInt(int64(mw.Status()), 10), + "route": route, + "streaming": strconv.FormatBool(streaming), + "method": r.Method, + }).Observe(float64(duration.Seconds())) +} + +// recoverMiddleware helps the server recover from panics. It ensures that +// no request can fully bring down the horizon server, and it also logs the +// panics to the logging subsystem. 
+func recoverMiddleware(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + defer func() { + if rec := recover(); rec != nil { + err := errors.FromPanic(rec) + errors.ReportToSentry(err, r) + problem.Render(ctx, w, err) + } + }() + + h.ServeHTTP(w, r) + }) +} + +// NewHistoryMiddleware adds session to the request context and ensures Horizon +// is not in a stale state, which is when the difference between latest core +// ledger and latest history ledger is higher than the given threshold +func NewHistoryMiddleware(ledgerState *ledger.State, staleThreshold int32, session db.SessionInterface) func(http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + chiRoute := chi.RouteContext(ctx) + if chiRoute != nil { + ctx = context.WithValue(ctx, &db.RouteContextKey, sanitizeMetricRoute(chiRoute.RoutePattern())) + } + if staleThreshold > 0 { + ls := ledgerState.CurrentStatus() + isStale := (ls.CoreLatest - ls.HistoryLatest) > int32(staleThreshold) + if isStale { + err := hProblem.StaleHistory + err.Extras = map[string]interface{}{ + "history_latest_ledger": ls.HistoryLatest, + "core_latest_ledger": ls.CoreLatest, + } + problem.Render(ctx, w, err) + return + } + } + + requestSession := session.Clone() + h.ServeHTTP(w, r.WithContext( + context.WithValue( + ctx, + &horizonContext.SessionContextKey, + requestSession, + ), + )) + }) + } +} + +// StateMiddleware is a middleware which enables a state handler if the state +// has been initialized. +// Unless NoStateVerification is set, it ensures that the state (ledger entries) +// has been verified and is correct (Otherwise returns `500 Internal Server Error` to prevent +// returning invalid data to the user) +type StateMiddleware struct { + HorizonSession db.SessionInterface + NoStateVerification bool +} + +func ingestionStatus(ctx context.Context, q *history.Q) (uint32, bool, error) { + version, err := q.GetIngestVersion(ctx) + if err != nil { + return 0, false, supportErrors.Wrap( + err, "Error running GetIngestVersion", + ) + } + + lastIngestedLedger, err := q.GetLastLedgerIngestNonBlocking(ctx) + if err != nil { + return 0, false, supportErrors.Wrap( + err, "Error running GetLastLedgerIngestNonBlocking", + ) + } + + var lastHistoryLedger uint32 + err = q.LatestLedger(ctx, &lastHistoryLedger) + if err != nil { + return 0, false, supportErrors.Wrap(err, "Error running LatestLedger") + } + + ready := version == ingest.CurrentVersion && + lastIngestedLedger > 0 && + lastIngestedLedger == lastHistoryLedger + + return lastIngestedLedger, ready, nil +} + +// WrapFunc executes the middleware on a given HTTP handler function +func (m *StateMiddleware) WrapFunc(h http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + chiRoute := chi.RouteContext(ctx) + if chiRoute != nil { + ctx = context.WithValue(ctx, &db.RouteContextKey, sanitizeMetricRoute(chiRoute.RoutePattern())) + } + session := m.HorizonSession.Clone() + q := &history.Q{session} + sseRequest := render.Negotiate(r) == render.MimeEventStream + + // We want to start a repeatable read session to ensure that the data we + // fetch from the db belong to the same ledger. 
+ // Otherwise, because the ingestion system is running concurrently with this request, + // it is possible to have one read fetch data from ledger N and another read + // fetch data from ledger N+1 . + err := session.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }) + if err != nil { + err = supportErrors.Wrap(err, "Error starting ingestion read transaction") + problem.Render(ctx, w, err) + return + } + defer session.Rollback() + + if !m.NoStateVerification { + stateInvalid, invalidErr := q.GetExpStateInvalid(ctx) + if invalidErr != nil { + invalidErr = supportErrors.Wrap(invalidErr, "Error running GetExpStateInvalid") + problem.Render(ctx, w, invalidErr) + return + } + if stateInvalid { + problem.Render(ctx, w, problem.ServerError) + return + } + } + + lastIngestedLedger, ready, err := ingestionStatus(ctx, q) + if err != nil { + problem.Render(ctx, w, err) + return + } + if !m.NoStateVerification && !ready { + problem.Render(ctx, w, hProblem.StillIngesting) + return + } + + // for SSE requests we need to discard the repeatable read transaction + // otherwise, the stream will not pick up updates occurring in future + // ledgers + if sseRequest { + if err = session.Rollback(); err != nil { + problem.Render( + ctx, + w, + supportErrors.Wrap( + err, + "Could not roll back repeatable read session for SSE request", + ), + ) + return + } + } else { + actions.SetLastLedgerHeader(w, lastIngestedLedger) + } + + h.ServeHTTP(w, r.WithContext( + context.WithValue(ctx, &horizonContext.SessionContextKey, session), + )) + } +} + +// WrapFunc executes the middleware on a given HTTP handler function +func (m *StateMiddleware) Wrap(h http.Handler) http.Handler { + return m.WrapFunc(h.ServeHTTP) +} + +type ReplicaSyncCheckMiddleware struct { + PrimaryHistoryQ *history.Q + ReplicaHistoryQ *history.Q + ServerMetrics *ServerMetrics +} + +// WrapFunc executes the middleware on a given HTTP handler function +func (m *ReplicaSyncCheckMiddleware) WrapFunc(h http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + for attempt := 1; attempt <= 4; attempt++ { + primaryIngestLedger, err := m.PrimaryHistoryQ.GetLastLedgerIngestNonBlocking(r.Context()) + if err != nil { + problem.Render(r.Context(), w, err) + return + } + + replicaIngestLedger, err := m.ReplicaHistoryQ.GetLastLedgerIngestNonBlocking(r.Context()) + if err != nil { + problem.Render(r.Context(), w, err) + return + } + + if replicaIngestLedger >= primaryIngestLedger { + break + } + + switch attempt { + case 1: + time.Sleep(20 * time.Millisecond) + case 2: + time.Sleep(40 * time.Millisecond) + case 3: + time.Sleep(80 * time.Millisecond) + case 4: + problem.Render(r.Context(), w, hProblem.StaleHistory) + m.ServerMetrics.ReplicaLagErrorsCounter.Inc() + return + } + } + + h.ServeHTTP(w, r) + } +} + +func (m *ReplicaSyncCheckMiddleware) Wrap(h http.Handler) http.Handler { + return m.WrapFunc(h.ServeHTTP) +} diff --git a/services/horizon/internal/httpx/middleware_test.go b/services/horizon/internal/httpx/middleware_test.go new file mode 100644 index 0000000000..a71a7aa0a7 --- /dev/null +++ b/services/horizon/internal/httpx/middleware_test.go @@ -0,0 +1,47 @@ +package httpx + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMiddlewareSanitizesRoutesForPrometheus(t *testing.T) { + for _, setup := range []struct { + name string + route string + expected string + }{ + { + "normal routes", + "/accounts", + "/accounts", + }, + { + "non-regex params", + 
"/claimable_balances/{id}", + "/claimable_balances/{id}", + }, + { + "named regexes", + "/accounts/{account_id:\\w+}/effects", + "/accounts/{account_id}/effects", + }, + { + "unnamed regexes", + "/accounts/{\\w+}/effects", + "/accounts/{\\\\w+}/effects", + }, + { + // Not likely used in routes, but just safer for prom metrics anyway + "quotes", + "/{\"}", + "/{\\\"}", + }, + } { + t.Run(setup.name, func(t *testing.T) { + assert.Equal(t, setup.expected, sanitizeMetricRoute(setup.route)) + }) + } + +} diff --git a/services/horizon/internal/httpx/rate_limiter.go b/services/horizon/internal/httpx/rate_limiter.go new file mode 100644 index 0000000000..0bdc2c6609 --- /dev/null +++ b/services/horizon/internal/httpx/rate_limiter.go @@ -0,0 +1,56 @@ +package httpx + +import ( + "net/http" + "strings" + "time" + + "github.com/stellar/throttled" + + "github.com/stellar/go/services/horizon/internal/ledger" + hProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/support/render/problem" +) + +const lruCacheSize = 50000 + +type historyLedgerSourceFactory struct { + updateFrequency time.Duration + ledgerState *ledger.State +} + +func (f historyLedgerSourceFactory) Get() ledger.Source { + return ledger.NewHistoryDBSource(f.updateFrequency, f.ledgerState) +} + +func remoteAddrIP(r *http.Request) string { + // To support IPv6 + lastSemicolon := strings.LastIndex(r.RemoteAddr, ":") + if lastSemicolon == -1 { + return r.RemoteAddr + } else { + return r.RemoteAddr[0:lastSemicolon] + } +} + +type VaryByRemoteIP struct{} + +func (v VaryByRemoteIP) Key(r *http.Request) string { + return remoteAddrIP(r) +} + +func newRateLimiter(rateQuota *throttled.RateQuota) (*throttled.HTTPRateLimiter, error) { + rateLimiter, err := throttled.NewGCRARateLimiter(lruCacheSize, *rateQuota) + if err != nil { + return nil, err + } + + result := &throttled.HTTPRateLimiter{ + RateLimiter: rateLimiter, + DeniedHandler: http.HandlerFunc(func(w http.ResponseWriter, request *http.Request) { + problem.Render(request.Context(), w, hProblem.RateLimitExceeded) + }), + VaryBy: VaryByRemoteIP{}, + } + return result, nil +} diff --git a/services/horizon/internal/httpx/router.go b/services/horizon/internal/httpx/router.go new file mode 100644 index 0000000000..d78042a7cc --- /dev/null +++ b/services/horizon/internal/httpx/router.go @@ -0,0 +1,343 @@ +package httpx + +import ( + "compress/flate" + "fmt" + "net/http" + "net/http/pprof" + "net/url" + "time" + + "github.com/go-chi/chi" + chimiddleware "github.com/go-chi/chi/middleware" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/rs/cors" + "github.com/stellar/throttled" + + "github.com/stellar/go/services/horizon/internal/actions" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/paths" + "github.com/stellar/go/services/horizon/internal/render" + "github.com/stellar/go/services/horizon/internal/render/sse" + "github.com/stellar/go/services/horizon/internal/txsub" + "github.com/stellar/go/support/db" + supporthttp "github.com/stellar/go/support/http" + "github.com/stellar/go/support/render/problem" +) + +type RouterConfig struct { + DBSession db.SessionInterface + PrimaryDBSession db.SessionInterface + TxSubmitter *txsub.System + RateQuota *throttled.RateQuota + + BehindCloudflare bool + BehindAWSLoadBalancer bool + SSEUpdateFrequency time.Duration + 
StaleThreshold uint + ConnectionTimeout time.Duration + NetworkPassphrase string + MaxPathLength uint + MaxAssetsPerPathRequest int + PathFinder paths.Finder + PrometheusRegistry *prometheus.Registry + CoreGetter actions.CoreStateGetter + HorizonVersion string + FriendbotURL *url.URL + HealthCheck http.Handler +} + +type Router struct { + *chi.Mux + Internal *chi.Mux +} + +func NewRouter(config *RouterConfig, serverMetrics *ServerMetrics, ledgerState *ledger.State) (*Router, error) { + result := Router{ + Mux: chi.NewMux(), + Internal: chi.NewMux(), + } + var rateLimiter *throttled.HTTPRateLimiter + if config.RateQuota != nil { + var err error + rateLimiter, err = newRateLimiter(config.RateQuota) + if err != nil { + return nil, fmt.Errorf("unable to create RateLimiter: %v", err) + } + } + result.addMiddleware(config, rateLimiter, serverMetrics) + result.addRoutes(config, rateLimiter, ledgerState) + return &result, nil +} + +func (r *Router) addMiddleware(config *RouterConfig, + rateLimitter *throttled.HTTPRateLimiter, + serverMetrics *ServerMetrics) { + + r.Use(chimiddleware.StripSlashes) + + r.Use(requestCacheHeadersMiddleware) + r.Use(chimiddleware.RequestID) + r.Use(contextMiddleware) + r.Use(supporthttp.XFFMiddleware(supporthttp.XFFMiddlewareConfig{ + BehindCloudflare: config.BehindCloudflare, + BehindAWSLoadBalancer: config.BehindAWSLoadBalancer, + })) + r.Use(loggerMiddleware(serverMetrics)) + r.Use(timeoutMiddleware(config.ConnectionTimeout)) + r.Use(recoverMiddleware) + r.Use(chimiddleware.Compress(flate.DefaultCompression, "application/hal+json")) + + c := cors.New(cors.Options{ + AllowedOrigins: []string{"*"}, + AllowedHeaders: []string{"*"}, + ExposedHeaders: []string{"Date", "Latest-Ledger"}, + }) + r.Use(c.Handler) + + if rateLimitter != nil { + r.Use(func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Exempt streaming requests from rate limits via the HTTP middleware + // because rate limiting for streaming requests are already implemented in + // StreamHandler.ServeStream(). 
+ if render.Negotiate(r) == render.MimeEventStream { + handler.ServeHTTP(w, r) + return + } + rateLimitter.RateLimit(handler).ServeHTTP(w, r) + }) + }) + } + + if config.PrimaryDBSession != nil { + replicaSyncMiddleware := ReplicaSyncCheckMiddleware{ + PrimaryHistoryQ: &history.Q{config.PrimaryDBSession}, + ReplicaHistoryQ: &history.Q{config.DBSession}, + ServerMetrics: serverMetrics, + } + r.Use(replicaSyncMiddleware.Wrap) + } + + // Internal middlewares + r.Internal.Use(chimiddleware.StripSlashes) + r.Internal.Use(chimiddleware.RequestID) + r.Internal.Use(loggerMiddleware(serverMetrics)) +} + +func (r *Router) addRoutes(config *RouterConfig, rateLimiter *throttled.HTTPRateLimiter, ledgerState *ledger.State) { + stateMiddleware := StateMiddleware{ + HorizonSession: config.DBSession, + } + + r.Method(http.MethodGet, "/health", config.HealthCheck) + + r.Method(http.MethodGet, "/", ObjectActionHandler{Action: actions.GetRootHandler{ + LedgerState: ledgerState, + CoreStateGetter: config.CoreGetter, + NetworkPassphrase: config.NetworkPassphrase, + FriendbotURL: config.FriendbotURL, + HorizonVersion: config.HorizonVersion, + }}) + + streamHandler := sse.StreamHandler{ + RateLimiter: rateLimiter, + LedgerSourceFactory: historyLedgerSourceFactory{ledgerState: ledgerState, updateFrequency: config.SSEUpdateFrequency}, + } + + historyMiddleware := NewHistoryMiddleware(ledgerState, int32(config.StaleThreshold), config.DBSession) + // State endpoints behind stateMiddleware + r.Group(func(r chi.Router) { + r.Route("/accounts", func(r chi.Router) { + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/", restPageHandler(ledgerState, actions.GetAccountsHandler{LedgerState: ledgerState})) + r.Route("/{account_id}", func(r chi.Router) { + r.With(stateMiddleware.Wrap).Method( + http.MethodGet, + "/", + streamableObjectActionHandler{ + streamHandler: streamHandler, + action: actions.GetAccountByIDHandler{}, + }, + ) + accountData := actions.GetAccountDataHandler{} + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/data/{key}", WrapRaw( + streamableObjectActionHandler{streamHandler: streamHandler, action: accountData}, + accountData, + )) + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/offers", streamableStatePageHandler(ledgerState, actions.GetAccountOffersHandler{LedgerState: ledgerState}, streamHandler)) + }) + }) + + r.Route("/claimable_balances", func(r chi.Router) { + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/", restPageHandler(ledgerState, actions.GetClaimableBalancesHandler{LedgerState: ledgerState})) + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/{id}", ObjectActionHandler{actions.GetClaimableBalanceByIDHandler{}}) + }) + + r.Route("/liquidity_pools", func(r chi.Router) { + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/", restPageHandler(ledgerState, actions.GetLiquidityPoolsHandler{LedgerState: ledgerState})) + r.Route("/{liquidity_pool_id:\\w+}", func(r chi.Router) { + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/", ObjectActionHandler{actions.GetLiquidityPoolByIDHandler{}}) + r.With(historyMiddleware).Method(http.MethodGet, "/operations", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: false, + }, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/transactions", streamableHistoryPageHandler(ledgerState, actions.GetTransactionsHandler{LedgerState: ledgerState}, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/effects", 
streamableHistoryPageHandler(ledgerState, actions.GetEffectsHandler{LedgerState: ledgerState}, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/trades", streamableHistoryPageHandler(ledgerState, actions.GetTradesHandler{LedgerState: ledgerState, CoreStateGetter: config.CoreGetter}, streamHandler)) + }) + }) + + r.Route("/offers", func(r chi.Router) { + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/", restPageHandler(ledgerState, actions.GetOffersHandler{LedgerState: ledgerState})) + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/{offer_id}", ObjectActionHandler{actions.GetOfferByID{}}) + }) + + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/assets", restPageHandler(ledgerState, actions.AssetStatsHandler{LedgerState: ledgerState})) + + findPaths := ObjectActionHandler{actions.FindPathsHandler{ + StaleThreshold: config.StaleThreshold, + SetLastLedgerHeader: true, + MaxPathLength: config.MaxPathLength, + MaxAssetsParamLength: config.MaxAssetsPerPathRequest, + PathFinder: config.PathFinder, + }} + findFixedPaths := ObjectActionHandler{actions.FindFixedPathsHandler{ + MaxPathLength: config.MaxPathLength, + SetLastLedgerHeader: true, + MaxAssetsParamLength: config.MaxAssetsPerPathRequest, + PathFinder: config.PathFinder, + }} + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/paths", findPaths) + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/paths/strict-receive", findPaths) + r.With(stateMiddleware.Wrap).Method(http.MethodGet, "/paths/strict-send", findFixedPaths) + r.With(stateMiddleware.Wrap).Method( + http.MethodGet, + "/order_book", + streamableObjectActionHandler{ + streamHandler: streamHandler, + action: actions.GetOrderbookHandler{}, + }, + ) + }) + + // account actions - /accounts/{account_id} has been created above so we + // need to use absolute routes here. Make sure we use regexp check here for + // emptiness. Without it, requesting `/accounts//payments` return all payments! 
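+	// The `{account_id:\w+}` pattern used on the routes below is that regexp check; it rejects an empty account ID segment.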
+ r.Group(func(r chi.Router) { + r.With(historyMiddleware).Method(http.MethodGet, "/accounts/{account_id:\\w+}/effects", streamableHistoryPageHandler(ledgerState, actions.GetEffectsHandler{LedgerState: ledgerState}, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/accounts/{account_id:\\w+}/operations", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: false, + }, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/accounts/{account_id:\\w+}/payments", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: true, + }, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/accounts/{account_id:\\w+}/trades", streamableHistoryPageHandler(ledgerState, actions.GetTradesHandler{LedgerState: ledgerState, CoreStateGetter: config.CoreGetter}, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/accounts/{account_id:\\w+}/transactions", streamableHistoryPageHandler(ledgerState, actions.GetTransactionsHandler{LedgerState: ledgerState}, streamHandler)) + }) + // ledger actions + r.Route("/ledgers", func(r chi.Router) { + r.With(historyMiddleware).Method(http.MethodGet, "/", streamableHistoryPageHandler(ledgerState, actions.GetLedgersHandler{LedgerState: ledgerState}, streamHandler)) + r.Route("/{ledger_id}", func(r chi.Router) { + r.With(historyMiddleware).Method(http.MethodGet, "/", ObjectActionHandler{actions.GetLedgerByIDHandler{LedgerState: ledgerState}}) + r.With(historyMiddleware).Method(http.MethodGet, "/transactions", streamableHistoryPageHandler(ledgerState, actions.GetTransactionsHandler{LedgerState: ledgerState}, streamHandler)) + r.Group(func(r chi.Router) { + r.With(historyMiddleware).Method(http.MethodGet, "/effects", streamableHistoryPageHandler(ledgerState, actions.GetEffectsHandler{LedgerState: ledgerState}, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/operations", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: false, + }, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/payments", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: true, + }, streamHandler)) + }) + }) + }) + + // claimable balance actions + r.Group(func(r chi.Router) { + r.With(historyMiddleware).Method(http.MethodGet, "/claimable_balances/{claimable_balance_id:\\w+}/operations", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: false, + }, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/claimable_balances/{claimable_balance_id:\\w+}/transactions", streamableHistoryPageHandler(ledgerState, actions.GetTransactionsHandler{LedgerState: ledgerState}, streamHandler)) + }) + + // transaction history actions + r.Route("/transactions", func(r chi.Router) { + r.With(historyMiddleware).Method(http.MethodGet, "/", streamableHistoryPageHandler(ledgerState, actions.GetTransactionsHandler{LedgerState: ledgerState}, streamHandler)) + r.Route("/{tx_id}", func(r chi.Router) { + r.With(historyMiddleware).Method(http.MethodGet, "/", ObjectActionHandler{actions.GetTransactionByHashHandler{}}) + r.With(historyMiddleware).Method(http.MethodGet, "/effects", streamableHistoryPageHandler(ledgerState, actions.GetEffectsHandler{LedgerState: ledgerState}, streamHandler)) + 
r.With(historyMiddleware).Method(http.MethodGet, "/operations", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: false, + }, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/payments", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: true, + }, streamHandler)) + }) + }) + + // operation actions + r.Route("/operations", func(r chi.Router) { + r.With(historyMiddleware).Method(http.MethodGet, "/", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: false, + }, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/{id}", ObjectActionHandler{actions.GetOperationByIDHandler{LedgerState: ledgerState}}) + r.With(historyMiddleware).Method(http.MethodGet, "/{op_id}/effects", streamableHistoryPageHandler(ledgerState, actions.GetEffectsHandler{LedgerState: ledgerState}, streamHandler)) + }) + + r.Group(func(r chi.Router) { + // payment actions + r.With(historyMiddleware).Method(http.MethodGet, "/payments", streamableHistoryPageHandler(ledgerState, actions.GetOperationsHandler{ + LedgerState: ledgerState, + OnlyPayments: true, + }, streamHandler)) + + // effect actions + r.With(historyMiddleware).Method(http.MethodGet, "/effects", streamableHistoryPageHandler(ledgerState, actions.GetEffectsHandler{LedgerState: ledgerState}, streamHandler)) + + // trading related endpoints + r.With(historyMiddleware).Method(http.MethodGet, "/trades", streamableHistoryPageHandler(ledgerState, actions.GetTradesHandler{LedgerState: ledgerState, CoreStateGetter: config.CoreGetter}, streamHandler)) + r.With(historyMiddleware).Method(http.MethodGet, "/trade_aggregations", ObjectActionHandler{actions.GetTradeAggregationsHandler{LedgerState: ledgerState, CoreStateGetter: config.CoreGetter}}) + // /offers/{offer_id} has been created above so we need to use absolute + // routes here. + r.With(historyMiddleware).Method(http.MethodGet, "/offers/{offer_id}/trades", streamableHistoryPageHandler(ledgerState, actions.GetTradesHandler{LedgerState: ledgerState, CoreStateGetter: config.CoreGetter}, streamHandler)) + }) + + // Transaction submission API + r.Method(http.MethodPost, "/transactions", ObjectActionHandler{actions.SubmitTransactionHandler{ + Submitter: config.TxSubmitter, + NetworkPassphrase: config.NetworkPassphrase, + CoreStateGetter: config.CoreGetter, + }}) + + // Network state related endpoints + r.Method(http.MethodGet, "/fee_stats", ObjectActionHandler{actions.FeeStatsHandler{}}) + + // friendbot + if config.FriendbotURL != nil { + redirectFriendbot := func(w http.ResponseWriter, r *http.Request) { + redirectURL := config.FriendbotURL.String() + "?" 
+ r.URL.RawQuery + http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect) + } + r.Post("/friendbot", redirectFriendbot) + r.Get("/friendbot", redirectFriendbot) + } + + r.NotFound(func(w http.ResponseWriter, request *http.Request) { + problem.Render(request.Context(), w, problem.NotFound) + }) + + // internal + r.Internal.Get("/metrics", promhttp.HandlerFor(config.PrometheusRegistry, promhttp.HandlerOpts{}).ServeHTTP) + r.Internal.Get("/debug/pprof/heap", pprof.Index) + r.Internal.Get("/debug/pprof/profile", pprof.Profile) +} diff --git a/services/horizon/internal/httpx/server.go b/services/horizon/internal/httpx/server.go new file mode 100644 index 0000000000..c3f6983c2c --- /dev/null +++ b/services/horizon/internal/httpx/server.go @@ -0,0 +1,150 @@ +package httpx + +import ( + "context" + "database/sql" + "fmt" + "net/http" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/ledger" + hProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/render/sse" + "github.com/stellar/go/services/horizon/internal/txsub/sequence" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/problem" +) + +type ServerMetrics struct { + RequestDurationSummary *prometheus.SummaryVec + ReplicaLagErrorsCounter prometheus.Counter +} + +type TLSConfig struct { + CertPath, KeyPath string +} +type ServerConfig struct { + Port uint16 + TLSConfig *TLSConfig + AdminPort uint16 +} + +// Server contains the http server related fields for horizon: the Router, +// rate limiter, etc. +type Server struct { + Router *Router + Metrics *ServerMetrics + server *http.Server + config ServerConfig + internalServer *http.Server +} + +func init() { + // register problems + problem.SetLogFilter(problem.LogUnknownErrors) + problem.RegisterError(sql.ErrNoRows, problem.NotFound) + problem.RegisterError(sequence.ErrNoMoreRoom, hProblem.ServerOverCapacity) + problem.RegisterError(db2.ErrInvalidCursor, problem.BadRequest) + problem.RegisterError(db2.ErrInvalidLimit, problem.BadRequest) + problem.RegisterError(db2.ErrInvalidOrder, problem.BadRequest) + problem.RegisterError(sse.ErrRateLimited, hProblem.RateLimitExceeded) + problem.RegisterError(context.DeadlineExceeded, hProblem.Timeout) + problem.RegisterError(context.Canceled, hProblem.ClientDisconnected) + problem.RegisterError(db.ErrCancelled, hProblem.ClientDisconnected) + problem.RegisterError(db.ErrTimeout, hProblem.ServiceUnavailable) + problem.RegisterError(db.ErrConflictWithRecovery, hProblem.ServiceUnavailable) + problem.RegisterError(db.ErrBadConnection, hProblem.ServiceUnavailable) +} + +func NewServer(serverConfig ServerConfig, routerConfig RouterConfig, ledgerState *ledger.State) (*Server, error) { + sm := &ServerMetrics{ + RequestDurationSummary: prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Namespace: "horizon", Subsystem: "http", Name: "requests_duration_seconds", + Help: "HTTP requests durations, sliding window = 10m", + }, + []string{"status", "route", "streaming", "method"}, + ), + ReplicaLagErrorsCounter: prometheus.NewCounter( + prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "http", Name: "replica_lag_errors_count", + Help: "Count of HTTP errors returned due to replica lag", + }, + ), + } + router, err := NewRouter(&routerConfig, sm, ledgerState) + if err != nil { 
+ return nil, err + } + addr := fmt.Sprintf(":%d", serverConfig.Port) + result := &Server{ + Router: router, + Metrics: sm, + config: serverConfig, + server: &http.Server{ + Addr: addr, + Handler: router, + ReadTimeout: 5 * time.Second, + }, + } + + if serverConfig.AdminPort != 0 { + adminAddr := fmt.Sprintf(":%d", serverConfig.AdminPort) + result.internalServer = &http.Server{ + Addr: adminAddr, + Handler: result.Router.Internal, + ReadTimeout: 5 * time.Second, + } + } + return result, nil +} + +// RegisterMetrics registers the prometheus metrics +func (s *Server) RegisterMetrics(registry *prometheus.Registry) { + registry.MustRegister(s.Metrics.RequestDurationSummary) + registry.MustRegister(s.Metrics.ReplicaLagErrorsCounter) +} + +func (s *Server) Serve() error { + if s.internalServer != nil { + go func() { + log.Infof("Starting internal server on %s", s.internalServer.Addr) + if err := s.internalServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { + log.Warn(errors.Wrap(err, "error in internalServer.ListenAndServe()")) + } + }() + } + + var err error + if s.config.TLSConfig != nil { + err = s.server.ListenAndServeTLS(s.config.TLSConfig.CertPath, s.config.TLSConfig.KeyPath) + } else { + err = s.server.ListenAndServe() + } + return err +} + +func (s *Server) Shutdown(ctx context.Context) error { + var wg sync.WaitGroup + defer wg.Wait() + if s.internalServer != nil { + wg.Add(1) + go func() { + err := s.internalServer.Shutdown(ctx) + if err != nil { + log.Warn(errors.Wrap(err, "error in internalServer.Shutdown()")) + } + wg.Done() + }() + } + if s.server != nil { + return s.server.Shutdown(ctx) + } + return nil +} diff --git a/services/horizon/internal/httpx/stream_handler_test.go b/services/horizon/internal/httpx/stream_handler_test.go new file mode 100644 index 0000000000..fa293ca479 --- /dev/null +++ b/services/horizon/internal/httpx/stream_handler_test.go @@ -0,0 +1,549 @@ +package httpx + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "sync" + "testing" + + "github.com/go-chi/chi" + + "github.com/stellar/go/services/horizon/internal/actions" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/render/sse" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/render/hal" +) + +type testingFactory struct { + ledgerSource ledger.Source +} + +func (f *testingFactory) Get() ledger.Source { + return f.ledgerSource +} + +// StreamTest utility struct to wrap SSE related tests. 
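+// It runs the handler under test in a background goroutine against a ledger.TestingSource and records the SSE output for later assertions.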
+type StreamTest struct { + ledgerSource *ledger.TestingSource + cancel context.CancelFunc + wg *sync.WaitGroup + w *httptest.ResponseRecorder + checkResponse func(w *httptest.ResponseRecorder) + ctx context.Context +} + +func newStreamTest( + handler http.HandlerFunc, + ledgerSource *ledger.TestingSource, + request *http.Request, + checkResponse func(w *httptest.ResponseRecorder), +) *StreamTest { + s := &StreamTest{ + ledgerSource: ledgerSource, + w: httptest.NewRecorder(), + checkResponse: checkResponse, + wg: &sync.WaitGroup{}, + } + s.ctx, s.cancel = context.WithCancel(request.Context()) + + s.wg.Add(1) + go func() { + handler(s.w, request.WithContext(s.ctx)) + s.wg.Done() + s.cancel() + }() + + return s +} + +// NewStreamablePageTest tests the SSE functionality of a pageAction +func NewStreamablePageTest( + action *testPageAction, + currentLedger uint32, + request *http.Request, + checkResponse func(w *httptest.ResponseRecorder), +) *StreamTest { + ledgerSource := ledger.NewTestingSource(currentLedger) + action.ledgerSource = ledgerSource + streamHandler := sse.StreamHandler{LedgerSourceFactory: &testingFactory{ledgerSource}} + handler := streamableStatePageHandler(&ledger.State{}, action, streamHandler) + + return newStreamTest( + handler.renderStream, + ledgerSource, + request, + checkResponse, + ) +} + +// NewStreamableObjectTest tests the SSE functionality of a streamableObjectAction +func NewStreamableObjectTest( + action *testObjectAction, + currentLedger uint32, + request *http.Request, + limit int, + checkResponse func(w *httptest.ResponseRecorder), +) *StreamTest { + ledgerSource := ledger.NewTestingSource(currentLedger) + action.ledgerSource = ledgerSource + streamHandler := sse.StreamHandler{LedgerSourceFactory: &testingFactory{ledgerSource}} + handler := streamableObjectActionHandler{action: action, limit: limit, streamHandler: streamHandler} + + return newStreamTest( + handler.renderStream, + ledgerSource, + request, + checkResponse, + ) +} + +// AddLedger pushes a new ledger to the stream handler. 
AddLedger() will block until +// the new ledger has been read by the stream handler +func (s *StreamTest) AddLedger(sequence uint32) { + s.ledgerSource.AddLedger(sequence) +} + +// Stop ends the stream request and checks the response +func (s *StreamTest) Stop() { + s.cancel() + s.wg.Wait() + s.checkResponse(s.w) +} + +// Wait blocks testing until the stream test has finished running and checks the response +func (s *StreamTest) Wait() { + s.wg.Wait() + s.checkResponse(s.w) +} + +type testPage struct { + Value string `json:"value"` + pagingToken int +} + +func (p testPage) PagingToken() string { + return fmt.Sprintf("%v", p.pagingToken) +} + +type testPageAction struct { + objects map[uint32][]string + ledgerSource ledger.Source +} + +func (action *testPageAction) GetResourcePage( + w actions.HeaderWriter, + r *http.Request, +) ([]hal.Pageable, error) { + objects, ok := action.objects[action.ledgerSource.CurrentLedger()] + if !ok { + return nil, fmt.Errorf("unexpected ledger") + } + + cursor := r.Header.Get("Last-Event-ID") + if cursor == "" { + cursor = r.URL.Query().Get("cursor") + } + if cursor == "" { + cursor = "0" + } + parsedCursor, err := strconv.Atoi(cursor) + if err != nil { + return nil, err + } + + limit := len(objects) + if limitParam := r.URL.Query().Get("limit"); limitParam != "" { + limit, err = strconv.Atoi(limitParam) + if err != nil { + return nil, err + } + } + + if parsedCursor < 0 { + return nil, fmt.Errorf("cursor cannot be negative") + } + + if parsedCursor >= len(objects) { + return []hal.Pageable{}, nil + } + + response := []hal.Pageable{} + for i, object := range objects[parsedCursor:] { + if len(response) >= limit { + break + } + + response = append(response, testPage{Value: object, pagingToken: parsedCursor + i + 1}) + } + + return response, nil +} + +func streamRequest(t *testing.T, queryParams string) *http.Request { + request, err := http.NewRequest("GET", "http://localhost?"+queryParams, nil) + if err != nil { + t.Fatalf("could not construct request: %v", err) + } + ctx := context.WithValue(context.Background(), chi.RouteCtxKey, chi.NewRouteContext()) + request = request.WithContext(ctx) + + return request +} + +func unmarashalPage(jsonString string) (string, error) { + var page testPage + err := json.Unmarshal([]byte(jsonString), &page) + return page.Value, err +} + +func expectResponse( + t *testing.T, + unmarshal func(string) (string, error), + expectedResponse []string, +) func(*httptest.ResponseRecorder) { + return func(w *httptest.ResponseRecorder) { + var response []string + for _, line := range strings.Split(w.Body.String(), "\n") { + if line == "data: \"hello\"" || line == "data: \"byebye\"" { + continue + } + + if strings.HasPrefix(line, "data: ") { + jsonString := line[len("data: "):] + value, err := unmarshal(jsonString) + if err != nil { + t.Fatalf("could not parse json %v", err) + } + response = append(response, value) + } + } + + if len(expectedResponse) != len(response) { + t.Fatalf("expected %v but got %v", expectedResponse, response) + } + + for i, entry := range expectedResponse { + if entry != response[i] { + t.Fatalf("expected %v but got %v", expectedResponse, response) + } + } + } +} + +func TestPageStream(t *testing.T) { + t.Run("without offset", func(t *testing.T) { + request := streamRequest(t, "") + action := &testPageAction{ + objects: map[uint32][]string{ + 3: {"a", "b", "c"}, + 4: {"a", "b", "c", "d", "e"}, + 6: {"a", "b", "c", "d", "e", "f"}, + 7: {"a", "b", "c", "d", "e", "f"}, + }, + } + st := NewStreamablePageTest( + action, 
+ 3, + request, + expectResponse(t, unmarashalPage, []string{"a", "b", "c", "d", "e", "f"}), + ) + + st.AddLedger(4) + st.AddLedger(6) + st.AddLedger(7) + + st.Stop() + }) + + t.Run("with offset", func(t *testing.T) { + request := streamRequest(t, "cursor=1") + action := &testPageAction{ + objects: map[uint32][]string{ + 3: {"a", "b", "c"}, + 4: {"a", "b", "c", "d", "e"}, + 6: {"a", "b", "c", "d", "e", "f"}, + 7: {"a", "b", "c", "d", "e", "f"}, + }, + } + st := NewStreamablePageTest( + action, + 3, + request, + expectResponse(t, unmarashalPage, []string{"b", "c", "d", "e", "f"}), + ) + + st.AddLedger(4) + st.AddLedger(6) + st.AddLedger(7) + + st.Stop() + }) + + t.Run("with limit", func(t *testing.T) { + request := streamRequest(t, "limit=2") + action := &testPageAction{ + objects: map[uint32][]string{ + 3: {"a", "b", "c"}, + }, + } + st := NewStreamablePageTest( + action, + 3, + request, + expectResponse(t, unmarashalPage, []string{"a", "b"}), + ) + + st.Wait() + }) + + t.Run("with limit and offset", func(t *testing.T) { + request := streamRequest(t, "limit=2&cursor=1") + action := &testPageAction{ + objects: map[uint32][]string{ + 3: {"a", "b", "c", "d", "e"}, + }, + } + st := NewStreamablePageTest( + action, + 3, + request, + expectResponse(t, unmarashalPage, []string{"b", "c"}), + ) + + st.Wait() + }) + + t.Run("reach limit after multiple iterations", func(t *testing.T) { + request := streamRequest(t, "limit=3&cursor=1") + action := &testPageAction{ + objects: map[uint32][]string{ + 3: {"a"}, + 4: {"a", "b"}, + 5: {"a", "b", "c", "d", "e", "f", "g"}, + }, + } + st := NewStreamablePageTest( + action, + 3, + request, + expectResponse(t, unmarashalPage, []string{"b", "c", "d"}), + ) + + st.AddLedger(4) + st.AddLedger(5) + + st.Wait() + }) +} + +type stringObject string + +func (s stringObject) Equals(other actions.StreamableObjectResponse) bool { + otherString, ok := other.(stringObject) + if !ok { + return false + } + return s == otherString +} + +func unmarashalString(jsonString string) (string, error) { + var object stringObject + err := json.Unmarshal([]byte(jsonString), &object) + return string(object), err +} + +type testObjectAction struct { + objects map[uint32]stringObject + ledgerSource ledger.Source +} + +func (action *testObjectAction) GetResource( + w actions.HeaderWriter, + r *http.Request, +) (actions.StreamableObjectResponse, error) { + ledger := action.ledgerSource.CurrentLedger() + object, ok := action.objects[ledger] + if !ok { + return nil, fmt.Errorf("unexpected ledger: %v", ledger) + } + + return object, nil +} + +func TestObjectStream(t *testing.T) { + t.Run("without interior duplicates", func(t *testing.T) { + request := streamRequest(t, "") + action := &testObjectAction{ + objects: map[uint32]stringObject{ + 3: "a", + 4: "b", + 5: "c", + 6: "c", + }, + } + + st := NewStreamableObjectTest( + action, + 3, + request, + 10, + expectResponse(t, unmarashalString, []string{"a", "b", "c"}), + ) + + st.AddLedger(4) + st.AddLedger(5) + st.AddLedger(6) + st.Stop() + }) + + t.Run("with interior duplicates", func(t *testing.T) { + request := streamRequest(t, "") + action := &testObjectAction{ + objects: map[uint32]stringObject{ + 3: "a", + 4: "b", + 5: "b", + 6: "c", + 7: "c", + }, + } + + st := NewStreamableObjectTest( + action, + 3, + request, + 10, + expectResponse(t, unmarashalString, []string{"a", "b", "c"}), + ) + + st.AddLedger(4) + st.AddLedger(5) + st.AddLedger(6) + st.AddLedger(7) + + st.Stop() + }) + + t.Run("limit reached", func(t *testing.T) { + request := 
streamRequest(t, "") + action := &testObjectAction{ + objects: map[uint32]stringObject{ + 1: "a", + 2: "b", + 3: "b", + 4: "c", + 5: "d", + }, + } + + st := NewStreamableObjectTest( + action, + 1, + request, + 4, + expectResponse( + t, + unmarashalString, + []string{ + "a", "b", "c", "d", + }, + ), + ) + + st.AddLedger(2) + st.AddLedger(3) + st.AddLedger(4) + st.AddLedger(5) + + st.Wait() + }) +} + +func TestRepeatableReadStream(t *testing.T) { + t.Run("page stream creates repeatable read tx", func(t *testing.T) { + action := &testPageAction{ + objects: map[uint32][]string{ + 3: {"a"}, + 4: {"a", "b"}, + }, + } + + session := &db.MockSession{} + session.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + session.On("Rollback").Return(nil).Once() + + session.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + session.On("Rollback").Return(nil).Once() + + request := streamRequest(t, "limit=2") + request = request.WithContext(context.WithValue( + request.Context(), + &horizonContext.SessionContextKey, + session, + )) + + st := NewStreamablePageTest( + action, + 3, + request, + expectResponse(t, unmarashalPage, []string{"a", "b"}), + ) + st.AddLedger(4) + st.Wait() + session.AssertExpectations(t) + }) + + t.Run("object stream creates repeatable read tx", func(t *testing.T) { + action := &testObjectAction{ + objects: map[uint32]stringObject{ + 3: "a", + 4: "b", + }, + } + + session := &db.MockSession{} + session.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + session.On("Rollback").Return(nil).Once() + + session.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + session.On("Rollback").Return(nil).Once() + + request := streamRequest(t, "") + request = request.WithContext(context.WithValue( + request.Context(), + &horizonContext.SessionContextKey, + session, + )) + + st := NewStreamableObjectTest( + action, + 3, + request, + 2, + expectResponse(t, unmarashalString, []string{"a", "b"}), + ) + st.AddLedger(4) + st.Wait() + session.AssertExpectations(t) + }) +} diff --git a/services/horizon/internal/ingest/TESTING.md b/services/horizon/internal/ingest/TESTING.md new file mode 100644 index 0000000000..a5257567ae --- /dev/null +++ b/services/horizon/internal/ingest/TESTING.md @@ -0,0 +1,102 @@ +# Testing New Ingestion System + + This document describes what you need to know to start testing Horizon's new ingestion system. This system will soon be standard, and your feedback is valuable. If you experience a problem not covered in this document, please add an issue in this repository. For questions please use [Stack Exchange](https://stellar.stackexchange.com) or ask it in one of our online [communities](https://www.stellar.org/community/#communities). + +Please remember that this is still an experimental version of the system and should only be used in staging and test environments! + +Thank you for helping us test the new ingestion system in Horizon. + +## What is the new ingestion system? + +The new ingestion system solves issues found in the previous version like: inconsistent data, relying on Stellar-Core database directly, slow responses for specific queries, etc. It allows new kind of features in Horizon, ex. faster path-finding. 
We published a [blog post](https://www.stellar.org/developers-blog/our-new-horizon-ingestion-engine) with more details; please check it out!
+
+## Why would you want to upgrade?
+
+* Ingestion can now run on multiple servers, which means that even if one of your ingesting instances goes down, ingestion continues on the other instances.
+* New features like faster path-finding, an accounts-for-signer endpoint, finding all accounts that hold a given asset, etc. More new features (and plugins!) are on the way.
+* Ingestion does not generate a high load on the Stellar-Core database.
+* With batched requests (not implemented yet) you can get a consistent snapshot of the latest ledger data. Previously this wasn't possible, because some entries were loaded from the Stellar-Core database and others from the Horizon database, and these could be at different ledgers.
+* We will continue to update Horizon 0.* releases with security fixes until end-of-life, but the 1.x release will become the default and recommended version soon. It's better to test this now within your organization. And again, use this release in staging environments only!
+
+## Before you upgrade
+
+* You can roll back to the older version, but only when using alpha or beta versions. We won't support rolling back once a stable version is released. To roll back: migrate the DB down, roll back to the previous version, and run `horizon db init-asset-stats` to regenerate asset stats in your DB.
+* If you were using the new ingestion in one of the previous versions of Horizon, you must first remove the `ENABLE_EXPERIMENTAL_INGESTION` feature flag and restart all Horizon instances before deploying a new version.
+* The init stage (state ingestion) for the public Stellar network requires around 1.5GB of RAM. This memory is released after state ingestion completes. State ingestion is performed only once; restarting the server will not trigger it unless Horizon has been upgraded to a newer version (with an updated ingestion pipeline). We are evaluating alternative solutions to reduce these RAM requirements. It's worth noting that the required memory will keep shrinking as more of the buckets in the history archive become [CAP-20](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0020.md) compatible.
+* The CPU footprint of the new ingestion is modest. We were able to successfully run ingestion on an [AWS `c5.large`](https://aws.amazon.com/ec2/instance-types/c5/) instance, but we highly recommend `c5.xlarge` instances. The init stage takes a few minutes on `c5.xlarge`, which is the equivalent of 4 vCPUs and 8GB of RAM. AWS defines a vCPU for the c5 family as follows:
+> The 2nd generation Intel Xeon Scalable Processors (Cascade Lake) or 1st generation Intel Xeon Platinum 8000 series (Skylake-SP) processor with a sustained all core Turbo frequency of up to 3.4GHz, and single core turbo frequency of up to 3.5 GHz.
+
+* The state data requires an additional 6GB of DB disk space for the public Stellar network (as of January 2020). Disk usage will grow as the number of Stellar ledger entries increases.
+ * `accounts_signers` table: 2340 MB
+ * `trust_lines` table: 2052 MB
+ * `accounts` table: 1545 MB
+ * `offers` table: 61 MB
+ * `accounts_data` table: 15 MB
+ * `exp_asset_stats` table: less than 1 MB
+* A new environment variable (or command line flag) needs to be set so that Horizon can ingest state from the history archives:
+ * `HISTORY_ARCHIVE_URLS="archive1,archive2,archive3"` (if you don't have your own pubnet history archive, you can use one of SDF's archives, for example `https://history.stellar.org/prd/core-live/core_live_001`)
+* Horizon serves the `/paths` and `/order_book` endpoints from an in-memory graph, which is only available on ingesting instances. If some of the instances in your cluster are not configured to ingest, you can configure your proxy server to route those endpoints to the ingesting instances. This is beyond the scope of this document; consult the relevant documentation for your proxy server.
+
+## Troubleshooting
+
+### Some endpoints are not available during state ingestion
+
+Endpoints that display state information are not available during initial state ingestion and will return a `503 Service Unavailable`/`Still Ingesting` error. An example is the `/paths` endpoint (built using offers). Such endpoints become available again once state ingestion is done (usually within a couple of minutes).
+
+### State ingestion is taking a lot of time
+
+State ingestion shouldn't take more than a couple of minutes on an AWS `c5.xlarge` instance, or equivalent.
+
+It's possible that the progress logs (see below) will not show anything new for a longer period of time, or will print a lot of progress entries every few seconds. This happens because of the way history archives are designed. The ingestion is still working, but it's processing entries of type `DEADENTRY`. If there are a lot of them in a bucket, there are no _active_ entries to process. We plan to improve the progress logs to display the actual percentage progress so it's easier to estimate an ETA.
+
+If you see that ingestion is not proceeding for a very long period of time:
+1. Check the RAM usage on the machine. It's possible that the system ran out of RAM and is using swap memory, which is extremely slow.
+2. If the above is not the case, file a new issue in this repository.
+
+### CPU usage goes high every few minutes
+
+This is _by design_. Horizon runs a state verifier routine that compares the state in local storage to the history archives every 64 ledgers to ensure data changes are applied correctly. If data corruption is detected, Horizon will block access to endpoints serving invalid data.
+
+We recommend keeping this security feature turned on; however, if it's causing problems (due to CPU usage), it can be disabled with the `--ingest-disable-state-verification` CLI param or the `INGEST_DISABLE_STATE_VERIFICATION` env variable.
+
+### I see `Waiting for the next checkpoint...` messages
+
+If you were running the new system in the past (the `ENABLE_EXPERIMENTAL_INGESTION` flag), it's possible that the old and new systems are not in sync. In that case, the upgrade code will activate and make sure the data is in sync. When this happens, you may see `Waiting for the next checkpoint...` messages for up to 5 minutes.
+
+## Reading the logs
+
+To check the progress and status of ingestion, check the logs. All logs connected to experimental ingestion are tagged with `service=ingest`.
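For example, if you capture Horizon's output to a file, a tiny filter program can isolate the ingestion entries. The sketch below is illustrative only and assumes the log has been written to a file named `horizon.log` (an arbitrary name, not a path used by Horizon itself); it simply prints every line carrying the `service=ingest` tag:

```go
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	// Assumed log file name; point this at wherever Horizon's output is captured.
	f, err := os.Open("horizon.log")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := scanner.Text()
		// Keep only entries emitted by the ingestion subsystem.
		if strings.Contains(line, "service=ingest") {
			fmt.Println(line)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
```

An equivalent `grep service=ingest` one-liner works just as well; the Go version is shown only to stay consistent with the rest of the codebase.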
+ +It starts with informing you about state ingestion: +``` +INFO[2019-08-29T13:04:13.473+02:00] Starting ingestion system from empty state... pid=5965 service=ingest temp_set="*io.MemoryTempSet" +INFO[2019-08-29T13:04:15.263+02:00] Reading from History Archive Snapshot ledger=25565887 pid=5965 service=ingest +``` +During state ingestion, Horizon will log number of processed entries every 100,000 entries (there are currently around 7M entries in the public network): +``` +INFO[2019-08-29T13:04:34.652+02:00] Processing entries from History Archive Snapshot ledger=25565887 numEntries=100000 pid=5965 service=ingest +INFO[2019-08-29T13:04:38.487+02:00] Processing entries from History Archive Snapshot ledger=25565887 numEntries=200000 pid=5965 service=ingest +INFO[2019-08-29T13:04:41.322+02:00] Processing entries from History Archive Snapshot ledger=25565887 numEntries=300000 pid=5965 service=ingest +INFO[2019-08-29T13:04:48.429+02:00] Processing entries from History Archive Snapshot ledger=25565887 numEntries=400000 pid=5965 service=ingest +INFO[2019-08-29T13:05:00.306+02:00] Processing entries from History Archive Snapshot ledger=25565887 numEntries=500000 pid=5965 service=ingest +``` +When state ingestion is finished it will proceed to ledger ingestion starting from the next ledger after checkpoint ledger (25565887+1 in this example) to update the state using transaction meta: +``` +INFO[2019-08-29T13:39:41.590+02:00] Processing entries from History Archive Snapshot ledger=25565887 numEntries=5300000 pid=5965 service=ingest +INFO[2019-08-29T13:39:44.518+02:00] Processing entries from History Archive Snapshot ledger=25565887 numEntries=5400000 pid=5965 service=ingest +INFO[2019-08-29T13:39:47.488+02:00] Processing entries from History Archive Snapshot ledger=25565887 numEntries=5500000 pid=5965 service=ingest +INFO[2019-08-29T13:40:00.670+02:00] Processed ledger ledger=25565887 pid=5965 service=ingest type=state_pipeline +INFO[2019-08-29T13:40:00.670+02:00] Finished processing History Archive Snapshot duration=2145.337575904 ledger=25565887 numEntries=5529931 pid=5965 service=ingest shutdown=false +INFO[2019-08-29T13:40:00.693+02:00] Reading new ledger ledger=25565888 pid=5965 service=ingest +INFO[2019-08-29T13:40:00.694+02:00] Processing ledger ledger=25565888 pid=5965 service=ingest type=ledger_pipeline updating_database=true +INFO[2019-08-29T13:40:00.779+02:00] Processed ledger ledger=25565888 pid=5965 service=ingest type=ledger_pipeline +INFO[2019-08-29T13:40:00.779+02:00] Finished processing ledger duration=0.086024492 ledger=25565888 pid=5965 service=ingest shutdown=false transactions=14 +INFO[2019-08-29T13:40:00.815+02:00] Reading new ledger ledger=25565889 pid=5965 service=ingest +INFO[2019-08-29T13:40:00.816+02:00] Processing ledger ledger=25565889 pid=5965 service=ingest type=ledger_pipeline updating_database=true +INFO[2019-08-29T13:40:00.881+02:00] Processed ledger ledger=25565889 pid=5965 service=ingest type=ledger_pipeline +INFO[2019-08-29T13:40:00.881+02:00] Finished processing ledger duration=0.06619956 ledger=25565889 pid=5965 service=ingest shutdown=false transactions=29 +INFO[2019-08-29T13:40:00.901+02:00] Reading new ledger ledger=25565890 pid=5965 service=ingest +INFO[2019-08-29T13:40:00.902+02:00] Processing ledger ledger=25565890 pid=5965 service=ingest type=ledger_pipeline updating_database=true +INFO[2019-08-29T13:40:00.972+02:00] Processed ledger ledger=25565890 pid=5965 service=ingest type=ledger_pipeline +INFO[2019-08-29T13:40:00.972+02:00] Finished processing 
ledger duration=0.071039012 ledger=25565890 pid=5965 service=ingest shutdown=false transactions=20 +``` diff --git a/services/horizon/internal/ingest/build_state_test.go b/services/horizon/internal/ingest/build_state_test.go new file mode 100644 index 0000000000..f7779f6f72 --- /dev/null +++ b/services/horizon/internal/ingest/build_state_test.go @@ -0,0 +1,405 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package ingest + +import ( + "context" + "testing" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" +) + +func TestBuildStateTestSuite(t *testing.T) { + suite.Run(t, new(BuildStateTestSuite)) +} + +type BuildStateTestSuite struct { + suite.Suite + ctx context.Context + historyQ *mockDBQ + historyAdapter *mockHistoryArchiveAdapter + ledgerBackend *ledgerbackend.MockDatabaseBackend + system *system + runner *mockProcessorsRunner + stellarCoreClient *mockStellarCoreClient + checkpointLedger uint32 + lastLedger uint32 +} + +func (s *BuildStateTestSuite) SetupTest() { + s.ctx = context.Background() + s.historyQ = &mockDBQ{} + s.runner = &mockProcessorsRunner{} + s.historyAdapter = &mockHistoryArchiveAdapter{} + s.ledgerBackend = &ledgerbackend.MockDatabaseBackend{} + s.stellarCoreClient = &mockStellarCoreClient{} + s.checkpointLedger = uint32(63) + s.lastLedger = 0 + s.system = &system{ + ctx: s.ctx, + historyQ: s.historyQ, + historyAdapter: s.historyAdapter, + ledgerBackend: s.ledgerBackend, + runner: s.runner, + stellarCoreClient: s.stellarCoreClient, + } + s.system.initMetrics() + + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("Rollback").Return(nil).Once() + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(63)).Return(false, nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.UnboundedRange(63)).Return(nil).Once() + s.ledgerBackend.On("GetLedger", s.ctx, uint32(63)).Return(xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 63, + LedgerVersion: xdr.Uint32(MaxSupportedProtocolVersion), + BucketListHash: xdr.Hash{1, 2, 3}, + }, + }, + }, + }, nil).Once() +} + +func (s *BuildStateTestSuite) TearDownTest() { + t := s.T() + s.historyQ.AssertExpectations(t) + s.historyAdapter.AssertExpectations(t) + s.runner.AssertExpectations(t) + s.stellarCoreClient.AssertExpectations(t) + s.ledgerBackend.AssertExpectations(t) +} + +func (s *BuildStateTestSuite) mockCommonHistoryQ() { + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(s.lastLedger, nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(nil).Once() + s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(nil).Once() + s.historyQ.On("TruncateIngestStateTables", s.ctx).Return(nil).Once() + s.stellarCoreClient.On( + "SetCursor", + mock.AnythingOfType("*context.timerCtx"), + defaultCoreCursorName, + int32(62), + ).Return(nil).Once() +} + +func (s *BuildStateTestSuite) TestCheckPointLedgerIsZero() { + // Recreate mock in this single test to remove assertions. 
+ *s.historyQ = mockDBQ{} + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + next, err := buildState{checkpointLedger: 0}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "unexpected checkpointLedger value") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestRangeNotPreparedFailPrepare() { + // Recreate mock in this single test to remove assertions. + *s.historyQ = mockDBQ{} + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(63)).Return(false, nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.UnboundedRange(63)).Return(errors.New("my error")).Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "error preparing range: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestRangeNotPreparedSuccessPrepareGetLedgerFail() { + // Recreate mock in this single test to remove assertions. + *s.historyQ = mockDBQ{} + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(63)).Return(false, nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.UnboundedRange(63)).Return(nil).Once() + s.ledgerBackend.On("GetLedger", s.ctx, uint32(63)).Return(xdr.LedgerCloseMeta{}, errors.New("my error")).Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "error getting ledger blocking: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestBeginReturnsError() { + // Recreate mock in this single test to remove assertions. 
+ *s.historyQ = mockDBQ{} + s.historyQ.On("Begin").Return(errors.New("my error")).Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error starting a transaction: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestGetLastLedgerIngestReturnsError() { + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(s.lastLedger, errors.New("my error")).Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error getting last ingested ledger: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} +func (s *BuildStateTestSuite) TestGetIngestVersionReturnsError() { + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(s.lastLedger, nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, errors.New("my error")).Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error getting ingestion version: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestAnotherInstanceHasCompletedBuildState() { + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(s.checkpointLedger, nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestUpdateLastLedgerIngestReturnsError() { + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(s.lastLedger, nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(errors.New("my error")).Once() + s.stellarCoreClient.On( + "SetCursor", + mock.AnythingOfType("*context.timerCtx"), + defaultCoreCursorName, + int32(62), + ).Return(nil).Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "Error updating last ingested ledger: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestUpdateExpStateInvalidReturnsError() { + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(s.lastLedger, nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(nil).Once() + s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(errors.New("my error")).Once() + s.stellarCoreClient.On( + "SetCursor", + mock.AnythingOfType("*context.timerCtx"), + defaultCoreCursorName, + int32(62), + ).Return(nil).Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "Error updating state invalid value: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestTruncateIngestStateTablesReturnsError() { + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(s.lastLedger, nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, 
nil).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.lastLedger).Return(nil).Once() + s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(nil).Once() + s.historyQ.On("TruncateIngestStateTables", s.ctx).Return(errors.New("my error")).Once() + + s.stellarCoreClient.On( + "SetCursor", + mock.AnythingOfType("*context.timerCtx"), + defaultCoreCursorName, + int32(62), + ).Return(nil).Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "Error clearing ingest tables: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestRunHistoryArchiveIngestionReturnsError() { + s.mockCommonHistoryQ() + s.runner. + On("RunHistoryArchiveIngestion", s.checkpointLedger, MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}). + Return(ingest.StatsChangeProcessorResults{}, errors.New("my error")). + Once() + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "Error ingesting history archive: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestRunHistoryArchiveIngestionGenesisReturnsError() { + // Recreate mock in this single test to remove assertions. + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(0)).Return(nil).Once() + s.historyQ.On("UpdateExpStateInvalid", s.ctx, false).Return(nil).Once() + s.historyQ.On("TruncateIngestStateTables", s.ctx).Return(nil).Once() + s.stellarCoreClient.On( + "SetCursor", + mock.AnythingOfType("*context.timerCtx"), + defaultCoreCursorName, + int32(0), + ).Return(nil).Once() + + s.runner. + On("RunGenesisStateIngestion"). + Return(ingest.StatsChangeProcessorResults{}, errors.New("my error")). + Once() + next, err := buildState{checkpointLedger: 1}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "Error ingesting history archive: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestUpdateLastLedgerIngestAfterIngestReturnsError() { + s.mockCommonHistoryQ() + s.runner. + On("RunHistoryArchiveIngestion", s.checkpointLedger, MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}). + Return(ingest.StatsChangeProcessorResults{}, nil). + Once() + s.historyQ.On("UpdateIngestVersion", s.ctx, CurrentVersion). + Return(nil). + Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.checkpointLedger). + Return(errors.New("my error")). + Once() + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "Error updating last ingested ledger: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestUpdateIngestVersionIngestReturnsError() { + s.mockCommonHistoryQ() + s.runner. + On("RunHistoryArchiveIngestion", s.checkpointLedger, MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}). + Return(ingest.StatsChangeProcessorResults{}, nil). + Once() + s.historyQ.On("UpdateIngestVersion", s.ctx, CurrentVersion). + Return(errors.New("my error")). 
+ Once() + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "Error updating ingestion version: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestUpdateCommitReturnsError() { + s.mockCommonHistoryQ() + s.runner. + On("RunHistoryArchiveIngestion", s.checkpointLedger, MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}). + Return(ingest.StatsChangeProcessorResults{}, nil). + Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.checkpointLedger). + Return(nil). + Once() + s.historyQ.On("UpdateIngestVersion", s.ctx, CurrentVersion). + Return(nil). + Once() + s.historyQ.On("Commit"). + Return(errors.New("my error")). + Once() + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "Error committing db transaction: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *BuildStateTestSuite) TestBuildStateSucceeds() { + s.mockCommonHistoryQ() + s.runner. + On("RunHistoryArchiveIngestion", s.checkpointLedger, MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}). + Return(ingest.StatsChangeProcessorResults{}, nil). + Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.checkpointLedger). + Return(nil). + Once() + s.historyQ.On("UpdateIngestVersion", s.ctx, CurrentVersion). + Return(nil). + Once() + s.historyQ.On("Commit"). + Return(nil). + Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger}.run(s.system) + + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: s.checkpointLedger}, + sleepDuration: defaultSleep, + }, + next, + ) +} + +func (s *BuildStateTestSuite) TestUpdateCommitReturnsErrorStop() { + s.mockCommonHistoryQ() + s.runner. + On("RunHistoryArchiveIngestion", s.checkpointLedger, MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}). + Return(ingest.StatsChangeProcessorResults{}, nil). + Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.checkpointLedger). + Return(nil). + Once() + s.historyQ.On("UpdateIngestVersion", s.ctx, CurrentVersion). + Return(nil). + Once() + s.historyQ.On("Commit"). + Return(errors.New("my error")). + Once() + next, err := buildState{checkpointLedger: s.checkpointLedger, stop: true}.run(s.system) + + s.Assert().Error(err) + s.Assert().EqualError(err, "Error committing db transaction: my error") + s.Assert().Equal(transition{node: stopState{}, sleepDuration: 0}, next) +} + +func (s *BuildStateTestSuite) TestBuildStateSucceedStop() { + s.mockCommonHistoryQ() + s.runner. + On("RunHistoryArchiveIngestion", s.checkpointLedger, MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}). + Return(ingest.StatsChangeProcessorResults{}, nil). + Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, s.checkpointLedger). + Return(nil). + Once() + s.historyQ.On("UpdateIngestVersion", s.ctx, CurrentVersion). + Return(nil). + Once() + s.historyQ.On("Commit"). + Return(nil). 
+ Once() + + next, err := buildState{checkpointLedger: s.checkpointLedger, stop: true}.run(s.system) + + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: stopState{}, + sleepDuration: 0, + }, + next, + ) +} diff --git a/services/horizon/internal/ingest/database_backend_test.go b/services/horizon/internal/ingest/database_backend_test.go new file mode 100644 index 0000000000..69de728116 --- /dev/null +++ b/services/horizon/internal/ingest/database_backend_test.go @@ -0,0 +1,35 @@ +package ingest + +import ( + "testing" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/network" + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestGetLatestLedger(t *testing.T) { + tt := test.Start(t) + tt.ScenarioWithoutHorizon("base") + defer tt.Finish() + + backend, err := ledgerbackend.NewDatabaseBackendFromSession(tt.CoreSession(), network.TestNetworkPassphrase) + tt.Assert.NoError(err) + seq, err := backend.GetLatestLedgerSequence(tt.Ctx) + tt.Assert.NoError(err) + tt.Assert.Equal(uint32(3), seq) +} + +func TestGetLatestLedgerNotFound(t *testing.T) { + tt := test.Start(t) + tt.ScenarioWithoutHorizon("base") + defer tt.Finish() + + _, err := tt.CoreDB.Exec(`DELETE FROM ledgerheaders`) + tt.Assert.NoError(err, "failed to remove ledgerheaders") + + backend, err := ledgerbackend.NewDatabaseBackendFromSession(tt.CoreSession(), network.TestNetworkPassphrase) + tt.Assert.NoError(err) + _, err = backend.GetLatestLedgerSequence(tt.Ctx) + tt.Assert.EqualError(err, "no ledgers exist in ledgerheaders table") +} diff --git a/services/horizon/internal/ingest/db_integration_test.go b/services/horizon/internal/ingest/db_integration_test.go new file mode 100644 index 0000000000..2857a277ef --- /dev/null +++ b/services/horizon/internal/ingest/db_integration_test.go @@ -0,0 +1,164 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package ingest + +import ( + "context" + "io" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/suite" +) + +type memoryChangeReader xdr.LedgerEntryChanges + +func loadChanges(path string) (*memoryChangeReader, error) { + contents, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + entryChanges := xdr.LedgerEntryChanges{} + if err := entryChanges.UnmarshalBinary(contents); err != nil { + return nil, err + } + + reader := memoryChangeReader(entryChanges) + return &reader, nil +} + +func (r *memoryChangeReader) Read() (ingest.Change, error) { + entryChanges := *r + if len(entryChanges) == 0 { + return ingest.Change{}, io.EOF + } + + change := entryChanges[0] + *r = entryChanges[1:] + return ingest.Change{ + Type: change.State.Data.Type, + Post: change.State, + Pre: nil, + }, nil +} + +func (r *memoryChangeReader) Close() error { + return nil +} + +func TestDBTestSuite(t *testing.T) { + suite.Run(t, new(DBTestSuite)) +} + +type DBTestSuite struct { + suite.Suite + ctx context.Context + sampleFile string + sequence uint32 + ledgerBackend *ledgerbackend.MockDatabaseBackend + historyAdapter *mockHistoryArchiveAdapter + system *system + tt *test.T +} + +func (s *DBTestSuite) SetupTest() { + s.tt = test.Start(s.T()) + test.ResetHorizonDB(s.T(), s.tt.HorizonDB) + + // sample-changes.xdr is generated by sample_changes_test.go and is checked into + // the testdata directory. 
To regenerate the file run: + // go test -v -timeout 5m --tags=update github.com/stellar/go/services/horizon/internal/ingest -run "^(TestUpdateSampleChanges)$" + // and commit the new file to the git repo. + s.sampleFile = filepath.Join("testdata", "sample-changes.xdr") + + s.ledgerBackend = &ledgerbackend.MockDatabaseBackend{} + s.historyAdapter = &mockHistoryArchiveAdapter{} + var err error + sIface, err := NewSystem(Config{ + CoreSession: s.tt.CoreSession(), + HistorySession: s.tt.HorizonSession(), + HistoryArchiveURL: "http://ignore.test", + DisableStateVerification: false, + CheckpointFrequency: 64, + }) + s.Assert().NoError(err) + s.system = sIface.(*system) + s.ctx = s.system.ctx + + s.sequence = uint32(28660351) + s.setupMocksForBuildState() + + s.system.historyAdapter = s.historyAdapter + s.system.ledgerBackend = s.ledgerBackend + s.system.runner.SetHistoryAdapter(s.historyAdapter) +} + +func (s *DBTestSuite) mockChangeReader() { + changeReader, err := loadChanges(s.sampleFile) + s.Assert().NoError(err) + s.historyAdapter.On("GetState", s.ctx, s.sequence). + Return(ingest.ChangeReader(changeReader), nil).Once() +} +func (s *DBTestSuite) setupMocksForBuildState() { + checkpointHash := xdr.Hash{1, 2, 3} + s.historyAdapter.On("GetLatestLedgerSequence"). + Return(s.sequence, nil).Once() + s.mockChangeReader() + s.historyAdapter.On("BucketListHash", s.sequence). + Return(checkpointHash, nil).Once() + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(s.sequence)).Return(true, nil).Once() + s.ledgerBackend.On("GetLedger", s.ctx, s.sequence). + Return( + xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(s.sequence), + BucketListHash: checkpointHash, + }, + }, + }, + }, + nil, + ).Once() +} + +func (s *DBTestSuite) TearDownTest() { + t := s.T() + s.historyAdapter.AssertExpectations(t) + s.ledgerBackend.AssertExpectations(t) + s.tt.Finish() +} + +func (s *DBTestSuite) TestBuildState() { + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + build := next.node.(buildState) + s.Assert().Equal(s.sequence, build.checkpointLedger) + + next, err = build.run(s.system) + s.Assert().NoError(err) + resume := next.node.(resumeState) + s.Assert().Equal(s.sequence, resume.latestSuccessfullyProcessedLedger) + + s.mockChangeReader() + s.Assert().NoError(s.system.verifyState(false)) +} + +func (s *DBTestSuite) TestVersionMismatchTriggersRebuild() { + s.TestBuildState() + + s.Assert().NoError( + s.system.historyQ.UpdateIngestVersion(context.Background(), CurrentVersion-1), + ) + + s.setupMocksForBuildState() + s.TestBuildState() +} diff --git a/services/horizon/internal/ingest/docs.md b/services/horizon/internal/ingest/docs.md new file mode 100644 index 0000000000..a0874a0b43 --- /dev/null +++ b/services/horizon/internal/ingest/docs.md @@ -0,0 +1,147 @@ +# Ingestion Finite State Machine +The following states are possible: + - `start` + - `stop` + - `build` + - `resume` + - `verifyRange` + - `historyRange` + - `reingestHistoryRange` + - `waitForCheckpoint` + +There is also the `stressTest` state, but that exists in its own world and is used only for testing ingestion, as its name implies. 
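+
+Every state above implements the small `stateMachineNode` contract defined in `fsm.go` (condensed below for orientation): running a node returns a `transition` naming the next node to run and how long the ingestion loop should pause before running it (most transitions use `defaultSleep`, i.e. one second).
+
+```go
+// Condensed from fsm.go in this PR.
+type stateMachineNode interface {
+	run(*system) (transition, error)
+	String() string
+}
+
+// A transition names the next node and the pause to take before running it.
+type transition struct {
+	node          stateMachineNode
+	sleepDuration time.Duration
+}
+```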
+ +#### Definitions +There are some important terms that need to be defined for clarity as they're used extensively in both the codebase and this breakdown: + + - the `historyQ` member provides an interface into both the **historical** data (*time-series*) as well as the **state** data (*cumulative*) in the database and does not refer to the history archives + - the `lastIngestedLedger` thus corresponds to the last ledger that Horizon fully ingested into the database (both time-series and cumulative data) + - the `lastHistoryLedger`, however, corresponds to the last ledger that Horizon ingested *only* into the time-series tables (coming from `history_ledgers` table); this can be not-equal to the `lastIngestedLedger` because time-series data can be ingested independently of cumulative data (via the `db reingest range` subcommand). I'll usually refer to it as the `lastKnownLedger` + - the `lastCheckpoint` corresponds to the last checkpoint ledger (reminder: a checkpoint ledger is one in which: `(ledger# + 1) mod 64 == 0`) and thus to a matching history archive upload. + +One of the most important jobs of the FSM described here is to make sure that `lastIngestedLedger` and `lastHistoryLedger` are equal: the [`historyRange`](#historyrange-state) updates the latter, but not the former, so that we can track when state data is behind history data. + +In general, only one node should ever be writing to a database at once, globally. Hence, there are a few checks at the start of most states to ensure this. + +![State Diagram](states.jpg) + +(The above diagram was generated using [this Mermaid definition](https://mermaid-js.github.io/mermaid-live-editor/#/edit/eyJjb2RlIjoiZ3JhcGggVERcbiAgICBBWy9zdGFydFxcXSAtLSBjdW11bGF0aXZlIGRhdGEgYmVoaW5kIC0tPiBCKHdhaXRGb3JDaGVja3BvaW50KVxuICAgIEIgLS0-IEFcbiAgICBBIC0tIHRvIGZpeCBnYXBzIC0tPiBDKGhpc3RvcnlSYW5nZSlcbiAgICBDIC0tPiBBXG4gICAgQSAtLSBjbGVhbiBmcmVzaCBzdGFydCAtLT4gRChidWlsZClcbiAgICBBIC0tIGNsZWFuIHBhcnRpYWwgc3RhdGUgLS0-IEUocmVzdW1lKVxuICAgIEEgLS0gcmVzZXQgaGlzdG9yeSBkYiAtLT4gQVxuICAgIEQgLS0gb24gZmFpbHVyZSAtLS0-IEFcbiAgICBEIC0tIG9uIGZhaWx1cmUgLS0-IEZ7c3RvcH1cbiAgICBEIC0tIG9uIHN1Y2Nlc3MgLS0-IEVcbiAgICBFIC0tIG5leHQgbGVkZ2VyIC0tPiBFXG4gICAgRSAtLT4gQVxuICAgIEcocmVpbmdlc3RIaXN0b3J5UmFuZ2UpIC0tPiBGIiwibWVybWFpZCI6eyJ0aGVtZSI6ImRlZmF1bHQiLCJmbG93Y2hhcnQiOnsiY3VydmUiOiJiYXNpcyJ9LCJ0aGVtZVZhcmlhYmxlcyI6eyJiYWNrZ3JvdW5kIjoid2hpdGUiLCJwcmltYXJ5Q29sb3IiOiIjRUNFQ0ZGIiwic2Vjb25kYXJ5Q29sb3IiOiIjZmZmZmRlIiwidGVydGlhcnlDb2xvciI6ImhzbCg4MCwgMTAwJSwgOTYuMjc0NTA5ODAzOSUpIiwicHJpbWFyeUJvcmRlckNvbG9yIjoiaHNsKDI0MCwgNjAlLCA4Ni4yNzQ1MDk4MDM5JSkiLCJzZWNvbmRhcnlCb3JkZXJDb2xvciI6ImhzbCg2MCwgNjAlLCA4My41Mjk0MTE3NjQ3JSkiLCJ0ZXJ0aWFyeUJvcmRlckNvbG9yIjoiaHNsKDgwLCA2MCUsIDg2LjI3NDUwOTgwMzklKSIsInByaW1hcnlUZXh0Q29sb3IiOiIjMTMxMzAwIiwic2Vjb25kYXJ5VGV4dENvbG9yIjoiIzAwMDAyMSIsInRlcnRpYXJ5VGV4dENvbG9yIjoicmdiKDkuNTAwMDAwMDAwMSwgOS41MDAwMDAwMDAxLCA5LjUwMDAwMDAwMDEpIiwibGluZUNvbG9yIjoiIzMzMzMzMyIsInRleHRDb2xvciI6IiMzMzMiLCJtYWluQmtnIjoiI0VDRUNGRiIsInNlY29uZEJrZyI6IiNmZmZmZGUiLCJib3JkZXIxIjoiIzkzNzBEQiIsImJvcmRlcjIiOiIjYWFhYTMzIiwiYXJyb3doZWFkQ29sb3IiOiIjMzMzMzMzIiwiZm9udEZhbWlseSI6IlwidHJlYnVjaGV0IG1zXCIsIHZlcmRhbmEsIGFyaWFsIiwiZm9udFNpemUiOiIxNnB4IiwibGFiZWxCYWNrZ3JvdW5kIjoiI2U4ZThlOCIsIm5vZGVCa2ciOiIjRUNFQ0ZGIiwibm9kZUJvcmRlciI6IiM5MzcwREIiLCJjbHVzdGVyQmtnIjoiI2ZmZmZkZSIsImNsdXN0ZXJCb3JkZXIiOiIjYWFhYTMzIiwiZGVmYXVsdExpbmtDb2xvciI6IiMzMzMzMzMiLCJ0aXRsZUNvbG9yIjoiIzMzMyIsImVkZ2VMYWJlbEJhY2tncm91bmQiOiIjZThlOGU4IiwiYWN0b3JCb3JkZXIiOiJoc2woMjU5LjYyNjE2ODIyNDMsIDU5Ljc3NjUzNjMxMjglLCA4Ny45MDE5NjA3ODQzJSkiL
CJhY3RvckJrZyI6IiNFQ0VDRkYiLCJhY3RvclRleHRDb2xvciI6ImJsYWNrIiwiYWN0b3JMaW5lQ29sb3IiOiJncmV5Iiwic2lnbmFsQ29sb3IiOiIjMzMzIiwic2lnbmFsVGV4dENvbG9yIjoiIzMzMyIsImxhYmVsQm94QmtnQ29sb3IiOiIjRUNFQ0ZGIiwibGFiZWxCb3hCb3JkZXJDb2xvciI6ImhzbCgyNTkuNjI2MTY4MjI0MywgNTkuNzc2NTM2MzEyOCUsIDg3LjkwMTk2MDc4NDMlKSIsImxhYmVsVGV4dENvbG9yIjoiYmxhY2siLCJsb29wVGV4dENvbG9yIjoiYmxhY2siLCJub3RlQm9yZGVyQ29sb3IiOiIjYWFhYTMzIiwibm90ZUJrZ0NvbG9yIjoiI2ZmZjVhZCIsIm5vdGVUZXh0Q29sb3IiOiJibGFjayIsImFjdGl2YXRpb25Cb3JkZXJDb2xvciI6IiM2NjYiLCJhY3RpdmF0aW9uQmtnQ29sb3IiOiIjZjRmNGY0Iiwic2VxdWVuY2VOdW1iZXJDb2xvciI6IndoaXRlIiwic2VjdGlvbkJrZ0NvbG9yIjoicmdiYSgxMDIsIDEwMiwgMjU1LCAwLjQ5KSIsImFsdFNlY3Rpb25Ca2dDb2xvciI6IndoaXRlIiwic2VjdGlvbkJrZ0NvbG9yMiI6IiNmZmY0MDAiLCJ0YXNrQm9yZGVyQ29sb3IiOiIjNTM0ZmJjIiwidGFza0JrZ0NvbG9yIjoiIzhhOTBkZCIsInRhc2tUZXh0TGlnaHRDb2xvciI6IndoaXRlIiwidGFza1RleHRDb2xvciI6IndoaXRlIiwidGFza1RleHREYXJrQ29sb3IiOiJibGFjayIsInRhc2tUZXh0T3V0c2lkZUNvbG9yIjoiYmxhY2siLCJ0YXNrVGV4dENsaWNrYWJsZUNvbG9yIjoiIzAwMzE2MyIsImFjdGl2ZVRhc2tCb3JkZXJDb2xvciI6IiM1MzRmYmMiLCJhY3RpdmVUYXNrQmtnQ29sb3IiOiIjYmZjN2ZmIiwiZ3JpZENvbG9yIjoibGlnaHRncmV5IiwiZG9uZVRhc2tCa2dDb2xvciI6ImxpZ2h0Z3JleSIsImRvbmVUYXNrQm9yZGVyQ29sb3IiOiJncmV5IiwiY3JpdEJvcmRlckNvbG9yIjoiI2ZmODg4OCIsImNyaXRCa2dDb2xvciI6InJlZCIsInRvZGF5TGluZUNvbG9yIjoicmVkIiwibGFiZWxDb2xvciI6ImJsYWNrIiwiZXJyb3JCa2dDb2xvciI6IiM1NTIyMjIiLCJlcnJvclRleHRDb2xvciI6IiM1NTIyMjIiLCJjbGFzc1RleHQiOiIjMTMxMzAwIiwiZmlsbFR5cGUwIjoiI0VDRUNGRiIsImZpbGxUeXBlMSI6IiNmZmZmZGUiLCJmaWxsVHlwZTIiOiJoc2woMzA0LCAxMDAlLCA5Ni4yNzQ1MDk4MDM5JSkiLCJmaWxsVHlwZTMiOiJoc2woMTI0LCAxMDAlLCA5My41Mjk0MTE3NjQ3JSkiLCJmaWxsVHlwZTQiOiJoc2woMTc2LCAxMDAlLCA5Ni4yNzQ1MDk4MDM5JSkiLCJmaWxsVHlwZTUiOiJoc2woLTQsIDEwMCUsIDkzLjUyOTQxMTc2NDclKSIsImZpbGxUeXBlNiI6ImhzbCg4LCAxMDAlLCA5Ni4yNzQ1MDk4MDM5JSkiLCJmaWxsVHlwZTciOiJoc2woMTg4LCAxMDAlLCA5My41Mjk0MTE3NjQ3JSkifX19).) + + +#### Tables +Within the Horizon database, there are a number of tables touched by ingestion that are worth differentiating explicitly. With these in mind, the subsequently-described states and their respective operations should be much clearer. + +The database contains: + + - **History tables** -- all tables that contain historical time-series data, such as `history_transactions`, `history_operations`, etc. + - **Transaction processors** -- processors that ingest the history tables (described by the `io.LedgerTransaction` interface). + - **State tables** -- all tables that contain the current cumulative state, such as accounts, offers, etc. + - **Change processors** -- processors ingesting deltas update state tables. These aren't related to a particular *transaction*, but rather describe a *transition* of a ledger entry from one state to another (described by the `io.Change` interface). This can take the form of both tx meta (time-series data, where the "change" occurs from one ledger to the next) and history archives (cumulative data, where the "change" occurs from the genesis ledger to the checkpoint). + + +## `start` State +As you might expect, this state kicks off the FSM process. + +There are a few possible branches in this state. + +##### DB upgrade or fresh start +The "happiest path" is the following: either the ingestion database is empty, so we can start purely from scratch, or the state data in a database is outdated, meaning it needs to be upgraded and so can effectively be started from scratch after catch-up. 
+
+This branches differently depending on the last known ledger:
+
+  - If it's newer than the last checkpoint, we need to wait for a new checkpoint to get the latest cumulative data. Note that though we probably *could* make incremental changes from block to block to the cumulative data, that would be more effort than it's worth relative to just waiting on the next history archive to get dumped. **Next state**: [`waitForCheckpoint`](#waitforcheckpoint-state).
+
+  - If it's older, we can simply fill in the missing gap (i.e. up to the *latest* checkpoint) and build up (only) the time-series data. **Next state**: [`historyRange`](#historyrange-state).
+
+In the other cases (matching last-known and last-checkpoint ledger, or no last-known ledger at all), **next state**: [`build`](#build-state).
+
+##### Otherwise
+If we can't have a clean slate to work with, we need to fix partial state. Specifically,
+
+  - If the last-known ledger is ahead of the last-ingested ledger, then Horizon's cumulative state data is behind its historical time-series data in the database. Here, we'll reset the time-series DB and start over. **Next state**: [`start`](#start-state), with `lastIngestedLedger == 0`.
+
+  - If the time-series database is newer than the last-known ledger (which can occur if ingestion was done for a different range earlier, for example), then Horizon needs to become aware of the missing ledgers. **Next state**: [`historyRange`](#historyrange-state) from "last known" to "last stored" in the time-series db.
+
+Otherwise, the last-known and last-ingested ledgers match. **Next state**: [`resume`](#resume-state)
+
+
+## `build` state
+This is the big kahuna of the state machine: there aren't many state transitions aside from success/failure, and all roads ultimately should lead to ~~Rome~~ `build` in order to get ingestion done. This state only establishes a baseline for the cumulative data, though.
+
+
+### Properties
+This state tracks:
+
+  - `checkpointLedger`, which is Horizon's last-known (though possibly-unprocessed) checkpoint ledger, and
+  - `stop`, an optional flag which, when set, transitions to the [`stop`](#stop-state) state once this state completes, whether it succeeds or fails.
+
+### Process
+If any of the checks (including the aforementioned sync checks) fail, we'll move to the [`start` state](#start-state). Sometimes, though, we want to [`stop`](#stop-state) instead (see `buildState.stop`).
+
+The actual ingestion involves a few steps:
+
+  - turn a checkpoint's history archive into cumulative db data
+  - update the ingestion database's version
+  - update the last-ingested ledger in the time-series db
+  - commit to the ingestion db
+
+These are detailed later, in the [Ingestion](#ingestion) section. Suffice it to say that at the end of this state, either we've errored out (described above), stopped (on error **or** success, if `buildState.stop` is set), or [`resume`](#resume-state)d from the checkpoint ledger.
+
+
+## `resume` state
+This state ingests a single ledger (both its time-series data and its state deltas), then loops back to itself for the next ledger.
+
+
+### Properties
+This state just tracks one thing:
+
+  - `latestSuccessfullyProcessedLedger`: the last ledger that was fully ingested; the ledger this state actually processes is the one immediately after it.
+
+### Process
+First, note the difference between `resumeState.latestSuccessfullyProcessedLedger` and the queried `lastIngestedLedger`: one of these is tied to the state machine, while the other is associated with the actual time-series database.
+
+The following are problematic error conditions:
+
+  - the former is larger than the latter,
+  - the versions (of the DB and current ledgers) mismatch, or
+  - the last-known ledger of the time-series db doesn't match the last-ingested ledger.
+
+If any of these holds, **next state**: [`start`](#start-state).
+
+Otherwise, we have `ingestLedger == lastIngestedLedger + 1`, and will proceed to process the range from `ingestLedger` onward.
+
+With the range prepared, only one other primary state transition is possible. If the last-known ledger of the Core backend is outdated relative to the above `ingestLedger`, we'll block until the requisite ledger is seen and processed by Core. **Next state**: [`resume`](#resume-state) again, with the last-processed ledger set to whatever is last-known to Core.
+
+Otherwise, we can actually turn the ledger into time-series data: this is exactly the responsibility of `RunAllProcessorsOnLedger` and all of its subsequent friends. The deltas for the ledger(s) are ingested into the time-series db, then verified.
+
+**Next state**: [`resume`](#resume-state) again, except now targeting the *next* ledger.
+
+
+## `historyRange` state
+The purpose of this state is to ingest the time-series (history) data for a particular ledger range into the database. Since the next state will be [`start`](#start-state), we will be rebuilding state in the future anyway.
+
+### Properties
+This tracks an inclusive ledger range: [`fromLedger`, `toLedger`].
+
+**Next state**: [`start`](#start-state)
+
+
+## `reingestHistoryRange` state
+This state acts much like the [`historyRange` state](#historyrange-state), except that it first clears any previously ingested history for the range; it backs the `db reingest range` command.
+
+**Next state**: [`stop`](#stop-state)
+
+### Properties
+This tracks an inclusive ledger range: [`fromLedger`, `toLedger`], as well as a `force` flag that ingests the range even if it overlaps with ledgers already ingested by a running Horizon (normally that overlap is rejected with an error).
+
+
+## `waitForCheckpoint` state
+This pauses the state machine for 10 seconds, then tries again, in hopes that a new checkpoint ledger has been created (remember, checkpoints occur every 64 ledgers).
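+
+As a concrete illustration of that checkpoint arithmetic, here is a small, self-contained sketch (not part of this PR; Horizon derives these values from its configured `CheckpointFrequency`) of the `(ledger# + 1) mod 64 == 0` rule from the Definitions section:
+
+```go
+package main
+
+import "fmt"
+
+const checkpointFrequency = 64
+
+// previousCheckpoint returns the latest checkpoint ledger at or before seq,
+// i.e. the largest c <= seq with (c+1) % checkpointFrequency == 0.
+func previousCheckpoint(seq uint32) uint32 {
+	if seq < checkpointFrequency-1 {
+		return 0 // no checkpoint exists before ledger 63
+	}
+	return seq - (seq+1)%checkpointFrequency
+}
+
+// nextCheckpoint returns the first checkpoint ledger at or after seq.
+func nextCheckpoint(seq uint32) uint32 {
+	return seq + (checkpointFrequency - 1 - seq%checkpointFrequency)
+}
+
+func main() {
+	fmt.Println(previousCheckpoint(100)) // 63
+	fmt.Println(nextCheckpoint(100))     // 127
+}
+```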
+ +**Next state**: [`start`](#start-state) + + +# Ingestion +TODO + +# Range Preparation +TODO: See `maybePrepareRange` diff --git a/services/horizon/internal/ingest/fake_ledger_backend.go b/services/horizon/internal/ingest/fake_ledger_backend.go new file mode 100644 index 0000000000..c61738d134 --- /dev/null +++ b/services/horizon/internal/ingest/fake_ledger_backend.go @@ -0,0 +1,211 @@ +package ingest + +import ( + "context" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/keypair" + logpkg "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +type fakeLedgerBackend struct { + numTransactions int + changesPerTransaction int +} + +func (fakeLedgerBackend) GetLatestLedgerSequence(ctx context.Context) (uint32, error) { + return 1, nil +} + +func (fakeLedgerBackend) PrepareRange(ctx context.Context, r ledgerbackend.Range) error { + return nil +} + +func (fakeLedgerBackend) IsPrepared(ctx context.Context, r ledgerbackend.Range) (bool, error) { + return true, nil +} + +func fakeAccount() xdr.LedgerEntryChange { + account := keypair.MustRandom().Address() + return xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress(account), + Balance: xdr.Int64(100), + }, + }, + }, + } +} + +func fakeAccountData() xdr.LedgerEntryChange { + account := keypair.MustRandom().Address() + return xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeData, + Data: &xdr.DataEntry{ + AccountId: xdr.MustAddress(account), + DataName: "test-name", + DataValue: xdr.DataValue("test"), + }, + }, + }, + } +} + +func fakeTrustline() xdr.LedgerEntryChange { + account := keypair.MustRandom().Address() + return xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress(account), + Balance: 123, + Asset: xdr.MustNewCreditAsset("usd", account).ToTrustLineAsset(), + }, + }, + }, + } +} + +func fakeOffer(offerID int64) xdr.LedgerEntryChange { + account := keypair.MustRandom().Address() + return xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &xdr.OfferEntry{ + SellerId: xdr.MustAddress(account), + OfferId: xdr.Int64(offerID), + Amount: 213, + Buying: xdr.MustNewCreditAsset("usd", account), + Price: xdr.Price{N: 1, D: 1}, + Selling: xdr.MustNewCreditAsset("eur", account), + }, + }, + }, + } +} + +func (f fakeLedgerBackend) getLedgerAsync(ctx context.Context, sequence uint32) (bool, xdr.LedgerCloseMeta, error) { + ledgerCloseMeta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Hash: xdr.Hash{1, 2, 3, 4, 5, 6}, + Header: xdr.LedgerHeader{ + LedgerVersion: 7, + LedgerSeq: xdr.Uint32(1), + }, + }, + TxSet: xdr.TransactionSet{ + Txs: make([]xdr.TransactionEnvelope, f.numTransactions), + }, + TxProcessing: make([]xdr.TransactionResultMeta, f.numTransactions), + UpgradesProcessing: []xdr.UpgradeEntryMeta{}, + }, + } + + logger := log.WithField("sequence", 
sequence) + logger.Info("Creating fake ledger") + var offers, trustlines, accounts, accountData, total int + + for i := 0; i < f.numTransactions; i++ { + var results []xdr.OperationResult + ledgerCloseMeta.V0.TxProcessing[i] = xdr.TransactionResultMeta{ + Result: xdr.TransactionResultPair{ + TransactionHash: xdr.Hash{1, byte(i % 256), byte((i / 256) % 256)}, + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxSuccess, + Results: &results, + }, + }, + }, + //FeeProcessing: nil, + TxApplyProcessing: xdr.TransactionMeta{ + V: 1, + V1: &xdr.TransactionMetaV1{ + Operations: []xdr.OperationMeta{}, + }, + }, + } + aid := xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A") + ledgerCloseMeta.V0.TxSet.Txs[i] = xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: aid.ToMuxedAccount(), + }, + }, + } + feeChanges := xdr.LedgerEntryChanges{} + for j := 0; j < f.changesPerTransaction; j++ { + var change xdr.LedgerEntryChange + switch total % 4 { + case 0: + change = fakeAccount() + accounts++ + case 1: + change = fakeAccountData() + accountData++ + case 2: + offers++ + change = fakeOffer(int64(offers)) + case 3: + change = fakeTrustline() + trustlines++ + } + total++ + feeChanges = append(feeChanges, change) + + if total%logFrequency == 0 { + curHeap, sysHeap := getMemStats() + logger.WithFields(logpkg.F{ + "currentHeapSizeMB": curHeap, + "systemHeapSizeMB": sysHeap, + "accounts": accounts, + "offfers": offers, + "trustlines": trustlines, + "accountData": accountData, + "totalChanges": total, + }).Info("Adding changes to fake ledger") + } + } + + ledgerCloseMeta.V0.TxProcessing[i].FeeProcessing = feeChanges + } + + curHeap, sysHeap := getMemStats() + logger.WithFields(logpkg.F{ + "currentHeapSizeMB": curHeap, + "systemHeapSizeMB": sysHeap, + "accounts": accounts, + "offfers": offers, + "trustlines": trustlines, + "accountData": accountData, + "totalChanges": total, + }).Info("Finished creating fake ledger") + return true, ledgerCloseMeta, nil +} + +func (f *fakeLedgerBackend) GetLedger(ctx context.Context, sequence uint32) (xdr.LedgerCloseMeta, error) { + _, meta, err := f.getLedgerAsync(ctx, sequence) + return meta, err +} + +func (fakeLedgerBackend) Close() error { + return nil +} diff --git a/services/horizon/internal/ingest/fsm.go b/services/horizon/internal/ingest/fsm.go new file mode 100644 index 0000000000..b95fe31c33 --- /dev/null +++ b/services/horizon/internal/ingest/fsm.go @@ -0,0 +1,1088 @@ +package ingest + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + logpkg "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +var ( + defaultSleep = time.Second +) + +// ErrReingestRangeConflict indicates that the reingest range overlaps with +// horizon's most recently ingested ledger +type ErrReingestRangeConflict struct { + maximumLedgerSequence uint32 +} + +func (e ErrReingestRangeConflict) Error() string { + return fmt.Sprintf("reingest range overlaps with horizon ingestion, supplied range shouldn't contain ledger %d", e.maximumLedgerSequence) +} + +type stateMachineNode interface { + run(*system) (transition, error) + String() string +} + +type transition struct { + node 
stateMachineNode + sleepDuration time.Duration +} + +func stop() transition { + return transition{node: stopState{}, sleepDuration: 0} +} + +func start() transition { + return transition{node: startState{}, sleepDuration: defaultSleep} +} + +func rebuild(checkpointLedger uint32) transition { + return transition{ + node: buildState{ + checkpointLedger: checkpointLedger, + }, + sleepDuration: defaultSleep, + } +} + +func resume(latestSuccessfullyProcessedLedger uint32) transition { + return transition{ + node: resumeState{ + latestSuccessfullyProcessedLedger: latestSuccessfullyProcessedLedger, + }, + sleepDuration: defaultSleep, + } +} + +func resumeImmediately(latestSuccessfullyProcessedLedger uint32) transition { + return transition{ + node: resumeState{ + latestSuccessfullyProcessedLedger: latestSuccessfullyProcessedLedger, + }, + sleepDuration: 0, + } +} + +func retryResume(r resumeState) transition { + return transition{ + node: r, + sleepDuration: defaultSleep, + } +} + +func historyRange(fromLedger, toLedger uint32) transition { + return transition{ + node: historyRangeState{ + fromLedger: fromLedger, + toLedger: toLedger, + }, + sleepDuration: defaultSleep, + } +} + +func waitForCheckPoint() transition { + return transition{ + node: waitForCheckpointState{}, + sleepDuration: 0, + } +} + +type stopState struct{} + +func (stopState) String() string { + return "stop" +} + +func (stopState) run(s *system) (transition, error) { + return stop(), errors.New("Cannot run terminal state") +} + +type startState struct { + suggestedCheckpoint uint32 +} + +func (startState) String() string { + return "start" +} + +func (state startState) run(s *system) (transition, error) { + if err := s.historyQ.Begin(); err != nil { + return start(), errors.Wrap(err, "Error starting a transaction") + } + defer s.historyQ.Rollback() + + // This will get the value `FOR UPDATE`, blocking it for other nodes. + lastIngestedLedger, err := s.historyQ.GetLastLedgerIngest(s.ctx) + if err != nil { + return start(), errors.Wrap(err, getLastIngestedErrMsg) + } + + ingestVersion, err := s.historyQ.GetIngestVersion(s.ctx) + if err != nil { + return start(), errors.Wrap(err, getIngestVersionErrMsg) + } + + if ingestVersion > CurrentVersion { + log.WithFields(logpkg.F{ + "ingestVersion": ingestVersion, + "currentVersion": CurrentVersion, + }).Info("ingestion version in db is greater than current version, going to terminate") + return stop(), nil + } + + lastHistoryLedger, err := s.historyQ.GetLatestHistoryLedger(s.ctx) + if err != nil { + return start(), errors.Wrap(err, "Error getting last history ledger sequence") + } + + if ingestVersion != CurrentVersion || lastIngestedLedger == 0 { + // This block is either starting from empty state or ingestion + // version upgrade. + // This will always run on a single instance due to the fact that + // `LastLedgerIngest` value is blocked for update and will always + // be updated when leading instance finishes processing state. + // In case of errors it will start `Init` from the beginning. + var lastCheckpoint uint32 + if state.suggestedCheckpoint != 0 { + lastCheckpoint = state.suggestedCheckpoint + } else { + lastCheckpoint, err = s.historyAdapter.GetLatestLedgerSequence() + if err != nil { + return start(), errors.Wrap(err, "Error getting last checkpoint") + } + } + + if lastHistoryLedger != 0 { + // There are ledgers in history_ledgers table. This means that the + // old or new ingest system was running prior the upgrade. 
In both + // cases we need to: + // * Wait for the checkpoint ledger if the latest history ledger is + // greater that the latest checkpoint ledger. + // * Catchup history data if the latest history ledger is less than + // the latest checkpoint ledger. + // * Build state from the last checkpoint if the latest history ledger + // is equal to the latest checkpoint ledger. + switch { + case lastHistoryLedger > lastCheckpoint: + return waitForCheckPoint(), nil + case lastHistoryLedger < lastCheckpoint: + return historyRange(lastHistoryLedger+1, lastCheckpoint), nil + default: // lastHistoryLedger == lastCheckpoint + // Build state but make sure it's using `lastCheckpoint`. It's possible + // that the new checkpoint will be created during state transition. + return rebuild(lastCheckpoint), nil + } + } + + return rebuild(lastCheckpoint), nil + } + + switch { + case lastHistoryLedger > lastIngestedLedger: + // Ingestion was running at some point the past but was turned off. + // Now it's on by default but the latest history ledger is greater + // than the latest ingest ledger. We reset the exp ledger sequence + // so init state will rebuild the state correctly. + err = s.historyQ.UpdateLastLedgerIngest(s.ctx, 0) + if err != nil { + return start(), errors.Wrap(err, updateLastLedgerIngestErrMsg) + } + err = s.historyQ.Commit() + if err != nil { + return start(), errors.Wrap(err, commitErrMsg) + } + return start(), nil + // lastHistoryLedger != 0 check is here to check the case when one node ingested + // the state (so latest ingestion is > 0) but no history has been ingested yet. + // In such case we execute default case and resume from the last ingested + // ledger. + case lastHistoryLedger != 0 && lastHistoryLedger < lastIngestedLedger: + // Ingestion was running at some point the past but was turned off. + // Now it's on by default but the latest history ledger is less + // than the latest ingest ledger. We catchup history. + return historyRange(lastHistoryLedger+1, lastIngestedLedger), nil + default: // lastHistoryLedger == lastIngestedLedger + // The other node already ingested a state (just now or in the past) + // so we need to get offers from a DB, then resume session normally. + // State pipeline is NOT processed. + log.WithField("last_ledger", lastIngestedLedger). + Info("Resuming ingestion system from last processed ledger...") + + return resume(lastIngestedLedger), nil + } +} + +type buildState struct { + checkpointLedger uint32 + stop bool +} + +func (b buildState) String() string { + return fmt.Sprintf("buildFromCheckpoint(checkpointLedger=%d)", b.checkpointLedger) +} + +func (b buildState) run(s *system) (transition, error) { + var nextFailState = start() + if b.stop { + nextFailState = stop() + } + + if b.checkpointLedger == 0 { + return nextFailState, errors.New("unexpected checkpointLedger value") + } + + // We don't need to prepare range for genesis checkpoint because we don't + // perform protocol version and bucket list hash checks. + // In the long term we should probably create artificial xdr.LedgerCloseMeta + // for ledger #1 instead of using `ingest.GenesisChange` reader in + // ProcessorRunner.RunHistoryArchiveIngestion(). 
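+	// For any other checkpoint we fetch the checkpoint ledger from the ledger
+	// backend first, because its protocol version and bucket list hash are
+	// passed to RunHistoryArchiveIngestion below for those checks.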
+ var ledgerCloseMeta xdr.LedgerCloseMeta + if b.checkpointLedger != 1 { + err := s.maybePrepareRange(s.ctx, b.checkpointLedger) + if err != nil { + return nextFailState, err + } + + log.WithField("sequence", b.checkpointLedger).Info("Waiting for ledger to be available in the backend...") + startTime := time.Now() + ledgerCloseMeta, err = s.ledgerBackend.GetLedger(s.ctx, b.checkpointLedger) + if err != nil { + return nextFailState, errors.Wrap(err, "error getting ledger blocking") + } + log.WithFields(logpkg.F{ + "sequence": b.checkpointLedger, + "duration": time.Since(startTime).Seconds(), + }).Info("Ledger returned from the backend") + } + + if err := s.historyQ.Begin(); err != nil { + return nextFailState, errors.Wrap(err, "Error starting a transaction") + } + defer s.historyQ.Rollback() + + // We need to get this value `FOR UPDATE` so all other instances + // are blocked. + lastIngestedLedger, err := s.historyQ.GetLastLedgerIngest(s.ctx) + if err != nil { + return nextFailState, errors.Wrap(err, getLastIngestedErrMsg) + } + + ingestVersion, err := s.historyQ.GetIngestVersion(s.ctx) + if err != nil { + return nextFailState, errors.Wrap(err, getIngestVersionErrMsg) + } + + // Double check if we should proceed with state ingestion. It's possible that + // another ingesting instance will be redirected to this state from `init` + // but it's first to complete the task. + if ingestVersion == CurrentVersion && lastIngestedLedger > 0 { + log.Info("Another instance completed `buildState`. Skipping...") + return nextFailState, nil + } + + if err = s.updateCursor(b.checkpointLedger - 1); err != nil { + // Don't return updateCursor error. + log.WithError(err).Warn("error updating stellar-core cursor") + } + + log.Info("Starting ingestion system from empty state...") + + // Clear last_ingested_ledger in key value store + err = s.historyQ.UpdateLastLedgerIngest(s.ctx, 0) + if err != nil { + return nextFailState, errors.Wrap(err, updateLastLedgerIngestErrMsg) + } + + // Clear invalid state in key value store. It's possible that upgraded + // ingestion is fixing it. + err = s.historyQ.UpdateExpStateInvalid(s.ctx, false) + if err != nil { + return nextFailState, errors.Wrap(err, updateExpStateInvalidErrMsg) + } + + // State tables should be empty. + err = s.historyQ.TruncateIngestStateTables(s.ctx) + if err != nil { + return nextFailState, errors.Wrap(err, "Error clearing ingest tables") + } + + log.WithFields(logpkg.F{ + "sequence": b.checkpointLedger, + }).Info("Processing state") + startTime := time.Now() + + var stats ingest.StatsChangeProcessorResults + if b.checkpointLedger == 1 { + stats, err = s.runner.RunGenesisStateIngestion() + } else { + stats, err = s.runner.RunHistoryArchiveIngestion( + ledgerCloseMeta.LedgerSequence(), + ledgerCloseMeta.ProtocolVersion(), + ledgerCloseMeta.BucketListHash(), + ) + } + + if err != nil { + return nextFailState, errors.Wrap(err, "Error ingesting history archive") + } + + if err = s.historyQ.UpdateIngestVersion(s.ctx, CurrentVersion); err != nil { + return nextFailState, errors.Wrap(err, "Error updating ingestion version") + } + + if err = s.completeIngestion(s.ctx, b.checkpointLedger); err != nil { + return nextFailState, err + } + + log. + WithFields(stats.Map()). + WithFields(logpkg.F{ + "sequence": b.checkpointLedger, + "duration": time.Since(startTime).Seconds(), + }). 
+ Info("Processed state") + + if b.stop { + return stop(), nil + } + // If successful, continue from the next ledger + return resume(b.checkpointLedger), nil +} + +type resumeState struct { + latestSuccessfullyProcessedLedger uint32 +} + +func (r resumeState) String() string { + return fmt.Sprintf("resume(latestSuccessfullyProcessedLedger=%d)", r.latestSuccessfullyProcessedLedger) +} + +func (r resumeState) run(s *system) (transition, error) { + if r.latestSuccessfullyProcessedLedger == 0 { + return start(), errors.New("unexpected latestSuccessfullyProcessedLedger value") + } + + s.metrics.LocalLatestLedger.Set(float64(r.latestSuccessfullyProcessedLedger)) + + ingestLedger := r.latestSuccessfullyProcessedLedger + 1 + + err := s.maybePrepareRange(s.ctx, ingestLedger) + if err != nil { + return start(), err + } + + log.WithField("sequence", ingestLedger).Info("Waiting for ledger to be available in the backend...") + startTime := time.Now() + ledgerCloseMeta, err := s.ledgerBackend.GetLedger(s.ctx, ingestLedger) + if err != nil { + return start(), errors.Wrap(err, "error getting ledger blocking") + } + duration := time.Since(startTime).Seconds() + log.WithFields(logpkg.F{ + "sequence": ingestLedger, + "duration": duration, + }).Info("Ledger returned from the backend") + + s.Metrics().LedgerFetchDurationSummary.Observe(float64(duration)) + + if err = s.historyQ.Begin(); err != nil { + return retryResume(r), + errors.Wrap(err, "Error starting a transaction") + } + defer s.historyQ.Rollback() + + // This will get the value `FOR UPDATE`, blocking it for other nodes. + lastIngestedLedger, err := s.historyQ.GetLastLedgerIngest(s.ctx) + if err != nil { + return retryResume(r), errors.Wrap(err, getLastIngestedErrMsg) + } + + if ingestLedger > lastIngestedLedger+1 { + return start(), errors.New("expected ingest ledger to be at most one greater " + + "than last ingested ledger in db") + } else if ingestLedger <= lastIngestedLedger { + log.WithField("ingestLedger", ingestLedger). + WithField("lastIngestedLedger", lastIngestedLedger). + Info("bumping ingest ledger to next ledger after ingested ledger in db") + + // Update cursor if there's more than one ingesting instance: either + // Captive-Core or DB ingestion connected to another Stellar-Core. + if err = s.updateCursor(lastIngestedLedger); err != nil { + // Don't return updateCursor error. 
+ log.WithError(err).Warn("error updating stellar-core cursor") + } + + return retryResume(resumeState{ + latestSuccessfullyProcessedLedger: lastIngestedLedger, + }), nil + } + + ingestVersion, err := s.historyQ.GetIngestVersion(s.ctx) + if err != nil { + return retryResume(r), errors.Wrap(err, getIngestVersionErrMsg) + } + + if ingestVersion != CurrentVersion { + log.WithFields(logpkg.F{ + "ingestVersion": ingestVersion, + "currentVersion": CurrentVersion, + }).Info("ingestion version in db is not current, going back to start state") + return start(), nil + } + + lastHistoryLedger, err := s.historyQ.GetLatestHistoryLedger(s.ctx) + if err != nil { + return retryResume(r), errors.Wrap(err, "could not get latest history ledger") + } + + if lastHistoryLedger != 0 && lastHistoryLedger != lastIngestedLedger { + log.WithFields(logpkg.F{ + "lastHistoryLedger": lastHistoryLedger, + "lastIngestedLedger": lastIngestedLedger, + }).Info( + "last history ledger does not match last ingested ledger, " + + "going back to start state", + ) + return start(), nil + } + + startTime = time.Now() + + log.WithFields(logpkg.F{ + "sequence": ingestLedger, + "state": true, + "ledger": true, + "commit": true, + }).Info("Processing ledger") + + stats, err := + s.runner.RunAllProcessorsOnLedger(ledgerCloseMeta) + if err != nil { + return retryResume(r), errors.Wrap(err, "Error running processors on ledger") + } + + rebuildStart := time.Now() + err = s.historyQ.RebuildTradeAggregationBuckets(s.ctx, ingestLedger, ingestLedger) + if err != nil { + return stop(), errors.Wrap(err, "error rebuilding trade aggregations") + } + rebuildDuration := time.Since(rebuildStart).Seconds() + s.Metrics().LedgerIngestionTradeAggregationDuration.Observe(float64(rebuildDuration)) + + if err = s.completeIngestion(s.ctx, ingestLedger); err != nil { + return retryResume(r), err + } + + if err = s.updateCursor(ingestLedger); err != nil { + // Don't return updateCursor error. + log.WithError(err).Warn("error updating stellar-core cursor") + } + + duration = time.Since(startTime).Seconds() + s.Metrics().LedgerIngestionDuration.Observe(float64(duration)) + + // Update stats metrics + changeStatsMap := stats.changeStats.Map() + r.addLedgerStatsMetricFromMap(s, "change", changeStatsMap) + r.addProcessorDurationsMetricFromMap(s, stats.changeDurations) + + transactionStatsMap := stats.transactionStats.Map() + r.addLedgerStatsMetricFromMap(s, "ledger", transactionStatsMap) + tradeStatsMap := stats.tradeStats.Map() + r.addLedgerStatsMetricFromMap(s, "trades", tradeStatsMap) + r.addProcessorDurationsMetricFromMap(s, stats.transactionDurations) + + localLog := log.WithFields(logpkg.F{ + "sequence": ingestLedger, + "duration": duration, + "state": true, + "ledger": true, + "commit": true, + }) + + if s.config.EnableExtendedLogLedgerStats { + localLog = localLog. + WithFields(changeStatsMap). + WithFields(transactionStatsMap). + WithFields(tradeStatsMap) + } + + localLog.Info("Processed ledger") + + s.maybeVerifyState(ingestLedger) + + return resumeImmediately(ingestLedger), nil +} + +func (r resumeState) addLedgerStatsMetricFromMap(s *system, prefix string, m map[string]interface{}) { + for stat, value := range m { + stat = strings.Replace(stat, "stats_", prefix+"_", 1) + s.Metrics().LedgerStatsCounter. 
+ With(prometheus.Labels{"type": stat}).Add(float64(value.(int64))) + } +} + +func (r resumeState) addProcessorDurationsMetricFromMap(s *system, m map[string]time.Duration) { + for processorName, value := range m { + // * is not accepted in Prometheus labels + processorName = strings.Replace(processorName, "*", "", -1) + s.Metrics().ProcessorsRunDuration. + With(prometheus.Labels{"name": processorName}).Add(value.Seconds()) + s.Metrics().ProcessorsRunDurationSummary. + With(prometheus.Labels{"name": processorName}).Observe(value.Seconds()) + } +} + +type historyRangeState struct { + fromLedger uint32 + toLedger uint32 +} + +func (h historyRangeState) String() string { + return fmt.Sprintf( + "historyRange(fromLedger=%d, toLedger=%d)", + h.fromLedger, + h.toLedger, + ) +} + +// historyRangeState is used when catching up history data +func (h historyRangeState) run(s *system) (transition, error) { + if h.fromLedger == 0 || h.toLedger == 0 || + h.fromLedger > h.toLedger { + return start(), errors.Errorf("invalid range: [%d, %d]", h.fromLedger, h.toLedger) + } + + err := s.maybePrepareRange(s.ctx, h.fromLedger) + if err != nil { + return start(), err + } + + if err = s.historyQ.Begin(); err != nil { + return start(), errors.Wrap(err, "Error starting a transaction") + } + defer s.historyQ.Rollback() + + // acquire distributed lock so no one else can perform ingestion operations. + if _, err = s.historyQ.GetLastLedgerIngest(s.ctx); err != nil { + return start(), errors.Wrap(err, getLastIngestedErrMsg) + } + + lastHistoryLedger, err := s.historyQ.GetLatestHistoryLedger(s.ctx) + if err != nil { + return start(), errors.Wrap(err, "could not get latest history ledger") + } + + // We should be ingesting the ledger which occurs after + // lastHistoryLedger. Otherwise, some other horizon node has + // already completed the ingest history range operation and + // we should go back to the init state + if lastHistoryLedger != h.fromLedger-1 { + return start(), nil + } + + for cur := h.fromLedger; cur <= h.toLedger; cur++ { + var ledgerCloseMeta xdr.LedgerCloseMeta + + log.WithField("sequence", cur).Info("Waiting for ledger to be available in the backend...") + startTime := time.Now() + + ledgerCloseMeta, err = s.ledgerBackend.GetLedger(s.ctx, cur) + if err != nil { + // Commit finished work in case of ledger backend error. + commitErr := s.historyQ.Commit() + if commitErr != nil { + log.WithError(commitErr).Error("Error commiting partial range results") + } else { + log.Info("Commited partial range results") + } + return start(), errors.Wrap(err, "error getting ledger") + } + + log.WithFields(logpkg.F{ + "sequence": cur, + "duration": time.Since(startTime).Seconds(), + }).Info("Ledger returned from the backend") + + if err = runTransactionProcessorsOnLedger(s, ledgerCloseMeta); err != nil { + return start(), err + } + } + + if err = s.historyQ.Commit(); err != nil { + return start(), errors.Wrap(err, commitErrMsg) + } + + return start(), nil +} + +func runTransactionProcessorsOnLedger(s *system, ledger xdr.LedgerCloseMeta) error { + log.WithFields(logpkg.F{ + "sequence": ledger.LedgerSequence(), + "state": false, + "ledger": true, + "commit": false, + }).Info("Processing ledger") + startTime := time.Now() + + ledgerTransactionStats, _, tradeStats, err := s.runner.RunTransactionProcessorsOnLedger(ledger) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("error processing ledger sequence=%d", ledger.LedgerSequence())) + } + + log. + WithFields(ledgerTransactionStats.Map()). 
+ WithFields(tradeStats.Map()). + WithFields(logpkg.F{ + "sequence": ledger.LedgerSequence(), + "duration": time.Since(startTime).Seconds(), + "state": false, + "ledger": true, + "commit": false, + }). + Info("Processed ledger") + return nil +} + +type reingestHistoryRangeState struct { + fromLedger uint32 + toLedger uint32 + force bool +} + +func (h reingestHistoryRangeState) String() string { + return fmt.Sprintf( + "reingestHistoryRange(fromLedger=%d, toLedger=%d, force=%t)", + h.fromLedger, + h.toLedger, + h.force, + ) +} + +func (h reingestHistoryRangeState) ingestRange(s *system, fromLedger, toLedger uint32) error { + if s.historyQ.GetTx() == nil { + return errors.New("expected transaction to be present") + } + + // Clear history data before ingesting - used in `reingest range` command. + start, end, err := toid.LedgerRangeInclusive( + int32(fromLedger), + int32(toLedger), + ) + if err != nil { + return errors.Wrap(err, "Invalid range") + } + + err = s.historyQ.DeleteRangeAll(s.ctx, start, end) + if err != nil { + return errors.Wrap(err, "error in DeleteRangeAll") + } + + for cur := fromLedger; cur <= toLedger; cur++ { + var ledgerCloseMeta xdr.LedgerCloseMeta + ledgerCloseMeta, err = s.ledgerBackend.GetLedger(s.ctx, cur) + if err != nil { + return errors.Wrap(err, "error getting ledger") + } + + if err = runTransactionProcessorsOnLedger(s, ledgerCloseMeta); err != nil { + return err + } + } + + return nil +} + +func (h reingestHistoryRangeState) prepareRange(s *system) (transition, error) { + log.WithFields(logpkg.F{ + "from": h.fromLedger, + "to": h.toLedger, + }).Info("Preparing ledger backend to retrieve range") + startTime := time.Now() + + err := s.ledgerBackend.PrepareRange(s.ctx, ledgerbackend.BoundedRange(h.fromLedger, h.toLedger)) + if err != nil { + return stop(), errors.Wrap(err, "error preparing range") + } + + log.WithFields(logpkg.F{ + "from": h.fromLedger, + "to": h.toLedger, + "duration": time.Since(startTime).Seconds(), + }).Info("Range ready") + + return transition{}, nil +} + +// reingestHistoryRangeState is used as a command to reingest historical data +func (h reingestHistoryRangeState) run(s *system) (transition, error) { + if h.fromLedger == 0 || h.toLedger == 0 || + h.fromLedger > h.toLedger { + return stop(), errors.Errorf("invalid range: [%d, %d]", h.fromLedger, h.toLedger) + } + + if h.fromLedger == 1 { + log.Warn("Ledger 1 is pregenerated and not available, starting from ledger 2.") + h.fromLedger = 2 + } + + var startTime time.Time + + if h.force { + if t, err := h.prepareRange(s); err != nil { + return t, err + } + startTime = time.Now() + + if err := s.historyQ.Begin(); err != nil { + return stop(), errors.Wrap(err, "Error starting a transaction") + } + defer s.historyQ.Rollback() + + // acquire distributed lock so no one else can perform ingestion operations. 
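+		// (GetLastLedgerIngest reads the last-ingested-ledger value `FOR UPDATE`,
+		// so the lock is held until this transaction commits or rolls back.)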
+ if _, err := s.historyQ.GetLastLedgerIngest(s.ctx); err != nil { + return stop(), errors.Wrap(err, getLastIngestedErrMsg) + } + + if err := h.ingestRange(s, h.fromLedger, h.toLedger); err != nil { + return stop(), err + } + + if err := s.historyQ.Commit(); err != nil { + return stop(), errors.Wrap(err, commitErrMsg) + } + } else { + lastIngestedLedger, err := s.historyQ.GetLastLedgerIngestNonBlocking(s.ctx) + if err != nil { + return stop(), errors.Wrap(err, getLastIngestedErrMsg) + } + + if lastIngestedLedger > 0 && h.toLedger >= lastIngestedLedger { + return stop(), ErrReingestRangeConflict{lastIngestedLedger} + } + + // Only prepare the range after checking the bounds to enable an early error return + var t transition + if t, err = h.prepareRange(s); err != nil { + return t, err + } + startTime = time.Now() + + for cur := h.fromLedger; cur <= h.toLedger; cur++ { + err = func(ledger uint32) error { + if e := s.historyQ.Begin(); e != nil { + return errors.Wrap(e, "Error starting a transaction") + } + defer s.historyQ.Rollback() + + // ingest each ledger in a separate transaction to prevent deadlocks + // when acquiring ShareLocks from multiple parallel reingest range processes + if e := h.ingestRange(s, ledger, ledger); e != nil { + return e + } + + if e := s.historyQ.Commit(); e != nil { + return errors.Wrap(e, commitErrMsg) + } + + return nil + }(cur) + if err != nil { + return stop(), err + } + } + } + + err := s.historyQ.RebuildTradeAggregationBuckets(s.ctx, h.fromLedger, h.toLedger) + if err != nil { + return stop(), errors.Wrap(err, "Error rebuilding trade aggregations") + } + + log.WithFields(logpkg.F{ + "from": h.fromLedger, + "to": h.toLedger, + "duration": time.Since(startTime).Seconds(), + }).Info("Reingestion done") + + return stop(), nil +} + +type waitForCheckpointState struct{} + +func (waitForCheckpointState) String() string { + return "waitForCheckpoint" +} + +func (waitForCheckpointState) run(*system) (transition, error) { + log.Info("Waiting for the next checkpoint...") + time.Sleep(10 * time.Second) + return start(), nil +} + +type verifyRangeState struct { + fromLedger uint32 + toLedger uint32 + verifyState bool +} + +func (v verifyRangeState) String() string { + return fmt.Sprintf( + "verifyRange(fromLedger=%d, toLedger=%d, verifyState=%t)", + v.fromLedger, + v.toLedger, + v.verifyState, + ) +} + +func (v verifyRangeState) run(s *system) (transition, error) { + if v.fromLedger == 0 || v.toLedger == 0 || + v.fromLedger > v.toLedger { + return stop(), errors.Errorf("invalid range: [%d, %d]", v.fromLedger, v.toLedger) + } + + if err := s.historyQ.Begin(); err != nil { + err = errors.Wrap(err, "Error starting a transaction") + return stop(), err + } + defer s.historyQ.Rollback() + + // Simple check if DB clean + lastIngestedLedger, err := s.historyQ.GetLastLedgerIngest(s.ctx) + if err != nil { + err = errors.Wrap(err, getLastIngestedErrMsg) + return stop(), err + } + + if lastIngestedLedger != 0 { + err = errors.New("Database not empty") + return stop(), err + } + + log.WithField("sequence", v.fromLedger).Info("Preparing range") + startTime := time.Now() + + err = s.ledgerBackend.PrepareRange(s.ctx, ledgerbackend.BoundedRange(v.fromLedger, v.toLedger)) + if err != nil { + return stop(), errors.Wrap(err, "Error preparing range") + } + + log.WithFields(logpkg.F{ + "sequence": v.fromLedger, + "duration": time.Since(startTime).Seconds(), + }).Info("Range prepared") + + log.WithField("sequence", v.fromLedger).Info("Processing state") + startTime = time.Now() + + 
ledgerCloseMeta, err := s.ledgerBackend.GetLedger(s.ctx, v.fromLedger) + if err != nil { + return stop(), errors.Wrap(err, "error getting ledger") + } + + stats, err := s.runner.RunHistoryArchiveIngestion( + ledgerCloseMeta.LedgerSequence(), + ledgerCloseMeta.ProtocolVersion(), + ledgerCloseMeta.BucketListHash(), + ) + if err != nil { + err = errors.Wrap(err, "Error ingesting history archive") + return stop(), err + } + + if err = s.completeIngestion(s.ctx, v.fromLedger); err != nil { + return stop(), err + } + + log. + WithFields(stats.Map()). + WithFields(logpkg.F{ + "sequence": v.fromLedger, + "duration": time.Since(startTime).Seconds(), + }). + Info("Processed state") + + for sequence := v.fromLedger + 1; sequence <= v.toLedger; sequence++ { + log.WithFields(logpkg.F{ + "sequence": sequence, + "state": true, + "ledger": true, + "commit": true, + }).Info("Processing ledger") + startTime := time.Now() + + if err = s.historyQ.Begin(); err != nil { + err = errors.Wrap(err, "Error starting a transaction") + return stop(), err + } + + var ledgerCloseMeta xdr.LedgerCloseMeta + ledgerCloseMeta, err = s.ledgerBackend.GetLedger(s.ctx, sequence) + if err != nil { + return stop(), errors.Wrap(err, "error getting ledger") + } + + var ledgerStats ledgerStats + ledgerStats, err = s.runner.RunAllProcessorsOnLedger(ledgerCloseMeta) + if err != nil { + err = errors.Wrap(err, "Error running processors on ledger") + return stop(), err + } + + if err = s.completeIngestion(s.ctx, sequence); err != nil { + return stop(), err + } + + log. + WithFields(ledgerStats.changeStats.Map()). + WithFields(ledgerStats.transactionStats.Map()). + WithFields(ledgerStats.tradeStats.Map()). + WithFields(logpkg.F{ + "sequence": sequence, + "duration": time.Since(startTime).Seconds(), + "state": true, + "ledger": true, + "commit": true, + }). + Info("Processed ledger") + } + + err = s.historyQ.RebuildTradeAggregationBuckets(s.ctx, v.fromLedger, v.toLedger) + if err != nil { + return stop(), errors.Wrap(err, "error rebuilding trade aggregations") + } + + if v.verifyState { + err = s.verifyState(false) + } + + return stop(), err +} + +type stressTestState struct{} + +func (stressTestState) String() string { + return "stressTest" +} + +func (stressTestState) run(s *system) (transition, error) { + if err := s.historyQ.Begin(); err != nil { + err = errors.Wrap(err, "Error starting a transaction") + return stop(), err + } + defer s.historyQ.Rollback() + + // Simple check if DB clean + lastIngestedLedger, err := s.historyQ.GetLastLedgerIngest(s.ctx) + if err != nil { + err = errors.Wrap(err, getLastIngestedErrMsg) + return stop(), err + } + + if lastIngestedLedger != 0 { + err = errors.New("Database not empty") + return stop(), err + } + + curHeap, sysHeap := getMemStats() + sequence := lastIngestedLedger + 1 + log.WithFields(logpkg.F{ + "currentHeapSizeMB": curHeap, + "systemHeapSizeMB": sysHeap, + "sequence": sequence, + "state": true, + "ledger": true, + "commit": true, + }).Info("Processing ledger") + startTime := time.Now() + + ledgerCloseMeta, err := s.ledgerBackend.GetLedger(s.ctx, sequence) + if err != nil { + return stop(), errors.Wrap(err, "error getting ledger") + } + + stats, err := s.runner.RunAllProcessorsOnLedger(ledgerCloseMeta) + if err != nil { + err = errors.Wrap(err, "Error running processors on ledger") + return stop(), err + } + + if err = s.completeIngestion(s.ctx, sequence); err != nil { + return stop(), err + } + + curHeap, sysHeap = getMemStats() + log. + WithFields(stats.changeStats.Map()). 
+ WithFields(stats.transactionStats.Map()). + WithFields(stats.tradeStats.Map()). + WithFields(logpkg.F{ + "currentHeapSizeMB": curHeap, + "systemHeapSizeMB": sysHeap, + "sequence": sequence, + "duration": time.Since(startTime).Seconds(), + "state": true, + "ledger": true, + "commit": true, + }). + Info("Processed ledger") + + return stop(), nil +} + +func (s *system) completeIngestion(ctx context.Context, ledger uint32) error { + if ledger == 0 { + return errors.New("ledger must be positive") + } + + if err := s.historyQ.UpdateLastLedgerIngest(ctx, ledger); err != nil { + err = errors.Wrap(err, updateLastLedgerIngestErrMsg) + return err + } + + if err := s.historyQ.Commit(); err != nil { + return errors.Wrap(err, commitErrMsg) + } + + return nil +} + +// maybePrepareRange checks if the range is prepared and, if not, prepares it. +func (s *system) maybePrepareRange(ctx context.Context, from uint32) error { + ledgerRange := ledgerbackend.UnboundedRange(from) + + prepared, err := s.ledgerBackend.IsPrepared(ctx, ledgerRange) + if err != nil { + return errors.Wrap(err, "error checking prepared range") + } + + if !prepared { + log.WithFields(logpkg.F{"from": from}).Info("Preparing range") + startTime := time.Now() + + err = s.ledgerBackend.PrepareRange(ctx, ledgerRange) + if err != nil { + return errors.Wrap(err, "error preparing range") + } + + log.WithFields(logpkg.F{ + "from": from, + "duration": time.Since(startTime).Seconds(), + }).Info("Range prepared") + + return nil + } + + return nil +} diff --git a/services/horizon/internal/ingest/group_processors.go b/services/horizon/internal/ingest/group_processors.go new file mode 100644 index 0000000000..b35f940a6b --- /dev/null +++ b/services/horizon/internal/ingest/group_processors.go @@ -0,0 +1,84 @@ +package ingest + +import ( + "context" + "fmt" + "time" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/support/errors" +) + +type processorsRunDurations map[string]time.Duration + +func (d processorsRunDurations) AddRunDuration(name string, startTime time.Time) { + d[name] += time.Since(startTime) +} + +type groupChangeProcessors struct { + processors []horizonChangeProcessor + processorsRunDurations +} + +func newGroupChangeProcessors(processors []horizonChangeProcessor) *groupChangeProcessors { + return &groupChangeProcessors{ + processors: processors, + processorsRunDurations: make(map[string]time.Duration), + } +} + +func (g groupChangeProcessors) ProcessChange(ctx context.Context, change ingest.Change) error { + for _, p := range g.processors { + startTime := time.Now() + if err := p.ProcessChange(ctx, change); err != nil { + return errors.Wrapf(err, "error in %T.ProcessChange", p) + } + g.AddRunDuration(fmt.Sprintf("%T", p), startTime) + } + return nil +} + +func (g groupChangeProcessors) Commit(ctx context.Context) error { + for _, p := range g.processors { + startTime := time.Now() + if err := p.Commit(ctx); err != nil { + return errors.Wrapf(err, "error in %T.Commit", p) + } + g.AddRunDuration(fmt.Sprintf("%T", p), startTime) + } + return nil +} + +type groupTransactionProcessors struct { + processors []horizonTransactionProcessor + processorsRunDurations +} + +func newGroupTransactionProcessors(processors []horizonTransactionProcessor) *groupTransactionProcessors { + return &groupTransactionProcessors{ + processors: processors, + processorsRunDurations: make(map[string]time.Duration), + } +} + +func (g groupTransactionProcessors) ProcessTransaction(ctx context.Context, tx ingest.LedgerTransaction) error { + for _, p := 
range g.processors { + startTime := time.Now() + if err := p.ProcessTransaction(ctx, tx); err != nil { + return errors.Wrapf(err, "error in %T.ProcessTransaction", p) + } + g.AddRunDuration(fmt.Sprintf("%T", p), startTime) + } + return nil +} + +func (g groupTransactionProcessors) Commit(ctx context.Context) error { + for _, p := range g.processors { + startTime := time.Now() + if err := p.Commit(ctx); err != nil { + return errors.Wrapf(err, "error in %T.Commit", p) + } + g.AddRunDuration(fmt.Sprintf("%T", p), startTime) + } + return nil +} diff --git a/services/horizon/internal/ingest/group_processors_test.go b/services/horizon/internal/ingest/group_processors_test.go new file mode 100644 index 0000000000..6848c24a66 --- /dev/null +++ b/services/horizon/internal/ingest/group_processors_test.go @@ -0,0 +1,192 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package ingest + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest" +) + +var _ horizonChangeProcessor = (*mockHorizonChangeProcessor)(nil) + +type mockHorizonChangeProcessor struct { + mock.Mock +} + +func (m *mockHorizonChangeProcessor) ProcessChange(ctx context.Context, change ingest.Change) error { + args := m.Called(ctx, change) + return args.Error(0) +} + +func (m *mockHorizonChangeProcessor) Commit(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + +var _ horizonTransactionProcessor = (*mockHorizonTransactionProcessor)(nil) + +type mockHorizonTransactionProcessor struct { + mock.Mock +} + +func (m *mockHorizonTransactionProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) error { + args := m.Called(ctx, transaction) + return args.Error(0) +} + +func (m *mockHorizonTransactionProcessor) Commit(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + +type GroupChangeProcessorsTestSuiteLedger struct { + suite.Suite + ctx context.Context + processors *groupChangeProcessors + processorA *mockHorizonChangeProcessor + processorB *mockHorizonChangeProcessor +} + +func TestGroupChangeProcessorsTestSuiteLedger(t *testing.T) { + suite.Run(t, new(GroupChangeProcessorsTestSuiteLedger)) +} + +func (s *GroupChangeProcessorsTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.processorA = &mockHorizonChangeProcessor{} + s.processorB = &mockHorizonChangeProcessor{} + s.processors = newGroupChangeProcessors([]horizonChangeProcessor{ + s.processorA, + s.processorB, + }) +} + +func (s *GroupChangeProcessorsTestSuiteLedger) TearDownTest() { + s.processorA.AssertExpectations(s.T()) + s.processorB.AssertExpectations(s.T()) +} + +func (s *GroupChangeProcessorsTestSuiteLedger) TestProcessChangeFails() { + change := ingest.Change{} + s.processorA. + On("ProcessChange", s.ctx, change). + Return(errors.New("transient error")).Once() + + err := s.processors.ProcessChange(s.ctx, change) + s.Assert().Error(err) + s.Assert().EqualError(err, "error in *ingest.mockHorizonChangeProcessor.ProcessChange: transient error") +} + +func (s *GroupChangeProcessorsTestSuiteLedger) TestProcessChangeSucceeds() { + change := ingest.Change{} + s.processorA. + On("ProcessChange", s.ctx, change). + Return(nil).Once() + s.processorB. + On("ProcessChange", s.ctx, change). 
+ Return(nil).Once() + + err := s.processors.ProcessChange(s.ctx, change) + s.Assert().NoError(err) +} + +func (s *GroupChangeProcessorsTestSuiteLedger) TestCommitFails() { + s.processorA. + On("Commit", s.ctx). + Return(errors.New("transient error")).Once() + + err := s.processors.Commit(s.ctx) + s.Assert().Error(err) + s.Assert().EqualError(err, "error in *ingest.mockHorizonChangeProcessor.Commit: transient error") +} + +func (s *GroupChangeProcessorsTestSuiteLedger) TestCommitSucceeds() { + s.processorA. + On("Commit", s.ctx). + Return(nil).Once() + s.processorB. + On("Commit", s.ctx). + Return(nil).Once() + + err := s.processors.Commit(s.ctx) + s.Assert().NoError(err) +} + +type GroupTransactionProcessorsTestSuiteLedger struct { + suite.Suite + ctx context.Context + processors *groupTransactionProcessors + processorA *mockHorizonTransactionProcessor + processorB *mockHorizonTransactionProcessor +} + +func TestGroupTransactionProcessorsTestSuiteLedger(t *testing.T) { + suite.Run(t, new(GroupTransactionProcessorsTestSuiteLedger)) +} + +func (s *GroupTransactionProcessorsTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.processorA = &mockHorizonTransactionProcessor{} + s.processorB = &mockHorizonTransactionProcessor{} + s.processors = newGroupTransactionProcessors([]horizonTransactionProcessor{ + s.processorA, + s.processorB, + }) +} + +func (s *GroupTransactionProcessorsTestSuiteLedger) TearDownTest() { + s.processorA.AssertExpectations(s.T()) + s.processorB.AssertExpectations(s.T()) +} + +func (s *GroupTransactionProcessorsTestSuiteLedger) TestProcessTransactionFails() { + transaction := ingest.LedgerTransaction{} + s.processorA. + On("ProcessTransaction", s.ctx, transaction). + Return(errors.New("transient error")).Once() + + err := s.processors.ProcessTransaction(s.ctx, transaction) + s.Assert().Error(err) + s.Assert().EqualError(err, "error in *ingest.mockHorizonTransactionProcessor.ProcessTransaction: transient error") +} + +func (s *GroupTransactionProcessorsTestSuiteLedger) TestProcessTransactionSucceeds() { + transaction := ingest.LedgerTransaction{} + s.processorA. + On("ProcessTransaction", s.ctx, transaction). + Return(nil).Once() + s.processorB. + On("ProcessTransaction", s.ctx, transaction). + Return(nil).Once() + + err := s.processors.ProcessTransaction(s.ctx, transaction) + s.Assert().NoError(err) +} + +func (s *GroupTransactionProcessorsTestSuiteLedger) TestCommitFails() { + s.processorA. + On("Commit", s.ctx). + Return(errors.New("transient error")).Once() + + err := s.processors.Commit(s.ctx) + s.Assert().Error(err) + s.Assert().EqualError(err, "error in *ingest.mockHorizonTransactionProcessor.Commit: transient error") +} + +func (s *GroupTransactionProcessorsTestSuiteLedger) TestCommitSucceeds() { + s.processorA. + On("Commit", s.ctx). + Return(nil).Once() + s.processorB. + On("Commit", s.ctx). 
+ Return(nil).Once() + + err := s.processors.Commit(s.ctx) + s.Assert().NoError(err) +} diff --git a/services/horizon/internal/ingest/history_archive_adapter.go b/services/horizon/internal/ingest/history_archive_adapter.go new file mode 100644 index 0000000000..d4cde9436f --- /dev/null +++ b/services/horizon/internal/ingest/history_archive_adapter.go @@ -0,0 +1,73 @@ +package ingest + +import ( + "context" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// historyArchiveAdapter is an adapter for the historyarchive package to read from history archives +type historyArchiveAdapter struct { + archive historyarchive.ArchiveInterface +} + +type historyArchiveAdapterInterface interface { + GetLatestLedgerSequence() (uint32, error) + BucketListHash(sequence uint32) (xdr.Hash, error) + GetState(ctx context.Context, sequence uint32) (ingest.ChangeReader, error) +} + +// newHistoryArchiveAdapter is a constructor to make a historyArchiveAdapter +func newHistoryArchiveAdapter(archive historyarchive.ArchiveInterface) historyArchiveAdapterInterface { + return &historyArchiveAdapter{archive: archive} +} + +// GetLatestLedgerSequence returns the latest ledger sequence or an error +func (haa *historyArchiveAdapter) GetLatestLedgerSequence() (uint32, error) { + has, err := haa.archive.GetRootHAS() + if err != nil { + return 0, errors.Wrap(err, "could not get root HAS") + } + + return has.CurrentLedger, nil +} + +// BucketListHash returns the bucket list hash to compare with hash in the +// ledger header fetched from Stellar-Core. +func (haa *historyArchiveAdapter) BucketListHash(sequence uint32) (xdr.Hash, error) { + exists, err := haa.archive.CategoryCheckpointExists("history", sequence) + if err != nil { + return xdr.Hash{}, errors.Wrap(err, "error checking if category checkpoint exists") + } + if !exists { + return xdr.Hash{}, errors.Errorf("history checkpoint does not exist for ledger %d", sequence) + } + + has, err := haa.archive.GetCheckpointHAS(sequence) + if err != nil { + return xdr.Hash{}, errors.Wrapf(err, "unable to get checkpoint HAS at ledger sequence %d", sequence) + } + + return has.BucketListHash() +} + +// GetState returns a reader with the state of the ledger at the provided sequence number. 
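+// The reader streams ledger entry changes from the checkpoint buckets and
+// returns io.EOF once the stream is exhausted. As a rough, hypothetical
+// sketch (process is a placeholder, not a real helper), a caller would loop
+// like this:
+//
+//	reader, err := haa.GetState(ctx, sequence)
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		change, err := reader.Read()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		process(change) // placeholder handler
+//	}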
+func (haa *historyArchiveAdapter) GetState(ctx context.Context, sequence uint32) (ingest.ChangeReader, error) { + exists, err := haa.archive.CategoryCheckpointExists("history", sequence) + if err != nil { + return nil, errors.Wrap(err, "error checking if category checkpoint exists") + } + if !exists { + return nil, errors.Errorf("history checkpoint does not exist for ledger %d", sequence) + } + + sr, e := ingest.NewCheckpointChangeReader(ctx, haa.archive, sequence) + if e != nil { + return nil, errors.Wrap(e, "could not make memory state reader") + } + + return sr, nil +} diff --git a/services/horizon/internal/ingest/history_archive_adapter_test.go b/services/horizon/internal/ingest/history_archive_adapter_test.go new file mode 100644 index 0000000000..7c9207cbe4 --- /dev/null +++ b/services/horizon/internal/ingest/history_archive_adapter_test.go @@ -0,0 +1,197 @@ +package ingest + +import ( + "context" + "encoding/json" + stdio "io" + "testing" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +type mockHistoryArchiveAdapter struct { + mock.Mock +} + +func (m *mockHistoryArchiveAdapter) GetLatestLedgerSequence() (uint32, error) { + args := m.Called() + return args.Get(0).(uint32), args.Error(1) +} + +func (m *mockHistoryArchiveAdapter) BucketListHash(sequence uint32) (xdr.Hash, error) { + args := m.Called(sequence) + return args.Get(0).(xdr.Hash), args.Error(1) +} + +func (m *mockHistoryArchiveAdapter) GetState(ctx context.Context, sequence uint32) (ingest.ChangeReader, error) { + args := m.Called(ctx, sequence) + return args.Get(0).(ingest.ChangeReader), args.Error(1) +} + +func TestGetState_Read(t *testing.T) { + archive, e := getTestArchive() + if !assert.NoError(t, e) { + return + } + + haa := newHistoryArchiveAdapter(archive) + + sr, e := haa.GetState(context.Background(), 21686847) + if !assert.NoError(t, e) { + return + } + + lec, e := sr.Read() + if !assert.NoError(t, e) { + return + } + assert.NotEqual(t, e, stdio.EOF) + + if !assert.NotNil(t, lec) { + return + } + assert.Equal(t, "GAFBQT4VRORLEVEECUYDQGWNVQ563ZN76LGRJR7T7KDL32EES54UOQST", lec.Post.Data.Account.AccountId.Address()) +} + +func getTestArchive() (historyarchive.ArchiveInterface, error) { + + var hasJson = []byte(`{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 21686847, + "currentBuckets": [ + { + "curr": "2a4416e7f3e301c2fc1078dce0e1dd109b8ae6d3958942b91b447f24014a7b5c", + "next": { + "state": 0 + }, + "snap": "7ff95a98838dfd39a36858f15c8d503641560f02a52aa15335559e1183ce2ca1" + }, + { + "curr": "2c7e74c4c5555e41b39a5fc04e77e77852c35e7769e32b486e07a072b9b3177c", + "next": { + "state": 1, + "output": "7ff95a98838dfd39a36858f15c8d503641560f02a52aa15335559e1183ce2ca1" + }, + "snap": "5f0bc7d0bd9e8ed6530fc270339b7dd2fbcedf0d80235f5ef64daa90b84259f4" + }, + { + "curr": "068f2a1ece2817c98c0d21d5ac20817637c331df6793d0ff3e874e29da5d65b1", + "next": { + "state": 1, + "output": "e93d50365d74d8a8dc2ff7631dfb506b7e6b2245f7f46556d407e82f197a6c59" + }, + "snap": "875cbdf9ab03c488c075a36ee3ee1e02aef9d5fe9d253a2b1f99b92fe64598b8" + }, + { + "curr": "f413ff9d27e2cad12754ff84ca905f8c309ca7b68a6fbe8e9b01ecd18f5d3759", + "next": { + "state": 1, + "output": "ffbb6cd3a4170dbf547ab0783fea90c1a57a28e562db7bcd3a079374f1e63464" + }, + "snap": "5d198cdc5a2139d92fe74f6541a098c27aba61e8aee543a6a551814aae9adb5a" + }, + { + "curr": 
"1c6f9ec76b06aac2aac77e9a0da2224d120dc25c1cf10211ce33475db4d66f13", + "next": { + "state": 1, + "output": "6473d4a3ff5b6448fc6dfd279ef33bf0b1524d8361b758dbde49fc84691cadbe" + }, + "snap": "6dd30650a7c8cadad545561d732828cf55cefdf5f70c615fbdc33e01c647907b" + }, + { + "curr": "b3b3c9b54db9e08f3994a17a40e5be7583c546488e88523ebf8b444ee53f4aec", + "next": { + "state": 1, + "output": "ed452df8b803190b7a1cf07894c27c03415029272e9c4a3171e7f3ad6e67c90a" + }, + "snap": "7d84d34019975b37758278e858e86265323ddbb7b46f6d679433c93bbae693ee" + }, + { + "curr": "a6c20a247ed2afc2cea7f4dc5856efa61a51b4e4b0318877eebdf8ad47be83b7", + "next": { + "state": 1, + "output": "ce9a7c779d0873ff364a9abd20007bbf7e41646ac4662eb87f89a5c39b69f70d" + }, + "snap": "285ac930ee2bd358d3202666c545fd3b94ee973d1a0cd2569de357042ec12b3d" + }, + { + "curr": "2e779b37b97052a1141a65a92c4ca14a7bd28f7c2d646749b1d584f45d50fa7b", + "next": { + "state": 1, + "output": "e4dba3994ad576489880eee38db2d8c0f8889585e932b7192dd7af168d79b43f" + }, + "snap": "37094a837769dbae5783dca9831be463b895f1b07d1cd24e271966e10503fdfc" + }, + { + "curr": "48f435285dd96511d0822f7ae1a20e28c6c28019e385313713655fc76fe3bc03", + "next": { + "state": 1, + "output": "11f8c2f8e1cb0d47576f74d9e2fa838f5f3a37180907a24a85d0ad8b647862e4" + }, + "snap": "96e0d8bf7d7eb775299edf285b6324499a1a05122d95eed9289c6477cf6a01cb" + }, + { + "curr": "4100ad3b1085bd14d1c808ece3b38db97171532d0d11ed5edd57aff0e416e06a", + "next": { + "state": 1, + "output": "5f351041761b45f3e725f98bb8b6713873e30ab6c8aee56ba0823d357c7ebd0d" + }, + "snap": "23669fa3d310ca8ac8dbe9dcce7e4e4361b1c3334da1dda2fb6447a30c67422f" + }, + { + "curr": "14cc632ab181396418fc761503105047e3b63d0455d0a4e9480578129ea8e9dc", + "next": { + "state": 1, + "output": "a4811c9ba9505e421f0015e5fcfd9f5d204ae85b584766759e844ef85db10d47" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}`) + + var has historyarchive.HistoryArchiveState + err := json.Unmarshal(hasJson, &has) + if err != nil { + return nil, errors.New("unable to unmarshal HAS json") + } + + bucketEntry := xdr.BucketEntry{ + Type: xdr.BucketEntryTypeLiveentry, + LiveEntry: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAFBQT4VRORLEVEECUYDQGWNVQ563ZN76LGRJR7T7KDL32EES54UOQST"), + Balance: xdr.Int64(200000000), + }, + }, + }, + } + mockArchive := &historyarchive.MockArchive{} + mockArchive. + On("CategoryCheckpointExists", "history", uint32(21686847)). + Return(true, nil) + mockArchive. + On("GetCheckpointManager"). + Return(historyarchive.NewCheckpointManager( + historyarchive.DefaultCheckpointFrequency)) + mockArchive. + On("GetCheckpointHAS", uint32(21686847)). + Return(has, nil) + mockArchive. + On("BucketExists", mock.AnythingOfType("historyarchive.Hash")). + Return(true, nil) + mockArchive. + On("BucketSize", mock.AnythingOfType("historyarchive.Hash")). + Return(int64(100), nil) + mockArchive. + On("GetXdrStreamForHash", mock.AnythingOfType("historyarchive.Hash")). 
+ Return(historyarchive.CreateXdrStream(bucketEntry), nil) + return mockArchive, nil +} diff --git a/services/horizon/internal/ingest/ingest_history_range_state_test.go b/services/horizon/internal/ingest/ingest_history_range_state_test.go new file mode 100644 index 0000000000..2b2ebceeaf --- /dev/null +++ b/services/horizon/internal/ingest/ingest_history_range_state_test.go @@ -0,0 +1,591 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package ingest + +import ( + "context" + "testing" + + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest/processors" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +func TestIngestHistoryRangeStateTestSuite(t *testing.T) { + suite.Run(t, new(IngestHistoryRangeStateTestSuite)) +} + +type IngestHistoryRangeStateTestSuite struct { + suite.Suite + ctx context.Context + historyQ *mockDBQ + historyAdapter *mockHistoryArchiveAdapter + ledgerBackend *ledgerbackend.MockDatabaseBackend + runner *mockProcessorsRunner + system *system +} + +func (s *IngestHistoryRangeStateTestSuite) SetupTest() { + s.ctx = context.Background() + s.historyQ = &mockDBQ{} + s.ledgerBackend = &ledgerbackend.MockDatabaseBackend{} + s.historyAdapter = &mockHistoryArchiveAdapter{} + s.runner = &mockProcessorsRunner{} + s.system = &system{ + ctx: s.ctx, + historyQ: s.historyQ, + historyAdapter: s.historyAdapter, + ledgerBackend: s.ledgerBackend, + runner: s.runner, + } + s.system.initMetrics() + + s.historyQ.On("Rollback").Return(nil).Once() + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(100)).Return(false, nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.UnboundedRange(100)).Return(nil).Once() +} + +func (s *IngestHistoryRangeStateTestSuite) TearDownTest() { + t := s.T() + s.historyQ.AssertExpectations(t) + s.ledgerBackend.AssertExpectations(t) + s.historyAdapter.AssertExpectations(t) + s.runner.AssertExpectations(t) +} + +func (s *IngestHistoryRangeStateTestSuite) TestInvalidRange() { + // Recreate mock in this single test to remove Rollback assertion. 
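+	// SetupTest also queues IsPrepared/PrepareRange expectations on the ledger
+	// backend; the invalid ranges below never reach those calls, so both mocks
+	// are reset to keep TearDownTest's AssertExpectations happy.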
+ *s.historyQ = mockDBQ{} + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + next, err := historyRangeState{fromLedger: 0, toLedger: 0}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "invalid range: [0, 0]") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) + + next, err = historyRangeState{fromLedger: 0, toLedger: 100}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "invalid range: [0, 100]") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) + + next, err = historyRangeState{fromLedger: 100, toLedger: 0}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "invalid range: [100, 0]") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) + + next, err = historyRangeState{fromLedger: 100, toLedger: 99}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "invalid range: [100, 99]") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *IngestHistoryRangeStateTestSuite) TestRangeNotPreparedFailPrepare() { + // Recreate mock in this single test to remove assertions. + *s.historyQ = mockDBQ{} + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(100)).Return(false, nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.UnboundedRange(100)).Return(errors.New("my error")).Once() + + next, err := historyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "error preparing range: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *IngestHistoryRangeStateTestSuite) TestBeginReturnsError() { + // Recreate mock in this single test to remove Rollback assertion. + *s.historyQ = mockDBQ{} + + s.historyQ.On("Begin").Return(errors.New("my error")).Once() + + next, err := historyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error starting a transaction: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *IngestHistoryRangeStateTestSuite) TestGetLastLedgerIngestReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), errors.New("my error")).Once() + + next, err := historyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error getting last ingested ledger: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *IngestHistoryRangeStateTestSuite) TestGetLatestLedgerReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(0), errors.New("my error")).Once() + + next, err := historyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "could not get latest history ledger: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +// TestAnotherNodeIngested tests the case when another node has ingested the range. +// In such case we go back to `init` state without processing. 
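+// "Already ingested" is signalled here by GetLatestHistoryLedger returning 200
+// for the requested 100-200 range; no GetLedger or processor expectations are
+// registered, so no ledgers are fetched or processed.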
+func (s *IngestHistoryRangeStateTestSuite) TestAnotherNodeIngested() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(200), nil).Once() + + next, err := historyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *IngestHistoryRangeStateTestSuite) TestRunTransactionProcessorsOnLedgerReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(99), nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 100, + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() + + s.runner.On("RunTransactionProcessorsOnLedger", meta).Return( + processors.StatsLedgerTransactionProcessorResults{}, + processorsRunDurations{}, + processors.TradeStats{}, + errors.New("my error"), + ).Once() + + next, err := historyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "error processing ledger sequence=100: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *IngestHistoryRangeStateTestSuite) TestSuccess() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(99), nil).Once() + + for i := 100; i <= 200; i++ { + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(i), + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(i)).Return(meta, nil).Once() + + s.runner.On("RunTransactionProcessorsOnLedger", meta).Return( + processors.StatsLedgerTransactionProcessorResults{}, + processorsRunDurations{}, + processors.TradeStats{}, + nil, + ).Once() + } + + s.historyQ.On("Commit").Return(nil).Once() + + next, err := historyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *IngestHistoryRangeStateTestSuite) TestSuccessOneLedger() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(99), nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(100), + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() + + s.runner.On("RunTransactionProcessorsOnLedger", meta).Return( + processors.StatsLedgerTransactionProcessorResults{}, + processorsRunDurations{}, + processors.TradeStats{}, + nil, + ).Once() + + s.historyQ.On("Commit").Return(nil).Once() + + next, err := historyRangeState{fromLedger: 100, toLedger: 100}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *IngestHistoryRangeStateTestSuite) 
TestCommitsWorkOnLedgerBackendFailure() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(99), nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(100), + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() + s.ledgerBackend.On("GetLedger", s.ctx, uint32(101)). + Return(xdr.LedgerCloseMeta{}, errors.New("my error")).Once() + + s.runner.On("RunTransactionProcessorsOnLedger", meta).Return( + processors.StatsLedgerTransactionProcessorResults{}, + processorsRunDurations{}, + processors.TradeStats{}, + nil, + ).Once() + + s.historyQ.On("Commit").Return(nil).Once() + + next, err := historyRangeState{fromLedger: 100, toLedger: 102}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "error getting ledger: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func TestReingestHistoryRangeStateTestSuite(t *testing.T) { + suite.Run(t, new(ReingestHistoryRangeStateTestSuite)) +} + +type ReingestHistoryRangeStateTestSuite struct { + suite.Suite + ctx context.Context + historyQ *mockDBQ + historyAdapter *mockHistoryArchiveAdapter + ledgerBackend *mockLedgerBackend + runner *mockProcessorsRunner + system *system +} + +func (s *ReingestHistoryRangeStateTestSuite) SetupTest() { + s.ctx = context.Background() + s.historyQ = &mockDBQ{} + s.historyAdapter = &mockHistoryArchiveAdapter{} + s.ledgerBackend = &mockLedgerBackend{} + s.runner = &mockProcessorsRunner{} + s.system = &system{ + ctx: s.ctx, + historyQ: s.historyQ, + historyAdapter: s.historyAdapter, + ledgerBackend: s.ledgerBackend, + runner: s.runner, + } + + s.historyQ.On("GetTx").Return(nil).Once() + s.historyQ.On("Rollback").Return(nil).Once() + s.historyQ.On("Begin").Return(nil).Once() + + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.BoundedRange(100, 200)).Return(nil).Once() +} + +func (s *ReingestHistoryRangeStateTestSuite) TearDownTest() { + t := s.T() + s.historyQ.AssertExpectations(t) + s.historyAdapter.AssertExpectations(t) + s.runner.AssertExpectations(t) +} + +func (s *ReingestHistoryRangeStateTestSuite) TestInvalidRange() { + // Recreate mock in this single test to remove Rollback assertion. + *s.historyQ = mockDBQ{} + + err := s.system.ReingestRange([]history.LedgerRange{{0, 0}}, false) + s.Assert().EqualError(err, "Invalid range: {0 0} genesis ledger starts at 1") + + err = s.system.ReingestRange([]history.LedgerRange{{0, 100}}, false) + s.Assert().EqualError(err, "Invalid range: {0 100} genesis ledger starts at 1") + + err = s.system.ReingestRange([]history.LedgerRange{{100, 0}}, false) + s.Assert().EqualError(err, "Invalid range: {100 0} from > to") + + err = s.system.ReingestRange([]history.LedgerRange{{100, 99}}, false) + s.Assert().EqualError(err, "Invalid range: {100 99} from > to") +} + +func (s *ReingestHistoryRangeStateTestSuite) TestBeginReturnsError() { + // Recreate mock in this single test to remove Rollback assertion. 
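+	// This suite's SetupTest also stubs Begin to succeed, which would be
+	// consumed before the failing Begin below, so the mock is rebuilt with only
+	// the calls this early-exit path makes.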
+ *s.historyQ = mockDBQ{} + s.historyQ.On("GetTx").Return(nil) + s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), nil).Once() + + s.historyQ.On("Begin").Return(errors.New("my error")).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + s.Assert().EqualError(err, "Error starting a transaction: my error") +} + +func (s *ReingestHistoryRangeStateTestSuite) TestGetLastLedgerIngestNonBlockingError() { + *s.historyQ = mockDBQ{} + s.historyQ.On("GetTx").Return(nil).Once() + + s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), errors.New("my error")).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + s.Assert().EqualError(err, "Error getting last ingested ledger: my error") +} + +func (s *ReingestHistoryRangeStateTestSuite) TestReingestRangeOverlaps() { + *s.historyQ = mockDBQ{} + s.historyQ.On("GetTx").Return(nil).Once() + + s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(190), nil).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + s.Assert().Equal(ErrReingestRangeConflict{190}, err) +} + +func (s *ReingestHistoryRangeStateTestSuite) TestReingestRangeOverlapsAtEnd() { + *s.historyQ = mockDBQ{} + s.historyQ.On("GetTx").Return(nil).Once() + + s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(200), nil).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + s.Assert().Equal(ErrReingestRangeConflict{200}, err) +} + +func (s *ReingestHistoryRangeStateTestSuite) TestClearHistoryFails() { + *s.historyQ = mockDBQ{} + s.historyQ.On("GetTx").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), nil).Once() + + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetTx").Return(&sqlx.Tx{}).Once() + toidFrom := toid.New(100, 0, 0) + toidTo := toid.New(101, 0, 0) + s.historyQ.On( + "DeleteRangeAll", s.ctx, toidFrom.ToInt64(), toidTo.ToInt64(), + ).Return(errors.New("my error")).Once() + + s.historyQ.On("Rollback").Return(nil).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + s.Assert().EqualError(err, "error in DeleteRangeAll: my error") +} + +func (s *ReingestHistoryRangeStateTestSuite) TestRunTransactionProcessorsOnLedgerReturnsError() { + *s.historyQ = mockDBQ{} + s.historyQ.On("GetTx").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), nil).Once() + + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetTx").Return(&sqlx.Tx{}).Once() + toidFrom := toid.New(100, 0, 0) + toidTo := toid.New(101, 0, 0) + s.historyQ.On( + "DeleteRangeAll", s.ctx, toidFrom.ToInt64(), toidTo.ToInt64(), + ).Return(nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(100), + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() + + s.runner.On("RunTransactionProcessorsOnLedger", meta). 
+ Return( + processors.StatsLedgerTransactionProcessorResults{}, + processorsRunDurations{}, + processors.TradeStats{}, + errors.New("my error"), + ).Once() + s.historyQ.On("Rollback").Return(nil).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + s.Assert().EqualError(err, "error processing ledger sequence=100: my error") +} + +func (s *ReingestHistoryRangeStateTestSuite) TestCommitFails() { + *s.historyQ = mockDBQ{} + s.historyQ.On("GetTx").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), nil).Once() + + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetTx").Return(&sqlx.Tx{}).Once() + toidFrom := toid.New(100, 0, 0) + toidTo := toid.New(101, 0, 0) + s.historyQ.On( + "DeleteRangeAll", s.ctx, toidFrom.ToInt64(), toidTo.ToInt64(), + ).Return(nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(100), + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() + + s.runner.On("RunTransactionProcessorsOnLedger", meta).Return( + processors.StatsLedgerTransactionProcessorResults{}, + processorsRunDurations{}, + processors.TradeStats{}, + nil, + ).Once() + + s.historyQ.On("Commit").Return(errors.New("my error")).Once() + s.historyQ.On("Rollback").Return(nil).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + s.Assert().EqualError(err, "Error committing db transaction: my error") +} + +func (s *ReingestHistoryRangeStateTestSuite) TestSuccess() { + *s.historyQ = mockDBQ{} + s.historyQ.On("GetTx").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), nil).Once() + + for i := uint32(100); i <= uint32(200); i++ { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetTx").Return(&sqlx.Tx{}).Once() + + toidFrom := toid.New(int32(i), 0, 0) + toidTo := toid.New(int32(i+1), 0, 0) + s.historyQ.On( + "DeleteRangeAll", s.ctx, toidFrom.ToInt64(), toidTo.ToInt64(), + ).Return(nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(i), + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(i)).Return(meta, nil).Once() + + s.runner.On("RunTransactionProcessorsOnLedger", meta).Return( + processors.StatsLedgerTransactionProcessorResults{}, + processorsRunDurations{}, + processors.TradeStats{}, + nil, + ).Once() + + s.historyQ.On("Commit").Return(nil).Once() + s.historyQ.On("Rollback").Return(nil).Once() + } + s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(100), uint32(200)).Return(nil).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, false) + s.Assert().NoError(err) +} + +func (s *ReingestHistoryRangeStateTestSuite) TestSuccessOneLedger() { + s.historyQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetTx").Return(&sqlx.Tx{}).Once() + + toidFrom := toid.New(100, 0, 0) + toidTo := toid.New(101, 0, 0) + s.historyQ.On( + "DeleteRangeAll", s.ctx, toidFrom.ToInt64(), toidTo.ToInt64(), + ).Return(nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(100), + }, + }, + }, + } + + s.runner.On("RunTransactionProcessorsOnLedger", meta).Return( + 
processors.StatsLedgerTransactionProcessorResults{}, + processorsRunDurations{}, + processors.TradeStats{}, + nil, + ).Once() + s.historyQ.On("Commit").Return(nil).Once() + s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(100), uint32(100)).Return(nil).Once() + + // Recreate mock in this single test to remove previous assertion. + *s.ledgerBackend = mockLedgerBackend{} + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.BoundedRange(100, 100)).Return(nil).Once() + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 100}}, false) + s.Assert().NoError(err) +} + +func (s *ReingestHistoryRangeStateTestSuite) TestGetLastLedgerIngestError() { + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), errors.New("my error")).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true) + s.Assert().EqualError(err, "Error getting last ingested ledger: my error") +} + +func (s *ReingestHistoryRangeStateTestSuite) TestReingestRangeForce() { + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(190), nil).Once() + + s.historyQ.On("GetTx").Return(&sqlx.Tx{}).Once() + + toidFrom := toid.New(100, 0, 0) + toidTo := toid.New(201, 0, 0) + s.historyQ.On( + "DeleteRangeAll", s.ctx, toidFrom.ToInt64(), toidTo.ToInt64(), + ).Return(nil).Once() + + for i := 100; i <= 200; i++ { + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(i), + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(i)).Return(meta, nil).Once() + + s.runner.On("RunTransactionProcessorsOnLedger", meta).Return( + processors.StatsLedgerTransactionProcessorResults{}, + processorsRunDurations{}, + processors.TradeStats{}, + nil, + ).Once() + } + + s.historyQ.On("Commit").Return(nil).Once() + + s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(100), uint32(200)).Return(nil).Once() + + err := s.system.ReingestRange([]history.LedgerRange{{100, 200}}, true) + s.Assert().NoError(err) +} diff --git a/services/horizon/internal/ingest/init_state_test.go b/services/horizon/internal/ingest/init_state_test.go new file mode 100644 index 0000000000..b0ddfb146c --- /dev/null +++ b/services/horizon/internal/ingest/init_state_test.go @@ -0,0 +1,268 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package ingest + +import ( + "context" + "testing" + + "github.com/stellar/go/support/errors" + "github.com/stretchr/testify/suite" +) + +func TestInitStateTestSuite(t *testing.T) { + suite.Run(t, new(InitStateTestSuite)) +} + +type InitStateTestSuite struct { + suite.Suite + ctx context.Context + historyQ *mockDBQ + historyAdapter *mockHistoryArchiveAdapter + system *system +} + +func (s *InitStateTestSuite) SetupTest() { + s.ctx = context.Background() + s.historyQ = &mockDBQ{} + s.historyAdapter = &mockHistoryArchiveAdapter{} + s.system = &system{ + ctx: s.ctx, + historyQ: s.historyQ, + historyAdapter: s.historyAdapter, + } + s.system.initMetrics() + + s.historyQ.On("Rollback").Return(nil).Once() +} + +func (s *InitStateTestSuite) TearDownTest() { + t := s.T() + s.historyQ.AssertExpectations(t) + s.historyAdapter.AssertExpectations(t) +} + +func (s *InitStateTestSuite) TestBeginReturnsError() { + // Recreate mock in this single test to remove Rollback assertion. 
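+	// Begin is the first call startState makes, so nothing else (including
+	// Rollback) is expected when it fails.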
+ *s.historyQ = mockDBQ{} + s.historyQ.On("Begin").Return(errors.New("my error")).Once() + + next, err := startState{}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error starting a transaction: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *InitStateTestSuite) TestGetLastLedgerIngestReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), errors.New("my error")).Once() + + next, err := startState{}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error getting last ingested ledger: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *InitStateTestSuite) TestGetIngestVersionReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(0, errors.New("my error")).Once() + + next, err := startState{}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error getting ingestion version: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *InitStateTestSuite) TestCurrentVersionIsOutdated() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(1), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion+1, nil).Once() + + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal(transition{node: stopState{}, sleepDuration: 0}, next) +} + +func (s *InitStateTestSuite) TestGetLatestLedgerReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(0, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(0), errors.New("my error")).Once() + + next, err := startState{}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error getting last history ledger sequence: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *InitStateTestSuite) TestBuildStateEmptyDatabase() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(0, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(0), nil).Once() + + s.historyAdapter.On("GetLatestLedgerSequence").Return(uint32(63), nil).Once() + + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{node: buildState{checkpointLedger: 63}, sleepDuration: defaultSleep}, + next, + ) +} + +func (s *InitStateTestSuite) TestBuildStateEmptyDatabaseFromSuggestedCheckpoint() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(0, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(0), nil).Once() + + next, err := startState{suggestedCheckpoint: 127}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{node: buildState{checkpointLedger: 127}, sleepDuration: defaultSleep}, + next, + ) +} + +// TestBuildStateWait is testing the case when: +// * the ingest system version has been incremented or no ingest 
ledger, +// * the old system is in front of the latest checkpoint. +func (s *InitStateTestSuite) TestBuildStateWait() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(0, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(100), nil).Once() + + s.historyAdapter.On("GetLatestLedgerSequence").Return(uint32(63), nil).Once() + + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{node: waitForCheckpointState{}, sleepDuration: 0}, + next, + ) +} + +// TestBuildStateCatchup is testing the case when: +// * the ingest system version has been incremented or no ingest ledger, +// * the old system is behind the latest checkpoint. +func (s *InitStateTestSuite) TestBuildStateCatchup() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(0, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(100), nil).Once() + + s.historyAdapter.On("GetLatestLedgerSequence").Return(uint32(127), nil).Once() + + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: historyRangeState{fromLedger: 101, toLedger: 127}, + sleepDuration: defaultSleep, + }, + next, + ) +} + +// TestBuildStateOldHistory is testing the case when: +// * the ingest system version has been incremented or no ingest ledger, +// * the old system latest ledger is equal to the latest checkpoint. +func (s *InitStateTestSuite) TestBuildStateOldHistory() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(127), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(0, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(127), nil).Once() + + s.historyAdapter.On("GetLatestLedgerSequence").Return(uint32(127), nil).Once() + + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: buildState{checkpointLedger: 127}, + sleepDuration: defaultSleep, + }, + next, + ) +} + +// TestResumeStateInFront is testing the case when: +// * state doesn't need to be rebuilt, +// * history is in front of ingest. +func (s *InitStateTestSuite) TestResumeStateInFront() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(130), nil).Once() + + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(0)).Return(nil).Once() + s.historyQ.On("Commit").Return(nil).Once() + + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +// TestResumeStateBehind is testing the case when: +// * state doesn't need to be rebuilt, +// * history is behind of ingest. 
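+// In that case the machine transitions to historyRangeState{fromLedger: 101,
+// toLedger: 130} so the missing history is backfilled before resuming.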
+func (s *InitStateTestSuite) TestResumeStateBehind() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(130), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(100), nil).Once() + + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: historyRangeState{fromLedger: 101, toLedger: 130}, + sleepDuration: defaultSleep, + }, + next, + ) +} + +// TestResumeStateBehindHistory0 is testing the case when: +// * state doesn't need to be rebuilt or was just rebuilt, +// * there are no ledgers in history tables. +// In such case we load offers and continue ingesting the next ledger. +func (s *InitStateTestSuite) TestResumeStateBehindHistory0() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(130), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(0), nil).Once() + + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: 130}, + sleepDuration: defaultSleep, + }, + next, + ) +} + +// TestResumeStateBehind is testing the case when: +// * state doesn't need to be rebuilt, +// * history is in sync with ingest. +func (s *InitStateTestSuite) TestResumeStateSync() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(130), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(130), nil).Once() + + next, err := startState{}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: 130}, + sleepDuration: defaultSleep, + }, + next, + ) +} diff --git a/services/horizon/internal/ingest/logger_change_reader_test.go b/services/horizon/internal/ingest/logger_change_reader_test.go new file mode 100644 index 0000000000..e13f1abae1 --- /dev/null +++ b/services/horizon/internal/ingest/logger_change_reader_test.go @@ -0,0 +1,89 @@ +package ingest + +import ( + "errors" + "testing" + + "io" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func allChanges(changeReader ingest.ChangeReader) ([]ingest.Change, error) { + all := []ingest.Change{} + for { + change, err := changeReader.Read() + if err != nil { + return all, err + } + all = append(all, change) + } +} + +func createMockReader(changes []ingest.Change, err error) *ingest.MockChangeReader { + mockChangeReader := &ingest.MockChangeReader{} + for _, change := range changes { + mockChangeReader.On("Read"). + Return(change, nil).Once() + } + mockChangeReader.On("Read"). 
+ Return(ingest.Change{}, err).Once() + + return mockChangeReader +} + +func TestLoggingChangeReader(t *testing.T) { + for _, testCase := range []struct { + name string + changes []ingest.Change + err error + }{ + { + "empty list with error", + []ingest.Change{}, + errors.New("test error"), + }, + { + "empty list with no errors", + []ingest.Change{}, + io.EOF, + }, + { + "non empty list and error", + []ingest.Change{ + {Type: xdr.LedgerEntryTypeAccount}, + {Type: xdr.LedgerEntryTypeOffer}, + }, + errors.New("test error"), + }, + { + "non empty list with no errors", + []ingest.Change{ + {Type: xdr.LedgerEntryTypeOffer}, + {Type: xdr.LedgerEntryTypeAccount}, + }, + io.EOF, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + m := createMockReader(testCase.changes, testCase.err) + reader := newloggingChangeReader( + m, + "test", + 2, + 1, + false, + ) + + all, err := allChanges(reader) + assert.Equal(t, testCase.changes, all) + assert.Equal(t, testCase.err, err) + assert.Equal(t, len(testCase.changes), reader.entryCount) + assert.Equal(t, uint32(2), reader.sequence) + assert.Equal(t, 1, reader.frequency) + m.AssertExpectations(t) + }) + } +} diff --git a/services/horizon/internal/ingest/logging_change_reader.go b/services/horizon/internal/ingest/logging_change_reader.go new file mode 100644 index 0000000000..a91a269f27 --- /dev/null +++ b/services/horizon/internal/ingest/logging_change_reader.go @@ -0,0 +1,77 @@ +package ingest + +import ( + "fmt" + "runtime" + + "github.com/stellar/go/ingest" +) + +func bToMb(b uint64) uint64 { + return b / 1024 / 1024 +} + +func getMemStats() (uint64, uint64) { + var m runtime.MemStats + runtime.ReadMemStats(&m) + + return bToMb(m.HeapAlloc), bToMb(m.HeapSys) +} + +type loggingChangeReader struct { + ingest.ChangeReader + profile bool + entryCount int + // how often should the logger report + frequency int + source string + sequence uint32 +} + +func newloggingChangeReader( + reader ingest.ChangeReader, + source string, + sequence uint32, + every int, + profile bool, +) *loggingChangeReader { + return &loggingChangeReader{ + ChangeReader: reader, + frequency: every, + profile: profile, + source: source, + sequence: sequence, + } +} + +// Read returns a new ledger entry change on each call, returning io.EOF when the stream ends. +func (lcr *loggingChangeReader) Read() (ingest.Change, error) { + change, err := lcr.ChangeReader.Read() + + if err == nil { + lcr.entryCount++ + + if lcr.entryCount%lcr.frequency == 0 { + logger := log.WithField("processed_entries", lcr.entryCount). + WithField("source", lcr.source). + WithField("sequence", lcr.sequence) + + if reader, ok := lcr.ChangeReader.(*ingest.CheckpointChangeReader); ok { + logger = logger.WithField( + "progress", + fmt.Sprintf("%.02f%%", reader.Progress()), + ) + } + + if lcr.profile { + curHeap, sysHeap := getMemStats() + logger = logger. + WithField("currentHeapSizeMB", curHeap). + WithField("systemHeapSizeMB", sysHeap) + } + logger.Info("Processing ledger entry changes") + } + } + + return change, err +} diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go new file mode 100644 index 0000000000..53abbfa9e8 --- /dev/null +++ b/services/horizon/internal/ingest/main.go @@ -0,0 +1,712 @@ +// Package ingest contains the new ingestion system for horizon. +// It currently runs completely independent of the old one, that means +// that the new system can be ledgers behind/ahead the old system. 
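+//
+// The package is organized around a small state machine: a system instance
+// runs states such as startState, buildState, resumeState, historyRangeState
+// and verifyRangeState, each returning the transition to execute next.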
+package ingest + +import ( + "context" + "fmt" + "net/http" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stellar/go/clients/stellarcore" + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + logpkg "github.com/stellar/go/support/log" +) + +const ( + // MaxSupportedProtocolVersion defines the maximum supported version of + // the Stellar protocol. + MaxSupportedProtocolVersion uint32 = 18 + + // CurrentVersion reflects the latest version of the ingestion + // algorithm. This value is stored in KV store and is used to decide + // if there's a need to reprocess the ledger state or reingest data. + // + // Version history: + // - 1: Initial version + // - 2: Added the orderbook, offers processors and distributed ingestion. + // - 3: Fixed a bug that could potentialy result in invalid state + // (#1722). Update the version to clear the state. + // - 4: Fixed a bug in AccountSignersChanged method. + // - 5: Added trust lines. + // - 6: Added accounts and accounts data. + // - 7: Fixes a bug in AccountSignersChanged method. + // - 8: Fixes AccountSigners processor to remove preauth tx signer + // when preauth tx is failed. + // - 9: Fixes a bug in asset stats processor that counted unauthorized + // trustlines. + // - 10: Fixes a bug in meta processing (fees are now processed before + // everything else). + // - 11: Protocol 14: CAP-23 and CAP-33. + // - 12: Trigger state rebuild due to `absTime` -> `abs_time` rename + // in ClaimableBalances predicates. + // - 13: Trigger state rebuild to include more than just authorized assets. + // - 14: Trigger state rebuild to include claimable balances in the asset stats processor. + // - 15: Fixed bug in asset stat ingestion where clawback is enabled (#3846). + CurrentVersion = 15 + + // MaxDBConnections is the size of the postgres connection pool dedicated to Horizon ingestion: + // * Ledger ingestion, + // * State verifications, + // * Metrics updates. + MaxDBConnections = 3 + + defaultCoreCursorName = "HORIZON" + stateVerificationErrorThreshold = 3 +) + +var log = logpkg.DefaultLogger.WithField("service", "ingest") + +type Config struct { + CoreSession db.SessionInterface + StellarCoreURL string + StellarCoreCursor string + EnableCaptiveCore bool + CaptiveCoreBinaryPath string + CaptiveCoreStoragePath string + CaptiveCoreToml *ledgerbackend.CaptiveCoreToml + RemoteCaptiveCoreURL string + NetworkPassphrase string + + HistorySession db.SessionInterface + HistoryArchiveURL string + + DisableStateVerification bool + EnableExtendedLogLedgerStats bool + + MaxReingestRetries int + ReingestRetryBackoffSeconds int + + // The checkpoint frequency will be 64 unless you are using an exotic test setup. 
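+	// As an illustration (assuming the standard Stellar history-archive
+	// checkpoint layout), a frequency of 64 means checkpoints fall on ledgers
+	// 63, 127, 191, and so on, i.e. every ledger l where (l+1)%64 == 0, which
+	// is what the checkpoint manager uses to decide when to verify state.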
+ CheckpointFrequency uint32 +} + +const ( + getLastIngestedErrMsg string = "Error getting last ingested ledger" + getIngestVersionErrMsg string = "Error getting ingestion version" + updateLastLedgerIngestErrMsg string = "Error updating last ingested ledger" + commitErrMsg string = "Error committing db transaction" + updateExpStateInvalidErrMsg string = "Error updating state invalid value" +) + +type stellarCoreClient interface { + SetCursor(ctx context.Context, id string, cursor int32) error +} + +type Metrics struct { + // MaxSupportedProtocolVersion exposes the maximum protocol version + // supported by this version. + MaxSupportedProtocolVersion prometheus.Gauge + + // LocalLedger exposes the last ingested ledger by this ingesting instance. + LocalLatestLedger prometheus.Gauge + + // LedgerIngestionDuration exposes timing metrics about the rate and + // duration of ledger ingestion (including updating DB and graph). + LedgerIngestionDuration prometheus.Summary + + // LedgerIngestionTradeAggregationDuration exposes timing metrics about the rate and + // duration of rebuilding trade aggregation buckets. + LedgerIngestionTradeAggregationDuration prometheus.Summary + + // StateVerifyDuration exposes timing metrics about the rate and + // duration of state verification. + StateVerifyDuration prometheus.Summary + + // StateInvalidGauge exposes state invalid metric. 1 if state is invalid, + // 0 otherwise. + StateInvalidGauge prometheus.GaugeFunc + + // StateVerifyLedgerEntriesCount exposes total number of ledger entries + // checked by the state verifier by type. + StateVerifyLedgerEntriesCount *prometheus.GaugeVec + + // LedgerStatsCounter exposes ledger stats counters (like number of ops/changes). + LedgerStatsCounter *prometheus.CounterVec + + // ProcessorsRunDuration exposes processors run durations. + // Deprecated in favour of: ProcessorsRunDurationSummary. + ProcessorsRunDuration *prometheus.CounterVec + + // ProcessorsRunDurationSummary exposes processors run durations. + ProcessorsRunDurationSummary *prometheus.SummaryVec + + // LedgerFetchDurationSummary exposes a summary of durations required to + // fetch data from ledger backend. + LedgerFetchDurationSummary prometheus.Summary + + // CaptiveStellarCoreSynced exposes synced status of Captive Stellar-Core. + // 1 if sync, 0 if not synced, -1 if unable to connect or HTTP server disabled. + CaptiveStellarCoreSynced prometheus.GaugeFunc + + // CaptiveCoreSupportedProtocolVersion exposes the maximum protocol version + // supported by the running Captive-Core. + CaptiveCoreSupportedProtocolVersion prometheus.GaugeFunc +} + +type System interface { + Run() + RegisterMetrics(*prometheus.Registry) + Metrics() Metrics + StressTest(numTransactions, changesPerTransaction int) error + VerifyRange(fromLedger, toLedger uint32, verifyState bool) error + ReingestRange(ledgerRanges []history.LedgerRange, force bool) error + BuildGenesisState() error + Shutdown() +} + +type system struct { + metrics Metrics + ctx context.Context + cancel context.CancelFunc + + config Config + + historyQ history.IngestionQ + runner ProcessorRunnerInterface + + ledgerBackend ledgerbackend.LedgerBackend + historyAdapter historyArchiveAdapterInterface + + stellarCoreClient stellarCoreClient + + maxReingestRetries int + reingestRetryBackoffSeconds int + wg sync.WaitGroup + + // stateVerificationRunning is true when verification routine is currently + // running. 
+ stateVerificationMutex sync.Mutex + // number of consecutive state verification runs which encountered errors + stateVerificationErrors int + stateVerificationRunning bool + disableStateVerification bool + + checkpointManager historyarchive.CheckpointManager +} + +func NewSystem(config Config) (System, error) { + ctx, cancel := context.WithCancel(context.Background()) + + archive, err := historyarchive.Connect( + config.HistoryArchiveURL, + historyarchive.ConnectOptions{ + Context: ctx, + NetworkPassphrase: config.NetworkPassphrase, + CheckpointFrequency: config.CheckpointFrequency, + }, + ) + if err != nil { + cancel() + return nil, errors.Wrap(err, "error creating history archive") + } + + var ledgerBackend ledgerbackend.LedgerBackend + if config.EnableCaptiveCore { + if len(config.RemoteCaptiveCoreURL) > 0 { + ledgerBackend, err = ledgerbackend.NewRemoteCaptive(config.RemoteCaptiveCoreURL) + if err != nil { + cancel() + return nil, errors.Wrap(err, "error creating captive core backend") + } + } else { + logger := log.WithField("subservice", "stellar-core") + ledgerBackend, err = ledgerbackend.NewCaptive( + ledgerbackend.CaptiveCoreConfig{ + BinaryPath: config.CaptiveCoreBinaryPath, + StoragePath: config.CaptiveCoreStoragePath, + Toml: config.CaptiveCoreToml, + NetworkPassphrase: config.NetworkPassphrase, + HistoryArchiveURLs: []string{config.HistoryArchiveURL}, + CheckpointFrequency: config.CheckpointFrequency, + LedgerHashStore: ledgerbackend.NewHorizonDBLedgerHashStore(config.HistorySession), + Log: logger, + Context: ctx, + }, + ) + if err != nil { + cancel() + return nil, errors.Wrap(err, "error creating captive core backend") + } + } + } else { + coreSession := config.CoreSession.Clone() + ledgerBackend, err = ledgerbackend.NewDatabaseBackendFromSession(coreSession, config.NetworkPassphrase) + if err != nil { + cancel() + return nil, errors.Wrap(err, "error creating ledger backend") + } + } + + historyQ := &history.Q{config.HistorySession.Clone()} + + historyAdapter := newHistoryArchiveAdapter(archive) + + system := &system{ + cancel: cancel, + config: config, + ctx: ctx, + disableStateVerification: config.DisableStateVerification, + historyAdapter: historyAdapter, + historyQ: historyQ, + ledgerBackend: ledgerBackend, + maxReingestRetries: config.MaxReingestRetries, + reingestRetryBackoffSeconds: config.ReingestRetryBackoffSeconds, + stellarCoreClient: &stellarcore.Client{ + URL: config.StellarCoreURL, + }, + runner: &ProcessorRunner{ + ctx: ctx, + config: config, + historyQ: historyQ, + historyAdapter: historyAdapter, + }, + checkpointManager: historyarchive.NewCheckpointManager(config.CheckpointFrequency), + } + + system.initMetrics() + return system, nil +} + +func (s *system) initMetrics() { + s.metrics.MaxSupportedProtocolVersion = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "max_supported_protocol_version", + Help: "the maximum protocol version supported by this version.", + }) + + s.metrics.MaxSupportedProtocolVersion.Set(float64(MaxSupportedProtocolVersion)) + + s.metrics.LocalLatestLedger = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "local_latest_ledger", + Help: "sequence number of the latest ledger ingested by this ingesting instance", + }) + + s.metrics.LedgerIngestionDuration = prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "ledger_ingestion_duration_seconds", + Help: "ledger ingestion durations, sliding window = 10m", + 
}) + + s.metrics.LedgerIngestionTradeAggregationDuration = prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "ledger_ingestion_trade_aggregation_duration_seconds", + Help: "ledger ingestion trade aggregation rebuild durations, sliding window = 10m", + }) + + s.metrics.StateVerifyDuration = prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "state_verify_duration_seconds", + Help: "state verification durations, sliding window = 10m", + }) + + s.metrics.StateInvalidGauge = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "state_invalid", + Help: "equals 1 if state invalid, 0 otherwise", + }, + func() float64 { + invalid, err := s.historyQ.CloneIngestionQ().GetExpStateInvalid(s.ctx) + if err != nil { + log.WithError(err).Error("Error in initMetrics/GetExpStateInvalid") + return 0 + } + invalidFloat := float64(0) + if invalid { + invalidFloat = 1 + } + return invalidFloat + }, + ) + + s.metrics.StateVerifyLedgerEntriesCount = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "state_verify_ledger_entries", + Help: "number of ledger entries downloaded from buckets in a single state verifier run", + }, + []string{"type"}, + ) + + s.metrics.LedgerStatsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "ledger_stats_total", + Help: "counters of different ledger stats", + }, + []string{"type"}, + ) + + s.metrics.ProcessorsRunDuration = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "processor_run_duration_seconds_total", + Help: "run durations of ingestion processors", + }, + []string{"name"}, + ) + + s.metrics.ProcessorsRunDurationSummary = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "processor_run_duration_seconds", + Help: "run durations of ingestion processors, sliding window = 10m", + }, + []string{"name"}, + ) + + s.metrics.LedgerFetchDurationSummary = prometheus.NewSummary( + prometheus.SummaryOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "ledger_fetch_duration_seconds", + Help: "duration of fetching ledgers from ledger backend, sliding window = 10m", + }, + ) + + s.metrics.CaptiveStellarCoreSynced = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "captive_stellar_core_synced", + Help: "1 if sync, 0 if not synced, -1 if unable to connect or HTTP server disabled.", + }, + func() float64 { + if !s.config.EnableCaptiveCore || (s.config.CaptiveCoreToml.HTTPPort == 0) { + return -1 + } + + client := stellarcore.Client{ + HTTP: &http.Client{ + Timeout: 2 * time.Second, + }, + URL: fmt.Sprintf("http://localhost:%d", s.config.CaptiveCoreToml.HTTPPort), + } + + info, err := client.Info(s.ctx) + if err != nil { + log.WithError(err).Error("Cannot connect to Captive Stellar-Core HTTP server") + return -1 + } + + if info.IsSynced() { + return 1 + } else { + return 0 + } + }, + ) + + s.metrics.CaptiveCoreSupportedProtocolVersion = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "ingest", Name: "captive_stellar_core_supported_protocol_version", + Help: "determines the supported version of the protocol by Captive-Core", + }, + func() float64 { + if !s.config.EnableCaptiveCore || (s.config.CaptiveCoreToml.HTTPPort == 0) { + return -1 + } + + client 
:= stellarcore.Client{
+				HTTP: &http.Client{
+					Timeout: 2 * time.Second,
+				},
+				URL: fmt.Sprintf("http://localhost:%d", s.config.CaptiveCoreToml.HTTPPort),
+			}
+
+			info, err := client.Info(s.ctx)
+			if err != nil {
+				log.WithError(err).Error("Cannot connect to Captive Stellar-Core HTTP server")
+				return -1
+			}
+
+			return float64(info.Info.ProtocolVersion)
+		},
+	)
+}
+
+func (s *system) Metrics() Metrics {
+	return s.metrics
+}
+
+// RegisterMetrics registers the prometheus metrics
+func (s *system) RegisterMetrics(registry *prometheus.Registry) {
+	registry.MustRegister(s.metrics.MaxSupportedProtocolVersion)
+	registry.MustRegister(s.metrics.LocalLatestLedger)
+	registry.MustRegister(s.metrics.LedgerIngestionDuration)
+	registry.MustRegister(s.metrics.LedgerIngestionTradeAggregationDuration)
+	registry.MustRegister(s.metrics.StateVerifyDuration)
+	registry.MustRegister(s.metrics.StateInvalidGauge)
+	registry.MustRegister(s.metrics.LedgerStatsCounter)
+	registry.MustRegister(s.metrics.ProcessorsRunDuration)
+	registry.MustRegister(s.metrics.ProcessorsRunDurationSummary)
+	registry.MustRegister(s.metrics.CaptiveStellarCoreSynced)
+	registry.MustRegister(s.metrics.CaptiveCoreSupportedProtocolVersion)
+	registry.MustRegister(s.metrics.LedgerFetchDurationSummary)
+	registry.MustRegister(s.metrics.StateVerifyLedgerEntriesCount)
+}
+
+// Run starts the ingestion system. The ingestion system supports distributed
+// ingestion: Horizon ingestion can run on multiple machines and only one,
+// random node will lead the ingestion.
+//
+// It needs to support the cartesian product of the following run scenarios:
+// - Init from empty state (1a) and resuming from existing state (1b).
+// - Ingestion system version has been upgraded (2a) or not (2b).
+// - Current node is leading ingestion (3a) or not (3b).
+//
+// We always clear state when the ingestion system is upgraded so 2a and 2b are
+// included in 1a.
+//
+// We ensure that only one instance is a leader because in each round instances
+// try to acquire a lock on the `LastLedgerIngest` value in the key value store
+// and only one instance will be able to acquire it. This happens in both
+// initial processing and ledger processing, so this solves 3a and 3b in both
+// 1a and 1b.
+//
+// Finally, 1a and 1b are tricky because we need to keep the latest version
+// of the order book graph in memory on each Horizon instance. To solve this:
+// * For state init:
+//   * If the instance is the leader, we update the order book graph by running
+//     the state pipeline normally.
+//   * If the instance is NOT the leader, we build a graph from offers present
+//     in the database. We completely omit the state pipeline in this case.
+// * For resuming:
+//   * If the instance is the leader, it runs the full ledger pipeline,
+//     including updating the database.
+//   * If the instance is NOT the leader, it runs the ledger pipeline without
+//     updating the database, so the order book graph is updated but the
+//     database is not overwritten.
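+//
+// A rough sketch of a single round from any node's perspective (simplified and
+// illustrative only; the actual transitions live in the individual state
+// machine nodes, and GetLastLedgerIngest is assumed to block on the
+// `LastLedgerIngest` row as described above):
+//
+//	if err := historyQ.Begin(); err != nil {
+//		return err
+//	}
+//	defer historyQ.Rollback()
+//	// Only the instance that acquires the lock proceeds as the leader.
+//	lastIngestedLedger, err := historyQ.GetLastLedgerIngest(ctx)
+//	// ...leader: run processors and update the DB; follower: only refresh
+//	// the in-memory order book graph...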
+func (s *system) Run() { + s.runStateMachine(startState{}) +} + +func (s *system) StressTest(numTransactions, changesPerTransaction int) error { + if numTransactions <= 0 { + return errors.New("transactions must be positive") + } + if changesPerTransaction <= 0 { + return errors.New("changes per transaction must be positive") + } + + s.runner.EnableMemoryStatsLogging() + s.ledgerBackend = &fakeLedgerBackend{ + numTransactions: numTransactions, + changesPerTransaction: changesPerTransaction, + } + return s.runStateMachine(stressTestState{}) +} + +// VerifyRange runs the ingestion pipeline on the range of ledgers. When +// verifyState is true it verifies the state when ingestion is complete. +func (s *system) VerifyRange(fromLedger, toLedger uint32, verifyState bool) error { + return s.runStateMachine(verifyRangeState{ + fromLedger: fromLedger, + toLedger: toLedger, + verifyState: verifyState, + }) +} + +func validateRanges(ledgerRanges []history.LedgerRange) error { + for i, cur := range ledgerRanges { + if cur.StartSequence > cur.EndSequence { + return errors.Errorf("Invalid range: %v from > to", cur) + } + if cur.StartSequence == 0 { + return errors.Errorf("Invalid range: %v genesis ledger starts at 1", cur) + } + if i == 0 { + continue + } + prev := ledgerRanges[i-1] + if prev.EndSequence >= cur.StartSequence { + return errors.Errorf("ranges are not sorted prevRange %v curRange %v", prev, cur) + } + } + return nil +} + +// ReingestRange runs the ingestion pipeline on the range of ledgers ingesting +// history data only. +func (s *system) ReingestRange(ledgerRanges []history.LedgerRange, force bool) error { + if err := validateRanges(ledgerRanges); err != nil { + return err + } + for _, cur := range ledgerRanges { + run := func() error { + return s.runStateMachine(reingestHistoryRangeState{ + fromLedger: cur.StartSequence, + toLedger: cur.EndSequence, + force: force, + }) + } + err := run() + for retry := 0; err != nil && retry < s.maxReingestRetries; retry++ { + log.Warnf("reingest range [%d, %d] failed (%s), retrying", cur.StartSequence, cur.EndSequence, err.Error()) + time.Sleep(time.Second * time.Duration(s.reingestRetryBackoffSeconds)) + err = run() + } + if err != nil { + return err + } + } + return nil +} + +// BuildGenesisState runs the ingestion pipeline on genesis ledger. Transitions +// to stopState when done. +func (s *system) BuildGenesisState() error { + return s.runStateMachine(buildState{ + checkpointLedger: 1, + stop: true, + }) +} + +func (s *system) runStateMachine(cur stateMachineNode) error { + s.wg.Add(1) + defer func() { + s.wg.Done() + s.wg.Wait() + }() + + log.WithFields(logpkg.F{"current_state": cur}).Info("Ingestion system initial state") + + for { + // Every node in the state machine is responsible for + // creating and disposing its own transaction. + // We should never enter a new state with the transaction + // from the previous state. 
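+		// For example, a state that calls Begin() must Commit() or Rollback()
+		// before returning; the check below panics instead of silently reusing
+		// a stale transaction.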
+ if s.historyQ.GetTx() != nil { + panic("unexpected transaction") + } + + next, err := cur.run(s) + if err != nil { + logger := log.WithFields(logpkg.F{ + "error": err, + "current_state": cur, + "next_state": next.node, + }) + if isCancelledError(err) { + // We only expect context.Canceled errors to occur when horizon is shutting down + // so we log these errors using the info log level + logger.Info("Error in ingestion state machine") + } else { + logger.Error("Error in ingestion state machine") + } + } + + // Exit after processing shutdownState + if next.node == (stopState{}) { + log.Info("Shut down") + return err + } + + select { + case <-s.ctx.Done(): + log.Info("Received shut down signal...") + return nil + case <-time.After(next.sleepDuration): + } + + log.WithFields(logpkg.F{ + "current_state": cur, + "next_state": next.node, + }).Info("Ingestion system state machine transition") + cur = next.node + } +} + +func (s *system) maybeVerifyState(lastIngestedLedger uint32) { + stateInvalid, err := s.historyQ.GetExpStateInvalid(s.ctx) + if err != nil && !isCancelledError(err) { + log.WithField("err", err).Error("Error getting state invalid value") + } + + // Run verification routine only when... + if !stateInvalid && // state has not been proved to be invalid... + !s.disableStateVerification && // state verification is not disabled... + s.checkpointManager.IsCheckpoint(lastIngestedLedger) { // it's a checkpoint ledger. + s.wg.Add(1) + go func() { + defer s.wg.Done() + + err := s.verifyState(true) + if err != nil { + if isCancelledError(err) { + return + } + + errorCount := s.incrementStateVerificationErrors() + switch errors.Cause(err).(type) { + case ingest.StateError: + markStateInvalid(s.ctx, s.historyQ, err) + default: + logger := log.WithField("err", err).Warn + if errorCount >= stateVerificationErrorThreshold { + logger = log.WithField("err", err).Error + } + logger("State verification errored") + } + } else { + s.resetStateVerificationErrors() + } + }() + } +} + +func (s *system) incrementStateVerificationErrors() int { + s.stateVerificationMutex.Lock() + defer s.stateVerificationMutex.Unlock() + s.stateVerificationErrors++ + return s.stateVerificationErrors +} + +func (s *system) resetStateVerificationErrors() { + s.stateVerificationMutex.Lock() + defer s.stateVerificationMutex.Unlock() + s.stateVerificationErrors = 0 +} + +func (s *system) updateCursor(ledgerSequence uint32) error { + if s.stellarCoreClient == nil || s.config.EnableCaptiveCore { + return nil + } + + cursor := defaultCoreCursorName + if s.config.StellarCoreCursor != "" { + cursor = s.config.StellarCoreCursor + } + + ctx, cancel := context.WithTimeout(s.ctx, time.Second) + defer cancel() + err := s.stellarCoreClient.SetCursor(ctx, cursor, int32(ledgerSequence)) + if err != nil { + return errors.Wrap(err, "Setting stellar-core cursor failed") + } + + return nil +} + +func (s *system) Shutdown() { + log.Info("Shutting down ingestion system...") + s.stateVerificationMutex.Lock() + if s.stateVerificationRunning { + log.Info("Shutting down state verifier...") + } + s.stateVerificationMutex.Unlock() + s.cancel() + // wait for ingestion state machine to terminate + s.wg.Wait() + if err := s.ledgerBackend.Close(); err != nil { + log.WithError(err).Info("could not close ledger backend") + } +} + +func markStateInvalid(ctx context.Context, historyQ history.IngestionQ, err error) { + log.WithField("err", err).Error("STATE IS INVALID!") + q := historyQ.CloneIngestionQ() + if err := q.UpdateExpStateInvalid(ctx, true); err != 
nil { + log.WithField("err", err).Error(updateExpStateInvalidErrMsg) + } +} + +func isCancelledError(err error) bool { + cause := errors.Cause(err) + return cause == context.Canceled || cause == db.ErrCancelled +} diff --git a/services/horizon/internal/ingest/main_test.go b/services/horizon/internal/ingest/main_test.go new file mode 100644 index 0000000000..63c0f19f87 --- /dev/null +++ b/services/horizon/internal/ingest/main_test.go @@ -0,0 +1,513 @@ +package ingest + +import ( + "bytes" + "context" + "database/sql" + "testing" + + "github.com/jmoiron/sqlx" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest/processors" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/errors" + logpkg "github.com/stellar/go/support/log" + strtime "github.com/stellar/go/support/time" + "github.com/stellar/go/xdr" +) + +var ( + issuer = xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + usdAsset = xdr.Asset{ + Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: [4]byte{'u', 's', 'd', 0}, + Issuer: issuer, + }, + } + + nativeAsset = xdr.Asset{ + Type: xdr.AssetTypeAssetTypeNative, + } + + eurAsset = xdr.Asset{ + Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: [4]byte{'e', 'u', 'r', 0}, + Issuer: issuer, + }, + } + eurOffer = xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(4), + Buying: eurAsset, + Selling: nativeAsset, + Price: xdr.Price{ + N: 1, + D: 1, + }, + Flags: 1, + Amount: xdr.Int64(500), + } + twoEurOffer = xdr.OfferEntry{ + SellerId: issuer, + OfferId: xdr.Int64(5), + Buying: eurAsset, + Selling: nativeAsset, + Price: xdr.Price{ + N: 2, + D: 1, + }, + Flags: 2, + Amount: xdr.Int64(500), + } +) + +func TestCheckVerifyStateVersion(t *testing.T) { + assert.Equal( + t, + CurrentVersion, + stateVerifierExpectedIngestionVersion, + "State verifier is outdated, update it, then update stateVerifierExpectedIngestionVersion value", + ) +} + +func TestNewSystem(t *testing.T) { + config := Config{ + CoreSession: &db.Session{DB: &sqlx.DB{}}, + HistorySession: &db.Session{DB: &sqlx.DB{}}, + DisableStateVerification: true, + HistoryArchiveURL: "https://history.stellar.org/prd/core-live/core_live_001", + CheckpointFrequency: 64, + } + + sIface, err := NewSystem(config) + assert.NoError(t, err) + system := sIface.(*system) + + assert.Equal(t, config, system.config) + assert.Equal(t, config.DisableStateVerification, system.disableStateVerification) + + assert.Equal(t, config, system.runner.(*ProcessorRunner).config) + assert.Equal(t, system.ctx, system.runner.(*ProcessorRunner).ctx) +} + +func TestStateMachineRunReturnsUnexpectedTransaction(t *testing.T) { + historyQ := &mockDBQ{} + system := &system{ + historyQ: historyQ, + ctx: context.Background(), + } + + historyQ.On("GetTx").Return(&sqlx.Tx{}).Once() + + assert.PanicsWithValue(t, "unexpected transaction", func() { + system.Run() + }) +} + +func TestStateMachineTransition(t *testing.T) { + historyQ := &mockDBQ{} + system := &system{ + historyQ: historyQ, + ctx: context.Background(), + } + + historyQ.On("GetTx").Return(nil).Once() + historyQ.On("Begin").Return(errors.New("my error")).Once() + 
historyQ.On("GetTx").Return(&sqlx.Tx{}).Once() + + assert.PanicsWithValue(t, "unexpected transaction", func() { + system.Run() + }) +} + +func TestContextCancel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + historyQ := &mockDBQ{} + system := &system{ + historyQ: historyQ, + ctx: ctx, + } + + historyQ.On("GetTx").Return(nil).Once() + historyQ.On("Begin").Return(errors.New("my error")).Once() + + cancel() + assert.NoError(t, system.runStateMachine(startState{})) +} + +// TestStateMachineRunReturnsErrorWhenNextStateIsShutdownWithError checks if the +// state that goes to shutdownState and returns an error will make `run` function +// return that error. This is essential because some commands rely on this to return +// non-zero exit code. +func TestStateMachineRunReturnsErrorWhenNextStateIsShutdownWithError(t *testing.T) { + historyQ := &mockDBQ{} + system := &system{ + ctx: context.Background(), + historyQ: historyQ, + } + + historyQ.On("GetTx").Return(nil).Once() + + err := system.runStateMachine(verifyRangeState{}) + assert.Error(t, err) + assert.EqualError(t, err, "invalid range: [0, 0]") +} + +func TestMaybeVerifyStateGetExpStateInvalidDBErrCancelOrContextCanceled(t *testing.T) { + historyQ := &mockDBQ{} + system := &system{ + historyQ: historyQ, + ctx: context.Background(), + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + var out bytes.Buffer + logger := logpkg.New() + logger.SetOutput(&out) + done := logger.StartTest(logpkg.InfoLevel) + + oldLogger := log + log = logger + defer func() { log = oldLogger }() + + historyQ.On("GetExpStateInvalid", system.ctx).Return(false, db.ErrCancelled).Once() + system.maybeVerifyState(0) + + historyQ.On("GetExpStateInvalid", system.ctx).Return(false, context.Canceled).Once() + system.maybeVerifyState(0) + + logged := done() + assert.Len(t, logged, 0) + historyQ.AssertExpectations(t) +} +func TestMaybeVerifyInternalDBErrCancelOrContextCanceled(t *testing.T) { + historyQ := &mockDBQ{} + system := &system{ + historyQ: historyQ, + ctx: context.Background(), + checkpointManager: historyarchive.NewCheckpointManager(64), + } + + var out bytes.Buffer + logger := logpkg.New() + logger.SetOutput(&out) + done := logger.StartTest(logpkg.InfoLevel) + + oldLogger := log + log = logger + defer func() { log = oldLogger }() + + historyQ.On("GetExpStateInvalid", system.ctx, mock.Anything).Return(false, nil).Twice() + historyQ.On("Rollback").Return(nil).Twice() + historyQ.On("CloneIngestionQ").Return(historyQ).Twice() + + historyQ.On("BeginTx", mock.Anything).Return(db.ErrCancelled).Once() + system.maybeVerifyState(63) + system.wg.Wait() + + historyQ.On("BeginTx", mock.Anything).Return(context.Canceled).Once() + system.maybeVerifyState(63) + system.wg.Wait() + + logged := done() + + // it logs "State verification finished" twice, but no errors + assert.Len(t, logged, 0) + + historyQ.AssertExpectations(t) +} + +type mockDBQ struct { + mock.Mock + + history.MockQAccounts + history.MockQClaimableBalances + history.MockQHistoryClaimableBalances + history.MockQLiquidityPools + history.MockQHistoryLiquidityPools + history.MockQAssetStats + history.MockQData + history.MockQEffects + history.MockQLedgers + history.MockQOffers + history.MockQOperations + history.MockQSigners + history.MockQTransactions + history.MockQTrustLines +} + +func (m *mockDBQ) Begin() error { + args := m.Called() + return args.Error(0) +} + +func (m *mockDBQ) BeginTx(txOpts *sql.TxOptions) error { + args := m.Called(txOpts) + return args.Error(0) +} + 
+func (m *mockDBQ) CloneIngestionQ() history.IngestionQ { + args := m.Called() + return args.Get(0).(history.IngestionQ) +} + +func (m *mockDBQ) Commit() error { + args := m.Called() + return args.Error(0) +} + +func (m *mockDBQ) Rollback() error { + args := m.Called() + return args.Error(0) +} + +func (m *mockDBQ) GetTx() *sqlx.Tx { + args := m.Called() + if args.Get(0) == nil { + return nil + } + return args.Get(0).(*sqlx.Tx) +} + +func (m *mockDBQ) GetLastLedgerIngest(ctx context.Context) (uint32, error) { + args := m.Called(ctx) + return args.Get(0).(uint32), args.Error(1) +} + +func (m *mockDBQ) GetOfferCompactionSequence(ctx context.Context) (uint32, error) { + args := m.Called(ctx) + return args.Get(0).(uint32), args.Error(1) +} + +func (m *mockDBQ) GetLiquidityPoolCompactionSequence(ctx context.Context) (uint32, error) { + args := m.Called(ctx) + return args.Get(0).(uint32), args.Error(1) +} + +func (m *mockDBQ) GetLastLedgerIngestNonBlocking(ctx context.Context) (uint32, error) { + args := m.Called(ctx) + return args.Get(0).(uint32), args.Error(1) +} + +func (m *mockDBQ) GetIngestVersion(ctx context.Context) (int, error) { + args := m.Called(ctx) + return args.Get(0).(int), args.Error(1) +} + +func (m *mockDBQ) UpdateLastLedgerIngest(ctx context.Context, sequence uint32) error { + args := m.Called(ctx, sequence) + return args.Error(0) +} + +func (m *mockDBQ) UpdateExpStateInvalid(ctx context.Context, invalid bool) error { + args := m.Called(ctx, invalid) + return args.Error(0) +} + +func (m *mockDBQ) UpdateIngestVersion(ctx context.Context, version int) error { + args := m.Called(ctx, version) + return args.Error(0) +} + +func (m *mockDBQ) GetExpStateInvalid(ctx context.Context) (bool, error) { + args := m.Called(ctx) + return args.Get(0).(bool), args.Error(1) +} + +func (m *mockDBQ) StreamAllOffers(ctx context.Context, callback func(history.Offer) error) error { + a := m.Called(ctx, callback) + return a.Error(0) +} + +func (m *mockDBQ) GetLatestHistoryLedger(ctx context.Context) (uint32, error) { + args := m.Called(ctx) + return args.Get(0).(uint32), args.Error(1) +} + +func (m *mockDBQ) TruncateIngestStateTables(ctx context.Context) error { + args := m.Called(ctx) + return args.Error(0) +} + +func (m *mockDBQ) DeleteRangeAll(ctx context.Context, start, end int64) error { + args := m.Called(ctx, start, end) + return args.Error(0) +} + +// Methods from interfaces duplicating methods: + +func (m *mockDBQ) NewTransactionParticipantsBatchInsertBuilder(maxBatchSize int) history.TransactionParticipantsBatchInsertBuilder { + args := m.Called(maxBatchSize) + return args.Get(0).(history.TransactionParticipantsBatchInsertBuilder) +} + +func (m *mockDBQ) NewOperationParticipantBatchInsertBuilder(maxBatchSize int) history.OperationParticipantBatchInsertBuilder { + args := m.Called(maxBatchSize) + return args.Get(0).(history.TransactionParticipantsBatchInsertBuilder) +} + +func (m *mockDBQ) NewTradeBatchInsertBuilder(maxBatchSize int) history.TradeBatchInsertBuilder { + args := m.Called(maxBatchSize) + return args.Get(0).(history.TradeBatchInsertBuilder) +} + +func (m *mockDBQ) RebuildTradeAggregationTimes(ctx context.Context, from, to strtime.Millis) error { + args := m.Called(ctx, from, to) + return args.Error(0) +} + +func (m *mockDBQ) RebuildTradeAggregationBuckets(ctx context.Context, fromLedger, toLedger uint32) error { + args := m.Called(ctx, fromLedger, toLedger) + return args.Error(0) +} + +func (m *mockDBQ) CreateAssets(ctx context.Context, assets []xdr.Asset, batchSize int) 
(map[string]history.Asset, error) { + args := m.Called(ctx, assets) + return args.Get(0).(map[string]history.Asset), args.Error(1) +} + +type mockLedgerBackend struct { + mock.Mock +} + +func (m *mockLedgerBackend) GetLatestLedgerSequence(ctx context.Context) (sequence uint32, err error) { + args := m.Called(ctx) + return args.Get(0).(uint32), args.Error(1) +} + +func (m *mockLedgerBackend) GetLedger(ctx context.Context, sequence uint32) (xdr.LedgerCloseMeta, error) { + args := m.Called(ctx, sequence) + return args.Get(0).(xdr.LedgerCloseMeta), args.Error(1) +} + +func (m *mockLedgerBackend) PrepareRange(ctx context.Context, ledgerRange ledgerbackend.Range) error { + args := m.Called(ctx, ledgerRange) + return args.Error(0) +} + +func (m *mockLedgerBackend) IsPrepared(ctx context.Context, ledgerRange ledgerbackend.Range) (bool, error) { + args := m.Called(ctx, ledgerRange) + return args.Get(0).(bool), args.Error(1) +} + +func (m *mockLedgerBackend) Close() error { + args := m.Called() + return args.Error(0) +} + +type mockProcessorsRunner struct { + mock.Mock +} + +func (m *mockProcessorsRunner) SetHistoryAdapter(historyAdapter historyArchiveAdapterInterface) { + m.Called(historyAdapter) +} + +func (m *mockProcessorsRunner) EnableMemoryStatsLogging() { + m.Called() +} + +func (m *mockProcessorsRunner) DisableMemoryStatsLogging() { + m.Called() +} + +func (m *mockProcessorsRunner) RunGenesisStateIngestion() (ingest.StatsChangeProcessorResults, error) { + args := m.Called() + return args.Get(0).(ingest.StatsChangeProcessorResults), args.Error(1) +} + +func (m *mockProcessorsRunner) RunHistoryArchiveIngestion( + checkpointLedger uint32, + ledgerProtocolVersion uint32, + bucketListHash xdr.Hash, +) (ingest.StatsChangeProcessorResults, error) { + args := m.Called(checkpointLedger, ledgerProtocolVersion, bucketListHash) + return args.Get(0).(ingest.StatsChangeProcessorResults), args.Error(1) +} + +func (m *mockProcessorsRunner) RunAllProcessorsOnLedger(ledger xdr.LedgerCloseMeta) ( + ledgerStats, + error, +) { + args := m.Called(ledger) + return args.Get(0).(ledgerStats), + args.Error(1) +} + +func (m *mockProcessorsRunner) RunTransactionProcessorsOnLedger(ledger xdr.LedgerCloseMeta) ( + processors.StatsLedgerTransactionProcessorResults, + processorsRunDurations, + processors.TradeStats, + error, +) { + args := m.Called(ledger) + return args.Get(0).(processors.StatsLedgerTransactionProcessorResults), + args.Get(1).(processorsRunDurations), + args.Get(2).(processors.TradeStats), + args.Error(3) +} + +var _ ProcessorRunnerInterface = (*mockProcessorsRunner)(nil) + +type mockStellarCoreClient struct { + mock.Mock +} + +func (m *mockStellarCoreClient) SetCursor(ctx context.Context, id string, cursor int32) error { + args := m.Called(ctx, id, cursor) + return args.Error(0) +} + +var _ stellarCoreClient = (*mockStellarCoreClient)(nil) + +type mockSystem struct { + mock.Mock +} + +func (m *mockSystem) Run() { + m.Called() +} + +func (m *mockSystem) Metrics() Metrics { + args := m.Called() + return args.Get(0).(Metrics) +} + +func (m *mockSystem) RegisterMetrics(registry *prometheus.Registry) { + m.Called(registry) +} + +func (m *mockSystem) StressTest(numTransactions, changesPerTransaction int) error { + args := m.Called(numTransactions, changesPerTransaction) + return args.Error(0) +} + +func (m *mockSystem) VerifyRange(fromLedger, toLedger uint32, verifyState bool) error { + args := m.Called(fromLedger, toLedger, verifyState) + return args.Error(0) +} + +func (m *mockSystem) 
ReingestRange(ledgerRanges []history.LedgerRange, force bool) error { + args := m.Called(ledgerRanges, force) + return args.Error(0) +} + +func (m *mockSystem) BuildGenesisState() error { + args := m.Called() + return args.Error(0) +} + +func (m *mockSystem) Shutdown() { + m.Called() +} + +var _ System = (*mockSystem)(nil) diff --git a/services/horizon/internal/ingest/mock_orderbook_graph.go b/services/horizon/internal/ingest/mock_orderbook_graph.go new file mode 100644 index 0000000000..da0df3e260 --- /dev/null +++ b/services/horizon/internal/ingest/mock_orderbook_graph.go @@ -0,0 +1,60 @@ +package ingest + +import ( + "github.com/stellar/go/exp/orderbook" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/mock" +) + +var _ orderbook.OBGraph = (*mockOrderBookGraph)(nil) + +type mockOrderBookGraph struct { + mock.Mock +} + +func (m *mockOrderBookGraph) AddOffers(offer ...xdr.OfferEntry) { + m.Called(offer) +} + +func (m *mockOrderBookGraph) AddLiquidityPools(liquidityPool ...xdr.LiquidityPoolEntry) { + m.Called(liquidityPool) +} + +func (m *mockOrderBookGraph) Apply(ledger uint32) error { + args := m.Called(ledger) + return args.Error(0) + +} + +func (m *mockOrderBookGraph) Discard() { + m.Called() +} + +func (m *mockOrderBookGraph) Offers() []xdr.OfferEntry { + args := m.Called() + return args.Get(0).([]xdr.OfferEntry) +} + +func (m *mockOrderBookGraph) LiquidityPools() []xdr.LiquidityPoolEntry { + args := m.Called() + return args.Get(0).([]xdr.LiquidityPoolEntry) +} + +func (m *mockOrderBookGraph) RemoveOffer(id xdr.Int64) orderbook.OBGraph { + args := m.Called(id) + return args.Get(0).(orderbook.OBGraph) +} + +func (m *mockOrderBookGraph) RemoveLiquidityPool(pool xdr.LiquidityPoolEntry) orderbook.OBGraph { + args := m.Called(pool) + return args.Get(0).(orderbook.OBGraph) +} + +func (m *mockOrderBookGraph) Clear() { + m.Called() +} + +func (m *mockOrderBookGraph) Verify() ([]xdr.OfferEntry, []xdr.LiquidityPoolEntry, error) { + args := m.Called() + return args.Get(0).([]xdr.OfferEntry), args.Get(1).([]xdr.LiquidityPoolEntry), args.Error(2) +} diff --git a/services/horizon/internal/ingest/orderbook.go b/services/horizon/internal/ingest/orderbook.go new file mode 100644 index 0000000000..9577cf5f6d --- /dev/null +++ b/services/horizon/internal/ingest/orderbook.go @@ -0,0 +1,392 @@ +package ingest + +import ( + "context" + "database/sql" + "math/rand" + "sort" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stellar/go/exp/orderbook" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest/processors" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +const ( + verificationFrequency = time.Hour + updateFrequency = 2 * time.Second +) + +// OrderBookStream updates an in memory graph to be consistent with +// offers in the Horizon DB. Any offers which are created, modified, or removed +// from the Horizon DB during ingestion will be applied to the in memory order book +// graph. OrderBookStream assumes that no other component will update the +// in memory graph. However, it is safe for other go routines to use the +// in memory graph for read operations. 
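+//
+// A typical setup (a sketch; it assumes an orderbook.OBGraph implementation
+// such as the in-memory graph provided by the exp/orderbook package) wires the
+// stream to the history queries and runs it on its own goroutine:
+//
+//	graph := orderbook.NewOrderBookGraph()
+//	stream := NewOrderBookStream(historyQ, graph)
+//	go stream.Run(ctx)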
+type OrderBookStream struct { + graph orderbook.OBGraph + historyQ history.IngestionQ + // LatestLedgerGauge exposes the local (order book graph) + // latest processed ledger + LatestLedgerGauge prometheus.Gauge + lastLedger uint32 + lastVerification time.Time + encodingBuffer *xdr.EncodingBuffer +} + +// NewOrderBookStream constructs and initializes an OrderBookStream instance +func NewOrderBookStream(historyQ history.IngestionQ, graph orderbook.OBGraph) *OrderBookStream { + return &OrderBookStream{ + graph: graph, + historyQ: historyQ, + LatestLedgerGauge: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "order_book_stream", Name: "latest_ledger", + }), + lastVerification: time.Now(), + encodingBuffer: xdr.NewEncodingBuffer(), + } +} + +type ingestionStatus struct { + HistoryConsistentWithState bool + StateInvalid bool + LastIngestedLedger uint32 + LastOfferCompactionLedger uint32 + LastLiquidityPoolCompactionLedger uint32 +} + +func (o *OrderBookStream) getIngestionStatus(ctx context.Context) (ingestionStatus, error) { + var status ingestionStatus + var err error + + status.StateInvalid, err = o.historyQ.GetExpStateInvalid(ctx) + if err != nil { + return status, errors.Wrap(err, "Error from GetExpStateInvalid") + } + + var lastHistoryLedger uint32 + lastHistoryLedger, err = o.historyQ.GetLatestHistoryLedger(ctx) + if err != nil { + return status, errors.Wrap(err, "Error from GetLatestHistoryLedger") + } + status.LastIngestedLedger, err = o.historyQ.GetLastLedgerIngestNonBlocking(ctx) + if err != nil { + return status, errors.Wrap(err, "Error from GetLastLedgerIngestNonBlocking") + } + status.LastOfferCompactionLedger, err = o.historyQ.GetOfferCompactionSequence(ctx) + if err != nil { + return status, errors.Wrap(err, "Error from GetOfferCompactionSequence") + } + status.LastLiquidityPoolCompactionLedger, err = o.historyQ.GetLiquidityPoolCompactionSequence(ctx) + if err != nil { + return status, errors.Wrap(err, "Error from GetLiquidityPoolCompactionSequence") + } + + status.HistoryConsistentWithState = (status.LastIngestedLedger == lastHistoryLedger) || + // Running ingestion on an empty DB is a special case because we first ingest from the history archive. + // Then, on the next iteration, we ingest TX Meta from Stellar Core. So there is a brief + // period where there will not be any rows in the history_ledgers table but that is ok. + (lastHistoryLedger == 0) + return status, nil +} + +// update returns true if the order book graph was reset +func (o *OrderBookStream) update(ctx context.Context, status ingestionStatus) (bool, error) { + reset := o.lastLedger == 0 + if status.StateInvalid { + log.WithField("status", status).Warn("ingestion state is invalid") + reset = true + } else if !status.HistoryConsistentWithState { + log.WithField("status", status). + Info("waiting for ingestion system catchup") + reset = true + } else if status.LastIngestedLedger < o.lastLedger { + log.WithField("status", status). + WithField("last_ledger", o.lastLedger). + Warn("ingestion is behind order book last ledger") + reset = true + } else if o.lastLedger > 0 && o.lastLedger < status.LastOfferCompactionLedger { + log.WithField("status", status). + WithField("last_ledger", o.lastLedger). + Warn("order book is behind the last offer compaction ledger") + reset = true + } else if status.LastOfferCompactionLedger != status.LastLiquidityPoolCompactionLedger { + log.WithField("status", status). + WithField("last_ledger", o.lastLedger). 
+			Warn("offer compaction is not consistent with liquidity pool compaction")
+		reset = true
+	}
+
+	if reset {
+		o.graph.Clear()
+		o.lastLedger = 0
+
+		// wait until offers in the horizon db are valid before populating the order book graph
+		if status.StateInvalid || !status.HistoryConsistentWithState {
+			return true, nil
+		}
+
+		defer o.graph.Discard()
+
+		err := o.historyQ.StreamAllOffers(ctx, func(offer history.Offer) error {
+			o.graph.AddOffers(offerToXDR(offer))
+			return nil
+		})
+
+		if err != nil {
+			return true, errors.Wrap(err, "Error loading offers into orderbook")
+		}
+
+		err = o.historyQ.StreamAllLiquidityPools(ctx, func(liquidityPool history.LiquidityPool) error {
+			if liquidityPoolXDR, liquidityPoolErr := liquidityPoolToXDR(liquidityPool); liquidityPoolErr != nil {
+				return errors.Wrapf(liquidityPoolErr, "Invalid liquidity pool row %v, unable to marshal to xdr", liquidityPool)
+			} else {
+				o.graph.AddLiquidityPools(liquidityPoolXDR)
+				return nil
+			}
+		})
+
+		if err != nil {
+			return true, errors.Wrap(err, "Error loading liquidity pools into orderbook")
+		}
+
+		if err := o.graph.Apply(status.LastIngestedLedger); err != nil {
+			return true, errors.Wrap(err, "Error applying changes to order book")
+		}
+
+		o.lastLedger = status.LastIngestedLedger
+		o.LatestLedgerGauge.Set(float64(status.LastIngestedLedger))
+		return true, nil
+	}
+
+	if status.LastIngestedLedger == o.lastLedger {
+		return false, nil
+	}
+
+	defer o.graph.Discard()
+
+	offers, err := o.historyQ.GetUpdatedOffers(ctx, o.lastLedger)
+	if err != nil {
+		return false, errors.Wrap(err, "Error from GetUpdatedOffers")
+	}
+	liquidityPools, err := o.historyQ.GetUpdatedLiquidityPools(ctx, o.lastLedger)
+	if err != nil {
+		return false, errors.Wrap(err, "Error from GetUpdatedLiquidityPools")
+	}
+
+	for _, offer := range offers {
+		if offer.Deleted {
+			o.graph.RemoveOffer(xdr.Int64(offer.OfferID))
+		} else {
+			o.graph.AddOffers(offerToXDR(offer))
+		}
+	}
+
+	for _, liquidityPool := range liquidityPools {
+		var poolXDR xdr.LiquidityPoolEntry
+		poolXDR, err = liquidityPoolToXDR(liquidityPool)
+		if err != nil {
+			return false, errors.Wrap(err, "Error converting liquidity pool row to xdr")
+		}
+		if liquidityPool.Deleted {
+			o.graph.RemoveLiquidityPool(poolXDR)
+		} else {
+			o.graph.AddLiquidityPools(poolXDR)
+		}
+	}
+
+	if err = o.graph.Apply(status.LastIngestedLedger); err != nil {
+		return false, errors.Wrap(err, "Error applying changes to order book")
+	}
+
+	o.lastLedger = status.LastIngestedLedger
+	o.LatestLedgerGauge.Set(float64(status.LastIngestedLedger))
+	return false, nil
+}
+
+func (o *OrderBookStream) verifyAllOffers(ctx context.Context, offers []xdr.OfferEntry) (bool, error) {
+	var ingestionOffers []history.Offer
+	err := o.historyQ.StreamAllOffers(ctx, func(offer history.Offer) error {
+		ingestionOffers = append(ingestionOffers, offer)
+		return nil
+	})
+
+	if err != nil {
+		return false, errors.Wrap(err, "Error loading all offers for orderbook verification")
+	}
+
+	mismatch := len(offers) != len(ingestionOffers)
+
+	if !mismatch {
+		sort.Slice(offers, func(i, j int) bool {
+			return offers[i].OfferId < offers[j].OfferId
+		})
+		sort.Slice(ingestionOffers, func(i, j int) bool {
+			return ingestionOffers[i].OfferID < ingestionOffers[j].OfferID
+		})
+
+		for i, offerRow := range ingestionOffers {
+			offerEntry := offers[i]
+			offerRowXDR := offerToXDR(offerRow)
+			offerEntryBase64, err := o.encodingBuffer.MarshalBase64(&offerEntry)
+			if err != nil {
+				return false, errors.Wrap(err, "Error from marshalling offerEntry")
+			}
+
offerRowBase64, err := o.encodingBuffer.MarshalBase64(&offerRowXDR)
+			if err != nil {
+				return false, errors.Wrap(err, "Error from marshalling offerRowXDR")
+			}
+			if offerEntryBase64 != offerRowBase64 {
+				mismatch = true
+				break
+			}
+		}
+	}
+
+	if mismatch {
+		log.WithField("stream_offers", offers).
+			WithField("ingestion_offers", ingestionOffers).
+			Error("offers derived from the order book stream do not match offers from ingestion")
+		return false, nil
+	}
+	log.Info("offer stream verification succeeded")
+	return true, nil
+}
+
+func (o *OrderBookStream) verifyAllLiquidityPools(ctx context.Context, liquidityPools []xdr.LiquidityPoolEntry) (bool, error) {
+	var ingestionLiquidityPools []history.LiquidityPool
+
+	err := o.historyQ.StreamAllLiquidityPools(ctx, func(liquidityPool history.LiquidityPool) error {
+		ingestionLiquidityPools = append(ingestionLiquidityPools, liquidityPool)
+		return nil
+	})
+
+	if err != nil {
+		return false, errors.Wrap(err, "Error loading all liquidity pools for orderbook verification")
+	}
+
+	mismatch := len(liquidityPools) != len(ingestionLiquidityPools)
+
+	if !mismatch {
+		sort.Slice(liquidityPools, func(i, j int) bool {
+			return processors.PoolIDToString(liquidityPools[i].LiquidityPoolId) <
+				processors.PoolIDToString(liquidityPools[j].LiquidityPoolId)
+		})
+		sort.Slice(ingestionLiquidityPools, func(i, j int) bool {
+			return ingestionLiquidityPools[i].PoolID < ingestionLiquidityPools[j].PoolID
+		})
+
+		for i, liquidityPoolRow := range ingestionLiquidityPools {
+			liquidityPoolEntry := liquidityPools[i]
+			liquidityPoolRowXDR, err := liquidityPoolToXDR(liquidityPoolRow)
+			if err != nil {
+				return false, errors.Wrap(err, "Error from converting liquidity pool row to xdr")
+			}
+			liquidityPoolEntryBase64, err := o.encodingBuffer.MarshalBase64(&liquidityPoolEntry)
+			if err != nil {
+				return false, errors.Wrap(err, "Error from marshalling liquidityPoolEntry")
+			}
+			liquidityPoolRowBase64, err := o.encodingBuffer.MarshalBase64(&liquidityPoolRowXDR)
+			if err != nil {
+				return false, errors.Wrap(err, "Error from marshalling liquidityPoolRowXDR")
+			}
+			if liquidityPoolEntryBase64 != liquidityPoolRowBase64 {
+				mismatch = true
+				break
+			}
+		}
+	}
+
+	if mismatch {
+		log.WithField("stream_liquidity_pools", liquidityPools).
+			WithField("ingestion_liquidity_pools", ingestionLiquidityPools).
+			Error("liquidity pools derived from the order book stream do not match liquidity pools from ingestion")
+		return false, nil
+	}
+	log.Info("liquidity pool stream verification succeeded")
+	return true, nil
+}
+
+// Update will query the Horizon DB for offers which have been created, removed, or updated since the
+// last time Update() was called. Those changes will then be applied to the in memory order book graph.
+// After calling this function, the in memory order book graph should be consistent with the
+// Horizon DB (assuming no error is returned).
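+//
+// For example, a one-off refresh outside of the periodic Run loop (sketch) is
+// simply:
+//
+//	if err := stream.Update(ctx); err != nil {
+//		log.WithError(err).Error("could not apply updates from order book stream")
+//	}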
+func (o *OrderBookStream) Update(ctx context.Context) error { + if err := o.historyQ.BeginTx(&sql.TxOptions{ReadOnly: true, Isolation: sql.LevelRepeatableRead}); err != nil { + return errors.Wrap(err, "Error starting repeatable read transaction") + } + defer o.historyQ.Rollback() + + status, err := o.getIngestionStatus(ctx) + if err != nil { + return errors.Wrap(err, "Error obtaining ingestion status") + } + + if reset, err := o.update(ctx, status); err != nil { + return errors.Wrap(err, "Error updating") + } else if reset { + return nil + } + + // add 15 minute jitter so that not all horizon nodes are calling + // historyQ.StreamAllOffers at the same time + jitter := time.Duration(rand.Int63n(int64(15 * time.Minute))) + requiresVerification := o.lastLedger > 0 && + time.Since(o.lastVerification) >= verificationFrequency+jitter + + if requiresVerification { + offers, pools, err := o.graph.Verify() + if err != nil { + log.WithError(err). + Error("Orderbook graph is not internally consistent") + o.lastVerification = time.Now() + // set last ledger to 0 so that we reset on next update + o.lastLedger = 0 + return nil + } + + offersOk, err := o.verifyAllOffers(ctx, offers) + if err != nil { + if !isCancelledError(err) { + log.WithError(err).Info("Could not verify offers") + return nil + } + } + + liquidityPoolsOK, err := o.verifyAllLiquidityPools(ctx, pools) + if err != nil { + if !isCancelledError(err) { + log.WithError(err).Info("Could not verify liquidity pools") + return nil + } + } + o.lastVerification = time.Now() + if !offersOk || !liquidityPoolsOK { + // set last ledger to 0 so that we reset on next update + o.lastLedger = 0 + } + } + return nil +} + +// Run will call Update() every 30 seconds until the given context is terminated. +func (o *OrderBookStream) Run(ctx context.Context) { + ticker := time.NewTicker(updateFrequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := o.Update(ctx); err != nil && !isCancelledError(err) { + log.WithError(err).Error("could not apply updates from order book stream") + } + case <-ctx.Done(): + log.Info("shutting down OrderBookStream") + return + } + } +} diff --git a/services/horizon/internal/ingest/orderbook_test.go b/services/horizon/internal/ingest/orderbook_test.go new file mode 100644 index 0000000000..870a3490ab --- /dev/null +++ b/services/horizon/internal/ingest/orderbook_test.go @@ -0,0 +1,1019 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package ingest + +import ( + "context" + "fmt" + "testing" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest/processors" + "github.com/stellar/go/xdr" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" +) + +type IngestionStatusTestSuite struct { + suite.Suite + ctx context.Context + historyQ *mockDBQ + stream *OrderBookStream +} + +func TestIngestionStatus(t *testing.T) { + suite.Run(t, new(IngestionStatusTestSuite)) +} + +func (t *IngestionStatusTestSuite) SetupTest() { + t.ctx = context.Background() + t.historyQ = &mockDBQ{} + t.stream = NewOrderBookStream(t.historyQ, &mockOrderBookGraph{}) +} + +func (t *IngestionStatusTestSuite) TearDownTest() { + t.historyQ.AssertExpectations(t.T()) +} + +func (t *IngestionStatusTestSuite) TestGetExpStateInvalidError() { + t.historyQ.On("GetExpStateInvalid", t.ctx). + Return(false, fmt.Errorf("state invalid error")). 
+ Once() + _, err := t.stream.getIngestionStatus(t.ctx) + t.Assert().EqualError(err, "Error from GetExpStateInvalid: state invalid error") +} + +func (t *IngestionStatusTestSuite) TestGetLatestLedgerError() { + t.historyQ.On("GetExpStateInvalid", t.ctx). + Return(false, nil). + Once() + + t.historyQ.On("GetLatestHistoryLedger", t.ctx). + Return(uint32(0), fmt.Errorf("latest ledger error")). + Once() + _, err := t.stream.getIngestionStatus(t.ctx) + t.Assert().EqualError(err, "Error from GetLatestHistoryLedger: latest ledger error") +} + +func (t *IngestionStatusTestSuite) TestGetLastLedgerIngestNonBlockingError() { + t.historyQ.On("GetExpStateInvalid", t.ctx). + Return(false, nil). + Once() + + t.historyQ.On("GetLatestHistoryLedger", t.ctx). + Return(uint32(200), nil). + Once() + + t.historyQ.On("GetLastLedgerIngestNonBlocking", t.ctx). + Return(uint32(0), fmt.Errorf("ingest error")). + Once() + + _, err := t.stream.getIngestionStatus(t.ctx) + t.Assert().EqualError(err, "Error from GetLastLedgerIngestNonBlocking: ingest error") +} + +func (t *IngestionStatusTestSuite) TestGetOfferCompactionSequenceError() { + t.historyQ.On("GetExpStateInvalid", t.ctx). + Return(false, nil). + Once() + + t.historyQ.On("GetLatestHistoryLedger", t.ctx). + Return(uint32(200), nil). + Once() + + t.historyQ.On("GetLastLedgerIngestNonBlocking", t.ctx). + Return(uint32(200), nil). + Once() + + t.historyQ.On("GetOfferCompactionSequence", t.ctx). + Return(uint32(0), fmt.Errorf("compaction error")). + Once() + + _, err := t.stream.getIngestionStatus(t.ctx) + t.Assert().EqualError(err, "Error from GetOfferCompactionSequence: compaction error") +} + +func (t *IngestionStatusTestSuite) TestLiquidityPoolCompactionSequenceError() { + t.historyQ.On("GetExpStateInvalid", t.ctx). + Return(false, nil). + Once() + + t.historyQ.On("GetLatestHistoryLedger", t.ctx). + Return(uint32(200), nil). + Once() + + t.historyQ.On("GetLastLedgerIngestNonBlocking", t.ctx). + Return(uint32(200), nil). + Once() + + t.historyQ.On("GetOfferCompactionSequence", t.ctx). + Return(uint32(100), nil). + Once() + + t.historyQ.On("GetLiquidityPoolCompactionSequence", t.ctx). + Return(uint32(0), fmt.Errorf("compaction error")). + Once() + + _, err := t.stream.getIngestionStatus(t.ctx) + t.Assert().EqualError(err, "Error from GetLiquidityPoolCompactionSequence: compaction error") +} + +func (t *IngestionStatusTestSuite) TestStateInvalid() { + t.historyQ.On("GetExpStateInvalid", t.ctx). + Return(true, nil). + Once() + + t.historyQ.On("GetLatestHistoryLedger", t.ctx). + Return(uint32(200), nil). + Once() + + t.historyQ.On("GetLastLedgerIngestNonBlocking", t.ctx). + Return(uint32(200), nil). + Once() + + t.historyQ.On("GetOfferCompactionSequence", t.ctx). + Return(uint32(100), nil). + Once() + + t.historyQ.On("GetLiquidityPoolCompactionSequence", t.ctx). + Return(uint32(100), nil). + Once() + + status, err := t.stream.getIngestionStatus(t.ctx) + t.Assert().NoError(err) + t.Assert().Equal(ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: true, + LastIngestedLedger: 200, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + }, status) +} + +func (t *IngestionStatusTestSuite) TestHistoryInconsistentWithState() { + t.historyQ.On("GetExpStateInvalid", t.ctx). + Return(true, nil). + Once() + + t.historyQ.On("GetLatestHistoryLedger", t.ctx). + Return(uint32(200), nil). + Once() + + t.historyQ.On("GetLastLedgerIngestNonBlocking", t.ctx). + Return(uint32(201), nil). 
+ Once() + + t.historyQ.On("GetOfferCompactionSequence", t.ctx). + Return(uint32(100), nil). + Once() + + t.historyQ.On("GetLiquidityPoolCompactionSequence", t.ctx). + Return(uint32(100), nil). + Once() + + status, err := t.stream.getIngestionStatus(t.ctx) + t.Assert().NoError(err) + t.Assert().Equal(ingestionStatus{ + HistoryConsistentWithState: false, + StateInvalid: true, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + }, status) +} + +func (t *IngestionStatusTestSuite) TestHistoryLatestLedgerZero() { + t.historyQ.On("GetExpStateInvalid", t.ctx). + Return(false, nil). + Once() + + t.historyQ.On("GetLatestHistoryLedger", t.ctx). + Return(uint32(0), nil). + Once() + + t.historyQ.On("GetLastLedgerIngestNonBlocking", t.ctx). + Return(uint32(201), nil). + Once() + + t.historyQ.On("GetOfferCompactionSequence", t.ctx). + Return(uint32(100), nil). + Once() + + t.historyQ.On("GetLiquidityPoolCompactionSequence", t.ctx). + Return(uint32(100), nil). + Once() + + status, err := t.stream.getIngestionStatus(t.ctx) + t.Assert().NoError(err) + t.Assert().Equal(ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + }, status) +} + +type UpdateOrderBookStreamTestSuite struct { + suite.Suite + ctx context.Context + historyQ *mockDBQ + graph *mockOrderBookGraph + stream *OrderBookStream +} + +func TestUpdateOrderBookStream(t *testing.T) { + suite.Run(t, new(UpdateOrderBookStreamTestSuite)) +} + +func (t *UpdateOrderBookStreamTestSuite) SetupTest() { + t.ctx = context.Background() + t.historyQ = &mockDBQ{} + t.graph = &mockOrderBookGraph{} + t.stream = NewOrderBookStream(t.historyQ, t.graph) +} + +func (t *UpdateOrderBookStreamTestSuite) TearDownTest() { + t.historyQ.AssertExpectations(t.T()) + t.graph.AssertExpectations(t.T()) +} + +func (t *UpdateOrderBookStreamTestSuite) TestStreamAllOffersError() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + t.graph.On("Clear").Return().Once() + t.graph.On("Discard").Return().Once() + t.historyQ.On("StreamAllOffers", t.ctx, mock.Anything). + Return(fmt.Errorf("offers error")). + Once() + + t.stream.lastLedger = 300 + _, err := t.stream.update(t.ctx, status) + t.Assert().EqualError(err, "Error loading offers into orderbook: offers error") + t.Assert().Equal(uint32(0), t.stream.lastLedger) +} + +func (t *UpdateOrderBookStreamTestSuite) TestResetApplyError() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + t.graph.On("Clear").Return().Once() + t.graph.On("Discard").Return().Once() + + sellerID := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + offer := history.Offer{OfferID: 1, SellerID: sellerID} + offerEntry := []xdr.OfferEntry{{ + SellerId: xdr.MustAddress(sellerID), + OfferId: 1, + }} + otherOffer := history.Offer{OfferID: 20, SellerID: sellerID} + otherOfferEntry := []xdr.OfferEntry{{ + SellerId: xdr.MustAddress(sellerID), + OfferId: 20, + }} + t.historyQ.On("StreamAllOffers", t.ctx, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + callback := args.Get(1).(func(offer history.Offer) error) + callback(offer) + callback(otherOffer) + }). 
+ Once() + + t.historyQ.MockQLiquidityPools.On("StreamAllLiquidityPools", t.ctx, mock.Anything). + Return(nil). + Once() + + t.graph.On("AddOffers", offerEntry).Return().Once() + t.graph.On("AddOffers", otherOfferEntry).Return().Once() + + t.graph.On("Apply", status.LastIngestedLedger). + Return(fmt.Errorf("apply error")). + Once() + + t.stream.lastLedger = 300 + _, err := t.stream.update(t.ctx, status) + t.Assert().EqualError(err, "Error applying changes to order book: apply error") + t.Assert().Equal(uint32(0), t.stream.lastLedger) +} + +func (t *UpdateOrderBookStreamTestSuite) mockReset(status ingestionStatus) { + t.graph.On("Clear").Return().Once() + t.graph.On("Discard").Return().Once() + + sellerID := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + offer := history.Offer{OfferID: 1, SellerID: sellerID} + offerEntry := []xdr.OfferEntry{{ + SellerId: xdr.MustAddress(sellerID), + OfferId: 1, + }} + otherOffer := history.Offer{OfferID: 20, SellerID: sellerID} + otherOfferEntry := []xdr.OfferEntry{{ + SellerId: xdr.MustAddress(sellerID), + OfferId: 20, + }} + offers := []history.Offer{offer, otherOffer} + t.historyQ.On("StreamAllOffers", t.ctx, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + callback := args.Get(1).(func(offer history.Offer) error) + for idx := range offers { + callback(offers[idx]) + } + }). + Once() + + t.historyQ.MockQLiquidityPools.On("StreamAllLiquidityPools", t.ctx, mock.Anything). + Return(nil). + Once() + + t.graph.On("AddOffers", offerEntry).Return().Once() + t.graph.On("AddOffers", otherOfferEntry).Return().Once() + + t.graph.On("Apply", status.LastIngestedLedger). + Return(nil). + Once() +} + +func (t *UpdateOrderBookStreamTestSuite) TestFirstUpdateSucceeds() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + t.mockReset(status) + + reset, err := t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(uint32(201), t.stream.lastLedger) + t.Assert().True(reset) +} + +func (t *UpdateOrderBookStreamTestSuite) TestInvalidState() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: true, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + t.graph.On("Clear").Return().Once() + + reset, err := t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(uint32(0), t.stream.lastLedger) + t.Assert().True(reset) + + t.stream.lastLedger = 123 + + t.graph.On("Clear").Return().Once() + + reset, err = t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(uint32(0), t.stream.lastLedger) + t.Assert().True(reset) +} + +func (t *UpdateOrderBookStreamTestSuite) TestHistoryInconsistentWithState() { + status := ingestionStatus{ + HistoryConsistentWithState: false, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + t.graph.On("Clear").Return().Once() + + reset, err := t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(uint32(0), t.stream.lastLedger) + t.Assert().True(reset) + + t.stream.lastLedger = 123 + + t.graph.On("Clear").Return().Once() + + reset, err = t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(uint32(0), t.stream.lastLedger) + t.Assert().True(reset) +} + +func (t *UpdateOrderBookStreamTestSuite) 
TestOfferCompactionDoesNotMatchLiquidityPoolCompaction() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 110, + } + t.mockReset(status) + + t.stream.lastLedger = 201 + reset, err := t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(uint32(201), t.stream.lastLedger) + t.Assert().True(reset) +} + +func (t *UpdateOrderBookStreamTestSuite) TestLastIngestedLedgerBehindStream() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + t.mockReset(status) + + t.stream.lastLedger = 300 + reset, err := t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(uint32(201), t.stream.lastLedger) + t.Assert().True(reset) +} + +func (t *UpdateOrderBookStreamTestSuite) TestStreamBehindLastCompactionLedger() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + t.mockReset(status) + + t.stream.lastLedger = 99 + reset, err := t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(uint32(201), t.stream.lastLedger) + t.Assert().True(reset) +} + +func (t *UpdateOrderBookStreamTestSuite) TestStreamLedgerEqualsLastIngestedLedger() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + + t.stream.lastLedger = 201 + reset, err := t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(uint32(201), t.stream.lastLedger) + t.Assert().False(reset) +} + +func (t *UpdateOrderBookStreamTestSuite) TestGetUpdatedOffersError() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + t.graph.On("Discard").Return().Once() + + t.stream.lastLedger = 100 + t.historyQ.MockQOffers.On("GetUpdatedOffers", t.ctx, uint32(100)). + Return([]history.Offer{}, fmt.Errorf("updated offers error")). + Once() + + _, err := t.stream.update(t.ctx, status) + t.Assert().EqualError(err, "Error from GetUpdatedOffers: updated offers error") + t.Assert().Equal(uint32(100), t.stream.lastLedger) +} + +func (t *UpdateOrderBookStreamTestSuite) TestGetUpdatedLiquidityPoolsError() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + t.graph.On("Discard").Return().Once() + + t.stream.lastLedger = 100 + t.historyQ.MockQOffers.On("GetUpdatedOffers", t.ctx, uint32(100)). + Return([]history.Offer{}, nil). + Once() + + t.historyQ.MockQLiquidityPools.On("GetUpdatedLiquidityPools", t.ctx, t.stream.lastLedger). + Return([]history.LiquidityPool{}, fmt.Errorf("updated liquidity pools error")). 
+ Once() + + _, err := t.stream.update(t.ctx, status) + t.Assert().EqualError(err, "Error from GetUpdatedLiquidityPools: updated liquidity pools error") + t.Assert().Equal(uint32(100), t.stream.lastLedger) +} + +func (t *UpdateOrderBookStreamTestSuite) mockUpdate() { + t.stream.lastLedger = 100 + + t.graph.On("Discard").Return().Once() + sellerID := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + offer := history.Offer{OfferID: 1, SellerID: sellerID, LastModifiedLedger: 101} + offerEntry := []xdr.OfferEntry{{ + SellerId: xdr.MustAddress(sellerID), + OfferId: 1, + }} + otherOffer := history.Offer{OfferID: 20, SellerID: sellerID, LastModifiedLedger: 102} + otherOfferEntry := []xdr.OfferEntry{{ + SellerId: xdr.MustAddress(sellerID), + OfferId: 20, + }} + deletedOffer := history.Offer{OfferID: 30, SellerID: sellerID, LastModifiedLedger: 103, Deleted: true} + offers := []history.Offer{offer, otherOffer, deletedOffer} + t.historyQ.MockQOffers.On("GetUpdatedOffers", t.ctx, t.stream.lastLedger). + Return(offers, nil). + Once() + + t.historyQ.MockQLiquidityPools.On("GetUpdatedLiquidityPools", t.ctx, t.stream.lastLedger). + Return([]history.LiquidityPool{}, nil). + Once() + + t.graph.On("AddOffers", offerEntry).Return().Once() + t.graph.On("AddOffers", otherOfferEntry).Return().Once() + t.graph.On("RemoveOffer", xdr.Int64(deletedOffer.OfferID)).Return(t.graph).Once() +} + +func (t *UpdateOrderBookStreamTestSuite) TestApplyUpdatesError() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + + t.mockUpdate() + + t.graph.On("Apply", status.LastIngestedLedger). + Return(fmt.Errorf("apply error")). + Once() + + _, err := t.stream.update(t.ctx, status) + t.Assert().EqualError(err, "Error applying changes to order book: apply error") + t.Assert().Equal(uint32(100), t.stream.lastLedger) +} + +func (t *UpdateOrderBookStreamTestSuite) TestApplyUpdatesSucceeds() { + status := ingestionStatus{ + HistoryConsistentWithState: true, + StateInvalid: false, + LastIngestedLedger: 201, + LastOfferCompactionLedger: 100, + LastLiquidityPoolCompactionLedger: 100, + } + + t.mockUpdate() + + t.graph.On("Apply", status.LastIngestedLedger). + Return(nil). 
+ Once() + + reset, err := t.stream.update(t.ctx, status) + t.Assert().NoError(err) + t.Assert().Equal(status.LastIngestedLedger, t.stream.lastLedger) + t.Assert().False(reset) +} + +type VerifyOffersStreamTestSuite struct { + suite.Suite + ctx context.Context + historyQ *mockDBQ + graph *mockOrderBookGraph + stream *OrderBookStream +} + +func TestVerifyOffersStreamTestSuite(t *testing.T) { + suite.Run(t, new(VerifyOffersStreamTestSuite)) +} + +func (t *VerifyOffersStreamTestSuite) SetupTest() { + t.ctx = context.Background() + t.historyQ = &mockDBQ{} + t.graph = &mockOrderBookGraph{} + t.stream = NewOrderBookStream(t.historyQ, t.graph) + + sellerID := "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML" + otherSellerID := "GAXI33UCLQTCKM2NMRBS7XYBR535LLEVAHL5YBN4FTCB4HZHT7ZA5CVK" + t.graph.On("Offers").Return([]xdr.OfferEntry{ + { + SellerId: xdr.MustAddress(sellerID), + OfferId: 1, + Selling: xdr.MustNewNativeAsset(), + Buying: xdr.MustNewCreditAsset("USD", sellerID), + Amount: 123, + Price: xdr.Price{ + N: 1, + D: 2, + }, + Flags: 1, + Ext: xdr.OfferEntryExt{}, + }, + { + SellerId: xdr.MustAddress(otherSellerID), + OfferId: 3, + Selling: xdr.MustNewCreditAsset("EUR", sellerID), + Buying: xdr.MustNewCreditAsset("CHF", sellerID), + Amount: 9, + Price: xdr.Price{ + N: 3, + D: 1, + }, + Flags: 0, + Ext: xdr.OfferEntryExt{}, + }, + }).Once() +} + +func (t *VerifyOffersStreamTestSuite) TearDownTest() { + t.historyQ.AssertExpectations(t.T()) + t.graph.AssertExpectations(t.T()) +} + +func (t *VerifyOffersStreamTestSuite) TestStreamAllOffersError() { + t.historyQ.On("StreamAllOffers", t.ctx, mock.Anything). + Return(fmt.Errorf("offers error")). + Once() + + offersOk, err := t.stream.verifyAllOffers(t.ctx, t.graph.Offers()) + t.Assert().EqualError(err, "Error loading all offers for orderbook verification: offers error") + t.Assert().False(offersOk) +} + +func (t *VerifyOffersStreamTestSuite) TestEmptyDBOffers() { + t.historyQ.On("StreamAllOffers", t.ctx, mock.Anything).Return(nil).Once() + + offersOk, err := t.stream.verifyAllOffers(t.ctx, t.graph.Offers()) + t.Assert().NoError(err) + t.Assert().False(offersOk) +} + +func (t *VerifyOffersStreamTestSuite) TestLengthMismatch() { + offers := []history.Offer{ + { + OfferID: 1, + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + SellingAsset: xdr.MustNewNativeAsset(), + BuyingAsset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Amount: 123, + Pricen: 1, + Priced: 2, + Price: 0.5, + Flags: 1, + Deleted: false, + LastModifiedLedger: 1, + }, + } + t.historyQ.On("StreamAllOffers", t.ctx, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + callback := args.Get(1).(func(offer history.Offer) error) + for idx := range offers { + callback(offers[idx]) + } + }). 
+ Once() + + offersOk, err := t.stream.verifyAllOffers(t.ctx, t.graph.Offers()) + t.Assert().NoError(err) + t.Assert().False(offersOk) +} + +func (t *VerifyOffersStreamTestSuite) TestContentMismatch() { + offers := []history.Offer{ + { + OfferID: 1, + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + SellingAsset: xdr.MustNewNativeAsset(), + BuyingAsset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Amount: 123, + Pricen: 1, + Priced: 2, + Price: 0.5, + Flags: 1, + Deleted: false, + LastModifiedLedger: 1, + }, + { + OfferID: 3, + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + SellingAsset: xdr.MustNewNativeAsset(), + BuyingAsset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Amount: 123, + Pricen: 1, + Priced: 2, + Price: 0.5, + Flags: 1, + Deleted: false, + LastModifiedLedger: 1, + }, + } + + t.historyQ.On("StreamAllOffers", t.ctx, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + callback := args.Get(1).(func(offer history.Offer) error) + for idx := range offers { + callback(offers[idx]) + } + }). + Once() + + t.stream.lastLedger = 300 + offersOk, err := t.stream.verifyAllOffers(t.ctx, t.graph.Offers()) + t.Assert().NoError(err) + t.Assert().False(offersOk) +} + +func (t *VerifyOffersStreamTestSuite) TestSuccess() { + offers := []history.Offer{ + { + OfferID: 1, + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + SellingAsset: xdr.MustNewNativeAsset(), + BuyingAsset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Amount: 123, + Pricen: 1, + Priced: 2, + Price: 0.5, + Flags: 1, + Deleted: false, + LastModifiedLedger: 1, + }, + { + OfferID: 3, + SellerID: "GAXI33UCLQTCKM2NMRBS7XYBR535LLEVAHL5YBN4FTCB4HZHT7ZA5CVK", + SellingAsset: xdr.MustNewCreditAsset("EUR", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + BuyingAsset: xdr.MustNewCreditAsset("CHF", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Amount: 9, + Pricen: 3, + Priced: 1, + Price: 3, + Flags: 0, + Deleted: false, + LastModifiedLedger: 1, + }, + } + t.historyQ.On("StreamAllOffers", t.ctx, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + callback := args.Get(1).(func(offer history.Offer) error) + for idx := range offers { + callback(offers[idx]) + } + }). 
+ Once() + + offersOk, err := t.stream.verifyAllOffers(t.ctx, t.graph.Offers()) + t.Assert().NoError(err) + t.Assert().True(offersOk) +} + +type VerifyLiquidityPoolsStreamTestSuite struct { + suite.Suite + ctx context.Context + historyQ *mockDBQ + graph *mockOrderBookGraph + stream *OrderBookStream +} + +func TestVerifyLiquidityPoolsStreamTestSuite(t *testing.T) { + suite.Run(t, new(VerifyLiquidityPoolsStreamTestSuite)) +} + +func (t *VerifyLiquidityPoolsStreamTestSuite) SetupTest() { + t.ctx = context.Background() + t.historyQ = &mockDBQ{} + t.graph = &mockOrderBookGraph{} + t.stream = NewOrderBookStream(t.historyQ, t.graph) + + t.graph.On("LiquidityPools").Return([]xdr.LiquidityPoolEntry{ + { + LiquidityPoolId: xdr.PoolId{1, 2, 3}, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewNativeAsset(), + AssetB: xdr.MustNewCreditAsset("USD", issuer.Address()), + Fee: xdr.LiquidityPoolFeeV18, + }, + ReserveA: 789, + ReserveB: 456, + TotalPoolShares: 11, + PoolSharesTrustLineCount: 13, + }, + }, + }, + { + LiquidityPoolId: xdr.PoolId{4, 5, 6}, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewNativeAsset(), + AssetB: xdr.MustNewCreditAsset("EUR", issuer.Address()), + Fee: xdr.LiquidityPoolFeeV18, + }, + ReserveA: 19, + ReserveB: 1234, + TotalPoolShares: 456, + PoolSharesTrustLineCount: 90, + }, + }, + }, + }).Once() +} + +func (t *VerifyLiquidityPoolsStreamTestSuite) TearDownTest() { + t.historyQ.AssertExpectations(t.T()) + t.graph.AssertExpectations(t.T()) +} + +func (t *VerifyLiquidityPoolsStreamTestSuite) TestStreamAllLiquidityPoolsError() { + t.historyQ.MockQLiquidityPools.On("StreamAllLiquidityPools", t.ctx, mock.Anything). + Return(fmt.Errorf("liquidity pools error")). + Once() + + liquidityPoolsOk, err := t.stream.verifyAllLiquidityPools(t.ctx, t.graph.LiquidityPools()) + t.Assert().EqualError(err, "Error loading all liquidity pools for orderbook verification: liquidity pools error") + t.Assert().False(liquidityPoolsOk) +} + +func (t *VerifyLiquidityPoolsStreamTestSuite) TestEmptyDBOffers() { + t.historyQ.MockQLiquidityPools.On("StreamAllLiquidityPools", t.ctx, mock.Anything). + Return(nil). + Once() + + liquidityPoolsOk, err := t.stream.verifyAllLiquidityPools(t.ctx, t.graph.LiquidityPools()) + t.Assert().NoError(err) + t.Assert().False(liquidityPoolsOk) +} + +func (t *VerifyLiquidityPoolsStreamTestSuite) TestLengthMismatch() { + liquidityPools := []history.LiquidityPool{ + { + PoolID: processors.PoolIDToString(xdr.PoolId{1, 2, 3}), + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: xdr.LiquidityPoolFeeV18, + TrustlineCount: 13, + ShareCount: 11, + AssetReserves: history.LiquidityPoolAssetReserves{ + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewNativeAsset(), + Reserve: 789, + }, + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewCreditAsset("USD", issuer.Address()), + Reserve: 456, + }, + }, + LastModifiedLedger: 100, + Deleted: false, + }, + } + + t.historyQ.MockQLiquidityPools.On("StreamAllLiquidityPools", t.ctx, mock.Anything). + Return(nil). 
+ Run(func(args mock.Arguments) { + callback := args.Get(1).(func(offer history.LiquidityPool) error) + for idx := range liquidityPools { + callback(liquidityPools[idx]) + } + }). + Once() + + liquidityPoolsOk, err := t.stream.verifyAllLiquidityPools(t.ctx, t.graph.LiquidityPools()) + t.Assert().NoError(err) + t.Assert().False(liquidityPoolsOk) +} + +func (t *VerifyLiquidityPoolsStreamTestSuite) TestContentMismatch() { + liquidityPools := []history.LiquidityPool{ + { + PoolID: processors.PoolIDToString(xdr.PoolId{1, 2, 3}), + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: xdr.LiquidityPoolFeeV18, + TrustlineCount: 0, + ShareCount: 11, + AssetReserves: history.LiquidityPoolAssetReserves{ + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewNativeAsset(), + Reserve: 789, + }, + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewCreditAsset("USD", issuer.Address()), + Reserve: 456, + }, + }, + LastModifiedLedger: 100, + Deleted: false, + }, + { + PoolID: processors.PoolIDToString(xdr.PoolId{4, 5, 6}), + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: xdr.LiquidityPoolFeeV18, + TrustlineCount: 90, + ShareCount: 456, + AssetReserves: history.LiquidityPoolAssetReserves{ + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewNativeAsset(), + Reserve: 19, + }, + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewCreditAsset("EUR", issuer.Address()), + Reserve: 1234, + }, + }, + LastModifiedLedger: 50, + Deleted: false, + }, + } + t.historyQ.MockQLiquidityPools.On("StreamAllLiquidityPools", t.ctx, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + callback := args.Get(1).(func(offer history.LiquidityPool) error) + for idx := range liquidityPools { + callback(liquidityPools[idx]) + } + }). + Once() + + liquidityPoolsOk, err := t.stream.verifyAllLiquidityPools(t.ctx, t.graph.LiquidityPools()) + t.Assert().NoError(err) + t.Assert().False(liquidityPoolsOk) +} + +func (t *VerifyLiquidityPoolsStreamTestSuite) TestSuccess() { + liquidityPools := []history.LiquidityPool{ + { + PoolID: processors.PoolIDToString(xdr.PoolId{1, 2, 3}), + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: xdr.LiquidityPoolFeeV18, + TrustlineCount: 13, + ShareCount: 11, + AssetReserves: history.LiquidityPoolAssetReserves{ + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewNativeAsset(), + Reserve: 789, + }, + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewCreditAsset("USD", issuer.Address()), + Reserve: 456, + }, + }, + LastModifiedLedger: 100, + Deleted: false, + }, + { + PoolID: processors.PoolIDToString(xdr.PoolId{4, 5, 6}), + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: xdr.LiquidityPoolFeeV18, + TrustlineCount: 90, + ShareCount: 456, + AssetReserves: history.LiquidityPoolAssetReserves{ + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewNativeAsset(), + Reserve: 19, + }, + history.LiquidityPoolAssetReserve{ + Asset: xdr.MustNewCreditAsset("EUR", issuer.Address()), + Reserve: 1234, + }, + }, + LastModifiedLedger: 50, + Deleted: false, + }, + } + t.historyQ.MockQLiquidityPools.On("StreamAllLiquidityPools", t.ctx, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + callback := args.Get(1).(func(history.LiquidityPool) error) + for idx := range liquidityPools { + callback(liquidityPools[idx]) + } + }). 
+		Once()
+
+	offersOk, err := t.stream.verifyAllLiquidityPools(t.ctx, t.graph.LiquidityPools())
+	t.Assert().NoError(err)
+	t.Assert().True(offersOk)
+}
diff --git a/services/horizon/internal/ingest/parallel.go b/services/horizon/internal/ingest/parallel.go
new file mode 100644
index 0000000000..547098ed0d
--- /dev/null
+++ b/services/horizon/internal/ingest/parallel.go
@@ -0,0 +1,172 @@
+package ingest
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/stellar/go/services/horizon/internal/db2/history"
+	"github.com/stellar/go/support/errors"
+	logpkg "github.com/stellar/go/support/log"
+)
+
+const (
+	historyCheckpointLedgerInterval = 64
+	minBatchSize = historyCheckpointLedgerInterval
+)
+
+type rangeError struct {
+	err error
+	ledgerRange history.LedgerRange
+}
+
+func (e rangeError) Error() string {
+	return fmt.Sprintf("error when processing [%d, %d] range: %s", e.ledgerRange.StartSequence, e.ledgerRange.EndSequence, e.err)
+}
+
+type ParallelSystems struct {
+	config Config
+	workerCount uint
+	systemFactory func(Config) (System, error)
+}
+
+func NewParallelSystems(config Config, workerCount uint) (*ParallelSystems, error) {
+	// Leaving this here because it is used in tests; will update after a code review.
+	return newParallelSystems(config, workerCount, NewSystem)
+}
+
+// newParallelSystems is the private version of NewParallelSystems, allowing a mock system to be injected in tests.
+func newParallelSystems(config Config, workerCount uint, systemFactory func(Config) (System, error)) (*ParallelSystems, error) {
+	if workerCount < 1 {
+		return nil, errors.New("workerCount must be > 0")
+	}
+
+	return &ParallelSystems{
+		config: config,
+		workerCount: workerCount,
+		systemFactory: systemFactory,
+	}, nil
+}
+
+func (ps *ParallelSystems) runReingestWorker(s System, stop <-chan struct{}, reingestJobQueue <-chan history.LedgerRange) rangeError {
+
+	for {
+		select {
+		case <-stop:
+			return rangeError{}
+		case reingestRange := <-reingestJobQueue:
+			err := s.ReingestRange([]history.LedgerRange{reingestRange}, false)
+			if err != nil {
+				return rangeError{
+					err: err,
+					ledgerRange: reingestRange,
+				}
+			}
+			log.WithFields(logpkg.F{"from": reingestRange.StartSequence, "to": reingestRange.EndSequence}).Info("successfully reingested range")
+		}
+	}
+}
+
+func enqueueReingestTasks(ledgerRanges []history.LedgerRange, batchSize uint32, stop <-chan struct{}, reingestJobQueue chan<- history.LedgerRange) {
+	for _, cur := range ledgerRanges {
+		for subRangeFrom := cur.StartSequence; subRangeFrom < cur.EndSequence; {
+			// job queuing
+			subRangeTo := subRangeFrom + (batchSize - 1) // we subtract one because both from and to are part of the batch
+			if subRangeTo > cur.EndSequence {
+				subRangeTo = cur.EndSequence
+			}
+			select {
+			case <-stop:
+				return
+			case reingestJobQueue <- history.LedgerRange{StartSequence: subRangeFrom, EndSequence: subRangeTo}:
+			}
+			subRangeFrom = subRangeTo + 1
+		}
+	}
+}
+
+func calculateParallelLedgerBatchSize(rangeSize uint32, batchSizeSuggestion uint32, workerCount uint) uint32 {
+	batchSize := batchSizeSuggestion
+	if batchSize == 0 || rangeSize/batchSize < uint32(workerCount) {
+		// let's try to make use of all the workers
+		batchSize = rangeSize / uint32(workerCount)
+	}
+	// Use a minimum batch size so that each batch is worth the per-batch overhead
+	if batchSize < minBatchSize {
+		batchSize = minBatchSize
+	}
+
+	// Also, round the batch size down to the nearest multiple of 64 (the history checkpoint ledger interval)
+	return (batchSize / historyCheckpointLedgerInterval) * historyCheckpointLedgerInterval
+}
+
+func totalRangeSize(ledgerRanges
[]history.LedgerRange) uint32 { + var sum uint32 + for _, ledgerRange := range ledgerRanges { + sum += ledgerRange.EndSequence - ledgerRange.StartSequence + 1 + } + return sum +} + +func (ps *ParallelSystems) ReingestRange(ledgerRanges []history.LedgerRange, batchSizeSuggestion uint32) error { + var ( + batchSize = calculateParallelLedgerBatchSize(totalRangeSize(ledgerRanges), batchSizeSuggestion, ps.workerCount) + reingestJobQueue = make(chan history.LedgerRange) + wg sync.WaitGroup + + // stopOnce is used to close the stop channel once: closing a closed channel panics and it can happen in case + // of errors in multiple ranges. + stopOnce sync.Once + stop = make(chan struct{}) + + lowestRangeErrMutex sync.Mutex + // lowestRangeErr is an error of the failed range with the lowest starting ledger sequence that is used to tell + // the user which range to reingest in case of errors. We use that fact that System.ReingestRange is blocking, + // jobs are sent to a queue (unbuffered channel) in sequence and there is a WaitGroup waiting for all the workers + // to exit. + // Because of this when we reach `wg.Wait()` all jobs previously sent to a channel are processed (either success + // or failure). In case of a failure we save the range with the smallest sequence number because this is where + // the user needs to start again to prevent the gaps. + lowestRangeErr *rangeError + ) + if err := validateRanges(ledgerRanges); err != nil { + return err + } + + for i := uint(0); i < ps.workerCount; i++ { + wg.Add(1) + s, err := ps.systemFactory(ps.config) + if err != nil { + return errors.Wrap(err, "error creating new system") + } + go func() { + defer wg.Done() + rangeErr := ps.runReingestWorker(s, stop, reingestJobQueue) + if rangeErr.err != nil { + log.WithError(rangeErr).Error("error in reingest worker") + lowestRangeErrMutex.Lock() + if lowestRangeErr == nil || lowestRangeErr.ledgerRange.StartSequence > rangeErr.ledgerRange.StartSequence { + lowestRangeErr = &rangeErr + } + lowestRangeErrMutex.Unlock() + stopOnce.Do(func() { + close(stop) + }) + return + } + }() + } + + enqueueReingestTasks(ledgerRanges, batchSize, stop, reingestJobQueue) + + stopOnce.Do(func() { + close(stop) + }) + wg.Wait() + close(reingestJobQueue) + + if lowestRangeErr != nil { + lastLedger := ledgerRanges[len(ledgerRanges)-1].EndSequence + return errors.Wrapf(lowestRangeErr, "job failed, recommended restart range: [%d, %d]", lowestRangeErr.ledgerRange.StartSequence, lastLedger) + } + return nil +} diff --git a/services/horizon/internal/ingest/parallel_test.go b/services/horizon/internal/ingest/parallel_test.go new file mode 100644 index 0000000000..27ab0c459f --- /dev/null +++ b/services/horizon/internal/ingest/parallel_test.go @@ -0,0 +1,119 @@ +package ingest + +import ( + "math/rand" + "sort" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" +) + +func TestCalculateParallelLedgerBatchSize(t *testing.T) { + assert.Equal(t, uint32(6656), calculateParallelLedgerBatchSize(20096, 20096, 3)) + assert.Equal(t, uint32(4992), calculateParallelLedgerBatchSize(20096, 20096, 4)) + assert.Equal(t, uint32(4992), calculateParallelLedgerBatchSize(20096, 0, 4)) + assert.Equal(t, uint32(64), calculateParallelLedgerBatchSize(64, 256, 4)) + assert.Equal(t, uint32(64), calculateParallelLedgerBatchSize(64, 32, 4)) + assert.Equal(t, uint32(64), calculateParallelLedgerBatchSize(2, 
256, 4)) + assert.Equal(t, uint32(64), calculateParallelLedgerBatchSize(20096, 64, 1)) +} + +func TestParallelReingestRange(t *testing.T) { + config := Config{} + var ( + rangesCalled []history.LedgerRange + m sync.Mutex + ) + result := &mockSystem{} + result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), mock.AnythingOfType("bool")).Run( + func(args mock.Arguments) { + m.Lock() + defer m.Unlock() + rangesCalled = append(rangesCalled, args.Get(0).([]history.LedgerRange)...) + // simulate call + time.Sleep(time.Millisecond * time.Duration(10+rand.Int31n(50))) + }).Return(error(nil)) + factory := func(c Config) (System, error) { + return result, nil + } + system, err := newParallelSystems(config, 3, factory) + assert.NoError(t, err) + err = system.ReingestRange([]history.LedgerRange{{1, 2050}}, 258) + assert.NoError(t, err) + + sort.Slice(rangesCalled, func(i, j int) bool { + return rangesCalled[i].StartSequence < rangesCalled[j].StartSequence + }) + expected := []history.LedgerRange{ + {StartSequence: 1, EndSequence: 256}, {StartSequence: 257, EndSequence: 512}, {StartSequence: 513, EndSequence: 768}, {StartSequence: 769, EndSequence: 1024}, {StartSequence: 1025, EndSequence: 1280}, + {StartSequence: 1281, EndSequence: 1536}, {StartSequence: 1537, EndSequence: 1792}, {StartSequence: 1793, EndSequence: 2048}, {StartSequence: 2049, EndSequence: 2050}, + } + assert.Equal(t, expected, rangesCalled) + + rangesCalled = nil + system, err = newParallelSystems(config, 1, factory) + assert.NoError(t, err) + err = system.ReingestRange([]history.LedgerRange{{1, 1024}}, 64) + result.AssertExpectations(t) + expected = []history.LedgerRange{ + {StartSequence: 1, EndSequence: 64}, {StartSequence: 65, EndSequence: 128}, {StartSequence: 129, EndSequence: 192}, {StartSequence: 193, EndSequence: 256}, {StartSequence: 257, EndSequence: 320}, + {StartSequence: 321, EndSequence: 384}, {StartSequence: 385, EndSequence: 448}, {StartSequence: 449, EndSequence: 512}, {StartSequence: 513, EndSequence: 576}, {StartSequence: 577, EndSequence: 640}, + {StartSequence: 641, EndSequence: 704}, {StartSequence: 705, EndSequence: 768}, {StartSequence: 769, EndSequence: 832}, {StartSequence: 833, EndSequence: 896}, {StartSequence: 897, EndSequence: 960}, + {StartSequence: 961, EndSequence: 1024}, + } + assert.NoError(t, err) + assert.Equal(t, expected, rangesCalled) +} + +func TestParallelReingestRangeError(t *testing.T) { + config := Config{} + result := &mockSystem{} + // Fail on the second range + result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, mock.AnythingOfType("bool")).Return(errors.New("failed because of foo")) + result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), mock.AnythingOfType("bool")).Return(error(nil)) + factory := func(c Config) (System, error) { + return result, nil + } + system, err := newParallelSystems(config, 3, factory) + assert.NoError(t, err) + err = system.ReingestRange([]history.LedgerRange{{1, 2050}}, 258) + result.AssertExpectations(t) + assert.Error(t, err) + assert.Equal(t, "job failed, recommended restart range: [1537, 2050]: error when processing [1537, 1792] range: failed because of foo", err.Error()) +} + +func TestParallelReingestRangeErrorInEarlierJob(t *testing.T) { + config := Config{} + var wg sync.WaitGroup + wg.Add(1) + result := &mockSystem{} + // Fail on an lower subrange after the first error + result.On("ReingestRange", []history.LedgerRange{{1025, 1280}}, mock.AnythingOfType("bool")).Run(func(mock.Arguments) { + // Wait for a 
more recent range to error + wg.Wait() + // This sleep should help making sure the result of this range is processed later than the one below + // (there are no guarantees without instrumenting ReingestRange(), but that's too complicated) + time.Sleep(50 * time.Millisecond) + }).Return(errors.New("failed because of foo")) + result.On("ReingestRange", []history.LedgerRange{{1537, 1792}}, mock.AnythingOfType("bool")).Run(func(mock.Arguments) { + wg.Done() + }).Return(errors.New("failed because of bar")) + result.On("ReingestRange", mock.AnythingOfType("[]history.LedgerRange"), mock.AnythingOfType("bool")).Return(error(nil)) + + factory := func(c Config) (System, error) { + return result, nil + } + system, err := newParallelSystems(config, 3, factory) + assert.NoError(t, err) + err = system.ReingestRange([]history.LedgerRange{{1, 2050}}, 258) + result.AssertExpectations(t) + assert.Error(t, err) + assert.Equal(t, "job failed, recommended restart range: [1025, 2050]: error when processing [1025, 1280] range: failed because of foo", err.Error()) + +} diff --git a/services/horizon/internal/ingest/processor_runner.go b/services/horizon/internal/ingest/processor_runner.go new file mode 100644 index 0000000000..31a08b8c97 --- /dev/null +++ b/services/horizon/internal/ingest/processor_runner.go @@ -0,0 +1,341 @@ +package ingest + +import ( + "bytes" + "context" + "fmt" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest/processors" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type ingestionSource int + +const ( + _ = iota + historyArchiveSource = ingestionSource(iota) + ledgerSource = ingestionSource(iota) + logFrequency = 50000 +) + +type horizonChangeProcessor interface { + processors.ChangeProcessor + Commit(context.Context) error +} + +type horizonTransactionProcessor interface { + processors.LedgerTransactionProcessor + Commit(context.Context) error +} + +type statsChangeProcessor struct { + *ingest.StatsChangeProcessor +} + +func (statsChangeProcessor) Commit(ctx context.Context) error { + return nil +} + +type statsLedgerTransactionProcessor struct { + *processors.StatsLedgerTransactionProcessor +} + +func (statsLedgerTransactionProcessor) Commit(ctx context.Context) error { + return nil +} + +type ledgerStats struct { + changeStats ingest.StatsChangeProcessorResults + changeDurations processorsRunDurations + transactionStats processors.StatsLedgerTransactionProcessorResults + transactionDurations processorsRunDurations + tradeStats processors.TradeStats +} + +type ProcessorRunnerInterface interface { + SetHistoryAdapter(historyAdapter historyArchiveAdapterInterface) + EnableMemoryStatsLogging() + DisableMemoryStatsLogging() + RunGenesisStateIngestion() (ingest.StatsChangeProcessorResults, error) + RunHistoryArchiveIngestion( + checkpointLedger uint32, + ledgerProtocolVersion uint32, + bucketListHash xdr.Hash, + ) (ingest.StatsChangeProcessorResults, error) + RunTransactionProcessorsOnLedger(ledger xdr.LedgerCloseMeta) ( + transactionStats processors.StatsLedgerTransactionProcessorResults, + transactionDurations processorsRunDurations, + tradeStats processors.TradeStats, + err error, + ) + RunAllProcessorsOnLedger(ledger xdr.LedgerCloseMeta) ( + stats ledgerStats, + err error, + ) +} + +var _ ProcessorRunnerInterface = (*ProcessorRunner)(nil) + +type ProcessorRunner struct { + config Config + + ctx context.Context + historyQ history.IngestionQ + 
historyAdapter historyArchiveAdapterInterface
+	logMemoryStats bool
+}
+
+func (s *ProcessorRunner) SetHistoryAdapter(historyAdapter historyArchiveAdapterInterface) {
+	s.historyAdapter = historyAdapter
+}
+
+func (s *ProcessorRunner) EnableMemoryStatsLogging() {
+	s.logMemoryStats = true
+}
+
+func (s *ProcessorRunner) DisableMemoryStatsLogging() {
+	s.logMemoryStats = false
+}
+
+func buildChangeProcessor(
+	historyQ history.IngestionQ,
+	changeStats *ingest.StatsChangeProcessor,
+	source ingestionSource,
+	ledgerSequence uint32,
+) *groupChangeProcessors {
+	statsChangeProcessor := &statsChangeProcessor{
+		StatsChangeProcessor: changeStats,
+	}
+
+	useLedgerCache := source == ledgerSource
+	return newGroupChangeProcessors([]horizonChangeProcessor{
+		statsChangeProcessor,
+		processors.NewAccountDataProcessor(historyQ),
+		processors.NewAccountsProcessor(historyQ),
+		processors.NewOffersProcessor(historyQ, ledgerSequence),
+		processors.NewAssetStatsProcessor(historyQ, useLedgerCache),
+		processors.NewSignersProcessor(historyQ, useLedgerCache),
+		processors.NewTrustLinesProcessor(historyQ),
+		processors.NewClaimableBalancesChangeProcessor(historyQ),
+		processors.NewLiquidityPoolsChangeProcessor(historyQ, ledgerSequence),
+	})
+}
+
+func (s *ProcessorRunner) buildTransactionProcessor(
+	ledgerTransactionStats *processors.StatsLedgerTransactionProcessor,
+	tradeProcessor *processors.TradeProcessor,
+	ledger xdr.LedgerHeaderHistoryEntry,
+) *groupTransactionProcessors {
+	statsLedgerTransactionProcessor := &statsLedgerTransactionProcessor{
+		StatsLedgerTransactionProcessor: ledgerTransactionStats,
+	}
+	*tradeProcessor = *processors.NewTradeProcessor(s.historyQ, ledger)
+	sequence := uint32(ledger.Header.LedgerSeq)
+	return newGroupTransactionProcessors([]horizonTransactionProcessor{
+		statsLedgerTransactionProcessor,
+		processors.NewEffectProcessor(s.historyQ, sequence),
+		processors.NewLedgerProcessor(s.historyQ, ledger, CurrentVersion),
+		processors.NewOperationProcessor(s.historyQ, sequence),
+		tradeProcessor,
+		processors.NewParticipantsProcessor(s.historyQ, sequence),
+		processors.NewTransactionProcessor(s.historyQ, sequence),
+		processors.NewClaimableBalancesTransactionProcessor(s.historyQ, sequence),
+		processors.NewLiquidityPoolsTransactionProcessor(s.historyQ, sequence),
+	})
+}
+
+// checkIfProtocolVersionSupported checks whether this Horizon version supports
+// the given ledger protocol version.
+func (s *ProcessorRunner) checkIfProtocolVersionSupported(ledgerProtocolVersion uint32) error {
+	if ledgerProtocolVersion > MaxSupportedProtocolVersion {
+		return fmt.Errorf(
+			"This Horizon version does not support protocol version %d. "+
+				"The latest supported protocol version is %d. Please upgrade to the latest Horizon version.",
+			ledgerProtocolVersion,
+			MaxSupportedProtocolVersion,
+		)
+	}
+
+	return nil
+}
+
+// validateBucketList checks that the bucket list hash in the history archive
+// matches the one in the corresponding ledger header in the stellar-core backend.
+// This gives you full security if the data in the stellar-core backend can be
+// trusted (e.g. you run it in your own infrastructure).
+// The hashes of the actual buckets of this HAS file are checked using
+// historyarchive.XdrStream.SetExpectedHash (this is done in
+// CheckpointChangeReader).
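+// A bucket list hash mismatch makes RunHistoryArchiveIngestion return an error
+// instead of ingesting the checkpoint state.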
+func (s *ProcessorRunner) validateBucketList(ledgerSequence uint32, ledgerBucketHashList xdr.Hash) error { + historyBucketListHash, err := s.historyAdapter.BucketListHash(ledgerSequence) + if err != nil { + return errors.Wrap(err, "Error getting bucket list hash") + } + + if !bytes.Equal(historyBucketListHash[:], ledgerBucketHashList[:]) { + return fmt.Errorf( + "Bucket list hash of history archive and ledger header does not match: %#x %#x", + historyBucketListHash, + ledgerBucketHashList, + ) + } + + return nil +} + +func (s *ProcessorRunner) RunGenesisStateIngestion() (ingest.StatsChangeProcessorResults, error) { + return s.RunHistoryArchiveIngestion(1, 0, xdr.Hash{}) +} + +func (s *ProcessorRunner) RunHistoryArchiveIngestion( + checkpointLedger uint32, + ledgerProtocolVersion uint32, + bucketListHash xdr.Hash, +) (ingest.StatsChangeProcessorResults, error) { + changeStats := ingest.StatsChangeProcessor{} + changeProcessor := buildChangeProcessor(s.historyQ, &changeStats, historyArchiveSource, checkpointLedger) + + if checkpointLedger == 1 { + if err := changeProcessor.ProcessChange(s.ctx, ingest.GenesisChange(s.config.NetworkPassphrase)); err != nil { + return changeStats.GetResults(), errors.Wrap(err, "Error ingesting genesis ledger") + } + } else { + if err := s.checkIfProtocolVersionSupported(ledgerProtocolVersion); err != nil { + return changeStats.GetResults(), errors.Wrap(err, "Error while checking for supported protocol version") + } + + if err := s.validateBucketList(checkpointLedger, bucketListHash); err != nil { + return changeStats.GetResults(), errors.Wrap(err, "Error validating bucket list from HAS") + } + + changeReader, err := s.historyAdapter.GetState(s.ctx, checkpointLedger) + if err != nil { + return changeStats.GetResults(), errors.Wrap(err, "Error creating HAS reader") + } + + defer changeReader.Close() + + log.WithField("sequence", checkpointLedger). 
+ Info("Processing entries from History Archive Snapshot") + + err = processors.StreamChanges(s.ctx, changeProcessor, newloggingChangeReader( + changeReader, + "historyArchive", + checkpointLedger, + logFrequency, + s.logMemoryStats, + )) + if err != nil { + return changeStats.GetResults(), errors.Wrap(err, "Error streaming changes from HAS") + } + } + + if err := changeProcessor.Commit(s.ctx); err != nil { + return changeStats.GetResults(), errors.Wrap(err, "Error commiting changes from processor") + } + + return changeStats.GetResults(), nil +} + +func (s *ProcessorRunner) runChangeProcessorOnLedger( + changeProcessor horizonChangeProcessor, ledger xdr.LedgerCloseMeta, +) error { + var changeReader ingest.ChangeReader + var err error + changeReader, err = ingest.NewLedgerChangeReaderFromLedgerCloseMeta(s.config.NetworkPassphrase, ledger) + if err != nil { + return errors.Wrap(err, "Error creating ledger change reader") + } + changeReader = newloggingChangeReader( + changeReader, + "ledger", + ledger.LedgerSequence(), + logFrequency, + s.logMemoryStats, + ) + if err = processors.StreamChanges(s.ctx, changeProcessor, changeReader); err != nil { + return errors.Wrap(err, "Error streaming changes from ledger") + } + + err = changeProcessor.Commit(s.ctx) + if err != nil { + return errors.Wrap(err, "Error commiting changes from processor") + } + + return nil +} + +func (s *ProcessorRunner) RunTransactionProcessorsOnLedger(ledger xdr.LedgerCloseMeta) ( + transactionStats processors.StatsLedgerTransactionProcessorResults, + transactionDurations processorsRunDurations, + tradeStats processors.TradeStats, + err error, +) { + var ( + ledgerTransactionStats processors.StatsLedgerTransactionProcessor + tradeProcessor processors.TradeProcessor + transactionReader *ingest.LedgerTransactionReader + ) + + transactionReader, err = ingest.NewLedgerTransactionReaderFromLedgerCloseMeta(s.config.NetworkPassphrase, ledger) + if err != nil { + err = errors.Wrap(err, "Error creating ledger reader") + return + } + + if err = s.checkIfProtocolVersionSupported(ledger.ProtocolVersion()); err != nil { + err = errors.Wrap(err, "Error while checking for supported protocol version") + return + } + + groupTransactionProcessors := s.buildTransactionProcessor( + &ledgerTransactionStats, &tradeProcessor, transactionReader.GetHeader()) + err = processors.StreamLedgerTransactions(s.ctx, groupTransactionProcessors, transactionReader) + if err != nil { + err = errors.Wrap(err, "Error streaming changes from ledger") + return + } + + err = groupTransactionProcessors.Commit(s.ctx) + if err != nil { + err = errors.Wrap(err, "Error committing changes from processor") + return + } + + transactionStats = ledgerTransactionStats.GetResults() + transactionDurations = groupTransactionProcessors.processorsRunDurations + tradeStats = tradeProcessor.GetStats() + return +} + +func (s *ProcessorRunner) RunAllProcessorsOnLedger(ledger xdr.LedgerCloseMeta) ( + stats ledgerStats, + err error, +) { + changeStatsProcessor := ingest.StatsChangeProcessor{} + + if err = s.checkIfProtocolVersionSupported(ledger.ProtocolVersion()); err != nil { + err = errors.Wrap(err, "Error while checking for supported protocol version") + return + } + + groupChangeProcessors := buildChangeProcessor(s.historyQ, &changeStatsProcessor, ledgerSource, ledger.LedgerSequence()) + err = s.runChangeProcessorOnLedger(groupChangeProcessors, ledger) + if err != nil { + return + } + + stats.changeStats = changeStatsProcessor.GetResults() + stats.changeDurations = 
groupChangeProcessors.processorsRunDurations + + stats.transactionStats, stats.transactionDurations, stats.tradeStats, err = + s.RunTransactionProcessorsOnLedger(ledger) + if err != nil { + return + } + + return +} diff --git a/services/horizon/internal/ingest/processor_runner_test.go b/services/horizon/internal/ingest/processor_runner_test.go new file mode 100644 index 0000000000..c602f67aff --- /dev/null +++ b/services/horizon/internal/ingest/processor_runner_test.go @@ -0,0 +1,343 @@ +package ingest + +import ( + "context" + "io" + "reflect" + "testing" + + "github.com/guregu/null" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/network" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest/processors" + "github.com/stellar/go/xdr" +) + +func TestProcessorRunnerRunHistoryArchiveIngestionGenesis(t *testing.T) { + ctx := context.Background() + maxBatchSize := 100000 + + q := &mockDBQ{} + + q.MockQAccounts.On("UpsertAccounts", ctx, []history.AccountEntry{ + { + LastModifiedLedger: 1, + AccountID: "GAAZI4TCR3TY5OJHCTJC2A4QSY6CJWJH5IAJTGKIN2ER7LBNVKOCCWN7", + Balance: int64(1000000000000000000), + SequenceNumber: 0, + MasterWeight: 1, + }, + }).Return(nil).Once() + + mockAccountSignersBatchInsertBuilder := &history.MockAccountSignersBatchInsertBuilder{} + defer mock.AssertExpectationsForObjects(t, mockAccountSignersBatchInsertBuilder) + mockAccountSignersBatchInsertBuilder.On("Add", ctx, history.AccountSigner{ + Account: "GAAZI4TCR3TY5OJHCTJC2A4QSY6CJWJH5IAJTGKIN2ER7LBNVKOCCWN7", + Signer: "GAAZI4TCR3TY5OJHCTJC2A4QSY6CJWJH5IAJTGKIN2ER7LBNVKOCCWN7", + Weight: 1, + Sponsor: null.String{}, + }).Return(nil).Once() + mockAccountSignersBatchInsertBuilder.On("Exec", ctx).Return(nil).Once() + q.MockQSigners.On("NewAccountSignersBatchInsertBuilder", maxBatchSize). + Return(mockAccountSignersBatchInsertBuilder).Once() + + q.MockQAssetStats.On("InsertAssetStats", ctx, []history.ExpAssetStat{}, 100000). + Return(nil) + + runner := ProcessorRunner{ + ctx: ctx, + config: Config{ + NetworkPassphrase: network.PublicNetworkPassphrase, + }, + historyQ: q, + } + + _, err := runner.RunGenesisStateIngestion() + assert.NoError(t, err) +} + +func TestProcessorRunnerRunHistoryArchiveIngestionHistoryArchive(t *testing.T) { + ctx := context.Background() + maxBatchSize := 100000 + + config := Config{ + NetworkPassphrase: network.PublicNetworkPassphrase, + } + + q := &mockDBQ{} + defer mock.AssertExpectationsForObjects(t, q) + historyAdapter := &mockHistoryArchiveAdapter{} + defer mock.AssertExpectationsForObjects(t, historyAdapter) + + bucketListHash := xdr.Hash([32]byte{0, 1, 2}) + historyAdapter.On("BucketListHash", uint32(63)).Return(bucketListHash, nil).Once() + + m := &ingest.MockChangeReader{} + m.On("Read").Return(ingest.GenesisChange(network.PublicNetworkPassphrase), nil).Once() + m.On("Read").Return(ingest.Change{}, io.EOF).Once() + m.On("Close").Return(nil).Once() + + historyAdapter. + On("GetState", ctx, uint32(63)). 
+ Return( + m, + nil, + ).Once() + + q.MockQAccounts.On("UpsertAccounts", ctx, []history.AccountEntry{ + { + LastModifiedLedger: 1, + AccountID: "GAAZI4TCR3TY5OJHCTJC2A4QSY6CJWJH5IAJTGKIN2ER7LBNVKOCCWN7", + Balance: int64(1000000000000000000), + SequenceNumber: 0, + MasterWeight: 1, + }, + }).Return(nil).Once() + + mockAccountSignersBatchInsertBuilder := &history.MockAccountSignersBatchInsertBuilder{} + defer mock.AssertExpectationsForObjects(t, mockAccountSignersBatchInsertBuilder) + mockAccountSignersBatchInsertBuilder.On("Add", ctx, history.AccountSigner{ + Account: "GAAZI4TCR3TY5OJHCTJC2A4QSY6CJWJH5IAJTGKIN2ER7LBNVKOCCWN7", + Signer: "GAAZI4TCR3TY5OJHCTJC2A4QSY6CJWJH5IAJTGKIN2ER7LBNVKOCCWN7", + Weight: 1, + }).Return(nil).Once() + mockAccountSignersBatchInsertBuilder.On("Exec", ctx).Return(nil).Once() + q.MockQSigners.On("NewAccountSignersBatchInsertBuilder", maxBatchSize). + Return(mockAccountSignersBatchInsertBuilder).Once() + + q.MockQAssetStats.On("InsertAssetStats", ctx, []history.ExpAssetStat{}, 100000). + Return(nil) + + runner := ProcessorRunner{ + ctx: ctx, + config: config, + historyQ: q, + historyAdapter: historyAdapter, + } + + _, err := runner.RunHistoryArchiveIngestion(63, MaxSupportedProtocolVersion, bucketListHash) + assert.NoError(t, err) +} + +func TestProcessorRunnerRunHistoryArchiveIngestionProtocolVersionNotSupported(t *testing.T) { + ctx := context.Background() + maxBatchSize := 100000 + + config := Config{ + NetworkPassphrase: network.PublicNetworkPassphrase, + } + + q := &mockDBQ{} + defer mock.AssertExpectationsForObjects(t, q) + historyAdapter := &mockHistoryArchiveAdapter{} + defer mock.AssertExpectationsForObjects(t, historyAdapter) + + // Batches + + mockAccountSignersBatchInsertBuilder := &history.MockAccountSignersBatchInsertBuilder{} + defer mock.AssertExpectationsForObjects(t, mockAccountSignersBatchInsertBuilder) + q.MockQSigners.On("NewAccountSignersBatchInsertBuilder", maxBatchSize). + Return(mockAccountSignersBatchInsertBuilder).Once() + + q.MockQAssetStats.On("InsertAssetStats", ctx, []history.ExpAssetStat{}, 100000). + Return(nil) + + runner := ProcessorRunner{ + ctx: ctx, + config: config, + historyQ: q, + historyAdapter: historyAdapter, + } + + _, err := runner.RunHistoryArchiveIngestion(100, 200, xdr.Hash{}) + assert.EqualError(t, err, "Error while checking for supported protocol version: This Horizon version does not support protocol version 200. The latest supported protocol version is 18. Please upgrade to the latest Horizon version.") +} + +func TestProcessorRunnerBuildChangeProcessor(t *testing.T) { + ctx := context.Background() + maxBatchSize := 100000 + + q := &mockDBQ{} + defer mock.AssertExpectationsForObjects(t, q) + + // Twice = checking ledgerSource and historyArchiveSource + q.MockQSigners.On("NewAccountSignersBatchInsertBuilder", maxBatchSize). 
+ Return(&history.MockAccountSignersBatchInsertBuilder{}).Twice() + runner := ProcessorRunner{ + ctx: ctx, + historyQ: q, + } + + stats := &ingest.StatsChangeProcessor{} + processor := buildChangeProcessor(runner.historyQ, stats, ledgerSource, 123) + assert.IsType(t, &groupChangeProcessors{}, processor) + + assert.IsType(t, &statsChangeProcessor{}, processor.processors[0]) + assert.IsType(t, &processors.AccountDataProcessor{}, processor.processors[1]) + assert.IsType(t, &processors.AccountsProcessor{}, processor.processors[2]) + assert.IsType(t, &processors.OffersProcessor{}, processor.processors[3]) + assert.IsType(t, &processors.AssetStatsProcessor{}, processor.processors[4]) + assert.True(t, reflect.ValueOf(processor.processors[4]). + Elem().FieldByName("useLedgerEntryCache").Bool()) + assert.IsType(t, &processors.SignersProcessor{}, processor.processors[5]) + assert.True(t, reflect.ValueOf(processor.processors[5]). + Elem().FieldByName("useLedgerEntryCache").Bool()) + assert.IsType(t, &processors.TrustLinesProcessor{}, processor.processors[6]) + + runner = ProcessorRunner{ + ctx: ctx, + historyQ: q, + } + + processor = buildChangeProcessor(runner.historyQ, stats, historyArchiveSource, 456) + assert.IsType(t, &groupChangeProcessors{}, processor) + + assert.IsType(t, &statsChangeProcessor{}, processor.processors[0]) + assert.IsType(t, &processors.AccountDataProcessor{}, processor.processors[1]) + assert.IsType(t, &processors.AccountsProcessor{}, processor.processors[2]) + assert.IsType(t, &processors.OffersProcessor{}, processor.processors[3]) + assert.IsType(t, &processors.AssetStatsProcessor{}, processor.processors[4]) + assert.False(t, reflect.ValueOf(processor.processors[4]). + Elem().FieldByName("useLedgerEntryCache").Bool()) + assert.IsType(t, &processors.SignersProcessor{}, processor.processors[5]) + assert.False(t, reflect.ValueOf(processor.processors[5]). + Elem().FieldByName("useLedgerEntryCache").Bool()) + assert.IsType(t, &processors.TrustLinesProcessor{}, processor.processors[6]) +} + +func TestProcessorRunnerBuildTransactionProcessor(t *testing.T) { + ctx := context.Background() + maxBatchSize := 100000 + + q := &mockDBQ{} + defer mock.AssertExpectationsForObjects(t, q) + + q.MockQOperations.On("NewOperationBatchInsertBuilder", maxBatchSize). + Return(&history.MockOperationsBatchInsertBuilder{}).Twice() // Twice = with/without failed + q.MockQTransactions.On("NewTransactionBatchInsertBuilder", maxBatchSize). 
+ Return(&history.MockTransactionsBatchInsertBuilder{}).Twice() + + runner := ProcessorRunner{ + ctx: ctx, + config: Config{}, + historyQ: q, + } + + stats := &processors.StatsLedgerTransactionProcessor{} + trades := &processors.TradeProcessor{} + ledger := xdr.LedgerHeaderHistoryEntry{} + processor := runner.buildTransactionProcessor(stats, trades, ledger) + assert.IsType(t, &groupTransactionProcessors{}, processor) + + assert.IsType(t, &statsLedgerTransactionProcessor{}, processor.processors[0]) + assert.IsType(t, &processors.EffectProcessor{}, processor.processors[1]) + assert.IsType(t, &processors.LedgersProcessor{}, processor.processors[2]) + assert.IsType(t, &processors.OperationProcessor{}, processor.processors[3]) + assert.IsType(t, &processors.TradeProcessor{}, processor.processors[4]) + assert.IsType(t, &processors.ParticipantsProcessor{}, processor.processors[5]) + assert.IsType(t, &processors.TransactionProcessor{}, processor.processors[6]) +} + +func TestProcessorRunnerRunAllProcessorsOnLedger(t *testing.T) { + ctx := context.Background() + maxBatchSize := 100000 + + config := Config{ + NetworkPassphrase: network.PublicNetworkPassphrase, + } + + q := &mockDBQ{} + defer mock.AssertExpectationsForObjects(t, q) + + ledger := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + BucketListHash: xdr.Hash([32]byte{0, 1, 2}), + }, + }, + }, + } + + // Batches + mockAccountSignersBatchInsertBuilder := &history.MockAccountSignersBatchInsertBuilder{} + defer mock.AssertExpectationsForObjects(t, mockAccountSignersBatchInsertBuilder) + q.MockQSigners.On("NewAccountSignersBatchInsertBuilder", maxBatchSize). + Return(mockAccountSignersBatchInsertBuilder).Once() + + mockOperationsBatchInsertBuilder := &history.MockOperationsBatchInsertBuilder{} + defer mock.AssertExpectationsForObjects(t, mockOperationsBatchInsertBuilder) + mockOperationsBatchInsertBuilder.On("Exec", ctx).Return(nil).Once() + q.MockQOperations.On("NewOperationBatchInsertBuilder", maxBatchSize). + Return(mockOperationsBatchInsertBuilder).Twice() + + mockTransactionsBatchInsertBuilder := &history.MockTransactionsBatchInsertBuilder{} + defer mock.AssertExpectationsForObjects(t, mockTransactionsBatchInsertBuilder) + mockTransactionsBatchInsertBuilder.On("Exec", ctx).Return(nil).Once() + q.MockQTransactions.On("NewTransactionBatchInsertBuilder", maxBatchSize). + Return(mockTransactionsBatchInsertBuilder).Twice() + + q.MockQLedgers.On("InsertLedger", ctx, ledger.V0.LedgerHeader, 0, 0, 0, 0, CurrentVersion). + Return(int64(1), nil).Once() + + runner := ProcessorRunner{ + ctx: ctx, + config: config, + historyQ: q, + } + + _, err := runner.RunAllProcessorsOnLedger(ledger) + assert.NoError(t, err) +} + +func TestProcessorRunnerRunAllProcessorsOnLedgerProtocolVersionNotSupported(t *testing.T) { + ctx := context.Background() + maxBatchSize := 100000 + + config := Config{ + NetworkPassphrase: network.PublicNetworkPassphrase, + } + + q := &mockDBQ{} + defer mock.AssertExpectationsForObjects(t, q) + + ledger := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerVersion: 200, + }, + }, + }, + } + + // Batches + + mockAccountSignersBatchInsertBuilder := &history.MockAccountSignersBatchInsertBuilder{} + defer mock.AssertExpectationsForObjects(t, mockAccountSignersBatchInsertBuilder) + q.MockQSigners.On("NewAccountSignersBatchInsertBuilder", maxBatchSize). 
+ Return(mockAccountSignersBatchInsertBuilder).Once() + + mockOperationsBatchInsertBuilder := &history.MockOperationsBatchInsertBuilder{} + defer mock.AssertExpectationsForObjects(t, mockOperationsBatchInsertBuilder) + q.MockQOperations.On("NewOperationBatchInsertBuilder", maxBatchSize). + Return(mockOperationsBatchInsertBuilder).Twice() + + mockTransactionsBatchInsertBuilder := &history.MockTransactionsBatchInsertBuilder{} + defer mock.AssertExpectationsForObjects(t, mockTransactionsBatchInsertBuilder) + q.MockQTransactions.On("NewTransactionBatchInsertBuilder", maxBatchSize). + Return(mockTransactionsBatchInsertBuilder).Twice() + + runner := ProcessorRunner{ + ctx: ctx, + config: config, + historyQ: q, + } + + _, err := runner.RunAllProcessorsOnLedger(ledger) + assert.EqualError(t, err, "Error while checking for supported protocol version: This Horizon version does not support protocol version 200. The latest supported protocol version is 18. Please upgrade to the latest Horizon version.") +} diff --git a/services/horizon/internal/ingest/processors/account_data_processor.go b/services/horizon/internal/ingest/processors/account_data_processor.go new file mode 100644 index 0000000000..dfdbecb43e --- /dev/null +++ b/services/horizon/internal/ingest/processors/account_data_processor.go @@ -0,0 +1,107 @@ +package processors + +import ( + "context" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type AccountDataProcessor struct { + dataQ history.QData + + cache *ingest.ChangeCompactor +} + +func NewAccountDataProcessor(dataQ history.QData) *AccountDataProcessor { + p := &AccountDataProcessor{dataQ: dataQ} + p.reset() + return p +} + +func (p *AccountDataProcessor) reset() { + p.cache = ingest.NewChangeCompactor() +} + +func (p *AccountDataProcessor) ProcessChange(ctx context.Context, change ingest.Change) error { + // We're interested in data only + if change.Type != xdr.LedgerEntryTypeData { + return nil + } + + err := p.cache.AddChange(change) + if err != nil { + return errors.Wrap(err, "error adding to ledgerCache") + } + + if p.cache.Size() > maxBatchSize { + err = p.Commit(ctx) + if err != nil { + return errors.Wrap(err, "error in Commit") + } + p.reset() + } + + return nil +} + +func (p *AccountDataProcessor) Commit(ctx context.Context) error { + var ( + datasToUpsert []history.Data + datasToDelete []history.AccountDataKey + ) + changes := p.cache.GetChanges() + for _, change := range changes { + switch { + case change.Pre == nil && change.Post != nil: + // Created + datasToUpsert = append(datasToUpsert, p.ledgerEntryToRow(change.Post)) + case change.Pre != nil && change.Post == nil: + // Removed + data := change.Pre.Data.MustData() + key := history.AccountDataKey{ + AccountID: data.AccountId.Address(), + DataName: string(data.DataName), + } + datasToDelete = append(datasToDelete, key) + default: + // Updated + datasToUpsert = append(datasToUpsert, p.ledgerEntryToRow(change.Post)) + } + } + + if len(datasToUpsert) > 0 { + if err := p.dataQ.UpsertAccountData(ctx, datasToUpsert); err != nil { + return errors.Wrap(err, "error executing upsert") + } + } + + if len(datasToDelete) > 0 { + count, err := p.dataQ.RemoveAccountData(ctx, datasToDelete) + if err != nil { + return errors.Wrap(err, "error executing removal") + } + if count != int64(len(datasToDelete)) { + return ingest.NewStateError(errors.Errorf( + "%d rows affected when deleting %d account data", + count, + 
len(datasToDelete), + )) + } + } + + return nil +} + +func (p *AccountDataProcessor) ledgerEntryToRow(entry *xdr.LedgerEntry) history.Data { + data := entry.Data.MustData() + return history.Data{ + AccountID: data.AccountId.Address(), + Name: string(data.DataName), + Value: history.AccountDataValue(data.DataValue), + LastModifiedLedger: uint32(entry.LastModifiedLedgerSeq), + Sponsor: ledgerEntrySponsorToNullString(*entry), + } +} diff --git a/services/horizon/internal/ingest/processors/accounts_data_processor_test.go b/services/horizon/internal/ingest/processors/accounts_data_processor_test.go new file mode 100644 index 0000000000..86ff1e13de --- /dev/null +++ b/services/horizon/internal/ingest/processors/accounts_data_processor_test.go @@ -0,0 +1,229 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/suite" +) + +func TestAccountsDataProcessorTestSuiteState(t *testing.T) { + suite.Run(t, new(AccountsDataProcessorTestSuiteState)) +} + +type AccountsDataProcessorTestSuiteState struct { + suite.Suite + ctx context.Context + processor *AccountDataProcessor + mockQ *history.MockQData +} + +func (s *AccountsDataProcessorTestSuiteState) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQData{} + + s.processor = NewAccountDataProcessor(s.mockQ) +} + +func (s *AccountsDataProcessorTestSuiteState) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + + s.mockQ.AssertExpectations(s.T()) +} + +func (s *AccountsDataProcessorTestSuiteState) TestNoEntries() { + // Nothing processed, assertions in TearDownTest. +} + +func (s *AccountsDataProcessorTestSuiteState) TestCreatesAccounts() { + data := xdr.DataEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + DataName: "test", + DataValue: []byte{1, 1, 1, 1}, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + entry := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeData, + Data: &data, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + } + historyData := history.Data{ + AccountID: data.AccountId.Address(), + Name: string(data.DataName), + Value: history.AccountDataValue(data.DataValue), + LastModifiedLedger: uint32(entry.LastModifiedLedgerSeq), + } + s.mockQ.On("UpsertAccountData", s.ctx, []history.Data{historyData}).Return(nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeData, + Pre: nil, + Post: &entry, + }) + s.Assert().NoError(err) +} + +func TestAccountsDataProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(AccountsDataProcessorTestSuiteLedger)) +} + +type AccountsDataProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *AccountDataProcessor + mockQ *history.MockQData +} + +func (s *AccountsDataProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQData{} + + s.processor = NewAccountDataProcessor(s.mockQ) +} + +func (s *AccountsDataProcessorTestSuiteLedger) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s *AccountsDataProcessorTestSuiteLedger) TestNoTransactions() { + // Nothing processed, assertions in TearDownTest. 
+} + +func (s *AccountsDataProcessorTestSuiteLedger) TestNewAccountData() { + data := xdr.DataEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + DataName: "test", + DataValue: []byte{1, 1, 1, 1}, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeData, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeData, + Data: &data, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) + + updatedData := xdr.DataEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + DataName: "test", + DataValue: []byte{2, 2, 2, 2}, + } + + updatedEntry := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeData, + Data: &updatedData, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeData, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeData, + Data: &data, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + }, + Post: &updatedEntry, + }) + s.Assert().NoError(err) + + // We use LedgerEntryChangesCache so all changes are squashed + historyData := history.Data{ + AccountID: updatedData.AccountId.Address(), + Name: string(updatedData.DataName), + Value: history.AccountDataValue(updatedData.DataValue), + LastModifiedLedger: uint32(updatedEntry.LastModifiedLedgerSeq), + } + s.mockQ.On("UpsertAccountData", s.ctx, []history.Data{historyData}).Return(nil).Once() +} + +func (s *AccountsDataProcessorTestSuiteLedger) TestUpdateAccountData() { + data := xdr.DataEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + DataName: "test", + DataValue: []byte{1, 1, 1, 1}, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + updatedData := xdr.DataEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + DataName: "test", + DataValue: []byte{2, 2, 2, 2}, + } + + updatedEntry := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeData, + Data: &updatedData, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeData, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeData, + Data: &data, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + }, + Post: &updatedEntry, + }) + s.Assert().NoError(err) + + historyData := history.Data{ + AccountID: updatedData.AccountId.Address(), + Name: string(updatedData.DataName), + Value: history.AccountDataValue(updatedData.DataValue), + LastModifiedLedger: uint32(updatedEntry.LastModifiedLedgerSeq), + } + s.mockQ.On("UpsertAccountData", s.ctx, []history.Data{historyData}).Return(nil).Once() +} + +func (s *AccountsDataProcessorTestSuiteLedger) TestRemoveAccountData() { + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeData, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeData, + Data: &xdr.DataEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + DataName: "test", + DataValue: []byte{1, 1, 1, 1}, + }, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + s.mockQ.On( + "RemoveAccountData", + s.ctx, + []history.AccountDataKey{ + { + AccountID: 
"GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + DataName: "test", + }, + }, + ).Return(int64(1), nil).Once() +} diff --git a/services/horizon/internal/ingest/processors/accounts_processor.go b/services/horizon/internal/ingest/processors/accounts_processor.go new file mode 100644 index 0000000000..7ea97bc6f1 --- /dev/null +++ b/services/horizon/internal/ingest/processors/accounts_processor.go @@ -0,0 +1,133 @@ +package processors + +import ( + "context" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type AccountsProcessor struct { + accountsQ history.QAccounts + + cache *ingest.ChangeCompactor +} + +func NewAccountsProcessor(accountsQ history.QAccounts) *AccountsProcessor { + p := &AccountsProcessor{accountsQ: accountsQ} + p.reset() + return p +} + +func (p *AccountsProcessor) reset() { + p.cache = ingest.NewChangeCompactor() +} + +func (p *AccountsProcessor) ProcessChange(ctx context.Context, change ingest.Change) error { + if change.Type != xdr.LedgerEntryTypeAccount { + return nil + } + + err := p.cache.AddChange(change) + if err != nil { + return errors.Wrap(err, "error adding to ledgerCache") + } + + if p.cache.Size() > maxBatchSize { + err = p.Commit(ctx) + if err != nil { + return errors.Wrap(err, "error in Commit") + } + p.reset() + } + + return nil +} + +func (p *AccountsProcessor) Commit(ctx context.Context) error { + batchUpsertAccounts := []history.AccountEntry{} + removeBatch := []string{} + + changes := p.cache.GetChanges() + for _, change := range changes { + changed, err := change.AccountChangedExceptSigners() + if err != nil { + return errors.Wrap(err, "Error running change.AccountChangedExceptSigners") + } + + if !changed { + continue + } + + switch { + case change.Post != nil: + // Created and updated + row := p.ledgerEntryToRow(*change.Post) + batchUpsertAccounts = append(batchUpsertAccounts, row) + case change.Pre != nil && change.Post == nil: + // Removed + account := change.Pre.Data.MustAccount() + accountID := account.AccountId.Address() + removeBatch = append(removeBatch, accountID) + default: + return errors.New("Invalid io.Change: change.Pre == nil && change.Post == nil") + } + } + + // Upsert accounts + if len(batchUpsertAccounts) > 0 { + err := p.accountsQ.UpsertAccounts(ctx, batchUpsertAccounts) + if err != nil { + return errors.Wrap(err, "errors in UpsertAccounts") + } + } + + if len(removeBatch) > 0 { + rowsAffected, err := p.accountsQ.RemoveAccounts(ctx, removeBatch) + if err != nil { + return errors.Wrap(err, "error in RemoveAccounts") + } + + if rowsAffected != int64(len(removeBatch)) { + return ingest.NewStateError(errors.Errorf( + "%d rows affected when removing %d accounts", + rowsAffected, + len(removeBatch), + )) + } + } + + return nil +} + +func (p *AccountsProcessor) ledgerEntryToRow(entry xdr.LedgerEntry) history.AccountEntry { + account := entry.Data.MustAccount() + liabilities := account.Liabilities() + + var inflationDestination = "" + if account.InflationDest != nil { + inflationDestination = account.InflationDest.Address() + } + + return history.AccountEntry{ + AccountID: account.AccountId.Address(), + Balance: int64(account.Balance), + BuyingLiabilities: int64(liabilities.Buying), + SellingLiabilities: int64(liabilities.Selling), + SequenceNumber: int64(account.SeqNum), + NumSubEntries: uint32(account.NumSubEntries), + InflationDestination: inflationDestination, + Flags: uint32(account.Flags), + 
HomeDomain: string(account.HomeDomain), + MasterWeight: account.MasterKeyWeight(), + ThresholdLow: account.ThresholdLow(), + ThresholdMedium: account.ThresholdMedium(), + ThresholdHigh: account.ThresholdHigh(), + LastModifiedLedger: uint32(entry.LastModifiedLedgerSeq), + Sponsor: ledgerEntrySponsorToNullString(entry), + NumSponsored: uint32(account.NumSponsored()), + NumSponsoring: uint32(account.NumSponsoring()), + } +} diff --git a/services/horizon/internal/ingest/processors/accounts_processor_test.go b/services/horizon/internal/ingest/processors/accounts_processor_test.go new file mode 100644 index 0000000000..ea9fe91125 --- /dev/null +++ b/services/horizon/internal/ingest/processors/accounts_processor_test.go @@ -0,0 +1,308 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/suite" +) + +func TestAccountsProcessorTestSuiteState(t *testing.T) { + suite.Run(t, new(AccountsProcessorTestSuiteState)) +} + +type AccountsProcessorTestSuiteState struct { + suite.Suite + ctx context.Context + processor *AccountsProcessor + mockQ *history.MockQAccounts +} + +func (s *AccountsProcessorTestSuiteState) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQAccounts{} + + s.processor = NewAccountsProcessor(s.mockQ) +} + +func (s *AccountsProcessorTestSuiteState) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s *AccountsProcessorTestSuiteState) TestNoEntries() { + // Nothing processed, assertions in TearDownTest. +} + +func (s *AccountsProcessorTestSuiteState) TestCreatesAccounts() { + // We use LedgerEntryChangesCache so all changes are squashed + s.mockQ.On( + "UpsertAccounts", s.ctx, + []history.AccountEntry{ + { + LastModifiedLedger: 123, + AccountID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + MasterWeight: 1, + ThresholdLow: 1, + ThresholdMedium: 1, + ThresholdHigh: 1, + }, + }, + ).Return(nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + LastModifiedLedgerSeq: xdr.Uint32(123), + }, + }) + s.Assert().NoError(err) +} + +func TestAccountsProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(AccountsProcessorTestSuiteLedger)) +} + +type AccountsProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *AccountsProcessor + mockQ *history.MockQAccounts +} + +func (s *AccountsProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQAccounts{} + + s.processor = NewAccountsProcessor(s.mockQ) +} + +func (s *AccountsProcessorTestSuiteLedger) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s *AccountsProcessorTestSuiteLedger) TestNoTransactions() { + // Nothing processed, assertions in TearDownTest. 
+} + +func (s *AccountsProcessorTestSuiteLedger) TestNewAccount() { + account := xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &account, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) + + updatedAccount := xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{0, 1, 2, 3}, + HomeDomain: "stellar.org", + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &account, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &updatedAccount, + }, + }, + }) + s.Assert().NoError(err) + + // We use LedgerEntryChangesCache so all changes are squashed + s.mockQ.On( + "UpsertAccounts", + s.ctx, + []history.AccountEntry{ + { + LastModifiedLedger: 123, + AccountID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + MasterWeight: 0, + ThresholdLow: 1, + ThresholdMedium: 2, + ThresholdHigh: 3, + HomeDomain: "stellar.org", + }, + }, + ).Return(nil).Once() +} + +func (s *AccountsProcessorTestSuiteLedger) TestRemoveAccount() { + s.mockQ.On( + "RemoveAccounts", + s.ctx, + []string{"GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"}, + ).Return(int64(1), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) +} + +func (s *AccountsProcessorTestSuiteLedger) TestProcessUpgradeChange() { + account := xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &account, + }, + }, + }) + s.Assert().NoError(err) + + updatedAccount := xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{0, 1, 2, 3}, + HomeDomain: "stellar.org", + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &account, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq + 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &updatedAccount, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On( + "UpsertAccounts", + 
s.ctx, + []history.AccountEntry{ + { + LastModifiedLedger: uint32(lastModifiedLedgerSeq) + 1, + AccountID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + MasterWeight: 0, + ThresholdLow: 1, + ThresholdMedium: 2, + ThresholdHigh: 3, + HomeDomain: "stellar.org", + }, + }, + ).Return(nil).Once() +} + +func (s *AccountsProcessorTestSuiteLedger) TestFeeProcessedBeforeEverythingElse() { + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 200, + }, + }, + }, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 100, + }, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 100, + }, + }, + }, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + Balance: 300, + }, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On( + "UpsertAccounts", + s.ctx, + []history.AccountEntry{ + { + LastModifiedLedger: 0, + AccountID: "GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A", + Balance: 300, + }, + }, + ).Return(nil).Once() +} diff --git a/services/horizon/internal/ingest/processors/asset_stats_processor.go b/services/horizon/internal/ingest/processors/asset_stats_processor.go new file mode 100644 index 0000000000..098167bb5b --- /dev/null +++ b/services/horizon/internal/ingest/processors/asset_stats_processor.go @@ -0,0 +1,234 @@ +package processors + +import ( + "context" + "database/sql" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type AssetStatsProcessor struct { + assetStatsQ history.QAssetStats + + cache *ingest.ChangeCompactor + assetStatSet AssetStatSet + useLedgerEntryCache bool +} + +// NewAssetStatsProcessor constructs a new AssetStatsProcessor instance. +// If useLedgerEntryCache is false we don't use ledger cache and we just +// add trust lines to assetStatSet, then we insert all the stats in one +// insert query. This is done to make history buckets processing faster +// (batch inserting). 
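+//
+// Claimable balance and liquidity pool changes are folded into the same
+// asset stat rows as trust lines, so ProcessChange accepts all three
+// ledger entry types in either mode.
+//
+// A minimal usage sketch (q stands for any history.QAssetStats
+// implementation; the variable names are illustrative only):
+//
+//	// history archive ingestion: insert-only batching
+//	archiveProcessor := NewAssetStatsProcessor(q, false)
+//
+//	// ledger-by-ledger ingestion: read-modify-write through the change cache
+//	liveProcessor := NewAssetStatsProcessor(q, true)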
+func NewAssetStatsProcessor( + assetStatsQ history.QAssetStats, + useLedgerEntryCache bool, +) *AssetStatsProcessor { + p := &AssetStatsProcessor{ + assetStatsQ: assetStatsQ, + useLedgerEntryCache: useLedgerEntryCache, + } + p.reset() + return p +} + +func (p *AssetStatsProcessor) reset() { + p.cache = ingest.NewChangeCompactor() + p.assetStatSet = AssetStatSet{} +} + +func (p *AssetStatsProcessor) ProcessChange(ctx context.Context, change ingest.Change) error { + if change.Type != xdr.LedgerEntryTypeLiquidityPool && + change.Type != xdr.LedgerEntryTypeClaimableBalance && + change.Type != xdr.LedgerEntryTypeTrustline { + return nil + } + if p.useLedgerEntryCache { + return p.addToCache(ctx, change) + } + if change.Pre != nil || change.Post == nil { + return errors.New("AssetStatsProcessor is in insert only mode") + } + + switch change.Type { + case xdr.LedgerEntryTypeLiquidityPool: + return p.assetStatSet.AddLiquidityPool(change) + case xdr.LedgerEntryTypeClaimableBalance: + return p.assetStatSet.AddClaimableBalance(change) + case xdr.LedgerEntryTypeTrustline: + return p.assetStatSet.AddTrustline(change) + default: + return nil + } +} + +func (p *AssetStatsProcessor) addToCache(ctx context.Context, change ingest.Change) error { + err := p.cache.AddChange(change) + if err != nil { + return errors.Wrap(err, "error adding to ledgerCache") + } + + if p.cache.Size() > maxBatchSize { + err = p.Commit(ctx) + if err != nil { + return errors.Wrap(err, "error in Commit") + } + p.reset() + } + return nil +} + +func (p *AssetStatsProcessor) Commit(ctx context.Context) error { + if !p.useLedgerEntryCache { + assetStatsDeltas := p.assetStatSet.All() + if len(assetStatsDeltas) == 0 { + return nil + } + return p.assetStatsQ.InsertAssetStats(ctx, assetStatsDeltas, maxBatchSize) + } + + changes := p.cache.GetChanges() + for _, change := range changes { + var err error + switch change.Type { + case xdr.LedgerEntryTypeLiquidityPool: + err = p.assetStatSet.AddLiquidityPool(change) + case xdr.LedgerEntryTypeClaimableBalance: + err = p.assetStatSet.AddClaimableBalance(change) + case xdr.LedgerEntryTypeTrustline: + err = p.assetStatSet.AddTrustline(change) + default: + return errors.Errorf("Change type %v is unexpected", change.Type) + } + + if err != nil { + return errors.Wrap(err, "Error adjusting asset stat") + } + } + + assetStatsDeltas := p.assetStatSet.All() + for _, delta := range assetStatsDeltas { + var rowsAffected int64 + var stat history.ExpAssetStat + var err error + + stat, err = p.assetStatsQ.GetAssetStat(ctx, + delta.AssetType, + delta.AssetCode, + delta.AssetIssuer, + ) + assetStatNotFound := err == sql.ErrNoRows + if !assetStatNotFound && err != nil { + return errors.Wrap(err, "could not fetch asset stat from db") + } + + if assetStatNotFound { + // Safety checks + if delta.Accounts.Authorized < 0 { + return ingest.NewStateError(errors.Errorf( + "Authorized accounts negative but DB entry does not exist for asset: %s %s %s", + delta.AssetType, + delta.AssetCode, + delta.AssetIssuer, + )) + } else if delta.Accounts.AuthorizedToMaintainLiabilities < 0 { + return ingest.NewStateError(errors.Errorf( + "AuthorizedToMaintainLiabilities accounts negative but DB entry does not exist for asset: %s %s %s", + delta.AssetType, + delta.AssetCode, + delta.AssetIssuer, + )) + } else if delta.Accounts.Unauthorized < 0 { + return ingest.NewStateError(errors.Errorf( + "Unauthorized accounts negative but DB entry does not exist for asset: %s %s %s", + delta.AssetType, + delta.AssetCode, + delta.AssetIssuer, + )) + 
} else if delta.Accounts.ClaimableBalances < 0 { + return ingest.NewStateError(errors.Errorf( + "Claimable balance negative but DB entry does not exist for asset: %s %s %s", + delta.AssetType, + delta.AssetCode, + delta.AssetIssuer, + )) + } else if delta.Accounts.LiquidityPools < 0 { + return ingest.NewStateError(errors.Errorf( + "Liquidity pools negative but DB entry does not exist for asset: %s %s %s", + delta.AssetType, + delta.AssetCode, + delta.AssetIssuer, + )) + } + + // Insert + var errInsert error + rowsAffected, errInsert = p.assetStatsQ.InsertAssetStat(ctx, delta) + if errInsert != nil { + return errors.Wrap(errInsert, "could not insert asset stat") + } + } else { + var statBalances assetStatBalances + if err = statBalances.Parse(&stat.Balances); err != nil { + return errors.Wrap(err, "Error parsing balances") + } + + var deltaBalances assetStatBalances + if err = deltaBalances.Parse(&delta.Balances); err != nil { + return errors.Wrap(err, "Error parsing balances") + } + + statBalances = statBalances.Add(deltaBalances) + statAccounts := stat.Accounts.Add(delta.Accounts) + + if statAccounts.IsZero() { + // Remove stats + if !statBalances.IsZero() { + return ingest.NewStateError(errors.Errorf( + "Removing asset stat by final amount non-zero for: %s %s %s", + delta.AssetType, + delta.AssetCode, + delta.AssetIssuer, + )) + } + rowsAffected, err = p.assetStatsQ.RemoveAssetStat(ctx, + delta.AssetType, + delta.AssetCode, + delta.AssetIssuer, + ) + if err != nil { + return errors.Wrap(err, "could not remove asset stat") + } + } else { + // Update + rowsAffected, err = p.assetStatsQ.UpdateAssetStat(ctx, history.ExpAssetStat{ + AssetType: delta.AssetType, + AssetCode: delta.AssetCode, + AssetIssuer: delta.AssetIssuer, + Accounts: statAccounts, + Balances: statBalances.ConvertToHistoryObject(), + Amount: statBalances.Authorized.String(), + NumAccounts: statAccounts.Authorized, + }) + if err != nil { + return errors.Wrap(err, "could not update asset stat") + } + } + } + + if rowsAffected != 1 { + return ingest.NewStateError(errors.Errorf( + "%d rows affected when adjusting asset stat for asset: %s %s %s", + rowsAffected, + delta.AssetType, + delta.AssetCode, + delta.AssetIssuer, + )) + } + } + + return nil +} diff --git a/services/horizon/internal/ingest/processors/asset_stats_processor_test.go b/services/horizon/internal/ingest/processors/asset_stats_processor_test.go new file mode 100644 index 0000000000..409d83d489 --- /dev/null +++ b/services/horizon/internal/ingest/processors/asset_stats_processor_test.go @@ -0,0 +1,1174 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "database/sql" + "testing" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/suite" +) + +func TestAssetStatsProcessorTestSuiteState(t *testing.T) { + suite.Run(t, new(AssetStatsProcessorTestSuiteState)) +} + +type AssetStatsProcessorTestSuiteState struct { + suite.Suite + ctx context.Context + processor *AssetStatsProcessor + mockQ *history.MockQAssetStats +} + +func (s *AssetStatsProcessorTestSuiteState) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQAssetStats{} + s.processor = NewAssetStatsProcessor(s.mockQ, false) +} + +func (s *AssetStatsProcessorTestSuiteState) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s 
*AssetStatsProcessorTestSuiteState) TestCreateTrustLine() { + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("InsertAssetStats", s.ctx, []history.ExpAssetStat{ + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{Authorized: 1}, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 1, + }, + }, maxBatchSize).Return(nil).Once() +} + +func (s *AssetStatsProcessorTestSuiteState) TestCreatePoolShareTrustLine() { + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.TrustLineAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPoolId: &xdr.PoolId{1, 2, 3}, + }, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s *AssetStatsProcessorTestSuiteState) TestCreateTrustLineWithClawback() { + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag | xdr.TrustLineFlagsTrustlineClawbackEnabledFlag), + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("InsertAssetStats", s.ctx, []history.ExpAssetStat{ + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{Authorized: 1}, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 1, + }, + }, maxBatchSize).Return(nil).Once() +} + +func (s *AssetStatsProcessorTestSuiteState) TestCreateTrustLineUnauthorized() { + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", 
trustLineIssuer.Address()).ToTrustLineAsset(), + } + lastModifiedLedgerSeq := xdr.Uint32(123) + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("InsertAssetStats", s.ctx, []history.ExpAssetStat{ + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{Unauthorized: 1}, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }, + }, maxBatchSize).Return(nil).Once() +} + +func TestAssetStatsProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(AssetStatsProcessorTestSuiteLedger)) +} + +type AssetStatsProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *AssetStatsProcessor + mockQ *history.MockQAssetStats +} + +func (s *AssetStatsProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQAssetStats{} + + s.processor = NewAssetStatsProcessor(s.mockQ, true) +} + +func (s *AssetStatsProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) +} + +func (s *AssetStatsProcessorTestSuiteLedger) TestInsertClaimableBalance() { + claimableBalance := xdr.ClaimableBalanceEntry{ + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()), + Amount: 12, + BalanceId: xdr.ClaimableBalanceId{ + Type: 0, + V0: &xdr.Hash{1, 2, 3}, + }, + } + + nativeClaimableBalance := xdr.ClaimableBalanceEntry{ + Asset: xdr.MustNewNativeAsset(), + Amount: 100000000, + BalanceId: xdr.ClaimableBalanceId{ + Type: 0, + V0: &xdr.Hash{1, 2, 43}, + }, + } + lastModifiedLedgerSeq := xdr.Uint32(1234) + + // test inserts + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &claimableBalance, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &nativeClaimableBalance, + }, + }, + }) + s.Assert().NoError(err) + + usdClaimableBalance := xdr.ClaimableBalanceEntry{ + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()), + Amount: 46, + BalanceId: xdr.ClaimableBalanceId{ + Type: 0, + V0: &xdr.Hash{4, 5, 3}, + }, + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &usdClaimableBalance, + }, + }, + }) + s.Assert().NoError(err) + + // test updates + + updatedClaimableBalance := claimableBalance + updatedClaimableBalance.Amount *= 2 + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 
lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &claimableBalance, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &updatedClaimableBalance, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{}, sql.ErrNoRows).Once() + s.mockQ.On("InsertAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{ + ClaimableBalances: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "24", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }).Return(int64(1), nil).Once() + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "USD", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{}, sql.ErrNoRows).Once() + s.mockQ.On("InsertAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{ + ClaimableBalances: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "46", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }).Return(int64(1), nil).Once() + + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AssetStatsProcessorTestSuiteLedger) TestInsertTrustLine() { + // should be ignored because it's not an trust line type + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + }) + s.Assert().NoError(err) + + // add trust line + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + unauthorizedTrustline := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + } + lastModifiedLedgerSeq := xdr.Uint32(1234) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &unauthorizedTrustline, + }, + }, + }) + s.Assert().NoError(err) + + updatedTrustLine := 
xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + updatedUnauthorizedTrustline := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &updatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &unauthorizedTrustline, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &updatedUnauthorizedTrustline, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{}, sql.ErrNoRows).Once() + s.mockQ.On("InsertAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "10", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "10", + NumAccounts: 1, + }).Return(int64(1), nil).Once() + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "USD", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{}, sql.ErrNoRows).Once() + s.mockQ.On("InsertAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{ + Unauthorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "10", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }).Return(int64(1), nil).Once() + + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AssetStatsProcessorTestSuiteLedger) TestInsertClaimableBalanceAndTrustlineAndLiquidityPool() { + liquidityPool := xdr.LiquidityPoolEntry{ + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()), + AssetB: xdr.MustNewNativeAsset(), + Fee: 20, + }, + ReserveA: 100, + ReserveB: 200, + TotalPoolShares: 1000, + PoolSharesTrustLineCount: 10, + }, + }, + } + + claimableBalance := xdr.ClaimableBalanceEntry{ + Asset: 
xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()), + Amount: 12, + BalanceId: xdr.ClaimableBalanceId{ + Type: 0, + V0: &xdr.Hash{1, 2, 3}, + }, + } + + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 9, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + lastModifiedLedgerSeq := xdr.Uint32(1234) + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &liquidityPool, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &claimableBalance, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{}, sql.ErrNoRows).Once() + s.mockQ.On("InsertAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{ + ClaimableBalances: 1, + Authorized: 1, + LiquidityPools: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "9", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "12", + LiquidityPools: "100", + }, + Amount: "9", + NumAccounts: 1, + }).Return(int64(1), nil).Once() + + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AssetStatsProcessorTestSuiteLedger) TestUpdateTrustLine() { + lastModifiedLedgerSeq := xdr.Uint32(1234) + + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + updatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &updatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + 
).Return(history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{Authorized: 1}, + Balances: history.ExpAssetStatBalances{ + Authorized: "100", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "100", + NumAccounts: 1, + }, nil).Once() + s.mockQ.On("UpdateAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{Authorized: 1}, + Balances: history.ExpAssetStatBalances{ + Authorized: "110", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "110", + NumAccounts: 1, + }).Return(int64(1), nil).Once() + + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AssetStatsProcessorTestSuiteLedger) TestUpdateTrustLineAuthorization() { + lastModifiedLedgerSeq := xdr.Uint32(1234) + + // EUR trustline: 100 unauthorized -> 10 authorized + eurTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 100, + } + eurUpdatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + // USD trustline: 100 authorized -> 10 unauthorized + usdTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 100, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + usdUpdatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + } + + // ETH trustline: 100 authorized -> 10 authorized_to_maintain_liabilities + ethTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("ETH", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 100, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + ethUpdatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("ETH", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag), + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &eurTrustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &eurUpdatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: 
&xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &usdTrustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &usdUpdatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: ðTrustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: ðUpdatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{ + Unauthorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "100", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }, nil).Once() + s.mockQ.On("UpdateAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "10", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "10", + NumAccounts: 1, + }).Return(int64(1), nil).Once() + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "USD", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "100", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "100", + NumAccounts: 1, + }, nil).Once() + s.mockQ.On("UpdateAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{ + Unauthorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "10", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }).Return(int64(1), nil).Once() + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "ETH", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "ETH", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "100", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "100", + NumAccounts: 1, + }, nil).Once() + 
s.mockQ.On("UpdateAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "ETH", + Accounts: history.ExpAssetStatAccounts{ + AuthorizedToMaintainLiabilities: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "10", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }).Return(int64(1), nil).Once() + + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AssetStatsProcessorTestSuiteLedger) TestRemoveClaimableBalance() { + claimableBalance := xdr.ClaimableBalanceEntry{ + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()), + Amount: 12, + BalanceId: xdr.ClaimableBalanceId{ + Type: 0, + V0: &xdr.Hash{1, 2, 3}, + }, + } + usdClaimableBalance := xdr.ClaimableBalanceEntry{ + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()), + Amount: 21, + BalanceId: xdr.ClaimableBalanceId{ + Type: 0, + V0: &xdr.Hash{4, 5, 6}, + }, + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &claimableBalance, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &usdClaimableBalance, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{ + ClaimableBalances: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "12", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }, nil).Once() + s.mockQ.On("RemoveAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + ).Return(int64(1), nil).Once() + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "USD", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{ + Unauthorized: 1, + ClaimableBalances: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "21", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }, nil).Once() + s.mockQ.On("UpdateAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{Unauthorized: 1}, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }).Return(int64(1), nil).Once() + + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AssetStatsProcessorTestSuiteLedger) TestRemoveTrustLine() { + 
authorizedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + unauthorizedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &authorizedTrustLine, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &unauthorizedTrustLine, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 1, + }, nil).Once() + s.mockQ.On("RemoveAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + ).Return(int64(1), nil).Once() + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "USD", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "USD", + Accounts: history.ExpAssetStatAccounts{ + Unauthorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "0", + NumAccounts: 0, + }, nil).Once() + s.mockQ.On("RemoveAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "USD", + trustLineIssuer.Address(), + ).Return(int64(1), nil).Once() + + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AssetStatsProcessorTestSuiteLedger) TestProcessUpgradeChange() { + // add trust line + lastModifiedLedgerSeq := xdr.Uint32(1234) + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + }) + s.Assert().NoError(err) + + updatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: 
xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &updatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("GetAssetStat", s.ctx, + xdr.AssetTypeAssetTypeCreditAlphanum4, + "EUR", + trustLineIssuer.Address(), + ).Return(history.ExpAssetStat{}, sql.ErrNoRows).Once() + s.mockQ.On("InsertAssetStat", s.ctx, history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "10", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "10", + NumAccounts: 1, + }).Return(int64(1), nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} diff --git a/services/horizon/internal/ingest/processors/asset_stats_set.go b/services/horizon/internal/ingest/processors/asset_stats_set.go new file mode 100644 index 0000000000..672e387e56 --- /dev/null +++ b/services/horizon/internal/ingest/processors/asset_stats_set.go @@ -0,0 +1,348 @@ +package processors + +import ( + "math/big" + + "github.com/stellar/go/ingest" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type assetStatKey struct { + assetType xdr.AssetType + assetCode string + assetIssuer string +} + +type assetStatValue struct { + assetStatKey + balances assetStatBalances + accounts history.ExpAssetStatAccounts +} + +type assetStatBalances struct { + Authorized *big.Int + AuthorizedToMaintainLiabilities *big.Int + ClaimableBalances *big.Int + LiquidityPools *big.Int + Unauthorized *big.Int +} + +func (a *assetStatBalances) Parse(b *history.ExpAssetStatBalances) error { + authorized, ok := new(big.Int).SetString(b.Authorized, 10) + if !ok { + return errors.New("Error parsing: " + b.Authorized) + } + a.Authorized = authorized + + authorizedToMaintainLiabilities, ok := new(big.Int).SetString(b.AuthorizedToMaintainLiabilities, 10) + if !ok { + return errors.New("Error parsing: " + b.AuthorizedToMaintainLiabilities) + } + a.AuthorizedToMaintainLiabilities = authorizedToMaintainLiabilities + + claimableBalances, ok := new(big.Int).SetString(b.ClaimableBalances, 10) + if !ok { + return errors.New("Error parsing: " + b.ClaimableBalances) + } + a.ClaimableBalances = claimableBalances + + liquidityPools, ok := new(big.Int).SetString(b.LiquidityPools, 10) + if !ok { + return errors.New("Error parsing: " + b.LiquidityPools) + } + a.LiquidityPools = liquidityPools + + unauthorized, ok := new(big.Int).SetString(b.Unauthorized, 10) + if !ok { + return errors.New("Error parsing: " + b.Unauthorized) + } + a.Unauthorized = unauthorized + + return nil +} + +func (a assetStatBalances) Add(b assetStatBalances) assetStatBalances { + return assetStatBalances{ + Authorized: big.NewInt(0).Add(a.Authorized, b.Authorized), + AuthorizedToMaintainLiabilities: big.NewInt(0).Add(a.AuthorizedToMaintainLiabilities, b.AuthorizedToMaintainLiabilities), + ClaimableBalances: 
big.NewInt(0).Add(a.ClaimableBalances, b.ClaimableBalances), + LiquidityPools: big.NewInt(0).Add(a.LiquidityPools, b.LiquidityPools), + Unauthorized: big.NewInt(0).Add(a.Unauthorized, b.Unauthorized), + } +} + +func (a assetStatBalances) IsZero() bool { + return a.Authorized.Cmp(big.NewInt(0)) == 0 && + a.AuthorizedToMaintainLiabilities.Cmp(big.NewInt(0)) == 0 && + a.ClaimableBalances.Cmp(big.NewInt(0)) == 0 && + a.LiquidityPools.Cmp(big.NewInt(0)) == 0 && + a.Unauthorized.Cmp(big.NewInt(0)) == 0 +} + +func (a assetStatBalances) ConvertToHistoryObject() history.ExpAssetStatBalances { + return history.ExpAssetStatBalances{ + Authorized: a.Authorized.String(), + AuthorizedToMaintainLiabilities: a.AuthorizedToMaintainLiabilities.String(), + ClaimableBalances: a.ClaimableBalances.String(), + LiquidityPools: a.LiquidityPools.String(), + Unauthorized: a.Unauthorized.String(), + } +} + +func (value assetStatValue) ConvertToHistoryObject() history.ExpAssetStat { + balances := value.balances.ConvertToHistoryObject() + return history.ExpAssetStat{ + AssetType: value.assetType, + AssetCode: value.assetCode, + AssetIssuer: value.assetIssuer, + Accounts: value.accounts, + Balances: balances, + Amount: balances.Authorized, + NumAccounts: value.accounts.Authorized, + } +} + +// AssetStatSet represents a collection of asset stats +type AssetStatSet map[assetStatKey]*assetStatValue + +type delta struct { + Authorized int64 + AuthorizedToMaintainLiabilities int64 + Unauthorized int64 + ClaimableBalances int64 + LiquidityPools int64 +} + +func (d *delta) addByFlags(flags xdr.Uint32, amount int64) { + f := xdr.TrustLineFlags(flags) + if f.IsAuthorized() { + d.Authorized += amount + } else if f.IsAuthorizedToMaintainLiabilitiesFlag() { + d.AuthorizedToMaintainLiabilities += amount + } else { + d.Unauthorized += amount + } +} + +func (d delta) isEmpty() bool { + return d == delta{} +} + +// addDelta adds a delta balance and delta accounts to a given asset trustline. 
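+// A rough usage sketch, mirroring the unit tests in asset_stats_set_test.go
+// (issuer is illustrative shorthand for an issuer address, not part of this change):
+//
+//	set := AssetStatSet{}
+//	eur := xdr.MustNewCreditAsset("EUR", issuer)
+//	// record one additional claimable balance holding 23 stroops of EUR
+//	err := set.addDelta(eur, delta{ClaimableBalances: 23}, delta{ClaimableBalances: 1})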
+func (s AssetStatSet) addDelta(asset xdr.Asset, deltaBalances, deltaAccounts delta) error { + if deltaBalances.isEmpty() && deltaAccounts.isEmpty() { + return nil + } + + var key assetStatKey + if err := asset.Extract(&key.assetType, &key.assetCode, &key.assetIssuer); err != nil { + return errors.Wrap(err, "could not extract asset info from trustline") + } + + current, ok := s[key] + if !ok { + current = &assetStatValue{assetStatKey: key, balances: assetStatBalances{ + Authorized: big.NewInt(0), + AuthorizedToMaintainLiabilities: big.NewInt(0), + ClaimableBalances: big.NewInt(0), + LiquidityPools: big.NewInt(0), + Unauthorized: big.NewInt(0), + }} + s[key] = current + } + + current.accounts.Authorized += int32(deltaAccounts.Authorized) + current.accounts.AuthorizedToMaintainLiabilities += int32(deltaAccounts.AuthorizedToMaintainLiabilities) + current.accounts.ClaimableBalances += int32(deltaAccounts.ClaimableBalances) + current.accounts.LiquidityPools += int32(deltaAccounts.LiquidityPools) + current.accounts.Unauthorized += int32(deltaAccounts.Unauthorized) + + current.balances.Authorized.Add(current.balances.Authorized, big.NewInt(deltaBalances.Authorized)) + current.balances.AuthorizedToMaintainLiabilities.Add(current.balances.AuthorizedToMaintainLiabilities, big.NewInt(deltaBalances.AuthorizedToMaintainLiabilities)) + current.balances.ClaimableBalances.Add(current.balances.ClaimableBalances, big.NewInt(deltaBalances.ClaimableBalances)) + current.balances.LiquidityPools.Add(current.balances.LiquidityPools, big.NewInt(deltaBalances.LiquidityPools)) + current.balances.Unauthorized.Add(current.balances.Unauthorized, big.NewInt(deltaBalances.Unauthorized)) + + // Note: it's possible that after operations above: + // numAccounts != 0 && amount == 0 (ex. two accounts send some of their assets to third account) + // OR + // numAccounts == 0 && amount != 0 (ex. issuer issued an asset) + if current.balances.IsZero() && current.accounts.IsZero() { + delete(s, key) + } + + return nil +} + +// AddTrustline updates the set to account for how a given trustline has changed. +// change must be a xdr.LedgerEntryTypeTrustLine type. +func (s AssetStatSet) AddTrustline(change ingest.Change) error { + var pre, post *xdr.TrustLineEntry + if change.Pre != nil { + pre = change.Pre.Data.TrustLine + } + if change.Post != nil { + post = change.Post.Data.TrustLine + } + + deltaAccounts := delta{} + deltaBalances := delta{} + + if pre == nil && post == nil { + return ingest.NewStateError(errors.New("both pre and post trustlines cannot be nil")) + } + + var asset xdr.TrustLineAsset + if pre != nil { + asset = pre.Asset + deltaAccounts.addByFlags(pre.Flags, -1) + deltaBalances.addByFlags(pre.Flags, -int64(pre.Balance)) + } + if post != nil { + asset = post.Asset + deltaAccounts.addByFlags(post.Flags, 1) + deltaBalances.addByFlags(post.Flags, int64(post.Balance)) + } + if asset.Type == xdr.AssetTypeAssetTypePoolShare { + return nil + } + + err := s.addDelta(asset.ToAsset(), deltaBalances, deltaAccounts) + if err != nil { + return errors.Wrap(err, "error running AssetStatSet.addDelta") + } + return nil +} + +// AddLiquidityPool updates the set to account for how a given liquidity pool has changed. +// change must be a xdr.LedgerEntryTypeLiqidityPool type. 
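+// For a constant product pool, a Pre entry subtracts one pool (and its reserve)
+// from each non-native reserve asset and a Post entry adds one back, so an
+// updated pool contributes only the net change in reserves; native reserves are
+// skipped entirely.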
+func (s AssetStatSet) AddLiquidityPool(change ingest.Change) error { + var pre, post *xdr.LiquidityPoolEntry + if change.Pre != nil { + pre = change.Pre.Data.LiquidityPool + } + if change.Post != nil { + post = change.Post.Data.LiquidityPool + } + + assetAdeltaNum := delta{} + assetAdeltaBalances := delta{} + assetBdeltaNum := delta{} + assetBdeltaBalances := delta{} + + if pre == nil && post == nil { + return ingest.NewStateError(errors.New("both pre and post liquidity pools cannot be nil")) + } + + lpType, err := change.GetLiquidityPoolType() + if err != nil { + return ingest.NewStateError(err) + } + + var assetA, assetB xdr.Asset + switch lpType { + case xdr.LiquidityPoolTypeLiquidityPoolConstantProduct: + if pre != nil { + assetA = pre.Body.ConstantProduct.Params.AssetA + assetAdeltaNum.LiquidityPools-- + assetAdeltaBalances.LiquidityPools -= int64(pre.Body.ConstantProduct.ReserveA) + + assetB = pre.Body.ConstantProduct.Params.AssetB + assetBdeltaNum.LiquidityPools-- + assetBdeltaBalances.LiquidityPools -= int64(pre.Body.ConstantProduct.ReserveB) + } + if post != nil { + assetA = post.Body.ConstantProduct.Params.AssetA + assetAdeltaNum.LiquidityPools++ + assetAdeltaBalances.LiquidityPools += int64(post.Body.ConstantProduct.ReserveA) + + assetB = post.Body.ConstantProduct.Params.AssetB + assetBdeltaNum.LiquidityPools++ + assetBdeltaBalances.LiquidityPools += int64(post.Body.ConstantProduct.ReserveB) + } + default: + return errors.Errorf("Unknown liquidity pool type=%d", lpType) + } + + if assetA.Type != xdr.AssetTypeAssetTypeNative { + err := s.addDelta(assetA, assetAdeltaBalances, assetAdeltaNum) + if err != nil { + return errors.Wrap(err, "error running AssetStatSet.addDelta using AssetA") + } + } + + if assetB.Type != xdr.AssetTypeAssetTypeNative { + err := s.addDelta(assetB, assetBdeltaBalances, assetBdeltaNum) + if err != nil { + return errors.Wrap(err, "error running AssetStatSet.addDelta using AssetB") + } + } + + return nil +} + +// AddClaimableBalance updates the set to account for how a given claimable balance has changed. +// change must be a xdr.LedgerEntryTypeClaimableBalance type. 
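+// Claimable balances denominated in the native asset are ignored; for any other
+// asset the Pre entry decrements and the Post entry increments the per-asset
+// claimable balance count and amount. A minimal call sketch, assuming set is an
+// AssetStatSet and cb is an xdr.ClaimableBalanceEntry as in the tests:
+//
+//	err := set.AddClaimableBalance(ingest.Change{
+//		Post: &xdr.LedgerEntry{
+//			Data: xdr.LedgerEntryData{ClaimableBalance: &cb},
+//		},
+//	})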
+func (s AssetStatSet) AddClaimableBalance(change ingest.Change) error { + var pre, post *xdr.ClaimableBalanceEntry + if change.Pre != nil { + pre = change.Pre.Data.ClaimableBalance + } + if change.Post != nil { + post = change.Post.Data.ClaimableBalance + } + + deltaAccounts := delta{} + deltaBalances := delta{} + + if pre == nil && post == nil { + return ingest.NewStateError(errors.New("both pre and post claimable balances cannot be nil")) + } + + var asset xdr.Asset + if pre != nil { + asset = pre.Asset + deltaAccounts.ClaimableBalances-- + deltaBalances.ClaimableBalances -= int64(pre.Amount) + } + if post != nil { + asset = post.Asset + deltaAccounts.ClaimableBalances++ + deltaBalances.ClaimableBalances += int64(post.Amount) + } + + if asset.Type == xdr.AssetTypeAssetTypeNative { + return nil + } + + err := s.addDelta(asset, deltaBalances, deltaAccounts) + if err != nil { + return errors.Wrap(err, "error running AssetStatSet.addDelta") + } + return nil +} + +// Remove deletes an asset stat from the set +func (s AssetStatSet) Remove(assetType xdr.AssetType, assetCode string, assetIssuer string) (history.ExpAssetStat, bool) { + key := assetStatKey{assetType: assetType, assetIssuer: assetIssuer, assetCode: assetCode} + value, ok := s[key] + if !ok { + return history.ExpAssetStat{}, false + } + + delete(s, key) + + return value.ConvertToHistoryObject(), true +} + +// All returns a list of all `history.ExpAssetStat` contained within the set +func (s AssetStatSet) All() []history.ExpAssetStat { + assetStats := make([]history.ExpAssetStat, 0, len(s)) + for _, value := range s { + assetStats = append(assetStats, value.ConvertToHistoryObject()) + } + return assetStats +} diff --git a/services/horizon/internal/ingest/processors/asset_stats_set_test.go b/services/horizon/internal/ingest/processors/asset_stats_set_test.go new file mode 100644 index 0000000000..0c247593d6 --- /dev/null +++ b/services/horizon/internal/ingest/processors/asset_stats_set_test.go @@ -0,0 +1,330 @@ +package processors + +import ( + "math" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" +) + +func TestEmptyAssetStatSet(t *testing.T) { + set := AssetStatSet{} + if all := set.All(); len(all) != 0 { + t.Fatalf("expected empty list but got %v", all) + } + + _, ok := set.Remove( + xdr.AssetTypeAssetTypeCreditAlphanum4, + "USD", + "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB", + ) + if ok { + t.Fatal("expected remove to return false") + } +} + +func assertAllEquals(t *testing.T, set AssetStatSet, expected []history.ExpAssetStat) { + all := set.All() + assert.Len(t, all, len(expected)) + sort.Slice(all, func(i, j int) bool { + return all[i].AssetCode < all[j].AssetCode + }) + for i, got := range all { + assert.Equal(t, expected[i], got) + } +} + +func TestAddNativeClaimableBalance(t *testing.T) { + set := AssetStatSet{} + claimableBalance := xdr.ClaimableBalanceEntry{ + BalanceId: xdr.ClaimableBalanceId{}, + Claimants: nil, + Asset: xdr.MustNewNativeAsset(), + Amount: 100, + } + assert.NoError(t, set.AddClaimableBalance( + ingest.Change{ + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + ClaimableBalance: &claimableBalance, + }, + }, + }, + )) + assert.Empty(t, set.All()) +} + +func trustlineChange(pre, post *xdr.TrustLineEntry) ingest.Change { + c := ingest.Change{} + if pre != nil { + c.Pre = &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + TrustLine: pre, + }, + } + 
} + if post != nil { + c.Post = &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + TrustLine: post, + }, + } + } + return c +} + +func TestAddPoolShareTrustline(t *testing.T) { + set := AssetStatSet{} + assert.NoError( + t, + set.AddTrustline(trustlineChange(nil, &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.TrustLineAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPoolId: &xdr.PoolId{1, 2, 3}, + }, + Balance: 1, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + }, + )), + ) + assert.Empty(t, set.All()) +} + +func TestAddAndRemoveAssetStats(t *testing.T) { + set := AssetStatSet{} + eur := "EUR" + eurAssetStat := history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetCode: eur, + AssetIssuer: trustLineIssuer.Address(), + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "1", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "1", + NumAccounts: 1, + } + + assert.NoError( + t, + set.AddTrustline(trustlineChange(nil, &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset(eur, trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 1, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + }, + )), + ) + assertAllEquals(t, set, []history.ExpAssetStat{eurAssetStat}) + + eurAssetStat.Accounts.ClaimableBalances++ + eurAssetStat.Balances.ClaimableBalances = "23" + eurAsset := xdr.MustNewCreditAsset(eur, trustLineIssuer.Address()) + assert.NoError( + t, + set.addDelta( + eurAsset, + delta{ClaimableBalances: 23}, + delta{ClaimableBalances: 1}, + ), + ) + + assertAllEquals(t, set, []history.ExpAssetStat{eurAssetStat}) + + assert.NoError( + t, + set.AddTrustline(trustlineChange(nil, &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset(eur, trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 24, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag | xdr.TrustLineFlagsTrustlineClawbackEnabledFlag), + })), + ) + + eurAssetStat.Balances.Authorized = "25" + eurAssetStat.Amount = "25" + eurAssetStat.Accounts.Authorized++ + eurAssetStat.NumAccounts++ + assertAllEquals(t, set, []history.ExpAssetStat{eurAssetStat}) + + usd := "USD" + assert.NoError( + t, + set.AddTrustline(trustlineChange(nil, &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Asset: xdr.MustNewCreditAsset(usd, trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + })), + ) + + ether := "ETHER" + assert.NoError( + t, + set.AddTrustline(trustlineChange(nil, &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Asset: xdr.MustNewCreditAsset(ether, trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 3, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + })), + ) + + // AddTrustline an authorized_to_maintain_liabilities trust line + assert.NoError( + t, + set.AddTrustline(trustlineChange(nil, &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset(ether, trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 4, + Flags: 
xdr.Uint32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag), + })), + ) + + // AddTrustline an unauthorized trust line + assert.NoError( + t, + set.AddTrustline(trustlineChange(nil, &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset(ether, trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 5, + })), + ) + expected := []history.ExpAssetStat{ + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum12, + AssetCode: ether, + AssetIssuer: trustLineIssuer.Address(), + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + AuthorizedToMaintainLiabilities: 1, + Unauthorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "3", + AuthorizedToMaintainLiabilities: "4", + Unauthorized: "5", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "3", + NumAccounts: 1, + }, + eurAssetStat, + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetCode: usd, + AssetIssuer: trustLineIssuer.Address(), + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "10", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "10", + NumAccounts: 1, + }, + } + assertAllEquals(t, set, expected) + + for i, assetStat := range expected { + removed, ok := set.Remove(assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + if !ok { + t.Fatal("expected remove to return true") + } + if removed != assetStat { + t.Fatalf("expected removed asset stat to be %v but got %v", assetStat, removed) + } + + assertAllEquals(t, set, expected[i+1:]) + } +} + +func TestOverflowAssetStatSet(t *testing.T) { + set := AssetStatSet{} + eur := "EUR" + err := set.AddTrustline(trustlineChange(nil, &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset(eur, trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: math.MaxInt64, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + })) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + all := set.All() + if len(all) != 1 { + t.Fatalf("expected list of 1 asset stat but got %v", all) + } + + eurAssetStat := history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetCode: eur, + AssetIssuer: trustLineIssuer.Address(), + Accounts: history.ExpAssetStatAccounts{ + Authorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "9223372036854775807", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "9223372036854775807", + NumAccounts: 1, + } + if all[0] != eurAssetStat { + t.Fatalf("expected asset stat to be %v but got %v", eurAssetStat, all[0]) + } + + err = set.AddTrustline(trustlineChange(nil, &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset(eur, trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: math.MaxInt64, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + })) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + all = set.All() + if len(all) != 1 { + t.Fatalf("expected list of 1 asset stat but got %v", all) + } + + eurAssetStat = history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetCode: eur, + AssetIssuer: trustLineIssuer.Address(), + Accounts: 
history.ExpAssetStatAccounts{ + Authorized: 2, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "18446744073709551614", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "0", + ClaimableBalances: "0", + LiquidityPools: "0", + }, + Amount: "18446744073709551614", + NumAccounts: 2, + } + if all[0] != eurAssetStat { + t.Fatalf("expected asset stat to be %v but got %v", eurAssetStat, all[0]) + } +} diff --git a/services/horizon/internal/ingest/processors/change_processors.go b/services/horizon/internal/ingest/processors/change_processors.go new file mode 100644 index 0000000000..6c939288b8 --- /dev/null +++ b/services/horizon/internal/ingest/processors/change_processors.go @@ -0,0 +1,63 @@ +package processors + +import ( + "context" + "io" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/support/errors" +) + +type ChangeProcessor interface { + ProcessChange(ctx context.Context, change ingest.Change) error +} + +type LedgerTransactionProcessor interface { + ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) error +} + +func StreamLedgerTransactions( + ctx context.Context, + txProcessor LedgerTransactionProcessor, + reader *ingest.LedgerTransactionReader, +) error { + for { + tx, err := reader.Read() + if err == io.EOF { + return nil + } + if err != nil { + return errors.Wrap(err, "could not read transaction") + } + if err = txProcessor.ProcessTransaction(ctx, tx); err != nil { + return errors.Wrapf( + err, + "could not process transaction %v", + tx.Index, + ) + } + } +} + +func StreamChanges( + ctx context.Context, + changeProcessor ChangeProcessor, + reader ingest.ChangeReader, +) error { + for { + change, err := reader.Read() + if err == io.EOF { + return nil + } + if err != nil { + return errors.Wrap(err, "could not read transaction") + } + + if err = changeProcessor.ProcessChange(ctx, change); err != nil { + return errors.Wrap( + err, + "could not process change", + ) + } + } +} diff --git a/services/horizon/internal/ingest/processors/change_processors_test.go b/services/horizon/internal/ingest/processors/change_processors_test.go new file mode 100644 index 0000000000..829552543f --- /dev/null +++ b/services/horizon/internal/ingest/processors/change_processors_test.go @@ -0,0 +1,47 @@ +package processors + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/support/errors" +) + +func TestStreamReaderError(t *testing.T) { + tt := assert.New(t) + ctx := context.Background() + + mockChangeReader := &ingest.MockChangeReader{} + mockChangeReader. + On("Read"). + Return(ingest.Change{}, errors.New("transient error")).Once() + mockChangeProcessor := &MockChangeProcessor{} + + err := StreamChanges(ctx, mockChangeProcessor, mockChangeReader) + tt.EqualError(err, "could not read transaction: transient error") +} + +func TestStreamChangeProcessorError(t *testing.T) { + tt := assert.New(t) + ctx := context.Background() + + change := ingest.Change{} + mockChangeReader := &ingest.MockChangeReader{} + mockChangeReader. + On("Read"). + Return(change, nil).Once() + + mockChangeProcessor := &MockChangeProcessor{} + mockChangeProcessor. + On( + "ProcessChange", ctx, + change, + ). 
+ Return(errors.New("transient error")).Once() + + err := StreamChanges(ctx, mockChangeProcessor, mockChangeReader) + tt.EqualError(err, "could not process change: transient error") +} diff --git a/services/horizon/internal/ingest/processors/claimable_balances_change_processor.go b/services/horizon/internal/ingest/processors/claimable_balances_change_processor.go new file mode 100644 index 0000000000..fae72f2c49 --- /dev/null +++ b/services/horizon/internal/ingest/processors/claimable_balances_change_processor.go @@ -0,0 +1,136 @@ +package processors + +import ( + "context" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type ClaimableBalancesChangeProcessor struct { + encodingBuffer *xdr.EncodingBuffer + qClaimableBalances history.QClaimableBalances + cache *ingest.ChangeCompactor +} + +func NewClaimableBalancesChangeProcessor(Q history.QClaimableBalances) *ClaimableBalancesChangeProcessor { + p := &ClaimableBalancesChangeProcessor{ + encodingBuffer: xdr.NewEncodingBuffer(), + qClaimableBalances: Q, + } + p.reset() + return p +} + +func (p *ClaimableBalancesChangeProcessor) reset() { + p.cache = ingest.NewChangeCompactor() +} + +func (p *ClaimableBalancesChangeProcessor) ProcessChange(ctx context.Context, change ingest.Change) error { + if change.Type != xdr.LedgerEntryTypeClaimableBalance { + return nil + } + + err := p.cache.AddChange(change) + if err != nil { + return errors.Wrap(err, "error adding to ledgerCache") + } + + if p.cache.Size() > maxBatchSize { + err = p.Commit(ctx) + if err != nil { + return errors.Wrap(err, "error in Commit") + } + p.reset() + } + + return nil +} + +func (p *ClaimableBalancesChangeProcessor) Commit(ctx context.Context) error { + var ( + cbsToUpsert []history.ClaimableBalance + cbIDsToDelete []string + ) + changes := p.cache.GetChanges() + for _, change := range changes { + switch { + case change.Pre == nil && change.Post != nil: + // Created + row, err := p.ledgerEntryToRow(change.Post) + if err != nil { + return err + } + cbsToUpsert = append(cbsToUpsert, row) + case change.Pre != nil && change.Post == nil: + // Removed + cBalance := change.Pre.Data.MustClaimableBalance() + id, err := p.encodingBuffer.MarshalHex(cBalance.BalanceId) + if err != nil { + return err + } + cbIDsToDelete = append(cbIDsToDelete, id) + default: + // Updated + row, err := p.ledgerEntryToRow(change.Post) + if err != nil { + return err + } + cbsToUpsert = append(cbsToUpsert, row) + } + } + + if len(cbsToUpsert) > 0 { + if err := p.qClaimableBalances.UpsertClaimableBalances(ctx, cbsToUpsert); err != nil { + return errors.Wrap(err, "error executing upsert") + } + } + + if len(cbIDsToDelete) > 0 { + count, err := p.qClaimableBalances.RemoveClaimableBalances(ctx, cbIDsToDelete) + if err != nil { + return errors.Wrap(err, "error executing removal") + } + if count != int64(len(cbIDsToDelete)) { + return ingest.NewStateError(errors.Errorf( + "%d rows affected when deleting %d claimable balances", + count, + len(cbIDsToDelete), + )) + } + } + + return nil +} + +func buildClaimants(claimants []xdr.Claimant) history.Claimants { + hClaimants := history.Claimants{} + for _, c := range claimants { + xc := c.MustV0() + hClaimants = append(hClaimants, history.Claimant{ + Destination: xc.Destination.Address(), + Predicate: xc.Predicate, + }) + } + return hClaimants +} + +func (p *ClaimableBalancesChangeProcessor) ledgerEntryToRow(entry *xdr.LedgerEntry) 
(history.ClaimableBalance, error) { + cBalance := entry.Data.MustClaimableBalance() + id, err := xdr.MarshalHex(cBalance.BalanceId) + if err != nil { + return history.ClaimableBalance{}, err + } + row := history.ClaimableBalance{ + BalanceID: id, + Claimants: buildClaimants(cBalance.Claimants), + Asset: cBalance.Asset, + Amount: cBalance.Amount, + Sponsor: ledgerEntrySponsorToNullString(*entry), + LastModifiedLedger: uint32(entry.LastModifiedLedgerSeq), + Flags: uint32(cBalance.Flags()), + } + return row, nil +} diff --git a/services/horizon/internal/ingest/processors/claimable_balances_change_processor_test.go b/services/horizon/internal/ingest/processors/claimable_balances_change_processor_test.go new file mode 100644 index 0000000000..0ece2661de --- /dev/null +++ b/services/horizon/internal/ingest/processors/claimable_balances_change_processor_test.go @@ -0,0 +1,290 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/guregu/null" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" +) + +func TestClaimableBalancesChangeProcessorTestSuiteState(t *testing.T) { + suite.Run(t, new(ClaimableBalancesChangeProcessorTestSuiteState)) +} + +type ClaimableBalancesChangeProcessorTestSuiteState struct { + suite.Suite + ctx context.Context + processor *ClaimableBalancesChangeProcessor + mockQ *history.MockQClaimableBalances +} + +func (s *ClaimableBalancesChangeProcessorTestSuiteState) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQClaimableBalances{} + + s.processor = NewClaimableBalancesChangeProcessor(s.mockQ) +} + +func (s *ClaimableBalancesChangeProcessorTestSuiteState) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s *ClaimableBalancesChangeProcessorTestSuiteState) TestNoEntries() { + // Nothing processed, assertions in TearDownTest. 
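+	// Commit on an empty cache is a no-op, so no UpsertClaimableBalances or
+	// RemoveClaimableBalances calls are expected on the mock.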
+} + +func (s *ClaimableBalancesChangeProcessorTestSuiteState) TestCreatesClaimableBalances() { + lastModifiedLedgerSeq := xdr.Uint32(123) + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + + cBalance := xdr.ClaimableBalanceEntry{ + BalanceId: balanceID, + Claimants: []xdr.Claimant{}, + Asset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Amount: 10, + } + id, err := xdr.MarshalHex(balanceID) + s.Assert().NoError(err) + s.mockQ.On("UpsertClaimableBalances", s.ctx, []history.ClaimableBalance{ + { + BalanceID: id, + Claimants: []history.Claimant{}, + Asset: cBalance.Asset, + Amount: cBalance.Amount, + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + }, + }).Return(nil).Once() + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &cBalance, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) +} + +func TestClaimableBalancesChangeProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(ClaimableBalancesChangeProcessorTestSuiteLedger)) +} + +type ClaimableBalancesChangeProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *ClaimableBalancesChangeProcessor + mockQ *history.MockQClaimableBalances +} + +func (s *ClaimableBalancesChangeProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQClaimableBalances{} + + s.processor = NewClaimableBalancesChangeProcessor(s.mockQ) +} + +func (s *ClaimableBalancesChangeProcessorTestSuiteLedger) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s *ClaimableBalancesChangeProcessorTestSuiteLedger) TestNoTransactions() { + // Nothing processed, assertions in TearDownTest. 
+} + +func (s *ClaimableBalancesChangeProcessorTestSuiteLedger) TestNewClaimableBalance() { + lastModifiedLedgerSeq := xdr.Uint32(123) + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + cBalance := xdr.ClaimableBalanceEntry{ + BalanceId: balanceID, + Claimants: []xdr.Claimant{}, + Asset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Amount: 10, + } + entry := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &cBalance, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: nil, + }, + }, + } + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: nil, + Post: &entry, + }) + s.Assert().NoError(err) + + // add sponsor + updated := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &cBalance, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + } + + entry.LastModifiedLedgerSeq = entry.LastModifiedLedgerSeq - 1 + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: &entry, + Post: &updated, + }) + s.Assert().NoError(err) + + id, err := xdr.MarshalHex(balanceID) + s.Assert().NoError(err) + // We use LedgerEntryChangesCache so all changes are squashed + s.mockQ.On( + "UpsertClaimableBalances", + s.ctx, + []history.ClaimableBalance{ + { + BalanceID: id, + Claimants: []history.Claimant{}, + Asset: cBalance.Asset, + Amount: cBalance.Amount, + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + ).Return(nil).Once() +} + +func (s *ClaimableBalancesChangeProcessorTestSuiteLedger) TestUpdateClaimableBalance() { + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + cBalance := xdr.ClaimableBalanceEntry{ + BalanceId: balanceID, + Claimants: []xdr.Claimant{}, + Asset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Amount: 10, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + pre := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &cBalance, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: nil, + }, + }, + } + + // add sponsor + updated := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &cBalance, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: &pre, + Post: &updated, + }) + s.Assert().NoError(err) + + id, err := xdr.MarshalHex(balanceID) + s.Assert().NoError(err) + s.mockQ.On( + "UpsertClaimableBalances", + s.ctx, + []history.ClaimableBalance{ + { + BalanceID: id, + Claimants: 
[]history.Claimant{}, + Asset: cBalance.Asset, + Amount: cBalance.Amount, + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + ).Return(nil).Once() +} + +func (s *ClaimableBalancesChangeProcessorTestSuiteLedger) TestRemoveClaimableBalance() { + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + cBalance := xdr.ClaimableBalanceEntry{ + BalanceId: balanceID, + Claimants: []xdr.Claimant{}, + Asset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Amount: 10, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + pre := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &cBalance, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: nil, + }, + }, + } + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: &pre, + Post: nil, + }) + s.Assert().NoError(err) + + id, err := xdr.MarshalHex(balanceID) + s.Assert().NoError(err) + s.mockQ.On( + "RemoveClaimableBalances", + s.ctx, + []string{id}, + ).Return(int64(1), nil).Once() +} diff --git a/services/horizon/internal/ingest/processors/claimable_balances_transaction_processor.go b/services/horizon/internal/ingest/processors/claimable_balances_transaction_processor.go new file mode 100644 index 0000000000..7881df6519 --- /dev/null +++ b/services/horizon/internal/ingest/processors/claimable_balances_transaction_processor.go @@ -0,0 +1,256 @@ +package processors + +import ( + "context" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type claimableBalance struct { + internalID int64 // Bigint auto-generated by postgres + transactionSet map[int64]struct{} + operationSet map[int64]struct{} +} + +func (b *claimableBalance) addTransactionID(id int64) { + if b.transactionSet == nil { + b.transactionSet = map[int64]struct{}{} + } + b.transactionSet[id] = struct{}{} +} + +func (b *claimableBalance) addOperationID(id int64) { + if b.operationSet == nil { + b.operationSet = map[int64]struct{}{} + } + b.operationSet[id] = struct{}{} +} + +type ClaimableBalancesTransactionProcessor struct { + sequence uint32 + claimableBalanceSet map[string]claimableBalance + qClaimableBalances history.QHistoryClaimableBalances +} + +func NewClaimableBalancesTransactionProcessor(Q history.QHistoryClaimableBalances, sequence uint32) *ClaimableBalancesTransactionProcessor { + return &ClaimableBalancesTransactionProcessor{ + qClaimableBalances: Q, + sequence: sequence, + claimableBalanceSet: map[string]claimableBalance{}, + } +} + +func (p *ClaimableBalancesTransactionProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) error { + err := p.addTransactionClaimableBalances(p.claimableBalanceSet, p.sequence, transaction) + if err != nil { + return err + } + + err = p.addOperationClaimableBalances(p.claimableBalanceSet, p.sequence, transaction) + if err != nil { + return err + } + + return nil +} + +func (p *ClaimableBalancesTransactionProcessor) addTransactionClaimableBalances(cbSet map[string]claimableBalance, sequence uint32, transaction 
ingest.LedgerTransaction) error { + transactionID := toid.New(int32(sequence), int32(transaction.Index), 0).ToInt64() + transactionClaimableBalances, err := claimableBalancesForTransaction( + sequence, + transaction, + ) + if err != nil { + return errors.Wrap(err, "Could not determine claimable balances for transaction") + } + + for _, cb := range transactionClaimableBalances { + entry := cbSet[cb] + entry.addTransactionID(transactionID) + cbSet[cb] = entry + } + + return nil +} + +func claimableBalancesForTransaction( + sequence uint32, + transaction ingest.LedgerTransaction, +) ([]string, error) { + changes, err := transaction.GetChanges() + if err != nil { + return nil, err + } + cbs, err := claimableBalancesForChanges(changes) + if err != nil { + return nil, errors.Wrapf(err, "reading transaction %v claimable balances", transaction.Index) + } + return dedupeClaimableBalances(cbs) +} + +func dedupeClaimableBalances(in []string) (out []string, err error) { + set := map[string]struct{}{} + for _, id := range in { + set[id] = struct{}{} + } + + for id := range set { + out = append(out, id) + } + return +} + +func claimableBalancesForChanges( + changes []ingest.Change, +) ([]string, error) { + var cbs []string + + for _, c := range changes { + if c.Type != xdr.LedgerEntryTypeClaimableBalance { + continue + } + + if c.Pre == nil && c.Post == nil { + return nil, errors.New("Invalid io.Change: change.Pre == nil && change.Post == nil") + } + + var claimableBalanceID xdr.ClaimableBalanceId + if c.Pre != nil { + claimableBalanceID = c.Pre.Data.MustClaimableBalance().BalanceId + } + if c.Post != nil { + claimableBalanceID = c.Post.Data.MustClaimableBalance().BalanceId + } + id, err := xdr.MarshalHex(claimableBalanceID) + if err != nil { + return nil, err + } + cbs = append(cbs, id) + } + + return cbs, nil +} + +func (p *ClaimableBalancesTransactionProcessor) addOperationClaimableBalances(cbSet map[string]claimableBalance, sequence uint32, transaction ingest.LedgerTransaction) error { + claimableBalances, err := claimableBalancesForOperations(transaction, sequence) + if err != nil { + return errors.Wrap(err, "could not determine operation claimable balances") + } + + for operationID, cbs := range claimableBalances { + for _, cb := range cbs { + entry := cbSet[cb] + entry.addOperationID(operationID) + cbSet[cb] = entry + } + } + + return nil +} + +func claimableBalancesForOperations(transaction ingest.LedgerTransaction, sequence uint32) (map[int64][]string, error) { + cbs := map[int64][]string{} + + for opi, op := range transaction.Envelope.Operations() { + operation := transactionOperationWrapper{ + index: uint32(opi), + transaction: transaction, + operation: op, + ledgerSequence: sequence, + } + + changes, err := transaction.GetOperationChanges(uint32(opi)) + if err != nil { + return cbs, err + } + c, err := claimableBalancesForChanges(changes) + if err != nil { + return cbs, errors.Wrapf(err, "reading operation %v claimable balances", operation.ID()) + } + cbs[operation.ID()] = c + } + + return cbs, nil +} + +func (p *ClaimableBalancesTransactionProcessor) Commit(ctx context.Context) error { + if len(p.claimableBalanceSet) > 0 { + if err := p.loadClaimableBalanceIDs(ctx, p.claimableBalanceSet); err != nil { + return err + } + + if err := p.insertDBTransactionClaimableBalances(ctx, p.claimableBalanceSet); err != nil { + return err + } + + if err := p.insertDBOperationsClaimableBalances(ctx, p.claimableBalanceSet); err != nil { + return err + } + } + + return nil +} + +func (p 
*ClaimableBalancesTransactionProcessor) loadClaimableBalanceIDs(ctx context.Context, claimableBalanceSet map[string]claimableBalance) error { + ids := make([]string, 0, len(claimableBalanceSet)) + for id := range claimableBalanceSet { + ids = append(ids, id) + } + + toInternalID, err := p.qClaimableBalances.CreateHistoryClaimableBalances(ctx, ids, maxBatchSize) + if err != nil { + return errors.Wrap(err, "Could not create claimable balance ids") + } + + for _, id := range ids { + internalID, ok := toInternalID[id] + if !ok { + // TODO: Figure out the right way to convert the id to a string here. %v will be nonsense. + return errors.Errorf("no internal id found for claimable balance %v", id) + } + + cb := claimableBalanceSet[id] + cb.internalID = internalID + claimableBalanceSet[id] = cb + } + + return nil +} + +func (p ClaimableBalancesTransactionProcessor) insertDBTransactionClaimableBalances(ctx context.Context, claimableBalanceSet map[string]claimableBalance) error { + batch := p.qClaimableBalances.NewTransactionClaimableBalanceBatchInsertBuilder(maxBatchSize) + + for _, entry := range claimableBalanceSet { + for transactionID := range entry.transactionSet { + if err := batch.Add(ctx, transactionID, entry.internalID); err != nil { + return errors.Wrap(err, "could not insert transaction claimable balance in db") + } + } + } + + if err := batch.Exec(ctx); err != nil { + return errors.Wrap(err, "could not flush transaction claimable balances to db") + } + return nil +} + +func (p ClaimableBalancesTransactionProcessor) insertDBOperationsClaimableBalances(ctx context.Context, claimableBalanceSet map[string]claimableBalance) error { + batch := p.qClaimableBalances.NewOperationClaimableBalanceBatchInsertBuilder(maxBatchSize) + + for _, entry := range claimableBalanceSet { + for operationID := range entry.operationSet { + if err := batch.Add(ctx, operationID, entry.internalID); err != nil { + return errors.Wrap(err, "could not insert operation claimable balance in db") + } + } + } + + if err := batch.Exec(ctx); err != nil { + return errors.Wrap(err, "could not flush operation claimable balances to db") + } + return nil +} diff --git a/services/horizon/internal/ingest/processors/claimable_balances_transaction_processor_test.go b/services/horizon/internal/ingest/processors/claimable_balances_transaction_processor_test.go new file mode 100644 index 0000000000..2be06109c6 --- /dev/null +++ b/services/horizon/internal/ingest/processors/claimable_balances_transaction_processor_test.go @@ -0,0 +1,211 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/xdr" +) + +type ClaimableBalancesTransactionProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *ClaimableBalancesTransactionProcessor + mockQ *history.MockQHistoryClaimableBalances + mockTransactionBatchInsertBuilder *history.MockTransactionClaimableBalanceBatchInsertBuilder + mockOperationBatchInsertBuilder *history.MockOperationClaimableBalanceBatchInsertBuilder + + sequence uint32 +} + +func TestClaimableBalancesTransactionProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(ClaimableBalancesTransactionProcessorTestSuiteLedger)) +} + +func (s 
*ClaimableBalancesTransactionProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQHistoryClaimableBalances{} + s.mockTransactionBatchInsertBuilder = &history.MockTransactionClaimableBalanceBatchInsertBuilder{} + s.mockOperationBatchInsertBuilder = &history.MockOperationClaimableBalanceBatchInsertBuilder{} + s.sequence = 20 + + s.processor = NewClaimableBalancesTransactionProcessor( + s.mockQ, + s.sequence, + ) +} + +func (s *ClaimableBalancesTransactionProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) + s.mockTransactionBatchInsertBuilder.AssertExpectations(s.T()) + s.mockOperationBatchInsertBuilder.AssertExpectations(s.T()) +} + +func (s *ClaimableBalancesTransactionProcessorTestSuiteLedger) mockTransactionBatchAdd(transactionID, internalID int64, err error) { + s.mockTransactionBatchInsertBuilder.On("Add", s.ctx, transactionID, internalID).Return(err).Once() +} + +func (s *ClaimableBalancesTransactionProcessorTestSuiteLedger) mockOperationBatchAdd(operationID, internalID int64, err error) { + s.mockOperationBatchInsertBuilder.On("Add", s.ctx, operationID, internalID).Return(err).Once() +} + +func (s *ClaimableBalancesTransactionProcessorTestSuiteLedger) TestEmptyClaimableBalances() { + // What is this expecting? Doesn't seem to assert anything meaningful... + err := s.processor.Commit(context.Background()) + s.Assert().NoError(err) +} + +func (s *ClaimableBalancesTransactionProcessorTestSuiteLedger) testOperationInserts(balanceID xdr.ClaimableBalanceId, body xdr.OperationBody, change xdr.LedgerEntryChange) { + // Setup the transaction + internalID := int64(1234) + txn := createTransaction(true, 1) + txn.Envelope.Operations()[0].Body = body + txn.UnsafeMeta.V = 2 + txn.UnsafeMeta.V2.Operations = []xdr.OperationMeta{ + {Changes: xdr.LedgerEntryChanges{ + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.ClaimableBalanceEntry{ + BalanceId: balanceID, + }, + }, + }, + }, + change, + }}, + } + + if body.Type == xdr.OperationTypeCreateClaimableBalance { + // For insert test + txn.Result.Result.Result.Results = + &[]xdr.OperationResult{ + { + Code: xdr.OperationResultCodeOpInner, + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeCreateClaimableBalance, + CreateClaimableBalanceResult: &xdr.CreateClaimableBalanceResult{ + Code: xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess, + BalanceId: &balanceID, + }, + }, + }, + } + } + txnID := toid.New(int32(s.sequence), int32(txn.Index), 0).ToInt64() + opID := (&transactionOperationWrapper{ + index: uint32(0), + transaction: txn, + operation: txn.Envelope.Operations()[0], + ledgerSequence: s.sequence, + }).ID() + + hexID, _ := xdr.MarshalHex(balanceID) + + // Setup a q + s.mockQ.On("CreateHistoryClaimableBalances", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). + Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch( + []string{ + hexID, + }, + arg, + ) + }).Return(map[string]int64{ + hexID: internalID, + }, nil).Once() + + // Prepare to process transactions successfully + s.mockQ.On("NewTransactionClaimableBalanceBatchInsertBuilder", maxBatchSize). 
+ Return(s.mockTransactionBatchInsertBuilder).Once() + s.mockTransactionBatchAdd(txnID, internalID, nil) + s.mockTransactionBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + + // Prepare to process operations successfully + s.mockQ.On("NewOperationClaimableBalanceBatchInsertBuilder", maxBatchSize). + Return(s.mockOperationBatchInsertBuilder).Once() + s.mockOperationBatchAdd(opID, internalID, nil) + s.mockOperationBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + + // Process the transaction + err := s.processor.ProcessTransaction(s.ctx, txn) + s.Assert().NoError(err) + err = s.processor.Commit(s.ctx) + s.Assert().NoError(err) +} + +func (s *ClaimableBalancesTransactionProcessorTestSuiteLedger) TestIngestClaimableBalancesInsertsClaimClaimableBalance() { + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + s.testOperationInserts(balanceID, xdr.OperationBody{ + Type: xdr.OperationTypeClaimClaimableBalance, + ClaimClaimableBalanceOp: &xdr.ClaimClaimableBalanceOp{ + BalanceId: balanceID, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved, + Removed: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.LedgerKeyClaimableBalance{ + BalanceId: balanceID, + }, + }, + }) +} + +func (s *ClaimableBalancesTransactionProcessorTestSuiteLedger) TestIngestClaimableBalancesInsertsClawbackClaimableBalance() { + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + s.testOperationInserts(balanceID, xdr.OperationBody{ + Type: xdr.OperationTypeClawbackClaimableBalance, + ClawbackClaimableBalanceOp: &xdr.ClawbackClaimableBalanceOp{ + BalanceId: balanceID, + }, + }, xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.ClaimableBalanceEntry{ + BalanceId: balanceID, + }, + }, + }, + }) +} + +func (s *ClaimableBalancesTransactionProcessorTestSuiteLedger) TestIngestClaimableBalancesInsertsCreateClaimableBalance() { + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + s.testOperationInserts(balanceID, xdr.OperationBody{ + Type: xdr.OperationTypeCreateClaimableBalance, + CreateClaimableBalanceOp: &xdr.CreateClaimableBalanceOp{}, + }, xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.ClaimableBalanceEntry{ + BalanceId: balanceID, + }, + }, + }, + }) +} diff --git a/services/horizon/internal/ingest/processors/effects_processor.go b/services/horizon/internal/ingest/processors/effects_processor.go new file mode 100644 index 0000000000..0bce813343 --- /dev/null +++ b/services/horizon/internal/ingest/processors/effects_processor.go @@ -0,0 +1,1385 @@ +package processors + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + + "github.com/guregu/null" + "github.com/stellar/go/amount" + "github.com/stellar/go/ingest" + "github.com/stellar/go/keypair" + "github.com/stellar/go/protocols/horizon/base" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// EffectProcessor process 
effects +type EffectProcessor struct { + effects []effect + effectsQ history.QEffects + sequence uint32 +} + +func NewEffectProcessor(effectsQ history.QEffects, sequence uint32) *EffectProcessor { + return &EffectProcessor{ + effectsQ: effectsQ, + sequence: sequence, + } +} + +func (p *EffectProcessor) loadAccountIDs(ctx context.Context, accountSet map[string]int64) error { + addresses := make([]string, 0, len(accountSet)) + for address := range accountSet { + addresses = append(addresses, address) + } + + addressToID, err := p.effectsQ.CreateAccounts(ctx, addresses, maxBatchSize) + if err != nil { + return errors.Wrap(err, "Could not create account ids") + } + + for _, address := range addresses { + id, ok := addressToID[address] + if !ok { + return errors.Errorf("no id found for account address %s", address) + } + + accountSet[address] = id + } + + return nil +} + +func operationsEffects(transaction ingest.LedgerTransaction, sequence uint32) ([]effect, error) { + effects := []effect{} + + for opi, op := range transaction.Envelope.Operations() { + operation := transactionOperationWrapper{ + index: uint32(opi), + transaction: transaction, + operation: op, + ledgerSequence: sequence, + } + + p, err := operation.effects() + if err != nil { + return effects, errors.Wrapf(err, "reading operation %v effects", operation.ID()) + } + effects = append(effects, p...) + } + + return effects, nil +} + +func (p *EffectProcessor) insertDBOperationsEffects(ctx context.Context, effects []effect, accountSet map[string]int64) error { + batch := p.effectsQ.NewEffectBatchInsertBuilder(maxBatchSize) + + for _, effect := range effects { + accountID, found := accountSet[effect.address] + + if !found { + return errors.Errorf("Error finding history_account_id for address %v", effect.address) + } + + var detailsJSON []byte + detailsJSON, err := json.Marshal(effect.details) + + if err != nil { + return errors.Wrapf(err, "Error marshaling details for operation effect %v", effect.operationID) + } + + if err := batch.Add(ctx, + accountID, + effect.addressMuxed, + effect.operationID, + effect.order, + effect.effectType, + detailsJSON, + ); err != nil { + return errors.Wrap(err, "could not insert operation effect in db") + } + } + + if err := batch.Exec(ctx); err != nil { + return errors.Wrap(err, "could not flush operation effects to db") + } + return nil +} + +func (p *EffectProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) (err error) { + // Failed transactions don't have operation effects + if !transaction.Result.Successful() { + return nil + } + + var effectsForTx []effect + effectsForTx, err = operationsEffects(transaction, p.sequence) + if err != nil { + return err + } + p.effects = append(p.effects, effectsForTx...) 
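// Illustrative sketch, not part of this diff: the call pattern the EffectProcessor
// above is built for. A ledger's transactions are fed one by one through
// ProcessTransaction, then Commit resolves account addresses to history IDs and
// batch-inserts everything. The driver function name and the way `txs` is obtained
// are assumptions for the example.
func ingestLedgerEffects(ctx context.Context, q history.QEffects, sequence uint32, txs []ingest.LedgerTransaction) error {
	p := NewEffectProcessor(q, sequence)
	for _, tx := range txs {
		// Failed transactions are skipped inside ProcessTransaction.
		if err := p.ProcessTransaction(ctx, tx); err != nil {
			return err
		}
	}
	return p.Commit(ctx)
}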
+ + return nil +} + +func (p *EffectProcessor) Commit(ctx context.Context) (err error) { + if len(p.effects) > 0 { + accountSet := map[string]int64{} + + for _, effect := range p.effects { + accountSet[effect.address] = 0 + } + + if err = p.loadAccountIDs(ctx, accountSet); err != nil { + return err + } + + if err = p.insertDBOperationsEffects(ctx, p.effects, accountSet); err != nil { + return err + } + } + + return err +} + +type effect struct { + address string + addressMuxed null.String + operationID int64 + details map[string]interface{} + effectType history.EffectType + order uint32 +} + +// Effects returns the operation effects +func (operation *transactionOperationWrapper) effects() ([]effect, error) { + if !operation.transaction.Result.Successful() { + return []effect{}, nil + } + var ( + op = operation.operation + err error + ) + + changes, err := operation.transaction.GetOperationChanges(operation.index) + if err != nil { + return nil, err + } + + wrapper := &effectsWrapper{ + effects: []effect{}, + operation: operation, + } + + switch operation.OperationType() { + case xdr.OperationTypeCreateAccount: + wrapper.addAccountCreatedEffects() + case xdr.OperationTypePayment: + wrapper.addPaymentEffects() + case xdr.OperationTypePathPaymentStrictReceive: + err = wrapper.pathPaymentStrictReceiveEffects() + case xdr.OperationTypePathPaymentStrictSend: + err = wrapper.addPathPaymentStrictSendEffects() + case xdr.OperationTypeManageSellOffer: + err = wrapper.addManageSellOfferEffects() + case xdr.OperationTypeManageBuyOffer: + err = wrapper.addManageBuyOfferEffects() + case xdr.OperationTypeCreatePassiveSellOffer: + err = wrapper.addCreatePassiveSellOfferEffect() + case xdr.OperationTypeSetOptions: + wrapper.addSetOptionsEffects() + case xdr.OperationTypeChangeTrust: + err = wrapper.addChangeTrustEffects() + case xdr.OperationTypeAllowTrust: + err = wrapper.addAllowTrustEffects() + case xdr.OperationTypeAccountMerge: + wrapper.addAccountMergeEffects() + case xdr.OperationTypeInflation: + wrapper.addInflationEffects() + case xdr.OperationTypeManageData: + err = wrapper.addManageDataEffects() + case xdr.OperationTypeBumpSequence: + err = wrapper.addBumpSequenceEffects() + case xdr.OperationTypeCreateClaimableBalance: + err = wrapper.addCreateClaimableBalanceEffects(changes) + case xdr.OperationTypeClaimClaimableBalance: + err = wrapper.addClaimClaimableBalanceEffects(changes) + case xdr.OperationTypeBeginSponsoringFutureReserves, xdr.OperationTypeEndSponsoringFutureReserves, xdr.OperationTypeRevokeSponsorship: + // The effects of these operations are obtained indirectly from the ledger entries + case xdr.OperationTypeClawback: + err = wrapper.addClawbackEffects() + case xdr.OperationTypeClawbackClaimableBalance: + err = wrapper.addClawbackClaimableBalanceEffects(changes) + case xdr.OperationTypeSetTrustLineFlags: + err = wrapper.addSetTrustLineFlagsEffects() + case xdr.OperationTypeLiquidityPoolDeposit: + err = wrapper.addLiquidityPoolDepositEffect() + case xdr.OperationTypeLiquidityPoolWithdraw: + err = wrapper.addLiquidityPoolWithdrawEffect() + default: + return nil, fmt.Errorf("Unknown operation type: %s", op.Body.Type) + } + if err != nil { + return nil, err + } + + // Effects generated for multiple operations. Keep the effect categories + // separated so they are "together" in case of different order or meta + // changes generate by core (unordered_map). 
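// Illustrative sketch, not part of this diff: what the dispatch above yields for a
// successful native-asset payment. Two operation-scoped effects are produced,
// ordered by the 1-based `order` field; the addresses and amount below are
// hypothetical placeholders (the native "asset_type" detail matches the test
// fixtures later in this diff).
wantPaymentEffects := []effect{
	{
		address:    "G...DESTINATION", // payment destination, hypothetical
		effectType: history.EffectAccountCredited,
		order:      1,
		details:    map[string]interface{}{"amount": "10.0000000", "asset_type": "native"},
	},
	{
		address:    "G...SOURCE", // operation source, hypothetical
		effectType: history.EffectAccountDebited,
		order:      2,
		details:    map[string]interface{}{"amount": "10.0000000", "asset_type": "native"},
	},
}
_ = wantPaymentEffects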
+ + // Sponsorships + for _, change := range changes { + if err = wrapper.addLedgerEntrySponsorshipEffects(change); err != nil { + return nil, err + } + wrapper.addSignerSponsorshipEffects(change) + } + + // Liquidity pools + for _, change := range changes { + // Effects caused by ChangeTrust (creation), AllowTrust and SetTrustlineFlags (removal through revocation) + wrapper.addLedgerEntryLiquidityPoolEffects(change) + } + + return wrapper.effects, nil +} + +type effectsWrapper struct { + effects []effect + operation *transactionOperationWrapper +} + +func (e *effectsWrapper) add(address string, addressMuxed null.String, effectType history.EffectType, details map[string]interface{}) { + e.effects = append(e.effects, effect{ + address: address, + addressMuxed: addressMuxed, + operationID: e.operation.ID(), + effectType: effectType, + order: uint32(len(e.effects) + 1), + details: details, + }) +} + +func (e *effectsWrapper) addUnmuxed(address *xdr.AccountId, effectType history.EffectType, details map[string]interface{}) { + e.add(address.Address(), null.String{}, effectType, details) +} + +func (e *effectsWrapper) addMuxed(address *xdr.MuxedAccount, effectType history.EffectType, details map[string]interface{}) { + var addressMuxed null.String + if address.Type == xdr.CryptoKeyTypeKeyTypeMuxedEd25519 { + addressMuxed = null.StringFrom(address.Address()) + } + accID := address.ToAccountId() + e.add(accID.Address(), addressMuxed, effectType, details) +} + +var sponsoringEffectsTable = map[xdr.LedgerEntryType]struct { + created, updated, removed history.EffectType +}{ + xdr.LedgerEntryTypeAccount: { + created: history.EffectAccountSponsorshipCreated, + updated: history.EffectAccountSponsorshipUpdated, + removed: history.EffectAccountSponsorshipRemoved, + }, + xdr.LedgerEntryTypeTrustline: { + created: history.EffectTrustlineSponsorshipCreated, + updated: history.EffectTrustlineSponsorshipUpdated, + removed: history.EffectTrustlineSponsorshipRemoved, + }, + xdr.LedgerEntryTypeData: { + created: history.EffectDataSponsorshipCreated, + updated: history.EffectDataSponsorshipUpdated, + removed: history.EffectDataSponsorshipRemoved, + }, + xdr.LedgerEntryTypeClaimableBalance: { + created: history.EffectClaimableBalanceSponsorshipCreated, + updated: history.EffectClaimableBalanceSponsorshipUpdated, + removed: history.EffectClaimableBalanceSponsorshipRemoved, + }, + + // We intentionally don't have Sponsoring effects for Offer + // entries because we don't generate creation effects for them. 
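// Illustrative sketch, not part of this diff: how addMuxed above fills the two
// address columns. The construction mirrors the muxed accounts used in the tests
// later in this diff; the multiplexing ID is a hypothetical example value.
aid := xdr.MustAddress("GDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQHVGJ")
muxed := xdr.MuxedAccount{
	Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519,
	Med25519: &xdr.MuxedAccountMed25519{
		Id:      0xcafebabe,
		Ed25519: *aid.Ed25519,
	},
}
// e.addMuxed(&muxed, effectType, details) then records:
//   address      = muxed.ToAccountId().Address()    // always the underlying G... account
//   addressMuxed = null.StringFrom(muxed.Address())  // the M... address; stays NULL for plain G... accounts
_ = muxed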
+} + +func (e *effectsWrapper) addSignerSponsorshipEffects(change ingest.Change) { + if change.Type != xdr.LedgerEntryTypeAccount { + return + } + + preSigners := map[string]xdr.AccountId{} + postSigners := map[string]xdr.AccountId{} + if change.Pre != nil { + account := change.Pre.Data.MustAccount() + preSigners = account.SponsorPerSigner() + } + if change.Post != nil { + account := change.Post.Data.MustAccount() + postSigners = account.SponsorPerSigner() + } + + var all []string + for signer := range preSigners { + all = append(all, signer) + } + for signer := range postSigners { + if _, ok := preSigners[signer]; ok { + continue + } + all = append(all, signer) + } + sort.Strings(all) + + for _, signer := range all { + pre, foundPre := preSigners[signer] + post, foundPost := postSigners[signer] + details := map[string]interface{}{} + + switch { + case !foundPre && !foundPost: + continue + case !foundPre && foundPost: + details["sponsor"] = post.Address() + details["signer"] = signer + srcAccount := change.Post.Data.MustAccount().AccountId + e.addUnmuxed(&srcAccount, history.EffectSignerSponsorshipCreated, details) + case !foundPost && foundPre: + details["former_sponsor"] = pre.Address() + details["signer"] = signer + srcAccount := change.Pre.Data.MustAccount().AccountId + e.addUnmuxed(&srcAccount, history.EffectSignerSponsorshipRemoved, details) + case foundPre && foundPost: + formerSponsor := pre.Address() + newSponsor := post.Address() + if formerSponsor == newSponsor { + continue + } + + details["former_sponsor"] = formerSponsor + details["new_sponsor"] = newSponsor + details["signer"] = signer + srcAccount := change.Post.Data.MustAccount().AccountId + e.addUnmuxed(&srcAccount, history.EffectSignerSponsorshipUpdated, details) + } + } +} + +func (e *effectsWrapper) addLedgerEntrySponsorshipEffects(change ingest.Change) error { + effectsForEntryType, found := sponsoringEffectsTable[change.Type] + if !found { + return nil + } + + details := map[string]interface{}{} + var effectType history.EffectType + + switch { + case (change.Pre == nil || change.Pre.SponsoringID() == nil) && + (change.Post != nil && change.Post.SponsoringID() != nil): + effectType = effectsForEntryType.created + details["sponsor"] = (*change.Post.SponsoringID()).Address() + case (change.Pre != nil && change.Pre.SponsoringID() != nil) && + (change.Post == nil || change.Post.SponsoringID() == nil): + effectType = effectsForEntryType.removed + details["former_sponsor"] = (*change.Pre.SponsoringID()).Address() + case (change.Pre != nil && change.Pre.SponsoringID() != nil) && + (change.Post != nil && change.Post.SponsoringID() != nil): + preSponsor := (*change.Pre.SponsoringID()).Address() + postSponsor := (*change.Post.SponsoringID()).Address() + if preSponsor == postSponsor { + return nil + } + effectType = effectsForEntryType.updated + details["new_sponsor"] = postSponsor + details["former_sponsor"] = preSponsor + default: + return nil + } + + var ( + accountID *xdr.AccountId + muxedAccount *xdr.MuxedAccount + ) + + var data xdr.LedgerEntryData + if change.Post != nil { + data = change.Post.Data + } else { + data = change.Pre.Data + } + + switch change.Type { + case xdr.LedgerEntryTypeAccount: + a := data.MustAccount().AccountId + accountID = &a + case xdr.LedgerEntryTypeTrustline: + tl := data.MustTrustLine() + accountID = &tl.AccountId + if tl.Asset.Type == xdr.AssetTypeAssetTypePoolShare { + details["asset_type"] = "liquidity_pool" + details["liquidity_pool_id"] = PoolIDToString(*tl.Asset.LiquidityPoolId) + } else { 
+ details["asset"] = tl.Asset.ToAsset().StringCanonical() + } + case xdr.LedgerEntryTypeData: + muxedAccount = e.operation.SourceAccount() + details["data_name"] = data.MustData().DataName + case xdr.LedgerEntryTypeClaimableBalance: + muxedAccount = e.operation.SourceAccount() + var err error + details["balance_id"], err = xdr.MarshalHex(data.MustClaimableBalance().BalanceId) + if err != nil { + return errors.Wrapf(err, "Invalid balanceId in change from op %d", e.operation.index) + } + case xdr.LedgerEntryTypeLiquidityPool: + // liquidity pools cannot be sponsored + fallthrough + default: + return errors.Errorf("invalid sponsorship ledger entry type %v", change.Type.String()) + } + + if accountID != nil { + e.addUnmuxed(accountID, effectType, details) + } else { + e.addMuxed(muxedAccount, effectType, details) + } + + return nil +} + +func (e *effectsWrapper) addLedgerEntryLiquidityPoolEffects(change ingest.Change) error { + if change.Type != xdr.LedgerEntryTypeLiquidityPool { + return nil + } + var effectType history.EffectType + + var details map[string]interface{} + switch { + case change.Pre == nil && change.Post != nil: + effectType = history.EffectLiquidityPoolCreated + details = map[string]interface{}{ + "liquidity_pool": liquidityPoolDetails(change.Post.Data.LiquidityPool), + } + case change.Pre != nil && change.Post == nil: + effectType = history.EffectLiquidityPoolRemoved + poolID := change.Pre.Data.LiquidityPool.LiquidityPoolId + details = map[string]interface{}{ + "liquidity_pool_id": PoolIDToString(poolID), + } + default: + return nil + } + e.addMuxed( + e.operation.SourceAccount(), + effectType, + details, + ) + + return nil +} + +func (e *effectsWrapper) addAccountCreatedEffects() { + op := e.operation.operation.Body.MustCreateAccountOp() + + e.addUnmuxed( + &op.Destination, + history.EffectAccountCreated, + map[string]interface{}{ + "starting_balance": amount.String(op.StartingBalance), + }, + ) + e.addMuxed( + e.operation.SourceAccount(), + history.EffectAccountDebited, + map[string]interface{}{ + "asset_type": "native", + "amount": amount.String(op.StartingBalance), + }, + ) + e.addUnmuxed( + &op.Destination, + history.EffectSignerCreated, + map[string]interface{}{ + "public_key": op.Destination.Address(), + "weight": keypair.DefaultSignerWeight, + }, + ) +} + +func (e *effectsWrapper) addPaymentEffects() { + op := e.operation.operation.Body.MustPaymentOp() + + details := map[string]interface{}{"amount": amount.String(op.Amount)} + addAssetDetails(details, op.Asset, "") + + e.addMuxed( + &op.Destination, + history.EffectAccountCredited, + details, + ) + e.addMuxed( + e.operation.SourceAccount(), + history.EffectAccountDebited, + details, + ) +} + +func (e *effectsWrapper) pathPaymentStrictReceiveEffects() error { + op := e.operation.operation.Body.MustPathPaymentStrictReceiveOp() + resultSuccess := e.operation.OperationResult().MustPathPaymentStrictReceiveResult().MustSuccess() + source := e.operation.SourceAccount() + + details := map[string]interface{}{"amount": amount.String(op.DestAmount)} + addAssetDetails(details, op.DestAsset, "") + + e.addMuxed( + &op.Destination, + history.EffectAccountCredited, + details, + ) + + result := e.operation.OperationResult().MustPathPaymentStrictReceiveResult() + details = map[string]interface{}{"amount": amount.String(result.SendAmount())} + addAssetDetails(details, op.SendAsset, "") + + e.addMuxed( + source, + history.EffectAccountDebited, + details, + ) + + return e.addIngestTradeEffects(*source, resultSuccess.Offers) +} + +func (e 
*effectsWrapper) addPathPaymentStrictSendEffects() error { + source := e.operation.SourceAccount() + op := e.operation.operation.Body.MustPathPaymentStrictSendOp() + resultSuccess := e.operation.OperationResult().MustPathPaymentStrictSendResult().MustSuccess() + result := e.operation.OperationResult().MustPathPaymentStrictSendResult() + + details := map[string]interface{}{"amount": amount.String(result.DestAmount())} + addAssetDetails(details, op.DestAsset, "") + e.addMuxed(&op.Destination, history.EffectAccountCredited, details) + + details = map[string]interface{}{"amount": amount.String(op.SendAmount)} + addAssetDetails(details, op.SendAsset, "") + e.addMuxed(source, history.EffectAccountDebited, details) + + return e.addIngestTradeEffects(*source, resultSuccess.Offers) +} + +func (e *effectsWrapper) addManageSellOfferEffects() error { + source := e.operation.SourceAccount() + result := e.operation.OperationResult().MustManageSellOfferResult().MustSuccess() + return e.addIngestTradeEffects(*source, result.OffersClaimed) +} + +func (e *effectsWrapper) addManageBuyOfferEffects() error { + source := e.operation.SourceAccount() + result := e.operation.OperationResult().MustManageBuyOfferResult().MustSuccess() + return e.addIngestTradeEffects(*source, result.OffersClaimed) +} + +func (e *effectsWrapper) addCreatePassiveSellOfferEffect() error { + result := e.operation.OperationResult() + source := e.operation.SourceAccount() + + var claims []xdr.ClaimAtom + + // KNOWN ISSUE: stellar-core creates results for CreatePassiveOffer operations + // with the wrong result arm set. + if result.Type == xdr.OperationTypeManageSellOffer { + claims = result.MustManageSellOfferResult().MustSuccess().OffersClaimed + } else { + claims = result.MustCreatePassiveSellOfferResult().MustSuccess().OffersClaimed + } + + return e.addIngestTradeEffects(*source, claims) +} + +func (e *effectsWrapper) addSetOptionsEffects() error { + source := e.operation.SourceAccount() + op := e.operation.operation.Body.MustSetOptionsOp() + + if op.HomeDomain != nil { + e.addMuxed(source, history.EffectAccountHomeDomainUpdated, + map[string]interface{}{ + "home_domain": string(*op.HomeDomain), + }, + ) + } + + thresholdDetails := map[string]interface{}{} + + if op.LowThreshold != nil { + thresholdDetails["low_threshold"] = *op.LowThreshold + } + + if op.MedThreshold != nil { + thresholdDetails["med_threshold"] = *op.MedThreshold + } + + if op.HighThreshold != nil { + thresholdDetails["high_threshold"] = *op.HighThreshold + } + + if len(thresholdDetails) > 0 { + e.addMuxed(source, history.EffectAccountThresholdsUpdated, thresholdDetails) + } + + flagDetails := map[string]interface{}{} + if op.SetFlags != nil { + setAuthFlagDetails(flagDetails, xdr.AccountFlags(*op.SetFlags), true) + } + if op.ClearFlags != nil { + setAuthFlagDetails(flagDetails, xdr.AccountFlags(*op.ClearFlags), false) + } + + if len(flagDetails) > 0 { + e.addMuxed(source, history.EffectAccountFlagsUpdated, flagDetails) + } + + if op.InflationDest != nil { + e.addMuxed(source, history.EffectAccountInflationDestinationUpdated, + map[string]interface{}{ + "inflation_destination": op.InflationDest.Address(), + }, + ) + } + changes, err := e.operation.transaction.GetOperationChanges(e.operation.index) + if err != nil { + return err + } + + for _, change := range changes { + if change.Type != xdr.LedgerEntryTypeAccount { + continue + } + + beforeAccount := change.Pre.Data.MustAccount() + afterAccount := change.Post.Data.MustAccount() + + before := 
beforeAccount.SignerSummary() + after := afterAccount.SignerSummary() + + // if before and after are the same, the signers have not changed + if reflect.DeepEqual(before, after) { + continue + } + + beforeSortedSigners := []string{} + for signer := range before { + beforeSortedSigners = append(beforeSortedSigners, signer) + } + sort.Strings(beforeSortedSigners) + + for _, addy := range beforeSortedSigners { + weight, ok := after[addy] + if !ok { + e.addMuxed(source, history.EffectSignerRemoved, map[string]interface{}{ + "public_key": addy, + }) + continue + } + + if weight != before[addy] { + e.addMuxed(source, history.EffectSignerUpdated, map[string]interface{}{ + "public_key": addy, + "weight": weight, + }) + } + } + + afterSortedSigners := []string{} + for signer := range after { + afterSortedSigners = append(afterSortedSigners, signer) + } + sort.Strings(afterSortedSigners) + + // Add the "created" effects + for _, addy := range afterSortedSigners { + weight := after[addy] + // if `addy` is in before, the previous for loop should have recorded + // the update, so skip this key + if _, ok := before[addy]; ok { + continue + } + + e.addMuxed(source, history.EffectSignerCreated, map[string]interface{}{ + "public_key": addy, + "weight": weight, + }) + } + } + return nil +} + +func (e *effectsWrapper) addChangeTrustEffects() error { + source := e.operation.SourceAccount() + + op := e.operation.operation.Body.MustChangeTrustOp() + changes, err := e.operation.transaction.GetOperationChanges(e.operation.index) + if err != nil { + return err + } + + // NOTE: when an account trusts itself, the transaction is successful but + // no ledger entries are actually modified. + for _, change := range changes { + if change.Type != xdr.LedgerEntryTypeTrustline { + continue + } + + var ( + effect history.EffectType + trustLine xdr.TrustLineEntry + ) + + switch { + case change.Pre == nil && change.Post != nil: + effect = history.EffectTrustlineCreated + trustLine = *change.Post.Data.TrustLine + case change.Pre != nil && change.Post == nil: + effect = history.EffectTrustlineRemoved + trustLine = *change.Pre.Data.TrustLine + case change.Pre != nil && change.Post != nil: + effect = history.EffectTrustlineUpdated + trustLine = *change.Post.Data.TrustLine + default: + panic("Invalid change") + } + + // We want to add a single effect for change_trust op. If it's modifying + // credit_asset search for credit_asset trustline, otherwise search for + // liquidity_pool. + if op.Line.Type != trustLine.Asset.Type { + continue + } + + details := map[string]interface{}{"limit": amount.String(op.Limit)} + if trustLine.Asset.Type == xdr.AssetTypeAssetTypePoolShare { + // The only change_trust ops that can modify LP are those with + // asset=liquidity_pool so *op.Line.LiquidityPool below is available. 
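// Illustrative sketch, not part of this diff: how the signer diff in
// addSetOptionsEffects above maps before/after SignerSummary entries to effects.
// Signer keys and weights are hypothetical.
beforeSigners := map[string]int32{
	"G...SIGNER_A": 1, // dropped below
	"G...SIGNER_B": 2, // weight changes below
}
afterSigners := map[string]int32{
	"G...SIGNER_B": 3,
	"G...SIGNER_C": 1, // newly added below
}
// Walking the sorted keys of both maps yields, in order:
//   EffectSignerRemoved for G...SIGNER_A (present before, absent after)
//   EffectSignerUpdated for G...SIGNER_B (weight 2 -> 3)
//   EffectSignerCreated for G...SIGNER_C (absent before, present after)
_, _ = beforeSigners, afterSigners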
+ if err := addLiquidityPoolAssetDetails(details, *op.Line.LiquidityPool); err != nil { + return err + } + } else { + addAssetDetails(details, op.Line.ToAsset(), "") + } + + e.addMuxed(source, effect, details) + break + } + + return nil +} + +func (e *effectsWrapper) addAllowTrustEffects() error { + source := e.operation.SourceAccount() + op := e.operation.operation.Body.MustAllowTrustOp() + asset := op.Asset.ToAsset(source.ToAccountId()) + details := map[string]interface{}{ + "trustor": op.Trustor.Address(), + } + addAssetDetails(details, asset, "") + + switch { + case xdr.TrustLineFlags(op.Authorize).IsAuthorized(): + e.addMuxed(source, history.EffectTrustlineAuthorized, details) + // Forward compatibility + setFlags := xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag) + e.addTrustLineFlagsEffect(source, &op.Trustor, asset, &setFlags, nil) + case xdr.TrustLineFlags(op.Authorize).IsAuthorizedToMaintainLiabilitiesFlag(): + e.addMuxed( + source, + history.EffectTrustlineAuthorizedToMaintainLiabilities, + details, + ) + // Forward compatibility + setFlags := xdr.Uint32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag) + e.addTrustLineFlagsEffect(source, &op.Trustor, asset, &setFlags, nil) + default: + e.addMuxed(source, history.EffectTrustlineDeauthorized, details) + // Forward compatibility, show both as cleared + clearFlags := xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag | xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag) + e.addTrustLineFlagsEffect(source, &op.Trustor, asset, nil, &clearFlags) + } + return e.addLiquidityPoolRevokedEffect() +} + +func (e *effectsWrapper) addAccountMergeEffects() { + source := e.operation.SourceAccount() + + dest := e.operation.operation.Body.MustDestination() + result := e.operation.OperationResult().MustAccountMergeResult() + details := map[string]interface{}{ + "amount": amount.String(result.MustSourceAccountBalance()), + "asset_type": "native", + } + + e.addMuxed(source, history.EffectAccountDebited, details) + e.addMuxed(&dest, history.EffectAccountCredited, details) + e.addMuxed(source, history.EffectAccountRemoved, map[string]interface{}{}) +} + +func (e *effectsWrapper) addInflationEffects() { + payouts := e.operation.OperationResult().MustInflationResult().MustPayouts() + for _, payout := range payouts { + e.addUnmuxed(&payout.Destination, history.EffectAccountCredited, + map[string]interface{}{ + "amount": amount.String(payout.Amount), + "asset_type": "native", + }, + ) + } +} + +func (e *effectsWrapper) addManageDataEffects() error { + source := e.operation.SourceAccount() + op := e.operation.operation.Body.MustManageDataOp() + details := map[string]interface{}{"name": op.DataName} + effect := history.EffectType(0) + changes, err := e.operation.transaction.GetOperationChanges(e.operation.index) + if err != nil { + return err + } + + for _, change := range changes { + if change.Type != xdr.LedgerEntryTypeData { + continue + } + + before := change.Pre + after := change.Post + + if after != nil { + raw := after.Data.MustData().DataValue + details["value"] = base64.StdEncoding.EncodeToString(raw) + } + + switch { + case before == nil && after != nil: + effect = history.EffectDataCreated + case before != nil && after == nil: + effect = history.EffectDataRemoved + case before != nil && after != nil: + effect = history.EffectDataUpdated + default: + panic("Invalid before-and-after state") + } + + break + } + + e.addMuxed(source, effect, details) + return nil +} + +func (e *effectsWrapper) addBumpSequenceEffects() error { + source := 
e.operation.SourceAccount() + changes, err := e.operation.transaction.GetOperationChanges(e.operation.index) + if err != nil { + return err + } + + for _, change := range changes { + if change.Type != xdr.LedgerEntryTypeAccount { + continue + } + + before := change.Pre + after := change.Post + + beforeAccount := before.Data.MustAccount() + afterAccount := after.Data.MustAccount() + + if beforeAccount.SeqNum != afterAccount.SeqNum { + details := map[string]interface{}{"new_seq": afterAccount.SeqNum} + e.addMuxed(source, history.EffectSequenceBumped, details) + } + break + } + + return nil +} + +func setClaimableBalanceFlagDetails(details map[string]interface{}, flags xdr.ClaimableBalanceFlags) { + if flags.IsClawbackEnabled() { + details["claimable_balance_clawback_enabled_flag"] = true + return + } +} + +func (e *effectsWrapper) addCreateClaimableBalanceEffects(changes []ingest.Change) error { + source := e.operation.SourceAccount() + var cb *xdr.ClaimableBalanceEntry + for _, change := range changes { + if change.Type != xdr.LedgerEntryTypeClaimableBalance || change.Post == nil { + continue + } + cb = change.Post.Data.ClaimableBalance + e.addClaimableBalanceEntryCreatedEffects(source, cb) + break + } + if cb == nil { + return errors.New("claimable balance entry not found") + } + + details := map[string]interface{}{ + "amount": amount.String(cb.Amount), + } + addAssetDetails(details, cb.Asset, "") + e.addMuxed( + source, + history.EffectAccountDebited, + details, + ) + + return nil +} + +func (e *effectsWrapper) addClaimableBalanceEntryCreatedEffects(source *xdr.MuxedAccount, cb *xdr.ClaimableBalanceEntry) error { + id, err := xdr.MarshalHex(cb.BalanceId) + if err != nil { + return err + } + details := map[string]interface{}{ + "balance_id": id, + "amount": amount.String(cb.Amount), + "asset": cb.Asset.StringCanonical(), + } + setClaimableBalanceFlagDetails(details, cb.Flags()) + e.addMuxed( + source, + history.EffectClaimableBalanceCreated, + details, + ) + // EffectClaimableBalanceClaimantCreated can be generated by + // `create_claimable_balance` operation but also by `liquidity_pool_withdraw` + // operation causing a revocation. + // In case of `create_claimable_balance` we use `op.Claimants` to make + // effects backward compatible. The reason for this is that Stellar-Core + // changes all `rel_before` predicated to `abs_before` when tx is included + // in the ledger. 
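// Illustrative sketch, not part of this diff: the predicate rewrite the comment
// above refers to. A claimant signed with a relative predicate (hypothetical value):
relBefore := xdr.Int64(3600) // claimable only within one hour of submission
signedPredicate := xdr.ClaimPredicate{
	Type:      xdr.ClaimPredicateTypeClaimPredicateBeforeRelativeTime,
	RelBefore: &relBefore,
}
// is stored by stellar-core in the ledger entry as the equivalent absolute form
// (ledger close time + 3600, i.e. ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime
// with AbsBefore set). Reading op.Claimants keeps the effect's "predicate" detail
// equal to signedPredicate as originally signed.
_ = signedPredicate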
+ var claimants []xdr.Claimant + if op, ok := e.operation.operation.Body.GetCreateClaimableBalanceOp(); ok { + claimants = op.Claimants + } else { + claimants = cb.Claimants + } + for _, c := range claimants { + cv0 := c.MustV0() + e.addUnmuxed( + &cv0.Destination, + history.EffectClaimableBalanceClaimantCreated, + map[string]interface{}{ + "balance_id": id, + "amount": amount.String(cb.Amount), + "predicate": cv0.Predicate, + "asset": cb.Asset.StringCanonical(), + }, + ) + } + return err +} + +func (e *effectsWrapper) addClaimClaimableBalanceEffects(changes []ingest.Change) error { + op := e.operation.operation.Body.MustClaimClaimableBalanceOp() + + balanceID, err := xdr.MarshalHex(op.BalanceId) + if err != nil { + return fmt.Errorf("Invalid balanceId in op: %d", e.operation.index) + } + + var cBalance xdr.ClaimableBalanceEntry + found := false + for _, change := range changes { + if change.Type != xdr.LedgerEntryTypeClaimableBalance { + continue + } + + if change.Pre != nil && change.Post == nil { + cBalance = change.Pre.Data.MustClaimableBalance() + preBalanceID, err := xdr.MarshalHex(cBalance.BalanceId) + if err != nil { + return fmt.Errorf("Invalid balanceId in meta changes for op: %d", e.operation.index) + } + + if preBalanceID == balanceID { + found = true + break + } + } + } + + if !found { + return fmt.Errorf("Change not found for balanceId : %s", balanceID) + } + + details := map[string]interface{}{ + "amount": amount.String(cBalance.Amount), + "balance_id": balanceID, + "asset": cBalance.Asset.StringCanonical(), + } + setClaimableBalanceFlagDetails(details, cBalance.Flags()) + source := e.operation.SourceAccount() + e.addMuxed( + source, + history.EffectClaimableBalanceClaimed, + details, + ) + + details = map[string]interface{}{ + "amount": amount.String(cBalance.Amount), + } + addAssetDetails(details, cBalance.Asset, "") + e.addMuxed( + source, + history.EffectAccountCredited, + details, + ) + + return nil +} + +func (e *effectsWrapper) addIngestTradeEffects(buyer xdr.MuxedAccount, claims []xdr.ClaimAtom) error { + for _, claim := range claims { + if claim.AmountSold() == 0 && claim.AmountBought() == 0 { + continue + } + switch claim.Type { + case xdr.ClaimAtomTypeClaimAtomTypeLiquidityPool: + if err := e.addClaimLiquidityPoolTradeEffect(claim); err != nil { + return err + } + default: + e.addClaimTradeEffects(buyer, claim) + } + } + return nil +} + +func (e *effectsWrapper) addClaimTradeEffects(buyer xdr.MuxedAccount, claim xdr.ClaimAtom) { + seller := claim.SellerId() + bd, sd := tradeDetails(buyer, seller, claim) + + e.addMuxed( + &buyer, + history.EffectTrade, + bd, + ) + + e.addUnmuxed( + &seller, + history.EffectTrade, + sd, + ) +} + +func (e *effectsWrapper) addClaimLiquidityPoolTradeEffect(claim xdr.ClaimAtom) error { + lp, _, err := e.operation.getLiquidityPoolAndProductDelta(&claim.LiquidityPool.LiquidityPoolId) + if err != nil { + return err + } + details := map[string]interface{}{ + "liquidity_pool": liquidityPoolDetails(lp), + "sold": map[string]string{ + "asset": claim.LiquidityPool.AssetSold.StringCanonical(), + "amount": amount.String(claim.LiquidityPool.AmountSold), + }, + "bought": map[string]string{ + "asset": claim.LiquidityPool.AssetBought.StringCanonical(), + "amount": amount.String(claim.LiquidityPool.AmountBought), + }, + } + e.addMuxed(e.operation.SourceAccount(), history.EffectLiquidityPoolTrade, details) + return nil +} + +func (e *effectsWrapper) addClawbackEffects() error { + op := e.operation.operation.Body.MustClawbackOp() + details := 
map[string]interface{}{ + "amount": amount.String(op.Amount), + } + source := e.operation.SourceAccount() + addAssetDetails(details, op.Asset, "") + + // The funds will be burned, but even with that, we generated an account credited effect + e.addMuxed( + source, + history.EffectAccountCredited, + details, + ) + + e.addMuxed( + &op.From, + history.EffectAccountDebited, + details, + ) + + return nil +} + +func (e *effectsWrapper) addClawbackClaimableBalanceEffects(changes []ingest.Change) error { + op := e.operation.operation.Body.MustClawbackClaimableBalanceOp() + balanceId, err := xdr.MarshalHex(op.BalanceId) + if err != nil { + return errors.Wrapf(err, "Invalid balanceId in op %d", e.operation.index) + } + details := map[string]interface{}{ + "balance_id": balanceId, + } + source := e.operation.SourceAccount() + e.addMuxed( + source, + history.EffectClaimableBalanceClawedBack, + details, + ) + + // Generate the account credited effect (although the funds will be burned) for the asset issuer + for _, c := range changes { + if c.Type == xdr.LedgerEntryTypeClaimableBalance && c.Post == nil && c.Pre != nil { + cb := c.Pre.Data.ClaimableBalance + details = map[string]interface{}{"amount": amount.String(cb.Amount)} + addAssetDetails(details, cb.Asset, "") + e.addMuxed( + source, + history.EffectAccountCredited, + details, + ) + break + } + } + + return nil +} + +func (e *effectsWrapper) addSetTrustLineFlagsEffects() error { + source := e.operation.SourceAccount() + op := e.operation.operation.Body.MustSetTrustLineFlagsOp() + e.addTrustLineFlagsEffect(source, &op.Trustor, op.Asset, &op.SetFlags, &op.ClearFlags) + return e.addLiquidityPoolRevokedEffect() +} + +func (e *effectsWrapper) addTrustLineFlagsEffect( + account *xdr.MuxedAccount, + trustor *xdr.AccountId, + asset xdr.Asset, + setFlags *xdr.Uint32, + clearFlags *xdr.Uint32) { + details := map[string]interface{}{ + "trustor": trustor.Address(), + } + addAssetDetails(details, asset, "") + + var flagDetailsAdded bool + if setFlags != nil { + setTrustLineFlagDetails(details, xdr.TrustLineFlags(*setFlags), true) + flagDetailsAdded = true + } + if clearFlags != nil { + setTrustLineFlagDetails(details, xdr.TrustLineFlags(*clearFlags), false) + flagDetailsAdded = true + } + + if flagDetailsAdded { + e.addMuxed(account, history.EffectTrustlineFlagsUpdated, details) + } +} + +func setTrustLineFlagDetails(flagDetails map[string]interface{}, flags xdr.TrustLineFlags, setValue bool) { + if flags.IsAuthorized() { + flagDetails["authorized_flag"] = setValue + } + if flags.IsAuthorizedToMaintainLiabilitiesFlag() { + flagDetails["authorized_to_maintain_liabilites"] = setValue + } + if flags.IsClawbackEnabledFlag() { + flagDetails["clawback_enabled_flag"] = setValue + } +} + +type sortableClaimableBalanceEntries []*xdr.ClaimableBalanceEntry + +func (s sortableClaimableBalanceEntries) Len() int { return len(s) } +func (s sortableClaimableBalanceEntries) Less(i, j int) bool { return s[i].Asset.LessThan(s[j].Asset) } +func (s sortableClaimableBalanceEntries) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (e *effectsWrapper) addLiquidityPoolRevokedEffect() error { + source := e.operation.SourceAccount() + lp, delta, err := e.operation.getLiquidityPoolAndProductDelta(nil) + if err != nil { + if err == errLiquidityPoolChangeNotFound { + // no revocation happened + return nil + } + return err + } + changes, err := e.operation.transaction.GetOperationChanges(e.operation.index) + if err != nil { + return err + } + assetToCBID := map[string]string{} + var cbs 
sortableClaimableBalanceEntries + for _, change := range changes { + if change.Type == xdr.LedgerEntryTypeClaimableBalance && change.Pre == nil && change.Post != nil { + cb := change.Post.Data.ClaimableBalance + id, err := xdr.MarshalHex(cb.BalanceId) + if err != nil { + return err + } + assetToCBID[cb.Asset.StringCanonical()] = id + cbs = append(cbs, cb) + } + } + if len(assetToCBID) == 0 { + // no claimable balances were created, and thus, no revocation happened + return nil + } + // Core's claimable balance metadata isn't ordered, so we order it ourselves + // so that effects are ordered consistently + sort.Sort(cbs) + for _, cb := range cbs { + if err := e.addClaimableBalanceEntryCreatedEffects(source, cb); err != nil { + return err + } + } + + reservesRevoked := make([]map[string]string, 0, 2) + for _, aa := range []base.AssetAmount{ + { + Asset: lp.Body.ConstantProduct.Params.AssetA.StringCanonical(), + Amount: amount.String(-delta.ReserveA), + }, + { + Asset: lp.Body.ConstantProduct.Params.AssetB.StringCanonical(), + Amount: amount.String(-delta.ReserveB), + }, + } { + if cbID, ok := assetToCBID[aa.Asset]; ok { + assetAmountDetail := map[string]string{ + "asset": aa.Asset, + "amount": aa.Amount, + "claimable_balance_id": cbID, + } + reservesRevoked = append(reservesRevoked, assetAmountDetail) + } + } + details := map[string]interface{}{ + "liquidity_pool": liquidityPoolDetails(lp), + "reserves_revoked": reservesRevoked, + "shares_revoked": amount.String(-delta.TotalPoolShares), + } + e.addMuxed(source, history.EffectLiquidityPoolRevoked, details) + return nil +} + +func setAuthFlagDetails(flagDetails map[string]interface{}, flags xdr.AccountFlags, setValue bool) { + if flags.IsAuthRequired() { + flagDetails["auth_required_flag"] = setValue + } + if flags.IsAuthRevocable() { + flagDetails["auth_revocable_flag"] = setValue + } + if flags.IsAuthImmutable() { + flagDetails["auth_immutable_flag"] = setValue + } + if flags.IsAuthClawbackEnabled() { + flagDetails["auth_clawback_enabled_flag"] = setValue + } +} + +func tradeDetails(buyer xdr.MuxedAccount, seller xdr.AccountId, claim xdr.ClaimAtom) (bd map[string]interface{}, sd map[string]interface{}) { + bd = map[string]interface{}{ + "offer_id": claim.OfferId(), + "seller": seller.Address(), + "bought_amount": amount.String(claim.AmountSold()), + "sold_amount": amount.String(claim.AmountBought()), + } + addAssetDetails(bd, claim.AssetSold(), "bought_") + addAssetDetails(bd, claim.AssetBought(), "sold_") + + sd = map[string]interface{}{ + "offer_id": claim.OfferId(), + "bought_amount": amount.String(claim.AmountBought()), + "sold_amount": amount.String(claim.AmountSold()), + } + addAccountAndMuxedAccountDetails(sd, buyer, "seller") + addAssetDetails(sd, claim.AssetBought(), "bought_") + addAssetDetails(sd, claim.AssetSold(), "sold_") + + return +} + +func liquidityPoolDetails(lp *xdr.LiquidityPoolEntry) map[string]interface{} { + return map[string]interface{}{ + "id": PoolIDToString(lp.LiquidityPoolId), + "fee_bp": uint32(lp.Body.ConstantProduct.Params.Fee), + "type": "constant_product", + "total_trustlines": strconv.FormatInt(int64(lp.Body.ConstantProduct.PoolSharesTrustLineCount), 10), + "total_shares": amount.String(lp.Body.ConstantProduct.TotalPoolShares), + "reserves": []base.AssetAmount{ + { + Asset: lp.Body.ConstantProduct.Params.AssetA.StringCanonical(), + Amount: amount.String(lp.Body.ConstantProduct.ReserveA), + }, + { + Asset: lp.Body.ConstantProduct.Params.AssetB.StringCanonical(), + Amount: 
amount.String(lp.Body.ConstantProduct.ReserveB), + }, + }, + } +} + +func (e *effectsWrapper) addLiquidityPoolDepositEffect() error { + op := e.operation.operation.Body.MustLiquidityPoolDepositOp() + lp, delta, err := e.operation.getLiquidityPoolAndProductDelta(&op.LiquidityPoolId) + if err != nil { + return err + } + details := map[string]interface{}{ + "liquidity_pool": liquidityPoolDetails(lp), + "reserves_deposited": []base.AssetAmount{ + { + Asset: lp.Body.ConstantProduct.Params.AssetA.StringCanonical(), + Amount: amount.String(delta.ReserveA), + }, + { + Asset: lp.Body.ConstantProduct.Params.AssetB.StringCanonical(), + Amount: amount.String(delta.ReserveB), + }, + }, + "shares_received": amount.String(delta.TotalPoolShares), + } + e.addMuxed(e.operation.SourceAccount(), history.EffectLiquidityPoolDeposited, details) + return nil +} + +func (e *effectsWrapper) addLiquidityPoolWithdrawEffect() error { + op := e.operation.operation.Body.MustLiquidityPoolWithdrawOp() + lp, delta, err := e.operation.getLiquidityPoolAndProductDelta(&op.LiquidityPoolId) + if err != nil { + return err + } + details := map[string]interface{}{ + "liquidity_pool": liquidityPoolDetails(lp), + "reserves_received": []base.AssetAmount{ + { + Asset: lp.Body.ConstantProduct.Params.AssetA.StringCanonical(), + Amount: amount.String(-delta.ReserveA), + }, + { + Asset: lp.Body.ConstantProduct.Params.AssetB.StringCanonical(), + Amount: amount.String(-delta.ReserveB), + }, + }, + "shares_redeemed": amount.String(-delta.TotalPoolShares), + } + e.addMuxed(e.operation.SourceAccount(), history.EffectLiquidityPoolWithdrew, details) + return nil +} diff --git a/services/horizon/internal/ingest/processors/effects_processor_test.go b/services/horizon/internal/ingest/processors/effects_processor_test.go new file mode 100644 index 0000000000..872a39ef2d --- /dev/null +++ b/services/horizon/internal/ingest/processors/effects_processor_test.go @@ -0,0 +1,3457 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "encoding/hex" + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/protocols/horizon/base" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + . 
"github.com/stellar/go/services/horizon/internal/test/transactions" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type EffectsProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *EffectProcessor + mockQ *history.MockQEffects + mockBatchInsertBuilder *history.MockEffectBatchInsertBuilder + + firstTx ingest.LedgerTransaction + secondTx ingest.LedgerTransaction + thirdTx ingest.LedgerTransaction + failedTx ingest.LedgerTransaction + firstTxID int64 + secondTxID int64 + thirdTxID int64 + failedTxID int64 + sequence uint32 + addresses []string + addressToID map[string]int64 + txs []ingest.LedgerTransaction +} + +func TestEffectsProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(EffectsProcessorTestSuiteLedger)) +} + +func (s *EffectsProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQEffects{} + s.mockBatchInsertBuilder = &history.MockEffectBatchInsertBuilder{} + + s.sequence = uint32(20) + + s.addresses = []string{ + "GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", + "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", + } + + s.firstTx = BuildLedgerTransaction( + s.Suite.T(), + TestTransaction{ + Index: 1, + EnvelopeXDR: "AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAADkAAAABAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AAAAAAAAAAABHK0SlAAAAEDq0JVhKNIq9ag0sR+R/cv3d9tEuaYEm2BazIzILRdGj9alaVMZBhxoJ3ZIpP3rraCJzyoKZO+p5HBVe10a2+UG", + ResultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=", + MetaXDR: "AAAAAQAAAAIAAAADAAAAOgAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjnAAAADkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOgAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjnAAAADkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAAOQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAARdlkuAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + FeeChangesXDR: "AAAAAgAAAAMAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + Hash: "829d53f2dceebe10af8007564b0aefde819b95734ad431df84270651e7ed8a90", + }, + ) + s.firstTxID = toid.New(int32(s.sequence), 1, 0).ToInt64() + + s.secondTx = BuildLedgerTransaction( + s.Suite.T(), + TestTransaction{ + Index: 2, + EnvelopeXDR: "AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDHU95E9wxgETD8TqxUrkgC0/7XHyNDts6Q5huRHfDRyRcoHdv7aMp/sPvC3RPkXjOMjgbKJUX7SgExUeYB5f8F", + ResultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + MetaXDR: 
"AAAAAQAAAAIAAAADAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAZAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAaAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlahyo1sAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + FeeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nHQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + Hash: "0e5bd332291e3098e49886df2cdb9b5369a5f9e0a9973f0d9e1a9489c6581ba2", + }, + ) + + s.secondTxID = toid.New(int32(s.sequence), 2, 0).ToInt64() + + s.thirdTx = BuildLedgerTransaction( + s.Suite.T(), + TestTransaction{ + Index: 3, + EnvelopeXDR: "AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + ResultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + MetaXDR: "AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + FeeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + Hash: "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c", + }, + ) + s.thirdTxID = toid.New(int32(s.sequence), 3, 0).ToInt64() + + s.failedTx = BuildLedgerTransaction( + s.Suite.T(), + TestTransaction{ + Index: 4, + EnvelopeXDR: "AAAAAPCq/iehD2ASJorqlTyEt0usn2WG3yF4w9xBkgd4itu6AAAAZAAMpboAADNGAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABVEVTVAAAAAAObS6P1g8rj8sCVzRQzYgHhWFkbh1oV+1s47LFPstSpQAAAAAAAAACVAvkAAAAAfcAAAD6AAAAAAAAAAAAAAAAAAAAAXiK27oAAABAHHk5mvM6xBRsvu3RBvzzPIb8GpXaL2M7InPn65LIhFJ2RnHIYrpP6ufZc6SUtKqChNRaN4qw5rjwFXNezmrBCw==", + ResultXDR: "AAAAAAAAAGT/////AAAAAQAAAAAAAAAD////+QAAAAA=", + MetaXDR: "AAAAAQAAAAIAAAADABDLGAAAAAAAAAAA8Kr+J6EPYBImiuqVPIS3S6yfZYbfIXjD3EGSB3iK27oAAAB2ucIg2AAMpboAADNFAAAA4wAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAABHT9ws4fAAAAAAAAAAAAAAAAAAAAAAAAAAEAEMsYAAAAAAAAAADwqv4noQ9gEiaK6pU8hLdLrJ9lht8heMPcQZIHeIrbugAAAHa5wiDYAAylugAAM0YAAADjAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAEdP3Czh8AAAAAAAAAAAAAAAAAAAAAAAAAAA==", + FeeChangesXDR: 
"AAAAAgAAAAMAEMsCAAAAAAAAAADwqv4noQ9gEiaK6pU8hLdLrJ9lht8heMPcQZIHeIrbugAAAHa5wiE8AAylugAAM0UAAADjAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAEdP3Czh8AAAAAAAAAAAAAAAAAAAAAAAAAAQAQyxgAAAAAAAAAAPCq/iehD2ASJorqlTyEt0usn2WG3yF4w9xBkgd4itu6AAAAdrnCINgADKW6AAAzRQAAAOMAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAR0/cLOHwAAAAAAAAAAAAAAAAAAAAA=", + Hash: "24206737a02f7f855c46e367418e38c223f897792c76bbfb948e1b0dbd695f8b", + }, + ) + s.failedTxID = toid.New(int32(s.sequence), 4, 0).ToInt64() + + s.addressToID = map[string]int64{ + s.addresses[0]: 2, + s.addresses[1]: 20, + s.addresses[2]: 200, + } + + s.processor = NewEffectProcessor( + s.mockQ, + 20, + ) + + s.txs = []ingest.LedgerTransaction{ + s.firstTx, + s.secondTx, + s.thirdTx, + } +} + +func (s *EffectsProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) +} + +func (s *EffectsProcessorTestSuiteLedger) mockSuccessfulEffectBatchAdds() { + s.mockBatchInsertBuilder.On( + "Add", + s.ctx, + s.addressToID[s.addresses[2]], + null.String{}, + toid.New(int32(s.sequence), 1, 1).ToInt64(), + uint32(1), + history.EffectSequenceBumped, + []byte("{\"new_seq\":300000000000}"), + ).Return(nil).Once() + s.mockBatchInsertBuilder.On( + "Add", + s.ctx, + s.addressToID[s.addresses[2]], + null.String{}, + toid.New(int32(s.sequence), 2, 1).ToInt64(), + uint32(1), + history.EffectAccountCreated, + []byte("{\"starting_balance\":\"1000.0000000\"}"), + ).Return(nil).Once() + s.mockBatchInsertBuilder.On( + "Add", + s.ctx, + s.addressToID[s.addresses[1]], + null.String{}, + toid.New(int32(s.sequence), 2, 1).ToInt64(), + uint32(2), + history.EffectAccountDebited, + []byte("{\"amount\":\"1000.0000000\",\"asset_type\":\"native\"}"), + ).Return(nil).Once() + s.mockBatchInsertBuilder.On( + "Add", + s.ctx, + s.addressToID[s.addresses[2]], + null.String{}, + toid.New(int32(s.sequence), 2, 1).ToInt64(), + uint32(3), + history.EffectSignerCreated, + []byte("{\"public_key\":\"GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN\",\"weight\":1}"), + ).Return(nil).Once() + + s.mockBatchInsertBuilder.On( + "Add", + s.ctx, + s.addressToID[s.addresses[0]], + null.String{}, + toid.New(int32(s.sequence), 3, 1).ToInt64(), + uint32(1), + history.EffectAccountCredited, + []byte("{\"amount\":\"10.0000000\",\"asset_type\":\"native\"}"), + ).Return(nil).Once() + + s.mockBatchInsertBuilder.On( + "Add", + s.ctx, + s.addressToID[s.addresses[0]], + null.String{}, + toid.New(int32(s.sequence), 3, 1).ToInt64(), + uint32(2), + history.EffectAccountDebited, + []byte("{\"amount\":\"10.0000000\",\"asset_type\":\"native\"}"), + ).Return(nil).Once() +} + +func (s *EffectsProcessorTestSuiteLedger) mockSuccessfulCreateAccounts() { + s.mockQ.On( + "CreateAccounts", + s.ctx, + mock.AnythingOfType("[]string"), + maxBatchSize, + ).Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch(s.addresses, arg) + }).Return(s.addressToID, nil).Once() +} + +func (s *EffectsProcessorTestSuiteLedger) TestEmptyEffects() { + err := s.processor.Commit(context.Background()) + s.Assert().NoError(err) +} + +func (s *EffectsProcessorTestSuiteLedger) TestIngestEffectsSucceeds() { + s.mockSuccessfulCreateAccounts() + s.mockQ.On("NewEffectBatchInsertBuilder", maxBatchSize). 
+ Return(s.mockBatchInsertBuilder).Once() + + s.mockSuccessfulEffectBatchAdds() + + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } + err := s.processor.Commit(s.ctx) + s.Assert().NoError(err) +} + +func (s *EffectsProcessorTestSuiteLedger) TestCreateAccountsFails() { + s.mockQ.On("CreateAccounts", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). + Return(s.addressToID, errors.New("transient error")).Once() + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } + err := s.processor.Commit(s.ctx) + s.Assert().EqualError(err, "Could not create account ids: transient error") +} + +func (s *EffectsProcessorTestSuiteLedger) TestBatchAddFails() { + s.mockSuccessfulCreateAccounts() + s.mockQ.On("NewEffectBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + + s.mockBatchInsertBuilder.On( + "Add", s.ctx, + s.addressToID[s.addresses[2]], + null.String{}, + toid.New(int32(s.sequence), 1, 1).ToInt64(), + uint32(1), + history.EffectSequenceBumped, + []byte("{\"new_seq\":300000000000}"), + ).Return(errors.New("transient error")).Once() + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } + err := s.processor.Commit(s.ctx) + s.Assert().EqualError(err, "could not insert operation effect in db: transient error") +} + +func getRevokeSponsorshipMeta(t *testing.T) (string, []effect) { + source := xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + firstSigner := xdr.MustAddress("GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN") + secondSigner := xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + thirdSigner := xdr.MustAddress("GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX") + formerSponsor := xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A") + oldSponsor := xdr.MustAddress("GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y") + updatedSponsor := xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A") + newSponsor := xdr.MustAddress("GDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQHVGJ") + + expectedEffects := []effect{ + { + address: source.Address(), + operationID: 249108107265, + details: map[string]interface{}{ + "sponsor": newSponsor.Address(), + "signer": thirdSigner.Address(), + }, + effectType: history.EffectSignerSponsorshipCreated, + order: 1, + }, + { + address: source.Address(), + operationID: 249108107265, + details: map[string]interface{}{ + "former_sponsor": oldSponsor.Address(), + "new_sponsor": updatedSponsor.Address(), + "signer": secondSigner.Address(), + }, + effectType: history.EffectSignerSponsorshipUpdated, + order: 2, + }, + { + address: source.Address(), + operationID: 249108107265, + details: map[string]interface{}{ + "former_sponsor": formerSponsor.Address(), + "signer": firstSigner.Address(), + }, + effectType: history.EffectSignerSponsorshipRemoved, + order: 3, + }, + } + + accountSignersMeta := &xdr.TransactionMeta{ + V: 1, + V1: &xdr.TransactionMetaV1{ + TxChanges: xdr.LedgerEntryChanges{}, + Operations: []xdr.OperationMeta{ + { + Changes: xdr.LedgerEntryChanges{ + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: 
source, + Balance: 800152367009533292, + SeqNum: 26, + InflationDest: &source, + Thresholds: xdr.Thresholds{0x1, 0x0, 0x0, 0x0}, + Signers: []xdr.Signer{ + { + Key: xdr.SignerKey{ + Type: xdr.SignerKeyTypeSignerKeyTypeEd25519, + Ed25519: firstSigner.Ed25519, + }, + Weight: 10, + }, + { + Key: xdr.SignerKey{ + Type: xdr.SignerKeyTypeSignerKeyTypeEd25519, + Ed25519: secondSigner.Ed25519, + }, + Weight: 10, + }, + { + Key: xdr.SignerKey{ + Type: xdr.SignerKeyTypeSignerKeyTypeEd25519, + Ed25519: thirdSigner.Ed25519, + }, + Weight: 10, + }, + }, + Ext: xdr.AccountEntryExt{ + V: 1, + V1: &xdr.AccountEntryExtensionV1{ + Liabilities: xdr.Liabilities{}, + Ext: xdr.AccountEntryExtensionV1Ext{ + V: 2, + V2: &xdr.AccountEntryExtensionV2{ + NumSponsored: 0, + NumSponsoring: 0, + SignerSponsoringIDs: []xdr.SponsorshipDescriptor{ + &formerSponsor, + &oldSponsor, + nil, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: source, + Balance: 800152367009533292, + SeqNum: 26, + InflationDest: &source, + Thresholds: xdr.Thresholds{0x1, 0x0, 0x0, 0x0}, + Signers: []xdr.Signer{ + { + Key: xdr.SignerKey{ + Type: xdr.SignerKeyTypeSignerKeyTypeEd25519, + Ed25519: secondSigner.Ed25519, + }, + Weight: 10, + }, + { + Key: xdr.SignerKey{ + Type: xdr.SignerKeyTypeSignerKeyTypeEd25519, + Ed25519: thirdSigner.Ed25519, + }, + Weight: 10, + }, + }, + Ext: xdr.AccountEntryExt{ + V: 1, + V1: &xdr.AccountEntryExtensionV1{ + Liabilities: xdr.Liabilities{}, + Ext: xdr.AccountEntryExtensionV1Ext{ + V: 2, + V2: &xdr.AccountEntryExtensionV2{ + NumSponsored: 0, + NumSponsoring: 0, + SignerSponsoringIDs: []xdr.SponsorshipDescriptor{ + &updatedSponsor, + &newSponsor, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + b64, err := xdr.MarshalBase64(accountSignersMeta) + assert.NoError(t, err) + + return b64, expectedEffects +} + +func TestEffectsCoversAllOperationTypes(t *testing.T) { + for typ, s := range xdr.OperationTypeToStringMap { + op := xdr.Operation{ + Body: xdr.OperationBody{ + Type: xdr.OperationType(typ), + }, + } + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + // calling effects should either panic (because the operation field is set to nil) + // or not error + func() { + var err error + defer func() { + err2 := recover() + if err != nil { + assert.NotContains(t, err.Error(), "Unknown operation type") + } + assert.True(t, err2 != nil || err == nil, s) + }() + _, err = operation.effects() + }() + } + + // make sure the check works for an unknown operation type + op := xdr.Operation{ + Body: xdr.OperationBody{ + Type: xdr.OperationType(20000), + }, + } + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + // calling effects should error due to the unknown operation + _, err := operation.effects() + assert.Contains(t, err.Error(), "Unknown operation type") +} + +func TestOperationEffects(t *testing.T) { + + sourceAID := xdr.MustAddress("GD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY737V") + sourceAccount := xdr.MuxedAccount{ + Type: 
xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabe, + Ed25519: *sourceAID.Ed25519, + }, + } + destAID := xdr.MustAddress("GDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQHVGJ") + dest := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabe, + Ed25519: *destAID.Ed25519, + }, + } + strictPaymentWithMuxedAccountsTx := xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: sourceAccount, + Fee: 100, + SeqNum: 3684420515004429, + Operations: []xdr.Operation{ + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypePathPaymentStrictSend, + PathPaymentStrictSendOp: &xdr.PathPaymentStrictSendOp{ + SendAsset: xdr.Asset{ + Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: xdr.AssetCode4{66, 82, 76, 0}, + Issuer: xdr.MustAddress("GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF"), + }, + }, + SendAmount: 300000, + Destination: dest, + DestAsset: xdr.Asset{ + Type: 1, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: xdr.AssetCode4{65, 82, 83, 0}, + Issuer: xdr.MustAddress("GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF"), + }, + }, + DestMin: 10000000, + Path: []xdr.Asset{ + { + Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: xdr.AssetCode4{65, 82, 83, 0}, + Issuer: xdr.MustAddress("GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF"), + }, + }, + }, + }, + }, + }, + }, + }, + Signatures: []xdr.DecoratedSignature{ + { + Hint: xdr.SignatureHint{99, 66, 175, 143}, + Signature: xdr.Signature{244, 107, 139, 92, 189, 156, 207, 79, 84, 56, 2, 70, 75, 22, 237, 50, 100, 242, 159, 177, 27, 240, 66, 122, 182, 45, 189, 78, 5, 127, 26, 61, 179, 238, 229, 76, 32, 206, 122, 13, 154, 133, 148, 149, 29, 250, 48, 132, 44, 86, 163, 56, 32, 44, 75, 87, 226, 251, 76, 4, 59, 182, 132, 8}, + }, + }, + }, + } + strictPaymentWithMuxedAccountsTxBase64, err := xdr.MarshalBase64(strictPaymentWithMuxedAccountsTx) + assert.NoError(t, err) + + creator := xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + created := xdr.MustAddress("GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN") + sponsor := xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A") + sponsor2 := xdr.MustAddress("GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX") + createAccountMeta := &xdr.TransactionMeta{ + V: 1, + V1: &xdr.TransactionMetaV1{ + TxChanges: xdr.LedgerEntryChanges{ + { + Type: 3, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: 0, + Account: &xdr.AccountEntry{ + AccountId: creator, + Balance: 800152377009533292, + SeqNum: 25, + InflationDest: &creator, + Thresholds: xdr.Thresholds{0x1, 0x0, 0x0, 0x0}, + }, + }, + }, + }, + { + Type: 1, + Updated: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: 0, + Account: &xdr.AccountEntry{ + AccountId: creator, + Balance: 800152377009533292, + SeqNum: 26, + InflationDest: &creator, + }, + }, + Ext: xdr.LedgerEntryExt{}, + }, + }, + }, + Operations: []xdr.OperationMeta{ + { + Changes: xdr.LedgerEntryChanges{ + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: creator, + Balance: 800152367009533292, + SeqNum: 26, + 
InflationDest: &creator, + Thresholds: xdr.Thresholds{0x1, 0x0, 0x0, 0x0}, + }, + }, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: &sponsor2, + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved, + Removed: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.LedgerKeyAccount{ + AccountId: created, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: creator, + Balance: 800152367009533292, + SeqNum: 26, + InflationDest: &creator, + Thresholds: xdr.Thresholds{0x1, 0x0, 0x0, 0x0}, + }, + }, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: &sponsor, + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: creator, + Balance: 800152367009533292, + SeqNum: 26, + InflationDest: &creator, + Thresholds: xdr.Thresholds{0x1, 0x0, 0x0, 0x0}, + }, + }, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: &sponsor2, + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: creator, + Balance: 800152377009533292, + SeqNum: 26, + InflationDest: &creator, + Thresholds: xdr.Thresholds{0x1, 0x0, 0x0, 0x0}, + }, + }, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: &sponsor, + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: creator, + Balance: 800152367009533292, + SeqNum: 26, + InflationDest: &creator, + Thresholds: xdr.Thresholds{0x1, 0x0, 0x0, 0x0}, + }, + }, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: &sponsor, + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 0x39, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: created, + Balance: 10000000000, + SeqNum: 244813135872, + Thresholds: xdr.Thresholds{0x1, 0x0, 0x0, 0x0}, + }, + }, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: &sponsor, + }, + }, + }, + }, + }, + }, + }, + }, + } + + createAccountMetaB64, err := xdr.MarshalBase64(createAccountMeta) + assert.NoError(t, err) + assert.NoError(t, err) + + revokeSponsorshipMeta, revokeSponsorshipEffects := getRevokeSponsorshipMeta(t) + + testCases := []struct { + desc string + envelopeXDR string + resultXDR string + metaXDR string + feeChangesXDR string + hash string + index uint32 + sequence uint32 + expected []effect + }{ + { + desc: "createAccount", + envelopeXDR: "AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDHU95E9wxgETD8TqxUrkgC0/7XHyNDts6Q5huRHfDRyRcoHdv7aMp/sPvC3RPkXjOMjgbKJUX7SgExUeYB5f8F", + resultXDR: 
"AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: createAccountMetaB64, + feeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nHQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "0e5bd332291e3098e49886df2cdb9b5369a5f9e0a9973f0d9e1a9489c6581ba2", + index: 0, + sequence: 57, + expected: []effect{ + { + address: "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", + operationID: int64(244813139969), + details: map[string]interface{}{ + "starting_balance": "1000.0000000", + }, + effectType: history.EffectAccountCreated, + order: uint32(1), + }, + { + address: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + operationID: int64(244813139969), + details: map[string]interface{}{ + "amount": "1000.0000000", + "asset_type": "native", + }, + effectType: history.EffectAccountDebited, + order: uint32(2), + }, + { + address: "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", + operationID: int64(244813139969), + details: map[string]interface{}{ + "public_key": "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", + "weight": 1, + }, + effectType: history.EffectSignerCreated, + order: uint32(3), + }, + { + address: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + operationID: int64(244813139969), + details: map[string]interface{}{ + "former_sponsor": "GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX", + }, + effectType: history.EffectAccountSponsorshipRemoved, + order: uint32(4), + }, + { + address: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + operationID: int64(244813139969), + details: map[string]interface{}{ + "former_sponsor": "GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A", + "new_sponsor": "GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX", + }, + effectType: history.EffectAccountSponsorshipUpdated, + order: uint32(5), + }, + { + address: "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", + operationID: int64(244813139969), + details: map[string]interface{}{ + "sponsor": "GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A", + }, + effectType: history.EffectAccountSponsorshipCreated, + order: uint32(6), + }, + }, + }, + { + desc: "payment", + envelopeXDR: "AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c", + index: 0, + sequence: 56, + expected: []effect{ + { + address: 
"GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", + details: map[string]interface{}{ + "amount": "10.0000000", + "asset_type": "native", + }, + effectType: history.EffectAccountCredited, + operationID: int64(240518172673), + order: uint32(1), + }, + { + address: "GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", + details: map[string]interface{}{ + "amount": "10.0000000", + "asset_type": "native", + }, + effectType: history.EffectAccountDebited, + operationID: int64(240518172673), + order: uint32(2), + }, + }, + }, + { + desc: "pathPaymentStrictSend", + envelopeXDR: "AAAAAPbGHHrGbL7EFLG87cWA6eecM/LaVyzrO+pakFpjQq+PAAAAZAANFvYAAAANAAAAAAAAAAAAAAABAAAAAAAAAA0AAAABQlJMAAAAAACuj0P7T8viUkHM324bjqGqM4AvwXVOKd9lSX7px+1ZWgAAAAAABJPgAAAAAMjq0GsWJu54fjD9Y/wlJJ4a9iAQvF82hRIjT716u5sDAAAAAUFSUwAAAAAAro9D+0/L4lJBzN9uG46hqjOAL8F1TinfZUl+6cftWVoAAAAAAJiWgAAAAAEAAAABQVJTAAAAAACuj0P7T8viUkHM324bjqGqM4AvwXVOKd9lSX7px+1ZWgAAAAAAAAABY0KvjwAAAED0a4tcvZzPT1Q4AkZLFu0yZPKfsRvwQnq2Lb1OBX8aPbPu5UwgznoNmoWUlR36MIQsVqM4ICxLV+L7TAQ7toQI", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAANAAAAAAAAAAEAAAAAyOrQaxYm7nh+MP1j/CUknhr2IBC8XzaFEiNPvXq7mwMAAAAAAJmwQAAAAAFBUlMAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAACYloAAAAABQlJMAAAAAACuj0P7T8viUkHM324bjqGqM4AvwXVOKd9lSX7px+1ZWgAAAAAABJPgAAAAAMjq0GsWJu54fjD9Y/wlJJ4a9iAQvF82hRIjT716u5sDAAAAAUFSUwAAAAAAro9D+0/L4lJBzN9uG46hqjOAL8F1TinfZUl+6cftWVoAAAAAAJiWgAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAA0aVQAAAAAAAAAA9sYcesZsvsQUsbztxYDp55wz8tpXLOs76lqQWmNCr48AAAAXSHbi7AANFvYAAAAMAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAA0aVQAAAAAAAAAA9sYcesZsvsQUsbztxYDp55wz8tpXLOs76lqQWmNCr48AAAAXSHbi7AANFvYAAAANAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAMADRo0AAAAAQAAAAD2xhx6xmy+xBSxvO3FgOnnnDPy2lcs6zvqWpBaY0KvjwAAAAFCUkwAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAB22gaB//////////wAAAAEAAAABAAAAAAC3GwAAAAAAAAAAAAAAAAAAAAAAAAAAAQANGlUAAAABAAAAAPbGHHrGbL7EFLG87cWA6eecM/LaVyzrO+pakFpjQq+PAAAAAUJSTAAAAAAAro9D+0/L4lJBzN9uG46hqjOAL8F1TinfZUl+6cftWVoAAAAAHbHtwH//////////AAAAAQAAAAEAAAAAALcbAAAAAAAAAAAAAAAAAAAAAAAAAAADAA0aNAAAAAIAAAAAyOrQaxYm7nh+MP1j/CUknhr2IBC8XzaFEiNPvXq7mwMAAAAAAJmwQAAAAAFBUlMAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAUJSTAAAAAAAro9D+0/L4lJBzN9uG46hqjOAL8F1TinfZUl+6cftWVoAAAAAFNyTgAAAAAMAAABkAAAAAAAAAAAAAAAAAAAAAQANGlUAAAACAAAAAMjq0GsWJu54fjD9Y/wlJJ4a9iAQvF82hRIjT716u5sDAAAAAACZsEAAAAABQVJTAAAAAACuj0P7T8viUkHM324bjqGqM4AvwXVOKd9lSX7px+1ZWgAAAAFCUkwAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAABRD/QAAAAADAAAAZAAAAAAAAAAAAAAAAAAAAAMADRo0AAAAAQAAAADI6tBrFibueH4w/WP8JSSeGvYgELxfNoUSI0+9erubAwAAAAFCUkwAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAB3kSGB//////////wAAAAEAAAABAAAAAACgN6AAAAAAAAAAAAAAAAAAAAAAAAAAAQANGlUAAAABAAAAAMjq0GsWJu54fjD9Y/wlJJ4a9iAQvF82hRIjT716u5sDAAAAAUJSTAAAAAAAro9D+0/L4lJBzN9uG46hqjOAL8F1TinfZUl+6cftWVoAAAAAHejcQH//////////AAAAAQAAAAEAAAAAAJujwAAAAAAAAAAAAAAAAAAAAAAAAAADAA0aNAAAAAEAAAAAyOrQaxYm7nh+MP1j/CUknhr2IBC8XzaFEiNPvXq7mwMAAAABQVJTAAAAAACuj0P7T8viUkHM324bjqGqM4AvwXVOKd9lSX7px+1ZWgAAAAB2BGcAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAABTck4AAAAAAAAAAAAAAAAEADRpVAAAAAQAAAADI6tBrFibueH4w/WP8JSSeGvYgELxfNoUSI0+9erubAwAAAAFBUlMAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAHYEZwB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAFEP9AAAAAAAAAAAA", + feeChangesXDR: 
"AAAAAgAAAAMADRpIAAAAAAAAAAD2xhx6xmy+xBSxvO3FgOnnnDPy2lcs6zvqWpBaY0KvjwAAABdIduNQAA0W9gAAAAwAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEADRpVAAAAAAAAAAD2xhx6xmy+xBSxvO3FgOnnnDPy2lcs6zvqWpBaY0KvjwAAABdIduLsAA0W9gAAAAwAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "96415ac1d2f79621b26b1568f963fd8dd6c50c20a22c7428cefbfe9dee867588", + index: 0, + sequence: 20, + expected: []effect{ + { + address: "GDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQHVGJ", + details: map[string]interface{}{ + "amount": "1.0000000", + "asset_code": "ARS", + "asset_type": "credit_alphanum4", + "asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + }, + effectType: history.EffectAccountCredited, + operationID: int64(85899350017), + order: uint32(1), + }, + { + address: "GD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY737V", + details: map[string]interface{}{ + "amount": "0.0300000", + "asset_code": "BRL", + "asset_type": "credit_alphanum4", + "asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + }, + effectType: history.EffectAccountDebited, + operationID: int64(85899350017), + order: uint32(2), + }, + { + address: "GD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY737V", + details: map[string]interface{}{ + "seller": "GDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQHVGJ", + "offer_id": xdr.Int64(10072128), + "sold_amount": "0.0300000", + "bought_amount": "1.0000000", + "sold_asset_code": "BRL", + "sold_asset_type": "credit_alphanum4", + "bought_asset_code": "ARS", + "bought_asset_type": "credit_alphanum4", + "sold_asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + "bought_asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + }, + effectType: history.EffectTrade, + operationID: int64(85899350017), + order: uint32(3), + }, + { + address: "GDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQHVGJ", + details: map[string]interface{}{ + "seller": "GD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY737V", + "offer_id": xdr.Int64(10072128), + "sold_amount": "1.0000000", + "bought_amount": "0.0300000", + "sold_asset_code": "ARS", + "sold_asset_type": "credit_alphanum4", + "bought_asset_code": "BRL", + "bought_asset_type": "credit_alphanum4", + "sold_asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + "bought_asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + }, + effectType: history.EffectTrade, + operationID: int64(85899350017), + order: uint32(4), + }, + }, + }, + { + desc: "pathPaymentStrictSend with muxed accounts", + envelopeXDR: strictPaymentWithMuxedAccountsTxBase64, + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAANAAAAAAAAAAEAAAAAyOrQaxYm7nh+MP1j/CUknhr2IBC8XzaFEiNPvXq7mwMAAAAAAJmwQAAAAAFBUlMAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAACYloAAAAABQlJMAAAAAACuj0P7T8viUkHM324bjqGqM4AvwXVOKd9lSX7px+1ZWgAAAAAABJPgAAAAAMjq0GsWJu54fjD9Y/wlJJ4a9iAQvF82hRIjT716u5sDAAAAAUFSUwAAAAAAro9D+0/L4lJBzN9uG46hqjOAL8F1TinfZUl+6cftWVoAAAAAAJiWgAAAAAA=", + metaXDR: 
"AAAAAQAAAAIAAAADAA0aVQAAAAAAAAAA9sYcesZsvsQUsbztxYDp55wz8tpXLOs76lqQWmNCr48AAAAXSHbi7AANFvYAAAAMAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAA0aVQAAAAAAAAAA9sYcesZsvsQUsbztxYDp55wz8tpXLOs76lqQWmNCr48AAAAXSHbi7AANFvYAAAANAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAMADRo0AAAAAQAAAAD2xhx6xmy+xBSxvO3FgOnnnDPy2lcs6zvqWpBaY0KvjwAAAAFCUkwAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAB22gaB//////////wAAAAEAAAABAAAAAAC3GwAAAAAAAAAAAAAAAAAAAAAAAAAAAQANGlUAAAABAAAAAPbGHHrGbL7EFLG87cWA6eecM/LaVyzrO+pakFpjQq+PAAAAAUJSTAAAAAAAro9D+0/L4lJBzN9uG46hqjOAL8F1TinfZUl+6cftWVoAAAAAHbHtwH//////////AAAAAQAAAAEAAAAAALcbAAAAAAAAAAAAAAAAAAAAAAAAAAADAA0aNAAAAAIAAAAAyOrQaxYm7nh+MP1j/CUknhr2IBC8XzaFEiNPvXq7mwMAAAAAAJmwQAAAAAFBUlMAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAUJSTAAAAAAAro9D+0/L4lJBzN9uG46hqjOAL8F1TinfZUl+6cftWVoAAAAAFNyTgAAAAAMAAABkAAAAAAAAAAAAAAAAAAAAAQANGlUAAAACAAAAAMjq0GsWJu54fjD9Y/wlJJ4a9iAQvF82hRIjT716u5sDAAAAAACZsEAAAAABQVJTAAAAAACuj0P7T8viUkHM324bjqGqM4AvwXVOKd9lSX7px+1ZWgAAAAFCUkwAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAABRD/QAAAAADAAAAZAAAAAAAAAAAAAAAAAAAAAMADRo0AAAAAQAAAADI6tBrFibueH4w/WP8JSSeGvYgELxfNoUSI0+9erubAwAAAAFCUkwAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAB3kSGB//////////wAAAAEAAAABAAAAAACgN6AAAAAAAAAAAAAAAAAAAAAAAAAAAQANGlUAAAABAAAAAMjq0GsWJu54fjD9Y/wlJJ4a9iAQvF82hRIjT716u5sDAAAAAUJSTAAAAAAAro9D+0/L4lJBzN9uG46hqjOAL8F1TinfZUl+6cftWVoAAAAAHejcQH//////////AAAAAQAAAAEAAAAAAJujwAAAAAAAAAAAAAAAAAAAAAAAAAADAA0aNAAAAAEAAAAAyOrQaxYm7nh+MP1j/CUknhr2IBC8XzaFEiNPvXq7mwMAAAABQVJTAAAAAACuj0P7T8viUkHM324bjqGqM4AvwXVOKd9lSX7px+1ZWgAAAAB2BGcAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAABTck4AAAAAAAAAAAAAAAAEADRpVAAAAAQAAAADI6tBrFibueH4w/WP8JSSeGvYgELxfNoUSI0+9erubAwAAAAFBUlMAAAAAAK6PQ/tPy+JSQczfbhuOoaozgC/BdU4p32VJfunH7VlaAAAAAHYEZwB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAFEP9AAAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMADRpIAAAAAAAAAAD2xhx6xmy+xBSxvO3FgOnnnDPy2lcs6zvqWpBaY0KvjwAAABdIduNQAA0W9gAAAAwAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEADRpVAAAAAAAAAAD2xhx6xmy+xBSxvO3FgOnnnDPy2lcs6zvqWpBaY0KvjwAAABdIduLsAA0W9gAAAAwAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "96415ac1d2f79621b26b1568f963fd8dd6c50c20a22c7428cefbfe9dee867588", + index: 0, + sequence: 20, + expected: []effect{ + { + address: "GDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQHVGJ", + addressMuxed: null.StringFrom("MDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQGAAAAAAMV7V2X24II"), + details: map[string]interface{}{ + "amount": "1.0000000", + "asset_code": "ARS", + "asset_type": "credit_alphanum4", + "asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + }, + effectType: history.EffectAccountCredited, + operationID: int64(85899350017), + order: uint32(1), + }, + { + address: "GD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY737V", + addressMuxed: null.StringFrom("MD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY6AAAAAAMV7V2XZY4C"), + details: map[string]interface{}{ + "amount": "0.0300000", + "asset_code": "BRL", + "asset_type": "credit_alphanum4", + "asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + }, + effectType: history.EffectAccountDebited, + operationID: int64(85899350017), + order: uint32(2), + }, + { + address: "GD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY737V", + addressMuxed: null.StringFrom("MD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY6AAAAAAMV7V2XZY4C"), + details: map[string]interface{}{ + "seller": "GDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQHVGJ", + "offer_id": 
xdr.Int64(10072128), + "sold_amount": "0.0300000", + "bought_amount": "1.0000000", + "sold_asset_code": "BRL", + "sold_asset_type": "credit_alphanum4", + "bought_asset_code": "ARS", + "bought_asset_type": "credit_alphanum4", + "sold_asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + "bought_asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + }, + effectType: history.EffectTrade, + operationID: int64(85899350017), + order: uint32(3), + }, + { + address: "GDEOVUDLCYTO46D6GD6WH7BFESPBV5RACC6F6NUFCIRU7PL2XONQHVGJ", + details: map[string]interface{}{ + "seller": "GD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY737V", + "seller_muxed": "MD3MMHD2YZWL5RAUWG6O3RMA5HTZYM7S3JLSZ2Z35JNJAWTDIKXY6AAAAAAMV7V2XZY4C", + "seller_muxed_id": uint64(0xcafebabe), + "offer_id": xdr.Int64(10072128), + "sold_amount": "1.0000000", + "bought_amount": "0.0300000", + "sold_asset_code": "ARS", + "sold_asset_type": "credit_alphanum4", + "bought_asset_code": "BRL", + "bought_asset_type": "credit_alphanum4", + "sold_asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + "bought_asset_issuer": "GCXI6Q73J7F6EUSBZTPW4G4OUGVDHABPYF2U4KO7MVEX52OH5VMVUCRF", + }, + effectType: history.EffectTrade, + operationID: int64(85899350017), + order: uint32(4), + }, + }, + }, + { + desc: "manageSellOffer - without claims", + envelopeXDR: "AAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAZAAAABAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAARFUV7EAAABALuai5QxceFbtAiC5nkntNVnvSPeWR+C+FgplPAdRgRS+PPESpUiSCyuiwuhmvuDw7kwxn+A6E0M4ca1s2qzMAg==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA", + metaXDR: "AAAAAQAAAAIAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAAAAABIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+M4AAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "ca756d1519ceda79f8722042b12cea7ba004c3bd961adb62b59f88a867f86eb3", + index: 0, + sequence: 56, + expected: []effect{}, + }, + { + desc: "manageSellOffer - with claims", + envelopeXDR: "AAAAAPrjQnnOn4RqMmOSDwYfEMVtJuC4VR9fKvPfEtM7DS7VAAAAZAAMDl8AAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVNUUgAAAAAASYK2XlJiUiNav1waFVDq1fzoualYC4UNFqThKBroJe0AAAACVAvkAAAAAGMAAADIAAAAAAAAAAAAAAAAAAAAATsNLtUAAABABmA0aLobgdSrjIrus94Y8PWeD6dDfl7Sya12t2uZasJFI7mZ+yowE1enUMzC/cAhDTypK8QuH2EVXPQC3xpYDA==", + resultXDR: 
"AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAEAAAAADkfaGg9y56NND7n4CRcr4R4fvivwAcMd4ZrCm4jAe5AAAAAAAI0f+AAAAAFTVFIAAAAAAEmCtl5SYlIjWr9cGhVQ6tX86LmpWAuFDRak4Sga6CXtAAAAAS0Il1oAAAAAAAAAAlQL4/8AAAACAAAAAA==", + metaXDR: "AAAAAQAAAAIAAAADAAxMfwAAAAAAAAAA+uNCec6fhGoyY5IPBh8QxW0m4LhVH18q898S0zsNLtUAAAAU9GsC1QAMDl8AAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAxMfwAAAAAAAAAA+uNCec6fhGoyY5IPBh8QxW0m4LhVH18q898S0zsNLtUAAAAU9GsC1QAMDl8AAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAMADEx+AAAAAgAAAAAOR9oaD3Lno00PufgJFyvhHh++K/ABwx3hmsKbiMB7kAAAAAAAjR/4AAAAAVNUUgAAAAAASYK2XlJiUiNav1waFVDq1fzoualYC4UNFqThKBroJe0AAAAAAAAAA2L6BdYAAABjAAAAMgAAAAAAAAAAAAAAAAAAAAEADEx/AAAAAgAAAAAOR9oaD3Lno00PufgJFyvhHh++K/ABwx3hmsKbiMB7kAAAAAAAjR/4AAAAAVNUUgAAAAAASYK2XlJiUiNav1waFVDq1fzoualYC4UNFqThKBroJe0AAAAAAAAAAjXxbnwAAABjAAAAMgAAAAAAAAAAAAAAAAAAAAMADEx+AAAAAAAAAAAOR9oaD3Lno00PufgJFyvhHh++K/ABwx3hmsKbiMB7kAAAABnMMdMvAAwOZQAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAABrSdIAkAAAAAAAAAAAAAAAAAAAAAAAAAAQAMTH8AAAAAAAAAAA5H2hoPcuejTQ+5+AkXK+EeH74r8AHDHeGawpuIwHuQAAAAHCA9ty4ADA5lAAAAAgAAAAIAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAEYJE8CgAAAAAAAAAAAAAAAAAAAAAAAAADAAxMfgAAAAEAAAAADkfaGg9y56NND7n4CRcr4R4fvivwAcMd4ZrCm4jAe5AAAAABU1RSAAAAAABJgrZeUmJSI1q/XBoVUOrV/Oi5qVgLhQ0WpOEoGugl7QAAABYDWSXWf/////////8AAAABAAAAAQAAAAAAAAAAAAAAA2L6BdYAAAAAAAAAAAAAAAEADEx/AAAAAQAAAAAOR9oaD3Lno00PufgJFyvhHh++K/ABwx3hmsKbiMB7kAAAAAFTVFIAAAAAAEmCtl5SYlIjWr9cGhVQ6tX86LmpWAuFDRak4Sga6CXtAAAAFNZQjnx//////////wAAAAEAAAABAAAAAAAAAAAAAAACNfFufAAAAAAAAAAAAAAAAwAMDnEAAAABAAAAAPrjQnnOn4RqMmOSDwYfEMVtJuC4VR9fKvPfEtM7DS7VAAAAAVNUUgAAAAAASYK2XlJiUiNav1waFVDq1fzoualYC4UNFqThKBroJe0AAAAYdX9/Wn//////////AAAAAQAAAAAAAAAAAAAAAQAMTH8AAAABAAAAAPrjQnnOn4RqMmOSDwYfEMVtJuC4VR9fKvPfEtM7DS7VAAAAAVNUUgAAAAAASYK2XlJiUiNav1waFVDq1fzoualYC4UNFqThKBroJe0AAAAZoogWtH//////////AAAAAQAAAAAAAAAAAAAAAwAMTH8AAAAAAAAAAPrjQnnOn4RqMmOSDwYfEMVtJuC4VR9fKvPfEtM7DS7VAAAAFPRrAtUADA5fAAAAAwAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAMTH8AAAAAAAAAAPrjQnnOn4RqMmOSDwYfEMVtJuC4VR9fKvPfEtM7DS7VAAAAEqBfHtYADA5fAAAAAwAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMADA5xAAAAAAAAAAD640J5zp+EajJjkg8GHxDFbSbguFUfXyrz3xLTOw0u1QAAABT0awM5AAwOXwAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEADEx/AAAAAAAAAAD640J5zp+EajJjkg8GHxDFbSbguFUfXyrz3xLTOw0u1QAAABT0awLVAAwOXwAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "ef62da32b6b3eb3c4534dac2be1088387fb93b0093b47e113073c1431fac9db7", + index: 0, + sequence: 56, + expected: []effect{ + { + address: "GD5OGQTZZ2PYI2RSMOJA6BQ7CDCW2JXAXBKR6XZK6PPRFUZ3BUXNLFKP", + details: map[string]interface{}{ + "seller": "GAHEPWQ2B5ZOPI2NB647QCIXFPQR4H56FPYADQY54GNMFG4IYB5ZAJ5H", + "offer_id": xdr.Int64(9248760), + "sold_amount": "999.9999999", + "bought_amount": "505.0505050", + "sold_asset_type": "native", + "bought_asset_code": "STR", + "bought_asset_type": "credit_alphanum4", + "bought_asset_issuer": "GBEYFNS6KJRFEI22X5OBUFKQ5LK7Z2FZVFMAXBINC2SOCKA25AS62PUN", + }, + effectType: history.EffectTrade, + operationID: int64(240518172673), + order: uint32(1), + }, + { + address: "GAHEPWQ2B5ZOPI2NB647QCIXFPQR4H56FPYADQY54GNMFG4IYB5ZAJ5H", + details: map[string]interface{}{ + "seller": "GD5OGQTZZ2PYI2RSMOJA6BQ7CDCW2JXAXBKR6XZK6PPRFUZ3BUXNLFKP", + "offer_id": xdr.Int64(9248760), + "sold_amount": "505.0505050", + "bought_amount": "999.9999999", + "sold_asset_code": "STR", + "sold_asset_type": "credit_alphanum4", + "bought_asset_type": "native", + "sold_asset_issuer": 
"GBEYFNS6KJRFEI22X5OBUFKQ5LK7Z2FZVFMAXBINC2SOCKA25AS62PUN", + }, + effectType: history.EffectTrade, + operationID: int64(240518172673), + order: uint32(2), + }, + }, + }, + { + desc: "manageBuyOffer - with claims", + envelopeXDR: "AAAAAEotqBM9oOzudkkctgQlY/PHS0rFcxVasWQVnSytiuBEAAAAZAANIfEAAAADAAAAAAAAAAAAAAABAAAAAAAAAAwAAAAAAAAAAlRYVGFscGhhNAAAAAAAAABKLagTPaDs7nZJHLYEJWPzx0tKxXMVWrFkFZ0srYrgRAAAAAB3NZQAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAABrYrgRAAAAEAh57TBifjJuUPj1TI7zIvaAZmyRjWLY4ktc0F16Knmy4Fw07L7cC5vCwjn4ZXyrgr9bpEGhv4oN6znbPpNLQUH", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAMAAAAAAAAAAEAAAAAgbI9jY68fYXd6+DwMcZQQIYCK4HsKKvqnR5o+1IdVoUAAAAAAJovcgAAAAJUWFRhbHBoYTQAAAAAAAAASi2oEz2g7O52SRy2BCVj88dLSsVzFVqxZBWdLK2K4EQAAAAAdzWUAAAAAAAAAAAAdzWUAAAAAAIAAAAA", + metaXDR: "AAAAAQAAAAIAAAADAA0pGAAAAAAAAAAASi2oEz2g7O52SRy2BCVj88dLSsVzFVqxZBWdLK2K4EQAAAAXSHbm1AANIfEAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAA0pGAAAAAAAAAAASi2oEz2g7O52SRy2BCVj88dLSsVzFVqxZBWdLK2K4EQAAAAXSHbm1AANIfEAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAMADSkYAAAAAAAAAABKLagTPaDs7nZJHLYEJWPzx0tKxXMVWrFkFZ0srYrgRAAAABdIdubUAA0h8QAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEADSkYAAAAAAAAAABKLagTPaDs7nZJHLYEJWPzx0tKxXMVWrFkFZ0srYrgRAAAABbRQVLUAA0h8QAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMADSjEAAAAAgAAAACBsj2Njrx9hd3r4PAxxlBAhgIrgewoq+qdHmj7Uh1WhQAAAAAAmi9yAAAAAlRYVGFscGhhNAAAAAAAAABKLagTPaDs7nZJHLYEJWPzx0tKxXMVWrFkFZ0srYrgRAAAAAAAAAAAstBeAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAQANKRgAAAACAAAAAIGyPY2OvH2F3evg8DHGUECGAiuB7Cir6p0eaPtSHVaFAAAAAACaL3IAAAACVFhUYWxwaGE0AAAAAAAAAEotqBM9oOzudkkctgQlY/PHS0rFcxVasWQVnSytiuBEAAAAAAAAAAA7msoAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAADAA0oxAAAAAAAAAAAgbI9jY68fYXd6+DwMcZQQIYCK4HsKKvqnR5o+1IdVoUAAAAZJU0xXAANGSMAAAARAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQADMowLgdQAAAAAAAAAAAAAAAAAAAAAAAAAAAEADSkYAAAAAAAAAACBsj2Njrx9hd3r4PAxxlBAhgIrgewoq+qdHmj7Uh1WhQAAABmcgsVcAA0ZIwAAABEAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAMyi5RMQAAAAAAAAAAAAAAAAAAAAAAAAAAAAwANKMQAAAABAAAAAIGyPY2OvH2F3evg8DHGUECGAiuB7Cir6p0eaPtSHVaFAAAAAlRYVGFscGhhNAAAAAAAAABKLagTPaDs7nZJHLYEJWPzx0tKxXMVWrFkFZ0srYrgRAAACRatNxoAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAALLQXgAAAAAAAAAAAAAAAAEADSkYAAAAAQAAAACBsj2Njrx9hd3r4PAxxlBAhgIrgewoq+qdHmj7Uh1WhQAAAAJUWFRhbHBoYTQAAAAAAAAASi2oEz2g7O52SRy2BCVj88dLSsVzFVqxZBWdLK2K4EQAAAkWNgGGAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMADSSgAAAAAAAAAABKLagTPaDs7nZJHLYEJWPzx0tKxXMVWrFkFZ0srYrgRAAAABdIduc4AA0h8QAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEADSkYAAAAAAAAAABKLagTPaDs7nZJHLYEJWPzx0tKxXMVWrFkFZ0srYrgRAAAABdIdubUAA0h8QAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "9caa91eec6e29730f4aabafb60898a8ecedd3bf67b8628e6e32066fbba9bec5d", + index: 0, + sequence: 56, + expected: []effect{ + { + address: "GBFC3KATHWQOZ3TWJEOLMBBFMPZ4OS2KYVZRKWVRMQKZ2LFNRLQEIRCV", + details: map[string]interface{}{ + "seller": "GCA3EPMNR26H3BO55PQPAMOGKBAIMARLQHWCRK7KTUPGR62SDVLIL7D6", + "offer_id": xdr.Int64(10104690), + "sold_amount": "200.0000000", + "bought_amount": "200.0000000", + "sold_asset_type": "native", + "bought_asset_code": "TXTalpha4", + "bought_asset_type": "credit_alphanum12", + "bought_asset_issuer": "GBFC3KATHWQOZ3TWJEOLMBBFMPZ4OS2KYVZRKWVRMQKZ2LFNRLQEIRCV", + }, + effectType: history.EffectTrade, + operationID: int64(240518172673), + order: uint32(1), + }, + { + address: "GCA3EPMNR26H3BO55PQPAMOGKBAIMARLQHWCRK7KTUPGR62SDVLIL7D6", + details: map[string]interface{}{ + "seller": 
"GBFC3KATHWQOZ3TWJEOLMBBFMPZ4OS2KYVZRKWVRMQKZ2LFNRLQEIRCV", + "offer_id": xdr.Int64(10104690), + "sold_amount": "200.0000000", + "bought_amount": "200.0000000", + "sold_asset_code": "TXTalpha4", + "sold_asset_type": "credit_alphanum12", + "bought_asset_type": "native", + "sold_asset_issuer": "GBFC3KATHWQOZ3TWJEOLMBBFMPZ4OS2KYVZRKWVRMQKZ2LFNRLQEIRCV", + }, + effectType: history.EffectTrade, + operationID: int64(240518172673), + order: uint32(2), + }, + }, + }, + { + desc: "createPassiveSellOffer", + envelopeXDR: "AAAAAAHwZwJPu1TJhQGgsLRXBzcIeySkeGXzEqh0W9AHWvFDAAAAZAAN3tMAAAACAAAAAQAAAAAAAAAAAAAAAF4FBqwAAAAAAAAAAQAAAAAAAAAEAAAAAAAAAAFDT1AAAAAAALly/iTceP/82O3aZAmd8hyqUjYAANfc5RfN0/iibCtTAAAAADuaygAAAAAJAAAACgAAAAAAAAABB1rxQwAAAEDz2JIw8Z3Owoc5c2tsiY3kzOYUmh32155u00Xs+RYxO5fL0ApYd78URHcYCbe0R32YmuLTfefWQStR3RfhqKAL", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAEAAAAAMgQ65fmCczzuwmU3oQLivASzvZdhzjhJOQ6C+xTSDu8AAAAAAKMvZgAAAAFDT1AAAAAAALly/iTceP/82O3aZAmd8hyqUjYAANfc5RfN0/iibCtTAAAA6NSlEAAAAAAAAAAAADuaygAAAAACAAAAAA==", + metaXDR: "AAAAAQAAAAIAAAADAA3fGgAAAAAAAAAAAfBnAk+7VMmFAaCwtFcHNwh7JKR4ZfMSqHRb0Ada8UMAAAAXSHbnOAAN3tMAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAA3fGgAAAAAAAAAAAfBnAk+7VMmFAaCwtFcHNwh7JKR4ZfMSqHRb0Ada8UMAAAAXSHbnOAAN3tMAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAMADd72AAAAAgAAAAAyBDrl+YJzPO7CZTehAuK8BLO9l2HOOEk5DoL7FNIO7wAAAAAAoy9mAAAAAUNPUAAAAAAAuXL+JNx4//zY7dpkCZ3yHKpSNgAA19zlF83T+KJsK1MAAAAAAAAA6NSlEAAAAAABAAAD6AAAAAAAAAAAAAAAAAAAAAIAAAACAAAAADIEOuX5gnM87sJlN6EC4rwEs72XYc44STkOgvsU0g7vAAAAAACjL2YAAAADAA3fGQAAAAAAAAAAMgQ65fmCczzuwmU3oQLivASzvZdhzjhJOQ6C+xTSDu8AAAAXSHbkfAAIGHsAAAAJAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAB3NZQAAAAAAAAAAAAAAAAAAAAAAAAAAAEADd8aAAAAAAAAAAAyBDrl+YJzPO7CZTehAuK8BLO9l2HOOEk5DoL7FNIO7wAAABeEEa58AAgYewAAAAkAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAADuaygAAAAAAAAAAAAAAAAAAAAAAAAAAAwAN3xkAAAABAAAAADIEOuX5gnM87sJlN6EC4rwEs72XYc44STkOgvsU0g7vAAAAAUNPUAAAAAAAuXL+JNx4//zY7dpkCZ3yHKpSNgAA19zlF83T+KJsK1MAABI3mQjsAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAdGpSiAAAAAAAAAAAAAAAAABAA3fGgAAAAEAAAAAMgQ65fmCczzuwmU3oQLivASzvZdhzjhJOQ6C+xTSDu8AAAABQ09QAAAAAAC5cv4k3Hj//Njt2mQJnfIcqlI2AADX3OUXzdP4omwrUwAAEU7EY9wAf/////////8AAAABAAAAAQAAAAAAAAAAAAAA6NSlEAAAAAAAAAAAAAAAAAMADd7UAAAAAQAAAAAB8GcCT7tUyYUBoLC0Vwc3CHskpHhl8xKodFvQB1rxQwAAAAFDT1AAAAAAALly/iTceP/82O3aZAmd8hyqUjYAANfc5RfN0/iibCtTAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEADd8aAAAAAQAAAAAB8GcCT7tUyYUBoLC0Vwc3CHskpHhl8xKodFvQB1rxQwAAAAFDT1AAAAAAALly/iTceP/82O3aZAmd8hyqUjYAANfc5RfN0/iibCtTAAAA6NSlEAB//////////wAAAAEAAAAAAAAAAAAAAAMADd8aAAAAAAAAAAAB8GcCT7tUyYUBoLC0Vwc3CHskpHhl8xKodFvQB1rxQwAAABdIduc4AA3e0wAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEADd8aAAAAAAAAAAAB8GcCT7tUyYUBoLC0Vwc3CHskpHhl8xKodFvQB1rxQwAAABcM3B04AA3e0wAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMADd7UAAAAAAAAAAAB8GcCT7tUyYUBoLC0Vwc3CHskpHhl8xKodFvQB1rxQwAAABdIduecAA3e0wAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEADd8aAAAAAAAAAAAB8GcCT7tUyYUBoLC0Vwc3CHskpHhl8xKodFvQB1rxQwAAABdIduc4AA3e0wAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "e4b286344ae1c863ab15773ddf6649b08fe031383135194f8613a3a475c41a5a", + index: 0, + sequence: 56, + expected: []effect{ + { + address: "GAA7AZYCJ65VJSMFAGQLBNCXA43QQ6ZEUR4GL4YSVB2FXUAHLLYUHIO5", + details: map[string]interface{}{ + "bought_amount": "100000.0000000", + "bought_asset_code": "COP", + "bought_asset_issuer": "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + "bought_asset_type": 
"credit_alphanum4", + "offer_id": xdr.Int64(10694502), + "seller": "GAZAIOXF7GBHGPHOYJSTPIIC4K6AJM55S5Q44OCJHEHIF6YU2IHO6VHU", + "sold_amount": "100.0000000", + "sold_asset_type": "native", + }, + effectType: history.EffectTrade, + operationID: int64(240518172673), + order: uint32(1), + }, + { + address: "GAZAIOXF7GBHGPHOYJSTPIIC4K6AJM55S5Q44OCJHEHIF6YU2IHO6VHU", + details: map[string]interface{}{ + "bought_amount": "100.0000000", + "bought_asset_type": "native", + "offer_id": xdr.Int64(10694502), + "seller": "GAA7AZYCJ65VJSMFAGQLBNCXA43QQ6ZEUR4GL4YSVB2FXUAHLLYUHIO5", + "sold_amount": "100000.0000000", + "sold_asset_code": "COP", + "sold_asset_issuer": "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + "sold_asset_type": "credit_alphanum4", + }, + effectType: history.EffectTrade, + operationID: int64(240518172673), + order: uint32(2), + }, + }, + }, + { + desc: "setOption", + envelopeXDR: "AAAAALly/iTceP/82O3aZAmd8hyqUjYAANfc5RfN0/iibCtTAAAAZAAIGHoAAAAHAAAAAQAAAAAAAAAAAAAAAF4FFtcAAAAAAAAAAQAAAAAAAAAFAAAAAQAAAAAge0MBDbX9OddsGMWIHbY1cGXuGYP4bl1ylIvUklO73AAAAAEAAAACAAAAAQAAAAEAAAABAAAAAwAAAAEAAAABAAAAAQAAAAIAAAABAAAAAwAAAAEAAAAVaHR0cHM6Ly93d3cuaG9tZS5vcmcvAAAAAAAAAQAAAAAge0MBDbX9OddsGMWIHbY1cGXuGYP4bl1ylIvUklO73AAAAAIAAAAAAAAAAaJsK1MAAABAiQjCxE53GjInjJtvNr6gdhztRi0GWOZKlUS2KZBLjX3n2N/y7RRNt7B1ZuFcZAxrnxWHD/fF2XcrEwFAuf4TDA==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAA3iDQAAAAAAAAAAuXL+JNx4//zY7dpkCZ3yHKpSNgAA19zlF83T+KJsK1MAAAAXSHblRAAIGHoAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAA3iDQAAAAAAAAAAuXL+JNx4//zY7dpkCZ3yHKpSNgAA19zlF83T+KJsK1MAAAAXSHblRAAIGHoAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMADeINAAAAAAAAAAC5cv4k3Hj//Njt2mQJnfIcqlI2AADX3OUXzdP4omwrUwAAABdIduVEAAgYegAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEADeINAAAAAAAAAAC5cv4k3Hj//Njt2mQJnfIcqlI2AADX3OUXzdP4omwrUwAAABdIduVEAAgYegAAAAcAAAABAAAAAQAAAAAge0MBDbX9OddsGMWIHbY1cGXuGYP4bl1ylIvUklO73AAAAAEAAAAVaHR0cHM6Ly93d3cuaG9tZS5vcmcvAAAAAwECAwAAAAEAAAAAIHtDAQ21/TnXbBjFiB22NXBl7hmD+G5dcpSL1JJTu9wAAAACAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMADd8YAAAAAAAAAAC5cv4k3Hj//Njt2mQJnfIcqlI2AADX3OUXzdP4omwrUwAAABdIduWoAAgYegAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEADeINAAAAAAAAAAC5cv4k3Hj//Njt2mQJnfIcqlI2AADX3OUXzdP4omwrUwAAABdIduVEAAgYegAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "e76b7b0133690fbfb2de8fa9ca2273cb4f2e29447e0cf0e14a5f82d0daa48760", + index: 0, + sequence: 56, + expected: []effect{ + { + address: "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + details: map[string]interface{}{ + "home_domain": "https://www.home.org/", + }, + effectType: history.EffectAccountHomeDomainUpdated, + operationID: int64(240518172673), + order: uint32(1), + }, + { + address: "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + details: map[string]interface{}{ + "high_threshold": xdr.Uint32(3), + "low_threshold": xdr.Uint32(1), + "med_threshold": xdr.Uint32(2), + }, + effectType: history.EffectAccountThresholdsUpdated, + operationID: int64(240518172673), + order: uint32(2), + }, + { + address: "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + details: map[string]interface{}{ + "auth_required_flag": true, + "auth_revocable_flag": false, + }, + effectType: history.EffectAccountFlagsUpdated, + operationID: int64(240518172673), + order: uint32(3), + }, + { + address: "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + details: map[string]interface{}{ + "inflation_destination": 
"GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", + }, + effectType: history.EffectAccountInflationDestinationUpdated, + operationID: int64(240518172673), + order: uint32(4), + }, + { + address: "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + details: map[string]interface{}{ + "public_key": "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + "weight": int32(3), + }, + effectType: history.EffectSignerUpdated, + operationID: int64(240518172673), + order: uint32(5), + }, + { + address: "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + details: map[string]interface{}{ + "public_key": "GAQHWQYBBW272OOXNQMMLCA5WY2XAZPODGB7Q3S5OKKIXVESKO55ZQ7C", + "weight": int32(2), + }, + effectType: history.EffectSignerCreated, + operationID: int64(240518172673), + order: uint32(6), + }, + }, + }, + { + desc: "changeTrust - trustline created", + envelopeXDR: "AAAAAKturFHJX/eRt5gM6qIXAMbaXvlImqLysA6Qr9tLemxfAAAAZAAAACYAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+H//////////AAAAAAAAAAFLemxfAAAAQKN8LftAafeoAGmvpsEokqm47jAuqw4g1UWjmL0j6QPm1jxoalzDwDS3W+N2HOHdjSJlEQaTxGBfQKHhr6nNsAA=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAmAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "6fa467b53f5386d77ad35c2502ed2cd3dd8b460a5be22b6b2818b81bcd3ed2da", + index: 0, + sequence: 40, + expected: []effect{ + { + address: "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", + effectType: history.EffectTrustlineCreated, + operationID: int64(171798695937), + order: uint32(1), + details: map[string]interface{}{ + "limit": "922337203685.4775807", + "asset_code": "USD", + "asset_type": "credit_alphanum4", + "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + }, + }, + }, + }, + { + desc: "changeTrust - trustline removed", + envelopeXDR: "AAAAABwDSftLnTVAHpKUGYPZfTJr6rIm5Z5IqDHVBFuTI3ubAAAAZAARM9kAAAADAAAAAQAAAAAAAAAAAAAAAF4XMm8AAAAAAAAAAQAAAAAAAAAGAAAAAk9DSVRva2VuAAAAAAAAAABJxf/HoI4oaD9CLBvECRhG9GPMNa/65PTI9N7F37o4nwAAAAAAAAAAAAAAAAAAAAGTI3ubAAAAQMHTFPeyHA+W2EYHVDut4dQ18zvF+47SsTPaePwZUaCgw/A3tKDx7sO7R8xlI3GwKQl91Ljmm1dbvAONU9nk/AQ=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=", + metaXDR: 
"AAAAAQAAAAIAAAADABEz3wAAAAAAAAAAHANJ+0udNUAekpQZg9l9MmvqsiblnkioMdUEW5Mje5sAAAAXSHbm1AARM9kAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABABEz3wAAAAAAAAAAHANJ+0udNUAekpQZg9l9MmvqsiblnkioMdUEW5Mje5sAAAAXSHbm1AARM9kAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAETPeAAAAAQAAAAAcA0n7S501QB6SlBmD2X0ya+qyJuWeSKgx1QRbkyN7mwAAAAJPQ0lUb2tlbgAAAAAAAAAAScX/x6COKGg/QiwbxAkYRvRjzDWv+uT0yPTexd+6OJ8AAAAAAAAAAH//////////AAAAAQAAAAAAAAAAAAAAAgAAAAEAAAAAHANJ+0udNUAekpQZg9l9MmvqsiblnkioMdUEW5Mje5sAAAACT0NJVG9rZW4AAAAAAAAAAEnF/8egjihoP0IsG8QJGEb0Y8w1r/rk9Mj03sXfujifAAAAAwARM98AAAAAAAAAABwDSftLnTVAHpKUGYPZfTJr6rIm5Z5IqDHVBFuTI3ubAAAAF0h25tQAETPZAAAAAwAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAQARM98AAAAAAAAAABwDSftLnTVAHpKUGYPZfTJr6rIm5Z5IqDHVBFuTI3ubAAAAF0h25tQAETPZAAAAAwAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMAETPeAAAAAAAAAAAcA0n7S501QB6SlBmD2X0ya+qyJuWeSKgx1QRbkyN7mwAAABdIduc4ABEz2QAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAETPfAAAAAAAAAAAcA0n7S501QB6SlBmD2X0ya+qyJuWeSKgx1QRbkyN7mwAAABdIdubUABEz2QAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "0f1e93ed9a83edb01ad8ccab67fd59dc7a513c413a8d5a580c5eb7a9c44f2844", + index: 0, + sequence: 40, + expected: []effect{ + { + address: "GAOAGSP3JOOTKQA6SKKBTA6ZPUZGX2VSE3SZ4SFIGHKQIW4TEN5ZX3WW", + effectType: history.EffectTrustlineRemoved, + operationID: int64(171798695937), + order: uint32(1), + details: map[string]interface{}{ + "limit": "0.0000000", + "asset_code": "OCIToken", + "asset_type": "credit_alphanum12", + "asset_issuer": "GBE4L76HUCHCQ2B7IIWBXRAJDBDPIY6MGWX7VZHUZD2N5RO7XI4J6GTJ", + }, + }, + }, + }, + { + desc: "changeTrust - trustline updated", + envelopeXDR: "AAAAAHHbEhVipyZ2k4byyCZkS1Bdvpj7faBChuYo8S/Rt89UAAAAZAAQuJIAAAAHAAAAAQAAAAAAAAAAAAAAAF4XVskAAAAAAAAAAQAAAAAAAAAGAAAAAlRFU1RBU1NFVAAAAAAAAAA7JUkkD+tgCi2xTVyEcs4WZXOA0l7w2orZg/bghXOgkAAAAAA7msoAAAAAAAAAAAHRt89UAAAAQOCi2ylqRvvRzZaCFjGkLYFk7DCjJA5uZ1nXo8FaPCRl2LZczoMbc46sZIlHh0ENzk7fKjFnRPMo8XAirrrf2go=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADABE6jwAAAAAAAAAAcdsSFWKnJnaThvLIJmRLUF2+mPt9oEKG5ijxL9G3z1QAAAAAO5rHRAAQuJIAAAAGAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABABE6jwAAAAAAAAAAcdsSFWKnJnaThvLIJmRLUF2+mPt9oEKG5ijxL9G3z1QAAAAAO5rHRAAQuJIAAAAHAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAETqAAAAAAQAAAABx2xIVYqcmdpOG8sgmZEtQXb6Y+32gQobmKPEv0bfPVAAAAAJURVNUQVNTRVQAAAAAAAAAOyVJJA/rYAotsU1chHLOFmVzgNJe8NqK2YP24IVzoJAAAAAAO5rKAAAAAAA7msoAAAAAAQAAAAAAAAAAAAAAAQAROo8AAAABAAAAAHHbEhVipyZ2k4byyCZkS1Bdvpj7faBChuYo8S/Rt89UAAAAAlRFU1RBU1NFVAAAAAAAAAA7JUkkD+tgCi2xTVyEcs4WZXOA0l7w2orZg/bghXOgkAAAAAA7msoAAAAAADuaygAAAAABAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMAETp/AAAAAAAAAABx2xIVYqcmdpOG8sgmZEtQXb6Y+32gQobmKPEv0bfPVAAAAAA7mseoABC4kgAAAAYAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAETqPAAAAAAAAAABx2xIVYqcmdpOG8sgmZEtQXb6Y+32gQobmKPEv0bfPVAAAAAA7msdEABC4kgAAAAYAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "dc8d4714d7db3d0e27ae07f629bc72f1605fc24a2d178af04edbb602592791aa", + index: 0, + sequence: 40, + expected: []effect{ + { + address: "GBY5WEQVMKTSM5UTQ3ZMQJTEJNIF3PUY7N62AQUG4YUPCL6RW7HVJARI", + effectType: history.EffectTrustlineUpdated, + operationID: int64(171798695937), + order: uint32(1), + details: map[string]interface{}{ + "limit": "100.0000000", + "asset_code": "TESTASSET", + "asset_type": "credit_alphanum12", + "asset_issuer": "GA5SKSJEB7VWACRNWFGVZBDSZYLGK44A2JPPBWUK3GB7NYEFOOQJAC2B", + }, + }, + }, + }, + { + desc: "allowTrust", + 
envelopeXDR: "AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAACAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABVVNEAAAAAAEAAAAAAAAAAUpI8/gAAABA6O2fe1gQBwoO0fMNNEUKH0QdVXVjEWbN5VL51DmRUedYMMXtbX5JKVSzla2kIGvWgls1dXuXHZY/IOlaK01rBQ==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAABAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAACAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "6d2e30fd57492bf2e2b132e1bc91a548a369189bebf77eb2b3d829121a9d2c50", + index: 0, + sequence: 41, + expected: []effect{ + { + address: "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + effectType: history.EffectTrustlineAuthorized, + operationID: int64(176093663233), + order: uint32(1), + details: map[string]interface{}{ + "trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", + "asset_code": "USD", + "asset_type": "credit_alphanum4", + "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + }, + }, + { + address: "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + effectType: history.EffectTrustlineFlagsUpdated, + order: uint32(2), + operationID: int64(176093663233), + details: map[string]interface{}{ + "asset_code": "USD", + "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + "asset_type": "credit_alphanum4", + "authorized_flag": true, + "trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", + }, + }, + }, + }, + { + desc: "accountMerge (Destination)", + envelopeXDR: "AAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+HvAAAAZAAAACsAAAABAAAAAAAAAAAAAAABAAAAAAAAAAgAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAYrj4e8AAABA3jJ7wBrRpsrcnqBQWjyzwvVz2v5UJ56G60IhgsaWQFSf+7om462KToc+HJ27aLVOQ83dGh1ivp+VIuREJq/SBw==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAIAAAAAAAAAAJUC+OcAAAAAA==", + metaXDR: "AAAAAQAAAAIAAAADAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtonM3Az4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/CUAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+Hv", + feeChangesXDR: 
"AAAAAgAAAAMAAAArAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+QAAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "e0773d07aba23d11e6a06b021682294be1f9f202a2926827022539662ce2c7fc", + index: 0, + sequence: 44, + expected: []effect{ + { + address: "GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ", + effectType: history.EffectAccountDebited, + operationID: int64(188978565121), + order: uint32(1), + details: map[string]interface{}{ + "amount": "999.9999900", + "asset_type": "native", + }, + }, + { + address: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + effectType: history.EffectAccountCredited, + operationID: int64(188978565121), + order: uint32(2), + details: map[string]interface{}{ + "amount": "999.9999900", + "asset_type": "native", + }, + }, + { + address: "GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ", + effectType: history.EffectAccountRemoved, + operationID: int64(188978565121), + order: uint32(3), + details: map[string]interface{}{}, + }, + }, + }, + { + desc: "inflation", + envelopeXDR: "AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAVAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAAAVb8BfcAAABABUHuXY+MTgW/wDv5+NDVh9fw4meszxeXO98HEQfgXVeCZ7eObCI2orSGUNA/SK6HV9/uTVSxIQQWIso1QoxHBQ==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAAIAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAIrEjCYwXAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAIrEjfceLAAAAAA==", + metaXDR: "AAAAAQAAAAIAAAADAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAUAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAVAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h/EAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGraHekccnAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "ea93efd8c2f4e45c0318c69ec958623a0e4374f40d569eec124d43c8a54d6256", + index: 0, + sequence: 47, + expected: []effect{ + { + address: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + effectType: history.EffectAccountCredited, + operationID: int64(201863467009), + order: uint32(1), + details: map[string]interface{}{ + "amount": "15257676.9536092", + "asset_type": "native", + }, + }, + { + address: "GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS", + 
effectType: history.EffectAccountCredited, + operationID: int64(201863467009), + order: uint32(2), + details: map[string]interface{}{ + "amount": "3814420.0001419", + "asset_type": "native", + }, + }, + }, + }, + { + desc: "manageData - data created", + envelopeXDR: "AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTIAAAAAAAABAAAABDU2NzgAAAAAAAAAAS6Z+xkAAABAjxgnTRBCa0n1efZocxpEjXeITQ5sEYTVd9fowuto2kPw5eFwgVnz6OrKJwCRt5L8ylmWiATXVI3Zyfi3yTKqBA==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMgAAAAAAAAQ1Njc4AAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+OcAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+M4AAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "e4609180751e7702466a8845857df43e4d154ec84b6bad62ce507fe12f1daf99", + index: 0, + sequence: 49, + expected: []effect{ + { + address: "GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD", + effectType: history.EffectDataCreated, + operationID: int64(210453401601), + order: uint32(1), + details: map[string]interface{}{ + "name": xdr.String64("name2"), + "value": "NTY3OA==", + }, + }, + }, + }, + { + desc: "manageData - data removed", + envelopeXDR: "AAAAALly/iTceP/82O3aZAmd8hyqUjYAANfc5RfN0/iibCtTAAAAZAAIGHoAAAAKAAAAAQAAAAAAAAAAAAAAAF4XaMIAAAAAAAAAAQAAAAAAAAAKAAAABWhlbGxvAAAAAAAAAAAAAAAAAAABomwrUwAAAEDyu3HI9bdkzNBs4UgTjVmYt3LQ0CC/6a8yWBmz8OiKeY/RJ9wJvV9/m0JWGtFWbPOXWBg/Pj3ttgKMiHh9TKoF", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADABE92wAAAAAAAAAAuXL+JNx4//zY7dpkCZ3yHKpSNgAA19zlF83T+KJsK1MAAAAXSHbkGAAIGHoAAAAJAAAAAgAAAAEAAAAAIHtDAQ21/TnXbBjFiB22NXBl7hmD+G5dcpSL1JJTu9wAAAABAAAAFWh0dHBzOi8vd3d3LmhvbWUub3JnLwAAAAMBAgMAAAABAAAAACB7QwENtf0512wYxYgdtjVwZe4Zg/huXXKUi9SSU7vcAAAAAgAAAAAAAAAAAAAAAQARPdsAAAAAAAAAALly/iTceP/82O3aZAmd8hyqUjYAANfc5RfN0/iibCtTAAAAF0h25BgACBh6AAAACgAAAAIAAAABAAAAACB7QwENtf0512wYxYgdtjVwZe4Zg/huXXKUi9SSU7vcAAAAAQAAABVodHRwczovL3d3dy5ob21lLm9yZy8AAAADAQIDAAAAAQAAAAAge0MBDbX9OddsGMWIHbY1cGXuGYP4bl1ylIvUklO73AAAAAIAAAAAAAAAAAAAAAEAAAAEAAAAAwARPcsAAAADAAAAALly/iTceP/82O3aZAmd8hyqUjYAANfc5RfN0/iibCtTAAAABWhlbGxvAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAMAAAAAuXL+JNx4//zY7dpkCZ3yHKpSNgAA19zlF83T+KJsK1MAAAAFaGVsbG8AAAAAAAADABE92wAAAAAAAAAAuXL+JNx4//zY7dpkCZ3yHKpSNgAA19zlF83T+KJsK1MAAAAXSHbkGAAIGHoAAAAKAAAAAgAAAAEAAAAAIHtDAQ21/TnXbBjFiB22NXBl7hmD+G5dcpSL1JJTu9wAAAABAAAAFWh0dHBzOi8vd3d3LmhvbWUub3JnLwAAAAMBAgMAAAABAAAAACB7QwENtf0512wYxYgdtjVwZe4Zg/huXXKUi9SSU7vcAAAAAgAAAAAAAAAAAAAAAQARPdsAAAAAAAAAALly/iTceP/82O3aZAmd8hyqUjYAANfc5RfN0/iibCtTAAAAF0h25BgACBh6AAAACgAAAAEAAAABAAAAACB7QwENtf0512wYxYgdtjVwZe4Zg/huXXKUi9SSU7vcAAAAAQAAABVodHRwczovL3d3dy5ob21lLm9yZy8AAAADAQIDAAAAAQAAAAAge0MBDbX9OddsGMWIHbY1cGXuGYP4bl1ylIvUklO73AAAAAIAAAAAAAAAAA==", + feeChangesXDR: 
"AAAAAgAAAAMAET3LAAAAAAAAAAC5cv4k3Hj//Njt2mQJnfIcqlI2AADX3OUXzdP4omwrUwAAABdIduR8AAgYegAAAAkAAAACAAAAAQAAAAAge0MBDbX9OddsGMWIHbY1cGXuGYP4bl1ylIvUklO73AAAAAEAAAAVaHR0cHM6Ly93d3cuaG9tZS5vcmcvAAAAAwECAwAAAAEAAAAAIHtDAQ21/TnXbBjFiB22NXBl7hmD+G5dcpSL1JJTu9wAAAACAAAAAAAAAAAAAAABABE92wAAAAAAAAAAuXL+JNx4//zY7dpkCZ3yHKpSNgAA19zlF83T+KJsK1MAAAAXSHbkGAAIGHoAAAAJAAAAAgAAAAEAAAAAIHtDAQ21/TnXbBjFiB22NXBl7hmD+G5dcpSL1JJTu9wAAAABAAAAFWh0dHBzOi8vd3d3LmhvbWUub3JnLwAAAAMBAgMAAAABAAAAACB7QwENtf0512wYxYgdtjVwZe4Zg/huXXKUi9SSU7vcAAAAAgAAAAAAAAAA", + hash: "397b208adb3d484d14ddd3237422baae0b6bd1e8feb3c970147bc6bcc493d112", + index: 0, + sequence: 49, + expected: []effect{ + { + address: "GC4XF7RE3R4P77GY5XNGICM56IOKUURWAAANPXHFC7G5H6FCNQVVH3OH", + effectType: history.EffectDataRemoved, + operationID: int64(210453401601), + order: uint32(1), + details: map[string]interface{}{ + "name": xdr.String64("hello"), + }, + }, + }, + }, + { + desc: "manageData - data updated", + envelopeXDR: "AAAAAKO5w1Op9wij5oMFtCTUoGO9YgewUKQyeIw1g/L0mMP+AAAAZAAALbYAADNjAAAAAQAAAAAAAAAAAAAAAF4WVfgAAAAAAAAAAQAAAAEAAAAAOO6NdKTWKbGao6zsPag+izHxq3eUPLiwjREobLhQAmQAAAAKAAAAOEdDUjNUUTJUVkgzUVJJN0dRTUMzSUpHVVVCUjMyWVFIV0JJS0lNVFlSUTJZSDRYVVREQjc1VUtFAAAAAQAAABQxNTc4NTIxMjA0XzI5MzI5MDI3OAAAAAAAAAAC0oPafQAAAEAcsS0iq/t8i+p85xwLsRy8JpRNEeqobEC5yuhO9ouVf3PE0VjLqv8sDd0St4qbtXU5fqlHd49R9CR+z7tiRLEB9JjD/gAAAEBmaa9sGxQhEhrakzXcSNpMbR4nox/Ha0p/1sI4tabNEzjgYLwKMn1U9tIdVvKKDwE22jg+CI2FlPJ3+FJPmKUA", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADABEK2wAAAAAAAAAAo7nDU6n3CKPmgwW0JNSgY71iB7BQpDJ4jDWD8vSYw/4AAAAXSGLVVAAALbYAADNiAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABABEK2wAAAAAAAAAAo7nDU6n3CKPmgwW0JNSgY71iB7BQpDJ4jDWD8vSYw/4AAAAXSGLVVAAALbYAADNjAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAEQqbAAAAAwAAAAA47o10pNYpsZqjrOw9qD6LMfGrd5Q8uLCNEShsuFACZAAAADhHQ1IzVFEyVFZIM1FSSTdHUU1DM0lKR1VVQlIzMllRSFdCSUtJTVRZUlEyWUg0WFVUREI3NVVLRQAAABQxNTc4NTIwODU4XzI1MjM5MTc2OAAAAAAAAAAAAAAAAQARCtsAAAADAAAAADjujXSk1imxmqOs7D2oPosx8at3lDy4sI0RKGy4UAJkAAAAOEdDUjNUUTJUVkgzUVJJN0dRTUMzSUpHVVVCUjMyWVFIV0JJS0lNVFlSUTJZSDRYVVREQjc1VUtFAAAAFDE1Nzg1MjEyMDRfMjkzMjkwMjc4AAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMAEQqbAAAAAAAAAACjucNTqfcIo+aDBbQk1KBjvWIHsFCkMniMNYPy9JjD/gAAABdIYtW4AAAttgAAM2IAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAEQrbAAAAAAAAAACjucNTqfcIo+aDBbQk1KBjvWIHsFCkMniMNYPy9JjD/gAAABdIYtVUAAAttgAAM2IAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "c60b74a14b628d06d3683db8b36ce81344967ac13bc433124bcef44115fbb257", + index: 0, + sequence: 49, + expected: []effect{ + { + address: "GA4O5DLUUTLCTMM2UOWOYPNIH2FTD4NLO6KDZOFQRUISQ3FYKABGJLPC", + effectType: history.EffectDataUpdated, + operationID: int64(210453401601), + order: uint32(1), + details: map[string]interface{}{ + "name": xdr.String64("GCR3TQ2TVH3QRI7GQMC3IJGUUBR32YQHWBIKIMTYRQ2YH4XUTDB75UKE"), + "value": "MTU3ODUyMTIwNF8yOTMyOTAyNzg=", + }, + }, + }, + }, + { + desc: "bumpSequence - new_seq is the same as current sequence", + envelopeXDR: "AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgDAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AwAAAAAAAAABHK0SlAAAAECcI6ex0Dq6YAh6aK14jHxuAvhvKG2+NuzboAKrfYCaC1ZSQ77BYH/5MghPX97JO9WXV17ehNK7d0umxBgaJj8A", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=", + metaXDR: 
"AAAAAQAAAAIAAAADAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgCAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgDAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA8AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+LUAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA9AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+JwAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "bc11b5c41de791369fd85fa1ccf01c35c20df5f98ff2f75d02ead61bfd520e21", + index: 0, + sequence: 61, + expected: []effect{}, + }, + { + + desc: "bumpSequence - new_seq is lower than current sequence", + envelopeXDR: "AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgCAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AQAAAAAAAAABHK0SlAAAAEC4H7TDntOUXDMg4MfoCPlbLRQZH7VwNpUHMvtnRWqWIiY/qnYYu0bvgYUVtoFOOeqElRKLYqtOW3Fz9iKl0WQJ", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAPAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvi1AAAAEXZZLgBAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAPAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvi1AAAAEXZZLgCAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA7AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+M4AAAARdlkuAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA8AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+LUAAAARdlkuAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "c8132b95c0063cafd20b26d27f06c12e688609d2d9d3724b840821e861870b8e", + index: 0, + sequence: 60, + expected: []effect{}, + }, + { + + desc: "bumpSequence - new_seq is higher than current sequence", + envelopeXDR: "AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAADkAAAABAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AAAAAAAAAAABHK0SlAAAAEDq0JVhKNIq9ag0sR+R/cv3d9tEuaYEm2BazIzILRdGj9alaVMZBhxoJ3ZIpP3rraCJzyoKZO+p5HBVe10a2+UG", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAOgAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjnAAAADkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOgAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjnAAAADkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAAOQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAARdlkuAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "829d53f2dceebe10af8007564b0aefde819b95734ad431df84270651e7ed8a90", + index: 0, + sequence: 58, + expected: []effect{ + { + address: "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", + effectType: history.EffectSequenceBumped, + operationID: int64(249108107265), + order: uint32(1), + details: map[string]interface{}{ + "new_seq": xdr.SequenceNumber(300000000000), + }, + }, + }, + }, + { + desc: "revokeSponsorship (signer)", + envelopeXDR: getRevokeSponsorshipEnvelopeXDR(t), + resultXDR: "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + metaXDR: revokeSponsorshipMeta, + 
feeChangesXDR: "AAAAAA==", + hash: "a41d1c8cdf515203ac5a10d945d5023325076b23dbe7d65ae402cd5f8cd9f891", + index: 0, + sequence: 58, + expected: revokeSponsorshipEffects, + }, + { + desc: "Failed transaction", + envelopeXDR: "AAAAAPCq/iehD2ASJorqlTyEt0usn2WG3yF4w9xBkgd4itu6AAAAZAAMpboAADNGAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABVEVTVAAAAAAObS6P1g8rj8sCVzRQzYgHhWFkbh1oV+1s47LFPstSpQAAAAAAAAACVAvkAAAAAfcAAAD6AAAAAAAAAAAAAAAAAAAAAXiK27oAAABAHHk5mvM6xBRsvu3RBvzzPIb8GpXaL2M7InPn65LIhFJ2RnHIYrpP6ufZc6SUtKqChNRaN4qw5rjwFXNezmrBCw==", + resultXDR: "AAAAAAAAAGT/////AAAAAQAAAAAAAAAD////+QAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADABDLGAAAAAAAAAAA8Kr+J6EPYBImiuqVPIS3S6yfZYbfIXjD3EGSB3iK27oAAAB2ucIg2AAMpboAADNFAAAA4wAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAABHT9ws4fAAAAAAAAAAAAAAAAAAAAAAAAAAEAEMsYAAAAAAAAAADwqv4noQ9gEiaK6pU8hLdLrJ9lht8heMPcQZIHeIrbugAAAHa5wiDYAAylugAAM0YAAADjAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAEdP3Czh8AAAAAAAAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAEMsCAAAAAAAAAADwqv4noQ9gEiaK6pU8hLdLrJ9lht8heMPcQZIHeIrbugAAAHa5wiE8AAylugAAM0UAAADjAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAEdP3Czh8AAAAAAAAAAAAAAAAAAAAAAAAAAQAQyxgAAAAAAAAAAPCq/iehD2ASJorqlTyEt0usn2WG3yF4w9xBkgd4itu6AAAAdrnCINgADKW6AAAzRQAAAOMAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAR0/cLOHwAAAAAAAAAAAAAAAAAAAAA=", + hash: "24206737a02f7f855c46e367418e38c223f897792c76bbfb948e1b0dbd695f8b", + index: 0, + sequence: 58, + expected: []effect{}, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tt := assert.New(t) + transaction := BuildLedgerTransaction( + t, + TestTransaction{ + Index: 1, + EnvelopeXDR: tc.envelopeXDR, + ResultXDR: tc.resultXDR, + MetaXDR: tc.metaXDR, + FeeChangesXDR: tc.feeChangesXDR, + Hash: tc.hash, + }, + ) + + operation := transactionOperationWrapper{ + index: tc.index, + transaction: transaction, + operation: transaction.Envelope.Operations()[tc.index], + ledgerSequence: tc.sequence, + } + + effects, err := operation.effects() + tt.NoError(err) + tt.Equal(tc.expected, effects) + }) + } +} + +func TestOperationEffectsSetOptionsSignersOrder(t *testing.T) { + tt := assert.New(t) + transaction := ingest.LedgerTransaction{ + UnsafeMeta: createTransactionMeta([]xdr.OperationMeta{ + { + Changes: []xdr.LedgerEntryChange{ + // State + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + { + Key: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + Weight: 10, + }, + }, + }, + }, + }, + }, + // Updated + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 16, + }, + { + Key: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + Weight: 15, + }, + { + Key: xdr.MustSigner("GCR3TQ2TVH3QRI7GQMC3IJGUUBR32YQHWBIKIMTYRQ2YH4XUTDB75UKE"), + Weight: 14, + }, + { + Key: xdr.MustSigner("GA4O5DLUUTLCTMM2UOWOYPNIH2FTD4NLO6KDZOFQRUISQ3FYKABGJLPC"), + Weight: 17, + }, + }, + }, + }, + }, + }, + }, + }, + }), + } + 
transaction.Index = 1 + transaction.Envelope.Type = xdr.EnvelopeTypeEnvelopeTypeTx + aid := xdr.MustAddress("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV") + transaction.Envelope.V1 = &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: aid.ToMuxedAccount(), + }, + } + + operation := transactionOperationWrapper{ + index: 0, + transaction: transaction, + operation: xdr.Operation{ + Body: xdr.OperationBody{ + Type: xdr.OperationTypeSetOptions, + SetOptionsOp: &xdr.SetOptionsOp{}, + }, + }, + ledgerSequence: 46, + } + + effects, err := operation.effects() + tt.NoError(err) + expected := []effect{ + { + address: "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + operationID: int64(197568499713), + details: map[string]interface{}{ + "public_key": "GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS", + "weight": int32(15), + }, + effectType: history.EffectSignerUpdated, + order: uint32(1), + }, + { + address: "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + operationID: int64(197568499713), + details: map[string]interface{}{ + "public_key": "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + "weight": int32(16), + }, + effectType: history.EffectSignerUpdated, + order: uint32(2), + }, + { + address: "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + operationID: int64(197568499713), + details: map[string]interface{}{ + "public_key": "GA4O5DLUUTLCTMM2UOWOYPNIH2FTD4NLO6KDZOFQRUISQ3FYKABGJLPC", + "weight": int32(17), + }, + effectType: history.EffectSignerCreated, + order: uint32(3), + }, + { + address: "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + operationID: int64(197568499713), + details: map[string]interface{}{ + "public_key": "GCR3TQ2TVH3QRI7GQMC3IJGUUBR32YQHWBIKIMTYRQ2YH4XUTDB75UKE", + "weight": int32(14), + }, + effectType: history.EffectSignerCreated, + order: uint32(4), + }, + } + tt.Equal(expected, effects) +} + +// Regression for https://github.com/stellar/go/issues/2136 +func TestOperationEffectsSetOptionsSignersNoUpdated(t *testing.T) { + tt := assert.New(t) + transaction := ingest.LedgerTransaction{ + UnsafeMeta: createTransactionMeta([]xdr.OperationMeta{ + { + Changes: []xdr.LedgerEntryChange{ + // State + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + { + Key: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + Weight: 10, + }, + { + Key: xdr.MustSigner("GA4O5DLUUTLCTMM2UOWOYPNIH2FTD4NLO6KDZOFQRUISQ3FYKABGJLPC"), + Weight: 17, + }, + }, + }, + }, + }, + }, + // Updated + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 16, + }, + { + Key: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + Weight: 10, + }, + { + Key: xdr.MustSigner("GCR3TQ2TVH3QRI7GQMC3IJGUUBR32YQHWBIKIMTYRQ2YH4XUTDB75UKE"), + Weight: 14, + }, + }, + }, + }, + }, + }, + }, + }, + }), + } + 
transaction.Index = 1 + transaction.Envelope.Type = xdr.EnvelopeTypeEnvelopeTypeTx + aid := xdr.MustAddress("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV") + transaction.Envelope.V1 = &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: aid.ToMuxedAccount(), + }, + } + + operation := transactionOperationWrapper{ + index: 0, + transaction: transaction, + operation: xdr.Operation{ + Body: xdr.OperationBody{ + Type: xdr.OperationTypeSetOptions, + SetOptionsOp: &xdr.SetOptionsOp{}, + }, + }, + ledgerSequence: 46, + } + + effects, err := operation.effects() + tt.NoError(err) + expected := []effect{ + { + address: "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + operationID: int64(197568499713), + details: map[string]interface{}{ + "public_key": "GA4O5DLUUTLCTMM2UOWOYPNIH2FTD4NLO6KDZOFQRUISQ3FYKABGJLPC", + }, + effectType: history.EffectSignerRemoved, + order: uint32(1), + }, + { + address: "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + operationID: int64(197568499713), + details: map[string]interface{}{ + "public_key": "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + "weight": int32(16), + }, + effectType: history.EffectSignerUpdated, + order: uint32(2), + }, + { + address: "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + operationID: int64(197568499713), + details: map[string]interface{}{ + "public_key": "GCR3TQ2TVH3QRI7GQMC3IJGUUBR32YQHWBIKIMTYRQ2YH4XUTDB75UKE", + "weight": int32(14), + }, + effectType: history.EffectSignerCreated, + order: uint32(3), + }, + } + tt.Equal(expected, effects) +} + +func TestOperationRegressionAccountTrustItself(t *testing.T) { + tt := assert.New(t) + // NOTE: when an account trusts itself, the transaction is successful but + // no ledger entries are actually modified. 
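+ // The transaction meta below therefore contains no operation changes, and the test expects an empty effects slice.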
+ transaction := ingest.LedgerTransaction{ + UnsafeMeta: createTransactionMeta([]xdr.OperationMeta{}), + } + transaction.Index = 1 + transaction.Envelope.Type = xdr.EnvelopeTypeEnvelopeTypeTx + aid := xdr.MustAddress("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV") + transaction.Envelope.V1 = &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: aid.ToMuxedAccount(), + }, + } + operation := transactionOperationWrapper{ + index: 0, + transaction: transaction, + operation: xdr.Operation{ + Body: xdr.OperationBody{ + Type: xdr.OperationTypeChangeTrust, + ChangeTrustOp: &xdr.ChangeTrustOp{ + Line: xdr.MustNewCreditAsset("COP", "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV").ToChangeTrustAsset(), + Limit: xdr.Int64(1000), + }, + }, + }, + ledgerSequence: 46, + } + + effects, err := operation.effects() + tt.NoError(err) + tt.Equal([]effect{}, effects) +} + +func TestOperationEffectsAllowTrustAuthorizedToMaintainLiabilities(t *testing.T) { + tt := assert.New(t) + asset := xdr.Asset{} + allowTrustAsset, err := asset.ToAssetCode("COP") + tt.NoError(err) + aid := xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + source := aid.ToMuxedAccount() + op := xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeAllowTrust, + AllowTrustOp: &xdr.AllowTrustOp{ + Trustor: xdr.MustAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"), + Asset: allowTrustAsset, + Authorize: xdr.Uint32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag), + }, + }, + } + + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + + effects, err := operation.effects() + tt.NoError(err) + + expected := []effect{ + { + address: "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + operationID: 4294967297, + details: map[string]interface{}{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "trustor": "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + }, + effectType: history.EffectTrustlineAuthorizedToMaintainLiabilities, + order: uint32(1), + }, + { + address: "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + operationID: int64(4294967297), + details: map[string]interface{}{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "authorized_to_maintain_liabilites": true, + "trustor": "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + }, + effectType: history.EffectTrustlineFlagsUpdated, + order: uint32(2), + }, + } + tt.Equal(expected, effects) +} + +func TestOperationEffectsClawback(t *testing.T) { + tt := assert.New(t) + aid := xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + source := aid.ToMuxedAccount() + op := xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeClawback, + ClawbackOp: &xdr.ClawbackOp{ + Asset: xdr.MustNewCreditAsset("COP", "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + From: xdr.MustMuxedAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"), + Amount: 34, + }, + }, + } + + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: 
&xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + + effects, err := operation.effects() + tt.NoError(err) + + expected := []effect{ + { + address: "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + operationID: 4294967297, + details: map[string]interface{}{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "amount": "0.0000034", + }, + effectType: history.EffectAccountCredited, + order: uint32(1), + }, + { + address: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + operationID: 4294967297, + details: map[string]interface{}{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "amount": "0.0000034", + }, + effectType: history.EffectAccountDebited, + order: uint32(2), + }, + } + tt.Equal(expected, effects) +} + +func TestOperationEffectsClawbackClaimableBalance(t *testing.T) { + tt := assert.New(t) + aid := xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + source := aid.ToMuxedAccount() + var balanceID xdr.ClaimableBalanceId + xdr.SafeUnmarshalBase64("AAAAANoNV9p9SFDn/BDSqdDrxzH3r7QFdMAzlbF9SRSbkfW+", &balanceID) + op := xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeClawbackClaimableBalance, + ClawbackClaimableBalanceOp: &xdr.ClawbackClaimableBalanceOp{ + BalanceId: balanceID, + }, + }, + } + + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + + effects, err := operation.effects() + tt.NoError(err) + + expected := []effect{ + { + address: "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + operationID: 4294967297, + details: map[string]interface{}{ + "balance_id": "00000000da0d57da7d4850e7fc10d2a9d0ebc731f7afb40574c03395b17d49149b91f5be", + }, + effectType: history.EffectClaimableBalanceClawedBack, + order: uint32(1), + }, + } + tt.Equal(expected, effects) +} + +func TestOperationEffectsSetTrustLineFlags(t *testing.T) { + tt := assert.New(t) + aid := xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + source := aid.ToMuxedAccount() + trustor := xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + setFlags := xdr.Uint32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag) + clearFlags := xdr.Uint32(xdr.TrustLineFlagsTrustlineClawbackEnabledFlag | xdr.TrustLineFlagsAuthorizedFlag) + op := xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeSetTrustLineFlags, + SetTrustLineFlagsOp: &xdr.SetTrustLineFlagsOp{ + Trustor: trustor, + Asset: xdr.MustNewCreditAsset("USD", "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + ClearFlags: clearFlags, + SetFlags: setFlags, + }, + }, + } + + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + + effects, err := operation.effects() + tt.NoError(err) + + expected := []effect{ + { + address: "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + operationID: 4294967297, + details: map[string]interface{}{ + "asset_code": "USD", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", 
+ "asset_type": "credit_alphanum4", + "authorized_flag": false, + "authorized_to_maintain_liabilites": true, + "clawback_enabled_flag": false, + "trustor": "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + }, + effectType: history.EffectTrustlineFlagsUpdated, + order: uint32(1), + }, + } + tt.Equal(expected, effects) +} + +type CreateClaimableBalanceEffectsTestSuite struct { + suite.Suite + ops []xdr.Operation + tx ingest.LedgerTransaction +} + +func (s *CreateClaimableBalanceEffectsTestSuite) SetupTest() { + aid := xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + source := aid.ToMuxedAccount() + var balanceIDOp0, balanceIDOp1 xdr.ClaimableBalanceId + xdr.SafeUnmarshalBase64("AAAAANoNV9p9SFDn/BDSqdDrxzH3r7QFdMAzlbF9SRSbkfW+", &balanceIDOp0) + xdr.SafeUnmarshalBase64("AAAAALHcX0PDa9UefSAzitC6vQOUr802phH8OF2ahLzg6j1D", &balanceIDOp1) + cb0 := xdr.ClaimableBalanceEntry{ + BalanceId: balanceIDOp0, + Amount: xdr.Int64(100000000), + Asset: xdr.MustNewNativeAsset(), + Claimants: []xdr.Claimant{ + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY"), + + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + }, + Ext: xdr.ClaimableBalanceEntryExt{ + V: 1, + V1: &xdr.ClaimableBalanceEntryExtensionV1{ + Flags: xdr.Uint32(xdr.ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag), + }, + }, + } + relBefore := xdr.Int64(1000) + cb1 := xdr.ClaimableBalanceEntry{ + BalanceId: balanceIDOp1, + Amount: xdr.Int64(200000000), + Asset: xdr.MustNewCreditAsset("USD", "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + Claimants: []xdr.Claimant{ + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2"), + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateBeforeRelativeTime, + RelBefore: &relBefore, + }, + }, + }, + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"), + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + }, + } + s.ops = []xdr.Operation{ + { + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeCreateClaimableBalance, + CreateClaimableBalanceOp: &xdr.CreateClaimableBalanceOp{ + Amount: cb0.Amount, + Asset: cb0.Asset, + Claimants: cb0.Claimants, + }, + }, + }, + { + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeCreateClaimableBalance, + CreateClaimableBalanceOp: &xdr.CreateClaimableBalanceOp{ + Amount: cb1.Amount, + Asset: cb1.Asset, + Claimants: cb1.Claimants, + }, + }, + }, + } + + s.tx = ingest.LedgerTransaction{ + Index: 0, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + Operations: s.ops, + }, + }, + }, + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: []xdr.OperationMeta{ + { + Changes: []xdr.LedgerEntryChange{ + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &cb0, + }, + }, + }, + }, + }, + { + Changes: []xdr.LedgerEntryChange{ + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: 
&xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &cb1, + }, + }, + }, + }, + }, + }, + }, + }, + } +} +func (s *CreateClaimableBalanceEffectsTestSuite) TestEffects() { + relBefore := xdr.Int64(1000) + testCases := []struct { + desc string + op xdr.Operation + expected []effect + }{ + { + desc: "claimable balance with native asset", + op: s.ops[0], + expected: []effect{ + { + address: "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + details: map[string]interface{}{ + "asset": "native", + "amount": "10.0000000", + "balance_id": "00000000da0d57da7d4850e7fc10d2a9d0ebc731f7afb40574c03395b17d49149b91f5be", + "claimable_balance_clawback_enabled_flag": true, + }, + effectType: history.EffectClaimableBalanceCreated, + operationID: int64(4294967297), + order: uint32(1), + }, + { + address: "GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY", + details: map[string]interface{}{ + "asset": "native", + "amount": "10.0000000", + "balance_id": "00000000da0d57da7d4850e7fc10d2a9d0ebc731f7afb40574c03395b17d49149b91f5be", + "predicate": xdr.ClaimPredicate{Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional}, + }, + effectType: history.EffectClaimableBalanceClaimantCreated, + operationID: int64(4294967297), + order: uint32(2), + }, + { + address: "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + details: map[string]interface{}{ + "amount": "10.0000000", + "asset_type": "native", + }, + effectType: history.EffectAccountDebited, + operationID: int64(4294967297), + order: uint32(3), + }, + }, + }, + { + desc: "claimable balance with issued asset", + op: s.ops[1], + expected: []effect{ + { + address: "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + details: map[string]interface{}{ + "asset": "USD:GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "amount": "20.0000000", + "balance_id": "00000000b1dc5f43c36bd51e7d20338ad0babd0394afcd36a611fc385d9a84bce0ea3d43", + }, + effectType: history.EffectClaimableBalanceCreated, + operationID: int64(4294967298), + order: uint32(1), + }, + { + address: "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + details: map[string]interface{}{ + "asset": "USD:GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "amount": "20.0000000", + "balance_id": "00000000b1dc5f43c36bd51e7d20338ad0babd0394afcd36a611fc385d9a84bce0ea3d43", + // Make sure data ingested from op body (rel_before) + "predicate": xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateBeforeRelativeTime, + RelBefore: &relBefore, + }, + }, + effectType: history.EffectClaimableBalanceClaimantCreated, + operationID: int64(4294967298), + order: uint32(2), + }, + { + address: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + details: map[string]interface{}{ + "asset": "USD:GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "amount": "20.0000000", + "balance_id": "00000000b1dc5f43c36bd51e7d20338ad0babd0394afcd36a611fc385d9a84bce0ea3d43", + "predicate": xdr.ClaimPredicate{Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional}, + }, + effectType: history.EffectClaimableBalanceClaimantCreated, + operationID: int64(4294967298), + order: uint32(3), + }, + { + address: "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + details: map[string]interface{}{ + "amount": "20.0000000", + "asset_code": "USD", + "asset_type": "credit_alphanum4", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + }, + effectType: 
history.EffectAccountDebited, + operationID: int64(4294967298), + order: uint32(4), + }, + }, + }, + } + for i, tc := range testCases { + s.T().Run(tc.desc, func(t *testing.T) { + operation := transactionOperationWrapper{ + index: uint32(i), + transaction: s.tx, + operation: tc.op, + ledgerSequence: 1, + } + + effects, err := operation.effects() + s.Assert().NoError(err) + s.Assert().Equal(tc.expected, effects) + }) + } +} + +func TestCreateClaimableBalanceEffectsTestSuite(t *testing.T) { + suite.Run(t, new(CreateClaimableBalanceEffectsTestSuite)) +} + +type ClaimClaimableBalanceEffectsTestSuite struct { + suite.Suite + ops []xdr.Operation + tx ingest.LedgerTransaction +} + +func (s *ClaimClaimableBalanceEffectsTestSuite) SetupTest() { + var balanceIDOp1, balanceIDOp1Meta, balanceIDOp2, balanceIDOp2Meta xdr.ClaimableBalanceId + xdr.SafeUnmarshalBase64("AAAAANoNV9p9SFDn/BDSqdDrxzH3r7QFdMAzlbF9SRSbkfW+", &balanceIDOp1) + xdr.SafeUnmarshalBase64("AAAAANoNV9p9SFDn/BDSqdDrxzH3r7QFdMAzlbF9SRSbkfW+", &balanceIDOp1Meta) + xdr.SafeUnmarshalBase64("AAAAALHcX0PDa9UefSAzitC6vQOUr802phH8OF2ahLzg6j1D", &balanceIDOp2) + xdr.SafeUnmarshalBase64("AAAAALHcX0PDa9UefSAzitC6vQOUr802phH8OF2ahLzg6j1D", &balanceIDOp2Meta) + + aid := xdr.MustAddress("GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY") + claimant1 := aid.ToMuxedAccount() + aid = xdr.MustAddress("GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2") + claimant2 := aid.ToMuxedAccount() + s.ops = []xdr.Operation{ + { + SourceAccount: &claimant1, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeClaimClaimableBalance, + ClaimClaimableBalanceOp: &xdr.ClaimClaimableBalanceOp{ + BalanceId: balanceIDOp1, + }, + }, + }, + { + SourceAccount: &claimant2, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeClaimClaimableBalance, + ClaimClaimableBalanceOp: &xdr.ClaimClaimableBalanceOp{ + BalanceId: balanceIDOp2, + }, + }, + }, + } + + s.tx = ingest.LedgerTransaction{ + Index: 0, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + Operations: s.ops, + }, + }, + }, + Result: xdr.TransactionResultPair{ + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Results: &[]xdr.OperationResult{ + { + Code: xdr.OperationResultCodeOpInner, + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeClaimClaimableBalance, + ClaimClaimableBalanceResult: &xdr.ClaimClaimableBalanceResult{ + Code: xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceSuccess, + }, + }, + }, + { + Code: xdr.OperationResultCodeOpInner, + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeClaimClaimableBalance, + ClaimClaimableBalanceResult: &xdr.ClaimClaimableBalanceResult{ + Code: xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceSuccess, + }, + }, + }, + }, + }, + }, + }, + FeeChanges: xdr.LedgerEntryChanges{}, + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: []xdr.OperationMeta{ + // op1 + { + Changes: xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.ClaimableBalanceEntry{ + BalanceId: balanceIDOp1Meta, + Amount: xdr.Int64(100000000), + Asset: xdr.MustNewNativeAsset(), + Claimants: []xdr.Claimant{ + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY"), + + 
Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + }, + Ext: xdr.ClaimableBalanceEntryExt{ + V: 1, + V1: &xdr.ClaimableBalanceEntryExtensionV1{ + Flags: xdr.Uint32(xdr.ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag), + }, + }, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved, + Removed: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.LedgerKeyClaimableBalance{ + BalanceId: balanceIDOp1Meta, + }, + }, + }, + }, + }, + // op2 + { + Changes: xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.ClaimableBalanceEntry{ + BalanceId: balanceIDOp2Meta, + Amount: xdr.Int64(200000000), + Asset: xdr.MustNewCreditAsset("USD", "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + Claimants: []xdr.Claimant{ + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2"), + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"), + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + }, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved, + Removed: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.LedgerKeyClaimableBalance{ + BalanceId: balanceIDOp2Meta, + }, + }, + }, + }, + }, + }, + }, + }, + } +} +func (s *ClaimClaimableBalanceEffectsTestSuite) TestEffects() { + testCases := []struct { + desc string + op xdr.Operation + expected []effect + }{ + { + desc: "claimable balance with native asset", + op: s.ops[0], + expected: []effect{ + { + address: "GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY", + details: map[string]interface{}{ + "asset": "native", + "amount": "10.0000000", + "balance_id": "00000000da0d57da7d4850e7fc10d2a9d0ebc731f7afb40574c03395b17d49149b91f5be", + "claimable_balance_clawback_enabled_flag": true, + }, + effectType: history.EffectClaimableBalanceClaimed, + operationID: int64(4294967297), + order: uint32(1), + }, + { + address: "GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY", + details: map[string]interface{}{ + "asset_type": "native", + "amount": "10.0000000", + }, + effectType: history.EffectAccountCredited, + operationID: int64(4294967297), + order: uint32(2), + }, + }, + }, + { + desc: "claimable balance with issued asset", + op: s.ops[1], + expected: []effect{ + { + address: "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + details: map[string]interface{}{ + "asset": "USD:GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "amount": "20.0000000", + "balance_id": "00000000b1dc5f43c36bd51e7d20338ad0babd0394afcd36a611fc385d9a84bce0ea3d43", + }, + effectType: history.EffectClaimableBalanceClaimed, + operationID: int64(4294967298), + order: uint32(1), + }, + { + address: "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + details: map[string]interface{}{ + "amount": "20.0000000", + "asset_code": "USD", + "asset_type": "credit_alphanum4", + "asset_issuer": 
"GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + }, + effectType: history.EffectAccountCredited, + operationID: int64(4294967298), + order: uint32(2), + }, + }, + }, + } + for i, tc := range testCases { + s.T().Run(tc.desc, func(t *testing.T) { + operation := transactionOperationWrapper{ + index: uint32(i), + transaction: s.tx, + operation: tc.op, + ledgerSequence: 1, + } + + effects, err := operation.effects() + s.Assert().NoError(err) + s.Assert().Equal(tc.expected, effects) + }) + } +} + +func TestClaimClaimableBalanceEffectsTestSuite(t *testing.T) { + suite.Run(t, new(ClaimClaimableBalanceEffectsTestSuite)) +} + +func TestTrustlineSponsorhipEffects(t *testing.T) { + source := xdr.MustMuxedAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + usdAsset := xdr.MustNewCreditAsset("USD", "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + poolIDStr := "19cc788419412926a11049b9fb1f87906b8f02bc6bf8f73d8fd347ede0b79fa5" + var poolID xdr.PoolId + poolIDBytes, err := hex.DecodeString(poolIDStr) + assert.NoError(t, err) + copy(poolID[:], poolIDBytes) + baseAssetTrustLineEntry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.TrustLineEntry{ + AccountId: source.ToAccountId(), + Asset: usdAsset.ToTrustLineAsset(), + Balance: 100, + Limit: 1000, + Flags: 0, + }, + }, + } + baseLiquidityPoolTrustLineEntry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.TrustLineEntry{ + AccountId: source.ToAccountId(), + Asset: xdr.TrustLineAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPoolId: &poolID, + }, + Balance: 100, + Limit: 1000, + Flags: 0, + }, + }, + } + + sponsor1 := xdr.MustAddress("GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2") + sponsor2 := xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + withSponsor := func(le *xdr.LedgerEntry, accID *xdr.AccountId) *xdr.LedgerEntry { + le2 := *le + le2.Ext = xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: accID, + }, + } + return &le2 + } + + changes := xdr.LedgerEntryChanges{ + // create asset sponsorship + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &baseAssetTrustLineEntry, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: withSponsor(&baseAssetTrustLineEntry, &sponsor1), + }, + // update asset sponsorship + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: withSponsor(&baseAssetTrustLineEntry, &sponsor1), + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: withSponsor(&baseAssetTrustLineEntry, &sponsor2), + }, + // remove asset sponsorship + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: withSponsor(&baseAssetTrustLineEntry, &sponsor2), + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &baseAssetTrustLineEntry, + }, + + // create liquidity pool sponsorship + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &baseLiquidityPoolTrustLineEntry, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: withSponsor(&baseLiquidityPoolTrustLineEntry, &sponsor1), + }, + // update liquidity pool sponsorship + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + 
State: withSponsor(&baseLiquidityPoolTrustLineEntry, &sponsor1), + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: withSponsor(&baseLiquidityPoolTrustLineEntry, &sponsor2), + }, + // remove liquidity pool sponsorship + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: withSponsor(&baseLiquidityPoolTrustLineEntry, &sponsor2), + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &baseLiquidityPoolTrustLineEntry, + }, + } + expected := []effect{ + { + effectType: history.EffectTrustlineSponsorshipCreated, + order: 1, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + // `asset_type` set in `Effect.UnmarshalDetails` to prevent reingestion + "sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + }, + { + effectType: history.EffectTrustlineSponsorshipUpdated, + order: 2, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + // `asset_type` set in `Effect.UnmarshalDetails` to prevent reingestion + "former_sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + "new_sponsor": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + }, + }, + { + effectType: history.EffectTrustlineSponsorshipRemoved, + order: 3, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + // `asset_type` set in `Effect.UnmarshalDetails` to prevent reingestion + "former_sponsor": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + }, + }, + { + effectType: history.EffectTrustlineSponsorshipCreated, + order: 4, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "liquidity_pool_id": poolIDStr, + "asset_type": "liquidity_pool", + "sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + }, + { + effectType: history.EffectTrustlineSponsorshipUpdated, + order: 5, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "liquidity_pool_id": poolIDStr, + "asset_type": "liquidity_pool", + "former_sponsor": "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + "new_sponsor": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + }, + }, + { + effectType: history.EffectTrustlineSponsorshipRemoved, + order: 6, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "liquidity_pool_id": poolIDStr, + "asset_type": "liquidity_pool", + "former_sponsor": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + }, + }, + } + + // pick an operation with no intrinsic effects + // (the sponsosrhip effects are obtained from the changes, so it doesn't matter) + phonyOp := xdr.Operation{ + Body: xdr.OperationBody{ + Type: xdr.OperationTypeEndSponsoringFutureReserves, + }, + } + tx := ingest.LedgerTransaction{ + Index: 0, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: 
xdr.Transaction{ + SourceAccount: source, + Operations: []xdr.Operation{phonyOp}, + }, + }, + }, + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: []xdr.OperationMeta{{Changes: changes}}, + }, + }, + } + + operation := transactionOperationWrapper{ + index: 0, + transaction: tx, + operation: phonyOp, + ledgerSequence: 1, + } + + effects, err := operation.effects() + assert.NoError(t, err) + assert.Equal(t, expected, effects) + +} + +func TestLiquidityPoolEffects(t *testing.T) { + source := xdr.MustMuxedAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + usdAsset := xdr.MustNewCreditAsset("USD", "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + poolIDStr := "ea4e3e63a95fd840c1394f195722ffdcb2d0d4f0a26589c6ab557d81e6b0bf9d" + var poolID xdr.PoolId + poolIDBytes, err := hex.DecodeString(poolIDStr) + assert.NoError(t, err) + copy(poolID[:], poolIDBytes) + baseLiquidityPoolEntry := xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewNativeAsset(), + AssetB: usdAsset, + Fee: 20, + }, + ReserveA: 200, + ReserveB: 100, + TotalPoolShares: 1000, + PoolSharesTrustLineCount: 10, + }, + }, + } + baseState := xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &baseLiquidityPoolEntry, + }, + }, + } + updateState := func(cp xdr.LiquidityPoolEntryConstantProduct) xdr.LedgerEntryChange { + return xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &cp, + }, + }, + }, + }, + } + } + + testCases := []struct { + desc string + op xdr.OperationBody + result xdr.OperationResult + changes xdr.LedgerEntryChanges + expected []effect + }{ + { + desc: "liquidity pool creation", + op: xdr.OperationBody{ + Type: xdr.OperationTypeChangeTrust, + ChangeTrustOp: &xdr.ChangeTrustOp{ + Line: xdr.ChangeTrustAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPool: &xdr.LiquidityPoolParameters{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &baseLiquidityPoolEntry.Body.ConstantProduct.Params, + }, + }, + Limit: 1000, + }, + }, + changes: xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &baseLiquidityPoolEntry, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.TrustLineEntry{ + AccountId: source.ToAccountId(), + Asset: xdr.TrustLineAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPoolId: &poolID, + }, + }, + }, + }, + }, + }, + expected: []effect{ + { + effectType: history.EffectTrustlineCreated, + order: 1, + address: 
"GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "asset_type": "liquidity_pool_shares", + "limit": "0.0001000", + "liquidity_pool_id": poolIDStr, + }, + }, + { + effectType: history.EffectLiquidityPoolCreated, + order: 2, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "liquidity_pool": map[string]interface{}{ + "fee_bp": uint32(20), + "id": poolIDStr, + "reserves": []base.AssetAmount{ + { + "native", + "0.0000200", + }, + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000100", + }, + }, + "total_shares": "0.0001000", + "total_trustlines": "10", + "type": "constant_product", + }, + }, + }, + }, + }, + { + desc: "liquidity pool deposit", + op: xdr.OperationBody{ + Type: xdr.OperationTypeLiquidityPoolDeposit, + LiquidityPoolDepositOp: &xdr.LiquidityPoolDepositOp{ + LiquidityPoolId: poolID, + MaxAmountA: 100, + MaxAmountB: 200, + MinPrice: xdr.Price{ + N: 50, + D: 3, + }, + MaxPrice: xdr.Price{ + N: 100, + D: 2, + }, + }, + }, + changes: xdr.LedgerEntryChanges{ + baseState, + updateState(xdr.LiquidityPoolEntryConstantProduct{ + + Params: baseLiquidityPoolEntry.Body.ConstantProduct.Params, + ReserveA: baseLiquidityPoolEntry.Body.ConstantProduct.ReserveA + 50, + ReserveB: baseLiquidityPoolEntry.Body.ConstantProduct.ReserveB + 60, + TotalPoolShares: baseLiquidityPoolEntry.Body.ConstantProduct.TotalPoolShares + 10, + PoolSharesTrustLineCount: baseLiquidityPoolEntry.Body.ConstantProduct.PoolSharesTrustLineCount, + }), + }, + expected: []effect{ + { + effectType: history.EffectLiquidityPoolDeposited, + order: 1, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "liquidity_pool": map[string]interface{}{ + "fee_bp": uint32(20), + "id": poolIDStr, + "reserves": []base.AssetAmount{ + { + "native", + "0.0000250", + }, + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000160", + }, + }, + "total_shares": "0.0001010", + "total_trustlines": "10", + "type": "constant_product", + }, + "reserves_deposited": []base.AssetAmount{ + { + "native", + "0.0000050", + }, + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000060", + }, + }, + "shares_received": "0.0000010", + }, + }, + }, + }, + { + desc: "liquidity pool withdrawal", + op: xdr.OperationBody{ + Type: xdr.OperationTypeLiquidityPoolWithdraw, + LiquidityPoolWithdrawOp: &xdr.LiquidityPoolWithdrawOp{ + LiquidityPoolId: poolID, + Amount: 10, + MinAmountA: 10, + MinAmountB: 5, + }, + }, + changes: xdr.LedgerEntryChanges{ + baseState, + updateState(xdr.LiquidityPoolEntryConstantProduct{ + + Params: baseLiquidityPoolEntry.Body.ConstantProduct.Params, + ReserveA: baseLiquidityPoolEntry.Body.ConstantProduct.ReserveA - 11, + ReserveB: baseLiquidityPoolEntry.Body.ConstantProduct.ReserveB - 6, + TotalPoolShares: baseLiquidityPoolEntry.Body.ConstantProduct.TotalPoolShares - 10, + PoolSharesTrustLineCount: baseLiquidityPoolEntry.Body.ConstantProduct.PoolSharesTrustLineCount, + }), + }, + expected: []effect{ + { + effectType: history.EffectLiquidityPoolWithdrew, + order: 1, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "liquidity_pool": map[string]interface{}{ + "fee_bp": uint32(20), + "id": poolIDStr, + "reserves": []base.AssetAmount{ + { + "native", + 
"0.0000189", + }, + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000094", + }, + }, + "total_shares": "0.0000990", + "total_trustlines": "10", + "type": "constant_product", + }, + "reserves_received": []base.AssetAmount{ + { + "native", + "0.0000011", + }, + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000006", + }, + }, + "shares_redeemed": "0.0000010", + }, + }, + }, + }, + { + desc: "liquidity pool trade", + op: xdr.OperationBody{ + Type: xdr.OperationTypePathPaymentStrictSend, + PathPaymentStrictSendOp: &xdr.PathPaymentStrictSendOp{ + SendAsset: xdr.MustNewNativeAsset(), + SendAmount: 10, + Destination: xdr.MustMuxedAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + DestAsset: usdAsset, + DestMin: 5, + Path: nil, + }, + }, + changes: xdr.LedgerEntryChanges{ + baseState, + updateState(xdr.LiquidityPoolEntryConstantProduct{ + + Params: baseLiquidityPoolEntry.Body.ConstantProduct.Params, + ReserveA: baseLiquidityPoolEntry.Body.ConstantProduct.ReserveA - 11, + ReserveB: baseLiquidityPoolEntry.Body.ConstantProduct.ReserveB - 6, + TotalPoolShares: baseLiquidityPoolEntry.Body.ConstantProduct.TotalPoolShares - 10, + PoolSharesTrustLineCount: baseLiquidityPoolEntry.Body.ConstantProduct.PoolSharesTrustLineCount, + }), + }, + result: xdr.OperationResult{ + Code: xdr.OperationResultCodeOpInner, + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypePathPaymentStrictSend, + PathPaymentStrictSendResult: &xdr.PathPaymentStrictSendResult{ + Code: xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess, + Success: &xdr.PathPaymentStrictSendResultSuccess{ + Last: xdr.SimplePaymentResult{ + Destination: xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + Asset: xdr.MustNewCreditAsset("USD", "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + Amount: 5, + }, + Offers: []xdr.ClaimAtom{ + { + Type: xdr.ClaimAtomTypeClaimAtomTypeLiquidityPool, + LiquidityPool: &xdr.ClaimLiquidityAtom{ + LiquidityPoolId: poolID, + AssetSold: xdr.MustNewNativeAsset(), + AmountSold: 10, + AssetBought: xdr.MustNewCreditAsset("USD", "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + AmountBought: 5, + }, + }, + }, + }, + }, + }, + }, + expected: []effect{ + { + effectType: history.EffectAccountCredited, + order: 1, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "amount": "0.0000005", + "asset_code": "USD", + "asset_issuer": "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "asset_type": "credit_alphanum4", + }, + }, + { + effectType: history.EffectAccountDebited, + order: 2, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "amount": "0.0000010", + "asset_type": "native", + }, + }, + { + effectType: history.EffectLiquidityPoolTrade, + order: 3, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "bought": map[string]string{ + "amount": "0.0000005", + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + }, + "liquidity_pool": map[string]interface{}{ + "fee_bp": uint32(20), + "id": poolIDStr, + "reserves": []base.AssetAmount{ + { + "native", + "0.0000189", + }, + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000094", + }, + }, + "total_shares": "0.0000990", + "total_trustlines": "10", + 
"type": "constant_product", + }, + "sold": map[string]string{ + "amount": "0.0000010", + "asset": "native", + }, + }, + }, + }, + }, + { + desc: "liquidity pool revocation", + // Deauthorize an asset + // + // This scenario assumes that the asset being deauthorized is also part of a liquidity pool trustline + // from the same account. This results in a revocation (with a claimable balance being created). + // + // This scenario also assumes that the liquidity pool trustline was the last one, cause a liquidity pool removal. + op: xdr.OperationBody{ + Type: xdr.OperationTypeSetTrustLineFlags, + SetTrustLineFlagsOp: &xdr.SetTrustLineFlagsOp{ + Trustor: xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + Asset: usdAsset, + ClearFlags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + }, + }, + changes: xdr.LedgerEntryChanges{ + // Asset trustline update + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + Asset: usdAsset.ToTrustLineAsset(), + Balance: 5, + Limit: 100, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + Asset: usdAsset.ToTrustLineAsset(), + Balance: 5, + Limit: 100, + Flags: 0, + }, + }, + }, + }, + // Liquidity pool trustline removal + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + Asset: xdr.TrustLineAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPoolId: &poolID, + }, + Balance: 1000, + Limit: 2000, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved, + Removed: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.LedgerKeyTrustLine{ + AccountId: xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + Asset: xdr.TrustLineAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPoolId: &poolID, + }, + }, + }, + }, + // create claimable balance for USD asset as part of the revocation (in reality there would probably be another claimable + // balance crested for the native asset, but let's keep this simple) + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.ClaimableBalanceEntry{ + BalanceId: xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{0xa, 0xb}, + }, + Claimants: []xdr.Claimant{ + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + }, + Asset: usdAsset, + Amount: 100, + }, + 
}, + }, + }, + // Liquidity pool removal + baseState, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved, + Removed: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LedgerKeyLiquidityPool{ + LiquidityPoolId: poolID, + }, + }, + }, + }, + expected: []effect{ + { + effectType: history.EffectTrustlineFlagsUpdated, + order: 1, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "asset_code": "USD", + "asset_issuer": "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "asset_type": "credit_alphanum4", + "authorized_flag": false, + "trustor": "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + }, + }, + { + effectType: history.EffectClaimableBalanceCreated, + order: 2, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "amount": "0.0000100", + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "balance_id": "000000000a0b000000000000000000000000000000000000000000000000000000000000", + }, + }, + { + effectType: history.EffectClaimableBalanceClaimantCreated, + order: 3, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "amount": "0.0000100", + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "balance_id": "000000000a0b000000000000000000000000000000000000000000000000000000000000", + "predicate": xdr.ClaimPredicate{}, + }, + }, + { + effectType: history.EffectLiquidityPoolRevoked, + order: 4, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "liquidity_pool": map[string]interface{}{ + "fee_bp": uint32(20), + "id": poolIDStr, + "reserves": []base.AssetAmount{ + { + "native", + "0.0000200", + }, + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000100", + }, + }, + "total_shares": "0.0001000", + "total_trustlines": "10", + "type": "constant_product", + }, + "reserves_revoked": []map[string]string{ + { + "amount": "0.0000100", + "asset": "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "claimable_balance_id": "000000000a0b000000000000000000000000000000000000000000000000000000000000", + }, + }, + "shares_revoked": "0.0001000", + }, + }, + { + effectType: history.EffectLiquidityPoolRemoved, + order: 5, + address: "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + operationID: 4294967297, + details: map[string]interface{}{ + "liquidity_pool_id": poolIDStr, + }, + }, + }, + }, + } + for _, tc := range testCases { + + op := xdr.Operation{Body: tc.op} + tx := ingest.LedgerTransaction{ + Index: 0, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: source, + Operations: []xdr.Operation{op}, + }, + }, + }, + Result: xdr.TransactionResultPair{ + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Results: &[]xdr.OperationResult{ + tc.result, + }, + }, + }, + }, + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: []xdr.OperationMeta{{Changes: tc.changes}}, + }, + }, + } + + t.Run(tc.desc, func(t *testing.T) { + operation := transactionOperationWrapper{ + index: 0, + transaction: tx, + operation: op, + ledgerSequence: 1, + } + + effects, err := operation.effects() + 
assert.NoError(t, err) + assert.Equal(t, tc.expected, effects) + }) + } + +} diff --git a/services/horizon/internal/ingest/processors/ledgers_processor.go b/services/horizon/internal/ingest/processors/ledgers_processor.go new file mode 100644 index 0000000000..01c29b43d9 --- /dev/null +++ b/services/horizon/internal/ingest/processors/ledgers_processor.go @@ -0,0 +1,74 @@ +package processors + +import ( + "context" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type LedgersProcessor struct { + ledgersQ history.QLedgers + ledger xdr.LedgerHeaderHistoryEntry + ingestVersion int + successTxCount int + failedTxCount int + opCount int + txSetOpCount int +} + +func NewLedgerProcessor( + ledgerQ history.QLedgers, + ledger xdr.LedgerHeaderHistoryEntry, + ingestVersion int, +) *LedgersProcessor { + return &LedgersProcessor{ + ledger: ledger, + ledgersQ: ledgerQ, + ingestVersion: ingestVersion, + } +} + +func (p *LedgersProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) (err error) { + opCount := len(transaction.Envelope.Operations()) + p.txSetOpCount += opCount + if transaction.Result.Successful() { + p.successTxCount++ + p.opCount += opCount + } else { + p.failedTxCount++ + } + + return nil +} + +func (p *LedgersProcessor) Commit(ctx context.Context) error { + rowsAffected, err := p.ledgersQ.InsertLedger(ctx, + p.ledger, + p.successTxCount, + p.failedTxCount, + p.opCount, + p.txSetOpCount, + p.ingestVersion, + ) + + if err != nil { + return errors.Wrap(err, "Could not insert ledger") + } + + sequence := uint32(p.ledger.Header.LedgerSeq) + + if rowsAffected != 1 { + log.WithField("rowsAffected", rowsAffected). + WithField("sequence", sequence). 
+ Error("Invalid number of rows affected when ingesting new ledger") + return errors.Errorf( + "0 rows affected when ingesting new ledger: %v", + sequence, + ) + } + + return nil +} diff --git a/services/horizon/internal/ingest/processors/ledgers_processor_test.go b/services/horizon/internal/ingest/processors/ledgers_processor_test.go new file mode 100644 index 0000000000..05bd2c3c3b --- /dev/null +++ b/services/horizon/internal/ingest/processors/ledgers_processor_test.go @@ -0,0 +1,166 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" +) + +type LedgersProcessorTestSuiteLedger struct { + suite.Suite + processor *LedgersProcessor + mockQ *history.MockQLedgers + header xdr.LedgerHeaderHistoryEntry + successCount int + failedCount int + opCount int + ingestVersion int + txs []ingest.LedgerTransaction + txSetOpCount int +} + +func TestLedgersProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(LedgersProcessorTestSuiteLedger)) +} + +func createTransaction(successful bool, numOps int) ingest.LedgerTransaction { + code := xdr.TransactionResultCodeTxSuccess + if !successful { + code = xdr.TransactionResultCodeTxFailed + } + + operations := []xdr.Operation{} + op := xdr.BumpSequenceOp{BumpTo: 30000} + for i := 0; i < numOps; i++ { + operations = append(operations, xdr.Operation{ + Body: xdr.OperationBody{ + Type: xdr.OperationTypeBumpSequence, + BumpSequenceOp: &op, + }, + }) + } + sourceAID := xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + return ingest.LedgerTransaction{ + Result: xdr.TransactionResultPair{ + TransactionHash: xdr.Hash{}, + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: code, + InnerResultPair: &xdr.InnerTransactionResultPair{}, + Results: &[]xdr.OperationResult{}, + }, + }, + }, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: sourceAID.ToMuxedAccount(), + Operations: operations, + }, + }, + }, + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, numOps, numOps), + }, + }, + } +} + +func (s *LedgersProcessorTestSuiteLedger) SetupTest() { + s.mockQ = &history.MockQLedgers{} + s.ingestVersion = 100 + s.header = xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(20), + }, + } + s.processor = NewLedgerProcessor( + s.mockQ, + s.header, + s.ingestVersion, + ) + + s.txs = []ingest.LedgerTransaction{ + createTransaction(true, 1), + createTransaction(false, 3), + createTransaction(true, 4), + } + + s.successCount = 2 + s.failedCount = 1 + s.opCount = 5 + s.txSetOpCount = 8 +} + +func (s *LedgersProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) +} + +func (s *LedgersProcessorTestSuiteLedger) TestInsertLedgerSucceeds() { + ctx := context.Background() + s.mockQ.On( + "InsertLedger", + ctx, + s.header, + s.successCount, + s.failedCount, + s.opCount, + s.txSetOpCount, + s.ingestVersion, + ).Return(int64(1), nil) + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(ctx, tx) + s.Assert().NoError(err) + } + + err := s.processor.Commit(ctx) + 
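+	// Commit persists the counters accumulated above: 2 successful and 1 failed transaction,
+	// opCount 1+4=5 (successful transactions only) and txSetOpCount 1+3+4=8, i.e. the values
+	// wired into the InsertLedger expectation at the top of this test.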
s.Assert().NoError(err) +} + +func (s *LedgersProcessorTestSuiteLedger) TestInsertLedgerReturnsError() { + ctx := context.Background() + s.mockQ.On( + "InsertLedger", + ctx, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ).Return(int64(0), errors.New("transient error")) + + err := s.processor.Commit(ctx) + s.Assert().Error(err) + s.Assert().EqualError(err, "Could not insert ledger: transient error") +} + +func (s *LedgersProcessorTestSuiteLedger) TestInsertLedgerNoRowsAffected() { + ctx := context.Background() + s.mockQ.On( + "InsertLedger", + ctx, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ).Return(int64(0), nil) + + err := s.processor.Commit(ctx) + s.Assert().Error(err) + s.Assert().EqualError(err, "0 rows affected when ingesting new ledger: 20") +} diff --git a/services/horizon/internal/ingest/processors/liquidity_pools_change_processor.go b/services/horizon/internal/ingest/processors/liquidity_pools_change_processor.go new file mode 100644 index 0000000000..c5e5252280 --- /dev/null +++ b/services/horizon/internal/ingest/processors/liquidity_pools_change_processor.go @@ -0,0 +1,118 @@ +package processors + +import ( + "context" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type LiquidityPoolsChangeProcessor struct { + qLiquidityPools history.QLiquidityPools + cache *ingest.ChangeCompactor + sequence uint32 +} + +func NewLiquidityPoolsChangeProcessor(Q history.QLiquidityPools, sequence uint32) *LiquidityPoolsChangeProcessor { + p := &LiquidityPoolsChangeProcessor{ + qLiquidityPools: Q, + sequence: sequence, + } + p.reset() + return p +} + +func (p *LiquidityPoolsChangeProcessor) reset() { + p.cache = ingest.NewChangeCompactor() +} + +func (p *LiquidityPoolsChangeProcessor) ProcessChange(ctx context.Context, change ingest.Change) error { + if change.Type != xdr.LedgerEntryTypeLiquidityPool { + return nil + } + + err := p.cache.AddChange(change) + if err != nil { + return errors.Wrap(err, "error adding to ledgerCache") + } + + if p.cache.Size() > maxBatchSize { + err = p.Commit(ctx) + if err != nil { + return errors.Wrap(err, "error in Commit") + } + p.reset() + } + + return nil +} + +func (p *LiquidityPoolsChangeProcessor) Commit(ctx context.Context) error { + + changes := p.cache.GetChanges() + var lps []history.LiquidityPool + for _, change := range changes { + switch { + case change.Pre == nil && change.Post != nil: + // Created + lps = append(lps, p.ledgerEntryToRow(change.Post)) + case change.Pre != nil && change.Post == nil: + // Removed + lp := p.ledgerEntryToRow(change.Pre) + lp.Deleted = true + lp.LastModifiedLedger = p.sequence + lps = append(lps, lp) + default: + // Updated + lps = append(lps, p.ledgerEntryToRow(change.Post)) + } + } + + if len(lps) > 0 { + if err := p.qLiquidityPools.UpsertLiquidityPools(ctx, lps); err != nil { + return errors.Wrap(err, "error upserting liquidity pools") + } + } + + if p.sequence > compactionWindow { + // trim liquidity pools table by removing liquidity pools which were deleted before the cutoff ledger + if removed, err := p.qLiquidityPools.CompactLiquidityPools(ctx, p.sequence-compactionWindow); err != nil { + return errors.Wrap(err, "could not compact liquidity pools") + } else { + log.WithField("liquidity_pool_rows_removed", removed).Info("Trimmed liquidity pools table") + } + } + + return nil +} 
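All of the change-based processors added in this diff share the same ProcessChange/Commit contract. The sketch below is illustrative only and not part of this change: the streamChanges helper and the changeProcessor interface are invented names for the example, and error handling is trimmed to the essentials. It assumes an ingest.ChangeReader supplied by the caller; in Horizon the equivalent wiring lives in the ingest processor runner, so the point here is just the read/process/commit loop and the compaction cutoff applied at Commit time.

package example

import (
	"context"
	"io"

	"github.com/stellar/go/ingest"
)

// changeProcessor mirrors the ProcessChange/Commit surface shared by the
// change-based processors in this package (offers, liquidity pools, ...).
type changeProcessor interface {
	ProcessChange(ctx context.Context, change ingest.Change) error
	Commit(ctx context.Context) error
}

// streamChanges feeds every change from reader into p and flushes once at the end.
func streamChanges(ctx context.Context, reader ingest.ChangeReader, p changeProcessor) error {
	for {
		change, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		if err := p.ProcessChange(ctx, change); err != nil {
			return err
		}
	}
	// Commit upserts whatever the internal ChangeCompactor accumulated and, once the
	// ledger sequence exceeds compactionWindow (100), also trims rows deleted before
	// sequence-compactionWindow (e.g. before ledger 356 when ingesting ledger 456).
	return p.Commit(ctx)
}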
+ +func (p *LiquidityPoolsChangeProcessor) ledgerEntryToRow(entry *xdr.LedgerEntry) history.LiquidityPool { + lPool := entry.Data.MustLiquidityPool() + cp := lPool.Body.MustConstantProduct() + ar := history.LiquidityPoolAssetReserves{ + { + Asset: cp.Params.AssetA, + Reserve: uint64(cp.ReserveA), + }, + { + Asset: cp.Params.AssetB, + Reserve: uint64(cp.ReserveB), + }, + } + return history.LiquidityPool{ + PoolID: PoolIDToString(lPool.LiquidityPoolId), + Type: lPool.Body.Type, + Fee: uint32(cp.Params.Fee), + TrustlineCount: uint64(cp.PoolSharesTrustLineCount), + ShareCount: uint64(cp.TotalPoolShares), + AssetReserves: ar, + LastModifiedLedger: uint32(entry.LastModifiedLedgerSeq), + } +} + +// PoolIDToString encodes a liquidity pool id xdr value to its string form +func PoolIDToString(id xdr.PoolId) string { + return xdr.Hash(id).HexString() +} diff --git a/services/horizon/internal/ingest/processors/liquidity_pools_change_processor_test.go b/services/horizon/internal/ingest/processors/liquidity_pools_change_processor_test.go new file mode 100644 index 0000000000..4e7383b1fe --- /dev/null +++ b/services/horizon/internal/ingest/processors/liquidity_pools_change_processor_test.go @@ -0,0 +1,348 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" +) + +func TestLiquidityPoolsChangeProcessorTestSuiteState(t *testing.T) { + suite.Run(t, new(LiquidityPoolsChangeProcessorTestSuiteState)) +} + +type LiquidityPoolsChangeProcessorTestSuiteState struct { + suite.Suite + ctx context.Context + processor *LiquidityPoolsChangeProcessor + mockQ *history.MockQLiquidityPools + sequence uint32 +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteState) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQLiquidityPools{} + + s.sequence = 456 + s.processor = NewLiquidityPoolsChangeProcessor(s.mockQ, s.sequence) +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteState) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteState) TestNoEntries() { + s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once() +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteState) TestNoEntriesWithSequenceLessThanWindow() { + s.sequence = 50 + s.processor.sequence = s.sequence + // Nothing processed, assertions in TearDownTest. 
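+	// With sequence 50, which is below compactionWindow (100), Commit in TearDownTest
+	// skips CompactLiquidityPools entirely, so no mock expectation is registered here.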
+} + +func (s *LiquidityPoolsChangeProcessorTestSuiteState) TestCreatesLiquidityPools() { + lastModifiedLedgerSeq := xdr.Uint32(123) + lpoolEntry := xdr.LiquidityPoolEntry{ + LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef}, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 34, + }, + ReserveA: 450, + ReserveB: 500, + TotalPoolShares: 412241, + PoolSharesTrustLineCount: 52115, + }, + }, + } + lp := history.LiquidityPool{ + PoolID: "cafebabedeadbeef000000000000000000000000000000000000000000000000", + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: 34, + TrustlineCount: 52115, + ShareCount: 412241, + AssetReserves: []history.LiquidityPoolAssetReserve{ + { + xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + 450, + }, + { + xdr.MustNewNativeAsset(), + 500, + }, + }, + LastModifiedLedger: 123, + } + s.mockQ.On("UpsertLiquidityPools", s.ctx, []history.LiquidityPool{lp}).Return(nil).Once() + + s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &lpoolEntry, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) +} + +func TestLiquidityPoolsChangeProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(LiquidityPoolsChangeProcessorTestSuiteLedger)) +} + +type LiquidityPoolsChangeProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *LiquidityPoolsChangeProcessor + mockQ *history.MockQLiquidityPools + sequence uint32 +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQLiquidityPools{} + + s.sequence = 456 + s.processor = NewLiquidityPoolsChangeProcessor(s.mockQ, s.sequence) +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestNoTransactions() { + s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once() +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestNoEntriesWithSequenceLessThanWindow() { + s.sequence = 50 + s.processor.sequence = s.sequence + // Nothing processed, assertions in TearDownTest. 
+} + +func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestNewLiquidityPool() { + lastModifiedLedgerSeq := xdr.Uint32(123) + lpEntry := xdr.LiquidityPoolEntry{ + LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef}, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 34, + }, + ReserveA: 450, + ReserveB: 500, + TotalPoolShares: 412241, + PoolSharesTrustLineCount: 52115, + }, + }, + } + pre := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &lpEntry, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: nil, + }, + }, + } + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: nil, + Post: &pre, + }) + s.Assert().NoError(err) + + // add sponsor + post := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &lpEntry, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + } + + pre.LastModifiedLedgerSeq = pre.LastModifiedLedgerSeq - 1 + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: &pre, + Post: &post, + }) + s.Assert().NoError(err) + + postLP := history.LiquidityPool{ + PoolID: "cafebabedeadbeef000000000000000000000000000000000000000000000000", + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: 34, + TrustlineCount: 52115, + ShareCount: 412241, + AssetReserves: []history.LiquidityPoolAssetReserve{ + { + xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + 450, + }, + { + xdr.MustNewNativeAsset(), + 500, + }, + }, + LastModifiedLedger: 123, + } + s.mockQ.On("UpsertLiquidityPools", s.ctx, []history.LiquidityPool{postLP}).Return(nil).Once() + s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once() +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestUpdateLiquidityPool() { + lpEntry := xdr.LiquidityPoolEntry{ + LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef}, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 34, + }, + ReserveA: 450, + ReserveB: 500, + TotalPoolShares: 412241, + PoolSharesTrustLineCount: 52115, + }, + }, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + pre := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &lpEntry, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: nil, + }, + }, + } + + // add sponsor + post := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: 
&lpEntry, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: &pre, + Post: &post, + }) + s.Assert().NoError(err) + + postLP := history.LiquidityPool{ + PoolID: "cafebabedeadbeef000000000000000000000000000000000000000000000000", + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: 34, + TrustlineCount: 52115, + ShareCount: 412241, + AssetReserves: []history.LiquidityPoolAssetReserve{ + { + xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + 450, + }, + { + xdr.MustNewNativeAsset(), + 500, + }, + }, + LastModifiedLedger: 123, + } + + s.mockQ.On("UpsertLiquidityPools", s.ctx, []history.LiquidityPool{postLP}).Return(nil).Once() + s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once() +} + +func (s *LiquidityPoolsChangeProcessorTestSuiteLedger) TestRemoveLiquidityPool() { + lpEntry := xdr.LiquidityPoolEntry{ + LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef}, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 34, + }, + ReserveA: 450, + ReserveB: 123, + TotalPoolShares: 412241, + PoolSharesTrustLineCount: 52115, + }, + }, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + pre := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &lpEntry, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: nil, + }, + }, + } + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: &pre, + Post: nil, + }) + s.Assert().NoError(err) + + deleted := s.processor.ledgerEntryToRow(&pre) + deleted.Deleted = true + deleted.LastModifiedLedger = s.processor.sequence + s.mockQ.On("UpsertLiquidityPools", s.ctx, []history.LiquidityPool{deleted}).Return(nil).Once() + s.mockQ.On("CompactLiquidityPools", s.ctx, s.sequence-100).Return(int64(0), nil).Once() +} diff --git a/services/horizon/internal/ingest/processors/liquidity_pools_transaction_processor.go b/services/horizon/internal/ingest/processors/liquidity_pools_transaction_processor.go new file mode 100644 index 0000000000..548c7481a8 --- /dev/null +++ b/services/horizon/internal/ingest/processors/liquidity_pools_transaction_processor.go @@ -0,0 +1,251 @@ +package processors + +import ( + "context" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type liquidityPool struct { + internalID int64 // Bigint auto-generated by postgres + transactionSet map[int64]struct{} + operationSet map[int64]struct{} +} + +func (b *liquidityPool) addTransactionID(id int64) { + if b.transactionSet == nil { + b.transactionSet = map[int64]struct{}{} + } + b.transactionSet[id] = struct{}{} +} + +func (b *liquidityPool) addOperationID(id 
int64) { + if b.operationSet == nil { + b.operationSet = map[int64]struct{}{} + } + b.operationSet[id] = struct{}{} +} + +type LiquidityPoolsTransactionProcessor struct { + sequence uint32 + liquidityPoolSet map[string]liquidityPool + qLiquidityPools history.QHistoryLiquidityPools +} + +func NewLiquidityPoolsTransactionProcessor(Q history.QHistoryLiquidityPools, sequence uint32) *LiquidityPoolsTransactionProcessor { + return &LiquidityPoolsTransactionProcessor{ + qLiquidityPools: Q, + sequence: sequence, + liquidityPoolSet: map[string]liquidityPool{}, + } +} + +func (p *LiquidityPoolsTransactionProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) error { + err := p.addTransactionLiquidityPools(p.liquidityPoolSet, p.sequence, transaction) + if err != nil { + return err + } + + err = p.addOperationLiquidityPools(p.liquidityPoolSet, p.sequence, transaction) + if err != nil { + return err + } + + return nil +} + +func (p *LiquidityPoolsTransactionProcessor) addTransactionLiquidityPools(lpSet map[string]liquidityPool, sequence uint32, transaction ingest.LedgerTransaction) error { + transactionID := toid.New(int32(sequence), int32(transaction.Index), 0).ToInt64() + transactionLiquidityPools, err := liquidityPoolsForTransaction( + sequence, + transaction, + ) + if err != nil { + return errors.Wrap(err, "Could not determine liquidity pools for transaction") + } + + for _, lp := range transactionLiquidityPools { + entry := lpSet[lp] + entry.addTransactionID(transactionID) + lpSet[lp] = entry + } + + return nil +} + +func liquidityPoolsForTransaction( + sequence uint32, + transaction ingest.LedgerTransaction, +) ([]string, error) { + changes, err := transaction.GetChanges() + if err != nil { + return nil, err + } + lps, err := liquidityPoolsForChanges(changes) + if err != nil { + return nil, errors.Wrapf(err, "reading transaction %v liquidity pools", transaction.Index) + } + return dedupeLiquidityPools(lps) +} + +func dedupeLiquidityPools(in []string) (out []string, err error) { + set := map[string]struct{}{} + for _, id := range in { + set[id] = struct{}{} + } + + for id := range set { + out = append(out, id) + } + return +} + +func liquidityPoolsForChanges( + changes []ingest.Change, +) ([]string, error) { + var lps []string + + for _, c := range changes { + if c.Type != xdr.LedgerEntryTypeLiquidityPool { + continue + } + + if c.Pre == nil && c.Post == nil { + return nil, errors.New("Invalid io.Change: change.Pre == nil && change.Post == nil") + } + + if c.Pre != nil { + poolID := c.Pre.Data.MustLiquidityPool().LiquidityPoolId + lps = append(lps, PoolIDToString(poolID)) + } + if c.Post != nil { + poolID := c.Post.Data.MustLiquidityPool().LiquidityPoolId + lps = append(lps, PoolIDToString(poolID)) + } + } + + return lps, nil +} + +func (p *LiquidityPoolsTransactionProcessor) addOperationLiquidityPools(lpSet map[string]liquidityPool, sequence uint32, transaction ingest.LedgerTransaction) error { + liquidityPools, err := liquidityPoolsForOperations(transaction, sequence) + if err != nil { + return errors.Wrap(err, "could not determine operation liquidity pools") + } + + for operationID, lps := range liquidityPools { + for _, lp := range lps { + entry := lpSet[lp] + entry.addOperationID(operationID) + lpSet[lp] = entry + } + } + + return nil +} + +func liquidityPoolsForOperations(transaction ingest.LedgerTransaction, sequence uint32) (map[int64][]string, error) { + lps := map[int64][]string{} + + for opi, op := range transaction.Envelope.Operations() { + operation 
:= transactionOperationWrapper{ + index: uint32(opi), + transaction: transaction, + operation: op, + ledgerSequence: sequence, + } + + changes, err := transaction.GetOperationChanges(uint32(opi)) + if err != nil { + return lps, err + } + c, err := liquidityPoolsForChanges(changes) + if err != nil { + return lps, errors.Wrapf(err, "reading operation %v liquidity pools", operation.ID()) + } + lps[operation.ID()] = c + } + + return lps, nil +} + +func (p *LiquidityPoolsTransactionProcessor) Commit(ctx context.Context) error { + if len(p.liquidityPoolSet) > 0 { + if err := p.loadLiquidityPoolIDs(ctx, p.liquidityPoolSet); err != nil { + return err + } + + if err := p.insertDBTransactionLiquidityPools(ctx, p.liquidityPoolSet); err != nil { + return err + } + + if err := p.insertDBOperationsLiquidityPools(ctx, p.liquidityPoolSet); err != nil { + return err + } + } + + return nil +} + +func (p *LiquidityPoolsTransactionProcessor) loadLiquidityPoolIDs(ctx context.Context, liquidityPoolSet map[string]liquidityPool) error { + ids := make([]string, 0, len(liquidityPoolSet)) + for id := range liquidityPoolSet { + ids = append(ids, id) + } + + toInternalID, err := p.qLiquidityPools.CreateHistoryLiquidityPools(ctx, ids, maxBatchSize) + if err != nil { + return errors.Wrap(err, "Could not create liquidity pool ids") + } + + for _, id := range ids { + internalID, ok := toInternalID[id] + if !ok { + return errors.Errorf("no internal id found for liquidity pool %s", id) + } + + lp := liquidityPoolSet[id] + lp.internalID = internalID + liquidityPoolSet[id] = lp + } + + return nil +} + +func (p LiquidityPoolsTransactionProcessor) insertDBTransactionLiquidityPools(ctx context.Context, liquidityPoolSet map[string]liquidityPool) error { + batch := p.qLiquidityPools.NewTransactionLiquidityPoolBatchInsertBuilder(maxBatchSize) + + for _, entry := range liquidityPoolSet { + for transactionID := range entry.transactionSet { + if err := batch.Add(ctx, transactionID, entry.internalID); err != nil { + return errors.Wrap(err, "could not insert transaction liquidity pool in db") + } + } + } + + if err := batch.Exec(ctx); err != nil { + return errors.Wrap(err, "could not flush transaction liquidity pools to db") + } + return nil +} + +func (p LiquidityPoolsTransactionProcessor) insertDBOperationsLiquidityPools(ctx context.Context, liquidityPoolSet map[string]liquidityPool) error { + batch := p.qLiquidityPools.NewOperationLiquidityPoolBatchInsertBuilder(maxBatchSize) + + for _, entry := range liquidityPoolSet { + for operationID := range entry.operationSet { + if err := batch.Add(ctx, operationID, entry.internalID); err != nil { + return errors.Wrap(err, "could not insert operation liquidity pool in db") + } + } + } + + if err := batch.Exec(ctx); err != nil { + return errors.Wrap(err, "could not flush operation liquidity pools to db") + } + return nil +} diff --git a/services/horizon/internal/ingest/processors/liquidity_pools_transaction_processor_test.go b/services/horizon/internal/ingest/processors/liquidity_pools_transaction_processor_test.go new file mode 100644 index 0000000000..1f19646156 --- /dev/null +++ b/services/horizon/internal/ingest/processors/liquidity_pools_transaction_processor_test.go @@ -0,0 +1,244 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/services/horizon/internal/db2/history" + 
"github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/xdr" +) + +type LiquidityPoolsTransactionProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *LiquidityPoolsTransactionProcessor + mockQ *history.MockQHistoryLiquidityPools + mockTransactionBatchInsertBuilder *history.MockTransactionLiquidityPoolBatchInsertBuilder + mockOperationBatchInsertBuilder *history.MockOperationLiquidityPoolBatchInsertBuilder + + sequence uint32 +} + +func TestLiquidityPoolsTransactionProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(LiquidityPoolsTransactionProcessorTestSuiteLedger)) +} + +func (s *LiquidityPoolsTransactionProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQHistoryLiquidityPools{} + s.mockTransactionBatchInsertBuilder = &history.MockTransactionLiquidityPoolBatchInsertBuilder{} + s.mockOperationBatchInsertBuilder = &history.MockOperationLiquidityPoolBatchInsertBuilder{} + s.sequence = 20 + + s.processor = NewLiquidityPoolsTransactionProcessor( + s.mockQ, + s.sequence, + ) +} + +func (s *LiquidityPoolsTransactionProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) + s.mockTransactionBatchInsertBuilder.AssertExpectations(s.T()) + s.mockOperationBatchInsertBuilder.AssertExpectations(s.T()) +} + +func (s *LiquidityPoolsTransactionProcessorTestSuiteLedger) mockTransactionBatchAdd(transactionID, internalID int64, err error) { + s.mockTransactionBatchInsertBuilder.On("Add", s.ctx, transactionID, internalID).Return(err).Once() +} + +func (s *LiquidityPoolsTransactionProcessorTestSuiteLedger) mockOperationBatchAdd(operationID, internalID int64, err error) { + s.mockOperationBatchInsertBuilder.On("Add", s.ctx, operationID, internalID).Return(err).Once() +} + +func (s *LiquidityPoolsTransactionProcessorTestSuiteLedger) TestEmptyLiquidityPools() { + // What is this expecting? Doesn't seem to assert anything meaningful... + err := s.processor.Commit(context.Background()) + s.Assert().NoError(err) +} + +func (s *LiquidityPoolsTransactionProcessorTestSuiteLedger) testOperationInserts(poolID xdr.PoolId, body xdr.OperationBody, change xdr.LedgerEntryChange) { + // Setup the transaction + internalID := int64(1234) + txn := createTransaction(true, 1) + txn.Envelope.Operations()[0].Body = body + txn.UnsafeMeta.V = 2 + txn.UnsafeMeta.V2.Operations = []xdr.OperationMeta{ + {Changes: xdr.LedgerEntryChanges{ + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + }, + }, + }, + }, + change, + }}, + } + + if body.Type == xdr.OperationTypeChangeTrust { + // For insert test + txn.Result.Result.Result.Results = + &[]xdr.OperationResult{ + { + Code: xdr.OperationResultCodeOpInner, + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeChangeTrust, + ChangeTrustResult: &xdr.ChangeTrustResult{ + Code: xdr.ChangeTrustResultCodeChangeTrustSuccess, + }, + }, + }, + } + } + txnID := toid.New(int32(s.sequence), int32(txn.Index), 0).ToInt64() + opID := (&transactionOperationWrapper{ + index: uint32(0), + transaction: txn, + operation: txn.Envelope.Operations()[0], + ledgerSequence: s.sequence, + }).ID() + + hexID := PoolIDToString(poolID) + + // Setup a q + s.mockQ.On("CreateHistoryLiquidityPools", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). 
+ Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch( + []string{hexID}, + arg, + ) + }).Return(map[string]int64{ + hexID: internalID, + }, nil).Once() + + // Prepare to process transactions successfully + s.mockQ.On("NewTransactionLiquidityPoolBatchInsertBuilder", maxBatchSize). + Return(s.mockTransactionBatchInsertBuilder).Once() + s.mockTransactionBatchAdd(txnID, internalID, nil) + s.mockTransactionBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + + // Prepare to process operations successfully + s.mockQ.On("NewOperationLiquidityPoolBatchInsertBuilder", maxBatchSize). + Return(s.mockOperationBatchInsertBuilder).Once() + s.mockOperationBatchAdd(opID, internalID, nil) + s.mockOperationBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + + // Process the transaction + err := s.processor.ProcessTransaction(s.ctx, txn) + s.Assert().NoError(err) + err = s.processor.Commit(s.ctx) + s.Assert().NoError(err) +} + +func (s *LiquidityPoolsTransactionProcessorTestSuiteLedger) TestIngestLiquidityPoolsRemoval() { + poolID := xdr.PoolId{0xca, 0xfe, 0xba, 0xbe} + s.testOperationInserts(poolID, + xdr.OperationBody{ + Type: xdr.OperationTypeLiquidityPoolDeposit, + ChangeTrustOp: &xdr.ChangeTrustOp{ + Line: xdr.ChangeTrustAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPool: &xdr.LiquidityPoolParameters{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("EUR", "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 30, + }, + }, + }, + Limit: 0, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved, + Removed: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LedgerKeyLiquidityPool{ + LiquidityPoolId: poolID, + }, + }, + }, + ) +} + +func (s *LiquidityPoolsTransactionProcessorTestSuiteLedger) TestIngestLiquidityPoolsUpdate() { + poolID := xdr.PoolId{0xca, 0xfe, 0xba, 0xbe} + s.testOperationInserts(poolID, + xdr.OperationBody{ + Type: xdr.OperationTypeLiquidityPoolDeposit, + ChangeTrustOp: &xdr.ChangeTrustOp{ + Line: xdr.ChangeTrustAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPool: &xdr.LiquidityPoolParameters{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("EUR", "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 30, + }, + }, + }, + Limit: 10, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + Body: xdr.LiquidityPoolEntryBody{}, + }, + }, + }, + }, + ) +} + +func (s *LiquidityPoolsTransactionProcessorTestSuiteLedger) TestIngestLiquidityPoolsCreate() { + poolID := xdr.PoolId{0xca, 0xfe, 0xba, 0xbe} + s.testOperationInserts(poolID, + xdr.OperationBody{ + Type: xdr.OperationTypeLiquidityPoolDeposit, + ChangeTrustOp: &xdr.ChangeTrustOp{ + Line: xdr.ChangeTrustAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPool: &xdr.LiquidityPoolParameters{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("EUR", 
"GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 30, + }, + }, + }, + Limit: 10, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + Body: xdr.LiquidityPoolEntryBody{}, + }, + }, + }, + }, + ) +} diff --git a/services/horizon/internal/ingest/processors/main.go b/services/horizon/internal/ingest/processors/main.go new file mode 100644 index 0000000000..5088dd97aa --- /dev/null +++ b/services/horizon/internal/ingest/processors/main.go @@ -0,0 +1,22 @@ +package processors + +import ( + "github.com/guregu/null" + logpkg "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +var log = logpkg.DefaultLogger.WithField("service", "ingest") + +const maxBatchSize = 100000 + +func ledgerEntrySponsorToNullString(entry xdr.LedgerEntry) null.String { + sponsoringID := entry.SponsoringID() + + var sponsor null.String + if sponsoringID != nil { + sponsor.SetValid((*sponsoringID).Address()) + } + + return sponsor +} diff --git a/services/horizon/internal/ingest/processors/mock_change_processor.go b/services/horizon/internal/ingest/processors/mock_change_processor.go new file mode 100644 index 0000000000..6d2ab64a8f --- /dev/null +++ b/services/horizon/internal/ingest/processors/mock_change_processor.go @@ -0,0 +1,20 @@ +package processors + +import ( + "context" + + "github.com/stretchr/testify/mock" + + "github.com/stellar/go/ingest" +) + +var _ ChangeProcessor = (*MockChangeProcessor)(nil) + +type MockChangeProcessor struct { + mock.Mock +} + +func (m *MockChangeProcessor) ProcessChange(ctx context.Context, change ingest.Change) error { + args := m.Called(ctx, change) + return args.Error(0) +} diff --git a/services/horizon/internal/ingest/processors/offers_processor.go b/services/horizon/internal/ingest/processors/offers_processor.go new file mode 100644 index 0000000000..af1d80693e --- /dev/null +++ b/services/horizon/internal/ingest/processors/offers_processor.go @@ -0,0 +1,114 @@ +package processors + +import ( + "context" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// The offers processor can be configured to trim the offers table +// by removing all offer rows which were marked for deletion at least 100 ledgers ago +const compactionWindow = uint32(100) + +type OffersProcessor struct { + offersQ history.QOffers + sequence uint32 + + cache *ingest.ChangeCompactor +} + +func NewOffersProcessor(offersQ history.QOffers, sequence uint32) *OffersProcessor { + p := &OffersProcessor{offersQ: offersQ, sequence: sequence} + p.reset() + return p +} + +func (p *OffersProcessor) reset() { + p.cache = ingest.NewChangeCompactor() +} + +func (p *OffersProcessor) ProcessChange(ctx context.Context, change ingest.Change) error { + if change.Type != xdr.LedgerEntryTypeOffer { + return nil + } + + if err := p.cache.AddChange(change); err != nil { + return errors.Wrap(err, "error adding to ledgerCache") + } + + if p.cache.Size() > maxBatchSize { + if err := p.flushCache(ctx); err != nil { + return errors.Wrap(err, "error in Commit") + } + p.reset() + } + + return nil +} + +func (p *OffersProcessor) ledgerEntryToRow(entry *xdr.LedgerEntry) history.Offer { + offer := entry.Data.MustOffer() + return history.Offer{ + 
SellerID: offer.SellerId.Address(), + OfferID: int64(offer.OfferId), + SellingAsset: offer.Selling, + BuyingAsset: offer.Buying, + Amount: int64(offer.Amount), + Pricen: int32(offer.Price.N), + Priced: int32(offer.Price.D), + Price: float64(offer.Price.N) / float64(offer.Price.D), + Flags: int32(offer.Flags), + LastModifiedLedger: uint32(entry.LastModifiedLedgerSeq), + Sponsor: ledgerEntrySponsorToNullString(*entry), + } +} + +func (p *OffersProcessor) flushCache(ctx context.Context) error { + var batchUpsertOffers []history.Offer + changes := p.cache.GetChanges() + for _, change := range changes { + switch { + case change.Post != nil: + // Created and updated + row := p.ledgerEntryToRow(change.Post) + batchUpsertOffers = append(batchUpsertOffers, row) + case change.Pre != nil && change.Post == nil: + // Removed + row := p.ledgerEntryToRow(change.Pre) + row.Deleted = true + row.LastModifiedLedger = p.sequence + batchUpsertOffers = append(batchUpsertOffers, row) + default: + return errors.New("Invalid io.Change: change.Pre == nil && change.Post == nil") + } + } + + if len(batchUpsertOffers) > 0 { + err := p.offersQ.UpsertOffers(ctx, batchUpsertOffers) + if err != nil { + return errors.Wrap(err, "errors in UpsertOffers") + } + } + + return nil +} + +func (p *OffersProcessor) Commit(ctx context.Context) error { + if err := p.flushCache(ctx); err != nil { + return errors.Wrap(err, "error flushing cache") + } + + if p.sequence > compactionWindow { + // trim offers table by removing offers which were deleted before the cutoff ledger + if offerRowsRemoved, err := p.offersQ.CompactOffers(ctx, p.sequence-compactionWindow); err != nil { + return errors.Wrap(err, "could not compact offers") + } else { + log.WithField("offer_rows_removed", offerRowsRemoved).Info("Trimmed offers table") + } + } + + return nil +} diff --git a/services/horizon/internal/ingest/processors/offers_processor_test.go b/services/horizon/internal/ingest/processors/offers_processor_test.go new file mode 100644 index 0000000000..21905dc1df --- /dev/null +++ b/services/horizon/internal/ingest/processors/offers_processor_test.go @@ -0,0 +1,449 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" +) + +func TestOffersProcessorTestSuiteState(t *testing.T) { + suite.Run(t, new(OffersProcessorTestSuiteState)) +} + +type OffersProcessorTestSuiteState struct { + suite.Suite + ctx context.Context + processor *OffersProcessor + mockQ *history.MockQOffers + sequence uint32 +} + +func (s *OffersProcessorTestSuiteState) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQOffers{} + + s.sequence = 456 + s.processor = NewOffersProcessor(s.mockQ, s.sequence) +} + +func (s *OffersProcessorTestSuiteState) TearDownTest() { + s.mockQ.On("CompactOffers", s.ctx, s.sequence-100).Return(int64(0), nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) + + s.mockQ.AssertExpectations(s.T()) +} + +func (s *OffersProcessorTestSuiteState) TestCreateOffer() { + offer := xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(1), + Price: xdr.Price{1, 2}, + } + lastModifiedLedgerSeq := xdr.Uint32(123) + entry := 
xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &offer, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + } + + s.mockQ.On("UpsertOffers", s.ctx, []history.Offer{ + { + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + OfferID: 1, + Pricen: int32(1), + Priced: int32(2), + Price: float64(0.5), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + }, + }).Return(nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: nil, + Post: &entry, + }) + s.Assert().NoError(err) +} + +func TestOffersProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(OffersProcessorTestSuiteLedger)) +} + +type OffersProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *OffersProcessor + mockQ *history.MockQOffers + sequence uint32 +} + +func (s *OffersProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQOffers{} + + s.sequence = 456 + s.processor = NewOffersProcessor(s.mockQ, s.sequence) +} + +func (s *OffersProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) +} + +func (s *OffersProcessorTestSuiteLedger) setupInsertOffer() { + // should be ignored because it's not an offer type + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + }) + s.Assert().NoError(err) + + // add offer + offer := xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(2), + Price: xdr.Price{1, 2}, + } + lastModifiedLedgerSeq := xdr.Uint32(1234) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &offer, + }, + }, + }) + s.Assert().NoError(err) + + updatedOffer := xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(2), + Price: xdr.Price{1, 6}, + } + + updatedEntry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &updatedOffer, + }, + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &offer, + }, + }, + Post: &updatedEntry, + }) + s.Assert().NoError(err) + + // We use LedgerEntryChangesCache so all changes are squashed + s.mockQ.On("UpsertOffers", s.ctx, []history.Offer{ + { + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + OfferID: 2, + Pricen: int32(1), + Priced: int32(6), + Price: float64(1) / float64(6), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + }, + }).Return(nil).Once() +} + +func (s *OffersProcessorTestSuiteLedger) TestInsertOffer() { + s.setupInsertOffer() + s.mockQ.On("CompactOffers", s.ctx, s.sequence-100).Return(int64(0), nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *OffersProcessorTestSuiteLedger) 
TestSkipCompactionIfSequenceEqualsWindow() { + s.processor.sequence = compactionWindow + s.setupInsertOffer() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *OffersProcessorTestSuiteLedger) TestSkipCompactionIfSequenceLessThanWindow() { + s.processor.sequence = compactionWindow - 1 + s.setupInsertOffer() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *OffersProcessorTestSuiteLedger) TestCompactionError() { + s.setupInsertOffer() + s.mockQ.On("CompactOffers", s.ctx, s.sequence-100). + Return(int64(0), errors.New("compaction error")).Once() + s.Assert().EqualError(s.processor.Commit(s.ctx), "could not compact offers: compaction error") +} + +func (s *OffersProcessorTestSuiteLedger) TestUpsertManyOffers() { + lastModifiedLedgerSeq := xdr.Uint32(1234) + + offer := xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(2), + Price: xdr.Price{1, 2}, + } + updatedOffer := xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(2), + Price: xdr.Price{1, 6}, + } + + anotherOffer := xdr.OfferEntry{ + SellerId: xdr.MustAddress("GDMUVYVYPYZYBDXNJWKFT3X2GCZCICTL3GSVP6AWBGB4ZZG7ZRDA746P"), + OfferId: xdr.Int64(3), + Price: xdr.Price{2, 3}, + } + + updatedEntry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &updatedOffer, + }, + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &offer, + }, + }, + Post: &updatedEntry, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &anotherOffer, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On("UpsertOffers", s.ctx, mock.Anything).Run(func(args mock.Arguments) { + // To fix order issue due to using ChangeCompactor + offers := args.Get(1).([]history.Offer) + s.Assert().ElementsMatch( + offers, + []history.Offer{ + { + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + OfferID: 2, + Pricen: int32(1), + Priced: int32(6), + Price: float64(1) / float64(6), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + }, + { + SellerID: "GDMUVYVYPYZYBDXNJWKFT3X2GCZCICTL3GSVP6AWBGB4ZZG7ZRDA746P", + OfferID: 3, + Pricen: int32(2), + Priced: int32(3), + Price: float64(2) / float64(3), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + }, + }, + ) + }).Return(nil).Once() + s.mockQ.On("CompactOffers", s.ctx, s.sequence-100).Return(int64(0), nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *OffersProcessorTestSuiteLedger) TestRemoveOffer() { + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(3), + Price: xdr.Price{3, 1}, + }, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + s.mockQ.On("UpsertOffers", s.ctx, []history.Offer{ + { + SellerID: 
"GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + OfferID: 3, + Pricen: 3, + Priced: 1, + Price: 3, + Deleted: true, + LastModifiedLedger: 456, + }, + }).Return(nil).Once() + s.mockQ.On("CompactOffers", s.ctx, s.sequence-100).Return(int64(0), nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *OffersProcessorTestSuiteLedger) TestProcessUpgradeChange() { + // add offer + offer := xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(2), + Price: xdr.Price{1, 2}, + } + lastModifiedLedgerSeq := xdr.Uint32(1234) + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &offer, + }, + }, + }) + s.Assert().NoError(err) + + updatedOffer := xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(2), + Price: xdr.Price{1, 6}, + } + + updatedEntry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &updatedOffer, + }, + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &offer, + }, + }, + Post: &updatedEntry, + }) + s.Assert().NoError(err) + + // We use LedgerEntryChangesCache so all changes are squashed + s.mockQ.On("UpsertOffers", s.ctx, []history.Offer{ + { + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + OfferID: 2, + Pricen: 1, + Priced: 6, + Price: float64(1) / float64(6), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + }, + }).Return(nil).Once() + + s.mockQ.On("CompactOffers", s.ctx, s.sequence-100).Return(int64(0), nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *OffersProcessorTestSuiteLedger) TestRemoveMultipleOffers() { + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(3), + Price: xdr.Price{3, 1}, + }, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + OfferId: xdr.Int64(4), + Price: xdr.Price{3, 1}, + }, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + s.mockQ.On("CompactOffers", s.ctx, s.sequence-100).Return(int64(0), nil).Once() + s.mockQ.On("UpsertOffers", s.ctx, mock.Anything).Run(func(args mock.Arguments) { + // To fix order issue due to using ChangeCompactor + offers := args.Get(1).([]history.Offer) + s.Assert().ElementsMatch( + offers, + []history.Offer{ + { + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + OfferID: 3, + Pricen: 3, + Priced: 1, + Price: 3, + LastModifiedLedger: 456, + Deleted: true, + }, + { + SellerID: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + OfferID: 4, + Pricen: 3, + 
Priced: 1, + Price: 3, + LastModifiedLedger: 456, + Deleted: true, + }, + }, + ) + }).Return(nil).Once() + + err = s.processor.Commit(s.ctx) + s.Assert().NoError(err) +} diff --git a/services/horizon/internal/ingest/processors/operations_processor.go b/services/horizon/internal/ingest/processors/operations_processor.go new file mode 100644 index 0000000000..9afae5b44f --- /dev/null +++ b/services/horizon/internal/ingest/processors/operations_processor.go @@ -0,0 +1,860 @@ +package processors + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/guregu/null" + "github.com/stellar/go/amount" + "github.com/stellar/go/ingest" + "github.com/stellar/go/protocols/horizon/base" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// OperationProcessor operations processor +type OperationProcessor struct { + operationsQ history.QOperations + + sequence uint32 + batch history.OperationBatchInsertBuilder +} + +func NewOperationProcessor(operationsQ history.QOperations, sequence uint32) *OperationProcessor { + return &OperationProcessor{ + operationsQ: operationsQ, + sequence: sequence, + batch: operationsQ.NewOperationBatchInsertBuilder(maxBatchSize), + } +} + +// ProcessTransaction process the given transaction +func (p *OperationProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) error { + for i, op := range transaction.Envelope.Operations() { + operation := transactionOperationWrapper{ + index: uint32(i), + transaction: transaction, + operation: op, + ledgerSequence: p.sequence, + } + details, err := operation.Details() + if err != nil { + return errors.Wrapf(err, "Error obtaining details for operation %v", operation.ID()) + } + var detailsJSON []byte + detailsJSON, err = json.Marshal(details) + if err != nil { + return errors.Wrapf(err, "Error marshaling details for operation %v", operation.ID()) + } + + source := operation.SourceAccount() + acID := source.ToAccountId() + var sourceAccountMuxed null.String + if source.Type == xdr.CryptoKeyTypeKeyTypeMuxedEd25519 { + sourceAccountMuxed = null.StringFrom(source.Address()) + } + if err := p.batch.Add(ctx, + operation.ID(), + operation.TransactionID(), + operation.Order(), + operation.OperationType(), + detailsJSON, + acID.Address(), + sourceAccountMuxed, + ); err != nil { + return errors.Wrap(err, "Error batch inserting operation rows") + } + } + + return nil +} + +func (p *OperationProcessor) Commit(ctx context.Context) error { + return p.batch.Exec(ctx) +} + +// transactionOperationWrapper represents the data for a single operation within a transaction +type transactionOperationWrapper struct { + index uint32 + transaction ingest.LedgerTransaction + operation xdr.Operation + ledgerSequence uint32 +} + +// ID returns the ID for the operation. +func (operation *transactionOperationWrapper) ID() int64 { + return toid.New( + int32(operation.ledgerSequence), + int32(operation.transaction.Index), + int32(operation.index+1), + ).ToInt64() +} + +// Order returns the operation order. +func (operation *transactionOperationWrapper) Order() uint32 { + return operation.index + 1 +} + +// TransactionID returns the id for the transaction related with this operation. 
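+// TOIDs pack the ledger sequence (high 32 bits), transaction order (20 bits) and
+// operation order (12 bits) into one int64: ledger 1, transaction index 0 yields
+// 1<<32 = 4294967296, and its first operation (see ID above) 4294967297.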
+func (operation *transactionOperationWrapper) TransactionID() int64 { + return toid.New(int32(operation.ledgerSequence), int32(operation.transaction.Index), 0).ToInt64() +} + +// SourceAccount returns the operation's source account. +func (operation *transactionOperationWrapper) SourceAccount() *xdr.MuxedAccount { + sourceAccount := operation.operation.SourceAccount + if sourceAccount != nil { + return sourceAccount + } else { + ret := operation.transaction.Envelope.SourceAccount() + return &ret + } +} + +// OperationType returns the operation type. +func (operation *transactionOperationWrapper) OperationType() xdr.OperationType { + return operation.operation.Body.Type +} + +func (operation *transactionOperationWrapper) getSignerSponsorInChange(signerKey string, change ingest.Change) xdr.SponsorshipDescriptor { + if change.Type != xdr.LedgerEntryTypeAccount || change.Post == nil { + return nil + } + + preSigners := map[string]xdr.AccountId{} + if change.Pre != nil { + account := change.Pre.Data.MustAccount() + preSigners = account.SponsorPerSigner() + } + + account := change.Post.Data.MustAccount() + postSigners := account.SponsorPerSigner() + + pre, preFound := preSigners[signerKey] + post, postFound := postSigners[signerKey] + + if !postFound { + return nil + } + + if preFound { + formerSponsor := pre.Address() + newSponsor := post.Address() + if formerSponsor == newSponsor { + return nil + } + } + + return &post +} + +func (operation *transactionOperationWrapper) getSponsor() (*xdr.AccountId, error) { + changes, err := operation.transaction.GetOperationChanges(operation.index) + if err != nil { + return nil, err + } + var signerKey string + if setOps, ok := operation.operation.Body.GetSetOptionsOp(); ok && setOps.Signer != nil { + signerKey = setOps.Signer.Key.Address() + } + + for _, c := range changes { + // Check Signer changes + if signerKey != "" { + if sponsorAccount := operation.getSignerSponsorInChange(signerKey, c); sponsorAccount != nil { + return sponsorAccount, nil + } + } + + // Check Ledger key changes + if c.Pre != nil || c.Post == nil { + // We are only looking for entry creations denoting that a sponsor + // is associated to the ledger entry of the operation. 
+ continue
+ }
+ if sponsorAccount := c.Post.SponsoringID(); sponsorAccount != nil {
+ return sponsorAccount, nil
+ }
+ }
+
+ return nil, nil
+}
+
+type liquidityPoolDelta struct {
+ ReserveA xdr.Int64
+ ReserveB xdr.Int64
+ TotalPoolShares xdr.Int64
+}
+
+var errLiquidityPoolChangeNotFound = errors.New("liquidity pool change not found")
+
+func (operation *transactionOperationWrapper) getLiquidityPoolAndProductDelta(lpID *xdr.PoolId) (*xdr.LiquidityPoolEntry, *liquidityPoolDelta, error) {
+ changes, err := operation.transaction.GetOperationChanges(operation.index)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, c := range changes {
+ if c.Type != xdr.LedgerEntryTypeLiquidityPool {
+ continue
+ }
+ // The delta can be caused by a full removal or full creation of the liquidity pool
+ var lp *xdr.LiquidityPoolEntry
+ var preA, preB, preShares xdr.Int64
+ if c.Pre != nil {
+ if lpID != nil && c.Pre.Data.LiquidityPool.LiquidityPoolId != *lpID {
+ // when a specific pool ID is requested, skip changes for other pools
+ continue
+ }
+ lp = c.Pre.Data.LiquidityPool
+ if c.Pre.Data.LiquidityPool.Body.Type != xdr.LiquidityPoolTypeLiquidityPoolConstantProduct {
+ return nil, nil, fmt.Errorf("unexpected liquidity pool body type %d", c.Pre.Data.LiquidityPool.Body.Type)
+ }
+ cpPre := c.Pre.Data.LiquidityPool.Body.ConstantProduct
+ preA, preB, preShares = cpPre.ReserveA, cpPre.ReserveB, cpPre.TotalPoolShares
+ }
+ var postA, postB, postShares xdr.Int64
+ if c.Post != nil {
+ if lpID != nil && c.Post.Data.LiquidityPool.LiquidityPoolId != *lpID {
+ // when a specific pool ID is requested, skip changes for other pools
+ continue
+ }
+ lp = c.Post.Data.LiquidityPool
+ if c.Post.Data.LiquidityPool.Body.Type != xdr.LiquidityPoolTypeLiquidityPoolConstantProduct {
+ return nil, nil, fmt.Errorf("unexpected liquidity pool body type %d", c.Post.Data.LiquidityPool.Body.Type)
+ }
+ cpPost := c.Post.Data.LiquidityPool.Body.ConstantProduct
+ postA, postB, postShares = cpPost.ReserveA, cpPost.ReserveB, cpPost.TotalPoolShares
+ }
+ delta := &liquidityPoolDelta{
+ ReserveA: postA - preA,
+ ReserveB: postB - preB,
+ TotalPoolShares: postShares - preShares,
+ }
+ return lp, delta, nil
+ }
+
+ return nil, nil, errLiquidityPoolChangeNotFound
+}
+
+// OperationResult returns the operation's result record
+func (operation *transactionOperationWrapper) OperationResult() *xdr.OperationResultTr {
+ results, _ := operation.transaction.Result.OperationResults()
+ tr := results[operation.index].MustTr()
+ return &tr
+}
+
+func (operation *transactionOperationWrapper) findInitatingBeginSponsoringOp() *transactionOperationWrapper {
+ if !operation.transaction.Result.Successful() {
+ // Failed transactions may not have a compliant sandwich structure
+ // we can rely on (e.g. invalid nesting or a begin operation with the wrong sponsoree ID)
+ // and thus we bail out since we could return incorrect information.
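+ // For successful transactions, the search below walks backwards from this operation and
+ // returns the closest preceding BeginSponsoringFutureReserves whose sponsored ID matches
+ // this operation's source account.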
+ return nil
+ }
+ sponsoree := operation.SourceAccount().ToAccountId()
+ operations := operation.transaction.Envelope.Operations()
+ for i := int(operation.index) - 1; i >= 0; i-- {
+ if beginOp, ok := operations[i].Body.GetBeginSponsoringFutureReservesOp(); ok &&
+ beginOp.SponsoredId.Address() == sponsoree.Address() {
+ result := *operation
+ result.index = uint32(i)
+ result.operation = operations[i]
+ return &result
+ }
+ }
+ return nil
+}
+
+func addAccountAndMuxedAccountDetails(result map[string]interface{}, a xdr.MuxedAccount, prefix string) {
+ accid := a.ToAccountId()
+ result[prefix] = accid.Address()
+ if a.Type == xdr.CryptoKeyTypeKeyTypeMuxedEd25519 {
+ result[prefix+"_muxed"] = a.Address()
+ // _muxed_id fields should ideally have been stored in the DB as strings instead of uint64,
+ // because Javascript cannot handle them, see https://github.com/stellar/go/issues/3714
+ // However, we released this code in the wild before correcting it. Thus, what we do is
+ // work around it (by preprocessing it into a string) in Operation.UnmarshalDetails()
+ result[prefix+"_muxed_id"] = uint64(a.Med25519.Id)
+ }
+}
+
+// Details returns the operation details as a map which can be stored as JSON.
+func (operation *transactionOperationWrapper) Details() (map[string]interface{}, error) {
+ details := map[string]interface{}{}
+ source := operation.SourceAccount()
+ switch operation.OperationType() {
+ case xdr.OperationTypeCreateAccount:
+ op := operation.operation.Body.MustCreateAccountOp()
+ addAccountAndMuxedAccountDetails(details, *source, "funder")
+ details["account"] = op.Destination.Address()
+ details["starting_balance"] = amount.String(op.StartingBalance)
+ case xdr.OperationTypePayment:
+ op := operation.operation.Body.MustPaymentOp()
+ addAccountAndMuxedAccountDetails(details, *source, "from")
+ addAccountAndMuxedAccountDetails(details, op.Destination, "to")
+ details["amount"] = amount.String(op.Amount)
+ addAssetDetails(details, op.Asset, "")
+ case xdr.OperationTypePathPaymentStrictReceive:
+ op := operation.operation.Body.MustPathPaymentStrictReceiveOp()
+ addAccountAndMuxedAccountDetails(details, *source, "from")
+ addAccountAndMuxedAccountDetails(details, op.Destination, "to")
+
+ details["amount"] = amount.String(op.DestAmount)
+ details["source_amount"] = amount.String(0)
+ details["source_max"] = amount.String(op.SendMax)
+ addAssetDetails(details, op.DestAsset, "")
+ addAssetDetails(details, op.SendAsset, "source_")
+
+ if operation.transaction.Result.Successful() {
+ result := operation.OperationResult().MustPathPaymentStrictReceiveResult()
+ details["source_amount"] = amount.String(result.SendAmount())
+ }
+
+ var path = make([]map[string]interface{}, len(op.Path))
+ for i := range op.Path {
+ path[i] = make(map[string]interface{})
+ addAssetDetails(path[i], op.Path[i], "")
+ }
+ details["path"] = path
+
+ case xdr.OperationTypePathPaymentStrictSend:
+ op := operation.operation.Body.MustPathPaymentStrictSendOp()
+ addAccountAndMuxedAccountDetails(details, *source, "from")
+ addAccountAndMuxedAccountDetails(details, op.Destination, "to")
+
+ details["amount"] = amount.String(0)
+ details["source_amount"] = amount.String(op.SendAmount)
+ details["destination_min"] = amount.String(op.DestMin)
+ addAssetDetails(details, op.DestAsset, "")
+ addAssetDetails(details, op.SendAsset, "source_")
+
+ if operation.transaction.Result.Successful() {
+ result := operation.OperationResult().MustPathPaymentStrictSendResult()
+ details["amount"] =
amount.String(result.DestAmount()) + } + + var path = make([]map[string]interface{}, len(op.Path)) + for i := range op.Path { + path[i] = make(map[string]interface{}) + addAssetDetails(path[i], op.Path[i], "") + } + details["path"] = path + case xdr.OperationTypeManageBuyOffer: + op := operation.operation.Body.MustManageBuyOfferOp() + details["offer_id"] = op.OfferId + details["amount"] = amount.String(op.BuyAmount) + details["price"] = op.Price.String() + details["price_r"] = map[string]interface{}{ + "n": op.Price.N, + "d": op.Price.D, + } + addAssetDetails(details, op.Buying, "buying_") + addAssetDetails(details, op.Selling, "selling_") + case xdr.OperationTypeManageSellOffer: + op := operation.operation.Body.MustManageSellOfferOp() + details["offer_id"] = op.OfferId + details["amount"] = amount.String(op.Amount) + details["price"] = op.Price.String() + details["price_r"] = map[string]interface{}{ + "n": op.Price.N, + "d": op.Price.D, + } + addAssetDetails(details, op.Buying, "buying_") + addAssetDetails(details, op.Selling, "selling_") + case xdr.OperationTypeCreatePassiveSellOffer: + op := operation.operation.Body.MustCreatePassiveSellOfferOp() + details["amount"] = amount.String(op.Amount) + details["price"] = op.Price.String() + details["price_r"] = map[string]interface{}{ + "n": op.Price.N, + "d": op.Price.D, + } + addAssetDetails(details, op.Buying, "buying_") + addAssetDetails(details, op.Selling, "selling_") + case xdr.OperationTypeSetOptions: + op := operation.operation.Body.MustSetOptionsOp() + + if op.InflationDest != nil { + details["inflation_dest"] = op.InflationDest.Address() + } + + if op.SetFlags != nil && *op.SetFlags > 0 { + addAuthFlagDetails(details, xdr.AccountFlags(*op.SetFlags), "set") + } + + if op.ClearFlags != nil && *op.ClearFlags > 0 { + addAuthFlagDetails(details, xdr.AccountFlags(*op.ClearFlags), "clear") + } + + if op.MasterWeight != nil { + details["master_key_weight"] = *op.MasterWeight + } + + if op.LowThreshold != nil { + details["low_threshold"] = *op.LowThreshold + } + + if op.MedThreshold != nil { + details["med_threshold"] = *op.MedThreshold + } + + if op.HighThreshold != nil { + details["high_threshold"] = *op.HighThreshold + } + + if op.HomeDomain != nil { + details["home_domain"] = *op.HomeDomain + } + + if op.Signer != nil { + details["signer_key"] = op.Signer.Key.Address() + details["signer_weight"] = op.Signer.Weight + } + case xdr.OperationTypeChangeTrust: + op := operation.operation.Body.MustChangeTrustOp() + if op.Line.Type == xdr.AssetTypeAssetTypePoolShare { + if err := addLiquidityPoolAssetDetails(details, *op.Line.LiquidityPool); err != nil { + return nil, err + } + } else { + addAssetDetails(details, op.Line.ToAsset(), "") + details["trustee"] = details["asset_issuer"] + } + addAccountAndMuxedAccountDetails(details, *source, "trustor") + details["limit"] = amount.String(op.Limit) + case xdr.OperationTypeAllowTrust: + op := operation.operation.Body.MustAllowTrustOp() + addAssetDetails(details, op.Asset.ToAsset(source.ToAccountId()), "") + addAccountAndMuxedAccountDetails(details, *source, "trustee") + details["trustor"] = op.Trustor.Address() + details["authorize"] = xdr.TrustLineFlags(op.Authorize).IsAuthorized() + authLiabilities := xdr.TrustLineFlags(op.Authorize).IsAuthorizedToMaintainLiabilitiesFlag() + if authLiabilities { + details["authorize_to_maintain_liabilities"] = authLiabilities + } + clawbackEnabled := xdr.TrustLineFlags(op.Authorize).IsClawbackEnabledFlag() + if clawbackEnabled { + details["clawback_enabled"] = 
clawbackEnabled + } + case xdr.OperationTypeAccountMerge: + addAccountAndMuxedAccountDetails(details, *source, "account") + addAccountAndMuxedAccountDetails(details, operation.operation.Body.MustDestination(), "into") + case xdr.OperationTypeInflation: + // no inflation details, presently + case xdr.OperationTypeManageData: + op := operation.operation.Body.MustManageDataOp() + details["name"] = string(op.DataName) + if op.DataValue != nil { + details["value"] = base64.StdEncoding.EncodeToString(*op.DataValue) + } else { + details["value"] = nil + } + case xdr.OperationTypeBumpSequence: + op := operation.operation.Body.MustBumpSequenceOp() + details["bump_to"] = fmt.Sprintf("%d", op.BumpTo) + case xdr.OperationTypeCreateClaimableBalance: + op := operation.operation.Body.MustCreateClaimableBalanceOp() + details["asset"] = op.Asset.StringCanonical() + details["amount"] = amount.String(op.Amount) + var claimants history.Claimants + for _, c := range op.Claimants { + cv0 := c.MustV0() + claimants = append(claimants, history.Claimant{ + Destination: cv0.Destination.Address(), + Predicate: cv0.Predicate, + }) + } + details["claimants"] = claimants + case xdr.OperationTypeClaimClaimableBalance: + op := operation.operation.Body.MustClaimClaimableBalanceOp() + balanceID, err := xdr.MarshalHex(op.BalanceId) + if err != nil { + panic(fmt.Errorf("Invalid balanceId in op: %d", operation.index)) + } + details["balance_id"] = balanceID + addAccountAndMuxedAccountDetails(details, *source, "claimant") + case xdr.OperationTypeBeginSponsoringFutureReserves: + op := operation.operation.Body.MustBeginSponsoringFutureReservesOp() + details["sponsored_id"] = op.SponsoredId.Address() + case xdr.OperationTypeEndSponsoringFutureReserves: + beginSponsorshipOp := operation.findInitatingBeginSponsoringOp() + if beginSponsorshipOp != nil { + beginSponsorshipSource := beginSponsorshipOp.SourceAccount() + addAccountAndMuxedAccountDetails(details, *beginSponsorshipSource, "begin_sponsor") + } + case xdr.OperationTypeRevokeSponsorship: + op := operation.operation.Body.MustRevokeSponsorshipOp() + switch op.Type { + case xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry: + if err := addLedgerKeyDetails(details, *op.LedgerKey); err != nil { + return nil, err + } + case xdr.RevokeSponsorshipTypeRevokeSponsorshipSigner: + details["signer_account_id"] = op.Signer.AccountId.Address() + details["signer_key"] = op.Signer.SignerKey.Address() + } + case xdr.OperationTypeClawback: + op := operation.operation.Body.MustClawbackOp() + addAssetDetails(details, op.Asset, "") + addAccountAndMuxedAccountDetails(details, op.From, "from") + details["amount"] = amount.String(op.Amount) + case xdr.OperationTypeClawbackClaimableBalance: + op := operation.operation.Body.MustClawbackClaimableBalanceOp() + balanceID, err := xdr.MarshalHex(op.BalanceId) + if err != nil { + panic(fmt.Errorf("Invalid balanceId in op: %d", operation.index)) + } + details["balance_id"] = balanceID + case xdr.OperationTypeSetTrustLineFlags: + op := operation.operation.Body.MustSetTrustLineFlagsOp() + details["trustor"] = op.Trustor.Address() + addAssetDetails(details, op.Asset, "") + if op.SetFlags > 0 { + addTrustLineFlagDetails(details, xdr.TrustLineFlags(op.SetFlags), "set") + } + + if op.ClearFlags > 0 { + addTrustLineFlagDetails(details, xdr.TrustLineFlags(op.ClearFlags), "clear") + } + case xdr.OperationTypeLiquidityPoolDeposit: + op := operation.operation.Body.MustLiquidityPoolDepositOp() + details["liquidity_pool_id"] = PoolIDToString(op.LiquidityPoolId) + var 
(
+ assetA, assetB string
+ depositedA, depositedB xdr.Int64
+ sharesReceived xdr.Int64
+ )
+ if operation.transaction.Result.Successful() {
+ // we will use the defaults (omitted asset and 0 amounts) if the transaction failed
+ lp, delta, err := operation.getLiquidityPoolAndProductDelta(&op.LiquidityPoolId)
+ if err != nil {
+ return nil, err
+ }
+ params := lp.Body.ConstantProduct.Params
+ assetA, assetB = params.AssetA.StringCanonical(), params.AssetB.StringCanonical()
+ depositedA, depositedB = delta.ReserveA, delta.ReserveB
+ sharesReceived = delta.TotalPoolShares
+ }
+ details["reserves_max"] = []base.AssetAmount{
+ {Asset: assetA, Amount: amount.String(op.MaxAmountA)},
+ {Asset: assetB, Amount: amount.String(op.MaxAmountB)},
+ }
+ details["min_price"] = op.MinPrice.String()
+ details["min_price_r"] = map[string]interface{}{
+ "n": op.MinPrice.N,
+ "d": op.MinPrice.D,
+ }
+ details["max_price"] = op.MaxPrice.String()
+ details["max_price_r"] = map[string]interface{}{
+ "n": op.MaxPrice.N,
+ "d": op.MaxPrice.D,
+ }
+ details["reserves_deposited"] = []base.AssetAmount{
+ {Asset: assetA, Amount: amount.String(depositedA)},
+ {Asset: assetB, Amount: amount.String(depositedB)},
+ }
+ details["shares_received"] = amount.String(sharesReceived)
+ case xdr.OperationTypeLiquidityPoolWithdraw:
+ op := operation.operation.Body.MustLiquidityPoolWithdrawOp()
+ details["liquidity_pool_id"] = PoolIDToString(op.LiquidityPoolId)
+ var (
+ assetA, assetB string
+ receivedA, receivedB xdr.Int64
+ )
+ if operation.transaction.Result.Successful() {
+ // we will use the defaults (omitted asset and 0 amounts) if the transaction failed
+ lp, delta, err := operation.getLiquidityPoolAndProductDelta(&op.LiquidityPoolId)
+ if err != nil {
+ return nil, err
+ }
+ params := lp.Body.ConstantProduct.Params
+ assetA, assetB = params.AssetA.StringCanonical(), params.AssetB.StringCanonical()
+ receivedA, receivedB = -delta.ReserveA, -delta.ReserveB
+ }
+ details["reserves_min"] = []base.AssetAmount{
+ {Asset: assetA, Amount: amount.String(op.MinAmountA)},
+ {Asset: assetB, Amount: amount.String(op.MinAmountB)},
+ }
+ details["shares"] = amount.String(op.Amount)
+ details["reserves_received"] = []base.AssetAmount{
+ {Asset: assetA, Amount: amount.String(receivedA)},
+ {Asset: assetB, Amount: amount.String(receivedB)},
+ }
+
+ default:
+ panic(fmt.Errorf("Unknown operation type: %s", operation.OperationType()))
+ }
+
+ sponsor, err := operation.getSponsor()
+ if err != nil {
+ return nil, err
+ }
+ if sponsor != nil {
+ details["sponsor"] = sponsor.Address()
+ }
+
+ return details, nil
+}
+
+func addLiquidityPoolAssetDetails(result map[string]interface{}, lpp xdr.LiquidityPoolParameters) error {
+ result["asset_type"] = "liquidity_pool_shares"
+ if lpp.Type != xdr.LiquidityPoolTypeLiquidityPoolConstantProduct {
+ return fmt.Errorf("unknown liquidity pool type %d", lpp.Type)
+ }
+ cp := lpp.ConstantProduct
+ poolID, err := xdr.NewPoolId(cp.AssetA, cp.AssetB, cp.Fee)
+ if err != nil {
+ return err
+ }
+ result["liquidity_pool_id"] = PoolIDToString(poolID)
+ return nil
+}
+
+// addAssetDetails sets the details for `a` on `result` using keys with `prefix`
+func addAssetDetails(result map[string]interface{}, a xdr.Asset, prefix string) error {
+ var (
+ assetType string
+ code string
+ issuer string
+ )
+ err := a.Extract(&assetType, &code, &issuer)
+ if err != nil {
+ err = errors.Wrap(err, "xdr.Asset.Extract error")
+ return err
+ }
+ result[prefix+"asset_type"] = assetType
+
+ if a.Type == xdr.AssetTypeAssetTypeNative {
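+ // the native asset (XLM) has no code or issuer, so only asset_type is set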
return nil + } + + result[prefix+"asset_code"] = code + result[prefix+"asset_issuer"] = issuer + return nil +} + +// addAuthFlagDetails adds the account flag details for `f` on `result`. +func addAuthFlagDetails(result map[string]interface{}, f xdr.AccountFlags, prefix string) { + var ( + n []int32 + s []string + ) + + if f.IsAuthRequired() { + n = append(n, int32(xdr.AccountFlagsAuthRequiredFlag)) + s = append(s, "auth_required") + } + + if f.IsAuthRevocable() { + n = append(n, int32(xdr.AccountFlagsAuthRevocableFlag)) + s = append(s, "auth_revocable") + } + + if f.IsAuthImmutable() { + n = append(n, int32(xdr.AccountFlagsAuthImmutableFlag)) + s = append(s, "auth_immutable") + } + + if f.IsAuthClawbackEnabled() { + n = append(n, int32(xdr.AccountFlagsAuthClawbackEnabledFlag)) + s = append(s, "auth_clawback_enabled") + } + + result[prefix+"_flags"] = n + result[prefix+"_flags_s"] = s +} + +// addTrustLineFlagDetails adds the trustline flag details for `f` on `result`. +func addTrustLineFlagDetails(result map[string]interface{}, f xdr.TrustLineFlags, prefix string) { + var ( + n []int32 + s []string + ) + + if f.IsAuthorized() { + n = append(n, int32(xdr.TrustLineFlagsAuthorizedFlag)) + s = append(s, "authorized") + } + + if f.IsAuthorizedToMaintainLiabilitiesFlag() { + n = append(n, int32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag)) + s = append(s, "authorized_to_maintain_liabilites") + } + + if f.IsClawbackEnabledFlag() { + n = append(n, int32(xdr.TrustLineFlagsTrustlineClawbackEnabledFlag)) + s = append(s, "clawback_enabled") + } + + result[prefix+"_flags"] = n + result[prefix+"_flags_s"] = s +} + +func addLedgerKeyDetails(result map[string]interface{}, ledgerKey xdr.LedgerKey) error { + switch ledgerKey.Type { + case xdr.LedgerEntryTypeAccount: + result["account_id"] = ledgerKey.Account.AccountId.Address() + case xdr.LedgerEntryTypeClaimableBalance: + marshalHex, err := xdr.MarshalHex(ledgerKey.ClaimableBalance.BalanceId) + if err != nil { + return errors.Wrapf(err, "in claimable balance") + } + result["claimable_balance_id"] = marshalHex + case xdr.LedgerEntryTypeData: + result["data_account_id"] = ledgerKey.Data.AccountId.Address() + result["data_name"] = ledgerKey.Data.DataName + case xdr.LedgerEntryTypeOffer: + result["offer_id"] = fmt.Sprintf("%d", ledgerKey.Offer.OfferId) + case xdr.LedgerEntryTypeTrustline: + result["trustline_account_id"] = ledgerKey.TrustLine.AccountId.Address() + if ledgerKey.TrustLine.Asset.Type == xdr.AssetTypeAssetTypePoolShare { + result["trustline_liquidity_pool_id"] = PoolIDToString(*ledgerKey.TrustLine.Asset.LiquidityPoolId) + } else { + result["trustline_asset"] = ledgerKey.TrustLine.Asset.ToAsset().StringCanonical() + } + case xdr.LedgerEntryTypeLiquidityPool: + result["liquidity_pool_id"] = PoolIDToString(ledgerKey.LiquidityPool.LiquidityPoolId) + } + return nil +} + +func getLedgerKeyParticipants(ledgerKey xdr.LedgerKey) []xdr.AccountId { + var result []xdr.AccountId + switch ledgerKey.Type { + case xdr.LedgerEntryTypeAccount: + result = append(result, ledgerKey.Account.AccountId) + case xdr.LedgerEntryTypeClaimableBalance: + // nothing to do + case xdr.LedgerEntryTypeData: + result = append(result, ledgerKey.Data.AccountId) + case xdr.LedgerEntryTypeOffer: + result = append(result, ledgerKey.Offer.SellerId) + case xdr.LedgerEntryTypeTrustline: + result = append(result, ledgerKey.TrustLine.AccountId) + } + return result +} + +// Participants returns the accounts taking part in the operation. 
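+// The result always includes the operation's source account, any direct counterparties for the
+// specific operation type, and the operation's sponsor (if any); duplicates are removed.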
+func (operation *transactionOperationWrapper) Participants() ([]xdr.AccountId, error) {
+ participants := []xdr.AccountId{}
+ participants = append(participants, operation.SourceAccount().ToAccountId())
+ op := operation.operation
+
+ switch operation.OperationType() {
+ case xdr.OperationTypeCreateAccount:
+ participants = append(participants, op.Body.MustCreateAccountOp().Destination)
+ case xdr.OperationTypePayment:
+ participants = append(participants, op.Body.MustPaymentOp().Destination.ToAccountId())
+ case xdr.OperationTypePathPaymentStrictReceive:
+ participants = append(participants, op.Body.MustPathPaymentStrictReceiveOp().Destination.ToAccountId())
+ case xdr.OperationTypePathPaymentStrictSend:
+ participants = append(participants, op.Body.MustPathPaymentStrictSendOp().Destination.ToAccountId())
+ case xdr.OperationTypeManageBuyOffer:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeManageSellOffer:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeCreatePassiveSellOffer:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeSetOptions:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeChangeTrust:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeAllowTrust:
+ participants = append(participants, op.Body.MustAllowTrustOp().Trustor)
+ case xdr.OperationTypeAccountMerge:
+ participants = append(participants, op.Body.MustDestination().ToAccountId())
+ case xdr.OperationTypeInflation:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeManageData:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeBumpSequence:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeCreateClaimableBalance:
+ for _, c := range op.Body.MustCreateClaimableBalanceOp().Claimants {
+ participants = append(participants, c.MustV0().Destination)
+ }
+ case xdr.OperationTypeClaimClaimableBalance:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeBeginSponsoringFutureReserves:
+ participants = append(participants, op.Body.MustBeginSponsoringFutureReservesOp().SponsoredId)
+ case xdr.OperationTypeEndSponsoringFutureReserves:
+ beginSponsorshipOp := operation.findInitatingBeginSponsoringOp()
+ if beginSponsorshipOp != nil {
+ participants = append(participants, beginSponsorshipOp.SourceAccount().ToAccountId())
+ }
+ case xdr.OperationTypeRevokeSponsorship:
+ op := operation.operation.Body.MustRevokeSponsorshipOp()
+ switch op.Type {
+ case xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry:
+ participants = append(participants, getLedgerKeyParticipants(*op.LedgerKey)...)
+ case xdr.RevokeSponsorshipTypeRevokeSponsorshipSigner:
+ participants = append(participants, op.Signer.AccountId)
+ // We add the account whose signer is being revoked, but not the signer key itself:
+ // a signer can be an arbitrary account, and including it would let anyone spam the
+ // successful operations history of that account.
+ }
+ case xdr.OperationTypeClawback:
+ op := operation.operation.Body.MustClawbackOp()
+ participants = append(participants, op.From.ToAccountId())
+ case xdr.OperationTypeClawbackClaimableBalance:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeSetTrustLineFlags:
+ op := operation.operation.Body.MustSetTrustLineFlagsOp()
+ participants = append(participants, op.Trustor)
+ case xdr.OperationTypeLiquidityPoolDeposit:
+ // the only direct participant is the source_account
+ case xdr.OperationTypeLiquidityPoolWithdraw:
+ // the only direct participant is the source_account
+ default:
+ return participants, fmt.Errorf("Unknown operation type: %s", op.Body.Type)
+ }
+
+ sponsor, err := operation.getSponsor()
+ if err != nil {
+ return nil, err
+ }
+ if sponsor != nil {
+ participants = append(participants, *sponsor)
+ }
+
+ return dedupeParticipants(participants), nil
+}
+
+// dedupeParticipants removes any duplicate ids from `in`
+func dedupeParticipants(in []xdr.AccountId) (out []xdr.AccountId) {
+ set := map[string]xdr.AccountId{}
+ for _, id := range in {
+ set[id.Address()] = id
+ }
+
+ for _, id := range set {
+ out = append(out, id)
+ }
+ return
+}
+
+// operationsParticipants returns a map from operation ID to that operation's participants
+func operationsParticipants(transaction ingest.LedgerTransaction, sequence uint32) (map[int64][]xdr.AccountId, error) {
+ participants := map[int64][]xdr.AccountId{}
+
+ for opi, op := range transaction.Envelope.Operations() {
+ operation := transactionOperationWrapper{
+ index: uint32(opi),
+ transaction: transaction,
+ operation: op,
+ ledgerSequence: sequence,
+ }
+
+ p, err := operation.Participants()
+ if err != nil {
+ return participants, errors.Wrapf(err, "reading operation %v participants", operation.ID())
+ }
+ participants[operation.ID()] = p
+ }
+
+ return participants, nil
+}
diff --git a/services/horizon/internal/ingest/processors/operations_processor_test.go b/services/horizon/internal/ingest/processors/operations_processor_test.go
new file mode 100644
index 0000000000..79b94b1f7f
--- /dev/null
+++ b/services/horizon/internal/ingest/processors/operations_processor_test.go
@@ -0,0 +1,159 @@
+//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite
+
+package processors
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/guregu/null"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+
+ "github.com/stellar/go/ingest"
+ "github.com/stellar/go/services/horizon/internal/db2/history"
+ "github.com/stellar/go/support/errors"
+ "github.com/stellar/go/xdr"
+)
+
+type OperationsProcessorTestSuiteLedger struct {
+ suite.Suite
+ ctx context.Context
+ processor *OperationProcessor
+ mockQ *history.MockQOperations
+ mockBatchInsertBuilder *history.MockOperationsBatchInsertBuilder
+}
+
+func TestOperationProcessorTestSuiteLedger(t *testing.T) {
+ suite.Run(t, new(OperationsProcessorTestSuiteLedger))
+}
+
+func (s *OperationsProcessorTestSuiteLedger) SetupTest() {
+ s.ctx = context.Background()
+ s.mockQ = &history.MockQOperations{}
+ s.mockBatchInsertBuilder = &history.MockOperationsBatchInsertBuilder{}
+ s.mockQ.
+ On("NewOperationBatchInsertBuilder", maxBatchSize).
+ Return(s.mockBatchInsertBuilder).Once() + + s.processor = NewOperationProcessor( + s.mockQ, + 56, + ) +} + +func (s *OperationsProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) + s.mockBatchInsertBuilder.AssertExpectations(s.T()) +} + +func (s *OperationsProcessorTestSuiteLedger) mockBatchInsertAdds(txs []ingest.LedgerTransaction, sequence uint32) error { + for _, t := range txs { + for i, op := range t.Envelope.Operations() { + expected := transactionOperationWrapper{ + index: uint32(i), + transaction: t, + operation: op, + ledgerSequence: sequence, + } + details, err := expected.Details() + if err != nil { + return err + } + detailsJSON, err := json.Marshal(details) + if err != nil { + return err + } + + source := expected.SourceAccount() + acID := source.ToAccountId() + var muxedAccount null.String + if source.Type == xdr.CryptoKeyTypeKeyTypeMuxedEd25519 { + muxedAccount = null.StringFrom(source.Address()) + } + s.mockBatchInsertBuilder.On( + "Add", + s.ctx, + expected.ID(), + expected.TransactionID(), + expected.Order(), + expected.OperationType(), + detailsJSON, + acID.Address(), + muxedAccount, + ).Return(nil).Once() + } + } + + return nil +} + +func (s *OperationsProcessorTestSuiteLedger) TestAddOperationSucceeds() { + unmuxed := xdr.MustAddress("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2") + muxed := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xdeadbeefdeadbeef, + Ed25519: *unmuxed.Ed25519, + }, + } + firstTx := createTransaction(true, 1) + firstTx.Index = 1 + firstTx.Envelope.Operations()[0].Body = xdr.OperationBody{ + Type: xdr.OperationTypePayment, + PaymentOp: &xdr.PaymentOp{ + Destination: muxed, + Asset: xdr.Asset{Type: xdr.AssetTypeAssetTypeNative}, + Amount: 100, + }, + } + firstTx.Envelope.V1.Tx.SourceAccount = muxed + secondTx := createTransaction(false, 3) + thirdTx := createTransaction(true, 4) + + txs := []ingest.LedgerTransaction{ + firstTx, + secondTx, + thirdTx, + } + + var err error + + err = s.mockBatchInsertAdds(txs, uint32(56)) + s.Assert().NoError(err) + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) + + for _, tx := range txs { + err = s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } +} + +func (s *OperationsProcessorTestSuiteLedger) TestAddOperationFails() { + tx := createTransaction(true, 1) + + s.mockBatchInsertBuilder. 
+ On( + "Add", s.ctx, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + ).Return(errors.New("transient error")).Once() + + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error batch inserting operation rows: transient error") +} + +func (s *OperationsProcessorTestSuiteLedger) TestExecFails() { + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(errors.New("transient error")).Once() + err := s.processor.Commit(s.ctx) + s.Assert().Error(err) + s.Assert().EqualError(err, "transient error") +} diff --git a/services/horizon/internal/ingest/processors/participants_processor.go b/services/horizon/internal/ingest/processors/participants_processor.go new file mode 100644 index 0000000000..50f4187132 --- /dev/null +++ b/services/horizon/internal/ingest/processors/participants_processor.go @@ -0,0 +1,294 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// ParticipantsProcessor is a processor which ingests various participants +// from different sources (transactions, operations, etc) +type ParticipantsProcessor struct { + participantsQ history.QParticipants + sequence uint32 + participantSet map[string]participant +} + +func NewParticipantsProcessor(participantsQ history.QParticipants, sequence uint32) *ParticipantsProcessor { + return &ParticipantsProcessor{ + participantsQ: participantsQ, + sequence: sequence, + participantSet: map[string]participant{}, + } +} + +type participant struct { + accountID int64 + transactionSet map[int64]struct{} + operationSet map[int64]struct{} +} + +func (p *participant) addTransactionID(id int64) { + if p.transactionSet == nil { + p.transactionSet = map[int64]struct{}{} + } + p.transactionSet[id] = struct{}{} +} + +func (p *participant) addOperationID(id int64) { + if p.operationSet == nil { + p.operationSet = map[int64]struct{}{} + } + p.operationSet[id] = struct{}{} +} + +func (p *ParticipantsProcessor) loadAccountIDs(ctx context.Context, participantSet map[string]participant) error { + addresses := make([]string, 0, len(participantSet)) + for address := range participantSet { + addresses = append(addresses, address) + } + + addressToID, err := p.participantsQ.CreateAccounts(ctx, addresses, maxBatchSize) + if err != nil { + return errors.Wrap(err, "Could not create account ids") + } + + for _, address := range addresses { + id, ok := addressToID[address] + if !ok { + return errors.Errorf("no id found for account address %s", address) + } + + participantForAddress := participantSet[address] + participantForAddress.accountID = id + participantSet[address] = participantForAddress + } + + return nil +} + +func participantsForChanges( + changes xdr.LedgerEntryChanges, +) ([]xdr.AccountId, error) { + var participants []xdr.AccountId + + for _, c := range changes { + var participant *xdr.AccountId + + switch c.Type { + case xdr.LedgerEntryChangeTypeLedgerEntryCreated: + participant = participantsForLedgerEntry(c.MustCreated()) + case xdr.LedgerEntryChangeTypeLedgerEntryRemoved: + participant = participantsForLedgerKey(c.MustRemoved()) + case xdr.LedgerEntryChangeTypeLedgerEntryUpdated: + participant = 
participantsForLedgerEntry(c.MustUpdated()) + case xdr.LedgerEntryChangeTypeLedgerEntryState: + participant = participantsForLedgerEntry(c.MustState()) + default: + return nil, errors.Errorf("Unknown change type: %s", c.Type) + } + + if participant != nil { + participants = append(participants, *participant) + } + } + + return participants, nil +} + +func participantsForLedgerEntry(le xdr.LedgerEntry) *xdr.AccountId { + if le.Data.Type != xdr.LedgerEntryTypeAccount { + return nil + } + aid := le.Data.MustAccount().AccountId + return &aid +} + +func participantsForLedgerKey(lk xdr.LedgerKey) *xdr.AccountId { + if lk.Type != xdr.LedgerEntryTypeAccount { + return nil + } + aid := lk.MustAccount().AccountId + return &aid +} + +func participantsForMeta( + meta xdr.TransactionMeta, +) ([]xdr.AccountId, error) { + var participants []xdr.AccountId + if meta.Operations == nil { + return participants, nil + } + + for _, op := range *meta.Operations { + var accounts []xdr.AccountId + accounts, err := participantsForChanges(op.Changes) + if err != nil { + return nil, err + } + + participants = append(participants, accounts...) + } + + return participants, nil +} + +func participantsForTransaction( + sequence uint32, + transaction ingest.LedgerTransaction, +) ([]xdr.AccountId, error) { + participants := []xdr.AccountId{ + transaction.Envelope.SourceAccount().ToAccountId(), + } + if transaction.Envelope.IsFeeBump() { + participants = append(participants, transaction.Envelope.FeeBumpAccount().ToAccountId()) + } + + p, err := participantsForMeta(transaction.UnsafeMeta) + if err != nil { + return nil, err + } + participants = append(participants, p...) + + p, err = participantsForChanges(transaction.FeeChanges) + if err != nil { + return nil, err + } + participants = append(participants, p...) + + for opi, op := range transaction.Envelope.Operations() { + operation := transactionOperationWrapper{ + index: uint32(opi), + transaction: transaction, + operation: op, + ledgerSequence: sequence, + } + + p, err := operation.Participants() + if err != nil { + return nil, errors.Wrapf( + err, "could not determine operation %v participants", operation.ID(), + ) + } + participants = append(participants, p...) 
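+ // operation.Participants() already includes the operation's source account and its sponsor
+ // (if any); dedupeParticipants below removes duplicates across the whole transaction.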
+ } + + return dedupeParticipants(participants), nil +} + +func (p *ParticipantsProcessor) addTransactionParticipants( + participantSet map[string]participant, + sequence uint32, + transaction ingest.LedgerTransaction, +) error { + transactionID := toid.New(int32(sequence), int32(transaction.Index), 0).ToInt64() + transactionParticipants, err := participantsForTransaction( + sequence, + transaction, + ) + if err != nil { + return errors.Wrap(err, "Could not determine participants for transaction") + } + + for _, participant := range transactionParticipants { + address := participant.Address() + entry := participantSet[address] + entry.addTransactionID(transactionID) + participantSet[address] = entry + } + + return nil +} + +func (p *ParticipantsProcessor) addOperationsParticipants( + participantSet map[string]participant, + sequence uint32, + transaction ingest.LedgerTransaction, +) error { + participants, err := operationsParticipants(transaction, sequence) + if err != nil { + return errors.Wrap(err, "could not determine operation participants") + } + + for operationID, p := range participants { + for _, participant := range p { + address := participant.Address() + entry := participantSet[address] + entry.addOperationID(operationID) + participantSet[address] = entry + } + } + + return nil +} + +func (p *ParticipantsProcessor) insertDBTransactionParticipants(ctx context.Context, participantSet map[string]participant) error { + batch := p.participantsQ.NewTransactionParticipantsBatchInsertBuilder(maxBatchSize) + + for _, entry := range participantSet { + for transactionID := range entry.transactionSet { + if err := batch.Add(ctx, transactionID, entry.accountID); err != nil { + return errors.Wrap(err, "Could not insert transaction participant in db") + } + } + } + + if err := batch.Exec(ctx); err != nil { + return errors.Wrap(err, "Could not flush transaction participants to db") + } + return nil +} + +func (p *ParticipantsProcessor) insertDBOperationsParticipants(ctx context.Context, participantSet map[string]participant) error { + batch := p.participantsQ.NewOperationParticipantBatchInsertBuilder(maxBatchSize) + + for _, entry := range participantSet { + for operationID := range entry.operationSet { + if err := batch.Add(ctx, operationID, entry.accountID); err != nil { + return errors.Wrap(err, "could not insert operation participant in db") + } + } + } + + if err := batch.Exec(ctx); err != nil { + return errors.Wrap(err, "could not flush operation participants to db") + } + return nil +} + +func (p *ParticipantsProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) (err error) { + err = p.addTransactionParticipants(p.participantSet, p.sequence, transaction) + if err != nil { + return err + } + + err = p.addOperationsParticipants(p.participantSet, p.sequence, transaction) + if err != nil { + return err + } + + return nil +} + +func (p *ParticipantsProcessor) Commit(ctx context.Context) (err error) { + if len(p.participantSet) > 0 { + if err = p.loadAccountIDs(ctx, p.participantSet); err != nil { + return err + } + + if err = p.insertDBTransactionParticipants(ctx, p.participantSet); err != nil { + return err + } + + if err = p.insertDBOperationsParticipants(ctx, p.participantSet); err != nil { + return err + } + } + + return err +} diff --git a/services/horizon/internal/ingest/processors/participants_processor_test.go b/services/horizon/internal/ingest/processors/participants_processor_test.go new file mode 100644 index 0000000000..cf6fcc5f6d --- /dev/null 
+++ b/services/horizon/internal/ingest/processors/participants_processor_test.go @@ -0,0 +1,363 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type ParticipantsProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *ParticipantsProcessor + mockQ *history.MockQParticipants + mockBatchInsertBuilder *history.MockTransactionParticipantsBatchInsertBuilder + mockOperationsBatchInsertBuilder *history.MockOperationParticipantBatchInsertBuilder + + firstTx ingest.LedgerTransaction + secondTx ingest.LedgerTransaction + thirdTx ingest.LedgerTransaction + firstTxID int64 + secondTxID int64 + thirdTxID int64 + addresses []string + addressToID map[string]int64 + txs []ingest.LedgerTransaction +} + +func TestParticipantsProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(ParticipantsProcessorTestSuiteLedger)) +} + +func (s *ParticipantsProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQParticipants{} + s.mockBatchInsertBuilder = &history.MockTransactionParticipantsBatchInsertBuilder{} + s.mockOperationsBatchInsertBuilder = &history.MockOperationParticipantBatchInsertBuilder{} + sequence := uint32(20) + + s.addresses = []string{ + "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "GAXI33UCLQTCKM2NMRBS7XYBR535LLEVAHL5YBN4FTCB4HZHT7ZA5CVK", + "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + } + + s.firstTx = createTransaction(true, 1) + s.firstTx.Index = 1 + aid := xdr.MustAddress(s.addresses[0]) + s.firstTx.Envelope.V1.Tx.SourceAccount = aid.ToMuxedAccount() + s.firstTxID = toid.New(int32(sequence), 1, 0).ToInt64() + + s.secondTx = createTransaction(true, 1) + s.secondTx.Index = 2 + s.secondTx.Envelope.Operations()[0].Body = xdr.OperationBody{ + Type: xdr.OperationTypeCreateAccount, + CreateAccountOp: &xdr.CreateAccountOp{ + Destination: xdr.MustAddress(s.addresses[1]), + }, + } + aid = xdr.MustAddress(s.addresses[2]) + s.secondTx.Envelope.V1.Tx.SourceAccount = aid.ToMuxedAccount() + s.secondTxID = toid.New(int32(sequence), 2, 0).ToInt64() + + s.thirdTx = createTransaction(true, 1) + s.thirdTx.Index = 3 + aid = xdr.MustAddress(s.addresses[0]) + s.thirdTx.Envelope.V1.Tx.SourceAccount = aid.ToMuxedAccount() + s.thirdTxID = toid.New(int32(sequence), 3, 0).ToInt64() + + s.addressToID = map[string]int64{ + s.addresses[0]: 2, + s.addresses[1]: 20, + s.addresses[2]: 200, + } + + s.processor = NewParticipantsProcessor( + s.mockQ, + sequence, + ) + + s.txs = []ingest.LedgerTransaction{ + s.firstTx, + s.secondTx, + s.thirdTx, + } +} + +func (s *ParticipantsProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) + s.mockBatchInsertBuilder.AssertExpectations(s.T()) + s.mockOperationsBatchInsertBuilder.AssertExpectations(s.T()) +} + +func (s *ParticipantsProcessorTestSuiteLedger) mockSuccessfulTransactionBatchAdds() { + s.mockBatchInsertBuilder.On( + "Add", s.ctx, s.firstTxID, s.addressToID[s.addresses[0]], + ).Return(nil).Once() + + s.mockBatchInsertBuilder.On( + "Add", s.ctx, s.secondTxID, s.addressToID[s.addresses[1]], + ).Return(nil).Once() + s.mockBatchInsertBuilder.On( + 
"Add", s.ctx, s.secondTxID, s.addressToID[s.addresses[2]], + ).Return(nil).Once() + + s.mockBatchInsertBuilder.On( + "Add", s.ctx, s.thirdTxID, s.addressToID[s.addresses[0]], + ).Return(nil).Once() +} + +func (s *ParticipantsProcessorTestSuiteLedger) mockSuccessfulOperationBatchAdds() { + s.mockOperationsBatchInsertBuilder.On( + "Add", s.ctx, s.firstTxID+1, s.addressToID[s.addresses[0]], + ).Return(nil).Once() + s.mockOperationsBatchInsertBuilder.On( + "Add", s.ctx, s.secondTxID+1, s.addressToID[s.addresses[1]], + ).Return(nil).Once() + s.mockOperationsBatchInsertBuilder.On( + "Add", s.ctx, s.secondTxID+1, s.addressToID[s.addresses[2]], + ).Return(nil).Once() + s.mockOperationsBatchInsertBuilder.On( + "Add", s.ctx, s.thirdTxID+1, s.addressToID[s.addresses[0]], + ).Return(nil).Once() +} +func (s *ParticipantsProcessorTestSuiteLedger) TestEmptyParticipants() { + err := s.processor.Commit(s.ctx) + s.Assert().NoError(err) +} + +func (s *ParticipantsProcessorTestSuiteLedger) TestFeeBumptransaction() { + feeBumpTx := createTransaction(true, 0) + feeBumpTx.Index = 1 + aid := xdr.MustAddress(s.addresses[0]) + feeBumpTx.Envelope.V1.Tx.SourceAccount = aid.ToMuxedAccount() + aid = xdr.MustAddress(s.addresses[1]) + feeBumpTx.Envelope.FeeBump = &xdr.FeeBumpTransactionEnvelope{ + Tx: xdr.FeeBumpTransaction{ + FeeSource: aid.ToMuxedAccount(), + Fee: 100, + InnerTx: xdr.FeeBumpTransactionInnerTx{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: feeBumpTx.Envelope.V1, + }, + }, + } + feeBumpTx.Envelope.V1 = nil + feeBumpTx.Envelope.Type = xdr.EnvelopeTypeEnvelopeTypeTxFeeBump + feeBumpTx.Result.Result.Result.Code = xdr.TransactionResultCodeTxFeeBumpInnerSuccess + feeBumpTx.Result.Result.Result.InnerResultPair = &xdr.InnerTransactionResultPair{ + Result: xdr.InnerTransactionResult{ + Result: xdr.InnerTransactionResultResult{ + Code: xdr.TransactionResultCodeTxSuccess, + Results: &[]xdr.OperationResult{}, + }, + }, + } + feeBumpTx.Result.Result.Result.Results = nil + feeBumpTxID := toid.New(20, 1, 0).ToInt64() + + addresses := s.addresses[:2] + addressToID := map[string]int64{ + addresses[0]: s.addressToID[addresses[0]], + addresses[1]: s.addressToID[addresses[1]], + } + s.mockQ.On("CreateAccounts", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). + Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch( + addresses, + arg, + ) + }).Return(addressToID, nil).Once() + s.mockQ.On("NewTransactionParticipantsBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + s.mockQ.On("NewOperationParticipantBatchInsertBuilder", maxBatchSize). + Return(s.mockOperationsBatchInsertBuilder).Once() + + s.mockBatchInsertBuilder.On( + "Add", s.ctx, feeBumpTxID, addressToID[addresses[0]], + ).Return(nil).Once() + s.mockBatchInsertBuilder.On( + "Add", s.ctx, feeBumpTxID, addressToID[addresses[1]], + ).Return(nil).Once() + + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + s.mockOperationsBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + + s.Assert().NoError(s.processor.ProcessTransaction(s.ctx, feeBumpTx)) + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *ParticipantsProcessorTestSuiteLedger) TestIngestParticipantsSucceeds() { + s.mockQ.On("CreateAccounts", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). 
+ Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch( + s.addresses, + arg, + ) + }).Return(s.addressToID, nil).Once() + s.mockQ.On("NewTransactionParticipantsBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + s.mockQ.On("NewOperationParticipantBatchInsertBuilder", maxBatchSize). + Return(s.mockOperationsBatchInsertBuilder).Once() + + s.mockSuccessfulTransactionBatchAdds() + s.mockSuccessfulOperationBatchAdds() + + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + s.mockOperationsBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } + err := s.processor.Commit(s.ctx) + s.Assert().NoError(err) +} + +func (s *ParticipantsProcessorTestSuiteLedger) TestCreateAccountsFails() { + s.mockQ.On("CreateAccounts", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). + Return(s.addressToID, errors.New("transient error")).Once() + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } + err := s.processor.Commit(s.ctx) + s.Assert().EqualError(err, "Could not create account ids: transient error") +} + +func (s *ParticipantsProcessorTestSuiteLedger) TestBatchAddFails() { + s.mockQ.On("CreateAccounts", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). + Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch( + s.addresses, + arg, + ) + }).Return(s.addressToID, nil).Once() + s.mockQ.On("NewTransactionParticipantsBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + + s.mockBatchInsertBuilder.On( + "Add", s.ctx, s.firstTxID, s.addressToID[s.addresses[0]], + ).Return(errors.New("transient error")).Once() + + s.mockBatchInsertBuilder.On( + "Add", s.ctx, s.secondTxID, s.addressToID[s.addresses[1]], + ).Return(nil).Maybe() + s.mockBatchInsertBuilder.On( + "Add", s.ctx, s.secondTxID, s.addressToID[s.addresses[2]], + ).Return(nil).Maybe() + + s.mockBatchInsertBuilder.On( + "Add", s.ctx, s.thirdTxID, s.addressToID[s.addresses[0]], + ).Return(nil).Maybe() + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } + err := s.processor.Commit(s.ctx) + s.Assert().EqualError(err, "Could not insert transaction participant in db: transient error") +} + +func (s *ParticipantsProcessorTestSuiteLedger) TestOperationParticipantsBatchAddFails() { + s.mockQ.On("CreateAccounts", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). + Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch( + s.addresses, + arg, + ) + }).Return(s.addressToID, nil).Once() + s.mockQ.On("NewTransactionParticipantsBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + s.mockQ.On("NewOperationParticipantBatchInsertBuilder", maxBatchSize). 
+ Return(s.mockOperationsBatchInsertBuilder).Once() + + s.mockSuccessfulTransactionBatchAdds() + + s.mockOperationsBatchInsertBuilder.On( + "Add", s.ctx, s.firstTxID+1, s.addressToID[s.addresses[0]], + ).Return(errors.New("transient error")).Once() + s.mockOperationsBatchInsertBuilder.On( + "Add", s.ctx, s.secondTxID+1, s.addressToID[s.addresses[1]], + ).Return(nil).Maybe() + s.mockOperationsBatchInsertBuilder.On( + "Add", s.ctx, s.secondTxID+1, s.addressToID[s.addresses[2]], + ).Return(nil).Maybe() + s.mockOperationsBatchInsertBuilder.On( + "Add", s.ctx, s.thirdTxID+1, s.addressToID[s.addresses[0]], + ).Return(nil).Maybe() + + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } + err := s.processor.Commit(s.ctx) + s.Assert().EqualError(err, "could not insert operation participant in db: transient error") +} + +func (s *ParticipantsProcessorTestSuiteLedger) TestBatchAddExecFails() { + s.mockQ.On("CreateAccounts", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). + Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch( + s.addresses, + arg, + ) + }).Return(s.addressToID, nil).Once() + s.mockQ.On("NewTransactionParticipantsBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + + s.mockSuccessfulTransactionBatchAdds() + + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(errors.New("transient error")).Once() + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } + err := s.processor.Commit(s.ctx) + s.Assert().EqualError(err, "Could not flush transaction participants to db: transient error") +} + +func (s *ParticipantsProcessorTestSuiteLedger) TestOpeartionBatchAddExecFails() { + s.mockQ.On("CreateAccounts", s.ctx, mock.AnythingOfType("[]string"), maxBatchSize). + Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch( + s.addresses, + arg, + ) + }).Return(s.addressToID, nil).Once() + s.mockQ.On("NewTransactionParticipantsBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + s.mockQ.On("NewOperationParticipantBatchInsertBuilder", maxBatchSize). 
+ Return(s.mockOperationsBatchInsertBuilder).Once() + + s.mockSuccessfulTransactionBatchAdds() + s.mockSuccessfulOperationBatchAdds() + + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + s.mockOperationsBatchInsertBuilder.On("Exec", s.ctx).Return(errors.New("transient error")).Once() + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(s.ctx, tx) + s.Assert().NoError(err) + } + err := s.processor.Commit(s.ctx) + s.Assert().EqualError(err, "could not flush operation participants to db: transient error") +} diff --git a/services/horizon/internal/ingest/processors/participants_test.go b/services/horizon/internal/ingest/processors/participants_test.go new file mode 100644 index 0000000000..73684c0555 --- /dev/null +++ b/services/horizon/internal/ingest/processors/participants_test.go @@ -0,0 +1,66 @@ +package processors + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/xdr" +) + +func TestParticipantsForTransaction(t *testing.T) { + var envelope xdr.TransactionEnvelope + var meta xdr.TransactionMeta + var feeChanges xdr.LedgerEntryChanges + assert.NoError( + t, + xdr.SafeUnmarshalBase64( + "AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAQAAAAAAAABkAAAAAF4L0vAAAAAAAAAAAQAAAAAAAAAAAAAAAC6N7oJcJiUzTWRDL98Bj3fVrJUB19wFvCzEHh8nn/IOAAAAAlQL5AAAAAAAAAAAAVb8BfcAAABA8CyjzEXXVTMwnZTAbHfJeq2HCFzAWkU98ds2ZXFqjXR4EiN0YDSAb/pJwXc0TjMa//SiX83UvUFSqLa8hOXICQ==", + &envelope, + ), + ) + assert.NoError( + t, + xdr.SafeUnmarshalBase64( + "AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBucAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAAAAAAAuje6CXCYlM01kQy/fAY931ayVAdfcBbwsxB4fJ5/yDgAAAAJUC+QAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + &meta, + ), + ) + assert.NoError( + t, + xdr.SafeUnmarshalBase64( + "AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + &feeChanges, + ), + ) + + particpants, err := participantsForTransaction( + 3, + ingest.LedgerTransaction{ + Index: 1, + Envelope: envelope, + FeeChanges: feeChanges, + UnsafeMeta: meta, + Result: xdr.TransactionResultPair{ + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxSuccess, + }, + }, + }, + }, + ) + assert.NoError(t, err) + assert.Len(t, particpants, 2) + assert.Contains( + t, + particpants, + xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + ) + assert.Contains( + t, + particpants, + xdr.MustAddress("GAXI33UCLQTCKM2NMRBS7XYBR535LLEVAHL5YBN4FTCB4HZHT7ZA5CVK"), + ) +} diff --git a/services/horizon/internal/ingest/processors/signer_processor_test.go b/services/horizon/internal/ingest/processors/signer_processor_test.go new file mode 100644 index 0000000000..de5c70bddc --- /dev/null +++ b/services/horizon/internal/ingest/processors/signer_processor_test.go @@ -0,0 +1,676 @@ 
+//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/guregu/null" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/suite" +) + +func TestAccountsSignerProcessorTestSuiteState(t *testing.T) { + suite.Run(t, new(AccountsSignerProcessorTestSuiteState)) +} + +type AccountsSignerProcessorTestSuiteState struct { + suite.Suite + ctx context.Context + processor *SignersProcessor + mockQ *history.MockQSigners + mockBatchInsertBuilder *history.MockAccountSignersBatchInsertBuilder +} + +func (s *AccountsSignerProcessorTestSuiteState) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQSigners{} + s.mockBatchInsertBuilder = &history.MockAccountSignersBatchInsertBuilder{} + + s.mockQ. + On("NewAccountSignersBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + + s.processor = NewSignersProcessor(s.mockQ, false) +} + +func (s *AccountsSignerProcessorTestSuiteState) TearDownTest() { + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) + + s.mockQ.AssertExpectations(s.T()) + s.mockBatchInsertBuilder.AssertExpectations(s.T()) +} + +func (s *AccountsSignerProcessorTestSuiteState) TestNoEntries() { + // Nothing processed, assertions in TearDownTest. +} + +func (s *AccountsSignerProcessorTestSuiteState) TestCreatesSigners() { + s.mockBatchInsertBuilder. + On("Add", s.ctx, history.AccountSigner{ + Account: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + Signer: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + Weight: int32(1), + }).Return(nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + }) + s.Assert().NoError(err) + + s.mockBatchInsertBuilder. + On("Add", s.ctx, history.AccountSigner{ + Account: "GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX", + Signer: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + Weight: int32(10), + }).Return(nil).Once() + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Weight: 10, + }, + }, + }, + }, + }, + }) + s.Assert().NoError(err) + +} + +func (s *AccountsSignerProcessorTestSuiteState) TestCreatesSignerWithSponsor() { + s.mockBatchInsertBuilder. 
+ On("Add", s.ctx, history.AccountSigner{ + Account: "GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX", + Signer: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + Weight: int32(10), + Sponsor: null.StringFrom("GDWZ6MKJP5ESVIB7O5RW4UFFGSCDILPEKDXWGG4HXXSHEZZPTKLR6UVG"), + }).Return(nil).Once() + + sponsorshipDescriptor := xdr.MustAddress("GDWZ6MKJP5ESVIB7O5RW4UFFGSCDILPEKDXWGG4HXXSHEZZPTKLR6UVG") + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GCCCU34WDY2RATQTOOQKY6SZWU6J5DONY42SWGW2CIXGW4LICAGNRZKX"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Weight: 10, + }, + }, + Ext: xdr.AccountEntryExt{ + V: 1, + V1: &xdr.AccountEntryExtensionV1{ + Ext: xdr.AccountEntryExtensionV1Ext{ + V: 2, + V2: &xdr.AccountEntryExtensionV2{ + NumSponsored: 1, + NumSponsoring: 0, + SignerSponsoringIDs: []xdr.SponsorshipDescriptor{ + &sponsorshipDescriptor, + }, + }, + }, + }, + }, + }, + }, + }, + }) + s.Assert().NoError(err) +} + +func TestAccountsSignerProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(AccountsSignerProcessorTestSuiteLedger)) +} + +type AccountsSignerProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *SignersProcessor + mockQ *history.MockQSigners +} + +func (s *AccountsSignerProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQSigners{} + s.mockQ. + On("NewAccountSignersBatchInsertBuilder", maxBatchSize). + Return(&history.MockAccountSignersBatchInsertBuilder{}).Once() + + s.processor = NewSignersProcessor(s.mockQ, true) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TestNoTransactions() { + // Nothing processed, assertions in TearDownTest. + s.Assert().NoError(s.processor.Commit(context.Background())) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TestNewAccount() { + s.mockQ. + On( + "CreateAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + int32(1), + (*string)(nil), + ). 
+ Return(int64(1), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + }) + s.Assert().NoError(err) + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TestNoUpdatesWhenNoSignerChanges() { + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + }) + s.Assert().NoError(err) + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TestNewSigner() { + // Remove old signer + s.mockQ. + On( + "RemoveAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + ). + Return(int64(1), nil).Once() + + // Create new and old signer + s.mockQ. + On( + "CreateAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + int32(10), + (*string)(nil), + ). + Return(int64(1), nil).Once() + + s.mockQ. + On( + "CreateAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS", + int32(15), + (*string)(nil), + ). + Return(int64(1), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + { + Key: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + Weight: 15, + }, + }, + }, + }, + }, + }) + s.Assert().NoError(err) + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TestSignerRemoved() { + // Remove old signers + s.mockQ. + On( + "RemoveAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + ). + Return(int64(1), nil).Once() + + s.mockQ. 
+ On( + "RemoveAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS", + ). + Return(int64(1), nil).Once() + + // Create new signer + s.mockQ. + On( + "CreateAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS", + int32(15), + (*string)(nil), + ). + Return(int64(1), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + { + Key: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + Weight: 15, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + Weight: 15, + }, + }, + }, + }, + }, + }) + s.Assert().NoError(err) + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +// TestSignerPreAuthTxRemovedTxFailed tests if removing preauthorized transaction +// signer works even when tx failed. +func (s *AccountsSignerProcessorTestSuiteLedger) TestSignerPreAuthTxRemovedTxFailed() { + // Remove old signers + s.mockQ. + On( + "RemoveAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + ). + Return(int64(1), nil).Once() + + s.mockQ. + On( + "RemoveAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "TBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHXL7", + ). + Return(int64(1), nil).Once() + + // Create new signer + s.mockQ. + On( + "CreateAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + int32(10), + (*string)(nil), + ). + Return(int64(1), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + { + Key: xdr.MustSigner("TBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHXL7"), + Weight: 15, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + }, + }, + }, + }, + }) + s.Assert().NoError(err) + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TestRemoveAccount() { + s.mockQ. 
+ On( + "RemoveAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + ). + Return(int64(1), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TestNewAccountNoRowsAffected() { + s.mockQ. + On( + "CreateAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + int32(1), + (*string)(nil), + ). + Return(int64(0), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.Commit(s.ctx) + s.Assert().Error(err) + s.Assert().IsType(ingest.StateError{}, errors.Cause(err)) + s.Assert().EqualError( + err, + "0 rows affected when inserting "+ + "account=GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML "+ + "signer=GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML to database", + ) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TestRemoveAccountNoRowsAffected() { + s.mockQ. + On( + "RemoveAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + ). + Return(int64(0), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + err = s.processor.Commit(s.ctx) + s.Assert().Error(err) + s.Assert().IsType(ingest.StateError{}, errors.Cause(err)) + s.Assert().EqualError( + err, + "Expected "+ + "account=GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML "+ + "signer=GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML in database but not found when removing "+ + "(rows affected = 0)", + ) +} + +func (s *AccountsSignerProcessorTestSuiteLedger) TestProcessUpgradeChange() { + // Remove old signer + s.mockQ. + On( + "RemoveAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + ). + Return(int64(1), nil).Once() + + // Create new and old (updated) signer + s.mockQ. + On( + "CreateAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV", + int32(12), + (*string)(nil), + ). + Return(int64(1), nil).Once() + + s.mockQ. 
+ On( + "CreateAccountSigner", + s.ctx, + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS", + int32(15), + (*string)(nil), + ). + Return(int64(1), nil).Once() + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + { + Key: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + Weight: 15, + }, + }, + }, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 1000, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 10, + }, + }, + }, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 1001, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner("GCBBDQLCTNASZJ3MTKAOYEOWRGSHDFAJVI7VPZUOP7KXNHYR3HP2BUKV"), + Weight: 12, + }, + { + Key: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + Weight: 15, + }, + }, + }, + }, + }, + }) + s.Assert().NoError(err) + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func createTransactionMeta(opMeta []xdr.OperationMeta) xdr.TransactionMeta { + return xdr.TransactionMeta{ + V: 1, + V1: &xdr.TransactionMetaV1{ + Operations: opMeta, + }, + } +} diff --git a/services/horizon/internal/ingest/processors/signers_processor.go b/services/horizon/internal/ingest/processors/signers_processor.go new file mode 100644 index 0000000000..77fe54ebc2 --- /dev/null +++ b/services/horizon/internal/ingest/processors/signers_processor.go @@ -0,0 +1,157 @@ +package processors + +import ( + "context" + + "github.com/guregu/null" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type SignersProcessor struct { + signersQ history.QSigners + + cache *ingest.ChangeCompactor + batch history.AccountSignersBatchInsertBuilder + // insertOnlyMode is a mode in which we don't use ledger cache and we just + // add signers to a batch, then we Exec all signers in one insert query. + // This is done to make history buckets processing faster (batch inserting). 
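+	// useLedgerEntryCache toggles between the two modes: false keeps the
+	// insert-only batch path described above, true compacts changes in an
+	// in-memory ledger cache and reconciles signer rows one by one in Commit.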
+	useLedgerEntryCache bool
+}
+
+func NewSignersProcessor(
+	signersQ history.QSigners, useLedgerEntryCache bool,
+) *SignersProcessor {
+	p := &SignersProcessor{signersQ: signersQ, useLedgerEntryCache: useLedgerEntryCache}
+	p.reset()
+	return p
+}
+
+func (p *SignersProcessor) reset() {
+	p.batch = p.signersQ.NewAccountSignersBatchInsertBuilder(maxBatchSize)
+	p.cache = ingest.NewChangeCompactor()
+}
+
+func (p *SignersProcessor) ProcessChange(ctx context.Context, change ingest.Change) error {
+	if change.Type != xdr.LedgerEntryTypeAccount {
+		return nil
+	}
+
+	if p.useLedgerEntryCache {
+		err := p.cache.AddChange(change)
+		if err != nil {
+			return errors.Wrap(err, "error adding to ledgerCache")
+		}
+
+		if p.cache.Size() > maxBatchSize {
+			err = p.Commit(ctx)
+			if err != nil {
+				return errors.Wrap(err, "error in Commit")
+			}
+			p.reset()
+		}
+
+		return nil
+	}
+
+	if !(change.Pre == nil && change.Post != nil) {
+		return errors.New("SignersProcessor is in insert only mode")
+	}
+
+	accountEntry := change.Post.Data.MustAccount()
+	account := accountEntry.AccountId.Address()
+
+	sponsors := accountEntry.SponsorPerSigner()
+	for signer, weight := range accountEntry.SignerSummary() {
+		var sponsor null.String
+		if sponsorDesc, isSponsored := sponsors[signer]; isSponsored {
+			sponsor = null.StringFrom(sponsorDesc.Address())
+		}
+
+		err := p.batch.Add(ctx, history.AccountSigner{
+			Account: account,
+			Signer:  signer,
+			Weight:  weight,
+			Sponsor: sponsor,
+		})
+		if err != nil {
+			return errors.Wrap(err, "Error adding row to accountSignerBatch")
+		}
+	}
+
+	return nil
+}
+
+func (p *SignersProcessor) Commit(ctx context.Context) error {
+	if !p.useLedgerEntryCache {
+		return p.batch.Exec(ctx)
+	}
+
+	changes := p.cache.GetChanges()
+	for _, change := range changes {
+		if !change.AccountSignersChanged() {
+			continue
+		}
+
+		// The code below removes all Pre signers and adds Post signers but
+		// can be improved by finding a diff (check performance first).
+		if change.Pre != nil {
+			preAccountEntry := change.Pre.Data.MustAccount()
+			for signer := range preAccountEntry.SignerSummary() {
+				rowsAffected, err := p.signersQ.RemoveAccountSigner(ctx, preAccountEntry.AccountId.Address(), signer)
+				if err != nil {
+					return errors.Wrap(err, "Error removing a signer")
+				}
+
+				if rowsAffected != 1 {
+					return ingest.NewStateError(errors.Errorf(
+						"Expected account=%s signer=%s in database but not found when removing (rows affected = %d)",
+						preAccountEntry.AccountId.Address(),
+						signer,
+						rowsAffected,
+					))
+				}
+			}
+		}
+
+		if change.Post != nil {
+			postAccountEntry := change.Post.Data.MustAccount()
+			sponsorsPerSigner := postAccountEntry.SponsorPerSigner()
+			for signer, weight := range postAccountEntry.SignerSummary() {
+
+				// Ignore master key
+				var sponsor *string
+				if signer != postAccountEntry.AccountId.Address() {
+					if s, ok := sponsorsPerSigner[signer]; ok {
+						a := s.Address()
+						sponsor = &a
+					}
+				}
+
+				rowsAffected, err := p.signersQ.CreateAccountSigner(ctx,
+					postAccountEntry.AccountId.Address(),
+					signer,
+					weight,
+					sponsor,
+				)
+				if err != nil {
+					return errors.Wrap(err, "Error inserting a signer")
+				}
+
+				if rowsAffected != 1 {
+					return ingest.NewStateError(errors.Errorf(
+						"%d rows affected when inserting account=%s signer=%s to database",
+						rowsAffected,
+						postAccountEntry.AccountId.Address(),
+						signer,
+					))
+				}
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/services/horizon/internal/ingest/processors/stats_ledger_transaction_processor.go b/services/horizon/internal/ingest/processors/stats_ledger_transaction_processor.go
new file mode 100644
index 0000000000..cec7b5ff54
--- /dev/null
+++ b/services/horizon/internal/ingest/processors/stats_ledger_transaction_processor.go
@@ -0,0 +1,166 @@
+package processors
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/stellar/go/ingest"
+	"github.com/stellar/go/xdr"
+)
+
+// StatsLedgerTransactionProcessor is a transaction processor that counts
+// transactions and the operation types they contain.
+type StatsLedgerTransactionProcessor struct {
+	results StatsLedgerTransactionProcessorResults
+}
+
+// StatsLedgerTransactionProcessorResults contains results after running StatsLedgerTransactionProcessor.
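+//
+// A minimal usage sketch (ctx and tx are assumed to be supplied by the
+// caller's ingestion loop):
+//
+//	var p StatsLedgerTransactionProcessor
+//	if err := p.ProcessTransaction(ctx, tx); err != nil {
+//		return err
+//	}
+//	fields := p.GetResults().Map() // log-friendly view of the counters below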
+type StatsLedgerTransactionProcessorResults struct { + Transactions int64 + TransactionsSuccessful int64 + TransactionsFailed int64 + + Operations int64 + OperationsInSuccessful int64 + OperationsInFailed int64 + + OperationsCreateAccount int64 + OperationsPayment int64 + OperationsPathPaymentStrictReceive int64 + OperationsManageSellOffer int64 + OperationsCreatePassiveSellOffer int64 + OperationsSetOptions int64 + OperationsChangeTrust int64 + OperationsAllowTrust int64 + OperationsAccountMerge int64 + OperationsInflation int64 + OperationsManageData int64 + OperationsBumpSequence int64 + OperationsManageBuyOffer int64 + OperationsPathPaymentStrictSend int64 + OperationsCreateClaimableBalance int64 + OperationsClaimClaimableBalance int64 + OperationsBeginSponsoringFutureReserves int64 + OperationsEndSponsoringFutureReserves int64 + OperationsRevokeSponsorship int64 + OperationsClawback int64 + OperationsClawbackClaimableBalance int64 + OperationsSetTrustLineFlags int64 + OperationsLiquidityPoolDeposit int64 + OperationsLiquidityPoolWithdraw int64 +} + +func (p *StatsLedgerTransactionProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) error { + p.results.Transactions++ + ops := int64(len(transaction.Envelope.Operations())) + p.results.Operations += ops + + if transaction.Result.Successful() { + p.results.TransactionsSuccessful++ + p.results.OperationsInSuccessful += ops + + } else { + p.results.TransactionsFailed++ + p.results.OperationsInFailed += ops + } + + for _, op := range transaction.Envelope.Operations() { + switch op.Body.Type { + case xdr.OperationTypeCreateAccount: + p.results.OperationsCreateAccount++ + case xdr.OperationTypePayment: + p.results.OperationsPayment++ + case xdr.OperationTypePathPaymentStrictReceive: + p.results.OperationsPathPaymentStrictReceive++ + case xdr.OperationTypeManageSellOffer: + p.results.OperationsManageSellOffer++ + case xdr.OperationTypeCreatePassiveSellOffer: + p.results.OperationsCreatePassiveSellOffer++ + case xdr.OperationTypeSetOptions: + p.results.OperationsSetOptions++ + case xdr.OperationTypeChangeTrust: + p.results.OperationsChangeTrust++ + case xdr.OperationTypeAllowTrust: + p.results.OperationsAllowTrust++ + case xdr.OperationTypeAccountMerge: + p.results.OperationsAccountMerge++ + case xdr.OperationTypeInflation: + p.results.OperationsInflation++ + case xdr.OperationTypeManageData: + p.results.OperationsManageData++ + case xdr.OperationTypeBumpSequence: + p.results.OperationsBumpSequence++ + case xdr.OperationTypeManageBuyOffer: + p.results.OperationsManageBuyOffer++ + case xdr.OperationTypePathPaymentStrictSend: + p.results.OperationsPathPaymentStrictSend++ + case xdr.OperationTypeCreateClaimableBalance: + p.results.OperationsCreateClaimableBalance++ + case xdr.OperationTypeClaimClaimableBalance: + p.results.OperationsClaimClaimableBalance++ + case xdr.OperationTypeBeginSponsoringFutureReserves: + p.results.OperationsBeginSponsoringFutureReserves++ + case xdr.OperationTypeEndSponsoringFutureReserves: + p.results.OperationsEndSponsoringFutureReserves++ + case xdr.OperationTypeRevokeSponsorship: + p.results.OperationsRevokeSponsorship++ + case xdr.OperationTypeClawback: + p.results.OperationsClawback++ + case xdr.OperationTypeClawbackClaimableBalance: + p.results.OperationsClawbackClaimableBalance++ + case xdr.OperationTypeSetTrustLineFlags: + p.results.OperationsSetTrustLineFlags++ + case xdr.OperationTypeLiquidityPoolDeposit: + p.results.OperationsLiquidityPoolDeposit++ + case 
xdr.OperationTypeLiquidityPoolWithdraw:
+			p.results.OperationsLiquidityPoolWithdraw++
+		default:
+			panic(fmt.Sprintf("Unknown operation type: %d", op.Body.Type))
+		}
+	}
+
+	return nil
+}
+
+func (p *StatsLedgerTransactionProcessor) GetResults() StatsLedgerTransactionProcessorResults {
+	return p.results
+}
+
+func (stats *StatsLedgerTransactionProcessorResults) Map() map[string]interface{} {
+	return map[string]interface{}{
+		"stats_transactions":            stats.Transactions,
+		"stats_transactions_successful": stats.TransactionsSuccessful,
+		"stats_transactions_failed":     stats.TransactionsFailed,
+
+		"stats_operations":               stats.Operations,
+		"stats_operations_in_successful": stats.OperationsInSuccessful,
+		"stats_operations_in_failed":     stats.OperationsInFailed,
+
+		"stats_operations_create_account":                    stats.OperationsCreateAccount,
+		"stats_operations_payment":                           stats.OperationsPayment,
+		"stats_operations_path_payment_strict_receive":       stats.OperationsPathPaymentStrictReceive,
+		"stats_operations_manage_sell_offer":                 stats.OperationsManageSellOffer,
+		"stats_operations_create_passive_sell_offer":         stats.OperationsCreatePassiveSellOffer,
+		"stats_operations_set_options":                       stats.OperationsSetOptions,
+		"stats_operations_change_trust":                      stats.OperationsChangeTrust,
+		"stats_operations_allow_trust":                       stats.OperationsAllowTrust,
+		"stats_operations_account_merge":                     stats.OperationsAccountMerge,
+		"stats_operations_inflation":                         stats.OperationsInflation,
+		"stats_operations_manage_data":                       stats.OperationsManageData,
+		"stats_operations_bump_sequence":                     stats.OperationsBumpSequence,
+		"stats_operations_manage_buy_offer":                  stats.OperationsManageBuyOffer,
+		"stats_operations_path_payment_strict_send":          stats.OperationsPathPaymentStrictSend,
+		"stats_operations_create_claimable_balance":          stats.OperationsCreateClaimableBalance,
+		"stats_operations_claim_claimable_balance":           stats.OperationsClaimClaimableBalance,
+		"stats_operations_begin_sponsoring_future_reserves":  stats.OperationsBeginSponsoringFutureReserves,
+		"stats_operations_end_sponsoring_future_reserves":    stats.OperationsEndSponsoringFutureReserves,
+		"stats_operations_revoke_sponsorship":                stats.OperationsRevokeSponsorship,
+		"stats_operations_clawback":                          stats.OperationsClawback,
+		"stats_operations_clawback_claimable_balance":        stats.OperationsClawbackClaimableBalance,
+		"stats_operations_set_trust_line_flags":              stats.OperationsSetTrustLineFlags,
+		"stats_operations_liquidity_pool_deposit":            stats.OperationsLiquidityPoolDeposit,
+		"stats_operations_liquidity_pool_withdraw":           stats.OperationsLiquidityPoolWithdraw,
+	}
+}
+
+// Ensure the StatsLedgerTransactionProcessor conforms to the LedgerTransactionProcessor interface.
+var _ LedgerTransactionProcessor = (*StatsLedgerTransactionProcessor)(nil) diff --git a/services/horizon/internal/ingest/processors/stats_ledger_transaction_processor_test.go b/services/horizon/internal/ingest/processors/stats_ledger_transaction_processor_test.go new file mode 100644 index 0000000000..ef4f15ce61 --- /dev/null +++ b/services/horizon/internal/ingest/processors/stats_ledger_transaction_processor_test.go @@ -0,0 +1,162 @@ +package processors + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/xdr" +) + +func TestStatsLedgerTransactionProcessoAllOpTypesCovered(t *testing.T) { + txTemplate := ingest.LedgerTransaction{ + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + Operations: []xdr.Operation{ + {Body: xdr.OperationBody{Type: 0}}, + }, + }, + }, + }, + } + for typ, s := range xdr.OperationTypeToStringMap { + tx := txTemplate + txTemplate.Envelope.V1.Tx.Operations[0].Body.Type = xdr.OperationType(typ) + f := func() { + var p StatsLedgerTransactionProcessor + p.ProcessTransaction(context.Background(), tx) + } + assert.NotPanics(t, f, s) + } + + // make sure the check works for an unreasonable operation type + tx := txTemplate + txTemplate.Envelope.V1.Tx.Operations[0].Body.Type = 20000 + f := func() { + var p StatsLedgerTransactionProcessor + p.ProcessTransaction(context.Background(), tx) + } + assert.Panics(t, f) +} + +func TestStatsLedgerTransactionProcessor(t *testing.T) { + processor := &StatsLedgerTransactionProcessor{} + + // Successful + assert.NoError(t, processor.ProcessTransaction(context.Background(), ingest.LedgerTransaction{ + Result: xdr.TransactionResultPair{ + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxSuccess, + }, + }, + }, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + Operations: []xdr.Operation{ + {Body: xdr.OperationBody{Type: xdr.OperationTypeCreateAccount}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypePayment}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypePathPaymentStrictReceive}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeManageSellOffer}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeCreatePassiveSellOffer}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeSetOptions}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeChangeTrust}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeAllowTrust}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeAccountMerge}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeInflation}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeManageData}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeBumpSequence}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeManageBuyOffer}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypePathPaymentStrictSend}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeCreateClaimableBalance}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeClaimClaimableBalance}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeBeginSponsoringFutureReserves}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeEndSponsoringFutureReserves}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeRevokeSponsorship}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeClawback}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeClawbackClaimableBalance}}, + 
}, + }, + }, + }, + })) + + // Failed + assert.NoError(t, processor.ProcessTransaction(context.Background(), ingest.LedgerTransaction{ + Result: xdr.TransactionResultPair{ + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxFailed, + }, + }, + }, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + Operations: []xdr.Operation{ + {Body: xdr.OperationBody{Type: xdr.OperationTypeCreateAccount}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypePayment}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypePathPaymentStrictReceive}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeManageSellOffer}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeCreatePassiveSellOffer}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeSetOptions}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeChangeTrust}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeAllowTrust}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeAccountMerge}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeInflation}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeManageData}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeBumpSequence}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeManageBuyOffer}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypePathPaymentStrictSend}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeCreateClaimableBalance}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeClaimClaimableBalance}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeBeginSponsoringFutureReserves}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeEndSponsoringFutureReserves}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeRevokeSponsorship}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeClawback}}, + {Body: xdr.OperationBody{Type: xdr.OperationTypeClawbackClaimableBalance}}, + }, + }, + }, + }, + })) + + results := processor.GetResults() + + assert.Equal(t, int64(2), results.Transactions) + assert.Equal(t, int64(1), results.TransactionsSuccessful) + assert.Equal(t, int64(1), results.TransactionsFailed) + + assert.Equal(t, int64(21*2), results.Operations) + assert.Equal(t, int64(21), results.OperationsInSuccessful) + assert.Equal(t, int64(21), results.OperationsInFailed) + + assert.Equal(t, int64(2), results.OperationsCreateAccount) + assert.Equal(t, int64(2), results.OperationsPayment) + assert.Equal(t, int64(2), results.OperationsPathPaymentStrictReceive) + assert.Equal(t, int64(2), results.OperationsManageSellOffer) + assert.Equal(t, int64(2), results.OperationsCreatePassiveSellOffer) + assert.Equal(t, int64(2), results.OperationsSetOptions) + assert.Equal(t, int64(2), results.OperationsChangeTrust) + assert.Equal(t, int64(2), results.OperationsAllowTrust) + assert.Equal(t, int64(2), results.OperationsAccountMerge) + assert.Equal(t, int64(2), results.OperationsInflation) + assert.Equal(t, int64(2), results.OperationsManageData) + assert.Equal(t, int64(2), results.OperationsBumpSequence) + assert.Equal(t, int64(2), results.OperationsManageBuyOffer) + assert.Equal(t, int64(2), results.OperationsPathPaymentStrictSend) + assert.Equal(t, int64(2), results.OperationsCreateClaimableBalance) + assert.Equal(t, int64(2), results.OperationsClaimClaimableBalance) + assert.Equal(t, int64(2), results.OperationsBeginSponsoringFutureReserves) + assert.Equal(t, int64(2), results.OperationsEndSponsoringFutureReserves) + assert.Equal(t, int64(2), 
results.OperationsRevokeSponsorship) + assert.Equal(t, int64(2), results.OperationsClawback) + assert.Equal(t, int64(2), results.OperationsClawbackClaimableBalance) +} diff --git a/services/horizon/internal/ingest/processors/synt_offer_id.go b/services/horizon/internal/ingest/processors/synt_offer_id.go new file mode 100644 index 0000000000..7da907a28b --- /dev/null +++ b/services/horizon/internal/ingest/processors/synt_offer_id.go @@ -0,0 +1,40 @@ +//lint:file-ignore U1001 Fails at unused DecodeOfferID but it may be useful in the future. +package processors + +type OfferIDType uint64 + +const ( + CoreOfferIDType OfferIDType = 0 + TOIDType OfferIDType = 1 + + mask uint64 = 0xC000000000000000 +) + +// EncodeOfferId creates synthetic offer ids to be used by trade resources +// +// This is required because stellar-core does not allocate offer ids for immediately filled offers, +// while clients expect them for aggregated views. +// +// The encoded value is of type int64 for sql compatibility. The 2nd bit is used to differentiate between stellar-core +// offer ids and operation ids, which are toids. +// +// Due to the 2nd bit being used, the largest possible toid is: +// 0011111111111111111111111111111100000000000000000001000000000001 +// \ ledger /\ transaction /\ op / +// = 1073741823 +// with avg. 5 sec close time will reach in ~170 years +func EncodeOfferId(id uint64, typ OfferIDType) int64 { + // First ensure the bits we're going to change are 0s + if id&mask != 0 { + panic("Value too big to encode") + } + return int64(id | uint64(typ)<<62) +} + +// DecodeOfferID performs the reverse operation of EncodeOfferID +func DecodeOfferID(encodedId int64) (uint64, OfferIDType) { + if encodedId < 0 { + panic("Negative offer ids can not be decoded") + } + return uint64(encodedId<<2) >> 2, OfferIDType(encodedId >> 62) +} diff --git a/services/horizon/internal/ingest/processors/trades_processor.go b/services/horizon/internal/ingest/processors/trades_processor.go new file mode 100644 index 0000000000..fa51c381da --- /dev/null +++ b/services/horizon/internal/ingest/processors/trades_processor.go @@ -0,0 +1,336 @@ +package processors + +import ( + "context" + "time" + + "github.com/guregu/null" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// TradeProcessor operations processor +type TradeProcessor struct { + tradesQ history.QTrades + ledger xdr.LedgerHeaderHistoryEntry + trades []ingestTrade + stats TradeStats +} + +func NewTradeProcessor(tradesQ history.QTrades, ledger xdr.LedgerHeaderHistoryEntry) *TradeProcessor { + return &TradeProcessor{ + tradesQ: tradesQ, + ledger: ledger, + } +} + +type TradeStats struct { + count int64 +} + +func (p *TradeProcessor) GetStats() TradeStats { + return p.stats +} +func (stats *TradeStats) Map() map[string]interface{} { + return map[string]interface{}{ + "stats_count": stats.count, + } +} + +// ProcessTransaction process the given transaction +func (p *TradeProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) (err error) { + if !transaction.Result.Successful() { + return nil + } + + trades, err := p.extractTrades(p.ledger, transaction) + if err != nil { + return err + } + + p.trades = append(p.trades, trades...) 
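+	// Extracted rows are only buffered here; account, asset and liquidity pool
+	// IDs are resolved and the batch is written to the DB in Commit.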
+ p.stats.count += int64(len(trades)) + return nil +} + +func (p *TradeProcessor) Commit(ctx context.Context) error { + if len(p.trades) == 0 { + return nil + } + + batch := p.tradesQ.NewTradeBatchInsertBuilder(maxBatchSize) + var poolIDs, accounts []string + var assets []xdr.Asset + for _, trade := range p.trades { + if trade.buyerAccount != "" { + accounts = append(accounts, trade.buyerAccount) + } + if trade.sellerAccount != "" { + accounts = append(accounts, trade.sellerAccount) + } + if trade.liquidityPoolID != "" { + poolIDs = append(poolIDs, trade.liquidityPoolID) + } + assets = append(assets, trade.boughtAsset) + assets = append(assets, trade.soldAsset) + } + + accountSet, err := p.tradesQ.CreateAccounts(ctx, accounts, maxBatchSize) + if err != nil { + return errors.Wrap(err, "Error creating account ids") + } + + var assetMap map[string]history.Asset + assetMap, err = p.tradesQ.CreateAssets(ctx, assets, maxBatchSize) + if err != nil { + return errors.Wrap(err, "Error creating asset ids") + } + + var poolMap map[string]int64 + poolMap, err = p.tradesQ.CreateHistoryLiquidityPools(ctx, poolIDs, maxBatchSize) + if err != nil { + return errors.Wrap(err, "Error creating pool ids") + } + + for _, trade := range p.trades { + row := trade.row + if id, ok := accountSet[trade.sellerAccount]; ok { + row.BaseAccountID = null.IntFrom(id) + } else if len(trade.sellerAccount) > 0 { + return errors.Errorf("Could not find history account id for %s", trade.sellerAccount) + } + if id, ok := accountSet[trade.buyerAccount]; ok { + row.CounterAccountID = null.IntFrom(id) + } else if len(trade.buyerAccount) > 0 { + return errors.Errorf("Could not find history account id for %s", trade.buyerAccount) + } + if id, ok := poolMap[trade.liquidityPoolID]; ok { + row.BaseLiquidityPoolID = null.IntFrom(id) + } else if len(trade.liquidityPoolID) > 0 { + return errors.Errorf("Could not find history liquidity pool id for %s", trade.liquidityPoolID) + } + row.BaseAssetID = assetMap[trade.soldAsset.String()].ID + row.CounterAssetID = assetMap[trade.boughtAsset.String()].ID + + if row.BaseAssetID > row.CounterAssetID { + row.BaseIsSeller = false + row.BaseAccountID, row.CounterAccountID = row.CounterAccountID, row.BaseAccountID + row.BaseAssetID, row.CounterAssetID = row.CounterAssetID, row.BaseAssetID + row.BaseAmount, row.CounterAmount = row.CounterAmount, row.BaseAmount + row.BaseLiquidityPoolID, row.CounterLiquidityPoolID = row.CounterLiquidityPoolID, row.BaseLiquidityPoolID + row.BaseOfferID, row.CounterOfferID = row.CounterOfferID, row.BaseOfferID + row.PriceN, row.PriceD = row.PriceD, row.PriceN + } + + if err = batch.Add(ctx, row); err != nil { + return errors.Wrap(err, "Error adding trade to batch") + } + } + + if err = batch.Exec(ctx); err != nil { + return errors.Wrap(err, "Error flushing operation batch") + } + return nil +} + +func (p *TradeProcessor) findTradeSellPrice( + transaction ingest.LedgerTransaction, + opidx int, + trade xdr.ClaimAtom, +) (int64, int64, error) { + if trade.Type == xdr.ClaimAtomTypeClaimAtomTypeLiquidityPool { + return int64(trade.AmountBought()), int64(trade.AmountSold()), nil + } + + key := xdr.LedgerKey{} + if err := key.SetOffer(trade.SellerId(), uint64(trade.OfferId())); err != nil { + return 0, 0, errors.Wrap(err, "Could not create offer ledger key") + } + + change, err := p.findOperationChange(transaction, opidx, key) + if err != nil { + return 0, 0, errors.Wrap(err, "could not find change for trade offer") + } + + return int64(change.Pre.Data.Offer.Price.N), 
int64(change.Pre.Data.Offer.Price.D), nil +} + +func (p *TradeProcessor) findOperationChange(tx ingest.LedgerTransaction, opidx int, key xdr.LedgerKey) (ingest.Change, error) { + changes, err := tx.GetOperationChanges(uint32(opidx)) + if err != nil { + return ingest.Change{}, errors.Wrap(err, "could not determine changes for operation") + } + + var change ingest.Change + for i := len(changes) - 1; i >= 0; i-- { + change = changes[i] + if change.Pre != nil && key.Equals(change.Pre.LedgerKey()) { + return change, nil + } + } + return ingest.Change{}, errors.Errorf("could not find operation for key %v", key) +} + +func (p *TradeProcessor) findPoolFee( + transaction ingest.LedgerTransaction, + opidx int, + poolID xdr.PoolId, +) (uint32, error) { + key := xdr.LedgerKey{} + if err := key.SetLiquidityPool(poolID); err != nil { + return 0, errors.Wrap(err, "Could not create liquidity pool ledger key") + + } + + change, err := p.findOperationChange(transaction, opidx, key) + if err != nil { + return 0, errors.Wrap(err, "could not find change for liquidity pool") + } + + return uint32(change.Pre.Data.MustLiquidityPool().Body.MustConstantProduct().Params.Fee), nil +} + +type ingestTrade struct { + row history.InsertTrade + sellerAccount string + liquidityPoolID string + buyerAccount string + boughtAsset xdr.Asset + soldAsset xdr.Asset +} + +func (p *TradeProcessor) extractTrades( + ledger xdr.LedgerHeaderHistoryEntry, + transaction ingest.LedgerTransaction, +) ([]ingestTrade, error) { + var result []ingestTrade + + closeTime := time.Unix(int64(ledger.Header.ScpValue.CloseTime), 0).UTC() + + opResults, ok := transaction.Result.OperationResults() + if !ok { + return result, errors.New("transaction has no operation results") + } + for opidx, op := range transaction.Envelope.Operations() { + var trades []xdr.ClaimAtom + var buyOfferExists bool + var buyOffer xdr.OfferEntry + + switch op.Body.Type { + case xdr.OperationTypePathPaymentStrictReceive: + trades = opResults[opidx].MustTr().MustPathPaymentStrictReceiveResult(). + MustSuccess(). + Offers + + case xdr.OperationTypePathPaymentStrictSend: + trades = opResults[opidx].MustTr(). + MustPathPaymentStrictSendResult(). + MustSuccess(). + Offers + + case xdr.OperationTypeManageBuyOffer: + manageOfferResult := opResults[opidx].MustTr().MustManageBuyOfferResult(). + MustSuccess() + trades = manageOfferResult.OffersClaimed + buyOffer, buyOfferExists = manageOfferResult.Offer.GetOffer() + + case xdr.OperationTypeManageSellOffer: + manageOfferResult := opResults[opidx].MustTr().MustManageSellOfferResult(). + MustSuccess() + trades = manageOfferResult.OffersClaimed + buyOffer, buyOfferExists = manageOfferResult.Offer.GetOffer() + + case xdr.OperationTypeCreatePassiveSellOffer: + result := opResults[opidx].MustTr() + + // KNOWN ISSUE: stellar-core creates results for CreatePassiveOffer operations + // with the wrong result arm set. 
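+			// Accept either result arm so the claimed offers are extracted
+			// regardless of which one stellar-core populated.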
+ if result.Type == xdr.OperationTypeManageSellOffer { + manageOfferResult := result.MustManageSellOfferResult().MustSuccess() + trades = manageOfferResult.OffersClaimed + buyOffer, buyOfferExists = manageOfferResult.Offer.GetOffer() + } else { + passiveOfferResult := result.MustCreatePassiveSellOfferResult().MustSuccess() + trades = passiveOfferResult.OffersClaimed + buyOffer, buyOfferExists = passiveOfferResult.Offer.GetOffer() + } + } + + opID := toid.New( + int32(ledger.Header.LedgerSeq), int32(transaction.Index), int32(opidx+1), + ).ToInt64() + for order, trade := range trades { + // stellar-core will opportunistically garbage collect invalid offers (in the + // event that a trader spends down their balance). These garbage collected + // offers get emitted in the result with the amount values set to zero. + // + // These zeroed ClaimOfferAtom values do not represent trades, and so we + // skip them. + if trade.AmountBought() == 0 && trade.AmountSold() == 0 { + continue + } + + sellPriceN, sellPriceD, err := p.findTradeSellPrice(transaction, opidx, trade) + if err != nil { + return result, err + } + + row := history.InsertTrade{ + HistoryOperationID: opID, + Order: int32(order), + LedgerCloseTime: closeTime, + CounterAmount: int64(trade.AmountBought()), + BaseAmount: int64(trade.AmountSold()), + BaseIsSeller: true, + PriceN: sellPriceN, + PriceD: sellPriceD, + } + + var sellerAccount, liquidityPoolID string + if trade.Type == xdr.ClaimAtomTypeClaimAtomTypeLiquidityPool { + id := trade.MustLiquidityPool().LiquidityPoolId + liquidityPoolID = PoolIDToString(id) + var fee uint32 + if fee, err = p.findPoolFee(transaction, opidx, id); err != nil { + return nil, err + } + row.LiquidityPoolFee = null.IntFrom(int64(fee)) + row.Type = history.LiquidityPoolTradeType + } else { + row.BaseOfferID = null.IntFrom(int64(trade.OfferId())) + sellerAccount = trade.SellerId().Address() + row.Type = history.OrderbookTradeType + } + + if buyOfferExists { + row.CounterOfferID = null.IntFrom(EncodeOfferId(uint64(buyOffer.OfferId), CoreOfferIDType)) + } else { + row.CounterOfferID = null.IntFrom(EncodeOfferId(uint64(opID), TOIDType)) + } + + var buyerAddress string + if buyer := op.SourceAccount; buyer != nil { + accid := buyer.ToAccountId() + buyerAddress = accid.Address() + } else { + sa := transaction.Envelope.SourceAccount().ToAccountId() + buyerAddress = sa.Address() + } + + result = append(result, ingestTrade{ + row: row, + sellerAccount: sellerAccount, + liquidityPoolID: liquidityPoolID, + buyerAccount: buyerAddress, + boughtAsset: trade.AssetBought(), + soldAsset: trade.AssetSold(), + }) + } + } + + return result, nil +} diff --git a/services/horizon/internal/ingest/processors/trades_processor_test.go b/services/horizon/internal/ingest/processors/trades_processor_test.go new file mode 100644 index 0000000000..31baf425ad --- /dev/null +++ b/services/horizon/internal/ingest/processors/trades_processor_test.go @@ -0,0 +1,964 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/guregu/null" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" +) + +type TradeProcessorTestSuiteLedger struct { + suite.Suite + processor *TradeProcessor + mockQ *history.MockQTrades + 
mockBatchInsertBuilder *history.MockTradeBatchInsertBuilder + + unmuxedSourceAccount xdr.AccountId + unmuxedOpSourceAccount xdr.AccountId + sourceAccount xdr.MuxedAccount + opSourceAccount xdr.MuxedAccount + strictReceiveTrade xdr.ClaimAtom + strictReceiveTradeLP xdr.ClaimAtom + strictSendTrade xdr.ClaimAtom + strictSendTradeLP xdr.ClaimAtom + buyOfferTrade xdr.ClaimAtom + sellOfferTrade xdr.ClaimAtom + passiveSellOfferTrade xdr.ClaimAtom + otherPassiveSellOfferTrade xdr.ClaimAtom + allTrades []xdr.ClaimAtom + sellPrices []xdr.Price + + assets []xdr.Asset + + lpToID map[xdr.PoolId]int64 + unmuxedAccountToID map[string]int64 + assetToID map[string]history.Asset + + txs []ingest.LedgerTransaction +} + +func TestTradeProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(TradeProcessorTestSuiteLedger)) +} + +func (s *TradeProcessorTestSuiteLedger) SetupTest() { + s.mockQ = &history.MockQTrades{} + s.mockBatchInsertBuilder = &history.MockTradeBatchInsertBuilder{} + + s.unmuxedSourceAccount = xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + s.sourceAccount = xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xdeadbeef, + Ed25519: *s.unmuxedSourceAccount.Ed25519, + }, + } + s.unmuxedOpSourceAccount = xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML") + s.opSourceAccount = xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabe, + Ed25519: *s.unmuxedOpSourceAccount.Ed25519, + }, + } + s.strictReceiveTrade = xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeOrderBook, + OrderBook: &xdr.ClaimOfferAtom{ + SellerId: xdr.MustAddress("GA2YS6YBWIBUMUJCNYROC5TXYTTUA4TCZF7A4MJ2O4TTGT3LFNWIOMY4"), + OfferId: 11, + AssetSold: xdr.MustNewNativeAsset(), + AmountSold: 111, + AmountBought: 211, + AssetBought: xdr.MustNewCreditAsset("HUF", s.unmuxedSourceAccount.Address()), + }, + } + s.strictReceiveTradeLP = xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeLiquidityPool, + LiquidityPool: &xdr.ClaimLiquidityAtom{ + LiquidityPoolId: xdr.PoolId{1, 2, 3}, + AssetSold: xdr.MustNewCreditAsset("MAD", s.unmuxedSourceAccount.Address()), + AmountSold: 20, + AssetBought: xdr.MustNewCreditAsset("GRE", s.unmuxedSourceAccount.Address()), + AmountBought: 300, + }, + } + s.strictSendTrade = xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeOrderBook, + OrderBook: &xdr.ClaimOfferAtom{ + SellerId: xdr.MustAddress("GALOBQKDZUSAEUDE7F4OYUIQTUZBL62G6TRCXU2ED6SA7TL72MBUQSYJ"), + OfferId: 12, + AssetSold: xdr.MustNewCreditAsset("USD", s.unmuxedSourceAccount.Address()), + AmountSold: 112, + AmountBought: 212, + AssetBought: xdr.MustNewCreditAsset("RUB", s.unmuxedSourceAccount.Address()), + }, + } + s.strictSendTradeLP = xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeLiquidityPool, + LiquidityPool: &xdr.ClaimLiquidityAtom{ + LiquidityPoolId: xdr.PoolId{4, 5, 6}, + AssetSold: xdr.MustNewCreditAsset("WER", s.unmuxedSourceAccount.Address()), + AmountSold: 67, + AssetBought: xdr.MustNewCreditAsset("NIJ", s.unmuxedSourceAccount.Address()), + AmountBought: 98, + }, + } + s.buyOfferTrade = xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeOrderBook, + OrderBook: &xdr.ClaimOfferAtom{ + SellerId: xdr.MustAddress("GCWRLPH5X5A3GABFDLDILZ4RLY6O76AYOIIR5H2PAI6TNZZZNLZWBXSH"), + OfferId: 13, + AssetSold: xdr.MustNewCreditAsset("EUR", s.unmuxedSourceAccount.Address()), + AmountSold: 113, + AmountBought: 213, + AssetBought: xdr.MustNewCreditAsset("NOK", 
s.unmuxedSourceAccount.Address()), + }, + } + s.sellOfferTrade = xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeOrderBook, + OrderBook: &xdr.ClaimOfferAtom{ + SellerId: xdr.MustAddress("GAVOLNFXVVUJOELN4T3YVSH2FFA3VSP2XN4NJRYF2ZWVCHS77C5KXLHZ"), + OfferId: 14, + AssetSold: xdr.MustNewCreditAsset("PLN", s.unmuxedSourceAccount.Address()), + AmountSold: 114, + AmountBought: 214, + AssetBought: xdr.MustNewCreditAsset("UAH", s.unmuxedSourceAccount.Address()), + }, + } + s.passiveSellOfferTrade = xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeOrderBook, + OrderBook: &xdr.ClaimOfferAtom{ + SellerId: xdr.MustAddress("GDQWI6FKB72DPOJE4CGYCFQZKRPQQIOYXRMZ5KEVGXMG6UUTGJMBCASH"), + OfferId: 15, + AssetSold: xdr.MustNewCreditAsset("SEK", s.unmuxedSourceAccount.Address()), + AmountSold: 115, + AmountBought: 215, + AssetBought: xdr.MustNewCreditAsset("GBP", s.unmuxedSourceAccount.Address()), + }, + } + s.otherPassiveSellOfferTrade = xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeOrderBook, + OrderBook: &xdr.ClaimOfferAtom{ + SellerId: xdr.MustAddress("GCPZFOJON3PSSYUBNT7MCGEDSGP47UTSJSB4XGCVEWEJO4XQ6U4XN3N2"), + OfferId: 16, + AssetSold: xdr.MustNewCreditAsset("CHF", s.unmuxedSourceAccount.Address()), + AmountSold: 116, + AmountBought: 216, + AssetBought: xdr.MustNewCreditAsset("JPY", s.unmuxedSourceAccount.Address()), + }, + } + + s.unmuxedAccountToID = map[string]int64{ + s.unmuxedSourceAccount.Address(): 1000, + s.unmuxedOpSourceAccount.Address(): 1001, + } + s.assetToID = map[string]history.Asset{} + s.allTrades = []xdr.ClaimAtom{ + s.strictReceiveTrade, + s.strictSendTrade, + s.buyOfferTrade, + s.sellOfferTrade, + s.passiveSellOfferTrade, + s.otherPassiveSellOfferTrade, + s.strictReceiveTradeLP, + s.strictSendTradeLP, + } + + s.assets = []xdr.Asset{} + s.sellPrices = []xdr.Price{} + s.lpToID = map[xdr.PoolId]int64{} + for i, trade := range s.allTrades { + if trade.Type == xdr.ClaimAtomTypeClaimAtomTypeOrderBook { + s.unmuxedAccountToID[trade.SellerId().Address()] = int64(1002 + i) + n := xdr.Int32(i + 1) + s.sellPrices = append(s.sellPrices, xdr.Price{N: n, D: 100}) + } else { + s.lpToID[trade.MustLiquidityPool().LiquidityPoolId] = int64(3000 + i) + s.sellPrices = append(s.sellPrices, xdr.Price{N: xdr.Int32(trade.AmountBought()), D: xdr.Int32(trade.AmountSold())}) + } + if i%2 == 0 { + s.assetToID[trade.AssetSold().String()] = history.Asset{ID: int64(10000 + i)} + s.assetToID[trade.AssetBought().String()] = history.Asset{ID: int64(100 + i)} + } else { + s.assetToID[trade.AssetSold().String()] = history.Asset{ID: int64(100 + i)} + s.assetToID[trade.AssetBought().String()] = history.Asset{ID: int64(10000 + i)} + } + s.assets = append(s.assets, trade.AssetSold(), trade.AssetBought()) + } + + s.processor = NewTradeProcessor( + s.mockQ, + xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 100, + }, + }, + ) +} + +func (s *TradeProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) + s.mockBatchInsertBuilder.AssertExpectations(s.T()) +} + +func (s *TradeProcessorTestSuiteLedger) TestIgnoreFailedTransactions() { + ctx := context.Background() + err := s.processor.ProcessTransaction(ctx, createTransaction(false, 1)) + s.Assert().NoError(err) + + err = s.processor.Commit(ctx) + s.Assert().NoError(err) +} + +func (s *TradeProcessorTestSuiteLedger) mockReadTradeTransactions( + ledger xdr.LedgerHeaderHistoryEntry, +) []history.InsertTrade { + closeTime := time.Unix(int64(ledger.Header.ScpValue.CloseTime), 0).UTC() + inserts := []history.InsertTrade{ + 
{ + HistoryOperationID: toid.New(int32(ledger.Header.LedgerSeq), 1, 2).ToInt64(), + Order: 1, + LedgerCloseTime: closeTime, + BaseAmount: int64(s.strictReceiveTrade.AmountBought()), + BaseAccountID: null.IntFrom(s.unmuxedAccountToID[s.unmuxedOpSourceAccount.Address()]), + BaseAssetID: s.assetToID[s.strictReceiveTrade.AssetBought().String()].ID, + BaseOfferID: null.IntFrom(EncodeOfferId(uint64(toid.New(int32(ledger.Header.LedgerSeq), 1, 2).ToInt64()), TOIDType)), + CounterAmount: int64(s.strictReceiveTrade.AmountSold()), + CounterAccountID: null.IntFrom(s.unmuxedAccountToID[s.strictReceiveTrade.SellerId().Address()]), + CounterAssetID: s.assetToID[s.strictReceiveTrade.AssetSold().String()].ID, + CounterOfferID: null.IntFrom(int64(s.strictReceiveTrade.OfferId())), + BaseIsSeller: false, + PriceN: int64(s.sellPrices[0].D), + PriceD: int64(s.sellPrices[0].N), + Type: history.OrderbookTradeType, + }, + { + HistoryOperationID: toid.New(int32(ledger.Header.LedgerSeq), 1, 3).ToInt64(), + Order: 0, + LedgerCloseTime: closeTime, + CounterAmount: int64(s.strictSendTrade.AmountBought()), + CounterAccountID: null.IntFrom(s.unmuxedAccountToID[s.unmuxedOpSourceAccount.Address()]), + CounterAssetID: s.assetToID[s.strictSendTrade.AssetBought().String()].ID, + CounterOfferID: null.IntFrom(EncodeOfferId(uint64(toid.New(int32(ledger.Header.LedgerSeq), 1, 3).ToInt64()), TOIDType)), + BaseAmount: int64(s.strictSendTrade.AmountSold()), + BaseAccountID: null.IntFrom(s.unmuxedAccountToID[s.strictSendTrade.SellerId().Address()]), + BaseAssetID: s.assetToID[s.strictSendTrade.AssetSold().String()].ID, + BaseIsSeller: true, + BaseOfferID: null.IntFrom(int64(s.strictSendTrade.OfferId())), + PriceN: int64(s.sellPrices[1].N), + PriceD: int64(s.sellPrices[1].D), + Type: history.OrderbookTradeType, + }, + { + HistoryOperationID: toid.New(int32(ledger.Header.LedgerSeq), 1, 4).ToInt64(), + Order: 1, + LedgerCloseTime: closeTime, + BaseOfferID: null.IntFrom(879136), + BaseAmount: int64(s.buyOfferTrade.AmountBought()), + BaseAccountID: null.IntFrom(s.unmuxedAccountToID[s.unmuxedOpSourceAccount.Address()]), + BaseAssetID: s.assetToID[s.buyOfferTrade.AssetBought().String()].ID, + CounterAmount: int64(s.buyOfferTrade.AmountSold()), + CounterAccountID: null.IntFrom(s.unmuxedAccountToID[s.buyOfferTrade.SellerId().Address()]), + CounterAssetID: s.assetToID[s.buyOfferTrade.AssetSold().String()].ID, + BaseIsSeller: false, + CounterOfferID: null.IntFrom(int64(s.buyOfferTrade.OfferId())), + PriceN: int64(s.sellPrices[2].D), + PriceD: int64(s.sellPrices[2].N), + Type: history.OrderbookTradeType, + }, + { + HistoryOperationID: toid.New(int32(ledger.Header.LedgerSeq), 1, 5).ToInt64(), + Order: 2, + LedgerCloseTime: closeTime, + CounterAmount: int64(s.sellOfferTrade.AmountBought()), + CounterAssetID: s.assetToID[s.sellOfferTrade.AssetBought().String()].ID, + CounterAccountID: null.IntFrom(s.unmuxedAccountToID[s.unmuxedOpSourceAccount.Address()]), + CounterOfferID: null.IntFrom(EncodeOfferId(uint64(toid.New(int32(ledger.Header.LedgerSeq), 1, 5).ToInt64()), TOIDType)), + BaseAmount: int64(s.sellOfferTrade.AmountSold()), + BaseAccountID: null.IntFrom(s.unmuxedAccountToID[s.sellOfferTrade.SellerId().Address()]), + BaseAssetID: s.assetToID[s.sellOfferTrade.AssetSold().String()].ID, + BaseIsSeller: true, + BaseOfferID: null.IntFrom(int64(s.sellOfferTrade.OfferId())), + PriceN: int64(s.sellPrices[3].N), + PriceD: int64(s.sellPrices[3].D), + Type: history.OrderbookTradeType, + }, + { + HistoryOperationID: toid.New(int32(ledger.Header.LedgerSeq), 1, 
6).ToInt64(), + Order: 0, + LedgerCloseTime: closeTime, + BaseAmount: int64(s.passiveSellOfferTrade.AmountBought()), + BaseAssetID: s.assetToID[s.passiveSellOfferTrade.AssetBought().String()].ID, + BaseAccountID: null.IntFrom(s.unmuxedAccountToID[s.unmuxedSourceAccount.Address()]), + BaseOfferID: null.IntFrom(EncodeOfferId(uint64(toid.New(int32(ledger.Header.LedgerSeq), 1, 6).ToInt64()), TOIDType)), + CounterAmount: int64(s.passiveSellOfferTrade.AmountSold()), + CounterAccountID: null.IntFrom(s.unmuxedAccountToID[s.passiveSellOfferTrade.SellerId().Address()]), + CounterAssetID: s.assetToID[s.passiveSellOfferTrade.AssetSold().String()].ID, + BaseIsSeller: false, + CounterOfferID: null.IntFrom(int64(s.passiveSellOfferTrade.OfferId())), + PriceN: int64(s.sellPrices[4].D), + PriceD: int64(s.sellPrices[4].N), + Type: history.OrderbookTradeType, + }, + + { + HistoryOperationID: toid.New(int32(ledger.Header.LedgerSeq), 1, 7).ToInt64(), + Order: 0, + LedgerCloseTime: closeTime, + + CounterAmount: int64(s.otherPassiveSellOfferTrade.AmountBought()), + CounterAssetID: s.assetToID[s.otherPassiveSellOfferTrade.AssetBought().String()].ID, + CounterAccountID: null.IntFrom(s.unmuxedAccountToID[s.unmuxedOpSourceAccount.Address()]), + CounterOfferID: null.IntFrom(EncodeOfferId(uint64(toid.New(int32(ledger.Header.LedgerSeq), 1, 7).ToInt64()), TOIDType)), + BaseAmount: int64(s.otherPassiveSellOfferTrade.AmountSold()), + BaseAccountID: null.IntFrom(s.unmuxedAccountToID[s.otherPassiveSellOfferTrade.SellerId().Address()]), + BaseAssetID: s.assetToID[s.otherPassiveSellOfferTrade.AssetSold().String()].ID, + BaseIsSeller: true, + BaseOfferID: null.IntFrom(int64(s.otherPassiveSellOfferTrade.OfferId())), + PriceN: int64(s.sellPrices[5].N), + PriceD: int64(s.sellPrices[5].D), + Type: history.OrderbookTradeType, + }, + { + HistoryOperationID: toid.New(int32(ledger.Header.LedgerSeq), 1, 8).ToInt64(), + Order: 1, + LedgerCloseTime: closeTime, + BaseAmount: int64(s.strictReceiveTradeLP.AmountBought()), + BaseAssetID: s.assetToID[s.strictReceiveTradeLP.AssetBought().String()].ID, + BaseAccountID: null.IntFrom(s.unmuxedAccountToID[s.unmuxedOpSourceAccount.Address()]), + BaseOfferID: null.IntFrom(EncodeOfferId(uint64(toid.New(int32(ledger.Header.LedgerSeq), 1, 8).ToInt64()), TOIDType)), + CounterAmount: int64(s.strictReceiveTradeLP.AmountSold()), + CounterLiquidityPoolID: null.IntFrom(s.lpToID[s.strictReceiveTradeLP.MustLiquidityPool().LiquidityPoolId]), + CounterAssetID: s.assetToID[s.strictReceiveTradeLP.AssetSold().String()].ID, + BaseIsSeller: false, + LiquidityPoolFee: null.IntFrom(int64(xdr.LiquidityPoolFeeV18)), + PriceN: int64(s.sellPrices[6].D), + PriceD: int64(s.sellPrices[6].N), + Type: history.LiquidityPoolTradeType, + }, + { + HistoryOperationID: toid.New(int32(ledger.Header.LedgerSeq), 1, 9).ToInt64(), + Order: 0, + LedgerCloseTime: closeTime, + CounterAmount: int64(s.strictSendTradeLP.AmountBought()), + CounterAssetID: s.assetToID[s.strictSendTradeLP.AssetBought().String()].ID, + CounterAccountID: null.IntFrom(s.unmuxedAccountToID[s.unmuxedOpSourceAccount.Address()]), + CounterOfferID: null.IntFrom(EncodeOfferId(uint64(toid.New(int32(ledger.Header.LedgerSeq), 1, 9).ToInt64()), TOIDType)), + BaseAmount: int64(s.strictSendTradeLP.AmountSold()), + BaseLiquidityPoolID: null.IntFrom(s.lpToID[s.strictSendTradeLP.MustLiquidityPool().LiquidityPoolId]), + BaseAssetID: s.assetToID[s.strictSendTradeLP.AssetSold().String()].ID, + BaseIsSeller: true, + LiquidityPoolFee: null.IntFrom(int64(xdr.LiquidityPoolFeeV18)), + 
PriceN: int64(s.sellPrices[7].N), + PriceD: int64(s.sellPrices[7].D), + Type: history.LiquidityPoolTradeType, + }, + } + + emptyTrade := xdr.ClaimAtom{ + Type: xdr.ClaimAtomTypeClaimAtomTypeOrderBook, + OrderBook: &xdr.ClaimOfferAtom{ + SellerId: s.sourceAccount.ToAccountId(), + OfferId: 123, + AssetSold: xdr.MustNewNativeAsset(), + AmountSold: 0, + AssetBought: xdr.MustNewCreditAsset("EUR", s.unmuxedSourceAccount.Address()), + AmountBought: 0, + }, + } + + operationResults := []xdr.OperationResult{ + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeBumpSequence, + BumpSeqResult: &xdr.BumpSequenceResult{ + Code: xdr.BumpSequenceResultCodeBumpSequenceSuccess, + }, + }, + }, + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypePathPaymentStrictReceive, + PathPaymentStrictReceiveResult: &xdr.PathPaymentStrictReceiveResult{ + Code: xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess, + Success: &xdr.PathPaymentStrictReceiveResultSuccess{ + Offers: []xdr.ClaimAtom{ + emptyTrade, + s.strictReceiveTrade, + }, + }, + }, + }, + }, + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypePathPaymentStrictSend, + PathPaymentStrictSendResult: &xdr.PathPaymentStrictSendResult{ + Code: xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess, + Success: &xdr.PathPaymentStrictSendResultSuccess{ + Offers: []xdr.ClaimAtom{ + s.strictSendTrade, + emptyTrade, + }, + }, + }, + }, + }, + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeManageBuyOffer, + ManageBuyOfferResult: &xdr.ManageBuyOfferResult{ + Code: xdr.ManageBuyOfferResultCodeManageBuyOfferSuccess, + Success: &xdr.ManageOfferSuccessResult{ + OffersClaimed: []xdr.ClaimAtom{ + emptyTrade, + s.buyOfferTrade, + emptyTrade, + }, + Offer: xdr.ManageOfferSuccessResultOffer{ + Offer: &xdr.OfferEntry{ + OfferId: 879136, + }, + }, + }, + }, + }, + }, + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeManageSellOffer, + ManageSellOfferResult: &xdr.ManageSellOfferResult{ + Code: xdr.ManageSellOfferResultCodeManageSellOfferSuccess, + Success: &xdr.ManageOfferSuccessResult{ + OffersClaimed: []xdr.ClaimAtom{ + emptyTrade, + emptyTrade, + s.sellOfferTrade, + }, + Offer: xdr.ManageOfferSuccessResultOffer{ + Effect: xdr.ManageOfferEffectManageOfferDeleted, + }, + }, + }, + }, + }, + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeManageSellOffer, + ManageSellOfferResult: &xdr.ManageSellOfferResult{ + Code: xdr.ManageSellOfferResultCodeManageSellOfferSuccess, + Success: &xdr.ManageOfferSuccessResult{ + OffersClaimed: []xdr.ClaimAtom{ + s.passiveSellOfferTrade, + emptyTrade, + emptyTrade, + }, + Offer: xdr.ManageOfferSuccessResultOffer{ + Effect: xdr.ManageOfferEffectManageOfferDeleted, + }, + }, + }, + }, + }, + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypeCreatePassiveSellOffer, + CreatePassiveSellOfferResult: &xdr.ManageSellOfferResult{ + Code: xdr.ManageSellOfferResultCodeManageSellOfferSuccess, + Success: &xdr.ManageOfferSuccessResult{ + OffersClaimed: []xdr.ClaimAtom{ + s.otherPassiveSellOfferTrade, + }, + Offer: xdr.ManageOfferSuccessResultOffer{ + Effect: xdr.ManageOfferEffectManageOfferDeleted, + }, + }, + }, + }, + }, + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypePathPaymentStrictReceive, + PathPaymentStrictReceiveResult: &xdr.PathPaymentStrictReceiveResult{ + Code: xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess, + Success: &xdr.PathPaymentStrictReceiveResultSuccess{ + Offers: []xdr.ClaimAtom{ + emptyTrade, + s.strictReceiveTradeLP, + }, + }, + }, 
+ }, + }, + { + Tr: &xdr.OperationResultTr{ + Type: xdr.OperationTypePathPaymentStrictSend, + PathPaymentStrictSendResult: &xdr.PathPaymentStrictSendResult{ + Code: xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess, + Success: &xdr.PathPaymentStrictSendResultSuccess{ + Offers: []xdr.ClaimAtom{ + s.strictSendTradeLP, + }, + }, + }, + }, + }, + } + + operations := []xdr.Operation{ + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypeBumpSequence, + BumpSequenceOp: &xdr.BumpSequenceOp{BumpTo: 30000}, + }, + }, + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypePathPaymentStrictReceive, + PathPaymentStrictReceiveOp: &xdr.PathPaymentStrictReceiveOp{}, + }, + SourceAccount: &s.opSourceAccount, + }, + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypePathPaymentStrictSend, + PathPaymentStrictSendOp: &xdr.PathPaymentStrictSendOp{}, + }, + SourceAccount: &s.opSourceAccount, + }, + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypeManageBuyOffer, + ManageBuyOfferOp: &xdr.ManageBuyOfferOp{}, + }, + SourceAccount: &s.opSourceAccount, + }, + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypeManageSellOffer, + ManageSellOfferOp: &xdr.ManageSellOfferOp{}, + }, + SourceAccount: &s.opSourceAccount, + }, + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypeCreatePassiveSellOffer, + CreatePassiveSellOfferOp: &xdr.CreatePassiveSellOfferOp{}, + }, + }, + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypeCreatePassiveSellOffer, + CreatePassiveSellOfferOp: &xdr.CreatePassiveSellOfferOp{}, + }, + SourceAccount: &s.opSourceAccount, + }, + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypePathPaymentStrictReceive, + PathPaymentStrictReceiveOp: &xdr.PathPaymentStrictReceiveOp{}, + }, + SourceAccount: &s.opSourceAccount, + }, + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypePathPaymentStrictSend, + PathPaymentStrictSendOp: &xdr.PathPaymentStrictSendOp{}, + }, + SourceAccount: &s.opSourceAccount, + }, + } + + tx := ingest.LedgerTransaction{ + Result: xdr.TransactionResultPair{ + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxSuccess, + Results: &operationResults, + }, + }, + }, + Envelope: xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: s.sourceAccount, + Operations: operations, + }, + }, + }, + Index: 1, + FeeChanges: []xdr.LedgerEntryChange{}, + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: []xdr.OperationMeta{ + { + Changes: xdr.LedgerEntryChanges{}, + }, + }, + }, + }, + } + + for i, trade := range s.allTrades { + var changes xdr.LedgerEntryChanges + if trade.Type == xdr.ClaimAtomTypeClaimAtomTypeOrderBook { + changes = xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &xdr.OfferEntry{ + Price: s.sellPrices[i], + SellerId: trade.SellerId(), + OfferId: trade.OfferId(), + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved, + Removed: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &xdr.LedgerKeyOffer{ + SellerId: trade.SellerId(), + OfferId: trade.OfferId(), + }, + }, + }, + } + } else { + changes = xdr.LedgerEntryChanges{ + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: 
xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: trade.MustLiquidityPool().LiquidityPoolId, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: trade.AssetSold(), + AssetB: trade.AssetSold(), + Fee: xdr.LiquidityPoolFeeV18, + }, + ReserveA: 100, + ReserveB: 200, + TotalPoolShares: 40, + PoolSharesTrustLineCount: 50, + }, + }, + }, + }, + }, + }, + xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: trade.MustLiquidityPool().LiquidityPoolId, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: trade.AssetSold(), + AssetB: trade.AssetSold(), + Fee: xdr.LiquidityPoolFeeV18, + }, + ReserveA: 100, + ReserveB: 200, + TotalPoolShares: 40, + PoolSharesTrustLineCount: 50, + }, + }, + }, + }, + }, + }, + } + } + tx.UnsafeMeta.V2.Operations = append(tx.UnsafeMeta.V2.Operations, xdr.OperationMeta{ + Changes: changes, + }) + } + + s.txs = []ingest.LedgerTransaction{ + tx, + } + + s.mockQ.On("NewTradeBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + + return inserts +} + +func mapKeysToList(set map[string]int64) []string { + keys := make([]string, 0, len(set)) + for key := range set { + keys = append(keys, key) + } + return keys +} + +func uniq(list []string) []string { + var deduped []string + set := map[string]bool{} + for _, s := range list { + if set[s] { + continue + } + deduped = append(deduped, s) + set[s] = true + } + return deduped +} + +func (s *TradeProcessorTestSuiteLedger) TestIngestTradesSucceeds() { + ctx := context.Background() + inserts := s.mockReadTradeTransactions(s.processor.ledger) + + s.mockCreateAccounts(ctx) + + s.mockCreateAssets(ctx) + + s.mockCreateHistoryLiquidityPools(ctx) + + for _, insert := range inserts { + s.mockBatchInsertBuilder.On("Add", ctx, []history.InsertTrade{ + insert, + }).Return(nil).Once() + } + + s.mockBatchInsertBuilder.On("Exec", ctx).Return(nil).Once() + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(ctx, tx) + s.Assert().NoError(err) + } + + err := s.processor.Commit(ctx) + s.Assert().NoError(err) +} + +func (s *TradeProcessorTestSuiteLedger) mockCreateHistoryLiquidityPools(ctx context.Context) { + lpIDs, lpStrToID := s.extractLpIDs() + s.mockQ.On("CreateHistoryLiquidityPools", ctx, mock.AnythingOfType("[]string"), maxBatchSize). + Run(func(args mock.Arguments) { + arg := args.Get(1).([]string) + s.Assert().ElementsMatch( + lpIDs, + arg, + ) + }).Return(lpStrToID, nil).Once() +} + +func (s *TradeProcessorTestSuiteLedger) extractLpIDs() ([]string, map[string]int64) { + var lpIDs []string + lpStrToID := map[string]int64{} + for lpID, id := range s.lpToID { + lpIDStr := PoolIDToString(lpID) + lpIDs = append(lpIDs, lpIDStr) + lpStrToID[lpIDStr] = id + } + return lpIDs, lpStrToID +} + +func (s *TradeProcessorTestSuiteLedger) TestCreateAccountsError() { + ctx := context.Background() + s.mockReadTradeTransactions(s.processor.ledger) + + s.mockQ.On("CreateAccounts", ctx, mock.AnythingOfType("[]string"), maxBatchSize). 
+		Run(func(args mock.Arguments) {
+			arg := args.Get(1).([]string)
+			s.Assert().ElementsMatch(
+				mapKeysToList(s.unmuxedAccountToID),
+				uniq(arg),
+			)
+		}).Return(map[string]int64{}, fmt.Errorf("create accounts error")).Once()
+
+	for _, tx := range s.txs {
+		err := s.processor.ProcessTransaction(ctx, tx)
+		s.Assert().NoError(err)
+	}
+
+	err := s.processor.Commit(ctx)
+
+	s.Assert().EqualError(err, "Error creating account ids: create accounts error")
+}
+
+func (s *TradeProcessorTestSuiteLedger) TestCreateAssetsError() {
+	ctx := context.Background()
+	s.mockReadTradeTransactions(s.processor.ledger)
+
+	s.mockCreateAccounts(ctx)
+
+	s.mockQ.On("CreateAssets", ctx, mock.AnythingOfType("[]xdr.Asset"), maxBatchSize).
+		Run(func(args mock.Arguments) {
+			arg := args.Get(1).([]xdr.Asset)
+			s.Assert().ElementsMatch(
+				s.assets,
+				arg,
+			)
+		}).Return(s.assetToID, fmt.Errorf("create assets error")).Once()
+
+	for _, tx := range s.txs {
+		err := s.processor.ProcessTransaction(ctx, tx)
+		s.Assert().NoError(err)
+	}
+
+	err := s.processor.Commit(ctx)
+	s.Assert().EqualError(err, "Error creating asset ids: create assets error")
+}
+
+func (s *TradeProcessorTestSuiteLedger) TestCreateHistoryLiquidityPoolsError() {
+	ctx := context.Background()
+	s.mockReadTradeTransactions(s.processor.ledger)
+
+	s.mockCreateAccounts(ctx)
+
+	s.mockCreateAssets(ctx)
+
+	lpIDs, lpStrToID := s.extractLpIDs()
+	s.mockQ.On("CreateHistoryLiquidityPools", ctx, mock.AnythingOfType("[]string"), maxBatchSize).
+		Run(func(args mock.Arguments) {
+			arg := args.Get(1).([]string)
+			s.Assert().ElementsMatch(
+				lpIDs,
+				arg,
+			)
+		}).Return(lpStrToID, fmt.Errorf("create liquidity pool id error")).Once()
+
+	for _, tx := range s.txs {
+		err := s.processor.ProcessTransaction(ctx, tx)
+		s.Assert().NoError(err)
+	}
+
+	err := s.processor.Commit(ctx)
+	s.Assert().EqualError(err, "Error creating pool ids: create liquidity pool id error")
+}
+
+func (s *TradeProcessorTestSuiteLedger) mockCreateAssets(ctx context.Context) {
+	s.mockQ.On("CreateAssets", ctx, mock.AnythingOfType("[]xdr.Asset"), maxBatchSize).
+		Run(func(args mock.Arguments) {
+			arg := args.Get(1).([]xdr.Asset)
+			s.Assert().ElementsMatch(
+				s.assets,
+				arg,
+			)
+		}).Return(s.assetToID, nil).Once()
+}
+
+func (s *TradeProcessorTestSuiteLedger) mockCreateAccounts(ctx context.Context) {
+	s.mockQ.On("CreateAccounts", ctx, mock.AnythingOfType("[]string"), maxBatchSize).
+		Run(func(args mock.Arguments) {
+			arg := args.Get(1).([]string)
+			s.Assert().ElementsMatch(
+				mapKeysToList(s.unmuxedAccountToID),
+				uniq(arg),
+			)
+		}).Return(s.unmuxedAccountToID, nil).Once()
+}
+
+func (s *TradeProcessorTestSuiteLedger) TestBatchAddError() {
+	ctx := context.Background()
+	s.mockReadTradeTransactions(s.processor.ledger)
+
+	s.mockCreateAccounts(ctx)
+
+	s.mockCreateAssets(ctx)
+
+	s.mockCreateHistoryLiquidityPools(ctx)
+
+	s.mockBatchInsertBuilder.On("Add", ctx, mock.AnythingOfType("[]history.InsertTrade")).
+ Return(fmt.Errorf("batch add error")).Once() + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(ctx, tx) + s.Assert().NoError(err) + } + + err := s.processor.Commit(ctx) + s.Assert().EqualError(err, "Error adding trade to batch: batch add error") +} + +func (s *TradeProcessorTestSuiteLedger) TestBatchExecError() { + ctx := context.Background() + insert := s.mockReadTradeTransactions(s.processor.ledger) + + s.mockCreateAccounts(ctx) + + s.mockCreateAssets(ctx) + + s.mockCreateHistoryLiquidityPools(ctx) + + s.mockBatchInsertBuilder.On("Add", ctx, mock.AnythingOfType("[]history.InsertTrade")). + Return(nil).Times(len(insert)) + s.mockBatchInsertBuilder.On("Exec", ctx).Return(fmt.Errorf("exec error")).Once() + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(ctx, tx) + s.Assert().NoError(err) + } + + err := s.processor.Commit(ctx) + s.Assert().EqualError(err, "Error flushing operation batch: exec error") +} + +func (s *TradeProcessorTestSuiteLedger) TestIgnoreCheckIfSmallLedger() { + ctx := context.Background() + insert := s.mockReadTradeTransactions(s.processor.ledger) + + s.mockCreateAccounts(ctx) + + s.mockCreateAssets(ctx) + + s.mockCreateHistoryLiquidityPools(ctx) + s.mockBatchInsertBuilder.On("Add", ctx, mock.AnythingOfType("[]history.InsertTrade")). + Return(nil).Times(len(insert)) + s.mockBatchInsertBuilder.On("Exec", ctx).Return(nil).Once() + + for _, tx := range s.txs { + err := s.processor.ProcessTransaction(ctx, tx) + s.Assert().NoError(err) + } + + err := s.processor.Commit(ctx) + s.Assert().NoError(err) +} + +func TestTradeProcessor_ProcessTransaction_MuxedAccount(t *testing.T) { + unmuxed := xdr.MustAddress("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2") + muxed := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xdeadbeefdeadbeef, + Ed25519: *unmuxed.Ed25519, + }, + } + tx := createTransaction(true, 1) + tx.Index = 1 + tx.Envelope.Operations()[0].Body = xdr.OperationBody{ + Type: xdr.OperationTypePayment, + PaymentOp: &xdr.PaymentOp{ + Destination: muxed, + Asset: xdr.Asset{Type: xdr.AssetTypeAssetTypeNative}, + Amount: 100, + }, + } +} diff --git a/services/horizon/internal/ingest/processors/transaction_operation_wrapper_test.go b/services/horizon/internal/ingest/processors/transaction_operation_wrapper_test.go new file mode 100644 index 0000000000..32e67d8b5b --- /dev/null +++ b/services/horizon/internal/ingest/processors/transaction_operation_wrapper_test.go @@ -0,0 +1,2282 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "testing" + + "github.com/stellar/go/protocols/horizon/base" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + . 
"github.com/stellar/go/services/horizon/internal/test/transactions" + "github.com/stellar/go/xdr" +) + +func TestTransactionOperationID(t *testing.T) { + tt := assert.New(t) + transaction := BuildLedgerTransaction( + t, + TestTransaction{ + Index: 1, + EnvelopeXDR: "AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + ResultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + MetaXDR: "AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + FeeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + Hash: "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c", + }, + ) + + operation := transactionOperationWrapper{ + index: 0, + transaction: transaction, + operation: transaction.Envelope.Operations()[0], + ledgerSequence: 56, + } + + tt.Equal(int64(240518172673), operation.ID()) +} + +func TestTransactionOperationOrder(t *testing.T) { + tt := assert.New(t) + transaction := BuildLedgerTransaction( + t, + TestTransaction{ + Index: 1, + EnvelopeXDR: "AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + ResultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + MetaXDR: "AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + FeeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + Hash: "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c", + }, + ) + + operation := transactionOperationWrapper{ + index: 0, + transaction: transaction, + operation: transaction.Envelope.Operations()[0], + ledgerSequence: 1, + } + + tt.Equal(uint32(1), operation.Order()) +} + +func TestTransactionOperationTransactionID(t *testing.T) { + tt := assert.New(t) + transaction := BuildLedgerTransaction( + t, + TestTransaction{ + Index: 1, + EnvelopeXDR: "AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + ResultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + MetaXDR: 
"AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + FeeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + Hash: "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c", + }, + ) + operation := transactionOperationWrapper{ + index: 0, + transaction: transaction, + operation: transaction.Envelope.Operations()[0], + ledgerSequence: 56, + } + + tt.Equal(int64(240518172672), operation.TransactionID()) +} + +func TestOperationTransactionSourceAccount(t *testing.T) { + testCases := []struct { + desc string + sourceAccount string + expected string + }{ + { + desc: "Operation without source account", + sourceAccount: "", + expected: "GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", + }, + { + desc: "Operation with source account", + sourceAccount: "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + expected: "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + { + desc: "Operation with source account", + sourceAccount: "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + expected: "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + }, + { + desc: "Operation with muxed source account", + sourceAccount: "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + expected: "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tt := assert.New(t) + transaction := BuildLedgerTransaction( + t, + TestTransaction{ + Index: 1, + EnvelopeXDR: "AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + ResultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + MetaXDR: "AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + FeeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + Hash: "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c", + }, + ) + op := transaction.Envelope.Operations()[0] + if len(tc.sourceAccount) > 0 { + sourceAccount := xdr.MustMuxedAddress(tc.sourceAccount) + op.SourceAccount = &sourceAccount + } + + operation := transactionOperationWrapper{ + index: 1, + transaction: transaction, + operation: op, + ledgerSequence: 1, + } + + tt.Equal(tc.expected, operation.SourceAccount().Address()) + }) + } +} + +func TestTransactionOperationType(t *testing.T) { + tt := assert.New(t) + transaction := BuildLedgerTransaction( + t, + TestTransaction{ + Index: 1, + EnvelopeXDR: 
"AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + ResultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + MetaXDR: "AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + FeeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + Hash: "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c", + }, + ) + + operation := transactionOperationWrapper{ + index: 1, + transaction: transaction, + operation: transaction.Envelope.Operations()[0], + ledgerSequence: 1, + } + + tt.Equal(xdr.OperationTypePayment, operation.OperationType()) +} + +func getRevokeSponsorshipEnvelopeXDR(t *testing.T) string { + source := xdr.MustMuxedAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + env := &xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: source, + Memo: xdr.Memo{Type: xdr.MemoTypeMemoNone}, + Operations: []xdr.Operation{ + { + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeRevokeSponsorship, + RevokeSponsorshipOp: &xdr.RevokeSponsorshipOp{ + Type: xdr.RevokeSponsorshipTypeRevokeSponsorshipSigner, + Signer: &xdr.RevokeSponsorshipOpSigner{ + AccountId: xdr.MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + SignerKey: xdr.MustSigner("GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS"), + }, + }, + }, + }, + }, + }, + }, + } + b64, err := xdr.MarshalBase64(env) + assert.NoError(t, err) + return b64 +} + +func TestTransactionOperationDetails(t *testing.T) { + testCases := []struct { + desc string + envelopeXDR string + resultXDR string + metaXDR string + feeChangesXDR string + hash string + index uint32 + expected map[string]interface{} + }{ + { + desc: "createAccount", + envelopeXDR: "AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDHU95E9wxgETD8TqxUrkgC0/7XHyNDts6Q5huRHfDRyRcoHdv7aMp/sPvC3RPkXjOMjgbKJUX7SgExUeYB5f8F", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: 
"AAAAAQAAAAIAAAADAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAZAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAaAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlahyo1sAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nHQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "0e5bd332291e3098e49886df2cdb9b5369a5f9e0a9973f0d9e1a9489c6581ba2", + index: 0, + expected: map[string]interface{}{ + "account": "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", + "funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "starting_balance": "1000.0000000", + }, + }, + { + desc: "createAccount with muxed accounts", + envelopeXDR: "AAAAAgAAAQAAAAAAAAAE0iAAdX7q5YP8UN1mn5dnOswl7HJYI6xz+vbH3zGtMeUJAAAAAAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvkAAAAAAAAAAAA", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAZAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAaAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlahyo1sAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nHQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + index: 0, + expected: map[string]interface{}{ + "account": "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", + "funder": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "funder_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "funder_muxed_id": uint64(1234), + "starting_balance": "1000.0000000", + }, + }, + { + desc: "payment", + envelopeXDR: 
"AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c", + index: 0, + expected: map[string]interface{}{ + "amount": "10.0000000", + "asset_type": "native", + "from": "GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", + "to": "GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", + }, + }, + { + desc: "payment with muxed accounts", + envelopeXDR: "AAAAAgAAAQAAAAAAAAAE0iAAdX7q5YP8UN1mn5dnOswl7HJYI6xz+vbH3zGtMeUJAAAAAAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAEAAAEAAAAAAAAAAAA/DDS/k60NmXHQTMyQ9wVRHIOKrZc0pKL7DXoD/H/omgAAAAAAAAAABfXhAAAAAAAAAAAA", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + index: 0, + expected: map[string]interface{}{ + "amount": "10.0000000", + "asset_type": "native", + "from": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "from_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "from_muxed_id": uint64(1234), + "to": "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ", + "to_muxed": "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ", + "to_muxed_id": uint64(0), + }, + }, + { + desc: "pathPaymentStrictReceive", + envelopeXDR: "AAAAAONt/6wGI884Zi6sYDYC1GOV/drnh4OcRrTrqJPoOTUKAAAAZAAAABAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAIAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAAAAAAAAAAAB6Dk1CgAAAEB+7jxesBKKrF343onyycjp2tiQLZiGH2ETl+9fuOqotveY2rIgvt9ng+QJ2aDP3+PnDsYEa9ZUaA+Zne2nIGgE", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAACAAAAAAAAAAEAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAgAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygAAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAA==", + metaXDR: 
"AAAAAQAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAMAAAATAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAUAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAALLQXgB//////////wAAAAEAAAAAAAAAAAAAAAMAAAATAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACGHEY1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAAEwAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAA7msoAAAAAAHc1lAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAKPpqzUAAAAEAAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAdzWUAAAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMAAAATAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+M4AAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "96415ac1d2f79621b26b1568f963fd8dd6c50c20a22c7428cefbfe9dee867588", + index: 0, + expected: map[string]interface{}{ + "to": "GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP", + "from": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "path": []map[string]interface{}{}, + "amount": "100.0000000", + "asset_code": "EUR", + "asset_type": "credit_alphanum4", + "source_max": "100.0000000", + "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", + "source_amount": "100.0000000", + "source_asset_type": "native", + }, + }, + { + desc: "pathPaymentStrictReceive with muxed accounts", + envelopeXDR: "AAAAAgAAAQAAAAAAAAAE0iAAdX7q5YP8UN1mn5dnOswl7HJYI6xz+vbH3zGtMeUJAAAAAAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAIAAAAAAAAAADuaygAAAAEAAAAAAAAAAAA/DDS/k60NmXHQTMyQ9wVRHIOKrZc0pKL7DXoD/H/omgAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygAAAAAAAAAAAAAAAAA=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAACAAAAAAAAAAEAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAgAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygAAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAA==", + metaXDR: 
"AAAAAQAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAMAAAATAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAUAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAALLQXgB//////////wAAAAEAAAAAAAAAAAAAAAMAAAATAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACGHEY1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAAEwAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAA7msoAAAAAAHc1lAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAKPpqzUAAAAEAAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAdzWUAAAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMAAAATAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+M4AAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + index: 0, + expected: map[string]interface{}{ + "to": "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ", + "to_muxed": "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ", + "to_muxed_id": uint64(0), + "from": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "from_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "from_muxed_id": uint64(1234), + "path": []map[string]interface{}{}, + "amount": "100.0000000", + "asset_code": "EUR", + "asset_type": "credit_alphanum4", + "source_max": "100.0000000", + "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", + "source_amount": "100.0000000", + "source_asset_type": "native", + }, + }, + { + desc: "manageSellOffer", + envelopeXDR: "AAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAZAAAABAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAARFUV7EAAABALuai5QxceFbtAiC5nkntNVnvSPeWR+C+FgplPAdRgRS+PPESpUiSCyuiwuhmvuDw7kwxn+A6E0M4ca1s2qzMAg==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA", + metaXDR: 
"AAAAAQAAAAIAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAAAAABIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+M4AAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "ca756d1519ceda79f8722042b12cea7ba004c3bd961adb62b59f88a867f86eb3", + index: 0, + expected: map[string]interface{}{ + "amount": "400.0000000", + "buying_asset_code": "USD", + "buying_asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", + "buying_asset_type": "credit_alphanum4", + "offer_id": xdr.Int64(0), + "price": "0.5000000", + "price_r": map[string]interface{}{ + "d": xdr.Int32(2), + "n": xdr.Int32(1), + }, + "selling_asset_type": "native", + }, + }, + { + desc: "createPassiveSellOffer", + envelopeXDR: "AAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAZAAAABkAAAABAAAAAAAAAAAAAAABAAAAAAAAAAQAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAAAAAAEyboYYAAAAQBqzCYDuLYn/jXhfEVxEGigMCJGoOBCK92lUb3Um15PgwSJ63tNl+FpH8+y5c+mCs/rzcvdyo9uXdodd4LXWiQg=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAUAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA", + metaXDR: "AAAAAQAAAAIAAAADAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAHc1lAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABoAAAACAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAUAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMAAAAZAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+QAAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+OcAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "a76e0260f6b83c6ea93f545d17de721c079dc31e81ee5edc41f159ec5fb48443", + index: 0, + expected: map[string]interface{}{ + "price": "1.0000000", + "amount": "200.0000000", + "price_r": map[string]interface{}{ + "d": xdr.Int32(1), + "n": xdr.Int32(1), + }, + "buying_asset_type": "native", + "selling_asset_code": "USD", + "selling_asset_type": "credit_alphanum4", + "selling_asset_issuer": 
"GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q", + }, + }, + { + desc: "setOption - home domain", + envelopeXDR: "AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAC2V4YW1wbGUuY29tAAAAAAAAAAAAAAAAATCeMFAAAABAkID6CkBHP9eovLQXkMQJ7QkE6NWlmdKGmLxaiI1YaVKZaKJxz5P85x+6wzpYxxbs6Bd2l4qxVjS7Q36DwRiqBA==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAFAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAGAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgACAgAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+IMAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+GoAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "8ccc0c28c3e99a63cc59bad7dec3f5c56eb3942c548ecd40bc39c509d6b081d4", + index: 0, + expected: map[string]interface{}{ + "home_domain": xdr.String32("example.com"), + }, + }, + { + desc: "setOption - signer", + envelopeXDR: "AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAMAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAAAAAAAAAAAATCeMFAAAABAOb0qGWnk1WrSUXS6iQFocaIOY/BDmgG1zTmlPyg0boSid3jTBK3z9U8+IPGAOELNLgkQHtgGYFgFGMio1xY+BQ==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvfUAAAABsAAAALAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAAAAAAAQAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAADAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAACAAAAAwAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAADAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAAgAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC99QAAAAGwAAAAwAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAALZXhhbXBsZS5jb20AAgACAgAAAAAAAAAAAAAAAA==", + feeChangesXDR: 
"AAAAAgAAAAMAAAAgAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC9+0AAAAGwAAAAoAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAAAAAABAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvfUAAAABsAAAAKAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAA", + hash: "6b38cdd5c17df2013d5a5e211c4b32218b6be91025316b1aab28bc12316615d5", + index: 0, + expected: map[string]interface{}{ + "signer_key": "GB6J3WOLKYQE6KVDZEA4JDMFTTONUYP3PUHNDNZRWIKA6JQWIMJZATFE", + "signer_weight": xdr.Uint32(0), + }, + }, + { + desc: "setOption - inflation dest", + envelopeXDR: "AAAAAOPd2ARCnU3lTd8FI4LH+evle2IKY0nagwlkzH4xgrcnAAAAZAAAAC0AAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAABAAAAAOPd2ARCnU3lTd8FI4LH+evle2IKY0nagwlkzH4xgrcnAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMYK3JwAAAEAOkGOPTOBDSQ7nW2Zn+bls2PDUebk2/k3/gqHKQ8eYOFsD6nBeEvyMD858vo5BabjQwB9injABIM8esDh7bEkC", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAALgAAAAAAAAAA493YBEKdTeVN3wUjgsf56+V7YgpjSdqDCWTMfjGCtycCxorwuxP/nAAAAC0AAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALgAAAAAAAAAA493YBEKdTeVN3wUjgsf56+V7YgpjSdqDCWTMfjGCtycCxorwuxP/nAAAAC0AAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAtAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7FAAAAAAALQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "7207de5b75243e0b062c3833f587036b7e9f64453be49ff50f3f3fdc7516ec6b", + index: 0, + expected: map[string]interface{}{ + "inflation_dest": "GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS", + }, + }, + { + desc: "setOption - set flags (all)", + envelopeXDR: "AAAAAOfbN5h8zjMqvileFVS66GUvvu5mbAKtbhD+buOEj6BzAAAAZAAGVyQAAAABAAAAAQAAAAAAAAAAAAAAAF3b7rYAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABhI+gcwAAAEAZ5WOkuymbGA/kmUxoKzpdc5Hupy6xgVDA2uzckBXDaPLieH9AMXi9c8ptXDBVBopJQy+31VA63yiR6+b2mOQH", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAZXJQAAAAAAAAAA59s3mHzOMyq+KV4VVLroZS++7mZsAq1uEP5u44SPoHMAAAAAATEsnAAGVyQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAZXJQAAAAAAAAAA59s3mHzOMyq+KV4VVLroZS++7mZsAq1uEP5u44SPoHMAAAAAATEsnAAGVyQAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMABlclAAAAAAAAAADn2zeYfM4zKr4pXhVUuuhlL77uZmwCrW4Q/m7jhI+gcwAAAAABMSycAAZXJAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABlclAAAAAAAAAADn2zeYfM4zKr4pXhVUuuhlL77uZmwCrW4Q/m7jhI+gcwAAAAABMSycAAZXJAAAAAEAAAAAAAAAAAAAAAcAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMABlckAAAAAAAAAADn2zeYfM4zKr4pXhVUuuhlL77uZmwCrW4Q/m7jhI+gcwAAAAABMS0AAAZXJAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABlclAAAAAAAAAADn2zeYfM4zKr4pXhVUuuhlL77uZmwCrW4Q/m7jhI+gcwAAAAABMSycAAZXJAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: 
"2c11336f2b87b149eb4c49a7c8693281450761e140c321929da9987709f6f0d3", + index: 0, + expected: map[string]interface{}{ + "set_flags": []int32{1, 2, 4}, + "set_flags_s": []string{ + "auth_required", + "auth_revocable", + "auth_immutable", + }, + }, + }, + { + desc: "setOption - set flags (auth required)", + envelopeXDR: "AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAACAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEDYxq3zpaFIC2JcuJUbrQ3MFXzqvu+5G7XUi4NnHlfbLutn76ylQcjuwLgbUG2lqcQfl75doPUZyurKtFP1rkMO", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAIAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAIAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+OcAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+M4AAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "345ef7f85c6ea297e3f994feef279b63812628681bd173a1f615185a4368e482", + index: 0, + expected: map[string]interface{}{ + "set_flags": []int32{1}, + "set_flags_s": []string{ + "auth_required", + }, + }, + }, + { + desc: "setOption - set flags (auth revocable)", + envelopeXDR: "AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAADAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEAKuQ1exMu8hdf8dOPeULX2DG7DZx5WWIUFHXJMWGG9KmVrQoZDt2S6a/1uYEVJnvvY/EoJM5RpVjh2ZCs30VYA", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAADAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAMAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAMAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+M4AAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+LUAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "2a14735d7b05109359444acdd87e7fe92c98e9295d2ba61b05e25d1f7ee10fd3", + index: 0, + expected: map[string]interface{}{ + "set_flags": []int32{2}, + "set_flags_s": []string{ + "auth_revocable", + }, + }, + }, + { + desc: "setOption - set flags (auth 
immutable)", + envelopeXDR: "AAAAAOZPoQTlXBixd6XSUExX/Yvos/pVllkUNdNvCdmC+mNkAAACvAAE5bIAAAAOAAAAAQAAAAAAAAAAAAAAAF3X8mwAAAAAAAAABwAAAAEAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAAAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAAAACYloAAAAABAAAAAOZPoQTlXBixd6XSUExX/Yvos/pVllkUNdNvCdmC+mNkAAAAAAAAAAD66ofFUOv3/k5PaB0F6wr5c0jvwdDY933ssbjK656DmwAAAAAF9eEAAAAAAQAAAAD66ofFUOv3/k5PaB0F6wr5c0jvwdDY933ssbjK656DmwAAAAYAAAABVFNUAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAB3NZQAAAAAAQAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAEAAAAA+uqHxVDr9/5OT2gdBesK+XNI78HQ2Pd97LG4yuueg5sAAAABVFNUAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAB3NZQAAAAAAQAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAABF0c3QudGVzdGFzc2V0LmNvbQAAAAAAAAAAAAABAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAABQAAAAAAAAAAAAAAAQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAABQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4L6Y2QAAABAd8d0e8OiMmmGlxLrPu8JfTLphUfFPgx0Fs/fwU6/ilzwbpTHCKICWGlSz8enjb57FXD6DliXcaWJeR/2Fj8tB+ueg5sAAABAkAwqpu1liQpxh3C2MdsDoOg/N4pxuUuzh0Ey/0g0QbWy0Y2bBkLPldsGj/pDNbKfkZPGfdx4MZ6rHbUdGEwgDRx9mSAAAABA/IRS0D7EcFS1J6uR4HnOvh8tikBhVe+0uI6DPkqv/GfSqeuoZIRyWxKSd/v64DxxozKZsmQmatLZqOnQwkuxCA==", + resultXDR: "AAAAAAAAArwAAAAAAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAWcAAAAAAAAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAW4yLaGAAE5bIAAAANAAAAAAAAAAAAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAWcAAAAAAAAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAW4yLaGAAE5bIAAAAOAAAAAAAAAAAAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAwAAAAMABZwAAAAAAAAAAADmT6EE5VwYsXel0lBMV/2L6LP6VZZZFDXTbwnZgvpjZAAAABbjItoYAATlsgAAAA4AAAAAAAAAAAAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZwAAAAAAAAAAADmT6EE5VwYsXel0lBMV/2L6LP6VZZZFDXTbwnZgvpjZAAAABbiikOYAATlsgAAAA4AAAAAAAAAAAAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAABZwAAAAAAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAAAmJaAAAWcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAADAAWcAAAAAAAAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAW4opDmAAE5bIAAAAOAAAAAAAAAAAAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAWcAAAAAAAAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAW3JRimAAE5bIAAAAOAAAAAAAAAAAAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAWcAAAAAAAAAAAA+uqHxVDr9/5OT2gdBesK+XNI78HQ2Pd97LG4yuueg5sAAAAABfXhAAAFnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAAAwAFnAAAAAAAAAAAAPrqh8VQ6/f+Tk9oHQXrCvlzSO/B0Nj3feyxuMrrnoObAAAAAAX14QAABZwAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAFnAAAAAAAAAAAAPrqh8VQ6/f+Tk9oHQXrCvlzSO/B0Nj3feyxuMrrnoObAAAAAAX14QAABZwAAAAAAAAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAFnAAAAAABAAAAAPrqh8VQ6/f+Tk9oHQXrCvlzSO/B0Nj3feyxuMrrnoObAAAAAVRTVAAAAAAA8SasYjn6L0hs/J/eX7qRWEwvWVRkkmbJC90FFxx9mSAAAAAAAAAAAAAAAAB3NZQAAAAAAQAAAAAAAAAAAAAAAgAAAAMABZwAAAAAAQAAAAD66ofFUOv3/k5PaB0F6wr5c0jvwdDY933ssbjK656DmwAAAAFUU1QAAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAAAAAAAAAAAAAAdzWUAAAAAAEAAAAAAAAAAAAAAAEABZwAAAAAAQAAAAD66ofFUOv3/k5PaB0F6wr5c0jvwdDY933ssbjK656DmwAAAAFUU1QAAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAAAHc1lAAAAAAAdzWUAAAAAAEAAAAAAAAAAAAAAAIAAAADAAWcAAAAAAAAAAAA8SasYjn6L0hs/J/eX7qRWEwvWVRkkmbJC90FFxx9mSAAAAAAAJiWgAAFnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAWcAAAAAAAAAAAA8SasYjn6L0hs/J/eX7qRWEwvWVRkkmbJC90FFxx9mSAAAAAAAJiWgAAFnAAAAAAAAAAAAAAAAAAAAAAAAAAAEXRzdC50ZXN0YX
NzZXQuY29tAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAIAAAADAAWcAAAAAAAAAAAA8SasYjn6L0hs/J/eX7qRWEwvWVRkkmbJC90FFxx9mSAAAAAAAJiWgAAFnAAAAAAAAAAAAAAAAAAAAAAAAAAAEXRzdC50ZXN0YXNzZXQuY29tAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZwAAAAAAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAAAmJaAAAWcAAAAAAAAAAAAAAAAAAAAAAQAAAARdHN0LnRlc3Rhc3NldC5jb20AAAABAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAMABZwAAAAAAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAAAmJaAAAWcAAAAAAAAAAAAAAAAAAAAAAQAAAARdHN0LnRlc3Rhc3NldC5jb20AAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAFnAAAAAAAAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAAAACYloAABZwAAAAAAAAAAAAAAAAAAAAABAAAABF0c3QudGVzdGFzc2V0LmNvbQAAAAAAAAAAAAAAAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMABZv0AAAAAAAAAADmT6EE5VwYsXel0lBMV/2L6LP6VZZZFDXTbwnZgvpjZAAAABbjItzUAATlsgAAAA0AAAAAAAAAAAAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZwAAAAAAAAAAADmT6EE5VwYsXel0lBMV/2L6LP6VZZZFDXTbwnZgvpjZAAAABbjItoYAATlsgAAAA0AAAAAAAAAAAAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "12e901c3c12cc9f35adc09e83c0a2d58f939e8c0bebc6f9a945aea42f2587cfe", + index: 5, + expected: map[string]interface{}{ + "set_flags": []int32{4}, + "set_flags_s": []string{ + "auth_immutable", + }, + }, + }, + { + desc: "setOption - master weight", + envelopeXDR: "AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATCeMFAAAABAAd6MzHDjUdRtHozzDnD3jJA+uRDCar3PQtuH/43pnROzk1HkovJPQ1YyzcpOb/NeuU/LKNzseL0PJNasVX1lAQ==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAADAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAEAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+LUAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+JwAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "4f9598206ab17cf27b5c3eb9e906d63ebee2626654112eabdd2bce7bf12cccf2", + index: 0, + expected: map[string]interface{}{ + "master_key_weight": xdr.Uint32(2), + }, + }, + { + desc: "setOption - thresholds (medium, high)", + envelopeXDR: "AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAACAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEAnFzc6kqweyIL4TzIDbr+8GUOGGs1W5jcX5iSNw4DeonzQARlejYJ9NOn/XkrcoC9Hvd8hc5lNx+1h991GxJUJ", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: 
"AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAEAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAFAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgACAgAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+JwAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+IMAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "852ba25e0e4aa149a22dc193bcb645ae9eba23e7f7432707f3b910474e9b6a5b", + index: 0, + expected: map[string]interface{}{ + "low_threshold": xdr.Uint32(0), + "med_threshold": xdr.Uint32(2), + "high_threshold": xdr.Uint32(2), + }, + }, + { + desc: "setOption - thresholds (low, medium, high)", + envelopeXDR: "AAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAZAAAAAQAAAADAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAABAAAAAgAAAAEAAAACAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAABKe4g1QAAAEDglRRymtLjw+ImmGwTiBTKE7X7+2CywlHw8qed+t520SbAggcqboy5KXJaEP51/wRSMxtZUgDOFfaDn9Df04EA", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAABAAAAAPZPnUyLZ+OYJjhn5Hkk43UuW6rOuemZPFQldOn8DcVHAAAAAQAAAAAAAAAAAAAAAQAAAAUAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4tQAAAAEAAAAAwAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAAUAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4tQAAAAEAAAAAwAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAMAAAABAAAAAAAAAAAAAAAAAQICAgAAAAEAAAAA9k+dTItn45gmOGfkeSTjdS5bqs656Zk8VCV06fwNxUcAAAABAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+M4AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "f78dca926455579b4a43009ffe35a0229a6da4bed32d3c999d7a06ad26605a25", + index: 0, + expected: map[string]interface{}{ + "low_threshold": xdr.Uint32(2), + "med_threshold": xdr.Uint32(2), + "high_threshold": xdr.Uint32(2), + }, + }, + { + desc: "setOption - clears flags", + envelopeXDR: "AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAALAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEAFytUxjxN4bnJMrEJkSprnES9iGpOxAsNOFYrTP/xtGVk/PZ2oThUW+/hLRIk+hYYEgF21Gf58N/abJKFpqlsI", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: 
"AAAAAQAAAAIAAAADAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvfUAAAABsAAAAKAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAAAAAAAQAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAACwAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAACAAAAAwAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAACwAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAAgAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC99QAAAAGwAAAAsAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMAAAAfAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+AYAAAAGwAAAAoAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAAAAAABAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvftAAAABsAAAAKAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAA", + hash: "bb9d6654111fae501594400dc901c70d47489a67163d2a34f9b3e32a921a50dc", + index: 0, + expected: map[string]interface{}{ + "clear_flags": []int32{1, 2}, + "clear_flags_s": []string{ + "auth_required", + "auth_revocable", + }, + }, + }, + { + desc: "changeTrust", + envelopeXDR: "AAAAAKturFHJX/eRt5gM6qIXAMbaXvlImqLysA6Qr9tLemxfAAAAZAAAACYAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+H//////////AAAAAAAAAAFLemxfAAAAQKN8LftAafeoAGmvpsEokqm47jAuqw4g1UWjmL0j6QPm1jxoalzDwDS3W+N2HOHdjSJlEQaTxGBfQKHhr6nNsAA=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAmAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "6fa467b53f5386d77ad35c2502ed2cd3dd8b460a5be22b6b2818b81bcd3ed2da", + index: 0, + expected: map[string]interface{}{ + "asset_code": "USD", + "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + "asset_type": "credit_alphanum4", + "limit": "922337203685.4775807", + "trustee": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + "trustor": 
"GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", + }, + }, + { + desc: "changeTrust with muxed accounts", + envelopeXDR: "AAAAAgAAAQAAAAAAAAAE0iAAdX7q5YP8UN1mn5dnOswl7HJYI6xz+vbH3zGtMeUJAAAAAAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+H//////////AAAAAAAAAAA=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAmAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + index: 0, + expected: map[string]interface{}{ + "asset_code": "USD", + "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + "asset_type": "credit_alphanum4", + "limit": "922337203685.4775807", + "trustee": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + "trustor": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "trustor_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "trustor_muxed_id": uint64(1234), + }, + }, + { + desc: "changeTrust with muxed accounts of liquidity pool", + envelopeXDR: "AAAAAgAAAQAAAAAAAAAE0iAAdX7q5YP8UN1mn5dnOswl7HJYI6xz+vbH3zGtMeUJAAAAAAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAYAAAADAAAAAAAAAAAAAAABVVNEAAAAAAAokk0ZqR+mxwuhJJ2uXvNqIhmObygxBFIJKvQgf/7fqwAAABR//////////wAAAAAAAAAA", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAmAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + index: 0, + expected: map[string]interface{}{ + "asset_type": "liquidity_pool_shares", + "limit": "922337203685.4775807", + "trustor": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "trustor_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "trustor_muxed_id": 
uint64(1234), + "liquidity_pool_id": "ea4e3e63a95fd840c1394f195722ffdcb2d0d4f0a26589c6ab557d81e6b0bf9d", + }, + }, + { + desc: "allowTrust", + envelopeXDR: "AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAACAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABVVNEAAAAAAEAAAAAAAAAAUpI8/gAAABA6O2fe1gQBwoO0fMNNEUKH0QdVXVjEWbN5VL51DmRUedYMMXtbX5JKVSzla2kIGvWgls1dXuXHZY/IOlaK01rBQ==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAABAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAACAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "6d2e30fd57492bf2e2b132e1bc91a548a369189bebf77eb2b3d829121a9d2c50", + index: 0, + expected: map[string]interface{}{ + "asset_code": "USD", + "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + "asset_type": "credit_alphanum4", + "authorize": true, + "trustee": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", + "trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", + }, + }, + { + desc: "allowTrust with muxed accounts", + envelopeXDR: "AAAAAgAAAQAAAAAAAAAE0iAAdX7q5YP8UN1mn5dnOswl7HJYI6xz+vbH3zGtMeUJAAAAAAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABVVNEAAAAAAEAAAAAAAAAAA==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAABAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAACAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + index: 0, + expected: map[string]interface{}{ + "asset_code": "USD", + "asset_issuer": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "asset_type": "credit_alphanum4", + "authorize": true, + "trustee": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "trustee_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "trustee_muxed_id": uint64(1234), + "trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", + }, + }, + { + desc: "accountMerge 
(Destination)", + envelopeXDR: "AAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+HvAAAAZAAAACsAAAABAAAAAAAAAAAAAAABAAAAAAAAAAgAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAYrj4e8AAABA3jJ7wBrRpsrcnqBQWjyzwvVz2v5UJ56G60IhgsaWQFSf+7om462KToc+HJ27aLVOQ83dGh1ivp+VIuREJq/SBw==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtonM3Az4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/CUAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+Hv", + feeChangesXDR: "AAAAAgAAAAMAAAArAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+QAAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "e0773d07aba23d11e6a06b021682294be1f9f202a2926827022539662ce2c7fc", + index: 0, + expected: map[string]interface{}{ + "into": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + "account": "GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ", + }, + }, + { + desc: "accountMerge (Destination) with muxed accounts", + envelopeXDR: "AAAAAgAAAQAAAAAAAAAE0iAAdX7q5YP8UN1mn5dnOswl7HJYI6xz+vbH3zGtMeUJAAAAAAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAgAAAEAAAAAAAAAAAA/DDS/k60NmXHQTMyQ9wVRHIOKrZc0pKL7DXoD/H/omgAAAAAAAAAA", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtonM3Az4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/CUAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+Hv", + feeChangesXDR: "AAAAAgAAAAMAAAArAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+QAAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + index: 0, + expected: map[string]interface{}{ + "into": "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ", + "into_muxed": "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ", + "into_muxed_id": uint64(0), + "account": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "account_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "account_muxed_id": uint64(1234), + }, + }, + { + desc: "inflation", + envelopeXDR: 
"AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAVAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAAAVb8BfcAAABABUHuXY+MTgW/wDv5+NDVh9fw4meszxeXO98HEQfgXVeCZ7eObCI2orSGUNA/SK6HV9/uTVSxIQQWIso1QoxHBQ==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAAIAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAIrEjCYwXAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAIrEjfceLAAAAAA==", + metaXDR: "AAAAAQAAAAIAAAADAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAUAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAVAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h/EAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGraHekccnAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "ea93efd8c2f4e45c0318c69ec958623a0e4374f40d569eec124d43c8a54d6256", + index: 0, + expected: map[string]interface{}{}, + }, + { + desc: "manageData", + envelopeXDR: "AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTIAAAAAAAABAAAABDU2NzgAAAAAAAAAAS6Z+xkAAABAjxgnTRBCa0n1efZocxpEjXeITQ5sEYTVd9fowuto2kPw5eFwgVnz6OrKJwCRt5L8ylmWiATXVI3Zyfi3yTKqBA==", + + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMgAAAAAAAAQ1Njc4AAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+OcAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+M4AAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "e4609180751e7702466a8845857df43e4d154ec84b6bad62ce507fe12f1daf99", + index: 0, + expected: map[string]interface{}{ + "name": "name2", + "value": "NTY3OA==", + }, + }, + { + desc: "bumpSequence", + envelopeXDR: 
"AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgDAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AwAAAAAAAAABHK0SlAAAAECcI6ex0Dq6YAh6aK14jHxuAvhvKG2+NuzboAKrfYCaC1ZSQ77BYH/5MghPX97JO9WXV17ehNK7d0umxBgaJj8A", + + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgCAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgDAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA8AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+LUAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA9AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+JwAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "bc11b5c41de791369fd85fa1ccf01c35c20df5f98ff2f75d02ead61bfd520e21", + index: 0, + expected: map[string]interface{}{ + "bump_to": "300000000003", + }, + }, + { + desc: "manageBuyOffer", + envelopeXDR: "AAAAAJ/0uhpjIPNaeEcEqBy5SVquaG77leHg6iNYV67vrxFhAAAAZAAFedEAAAAQAAAAAQAAAAAAAAAAAAAAAF3cTA8AAAABAAAADk1ha2UgQnV5IE9mZmVyAAAAAAABAAAAAAAAAAwAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAAAAAAANZresAAAAAgAAAABAAAAAAAAAAAAAAAAAAAAAe+vEWEAAABAY0cI3kQXv1EcCDDmf3hCKLLEiinkVPB2+rAJe8PnA8WY8r27xGr5LCikUj8n7wEAtzMM83VcPYIMoJROYMjvCA==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAMAAAAAAAAAAIAAAAAs/VGFE8UPc+joxmES67/oVFj3tKJVdCzBqJoIsmWHmkAAAAAAEPjtgAAAAAAAAAAL3ix2AAAAAFYQ1oAAAAAALQZ5edJKuM+3mJpWjwNsuJlbSwevZY/3MxZi4Yyt0SFAAAAAAXvFjsAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAAAAEPkBgAAAAAAAAAABiIs2AAAAAFYQ1oAAAAAALQZ5edJKuM+3mJpWjwNsuJlbSwevZY/3MxZi4Yyt0SFAAAAAADERZsAAAACAAAAAA==", + metaXDR: 
"AAAAAQAAAAIAAAADAAZoPQAAAAAAAAAAn/S6GmMg81p4RwSoHLlJWq5obvuV4eDqI1hXru+vEWEAAAAXYEnCxwAFedEAAAAPAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAB7RNLgAAAAAAAAAAAAAAAEABmg9AAAAAAAAAACf9LoaYyDzWnhHBKgcuUlarmhu+5Xh4OojWFeu768RYQAAABdgScLHAAV50QAAABAAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAHtE0uAAAAAAAAAAAAAAAAQAAABAAAAADAAZoOAAAAAIAAAAAs/VGFE8UPc+joxmES67/oVFj3tKJVdCzBqJoIsmWHmkAAAAAAEPjtgAAAAAAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAAveLHYAAAAAQAAAAgAAAAAAAAAAAAAAAAAAAACAAAAAgAAAACz9UYUTxQ9z6OjGYRLrv+hUWPe0olV0LMGomgiyZYeaQAAAAAAQ+O2AAAAAwAGaCwAAAAAAAAAAIBSoZJZ0L+1+OECZtMg5Jr47WRR7PjuuXHzvhIVsDnUAAAAF6bkRhMABXnFAAAAEwAAAAYAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACrQT9gAAAAAAAAAAAAAAABAAZoPQAAAAAAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAAXoMIZOwAFecUAAAATAAAABgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAKUfEogAAAAAAAAAAAAAAAMABmg4AAAAAQAAAACz9UYUTxQ9z6OjGYRLrv+hUWPe0olV0LMGomgiyZYeaQAAAAFYQ1oAAAAAALQZ5edJKuM+3mJpWjwNsuJlbSwevZY/3MxZi4Yyt0SFAAAADJbEu7YAAAAXSHboAAAAAAEAAAABAAAAADfP+isAAAAAAAAAAAAAAAAAAAAAAAAAAQAGaD0AAAABAAAAALP1RhRPFD3Po6MZhEuu/6FRY97SiVXQswaiaCLJlh5pAAAAAVhDWgAAAAAAtBnl50kq4z7eYmlaPA2y4mVtLB69lj/czFmLhjK3RIUAAAAMnLPR8QAAABdIdugAAAAAAQAAAAEAAAAAMeDj8AAAAAAAAAAAAAAAAAAAAAAAAAADAAZoLAAAAAIAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAAAAEPkBgAAAAAAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAAWIzSwAAAAAQAAAAgAAAAAAAAAAAAAAAAAAAABAAZoPQAAAAIAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAAAAEPkBgAAAAAAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAAQAQfYAAAAAQAAAAgAAAAAAAAAAAAAAAAAAAADAAZoOAAAAAAAAAAAs/VGFE8UPc+joxmES67/oVFj3tKJVdCzBqJoIsmWHmkAAAAXpiEUNgAFecIAAAATAAAABgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAIOopqAAAAAAAAAAAAAAAAEABmg9AAAAAAAAAACz9UYUTxQ9z6OjGYRLrv+hUWPe0olV0LMGomgiyZYeaQAAABd2qGJeAAV5wgAAABMAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAVC/0yAAAAAAAAAAAAAAAAwAGaCwAAAABAAAAAIBSoZJZ0L+1+OECZtMg5Jr47WRR7PjuuXHzvhIVsDnUAAAAAVhDWgAAAAAAtBnl50kq4z7eYmlaPA2y4mVtLB69lj/czFmLhjK3RIUAAAADPc/w0QAAABdIdugAAAAAAQAAAAEAAAABRw/DngAAAAAAAAAAAAAAAAAAAAAAAAABAAZoPQAAAAEAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAM+lDZsAAAAF0h26AAAAAABAAAAAQAAAAFGS34DAAAAAAAAAAAAAAAAAAAAAAAAAAMABmg9AAAAAAAAAACf9LoaYyDzWnhHBKgcuUlarmhu+5Xh4OojWFeu768RYQAAABdgScLHAAV50QAAABAAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAHtE0uAAAAAAAAAAAAAAAAQAGaD0AAAAAAAAAAJ/0uhpjIPNaeEcEqBy5SVquaG77leHg6iNYV67vrxFhAAAAF5XkoXcABXnRAAAAEAAAAAQAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAe0TS4AAAAAAAAAAAAAAADAAYzMAAAAAEAAAAAn/S6GmMg81p4RwSoHLlJWq5obvuV4eDqI1hXru+vEWEAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAABNL7iG9AAAAF0h26AAAAAABAAAAAQAAAADBUPuwAAAAAAAAAAAAAAAAAAAAAAAAAAEABmg9AAAAAQAAAACf9LoaYyDzWnhHBKgcuUlarmhu+5Xh4OojWFeu768RYQAAAAFYQ1oAAAAAALQZ5edJKuM+3mJpWjwNsuJlbSwevZY/3MxZi4Yyt0SFAAAAE0U6xecAAAAXSHboAAAAAAEAAAABAAAAAMFQ+7AAAAAAAAAAAAAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMABjMvAAAAAAAAAACf9LoaYyDzWnhHBKgcuUlarmhu+5Xh4OojWFeu768RYQAAABdgScMrAAV50QAAAA8AAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAHtE0uAAAAAAAAAAAAAAAAQAGaD0AAAAAAAAAAJ/0uhpjIPNaeEcEqBy5SVquaG77leHg6iNYV67vrxFhAAAAF2BJwscABXnRAAAADwAAAAQAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAe0TS4AAAAAAAAAAA=", + hash: "94b050b55dfdfa55dc46889b6f6b5798c24cf77983562826a74db058ff0eb5b4", + index: 0, + expected: map[string]interface{}{ + "price": "8.0000000", + "amount": "89.9342000", + "price_r": map[string]interface{}{ + "d": xdr.Int32(1), + 
"n": xdr.Int32(8), + }, + "offer_id": xdr.Int64(0), + "buying_asset_type": "native", + "selling_asset_code": "XCZ", + "selling_asset_type": "credit_alphanum4", + "selling_asset_issuer": "GC2BTZPHJEVOGPW6MJUVUPANWLRGK3JMD26ZMP64ZRMYXBRSW5CIKEW5", + }, + }, + { + desc: "pathPaymentStrictSend", + envelopeXDR: "AAAAAOsC3UuQJXeJWl7o2Q9Wf2RvZiHiHKfSbtDNsXkn3NMiAAAAZAAKTgQAAAABAAAAAAAAAAAAAAABAAAAAAAAAA0AAAAAAAAAAAX14QAAAAAA4VWYSXp7+QFjS8+8WzU2KJTONKIIk2FHXORcby4KqbgAAAAAAAAAAAX14QAAAAABAAAAAAAAAAAAAAABJ9zTIgAAAEBPAPVBKa8d5/DyiTghHO8OnFNtxa4WSMW1geqCH+83EL+yyLszkzdIWSBX8/N9FC1Mo+DTRF/peVAsxlL4G04N", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAANAAAAAAAAAAAAAAAA4VWYSXp7+QFjS8+8WzU2KJTONKIIk2FHXORcby4KqbgAAAAAAAAAAAX14QAAAAAA", + metaXDR: "AAAAAQAAAAIAAAADAApOFQAAAAAAAAAA6wLdS5Ald4laXujZD1Z/ZG9mIeIcp9Ju0M2xeSfc0yIAAAAXSHbnnAAKTgQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAApOFQAAAAAAAAAA6wLdS5Ald4laXujZD1Z/ZG9mIeIcp9Ju0M2xeSfc0yIAAAAXSHbnnAAKTgQAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMACk4VAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdIduecAApOBAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACk4VAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdCgQacAApOBAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMABZwuAAAAAAAAAADhVZhJenv5AWNLz7xbNTYolM40ogiTYUdc5FxvLgqpuAAAAAAAmJaAAAWcLgAAAAAAAAAAAAAAAAAAAAQAAAARdHN0LnRlc3Rhc3NldC5jb20AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAKThUAAAAAAAAAAOFVmEl6e/kBY0vPvFs1NiiUzjSiCJNhR1zkXG8uCqm4AAAAAAaOd4AABZwuAAAAAAAAAAAAAAAAAAAABAAAABF0c3QudGVzdGFzc2V0LmNvbQAAAAAAAAAAAAAAAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMACk4EAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdIdugAAApOBAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACk4VAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdIduecAApOBAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "5cbb2e13e2ddd72075b6c7e83eb018d0092c27972bf1f0343b83be9c5f964b62", + index: 0, + expected: map[string]interface{}{ + "amount": "10.0000000", + "asset_type": "native", + "destination_min": "10.0000000", + "from": "GDVQFXKLSASXPCK2L3UNSD2WP5SG6ZRB4IOKPUTO2DG3C6JH3TJSEA7R", + "path": []map[string]interface{}{{"asset_type": "native"}}, + "source_amount": "10.0000000", + "source_asset_type": "native", + "to": "GDQVLGCJPJ57SALDJPH3YWZVGYUJJTRUUIEJGYKHLTSFY3ZOBKU3QIO3", + }, + }, + { + desc: "pathPaymentStrictSend with muxed accounts", + envelopeXDR: "AAAAAgAAAQAAAAAAAAAE0iAAdX7q5YP8UN1mn5dnOswl7HJYI6xz+vbH3zGtMeUJAAAAAAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAA0AAAAAAAAAAAX14QAAAAEAAAAAAAAAAAA/DDS/k60NmXHQTMyQ9wVRHIOKrZc0pKL7DXoD/H/omgAAAAAAAAAABfXhAAAAAAEAAAAAAAAAAAAAAAA=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAANAAAAAAAAAAAAAAAA4VWYSXp7+QFjS8+8WzU2KJTONKIIk2FHXORcby4KqbgAAAAAAAAAAAX14QAAAAAA", + metaXDR: 
"AAAAAQAAAAIAAAADAApOFQAAAAAAAAAA6wLdS5Ald4laXujZD1Z/ZG9mIeIcp9Ju0M2xeSfc0yIAAAAXSHbnnAAKTgQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAApOFQAAAAAAAAAA6wLdS5Ald4laXujZD1Z/ZG9mIeIcp9Ju0M2xeSfc0yIAAAAXSHbnnAAKTgQAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMACk4VAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdIduecAApOBAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACk4VAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdCgQacAApOBAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMABZwuAAAAAAAAAADhVZhJenv5AWNLz7xbNTYolM40ogiTYUdc5FxvLgqpuAAAAAAAmJaAAAWcLgAAAAAAAAAAAAAAAAAAAAQAAAARdHN0LnRlc3Rhc3NldC5jb20AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAKThUAAAAAAAAAAOFVmEl6e/kBY0vPvFs1NiiUzjSiCJNhR1zkXG8uCqm4AAAAAAaOd4AABZwuAAAAAAAAAAAAAAAAAAAABAAAABF0c3QudGVzdGFzc2V0LmNvbQAAAAAAAAAAAAAAAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMACk4EAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdIdugAAApOBAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACk4VAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdIduecAApOBAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + index: 0, + expected: map[string]interface{}{ + "amount": "10.0000000", + "asset_type": "native", + "destination_min": "10.0000000", + "from": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "from_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "from_muxed_id": uint64(1234), + "path": []map[string]interface{}{{"asset_type": "native"}}, + "source_amount": "10.0000000", + "source_asset_type": "native", + "to": "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ", + "to_muxed": "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ", + "to_muxed_id": uint64(0), + }, + }, + { + desc: "revokeSponsorship (signer)", + envelopeXDR: getRevokeSponsorshipEnvelopeXDR(t), + resultXDR: "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + metaXDR: "AAAAAgAAAAAAAAABAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + hash: "a41d1c8cdf515203ac5a10d945d5023325076b23dbe7d65ae402cd5f8cd9f891", + index: 0, + expected: map[string]interface{}{ + "signer_account_id": "GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A", + "signer_key": "GCAHY6JSXQFKWKP6R7U5JPXDVNV4DJWOWRFLY3Y6YPBF64QRL4BPFDNS", + }, + }, + { + desc: "revokeSponsorship (ledger key)", + envelopeXDR: "AAAAAgAAAAAokk0ZqR+mxwuhJJ2uXvNqIhmObygxBFIJKvQgf/7fqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAASAAAAAAAAAAQAAAAAyv66vgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + resultXDR: "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + metaXDR: "AAAAAgAAAAAAAAABAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + hash: "13658aed93a0cb60582491a8eb945eb9b7737a7560324dd2b24f2acfe5090ada", + index: 0, + expected: map[string]interface{}{ + "claimable_balance_id": "00000000cafebabe00000000000000000000000000000000000000000000000000000000", + }, + }, + { + desc: "clawbackClaimableBalance", + envelopeXDR: "AAAAAgAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAUAAAAAMr+ur7erb7vAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=", + resultXDR: "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + metaXDR: "AAAAAgAAAAAAAAABAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + hash: "b803cd72c77fb1cf95dd94048b14e02abf8694ef9124b10e9620b83a8b15804f", + index: 0, + expected: map[string]interface{}{ + "balance_id": "00000000cafebabedeadbeef000000000000000000000000000000000000000000000000", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tt := assert.New(t) + transaction := 
BuildLedgerTransaction( + t, + TestTransaction{ + Index: 1, + EnvelopeXDR: tc.envelopeXDR, + ResultXDR: tc.resultXDR, + MetaXDR: tc.metaXDR, + FeeChangesXDR: tc.feeChangesXDR, + Hash: tc.hash, + }, + ) + + operation := transactionOperationWrapper{ + index: tc.index, + transaction: transaction, + operation: transaction.Envelope.Operations()[tc.index], + ledgerSequence: 1, + } + details, err := operation.Details() + tt.NoError(err) + tt.Equal(tc.expected, details) + }) + } + + tx := createTransaction(true, 1) + tx.Index = 1 + + tx.Envelope.Operations()[0].Body = xdr.OperationBody{ + Type: xdr.OperationTypeRevokeSponsorship, + RevokeSponsorshipOp: &xdr.RevokeSponsorshipOp{ + Type: xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry, + LedgerKey: &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.LedgerKeyClaimableBalance{ + BalanceId: xdr.ClaimableBalanceId{ + Type: 0, + V0: &xdr.Hash{0xca, 0xfe, 0xba, 0xbe}, + }, + }, + }, + }, + } +} + +func TestTransactionOperationParticipants(t *testing.T) { + testCases := []struct { + desc string + envelopeXDR string + resultXDR string + metaXDR string + feeChangesXDR string + hash string + index uint32 + expected []xdr.AccountId + }{ + { + desc: "createAccount", + envelopeXDR: "AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDHU95E9wxgETD8TqxUrkgC0/7XHyNDts6Q5huRHfDRyRcoHdv7aMp/sPvC3RPkXjOMjgbKJUX7SgExUeYB5f8F", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAZAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAaAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlahyo1sAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nHQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "0e5bd332291e3098e49886df2cdb9b5369a5f9e0a9973f0d9e1a9489c6581ba2", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + xdr.MustAddress("GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN"), + }, + }, + { + desc: "payment - same destination as source", + envelopeXDR: "AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: 
"AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y"), + }, + }, + { + desc: "payment - destination and source are different", + envelopeXDR: "AAAAAGk/nUZSIwC34ltdN0iqxq+m+0UAWAilH1lZDMM07nODAAAAZAALscMAAAABAAAAAQAAAAAAAAAAAAAAAF35J+sAAAABAAAACjEyMDgwNDc1NDIAAAAAAAEAAAAAAAAAAQAAAABbpsBvIu34ZyHMCzALP5ZzWU604GJX6h9tyk49T5gDwAAAAAAAAAACVAvkAAAAAAAAAAABNO5zgwAAAEDktE4HWENBP01or+tVTmLlDM5J4rwvt0qUZ0wB6fZKbevr+j8Y2eem0lQPjAqk/jdL/KkpFantFd+NKgK+48YO", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAuxxAAAAAAAAAAAaT+dRlIjALfiW103SKrGr6b7RQBYCKUfWVkMwzTuc4MAAAAXSHbnnAALscMAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAuxxAAAAAAAAAAAaT+dRlIjALfiW103SKrGr6b7RQBYCKUfWVkMwzTuc4MAAAAXSHbnnAALscMAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAC7GlAAAAAAAAAABbpsBvIu34ZyHMCzALP5ZzWU604GJX6h9tyk49T5gDwAAAAAAX14OcAABp9wAAT4AAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAC7HEAAAAAAAAAABbpsBvIu34ZyHMCzALP5ZzWU604GJX6h9tyk49T5gDwAAAAAJr42ecAABp9wAAT4AAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAC7HEAAAAAAAAAABpP51GUiMAt+JbXTdIqsavpvtFAFgIpR9ZWQzDNO5zgwAAABdIduecAAuxwwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAC7HEAAAAAAAAAABpP51GUiMAt+JbXTdIqsavpvtFAFgIpR9ZWQzDNO5zgwAAABT0awOcAAuxwwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAC7HDAAAAAAAAAABpP51GUiMAt+JbXTdIqsavpvtFAFgIpR9ZWQzDNO5zgwAAABdIdugAAAuxwwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAC7HEAAAAAAAAAABpP51GUiMAt+JbXTdIqsavpvtFAFgIpR9ZWQzDNO5zgwAAABdIduecAAuxwwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "152dafa4699b954272d9896939d2ecd99e39a809713acb99680380e7f0074f89", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GBUT7HKGKIRQBN7CLNOTOSFKY2X2N62FABMARJI7LFMQZQZU5ZZYHXXG"), + xdr.MustAddress("GBN2NQDPELW7QZZBZQFTACZ7SZZVSTVU4BRFP2Q7NXFE4PKPTAB4AY4S"), + }, + }, + { + desc: "pathPaymentStrictReceive", + envelopeXDR: "AAAAAONt/6wGI884Zi6sYDYC1GOV/drnh4OcRrTrqJPoOTUKAAAAZAAAABAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAIAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAAAAAAAAAAAB6Dk1CgAAAEB+7jxesBKKrF343onyycjp2tiQLZiGH2ETl+9fuOqotveY2rIgvt9ng+QJ2aDP3+PnDsYEa9ZUaA+Zne2nIGgE", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAACAAAAAAAAAAEAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAgAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygAAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAA==", + metaXDR: 
"AAAAAQAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAMAAAATAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAUAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAALLQXgB//////////wAAAAEAAAAAAAAAAAAAAAMAAAATAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACGHEY1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAAEwAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAA7msoAAAAAAHc1lAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAKPpqzUAAAAEAAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAdzWUAAAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMAAAATAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+M4AAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "96415ac1d2f79621b26b1568f963fd8dd6c50c20a22c7428cefbfe9dee867588", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + xdr.MustAddress("GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP"), + }, + }, + { + desc: "manageSellOffer", + envelopeXDR: "AAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAZAAAABAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAARFUV7EAAABALuai5QxceFbtAiC5nkntNVnvSPeWR+C+FgplPAdRgRS+PPESpUiSCyuiwuhmvuDw7kwxn+A6E0M4ca1s2qzMAg==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA", + metaXDR: "AAAAAQAAAAIAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAAAAABIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA", + feeChangesXDR: 
"AAAAAgAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+M4AAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "ca756d1519ceda79f8722042b12cea7ba004c3bd961adb62b59f88a867f86eb3", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"), + }, + }, + { + desc: "createPassiveSellOffer", + envelopeXDR: "AAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAZAAAABkAAAABAAAAAAAAAAAAAAABAAAAAAAAAAQAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAAAAAAEyboYYAAAAQBqzCYDuLYn/jXhfEVxEGigMCJGoOBCK92lUb3Um15PgwSJ63tNl+FpH8+y5c+mCs/rzcvdyo9uXdodd4LXWiQg=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAUAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA", + metaXDR: "AAAAAQAAAAIAAAADAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAHc1lAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABoAAAACAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAUAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA", + feeChangesXDR: "AAAAAgAAAAMAAAAZAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+QAAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+OcAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "a76e0260f6b83c6ea93f545d17de721c079dc31e81ee5edc41f159ec5fb48443", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q"), + }, + }, + { + desc: "setOption", + envelopeXDR: "AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAC2V4YW1wbGUuY29tAAAAAAAAAAAAAAAAATCeMFAAAABAkID6CkBHP9eovLQXkMQJ7QkE6NWlmdKGmLxaiI1YaVKZaKJxz5P85x+6wzpYxxbs6Bd2l4qxVjS7Q36DwRiqBA==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAFAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAGAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgACAgAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAAAAAAAAAAAAA==", + feeChangesXDR: 
"AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+IMAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+GoAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "8ccc0c28c3e99a63cc59bad7dec3f5c56eb3942c548ecd40bc39c509d6b081d4", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES"), + }, + }, + { + desc: "changeTrust", + envelopeXDR: "AAAAAKturFHJX/eRt5gM6qIXAMbaXvlImqLysA6Qr9tLemxfAAAAZAAAACYAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+H//////////AAAAAAAAAAFLemxfAAAAQKN8LftAafeoAGmvpsEokqm47jAuqw4g1UWjmL0j6QPm1jxoalzDwDS3W+N2HOHdjSJlEQaTxGBfQKHhr6nNsAA=", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAmAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "6fa467b53f5386d77ad35c2502ed2cd3dd8b460a5be22b6b2818b81bcd3ed2da", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG"), + }, + }, + { + desc: "allowTrust", + envelopeXDR: "AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAACAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABVVNEAAAAAAEAAAAAAAAAAUpI8/gAAABA6O2fe1gQBwoO0fMNNEUKH0QdVXVjEWbN5VL51DmRUedYMMXtbX5JKVSzla2kIGvWgls1dXuXHZY/IOlaK01rBQ==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAABAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAACAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "6d2e30fd57492bf2e2b132e1bc91a548a369189bebf77eb2b3d829121a9d2c50", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"), + 
xdr.MustAddress("GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG"), + }, + }, + { + desc: "accountMerge (Destination)", + envelopeXDR: "AAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+HvAAAAZAAAACsAAAABAAAAAAAAAAAAAAABAAAAAAAAAAgAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAYrj4e8AAABA3jJ7wBrRpsrcnqBQWjyzwvVz2v5UJ56G60IhgsaWQFSf+7om462KToc+HJ27aLVOQ83dGh1ivp+VIuREJq/SBw==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAIAAAAAAAAAAJUC+OcAAAAAA==", + metaXDR: "AAAAAQAAAAIAAAADAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtonM3Az4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/CUAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+Hv", + feeChangesXDR: "AAAAAgAAAAMAAAArAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+QAAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "e0773d07aba23d11e6a06b021682294be1f9f202a2926827022539662ce2c7fc", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ"), + xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + }, + }, + { + desc: "inflation", + envelopeXDR: "AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAVAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAAAVb8BfcAAABABUHuXY+MTgW/wDv5+NDVh9fw4meszxeXO98HEQfgXVeCZ7eObCI2orSGUNA/SK6HV9/uTVSxIQQWIso1QoxHBQ==", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAAIAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAIrEjCYwXAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAIrEjfceLAAAAAA==", + metaXDR: "AAAAAQAAAAIAAAADAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAUAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAVAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h/EAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGraHekccnAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + feeChangesXDR: 
"AAAAAgAAAAMAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "ea93efd8c2f4e45c0318c69ec958623a0e4374f40d569eec124d43c8a54d6256", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + }, + }, + { + desc: "manageData", + envelopeXDR: "AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTIAAAAAAAABAAAABDU2NzgAAAAAAAAAAS6Z+xkAAABAjxgnTRBCa0n1efZocxpEjXeITQ5sEYTVd9fowuto2kPw5eFwgVnz6OrKJwCRt5L8ylmWiATXVI3Zyfi3yTKqBA==", + + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMgAAAAAAAAQ1Njc4AAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+OcAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+M4AAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "e4609180751e7702466a8845857df43e4d154ec84b6bad62ce507fe12f1daf99", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD"), + }, + }, + { + desc: "bumpSequence", + envelopeXDR: "AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgDAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AwAAAAAAAAABHK0SlAAAAECcI6ex0Dq6YAh6aK14jHxuAvhvKG2+NuzboAKrfYCaC1ZSQ77BYH/5MghPX97JO9WXV17ehNK7d0umxBgaJj8A", + + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgCAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgDAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==", + feeChangesXDR: "AAAAAgAAAAMAAAA8AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+LUAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA9AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+JwAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "bc11b5c41de791369fd85fa1ccf01c35c20df5f98ff2f75d02ead61bfd520e21", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN"), + }, + }, + { + desc: "manageBuyOffer", + envelopeXDR: "AAAAAJ/0uhpjIPNaeEcEqBy5SVquaG77leHg6iNYV67vrxFhAAAAZAAFedEAAAAQAAAAAQAAAAAAAAAAAAAAAF3cTA8AAAABAAAADk1ha2UgQnV5IE9mZmVyAAAAAAABAAAAAAAAAAwAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAAAAAAANZresAAAAAgAAAABAAAAAAAAAAAAAAAAAAAAAe+vEWEAAABAY0cI3kQXv1EcCDDmf3hCKLLEiinkVPB2+rAJe8PnA8WY8r27xGr5LCikUj8n7wEAtzMM83VcPYIMoJROYMjvCA==", + 
resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAMAAAAAAAAAAIAAAAAs/VGFE8UPc+joxmES67/oVFj3tKJVdCzBqJoIsmWHmkAAAAAAEPjtgAAAAAAAAAAL3ix2AAAAAFYQ1oAAAAAALQZ5edJKuM+3mJpWjwNsuJlbSwevZY/3MxZi4Yyt0SFAAAAAAXvFjsAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAAAAEPkBgAAAAAAAAAABiIs2AAAAAFYQ1oAAAAAALQZ5edJKuM+3mJpWjwNsuJlbSwevZY/3MxZi4Yyt0SFAAAAAADERZsAAAACAAAAAA==", + metaXDR: "AAAAAQAAAAIAAAADAAZoPQAAAAAAAAAAn/S6GmMg81p4RwSoHLlJWq5obvuV4eDqI1hXru+vEWEAAAAXYEnCxwAFedEAAAAPAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAB7RNLgAAAAAAAAAAAAAAAEABmg9AAAAAAAAAACf9LoaYyDzWnhHBKgcuUlarmhu+5Xh4OojWFeu768RYQAAABdgScLHAAV50QAAABAAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAHtE0uAAAAAAAAAAAAAAAAQAAABAAAAADAAZoOAAAAAIAAAAAs/VGFE8UPc+joxmES67/oVFj3tKJVdCzBqJoIsmWHmkAAAAAAEPjtgAAAAAAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAAveLHYAAAAAQAAAAgAAAAAAAAAAAAAAAAAAAACAAAAAgAAAACz9UYUTxQ9z6OjGYRLrv+hUWPe0olV0LMGomgiyZYeaQAAAAAAQ+O2AAAAAwAGaCwAAAAAAAAAAIBSoZJZ0L+1+OECZtMg5Jr47WRR7PjuuXHzvhIVsDnUAAAAF6bkRhMABXnFAAAAEwAAAAYAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACrQT9gAAAAAAAAAAAAAAABAAZoPQAAAAAAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAAXoMIZOwAFecUAAAATAAAABgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAKUfEogAAAAAAAAAAAAAAAMABmg4AAAAAQAAAACz9UYUTxQ9z6OjGYRLrv+hUWPe0olV0LMGomgiyZYeaQAAAAFYQ1oAAAAAALQZ5edJKuM+3mJpWjwNsuJlbSwevZY/3MxZi4Yyt0SFAAAADJbEu7YAAAAXSHboAAAAAAEAAAABAAAAADfP+isAAAAAAAAAAAAAAAAAAAAAAAAAAQAGaD0AAAABAAAAALP1RhRPFD3Po6MZhEuu/6FRY97SiVXQswaiaCLJlh5pAAAAAVhDWgAAAAAAtBnl50kq4z7eYmlaPA2y4mVtLB69lj/czFmLhjK3RIUAAAAMnLPR8QAAABdIdugAAAAAAQAAAAEAAAAAMeDj8AAAAAAAAAAAAAAAAAAAAAAAAAADAAZoLAAAAAIAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAAAAEPkBgAAAAAAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAAWIzSwAAAAAQAAAAgAAAAAAAAAAAAAAAAAAAABAAZoPQAAAAIAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAAAAEPkBgAAAAAAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAAQAQfYAAAAAQAAAAgAAAAAAAAAAAAAAAAAAAADAAZoOAAAAAAAAAAAs/VGFE8UPc+joxmES67/oVFj3tKJVdCzBqJoIsmWHmkAAAAXpiEUNgAFecIAAAATAAAABgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAIOopqAAAAAAAAAAAAAAAAEABmg9AAAAAAAAAACz9UYUTxQ9z6OjGYRLrv+hUWPe0olV0LMGomgiyZYeaQAAABd2qGJeAAV5wgAAABMAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAVC/0yAAAAAAAAAAAAAAAAwAGaCwAAAABAAAAAIBSoZJZ0L+1+OECZtMg5Jr47WRR7PjuuXHzvhIVsDnUAAAAAVhDWgAAAAAAtBnl50kq4z7eYmlaPA2y4mVtLB69lj/czFmLhjK3RIUAAAADPc/w0QAAABdIdugAAAAAAQAAAAEAAAABRw/DngAAAAAAAAAAAAAAAAAAAAAAAAABAAZoPQAAAAEAAAAAgFKhklnQv7X44QJm0yDkmvjtZFHs+O65cfO+EhWwOdQAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAAAM+lDZsAAAAF0h26AAAAAABAAAAAQAAAAFGS34DAAAAAAAAAAAAAAAAAAAAAAAAAAMABmg9AAAAAAAAAACf9LoaYyDzWnhHBKgcuUlarmhu+5Xh4OojWFeu768RYQAAABdgScLHAAV50QAAABAAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAHtE0uAAAAAAAAAAAAAAAAQAGaD0AAAAAAAAAAJ/0uhpjIPNaeEcEqBy5SVquaG77leHg6iNYV67vrxFhAAAAF5XkoXcABXnRAAAAEAAAAAQAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAe0TS4AAAAAAAAAAAAAAADAAYzMAAAAAEAAAAAn/S6GmMg81p4RwSoHLlJWq5obvuV4eDqI1hXru+vEWEAAAABWENaAAAAAAC0GeXnSSrjPt5iaVo8DbLiZW0sHr2WP9zMWYuGMrdEhQAAABNL7iG9AAAAF0h26AAAAAABAAAAAQAAAADBUPuwAAAAAAAAAAAAAAAAAAAAAAAAAAEABmg9AAAAAQAAAACf9LoaYyDzWnhHBKgcuUlarmhu+5Xh4OojWFeu768RYQAAAAFYQ1oAAAAAALQZ5edJKuM+3mJpWjwNsuJlbSwevZY/3MxZi4Yyt0SFAAAAE0U6xecAAAAXSHboAAAAAAEAAAABAAAAAMFQ+7AAAAAAAAAAAAAAAAAAAAAA", + feeChangesXDR: 
"AAAAAgAAAAMABjMvAAAAAAAAAACf9LoaYyDzWnhHBKgcuUlarmhu+5Xh4OojWFeu768RYQAAABdgScMrAAV50QAAAA8AAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAHtE0uAAAAAAAAAAAAAAAAQAGaD0AAAAAAAAAAJ/0uhpjIPNaeEcEqBy5SVquaG77leHg6iNYV67vrxFhAAAAF2BJwscABXnRAAAADwAAAAQAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAe0TS4AAAAAAAAAAA=", + hash: "94b050b55dfdfa55dc46889b6f6b5798c24cf77983562826a74db058ff0eb5b4", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GCP7JOQ2MMQPGWTYI4CKQHFZJFNK42DO7OK6DYHKENMFPLXPV4IWDPLC"), + }, + }, + { + desc: "pathPaymentStrictSend", + envelopeXDR: "AAAAAOsC3UuQJXeJWl7o2Q9Wf2RvZiHiHKfSbtDNsXkn3NMiAAAAZAAKTgQAAAABAAAAAAAAAAAAAAABAAAAAAAAAA0AAAAAAAAAAAX14QAAAAAA4VWYSXp7+QFjS8+8WzU2KJTONKIIk2FHXORcby4KqbgAAAAAAAAAAAX14QAAAAABAAAAAAAAAAAAAAABJ9zTIgAAAEBPAPVBKa8d5/DyiTghHO8OnFNtxa4WSMW1geqCH+83EL+yyLszkzdIWSBX8/N9FC1Mo+DTRF/peVAsxlL4G04N", + resultXDR: "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAANAAAAAAAAAAAAAAAA4VWYSXp7+QFjS8+8WzU2KJTONKIIk2FHXORcby4KqbgAAAAAAAAAAAX14QAAAAAA", + metaXDR: "AAAAAQAAAAIAAAADAApOFQAAAAAAAAAA6wLdS5Ald4laXujZD1Z/ZG9mIeIcp9Ju0M2xeSfc0yIAAAAXSHbnnAAKTgQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAApOFQAAAAAAAAAA6wLdS5Ald4laXujZD1Z/ZG9mIeIcp9Ju0M2xeSfc0yIAAAAXSHbnnAAKTgQAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMACk4VAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdIduecAApOBAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACk4VAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdCgQacAApOBAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMABZwuAAAAAAAAAADhVZhJenv5AWNLz7xbNTYolM40ogiTYUdc5FxvLgqpuAAAAAAAmJaAAAWcLgAAAAAAAAAAAAAAAAAAAAQAAAARdHN0LnRlc3Rhc3NldC5jb20AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAKThUAAAAAAAAAAOFVmEl6e/kBY0vPvFs1NiiUzjSiCJNhR1zkXG8uCqm4AAAAAAaOd4AABZwuAAAAAAAAAAAAAAAAAAAABAAAABF0c3QudGVzdGFzc2V0LmNvbQAAAAAAAAAAAAAAAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMACk4EAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdIdugAAApOBAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEACk4VAAAAAAAAAADrAt1LkCV3iVpe6NkPVn9kb2Yh4hyn0m7QzbF5J9zTIgAAABdIduecAApOBAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "5cbb2e13e2ddd72075b6c7e83eb018d0092c27972bf1f0343b83be9c5f964b62", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GDVQFXKLSASXPCK2L3UNSD2WP5SG6ZRB4IOKPUTO2DG3C6JH3TJSEA7R"), + xdr.MustAddress("GDQVLGCJPJ57SALDJPH3YWZVGYUJJTRUUIEJGYKHLTSFY3ZOBKU3QIO3"), + }, + }, + { + desc: "operation with source_account different to transaction source_account", + envelopeXDR: 
"AAAAAOZPoQTlXBixd6XSUExX/Yvos/pVllkUNdNvCdmC+mNkAAACvAAE5bIAAAAOAAAAAQAAAAAAAAAAAAAAAF3X8mwAAAAAAAAABwAAAAEAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAAAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAAAACYloAAAAABAAAAAOZPoQTlXBixd6XSUExX/Yvos/pVllkUNdNvCdmC+mNkAAAAAAAAAAD66ofFUOv3/k5PaB0F6wr5c0jvwdDY933ssbjK656DmwAAAAAF9eEAAAAAAQAAAAD66ofFUOv3/k5PaB0F6wr5c0jvwdDY933ssbjK656DmwAAAAYAAAABVFNUAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAB3NZQAAAAAAQAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAEAAAAA+uqHxVDr9/5OT2gdBesK+XNI78HQ2Pd97LG4yuueg5sAAAABVFNUAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAB3NZQAAAAAAQAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAABF0c3QudGVzdGFzc2V0LmNvbQAAAAAAAAAAAAABAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAABQAAAAAAAAAAAAAAAQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAABQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4L6Y2QAAABAd8d0e8OiMmmGlxLrPu8JfTLphUfFPgx0Fs/fwU6/ilzwbpTHCKICWGlSz8enjb57FXD6DliXcaWJeR/2Fj8tB+ueg5sAAABAkAwqpu1liQpxh3C2MdsDoOg/N4pxuUuzh0Ey/0g0QbWy0Y2bBkLPldsGj/pDNbKfkZPGfdx4MZ6rHbUdGEwgDRx9mSAAAABA/IRS0D7EcFS1J6uR4HnOvh8tikBhVe+0uI6DPkqv/GfSqeuoZIRyWxKSd/v64DxxozKZsmQmatLZqOnQwkuxCA==", + resultXDR: "AAAAAAAAArwAAAAAAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAFAAAAAAAAAAA=", + metaXDR: "AAAAAQAAAAIAAAADAAWcAAAAAAAAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAW4yLaGAAE5bIAAAANAAAAAAAAAAAAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAWcAAAAAAAAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAW4yLaGAAE5bIAAAAOAAAAAAAAAAAAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAwAAAAMABZwAAAAAAAAAAADmT6EE5VwYsXel0lBMV/2L6LP6VZZZFDXTbwnZgvpjZAAAABbjItoYAATlsgAAAA4AAAAAAAAAAAAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZwAAAAAAAAAAADmT6EE5VwYsXel0lBMV/2L6LP6VZZZFDXTbwnZgvpjZAAAABbiikOYAATlsgAAAA4AAAAAAAAAAAAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAABZwAAAAAAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAAAmJaAAAWcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAADAAWcAAAAAAAAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAW4opDmAAE5bIAAAAOAAAAAAAAAAAAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAWcAAAAAAAAAAAA5k+hBOVcGLF3pdJQTFf9i+iz+lWWWRQ1028J2YL6Y2QAAAAW3JRimAAE5bIAAAAOAAAAAAAAAAAAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAWcAAAAAAAAAAAA+uqHxVDr9/5OT2gdBesK+XNI78HQ2Pd97LG4yuueg5sAAAAABfXhAAAFnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAAAwAFnAAAAAAAAAAAAPrqh8VQ6/f+Tk9oHQXrCvlzSO/B0Nj3feyxuMrrnoObAAAAAAX14QAABZwAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAFnAAAAAAAAAAAAPrqh8VQ6/f+Tk9oHQXrCvlzSO/B0Nj3feyxuMrrnoObAAAAAAX14QAABZwAAAAAAAAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAFnAAAAAABAAAAAPrqh8VQ6/f+Tk9oHQXrCvlzSO/B0Nj3feyxuMrrnoObAAAAAVRTVAAAAAAA8SasYjn6L0hs/J/eX7qRWEwvWVRkkmbJC90FFxx9mSAAAAAAAAAAAAAAAAB3NZQAAAAAAQAAAAAAAAAAAAAAAgAAAAMABZwAAAAAAQAAAAD66ofFUOv3/k5PaB0F6wr5c0jvwdDY933ssbjK656DmwAAAAFUU1QAAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAAAAAAAAAAAAAAdzWUAAAAAAEAAAAAAAAAAAAAAAEABZwAAAAAAQAAAAD66ofFUOv3/k5PaB0F6wr5c0jvwdDY933ssbjK656DmwAAAAFUU1QAAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAAAHc1lAAAAAAAdzWUAAAAAAEAAAAAAAAAAAAAAAIAAAADAAWcAAAAAAAAAAAA8SasYjn6L0hs/J/eX7qRWEwvWVRkkmbJC90FFxx9mSAAAAAAAJiWgAAFnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAWcAAAAAAAAAAAA8SasYjn6L0hs/J/eX7qRWEwvWVRkkmbJC90FFxx9mSAAAAAAAJiWgAAFnAAAAAAAAAAAAAAAAAAAAAAAAAAAEXRzdC50ZXN0YXNzZXQuY29tAAAAAQAAAAAAAAAAAA
AAAAAAAAAAAAIAAAADAAWcAAAAAAAAAAAA8SasYjn6L0hs/J/eX7qRWEwvWVRkkmbJC90FFxx9mSAAAAAAAJiWgAAFnAAAAAAAAAAAAAAAAAAAAAAAAAAAEXRzdC50ZXN0YXNzZXQuY29tAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZwAAAAAAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAAAmJaAAAWcAAAAAAAAAAAAAAAAAAAAAAQAAAARdHN0LnRlc3Rhc3NldC5jb20AAAABAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAMABZwAAAAAAAAAAADxJqxiOfovSGz8n95fupFYTC9ZVGSSZskL3QUXHH2ZIAAAAAAAmJaAAAWcAAAAAAAAAAAAAAAAAAAAAAQAAAARdHN0LnRlc3Rhc3NldC5jb20AAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAFnAAAAAAAAAAAAPEmrGI5+i9IbPyf3l+6kVhML1lUZJJmyQvdBRccfZkgAAAAAACYloAABZwAAAAAAAAAAAAAAAAAAAAABAAAABF0c3QudGVzdGFzc2V0LmNvbQAAAAAAAAAAAAAAAAAAAAAAAAA=", + feeChangesXDR: "AAAAAgAAAAMABZv0AAAAAAAAAADmT6EE5VwYsXel0lBMV/2L6LP6VZZZFDXTbwnZgvpjZAAAABbjItzUAATlsgAAAA0AAAAAAAAAAAAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEABZwAAAAAAAAAAADmT6EE5VwYsXel0lBMV/2L6LP6VZZZFDXTbwnZgvpjZAAAABbjItoYAATlsgAAAA0AAAAAAAAAAAAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + hash: "12e901c3c12cc9f35adc09e83c0a2d58f939e8c0bebc6f9a945aea42f2587cfe", + index: 2, + expected: []xdr.AccountId{ + xdr.MustAddress("GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY"), + }, + }, + { + desc: "revokeSponsorship (signer)", + envelopeXDR: "AAAAAgAAAAAokk0ZqR+mxwuhJJ2uXvNqIhmObygxBFIJKvQgf/7fqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAASAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAACCEcFim0Esp2yagOwR1omkcZQJqj9X5o5/1XafEdnfoAAAAAAAAAAA", + resultXDR: "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + metaXDR: "AAAAAgAAAAAAAAABAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + hash: "a41d1c8cdf515203ac5a10d945d5023325076b23dbe7d65ae402cd5f8cd9f891", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + xdr.MustAddress("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"), + }, + }, + { + desc: "revokeSponsorship (ledger key)", + envelopeXDR: "AAAAAgAAAAAokk0ZqR+mxwuhJJ2uXvNqIhmObygxBFIJKvQgf/7fqwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAASAAAAAAAAAAQAAAAAyv66vgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + resultXDR: "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + metaXDR: "AAAAAgAAAAAAAAABAAAAAAAAAAA=", + feeChangesXDR: "AAAAAA==", + hash: "13658aed93a0cb60582491a8eb945eb9b7737a7560324dd2b24f2acfe5090ada", + index: 0, + expected: []xdr.AccountId{ + xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + }, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tt := assert.New(t) + transaction := BuildLedgerTransaction( + t, + TestTransaction{ + Index: 1, + EnvelopeXDR: tc.envelopeXDR, + ResultXDR: tc.resultXDR, + MetaXDR: tc.metaXDR, + FeeChangesXDR: tc.feeChangesXDR, + Hash: tc.hash, + }, + ) + + operation := transactionOperationWrapper{ + index: tc.index, + transaction: transaction, + operation: transaction.Envelope.Operations()[tc.index], + ledgerSequence: 1, + } + + participants, err := operation.Participants() + tt.NoError(err) + tt.ElementsMatch(tc.expected, participants) + }) + } +} + +func TestOperationParticipants(t *testing.T) { + tt := assert.New(t) + + sequence := uint32(56) + transaction := BuildLedgerTransaction( + t, + TestTransaction{ + Index: 1, + EnvelopeXDR: "AAAAAONt/6wGI884Zi6sYDYC1GOV/drnh4OcRrTrqJPoOTUKAAAAZAAAABAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAIAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAAAAAAAAAAAB6Dk1CgAAAEB+7jxesBKKrF343onyycjp2tiQLZiGH2ETl+9fuOqotveY2rIgvt9ng+QJ2aDP3+PnDsYEa9ZUaA+Zne2nIGgE", + ResultXDR: 
"AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAACAAAAAAAAAAEAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAgAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygAAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAA==", + MetaXDR: "AAAAAQAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAMAAAATAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAUAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAALLQXgB//////////wAAAAEAAAAAAAAAAAAAAAMAAAATAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACGHEY1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAAEwAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAA7msoAAAAAAHc1lAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAKPpqzUAAAAEAAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAdzWUAAAAAAAAAAAA", + FeeChangesXDR: "AAAAAgAAAAMAAAATAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+M4AAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==", + Hash: "96415ac1d2f79621b26b1568f963fd8dd6c50c20a22c7428cefbfe9dee867588", + }, + ) + + expectedParticipants := []xdr.AccountId{ + xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + xdr.MustAddress("GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP"), + } + participantsMap, err := operationsParticipants(transaction, sequence) + tt.NoError(err) + tt.Len(participantsMap, 1) + for k, v := range participantsMap { + tt.Equal(int64(240518172673), k) + tt.ElementsMatch(expectedParticipants, v) + } +} + +func TestTransactionOperationAllowTrustDetails(t *testing.T) { + tt := assert.New(t) + asset := xdr.Asset{} + allowTrustAsset, err := asset.ToAssetCode("COP") + tt.NoError(err) + + aid := xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + source := aid.ToMuxedAccount() + + testCases := []struct { + desc string + op xdr.Operation + expected map[string]interface{} + }{ + { + desc: "authorize", + op: xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeAllowTrust, + AllowTrustOp: &xdr.AllowTrustOp{ + Trustor: xdr.MustAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"), + Asset: allowTrustAsset, + Authorize: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + }, + }, + }, + expected: map[string]interface{}{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "authorize": true, + "trustee": 
"GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "trustor": "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + }, + }, + { + desc: "authorize maintain liabilities", + op: xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeAllowTrust, + AllowTrustOp: &xdr.AllowTrustOp{ + Trustor: xdr.MustAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"), + Asset: allowTrustAsset, + Authorize: xdr.Uint32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag), + }, + }, + }, + expected: map[string]interface{}{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "authorize": false, + "authorize_to_maintain_liabilities": true, + "trustee": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "trustor": "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + }, + }, + { + desc: "deauthorize", + op: xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeAllowTrust, + AllowTrustOp: &xdr.AllowTrustOp{ + Trustor: xdr.MustAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"), + Asset: allowTrustAsset, + Authorize: xdr.Uint32(0), + }, + }, + }, + expected: map[string]interface{}{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "authorize": false, + "trustee": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "trustor": "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + }, + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 1, 1), + }, + }, + }, + operation: tc.op, + ledgerSequence: 1, + } + details, err := operation.Details() + tt.NoError(err) + tt.Equal(tc.expected, details) + }) + } +} + +type CreateClaimableBalanceOpTestSuite struct { + suite.Suite + ops []xdr.Operation +} + +func (s *CreateClaimableBalanceOpTestSuite) SetupTest() { + aid := xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + source := aid.ToMuxedAccount() + s.ops = []xdr.Operation{ + { + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeCreateClaimableBalance, + CreateClaimableBalanceOp: &xdr.CreateClaimableBalanceOp{ + Amount: xdr.Int64(100000000), + Asset: xdr.MustNewNativeAsset(), + Claimants: []xdr.Claimant{ + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY"), + + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + }, + }, + }, + }, + { + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeCreateClaimableBalance, + CreateClaimableBalanceOp: &xdr.CreateClaimableBalanceOp{ + Amount: xdr.Int64(200000000), + Asset: xdr.MustNewCreditAsset("USD", "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + Claimants: []xdr.Claimant{ + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2"), + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + { + Type: xdr.ClaimantTypeClaimantTypeV0, 
+ V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"), + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + }, + }, + }, + }, + } +} +func (s *CreateClaimableBalanceOpTestSuite) TestDetails() { + testCases := []struct { + desc string + op xdr.Operation + expected map[string]interface{} + }{ + { + desc: "claimable balance with native asset", + op: s.ops[0], + expected: map[string]interface{}{ + "asset": "native", + "amount": "10.0000000", + "claimants": history.Claimants{ + history.Claimant{ + Destination: "GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY", + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + }, + }, + { + desc: "claimable balance with issued asset", + op: s.ops[1], + expected: map[string]interface{}{ + "asset": "USD:GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "amount": "20.0000000", + "claimants": history.Claimants{ + history.Claimant{ + Destination: "GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2", + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + history.Claimant{ + Destination: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + s.T().Run(tc.desc, func(t *testing.T) { + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 1, 1), + }, + }, + }, + operation: tc.op, + ledgerSequence: 1, + } + + details, err := operation.Details() + s.Assert().NoError(err) + s.Assert().Equal(tc.expected, details) + }) + } +} + +func (s *CreateClaimableBalanceOpTestSuite) TestParticipants() { + testCases := []struct { + desc string + op xdr.Operation + expected []xdr.AccountId + }{ + { + desc: "claimable balance with a single claimant", + op: s.ops[0], + expected: []xdr.AccountId{ + xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + xdr.MustAddress("GD5OVB6FKDV7P7SOJ5UB2BPLBL4XGSHPYHINR5355SY3RSXLT2BZWAKY"), + }, + }, + { + desc: "claimable balance with a multiple claimants", + op: s.ops[1], + expected: []xdr.AccountId{ + xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + xdr.MustAddress("GDMQUXK7ZUCWM5472ZU3YLDP4BMJLQQ76DEMNYDEY2ODEEGGRKLEWGW2"), + xdr.MustAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"), + }, + }, + } + for _, tc := range testCases { + s.T().Run(tc.desc, func(t *testing.T) { + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 1, 1), + }, + }}, + operation: tc.op, + ledgerSequence: 1, + } + + participants, err := operation.Participants() + s.Assert().NoError(err) + s.Assert().ElementsMatch(tc.expected, participants) + }) + } +} +func TestCreateClaimableBalanceOpTestSuite(t *testing.T) { + suite.Run(t, new(CreateClaimableBalanceOpTestSuite)) +} + +type ClaimClaimableBalanceOpTestSuite struct { + suite.Suite + op xdr.Operation + balanceID string +} + +func (s *ClaimClaimableBalanceOpTestSuite) SetupTest() { + s.balanceID = 
"00000000da0d57da7d4850e7fc10d2a9d0ebc731f7afb40574c03395b17d49149b91f5be" + source := xdr.MustMuxedAddress("MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26") + var balanceID xdr.ClaimableBalanceId + xdr.SafeUnmarshalHex(s.balanceID, &balanceID) + s.op = xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeClaimClaimableBalance, + ClaimClaimableBalanceOp: &xdr.ClaimClaimableBalanceOp{ + BalanceId: balanceID, + }, + }, + } +} +func (s *ClaimClaimableBalanceOpTestSuite) TestDetails() { + expected := map[string]interface{}{ + "claimant": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "claimant_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "claimant_muxed_id": uint64(1234), + "balance_id": s.balanceID, + } + + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 1, 1), + }, + }}, + operation: s.op, + ledgerSequence: 1, + } + + details, err := operation.Details() + s.Assert().NoError(err) + s.Assert().Equal(expected, details) +} + +func (s *ClaimClaimableBalanceOpTestSuite) TestParticipants() { + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 1, 1), + }, + }, + }, + operation: s.op, + ledgerSequence: 1, + } + + participants, err := operation.Participants() + s.Assert().NoError(err) + s.Assert().ElementsMatch([]xdr.AccountId{ + xdr.MustAddress("GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY"), + }, participants) +} + +func TestClaimClaimableBalanceOpTestSuite(t *testing.T) { + suite.Run(t, new(ClaimClaimableBalanceOpTestSuite)) +} + +var ( + sponsor = xdr.MustMuxedAddress("MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26") + sponsorAID = sponsor.ToAccountId() + sponsoree = xdr.MustAddress("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2") +) + +func getSponsoredSandwichWrappers() []*transactionOperationWrapper { + const ledgerSeq = uint32(12345) + tx := createTransaction(true, 3) + tx.Index = 1 + tx.UnsafeMeta = xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 3, 3), + }, + } + tx.Result = xdr.TransactionResultPair{ + TransactionHash: xdr.Hash{}, + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxSuccess, + }, + }, + } + + // begin sponsorship + tx.Envelope.Operations()[0].Body = xdr.OperationBody{ + Type: xdr.OperationTypeBeginSponsoringFutureReserves, + BeginSponsoringFutureReservesOp: &xdr.BeginSponsoringFutureReservesOp{ + SponsoredId: sponsoree, + }, + } + + sponsorMuxed := sponsor + // Do not provide the source explicitly so that the transaction source is used + // It tests https://github.com/stellar/go/issues/2982 . 
+ // tx.Envelope.Operations()[0].SourceAccount = &sponsorMuxed + tx.Envelope.Operations()[0].SourceAccount = nil + tx.Envelope.V1.Tx.SourceAccount = sponsorMuxed + + // sponsored operation + tx.Envelope.Operations()[1].Body = xdr.OperationBody{ + Type: xdr.OperationTypeCreateAccount, + CreateAccountOp: &xdr.CreateAccountOp{ + Destination: xdr.MustAddress("GC6VKA3RC3CVU7POEKFORVMHWJNQIRZS6AEH3KIIHCVO3YRGWUV7MSUC"), + }, + } + sponsoreeMuxed := sponsoree.ToMuxedAccount() + tx.Envelope.Operations()[1].SourceAccount = &sponsoreeMuxed + tx.UnsafeMeta.V2.Operations[1] = xdr.OperationMeta{Changes: []xdr.LedgerEntryChange{ + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryCreated, + Created: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: xdr.Uint32(ledgerSeq), + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: &sponsorAID, + }, + }, + }, + }, + }} + + // end sponsorship + tx.Envelope.Operations()[2].Body = xdr.OperationBody{ + Type: xdr.OperationTypeEndSponsoringFutureReserves, + } + tx.Envelope.Operations()[2].SourceAccount = &sponsoreeMuxed + + // wrappers + result := make([]*transactionOperationWrapper, len(tx.Envelope.Operations()), len(tx.Envelope.Operations())) + for i, op := range tx.Envelope.Operations() { + result[i] = &transactionOperationWrapper{ + index: uint32(i), + transaction: tx, + operation: op, + ledgerSequence: ledgerSeq, + } + } + return result +} + +func TestSponsoredSandwichTransaction_Details(t *testing.T) { + wrappers := getSponsoredSandwichWrappers() + + details, err := wrappers[0].Details() + assert.NoError(t, err) + assert.Equal(t, map[string]interface{}{ + "sponsored_id": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + }, details) + + details, err = wrappers[1].Details() + assert.NoError(t, err) + assert.Equal(t, map[string]interface{}{ + "account": "GC6VKA3RC3CVU7POEKFORVMHWJNQIRZS6AEH3KIIHCVO3YRGWUV7MSUC", + "funder": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", + "sponsor": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "starting_balance": "0.0000000", + }, details) + + details, err = wrappers[2].Details() + assert.NoError(t, err) + assert.Equal(t, map[string]interface{}{ + "begin_sponsor": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "begin_sponsor_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "begin_sponsor_muxed_id": uint64(1234), + }, details) +} + +func TestSponsoredSandwichTransaction_Participants(t *testing.T) { + wrappers := getSponsoredSandwichWrappers() + + participants, err := wrappers[0].Participants() + assert.NoError(t, err) + assert.ElementsMatch(t, + []xdr.AccountId{ + xdr.MustAddress("GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY"), + xdr.MustAddress("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"), + }, + participants, + ) + + participants, err = wrappers[1].Participants() + assert.NoError(t, err) + assert.ElementsMatch(t, + []xdr.AccountId{ + xdr.MustAddress("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"), + xdr.MustAddress("GC6VKA3RC3CVU7POEKFORVMHWJNQIRZS6AEH3KIIHCVO3YRGWUV7MSUC"), + xdr.MustAddress("GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY"), + }, + participants, + ) + + participants, err = wrappers[2].Participants() + assert.NoError(t, err) + assert.ElementsMatch(t, + []xdr.AccountId{ + xdr.MustAddress("GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"), + xdr.MustAddress("GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY"), + }, + participants, + ) +} + 
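+// Editor's sketch, not part of the original change: the sandwich tests above
+// rely on an operation inheriting the transaction source account when its own
+// SourceAccount is nil (https://github.com/stellar/go/issues/2982). A minimal
+// illustration of that fallback; sourceAccountFor is a hypothetical helper name
+// and assumes the xdr.TransactionEnvelope SourceAccount() accessor:
+func sourceAccountFor(tx ingest.LedgerTransaction, op xdr.Operation) xdr.MuxedAccount {
+	if op.SourceAccount != nil {
+		// The operation specifies its own (possibly muxed) source account.
+		return *op.SourceAccount
+	}
+	// Otherwise fall back to the transaction-level source account.
+	return tx.Envelope.SourceAccount()
+}
+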
+type ClawbackTestSuite struct { + suite.Suite + op xdr.Operation + balanceID string +} + +func (s *ClawbackTestSuite) SetupTest() { + source := xdr.MustMuxedAddress("MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26") + s.op = xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeClawback, + ClawbackOp: &xdr.ClawbackOp{ + Asset: xdr.MustNewCreditAsset("USD", "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + From: xdr.MustMuxedAddress("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ"), + Amount: 20, + }, + }, + } +} +func (s *ClawbackTestSuite) TestDetails() { + expected := map[string]interface{}{ + "asset_code": "USD", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "amount": "0.0000020", + "from": "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ", + "from_muxed": "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ", + "from_muxed_id": uint64(0), + } + + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 1, 1), + }, + }}, + operation: s.op, + ledgerSequence: 1, + } + + details, err := operation.Details() + s.Assert().NoError(err) + s.Assert().Equal(expected, details) +} + +func (s *ClawbackTestSuite) TestParticipants() { + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 1, 1), + }, + }, + }, + operation: s.op, + ledgerSequence: 1, + } + + participants, err := operation.Participants() + s.Assert().NoError(err) + s.Assert().ElementsMatch([]xdr.AccountId{ + xdr.MustAddress("GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY"), + xdr.MustAddress("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ"), + }, participants) +} + +func TestClawbackTestSuite(t *testing.T) { + suite.Run(t, new(ClawbackTestSuite)) +} + +type SetTrustLineFlagsTestSuite struct { + suite.Suite + op xdr.Operation + balanceID string +} + +func (s *SetTrustLineFlagsTestSuite) SetupTest() { + aid := xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + source := aid.ToMuxedAccount() + trustor := xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY") + setFlags := xdr.Uint32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag) + clearFlags := xdr.Uint32(xdr.TrustLineFlagsTrustlineClawbackEnabledFlag | xdr.TrustLineFlagsAuthorizedFlag) + s.op = xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeSetTrustLineFlags, + SetTrustLineFlagsOp: &xdr.SetTrustLineFlagsOp{ + Trustor: trustor, + Asset: xdr.MustNewCreditAsset("USD", "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + ClearFlags: clearFlags, + SetFlags: setFlags, + }, + }, + } +} +func (s *SetTrustLineFlagsTestSuite) TestDetails() { + expected := map[string]interface{}{ + "asset_code": "USD", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "clear_flags": []int32{1, 4}, + "clear_flags_s": []string{"authorized", "clawback_enabled"}, + "set_flags": []int32{2}, + "set_flags_s": []string{"authorized_to_maintain_liabilites"}, + "trustor": "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + } + + 
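+	// Flag bits behind the expectations above: authorized = 1,
+	// authorized_to_maintain_liabilities = 2, clawback_enabled = 4; the
+	// clearFlags (1|4) and setFlags (2) built in SetupTest therefore decode to
+	// clear_flags {1, 4} and set_flags {2}.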
operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 1, 1), + }, + }}, + operation: s.op, + ledgerSequence: 1, + } + + details, err := operation.Details() + s.Assert().NoError(err) + s.Assert().Equal(expected, details) +} + +func (s *SetTrustLineFlagsTestSuite) TestParticipants() { + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: make([]xdr.OperationMeta, 1, 1), + }, + }, + }, + operation: s.op, + ledgerSequence: 1, + } + + participants, err := operation.Participants() + s.Assert().NoError(err) + s.Assert().ElementsMatch([]xdr.AccountId{ + xdr.MustAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"), + xdr.MustAddress("GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + }, participants) +} + +func TestSetTrustLineFlagsTestSuite(t *testing.T) { + suite.Run(t, new(SetTrustLineFlagsTestSuite)) +} + +func TestLiquidityPoolDepositDetails(t *testing.T) { + poolID := xdr.PoolId{0xca, 0xfe, 0xba, 0xbe} + opBody := xdr.OperationBody{ + Type: xdr.OperationTypeLiquidityPoolDeposit, + LiquidityPoolDepositOp: &xdr.LiquidityPoolDepositOp{ + LiquidityPoolId: poolID, + MaxAmountA: 100, + MaxAmountB: 200, + MinPrice: xdr.Price{ + N: 50, + D: 3, + }, + MaxPrice: xdr.Price{ + N: 100, + D: 2, + }, + }, + } + entryChanges := xdr.LedgerEntryChanges{ + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("USD", "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 20, + }, + ReserveA: 100, + ReserveB: 200, + TotalPoolShares: 1000, + PoolSharesTrustLineCount: 10, + }, + }, + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("USD", "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 20, + }, + ReserveA: 160, + ReserveB: 250, + TotalPoolShares: 1010, + PoolSharesTrustLineCount: 10, + }, + }, + }, + }, + }, + }, + } + source := xdr.MustMuxedAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: []xdr.OperationMeta{{entryChanges}}, + }, + }}, + operation: xdr.Operation{ + SourceAccount: &source, + Body: opBody, + }, + ledgerSequence: 1, + } + + expected := map[string]interface{}{ + "liquidity_pool_id": 
"cafebabe00000000000000000000000000000000000000000000000000000000", + "min_price": "16.6666667", + "min_price_r": map[string]interface{}{ + "d": xdr.Int32(3), + "n": xdr.Int32(50), + }, + "max_price": "50.0000000", + "max_price_r": map[string]interface{}{ + "d": xdr.Int32(2), + "n": xdr.Int32(100), + }, + "reserves_deposited": []base.AssetAmount{ + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000060", + }, + { + "native", + "0.0000050", + }, + }, + "reserves_max": []base.AssetAmount{ + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000100", + }, + { + "native", + "0.0000200", + }, + }, + "shares_received": "0.0000010", + } + + details, err := operation.Details() + assert.NoError(t, err) + assert.Equal(t, expected, details) +} + +func TestFailedLiquidityPoolDepositDetails(t *testing.T) { + poolID := xdr.PoolId{0xca, 0xfe, 0xba, 0xbe} + opBody := xdr.OperationBody{ + Type: xdr.OperationTypeLiquidityPoolDeposit, + LiquidityPoolDepositOp: &xdr.LiquidityPoolDepositOp{ + LiquidityPoolId: poolID, + MaxAmountA: 100, + MaxAmountB: 200, + MinPrice: xdr.Price{ + N: 50, + D: 3, + }, + MaxPrice: xdr.Price{ + N: 100, + D: 2, + }, + }, + } + source := xdr.MustMuxedAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: []xdr.OperationMeta{{}}, + }, + }, + Result: xdr.TransactionResultPair{ + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxFailed, + }, + }, + }, + }, + operation: xdr.Operation{ + SourceAccount: &source, + Body: opBody, + }, + ledgerSequence: 1, + } + + expected := map[string]interface{}{ + "liquidity_pool_id": "cafebabe00000000000000000000000000000000000000000000000000000000", + "min_price": "16.6666667", + "min_price_r": map[string]interface{}{ + "d": xdr.Int32(3), + "n": xdr.Int32(50), + }, + "max_price": "50.0000000", + "max_price_r": map[string]interface{}{ + "d": xdr.Int32(2), + "n": xdr.Int32(100), + }, + "reserves_deposited": []base.AssetAmount{ + { + "", + "0.0000000", + }, + { + "", + "0.0000000", + }, + }, + "reserves_max": []base.AssetAmount{ + { + "", + "0.0000100", + }, + { + "", + "0.0000200", + }, + }, + "shares_received": "0.0000000", + } + + details, err := operation.Details() + assert.NoError(t, err) + assert.Equal(t, expected, details) +} + +func TestLiquidityPoolWithdrawDetails(t *testing.T) { + poolID := xdr.PoolId{0xca, 0xfe, 0xba, 0xbe} + opBody := xdr.OperationBody{ + Type: xdr.OperationTypeLiquidityPoolWithdraw, + LiquidityPoolWithdrawOp: &xdr.LiquidityPoolWithdrawOp{ + LiquidityPoolId: poolID, + Amount: 10, + MinAmountA: 5, + MinAmountB: 10, + }, + } + entryChanges := xdr.LedgerEntryChanges{ + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("USD", "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 20, + }, + ReserveA: 160, + ReserveB: 250, + TotalPoolShares: 1010, + 
PoolSharesTrustLineCount: 10, + }, + }, + }, + }, + }, + }, + { + Type: xdr.LedgerEntryChangeTypeLedgerEntryUpdated, + Updated: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("USD", "GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 20, + }, + ReserveA: 100, + ReserveB: 200, + TotalPoolShares: 1000, + PoolSharesTrustLineCount: 10, + }, + }, + }, + }, + }, + }, + } + source := xdr.MustMuxedAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: []xdr.OperationMeta{{entryChanges}}, + }, + }}, + operation: xdr.Operation{ + SourceAccount: &source, + Body: opBody, + }, + ledgerSequence: 1, + } + + expected := map[string]interface{}{ + "liquidity_pool_id": "cafebabe00000000000000000000000000000000000000000000000000000000", + "reserves_received": []base.AssetAmount{ + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000060", + }, + { + "native", + "0.0000050", + }, + }, + "reserves_min": []base.AssetAmount{ + { + "USD:GAUJETIZVEP2NRYLUESJ3LS66NVCEGMON4UDCBCSBEVPIID773P2W6AY", + "0.0000005", + }, + { + "native", + "0.0000010", + }, + }, + "shares": "0.0000010", + } + + details, err := operation.Details() + assert.NoError(t, err) + assert.Equal(t, expected, details) +} + +func TestFailedLiquidityPoolWithdrawDetails(t *testing.T) { + poolID := xdr.PoolId{0xca, 0xfe, 0xba, 0xbe} + opBody := xdr.OperationBody{ + Type: xdr.OperationTypeLiquidityPoolWithdraw, + LiquidityPoolWithdrawOp: &xdr.LiquidityPoolWithdrawOp{ + LiquidityPoolId: poolID, + Amount: 10, + MinAmountA: 5, + MinAmountB: 10, + }, + } + source := xdr.MustMuxedAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{ + Operations: []xdr.OperationMeta{{}}, + }, + }, + Result: xdr.TransactionResultPair{ + Result: xdr.TransactionResult{ + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxFailed, + }, + }, + }, + }, + operation: xdr.Operation{ + SourceAccount: &source, + Body: opBody, + }, + ledgerSequence: 1, + } + + expected := map[string]interface{}{ + "liquidity_pool_id": "cafebabe00000000000000000000000000000000000000000000000000000000", + "reserves_received": []base.AssetAmount{ + { + "", + "0.0000000", + }, + { + "", + "0.0000000", + }, + }, + "reserves_min": []base.AssetAmount{ + { + "", + "0.0000005", + }, + { + "", + "0.0000010", + }, + }, + "shares": "0.0000010", + } + + details, err := operation.Details() + assert.NoError(t, err) + assert.Equal(t, expected, details) +} + +func TestParticipantsCoversAllOperationTypes(t *testing.T) { + source := xdr.MustMuxedAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + for typ, s := range xdr.OperationTypeToStringMap { + op := xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationType(typ), + }, + } + 
operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + // calling Participants should either panic (because the operation field is set to nil) + // or not error + func() { + var err error + defer func() { + err2 := recover() + if err != nil { + assert.NotContains(t, err.Error(), "Unknown operation type") + } + assert.True(t, err2 != nil || err == nil, s) + }() + _, err = operation.Participants() + }() + } + + // make sure the check works for an unknown operation type + op := xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationType(20000), + }, + } + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + // calling Participants should error due to the unknown operation + _, err := operation.Participants() + assert.Contains(t, err.Error(), "Unknown operation type") +} + +func TestDetailsCoversAllOperationTypes(t *testing.T) { + source := xdr.MustMuxedAddress("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD") + for typ, s := range xdr.OperationTypeToStringMap { + op := xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationType(typ), + }, + } + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + // calling Details should either panic (because the operation field is set to nil) + // or not error + func() { + var err error + defer func() { + err2 := recover() + if err2 != nil { + if err3, ok := err2.(error); ok { + assert.NotContains(t, err3.Error(), "Unknown operation type") + } + } + assert.True(t, err2 != nil || err == nil, s) + }() + _, err = operation.Details() + }() + } + + // make sure the check works for an unknown operation type + op := xdr.Operation{ + SourceAccount: &source, + Body: xdr.OperationBody{ + Type: xdr.OperationType(20000), + }, + } + operation := transactionOperationWrapper{ + index: 0, + transaction: ingest.LedgerTransaction{ + UnsafeMeta: xdr.TransactionMeta{ + V: 2, + V2: &xdr.TransactionMetaV2{}, + }, + }, + operation: op, + ledgerSequence: 1, + } + // calling Details should panic with unknown operation type + f := func() { + operation.Details() + } + assert.PanicsWithError(t, "Unknown operation type: ", f) +} diff --git a/services/horizon/internal/ingest/processors/transactions_processor.go b/services/horizon/internal/ingest/processors/transactions_processor.go new file mode 100644 index 0000000000..4e47cb96ba --- /dev/null +++ b/services/horizon/internal/ingest/processors/transactions_processor.go @@ -0,0 +1,38 @@ +package processors + +import ( + "context" + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" +) + +type TransactionProcessor struct { + transactionsQ history.QTransactions + sequence uint32 + batch history.TransactionBatchInsertBuilder +} + +func NewTransactionProcessor(transactionsQ history.QTransactions, sequence uint32) *TransactionProcessor { + return &TransactionProcessor{ + transactionsQ: transactionsQ, + sequence: sequence, + batch: 
transactionsQ.NewTransactionBatchInsertBuilder(maxBatchSize), + } +} + +func (p *TransactionProcessor) ProcessTransaction(ctx context.Context, transaction ingest.LedgerTransaction) error { + if err := p.batch.Add(ctx, transaction, p.sequence); err != nil { + return errors.Wrap(err, "Error batch inserting transaction rows") + } + + return nil +} + +func (p *TransactionProcessor) Commit(ctx context.Context) error { + if err := p.batch.Exec(ctx); err != nil { + return errors.Wrap(err, "Error flushing transaction batch") + } + + return nil +} diff --git a/services/horizon/internal/ingest/processors/transactions_processor_test.go b/services/horizon/internal/ingest/processors/transactions_processor_test.go new file mode 100644 index 0000000000..ec1cf105e5 --- /dev/null +++ b/services/horizon/internal/ingest/processors/transactions_processor_test.go @@ -0,0 +1,90 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stretchr/testify/suite" +) + +type TransactionsProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *TransactionProcessor + mockQ *history.MockQTransactions + mockBatchInsertBuilder *history.MockTransactionsBatchInsertBuilder +} + +func TestTransactionsProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(TransactionsProcessorTestSuiteLedger)) +} + +func (s *TransactionsProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQTransactions{} + s.mockBatchInsertBuilder = &history.MockTransactionsBatchInsertBuilder{} + + s.mockQ. + On("NewTransactionBatchInsertBuilder", maxBatchSize). + Return(s.mockBatchInsertBuilder).Once() + + s.processor = NewTransactionProcessor(s.mockQ, 20) +} + +func (s *TransactionsProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) + s.mockBatchInsertBuilder.AssertExpectations(s.T()) +} + +func (s *TransactionsProcessorTestSuiteLedger) TestAddTransactionsSucceeds() { + sequence := uint32(20) + + firstTx := createTransaction(true, 1) + secondTx := createTransaction(false, 3) + thirdTx := createTransaction(true, 4) + + s.mockBatchInsertBuilder.On("Add", s.ctx, firstTx, sequence).Return(nil).Once() + s.mockBatchInsertBuilder.On("Add", s.ctx, secondTx, sequence).Return(nil).Once() + s.mockBatchInsertBuilder.On("Add", s.ctx, thirdTx, sequence).Return(nil).Once() + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) + + err := s.processor.ProcessTransaction(s.ctx, firstTx) + s.Assert().NoError(err) + + err = s.processor.ProcessTransaction(s.ctx, secondTx) + s.Assert().NoError(err) + + err = s.processor.ProcessTransaction(s.ctx, thirdTx) + s.Assert().NoError(err) +} + +func (s *TransactionsProcessorTestSuiteLedger) TestAddTransactionsFails() { + sequence := uint32(20) + firstTx := createTransaction(true, 1) + s.mockBatchInsertBuilder.On("Add", s.ctx, firstTx, sequence). 
+ Return(errors.New("transient error")).Once() + + err := s.processor.ProcessTransaction(s.ctx, firstTx) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error batch inserting transaction rows: transient error") +} + +func (s *TransactionsProcessorTestSuiteLedger) TestExecFails() { + sequence := uint32(20) + firstTx := createTransaction(true, 1) + + s.mockBatchInsertBuilder.On("Add", s.ctx, firstTx, sequence).Return(nil).Once() + s.mockBatchInsertBuilder.On("Exec", s.ctx).Return(errors.New("transient error")).Once() + + err := s.processor.ProcessTransaction(s.ctx, firstTx) + s.Assert().NoError(err) + + err = s.processor.Commit(s.ctx) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error flushing transaction batch: transient error") +} diff --git a/services/horizon/internal/ingest/processors/trust_lines_processor.go b/services/horizon/internal/ingest/processors/trust_lines_processor.go new file mode 100644 index 0000000000..f161e8a20f --- /dev/null +++ b/services/horizon/internal/ingest/processors/trust_lines_processor.go @@ -0,0 +1,150 @@ +package processors + +import ( + "context" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +type TrustLinesProcessor struct { + trustLinesQ history.QTrustLines + + cache *ingest.ChangeCompactor +} + +func NewTrustLinesProcessor(trustLinesQ history.QTrustLines) *TrustLinesProcessor { + p := &TrustLinesProcessor{trustLinesQ: trustLinesQ} + p.reset() + return p +} + +func (p *TrustLinesProcessor) reset() { + p.cache = ingest.NewChangeCompactor() +} + +func (p *TrustLinesProcessor) ProcessChange(ctx context.Context, change ingest.Change) error { + if change.Type != xdr.LedgerEntryTypeTrustline { + return nil + } + + err := p.cache.AddChange(change) + if err != nil { + return errors.Wrap(err, "error adding to ledgerCache") + } + + if p.cache.Size() > maxBatchSize { + err = p.Commit(ctx) + if err != nil { + return errors.Wrap(err, "error in Commit") + } + p.reset() + } + + return nil +} + +func trustLineLedgerKey(trustLineEntry xdr.TrustLineEntry) (string, error) { + var ledgerKey xdr.LedgerKey + var ledgerKeyString string + + err := ledgerKey.SetTrustline(trustLineEntry.AccountId, trustLineEntry.Asset) + if err != nil { + return "", errors.Wrap(err, "Error creating ledger key") + } + ledgerKeyString, err = ledgerKey.MarshalBinaryBase64() + if err != nil { + return "", errors.Wrap(err, "Error marshalling ledger key") + } + return ledgerKeyString, nil +} + +func xdrToTrustline(ledgerEntry xdr.LedgerEntry) (history.TrustLine, error) { + trustLineEntry := ledgerEntry.Data.MustTrustLine() + ledgerKeyString, err := trustLineLedgerKey(trustLineEntry) + if err != nil { + return history.TrustLine{}, errors.Wrap(err, "Error extracting ledger key") + } + + assetType := trustLineEntry.Asset.Type + var assetCode, assetIssuer, poolID string + if assetType == xdr.AssetTypeAssetTypePoolShare { + poolID = PoolIDToString(trustLineEntry.Asset.MustLiquidityPoolId()) + } else { + if err = trustLineEntry.Asset.ToAsset().Extract(&assetType, &assetCode, &assetIssuer); err != nil { + return history.TrustLine{}, errors.Wrap(err, "Error extracting asset from trustline") + } + } + + liabilities := trustLineEntry.Liabilities() + return history.TrustLine{ + AccountID: trustLineEntry.AccountId.Address(), + AssetType: assetType, + AssetIssuer: assetIssuer, + AssetCode: assetCode, + Balance: int64(trustLineEntry.Balance), + LedgerKey: ledgerKeyString, + 
Limit: int64(trustLineEntry.Limit), + LiquidityPoolID: poolID, + BuyingLiabilities: int64(liabilities.Buying), + SellingLiabilities: int64(liabilities.Selling), + Flags: uint32(trustLineEntry.Flags), + LastModifiedLedger: uint32(ledgerEntry.LastModifiedLedgerSeq), + Sponsor: ledgerEntrySponsorToNullString(ledgerEntry), + }, nil +} + +func (p *TrustLinesProcessor) Commit(ctx context.Context) error { + var batchUpsertTrustLines []history.TrustLine + var batchRemoveTrustLineKeys []string + + changes := p.cache.GetChanges() + for _, change := range changes { + switch { + case change.Post != nil: + tl, err := xdrToTrustline(*change.Post) + if err != nil { + return errors.Wrap(err, "Error extracting trustline") + } + + batchUpsertTrustLines = append(batchUpsertTrustLines, tl) + case change.Pre != nil && change.Post == nil: + // Removed + trustLineEntry := change.Pre.Data.MustTrustLine() + ledgerKeyString, err := trustLineLedgerKey(trustLineEntry) + if err != nil { + return errors.Wrap(err, "Error extracting ledger key") + } + batchRemoveTrustLineKeys = append(batchRemoveTrustLineKeys, ledgerKeyString) + + default: + return errors.New("Invalid io.Change: change.Pre == nil && change.Post == nil") + } + } + + if len(batchUpsertTrustLines) > 0 { + err := p.trustLinesQ.UpsertTrustLines(ctx, batchUpsertTrustLines) + if err != nil { + return errors.Wrap(err, "errors in UpsertTrustLines") + } + } + + if len(batchRemoveTrustLineKeys) > 0 { + rowsAffected, err := p.trustLinesQ.RemoveTrustLines(ctx, batchRemoveTrustLineKeys) + if err != nil { + return err + } + + if rowsAffected != int64(len(batchRemoveTrustLineKeys)) { + return ingest.NewStateError(errors.Errorf( + "%d rows affected when removing %d trust lines", + rowsAffected, + len(batchRemoveTrustLineKeys), + )) + } + } + + return nil +} diff --git a/services/horizon/internal/ingest/processors/trust_lines_processor_test.go b/services/horizon/internal/ingest/processors/trust_lines_processor_test.go new file mode 100644 index 0000000000..2e3d092f5a --- /dev/null +++ b/services/horizon/internal/ingest/processors/trust_lines_processor_test.go @@ -0,0 +1,687 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package processors + +import ( + "context" + "testing" + + "github.com/guregu/null" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +var trustLineIssuer = xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + +func TestTrustLinesProcessorTestSuiteState(t *testing.T) { + suite.Run(t, new(TrustLinesProcessorTestSuiteState)) +} + +type TrustLinesProcessorTestSuiteState struct { + suite.Suite + ctx context.Context + processor *TrustLinesProcessor + mockQ *history.MockQTrustLines +} + +func (s *TrustLinesProcessorTestSuiteState) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQTrustLines{} + s.processor = NewTrustLinesProcessor(s.mockQ) +} + +func (s *TrustLinesProcessorTestSuiteState) TearDownTest() { + s.Assert().NoError(s.processor.Commit(s.ctx)) + s.mockQ.AssertExpectations(s.T()) +} + +func (s *TrustLinesProcessorTestSuiteState) TestCreateTrustLine() { + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), 
+ Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + lastModifiedLedgerSeq := xdr.Uint32(123) + + poolShareTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Asset: xdr.TrustLineAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPoolId: &xdr.PoolId{1, 2, 3, 4}, + }, + Balance: 12365, + Limit: 123659, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &poolShareTrustLine, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On( + "UpsertTrustLines", + s.ctx, + mock.AnythingOfType("[]history.TrustLine"), + ).Run(func(args mock.Arguments) { + arg := args.Get(1).([]history.TrustLine) + s.Assert().ElementsMatch( + []history.TrustLine{ + { + AccountID: trustLine.AccountId.Address(), + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Balance: int64(trustLine.Balance), + LedgerKey: "AAAAAQAAAAAdBJqAD9qPq+j2nRDdjdp5KVoUh8riPkNO9ato7BNs8wAAAAFFVVIAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3", + Limit: int64(trustLine.Limit), + LiquidityPoolID: "", + BuyingLiabilities: int64(trustLine.Liabilities().Buying), + SellingLiabilities: int64(trustLine.Liabilities().Selling), + Flags: uint32(trustLine.Flags), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.String{}, + }, + { + AccountID: poolShareTrustLine.AccountId.Address(), + AssetType: xdr.AssetTypeAssetTypePoolShare, + Balance: int64(poolShareTrustLine.Balance), + LedgerKey: "AAAAAQAAAAC2LgFRDBZ3J52nLm30kq2iMgrO7dYzYAN3hvjtf1IHWgAAAAMBAgMEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==", + Limit: int64(poolShareTrustLine.Limit), + LiquidityPoolID: "0102030400000000000000000000000000000000000000000000000000000000", + Flags: uint32(poolShareTrustLine.Flags), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + }, + }, + arg) + }).Return(nil).Once() +} + +func (s *TrustLinesProcessorTestSuiteState) TestCreateTrustLineUnauthorized() { + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + } + lastModifiedLedgerSeq := xdr.Uint32(123) + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On( + "UpsertTrustLines", + s.ctx, + []history.TrustLine{ + { + AccountID: trustLine.AccountId.Address(), + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Balance: int64(trustLine.Balance), + LedgerKey: "AAAAAQAAAAAdBJqAD9qPq+j2nRDdjdp5KVoUh8riPkNO9ato7BNs8wAAAAFFVVIAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3", + Limit: 
int64(trustLine.Limit), + LiquidityPoolID: "", + BuyingLiabilities: int64(trustLine.Liabilities().Buying), + SellingLiabilities: int64(trustLine.Liabilities().Selling), + Flags: uint32(trustLine.Flags), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.String{}, + }, + }, + ).Return(nil).Once() +} + +func TestTrustLinesProcessorTestSuiteLedger(t *testing.T) { + suite.Run(t, new(TrustLinesProcessorTestSuiteLedger)) +} + +type TrustLinesProcessorTestSuiteLedger struct { + suite.Suite + ctx context.Context + processor *TrustLinesProcessor + mockQ *history.MockQTrustLines +} + +func (s *TrustLinesProcessorTestSuiteLedger) SetupTest() { + s.ctx = context.Background() + s.mockQ = &history.MockQTrustLines{} + s.processor = NewTrustLinesProcessor(s.mockQ) +} + +func (s *TrustLinesProcessorTestSuiteLedger) TearDownTest() { + s.mockQ.AssertExpectations(s.T()) +} + +func (s *TrustLinesProcessorTestSuiteLedger) TestNoIngestUpdateState() { + // Nothing processed, assertions in TearDownTest. + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *TrustLinesProcessorTestSuiteLedger) TestInsertTrustLine() { + // should be ignored because it's not an trust line type + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + }, + }) + s.Assert().NoError(err) + + // add trust line + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + unauthorizedTrustline := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + } + lastModifiedLedgerSeq := xdr.Uint32(1234) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: nil, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &unauthorizedTrustline, + }, + }, + }) + s.Assert().NoError(err) + + updatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + updatedUnauthorizedTrustline := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: 
xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &updatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &unauthorizedTrustline, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &updatedUnauthorizedTrustline, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On( + "UpsertTrustLines", + s.ctx, + mock.AnythingOfType("[]history.TrustLine"), + ).Run(func(args mock.Arguments) { + arg := args.Get(1).([]history.TrustLine) + s.Assert().ElementsMatch( + []history.TrustLine{ + { + AccountID: updatedTrustLine.AccountId.Address(), + AssetType: updatedTrustLine.Asset.Type, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Balance: int64(updatedTrustLine.Balance), + LedgerKey: "AAAAAQAAAAAdBJqAD9qPq+j2nRDdjdp5KVoUh8riPkNO9ato7BNs8wAAAAFFVVIAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3", + Limit: int64(updatedTrustLine.Limit), + LiquidityPoolID: "", + BuyingLiabilities: int64(updatedTrustLine.Liabilities().Buying), + SellingLiabilities: int64(updatedTrustLine.Liabilities().Selling), + Flags: uint32(updatedTrustLine.Flags), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.String{}, + }, + { + AccountID: updatedUnauthorizedTrustline.AccountId.Address(), + AssetType: updatedUnauthorizedTrustline.Asset.Type, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "USD", + Balance: int64(updatedUnauthorizedTrustline.Balance), + LedgerKey: "AAAAAQAAAAC2LgFRDBZ3J52nLm30kq2iMgrO7dYzYAN3hvjtf1IHWgAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3", + Limit: int64(updatedUnauthorizedTrustline.Limit), + LiquidityPoolID: "", + BuyingLiabilities: int64(updatedUnauthorizedTrustline.Liabilities().Buying), + SellingLiabilities: int64(updatedUnauthorizedTrustline.Liabilities().Selling), + Flags: uint32(updatedUnauthorizedTrustline.Flags), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.String{}, + }, + }, + arg, + ) + }).Return(nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *TrustLinesProcessorTestSuiteLedger) TestUpdateTrustLine() { + lastModifiedLedgerSeq := xdr.Uint32(1234) + + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + updatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: 
lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &updatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On( + "UpsertTrustLines", + s.ctx, + []history.TrustLine{ + { + AccountID: updatedTrustLine.AccountId.Address(), + AssetType: updatedTrustLine.Asset.Type, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Balance: int64(updatedTrustLine.Balance), + LedgerKey: "AAAAAQAAAAAdBJqAD9qPq+j2nRDdjdp5KVoUh8riPkNO9ato7BNs8wAAAAFFVVIAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3", + Limit: int64(updatedTrustLine.Limit), + LiquidityPoolID: "", + BuyingLiabilities: int64(updatedTrustLine.Liabilities().Buying), + SellingLiabilities: int64(updatedTrustLine.Liabilities().Selling), + Flags: uint32(updatedTrustLine.Flags), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.String{}, + }, + }, + ).Return(nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *TrustLinesProcessorTestSuiteLedger) TestUpdateTrustLineAuthorization() { + lastModifiedLedgerSeq := xdr.Uint32(1234) + + trustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 100, + } + updatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + otherTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 100, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + otherUpdatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &updatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq - 1, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &otherTrustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &otherUpdatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On( + "UpsertTrustLines", + s.ctx, + mock.AnythingOfType("[]history.TrustLine"), + ).Run(func(args mock.Arguments) { + arg := args.Get(1).([]history.TrustLine) + s.Assert().ElementsMatch( + []history.TrustLine{ + { + AccountID: updatedTrustLine.AccountId.Address(), + AssetType: updatedTrustLine.Asset.Type, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Balance: 
int64(updatedTrustLine.Balance), + LedgerKey: "AAAAAQAAAAAdBJqAD9qPq+j2nRDdjdp5KVoUh8riPkNO9ato7BNs8wAAAAFFVVIAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3", + Limit: int64(updatedTrustLine.Limit), + LiquidityPoolID: "", + BuyingLiabilities: int64(updatedTrustLine.Liabilities().Buying), + SellingLiabilities: int64(updatedTrustLine.Liabilities().Selling), + Flags: uint32(updatedTrustLine.Flags), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.String{}, + }, + { + AccountID: otherUpdatedTrustLine.AccountId.Address(), + AssetType: otherUpdatedTrustLine.Asset.Type, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "USD", + Balance: int64(otherUpdatedTrustLine.Balance), + LedgerKey: "AAAAAQAAAAAdBJqAD9qPq+j2nRDdjdp5KVoUh8riPkNO9ato7BNs8wAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3", + Limit: int64(otherUpdatedTrustLine.Limit), + LiquidityPoolID: "", + BuyingLiabilities: int64(otherUpdatedTrustLine.Liabilities().Buying), + SellingLiabilities: int64(otherUpdatedTrustLine.Liabilities().Selling), + Flags: uint32(otherUpdatedTrustLine.Flags), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.String{}, + }, + }, + arg, + ) + }).Return(nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *TrustLinesProcessorTestSuiteLedger) TestRemoveTrustLine() { + unauthorizedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + }, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &unauthorizedTrustLine, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + lkStr1, err := xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.LedgerKeyTrustLine{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + }, + }.MarshalBinaryBase64() + + lkStr2, err := xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.LedgerKeyTrustLine{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("USD", trustLineIssuer.Address()).ToTrustLineAsset(), + }, + }.MarshalBinaryBase64() + s.Assert().NoError(err) + s.mockQ.On( + "RemoveTrustLines", s.ctx, mock.Anything, + ).Run(func(args mock.Arguments) { + // To fix order issue due to using ChangeCompactor + ledgerKeys := args.Get(1).([]string) + s.Assert().ElementsMatch( + ledgerKeys, + []string{lkStr1, lkStr2}, + ) + }).Return(int64(2), nil).Once() + + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *TrustLinesProcessorTestSuiteLedger) TestProcessUpgradeChange() { + // add trust line + lastModifiedLedgerSeq := xdr.Uint32(1234) + trustLine := 
xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + }) + s.Assert().NoError(err) + + updatedTrustLine := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 10, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + err = s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustLine, + }, + }, + Post: &xdr.LedgerEntry{ + LastModifiedLedgerSeq: lastModifiedLedgerSeq, + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &updatedTrustLine, + }, + }, + }) + s.Assert().NoError(err) + + s.mockQ.On( + "UpsertTrustLines", s.ctx, + []history.TrustLine{ + { + AccountID: updatedTrustLine.AccountId.Address(), + AssetType: updatedTrustLine.Asset.Type, + AssetIssuer: trustLineIssuer.Address(), + AssetCode: "EUR", + Balance: int64(updatedTrustLine.Balance), + LedgerKey: "AAAAAQAAAAAdBJqAD9qPq+j2nRDdjdp5KVoUh8riPkNO9ato7BNs8wAAAAFFVVIAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3", + Limit: int64(updatedTrustLine.Limit), + LiquidityPoolID: "", + BuyingLiabilities: int64(updatedTrustLine.Liabilities().Buying), + SellingLiabilities: int64(updatedTrustLine.Liabilities().Selling), + Flags: uint32(updatedTrustLine.Flags), + LastModifiedLedger: uint32(lastModifiedLedgerSeq), + Sponsor: null.String{}, + }, + }, + ).Return(nil).Once() + s.Assert().NoError(s.processor.Commit(s.ctx)) +} + +func (s *TrustLinesProcessorTestSuiteLedger) TestRemoveTrustlineNoRowsAffected() { + err := s.processor.ProcessChange(s.ctx, ingest.Change{ + Type: xdr.LedgerEntryTypeTrustline, + Pre: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Balance: 0, + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + }, + }, + }, + Post: nil, + }) + s.Assert().NoError(err) + + lkStr, err := xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &xdr.LedgerKeyTrustLine{ + AccountId: xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + }, + }.MarshalBinaryBase64() + s.Assert().NoError(err) + + s.mockQ.On( + "RemoveTrustLines", s.ctx, []string{lkStr}, + ).Return(int64(0), nil).Once() + + err = s.processor.Commit(s.ctx) + s.Assert().Error(err) + s.Assert().IsType(ingest.StateError{}, errors.Cause(err)) + s.Assert().EqualError(err, "0 rows affected when removing 1 trust lines") +} diff --git a/services/horizon/internal/ingest/resume_state_test.go b/services/horizon/internal/ingest/resume_state_test.go new file mode 100644 index 
0000000000..863c425094 --- /dev/null +++ b/services/horizon/internal/ingest/resume_state_test.go @@ -0,0 +1,376 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package ingest + +import ( + "context" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +func TestResumeTestTestSuite(t *testing.T) { + suite.Run(t, new(ResumeTestTestSuite)) +} + +type ResumeTestTestSuite struct { + suite.Suite + ctx context.Context + ledgerBackend *ledgerbackend.MockDatabaseBackend + historyQ *mockDBQ + historyAdapter *mockHistoryArchiveAdapter + runner *mockProcessorsRunner + stellarCoreClient *mockStellarCoreClient + system *system +} + +func (s *ResumeTestTestSuite) SetupTest() { + s.ctx = context.Background() + s.ledgerBackend = &ledgerbackend.MockDatabaseBackend{} + s.historyQ = &mockDBQ{} + s.historyAdapter = &mockHistoryArchiveAdapter{} + s.runner = &mockProcessorsRunner{} + s.stellarCoreClient = &mockStellarCoreClient{} + s.system = &system{ + ctx: s.ctx, + historyQ: s.historyQ, + historyAdapter: s.historyAdapter, + runner: s.runner, + ledgerBackend: s.ledgerBackend, + stellarCoreClient: s.stellarCoreClient, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + s.system.initMetrics() + + s.historyQ.On("Rollback").Return(nil).Once() + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(101)).Return(false, nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.UnboundedRange(101)).Return(nil).Once() + s.ledgerBackend.On("GetLedger", s.ctx, uint32(101)).Return(xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 101, + LedgerVersion: xdr.Uint32(MaxSupportedProtocolVersion), + BucketListHash: xdr.Hash{1, 2, 3}, + }, + }, + }, + }, nil).Once() +} + +func (s *ResumeTestTestSuite) TearDownTest() { + t := s.T() + s.historyQ.AssertExpectations(t) + s.runner.AssertExpectations(t) + s.historyAdapter.AssertExpectations(t) + s.ledgerBackend.AssertExpectations(t) + s.stellarCoreClient.AssertExpectations(t) +} + +func (s *ResumeTestTestSuite) TestInvalidParam() { + // Recreate mock in this single test to remove Rollback assertion. + *s.historyQ = mockDBQ{} + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + next, err := resumeState{latestSuccessfullyProcessedLedger: 0}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "unexpected latestSuccessfullyProcessedLedger value") + s.Assert().Equal( + transition{node: startState{}, sleepDuration: defaultSleep}, + next, + ) +} + +func (s *ResumeTestTestSuite) TestRangeNotPreparedFailPrepare() { + // Recreate mock in this single test to remove Rollback assertion. 
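The resume-state tests above and below all assert on the transition returned by resumeState.run: the next state node plus how long the system should sleep before running it, with startState as the fallback when the state cannot be resumed. The sketch below shows only that loop shape; the state names, fields and driver loop are simplified stand-ins (the real run methods also take the system and return an error alongside the transition), so treat it as an illustration rather than code from this patch.

package main

import (
	"fmt"
	"time"
)

// node and transition are simplified stand-ins for the unexported state
// types exercised by these tests.
type node interface {
	run() transition
}

type transition struct {
	next  node
	sleep time.Duration
}

type startState struct{}
type resumeState struct{ lastLedger uint32 }

func (startState) run() transition {
	// In the real system this prepares the ledger range before handing
	// off to the resume state.
	return transition{next: resumeState{lastLedger: 100}, sleep: 0}
}

func (s resumeState) run() transition {
	// On success the next resumeState advances by one ledger with no sleep;
	// on failure the tests above expect the same state plus a default sleep.
	return transition{next: resumeState{lastLedger: s.lastLedger + 1}, sleep: 0}
}

func main() {
	var current node = startState{}
	for i := 0; i < 3; i++ {
		t := current.run()
		fmt.Printf("%T -> %T (sleep %v)\n", current, t.next, t.sleep)
		time.Sleep(t.sleep)
		current = t.next
	}
}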
+ *s.historyQ = mockDBQ{} + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(101)).Return(false, nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.UnboundedRange(101)).Return(errors.New("my error")).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "error preparing range: my error") + s.Assert().Equal( + transition{node: startState{}, sleepDuration: defaultSleep}, + next, + ) +} + +func (s *ResumeTestTestSuite) TestRangeNotPreparedSuccessPrepareGetLedgerFail() { + // Recreate mock in this single test to remove Rollback assertion. + *s.historyQ = mockDBQ{} + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(101)).Return(false, nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.UnboundedRange(101)).Return(nil).Once() + s.ledgerBackend.On("GetLedger", s.ctx, uint32(101)).Return(xdr.LedgerCloseMeta{}, errors.New("my error")).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "error getting ledger blocking: my error") + s.Assert().Equal(transition{node: startState{}, sleepDuration: defaultSleep}, next) +} + +func (s *ResumeTestTestSuite) TestBeginReturnsError() { + // Recreate mock in this single test to remove Rollback assertion. + *s.historyQ = mockDBQ{} + + s.historyQ.On("Begin").Return(errors.New("my error")).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error starting a transaction: my error") + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: 100}, + sleepDuration: defaultSleep, + }, + next, + ) +} + +func (s *ResumeTestTestSuite) TestGetLastLedgerIngestReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), errors.New("my error")).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error getting last ingested ledger: my error") + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: 100}, + sleepDuration: defaultSleep, + }, + next, + ) +} + +func (s *ResumeTestTestSuite) TestGetLatestLedgerLessThanCurrent() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(99), nil).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "expected ingest ledger to be at most one greater than last ingested ledger in db") + s.Assert().Equal( + transition{node: startState{}, sleepDuration: defaultSleep}, + next, + ) +} + +func (s *ResumeTestTestSuite) TestGetIngestionVersionError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(0, errors.New("my error")).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error getting ingestion version: my error") + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: 100}, + 
sleepDuration: defaultSleep, + }, + next, + ) +} + +func (s *ResumeTestTestSuite) TestIngestionVersionLessThanCurrentVersion() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion-1, nil).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{node: startState{}, sleepDuration: defaultSleep}, + next, + ) +} + +func (s *ResumeTestTestSuite) TestIngestionVersionGreaterThanCurrentVersion() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion+1, nil).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{node: startState{}, sleepDuration: defaultSleep}, + next, + ) +} + +func (s *ResumeTestTestSuite) TestGetLatestLedgerError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(0), errors.New("my error")) + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "could not get latest history ledger: my error") + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: 100}, + sleepDuration: defaultSleep, + }, + next, + ) +} + +func (s *ResumeTestTestSuite) TestLatestHistoryLedgerLessThanIngestLedger() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(99), nil) + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{node: startState{}, sleepDuration: defaultSleep}, + next, + ) +} + +func (s *ResumeTestTestSuite) TestLatestHistoryLedgerGreaterThanIngestLedger() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(101), nil) + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{node: startState{}, sleepDuration: defaultSleep}, + next, + ) +} + +func (s *ResumeTestTestSuite) mockSuccessfulIngestion() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(100), nil) + + s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")). + Run(func(args mock.Arguments) { + meta := args.Get(0).(xdr.LedgerCloseMeta) + s.Assert().Equal(uint32(101), meta.LedgerSequence()) + }). 
+ Return( + ledgerStats{}, + nil, + ).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once() + s.historyQ.On("Commit").Return(nil).Once() + s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101)).Return(nil).Once() + + s.stellarCoreClient.On( + "SetCursor", + mock.AnythingOfType("*context.timerCtx"), + defaultCoreCursorName, + int32(101), + ).Return(nil).Once() + + s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once() +} +func (s *ResumeTestTestSuite) TestBumpIngestLedger() { + *s.ledgerBackend = ledgerbackend.MockDatabaseBackend{} + + s.ledgerBackend.On("IsPrepared", s.ctx, ledgerbackend.UnboundedRange(100)).Return(false, nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.UnboundedRange(100)).Return(nil).Once() + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: 100, + LedgerVersion: xdr.Uint32(MaxSupportedProtocolVersion), + BucketListHash: xdr.Hash{1, 2, 3}, + }, + }, + }, + }, nil).Once() + + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(101), nil).Once() + + s.stellarCoreClient.On( + "SetCursor", + mock.AnythingOfType("*context.timerCtx"), + defaultCoreCursorName, + int32(101), + ).Return(errors.New("my error")).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 99}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: 101}, + sleepDuration: defaultSleep, + }, + next, + ) +} + +func (s *ResumeTestTestSuite) TestIngestAllMasterNode() { + s.mockSuccessfulIngestion() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: 101}, + sleepDuration: 0, + }, + next, + ) +} + +func (s *ResumeTestTestSuite) TestErrorSettingCursorIgnored() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + s.historyQ.On("GetIngestVersion", s.ctx).Return(CurrentVersion, nil).Once() + s.historyQ.On("GetLatestHistoryLedger", s.ctx).Return(uint32(100), nil) + + s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")). + Run(func(args mock.Arguments) { + meta := args.Get(0).(xdr.LedgerCloseMeta) + s.Assert().Equal(uint32(101), meta.LedgerSequence()) + }). 
+ Return( + ledgerStats{}, + nil, + ).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(101)).Return(nil).Once() + s.historyQ.On("Commit").Return(nil).Once() + + s.stellarCoreClient.On( + "SetCursor", + mock.AnythingOfType("*context.timerCtx"), + defaultCoreCursorName, + int32(101), + ).Return(errors.New("my error")).Once() + + s.historyQ.On("GetExpStateInvalid", s.ctx).Return(false, nil).Once() + s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(101), uint32(101)).Return(nil).Once() + + next, err := resumeState{latestSuccessfullyProcessedLedger: 100}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{ + node: resumeState{latestSuccessfullyProcessedLedger: 101}, + sleepDuration: 0, + }, + next, + ) +} diff --git a/services/horizon/internal/ingest/sample_changes_test.go b/services/horizon/internal/ingest/sample_changes_test.go new file mode 100644 index 0000000000..6553e7b7b5 --- /dev/null +++ b/services/horizon/internal/ingest/sample_changes_test.go @@ -0,0 +1,248 @@ +//go:build update +// +build update + +package ingest + +import ( + "context" + "io/ioutil" + "math/rand" + "path/filepath" + "testing" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/ingest/processors" + logpkg "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +const ( + sampleSize = 100 + sampleSeed = 37213 +) + +type sampleChangeReader struct { + offers []*xdr.LedgerEntry + trustlines []*xdr.LedgerEntry + accounts []*xdr.LedgerEntry + data []*xdr.LedgerEntry + size int + + allAccounts map[string]*xdr.LedgerEntry + allChanges xdr.LedgerEntryChanges + inner ingest.ChangeReader + random *rand.Rand + output string +} + +func newSampleChangeReader(output string, size int) (*sampleChangeReader, error) { + archive, err := historyarchive.Connect( + "http://history.stellar.org/prd/core-live/core_live_001", + historyarchive.ConnectOptions{ + Context: context.Background(), + }, + ) + if err != nil { + return nil, err + } + + historyAdapter := newHistoryArchiveAdapter(archive) + checkpointLedger, err := historyAdapter.GetLatestLedgerSequence() + if err != nil { + return nil, err + } + + inner, err := historyAdapter.GetState(context.Background(), checkpointLedger) + if err != nil { + return nil, err + } + inner = newloggingChangeReader( + inner, + "historyArchive", + checkpointLedger, + logFrequency, + false, + ) + + r := &sampleChangeReader{ + offers: []*xdr.LedgerEntry{}, + trustlines: []*xdr.LedgerEntry{}, + accounts: []*xdr.LedgerEntry{}, + data: []*xdr.LedgerEntry{}, + inner: inner, + allAccounts: map[string]*xdr.LedgerEntry{}, + output: output, + random: rand.New(rand.NewSource(sampleSeed)), + size: size, + allChanges: xdr.LedgerEntryChanges{}, + } + return r, nil +} + +func (r *sampleChangeReader) Read() (ingest.Change, error) { + change, err := r.inner.Read() + if err != nil { + return change, err + } + + switch change.Type { + case xdr.LedgerEntryTypeAccount: + r.allAccounts[change.Post.Data.Account.AccountId.Address()] = change.Post + case xdr.LedgerEntryTypeData: + if len(r.data) < r.size { + r.data = append(r.data, change.Post) + } else { + r.data[r.random.Intn(r.size)] = change.Post + } + case xdr.LedgerEntryTypeOffer: + if len(r.offers) < r.size { + r.offers = append(r.offers, change.Post) + } else { + r.offers[r.random.Intn(r.size)] = change.Post + } + case xdr.LedgerEntryTypeTrustline: + if len(r.trustlines) < r.size { + r.trustlines = append(r.trustlines, change.Post) + } 
else { + r.trustlines[r.random.Intn(r.size)] = change.Post + } + } + + return change, nil +} + +func getIssuer(asset xdr.Asset) string { + if alphanum, ok := asset.GetAlphaNum12(); ok { + return alphanum.Issuer.Address() + } + if alphanum, ok := asset.GetAlphaNum4(); ok { + return alphanum.Issuer.Address() + } + return "" +} + +func (r *sampleChangeReader) Close() error { + if err := r.inner.Close(); err != nil { + return err + } + + for _, dataEntry := range r.data { + address := dataEntry.Data.Data.AccountId.Address() + if entry := r.allAccounts[address]; entry != nil { + r.accounts = append(r.accounts, entry) + delete(r.allAccounts, address) + } + } + + for _, trustlineEntry := range r.trustlines { + address := trustlineEntry.Data.TrustLine.AccountId.Address() + if entry := r.allAccounts[address]; entry != nil { + r.accounts = append(r.accounts, entry) + delete(r.allAccounts, address) + } + } + + for _, offerEntry := range r.offers { + seller := offerEntry.Data.Offer.SellerId.Address() + if entry := r.allAccounts[seller]; entry != nil { + r.accounts = append(r.accounts, entry) + delete(r.allAccounts, seller) + } + + if issuer := getIssuer(offerEntry.Data.Offer.Buying); r.allAccounts[issuer] != nil { + r.accounts = append(r.accounts, r.allAccounts[issuer]) + delete(r.allAccounts, issuer) + } + + if issuer := getIssuer(offerEntry.Data.Offer.Selling); r.allAccounts[issuer] != nil { + r.accounts = append(r.accounts, r.allAccounts[issuer]) + delete(r.allAccounts, issuer) + } + } + + extraAccounts := 0 + for _, entry := range r.allAccounts { + if extraAccounts >= r.size { + break + } + r.accounts = append(r.accounts, entry) + extraAccounts++ + } + + for _, list := range [][]*xdr.LedgerEntry{ + r.accounts, + r.data, + r.offers, + r.trustlines, + } { + for _, entry := range list { + r.allChanges = append(r.allChanges, xdr.LedgerEntryChange{ + Type: xdr.LedgerEntryChangeTypeLedgerEntryState, + State: entry, + }) + } + } + + serialized, err := r.allChanges.MarshalBinary() + if err != nil { + return err + } + + if err := ioutil.WriteFile(r.output, serialized, 0644); err != nil { + return err + } + return nil +} + +func TestUpdateSampleChanges(t *testing.T) { + log.SetLevel(logpkg.InfoLevel) + path := filepath.Join("testdata", "sample-changes.xdr") + reader, err := newSampleChangeReader(path, sampleSize) + if err != nil { + t.Fatalf("could not create sample change reader: %v", err) + } + + changeStats := &ingest.StatsChangeProcessor{} + err = processors.StreamChanges(changeStats, reader) + if err != nil { + t.Fatalf("could not stream changes: %v", err) + } + err = reader.Close() + if err != nil { + t.Fatalf("could not close reader: %v", err) + } + + results := changeStats.GetResults() + log.WithFields(results.Map()). 
+ Info("Finished processing ledger entry changes") + + contents, err := ioutil.ReadFile(path) + if err != nil { + t.Fatalf("could not read sample file: %v", err) + } + entryChanges := xdr.LedgerEntryChanges{} + if err := entryChanges.UnmarshalBinary(contents); err != nil { + t.Fatalf("could not unmarshall sample file: %v", err) + } + + for i, entry := range entryChanges { + marshalledFileEntry, err := xdr.MarshalBase64(entry) + if err != nil { + t.Fatalf("could not marshall ledger entry change: %v", err) + } + + marshalledSourceEntry, err := xdr.MarshalBase64(reader.allChanges[i]) + if err != nil { + t.Fatalf("could not marshall ledger entry change: %v", err) + } + + if marshalledFileEntry != marshalledSourceEntry { + t.Fatalf( + "ledger entry change from sample file '%s' does not match source '%s'", + marshalledFileEntry, + marshalledSourceEntry, + ) + } + } +} diff --git a/services/horizon/internal/ingest/states.jpg b/services/horizon/internal/ingest/states.jpg new file mode 100644 index 0000000000..8864a3bd94 Binary files /dev/null and b/services/horizon/internal/ingest/states.jpg differ diff --git a/services/horizon/internal/ingest/stress_test.go b/services/horizon/internal/ingest/stress_test.go new file mode 100644 index 0000000000..337e96acc5 --- /dev/null +++ b/services/horizon/internal/ingest/stress_test.go @@ -0,0 +1,144 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package ingest + +import ( + "context" + "testing" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/support/errors" +) + +func TestStressTestStateTestSuite(t *testing.T) { + suite.Run(t, new(StressTestStateTestSuite)) +} + +type StressTestStateTestSuite struct { + suite.Suite + ctx context.Context + historyQ *mockDBQ + historyAdapter *mockHistoryArchiveAdapter + runner *mockProcessorsRunner + system *system +} + +func (s *StressTestStateTestSuite) SetupTest() { + s.ctx = context.Background() + s.historyQ = &mockDBQ{} + s.historyAdapter = &mockHistoryArchiveAdapter{} + s.runner = &mockProcessorsRunner{} + s.system = &system{ + ctx: s.ctx, + historyQ: s.historyQ, + historyAdapter: s.historyAdapter, + runner: s.runner, + } + s.system.initMetrics() + + s.historyQ.On("GetTx").Return(nil).Once() + s.historyQ.On("Rollback").Return(nil).Once() + s.runner.On("EnableMemoryStatsLogging").Return() +} + +func (s *StressTestStateTestSuite) TearDownTest() { + t := s.T() + s.historyQ.AssertExpectations(t) + s.historyAdapter.AssertExpectations(t) + s.runner.AssertExpectations(t) +} + +func (s *StressTestStateTestSuite) TestBounds() { + *s.historyQ = mockDBQ{} + *s.runner = mockProcessorsRunner{} + + err := s.system.StressTest(-1, 4) + s.Assert().EqualError(err, "transactions must be positive") + + err = s.system.StressTest(0, 4) + s.Assert().EqualError(err, "transactions must be positive") + + err = s.system.StressTest(100, -2) + s.Assert().EqualError(err, "changes per transaction must be positive") +} + +func (s *StressTestStateTestSuite) TestBeginReturnsError() { + *s.historyQ = mockDBQ{} + s.historyQ.On("GetTx").Return(nil).Once() + s.historyQ.On("Begin").Return(errors.New("my error")).Once() + + err := s.system.StressTest(10, 4) + s.Assert().EqualError(err, "Error starting a transaction: my error") +} + +func (s *StressTestStateTestSuite) TestGetLastLedgerIngestReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), errors.New("my error")).Once() + + err 
:= s.system.StressTest(10, 4) + s.Assert().EqualError(err, "Error getting last ingested ledger: my error") +} + +func (s *StressTestStateTestSuite) TestGetLastLedgerIngestNonEmpty() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + + err := s.system.StressTest(10, 4) + s.Assert().EqualError(err, "Database not empty") +} + +func (s *StressTestStateTestSuite) TestRunAllProcessorsOnLedgerReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + + s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")).Return( + ledgerStats{}, + errors.New("my error"), + ).Once() + + err := s.system.StressTest(10, 4) + s.Assert().EqualError(err, "Error running processors on ledger: my error") +} + +func (s *StressTestStateTestSuite) TestUpdateLastLedgerIngestReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")).Return( + ledgerStats{}, + nil, + ).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(1)).Return(errors.New("my error")).Once() + + err := s.system.StressTest(10, 4) + s.Assert().EqualError(err, "Error updating last ingested ledger: my error") +} + +func (s *StressTestStateTestSuite) TestCommitReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")).Return( + ledgerStats{}, + nil, + ).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(1)).Return(nil).Once() + s.historyQ.On("Commit").Return(errors.New("my error")).Once() + + err := s.system.StressTest(10, 4) + s.Assert().EqualError(err, "Error committing db transaction: my error") +} + +func (s *StressTestStateTestSuite) TestSucceeds() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.runner.On("RunAllProcessorsOnLedger", mock.AnythingOfType("xdr.LedgerCloseMeta")).Return( + ledgerStats{}, + nil, + ).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(1)).Return(nil).Once() + s.historyQ.On("Commit").Return(nil).Once() + + err := s.system.StressTest(10, 4) + s.Assert().NoError(err) +} diff --git a/services/horizon/internal/ingest/testdata/sample-changes.xdr b/services/horizon/internal/ingest/testdata/sample-changes.xdr new file mode 100644 index 0000000000..2b2edfc0c2 Binary files /dev/null and b/services/horizon/internal/ingest/testdata/sample-changes.xdr differ diff --git a/services/horizon/internal/ingest/verify.go b/services/horizon/internal/ingest/verify.go new file mode 100644 index 0000000000..2538cf7268 --- /dev/null +++ b/services/horizon/internal/ingest/verify.go @@ -0,0 +1,815 @@ +package ingest + +import ( + "context" + "database/sql" + "encoding/hex" + "fmt" + "time" + + "github.com/guregu/null" + "github.com/prometheus/client_golang/prometheus" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest/processors" + "github.com/stellar/go/services/horizon/internal/ingest/verify" + "github.com/stellar/go/support/errors" + logpkg "github.com/stellar/go/support/log" + 
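Looking back at sampleChangeReader.Read in sample_changes_test.go above: it keeps at most sampleSize entries per ledger-entry type by appending until a bucket is full and then overwriting a randomly chosen slot, seeded with sampleSeed so the sample is reproducible. The sketch below illustrates that bounded random-replacement scheme with placeholder types; it deliberately mirrors the test's behaviour (unconditional replacement, which favours later entries) rather than classic uniform reservoir sampling, and it is not part of this patch.

package main

import (
	"fmt"
	"math/rand"
)

// sample keeps at most size entries, mirroring how sampleChangeReader
// buckets offers, trust lines and data entries: while the bucket is not
// full every entry is appended; afterwards each new entry overwrites a
// randomly chosen slot.
type sample struct {
	entries []string
	size    int
	random  *rand.Rand
}

func (s *sample) add(entry string) {
	if len(s.entries) < s.size {
		s.entries = append(s.entries, entry)
		return
	}
	s.entries[s.random.Intn(s.size)] = entry
}

func main() {
	// A fixed seed keeps the result deterministic, as sampleSeed does in the test.
	s := &sample{size: 3, random: rand.New(rand.NewSource(37213))}
	for i := 0; i < 10; i++ {
		s.add(fmt.Sprintf("entry-%d", i))
	}
	fmt.Println(s.entries) // three of the ten entries, stable for this seed
}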
"github.com/stellar/go/xdr" +) + +const verifyBatchSize = 50000 +const assetStatsBatchSize = 500 + +// stateVerifierExpectedIngestionVersion defines a version of ingestion system +// required by state verifier. This is done to prevent situations where +// ingestion has been updated with new features but state verifier does not +// check them. +// There is a test that checks it, to fix it: update the actual `verifyState` +// method instead of just updating this value! +const stateVerifierExpectedIngestionVersion = 15 + +// verifyState is called as a go routine from pipeline post hook every 64 +// ledgers. It checks if the state is correct. If another go routine is already +// running it exits. +func (s *system) verifyState(verifyAgainstLatestCheckpoint bool) error { + s.stateVerificationMutex.Lock() + if s.stateVerificationRunning { + log.Warn("State verification is already running...") + s.stateVerificationMutex.Unlock() + return nil + } + s.stateVerificationRunning = true + s.stateVerificationMutex.Unlock() + defer func() { + s.stateVerificationMutex.Lock() + s.stateVerificationRunning = false + s.stateVerificationMutex.Unlock() + }() + + updateMetrics := false + + if stateVerifierExpectedIngestionVersion != CurrentVersion { + log.Errorf( + "State verification expected version is %d but actual is: %d", + stateVerifierExpectedIngestionVersion, + CurrentVersion, + ) + return nil + } + + historyQ := s.historyQ.CloneIngestionQ() + defer historyQ.Rollback() + err := historyQ.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }) + if err != nil { + return errors.Wrap(err, "Error starting transaction") + } + + // Ensure the ledger is a checkpoint ledger + ledgerSequence, err := historyQ.GetLastLedgerIngestNonBlocking(s.ctx) + if err != nil { + return errors.Wrap(err, "Error running historyQ.GetLastLedgerIngestNonBlocking") + } + + localLog := log.WithFields(logpkg.F{ + "subservice": "state_verify", + "sequence": ledgerSequence, + }) + + if !s.checkpointManager.IsCheckpoint(ledgerSequence) { + localLog.Info("Current ledger is not a checkpoint ledger. Cancelling...") + return nil + } + + localLog.Info("Starting state verification") + + if verifyAgainstLatestCheckpoint { + retries := 0 + for { + // Get root HAS to check if we're checking one of the latest ledgers or + // Horizon is catching up. It doesn't make sense to verify old ledgers as + // we want to check the latest state. + var historyLatestSequence uint32 + historyLatestSequence, err = s.historyAdapter.GetLatestLedgerSequence() + if err != nil { + return errors.Wrap(err, "Error getting the latest ledger sequence") + } + + if ledgerSequence < historyLatestSequence { + localLog.Info("Current ledger is old. Cancelling...") + return nil + } + + if ledgerSequence == historyLatestSequence { + break + } + + localLog.Info("Waiting for stellar-core to publish HAS...") + select { + case <-s.ctx.Done(): + localLog.Info("State verifier shut down...") + return nil + case <-time.After(5 * time.Second): + // Wait for stellar-core to publish HAS + retries++ + if retries == 12 { + localLog.Info("Checkpoint not published. Cancelling...") + return nil + } + } + } + } + + totalByType := map[string]int64{} + + startTime := time.Now() + defer func() { + duration := time.Since(startTime).Seconds() + if updateMetrics { + // Don't update metrics if context cancelled. 
+ if s.ctx.Err() != context.Canceled { + s.Metrics().StateVerifyDuration.Observe(float64(duration)) + for typ, tot := range totalByType { + s.Metrics().StateVerifyLedgerEntriesCount. + With(prometheus.Labels{"type": typ}).Set(float64(tot)) + } + } + } + log.WithField("duration", duration).Info("State verification finished") + + }() + + localLog.Info("Creating state reader...") + + stateReader, err := s.historyAdapter.GetState(s.ctx, ledgerSequence) + if err != nil { + return errors.Wrap(err, "Error running GetState") + } + defer stateReader.Close() + + verifier := verify.NewStateVerifier(stateReader, nil) + + assetStats := processors.AssetStatSet{} + total := int64(0) + for { + var keys []xdr.LedgerKey + keys, err = verifier.GetLedgerKeys(verifyBatchSize) + if err != nil { + return errors.Wrap(err, "verifier.GetLedgerKeys") + } + + if len(keys) == 0 { + break + } + + accounts := make([]string, 0, verifyBatchSize) + data := make([]xdr.LedgerKeyData, 0, verifyBatchSize) + offers := make([]int64, 0, verifyBatchSize) + trustLines := make([]xdr.LedgerKeyTrustLine, 0, verifyBatchSize) + cBalances := make([]xdr.ClaimableBalanceId, 0, verifyBatchSize) + lPools := make([]xdr.PoolId, 0, verifyBatchSize) + for _, key := range keys { + switch key.Type { + case xdr.LedgerEntryTypeAccount: + accounts = append(accounts, key.Account.AccountId.Address()) + totalByType["accounts"]++ + case xdr.LedgerEntryTypeData: + data = append(data, *key.Data) + totalByType["data"]++ + case xdr.LedgerEntryTypeOffer: + offers = append(offers, int64(key.Offer.OfferId)) + totalByType["offers"]++ + case xdr.LedgerEntryTypeTrustline: + trustLines = append(trustLines, *key.TrustLine) + totalByType["trust_lines"]++ + case xdr.LedgerEntryTypeClaimableBalance: + cBalances = append(cBalances, key.ClaimableBalance.BalanceId) + totalByType["claimable_balances"]++ + case xdr.LedgerEntryTypeLiquidityPool: + lPools = append(lPools, key.LiquidityPool.LiquidityPoolId) + totalByType["liquidity_pools"]++ + default: + return errors.New("GetLedgerKeys return unexpected type") + } + } + + err = addAccountsToStateVerifier(s.ctx, verifier, historyQ, accounts) + if err != nil { + return errors.Wrap(err, "addAccountsToStateVerifier failed") + } + + err = addDataToStateVerifier(s.ctx, verifier, historyQ, data) + if err != nil { + return errors.Wrap(err, "addDataToStateVerifier failed") + } + + err = addOffersToStateVerifier(s.ctx, verifier, historyQ, offers) + if err != nil { + return errors.Wrap(err, "addOffersToStateVerifier failed") + } + + err = addTrustLinesToStateVerifier(s.ctx, verifier, assetStats, historyQ, trustLines) + if err != nil { + return errors.Wrap(err, "addTrustLinesToStateVerifier failed") + } + + err = addClaimableBalanceToStateVerifier(s.ctx, verifier, assetStats, historyQ, cBalances) + if err != nil { + return errors.Wrap(err, "addClaimableBalanceToStateVerifier failed") + } + + err = addLiquidityPoolsToStateVerifier(s.ctx, verifier, assetStats, historyQ, lPools) + if err != nil { + return errors.Wrap(err, "addLiquidityPoolsToStateVerifier failed") + } + + total += int64(len(keys)) + localLog.WithField("total", total).Info("Batch added to StateVerifier") + } + + localLog.WithField("total", total).Info("Finished writing to StateVerifier") + + countAccounts, err := historyQ.CountAccounts(s.ctx) + if err != nil { + return errors.Wrap(err, "Error running historyQ.CountAccounts") + } + + countData, err := historyQ.CountAccountsData(s.ctx) + if err != nil { + return errors.Wrap(err, "Error running historyQ.CountData") + } + + 
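The Count* queries on either side of this point feed a single invariant: the number of ledger entries streamed from the checkpoint state must equal the sum of the row counts across Horizon's tables, which is what the later verifier.Verify call enforces (on top of comparing the content of every written entry). The sketch below isolates just that counting check using plain Go values; the function and parameter names are illustrative and the code is not part of this patch.

package main

import "fmt"

// checkEntryCounts mirrors the counting half of the verification: the
// per-type totals accumulated while streaming the checkpoint state (like
// totalByType above) must sum to the same number as the rows reported by
// the database's Count* queries, otherwise the ingested state has diverged.
func checkEntryCounts(fromArchive map[string]int64, dbCounts ...int64) error {
	var archiveTotal, dbTotal int64
	for _, n := range fromArchive {
		archiveTotal += n
	}
	for _, n := range dbCounts {
		dbTotal += n
	}
	if archiveTotal != dbTotal {
		return fmt.Errorf("state error: %d entries in checkpoint state but %d rows in db", archiveTotal, dbTotal)
	}
	return nil
}

func main() {
	fromArchive := map[string]int64{"accounts": 3, "trust_lines": 2, "offers": 1}
	fmt.Println(checkEntryCounts(fromArchive, 3, 2, 1)) // <nil>
	fmt.Println(checkEntryCounts(fromArchive, 3, 1, 1)) // state error: 6 entries in checkpoint state but 5 rows in db
}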
countOffers, err := historyQ.CountOffers(s.ctx) + if err != nil { + return errors.Wrap(err, "Error running historyQ.CountOffers") + } + + countTrustLines, err := historyQ.CountTrustLines(s.ctx) + if err != nil { + return errors.Wrap(err, "Error running historyQ.CountTrustLines") + } + + countClaimableBalances, err := historyQ.CountClaimableBalances(s.ctx) + if err != nil { + return errors.Wrap(err, "Error running historyQ.CountClaimableBalances") + } + + countLiquidityPools, err := historyQ.CountLiquidityPools(s.ctx) + if err != nil { + return errors.Wrap(err, "Error running historyQ.CountLiquidityPools") + } + + err = verifier.Verify(countAccounts + countData + countOffers + countTrustLines + countClaimableBalances + countLiquidityPools) + if err != nil { + return errors.Wrap(err, "verifier.Verify failed") + } + + err = checkAssetStats(s.ctx, assetStats, historyQ) + if err != nil { + return errors.Wrap(err, "checkAssetStats failed") + } + + localLog.Info("State correct") + updateMetrics = true + return nil +} + +func checkAssetStats(ctx context.Context, set processors.AssetStatSet, q history.IngestionQ) error { + page := db2.PageQuery{ + Order: "asc", + Limit: assetStatsBatchSize, + } + + for { + assetStats, err := q.GetAssetStats(ctx, "", "", page) + if err != nil { + return errors.Wrap(err, "could not fetch asset stats from db") + } + if len(assetStats) == 0 { + break + } + + for _, assetStat := range assetStats { + fromSet, removed := set.Remove(assetStat.AssetType, assetStat.AssetCode, assetStat.AssetIssuer) + if !removed { + return ingest.NewStateError( + fmt.Errorf( + "db contains asset stat with code %s issuer %s which is missing from HAS", + assetStat.AssetCode, assetStat.AssetIssuer, + ), + ) + } + + if fromSet != assetStat { + return ingest.NewStateError( + fmt.Errorf( + "db asset stat with code %s issuer %s does not match asset stat from HAS: expected=%v actual=%v", + assetStat.AssetCode, assetStat.AssetIssuer, fromSet, assetStat, + ), + ) + } + } + + page.Cursor = assetStats[len(assetStats)-1].PagingToken() + } + + if len(set) > 0 { + return ingest.NewStateError( + fmt.Errorf( + "HAS contains %d more asset stats than db", + len(set), + ), + ) + } + return nil +} + +func addAccountsToStateVerifier(ctx context.Context, verifier *verify.StateVerifier, q history.IngestionQ, ids []string) error { + if len(ids) == 0 { + return nil + } + + accounts, err := q.GetAccountsByIDs(ctx, ids) + if err != nil { + return errors.Wrap(err, "Error running history.Q.GetAccountsByIDs") + } + + signers, err := q.SignersForAccounts(ctx, ids) + if err != nil { + return errors.Wrap(err, "Error running history.Q.SignersForAccounts") + } + + masterWeightMap := make(map[string]int32) + signersMap := make(map[string][]xdr.Signer) + // map[accountID]map[signerKey]sponsor + sponsoringSignersMap := make(map[string]map[string]string) + for _, row := range signers { + if row.Account == row.Signer { + masterWeightMap[row.Account] = row.Weight + } else { + signersMap[row.Account] = append( + signersMap[row.Account], + xdr.Signer{ + Key: xdr.MustSigner(row.Signer), + Weight: xdr.Uint32(row.Weight), + }, + ) + if sponsoringSignersMap[row.Account] == nil { + sponsoringSignersMap[row.Account] = make(map[string]string) + } + sponsoringSignersMap[row.Account][row.Signer] = row.Sponsor.String + } + } + + for _, row := range accounts { + var inflationDest *xdr.AccountId + if row.InflationDestination != "" { + t := xdr.MustAddress(row.InflationDestination) + inflationDest = &t + } + + // Ensure master weight matches, if 
not it's a state error! + if int32(row.MasterWeight) != masterWeightMap[row.AccountID] { + return ingest.NewStateError( + fmt.Errorf( + "Master key weight in account %s does not match (expected=%d, actual=%d)", + row.AccountID, + masterWeightMap[row.AccountID], + int32(row.MasterWeight), + ), + ) + } + + signers := xdr.SortSignersByKey(signersMap[row.AccountID]) + signerSponsoringIDs := make([]xdr.SponsorshipDescriptor, len(signers)) + for i, signer := range signers { + sponsor := sponsoringSignersMap[row.AccountID][signer.Key.Address()] + if sponsor != "" { + signerSponsoringIDs[i] = xdr.MustAddressPtr(sponsor) + } + } + + account := &xdr.AccountEntry{ + AccountId: xdr.MustAddress(row.AccountID), + Balance: xdr.Int64(row.Balance), + SeqNum: xdr.SequenceNumber(row.SequenceNumber), + NumSubEntries: xdr.Uint32(row.NumSubEntries), + InflationDest: inflationDest, + Flags: xdr.Uint32(row.Flags), + HomeDomain: xdr.String32(row.HomeDomain), + Thresholds: xdr.Thresholds{ + row.MasterWeight, + row.ThresholdLow, + row.ThresholdMedium, + row.ThresholdHigh, + }, + Signers: signers, + Ext: xdr.AccountEntryExt{ + V: 1, + V1: &xdr.AccountEntryExtensionV1{ + Liabilities: xdr.Liabilities{ + Buying: xdr.Int64(row.BuyingLiabilities), + Selling: xdr.Int64(row.SellingLiabilities), + }, + Ext: xdr.AccountEntryExtensionV1Ext{ + V: 2, + V2: &xdr.AccountEntryExtensionV2{ + NumSponsored: xdr.Uint32(row.NumSponsored), + NumSponsoring: xdr.Uint32(row.NumSponsoring), + SignerSponsoringIDs: signerSponsoringIDs, + }, + }, + }, + }, + } + + entry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: xdr.Uint32(row.LastModifiedLedger), + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: account, + }, + } + addLedgerEntrySponsor(&entry, row.Sponsor) + err = verifier.Write(entry) + if err != nil { + return err + } + } + + return nil +} + +func addDataToStateVerifier(ctx context.Context, verifier *verify.StateVerifier, q history.IngestionQ, lkeys []xdr.LedgerKeyData) error { + if len(lkeys) == 0 { + return nil + } + var keys []history.AccountDataKey + for _, k := range lkeys { + keys = append(keys, history.AccountDataKey{ + AccountID: k.AccountId.Address(), + DataName: string(k.DataName), + }) + } + data, err := q.GetAccountDataByKeys(ctx, keys) + if err != nil { + return errors.Wrap(err, "Error running history.Q.GetAccountDataByKeys") + } + + for _, row := range data { + entry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: xdr.Uint32(row.LastModifiedLedger), + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeData, + Data: &xdr.DataEntry{ + AccountId: xdr.MustAddress(row.AccountID), + DataName: xdr.String64(row.Name), + DataValue: xdr.DataValue(row.Value), + }, + }, + } + addLedgerEntrySponsor(&entry, row.Sponsor) + err := verifier.Write(entry) + if err != nil { + return err + } + } + + return nil +} + +func addOffersToStateVerifier( + ctx context.Context, + verifier *verify.StateVerifier, + q history.IngestionQ, + ids []int64, +) error { + if len(ids) == 0 { + return nil + } + + offers, err := q.GetOffersByIDs(ctx, ids) + if err != nil { + return errors.Wrap(err, "Error running history.Q.GetOfferByIDs") + } + + for _, row := range offers { + offerXDR := offerToXDR(row) + entry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: xdr.Uint32(row.LastModifiedLedger), + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &offerXDR, + }, + } + addLedgerEntrySponsor(&entry, row.Sponsor) + err := verifier.Write(entry) + if err != nil { + return err + } + } + + return nil +} + +func offerToXDR(row 
history.Offer) xdr.OfferEntry { + return xdr.OfferEntry{ + SellerId: xdr.MustAddress(row.SellerID), + OfferId: xdr.Int64(row.OfferID), + Selling: row.SellingAsset, + Buying: row.BuyingAsset, + Amount: xdr.Int64(row.Amount), + Price: xdr.Price{ + N: xdr.Int32(row.Pricen), + D: xdr.Int32(row.Priced), + }, + Flags: xdr.Uint32(row.Flags), + } +} + +func addTrustLinesToStateVerifier( + ctx context.Context, + verifier *verify.StateVerifier, + assetStats processors.AssetStatSet, + q history.IngestionQ, + keys []xdr.LedgerKeyTrustLine, +) error { + if len(keys) == 0 { + return nil + } + + var ledgerKeyStrings []string + for _, key := range keys { + var ledgerKey xdr.LedgerKey + if err := ledgerKey.SetTrustline(key.AccountId, key.Asset); err != nil { + return errors.Wrap(err, "Error running ledgerKey.SetTrustline") + } + b64, err := ledgerKey.MarshalBinaryBase64() + if err != nil { + return errors.Wrap(err, "Error running ledgerKey.MarshalBinaryBase64") + } + ledgerKeyStrings = append(ledgerKeyStrings, b64) + } + + trustLines, err := q.GetTrustLinesByKeys(ctx, ledgerKeyStrings) + if err != nil { + return errors.Wrap(err, "Error running history.Q.GetTrustLinesByKeys") + } + + for _, row := range trustLines { + var entry xdr.LedgerEntry + entry, err = trustLineToXDR(row) + if err != nil { + return err + } + + if err = verifier.Write(entry); err != nil { + return err + } + if err = assetStats.AddTrustline( + ingest.Change{ + Post: &entry, + }, + ); err != nil { + return ingest.NewStateError( + errors.Wrap(err, "could not add trustline to asset stats"), + ) + } + } + + return nil +} + +func trustLineToXDR(row history.TrustLine) (xdr.LedgerEntry, error) { + var asset xdr.TrustLineAsset + switch row.AssetType { + case xdr.AssetTypeAssetTypePoolShare: + asset = xdr.TrustLineAsset{ + Type: xdr.AssetTypeAssetTypePoolShare, + LiquidityPoolId: &xdr.PoolId{}, + } + _, err := hex.Decode((*asset.LiquidityPoolId)[:], []byte(row.LiquidityPoolID)) + if err != nil { + return xdr.LedgerEntry{}, errors.Wrap(err, "Error decoding liquidity pool id") + } + case xdr.AssetTypeAssetTypeNative: + asset = xdr.MustNewNativeAsset().ToTrustLineAsset() + default: + creditAsset, err := xdr.NewCreditAsset(row.AssetCode, row.AssetIssuer) + if err != nil { + return xdr.LedgerEntry{}, errors.Wrap(err, "Error decoding credit asset") + } + asset = creditAsset.ToTrustLineAsset() + } + + trustline := xdr.TrustLineEntry{ + AccountId: xdr.MustAddress(row.AccountID), + Asset: asset, + Balance: xdr.Int64(row.Balance), + Limit: xdr.Int64(row.Limit), + Flags: xdr.Uint32(row.Flags), + Ext: xdr.TrustLineEntryExt{ + V: 1, + V1: &xdr.TrustLineEntryV1{ + Liabilities: xdr.Liabilities{ + Buying: xdr.Int64(row.BuyingLiabilities), + Selling: xdr.Int64(row.SellingLiabilities), + }, + }, + }, + } + entry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: xdr.Uint32(row.LastModifiedLedger), + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &trustline, + }, + } + addLedgerEntrySponsor(&entry, row.Sponsor) + return entry, nil +} + +func addClaimableBalanceToStateVerifier( + ctx context.Context, + verifier *verify.StateVerifier, + assetStats processors.AssetStatSet, + q history.IngestionQ, + ids []xdr.ClaimableBalanceId, +) error { + if len(ids) == 0 { + return nil + } + + var idStrings []string + e := xdr.NewEncodingBuffer() + for _, id := range ids { + idString, err := e.MarshalHex(id) + if err != nil { + return err + } + idStrings = append(idStrings, idString) + } + cBalances, err := q.GetClaimableBalancesByID(ctx, idStrings) + if 
err != nil { + return errors.Wrap(err, "Error running history.Q.GetClaimableBalancesByID") + } + + for _, row := range cBalances { + claimants := []xdr.Claimant{} + for _, claimant := range row.Claimants { + claimants = append(claimants, xdr.Claimant{ + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress(claimant.Destination), + Predicate: claimant.Predicate, + }, + }) + } + claimants = xdr.SortClaimantsByDestination(claimants) + var balanceID xdr.ClaimableBalanceId + if err := xdr.SafeUnmarshalHex(row.BalanceID, &balanceID); err != nil { + return err + } + cBalance := xdr.ClaimableBalanceEntry{ + BalanceId: balanceID, + Claimants: claimants, + Asset: row.Asset, + Amount: row.Amount, + } + if row.Flags != 0 { + cBalance.Ext = xdr.ClaimableBalanceEntryExt{ + V: 1, + V1: &xdr.ClaimableBalanceEntryExtensionV1{ + Flags: xdr.Uint32(row.Flags), + }, + } + } + entry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: xdr.Uint32(row.LastModifiedLedger), + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &cBalance, + }, + } + addLedgerEntrySponsor(&entry, row.Sponsor) + if err := verifier.Write(entry); err != nil { + return err + } + + if err := assetStats.AddClaimableBalance( + ingest.Change{ + Post: &entry, + }, + ); err != nil { + return ingest.NewStateError( + errors.Wrap(err, "could not add claimable balance to asset stats"), + ) + } + } + + return nil +} + +func addLiquidityPoolsToStateVerifier( + ctx context.Context, + verifier *verify.StateVerifier, + assetStats processors.AssetStatSet, + q history.IngestionQ, + ids []xdr.PoolId, +) error { + if len(ids) == 0 { + return nil + } + var idsHex = make([]string, len(ids)) + for i, id := range ids { + idsHex[i] = processors.PoolIDToString(id) + + } + lPools, err := q.GetLiquidityPoolsByID(ctx, idsHex) + if err != nil { + return errors.Wrap(err, "Error running history.Q.GetLiquidityPoolsByID") + } + + for _, row := range lPools { + lPoolEntry, err := liquidityPoolToXDR(row) + if err != nil { + return errors.Wrap(err, "Invalid liquidity pool row") + } + + entry := xdr.LedgerEntry{ + LastModifiedLedgerSeq: xdr.Uint32(row.LastModifiedLedger), + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &lPoolEntry, + }, + } + if err := verifier.Write(entry); err != nil { + return err + } + + if err := assetStats.AddLiquidityPool( + ingest.Change{ + Post: &entry, + }, + ); err != nil { + return ingest.NewStateError( + errors.Wrap(err, "could not add claimable balance to asset stats"), + ) + } + } + + return nil +} + +func liquidityPoolToXDR(row history.LiquidityPool) (xdr.LiquidityPoolEntry, error) { + if len(row.AssetReserves) != 2 { + return xdr.LiquidityPoolEntry{}, fmt.Errorf("unexpected number of asset reserves (%d), expected %d", len(row.AssetReserves), 2) + } + id, err := hex.DecodeString(row.PoolID) + if err != nil { + return xdr.LiquidityPoolEntry{}, errors.Wrap(err, "Error decoding pool ID") + } + var poolID xdr.PoolId + if len(id) != len(poolID) { + return xdr.LiquidityPoolEntry{}, fmt.Errorf("Error decoding pool ID, incorrect length (%d)", len(id)) + } + copy(poolID[:], id) + + var lPoolEntry = xdr.LiquidityPoolEntry{ + LiquidityPoolId: poolID, + Body: xdr.LiquidityPoolEntryBody{ + Type: row.Type, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: row.AssetReserves[0].Asset, + AssetB: row.AssetReserves[1].Asset, + Fee: xdr.Int32(row.Fee), + }, + ReserveA: 
xdr.Int64(row.AssetReserves[0].Reserve), + ReserveB: xdr.Int64(row.AssetReserves[1].Reserve), + TotalPoolShares: xdr.Int64(row.ShareCount), + PoolSharesTrustLineCount: xdr.Int64(row.TrustlineCount), + }, + }, + } + return lPoolEntry, nil +} + +func addLedgerEntrySponsor(entry *xdr.LedgerEntry, sponsor null.String) { + ledgerEntrySponsor := xdr.SponsorshipDescriptor(nil) + + if !sponsor.IsZero() { + ledgerEntrySponsor = xdr.MustAddressPtr(sponsor.String) + } + entry.Ext = xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: ledgerEntrySponsor, + }, + } +} diff --git a/services/horizon/internal/ingest/verify/main.go b/services/horizon/internal/ingest/verify/main.go new file mode 100644 index 0000000000..416bc7b9de --- /dev/null +++ b/services/horizon/internal/ingest/verify/main.go @@ -0,0 +1,217 @@ +// Package verify provides helpers used for verifying if the ingested data is +// correct. +package verify + +import ( + "bytes" + "encoding/base64" + "io" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// TransformLedgerEntryFunction is a function that transforms ledger entry +// into a form that should be compared to checkpoint state. It can be also used +// to decide if the given entry should be ignored during verification. +// Sometimes the application needs only specific type entries or specific fields +// for a given entry type. Use this function to create a common form of an entry +// that will be used for equality check. +type TransformLedgerEntryFunction func(xdr.LedgerEntry) (ignore bool, newEntry xdr.LedgerEntry) + +// StateVerifier verifies if ledger entries provided by Add method are the same +// as in the checkpoint ledger entries provided by CheckpointChangeReader. +// The algorithm works in the following way: +// 0. Develop `transformFunction`. It should remove all fields and objects not +// stored in your app. For example, if you only store accounts, all other +// ledger entry types should be ignored (return ignore = true). +// 1. In a loop, get entries from history archive by calling GetEntries() +// and Write() your version of entries found in the batch (in any order). +// 2. When GetEntries() return no more entries, call Verify with a number of +// entries in your storage (to find if some extra entires exist in your +// storage). +// Functions will return StateError type if state is found to be incorrect. +// It's user responsibility to call `stateReader.Close()` when reading is done. +// Check Horizon for an example how to use this tool. +type StateVerifier struct { + stateReader ingest.ChangeReader + // transformFunction transforms (or ignores) ledger entries streamed from + // checkpoint buckets to match the form added by `Write`. Read + // TransformLedgerEntryFunction godoc for more information. + transformFunction TransformLedgerEntryFunction + + readEntries int + readingDone bool + + currentEntries map[string]xdr.LedgerEntry + encodingBuffer *xdr.EncodingBuffer +} + +func NewStateVerifier(stateReader ingest.ChangeReader, tf TransformLedgerEntryFunction) *StateVerifier { + return &StateVerifier{ + stateReader: stateReader, + transformFunction: tf, + encodingBuffer: xdr.NewEncodingBuffer(), + } +} + +// GetLedgerKeys returns up to `count` ledger keys from history buckets +// storing actual entries in cache to compare in Write. 
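+//
+// A minimal usage sketch (illustrative only; `reader` is the caller's
+// ingest.ChangeReader and `countInStorage` is the number of entries held in
+// the caller's own storage):
+//
+//	verifier := NewStateVerifier(reader, nil)
+//	for {
+//		keys, err := verifier.GetLedgerKeys(1000)
+//		if err != nil {
+//			return err
+//		}
+//		if len(keys) == 0 {
+//			break
+//		}
+//		// Load the entries for `keys` from local storage and pass each of
+//		// them to Write before requesting the next batch.
+//	}
+//	return verifier.Verify(countInStorage)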
+func (v *StateVerifier) GetLedgerKeys(count int) ([]xdr.LedgerKey, error) { + err := v.checkUnreadEntries() + if err != nil { + return nil, err + } + + keys := make([]xdr.LedgerKey, 0, count) + v.currentEntries = make(map[string]xdr.LedgerEntry) + + for count > 0 { + entryChange, err := v.stateReader.Read() + if err != nil { + if err == io.EOF { + v.readingDone = true + return keys, nil + } + return keys, err + } + + entry := *entryChange.Post + + if v.transformFunction != nil { + ignore, _ := v.transformFunction(entry) + if ignore { + continue + } + } + + ledgerKey := entry.LedgerKey() + key, err := v.encodingBuffer.MarshalBinary(ledgerKey) + if err != nil { + return keys, errors.Wrap(err, "Error marshaling ledgerKey") + } + + keys = append(keys, ledgerKey) + entry.Normalize() + v.currentEntries[string(key)] = entry + + count-- + v.readEntries++ + } + + return keys, nil +} + +// Write compares the entry with entries in the latest batch of entries fetched +// using `GetEntries`. Entries don't need to follow the order in entries returned +// by `GetEntries`. +// Warning: Write will call Normalize() on `entry` that can modify it! +// Any `StateError` returned by this method indicates invalid state! +func (v *StateVerifier) Write(entry xdr.LedgerEntry) error { + actualEntry := entry.Normalize() + actualEntryMarshaled, err := v.encodingBuffer.MarshalBinary(actualEntry) + if err != nil { + return errors.Wrap(err, "Error marshaling actualEntry") + } + + // safe, since we convert to string right away (causing a copy) + key, err := v.encodingBuffer.UnsafeMarshalBinary(actualEntry.LedgerKey()) + if err != nil { + return errors.Wrap(err, "Error marshaling ledgerKey") + } + + expectedEntry, exist := v.currentEntries[string(key)] + if !exist { + return ingest.NewStateError(errors.Errorf( + "Cannot find entry in currentEntries map: %s (key = %s)", + base64.StdEncoding.EncodeToString(actualEntryMarshaled), + base64.StdEncoding.EncodeToString(key), + )) + } + delete(v.currentEntries, string(key)) + + preTransformExpectedEntry := expectedEntry + preTransformExpectedEntryMarshaled, err := v.encodingBuffer.MarshalBinary(&preTransformExpectedEntry) + if err != nil { + return errors.Wrap(err, "Error marshaling preTransformExpectedEntry") + } + + if v.transformFunction != nil { + var ignore bool + ignore, expectedEntry = v.transformFunction(expectedEntry) + // Extra check: if entry was ignored in GetEntries, it shouldn't be + // ignored here. + if ignore { + return errors.Errorf( + "Entry ignored in GetEntries but not ignored in Write: %s. Possibly transformFunction is buggy.", + base64.StdEncoding.EncodeToString(preTransformExpectedEntryMarshaled), + ) + } + } + + expectedEntryMarshaled, err := v.encodingBuffer.MarshalBinary(&expectedEntry) + if err != nil { + return errors.Wrap(err, "Error marshaling expectedEntry") + } + + if !bytes.Equal(actualEntryMarshaled, expectedEntryMarshaled) { + return ingest.NewStateError(errors.Errorf( + "Entry does not match the fetched entry. Expected: %s (pretransform = %s), actual: %s", + base64.StdEncoding.EncodeToString(expectedEntryMarshaled), + base64.StdEncoding.EncodeToString(preTransformExpectedEntryMarshaled), + base64.StdEncoding.EncodeToString(actualEntryMarshaled), + )) + } + + return nil +} + +// Verify should be run after all GetEntries/Write calls. If there were no errors +// so far it means that all entries present in history buckets matches the entries +// in application storage. 
However, it's still possible that state is invalid when: +// * Not all entries have been read from history buckets (ex. due to a bug). +// * Some entries were not compared using Write. +// * There are some extra entries in application storage not present in history +// buckets. +// Any `StateError` returned by this method indicates invalid state! +func (v *StateVerifier) Verify(countAll int) error { + err := v.checkUnreadEntries() + if err != nil { + return err + } + + if !v.readingDone { + return errors.New("There are unread entries in state reader. Process all entries before calling Verify.") + } + + if v.readEntries != countAll { + return ingest.NewStateError(errors.Errorf( + "Number of entries read using GetEntries (%d) does not match number of entries in your storage (%d).", + v.readEntries, + countAll, + )) + } + + return nil +} + +func (v *StateVerifier) checkUnreadEntries() error { + if len(v.currentEntries) > 0 { + var entry xdr.LedgerEntry + for _, e := range v.currentEntries { + entry = e + break + } + + // Ignore error as StateError below is more important + entryString, _ := v.encodingBuffer.MarshalBase64(&entry) + return ingest.NewStateError(errors.Errorf( + "Entries (%d) not found locally, example: %s", + len(v.currentEntries), + entryString, + )) + } + + return nil +} diff --git a/services/horizon/internal/ingest/verify/main_test.go b/services/horizon/internal/ingest/verify/main_test.go new file mode 100644 index 0000000000..6f8b2947ed --- /dev/null +++ b/services/horizon/internal/ingest/verify/main_test.go @@ -0,0 +1,307 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package verify + +import ( + "fmt" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +func assertStateError(t *testing.T, err error, expectStateError bool) { + _, ok := err.(ingest.StateError) + if expectStateError { + assert.True(t, ok, "err should be StateError") + } else { + assert.False(t, ok, "err should not be StateError") + } +} + +func TestStateVerifierTestSuite(t *testing.T) { + suite.Run(t, new(StateVerifierTestSuite)) +} + +type StateVerifierTestSuite struct { + suite.Suite + verifier *StateVerifier + mockStateReader *ingest.MockChangeReader +} + +func (s *StateVerifierTestSuite) SetupTest() { + s.mockStateReader = &ingest.MockChangeReader{} + s.verifier = NewStateVerifier(s.mockStateReader, nil) +} + +func (s *StateVerifierTestSuite) TearDownTest() { + s.mockStateReader.AssertExpectations(s.T()) +} + +func (s *StateVerifierTestSuite) TestNoEntries() { + s.mockStateReader.On("Read").Return(ingest.Change{}, io.EOF).Once() + + keys, err := s.verifier.GetLedgerKeys(10) + s.Assert().NoError(err) + s.Assert().Len(keys, 0) +} + +func (s *StateVerifierTestSuite) TestReturnErrorOnStateReaderError() { + s.mockStateReader.On("Read").Return(ingest.Change{}, errors.New("Read error")).Once() + + _, err := s.verifier.GetLedgerKeys(10) + s.Assert().EqualError(err, "Read error") +} + +func (s *StateVerifierTestSuite) TestCurrentEntriesNotEmpty() { + entry := makeAccountLedgerEntry() + entryBase64, err := xdr.MarshalBase64(entry) + s.Assert().NoError(err) + + ledgerKey := entry.LedgerKey() + ledgerKeyBase64, err := xdr.MarshalBase64(ledgerKey) + s.Assert().NoError(err) + + s.verifier.currentEntries = map[string]xdr.LedgerEntry{ + ledgerKeyBase64: entry, + } + + _, err = s.verifier.GetLedgerKeys(10) + 
s.Assert().Error(err) + assertStateError(s.T(), err, true) + s.Assert().EqualError(err, "Entries (1) not found locally, example: "+entryBase64) + + err = s.verifier.Verify(10) + s.Assert().Error(err) + assertStateError(s.T(), err, true) + s.Assert().EqualError(err, "Entries (1) not found locally, example: "+entryBase64) +} + +func (s *StateVerifierTestSuite) TestTransformFunction() { + accountEntry := makeAccountLedgerEntry() + s.mockStateReader. + On("Read"). + Return(ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Post: &accountEntry, + }, nil).Once() + + offerEntry := makeOfferLedgerEntry() + s.mockStateReader. + On("Read"). + Return(ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Post: &offerEntry, + }, nil).Once() + + s.mockStateReader.On("Read").Return(ingest.Change{}, io.EOF).Once() + + s.verifier.transformFunction = + func(entry xdr.LedgerEntry) (ignore bool, newEntry xdr.LedgerEntry) { + // Leave Account ID only for accounts, ignore the rest + switch entry.Data.Type { + case xdr.LedgerEntryTypeAccount: + accountEntry := entry.Data.Account + + return false, xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: accountEntry.AccountId, + }, + }, + } + default: + return true, xdr.LedgerEntry{} + } + } + + _, err := s.verifier.GetLedgerKeys(10) + s.Assert().NoError(err) + + // Check currentEntries + ledgerKey, err := accountEntry.LedgerKey().MarshalBinary() + s.Assert().NoError(err) + + // Account entry transformed and offer entry ignored + s.Assert().Len(s.verifier.currentEntries, 1) + s.Assert().Equal(accountEntry, s.verifier.currentEntries[string(ledgerKey)]) +} + +func (s *StateVerifierTestSuite) TestOnlyRequestedNumberOfKeysReturned() { + accountEntry := makeAccountLedgerEntry() + s.mockStateReader. + On("Read"). + Return(ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Post: &accountEntry, + }, nil).Once() + + // We don't mock Read() -> (io.Change{}, stdio.EOF) call here + // because this would execute `stdio.EOF` code path. + + keys, err := s.verifier.GetLedgerKeys(1) + s.Assert().NoError(err) + s.Assert().Len(keys, 1) + + // In such case Verify() should notice that not all entries read from buckets + err = s.verifier.Write(accountEntry) + s.Assert().NoError(err) + + err = s.verifier.Verify(1) + s.Assert().Error(err) + assertStateError(s.T(), err, false) + s.Assert().EqualError(err, "There are unread entries in state reader. Process all entries before calling Verify.") +} + +func (s *StateVerifierTestSuite) TestWriteEntryNotExist() { + entry := makeAccountLedgerEntry() + entryBase64, err := xdr.MarshalBase64(entry) + s.Assert().NoError(err) + + ledgerKey := entry.LedgerKey() + ledgerKeyBase64, err := xdr.MarshalBase64(ledgerKey) + s.Assert().NoError(err) + + err = s.verifier.Write(entry) + s.Assert().Error(err) + assertStateError(s.T(), err, true) + errorMsg := fmt.Sprintf( + "Cannot find entry in currentEntries map: %s (key = %s)", + entryBase64, + ledgerKeyBase64, + ) + s.Assert().EqualError(err, errorMsg) +} + +func (s *StateVerifierTestSuite) TestTransformFunctionBuggyIgnore() { + accountEntry := makeAccountLedgerEntry() + s.mockStateReader. + On("Read"). 
+ Return(ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Post: &accountEntry, + }, nil).Once() + + s.verifier.transformFunction = + func(entry xdr.LedgerEntry) (ignore bool, newEntry xdr.LedgerEntry) { + return false, xdr.LedgerEntry{} + } + + keys, err := s.verifier.GetLedgerKeys(1) + s.Assert().NoError(err) + s.Assert().Len(keys, 1) + + // Check the behaviour of transformFunction to code path to test. + s.verifier.transformFunction = + func(entry xdr.LedgerEntry) (ignore bool, newEntry xdr.LedgerEntry) { + return true, xdr.LedgerEntry{} + } + + entryBase64, err := xdr.MarshalBase64(accountEntry) + s.Assert().NoError(err) + errorMsg := fmt.Sprintf( + "Entry ignored in GetEntries but not ignored in Write: %s. Possibly transformFunction is buggy.", + entryBase64, + ) + err = s.verifier.Write(accountEntry) + s.Assert().EqualError(err, errorMsg) +} + +func (s *StateVerifierTestSuite) TestActualExpectedEntryNotEqualWrite() { + expectedEntry := makeAccountLedgerEntry() + s.mockStateReader. + On("Read"). + Return(ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Post: &expectedEntry, + }, nil).Once() + + keys, err := s.verifier.GetLedgerKeys(1) + s.Assert().NoError(err) + s.Assert().Len(keys, 1) + + actualEntry := makeAccountLedgerEntry() + actualEntry.Data.Account.Thresholds = [4]byte{1, 1, 1, 0} + actualEntry.Normalize() + + expectedEntryBase64, err := xdr.MarshalBase64(expectedEntry) + s.Assert().NoError(err) + actualEntryBase64, err := xdr.MarshalBase64(actualEntry) + s.Assert().NoError(err) + + errorMsg := fmt.Sprintf( + "Entry does not match the fetched entry. Expected: %s (pretransform = %s), actual: %s", + expectedEntryBase64, + expectedEntryBase64, + actualEntryBase64, + ) + err = s.verifier.Write(actualEntry) + s.Assert().Error(err) + assertStateError(s.T(), err, true) + s.Assert().EqualError(err, errorMsg) +} + +func (s *StateVerifierTestSuite) TestVerifyCountersMatch() { + accountEntry := makeAccountLedgerEntry() + s.mockStateReader. + On("Read"). 
+ Return(ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Post: &accountEntry, + }, nil).Once() + + s.mockStateReader.On("Read").Return(ingest.Change{}, io.EOF).Once() + + keys, err := s.verifier.GetLedgerKeys(2) + s.Assert().NoError(err) + s.Assert().Len(keys, 1) + + err = s.verifier.Write(accountEntry) + s.Assert().NoError(err) + + err = s.verifier.Verify(10) + s.Assert().Error(err) + assertStateError(s.T(), err, true) + errorMsg := fmt.Sprintf( + "Number of entries read using GetEntries (%d) does not match number of entries in your storage (%d).", + 1, + 10, + ) + s.Assert().EqualError(err, errorMsg) + + err = s.verifier.Verify(1) + s.Assert().NoError(err) +} + +func makeAccountLedgerEntry() xdr.LedgerEntry { + entry := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Thresholds: [4]byte{1, 1, 1, 1}, + }, + }, + } + entry.Normalize() + return entry +} + +func makeOfferLedgerEntry() xdr.LedgerEntry { + entry := xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &xdr.OfferEntry{ + SellerId: xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + } + entry.Normalize() + return entry +} diff --git a/services/horizon/internal/ingest/verify_range_state_test.go b/services/horizon/internal/ingest/verify_range_state_test.go new file mode 100644 index 0000000000..1e55b5d224 --- /dev/null +++ b/services/horizon/internal/ingest/verify_range_state_test.go @@ -0,0 +1,631 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package ingest + +import ( + "context" + "database/sql" + "fmt" + "io" + "testing" + + "github.com/guregu/null" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/ingest/ledgerbackend" + "github.com/stellar/go/keypair" + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest/processors" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +func TestVerifyRangeStateTestSuite(t *testing.T) { + suite.Run(t, new(VerifyRangeStateTestSuite)) +} + +type VerifyRangeStateTestSuite struct { + suite.Suite + ctx context.Context + ledgerBackend *ledgerbackend.MockDatabaseBackend + historyQ *mockDBQ + historyAdapter *mockHistoryArchiveAdapter + runner *mockProcessorsRunner + system *system +} + +func (s *VerifyRangeStateTestSuite) SetupTest() { + s.ctx = context.Background() + s.ledgerBackend = &ledgerbackend.MockDatabaseBackend{} + s.historyQ = &mockDBQ{} + s.historyAdapter = &mockHistoryArchiveAdapter{} + s.runner = &mockProcessorsRunner{} + s.system = &system{ + ctx: s.ctx, + historyQ: s.historyQ, + historyAdapter: s.historyAdapter, + ledgerBackend: s.ledgerBackend, + runner: s.runner, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + s.system.initMetrics() + + s.historyQ.On("Rollback").Return(nil).Once() +} + +func (s *VerifyRangeStateTestSuite) TearDownTest() { + t := s.T() + s.historyQ.AssertExpectations(t) + s.historyAdapter.AssertExpectations(t) + s.runner.AssertExpectations(t) +} + +func (s *VerifyRangeStateTestSuite) TestInvalidRange() { + // Recreate mock in this single test to remove Rollback assertion. 
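+	// (SetupTest registers an expected Rollback call, but this test never
+	// opens a transaction, so replacing the mock avoids a spurious failure
+	// in TearDownTest's AssertExpectations.)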
+ *s.historyQ = mockDBQ{} + + next, err := verifyRangeState{fromLedger: 0, toLedger: 0}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "invalid range: [0, 0]") + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) + + next, err = verifyRangeState{fromLedger: 0, toLedger: 100}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "invalid range: [0, 100]") + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) + + next, err = verifyRangeState{fromLedger: 100, toLedger: 0}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "invalid range: [100, 0]") + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) + + next, err = verifyRangeState{fromLedger: 100, toLedger: 99}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "invalid range: [100, 99]") + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) +} + +func (s *VerifyRangeStateTestSuite) TestBeginReturnsError() { + // Recreate mock in this single test to remove Rollback assertion. + *s.historyQ = mockDBQ{} + s.historyQ.On("Begin").Return(errors.New("my error")).Once() + + next, err := verifyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error starting a transaction: my error") + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) +} + +func (s *VerifyRangeStateTestSuite) TestGetLastLedgerIngestReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), errors.New("my error")).Once() + + next, err := verifyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error getting last ingested ledger: my error") + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) +} + +func (s *VerifyRangeStateTestSuite) TestGetLastLedgerIngestNonEmpty() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(100), nil).Once() + + next, err := verifyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Database not empty") + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) +} + +func (s *VerifyRangeStateTestSuite) TestRunHistoryArchiveIngestionReturnsError() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.BoundedRange(100, 200)).Return(nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(100), + LedgerVersion: xdr.Uint32(MaxSupportedProtocolVersion), + BucketListHash: xdr.Hash{1, 2, 3}, + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() + s.runner.On("RunHistoryArchiveIngestion", uint32(100), MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}).Return(ingest.StatsChangeProcessorResults{}, errors.New("my error")).Once() + + next, err := verifyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().Error(err) + s.Assert().EqualError(err, "Error ingesting history archive: my error") + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) +} + +func (s *VerifyRangeStateTestSuite) 
TestSuccess() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.BoundedRange(100, 200)).Return(nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(100), + LedgerVersion: xdr.Uint32(MaxSupportedProtocolVersion), + BucketListHash: xdr.Hash{1, 2, 3}, + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() + s.runner.On("RunHistoryArchiveIngestion", uint32(100), MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}).Return(ingest.StatsChangeProcessorResults{}, nil).Once() + + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(100)).Return(nil).Once() + s.historyQ.On("Commit").Return(nil).Once() + + for i := uint32(101); i <= 200; i++ { + s.historyQ.On("Begin").Return(nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(i), + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(i)).Return(meta, nil).Once() + + s.runner.On("RunAllProcessorsOnLedger", meta).Return( + ledgerStats{}, + nil, + ).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, i).Return(nil).Once() + s.historyQ.On("Commit").Return(nil).Once() + } + + s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(100), uint32(200)).Return(nil).Once() + + next, err := verifyRangeState{fromLedger: 100, toLedger: 200}.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) +} + +// Bartek: looks like this test really tests the state verifier. Instead, I think we should just ensure +// data is passed so a single account would be enough to test if the FSM state works correctly. 
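+//
+// TestSuccessWithVerify runs the verify-range flow with verifyState enabled:
+// besides ingesting ledgers 100-110 it mocks the cloned read-only ingestion
+// session and the history-archive change reader consumed by the state
+// verifier.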
+func (s *VerifyRangeStateTestSuite) TestSuccessWithVerify() { + s.historyQ.On("Begin").Return(nil).Once() + s.historyQ.On("GetLastLedgerIngest", s.ctx).Return(uint32(0), nil).Once() + s.ledgerBackend.On("PrepareRange", s.ctx, ledgerbackend.BoundedRange(100, 110)).Return(nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(100), + LedgerVersion: xdr.Uint32(MaxSupportedProtocolVersion), + BucketListHash: xdr.Hash{1, 2, 3}, + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(100)).Return(meta, nil).Once() + s.runner.On("RunHistoryArchiveIngestion", uint32(100), MaxSupportedProtocolVersion, xdr.Hash{1, 2, 3}).Return(ingest.StatsChangeProcessorResults{}, nil).Once() + + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, uint32(100)).Return(nil).Once() + s.historyQ.On("Commit").Return(nil).Once() + + for i := uint32(101); i <= 110; i++ { + s.historyQ.On("Begin").Return(nil).Once() + + meta := xdr.LedgerCloseMeta{ + V0: &xdr.LedgerCloseMetaV0{ + LedgerHeader: xdr.LedgerHeaderHistoryEntry{ + Header: xdr.LedgerHeader{ + LedgerSeq: xdr.Uint32(i), + }, + }, + }, + } + s.ledgerBackend.On("GetLedger", s.ctx, uint32(i)).Return(meta, nil).Once() + + s.runner.On("RunAllProcessorsOnLedger", meta).Return( + ledgerStats{}, + nil, + ).Once() + s.historyQ.On("UpdateLastLedgerIngest", s.ctx, i).Return(nil).Once() + s.historyQ.On("Commit").Return(nil).Once() + } + + s.historyQ.On("RebuildTradeAggregationBuckets", s.ctx, uint32(100), uint32(110)).Return(nil).Once() + + clonedQ := &mockDBQ{} + s.historyQ.On("CloneIngestionQ").Return(clonedQ).Once() + + clonedQ.On("BeginTx", mock.AnythingOfType("*sql.TxOptions")).Run(func(args mock.Arguments) { + arg := args.Get(0).(*sql.TxOptions) + s.Assert().Equal(sql.LevelRepeatableRead, arg.Isolation) + s.Assert().True(arg.ReadOnly) + }).Return(nil).Once() + clonedQ.On("Rollback").Return(nil).Once() + clonedQ.On("GetLastLedgerIngestNonBlocking", s.ctx).Return(uint32(63), nil).Once() + mockChangeReader := &ingest.MockChangeReader{} + mockChangeReader.On("Close").Return(nil).Once() + mockAccountID := "GACMZD5VJXTRLKVET72CETCYKELPNCOTTBDC6DHFEUPLG5DHEK534JQX" + sponsor := "GAREDQSXC7QZYJLVMTU7XZW4LSILQ4M5U4GNLO523LEWZ3JBRC5E4HLE" + signers := []string{ + "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + "GA25GQLHJU3LPEJXEIAXK23AWEA5GWDUGRSHTQHDFT6HXHVMRULSQJUJ", + "GC6G3EQFKOKIIZFTJQSCHTSXBVC4XO3I64F5IBRQNS3E5SW3MO3KWGMT", + } + accountChange := ingest.Change{ + Type: xdr.LedgerEntryTypeAccount, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeAccount, + Account: &xdr.AccountEntry{ + AccountId: xdr.MustAddress(mockAccountID), + Balance: xdr.Int64(600), + Thresholds: [4]byte{1, 0, 0, 0}, + Signers: []xdr.Signer{ + { + Key: xdr.MustSigner(signers[0]), + Weight: 1, + }, + { + Key: xdr.MustSigner(signers[1]), + Weight: 2, + }, + { + Key: xdr.MustSigner(signers[2]), + Weight: 3, + }, + }, + Ext: xdr.AccountEntryExt{ + V: 1, + V1: &xdr.AccountEntryExtensionV1{ + Liabilities: xdr.Liabilities{ + Buying: 1, + Selling: 1, + }, + Ext: xdr.AccountEntryExtensionV1Ext{ + V: 2, + V2: &xdr.AccountEntryExtensionV2{ + NumSponsored: xdr.Uint32(0), + NumSponsoring: xdr.Uint32(2), + SignerSponsoringIDs: []xdr.SponsorshipDescriptor{ + nil, + xdr.MustAddressPtr(mockAccountID), + xdr.MustAddressPtr(sponsor), + }, + }, + }, + }, + }, + }, + }, + LastModifiedLedgerSeq: xdr.Uint32(62), + Ext: xdr.LedgerEntryExt{ + V: 1, 
+ V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + offerChange := ingest.Change{ + Type: xdr.LedgerEntryTypeOffer, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &eurOffer, + }, + LastModifiedLedgerSeq: xdr.Uint32(62), + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + balanceIDStr, err := xdr.MarshalHex(balanceID) + s.Assert().NoError(err) + claimableBalance := history.ClaimableBalance{ + BalanceID: balanceIDStr, + Asset: xdr.MustNewNativeAsset(), + Amount: 10, + LastModifiedLedger: 62, + Claimants: []history.Claimant{ + { + Destination: mockAccountID, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + Sponsor: null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Flags: uint32(xdr.ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag), + } + claimableBalanceChange := ingest.Change{ + Type: xdr.LedgerEntryTypeClaimableBalance, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &xdr.ClaimableBalanceEntry{ + BalanceId: balanceID, + Claimants: []xdr.Claimant{ + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: xdr.MustAddress(claimableBalance.Claimants[0].Destination), + Predicate: claimableBalance.Claimants[0].Predicate, + }, + }, + }, + Asset: claimableBalance.Asset, + Amount: claimableBalance.Amount, + Ext: xdr.ClaimableBalanceEntryExt{ + V: 1, + V1: &xdr.ClaimableBalanceEntryExtensionV1{ + Flags: xdr.Uint32(xdr.ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag), + }, + }, + }, + }, + LastModifiedLedgerSeq: xdr.Uint32(62), + Ext: xdr.LedgerEntryExt{ + V: 1, + V1: &xdr.LedgerEntryExtensionV1{ + SponsoringId: xdr.MustAddressPtr("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + } + liquidityPool := history.LiquidityPool{ + PoolID: "cafebabedeadbeef000000000000000000000000000000000000000000000000", + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + Fee: 34, + TrustlineCount: 52115, + ShareCount: 412241, + AssetReserves: []history.LiquidityPoolAssetReserve{ + { + Asset: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Reserve: 450, + }, + { + Asset: xdr.MustNewNativeAsset(), + Reserve: 123, + }, + }, + LastModifiedLedger: 62, + } + liquidityPoolChange := ingest.Change{ + Type: xdr.LedgerEntryTypeLiquidityPool, + Pre: nil, + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + Type: xdr.LedgerEntryTypeLiquidityPool, + LiquidityPool: &xdr.LiquidityPoolEntry{ + LiquidityPoolId: xdr.PoolId{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef}, + Body: xdr.LiquidityPoolEntryBody{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolEntryConstantProduct{ + Params: xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdr.MustNewCreditAsset("USD", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + AssetB: xdr.MustNewNativeAsset(), + Fee: 34, + }, + ReserveA: 450, + ReserveB: 123, + TotalPoolShares: 412241, + PoolSharesTrustLineCount: 52115, + }, + 
}, + }, + }, + LastModifiedLedgerSeq: xdr.Uint32(62), + }, + } + + mockChangeReader.On("Read").Return(accountChange, nil).Once() + mockChangeReader.On("Read").Return(offerChange, nil).Once() + mockChangeReader.On("Read").Return(claimableBalanceChange, nil).Once() + mockChangeReader.On("Read").Return(liquidityPoolChange, nil).Once() + mockChangeReader.On("Read").Return(ingest.Change{}, io.EOF).Once() + mockChangeReader.On("Read").Return(ingest.Change{}, io.EOF).Once() + s.historyAdapter.On("GetState", s.ctx, uint32(63)).Return(mockChangeReader, nil).Once() + mockAccount := history.AccountEntry{ + AccountID: mockAccountID, + Balance: 600, + LastModifiedLedger: 62, + MasterWeight: 1, + NumSponsored: 0, + NumSponsoring: 2, + BuyingLiabilities: 1, + SellingLiabilities: 1, + Sponsor: null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + } + + clonedQ.MockQAccounts.On("GetAccountsByIDs", s.ctx, []string{mockAccountID}).Return([]history.AccountEntry{mockAccount}, nil).Once() + clonedQ.MockQSigners.On("SignersForAccounts", s.ctx, []string{mockAccountID}).Return([]history.AccountSigner{ + { + Account: mockAccountID, + Signer: mockAccountID, + Weight: 1, + }, + { + Account: mockAccountID, + Signer: signers[0], + Weight: 1, + }, + { + Account: mockAccountID, + Signer: signers[2], + Weight: 3, + Sponsor: null.StringFrom(sponsor), + }, + { + Account: mockAccountID, + Signer: signers[1], + Weight: 2, + Sponsor: null.StringFrom(mockAccountID), + }, + }, nil).Once() + clonedQ.MockQSigners.On("CountAccounts", s.ctx).Return(1, nil).Once() + mockOffer := history.Offer{ + SellerID: eurOffer.SellerId.Address(), + OfferID: int64(eurOffer.OfferId), + SellingAsset: eurOffer.Selling, + BuyingAsset: eurOffer.Buying, + Amount: int64(eurOffer.Amount), + Pricen: int32(eurOffer.Price.N), + Priced: int32(eurOffer.Price.D), + Price: float64(eurOffer.Price.N) / float64(eurOffer.Price.N), + Flags: int32(eurOffer.Flags), + LastModifiedLedger: 62, + Sponsor: null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + } + clonedQ.MockQOffers.On("GetOffersByIDs", s.ctx, []int64{int64(eurOffer.OfferId)}).Return([]history.Offer{mockOffer}, nil).Once() + clonedQ.MockQOffers.On("CountOffers", s.ctx).Return(1, nil).Once() + // TODO: add accounts data, trustlines and asset stats + clonedQ.MockQData.On("CountAccountsData", s.ctx).Return(0, nil).Once() + clonedQ.MockQAssetStats.On("CountTrustLines", s.ctx).Return(0, nil).Once() + clonedQ.MockQAssetStats.On("GetAssetStats", s.ctx, "", "", db2.PageQuery{ + Order: "asc", + Limit: assetStatsBatchSize, + }).Return([]history.ExpAssetStat{ + // Created by liquidity pool: + { + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetCode: "USD", + AssetIssuer: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + Accounts: history.ExpAssetStatAccounts{ + LiquidityPools: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + ClaimableBalances: "0", + LiquidityPools: "450", + Unauthorized: "0", + }, + Amount: "0", + }}, nil).Once() + clonedQ.MockQAssetStats.On("GetAssetStats", s.ctx, "", "", db2.PageQuery{ + Cursor: "USD_GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML_credit_alphanum4", + Order: "asc", + Limit: assetStatsBatchSize, + }).Return([]history.ExpAssetStat{}, nil).Once() + + clonedQ.MockQClaimableBalances.On("CountClaimableBalances", s.ctx).Return(1, nil).Once() + clonedQ.MockQClaimableBalances. + On("GetClaimableBalancesByID", s.ctx, []string{balanceIDStr}). 
+ Return([]history.ClaimableBalance{claimableBalance}, nil).Once() + + clonedQ.MockQLiquidityPools.On("CountLiquidityPools", s.ctx).Return(1, nil).Once() + clonedQ.MockQLiquidityPools. + On("GetLiquidityPoolsByID", s.ctx, []string{liquidityPool.PoolID}). + Return([]history.LiquidityPool{liquidityPool}, nil).Once() + + next, err := verifyRangeState{ + fromLedger: 100, toLedger: 110, verifyState: true, + }.run(s.system) + s.Assert().NoError(err) + s.Assert().Equal( + transition{node: stopState{}, sleepDuration: 0}, + next, + ) + clonedQ.AssertExpectations(s.T()) +} + +func (s *VerifyRangeStateTestSuite) TestVerifyFailsWhenAssetStatsMismatch() { + set := processors.AssetStatSet{} + + trustLineIssuer := xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + set.AddTrustline( + ingest.Change{ + Post: &xdr.LedgerEntry{ + Data: xdr.LedgerEntryData{ + TrustLine: &xdr.TrustLineEntry{ + AccountId: xdr.MustAddress(keypair.MustRandom().Address()), + Balance: 123, + Asset: xdr.MustNewCreditAsset("EUR", trustLineIssuer.Address()).ToTrustLineAsset(), + Flags: xdr.Uint32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag), + }, + }, + }, + }, + ) + + stat := history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetCode: "EUR", + AssetIssuer: trustLineIssuer.Address(), + Accounts: history.ExpAssetStatAccounts{ + Unauthorized: 1, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "0", + AuthorizedToMaintainLiabilities: "0", + Unauthorized: "123", + }, + Amount: "0", + NumAccounts: 0, + } + + s.historyQ.MockQAssetStats.On("GetAssetStats", s.ctx, "", "", db2.PageQuery{ + Order: "asc", + Limit: assetStatsBatchSize, + }).Return([]history.ExpAssetStat{stat}, nil).Once() + s.historyQ.MockQAssetStats.On("GetAssetStats", s.ctx, "", "", db2.PageQuery{ + Cursor: stat.PagingToken(), + Order: "asc", + Limit: assetStatsBatchSize, + }).Return([]history.ExpAssetStat{}, nil).Once() + + err := checkAssetStats(s.ctx, set, s.historyQ) + s.Assert().Contains(err.Error(), fmt.Sprintf("db asset stat with code EUR issuer %s does not match asset stat from HAS", trustLineIssuer.Address())) + + // Satisfy the mock + s.historyQ.Rollback() +} diff --git a/services/horizon/internal/ingest/verify_test.go b/services/horizon/internal/ingest/verify_test.go new file mode 100644 index 0000000000..0f84707f18 --- /dev/null +++ b/services/horizon/internal/ingest/verify_test.go @@ -0,0 +1,204 @@ +package ingest + +import ( + "io" + "math/rand" + "regexp" + "testing" + + "github.com/stellar/go/gxdr" + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" + "github.com/stellar/go/randxdr" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/support/db" + "github.com/stellar/go/xdr" +) + +func genAccount(tt *test.T, gen randxdr.Generator) xdr.LedgerEntryChange { + change := xdr.LedgerEntryChange{} + shape := &gxdr.LedgerEntryChange{} + numSigners := uint32(rand.Int31n(xdr.MaxSigners)) + gen.Next( + shape, + []randxdr.Preset{ + {randxdr.FieldEquals("type"), randxdr.SetU32(gxdr.LEDGER_ENTRY_CREATED.GetU32())}, + {randxdr.FieldEquals("created.data.type"), randxdr.SetU32(gxdr.ACCOUNT.GetU32())}, + {randxdr.FieldEquals("created.lastModifiedLedgerSeq"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.account.seqNum"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.account.numSubEntries"), randxdr.SetPositiveNum32}, + 
{randxdr.FieldEquals("created.data.account.balance"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.account.homeDomain"), randxdr.SetPrintableASCII}, + {randxdr.FieldEquals("created.data.account.flags"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.account.signers"), randxdr.SetVecLen(numSigners)}, + {randxdr.FieldMatches(regexp.MustCompile("created\\.data\\.account\\.signers.*weight")), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.account.ext.v1.liabilities.selling"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.account.ext.v1.liabilities.buying"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.account.ext.v1.ext.v2.numSponsoring"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.account.ext.v1.ext.v2.numSponsored"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.account.ext.v1.ext.v2.signerSponsoringIDs"), randxdr.SetVecLen(numSigners)}, + }, + ) + + tt.Assert.NoError(gxdr.Convert(shape, &change)) + return change +} + +func genAccountData(tt *test.T, gen randxdr.Generator) xdr.LedgerEntryChange { + change := xdr.LedgerEntryChange{} + shape := &gxdr.LedgerEntryChange{} + gen.Next( + shape, + []randxdr.Preset{ + {randxdr.FieldEquals("type"), randxdr.SetU32(gxdr.LEDGER_ENTRY_CREATED.GetU32())}, + {randxdr.FieldEquals("created.data.type"), randxdr.SetU32(gxdr.DATA.GetU32())}, + {randxdr.FieldEquals("created.lastModifiedLedgerSeq"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.data.dataName"), randxdr.SetPrintableASCII}, + }, + ) + + tt.Assert.NoError(gxdr.Convert(shape, &change)) + return change +} + +func genOffer(tt *test.T, gen randxdr.Generator) xdr.LedgerEntryChange { + change := xdr.LedgerEntryChange{} + shape := &gxdr.LedgerEntryChange{} + gen.Next( + shape, + []randxdr.Preset{ + {randxdr.FieldEquals("type"), randxdr.SetU32(gxdr.LEDGER_ENTRY_CREATED.GetU32())}, + {randxdr.FieldEquals("created.data.type"), randxdr.SetU32(gxdr.OFFER.GetU32())}, + {randxdr.FieldEquals("created.lastModifiedLedgerSeq"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.offer.amount"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.offer.price.n"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.offer.price.d"), randxdr.SetPositiveNum32}, + }, + ) + tt.Assert.NoError(gxdr.Convert(shape, &change)) + return change +} + +func genLiquidityPool(tt *test.T, gen randxdr.Generator) xdr.LedgerEntryChange { + change := xdr.LedgerEntryChange{} + shape := &gxdr.LedgerEntryChange{} + gen.Next( + shape, + []randxdr.Preset{ + {randxdr.FieldEquals("type"), randxdr.SetU32(gxdr.LEDGER_ENTRY_CREATED.GetU32())}, + // liquidity pools cannot be sponsored + {randxdr.FieldEquals("created.ext.v1.sponsoringID"), randxdr.SetPtr(false)}, + {randxdr.FieldEquals("created.lastModifiedLedgerSeq"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.type"), randxdr.SetU32(gxdr.LIQUIDITY_POOL.GetU32())}, + {randxdr.FieldEquals("created.data.liquidityPool.body.constantProduct.params.fee"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.liquidityPool.body.constantProduct.params.assetA.alphaNum4.assetCode"), randxdr.SetAssetCode}, + {randxdr.FieldEquals("created.data.liquidityPool.body.constantProduct.params.assetA.alphaNum12.assetCode"), randxdr.SetAssetCode}, + {randxdr.FieldEquals("created.data.liquidityPool.body.constantProduct.params.assetB.alphaNum4.assetCode"), randxdr.SetAssetCode}, + 
{randxdr.FieldEquals("created.data.liquidityPool.body.constantProduct.params.assetB.alphaNum12.assetCode"), randxdr.SetAssetCode}, + {randxdr.FieldEquals("created.data.liquidityPool.body.constantProduct.reserveA"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.liquidityPool.body.constantProduct.reserveB"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.liquidityPool.body.constantProduct.totalPoolShares"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.liquidityPool.body.constantProduct.poolSharesTrustLineCount"), randxdr.SetPositiveNum64}, + }, + ) + tt.Assert.NoError(gxdr.Convert(shape, &change)) + return change +} + +func genTrustLine(tt *test.T, gen randxdr.Generator) xdr.LedgerEntryChange { + change := xdr.LedgerEntryChange{} + shape := &gxdr.LedgerEntryChange{} + gen.Next( + shape, + []randxdr.Preset{ + {randxdr.FieldEquals("type"), randxdr.SetU32(gxdr.LEDGER_ENTRY_CREATED.GetU32())}, + {randxdr.FieldEquals("created.lastModifiedLedgerSeq"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.type"), randxdr.SetU32(gxdr.TRUSTLINE.GetU32())}, + {randxdr.FieldEquals("created.data.trustLine.flags"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.trustLine.asset.alphaNum4.assetCode"), randxdr.SetAssetCode}, + {randxdr.FieldEquals("created.data.trustLine.asset.alphaNum12.assetCode"), randxdr.SetAssetCode}, + {randxdr.FieldEquals("created.data.trustLine.balance"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.trustLine.limit"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.trustLine.ext.v1.liabilities.selling"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.trustLine.ext.v1.liabilities.buying"), randxdr.SetPositiveNum64}, + }, + ) + tt.Assert.NoError(gxdr.Convert(shape, &change)) + return change +} + +func genClaimableBalance(tt *test.T, gen randxdr.Generator) xdr.LedgerEntryChange { + change := xdr.LedgerEntryChange{} + shape := &gxdr.LedgerEntryChange{} + gen.Next( + shape, + []randxdr.Preset{ + {randxdr.FieldEquals("type"), randxdr.SetU32(gxdr.LEDGER_ENTRY_CREATED.GetU32())}, + {randxdr.FieldEquals("created.lastModifiedLedgerSeq"), randxdr.SetPositiveNum32}, + {randxdr.FieldEquals("created.data.type"), randxdr.SetU32(gxdr.CLAIMABLE_BALANCE.GetU32())}, + {randxdr.FieldEquals("created.data.claimableBalance.ext.v1.flags"), randxdr.SetPositiveNum32}, + { + randxdr.And( + randxdr.IsPtr, + randxdr.FieldMatches(regexp.MustCompile("created\\.data\\.claimableBalance\\.claimants.*notPredicate")), + ), + randxdr.SetPtr(true), + }, + {randxdr.FieldEquals("created.data.claimableBalance.amount"), randxdr.SetPositiveNum64}, + {randxdr.FieldEquals("created.data.claimableBalance.asset.alphaNum4.assetCode"), randxdr.SetAssetCode}, + {randxdr.FieldEquals("created.data.claimableBalance.asset.alphaNum12.assetCode"), randxdr.SetAssetCode}, + }, + ) + tt.Assert.NoError(gxdr.Convert(shape, &change)) + return change +} + +func TestStateVerifier(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + q := &history.Q{&db.Session{DB: tt.HorizonDB}} + + checkpointLedger := uint32(63) + changeProcessor := buildChangeProcessor(q, &ingest.StatsChangeProcessor{}, ledgerSource, checkpointLedger) + mockChangeReader := &ingest.MockChangeReader{} + + gen := randxdr.NewGenerator() + var changes []xdr.LedgerEntryChange + for i := 0; i < 100; i++ { + changes = append(changes, + genLiquidityPool(tt, gen), + genClaimableBalance(tt, gen), + genOffer(tt, 
gen), + genTrustLine(tt, gen), + genAccount(tt, gen), + genAccountData(tt, gen), + ) + } + for _, change := range ingest.GetChangesFromLedgerEntryChanges(changes) { + mockChangeReader.On("Read").Return(change, nil).Once() + tt.Assert.NoError(changeProcessor.ProcessChange(tt.Ctx, change)) + } + tt.Assert.NoError(changeProcessor.Commit(tt.Ctx)) + + q.UpdateLastLedgerIngest(tt.Ctx, checkpointLedger) + + mockChangeReader.On("Read").Return(ingest.Change{}, io.EOF).Twice() + mockChangeReader.On("Close").Return(nil).Once() + + mockHistoryAdapter := &mockHistoryArchiveAdapter{} + mockHistoryAdapter.On("GetState", tt.Ctx, uint32(checkpointLedger)).Return(mockChangeReader, nil).Once() + + sys := &system{ + ctx: tt.Ctx, + historyQ: q, + historyAdapter: mockHistoryAdapter, + checkpointManager: historyarchive.NewCheckpointManager(64), + } + sys.initMetrics() + + tt.Assert.NoError(sys.verifyState(false)) + mockChangeReader.AssertExpectations(t) + mockHistoryAdapter.AssertExpectations(t) +} diff --git a/services/horizon/internal/init.go b/services/horizon/internal/init.go new file mode 100644 index 0000000000..c37749492e --- /dev/null +++ b/services/horizon/internal/init.go @@ -0,0 +1,220 @@ +package horizon + +import ( + "context" + "net/http" + "runtime" + + "github.com/getsentry/raven-go" + "github.com/prometheus/client_golang/prometheus" + "github.com/stellar/go/exp/orderbook" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/services/horizon/internal/simplepath" + "github.com/stellar/go/services/horizon/internal/txsub" + "github.com/stellar/go/services/horizon/internal/txsub/sequence" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/log" +) + +func mustNewDBSession(subservice db.Subservice, databaseURL string, maxIdle, maxOpen int, registry *prometheus.Registry) db.SessionInterface { + log.Infof("Establishing database session at %s for %v", databaseURL, subservice) + session, err := db.Open("postgres", databaseURL) + if err != nil { + log.Fatalf("cannot open %v DB: %v", subservice, err) + } + + session.DB.SetMaxIdleConns(maxIdle) + session.DB.SetMaxOpenConns(maxOpen) + return db.RegisterMetrics(session, "horizon", subservice, registry) +} + +func mustInitHorizonDB(app *App) { + log.Infof("Initializing database...") + + maxIdle := app.config.HorizonDBMaxIdleConnections + maxOpen := app.config.HorizonDBMaxOpenConnections + if app.config.Ingest { + maxIdle -= ingest.MaxDBConnections + maxOpen -= ingest.MaxDBConnections + if maxIdle <= 0 { + log.Fatalf("max idle connections to horizon db must be greater than %d", ingest.MaxDBConnections) + } + if maxOpen <= 0 { + log.Fatalf("max open connections to horizon db must be greater than %d", ingest.MaxDBConnections) + } + } + + if app.config.RoDatabaseURL == "" { + app.historyQ = &history.Q{mustNewDBSession( + db.HistorySubservice, + app.config.DatabaseURL, + maxIdle, + maxOpen, + app.prometheusRegistry, + )} + } else { + // If RO set, use it for all DB queries + app.historyQ = &history.Q{mustNewDBSession( + db.HistorySubservice, + app.config.RoDatabaseURL, + maxIdle, + maxOpen, + app.prometheusRegistry, + )} + + app.primaryHistoryQ = &history.Q{mustNewDBSession( + db.HistoryPrimarySubservice, + app.config.DatabaseURL, + maxIdle, + maxOpen, + app.prometheusRegistry, + )} + } +} + +func initIngester(app *App) { + var err error + var coreSession db.SessionInterface + if !app.config.EnableCaptiveCoreIngestion { + coreSession = mustNewDBSession( + 
db.CoreSubservice, app.config.StellarCoreDatabaseURL, ingest.MaxDBConnections, ingest.MaxDBConnections, app.prometheusRegistry) + } + app.ingester, err = ingest.NewSystem(ingest.Config{ + CoreSession: coreSession, + HistorySession: mustNewDBSession( + db.IngestSubservice, app.config.DatabaseURL, ingest.MaxDBConnections, ingest.MaxDBConnections, app.prometheusRegistry, + ), + NetworkPassphrase: app.config.NetworkPassphrase, + // TODO: + // Use the first archive for now. We don't have a mechanism to + // use multiple archives at the same time currently. + HistoryArchiveURL: app.config.HistoryArchiveURLs[0], + CheckpointFrequency: app.config.CheckpointFrequency, + StellarCoreURL: app.config.StellarCoreURL, + StellarCoreCursor: app.config.CursorName, + CaptiveCoreBinaryPath: app.config.CaptiveCoreBinaryPath, + CaptiveCoreStoragePath: app.config.CaptiveCoreStoragePath, + CaptiveCoreToml: app.config.CaptiveCoreToml, + RemoteCaptiveCoreURL: app.config.RemoteCaptiveCoreURL, + EnableCaptiveCore: app.config.EnableCaptiveCoreIngestion, + DisableStateVerification: app.config.IngestDisableStateVerification, + EnableExtendedLogLedgerStats: app.config.IngestEnableExtendedLogLedgerStats, + }) + + if err != nil { + log.Fatal(err) + } +} + +func initPathFinder(app *App) { + orderBookGraph := orderbook.NewOrderBookGraph() + app.orderBookStream = ingest.NewOrderBookStream( + &history.Q{app.HorizonSession()}, + orderBookGraph, + ) + + app.paths = simplepath.NewInMemoryFinder(orderBookGraph, !app.config.DisablePoolPathFinding) +} + +// initSentry initialized the default sentry client with the configured DSN +func initSentry(app *App) { + if app.config.SentryDSN == "" { + return + } + + log.WithField("dsn", app.config.SentryDSN).Info("Initializing sentry") + err := raven.SetDSN(app.config.SentryDSN) + if err != nil { + log.Fatal(err) + } +} + +// initLogglyLog attaches a loggly hook to our logging system. +func initLogglyLog(app *App) { + if app.config.LogglyToken == "" { + return + } + + log.WithFields(log.F{ + "token": app.config.LogglyToken, + "tag": app.config.LogglyTag, + }).Info("Initializing loggly hook") + + hook := log.NewLogglyHook(app.config.LogglyToken, app.config.LogglyTag) + log.DefaultLogger.AddHook(hook) + + go func() { + <-app.ctx.Done() + hook.Flush() + }() +} + +func initDbMetrics(app *App) { + app.buildInfoGauge = prometheus.NewGaugeVec( + prometheus.GaugeOpts{Namespace: "horizon", Subsystem: "build", Name: "info"}, + []string{"version", "goversion"}, + ) + app.prometheusRegistry.MustRegister(app.buildInfoGauge) + app.buildInfoGauge.With(prometheus.Labels{ + "version": app.horizonVersion, + "goversion": runtime.Version(), + }).Inc() + + app.ingestingGauge = prometheus.NewGauge( + prometheus.GaugeOpts{Namespace: "horizon", Subsystem: "ingest", Name: "enabled"}, + ) + app.prometheusRegistry.MustRegister(app.ingestingGauge) + + app.ledgerState.RegisterMetrics(app.prometheusRegistry) + + app.coreState.RegisterMetrics(app.prometheusRegistry) + + app.prometheusRegistry.MustRegister(app.orderBookStream.LatestLedgerGauge) +} + +// initGoMetrics registers the Go collector provided by prometheus package which +// includes Go-related metrics. +func initGoMetrics(app *App) { + app.prometheusRegistry.MustRegister(prometheus.NewGoCollector()) +} + +// initProcessMetrics registers the process collector provided by prometheus +// package. This is only available on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. 
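// Annotation, not part of the diff: a minimal sketch of the Prometheus wiring
// performed by initDbMetrics, initGoMetrics and initProcessMetrics, using only
// prometheus client_golang APIs already referenced above. The registry name,
// namespace, label values and listen address are placeholders.
package main

import (
	"log"
	"net/http"
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	registry := prometheus.NewRegistry()

	// A build-info style gauge with the same label shape as horizon_build_info.
	buildInfo := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Namespace: "example", Subsystem: "build", Name: "info"},
		[]string{"version", "goversion"},
	)
	registry.MustRegister(buildInfo)
	buildInfo.With(prometheus.Labels{"version": "dev", "goversion": runtime.Version()}).Inc()

	// Runtime and process collectors, as registered by the init functions above.
	registry.MustRegister(prometheus.NewGoCollector())
	registry.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))

	// Expose the custom registry over HTTP for scraping.
	log.Fatal(http.ListenAndServe(":2112", promhttp.HandlerFor(registry, promhttp.HandlerOpts{})))
}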
+func initProcessMetrics(app *App) { + app.prometheusRegistry.MustRegister( + prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), + ) +} + +// initIngestMetrics registers the metrics for the ingestion into the provided +// app's metrics registry. +func initIngestMetrics(app *App) { + if app.ingester == nil { + return + } + + app.ingestingGauge.Inc() + app.ingester.RegisterMetrics(app.prometheusRegistry) +} + +func initTxSubMetrics(app *App) { + app.submitter.Init() + app.submitter.RegisterMetrics(app.prometheusRegistry) +} + +func initWebMetrics(app *App) { + app.webServer.RegisterMetrics(app.prometheusRegistry) +} + +func initSubmissionSystem(app *App) { + app.submitter = &txsub.System{ + Pending: txsub.NewDefaultSubmissionList(), + Submitter: txsub.NewDefaultSubmitter(http.DefaultClient, app.config.StellarCoreURL), + SubmissionQueue: sequence.NewManager(), + DB: func(ctx context.Context) txsub.HorizonDB { + return &history.Q{SessionInterface: app.HorizonSession()} + }, + } +} diff --git a/services/horizon/internal/integration/claimable_balance_ops_test.go b/services/horizon/internal/integration/claimable_balance_ops_test.go new file mode 100644 index 0000000000..867df6072c --- /dev/null +++ b/services/horizon/internal/integration/claimable_balance_ops_test.go @@ -0,0 +1,128 @@ +package integration + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + sdk "github.com/stellar/go/clients/horizonclient" + hEffects "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" +) + +func TestClaimableBalanceCreationOperationsAndEffects(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + t.Run("Successful", func(t *testing.T) { + op := txnbuild.CreateClaimableBalance{ + Destinations: []txnbuild.Claimant{ + txnbuild.NewClaimant(master.Address(), nil), + }, + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + txResp, err := itest.SubmitOperations(itest.MasterAccount(), master, &op) + tt.NoError(err) + + var txResult xdr.TransactionResult + err = xdr.SafeUnmarshalBase64(txResp.ResultXdr, &txResult) + tt.NoError(err) + tt.Equal(xdr.TransactionResultCodeTxSuccess, txResult.Result.Code) + + opResults, _ := txResult.OperationResults() + tt.Len(opResults, 1) + claimCreationOp := opResults[0].MustTr().CreateClaimableBalanceResult + tt.Equal(xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess, claimCreationOp.Code) + expectedBalanceID, err := xdr.MarshalHex(claimCreationOp.BalanceId) + tt.NoError(err) + + response, err := itest.Client().Operations(sdk.OperationRequest{}) + ops := response.Embedded.Records + tt.NoError(err) + tt.Len(ops, 1) + cb := ops[0].(operations.CreateClaimableBalance) + tt.Equal("native", cb.Asset) + tt.Equal("10.0000000", cb.Amount) + tt.Equal(master.Address(), cb.SourceAccount) + tt.Len(cb.Claimants, 1) + + claimant := cb.Claimants[0] + tt.Equal(master.Address(), claimant.Destination) + tt.Equal(xdr.ClaimPredicateTypeClaimPredicateUnconditional, claimant.Predicate.Type) + + eResponse, err := itest.Client().Effects(sdk.EffectRequest{ForOperation: cb.ID}) + effects := eResponse.Embedded.Records + tt.Len(effects, 4) + + claimableBalanceCreatedEffect := effects[0].(hEffects.ClaimableBalanceCreated) + tt.Equal("claimable_balance_created", claimableBalanceCreatedEffect.Type) + 
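// Annotation, not part of the diff: the test above decodes the result XDR inline
// to recover the new claimable balance ID. This is a minimal sketch of that
// decoding as a reusable helper; the function name is hypothetical, the xdr
// calls are the ones used in the test.
package example

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

// balanceIDFromResult returns the hex-encoded claimable balance ID created by
// the operation at index opIndex of a base64-encoded transaction result.
func balanceIDFromResult(resultXDR string, opIndex int) (string, error) {
	var txResult xdr.TransactionResult
	if err := xdr.SafeUnmarshalBase64(resultXDR, &txResult); err != nil {
		return "", err
	}
	opResults, ok := txResult.OperationResults()
	if !ok || opIndex >= len(opResults) {
		return "", fmt.Errorf("no operation result at index %d", opIndex)
	}
	cbResult := opResults[opIndex].MustTr().MustCreateClaimableBalanceResult()
	if cbResult.Code != xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess {
		return "", fmt.Errorf("create_claimable_balance failed with %v", cbResult.Code)
	}
	// MarshalHex produces the same string Horizon exposes as the balance ID.
	return xdr.MarshalHex(cbResult.BalanceId)
}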
tt.Equal("10.0000000", claimableBalanceCreatedEffect.Amount) + tt.Equal("native", claimableBalanceCreatedEffect.Asset) + tt.Equal(expectedBalanceID, claimableBalanceCreatedEffect.BalanceID) + tt.Equal(master.Address(), claimableBalanceCreatedEffect.Account) + + claimableBalanceClaimantCreatedEffect := effects[1].(hEffects.ClaimableBalanceClaimantCreated) + tt.Equal("claimable_balance_claimant_created", claimableBalanceClaimantCreatedEffect.Type) + tt.Equal(master.Address(), claimableBalanceClaimantCreatedEffect.Account) + tt.Equal(expectedBalanceID, claimableBalanceClaimantCreatedEffect.BalanceID) + tt.Equal("10.0000000", claimableBalanceClaimantCreatedEffect.Amount) + tt.Equal("native", claimableBalanceClaimantCreatedEffect.Asset) + tt.Equal( + xdr.ClaimPredicateTypeClaimPredicateUnconditional, + claimableBalanceClaimantCreatedEffect.Predicate.Type, + ) + + accountDebitedEffect := effects[2].(hEffects.AccountDebited) + tt.Equal("10.0000000", accountDebitedEffect.Amount) + tt.Equal("native", accountDebitedEffect.Asset.Type) + tt.Equal(master.Address(), accountDebitedEffect.Account) + + claimableBalanceSponsorshipCreated := effects[3].(hEffects.ClaimableBalanceSponsorshipCreated) + tt.Equal("claimable_balance_sponsorship_created", claimableBalanceSponsorshipCreated.Type) + tt.Equal(master.Address(), claimableBalanceSponsorshipCreated.Sponsor) + tt.Equal(master.Address(), claimableBalanceSponsorshipCreated.Account) + tt.Equal(expectedBalanceID, claimableBalanceSponsorshipCreated.BalanceID) + }) + + t.Run("Invalid", func(t *testing.T) { + keys, accounts := itest.CreateAccounts(2, "50") + op := txnbuild.CreateClaimableBalance{ + Destinations: []txnbuild.Claimant{ + txnbuild.NewClaimant(master.Address(), nil), + txnbuild.NewClaimant(keys[1].Address(), nil), + }, + Amount: "100", + Asset: txnbuild.NativeAsset{}, + } + + // this operation will fail because the claimable balance is trying to + // reserve 100 XLM but the account only has 50. 
+ _, err := itest.SubmitOperations(accounts[0], keys[0], &op) + tt.Error(err) + + response, err := itest.Client().Operations(sdk.OperationRequest{ + Order: "desc", + Limit: 1, + IncludeFailed: true, + }) + ops := response.Embedded.Records + tt.NoError(err) + tt.Len(ops, 1) + cb := ops[0].(operations.CreateClaimableBalance) + tt.False(cb.TransactionSuccessful) + tt.Equal("native", cb.Asset) + tt.Equal("100.0000000", cb.Amount) + tt.Equal(keys[0].Address(), cb.SourceAccount) + tt.Len(cb.Claimants, 2) + + eResponse, err := itest.Client().Effects(sdk.EffectRequest{ForOperation: cb.ID}) + effects := eResponse.Embedded.Records + tt.Len(effects, 0) + }) +} diff --git a/services/horizon/internal/integration/claimable_balance_test.go b/services/horizon/internal/integration/claimable_balance_test.go new file mode 100644 index 0000000000..84bb8be04d --- /dev/null +++ b/services/horizon/internal/integration/claimable_balance_test.go @@ -0,0 +1,608 @@ +package integration + +import ( + "fmt" + "strconv" + "testing" + "time" + + sdk "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + proto "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/codes" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestClaimableBalanceBasics(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + // Ensure predicting claimable balances works. + t.Run("BalanceIDs", func(t *testing.T) { + tx, err := itest.CreateSignedTransaction( + itest.MasterAccount(), + []*keypair.Full{master}, + &txnbuild.CreateClaimableBalance{ + Destinations: []txnbuild.Claimant{ + txnbuild.NewClaimant(master.Address(), nil), + }, + Asset: txnbuild.NativeAsset{}, + Amount: "42", + }, + &txnbuild.CreateClaimableBalance{ + Destinations: []txnbuild.Claimant{ + txnbuild.NewClaimant(master.Address(), nil), + }, + Asset: txnbuild.NativeAsset{}, + Amount: "24", + }) + tt.NoError(err) + + id1, err := tx.ClaimableBalanceID(0) + tt.NoError(err) + id2, err := tx.ClaimableBalanceID(1) + tt.NoError(err) + predictions := []string{id1, id2} + + var txResult xdr.TransactionResult + txResp, err := itest.Client().SubmitTransaction(tx) + tt.NoError(err) + xdr.SafeUnmarshalBase64(txResp.ResultXdr, &txResult) + opResults, ok := txResult.OperationResults() + tt.True(ok) + tt.Len(opResults, len(predictions)) + + for i, predictedId := range predictions { + claimCreationOp := opResults[i].MustTr().CreateClaimableBalanceResult + calculatedId, err := xdr.MarshalHex(claimCreationOp.BalanceId) + tt.NoError(err) + tt.Equal(calculatedId, predictedId) + + helperCalculatedId, err := txResult.ExtractBalanceIDHex(i) + tt.NoError(err) + tt.Equal(calculatedId, helperCalculatedId) + } + }) +} + +func TestHappyClaimableBalances(t *testing.T) { + itest := integration.NewTest(t, integration.Config{}) + master, client := itest.Master(), itest.Client() + + keys, accounts := itest.CreateAccounts(3, "1000") + a, b, c := keys[0], keys[1], keys[2] + accountA, accountB, accountC := accounts[0], accounts[1], accounts[2] + + // + // Each sub-test is completely self-contained: at the end of the test, we + // start with a clean slate for each account. This lets us check with + // equality for things like "number of operations," etc. 
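// Annotation, not part of the diff: TestClaimableBalanceBasics above predicts
// balance IDs before submission and compares them with what the ledger reports.
// This is a minimal sketch of that cross-check, assuming tx is the signed
// *txnbuild.Transaction that was submitted and resultXDR is the base64 result
// returned for it; the helper name is hypothetical.
package example

import (
	"fmt"

	"github.com/stellar/go/txnbuild"
	"github.com/stellar/go/xdr"
)

func checkPredictedBalanceID(tx *txnbuild.Transaction, opIndex int, resultXDR string) error {
	// Predicted locally, before the transaction ever reaches the network.
	predicted, err := tx.ClaimableBalanceID(opIndex)
	if err != nil {
		return err
	}

	// Extracted from the result the network actually returned.
	var txResult xdr.TransactionResult
	if err := xdr.SafeUnmarshalBase64(resultXDR, &txResult); err != nil {
		return err
	}
	actual, err := txResult.ExtractBalanceIDHex(opIndex)
	if err != nil {
		return err
	}

	if predicted != actual {
		return fmt.Errorf("predicted balance ID %s but ledger reported %s", predicted, actual)
	}
	return nil
}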
+ // + + // We start simple: native asset, single destination, no predicate. + t.Run("Simple/Native", func(t *testing.T) { + // Note that we don't use the `itest.MustCreateClaimableBalance` helper + // here because the whole point is to check that ^ generally works. + t.Logf("Creating claimable balance.") + _, err := itest.SubmitOperations(accountA, a, + &txnbuild.CreateClaimableBalance{ + Destinations: []txnbuild.Claimant{ + txnbuild.NewClaimant(b.Address(), nil), + txnbuild.NewClaimant(c.Address(), nil), + }, + Asset: txnbuild.NativeAsset{}, + Amount: "42", + }, + ) + assert.NoError(t, err) + + // + // Ensure it shows up with the various filters (and *doesn't* show up with + // non-matching filters, of course). + // + t.Log("Checking claimable balance filters") + + // Ensure it exists in the global list + t.Log(" global") + balances, err := client.ClaimableBalances(sdk.ClaimableBalanceRequest{}) + assert.NoError(t, err) + + claims := balances.Embedded.Records + assert.Len(t, claims, 1) + assert.Equal(t, a.Address(), claims[0].Sponsor) + claim := claims[0] + + // Ensure we can look it up explicitly + t.Log(" by ID") + balance, err := client.ClaimableBalance(claim.BalanceID) + assert.NoError(t, err) + assert.Equal(t, claim, balance) + + checkFilters(itest, claim) + + for _, assetType := range []txnbuild.AssetType{ + txnbuild.AssetTypeCreditAlphanum4, + txnbuild.AssetTypeCreditAlphanum12, + } { + t.Logf(" by non-native %+v", assetType) + randomAsset := createAsset(assetType, a.Address()) + xdrAsset, innerErr := randomAsset.ToXDR() + assert.NoError(t, innerErr) + + balances, innerErr = client.ClaimableBalances(sdk.ClaimableBalanceRequest{Asset: xdrAsset.StringCanonical()}) + assert.NoError(t, innerErr) + assert.Len(t, balances.Embedded.Records, 0) + } + + // check that its operations and transactions can be obtained + transactionsResp, err := client.Transactions(sdk.TransactionRequest{ + ForClaimableBalance: claim.BalanceID, + }) + assert.NoError(t, err) + assert.Len(t, transactionsResp.Embedded.Records, 1) + + operationsResp, err := client.Operations(sdk.OperationRequest{ + ForClaimableBalance: claim.BalanceID, + }) + assert.NoError(t, err) + if assert.Len(t, operationsResp.Embedded.Records, 1) { + assert.IsType(t, operationsResp.Embedded.Records[0], operations.CreateClaimableBalance{}) + } + + // + // Now, actually try to *claim* the CB to remove it from the global list. + // + + // Claiming a balance when you aren't the recipient should fail... + t.Logf("Stealing balance (ID=%s)...", claim.BalanceID) + _, err = itest.SubmitOperations(accountA, a, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}) + assert.Error(t, err) + t.Log(" failed as expected") + + // ...but if you are it should succeed. + t.Logf("Claiming balance (ID=%s)...", claim.BalanceID) + _, err = itest.SubmitOperations(accountB, b, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}) + assert.NoError(t, err) + t.Log(" claimed") + + // Ensure the claimable balance is gone now ... + balances, err = client.ClaimableBalances(sdk.ClaimableBalanceRequest{Sponsor: a.Address()}) + assert.NoError(t, err) + assert.Len(t, balances.Embedded.Records, 0) + t.Log(" gone") + + // ... 
but that its operations and transactions can still be obtained + transactionsResp, err = client.Transactions(sdk.TransactionRequest{ + ForClaimableBalance: claim.BalanceID, + }) + assert.NoError(t, err) + assert.Len(t, transactionsResp.Embedded.Records, 2) + + operationsResp, err = client.Operations(sdk.OperationRequest{ + ForClaimableBalance: claim.BalanceID, + }) + assert.NoError(t, err) + if assert.Len(t, operationsResp.Embedded.Records, 2) { + assert.IsType(t, operationsResp.Embedded.Records[0], operations.CreateClaimableBalance{}) + assert.IsType(t, operationsResp.Embedded.Records[1], operations.ClaimClaimableBalance{}) + } + + // Ensure the actual account has a higher balance, now! + request := sdk.AccountRequest{AccountID: b.Address()} + details, err := client.AccountDetail(request) + assert.NoError(t, err) + + foundBalance := false + for _, balance := range details.Balances { + if balance.Code != "" { + continue + } + + assert.Equal(t, "1041.9999900", balance.Balance) // 1000 + 42 - fee + foundBalance = true + break + } + assert.True(t, foundBalance) + + // Ensure that the other claimant can't do anything about it! + t.Log(" other claimant can't claim") + _, err = itest.SubmitOperations(accountC, c, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}) + assert.Error(t, err) + }) + + // Now, confirm the same thing works for non-native assets. + for _, assetType := range []txnbuild.AssetType{ + txnbuild.AssetTypeCreditAlphanum4, + txnbuild.AssetTypeCreditAlphanum12, + } { + t.Run(fmt.Sprintf("Simple/%+v", assetType), func(t *testing.T) { + asset := createAsset(assetType, a.Address()) + itest.MustEstablishTrustline(b, accountB, asset) + + t.Log("Creating claimable balance.") + claim := itest.MustCreateClaimableBalance(a, asset, "42", + txnbuild.NewClaimant(b.Address(), nil)) + accountA.IncrementSequenceNumber() + + // + // Ensure it shows up with the various filters (and *doesn't* show + // up with non-matching filters, of course). + // + t.Log("Checking claimable balance filters") + + // Ensure we can look it up explicitly + t.Log(" by ID") + balance, err := client.ClaimableBalance(claim.BalanceID) + assert.NoError(t, err) + assert.Equal(t, claim, balance) + + checkFilters(itest, claim) + + t.Logf(" by native") + xdrAsset, err := txnbuild.NativeAsset{}.ToXDR() + balances, err := client.ClaimableBalances(sdk.ClaimableBalanceRequest{Asset: xdrAsset.StringCanonical()}) + assert.NoError(t, err) + assert.Len(t, balances.Embedded.Records, 0) + + // Even if the native asset filter doesn't match, we need to ensure + // that a different credit asset also doesn't match. + t.Logf(" by random asset") + xdrAsset, err = txnbuild.CreditAsset{Code: "RAND", Issuer: master.Address()}.ToXDR() + balances, err = client.ClaimableBalances(sdk.ClaimableBalanceRequest{Asset: xdrAsset.StringCanonical()}) + assert.NoError(t, err) + assert.Len(t, balances.Embedded.Records, 0) + + // + // Now, actually try to *claim* the CB to remove it from the global list. + // + t.Logf("Claiming balance (ID=%s)...", claim.BalanceID) + _, err = itest.SubmitOperations(accountB, b, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}) + assert.NoError(t, err) + t.Log(" claimed") + + // Ensure the claimable balance is gone now. + balances, err = client.ClaimableBalances(sdk.ClaimableBalanceRequest{Sponsor: a.Address()}) + assert.NoError(t, err) + assert.Len(t, balances.Embedded.Records, 0) + t.Log(" gone") + + // Ensure the actual account has a higher balance, now! 
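// Annotation, not part of the diff: a worked example of the "1041.9999900"
// figure asserted above. The claimant starts with 1000 XLM, receives the 42 XLM
// claim, and pays one 100-stroop base fee for the claim transaction. The
// arithmetic is done in stroops with the stellar/go amount package.
package main

import (
	"fmt"

	"github.com/stellar/go/amount"
)

func main() {
	start, _ := amount.Parse("1000")  // starting balance from CreateAccounts
	claimed, _ := amount.Parse("42")  // amount of the claimable balance
	fee, _ := amount.Parse("0.00001") // one operation at the 100-stroop base fee

	expected := start + claimed - fee
	fmt.Println(amount.String(expected)) // 1041.9999900
}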
+ account := itest.MustGetAccount(b) + foundBalance := false + for _, balance := range account.Balances { + if balance.Code != asset.GetCode() || balance.Issuer != asset.GetIssuer() { + continue + } + + assert.Equal(t, "42.0000000", balance.Balance) + foundBalance = true + break + } + assert.True(t, foundBalance) + }) + } +} + +// We want to ensure that users can't claim the same claimable balance twice. +func TestDoubleClaim(t *testing.T) { + itest := integration.NewTest(t, integration.Config{}) + client := itest.Client() + + // Create a couple of accounts to test the interactions. + keys, accounts := itest.CreateAccounts(2, "1000") + a, b := keys[0], keys[1] + _, accountB := accounts[0], accounts[1] + + notExistResult, _ := codes.String(xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceDoesNotExist) + + // Two cases: claim in separate TXs, claim twice in same TX + t.Run("TwoTx", func(t *testing.T) { + claim := itest.MustCreateClaimableBalance( + a, txnbuild.NativeAsset{}, "42", + txnbuild.NewClaimant(b.Address(), nil)) + + t.Logf("Claiming balance (ID=%s)...", claim.BalanceID) + _, err := itest.SubmitOperations(accountB, b, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}) + assert.NoError(t, err) + t.Log(" claimed") + + _, err = itest.SubmitOperations(accountB, b, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}) + assert.Error(t, err) + t.Log(" couldn't claim twice") + + assert.Equal(t, notExistResult, getOperationsError(err)) + }) + + t.Run("SameTx", func(t *testing.T) { + claim := itest.MustCreateClaimableBalance( + a, txnbuild.NativeAsset{}, "42", + txnbuild.NewClaimant(b.Address(), nil)) + + // One succeeds, other fails + t.Logf("Claiming balance (ID=%s)...", claim.BalanceID) + _, err := itest.SubmitOperations(accountB, b, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}) + assert.Error(t, err) + t.Log(" couldn't claim twice") + + assert.Equal(t, codes.OpSuccess, getOperationsErrorByIndex(err, 0)) + assert.Equal(t, notExistResult, getOperationsErrorByIndex(err, 1)) + + // Both included in /operations + response, err := client.Operations(sdk.OperationRequest{ + ForAccount: b.Address(), + Order: "desc", + Limit: 2, + IncludeFailed: true, + }) + ops := response.Embedded.Records + assert.NoError(t, err) + assert.Len(t, ops, 2) + }) +} + +func TestClaimableBalancePredicates(t *testing.T) { + itest := integration.NewTest(t, integration.Config{}) + _, client := itest.Master(), itest.Client() + + // Create a couple of accounts to test the interactions. + keys, accounts := itest.CreateAccounts(3, "1000") + a, b, c := keys[0], keys[1], keys[2] + accountA, accountB, accountC := accounts[0], accounts[1], accounts[2] + + t.Run("Predicates", func(t *testing.T) { + now := time.Now().Unix() + minute := int64(60 * 60) + + // + // We create a series of claims, all claimable by the same account, with + // a variety of predicates, all of which should succeed with no issue. 
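// Annotation, not part of the diff: the balance checks above walk the account's
// Balances slice looking for one trustline. This is a minimal sketch of that
// lookup against the public Horizon client; the helper name is hypothetical,
// the request and response fields are the ones used in the tests.
package example

import (
	"fmt"

	"github.com/stellar/go/clients/horizonclient"
)

func creditBalance(client *horizonclient.Client, accountID, code, issuer string) (string, error) {
	account, err := client.AccountDetail(horizonclient.AccountRequest{AccountID: accountID})
	if err != nil {
		return "", err
	}
	for _, b := range account.Balances {
		if b.Code == code && b.Issuer == issuer {
			return b.Balance, nil
		}
	}
	return "", fmt.Errorf("account %s holds no %s:%s trustline", accountID, code, issuer)
}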
+ // + predicates := []xdr.ClaimPredicate{ + txnbuild.UnconditionalPredicate, + txnbuild.BeforeAbsoluteTimePredicate(now + minute), // full minute to claim + txnbuild.BeforeRelativeTimePredicate(minute), + txnbuild.AndPredicate( + txnbuild.BeforeAbsoluteTimePredicate(now+minute), + txnbuild.BeforeRelativeTimePredicate(minute), + ), + txnbuild.OrPredicate( + txnbuild.BeforeAbsoluteTimePredicate(now+minute), + txnbuild.BeforeRelativeTimePredicate(minute), + ), + } + + t.Logf("Creating claims...") + createClaimOps := make([]txnbuild.Operation, len(predicates)) + for i, predicate := range predicates { + amount := (i + 1) * 10 // diff for uniqueness + claimant := txnbuild.NewClaimant(c.Address(), &predicates[i]) + t.Logf(" amount: %d, predicate: %+v", amount, predicate.Type) + + createClaimOps[i] = &txnbuild.CreateClaimableBalance{ + SourceAccount: accountA.GetAccountID(), + Destinations: []txnbuild.Claimant{claimant}, + Amount: fmt.Sprintf("%d.0000000", amount), + Asset: txnbuild.NativeAsset{}, + } + } + + var txResult xdr.TransactionResult + txResp, err := itest.SubmitOperations(accountA, a, createClaimOps...) + itest.LogFailedTx(txResp, err) + xdr.SafeUnmarshalBase64(txResp.ResultXdr, &txResult) + opResults, _ := txResult.OperationResults() + + // Ensure all of the operations succeeded, and also get balance IDs. + balanceIds := make([]string, len(predicates)) + t.Logf("Verifying operation success...") + for i, result := range opResults { + t.Logf(" predicate: %+v", predicates[i].Type) + assert.Equal(t, + xdr.CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess, + result.MustTr().CreateClaimableBalanceResult.Code) + + balanceId, innerErr := xdr.MarshalHex(result.MustTr().MustCreateClaimableBalanceResult().BalanceId) + assert.NoError(t, innerErr) + assert.Equal(t, uint8('0'), balanceId[0]) // check discriminant + balanceIds[i] = balanceId + } + + // Ensure the global list is accurate. + balances, err := client.ClaimableBalances( + sdk.ClaimableBalanceRequest{Claimant: c.Address()}) + claims := balances.Embedded.Records + assert.Len(t, claims, len(predicates)) + + for i, balanceId := range balanceIds { + claim, innerErr := client.ClaimableBalance(balanceId) + assert.NoError(t, innerErr) + + assert.Equal(t, "native", claim.Asset) + assert.Equal(t, fmt.Sprintf("%d.0000000", (i+1)*10), claim.Amount) + assert.Equal(t, a.Address(), claim.Sponsor) + + assert.Len(t, claim.Claimants, 1) + claimant := claim.Claimants[0] + + assert.Equal(t, c.Address(), claimant.Destination) + + // Ensure that RelTime() predicates turn into AbsTime() + expectedType := predicates[i].Type + if expectedType == xdr.ClaimPredicateTypeClaimPredicateBeforeRelativeTime { + expectedType = xdr.ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime + } + assert.Equal(t, expectedType, claimant.Predicate.Type) + } + + t.Logf("Verifying that the balance can be claimed...") + claimOps := make([]txnbuild.Operation, len(claims)) + for i, predicate := range predicates { + id := claims[i].BalanceID + t.Logf(" predicate: %+v", predicate.Type) + t.Logf(" id: %s", id) + + claimOps[i] = &txnbuild.ClaimClaimableBalance{BalanceID: id} + } + + _, err = itest.SubmitOperations(accountC, c, claimOps...) + assert.NoError(t, err) + + // Ensure the global list is empty now. + balances, err = client.ClaimableBalances( + sdk.ClaimableBalanceRequest{Claimant: b.Address()}) + claims = balances.Embedded.Records + assert.Len(t, claims, 0) + t.Log(" all claimed") + + // Ensure balance got updated due to all claims. 
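// Annotation, not part of the diff: a minimal sketch of composing the same
// predicate shapes exercised above with the txnbuild helpers, outside the
// integration harness. The deadline value and the function name are placeholders.
package example

import (
	"time"

	"github.com/stellar/go/txnbuild"
	"github.com/stellar/go/xdr"
)

func examplePredicates() []xdr.ClaimPredicate {
	deadline := time.Now().Add(time.Hour).Unix()

	within := txnbuild.BeforeAbsoluteTimePredicate(deadline) // claimable before a wall-clock time
	soon := txnbuild.BeforeRelativeTimePredicate(3600)       // claimable within an hour of creation

	return []xdr.ClaimPredicate{
		txnbuild.UnconditionalPredicate,
		txnbuild.AndPredicate(within, soon),
		txnbuild.OrPredicate(within, soon),
		// NOT(unconditional) is never satisfiable, so such a balance can never be claimed.
		txnbuild.NotPredicate(txnbuild.UnconditionalPredicate),
	}
}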
+ account := itest.MustGetAccount(c) + expectedBalance := 0 + for i := range predicates { + expectedBalance += (i + 1) * 10 + } + actualBalance, _ := strconv.ParseFloat(account.Balances[0].Balance, 64) + assert.EqualValues(t, 1000+expectedBalance-1, int(actualBalance)) + t.Log("Balance updated correctly.") + }) + + // reused a lot: + cantClaimResult, _ := codes.String( + xdr.ClaimClaimableBalanceResultCodeClaimClaimableBalanceCannotClaim) + + // This is an easy fail. + predicate := txnbuild.NotPredicate(txnbuild.UnconditionalPredicate) + t.Run("AlwaysFail", func(t *testing.T) { + t.Logf("Creating claimable balance (asset=native).") + t.Logf(" predicate: %+v", predicate.Type) + + claim := itest.MustCreateClaimableBalance( + a, txnbuild.NativeAsset{}, "42", + txnbuild.NewClaimant(b.Address(), &predicate)) + + t.Logf("Claiming balance (ID=%s)...", claim.BalanceID) + _, err := itest.SubmitOperations(accountB, b, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}) + assert.Error(t, err) + + // Ensure it failed w/ the right error code: + // CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM + assert.Equal(t, cantClaimResult, getOperationsError(err)) + t.Logf(" tx did fail w/ %s", cantClaimResult) + + // check that /operations also has the claim as failed + response, err := client.Operations(sdk.OperationRequest{ + Order: "desc", + Limit: 1, + IncludeFailed: true, + }) + ops := response.Embedded.Records + assert.NoError(t, err) + assert.Len(t, ops, 1) + + cb := ops[0].(operations.ClaimClaimableBalance) + assert.False(t, cb.TransactionSuccessful) + assert.Equal(t, claim.BalanceID, cb.BalanceID) + assert.Equal(t, b.Address(), cb.Claimant) + t.Log(" op did fail") + }) + + // This one fails because of an expiring claim. + predicate = txnbuild.BeforeRelativeTimePredicate(1) + t.Run("Expire", func(t *testing.T) { + t.Log("Creating claimable balance (asset=native).") + t.Logf(" predicate: %+v", predicate.Type) + + claim := itest.MustCreateClaimableBalance( + a, txnbuild.NativeAsset{}, "42", + txnbuild.NewClaimant(b.Address(), &predicate)) + + oneSec, err := time.ParseDuration("1s") + time.Sleep(oneSec) + + t.Logf("Claiming balance (ID=%s)...", claim.BalanceID) + _, err = itest.SubmitOperations(accountB, b, + &txnbuild.ClaimClaimableBalance{BalanceID: claim.BalanceID}) + assert.Error(t, err) + + assert.Equal(t, cantClaimResult, getOperationsError(err)) + t.Logf(" tx did fail w/ %s", cantClaimResult) + }) +} + +/* Utility functions below */ + +// Extracts the first error string in the "operations: [...]" of a Problem. +func getOperationsError(err error) string { + return getOperationsErrorByIndex(err, 0) +} + +func getOperationsErrorByIndex(err error, i int) string { + resultCodes := sdk.GetError(err).Problem.Extras["result_codes"].(map[string]interface{}) + opResultCodes := resultCodes["operations"].([]interface{}) + return opResultCodes[i].(string) +} + +// Checks that filtering works for a particular claim. 
+func checkFilters(i *integration.Test, claim proto.ClaimableBalance) { + client := i.Client() + t := i.CurrentTest() + + source := claim.Sponsor + asset := claim.Asset + + t.Log(" by sponsor") + balances, err := client.ClaimableBalances(sdk.ClaimableBalanceRequest{Sponsor: source}) + assert.NoError(t, err) + assert.Len(t, balances.Embedded.Records, 1) + assert.Equal(t, claim, balances.Embedded.Records[0]) + + dest := claim.Claimants[0].Destination + balances, err = client.ClaimableBalances(sdk.ClaimableBalanceRequest{Sponsor: dest}) + assert.NoError(t, err) + assert.Len(t, balances.Embedded.Records, 0) + + t.Log(" by claimant(s)") + balances, err = client.ClaimableBalances(sdk.ClaimableBalanceRequest{Claimant: source}) + assert.NoError(t, err) + assert.Len(t, balances.Embedded.Records, 0) + + for _, claimant := range claim.Claimants { + dest := claimant.Destination + balances, err = client.ClaimableBalances(sdk.ClaimableBalanceRequest{Claimant: dest}) + assert.NoError(t, err) + assert.Len(t, balances.Embedded.Records, 1) + assert.Equal(t, claim, balances.Embedded.Records[0]) + } + + t.Log(" by exact asset") + balances, err = client.ClaimableBalances(sdk.ClaimableBalanceRequest{Asset: asset}) + assert.NoError(t, err) + assert.Len(t, balances.Embedded.Records, 1) +} + +// Creates an asset object given a type and issuer. +func createAsset(assetType txnbuild.AssetType, issuer string) txnbuild.Asset { + switch assetType { + case txnbuild.AssetTypeNative: + return txnbuild.NativeAsset{} + case txnbuild.AssetTypeCreditAlphanum4: + return txnbuild.CreditAsset{Code: "HEYO", Issuer: issuer} + case txnbuild.AssetTypeCreditAlphanum12: + return txnbuild.CreditAsset{Code: "HEYYYAAAAAAA", Issuer: issuer} + default: + panic(-1) + } +} diff --git a/services/horizon/internal/integration/clawback_test.go b/services/horizon/internal/integration/clawback_test.go new file mode 100644 index 0000000000..c41d2af465 --- /dev/null +++ b/services/horizon/internal/integration/clawback_test.go @@ -0,0 +1,492 @@ +package integration + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/codes" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" +) + +func TestHappyClawbackAccount(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + asset, fromKey, _ := setupClawbackAccountTest(tt, itest, master) + + // Clawback all of the asset + submissionResp := itest.MustSubmitOperations(itest.MasterAccount(), master, &txnbuild.Clawback{ + From: fromKey.Address(), + Amount: "10", + Asset: asset, + }) + + assertClawbackAccountSuccess(tt, itest, master, fromKey, "0.0000000", submissionResp) + + // Check the operation details + opDetailsResponse, err := itest.Client().Operations(horizonclient.OperationRequest{ + ForTransaction: submissionResp.Hash, + }) + tt.NoError(err) + if tt.Len(opDetailsResponse.Embedded.Records, 1) { + clawbackOp := opDetailsResponse.Embedded.Records[0].(operations.Clawback) + tt.Equal("PTS", clawbackOp.Code) + tt.Equal(fromKey.Address(), clawbackOp.From) + tt.Equal("10.0000000", clawbackOp.Amount) + } + + // Check the operation effects + effectsResponse, err := 
itest.Client().Effects(horizonclient.EffectRequest{ + ForTransaction: submissionResp.Hash, + }) + tt.NoError(err) + + if tt.Len(effectsResponse.Embedded.Records, 2) { + accountCredited := effectsResponse.Embedded.Records[0].(effects.AccountCredited) + tt.Equal(master.Address(), accountCredited.Account) + tt.Equal("10.0000000", accountCredited.Amount) + tt.Equal(master.Address(), accountCredited.Issuer) + tt.Equal("PTS", accountCredited.Code) + accountDebited := effectsResponse.Embedded.Records[1].(effects.AccountDebited) + tt.Equal(fromKey.Address(), accountDebited.Account) + tt.Equal("10.0000000", accountDebited.Amount) + tt.Equal(master.Address(), accountDebited.Issuer) + tt.Equal("PTS", accountDebited.Code) + } +} + +func TestHappyClawbackAccountPartial(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + asset, fromKey, _ := setupClawbackAccountTest(tt, itest, master) + + // Partial clawback of the asset + submissionResp := itest.MustSubmitOperations(itest.MasterAccount(), master, &txnbuild.Clawback{ + From: fromKey.Address(), + Amount: "5", + Asset: asset, + }) + + assertClawbackAccountSuccess(tt, itest, master, fromKey, "5.0000000", submissionResp) +} + +func TestHappyClawbackAccountSellingLiabilities(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + asset, fromKey, fromAccount := setupClawbackAccountTest(tt, itest, master) + + // Add a selling liability + submissionResp := itest.MustSubmitOperations(fromAccount, fromKey, &txnbuild.ManageSellOffer{ + Buying: txnbuild.NativeAsset{}, + Selling: asset, + Amount: "5", + Price: xdr.Price{1, 1}, + SourceAccount: fromAccount.GetAccountID(), + }) + tt.True(submissionResp.Successful) + + // Full clawback of the asset, with a deauthorize/reauthorize sandwich + submissionResp = itest.MustSubmitOperations(itest.MasterAccount(), master, + &txnbuild.SetTrustLineFlags{ + Trustor: fromAccount.GetAccountID(), + Asset: asset, + ClearFlags: []txnbuild.TrustLineFlag{txnbuild.TrustLineAuthorized}, + }, + &txnbuild.Clawback{ + From: fromKey.Address(), + Amount: "10", + Asset: asset, + }, + &txnbuild.SetTrustLineFlags{ + Trustor: fromAccount.GetAccountID(), + Asset: asset, + SetFlags: []txnbuild.TrustLineFlag{txnbuild.TrustLineAuthorized}, + }, + ) + + assertClawbackAccountSuccess(tt, itest, master, fromKey, "0.0000000", submissionResp) +} + +func TestSadClawbackAccountInsufficientFunds(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + asset, fromKey, _ := setupClawbackAccountTest(tt, itest, master) + // Attempt to clawback more than the account holds. 
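// Annotation, not part of the diff: the "sandwich" submitted in
// TestHappyClawbackAccountSellingLiabilities above, written out as a plain
// operation list. The holder address, asset and amount are placeholders; the
// operation types and flags are the ones used in the test.
package example

import "github.com/stellar/go/txnbuild"

func clawbackSandwich(holder string, asset txnbuild.CreditAsset, amt string) []txnbuild.Operation {
	return []txnbuild.Operation{
		// 1. Revoke authorization so selling liabilities no longer block the clawback.
		&txnbuild.SetTrustLineFlags{
			Trustor:    holder,
			Asset:      asset,
			ClearFlags: []txnbuild.TrustLineFlag{txnbuild.TrustLineAuthorized},
		},
		// 2. Claw the balance back to the issuer.
		&txnbuild.Clawback{
			From:   holder,
			Amount: amt,
			Asset:  asset,
		},
		// 3. Restore authorization for future payments.
		&txnbuild.SetTrustLineFlags{
			Trustor:  holder,
			Asset:    asset,
			SetFlags: []txnbuild.TrustLineFlag{txnbuild.TrustLineAuthorized},
		},
	}
}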
+ submissionResp, err := itest.SubmitOperations(itest.MasterAccount(), master, &txnbuild.Clawback{ + From: fromKey.Address(), + Amount: "20", + Asset: asset, + }) + tt.Error(err) + assertClawbackAccountFailed(tt, itest, master, fromKey, submissionResp) +} + +func TestSadClawbackAccountSufficientFundsSellingLiabilities(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + asset, fromKey, fromAccount := setupClawbackAccountTest(tt, itest, master) + + // Add a selling liability + submissionResp := itest.MustSubmitOperations(fromAccount, fromKey, &txnbuild.ManageSellOffer{ + Buying: txnbuild.NativeAsset{}, + Selling: asset, + Amount: "5", + Price: xdr.Price{1, 1}, + SourceAccount: fromAccount.GetAccountID(), + }) + + // Attempt to clawback more than is available. + submissionResp, err := itest.SubmitOperations(itest.MasterAccount(), master, &txnbuild.Clawback{ + From: fromKey.Address(), + Amount: "10", + Asset: asset, + }) + tt.Error(err) + + assertClawbackAccountFailed(tt, itest, master, fromKey, submissionResp) +} + +func setupClawbackAccountTest(tt *assert.Assertions, itest *integration.Test, master *keypair.Full) (txnbuild.CreditAsset, *keypair.Full, txnbuild.Account) { + // Give the master account the revocable flag (needed to set the clawback flag) + setRevocableFlag := txnbuild.SetOptions{ + SetFlags: []txnbuild.AccountFlag{ + txnbuild.AuthRevocable, + }, + } + + itest.MustSubmitOperations(itest.MasterAccount(), master, &setRevocableFlag) + + // Give the master account the clawback flag + setClawBackFlag := txnbuild.SetOptions{ + SetFlags: []txnbuild.AccountFlag{ + txnbuild.AuthClawbackEnabled, + }, + } + itest.MustSubmitOperations(itest.MasterAccount(), master, &setClawBackFlag) + + // Make sure the clawback flag was set + accountDetails := itest.MustGetAccount(master) + tt.True(accountDetails.Flags.AuthClawbackEnabled) + + // Create another account from which to claw an asset back + keyPairs, accounts := itest.CreateAccounts(1, "100") + accountKeyPair := keyPairs[0] + account := accounts[0] + + // Add some assets to the account with asset which allows clawback + + // Time machine to Spain before Euros were a thing + pesetasAsset := txnbuild.CreditAsset{Code: "PTS", Issuer: master.Address()} + itest.MustEstablishTrustline(accountKeyPair, account, pesetasAsset) + pesetasPayment := txnbuild.Payment{ + Destination: accountKeyPair.Address(), + Amount: "10", + Asset: pesetasAsset, + } + itest.MustSubmitOperations(itest.MasterAccount(), master, &pesetasPayment) + + accountDetails = itest.MustGetAccount(accountKeyPair) + if tt.Len(accountDetails.Balances, 2) { + pts := accountDetails.Balances[0] + tt.Equal("PTS", pts.Code) + if tt.NotNil(pts.IsClawbackEnabled) { + tt.True(*pts.IsClawbackEnabled) + } + tt.Equal("10.0000000", pts.Balance) + } + + return pesetasAsset, accountKeyPair, account +} + +func assertClawbackAccountSuccess(tt *assert.Assertions, itest *integration.Test, master, accountKeyPair *keypair.Full, expectedBalance string, submissionResp protocol.Transaction) { + tt.True(submissionResp.Successful) + assertAccountBalance(tt, itest, accountKeyPair, expectedBalance) +} + +func assertClawbackAccountFailed(tt *assert.Assertions, itest *integration.Test, master, accountKeyPair *keypair.Full, submissionResp protocol.Transaction) { + tt.False(submissionResp.Successful) + assertAccountBalance(tt, itest, accountKeyPair, "10.0000000") +} + +func assertAccountBalance(tt *assert.Assertions, itest *integration.Test, 
accountKeyPair *keypair.Full, expectedBalance string) { + accountDetails := itest.MustGetAccount(accountKeyPair) + if tt.Len(accountDetails.Balances, 2) { + pts := accountDetails.Balances[0] + tt.Equal("PTS", pts.Code) + if tt.NotNil(pts.IsClawbackEnabled) { + tt.True(*pts.IsClawbackEnabled) + } + tt.Equal(expectedBalance, pts.Balance) + } +} + +func TestHappyClawbackClaimableBalance(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + // Give the master account the revocable flag (needed to set the clawback flag) + setRevocableFlag := txnbuild.SetOptions{ + SetFlags: []txnbuild.AccountFlag{ + txnbuild.AuthRevocable, + }, + } + + itest.MustSubmitOperations(itest.MasterAccount(), master, &setRevocableFlag) + + // Give the master account the clawback flag + setClawBackFlag := txnbuild.SetOptions{ + SetFlags: []txnbuild.AccountFlag{ + txnbuild.AuthClawbackEnabled, + }, + } + itest.MustSubmitOperations(itest.MasterAccount(), master, &setClawBackFlag) + + // Make sure the clawback flag was set + accountDetails := itest.MustGetAccount(master) + tt.True(accountDetails.Flags.AuthClawbackEnabled) + + // Create another account as a claimable balance claimant + keyPairs, accounts := itest.CreateAccounts(1, "100") + accountKeyPair := keyPairs[0] + account := accounts[0] + + // Time machine to Spain before Euros were a thing + pesetasAsset := txnbuild.CreditAsset{Code: "PTS", Issuer: master.Address()} + itest.MustEstablishTrustline(accountKeyPair, account, pesetasAsset) + + // Make a claimable balance from the master account (and asset issuer) to the account with an asset which allows clawback + pesetasCreateCB := txnbuild.CreateClaimableBalance{ + Amount: "10", + Asset: pesetasAsset, + Destinations: []txnbuild.Claimant{ + txnbuild.NewClaimant(accountKeyPair.Address(), nil), + }, + } + itest.MustSubmitOperations(itest.MasterAccount(), master, &pesetasCreateCB) + + // Check that the claimable balance was created, clawback is enabled and obtain the id to claw it back later on + listCBResp, err := itest.Client().ClaimableBalances(horizonclient.ClaimableBalanceRequest{ + Claimant: accountKeyPair.Address(), + }) + tt.NoError(err) + cbID := "" + if tt.Len(listCBResp.Embedded.Records, 1) { + cb := listCBResp.Embedded.Records[0] + tt.True(cb.Flags.ClawbackEnabled) + cbID = cb.BalanceID + tt.Equal(master.Address(), cb.Sponsor) + } + + // check that its operations and transactions can be obtained + transactionsResp, err := itest.Client().Transactions(horizonclient.TransactionRequest{ + ForClaimableBalance: cbID, + }) + assert.NoError(t, err) + assert.Len(t, transactionsResp.Embedded.Records, 1) + + operationsResp, err := itest.Client().Operations(horizonclient.OperationRequest{ + ForClaimableBalance: cbID, + }) + assert.NoError(t, err) + if assert.Len(t, operationsResp.Embedded.Records, 1) { + assert.IsType(t, operationsResp.Embedded.Records[0], operations.CreateClaimableBalance{}) + } + + // Clawback the claimable balance + pesetasClawbackCB := txnbuild.ClawbackClaimableBalance{ + BalanceID: cbID, + } + clawbackCBResp := itest.MustSubmitOperations(itest.MasterAccount(), master, &pesetasClawbackCB) + + // Make sure the claimable balance is clawed back (gone) + _, err = itest.Client().ClaimableBalance(cbID) + // Not found + tt.Error(err) + + // check that its operations and transactions can still be obtained + transactionsResp, err = itest.Client().Transactions(horizonclient.TransactionRequest{ + ForClaimableBalance: cbID, + }) + 
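// Annotation, not part of the diff: a minimal sketch of the clawback flow the
// following assertions exercise, as a helper that claws back a claimable
// balance by ID and confirms Horizon no longer returns it. The submit callback
// and the helper name are hypothetical; the client and txnbuild calls are the
// ones used in the test.
package example

import (
	"errors"

	"github.com/stellar/go/clients/horizonclient"
	"github.com/stellar/go/txnbuild"
)

func clawBackAndVerify(client *horizonclient.Client, cbID string, submit func(op txnbuild.Operation) error) error {
	// The issuer claws the balance back by ID.
	if err := submit(&txnbuild.ClawbackClaimableBalance{BalanceID: cbID}); err != nil {
		return err
	}
	// Afterwards the by-ID lookup should fail, because the balance is gone.
	if _, err := client.ClaimableBalance(cbID); err == nil {
		return errors.New("claimable balance still exists after clawback")
	}
	return nil
}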
assert.NoError(t, err) + assert.Len(t, transactionsResp.Embedded.Records, 2) + + operationsResp, err = itest.Client().Operations(horizonclient.OperationRequest{ + ForClaimableBalance: cbID, + }) + assert.NoError(t, err) + if assert.Len(t, operationsResp.Embedded.Records, 2) { + assert.IsType(t, operationsResp.Embedded.Records[0], operations.CreateClaimableBalance{}) + assert.IsType(t, operationsResp.Embedded.Records[1], operations.ClawbackClaimableBalance{}) + } + + // Check the operation details + opDetailsResponse, err := itest.Client().Operations(horizonclient.OperationRequest{ + ForTransaction: clawbackCBResp.Hash, + }) + tt.NoError(err) + if tt.Len(opDetailsResponse.Embedded.Records, 1) { + clawbackOp := opDetailsResponse.Embedded.Records[0].(operations.ClawbackClaimableBalance) + tt.Equal(cbID, clawbackOp.BalanceID) + } + + // Check the operation effects + effectsResponse, err := itest.Client().Effects(horizonclient.EffectRequest{ + ForTransaction: clawbackCBResp.Hash, + }) + tt.NoError(err) + + if tt.Len(effectsResponse.Embedded.Records, 3) { + claimableBalanceClawedBack := effectsResponse.Embedded.Records[0].(effects.ClaimableBalanceClawedBack) + tt.Equal(master.Address(), claimableBalanceClawedBack.Account) + tt.Equal(cbID, claimableBalanceClawedBack.BalanceID) + accountCredited := effectsResponse.Embedded.Records[1].(effects.AccountCredited) + tt.Equal(master.Address(), accountCredited.Account) + tt.Equal("10.0000000", accountCredited.Amount) + tt.Equal(accountCredited.Issuer, master.Address()) + tt.Equal(accountCredited.Code, "PTS") + cbSponsorshipRemoved := effectsResponse.Embedded.Records[2].(effects.ClaimableBalanceSponsorshipRemoved) + tt.Equal(master.Address(), cbSponsorshipRemoved.Account) + tt.Equal(cbID, cbSponsorshipRemoved.BalanceID) + tt.Equal(master.Address(), cbSponsorshipRemoved.FormerSponsor) + } +} + +func TestHappySetTrustLineFlags(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + // Give the master account the revocable flag (needed to set the clawback flag) + setRevocableFlag := txnbuild.SetOptions{ + SetFlags: []txnbuild.AccountFlag{ + txnbuild.AuthRevocable, + }, + } + + itest.MustSubmitOperations(itest.MasterAccount(), master, &setRevocableFlag) + + // Give the master account the clawback flag + setClawBackFlag := txnbuild.SetOptions{ + SetFlags: []txnbuild.AccountFlag{ + txnbuild.AuthClawbackEnabled, + }, + } + itest.MustSubmitOperations(itest.MasterAccount(), master, &setClawBackFlag) + + // Make sure the clawback flag was set + accountDetails := itest.MustGetAccount(master) + tt.True(accountDetails.Flags.AuthClawbackEnabled) + + // Create another account fot the Trustline + keyPairs, accounts := itest.CreateAccounts(1, "100") + accountKeyPair := keyPairs[0] + account := accounts[0] + + // Time machine to Spain before Euros were a thing + pesetasAsset := txnbuild.CreditAsset{Code: "PTS", Issuer: master.Address()} + itest.MustEstablishTrustline(accountKeyPair, account, pesetasAsset) + // Confirm that the Trustline has the clawback flag + accountDetails = itest.MustGetAccount(accountKeyPair) + if tt.Len(accountDetails.Balances, 2) { + pts := accountDetails.Balances[0] + tt.Equal("PTS", pts.Code) + if tt.NotNil(pts.IsClawbackEnabled) { + tt.True(*pts.IsClawbackEnabled) + } + } + + // Clear the clawback flag + setTrustlineFlags := txnbuild.SetTrustLineFlags{ + Trustor: accountKeyPair.Address(), + Asset: pesetasAsset, + ClearFlags: []txnbuild.TrustLineFlag{ + 
txnbuild.TrustLineClawbackEnabled, + }, + } + submissionResp := itest.MustSubmitOperations(itest.MasterAccount(), master, &setTrustlineFlags) + + // make sure it was cleared + accountDetails = itest.MustGetAccount(accountKeyPair) + if tt.Len(accountDetails.Balances, 2) { + pts := accountDetails.Balances[0] + tt.Equal("PTS", pts.Code) + tt.Nil(pts.IsClawbackEnabled) + } + + // Check the operation details + opDetailsResponse, err := itest.Client().Operations(horizonclient.OperationRequest{ + ForTransaction: submissionResp.Hash, + }) + tt.NoError(err) + if tt.Len(opDetailsResponse.Embedded.Records, 1) { + setTrustlineFlagsDetails := opDetailsResponse.Embedded.Records[0].(operations.SetTrustLineFlags) + tt.Equal("PTS", setTrustlineFlagsDetails.Code) + tt.Equal(master.Address(), setTrustlineFlagsDetails.Issuer) + tt.Equal(accountKeyPair.Address(), setTrustlineFlagsDetails.Trustor) + if tt.Len(setTrustlineFlagsDetails.ClearFlags, 1) { + tt.True(xdr.TrustLineFlags(setTrustlineFlagsDetails.ClearFlags[0]).IsClawbackEnabledFlag()) + } + if tt.Len(setTrustlineFlagsDetails.ClearFlagsS, 1) { + tt.Equal(setTrustlineFlagsDetails.ClearFlagsS[0], "clawback_enabled") + } + tt.Len(setTrustlineFlagsDetails.SetFlags, 0) + tt.Len(setTrustlineFlagsDetails.SetFlagsS, 0) + } + + // Check the operation effects + effectsResponse, err := itest.Client().Effects(horizonclient.EffectRequest{ + ForTransaction: submissionResp.Hash, + }) + tt.NoError(err) + + if tt.Len(effectsResponse.Embedded.Records, 1) { + trustlineFlagsUpdated := effectsResponse.Embedded.Records[0].(effects.TrustlineFlagsUpdated) + tt.Equal(master.Address(), trustlineFlagsUpdated.Account) + tt.Equal(master.Address(), trustlineFlagsUpdated.Issuer) + tt.Equal("PTS", trustlineFlagsUpdated.Code) + tt.Nil(trustlineFlagsUpdated.Authorized) + tt.Nil(trustlineFlagsUpdated.AuthorizedToMaintainLiabilities) + if tt.NotNil(trustlineFlagsUpdated.ClawbackEnabled) { + tt.False(*trustlineFlagsUpdated.ClawbackEnabled) + } + } + + // Try to set the clawback flag (we shouldn't be able to) + setTrustlineFlags = txnbuild.SetTrustLineFlags{ + Trustor: accountKeyPair.Address(), + Asset: pesetasAsset, + SetFlags: []txnbuild.TrustLineFlag{ + txnbuild.TrustLineClawbackEnabled, + }, + } + _, err = itest.SubmitOperations(itest.MasterAccount(), master, &setTrustlineFlags) + if tt.Error(err) { + clientErr, ok := err.(*horizonclient.Error) + if tt.True(ok) { + tt.Equal(400, clientErr.Problem.Status) + resCodes, err := clientErr.ResultCodes() + tt.NoError(err) + tt.Equal(codes.OpMalformed, resCodes.OperationCodes[0]) + } + + } + +} diff --git a/services/horizon/internal/integration/db_test.go b/services/horizon/internal/integration/db_test.go new file mode 100644 index 0000000000..1d0a391327 --- /dev/null +++ b/services/horizon/internal/integration/db_test.go @@ -0,0 +1,382 @@ +package integration + +import ( + "context" + "fmt" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + horizon "github.com/stellar/go/services/horizon/internal" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/historyarchive" + horizoncmd "github.com/stellar/go/services/horizon/cmd" + "github.com/stellar/go/services/horizon/internal/db2/schema" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/db/dbtest" + 
"github.com/stellar/go/txnbuild" +) + +func generateLiquidityPoolOps(itest *integration.Test, tt *assert.Assertions) (lastLedger int32) { + + master := itest.Master() + keys, accounts := itest.CreateAccounts(2, "1000") + shareKeys, shareAccount := keys[0], accounts[0] + tradeKeys, tradeAccount := keys[1], accounts[1] + + itest.MustSubmitMultiSigOperations(shareAccount, []*keypair.Full{shareKeys, master}, + &txnbuild.ChangeTrust{ + Line: txnbuild.ChangeTrustAssetWrapper{ + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + }, + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.ChangeTrust{ + Line: txnbuild.LiquidityPoolShareChangeTrustAsset{ + LiquidityPoolParameters: txnbuild.LiquidityPoolParameters{ + AssetA: txnbuild.NativeAsset{}, + AssetB: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + Fee: 30, + }, + }, + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.Payment{ + SourceAccount: master.Address(), + Destination: shareAccount.GetAccountID(), + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + Amount: "1000", + }, + ) + + poolID, err := xdr.NewPoolId( + xdr.MustNewNativeAsset(), + xdr.MustNewCreditAsset("USD", master.Address()), + 30, + ) + tt.NoError(err) + poolIDHexString := xdr.Hash(poolID).HexString() + + itest.MustSubmitOperations(shareAccount, shareKeys, + &txnbuild.LiquidityPoolDeposit{ + LiquidityPoolID: [32]byte(poolID), + MaxAmountA: "400", + MaxAmountB: "777", + MinPrice: xdr.Price{1, 2}, + MaxPrice: xdr.Price{2, 1}, + }, + ) + + itest.MustSubmitOperations(tradeAccount, tradeKeys, + &txnbuild.ChangeTrust{ + Line: txnbuild.ChangeTrustAssetWrapper{ + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + }, + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.PathPaymentStrictReceive{ + SendAsset: txnbuild.NativeAsset{}, + DestAsset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + SendMax: "1000", + DestAmount: "2", + Destination: tradeKeys.Address(), + }, + ) + + pool, err := itest.Client().LiquidityPoolDetail(horizonclient.LiquidityPoolRequest{ + LiquidityPoolID: poolIDHexString, + }) + tt.NoError(err) + + txResp := itest.MustSubmitOperations(shareAccount, shareKeys, + &txnbuild.LiquidityPoolWithdraw{ + LiquidityPoolID: [32]byte(poolID), + Amount: pool.TotalShares, + MinAmountA: "10", + MinAmountB: "20", + }, + ) + + return txResp.Ledger +} + +func generatePaymentOps(itest *integration.Test, tt *assert.Assertions) (lastLedger int32) { + txResp := itest.MustSubmitOperations(itest.MasterAccount(), itest.Master(), + &txnbuild.Payment{ + Destination: itest.Master().Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + }, + ) + + return txResp.Ledger +} + +func initializeDBIntegrationTest(t *testing.T) (itest *integration.Test, reachedLedger int32) { + itest = integration.NewTest(t, integration.Config{}) + tt := assert.New(t) + + generatePaymentOps(itest, tt) + reachedLedger = generateLiquidityPoolOps(itest, tt) + + root, err := itest.Client().Root() + tt.NoError(err) + tt.LessOrEqual(reachedLedger, root.HorizonSequence) + + return +} + +func TestReingestDB(t *testing.T) { + itest, reachedLedger := initializeDBIntegrationTest(t) + tt := assert.New(t) + + // Create a fresh Horizon database + newDB := dbtest.Postgres(t) + // TODO: Unfortunately Horizon's ingestion System leaves open sessions behind,leading to + // a "database is being accessed by other users" error when trying to drop it + // defer newDB.Close() + freshHorizonPostgresURL := 
newDB.DSN + horizonConfig := itest.GetHorizonConfig() + horizonConfig.DatabaseURL = freshHorizonPostgresURL + // Initialize the DB schema + dbConn, err := db.Open("postgres", freshHorizonPostgresURL) + tt.NoError(err) + defer dbConn.Close() + _, err = schema.Migrate(dbConn.DB.DB, schema.MigrateUp, 0) + tt.NoError(err) + + t.Run("validate parallel range", func(t *testing.T) { + horizoncmd.RootCmd.SetArgs(command(horizonConfig, + "db", + "reingest", + "range", + "--parallel-workers=2", + "10", + "2", + )) + + assert.EqualError(t, horizoncmd.RootCmd.Execute(), "Invalid range: {10 2} from > to") + }) + + // cap reachedLedger to the nearest checkpoint ledger because reingest range cannot ingest past the most + // recent checkpoint ledger when using captive core + toLedger := uint32(reachedLedger) + archive, err := historyarchive.Connect(horizonConfig.HistoryArchiveURLs[0], historyarchive.ConnectOptions{ + NetworkPassphrase: horizonConfig.NetworkPassphrase, + CheckpointFrequency: horizonConfig.CheckpointFrequency, + }) + tt.NoError(err) + + // make sure a full checkpoint has elapsed otherwise there will be nothing to reingest + var latestCheckpoint uint32 + publishedFirstCheckpoint := func() bool { + has, requestErr := archive.GetRootHAS() + tt.NoError(requestErr) + latestCheckpoint = has.CurrentLedger + return latestCheckpoint > 1 + } + tt.Eventually(publishedFirstCheckpoint, 10*time.Second, time.Second) + + if toLedger > latestCheckpoint { + toLedger = latestCheckpoint + } + + // We just want to test reingestion, so there's no reason for a background + // Horizon to run. Keeping it running will actually cause the Captive Core + // subprocesses to conflict. + itest.StopHorizon() + + horizonConfig.CaptiveCoreConfigPath = filepath.Join( + filepath.Dir(horizonConfig.CaptiveCoreConfigPath), + "captive-core-reingest-range-integration-tests.cfg", + ) + + horizoncmd.RootCmd.SetArgs(command(horizonConfig, "db", + "reingest", + "range", + "--parallel-workers=1", + "1", + fmt.Sprintf("%d", toLedger), + )) + + tt.NoError(horizoncmd.RootCmd.Execute()) + tt.NoError(horizoncmd.RootCmd.Execute(), "Repeat the same reingest range against db, should not have errors.") +} + +func command(horizonConfig horizon.Config, args ...string) []string { + return append([]string{ + "--stellar-core-url", + horizonConfig.StellarCoreURL, + "--history-archive-urls", + horizonConfig.HistoryArchiveURLs[0], + "--db-url", + horizonConfig.DatabaseURL, + "--stellar-core-db-url", + horizonConfig.StellarCoreDatabaseURL, + "--stellar-core-binary-path", + horizonConfig.CaptiveCoreBinaryPath, + "--captive-core-config-path", + horizonConfig.CaptiveCoreConfigPath, + "--enable-captive-core-ingestion=" + strconv.FormatBool(horizonConfig.EnableCaptiveCoreIngestion), + "--network-passphrase", + horizonConfig.NetworkPassphrase, + // due to ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING + "--checkpoint-frequency", + "8", + }, args...) 
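// Annotation, not part of the diff: the reingest test above waits for the
// history archive to publish its first checkpoint before running
// `horizon db reingest range`. This is a minimal sketch of that wait as a
// standalone helper; the URL, passphrase and frequency arguments are
// placeholders, the historyarchive calls are the ones used in the test. Note
// the loop has no deadline of its own, the test bounds it with assert.Eventually.
package example

import (
	"time"

	"github.com/stellar/go/historyarchive"
)

func waitForFirstCheckpoint(archiveURL, passphrase string, checkpointFrequency uint32) (uint32, error) {
	archive, err := historyarchive.Connect(archiveURL, historyarchive.ConnectOptions{
		NetworkPassphrase:   passphrase,
		CheckpointFrequency: checkpointFrequency,
	})
	if err != nil {
		return 0, err
	}

	for {
		has, err := archive.GetRootHAS()
		if err != nil {
			return 0, err
		}
		// CurrentLedger > 1 means at least one full checkpoint has been published.
		if has.CurrentLedger > 1 {
			return has.CurrentLedger, nil
		}
		time.Sleep(time.Second)
	}
}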
+} + +func TestFillGaps(t *testing.T) { + itest, reachedLedger := initializeDBIntegrationTest(t) + tt := assert.New(t) + + // Create a fresh Horizon database + newDB := dbtest.Postgres(t) + // TODO: Unfortunately Horizon's ingestion System leaves open sessions behind,leading to + // a "database is being accessed by other users" error when trying to drop it + // defer newDB.Close() + freshHorizonPostgresURL := newDB.DSN + horizonConfig := itest.GetHorizonConfig() + horizonConfig.DatabaseURL = freshHorizonPostgresURL + // Initialize the DB schema + dbConn, err := db.Open("postgres", freshHorizonPostgresURL) + defer dbConn.Close() + _, err = schema.Migrate(dbConn.DB.DB, schema.MigrateUp, 0) + tt.NoError(err) + + // cap reachedLedger to the nearest checkpoint ledger because reingest range cannot ingest past the most + // recent checkpoint ledger when using captive core + toLedger := uint32(reachedLedger) + archive, err := historyarchive.Connect(horizonConfig.HistoryArchiveURLs[0], historyarchive.ConnectOptions{ + NetworkPassphrase: horizonConfig.NetworkPassphrase, + CheckpointFrequency: horizonConfig.CheckpointFrequency, + }) + tt.NoError(err) + + t.Run("validate parallel range", func(t *testing.T) { + horizoncmd.RootCmd.SetArgs(command(horizonConfig, + "db", + "fill-gaps", + "--parallel-workers=2", + "10", + "2", + )) + + assert.EqualError(t, horizoncmd.RootCmd.Execute(), "Invalid range: {10 2} from > to") + }) + + // make sure a full checkpoint has elapsed otherwise there will be nothing to reingest + var latestCheckpoint uint32 + publishedFirstCheckpoint := func() bool { + has, requestErr := archive.GetRootHAS() + tt.NoError(requestErr) + latestCheckpoint = has.CurrentLedger + return latestCheckpoint > 1 + } + tt.Eventually(publishedFirstCheckpoint, 10*time.Second, time.Second) + + if toLedger > latestCheckpoint { + toLedger = latestCheckpoint + } + + // We just want to test reingestion, so there's no reason for a background + // Horizon to run. Keeping it running will actually cause the Captive Core + // subprocesses to conflict. 
+ itest.StopHorizon() + + historyQ := history.Q{dbConn} + var oldestLedger, latestLedger int64 + tt.NoError(historyQ.ElderLedger(context.Background(), &oldestLedger)) + tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) + tt.NoError(historyQ.DeleteRangeAll(context.Background(), oldestLedger, latestLedger)) + + horizonConfig.CaptiveCoreConfigPath = filepath.Join( + filepath.Dir(horizonConfig.CaptiveCoreConfigPath), + "captive-core-reingest-range-integration-tests.cfg", + ) + horizoncmd.RootCmd.SetArgs(command(horizonConfig, "db", "fill-gaps", "--parallel-workers=1")) + tt.NoError(horizoncmd.RootCmd.Execute()) + + tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) + tt.Equal(int64(0), latestLedger) + + horizoncmd.RootCmd.SetArgs(command(horizonConfig, "db", "fill-gaps", "3", "4")) + tt.NoError(horizoncmd.RootCmd.Execute()) + tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) + tt.NoError(historyQ.ElderLedger(context.Background(), &oldestLedger)) + tt.Equal(int64(3), oldestLedger) + tt.Equal(int64(4), latestLedger) + + horizoncmd.RootCmd.SetArgs(command(horizonConfig, "db", "fill-gaps", "6", "7")) + tt.NoError(horizoncmd.RootCmd.Execute()) + tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) + tt.NoError(historyQ.ElderLedger(context.Background(), &oldestLedger)) + tt.Equal(int64(3), oldestLedger) + tt.Equal(int64(7), latestLedger) + var gaps []history.LedgerRange + gaps, err = historyQ.GetLedgerGaps(context.Background()) + tt.NoError(err) + tt.Equal([]history.LedgerRange{{StartSequence: 5, EndSequence: 5}}, gaps) + + horizoncmd.RootCmd.SetArgs(command(horizonConfig, "db", "fill-gaps")) + tt.NoError(horizoncmd.RootCmd.Execute()) + tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) + tt.NoError(historyQ.ElderLedger(context.Background(), &oldestLedger)) + tt.Equal(int64(3), oldestLedger) + tt.Equal(int64(7), latestLedger) + gaps, err = historyQ.GetLedgerGaps(context.Background()) + tt.NoError(err) + tt.Empty(gaps) + + horizoncmd.RootCmd.SetArgs(command(horizonConfig, "db", "fill-gaps", "2", "8")) + tt.NoError(horizoncmd.RootCmd.Execute()) + tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) + tt.NoError(historyQ.ElderLedger(context.Background(), &oldestLedger)) + tt.Equal(int64(2), oldestLedger) + tt.Equal(int64(8), latestLedger) + gaps, err = historyQ.GetLedgerGaps(context.Background()) + tt.NoError(err) + tt.Empty(gaps) +} + +func TestResumeFromInitializedDB(t *testing.T) { + itest, reachedLedger := initializeDBIntegrationTest(t) + tt := assert.New(t) + + // Stop the integration test, and restart it with the same database + err := itest.RestartHorizon() + tt.NoError(err) + + successfullyResumed := func() bool { + root, err := itest.Client().Root() + tt.NoError(err) + // It must be able to reach the ledger and surpass it + const ledgersPastStopPoint = 4 + return root.HorizonSequence > (reachedLedger + ledgersPastStopPoint) + } + + tt.Eventually(successfullyResumed, 1*time.Minute, 1*time.Second) +} diff --git a/services/horizon/internal/integration/liquidity_pool_test.go b/services/horizon/internal/integration/liquidity_pool_test.go new file mode 100644 index 0000000000..60a5e99f02 --- /dev/null +++ b/services/horizon/internal/integration/liquidity_pool_test.go @@ -0,0 +1,732 @@ +package integration + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/amount" + "github.com/stellar/go/clients/horizonclient" + 
"github.com/stellar/go/keypair" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" +) + +func TestLiquidityPoolHappyPath(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + keys, accounts := itest.CreateAccounts(2, "1000") + shareKeys, shareAccount := keys[0], accounts[0] + tradeKeys, tradeAccount := keys[1], accounts[1] + + itest.MustSubmitMultiSigOperations(shareAccount, []*keypair.Full{shareKeys, master}, + &txnbuild.ChangeTrust{ + Line: txnbuild.ChangeTrustAssetWrapper{ + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + }, + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.ChangeTrust{ + Line: txnbuild.LiquidityPoolShareChangeTrustAsset{ + LiquidityPoolParameters: txnbuild.LiquidityPoolParameters{ + AssetA: txnbuild.NativeAsset{}, + AssetB: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + Fee: 30, + }, + }, + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.Payment{ + SourceAccount: master.Address(), + Destination: shareAccount.GetAccountID(), + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + Amount: "1000", + }, + ) + + poolID, err := xdr.NewPoolId( + xdr.MustNewNativeAsset(), + xdr.MustNewCreditAsset("USD", master.Address()), + 30, + ) + tt.NoError(err) + poolIDHexString := xdr.Hash(poolID).HexString() + + pools, err := itest.Client().LiquidityPools(horizonclient.LiquidityPoolsRequest{}) + tt.NoError(err) + tt.Len(pools.Embedded.Records, 1) + + pool := pools.Embedded.Records[0] + tt.Equal(poolIDHexString, pool.ID) + tt.Equal(uint32(30), pool.FeeBP) + tt.Equal("constant_product", pool.Type) + tt.Equal("0.0000000", pool.TotalShares) + tt.Equal(uint64(1), pool.TotalTrustlines) + + tt.Equal("0.0000000", pool.Reserves[0].Amount) + tt.Equal("native", pool.Reserves[0].Asset) + tt.Equal("0.0000000", pool.Reserves[1].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), pool.Reserves[1].Asset) + + itest.MustSubmitOperations(shareAccount, shareKeys, + &txnbuild.LiquidityPoolDeposit{ + LiquidityPoolID: [32]byte(poolID), + MaxAmountA: "400", + MaxAmountB: "777", + MinPrice: xdr.Price{1, 2}, + MaxPrice: xdr.Price{2, 1}, + }, + ) + + pool, err = itest.Client().LiquidityPoolDetail(horizonclient.LiquidityPoolRequest{ + LiquidityPoolID: poolIDHexString, + }) + tt.NoError(err) + + tt.Equal(poolIDHexString, pool.ID) + tt.Equal(uint64(1), pool.TotalTrustlines) + + tt.Equal("400.0000000", pool.Reserves[0].Amount) + tt.Equal("native", pool.Reserves[0].Asset) + tt.Equal("777.0000000", pool.Reserves[1].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), pool.Reserves[1].Asset) + + itest.MustSubmitOperations(tradeAccount, tradeKeys, + &txnbuild.ChangeTrust{ + Line: txnbuild.ChangeTrustAssetWrapper{ + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + }, + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.PathPaymentStrictReceive{ + SendAsset: txnbuild.NativeAsset{}, + DestAsset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + SendMax: "1000", + DestAmount: "2", + Destination: tradeKeys.Address(), + }, + ) + + account, err := itest.Client().AccountDetail(horizonclient.AccountRequest{ + AccountID: shareKeys.Address(), + }) + tt.NoError(err) + tt.Len(account.Balances, 3) + + liquidityPoolBalance := 
account.Balances[0] + tt.Equal("liquidity_pool_shares", liquidityPoolBalance.Asset.Type) + tt.Equal(poolIDHexString, liquidityPoolBalance.LiquidityPoolId) + tt.Equal("557.4943945", liquidityPoolBalance.Balance) + + usdBalance := account.Balances[1] + tt.Equal("credit_alphanum4", usdBalance.Asset.Type) + tt.Equal("USD", usdBalance.Asset.Code) + tt.Equal(master.Address(), usdBalance.Asset.Issuer) + tt.Equal("223.0000000", usdBalance.Balance) + + nativeBalance := account.Balances[2] + tt.Equal("native", nativeBalance.Asset.Type) + + stats, err := itest.Client().Assets(horizonclient.AssetRequest{}) + tt.NoError(err) + tt.Len(stats.Embedded.Records, 1) + + stat := stats.Embedded.Records[0] + tt.Equal("credit_alphanum4", stat.Asset.Type) + tt.Equal("USD", stat.Asset.Code) + tt.Equal(master.Address(), stat.Asset.Issuer) + tt.Equal(int32(2), stat.NumAccounts) + tt.Equal("225.0000000", stat.Amount) + tt.Equal(int32(1), stat.NumLiquidityPools) + tt.Equal("775.0000000", stat.LiquidityPoolsAmount) + + itest.MustSubmitOperations(shareAccount, shareKeys, + &txnbuild.LiquidityPoolWithdraw{ + LiquidityPoolID: [32]byte(poolID), + Amount: pool.TotalShares, + MinAmountA: "10", + MinAmountB: "20", + }, + ) + + itest.MustSubmitOperations(shareAccount, shareKeys, + // Clear trustline... + &txnbuild.Payment{ + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + Amount: "998", + Destination: master.Address(), + }, + // ...and remove it. It should also remove LP. + &txnbuild.ChangeTrust{ + Line: txnbuild.LiquidityPoolShareChangeTrustAsset{ + LiquidityPoolParameters: txnbuild.LiquidityPoolParameters{ + AssetA: txnbuild.NativeAsset{}, + AssetB: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + Fee: 30, + }, + }, + Limit: "0", + }, + ) + + account, err = itest.Client().AccountDetail(horizonclient.AccountRequest{ + AccountID: shareKeys.Address(), + }) + tt.NoError(err) + tt.Len(account.Balances, 2) + + // Shouldn't contain liquidity_pool_shares balance + usdBalance = account.Balances[0] + tt.Equal("credit_alphanum4", usdBalance.Asset.Type) + tt.Equal("USD", usdBalance.Asset.Code) + tt.Equal(master.Address(), usdBalance.Asset.Issuer) + tt.Equal("0.0000000", usdBalance.Balance) + + nativeBalance = account.Balances[1] + tt.Equal("native", nativeBalance.Asset.Type) + + ops, err := itest.Client().Operations(horizonclient.OperationRequest{ + ForLiquidityPool: poolIDHexString, + }) + tt.NoError(err) + + // We expect the following ops for this liquidity pool: + // 1. change_trust creating a trust to LP. + // 2. liquidity_pool_deposit. + // 3. path_payment + // 4. liquidity_pool_withdraw. + // 5. change_trust removing a trust to LP. 
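For reference, the 557.4943945 share figure asserted in this test is not arbitrary: CAP-38 constant-product pools mint sqrt(amountA * amountB) shares on the first deposit into an empty pool, computed in stroops, and the asserted value is consistent with truncating that square root. A small standalone sketch (not part of the change; `amount` is the same helper package these tests already import):

```go
package main

import (
	"fmt"
	"math"

	"github.com/stellar/go/amount"
)

func main() {
	// The test deposits 400 XLM and 777 USD, i.e. 4e9 and 7.77e9 stroops.
	// sqrt(4e9 * 7.77e9) ≈ 5574943945.9; truncating to whole stroops gives
	// the pool shares minted by the first deposit.
	shares := int64(math.Sqrt(4e9 * 7.77e9))
	fmt.Println(amount.StringFromInt64(shares)) // 557.4943945
}
```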
+ tt.Len(ops.Embedded.Records, 5) + + op1 := (ops.Embedded.Records[0]).(operations.ChangeTrust) + tt.Equal("change_trust", op1.Type) + tt.Equal("liquidity_pool_shares", op1.Asset.Type) + tt.Equal(poolIDHexString, op1.LiquidityPoolID) + tt.Equal("922337203685.4775807", op1.Limit) + + op2 := (ops.Embedded.Records[1]).(operations.LiquidityPoolDeposit) + tt.Equal("liquidity_pool_deposit", op2.Type) + tt.Equal(poolIDHexString, op2.LiquidityPoolID) + tt.Equal("0.5000000", op2.MinPrice) + tt.Equal("2.0000000", op2.MaxPrice) + tt.Equal("native", op2.ReservesDeposited[0].Asset) + tt.Equal("400.0000000", op2.ReservesDeposited[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), op2.ReservesDeposited[1].Asset) + tt.Equal("777.0000000", op2.ReservesDeposited[1].Amount) + tt.Equal("native", op2.ReservesMax[0].Asset) + tt.Equal("400.0000000", op2.ReservesMax[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), op2.ReservesMax[1].Asset) + tt.Equal("777.0000000", op2.ReservesMax[1].Amount) + tt.Equal("557.4943945", op2.SharesReceived) + + op3 := (ops.Embedded.Records[2]).(operations.PathPayment) + tt.Equal("path_payment_strict_receive", op3.Payment.Base.Type) + tt.Equal("2.0000000", op3.Amount) + tt.Equal("1.0353642", op3.SourceAmount) + tt.Equal("1000.0000000", op3.SourceMax) + tt.Equal("native", op3.SourceAssetType) + tt.Equal("credit_alphanum4", op3.Payment.Asset.Type) + tt.Equal("USD", op3.Payment.Asset.Code) + tt.Equal(master.Address(), op3.Payment.Asset.Issuer) + + op4 := (ops.Embedded.Records[3]).(operations.LiquidityPoolWithdraw) + tt.Equal("liquidity_pool_withdraw", op4.Type) + tt.Equal(poolIDHexString, op4.LiquidityPoolID) + + tt.Equal("native", op4.ReservesMin[0].Asset) + tt.Equal("10.0000000", op4.ReservesMin[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), op4.ReservesMin[1].Asset) + tt.Equal("20.0000000", op4.ReservesMin[1].Amount) + + tt.Equal("native", op4.ReservesReceived[0].Asset) + tt.Equal("401.0353642", op4.ReservesReceived[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), op4.ReservesReceived[1].Asset) + tt.Equal("775.0000000", op4.ReservesReceived[1].Amount) + + tt.Equal("557.4943945", op4.Shares) + + op5 := (ops.Embedded.Records[4]).(operations.ChangeTrust) + tt.Equal("change_trust", op5.Type) + tt.Equal("liquidity_pool_shares", op5.Asset.Type) + tt.Equal(poolIDHexString, op5.LiquidityPoolID) + tt.Equal("0.0000000", op5.Limit) + + effs, err := itest.Client().Effects(horizonclient.EffectRequest{ + ForLiquidityPool: poolIDHexString, + }) + tt.NoError(err) + + // We expect the following effects for this liquidity pool: + // 1. trustline_created creating liquidity_pool_shares trust_line + // 2. liquidity_pool_created + // 3. liquidity_pool_deposited + // 4. account_credited - connected to trade + // 5. account_debited - connected to trade + // 6. liquidity_pool_trade + // 7. liquidity_pool_withdrew + // 8. trustline_removed removing liquidity_pool_shares trust_line + // 9. 
liquidity_pool_removed + tt.Len(effs.Embedded.Records, 9) + + ef1 := (effs.Embedded.Records[0]).(effects.TrustlineCreated) + tt.Equal(shareKeys.Address(), ef1.Account) + tt.Equal("trustline_created", ef1.Type) + tt.Equal("liquidity_pool_shares", ef1.Asset.Type) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef1.LiquidityPoolID) + tt.Equal("922337203685.4775807", ef1.Limit) + + ef2 := (effs.Embedded.Records[1]).(effects.LiquidityPoolCreated) + tt.Equal(shareKeys.Address(), ef2.Account) + tt.Equal("liquidity_pool_created", ef2.Type) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef2.LiquidityPool.ID) + tt.Equal("constant_product", ef2.LiquidityPool.Type) + tt.Equal(uint32(30), ef2.LiquidityPool.FeeBP) + tt.Equal("0.0000000", ef2.LiquidityPool.TotalShares) + tt.Equal(uint64(1), ef2.LiquidityPool.TotalTrustlines) + tt.Equal("native", ef2.LiquidityPool.Reserves[0].Asset) + tt.Equal("0.0000000", ef2.LiquidityPool.Reserves[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef2.LiquidityPool.Reserves[1].Asset) + tt.Equal("0.0000000", ef2.LiquidityPool.Reserves[1].Amount) + + ef3 := (effs.Embedded.Records[2]).(effects.LiquidityPoolDeposited) + tt.Equal("liquidity_pool_deposited", ef3.Type) + tt.Equal(shareKeys.Address(), ef3.Account) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef3.LiquidityPool.ID) + tt.Equal("constant_product", ef3.LiquidityPool.Type) + tt.Equal(uint32(30), ef3.LiquidityPool.FeeBP) + tt.Equal("557.4943945", ef3.LiquidityPool.TotalShares) + tt.Equal(uint64(1), ef3.LiquidityPool.TotalTrustlines) + + tt.Equal("native", ef3.LiquidityPool.Reserves[0].Asset) + tt.Equal("400.0000000", ef3.LiquidityPool.Reserves[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef3.LiquidityPool.Reserves[1].Asset) + tt.Equal("777.0000000", ef3.LiquidityPool.Reserves[1].Amount) + + tt.Equal("native", ef3.ReservesDeposited[0].Asset) + tt.Equal("400.0000000", ef3.ReservesDeposited[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef3.ReservesDeposited[1].Asset) + tt.Equal("777.0000000", ef3.ReservesDeposited[1].Amount) + + tt.Equal("557.4943945", ef3.SharesReceived) + + ef4 := (effs.Embedded.Records[3]).(effects.AccountCredited) + tt.Equal("account_credited", ef4.Base.Type) + // TODO - is it really LP effect? + + ef5 := (effs.Embedded.Records[4]).(effects.AccountDebited) + tt.Equal("account_debited", ef5.Base.Type) + // TODO - is it really LP effect? 
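The trade figures asserted just below (1.0353642 XLM paid in, reserves moving to 401.0353642 XLM / 775.0000000 USD) follow from constant-product pricing with the pool's 30 bps fee when exactly 2 USD is taken out of a 400 XLM / 777 USD pool. A rough sketch of that arithmetic, ignoring the protocol's exact integer rounding:

```go
package main

import "fmt"

func main() {
	// Strict-receive trade against the 400 XLM / 777 USD pool: the buyer wants
	// exactly 2 USD out, and the pool charges a 30 bps fee on the amount paid in.
	x, y, dy, feeBP := 400.0, 777.0, 2.0, 30.0

	dx := x * dy / ((y - dy) * (1 - feeBP/10000)) // XLM that must be paid in
	fmt.Printf("paid in:      %.7f XLM\n", dx)    // ≈ 1.0353642
	fmt.Printf("new reserves: %.7f XLM / %.7f USD\n", x+dx, y-dy) // ≈ 401.0353642 / 775.0000000
}
```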
+ + ef6 := (effs.Embedded.Records[5]).(effects.LiquidityPoolTrade) + tt.Equal("liquidity_pool_trade", ef6.Type) + tt.Equal(tradeKeys.Address(), ef6.Account) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef6.LiquidityPool.ID) + tt.Equal("constant_product", ef6.LiquidityPool.Type) + tt.Equal(uint32(30), ef6.LiquidityPool.FeeBP) + tt.Equal("557.4943945", ef3.LiquidityPool.TotalShares) + tt.Equal(uint64(1), ef6.LiquidityPool.TotalTrustlines) + tt.Equal("native", ef6.LiquidityPool.Reserves[0].Asset) + tt.Equal("401.0353642", ef6.LiquidityPool.Reserves[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef6.LiquidityPool.Reserves[1].Asset) + tt.Equal("775.0000000", ef6.LiquidityPool.Reserves[1].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef6.Sold.Asset) + tt.Equal("2.0000000", ef6.Sold.Amount) + tt.Equal("native", ef6.Bought.Asset) + tt.Equal("1.0353642", ef6.Bought.Amount) + + ef7 := (effs.Embedded.Records[6]).(effects.LiquidityPoolWithdrew) + tt.Equal("liquidity_pool_withdrew", ef7.Type) + tt.Equal(shareKeys.Address(), ef7.Account) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef7.LiquidityPool.ID) + tt.Equal("constant_product", ef7.LiquidityPool.Type) + tt.Equal(uint32(30), ef7.LiquidityPool.FeeBP) + tt.Equal("0.0000000", ef7.LiquidityPool.TotalShares) + tt.Equal(uint64(1), ef7.LiquidityPool.TotalTrustlines) + + tt.Equal("native", ef7.LiquidityPool.Reserves[0].Asset) + tt.Equal("0.0000000", ef7.LiquidityPool.Reserves[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef7.LiquidityPool.Reserves[1].Asset) + tt.Equal("0.0000000", ef7.LiquidityPool.Reserves[1].Amount) + + tt.Equal("native", ef7.ReservesReceived[0].Asset) + tt.Equal("401.0353642", ef7.ReservesReceived[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef7.ReservesReceived[1].Asset) + tt.Equal("775.0000000", ef7.ReservesReceived[1].Amount) + + tt.Equal("557.4943945", ef7.SharesRedeemed) + + ef8 := (effs.Embedded.Records[7]).(effects.TrustlineRemoved) + tt.Equal("trustline_removed", ef8.Type) + tt.Equal(shareKeys.Address(), ef8.Account) + tt.Equal("liquidity_pool_shares", ef8.Asset.Type) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef8.LiquidityPoolID) + tt.Equal("0.0000000", ef8.Limit) + + ef9 := (effs.Embedded.Records[8]).(effects.LiquidityPoolRemoved) + tt.Equal("liquidity_pool_removed", ef9.Type) + tt.Equal(shareKeys.Address(), ef9.Account) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef9.LiquidityPoolID) + + trades, err := itest.Client().Trades(horizonclient.TradeRequest{}) + tt.NoError(err) + + tt.Len(trades.Embedded.Records, 1) + + trade1 := trades.Embedded.Records[0] + tt.Equal("liquidity_pool", trade1.TradeType) + + tt.Equal(poolIDHexString, trade1.BaseLiquidityPoolID) + tt.Equal(uint32(30), trade1.LiquidityPoolFeeBP) + tt.Equal("2.0000000", trade1.BaseAmount) + tt.Equal("credit_alphanum4", trade1.BaseAssetType) + tt.Equal("USD", trade1.BaseAssetCode) + tt.Equal(master.Address(), trade1.BaseAssetIssuer) + + tt.Equal(tradeKeys.Address(), trade1.CounterAccount) + tt.Equal("1.0353642", trade1.CounterAmount) + tt.Equal("native", trade1.CounterAssetType) + + tt.Equal(int64(10353642), trade1.Price.N) + tt.Equal(int64(20000000), trade1.Price.D) +} + +func TestLiquidityPoolRevoke(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + keys, accounts := itest.CreateAccounts(2, 
"1000") + shareKeys, shareAccount := keys[0], accounts[0] + + poolID, err := xdr.NewPoolId( + xdr.MustNewNativeAsset(), + xdr.MustNewCreditAsset("USD", master.Address()), + 30, + ) + tt.NoError(err) + poolIDHexString := xdr.Hash(poolID).HexString() + + itest.MustSubmitMultiSigOperations(shareAccount, []*keypair.Full{shareKeys, master}, + &txnbuild.SetOptions{ + SourceAccount: master.Address(), + SetFlags: []txnbuild.AccountFlag{ + txnbuild.AuthRevocable, + }, + }, + &txnbuild.ChangeTrust{ + Line: txnbuild.ChangeTrustAssetWrapper{ + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + }, + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.ChangeTrust{ + Line: txnbuild.LiquidityPoolShareChangeTrustAsset{ + LiquidityPoolParameters: txnbuild.LiquidityPoolParameters{ + AssetA: txnbuild.NativeAsset{}, + AssetB: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + Fee: 30, + }, + }, + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.Payment{ + SourceAccount: master.Address(), + Destination: shareAccount.GetAccountID(), + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + Amount: "1000", + }, + &txnbuild.LiquidityPoolDeposit{ + LiquidityPoolID: [32]byte(poolID), + MaxAmountA: "400", + MaxAmountB: "777", + MinPrice: xdr.Price{1, 2}, + MaxPrice: xdr.Price{2, 1}, + }, + &txnbuild.SetTrustLineFlags{ + SourceAccount: master.Address(), + Trustor: shareKeys.Address(), + Asset: txnbuild.CreditAsset{ + Code: "USD", + Issuer: master.Address(), + }, + ClearFlags: []txnbuild.TrustLineFlag{ + txnbuild.TrustLineAuthorized, + }, + }, + ) + + // Check if claimable balances have been created + claimableBalances, err := itest.Client().ClaimableBalances(horizonclient.ClaimableBalanceRequest{}) + tt.NoError(err) + tt.Len(claimableBalances.Embedded.Records, 2) + + // The list is sorted by ID and preimage consists of Account ID which can + // differ between test runs. Flip the order if the first one is no native. + if claimableBalances.Embedded.Records[0].Asset != "native" { + claimableBalances.Embedded.Records[0], claimableBalances.Embedded.Records[1] = + claimableBalances.Embedded.Records[1], claimableBalances.Embedded.Records[0] + } + + cb1 := claimableBalances.Embedded.Records[0] + tt.Equal("native", cb1.Asset) + tt.Equal("400.0000000", cb1.Amount) + tt.Equal(shareAccount.GetAccountID(), cb1.Claimants[0].Destination) + tt.Equal(xdr.ClaimPredicateTypeClaimPredicateUnconditional, cb1.Claimants[0].Predicate.Type) + + cb2 := claimableBalances.Embedded.Records[1] + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), cb2.Asset) + tt.Equal("777.0000000", cb2.Amount) + tt.Equal(shareAccount.GetAccountID(), cb2.Claimants[0].Destination) + tt.Equal(xdr.ClaimPredicateTypeClaimPredicateUnconditional, cb2.Claimants[0].Predicate.Type) + + ops, err := itest.Client().Operations(horizonclient.OperationRequest{ + ForLiquidityPool: poolIDHexString, + }) + tt.NoError(err) + + // We expect the following ops for this liquidity pool: + // 1. change_trust creating a trust to LP. + // 2. liquidity_pool_deposit. + // 3. set_trust_line_flags revoking assets from LP. 
+ tt.Len(ops.Embedded.Records, 3) + + op1 := (ops.Embedded.Records[0]).(operations.ChangeTrust) + tt.Equal("change_trust", op1.Type) + tt.Equal("liquidity_pool_shares", op1.Asset.Type) + tt.Equal(poolIDHexString, op1.LiquidityPoolID) + tt.Equal("922337203685.4775807", op1.Limit) + + op2 := (ops.Embedded.Records[1]).(operations.LiquidityPoolDeposit) + tt.Equal("liquidity_pool_deposit", op2.Type) + tt.Equal(poolIDHexString, op2.LiquidityPoolID) + tt.Equal("0.5000000", op2.MinPrice) + tt.Equal("2.0000000", op2.MaxPrice) + tt.Equal("native", op2.ReservesDeposited[0].Asset) + tt.Equal("400.0000000", op2.ReservesDeposited[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), op2.ReservesDeposited[1].Asset) + tt.Equal("777.0000000", op2.ReservesDeposited[1].Amount) + tt.Equal("557.4943945", op2.SharesReceived) + + op3 := (ops.Embedded.Records[2]).(operations.SetTrustLineFlags) + tt.Equal("set_trust_line_flags", op3.Base.Type) + tt.Equal("credit_alphanum4", op3.Asset.Type) + tt.Equal("USD", op3.Asset.Code) + tt.Equal(master.Address(), op3.Asset.Issuer) + tt.Equal("authorized", op3.ClearFlagsS[0]) + + effs, err := itest.Client().Effects(horizonclient.EffectRequest{ + ForLiquidityPool: poolIDHexString, + Limit: 20, + }) + tt.NoError(err) + // We expect the following effects for this liquidity pool: + // 1. trustline_created creating liquidity_pool_shares trust_line + // 2. liquidity_pool_created + // 3. liquidity_pool_deposited + // 4. trustline_flags_updated - revoking LP assets + // 5. claimable_balance_created - creating CB for asset A + // 6. claimable_balance_claimant_created - claimant for CB above + // 7. claimable_balance_created - creating CB for asset B + // 8. claimable_balance_claimant_created - claimant for CB above + // 9. liquidity_pool_revoked + // 10. claimable_balance_sponsorship_created + // 11. claimable_balance_sponsorship_created + // 12. 
liquidity_pool_removed - because no more assets inside + tt.Len(effs.Embedded.Records, 12) + + ef1 := (effs.Embedded.Records[0]).(effects.TrustlineCreated) + tt.Equal(shareKeys.Address(), ef1.Account) + tt.Equal("trustline_created", ef1.Type) + tt.Equal("liquidity_pool_shares", ef1.Asset.Type) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef1.LiquidityPoolID) + tt.Equal("922337203685.4775807", ef1.Limit) + + ef2 := (effs.Embedded.Records[1]).(effects.LiquidityPoolCreated) + tt.Equal(shareKeys.Address(), ef2.Account) + tt.Equal("liquidity_pool_created", ef2.Type) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef2.LiquidityPool.ID) + tt.Equal("constant_product", ef2.LiquidityPool.Type) + tt.Equal(uint32(30), ef2.LiquidityPool.FeeBP) + tt.Equal("0.0000000", ef2.LiquidityPool.TotalShares) + tt.Equal(uint64(1), ef2.LiquidityPool.TotalTrustlines) + tt.Equal("native", ef2.LiquidityPool.Reserves[0].Asset) + tt.Equal("0.0000000", ef2.LiquidityPool.Reserves[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef2.LiquidityPool.Reserves[1].Asset) + tt.Equal("0.0000000", ef2.LiquidityPool.Reserves[1].Amount) + + ef3 := (effs.Embedded.Records[2]).(effects.LiquidityPoolDeposited) + tt.Equal("liquidity_pool_deposited", ef3.Type) + tt.Equal(shareKeys.Address(), ef3.Account) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef3.LiquidityPool.ID) + tt.Equal("constant_product", ef3.LiquidityPool.Type) + tt.Equal(uint32(30), ef3.LiquidityPool.FeeBP) + tt.Equal("557.4943945", ef3.LiquidityPool.TotalShares) + tt.Equal(uint64(1), ef3.LiquidityPool.TotalTrustlines) + + ef4 := (effs.Embedded.Records[3]).(effects.TrustlineFlagsUpdated) + tt.Equal("trustline_flags_updated", ef4.Base.Type) + tt.Equal(master.Address(), ef4.Account) + tt.Equal("USD", ef4.Asset.Code) + tt.Equal(master.Address(), ef4.Asset.Issuer) + tt.Equal(shareAccount.GetAccountID(), ef4.Trustor) + + ef5 := (effs.Embedded.Records[4]).(effects.ClaimableBalanceCreated) + tt.Equal("claimable_balance_created", ef5.Type) + tt.Equal("native", ef5.Asset) + tt.Equal("400.0000000", ef5.Amount) + + ef6 := (effs.Embedded.Records[5]).(effects.ClaimableBalanceClaimantCreated) + tt.Equal("claimable_balance_claimant_created", ef6.Type) + tt.Equal("native", ef6.Asset) + tt.Equal("400.0000000", ef6.Amount) + tt.Equal(shareKeys.Address(), ef6.Account) + tt.Equal(xdr.ClaimPredicateTypeClaimPredicateUnconditional, ef6.Predicate.Type) + + ef7 := (effs.Embedded.Records[6]).(effects.ClaimableBalanceCreated) + tt.Equal("claimable_balance_created", ef7.Type) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef7.Asset) + tt.Equal("777.0000000", ef7.Amount) + + ef8 := (effs.Embedded.Records[7]).(effects.ClaimableBalanceClaimantCreated) + tt.Equal("claimable_balance_claimant_created", ef8.Type) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef8.Asset) + tt.Equal("777.0000000", ef8.Amount) + tt.Equal(shareKeys.Address(), ef8.Account) + tt.Equal(xdr.ClaimPredicateTypeClaimPredicateUnconditional, ef8.Predicate.Type) + + ef9 := (effs.Embedded.Records[8]).(effects.LiquidityPoolRevoked) + tt.Equal("liquidity_pool_revoked", ef9.Type) + tt.Equal(master.Address(), ef9.Account) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef9.LiquidityPool.ID) + tt.Equal("constant_product", ef9.LiquidityPool.Type) + tt.Equal(uint32(30), ef9.LiquidityPool.FeeBP) + tt.Equal("557.4943945", ef9.LiquidityPool.TotalShares) + tt.Equal(uint64(1), 
ef9.LiquidityPool.TotalTrustlines) + tt.Equal("native", ef9.LiquidityPool.Reserves[0].Asset) + tt.Equal("400.0000000", ef9.LiquidityPool.Reserves[0].Amount) + tt.Equal(fmt.Sprintf("USD:%s", master.Address()), ef9.LiquidityPool.Reserves[1].Asset) + tt.Equal("777.0000000", ef9.LiquidityPool.Reserves[1].Amount) + + // ef10 and ef11 are `claimable_balance_sponsorship_created` effects not + // relevant here. + + ef12 := (effs.Embedded.Records[11]).(effects.LiquidityPoolRemoved) + tt.Equal("liquidity_pool_removed", ef12.Type) + tt.Equal(master.Address(), ef12.Account) + tt.Equal("64e163b66108152665ee325cc333211446277c86bfe021b9da6bb1769b0daea1", ef12.LiquidityPoolID) +} + +func TestLiquidityPoolFailedDepositAndWithdraw(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + + keys, accounts := itest.CreateAccounts(2, "1000") + shareKeys, shareAccount := keys[0], accounts[0] + + nonExistentPoolID := [32]byte{0xca, 0xfe} + + // Failing deposit + tx, err := itest.CreateSignedTransaction(shareAccount, []*keypair.Full{shareKeys}, + &txnbuild.LiquidityPoolDeposit{ + LiquidityPoolID: nonExistentPoolID, + MaxAmountA: "400", + MaxAmountB: "777", + MinPrice: xdr.Price{1, 2}, + MaxPrice: xdr.Price{2, 1}, + }, + ) + _, err = itest.Client().SubmitTransaction(tx) + tt.Error(err) + hash, err := tx.HashHex(integration.StandaloneNetworkPassphrase) + tt.NoError(err) + opsResponse, err := itest.Client().Operations(horizonclient.OperationRequest{ + ForTransaction: hash, + }) + tt.NoError(err) + tt.Len(opsResponse.Embedded.Records, 1) + deposit := (opsResponse.Embedded.Records[0]).(operations.LiquidityPoolDeposit) + tt.Equal("liquidity_pool_deposit", deposit.Type) + tt.Equal("cafe000000000000000000000000000000000000000000000000000000000000", deposit.LiquidityPoolID) + tt.Equal("0.5000000", deposit.MinPrice) + tt.Equal("2.0000000", deposit.MaxPrice) + tt.Equal("", deposit.ReservesDeposited[0].Asset) + tt.Equal("0.0000000", deposit.ReservesDeposited[0].Amount) + tt.Equal("", deposit.ReservesDeposited[1].Asset) + tt.Equal("0.0000000", deposit.ReservesDeposited[1].Amount) + tt.Equal("", deposit.ReservesMax[0].Asset) + tt.Equal("400.0000000", deposit.ReservesMax[0].Amount) + tt.Equal("", deposit.ReservesMax[1].Asset) + tt.Equal("777.0000000", deposit.ReservesMax[1].Amount) + tt.Equal("0.0000000", deposit.SharesReceived) + + // Failing withdrawal + tx, err = itest.CreateSignedTransaction(shareAccount, []*keypair.Full{shareKeys}, + &txnbuild.LiquidityPoolWithdraw{ + LiquidityPoolID: nonExistentPoolID, + Amount: amount.StringFromInt64(int64(10)), + MinAmountA: "10", + MinAmountB: "20", + }, + ) + _, err = itest.Client().SubmitTransaction(tx) + tt.Error(err) + + hash, err = tx.HashHex(integration.StandaloneNetworkPassphrase) + tt.NoError(err) + opsResponse, err = itest.Client().Operations(horizonclient.OperationRequest{ + ForTransaction: hash, + }) + tt.NoError(err) + tt.Len(opsResponse.Embedded.Records, 1) + withdrawal := (opsResponse.Embedded.Records[0]).(operations.LiquidityPoolWithdraw) + tt.Equal("liquidity_pool_withdraw", withdrawal.Type) + tt.Equal("cafe000000000000000000000000000000000000000000000000000000000000", withdrawal.LiquidityPoolID) + + tt.Equal("", withdrawal.ReservesMin[0].Asset) + tt.Equal("10.0000000", withdrawal.ReservesMin[0].Amount) + tt.Equal("", withdrawal.ReservesMin[1].Asset) + tt.Equal("20.0000000", withdrawal.ReservesMin[1].Amount) + + tt.Equal("", withdrawal.ReservesReceived[0].Asset) + tt.Equal("0.0000000", withdrawal.ReservesReceived[0].Amount) + 
tt.Equal("", withdrawal.ReservesReceived[1].Asset) + tt.Equal("0.0000000", withdrawal.ReservesReceived[1].Amount) + + tt.Equal("0.0000010", withdrawal.Shares) +} diff --git a/services/horizon/internal/integration/muxed_account_details_test.go b/services/horizon/internal/integration/muxed_account_details_test.go new file mode 100644 index 0000000000..2b3180f34b --- /dev/null +++ b/services/horizon/internal/integration/muxed_account_details_test.go @@ -0,0 +1,92 @@ +package integration + +import ( + "math" + "testing" + + "github.com/stellar/go/clients/horizonclient" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestMuxedAccountDetails(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + masterStr := master.Address() + masterAcID := xdr.MustAddress(masterStr) + + accs, _ := itest.CreateAccounts(1, "100") + destionationStr := accs[0].Address() + destinationAcID := xdr.MustAddress(destionationStr) + + source := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabecafebabe, + Ed25519: *masterAcID.Ed25519, + }, + } + + destination := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + // Make sure we cover the full uint64 range + Id: math.MaxUint64, + Ed25519: *destinationAcID.Ed25519, + }, + } + + // Submit a simple payment tx + op := txnbuild.Payment{ + SourceAccount: source.Address(), + Destination: destination.Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + txSource := itest.MasterAccount().(*hProtocol.Account) + txSource.AccountID = source.Address() + txResp := itest.MustSubmitOperations(txSource, master, &op) + + // check the transaction details + txDetails, err := itest.Client().TransactionDetail(txResp.Hash) + tt.NoError(err) + tt.Equal(source.Address(), txDetails.AccountMuxed) + tt.Equal(uint64(source.Med25519.Id), txDetails.AccountMuxedID) + tt.Equal(source.Address(), txDetails.FeeAccountMuxed) + tt.Equal(uint64(source.Med25519.Id), txDetails.FeeAccountMuxedID) + + // check the operation details + opsResp, err := itest.Client().Operations(horizonclient.OperationRequest{ + ForTransaction: txResp.Hash, + }) + tt.NoError(err) + opDetails := opsResp.Embedded.Records[0].(operations.Payment) + tt.Equal(source.Address(), opDetails.SourceAccountMuxed) + tt.Equal(uint64(source.Med25519.Id), opDetails.SourceAccountMuxedID) + tt.Equal(source.Address(), opDetails.FromMuxed) + tt.Equal(uint64(source.Med25519.Id), opDetails.FromMuxedID) + tt.Equal(destination.Address(), opDetails.ToMuxed) + tt.Equal(uint64(destination.Med25519.Id), opDetails.ToMuxedID) + + // check the effect details + effectsResp, err := itest.Client().Effects(horizonclient.EffectRequest{ + ForTransaction: txResp.Hash, + }) + tt.NoError(err) + records := effectsResp.Embedded.Records + + credited := records[0].(effects.AccountCredited) + tt.Equal(destination.Address(), credited.AccountMuxed) + tt.Equal(uint64(destination.Med25519.Id), credited.AccountMuxedID) + + debited := records[1].(effects.AccountDebited) + tt.Equal(source.Address(), debited.AccountMuxed) + tt.Equal(uint64(source.Med25519.Id), debited.AccountMuxedID) +} diff --git 
a/services/horizon/internal/integration/muxed_operations_test.go b/services/horizon/internal/integration/muxed_operations_test.go new file mode 100644 index 0000000000..092b4152ee --- /dev/null +++ b/services/horizon/internal/integration/muxed_operations_test.go @@ -0,0 +1,134 @@ +package integration + +import ( + "testing" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestMuxedOperations(t *testing.T) { + itest := integration.NewTest(t, integration.Config{}) + + sponsored := keypair.MustRandom() + // Is there an easier way? + sponsoredMuxed := xdr.MustMuxedAddress(sponsored.Address()) + sponsoredMuxed.Type = xdr.CryptoKeyTypeKeyTypeMuxedEd25519 + sponsoredMuxed.Med25519 = &xdr.MuxedAccountMed25519{ + Ed25519: *sponsoredMuxed.Ed25519, + Id: 100, + } + + master := itest.Master() + masterMuxed := xdr.MustMuxedAddress(master.Address()) + masterMuxed.Type = xdr.CryptoKeyTypeKeyTypeMuxedEd25519 + masterMuxed.Med25519 = &xdr.MuxedAccountMed25519{ + Ed25519: *masterMuxed.Ed25519, + Id: 200, + } + + ops := []txnbuild.Operation{ + &txnbuild.BeginSponsoringFutureReserves{ + SponsoredID: sponsored.Address(), + }, + &txnbuild.CreateAccount{ + Destination: sponsored.Address(), + Amount: "100", + }, + &txnbuild.ChangeTrust{ + SourceAccount: sponsoredMuxed.Address(), + Line: txnbuild.CreditAsset{"ABCD", master.Address()}.MustToChangeTrustAsset(), + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.ManageSellOffer{ + SourceAccount: sponsoredMuxed.Address(), + Selling: txnbuild.NativeAsset{}, + Buying: txnbuild.CreditAsset{"ABCD", master.Address()}, + Amount: "3", + Price: xdr.Price{1, 1}, + }, + // This will generate a trade effect: + &txnbuild.ManageSellOffer{ + SourceAccount: masterMuxed.Address(), + Selling: txnbuild.CreditAsset{"ABCD", master.Address()}, + Buying: txnbuild.NativeAsset{}, + Amount: "3", + Price: xdr.Price{1, 1}, + }, + &txnbuild.ManageData{ + SourceAccount: sponsoredMuxed.Address(), + Name: "test", + Value: []byte("test"), + }, + &txnbuild.Payment{ + SourceAccount: sponsoredMuxed.Address(), + Destination: master.Address(), + Amount: "1", + Asset: txnbuild.NativeAsset{}, + }, + &txnbuild.CreateClaimableBalance{ + SourceAccount: sponsoredMuxed.Address(), + Amount: "2", + Asset: txnbuild.NativeAsset{}, + Destinations: []txnbuild.Claimant{ + txnbuild.NewClaimant(keypair.MustRandom().Address(), nil), + }, + }, + &txnbuild.EndSponsoringFutureReserves{ + SourceAccount: sponsored.Address(), + }, + } + txResp, err := itest.SubmitMultiSigOperations(itest.MasterAccount(), []*keypair.Full{master, sponsored}, ops...) 
+ assert.NoError(t, err) + assert.True(t, txResp.Successful) + + ops = []txnbuild.Operation{ + // Remove subentries to be able to merge account + &txnbuild.Payment{ + SourceAccount: sponsoredMuxed.Address(), + Destination: master.Address(), + Amount: "3", + Asset: txnbuild.CreditAsset{"ABCD", master.Address()}, + }, + &txnbuild.ChangeTrust{ + SourceAccount: sponsoredMuxed.Address(), + Line: txnbuild.CreditAsset{"ABCD", master.Address()}.MustToChangeTrustAsset(), + Limit: "0", + }, + &txnbuild.ManageData{ + SourceAccount: sponsoredMuxed.Address(), + Name: "test", + }, + &txnbuild.AccountMerge{ + SourceAccount: sponsoredMuxed.Address(), + Destination: masterMuxed.Address(), + }, + } + txResp, err = itest.SubmitMultiSigOperations(itest.MasterAccount(), []*keypair.Full{master, sponsored}, ops...) + assert.NoError(t, err) + assert.True(t, txResp.Successful) + + // Check if no 5xx after processing the tx above + // TODO expand it to test actual muxed fields + _, err = itest.Client().Operations(horizonclient.OperationRequest{Limit: 200}) + assert.NoError(t, err, "/operations failed") + + _, err = itest.Client().Payments(horizonclient.OperationRequest{Limit: 200}) + assert.NoError(t, err, "/payments failed") + + effectsPage, err := itest.Client().Effects(horizonclient.EffectRequest{Limit: 200}) + assert.NoError(t, err, "/effects failed") + + for _, effect := range effectsPage.Embedded.Records { + if effect.GetType() == "trade" { + trade := effect.(effects.Trade) + oneSet := trade.AccountMuxedID != 0 || trade.SellerMuxedID != 0 + assert.True(t, oneSet, "at least one of account_muxed_id, seller_muxed_id must be set") + } + } +} diff --git a/services/horizon/internal/integration/negative_seq_txsub_test.go b/services/horizon/internal/integration/negative_seq_txsub_test.go new file mode 100644 index 0000000000..0abf2c7bcb --- /dev/null +++ b/services/horizon/internal/integration/negative_seq_txsub_test.go @@ -0,0 +1,73 @@ +package integration + +import ( + "math" + "testing" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/txnbuild" + "github.com/stretchr/testify/assert" +) + +func TestNegativeSequenceTxSubmission(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + master := itest.Master() + + // First, bump the sequence to the maximum value -1 + op := txnbuild.BumpSequence{ + BumpTo: int64(math.MaxInt64) - 1, + } + itest.MustSubmitOperations(itest.MasterAccount(), master, &op) + + account := itest.MasterAccount() + seqnum, err := account.GetSequenceNumber() + tt.NoError(err) + tt.Equal(int64(math.MaxInt64)-1, seqnum) + + // Submit a simple payment + op2 := txnbuild.Payment{ + Destination: master.Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + txResp := itest.MustSubmitOperations(account, master, &op2) + tt.Equal(master.Address(), txResp.Account) + + // The transaction should had bumped our sequence to the maximum possible value + seqnum, err = account.GetSequenceNumber() + tt.NoError(err) + tt.Equal(int64(math.MaxInt64), seqnum) + + // Using txnbuild to create another transaction should fail, since it would cause a sequence number overflow + txResp, err = itest.SubmitOperations(account, master, &op2) + tt.Error(err) + tt.Contains(err.Error(), "sequence cannot be increased, it already reached MaxInt64") + + // We can enforce a negative sequence without errors by setting IncrementSequenceNum=false + account = &txnbuild.SimpleAccount{ + AccountID: 
account.GetAccountID(), + Sequence: math.MinInt64, + } + txParams := txnbuild.TransactionParams{ + SourceAccount: account, + Operations: []txnbuild.Operation{&op2}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + IncrementSequenceNum: false, + } + tx, err := txnbuild.NewTransaction(txParams) + tt.NoError(err) + tx, err = tx.Sign(integration.StandaloneNetworkPassphrase, master) + tt.NoError(err) + txResp, err = itest.Client().SubmitTransaction(tx) + tt.Error(err) + clientErr, ok := err.(*horizonclient.Error) + tt.True(ok) + codes, err := clientErr.ResultCodes() + tt.NoError(err) + tt.Equal("tx_bad_seq", codes.TransactionCode) + +} diff --git a/services/horizon/internal/integration/parameters_test.go b/services/horizon/internal/integration/parameters_test.go new file mode 100644 index 0000000000..3679b146e3 --- /dev/null +++ b/services/horizon/internal/integration/parameters_test.go @@ -0,0 +1,257 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite +package integration + +import ( + "io/ioutil" + "os" + "os/exec" + "path" + "strings" + "testing" + + horizon "github.com/stellar/go/services/horizon/internal" + "github.com/stellar/go/services/horizon/internal/test/integration" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +var defaultCaptiveCoreParameters = map[string]string{ + horizon.StellarCoreBinaryPathName: os.Getenv("CAPTIVE_CORE_BIN"), + horizon.StellarCoreURLFlagName: "", + horizon.StellarCoreDBURLFlagName: "", +} + +const ( + SIMPLE_CAPTIVE_CORE_TOML = ` + PEER_PORT=11725 + ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING=true + + UNSAFE_QUORUM=true + FAILURE_SAFETY=0 + + [[VALIDATORS]] + NAME="local_core" + HOME_DOMAIN="core.local" + PUBLIC_KEY="GD5KD2KEZJIGTC63IGW6UMUSMVUVG5IHG64HUTFWCHVZH2N2IBOQN7PS" + ADDRESS="localhost" + QUALITY="MEDIUM"` +) + +func NewParameterTest(t *testing.T, params map[string]string) *integration.Test { + return NewParameterTestWithEnv(t, params, map[string]string{}) +} + +func NewParameterTestWithEnv(t *testing.T, params, envvars map[string]string) *integration.Test { + config := integration.Config{ + ProtocolVersion: 17, + SkipHorizonStart: true, + HorizonParameters: params, + HorizonEnvironment: envvars, + } + return integration.NewTest(t, config) +} + +func TestFatalScenarios(t *testing.T) { + suite.Run(t, new(FatalTestCase)) +} + +// Ensures that BUCKET_DIR_PATH is not an allowed value for Captive Core. +func (suite *FatalTestCase) TestBucketDirDisallowed() { + // This is a bit of a hacky workaround. + // + // In CI, we run our integration tests twice: once with Captive Core + // enabled, and once without. *These* tests only run with Captive Core + // configured properly (specifically, w/ the CAPTIVE_CORE_BIN envvar set). + if !integration.RunWithCaptiveCore { + suite.T().Skip() + } + + config := `BUCKET_DIR_PATH="/tmp" + ` + SIMPLE_CAPTIVE_CORE_TOML + + confName, _, cleanup := createCaptiveCoreConfig(config) + defer cleanup() + + test := NewParameterTest(suite.T(), map[string]string{ + horizon.CaptiveCoreConfigPathName: confName, + horizon.StellarCoreBinaryPathName: os.Getenv("CAPTIVE_CORE_BIN"), + }) + + suite.Exits(func() { test.StartHorizon() }) +} + +func (suite *FatalTestCase) TestEnvironmentPreserved() { + // Who tests the tests? This test. + // + // It ensures that the global OS environmental variables are preserved after + // running an integration test. 
+ t := suite.T() + + // Note that we ALSO need to make sure we don't modify parent env state. + if value, isSet := os.LookupEnv("CAPTIVE_CORE_CONFIG_PATH"); isSet { + defer func() { + os.Setenv("CAPTIVE_CORE_CONFIG_PATH", value) + }() + } + + err := os.Setenv("CAPTIVE_CORE_CONFIG_PATH", "original value") + assert.NoError(t, err) + + confName, _, cleanup := createCaptiveCoreConfig(SIMPLE_CAPTIVE_CORE_TOML) + defer cleanup() + test := NewParameterTestWithEnv(t, map[string]string{}, map[string]string{ + "CAPTIVE_CORE_CONFIG_PATH": confName, + }) + + err = test.StartHorizon() + assert.NoError(t, err) + test.WaitForHorizon() + + envValue := os.Getenv("CAPTIVE_CORE_CONFIG_PATH") + assert.Equal(t, confName, envValue) + + test.Shutdown() + + envValue = os.Getenv("CAPTIVE_CORE_CONFIG_PATH") + assert.Equal(t, "original value", envValue) +} + +// Ensures that the filesystem ends up in the correct state with Captive Core. +func TestCaptiveCoreConfigFilesystemState(t *testing.T) { + if !integration.RunWithCaptiveCore { + t.Skip() // explained above + } + + confName, storagePath, cleanup := createCaptiveCoreConfig(SIMPLE_CAPTIVE_CORE_TOML) + defer cleanup() + + localParams := integration.MergeMaps(defaultCaptiveCoreParameters, map[string]string{ + "captive-core-storage-path": storagePath, + horizon.CaptiveCoreConfigPathName: confName, + }) + test := NewParameterTest(t, localParams) + + err := test.StartHorizon() + assert.NoError(t, err) + test.WaitForHorizon() + + t.Run("disk state", func(t *testing.T) { + validateCaptiveCoreDiskState(test, storagePath) + }) + + t.Run("no bucket dir", func(t *testing.T) { + validateNoBucketDirPath(test, storagePath) + }) +} + +func TestMaxAssetsForPathRequests(t *testing.T) { + t.Run("default", func(t *testing.T) { + test := NewParameterTest(t, map[string]string{}) + err := test.StartHorizon() + assert.NoError(t, err) + test.WaitForHorizon() + assert.Equal(t, test.Horizon().Config().MaxAssetsPerPathRequest, 15) + test.Shutdown() + }) + t.Run("set to 2", func(t *testing.T) { + test := NewParameterTest(t, map[string]string{"max-assets-per-path-request": "2"}) + err := test.StartHorizon() + assert.NoError(t, err) + test.WaitForHorizon() + assert.Equal(t, test.Horizon().Config().MaxAssetsPerPathRequest, 2) + test.Shutdown() + }) +} + +// Pattern taken from testify issue: +// https://github.com/stretchr/testify/issues/858#issuecomment-600491003 +// +// This lets us run test cases that are *expected* to fail from a fatal error. +// +// For our purposes, if you *want* `StartHorizon()` to fail, you should wrap it +// in a lambda and pass it to `suite.Exits(...)`. +type FatalTestCase struct { + suite.Suite +} + +func (t *FatalTestCase) Exits(subprocess func()) { + testName := t.T().Name() + if os.Getenv("ASSERT_EXISTS_"+testName) == "1" { + subprocess() + return + } + + cmd := exec.Command(os.Args[0], "-test.run="+testName) + cmd.Env = append(os.Environ(), "ASSERT_EXISTS_"+testName+"=1") + err := cmd.Run() + + t.T().Log("Result:", err) + if e, ok := err.(*exec.ExitError); ok && !e.Success() { + return + } + + t.Fail("expecting unsuccessful exit, got", err) +} + +// validateNoBucketDirPath ensures the Stellar Core auto-generated configuration +// file doesn't contain the BUCKET_DIR_PATH entry, which is forbidden when using +// Captive Core. +// +// Pass "rootDirectory" set to whatever it is you pass to +// "--captive-core-storage-path". 
+func validateNoBucketDirPath(itest *integration.Test, rootDir string) { + tt := assert.New(itest.CurrentTest()) + + coreConf := path.Join(rootDir, "captive-core", "stellar-core.conf") + tt.FileExists(coreConf) + + result, err := ioutil.ReadFile(coreConf) + tt.NoError(err) + + bucketPathSet := strings.Contains(string(result), "BUCKET_DIR_PATH") + tt.False(bucketPathSet) +} + +// validateCaptiveCoreDiskState ensures that running Captive Core creates a +// sensible directory structure. +// +// Pass "rootDirectory" set to whatever it is you pass to +// "--captive-core-storage-path". +func validateCaptiveCoreDiskState(itest *integration.Test, rootDir string) { + tt := assert.New(itest.CurrentTest()) + + storageDir := path.Join(rootDir, "captive-core") + coreConf := path.Join(storageDir, "stellar-core.conf") + + tt.DirExists(rootDir) + tt.DirExists(storageDir) + tt.FileExists(coreConf) +} + +// createCaptiveCoreConfig will create a temporary TOML config with the +// specified contents as well as a temporary storage directory. You should +// `defer` the returned function to clean these up when you're done. +func createCaptiveCoreConfig(contents string) (string, string, func()) { + tomlFile, err := ioutil.TempFile("", "captive-core-test-*.toml") + defer tomlFile.Close() + if err != nil { + panic(err) + } + + _, err = tomlFile.WriteString(contents) + if err != nil { + panic(err) + } + + storagePath, err := ioutil.TempDir("", "captive-core-test-*-storage") + if err != nil { + panic(err) + } + + filename := tomlFile.Name() + return filename, storagePath, func() { + os.Remove(filename) + os.RemoveAll(storagePath) + } +} diff --git a/services/horizon/internal/integration/sponsorship_test.go b/services/horizon/internal/integration/sponsorship_test.go new file mode 100644 index 0000000000..74a21dd385 --- /dev/null +++ b/services/horizon/internal/integration/sponsorship_test.go @@ -0,0 +1,778 @@ +package integration + +import ( + "encoding/base64" + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + + sdk "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" +) + +func TestSponsorships(t *testing.T) { + tt := assert.New(t) + itest := integration.NewTest(t, integration.Config{}) + client := itest.Client() + + /* Query helpers that can/should? probably be added to IntegrationTest. */ + + getOperationsByTx := func(txHash string) []operations.Operation { + response, err := client.Operations(sdk.OperationRequest{ForTransaction: txHash}) + tt.NoError(err) + return response.Embedded.Records + } + + getEffectsByOp := func(opId string) []effects.Effect { + response, err := client.Effects(sdk.EffectRequest{ForOperation: opId}) + tt.NoError(err) + return response.Embedded.Records + } + + getEffectsByTx := func(txId string) []effects.Effect { + response, err := client.Effects(sdk.EffectRequest{ForTransaction: txId}) + tt.NoError(err) + return response.Embedded.Records + } + + /* + * Each test has its own sponsor and sponsoree (or is it sponsee? + * :thinking:) so that we can do direct equality checks. 
+ * + * Each sub-test follows a similar structure: + * - sponsor a particular operation + * - replace the sponsor with a new one + * - revoke the sponsorship + * + * Between each step, we validate /operations, /effects, etc. according to + * the expected behavior for that sponsorship. + */ + + // We will create the following operation structure: + // BeginSponsoringFutureReserves A + // CreateAccount A + // EndSponsoringFutureReserves (with A as a source) + t.Run("CreateAccount", func(t *testing.T) { + keys, accounts := itest.CreateAccounts(2, "1000") + sponsor, sponsorPair := accounts[0], keys[0] + newAccountKeys := keypair.MustRandom() + newAccountID := newAccountKeys.Address() + + t.Logf("Testing sponsorship of CreateAccount operation") + ops := sponsorOperations(newAccountID, + &txnbuild.CreateAccount{ + Destination: newAccountID, + Amount: "100", + }) + + signers := []*keypair.Full{sponsorPair, newAccountKeys} + txResp, err := itest.SubmitMultiSigOperations(sponsor, signers, ops...) + itest.LogFailedTx(txResp, err) + + // Ensure that the operations are in fact the droids we're looking for + opRecords := getOperationsByTx(txResp.Hash) + tt.Len(opRecords, 3) + tt.True(opRecords[0].IsTransactionSuccessful()) + + startSponsoringOp := opRecords[0].(operations.BeginSponsoringFutureReserves) + actualCreateAccount := opRecords[1].(operations.CreateAccount) + endSponsoringOp := opRecords[2].(operations.EndSponsoringFutureReserves) + + tt.Equal(newAccountID, startSponsoringOp.SponsoredID) + tt.Equal(sponsorPair.Address(), actualCreateAccount.Sponsor) + tt.Equal(sponsorPair.Address(), endSponsoringOp.BeginSponsor) + + // Make sure that the sponsor is an (implicit) participant on the end + // sponsorship operation + response, err := client.Operations(sdk.OperationRequest{ForAccount: sponsorPair.Address()}) + tt.Condition(findOperationByID(endSponsoringOp.ID, response.Embedded.Records)) + t.Logf(" operations accurate") + + // Check that the num_sponsoring and num_sponsored fields are accurate + tt.EqualValues(2, itest.MustGetAccount(sponsorPair).NumSponsoring) + tt.EqualValues(2, itest.MustGetAccount(newAccountKeys).NumSponsored) + t.Logf(" accounts accurate") + + // Check effects of CreateAccount Operation + effectRecords := getEffectsByOp(actualCreateAccount.GetID()) + tt.Len(effectRecords, 4) + tt.Equal(sponsorPair.Address(), + effectRecords[3].(effects.AccountSponsorshipCreated).Sponsor) + t.Logf(" effects accurate") + + // Update sponsor + newSponsorPair, newSponsor := keys[1], accounts[1] + + t.Logf("Revoking & replacing sponsorship") + ops = []txnbuild.Operation{ + &txnbuild.BeginSponsoringFutureReserves{ + SourceAccount: newSponsor.GetAccountID(), + SponsoredID: sponsorPair.Address(), + }, + &txnbuild.RevokeSponsorship{ + SourceAccount: sponsor.GetAccountID(), + SponsorshipType: txnbuild.RevokeSponsorshipTypeAccount, + Account: &newAccountID, + }, + &txnbuild.EndSponsoringFutureReserves{}, + } + + signers = []*keypair.Full{sponsorPair, newSponsorPair} + txResp, err = itest.SubmitMultiSigOperations(sponsor, signers, ops...) 
+ itest.LogFailedTx(txResp, err) + + // Verify operation details + response, err = client.Operations(sdk.OperationRequest{ + ForTransaction: txResp.Hash, + }) + tt.NoError(err) + opRecords = response.Embedded.Records + tt.Len(opRecords, 3) + tt.True(opRecords[1].IsTransactionSuccessful()) + + revokeOp := opRecords[1].(operations.RevokeSponsorship) + tt.Equal(newAccountID, *revokeOp.AccountID) + t.Logf(" operations accurate") + + // Check effects + effectRecords = getEffectsByOp(revokeOp.ID) + tt.Len(effectRecords, 1) + effect := effectRecords[0].(effects.AccountSponsorshipUpdated) + tt.Equal(sponsorPair.Address(), effect.FormerSponsor) + tt.Equal(newSponsorPair.Address(), effect.NewSponsor) + t.Logf(" effects accurate") + + // Revoke sponsorship + + t.Logf("Revoking sponsorship entirely") + op := &txnbuild.RevokeSponsorship{ + SponsorshipType: txnbuild.RevokeSponsorshipTypeAccount, + Account: &newAccountID, + } + txResp = itest.MustSubmitOperations(newSponsor, newSponsorPair, op) + + // Verify operation details + opRecords = getOperationsByTx(txResp.Hash) + tt.Len(opRecords, 1) + tt.True(opRecords[0].IsTransactionSuccessful()) + revokeOp = opRecords[0].(operations.RevokeSponsorship) + tt.Equal(newAccountID, *revokeOp.AccountID) + + // Make sure that the sponsoree is an (implicit) participant in the + // revocation operation + response, err = client.Operations(sdk.OperationRequest{ForAccount: newAccountID}) + tt.Condition(findOperationByID(revokeOp.ID, response.Embedded.Records)) + t.Logf(" operations accurate") + + // Check effects + effectRecords = getEffectsByOp(revokeOp.ID) + tt.Len(effectRecords, 1) + tt.IsType(effects.AccountSponsorshipRemoved{}, effectRecords[0]) + desponsorOp := effectRecords[0].(effects.AccountSponsorshipRemoved) + tt.Equal(newSponsorPair.Address(), desponsorOp.FormerSponsor) + t.Logf(" effects accurate") + }) + + // Let's add a sponsored data entry + // BeginSponsorship N (Source=sponsor) + // SetOptionsSigner (Source=N) + // EndSponsorship (Source=N) + t.Run("Signer", func(t *testing.T) { + keys, accounts := itest.CreateAccounts(3, "1000") + sponsorPair, sponsor := keys[0], accounts[0] + newAccountPair, newAccount := keys[1], accounts[1] + signerKey := keypair.MustRandom().Address() // unspecified signer + + ops := sponsorOperations(newAccountPair.Address(), &txnbuild.SetOptions{ + SourceAccount: newAccount.GetAccountID(), + Signer: &txnbuild.Signer{ + Address: signerKey, + Weight: 1, + }, + }) + + signers := []*keypair.Full{sponsorPair, newAccountPair} + txResp, err := itest.SubmitMultiSigOperations(sponsor, signers, ops...) 
+ itest.LogFailedTx(txResp, err) + + // Verify that the signer was incorporated + signerAdded := func() bool { + signers := itest.MustGetAccount(newAccountPair).Signers + for _, signer := range signers { + if signer.Key == signerKey { + tt.Equal(sponsorPair.Address(), signer.Sponsor) + return true + } + } + return false + } + tt.Condition(signerAdded) + + // Check effects and details of the SetOptions operation + opRecords := getOperationsByTx(txResp.Hash) + tt.Len(opRecords, 3) + setOptionsOp := opRecords[1].(operations.SetOptions) + tt.Equal(sponsorPair.Address(), setOptionsOp.Sponsor) + + effRecords := getEffectsByOp(setOptionsOp.GetID()) + tt.Len(effRecords, 2) + signerSponsorshipEffect := effRecords[1].(effects.SignerSponsorshipCreated) + tt.Equal(sponsorPair.Address(), signerSponsorshipEffect.Sponsor) + tt.Equal(newAccountPair.Address(), signerSponsorshipEffect.Account) + tt.Equal(signerKey, signerSponsorshipEffect.Signer) + + // Update sponsor + newSponsorPair, newSponsor := keys[2], accounts[2] + ops = []txnbuild.Operation{ + &txnbuild.BeginSponsoringFutureReserves{ + SourceAccount: newSponsor.GetAccountID(), + SponsoredID: sponsorPair.Address(), + }, + &txnbuild.RevokeSponsorship{ + SponsorshipType: txnbuild.RevokeSponsorshipTypeSigner, + Signer: &txnbuild.SignerID{ + AccountID: newAccountPair.Address(), + SignerAddress: signerKey, + }, + }, + &txnbuild.EndSponsoringFutureReserves{}, + } + + signers = []*keypair.Full{sponsorPair, newSponsorPair} + txResp, err = itest.SubmitMultiSigOperations(sponsor, signers, ops...) + itest.LogFailedTx(txResp, err) + + // Verify operation details + opRecords = getOperationsByTx(txResp.Hash) + tt.NoError(err) + tt.Len(opRecords, 3) + tt.True(opRecords[1].IsTransactionSuccessful()) + + revokeOp := opRecords[1].(operations.RevokeSponsorship) + tt.Equal(newAccountPair.Address(), *revokeOp.SignerAccountID) + tt.Equal(signerKey, *revokeOp.SignerKey) + + // Check effects + effRecords = getEffectsByOp(revokeOp.ID) + tt.Len(effRecords, 1) + effect := effRecords[0].(effects.SignerSponsorshipUpdated) + tt.Equal(sponsorPair.Address(), effect.FormerSponsor) + tt.Equal(newSponsorPair.Address(), effect.NewSponsor) + tt.Equal(signerKey, effect.Signer) + + // Revoke sponsorship + + revoke := txnbuild.RevokeSponsorship{ + SponsorshipType: txnbuild.RevokeSponsorshipTypeSigner, + Signer: &txnbuild.SignerID{ + AccountID: newAccountPair.Address(), + SignerAddress: signerKey, + }, + } + txResp = itest.MustSubmitOperations(newSponsor, newSponsorPair, &revoke) + + effRecords = getEffectsByTx(txResp.ID) + tt.Len(effRecords, 1) + sponsorshipRemoved := effRecords[0].(effects.SignerSponsorshipRemoved) + tt.Equal(newSponsorPair.Address(), sponsorshipRemoved.FormerSponsor) + tt.Equal(signerKey, sponsorshipRemoved.Signer) + }) + + // Let's add a sponsored preauth signer with a transaction: + // + // BeginSponsorship N (Source=sponsor) + // SetOptionsSigner preAuthHash (Source=N) + // EndSponsorship (Source=N) + t.Run("PreAuthSigner", func(t *testing.T) { + keys, accounts := itest.CreateAccounts(2, "1000") + sponsorPair, sponsor := keys[0], accounts[0] + newAccountPair, newAccount := keys[1], accounts[1] + + // unspecified signer + randomSigner := keypair.MustRandom().Address() + + // Let's create a preauthorized transaction for the new account + // to add a signer + preAuthOp := &txnbuild.SetOptions{ + Signer: &txnbuild.Signer{ + Address: randomSigner, + Weight: 1, + }, + } + txParams := txnbuild.TransactionParams{ + SourceAccount: newAccount, + Operations: 
[]txnbuild.Operation{preAuthOp}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + IncrementSequenceNum: true, + } + preaAuthTx, err := txnbuild.NewTransaction(txParams) + tt.NoError(err) + preAuthHash, err := preaAuthTx.Hash(itest.GetPassPhrase()) + tt.NoError(err) + preAuthTxB64, err := preaAuthTx.Base64() + tt.NoError(err) + + // Add a sponsored preauth signer with the above transaction. + preAuthSignerKey := xdr.SignerKey{ + Type: xdr.SignerKeyTypeSignerKeyTypePreAuthTx, + PreAuthTx: (*xdr.Uint256)(&preAuthHash), + } + ops := sponsorOperations(newAccountPair.Address(), + &txnbuild.SetOptions{ + SourceAccount: newAccount.GetAccountID(), + Signer: &txnbuild.Signer{ + Address: preAuthSignerKey.Address(), + Weight: 1, + }, + }) + + signers := []*keypair.Full{sponsorPair, newAccountPair} + txResp, err := itest.SubmitMultiSigOperations(sponsor, signers, ops...) + itest.LogFailedTx(txResp, err) + + // Verify that the preauth signer was incorporated + preAuthSignerAdded := func() bool { + for _, signer := range itest.MustGetAccount(newAccountPair).Signers { + if preAuthSignerKey.Address() == signer.Key { + return true + } + } + return false + } + tt.Condition(preAuthSignerAdded) + + // Check effects and details of the SetOptions operation + opRecords := getOperationsByTx(txResp.Hash) + setOptionsOp := opRecords[1].(operations.SetOptions) + tt.Equal(sponsorPair.Address(), setOptionsOp.Sponsor) + + effRecords := getEffectsByOp(setOptionsOp.GetID()) + signerSponsorshipEffect := effRecords[1].(effects.SignerSponsorshipCreated) + tt.Equal(sponsorPair.Address(), signerSponsorshipEffect.Sponsor) + tt.Equal(preAuthSignerKey.Address(), signerSponsorshipEffect.Signer) + + // Submit the preauthorized transaction + var txResult xdr.TransactionResult + tt.NoError(err) + txResp, err = itest.Client().SubmitTransactionXDR(preAuthTxB64) + tt.NoError(err) + err = xdr.SafeUnmarshalBase64(txResp.ResultXdr, &txResult) + tt.NoError(err) + tt.Equal(xdr.TransactionResultCodeTxSuccess, txResult.Result.Code) + + // Verify that the new signer was incorporated and that the preauth signer was removed + preAuthSignerAdded = func() bool { + signers := itest.MustGetAccount(newAccountPair).Signers + if len(signers) != 2 { + return false + } + for _, signer := range signers { + if signer.Key == randomSigner { + return true + } + } + return false + } + tt.Condition(preAuthSignerAdded) + + // We don't check effects because we don't process transaction-level changes + // See https://github.com/stellar/go/pull/3050#discussion_r493651644 + }) + + // Let's add a sponsored data entry + // + // BeginSponsorship N (Source=sponsor) + // ManageData "SponsoredData"="SponsoredValue" (Source=N) + // EndSponsorship (Source=N) + t.Run("Data", func(t *testing.T) { + keys, accounts := itest.CreateAccounts(3, "1000") + sponsorPair, sponsor := keys[0], accounts[0] + newAccountPair, newAccount := keys[1], accounts[1] + + ops := sponsorOperations(newAccountPair.Address(), + &txnbuild.ManageData{ + Name: "SponsoredData", + Value: []byte("SponsoredValue"), + SourceAccount: newAccount.GetAccountID(), + }) + + signers := []*keypair.Full{sponsorPair, newAccountPair} + txResp, err := itest.SubmitMultiSigOperations(sponsor, signers, ops...) 
+ itest.LogFailedTx(txResp, err) + + // Verify that the data was incorporated + dataAdded := func() bool { + data := itest.MustGetAccount(newAccountPair).Data + if value, ok := data["SponsoredData"]; ok { + decoded, e := base64.StdEncoding.DecodeString(value) + tt.NoError(e) + if string(decoded) == "SponsoredValue" { + return true + } + } + return false + } + tt.Condition(dataAdded) + + // Check effects and details of the ManageData operation + opRecords := getOperationsByTx(txResp.Hash) + tt.Len(opRecords, 3) + manageDataOp := opRecords[1].(operations.ManageData) + tt.Equal(sponsorPair.Address(), manageDataOp.Sponsor) + + effRecords := getEffectsByOp(manageDataOp.GetID()) + tt.Len(effRecords, 2) + dataSponsorshipEffect := effRecords[1].(effects.DataSponsorshipCreated) + tt.Equal(sponsorPair.Address(), dataSponsorshipEffect.Sponsor) + tt.Equal(newAccountPair.Address(), dataSponsorshipEffect.Account) + tt.Equal("SponsoredData", dataSponsorshipEffect.DataName) + + // Update sponsor + + newSponsorPair, newSponsor := keys[2], accounts[2] + ops = []txnbuild.Operation{ + &txnbuild.BeginSponsoringFutureReserves{ + SourceAccount: newSponsor.GetAccountID(), + SponsoredID: sponsorPair.Address(), + }, + &txnbuild.RevokeSponsorship{ + SponsorshipType: txnbuild.RevokeSponsorshipTypeData, + Data: &txnbuild.DataID{ + Account: newAccountPair.Address(), + DataName: "SponsoredData", + }, + }, + &txnbuild.EndSponsoringFutureReserves{}, + } + signers = []*keypair.Full{sponsorPair, newSponsorPair} + txResp, err = itest.SubmitMultiSigOperations(sponsor, signers, ops...) + itest.LogFailedTx(txResp, err) + + // Verify operation details + opRecords = getOperationsByTx(txResp.Hash) + tt.Len(opRecords, 3) + tt.True(opRecords[1].IsTransactionSuccessful()) + + revokeOp := opRecords[1].(operations.RevokeSponsorship) + tt.Equal(newAccountPair.Address(), *revokeOp.DataAccountID) + tt.Equal("SponsoredData", *revokeOp.DataName) + + // Check effects + effRecords = getEffectsByOp(revokeOp.ID) + tt.Len(effRecords, 1) + effect := effRecords[0].(effects.DataSponsorshipUpdated) + tt.Equal(sponsorPair.Address(), effect.FormerSponsor) + tt.Equal(newSponsorPair.Address(), effect.NewSponsor) + tt.Equal("SponsoredData", effect.DataName) + + // Revoke sponsorship + + revoke := txnbuild.RevokeSponsorship{ + SponsorshipType: txnbuild.RevokeSponsorshipTypeData, + Data: &txnbuild.DataID{ + Account: newAccountPair.Address(), + DataName: "SponsoredData", + }, + } + txResp = itest.MustSubmitOperations(newSponsor, newSponsorPair, &revoke) + + effRecords = getEffectsByTx(txResp.ID) + tt.Len(effRecords, 1) + sponsorshipRemoved := effRecords[0].(effects.DataSponsorshipRemoved) + tt.Equal(newSponsorPair.Address(), sponsorshipRemoved.FormerSponsor) + tt.Equal("SponsoredData", sponsorshipRemoved.DataName) + }) + // Let's add a sponsored trustline and offer + // + // BeginSponsorship N (Source=sponsor) + // Change Trust (ABC, sponsor) (Source=N) + // ManageSellOffer Buying (ABC, sponsor) (Source=N) + // EndSponsorship (Source=N) + t.Run("TrustlineAndOffer", func(t *testing.T) { + keys, accounts := itest.CreateAccounts(3, "1000") + sponsorPair, sponsor := keys[0], accounts[0] + newAccountPair, newAccount := keys[1], accounts[1] + + asset := txnbuild.CreditAsset{Code: "ABCD", Issuer: sponsorPair.Address()} + canonicalAsset := fmt.Sprintf("%s:%s", asset.Code, asset.Issuer) + + ops := sponsorOperations(newAccountPair.Address(), + &txnbuild.ChangeTrust{ + SourceAccount: newAccount.GetAccountID(), + Line: asset.MustToChangeTrustAsset(), + Limit: 
txnbuild.MaxTrustlineLimit,
+			},
+			&txnbuild.ManageSellOffer{
+				SourceAccount: newAccount.GetAccountID(),
+				Selling:       txnbuild.NativeAsset{},
+				Buying:        asset,
+				Amount:        "3",
+				Price:         xdr.Price{1, 1},
+			})
+
+		signers := []*keypair.Full{sponsorPair, newAccountPair}
+		txResp, err := itest.SubmitMultiSigOperations(sponsor, signers, ops...)
+		itest.LogFailedTx(txResp, err)
+
+		// Verify that the trustline was incorporated correctly
+		trustlineAdded := func() bool {
+			for _, balance := range itest.MustGetAccount(newAccountPair).Balances {
+				if balance.Issuer == sponsorPair.Address() {
+					tt.Equal(asset.Code, balance.Code)
+					tt.Equal(sponsorPair.Address(), balance.Sponsor)
+					return true
+				}
+			}
+			return false
+		}
+		tt.Condition(trustlineAdded)
+
+		// Check the details of the ChangeTrust operation
+		opRecords := getOperationsByTx(txResp.Hash)
+		tt.Len(opRecords, 4)
+		changeTrust := opRecords[1].(operations.ChangeTrust)
+		tt.Equal(sponsorPair.Address(), changeTrust.Sponsor)
+
+		// Verify that the offer was incorporated correctly
+		var offer protocol.Offer
+		offerAdded := func() bool {
+			offers, e := client.Offers(sdk.OfferRequest{
+				ForAccount: newAccountPair.Address(),
+			})
+			tt.NoError(e)
+			if tt.Len(offers.Embedded.Records, 1) {
+				offer = offers.Embedded.Records[0]
+				tt.Equal(sponsorPair.Address(), offer.Buying.Issuer)
+				tt.Equal(asset.Code, offer.Buying.Code)
+				tt.Equal(sponsorPair.Address(), offer.Sponsor)
+				return true
+			}
+			return false
+		}
+		tt.Condition(offerAdded)
+
+		// Check the details of the ManageSellOffer operation
+		// (there are no effects, which is intentional)
+		manageOffer := opRecords[2].(operations.ManageSellOffer)
+		tt.Equal(sponsorPair.Address(), manageOffer.Sponsor)
+
+		// Update sponsor
+
+		newSponsorPair, newSponsor := keys[2], accounts[2]
+
+		ops = []txnbuild.Operation{
+			&txnbuild.BeginSponsoringFutureReserves{
+				SourceAccount: newSponsor.GetAccountID(),
+				SponsoredID:   sponsorPair.Address(),
+			},
+			&txnbuild.RevokeSponsorship{
+				SponsorshipType: txnbuild.RevokeSponsorshipTypeOffer,
+				Offer: &txnbuild.OfferID{
+					SellerAccountAddress: offer.Seller,
+					OfferID:              offer.ID,
+				},
+			},
+			&txnbuild.RevokeSponsorship{
+				SponsorshipType: txnbuild.RevokeSponsorshipTypeTrustLine,
+				TrustLine: &txnbuild.TrustLineID{
+					Account: newAccountPair.Address(),
+					Asset:   asset.MustToTrustLineAsset(),
+				},
+			},
+			&txnbuild.EndSponsoringFutureReserves{},
+		}
+		signers = []*keypair.Full{sponsorPair, newSponsorPair}
+		txResp, err = itest.SubmitMultiSigOperations(sponsor, signers, ops...)
+ itest.LogFailedTx(txResp, err) + + // Verify operation details + opRecords = getOperationsByTx(txResp.Hash) + tt.Len(opRecords, 4) + + tt.True(opRecords[1].IsTransactionSuccessful()) + revokeOp := opRecords[1].(operations.RevokeSponsorship) + tt.Equal(offer.ID, *revokeOp.OfferID) + + tt.True(opRecords[2].IsTransactionSuccessful()) + revokeOp = opRecords[2].(operations.RevokeSponsorship) + tt.Equal(newAccountPair.Address(), *revokeOp.TrustlineAccountID) + tt.Equal("ABCD:"+sponsorPair.Address(), *revokeOp.TrustlineAsset) + + // Check effects + effRecords := getEffectsByOp(revokeOp.ID) + tt.Len(effRecords, 1) + effect := effRecords[0].(effects.TrustlineSponsorshipUpdated) + tt.Equal("credit_alphanum4", effect.Type) + tt.Equal(sponsorPair.Address(), effect.FormerSponsor) + tt.Equal(newSponsorPair.Address(), effect.NewSponsor) + + // Revoke sponsorship + ops = []txnbuild.Operation{ + &txnbuild.RevokeSponsorship{ + SponsorshipType: txnbuild.RevokeSponsorshipTypeOffer, + Offer: &txnbuild.OfferID{ + SellerAccountAddress: offer.Seller, + OfferID: offer.ID, + }, + }, + &txnbuild.RevokeSponsorship{ + SponsorshipType: txnbuild.RevokeSponsorshipTypeTrustLine, + TrustLine: &txnbuild.TrustLineID{ + Account: newAccountPair.Address(), + Asset: asset.MustToTrustLineAsset(), + }, + }, + } + txResp = itest.MustSubmitOperations(newSponsor, newSponsorPair, ops...) + + // There are intentionally no effects when revoking an Offer + effRecords = getEffectsByTx(txResp.ID) + tt.Len(effRecords, 1) + sponsorshipRemoved := effRecords[0].(effects.TrustlineSponsorshipRemoved) + tt.Equal("credit_alphanum4", sponsorshipRemoved.Type) + tt.Equal(newSponsorPair.Address(), sponsorshipRemoved.FormerSponsor) + tt.Equal(canonicalAsset, sponsorshipRemoved.Asset) + }) + + // + // Confirms the operations, effects, and functionality of combining the + // CAP-23 and CAP-33 features together as part of Protocol 14. + // + t.Run("ClaimableBalance", func(t *testing.T) { + keys, accounts := itest.CreateAccounts(2, "50") + sponsorPair, sponsor := keys[0], accounts[0] + sponsoreePair, sponsoree := keys[1], accounts[1] + + ops := []txnbuild.Operation{ + &txnbuild.BeginSponsoringFutureReserves{ + SourceAccount: sponsor.GetAccountID(), + SponsoredID: sponsoreePair.Address(), + }, + &txnbuild.CreateClaimableBalance{ + SourceAccount: sponsoree.GetAccountID(), + Destinations: []txnbuild.Claimant{ + txnbuild.NewClaimant(sponsorPair.Address(), nil), + txnbuild.NewClaimant(sponsoreePair.Address(), nil), + }, + Amount: "25", + Asset: txnbuild.NativeAsset{}, + }, + &txnbuild.EndSponsoringFutureReserves{}, + } + + txResp, err := itest.SubmitMultiSigOperations(sponsoree, + []*keypair.Full{sponsoreePair, sponsorPair}, ops...) + itest.LogFailedTx(txResp, err) + + // Establish a baseline for the master account + masterBalance := getAccountXLM(itest, sponsorPair) + + // Check the global /claimable_balances list for success. 
+ balances, err := client.ClaimableBalances(sdk.ClaimableBalanceRequest{}) + tt.NoError(err) + + claims := balances.Embedded.Records + tt.Len(claims, 1) + balance := claims[0] + tt.Equal(sponsorPair.Address(), balance.Sponsor) + + // Claim the CB and validate balances: + // - sponsoree should go down for fulfilling the CB + // - master should go up for claiming the CB + txResp, err = itest.SubmitOperations(sponsor, sponsorPair, + &txnbuild.ClaimClaimableBalance{BalanceID: claims[0].BalanceID}) + itest.LogFailedTx(txResp, err) + + tt.Lessf(getAccountXLM(itest, sponsoreePair), float64(25), "sponsoree balance didn't decrease") + tt.Greaterf(getAccountXLM(itest, sponsorPair), masterBalance, "master balance didn't increase") + + // Check that operations populate. + expectedOperations := map[string]bool{ + operations.TypeNames[xdr.OperationTypeBeginSponsoringFutureReserves]: false, + operations.TypeNames[xdr.OperationTypeCreateClaimableBalance]: false, + operations.TypeNames[xdr.OperationTypeEndSponsoringFutureReserves]: false, + } + + opsPage, err := client.Operations(sdk.OperationRequest{Order: "desc", Limit: 5}) + for _, op := range opsPage.Embedded.Records { + opType := op.GetType() + if _, ok := expectedOperations[opType]; ok { + expectedOperations[opType] = true + t.Logf(" operation %s found", opType) + } + } + + for expectedType, exists := range expectedOperations { + tt.Truef(exists, "operation %s not found", expectedType) + } + + // Check that effects populate. + expectedEffects := map[string][]uint{ + effects.EffectTypeNames[effects.EffectClaimableBalanceSponsorshipCreated]: {0, 1}, + effects.EffectTypeNames[effects.EffectClaimableBalanceCreated]: {0, 1}, + effects.EffectTypeNames[effects.EffectClaimableBalanceClaimantCreated]: {0, 2}, + effects.EffectTypeNames[effects.EffectClaimableBalanceSponsorshipRemoved]: {0, 1}, + effects.EffectTypeNames[effects.EffectClaimableBalanceClaimed]: {0, 1}, + } + + effectsPage, err := client.Effects(sdk.EffectRequest{Order: "desc", Limit: 100}) + for _, effect := range effectsPage.Embedded.Records { + effectType := effect.GetType() + if _, ok := expectedEffects[effectType]; ok { + expectedEffects[effectType][0] += 1 + t.Logf(" effect %s found", effectType) + } + } + + for expectedType, counts := range expectedEffects { + actual, needed := counts[0], counts[1] + tt.Equalf(needed, actual, "effect %s not found enough", expectedType) + } + }) +} + +// Sandwiches a set of operations between a Begin/End reserve sponsorship. +func sponsorOperations(account string, ops ...txnbuild.Operation) []txnbuild.Operation { + return append(append( + []txnbuild.Operation{ + &txnbuild.BeginSponsoringFutureReserves{SponsoredID: account}, + }, + ops...), + &txnbuild.EndSponsoringFutureReserves{ + SourceAccount: account, + }, + ) +} + +// Returns a function that will find `needle` in `haystack` by ID. +// Designed to be usable by assert.Condition +func findOperationByID(needle string, haystack []operations.Operation) func() bool { + return func() bool { + for _, o := range haystack { + if o.GetID() == needle { + return true + } + } + return false + } +} + +// Retrieves the XLM balance for an account. 
+func getAccountXLM(i *integration.Test, account *keypair.Full) float64 { + details := i.MustGetAccount(account) + balance, err := strconv.ParseFloat(details.Balances[0].Balance, 64) + if err != nil { + panic(err) + } + return balance +} diff --git a/services/horizon/internal/integration/state_verifier_test.go b/services/horizon/internal/integration/state_verifier_test.go new file mode 100644 index 0000000000..012257db95 --- /dev/null +++ b/services/horizon/internal/integration/state_verifier_test.go @@ -0,0 +1,169 @@ +package integration + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test/integration" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestStateVerifier(t *testing.T) { + itest := integration.NewTest(t, integration.Config{}) + + sponsored := keypair.MustRandom() + sponsoredSource := &txnbuild.SimpleAccount{ + AccountID: sponsored.Address(), + Sequence: 1, + } + signer1 := keypair.MustParseAddress("GAB3CVX6C2KCDZUUS4FIMP5Z2IUDTMKMRKADOFOCNOB437VCPS5DRG3Z") + signer2 := keypair.MustParseAddress("GBUERII77FW6Z7SPOIMFQQT63PMUQRWTIAARR3QVSXTRULNQSUQVIYRC") + signer3 := keypair.MustParseAddress("GCNLAKGPBL4H6CQRITHSDTJZ6RLTP3WY2OJZJN4EWKRSNM2A23CV6VD3") + + // The operations below create a sponsorship sandwich, sponsoring an + // account, its trustlines, offers, data, and claimable balances. + // Then 3 signers are created with the middle one sponsored. + master := itest.Master() + ops := []txnbuild.Operation{ + &txnbuild.BeginSponsoringFutureReserves{ + SponsoredID: sponsored.Address(), + }, + &txnbuild.CreateAccount{ + Destination: sponsored.Address(), + Amount: "100", + }, + &txnbuild.ChangeTrust{ + SourceAccount: sponsoredSource.AccountID, + Line: txnbuild.CreditAsset{"ABCD", master.Address()}.MustToChangeTrustAsset(), + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.ChangeTrust{ + Line: txnbuild.LiquidityPoolShareChangeTrustAsset{ + LiquidityPoolParameters: txnbuild.LiquidityPoolParameters{ + AssetA: txnbuild.NativeAsset{}, + AssetB: txnbuild.CreditAsset{ + Code: "ABCD", + Issuer: master.Address(), + }, + Fee: 30, + }, + }, + Limit: txnbuild.MaxTrustlineLimit, + }, + &txnbuild.ManageSellOffer{ + SourceAccount: sponsoredSource.AccountID, + Selling: txnbuild.NativeAsset{}, + Buying: txnbuild.CreditAsset{"ABCD", master.Address()}, + Amount: "3", + Price: xdr.Price{1, 1}, + }, + &txnbuild.ManageData{ + SourceAccount: sponsoredSource.AccountID, + Name: "test", + Value: []byte("test"), + }, + &txnbuild.CreateClaimableBalance{ + SourceAccount: sponsoredSource.AccountID, + Amount: "2", + Asset: txnbuild.NativeAsset{}, + Destinations: []txnbuild.Claimant{ + txnbuild.NewClaimant(keypair.MustRandom().Address(), nil), + }, + }, + &txnbuild.EndSponsoringFutureReserves{ + SourceAccount: sponsoredSource.AccountID, + }, + &txnbuild.SetOptions{ + SourceAccount: sponsoredSource.AccountID, + Signer: &txnbuild.Signer{ + Address: signer1.Address(), + Weight: 3, + }, + }, + &txnbuild.BeginSponsoringFutureReserves{ + SponsoredID: sponsored.Address(), + }, + &txnbuild.SetOptions{ + SourceAccount: sponsoredSource.AccountID, + Signer: &txnbuild.Signer{ + Address: signer2.Address(), + Weight: 3, + }, + }, + &txnbuild.EndSponsoringFutureReserves{ + SourceAccount: sponsoredSource.AccountID, + }, + &txnbuild.SetOptions{ + SourceAccount: 
sponsoredSource.AccountID, + Signer: &txnbuild.Signer{ + Address: signer3.Address(), + Weight: 3, + }, + }, + } + txResp, err := itest.SubmitMultiSigOperations(itest.MasterAccount(), []*keypair.Full{master, sponsored}, ops...) + assert.NoError(t, err) + assert.True(t, txResp.Successful) + + verified := waitForStateVerifications(itest, 1) + if !verified { + t.Fatal("State verification not run...") + } + + // Trigger state rebuild to check if ingesting from history archive works + session := itest.Horizon().HistoryQ().Clone() + q := &history.Q{session} + err = q.Begin() + assert.NoError(t, err) + _, err = q.GetLastLedgerIngest(context.Background()) + assert.NoError(t, err) + err = q.UpdateIngestVersion(context.Background(), 0) + assert.NoError(t, err) + err = q.Commit() + assert.NoError(t, err) + + verified = waitForStateVerifications(itest, 2) + if !verified { + t.Fatal("State verification not run...") + } +} + +func waitForStateVerifications(itest *integration.Test, count int) bool { + t := itest.CurrentTest() + // Check metrics until state verification run + for i := 0; i < 120; i++ { + t.Logf("Checking metrics (%d attempt)\n", i) + res, err := http.Get(itest.MetricsURL()) + assert.NoError(t, err) + + metricsBytes, err := ioutil.ReadAll(res.Body) + res.Body.Close() + assert.NoError(t, err) + metrics := string(metricsBytes) + + stateInvalid := strings.Contains(metrics, "horizon_ingest_state_invalid 1") + assert.False(t, stateInvalid, "State is invalid!") + + notVerifiedYet := strings.Contains( + metrics, + fmt.Sprintf("horizon_ingest_state_verify_duration_seconds_count %d", count-1), + ) + if notVerifiedYet { + time.Sleep(time.Second) + continue + } + + return true + } + + return false +} diff --git a/services/horizon/internal/integration/trade_aggregations_test.go b/services/horizon/internal/integration/trade_aggregations_test.go new file mode 100644 index 0000000000..8f67f85815 --- /dev/null +++ b/services/horizon/internal/integration/trade_aggregations_test.go @@ -0,0 +1,267 @@ +package integration + +import ( + "context" + "testing" + "time" + + "github.com/stellar/go/services/horizon/internal/db2" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test/integration" + strtime "github.com/stellar/go/support/time" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + + "github.com/guregu/null" +) + +func TestTradeAggregations(t *testing.T) { + itest := integration.NewTest(t, integration.Config{}) + ctx := context.Background() + historyQ := itest.Horizon().HistoryQ() + + // Insert some trades + now := strtime.Now().RoundDown(60_000) + base, err := xdr.BuildAsset("credit_alphanum4", "GDUKMGUGDZQK6YHYA5Z6AY2G4XDSZPSZ3SW5UN3ARVMO6QSRDWP5YLEX", "EUR") + counter, err := xdr.BuildAsset("credit_alphanum4", "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "USD") + assert.NoError(t, err) + + assets, err := historyQ.CreateAssets(ctx, []xdr.Asset{ + base, + counter, + }, 1000) + assert.NoError(t, err) + assert.Len(t, assets, 2) + baseAssetId := assets[base.String()].ID + counterAssetId := assets[counter.String()].ID + + accounts, err := historyQ.CreateAccounts(ctx, []string{ + itest.Master().Address(), + }, 1000) + assert.NoError(t, err) + assert.Len(t, accounts, 1) + + scenarios := []struct { + name string + trades []history.InsertTrade + resolution int64 + offset int64 + pq db2.PageQuery + startTime strtime.Millis + endTime strtime.Millis + expected []history.TradeAggregation + }{ + { + name: "no trades", + 
trades: []history.InsertTrade{}, + resolution: 60_000, + pq: db2.PageQuery{Limit: 100}, + expected: []history.TradeAggregation{}, + }, + { + name: "one trade", + trades: []history.InsertTrade{ + { + HistoryOperationID: 0, + Order: 1, + LedgerCloseTime: now.ToTime().Add(5 * time.Second), + BaseAccountID: null.IntFrom(accounts[itest.Master().Address()]), + CounterAccountID: null.IntFrom(accounts[itest.Master().Address()]), + BaseAssetID: baseAssetId, + BaseAmount: int64(4_263_291_501), + BaseOfferID: null.IntFrom(int64(100)), + BaseIsSeller: true, + CounterAmount: int64(100), + CounterAssetID: counterAssetId, + PriceN: 23456, + PriceD: 10000, + Type: history.OrderbookTradeType, + }, + }, + resolution: 60_000, + pq: db2.PageQuery{Limit: 100}, + expected: []history.TradeAggregation{ + { + Timestamp: now.ToInt64(), + TradeCount: 1, + BaseVolume: "4263291501", + CounterVolume: "100", + Average: float64(100) / 4_263_291_501, + HighN: 23456, + HighD: 10000, + LowN: 23456, + LowD: 10000, + OpenN: 23456, + OpenD: 10000, + CloseN: 23456, + CloseD: 10000, + }, + }, + }, + { + name: "two trades", + trades: []history.InsertTrade{ + { + HistoryOperationID: 0, + Order: 0, + LedgerCloseTime: now.ToTime().Add(5 * time.Second), + BaseAccountID: null.IntFrom(accounts[itest.Master().Address()]), + CounterAccountID: null.IntFrom(accounts[itest.Master().Address()]), + BaseAssetID: baseAssetId, + BaseAmount: int64(4_263_291_501), + BaseOfferID: null.IntFrom(int64(200)), + BaseIsSeller: true, + CounterAmount: int64(100), + CounterAssetID: counterAssetId, + PriceN: 23456, + PriceD: 10000, + Type: history.OrderbookTradeType, + }, + { + HistoryOperationID: 0, + Order: 1, + LedgerCloseTime: now.ToTime().Add(5 * time.Second), + BaseAccountID: null.IntFrom(accounts[itest.Master().Address()]), + CounterAccountID: null.IntFrom(accounts[itest.Master().Address()]), + BaseAssetID: baseAssetId, + BaseAmount: int64(4_263_291_501), + BaseOfferID: null.IntFrom(int64(300)), + BaseIsSeller: true, + CounterAmount: int64(1000), + CounterAssetID: counterAssetId, + PriceN: 13456, + PriceD: 10000, + Type: history.OrderbookTradeType, + }, + }, + resolution: 60_000, + pq: db2.PageQuery{Limit: 100}, + expected: []history.TradeAggregation{ + { + Timestamp: now.ToInt64(), + TradeCount: 2, + BaseVolume: "8526583002", + CounterVolume: "1100", + Average: float64(1100) / 8_526_583_002, + HighN: 23456, + HighD: 10000, + LowN: 13456, + LowD: 10000, + OpenN: 23456, + OpenD: 10000, + CloseN: 13456, + CloseD: 10000, + }, + }, + }, + { + name: "1d resolution rollups", + trades: []history.InsertTrade{ + { + HistoryOperationID: 0, + Order: 0, + LedgerCloseTime: now.ToTime().Add(5 * time.Second), + BaseAccountID: null.IntFrom(accounts[itest.Master().Address()]), + CounterAccountID: null.IntFrom(accounts[itest.Master().Address()]), + BaseAssetID: baseAssetId, + BaseAmount: int64(4_263_301_501), + BaseOfferID: null.IntFrom(int64(400)), + BaseIsSeller: true, + CounterAmount: int64(100), + CounterAssetID: counterAssetId, + PriceN: 23456, + PriceD: 10000, + Type: history.OrderbookTradeType, + }, + { + HistoryOperationID: 0, + Order: 1, + LedgerCloseTime: now.ToTime().Add(5 * time.Second), + BaseAccountID: null.IntFrom(accounts[itest.Master().Address()]), + CounterAccountID: null.IntFrom(accounts[itest.Master().Address()]), + BaseAssetID: baseAssetId, + BaseAmount: int64(4_263_291_501), + BaseOfferID: null.IntFrom(int64(500)), + BaseIsSeller: true, + CounterAmount: int64(1000), + CounterAssetID: counterAssetId, + PriceN: 13456, + PriceD: 10000, + Type: 
history.OrderbookTradeType,
+				},
+			},
+			resolution: 86_400_000,
+			pq:         db2.PageQuery{Limit: 100},
+			expected: []history.TradeAggregation{
+				{
+					Timestamp:     now.RoundDown(86_400_000).ToInt64(),
+					TradeCount:    2,
+					BaseVolume:    "8526593002",
+					CounterVolume: "1100",
+					Average:       float64(1100) / 8_526_593_002,
+					HighN:         23456,
+					HighD:         10000,
+					LowN:          13456,
+					LowD:          10000,
+					OpenN:         23456,
+					OpenD:         10000,
+					CloseN:        13456,
+					CloseD:        10000,
+				},
+			},
+		},
+	}
+	for _, scenario := range scenarios {
+		t.Run(scenario.name, func(t *testing.T) {
+			// Run each in a txn so the ids don't conflict.
+			assert.NoError(t, historyQ.Begin())
+			defer func() {
+				assert.NoError(t, historyQ.Rollback())
+			}()
+
+			batch := historyQ.NewTradeBatchInsertBuilder(1000)
+			batch.Add(ctx, scenario.trades...)
+			assert.NoError(t, batch.Exec(ctx))
+
+			// Rebuild the aggregates.
+			for _, trade := range scenario.trades {
+				ledgerCloseTime := strtime.MillisFromTime(trade.LedgerCloseTime)
+				assert.NoError(t, historyQ.RebuildTradeAggregationTimes(ctx, ledgerCloseTime, ledgerCloseTime))
+			}
+
+			// Check the result is what we expect
+			query, err := historyQ.GetTradeAggregationsQ(
+				baseAssetId,
+				counterAssetId,
+				scenario.resolution,
+				scenario.offset,
+				scenario.pq,
+			)
+			assert.NoError(t, err)
+
+			if !scenario.startTime.IsNil() {
+				query, err = query.WithStartTime(scenario.startTime)
+				assert.NoError(t, err)
+			}
+			if !scenario.endTime.IsNil() {
+				query, err = query.WithEndTime(scenario.endTime)
+				assert.NoError(t, err)
+			}
+
+			sql, args, err := query.GetSql().ToSql()
+			assert.NoError(t, err)
+			t.Logf(
+				"Querying sql: %q, args: %v",
+				sql, args,
+			)
+
+			var records []history.TradeAggregation
+			assert.NoError(t, historyQ.Select(ctx, &records, query.GetSql()))
+
+			assert.Len(t, records, len(scenario.expected))
+			for i, elem := range records {
+				assert.Equal(t, scenario.expected[i], elem)
+			}
+		})
+	}
+}
diff --git a/services/horizon/internal/integration/txsub_test.go b/services/horizon/internal/integration/txsub_test.go
new file mode 100644
index 0000000000..eb3723db4d
--- /dev/null
+++ b/services/horizon/internal/integration/txsub_test.go
@@ -0,0 +1,69 @@
+package integration
+
+import (
+	"strconv"
+	"sync"
+	"testing"
+
+	"github.com/stellar/go/services/horizon/internal/test/integration"
+	"github.com/stellar/go/txnbuild"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestTxsub(t *testing.T) {
+	tt := assert.New(t)
+	itest := integration.NewTest(t, integration.Config{})
+	master := itest.Master()
+
+	// Sanity check: create 20 accounts and submit 2 txs from each of them as
+	// a source at the same time. Then check if the results are correct.
+ t.Run("Sanity", func(t *testing.T) { + testAccounts := 20 + subsPerAccont := 2 + keys, accounts := itest.CreateAccounts(testAccounts, "1000") + + var wg sync.WaitGroup + + for i := 0; i < testAccounts; i++ { + for j := 0; j < subsPerAccont; j++ { + wg.Add(1) + + seq, err := accounts[i].GetSequenceNumber() + assert.NoError(t, err) + + var account txnbuild.SimpleAccount + if j == 0 { + account = txnbuild.SimpleAccount{ + AccountID: keys[i].Address(), + Sequence: seq, + } + } else { + account = txnbuild.SimpleAccount{ + AccountID: keys[i].Address(), + Sequence: seq + 1, + } + } + + go func(i int, j int, account txnbuild.SimpleAccount) { + defer wg.Done() + + op := txnbuild.Payment{ + Destination: master.Address(), + Amount: "10", + Asset: txnbuild.NativeAsset{}, + } + + txResp := itest.MustSubmitOperations(&account, keys[i], &op) + + tt.Equal(accounts[i].GetAccountID(), txResp.Account) + seq, err := account.GetSequenceNumber() + assert.NoError(t, err) + tt.Equal(strconv.FormatInt(seq, 10), txResp.AccountSequence) + t.Logf("%d/%d done", i, j) + }(i, j, account) + } + } + + wg.Wait() + }) +} diff --git a/services/horizon/internal/ledger/ledger_source.go b/services/horizon/internal/ledger/ledger_source.go new file mode 100644 index 0000000000..cf5f9efcbe --- /dev/null +++ b/services/horizon/internal/ledger/ledger_source.go @@ -0,0 +1,128 @@ +package ledger + +import ( + "sync" + "time" +) + +// Source exposes two helpers methods to help you find out the current +// ledger and yield every time there is a new ledger. Call `Close` when +// source is no longer used. +type Source interface { + CurrentLedger() uint32 + NextLedger(currentSequence uint32) chan uint32 + Close() +} + +// HistoryDBSource utility struct to pass the SSE update frequency and a +// function to get the current ledger state. +type HistoryDBSource struct { + updateFrequency time.Duration + state *State + + closedLock sync.Mutex + closed bool +} + +// NewHistoryDBSource constructs a new instance of HistoryDBSource +func NewHistoryDBSource(updateFrequency time.Duration, state *State) *HistoryDBSource { + return &HistoryDBSource{ + updateFrequency: updateFrequency, + state: state, + closedLock: sync.Mutex{}, + } +} + +// CurrentLedger returns the current ledger. +func (source *HistoryDBSource) CurrentLedger() uint32 { + return source.state.CurrentStatus().ExpHistoryLatest +} + +// NextLedger returns a channel which yields every time there is a new ledger with a sequence number larger than currentSequence. +func (source *HistoryDBSource) NextLedger(currentSequence uint32) chan uint32 { + // Make sure this is buffered channel of size 1. Otherwise, the go routine below + // will never return if `newLedgers` channel is not read. From Effective Go: + // > If the channel is unbuffered, the sender blocks until the receiver has received the value. + newLedgers := make(chan uint32, 1) + go func() { + for { + if source.updateFrequency > 0 { + time.Sleep(source.updateFrequency) + } + + source.closedLock.Lock() + closed := source.closed + source.closedLock.Unlock() + if closed { + return + } + + currentLedgerState := source.state.CurrentStatus() + if currentLedgerState.ExpHistoryLatest > currentSequence { + newLedgers <- currentLedgerState.ExpHistoryLatest + return + } + } + }() + + return newLedgers +} + +// Close closes the internal go routines. 
+func (source *HistoryDBSource) Close() { + source.closedLock.Lock() + defer source.closedLock.Unlock() + source.closed = true +} + +// TestingSource is helper struct which implements the LedgerSource +// interface. +type TestingSource struct { + currentLedger uint32 + newLedgers chan uint32 + lock *sync.RWMutex +} + +// NewTestingSource returns a TestingSource. +func NewTestingSource(currentLedger uint32) *TestingSource { + return &TestingSource{ + currentLedger: currentLedger, + newLedgers: make(chan uint32), + lock: &sync.RWMutex{}, + } +} + +// CurrentLedger returns the current ledger. +func (source *TestingSource) CurrentLedger() uint32 { + source.lock.RLock() + defer source.lock.RUnlock() + return source.currentLedger +} + +// AddLedger adds a new sequence to the newLedgers channel. AddLedger() +// will block until the new sequence is read +func (source *TestingSource) AddLedger(nextSequence uint32) { + source.newLedgers <- nextSequence +} + +// NextLedger returns a channel which yields every time there is a new ledger. +func (source *TestingSource) NextLedger(currentSequence uint32) chan uint32 { + response := make(chan uint32, 1) + + go func() { + for { + nextLedger := <-source.newLedgers + if nextLedger > source.currentLedger { + source.lock.Lock() + defer source.lock.Unlock() + source.currentLedger = nextLedger + response <- nextLedger + return + } + } + }() + + return response +} + +func (source *TestingSource) Close() {} diff --git a/services/horizon/internal/ledger/ledger_source_test.go b/services/horizon/internal/ledger/ledger_source_test.go new file mode 100644 index 0000000000..f7eedaa1df --- /dev/null +++ b/services/horizon/internal/ledger/ledger_source_test.go @@ -0,0 +1,50 @@ +package ledger + +import ( + "sync" + "testing" +) + +func Test_HistoryDBLedgerSourceCurrentLedger(t *testing.T) { + state := &State{ + RWMutex: sync.RWMutex{}, + current: Status{ + HorizonStatus: HorizonStatus{ + ExpHistoryLatest: 3, + }, + }, + } + + ledgerSource := HistoryDBSource{ + updateFrequency: 0, + state: state, + } + + currentLedger := ledgerSource.CurrentLedger() + if currentLedger != 3 { + t.Errorf("CurrentLedger = %d, want 3", currentLedger) + } +} + +func Test_HistoryDBLedgerSourceNextLedger(t *testing.T) { + state := &State{ + RWMutex: sync.RWMutex{}, + current: Status{ + HorizonStatus: HorizonStatus{ + ExpHistoryLatest: 3, + }, + }, + } + + ledgerSource := HistoryDBSource{ + updateFrequency: 0, + state: state, + } + + ledgerChan := ledgerSource.NextLedger(0) + + nextLedger := <-ledgerChan + if nextLedger != 3 { + t.Errorf("NextLedger = %d, want 3", nextLedger) + } +} diff --git a/services/horizon/internal/ledger/main.go b/services/horizon/internal/ledger/main.go new file mode 100644 index 0000000000..1d17e09d67 --- /dev/null +++ b/services/horizon/internal/ledger/main.go @@ -0,0 +1,115 @@ +// Package ledger provides useful utilities concerning ledgers within stellar, +// specifically as a central location to store a cached snapshot of the state of +// both horizon's and stellar-core's views of the ledger. This package is +// intended to be at the lowest levels of horizon's dependency tree, please keep +// it free of dependencies to other horizon packages. +package ledger + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// Status represents a snapshot of both horizon's and stellar-core's view of the +// ledger. 
+type Status struct { + CoreStatus + HorizonStatus +} + +type CoreStatus struct { + CoreLatest int32 `db:"core_latest"` +} + +type HorizonStatus struct { + HistoryLatest int32 `db:"history_latest"` + HistoryLatestClosedAt time.Time `db:"history_latest_closed_at"` + HistoryElder int32 `db:"history_elder"` + ExpHistoryLatest uint32 `db:"exp_history_latest"` +} + +// State is an in-memory data structure which holds a snapshot of both +// horizon's and stellar-core's view of the the network +type State struct { + sync.RWMutex + current Status + + Metrics struct { + HistoryLatestLedgerCounter prometheus.CounterFunc + HistoryLatestLedgerClosedAgoGauge prometheus.GaugeFunc + HistoryElderLedgerCounter prometheus.CounterFunc + CoreLatestLedgerCounter prometheus.CounterFunc + } +} + +// CurrentStatus returns the cached snapshot of ledger state +func (c *State) CurrentStatus() Status { + c.RLock() + defer c.RUnlock() + ret := c.current + return ret +} + +// SetStatus updates the cached snapshot of the ledger state +func (c *State) SetStatus(next Status) { + c.Lock() + defer c.Unlock() + c.current = next +} + +// SetCoreStatus updates the cached snapshot of the ledger state of Stellar-Core +func (c *State) SetCoreStatus(next CoreStatus) { + c.Lock() + defer c.Unlock() + c.current.CoreStatus = next +} + +// SetHorizonStatus updates the cached snapshot of the ledger state of Horizon +func (c *State) SetHorizonStatus(next HorizonStatus) { + c.Lock() + defer c.Unlock() + c.current.HorizonStatus = next +} + +func (c *State) RegisterMetrics(registry *prometheus.Registry) { + c.Metrics.HistoryLatestLedgerCounter = prometheus.NewCounterFunc( + prometheus.CounterOpts{Namespace: "horizon", Subsystem: "history", Name: "latest_ledger"}, + func() float64 { + ls := c.CurrentStatus() + return float64(ls.HistoryLatest) + }, + ) + registry.MustRegister(c.Metrics.HistoryLatestLedgerCounter) + + c.Metrics.HistoryLatestLedgerClosedAgoGauge = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "history", Name: "latest_ledger_closed_ago_seconds", + Help: "seconds since the close of the last ingested ledger", + }, + func() float64 { + ls := c.CurrentStatus() + return time.Since(ls.HistoryLatestClosedAt).Seconds() + }, + ) + registry.MustRegister(c.Metrics.HistoryLatestLedgerClosedAgoGauge) + + c.Metrics.HistoryElderLedgerCounter = prometheus.NewCounterFunc( + prometheus.CounterOpts{Namespace: "horizon", Subsystem: "history", Name: "elder_ledger"}, + func() float64 { + ls := c.CurrentStatus() + return float64(ls.HistoryElder) + }, + ) + registry.MustRegister(c.Metrics.HistoryElderLedgerCounter) + + c.Metrics.CoreLatestLedgerCounter = prometheus.NewCounterFunc( + prometheus.CounterOpts{Namespace: "horizon", Subsystem: "stellar_core", Name: "latest_ledger"}, + func() float64 { + ls := c.CurrentStatus() + return float64(ls.CoreLatest) + }, + ) + registry.MustRegister(c.Metrics.CoreLatestLedgerCounter) +} diff --git a/services/horizon/internal/logmetrics/main.go b/services/horizon/internal/logmetrics/main.go new file mode 100644 index 0000000000..baace30e97 --- /dev/null +++ b/services/horizon/internal/logmetrics/main.go @@ -0,0 +1,65 @@ +package logmetrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/stellar/go/support/log" +) + +// Metrics is a logrus hook-compliant struct that records metrics about logging +// when added to a logrus.Logger +type Metrics map[logrus.Level]prometheus.Counter + +var DefaultMetrics = NewMetrics() + +func 
init() { + _, DefaultMetrics = New() +} + +// New creates a new logger according to horizon specifications. +func New() (l *log.Entry, m *Metrics) { + m = NewMetrics() + l = log.New() + l.SetLevel(logrus.WarnLevel) + l.AddHook(m) + return +} + +// NewMetrics creates a new hook for recording metrics. +func NewMetrics() *Metrics { + return &Metrics{ + logrus.DebugLevel: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "log", Name: "debug_total", + }), + logrus.InfoLevel: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "log", Name: "info_total", + }), + logrus.WarnLevel: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "log", Name: "warn_total", + }), + logrus.ErrorLevel: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "log", Name: "error_total", + }), + logrus.PanicLevel: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "log", Name: "panic_total", + }), + } +} + +// Fire is triggered by logrus, in response to a logging event +func (m *Metrics) Fire(e *logrus.Entry) error { + (*m)[e.Level].Inc() + return nil +} + +// Levels returns the logging levels that will trigger this hook to run. In +// this case, all of them. +func (m *Metrics) Levels() []logrus.Level { + return []logrus.Level{ + logrus.DebugLevel, + logrus.InfoLevel, + logrus.WarnLevel, + logrus.ErrorLevel, + logrus.PanicLevel, + } +} diff --git a/services/horizon/internal/logmetrics/main_test.go b/services/horizon/internal/logmetrics/main_test.go new file mode 100644 index 0000000000..9f80bfc359 --- /dev/null +++ b/services/horizon/internal/logmetrics/main_test.go @@ -0,0 +1,44 @@ +package logmetrics + +import ( + "bytes" + "testing" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" +) + +func TestLogPackageMetrics(t *testing.T) { + output := new(bytes.Buffer) + l, m := New() + l.DisableColors() + l.SetLevel(logrus.DebugLevel) + l.SetOutput(output) + + for _, meter := range *m { + assert.Equal(t, float64(0), getMetricValue(meter).GetCounter().GetValue()) + } + + l.Debug("foo") + l.Info("foo") + l.Warn("foo") + l.Error("foo") + assert.Panics(t, func() { + l.Panic("foo") + }) + + for _, meter := range *m { + assert.Equal(t, float64(1), getMetricValue(meter).GetCounter().GetValue()) + } +} + +func getMetricValue(metric prometheus.Metric) *dto.Metric { + value := &dto.Metric{} + err := metric.Write(value) + if err != nil { + panic(err) + } + return value +} diff --git a/services/horizon/internal/middleware_test.go b/services/horizon/internal/middleware_test.go new file mode 100644 index 0000000000..b44d2cade6 --- /dev/null +++ b/services/horizon/internal/middleware_test.go @@ -0,0 +1,406 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package horizon + +import ( + "context" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + "github.com/go-chi/chi" + "github.com/stellar/throttled" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/services/horizon/internal/actions" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/httpx" + "github.com/stellar/go/services/horizon/internal/ingest" + 
"github.com/stellar/go/services/horizon/internal/ledger" + hProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +func requestHelperRemoteAddr(ip string) func(r *http.Request) { + return func(r *http.Request) { + r.RemoteAddr = ip + } +} + +func requestHelperXFF(xff string) func(r *http.Request) { + return func(r *http.Request) { + r.Header.Set("X-Forwarded-For", xff) + } +} + +type RateLimitMiddlewareTestSuite struct { + suite.Suite + ht *HTTPT + c Config + app *App + rh test.RequestHelper +} + +func (suite *RateLimitMiddlewareTestSuite) SetupSuite() { + suite.ht = StartHTTPTest(suite.T(), "base") +} + +func (suite *RateLimitMiddlewareTestSuite) SetupTest() { + suite.c = NewTestConfig() + suite.c.RateQuota = &throttled.RateQuota{ + MaxRate: throttled.PerHour(10), + MaxBurst: 9, + } + app, err := NewApp(suite.c) + if err != nil { + log.Fatal("cannot initialize app", err) + } + suite.app = app + suite.rh = NewRequestHelper(suite.app) +} + +func (suite *RateLimitMiddlewareTestSuite) TearDownSuite() { + suite.ht.Finish() +} + +func (suite *RateLimitMiddlewareTestSuite) TearDownTest() { + suite.app.Close() +} + +// Sets X-RateLimit-Limit headers correctly. +func (suite *RateLimitMiddlewareTestSuite) TestRateLimit_LimitHeaders() { + w := suite.rh.Get("/") + assert.Equal(suite.T(), 200, w.Code) + assert.Equal(suite.T(), "10", w.Header().Get("X-RateLimit-Limit")) +} + +// Sets X-RateLimit-Remaining headers correctly. +func (suite *RateLimitMiddlewareTestSuite) TestRateLimit_RemainingHeaders() { + // test that SSE requests are ignored + for i := 0; i < 10; i++ { + w := suite.rh.Get("/", test.RequestHelperStreaming) + assert.Equal(suite.T(), "", w.Header().Get("X-RateLimit-Remaining")) + assert.NotEqual(suite.T(), http.StatusTooManyRequests, w.Code) + } + + for i := 0; i < 10; i++ { + w := suite.rh.Get("/") + expected := 10 - (i + 1) + assert.Equal(suite.T(), strconv.Itoa(expected), w.Header().Get("X-RateLimit-Remaining")) + assert.NotEqual(suite.T(), http.StatusTooManyRequests, w.Code) + } + + // confirm remaining stays at 0 + for i := 0; i < 10; i++ { + w := suite.rh.Get("/") + assert.Equal(suite.T(), "0", w.Header().Get("X-RateLimit-Remaining")) + assert.Equal(suite.T(), http.StatusTooManyRequests, w.Code) + } +} + +// Sets X-RateLimit-Reset header correctly. Should reset after 360 seconds since it's limited to 10 requests/hour. +func (suite *RateLimitMiddlewareTestSuite) TestRateLimit_ResetHeaders() { + w := suite.rh.Get("/") + assert.Equal(suite.T(), "360", w.Header().Get("X-RateLimit-Reset")) +} + +// Restricts based on RemoteAddr IP after too many requests. +func (suite *RateLimitMiddlewareTestSuite) TestRateLimit_RemoteAddr() { + for i := 0; i < 10; i++ { + w := suite.rh.Get("/") + assert.Equal(suite.T(), 200, w.Code) + } + + w := suite.rh.Get("/") + assert.Equal(suite.T(), 429, w.Code) + + w = suite.rh.Get("/", requestHelperRemoteAddr("127.0.0.2")) + assert.Equal(suite.T(), 200, w.Code) + + // Ignores ports + w = suite.rh.Get("/", requestHelperRemoteAddr("127.0.0.1:4312")) + assert.Equal(suite.T(), 429, w.Code) +} + +// Restrict based upon X-Forwarded-For correctly. 
+func (suite *RateLimitMiddlewareTestSuite) TestRateLimit_XForwardedFor() { + for i := 0; i < 10; i++ { + w := suite.rh.Get("/", requestHelperXFF("4.4.4.4")) + assert.Equal(suite.T(), 200, w.Code) + } + + w := suite.rh.Get("/", requestHelperXFF("4.4.4.4")) + assert.Equal(suite.T(), 429, w.Code) + + // allow other ips + w = suite.rh.Get("/", requestHelperRemoteAddr("4.4.4.3")) + assert.Equal(suite.T(), 200, w.Code) + + // Ignores trailing ips + w = suite.rh.Get("/", requestHelperXFF("4.4.4.4, 4.4.4.5, 127.0.0.1")) + assert.Equal(suite.T(), 429, w.Code) +} + +func TestRateLimitMiddlewareTestSuite(t *testing.T) { + suite.Run(t, new(RateLimitMiddlewareTestSuite)) +} + +func TestStateMiddleware(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + q := &history.Q{tt.HorizonSession()} + + request, err := http.NewRequest("GET", "http://localhost/", nil) + tt.Assert.NoError(err) + + expectTransaction := true + endpoint := func(w http.ResponseWriter, r *http.Request) { + session := r.Context().Value(&horizonContext.SessionContextKey).(db.SessionInterface) + if (session.GetTx() == nil) == expectTransaction { + t.Fatalf("expected transaction to be in session: %v", expectTransaction) + } + w.WriteHeader(http.StatusOK) + } + + stateMiddleware := &httpx.StateMiddleware{ + HorizonSession: tt.HorizonSession(), + } + handler := chi.NewRouter() + handler.With(stateMiddleware.Wrap).MethodFunc("GET", "/", endpoint) + + for i, testCase := range []struct { + name string + noStateVerification bool + stateInvalid bool + latestHistoryLedger xdr.Uint32 + lastIngestedLedger uint32 + ingestionVersion int + sseRequest bool + expectedStatus int + expectTransaction bool + }{ + { + name: "responds with 500 if q.GetExpStateInvalid returns true", + stateInvalid: true, + latestHistoryLedger: 2, + lastIngestedLedger: 2, + ingestionVersion: ingest.CurrentVersion, + sseRequest: false, + expectedStatus: http.StatusInternalServerError, + expectTransaction: false, + }, + { + name: "responds with still ingesting if lastIngestedLedger <= 0", + stateInvalid: false, + latestHistoryLedger: 1, + lastIngestedLedger: 0, + ingestionVersion: ingest.CurrentVersion, + sseRequest: false, + expectedStatus: hProblem.StillIngesting.Status, + expectTransaction: false, + }, + { + name: "responds with still ingesting if lastIngestedLedger < latestHistoryLedger", + stateInvalid: false, + latestHistoryLedger: 3, + lastIngestedLedger: 2, + ingestionVersion: ingest.CurrentVersion, + sseRequest: false, + expectedStatus: hProblem.StillIngesting.Status, + expectTransaction: false, + }, + { + name: "responds with still ingesting if lastIngestedLedger > latestHistoryLedger", + stateInvalid: false, + latestHistoryLedger: 4, + lastIngestedLedger: 5, + ingestionVersion: ingest.CurrentVersion, + sseRequest: false, + expectedStatus: hProblem.StillIngesting.Status, + expectTransaction: false, + }, + { + name: "responds with still ingesting if version != ingest.CurrentVersion", + stateInvalid: false, + latestHistoryLedger: 5, + lastIngestedLedger: 5, + ingestionVersion: ingest.CurrentVersion - 1, + sseRequest: false, + expectedStatus: hProblem.StillIngesting.Status, + expectTransaction: false, + }, + { + name: "succeeds", + stateInvalid: false, + latestHistoryLedger: 6, + lastIngestedLedger: 6, + ingestionVersion: ingest.CurrentVersion, + sseRequest: false, + expectedStatus: http.StatusOK, + expectTransaction: true, + }, + { + name: "succeeds with SSE request", + stateInvalid: false, + latestHistoryLedger: 7, + 
lastIngestedLedger: 7, + ingestionVersion: ingest.CurrentVersion, + sseRequest: true, + expectedStatus: http.StatusOK, + expectTransaction: false, + }, + { + name: "succeeds without state verification", + noStateVerification: true, + stateInvalid: false, + latestHistoryLedger: 8, + lastIngestedLedger: 8, + ingestionVersion: ingest.CurrentVersion, + sseRequest: false, + expectedStatus: http.StatusOK, + expectTransaction: true, + }, + { + name: "succeeds without state verification and invalid state", + noStateVerification: true, + stateInvalid: true, + latestHistoryLedger: 9, + lastIngestedLedger: 9, + ingestionVersion: ingest.CurrentVersion, + sseRequest: false, + expectedStatus: http.StatusOK, + expectTransaction: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + stateMiddleware.NoStateVerification = testCase.noStateVerification + tt.Assert.NoError(q.UpdateExpStateInvalid(context.Background(), testCase.stateInvalid)) + _, err = q.InsertLedger(context.Background(), xdr.LedgerHeaderHistoryEntry{ + Hash: xdr.Hash{byte(i)}, + Header: xdr.LedgerHeader{ + LedgerSeq: testCase.latestHistoryLedger, + PreviousLedgerHash: xdr.Hash{byte(i)}, + }, + }, 0, 0, 0, 0, 0) + tt.Assert.NoError(err) + tt.Assert.NoError(q.UpdateLastLedgerIngest(context.Background(), testCase.lastIngestedLedger)) + tt.Assert.NoError(q.UpdateIngestVersion(context.Background(), testCase.ingestionVersion)) + + if testCase.sseRequest { + request.Header.Set("Accept", "text/event-stream") + } else { + request.Header.Del("Accept") + } + + w := httptest.NewRecorder() + expectTransaction = testCase.expectTransaction + handler.ServeHTTP(w, request) + tt.Assert.Equal(testCase.expectedStatus, w.Code) + if testCase.expectedStatus == http.StatusOK && !testCase.sseRequest { + tt.Assert.Equal( + w.Header().Get(actions.LastLedgerHeaderName), + strconv.FormatInt(int64(testCase.lastIngestedLedger), 10)) + } else { + tt.Assert.Equal(w.Header().Get(actions.LastLedgerHeaderName), "") + } + }) + } +} + +func TestClientDisconnect(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + test.ResetHorizonDB(t, tt.HorizonDB) + + request, err := http.NewRequest("GET", "http://localhost/", nil) + tt.Assert.NoError(err) + + endpoint := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + } + + stateMiddleware := &httpx.StateMiddleware{ + HorizonSession: tt.HorizonSession(), + NoStateVerification: true, + } + handler := chi.NewRouter() + handler.With(stateMiddleware.Wrap).MethodFunc("GET", "/", endpoint) + w := httptest.NewRecorder() + + ctx, cancel := context.WithCancel(request.Context()) + defer cancel() + request = request.WithContext(ctx) + // cancel invocation simulates client disconnect in the context + cancel() + + handler.ServeHTTP(w, request) + tt.Assert.Equal(499, w.Code) +} + +func TestCheckHistoryStaleMiddleware(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + request, err := http.NewRequest("GET", "http://localhost/", nil) + tt.Assert.NoError(err) + + endpoint := func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + } + + for _, testCase := range []struct { + name string + coreLatest int32 + historyLatest int32 + expectedStatus int + staleThreshold int32 + }{ + { + name: "responds with a service unavailable if history is stale", + coreLatest: 4, + historyLatest: 2, + expectedStatus: http.StatusServiceUnavailable, + staleThreshold: 1, + }, + { + name: "succeeds", + coreLatest: 6, + historyLatest: 6, + expectedStatus: http.StatusOK, + staleThreshold: 1, + }, + { + name: 
"succeeds with threshold 0", + coreLatest: 6, + historyLatest: 5, + expectedStatus: http.StatusOK, + staleThreshold: 0, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + state := ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: testCase.coreLatest, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: testCase.historyLatest, + }, + } + ledgerState := &ledger.State{} + ledgerState.SetStatus(state) + historyMiddleware := httpx.NewHistoryMiddleware(ledgerState, testCase.staleThreshold, tt.HorizonSession()) + handler := chi.NewRouter() + handler.With(historyMiddleware).MethodFunc("GET", "/", endpoint) + w := httptest.NewRecorder() + handler.ServeHTTP(w, request) + tt.Assert.Equal(testCase.expectedStatus, w.Code) + }) + } +} diff --git a/services/horizon/internal/operationfeestats/main.go b/services/horizon/internal/operationfeestats/main.go new file mode 100644 index 0000000000..c9728f380f --- /dev/null +++ b/services/horizon/internal/operationfeestats/main.go @@ -0,0 +1,81 @@ +// Package operationfeestats provides useful utilities concerning operation fee +// stats within stellar,specifically as a central location to store a cached snapshot +// of the state of network per operation fees and surge pricing. This package is +// intended to be at the lowest levels of horizon's dependency tree, please keep +// it free of dependencies to other horizon packages. +package operationfeestats + +import ( + "sync" +) + +// State represents a snapshot of horizon's view of the state of operation fee's +// on the network. +type State struct { + FeeChargedMax int64 + FeeChargedMin int64 + FeeChargedMode int64 + FeeChargedP10 int64 + FeeChargedP20 int64 + FeeChargedP30 int64 + FeeChargedP40 int64 + FeeChargedP50 int64 + FeeChargedP60 int64 + FeeChargedP70 int64 + FeeChargedP80 int64 + FeeChargedP90 int64 + FeeChargedP95 int64 + FeeChargedP99 int64 + + // MaxFee + MaxFeeMax int64 + MaxFeeMin int64 + MaxFeeMode int64 + MaxFeeP10 int64 + MaxFeeP20 int64 + MaxFeeP30 int64 + MaxFeeP40 int64 + MaxFeeP50 int64 + MaxFeeP60 int64 + MaxFeeP70 int64 + MaxFeeP80 int64 + MaxFeeP90 int64 + MaxFeeP95 int64 + MaxFeeP99 int64 + + LastBaseFee int64 + LastLedger uint32 + LedgerCapacityUsage string +} + +// CurrentState returns the cached snapshot of operation fee state and a boolean indicating +// if the cache has been populated +func CurrentState() (State, bool) { + lock.RLock() + ret := current + ok := present + lock.RUnlock() + return ret, ok +} + +// SetState updates the cached snapshot of the operation fee state +func SetState(next State) { + lock.Lock() + // in case of one query taking longer than another, this makes + // sure we don't overwrite the latest fee stats with old stats + if current.LastLedger < next.LastLedger { + current = next + } + present = true + lock.Unlock() +} + +// ResetState is used only for testing purposes +func ResetState() { + current = State{} + present = false +} + +var current State +var present bool +var lock sync.RWMutex diff --git a/services/horizon/internal/paths/doc.go b/services/horizon/internal/paths/doc.go new file mode 100644 index 0000000000..c1ae1a2cd1 --- /dev/null +++ b/services/horizon/internal/paths/doc.go @@ -0,0 +1,3 @@ +// Package paths provides utilities and facilities for payment paths as needed by horizon. Most +// importantly, it provides the Finder interface, allowing for pluggable path finding back ends. 
+package paths
diff --git a/services/horizon/internal/paths/main.go b/services/horizon/internal/paths/main.go
new file mode 100644
index 0000000000..d5baaf25d8
--- /dev/null
+++ b/services/horizon/internal/paths/main.go
@@ -0,0 +1,47 @@
+package paths
+
+import (
+	"context"
+
+	"github.com/stellar/go/xdr"
+)
+
+// Query is a query for paths
+type Query struct {
+	DestinationAsset    xdr.Asset
+	DestinationAmount   xdr.Int64
+	SourceAssets        []xdr.Asset
+	SourceAssetBalances []xdr.Int64
+	// if ValidateSourceBalance is true then we won't consider payment paths
+	// which require a source asset amount exceeding the balance present in `SourceAssetBalances`
+	ValidateSourceBalance bool
+	SourceAccount         *xdr.AccountId
+}
+
+// Path is the result returned by a path finder and is tied to the DestinationAmount used in the input query
+type Path struct {
+	Path              []string
+	Source            string
+	SourceAmount      xdr.Int64
+	Destination       string
+	DestinationAmount xdr.Int64
+}
+
+// Finder finds paths.
+type Finder interface {
+	// Find returns a list of payment paths and the most recent ledger
+	// for a Query of a maximum length `maxLength`. The payment paths
+	// are accurate and consistent with the returned ledger sequence number
+	Find(ctx context.Context, q Query, maxLength uint) ([]Path, uint32, error)
+	// FindFixedPaths returns a list of payment paths and the most recent ledger.
+	// Each of the payment paths starts by spending `amountToSpend` of `sourceAsset` and ends
+	// with delivering a positive amount of `destinationAsset`.
+	// The payment paths are accurate and consistent with the returned ledger sequence number
+	FindFixedPaths(
+		ctx context.Context,
+		sourceAsset xdr.Asset,
+		amountToSpend xdr.Int64,
+		destinationAssets []xdr.Asset,
+		maxLength uint,
+	) ([]Path, uint32, error)
+}
diff --git a/services/horizon/internal/paths/mock_finder.go b/services/horizon/internal/paths/mock_finder.go
new file mode 100644
index 0000000000..7b81429cfa
--- /dev/null
+++ b/services/horizon/internal/paths/mock_finder.go
@@ -0,0 +1,33 @@
+package paths
+
+import (
+	"context"
+
+	"github.com/stellar/go/xdr"
+	"github.com/stretchr/testify/mock"
+)
+
+var _ Finder = (*MockFinder)(nil)
+
+// MockFinder is a mock implementation of the Finder interface
+type MockFinder struct {
+	mock.Mock
+}
+
+func (m *MockFinder) Find(ctx context.Context, q Query, maxLength uint) ([]Path, uint32, error) {
+	args := m.Called(ctx, q, maxLength)
+
+	return args.Get(0).([]Path), args.Get(1).(uint32), args.Error(2)
+}
+
+func (m *MockFinder) FindFixedPaths(
+	ctx context.Context,
+	sourceAsset xdr.Asset,
+	amountToSpend xdr.Int64,
+	destinationAssets []xdr.Asset,
+	maxLength uint,
+) ([]Path, uint32, error) {
+	args := m.Called(ctx, sourceAsset, amountToSpend, destinationAssets, maxLength)
+
+	return args.Get(0).([]Path), args.Get(1).(uint32), args.Error(2)
+}
diff --git a/services/horizon/internal/reap/main.go b/services/horizon/internal/reap/main.go
new file mode 100644
index 0000000000..bcf71ecf87
--- /dev/null
+++ b/services/horizon/internal/reap/main.go
@@ -0,0 +1,38 @@
+// Package reap contains the history reaping subsystem for horizon. This system
+// is designed to remove data from the history database such that it does not
+// grow indefinitely. The system can be configured with a number of ledgers to
+// maintain at a minimum.
+package reap + +import ( + "context" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/support/db" +) + +// System represents the history reaping subsystem of horizon. +type System struct { + HistoryQ *history.Q + RetentionCount uint + ledgerState *ledger.State + ctx context.Context + cancel context.CancelFunc +} + +// New initializes the reaper. Once Run is called, it periodically deletes +// history data outside the configured retention window from the horizon database. +func New(retention uint, dbSession db.SessionInterface, ledgerState *ledger.State) *System { + ctx, cancel := context.WithCancel(context.Background()) + + r := &System{ + HistoryQ: &history.Q{dbSession.Clone()}, + RetentionCount: retention, + ledgerState: ledgerState, + ctx: ctx, + cancel: cancel, + } + + return r +} diff --git a/services/horizon/internal/reap/system.go b/services/horizon/internal/reap/system.go new file mode 100644 index 0000000000..02a10945a2 --- /dev/null +++ b/services/horizon/internal/reap/system.go @@ -0,0 +1,118 @@ +package reap + +import ( + "context" + "time" + + herrors "github.com/stellar/go/services/horizon/internal/errors" + "github.com/stellar/go/services/horizon/internal/toid" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" +) + +// DeleteUnretainedHistory removes all data associated with unretained ledgers. +func (r *System) DeleteUnretainedHistory(ctx context.Context) error { + // RetentionCount of 0 indicates "keep all history" + if r.RetentionCount == 0 { + return nil + } + + var ( + latest = r.ledgerState.CurrentStatus() + targetElder = (latest.HistoryLatest - int32(r.RetentionCount)) + 1 + ) + + if targetElder < latest.HistoryElder { + return nil + } + + err := r.clearBefore(ctx, latest.HistoryElder, targetElder) + if err != nil { + return err + } + + log. + WithField("new_elder", targetElder). + Info("reaper succeeded") + + return nil +} + +// Run triggers the reaper system to update itself, deleting unretained history +// when it is the appropriate time. +func (r *System) Run() { + for { + select { + case <-time.After(1 * time.Hour): + r.runOnce(r.ctx) + case <-r.ctx.Done(): + return + } + } +} + +func (r *System) Shutdown() { + r.cancel() +} + +func (r *System) runOnce(ctx context.Context) { + defer func() { + if rec := recover(); rec != nil { + err := herrors.FromPanic(rec) + log.Errorf("reaper panicked: %s", err) + herrors.ReportToSentry(err, nil) + } + }() + + err := r.DeleteUnretainedHistory(ctx) + if err != nil { + log.Errorf("reaper failed: %s", err) + } +} + +// Work backwards in 100k ledger blocks to prevent using all the CPU. +// +// This runs every hour, so we need to make sure it doesn't +// run for longer than an hour. +// +// Current ledger at 2021-08-12 is 36,827,497, so 100k means 368 batches. At 1 +// batch/second, that seems like a reasonable balance between running well +// under an hour, and slowing it down enough to leave some CPU for other +// processes.
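To make the batching arithmetic above concrete, the following self-contained sketch mirrors the window computation used by clearBefore below; the sequence numbers are arbitrary examples.

package main

import "fmt"

func main() {
    batchSize := int32(100_000)
    startSeq, endSeq := int32(1), int32(300_001)

    // Walk backwards from endSeq-1 in batchSize steps, clamping the final
    // window at startSeq. This prints:
    //   [200000 300000]
    //   [100000 200000]
    //   [1 100000]
    for end := endSeq - 1; end >= startSeq; end -= batchSize {
        start := end - batchSize
        if start < startSeq {
            start = startSeq
        }
        fmt.Println([]int32{start, end})
    }
}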
+var batchSize = int32(100_000) +var sleep = 1 * time.Second + +func (r *System) clearBefore(ctx context.Context, startSeq, endSeq int32) error { + for batchEndSeq := endSeq - 1; batchEndSeq >= startSeq; batchEndSeq -= batchSize { + batchStartSeq := batchEndSeq - batchSize + if batchStartSeq < startSeq { + batchStartSeq = startSeq + } + log.WithField("start_ledger", batchStartSeq).WithField("end_ledger", batchEndSeq).Info("reaper: clearing") + + batchStart, batchEnd, err := toid.LedgerRangeInclusive(batchStartSeq, batchEndSeq) + if err != nil { + return err + } + + err = r.HistoryQ.Begin() + if err != nil { + return errors.Wrap(err, "Error in begin") + } + defer r.HistoryQ.Rollback() + + err = r.HistoryQ.DeleteRangeAll(ctx, batchStart, batchEnd) + if err != nil { + return errors.Wrap(err, "Error in DeleteRangeAll") + } + + err = r.HistoryQ.Commit() + if err != nil { + return errors.Wrap(err, "Error in commit") + } + + time.Sleep(sleep) + } + + return nil +} diff --git a/services/horizon/internal/reap/system_test.go b/services/horizon/internal/reap/system_test.go new file mode 100644 index 0000000000..7d5d5a70b2 --- /dev/null +++ b/services/horizon/internal/reap/system_test.go @@ -0,0 +1,54 @@ +package reap + +import ( + "testing" + + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestDeleteUnretainedHistory(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + ledgerState := &ledger.State{} + ledgerState.SetStatus(tt.Scenario("kahuna")) + + db := tt.HorizonSession() + + sys := New(0, db, ledgerState) + + // Disable sleeps for this. + sleep = 0 + + var ( + prev int + cur int + ) + err := db.GetRaw(tt.Ctx, &prev, `SELECT COUNT(*) FROM history_ledgers`) + tt.Require.NoError(err) + + err = sys.DeleteUnretainedHistory(tt.Ctx) + if tt.Assert.NoError(err) { + err = db.GetRaw(tt.Ctx, &cur, `SELECT COUNT(*) FROM history_ledgers`) + tt.Require.NoError(err) + tt.Assert.Equal(prev, cur, "Ledgers deleted when RetentionCount == 0") + } + + ledgerState.SetStatus(tt.LoadLedgerStatus()) + sys.RetentionCount = 10 + err = sys.DeleteUnretainedHistory(tt.Ctx) + if tt.Assert.NoError(err) { + err = db.GetRaw(tt.Ctx, &cur, `SELECT COUNT(*) FROM history_ledgers`) + tt.Require.NoError(err) + tt.Assert.Equal(10, cur) + } + + ledgerState.SetStatus(tt.LoadLedgerStatus()) + sys.RetentionCount = 1 + err = sys.DeleteUnretainedHistory(tt.Ctx) + if tt.Assert.NoError(err) { + err = db.GetRaw(tt.Ctx, &cur, `SELECT COUNT(*) FROM history_ledgers`) + tt.Require.NoError(err) + tt.Assert.Equal(1, cur) + } +} diff --git a/services/horizon/internal/render/main.go b/services/horizon/internal/render/main.go new file mode 100644 index 0000000000..140eafd15e --- /dev/null +++ b/services/horizon/internal/render/main.go @@ -0,0 +1,29 @@ +package render + +import ( + "net/http" + + "github.com/adjust/goautoneg" + "github.com/stellar/go/support/log" +) + +// Negotiate inspects the Accept header of the provided request and determines +// what the most appropriate response type should be. Defaults to HAL. 
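As a usage sketch for the Negotiate function defined next (the handler and its branch bodies are hypothetical), callers are expected to switch on the negotiated type together with the Mime* constants from this package:

package example

import (
    "net/http"

    "github.com/stellar/go/services/horizon/internal/render"
)

// serve is a hypothetical handler skeleton; the branch bodies are elided.
func serve(w http.ResponseWriter, r *http.Request) {
    switch render.Negotiate(r) {
    case render.MimeEventStream:
        // stream the response as server-sent events
    case render.MimeHal, render.MimeJSON:
        // render the HAL/JSON representation
    case render.MimeRaw:
        // return raw bytes
    default:
        // an empty result means no acceptable content type could be negotiated
        w.WriteHeader(http.StatusNotAcceptable)
    }
}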
+func Negotiate(r *http.Request) string { + ctx := r.Context() + alternatives := []string{MimeHal, MimeJSON, MimeEventStream, MimeRaw} + accept := r.Header.Get("Accept") + + if accept == "" { + return MimeHal + } + + result := goautoneg.Negotiate(r.Header.Get("Accept"), alternatives) + + log.Ctx(ctx).WithFields(log.F{ + "content_type": result, + "accept": accept, + }).Debug("Negotiated content type") + + return result +} diff --git a/services/horizon/internal/render/main_test.go b/services/horizon/internal/render/main_test.go new file mode 100644 index 0000000000..b9205b5851 --- /dev/null +++ b/services/horizon/internal/render/main_test.go @@ -0,0 +1,39 @@ +package render + +import ( + "context" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNegotiate(t *testing.T) { + r, err := http.NewRequest("GET", "/ledgers", nil) + assert.Nil(t, err) + r.WithContext(context.Background()) + + testCases := []struct { + Header string + ExpectedResponseType string + }{ + // Obeys the Accept header's prioritization + {"application/hal+json", MimeHal}, + {"text/event-stream,application/hal+json", MimeEventStream}, + // Defaults to HAL + {"text/event-stream;q=0.5,application/hal+json", MimeHal}, + {"", MimeHal}, + // Returns empty string for invalid type + {"text/plain", ""}, + } + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + r.Header.Set("Accept", tc.Header) + assert.Equal(t, tc.ExpectedResponseType, Negotiate(r)) + }) + } + + // Defaults to MimeHal even with no Accept key set + r.Header.Del("Accept") + assert.Equal(t, MimeHal, Negotiate(r)) +} diff --git a/services/horizon/internal/render/mime.go b/services/horizon/internal/render/mime.go new file mode 100644 index 0000000000..a4b80f5c5f --- /dev/null +++ b/services/horizon/internal/render/mime.go @@ -0,0 +1,13 @@ +package render + +const ( + + //MimeEventStream is the mime type for "text/event-stream" + MimeEventStream = "text/event-stream" + //MimeHal is the mime type for "application/hal+json" + MimeHal = "application/hal+json" + //MimeJSON is the mime type for "application/json" + MimeJSON = "application/json" + //MimeRaw is the mime type for "application/octet-stream" + MimeRaw = "application/octet-stream" +) diff --git a/services/horizon/internal/render/problem/problem.go b/services/horizon/internal/render/problem/problem.go new file mode 100644 index 0000000000..0d206f255c --- /dev/null +++ b/services/horizon/internal/render/problem/problem.go @@ -0,0 +1,127 @@ +package problem + +import ( + "net/http" + + "github.com/stellar/go/support/render/problem" +) + +// Well-known and reused problems below: +var ( + + // ClientDisconnected, represented by a non-standard HTTP status code of 499, which was introduced by + // nginix.org(https://www.nginx.com/resources/wiki/extending/api/http/) as a way to capture this state. Use it as a shortcut + // in your actions. + ClientDisconnected = problem.P{ + Type: "client_disconnected", + Title: "Client Disconnected", + Status: 499, + Detail: "The client has closed the connection.", + } + + // ServiceUnavailable is a well-known problem type. Use it as a shortcut + // in your actions. + ServiceUnavailable = problem.P{ + Type: "service_unavailable", + Title: "Service Unavailable", + Status: http.StatusServiceUnavailable, + Detail: "The request cannot be serviced at this time.", + } + + // RateLimitExceeded is a well-known problem type. Use it as a shortcut + // in your actions. 
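These well-known problem values are meant to be handed directly to the support/render/problem helpers, as the tests in this change do. Here is a hedged sketch of a handler doing the same; backendHealthy is a hypothetical stand-in for real request logic.

package example

import (
    "net/http"

    hProblem "github.com/stellar/go/services/horizon/internal/render/problem"
    "github.com/stellar/go/support/render/problem"
)

// backendHealthy is hypothetical; imagine a real readiness check here.
func backendHealthy() bool { return true }

func handler(w http.ResponseWriter, r *http.Request) {
    if !backendHealthy() {
        // Renders an application/problem+json body with status 503.
        problem.Render(r.Context(), w, hProblem.ServiceUnavailable)
        return
    }
    w.WriteHeader(http.StatusOK)
}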
+ RateLimitExceeded = problem.P{ + Type: "rate_limit_exceeded", + Title: "Rate Limit Exceeded", + Status: 429, + Detail: "The rate limit for the requesting IP address is over its alloted " + + "limit. The allowed limit and requests left per time period are " + + "communicated to clients via the http response headers 'X-RateLimit-*' " + + "headers.", + } + + // NotImplemented is a well-known problem type. Use it as a shortcut + // in your actions. + NotImplemented = problem.P{ + Type: "not_implemented", + Title: "Resource Not Yet Implemented", + Status: http.StatusNotFound, + Detail: "While the requested URL is expected to eventually point to a " + + "valid resource, the work to implement the resource has not yet " + + "been completed.", + } + + // NotAcceptable is a well-known problem type. Use it as a shortcut + // in your actions. + NotAcceptable = problem.P{ + Type: "not_acceptable", + Title: "An acceptable response content-type could not be provided for " + + "this request", + Status: http.StatusNotAcceptable, + } + + // ServerOverCapacity is a well-known problem type. Use it as a shortcut + // in your actions. + ServerOverCapacity = problem.P{ + Type: "server_over_capacity", + Title: "Server Over Capacity", + Status: http.StatusServiceUnavailable, + Detail: "This horizon server is currently overloaded. Please wait for " + + "several minutes before trying your request again.", + } + + // Timeout is a well-known problem type. Use it as a shortcut + // in your actions. + Timeout = problem.P{ + Type: "timeout", + Title: "Timeout", + Status: http.StatusGatewayTimeout, + Detail: "Your request timed out before completing. Please try your " + + "request again. If you are submitting a transaction make sure you are " + + "sending exactly the same transaction (with the same sequence number).", + } + + // UnsupportedMediaType is a well-known problem type. Use it as a shortcut + // in your actions. + UnsupportedMediaType = problem.P{ + Type: "unsupported_media_type", + Title: "Unsupported Media Type", + Status: http.StatusUnsupportedMediaType, + Detail: "The request has an unsupported content type. Presently, the " + + "only supported content type is application/x-www-form-urlencoded.", + } + + // BeforeHistory is a well-known problem type. Use it as a shortcut + // in your actions. + BeforeHistory = problem.P{ + Type: "before_history", + Title: "Data Requested Is Before Recorded History", + Status: http.StatusGone, + Detail: "This horizon instance is configured to only track a " + + "portion of the stellar network's latest history. This request " + + "is asking for results prior to the recorded history known to " + + "this horizon instance.", + } + + // StaleHistory is a well-known problem type. Use it as a shortcut + // in your actions. + StaleHistory = problem.P{ + Type: "stale_history", + Title: "Historical DB Is Too Stale", + Status: http.StatusServiceUnavailable, + Detail: "This horizon instance is configured to reject client requests " + + "when it can determine that the history database is lagging too far " + + "behind the connected instance of Stellar-Core or read replica. It's " + + "also possible that Stellar-Core is out of sync. Please try again later.", + } + + // StillIngesting is a well-known problem type. Use it as a shortcut + // in your actions. + StillIngesting = problem.P{ + Type: "still_ingesting", + Title: "Still Ingesting", + Status: http.StatusServiceUnavailable, + Detail: "Data cannot be presented because it's still being ingested. 
Please " + + "wait for several minutes before trying your request again.", + } +) diff --git a/services/horizon/internal/render/problem/problem_test.go b/services/horizon/internal/render/problem/problem_test.go new file mode 100644 index 0000000000..707c75b658 --- /dev/null +++ b/services/horizon/internal/render/problem/problem_test.go @@ -0,0 +1,78 @@ +package problem + +import ( + "context" + "errors" + "net/http/httptest" + "testing" + + "github.com/stellar/go/support/render/problem" + "github.com/stretchr/testify/assert" +) + +var ctx = context.Background() +var testRender = func(ctx context.Context, err error) *httptest.ResponseRecorder { + w := httptest.NewRecorder() + problem.Render(ctx, w, err) + return w +} + +func TestCommonProblems(t *testing.T) { + testCases := []struct { + testName string + p problem.P + expectedCode int + }{ + {"NotFound", problem.NotFound, 404}, + {"RateLimitExceeded", RateLimitExceeded, 429}, + {"ClientDisconneted", ClientDisconnected, 499}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + w := testRender(ctx, tc.p) + assert.Equal(t, tc.expectedCode, w.Code) + }) + } +} + +func TestMakeProblemWithInvalidField(t *testing.T) { + tt := assert.New(t) + + p := problem.NewProblemWithInvalidField( + problem.NotFound, + "key", + errors.New("not found"), + ) + + expectedErr := map[string]interface{}{ + "invalid_field": "key", + "reason": "not found", + } + + tt.Equal(expectedErr, p.Extras) + tt.Equal(p.Type, "not_found") + + // it doesn't add keys to source problem + tt.Len(problem.NotFound.Extras, 0) +} + +func TestMakeInvalidFieldProblem(t *testing.T) { + tt := assert.New(t) + + p := problem.MakeInvalidFieldProblem( + "key", + errors.New("not found"), + ) + + expectedErr := map[string]interface{}{ + "invalid_field": "key", + "reason": "not found", + } + + tt.Equal(expectedErr, p.Extras) + tt.Equal(p.Type, "bad_request") + + // it doesn't add keys to source problem + tt.Len(problem.BadRequest.Extras, 0) +} diff --git a/services/horizon/internal/render/sse/doc.go b/services/horizon/internal/render/sse/doc.go new file mode 100644 index 0000000000..0e88cd377d --- /dev/null +++ b/services/horizon/internal/render/sse/doc.go @@ -0,0 +1,3 @@ +// This package contains the Server Sent Events implementation used by +// horizon. +package sse diff --git a/services/horizon/internal/render/sse/main.go b/services/horizon/internal/render/sse/main.go new file mode 100644 index 0000000000..d5001bc9e4 --- /dev/null +++ b/services/horizon/internal/render/sse/main.go @@ -0,0 +1,96 @@ +package sse + +import ( + "context" + "encoding/json" + "fmt" + "net/http" +) + +// Event is the packet of data that gets sent over the wire to a connected +// client. +type Event struct { + Data interface{} + Error error + ID string + Event string + Retry int +} + +// WritePreamble prepares this http connection for streaming using Server Sent +// Events. It sends the initial http response with the appropriate headers to +// do so. 
+func WritePreamble(ctx context.Context, w http.ResponseWriter) bool { + _, flushable := w.(http.Flusher) + if !flushable { + //TODO: render a problem struct instead of simple string + http.Error(w, "Streaming Not Supported", http.StatusBadRequest) + return false + } + + w.Header().Set("Content-Type", "text/event-stream; charset=utf-8") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + w.Header().Set("Access-Control-Allow-Origin", "*") + w.WriteHeader(200) + + WriteEvent(ctx, w, helloEvent) + + return true +} + +// WriteEvent does the actual work of formatting an SSE compliant message, +// sending it over the provided ResponseWriter and flushing. +func WriteEvent(ctx context.Context, w http.ResponseWriter, e Event) { + if e.Error != nil { + fmt.Fprint(w, "event: error\n") + fmt.Fprintf(w, "data: %s\n\n", e.Error.Error()) + w.(http.Flusher).Flush() + return + } + + // TODO: add tests to ensure retry gets properly rendered + if e.Retry != 0 { + fmt.Fprintf(w, "retry: %d\n", e.Retry) + } + + if e.ID != "" { + fmt.Fprintf(w, "id: %s\n", e.ID) + } + + if e.Event != "" { + fmt.Fprintf(w, "event: %s\n", e.Event) + } + + fmt.Fprintf(w, "data: %s\n\n", getJSON(e.Data)) + w.(http.Flusher).Flush() +} + +// Upon successful completion of a query (i.e. the client didn't disconnect +// and we didn't error) we send a "Goodbye" event. This is a dummy event +// with a low retry value so that the client will immediately +// reconnect and request more data. This helps to give the feel of an infinite +// stream of data, even though we're actually responding in PAGE_SIZE chunks. +var goodbyeEvent = Event{ + Data: "byebye", + Event: "close", + Retry: 10, +} + +// Upon initial stream creation, we send this event to inform the client +// that they may retry an errored connection after 1 second.
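To make the wire format produced by WriteEvent above concrete, here is a small sketch mirroring the assertions in main_test.go later in this change; note that Data is JSON-encoded, hence the quotes around the payload.

package example

import (
    "context"
    "net/http/httptest"

    "github.com/stellar/go/services/horizon/internal/render/sse"
)

func eventBody() string {
    w := httptest.NewRecorder()
    sse.WriteEvent(context.Background(), w, sse.Event{ID: "1", Event: "message", Data: "hello"})
    // w.Body.String() is now "id: 1\nevent: message\ndata: \"hello\"\n\n"
    return w.Body.String()
}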
+var helloEvent = Event{ + Data: "hello", + Event: "open", + Retry: 1000, +} + +func getJSON(val interface{}) string { + js, err := json.Marshal(val) + + if err != nil { + panic(err) + } + + return string(js) +} diff --git a/services/horizon/internal/render/sse/main_test.go b/services/horizon/internal/render/sse/main_test.go new file mode 100644 index 0000000000..cd747ff6ad --- /dev/null +++ b/services/horizon/internal/render/sse/main_test.go @@ -0,0 +1,55 @@ +package sse + +import ( + "errors" + "fmt" + "net/http/httptest" + "testing" + + "github.com/stellar/go/support/test" + "github.com/stretchr/testify/assert" +) + +func TestWriteEventOutput(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + testCases := []struct { + Event Event + ExpectedSubstring string + }{ + {Event{Data: "test"}, "data: \"test\"\n\n"}, + {Event{ID: "1", Data: "test"}, "id: 1\n"}, + {Event{Retry: 1000, Data: "test"}, "retry: 1000\n"}, + {Event{Error: errors.New("busted")}, "event: error\ndata: busted\n\n"}, + {Event{Event: "test", Data: "test"}, "event: test\ndata: \"test\"\n\n"}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("Checking for expected substring %s", tc.ExpectedSubstring), func(t *testing.T) { + w := httptest.NewRecorder() + WriteEvent(ctx, w, tc.Event) + bodyString := w.Body.String() + assert.Contains(t, bodyString, tc.ExpectedSubstring) + }) + } +} + +func TestWriteEventLogs(t *testing.T) { + ctx, log := test.ContextWithLogBuffer() + w := httptest.NewRecorder() + WriteEvent(ctx, w, Event{Error: errors.New("busted")}) + assert.NotContains(t, log.String(), "level=error") + assert.NotContains(t, log.String(), "busted") +} + +// Tests that the preamble sets the correct headers and writes the hello event. +func TestWritePreamble(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + w := httptest.NewRecorder() + WritePreamble(ctx, w) + assert.Equal(t, "text/event-stream; charset=utf-8", w.Header().Get("Content-Type")) + assert.Equal(t, "no-cache", w.Header().Get("Cache-Control")) + assert.Equal(t, "keep-alive", w.Header().Get("Connection")) + assert.Equal(t, "*", w.Header().Get("Access-Control-Allow-Origin")) + assert.Equal(t, 200, w.Code) + assert.Contains(t, w.Body.String(), "retry: 1000\nevent: open\ndata: \"hello\"\n\n") +} diff --git a/services/horizon/internal/render/sse/stream.go b/services/horizon/internal/render/sse/stream.go new file mode 100644 index 0000000000..f93508cb4e --- /dev/null +++ b/services/horizon/internal/render/sse/stream.go @@ -0,0 +1,84 @@ +package sse + +import ( + "context" + "net/http" + + "github.com/pkg/errors" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/problem" +) + +var ( + // default error + errBadStream = errors.New("Unexpected stream error") + + // known errors + ErrRateLimited = errors.New("Rate limit exceeded") +) + +type Stream struct { + ctx context.Context + w http.ResponseWriter + done bool + eventsSent int + limit int + initialized bool +} + +// NewStream creates a new stream against the provided response writer. +func NewStream(ctx context.Context, w http.ResponseWriter) *Stream { + return &Stream{ + ctx: ctx, + w: w, + } +} + +// Init function is only executed once. It writes the preamble event which includes the HTTP response code and a +// hello message. This should be called before any method that writes to the client to ensure that the preamble +// has been sent first. 
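Ahead of the Init/Send/Done/Err methods that follow, here is a hedged sketch of driving a Stream directly. Handlers in this change normally go through StreamHandler.ServeStream instead, and writeAll with its arguments is hypothetical.

package example

import (
    "net/http"

    "github.com/stellar/go/services/horizon/internal/render/sse"
)

func writeAll(w http.ResponseWriter, r *http.Request, events []sse.Event, loadErr error) {
    stream := sse.NewStream(r.Context(), w)
    stream.SetLimit(10)
    if loadErr != nil {
        stream.Err(loadErr) // renders a plain problem response if the preamble wasn't sent yet
        return
    }
    for _, e := range events {
        stream.Send(e) // the preamble is written lazily before the first event
    }
    stream.Done() // emits the low-retry "byebye" event so clients reconnect
}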
+func (s *Stream) Init() { + if !s.initialized { + s.initialized = true + ok := WritePreamble(s.ctx, s.w) + if !ok { + s.done = true + } + } +} + +func (s *Stream) Send(e Event) { + s.Init() + WriteEvent(s.ctx, s.w, e) + s.eventsSent++ +} + +func (s *Stream) SetLimit(limit int) { + s.limit = limit +} + +func (s *Stream) Done() { + s.Init() + WriteEvent(s.ctx, s.w, goodbyeEvent) + s.done = true +} + +func (s *Stream) Err(err error) { + // We haven't initialized the stream, we should simply return the normal HTTP + // error because it means that we haven't sent the preamble. + if !s.initialized { + problem.Render(s.ctx, s.w, err) + return + } + + if knownErr := problem.IsKnownError(err); knownErr != nil { + err = knownErr + } else { + log.Ctx(s.ctx).WithStack(err).Error(err) + err = errBadStream + } + + s.Init() + WriteEvent(s.ctx, s.w, Event{Error: err}) + s.done = true +} diff --git a/services/horizon/internal/render/sse/stream_handler.go b/services/horizon/internal/render/sse/stream_handler.go new file mode 100644 index 0000000000..81d898088a --- /dev/null +++ b/services/horizon/internal/render/sse/stream_handler.go @@ -0,0 +1,88 @@ +package sse + +import ( + "net/http" + + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/support/errors" + "github.com/stellar/throttled" +) + +type LedgerSourceFactory interface { + Get() ledger.Source +} + +// StreamHandler represents a stream handling action +type StreamHandler struct { + RateLimiter *throttled.HTTPRateLimiter + LedgerSourceFactory LedgerSourceFactory +} + +// GenerateEventsFunc generates a slice of sse.Event which are sent via +// streaming. +type GenerateEventsFunc func() ([]Event, error) + +// ServeStream handles a SSE requests, sending data every time there is a new +// ledger. +func (handler StreamHandler) ServeStream( + w http.ResponseWriter, + r *http.Request, + limit int, + generateEvents GenerateEventsFunc, +) { + ctx := r.Context() + stream := NewStream(ctx, w) + stream.SetLimit(limit) + + ledgerSource := handler.LedgerSourceFactory.Get() + defer ledgerSource.Close() + + currentLedgerSequence := ledgerSource.CurrentLedger() + for { + // Rate limit the request if it's a call to stream since it queries the DB every second. See + // https://github.com/stellar/go/issues/715 for more details. + rateLimiter := handler.RateLimiter + if rateLimiter != nil { + limited, _, err := rateLimiter.RateLimiter.RateLimit(rateLimiter.VaryBy.Key(r), 1) + if err != nil { + stream.Err(errors.Wrap(err, "RateLimiter error")) + return + } + if limited { + stream.Err(ErrRateLimited) + return + } + } + + events, err := generateEvents() + if err != nil { + stream.Err(err) + return + } + for _, event := range events { + if limit <= 0 { + break + } + stream.Send(event) + limit-- + } + + if limit <= 0 { + stream.Done() + return + } + + // Manually send the preamble in case there are no data events in SSE to trigger a stream.Send call. + // This method is called every iteration of the loop, but is protected by a sync.Once variable so it's + // only executed once. 
+ stream.Init() + + select { + case currentLedgerSequence = <-ledgerSource.NextLedger(currentLedgerSequence): + continue + case <-ctx.Done(): + stream.Done() + return + } + } +} diff --git a/services/horizon/internal/render/sse/stream_handler_test.go b/services/horizon/internal/render/sse/stream_handler_test.go new file mode 100644 index 0000000000..5d2ca61a87 --- /dev/null +++ b/services/horizon/internal/render/sse/stream_handler_test.go @@ -0,0 +1,44 @@ +package sse + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stellar/go/services/horizon/internal/ledger" +) + +type testingFactory struct { + ledgerSource ledger.Source +} + +func (f *testingFactory) Get() ledger.Source { + return f.ledgerSource +} + +func TestSendByeByeOnContextDone(t *testing.T) { + ledgerSource := ledger.NewTestingSource(1) + handler := StreamHandler{LedgerSourceFactory: &testingFactory{ledgerSource}} + + r, err := http.NewRequest("GET", "http://localhost", nil) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + ctx, cancel := context.WithCancel(context.Background()) + r = r.WithContext(ctx) + + w := httptest.NewRecorder() + + handler.ServeStream(w, r, 10, func() ([]Event, error) { + cancel() + return []Event{}, nil + }) + + expected := "retry: 1000\nevent: open\ndata: \"hello\"\n\n" + + "retry: 10\nevent: close\ndata: \"byebye\"\n\n" + + if got := w.Body.String(); got != expected { + t.Fatalf("expected '%v' but got '%v'", expected, got) + } +} diff --git a/services/horizon/internal/render/sse/stream_test.go b/services/horizon/internal/render/sse/stream_test.go new file mode 100644 index 0000000000..48cff6bb12 --- /dev/null +++ b/services/horizon/internal/render/sse/stream_test.go @@ -0,0 +1,149 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package sse + +import ( + "context" + "database/sql" + "errors" + "net/http/httptest" + "strconv" + "testing" + + hProblem "github.com/stellar/go/services/horizon/internal/render/problem" + "github.com/stellar/go/support/render/problem" + "github.com/stellar/go/support/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +func (s *Stream) SentCount() int { + return s.eventsSent +} + +// IsDone is safe to call concurrently and is exported. +func (s *Stream) IsDone() bool { + if s.limit == 0 { + return s.done + } + + return s.done || s.eventsSent >= s.limit +} + +type StreamTestSuite struct { + suite.Suite + ctx context.Context + w *httptest.ResponseRecorder + stream *Stream +} + +// Helper method to check that the preamble has been sent and all HTTP response headers are correctly set. 
+func (suite *StreamTestSuite) checkHeadersAndPreamble() { + if !suite.stream.initialized { + assert.Equal(suite.T(), "application/problem+json; charset=utf-8", suite.w.Header().Get("Content-Type")) + assert.Equal(suite.T(), 500, suite.w.Code) + return + } + + assert.Equal(suite.T(), "text/event-stream; charset=utf-8", suite.w.Header().Get("Content-Type")) + assert.Equal(suite.T(), "no-cache", suite.w.Header().Get("Cache-Control")) + assert.Equal(suite.T(), "keep-alive", suite.w.Header().Get("Connection")) + assert.Equal(suite.T(), "*", suite.w.Header().Get("Access-Control-Allow-Origin")) + assert.Equal(suite.T(), 200, suite.w.Code) + assert.Contains(suite.T(), suite.w.Body.String(), "retry: 1000\nevent: open\ndata: \"hello\"\n\n") +} + +func (suite *StreamTestSuite) SetupTest() { + suite.ctx, _ = test.ContextWithLogBuffer() + suite.w = httptest.NewRecorder() + suite.stream = NewStream(suite.ctx, suite.w) +} + +// Tests that the stream sends the preamble before any events and that events are correctly sent. +func (suite *StreamTestSuite) TestStream_Send() { + e := Event{Data: "test message"} + suite.stream.Send(e) + // Before sending, it should have sent the preamble first and set the headers. + suite.checkHeadersAndPreamble() + // Now check that the data got written + assert.Contains(suite.T(), suite.w.Body.String(), "data: \"test message\"\n\n") + suite.stream.Done() + assert.Equal(suite.T(), 1, suite.stream.SentCount()) +} + +// Tests that Stream can send error events. +func (suite *StreamTestSuite) TestStream_Err() { + err := errors.New("example error") + // If we encounter an error before sending any event, we should just + // return the error without the hello message. + suite.stream.Err(err) + suite.checkHeadersAndPreamble() + + // Reset the stream to test the scenario where an event has been sent. + suite.w = httptest.NewRecorder() + suite.stream = NewStream(suite.ctx, suite.w) + suite.stream.initialized = false + suite.stream.Send(Event{}) + suite.stream.Err(err) + suite.checkHeadersAndPreamble() + assert.Contains(suite.T(), suite.w.Body.String(), "event: error\ndata: Unexpected stream error\n\n") + assert.True(suite.T(), suite.stream.IsDone()) +} + +// Tests that Stream can send handled registered errors +func (suite *StreamTestSuite) TestStream_ErrRegisterError() { + problem.RegisterError(context.DeadlineExceeded, hProblem.Timeout) + defer problem.UnRegisterErrors() + + suite.w = httptest.NewRecorder() + suite.stream = NewStream(suite.ctx, suite.w) + suite.stream.initialized = false + suite.stream.Send(Event{}) + suite.stream.Err(context.DeadlineExceeded) + suite.checkHeadersAndPreamble() + assert.Contains(suite.T(), suite.w.Body.String(), "event: error\ndata: problem: timeout\n\n") + assert.True(suite.T(), suite.stream.IsDone()) +} + +// Tests that Stream can send handled ErrNoRows +func (suite *StreamTestSuite) TestStream_ErrNoRows() { + problem.RegisterError(sql.ErrNoRows, problem.NotFound) + defer problem.UnRegisterErrors() + + suite.w = httptest.NewRecorder() + suite.stream = NewStream(suite.ctx, suite.w) + suite.stream.initialized = false + suite.stream.Send(Event{}) + suite.stream.Err(sql.ErrNoRows) + suite.checkHeadersAndPreamble() + assert.Contains(suite.T(), suite.w.Body.String(), "event: error\ndata: problem: not_found\n\n") + assert.True(suite.T(), suite.stream.IsDone()) +} + +// Tests that SetLimit sets stream.done to true after the limit has been reached. 
+func (suite *StreamTestSuite) TestStream_SetLimit() { + suite.stream.SetLimit(3) + // Send more than the limit + for i := 0; i < 5; i++ { + message := "test message " + strconv.Itoa(i) + suite.stream.Send(Event{Data: message}) + } + assert.True(suite.T(), suite.stream.IsDone()) +} + +// Tests that SentCount reports the correct number. +func (suite *StreamTestSuite) TestStream_SentCount() { + for i := 0; i < 5; i++ { + message := "test message " + strconv.Itoa(i) + suite.stream.Send(Event{Data: message}) + } + assert.Equal(suite.T(), 5, suite.stream.SentCount()) + suite.stream.Err(errors.New("example error")) + // Make sure that errors don't contribute to the send count + assert.Equal(suite.T(), 5, suite.stream.SentCount()) +} + +// Runs the test suite. +func TestStreamTestSuite(t *testing.T) { + suite.Run(t, new(StreamTestSuite)) +} diff --git a/services/horizon/internal/resourceadapter/account_entry.go b/services/horizon/internal/resourceadapter/account_entry.go new file mode 100644 index 0000000000..e18528be5b --- /dev/null +++ b/services/horizon/internal/resourceadapter/account_entry.go @@ -0,0 +1,124 @@ +package resourceadapter + +import ( + "context" + "fmt" + "strconv" + + protocol "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +// PopulateAccountEntry fills out the resource's fields +func PopulateAccountEntry( + ctx context.Context, + dest *protocol.Account, + account history.AccountEntry, + accountData []history.Data, + accountSigners []history.AccountSigner, + trustLines []history.TrustLine, + ledger *history.Ledger, +) error { + dest.ID = account.AccountID + dest.PT = account.AccountID + dest.AccountID = account.AccountID + dest.Sequence = strconv.FormatInt(account.SequenceNumber, 10) + dest.SubentryCount = int32(account.NumSubEntries) + dest.InflationDestination = account.InflationDestination + dest.HomeDomain = account.HomeDomain + dest.LastModifiedLedger = account.LastModifiedLedger + if ledger != nil { + dest.LastModifiedTime = &ledger.ClosedAt + } + + dest.Flags.AuthRequired = account.IsAuthRequired() + dest.Flags.AuthRevocable = account.IsAuthRevocable() + dest.Flags.AuthImmutable = account.IsAuthImmutable() + dest.Flags.AuthClawbackEnabled = account.IsAuthClawbackEnabled() + + dest.Thresholds.LowThreshold = account.ThresholdLow + dest.Thresholds.MedThreshold = account.ThresholdMedium + dest.Thresholds.HighThreshold = account.ThresholdHigh + + // populate balances + dest.Balances = make([]protocol.Balance, len(trustLines)+1) + for i, tl := range trustLines { + var err error + + switch tl.AssetType { + case xdr.AssetTypeAssetTypePoolShare: + err = PopulatePoolShareBalance(&dest.Balances[i], tl) + default: + err = PopulateAssetBalance(&dest.Balances[i], tl) + } + + if err != nil { + return errors.Wrap(err, "populating balance") + } + } + + // add native balance + err := PopulateNativeBalance( + &dest.Balances[len(dest.Balances)-1], + xdr.Int64(account.Balance), + xdr.Int64(account.BuyingLiabilities), + xdr.Int64(account.SellingLiabilities), + ) + if err != nil { + return errors.Wrap(err, "populating native balance") + } + + // populate data + dest.Data = make(map[string]string) + for _, d := range accountData { + dest.Data[d.Name] = d.Value.Base64() + } + + masterKeyIncluded := false + + // populate signers + dest.Signers = 
make([]protocol.Signer, len(accountSigners)) + for i, signer := range accountSigners { + dest.Signers[i].Weight = signer.Weight + dest.Signers[i].Key = signer.Signer + dest.Signers[i].Type = protocol.MustKeyTypeFromAddress(signer.Signer) + if signer.Sponsor.Valid { + dest.Signers[i].Sponsor = signer.Sponsor.String + } + + if account.AccountID == signer.Signer { + masterKeyIncluded = true + } + } + + if !masterKeyIncluded { + dest.Signers = append(dest.Signers, protocol.Signer{ + Weight: int32(account.MasterWeight), + Key: account.AccountID, + Type: protocol.MustKeyTypeFromAddress(account.AccountID), + }) + } + + dest.NumSponsoring = account.NumSponsoring + dest.NumSponsored = account.NumSponsored + if account.Sponsor.Valid { + dest.Sponsor = account.Sponsor.String + } + + lb := hal.LinkBuilder{horizonContext.BaseURL(ctx)} + self := fmt.Sprintf("/accounts/%s", account.AccountID) + dest.Links.Self = lb.Link(self) + dest.Links.Transactions = lb.PagedLink(self, "transactions") + dest.Links.Operations = lb.PagedLink(self, "operations") + dest.Links.Payments = lb.PagedLink(self, "payments") + dest.Links.Effects = lb.PagedLink(self, "effects") + dest.Links.Offers = lb.PagedLink(self, "offers") + dest.Links.Trades = lb.PagedLink(self, "trades") + dest.Links.Data = lb.Link(self, "data/{key}") + dest.Links.Data.PopulateTemplated() + return nil +} diff --git a/services/horizon/internal/resourceadapter/account_entry_test.go b/services/horizon/internal/resourceadapter/account_entry_test.go new file mode 100644 index 0000000000..f74884edc4 --- /dev/null +++ b/services/horizon/internal/resourceadapter/account_entry_test.go @@ -0,0 +1,297 @@ +package resourceadapter + +import ( + "encoding/base64" + "encoding/json" + "strconv" + "testing" + "time" + + "github.com/guregu/null" + + "github.com/stellar/go/amount" + . 
"github.com/stellar/go/protocols/horizon" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/assets" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/test" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +var ( + accountID = xdr.MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB") + + data = []history.Data{ + { + AccountID: accountID.Address(), + Name: "test", + Value: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + LastModifiedLedger: 1, + Sponsor: null.StringFrom("GBUH7T6U36DAVEKECMKN5YEBQYZVRBPNSZAAKBCO6P5HBMDFSQMQL4Z4"), + }, + { + AccountID: accountID.Address(), + Name: "test2", + Value: []byte{10, 11, 12, 13, 14, 15, 16, 17, 18, 19}, + LastModifiedLedger: 2, + }, + } + + inflationDest = xdr.MustAddress("GBUH7T6U36DAVEKECMKN5YEBQYZVRBPNSZAAKBCO6P5HBMDFSQMQL4Z4") + + account = history.AccountEntry{ + AccountID: accountID.Address(), + Balance: 20000, + SequenceNumber: 223456789, + NumSubEntries: 10, + InflationDestination: inflationDest.Address(), + Flags: 0b1001, // required and clawback + HomeDomain: "stellar.org", + ThresholdLow: 1, + ThresholdMedium: 2, + ThresholdHigh: 3, + SellingLiabilities: 4, + BuyingLiabilities: 3, + LastModifiedLedger: 1000, + NumSponsored: 12, + NumSponsoring: 34, + Sponsor: null.StringFrom("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"), + } + + ledgerWithCloseTime = &history.Ledger{ + ClosedAt: func() time.Time { + t, err := time.Parse(time.RFC3339, "2019-03-05T13:23:50Z") + if err != nil { + panic(err) + } + return t + }(), + } + + trustLineIssuer = xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + + trustLines = []history.TrustLine{ + { + AccountID: accountID.Address(), + AssetCode: "EUR", + AssetIssuer: trustLineIssuer.Address(), + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + Balance: 20000, + Limit: 223456789, + Flags: 1, + SellingLiabilities: 3, + BuyingLiabilities: 4, + LastModifiedLedger: 900, + }, + { + AccountID: accountID.Address(), + AssetCode: "USDDDDDDDDDD", + AssetIssuer: trustLineIssuer.Address(), + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum12, + Balance: 10000, + Limit: 123456789, + Flags: 0, + SellingLiabilities: 2, + BuyingLiabilities: 1, + LastModifiedLedger: 900, + }, + { + AccountID: accountID.Address(), + AssetType: xdr.AssetTypeAssetTypePoolShare, + Balance: 10000, + Limit: 123456789, + Flags: 0, + LastModifiedLedger: 900, + }, + } + + signers = []history.AccountSigner{ + { + Account: accountID.Address(), + Signer: accountID.Address(), + Weight: int32(3), + }, + + { + Account: accountID.Address(), + Signer: "GCMQBJWOLTCSSMWNVDJAXL6E42SADH563IL5MN5B6RBBP4XP7TBRLJKE", + Weight: int32(1), + Sponsor: null.StringFrom("GBXSGN5GX4PZOSBHB4JJF67CEGSGT56IN2N7LF3VGJ7WQ56BYWRVNNDX"), + }, + { + Account: accountID.Address(), + Signer: "GBXSGN5GX4PZOSBHB4JJF67CEGSGT56IN2N7LF3VGJ7WQ56BYWRVNNDX", + Weight: int32(2), + }, + { + Account: accountID.Address(), + Signer: "GBPXUGDRAOU5QUNUAXX6LYPBIOXYG45GLTKIRWKOCQ6HXP5QE5OCPFBY", + Weight: int32(3), + }, + } +) + +func TestPopulateAccountEntry(t *testing.T) { + tt := assert.New(t) + ctx, _ := test.ContextWithLogBuffer() + hAccount := Account{} + err := PopulateAccountEntry(ctx, &hAccount, account, data, signers, trustLines, ledgerWithCloseTime) + tt.NoError(err) + + tt.Equal(account.AccountID, hAccount.ID) + tt.Equal(account.AccountID, hAccount.AccountID) + tt.Equal(account.AccountID, hAccount.PT) + 
tt.Equal(strconv.FormatInt(account.SequenceNumber, 10), hAccount.Sequence) + tt.Equal(int32(account.NumSubEntries), hAccount.SubentryCount) + tt.Equal(account.InflationDestination, hAccount.InflationDestination) + tt.Equal(account.HomeDomain, hAccount.HomeDomain) + tt.Equal(account.LastModifiedLedger, hAccount.LastModifiedLedger) + tt.NotNil(hAccount.LastModifiedTime) + tt.Equal(ledgerWithCloseTime.ClosedAt, *hAccount.LastModifiedTime) + tt.Equal(account.NumSponsoring, hAccount.NumSponsoring) + tt.Equal(account.NumSponsored, hAccount.NumSponsored) + tt.Equal(account.Sponsor.String, hAccount.Sponsor) + + wantAccountThresholds := AccountThresholds{ + LowThreshold: account.ThresholdLow, + MedThreshold: account.ThresholdMedium, + HighThreshold: account.ThresholdHigh, + } + tt.Equal(wantAccountThresholds, hAccount.Thresholds) + + wantFlags := AccountFlags{ + AuthRequired: account.IsAuthRequired(), + AuthRevocable: account.IsAuthRevocable(), + AuthImmutable: account.IsAuthImmutable(), + AuthClawbackEnabled: account.IsAuthClawbackEnabled(), + } + + tt.Equal(wantFlags, hAccount.Flags) + + for _, d := range data { + want, e := base64.StdEncoding.DecodeString(hAccount.Data[d.Name]) + tt.NoError(e) + tt.Equal(d.Value, history.AccountDataValue(want)) + } + + tt.Len(hAccount.Balances, 4) + + for i, t := range trustLines { + ht := hAccount.Balances[i] + tt.Equal(t.AssetIssuer, ht.Issuer) + tt.Equal(t.AssetCode, ht.Code) + var wantType string + if t.AssetType == xdr.AssetTypeAssetTypePoolShare { + wantType = "liquidity_pool_shares" + } else { + wantType, err = assets.String(t.AssetType) + tt.NoError(err) + } + tt.Equal(wantType, ht.Type) + + tt.Equal(amount.StringFromInt64(t.Balance), ht.Balance) + + wantBuy := "" + if t.BuyingLiabilities != 0 { + wantBuy = amount.StringFromInt64(t.BuyingLiabilities) + } + tt.Equal(wantBuy, ht.BuyingLiabilities) + + wantSell := "" + if t.SellingLiabilities != 0 { + wantSell = amount.StringFromInt64(t.SellingLiabilities) + } + tt.Equal(wantSell, ht.SellingLiabilities) + + tt.Equal(amount.StringFromInt64(t.Limit), ht.Limit) + tt.Equal(t.LastModifiedLedger, ht.LastModifiedLedger) + tt.Equal(t.IsAuthorized(), *ht.IsAuthorized) + } + + native := hAccount.Balances[len(hAccount.Balances)-1] + + tt.Equal("native", native.Type) + tt.Equal("0.0020000", native.Balance) + tt.Equal("0.0000003", native.BuyingLiabilities) + tt.Equal("0.0000004", native.SellingLiabilities) + tt.Equal("", native.Limit) + tt.Equal("", native.Issuer) + tt.Equal("", native.Code) + + tt.Len(hAccount.Signers, 4) + for i, s := range signers { + hs := hAccount.Signers[i] + tt.Equal(s.Signer, hs.Key) + tt.Equal(s.Weight, hs.Weight) + tt.Equal(protocol.MustKeyTypeFromAddress(s.Signer), hs.Type) + var expectedSponsor string + if s.Sponsor.Valid { + expectedSponsor = s.Sponsor.String + } + tt.Equal(expectedSponsor, hs.Sponsor) + } + + links, err := json.Marshal(hAccount.Links) + want := ` + { + "data": { + "href": "/accounts/GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB/data/{key}", + "templated": true + }, + "effects": { + "href": "/accounts/GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB/effects{?cursor,limit,order}", + "templated": true + }, + "offers": { + "href": "/accounts/GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB/offers{?cursor,limit,order}", + "templated": true + }, + "operations": { + "href": "/accounts/GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB/operations{?cursor,limit,order}", + "templated": true + }, + "payments": { + "href": 
"/accounts/GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB/payments{?cursor,limit,order}", + "templated": true + }, + "self": { + "href": "/accounts/GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB" + }, + "trades": { + "href": "/accounts/GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB/trades{?cursor,limit,order}", + "templated": true + }, + "transactions": { + "href": "/accounts/GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB/transactions{?cursor,limit,order}", + "templated": true + } + } + ` + tt.JSONEq(want, string(links)) +} + +func TestPopulateAccountEntryMasterMissingInSigners(t *testing.T) { + tt := assert.New(t) + ctx, _ := test.ContextWithLogBuffer() + hAccount := Account{} + + account.MasterWeight = 0 + signers = []history.AccountSigner{ + { + Account: accountID.Address(), + Signer: "GCMQBJWOLTCSSMWNVDJAXL6E42SADH563IL5MN5B6RBBP4XP7TBRLJKE", + Weight: int32(3), + }, + } + err := PopulateAccountEntry(ctx, &hAccount, account, data, signers, trustLines, nil) + tt.NoError(err) + + tt.Len(hAccount.Signers, 2) + + signer := hAccount.Signers[1] + tt.Equal(account.AccountID, signer.Key) + tt.Equal(int32(account.MasterWeight), signer.Weight) + tt.Equal(protocol.MustKeyTypeFromAddress(account.AccountID), signer.Type) + tt.Nil(hAccount.LastModifiedTime) +} diff --git a/services/horizon/internal/resourceadapter/asset.go b/services/horizon/internal/resourceadapter/asset.go new file mode 100644 index 0000000000..8b3b325ad5 --- /dev/null +++ b/services/horizon/internal/resourceadapter/asset.go @@ -0,0 +1,12 @@ +package resourceadapter + +import ( + "context" + + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/xdr" +) + +func PopulateAsset(ctx context.Context, dest *protocol.Asset, asset xdr.Asset) error { + return asset.Extract(&dest.Type, &dest.Code, &dest.Issuer) +} diff --git a/services/horizon/internal/resourceadapter/asset_stat.go b/services/horizon/internal/resourceadapter/asset_stat.go new file mode 100644 index 0000000000..9ae61fb4ac --- /dev/null +++ b/services/horizon/internal/resourceadapter/asset_stat.go @@ -0,0 +1,88 @@ +package resourceadapter + +import ( + "context" + "strings" + + "github.com/stellar/go/amount" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +// PopulateAssetStat populates an AssetStat using asset stats and account entries +// generated from the ingestion system. 
+func PopulateAssetStat( + ctx context.Context, + res *protocol.AssetStat, + row history.ExpAssetStat, + issuer history.AccountEntry, +) (err error) { + res.Asset.Type = xdr.AssetTypeToString[row.AssetType] + res.Asset.Code = row.AssetCode + res.Asset.Issuer = row.AssetIssuer + res.Accounts = protocol.AssetStatAccounts{ + Authorized: row.Accounts.Authorized, + AuthorizedToMaintainLiabilities: row.Accounts.AuthorizedToMaintainLiabilities, + Unauthorized: row.Accounts.Unauthorized, + } + res.NumClaimableBalances = row.Accounts.ClaimableBalances + res.NumLiquidityPools = row.Accounts.LiquidityPools + res.NumAccounts = row.NumAccounts + err = populateAssetStatBalances(res, row.Balances) + if err != nil { + return err + } + flags := int8(issuer.Flags) + res.Flags = protocol.AccountFlags{ + (flags & int8(xdr.AccountFlagsAuthRequiredFlag)) != 0, + (flags & int8(xdr.AccountFlagsAuthRevocableFlag)) != 0, + (flags & int8(xdr.AccountFlagsAuthImmutableFlag)) != 0, + (flags & int8(xdr.AccountFlagsAuthClawbackEnabledFlag)) != 0, + } + res.PT = row.PagingToken() + + trimmed := strings.TrimSpace(issuer.HomeDomain) + var toml string + if trimmed != "" { + toml = "https://" + issuer.HomeDomain + "/.well-known/stellar.toml" + } + res.Links.Toml = hal.NewLink(toml) + return +} + +func populateAssetStatBalances(res *protocol.AssetStat, row history.ExpAssetStatBalances) (err error) { + res.Amount, err = amount.IntStringToAmount(row.Authorized) + if err != nil { + return errors.Wrap(err, "Invalid amount in PopulateAssetStat") + } + + res.Balances.Authorized, err = amount.IntStringToAmount(row.Authorized) + if err != nil { + return errors.Wrapf(err, "Invalid amount in PopulateAssetStatBalances: %q", row.Authorized) + } + + res.Balances.AuthorizedToMaintainLiabilities, err = amount.IntStringToAmount(row.AuthorizedToMaintainLiabilities) + if err != nil { + return errors.Wrapf(err, "Invalid amount in PopulateAssetStatBalances: %q", row.AuthorizedToMaintainLiabilities) + } + + res.Balances.Unauthorized, err = amount.IntStringToAmount(row.Unauthorized) + if err != nil { + return errors.Wrapf(err, "Invalid amount in PopulateAssetStatBalances: %q", row.Unauthorized) + } + + res.ClaimableBalancesAmount, err = amount.IntStringToAmount(row.ClaimableBalances) + if err != nil { + return errors.Wrapf(err, "Invalid amount in PopulateAssetStatBalances: %q", row.ClaimableBalances) + } + + res.LiquidityPoolsAmount, err = amount.IntStringToAmount(row.LiquidityPools) + if err != nil { + return errors.Wrapf(err, "Invalid amount in PopulateAssetStatBalances: %q", row.LiquidityPools) + } + + return nil +} diff --git a/services/horizon/internal/resourceadapter/asset_stat_test.go b/services/horizon/internal/resourceadapter/asset_stat_test.go new file mode 100644 index 0000000000..cd598f4b01 --- /dev/null +++ b/services/horizon/internal/resourceadapter/asset_stat_test.go @@ -0,0 +1,87 @@ +package resourceadapter + +import ( + "context" + "testing" + + "github.com/stellar/go/protocols/horizon" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestPopulateExpAssetStat(t *testing.T) { + row := history.ExpAssetStat{ + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum4, + AssetCode: "XIM", + AssetIssuer: "GBZ35ZJRIKJGYH5PBKLKOZ5L6EXCNTO7BKIL7DAVVDFQ2ODJEEHHJXIM", + Accounts: history.ExpAssetStatAccounts{ + Authorized: 429, + AuthorizedToMaintainLiabilities: 214, + Unauthorized: 107, + 
ClaimableBalances: 12, + }, + Balances: history.ExpAssetStatBalances{ + Authorized: "100000000000000000000", + AuthorizedToMaintainLiabilities: "50000000000000000000", + Unauthorized: "2500000000000000000", + ClaimableBalances: "1200000000000000000", + LiquidityPools: "7700000000000000000", + }, + Amount: "100000000000000000000", // 10T + NumAccounts: 429, + } + issuer := history.AccountEntry{ + AccountID: "GBZ35ZJRIKJGYH5PBKLKOZ5L6EXCNTO7BKIL7DAVVDFQ2ODJEEHHJXIM", + Flags: 0, + HomeDomain: "xim.com", + } + + var res protocol.AssetStat + err := PopulateAssetStat(context.Background(), &res, row, issuer) + assert.NoError(t, err) + + assert.Equal(t, "credit_alphanum4", res.Type) + assert.Equal(t, "XIM", res.Code) + assert.Equal(t, "GBZ35ZJRIKJGYH5PBKLKOZ5L6EXCNTO7BKIL7DAVVDFQ2ODJEEHHJXIM", res.Issuer) + assert.Equal(t, int32(429), res.Accounts.Authorized) + assert.Equal(t, int32(214), res.Accounts.AuthorizedToMaintainLiabilities) + assert.Equal(t, int32(107), res.Accounts.Unauthorized) + assert.Equal(t, int32(12), res.NumClaimableBalances) + assert.Equal(t, "10000000000000.0000000", res.Balances.Authorized) + assert.Equal(t, "5000000000000.0000000", res.Balances.AuthorizedToMaintainLiabilities) + assert.Equal(t, "250000000000.0000000", res.Balances.Unauthorized) + assert.Equal(t, "120000000000.0000000", res.ClaimableBalancesAmount) + assert.Equal(t, "770000000000.0000000", res.LiquidityPoolsAmount) + assert.Equal(t, "10000000000000.0000000", res.Amount) + assert.Equal(t, int32(429), res.NumAccounts) + assert.Equal(t, horizon.AccountFlags{}, res.Flags) + assert.Equal(t, "https://xim.com/.well-known/stellar.toml", res.Links.Toml.Href) + assert.Equal(t, row.PagingToken(), res.PagingToken()) + + issuer.HomeDomain = "" + issuer.Flags = uint32(xdr.AccountFlagsAuthRequiredFlag) | + uint32(xdr.AccountFlagsAuthImmutableFlag) | + uint32(xdr.AccountFlagsAuthClawbackEnabledFlag) + + err = PopulateAssetStat(context.Background(), &res, row, issuer) + assert.NoError(t, err) + + assert.Equal(t, "credit_alphanum4", res.Type) + assert.Equal(t, "XIM", res.Code) + assert.Equal(t, "GBZ35ZJRIKJGYH5PBKLKOZ5L6EXCNTO7BKIL7DAVVDFQ2ODJEEHHJXIM", res.Issuer) + assert.Equal(t, "10000000000000.0000000", res.Amount) + assert.Equal(t, int32(429), res.NumAccounts) + assert.Equal( + t, + horizon.AccountFlags{ + AuthRequired: true, + AuthImmutable: true, + AuthClawbackEnabled: true, + }, + res.Flags, + ) + assert.Equal(t, "", res.Links.Toml.Href) + assert.Equal(t, row.PagingToken(), res.PagingToken()) +} diff --git a/services/horizon/internal/resourceadapter/balance.go b/services/horizon/internal/resourceadapter/balance.go new file mode 100644 index 0000000000..7065d5476b --- /dev/null +++ b/services/horizon/internal/resourceadapter/balance.go @@ -0,0 +1,91 @@ +package resourceadapter + +import ( + "github.com/stellar/go/amount" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/assets" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +func PopulatePoolShareBalance(dest *protocol.Balance, row history.TrustLine) (err error) { + if row.AssetType == xdr.AssetTypeAssetTypePoolShare { + dest.Type = "liquidity_pool_shares" + } else { + dest.Type, err = assets.String(row.AssetType) + if err != nil { + return err + } + + if dest.Type != "liquidity_pool_shares" { + return PopulateAssetBalance(dest, row) + } + } + + dest.LiquidityPoolId = row.LiquidityPoolID + dest.Balance = 
amount.StringFromInt64(row.Balance) + dest.Limit = amount.StringFromInt64(row.Limit) + dest.LastModifiedLedger = row.LastModifiedLedger + fillAuthorizationFlags(dest, row) + + return +} + +func PopulateAssetBalance(dest *protocol.Balance, row history.TrustLine) (err error) { + dest.Type, err = assets.String(row.AssetType) + if err != nil { + return err + } + + dest.Balance = amount.StringFromInt64(row.Balance) + dest.BuyingLiabilities = amount.StringFromInt64(row.BuyingLiabilities) + dest.SellingLiabilities = amount.StringFromInt64(row.SellingLiabilities) + dest.Limit = amount.StringFromInt64(row.Limit) + dest.Issuer = row.AssetIssuer + dest.Code = row.AssetCode + dest.LastModifiedLedger = row.LastModifiedLedger + fillAuthorizationFlags(dest, row) + if row.Sponsor.Valid { + dest.Sponsor = row.Sponsor.String + } + + return +} + +func PopulateNativeBalance(dest *protocol.Balance, stroops, buyingLiabilities, sellingLiabilities xdr.Int64) (err error) { + dest.Type, err = assets.String(xdr.AssetTypeAssetTypeNative) + if err != nil { + return errors.Wrap(err, "getting the string representation from the provided xdr asset type") + } + + dest.Balance = amount.String(stroops) + dest.BuyingLiabilities = amount.String(buyingLiabilities) + dest.SellingLiabilities = amount.String(sellingLiabilities) + dest.LastModifiedLedger = 0 + dest.Limit = "" + dest.Issuer = "" + dest.Code = "" + dest.IsAuthorized = nil + dest.IsAuthorizedToMaintainLiabilities = nil + dest.IsClawbackEnabled = nil + return +} + +func fillAuthorizationFlags(dest *protocol.Balance, row history.TrustLine) { + isAuthorized := row.IsAuthorized() + dest.IsAuthorized = &isAuthorized + + // After CAP-18, isAuth => isAuthToMaintain, so the following code does this + // in a backwards compatible manner. + dest.IsAuthorizedToMaintainLiabilities = &isAuthorized + isAuthorizedToMaintainLiabilities := row.IsAuthorizedToMaintainLiabilities() + if isAuthorizedToMaintainLiabilities { + dest.IsAuthorizedToMaintainLiabilities = &isAuthorizedToMaintainLiabilities + } + + isClawbackEnabled := row.IsClawbackEnabled() + if isClawbackEnabled { + dest.IsClawbackEnabled = &isClawbackEnabled + } +} diff --git a/services/horizon/internal/resourceadapter/balance_test.go b/services/horizon/internal/resourceadapter/balance_test.go new file mode 100644 index 0000000000..0e9f743627 --- /dev/null +++ b/services/horizon/internal/resourceadapter/balance_test.go @@ -0,0 +1,108 @@ +package resourceadapter + +import ( + "testing" + + . 
"github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestPopulateBalance(t *testing.T) { + testAssetCode1 := "TEST_ASSET_1" + testAssetCode2 := "TEST_ASSET_2" + authorizedTrustline := history.TrustLine{ + AccountID: "testID", + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum12, + AssetIssuer: "", + AssetCode: testAssetCode1, + Limit: 100, + Balance: 10, + Flags: 1, + } + authorizedToMaintainLiabilitiesTrustline := history.TrustLine{ + AccountID: "testID", + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum12, + AssetIssuer: "", + AssetCode: testAssetCode1, + Limit: 100, + Balance: 10, + Flags: 2, + } + unauthorizedTrustline := history.TrustLine{ + AccountID: "testID", + AssetType: xdr.AssetTypeAssetTypeCreditAlphanum12, + AssetIssuer: "", + AssetCode: testAssetCode2, + Limit: 100, + Balance: 10, + Flags: 0, + } + poolshareTrustline := history.TrustLine{ + AccountID: "testID", + AssetType: xdr.AssetTypeAssetTypePoolShare, + Limit: 100, + Balance: 10, + Flags: 0, + } + + want := Balance{} + err := PopulateAssetBalance(&want, authorizedTrustline) + assert.NoError(t, err) + assert.Equal(t, "credit_alphanum12", want.Type) + assert.Equal(t, "0.0000010", want.Balance) + assert.Equal(t, "0.0000100", want.Limit) + assert.Equal(t, "", want.Issuer) + assert.Equal(t, testAssetCode1, want.Code) + assert.Equal(t, true, *want.IsAuthorized) + assert.Equal(t, true, *want.IsAuthorizedToMaintainLiabilities) + + want = Balance{} + err = PopulateAssetBalance(&want, authorizedToMaintainLiabilitiesTrustline) + assert.NoError(t, err) + assert.Equal(t, "credit_alphanum12", want.Type) + assert.Equal(t, "0.0000010", want.Balance) + assert.Equal(t, "0.0000100", want.Limit) + assert.Equal(t, "", want.Issuer) + assert.Equal(t, testAssetCode1, want.Code) + assert.Equal(t, false, *want.IsAuthorized) + assert.Equal(t, true, *want.IsAuthorizedToMaintainLiabilities) + + want = Balance{} + err = PopulateAssetBalance(&want, unauthorizedTrustline) + assert.NoError(t, err) + assert.Equal(t, "credit_alphanum12", want.Type) + assert.Equal(t, "0.0000010", want.Balance) + assert.Equal(t, "0.0000100", want.Limit) + assert.Equal(t, "", want.Issuer) + assert.Equal(t, testAssetCode2, want.Code) + assert.Equal(t, false, *want.IsAuthorized) + assert.Equal(t, false, *want.IsAuthorizedToMaintainLiabilities) + + want = Balance{} + err = PopulatePoolShareBalance(&want, poolshareTrustline) + assert.NoError(t, err) + assert.Equal(t, "liquidity_pool_shares", want.Type) + assert.Equal(t, "0.0000010", want.Balance) + assert.Equal(t, "0.0000100", want.Limit) + assert.Equal(t, "", want.Issuer) + assert.Equal(t, "", want.Code) + assert.Equal(t, false, *want.IsAuthorized) + assert.Equal(t, false, *want.IsAuthorizedToMaintainLiabilities) +} + +func TestPopulateNativeBalance(t *testing.T) { + want := Balance{} + err := PopulateNativeBalance(&want, 10, 10, 10) + assert.NoError(t, err) + assert.Equal(t, "native", want.Type) + assert.Equal(t, "0.0000010", want.Balance) + assert.Equal(t, "0.0000010", want.BuyingLiabilities) + assert.Equal(t, "0.0000010", want.SellingLiabilities) + assert.Equal(t, "", want.Limit) + assert.Equal(t, "", want.Issuer) + assert.Equal(t, "", want.Code) + assert.Nil(t, want.IsAuthorized) + assert.Nil(t, want.IsAuthorizedToMaintainLiabilities) +} diff --git a/services/horizon/internal/resourceadapter/claimable_balances.go b/services/horizon/internal/resourceadapter/claimable_balances.go new file mode 
100644 index 0000000000..356e485b9b --- /dev/null +++ b/services/horizon/internal/resourceadapter/claimable_balances.go @@ -0,0 +1,50 @@ +package resourceadapter + +import ( + "context" + "fmt" + + "github.com/stellar/go/amount" + protocol "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +// PopulateClaimableBalance fills out the resource's fields +func PopulateClaimableBalance( + ctx context.Context, + dest *protocol.ClaimableBalance, + claimableBalance history.ClaimableBalance, + ledger *history.Ledger, +) error { + dest.BalanceID = claimableBalance.BalanceID + dest.Asset = claimableBalance.Asset.StringCanonical() + dest.Amount = amount.StringFromInt64(int64(claimableBalance.Amount)) + if claimableBalance.Sponsor.Valid { + dest.Sponsor = claimableBalance.Sponsor.String + } + dest.LastModifiedLedger = claimableBalance.LastModifiedLedger + dest.Claimants = make([]protocol.Claimant, len(claimableBalance.Claimants)) + for i, c := range claimableBalance.Claimants { + dest.Claimants[i].Destination = c.Destination + dest.Claimants[i].Predicate = c.Predicate + } + + if ledger != nil { + dest.LastModifiedTime = &ledger.ClosedAt + } + + if xdr.ClaimableBalanceFlags(claimableBalance.Flags).IsClawbackEnabled() { + dest.Flags.ClawbackEnabled = xdr.ClaimableBalanceFlags(claimableBalance.Flags).IsClawbackEnabled() + } + + lb := hal.LinkBuilder{Base: horizonContext.BaseURL(ctx)} + self := fmt.Sprintf("/claimable_balances/%s", dest.BalanceID) + dest.Links.Self = lb.Link(self) + dest.PT = fmt.Sprintf("%d-%s", claimableBalance.LastModifiedLedger, dest.BalanceID) + dest.Links.Transactions = lb.PagedLink(self, "transactions") + dest.Links.Operations = lb.PagedLink(self, "operations") + return nil +} diff --git a/services/horizon/internal/resourceadapter/claimable_balances_test.go b/services/horizon/internal/resourceadapter/claimable_balances_test.go new file mode 100644 index 0000000000..c7525b828b --- /dev/null +++ b/services/horizon/internal/resourceadapter/claimable_balances_test.go @@ -0,0 +1,103 @@ +package resourceadapter + +import ( + "encoding/json" + "testing" + + "github.com/guregu/null" + . 
"github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/test" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestPopulateClaimableBalance(t *testing.T) { + tt := assert.New(t) + ctx, _ := test.ContextWithLogBuffer() + resource := ClaimableBalance{} + + balanceID := xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{1, 2, 3}, + } + unconditional := &xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + } + relBefore := xdr.Int64(12) + absBefore := xdr.Int64(1598440539) + + id, err := xdr.MarshalHex(&balanceID) + tt.NoError(err) + claimableBalance := history.ClaimableBalance{ + BalanceID: id, + Asset: xdr.MustNewNativeAsset(), + Amount: 100000000, + Sponsor: null.StringFrom("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + Claimants: history.Claimants{ + { + Destination: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateAnd, + AndPredicates: &[]xdr.ClaimPredicate{ + { + Type: xdr.ClaimPredicateTypeClaimPredicateOr, + OrPredicates: &[]xdr.ClaimPredicate{ + { + Type: xdr.ClaimPredicateTypeClaimPredicateBeforeRelativeTime, + RelBefore: &relBefore, + }, + { + Type: xdr.ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime, + AbsBefore: &absBefore, + }, + }, + }, + { + Type: xdr.ClaimPredicateTypeClaimPredicateNot, + NotPredicate: &unconditional, + }, + }, + }, + }, + }, + LastModifiedLedger: 123, + Flags: uint32(xdr.ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag), + } + + err = PopulateClaimableBalance(ctx, &resource, claimableBalance, nil) + tt.NoError(err) + + tt.Equal("000000000102030000000000000000000000000000000000000000000000000000000000", resource.BalanceID) + tt.Equal(claimableBalance.Asset.StringCanonical(), resource.Asset) + tt.Equal("10.0000000", resource.Amount) + tt.Equal("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", resource.Sponsor) + tt.Equal(uint32(123), resource.LastModifiedLedger) + tt.Len(resource.Claimants, 1) + tt.Equal("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", resource.Claimants[0].Destination) + tt.Equal("123-000000000102030000000000000000000000000000000000000000000000000000000000", resource.PagingToken()) + tt.True(resource.Flags.ClawbackEnabled) + + links, err := json.Marshal(resource.Links) + tt.NoError(err) + want := ` + { + "self": { + "href": "/claimable_balances/000000000102030000000000000000000000000000000000000000000000000000000000" + }, + "operations": { + "href": "/claimable_balances/000000000102030000000000000000000000000000000000000000000000000000000000/operations{?cursor,limit,order}", + "templated": true + }, + "transactions": { + "href": "/claimable_balances/000000000102030000000000000000000000000000000000000000000000000000000000/transactions{?cursor,limit,order}", + "templated": true + } + } + ` + tt.JSONEq(want, string(links)) + + predicate, err := json.Marshal(resource.Claimants[0].Predicate) + tt.NoError(err) + tt.JSONEq(`{"and":[{"or":[{"rel_before":"12"},{"abs_before":"2020-08-26T11:15:39Z","abs_before_epoch":"1598440539"}]},{"not":{"unconditional":true}}]}`, string(predicate)) +} diff --git a/services/horizon/internal/resourceadapter/effects.go b/services/horizon/internal/resourceadapter/effects.go new file mode 100644 index 0000000000..1f197b123b --- /dev/null +++ b/services/horizon/internal/resourceadapter/effects.go @@ 
-0,0 +1,331 @@ +package resourceadapter + +import ( + "context" + + "github.com/stellar/go/protocols/horizon/base" + "github.com/stellar/go/protocols/horizon/effects" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +var EffectTypeNames = map[history.EffectType]string{ + history.EffectAccountCreated: "account_created", + history.EffectAccountRemoved: "account_removed", + history.EffectAccountCredited: "account_credited", + history.EffectAccountDebited: "account_debited", + history.EffectAccountThresholdsUpdated: "account_thresholds_updated", + history.EffectAccountHomeDomainUpdated: "account_home_domain_updated", + history.EffectAccountFlagsUpdated: "account_flags_updated", + history.EffectAccountInflationDestinationUpdated: "account_inflation_destination_updated", + history.EffectSignerCreated: "signer_created", + history.EffectSignerRemoved: "signer_removed", + history.EffectSignerUpdated: "signer_updated", + history.EffectTrustlineCreated: "trustline_created", + history.EffectTrustlineRemoved: "trustline_removed", + history.EffectTrustlineUpdated: "trustline_updated", + history.EffectTrustlineAuthorized: "trustline_authorized", + history.EffectTrustlineAuthorizedToMaintainLiabilities: "trustline_authorized_to_maintain_liabilities", + history.EffectTrustlineDeauthorized: "trustline_deauthorized", + history.EffectTrustlineFlagsUpdated: "trustline_flags_updated", + // unused + // history.EffectOfferCreated: "offer_created", + // history.EffectOfferRemoved: "offer_removed", + // history.EffectOfferUpdated: "offer_updated", + history.EffectTrade: "trade", + history.EffectDataCreated: "data_created", + history.EffectDataRemoved: "data_removed", + history.EffectDataUpdated: "data_updated", + history.EffectSequenceBumped: "sequence_bumped", + history.EffectClaimableBalanceCreated: "claimable_balance_created", + history.EffectClaimableBalanceClaimantCreated: "claimable_balance_claimant_created", + history.EffectClaimableBalanceClaimed: "claimable_balance_claimed", + history.EffectAccountSponsorshipCreated: "account_sponsorship_created", + history.EffectAccountSponsorshipUpdated: "account_sponsorship_updated", + history.EffectAccountSponsorshipRemoved: "account_sponsorship_removed", + history.EffectTrustlineSponsorshipCreated: "trustline_sponsorship_created", + history.EffectTrustlineSponsorshipUpdated: "trustline_sponsorship_updated", + history.EffectTrustlineSponsorshipRemoved: "trustline_sponsorship_removed", + history.EffectDataSponsorshipCreated: "data_sponsorship_created", + history.EffectDataSponsorshipUpdated: "data_sponsorship_updated", + history.EffectDataSponsorshipRemoved: "data_sponsorship_removed", + history.EffectClaimableBalanceSponsorshipCreated: "claimable_balance_sponsorship_created", + history.EffectClaimableBalanceSponsorshipUpdated: "claimable_balance_sponsorship_updated", + history.EffectClaimableBalanceSponsorshipRemoved: "claimable_balance_sponsorship_removed", + history.EffectSignerSponsorshipCreated: "signer_sponsorship_created", + history.EffectSignerSponsorshipUpdated: "signer_sponsorship_updated", + history.EffectSignerSponsorshipRemoved: "signer_sponsorship_removed", + history.EffectClaimableBalanceClawedBack: "claimable_balance_clawed_back", + history.EffectLiquidityPoolDeposited: "liquidity_pool_deposited", + history.EffectLiquidityPoolWithdrew: "liquidity_pool_withdrew", + 
history.EffectLiquidityPoolTrade: "liquidity_pool_trade", + history.EffectLiquidityPoolCreated: "liquidity_pool_created", + history.EffectLiquidityPoolRemoved: "liquidity_pool_removed", + history.EffectLiquidityPoolRevoked: "liquidity_pool_revoked", +} + +// NewEffect creates a new effect resource from the provided database representation +// of the effect. +func NewEffect( + ctx context.Context, + row history.Effect, + ledger history.Ledger, +) (result hal.Pageable, err error) { + + basev := effects.Base{} + PopulateBaseEffect(ctx, &basev, row, ledger) + + switch row.Type { + case history.EffectAccountCreated: + e := effects.AccountCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectAccountCredited: + e := effects.AccountCredited{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectAccountDebited: + e := effects.AccountDebited{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectAccountThresholdsUpdated: + e := effects.AccountThresholdsUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectAccountHomeDomainUpdated: + e := effects.AccountHomeDomainUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectAccountFlagsUpdated: + e := effects.AccountFlagsUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectSignerCreated: + e := effects.SignerCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectSignerUpdated: + e := effects.SignerUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectSignerRemoved: + e := effects.SignerRemoved{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineCreated: + e := effects.TrustlineCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineUpdated: + e := effects.TrustlineUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineRemoved: + e := effects.TrustlineRemoved{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineAuthorized: + e := effects.TrustlineAuthorized{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineAuthorizedToMaintainLiabilities: + e := effects.TrustlineAuthorizedToMaintainLiabilities{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineDeauthorized: + e := effects.TrustlineDeauthorized{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineFlagsUpdated: + e := effects.TrustlineFlagsUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrade: + e := effects.Trade{Base: basev} + tradeDetails := history.TradeEffectDetails{} + err = row.UnmarshalDetails(&tradeDetails) + if err == nil { + e.Seller = tradeDetails.Seller + e.SellerMuxed = tradeDetails.SellerMuxed + e.SellerMuxedID = tradeDetails.SellerMuxedID + e.OfferID = tradeDetails.OfferID + e.SoldAmount = tradeDetails.SoldAmount + e.SoldAssetType = tradeDetails.SoldAssetType + e.SoldAssetCode = tradeDetails.SoldAssetCode + e.SoldAssetIssuer = tradeDetails.SoldAssetIssuer + e.BoughtAmount = tradeDetails.BoughtAmount + e.BoughtAssetType = tradeDetails.BoughtAssetType + e.BoughtAssetCode = tradeDetails.BoughtAssetCode + e.BoughtAssetIssuer = tradeDetails.BoughtAssetIssuer + } + result = e + case history.EffectDataCreated: + e := effects.DataCreated{Base: basev} 
+ err = row.UnmarshalDetails(&e) + result = e + case history.EffectDataUpdated: + e := effects.DataUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectDataRemoved: + e := effects.DataRemoved{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectSequenceBumped: + e := effects.SequenceBumped{Base: basev} + hsb := history.SequenceBumped{} + err = row.UnmarshalDetails(&hsb) + if err == nil { + e.NewSeq = hsb.NewSeq + } + result = e + case history.EffectClaimableBalanceCreated: + e := effects.ClaimableBalanceCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectClaimableBalanceClaimed: + e := effects.ClaimableBalanceClaimed{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectClaimableBalanceClaimantCreated: + e := effects.ClaimableBalanceClaimantCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectAccountSponsorshipCreated: + e := effects.AccountSponsorshipCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectAccountSponsorshipUpdated: + e := effects.AccountSponsorshipUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectAccountSponsorshipRemoved: + e := effects.AccountSponsorshipRemoved{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineSponsorshipCreated: + e := effects.TrustlineSponsorshipCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineSponsorshipUpdated: + e := effects.TrustlineSponsorshipUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectTrustlineSponsorshipRemoved: + e := effects.TrustlineSponsorshipRemoved{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectDataSponsorshipCreated: + e := effects.DataSponsorshipCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectDataSponsorshipUpdated: + e := effects.DataSponsorshipUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectDataSponsorshipRemoved: + e := effects.DataSponsorshipRemoved{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectClaimableBalanceSponsorshipCreated: + e := effects.ClaimableBalanceSponsorshipCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectClaimableBalanceSponsorshipUpdated: + e := effects.ClaimableBalanceSponsorshipUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectClaimableBalanceSponsorshipRemoved: + e := effects.ClaimableBalanceSponsorshipRemoved{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectSignerSponsorshipCreated: + e := effects.SignerSponsorshipCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectSignerSponsorshipUpdated: + e := effects.SignerSponsorshipUpdated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectSignerSponsorshipRemoved: + e := effects.SignerSponsorshipRemoved{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectClaimableBalanceClawedBack: + e := effects.ClaimableBalanceClawedBack{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectLiquidityPoolDeposited: + e := effects.LiquidityPoolDeposited{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectLiquidityPoolWithdrew: + e := 
effects.LiquidityPoolWithdrew{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectLiquidityPoolTrade: + e := effects.LiquidityPoolTrade{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectLiquidityPoolCreated: + e := effects.LiquidityPoolCreated{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectLiquidityPoolRemoved: + e := effects.LiquidityPoolRemoved{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectLiquidityPoolRevoked: + e := effects.LiquidityPoolRevoked{Base: basev} + err = row.UnmarshalDetails(&e) + result = e + case history.EffectAccountRemoved: + // there is no explicit data structure for account removed + fallthrough + default: + result = basev + } + + if err != nil { + return + } + + rh, ok := result.(base.Rehydratable) + + if ok { + err = rh.Rehydrate() + } + + return +} + +// Populate loads this resource from `row` +func PopulateBaseEffect(ctx context.Context, this *effects.Base, row history.Effect, ledger history.Ledger) { + this.ID = row.ID() + this.PT = row.PagingToken() + this.Account = row.Account + if row.AccountMuxed.Valid { + this.AccountMuxed = row.AccountMuxed.String + muxedAccount := xdr.MustMuxedAddress(row.AccountMuxed.String) + this.AccountMuxedID = uint64(muxedAccount.Med25519.Id) + } + populateEffectType(this, row) + this.LedgerCloseTime = ledger.ClosedAt + + lb := hal.LinkBuilder{horizonContext.BaseURL(ctx)} + this.Links.Operation = lb.Linkf("/operations/%d", row.HistoryOperationID) + this.Links.Succeeds = lb.Linkf("/effects?order=desc&cursor=%s", this.PT) + this.Links.Precedes = lb.Linkf("/effects?order=asc&cursor=%s", this.PT) +} + +func populateEffectType(this *effects.Base, row history.Effect) { + var ok bool + this.TypeI = int32(row.Type) + this.Type, ok = EffectTypeNames[row.Type] + + if !ok { + this.Type = "unknown" + } +} diff --git a/services/horizon/internal/resourceadapter/effects_test.go b/services/horizon/internal/resourceadapter/effects_test.go new file mode 100644 index 0000000000..f0ef2f4912 --- /dev/null +++ b/services/horizon/internal/resourceadapter/effects_test.go @@ -0,0 +1,133 @@ +package resourceadapter + +import ( + "context" + "encoding/json" + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/protocols/horizon/effects" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/support/test" + "github.com/stretchr/testify/assert" +) + +func TestNewEffectAllEffectsCovered(t *testing.T) { + for typ, s := range EffectTypeNames { + if typ == history.EffectAccountRemoved || typ == history.EffectAccountInflationDestinationUpdated { + // these effects use the base representation + continue + } + e := history.Effect{ + Type: typ, + } + result, err := NewEffect(context.TODO(), e, history.Ledger{}) + assert.NoError(t, err, s) + // it shouldn't be a base type + _, ok := result.(effects.Base) + assert.False(t, ok, s) + } + + // verify that the check works for an unknown effect + e := history.Effect{ + Type: 20000, + } + result, err := NewEffect(context.TODO(), e, history.Ledger{}) + assert.NoError(t, err) + _, ok := result.(effects.Base) + assert.True(t, ok) +} + +func TestEffectTypeNamesAreConsistentWithAdapterTypeNames(t *testing.T) { + for typ, s := range EffectTypeNames { + s2, ok := effects.EffectTypeNames[effects.EffectType(typ)] + assert.True(t, ok, s) + assert.Equal(t, s, s2) + } + for typ, s := range effects.EffectTypeNames { + s2, ok := 
EffectTypeNames[history.EffectType(typ)] + assert.True(t, ok, s) + assert.Equal(t, s, s2) + } +} + +func TestNewEffect_EffectTrustlineAuthorizedToMaintainLiabilities(t *testing.T) { + tt := assert.New(t) + ctx, _ := test.ContextWithLogBuffer() + + details := `{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "trustor": "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3" + }` + + hEffect := history.Effect{ + Account: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + HistoryOperationID: 1, + Order: 1, + Type: history.EffectTrustlineAuthorizedToMaintainLiabilities, + DetailsString: null.StringFrom(details), + } + resource, err := NewEffect(ctx, hEffect, history.Ledger{}) + tt.NoError(err) + + var resourcePage hal.Page + resourcePage.Add(resource) + + effect, ok := resource.(effects.TrustlineAuthorizedToMaintainLiabilities) + tt.True(ok) + tt.Equal("trustline_authorized_to_maintain_liabilities", effect.Type) + + binary, err := json.Marshal(resourcePage) + tt.NoError(err) + + var page effects.EffectsPage + tt.NoError(json.Unmarshal(binary, &page)) + tt.Len(page.Embedded.Records, 1) + tt.Equal(effect, page.Embedded.Records[0].(effects.TrustlineAuthorizedToMaintainLiabilities)) +} + +func TestNewEffect_EffectTrade_Muxed(t *testing.T) { + tt := assert.New(t) + ctx, _ := test.ContextWithLogBuffer() + + details := `{ + "seller": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "seller_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "seller_muxed_id": 1234 + }` + + hEffect := history.Effect{ + Account: "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + AccountMuxed: null.StringFrom("MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26"), + HistoryOperationID: 1, + Order: 1, + Type: history.EffectTrade, + DetailsString: null.StringFrom(details), + } + resource, err := NewEffect(ctx, hEffect, history.Ledger{}) + tt.NoError(err) + + var resourcePage hal.Page + resourcePage.Add(resource) + + effect, ok := resource.(effects.Trade) + tt.True(ok) + tt.Equal("trade", effect.Type) + + binary, err := json.Marshal(resourcePage) + tt.NoError(err) + + var page effects.EffectsPage + tt.NoError(json.Unmarshal(binary, &page)) + tt.Len(page.Embedded.Records, 1) + tt.Equal(effect, page.Embedded.Records[0].(effects.Trade)) + tt.Equal("GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", effect.Account) + tt.Equal("MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", effect.AccountMuxed) + tt.Equal(uint64(1234), effect.AccountMuxedID) + tt.Equal("GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", effect.Seller) + tt.Equal("MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", effect.SellerMuxed) + tt.Equal(uint64(1234), effect.SellerMuxedID) +} diff --git a/services/horizon/internal/resourceadapter/ledger.go b/services/horizon/internal/resourceadapter/ledger.go new file mode 100644 index 0000000000..f705bed891 --- /dev/null +++ b/services/horizon/internal/resourceadapter/ledger.go @@ -0,0 +1,50 @@ +package resourceadapter + +import ( + "context" + "fmt" + + "github.com/stellar/go/amount" + protocol "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +func PopulateLedger(ctx 
context.Context, dest *protocol.Ledger, row history.Ledger) { + dest.ID = row.LedgerHash + dest.PT = row.PagingToken() + dest.Hash = row.LedgerHash + dest.PrevHash = row.PreviousLedgerHash.String + dest.Sequence = row.Sequence + // Default to `transaction_count` + dest.SuccessfulTransactionCount = row.TransactionCount + if row.SuccessfulTransactionCount != nil { + dest.SuccessfulTransactionCount = *row.SuccessfulTransactionCount + } + dest.FailedTransactionCount = row.FailedTransactionCount + dest.OperationCount = row.OperationCount + dest.TxSetOperationCount = row.TxSetOperationCount + dest.ClosedAt = row.ClosedAt + dest.TotalCoins = amount.String(xdr.Int64(row.TotalCoins)) + dest.FeePool = amount.String(xdr.Int64(row.FeePool)) + dest.BaseFee = row.BaseFee + dest.BaseReserve = row.BaseReserve + dest.MaxTxSetSize = row.MaxTxSetSize + dest.ProtocolVersion = row.ProtocolVersion + + if row.LedgerHeaderXDR.Valid { + dest.HeaderXDR = row.LedgerHeaderXDR.String + } else { + dest.HeaderXDR = "" + } + + self := fmt.Sprintf("/ledgers/%d", row.Sequence) + lb := hal.LinkBuilder{horizonContext.BaseURL(ctx)} + dest.Links.Self = lb.Link(self) + dest.Links.Transactions = lb.PagedLink(self, "transactions") + dest.Links.Operations = lb.PagedLink(self, "operations") + dest.Links.Payments = lb.PagedLink(self, "payments") + dest.Links.Effects = lb.PagedLink(self, "effects") +} diff --git a/services/horizon/internal/resourceadapter/liquidity_pool.go b/services/horizon/internal/resourceadapter/liquidity_pool.go new file mode 100644 index 0000000000..3ff27562f4 --- /dev/null +++ b/services/horizon/internal/resourceadapter/liquidity_pool.go @@ -0,0 +1,52 @@ +package resourceadapter + +import ( + "context" + "fmt" + + "github.com/stellar/go/amount" + protocol "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +// PopulateLiquidityPool fills out the resource's fields +func PopulateLiquidityPool( + ctx context.Context, + dest *protocol.LiquidityPool, + liquidityPool history.LiquidityPool, + ledger *history.Ledger, +) error { + dest.ID = liquidityPool.PoolID + dest.FeeBP = liquidityPool.Fee + typ, ok := xdr.LiquidityPoolTypeToString[liquidityPool.Type] + if !ok { + return errors.Errorf("unknown liquidity pool type: %d", liquidityPool.Type) + } + dest.Type = typ + dest.TotalTrustlines = liquidityPool.TrustlineCount + dest.TotalShares = amount.StringFromInt64(int64(liquidityPool.ShareCount)) + for _, reserve := range liquidityPool.AssetReserves { + dest.Reserves = append(dest.Reserves, protocol.LiquidityPoolReserve{ + Asset: reserve.Asset.StringCanonical(), + Amount: amount.StringFromInt64(int64(reserve.Reserve)), + }) + } + + dest.LastModifiedLedger = liquidityPool.LastModifiedLedger + + if ledger != nil { + dest.LastModifiedTime = &ledger.ClosedAt + } + + lb := hal.LinkBuilder{Base: horizonContext.BaseURL(ctx)} + self := fmt.Sprintf("/liquidity_pools/%s", dest.ID) + dest.Links.Self = lb.Link(self) + dest.PT = dest.ID + dest.Links.Transactions = lb.PagedLink(self, "transactions") + dest.Links.Operations = lb.PagedLink(self, "operations") + return nil +} diff --git a/services/horizon/internal/resourceadapter/offer.go b/services/horizon/internal/resourceadapter/offer.go new file mode 100644 index 0000000000..faf3c999e1 --- /dev/null +++ 
b/services/horizon/internal/resourceadapter/offer.go @@ -0,0 +1,40 @@ +package resourceadapter + +import ( + "context" + "fmt" + "math/big" + + "github.com/stellar/go/amount" + protocol "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +// PopulateOffer constructs an offer response struct from an offer row extracted from +// the horizon offers table. +func PopulateOffer(ctx context.Context, dest *protocol.Offer, row history.Offer, ledger *history.Ledger) { + dest.ID = int64(row.OfferID) + dest.PT = fmt.Sprintf("%d", row.OfferID) + dest.Seller = row.SellerID + dest.Amount = amount.String(xdr.Int64(row.Amount)) + dest.PriceR.N = row.Pricen + dest.PriceR.D = row.Priced + dest.Price = big.NewRat(int64(row.Pricen), int64(row.Priced)).FloatString(7) + if row.Sponsor.Valid { + dest.Sponsor = row.Sponsor.String + } + + row.SellingAsset.MustExtract(&dest.Selling.Type, &dest.Selling.Code, &dest.Selling.Issuer) + row.BuyingAsset.MustExtract(&dest.Buying.Type, &dest.Buying.Code, &dest.Buying.Issuer) + + dest.LastModifiedLedger = int32(row.LastModifiedLedger) + if ledger != nil { + dest.LastModifiedTime = &ledger.ClosedAt + } + lb := hal.LinkBuilder{horizonContext.BaseURL(ctx)} + dest.Links.Self = lb.Linkf("/offers/%d", row.OfferID) + dest.Links.OfferMaker = lb.Linkf("/accounts/%s", row.SellerID) +} diff --git a/services/horizon/internal/resourceadapter/operations.go b/services/horizon/internal/resourceadapter/operations.go new file mode 100644 index 0000000000..95101fb474 --- /dev/null +++ b/services/horizon/internal/resourceadapter/operations.go @@ -0,0 +1,194 @@ +package resourceadapter + +import ( + "context" + "fmt" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/operations" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/render/hal" + "github.com/stellar/go/xdr" +) + +// NewOperation creates a new operation resource, finding the appropriate type to use +// based upon the row's type.
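For orientation only, a minimal sketch (not part of this diff) of how the NewOperation dispatcher that follows is typically consumed: each history row is converted to its concrete operations.* resource and collected into a HAL page. The helper name operationsPage and the pre-fetched inputs are assumptions for illustration, and the sketch presumes it lives in this same package.

import (
	"context"

	"github.com/stellar/go/services/horizon/internal/db2/history"
	"github.com/stellar/go/support/render/hal"
)

// operationsPage converts a batch of history rows into HAL pageables using the
// per-type dispatch in NewOperation and collects them into a hal.Page.
// Sketch only: assumed to sit alongside operations.go in this package.
func operationsPage(
	ctx context.Context,
	rows []history.Operation,
	tx *history.Transaction,
	ledger history.Ledger,
) (hal.Page, error) {
	var page hal.Page
	for _, row := range rows {
		// NewOperation picks the concrete operations.* type from row.Type and
		// falls back to the base representation for unknown types.
		resource, err := NewOperation(ctx, row, row.TransactionHash, tx, ledger)
		if err != nil {
			return page, err
		}
		page.Add(resource)
	}
	return page, nil
}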
+func NewOperation( + ctx context.Context, + operationRow history.Operation, + transactionHash string, + transactionRow *history.Transaction, + ledger history.Ledger, +) (result hal.Pageable, err error) { + + base := operations.Base{} + err = PopulateBaseOperation(ctx, &base, operationRow, transactionHash, transactionRow, ledger) + if err != nil { + return + } + + switch operationRow.Type { + case xdr.OperationTypeBumpSequence: + e := operations.BumpSequence{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeCreateAccount: + e := operations.CreateAccount{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypePayment: + e := operations.Payment{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypePathPaymentStrictReceive: + e := operations.PathPayment{} + e.Payment.Base = base + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeManageBuyOffer: + e := operations.ManageBuyOffer{} + e.Offer.Base = base + err = operationRow.UnmarshalDetails(&e.Offer) + if err == nil { + hmo := history.ManageOffer{} + err = operationRow.UnmarshalDetails(&hmo) + e.OfferID = hmo.OfferID + } + result = e + case xdr.OperationTypeManageSellOffer: + e := operations.ManageSellOffer{} + e.Offer.Base = base + err = operationRow.UnmarshalDetails(&e.Offer) + if err == nil { + hmo := history.ManageOffer{} + err = operationRow.UnmarshalDetails(&hmo) + e.OfferID = hmo.OfferID + } + result = e + case xdr.OperationTypeCreatePassiveSellOffer: + e := operations.CreatePassiveSellOffer{} + e.Offer.Base = base + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeSetOptions: + e := operations.SetOptions{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeChangeTrust: + e := operations.ChangeTrust{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeAllowTrust: + e := operations.AllowTrust{Base: base} + err = operationRow.UnmarshalDetails(&e) + // if the trustline is authorized, we want to reflect that it implies + // authorized_to_maintain_liabilities to true, otherwise, we use the + // value from details + if e.Authorize { + e.AuthorizeToMaintainLiabilities = e.Authorize + } + result = e + case xdr.OperationTypeAccountMerge: + e := operations.AccountMerge{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeInflation: + e := operations.Inflation{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeManageData: + e := operations.ManageData{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypePathPaymentStrictSend: + e := operations.PathPaymentStrictSend{} + e.Payment.Base = base + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeCreateClaimableBalance: + e := operations.CreateClaimableBalance{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeClaimClaimableBalance: + e := operations.ClaimClaimableBalance{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeBeginSponsoringFutureReserves: + e := operations.BeginSponsoringFutureReserves{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeEndSponsoringFutureReserves: + e := operations.EndSponsoringFutureReserves{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case 
xdr.OperationTypeRevokeSponsorship: + e := operations.RevokeSponsorship{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeClawback: + e := operations.Clawback{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeClawbackClaimableBalance: + e := operations.ClawbackClaimableBalance{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeSetTrustLineFlags: + e := operations.SetTrustLineFlags{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeLiquidityPoolDeposit: + e := operations.LiquidityPoolDeposit{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + case xdr.OperationTypeLiquidityPoolWithdraw: + e := operations.LiquidityPoolWithdraw{Base: base} + err = operationRow.UnmarshalDetails(&e) + result = e + default: + result = base + } + + return +} + +// Populate fills out this resource using `row` as the source. +func PopulateBaseOperation(ctx context.Context, dest *operations.Base, operationRow history.Operation, transactionHash string, transactionRow *history.Transaction, ledger history.Ledger) error { + dest.ID = fmt.Sprintf("%d", operationRow.ID) + dest.PT = operationRow.PagingToken() + dest.TransactionSuccessful = operationRow.TransactionSuccessful + dest.SourceAccount = operationRow.SourceAccount + if operationRow.SourceAccountMuxed.Valid { + dest.SourceAccountMuxed = operationRow.SourceAccountMuxed.String + muxedAccount := xdr.MustMuxedAddress(dest.SourceAccountMuxed) + dest.SourceAccountMuxedID = uint64(muxedAccount.Med25519.Id) + } + populateOperationType(dest, operationRow) + dest.LedgerCloseTime = ledger.ClosedAt + dest.TransactionHash = transactionHash + + lb := hal.LinkBuilder{Base: horizonContext.BaseURL(ctx)} + self := fmt.Sprintf("/operations/%d", operationRow.ID) + dest.Links.Self = lb.Link(self) + dest.Links.Succeeds = lb.Linkf("/effects?order=desc&cursor=%s", dest.PT) + dest.Links.Precedes = lb.Linkf("/effects?order=asc&cursor=%s", dest.PT) + dest.Links.Transaction = lb.Linkf("/transactions/%s", operationRow.TransactionHash) + dest.Links.Effects = lb.Link(self, "effects") + + if transactionRow != nil { + dest.Transaction = new(horizon.Transaction) + return PopulateTransaction(ctx, transactionHash, dest.Transaction, *transactionRow) + } + return nil +} + +func populateOperationType(dest *operations.Base, row history.Operation) { + var ok bool + dest.TypeI = int32(row.Type) + dest.Type, ok = operations.TypeNames[row.Type] + + if !ok { + dest.Type = "unknown" + } +} diff --git a/services/horizon/internal/resourceadapter/operations_test.go b/services/horizon/internal/resourceadapter/operations_test.go new file mode 100644 index 0000000000..90a2104244 --- /dev/null +++ b/services/horizon/internal/resourceadapter/operations_test.go @@ -0,0 +1,412 @@ +package resourceadapter + +import ( + "context" + "encoding/json" + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/protocols/horizon/operations" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/test" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestNewOperationAllTypesCovered(t *testing.T) { + for typ, s := range xdr.OperationTypeToStringMap { + row := history.Operation{ + Type: xdr.OperationType(typ), + } + op, err := NewOperation(context.Background(), row, "foo", &history.Transaction{}, history.Ledger{}) + assert.NoError(t, err, s) + // if we got a base type, the 
operation is not covered + if _, ok := op.(operations.Base); ok { + assert.Fail(t, s) + } + } + + // make sure the check works for an unreasonable operation type + row := history.Operation{ + Type: xdr.OperationType(200000), + } + op, err := NewOperation(context.Background(), row, "foo", &history.Transaction{}, history.Ledger{}) + assert.NoError(t, err) + assert.IsType(t, op, operations.Base{}) + +} + +// TestPopulateOperation_Successful tests operation object population. +func TestPopulateOperation_Successful(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + + var ( + dest operations.Base + row history.Operation + ledger = history.Ledger{} + ) + + dest = operations.Base{} + row = history.Operation{TransactionSuccessful: true} + + assert.NoError( + t, + PopulateBaseOperation(ctx, &dest, row, "", nil, ledger), + ) + assert.True(t, dest.TransactionSuccessful) + assert.Nil(t, dest.Transaction) + + dest = operations.Base{} + row = history.Operation{TransactionSuccessful: false} + + assert.NoError( + t, + PopulateBaseOperation(ctx, &dest, row, "", nil, ledger), + ) + assert.False(t, dest.TransactionSuccessful) + assert.Nil(t, dest.Transaction) +} + +// TestPopulateOperation_WithTransaction tests PopulateBaseOperation when passing both an operation and a transaction. +func TestPopulateOperation_WithTransaction(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + + var ( + dest operations.Base + operationsRow history.Operation + ledger = history.Ledger{} + transactionRow history.Transaction + ) + + dest = operations.Base{} + operationsRow = history.Operation{TransactionSuccessful: true} + transactionRow = history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + Successful: true, + MaxFee: 10000, + FeeCharged: 100, + }, + } + + assert.NoError( + t, + PopulateBaseOperation(ctx, &dest, operationsRow, transactionRow.TransactionHash, &transactionRow, ledger), + ) + assert.True(t, dest.TransactionSuccessful) + assert.True(t, dest.Transaction.Successful) + assert.Equal(t, int64(100), dest.Transaction.FeeCharged) + assert.Equal(t, int64(10000), dest.Transaction.MaxFee) +} + +func TestPopulateOperation_AllowTrust(t *testing.T) { + tt := assert.New(t) + + details := `{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "authorize": false, + "authorize_to_maintain_liabilities": true, + "trustee": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "trustor": "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3" + }` + + rsp, err := getJSONResponse(xdr.OperationTypeAllowTrust, details) + tt.NoError(err) + tt.Equal(false, rsp["authorize"]) + tt.Equal(true, rsp["authorize_to_maintain_liabilities"]) + + details = `{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "authorize": true, + "authorize_to_maintain_liabilities": true, + "trustee": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "trustor": "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3" + }` + + rsp, err = getJSONResponse(xdr.OperationTypeAllowTrust, details) + tt.NoError(err) + tt.Equal(true, rsp["authorize"]) + tt.Equal(true, rsp["authorize_to_maintain_liabilities"]) + + details = `{ + "asset_code": "COP", + "asset_issuer": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "asset_type": "credit_alphanum4", + "authorize": false, + "authorize_to_maintain_liabilities": false, + "trustee": 
"GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "trustor": "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3" + }` + + rsp, err = getJSONResponse(xdr.OperationTypeAllowTrust, details) + tt.NoError(err) + tt.Equal(false, rsp["authorize"]) + tt.Equal(false, rsp["authorize_to_maintain_liabilities"]) +} + +func TestPopulateOperation_CreateClaimableBalance(t *testing.T) { + tt := assert.New(t) + + details := `{ + "asset": "COP:GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "amount": "10.0000000", + "claimants": [ + { + "destination": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "predicate": { + "and": [ + { + "or": [ + {"rel_before":"12"}, + {"abs_before": "2020-08-26T11:15:39Z"} + ] + }, + { + "not": {"unconditional": true} + } + ] + } + } + ] + }` + + resp, err := getJSONResponse(xdr.OperationTypeCreateClaimableBalance, details) + tt.NoError(err) + tt.Equal("COP:GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", resp["asset"]) + tt.Equal("10.0000000", resp["amount"]) +} + +func TestPopulateOperation_ClaimClaimableBalance(t *testing.T) { + tt := assert.New(t) + + details := `{ + "balance_id": "abc", + "claimant": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD" + }` + + resp, err := getJSONResponse(xdr.OperationTypeClaimClaimableBalance, details) + tt.NoError(err) + tt.Equal("abc", resp["balance_id"]) + tt.Equal("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", resp["claimant"]) +} + +func TestPopulateOperation_ClaimClaimableBalance_Muxed(t *testing.T) { + tt := assert.New(t) + + details := `{ + "claimant": "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + "claimant_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "claimant_muxed_id": "1234", + "balance_id": "abc", + "source_account_muxed": "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", + "source_account_muxed_id": "1234" + }` + + resp, err := getJSONResponse(xdr.OperationTypeClaimClaimableBalance, details) + tt.NoError(err) + tt.Equal("abc", resp["balance_id"]) + tt.Equal("GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", resp["claimant"]) + tt.Equal("MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", resp["claimant_muxed"]) + tt.Equal("1234", resp["claimant_muxed_id"]) + tt.Equal("MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26", resp["source_account_muxed"]) + tt.Equal("1234", resp["source_account_muxed_id"]) +} + +func TestPopulateOperation_BeginSponsoringFutureReserves(t *testing.T) { + tt := assert.New(t) + + details := `{ + "sponsored_id": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD" + }` + + resp, err := getJSONResponse(xdr.OperationTypeBeginSponsoringFutureReserves, details) + tt.NoError(err) + tt.Equal("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", resp["sponsored_id"]) +} + +func TestPopulateOperation_EndSponsoringFutureReserves(t *testing.T) { + tt := assert.New(t) + + details := `{ + "begin_sponsor": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD" + }` + + resp, err := getJSONResponse(xdr.OperationTypeEndSponsoringFutureReserves, details) + tt.NoError(err) + tt.Equal("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", resp["begin_sponsor"]) +} + +func TestPopulateOperation_OperationTypeRevokeSponsorship_Account(t *testing.T) { + tt := assert.New(t) + + details := `{ + "account_id": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD" + }` + + resp, err := 
getJSONResponse(xdr.OperationTypeRevokeSponsorship, details) + tt.NoError(err) + tt.Equal("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", resp["account_id"]) +} + +func TestPopulateOperation_OperationTypeRevokeSponsorship_Data(t *testing.T) { + tt := assert.New(t) + + details := `{ + "data_account_id": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "data_name": "name" + }` + + resp, err := getJSONResponse(xdr.OperationTypeRevokeSponsorship, details) + tt.NoError(err) + tt.Equal("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", resp["data_account_id"]) + tt.Equal("name", resp["data_name"]) +} + +func TestPopulateOperation_OperationTypeRevokeSponsorship_Offer(t *testing.T) { + tt := assert.New(t) + + details := `{ + "offer_id": "1000" + }` + + resp, err := getJSONResponse(xdr.OperationTypeRevokeSponsorship, details) + tt.NoError(err) + tt.Equal("1000", resp["offer_id"]) +} + +func TestPopulateOperation_OperationTypeRevokeSponsorship_Trustline(t *testing.T) { + tt := assert.New(t) + + details := `{ + "trustline_account_id": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", + "trustline_asset": "COP:GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD" + }` + + resp, err := getJSONResponse(xdr.OperationTypeRevokeSponsorship, details) + tt.NoError(err) + tt.Equal("GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", resp["trustline_account_id"]) + tt.Equal("COP:GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", resp["trustline_asset"]) +} + +func getJSONResponse(typ xdr.OperationType, details string) (rsp map[string]interface{}, err error) { + ctx, _ := test.ContextWithLogBuffer() + transactionRow := history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + Successful: true, + MaxFee: 10000, + FeeCharged: 100, + }, + } + operationsRow := history.Operation{ + TransactionSuccessful: true, + Type: typ, + DetailsString: null.StringFrom(details), + } + resource, err := NewOperation(ctx, operationsRow, "", &transactionRow, history.Ledger{}) + if err != nil { + return + } + + data, err := json.Marshal(resource) + if err != nil { + return + } + err = json.Unmarshal(data, &rsp) + return +} + +func TestFeeBumpOperation(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + dest := operations.Base{} + operationsRow := history.Operation{TransactionSuccessful: true} + transactionRow := history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + Successful: true, + MaxFee: 123, + FeeCharged: 100, + TransactionHash: "cebb875a00ff6e1383aef0fd251a76f22c1f9ab2a2dffcb077855736ade2659a", + FeeAccount: null.StringFrom("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"), + Account: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + NewMaxFee: null.IntFrom(10000), + InnerTransactionHash: null.StringFrom("2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d"), + Signatures: []string{"a", "b", "c"}, + InnerSignatures: []string{"d", "e", "f"}, + }, + } + + assert.NoError( + t, + PopulateBaseOperation(ctx, &dest, operationsRow, transactionRow.TransactionHash, nil, history.Ledger{}), + ) + assert.Equal(t, transactionRow.TransactionHash, dest.TransactionHash) + + assert.NoError( + t, + PopulateBaseOperation(ctx, &dest, operationsRow, transactionRow.InnerTransactionHash.String, nil, history.Ledger{}), + ) + assert.Equal(t, transactionRow.InnerTransactionHash.String, dest.TransactionHash) + + assert.NoError( + t, + PopulateBaseOperation(ctx, &dest, operationsRow, 
transactionRow.TransactionHash, &transactionRow, history.Ledger{}), + ) + + assert.Equal(t, transactionRow.TransactionHash, dest.TransactionHash) + assert.Equal(t, transactionRow.TransactionHash, dest.Transaction.Hash) + assert.Equal(t, transactionRow.TransactionHash, dest.Transaction.ID) + assert.Equal(t, transactionRow.FeeAccount.String, dest.Transaction.FeeAccount) + assert.Equal(t, transactionRow.Account, dest.Transaction.Account) + assert.Equal(t, transactionRow.FeeCharged, dest.Transaction.FeeCharged) + assert.Equal(t, transactionRow.NewMaxFee.Int64, dest.Transaction.MaxFee) + assert.Equal(t, []string{"a", "b", "c"}, dest.Transaction.Signatures) + assert.Equal(t, transactionRow.InnerTransactionHash.String, dest.Transaction.InnerTransaction.Hash) + assert.Equal(t, transactionRow.MaxFee, dest.Transaction.InnerTransaction.MaxFee) + assert.Equal(t, []string{"d", "e", "f"}, dest.Transaction.InnerTransaction.Signatures) + assert.Equal(t, transactionRow.TransactionHash, dest.Transaction.FeeBumpTransaction.Hash) + assert.Equal(t, []string{"a", "b", "c"}, dest.Transaction.FeeBumpTransaction.Signatures) + + assert.NoError( + t, + PopulateBaseOperation(ctx, &dest, operationsRow, transactionRow.InnerTransactionHash.String, &transactionRow, history.Ledger{}), + ) + assert.Equal(t, transactionRow.InnerTransactionHash.String, dest.TransactionHash) + assert.Equal(t, transactionRow.InnerTransactionHash.String, dest.Transaction.Hash) + assert.Equal(t, transactionRow.InnerTransactionHash.String, dest.Transaction.ID) + assert.Equal(t, transactionRow.FeeAccount.String, dest.Transaction.FeeAccount) + assert.Equal(t, transactionRow.Account, dest.Transaction.Account) + assert.Equal(t, transactionRow.FeeCharged, dest.Transaction.FeeCharged) + assert.Equal(t, transactionRow.NewMaxFee.Int64, dest.Transaction.MaxFee) + assert.Equal(t, []string{"d", "e", "f"}, dest.Transaction.Signatures) + assert.Equal(t, transactionRow.InnerTransactionHash.String, dest.Transaction.InnerTransaction.Hash) + assert.Equal(t, transactionRow.MaxFee, dest.Transaction.InnerTransaction.MaxFee) + assert.Equal(t, []string{"d", "e", "f"}, dest.Transaction.InnerTransaction.Signatures) + assert.Equal(t, transactionRow.TransactionHash, dest.Transaction.FeeBumpTransaction.Hash) + assert.Equal(t, []string{"a", "b", "c"}, dest.Transaction.FeeBumpTransaction.Signatures) +} + +func TestPopulateOperation_OperationTypeManageSellOffer(t *testing.T) { + tt := assert.New(t) + + details := `{ + "offer_id": 1000 + }` + + resp, err := getJSONResponse(xdr.OperationTypeManageSellOffer, details) + tt.NoError(err) + tt.Equal("1000", resp["offer_id"]) +} + +func TestPopulateOperation_OperationTypeManageBuyOffer(t *testing.T) { + tt := assert.New(t) + + details := `{ + "offer_id": 1000 + }` + + resp, err := getJSONResponse(xdr.OperationTypeManageBuyOffer, details) + tt.NoError(err) + tt.Equal("1000", resp["offer_id"]) +} diff --git a/services/horizon/internal/resourceadapter/path.go b/services/horizon/internal/resourceadapter/path.go new file mode 100644 index 0000000000..1e7d1ecf36 --- /dev/null +++ b/services/horizon/internal/resourceadapter/path.go @@ -0,0 +1,63 @@ +package resourceadapter + +import ( + "context" + "fmt" + "strings" + + "github.com/stellar/go/amount" + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/paths" +) + +func extractAsset(asset string, t, c, i *string) error { + if asset == "native" { + *t = asset + return nil + } + parts := strings.Split(asset, "/") + if len(parts) != 3 { + return 
fmt.Errorf("expected length to be 3 but got %v", parts) + } + *t = parts[0] + *c = parts[1] + *i = parts[2] + return nil +} + +// PopulatePath converts the paths.Path into a Path +func PopulatePath(ctx context.Context, dest *horizon.Path, p paths.Path) (err error) { + dest.DestinationAmount = amount.String(p.DestinationAmount) + dest.SourceAmount = amount.String(p.SourceAmount) + + err = extractAsset( + p.Source, + &dest.SourceAssetType, + &dest.SourceAssetCode, + &dest.SourceAssetIssuer) + if err != nil { + return + } + + err = extractAsset( + p.Destination, + &dest.DestinationAssetType, + &dest.DestinationAssetCode, + &dest.DestinationAssetIssuer) + if err != nil { + return + } + + dest.Path = make([]horizon.Asset, len(p.Path)) + for i, a := range p.Path { + err = extractAsset( + a, + &dest.Path[i].Type, + &dest.Path[i].Code, + &dest.Path[i].Issuer) + if err != nil { + return + } + } + return +} diff --git a/services/horizon/internal/resourceadapter/path_test.go b/services/horizon/internal/resourceadapter/path_test.go new file mode 100644 index 0000000000..4ade5249ab --- /dev/null +++ b/services/horizon/internal/resourceadapter/path_test.go @@ -0,0 +1,50 @@ +package resourceadapter + +import ( + "context" + "testing" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/paths" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestPopulatePath(t *testing.T) { + native := xdr.MustNewNativeAsset() + usdc := xdr.MustNewCreditAsset("USDC", "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML") + bingo := xdr.MustNewCreditAsset("BINGO", "GBZ35ZJRIKJGYH5PBKLKOZ5L6EXCNTO7BKIL7DAVVDFQ2ODJEEHHJXIM") + p := paths.Path{ + Path: []string{bingo.String(), native.String()}, + Source: native.String(), + SourceAmount: 123, + Destination: usdc.String(), + DestinationAmount: 345, + } + + var dest horizon.Path + assert.NoError(t, PopulatePath(context.Background(), &dest, p)) + + assert.Equal(t, horizon.Path{ + SourceAssetType: "native", + SourceAssetCode: "", + SourceAssetIssuer: "", + SourceAmount: "0.0000123", + DestinationAssetType: "credit_alphanum4", + DestinationAssetCode: "USDC", + DestinationAssetIssuer: "GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML", + DestinationAmount: "0.0000345", + Path: []horizon.Asset{ + { + Type: "credit_alphanum12", + Code: "BINGO", + Issuer: "GBZ35ZJRIKJGYH5PBKLKOZ5L6EXCNTO7BKIL7DAVVDFQ2ODJEEHHJXIM", + }, + { + Type: "native", + Code: "", + Issuer: "", + }, + }, + }, dest) +} diff --git a/services/horizon/internal/resourceadapter/root.go b/services/horizon/internal/resourceadapter/root.go new file mode 100644 index 0000000000..9ecc909341 --- /dev/null +++ b/services/horizon/internal/resourceadapter/root.go @@ -0,0 +1,75 @@ +package resourceadapter + +import ( + "context" + "net/url" + + "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/support/render/hal" +) + +// Populate fills in the details +func PopulateRoot( + ctx context.Context, + dest *horizon.Root, + ledgerState ledger.Status, + hVersion, cVersion string, + passphrase string, + currentProtocolVersion int32, + coreSupportedProtocolVersion int32, + friendBotURL *url.URL, + templates map[string]string, +) { + dest.IngestSequence = ledgerState.ExpHistoryLatest + dest.HorizonSequence = ledgerState.HistoryLatest + dest.HorizonLatestClosedAt = ledgerState.HistoryLatestClosedAt + 
dest.HistoryElderSequence = ledgerState.HistoryElder + dest.CoreSequence = ledgerState.CoreLatest + dest.HorizonVersion = hVersion + dest.StellarCoreVersion = cVersion + dest.NetworkPassphrase = passphrase + dest.CurrentProtocolVersion = currentProtocolVersion + dest.CoreSupportedProtocolVersion = coreSupportedProtocolVersion + + lb := hal.LinkBuilder{Base: horizonContext.BaseURL(ctx)} + if friendBotURL != nil { + friendbotLinkBuild := hal.LinkBuilder{Base: friendBotURL} + l := friendbotLinkBuild.Link("{?addr}") + dest.Links.Friendbot = &l + } + + dest.Links.Account = lb.Link("/accounts/{account_id}") + dest.Links.AccountTransactions = lb.PagedLink("/accounts/{account_id}/transactions") + dest.Links.Assets = lb.Link("/assets{?asset_code,asset_issuer,cursor,limit,order}") + dest.Links.Effects = lb.Link("/effects{?cursor,limit,order}") + dest.Links.Ledger = lb.Link("/ledger/{sequence}") + dest.Links.Ledgers = lb.Link("/ledgers{?cursor,limit,order}") + dest.Links.FeeStats = lb.Link("/fee_stats") + dest.Links.Operation = lb.Link("/operations/{id}") + dest.Links.Operations = lb.Link("/operations{?cursor,limit,order,include_failed}") + dest.Links.Payments = lb.Link("/payments{?cursor,limit,order,include_failed}") + dest.Links.TradeAggregations = lb.Link("/trade_aggregations?base_asset_type={base_asset_type}&base_asset_code={base_asset_code}&base_asset_issuer={base_asset_issuer}&counter_asset_type={counter_asset_type}&counter_asset_code={counter_asset_code}&counter_asset_issuer={counter_asset_issuer}") + dest.Links.Trades = lb.Link("/trades?base_asset_type={base_asset_type}&base_asset_code={base_asset_code}&base_asset_issuer={base_asset_issuer}&counter_asset_type={counter_asset_type}&counter_asset_code={counter_asset_code}&counter_asset_issuer={counter_asset_issuer}") + + accountsLink := lb.Link(templates["accounts"]) + claimableBalancesLink := lb.Link(templates["claimableBalances"]) + liquidityPoolsLink := lb.Link(templates["liquidityPools"]) + offerLink := lb.Link("/offers/{offer_id}") + offersLink := lb.Link(templates["offers"]) + strictReceivePaths := lb.Link(templates["strictReceivePaths"]) + strictSendPaths := lb.Link(templates["strictSendPaths"]) + dest.Links.Accounts = &accountsLink + dest.Links.ClaimableBalances = &claimableBalancesLink + dest.Links.LiquidityPools = &liquidityPoolsLink + dest.Links.Offer = &offerLink + dest.Links.Offers = &offersLink + dest.Links.StrictReceivePaths = &strictReceivePaths + dest.Links.StrictSendPaths = &strictSendPaths + + dest.Links.OrderBook = lb.Link("/order_book{?selling_asset_type,selling_asset_code,selling_asset_issuer,buying_asset_type,buying_asset_code,buying_asset_issuer,limit}") + dest.Links.Self = lb.Link("/") + dest.Links.Transaction = lb.Link("/transactions/{hash}") + dest.Links.Transactions = lb.PagedLink("/transactions") +} diff --git a/services/horizon/internal/resourceadapter/root_test.go b/services/horizon/internal/resourceadapter/root_test.go new file mode 100644 index 0000000000..23bc1c5024 --- /dev/null +++ b/services/horizon/internal/resourceadapter/root_test.go @@ -0,0 +1,125 @@ +package resourceadapter + +import ( + "context" + "net/url" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/ledger" +) + +func TestPopulateRoot(t *testing.T) { + res := &horizon.Root{} + templates := map[string]string{ + "accounts": "/accounts{?signer,asset_type,asset_issuer,asset_code}", + "offers": "/offers", + "strictReceivePaths": 
"/paths/strict-receive", + "strictSendPaths": "/paths/strict-send", + } + + PopulateRoot(context.Background(), + res, + ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: 1, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: 3, HistoryElder: 2, + }, + }, + "hVersion", + "cVersion", + "passphrase", + 100, + 101, + urlMustParse(t, "https://friendbot.example.com"), + templates, + ) + + assert.Equal(t, int32(1), res.CoreSequence) + assert.Equal(t, int32(2), res.HistoryElderSequence) + assert.Equal(t, int32(3), res.HorizonSequence) + assert.Equal(t, "hVersion", res.HorizonVersion) + assert.Equal(t, "cVersion", res.StellarCoreVersion) + assert.Equal(t, "passphrase", res.NetworkPassphrase) + assert.Equal(t, "https://friendbot.example.com/{?addr}", res.Links.Friendbot.Href) + + // Without testbot + res = &horizon.Root{} + PopulateRoot(context.Background(), + res, + ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: 1, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: 3, HistoryElder: 2, + }, + }, + "hVersion", + "cVersion", + "passphrase", + 100, + 101, + nil, + templates, + ) + + assert.Equal(t, int32(1), res.CoreSequence) + assert.Equal(t, int32(2), res.HistoryElderSequence) + assert.Equal(t, int32(3), res.HorizonSequence) + assert.Equal(t, "hVersion", res.HorizonVersion) + assert.Equal(t, "cVersion", res.StellarCoreVersion) + assert.Equal(t, "passphrase", res.NetworkPassphrase) + assert.Empty(t, res.Links.Friendbot) + + res = &horizon.Root{} + PopulateRoot(context.Background(), + res, + ledger.Status{ + CoreStatus: ledger.CoreStatus{ + CoreLatest: 1, + }, + HorizonStatus: ledger.HorizonStatus{ + HistoryLatest: 3, HistoryElder: 2, + }, + }, + "hVersion", + "cVersion", + "passphrase", + 100, + 101, + urlMustParse(t, "https://friendbot.example.com"), + templates, + ) + + assert.Equal(t, templates["accounts"], res.Links.Accounts.Href) + assert.Equal(t, "/offers/{offer_id}", res.Links.Offer.Href) + assert.Equal( + t, + templates["offers"], + res.Links.Offers.Href, + ) + assert.Equal( + t, + templates["strictReceivePaths"], + res.Links.StrictReceivePaths.Href, + ) + assert.Equal( + t, + templates["strictSendPaths"], + res.Links.StrictSendPaths.Href, + ) +} + +func urlMustParse(t *testing.T, s string) *url.URL { + if u, err := url.Parse(s); err != nil { + t.Fatalf("Unable to parse URL: %s/%v", s, err) + return nil + } else { + return u + } +} diff --git a/services/horizon/internal/resourceadapter/trade.go b/services/horizon/internal/resourceadapter/trade.go new file mode 100644 index 0000000000..280e40dba3 --- /dev/null +++ b/services/horizon/internal/resourceadapter/trade.go @@ -0,0 +1,99 @@ +package resourceadapter + +import ( + "context" + "fmt" + + "github.com/stellar/go/xdr" + + "github.com/stellar/go/amount" + protocol "github.com/stellar/go/protocols/horizon" + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/render/hal" +) + +// Populate fills out the details of a trade using a row from the history_trades +// table. 
+func PopulateTrade( + ctx context.Context, + dest *protocol.Trade, + row history.Trade, +) { + dest.ID = row.PagingToken() + dest.PT = row.PagingToken() + dest.BaseOfferID = "" + if row.BaseOfferID.Valid { + dest.BaseOfferID = fmt.Sprintf("%d", row.BaseOfferID.Int64) + } + if row.BaseAccount.Valid { + dest.BaseAccount = row.BaseAccount.String + } + if row.BaseLiquidityPoolID.Valid { + dest.BaseLiquidityPoolID = row.BaseLiquidityPoolID.String + } + dest.BaseAssetType = row.BaseAssetType + dest.BaseAssetCode = row.BaseAssetCode + dest.BaseAssetIssuer = row.BaseAssetIssuer + dest.BaseAmount = amount.String(xdr.Int64(row.BaseAmount)) + dest.CounterOfferID = "" + if row.CounterOfferID.Valid { + dest.CounterOfferID = fmt.Sprintf("%d", row.CounterOfferID.Int64) + } + if row.CounterAccount.Valid { + dest.CounterAccount = row.CounterAccount.String + } + if row.CounterLiquidityPoolID.Valid { + dest.CounterLiquidityPoolID = row.CounterLiquidityPoolID.String + } + dest.CounterAssetType = row.CounterAssetType + dest.CounterAssetCode = row.CounterAssetCode + dest.CounterAssetIssuer = row.CounterAssetIssuer + dest.CounterAmount = amount.String(xdr.Int64(row.CounterAmount)) + dest.LedgerCloseTime = row.LedgerCloseTime + dest.BaseIsSeller = row.BaseIsSeller + + if row.LiquidityPoolFee.Valid { + dest.LiquidityPoolFeeBP = uint32(row.LiquidityPoolFee.Int64) + } + + switch row.Type { + case history.OrderbookTradeType: + dest.TradeType = history.OrderbookTrades + case history.LiquidityPoolTradeType: + dest.TradeType = history.LiquidityPoolTrades + } + + if row.HasPrice() { + dest.Price = protocol.TradePrice{ + N: row.PriceN.Int64, + D: row.PriceD.Int64, + } + } + + populateTradeLinks(ctx, dest, row.HistoryOperationID) +} + +func populateTradeLinks( + ctx context.Context, + dest *protocol.Trade, + opid int64, +) { + lb := hal.LinkBuilder{horizonContext.BaseURL(ctx)} + switch { + case dest.BaseOfferID != "": + dest.Links.Base = lb.Link("/accounts", dest.BaseAccount) + case dest.BaseLiquidityPoolID != "": + dest.Links.Base = lb.Link("/liquidity_pools", dest.BaseLiquidityPoolID) + } + switch { + case dest.CounterOfferID != "": + dest.Links.Counter = lb.Link("/accounts", dest.CounterAccount) + case dest.CounterLiquidityPoolID != "": + dest.Links.Counter = lb.Link("/liquidity_pools", dest.CounterLiquidityPoolID) + } + dest.Links.Operation = lb.Link( + "/operations", + fmt.Sprintf("%d", opid), + ) +} diff --git a/services/horizon/internal/resourceadapter/trade_aggregation.go b/services/horizon/internal/resourceadapter/trade_aggregation.go new file mode 100644 index 0000000000..3cafe7fd0d --- /dev/null +++ b/services/horizon/internal/resourceadapter/trade_aggregation.go @@ -0,0 +1,52 @@ +package resourceadapter + +import ( + "context" + + "github.com/stellar/go/amount" + "github.com/stellar/go/price" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" +) + +// PopulateTradeAggregation fills out the details of a trade aggregation using a row from the trade aggregations +// table. 
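+// It returns an error if the base or counter volume stored in the row cannot
+// be converted into an amount string.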
+func PopulateTradeAggregation( + ctx context.Context, + dest *protocol.TradeAggregation, + row history.TradeAggregation, +) error { + var err error + dest.Timestamp = row.Timestamp + dest.TradeCount = row.TradeCount + dest.BaseVolume, err = amount.IntStringToAmount(row.BaseVolume) + if err != nil { + return err + } + dest.CounterVolume, err = amount.IntStringToAmount(row.CounterVolume) + if err != nil { + return err + } + dest.Average = price.StringFromFloat64(row.Average) + dest.HighR = protocol.TradePrice{ + N: row.HighN, + D: row.HighD, + } + dest.High = dest.HighR.String() + dest.LowR = protocol.TradePrice{ + N: row.LowN, + D: row.LowD, + } + dest.Low = dest.LowR.String() + dest.OpenR = protocol.TradePrice{ + N: row.OpenN, + D: row.OpenD, + } + dest.Open = dest.OpenR.String() + dest.CloseR = protocol.TradePrice{ + N: row.CloseN, + D: row.CloseD, + } + dest.Close = dest.CloseR.String() + return nil +} diff --git a/services/horizon/internal/resourceadapter/transaction.go b/services/horizon/internal/resourceadapter/transaction.go new file mode 100644 index 0000000000..ec5b8cc01c --- /dev/null +++ b/services/horizon/internal/resourceadapter/transaction.go @@ -0,0 +1,118 @@ +package resourceadapter + +import ( + "context" + "encoding/base64" + "fmt" + "time" + + "github.com/guregu/null" + + horizonContext "github.com/stellar/go/services/horizon/internal/context" + "github.com/stellar/go/xdr" + + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/render/hal" +) + +// Populate fills out the details +func PopulateTransaction( + ctx context.Context, + transactionHash string, + dest *protocol.Transaction, + row history.Transaction, +) error { + dest.ID = transactionHash + dest.PT = row.PagingToken() + dest.Successful = row.Successful + dest.Hash = transactionHash + dest.Ledger = row.LedgerSequence + dest.LedgerCloseTime = row.LedgerCloseTime + dest.Account = row.Account + if row.AccountMuxed.Valid { + dest.AccountMuxed = row.AccountMuxed.String + muxedAccount := xdr.MustMuxedAddress(dest.AccountMuxed) + dest.AccountMuxedID = uint64(muxedAccount.Med25519.Id) + } + dest.AccountSequence = row.AccountSequence + + dest.FeeCharged = row.FeeCharged + + dest.OperationCount = row.OperationCount + dest.EnvelopeXdr = row.TxEnvelope + dest.ResultXdr = row.TxResult + dest.ResultMetaXdr = row.TxMeta + dest.FeeMetaXdr = row.TxFeeMeta + dest.MemoType = row.MemoType + dest.Memo = row.Memo.String + if row.MemoType == "text" { + if memoBytes, err := memoBytes(row.TxEnvelope); err != nil { + return err + } else { + dest.MemoBytes = memoBytes + } + } + dest.Signatures = row.Signatures + if !row.TimeBounds.Null { + dest.ValidBefore = timeString(dest, row.TimeBounds.Upper) + dest.ValidAfter = timeString(dest, row.TimeBounds.Lower) + } + + if row.InnerTransactionHash.Valid { + dest.FeeAccount = row.FeeAccount.String + if row.FeeAccountMuxed.Valid { + dest.FeeAccountMuxed = row.FeeAccountMuxed.String + muxedAccount := xdr.MustMuxedAddress(dest.FeeAccountMuxed) + dest.FeeAccountMuxedID = uint64(muxedAccount.Med25519.Id) + } + dest.MaxFee = row.NewMaxFee.Int64 + dest.FeeBumpTransaction = &protocol.FeeBumpTransaction{ + Hash: row.TransactionHash, + Signatures: dest.Signatures, + } + dest.InnerTransaction = &protocol.InnerTransaction{ + Hash: row.InnerTransactionHash.String, + MaxFee: row.MaxFee, + Signatures: row.InnerSignatures, + } + if transactionHash != row.TransactionHash { + dest.Signatures = 
dest.InnerTransaction.Signatures + } + } else { + dest.FeeAccount = dest.Account + dest.FeeAccountMuxed = dest.AccountMuxed + dest.FeeAccountMuxedID = dest.AccountMuxedID + dest.MaxFee = row.MaxFee + } + + lb := hal.LinkBuilder{Base: horizonContext.BaseURL(ctx)} + dest.Links.Account = lb.Link("/accounts", dest.Account) + dest.Links.Ledger = lb.Link("/ledgers", fmt.Sprintf("%d", dest.Ledger)) + dest.Links.Operations = lb.PagedLink("/transactions", dest.ID, "operations") + dest.Links.Effects = lb.PagedLink("/transactions", dest.ID, "effects") + dest.Links.Self = lb.Link("/transactions", dest.ID) + dest.Links.Transaction = dest.Links.Self + dest.Links.Succeeds = lb.Linkf("/transactions?order=desc&cursor=%s", dest.PT) + dest.Links.Precedes = lb.Linkf("/transactions?order=asc&cursor=%s", dest.PT) + + return nil +} + +func memoBytes(envelopeXDR string) (string, error) { + var parsedEnvelope xdr.TransactionEnvelope + if err := xdr.SafeUnmarshalBase64(envelopeXDR, &parsedEnvelope); err != nil { + return "", err + } + + memo := *parsedEnvelope.Memo().Text + return base64.StdEncoding.EncodeToString([]byte(memo)), nil +} + +func timeString(res *protocol.Transaction, in null.Int) string { + if !in.Valid { + return "" + } + + return time.Unix(in.Int64, 0).UTC().Format(time.RFC3339) +} diff --git a/services/horizon/internal/resourceadapter/transaction_result_codes.go b/services/horizon/internal/resourceadapter/transaction_result_codes.go new file mode 100644 index 0000000000..f834a433a5 --- /dev/null +++ b/services/horizon/internal/resourceadapter/transaction_result_codes.go @@ -0,0 +1,30 @@ +package resourceadapter + +import ( + "context" + + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/txsub" +) + +// Populate fills out the details +func PopulateTransactionResultCodes(ctx context.Context, + transactionHash string, + dest *protocol.TransactionResultCodes, + fail *txsub.FailedTransactionError, +) (err error) { + + results, err := fail.TransactionResultCodes(transactionHash) + if err != nil { + return + } + dest.TransactionCode = results.Code + dest.InnerTransactionCode = results.InnerCode + + dest.OperationCodes, err = fail.OperationResultCodes() + if err != nil { + return + } + + return +} diff --git a/services/horizon/internal/resourceadapter/transaction_test.go b/services/horizon/internal/resourceadapter/transaction_test.go new file mode 100644 index 0000000000..b522858240 --- /dev/null +++ b/services/horizon/internal/resourceadapter/transaction_test.go @@ -0,0 +1,210 @@ +package resourceadapter + +import ( + "encoding/base64" + "testing" + + "github.com/guregu/null" + "github.com/stellar/go/xdr" + + . "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/test" + "github.com/stretchr/testify/assert" +) + +// TestPopulateTransaction_Successful tests transaction object population. 
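+// It populates the resource from both a successful and a failed history row and
+// asserts that the Successful flag is carried over.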
+func TestPopulateTransaction_Successful(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + + var ( + dest Transaction + row history.Transaction + ) + + dest = Transaction{} + row = history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + Successful: true, + }, + } + + assert.NoError(t, PopulateTransaction(ctx, row.TransactionHash, &dest, row)) + assert.True(t, dest.Successful) + + dest = Transaction{} + row = history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + Successful: false, + }, + } + + assert.NoError(t, PopulateTransaction(ctx, row.TransactionHash, &dest, row)) + assert.False(t, dest.Successful) +} + +func TestPopulateTransaction_HashMemo(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + dest := Transaction{} + row := history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + MemoType: "hash", + Memo: null.StringFrom("abcdef"), + }, + } + assert.NoError(t, PopulateTransaction(ctx, row.TransactionHash, &dest, row)) + assert.Equal(t, "hash", dest.MemoType) + assert.Equal(t, "abcdef", dest.Memo) + assert.Equal(t, "", dest.MemoBytes) +} + +func TestPopulateTransaction_TextMemo(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + rawMemo := []byte{0, 0, 1, 1, 0, 0, 3, 3} + rawMemoString := string(rawMemo) + + sourceAID := xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + feeSourceAID := xdr.MustAddress("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU") + for _, envelope := range []xdr.TransactionEnvelope{ + { + Type: xdr.EnvelopeTypeEnvelopeTypeTxV0, + V0: &xdr.TransactionV0Envelope{ + Tx: xdr.TransactionV0{ + Memo: xdr.Memo{ + Type: xdr.MemoTypeMemoText, + Text: &rawMemoString, + }, + }, + }, + }, + { + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: sourceAID.ToMuxedAccount(), + Memo: xdr.Memo{ + Type: xdr.MemoTypeMemoText, + Text: &rawMemoString, + }, + }, + }, + }, + { + Type: xdr.EnvelopeTypeEnvelopeTypeTxFeeBump, + FeeBump: &xdr.FeeBumpTransactionEnvelope{ + Tx: xdr.FeeBumpTransaction{ + InnerTx: xdr.FeeBumpTransactionInnerTx{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: sourceAID.ToMuxedAccount(), + Memo: xdr.Memo{ + Type: xdr.MemoTypeMemoText, + Text: &rawMemoString, + }, + }, + }, + }, + FeeSource: feeSourceAID.ToMuxedAccount(), + }, + }, + }, + } { + envelopeXDR, err := xdr.MarshalBase64(envelope) + assert.NoError(t, err) + row := history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + MemoType: "text", + TxEnvelope: envelopeXDR, + Memo: null.StringFrom("sample"), + }, + } + + var dest Transaction + assert.NoError(t, PopulateTransaction(ctx, row.TransactionHash, &dest, row)) + + assert.Equal(t, "text", dest.MemoType) + assert.Equal(t, "sample", dest.Memo) + assert.Equal(t, base64.StdEncoding.EncodeToString(rawMemo), dest.MemoBytes) + } +} + +// TestPopulateTransaction_Fee tests transaction object population. 
+func TestPopulateTransaction_Fee(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + + var ( + dest Transaction + row history.Transaction + ) + + dest = Transaction{} + row = history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + MaxFee: 10000, + FeeCharged: 100, + }, + } + + assert.NoError(t, PopulateTransaction(ctx, row.TransactionHash, &dest, row)) + assert.Equal(t, int64(100), dest.FeeCharged) + assert.Equal(t, int64(10000), dest.MaxFee) +} + +func TestFeeBumpTransaction(t *testing.T) { + ctx, _ := test.ContextWithLogBuffer() + dest := Transaction{} + row := history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + MaxFee: 123, + FeeCharged: 100, + TransactionHash: "cebb875a00ff6e1383aef0fd251a76f22c1f9ab2a2dffcb077855736ade2659a", + FeeAccount: null.StringFrom("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ"), + FeeAccountMuxed: null.StringFrom("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ"), + Account: "GAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSTVY", + AccountMuxed: null.StringFrom("MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26"), + NewMaxFee: null.IntFrom(10000), + InnerTransactionHash: null.StringFrom("2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d"), + Signatures: []string{"a", "b", "c"}, + InnerSignatures: []string{"d", "e", "f"}, + }, + } + + assert.NoError(t, PopulateTransaction(ctx, row.TransactionHash, &dest, row)) + assert.Equal(t, row.TransactionHash, dest.Hash) + assert.Equal(t, row.TransactionHash, dest.ID) + assert.Equal(t, row.FeeAccount.String, dest.FeeAccount) + assert.Equal(t, row.FeeAccountMuxed.String, dest.FeeAccountMuxed) + assert.Equal(t, uint64(0), dest.FeeAccountMuxedID) + assert.Equal(t, row.Account, dest.Account) + assert.Equal(t, row.AccountMuxed.String, dest.AccountMuxed) + assert.Equal(t, uint64(1234), dest.AccountMuxedID) + assert.Equal(t, row.FeeCharged, dest.FeeCharged) + assert.Equal(t, row.NewMaxFee.Int64, dest.MaxFee) + assert.Equal(t, []string{"a", "b", "c"}, dest.Signatures) + assert.Equal(t, row.InnerTransactionHash.String, dest.InnerTransaction.Hash) + assert.Equal(t, row.MaxFee, dest.InnerTransaction.MaxFee) + assert.Equal(t, []string{"d", "e", "f"}, dest.InnerTransaction.Signatures) + assert.Equal(t, row.TransactionHash, dest.FeeBumpTransaction.Hash) + assert.Equal(t, []string{"a", "b", "c"}, dest.FeeBumpTransaction.Signatures) + assert.Equal(t, "/transactions/"+row.TransactionHash, dest.Links.Transaction.Href) + + assert.NoError(t, PopulateTransaction(ctx, row.InnerTransactionHash.String, &dest, row)) + assert.Equal(t, row.InnerTransactionHash.String, dest.Hash) + assert.Equal(t, row.InnerTransactionHash.String, dest.ID) + assert.Equal(t, row.FeeAccount.String, dest.FeeAccount) + assert.Equal(t, row.FeeAccountMuxed.String, dest.FeeAccountMuxed) + assert.Equal(t, uint64(0), dest.FeeAccountMuxedID) + assert.Equal(t, row.Account, dest.Account) + assert.Equal(t, row.AccountMuxed.String, dest.AccountMuxed) + assert.Equal(t, uint64(1234), dest.AccountMuxedID) + assert.Equal(t, row.FeeCharged, dest.FeeCharged) + assert.Equal(t, row.NewMaxFee.Int64, dest.MaxFee) + assert.Equal(t, []string{"d", "e", "f"}, dest.Signatures) + assert.Equal(t, row.InnerTransactionHash.String, dest.InnerTransaction.Hash) + assert.Equal(t, row.MaxFee, dest.InnerTransaction.MaxFee) + assert.Equal(t, []string{"d", "e", "f"}, dest.InnerTransaction.Signatures) + assert.Equal(t, row.TransactionHash, dest.FeeBumpTransaction.Hash) 
+ assert.Equal(t, []string{"a", "b", "c"}, dest.FeeBumpTransaction.Signatures) + assert.Equal(t, "/transactions/"+row.InnerTransactionHash.String, dest.Links.Transaction.Href) +} diff --git a/services/horizon/internal/scripts/check_release_hash/Dockerfile b/services/horizon/internal/scripts/check_release_hash/Dockerfile new file mode 100644 index 0000000000..b7aadc3354 --- /dev/null +++ b/services/horizon/internal/scripts/check_release_hash/Dockerfile @@ -0,0 +1,20 @@ +# Change to Go version used in CI or rebuild with --build-arg. +ARG GO_IMAGE=golang:1.17 +FROM $GO_IMAGE + +WORKDIR /go/src/github.com/stellar/go + +ENV DEBIAN_FRONTEND=noninteractive +# ca-certificates are required to make tls connections +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates curl wget gnupg apt-utils git zip unzip apt-transport-https ca-certificates +RUN wget -qO - https://apt.stellar.org/SDF.asc | APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=true apt-key add - +RUN echo "deb https://apt.stellar.org xenial stable" >/etc/apt/sources.list.d/SDF.list +RUN echo "deb https://apt.stellar.org xenial testing" >/etc/apt/sources.list.d/SDF-testing.list + +RUN git clone https://github.com/stellar/go.git /go/src/github.com/stellar/go +# Fetch dependencies and prebuild binaries. Not necessary but will make check faster. +RUN go run -v ./support/scripts/build_release_artifacts + +COPY check.sh . +RUN chmod +x check.sh +ENTRYPOINT ["./check.sh"] diff --git a/services/horizon/internal/scripts/check_release_hash/README.md b/services/horizon/internal/scripts/check_release_hash/README.md new file mode 100644 index 0000000000..a9316bef6d --- /dev/null +++ b/services/horizon/internal/scripts/check_release_hash/README.md @@ -0,0 +1,9 @@ +# check_release_hash + +Docker image for comparing releases hash to a local builds hash. + +## Usage + +1. Build the image. Optionally pass `--build-arg` with `golang` image you want to use for compilation. See `Dockerfile` for a default value. +2. `docker run -e "TAG=horizon-vX.Y.Z" -e "PACKAGE_VERSION=X.Y.Z-BUILD_ID" check_release_hash` +3. Compare the hashes in the output. `released` directory contains packages from GitHub, `dist` are the locally built packages. 
\ No newline at end of file diff --git a/services/horizon/internal/scripts/check_release_hash/check.sh b/services/horizon/internal/scripts/check_release_hash/check.sh new file mode 100644 index 0000000000..b1377d9810 --- /dev/null +++ b/services/horizon/internal/scripts/check_release_hash/check.sh @@ -0,0 +1,81 @@ +#!/bin/bash +set -e + +apt-get clean +apt-get update +apt-get install -y stellar-horizon=$PACKAGE_VERSION + +mkdir released +cd released + +wget https://github.com/stellar/go/releases/download/$TAG/$TAG-darwin-amd64.tar.gz +wget https://github.com/stellar/go/releases/download/$TAG/$TAG-linux-amd64.tar.gz +wget https://github.com/stellar/go/releases/download/$TAG/$TAG-linux-arm.tar.gz +wget https://github.com/stellar/go/releases/download/$TAG/$TAG-windows-amd64.zip + +tar -xvf $TAG-darwin-amd64.tar.gz +tar -xvf $TAG-linux-amd64.tar.gz +tar -xvf $TAG-linux-arm.tar.gz +unzip $TAG-windows-amd64.zip + +cd - + +git pull origin --tags +git checkout $TAG +# -keep: artifact directories are not removed after packaging +CIRCLE_TAG=$TAG go run -v ./support/scripts/build_release_artifacts -keep + +echo "RESULTS" +echo "=======" +echo "" +echo "compiled version" +./dist/$TAG-linux-amd64/horizon version + +echo "github releases version" +./released/$TAG-linux-amd64/horizon version + +echo "debian package version" +stellar-horizon version + +echo "" + +suffixes=(darwin-amd64 linux-amd64 linux-arm windows-amd64) +for S in "${suffixes[@]}" +do + released="" + dist="" + msg="" + + if [ -f "./released/$TAG-$S.tar.gz" ]; then + released=($(shasum -a 256 ./released/$TAG-$S/horizon)) + else + # windows + released=($(shasum -a 256 ./released/$TAG-$S/horizon.exe)) + fi + + if [ -f "./dist/$TAG-$S.tar.gz" ]; then + dist=($(shasum -a 256 ./dist/$TAG-$S/horizon)) + else + # windows + dist=($(shasum -a 256 ./dist/$TAG-$S/horizon.exe)) + fi + + if [ $S == "linux-amd64" ]; then + path=$(which stellar-horizon) + debian=($(shasum -a 256 $path)) + + if [[ "$released" == "$dist" && "$dist" == "$debian" ]]; then + msg="$TAG-$S ok" + else + msg="$TAG-$S NO MATCH! github=$released compile=$dist debian=$debian" + fi + else + if [ "$released" == "$dist" ]; then + msg="$TAG-$S ok" + else + msg="$TAG-$S NO MATCH! github=$released compile=$dist" + fi + fi + + echo $msg +done \ No newline at end of file diff --git a/services/horizon/internal/scripts/current_os.go b/services/horizon/internal/scripts/current_os.go new file mode 100644 index 0000000000..eeb13b1c80 --- /dev/null +++ b/services/horizon/internal/scripts/current_os.go @@ -0,0 +1,10 @@ +package main + +import ( + "fmt" + "runtime" +) + +func main() { + fmt.Println(runtime.GOOS) +} diff --git a/services/horizon/internal/scripts/dev.bash b/services/horizon/internal/scripts/dev.bash new file mode 100755 index 0000000000..b9c2d7fd77 --- /dev/null +++ b/services/horizon/internal/scripts/dev.bash @@ -0,0 +1,5 @@ +#! /usr/bin/env bash + +set -e + +mcdev-each-change go test {{.Pkg}} diff --git a/services/horizon/internal/scripts/rebuild_schema.bash b/services/horizon/internal/scripts/rebuild_schema.bash new file mode 100755 index 0000000000..a76f01d721 --- /dev/null +++ b/services/horizon/internal/scripts/rebuild_schema.bash @@ -0,0 +1,10 @@ +#! /usr/bin/env bash +set -e + +# This scripts rebuilds the latest.sql file included in the schema package. +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +GOTOP="$( cd "$DIR/../../../../../../../.." 
&& pwd )" + +go generate github.com/stellar/go/services/horizon/internal/db2/schema +go generate github.com/stellar/go/services/horizon/internal/test +go install github.com/stellar/go/services/horizon diff --git a/services/horizon/internal/simplepath/doc.go b/services/horizon/internal/simplepath/doc.go new file mode 100644 index 0000000000..1a3e71532b --- /dev/null +++ b/services/horizon/internal/simplepath/doc.go @@ -0,0 +1,38 @@ +// Package simplepath provides an implementation of paths. Finder that performs +// a breadth first search for paths against an orderbook. +// +// The core algorithm works as follows: +// 1. `search` object contains a queue of currently extended paths. Queue is +// initialized with a single-asset path containing destination asset. Paths +// are linked lists, head of the path is a newly extended source asset, tail +// of the path is always the destination asset. +// 2. Every iteration of search does the following: +// - pops a path from the queue, +// - checks if the path starts with a source asset, if it does, it appends +// the path to results, +// - finds all assets connected to the head of the current path and prepends +// the path, calculating the current cost. +// Algorithm ends when there is no more paths to extend (len(queue) = 0) or +// `maxResults` has been reached. +// +// The actual calculation of the cost is happening in `pathNode.Cost()` method. +// There are a couple of important things to note: +// 1. We are given `DestinationAmount` and the destination asset is the tail of +// the list. So we need to start from the end of the path and continue to the +// front. +// 2. Because we are going from the tail to the head of the path, given the path +// A -> B -> C. we are interested in finding: +// - amount of B needed to buy `DestinationAmount` of C, +// - amount of A needed to by above amount of B. +// 3. Finally, the actual path payment will sell A, buy B, sell B, buy C. So we +// need to check the following orderbooks: +// - sell C, buy B (the user will sell B, buy C), +// - sell B, buy A (the user will sell A, buy B). +// +// The algorithm works as follows: +// 1. Because the head of the path is source account, we build a stack of assets +// to reverse that order. +// 2. We start with the last asset (pop the stack), calculate it's cost (if not +// cached) and continue towards the source asset (bottom of the stack). +// 3. We return the final cost. 
+package simplepath diff --git a/services/horizon/internal/simplepath/inmemory.go b/services/horizon/internal/simplepath/inmemory.go new file mode 100644 index 0000000000..1009fa611b --- /dev/null +++ b/services/horizon/internal/simplepath/inmemory.go @@ -0,0 +1,119 @@ +package simplepath + +import ( + "context" + + "github.com/go-errors/errors" + "github.com/stellar/go/exp/orderbook" + "github.com/stellar/go/services/horizon/internal/paths" + "github.com/stellar/go/xdr" +) + +const ( + maxAssetsPerPath = 5 + // MaxInMemoryPathLength is the maximum path length which can be queried by the InMemoryFinder + MaxInMemoryPathLength = 5 +) + +var ( + // ErrEmptyInMemoryOrderBook indicates that the in memory order book is not yet populated + ErrEmptyInMemoryOrderBook = errors.New("Empty orderbook") +) + +// InMemoryFinder is an implementation of the path finding interface +// using the in memory orderbook +type InMemoryFinder struct { + graph *orderbook.OrderBookGraph + includePools bool +} + +// NewInMemoryFinder constructs a new InMemoryFinder instance +func NewInMemoryFinder(graph *orderbook.OrderBookGraph, includePools bool) InMemoryFinder { + return InMemoryFinder{ + graph: graph, + includePools: includePools, + } +} + +// Find implements the path payments finder interface +func (finder InMemoryFinder) Find(ctx context.Context, q paths.Query, maxLength uint) ([]paths.Path, uint32, error) { + if finder.graph.IsEmpty() { + return nil, 0, ErrEmptyInMemoryOrderBook + } + + if maxLength == 0 { + maxLength = MaxInMemoryPathLength + } + if maxLength > MaxInMemoryPathLength { + return nil, 0, errors.New("invalid value of maxLength") + } + + orderbookPaths, lastLedger, err := finder.graph.FindPaths( + ctx, + int(maxLength), + q.DestinationAsset, + q.DestinationAmount, + q.SourceAccount, + q.SourceAssets, + q.SourceAssetBalances, + q.ValidateSourceBalance, + maxAssetsPerPath, + finder.includePools, + ) + results := make([]paths.Path, len(orderbookPaths)) + for i, path := range orderbookPaths { + results[i] = paths.Path{ + Path: path.InteriorNodes, + Source: path.SourceAsset, + SourceAmount: path.SourceAmount, + Destination: path.DestinationAsset, + DestinationAmount: path.DestinationAmount, + } + } + return results, lastLedger, err +} + +// FindFixedPaths returns a list of payment paths where the source and destination +// assets are fixed. All returned payment paths will start by spending `amountToSpend` +// of `sourceAsset` and will end with some positive balance of `destinationAsset`. +// `sourceAccountID` is optional. 
if `sourceAccountID` is provided then no offers +// created by `sourceAccountID` will be considered when evaluating payment paths +func (finder InMemoryFinder) FindFixedPaths( + ctx context.Context, + sourceAsset xdr.Asset, + amountToSpend xdr.Int64, + destinationAssets []xdr.Asset, + maxLength uint, +) ([]paths.Path, uint32, error) { + if finder.graph.IsEmpty() { + return nil, 0, ErrEmptyInMemoryOrderBook + } + + if maxLength == 0 { + maxLength = MaxInMemoryPathLength + } + if maxLength > MaxInMemoryPathLength { + return nil, 0, errors.New("invalid value of maxLength") + } + + orderbookPaths, lastLedger, err := finder.graph.FindFixedPaths( + ctx, + int(maxLength), + sourceAsset, + amountToSpend, + destinationAssets, + maxAssetsPerPath, + finder.includePools, + ) + results := make([]paths.Path, len(orderbookPaths)) + for i, path := range orderbookPaths { + results[i] = paths.Path{ + Path: path.InteriorNodes, + Source: path.SourceAsset, + SourceAmount: path.SourceAmount, + Destination: path.DestinationAsset, + DestinationAmount: path.DestinationAmount, + } + } + return results, lastLedger, err +} diff --git a/services/horizon/internal/test/assertion.go b/services/horizon/internal/test/assertion.go new file mode 100644 index 0000000000..d518cea210 --- /dev/null +++ b/services/horizon/internal/test/assertion.go @@ -0,0 +1,75 @@ +package test + +import ( + "bytes" + "encoding/json" + "strings" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/support/render/problem" +) + +// Assertions provides an assertions helper. Custom assertions for this package +// can be defined as methods on this struct. +// foo +type Assertions struct { + *assert.Assertions +} + +func (a *Assertions) PageOf(length int, body *bytes.Buffer) bool { + + var result map[string]interface{} + err := json.Unmarshal(body.Bytes(), &result) + + if !a.NoError(err, "failed to parse body") { + return false + } + + embedded, ok := result["_embedded"] + + if !a.True(ok, "_embedded not found in response") { + return false + } + + records, ok := embedded.(map[string]interface{})["records"] + + if !a.True(ok, "no 'records' property on _embedded object") { + return false + } + + return a.Len(records, length) +} + +// Problem asserts that `body` is a serialized problem equal to `expected`, +// using Type and Status to compare for equality. +func (a *Assertions) Problem(body *bytes.Buffer, expected problem.P) bool { + var actual problem.P + err := json.Unmarshal(body.Bytes(), &actual) + if !a.NoError(err, "failed to parse body") { + return false + } + + actual.Type = strings.TrimPrefix(actual.Type, problem.Default.ServiceHost()) + if expected.Type != "" && a.Equal(expected.Type, actual.Type, "problem type didn't match") { + return false + } + + if expected.Status != 0 && a.Equal(expected.Status, actual.Status, "problem status didn't match") { + return false + } + + return true +} + +// ProblemType asserts that the provided `body` is a JSON serialized problem +// whose type is `typ` +func (a *Assertions) ProblemType(body *bytes.Buffer, typ string) bool { + var actual problem.P + err := json.Unmarshal(body.Bytes(), &actual) + if !a.NoError(err, "failed to parse body") { + return false + } + + return a.Problem(body, problem.P{Type: typ}) +} diff --git a/services/horizon/internal/test/db/main.go b/services/horizon/internal/test/db/main.go new file mode 100644 index 0000000000..af825a721f --- /dev/null +++ b/services/horizon/internal/test/db/main.go @@ -0,0 +1,63 @@ +// Package db provides helpers to connect to test databases. 
It has no +// internal dependencies on horizon and so should be able to be imported by +// any horizon package. +package db + +import ( + "fmt" + "log" + "testing" + + "github.com/jmoiron/sqlx" + // pq enables postgres support + _ "github.com/lib/pq" + db "github.com/stellar/go/support/db/dbtest" +) + +var ( + coreDB *sqlx.DB + coreUrl *string + horizonDB *sqlx.DB + horizonUrl *string +) + +// Horizon returns a connection to the horizon test database +func Horizon(t *testing.T) *sqlx.DB { + if horizonDB != nil { + return horizonDB + } + postgres := db.Postgres(t) + horizonUrl = &postgres.DSN + horizonDB = postgres.Open() + + return horizonDB +} + +// HorizonURL returns the database connection the url any test +// use when connecting to the history/horizon database +func HorizonURL() string { + if horizonUrl == nil { + log.Panic(fmt.Errorf("Horizon not initialized")) + } + return *horizonUrl +} + +// StellarCore returns a connection to the stellar core test database +func StellarCore(t *testing.T) *sqlx.DB { + if coreDB != nil { + return coreDB + } + postgres := db.Postgres(t) + coreUrl = &postgres.DSN + coreDB = postgres.Open() + return coreDB +} + +// StellarCoreURL returns the database connection the url any test +// use when connecting to the stellar-core database +func StellarCoreURL() string { + if coreUrl == nil { + log.Panic(fmt.Errorf("StellarCore not initialized")) + } + return *coreUrl +} diff --git a/services/horizon/internal/test/http.go b/services/horizon/internal/test/http.go new file mode 100644 index 0000000000..2e42e681ee --- /dev/null +++ b/services/horizon/internal/test/http.go @@ -0,0 +1,70 @@ +package test + +import ( + "net/http" + "net/http/httptest" + "net/url" + "strings" + + "github.com/go-chi/chi" +) + +type RequestHelper interface { + Get(string, ...func(*http.Request)) *httptest.ResponseRecorder + Post(string, url.Values, ...func(*http.Request)) *httptest.ResponseRecorder +} + +type requestHelper struct { + router *chi.Mux +} + +func RequestHelperRaw(r *http.Request) { + r.Header.Set("Accept", "application/octet-stream") +} + +func RequestHelperStreaming(r *http.Request) { + r.Header.Set("Accept", "text/event-stream") +} + +func NewRequestHelper(router *chi.Mux) RequestHelper { + return &requestHelper{router} +} + +func (rh *requestHelper) Get( + path string, + mods ...func(*http.Request), +) *httptest.ResponseRecorder { + + req, _ := http.NewRequest("GET", path, nil) + return rh.Execute(req, mods) +} + +func (rh *requestHelper) Post( + path string, + form url.Values, + mods ...func(*http.Request), +) *httptest.ResponseRecorder { + + body := strings.NewReader(form.Encode()) + req, _ := http.NewRequest("POST", path, body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return rh.Execute(req, mods) +} + +func (rh *requestHelper) Execute( + req *http.Request, + requestModFns []func(*http.Request), +) *httptest.ResponseRecorder { + + req.RemoteAddr = "127.0.0.1" + req.Host = "localhost" + for _, fn := range requestModFns { + fn(req) + } + + w := httptest.NewRecorder() + + rh.router.ServeHTTP(w, req) + return w + +} diff --git a/services/horizon/internal/test/integration/environment.go b/services/horizon/internal/test/integration/environment.go new file mode 100644 index 0000000000..9a3d7c09d2 --- /dev/null +++ b/services/horizon/internal/test/integration/environment.go @@ -0,0 +1,45 @@ +//lint:file-ignore U1001 Ignore all unused code, this is only used in tests. 
+package integration + +import ( + "os" +) + +type EnvironmentManager struct { + oldEnvironment, newEnvironment map[string]string +} + +func NewEnvironmentManager() *EnvironmentManager { + env := &EnvironmentManager{} + env.oldEnvironment = make(map[string]string) + env.newEnvironment = make(map[string]string) + return env +} + +// Add sets a new environment variable, saving the original value (if any). +func (envmgr *EnvironmentManager) Add(key, value string) error { + // If someone pushes an environmental variable more than once, we don't want + // to lose the *original* value, so we're being careful here. + if _, ok := envmgr.newEnvironment[key]; !ok { + if oldValue, ok := os.LookupEnv(key); ok { + envmgr.oldEnvironment[key] = oldValue + } + } + + envmgr.newEnvironment[key] = value + return os.Setenv(key, value) +} + +// Restore restores the environment prior to any modifications. +// +// You should probably use this alongside `defer` to ensure the global +// environment isn't modified for longer than you intend. +func (envmgr *EnvironmentManager) Restore() { + for key := range envmgr.newEnvironment { + if oldValue, ok := envmgr.oldEnvironment[key]; ok { + os.Setenv(key, oldValue) + } else { + os.Unsetenv(key) + } + } +} diff --git a/services/horizon/internal/test/integration/integration.go b/services/horizon/internal/test/integration/integration.go new file mode 100644 index 0000000000..e5a265da97 --- /dev/null +++ b/services/horizon/internal/test/integration/integration.go @@ -0,0 +1,822 @@ +//lint:file-ignore U1001 Ignore all unused code, this is only used in tests. +package integration + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/signal" + "path/filepath" + "strconv" + "strings" + "sync" + "syscall" + "testing" + "time" + + "github.com/spf13/cobra" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stretchr/testify/assert" + + sdk "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/clients/stellarcore" + "github.com/stellar/go/keypair" + proto "github.com/stellar/go/protocols/horizon" + horizon "github.com/stellar/go/services/horizon/internal" + "github.com/stellar/go/support/db/dbtest" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" +) + +const ( + StandaloneNetworkPassphrase = "Standalone Network ; February 2017" + stellarCorePostgresPassword = "mysecretpassword" + adminPort = 6060 + stellarCorePort = 11626 + stellarCorePostgresPort = 5641 + historyArchivePort = 1570 +) + +var ( + RunWithCaptiveCore = os.Getenv("HORIZON_INTEGRATION_ENABLE_CAPTIVE_CORE") != "" +) + +type Config struct { + PostgresURL string + ProtocolVersion uint32 + SkipContainerCreation bool + CoreDockerImage string + + // Weird naming here because bools default to false, but we want to start + // Horizon by default. + SkipHorizonStart bool + + // If you want to override the default parameters passed to Horizon, you can + // set this map accordingly. All of them are passed along as --k=v, but if + // you pass an empty value, the parameter will be dropped. (Note that you + // should exclude the prepending `--` from keys; this is for compatibility + // with the constant names in flags.go) + // + // You can also control the environmental variables in a similar way, but + // note that CLI args take precedence over envvars, so set the corresponding + // CLI arg empty. 
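+	//
+	// For example (illustrative values): {"ingest": "false", "admin-port": ""}
+	// passes --ingest=false and drops --admin-port entirely.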
+ HorizonParameters map[string]string + HorizonEnvironment map[string]string +} + +type CaptiveConfig struct { + binaryPath string + configPath string +} + +type Test struct { + t *testing.T + + composePath string + + config Config + coreConfig CaptiveConfig + horizonConfig horizon.Config + environment *EnvironmentManager + + horizonClient *sdk.Client + coreClient *stellarcore.Client + + app *horizon.App + appStopped chan struct{} + shutdownOnce sync.Once + shutdownCalls []func() + masterKey *keypair.Full + passPhrase string +} + +func NewTestForRemoteHorizon(t *testing.T, horizonURL string, passPhrase string, masterKey *keypair.Full) *Test { + return &Test{ + t: t, + horizonClient: &sdk.Client{HorizonURL: horizonURL}, + masterKey: masterKey, + passPhrase: passPhrase, + } +} + +// NewTest starts a new environment for integration test at a given +// protocol version and blocks until Horizon starts ingesting. +// +// Skips the test if HORIZON_INTEGRATION_TESTS env variable is not set. +// +// WARNING: This requires Docker Compose installed. +func NewTest(t *testing.T, config Config) *Test { + if os.Getenv("HORIZON_INTEGRATION_TESTS") == "" { + t.Skip("skipping integration test: HORIZON_INTEGRATION_TESTS not set") + } + + // If not specific explicitly, set the protocol to the maximum supported version + if config.ProtocolVersion == 0 { + config.ProtocolVersion = ingest.MaxSupportedProtocolVersion + } + + composePath := findDockerComposePath() + i := &Test{ + t: t, + config: config, + composePath: composePath, + passPhrase: StandaloneNetworkPassphrase, + environment: NewEnvironmentManager(), + } + + i.configureCaptiveCore() + + // Only run Stellar Core container and its dependencies. + i.runComposeCommand("up", "--detach", "--quiet-pull", "--no-color", "core") + i.prepareShutdownHandlers() + i.coreClient = &stellarcore.Client{URL: "http://localhost:" + strconv.Itoa(stellarCorePort)} + i.waitForCore() + + if !config.SkipHorizonStart { + if innerErr := i.StartHorizon(); innerErr != nil { + t.Fatalf("Failed to start Horizon: %v", innerErr) + } + + i.WaitForHorizon() + } + + return i +} + +func (i *Test) configureCaptiveCore() { + // We either test Captive Core through environment variables or through + // custom Horizon parameters. + if RunWithCaptiveCore { + composePath := findDockerComposePath() + i.coreConfig.binaryPath = os.Getenv("CAPTIVE_CORE_BIN") + i.coreConfig.configPath = filepath.Join(composePath, "captive-core-integration-tests.cfg") + } + + if value := i.getParameter( + horizon.StellarCoreBinaryPathName, + "STELLAR_CORE_BINARY_PATH", + ); value != "" { + i.coreConfig.binaryPath = value + } + if value := i.getParameter( + horizon.CaptiveCoreConfigPathName, + "CAPTIVE_CORE_CONFIG_PATH", + ); value != "" { + i.coreConfig.configPath = value + } +} + +func (i *Test) getParameter(argName, envName string) string { + if value, ok := i.config.HorizonEnvironment[envName]; ok { + return value + } + if value, ok := i.config.HorizonParameters[argName]; ok { + return value + } + return "" +} + +// Runs a docker-compose command applied to the above configs +func (i *Test) runComposeCommand(args ...string) { + integrationYaml := filepath.Join(i.composePath, "docker-compose.integration-tests.yml") + + cmdline := append([]string{"-f", integrationYaml}, args...) + cmd := exec.Command("docker-compose", cmdline...) 
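+	// Pass a custom core image (if any) to docker-compose via the CORE_IMAGE
+	// environment variable.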
+ if i.config.CoreDockerImage != "" { + cmd.Env = append( + os.Environ(), + fmt.Sprintf("CORE_IMAGE=%s", i.config.CoreDockerImage), + ) + } + + i.t.Log("Running", cmd.Env, cmd.Args) + out, innerErr := cmd.Output() + if exitErr, ok := innerErr.(*exec.ExitError); ok { + fmt.Printf("stdout:\n%s\n", string(out)) + fmt.Printf("stderr:\n%s\n", string(exitErr.Stderr)) + } + + if innerErr != nil { + i.t.Fatalf("Compose command failed: %v", innerErr) + } +} + +func (i *Test) prepareShutdownHandlers() { + i.shutdownCalls = append(i.shutdownCalls, + func() { + if i.app != nil { + i.app.Close() + } + i.runComposeCommand("rm", "-fvs", "core") + i.runComposeCommand("rm", "-fvs", "core-postgres") + }, + i.environment.Restore, + ) + + // Register cleanup handlers (on panic and ctrl+c) so the containers are + // stopped even if ingestion or testing fails. + i.t.Cleanup(i.Shutdown) + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + i.Shutdown() + os.Exit(int(syscall.SIGTERM)) + }() +} + +func (i *Test) RestartHorizon() error { + i.StopHorizon() + + if err := i.StartHorizon(); err != nil { + return err + } + + i.WaitForHorizon() + return nil +} + +func (i *Test) GetHorizonConfig() horizon.Config { + return i.horizonConfig +} + +// Shutdown stops the integration tests and destroys all its associated +// resources. It will be implicitly called when the calling test (i.e. the +// `testing.Test` passed to `New()`) is finished if it hasn't been explicitly +// called before. +func (i *Test) Shutdown() { + i.shutdownOnce.Do(func() { + // run them in the opposite order in which they where added + for callI := len(i.shutdownCalls) - 1; callI >= 0; callI-- { + i.shutdownCalls[callI]() + } + }) +} + +func (i *Test) StartHorizon() error { + horizonPostgresURL := i.config.PostgresURL + if horizonPostgresURL == "" { + postgres := dbtest.Postgres(i.t) + i.shutdownCalls = append(i.shutdownCalls, func() { + // FIXME: Unfortunately, Horizon leaves open sessions behind, + // leading to a "database is being accessed by other users" + // error when trying to drop it. + // postgres.Close() + }) + horizonPostgresURL = postgres.DSN + } + + config, configOpts := horizon.Flags() + cmd := &cobra.Command{ + Use: "horizon", + Short: "Client-facing API server for the Stellar network", + Long: "Client-facing API server for the Stellar network.", + Run: func(cmd *cobra.Command, args []string) { + var err error + i.app, err = horizon.NewAppFromFlags(config, configOpts) + if err != nil { + // Explicitly exit here as that's how these tests are structured for now. + fmt.Println(err) + os.Exit(1) + } + }, + } + + // To facilitate custom runs of Horizon, we merge a default set of + // parameters with the tester-supplied ones (if any). + // + // TODO: Ideally, we'd be pulling host/port information from the Docker + // Compose YAML file itself rather than hardcoding it. 
+ hostname := "localhost" + coreBinaryPath := i.coreConfig.binaryPath + captiveCoreConfigPath := i.coreConfig.configPath + + defaultArgs := map[string]string{ + "stellar-core-url": i.coreClient.URL, + "stellar-core-db-url": fmt.Sprintf( + "postgres://postgres:%s@%s:%d/stellar?sslmode=disable", + stellarCorePostgresPassword, + hostname, + stellarCorePostgresPort, + ), + "stellar-core-binary-path": coreBinaryPath, + "captive-core-config-path": captiveCoreConfigPath, + "captive-core-http-port": "21626", + "enable-captive-core-ingestion": strconv.FormatBool(len(coreBinaryPath) > 0), + "ingest": "true", + "history-archive-urls": fmt.Sprintf("http://%s:%d", hostname, historyArchivePort), + "db-url": horizonPostgresURL, + "network-passphrase": i.passPhrase, + "apply-migrations": "true", + "admin-port": strconv.Itoa(i.AdminPort()), + "port": "8000", + // due to ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING + "checkpoint-frequency": "8", + "per-hour-rate-limit": "0", // disable rate limiting + } + + merged := MergeMaps(defaultArgs, i.config.HorizonParameters) + args := mapToFlags(merged) + + // initialize core arguments + i.t.Log("Horizon command line:", args) + var env strings.Builder + for key, value := range i.config.HorizonEnvironment { + env.WriteString(fmt.Sprintf("%s=%s ", key, value)) + } + i.t.Logf("Horizon environmental variables: %s\n", env.String()) + + // prepare env + cmd.SetArgs(args) + for key, value := range i.config.HorizonEnvironment { + innerErr := i.environment.Add(key, value) + if innerErr != nil { + return errors.Wrap(innerErr, fmt.Sprintf( + "failed to set envvar (%s=%s)", key, value)) + } + } + + var err error + if err = configOpts.Init(cmd); err != nil { + return errors.Wrap(err, "cannot initialize params") + } + + if err = cmd.Execute(); err != nil { + return errors.Wrap(err, "cannot initialize Horizon") + } + + horizonPort := "8000" + if port, ok := merged["--port"]; ok { + horizonPort = port + } + i.horizonConfig = *config + i.horizonClient = &sdk.Client{ + HorizonURL: fmt.Sprintf("http://%s:%s", hostname, horizonPort), + } + + if err = i.app.Ingestion().BuildGenesisState(); err != nil { + return errors.Wrap(err, "cannot build genesis state") + } + + done := make(chan struct{}) + go func() { + i.app.Serve() + close(done) + }() + i.appStopped = done + + return nil +} + +// Wait for core to be up and manually close the first ledger +func (i *Test) waitForCore() { + i.t.Log("Waiting for core to be up...") + for t := 30 * time.Second; t >= 0; t -= time.Second { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + _, err := i.coreClient.Info(ctx) + cancel() + if err != nil { + i.t.Logf("could not obtain info response: %v", err) + time.Sleep(time.Second) + continue + } + break + } + + i.UpgradeProtocol(i.config.ProtocolVersion) + + for t := 0; t < 5; t++ { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + info, err := i.coreClient.Info(ctx) + cancel() + if err != nil || !info.IsSynced() { + i.t.Logf("Core is still not synced: %v %v", err, info) + time.Sleep(time.Second) + continue + } + i.t.Log("Core is up.") + return + } + i.t.Fatal("Core could not sync after 30s") +} + +// UpgradeProtocol arms Core with upgrade and blocks until protocol is upgraded. 
+func (i *Test) UpgradeProtocol(version uint32) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + err := i.coreClient.Upgrade(ctx, int(version)) + cancel() + if err != nil { + i.t.Fatalf("could not upgrade protocol: %v", err) + } + + for t := 0; t < 10; t++ { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + info, err := i.coreClient.Info(ctx) + cancel() + if err != nil { + i.t.Logf("could not obtain info response: %v", err) + time.Sleep(time.Second) + continue + } + + if info.Info.Ledger.Version == int(version) { + i.t.Logf("Protocol upgraded to: %d", info.Info.Ledger.Version) + return + } + time.Sleep(time.Second) + } + + i.t.Fatalf("could not upgrade protocol in 10s") +} + +func (i *Test) WaitForHorizon() { + for t := 60; t >= 0; t -= 1 { + time.Sleep(time.Second) + + i.t.Log("Waiting for ingestion and protocol upgrade...") + root, err := i.horizonClient.Root() + if err != nil { + i.t.Logf("could not obtain root response %v", err) + continue + } + + if root.HorizonSequence < 3 || + int(root.HorizonSequence) != int(root.IngestSequence) { + i.t.Logf("Horizon ingesting... %v", root) + continue + } + + if uint32(root.CurrentProtocolVersion) == i.config.ProtocolVersion { + i.t.Logf("Horizon protocol version matches... %v", root) + return + } + } + + i.t.Fatal("Horizon not ingesting...") +} + +// Client returns horizon.Client connected to started Horizon instance. +func (i *Test) Client() *sdk.Client { + return i.horizonClient +} + +// Horizon returns the horizon.App instance for the current integration test +func (i *Test) Horizon() *horizon.App { + return i.app +} + +// StopHorizon shuts down the running Horizon process +func (i *Test) StopHorizon() { + i.app.CloseDB() + i.app.Close() + + // Wait for Horizon to shut down completely. + <-i.appStopped + + i.app = nil +} + +// AdminPort returns Horizon admin port. +func (i *Test) AdminPort() int { + return adminPort +} + +// Metrics URL returns Horizon metrics URL. +func (i *Test) MetricsURL() string { + return fmt.Sprintf("http://localhost:%d/metrics", i.AdminPort()) +} + +// Master returns a keypair of the network masterKey account. +func (i *Test) Master() *keypair.Full { + if i.masterKey != nil { + return i.masterKey + } + return keypair.Master(i.passPhrase).(*keypair.Full) +} + +func (i *Test) MasterAccount() txnbuild.Account { + master, client := i.Master(), i.Client() + request := sdk.AccountRequest{AccountID: master.Address()} + account, err := client.AccountDetail(request) + panicIf(err) + return &account +} + +func (i *Test) CurrentTest() *testing.T { + return i.t +} + +/* Utility functions for easier test case creation. */ + +// Creates new accounts via the master account. +// +// It funds each account with the given balance and then queries the API to +// find the randomized sequence number for future operations. +// +// Returns: The slice of created keypairs and account objects. +// +// Note: panics on any errors, since we assume that tests cannot proceed without +// this method succeeding. +func (i *Test) CreateAccounts(count int, initialBalance string) ([]*keypair.Full, []txnbuild.Account) { + client := i.Client() + master := i.Master() + + pairs := make([]*keypair.Full, count) + ops := make([]txnbuild.Operation, count) + + // Two paths here: either caller already did some stuff with the master + // account so we should retrieve the sequence number, or caller hasn't and + // we start from scratch. 
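+	// If the lookup below fails, seq simply stays at 0.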
+ seq := int64(0) + request := sdk.AccountRequest{AccountID: master.Address()} + account, err := client.AccountDetail(request) + if err == nil { + seq, err = strconv.ParseInt(account.Sequence, 10, 64) // str -> bigint + panicIf(err) + } + + masterAccount := txnbuild.SimpleAccount{ + AccountID: master.Address(), + Sequence: seq, + } + + for i := 0; i < count; i++ { + pair, _ := keypair.Random() + pairs[i] = pair + + ops[i] = &txnbuild.CreateAccount{ + SourceAccount: masterAccount.AccountID, + Destination: pair.Address(), + Amount: initialBalance, + } + } + + // Submit transaction, then retrieve new account details. + _ = i.MustSubmitOperations(&masterAccount, master, ops...) + + accounts := make([]txnbuild.Account, count) + for i, kp := range pairs { + request := sdk.AccountRequest{AccountID: kp.Address()} + account, err := client.AccountDetail(request) + panicIf(err) + + accounts[i] = &account + } + + for _, keys := range pairs { + i.t.Logf("Funded %s (%s) with %s XLM.\n", + keys.Seed(), keys.Address(), initialBalance) + } + + return pairs, accounts +} + +// Panics on any error establishing a trustline. +func (i *Test) MustEstablishTrustline( + truster *keypair.Full, account txnbuild.Account, asset txnbuild.Asset, +) (resp proto.Transaction) { + txResp, err := i.EstablishTrustline(truster, account, asset) + panicIf(err) + return txResp +} + +// EstablishTrustline works on a given asset for a particular account. +func (i *Test) EstablishTrustline( + truster *keypair.Full, account txnbuild.Account, asset txnbuild.Asset, +) (proto.Transaction, error) { + if asset.IsNative() { + return proto.Transaction{}, nil + } + line, err := asset.ToChangeTrustAsset() + if err != nil { + return proto.Transaction{}, err + } + return i.SubmitOperations(account, truster, &txnbuild.ChangeTrust{ + Line: line, + Limit: "2000", + }) +} + +// MustCreateClaimableBalance panics on any error creating a claimable balance. +func (i *Test) MustCreateClaimableBalance( + source *keypair.Full, asset txnbuild.Asset, amount string, + claimants ...txnbuild.Claimant, +) (claim proto.ClaimableBalance) { + account := i.MustGetAccount(source) + _ = i.MustSubmitOperations(&account, source, + &txnbuild.CreateClaimableBalance{ + Destinations: claimants, + Asset: asset, + Amount: amount, + }, + ) + + // Ensure it exists in the global list + balances, err := i.Client().ClaimableBalances(sdk.ClaimableBalanceRequest{}) + panicIf(err) + + claims := balances.Embedded.Records + if len(claims) == 0 { + panic(-1) + } + + claim = claims[len(claims)-1] // latest one + i.t.Logf("Created claimable balance w/ id=%s", claim.BalanceID) + return +} + +// MustGetAccount panics on any error retrieves an account's details from its +// key. This means it must have previously been funded. +func (i *Test) MustGetAccount(source *keypair.Full) proto.Account { + client := i.Client() + account, err := client.AccountDetail(sdk.AccountRequest{AccountID: source.Address()}) + panicIf(err) + return account +} + +// MustSubmitOperations submits a signed transaction from an account with +// standard options. +// +// Namely, we set the standard fee, time bounds, etc. to "non-production" +// defaults that work well for tests. +// +// Most transactions only need one signer, so see the more verbose +// `MustSubmitOperationsWithSigners` below for multi-sig transactions. +// +// Note: We assume that transaction will be successful here so we panic in case +// of all errors. To allow failures, use `SubmitOperations`. 
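+//
+// Illustrative usage (variable names are examples only):
+//
+//	keys, accounts := itest.CreateAccounts(2, "1000")
+//	itest.MustSubmitOperations(accounts[0], keys[0], &txnbuild.Payment{
+//		Destination: keys[1].Address(),
+//		Amount:      "10",
+//		Asset:       txnbuild.NativeAsset{},
+//	})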
+func (i *Test) MustSubmitOperations( + source txnbuild.Account, signer *keypair.Full, ops ...txnbuild.Operation, +) proto.Transaction { + tx, err := i.SubmitOperations(source, signer, ops...) + panicIf(err) + return tx +} + +func (i *Test) SubmitOperations( + source txnbuild.Account, signer *keypair.Full, ops ...txnbuild.Operation, +) (proto.Transaction, error) { + return i.SubmitMultiSigOperations(source, []*keypair.Full{signer}, ops...) +} + +func (i *Test) SubmitMultiSigOperations( + source txnbuild.Account, signers []*keypair.Full, ops ...txnbuild.Operation, +) (proto.Transaction, error) { + tx, err := i.CreateSignedTransaction(source, signers, ops...) + if err != nil { + return proto.Transaction{}, err + } + return i.Client().SubmitTransaction(tx) +} + +func (i *Test) MustSubmitMultiSigOperations( + source txnbuild.Account, signers []*keypair.Full, ops ...txnbuild.Operation, +) proto.Transaction { + tx, err := i.SubmitMultiSigOperations(source, signers, ops...) + panicIf(err) + return tx +} + +func (i *Test) CreateSignedTransaction( + source txnbuild.Account, signers []*keypair.Full, ops ...txnbuild.Operation, +) (*txnbuild.Transaction, error) { + txParams := txnbuild.TransactionParams{ + SourceAccount: source, + Operations: ops, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + IncrementSequenceNum: true, + } + + tx, err := txnbuild.NewTransaction(txParams) + if err != nil { + return nil, err + } + + for _, signer := range signers { + tx, err = tx.Sign(i.passPhrase, signer) + if err != nil { + return nil, err + } + } + + return tx, nil +} + +func (i *Test) GetCurrentCoreLedgerSequence() (int, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + info, err := i.coreClient.Info(ctx) + if err != nil { + return 0, err + } + return info.Info.Ledger.Num, nil +} + +// LogFailedTx is a convenience function to provide verbose information about a +// failing transaction to the test output log, if it's expected to succeed. +func (i *Test) LogFailedTx(txResponse proto.Transaction, horizonResult error) { + t := i.CurrentTest() + assert.NoErrorf(t, horizonResult, "Submitting the transaction failed") + if prob := sdk.GetError(horizonResult); prob != nil { + t.Logf(" problem: %s\n", prob.Problem.Detail) + t.Logf(" extras: %s\n", prob.Problem.Extras["result_codes"]) + return + } + + var txResult xdr.TransactionResult + err := xdr.SafeUnmarshalBase64(txResponse.ResultXdr, &txResult) + assert.NoErrorf(t, err, "Unmarshalling transaction failed.") + assert.Equalf(t, xdr.TransactionResultCodeTxSuccess, txResult.Result.Code, + "Transaction doesn't have success code.") +} + +func (i *Test) GetPassPhrase() string { + return i.passPhrase +} + +// Cluttering code with if err != nil is absolute nonsense. +func panicIf(err error) { + if err != nil { + panic(err) + } +} + +// findDockerComposePath performs a best-effort attempt to find the project's +// Docker Compose files. +func findDockerComposePath() string { + // Lets you check if a particular directory contains a file. + directoryContainsFilename := func(dir string, filename string) bool { + files, innerErr := ioutil.ReadDir(dir) + panicIf(innerErr) + + for _, file := range files { + if file.Name() == filename { + return true + } + } + + return false + } + + current, err := os.Getwd() + panicIf(err) + + // + // We have a primary and backup attempt for finding the necessary docker + // files: via $GOPATH and via local directory traversal. 
+	//
+
+	if gopath := os.Getenv("GOPATH"); gopath != "" {
+		monorepo := filepath.Join(gopath, "src", "github.com", "stellar", "go")
+		if _, err = os.Stat(monorepo); !os.IsNotExist(err) {
+			current = monorepo
+		}
+	}
+
+	// In either case, we try to walk up the tree until we find "go.mod",
+	// which we hope is the root directory of the project.
+	for !directoryContainsFilename(current, "go.mod") {
+		current, err = filepath.Abs(filepath.Join(current, ".."))
+
+		// FIXME: This only works on *nix-like systems.
+		if err != nil || filepath.Base(current)[0] == filepath.Separator {
+			fmt.Println("Failed to establish project root directory.")
+			panic(err)
+		}
+	}
+
+	// Directly jump down to the folder that should contain the configs
+	return filepath.Join(current, "services", "horizon", "docker")
+}
+
+// MergeMaps returns a new map which contains the keys and values of *all* input
+// maps, overwriting earlier values with later values on duplicate keys.
+func MergeMaps(maps ...map[string]string) map[string]string {
+	merged := map[string]string{}
+	for _, m := range maps {
+		for k, v := range m {
+			merged[k] = v
+		}
+	}
+	return merged
+}
+
+// mapToFlags converts a map of parameters into an array of CLI args (i.e. in
+// the form --key=value). Note that an empty value for a key means to drop the
+// parameter.
+func mapToFlags(params map[string]string) []string {
+	args := make([]string, 0, len(params))
+	for key, value := range params {
+		if value == "" {
+			continue
+		}
+
+		args = append(args, fmt.Sprintf("--%s=%s", key, value))
+	}
+	return args
+}
diff --git a/services/horizon/internal/test/log.go b/services/horizon/internal/test/log.go
new file mode 100644
index 0000000000..c23282a990
--- /dev/null
+++ b/services/horizon/internal/test/log.go
@@ -0,0 +1,14 @@
+package test
+
+import (
+	"github.com/sirupsen/logrus"
+	"github.com/stellar/go/support/log"
+)
+
+var testLogger *log.Entry
+
+func init() {
+	testLogger = log.New()
+	testLogger.DisableColors()
+	testLogger.SetLevel(logrus.DebugLevel)
+}
diff --git a/services/horizon/internal/test/main.go b/services/horizon/internal/test/main.go
new file mode 100644
index 0000000000..d3b7323da3
--- /dev/null
+++ b/services/horizon/internal/test/main.go
@@ -0,0 +1,89 @@
+// Package test contains simple test helpers that should not
+// have any dependencies on horizon's packages. Think constants,
+// custom matchers, generic helpers, etc.
+package test
+
+import (
+	"context"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/jmoiron/sqlx"
+	"github.com/sirupsen/logrus"
+	tdb "github.com/stellar/go/services/horizon/internal/test/db"
+	"github.com/stellar/go/support/log"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// StaticMockServer is a test helper that records its last request.
+type StaticMockServer struct {
+	*httptest.Server
+	LastRequest *http.Request
+}
+
+// T provides a common set of functionality for each test in horizon.
+type T struct {
+	T *testing.T
+	Assert *assert.Assertions
+	Require *require.Assertions
+	Ctx context.Context
+	HorizonDB *sqlx.DB
+	CoreDB *sqlx.DB
+	EndLogTest func() []logrus.Entry
+}
+
+// Context provides a context suitable for testing in tests that do not create
+// a full App instance (in which case your tests should be using the app's
+// context). This context has a logger bound to it suitable for testing.
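+//
+// A minimal usage sketch (myFunctionUnderTest is a hypothetical placeholder):
+//
+//	ctx := Context() // carries the package's debug-level test logger
+//	myFunctionUnderTest(ctx)
+//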
+func Context() context.Context {
+	return log.Set(context.Background(), testLogger)
+}
+
+// Database returns a connection to the horizon test database
+//
+// DEPRECATED: use `Horizon()` from test/db package
+func Database(t *testing.T) *sqlx.DB {
+	return tdb.Horizon(t)
+}
+
+// DatabaseURL returns the database connection URL that any test should use
+// when connecting to the history/horizon database
+//
+// DEPRECATED: use `HorizonURL()` from test/db package
+func DatabaseURL() string {
+	return tdb.HorizonURL()
+}
+
+// Start initializes a new test helper object and a new instance of log, and
+// conceptually "starts" a new test
+func Start(t *testing.T) *T {
+	result := &T{}
+	result.T = t
+	logger := log.New()
+
+	result.Ctx = log.Set(context.Background(), logger)
+	result.HorizonDB = Database(t)
+	result.CoreDB = StellarCoreDatabase(t)
+	result.Assert = assert.New(t)
+	result.Require = require.New(t)
+	result.EndLogTest = logger.StartTest(log.DebugLevel)
+
+	return result
+}
+
+// StellarCoreDatabase returns a connection to the stellar core test database
+//
+// DEPRECATED: use `StellarCore()` from test/db package
+func StellarCoreDatabase(t *testing.T) *sqlx.DB {
+	return tdb.StellarCore(t)
+}
+
+// StellarCoreDatabaseURL returns the database connection URL that any test
+// should use when connecting to the stellar-core database
+//
+// DEPRECATED: use `StellarCoreURL()` from test/db package
+func StellarCoreDatabaseURL() string {
+	return tdb.StellarCoreURL()
+}
diff --git a/services/horizon/internal/test/scenarios/account_merge-core.sql b/services/horizon/internal/test/scenarios/account_merge-core.sql
new file mode 100644
index 0000000000..9cd05c1bb2
--- /dev/null
+++ b/services/horizon/internal/test/scenarios/account_merge-core.sql
@@ -0,0 +1,722 @@
+--
+-- PostgreSQL database dump
+--
+
+-- Dumped from database version 9.6.1
+-- Dumped by pg_dump version 9.6.1
+
+SET statement_timeout = 0;
+SET lock_timeout = 0;
+SET idle_in_transaction_session_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SET check_function_bodies = false;
+SET client_min_messages = warning;
+SET row_security = off;
+
+SET search_path = public, pg_catalog;
+
+DROP INDEX IF EXISTS public.upgradehistbyseq;
+DROP INDEX IF EXISTS public.scpquorumsbyseq;
+DROP INDEX IF EXISTS public.scpenvsbyseq;
+DROP INDEX IF EXISTS public.ledgersbyseq;
+DROP INDEX IF EXISTS public.histfeebyseq;
+DROP INDEX IF EXISTS public.histbyseq;
+DROP INDEX IF EXISTS public.bestofferindex;
+DROP INDEX IF EXISTS public.accountbalances;
+ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey;
+ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey;
+ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey;
+ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey;
+ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey;
+ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey;
+ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey;
+ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey;
+ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey;
+ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey;
+ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey;
+ALTER TABLE IF EXISTS ONLY
public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + signers text, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + 
amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE 
DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 999999979999999800, 2, 0, NULL, '', 'AQAAAA==', 0, 2, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 19999999900, 8589934592, 0, NULL, '', 'AQAAAA==', 0, 3, NULL, NULL, NULL); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('07ca4ecb248af2f369c2a3a89736e46ea24035df4deb1f949dc313d95f6a7c5c', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '4dd933b62c503d4e71afb525f35573c6b0428d8f3e8a78c84938095a5d6778bf', 2, 1559579720, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZUeFWh3+LY3YTHkhb6t+XikT48AKVUrh+INl3MY9NNtUAAAAAXPVMSAAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAAwWNZfI5WyFWx2m7ToNQChYZ5zqFrwog2j0kXqQNLArtN2TO2LFA9TnGvtSXzVXPGsEKNjz6KeMhJOAlaXWd4vwAAAAIN4Lazp2QAAAAAAAAAAADIAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('2d85c4463b2a69227e7d7317ff5527be6d135785e4f0baf76579e4f2fdd31ba5', '07ca4ecb248af2f369c2a3a89736e46ea24035df4deb1f949dc313d95f6a7c5c', '4e72a026391a0898cd89641e6aa944b9364f23d4adbe853dfc72738b5cd7aedf', 3, 1559579721, 'AAAACwfKTsskivLzacKjqJc25G6iQDXfTesflJ3DE9lfanxcb5A34qvH8+GyvuBdlScMesgJrpNRwsb9q9TMA24ciq0AAAAAXPVMSQAAAAAAAAAAl3fNvfZdYBxPGC86jq3dI5KKUxZcop2bXz/KHY7Ox/ROcqAmORoImM2JZB5qqUS5Nk8j1K2+hT38cnOLXNeu3wAAAAMN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GDCKUCCI3QNQLVJRYMVMFNTHM4M3FYDQVFOR5HGFRDYBOP6YFW4T3T7O', 2, 
'AAAAAMSqCEjcGwXVMcMqwrZnZxmy4HCpXR6cxYjwFz/YLbk9AAAAAAAAAAIAAAACAAAAAQAAAEhR4VaHf4tjdhMeSFvq35eKRPjwApVSuH4g2Xcxj0021QAAAABc9UxIAAAAAgAAAAgAAAABAAAACwAAAAgAAAADAA9CQAAAAAAAAAABBMbtIqHGQSqybGMGHnq6n5q5nv/YBiJogRctSxK3CX8AAABAgRPQn66xXi9m+bDSvCz7kr67QjzoLlaNwo/F1Vlx6nrnx/0I1s3WdOZ5mABE/hVDMZEUKvsYR58ZbkYLdYDfBQ=='); +INSERT INTO scphistory VALUES ('GDCKUCCI3QNQLVJRYMVMFNTHM4M3FYDQVFOR5HGFRDYBOP6YFW4T3T7O', 3, 'AAAAAMSqCEjcGwXVMcMqwrZnZxmy4HCpXR6cxYjwFz/YLbk9AAAAAAAAAAMAAAACAAAAAQAAADBvkDfiq8fz4bK+4F2VJwx6yAmuk1HCxv2r1MwDbhyKrQAAAABc9UxJAAAAAAAAAAAAAAABBMbtIqHGQSqybGMGHnq6n5q5nv/YBiJogRctSxK3CX8AAABA8EHC6+y2XP6LS+DqiDUWk2fYcpgl43vkSpph2x1lb58JEFXs4vH+TGwnyCLs6KqIBvMq60BILdqbPNh+urvXCg=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('04c6ed22a1c6412ab26c63061e7aba9f9ab99effd806226881172d4b12b7097f', 3, 'AAAAAQAAAAEAAAAAxKoISNwbBdUxwyrCtmdnGbLgcKldHpzFiPAXP9gtuT0AAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 0, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('lastclosedledger ', '2d85c4463b2a69227e7d7317ff5527be6d135785e4f0baf76579e4f2fdd31ba5'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 3, + "currentBuckets": [ + { + "curr": "227bedcde9fd0b37b70989c6116851496ef47931570663bb0080d600a8344419", + "next": { + "state": 0 + }, + "snap": "ef31a20a398ee73ce22275ea8177786bac54656f33dcc4f3fec60d55ddf163d9" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 1, + "output": "ef31a20a398ee73ce22275ea8177786bac54656f33dcc4f3fec60d55ddf163d9" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": 
"0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); +INSERT INTO storestate VALUES ('lastscpdata ', 'AAAAAgAAAADEqghI3BsF1THDKsK2Z2cZsuBwqV0enMWI8Bc/2C25PQAAAAAAAAADAAAAAwTG7SKhxkEqsmxjBh56up+auZ7/2AYiaIEXLUsStwl/AAAAAQAAAJhvkDfiq8fz4bK+4F2VJwx6yAmuk1HCxv2r1MwDbhyKrQAAAABc9UxJAAAAAAAAAAEAAAAAxKoISNwbBdUxwyrCtmdnGbLgcKldHpzFiPAXP9gtuT0AAABAJh8HZVMHfyljHTKopAoK7x9jc2h/5yA/n58wODadish7omaOS3zYOA+OuFlhTc9sfmOg9u2vzeLcAp1Li1K1BQAAAAEAAACYb5A34qvH8+GyvuBdlScMesgJrpNRwsb9q9TMA24ciq0AAAAAXPVMSQAAAAAAAAABAAAAAMSqCEjcGwXVMcMqwrZnZxmy4HCpXR6cxYjwFz/YLbk9AAAAQCYfB2VTB38pYx0yqKQKCu8fY3Nof+cgP5+fMDg2nYrIe6Jmjkt82DgPjrhZYU3PbH5joPbtr83i3AKdS4tStQUAAABA8VY9JUSjqPxsxbZJUAa6rpUzmawjh0wqv0d1xWVcBCwwlKdPJZj7HNol/nJ25SBGQZEwE3Ura1SuIg06mpMmCAAAAADEqghI3BsF1THDKsK2Z2cZsuBwqV0enMWI8Bc/2C25PQAAAAAAAAADAAAAAgAAAAEAAAAwb5A34qvH8+GyvuBdlScMesgJrpNRwsb9q9TMA24ciq0AAAAAXPVMSQAAAAAAAAAAAAAAAQTG7SKhxkEqsmxjBh56up+auZ7/2AYiaIEXLUsStwl/AAAAQPBBwuvstlz+i0vg6og1FpNn2HKYJeN75EqaYdsdZW+fCRBV7OLx/kxsJ8gi7OiqiAbzKutASC3amzzYfrq71woAAAABB8pOyySK8vNpwqOolzbkbqJANd9N6x+UncMT2V9qfFwAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAgAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAa7kvkwAAABAM/DuF92stQo0jQftrEuvRRr2FYta8g/D9WbmWUJziU8j7Z/SK2Gh//rge0j0XQ8ykb3D8Ln9zfprPK7T+UyzAQAAAAEAAAABAAAAAQAAAADEqghI3BsF1THDKsK2Z2cZsuBwqV0enMWI8Bc/2C25PQAAAAA='); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('b2a227c39c64a44fc7abd4c96819456f0399906d12c476d70b402bfdb296d6a3', 2, 1, 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('36be70fb7782f9801cdcedc1206e21f99293c99860a15e441f4749747a0a37ab', 2, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('734be94762dd4b7f98f644de207273f1a139f53aefc2a1eeb61886118ca7827f', 3, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('b2a227c39c64a44fc7abd4c96819456f0399906d12c476d70b402bfdb296d6a3', 2, 1, 
'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDt3KwmaPuPdFSUxdAFeb6OQetyQKIWazlbSMMhmHKNLD4sqhEqUZcQP0l+X/Op+osWmN6+FUYbsz75Q2jG4vMM', 'sqInw5xkpE/Hq9TJaBlFbwOZkG0SxHbXC0Ar/bKW1qMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBs4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('36be70fb7782f9801cdcedc1206e21f99293c99860a15e441f4749747a0a37ab', 2, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvkAAAAAAAAAAABVvwF9wAAAEA3xWbxPObnZMiBGFKLJQufJLguTsHJxyAsPP5F9Zj561aXnvN/HVRJbFsEcitGbgi9dWVdKRYvmVWCizIdmLID', 'Nr5w+3eC+YAc3O3BIG4h+ZKTyZhgoV5EH0dJdHoKN6sAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBs4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDc4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('734be94762dd4b7f98f644de207273f1a139f53aefc2a1eeb61886118ca7827f', 3, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAgAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAa7kvkwAAABAM/DuF92stQo0jQftrEuvRRr2FYta8g/D9WbmWUJziU8j7Z/SK2Gh//rge0j0XQ8ykb3D8Ln9zfprPK7T+UyzAQ==', 'c0vpR2LdS3+Y9kTeIHJz8aE59TrvwqHuthiGEYyngn8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAgAAAAAAAAAAlQL45wAAAAA', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvjnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvjnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAASoF8ecAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5M'); + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (2, 1, 'AAAAAQAAAAs=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (2, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey 
PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: 
public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/account_merge-horizon.sql b/services/horizon/internal/test/scenarios/account_merge-horizon.sql new file mode 100644 index 0000000000..232f3221e2 --- /dev/null +++ b/services/horizon/internal/test/scenarios/account_merge-horizon.sql @@ -0,0 +1,962 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY 
public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.asset_stats; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + 
+-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + 
history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount > 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount > 0)) +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged integer +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT 
nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-06-03 18:28:47.032496+02'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-06-03 18:28:47.039657+02'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-06-03 18:28:47.044048+02'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-06-03 18:28:47.054532+02'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-06-03 18:28:47.063028+02'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-06-03 18:28:47.068415+02'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-06-03 18:28:47.081625+02'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-06-03 18:28:47.087463+02'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-06-03 18:28:47.090109+02'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-06-03 18:28:47.092718+02'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-06-03 18:28:47.095973+02'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-06-03 18:28:47.099698+02'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-06-03 18:28:47.107549+02'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-06-03 18:28:47.112768+02'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-06-03 18:28:47.115116+02'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-06-03 18:28:47.116796+02'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-06-03 18:28:47.117989+02'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-06-03 18:28:47.120034+02'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_accounts VALUES (1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_accounts VALUES (2, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_accounts VALUES (3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 3, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 1, false); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (1, 12884905985, 1, 3, '{"amount": "999.9999900", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 12884905985, 2, 2, '{"amount": "999.9999900", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 12884905985, 3, 1, '{}'); +INSERT INTO history_effects VALUES (1, 8589938689, 
1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (3, 8589938689, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589938689, 3, 10, '{"weight": 1, "public_key": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (2, 8589942785, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (3, 8589942785, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 8589942785, 3, 10, '{"weight": 1, "public_key": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (3, '2d85c4463b2a69227e7d7317ff5527be6d135785e4f0baf76579e4f2fdd31ba5', '07ca4ecb248af2f369c2a3a89736e46ea24035df4deb1f949dc313d95f6a7c5c', 1, 1, '2019-06-03 16:35:21', '2019-06-03 16:35:22.987017', '2019-06-03 16:35:22.987017', 12884901888, 16, 1000000000000000000, 300, 100, 100000000, 1000000, 11, 'AAAACwfKTsskivLzacKjqJc25G6iQDXfTesflJ3DE9lfanxcb5A34qvH8+GyvuBdlScMesgJrpNRwsb9q9TMA24ciq0AAAAAXPVMSQAAAAAAAAAAl3fNvfZdYBxPGC86jq3dI5KKUxZcop2bXz/KHY7Ox/ROcqAmORoImM2JZB5qqUS5Nk8j1K2+hT38cnOLXNeu3wAAAAMN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (2, '07ca4ecb248af2f369c2a3a89736e46ea24035df4deb1f949dc313d95f6a7c5c', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 2, 2, '2019-06-03 16:35:20', '2019-06-03 16:35:23.003797', '2019-06-03 16:35:23.003797', 8589934592, 16, 1000000000000000000, 200, 100, 100000000, 1000000, 11, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZUeFWh3+LY3YTHkhb6t+XikT48AKVUrh+INl3MY9NNtUAAAAAXPVMSAAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAAwWNZfI5WyFWx2m7ToNQChYZ5zqFrwog2j0kXqQNLArtN2TO2LFA9TnGvtSXzVXPGsEKNjz6KeMhJOAlaXWd4vwAAAAIN4Lazp2QAAAAAAAAAAADIAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-06-03 16:35:23.01716', '2019-06-03 16:35:23.017161', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); + + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 12884905985, 1); +INSERT INTO history_operation_participants VALUES (2, 12884905985, 2); +INSERT INTO history_operation_participants VALUES (3, 8589938689, 3); +INSERT INTO history_operation_participants VALUES (4, 8589938689, 1); +INSERT INTO history_operation_participants VALUES (5, 8589942785, 3); +INSERT INTO 
history_operation_participants VALUES (6, 8589942785, 2); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 6, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 8, '{"into": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "account": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (8589938689, 8589938688, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589942785, 8589942784, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transaction_participants VALUES (1, 12884905984, 1); +INSERT INTO history_transaction_participants VALUES (2, 12884905984, 2); +INSERT INTO history_transaction_participants VALUES (3, 8589938688, 3); +INSERT INTO history_transaction_participants VALUES (4, 8589938688, 1); +INSERT INTO history_transaction_participants VALUES (5, 8589942784, 3); +INSERT INTO history_transaction_participants VALUES (6, 8589942784, 2); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 6, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('734be94762dd4b7f98f644de207273f1a139f53aefc2a1eeb61886118ca7827f', 3, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934593, 100, 1, '2019-06-03 16:35:22.987213', '2019-06-03 16:35:22.987213', 12884905984, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAgAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAa7kvkwAAABAM/DuF92stQo0jQftrEuvRRr2FYta8g/D9WbmWUJziU8j7Z/SK2Gh//rge0j0XQ8ykb3D8Ln9zfprPK7T+UyzAQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAIAAAAAAAAAAJUC+OcAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvjnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvjnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAASoF8ecAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5M', 
'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{M/DuF92stQo0jQftrEuvRRr2FYta8g/D9WbmWUJziU8j7Z/SK2Gh//rge0j0XQ8ykb3D8Ln9zfprPK7T+UyzAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b2a227c39c64a44fc7abd4c96819456f0399906d12c476d70b402bfdb296d6a3', 2, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-06-03 16:35:23.003936', '2019-06-03 16:35:23.003937', 8589938688, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDt3KwmaPuPdFSUxdAFeb6OQetyQKIWazlbSMMhmHKNLD4sqhEqUZcQP0l+X/Op+osWmN6+FUYbsz75Q2jG4vMM', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBs4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{7dysJmj7j3RUlMXQBXm+jkHrckCiFms5W0jDIZhyjSw+LKoRKlGXED9Jfl/zqfqLFpjevhVGG7M++UNoxuLzDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('36be70fb7782f9801cdcedc1206e21f99293c99860a15e441f4749747a0a37ab', 2, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 2, 100, 1, '2019-06-03 16:35:23.004119', '2019-06-03 16:35:23.004119', 8589942784, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvkAAAAAAAAAAABVvwF9wAAAEA3xWbxPObnZMiBGFKLJQufJLguTsHJxyAsPP5F9Zj561aXnvN/HVRJbFsEcitGbgi9dWVdKRYvmVWCizIdmLID', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBs4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDc4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{N8Vm8Tzm52TIgRhSiyULnyS4Lk7ByccgLDz+RfWY+etWl57zfx1USWxbBHIrRm4IvXVlXSkWL5lVgosyHZiyAw==}', 'none', NULL, NULL, true, 100); + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets 
history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: 
htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: 
index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + + +-- added manually +ALTER TABLE history_transactions ADD account_muxed varchar(69) NULL, ADD fee_account_muxed varchar(69) NULL; +ALTER TABLE history_operations ADD source_account_muxed varchar(69) NULL; +ALTER TABLE history_effects ADD address_muxed varchar(69) NULL; + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/base-core.sql b/services/horizon/internal/test/scenarios/base-core.sql new file mode 100644 index 0000000000..90cadbfe37 --- /dev/null +++ b/services/horizon/internal/test/scenarios/base-core.sql @@ -0,0 +1,727 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory 
DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + signers text, + lastmodified integer NOT NULL, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: 
ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text 
NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 999999996999999700, NULL, NULL, 3, 0, NULL, '', 'AQAAAA==', 0, NULL, 2); +INSERT INTO accounts VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 1000000000, NULL, NULL, 8589934592, 0, NULL, '', 'AQAAAA==', 0, NULL, 2); +INSERT INTO accounts VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 949999900, NULL, NULL, 8589934593, 0, NULL, '', 'AQAAAA==', 0, NULL, 3); +INSERT INTO accounts VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 1050000000, NULL, NULL, 8589934592, 0, NULL, '', 'AQAAAA==', 0, NULL, 3); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('9b7c8bfa1a9c5311b826007f90fb756ac043ed1422a3c292088c231a6206e660', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '8eb63d15a9e8c24469fc0382b02678bb9ea79abbfd04861fc693cc840e6ee71e', 2, 1572527985, 'AAAAAGPZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZlmEdOpVCM5HLr9FNj55qa6w2HKMtqTPFLvG8yPU/aAoAAAAAXbrfcQAAAAAAAAAARUAVxJm1lDMwwqujKcyQzs97F/AETiCgQPrw63wqaPGOtj0VqejCRGn8A4KwJni7nqeau/0Ehh/Gk8yEDm7nHgAAAAIN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('55a91b5668c4ea95bc9f0f044abf2c30c386add87730ebe564bd55d09a6df71f', '9b7c8bfa1a9c5311b826007f90fb756ac043ed1422a3c292088c231a6206e660', 'ff7fc3046e8222730e02040cb5ead9ad58615b1c7b9ac04ee15fc204dc5cd78a', 3, 1572527986, 
'AAAADJt8i/oanFMRuCYAf5D7dWrAQ+0UIqPCkgiMIxpiBuZgSP1aMoPYp3qCSCfZ1BjWWxnystfryrQnN5fe8YZZ1xcAAAAAXbrfcgAAAAIAAAAIAAAAAQAAAAwAAAAIAAAAAwAPQkAAAAAAFMKJva6QmOlDLtejYbhpYI7SUKOfeJbIdkqj9wO1Atr/f8MEboIicw4CBAy16tmtWGFbHHuawE7hX8IE3FzXigAAAAMN4Lazp2QAAAAAAAAAAAGQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GD5K7UYXGXMVDRXPGXAFF6B4PPBX5NYPFM5SNKSOE7LQHZLXM2QDXKNF', 2, 'AAAAAPqv0xc12VHG7zXAUvg8e8N+tw8rOyaqTifXA+V3ZqA7AAAAAAAAAAIAAAACAAAAAQAAADCWYR06lUIzkcuv0U2PnmprrDYcoy2pM8Uu8bzI9T9oCgAAAABdut9xAAAAAAAAAAAAAAAB9E/mAofkecxFf+H5XKAHyLaswFqKwQizQCxIg5U5hJIAAABAW6KjWavxKHPNJZxXw2ZZ24MecXl/Lj3lmrpT1e/38eo7uraymRdNkrfPGVVdOkcQGOzoUrAQ/QuGad35MXxyAQ=='); +INSERT INTO scphistory VALUES ('GD5K7UYXGXMVDRXPGXAFF6B4PPBX5NYPFM5SNKSOE7LQHZLXM2QDXKNF', 3, 'AAAAAPqv0xc12VHG7zXAUvg8e8N+tw8rOyaqTifXA+V3ZqA7AAAAAAAAAAMAAAACAAAAAQAAAEhI/Voyg9ineoJIJ9nUGNZbGfKy1+vKtCc3l97xhlnXFwAAAABdut9yAAAAAgAAAAgAAAABAAAADAAAAAgAAAADAA9CQAAAAAAAAAAB9E/mAofkecxFf+H5XKAHyLaswFqKwQizQCxIg5U5hJIAAABACreeS0hRIfwpQAAiLu/7s/rrlSlXvSnReNsPXLg3NDjAybBYBlJDr4MjNbFsVO8nppi7v5kh/3k6SSfbZednBA=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('f44fe60287e479cc457fe1f95ca007c8b6acc05a8ac108b3402c488395398492', 3, 'AAAAAQAAAAEAAAAA+q/TFzXZUcbvNcBS+Dx7w363Dys7JqpOJ9cD5XdmoDsAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('lastscpdata2 ', 
'AAAAAgAAAAD6r9MXNdlRxu81wFL4PHvDfrcPKzsmqk4n1wPld2agOwAAAAAAAAACAAAAA/RP5gKH5HnMRX/h+VygB8i2rMBaisEIs0AsSIOVOYSSAAAAAQAAADCWYR06lUIzkcuv0U2PnmprrDYcoy2pM8Uu8bzI9T9oCgAAAABdut9xAAAAAAAAAAAAAAABAAAAMJZhHTqVQjORy6/RTY+eamusNhyjLakzxS7xvMj1P2gKAAAAAF2633EAAAAAAAAAAAAAAEDlawnyL9qwGWLmFgUepzwtU7XhODteYOsjTarw6ueobwb+nD5qWj1z8sigsrGwafHJzM9qpr+JYO/BdnRES54OAAAAAPqv0xc12VHG7zXAUvg8e8N+tw8rOyaqTifXA+V3ZqA7AAAAAAAAAAIAAAACAAAAAQAAADCWYR06lUIzkcuv0U2PnmprrDYcoy2pM8Uu8bzI9T9oCgAAAABdut9xAAAAAAAAAAAAAAAB9E/mAofkecxFf+H5XKAHyLaswFqKwQizQCxIg5U5hJIAAABAW6KjWavxKHPNJZxXw2ZZ24MecXl/Lj3lmrpT1e/38eo7uraymRdNkrfPGVVdOkcQGOzoUrAQ/QuGad35MXxyAQAAAAFj2Y9TbuaNGye1uJ8jr1MRt1aaJPrxQDrQtStjOwe+mQAAAAMAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAABkAAAAAAAAAAEAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAAAAAAFW/AX3AAAAQIPOq+RAFCg0AmJ89FcOguG+3JxPeUU8JDnWCR2wUdoE1bDTlL9WFbReCSvQIE8Tg1oVXYqZyzdnAuaJvhNGswsAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAABkAAAAAAAAAAMAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAAAAAAFW/AX3AAAAQMm6XW0sYsXhXHC3R0MJUR/q1vmXhvIysaAKn6VVkybFy3niI1/abG2PHox3lkngTgOVyx/joQCEEZJQ+yfx9gMAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAABkAAAAAAAAAAIAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAAAAAAFW/AX3AAAAQBIRmJlt4XAKysoGcoi6z/TlW0kMGuCiy6EtD9TpdSPi3BbKGztS1LgIx7E4zompx+okrXRaUGSTCfiDw8h+MgYAAAABAAAAAQAAAAEAAAAA+q/TFzXZUcbvNcBS+Dx7w363Dys7JqpOJ9cD5XdmoDsAAAAA'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 1572527985, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('lastclosedledger ', '55a91b5668c4ea95bc9f0f044abf2c30c386add87730ebe564bd55d09a6df71f'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v12.0.0rc2", + "currentLedger": 3, + "currentBuckets": [ + { + "curr": "f6daafe6467d72aa01beae7a04385891fefb81fc5c8d11aa706aee80832c82d1", + "next": { + "state": 0 + }, + "snap": "ef31a20a398ee73ce22275ea8177786bac54656f33dcc4f3fec60d55ddf163d9" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 1, + "output": "ef31a20a398ee73ce22275ea8177786bac54656f33dcc4f3fec60d55ddf163d9" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 
0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); +INSERT INTO storestate VALUES ('lastscpdata3 ', 'AAAAAgAAAAD6r9MXNdlRxu81wFL4PHvDfrcPKzsmqk4n1wPld2agOwAAAAAAAAADAAAAA/RP5gKH5HnMRX/h+VygB8i2rMBaisEIs0AsSIOVOYSSAAAAAQAAAEhI/Voyg9ineoJIJ9nUGNZbGfKy1+vKtCc3l97xhlnXFwAAAABdut9yAAAAAgAAAAgAAAABAAAADAAAAAgAAAADAA9CQAAAAAAAAAABAAAASEj9WjKD2Kd6gkgn2dQY1lsZ8rLX68q0JzeX3vGGWdcXAAAAAF2633IAAAACAAAACAAAAAEAAAAMAAAACAAAAAMAD0JAAAAAAAAAAECI3qi7S0TN5ajJ3xMDI+Vy/DFSZvdpmIgqXNB/ggKFNQ5Y78GWkn1llttH67DASUMTc1zwU3S+dcRRWmg3tH8DAAAAAPqv0xc12VHG7zXAUvg8e8N+tw8rOyaqTifXA+V3ZqA7AAAAAAAAAAMAAAACAAAAAQAAAEhI/Voyg9ineoJIJ9nUGNZbGfKy1+vKtCc3l97xhlnXFwAAAABdut9yAAAAAgAAAAgAAAABAAAADAAAAAgAAAADAA9CQAAAAAAAAAAB9E/mAofkecxFf+H5XKAHyLaswFqKwQizQCxIg5U5hJIAAABACreeS0hRIfwpQAAiLu/7s/rrlSlXvSnReNsPXLg3NDjAybBYBlJDr4MjNbFsVO8nppi7v5kh/3k6SSfbZednBAAAAAGbfIv6GpxTEbgmAH+Q+3VqwEPtFCKjwpIIjCMaYgbmYAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAABkAAAAAgAAAAEAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAAAAvrwgAAAAAAAAAABruS+TAAAAED0+72mPKRxFLrSWo4uo3wUfPbjhA/xtpg15NMlkiWvdJtELXeoSv24/g5EODIIH+By6DYYqsMy4rRJPdA5opQHAAAAAQAAAAEAAAABAAAAAPqv0xc12VHG7zXAUvg8e8N+tw8rOyaqTifXA+V3ZqA7AAAAAA=='); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d', 2, 1, 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('164a5064eba64f2cdbadb856bf3448485fc626247ada3ed39cddf0f6902133b6', 2, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2b2e82dbabb024b27a0c3140ca71d8ac9bc71831f9f5a3bd69eca3d88fb0ec5c', 2, 3, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('cebb875a00ff6e1383aef0fd251a76f22c1f9ab2a2dffcb077855736ade2659a', 3, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for 
Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d', 2, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAECDzqvkQBQoNAJifPRXDoLhvtycT3lFPCQ51gkdsFHaBNWw05S/VhW0Xgkr0CBPE4NaFV2Kmcs3ZwLmib4TRrML', 'I3Tpk0m57326ml2zM5t4/ajzR3exrzO6RorVwN+UbU0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTTUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('164a5064eba64f2cdbadb856bf3448485fc626247ada3ed39cddf0f6902133b6', 2, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEASEZiZbeFwCsrKBnKIus/05VtJDBrgosuhLQ/U6XUj4twWyhs7UtS4CMexOM6JqcfqJK10WlBkkwn4g8PIfjIG', 'FkpQZOumTyzbrbhWvzRISF/GJiR62j7TnN3w9pAhM7YAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTTUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2b2e82dbabb024b27a0c3140ca71d8ac9bc71831f9f5a3bd69eca3d88fb0ec5c', 2, 3, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDJul1tLGLF4Vxwt0dDCVEf6tb5l4byMrGgCp+lVZMmxct54iNf2mxtjx6Md5ZJ4E4Dlcsf46EAhBGSUPsn8fYD', 'Ky6C26uwJLJ6DDFAynHYrJvHGDH59aO9aeyj2I+w7FwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6DUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('cebb875a00ff6e1383aef0fd251a76f22c1f9ab2a2dffcb077855736ade2659a', 3, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAAL68IAAAAAAAAAAAa7kvkwAAABA9Pu9pjykcRS60lqOLqN8FHz244QP8baYNeTTJZIlr3SbRC13qEr9uP4ORDgyCB/gcug2GKrDMuK0ST3QOaKUBw==', 'zruHWgD/bhODrvD9JRp28iwfmrKi3/ywd4VXNq3iZZoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAAAAAABAAAABAAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA+lbqAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA4n9kcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (3, 1, 'AAAAAQAAAAw=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (3, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; 
Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/base-horizon.sql b/services/horizon/internal/test/scenarios/base-horizon.sql new file mode 100644 index 0000000000..c732c69bb4 --- /dev/null +++ b/services/horizon/internal/test/scenarios/base-horizon.sql @@ -0,0 +1,1527 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trust_lines_by_type_code_issuer; +DROP INDEX IF EXISTS public.trust_lines_by_issuer; +DROP INDEX IF EXISTS public.trust_lines_by_account_id; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.signers_by_account; +DROP INDEX IF EXISTS public.offers_by_selling_asset; +DROP INDEX IF EXISTS public.offers_by_seller; +DROP INDEX IF EXISTS public.offers_by_last_modified_ledger; +DROP INDEX IF EXISTS public.offers_by_buying_asset; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS 
public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.exp_asset_stats_by_issuer; +DROP INDEX IF EXISTS public.exp_asset_stats_by_code; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +DROP INDEX IF EXISTS public.accounts_inflation_destination; +DROP INDEX IF EXISTS public.accounts_home_domain; +DROP INDEX IF EXISTS public.accounts_data_account_id_name; +ALTER TABLE IF EXISTS ONLY public.trust_lines DROP CONSTRAINT IF EXISTS trust_lines_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.key_value_store DROP CONSTRAINT IF EXISTS key_value_store_pkey; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.exp_asset_stats DROP CONSTRAINT IF EXISTS exp_asset_stats_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts_signers DROP CONSTRAINT IF EXISTS accounts_signers_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts_data DROP CONSTRAINT IF EXISTS accounts_data_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.trust_lines; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.key_value_store; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP 
TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP INDEX IF EXISTS public.htrd_agg_open_ledger_toid; +DROP INDEX IF EXISTS public.htrd_agg_bucket_lookup; +DROP TABLE IF EXISTS public.history_trades_60000; +DROP FUNCTION IF EXISTS public.to_millis(timestamp with time zone, numeric); +DROP FUNCTION IF EXISTS public.to_millis(timestamp without time zone, numeric); +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.exp_asset_stats; +DROP TABLE IF EXISTS public.asset_stats; +DROP TABLE IF EXISTS public.accounts_signers; +DROP TABLE IF EXISTS public.accounts_data; +DROP TABLE IF EXISTS public.accounts; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( 
+ SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + account_id character varying(56) NOT NULL, + balance bigint NOT NULL, + buying_liabilities bigint NOT NULL, + selling_liabilities bigint NOT NULL, + sequence_number bigint NOT NULL, + num_subentries integer NOT NULL, + inflation_destination character varying(56) NOT NULL, + flags integer NOT NULL, + home_domain character varying(32) NOT NULL, + master_weight smallint NOT NULL, + threshold_low smallint NOT NULL, + threshold_medium smallint NOT NULL, + threshold_high smallint NOT NULL, + last_modified_ledger integer NOT NULL +); + + +-- +-- Name: accounts_data; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts_data ( + ledger_key character varying(150) NOT NULL, + account_id character varying(56) NOT NULL, + name character varying(64) NOT NULL, + value character varying(90) NOT NULL, + last_modified_ledger integer NOT NULL +); + + +-- +-- Name: accounts_signers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts_signers ( + account_id character varying(64) NOT NULL, + signer character varying(64) NOT NULL, + weight integer NOT NULL, + sponsor character varying(56) +); + +CREATE INDEX accounts_signers_by_sponsor ON accounts_signers USING BTREE(sponsor); + + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: exp_asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE exp_asset_stats ( + asset_type integer NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL, + amount text NOT NULL, + num_accounts integer NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- history_claimable_balances (manually added) +CREATE SEQUENCE history_claimable_balances_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + +CREATE TABLE history_claimable_balances ( + id bigint NOT NULL DEFAULT nextval('history_claimable_balances_id_seq'::regclass), + claimable_balance_id text NOT NULL +); + +CREATE UNIQUE INDEX "index_history_claimable_balances_on_id" ON history_claimable_balances USING btree (id); +CREATE UNIQUE INDEX "index_history_claimable_balances_on_claimable_balance_id" ON history_claimable_balances USING btree (claimable_balance_id); + +CREATE TABLE 
history_operation_claimable_balances ( + history_operation_id bigint NOT NULL, + history_claimable_balance_id bigint NOT NULL +); + +CREATE UNIQUE INDEX "index_history_operation_claimable_balances_on_ids" ON history_operation_claimable_balances USING btree (history_operation_id , history_claimable_balance_id); +CREATE INDEX "index_history_operation_claimable_balances_on_operation_id" ON history_operation_claimable_balances USING btree (history_operation_id); + +CREATE TABLE history_transaction_claimable_balances ( + history_transaction_id bigint NOT NULL, + history_claimable_balance_id bigint NOT NULL +); + +CREATE UNIQUE INDEX "index_history_transaction_claimable_balances_on_ids" ON history_transaction_claimable_balances USING btree (history_transaction_id , history_claimable_balance_id); +CREATE INDEX "index_history_transaction_claimable_balances_on_transaction_id" ON history_transaction_claimable_balances USING btree (history_transaction_id); + + +INSERT INTO history_claimable_balances VALUES (1, '00000000178826fbfe339e1f5c53417c6fedfe2c05e8bec14303143ec46b38981b09c3f9'); +SELECT pg_catalog.setval('history_claimable_balances_id_seq', 1, true); +-- The operations/transactions are going to be unrelated to claimable balances, but it doesn't matter for testing +INSERT INTO history_operation_claimable_balances VALUES (12884905985, 1); +INSERT INTO history_operation_claimable_balances VALUES (8589938689, 1); +INSERT INTO history_transaction_claimable_balances VALUES (12884905984, 1); +INSERT INTO history_transaction_claimable_balances VALUES (8589938688, 1); + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer, + tx_set_operation_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + 
history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount >= 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount >= 0)) +); + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades_60000 ( + timestamp bigint not null, + base_asset_id bigint not null, + counter_asset_id bigint not null, + count integer not null, + base_volume numeric not null, + counter_volume numeric not null, + avg numeric not null, + high_n numeric not null, + high_d numeric not null, + low_n numeric not null, + low_d numeric not null, + open_ledger_toid bigint not null, + open_n numeric not null, + open_d numeric not null, + close_ledger_toid bigint not null, + close_n numeric not null, + close_d numeric not null, + + PRIMARY KEY(base_asset_id, counter_asset_id, timestamp) +); + +CREATE OR REPLACE FUNCTION to_millis(t timestamp without time zone, trun numeric DEFAULT 1) + RETURNS bigint AS $$ + BEGIN + RETURN div(cast((extract(epoch from t) * 1000 ) as bigint), trun)*trun; + END; +$$ LANGUAGE plpgsql IMMUTABLE; + +CREATE OR REPLACE FUNCTION to_millis(t timestamp with time zone, trun numeric DEFAULT 1) + RETURNS bigint AS $$ + BEGIN + RETURN to_millis(t::timestamp, trun); + END; +$$ LANGUAGE plpgsql IMMUTABLE; + +CREATE INDEX htrd_agg_bucket_lookup ON history_trades + USING btree (to_millis(ledger_closed_at, '60000'::numeric)); + +CREATE INDEX htrd_agg_open_ledger_toid ON history_trades_60000 USING btree (open_ledger_toid); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: 
public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged bigint, + inner_transaction_hash character varying(64), + fee_account character varying(64), + inner_signatures character varying(96)[], + new_max_fee bigint +); + +-- +-- Name: key_value_store; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE key_value_store ( + key character varying(255) NOT NULL, + value character varying(255) NOT NULL +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + seller_id character varying(56) NOT NULL, + offer_id bigint NOT NULL, + selling_asset text NOT NULL, + buying_asset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + last_modified_ledger integer NOT NULL +); + + +-- +-- Name: trust_lines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trust_lines ( + ledger_key character varying(150) NOT NULL, + account_id character varying(56) NOT NULL, + asset_type integer NOT NULL, + asset_issuer character varying(56) NOT NULL, + asset_code character varying(12) NOT NULL, + balance bigint NOT NULL, + trust_line_limit bigint NOT NULL, + buying_liabilities bigint NOT NULL, + selling_liabilities bigint NOT NULL, + flags integer NOT NULL, + last_modified_ledger integer NOT NULL +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts_data; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts_signers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: exp_asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-10-31 
14:19:49.03833+01'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-10-31 14:19:49.04267+01'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-10-31 14:19:49.045926+01'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-10-31 14:19:49.054147+01'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-10-31 14:19:49.061804+01'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-10-31 14:19:49.067093+01'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-10-31 14:19:49.081047+01'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-10-31 14:19:49.085128+01'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-10-31 14:19:49.089574+01'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-10-31 14:19:49.092366+01'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-10-31 14:19:49.095671+01'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-10-31 14:19:49.099289+01'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-10-31 14:19:49.105961+01'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-10-31 14:19:49.111757+01'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-10-31 14:19:49.113736+01'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-10-31 14:19:49.115578+01'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-10-31 14:19:49.116928+01'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-10-31 14:19:49.118562+01'); +INSERT INTO gorp_migrations VALUES ('18_account_for_signers.sql', '2019-10-31 14:19:49.123835+01'); +INSERT INTO gorp_migrations VALUES ('19_offers.sql', '2019-10-31 14:19:49.133107+01'); +INSERT INTO gorp_migrations VALUES ('20_account_for_signer_index.sql', '2019-10-31 14:19:49.135499+01'); +INSERT INTO gorp_migrations VALUES ('21_trades_remove_zero_amount_constraints.sql', '2019-10-31 14:19:49.138031+01'); +INSERT INTO gorp_migrations VALUES ('22_trust_lines.sql', '2019-10-31 14:19:49.144708+01'); +INSERT INTO gorp_migrations VALUES ('23_exp_asset_stats.sql', '2019-10-31 14:19:49.15222+01'); +INSERT INTO gorp_migrations VALUES ('24_accounts.sql', '2019-10-31 14:19:49.160844+01'); +INSERT INTO gorp_migrations VALUES ('25_expingest_rename_columns.sql', '2019-10-31 14:19:49.163717+01'); +INSERT INTO gorp_migrations VALUES ('33_remove_unused.sql', '2019-11-30 10:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('34_fee_bump_transactions.sql', '2019-11-30 11:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('35_drop_participant_id.sql', '2019-11-30 14:19:49.163728+01'); +INSERT INTO gorp_migrations VALUES ('37_add_tx_set_operation_count_to_ledgers.sql', '2019-11-30 12:19:49.163728+01'); +INSERT INTO gorp_migrations VALUES ('41_add_sponsor_to_state_tables.sql', '2019-11-30 13:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('45_add_claimable_balances_history.sql', '2019-11-30 14:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('46_add_muxed_accounts.sql', '2019-12-30 14:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('47_precompute_trade_aggregations.sql', '2019-12-30 14:19:49.163719+01'); +INSERT INTO gorp_migrations VALUES ('48_rebuild_trade_aggregations.sql', '2021-12-02 01:33:33.428419+00'); +INSERT INTO gorp_migrations VALUES 
('49_add_brin_index_trade_aggregations.sql', '2021-12-02 01:33:33.43274+00'); +INSERT INTO gorp_migrations VALUES ('50_liquidity_pools.sql', '2021-12-02 01:33:33.471893+00'); +INSERT INTO gorp_migrations VALUES ('51_remove_ht_unused_indexes.sql', '2021-12-02 01:33:33.47903+00'); + + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_accounts VALUES (1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_accounts VALUES (2, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_accounts VALUES (3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_accounts VALUES (4, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 4, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 1, false); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (2, 12884905985, 1, 2, '{"amount": "5.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 12884905985, 2, 3, '{"amount": "5.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589938689, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (3, 8589938689, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589938689, 3, 10, '{"weight": 1, "public_key": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (4, 8589942785, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (3, 8589942785, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (4, 8589942785, 3, 10, '{"weight": 1, "public_key": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"}'); +INSERT INTO history_effects VALUES (2, 8589946881, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (3, 8589946881, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 8589946881, 3, 10, '{"weight": 1, "public_key": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (3, '55a91b5668c4ea95bc9f0f044abf2c30c386add87730ebe564bd55d09a6df71f', '9b7c8bfa1a9c5311b826007f90fb756ac043ed1422a3c292088c231a6206e660', 1, 1, '2019-10-31 13:19:46', '2019-10-31 13:19:49.394864', '2019-10-31 13:19:49.394864', 12884901888, 16, 1000000000000000000, 400, 100, 100000000, 1000000, 12, 'AAAADJt8i/oanFMRuCYAf5D7dWrAQ+0UIqPCkgiMIxpiBuZgSP1aMoPYp3qCSCfZ1BjWWxnystfryrQnN5fe8YZZ1xcAAAAAXbrfcgAAAAIAAAAIAAAAAQAAAAwAAAAIAAAAAwAPQkAAAAAAFMKJva6QmOlDLtejYbhpYI7SUKOfeJbIdkqj9wO1Atr/f8MEboIicw4CBAy16tmtWGFbHHuawE7hX8IE3FzXigAAAAMN4Lazp2QAAAAAAAAAAAGQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (2, 
'9b7c8bfa1a9c5311b826007f90fb756ac043ed1422a3c292088c231a6206e660', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 3, 3, '2019-10-31 13:19:45', '2019-10-31 13:19:49.409603', '2019-10-31 13:19:49.409603', 8589934592, 16, 1000000000000000000, 300, 100, 100000000, 100, 0, 'AAAAAGPZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZlmEdOpVCM5HLr9FNj55qa6w2HKMtqTPFLvG8yPU/aAoAAAAAXbrfcQAAAAAAAAAARUAVxJm1lDMwwqujKcyQzs97F/AETiCgQPrw63wqaPGOtj0VqejCRGn8A4KwJni7nqeau/0Ehh/Gk8yEDm7nHgAAAAIN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0, NULL); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-10-31 13:19:49.421622', '2019-10-31 13:19:49.421622', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0, NULL); + + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 12884905985, 1); +INSERT INTO history_operation_participants VALUES (2, 12884905985, 2); +INSERT INTO history_operation_participants VALUES (3, 8589938689, 3); +INSERT INTO history_operation_participants VALUES (4, 8589938689, 1); +INSERT INTO history_operation_participants VALUES (5, 8589942785, 3); +INSERT INTO history_operation_participants VALUES (6, 8589942785, 4); +INSERT INTO history_operation_participants VALUES (7, 8589946881, 3); +INSERT INTO history_operation_participants VALUES (8, 8589946881, 2); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 8, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 1, '{"to": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "amount": "5.0000000", "asset_type": "native"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (8589938689, 8589938688, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589942785, 8589942784, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589946881, 8589946880, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": 
"GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transaction_participants VALUES (1, 12884905984, 1); +INSERT INTO history_transaction_participants VALUES (2, 12884905984, 2); +INSERT INTO history_transaction_participants VALUES (3, 8589938688, 3); +INSERT INTO history_transaction_participants VALUES (4, 8589938688, 1); +INSERT INTO history_transaction_participants VALUES (5, 8589942784, 3); +INSERT INTO history_transaction_participants VALUES (6, 8589942784, 4); +INSERT INTO history_transaction_participants VALUES (7, 8589946880, 3); +INSERT INTO history_transaction_participants VALUES (8, 8589946880, 2); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 8, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('cebb875a00ff6e1383aef0fd251a76f22c1f9ab2a2dffcb077855736ade2659a', 3, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934593, 100, 1, '2019-10-31 13:19:49.395016', '2019-10-31 13:19:49.395016', 12884905984, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAAL68IAAAAAAAAAAAa7kvkwAAABA9Pu9pjykcRS60lqOLqN8FHz244QP8baYNeTTJZIlr3SbRC13qEr9uP4ORDgyCB/gcug2GKrDMuK0ST3QOaKUBw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAABAAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA+lbqAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA4n9kcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{9Pu9pjykcRS60lqOLqN8FHz244QP8baYNeTTJZIlr3SbRC13qEr9uP4ORDgyCB/gcug2GKrDMuK0ST3QOaKUBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d', 2, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-10-31 13:19:49.409714', '2019-10-31 13:19:49.409714', 8589938688, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAECDzqvkQBQoNAJifPRXDoLhvtycT3lFPCQ51gkdsFHaBNWw05S/VhW0Xgkr0CBPE4NaFV2Kmcs3ZwLmib4TRrML', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 
'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTTUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{g86r5EAUKDQCYnz0Vw6C4b7cnE95RTwkOdYJHbBR2gTVsNOUv1YVtF4JK9AgTxODWhVdipnLN2cC5om+E0azCw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('164a5064eba64f2cdbadb856bf3448485fc626247ada3ed39cddf0f6902133b6', 2, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 2, 100, 1, '2019-10-31 13:19:49.409839', '2019-10-31 13:19:49.409839', 8589942784, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEASEZiZbeFwCsrKBnKIus/05VtJDBrgosuhLQ/U6XUj4twWyhs7UtS4CMexOM6JqcfqJK10WlBkkwn4g8PIfjIG', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTTUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{EhGYmW3hcArKygZyiLrP9OVbSQwa4KLLoS0P1Ol1I+LcFsobO1LUuAjHsTjOianH6iStdFpQZJMJ+IPDyH4yBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2b2e82dbabb024b27a0c3140ca71d8ac9bc71831f9f5a3bd69eca3d88fb0ec5c', 2, 3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 3, 100, 1, '2019-10-31 13:19:49.409953', '2019-10-31 13:19:49.409953', 8589946880, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDJul1tLGLF4Vxwt0dDCVEf6tb5l4byMrGgCp+lVZMmxct54iNf2mxtjx6Md5ZJ4E4Dlcsf46EAhBGSUPsn8fYD', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6DUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ybpdbSxixeFccLdHQwlRH+rW+ZeG8jKxoAqfpVWTJsXLeeIjX9psbY8ejHeWSeBOA5XLH+OhAIQRklD7J/H2Aw==}', 'none', NULL, NULL, true, 100); + 
+ +-- +-- Data for Name: key_value_store; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO key_value_store VALUES ('exp_ingest_last_ledger', '0'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: trust_lines; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: accounts_data accounts_data_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts_data + ADD CONSTRAINT accounts_data_pkey PRIMARY KEY (ledger_key); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (account_id); + + +-- +-- Name: accounts_signers accounts_signers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts_signers + ADD CONSTRAINT accounts_signers_pkey PRIMARY KEY (signer, account_id); + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: exp_asset_stats exp_asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY exp_asset_stats + ADD CONSTRAINT exp_asset_stats_pkey PRIMARY KEY (asset_code, asset_issuer, asset_type); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: key_value_store key_value_store_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY key_value_store + ADD CONSTRAINT key_value_store_pkey PRIMARY KEY (key); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offer_id); + + +-- +-- Name: trust_lines trust_lines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trust_lines + ADD CONSTRAINT trust_lines_pkey PRIMARY KEY (ledger_key); + + +-- +-- Name: accounts_data_account_id_name; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX accounts_data_account_id_name ON accounts_data USING btree (account_id, name); + + +-- +-- Name: accounts_home_domain; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accounts_home_domain ON accounts USING btree (home_domain); + + +-- +-- Name: accounts_inflation_destination; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX 
accounts_inflation_destination ON accounts USING btree (inflation_destination); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_fee_account; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_fee_account ON history_transactions USING btree (fee_account) WHERE fee_account IS NOT NULL; + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_hash ON public.history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_inner_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_inner_hash ON history_transactions USING btree (inner_transaction_hash) WHERE inner_transaction_hash IS NOT NULL; + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: exp_asset_stats_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX exp_asset_stats_by_code ON exp_asset_stats USING btree (asset_code); + + +-- +-- Name: exp_asset_stats_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX exp_asset_stats_by_issuer ON exp_asset_stats USING btree (asset_issuer); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING 
btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON 
history_transactions USING btree (id); + + +-- +-- Name: offers_by_buying_asset; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_buying_asset ON offers USING btree (buying_asset); + + +-- +-- Name: offers_by_last_modified_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_last_modified_ledger ON offers USING btree (last_modified_ledger); + + +-- +-- Name: offers_by_seller; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_seller ON offers USING btree (seller_id); + + +-- +-- Name: offers_by_selling_asset; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_selling_asset ON offers USING btree (selling_asset); + + +-- +-- Name: signers_by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX signers_by_account ON accounts_signers USING btree (account_id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: trust_lines_by_account_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trust_lines_by_account_id ON trust_lines USING btree (account_id); + + +-- +-- Name: trust_lines_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trust_lines_by_issuer ON trust_lines USING btree (asset_issuer); + + +-- +-- Name: trust_lines_by_type_code_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trust_lines_by_type_code_issuer ON trust_lines USING btree (asset_type, asset_code, asset_issuer); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + +-- +-- The following DDL covers any manipulations that have happened to the schema since migration 47. If you need to include +-- a schema update for this scenario, append the 'up' portion of the newest migration at the end here; otherwise, tests +-- will run into errors when the db migration up/downs are run automatically by the tests. 
+-- +ALTER TABLE accounts ADD sponsor TEXT; +CREATE INDEX accounts_by_sponsor ON accounts USING BTREE(sponsor); + +ALTER TABLE accounts_data ADD sponsor TEXT; +CREATE INDEX accounts_data_by_sponsor ON accounts_data USING BTREE(sponsor); + +ALTER TABLE trust_lines ADD sponsor TEXT; +CREATE INDEX trust_lines_by_sponsor ON trust_lines USING BTREE(sponsor); + +ALTER TABLE offers ADD sponsor TEXT; +CREATE INDEX offers_by_sponsor ON offers USING BTREE(sponsor); + +ALTER TABLE history_operation_participants + DROP COLUMN id; + +ALTER TABLE history_transaction_participants + DROP COLUMN id; + +DROP TABLE asset_stats cascade; + +DROP INDEX exp_asset_stats_by_code; + +DROP INDEX index_history_transactions_on_id; + +DROP INDEX index_history_ledgers_on_id; + +DROP INDEX asset_by_code; + +ALTER TABLE history_transactions ADD account_muxed varchar(69) NULL, ADD fee_account_muxed varchar(69) NULL; +ALTER TABLE history_operations ADD source_account_muxed varchar(69) NULL; +ALTER TABLE history_effects ADD address_muxed varchar(69) NULL; + + +-- migration 49 +CREATE INDEX IF NOT EXISTS htrd_agg_timestamp_brin ON history_trades_60000 USING brin(timestamp); + +-- migration 50 +CREATE TABLE liquidity_pools ( + id text NOT NULL, -- hex-encoded PoolID + type smallint NOT NULL, + fee integer NOT NULL, + trustline_count bigint NOT NULL CHECK (trustline_count > 0), + share_count bigint NOT NULL DEFAULT 0 CHECK(share_count >= 0), + asset_reserves jsonb NOT NULL, + last_modified_ledger integer NOT NULL, + deleted boolean NOT NULL DEFAULT false, + PRIMARY KEY (id) +); + +CREATE INDEX liquidity_pools_by_asset_reserves ON liquidity_pools USING gin(asset_reserves jsonb_path_ops); +CREATE INDEX live_liquidity_pools ON liquidity_pools USING BTREE (deleted, last_modified_ledger); + +CREATE SEQUENCE history_liquidity_pools_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + +CREATE TABLE history_liquidity_pools ( + id bigint NOT NULL DEFAULT nextval('history_liquidity_pools_id_seq'::regclass), + liquidity_pool_id text NOT NULL +); + +CREATE UNIQUE INDEX index_history_liquidity_pools_on_id ON history_liquidity_pools USING btree (id); +CREATE UNIQUE INDEX index_history_liquidity_pools_on_liquidity_pool_id ON history_liquidity_pools USING btree (liquidity_pool_id); + +CREATE TABLE history_operation_liquidity_pools ( + history_operation_id bigint NOT NULL, + history_liquidity_pool_id bigint NOT NULL +); + +CREATE UNIQUE INDEX index_history_operation_liquidity_pools_on_ids ON history_operation_liquidity_pools USING btree (history_operation_id , history_liquidity_pool_id); +CREATE INDEX index_history_operation_liquidity_pools_on_operation_id ON history_operation_liquidity_pools USING btree (history_operation_id); + +CREATE TABLE history_transaction_liquidity_pools ( + history_transaction_id bigint NOT NULL, + history_liquidity_pool_id bigint NOT NULL +); + +CREATE UNIQUE INDEX index_history_transaction_liquidity_pools_on_ids ON history_transaction_liquidity_pools USING btree (history_transaction_id , history_liquidity_pool_id); +CREATE INDEX index_history_transaction_liquidity_pools_on_transaction_id ON history_transaction_liquidity_pools USING btree (history_transaction_id); + +ALTER TABLE trust_lines ADD liquidity_pool_id text; +CREATE INDEX trust_lines_by_liquidity_pool_id ON trust_lines USING BTREE(liquidity_pool_id); + +DROP INDEX htrd_by_offer; +DROP INDEX htrd_counter_lookup; + +ALTER TABLE history_trades DROP offer_id, + ALTER base_account_id DROP NOT NULL, + ALTER counter_account_id DROP NOT 
NULL, + ADD base_liquidity_pool_id bigint, + ADD counter_liquidity_pool_id bigint, + ADD liquidity_pool_fee int; + +CREATE INDEX htrd_by_base_liquidity_pool_id ON history_trades USING BTREE(base_liquidity_pool_id); +CREATE INDEX htrd_by_counter_liquidity_pool_id ON history_trades USING BTREE(counter_liquidity_pool_id); + +-- migration 51 +DROP INDEX IF EXISTS by_account; +DROP INDEX IF EXISTS by_fee_account; + + +-- +-- PostgreSQL database dump complete +-- diff --git a/services/horizon/internal/test/scenarios/bindata.go b/services/horizon/internal/test/scenarios/bindata.go new file mode 100644 index 0000000000..da0741247e --- /dev/null +++ b/services/horizon/internal/test/scenarios/bindata.go @@ -0,0 +1,754 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// account_merge-core.sql (26.849kB) +// account_merge-horizon.sql (36.651kB) +// base-core.sql (29.682kB) +// base-horizon.sql (58.055kB) +// failed_transactions-core.sql (38.723kB) +// failed_transactions-horizon.sql (54.917kB) +// ingest_asset_stats-core.sql (61.38kB) +// ingest_asset_stats-horizon.sql (87.473kB) +// kahuna-core.sql (232.639kB) +// kahuna-horizon.sql (319.152kB) +// offer_ids-core.sql (61.677kB) +// offer_ids-horizon.sql (85.572kB) +// operation_fee_stats_1-core.sql (48.276kB) +// operation_fee_stats_1-horizon.sql (65.624kB) +// operation_fee_stats_2-core.sql (26.671kB) +// operation_fee_stats_2-horizon.sql (32.011kB) +// operation_fee_stats_3-core.sql (45.051kB) +// operation_fee_stats_3-horizon.sql (58.503kB) +// pathed_payment-core.sql (52.308kB) +// pathed_payment-horizon.sql (76.327kB) +// paths_strict_send-core.sql (70.821kB) +// paths_strict_send-horizon.sql (92.968kB) + +package scenarios + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + if clErr != nil { + return nil, clErr + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _account_mergeCoreSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x69\x93\xa2\x4a\xf3\xef\xfb\xf9\x14\xc4\xbc\xe9\x99\xb0\xe7\xc8\xbe\xcc\xdc\x79\x22\x70\x57\x14\xf7\xf5\xc6\x8d\x8e\x02\x0a\x44\x11\x90\xc5\xed\xc6\xf3\xdd\x6f\x00\x2e\xa0\x68\xdb\xda\xe7\x39\xcf\x8d\xff\xf8\x62\x4e\x1f\x49\x7f\xf5\xab\xac\xcc\xac\xac\x04\xd3\x1f\x3f\xbe\xfc\xf8\x81\xb4\x2c\xd7\xd3\x1c\xd8\x6d\xd7\x11\x05\x78\x40\x02\x2e\x44\x14\x7f\x61\x7f\xf9\xf1\xe3\x4b\x70\xbd\xe0\x2f\x6c\xa8\x20\xaa\x63\x2d\x4e\x02\x2b\xe8\xb8\xba\x65\x22\xdc\x5f\xf4\x5f\x58\x4c\x4a\xda\x22\xb6\xf6\x16\x7c\xfc\x4c\xe4\x4b\xb7\xd8\x43\x5c\x0f\x78\x70\x01\x4d\xef\xcd\xd3\x17\xd0\xf2\x3d\xe4\x37\x82\xfe\x0a\x2f\x19\x96\x3c\xbf\x7c\x57\x57\x0c\xf8\xa6\x9b\x6f\x9e\x03\x4c\x17\xc8\x9e\x6e\x99\x6f\x2e\x74\x03\xdc\x4b\x61\xd9\xd0\x03\x68\x68\xca\x96\xa2\x9b\x1a\xf2\x1b\x79\xe9\xf7\x4a\xec\xcb\xaf\xc3\xd8\xa6\x02\x1c\xe5\x4d\xb6\x4c\xd5\x72\x16\xba\xa9\xbd\xb9\x9e\xa3\x9b\x9a\x8b\xfc\x46\x2c\x73\x8f\x31\x85\xf2\xfc\x4d\xf5\xcd\x68\x2c\xc9\x52\x74\x18\x5c\x57\x81\xe1\xc2\xc4\x30\x0b\xdd\x7c\x5b\x40\xd7\x05\x5a\x28\xb0\x06\x8e\xa9\x9b\x5a\x24\xe2\x58\xeb\x37\x17\xca\xbe\xa3\x7b\xdb\x00\x5c\x55\x7f\xed\x15\x00\x81\x23\x4f\xdf\x6c\xe0\x4d\x91\xdf\x88\xed\x4b\x86\x2e\xbf\x06\x1a\x93\x81\x07\x0c\x4b\xfb\xf5\xe5\x4b\xa1\xd3\x6c\x21\x55\xb1\x50\x1c\x21\xd5\x12\x52\x1c\x55\xbb\xbd\xee\x5e\xf2\x2f\xdf\xd6\x1c\xa0\xc0\xa9\xee\x7a\xd2\xd6\x85\xcb\x5f\x37\xa5\x5d\xd9\x5e\xfa\x96\xe3\x2f\xdc\xfb\x84\xa1\xb9\xba\x47\xd2\x80\x8a\x06\x9d\x7b\x24\x03\x9e\x2a\x84\x77\x4a\xde\x21\x26\x41\xd7\xb3\x54\x15\x3a\xba\xa9\xc0\xcd\x6d\x59\x20\xcb\x96\x6f\x7a\x12\x30\x80\x29\x43\xf7\xd7\x17\xbe\xde\x2b\x76\x90\x1e\x9f\xab\x17\x63\xd2\x4d\xb1\x3e\x4e\x51\xaf\xe5\x6c\x91\x10\x3d\xdf\x14\xbb\xbd\x0e\x5f\x15\x7b\xb1\x0f\x25\x05\xdf\xec\x39\xdc\xde\x83\xef\x6d\xde\x87\x3e\xca\x7c\x00\x55\x85\x77\x70\x8e\x8b\xdd\x8f\xed\xf8\xae\x67\xe8\x26\x74\x6f\x21\x1f\x85\xee\xc6\x0d\x58\xc0\x30\x1a\xdc\xc0\x3d\x09\xdd\x8f\x7b\x34\xf9\x5b\xb8\x47\xa1\xbb\x71\x23\x79\xdd\x54\xad\x1b\xb8\x27\xa1\xbb\x71\x6d\x5f\x72\x7d\xe9\x06\x66\x24\xf0\x11\x3c\x43\x77\xa7\x4b\x1f\xfa\xb7\x34\x1b\x17\xbb\x1f\x1b\x42\xe7\x96\x5a\xc3\xeb\x77\xa3\x85\x6e\x7c\x0b\x2e\x12\xb8\x1b\x2f\x8a\x4a\x53\x08\x94\xdb\xb0\x09\xb9\xbf\x19\x7d\x1f\x29\xe1\xf2\xed\xce\x61\x24\x60\xde\x00\x97\x80\x79\x37\xe1\x7d\xf4\xbb\xc5\xf5\x20\xf2\x51\xcc\x20\x07\x78\x1f\x36\x90\xda\x23\x87\xb2\xe7\xc0\xa9\x21\xf7\xb6\xec\x31\x34\xbe\x27\x76\x0a\x74\xef\x48\x1e\x03\xd7\x6d\xb9\x53\x20\x7a\x47\xee\x18\x58\xde\x95\xbb\x8b\xdf\x29\xa0\xdc\x96\x8b\x82\xc4\xbb\x32\x47\x97\x7f\x47\x32\xf0\xe3\xdb\x22\x91\x6f\xde\x96\x49\xb8\xc2\x6d\x51\x09\x98\xb7\x05\x0e\xa6\x7a\x97\x54\x60\x79\x7b\xc1\xe2\xa8\x57\x14\xbb\xd5\xa6\x18\x17\x36\x6c\xcd\x5d\x1a\x7b\x89\x6e\xbe\x52\x6c\xf0\x17\x58\xbf\xbe\x44\xb9\xb1\x08\x16\xf0\xe7\xe1\x3d\xa4\xb7\xb5\xe1\xcf\xfd\x47\x7e\x21\x5d\x79\x0a\x17\xe0\x27\xf2\xe3\x17\xd2\x5c\x9b\xd0\xf9\x89\xfc\x08\x53\xe6\x7c\xa7\xc8\xf7\x8a\x07\xe4\x03\xde\x97\x04\x62\xf2\xe2\x1e\x38\xdf\x6c\x34\x8a\x62\xef\x06\x72\x24\x80\x34\xc5\x24\x00\x52\xed\x22\x2f\x87\xfc\xf6\xf0\x9e\x1b\x82\xbc\x9c\x8f\x7c\x98\xfe\x7e\xcc\xa3\x86\xde\x9d\x4f\x42\x97\x62\xb3\x77\xa6\x4f\x64\x58\xed\x55\x8e\xb4\xe2\x09\x6d\x62\xf8\x13\xca\x19\x91\x8f\x4c\xfe\x02\x24\x54\x40\xab\x9e\xb5\xb5\xe0\x14\x63\x3b\x96\x0c\x15\xdf\x01\x06\x62\x00\x53\xf3\x81\x06\x43\x35\xdc\x99\x80\x07\x62\x0a\x54\x81\x6f\x78\x6f\x1e\x90\x0c\xe8\xda\x40\x86\xc1\x69\xe2\xe5\xec\xea\x5a\xf7\xa6\x6f\x96\xae\xc4\x0e\x08\x89\xc9\xc6\x0d\x72\x3f\xcd\xd0\x74\x4f\x93\x3c\x18\x40\x9a\xc2\x23\x2b\x8f\x07\xdd\x6
f\x5f\x10\x04\x39\xbc\xa3\x2b\x88\x3c\x05\x0e\x90\x3d\xe8\x20\x2b\xe0\x6c\x75\x53\xfb\x46\xd1\xdf\xc3\xb5\x11\xfb\xf5\xfa\x6b\x28\x1d\x7c\xd0\x04\x0b\x98\x22\xcc\xb2\x69\xc2\x2b\x60\xf8\x69\xd2\x18\x86\x9f\x8b\x1b\xc0\xf5\x16\x96\xa2\xab\x3a\x54\x10\xdd\xf4\xa0\x06\x9d\xa3\xc8\x97\xef\xe7\x6b\x7f\xf4\xe2\x27\x75\xe1\x3e\xa4\x88\xfd\x41\x00\x91\x74\x4d\x37\xbd\xb3\x8b\x2e\x5c\x9a\xfe\x22\xfd\x9a\xe9\x2f\x5c\x5f\x82\xa6\xe7\x04\x47\xc1\xf3\x69\x46\x32\xba\xa9\x1a\x20\x38\x31\x2a\xd0\xf5\xd2\xe9\x44\x82\x53\x6b\x01\x15\x6b\x01\x74\x33\x45\x8a\x24\xcf\x49\x7b\x53\x07\xba\x53\xcb\x50\x5c\xc4\x83\x9b\x73\x66\xaa\x01\xb4\x6b\x8c\x6e\xae\xcd\x5e\x23\x7e\x30\xaa\xa1\x03\x49\x37\x74\x2f\x98\x5c\x34\xff\x83\x4a\x0c\xe3\xd6\x65\x5d\x33\x83\x5c\x28\xa0\x15\xbd\x13\xcb\x06\x8e\xa9\xc5\x5e\xe9\x6f\xe1\xb1\x1a\xc9\x57\x8a\x79\x01\xf9\xf6\xed\xb0\x14\xff\xfa\x8d\xa0\xdf\xbf\xdf\xf8\xf4\x39\xc1\x73\x9c\x8b\x09\xbc\x87\x98\x58\xcb\x33\xb4\xe4\x3a\xbf\x87\x74\xa9\x9e\x33\xb8\x14\xfd\x45\x98\x97\x8e\x11\xec\x7f\x8f\xfa\x44\x90\x32\x46\xee\x60\x5a\x0a\x8c\xfb\x42\xc2\x07\x2e\x07\x4d\xee\xcf\x8f\x0e\x9f\x4c\x8c\x23\x22\xfb\xf7\x80\x3b\x8d\x91\xa1\x2f\x6c\xdb\x76\xe0\xea\x5d\x21\xc9\x97\xe7\xd0\x33\x74\xd7\x7b\x57\xf4\x98\x6d\x1f\xcc\x3d\x7a\x5b\x36\x2c\x17\x7a\xfa\xe2\x8a\xe7\x87\x81\x35\xc5\xb7\x62\x6b\x9e\x4c\xea\x8f\x78\x67\xeb\x7d\x1a\xe7\x8a\xe9\x5c\x3b\x1b\x24\x61\x4e\xb3\xb8\x66\x2d\xfb\xe4\xeb\xd1\x15\xdb\x1f\xbc\xbe\x1d\x9d\x1c\x3a\x77\x46\xd0\xa8\xf2\xa2\x5c\x8b\xa0\xa1\xb9\x03\xd7\x85\x5e\x9a\x3e\x23\x5f\xbd\x7a\x19\x2c\x02\xb7\x4a\x87\xb6\x1d\x5d\x86\xe6\x95\x20\x16\x5e\xbc\x16\xe1\xc2\x8b\x88\x62\xf9\x92\x01\x03\x7b\x93\xf5\xb0\x22\xf9\xa9\x51\x34\xb6\xc2\xfb\x23\x6b\x34\x97\xb3\x75\xdd\x4f\xf0\x8a\x6d\xec\x3f\xb9\xd7\xf0\xd9\x47\x0f\x7a\xbf\x66\x10\x51\xc2\xfe\xa8\x3d\x44\xc7\xfa\xc8\x1c\x74\x3b\x6d\xe3\xa7\x2e\x3c\xd7\x72\xbc\xa3\x36\x0a\xc5\x12\xdf\xaf\xf7\x10\xf4\x7c\xdb\x84\x1b\x0f\x78\x1e\x5c\xd8\x1e\x12\xb8\x85\xeb\x81\x85\x8d\x04\x29\x93\xe5\x47\xef\x20\x3b\xcb\x84\x97\x9b\xad\x0a\x74\xc3\x77\x62\x5b\xed\xb5\x11\xbc\xad\x0d\xdf\x5f\x94\xa8\x2c\x11\xc3\xbd\x0c\xfb\xc7\x11\xaf\xac\xce\xbe\xb2\x61\x39\xe7\x8b\xfa\x2d\xd4\xc4\xbf\x10\xf4\x3b\xc2\x8b\x05\x24\xfa\xdf\xff\xf5\x1b\xa1\x29\x8a\xa0\xbe\xa7\xae\x55\xfc\x18\xf6\xf0\x92\xc5\xab\x3c\xf1\x98\x7b\x45\x1b\x51\xa1\x2d\xf0\xba\x54\x42\xc1\xd9\xf1\x09\x2a\xae\x2f\xed\x49\x38\xd0\x4d\x6c\x40\x44\x6a\xc6\xe8\x40\x70\xf4\xa5\x4b\x3e\xb1\x33\xef\xa3\x9c\x62\xc5\xba\x3b\x76\xc6\x88\xd8\xd2\x85\xb7\x76\x98\x4b\x9e\xb1\x33\xfc\xa3\x3c\x4f\x10\xf7\xf3\xbc\xd8\xe4\xce\xae\x43\x73\x05\x0d\xcb\x86\xef\x6c\x69\xa7\xa1\x9f\xd8\x88\x62\xe5\x8e\x27\x54\x70\xa8\xd7\x7e\xbb\x67\x1d\x4e\x56\xf4\x9e\x22\x96\x57\x36\x9a\xa4\x12\x0e\x75\xe0\x04\xe2\xb9\x22\x12\xa3\x5d\x55\xc6\xa9\x46\xf4\xb0\x32\x4e\x45\xf1\x6f\x27\xbf\x4d\x1e\xde\x52\x7c\xea\x96\x77\xc7\x2a\x5c\x8f\xb2\x8a\xdd\x02\x78\xe4\xd8\x15\xee\xf8\x37\x22\xb5\xee\xba\x3e\x74\xee\x87\x92\x2d\x25\xf5\x74\x7a\xa1\x16\xcf\xd0\x17\xfa\x95\x8c\xe2\xe6\x59\xf0\x9f\x3c\x55\xc5\xac\x33\x76\x57\xe5\xa1\x53\x54\xfc\xf3\x9f\x75\x8e\x8a\x61\x3e\x7e\xfe\xb9\x85\x1a\x2d\xda\x19\xd2\x7e\x25\xff\x95\xee\x78\x89\x72\xef\xc3\x46\x1e\xbf\x87\x16\x99\xb9\xb7\x49\x84\xe2\x3b\xce\x1b\xe7\x06\xb8\x09\xef\x52\x5e\xbd\x2a\x4f\x81\xa9\xc1\xd4\x83\x7d\x5c\x39\xf1\xdb\x76\x8f\xc7\xea\x53\xed\xfc\x71\x15\xfd\x87\xf5\x23\x59\xca\x36\x4d\x39\xde\xc6\x81\xae\x6f\xa4\x46\x77\x6f\xb3\x80\xef\x9e\xe7\x4e\xb7\x58\x1f\xd7\xe7\xd9\x7d\x8b\x47\x95\x7a\x76\xc7\xf9\xdb\x5d\x8a\xdb\x7f\xe8\x96\xf6\xf6\x22\x69\x8a\xb8\xcf\xec\xce\xee\x70\x3f\xa2\xa8\x
42\x70\xb2\x56\x2d\xe7\x9d\x62\x28\x52\xe0\x7b\xfc\x3b\x3a\xbb\x0d\xe9\x7e\x18\xaf\x2a\x76\x8b\x9d\x1e\x52\x15\x7b\xcd\x53\x51\x71\xc0\xd7\xfb\xc5\x2e\xf2\xed\xa5\x9c\xeb\xb4\xc6\x95\x6a\x1d\xcf\x57\x89\x92\xd8\x26\x73\xa3\x7a\xa9\x21\x16\xea\xa5\x5a\x5f\x6c\xf5\xf1\xca\x98\x98\x34\x4a\xdd\x4a\x53\xec\xe7\x8b\x4d\xbe\x3b\x64\xda\x79\xa6\x39\xc2\x2b\x2f\xaf\x08\x17\xbd\x98\xfd\x7f\x59\x14\x7d\x45\xf0\x57\x04\x7d\x8d\xb4\x8c\xbc\xbc\xbc\x22\x2f\x7c\x9b\xe7\x79\xfe\xf7\xef\x97\xf0\x02\x7e\xb8\x76\xfa\xf7\xfb\xaf\xf7\x18\xf2\xd4\x30\xd7\x1a\xf3\xd4\x98\x1c\xf2\xc5\xca\x68\xd8\xc1\xfb\x42\x13\xef\x37\xc9\x5c\xbf\x5c\xe9\xb7\x19\xb2\xd8\x6f\x09\x4d\x11\x6f\x57\x06\xe4\xb0\x53\x69\x56\x3b\xa2\x20\x54\xf0\x97\x57\x04\xdb\x53\xe3\x02\x6a\x2c\xc5\x72\x1c\x41\x52\xdc\x3b\x1c\x89\x54\x8e\x57\x56\xe5\xbc\x9a\xf5\xc4\x02\x5f\xaf\x51\x7d\x74\x95\x93\x75\xaa\xa3\x22\x69\x42\xe1\x58\x95\x22\x68\x08\x69\x56\xc1\x24\x9c\x91\x28\x89\xe5\x54\x9c\x00\x2a\x45\x60\x98\xc4\x50\x34\x07\x70\x52\x05\x2a\x46\xa2\x04\x50\x50\x89\xc2\x25\x9a\x20\x24\x94\x91\x20\xc7\x05\xaa\x42\x9f\x7c\x05\x18\x14\x83\x03\x1c\x12\xb8\xaa\xe2\x24\x0b\x50\x46\x42\x21\x83\xaa\x0a\xa6\xd2\x0a\x81\xb1\x32\xa6\x02\x59\xc1\x51\x89\x96\x65\x94\x95\x09\x42\xa1\x18\x86\xc2\x29\x8e\xa5\x59\x0c\xa7\x00\x46\x07\x0b\x1b\xae\xd4\x0b\xff\x5f\xfb\xca\x8d\x04\x9d\xdc\x66\xb7\x5d\x21\xc7\x14\xcc\x02\x57\xc1\xd1\xcd\x2c\x97\x71\x51\xcd\x73\xd7\xd5\xf5\x0e\x1b\x29\xdd\xe1\x18\xe4\x6a\xa0\xa4\x05\xf2\x45\x91\xac\x83\x9d\x8d\xb7\xdf\x45\x9e\xf0\x23\x8c\x0c\xc5\x72\xf3\xff\xc0\x44\x3e\xf5\xf5\x72\xe6\xec\x57\x0c\x15\x65\x64\x40\x42\x59\x0a\xcc\x43\xc5\x55\x82\xe6\x64\x1c\x10\x80\xe5\x18\x82\x86\x24\x0d\x01\x4e\xa2\x04\xa5\xa8\xa4\x02\x25\x4c\xe5\x48\x4e\x91\x09\x8c\x50\x38\x4a\xa5\x01\x23\x53\x72\x60\x64\x9f\x61\xec\xa4\xa2\x70\x04\x21\xd1\xb8\x4c\xa1\x84\x42\x42\x06\x03\xaa\x44\xe1\x94\x4a\x50\x14\x43\xc8\xb4\x84\x92\x38\xab\xb0\x2a\x01\x59\xc0\xb0\x32\x4b\x72\x04\x8b\x72\x14\xa0\x14\x9a\x61\x58\x49\x7d\x09\x03\x1e\x46\x51\x1c\xc5\x70\x0c\x7e\xb0\xd8\x3c\xde\x9a\xcc\x30\xd1\xa7\x2c\x54\xaa\x31\x43\xd2\xdc\x36\x57\xfd\x4d\x99\x18\xd8\xd6\x3c\xb3\x2a\xf1\x4d\x2f\x8f\x09\x78\x83\xc9\x31\xf4\xa4\x0f\x4b\xc3\x29\x91\xa9\x8f\x89\x71\xaf\x32\x9f\x4a\xb4\x97\x19\xe9\xf3\x1e\xc9\xf2\xc2\xa0\xef\x4c\x33\x55\xd1\x20\x1a\x63\x4e\x14\xbd\x7e\xa8\xe1\x51\x6b\xd0\xe8\x86\x7f\x55\x8f\xff\x44\xe1\xcd\x3d\xfd\xff\x9a\x6f\xb5\xf7\xb6\xb3\x1e\x8a\x13\xb5\x4a\x0d\xb7\xa5\xe1\x06\x5f\x30\x3d\x4b\x6c\xe7\xa7\xe3\x09\xb5\x5b\x96\x9c\xb5\xa5\xe1\x33\x74\x3e\x5a\xb6\xc5\x3a\xef\x78\x22\xde\x6b\xe2\xf5\x12\xcf\xf5\xcc\xf2\xca\xeb\x8e\x76\x83\x51\xab\xec\x16\x05\x71\xb6\xa3\x05\xd8\x98\xd6\x9a\xbc\x01\x46\x43\x85\x5c\xad\xc3\xa1\x52\x2c\xba\x50\x4d\xb3\x8a\x83\x45\x17\xd0\xda\xdf\x67\x7a\x7f\xd3\xeb\x4e\x8b\xc6\x15\x96\x92\x49\x92\x26\x24\x1c\xd0\x1c\x8e\x33\x90\x51\x18\x02\x63\x54\x95\xa2\x70\x46\x82\xb4\x82\x11\x14\xc3\x52\x90\x54\x51\x09\xa8\x0c\x4d\x31\x1c\x24\x55\x5c\x55\x14\x02\x93\x00\x15\x86\xde\x4f\xf0\x0a\x12\x32\x38\x40\x71\x9a\xe0\x30\x80\xb2\x1c\x2b\x2b\x2c\x47\x93\x18\xa4\x01\xe0\x48\x52\xe2\x08\x9a\x54\x71\x42\x21\x81\x22\x41\x96\x22\x14\x55\x66\x70\x86\x60\x25\x4a\x56\x18\x00\x95\xc0\xa2\x89\x98\x45\x63\x07\x8b\x5e\xab\x42\xcf\x75\xe7\xfa\xaa\xbe\x03\xb2\x30\x5b\xd6\x64\x9c\x2a\xd3\x7a\xbb\x30\x52\x7b\xd0\x55\x8d\x1a\x51\x28\x72\x86\x0a\xcc\x8d\x2c\x51\x3c\x41\x2e\x57\x15\x36\x53\xde\xae\xfc\x9c\x62\x74\xe5\x06\x74\xb5\x9a\x63\x8b\x9d\xb5\x2b\x71\x4b\xae\xd7\xe0\x71\x52\xd6\x97\x68\xa8\xe1\xd0\xa2\x63\x46\x64\x10\xaa\xb8\x52\x27\xca\x38\xb7\x69\x95\xf3\x2c\x3d\x5b\x12\x4a\
x95\x12\x84\xfe\x66\x22\x5b\x36\x2e\x8d\x76\x59\xa1\x32\x66\x9a\x9b\x6c\xa7\x29\x2f\xf9\x45\xb3\x63\x55\x17\x0d\xbc\x36\xc9\x51\xcb\x65\xbf\x4b\x89\x73\x76\x86\x09\x78\x66\xda\x23\x58\xd9\x6c\xd6\x47\x22\xf4\x89\xd0\x62\x1b\x29\x16\x5b\x74\xd3\x56\xfd\xff\x77\x8b\xbd\x92\x81\xa4\xdc\x6c\x79\x22\x9f\xb9\xac\xd4\x3f\x03\x76\xad\x94\xfc\x1c\xe6\x79\x35\xf8\x09\xb4\x2b\xb5\xdc\x27\x10\xaf\x54\x5d\x3f\x9a\x0d\xc6\x2a\xaf\xa7\x9c\xba\x90\x17\xfa\xf9\x7c\x95\x68\x8b\xed\xfa\xa0\xd6\x19\x37\x06\x8d\x92\xd8\xab\x34\xc8\x06\x51\x1a\x17\xda\x83\x52\xb3\x43\x55\xca\xa5\x4e\x61\x9c\x6b\xb6\xe8\x71\x69\x48\xf6\x88\x1e\xd3\x8c\x76\xb4\x28\xef\x6a\x74\x97\xf9\xe2\x4c\x2e\xaf\x47\x83\x86\xdc\x58\xae\x9d\x89\x39\xd9\x2c\xb6\x64\x25\x6f\x8f\x3a\xb4\xbc\x19\xcf\xd6\xa5\x5d\x76\x5c\x97\xe6\xdc\xc9\xfa\xc2\x0d\x20\x1f\xfe\x19\xb8\x59\x71\xda\x21\x07\xa0\xa2\x92\xde\x4c\x99\x36\x60\xb7\xb4\x5a\x12\x14\x14\x3a\xad\xd9\x9a\xb7\x07\x5d\xbf\x42\x6a\xf8\x48\xde\xcc\x50\x14\xc7\xa2\x9c\x47\xe6\xfa\x9b\x68\x17\xd1\x8e\xff\xe4\x42\xd0\xf5\xf1\xff\x0b\x3c\xcf\xe5\x63\x5e\x9c\xcb\x35\x24\xaf\xba\xac\x94\xdb\xdd\xe5\x56\x2a\x37\xca\x15\x73\x49\x9b\xd4\x92\x32\x57\xd9\x71\x4e\xaf\x59\x5a\x47\xf6\xba\x1b\x81\xc8\x8f\xd8\x10\x4e\xeb\xb4\xda\x26\x4d\x6f\x46\x3a\xb7\xc8\x48\x85\xee\x2a\xbf\x63\xe6\x0e\xcd\xb4\x67\x3b\xab\x6e\x00\x71\x6d\x65\x4b\xd8\xc0\xd8\xd0\xa6\x63\x6e\xb2\x68\x15\x73\x89\xa1\xd2\x9c\x50\x0b\x3e\x57\xcc\x4e\x07\x85\xc6\xa4\xd8\x17\x56\xee\xb8\x43\xb1\x13\x69\x3e\xae\x2b\xe3\x82\x9a\x6b\xff\xfe\x7d\xbe\x5b\x7c\xf2\xd2\x10\x4f\x2d\x4d\x23\xb9\x34\x85\xdc\x6a\x5e\x50\xf5\x25\xab\xee\x48\x49\xc8\x90\x25\x7c\x50\x5b\x6f\xe8\x2d\xbf\xf0\xe7\x58\x25\xbf\x59\xe1\x0e\xd6\x58\x17\xa4\xe9\x56\x70\x4e\x4b\x73\x1e\x0f\x3f\xac\x7a\xb6\x58\xc9\xd3\x99\x2d\x3e\x6a\xd1\xf5\x6e\xa6\xb0\xd4\x0b\xfd\xe1\x1c\x57\xc7\xb2\xad\x19\x24\xb1\x9a\x77\x6d\x7b\x8a\x6f\x30\x43\xa2\xd8\x5a\xb1\x34\x72\xc9\x55\x25\xd3\x2b\xaf\xcd\x6d\xbe\xee\xd2\xc2\xb2\x9a\x5b\x35\x96\x34\x9a\xab\xd6\x95\xa5\xd4\x12\xa7\x19\xdf\x59\x8d\xf2\x5a\xa4\xfa\xeb\x7e\x96\x56\xda\x7f\xc0\xcf\x0e\xe5\xfd\x53\x26\x4b\xca\x34\x54\x70\x1c\x60\x32\x4d\x62\x38\x90\x70\x5a\xa6\x09\x94\xc6\x20\x03\x24\xc0\xa9\x1c\x90\x38\x0e\xaa\xaa\xc2\xa2\x34\x8e\xd3\x2c\x8b\x61\x0c\xae\x90\x12\x86\x4b\x0c\xca\x31\x6a\x7c\x51\x43\x25\x17\xc3\x3f\x37\x82\x55\xed\x8a\x6b\x29\xa7\xf4\x37\xeb\xad\x93\xf7\x16\x8a\x59\x96\xea\x9a\x2c\x18\x4a\xc5\xde\x95\xf4\x16\x3f\x6a\x71\x9a\xe7\xf7\xd0\xf7\x62\xfe\x95\x52\xfe\x87\x27\x7f\x2a\xe7\x1f\x27\x7f\xf8\xfe\x4c\xf4\x70\x1e\x72\xf1\x0a\x12\x12\x0c\xbd\xf0\x89\x14\x24\x13\x7a\x6b\xcb\x99\xdb\xc0\x75\xed\xa9\x03\x5c\x98\x82\xd4\x83\xae\x87\x74\x0b\x25\x44\x8c\x84\x91\x5f\x48\x17\xda\x1e\x5c\x48\xd0\x41\x70\x14\xa3\xee\x19\x48\xb5\x1c\x19\xba\xb2\x6d\x99\x26\xdc\x78\x06\xf0\x4d\x79\x7a\x3e\x50\xf8\xf0\xdb\x3d\x60\x51\x2e\xb8\x2f\x2e\xb9\xe9\xf3\xff\xbf\x61\x2d\xea\xab\xa7\x2f\xe0\xd7\x9f\x08\x1a\x95\xa6\xbe\xee\xbf\x4d\xf4\xf5\x27\x12\x5d\x0f\xdf\x9c\x02\xf7\xeb\xcf\xe8\xd1\xbb\xf0\xcd\x7f\xef\x85\x55\x08\xef\x13\x5c\x80\x8d\xb7\x71\xf5\xdd\x9d\xe2\x0e\x74\xa1\xb3\x7a\x4f\xf8\xcb\xbf\xef\x52\x05\x70\xbd\xf0\x99\x11\x65\x7f\xef\x36\x45\x15\x4f\xe7\xc8\x77\xf0\xd8\x47\x5b\xe0\xc8\x53\x7d\xb5\xbf\x78\x65\x49\x4e\x6b\x80\xed\x15\x12\xaa\xc3\xf9\xfa\x13\xf9\xba\xc2\xb0\xbf\xb0\xbf\xd0\xaf\xfb\x0b\xb2\xef\x38\xd0\xf4\xea\xe1\xd4\xbe\xfe\x44\x88\xe4\xfb\xb9\xf0\xb1\x9e\x40\x6f\xff\xfb\xa8\xc8\x93\x4a\x8f\x92\x01\x30\x1e\x4c\x54\x91\x15\xc8\xa9\x0a\x2a\x11\x4c\x10\x05\x58\x4e\xa6\x31\x8c\x66\x29\x8c\xe4\x68\xa8\x92\x0c\x47\x60\x14\x83\xd2\x34\x21\x49\x28\xca\xa2
\x0a\x8d\xa2\x80\x25\x48\x92\xc4\xb8\x3d\xa3\x23\x6e\x60\xc5\x89\x05\x3c\x5e\x09\xe7\x1e\x98\x5c\xe2\xd2\xbf\xcf\x3e\xef\x9a\xc0\x0e\x78\x41\x95\xc0\x00\x8e\x02\x82\x63\x21\x64\x08\x19\xe2\x38\xce\x50\x10\xb0\x18\xc3\x30\x2c\x2d\x01\x99\x22\x69\x8a\x56\x09\x42\x91\x65\x52\x25\x54\x28\xd3\xa8\x42\x51\x8a\xa2\x62\xc1\x49\xfc\xeb\x97\x94\x11\xae\xe8\xe0\xd9\x12\xd3\xc7\x75\x80\xbd\x5e\x5e\xb3\x7c\xcf\xf6\xbd\xcf\x9d\xfb\x2d\x0d\x3f\x3d\xeb\xff\x6a\x0d\xdf\x67\x65\x7f\x74\xf0\x47\x07\x7f\x74\xf0\x47\x07\x7f\x74\xf0\x47\x07\x7f\x74\xf0\x47\x07\x7f\xa3\x0e\xc2\xbf\xfe\xcf\xfd\x87\x17\x57\xb6\xc3\x87\xe7\xd3\x5e\x2f\x87\x13\x7a\x54\x7c\x2a\x2e\xb5\x69\x95\xc8\xb9\x25\xac\x57\x29\x08\xae\x80\x4f\x70\x79\xe2\xfa\xb9\xf5\x72\x80\x42\xb3\x31\xac\xb2\x39\x39\x8b\xe7\x71\xaa\x75\x2a\x51\x15\xc2\x7f\xd7\xbd\x32\xd3\x15\xa6\x9b\x79\x71\xe9\x2e\x36\xb3\xdc\x94\xa2\x7d\x3b\x03\xfc\x09\x93\xc5\xf9\xb1\x0e\xaa\xc5\x51\xbd\xef\x76\xbd\xb5\x91\x3d\x56\x04\x6a\xd3\x27\xcb\x34\x8f\x54\x14\x72\x7c\x6d\xca\x56\x26\x83\x46\x45\xdd\x1a\xb3\x4a\x4f\xb0\x6c\xde\x12\x98\x0d\x37\x93\xf1\x69\x96\xda\xf2\x59\x93\x62\xd7\xcd\x02\x50\x74\x77\xca\x58\x0b\xd0\xec\x12\xbb\x71\x93\xcf\x34\xfd\x92\x31\xed\xc9\x9c\xab\x2e\x9a\x1a\xe7\xe3\xab\x1d\xac\xcb\xbc\x8d\xd5\x75\x4c\xc0\x72\xc7\x0a\x47\x7e\xfc\xe4\x5d\x82\xdc\x23\x65\xb0\x76\x7e\xac\xe6\xf0\x41\x2f\x47\xb0\xf6\x78\x83\x6e\x97\x42\x5b\xc8\xfb\xac\x3a\x26\x44\x4b\xcd\xc8\x5a\x8b\xca\xa8\x8d\x82\x86\x9b\x63\xa7\x0a\xe9\xda\x62\x36\xf7\x58\xbc\xa0\xb5\x66\xce\x74\x32\xee\x13\x2d\xa9\x42\xcd\xac\x96\xe4\x39\x2c\xa1\x13\xbc\xa0\x74\x49\xaf\xeb\xb5\xfb\x51\x59\x6b\x30\xe6\x6a\xfd\xee\x6c\xd9\xda\xb8\x1b\x69\x52\xeb\xf3\x80\x76\xec\xfe\x6e\x01\xd6\xb3\x29\xba\x5e\xae\x50\x05\xdb\x0c\x07\x72\x2e\xbf\x5e\x1b\x82\xd2\xaa\x4d\x66\x4c\x45\xb4\x8c\xac\x59\xc3\xa9\x6e\xae\xdc\x9e\x14\xd7\x45\xa2\xef\x00\xac\xeb\x57\x35\x94\x5e\xd8\x8d\x45\x54\xab\x7b\xc2\xde\xb4\xe3\xfa\xaf\x9f\xbd\x2b\x13\xd9\xe3\xc7\xec\xb7\xdd\xca\xe5\xd6\xfe\xca\xf5\x8c\x5d\x46\x47\x57\x1a\x6d\x69\x58\xc9\x16\x4d\xbc\x22\x8c\x6b\x50\x64\xa8\xe2\x12\x8c\x15\x57\x99\x0c\x33\x6a\xbe\x93\x1b\x30\xcd\xfa\x26\x3b\xdf\xb8\x35\x56\xd3\x99\xa6\xbe\xd4\x79\x69\x27\xf8\x1e\xdf\xcd\x13\x60\xb1\xdb\x8d\x55\x67\xc9\x60\x6b\x2b\x2a\x3b\xb2\x76\x73\xbb\xed\x0a\xec\x4a\xb4\xd7\xcb\xa6\x65\xec\xa4\xb9\xb4\xac\xf1\xa2\xc2\x89\xf4\x26\xd3\x37\xe5\x46\x0f\x1f\x70\x4b\xb5\xb4\x3e\xd9\x8b\x40\xcf\xa0\x4e\xcc\x16\x56\x95\xed\x95\x8d\x42\x16\x6a\x32\xc1\xb4\x46\x5e\x45\x10\x76\xc3\x01\xbb\x1e\xe8\x93\x1c\xc8\xfb\x54\x9d\x0a\x6b\xa5\x93\x53\x45\x3b\x77\x5e\xf6\x3c\xfd\x19\x2a\x99\x6f\xe2\xf9\x2c\xdf\x24\xa9\x71\xae\x40\x78\x95\x41\xa9\x89\x75\x08\x1e\x6d\xc0\x79\x8b\xad\x75\x68\x53\xc4\x78\x0e\x0e\x75\x65\x5b\xdd\xdf\xc7\x0d\x5f\x80\x99\xaf\xe6\xeb\x10\xae\x91\x2d\xf8\x25\x0e\x77\xbd\xb6\x85\xce\xda\xaa\xe7\x14\xfd\x55\xa7\xe3\xe0\xa5\xb1\x07\x58\x2d\x5b\xe0\x86\xd2\x62\xd8\xaf\xed\xf4\x3e\x3b\x63\x26\xd9\xae\x80\x97\xa7\xd9\xac\xa3\x41\x74\x86\x8e\xda\xec\x76\x2e\x11\x05\xb6\x6e\x72\x3b\xd5\x76\x5a\x02\xd3\xcb\xf4\xb7\xbb\x58\x45\x31\x77\xaa\x30\x7e\xd8\x9e\x6e\x95\x56\xaf\x3c\x92\xfb\xc4\x4d\x91\xab\x4f\x40\x7e\xb4\x62\x99\x78\x0a\xf2\x18\xeb\x25\x1c\xe0\x38\x23\x13\x9c\x4c\x93\x80\x24\x55\x99\x01\x92\x42\xca\x1c\xcd\x62\x1c\x49\xd1\x2a\x4a\x70\x1c\x87\xd2\x0a\x86\xcb\x24\x43\x2b\x0c\x2a\x91\x28\x2e\xa9\x8a\x84\x73\xb4\x42\x03\x62\x7f\xcb\x3f\xb1\x23\x34\xce\x2c\x22\xa7\x67\x73\x68\x1d\xad\x95\xb7\xde\x74\x2d\x62\xc6\x18\x05\x5b\xdb\xc2\x38\xb1\xb2\x59\xd5\xf3\xdb\x26\xe5\xe5\x8a\x72\x7e\xb0\x5a\x97\xb8\x35\xa1\x79\x4e\xd3\x9c\xf0\x77\xbc\xa
e\x3e\x3a\x12\xae\x70\xfe\x99\xf1\xc7\xd9\x8c\x7c\x86\x77\xe7\xf8\x97\xf7\x3c\xd2\xf5\x4e\xd0\x12\x64\x50\x55\x62\x18\x16\x57\x39\x16\xc5\x64\x45\x86\x8a\x8c\xe1\x28\x0d\x71\x4c\xe5\x38\x9c\x23\x64\x8e\x63\x69\x14\x60\x14\x24\x49\x4c\x25\x19\x92\x63\x48\x06\xa0\x80\x60\x80\x14\xe9\x1d\xbf\xd4\xfb\x3f\x34\xef\xcf\xd2\x3b\x4b\x9e\x3e\x9f\xfa\xf0\xc4\x93\x7a\x67\x08\x52\x82\x1c\xc9\xd0\xb8\xa2\x90\x12\xa3\x72\xac\x4a\x93\xa4\x02\x71\x94\xc1\x19\x42\xc5\x00\x46\x70\x2a\x45\x00\xa8\xca\x38\xc0\x20\x94\x68\x8c\x65\x69\x0c\x63\x65\xc0\xb0\xf8\xe1\x06\x45\x8a\xbd\xc7\xe6\x9d\xf7\x2d\xc2\xf2\x48\x6a\x99\x6f\x15\x37\x76\x3b\x4b\x58\x15\x31\xb3\xc3\x98\xce\x56\x77\x31\x43\x6d\x94\xc6\x8b\xf6\x50\x73\xfc\x6e\xa6\x17\xca\xd7\xfa\xf9\xcc\x7e\x52\xda\x35\xdd\xbe\xab\xf7\xc2\x73\xe3\x37\xe5\xc7\xc6\xbf\x79\xa3\x29\xfd\xb9\xe4\x8f\x07\xae\xbf\x3b\x6a\x95\xeb\x6c\xa5\xbd\x6a\xcf\x25\x01\xaf\xf0\xc4\x70\x30\xeb\x38\xc2\x62\x36\x42\x51\xb5\xcc\xba\xf5\x2a\xb3\x40\x8b\x9d\x75\x6d\x98\xe5\x47\x04\x7f\xdc\x07\xc3\xd7\x8d\x7d\x30\x7a\x39\x4b\x91\xae\xc3\x26\xd0\x66\x9b\x06\xe8\xb7\x38\x3a\xb7\x53\x5d\x0e\xa2\xb2\xe5\x88\x93\xd1\x2e\x37\xac\xcd\x4b\x96\x70\xd8\xf7\xf8\xfc\x80\x5f\xc5\x9f\x72\xcb\x45\xde\x11\x2c\x72\xc1\x23\x84\xf5\x02\xb4\xfc\x96\x52\xea\xf6\x37\x0a\x5f\x82\x12\xdd\x6c\x43\x6f\xdb\x16\xaa\x43\xb0\x33\xa4\x6e\xa3\x31\x5d\x54\x04\xb1\x5e\x20\xdd\xe5\xb4\xb8\xec\x4f\xe4\x76\x0b\x35\x32\xa3\x6c\xd3\xce\x58\xee\x70\x21\xd2\x99\x52\x7f\x2c\xb9\x3b\x86\x6a\xe3\xb3\x32\xb9\x6a\x34\x82\x5c\xde\x5d\x56\xcd\x35\xb5\x99\xdb\xc5\x6c\x65\xc9\xf5\x6a\x20\x67\x94\xa4\x75\x73\x32\x2f\xa3\xdd\x4d\x45\x1a\xe5\x51\xde\xc9\x4a\xc2\x10\x5b\x36\x8e\xc4\x6e\xe8\xe0\x64\x19\x89\x3b\x79\x31\xd9\xe8\xe9\x13\xfe\x9f\x8b\x16\xfc\x27\x44\xab\x52\x6f\x98\x73\x9f\x18\x9f\xe7\xff\xb9\xa8\x91\x16\x2d\xff\xee\x2d\xea\x71\x27\xcb\x9f\xb1\xbf\x30\xb8\x0f\x26\x9b\x37\x9c\x8c\x27\x36\x43\x69\xd3\x6a\x4a\xe6\xa4\xa1\xe7\xca\x25\xa1\x5e\x6b\xfb\x6a\xad\xae\xf9\x3d\xb7\x52\xdb\x6c\x79\xb7\xd5\xa2\x4a\xdc\x64\x46\xd1\x18\x18\x99\x2b\x31\x5b\x19\x74\x6a\x52\xc9\x2d\xca\xba\x57\x96\x34\x9d\x53\x86\x03\x45\xe8\x8c\x57\x8b\xc1\x30\xaf\xef\xaa\xca\xa2\x5e\x2d\x04\x8e\x20\x3a\xd4\x3a\x43\xc0\x7c\x66\xcc\xcb\x44\x93\xc8\x55\xcb\xe4\x34\x33\x11\x7a\xdb\xc9\x54\xb3\x06\x54\xb1\x82\x2a\x35\xa5\x62\x09\x22\x7d\x7a\x02\xeb\x9f\x77\xb2\x67\x8d\xfc\x49\x27\x5b\x32\xd9\x5e\x41\xfe\x44\x27\xe3\x99\x71\x9d\xe5\x99\x99\xa1\x15\x5b\x10\x55\xfa\x7d\x66\x50\x91\x0b\xed\x0d\xdd\xce\xae\x8d\xca\x52\x26\xfa\x05\x8c\x02\x35\xa2\xaa\x47\x8f\xdf\xfc\xed\x4e\xf6\xc9\xf9\xc8\xff\xb8\x13\x5d\xe4\x00\x32\xba\xb2\x3b\x78\x5d\xe9\x12\x99\x31\x37\xef\xc1\x6a\xa5\xb6\x63\x41\x91\xe2\x7a\xce\x6a\xbd\xac\xf8\xde\x54\x2f\x17\xc7\x5b\x53\x33\xd9\x23\xb5\x74\xe7\x8a\xad\xb2\xd1\xae\x93\x54\xe8\x45\x7c\xd2\xc9\x42\x7d\xed\x2b\x58\x27\xf1\x07\x76\xfa\x99\x79\xd2\xff\x95\xd7\x95\x1c\x3c\xe1\xe4\x9f\x32\xfe\x95\x00\x73\x7b\xfc\x7d\xd9\x89\xff\xe7\x9c\x8c\x4f\xc9\x7f\x3f\x3a\x7e\xd7\x2a\xb1\xf0\xc1\xfc\xf7\x38\xff\x4f\xcc\xbf\x3f\x7a\xee\x8a\xd9\xcf\x07\xfc\xff\x46\xde\x7e\xe3\xfb\x6f\x1f\x4d\xde\xcf\xbe\x03\x77\x88\x7b\xf1\xec\x3b\x7a\x5a\xff\xb4\x91\xa5\x04\xce\xeb\x28\xc7\xf4\x22\x7a\xc6\xff\x02\xe5\x6a\xcf\xae\xcb\x56\x86\xc7\x5e\x65\x87\xef\xc9\xbd\x33\xcd\x78\x4f\xc5\xb0\x93\x62\x0c\x31\x2c\xb7\xf3\x85\x42\x4a\x77\xa1\xe3\x80\x48\xab\x53\x6d\xf0\x9d\x31\x22\x14\xc7\xc8\xb7\xe3\x17\xaf\x5f\x8f\x5d\xbd\xae\xf6\xd8\x3a\xeb\xee\xf8\x59\xc4\xdd\x1b\xac\xdd\x5b\x94\xd3\x7a\x1e\x9d\x1a\x5a\x3e\x4d\x4f\x02\x66\x1a\xb3\xc3\x00\x49\x52\x51\x9b\x85\xdb\x0d\x91\x6e\x76\xf2\x7c\x9a\x6e\x02\x
3c\x8d\xf8\x8d\xd1\x91\xbe\x58\x6d\xf7\x8b\xc8\xe9\xbb\x97\x1f\x9a\xc9\xe7\xe8\xfb\x83\x13\xb8\x5c\x83\x53\x8f\xa8\x2b\xfd\x8d\x12\xfd\x5f\x9f\xe6\x1b\x81\xa5\x11\x8d\x0d\x93\x64\xb8\xef\xba\x93\xde\x6d\x27\xde\xec\xf6\x69\x72\x21\x56\x1a\xb7\xd3\x20\x49\x6a\xba\xfd\x1a\xf6\xe0\xb9\xd9\x5d\x26\xa5\xc9\xef\xf3\x4c\x63\x90\xa9\x84\xcf\x87\x4c\x5b\xf4\x2b\x2d\x68\x12\xad\x8e\x3f\x83\xa9\xeb\x4b\x57\x38\x1e\x86\x49\xb2\x0b\xbb\xd7\xdc\xe8\x47\x73\xd1\xdf\xf9\x69\x92\x27\xc0\x34\xa2\x67\xc3\xdd\x15\xc3\x62\x0f\x5c\x9f\x77\xb9\x7e\x9a\xed\x09\x30\x8d\xed\xd9\x70\x49\xb6\x87\xae\x2e\x37\xba\xa6\x5c\x74\xfb\x7e\x9e\xef\x11\x30\x95\x6f\x72\xb8\x24\xdf\x63\xe3\x95\x1b\x0d\x55\x2e\xda\x9e\x3f\x4d\xf8\x04\x98\x46\xf8\x6c\xb8\xab\xa9\x41\xd4\x50\xe5\xf5\xd4\x2d\xe5\x66\xc7\x8c\x94\xb6\xf0\xcf\xcf\x23\x06\x99\x3a\x93\xf3\x21\xd3\xa2\x84\x0b\x97\xaf\x87\x16\x11\xd7\xbb\x59\x9c\xb7\xcb\xff\x04\xee\x37\x89\x3f\xc5\xfa\x2c\x45\x4d\xfb\x11\x81\xa7\xf9\x27\x41\xd3\x26\x91\x32\xec\xd5\x99\xc4\x3b\x4d\x5c\x4b\x35\x8f\x3f\xb0\xb0\xe7\x1e\xfe\x1c\xc3\x7d\x3d\x30\xa2\x5f\x6e\x38\xc3\x41\x9a\xe2\x29\x8b\xed\x77\xab\x62\x19\x91\x3c\x07\x42\xe4\xd0\xf0\xe6\x3b\x32\xac\x14\x3b\x45\x24\xde\x00\x07\x3b\x3e\xd0\x72\x99\x68\x26\x7f\x33\xe2\x51\x96\x49\x98\x80\xe4\x3e\x4f\x49\x50\x8c\x77\x41\x7c\x8d\xf7\x3c\x7c\x8d\x7a\x11\x5e\xb0\x3b\xfd\xf0\xc5\xa3\xc4\x8e\x08\x01\xa7\x93\x63\x24\x68\x5d\x4f\x15\x13\x3f\xd1\xf1\x0c\x85\x03\x48\xc4\x22\x16\x60\xee\x24\x92\xf8\x55\x91\x47\x89\xc4\x41\x02\x22\xc9\x44\xf8\x4e\x26\x89\x5f\x42\x79\x94\x49\x1c\x24\x60\x12\xfb\x5a\xdb\xfd\x34\x12\xbf\xde\xf2\x04\x93\x38\xce\x9e\xcc\x21\x4b\x48\x92\x89\x77\x3a\xbb\x15\xbe\x9e\x63\x74\x0e\x14\x50\x3a\x8b\x8d\xef\xea\xe8\xda\x8f\x16\x21\xb2\xb5\xb0\x0d\xe8\xc1\x70\xd8\xff\x17\x00\x00\xff\xff\x5b\x1d\x6a\x33\xe1\x68\x00\x00") + +func account_mergeCoreSqlBytes() ([]byte, error) { + return bindataRead( + _account_mergeCoreSql, + "account_merge-core.sql", + ) +} + +func account_mergeCoreSql() (*asset, error) { + bytes, err := account_mergeCoreSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "account_merge-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x33, 0x94, 0x5a, 0x86, 0xfc, 0x68, 0x89, 0x29, 0xf5, 0x4, 0x9e, 0x47, 0x87, 0xfb, 0x30, 0x69, 0xb, 0xab, 0x73, 0xe2, 0xf6, 0xe5, 0x6b, 0x70, 0x67, 0xf0, 0x2c, 0x48, 0x53, 0x41, 0x9e, 0x5c}} + return a, nil +} + +var _account_mergeHorizonSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5d\x79\x6f\xe2\x48\xd3\xff\x7f\x3e\x85\x35\x5a\x29\x33\x4a\x66\xe2\xdb\x38\xf3\xcc\x4a\x06\xcc\x11\xc0\xdc\x81\x64\xb5\xb2\x7c\xb4\xc1\x89\xb1\x89\x6d\x12\xc8\xea\xf9\xee\xaf\x7c\x81\x6d\x7c\x02\x33\xfb\xbc\x28\x8a\x00\x57\x57\xfd\xaa\xba\xba\xab\xab\xbb\xe9\xfe\xf6\xed\xd3\xb7\x6f\xd0\xc0\xb0\xec\x85\x09\xc6\xc3\x2e\x24\x0b\xb6\x20\x0a\x16\x80\xe4\xcd\x6a\xfd\xe9\xdb\xb7\x4f\xce\xf3\xfa\x66\xb5\x06\x32\xa4\x98\xc6\xea\x40\xf0\x06\x4c\x4b\x35\x74\x88\xfe\x4e\x7e\x47\x42\x54\xe2\x0e\x5a\x2f\x78\xa7\x78\x8c\xe4\xd3\x98\x9d\x40\x96\x2d\xd8\x60\x05\x74\x9b\xb7\xd5\x15\x30\x36\x36\xf4\x13\x82\x7f\xb8\x8f\x34\x43\x7a\x39\xfe\x56\xd2\x54\x87\x1a\xe8\x92\x21\xab\xfa\x02\xfa\x09\x5d\x4d\x27\x8d\xca\xd5\x8f\x80\x9d\x2e\x0b\xa6\xcc\x4b\x86\xae\x18\xe6\x4a\xd5\x17\xbc\x65\x9b\xaa\xbe\xb0\xa0\x9f\x90\xa1\xfb\x3c\x96\x40\x7a\xe1\x95\x8d\x2e\xd9\xaa\xa1\xf3\xa2\x21\xab\xc0\x79\xae\x08\x9a\x05\x22\x62\x56\xaa\xce\xaf\x80\x65\x09\x0b\x97\xe0\x5d\x30\x75\x55\x5f\xfc\xf0\xb1\x03\xc1\x94\x96\xfc\x5a\xb0\x97\xd0\x4f\x68\xbd\x11\x35\x55\xba\x71\x94\x95\x04\x5b\xd0\x0c\x87\x8c\xe9\x4e\xd8\x11\x34\x61\xaa\x5d\x16\x6a\x37\x20\x76\xde\x1e\x4f\xc6\x50\x9f\xeb\x3e\xfa\xf4\xdf\x97\xaa\x65\x1b\xe6\x8e\xb7\x4d\x41\x06\x16\x54\x1f\xf5\x07\x50\xad\xcf\x8d\x27\x23\xa6\xcd\x4d\x42\x85\xa2\x84\xbc\x64\x6c\x74\x1b\x98\xbc\x60\x59\xc0\xe6\x55\x99\x57\x5e\xc0\xee\xc7\xef\x10\x28\xb9\xef\x7e\x87\x48\xc7\xaf\x7e\x9f\x82\x9e\xb4\xf2\xda\x79\x00\x1d\x47\xce\x12\x16\xa2\x3a\x30\x77\xc9\xdb\x5c\x9d\x9d\x87\x28\x7d\xb6\x2e\x2a\x1e\x28\x0a\x90\x6c\x8b\x17\x77\xbc\x61\xca\xc0\xe4\x45\xc3\x78\xc9\x2e\xa8\xea\x32\xd8\xf2\x21\xe5\x74\x4b\x70\x1d\xdd\xe2\x0d\x9d\x57\xe5\x32\xa5\x8d\x35\x30\x85\x7d\x59\x7b\xb7\x06\x67\x94\x3e\x20\x39\x0b\x45\xb9\xb2\x1a\x90\x17\xc0\x74\x0b\x5a\xe0\x75\x03\x74\xa9\x94\x0a\xa1\xe2\x6b\x13\xbc\xa9\xc6\xc6\xf2\xbf\xe3\x97\x82\xb5\x3c\x91\xd5\xf9\x1c\xd4\xd5\xda\x30\x9d\xe6\xe8\xf7\xa9\xa7\xb2\x39\xd5\x96\x92\x66\x58\x40\xe6\x05\xbb\x4c\xf9\xc0\x99\x4f\x70\x25\xbf\x5d\x9e\x00\x3a\x5c\x52\x90\x65\x13\x58\x56\x76\xf1\xa5\x6d\xca\x6e\xdc\xe1\x35\xc3\x78\xd9\xac\x0b\x50\xaf\xf3\x20\x79\x54\x82\x6a\x96\x64\x1c\x74\xba\x85\x0b\x38\xfd\x84\xa2\x00\xb3\x18\x69\xc0\xfe\x84\x22\xbe\x59\x8b\x15\x72\xbb\xd6\x12\x42\xc2\x5d\x71\x5e\x89\xb5\x53\x60\x69\xe7\xd6\x80\x15\xe9\x80\xc4\x5d\xae\x1b\x2d\xf7\x2d\xbd\x08\xb1\xe1\xe1\x30\x72\x09\x55\xcb\xe6\xed\x2d\xbf\xce\x67\xe9\x50\x1a\xeb\xa2\x94\xa0\x28\x59\x10\x4a\xb2\x89\xc5\xa0\xb9\xe7\x92\xe5\xf7\x62\xe2\xae\x58\x65\x7a\x31\xd2\xb1\xb6\x65\x6d\xf2\x24\xef\x89\x25\x43\x06\x25\xc7\x05\x7b\x37\x58\x0b\xa6\xad\x4a\xea\x5a\xd0\x33\x83\x77\x5e\x51\x7e\x5d\x72\x6c\xb2\x8f\x68\x65\x11\x24\x17\x2c\x2d\xdf\x35\x5e\x11\x79\x1e\xe1\x2f\xe7\xef\x55\xa6\x53\x93\xfe\x5b\x27\x3e\x04\x43\x3f\xd7\x19\xf8\x82\x08\x16\x86\xb9\xe6\x57\xea\xc2\x1f\x30\x64\x40\x88\x51\x16\xd6\xb1\xfc\x78\x2f\x8b\x73\x51\xe7\xf4\x4a\xd7\xfa\xdd\x69\x8f\x83\x54\xd9\x93\x5c\x67\x1b\xcc\xb4\x3b\x29\xc8\x3b\xc5\xe9\x2e\xc0\xd9\xaf\xee\x6c\x4e\xee\xa7\xe2\xea\x07\x51\x7a\xcc\x0e\xa7\x2c\x57\x3b\xc1\x66\xce\x38\xdb\x02\xaf\xa5\x25\x47\x98\x14\x2e\x2d\x83\x82\xb4\x87\xd1\x6c\x61\x0d\x53\x5a\x7d\x19\xfd\x92\x59\x14\x2b\xeb\x8f\xfb\x8a\x11\xfb\x83\xbc\xc2\xba\xf9\x3d\x40\x19\x5d\xbc\x22\x05\x69\xfd\xe1\x5f\x71\x3c\xc1\x78\xb1\x08\xa2\x58\x1f\x92\x4d\x1c\xea\x12\x7c\x42\xa6\xd9\x1c\xb1\x4d\x66\x92\x40\xbc\x52\x9d\x8c\x43\x95\xc0\x17\x7d\xb3\x02\xa6\x2a\xfd\xf5\xf7\xd7\x02\xa5\x84\xed\x09\xa5\x34\xc1\xb2\xbf\x08\xfa\x0e\x68\xee\x54\x4c\x81\x12\x8a\x6a\x26\x16\x69\x4c\xb9\xda\xa
4\xdd\xe7\x32\xf4\xe1\x85\xc5\xe2\x80\xee\x06\x3a\x02\x9a\xc1\x23\xd0\xee\x0c\x1e\x8e\xae\x6e\xf1\x03\xf8\x1b\xa8\x8c\x22\xae\xea\x05\x38\xb0\xf3\x09\xcb\x8d\x63\x2c\xb4\xf5\xc2\x7a\xd5\x02\x5f\xac\xb5\xd8\x1e\x73\x24\xe1\xc7\x27\x6f\x16\x8e\x13\x56\xe0\x2e\xf8\x0e\x9a\xec\xd6\xe0\xce\x2f\xf2\x03\x1a\x4b\x4b\xb0\x12\xee\xa0\x6f\x3f\xa0\xfe\xbb\x0e\xcc\x3b\xe8\x9b\x3b\x39\x57\x1b\xb1\x4e\x7d\xf9\x9c\x03\x7e\x9f\x22\x1c\xa3\x0f\x7d\xc6\xb5\x7e\xaf\xc7\x72\x93\x0c\xce\x1e\x01\xd4\xe7\xa2\x0c\xa0\xf6\x18\xba\x0a\xa6\xdd\x82\xef\x2c\x97\xc9\x55\x5c\x72\xa0\xbe\x2f\x73\x6f\xa1\x5c\x7d\x22\xb6\xe4\xfa\x93\x98\x3d\xa1\x59\x7b\xd2\xda\xc3\x0a\xcf\xbf\x45\xc4\x1f\xb8\xc4\x80\x94\x51\xfe\x88\x89\x6b\x80\x41\xf7\x76\xbd\x18\x0f\xbb\xd0\xda\x34\x24\x20\x6f\x4c\x41\x83\x34\x41\x5f\x6c\x84\x05\x70\xcd\x50\x70\xbe\x30\x0c\x37\xdf\xd1\x7c\xf8\x81\xaf\x1e\xf0\x07\x75\x9b\x64\xcb\xbd\x67\xe7\xf2\x87\x46\xec\x64\x3a\xe2\xc6\xa1\xef\x3e\x41\x10\x04\x75\x19\xae\x39\x65\x9a\x2c\xe4\x6a\xdf\xeb\x4d\xbd\xfe\x6e\x3c\x19\xb5\x6b\x13\x97\x82\x19\x43\x7f\xf0\x7f\x40\x63\xb6\xcb\xd6\x26\xd0\x1f\x88\xf3\x29\x5e\x1b\xb9\x0d\xf1\x3c\xed\xf2\xd8\x5f\x4c\x39\x34\x49\xb9\x22\x3d\xd5\x79\xfa\x15\x90\xb0\x57\x71\xff\xd5\x49\x1a\x7e\xf9\x04\x41\x35\x66\xcc\x42\xb3\x16\xcb\x41\x7f\x20\x7f\x21\x7f\xdf\xfe\x81\xfc\x85\xfe\xfd\xe7\x1f\xa8\xfb\x1e\xfd\x0b\xfd\x1b\x9a\x78\x0f\x21\xb6\x3b\x66\x1d\xa3\xb0\x5c\xfd\x6b\xa2\x65\x0a\xc4\x81\x33\x2d\x93\x2f\xe1\x57\x5b\xe6\x3f\xa7\x58\xe6\x38\xa6\xfa\x76\xd8\xc7\xe1\x62\x86\x38\x84\xed\x23\x8e\x2e\x62\x08\x1a\x3b\xb6\x82\x7e\x1e\x7a\x80\x1b\xef\xeb\xc9\xe3\x80\x85\x7e\x86\x5b\xc4\xd7\xa4\x56\x7b\x51\x8c\x71\x86\x31\x88\x41\x33\x2e\x8e\x30\x71\x08\x74\x2e\xca\x24\xa6\x31\xa4\x91\x06\x19\x85\x7b\xf0\xb2\x63\xb4\x49\xc3\xbc\xb3\xd1\x26\x30\x8d\xa3\x0d\x37\x92\x4c\xb4\x4e\xe4\x92\x81\x22\x6c\x34\x9b\xb7\x05\x51\x03\xd6\x5a\x90\x00\xf4\x13\xba\xba\xfa\x11\x7d\xfa\xae\xda\x4b\xde\x50\xe5\xd0\x52\x5a\x44\xd7\xf0\xf8\xd7\x57\xd1\x6d\x60\xc5\xd4\xf3\xda\x62\x38\xf9\xf6\x34\x52\x65\x48\x54\x17\xaa\x6e\xbb\x03\x03\x6e\xda\xed\x7a\xea\x08\x2b\x67\x18\x0f\x49\x4b\xc1\x14\x24\x1b\x98\xd0\x9b\x60\xee\x54\x7d\x11\x23\xd3\x37\xab\xfd\x90\x1f\x52\x75\x1b\x2c\x80\x19\x23\x51\x34\x61\x61\x41\xd6\x4a\xd0\xb4\x63\x31\xb6\xb1\xd2\x8e\x85\x7c\x41\x09\xe2\xeb\x9e\xf2\xb8\xda\xe3\x79\xc3\xa9\xe6\x88\xcf\x76\xec\x4d\x62\x83\xed\x91\x41\xd6\x6b\x4d\x75\xe7\xec\x21\x5b\x5d\x01\xcb\x16\x56\x6b\xc8\xa9\x33\xf7\x23\xf4\x61\xe8\xe0\x18\x68\x5a\x56\x14\x8c\x47\xfd\x74\xaa\x18\xe6\x7d\xf2\x95\xc2\xd5\x77\x43\x66\x34\xf1\x46\x74\x88\xfb\x45\x9b\xab\x8d\x58\x77\xf8\x55\x7d\xf4\xbf\xe2\xfa\x50\xaf\xcd\x3d\x30\xdd\x29\xbb\xff\xcc\xcc\x0f\x9f\x6b\x4c\xad\xc5\x42\x48\x9e\x32\x27\x9b\x3d\xce\xe8\xc8\x15\xfd\x49\x0f\x48\x07\x5b\xfb\x4d\xd0\xbe\x5c\xa5\x68\x7c\x75\x77\x67\x82\x85\xa4\x09\x96\xf5\x35\x5e\x5d\xde\x5a\x45\x82\x6f\x91\xf8\xd7\x8c\x8a\xf2\x72\xe3\xb3\x35\xf3\x66\x74\xf6\x7a\x25\xb7\x8c\xc3\x5c\x5d\x32\xcc\x44\x72\xc9\x90\x93\xc8\x11\x34\x99\xdc\x9b\xfe\x4b\x28\x40\x90\x59\x2d\x2c\x79\x7a\xe1\x42\x6e\x1b\xe6\xf9\xdb\x9c\x36\x4b\x11\xa8\x3f\xe3\xd8\x3a\x54\x7d\xcc\xd1\xc8\x9b\xa1\xcb\x56\x68\xcf\x2b\xf6\xf8\xbb\x2a\xa7\x61\x0b\xe6\x7c\xce\xf5\x3a\x9f\x8f\xef\x76\xb1\x36\xc3\xa7\xf5\xf4\xc7\x53\x5c\x69\x94\x9f\xdd\x85\x8f\xcf\x29\xde\xec\xfa\x71\xf2\x23\x19\xd8\x82\xaa\x59\xd0\xb3\x65\xe8\x62\xba\xb3\x05\x13\x65\xe7\xda\xc1\xe7\xe3\xdb\x21\x58\xb7\x4e\xc1\x16\x5a\x4c\x2e\xd4\x0a\x93\xd6\xb1\x93\x0b\xfa\x66\x09\xcd\x8c\xba\x15\xb1\xc7\x11\xf4\x72\x70\x4c\xc2\xa1\x22\x8a\xd1\xef\x17\x93\x63\x81\xc9\xd8\xd8\x87\xd8\x14\x2f\x63\x
02\xc1\xce\x2d\xe4\xd1\x6e\xd6\x72\x61\xda\xbd\xeb\xf8\x1f\x63\xeb\xec\x47\xba\x20\x47\xe3\x01\x5b\xd0\x78\xc9\x50\x75\x2b\xd9\x07\x15\x00\xf8\xb5\x61\x68\xc9\x4f\xdd\x95\x4f\x05\xa4\xd5\xb5\xfb\xd8\x04\x16\x30\xdf\xd2\x48\x9c\x71\xa8\xbd\xe5\xdd\x61\x92\xfa\x91\x46\xb5\x36\x0d\xdb\x90\x0c\x2d\x55\xaf\x78\x1d\x05\xce\x02\x04\x19\x98\xee\xf0\xc2\xfb\xde\xda\x48\x12\xb0\x2c\x65\xa3\xf1\xa9\x8e\xe2\x2b\x2e\xa8\x1a\x90\xd3\xa9\xd2\x9b\x55\xca\xdc\xf5\xb9\xad\x2c\x65\x3d\x24\x27\xe6\x15\xef\x6d\xf2\xfb\xaf\xb2\x2a\x5f\x36\x8c\x65\xca\xf8\x5d\x61\xad\x94\xa2\x67\x86\xb9\x4c\x59\xc7\x61\x2f\x99\x3c\x23\x0c\x86\x56\x76\x2e\xe6\x9b\x79\x69\x4e\x74\x57\x55\x4a\x2a\xe4\x8c\xfc\x25\x4f\x15\x37\x02\x9e\x19\x00\xfd\x96\x6f\x6c\x4c\x69\xbf\x4d\x23\x25\xf4\x04\xdd\xc9\xd5\xd5\xdd\x5d\x7a\x2a\x96\xde\x0e\xfc\x85\xb5\x73\xcd\xe9\xef\x05\xfc\x72\xd1\xf1\x82\xdf\x25\x9e\x12\xbd\xdc\xbd\x30\xa9\x62\x63\x3b\x11\xb3\x88\xfc\xcd\x91\x59\x24\x5e\x1e\x9c\x48\x70\xbc\xa7\x33\x87\x2e\x53\xdc\x9e\x2a\x43\xa2\x0b\x49\xb5\x78\x0b\x68\x1a\x30\x21\xd1\x30\x34\x20\xe8\x41\x4c\x52\x25\xc0\xeb\x91\xf8\xeb\x7d\x17\x8d\xc9\x87\xdd\x44\x7c\x2c\x5a\x47\xf6\x33\xc5\x1f\x86\x96\xe9\x13\x77\x7e\xba\xa8\x79\x77\x6f\x30\x54\x6b\xb1\xb5\x0e\xf4\xe5\x4b\xd8\x82\x7f\x42\xf0\xd7\xaf\x79\xac\x92\x8a\x07\x46\xfb\xcf\x91\x1d\x0b\xf0\x8b\xd8\x34\xc6\x3e\x66\x70\x17\x60\x66\x53\x4a\x5e\xe1\xbe\x40\xe3\x4a\xde\xb3\x50\x30\x92\x16\xe9\xc2\xce\x89\xa5\x79\xfb\x03\x2e\x13\x4d\x73\xa4\xfc\xae\x78\x5a\x52\xd9\x33\x23\x6a\x8e\xb4\xe3\x98\x9a\x56\x20\x23\xaa\x46\xf6\x84\x5c\xd0\x57\x03\xff\x0c\x43\x2a\x9c\x44\xf9\x7d\x7f\x4e\x6a\x56\x34\xf0\x66\xc7\xd0\x44\xda\x83\xe8\xc4\xf6\xe2\x64\x01\xe9\x69\x44\x5a\x82\xf6\xaf\xa4\x58\xf6\x96\x07\xfa\x1b\xd0\x8c\x35\x48\x9a\xb6\xb4\xb7\x4e\xc2\xb3\xd1\xec\x94\x87\x2b\x60\x0b\x29\x8f\x9c\x54\x2b\xed\xb1\xa5\x2e\x74\xc1\xde\x98\x20\x69\x86\x8d\x26\xbf\xfe\xf5\xf7\x61\xec\xf2\xcf\x7f\x93\x46\x2f\x7f\xfd\x1d\xb7\x39\x58\x19\x29\x93\x61\x07\x5e\xba\xa1\x83\xcc\xb1\xd0\x81\xd7\x31\x1b\x5f\x33\x75\x05\x78\xd1\xd8\xe8\xb2\x3b\x63\x5d\x31\x05\x7d\x01\xe2\xd9\x58\x34\xb4\x3a\x96\x70\xb8\x2d\x80\x9c\x9f\x6e\xf9\x73\x7f\xaa\x1c\xb4\xb6\x60\x0b\x57\x91\x2e\xc2\x6b\x6e\xee\x7e\xb9\x9c\xdd\x61\x63\x76\x92\x31\x4f\x1a\x9e\x91\x0a\xcf\x92\x96\xcb\x23\x2e\xa7\x44\xc1\xcd\x73\x99\x4a\x65\xe6\x1f\x45\x94\x4c\x8d\xb4\x17\x53\xb3\xf0\xfe\xc3\x4c\x45\x73\xc2\x42\xb2\xaa\x75\xc1\x16\x20\xc5\x30\x73\x56\x8b\xa0\x3a\x33\x61\x72\xd4\x4b\x61\x99\xb5\xea\x52\x84\x6d\x9b\x1b\xb3\xa3\x09\xd4\xe6\x26\xfd\xa3\x95\x17\x37\x40\x8f\xa1\x2f\x57\x08\xaf\xea\xaa\xad\x0a\x1a\xef\xed\x82\xf9\x6e\xbd\x6a\x57\x37\xd0\x15\x0a\x23\xf4\x37\x98\xfc\x06\x63\x10\x52\xb9\x43\x2b\x77\x38\xf5\x1d\xc6\x50\x9c\x26\xaf\x61\xf4\xea\xeb\x8f\x62\xdc\x51\xde\xfb\x75\x43\xc4\xaa\xe2\x8e\xb7\x0d\x55\xce\x96\x44\x93\x04\x55\x46\x12\xc6\x6f\x2c\xb0\x8f\x32\xbc\xaa\x1f\xfd\xa2\x22\x53\x1e\x8e\xc3\x78\xa5\x8c\x3c\x9c\x17\x64\x99\x8f\xcf\x4b\x65\xca\x20\x70\x02\x43\xcb\xc8\x20\x78\x2f\xa6\x05\xa3\x6b\x77\x3d\x33\x53\x04\x89\xc1\x68\x29\x35\xc8\x40\x84\xdf\x83\x15\x10\x51\xc1\x11\xa2\x8c\x08\x8a\x5f\x19\xb2\xaa\xec\x8a\x6b\x51\x41\x48\xb4\x94\x88\x4a\x44\x0b\x7f\x1b\x73\x01\x39\x14\x4e\x62\xe5\xe4\x38\x95\x2e\x2c\x16\x26\x58\x08\xb6\x61\x66\xfb\x14\x0d\x23\x30\x5d\x86\x3d\xed\xb2\xf7\xe6\x2c\xf9\xad\x6c\x66\x73\x47\x29\xa4\x54\x55\x23\xb0\xcb\xde\xaf\x05\x37\x53\xcd\x16\x40\xd0\x54\x29\xeb\x20\x48\x58\xc0\x3e\xf5\x71\x3a\x80\x6c\x41\x34\x49\x97\xd3\x04\x8d\x54\xb4\x9f\x6c\x7a\x3f\x9c\xcd\x92\x84\xc0\x14\x81\x97\xaa\x11\x04\xf3\xd4\xd9\xa7\xe8\x99\x35\x8e\x20\x28\x45\x96\xd3\x04\
xe7\x15\x75\x1b\xfc\x88\xc0\x58\x69\xbc\xa2\x02\x2d\xb3\x6b\x44\x10\x02\x41\x4a\x75\xc2\x08\x11\xac\x9d\x04\x73\xda\xdb\x1c\x35\x48\xaa\x5c\x37\x8f\x90\xbc\xaa\x2f\x80\x65\xf3\xc7\xb3\xe6\x39\xa2\x28\xba\x52\xae\x46\xa8\x48\xb8\x76\x97\x27\x84\xec\x60\x82\xa0\x30\x8c\xe1\xbe\x90\x94\x58\x9b\xb9\xd6\x5e\x36\xd8\x1e\xad\xb7\x07\xe8\x91\x1b\xe8\xaa\x59\x9b\x77\x9a\xe4\x88\xc3\xfb\x5c\x9b\x1d\xd4\x7a\x5c\xa3\x4a\x61\x28\x83\x63\xe4\x13\x31\xe0\xea\xe3\x51\xb7\x39\xeb\x50\xcd\x6a\xb7\xd6\x1b\x76\xdb\x8d\x3e\x3e\xa6\xd8\xc7\xd9\xc3\x34\x6e\xa1\x54\x21\xa8\x23\x84\x21\x66\xd5\xc1\x23\x43\x3c\xe2\x33\x86\x6d\xcd\x67\x23\x74\xda\xe9\xa3\xd3\x3e\x5e\x9d\x36\x5b\xd3\x21\x85\xb3\xd3\x41\xa7\xcf\xa1\xc3\xd6\x03\x3e\x1b\xb5\xfa\xed\x11\xd7\xe9\xb4\x8e\xaa\x21\x55\x08\xe6\x08\xa9\x8e\x06\x8f\xad\x76\x17\xad\xb5\xb1\x06\x37\xc4\xab\xf3\x6e\xa3\xc7\xd5\xbb\x8d\xfb\x29\x37\x98\xa2\xad\x47\xec\xa9\xd7\x18\xb7\xfa\xdc\xb4\xc6\xf6\x99\xf1\x8c\x1a\xd6\xa8\xfe\x1c\x6d\x5d\x9d\xba\x6d\xc3\x19\xc5\xe5\x54\x83\xbf\xd5\xed\xb0\x4b\xf5\xbb\x05\xb2\xb7\x34\xdc\x40\xd8\x0d\x64\x9b\x1b\x50\xc0\x39\x8e\x37\x2b\x94\x19\xde\x95\x59\x20\xbf\x88\xa6\x91\xa4\xe4\x06\x42\x6e\xbc\x7d\x4e\xf9\x8a\x26\x2d\x90\x9f\xda\x08\x82\x45\xf2\x50\x1b\x40\xd0\x4a\x05\xa7\x61\x82\xae\x10\x2e\x2a\xc7\x99\xfe\xf9\xec\x75\xe3\x9f\xef\xa0\xcf\x34\x4d\x7f\xa7\x9d\x17\x0c\x7f\xbe\x81\x3e\x1f\xb6\x6d\x38\x0f\x75\xc1\x56\xdf\xc0\xe7\xff\xa6\xb9\x6a\x5c\x1e\x1a\x93\x87\xba\x7f\xbf\x4e\x5e\x5c\x3f\xcc\x55\xd1\x49\xca\x8b\x33\xa8\x10\x15\x9a\xc6\x2a\x64\x85\x76\x0b\xc3\x2e\x5e\xcb\x76\x06\xd1\xfa\x82\x17\x05\x4d\xd0\x25\x17\x1c\x02\xc3\xf0\x77\xd8\x7b\x15\x87\x88\x45\x25\xa0\xc7\x35\x10\xe1\x7b\x09\x93\x84\xe5\x39\x16\xf1\x54\x7a\x07\xea\x62\xe9\x08\x44\x6e\xa0\xcf\x9e\x47\xf1\x2f\x60\xe7\xc8\x38\xb5\x9b\x2c\xe5\x18\x2e\x2a\x1c\xa5\x7c\x3f\xfc\x55\x76\xf6\x25\xfc\x72\x3b\xc7\x34\x2a\x66\xe7\x13\x23\x85\x87\x2a\xa7\x1f\x49\xda\x60\x72\x6a\x3f\x12\x6c\x32\x09\x47\x20\x54\xae\x10\x12\x8e\x93\x98\x88\x0a\x24\x8d\xa2\x14\xa0\x64\x0a\x43\x28\x45\x21\x08\x94\x12\x01\x29\x23\x18\x41\x55\x08\x80\x2b\xb0\x28\x28\x14\x49\x50\x34\xc0\x15\x54\x91\x65\x0c\x11\x05\xc2\x19\x31\xc0\x94\x24\xe0\x40\x12\x51\xbc\x22\x28\xa8\x82\x91\xb4\x84\x0a\x98\x50\xa1\x29\x8c\x04\x38\x09\x04\x14\x87\x31\x42\x56\x70\x19\x88\x88\x42\xe3\xb4\x2c\x61\x08\x26\xd3\x84\x42\x0a\x94\x44\x48\x5e\xc7\x8a\xc4\xc6\x1e\xe4\x1d\x46\xdc\xa1\x48\x7c\x48\xe2\x7d\x8d\x7e\xa7\x2b\x14\x8c\x50\xb9\x4f\xfd\x8e\x04\xa9\x54\x2a\x37\x10\x42\x3a\xf5\x79\xf4\xba\x81\x30\xe7\x1f\xe2\xff\x0b\xbe\x44\xf6\x6f\x1c\x68\x0c\xc3\x30\xb5\x77\xa5\x33\xb1\xac\x17\xf5\xad\xfb\x21\x48\x9d\xe7\xd7\x7b\x09\x25\x9a\xa4\x3a\xac\xcf\x95\x09\xb0\x14\xed\x1e\xab\xb3\xb4\xa6\x08\xfa\x56\x12\x09\x06\xc3\x5f\xdf\x5a\x95\xeb\xe6\xee\x6d\x53\x95\xb5\xb1\xd4\x03\xd6\xe2\xde\x5c\x73\xa3\x77\x4b\xa4\x5f\xe9\x49\x8f\x41\x71\x49\x7d\x85\x1d\xd6\xcc\x7c\xf0\xd0\x1b\x0f\x99\xfd\x4b\xc3\x14\xee\x4d\x79\x92\x1f\xab\xdb\x41\xb3\x56\x21\x9f\x5f\x31\xb9\x4d\x74\x3a\xd3\xed\x93\x64\xac\x51\x71\xfe\x71\xdb\x69\x3d\x52\xfd\xed\xed\xa8\x2f\xbd\x32\xab\xfe\xc8\x68\xaf\x7a\xe8\xfd\x53\x95\x78\x7d\x9d\x8e\x09\xee\xa5\xf2\x8c\x74\xd0\xeb\xe5\x04\xab\x48\x7a\xbf\x3b\xe7\xc0\x06\x7b\x77\x38\xf7\x38\xbc\x2b\x7c\xac\xd1\x90\x30\x86\xb5\x98\x84\xd7\x13\x33\x47\xf0\x21\xc3\xd4\xe1\xfb\xa4\xc7\xff\xd3\x2f\xcf\xa9\xe0\x94\x76\x1f\x6f\x0a\xe8\x65\xdc\xf8\x8a\xc4\x64\xba\xa2\x10\x18\x09\x00\x59\x91\x11\x11\xa5\x44\x42\xac\xd0\x0a\x8a\x09\x0a\x81\x21\x88\x48\x11\x24\x2d\xa0\xb8\x22\x28\x08\x0e\x63\x82\x0c\x8b\x04\x2a\x92\x18\x26\xc2\x94\x08\x68\xfa\x6a\x1f\x5d\x8f\xbd
\x1a\x4e\x76\x76\xec\x3b\x0c\x63\x14\x9d\xd2\x14\x42\x4f\xbd\x00\x82\x13\x34\x9a\xd1\x12\xd0\x82\x2d\x01\x1d\x3c\x3d\x23\xdc\x86\x30\x60\xf1\x9e\x9a\xe1\xfa\xae\xff\x36\xdd\x36\xb1\x87\xb5\xf1\x72\xfd\xd6\x60\xfa\x76\x0d\xe9\xa0\x3d\xaa\x4a\x91\x4f\x53\xd0\x98\x2d\xb1\xeb\xee\x23\xf6\x38\x69\xbd\x2c\x45\xd2\xbe\x9e\xab\x2f\x13\xbc\xc2\x74\x1e\xa6\xe6\xf2\xba\xcd\x69\x58\xef\x91\xe6\x38\x7b\xea\xd6\x9c\xdb\x12\xdc\x77\xed\xfd\x3f\xc6\x75\x56\xeb\xf0\xf9\x9d\x19\x0c\x5f\xbc\x9a\x7e\x9f\x71\x4f\x4a\x9b\x98\xed\x1a\xb3\x2d\xba\xa2\x26\x06\x37\xac\x2d\x1f\x9f\x88\x8f\xd7\x86\xf9\x6e\x2c\xd0\x67\xf8\x65\xfe\x3a\xe4\xba\x8c\x69\x73\xe8\xa4\x8f\x76\x1b\x0c\x3d\xd1\x9b\x6f\xf6\x78\xfe\xf1\x30\x1f\x34\x2d\xb6\xc3\x3d\x7f\x90\x1d\xd0\x5b\xde\xf7\x19\x4d\x98\xcf\x64\xfc\xcd\x6d\x29\xed\x84\x96\x52\x6f\x27\x79\xdb\xff\xf3\x96\x82\x16\x6f\x29\xc8\x65\xbc\xdc\x5d\xf8\x70\x86\x0b\x4e\x78\x45\x68\x0a\xfe\x06\x23\xdf\x60\x04\x82\xe1\x3b\xf7\x2f\xd5\x9b\x11\x0a\x21\x33\x1f\x3a\x11\x03\x47\x69\x9c\x26\x29\x94\x26\x33\x5c\x3d\xd9\xd1\x3d\x44\xff\x76\x9d\xa4\xbf\xaa\xf3\x8e\x8a\xef\x6e\x77\xe3\x4e\x95\xaa\xeb\x75\xba\x85\xc2\xdb\xe7\xea\xb5\x05\x2f\x6c\xeb\xbd\xfd\xfe\x81\xcc\xe5\xf1\xec\x51\xa8\xde\x0b\x8d\x85\x43\xcf\x26\xf8\x70\xf2\x2b\xf0\x61\x86\xa9\xbe\xfc\x06\x45\x2e\xfa\xba\xf2\x7c\x29\x7f\x3c\x55\x60\x67\xe1\xa9\xc3\xab\x94\x05\xa3\xd4\xac\x2d\xa5\xc1\xe5\xb0\x39\x4a\xc6\x4e\x63\x13\x4b\x60\xb0\xd3\xb8\xe0\xb1\x44\xeb\x34\x2e\x44\x6c\xd0\x7d\x1a\x17\x32\x96\x2a\x5c\x66\xa7\xe5\x45\xa6\x11\xb2\x97\x01\x6f\x20\xb2\xe8\xf4\x49\xca\x7e\xc3\xb3\x3d\x36\xe4\xa5\x11\x17\xdd\x7f\xc0\xdd\xd1\x54\xc5\x4d\x85\x54\xdd\x36\xce\xca\x7b\x9c\x2c\xcd\x9b\x42\x3a\x33\x4d\xfd\x05\x73\x81\x09\x26\x09\x7b\xf8\xfe\x7d\x25\x94\xee\x2a\x1b\x5d\x06\xa6\xab\xcb\x89\xf3\x79\x97\x32\xc9\x0d\x54\x24\xf7\x3e\x73\xe2\xb1\x8c\xd9\xfc\xc6\xb8\x7f\x8f\xff\x52\xb3\x9d\xe1\x90\xbf\xde\x6c\x39\x4d\x3b\x61\xdf\xeb\x19\x0b\xdf\xa5\xb6\x00\x9e\xda\x7d\xa4\x6e\x1d\x48\x0c\x79\x78\x7a\x7c\xc8\x65\x84\xc6\x18\xa5\x05\xbd\x5c\x46\x58\xb4\x09\xa7\x85\x9a\x5c\x3e\x78\xac\x2b\x38\x95\x4f\xac\x6d\x9c\x8c\x87\x8c\xf2\x49\x0f\x7e\x65\x77\x0b\x5e\x22\xfc\xe5\x6d\x0e\x29\x11\x00\x53\xb7\x06\x5e\xc0\x87\xc3\x0b\xee\x18\x2e\x02\x1a\xa7\x48\x54\x96\x71\x91\x52\xe8\x8a\x42\xe2\xb8\x0c\x50\x98\x42\x29\x4c\x41\x04\x04\xa3\x15\x02\x13\x80\x22\xa1\x02\x02\x80\x48\x22\x95\x0a\x89\x20\x15\x49\xa0\x2a\x28\xa5\x5c\xed\x27\xad\x4f\x8e\x4f\xa1\x7c\x1d\x0b\x12\x95\xf4\xc9\x2e\x14\xc1\x32\xa6\xc2\xbc\xa7\x91\x16\xe4\x65\x38\x1d\xf2\x19\xa8\xd8\xf3\xca\x68\x57\x26\x4d\xad\x7e\x0b\x16\x12\x46\x0d\xe6\x76\xab\xd3\xf9\x98\x3d\x54\xde\x1f\xd4\xa7\xaa\x50\xdb\x10\x5d\xa2\xc7\xb8\x19\x02\x13\x24\xe0\xd5\xd8\x00\x3c\xf4\xd9\x4d\x3b\x98\x3e\x5a\xbb\x65\xfa\x38\xf1\x58\xad\x63\x76\xeb\xa1\xd1\x47\x46\x18\x03\xf7\xc0\xcb\xa0\x72\x3f\x22\x75\x0e\x61\x68\x30\x53\xe5\x5d\xdb\xcf\xfa\xdd\x97\x40\xbd\xbc\xbd\xbc\xbb\xec\x7a\xb7\xf5\x4d\x83\x46\x2d\x7b\x68\xc0\xcf\x43\xc5\x36\xd9\xcd\xdb\x68\x64\xa2\x8d\x47\x5b\xa8\x2c\x6e\xeb\xf4\x4c\x5c\xcd\xa6\xf7\x1f\xea\xb4\xf2\x4c\x3d\xdd\x8e\x3b\x68\x73\x79\x7b\x6b\x2e\x00\xfc\x0c\xcf\x87\x95\xdd\x8b\x88\xd5\x2b\x5d\x9d\xfe\x50\xd6\xe6\xa0\x43\x4d\xae\xa7\xbb\x0f\x66\xf8\xf3\xe7\x55\x38\xbb\x6b\x86\xb2\xa2\xc3\xdb\x50\x86\x7f\x3f\xad\x5d\xf7\x25\xef\x7d\xa8\xec\x70\x4f\x56\x77\x3f\xbf\x1f\x4a\x98\xaf\x1c\xd9\x05\x7d\x61\xf1\xbc\xed\x09\xd3\x01\x4d\x56\x3f\x14\x8b\x06\xb0\x64\x98\xdc\xd3\xfc\xa3\x3a\xbb\x7f\x69\x18\x9d\x40\x4f\xa6\xf6\xc0\xbc\x3d\xeb\x71\xb1\x47\x2f\x36\xed\x41\xf5\xc2\xf2\xe3\xf5\x5a\x48\xbe\x57\xc8\x75\x91\x5a\xe8\x19\xf5\xd8\xa
d\x30\xd4\xb3\xb6\x60\x07\x00\x96\xa7\x53\xea\xa1\x25\xd5\x87\x5b\x72\x78\xfb\xae\xb5\x5e\x25\x6c\x5a\x47\x08\xe1\x1e\x6b\xab\x88\x6b\x4f\xc7\xd6\x7e\x25\x2c\x32\x4c\x91\x9a\xc8\xba\x18\xeb\xa7\xcb\x1f\x1b\x8d\x0a\x90\x4e\x97\xdf\x8b\xc9\xaf\x6d\x0c\xcc\xb0\x71\xe2\xb5\x36\x60\xb7\xeb\xe1\x2d\x66\xb4\xb8\xeb\x0f\x84\x1a\xed\x54\x0b\xd1\x94\x5e\xe3\x71\x35\x9c\x2d\xcc\xcd\xf8\x7a\xc2\x30\x51\x5f\x5b\x30\xe9\x36\x4f\x95\x1f\xf2\x9f\x12\xed\x7a\xef\xd3\x0b\x26\xa1\x0e\x4f\xd1\xe1\x92\x75\x78\xae\x0d\xcb\xc8\xf7\xda\xf7\x3f\xbf\xaa\xe3\x71\x07\x90\xee\x66\xe0\x60\xf6\xcb\xfb\xef\x04\x3e\xb7\x83\xcf\x8f\xfd\xa1\x08\x25\xa2\x02\x8a\x52\x12\x46\x4b\x24\x2e\xe0\xb8\x22\x51\x82\x28\xe3\x12\x4d\x56\x10\x1a\x27\x48\x05\xc6\x68\x9a\x86\x49\x19\x41\x25\x9c\x22\x65\x0a\x16\x71\x18\x15\x15\x59\x44\x69\x52\x26\x05\xcc\x9b\xf4\x43\xce\x19\xd3\x7a\xab\x36\xe9\x81\xc9\x9d\x7a\xa6\xb1\xf4\xd9\x3a\xe7\xe9\x61\x62\xda\x1b\x49\x79\xbe\xd8\xec\x56\x5a\xc3\xb7\xe1\x8b\xd8\x41\x5b\x0c\x36\x7b\x78\x1e\x99\x9d\xd5\xf3\x1c\x86\x95\x66\xc5\xea\xb6\xa9\x15\xcc\x8e\xde\xef\x67\xb7\xcc\x1c\x63\xf6\x71\xc9\x7d\x65\xc4\x25\xef\x75\x42\xff\x18\x9e\x0d\xab\x3e\xbc\xbd\x37\x68\xe7\x11\x5b\xb7\xb1\xce\xfb\x4a\x18\x6c\x06\x72\x63\x3c\xdd\xca\x4c\x03\x88\x64\x7f\x08\xec\xdd\xb0\xd3\x9e\x09\x1f\x9a\x38\xee\xf5\x96\xab\x56\x87\xeb\xd6\x71\xeb\x75\xc9\xbe\x4e\x9f\xa4\xe1\x00\xd6\xae\xe7\xb7\xfd\xf5\xb5\x61\xcd\x56\x1c\x79\xdd\x98\x3e\x8a\xd6\x07\x45\x0c\xd1\xe7\x26\xfe\xd6\xeb\x15\x88\x4f\x11\xa7\x8d\xc6\xa4\x90\xce\xde\x5a\x0f\x13\x6d\xcf\x55\xf5\xb6\x0a\x77\xe1\xfb\xe6\xce\x5e\xbe\x73\x88\xf6\x08\x0b\xbb\xb5\x81\xd0\x5c\x6b\xfb\xd6\xad\xed\xfa\x84\x5d\x65\xa5\x9a\xa7\x23\xb6\xb0\xcd\xbe\xfe\x78\x5b\xc1\x0f\xe5\x53\x62\x54\x76\x7b\x3e\x43\x7e\x63\x32\xab\x5a\x67\xc8\x67\x98\x7f\xaf\x3f\x0b\x8d\x17\x0e\x7d\x6b\xf5\x74\x5b\xf4\xf5\xa7\x4c\x35\xf3\x6c\x71\x6e\x5d\x38\xbe\x70\x2d\xc5\xf8\x95\xb2\xc5\x3f\x94\xbc\xb3\xee\x57\xcf\xd4\x33\x36\x9a\x6a\xbd\xf9\xb0\x3a\x5f\x5d\x3f\xbf\xb4\x4c\xe9\xa5\xa6\x36\x56\x16\x31\x83\x9f\xeb\xed\xa7\xe5\xee\x79\xfc\x7e\xdd\xed\x18\xa3\x8e\xd6\x9c\xb3\x75\xfa\x5e\xd1\x6e\x3f\x5e\x95\xd7\x6e\x63\xfd\x0c\xde\x96\x0f\xcd\x26\xd5\xbb\xbe\x9e\x72\xc6\x76\xd3\xfd\xa8\x33\x97\xee\x5b\x31\x52\x04\x14\xac\x88\x14\x55\x41\x15\xba\x02\x23\x92\x2c\x01\x59\x42\x50\x98\x04\x28\xa2\xd0\x34\x4a\x63\x12\x4d\x57\x48\x58\x40\x08\x80\xe3\x88\x82\x53\x38\x4d\xe1\x94\x00\x0b\x18\x25\x88\x87\x45\xbc\x33\xfa\x56\x34\xb7\x6f\xc5\x11\x84\x4e\xef\x5b\xfd\xa7\xe1\xac\xf0\xdc\xbe\xb5\x16\xab\xd4\xa3\xbe\xb5\xe4\x98\x3f\xa3\x6f\x65\xb0\xed\x4c\xdc\x0e\xfa\xa2\xfe\xd4\x53\xab\xcd\x46\xa7\x7b\x3f\xdc\x28\xf7\xdd\xc5\x66\x62\xb5\xee\xb7\x3b\xc6\x1a\x0c\x88\x06\xfd\xf4\x4c\x90\x88\x30\xd7\xdf\xb8\xdb\xd6\xc3\xe8\x5e\x6c\x58\xac\xa4\xda\x4d\x71\xa1\xd2\xf2\xec\x41\xee\x8c\x1e\xdf\x56\x0f\xb3\x9a\xfa\xd1\x96\x57\xdd\x76\xfd\x7f\xab\x6f\x3d\xb7\x6f\x3b\xb3\x3d\xbf\x52\xb7\x93\xba\x74\xc1\xbe\xf5\x77\x8e\xf7\x13\xfb\xd6\x7f\xa9\x6f\x63\x2e\xd4\xb7\x9e\x1a\x67\xfd\xbe\x95\xab\x3c\xac\x2a\x93\x8f\x15\x81\x4e\xda\x8b\xd1\x72\xac\xee\xa6\x5d\x7d\x37\xc6\xbb\x2f\x54\x75\x27\x49\x8b\x6e\xfd\xe3\x7a\xa4\xcc\x1e\xaf\x81\x3d\xd3\x08\xea\x43\xd9\x22\xd3\xf1\x6c\x2b\x56\x5b\x6d\x73\xb4\xc2\xdb\x6f\xf3\x07\x6d\x3e\x7e\x99\x75\x09\xed\x61\x61\x58\xbb\xd6\x93\xba\x63\xde\x8b\xf5\xad\xa9\xc7\xbe\x1d\x9f\x8a\xbe\x3f\x81\x35\xf8\x01\x74\xd9\x1f\x2e\x85\x38\x7a\x27\x34\xd6\xeb\xe1\x9f\x53\xc7\x05\x42\x83\x51\xbb\xc7\x8c\x1e\xa1\x0e\xfb\x08\x7d\x51\xe5\xbc\x93\xd9\x92\x4f\x89\x3f\x1b\x75\x8c\x6b\x12\xf2\x24\xc1\xb9\xe8\x63\x3f\xb9\x3b\xed\x94\xfd\xb3\xb5\x8b\x
8a\x4d\x52\xee\x24\x60\xd0\x94\x6b\x0f\xa7\x2c\xf4\xe5\x40\x7e\x13\x3a\x82\xec\x26\x72\x60\x58\x49\xd3\x5c\xa6\x5a\x4b\x2b\x5e\xaa\x52\x53\x56\x3c\x73\x96\x15\x2f\xab\x59\xb2\x90\x2c\x4d\x33\x60\x15\xd6\x3c\x75\xc2\x3b\x77\x4e\xf9\xb2\xda\xa7\x89\xc9\xd2\x3f\x13\x5a\xae\x05\xa2\xb7\x9c\xf8\x8a\xb8\x37\xa2\x14\xfb\xf5\xbb\x77\x79\x4a\x84\x0b\xd4\xe7\xe2\x8d\x61\x3a\x6e\x73\x4d\x48\xb4\x4d\x00\xc2\xad\x2b\x1d\x8d\x7f\x41\xcb\xd9\x78\xfc\xc3\xfd\x0a\x21\x4a\x69\xd7\xa1\xcb\x65\x4e\x85\x73\x60\x11\x46\x12\xc9\x06\xa2\x78\x3c\xe2\x9b\xa3\xdf\xe2\x27\x81\x73\xaf\xc7\x39\x03\x99\x7b\x24\x41\x21\x58\xf1\x83\x0c\x92\xd0\xf8\x77\xfa\x9c\x81\xc7\xe3\x50\x0c\x51\xec\x94\x84\x9b\xe3\x03\x11\x12\x9b\x7c\xf8\x92\xa2\xf2\x48\xfd\x28\xe1\x01\x8e\xb1\x0b\xc3\x0e\x36\x7b\x47\x10\x27\x9d\x0d\x74\x13\x9c\x03\x94\x06\xf6\xf0\xeb\xeb\x33\x61\xaa\x72\x61\x80\x87\x83\x50\x6e\x12\x0f\x34\xca\x01\x1d\xdc\x2b\x75\x09\xdc\x3e\xaf\x30\xf4\x94\x50\x75\x92\x26\xc9\x0a\x04\x57\x68\x5d\x42\x01\x9f\x57\x8a\x4f\x9f\xa8\x42\xf4\x54\x9b\x63\x25\x42\x17\x86\x9d\xda\x1a\x43\x3c\x4e\x35\x7e\xb6\xa1\x63\x37\xa0\x9d\x6b\xeb\x28\xbb\x30\xe4\x60\x5b\x69\x04\x63\x32\xa2\xe3\x5b\xdc\xce\x87\x75\xc4\xb3\x58\xf7\x96\x04\x30\x74\x1f\xdd\xc9\xd5\x7a\xe0\x71\xba\x4b\xe6\xb9\x5f\xd2\x4d\x7b\xa7\x03\x3e\x66\x16\x43\x2e\x83\x18\xce\xd8\x19\x67\xd9\x00\xbd\xab\x03\x2f\x02\xcf\x65\x55\x08\x5c\xf0\x0b\xe5\x54\x68\xf1\xab\x10\xcf\xc5\x17\xe3\x97\x07\xf2\xf8\xf0\xb6\x5c\xa4\x97\xb1\x63\x84\x5b\x51\x94\xb9\xd6\xbc\x0c\xb6\x42\x98\xb2\xb1\xc4\xee\xdc\x3c\x0b\x51\x94\x57\xe1\x1a\x0d\x8e\x87\x4b\xc4\x77\x74\x8d\xe8\x59\x08\xe3\xdc\x8a\xb5\x5b\x1f\xe0\xcd\xd1\x89\x76\x37\x47\xa7\x22\xa6\x28\x71\x81\x7e\xdb\xe7\x93\x87\xb8\xe4\xe8\x28\x7e\xfb\xeb\x59\xd6\x2d\x61\xd8\x5c\xbb\xe5\x5f\x6b\x7b\xa6\x41\x73\x05\x44\xf2\xb4\xe0\x07\xec\xd1\xcc\xc8\x23\x2c\x81\xfd\x7c\x3f\xc8\xe2\x9d\x8f\x38\xa1\x95\x65\x5f\x5a\x7c\xaa\x3f\x64\x72\xcd\x1d\xf6\x3b\x44\x39\x40\x13\x6f\x67\xbe\x0c\xda\x24\xd6\xb9\xc3\xb7\xa2\x9e\x1c\xbd\x8e\xfa\xa2\xce\x10\x61\x7d\xca\x78\xb3\xf8\xfd\xdb\x17\x37\xf4\xd1\xc9\xe3\xb9\xf0\x63\x05\x8a\x2b\x13\xbe\x8e\xfc\x57\xd9\x3f\x7c\xd8\x7c\x9e\x26\x21\xda\xe2\x4a\x24\x5e\xcf\xfe\xab\xb4\x49\x3c\x43\x3f\x4f\xad\xa4\x42\xc5\xf5\xdb\xdf\x5e\xff\xab\x74\xda\x1f\x28\x99\xa7\x47\xea\x6c\x57\xce\xad\xfd\x17\x05\x1e\xe7\x9e\x98\x00\x97\x6d\xe0\x51\xa6\xd1\x14\xea\x42\x2d\x3c\x4b\x44\x11\x1d\x72\xf2\xba\x4c\x61\x97\x0b\x5f\xc7\x8c\x0b\x61\xcf\x0f\x62\xe1\x64\xfb\x57\xb8\xcd\x31\xff\x93\x53\x7d\xef\x1c\xab\x20\x90\x07\x33\x8c\xbc\x68\x18\x2f\x27\x5b\x39\x83\x67\xee\x10\xe1\xcb\x97\xe0\x90\xf6\x6f\x7f\xfe\x09\x5d\x59\x86\x26\x87\x56\xd3\xae\xee\xee\x6c\xb0\xb5\xbf\x7e\xbd\x81\xd2\x09\x25\x43\x2e\x46\xe8\xcd\xc5\xa7\x93\x8a\xc6\x66\xb1\xb4\x0b\x89\x8f\x90\x66\x03\x88\x90\xc6\x20\x7c\x85\x66\x2d\x76\xc4\x7a\x4e\x06\xfd\x84\x30\xac\xf0\x42\xb4\x2a\xf3\x4a\x68\x99\xa8\xd1\xf9\x3d\xcb\xd1\xbe\x58\xa8\xd1\x1f\xb1\xed\x26\xb7\x5f\x02\x82\x46\x6c\x83\x1d\xb1\x5c\x8d\x8d\x5f\x93\xee\x3e\xed\x73\xd0\x74\x50\x77\x5c\x66\xc4\x7a\x37\x13\x3a\x5f\xd5\xd9\x2e\x3b\x61\xa1\x1a\x33\xae\x31\x75\x36\xfb\x34\xfd\xd8\x47\x3e\x36\x15\x73\x39\x63\x44\xe5\xe4\x2c\x92\xa5\x21\x89\xda\x27\x3e\x6d\x94\x68\x2c\x7f\xa0\x9f\xb3\xa2\x98\x6a\x09\x3f\x95\xfd\xd7\xed\x10\xc6\x91\x64\x85\x60\x96\x20\xdb\x61\xca\x59\xe0\x78\x52\xe9\x5f\x34\x43\x0a\x98\xa8\x2d\x12\xa6\xc1\x2e\xeb\x14\xf1\x29\x8e\xff\x05\x83\xa4\xbb\xc6\xd1\x1c\x52\x11\xef\x80\x04\x59\x06\x32\xb4\x12\xf4\x8d\xa0\x69\xbb\x08\xd2\xc4\xd8\xe8\xc0\x0c\x0c\xbe\xda\x6c\x81\x0c\xbd\x09\xa6\xb4\x14\xcc\x2f\x24\xfd\xd5\xdf\x11\xe4\xd0\x28\
xe0\xd0\x5a\x93\xe9\x7e\x24\x0a\x0b\x8d\x20\x1c\x36\xd1\xab\x46\x4a\x71\x0a\x42\xa5\x8b\xd8\x9b\x9d\x48\x2d\xef\x3b\xc5\xc0\xb0\xec\x85\x09\xc6\xc3\x2e\x24\x0b\xb6\xe0\x34\x36\x48\xde\xac\xd6\x90\x64\xac\xd6\x1a\xb0\x81\x5b\x9b\xff\x17\x00\x00\xff\xff\xa6\x6a\x02\x7e\x2b\x8f\x00\x00") + +func account_mergeHorizonSqlBytes() ([]byte, error) { + return bindataRead( + _account_mergeHorizonSql, + "account_merge-horizon.sql", + ) +} + +func account_mergeHorizonSql() (*asset, error) { + bytes, err := account_mergeHorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "account_merge-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x6d, 0x3a, 0x55, 0x79, 0x80, 0xdc, 0x87, 0x4e, 0x2c, 0x66, 0x25, 0xc8, 0x9d, 0x9d, 0xf2, 0x1, 0xc9, 0x76, 0x42, 0x95, 0x6d, 0x48, 0x27, 0xe6, 0x15, 0xbb, 0x4, 0xab, 0x4e, 0xd4, 0xa4, 0x14}} + return a, nil +} + +var _baseCoreSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x69\x93\xa2\x4a\xf7\xe7\xfb\xfe\x14\x44\xbf\xa9\xbe\x61\xf5\x95\x7d\xe9\x9e\x7e\x22\x50\x70\x17\xf7\x75\x62\xa2\x22\x81\x04\x51\x16\x65\x11\x75\xe2\xf9\xee\x13\x82\x0b\x2a\x2e\x55\x56\xdf\xfb\x8f\x98\xf6\x45\x75\x0b\x87\x5f\x9e\xfc\xe5\x39\x27\x37\x3c\xf9\xfd\xfb\x97\xef\xdf\x91\xa6\xe3\xf9\xba\x0b\x3b\xad\x1a\xa2\x02\x1f\xc8\xc0\x83\x88\x1a\x58\xf3\x2f\xdf\xbf\x7f\xd9\xde\x17\x02\x6b\x0e\x55\x44\x73\x1d\xeb\x28\xb0\x84\xae\x67\x38\x36\xc2\xfd\x4d\xff\x8d\x25\xa4\xe4\x35\x32\xd7\xdf\xb6\x8f\x9f\x89\x7c\xe9\x88\x5d\xc4\xf3\x81\x0f\x2d\x68\xfb\x6f\xbe\x61\x41\x27\xf0\x91\x5f\x08\xfa\x33\xba\x65\x3a\xca\xec\xf2\xaa\xa1\x9a\xf0\xcd\xb0\xdf\x7c\x17\xd8\x1e\x50\x7c\xc3\xb1\xdf\x3c\xe8\x6d\x71\x2f\x85\x15\xd3\xd8\x42\x43\x5b\x71\x54\xc3\xd6\x91\x5f\xc8\x4b\xaf\x5b\x60\x5f\x7e\xee\xcb\xb6\x55\xe0\xaa\x6f\x8a\x63\x6b\x8e\x6b\x19\xb6\xfe\xe6\xf9\xae\x61\xeb\x1e\xf2\x0b\x71\xec\x1d\xc6\x04\x2a\xb3\x37\x2d\xb0\xe3\xb2\x64\x47\x35\xe0\xf6\xbe\x06\x4c\x0f\x9e\x14\x63\x19\xf6\x9b\x05\x3d\x0f\xe8\x91\x40\x08\x5c\xdb\xb0\xf5\x58\xc4\x75\xc2\x37\x0f\x2a\x81\x6b\xf8\xeb\x2d\xb8\xa6\xfd\xdc\x11\x00\x81\xab\x4c\xde\xe6\xc0\x9f\x20\xbf\x90\x79\x20\x9b\x86\xf2\xba\x65\x4c\x01\x3e\x30\x1d\xfd\xe7\x97\x2f\x42\xbb\xd1\x44\xca\x92\x20\x0e\x91\x72\x01\x11\x87\xe5\x4e\xb7\xb3\x93\xfc\x3b\x98\xeb\x2e\x50\xe1\xc4\xf0\x7c\x79\xed\xc1\xc5\xcf\x9b\xd2\x9e\x32\x5f\x04\x8e\x1b\x58\xde\x63\xc2\xd0\x5e\x3e\x22\x69\x42\x55\x87\xee\x23\x92\x5b\x3d\x35\x08\x1f\x94\x7c\x40\x4c\x86\x9e\xef\x68\x1a\x74\x0d\x5b\x85\xab\xdb\xb2\x40\x51\x9c\xc0\xf6\x65\x60\x02\x5b\x81\xde\xcf\x2f\x7c\xad\x2b\xb6\x91\x2e\x9f\xab\x89\x09\xe9\x86\x54\x1b\xa5\xd0\xeb\xb8\x6b\x24\x42\xcf\x37\xa4\x4e\xb7\xcd\x97\xa5\x6e\xe2\xa1\x53\xc1\xb7\xf9\x0c\xae\x1f\xc1\xf7\x57\xf7\xa1\x0f\x32\xef\x40\xd5\xe0\x03\x3a\x27\xc5\x1e\xc7\x76\x03\xcf\x37\x0d\x1b\x7a\xb7\x90\x0f\x42\x0f\xe3\x6e\xb5\x80\x51\x34\xb8\x81\x7b\x14\x7a\x1c\xf7\x60\xf2\xb7\x70\x0f\x42\x0f\xe3\xc6\xf2\x86\xad\x39\x37\x70\x8f\x42\x0f\xe3\xce\x03\xd9\x0b\xe4\x1b\x98\xb1\xc0\x7b\xf0\x4c\xc3\x9b\x2c\x02\x18\xdc\x62\x36\x29\xf6\x38\x36\x84\xee\x2d\x5a\xa3\xfb\x0f\xa3\x45\x6e\x7c\x0b\x2e\x16\x78\x18\x2f\x8e\x4a\x13\x08\xd4\xdb\xb0\x27\x72\xbf\x19\x7d\x17\x29\xe1\xe2\xed\xc1\x62\x64\x60\xdf\x00\x97\x81\xfd\xb0\xc2\xbb\xe8\x77\x4b\xd7\xbd\xc8\x7b\x31\xb7\x63\x80\xfb\xb0\x5b\xa9\x1d\x72\x24\x7b\x0e\x9c\x1a\x72\x6f\xcb\x1e\x42\xe3\x3d\xb1\x63\xa0\xbb\x23\x79\x08\x5c\xb7\xe5\x8e\x81\xe8\x8e\xdc\x21\xb0\xdc\x95\x7b\x48\xbf\x63\x40\xb9\x2d\x17\x07\x89\xbb\x32\x07\x97\xbf\x23\xb9\xf5\xe3\xdb\x22\xb
1\x6f\xde\x96\x39\x71\x85\xdb\xa2\x32\xb0\x6f\x0b\xec\x4d\xf5\x21\xa9\xad\xe5\xed\x04\xc5\x61\x57\x94\x3a\xe5\x86\x94\x14\x36\xe7\xba\xb7\x30\x77\x12\x9d\x7c\x49\xac\xf3\x17\x58\x3f\xbf\xc4\x63\x63\x09\x58\xf0\xc7\xfe\x1a\xd2\x5d\xcf\xe1\x8f\xdd\x23\x3f\x91\x8e\x32\x81\x16\xf8\x81\x7c\xff\x89\x34\x42\x1b\xba\x3f\x90\xef\xd1\x90\x39\xdf\x16\xf9\xae\xb8\x47\xde\xe3\x7d\x39\x41\x3c\xbd\xb9\x03\xce\x37\xea\x75\x51\xea\xde\x40\x8e\x05\x90\x86\x74\x0a\x80\x94\x3b\xc8\xcb\x7e\x7c\xbb\xbf\xe6\x45\x20\x2f\xe7\x25\xef\xab\xbf\x2b\xf3\xc0\xd0\xdd\xfa\x9c\x70\x29\x35\xba\x67\x7c\x22\x83\x72\xb7\x74\x50\x2b\x39\xa0\x3d\x29\xfe\x88\x72\xa6\xc8\x7b\x2a\x7f\x01\x12\x11\xd0\xac\x65\xe7\xfa\x76\x16\x33\x77\x1d\x05\xaa\x81\x0b\x4c\xc4\x04\xb6\x1e\x00\x1d\x46\x34\x3c\x38\x00\xdf\x8a\xa9\x50\x03\x81\xe9\xbf\xf9\x40\x36\xa1\x37\x07\x0a\xdc\xce\x26\x5e\xce\xee\x86\x86\x3f\x79\x73\x0c\x35\x31\x41\x38\xa9\x6c\xd2\x20\x77\xd5\x8c\x4c\xf7\x58\xc9\xbd\x01\xa4\x11\x1e\x5b\x79\x32\xe8\x7e\xfb\x82\x20\xc8\xfe\x8a\xa1\x22\xca\x04\xb8\x40\xf1\xa1\x8b\x2c\x81\xbb\x36\x6c\xfd\x1b\x45\xff\x15\xb5\x8d\xd4\xab\xd5\x5e\x23\xe9\xed\x83\x36\xb0\x60\x8a\x30\xcb\xa6\x09\x2f\x81\x19\xa4\x49\x63\x18\x7e\x2e\x6e\x02\xcf\xb7\x1c\xd5\xd0\x0c\xa8\x22\x86\xed\x43\x1d\xba\x07\x91\x2f\x7f\x9d\xb7\xfd\xc1\x8b\x9f\xe4\xc2\xfb\x10\x11\xbb\x89\x00\x22\x1b\xba\x61\xfb\xe7\x37\x83\xed\x53\xa6\x01\x64\xc3\x34\xfc\xed\x94\x2f\x16\x8b\xef\x7a\xd0\x34\x6f\xde\x5e\xd8\x81\x95\x0e\x6c\x07\x96\x17\xc8\xd0\xf6\xdd\xed\x53\xe7\x1c\xc5\x32\x86\xad\x99\x60\x3b\xdd\x54\xa1\xe7\xa7\xd7\x25\x16\x9c\x38\x16\x54\x1d\x0b\x18\x76\x8a\x14\x49\x9e\xd7\xd8\x9f\xb8\xd0\x9b\x38\xa6\xea\x21\x3e\x5c\x9d\x6b\xa6\x99\x40\xbf\xa6\x91\x67\xe8\xf6\x76\xa0\xb3\x7d\xec\x81\xa6\x8e\x45\x12\xa3\x81\xc3\xd0\x62\x47\xfa\x5b\x34\xad\x46\xf2\x25\x31\x5f\x45\xbe\x7d\xdb\x37\xc5\x7f\x7e\x21\xe8\x5f\x7f\xdd\x78\xfa\xbc\x55\xce\x71\x2e\x5a\xed\x1e\xe2\x49\x73\x9c\xa1\x9d\x36\xd5\x3d\xa4\x4b\x9b\x38\x83\x4b\x31\x9a\x18\xf3\xd2\x31\xb6\xfd\xdf\x47\x7d\x62\x3b\x64\x8c\xdd\xc1\x76\x54\x98\xf4\x85\x13\x1f\xb8\x2c\xf4\xb4\x7f\xfe\x68\xf1\xa7\x03\xe3\x58\x91\xdd\x35\xe0\x4d\x12\xca\xd0\x17\xe6\x39\x77\xe1\xf2\xae\x90\x1c\x28\x33\xe8\x9b\x86\xe7\xdf\x15\x3d\x8c\xb6\xf7\xf6\x19\x5f\x56\x4c\xc7\x83\xbe\x61\x5d\xf1\xfc\x28\xb0\xa6\xb8\x47\xa2\xcd\x4f\x07\xf5\x07\xbc\xb3\xf6\x3e\x96\x73\xc5\x74\xae\xcd\x0d\x4e\x61\x8e\xb5\xb8\x66\x2d\xbb\xc1\xd7\x47\x5b\x6c\x37\xf1\xfa\x76\x88\x6c\xd0\x7d\x30\x82\xc6\x2b\x2f\x6a\x3a\x8f\x3b\x73\x07\x9e\x07\xfd\x34\x3e\x63\x5f\xbd\x7a\x1b\x58\x5b\xb7\x4a\x87\x9e\xbb\x86\x02\xed\x2b\x51\x27\xba\x79\x2d\x24\x45\x37\x11\xd5\x09\x64\x13\x6e\xed\x4d\x31\xa2\x15\xc9\xc7\x03\xe1\xfb\xc2\xde\x6e\xca\x1a\xd7\xe5\xac\x5d\x77\x15\xbc\x62\x1b\xbb\x27\x77\x0c\x9f\x3d\xba\xe7\xfd\x9a\x41\xc4\x03\xf6\x8f\xda\x43\x3c\xad\x8f\xcd\xc1\x98\xa7\x75\xfc\xd4\x85\xe7\x3a\xae\x7f\x60\x43\x10\x0b\x7c\xaf\xd6\x45\xd0\xf3\x9e\x0f\xae\x7c\xe0\xfb\xd0\x9a\xfb\xc8\xd6\x2d\x3c\x1f\x58\x73\x64\x3b\x64\x72\x82\xf8\x0a\xb2\x71\x6c\x78\xd9\x5f\x6a\xc0\x30\x03\x37\xd1\x5b\x5e\x2b\xc1\x5f\xcf\xe1\xfd\x46\x89\x97\x25\x12\xb8\x97\x61\xff\x50\xe2\x95\xd6\xd9\xad\x6c\x38\xee\x79\xa3\x7e\x8b\x98\xf8\x0f\x82\xfe\x85\xf0\x92\x80\xc4\x5f\xff\xd7\x2f\x84\xa6\x28\x82\xfa\x2b\xb5\xad\x92\xd3\xb0\x0f\x37\x59\x72\x95\x27\x19\x73\xaf\x75\xe7\xd1\x42\xdb\xd6\xeb\x52\x15\xda\xce\x1d\x9f\x50\xc5\x0b\xe4\x9d\x12\x2e\xf4\x4e\x3a\x20\x22\x75\xc4\xe8\x42\x70\xf0\xa5\x4b\x7d\x12\x73\xde\x8f\xea\x94\x58\xac\x7b\xa0\x67\x8c\x15\x5b\x78\xf0\x56\x0f\x73\xa9\x67\x62\x0e\xff\x51\x3d\x8f\x10\x8f\xeb\x79\xd1\xc9\x9d\xdd\x87\xf6\x12\x
9a\xce\x1c\xde\xe9\xd2\x8e\x45\x3f\xd1\x11\x25\x96\x3b\x9e\xa0\x60\xbf\x5e\xfb\xed\x91\x76\x38\x5a\xd1\x3d\x22\x16\x57\x3a\x9a\x53\x12\xf6\xeb\xc0\x27\x88\xe7\x44\x9c\x94\x76\x95\x8c\xe3\x1a\xd1\x87\xc9\x38\x2e\x8a\x7f\x3b\xfa\xed\xe9\xe4\x2d\xc5\xa7\x6e\x79\x77\x62\x85\xeb\xa3\x5a\x25\xb6\x00\x3e\x32\xed\x8a\x7a\xfc\x1b\x91\xda\xf0\xbc\x00\xba\x8f\x43\x29\x8e\x9a\x3a\x3b\xbd\xa0\xc5\x37\x0d\xcb\xb8\x32\xa2\xf8\x7d\x73\xc1\xcf\x1b\x4c\x24\x76\x55\x3e\x34\x8b\x4a\x3e\xff\x59\xf3\xa8\x04\xe6\xc7\xe7\x3f\xb7\x50\xe3\x46\x3b\x43\xda\xb5\xe4\x7f\xd2\x1d\xef\x64\xb9\xf7\xc3\x46\x9e\xdc\x43\x8b\xcd\xdc\x5f\x9d\x84\xe2\x07\xe6\x1b\xe7\x06\xb8\x8a\x76\x29\xaf\xde\x55\x26\xc0\xd6\x61\xea\xdc\x3c\x49\x4e\x72\xdb\xee\xe3\xb1\xfa\xb8\x76\xfe\x71\x8a\xfe\x61\x7e\x64\x47\x5d\xa7\x91\xe3\xaf\x5c\xe8\x05\x66\x6a\x74\xf7\x57\x16\xbc\x3b\x9f\x3b\x6e\xb1\x7e\x9c\xcf\xb3\x7d\x8b\x8f\x92\x7a\xb6\xe3\xfc\xed\x21\xe2\x76\x0f\xdd\x62\x6f\x27\x92\x46\xc4\x63\x66\x77\xb6\xc3\xfd\x11\xa2\x84\xed\xcc\x5a\x73\xdc\x3b\x8b\xa1\x88\xc0\x77\xf9\x3b\x9c\xdd\x86\xf4\xde\x8d\x57\x96\x3a\x62\xbb\x8b\x94\xa5\x6e\xe3\xb8\xa8\xd8\xe7\x6b\x3d\xb1\x83\x7c\x7b\x29\xe6\xda\xcd\x51\xa9\x5c\xc3\xf3\x65\xa2\x20\xb5\xc8\xdc\xb0\x56\xa8\x4b\x42\xad\x50\xe9\x49\xcd\x1e\x5e\x1a\x11\xe3\x7a\xa1\x53\x6a\x48\xbd\xbc\xd8\xe0\x3b\x03\xa6\x95\x67\x1a\x43\xbc\xf4\xf2\x8a\x70\xbb\x0f\x1d\xff\xc3\xa0\xe8\x6b\xcc\xef\xee\x2f\xf1\x8a\x1c\xae\xbc\xbc\xbc\x22\x2f\x7c\x8b\xe7\x79\xfe\xd7\xaf\x97\xc4\x0d\xfc\xaf\x9f\xf7\x34\xe4\xa9\x41\xae\x39\xe2\xa9\x11\x39\xe0\xc5\xd2\x70\xd0\xc6\x7b\xd5\x06\xde\x6b\x90\xb9\x5e\xb1\xd4\x6b\x31\xa4\xd8\x6b\x56\x1b\x12\xde\x2a\xf5\xc9\x41\xbb\xd4\x28\xb7\xa5\x6a\xb5\x84\xbf\xbc\x22\x18\xba\xff\x9c\x6a\xc6\x52\x2c\xc7\x11\x24\xc5\xe1\x9f\xa4\x62\x7e\x58\x2d\xd2\x6d\x89\x6c\x48\x65\xb1\x99\xaf\x4b\x85\x1c\x43\xe0\x3c\x49\xd0\x63\xaa\x29\x09\x9d\x76\xad\x38\xa8\x32\xc5\x5c\x2d\x5f\x6f\xd5\xca\x85\x06\xd9\x61\xc4\xd1\xa0\xdf\xdb\x92\x48\x46\xe4\x5d\xd5\xf0\x21\x12\x89\xfb\x1a\xe6\x86\xc5\x56\x65\xd0\xaf\x0d\x1a\xa3\x52\xa1\xd6\xef\x56\x07\x7d\xaa\x50\x2c\xf1\x44\x4d\x1a\x8d\xf0\x4a\xab\x5a\x67\x1a\x7c\x85\xef\x89\xad\x42\x8f\xae\x35\xf3\x1d\xb1\xd0\x1f\x36\xa4\x88\x44\xea\x13\x48\x24\xae\x3b\xcb\xf9\x8a\xe0\x13\x4e\x72\x7d\x9d\xef\xbd\x9e\x72\xba\xd6\x77\xe0\x91\x26\x54\x8e\xd5\x28\x82\x86\x90\x66\x55\x4c\xc6\x19\x99\x92\x59\x4e\xc3\x09\xa0\x51\x04\x86\xc9\x0c\x45\x73\x00\x27\x35\xa0\x61\x24\x4a\x00\x15\x95\x29\x5c\xa6\x09\x42\x46\x19\x19\x72\xdc\x96\x1e\xf4\xc9\xcf\x16\x83\x62\x70\x80\x43\x02\xd7\x34\x9c\x64\x01\xca\xc8\x28\x64\x50\x4d\xc5\x34\x5a\x25\x30\x56\xc1\x34\xa0\xa8\x38\x2a\xd3\x8a\x82\xb2\x0a\x41\xa8\x14\xc3\x50\x38\xc5\xb1\x34\x8b\xe1\x14\xc0\xe8\x6d\xbb\x46\xad\xf3\xc2\xff\x8f\xfd\xe4\x86\x55\x83\x5c\x67\xd7\x9d\x6a\x8e\x11\x6c\x81\x2b\xe1\xe8\x6a\x9a\xcb\x78\xa8\xee\x7b\x61\x39\xdc\x60\x43\xb5\x33\x18\x81\x5c\x05\x14\xf4\xad\xbc\x28\x91\x35\xb0\x99\xe3\xad\xbb\xc8\x63\x7e\x88\x91\x91\x58\x6e\xf6\x0f\x54\xe4\x53\x3f\x2f\x67\xbe\x7e\xc5\x50\x39\x99\x51\x58\x59\x03\x18\xe0\x94\xc8\x30\x59\x9c\x46\x51\x46\xe3\x50\x6d\x6b\xa3\x40\x41\x49\x02\xaa\x18\x89\xe3\x80\x50\x70\x0e\x47\x59\x56\xc1\x09\x0c\xd0\x38\x4a\x43\x9a\x8e\x8c\xec\x33\x8c\x9d\x85\x32\x4d\xa8\x18\x05\x38\xc8\x2a\x38\x49\xd2\x9c\xa6\xa0\x04\x8b\xcb\x28\x4e\x33\xac\x2c\x73\x10\x30\x1c\x90\x65\x4d\x45\x49\x96\xc6\x34\x85\xe6\x08\x45\x61\x49\x14\xd2\x10\x32\x18\x7c\x79\x45\xf0\x57\x04\xa3\x18\x9c\xc2\x19\x8e\xa5\xf6\x16\x5b\x6c\x8e\xa7\x98\x14\x50\x0e\x2a\x57\x98\x01\x69\xaf\x1b\xcb\xde\xaa\x48\xf4\xe7\xce\x2c\xb3\x2c\xf0\x0d\x3f\x8f\x55\xf1\x3a\x93\x63\xe8\
xb1\x69\x89\x6a\x63\xde\xcf\xd7\xa9\x52\xcd\xe5\x0a\xd2\x94\xa2\x16\x80\x0e\xf1\x52\xb5\xee\x2f\xba\xcd\x42\x6d\x59\x64\xd7\xcd\x5e\x16\xf0\x4e\x04\x3d\x94\x5d\x4d\x49\x18\x51\xbb\xc7\xf7\x57\x15\x0b\x33\x85\x7a\x18\x2e\x82\x69\x55\x59\xb7\x36\x1e\xc7\x14\xb2\xbc\xd8\x35\xf2\x7a\xab\xe9\x86\x34\x11\x2e\x40\xb3\xd8\xf0\xa7\x68\x7f\x01\xa7\xf9\x76\xd1\x66\x79\xb2\x1a\x56\x6c\x83\xb1\x17\x10\x04\x59\x54\x9c\x4c\xb2\xc5\x19\xbb\x16\x05\x8b\xb1\x4b\x91\xc5\x96\x53\x2c\x56\xf4\xd2\x5a\xfd\xff\x03\x8b\xa5\x28\xc0\x61\x32\x45\xd3\xac\x42\x42\xc0\x51\xb2\xc2\x69\xa8\x86\x92\x24\x90\x35\x5c\x21\x50\x85\x60\x69\xa0\xaa\x2c\xc3\x10\x28\x94\x21\x45\x93\xb2\x4a\x51\x2a\xca\x01\x5a\xd5\x18\x4c\xdb\x5a\xdb\x67\x58\xbd\xa6\x31\x9a\x42\xa0\x24\x0d\x59\x1c\xc7\xb7\x85\xa1\x38\x4a\xa2\x8a\x4c\x41\xa0\x72\x40\xa5\x58\x1a\xa3\x64\x4c\x61\x64\x6e\x8b\x07\x21\x46\x69\x0a\x8e\x92\xaa\x42\x29\x2a\xc3\x82\x97\x68\xe4\x73\xb0\x58\x7a\x67\xb1\x42\xc5\x67\x8d\xac\x03\xec\x42\xbd\x1d\xe4\x47\xbc\x46\x09\x8c\x3a\x70\xf9\x56\x06\xed\x95\x17\xcd\xfc\x4c\x37\xea\xe5\xd5\xdc\xc8\x05\x63\xbd\xd3\xc4\x40\xdd\x69\x8e\xe6\xc4\x22\xdf\xc9\x6b\x63\x2c\x37\x1d\x0c\x56\xf6\xda\xf3\x35\x77\xed\xb6\x6c\x89\xd2\x20\x3b\x1a\x8f\xb1\x95\x12\x31\x1c\x59\x6c\x6c\x51\x87\x3f\x71\x27\x1c\x1e\xbf\x87\x7c\xb3\xb5\xb3\x9d\x42\xbd\x5a\x59\x02\xba\x65\x35\x4c\xa1\xe6\xc3\xe9\x48\x9e\xcc\x47\x65\xa6\xd3\xab\x36\x34\x58\x91\xcb\xea\x6c\x31\xe5\xc2\x06\xc6\xfb\x6e\x56\x63\xeb\xa2\xec\x94\x0d\x25\x24\xf3\x39\x7e\x8d\xd1\xbe\xe5\x0f\x8a\x05\xb9\x54\x0a\x40\x28\x32\x93\x21\x5b\x16\x89\xc2\x66\x68\x44\xe5\xd7\x53\x2c\xba\x98\x1a\x90\xf7\x16\x2d\xa0\x95\xdf\x68\x7b\xbf\xe7\xf3\x72\x7d\x30\x93\xb2\x61\xf5\xc4\x78\xe6\x72\xb7\xe3\x19\xb0\x6b\xcb\xf1\xcf\x61\x9e\xaf\xa8\x3f\x81\x76\x65\x3d\xfc\x09\xc4\x2b\x2b\xd7\xef\x1d\x0d\x26\x56\xaf\x8f\x43\x6a\x81\xaa\x32\xbd\xd1\xb0\x38\xac\xf7\x85\xf6\xb0\x59\x1c\xf2\x85\x02\x9d\x23\x9b\xcd\xdc\x90\x92\x46\xcd\x42\x9d\xea\x48\xd5\x4e\x43\x64\x6a\xad\xd2\xb8\x36\xac\xe3\x2d\x61\x58\x95\x0a\x71\x8f\x16\xf7\x62\xcd\xc5\x12\x5d\x29\x18\xde\x2f\x15\x99\xcd\x90\xef\x2d\x75\x16\xb2\x52\xc6\x0f\x59\xb7\xb1\x06\x8b\xae\xa1\x0d\xf9\x4c\x9f\x18\x2f\x78\xe6\x68\x7d\x91\x3b\xe7\xa3\xff\x6e\x3d\x4b\xc8\x0f\x46\x6d\x94\x36\x7b\xe5\xcd\x4c\x09\x96\x68\x0f\x6f\xda\xd6\xdc\x75\x85\x91\xe2\xac\xf1\x79\x9d\xed\x05\xac\xbc\x29\x73\x5d\xce\xc9\x47\x3e\x9a\x53\x03\x9f\x5b\x9d\x99\x74\x8e\x13\xb3\x16\xef\x68\x33\xa8\xac\x0a\x5a\xa6\x44\x0d\xab\x7c\x69\x5d\x03\x5e\x58\x58\x54\xc3\x96\xb1\x69\xe5\x57\x65\x9d\xea\x51\x93\xca\xb6\xfc\x1c\x3f\xa0\xab\xd3\x01\x58\xae\xaa\xa5\xa6\x54\x19\xaf\x86\x21\x3e\x1e\xe3\x64\x1d\x2a\x43\x33\x5b\x9b\x12\xa6\xe5\xce\xbb\x18\xcc\x12\x2c\x74\x98\xc0\x05\x6b\xab\xad\x4a\x33\x57\x6b\x16\xfb\x7d\xb5\x31\x53\x5a\xc5\xc6\xc6\xe9\xb9\x7c\x2b\xdb\x0a\x8a\x40\x25\xa8\xfa\x70\xb5\xe6\x5b\xbf\x7e\x9d\xf7\x16\x9f\x4c\x3d\xf1\x14\xf5\xf5\x53\xea\xc5\x49\x39\xdb\x77\xd6\x3a\x67\xd8\xd0\xa9\x94\x2b\x9c\xdd\x2b\x4a\x63\xb9\xa8\x55\xd7\x58\x66\x59\xf5\xf3\x0a\x61\x72\xcc\x6a\x62\xda\xc3\x42\x78\xa0\x7e\x1d\x3d\xaf\x1f\xfe\xe4\xb6\x7f\x84\xe3\x45\x81\xe7\xb9\x7c\xeb\x89\xa6\xc9\xbb\x10\x76\xd0\x49\xbb\xac\x85\xf3\x16\xcf\x1b\xb5\x20\xcb\x78\x59\xd7\x35\x3b\xe6\x70\xd9\xb1\xdb\x50\xf2\x9a\xc3\x9a\x4e\x48\xc2\x94\x5f\xcb\xb9\x51\xce\xac\x08\x2e\x59\x9f\x4a\x72\xc1\xeb\x37\x58\x7b\x3e\x37\x98\x25\x35\x9b\x64\x89\x19\xdd\xe9\x68\xf2\x18\xaa\x76\x8e\x8f\x9b\xe6\xba\x9f\xa5\x6d\x8f\x7c\xc0\xcf\xf6\x5b\x24\x87\xc6\xd6\x48\x52\x83\x34\x8a\xb3\x0c\x24\x19\x4e\x51\x48\x8a\xd1\x20\xa6\x71\x94\x02\x50\x74\xdb\xdd\xd3\x40\x51\x50\x0a\xb0\x40
\xc1\x50\x56\x26\x48\x14\x57\x48\x96\x25\x38\x8a\xe0\x58\x92\xc3\x93\x8d\x1e\xb1\x2a\x46\xff\xcd\x2c\xb2\xdd\xc2\x66\x38\xee\x29\xf2\x52\x52\x72\x9d\x8c\xb0\x62\x42\x82\x26\x84\xb5\xc7\x54\x16\xf3\x46\x85\x53\x04\x6a\xa8\x5a\x8e\xe0\xdd\x8b\xf9\x57\xb6\x43\xde\x5d\xf9\xe3\x96\xc8\xa1\xf2\xfb\xdf\x20\xc5\x2f\x38\x22\x17\x9f\xed\x80\x05\x43\x2f\x7c\x26\x05\xc9\x86\x7e\xe8\xb8\xb3\x39\xf0\xbc\xf9\xc4\x05\x1e\x4c\x41\xea\x42\xcf\x47\x3a\x42\x01\x91\x62\x61\xe4\x27\xd2\x81\x73\x1f\x5a\x32\x74\x11\x1c\xc5\xa8\x47\x0a\xd2\x1c\x57\x81\x9e\x32\x77\x6c\x1b\xae\x7c\x13\x04\xb6\x32\x39\x2f\x28\x7a\x81\xf0\x11\x30\x13\x78\xbe\xa7\xcc\xb7\x34\xe0\x97\xb5\x8f\xc1\x12\x9e\x43\xbb\x5c\x7d\x28\xa9\x66\x7b\x15\xb0\x58\x58\xa8\x91\xcd\xd2\x52\xd0\x5c\xa5\x59\xdd\x78\xd6\x62\x46\xda\x58\xd8\x34\x55\x1c\xe8\x8d\xf0\xe0\x5e\xb1\x3b\x67\xdb\x4d\x4a\xaf\x96\xa8\x92\x5d\x6f\x0f\xb3\x93\x4c\x7f\xad\xe7\x58\x03\x77\xeb\x39\x60\x78\x62\xd9\x43\x79\xaf\x53\x6e\xf4\x1b\xa3\x4e\xe7\xb3\x23\x6f\x14\x58\x2a\xe3\x49\xa9\xbb\xe8\xb7\xa6\x8d\xf6\x9a\xce\xb6\xbb\xa3\x0c\x04\x56\xe0\x49\x93\xf5\xb4\x06\x66\x9b\x55\x87\x59\x2d\xeb\x53\xac\x89\xeb\xd5\xe8\xa9\x02\x4e\x13\x84\x78\x8a\x24\x0a\x26\x08\xed\x75\x8d\x5b\x84\xc5\x41\xcd\x2a\xe8\x3d\x38\xdf\x84\x7e\x8f\x19\x4e\x1a\x82\x0f\x47\x0d\x6f\xda\x05\x6e\x48\x07\xd0\x91\x43\x39\x63\x0b\xd4\x62\x30\xc5\x36\xac\x67\xe8\x9e\x5b\x0c\x81\x56\xaa\x6c\xea\xdc\x62\xee\x66\x2a\xa3\x46\x36\xa7\xda\x6d\xb1\x43\x91\x8d\x3f\x3d\x55\x5a\x4f\x15\x35\xc2\x14\x1f\x71\x5d\x39\x00\x52\x71\x0d\xb1\xa0\xc2\x4e\x5d\xac\xde\xf6\x31\x00\x2a\x4d\x77\xd5\x12\xdc\x96\xdf\xf1\xa7\x8d\x10\x66\xac\xd6\xa1\xfb\xe0\x47\xcb\x50\xcd\x73\xf9\xb6\xe7\x8e\x14\x41\x1d\x0f\xa4\xa2\xb7\x00\x92\xd6\x6d\xb3\xf2\x3a\xf4\xa6\x81\xdc\x6a\x97\x78\x53\x66\x73\x9a\x72\x36\xdf\x3b\x6b\xef\x8b\xef\xf9\xc0\x21\x1c\x9f\xa4\x16\xf9\xa6\xb8\x9a\xb7\xb2\x84\x53\x92\x32\x1b\x8c\x69\xaf\x0d\x0f\x33\xb5\x7a\x61\x64\xb5\x06\xba\x1b\x74\x32\xdd\xf8\x01\xc6\xf2\x9c\xc4\xf3\x85\x41\x96\x1f\x12\x51\x73\x95\x9b\x8d\x45\xa6\xcd\x17\xf2\x3a\xca\x5b\x15\x96\x2b\x28\x0d\x3d\x28\x66\x88\xca\xaa\x09\x7b\x3d\xb6\x22\xd8\x83\x7c\x1b\x0f\x7b\xaa\x23\x62\xb2\xd0\x35\x6b\xdc\xa0\x20\xb7\x61\xbe\xb3\x6c\x95\x45\xb6\xab\x63\x4e\x7f\x38\x5a\x8c\xd7\x1b\xd5\xe6\x03\x50\x59\x4e\xa4\xa2\x17\x7a\x4f\xd6\xbf\x7e\xa7\xfe\xb9\x00\xe4\xe5\xfe\x70\x8c\x0b\xe6\x70\x00\xdc\x3e\xdd\x5b\x85\xf2\x80\x28\x4a\x15\x7d\x6e\x13\x7c\x27\x3f\x29\x17\xe6\x94\xbc\xea\x94\x07\xfa\x9d\xfa\xd7\x2d\x7a\x38\x40\xbd\x91\x37\x9c\x0c\x4b\x79\xa2\x8d\xd6\x2b\xbd\x76\x76\x81\x2d\xad\xe1\x64\x59\x5e\x7b\x80\xaf\xda\x74\xbf\x3f\x5b\xcb\x85\x35\x61\x1b\x65\x2c\x0b\xe4\x22\xde\x2c\x39\x2b\xc2\x9c\xd9\x7a\x57\x6f\xf4\xd7\xab\xec\xd4\x69\xe5\x45\x71\x5c\x69\x65\xd6\xda\x8a\xd3\x9f\x6d\xff\xf2\x9d\xfa\xf3\xcc\xa8\xc6\xf2\xcc\xd4\xd4\xc5\x26\x44\xd5\x5e\x8f\xe9\x97\x14\xa1\xb5\xa2\x5b\xd9\xd0\x2c\x2d\x14\xa2\x27\x60\x14\xa8\x10\x65\x03\x6b\xdd\xa9\x7f\xae\xdc\xb6\x2a\xa6\x4f\x0e\xf9\xea\xda\x73\x8a\x8a\x63\xd0\x9b\x6c\xd7\x1c\xa0\xb3\x7a\x31\xc8\x1b\x6b\x5a\xf4\x05\xae\x3b\x57\x3b\x4d\x83\xc8\xc9\xd5\xe2\xc6\xef\x60\x35\xbd\xbc\x62\x44\x72\xe3\x58\xf3\x55\xc6\x99\xb9\xc3\x36\xe8\x15\x3b\xdd\xbc\x66\x08\x21\x3b\xc9\xd4\xf5\x11\xbf\x8f\x77\xcf\xf6\xc4\x77\xfb\x8d\x68\x8d\x61\xb7\xb1\xe3\xa5\xf7\x1b\xff\x37\xda\x07\xfa\xea\x1b\x16\xfc\xfa\x23\xb9\xc4\x14\x5f\xdf\xfd\xa4\xf7\xeb\x0f\x24\x16\x8c\x2e\x4e\x80\xf7\xf5\x47\xfc\xfe\x7b\x74\xf1\xbf\x3b\x61\x0d\xc2\xc7\x04\x2d\xb0\xf2\x57\x9e\xb1\x79\x50\xdc\x85\x1e\x74\x97\xf7\x84\xbf\xfc\xf7\xd1\xbe\x34\x7a\x71\x53\xdd\xbd\x40\x95\xc2\xc9\xd3\x8b\x30\x0f\xe
8\xb1\x1b\xce\x03\x57\x99\x18\xcb\xdd\xcd\x2b\x6d\x73\x6c\x03\x6c\x47\x48\x44\x87\xfb\xf5\x07\xf2\x75\x89\xe1\x7f\xa3\x7f\xa3\xae\x82\x7f\xdd\xdd\x53\x02\xd7\x85\xb6\x5f\x8b\x6a\xf7\xf5\x07\x42\x9c\x5e\xcf\x45\xaf\xd7\x6e\xa9\xfb\xdf\x07\x2e\x8f\xac\x1e\x24\xb7\xd8\x1a\xad\x02\xa0\x41\x9a\xa4\x19\x95\xc1\x01\x40\x31\x19\x02\xc8\x00\x94\x24\x58\x8a\xe5\x30\x0d\x6a\x32\x8b\x69\x0a\xa5\xb0\x2a\x86\x01\xc0\xa0\x34\x80\x90\x45\x59\x02\x57\x58\x5c\xc5\x76\x1a\x1d\x70\xb7\x23\xa1\x93\x36\x3c\xdc\x89\xaa\xff\xf5\x07\x82\x9e\xdc\xfa\xef\xd9\xf3\x9e\x0d\xe6\x5b\xbd\xa0\x46\x60\x00\x47\x01\xc1\xb1\x10\x32\x84\x02\x71\x1c\x67\x28\x08\x58\x8c\x61\x18\x96\x96\x81\x42\x91\x34\x45\x6b\x04\xa1\x2a\x0a\xa9\x11\x1a\x54\x68\x74\xdb\x40\xaa\x86\xd1\x84\xca\x7d\xfd\x92\x52\xc2\x15\x0e\x9e\xdd\xa6\x78\x3f\x07\xd8\xeb\xe5\x3d\x27\xf0\xe7\x81\xff\xb9\x75\xbf\xc5\xf0\xd3\xb5\xfe\x1f\xcd\xf0\x63\x56\xf6\x87\x83\x3f\x1c\xfc\xe1\xe0\x0f\x07\x7f\x38\xf8\xc3\xc1\x1f\x0e\xfe\x70\xf0\x1b\x39\x88\xfe\xf7\x7f\x1e\x9f\xbf\xec\xd6\x02\x89\xf3\x1a\x20\x9f\xb3\x16\x18\xaf\xc2\x7f\x64\x2d\xf0\x1f\xda\x0a\xd8\xfe\xe9\x88\x53\x6e\x30\xad\x0a\x78\x55\xa5\xf5\x99\x6e\xe3\x6a\x6b\x84\x99\xde\x98\x75\x6b\x43\x9a\x5d\xa0\x95\x0d\x1c\x12\xcb\x62\x71\xa0\x2a\xc3\x78\x5a\x8f\xd3\x04\x71\x5c\x8b\xcb\x1f\xd7\x0e\x12\xfb\x19\xf5\xd3\x9d\x58\x31\x5f\x26\x16\x06\xd3\x41\xbb\x12\x05\xa6\x15\x62\x55\x17\xca\x99\xfe\x3a\x2b\x14\x3a\xe3\xa5\x3a\xb7\xca\xfa\x62\x28\xe5\xb2\xba\x5e\x2d\x48\x2d\x6a\xc4\xb0\xc5\xc1\xcc\xc6\x4c\xd3\xf7\x4b\x34\x23\xf0\x9d\x5e\xbd\xab\x60\x9b\xb0\x47\x74\x32\xaa\xd2\x6e\x0f\x2c\x9d\xf0\x4b\x6c\x5c\xb5\x3f\x5b\x2f\xef\xdc\x7a\xd9\x7e\x8a\xb2\x56\x5e\xd2\xc5\xf9\xaa\x2b\xca\xba\xc5\x97\x32\xad\x0c\xd1\x5f\x84\x62\xd3\x2f\xe4\xab\xd3\x70\x5e\x2e\x4f\xf3\x75\x30\xd2\x65\x6b\x94\x58\x1b\x72\x17\x12\x5d\x83\x0d\xa0\x4f\x57\x75\xd0\x6b\x72\x74\x6e\xa3\x79\x1c\x44\x15\xc7\x95\xc6\xc3\x4d\x6e\x50\x99\x15\x9c\x2a\x33\x5b\xce\xc2\xe3\x5a\x93\x9e\xb6\xb6\x94\xf8\x1e\xbf\x87\xf2\xde\xb5\xb6\xf8\xb3\x74\xc3\xc4\xd7\xdc\x61\x2d\x52\x14\xd0\x0c\x83\x5b\xcd\x6a\x7b\x55\xa8\xb9\x9d\x81\x43\x06\x0e\x11\xf6\xb4\xa6\x3c\x9d\xf0\xd9\x95\x3f\xd7\x31\x4a\xaa\x9b\x33\x63\xb0\x54\x2b\xbe\x58\x1b\x42\xa7\xb3\xc4\xc9\xac\x4e\x89\x0d\xa1\x5c\x2e\x65\x72\x6b\x5a\x18\x8d\x16\x5e\x7d\x4d\xba\xed\x4a\x53\xe5\x29\x67\xde\x2a\x9d\xad\x35\xe5\x3e\x66\x7f\x37\xb7\xbe\xae\xfc\xec\xe4\x89\x4d\xeb\xab\x6f\xf9\xbf\x77\x47\xe9\xe4\x4d\xff\x43\x1c\xc5\x09\x86\x84\x1c\x47\x90\x9c\xcc\x41\x8d\x51\x65\xc0\x01\x4a\x95\x09\x82\xe0\x64\x86\xd5\x54\xc0\x6a\x04\xc9\x30\x8c\x8c\x01\x8d\x20\x64\x40\xd2\x2c\x50\x29\x05\x55\x35\x8e\xa4\x55\x52\xdd\xbd\x92\x75\x12\x6d\xeb\x47\x76\xe3\x86\x35\xb2\x39\xb4\x86\x56\x8a\x6b\x7f\x12\x4a\x98\x39\x42\xc1\x7a\xee\x60\x9c\x54\x5a\x2d\x6b\xf9\x75\x83\xf2\x73\xa2\x92\xef\x2f\xc3\x02\x17\x12\xba\xef\x36\xec\x31\xff\xc0\xe7\xea\xab\x7d\x51\xeb\xe6\x9f\x29\x7f\x94\xcd\x28\xa9\xd6\x7e\xb7\xfc\xcb\x3d\xeb\x74\xde\x31\x9a\x04\x14\x4a\x93\x50\x06\x34\xa9\xe1\x8a\x2a\x03\x55\x66\x29\x5a\xd6\x08\x92\x64\x49\x96\xd2\x14\x1a\xa7\x71\x92\x01\x2a\x20\xa0\x4a\x70\x8a\xaa\x6a\xa8\x46\x73\x28\x8e\x11\x84\x4c\xc7\xbc\xe3\x97\xbc\xff\x4b\xf5\xfe\x2c\xde\x59\xf2\xf8\xfc\xf9\x0a\xf7\x67\xf0\x8e\xcb\x38\x64\x71\x55\x06\xb2\x8c\xe2\xa4\x8c\x33\x00\x55\x08\x8c\x44\x15\xc0\x60\x2a\x0b\x14\x4e\x56\x18\x8c\x25\x30\x8d\xd3\x28\x40\xc8\x2a\xcd\x41\x05\x10\x2a\xcb\x6a\x32\x0a\x15\x4a\x89\x79\x27\x3e\x9d\xf7\x8f\xd6\xfb\xb3\x78\x67\x7a\xc7\xe7\xcf\x77\x56\x3e\x83\x77\x05\xca\x32\xcb\x50\x00\x45\x35\x8d\x86\x18\xc1\x12\x00\x6a\xa8\xa6\xe2\x14\x06\x18\x5a\xc3\x71\x05\x
d3\x38\x20\xe3\x00\x57\x35\x4d\x91\x51\x86\x61\x29\x8a\x21\x68\xa0\x42\x9c\xa6\xb8\xfd\x8b\x74\x37\x79\x7f\x62\xc7\x4b\xbf\xc6\xed\x5d\xde\x85\xe7\xca\xb7\x94\x63\xf9\xef\x8f\x33\x57\xfb\x8c\xcf\xe9\x30\x7e\x77\x6f\x51\xac\xb1\xa5\xd6\xb2\x35\x93\xab\x78\x89\x27\x06\xfd\x69\xdb\xad\x5a\xd3\x21\x8a\x6a\x45\xd6\xab\x95\x19\x0b\x15\xdb\x61\xe5\xb0\x0f\x95\xe8\x15\x72\x67\x74\x9c\x7f\x7f\xe7\x58\x87\xe7\x1b\x94\x5b\x4d\xe2\xc5\xde\x11\x8d\x7d\x85\xcd\x62\x39\x6b\xe5\x5a\x8e\xc4\x57\x0c\xad\xd9\x1e\x0a\x4e\x6d\xb2\xf4\xd7\x4a\x97\x30\x0b\xcd\x7c\x8b\xc2\xf4\x99\xea\x15\x4a\x20\x27\x0d\x42\x94\xea\x64\xfb\x93\x01\x3a\xd4\x67\x2e\x9a\xcf\x35\x45\x52\x02\x85\x3e\x5e\xb5\x14\x8f\x18\x87\x35\xcb\x90\xc9\x6e\xdb\xad\xd7\xb6\xf3\x93\x32\xd1\x9d\xcf\x50\x8b\x62\x08\x9c\xb6\x4c\x7c\x53\xa7\x7c\x32\x0b\xa6\x9b\x36\x01\x57\xee\xa6\x41\xb7\x1d\xb7\x1f\x4a\x99\x9e\xdc\x43\x0f\x8a\xdd\xe0\xe0\x68\x19\x27\x6f\xb8\x24\x64\xa3\xaa\x7e\x46\xb4\xfa\x68\xb4\xe0\x3f\x21\x5a\x49\xee\xba\xdb\x7d\xa2\x7c\x9e\xff\xf7\xa2\x46\x5a\xb4\xfc\xdd\x43\x83\x8f\x3b\x59\xfe\x4c\xfb\x0b\x83\x6b\xe0\xf9\x2c\xdf\x20\xa9\x51\x4e\x20\xfc\x52\xbf\xd0\xc0\xda\x04\x8f\xd6\xe1\xac\xc9\x56\xda\xb4\x2d\x61\x3c\x07\x07\x86\xba\x2e\xfb\x71\x7b\x5d\x77\x32\xbe\x23\x8e\x8d\xb1\x0c\x0b\x61\xde\x73\xab\x39\xbb\x5a\x0e\xbc\x2c\x4a\xf5\xfd\x8a\x90\x73\x75\xc7\x0b\x26\xb5\x56\xb6\x47\x0f\x7b\x53\xd2\x0f\x07\xeb\x89\xc7\xf4\xfc\x0e\x99\xaf\xc3\x55\xa3\x4e\x57\x16\x8a\xb6\xa8\x54\x31\x74\x60\xe6\x66\xb3\xd0\x26\x75\xb6\x59\xd6\xa6\xe5\xe2\xd6\x11\x0a\xb3\x79\x6b\xdc\x08\xac\xee\x7a\x23\xbb\xf2\x64\xb0\xdc\xb4\xcb\x9d\x42\xb6\x58\x31\xda\x34\x3e\x65\xba\xb6\x44\x84\xdc\x9c\x9f\xd4\x99\xd1\x41\xb1\x7f\xdf\xc9\x9e\x35\xf2\x67\x9d\xac\x1e\xd6\x2c\xf7\x13\x9d\xec\x89\x97\x11\x7e\x8f\x93\x7d\xf2\x38\xf0\xe3\x4e\x26\x9c\x69\x7f\x61\x70\xb2\xa5\x5b\x58\x1f\x57\x75\xaa\x8f\x59\x0b\x0c\x9a\x75\xa5\x88\xf9\xab\x69\x67\x54\x1d\x73\xa1\xa8\x3b\x9d\x1c\x80\x03\xb6\x67\x14\x62\xc2\xae\x3b\x99\x50\x09\x4c\xcc\xaf\x15\x6b\x05\xb2\xbf\x0a\x7d\x54\x15\xf2\x7d\x51\xa3\x7d\x99\x32\x49\x79\x5d\x77\x8b\x7a\x7e\x9e\x31\xfb\xe3\xba\xb5\x52\x7c\x8a\x34\x24\x0d\xb7\x56\xfe\x74\x45\xd7\x55\x6a\x5c\x21\x45\x52\x30\x15\x4f\x23\x69\x91\x9f\xe4\x8a\x9d\x5e\xd3\xb3\x59\x6d\x24\x6c\x1d\xa1\xba\xa6\xf3\x38\x1d\x84\x95\x5a\x85\x16\x84\x02\xbf\xb6\x4b\x23\xb7\xb2\x2c\x15\x85\x12\xc5\x81\x06\x07\xe0\x7a\x8a\x97\x33\x21\x53\x38\x2e\xaf\xfd\xfb\x4e\xf6\xac\x91\x3f\xeb\x64\x35\x74\x46\x0b\x9f\xe8\x64\x4f\xbc\xf1\xf4\x7b\x9c\xec\x93\x07\xfd\x55\x7a\x0a\x0d\x62\x6a\x39\x65\xb6\x5b\x34\x85\x2c\xd4\x15\x82\x69\x0e\xfd\x52\xb5\xba\x19\xf4\xd9\xb0\x6f\x8c\x73\x20\x1f\x50\x35\x2a\x62\x32\x36\xb0\x68\x32\x75\x63\xb8\x18\x0f\xba\xdf\xeb\x64\xf1\xa7\x46\xb3\xc9\xb9\x1a\x38\x2c\x9d\xf1\x5c\x33\xe0\xe6\xd3\xf5\x4c\x69\x77\x68\xd4\x5c\x34\x6a\x0b\x89\x2d\x94\x36\x38\x49\xb6\x9a\xac\x0c\x46\x12\xec\x76\x2b\xe3\xb2\xe9\x12\x1d\xb9\x9d\xc7\x88\x85\xe8\x72\x41\x93\x6c\xb4\x05\x7d\x9d\xcf\x65\x75\x25\xd0\xf1\x62\xd5\x15\xea\x41\x15\xed\x74\x89\x56\x03\x54\x7b\xb9\x30\x76\x88\x8d\x1b\x94\x06\xba\x90\x95\x27\x0d\xc1\x5d\x0a\x5c\xa5\x3d\xc7\x59\x23\xd4\x2c\xb7\x6a\x10\xd9\x75\xa8\x92\xfd\xa1\xb4\x20\x8c\xf1\xf8\xa8\x6b\xba\xb3\x89\xa7\x0d\x7a\xc5\xd9\xe2\x07\x2e\x9c\xed\x1f\x34\x36\x3e\x65\xb2\xf5\xee\xf2\x33\xa6\xbc\x78\xa2\xfc\x3a\xff\xef\x4d\xf6\xd2\xea\xff\xee\xf2\x49\x9b\x9b\xfd\x8e\xc9\xe6\x8d\x84\x10\xef\x9d\x71\x9e\x25\x85\xd8\xc7\x91\x64\x0c\x88\x7f\xc8\x76\xb4\xd5\x94\x40\x74\x1d\xe5\x30\x26\x8e\x7f\xfe\x76\x81\x72\x35\x89\xed\x65\x6e\xef\x43\xf2\xde\x7d\xe2\x88\x3b\xd5\x4c\x26\x19\x8f\x52\x8b\x27\x10\
xa3\x7d\x2f\x5e\x10\x52\xd2\x6d\x1e\x0a\x44\x9a\xed\x72\x9d\x6f\x8f\x90\xaa\x38\x42\xbe\x1d\x32\x11\xbd\x1e\xd2\xdc\x5e\x4d\x3a\x7b\x96\xee\xfc\xb3\x14\xf7\x6e\x68\xed\xdd\x52\x39\x2d\x09\xe8\x31\xc3\xfb\xd3\xea\xc9\xc0\x4e\xd3\x6c\x5f\xc0\xa9\x52\x71\xde\xb1\xdb\x19\x42\x6f\xa6\xb6\x7f\x5a\xdd\x13\xf0\x34\xc5\x6f\x94\x8e\xf4\xa4\x72\xab\x27\x22\xc7\x64\x24\xef\xaa\xc9\xe7\xf0\xfd\xce\x0a\x5c\xb6\xc1\x31\x69\xea\x95\x84\x9f\x27\x07\x22\x3c\xad\x6f\x0c\x96\xa6\x68\xa2\x98\x53\x0d\x77\x69\x28\xd3\xd3\x4f\x26\x4f\x7f\x78\x5a\xb9\x08\x2b\x4d\xb7\x63\x21\xa7\xaa\x19\xf3\xd7\x28\x29\xe5\xcd\x74\x8b\x29\xa7\x5e\x3c\xaf\x69\x02\x32\x55\xe1\xf3\x22\xd3\x1a\xfd\x4a\x4e\xc6\x93\xb3\x3f\x3e\x43\x53\x2f\x90\xaf\xe8\xb8\x2f\xe6\x54\xbb\x28\x9d\xe3\x8d\x04\x8d\x17\x07\x9e\x3c\xad\xe4\x11\x30\x4d\xd1\xb3\xe2\x1e\x8a\x61\x89\x5f\xcf\x9d\x1f\xfb\xf2\xb4\xb6\x47\xc0\x34\x6d\xcf\x8a\x3b\xd5\x76\x9f\xe6\xf0\x46\x1a\xc1\x8b\xe3\x6f\x9e\xd7\xf7\x00\x98\xaa\xef\x69\x71\xa7\xfa\x1e\x32\x11\xde\xc8\x30\x78\x71\x0e\xd0\xd3\x0a\x1f\x01\xd3\x14\x3e\x2b\xee\xea\xd0\x20\xce\x30\xf8\x7a\x4c\x1f\x78\x33\x85\x5c\xca\x39\x49\xcf\xd7\x23\x01\x99\x5a\x93\xf3\x22\xd3\xa2\x84\x07\x17\xaf\xfb\x9c\x69\xd7\xd3\xbb\x9d\x9f\x1f\xf5\x09\xba\xdf\x54\xfc\x29\xad\xcf\x86\xa8\x69\xa7\x6a\x3d\xad\xff\x29\x68\x5a\x25\x52\x8a\xbd\x5a\x93\x64\xea\xb5\x6b\x43\xcd\xc3\x89\x63\x3b\xdd\xa3\xf3\xc9\x1e\x4b\x0a\x17\x1f\x65\x76\x86\x83\x34\xa4\xe3\x28\xb6\xd7\x29\x4b\x45\x44\xf6\x5d\x08\x91\x7d\x06\xc8\xbf\x90\x41\x49\x6c\x8b\x48\x32\x23\xe4\x31\xcd\xd7\xe5\x40\xf3\xf4\x10\xb5\x8f\x6a\x79\x0a\xb3\x55\x72\x37\x4e\x39\x51\x31\x99\x16\xfc\x35\x99\x04\xfc\x35\x4e\xce\x7d\xa1\xdd\xf1\x24\xb8\x8f\x2a\x76\x40\xd8\xea\x74\x74\x8c\x13\xb5\xae\x0f\x15\x4f\xce\xac\x7b\x46\x85\x3d\x48\xac\x45\x22\xc0\x3c\xa8\xc8\xc9\x31\x7b\x1f\x55\x24\x09\xb2\x55\xe4\x74\x20\xfc\xa0\x26\x27\x47\x03\x7e\x54\x93\x24\xc8\x56\x93\x44\x0e\x83\xc7\xd5\x38\x39\xce\xf0\x09\x4d\x92\x38\x3b\x65\xf6\xa3\x84\x53\x65\x92\xa9\x7f\x6f\x85\xaf\xe7\x34\x3a\x07\xda\xaa\x74\x16\x1b\xef\x72\x74\xed\x14\x4f\x44\x71\xac\xb9\x09\x7d\x18\x15\xfb\xff\x02\x00\x00\xff\xff\x83\x97\x70\x56\xf2\x73\x00\x00") + +func baseCoreSqlBytes() ([]byte, error) { + return bindataRead( + _baseCoreSql, + "base-core.sql", + ) +} + +func baseCoreSql() (*asset, error) { + bytes, err := baseCoreSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "base-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x0, 0x8a, 0xbb, 0x55, 0xdf, 0x5, 0x72, 0x31, 0x6b, 0x2b, 0x97, 0x6e, 0xb2, 0x8f, 0x30, 0xeb, 0x24, 0xce, 0xcf, 0x2c, 0xc9, 0xfa, 0xd0, 0xe9, 0x6a, 0xcd, 0xfc, 0xd5, 0x4, 0x74, 0xc8, 0xf1}} + return a, nil +} + +var _baseHorizonSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x7d\x79\x6f\xe2\xc8\xd3\xf0\xff\xf3\x29\x5a\xa3\x95\x92\x3c\x93\xd9\xf8\xc6\xce\x3c\xf3\x93\x0c\x98\x40\x42\x20\xe1\xc8\xb5\x5a\x59\x6d\xbb\x0d\x4e\x8c\x4d\x6c\x13\x60\x56\xcf\x77\x7f\xe5\x0b\x6c\xe3\x13\xc8\xfc\xf6\x8d\x56\xb3\x80\xab\xeb\xea\xea\xea\xaa\xee\xea\xf6\xf7\xef\x5f\xbe\x7f\x07\x77\xa6\xed\x4c\x2c\x34\xbc\xef\x02\x05\x3a\x50\x82\x36\x02\xca\x62\x36\xff\xf2\xfd\xfb\x17\xf7\x79\x73\x31\x9b\x23\x05\xa8\x96\x39\xdb\x02\x7c\x20\xcb\xd6\x4c\x03\x70\x7f\x32\x7f\xe2\x11\x28\x69\x0d\xe6\x13\xd1\x6d\x9e\x00\xf9\x32\x14\x46\xc0\x76\xa0\x83\x66\xc8\x70\x44\x47\x9b\x21\x73\xe1\x80\x9f\x00\xfb\xe1\x3d\xd2\x4d\xf9\x6d\xf7\x57\x59\xd7\x5c\x68\x64\xc8\xa6\xa2\x19\x13\xf0\x13\x9c\x8c\x47\x2d\xf6\xe4\x47\x88\xce\x50\xa0\xa5\x88\xb2\x69\xa8\xa6\x35\xd3\x8c\x89\x68\x3b\x96\x66\x4c\x6c\xf0\x13\x98\x46\x80\x63\x8a\xe4\x37\x51\x5d\x18\xb2\xa3\x99\x86\x28\x99\x8a\x86\xdc\xe7\x2a\xd4\x6d\x14\x23\x33\xd3\x0c\x71\x86\x6c\x1b\x4e\x3c\x80\x25\xb4\x0c\xcd\x98\xfc\x08\x78\x47\xd0\x92\xa7\xe2\x1c\x3a\x53\xf0\x13\xcc\x17\x92\xae\xc9\xe7\xae\xb0\x32\x74\xa0\x6e\xba\x60\x7c\x77\x24\x0c\xc0\x88\xaf\x77\x05\xd0\x69\x01\xe1\xa9\x33\x1c\x0d\x41\xbf\xd7\x7d\x0e\xe0\xff\x9c\x6a\xb6\x63\x5a\x6b\xd1\xb1\xa0\x82\x6c\xd0\x1c\xf4\xef\x40\xa3\xdf\x1b\x8e\x06\x7c\xa7\x37\x8a\x34\x8a\x03\x8a\xb2\xb9\x30\x1c\x64\x89\xd0\xb6\x91\x23\x6a\x8a\xa8\xbe\xa1\xf5\x8f\xdf\x41\x50\xf6\x3e\xfd\x0e\x92\xae\x5d\xfd\x3e\x01\x7d\x6a\xd5\xa5\xf3\x19\x74\x0d\x39\x8f\x58\x04\x6a\x8b\xdc\x03\xef\xf4\x9a\xc2\x53\x04\x32\x40\xeb\x58\x0b\xdb\x11\x75\xcd\x70\x59\x5b\x8b\xce\x7a\x8e\x44\xd9\x54\x90\xa8\xd9\xf6\x02\x59\x95\x1a\xef\xd1\x64\xab\x88\xa2\x66\x50\x41\x22\x52\x55\x24\x3b\x5e\x43\xd3\x52\x90\x25\x4a\xa6\xf9\x96\xdf\xd0\xd6\x26\x06\xb2\xa2\xb4\xf2\xe1\x4d\x55\x0d\xc0\x6d\xa4\xeb\xee\xc0\xf6\x54\x5a\xa5\x51\x91\x0a\xb6\xd0\x3a\xb4\x1d\x71\x66\x2a\x9a\xaa\x21\x45\xd4\x91\x32\x29\xdf\x56\x5a\xac\x4b\x72\xa7\x19\x0a\x5a\x89\x11\x33\x34\x6c\xe8\xb9\x24\x5b\x34\x8d\x42\xcd\xc7\x5b\x9b\x73\x64\xc1\x4d\x5b\xd7\x5a\x0e\x68\xbd\xe5\xe4\x20\x2e\xaa\xb5\xf5\xb5\xec\x35\xb4\xd1\xfb\x02\x19\x72\x25\x11\x22\xcd\xe7\x16\xfa\xd0\xcc\x85\x1d\xfc\x26\x4e\xa1\x3d\xdd\x13\xd5\xe1\x18\xb4\xd9\xdc\xb4\x5c\xc7\x19\xcc\x7e\xfb\xa2\xd9\x57\x97\xb2\x6e\xda\x48\x11\x61\x25\x5b\x0c\xc7\xf3\x1e\xa6\x14\x0c\xe6\x3d\x98\x8e\xb6\x84\x8a\x62\x21\xdb\xce\x6f\x3e\x75\x2c\xc5\x8b\x10\x44\xdd\x34\xdf\x16\xf3\x12\xd0\xf3\x22\x96\x7c\x28\xa8\x59\x15\x11\x87\xd3\x63\xe9\x06\xae\xab\x74\x7d\x46\x39\xd0\x10\xfd\x1e\x4d\x4a\x79\xd7\xb0\x91\x37\x09\x56\x20\x12\x9d\x34\x8b\x5a\xcc\xdd\x06\x53\xa7\xb0\x07\xec\x98\x03\x72\xa7\xaf\xe2\x16\xc1\x38\x2d\x03\x6c\xfa\x7c\x98\x85\x80\x9a\xed\x88\xce\x4a\x9c\x17\xa3\x74\x21\xcd\x79\x59\x48\x54\x16\x2c\x9c\x4d\xf3\x81\xd1\x6a\x2e\x46\xa3\x8b\x92\xf3\x7d\x4a\x33\x37\xbc\xc8\x6f\x24\xad\x4b\x4d\x86\xae\x7e\x0b\x3d\x66\xd9\x89\xdf\x67\xb2\xa4\x54\x1b\xe0\x62\x59\x36\xee\x46\x33\x54\xdd\x9b\xb4\x44\x05\xd9\x8e\x66\x78\x9f\x4b\xb6\x9d\x9a\x33\x24\x2a\xe6\x0c\x6a\x65\x5b\xb8\x09\x53\x34\xcc\x34\xe0\x0c\x95\x09\x33\x23\xf1\x59\x4e\x98\x19\x8d\xe2\xe6\x25\x03\x58\x3f\x74\xc9\x41\x1a\xc4\x36\x65\xf1\xbd\xa1\xb5\xf8\x01\xf5\x05\x12\x5d\xbf\x8e\x72\x10\x27\x20\x4b\x53\x48\x09\x99\xc4\x39\xb4\x1c\x4d\xd6\xe6\xd0\xc8\x8d\xc3\x8b\x9a\x56\xe6\x61\x13\xf2\x54\xe5\x20\xbd\x61\x65\xfa\x9e\xc5\x97\xa1\xe7\x03\x7e\x3a\x7e\x7f\x04\x7a\x99\x8a\xff\xd1\xcb\x5c\x82\x2c\xce\x1b\xc1\x62\x49\x0e\x26\xa6\x35\x17\x67\xda\x24\x88\x28\x73\x58\x48\x40\x96\x96\x31\xe1\x03\x73\x28\x24\xbd\x65\x59\x0a\xd5\x93\xc3\xd2\x98\x43\x87\x1
2\x24\x52\x79\xe8\x13\xa0\x95\x69\x94\xc1\x5d\x99\x6f\xd7\x11\x96\x41\xec\x39\xcc\x3c\xec\x65\x9d\x82\xdf\xba\xd1\xef\x8e\x6f\x7b\x40\x53\x7c\xda\x4d\xa1\xc5\x8f\xbb\xa3\x92\xb8\x33\x06\xfb\x11\x30\x07\xc3\x2c\x1f\x93\xf7\x2d\x03\x51\xc4\xf3\xe7\x03\xfa\xde\x3c\x1f\x26\xe1\x98\xf3\x81\xd3\x12\xd8\xa0\xc5\x50\xb8\x1f\x0b\xbd\xc6\x1e\xbd\xe5\x4e\x8d\x36\x7a\xaf\x4c\x39\x86\xa4\x74\x6b\x05\x95\x49\x35\xe0\x64\xe2\x5a\xc0\x26\x2b\x74\x8a\x63\xc8\xb0\x99\xb4\x90\xdf\x90\x13\xcf\x0e\x4a\xb1\x25\x32\x18\x86\x61\x41\x8b\xd6\xb8\xd7\x18\x75\xfa\xbd\x94\xde\x37\xc5\x99\xa6\xeb\x9a\x7d\xea\xa6\x2d\xb6\x03\x67\x73\xb0\xd4\x9c\x29\x70\xbf\x82\x5f\xa6\x81\xce\x81\xb1\x98\x21\x4b\x93\xcf\xf6\x46\x66\x2e\x9c\x1c\x7c\x65\x87\x4e\x79\xdb\xc8\x98\x21\xab\x58\x46\x3a\x8a\x72\x6d\x83\x24\xba\x1c\x70\x90\x31\x97\x96\x2d\x98\x2d\xab\xc8\xe2\x37\x29\x09\x1b\x78\xcf\xf2\xfc\x6c\xa2\xe1\x12\x1c\x25\xe6\xdb\x7c\xe0\xc4\xd4\x99\x0f\x5c\x1e\x30\x31\xa7\x95\x84\x76\x27\x93\x72\xa0\x01\x14\x7f\x75\x35\x10\xae\xf8\x51\x0a\xe4\x4c\x33\xc4\xb9\xa5\xc9\xe8\x34\x18\x09\x7f\xfd\x7d\x56\xa2\x15\x5c\xed\xd1\x4a\x87\xb6\x73\x0a\x8d\x35\xd2\xbd\xdd\x92\x12\x2d\x54\xcd\x4a\x6d\x92\x3d\xec\x37\xf2\xb8\x1e\x6b\xcb\xdd\x66\xa0\x6f\x19\xcd\xc1\x11\x4a\x77\x00\x0e\x6f\xf1\xd5\x6d\xbe\x65\xfe\x1c\x54\x11\xc4\x13\xbd\x04\x06\xe1\x69\x24\xf4\x86\x09\x14\xfa\x7c\x62\xbf\xeb\xe1\xb8\x69\xb4\x85\x5b\x7e\x87\xc2\x8f\x2f\xfe\x46\x59\x0f\xce\xd0\x65\xf8\x1b\x18\xad\xe7\xe8\x32\x68\xf2\x03\x0c\xe5\x29\x9a\xc1\x4b\xf0\xfd\x07\xe8\x2f\x0d\x64\x5d\x82\xef\xde\xfe\x59\x63\x20\xb8\xfd\x15\x60\x0e\xf1\x7d\x89\x61\x8c\x3f\x0c\x10\x37\xfa\xb7\xb7\x42\x6f\x94\x83\xd9\x07\x00\xfd\x5e\x1c\x01\xe8\x0c\xc1\x49\xb8\x33\x16\xfe\x66\x7b\x48\x4e\x92\x94\x43\xf1\x03\x9a\x1b\x0d\x15\xca\x13\xd3\x65\xaf\x3f\x4a\xe8\x13\x3c\x76\x46\xed\x0d\x5b\xd1\x2d\xb2\x18\xf9\x2d\x96\x04\x23\x55\x84\xdf\x41\xe2\x29\xe0\xae\x7b\x31\x9f\x0c\xef\xbb\x60\x6e\x99\x32\x52\x16\x16\xd4\x81\x0e\x8d\xc9\x02\x4e\x90\xa7\x86\x92\x5b\x7a\x51\x76\x8b\x0d\x2d\x60\x3f\xb4\xd5\x2d\xff\x61\xdf\xa6\xe9\x72\x63\xd9\x85\xf8\xc1\x40\x18\x8d\x07\xbd\x61\xe4\xb7\x2f\x00\x00\xd0\xe5\x7b\x57\x63\xfe\x4a\x00\x9e\xf4\xb7\xb7\x63\xdf\xd9\x0d\x47\x83\x4e\x63\xe4\x41\xf0\x43\xf0\x87\xf8\x07\x18\x0a\x5d\xa1\x31\x02\x7f\xe0\xee\xb7\x64\x6f\x14\x0e\xc4\xc3\xa4\x2b\x42\x7f\x34\xe1\x88\x34\xe1\xca\x78\xaa\xc3\xe4\x2b\x41\x61\x23\xe2\xe6\xa7\xbd\x24\x3c\xfd\x02\x40\x83\x1f\x0a\xe0\xb1\x2d\xf4\xc0\x1f\xf8\x5f\xf8\xdf\x17\x7f\xe0\x7f\x11\x7f\xff\xe7\x0f\xc2\xfb\x4c\xfc\x45\xfc\x0d\x46\xfe\x43\x20\x74\x87\x82\xab\x14\xa1\xd7\x3c\x4b\xd5\x4c\x89\x79\xe0\x40\xcd\x14\x53\xf8\x6c\xcd\xfc\xef\x3e\x9a\xd9\x9d\x53\x03\x3d\x6c\xe6\xe1\x72\x8a\xd8\x4e\xdb\x3b\x18\x3d\x8e\x01\x18\xba\xba\x02\x3f\xb7\x1e\xe0\xdc\xff\x79\xf4\x7c\x27\x80\x9f\xd1\x11\x71\x96\x36\x6a\x8f\xca\x63\x12\x61\x82\xc5\x70\x18\x97\xe7\x30\x35\x04\x3a\x94\xcb\x34\xa4\x09\x4e\x63\x03\x32\xce\xee\xd6\xca\x76\xb9\x4d\x0b\xf3\x0e\xe6\x36\x05\x69\x92\xdb\xe8\x20\xc9\xe5\xd6\x9d\xb9\x14\xa4\xc2\x85\xee\x88\x0e\x94\x74\x64\xcf\xa1\x8c\xc0\x4f\x70\x72\xf2\x23\xfe\xd4\xcd\xe2\x44\x53\x53\x22\xd5\x2e\x31\x59\x37\xc1\x6f\x20\x9f\x37\xba\xca\xc9\xe6\x0f\xc4\xcd\x8a\x91\x2f\xcb\x76\x9d\x1b\xc8\x53\x68\x41\xd9\x41\x16\xf8\x80\xd6\x5a\x33\x26\xa7\x34\x73\xe6\x45\x0a\xbd\x71\xb7\xeb\xcb\x27\x41\x1d\x1a\x32\x02\x92\x36\xd1\x0c\x27\xf9\xd0\xdf\x57\xd7\x35\x28\x69\xba\xe6\x68\xc8\x4e\x87\x0b\xcb\x03\x4a\x00\xfa\xbb\xcc\xa2\xb1\x98\x49\xc8\x4a\x07\x32\x16\x33\xd1\x5e\x48\xc8\x70\x2c\x17\x91\x66\x38\x68\x82\xac\x04\x50\xea\x0e\x42\x29\x89\x55\x1d\x4e\xb2\xb0\x46\xf6\x16\x52\x70\x
91\x44\x12\xd7\x0c\xda\x0e\xb2\xc4\x25\xd2\x26\x53\x07\xd8\x33\xe8\xea\x21\x29\x8f\x33\xb5\x90\x3d\x35\x75\x45\xd4\xcd\x65\x31\xd0\x0c\x29\xda\x62\x56\x0c\x37\xd5\x26\xd3\x2c\xa8\xb4\x62\x8a\x1d\x91\x77\xc7\x5d\x3c\x67\x3b\xd4\x20\xfd\xe5\x46\xdf\x2a\x83\xe5\x9b\x37\xb4\x4e\xd1\x2b\x4e\x63\x49\xc5\x56\xb4\x62\x03\xce\x50\x0a\x20\x43\x25\x01\xbd\x15\xb6\x14\x48\x6e\x87\x83\x43\x55\x18\x26\xc9\x07\x6b\x31\x5c\x6c\x2e\x31\xbc\x77\xe5\xf5\x1b\x97\x02\x0d\x8c\x38\x7d\x60\xd8\x73\xd3\xb0\xcd\x34\x44\x34\x73\xe6\x69\x21\x60\xde\x5f\x95\xdb\x59\xfe\x96\xd6\x62\x88\xa2\xdf\xdb\x95\x6d\x3c\xec\xf4\xae\x40\x7d\x34\x10\x84\xd3\x00\x6e\x57\xb3\x91\x75\x8a\xbd\x95\x1a\xd9\x1b\xf0\xf5\xa9\x29\xe9\x4e\x08\xce\x5c\x0e\x77\xe5\x4d\xf1\x55\x1b\x07\x9c\xae\x3a\xdf\xdf\x64\x0d\x67\x73\xa6\xa7\x28\x95\xa0\xe9\xb3\x1c\x23\x4b\xae\xef\xec\xab\x8e\xe4\x66\x4c\x60\x62\x9b\x3d\xa4\x0c\x89\xb6\xfb\x4d\x69\x83\x79\xc7\x49\x46\x37\xa2\x4a\x8d\xe6\x40\xf7\x0e\x5a\xa5\x4d\x0d\x99\xea\xde\xd5\x53\x72\xd1\x6c\x5f\x3d\x25\xb7\xc5\x36\xa6\x93\xc2\x22\x9c\xcf\x75\xcd\xab\xfe\x01\x59\xeb\xc2\xbb\x8c\x66\x2d\x09\x86\x0b\x1c\xc1\x5a\x62\x39\x9e\x37\x2b\x8f\x19\x58\x83\xb8\x86\x1f\x8c\xfc\x25\x02\xdc\xfb\xa1\xd3\x6b\x0c\x04\x2f\x9f\xaf\x3f\x07\x3f\xf5\xfa\xe0\xb6\xd3\x7b\xe0\xbb\x63\x61\xf3\x9d\x7f\xda\x7e\x6f\xf0\x8d\xb6\x00\xf0\x22\x61\xf6\x56\x7b\x12\xd1\xce\x90\x0d\x76\x69\x80\x81\x56\xce\x07\xd4\x4f\x4f\x32\x24\x3e\xb9\xbc\xb4\xd0\x44\xd6\xa1\x6d\xef\xd8\x9a\x5f\xf5\x94\xee\x21\xc3\x8e\xda\x70\x22\xeb\x50\x9b\xb9\xe1\x9e\x18\xc4\x4d\x36\x38\x9d\x41\x63\x01\x75\x7d\xed\xa2\x42\xca\x59\x66\x2f\xec\xb6\xfd\xbc\xfe\x48\x55\x63\x1a\xf3\x19\x3e\x30\x5b\xb3\x99\x52\x44\x75\xec\xab\x76\x07\x54\x4c\x8e\x98\xe8\xc4\x31\xee\x75\xee\xc7\xe1\xfc\xf1\x35\x5e\xa6\x96\x42\xd4\x2b\x75\xfb\xea\x4e\x26\x39\xe2\xf9\xd3\x8a\xe4\x58\x08\x81\x53\x4d\x39\xfb\xb1\x3f\xb1\x34\x61\x2a\x91\x4f\x43\x70\x96\xd5\x55\xdb\x3d\x94\xcc\x4e\xdb\x05\xcd\x9a\xca\x32\x39\x4c\x69\x51\xb6\x47\xf2\x18\xf4\xfb\xc6\x8e\x69\x27\x57\xa0\x98\x9e\x52\xe5\x3a\xcf\x15\x62\xdb\xaf\x7b\xf1\x1a\xa5\x74\x44\xa6\x33\x3b\x37\xba\x75\x5a\xd8\xbd\xf1\xc2\xe0\xdf\xd7\xc1\xf9\x4c\xa6\x75\x71\x81\x58\xa9\xfa\x4a\x48\x77\x50\x37\x17\x73\x1c\xa7\x76\x64\xe6\xbd\xa9\xa2\xd3\x1b\x0a\x83\x11\xe8\xf4\x46\xfd\x3c\xc7\xe0\xb9\xec\x21\x38\xc5\xcf\xc1\x09\x16\xfc\xe1\x35\x96\x25\x18\x55\x52\x11\x49\x72\x08\x57\x69\x99\x26\x29\xbc\x26\x33\x2a\x52\x54\x44\xc8\x18\x8d\x58\x09\xc9\x38\x45\x62\x24\x4e\x91\x48\xa6\x18\x89\x64\x39\x16\x97\x30\x4e\x26\x55\xee\xe4\xec\xc7\x97\x60\x05\x6e\xbb\x78\xfe\xa7\x8d\xca\xba\xef\x73\x80\x9f\x03\xc7\x5a\xa0\xb3\x1f\xee\x8c\x37\x9a\x22\xb0\xdd\x3f\xbe\x88\x16\x1a\x00\x68\x21\x30\x31\xdd\x60\xd8\x31\x81\x84\xc0\xc2\xb0\x90\x0e\x1d\xa4\xb8\xdf\x37\x14\xc2\xa5\x05\xfb\x1c\x48\x0b\x07\x68\x0e\x50\x4c\x64\x1b\x27\x0e\x98\x41\xc7\x9d\x68\x55\xd3\x02\x8e\x97\xbc\x4f\x52\x15\x97\x3b\xfc\x36\x2a\x24\x58\x96\xe2\x30\x9a\x63\xe9\x73\x80\x9f\xfd\xd8\x1f\x13\x4b\xb3\x1c\x47\xb2\x0c\xcb\x65\x23\x2a\x30\x93\x5d\xa6\xa8\x83\x71\x6d\xd8\x62\x7d\x54\xe9\xa1\x96\xbf\x4f\x7d\x70\xa0\xe5\x57\xc4\x6c\xa2\x82\xbc\xf0\xdf\xcb\x0f\xca\xe4\x95\x9f\x97\x2d\xe4\xc4\xd1\xb1\xad\xfe\x23\x45\xd1\x51\x9c\xbf\x2d\x86\xce\x13\x04\xf4\x1f\x7b\x42\x13\xd4\x9f\x0b\x24\xf2\x2b\x9c\xf2\x05\xda\xe0\x4a\x3c\xfe\x53\x53\xb2\x78\x0b\xeb\x2f\x0e\xb5\xba\x00\x4f\x62\xe2\x8b\x2c\x74\xe4\x4e\x7a\xc5\xf1\xcf\x57\xaf\xa2\xfb\x6b\x86\x35\xe7\xe4\xb9\x0a\x72\xa0\xa6\xdb\xe0\xd5\x36\x0d\x29\xdb\xd8\xc2\xa2\x95\x43\xf5\x10\xe0\x09\xf4\x10\x2e\x95\x66\xf0\x16\x39\x25\x53\x6a\x14\xa6\x1d\xd0\x49\x6f\x18\xa8\x25\xea\x9d\
xbc\x74\x3c\xe4\x23\x4c\x0d\xb0\x04\x85\x88\x93\x2d\x05\xbf\x39\x25\x03\x72\x4a\x9e\x92\x6d\x2c\xe4\xce\x33\x05\x8d\x7c\xd8\xc5\x5c\x29\x0d\xbb\x31\x9d\xe0\x6b\xe2\x00\xd1\x8e\x2c\xf8\xce\x32\x8e\x03\x75\x51\x36\x35\x23\x63\xe1\x5b\x45\x48\x9c\x9b\xa6\x9e\xb1\xce\x0e\x6d\x24\xaa\x28\xab\xaf\xbd\xc7\x16\xb2\x91\xf5\x91\x05\x32\x83\x2b\xd1\x59\x89\xde\x52\x8e\xf6\x2b\x0b\x6a\x6e\x99\x8e\x29\x9b\x7a\xa6\x5c\xc9\x3e\x0a\x8d\x05\x41\x05\x59\x5e\xee\x16\x2c\x07\x2e\x64\x19\xd9\xb6\xba\xd0\xc5\x4c\x43\x09\x04\x87\x9a\x8e\x94\x22\xa8\x80\xf5\x0c\x13\xca\x1e\x7a\x19\xb5\x66\x87\x8e\xc4\x8c\x9a\xd3\x82\x79\xb1\x7a\x46\x96\xed\xe3\xaa\x8a\x7c\xdc\xa9\x2e\x97\xc6\xef\x9a\xfa\x2a\x09\x7a\xe0\x54\x98\x4b\x6b\x77\x6a\x4c\x07\xcf\x99\x2a\x23\x95\x98\x47\xb3\xcd\xa2\x15\xec\x32\x99\xa3\xb7\x58\x29\xfb\xa2\x78\xb3\xe4\x81\x93\x64\xe0\x1d\xcc\x85\x25\x6f\xce\xa8\x65\x4c\x4f\xa1\xcb\x39\x39\xb9\xbc\xcc\x5e\x65\xcf\x1e\x07\x41\x09\xf1\xa1\xea\x0c\x8e\xac\x57\x5d\x53\xc9\x8f\x29\x02\xb7\xb9\xcf\x0c\xe7\x95\x8a\x67\x92\x4d\x1c\x98\xcf\x03\x0a\xce\xf0\xe7\x81\xf8\xcb\xec\xa9\x00\xbb\x57\x0f\x14\xc0\xe5\x92\xdb\x40\xe5\x50\xf4\x58\xd2\xec\xe0\xd4\x38\x90\x4c\x53\x47\xd0\x08\xe7\x2d\x4d\x46\xa2\x11\x9b\xa3\xfd\xdf\xe2\xf3\xf6\xf6\x28\xa5\x98\x98\xd1\x63\x87\x39\x93\x0f\x23\x87\x21\x52\x2f\x28\xf0\xb8\x16\xbd\x2b\x2c\x40\xa3\x2d\x34\x6e\xc0\xe9\x69\x54\x83\xff\xf9\x09\xb0\xb3\xb3\x22\x5c\x69\xed\x43\xad\xfd\xef\x8e\x22\x4b\xe0\x8b\x29\x35\x81\x3e\xa1\x71\x9f\xc3\x2f\x59\xe9\xe3\x51\xc7\x92\x5f\x3e\xef\x8d\xa8\xad\xd9\x07\x5d\x6e\x98\x0e\x30\x16\xba\xee\xca\x96\x6a\xa6\x51\x80\x2c\xdb\xda\x81\xd9\x8c\xc2\x1d\xf4\x1f\xa6\xbe\x98\xa1\xb0\x78\x23\x15\x7b\x0e\x08\xfc\x98\xa4\xfe\x3e\xd5\x26\x53\xd1\xc8\x7e\xa4\xa4\x3e\xd2\xcd\x65\x46\x23\xf7\x49\x7a\x9b\xe4\xd1\x87\x34\x15\x78\x30\xe9\x88\xbd\x47\xe9\x98\x3d\xdf\x54\x84\xda\x07\x4a\xc7\xed\x3f\x4b\x43\xfe\x05\x80\xbb\x41\xe7\x96\x1f\x3c\x83\x1b\xe1\x39\x6e\xe9\xe7\x3b\xbd\x7a\xbe\xb5\x92\xd8\x46\x72\x7f\x00\x06\xc2\x5d\x97\x6f\x44\x8a\xd8\x22\xa7\x24\x72\x63\x7a\xe0\x58\x8b\x2d\xd7\x9b\xa0\xfd\xec\x0b\xd8\xd4\xb8\x05\xd2\xf2\x43\xf0\xc7\x1f\x5f\x00\xa8\x0b\x57\x9d\x9e\x37\xe0\x7c\x00\xa0\x68\x1f\xa7\x32\xb4\x9d\xd3\x53\xb4\x72\xdc\xc9\xe9\x14\xcd\x4d\x79\xea\xdf\xc0\xe3\x9c\x81\xff\x01\xb8\x6b\xe5\x67\x00\x86\x11\xff\x99\x4f\xf6\xec\x7f\xdc\x7f\x7f\x7c\x01\x40\xe8\x35\x7f\x7c\xf9\xe3\x8f\x6d\x01\xdd\xa6\x36\x36\x2c\xa2\xdb\x57\xda\xe3\x8a\x1a\xa1\x73\x79\xb9\x21\x14\x48\x53\x55\x10\x7f\x69\x36\xfd\x10\x4e\x62\xc1\x55\x41\xf6\x17\x10\x5f\x5a\xdd\xb2\x92\x9c\x42\xcf\xc1\x89\xe7\x57\x4e\x2e\x2f\xc3\x43\x30\x3b\x55\x07\x99\x27\x86\x76\x09\x07\x4e\x2a\x46\x3c\xd9\x2a\x2f\xf4\x48\x3f\xfb\x74\x04\x07\x9a\x7e\x8e\xae\x64\xe6\x51\x65\xb3\x60\x9f\xdc\xa3\xe8\xe4\xd8\x71\xb2\x8f\x02\x2a\xbf\x2b\xff\xa8\x28\xec\x81\x19\x48\x01\xb5\xdd\x1c\x24\xab\x41\x4e\x16\x12\x3b\x2d\xb8\xaf\xad\xe6\x9c\x40\x0c\xcc\x34\xca\x59\xe9\xf5\xa9\x60\xdc\x15\xac\x7a\x95\xcd\x57\xf2\x53\x8f\xf4\x52\xb3\x0d\xe9\xd4\x61\x33\x83\xab\x9c\x15\x9a\xac\xb5\xaf\xff\xca\xea\x95\xb3\x12\x91\xf1\x81\x74\x73\x8e\xd2\x0a\x54\x9c\x95\x68\x21\x7b\xa1\xa7\x16\xd8\x38\x2b\x71\x86\x1c\x98\xf1\x48\x45\x28\xf3\xb1\xad\x4d\x0c\xe8\x2c\x2c\x94\x56\x4b\xc1\x31\x67\x7f\xfd\xbd\x4d\xf9\xfe\xf9\xbf\xb4\xa4\xef\xaf\xbf\x93\x3a\x47\x33\x33\x63\x9f\x61\x8b\xcb\x30\x0d\x94\x9b\x42\x6e\x71\xed\xa2\x09\x24\xd3\x66\x48\x94\xcc\x85\xa1\x78\x45\x45\xac\x05\x8d\x09\x4a\x2e\x74\xc5\x33\x12\x57\x13\x2e\xb6\x09\x4a\xac\x1c\x1a\x86\x3b\x7b\x94\x1a\x01\x5b\x4c\xb9\xe6\x1a\x45\x5c\xac\xe4\xa0\x4e\x0a\x2d\xc5\xd0\x62\x7d\xf6\x76\x62\xff\xe4\x79\xe3
\x7d\xfd\x41\xf2\xea\x09\xdf\x07\xa4\x17\x7a\xc6\xca\xda\xf2\x0b\x32\x0b\x2a\xe0\x82\x13\xd5\xfb\x32\x1d\xdc\xbf\x11\xae\xb4\xbb\x19\x67\xd9\x52\xd3\xfc\x04\x3d\x76\x51\x5a\xda\x30\x89\x5e\x55\x96\x5a\x3e\x96\x93\x22\x7b\x29\xaf\x91\xb9\xb6\xab\xc9\x28\x2b\x40\xf0\x1e\x02\xc5\x5c\x48\x3a\x02\x73\x0b\xc9\x9a\xb7\xfc\x5b\xbe\x30\x7a\xcf\x6a\xd8\xe8\x09\xf9\x7d\xfb\x2a\x7a\x0b\xcb\x6f\x29\x26\x2e\x59\xff\x58\xa5\xa0\xb1\xda\x16\x68\x6e\x4d\xfe\x56\x1d\xa2\xae\xcd\xb4\xac\xd5\x94\x63\x57\xee\x7f\x82\x71\x24\x76\x9d\x35\x25\x34\x91\xf0\xf2\x85\x32\x81\x94\x6f\x23\xde\x6d\x17\x05\xf7\x3a\x0c\x85\x51\x4e\xc1\x60\x74\x2f\x34\x5a\xca\x56\x6d\x75\xfa\x78\x42\x94\xbc\xf6\x22\x57\xa8\xdc\x55\xed\x32\x42\x66\xe6\x23\x47\x13\xb3\xf4\xcd\x21\xb9\x82\x16\x04\xcf\xe9\xa2\x36\xa1\x03\xbd\x1a\x94\x9c\x73\x38\xa0\xc9\x8f\xf8\x02\xd9\x0a\xf0\xed\x9e\xa5\x38\x06\xd2\xb4\xd3\x05\x87\xe0\xcd\x28\x26\x3f\x00\x65\x5e\x8d\xfa\x01\x68\xf3\x4a\xba\xcb\xa0\x8d\x56\xe2\x24\xcb\xba\xc3\x92\x9b\x13\x5c\xd4\x0c\xcd\xd1\xa0\x2e\xfa\x67\xb6\xff\xb4\xdf\xf5\x93\x73\x70\x42\x60\x38\xf7\x1d\xc7\xbe\x93\x38\xc0\xa9\x4b\x9c\xbb\xa4\xb8\x3f\x31\x92\x25\xc9\x6f\x18\x7e\x92\x28\xf3\xc9\x44\x4e\x88\x7e\xdd\x5a\xcc\x50\xa5\xb5\xb7\xe4\x90\x4b\x88\x22\x98\x5a\x15\x42\xa4\xb8\xb0\xd1\x26\xad\x11\x35\x63\xe7\xda\xc7\x7c\x72\x34\x47\x30\x55\xe8\x51\x22\x54\x14\x31\xb9\xc7\x9c\x4b\x83\xa6\x70\xaa\x92\x4c\xb4\xe8\x27\x51\xe1\x32\x8e\x77\xf6\x2e\x97\x04\x83\xb3\x18\x55\x85\x04\x13\x92\x08\xe6\x84\x12\x24\x6a\x18\x57\xc9\x04\x6a\xfe\x6c\xb9\x2e\x2f\x05\x8b\x63\xd5\x14\xc5\x7a\x9d\x01\x27\x13\x0b\x4d\xa0\x63\x5a\xf9\x7d\xcd\xd2\x38\xc1\x56\x43\x1f\x55\x52\x70\x8f\x56\x09\x31\x38\xba\x56\xa9\x33\x38\x4f\x0c\xbf\xfe\x40\x5c\x29\x56\x2e\x76\x8e\x20\x99\x4a\x16\x8b\x63\x1e\xfa\xa0\x17\xbc\x20\x39\x9f\x00\xcd\xd4\xf0\x4a\x04\xf0\x28\x81\x4d\x1c\xea\x8e\xff\x7c\x42\x1c\xc1\x72\x95\x08\x11\xb1\x9e\x08\xf6\x84\xfc\x7b\xd8\xf3\x28\xe1\x18\xcd\x31\xd5\x44\x22\x7d\x71\x36\x5b\x69\xb9\x96\x85\xe3\x78\x8d\xae\x64\xb8\x38\x25\xaa\xda\x2a\xbc\xc8\xce\x9c\xe9\xa2\xaa\x21\x3d\xd7\x33\xe2\x38\x59\x23\xab\x75\x3c\x1d\x2e\xf4\x86\xf5\x29\xab\x02\x31\x68\xba\x56\x69\x80\xe0\x8c\xa8\x19\x13\x64\x3b\xe2\x6e\x05\x4c\x01\x29\x86\xab\x36\x16\xf1\x5a\x2c\x00\xf2\x4a\x8d\x60\xfe\x5c\x82\xe3\x2c\xcd\x10\x95\x88\xb0\x1b\xf3\x55\x4d\x2b\x8c\x3f\x72\x69\x10\x24\x4b\xd2\x95\x68\x70\xbe\x51\xe5\xa3\x25\x49\x1c\xab\x64\x51\x04\x96\xc2\x7a\xf1\x20\xc4\x49\x9a\xe2\x2a\x0d\x42\x02\x0f\x47\xba\x85\x66\xe6\x07\x12\x7f\x21\xcb\xdc\xec\xcf\x9a\x86\xed\x58\x50\x2b\x98\x76\x71\x92\xc5\xc8\x4a\x03\x92\x20\xc4\x48\x8a\x9c\x8b\x9b\xa2\x6a\x58\x25\xd3\x22\x48\x31\x11\xc7\xe5\xe2\xa7\x09\xa2\x92\x51\x11\x54\xa9\x50\x04\x67\x30\x96\xaa\x34\x6d\x10\xb4\xcb\x77\x30\x00\x2d\x64\xc0\x19\x12\x65\x53\x5f\xcc\x0a\xc6\x1e\x43\xd6\xf0\x6a\x31\x16\x19\xf6\xf5\xc2\x58\xd8\x28\x31\xe8\xf0\xef\x24\x06\x70\x2c\x8a\xbd\x92\xfa\x49\xca\x1b\xcd\xd2\x62\x36\xcf\xf1\x1f\x3e\x15\x7c\x7f\x2a\xb4\xa8\x58\xe6\x3c\x1a\x90\x8a\x49\xf7\xe1\xd3\x88\xea\xa9\x9a\x8f\x22\x6b\xfe\x44\x98\x5a\xe7\x27\x3a\x66\x58\xfc\x9a\x46\x95\xd8\x9b\x2a\xe5\x4f\xbf\xc1\x89\x62\x97\x8c\xf7\xf2\x93\xe0\xc6\x86\x34\x5a\xe4\xde\x5a\xa4\x68\x8f\x56\xca\x01\x8b\x20\xea\x2e\x52\x68\x45\x72\x8c\x47\x6e\xb6\x58\x21\x25\x63\x10\x11\x87\x92\xa8\x89\x73\x0b\xc9\xe6\x6c\xbe\x08\x03\xee\x4d\x40\xb9\x6b\x85\x29\xd4\x2a\x39\x4f\x8a\x15\x2d\x24\x2d\x34\x5d\xc9\x25\x45\xe0\x2e\x29\x8c\x00\x18\x7e\x49\x92\x97\x24\xf9\x27\x45\xb0\x94\x4b\x0a\x2b\x4f\xca\x8f\x2a\x25\x4b\x33\x82\x5c\xac\x22\x45\x92\x70\xa3\xd8\xf2\x04\x69\x4c\xd4\xb5\xf7\x85\xa6\x68\xce\xda\x2
b\x03\xce\x47\x5f\xc3\x59\x37\x9f\xa8\x80\x1f\x0f\xbd\xd0\xd4\x09\x1c\x91\x2f\x18\x2a\x20\xc4\x61\x21\x9d\xac\x7c\x3b\xf7\x30\x6f\xd5\x84\x7b\xe7\x40\x6f\xf4\x20\xd4\x55\xe3\xe9\xe6\x8a\x19\xf4\xa8\x7e\xaf\x23\xdc\x35\x6e\x7b\xad\x7a\x8d\x24\x78\x8a\x64\x5e\xe8\xbb\x5e\x73\x38\xe8\x5e\x3d\xde\xd4\xae\xea\xdd\xc6\xed\x7d\xb7\xd3\xea\x53\xc3\x9a\xf0\xfc\xf8\x30\x4e\x6a\x29\x93\x08\xe1\x12\xa9\x3f\x5d\xdd\x5f\x3f\x3e\x74\x1f\xfb\xcf\xed\x56\xf7\x61\x74\xf3\xf8\x40\xb7\xae\xda\x3c\xd9\xed\x3d\x3f\x13\xd7\xf7\x37\xb7\xb5\x3e\x7f\xcd\x8f\x85\xfb\xd6\x98\xe9\xde\x35\x86\x42\xeb\xe1\xa9\xdf\x2b\x4d\x84\xf4\x88\x0c\xee\x9e\xdb\x9d\x2e\xd1\xe8\x90\xad\xde\x3d\x55\x7f\xea\xb6\x6e\x7b\xcd\x6e\xeb\x7a\xdc\xbb\x1b\x13\xed\x67\xf2\xe5\xb6\x35\x6c\xf7\x7b\xe3\x86\xd0\xe7\x87\x8f\xb5\xfb\x46\xad\xff\x44\xb4\x4b\x13\xa1\x5c\x22\x3c\xfd\x58\xbf\x7b\xe6\xe9\x67\xea\x91\x17\xda\x4f\x8f\x03\x62\x7c\xd3\x27\xc6\x7d\xaa\x3e\xbe\x6a\x8f\xef\x6b\x94\x30\xbe\xbb\xe9\xf7\x88\xfb\xf6\x03\xf5\x38\x68\xf7\x3b\x83\xde\xcd\x4d\x9b\x38\xd9\xf7\xf0\x39\x18\x0a\x45\xcb\x7c\xc5\xe7\xcf\x92\x07\xb3\xcf\x01\xb5\x39\x75\x56\x64\x81\xbb\x67\x9c\xaa\xac\x23\x55\x39\x57\x73\x14\x49\x63\x2b\xca\xde\xe9\x3a\xef\xfa\x9f\x62\x41\xd3\xce\xd5\xec\x3b\xd2\xc2\xb3\x35\x91\x31\x10\x3f\x31\x77\x0e\xdc\x61\xf1\xcf\x57\x3f\x4a\xfd\x7a\x09\xbe\xd2\x7f\x06\x27\x12\xbf\x9e\x83\xaf\xdb\xbd\x10\xf7\x91\x01\x1d\xed\x03\x7d\xfd\xbf\x2c\x43\x4d\x52\xc3\x13\xd4\x88\x73\x40\x7e\x2a\xb5\xd8\x19\xbe\x73\x80\x79\xc4\x6c\xc7\x0d\x6c\x8c\x49\x38\x23\xbb\xb8\x71\x0c\xdb\x10\x2e\x4d\x80\x8c\x13\x48\x91\x26\x8a\xf6\xd8\xf2\x90\xe7\x00\xf7\x05\xf2\x6f\x6a\xf9\x7a\xe9\x8a\xf8\xd5\x37\x05\xf1\x0d\xad\x5d\x1a\xfb\x3a\xd1\xf2\x5c\x51\x01\x57\x14\x51\x0b\x0c\xe8\x93\xb4\x1c\x10\xf8\x6c\x2d\x27\xe4\x29\xa7\xe5\x3d\x7d\x6f\x79\xae\x88\x90\x2b\x86\x65\xf1\x4f\xd5\xb2\x4f\xe0\xb3\xb5\x9c\x90\xa7\x9c\x96\xf7\x9c\xab\x7d\xae\x0a\x9c\x6c\xda\xa1\xbd\x7d\x9d\x6c\x78\x70\x2f\x1a\x03\xd0\x34\xe4\x70\x89\x66\x18\x56\xa6\x10\xe4\x68\x49\xe6\x54\x4c\xc5\x28\x0a\x4a\x2a\x21\x93\x98\x4c\xb2\x0c\x54\x14\xb6\x56\x23\x31\x24\x21\x9a\xa1\x24\x85\xa6\x15\x8c\x83\x8c\xa2\xd6\x70\xd5\x8d\xd9\x38\xa9\x26\xb3\x92\x0a\x71\xc8\xc9\x34\x89\xe3\x12\x4b\x30\x18\x56\x53\x39\x4c\x95\x6a\x34\x03\x65\x8c\x22\x91\x82\x53\x04\x01\x49\x99\xe0\x08\x8c\x65\x65\x82\xc4\x21\x43\x60\x0c\x62\x18\xcc\x9f\x75\xf0\x44\xb2\xeb\xa7\x38\x4c\x32\x07\x0e\x33\x1f\x92\xa3\x58\x86\x2a\x7c\x1a\xf8\x75\x9c\xf5\xce\x25\x33\xe7\x5e\x39\x6c\xe2\xef\x1c\x50\xee\x3f\x78\xf0\x4f\xf8\x23\xbe\xf9\xe0\x4e\x3d\x3c\xcf\xf3\xcd\x6b\x87\xd5\x2e\x4c\x68\xb4\x6e\x07\x8b\xc6\x33\xaf\xd2\xcd\x9a\xf2\x68\xf1\xf7\xdf\xb0\x71\xe7\xfd\xae\xf1\x36\xd1\x6e\x3b\xab\xb9\x56\x5f\xbc\x4c\x86\x77\x38\xbc\x35\xef\x9e\xe7\xe4\x7b\x63\xd8\x50\x5f\xf0\xfa\xeb\xe3\xe3\xca\x58\xdb\x8e\x6a\xad\xad\x7b\xa3\x47\xab\x88\x7d\x7e\x79\xc1\x57\xb2\x8b\x9a\x7f\x92\x2c\x55\x9e\xb8\x9f\x3a\x9b\x7f\xf8\x7b\xf7\x9f\xe5\xf6\xfb\x92\xbf\xbb\x7f\xf3\x3e\xf1\xad\xdb\x9b\xeb\x0f\xc8\xdc\xcf\xfa\x7a\xb3\xeb\xa0\xd7\x67\x69\x3a\x7f\xee\xd4\x86\xe3\x9b\xbe\x8a\xae\xa5\x8e\xf2\xf6\xfe\xca\x2d\xfb\x38\xef\x58\x17\x2a\x7b\x2b\x48\x66\x47\x93\x97\x54\xa3\xce\xaf\x71\xc6\x99\x39\x8f\x57\x2d\xa9\xdd\x5e\xc0\xa5\x50\x9b\x3e\xb1\x1d\x81\x6c\xfd\x7a\xd2\x3c\xfa\xb7\x3d\xaa\x0b\x7f\xcd\x89\x7b\x7e\xfb\x77\x15\xfd\xb2\xf9\x7b\xe1\x9f\x70\xea\x9e\xe7\x9b\xd8\x75\xda\xe3\x7f\xf5\xdf\x49\xe8\xad\x7a\xe3\x6e\x37\xc3\x3b\x24\x07\x0c\x71\x1c\x63\x3f\x61\x48\x85\x63\x55\x9a\x64\x10\x62\x58\x05\x97\x88\x9a\x44\x4b\x2c\xa7\x12\x24\x54\x3d\x9c\x35\x9a\xe1\x20\x41\xa9\x50\xc5\x29\x8c\x84\x0a\x26\x
d1\x84\xc4\x90\xa4\x84\xd5\x24\xc4\x71\x27\x9e\x67\x22\x53\x6d\x9f\xce\x1a\x12\x14\xc6\x31\x18\x59\xf8\xd4\x9f\xca\x29\x9a\x23\x72\xc6\x0b\x99\x31\x5e\x7c\xf7\xef\xe9\xf7\xea\xee\xe5\x15\xef\x2d\x68\x13\x93\xae\x6b\x8f\x94\xb1\xee\x7f\x8c\x57\x57\xe4\xc3\xdc\x7c\xfb\xf6\xd1\xe2\xfb\x4e\x03\xbf\x21\x6e\x6b\xf5\x1a\xf3\xa2\xcf\x04\xa5\x3f\x7f\x68\xdc\xd2\xed\xae\xc5\xb5\x7a\xaf\x34\xfd\x0e\x99\x25\xd1\xbe\xb9\x75\xde\x47\x77\xad\xee\xc7\x15\xbb\xbe\x1b\x5f\x40\xde\xf4\x50\x7b\x43\x25\x62\x90\x83\x31\xff\xb0\xba\x9e\xe1\x7a\xf3\x76\xb9\x7c\x5f\xbc\xde\xc8\xeb\xfb\x5f\x36\x57\x6b\x5d\xf0\xc2\x48\x6b\x4c\xee\xef\xac\x25\x43\x2e\xdf\xe1\xdd\x55\xdf\x79\xc5\x1e\xde\xd1\x6b\x63\x70\x65\xb0\x3c\x75\xb3\xbc\x36\xb4\x9a\xf1\x8e\xe0\xe2\x02\x13\xa6\xd3\x8b\xab\x37\x76\x2d\x34\x67\x35\xa3\xed\x0f\xc5\x94\xa1\x20\xd8\x69\xe6\x14\x0e\x05\x9e\xaf\xbf\x7d\xaa\xdd\x7e\xc2\x9f\x6f\x4e\xd5\x86\x02\x7e\x1c\x33\xf6\x8a\x73\x40\x60\x37\x38\x57\xc3\xbe\x63\xf8\x77\x0c\x07\x18\x76\xe9\xfd\x97\x69\xae\x04\xce\x10\x44\xe1\x53\x8a\xe0\x28\x8e\xa9\x11\x1c\x93\x63\xcc\x85\xa6\xfc\xaf\xfc\xab\x3f\xdd\x68\xd4\xfa\x62\x3d\xbc\xa9\xd7\x9a\x46\x93\x6b\x13\xd8\xea\xb5\xfe\xcd\xc6\x26\x8e\xbd\xec\x2c\x7f\xe1\x4f\xca\xf0\xf1\x19\xd6\xaf\x61\xcb\x33\x65\x21\xc5\x94\xd3\xff\xfe\x3f\x37\x65\x2c\x6a\xca\x05\xd1\x55\x89\x73\xd9\xfb\x06\x5b\x19\x85\x51\x59\x29\x67\xf1\x95\x30\xa9\x68\x92\x79\x32\xb1\x1f\x9a\x44\xc6\x48\xee\x87\x85\x4a\x24\xb6\xfb\x61\xa1\x13\x89\xce\x7e\x58\x98\x38\x16\x6a\x3f\x2c\xb5\x44\x3a\xb0\x1f\x16\x36\x91\xc3\x1c\xe7\xcc\xfc\x51\x56\x7f\xf2\x4b\xef\xce\x01\x5b\x76\xd5\x2b\xe3\xe4\xf8\xc1\xa3\x27\xeb\x06\xa5\xd8\xcd\x45\x7e\xf2\xf0\xcf\x57\xc7\x3c\x28\x1f\x3b\x07\x5f\x55\xcb\x9c\x1d\xb4\x3e\xe1\x66\xa0\x95\x16\x8d\x3e\x61\x45\x39\x45\x79\xd1\x71\x19\xbb\xa9\x29\x4c\xd8\xd5\x85\xa1\x20\xcb\x57\xdf\x7e\xab\xc2\x9e\x8c\xfe\xb2\xe9\xa1\x1a\x2c\x5e\x3d\xf8\x84\xd5\xeb\x2c\xad\x05\x1e\x64\xf3\x99\xfa\x54\xad\xed\xbb\x62\xf3\xaf\xd3\x9a\xef\xeb\x36\x9f\xb1\x4f\xd5\xda\x01\x23\xfe\xd3\xb5\x56\xe0\x38\x53\xce\xb4\x1f\x50\x76\x5a\xe9\xe8\xe7\xbe\xce\x39\xb3\x18\x3a\x35\xb8\x29\x79\xb5\x5c\x71\x78\x43\x65\x87\x37\x85\x88\xc8\xb8\xdb\xcb\x9a\xc8\x0b\xf1\x50\x09\xf7\xb9\x2f\x9e\x84\x43\xd9\x9b\x1f\x26\x8e\x27\x2b\xcc\x29\xc4\x53\x8b\x0f\xd5\xbd\xf9\x61\xe3\x78\xb2\x43\x9d\xaa\xa7\x56\x8f\x11\xec\x14\x95\xdf\x57\x08\x77\x32\x8f\xa8\x1e\x61\x4c\x45\x36\xcc\x65\x24\x49\x6c\x8d\x86\x18\xa6\xaa\x0c\xc2\x49\x96\x84\x48\xc5\x54\x85\xa0\x71\x58\x63\x54\x82\x90\x71\x95\x83\x12\x01\x09\x45\x55\x65\x09\xab\xd5\x58\x9a\xae\x91\x0c\x54\x10\xc1\xd0\x1c\xf4\x33\xfb\x83\x76\xad\x23\x2b\x42\x64\x98\x28\x67\x2e\xba\xd2\x18\x9e\xb3\x60\x1b\x3c\x8d\x8d\x68\x3f\xc3\xbe\x61\x5e\x91\x46\xbe\xce\xcc\x0e\x3b\xba\xd2\x9b\x17\x68\x22\x93\xb5\xbb\x27\xa7\x7d\x73\xf3\xeb\xf1\x81\x5d\x3e\x68\x2f\x75\xd8\x58\xd0\x5d\xfa\x96\xf7\x32\x54\x3e\x5c\x12\xad\x27\x12\xc0\xc8\x77\xc1\xfb\x57\x9a\x4d\x66\xf8\x03\xa1\x4c\xe8\x07\x7c\xf6\x8e\x23\xfd\x56\xbe\xc2\x9d\xd5\xeb\xf0\xf9\xe6\x85\x5b\x0a\x13\x73\x58\x87\xe8\x91\x1d\x6b\x2d\x33\x82\xa6\xcb\xb0\x9d\xc8\x57\x58\x7b\xfb\x78\x5b\x7a\xe8\xb9\xbb\x05\x37\x7f\x5d\xbf\xc9\x83\x21\x83\xe9\xef\xfd\xee\x7b\x8f\x6d\xb5\x7f\x11\x14\x75\x7f\xc7\x4a\xf0\xb9\x87\x46\xa3\xeb\x97\x8e\x6e\x91\x43\x69\xd0\xc0\xc9\x77\xc1\xe2\x16\x77\x54\x7f\xd0\x9c\xac\x1b\xf5\x8b\x89\xbc\x98\x10\x57\x37\x56\xf3\x76\x71\x83\x0d\x47\xe4\x7d\x1f\xde\x8c\xeb\xcb\x9f\x3f\x4f\xa2\xab\x0d\xd1\xe5\xd6\xfb\x34\xd9\xf8\x2d\x7c\xe2\xb9\x0f\xe4\xa9\xa9\x11\x51\xcb\x02\x36\xa4\x87\xa7\x17\xa2\xa9\x3f\x3d\x42\xeb\x81\x19\xaf\x96\xd2\x23\x79\xd5\xbb\x9e\xcc\
x0d\x92\x1f\x36\xa6\x9d\xd6\x9c\x96\x56\xc3\xce\xa3\xb7\x5a\xc0\xd7\x66\x76\xa0\x8f\x49\x4e\xba\x9d\xb9\x98\xe0\xe9\xbe\x79\x00\xfd\x6f\xba\xf4\x7e\x00\xfd\xdb\x04\xfd\xc6\xc2\x24\x4d\x87\xa2\xdf\x1b\x77\xc2\x6a\x7e\x7f\x41\x9a\xed\xde\xb7\x5f\x78\x6d\xb0\xd6\x6c\x5c\x57\x6f\x5b\xcf\xb3\xfb\xc7\x89\xb5\x18\x7e\x1b\xf1\xa1\xfc\x33\x79\x4b\x5f\x38\x50\xfe\xca\xf4\x29\x83\x7b\xdb\x93\x7e\xc4\x96\x26\x7c\x8a\x2d\xec\xa3\x8b\x63\xda\xc2\xef\xec\x0b\x5f\x17\xff\x7c\xd6\xa0\xf5\x82\x43\xef\x64\x77\xb8\x94\xe9\xff\xeb\x4e\x22\x9e\xb3\x2c\x9e\x47\x63\x55\xac\x35\x0a\xb9\xae\x96\x93\x38\xa4\xd6\x14\x09\x72\x90\x56\x24\x92\x24\x39\xa9\xc6\xaa\x0a\x64\x55\x92\xaa\xd5\x6a\x12\x0e\x55\x92\x94\x20\xc5\xb0\x50\xa1\x65\x4c\x51\x39\x8a\x51\x28\xe5\xc4\xdb\x1f\xc5\x0f\x89\x57\xfd\x64\x3a\xcf\xc9\x53\x18\x57\xc3\x33\xf7\xdd\x36\x4f\xa3\x51\x52\xb0\x21\xd0\x65\xdb\xf7\x1f\xf7\x6f\xd2\x0d\xd1\xe6\xc9\xc7\x87\xd7\x81\x75\x33\x7b\x7d\xc2\x30\xf5\x8a\xb5\xbb\x9d\xda\x0c\x13\x06\xcb\xeb\xc7\x0b\xfe\x89\xe4\x37\x3e\xde\xfb\xcb\xf1\xf1\xfe\x9f\xf5\xde\x63\xba\xa8\x0f\x27\xaf\xab\x5b\x38\xbe\xe3\x98\xfa\x2f\xd5\xe6\x10\x26\x9b\x56\xef\xe5\xe9\x57\xfd\xf1\xfa\xad\x65\xde\x84\x3e\x9c\xe7\xfb\xb4\x75\x13\xc5\xf7\xf0\xb1\x6c\x71\xee\x23\xa1\xd1\xfc\xf5\xfe\xf1\x76\x5f\xbf\x37\x7b\xfc\xb5\xa6\xde\x0d\x9e\x9a\x66\x77\xfa\xe1\xac\xe5\x11\xa9\xb7\xee\x1a\xf7\x34\x3e\x79\x53\xec\x56\x1b\xd6\x7b\x8f\x4b\x8c\x1e\x5e\x3c\x4c\x1f\xb1\xa7\xc9\x9b\x85\x35\xea\x77\x02\xd5\x83\xad\x07\xe2\x66\x26\xdb\xe4\xcb\xb2\x3b\xd3\x24\x6a\x34\xb0\x6e\xbb\x25\x7c\x7b\xcc\x68\xb3\x7c\xbb\xbf\x15\xb8\xe3\xdb\xb5\x8b\x3a\xd6\xc5\xae\xaf\xd6\xce\x74\xd9\xc3\xf5\x67\x0c\xae\xe7\x26\xce\xf5\xda\xab\x8f\x6e\x63\xdd\xa7\x9d\xba\x20\x37\x7c\x19\xc9\x89\x63\xf5\x8d\xe7\x8b\xda\x78\xdb\xfe\x36\x95\x89\x82\xf1\x7c\x00\xfd\x9e\xb5\x1e\x8d\x0e\xa0\xcf\xf3\xff\x3d\x7f\x96\xea\x5b\xeb\xfb\xeb\xa2\x6f\xbc\xe4\x8a\x59\xa4\x8b\x43\xfb\xc2\xb5\x85\x6f\x72\x02\x5f\x25\x5d\xfc\x33\x61\x19\x8b\x16\xf8\xf1\x4d\xf3\xbe\xf1\x6c\xfc\xc2\x1e\x96\x4c\x83\x92\x6a\xb2\x21\x70\xf4\x60\xb4\x7c\xeb\x2b\xcf\xd7\x6d\xa9\x3e\x20\x26\xa3\x07\xbb\xd7\x1f\x7f\xe0\xcf\x0f\x4e\x8b\xba\xbe\xe1\xf8\xc9\x68\xd5\x6f\x3e\x4e\x1f\x14\x6d\x6e\x74\x7b\x84\xdc\xa0\xcd\xd9\x37\x01\x83\xbf\x1a\x47\xf7\xad\x38\x43\x41\x1a\x63\x28\x24\x41\x86\x52\x09\x59\x91\xa0\x22\xb1\x34\x23\xa9\x24\x45\xb1\x14\x4b\xab\x32\x43\x30\x04\x55\x83\x0a\x24\x91\x42\x72\xb2\xa2\xa8\x98\xca\x70\x18\x81\x93\xa4\xc4\xf8\xbe\x95\x38\xcc\xb7\x12\xc5\xbe\x95\x25\xb9\x1c\xdf\xea\x3f\x8d\x66\x7c\x87\xfa\xd6\x46\xa2\x53\x77\x7c\x6b\x9f\x68\x5c\xf0\x7d\x8a\x7e\xae\x37\x49\xa7\xfd\xd0\xea\xe3\x03\x92\xc7\x6e\xd1\xdb\x1d\x7b\x3d\x60\x8c\x1e\xce\x73\xe8\x51\x53\xd6\x1d\xc7\x1f\xd2\xd9\xbe\x95\x1f\x0a\x2f\xda\x8b\x84\x5a\xcb\x86\x6d\xdd\xd4\x8d\x9b\xce\xc2\xbe\xc0\xe8\x07\xe7\xba\x59\xb7\x26\xa6\xbd\x98\x76\xef\x2f\xc6\xcc\xd3\xf8\x95\x72\x96\x8f\xeb\xa9\x5d\x1b\x3b\x43\xaa\x71\x8b\x56\xfd\x5b\xe6\xfa\x5d\x56\xdf\xaf\x6f\x70\xec\x51\xaf\xbf\xbd\x2d\x0d\x6a\xc2\xde\x75\xd4\xd7\xce\xd5\xbf\xcb\xb7\x1e\xea\xdb\x0e\x1d\xcf\xb7\xcb\xee\xcc\x3a\xa2\x6f\xe5\x6b\xcf\x5d\x96\xaf\xbd\xea\x13\xe1\x0e\x61\xca\x78\x5c\x7b\x68\xcb\xcd\xfb\x15\x73\x7f\xb1\xd4\xdb\xef\x32\x39\x6e\xe2\x34\xbc\x26\x3b\x1a\xee\xe3\x3c\xb6\x6f\xfd\x2f\xf9\x36\xfe\x48\xbe\x95\xa5\xb6\xed\x3b\x95\x75\xf1\x8f\x30\xbd\x7a\x9e\x3d\x92\x53\x99\xb7\x6e\xd6\x93\x97\xb5\xd6\xb5\xee\xb8\xfe\x83\x34\xbc\x5f\x42\xea\xa6\xdb\x35\x87\xd8\x1d\xde\xd7\xf1\xce\xb7\xae\xdc\xb2\x4d\xa9\x8f\x77\xc7\x0b\xfe\xb5\x6d\x8f\x5e\xfb\x1a\x34\xda\x8c\x36\x74\x94\xd6\xfc\xfe\xe5\xfa\xf6\xfa\x5b\xe7\xae
\xb9\x6e\x53\xeb\xfa\xe4\xe8\x71\xab\x44\x20\x96\x50\x24\x28\x49\x18\x41\x49\x44\x0d\x62\x32\x89\x53\x98\x0c\x6b\xb8\xc2\x42\x99\x93\xe4\x1a\xce\x92\xb8\xca\xa9\x34\x24\x25\x85\xe1\x90\x0c\x49\x85\x65\x55\x09\x43\x32\x2d\x9f\x6c\xea\xfa\x0e\xf0\xad\x45\x8b\x13\x14\xc6\x71\x74\x5e\xf9\x8b\xff\x34\xba\x7a\x75\xa8\x6f\x6d\x26\x3a\x75\xc7\xb7\x56\x5d\x9b\xc8\xf6\xad\xcd\xeb\x85\x8e\x3b\xdd\xab\x6e\x8b\x7a\x58\x2d\x1d\x4c\x69\x36\x1e\x04\x95\x71\x24\x5a\xa7\xa4\xf5\xad\x75\x35\x69\xcc\xbf\xe9\x0f\x2f\xb7\xb3\x95\xec\xd0\x94\xd6\x53\x89\xd9\xca\x79\x5d\x31\xb7\x0a\xfd\x72\x4d\x09\x54\x53\x97\x6d\x95\x62\x04\x7e\x5a\xbf\x1a\x8e\xef\x6c\x83\x55\x9f\x9b\xff\x2e\xdf\x7a\xa8\x6f\x3b\x74\x3c\x77\xb1\x37\xa6\x79\x44\xdf\xfa\x3b\xd7\x64\x3e\xc3\xb7\xee\xeb\xdb\xf8\x23\xf9\xd6\x7d\x73\x98\xc0\xb7\xae\xa5\xb9\x22\x0d\x57\xda\x0a\xb5\x64\xb9\xab\xb4\xef\x97\xfa\xa0\xfd\xcd\x7a\xfc\xf6\x82\xae\xd8\xd7\x9b\x95\xc9\xbf\xab\xf3\x87\xc7\xd1\xb5\xfd\xd4\x45\xa8\xf3\xfa\xc4\xcd\x6d\xe9\x99\x45\xaf\x6d\xf4\x38\x44\xf5\x3e\x4f\x3f\x75\xdb\xdf\xfa\x53\xbe\x73\x3f\x78\xd3\x9b\xb5\xeb\x8b\x36\xc1\x97\x8c\x5b\x33\x56\x97\xf3\xee\x3a\xab\xba\xb0\x9c\xbc\xef\x6c\xe3\xad\xd1\x6a\x1e\x1e\xf9\xf6\x2e\x44\xf2\x2b\xbb\x5c\xa6\xb1\x9c\xed\xaa\x94\x8b\xcc\x0e\xd8\xa6\xca\xba\x6f\xab\xfa\x41\x99\xf8\x4b\x7c\x63\xdf\xc4\xf9\x1b\x5a\x87\xe8\xb7\xf7\x53\x57\xbd\x02\x28\x86\xd3\xbb\x47\x8a\x6f\x36\xa3\xf7\x5d\xef\x12\x8d\xde\x2b\x0c\x4e\xb7\xd7\x80\x65\xbe\x85\x77\x8b\xe3\xb8\x3c\xe7\xb2\xbb\xcb\xe9\xf6\x02\xb2\xc2\xf7\x05\xef\xbe\x3a\xf7\xc8\xda\x0e\xd0\xe6\x4a\x10\x25\x1d\x97\xc4\x7f\x72\x0e\xf2\x24\x8a\xbc\x50\x36\x7a\x27\xc5\x91\xe4\xd8\x62\x4c\x15\x21\x41\x30\xce\x7d\x0a\xb7\xc9\x57\xe0\x26\xbe\x1f\x89\xeb\x04\xd6\x34\xce\xd3\x08\x27\xac\x68\x73\x8b\xdc\x79\xec\x0a\xba\xf3\xc8\x8d\x75\x45\xaf\xc0\x4d\x7e\x3f\x92\x7c\x09\xac\x69\xf2\xa5\x11\x2e\xec\x9d\xc4\x95\x6e\x89\xb3\x75\x5b\x85\x88\x5b\x0d\x88\x51\xd5\x88\x47\x91\x2e\x4e\x36\x4d\xb8\xbd\x18\x0b\xdf\xbf\x98\xd2\xb1\x2e\x7c\xbc\x93\x2b\xaa\xe6\x38\xdd\x5a\x59\xf0\x4a\x9d\x9a\x51\xdd\x57\x50\x42\x77\x5c\xc9\xd2\x89\xe4\x49\x9a\xc3\x56\x69\xc9\x33\xb7\xfb\x0b\x77\xd4\x8f\x2b\x7d\x16\x99\x3c\xf9\x73\x59\x2b\xd4\x40\x32\x7a\x4a\x7c\x3f\x92\x7c\x09\xac\x69\xe2\xa4\x11\x8e\x73\x9f\x16\x57\x04\xf7\xc6\xfa\xff\x3b\x12\xb3\x3e\xb2\x34\x1e\x23\x64\xe2\xac\x85\x57\x2f\xe5\xdd\xb7\x1a\xfd\x7c\x24\x4e\x23\x18\xd3\xd8\x4d\x12\xac\x1c\xad\xf9\x81\xde\x36\xb4\x10\x0d\x38\xdb\x04\xeb\xde\x15\xff\xe5\xee\x8d\x8d\xbd\xd7\x36\x17\x39\xe8\xf7\x12\x91\x6e\xec\x8d\x00\x5b\xe8\x73\xe0\x82\x67\x73\x3e\x35\x67\x48\x54\xcc\x19\xd4\x8c\x3d\x18\x4e\x70\x1a\x41\x16\x65\x30\xf1\x16\xda\x2d\x50\x36\x5b\x9a\xa1\xea\xbe\xb3\x52\xbc\x17\xad\x7a\x9f\x0f\x67\x30\x15\x6d\x36\xab\xa9\xe0\x19\x71\xa3\xb4\xf6\x26\xc2\xfd\x79\x8c\x62\x89\xbe\xf7\x21\x98\x27\xe3\xfd\xbb\x99\x78\xb3\xb9\xf1\xa7\xdf\xc3\xf9\x09\xae\x0b\x2e\xc5\x51\xc6\x94\x2f\x6d\xee\x4f\xd8\x9b\x9d\x2d\x8a\x8c\xb7\x1f\x27\xf9\xf1\x81\xcf\x77\x6e\xc9\x4f\x63\x2e\x72\x95\x79\x39\x06\xe7\xa6\xed\x4c\x2c\x64\xa7\xf2\x19\xbd\x18\xbd\x14\xaf\x91\x06\x67\xe0\xb1\x2d\x0c\x84\xd8\xe5\xea\x9d\xe1\xe6\x2e\xe2\x1f\x3b\xac\x4f\xa1\x3d\x3d\x02\xcf\xde\x65\xef\xfd\x5e\xee\x4b\x12\xe2\x2f\x3c\x49\xdc\x14\x9f\xa6\x56\xff\xea\xf7\x23\x71\xb8\x45\x56\x4e\xa9\xe9\x17\xda\x87\xfa\xcd\xb8\xee\x3e\x57\xd5\xfe\x44\x70\x88\x05\x07\x37\x4c\x97\xe2\x3f\xf1\x5e\x89\xf3\xdd\x57\x48\x14\xa5\x84\x07\xbb\xa4\x0c\x7c\x2e\xff\xc9\xec\xb3\xac\x77\x4a\x41\x79\xa0\x9f\xca\xc4\x58\x92\xcd\x9c\x2c\x45\x44\x2e\x36\x4f\xd7\x87\x4e\xe6\x09\x74\x51\x13\x08\x6f\x1c\x28\x7c\x2f\xff\x79\xf8\xf2\xb
e\x2c\x66\xb7\x97\x5b\x1f\xc8\x66\xfc\xc5\x43\xb9\x0c\x46\x23\x8d\x3d\x98\x36\xe7\xe2\xfc\x58\x7c\x07\xb8\xa2\xac\x67\x64\x6a\x7b\x49\x92\x2e\x80\xb3\x3a\x9e\x00\x01\xae\xac\xd7\xfb\xef\x27\x42\xca\xcb\xfe\x63\x42\x98\x73\xd7\x2a\xa7\xe6\x5e\x32\x04\xcc\x6f\x71\xec\xab\xfc\x7c\x45\x6f\x5e\xfe\xec\x0e\xee\xc3\x75\x1d\x47\x17\x65\x39\x3c\xd4\x1c\x9f\x49\x52\x39\x8a\xea\xf5\x58\x6c\xed\xe0\x2c\x39\xd5\xa5\x30\xe8\xf8\x5d\xe2\x1c\xd2\xad\x5b\x1c\xfb\x9b\x64\x91\xf9\x39\x96\xe2\x12\x89\xbe\x1f\xf4\x00\x86\x77\x91\xed\xbe\x41\x2d\xce\x67\xe2\xc5\xa4\xf9\x0c\x7a\xe9\xeb\x71\xd8\xf3\x50\x95\x62\x2e\x33\x67\x0e\xf1\x25\x5e\x79\x7a\x30\x7f\x09\x7c\x45\x4c\xee\xbe\x71\xb5\x90\xd3\xe3\xe8\x31\x86\xad\x2c\x97\x85\xda\x3c\x0e\x6f\xa5\x78\xca\xe7\x25\xe4\xd8\x7f\x05\xe1\x61\x1c\xc5\x71\x95\xee\xd1\xf0\x95\xae\xa9\xfc\xcd\xa1\x66\x89\xde\x1b\xa8\x8e\xc1\x61\x12\x5b\xb9\x71\x9b\xf3\x72\xce\xe4\x7b\x18\x33\x84\x38\x82\xdf\x0e\xf0\x14\x71\x5c\x31\x3a\x72\xb1\x1e\x4d\xbb\x15\x14\x5b\xa8\x37\xff\xe2\xd1\x9d\x7b\x11\x4d\x43\x84\x8a\x62\x21\xdb\x3e\x54\xa1\x85\x04\x62\x6b\x11\xa9\x0b\x37\x01\x60\x05\xde\x0f\xb7\x83\x3c\xdc\xc5\x1c\xa7\x8c\xb2\x38\xc2\x20\x0a\x77\xf1\x39\xeb\xf9\xfe\x49\x5d\x2e\xd6\xc2\xb0\x3f\x75\xab\x2e\x8e\x32\x88\xa1\x5c\x94\x1b\x23\x3a\x12\xb7\x69\xa8\x0b\xc3\xb7\xb2\x96\x1c\x41\x7e\x6c\x63\x88\xa1\xde\x27\xde\xcc\x46\x37\x9b\x9b\x96\xf7\x36\x68\xff\xa5\x25\xc7\x57\x74\x92\x42\x31\xfb\x89\x06\xe5\x85\x09\x5c\x4f\xf9\x05\xa3\x3d\xf4\x1f\xa1\x51\x28\x49\x04\xb6\xbc\x10\x73\x0b\x7d\x68\xe6\xc2\xfe\x2d\xd2\xa4\x11\x2b\x14\x2b\xad\x51\x79\xf9\xc2\x05\xa9\x4f\x93\x69\xf3\x3a\xd3\x22\x39\x32\x57\x74\xe3\xa8\xb7\x17\x12\x7c\xc6\xd0\x4e\x62\x4f\x4d\x80\xab\x0e\xf0\x38\xd2\x78\x0a\x75\xa4\x11\x9e\x47\xa2\x8c\x0c\x05\x79\x5d\x2e\xb1\xe3\x4d\x5f\xbb\x88\x4b\xf1\x5e\x3c\x89\x45\x93\xed\xcf\x30\x9b\x5d\xfc\x7b\xa7\xfa\xc1\x66\xab\x9b\x58\x46\x5e\xd1\xb9\xb7\x82\xd3\xd1\xb9\xdc\x05\x7b\xc8\xf1\x30\x3c\x02\x93\xc3\x59\xda\x8b\x16\x8f\xc0\x61\xea\xfb\x1b\x33\x38\x4d\x83\xcd\xe1\xd8\x7f\xb7\xea\x11\x78\xf4\x11\x65\x71\xb5\x79\x85\x6b\x01\x2b\xc7\xec\xd7\xf8\xab\x5e\x73\x18\xcb\xee\xd9\xb0\xce\xee\x08\x1b\x7b\xbb\xa8\x62\x7b\xdb\x61\x75\x61\xc6\xf6\x76\x4a\x21\x01\x54\xd0\x26\xac\x0d\xd7\xdb\x45\xc9\x34\xdf\xf6\x66\x31\x07\x67\x61\xc0\x7c\x7a\xaa\x20\x07\x6a\xba\x0d\xbe\xff\xe7\x3f\xe0\xc4\x36\x75\x25\x52\x5a\x75\x72\x79\xe9\xa0\x95\x73\x76\x76\x0e\xb2\x01\x65\x53\x29\x07\xe8\x6f\x65\x64\x83\x4a\xe6\x62\x32\x75\x4a\x91\x8f\x81\xe6\x33\x10\x03\x4d\xb0\x10\x6e\xb7\x79\x2e\x17\xfc\x04\x24\x99\x57\xf9\x11\xb1\x81\x43\x26\xba\x4c\x8c\x6e\x67\x45\x0b\x4d\xca\xdb\x54\x0c\xe1\x81\x1b\x56\xa9\xd8\xf2\x59\xcb\xdb\xa8\x4a\xa0\xf3\x0a\xf6\xbc\x02\xbe\xe3\xb2\x99\xc4\x5b\x82\xe1\x68\x2d\xe0\x6e\xf1\x67\xe9\xf2\x5b\x4d\x11\xd5\x48\x15\x50\xeb\xe6\xf7\x14\xe1\x06\x64\x41\xab\x3f\x10\x3a\x57\xbd\x4d\x61\x18\x18\x08\x2d\x61\x20\xf4\x1a\xc2\x30\x51\x10\xe1\x3d\xed\xf7\xc0\xf8\xae\xe9\xaa\x71\x20\x0c\x47\x83\x4e\x63\xe4\xfe\xd4\x14\xba\xc2\x48\x00\x0d\x7e\xd8\xe0\x9b\x42\x4e\x75\x9d\x82\xec\xc4\x57\x31\xb1\x42\x7d\x3c\x65\xc4\xe9\x14\x94\xce\x65\x71\x12\xd7\x4f\x72\x35\x3d\x55\x59\x81\x6b\x2f\xa8\x33\xcc\xd4\x44\xb0\xc2\xf7\x5f\xd7\x43\x94\x8f\x34\x2d\x84\x8b\xa7\xf9\x06\x53\x4d\x03\xbb\x6b\xed\xff\x45\x35\x64\x30\x13\xd7\x45\xca\xee\xc0\x71\x8d\x22\xb9\xf2\xfb\x6f\x50\x48\xb6\x69\xec\x2c\xad\x17\x5a\x87\xaf\x89\xd1\x14\x01\xd5\xd4\x75\x73\xa9\x19\x13\xd0\x6c\x76\x81\x66\x03\x09\xda\x9a\x0c\x75\x7d\x0d\xa0\xb1\x06\x33\x68\x68\xf3\x85\x1e\x24\x3c\xce\x14\x3a\x60\x0a\xe7\x73\x64\x00\xc7\x04\xce\x14\x01\xff\x95\xc0\xc0\xd6\xdc\xe4\x
7a\x53\xd0\x0e\xa8\xda\x9f\xa0\xa3\x82\xb5\xb9\x00\x06\x42\x8a\x0b\xad\x19\xb2\xbe\x50\x90\x4b\x18\x1a\x60\x31\x57\xa0\x83\x80\xa9\x86\x18\x54\xd3\x02\xce\x54\xb3\x81\x2d\x23\x03\x5a\x9a\xe9\x15\xa7\x20\x43\xf1\xc8\x9c\x2c\xe6\x27\x60\x6e\x5a\x1e\x72\xd5\x32\x67\xc0\x40\x4b\x64\x3b\x11\x92\xd0\x01\x2e\xf4\x14\x59\xe8\x1c\x98\xce\x14\x59\x4b\xcd\x46\xe7\xc0\x41\xb6\x63\xbb\x54\x97\x9a\xae\x03\x6b\x61\x00\xcd\x70\x4c\x30\x37\x1d\x64\x38\x1a\xd4\x01\xb2\x2c\xd3\xb2\xc1\x72\xea\x4a\xe5\xfe\xa3\x48\x11\xb4\x8b\xf9\x85\x62\x2e\x0d\x1b\x40\x0b\x79\xad\xe1\xc2\x31\x67\xd0\x09\x94\x24\xad\x3d\xfe\x3c\x22\x7f\xba\x54\x62\x5d\xbe\x59\x96\x75\xfb\x38\x78\xf1\x1a\x18\x09\x4f\xa3\x1f\x19\xb5\x84\x6e\x24\x1d\x80\xed\x16\x10\xd6\x47\x03\x41\x38\x0d\x9e\xbb\xdd\x98\x46\xca\x2f\xdc\x2c\x4b\xcf\x2b\x03\x4d\x27\x1a\xad\x00\xcd\xa5\x1c\x9d\xb1\x0b\xe8\x26\xe6\xff\x08\xd5\xdd\x69\x3f\x97\x66\x90\x5d\x14\x90\x8b\xa4\x27\x5b\x4a\xb1\xc4\x24\x97\x48\x89\xb2\xf8\xe6\xa0\x7f\xb7\x7d\x2f\x7b\x46\xfb\xdc\xc2\xf2\x1d\x0c\xde\x0f\x41\x9f\x46\xa2\x17\x19\xda\x32\x54\x50\x08\x90\x5b\x4c\x15\x07\x2a\x5c\x84\xc8\x81\x8e\xaf\x5f\xc7\x00\xe3\x15\xaa\x85\x92\xfb\x9d\x15\x7a\x6c\xef\x4d\x7d\xe0\x03\x5a\xf2\x14\x5a\xa7\x0c\x77\x16\x1c\x5f\x74\x61\x22\x65\x8a\x19\x70\x3f\xf2\xbb\x29\xb0\x0b\x73\x61\xc9\xfb\x61\x0a\x93\x2e\x8f\x63\x7f\xd7\x27\xb3\xbd\xeb\x4c\xc1\x6c\xa2\x85\x8e\x8f\x03\x71\x1b\xec\xb4\xbc\xea\x3f\xe1\xa9\x33\x1c\x0d\xfd\x8d\x33\x38\x99\x78\x9b\x67\xb6\x03\x67\x73\xef\xc5\x7b\xbb\xfb\x67\x22\x83\x61\x18\x16\x46\xc0\x96\x66\x9c\x6e\x5a\xf8\x0e\x3c\x42\x93\xc6\x42\x92\xbe\x18\x89\xb7\xeb\x81\x53\xcf\xd0\x34\x05\xb8\xf9\xd3\xa6\x16\xf1\x1c\x7c\xff\x0e\xa6\x68\xf5\x1d\x19\x6e\x0f\x2a\xe0\xce\x34\xf5\x4e\xd3\x83\xf5\x32\x2b\x7b\x06\xdd\x74\x3d\xd2\xc2\x7b\xa6\x22\xe4\x7a\x50\x34\x41\x56\xe2\x89\x37\x86\xdd\x21\xec\x4f\x5a\x40\xd2\x26\xd1\xe6\xa0\xd1\x16\x1a\x37\xe0\x34\x09\xf6\x1f\x80\x9d\xf9\x08\xec\x29\xb4\xb2\x1a\x37\x85\x16\x3f\xee\x8e\x00\xe6\xa3\x39\x8d\xc2\xfe\xe7\xe7\x06\x85\x6f\x95\x16\xb2\x91\xf5\x81\x6c\xf0\x6a\x9b\x86\x94\x60\x33\x75\x81\x27\x5d\x22\x05\xe9\xc8\x41\x0a\x90\x4c\x53\x47\xd0\xd8\xe5\xc6\x7b\x89\x99\x0f\x9b\x3c\xec\xf1\xc5\xed\xa7\x98\x29\x24\xfa\xc5\xcb\x23\xe3\xec\xf6\x7b\x3b\x9d\xe7\x5b\xc0\x44\x33\x4e\xd3\x44\x13\xe7\xd0\x99\x8a\xe6\xdc\x3e\xfb\x91\xa4\xf5\x81\x92\xaf\x59\xcc\x46\xef\xb9\x41\x70\x1a\x88\x7b\x9e\xaa\xa2\x88\x38\x9b\xfb\x32\x37\x6e\x22\x21\x99\x7f\xd5\xa5\xa7\x96\xe1\x88\x1f\x8c\xc0\x63\x67\xd4\x06\xb8\xf7\x43\xa7\xd7\x18\x08\xb7\x42\x6f\x04\xea\xcf\xc1\x4f\xbd\x3e\xb8\xed\xf4\xbc\xb3\xc3\x9b\xef\xfc\xd3\xf6\x7b\x83\x6f\xb4\x05\x80\x6f\x19\x88\x0f\xd7\x4c\x7b\xcf\xb2\x21\x03\xad\xe2\x77\x76\xa6\xf3\x7f\x72\x79\x69\xa1\x89\xac\x43\xdb\x0e\xac\x2b\x0e\x27\x26\x87\x54\xb4\xcb\xf3\xf6\x25\x12\xd4\x76\x77\xf0\x52\x3b\x29\xba\x5a\xbb\x17\x91\x5d\xee\xcb\xd2\xdc\x69\x79\x96\xd5\x17\xdb\xb9\x32\xbd\x57\xd2\x0a\x16\x92\xfd\x74\x1e\x83\xdc\xe5\x3a\x01\x5e\x52\xeb\x99\x9c\xf9\xfa\xb7\xd3\xab\x0c\x73\xd5\x92\x2a\xcc\x79\x36\xe7\xc9\x41\x5a\x81\xc1\x18\x89\xe3\x70\x9a\xd9\x87\xd1\x78\x25\xbf\x17\x13\x5b\x3c\xbf\xa5\x1f\x73\xb8\x4b\xe9\xc9\x3c\x59\x4a\xd4\x16\xee\xdf\x9b\x05\x6c\x66\x6f\x8e\x1d\xc8\x71\x51\x74\x9e\xee\xc2\xf2\xe3\xf4\x54\xc7\x91\x15\xb1\xa7\xfa\x8a\x48\xdc\x18\xaf\x89\xdb\x79\x92\xa8\x50\xcb\x8c\x2b\xdd\xac\xdd\x6b\x1b\x16\xbc\xf9\xd6\x96\xf1\xe7\x23\x49\xac\x24\xf9\xed\xe3\xd6\x9a\xdb\x7e\x77\xdd\xa1\x0a\x8a\x66\xd3\x67\x20\x6b\x20\x14\xb6\xde\xa8\x66\x5f\x04\x89\x86\x41\x24\x97\x0c\x53\x62\x25\xa5\xb9\x73\x46\xac\xd4\xcb\xef\xfd\xf4\x46\xc9\xa1\x92\xac\xb6\xac\x4a\x25\xb3\
xdd\x6e\x6c\x8c\x47\x2d\xac\xd3\x0a\x03\xf1\xc8\x26\x53\x26\x40\xf4\x04\xd7\x66\xf9\xe8\xce\x3f\x4f\x34\xbc\xef\x02\x37\x41\x76\xe5\x05\xca\x62\x36\x07\xb2\x39\x9b\xbb\xe1\x93\x0b\xf7\xff\x02\x00\x00\xff\xff\x90\xf3\xa6\x06\xc7\xe2\x00\x00") + +func baseHorizonSqlBytes() ([]byte, error) { + return bindataRead( + _baseHorizonSql, + "base-horizon.sql", + ) +} + +func baseHorizonSql() (*asset, error) { + bytes, err := baseHorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "base-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x76, 0xe5, 0xb5, 0xb0, 0x71, 0xf7, 0xfc, 0xdd, 0x3d, 0xaa, 0xb5, 0xfd, 0x5a, 0xcc, 0xc9, 0x7f, 0xd6, 0x2f, 0xa6, 0x46, 0x48, 0x8f, 0x89, 0xae, 0x2f, 0x18, 0xb1, 0x71, 0xf9, 0x9a, 0x64, 0xd2}} + return a, nil +} + +var _failed_transactionsCoreSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x69\xaf\xe2\xc6\xd7\xe7\xfb\xfe\x14\x28\x6f\x3a\x11\x9d\x50\x5e\xcb\x4e\x4f\x1e\xc9\x80\xd9\x31\x98\x1d\x46\xa3\x56\xb9\x5c\x05\x06\x6f\xd8\x66\x1d\x3d\xdf\x7d\x84\xe1\xb2\x5d\xe0\x72\x81\x4e\xfe\x9a\x27\x96\x72\x43\xe3\xe3\x73\x7e\xf5\xab\x73\x4e\xad\xa6\x7e\xff\xfd\xcb\xef\xbf\x27\xea\x5e\x18\x0d\x03\xd2\xd4\x2b\x09\x13\x45\xc8\x40\x21\x49\x98\x33\xc7\xff\xf2\xfb\xef\x5f\x36\xf7\xb3\x33\xc7\x27\x66\x82\x06\x9e\x73\x10\x98\x93\x20\xb4\x3c\x37\x21\xff\x21\xfe\xc1\x1c\x49\x19\xab\x84\x3f\xfc\xb1\x79\xfc\x4c\xe4\x4b\x53\x6d\x25\xc2\x08\x45\xc4\x21\x6e\xf4\x23\xb2\x1c\xe2\xcd\xa2\xc4\x5f\x09\xf0\x3d\xbe\x65\x7b\x78\xf2\xfe\x5b\xcb\xb4\xc9\x0f\xcb\xfd\x11\x05\xc8\x0d\x11\x8e\x2c\xcf\xfd\x11\x92\x70\xa3\xf7\xbd\x30\xb6\xad\x8d\x6a\xe2\x62\xcf\xb4\xdc\x61\xe2\xaf\xc4\xd7\x76\x2b\x27\x7d\xfd\xfe\x66\xdb\x35\x51\x60\xfe\xc0\x9e\x4b\xbd\xc0\xb1\xdc\xe1\x8f\x30\x0a\x2c\x77\x18\x26\xfe\x4a\x78\xee\x4e\xc7\x88\xe0\xc9\x0f\x3a\x73\xb7\xb6\x0c\xcf\xb4\xc8\xe6\x3e\x45\x76\x48\x4e\xcc\x38\x96\xfb\xc3\x21\x61\x88\x86\xb1\xc0\x02\x05\xae\xe5\x0e\xb7\x22\x81\xb7\xf8\x11\x12\x3c\x0b\xac\x68\xb5\x51\x4e\xe9\xf7\x1d\x01\x04\x05\x78\xf4\xc3\x47\xd1\x28\xf1\x57\xc2\x9f\x19\xb6\x85\xbf\x6d\x18\xc3\x28\x42\xb6\x37\xfc\xfe\xe5\x4b\xb6\x51\xab\x27\x8a\x5a\x56\xed\x25\x8a\xb9\x84\xda\x2b\x36\x5b\xcd\x9d\xe4\x1f\x33\x7f\x18\x20\x93\x8c\xac\x30\x32\x56\x21\x99\x7e\xbf\x29\x1d\x62\x7f\x3a\xf3\x82\x99\x13\xde\x27\x4c\xdc\xf9\x3d\x92\x36\x31\x87\x24\xb8\x47\x72\x83\x93\x12\x72\xa7\xe4\x1d\x62\x06\x09\x23\x8f\x52\x12\x58\xae\x49\x96\xb7\x65\x11\xc6\xde\xcc\x8d\x0c\x64\x23\x17\x93\xf0\xfb\x17\xa5\xd2\x52\x1b\x89\x96\x92\xae\xa8\x47\xd2\x35\xad\xd2\xbf\x40\xaf\x17\xac\x12\xb1\xf6\x4c\x4d\x6b\xb6\x1a\x4a\x51\x6b\x1d\x3d\x74\x2a\xf8\xc3\x9f\x90\xd5\x3d\xfa\xa3\xe5\xc7\xaa\xf7\x32\x9f\xd0\x4a\xc9\x1d\x98\x8f\xc5\xee\xd7\x1d\xcc\xc2\xc8\xb6\x5c\x12\xde\xd2\xbc\x17\xba\x5b\xef\x06\x05\x89\xb3\xc1\x0d\xbd\x07\xa1\xfb\xf5\xee\x5d\xfe\x96\xde\xbd\xd0\xdd\x7a\xb7\xf2\x96\x4b\xbd\x1b\x7a\x0f\x42\x77\xeb\xf5\x67\x46\x38\x33\x6e\xe8\xdc\x0a\x7c\x46\x9f\x6d\x85\xa3\xe9\x8c\xcc\x6e\x31\x7b\x2c\x76\xbf\x6e\x42\x82\x5b\xb4\xc6\xf7\xef\xd6\x16\x87\xf1\x2d\x75\x5b\x81\xbb\xf5\x6d\xb3\xd2\x88\x20\xf3\xb6\xda\x13\xb9\x9f\xac\x7d\x97\x29\xc9\xf4\xc7\x9d\x66\x0c\xe4\xde\x50\x6e\x20\xf7\x6e\xc0\xbb\xec\x77\x0b\xeb\x9b\xc8\x67\x75\x6e\xfa\x00\x1f\xab\xdd\x48\xed\x34\xc7\xb2\xe7\x8a\x2f\xa6\xdc\xdb\xb2\xfb\xd4\xf8\x91\xd8\x21\xd1\x7d\x20\xb9\x4f\x5c\xb7\xe5\x0e\x89\xe8\x03\xb9\x7d\x62\xf9\x50\xee\x2e\x7c\x87\x84\x72\x5b\x6e\x9b\x24\x3e\x94\xd9\x87\xfc\x07\x92\x9b\x38\xbe\x2d\xb2\x8d\xcd\xdb\x32\x27\xa1\x70\x5b\xd4
\x40\xee\x6d\x81\x37\x57\xbd\x4b\x6a\xe3\x79\x3b\x41\xb5\xd7\x52\xb5\x66\xb1\xa6\x1d\x0b\xdb\xfe\x30\x9c\xda\x3b\x89\x66\xa6\xa0\x56\x95\x77\xba\xbe\x7f\xd9\xf6\x8d\x35\xe4\x90\x3f\xdf\xbe\x4b\xb4\x56\x3e\xf9\x73\xf7\xc8\xf7\x44\x13\x8f\x88\x83\xfe\x4c\xfc\xfe\x3d\x51\x5b\xb8\x24\xf8\x33\xf1\x7b\xdc\x65\xce\x34\x54\xa5\xa5\xbe\x69\x7e\xd3\xf7\xe5\x44\xe3\xe9\xcd\x9d\xe2\x4c\xad\x5a\x55\xb5\xd6\x0d\xcd\x5b\x81\x44\x4d\x3b\x55\x90\x28\x36\x13\x5f\xdf\xfa\xb7\x6f\xdf\x85\xb1\x92\xaf\xe7\x96\xdf\x8a\xbf\xb3\xb9\x67\xe8\xc3\xf2\x9c\x70\xa9\xd5\x5a\x67\x7c\x26\xba\xc5\x56\x61\x0f\xeb\xb8\x43\x7b\x62\xfe\xa0\xe5\x0c\xc8\x67\x0a\xff\x4e\x49\x4c\x40\xbd\x92\xf2\x87\x9b\x51\x8c\x1f\x78\x98\x98\xb3\x00\xd9\x09\x1b\xb9\xc3\x19\x1a\x92\x98\x86\x3b\x3b\xe0\x1b\x31\x93\x50\x34\xb3\xa3\x1f\x11\x32\x6c\x12\xfa\x08\x93\xcd\x68\xe2\xeb\xd9\xdd\x85\x15\x8d\x7e\x78\x96\x79\x34\x40\x38\x29\xec\xb1\x43\xee\x8a\x19\xbb\xee\xa1\x90\x6f\x0e\x70\x89\xf0\xad\x97\x1f\x27\xdd\x5f\xbf\x24\x12\x89\xb7\x6f\x2c\x33\x81\x47\x28\x40\x38\x22\x41\x62\x8e\x82\x95\xe5\x0e\x7f\x15\xc4\xdf\xe2\xba\xd1\xda\x95\xca\xb7\x58\x7a\xf3\xa0\x8b\x1c\x72\x41\x58\x92\x2e\x09\xcf\x91\x3d\xbb\x24\xcd\x30\xec\xb9\xb8\x8d\xc2\xc8\xf1\x4c\x8b\x5a\xc4\x4c\x58\x6e\x44\x86\x24\xd8\x8b\x7c\xf9\xed\xbc\xee\xf7\x51\xfc\x24\x17\xe1\x43\x44\xec\x06\x02\x09\xc3\x1a\x5a\x6e\x74\x76\x33\x24\x53\x77\xe6\x5c\xbe\xe7\xce\x9c\x70\x66\x10\x37\x0a\x36\x43\xc1\xf3\x62\x6e\x65\x2c\x97\xda\x68\x33\x62\x34\x49\x18\x5d\x86\xb3\x15\x1c\x79\x0e\x31\x3d\x07\x59\xee\x05\x29\x9e\x3f\x07\x1d\x8d\x02\x12\x8e\x3c\xdb\x0c\x13\x11\x59\x9e\x23\xa3\x36\x1a\x5e\x43\x74\xb3\x6e\x76\x8c\xcc\x36\x56\x6d\x0b\x19\x96\x6d\x45\x9b\xc2\x6d\xcb\xff\x46\x89\x6d\xdf\xba\x6d\x0d\xdd\x4d\x5f\x68\x03\x6b\xfb\xcd\x51\x6f\x60\xdf\xb5\xd8\x91\xfe\x23\x1e\x56\x27\x32\x05\x35\x53\x4e\xfc\xfa\xeb\x5b\x55\xfc\xd7\x5f\x09\xf0\xdb\x6f\x37\x9e\x3e\x07\x78\xae\xe7\x5d\x01\x3e\xd2\x78\x52\x97\x67\xda\x4e\xeb\xf9\x23\x4d\xef\xe9\x39\x53\x77\x81\xbf\xad\xce\xf7\x81\xb1\x69\xff\x1e\x8d\x89\x4d\x97\x71\x1b\x0e\xae\x67\x92\xe3\x58\x38\x89\x81\xf7\x46\x4f\xdb\xe7\x47\xcd\x9f\x76\x8c\xb7\x40\x76\xdf\xa1\x70\x74\x04\x46\x7c\xe7\xdb\x7e\x40\xe6\x1f\x0a\x19\x33\x3c\x21\x91\x6d\x85\xd1\x87\xa2\xfb\xde\xf6\x9b\xbb\x6f\xbf\xc6\xb6\x17\x92\xc8\x72\xae\x44\x7e\x9c\x58\x2f\xc4\xd6\x51\x9d\x9f\x76\xea\xf7\xfa\xce\xea\xfb\x60\xe7\x8a\xeb\x5c\x1b\x1b\x9c\xaa\x39\x94\xe2\x9a\xb7\xec\x3a\x5f\x8f\xd6\xd8\x6e\xe0\xf5\xeb\x3e\xc8\x49\x70\x67\x06\xdd\xce\xbc\x98\xd7\x32\x68\xec\xee\x28\x0c\x49\x74\x89\xcf\x6d\xac\x5e\xbd\x8d\x9c\x4d\x58\x5d\x56\xed\x07\x16\x26\xee\x95\x24\x16\xdf\xbc\x96\xe1\xe2\x9b\x09\xd3\x9b\x19\x36\xd9\xf8\x1b\xb6\xe2\x19\xc9\x97\x66\xd1\xa3\x1a\xde\x0d\x59\xb7\x65\x39\xab\xd7\x5d\x01\xaf\xf8\xc6\xee\xc9\x1d\xc3\x67\x8f\xbe\xf1\x7e\xcd\x21\xb6\x1d\xf6\x47\xfd\x61\x3b\xac\xdf\xba\x83\xe5\x5f\x6a\xf8\x85\x77\x91\xeb\x05\xd1\x9e\x8d\xac\x9a\x53\xda\x95\x56\x02\x9c\x37\x9b\x64\x19\xa1\x28\x22\x8e\x1f\x25\x36\x61\x11\x46\xc8\xf1\x13\x9b\x2e\x93\x37\xdb\x7e\x93\x58\x7b\x2e\x79\xdf\xd8\x52\x64\xd9\xb3\xe0\xa8\xa9\xbd\x66\x21\x5a\xf9\xe4\xe3\x4a\xd9\x4e\x4b\x1c\xe9\x7d\x9f\xf6\xf7\x16\xaf\xd4\xce\x6e\x66\xc3\x0b\xce\x2b\xf5\xd7\x98\x89\xff\x4a\x80\xdf\x12\x8a\x96\x4d\x6c\xff\xf9\xbf\xfe\x4a\x88\x82\xc0\x09\xbf\x5d\xac\xab\xe3\x61\xd8\xc3\x55\x76\x3c\xcb\x73\x9c\x73\xaf\xb0\xb1\x9d\x68\xdb\x44\xdd\x45\x40\x9b\xb1\xe3\x13\x50\xc2\x99\xb1\x03\x11\x90\xf0\xa4\x01\xe2\x2e\xf6\x18\x03\x82\xf6\xb1\xf4\x1e\xcf\xd1\x98\xf7\x51\x4c\x47\x93\x75\x77\xb4\x8c\x5b\x60\xd3\x90\xdc\x6a\x61\xde\xe3\x3c\x1a\xc3\x3f\x8a\xf3\xa0\xe2\x7e\x9
c\xef\x1a\xb9\xb3\xfb\xc4\x9d\x13\xdb\xf3\xc9\x07\x4d\xda\xc1\xf4\x13\x0d\xd1\xd1\x74\xc7\x13\x14\xbc\xcd\xd7\xfe\x7a\x4f\x3d\x1c\xbc\xe8\x23\x22\xa6\x57\x1a\x9a\x53\x12\xde\xe6\x81\x4f\x34\x9e\x13\x71\x62\xed\x2a\x19\x87\x39\xa2\x87\xc9\x38\x4c\x8a\xff\x7a\x88\xdb\xd3\xc1\xdb\x85\x98\xba\x15\xdd\x47\x33\x5c\x8f\xa2\x3a\x5a\x02\x78\x64\xd8\x15\xb7\xf8\x37\x32\xb5\x15\x86\x33\x12\xdc\xaf\x0a\x7b\xe6\xc5\xd1\xe9\x3b\x5a\x22\xdb\x72\xac\x2b\x3d\x8a\x9b\x63\xc1\x7f\x72\x54\x75\xe4\x9d\x47\xab\x2a\x0f\x8d\xa2\x8e\x9f\x7f\xd5\x38\xea\x48\xe7\xe3\xe3\x9f\x5b\x5a\xb7\x95\x76\xa6\x69\x57\x93\xff\x75\x39\xf0\x4e\xa6\x7b\x1f\x76\xf2\xe3\x35\xb4\xad\x9b\x47\xcb\x93\x54\x7c\xc7\x78\xe3\xdc\x01\x97\xf1\x2a\xe5\xd5\xbb\x78\x84\xdc\x21\xb9\x38\xb0\x3f\x26\xe7\x78\xd9\xee\xf1\x5c\x7d\x98\x3b\x7f\x9c\xa2\xbf\x99\x1f\xc3\x33\x57\x97\xc8\x89\x96\x01\x09\x67\xf6\xc5\xec\x1e\x2d\x1d\xf2\xe1\x78\xee\xb0\xc4\xfa\x38\x9f\x67\xeb\x16\x8f\x92\x7a\xb6\xe2\xfc\xeb\x5d\xc4\xed\x1e\xba\xc5\xde\x4e\xe4\x12\x11\xf7\xb9\xdd\xd9\x0a\xf7\x23\x44\x65\x37\x23\x6b\xea\x05\x1f\x4c\x86\x26\xb2\x4a\x4b\xf9\x80\xb3\xdb\x2a\xc3\x4f\xeb\x2b\x6a\x4d\xb5\xd1\x4a\x14\xb5\x56\xed\x30\xa9\xd8\x51\x2a\x6d\xb5\x99\xf8\xf5\x6b\x3e\xdd\xa8\xf7\x0b\xc5\x0a\x9b\x29\x72\x39\x4d\xe7\xd3\xbd\x4a\xae\xaa\x65\x2b\xb9\x52\x5b\xab\xb7\xd9\x42\x9f\x1b\x54\x73\xcd\x42\x4d\x6b\x67\xd4\x9a\xd2\xec\x42\x3d\x03\x6b\x3d\xb6\xf0\xf5\x5b\x42\xde\x5e\xe2\xee\xff\x10\x80\x6f\x09\xee\x5b\x02\x7c\xdb\xb2\x9c\xf8\xfa\xf5\x5b\xe2\xab\xa2\x2b\x8a\xa2\xfc\xf5\xd7\xd7\xf8\x06\xfb\x76\xef\xf0\xf7\xb7\xef\x1f\x21\x54\x84\x6e\xba\xde\x57\x84\x3e\xdf\x55\xd4\x42\xaf\xdb\x60\xdb\xe5\x1a\xdb\xae\xf1\xe9\x76\xbe\xd0\xd6\x21\xaf\xb6\xeb\xe5\x9a\xc6\xea\x85\x0e\xdf\x6d\x14\x6a\xc5\x86\x56\x2e\x17\xd8\x03\x42\x79\x83\x4c\x12\x24\x59\xe6\x78\x41\xe6\xbe\x25\x98\x1b\x10\xb9\x47\x20\x66\x7a\xe5\xbc\xd8\xd0\xf8\x9a\x56\x54\xeb\x99\xaa\x96\x4b\x43\x8e\x55\x78\x4e\x1c\x08\x75\x2d\xdb\x6c\x54\xf2\xdd\x32\xcc\xa7\x2b\x99\xaa\x5e\x29\xe6\x6a\x7c\x13\xaa\xfd\x6e\xa7\x7d\x80\x08\x4f\x20\x0a\xb7\x21\xf2\xdb\xbf\xe0\xed\xba\x17\x65\xba\x97\xd7\x4b\xdd\x4e\xa5\x5b\xeb\x17\x72\x95\x4e\xab\xdc\xed\x08\xb9\x7c\x41\xe1\x2a\x5a\xbf\xcf\x96\xf4\x72\x15\xd6\x94\x92\xd2\x56\xf5\x5c\x5b\xac\xd4\x33\x4d\x35\xd7\xe9\xd5\xb4\x03\x4a\xe9\x04\x25\x7f\x1b\xa5\x70\x91\xc8\x2b\xde\x7d\x3e\x2b\xf8\x44\xa0\x5c\x9f\xeb\xfb\x6c\xb4\x9c\xce\xf7\xed\x79\x14\x39\x53\x96\xa8\xc0\x89\x84\x88\x92\xc9\x18\x2c\x34\x04\x43\x92\x29\xcb\x21\x2a\x70\x0c\x63\x40\x41\x94\x11\xcb\x53\x44\x19\x1e\x70\xc8\x04\x86\xc0\x1a\x22\xc7\x19\x00\x1a\x44\x96\x37\x54\x81\x27\xaf\x8d\x0e\x01\xb2\x88\x25\x1c\x4b\x29\xcb\x4b\x08\x40\x03\x10\x08\xa8\xc9\x50\xd1\xe4\x18\x09\x33\x14\x61\x93\x05\x86\x88\x31\x90\x30\xc7\x99\x02\x84\x02\x2b\xc8\x92\x28\x31\xac\x80\x18\xf1\x6b\x5c\x7f\x60\x53\x71\xff\xb1\x57\xba\x57\xb6\xf8\x55\x6a\xd5\x2c\xa7\x61\xd6\xcd\xca\x05\x16\x2c\xc7\xe9\x64\x08\x86\x51\xb8\x28\x2e\xd6\x4c\xcf\x6c\x76\xfb\x28\x5d\x42\xb9\xe1\x46\x5e\xd5\xf8\x0a\x5a\xfb\xac\xfe\xa1\xe6\x81\xd2\x63\xf8\x58\x2c\x3d\xf9\x1b\x0a\xf2\xd2\xeb\xeb\x59\xac\x5f\x71\x54\x28\x00\x51\xc2\x08\xb3\x94\x21\x22\x02\x06\x8b\x11\x45\xac\x29\x98\x86\x81\x31\x16\x30\x23\x8b\x90\x81\x54\xa4\x94\x4a\x50\x96\x31\xe6\x90\x21\x22\x51\x16\x36\x8e\x2a\x33\x68\xe3\x64\xaf\x70\x76\x04\x45\x01\x11\xc4\x08\xbc\xc1\x9a\x3c\xe0\x79\xc6\x44\x86\x41\xa1\xc1\xb0\x10\xc9\xc0\xe4\x80\x84\xe9\xc6\x41\x0d\x2a\x08\x98\x50\x81\x60\x56\x66\x28\x23\x98\x02\x00\x02\x47\xbe\xc6\x0d\x07\x23\x08\xb2\x00\x65\x11\x8a\x3b\x8f\xcd\xb0\xf5\xc1\x98\xd1\x66\x82\x07\x8c\x12\xec\xf2\xee\xaa\x36\x6f\x
2f\xf3\x5c\xc7\xf7\x26\xc9\x79\x4e\xa9\x45\x19\xa6\xcc\x56\x61\x1a\x8a\x83\x54\x55\x28\x36\x33\xd3\x06\x97\xf4\xd7\xb9\x6e\x59\xaf\xad\xd5\xa1\x35\xb4\x23\xbe\xd4\x54\x48\x16\xf6\x03\x2f\x54\x51\x25\x25\x14\x2b\x6a\xcc\x70\xaf\xde\xa9\x16\xe2\x4f\xc5\xfd\x9f\x6d\x7a\x0b\x0f\xff\x5e\x28\x75\x7d\xe7\x3b\xa0\xa3\x4b\x33\xc7\xaf\xd6\x92\x8d\x52\x2a\xb3\x2e\x82\xce\xa0\x44\x92\x02\x19\x2d\x52\x13\xbe\xc5\x9b\xa3\x45\x5d\x5f\x2c\xd4\x0e\x84\x8e\x3b\x40\xe2\xa8\x53\xa9\xb4\xd3\x6a\x01\xcd\x52\x64\x59\x12\x27\x5a\xb5\xb8\x5e\xd7\x3b\x73\xa6\x57\x93\xc9\xc2\x2f\x2c\x7b\x1d\x25\x57\x8f\x3d\xba\x78\xc1\xa3\xd5\xf0\x92\x57\xbc\x79\x74\x16\x94\x7e\x9e\xeb\xfd\xa4\xeb\x4e\x8f\x46\x2c\x2f\xb3\xa6\x2c\x43\x84\x19\x8e\xa3\x32\x60\x38\x42\x31\x2b\x48\xd4\x10\x0c\x2c\x70\x06\xa1\x40\x96\x0c\x2a\xcb\x92\x89\x05\x09\x1b\x02\xa0\x58\xa4\x92\x64\x40\x8a\xa4\x6d\xda\x7c\x45\x54\x10\xce\x10\x64\xc4\x0a\xac\x40\xb1\x80\x20\x26\xa2\x01\xa8\xc9\x99\x14\x0a\x0c\x8f\x59\x01\x63\x0c\x79\xc2\x49\x48\x20\x00\x4a\x26\xc0\x8c\x29\x70\x10\x21\x06\x70\x54\x94\x44\xe6\x6b\xdc\xcf\xd8\x7b\x34\x7c\xf3\x68\xae\x9d\x1f\x97\x17\xf3\x82\xe3\x55\x2a\xe5\xa4\xc5\x98\x33\x69\x9d\x5b\x0c\x06\x4b\xca\xa6\x52\x23\xc1\xad\xd6\x02\xe4\xd8\x46\x1a\x4e\x90\xb1\x9e\x03\x3d\x5f\x9f\xe4\x7b\x4d\xcc\xe2\x61\x3a\x93\xf5\x3d\xcb\x52\xf3\x0d\x9e\xb2\x4e\x00\xfc\xb4\x9c\x47\xb0\x3f\x4c\xf6\x8c\xad\xb3\xc6\x1e\x7d\xe4\x44\xcd\x46\xa5\xdd\x2e\x84\xe9\x54\x6a\x5d\x08\xc7\x05\xc5\x63\x06\x4a\x3b\x3d\xf1\xa4\x25\xdf\x6c\xf4\xb4\x15\xa2\xd1\x60\xda\x71\x73\x42\xb5\x30\x8e\x06\x9e\x5d\xa2\x4b\x44\x6b\x68\x91\x6a\x71\x5c\xbb\xbd\xb0\xd7\x55\xb3\x56\x2b\xf7\x86\x44\xcb\xa6\x99\x16\x99\xaa\x59\xd9\xeb\xc7\xfa\xab\x17\x3c\xb6\x00\x2e\xd5\xfa\xff\x00\x8f\x35\x28\x23\x4b\x22\x66\x0d\x83\x9a\x06\x8b\x25\x11\xf2\x26\xc7\x1b\x32\x21\x0c\x07\xa0\x41\x29\x8f\x01\x66\x90\x61\x00\x81\x25\x32\x85\x88\x22\x60\x42\x11\x13\xcc\xb1\x86\x40\xa5\x38\x7f\xbe\xc0\xeb\x0d\xcc\x42\xd1\x94\x05\x83\x41\x26\x35\x0d\x99\x35\x44\xd3\xa4\x12\xa0\x26\xcf\x4a\x1c\xcb\xf1\x54\xa6\x54\x92\x18\x5e\xc4\x82\x88\x4c\x8a\x25\x59\x10\x45\xc9\x94\x18\x53\x86\x32\x63\x7e\x8d\xbb\x9d\x7b\x8f\x95\xde\x3c\x56\x2c\x95\x2a\x03\x5b\x5c\xb4\xea\x82\xd2\x82\x52\xa9\x9f\x64\xe6\xb9\x5a\x52\x2a\x59\x29\xa7\xcf\xe5\xc6\x95\x76\x7d\x39\xb7\x2a\x54\x1c\x2a\x6d\xbb\x32\x0b\x18\xab\x97\x2f\x38\xfd\xb6\xd0\x74\x1a\x05\x93\xe6\x74\x79\x55\xcb\x4e\x65\x3b\x57\x5d\xe5\xa6\x3c\x10\x4b\xa1\x01\x7b\x8b\x98\xe1\xd8\x63\x87\x07\xc6\x53\xc1\xb4\x35\x2e\x56\xba\xad\xb9\xa9\x26\x59\x60\x44\x6b\x13\x21\x99\x53\xeb\x51\x7b\x92\xe3\xbb\xcd\x9a\x54\x2e\x36\x94\x95\x5c\x2c\x66\xa4\x12\xcb\x76\x42\xc4\xc9\xb3\x66\x14\x71\x7c\x56\xd6\xbd\xea\xb8\x25\xa4\xf8\x61\xdb\xc8\xa1\x28\x55\xb4\xbb\x48\x1b\x9a\xb6\xb0\x8d\x08\xfd\x82\xc7\x56\x87\x67\x15\x9e\x56\xfe\x67\x78\xac\x28\x62\xc9\xe0\x38\x41\x36\x11\x24\xd4\x34\x45\x86\x0a\x94\x35\x38\x08\x44\x56\x66\x44\x56\xe0\xb1\xc8\x33\x92\xc8\x71\xd8\x64\x19\x96\xe1\x39\xc8\x03\x0c\xb1\x09\x0c\x02\x24\x41\x88\xbd\xed\x05\x5e\x2f\x73\x82\xc1\x02\x51\xde\x74\x5a\x39\x89\x31\xb0\xc8\xca\x86\x60\x42\x1e\xb2\x06\x90\x45\xca\x11\x0c\x04\x2c\x32\x22\x16\x4c\x96\xe5\xb1\xcc\xb2\x22\x90\x4c\x93\x40\x76\xa3\x7c\xd3\xbd\x15\x8e\x3c\x56\x7e\xf3\x58\x28\x0d\x46\x61\x19\xa6\x8c\x4a\x71\x00\x00\x98\x11\xbe\x95\x86\x29\xb0\xa8\xe6\x83\x30\x57\xf1\x65\x31\x39\x64\xd8\x75\x58\x0d\x7a\xfc\xd4\x5b\x6a\xc1\x72\xcc\xb8\x39\x29\xd9\x2e\x16\xc5\xae\x36\xc9\xae\xed\x71\xa3\x41\x02\xae\x33\x6b\x4c\xd6\xcd\xd2\x40\xd1\x75\x1e\xb6\xb6\x9e\x13\x7b\xec\xe2\xc0\x38\x31\x2b\x39\x47\xce\x0c\x7b\x1d\x28\x2b\xb3\x81\x26\xa3\x61\
x4e\x5b\x06\x83\xac\x3e\xec\x70\x06\xb5\xf3\xca\xb4\x0e\x93\xcb\x22\x1d\x64\x5a\xdd\x55\xda\x37\x9b\x6d\x3e\x2f\xf5\xfd\xc8\x34\x0b\xe5\x85\x3d\xaf\x87\x69\xdc\xef\x2e\xcd\xe2\x64\xd5\x2c\x0d\xc7\xa6\xb0\x5a\x54\xb7\xfd\xdc\xf6\x05\x8f\xad\xa9\x67\x15\xfe\xff\x85\xc7\x5e\x19\xe5\x5d\xd8\x18\xf0\xd9\xe1\xdd\x6e\x73\xc0\x2b\x46\xf1\xcc\x7e\x10\x15\x8f\x7e\xb7\x9f\x3b\x9d\x56\x63\x57\x90\x60\xaa\x89\x15\x52\x43\xc3\xf1\xb2\x8a\xda\x75\x59\x4c\xaf\x69\x28\x13\x80\xbd\x40\x1b\xf4\xd6\xe9\x6e\x69\x92\xf3\xca\x70\x32\x9f\x2c\x36\xcf\x1f\x0f\xed\x99\xb8\xf3\x0b\xfe\x10\xb6\x83\xfe\xeb\x8c\xbc\x5f\x19\x7f\x62\x10\x7d\x75\xe9\xf6\x39\x9d\xe7\xab\xaf\x4f\x68\xbb\xb2\x76\xfa\x84\xc6\x2b\xab\x9c\x9f\x75\xab\xa3\x95\xce\xa3\xa9\x17\x41\x6d\x69\x39\x5e\x2f\x0a\x83\x76\xbe\x5f\xe4\x4a\x9d\xe2\xa0\xd1\xd5\x5b\x69\x1d\x56\xb8\x7a\xa5\xa7\xd5\xdb\x5a\xb1\xad\x66\xb8\x06\x6c\x70\x19\xb1\xc3\x73\xe9\x6a\x27\xaf\x6e\x47\x3e\x5b\x77\x2a\xf8\xa5\xa8\xb2\xca\xd4\x04\xcf\xc8\x68\xfe\x74\x5c\x35\x74\x27\x5f\x60\x20\x99\xc1\x1e\x40\xd6\x28\x6d\x8c\x93\xb5\x25\x09\xd6\xfd\xee\x21\x82\xe2\xbe\x5f\x26\xfe\xb8\x49\x15\xea\x58\x5a\x4f\x46\xc5\xb2\x5f\xa0\xa2\x5b\xed\xf4\x7d\x05\x56\x9b\x99\x72\xa6\xcb\x0d\xed\x62\x9a\xaf\x47\xd6\xcc\x5a\x34\xbc\x79\x6a\x32\x0c\xb7\x63\x63\x2c\xb7\x17\x38\x7e\x7e\xb8\xff\x13\xe7\x95\xcc\x62\xff\xef\xac\xa2\xc8\x99\xa3\x4c\x94\x9e\x8c\x32\x0a\x1e\xa0\x42\x05\x4c\xcd\xfc\x6a\x51\xef\xa5\x3d\xcd\xea\x27\x27\x6e\x65\xe2\x42\x7d\xbe\xe8\xae\x67\x91\xb6\xc8\x8a\xca\x4c\x8a\xd5\xa1\xbe\xc7\x19\xda\x98\xef\x24\xf5\xe4\xa2\xb6\x48\xae\x69\xcd\x77\xd7\xe5\x66\x43\xa8\x66\x2a\xb6\xeb\x8c\x10\x9f\x83\x35\xbe\x6d\x0c\x72\xd3\x55\xad\x01\xb2\xb9\x09\xad\x18\x23\xbe\x35\x57\xd3\x70\x2c\x05\x83\xe1\xb4\x51\x33\x04\x8a\x18\x41\xee\x94\x4b\xba\x53\x5f\x57\xd2\xfa\x5f\x7f\x9d\xb7\x78\x2f\xae\x1a\xee\xa9\xaa\xa9\x9e\x56\x4d\x36\x3d\xaf\xa5\x1a\x4a\x3f\xa9\x0f\xcc\xd2\x7a\xb0\x52\xd4\x62\xcd\xb1\xca\x45\x7d\x50\x18\xa5\x10\x9a\x37\x27\x05\x30\x08\x22\x2b\x2b\x98\xe1\x70\x5f\x35\xe6\x59\xc6\xfc\x34\xf5\x26\xce\xb7\x9d\xb2\xca\xe7\x3a\xe9\x75\xbf\xdf\xb3\x96\x23\x71\x69\xd2\x96\x56\xd5\xcb\x8e\x50\xf6\xe7\x48\x01\xc3\x96\xc2\xcd\xa1\x38\x69\x00\x39\x1c\x64\x8d\xf4\x90\x5f\x66\xe6\x05\xec\x4d\xc6\x9c\x4b\x9b\x61\x4d\xee\xa5\x04\xbb\x0e\x8a\x85\xa6\x44\xcb\xcd\x60\x1a\x64\x94\x9f\x4e\x3d\xff\x14\xf5\xfa\x39\xf5\xcd\xf6\x4c\x9c\x77\x4b\xb8\x4f\x06\xa3\x96\x5d\x1e\xa8\x26\x23\x75\xb2\x5c\x91\xaf\x05\x6c\xbb\xbd\x2e\x76\x83\x71\xcb\x73\x96\xf3\x88\x2a\x7b\xea\xc9\xb3\xd4\x57\xb5\x21\x6a\x35\x96\xa0\x30\x9d\xb6\xaa\xa3\x46\x7a\x81\xf3\xc9\xe6\x5a\x29\x30\xfa\x8c\x9b\xbb\x2e\xdf\xf0\xba\x20\xdf\x50\x03\xe8\x80\x61\x89\x2b\x0e\x06\xc0\xb2\x7c\x67\xae\x96\xe7\x5d\xba\x4a\x3a\xa0\x2f\x97\xb8\x99\xa5\xf7\x33\xe9\x62\x60\x4f\x97\xa1\x9d\xcf\x88\x7f\x83\xd7\x0b\x4f\x51\xdf\x3e\xa3\x3e\x33\x1d\xab\xec\x3c\x5f\xef\xe2\xde\x5a\xd0\x87\x63\xbf\xcf\xea\xf5\x5a\x57\xcb\xe5\xc4\xb9\xd9\x15\xf2\x2d\xad\x68\x4f\xd2\xe9\xec\x38\xd2\x0e\xd4\xd3\x67\xa9\x6f\xd6\xea\x73\x69\xb9\xca\x4d\x32\x76\xbf\xde\xcf\x17\x16\x50\x6f\xaf\xd4\x46\x52\x5d\x8b\xfd\x51\x5f\x61\x0b\x90\x9b\xf2\xf6\x5c\xf5\x6a\x56\xbb\x9f\x69\xe9\xba\xdc\xed\x4d\x09\x16\x46\xa9\x39\x98\xa8\xd9\x9a\x94\x1d\xf9\x51\xd1\x99\x2a\x2d\x32\xb3\x38\x5a\x6e\xf6\xc6\x83\xf4\x70\x4b\xfd\xf5\xd6\xe5\xd2\x06\x92\x07\x5a\x97\xb7\x4d\x24\xfb\xca\x94\x59\x06\x48\x00\x32\xb2\x28\x41\x96\xf2\x48\x36\x19\x83\xc5\x80\x0a\x98\x41\xc0\x94\x64\x89\x22\x5e\xc6\x86\xcc\x42\x62\x02\x83\x02\xc1\xe0\x0c\x64\x72\x10\x00\x8a\x00\x4b\xe8\x71\xa5\xc6\xa1\xb1\xed\x48\x92\x89\x03\xe6\xc5\x22\x74\x46\x61\x91
\x75\xa6\xd5\x65\x94\xe9\xf7\x69\x2f\x12\x61\x44\x1b\xd3\xb2\x9a\x9b\xd5\x79\x98\x13\xe7\xda\xa8\xff\x51\x6f\xed\xca\x86\x91\x4f\x17\xfe\xb0\x69\x64\x5f\x78\x1b\x85\x51\xbc\x1d\xd5\xdc\x6d\x0b\x3b\xbb\xe2\x49\xcc\x67\x87\x34\xe7\x11\x75\x01\xc7\xdb\xdb\xe2\xdb\x57\x51\xce\x51\x6c\x71\x30\xe0\x1e\x4d\x2e\x89\x16\x5e\x30\xf1\x51\x18\xfa\xa3\x00\x85\xe4\x82\xa6\x16\x09\xa3\x44\x33\x9b\x4b\x68\x5b\xe1\xc4\xf7\x44\x93\xf8\x11\x71\x0c\x12\x24\x58\xc0\xdc\x05\x99\x7a\x01\x26\x21\xf6\x3d\xd7\x25\xcb\xc8\x46\x33\x17\x8f\xce\x0d\xc5\xaf\x7a\xdc\xa3\x6c\xcb\xfe\x6e\x29\x35\xbc\x5c\xfe\xff\x1b\xaf\xbc\xfe\x12\x59\x0e\xf9\xe5\xcf\x04\xd8\x2e\xc4\xfe\xb2\x7b\x77\xfe\x97\x3f\x13\xdb\xfb\xf1\x97\x23\x14\xfe\xf2\xe7\xf6\x45\x93\xf8\xcb\xff\xde\x09\x53\x42\xee\x13\x74\xd0\x32\x5a\x86\xd6\xfa\x4e\xf1\x80\x84\x24\x98\x7f\x24\xfc\xe5\xbf\xef\xa1\x62\x97\x6d\x51\x80\x47\xd6\x7c\x77\xf3\x0a\x15\x87\xb2\x33\x3b\x20\x31\x8c\xe0\x97\x3f\x13\xbf\xcc\x19\xe6\x0f\xe6\x0f\xf0\xcb\xee\x06\x9e\x05\x01\x71\xa3\x4a\xcc\xf3\x2f\x7f\x26\x84\xd3\xef\xd3\xf1\xe6\xf1\x0d\xde\xff\xbd\x2f\xc0\xa1\x28\x7b\xc9\x8d\x62\x09\x00\x89\xe1\x0c\x1e\x72\x84\x65\x28\x4b\x78\x1e\x03\x8e\x23\x48\x00\x3c\xc3\x10\x00\x65\x0e\x53\x91\x33\x44\x2c\x52\x11\x9a\x18\x8a\x3c\xc3\x62\x86\x60\xc0\x72\x22\xcb\x1a\xcc\x0e\xd1\x5e\xef\xc6\x7b\x4e\x88\xdb\xdf\x89\xcb\xbe\xa9\xea\x93\x5b\xff\x7d\xf6\x7c\xe8\x22\x7f\x83\x8b\x03\x2c\xa4\xc8\xe0\x28\xe2\xa1\xcc\x4a\x22\x92\x25\x40\xa0\x89\x10\x05\x12\x6f\xc8\x88\x81\x3c\x6b\xb2\x84\xa1\x02\x6f\x00\xc6\x14\x31\x10\x91\x2c\x12\x53\xc4\x04\x43\x1e\x82\x5f\xbe\x5c\xb0\x70\x85\x03\x42\x39\x06\xb1\x00\x71\xb2\x44\x08\xe4\x30\x61\x59\x16\x0a\x04\x49\x0c\x84\x50\x12\x0d\x84\x05\x5e\x14\x44\xca\x71\x26\xc6\x3c\xe5\x28\xc1\x22\x30\x05\xc1\x34\x29\x23\x72\xa6\xfc\x79\x0e\x98\x6f\xef\xef\x79\xb3\xc8\x9f\x45\xaf\x2d\xfb\x2d\x86\x9f\x5d\x76\xfc\x0c\xc3\x4f\xdb\xfa\x49\x5e\xf6\x2f\x07\xff\x72\xf0\x2f\x07\xff\x72\xf0\x2f\x07\xff\x72\xf0\x2f\x07\xff\x72\xf0\x13\x39\x88\x3f\xfd\x9f\xfb\x06\x0d\x9b\x71\x6c\x88\xfd\xf8\x15\xcd\x4b\xd7\x7e\x59\x61\x3b\xf9\x27\x36\x8d\xa6\x34\x1c\xcf\x50\x7e\x31\x46\xc8\x5b\xe7\x41\x69\x34\x92\x09\x17\xcc\x18\x39\xef\x79\x7a\x97\x4f\x8d\xc3\xde\x54\x62\x73\x87\x75\xcc\x5c\xfc\x57\x28\xea\xc3\x42\xbe\x3b\x5a\xc9\x65\xb7\x11\x86\x59\x66\x81\xb2\x7d\xa7\xee\x97\x56\x42\x29\x09\x2a\x52\x2e\x84\x41\x0b\x2b\xc9\x61\x65\xbe\x9f\x11\x28\x59\x4f\x4e\xd3\x3c\x32\xa3\x90\x56\xd2\xd5\x12\x9f\xb2\xf3\x60\xd9\x81\x4d\xaa\xfb\x13\x41\xae\x82\x10\x5a\xf9\x85\x88\xc6\x7e\x4b\x0b\xa5\xa6\x50\x70\xca\x0b\xa8\xea\x29\x59\xf1\x0a\x0b\x5a\xeb\xf7\x41\x11\x33\x2a\x0e\x16\x8b\x8a\x25\x8c\x94\x81\xe6\x37\x91\x5c\x6f\x55\x67\x49\xcf\x5a\xf1\x9d\x51\x26\xbd\x9f\xe1\xc8\xf4\x9f\x5c\xdf\x4b\x3f\x32\x0d\xa6\x2b\xad\x0c\xa9\x0b\x8d\xa8\xda\x21\xc0\x05\xe5\x41\x8d\xb6\xb4\x4a\x8d\x1f\x85\x35\xc7\x13\xdb\x6b\xa3\xae\xce\x1a\x82\x15\xd6\x96\x6a\x3d\xa5\x97\xd3\x52\x61\xed\xe4\xb5\x4c\x41\x6b\x14\xca\x52\x35\xc3\xcf\xfa\x7a\xbe\x85\xda\xce\xbc\x05\xd6\x95\xf9\xb4\x18\xce\x72\x7d\x7d\xd8\x8e\xe1\x34\x5c\x4f\x1f\xe4\x2a\x90\x0c\x99\xbc\xa4\x8b\x58\x37\xbc\xee\x14\x4a\xd6\x54\x2d\x0c\xf3\xe9\x69\x7a\x95\x37\xa6\xc9\x26\x56\x5b\x60\x6a\x8f\xed\x30\x2a\xb5\xab\x26\x50\xfc\xc9\xc8\x5f\x14\xa4\x7e\xc1\x50\x42\x66\x31\xac\x54\xb4\x0e\x8b\xc7\x3e\xa8\x31\xfd\x3c\xbb\xc8\x2e\x9e\xf4\xb7\xe1\xbe\xfe\x17\xcf\xae\xa7\xc6\xd7\xe0\x73\xfe\xab\xab\xe3\x31\x4c\x55\xf1\x68\xa0\xf8\xdd\x2c\x9b\x1e\x49\x35\x90\xab\x8e\x54\x3a\xaa\x26\x9d\x62\x57\xd1\x46\x49\x59\x9c\x95\x8c\x65\x39\xeb\xd9\x79\x65\x02\xd4\x7a\xc7\x16\x5d\xb7\xd6\xa7\x50\x2e\xa5\x95\xf5\x5
c\xe1\x91\xd1\x2c\x4d\x87\x2a\x17\x78\x11\xb7\x9a\xd8\x3c\xab\xc7\x33\x5e\xe9\xf9\xd2\xc9\x2f\x82\xb9\x1c\x86\x23\xb7\xd5\x6a\x0a\x70\x54\x2d\xcc\x53\x2d\x65\x81\x66\x8b\xf6\xcc\xe5\x02\x31\xdb\x33\x6a\x8b\x55\x44\x0f\xeb\x26\x4a\x5e\xf0\x4a\x51\xc7\x74\xfb\xb5\x8e\x39\x98\x46\x3d\xbf\x55\x48\x47\x06\xee\x03\x27\xe3\x50\x9c\x2e\x96\xd5\x61\xd7\xb5\xe7\xb9\xe2\x08\xc5\xc5\x8d\x1f\x3a\x5a\xc7\x51\xce\xfc\xef\x28\xbe\x6a\x6c\x26\xa5\xd4\x78\xa1\x9f\xce\x72\x51\xa1\x93\xab\x31\x0d\x4e\x01\x55\x32\xa9\x4b\xa5\x86\xe8\x6a\x8c\x22\x93\xae\x65\xae\x8a\x51\x3c\x0d\x9b\xee\x74\xb4\xdd\x9a\x71\x66\xe6\x71\x5e\xc4\x0b\xd3\x4c\x5d\x5d\xfa\x7a\x8a\xf3\x0a\x5a\x72\xcd\xc0\xc6\xca\x0a\x19\x9b\x56\x73\x7d\x47\xef\x0e\x83\x59\x33\xd9\xda\x9a\xe6\xb4\xc1\xf1\xda\x73\x6e\x8f\x57\x2f\xe3\x7c\x53\xae\x85\x1d\xb7\x53\xc8\x74\x8a\x05\xc0\x67\xe4\x41\x79\xbd\x2e\xf7\xd3\x0d\x33\xe3\xac\x92\xa5\x85\xb3\xb6\x4d\xa8\x63\xa5\x52\x5b\x0e\xda\x58\x19\x4e\x66\x79\xea\x35\xcd\x79\xcf\x2f\x70\x92\xa3\xcd\x83\xa9\x6e\xa1\x6a\xd8\xd4\x9c\x56\xa9\xdb\x6f\xac\x0b\xf3\xa1\x77\x28\xef\xcf\x99\xa1\xbc\xf2\xf2\xd8\x67\x67\x28\x8f\x5e\x20\x7b\xc5\x06\x76\xe6\x5b\xe2\x99\x65\xe9\xaf\xed\x66\xf6\xeb\xb7\x84\xcc\xb2\x1c\x07\x59\xc0\x89\x92\xc0\x43\x28\x48\x00\x7e\x4b\x30\xfb\x06\x2b\x36\xc3\xdf\xd8\xf0\x7e\xb1\x50\x8f\x6f\x26\xff\xe7\x0a\x75\xad\xee\xaf\xbd\x55\xf5\xe9\xda\x3f\x7e\xb3\xea\xb0\x05\x8c\x45\x2c\x0b\x31\x27\x63\x91\x47\x3c\x4f\x31\x44\x86\xc9\x63\x59\x94\x18\x99\x17\x44\x0a\x38\x59\x96\x81\x68\x32\x2c\xe6\xa1\x68\x42\x60\xf0\x80\x8d\x37\xce\xc8\xa2\x29\x22\x6e\xb7\xfd\xf5\xa4\xfd\xaf\x9e\xc5\x7f\xda\x4a\xa5\x41\x05\x94\xf2\xab\x68\xb4\xd0\x18\xbb\x0f\xd0\xca\xf7\x18\x59\x2b\x2c\xe7\x95\xcc\xaa\x26\x44\x69\x15\x67\x3a\xf3\x45\x4e\x5e\x70\xc3\x28\xa8\xb9\x03\xe5\x8e\xeb\xea\x36\x6a\xf5\x2c\x1f\x7d\xde\x7e\x3f\x95\xc4\xe7\xf9\xeb\x3e\xfb\xef\x57\xb8\x2e\xf3\xce\x9b\x1c\x4f\x78\x1e\x30\x82\xc0\x51\x91\x47\xa2\x8c\x91\xc8\x49\x2c\x2f\xc8\x12\xe5\x4d\x99\x02\xc4\xca\x06\xa0\x84\xc3\x84\x93\x30\x4f\x78\x13\x02\x1e\x98\x88\x52\x86\xa5\x94\x6e\x79\x67\xdf\xf3\xfe\x0f\x95\xfb\x55\xbc\x4b\xfc\xe1\xf9\xe2\x4f\xe0\x1d\xb2\x02\x14\x44\x83\xa1\x86\x49\x25\xce\x00\x12\xc3\x42\xca\x49\x02\xa1\xc4\xa4\x40\x06\x32\xc6\xd2\xf6\xbd\x04\x02\x19\xca\x60\x20\xc9\xd0\x64\x04\x9e\x85\xd8\x10\x90\x69\x6e\x79\xe7\x5e\xce\xfb\xa3\xe5\x7e\x15\xef\xb0\x7d\x78\xbe\xfa\x13\x78\x17\x18\x06\x31\x94\x15\x08\x43\x05\x82\x58\x13\x03\x03\x30\x32\xcb\x31\x94\x40\x99\x85\x1c\xc5\x98\x85\x3c\x64\x31\xcb\x89\x58\x90\x25\x40\xb1\x6c\x62\xd3\x14\x4d\x99\x11\x08\x0b\x76\x9b\x92\x6f\xf2\xae\xc0\x7e\x45\x52\xe0\xd8\x1e\xaa\x75\x02\xcc\x76\x1b\x76\x0a\x38\xab\x2f\x45\x3d\xb5\xb0\x0b\x53\xcc\xb5\xb3\x8c\x80\x4a\x5c\xd1\x62\xe2\xb2\x94\xda\x99\xe4\xae\x50\xe7\xfb\x29\xef\xe7\x3d\xfb\x9c\xfd\x1a\x7e\xcc\xfe\xbd\xbc\x23\xd6\x44\x06\xe5\x89\x6c\x32\x22\xcf\x42\x96\x15\x01\xcb\x42\x96\x30\x50\x42\x1c\xc4\x32\xe4\xb0\xcc\x40\x68\xc8\x02\x34\x91\x24\x32\x32\x8b\x64\xd9\xe0\x88\x4c\x39\x24\x23\xc4\x6f\x79\xff\x20\xcf\xcc\x50\xc6\xe8\xf4\x06\x6c\xd6\xee\x75\x51\xd0\x11\xdb\xcb\x85\xd1\xe5\xf2\x5a\x69\xe8\xbb\x9c\xd2\xcc\x8c\x8a\x39\x5f\x30\x96\xcd\x62\x77\xf8\x33\x78\x7f\xc4\xfe\xcf\xe6\x5d\x10\x09\xc7\x32\x22\xe0\x05\x53\x80\xb2\x41\x10\x0f\x28\x6b\x72\x02\x02\x32\x0f\x44\x93\x70\x48\x92\x79\x82\x0d\xc1\x20\x10\x98\x86\x89\x04\x82\x65\x0c\x78\x16\x22\x60\x0a\x88\xd9\x6d\x69\xbe\xc9\xfb\x67\xfb\xc5\x2f\xe1\x5d\x7d\xce\xfe\xcf\xe6\x9d\xc1\xbc\xc0\x8b\x1c\x60\x45\x88\x90\x04\x45\x48\xb0\x84\x25\xc2\x01\x5e\x00\x98\x20\xd1\x40\xa2\x08\x18\x5e\x20\x32\x96\x59\x1e\x19\x06\x14\x4c\x88\x
44\x51\x94\x0d\x11\xb3\x12\xda\xf2\x7e\xc1\xdf\xff\xa1\x72\xbf\x8a\xf7\x2a\xff\x73\x79\x97\x89\x41\x88\x49\x0c\x2c\xb0\x26\xe2\x18\xc9\x14\x0d\x93\x13\x78\x91\xe7\x39\x99\x93\x21\x30\x4d\x28\x00\xd1\x30\x24\x83\x22\x49\xa4\x22\x87\x25\x19\x0b\x22\x94\x30\x80\x58\xe0\xe5\x2d\xef\x17\xda\xd5\x7f\xa8\xdc\xaf\xe2\xbd\xd2\xfe\xb9\xbc\x23\xc4\x88\x12\x65\x58\x86\xe5\x0d\x88\x19\x59\xc4\x00\x99\x88\x10\x88\x21\x87\x44\xde\xe4\x20\x95\x65\x9e\x95\x30\xc2\x86\x20\x23\x99\xa1\x94\x93\x64\x91\x15\x25\x5e\x20\x10\xef\xb6\xd7\x5c\xc8\x33\x2f\xcc\xaf\x31\x85\xe9\x4f\xf2\x9e\x7b\xce\xfe\x71\xbd\x7f\xc6\xfe\xcd\x6d\x52\x97\x7f\xbb\xe1\xf3\x03\xb1\x9f\x3d\x0a\xcb\x57\xa4\x82\x3e\xd7\x27\x46\x99\x2d\x28\x5c\xb7\x33\x6e\x04\x65\x67\xdc\x03\x80\xe6\xa5\xb0\x52\x84\x0e\x50\x1b\x8b\x52\x37\xa5\xf4\x38\x65\x3f\x8b\xa3\x5c\x62\xe9\x1d\x6b\x9f\xd8\x2c\xbe\x11\xcf\x74\x94\xf9\xf1\x1b\xac\xe9\x6d\xaf\x73\x53\x29\xd9\x88\x2b\x2f\x1c\x54\x9f\xd5\xcd\x5c\xb3\xbd\x34\x95\x1c\x31\xc4\x9a\x4e\xa2\x95\x5e\x2e\x76\xd1\xda\x36\x9a\xd5\xea\xc8\x29\x94\xb5\x4a\x96\x0f\xa7\x23\x75\xda\x1e\x60\xbd\x0e\xec\x64\x2f\x55\xf3\x93\x5e\xd8\x75\x34\x31\x99\x6b\xf7\x8d\x70\x0d\x05\x9d\x1d\xe7\xf9\x79\xb5\xba\x19\x83\x87\xd3\xa2\xbb\x10\x96\x13\x5f\x4d\x15\xa6\x72\xab\x84\xd2\x76\xce\x58\xd4\x06\x93\x3c\x68\x2e\x0b\x46\x2f\x03\x94\x20\x65\x94\xbb\xcc\xf4\xd0\xd3\xbd\xc1\xc1\xc1\x33\x4e\xf6\xa1\x1d\xc9\xc6\x45\x7d\xc5\x28\xe0\xd1\x5e\xb8\xf2\x82\x51\x40\xae\xd5\x4d\x07\x4f\xd8\x57\x94\x7f\xae\x77\x72\x29\x5b\xfe\xec\x21\xf7\xe3\x41\x76\x63\xaa\x74\x7b\x19\xce\xd0\x61\x3a\xac\x39\x14\x3a\x8c\x33\x65\x88\x5d\xc5\x79\x26\x5a\x8e\x9b\xfd\xf2\x40\x5e\xa8\x43\xaf\x99\x46\xa4\x2b\xb5\xad\x5c\x3c\xf5\x78\x23\xc8\x32\x66\x36\xca\xb3\x4b\x67\xa8\xa7\xaa\x4a\x34\x9d\x52\x3a\x4c\x57\x93\x6d\x3a\xe8\x14\xd6\xb2\x97\x5d\x46\xeb\x9c\x66\x0a\xd2\x24\xe5\xb0\x86\x5d\xcf\xbb\x45\xc3\x98\x11\xe4\x44\xbe\x3e\xcf\xc8\x7c\xd0\x18\xa1\x45\xaa\x10\xa6\x55\x8a\x90\x3c\x1d\x0f\x16\xb0\xef\x77\x26\xf9\x78\x93\x5f\xab\x35\xd1\xd3\x9d\x96\x3c\xf1\xfd\x95\x53\xcb\x34\x06\x63\xc0\xd2\x8c\xe5\x2c\x52\xe3\xf5\x78\xac\x0a\x1a\x56\xb3\x28\xb5\xac\xa4\xa4\x3d\xb0\x7f\x3e\xc8\x9e\x75\xf2\x27\x83\x6c\x0a\x53\xad\xac\xf1\xc2\x20\xfb\x3b\x87\x5e\x77\x05\xd9\x8b\xe7\x57\x1e\x0f\xb2\xec\x19\xfa\x77\x0e\xf7\xc9\xf5\x88\x1b\x41\x96\x1e\xf3\xc3\xb4\x9e\x4a\x2b\xc6\x70\x4a\xe1\xb4\xe6\x45\x28\x1a\x0e\xda\x85\x71\xd6\x0e\x6b\x51\x56\x33\x7d\x88\xec\x41\x43\x48\xe5\x26\x32\xcd\x8f\x93\xf6\x52\x5d\x0d\x95\x41\x77\xdd\x87\xa9\x4a\x9f\x19\x70\xcd\x9c\x88\xc1\x34\x64\x20\x5b\x19\x29\x93\xc9\xbc\x23\xf9\x20\x6e\xc9\xb0\x6d\x76\x43\x3a\xa7\x43\x98\x49\x97\x52\xb5\x1c\x4c\x72\x8b\x49\x69\x5d\x2c\x56\x8c\xaa\xb0\x94\x70\xa6\x44\x1b\x9d\x0c\xad\x74\xa3\xc3\x2b\xcc\xff\x78\x90\x3d\xed\xe4\xcf\x06\xd9\x2a\xd0\x73\x95\x17\x06\xd9\xdf\x39\xaf\x74\x57\x90\xbd\x78\x32\x2d\x1b\x0d\xe7\x8b\xec\xac\xd6\x55\x74\x19\x36\x98\x46\x2b\x6a\x9b\x0b\x2d\x5b\xf0\xb3\xa9\x4c\x9b\xf8\x6b\x53\xaf\xf7\x6c\xcf\xc5\x56\xa5\xa3\x9c\x2e\xfa\xdd\xe8\x2e\x6e\x17\x21\x1f\x5a\xc4\x2b\xa4\xf6\xd7\x41\x9f\xba\xb7\xaf\x17\x54\xc6\x4f\x0a\x51\xba\x3e\x95\xfc\xb6\x97\x57\x7a\xd3\x9a\xdc\x84\x68\x21\xd4\x64\xc3\x95\x60\x63\x55\x5f\x80\x1e\x63\x96\x81\x09\x47\xcd\x86\x08\x67\xf9\xd4\x64\xa4\xac\x32\x9c\x27\xb7\x82\x7a\x4b\x5c\xcb\xe6\x60\x32\x72\x7a\x29\x4d\x99\x48\xee\xd2\x91\x47\x76\x5f\x57\xe3\xa0\x68\x37\x3c\x5a\x22\x05\x46\xb4\x38\x25\x4c\x4f\xc6\x85\x24\x69\xe2\xd4\x3a\x63\x16\x2a\x19\x2d\xec\xf6\xb3\x92\x8b\xb9\x2e\xdb\xe8\x59\x07\x64\x97\x03\xae\x7f\x5a\xa9\x27\x01\x17\x73\xb7\x4d\x50\x47\x0b\xc8\x0f\x24\xa4\xb1\x7b\xa8\x8b\
x2b\xd7\x95\x49\xfc\x93\x80\x7f\x89\xfd\x2b\xc9\xe6\x1e\xfb\xe7\x03\xce\x67\x27\x52\x1f\x5a\xb8\x78\xb5\xfd\x4f\x0c\x78\x95\x83\x7d\xfd\x11\xfb\xb9\x4e\x1b\xec\x94\x97\xc5\x31\xb1\xb8\xb1\xe3\x15\xa5\x56\xde\xce\xa6\xc8\x10\x73\xb0\xde\x8b\x0a\xe5\xf2\xba\xdb\x91\x16\x1d\x6b\x90\x46\x99\x99\x50\x11\x8e\xb2\x62\xfa\x10\x70\x8b\x73\xfe\xee\x4a\x48\x2f\x9e\x65\x7e\x7c\x17\xc2\xdf\x98\x90\x8e\x76\x15\x14\xac\x8a\xaf\x6a\x5d\xc8\x8d\x71\x8b\x69\x9a\x13\x9a\x22\x68\x39\x6e\x56\xf2\x2d\x3d\x33\x2c\xba\x04\x44\x1c\x8f\x8a\x64\x65\x8e\x7c\xbb\x13\x75\xe5\x65\x56\x17\x47\x4a\x8b\x93\xf2\xf2\xc4\x0a\xca\xe5\x46\x71\xe5\x95\x67\x13\xaf\xad\x69\x23\x65\x61\x76\x57\xa9\xfa\x58\xc7\x71\xd2\xf0\xa2\x69\xaa\xe5\x83\xae\xee\x16\x1d\xc5\xc2\xb3\x1c\xef\x71\xab\x5e\xbd\x94\xe3\x66\x1d\x8a\x46\xa3\xc9\xd4\x31\xea\xbe\x24\x3a\xd3\x83\x77\xbd\x2a\x21\x3d\x30\x0c\x79\x69\x42\x7a\xd2\xfe\x0b\x13\xd2\x4b\x66\xc0\xde\x5f\x7f\xdb\x0a\xc7\xb3\x09\xe9\xb3\xf6\xff\xf1\x84\xf4\xe2\xe5\x97\x4f\x16\xe2\xbe\x84\xb4\x2d\xd4\x27\x9d\xfc\x89\x6d\x51\x0a\x74\x42\xef\x08\x4a\x7e\x8f\x57\x57\xdd\x72\xd6\xc8\xf6\xe7\xe5\x49\x69\xdc\x2f\x03\x14\xcc\x47\x39\xdb\x4d\xe6\xcb\x20\x09\x95\x95\x38\x04\x28\xc9\x8c\xc6\x8d\x06\xb1\x55\x85\xf0\x0b\x67\x4c\xa6\x1a\x6e\x0c\x59\x87\xcf\xb8\x30\xe2\x95\x71\x69\xad\x84\x59\xbd\x08\xac\x1e\x1a\xe5\x0d\xaf\x54\x2f\x6a\x4a\xfc\x6b\x11\x5f\x3b\xb3\xea\xa8\xaf\xf6\x3a\xc4\x10\x27\xd9\x15\x60\xbc\x92\x9e\xe7\xc6\xd3\x52\x2b\x8c\x0c\x61\xc1\xca\x3e\x59\xe1\x74\x86\x0c\x19\x74\xa8\xe4\xcb\x09\xec\xcc\x09\xae\x24\xb0\x63\xc2\x1f\x98\xac\xb4\x98\x43\xdd\x5d\xb9\x6e\x27\x90\x17\xda\x7f\x2c\x81\x9d\x4f\xe1\xff\xc7\x05\xb0\x72\xba\xb4\xf2\x77\xe3\xcb\xce\xd0\x6a\xf8\x74\x82\x79\xf1\x3a\xe3\xe3\x09\xe6\x3f\x68\xdf\xe5\x8d\x04\x93\xf5\x8b\x13\x79\xca\x81\x68\x4d\xf5\x89\xaf\xcf\x32\x8b\x1c\xb4\x50\x9d\x33\x34\x31\xdb\xc8\x4c\x92\x8b\x36\xd7\x91\x44\x18\x4d\x27\x15\xbd\xc3\x15\x4d\x5e\x60\xbb\x61\xb9\xdd\xf7\xeb\x7a\xa0\x49\x64\x2c\xd2\x09\x98\x2d\x09\x71\xd2\x5a\xb8\x66\x34\xa1\x53\x0d\xe5\x61\x3f\x4e\x02\x05\xb5\x93\xaf\x66\x06\xe2\xb4\x30\x48\xae\xaa\xe3\x71\x3a\x97\xad\xf9\x81\xef\x28\xed\x9e\xbf\x6a\x36\x83\x88\x61\xc4\x81\xe3\xb0\x0b\xef\x80\xed\x3f\x24\xc1\x3c\x14\xe0\x2f\xb4\x7f\xee\x3b\x77\xd9\xbf\x9c\x60\xfe\x93\x86\x2c\xca\x85\x04\xf3\x37\xe2\x7b\x4d\x82\x79\xf1\x82\xfa\xe3\x09\xe6\xc6\x44\xea\x51\x7d\x3c\xf6\x7b\x52\xdb\x9a\xd9\x67\x8d\xf3\x89\xbf\xa3\x0b\xbd\xc9\xa7\x95\x79\x38\x13\x68\x72\x0e\x3b\x41\x29\x5b\x28\xcf\x58\xa9\xbb\x50\xd9\xf5\x22\xab\x0b\x76\xb5\xeb\x66\x60\xce\x1b\x36\xed\x56\x4a\x1b\x2f\x87\x85\x65\x16\x4e\x26\x83\x42\xb5\xcb\xda\x93\x71\x7f\x99\x92\x9b\xbc\x10\x16\x4b\xf9\x4c\x8d\x9f\x8f\xc5\xe4\xb0\x38\x5f\x16\x82\x85\x68\xa7\x77\x01\xef\x06\x30\x12\xa5\x76\xe4\x2d\xc7\xdd\x79\xab\x93\x6f\x64\x27\x02\xd6\x18\x26\x1d\xcc\x2a\x49\xcf\x60\xeb\x45\x37\x37\xe0\x17\x85\x65\xfb\x30\x17\x7c\x39\x99\x5c\x9e\x74\xfc\x2c\x37\x47\x0e\xac\x3c\x97\x9c\x63\xaf\x3c\xd2\xf7\xae\x87\xf3\x77\x24\xbc\x87\x12\xce\x0b\xed\x9f\xfb\xf2\x5d\xf6\x8f\x86\x84\x2f\xdc\x0c\xf2\xd0\xa4\xf8\xab\xed\x7f\x34\x24\x7c\x77\xff\xac\x85\x3f\x7f\x70\x5b\xbf\x0f\x25\xf4\xa3\xa2\xfd\xc4\x5c\x72\xcf\x5c\xd6\x6b\x77\xd4\xfc\xfb\x46\xcd\x23\x6f\xd4\xc4\xc9\x68\x3a\x42\x75\x75\x54\xaa\xd0\xb4\x1d\x66\xa2\x60\x46\x0b\x35\xa7\xd5\xa2\x82\x9d\x19\x97\x57\x8c\xe3\x17\x52\xb5\x52\xdf\x19\xe5\x04\xe9\x68\x89\xb9\xbe\x69\x72\xa5\x33\xbe\xe2\x09\xbc\xe4\xf6\xf3\x8d\x9e\xdd\x51\x11\x1f\x99\x7b\xaa\x1d\xea\xee\xe8\x6d\xa0\xe3\xeb\x83\x44\xf7\x3a\xfb\x99\x4f\xda\xbf\xf5\xf6\xd1\x8d\x13\x56\x3e\xbb\xf5\xe9\xec\x94\x95\xb7\x98\x3b\xde\xbb
\xb4\xfd\x1d\xf3\x43\x1d\x5d\xe8\x2d\x5d\xd7\xb2\x1f\x4f\x6d\x7f\xfd\xfc\x9d\x96\xab\xa7\x42\xbf\x3f\x2c\x7f\x7f\x1a\xf6\xdb\x49\x2c\x1f\x14\xf3\xf8\xd4\xfe\xf8\xac\xfe\x23\x8d\xf1\xab\xb6\x4a\x36\x7b\xe1\xfc\xda\xbd\xc1\x44\xbd\x51\xac\x2a\x8d\x7e\xa2\xac\xf6\x13\xbf\xee\x8f\xf6\xfa\xb6\x3f\x37\xfa\xea\x29\xce\x87\xa3\x70\x5f\x0a\x3c\xbc\x81\x3a\xbc\x05\xf9\xd2\xa9\xba\x9b\xff\x5e\x04\xcf\x40\xee\x25\x64\x6f\x06\x4e\x41\x6d\x0f\xf2\xbb\x7d\xe4\xee\xd5\xf3\x60\x5f\x02\xf7\x44\xf9\x25\xe0\x37\xac\x27\xda\x5a\x51\x6f\xab\x89\xc3\xe9\x3e\x9f\x2a\xc9\x6b\xf8\xfe\x64\x01\xde\xd7\xc1\xe1\x14\xe2\x2b\x27\xe8\xbe\x1d\xba\xfa\x1a\xbc\x5b\x65\x97\x80\x1e\x99\x39\x45\xb8\x3b\xd7\xf5\xf2\x79\xae\x6f\x87\x8e\xbe\x04\x5c\xac\xeb\x12\xb6\x83\x91\x53\x68\x96\xff\x2d\x3e\xe5\xf5\xe6\xf9\xa5\x27\xff\x78\x15\xd2\x23\x95\x17\x01\x9f\x9b\xbc\x54\xe9\x57\x0e\x39\xdd\xfd\xef\x75\x48\xc3\x99\x71\x05\xe3\x9b\x99\x53\x74\xf1\xf9\xa8\x37\x4e\x3c\x3d\xfa\xf8\x22\x90\x07\x85\x97\x80\x9e\x99\xbb\x2b\x87\x1d\xfd\xd8\xe2\xd1\xf9\x99\xaf\x41\x7b\x50\x78\x09\xed\x99\xb9\x53\xb4\x6f\xe7\x86\xde\x38\x97\xf3\xe8\xe3\xab\xf0\xee\x15\x5e\xc4\x7b\x6a\xee\x14\xef\xfe\x68\xcf\x1b\x47\x76\x1e\x1f\x8c\xf8\x1a\xc0\x07\x85\x97\x00\x9f\x99\xbb\xda\x35\xd8\x1e\xd9\xf9\xed\x70\x1e\xe7\xcd\x33\x19\x4f\x4f\x30\x7c\x51\x39\x8e\x54\x5e\x2c\xc9\xb9\xc9\x4b\x59\x22\x24\xd3\x6f\x6f\x87\x10\x5e\x3f\x2f\xf1\xe8\xb4\xc0\x57\x61\xbf\x09\xfc\x29\xd4\x67\x5d\xd4\xb3\x43\xfc\x5e\x83\xff\x54\xe9\xa5\x42\x5c\x30\x7b\xb5\x24\xc7\x67\x19\x5e\xeb\x6a\xee\x0e\x3a\xdd\xff\x0a\x41\x51\xcb\xaa\xbd\xfb\x4e\x59\x8c\x45\xcf\xf5\x24\x6a\xda\xa1\x17\xdb\x6e\x16\xb5\x7c\xc2\x88\x02\x42\x12\x6f\x47\xaa\xfe\x96\xe8\x16\xd4\x86\x9a\x38\x3e\x62\xf5\xf0\x1a\xfd\xfb\x8e\x26\x09\xa3\x6d\x5b\xbe\x29\xc5\xc3\x28\x4f\xd5\x6c\x40\xee\xfa\x29\x27\x10\x8f\xcf\xd9\xff\x76\x7c\xaa\xfe\xb7\xed\x69\xf7\xef\xd0\x6d\xaa\xc1\x58\x85\x64\xfa\x30\xb0\xbd\x86\x0d\xa6\x43\x60\x9c\xc0\xba\xde\x55\xdc\x88\x53\x42\x9e\x87\xf0\xa6\x64\x8b\xe2\x28\xc1\xdc\x09\x64\x77\xe7\x39\x20\xc7\x4a\x36\x40\x4e\x3b\xc2\x77\x22\x09\xb1\x4f\xdc\xf9\x93\x48\x8e\x95\x6c\x90\x1c\xfd\xa4\xf5\xfd\x30\x76\x0d\xea\xd3\x48\x8e\xf5\xec\xc0\xbc\xf5\x12\x4e\xc1\x1c\x9f\xa5\x7d\x2b\x7d\x3d\x87\xe8\x5c\xd1\x06\xd2\x59\x6e\xfc\x90\xa3\xba\x17\x46\xc3\x80\x34\xf5\x4a\xe2\xed\x77\x8c\x13\xe6\xcc\xf1\x13\xd8\x73\x7c\x9b\x44\x24\x36\xfb\xff\x02\x00\x00\xff\xff\xe7\x00\x3f\xf5\x43\x97\x00\x00")
+
+func failed_transactionsCoreSqlBytes() ([]byte, error) {
+	return bindataRead(
+		_failed_transactionsCoreSql,
+		"failed_transactions-core.sql",
+	)
+}
+
+func failed_transactionsCoreSql() (*asset, error) {
+	bytes, err := failed_transactionsCoreSqlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "failed_transactions-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x71, 0xd6, 0x9, 0x26, 0x93, 0xa2, 0x88, 0x71, 0x30, 0x2b, 0xa6, 0x48, 0x4b, 0x5e, 0x63, 0xe7, 0x7, 0x3c, 0xd, 0x3a, 0x60, 0x28, 0xa4, 0x5a, 0x96, 0x20, 0x4f, 0x93, 0x22, 0xd0, 0x6, 0x85}}
+	return a, nil
+}
+
+var _failed_transactionsHorizonSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\xf9\xaf\xa2\x4a\xf6\xf8\xef\xef\xaf\x20\x9d\x49\x6e\x77\xbc\xdd\xb2\x2f\xfd\x3e\x3d\x09\x2a\xee\xe2\xbe\x4e\x26\xa6\x80\x42\x51\x04\x2f\xe0\x55\xef\x64\xfe\xf7\x6f\x58\x54\x44\x51\x5c\xba\x5f\xbf\xc9\x97\x74\x3a\x57\x39\x75\xb6\x3a\x4b\xd5\xa9\x03\x7e\xfd\xfa\xc7\xd7\xaf\x48\xc3\xb4\x9d\x89\x05\xdb\xcd\x2a\xa2\x00\x07\x48\xc0\x86\x88\xb2\x5a\x2c\xff\xf8\xfa\xf5\x0f\xf7\x7e\x6e\xb5\x58\x42\x05\x51\x2d\x73\x71\x00\x78\x87\x96\xad\x99\x06\xc2\x7d\xa3\xbf\x61\x21\x28\x69\x8b\x2c\x27\x63\x77\x78\x04\xe4\x8f\xb6\xd0\x41\x6c\x07\x38\x70\x01\x0d\x67\xec\x68\x0b\x68\xae\x1c\xe4\x07\x82\xfe\xe9\xdd\xd2\x4d\x79\x7e\xfa\xad\xac\x6b\x2e\x34\x34\x64\x53\xd1\x8c\x09\xf2\x03\x79\xe9\x76\xf2\xec\xcb\x9f\x3b\x74\x86\x02\x2c\x65\x2c\x9b\x86\x6a\x5a\x0b\xcd\x98\x8c\x6d\xc7\xd2\x8c\x89\x8d\xfc\x40\x4c\x23\xc0\x31\x85\xf2\x7c\xac\xae\x0c\xd9\xd1\x4c\x63\x2c\x99\x8a\x06\xdd\xfb\x2a\xd0\x6d\x78\x44\x66\xa1\x19\xe3\x05\xb4\x6d\x30\xf1\x00\xd6\xc0\x32\x34\x63\xf2\x67\xc0\x3b\x04\x96\x3c\x1d\x2f\x81\x33\x45\x7e\x20\xcb\x95\xa4\x6b\xf2\xab\x2b\xac\x0c\x1c\xa0\x9b\x2e\x18\x5f\xed\x08\x2d\xa4\xc3\x67\xaa\x02\x52\xca\x23\xc2\xa0\xd4\xee\xb4\x91\xba\x58\x1d\x06\xf0\xdf\xa6\x9a\xed\x98\xd6\x76\xec\x58\x40\x81\x36\x92\x6b\xd5\x1b\x48\xb6\x2e\xb6\x3b\x2d\xbe\x24\x76\x42\x83\x8e\x01\xc7\xb2\xb9\x32\x1c\x68\x8d\x81\x6d\x43\x67\xac\x29\x63\x75\x0e\xb7\x7f\xfe\x0a\x82\xb2\xf7\xd7\xaf\x20\xe9\xda\xd5\xaf\x13\xd0\xa7\x76\xbb\x74\x3e\x83\xae\x21\x5f\x22\x16\x82\x3a\x20\xf7\xc0\x4b\x62\x4e\x18\x84\x20\x03\xb4\x1e\x57\x63\xa8\xaa\x50\x76\xec\xb1\xb4\x1d\x9b\x96\x02\xad\xb1\x64\x9a\xf3\xcb\x03\x35\x43\x81\x9b\x71\x48\x38\xc3\x06\x9e\xa1\xdb\x63\xd3\x18\x6b\xca\x2d\xa3\xcd\x25\xb4\xc0\x7e\xac\xb3\x5d\xc2\x07\x46\x1f\x38\x79\x88\x8b\xdb\xc6\xea\x50\x99\x40\xcb\x1b\x68\xc3\xb7\x15\x34\xe4\x9b\x44\x08\x0d\x5f\x5a\xf0\x5d\x33\x57\x76\xf0\xdd\x78\x0a\xec\xe9\x9d\xa8\x1e\xc7\xa0\x2d\x96\xa6\xe5\xba\x63\x10\x53\xef\x45\x73\xaf\x2e\x65\xdd\xb4\xa1\x32\x06\xce\x2d\xe3\x77\xc6\x7c\x87\x29\x05\x7e\x79\x07\xd3\xe1\x91\x40\x51\x2c\x68\xdb\x97\x87\x4f\x1d\x4b\xf1\xf2\xce\x58\x37\xcd\xf9\x6a\x99\x00\x7a\x79\x8d\x25\x1f\x0a\x68\xd6\x8d\x88\x77\x41\x37\xf1\x00\x37\x4e\xa8\x2a\xb4\x92\x81\xee\xd0\xdf\x31\x24\x50\x6b\xb2\x41\x5e\x68\xbd\x81\x48\x38\x14\x5f\x1b\xb1\x74\x07\x4c\x9d\xab\x33\x60\x1f\x05\x20\x69\x7b\xd5\x8c\xa6\x7b\x4f\x4f\x02\x6c\xfa\x7c\x98\x57\x01\x35\xdb\x19\x3b\x9b\xf1\xf2\x3a\x4a\x17\xd2\x5c\x26\x85\x84\x49\xc1\x76\xa9\xe4\x32\xb0\xb4\x73\xf7\xab\x60\xd7\xa3\x98\xb4\x4d\x36\x99\x7e\x8e\x74\xb5\x6d\xdb\xab\x6b\x94\xf7\xc0\xb2\xa9\xc0\x1b\xd7\x05\x7b\x33\x58\x02\xcb\xd1\x64\x6d\x09\x8c\x8b\xc9\xfb\xda\xd0\xf1\xf2\xc6\xb5\xc9\x3e\xa3\xdd\xca\xc1\xf9\x81\x37\xd3\xf7\x94\x97\x84\x9e\x0f\xf8\xd3\xf1\xfb\x93\xe9\xce\x64\xf0\xa7\x9b\x1f\x76\x4b\x3f\xcf\x18\xc6\x09\x39\x98\x98\xd6\x72\xbc\xd0\x26\xc1\x82\xe1\x02\x0b\x11\xc8\xc4\x32\xde\xbe\xde\xbb\x84\x39\xa9\x71\xfa\xa3\xb3\xf5\x6a\xb7\x26\x22\x9a\xe2\x53\xce\x09\x79\xbe\x5b\xed\x24\xc4\x1d\x63\x74\x4f\xc0\x1c\x4c\xf7\x65\x4c\xde\xa7\xe4\xe2\xef\xb2\x74\x5b\x68\x76\x05\x31\x7b\x87\xce\xdc\x75\xb6\x0d\xdf\x6e\xa6\x7c\x84\x24\xf1\x68\x05\x26\x84\x3d\xac\x66\x13\x4b\x18\xe3\xf5\xb7\xc8\x77\x1e\x45\xb2\xb1\xc1\xba\x2f\x19\x70\xb0\xc8\x4b\x2c\x5b\x10\x01\x6e\x91\xc5\x1f\x92\x10\x36\x58\xfe\x25\xe7\x67\xb7\x5e\x4c\xc2\x51\x24\x86\x5c\x06\x0e\x85\x84\x00\x90\x2f\x14\x5a\x42\x81\xef\x9c\x01\x5e\x68\xee\x8e\x43\x93\xe1\x67\x63\xb5\x80\x96\x26\xff\xeb\xdf\x5f\x12\x8c\x02\x9b\x3b\x46\xe9\xc0\x76\x3e\x03\x63\x0b\x75\xaf\x14\x93\x60\x84\xaa\x59\x67\x8
7\xe4\xbb\x62\xb6\x53\xaa\x8b\x17\xe4\x19\x83\xc9\xe4\xc0\xdd\x2b\x72\xc2\xe8\x05\x1c\x3b\xe9\x1e\xc0\xe1\xca\xea\x0d\x3f\x30\xff\x8a\xdc\x22\x88\x27\x7a\x02\x0c\xc2\xa0\x23\x88\xed\x08\x0a\x7d\x39\xb1\xdf\xf4\x9d\x2d\x66\x8b\x42\x8d\x3f\xa1\xf0\xe7\x1f\x7e\x15\x4e\x04\x0b\xf8\x7d\xf7\x1d\xd2\xd9\x2e\xe1\xf7\x60\xc8\x9f\x48\x5b\x9e\xc2\x05\xf8\x8e\x7c\xfd\x13\xa9\xaf\x0d\x68\x7d\x47\xbe\x7a\xc5\xb9\x6c\x4b\x70\xe7\x2b\xc0\xbc\xc3\xf7\xc7\x11\xc6\xe3\x9b\x01\xe2\x6c\xbd\x56\x13\xc4\xce\x05\xcc\x3e\x00\x52\x17\x8f\x11\x20\xa5\x36\xf2\xb2\x2b\xbb\xed\xbe\xb3\x3d\x24\x2f\x51\xca\x3b\xf1\x03\x9a\x7b\x0d\x5d\x95\xe7\x48\x97\x62\xbd\x13\xd1\x27\xd2\x2f\x75\x8a\x7b\xb6\xc2\xf5\xb7\x23\xf2\x07\x2c\x11\x46\x6e\x11\xfe\x04\x89\xa7\x80\x46\x35\xbd\x9c\xb4\x9b\x55\x64\x69\x99\x32\x54\x56\x16\xd0\x11\x1d\x18\x93\x15\x98\x40\x4f\x0d\x09\xeb\x85\x61\x76\xaf\x1b\x5a\xc0\xfe\xce\x56\x0f\xfc\xef\xe6\xf6\x9c\x2e\xf7\x96\x7d\x15\x3f\xd2\x12\x3a\xdd\x96\xd8\x0e\x7d\xf7\x07\x82\x20\x48\x95\x17\x0b\x5d\xbe\x20\x20\x9e\xf4\xb5\x5a\xd7\x8f\x77\xed\x4e\xab\x94\xed\x78\x10\x7c\x1b\xf9\xc7\xf8\x1f\x48\x5b\xa8\x0a\xd9\x0e\xf2\x0f\xcc\xfd\x14\x9d\x8d\xab\x8e\xf8\x98\x74\xd7\xd0\x3f\x4d\x38\xfc\x9c\x70\x49\x22\xd5\x63\xf2\x25\xa0\xb0\x17\x71\xff\xd5\x5d\x12\x7e\xfe\x03\x41\xb2\x7c\x5b\x40\xfa\x45\x41\x44\xfe\x81\xfd\x0b\xfb\x77\xfa\x1f\xd8\xbf\xf0\x7f\xff\xf3\x1f\xb8\xf7\x37\xfe\x2f\xfc\xdf\x48\xc7\xbf\x89\x08\xd5\xb6\xe0\x2a\x45\x10\x73\x5f\xce\x6a\x26\x41\x1e\x78\x50\x33\xd7\x29\xfc\x6c\xcd\xfc\xdf\x3d\x9a\x39\xcd\xa9\x81\x1e\xf6\x79\x38\x99\x22\x0e\x69\xfb\x04\xa3\xc7\x31\x82\xb4\x5d\x5d\x21\x3f\x0e\x11\xe0\xd5\xff\xba\x33\x6c\x08\xc8\x8f\xb0\x47\x7c\x39\xe7\xb5\x4f\xe5\x31\x8a\x30\xc2\xe2\xce\x8d\x93\x73\x78\x76\x09\xf4\x28\x97\xe7\x90\x46\x38\x3d\x72\xc8\x63\x76\x0f\x56\x76\xca\xed\xb9\x65\xde\xc3\xdc\x9e\x41\x1a\xe5\x36\xec\x24\x17\xb9\x75\x33\x97\x02\x55\xb0\xd2\x9d\xb1\x03\x24\x1d\xda\x4b\x20\x43\xe4\x07\xf2\xf2\xf2\xe7\xf1\xdd\xb5\xe6\x4c\xc7\xa6\xa6\x84\x8e\xd2\x8e\x64\x0d\xaf\x7f\x03\x11\x3d\x07\x4b\x26\x9e\xef\x8b\xe1\xcd\xb7\x2f\x91\xa6\x20\x92\x36\xd1\x0c\xc7\x5b\x18\x88\xdd\x6a\xd5\x17\x07\x2c\xdc\x65\x3c\x22\x4f\x81\x05\x64\x07\x5a\xc8\x3b\xb0\xb6\x9a\x31\x89\x80\x19\xab\xc5\x7e\xc9\x8f\x68\x86\x03\x27\xd0\x8a\x80\xa8\x3a\x98\xd8\x88\xbd\x00\xba\x7e\x4a\xc6\x31\x17\xfa\x29\x91\xcf\x38\x45\x7d\xd9\x43\x9e\x4e\x7b\x74\xdf\x70\xaf\x3a\xa2\xd5\x8e\xbd\x4a\x1c\xb8\x39\x51\xc8\x72\xa9\x6b\x5e\xcd\x1e\x71\xb4\x05\xb4\x1d\xb0\x58\x22\xee\x9c\x79\x1f\x91\x0f\xd3\x80\xa7\x8c\xc6\xed\x8a\x76\xeb\xd1\x60\x3b\x95\x8c\xe7\xfd\xe6\x2b\x06\x6b\x60\x86\x7c\xab\xe3\xaf\xe8\x30\xef\x8b\x92\x98\x6d\x09\xde\xf2\x2b\x33\x0c\xbe\x12\xeb\x48\xad\x24\xf6\xf8\x6a\x57\xd8\x7f\xe6\x07\x87\xcf\x59\x3e\x5b\x14\x10\xec\x9a\x30\x77\xab\x3d\x8a\xe8\xc4\x14\x83\xa2\x07\x62\xc0\x8d\xf3\x0e\xf4\xcf\x2f\x31\x12\xbf\x7c\xff\x6e\xc1\x89\xac\x03\xdb\xfe\x12\x9d\x2e\xff\xac\xe2\x8c\x6d\xd1\xe4\x97\x0b\x13\xe5\xef\x8d\x1f\x96\xcc\xaf\xe8\xec\xe5\x3a\xef\x19\x87\x5a\xdd\x79\x36\xcf\x82\xcb\xa6\x72\x0e\x1c\xc3\xcf\x83\xfb\xe5\xbf\x33\x03\x28\xfa\x92\x87\x9d\x2f\x2f\x3c\xc9\x6c\xc3\x38\x7f\x99\xd1\x5e\x12\x04\xa9\xf7\x45\x21\x87\x64\x86\x57\x24\xf2\x2b\x74\x97\x05\xda\xe3\x8a\xdc\xfe\xa6\x29\x71\xbc\xed\x6a\x3e\x8f\x5a\x5d\x80\x27\x30\xbb\x88\xcf\x8c\xe3\x22\xfd\x69\x89\x2b\x0e\xf2\x93\x77\xf0\xf1\x29\xc6\x9a\x3d\x3b\x3e\x7f\x4b\x81\x0e\xd0\x74\x1b\x99\xd9\xa6\x21\xc5\x1b\xdb\xae\x50\xf6\xa8\x1e\x02\x3c\x81\x1e\x76\xe7\xd6\x31\xbc\x85\x0e\x93\x13\x79\xe1\xb9\x73\xec\xf3\x03\x03\xb5\x84\x2a\xa3\xde\x44\xec\xf9\xd8\x45\x39\x34\x42\xe1\x30\x11\xc9\xe0\x
f7\x87\xc9\x91\xc4\x64\xae\x9c\x43\x6e\x8a\x8e\xb1\x20\x70\xae\x0e\xf2\x61\x57\x4b\x25\x31\xec\xde\x74\x82\x8f\x91\x73\xf6\x13\x59\xb0\x93\xf5\x80\x03\xf4\xb1\x6c\x6a\x86\x7d\xde\x06\x55\x08\xc7\x4b\xd3\xd4\xcf\xdf\xf5\x4e\x3e\x55\x18\x37\xd7\xde\x6d\x0b\xda\xd0\x7a\x8f\x03\x71\xd7\xa1\xce\x66\xec\x2d\x93\xb4\x8f\x38\xa8\xa5\x65\x3a\xa6\x6c\xea\xb1\x72\x45\xe7\x68\x67\x2c\x10\x28\xd0\xf2\x96\x17\xfe\xf7\xf6\x4a\x96\xa1\x6d\xab\x2b\x7d\x1c\x6b\x28\x81\xe0\x40\xd3\xa1\x72\x0d\x2a\x60\x3d\xc6\x84\xe2\x5d\x2f\xa6\xbe\xfd\xa8\x27\xc6\x9c\x99\x5c\xc9\x8b\xc9\x23\xd2\xf5\x18\x77\xab\xc8\xcf\x4d\x75\x17\x69\xfc\xaa\xd4\x77\x93\xa0\x0f\xa6\xc2\x8b\xb4\x4e\x53\xe3\x79\xf0\x0b\xa9\x32\x74\xfa\xf3\x34\xdb\xbc\xb6\x15\x3a\xee\xbc\x8a\xd9\x2e\xb9\xbb\x03\xd9\x17\xc5\xcb\x92\x0f\x26\xc9\x20\x3a\x98\x2b\x4b\xde\xb7\x72\xc4\xa4\xa7\x5d\xc8\x79\x79\xf9\xfe\x3d\x7e\xbb\x16\xef\x07\xc1\xe1\xdb\xa3\xea\x0c\xfa\x05\x3f\x3f\x75\x4d\x11\x84\xcd\x7b\x32\x9c\xd7\x2f\x13\x4b\x36\xd2\xad\x78\x09\x28\x68\xa0\xbc\x04\xe2\xef\x95\xcf\x02\x9c\xf6\x7d\x5e\x81\xbb\x48\x6e\x0f\x75\x81\xa2\xc7\x92\x66\x8f\x6d\xa8\xeb\xd0\x42\x24\xd3\xd4\x21\x30\x76\x79\x4b\x93\xe1\xd8\x38\xca\xd1\xfe\x77\xc7\x79\xfb\xd0\x71\x34\x8e\x64\xf4\xa3\x9e\xa7\xe8\xcd\xd0\x51\xfe\xd9\xee\x50\x8f\xeb\xb1\xd7\x3f\x8c\x64\x8b\x42\xb6\x82\x7c\xfe\x1c\xd6\xe0\x3f\x11\xf4\xcb\x97\x6b\xa8\xce\x0d\xdf\x29\xed\xff\x4e\xf4\x98\x00\xdf\x91\x4e\x23\xe8\x23\x0a\xf7\x18\xbc\xe8\x4a\xe7\x4f\xc1\x9f\xe0\x5c\xe7\xfb\x1a\x12\x66\xd2\x24\x21\xec\x91\x5c\x7a\xad\x87\xe0\x39\xd9\xf4\x0a\x95\x5f\x95\x4f\x6f\x14\xf6\xc1\x8c\x7a\x85\xda\x69\x4e\x8d\x1b\x70\x21\xab\x1e\xf5\x8d\x3c\xd1\x56\x77\xf6\x19\x66\x29\xf1\x46\x2b\x88\xfd\x57\xb6\x6f\x49\x13\xef\xe5\x1c\x7a\x16\xf6\x40\xfa\xac\xbf\xb8\x3b\x85\xf8\xad\x46\xdc\x26\xee\x2f\xd9\x86\x39\x9b\x31\x34\xde\xa1\x6e\x2e\xe1\xb9\xd2\xa6\xb3\x71\x37\x45\x2b\xdd\x89\xb9\xb9\x80\x0e\x88\xb9\xe5\x6e\xc7\xe2\x6e\xdb\xda\xc4\x00\xce\xca\x82\xe7\xaa\x70\x1c\xfd\xe5\x5f\xff\x3e\xac\x5d\xfe\xf3\xdf\x73\xab\x97\x7f\xfd\x3b\xaa\x73\xb8\x30\x63\x0a\x66\x07\x5c\x86\x69\xc0\x8b\x6b\xa1\x03\xae\x53\x34\x81\x64\xda\x02\x8e\x25\x73\x65\x28\x5e\x55\x9b\xb5\x80\x31\x81\xd1\x1d\xdb\x71\x6a\x75\x35\xe1\x62\x9b\xc0\xc8\x16\xd8\x30\xa0\x35\x4e\xe6\x01\x07\x4c\x17\xcd\x35\x8c\xf8\xba\x92\x83\x42\x3d\x5c\x8f\x77\x16\xeb\xb3\x77\xad\xe8\x87\x68\xca\x2e\x16\xec\x9a\xd0\x92\x04\x30\x3f\x18\x78\x1d\x7f\x57\xfa\xdb\xda\x42\xe7\x42\xa5\x37\x5c\x53\x0b\xd7\x79\x6f\xdb\xe5\x3c\x4f\x88\x84\xed\x7f\x17\x85\xba\xb8\x3b\x4a\x22\x64\xec\x3a\xe0\x69\x62\x26\xee\xa0\xbc\x28\xe8\x95\xa4\x75\x5e\xd4\x1c\x70\x00\xa2\x9a\xd6\x95\xf3\x2e\x24\xc7\x77\xf8\x2b\xe2\x95\xc4\xb6\xd0\xea\x20\x25\xb1\x53\x3f\x3a\xf3\xf2\x72\x7c\x1b\xf9\x8c\xbd\x22\x2f\x38\xba\xbb\x5e\x5e\x11\xfc\x15\x41\x5f\x91\x97\x97\x78\x76\x2e\x9d\x39\xdd\xca\x52\xf4\xdc\x69\xc7\xd6\x0b\x36\xd6\x0c\xcd\xd1\x80\x3e\xf6\x7b\x80\xbe\xd9\x6f\xfa\x8b\xc7\x2b\xc6\x7d\x45\xe9\xaf\x28\x81\x60\xec\x77\x9c\xfd\x4e\x32\xdf\x50\x02\x27\x39\x3a\x85\xe2\x2e\xd3\x89\xb0\xe3\x63\xff\xd9\x8e\xa3\x19\x91\xb6\x63\xc7\xd4\x94\xcb\x94\x38\x9a\x62\x6e\xa1\x44\x8c\x57\x36\xdc\xe7\xcf\xb1\x66\x9c\x3c\x4f\x72\x91\x1e\x49\xa2\x24\x7b\x0b\x3d\x72\x0c\x14\x65\x1c\xad\xca\x5d\xa4\x41\x91\x14\x81\xdf\x42\x83\x1a\xfb\xd9\x7a\xb7\x6f\xf0\x4e\x73\x2f\x92\xa0\x09\x14\xbf\x49\x0c\x7a\x47\x22\x88\x7e\x09\x48\xb0\x24\x46\xdd\x42\x82\x19\x2f\x4c\x45\x53\xb7\xc9\xa5\x60\x31\x1a\xbf\x89\x04\x7b\x24\x45\xd0\xc4\x9d\x80\x0e\x43\xd2\xc4\x6d\x74\xdc\x49\x07\x93\x89\x05\x27\xc0\x31\xad\xcb\x36\xc5\xa1\x18\xca\xdd\x82\x9e\xf3\xd0\xfb\x15\xdb\xf1\x46\xb1\x2e\
x63\xc7\x19\xec\xa6\xa9\xc6\x50\x0f\x7d\x30\x0b\xde\x1e\xfc\x32\x01\x8a\x63\x6e\xd2\x0e\x86\x85\x09\xec\x37\x75\x6e\x00\xb8\x4c\x88\xa3\xb9\xdb\x24\xc1\x8f\x26\x3a\xd8\x46\xfb\x8f\x0d\x5f\xa2\x84\xa1\x0c\x45\xde\x34\x23\x18\xe1\x8b\xb3\x2f\x3e\x5c\x9c\x71\x0c\xc3\x19\xfa\x36\x49\xc8\xb1\xaa\x6d\x76\x8f\x50\x98\x0b\x7d\xac\x6a\x50\xbf\x18\x1a\x31\x8c\xc2\xb0\x9b\x82\x30\x46\xed\x4e\x8e\x76\x15\xfd\xcd\x15\x31\x68\xe6\xb6\x30\x8f\xd1\x63\xcd\x98\x40\xdb\x19\x9f\x9e\x19\x5c\x21\xc5\x70\xec\x6d\x33\xc2\x1c\xa5\x7a\xef\x70\x06\x5c\x4e\x26\x18\x8e\xa2\x04\x79\x13\x11\x76\x6f\xbe\xaa\xe9\xaf\x78\x61\xc4\xd9\x31\xf4\x2b\x81\x21\x18\xf9\x1d\xe3\xbe\x93\xdc\x37\x0c\x27\x58\x82\x4a\xa1\xd8\x85\x7c\x7e\xb1\x9b\xe1\xd6\x84\x7e\xd2\xd1\x10\x5e\x68\x14\x32\x83\x42\xb3\xdc\xef\x55\xfb\xf5\x61\x31\x5f\xed\x75\x2a\xfd\x1e\x95\x2f\x14\x79\xa2\x2a\x0e\x87\x78\xb9\x59\xa9\x31\x75\xbe\xcc\x77\x85\x66\xbe\x4b\x57\x1b\xd9\xb6\x90\xef\x0d\xea\x62\x54\x41\xb1\x44\x70\x97\x08\x4f\xf5\x33\x8d\x21\x4f\x0d\xc9\x3e\x2f\x14\x07\xfd\x16\xde\xad\xd4\xf1\x6e\x9d\xcc\x74\x0b\xc5\x6e\x93\x21\x85\x6e\xa3\x52\x17\xf1\x66\xb1\x47\xf6\x5b\xc5\x7a\xa9\x25\x56\x2a\xc5\x93\x59\x88\x25\x42\xb8\x44\xb2\x83\x4a\x81\x6e\x89\x64\x5d\x2c\x09\x8d\x6c\x4d\xcc\x67\x18\x02\xe7\x49\x82\x1e\x51\x0d\x31\xd7\x6e\x55\x0b\xfd\x0a\x53\xc8\x54\xb3\xb5\x66\xb5\x94\xaf\x93\x6d\x46\x18\xf6\x7b\xdd\xc4\x44\x48\x4f\x5d\xad\xc6\xb0\x58\xaa\xe2\xd9\x12\x91\x17\x9b\x64\x66\x50\xcd\xd7\xc4\x5c\x35\x5f\xee\x8a\x8d\x2e\x5e\x1c\x12\xa3\x5a\xbe\x5d\xac\x8b\xdd\xac\x50\xe7\xdb\x7d\xa6\x99\x65\xea\x03\xbc\xf8\x72\x6f\xf7\x8d\xbb\x94\xbd\x32\xd7\x41\xc7\xe2\xa1\xd9\xf8\x9b\x0d\x2f\x77\xa6\xbc\x22\xe4\x2b\xe2\x58\x2b\x98\xc0\x02\x4f\x7b\x4e\xee\xb6\x3f\x7f\xa7\x15\xb6\x3e\xd9\x82\x8a\xe6\x8c\x81\xbe\x9c\x02\x63\xb5\x20\x5d\x9f\xe9\xb6\x73\x2f\x0f\x4e\xe7\x3d\x5d\x16\x4f\xd1\xf3\xd1\xbe\xf0\x15\xc1\x92\x6a\xf9\x5c\x93\xc5\xbd\x6a\xde\x35\x5a\x84\xf4\x8c\x31\x6e\xf0\x64\x08\x9c\xc5\x3c\xa6\x5c\x9f\xfc\xcf\x27\x3f\x19\x7e\xfa\x8e\x7c\xc2\x50\xf4\x5b\xb0\xdd\xf8\xf4\x8a\x7c\x3a\xf4\xf2\xb8\x37\xbb\xed\xdc\xe1\x4b\x67\xbb\xf4\xbe\x8c\x4e\xdc\x01\xc2\xef\xe9\x71\x61\xee\x9d\xc1\x4f\xff\x8d\x73\xc9\xa8\x68\x44\x44\x34\xfc\x15\x21\xfe\x47\x44\xc3\xf7\xa2\x31\x04\xc3\xfc\x8f\xce\x9a\x2f\xda\xff\xd8\xac\xe1\x2c\x4b\x72\x28\xc5\xb1\x94\x3f\x6b\xa8\x27\x9b\xae\x2d\x34\x4f\x34\x0e\xc7\x09\x82\xc1\x51\x82\x66\xa9\x6f\x24\xc3\x50\x2c\xca\xfc\xad\x64\xc4\x76\x32\x62\x28\xba\x8b\x27\xff\x6b\x32\x12\xaf\x08\x4b\xb1\x1c\x47\xb0\x34\xcb\x79\x22\xfa\x12\xda\x0e\xb0\x1c\xcd\x98\x8c\x25\xa0\x03\x43\x86\x81\xad\x1e\x8c\x35\x31\x05\xf2\x98\xc2\x79\x1f\x38\xe3\x04\x3b\x3d\x19\xc0\xd1\xde\xe1\xdd\x12\xb9\x3e\xe8\x8b\xb4\x86\xda\x64\xea\x12\xc4\x5e\x91\x4f\x7e\x92\x19\xcf\xe1\xf6\x97\xd9\x92\xc7\x15\x89\x33\x81\xbb\xfc\x2c\x3d\x07\x14\x7e\xba\x9e\x23\x12\x25\xd3\xf3\x9d\x6b\xf0\x9b\xe2\x92\xcf\x15\xcd\x06\x2e\xfb\xd3\xf4\xec\x53\xf8\xe9\x7a\x8e\x48\x94\x4c\xcf\x77\x6e\x43\x7c\xae\xae\x2c\xe1\xce\xf5\x87\xde\xbb\x84\xdb\xf5\x88\xee\x84\xa5\x5e\x91\x17\x9a\x96\x59\x89\x20\x28\x4e\x01\x0c\x54\x15\x85\xc6\x54\x4a\xc5\x25\x82\x41\x69\x9c\xc3\x68\x9c\x22\x65\x9a\xc4\x58\x9a\x20\x64\x05\xc7\x70\x8c\x24\x18\x12\x95\x19\x59\x41\x25\x88\xb2\x14\xe5\xae\xa9\x25\x15\xe3\x58\x5a\xc6\x25\x49\x55\x24\x5c\x66\x69\x86\x54\x08\x52\xe2\x20\xc4\x08\x94\x91\x54\x95\x94\x51\x19\x03\x92\x84\x52\x38\xe4\x54\x06\xa8\x00\x55\x18\x5a\x86\x32\x81\x4b\x94\xca\xbe\x78\x76\x83\x46\x36\xcf\xf4\x77\x82\xfc\x4e\x70\xd1\x3d\x75\xf0\xf5\x37\x0a\x23\x48\x8e\xba\x7a\x17\xc7\x48\x86\x64\x09\x9a\x64\xd1\x57\x04\xa3\xdd\xf9\x3c\xb9\x5e
\x11\xce\xfd\x0f\x0b\xfe\xdb\x7d\x89\xed\xff\x70\xf7\x14\x3c\xcf\xf3\x59\x86\x1d\x4d\xed\x0a\x93\x96\xaa\xa5\x11\x8a\xa2\x2b\x48\x76\x32\x4c\x1a\x5d\xd7\x0a\x96\x9d\xaf\x2e\x39\x3a\x35\xc1\xf0\x0f\xbb\x66\x0d\xc8\x37\x73\x23\x5a\x9b\x19\x66\xe4\xd9\x54\xb7\x54\xa2\xfb\xe2\x3c\xf7\xa1\xcf\x5a\x2d\x68\x11\xbd\x55\x6b\xfe\xd1\x2e\x8f\xf8\x66\x93\x64\x3a\x4d\x17\x35\x3f\x68\xf4\x6a\xc5\x35\xbf\xbf\xa0\x52\xcd\x2f\xb8\xec\x64\xd0\x63\x38\x7e\x35\x12\x39\x30\xc9\x8b\x1b\x6b\x94\x6b\x4e\x7a\x84\xa4\xea\x05\xfe\xad\xc1\xa4\x36\x25\x75\x94\xed\xf4\xb7\x99\xa5\xd2\xee\x92\x05\x76\xb8\x74\x14\xa5\x58\x59\xeb\xef\x0d\x3b\x23\x0f\xfb\x1b\xa5\x34\xdf\xb6\xcb\x93\x99\x42\x6d\xd7\xb5\xfc\xc4\xc5\xdc\x15\xc9\x2a\xf8\x58\xe2\xcd\x03\x31\xbe\x2e\xf0\xc7\x57\xc6\xfd\x6f\xc4\x0f\x30\xb2\xc9\xf3\x39\xb4\xcc\xff\xdd\x2e\xdf\xa8\xb0\x57\xef\xf4\x33\xc6\xfb\xa3\x0e\x41\x3e\xc7\x98\x5f\x00\x4e\x72\xb8\xc2\x71\x0c\x90\x31\x82\x50\x39\x14\x23\xa0\x2a\xe3\x14\xab\x4a\x94\x24\x53\x84\x04\x55\x94\x63\x25\x95\xe3\x58\x45\xa6\x58\x59\xa2\x50\x55\xa6\x55\x96\x95\x18\x15\xb0\xde\xe1\x0c\xe1\x47\xba\x53\xdb\x66\x63\x4d\x9e\x40\x69\x22\xde\x21\x76\x77\xfd\x35\x32\xcd\x61\x2c\x79\xc1\x21\xd8\x84\x0e\x41\x97\xcb\xd5\x91\x4e\xaf\x3b\x0d\x8a\xef\x30\x6c\x79\x98\xc2\xde\xf3\xf5\x14\x5b\xd6\xd2\x8b\x21\x91\x9f\x55\xbb\x8d\xcd\xbb\x56\x55\xe9\x09\xdf\xd5\xab\x2b\x0b\xd3\x06\x85\xe2\x62\xd8\xa5\xda\x8b\x56\x51\x51\xf3\x4d\x6e\x5b\xcf\xbd\x71\x7a\xbe\xb6\xcd\xbf\x91\x28\x5d\xb6\x25\x66\xe0\x7b\x81\xe7\x10\x93\xc3\x84\xa6\xad\xb7\xce\xac\x54\xed\x77\xde\x15\x21\x85\xa3\x92\xf3\xa1\x00\xc0\x11\x42\xc3\xe9\xce\xf3\x64\xbf\x5d\x67\x2b\xa5\x16\xbf\xe5\x4a\xa5\x2c\x5b\xc6\xf1\x9e\x0d\x08\x6e\xd5\x76\x1c\x82\xcc\x71\x4d\xb3\x36\xeb\x50\x69\x72\xd2\x95\xf2\xc0\x49\x97\xf4\x3e\x10\x27\x8a\x4e\x15\x3d\x1f\x68\x9e\x71\x88\xda\x24\x62\x4f\xff\x0b\x0e\x41\x78\x3e\x71\x83\x43\x10\xcf\x31\xe6\x17\x86\x42\x69\x56\x06\x32\xae\x62\x90\x06\xa8\x84\xcb\x40\x05\xb8\x42\x29\x92\x24\xcb\x32\x25\x63\x1c\xcd\x60\x8c\x4a\xab\xaa\xca\x32\x1c\x27\xcb\x04\x90\x68\x40\x73\x94\x84\x32\x12\x87\x01\xff\xb4\x12\x3f\x6b\xdb\x4c\xac\xc9\x93\x38\x8d\xc7\xbb\x8b\x7b\xd7\xcd\x2e\xc1\xce\x0a\x63\x59\xf6\x82\x43\x50\x09\x1d\x82\xe8\x16\x66\x95\xf5\x7b\x71\x61\x56\xab\x95\x94\x86\x29\x2b\xf6\x23\xbf\x1e\x8d\x36\x2a\x9e\x4e\x4f\x29\xa3\x56\xb7\xc0\x42\x97\x32\xcc\x1c\x48\x1f\xef\x68\xb3\xd0\x98\x17\x06\x6d\x19\x97\x27\x99\x6c\x6e\x69\x6a\x9a\x50\x68\x91\x2a\xbe\xb0\xd0\x65\x86\x2b\x00\x66\x38\x49\x0d\xa4\x92\x37\x81\x9e\x43\x84\x6c\xb4\xdd\xaa\x76\xbb\x45\x3b\x93\x4e\x7f\x14\xed\x59\x91\x37\xb1\x11\xdf\xcd\xcc\x4d\x76\x43\xb6\x5b\x03\x71\x0b\x54\x67\xf4\xd6\x33\xf2\x54\xad\x38\x73\x46\xa6\x5e\x56\x37\x40\xad\x83\x75\xba\x43\x10\xdd\xee\x5a\xff\xa8\x29\xf5\x7a\x65\x30\x81\x62\x2e\x83\x75\xe0\x9b\x90\xe3\xcc\xa1\x87\xbf\x76\xc6\x21\x8a\xe8\x39\xa3\xfa\x9b\x3b\x04\x7e\xab\x43\xe0\xcf\x31\xe6\x17\x9a\x50\x38\x56\xa5\x08\x1a\x42\x9a\x55\x30\x09\x67\x24\x4a\x62\x39\x15\x27\x80\x4a\x11\x18\x26\x31\x14\xcd\x01\x9c\x54\x81\x8a\x91\x28\x01\x14\x54\xa2\x70\x89\x26\x08\x17\x07\xe4\xb8\x4b\x19\x82\x8e\x35\x79\x8a\x64\xc8\xf8\x05\x15\x45\x32\x6e\x82\xf0\xf7\x73\x24\xc5\xe1\x17\xdc\x81\x48\xe8\x0e\x78\x63\x34\xc3\xc4\x15\x65\xa2\x52\x99\xe9\x93\xc6\xb6\xfe\xde\xdd\x14\x88\xde\xd2\x9c\xa7\xde\xf3\x7c\xdd\xc9\x62\x15\xbc\xc6\x64\x18\x7a\x94\xae\x51\xa5\x76\xf6\xad\x45\xa4\x96\x1f\xf9\x7e\xa5\x59\xff\x10\x26\xda\x44\x77\xc8\x72\x9b\x87\x39\x66\x68\x99\xb6\x00\xaa\x69\xaa\x54\xf5\xd7\x2a\x9e\x3b\x78\x7f\x95\xf6\xff\xf1\x9e\xc5\xda\x87\xcf\x6b\xbe\xd1\x9c\xfb\xd3\x8d\xf6\x9a\xec\x6a\xb1\xac\xd5\x5
3\xad\x72\x3a\xfb\x51\x42\x7b\xa3\x32\x4c\x51\x70\xba\x4e\xcf\xc9\x0e\xa9\x4c\xd7\x8d\xe6\x7a\x2d\xf4\x18\x66\x61\x8c\x00\x3d\xed\x55\xab\xdd\x8c\x50\x04\xab\x34\xdc\x94\xe9\xb9\x58\x2b\x7d\x7c\x34\x7a\xef\xd8\xa0\xce\xc1\xf5\xb2\xb8\x19\xf4\xf8\x7c\xc3\xcb\x12\xa5\x33\xee\x22\xd8\xe7\x4c\xee\x6f\xee\x2e\x37\xe7\x0f\xec\x39\xa6\xee\x35\xb0\xed\xf6\x08\x18\xc7\xa0\x5f\x51\xec\x2b\x8a\x21\x28\xfa\xdd\xfb\x17\x6b\xd2\x34\xcb\x50\xf1\xf9\xc1\xbd\xeb\xe6\x07\x12\xe7\x48\x8e\x66\x70\x8e\xbe\x60\xf1\xe7\xed\xdd\x67\xe9\xaf\x9e\x9a\xf8\x2b\x33\xa8\x68\xe4\x36\xbd\x6d\x57\x32\x4c\xce\xc8\x71\x45\x1c\xdd\xcc\x32\x29\x1b\x9d\x38\xf6\xba\xb4\xfe\xc0\x06\x4a\xbb\x3f\x04\x99\x32\xf0\xf7\x06\xc2\x19\x53\x3e\x7f\xed\x4c\x99\xe7\x33\xf3\x5f\x20\xc8\x53\xaf\xfd\x86\x33\x30\xe5\x2b\x7b\xf0\x04\x0f\x8a\xdd\xbb\x25\x8f\xe9\xb0\x0b\xf9\x8f\xbf\x81\x25\x51\xca\x2b\xd7\xc7\xf8\xdd\x15\x34\x78\x04\x0d\x7e\x1f\x9a\xe8\xb9\x08\x71\x1f\x1a\x32\x7a\x72\x74\x1f\x1a\x2a\x52\xef\xbf\x93\x1b\x3a\x7a\x6c\x70\x1f\x1a\x66\x87\x86\xc5\x48\x86\xb8\x9b\x1b\x36\x52\xe9\xbf\x93\x1b\x2e\x5a\x4c\xbf\x0f\x0d\x86\x1e\x97\x77\xc9\x3b\xd1\x60\x91\x2a\xf1\x9d\x68\xf0\xe3\x22\xe8\xbd\xdc\x10\x91\xea\xf0\x9d\x68\x22\xa5\xc9\x7b\xb9\xa1\x22\x15\xce\xe7\x3c\xbb\xf9\x94\x73\xe7\xcb\xad\xbb\xaf\x88\xcb\x7b\xb2\x83\xe8\x98\x47\x18\x1f\x8e\x9a\xa1\x10\x77\x14\xdf\xf6\x1f\x68\xaf\x04\x84\x79\x25\x5c\xc7\x7c\xa8\x5a\xfb\x8a\x7c\x52\x2d\x73\xf1\x50\x65\xfd\x15\x09\x55\xae\xf1\xdf\xed\x34\xf2\x27\xf4\xed\x9c\x99\xa9\xe3\xd8\xbf\xff\x80\x9e\xce\xd4\x03\x5a\xde\xcf\xd4\xbd\xda\x38\x9a\xa9\xdf\xee\xdc\xf8\x27\xf4\x25\xc5\xcf\x54\xd0\x29\xb0\xff\xf0\x93\x7c\xea\xff\xcf\xd4\xa3\x33\x15\x2c\x3d\x0e\x1f\x70\x6f\x9a\xfc\x13\x33\xaf\xdd\xd6\x15\x06\xfd\x46\x85\x14\xb4\xd7\x1c\x79\xac\x39\xff\x09\x59\x57\xfa\xff\x7c\x52\x3e\x7d\x47\xf0\x57\xe4\x93\xf1\xe9\x3b\x82\xfd\xf7\x15\xf9\xb4\xeb\x45\xfd\xf4\xdd\x5d\x44\x7f\x92\x56\x5b\xcd\x98\x8c\xcf\x2a\xfb\xe8\xde\x25\x9d\xdb\x50\xd7\x4f\x21\x83\xf3\xbb\x28\xa2\xbf\xd3\xd4\x1c\x35\x6e\xec\x3f\x90\xde\xd4\xd0\xc9\x1a\x1c\x1c\x6b\x65\x3b\x10\x3e\xea\x26\x1e\x1a\xd3\x7a\xd4\x63\x7f\x0f\xa7\x7a\x72\xef\x67\xec\xcc\xed\x56\xd0\xfb\x0f\xe8\x5f\x3c\x73\x8f\xac\x3d\x7e\x8b\x99\xfb\xf9\x4b\x8c\xf0\x66\x63\xff\x37\x1b\x6a\x50\x50\x57\x86\x12\xc8\x72\x67\xe7\xad\xa7\x17\xbf\xff\xf5\xd1\xc9\x4d\xd0\x2d\xf1\x60\x8b\xf0\x2d\x6a\x0b\x76\x45\xfb\xbf\xc9\x9f\xaa\xb6\x07\x8c\xf9\x37\x53\x9b\xbf\x7d\xdb\xff\x8d\xfe\x54\xb5\x3d\x10\xbd\x7f\xbe\xda\xae\xec\x05\xcf\xbc\x7b\x25\xc9\x3e\xf0\x3a\xd6\xeb\xaf\xa1\xb8\x77\xbf\x19\xfb\x80\xe8\xd9\x3a\x1d\x1d\x5f\x53\xb8\x8a\x08\x8f\x20\x8a\xab\xff\x5c\x45\x44\x44\xf6\x56\x71\x35\x97\xab\x88\xc8\xe8\x26\xed\x5e\x44\x54\x64\x0f\x71\x37\x47\x74\x04\xd1\xdd\x3a\x62\x22\x6b\xe5\xbb\x39\x62\x23\x2b\xbb\xbb\x39\xe2\xa2\x0b\x8d\x7b\x11\x1d\x55\xee\xd8\xf8\xea\xd4\x75\x44\xd8\x31\xa2\xbb\x75\x74\x54\xbd\x23\x1f\xe1\x88\x88\x64\xa7\xbb\x11\x91\xc7\xf1\xfa\x7e\x8e\xa8\x63\x44\xf1\x35\xbc\x5b\x5f\xa3\xf2\x8c\x2a\xde\xb5\xe7\xd2\x6f\xa9\xe3\xc5\xbe\x34\xe5\x09\x91\x35\xf4\xf4\x1b\x00\x18\xcd\xaa\x18\x8e\xe1\xa4\xc4\xc8\x18\x47\xcb\x28\x50\x00\x84\x8c\xcc\x10\x80\x26\x15\x82\x51\xdd\xe9\x67\x65\x20\x4b\x14\x07\x38\x4c\x55\x09\x96\xa3\x71\x9a\x25\x29\xc8\xc8\xea\xcb\x2b\xe2\xb7\xfd\xde\xbf\xd8\x0c\x9d\x83\x93\xbb\x93\xbf\xf8\x5e\x43\x9a\xb8\xd0\x88\xe8\xdd\x3c\x8a\xea\xfe\x81\x61\x81\x32\xcb\x4e\x4f\x31\x86\xf5\x9e\x32\x7a\x73\x06\xcb\x4e\x31\xe3\x48\xf2\x10\x5d\x64\x17\xaa\x9c\x29\x55\x84\x49\xdf\xd0\xdf\xf3\xa5\x29\xe0\xbd\x03\x37\x7e\x77\xac\x9d\x8d\x9c\x67\x65\x0e\x7f\xfa\x67\xe3\x75\x3c\x9b\xe6\xeb\x24\x
35\xcc\xe4\x08\xa7\xd8\xcb\xd7\xb1\x16\xc1\xa3\x35\x38\x6f\xb0\xe5\x16\x6d\x88\x18\xcf\xc1\xbe\xa6\x6c\x4b\x4e\xd7\x1b\xdf\xeb\x89\x41\x03\x60\x76\x65\x12\xa6\x43\x52\x6f\xd9\x86\xb0\x59\x36\xd3\x84\x59\x14\x53\x1f\x18\xd3\xda\x6a\x36\xa6\xab\xb5\xfc\x70\xd1\xec\x4f\xac\x55\x3b\xd5\xf1\x49\x13\xe2\x28\x7c\x58\x98\xdf\xf3\xdb\xac\xc8\x85\x36\x57\xb7\x7b\x46\xaf\x98\xed\x95\x8a\x28\x99\xe5\x46\x95\x8f\x8f\xca\x30\xd3\x52\xb2\x8b\x6d\xaa\xbc\x5e\x7c\xe8\x0a\xd3\x94\xf9\x6a\x7d\x33\xea\xca\xfc\x64\xbe\x2a\xa8\x66\x5b\x79\x1f\x2c\x8b\x04\xbb\x10\xdf\xad\xb7\xa6\x06\x6a\x76\x5b\x5c\x74\xca\xfd\x61\xeb\xa3\xf8\x3e\x31\x7f\xbc\x84\x0f\x5c\x0b\x9d\xb4\x7b\x79\x7f\x1f\xd8\xc8\x78\x5f\xfa\x1d\x5c\x07\xf8\xe6\x5e\x7f\x39\x0f\x28\xc4\xb6\xb4\x98\x2c\xb0\x1e\xae\x4c\xa8\x1e\xb6\x78\xc3\xa0\x5e\x93\x0b\x98\xb3\x99\xb5\x87\x95\x11\xb7\x16\x26\x66\x3b\x03\x60\x9f\xed\x6a\x79\xd3\x53\x52\x8f\x7f\x9f\xd5\x0f\xf3\x91\x89\xd0\x3f\x9a\x8b\x33\x57\xe6\xc9\xf4\xb3\x37\xd2\xe7\xf7\x3a\xf1\x74\x54\xdb\xeb\x24\x60\x6f\x05\xb2\x52\x6f\x30\xc2\x73\xfa\xa0\x0f\xac\x1e\xdd\xdd\xac\xa5\x3e\x51\x10\xcb\x93\xa5\x41\xf0\xed\xec\xb4\x94\x5f\x52\xd2\xa6\x5d\xea\x7b\xe3\xcb\xdd\x6c\xaa\x2e\x1f\xf0\x09\xfc\xb1\x4d\x06\x57\xec\x99\xb2\x07\x9f\x7f\x8c\x7e\x8d\xbc\x8f\xfe\x0f\xcf\x3e\xfe\xb3\x5c\x8f\xaa\x28\xbd\xe9\x2b\x5d\xb9\xdc\x9d\xa8\x9d\x49\x15\x9b\x5b\x8d\xda\x72\x92\xcf\xa3\x15\xa9\x4a\x1a\x59\xa9\xde\x23\x9c\xcc\x9a\xb7\x99\xbc\xde\x5a\x67\xb3\x6d\x72\x94\x9a\x96\x71\x0e\xce\x55\x75\x3b\xc4\x53\xab\x65\xb6\x6c\x6e\x37\x25\x7c\x54\xd3\x47\xd3\x62\x0d\xa6\xb2\x93\x1f\x3f\xbc\xc5\xb4\xf7\x72\x9e\x5d\x17\x83\xff\xbf\xf7\xb2\x7b\x2f\xae\x5c\x4f\x3a\xe1\xf7\x31\xd0\x90\xc0\x31\x1a\x25\x29\x85\x62\x38\x09\x02\x12\x55\x71\x85\xa0\x00\xca\x91\x28\xad\x40\x02\xb0\x1c\x09\x65\x89\x92\x20\x83\x2a\x92\x02\x28\x28\x73\x32\x4a\xe2\x0c\x40\x15\x0a\x60\xfe\x23\x9a\xd8\x23\x95\xaf\x50\x60\x24\xae\x05\x46\x02\x65\x38\xec\x42\x47\xaa\x7f\xf7\x68\x79\xe9\x9b\x65\x85\x9e\x41\x8d\x98\x2d\xcc\x12\xdb\x29\xe8\xb9\x34\x9c\xc8\x04\xd3\x18\x38\xc5\x4a\xe5\xa3\xdf\x63\xd7\x3d\x6d\x94\x01\xd9\x15\x55\xa5\x3c\xdb\x0d\x85\xc6\xe8\xb4\x9f\x84\xc6\x1b\x5d\xed\x81\xd0\xc8\x33\x0b\xdb\x0c\xb1\x52\xd8\xf3\xdb\x14\x8c\x4a\x4e\xca\x0d\xdf\x2b\xf3\xf2\x6c\x58\x41\x81\xf5\x3e\xcd\xeb\x46\xaa\x50\x41\x53\x0c\xbf\xa5\x27\x28\x48\x61\xd3\x59\xab\x05\x75\x81\x87\xe4\x7a\x31\x83\x6f\xa2\xdc\x9a\xe0\x0b\x32\x6b\x30\x0e\xc9\xcf\xca\x1f\xbc\x9d\x6b\x96\x50\x6d\x00\xa6\x05\xc9\x2c\x37\x4a\x22\xbf\x8e\x84\xc6\xe6\x39\xd3\x0f\xeb\x27\x3e\x34\x86\x80\xac\x37\x91\xae\xc2\x3a\x98\xcc\x36\x35\xd0\x6d\x70\x74\xe6\x43\xb5\x39\x88\xca\xa6\x25\x8e\x06\x1f\x99\x7e\x79\x9e\x37\x2b\xcc\xfc\x7d\xee\xb5\xed\xba\xa1\x49\xc3\x0e\xf3\x11\x73\x5d\x09\x8d\xcf\xa3\x7f\x1a\x06\x12\xd0\x3f\x09\x8b\x7e\x9f\xcb\x8d\x61\x29\xdf\xeb\xa2\x81\xe2\x6f\xb4\xe7\x80\x95\xf4\xfe\x5a\x9f\xe3\x59\x38\x7c\xf9\xab\xf9\xcb\xad\xc0\x76\x72\x89\xbf\x1f\x3f\xce\xa4\x98\xd0\xb2\xe5\x56\x5f\x72\x43\x7c\xc0\x6b\xb4\x31\x3b\x7c\x5d\x4e\x31\xc2\x63\xf4\xc3\x29\xee\x16\xfa\x41\x8a\x69\xcb\xa6\x68\x8b\x1a\xfb\xd6\x5c\x88\x13\xab\xf5\xb6\x4a\x09\xfd\xbe\x4a\x0e\xad\x0e\x63\xe7\xaa\x6f\xb9\x96\xc5\xf4\x0b\x62\x3e\x4f\x77\x9b\x19\x66\x96\x05\x22\x6d\x62\x9b\x42\x0e\x48\x93\x8a\xba\x22\x26\xd9\x9a\x51\xcb\xae\x45\x7e\xd6\x2e\x2b\x6f\xc2\x68\x35\x99\xb3\x13\x34\xc7\xc7\xa7\x18\x77\x45\x7f\x7b\x86\xc1\x64\x92\x22\x69\x02\xc5\x69\x06\x00\x96\xa1\x19\x28\xb3\x32\x0b\x09\x94\xa4\x50\x19\x02\x5a\x02\x34\x8d\x62\x24\x05\x39\x99\xc3\x49\x20\x49\x0c\xa5\x30\x80\xa6\x69\x4e\xa2\x65\x9c\x05\x7e\x86\xc1\x9f\x94\x61\xae\x2e\xbd\x09\x94\
x8b\x6f\x68\x0d\x6e\x1e\x15\x0b\x1e\xcd\x2f\xbf\xd1\xd2\xfb\x42\x7e\xc9\x2d\x4b\x73\xee\x8d\x40\x9d\x0f\xb5\x39\x5f\x36\x57\xd9\x75\x9e\xd1\x40\x83\x90\x44\x3a\xd7\xca\xce\x53\xeb\x2e\xd1\x63\x69\xc6\x79\x9b\x57\x9b\x3d\xa2\xa4\x90\x14\xde\xb7\x2b\xdd\xe1\xb2\xd1\xb4\x44\x16\xce\x68\x75\x8e\xae\x36\x10\x2e\x32\xa2\xfd\x81\x89\x54\xaf\x66\x73\x93\xe1\xef\x98\x5f\xee\x8a\xef\x4f\xa4\x1f\xb5\x87\x44\xf4\xcf\xe7\x17\x9e\x19\x56\x59\x9e\x99\xe9\x13\xa1\x01\x51\xa5\xdb\x65\x7a\x45\x39\xd7\xdc\xd0\xcd\xf4\x5a\x2f\xbe\xc9\x44\x37\x87\x51\xa0\x4c\x94\x34\xcc\x83\xff\xd5\xf9\xe5\x17\xf2\x77\x5f\x7e\xf9\x8b\xe2\x3b\xff\xa4\xfc\x12\xde\xc2\xdc\x42\x3f\xc8\x2f\xf5\xb9\xd6\xc1\x2d\xb5\x4d\x88\x5c\xb6\xad\x67\xc9\x2a\x3f\x58\x95\xcd\xb4\x62\x13\xa6\x28\x54\x3a\x4c\xa6\xa3\x0c\x3e\xac\x15\xfe\xd6\x74\x32\x03\x79\x4a\xcc\x8c\x11\x58\x2f\x5b\xda\x9c\xcb\xda\xc4\x86\x6e\x2c\x53\x9d\x36\x93\xa7\xe8\xa1\x80\x7f\x34\xba\x84\xde\xdd\x7e\xe0\x99\x0b\x5b\x98\xfb\xf2\x0b\x07\x25\x08\x15\x28\xc9\x14\xae\x00\x02\x63\x15\x5a\x52\x08\x8a\xa4\x49\x92\xe0\x08\x8e\x41\x15\x85\xa1\x50\x5a\x92\x58\x49\x05\x2c\xad\xd2\x84\xcc\x72\x32\x45\x33\xac\x8c\x32\x32\xe5\x3d\xbd\x40\xfa\x2d\x0d\xcf\xc8\x2f\xd4\xd5\xfc\x82\x61\xcc\x85\x27\xee\x82\xbb\x47\x55\xe4\x47\x33\x4c\x2e\x32\xbd\xa1\x60\x15\x72\xde\x5e\xaf\xd3\x0a\xfe\xbc\x31\x82\x85\xd2\x86\x67\xb0\xe7\x23\x18\xd8\xc1\x67\xf8\x77\x7b\x45\xa9\xa9\x77\xa6\x67\x95\x73\xc5\xca\x0a\x67\xfb\x6b\x01\xff\x58\xe7\x9a\x94\x5e\xeb\x1b\x59\x26\x6f\x4e\xda\x7a\x27\x2d\xce\x36\x93\xe2\x26\xc7\xcc\xe7\xa3\x62\xad\x8f\xeb\xf3\xd9\x70\x93\xe6\xda\x24\x65\x97\xca\x85\x6c\x9d\x7c\x9f\xd1\xa9\x49\xe9\x7d\x53\xb4\xd6\xb4\x9e\x09\x7b\x30\x1f\x9b\x4d\xa2\xba\xe0\xef\xd1\x6d\x48\xd6\x9f\xae\xbb\x5f\x90\xf1\xee\xca\x38\x4f\xa4\x7f\x7e\x4e\xae\x65\x3c\x0f\xd5\x33\xa2\x74\xd5\x5b\x36\x85\xa2\xfe\xe9\xf5\x53\xa3\xf4\x09\xfd\x6b\x85\xa6\x93\xfb\x91\x65\x5b\x74\xa0\x3f\xbf\x7f\x07\x1b\x7f\x5a\xe6\xbd\x37\xf3\xf1\x3f\x61\x4e\x6f\xa1\x1f\x64\xde\x9f\x15\x20\x9f\x9b\x79\x29\x0c\x03\x98\x8a\x53\x10\x53\x29\x08\x70\x45\x46\x25\x14\xe3\x70\x02\x53\x21\xc3\xe1\x0c\xa1\xca\x32\xce\x90\x0c\x2e\xe3\x04\x2d\x53\x1c\x8b\xaa\x32\xa7\xc8\x8a\x42\x2b\x1c\x46\x41\x3c\x78\x28\x1d\x7b\xa4\xf7\xea\x96\xda\x21\x89\xb3\x24\x7e\xe1\xe1\x5d\xff\xee\xd1\xb1\xab\x6f\x95\x39\x67\xf2\xbe\xce\xad\xea\x7d\xbe\xc9\x31\x2d\xac\xd5\x71\xba\xca\x5a\xcc\x15\x97\xb9\x74\xb6\x0b\x97\x1f\x4a\xb3\x31\xd0\x4d\x43\xd6\xaa\x3d\x3e\x71\xed\x70\xc8\xdf\xbd\x57\x2b\x1e\x96\xb1\x21\xab\xdd\xd3\x6f\x16\x05\x6c\x99\xa2\x9c\x4c\xe3\x8d\x5d\x76\xcd\x02\x3f\x78\xab\x73\x6d\x06\xac\xa9\x3a\x27\x19\x2c\xd3\xda\x36\xd6\xe8\x00\x53\x2a\xa8\xc2\x4c\xdb\x2d\x9a\x59\x15\xd2\xf3\x29\xbf\xcd\x12\x26\xd7\xb1\x1a\x1d\xfa\x83\x53\x46\xf3\xe9\x62\x90\x16\xf9\x39\x6b\x6c\x16\xdc\x54\x1f\x36\x85\x24\xd9\xb5\x10\x36\xe5\xb8\xcc\xc5\x87\xde\x49\x71\xe3\xde\xd6\x3b\xa6\x30\x0e\xfa\x8d\xb9\x92\x64\x8e\xa7\xd0\xbf\xaf\x16\x78\xc8\x5c\xe1\xcc\x77\xe3\x5e\xe8\xec\x11\xc9\xe9\x75\x39\xca\x3d\x9b\xfe\x0d\x47\x34\x3c\xff\xfb\xee\x55\xaf\xd5\x1a\xef\xd1\xd5\xc3\xb5\xc6\x27\xce\xd5\x2d\xf4\x83\x8c\x24\x77\xfa\x06\xb3\x40\x85\x94\xb5\xd5\xdb\x93\x61\x06\x9a\x0c\x56\x75\xac\xdc\x9c\xc1\x56\xea\x87\x53\x2c\xa5\x73\x2d\xb5\x87\x5a\x2d\x62\x95\x2f\x17\xad\x15\x29\xa5\xda\x42\xae\x54\x85\x33\xac\x6e\x73\x0d\xab\x81\x61\x8b\x76\x61\xa4\xb2\x68\xb6\xb3\x55\x0b\x12\x5e\xe8\x4d\xf9\xe6\xb3\x33\x12\xc0\x15\x20\xa9\x24\xe4\x14\x8c\x26\x71\x06\xc7\x69\x14\xc7\x19\x1c\x62\x0c\x0b\x08\x46\xe6\x18\x42\xe6\x30\x86\x91\x38\x8a\x51\x00\x4b\x63\x1c\x0e\x38\x4e\x22\x20\xa7\x12\x80\x03\x80\xf4\x33\x12\xfe
\xa4\x63\xfe\xeb\x19\x89\xc0\xd0\xf8\xd3\xac\xdd\xdd\xa3\xfe\x9d\x47\x0f\xfa\x7f\x61\x46\x0a\x1d\xdc\x17\xb5\xea\x52\x10\xfb\x0c\x31\x93\x3b\x58\x5b\x99\xab\x69\x08\x36\xb3\x76\xb5\xd0\x69\x66\x27\x25\x03\xa2\x0e\x41\x82\x12\xdc\x2a\xd3\xa5\xde\x73\xfa\xdc\x26\xd7\xa4\xa7\x7c\x87\x60\x0b\xdc\x5c\xb3\x2a\x95\x56\x69\x6b\x56\x56\x73\xb3\x2b\x8a\x53\x7e\xad\xf4\xb7\xe9\xc6\xac\x29\xff\x94\x8c\x74\xcf\xc1\xf9\x33\x33\xd2\x83\xf4\x9f\x98\x91\x9e\x72\x68\x7f\x7a\x25\x8e\x72\xbf\xba\x69\x80\xe7\x7f\xdf\xd3\xb9\x6b\x19\xe9\x1e\x5d\x3d\x33\x23\x3d\x3a\x57\xb7\xd0\x0f\x32\x12\x2c\xad\xe6\x4d\x4c\x7a\x87\xe2\xa6\xd1\x2b\xe3\xad\x34\x47\x59\x05\xb1\x64\x8f\x44\xbe\xc2\x6b\x0a\xd3\x26\xd4\xa9\x39\x65\xca\x78\x61\xd1\xeb\x63\x12\x21\x88\xb9\x37\x21\xd3\x50\xd7\x12\xde\xae\xd8\xe6\x52\x98\x55\x26\x6f\x74\x7b\xda\x44\x71\x21\x97\xc1\xa4\x2a\x9b\x12\x33\xeb\x67\x67\x24\x09\x07\x38\xce\xc8\x04\x27\xd3\x24\x20\x49\x55\x66\x80\xa4\x90\x32\x47\xb3\x18\x47\x52\xb4\x8a\x12\x1c\xc7\xa1\xb4\x82\xe1\x32\xc9\xd0\x0a\x83\x4a\x24\x8a\x7b\x2f\x0a\xe3\x68\x85\x06\x84\xff\x4a\x19\xec\x91\x06\x6a\xff\xe1\xba\x8b\x89\x88\x22\x39\x34\xbe\xe1\x6c\x77\x37\xdc\xb5\x19\xe4\xa1\x2a\x5b\x6c\xbe\x37\xe7\x52\x05\x2f\xf2\x44\xbf\x37\x6b\x59\x95\xc5\x6c\x80\xa2\x6a\x81\xb5\xab\x25\x77\xc1\xd0\x5a\x97\xfb\x69\x7e\x40\xf0\xfb\x3c\xe4\x1b\x4c\x64\x52\x4f\xfc\xf4\x8e\x9a\x52\xf8\xad\x11\x99\xde\xfb\x3a\xcf\xb9\xb7\x84\x9c\x43\x54\xd6\x0b\xd0\x58\x35\x94\x7c\xbb\xbb\x51\xf8\x3c\x94\xe8\x7a\x13\x3a\xdb\x66\xa5\xd4\x07\x1f\xba\xd4\xae\xd5\xa6\x8b\x62\x45\xac\xe6\x48\xfb\x6d\x2a\xbc\x75\x47\x72\xb3\x81\xea\xa9\x41\xba\xbe\x4c\x99\x76\x7f\x21\xd2\xa9\x7c\x77\x28\xd9\x1f\x0c\xd5\xc4\x67\x05\xf2\xbd\x56\x4b\x90\x7b\x8e\x8c\xf6\x38\xf7\x84\x64\x3e\xc4\xde\xb0\x3f\x6b\xe9\x0c\x5a\x45\xcb\x85\xad\x33\x5d\x8b\x98\x3e\x44\xc1\x76\x69\x62\x9c\x58\xdc\xbc\x57\xb3\xdb\x3a\xe5\x64\x04\x39\xeb\xcb\x48\x4c\x1c\xab\x6e\x0c\xd3\x4c\xf7\x30\xfe\xae\x3a\xd6\x03\xf4\xf3\x9d\x7e\xc6\x7a\x80\x3e\xcf\xff\x75\xa7\xf9\x67\x63\x6b\xe6\x7e\x5d\xd4\x8d\xd1\x45\x31\xaf\xe9\xe2\xd1\xb9\x70\x6d\x21\x25\x47\xf0\xdd\xa4\x8b\xff\x30\xca\xd6\x2e\x2f\x66\xcc\x8c\x68\x75\xf5\xda\xa0\x99\x19\x2c\x52\xb3\x79\xd1\x92\xe7\x59\x2d\xbf\xb0\xa9\x3e\x3a\xcb\x95\x46\xd3\xed\xac\xbd\x4e\x55\x2b\x66\xab\xa2\x17\x06\x42\x8e\x2b\xab\x7a\xfa\xe3\x4d\x7d\xab\xe6\x97\x33\xf8\x3e\xed\x15\x0a\x4c\x2d\x95\xea\x8a\xe6\x66\x55\xfd\x78\x7e\x67\x01\xa9\x10\x24\x24\x49\x14\xa3\x28\x42\xa5\x49\x40\x73\x32\xa0\x09\x16\x27\x29\x8e\x55\x49\x85\x53\x51\x80\x73\x12\xaa\x42\x42\x86\x04\x2b\x93\x90\x54\x18\x94\x44\x15\xa0\xaa\x18\xae\xaa\xea\xe1\x1d\x70\x0f\xc4\x56\xfc\x6a\x6c\xa5\xb0\x0b\xaf\x58\xdc\xdd\x0d\xf7\x9f\x3f\x1a\x5b\x2f\x74\x14\xf8\xd7\x1d\x6b\xcc\x98\xd8\x9a\x55\x72\x4e\x01\xdf\x2c\x26\xcd\x74\x8d\x77\xde\xde\x54\x75\x92\xa9\xa5\xba\xea\xa8\x57\xfc\xe0\xcc\xdc\xc6\xf9\xc8\x8b\x0a\xc5\xce\xd3\x0b\x5c\xd2\x1b\x05\xa3\x24\x49\x2b\x08\x16\xce\xb2\xf9\x9e\xe5\x48\xab\x35\x05\xeb\x74\xd1\xce\x08\x2a\x00\xdc\xdb\x6c\xb4\x66\x86\xcb\xde\xbc\xf0\x7b\xc5\xd6\x47\x63\xdb\x83\xfe\xfc\xc6\xa4\x3b\x39\xe9\x89\xb1\xf5\x57\xae\x15\xaf\xae\x5b\x7f\x61\x6c\xe3\x9f\x14\x5b\x59\xf2\x30\x3e\x66\xcf\x77\x29\xb6\x1a\x4d\xa6\xe5\xd8\x23\x53\x68\x7c\xf0\x55\xf0\x66\x10\x24\xdf\x69\xe8\x45\xbc\xd7\x62\xd3\x80\x67\x25\x79\xd3\x19\x40\xb5\xdc\xa0\x9c\x05\xd5\xd9\x2c\xb7\x05\x9c\x31\x16\x4b\x0b\x74\xab\xeb\x77\x58\x41\x87\x7d\xbb\xb1\x61\xf8\x56\x11\x5f\xbc\x03\x13\x97\xeb\x78\xa5\x37\x7a\xfe\xa9\x3a\x83\x53\x0c\x45\x4b\x98\x2a\x29\x2a\x4b\x48\x28\x8b\xe1\x8c\x4a\xb0\x14\x54\xa1\xa2\xa2\x1c\xca\xc9\x32\x8b\xa3\x12\x2d\xcb\x9
0\xc1\x54\x4c\x46\x59\x8e\x51\x30\x8a\xc4\x19\x59\xa2\x80\xa2\xbc\xec\x5f\xad\xfd\x40\x6c\xbd\x5a\x40\xa1\x28\x3c\xfe\x65\x6c\xc1\xcd\xf0\x73\x34\x8f\x46\xd6\x0b\x27\xe9\xfe\x75\x47\x3d\x39\x26\xb2\x66\x66\xe4\x24\xd3\x4c\x67\x78\x69\xf2\xa6\x32\x6f\x75\xd3\x01\xce\x64\xd4\x2d\xce\x72\xba\x5d\x77\x72\xa2\xb2\x64\x80\x3e\x6a\x51\xe9\xfc\x9c\x53\x0b\xb3\x94\xbe\x11\xb6\x13\x7e\xd4\xff\x18\x32\xe9\xea\x10\x1b\x11\xed\x3c\x2d\xa3\x6f\x36\xc6\xe0\xd5\x29\x3f\x9f\xbf\xf7\xd8\x25\xfa\x7b\xad\x5a\x1f\x8e\x6c\x8f\x46\xd6\xad\xd5\xcc\x57\x9f\x18\x59\x7f\x65\x5d\xf8\x67\x44\xd6\x7b\x23\x1b\xff\xa4\xc8\x7a\xef\x0e\x26\x88\xac\xc3\x54\x89\xef\x36\xd6\xcd\x02\x59\x31\x52\xf4\xec\xad\xda\xb7\x86\x85\x5e\x86\x5c\x53\x52\xce\x6a\x7e\x0c\x00\xc4\x97\xfd\x2e\x54\x37\xa3\xc6\x60\x63\xa6\x97\x72\xab\x66\xf2\x85\x9e\x8d\xd7\xd3\x5b\x5c\xec\x2b\xe8\x14\x1a\x62\xc5\x12\x21\xa7\x91\xcd\x72\xb9\x8a\xa9\x15\x25\xe9\xaa\xf5\xf8\xe9\xbc\xf0\xaf\xa5\x86\x7f\xd6\x6f\x39\x87\xdb\xdd\x53\x6e\x87\xdf\xd2\xbf\xf5\x57\x66\x43\x18\xbd\x5f\x25\xe6\x73\xb9\xf0\x2f\xf3\x47\x09\x22\x8d\x56\xa9\xc6\xb7\x86\x48\x45\x18\x22\x9f\x35\xe5\x84\xdb\xe8\x6f\xc6\x45\x3e\x3f\x89\xeb\x08\xd6\x73\x9c\x9f\x23\x7c\x95\xfb\xc8\xaf\x76\x45\x7e\xe2\xea\xf0\x82\x8a\xd0\x7b\x60\x8e\xde\xf9\x32\x7e\x8a\x74\xc7\x64\xcf\x09\x77\x17\x63\x48\x57\x2c\x35\xbb\x02\xf2\xf9\x00\xfe\x8a\x1c\xe0\x77\x7f\xfb\x03\x6e\x54\xcd\x73\xa6\xf5\x66\xc1\x6f\x9a\xd4\x98\x77\xdd\x5d\x79\x9f\xdc\x73\x25\x3b\x4f\xe4\x92\xa4\x17\xd8\x4a\x2c\x79\xec\x33\xc2\x57\x9f\xc2\x7d\xae\xf4\x71\x64\x2e\xc9\x7f\x91\xb5\xab\x1a\xf0\x4d\x5a\xda\x7a\xd6\xbe\x13\xa4\x24\xe6\x84\xc1\x15\x19\xb2\x2d\x81\xef\x08\x3e\xe8\x31\x16\xa4\x2e\x46\x9d\xa1\xdb\x2e\x89\x05\x44\x72\x2c\x08\xc3\xde\x15\xcf\x8d\xef\x63\x8f\xf3\xe3\xe3\x49\xc6\x51\x8c\x5f\x4b\xfb\x5f\x4a\xbc\x9b\x9d\x03\x8a\x30\x27\x47\x7b\x81\x63\x7e\x7c\xe0\x57\x64\xf7\x23\xa2\xbb\x9f\xa5\x3e\xc7\x5c\xe8\xb7\xf7\x93\x31\xb8\x34\x6d\x67\x62\x41\xfb\x2c\x9f\xe1\x5f\xf2\x4f\xc4\x6b\x68\xc0\x17\xa4\x5f\x14\x5a\x02\x12\xc6\x51\x6a\x23\x62\xbd\xe3\xe5\xf0\x3f\x4f\x58\x9f\x02\x7b\xfa\x04\x9e\x5d\x34\xc9\x98\x0d\xfb\x8a\x3b\xea\x9c\x3e\x35\xc3\x80\xd6\xb3\x58\x3b\x20\x4b\xc6\xa0\x0f\x7f\xc2\x66\xa0\xd8\xf3\x77\x2f\xeb\xd8\x7f\xa7\xf7\x23\xa6\xeb\x63\x48\xc6\x7f\xf0\x3b\xc1\x3b\x7b\x7d\x45\xc0\x72\xa9\x6b\xb2\x1f\x9f\x4d\x4b\x89\xc9\x9b\x63\xe8\x3a\xab\x77\xff\x0e\x4e\x83\xb4\xed\x33\x1c\x41\x17\x66\x7b\xf7\x6b\x51\x47\x1c\x9f\xa6\x11\x4d\x79\x45\x3e\x79\x83\x3f\xc5\x31\xab\x29\x4f\x62\x53\x53\x12\x33\xb8\xff\x3d\x6c\xe5\xf5\x4c\xee\xbb\xca\xb4\xb9\x1c\x2f\x9f\xc5\x77\x80\x2b\xcc\x7a\xcc\xda\xe1\x2e\x49\xce\x0b\xe0\x6c\x9e\x27\x40\x80\x2b\xc6\xa6\xef\x14\x21\x8c\xe1\x9c\x10\xe6\xd2\xb5\xca\xa9\x79\x97\x0c\x01\xf3\x07\x1c\xf7\x2a\xff\xb2\xa2\xed\xdd\x2f\x7d\xbb\xb1\xeb\x71\x5d\x1f\xa3\x0b\xb3\xbc\xfb\xa9\x81\xe3\xe8\x77\x96\xa3\xb0\x5e\x9f\xc5\xd6\x09\xce\x84\xe1\xf9\x0c\x83\x8e\x3f\x25\xce\x23\xd3\x7a\xc0\x71\xbf\x49\x5e\x33\x3f\xc7\x52\x5c\x22\x12\xb0\x6f\x5c\x2f\x9c\x65\xf8\x14\x59\x84\x73\x05\x46\xf8\x0c\xc3\x5e\x65\xd0\x7b\x45\xe9\x73\xd8\xf3\x50\x25\x62\x6e\xf7\x5e\xd4\x58\xd6\x3c\xde\xa1\xf5\x34\xf5\x45\xf0\x5d\x63\x32\x02\x9e\x84\xd3\xe7\xe8\xf1\x08\x5b\x52\x2e\xaf\x6a\xf3\x39\xbc\x25\xe2\xe9\x32\x2f\x3b\x8e\x75\xd3\x9c\xaf\x96\x8f\x71\x74\x8c\x2b\xf1\x8c\xfa\x1b\x90\x18\xfe\x96\x40\xb3\xc6\x8e\xb6\x80\x4f\xe1\x30\x8a\x2d\x99\xdf\x06\x0c\xbe\x22\x51\x96\x5f\x91\x20\xc4\xcb\xba\x69\x43\x65\x0c\x9c\x18\x21\x9e\x10\xb7\x03\x3c\xd7\x38\xbe\x71\x75\xe4\x62\x7d\x9a\x76\x6f\x50\xec\x55\xbd\x69\x86\x02\x37\xe3\xc8\x92\xc3\x1e\x9b\xc6\x18\x28\x8a\x05\x
6d\xfb\x51\x85\x5e\x25\x70\xb4\x71\x0e\x6e\x47\xb6\xaa\x3e\xe0\x0d\xbc\x3f\x6e\x07\x97\x70\x5f\xe7\xf8\x8c\x97\x1d\x23\x0c\x56\xe1\x2e\x3e\x67\xbb\xbc\xbf\x28\x72\x11\xeb\xd5\x65\xbf\x0b\x74\x85\xd1\x60\x0d\xe5\xa2\xdc\x1b\xd1\x93\xb8\x3d\x87\xfa\xea\xf2\x2d\xa9\x25\x87\x90\x3f\xdb\x18\x8e\x50\xdf\xb3\xde\x8c\x47\xb7\x58\x9a\x96\x1b\xf8\xde\xa1\x65\x6b\xa6\xf1\x7c\x45\x47\x29\x5c\x67\x3f\x32\x20\xb9\x30\x41\xe8\x49\x5e\xe4\xb8\x43\xff\x21\x1a\x57\x25\x09\xc1\x26\x17\x62\x69\xc1\x77\xcd\x5c\xd9\xbf\x44\x9a\x73\xc4\xae\x8a\x75\x6e\x50\x72\xf9\x76\x45\x94\x9f\x26\xd3\x8e\xc0\x55\x39\x62\xcb\x8f\xc7\xa8\x0f\x2f\x57\xfe\x19\xae\x1d\xc5\x7e\x76\x03\x7c\xab\x83\x1f\x23\x3d\xde\x42\x3d\xc9\xc3\x2f\x91\x48\x22\xc3\x95\x7d\xdd\x45\x62\xcf\x4b\x5f\xa7\x88\x13\xf1\x7e\x3d\x89\x85\x37\xdb\x3f\xc3\x6c\x4e\xf1\xdf\xbd\xd5\xf7\x16\x71\xfb\x44\xbe\xab\x30\x8e\x25\xd3\x9c\xdf\xad\xe5\x0b\x38\xaf\x2e\x11\x3e\x7f\x56\xa0\x03\x34\xdd\x46\xbe\xfe\xf3\x9f\xc8\x8b\x6d\xea\x4a\xe8\x78\xf3\xe5\xfb\x77\x07\x6e\x9c\x2f\x5f\x5e\x91\x78\x40\xd9\x54\x92\x01\xfa\x87\x23\xf1\xa0\x92\xb9\x9a\x4c\x9d\x44\xe4\x8f\x40\x2f\x33\x70\x04\x1a\x61\x61\x57\x14\xf7\x8c\x0c\xf9\x81\x10\x44\xe2\xce\x00\x4d\x19\xab\xa1\x73\xbb\x7c\xe5\xd7\xf4\x07\x04\x64\x91\x7c\xbd\x25\x94\x0a\xe2\xfe\x4c\x0e\x69\x09\x79\xa1\x25\x88\x59\xa1\x1d\x39\xa6\xf2\xee\xd6\x45\xa4\xdb\xc8\xb9\x26\xd3\x12\xda\x9d\x56\x29\xdb\x71\xbf\xca\x09\x55\xa1\x23\x20\x59\xbe\x9d\xe5\x73\xc2\x85\x83\x4d\x77\xdf\x71\xfc\x71\x1c\x29\xc5\x3c\x4f\x19\xc7\x74\xae\x9c\x5a\xc6\x71\x72\xac\x9f\x68\xd9\xe8\xac\xb2\x82\x85\xfe\x95\x23\xde\x58\x4d\x04\x5b\xd9\xbf\x5c\x0f\x61\x3e\xce\x69\x61\x57\x25\xb8\x6c\x30\xb7\x69\xe0\xb4\xa8\xf4\x17\xaa\x21\x86\x99\x63\x5d\x9c\x29\x83\x3d\xd7\x28\xa2\x25\x8e\xdf\x41\x21\xf1\xa6\x71\x52\x43\x4a\x62\x1d\x08\x50\x14\xa8\x20\x0b\x60\xac\x80\xae\x6f\x8f\x38\x2d\xe5\x11\x61\x50\x6a\x77\xda\x3e\xcf\xbe\x64\xdf\xe6\x70\x3b\x7e\x07\xfa\x0a\x8e\x5d\x8c\x10\xc9\xb5\xea\x8d\x30\xe3\x87\x51\x11\x48\xbf\x45\xe2\x0f\x0f\x3e\x4a\xe0\x3c\xee\x3f\x77\x29\xd2\x07\x8f\x52\xfe\xec\xe9\xed\xe2\xe5\x2a\x49\x9e\x02\x0b\xc8\x0e\xb4\x90\x77\x60\x6d\x35\x63\xf2\x19\xa7\xa8\x2f\xfb\x93\xd2\xd7\xeb\x58\x3c\xa2\xd7\xf0\xfc\x11\xe9\xf4\x8d\x72\xbb\x6f\xf2\x85\x9b\xe5\x58\x33\x26\xd0\x76\xc6\x3a\x70\xff\xf3\x56\xda\x2f\xaf\xc8\x0b\xfa\xf2\xe5\xcf\x53\x5b\x89\x20\x3a\x67\x2c\xe7\x34\x7d\xdc\xf1\x31\x87\x5b\x77\xc6\x8f\xf4\xb9\x2f\x98\xd8\xda\xc4\x70\xd7\xfa\x09\x14\xba\x1b\x74\x46\x1b\x34\xf9\x25\x81\x2e\x11\x9f\xd8\xfd\xe3\xd7\x50\x9b\x4c\x1d\x44\x33\x1c\x38\x81\x56\x64\x1a\xbf\x7e\x45\xd6\x10\x59\x6b\xba\x8e\xbc\xad\xa0\xb5\x45\xa4\xed\x8e\xa0\x6d\x22\xce\x14\x38\x88\x66\x23\xeb\xe9\xfe\x5b\xcd\x46\x9c\x29\x44\x54\xcd\xb2\x1d\x44\x73\xe0\x02\xd1\x0c\xef\x1b\xd9\x5c\x2c\x4d\x5b\x73\xa0\xab\xdc\x04\x6c\x1d\x69\xdb\x47\xbe\x6f\xdb\xf8\xf2\x47\x64\x5e\xcf\xae\x3a\xdd\x39\xdd\x85\xb2\xc5\x6a\x03\x15\x57\x33\xae\x9a\x3e\xd3\xdc\x97\xa0\xf9\xd1\x85\x09\x75\x55\xc4\xc0\x9d\x27\x16\x5a\x9b\xbb\x68\x6c\x73\x65\xc9\xf7\x61\xda\x2d\x42\x3d\x8e\xfd\xba\x5f\xec\xf8\x20\xdc\x36\xfc\xe6\x88\x76\xb3\x8a\x28\xc0\x01\x6e\x1a\x43\x94\xd5\x62\xe9\xe9\x59\x87\x0e\xf4\xe2\xe4\xff\x0b\x00\x00\xff\xff\x7b\x3c\x5b\x1f\x85\xd6\x00\x00")
+
+func failed_transactionsHorizonSqlBytes() ([]byte, error) {
+	return bindataRead(
+		_failed_transactionsHorizonSql,
+		"failed_transactions-horizon.sql",
+	)
+}
+
+func failed_transactionsHorizonSql() (*asset, error) {
+	bytes, err := failed_transactionsHorizonSqlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: 
"failed_transactions-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0xa, 0xb6, 0x80, 0x69, 0xce, 0x9d, 0xd9, 0x5b, 0xf6, 0xcd, 0xf9, 0x2, 0xf0, 0x55, 0xc3, 0x6, 0x86, 0xf3, 0x5b, 0x8c, 0x13, 0xb9, 0xde, 0xf0, 0x25, 0x47, 0xb0, 0xef, 0x10, 0xee, 0x11}} + return a, nil +} + +var _ingest_asset_statsCoreSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x67\x8f\x22\xc9\xf2\x37\xfa\x7e\x3f\x05\xda\x37\xb3\x47\xcc\x39\x64\x96\xc9\xcc\xda\xbd\xfb\x97\x0a\xef\xbd\xbf\xba\x1a\xa5\x85\xc2\x54\x41\x55\x61\xaf\xfe\xdf\xfd\x11\xd0\xdd\xd0\x34\xed\x80\x31\x8f\xce\x96\xb4\xb3\xdd\x54\x10\x11\xf9\xcb\x88\xc8\xc8\x48\xd3\xff\xfe\xf7\x6f\xff\xfe\x77\xa4\xea\x05\xe1\xc0\x97\x8d\x5a\x31\x22\x68\x48\x19\x0d\x64\x44\x2c\xa6\xb3\xdf\xfe\xfd\xef\xdf\x76\xef\x93\x8b\xe9\x4c\x8a\x88\xf2\xbd\xe9\x91\x60\x29\xfd\xc0\xf1\xdc\x88\xf5\x1f\xf4\x1f\x78\x42\xc5\x36\x91\xd9\xe0\xdb\xee\xeb\x67\x24\xbf\x35\x52\xcd\x48\x10\xd2\x50\x4e\xa5\x1b\x7e\x0b\x9d\xa9\xf4\x16\x61\xe4\xef\x08\xf8\x6b\xff\x6a\xe2\xf1\xf1\xcb\x4f\x1d\x31\x91\xdf\x1c\xf7\x5b\xe8\x53\x37\xa0\x3c\x74\x3c\xf7\x5b\x20\x83\x1d\xdf\x97\xc4\x7c\xe2\xec\x58\x4b\x97\x7b\xc2\x71\x07\x91\xbf\x23\x5f\x5a\xcd\x34\xf9\xf2\xd7\xa3\x6c\x57\x50\x5f\x7c\xe3\x9e\xab\x3c\x7f\xea\xb8\x83\x6f\x41\xe8\x3b\xee\x20\x88\xfc\x1d\xf1\xdc\x07\x1e\x43\xc9\xc7\xdf\xd4\xc2\x3d\xc8\x62\x9e\x70\xe4\xee\xbd\xa2\x93\x40\x3e\x13\x33\x75\xdc\x6f\x53\x19\x04\x74\xb0\x27\x58\x51\xdf\x75\xdc\xc1\x81\xc4\xf7\x56\xdf\x02\xc9\x17\xbe\x13\x6e\x76\xcc\x95\xfa\xeb\x01\x00\x49\x7d\x3e\xfc\x36\xa3\xe1\x30\xf2\x77\x64\xb6\x60\x13\x87\x7f\xdd\x21\xc6\x69\x48\x27\xde\xe0\xaf\xdf\x7e\x4b\xd6\x2b\xd5\x48\xae\x9c\x4c\x75\x23\xb9\x74\x24\xd5\xcd\x35\x9a\x8d\x07\xca\xff\x2c\x66\x03\x9f\x0a\x39\x74\x82\x90\x6d\x02\x39\xff\xeb\x4d\xea\x80\xcf\xe6\x0b\xcf\x5f\x4c\x83\x8f\x11\x4b\x77\xf9\x11\xca\x89\x14\x03\xe9\x7f\x84\x72\xa7\xa7\x92\xf2\x83\x94\x1f\x20\x63\x32\x08\x3d\xa5\xa4\xef\xb8\x42\xae\xdf\xa6\xa5\x9c\x7b\x0b\x37\x64\x74\x42\x5d\x2e\x83\xbf\x7e\xb3\x8b\xcd\x54\x3d\xd2\xb4\xe3\xc5\xd4\x09\x75\xa5\x5c\xec\x5d\x80\xd7\xf3\x37\x91\x3d\xf7\x44\xa5\xdc\x68\xd6\xed\x5c\xb9\x79\xf2\xa5\xe7\x84\xdf\x66\x63\xb9\xf9\x08\xff\x70\xfd\x3e\xeb\x27\x9a\x4f\x70\x55\xf2\x03\x3a\x9f\x92\x7d\x9c\xb7\xbf\x08\xc2\x89\xe3\xca\xe0\x2d\xce\x4f\x44\x1f\xe6\xbb\xd3\x42\xee\xa3\xc1\x1b\x7c\x8f\x44\x1f\xe7\xfb\x64\xf2\x6f\xf1\x7d\x22\xfa\x30\xdf\x03\xbd\xe3\x2a\xef\x0d\xbe\x47\xa2\x0f\xf3\x9d\x2d\x58\xb0\x60\x6f\xf0\x3c\x10\x7c\x86\xdf\xc4\x09\x86\xf3\x85\x5c\xbc\x85\xec\x29\xd9\xc7\x79\x4b\xe9\xbf\x05\xeb\xfe\xfd\x87\xb9\xed\xdd\xf8\x2d\x76\x07\x82\x0f\xf3\x3b\x44\xa5\xa1\xa4\xe2\x6d\xb6\xcf\xe8\xbe\x33\xf7\x87\x48\x29\xe7\xdf\x3e\x28\x86\x51\xf7\x0d\xe6\x8c\xba\x1f\x56\xf8\x21\xfa\xbd\xa5\xeb\x23\xc9\x67\x79\xee\x72\x80\xf7\xd9\xee\xa8\x1e\x38\xef\x69\xcf\x19\x5f\x0c\xb9\x6f\xd3\x3e\x85\xc6\xf7\xc8\x8e\x81\xee\x1d\xca\xa7\xc0\xf5\x36\xdd\x31\x10\xbd\x43\xf7\x14\x58\xde\xa5\xfb\x90\x7e\xc7\x80\xf2\x36\xdd\x21\x48\xbc\x4b\xf3\xe4\xf2\xef\x50\xee\xfc\xf8\x6d\x92\x83\x6f\xbe\x4d\xf3\xcc\x15\xde\x26\x65\xd4\x7d\x9b\xe0\xd1\x54\x3f\x44\xb5\xb3\xbc\x07\xc2\x54\xb7\x99\x2a\x37\x72\x95\xf2\x29\xf1\x64\x36\x08\xe6\x93\x07\x8a\x46\x22\x9b\x2a\xd9\x2f\x78\xfd\xf5\xdb\x21\x37\x2e\xd3\xa9\xfc\xf3\xf1\xb3\x48\x73\x33\x93\x7f\x3e\x7c\xe5\xaf\x48\x83\x0f\xe5\x94\xfe\x19\xf9\xf7\x5f\x91\xca\xca\x95\xfe\x9f\x91\x7f\xef\x53\xe6\x44\x3d\x65\x37\x53\x8f\x9c\x1f\xf9\xfd\xf6\x8c\xe3\xf3\x97\x0f\x8c\x13\x95\x52\x29\x55\x6e\xbe\xc1\xf9\x40\x10\xa9\x94\x9f\x33\x88
\xe4\x1a\x91\x2f\x8f\xf9\xed\xe3\x67\xc1\x9e\xc9\x97\x73\xc9\x8f\xcd\x7f\x90\xf9\x84\xd0\xbb\xed\x79\x86\x65\xb9\xd2\x3c\xc3\x33\xd2\xc9\x35\xb3\x4f\x6a\x9d\x26\xb4\xcf\xc4\x1f\xb9\x9c\x29\xf2\x99\xc6\xbf\x60\xb2\x07\xa0\x5a\x8c\xcd\x06\xbb\x59\xcc\xcc\xf7\xb8\x14\x0b\x9f\x4e\x22\x13\xea\x0e\x16\x74\x20\xf7\x30\x7c\x30\x01\xdf\x91\x09\xa9\xe8\x62\x12\x7e\x0b\x29\x9b\xc8\x60\x46\xb9\xdc\xcd\x26\xbe\x9c\xbd\x5d\x39\xe1\xf0\x9b\xe7\x88\x93\x09\xc2\xb3\xc6\x9e\x1a\xe4\x43\x33\xf7\xa6\x7b\x6c\xe4\xa3\x01\x5c\x02\xfc\x60\xe5\xa7\x41\xf7\x8f\xdf\x22\x91\xc8\xe3\x27\x8e\x88\xf0\x21\xf5\x29\x0f\xa5\x1f\x59\x52\x7f\xe3\xb8\x83\x3f\x4c\xf4\xaf\x7d\xdf\x94\x5b\xc5\xe2\xd7\x3d\xf5\xee\x8b\x2e\x9d\xca\x0b\xc4\x84\x5c\x22\x5e\xd2\xc9\xe2\x12\x35\x84\xda\x39\xf9\x84\x06\xe1\xd4\x13\x8e\x72\xa4\x88\x38\x6e\x28\x07\xd2\x7f\x22\xf9\xed\x5f\xe7\x7d\xff\xe4\xc5\x37\x62\x11\x5c\x05\xc4\xc3\x44\x20\xc2\x9c\x81\xe3\x86\x67\x2f\x03\x39\x77\x17\xd3\xcb\xef\xdc\xc5\x34\x58\x30\xe9\x86\xfe\x6e\x2a\x78\xde\xcc\x03\x8d\xe3\xaa\x09\xdd\xcd\x18\x85\x0c\xc2\xcb\xea\x1c\x08\x87\xde\x54\x0a\x6f\x4a\x1d\xf7\x02\x95\x61\x9c\x2b\x1d\x0e\x7d\x19\x0c\xbd\x89\x08\x22\xa1\x5c\x9f\x6b\xa6\x26\x74\xf0\x9a\x46\x6f\xf6\xcd\x03\x22\x8b\x9d\xd4\x89\x43\x99\x33\x71\xc2\x5d\xe3\x0e\xed\x7f\x84\x64\x32\x79\xeb\xb5\x33\x70\x77\xb9\xd0\x4e\xad\xc3\x27\x27\xd9\xc0\x53\x6a\xf1\x00\xfa\xb7\xfd\xb4\x3a\x92\xc8\xa6\x12\x85\xc8\x1f\x7f\x3c\x76\xc5\xff\xfc\x1d\x01\xff\xfa\xd7\x1b\xdf\x3e\x57\xf0\x9c\xcf\x8b\x06\xbc\xc7\xf1\x59\x5f\x9e\x71\x7b\xde\xcf\xef\x71\x7a\x09\xcf\x19\xbb\x0b\xf8\x1d\x78\xbe\x74\x8c\xdd\xf8\x77\xad\x4f\xec\x52\xc6\x83\x3b\xb8\x9e\x90\xa7\xbe\xf0\xcc\x07\x5e\x0a\x7d\x3e\x3e\x5f\x2b\xfe\x79\x62\x7c\x50\xe4\xe1\x33\x1a\x0c\x4f\x94\x41\x2f\x6c\x7b\xe6\xcb\xe5\xbb\x44\x6c\xc1\xc7\x32\x9c\x38\x41\xf8\x2e\xe9\x53\xb6\xfd\x68\xee\x87\x8f\xf9\xc4\x0b\x64\xe8\x4c\x5f\xf1\xfc\x7d\x60\xbd\xe0\x5b\x27\x7d\xfe\x3c\xa9\x7f\xe2\x77\xd6\xdf\x47\x39\xaf\x98\xce\x6b\x73\x83\xe7\x6c\x8e\xad\x78\xcd\x5a\x1e\x92\xaf\x6b\x7b\xec\x61\xe2\xf5\xc7\x93\x93\x4b\xff\x83\x11\xf4\x50\x79\x11\xaf\x45\xd0\xbd\xb9\xd3\x20\x90\xe1\x25\x3c\x0f\xbe\xfa\xea\x6b\x3a\xdd\xb9\xd5\x65\xd6\x33\xdf\xe1\xd2\x7d\x25\x88\xed\x5f\xbe\x16\xe1\xf6\x2f\x23\xc2\x5b\xb0\x89\xdc\xd9\x1b\x77\xf6\x15\xc9\xbb\x46\xd1\x93\x1e\x7e\x98\xb2\x1e\xda\x72\xd6\xaf\x0f\x0d\x7c\xc5\x36\x1e\xbe\xf9\x80\xf0\xd9\x57\x1f\x71\x7f\xcd\x20\x0e\x09\xfb\xb5\xf6\x70\x98\xd6\x1f\xcc\xc1\x99\x5d\x1a\xf8\xcd\x17\x9e\xeb\xf9\xe1\x13\x1a\xc9\x54\xda\x6e\x15\x9b\x11\x70\x3e\x6c\xca\x75\x48\xc3\x50\x4e\x67\x61\x64\xe7\x16\x41\x48\xa7\xb3\xc8\x2e\x65\xf2\x16\x87\x4f\x22\x5b\xcf\x95\x2f\x07\x5b\x45\x9d\xc9\xc2\x3f\x19\x6a\x5f\x93\x10\x6e\x66\xf2\xfd\x4e\x39\x94\x25\x4e\xf8\xbe\x0c\xfb\x4f\x12\x5f\xe9\x9d\x87\xca\x86\xe7\x9f\x77\xea\x1f\x7b\x24\xfe\x27\x02\xfe\x15\xb1\xcb\xc9\xc8\xe1\xd7\xff\xe7\xef\x08\x32\x4d\xdd\xfc\xd7\xc5\xbe\x3a\x9d\x86\x5d\xdd\x65\xa7\x55\x9e\xd3\x98\xfb\x0a\x1a\x87\x42\xdb\xce\xeb\x2e\x2a\xb4\x9b\x3b\xde\xa0\x4a\xb0\x60\x0f\x4a\xf8\x32\x78\x36\x00\xe9\x17\x33\x46\x5f\xd2\x27\x5f\x7a\xa9\xcf\xc9\x9c\xf7\x5a\x9d\x4e\x8a\x75\x1f\x18\x19\x0f\x8a\xcd\x03\xf9\xd6\x08\xf3\x52\xcf\x93\x39\xfc\xb5\x7a\x1e\x59\x7c\x5c\xcf\x17\x83\xdc\xd9\x7b\xe9\x2e\xe5\xc4\x9b\xc9\x77\x86\xb4\xa3\xe8\x1b\x06\xa2\x93\x72\xc7\x0d\x10\x3c\xd6\x6b\xff\xf8\x48\x3f\x1c\xad\xe8\x3d\x20\xe6\xaf\x0c\x34\xcf\x41\x78\xac\x03\x3f\xe3\x78\x0e\xc4\x33\x69\xaf\x82\x71\xac\x11\x5d\x0d\xc6\xb1\x28\xfe\xc7\xd1\x6f\x9f\x4f\xde\x2e\xf8\xd4\x5b\xde\x7d\x52\xe1\xba\x56\xab\x93\x25\x80\x6b\xa6\x5d\xfb\x11\xff\x8d\x48\xed\x04\xc1\x42\xf
a\x1f\x67\xc5\x3d\x71\x71\x76\xfa\x02\x96\x70\xe2\x4c\x9d\x57\x32\x8a\x37\xe7\x82\x3f\x73\x56\x75\x62\x9d\x27\xab\x2a\x57\xcd\xa2\x4e\xbf\x7f\xaf\x79\xd4\x09\xcf\xeb\xe7\x3f\x6f\x71\x3d\x74\xda\x19\xa7\x87\x9e\xfc\x9f\xcb\x8e\xf7\xac\xdc\x7b\xb5\x91\x9f\xae\xa1\x1d\xcc\x3c\x5c\x3f\x0b\xc5\x1f\x98\x6f\x9c\x1b\xe0\x7a\xbf\x4a\xf9\xea\x5b\x3e\xa4\xee\x40\x5e\x9c\xd8\x9f\x82\x73\xba\x6c\x77\x7d\xac\x3e\xd6\xce\xaf\x87\xe8\x07\xe3\xc3\x3c\xb1\xb9\x04\x4e\xb8\xf6\x65\xb0\x98\x5c\x8c\xee\xe1\x7a\x2a\xdf\x9d\xcf\x1d\x97\x58\xaf\xc7\xf3\x6c\xdd\xe2\x5a\x50\xcf\x56\x9c\xff\xf8\x10\x70\x0f\x5f\x7a\x0b\xbd\x07\x92\x4b\x40\x7c\xcc\xec\xce\x56\xb8\xaf\x01\x2a\xb9\x9b\x59\x2b\xcf\x7f\xa7\x18\x1a\x49\xda\x4d\xfb\x1d\xcc\xde\x66\x19\x7c\x9a\x5f\xae\xdc\x48\xd5\x9b\x91\x5c\xb9\x59\x39\x16\x15\xdb\x76\xb1\x95\x6a\x44\xfe\xf8\x92\x89\xd7\xab\xbd\x6c\xae\xa8\x25\x72\x7a\xba\x5c\x33\xe2\xdd\x62\xba\x54\x4e\x16\xd3\xf9\x56\xb9\xda\xd2\xb2\x3d\xbd\x5f\x4a\x37\xb2\x95\x72\x2b\x91\xaa\xd8\x8d\x0e\xae\x25\x70\xa5\xab\x65\xbf\x7c\x8d\x58\x87\x07\x3d\xfc\x1f\x03\xf0\x35\xa2\x7f\x8d\x80\xaf\x07\x94\x23\x5f\xbe\x7c\x8d\x7c\xb1\x6b\xb6\x6d\xdb\x7f\xff\xfd\x65\xff\x42\x7b\x7c\x77\xfc\xf7\x5f\x7f\xbd\xa7\x61\x42\xd3\x6b\x69\x2d\xdb\x4a\x99\x9a\x5d\xea\xb6\xd2\xad\xac\x6e\xf7\xf2\x76\xb7\x9b\xe9\x76\xdb\x5a\x3b\xdb\xed\xf5\xea\x28\xd5\xeb\xa6\x9a\xd5\x42\xb2\xdb\x6f\xd8\x1d\x84\xbb\x15\xe3\xa8\xa1\xb6\xd3\x8c\x98\xc4\xb2\x74\x03\xed\x7e\x3e\xaa\x28\x32\xed\xad\x48\x98\x23\xa6\x81\xbf\xcf\xd4\x85\x5f\x23\xe8\x1a\x75\x6d\xb3\x13\xaf\xf6\x6c\xb3\x67\x74\xec\x54\xb6\xdb\xa9\x6b\xad\x42\x45\x6b\x55\x8c\x78\x2b\x93\x6d\xd5\xb0\x91\x6a\x55\x0b\x95\xb2\x56\xcb\xb6\x8d\x4e\x3d\x5b\xc9\xd5\xcb\x85\x42\x56\x3b\x01\xf4\x44\x5d\xd3\x42\x27\xa8\x5d\x40\x94\x5c\x85\x68\xb7\x90\x41\xf5\xb2\x51\x29\xe7\x52\xd5\x44\xa9\x9c\x8e\x63\x5d\xb3\x0d\x1d\xf5\xcd\x6a\x39\xd9\xa8\x17\x33\x9d\x02\xce\xc4\x8b\x89\x52\xad\x98\x4b\x57\x8c\x06\x4e\xf5\x3a\xed\xd6\x51\x45\xe3\x99\x8a\xe4\x0d\x15\xb5\xd7\x54\x7c\xc5\xcc\xcf\xcb\x83\x37\x78\xcc\xeb\x45\xbf\xcf\xba\xcd\xf3\xc2\xdf\x13\x8e\x48\x17\x16\x51\xa6\x8e\xa4\x44\x44\x40\xa6\x61\x66\x32\x62\x29\x4d\xa7\xca\xd4\x21\x64\xd8\x44\x16\xd5\x0c\x45\x15\x34\x80\x4e\x05\x60\xa6\xc6\x90\xae\x33\x80\x99\xb4\xac\x1d\x54\xe0\xc6\x67\xc7\xc3\xc4\x1a\xd5\xa4\xae\x29\xa5\x19\x84\x02\xcc\x80\xc4\x40\x09\xa8\x90\xd0\x21\xe1\x50\x51\x2e\x34\xc0\x10\xe7\x80\x70\x5d\x17\x26\xc6\xa6\x66\x5a\x04\x11\xa8\x99\x14\xa2\x83\xb1\x83\x5d\xc7\xfd\xb2\x4f\xbc\x5b\x70\x8c\x4d\x6c\xd3\x28\xc4\x71\xd2\x4d\x5a\x59\x0d\xac\x47\xf1\x68\x00\x06\x61\xb0\xca\xad\xb6\xb0\x2b\x1a\x9d\x1e\x8d\xe7\x69\x7a\xb0\xa3\x4f\x95\x8d\x22\xdd\xce\xb4\xda\xbb\x9c\xfb\x76\x17\x1a\x7b\xb2\xf8\xf8\x07\x34\xe4\xae\xcf\x97\x33\x5f\x7f\xc5\x50\x0d\x6a\x30\xa8\x94\x06\x0d\x6e\x10\x88\x95\xd0\x80\x4e\x88\xa9\x84\x32\x4d\x83\x13\x41\x90\x86\xa1\x40\x98\x10\x04\x91\x89\x4c\x68\x02\x81\x85\xc4\x26\x42\x4a\x28\x8b\xec\x8c\xec\x1e\xc6\x2e\x85\x6e\x61\xa6\x21\x0b\x42\xcd\xe4\xdc\xc2\x16\x35\x31\x43\xc8\xa0\x10\xe9\x5c\x42\x43\x1a\x42\x93\x42\x99\xd8\xb4\x28\x20\x16\xc4\x12\x9b\x8c\x12\x4b\x70\xcc\x95\xd4\x0f\xf1\x04\x9a\xa6\x65\x62\x8b\x20\xf8\x60\xb1\x09\xad\xda\x1f\xc1\xf2\xc2\xf4\x00\xcb\xe3\x8e\xe1\x6e\x2a\xcb\xd6\x3a\xa3\xb7\x67\xde\x38\xba\x4c\xdb\x95\x30\x01\x0b\x5a\x09\xc7\x31\xea\x5b\x99\x55\x17\xcd\x49\x61\xbb\xee\xb6\x92\x63\x3d\xaa\x44\x3f\x18\x06\xdb\x44\xa7\xab\x35\xd8\xc2\x76\xf3\xe5\x92\xd3\xf0\x16\x93\xc4\xe6\x60\x0c\xdd\x6a\xbb\x04\xf7\xd6\x91\x7b\xfa\xe7\x10\xde\x82\xe3\xef\x2b\xbb\x5a\x7b\xb0\x9d\xc4\x66\x1c\x93\x9d\x8c\xdd\x4b\x55\x86\x13\x6a\x35\xd6\x
fd\xec\x68\xbc\xce\xe4\x6a\x08\xce\x72\xce\x68\x5a\x4d\x5b\xc3\xf6\x0c\xd6\x96\xa6\x1f\x56\xba\xc1\x74\x5c\xcf\xf3\x4d\x77\x3a\x91\x5a\xbf\x90\x1e\x6d\x87\xcd\x31\x58\xe8\x50\xf4\xbd\xdc\xb8\x6b\xc2\x85\xe7\x72\x55\x35\x56\x7b\x51\x17\x2c\x3a\x15\x5c\xb2\x8a\x47\x8b\x4e\x82\xfc\x77\xb4\xbd\xef\xf3\x7c\xd0\xa2\xb1\x2e\x0c\x53\x00\x2c\x99\x32\xb0\xce\x85\x4e\x11\x95\x9a\x30\x28\x45\xca\x44\x02\x40\x85\x94\x12\x18\x0a\xcc\x74\x13\x61\xaa\x84\x09\x10\x60\x48\x71\x4b\x83\xcc\xdc\x85\xbc\xbb\x78\x05\x37\x85\x69\x98\x4a\x31\x83\x9a\x4c\xe9\x48\xe3\x86\x45\x74\xc8\x09\xa3\x3a\x55\x80\x31\x1d\x70\x8a\x74\x46\x2d\x44\x2c\xdd\xd2\x39\x85\x54\x40\x26\x98\x44\xc8\xb2\xa8\xf8\xb2\x4f\x99\x9e\x2c\x5a\x7b\xb4\x68\x30\x2b\x66\x63\xb9\xd6\x3a\x97\x8e\x81\x41\x25\xd7\xb5\xac\x76\x33\xa7\x65\xf2\x6b\xe8\x3a\x99\xf4\xa4\x5f\x6f\x95\x95\x14\x1d\x66\x45\x7b\x4e\x30\x8b\xd7\x7b\xa3\xfc\x78\x34\xf3\x27\x6e\xaa\x5b\x93\xb5\xa1\xb6\xee\xaf\x67\xc6\x68\xd9\x99\x44\x47\xf3\xee\x76\xbd\x5c\x96\xdd\x76\xff\xc4\xa2\x07\x47\xc4\xb7\x86\x04\xbc\xec\x35\xfc\xc6\x26\xcf\x9c\x96\xb5\x5a\x19\xdd\x6d\x2c\x61\x60\x37\xdd\xce\xc5\x53\xd9\x71\x1d\xc2\xf1\x60\x2a\xa3\xc9\x6d\x1a\xb6\xba\xb1\xb0\xd0\x89\x95\x9d\x75\x7e\xb0\xde\x14\x3d\x44\x8a\xc1\x6a\x33\xad\xa0\x89\x37\x36\xab\x85\x4c\x98\xb1\x96\x53\xda\xf7\xf7\x46\x5a\xba\x60\xb1\xf9\xde\xa5\x5e\xff\x2f\xb0\x58\x0e\x90\xa0\x00\x0b\x4b\x28\x1d\x02\xc6\x95\x32\x00\x33\xf7\xa3\xb4\x6e\xea\x10\x99\x1a\xa2\xba\x49\xa9\x06\x31\xc0\x1a\x10\x58\x62\xaa\x10\x85\x9a\x0e\x19\x27\x64\x1f\x3f\xef\x61\xf5\xdc\x62\x1c\xa2\x9d\x29\x02\x4d\x53\x50\x68\x1a\xd3\x18\x33\x29\x94\x42\x49\x05\x2c\x46\xa8\xe2\xcc\xe2\x98\x5b\xd0\x34\x29\x16\xa6\x50\x08\x13\x69\xea\x5c\x98\xca\xda\xa5\xd5\xc6\x89\xc5\xea\x8f\x16\xab\x57\x5b\xdd\x9a\xf2\xad\x6c\xb5\x5c\x99\xfa\x0e\x2c\xcc\x96\xed\xd0\x56\x2c\x06\xd7\x10\x97\x3b\x72\x09\x6b\xbd\x22\x23\xe3\x61\xd8\xf1\xa3\x7c\x08\x98\x3b\x2b\x17\xb3\x35\xcb\x2b\x0c\x07\xa4\x92\xb4\x7b\x39\x68\x96\xdc\x05\xaf\xc7\xad\x79\xb7\xb4\x0e\xb3\xcb\x58\x14\xf0\x3d\xc2\x7b\x8b\x5d\x1d\x11\x1f\x18\x2c\x59\x9c\x56\xe3\xb8\xbc\xa8\x0e\x79\xba\xbc\xd0\xf5\xea\xa8\xe1\x8d\x78\xd5\xd3\x9a\x59\xb0\x36\x37\xc5\xe2\x82\x0d\xe7\xa5\xd1\x36\xbf\x8c\xf7\xcc\xaa\x9d\x5b\x66\x1b\x85\xcd\x02\x7a\x52\x8f\x92\xfc\xa2\x10\x2b\xba\xd9\x4d\xbd\x4d\x55\x5b\xd7\x64\xa5\x45\xa0\x9a\xec\x39\xd7\x2e\x58\x6c\xd5\xbb\xd4\xeb\xff\x05\x16\xab\xeb\xba\xe4\xa6\xb4\xa4\xb0\x00\x63\x92\x31\xd3\x82\x58\xb3\x34\x0b\x30\xcd\x80\x8c\x22\x53\x07\x42\x28\x4b\x21\x08\x34\x09\x34\xc3\x50\x9a\x34\x19\xb6\xb0\x06\x39\x94\xc6\xde\xda\xee\x60\xf5\x48\x62\x03\x52\x9d\x10\x2c\x10\xc1\x18\x09\x2c\x90\x29\x74\xa6\x01\xcd\x34\x20\x23\x0a\x50\x68\xea\x4c\x27\xba\xd0\x4d\x4b\xa7\x10\x9a\x52\x2a\x61\x6a\xc2\xc4\x26\x90\xf8\xcb\xd7\x88\x79\x62\xb1\xc6\xa3\xc5\x92\x78\xe8\x65\x35\x41\xd6\x09\x12\x03\x09\x18\xab\x5b\xa3\xd6\xba\xdf\xa0\xa3\xce\x3c\xd7\x8d\x6f\x92\x5d\x13\x59\xf3\xd4\x28\x43\x9c\x3c\x2b\x94\x0a\x8c\x10\x56\x58\x8d\xd2\xb1\x50\x99\x71\x2d\xf0\xe7\x93\x38\xec\x84\x70\x35\x9b\x97\xfa\xc9\x75\xd2\x74\x13\x60\x1b\x6a\x87\xf0\xb6\xb3\x58\xed\x14\xf2\x65\xb3\xe3\xd9\x7d\x12\x30\xdd\xb5\xb6\xb9\x72\xd2\x86\xc3\xb9\x47\xdd\xae\x53\x8e\xcd\xf8\x48\xa5\xc2\x42\x31\xe9\x6f\xdb\xcb\x64\x79\x21\xe2\x9e\x31\xb4\x68\x56\x84\xaa\xd3\x6d\x06\x83\x7c\x2b\x63\x90\x42\xba\x19\x18\xd5\x66\xa7\xe9\xd5\xbb\x8b\x58\xbb\xd8\x16\x35\x73\xef\x11\xad\x0b\x16\xdb\x6a\x5d\xea\xf5\xff\x02\x8b\x95\x06\xe1\x4a\x53\x52\x98\x52\x67\x44\x31\x85\xa1\x49\x0d\x48\x94\xa5\x29\x02\x19\xd9\x8d\xcf\x84\x00\x43\x71\x5d\x37\x24\xd6\x85\x69\x31\x4b\x72\xcb\x92\xd2\xd0\
x34\x83\xed\xac\xed\x2e\x56\xcf\xb0\x84\x8c\x50\xc1\x89\x41\x38\x03\x1a\xb3\x4c\x0d\xed\x6c\x11\x11\x60\x31\xc6\x29\x34\xb0\x41\x20\x44\x82\x12\x8b\x2a\x81\x90\x25\x35\x6e\x18\xba\xb0\xa0\xa9\xb4\x2f\xfb\xd2\xc3\x93\xc5\x9a\x8f\x16\xbb\x2d\x45\xd7\xd2\x0d\xc7\xc5\xa5\x1f\xf6\xd3\x9b\xf1\x2c\xb1\xa9\xb1\xd9\xa4\x54\xd6\x4d\x6b\x68\x2f\xec\x71\x73\x63\x32\xd9\xcd\xf1\xec\x78\x41\xfa\x2a\xe6\x24\x83\xe9\xac\x15\x6e\x87\xad\xe1\x32\x31\xee\xaf\x33\xcb\xc9\x30\x6e\x92\x4e\xbe\xd2\x08\xa3\x9d\x7e\xc1\x45\x72\x9d\x38\x18\xcb\xde\x62\x4f\x8c\xa8\xcb\x8a\xfe\x76\xb4\x19\x72\x60\x55\x4a\x75\xe0\x67\x72\xbc\x3c\xc9\x6f\x96\x2d\x1a\x98\x58\x1a\x5b\xad\x39\x6b\x0d\x27\xc5\x55\x26\xee\x17\xd5\x70\x51\xd0\x73\x8d\x52\x90\x58\xb4\xf3\x93\xa4\x4b\xed\x29\xde\x0c\xeb\x60\x50\x67\xd4\xa1\xb1\x0e\x95\xc5\x54\xcd\x1a\xb7\xc9\x3e\xeb\xe8\x5d\xb0\x58\x7a\x71\xe6\xf5\x5f\x60\xb1\xd0\xb0\xa0\x52\x88\x33\xc5\x30\xd0\x98\x4e\x00\x04\x4c\x2a\x86\x14\xa0\xc8\x10\x48\xd3\x2c\x9d\x71\x00\x4c\xdd\xe2\x9a\x41\x8d\xdd\x2c\x08\xe8\x64\x67\xd9\xd8\xd0\xf5\xc3\xac\xea\x0e\x56\x6f\x32\x93\x70\x8d\x71\xcd\x52\x12\x31\xca\xa1\x00\xdc\x44\x42\x52\x02\x14\x32\xa4\x29\x19\x50\x9a\x02\x54\x53\x96\x92\x1a\x10\x40\x12\x9d\x1a\x84\xe8\xa6\x8e\x80\x01\x76\x79\x2c\x3e\xb1\x58\xf4\x68\xb1\xd1\x46\x89\x2c\x71\xdb\xc0\x23\x6c\xad\x3b\xe3\x4c\x75\xbc\x1c\xb0\x81\x56\xcc\x0d\x52\xb1\x64\xb9\xc2\xad\x0e\x73\x83\xa9\x34\x13\xf9\x62\x13\xe8\x95\x81\x13\x8f\x45\x4b\x95\x64\xbd\x5a\x53\x54\x95\xac\xd5\x5a\x9b\x19\xc0\x9a\xd5\x96\xb4\x2f\x46\xb0\x82\x70\xa3\x57\xc8\x83\x3d\xc2\x7b\x8b\x3d\xc9\x63\x67\xa8\x6b\x50\xa8\x6a\x9d\x7a\x19\xaa\xa6\x51\x19\x52\xd8\xb6\x99\xa6\xfb\xeb\x91\x4f\xd5\x70\x10\x23\xab\xd1\xa6\x52\xad\x19\x72\xc2\x3a\xa5\x02\x29\x28\xd3\xf7\xe3\xa0\xd4\x0e\x91\x97\xd4\x9a\x13\xb4\x22\xcb\x84\x13\x55\xc6\x00\x54\x06\xa8\x91\x2b\x37\x7b\xa9\xe4\xde\x48\xf9\x05\x8b\x15\xff\xad\x33\x2f\x06\x35\x64\x62\x48\x15\xa7\x0a\x5a\x70\x37\x7f\xd7\x2d\x65\x4a\x46\x09\x81\x52\x33\x2d\x1d\x08\xcb\xa0\x3a\xa6\xa6\x46\xa0\x29\x18\x84\x04\x5a\x9c\x0b\x53\x37\x00\xdb\xe7\x8f\x77\xb1\x7a\x4d\x03\x90\x63\x88\x81\xa9\x19\x4a\x62\x65\x00\x8a\x85\x0e\x21\x82\x3a\x83\xd4\x30\x0c\x6c\x5a\xd2\x24\x88\x12\xa9\x69\x80\x30\xc0\x4d\x89\x11\x43\xa6\x81\x2c\x0c\x34\xfe\x65\x5f\x98\x7c\xb2\x58\xfc\x68\xb1\xeb\x46\x3d\xa6\x6d\x62\xe1\xaa\xb0\x1d\xc4\x13\xd1\xa8\x46\x0a\xfd\x72\xcf\x19\xe3\x95\xdd\x32\x57\xe3\x59\x29\x36\xb2\x8d\xa5\x9e\x06\x25\xb3\xe1\xbb\xdd\xa2\x56\x25\x93\xc6\xd6\x43\xb9\x76\x72\x1a\xf6\x60\x3f\xc5\xf2\xf1\x66\x2c\x39\x2e\xb4\xdb\x49\x94\xb7\x45\x1f\x0c\xf9\xf4\x60\x2c\x7b\x8b\x3d\xc9\x63\xfb\x6b\x51\xeb\xa8\x64\x09\x96\xd6\xad\xc4\x42\x2a\x7b\x2a\x63\x6c\x34\xaf\xf0\xe1\x76\x9c\x4d\x11\xab\x12\x34\xc7\x9e\x89\xb8\x91\x33\x1c\x9b\xf3\x6e\xbc\xd1\x8c\x2a\x90\x70\xc1\x3a\x3d\xac\xac\x67\xa9\x3a\x74\x27\xc3\xf9\xc8\xc9\xe5\x82\x52\xd7\xa5\x5a\x3b\x33\x59\x15\xf7\x9c\x07\x17\x2c\x76\x00\x2e\xf5\xfa\xff\xed\x16\xfb\x4a\xed\xf7\xc2\xbe\xc1\x1b\x2a\xc9\x2f\x37\x9d\xdd\xc2\xec\xb5\x5d\x51\xb7\xf1\x3c\xdf\xd8\x74\x03\xb7\x57\xb6\x25\xdd\xc0\xf1\x95\x0d\x44\x9f\xad\xc3\x9f\x6c\x22\x3a\x59\xc0\x2a\xe8\x8d\x64\x26\x51\xc8\x74\x33\xcd\x72\xd2\x2e\x27\xea\xed\x4a\xb1\x54\x68\x54\xd2\xba\x19\xc7\xb9\x62\x3e\x5b\x6c\x97\xda\x5a\x36\x93\xd0\x8c\x82\x99\x6c\xa6\xaa\xb9\x54\x47\xcf\x1f\x6a\x89\x87\x8a\x77\xba\x63\x26\x4b\xf9\xba\x3f\x9d\x82\x5e\xc6\x19\xb5\x37\x5a\x61\x6c\x2c\xa2\xc3\xe8\x90\x36\xfd\xf9\x66\x31\x9a\xa6\x7d\x67\x31\x9a\xd6\x51\xe2\x68\x7d\xfb\xd2\xdf\xe1\xf7\x9d\x9b\xa5\x46\x80\xc5\xd5\xdc\x5f\xf9\xd5\xb4\xa8\x55\x9a\xca\xb4\xe0\x74\x93\xd9\x96\xf2\x7d
\xd5\xcf\x2f\x8c\x04\x1f\x83\x4d\xbe\xe0\xa0\x56\xb1\x70\xa8\x36\x73\xab\xb5\x6d\xef\xbf\x3f\x78\xfa\x27\xbe\x67\xba\x7a\xfa\x3d\x69\xdb\x56\xe2\xc4\x8b\xe3\xfd\x46\x07\xa7\x78\x77\x93\xe7\x7a\x6e\xde\xa7\xce\x5a\x15\x9a\xa3\x60\xdc\xf2\x8d\x62\xc6\xb5\x96\xca\x72\x0a\x19\xc2\xa4\xa9\xb5\x82\x3d\xbb\x89\x95\xad\xce\x32\x04\x75\xbd\x04\x6d\x35\xb6\x41\x74\x44\x26\xd1\x94\x48\xe6\xea\xab\x68\xb4\x1b\x43\xc9\x61\x58\xd5\xe6\x89\x15\x34\x9d\xe5\x04\x8c\x5a\x06\x10\x05\x3c\xef\x80\x42\x26\xba\xd2\xa2\xb3\xe9\xac\xad\x98\x3d\x80\x2d\x7d\x08\xba\xa4\xda\xd3\x63\x8b\x96\x8c\x0f\xfe\xfe\xfb\x7c\xb4\xb8\x73\xd7\xe8\x37\x75\x4d\xe9\x79\xd7\x24\x13\x85\xcd\x38\x9d\x76\x4a\xd3\x46\x65\xba\xe8\xf0\xba\x88\x9b\x89\x2c\x4b\xbb\x19\xd7\xa9\x58\xb4\x8b\x2a\x33\x55\xcd\x44\x89\x26\xda\xd3\x63\xd7\x74\xce\xa2\xcd\xa7\xa1\x07\xed\xb2\xbf\xed\x2d\x96\x6e\x69\xe5\xd4\x3b\x05\xcb\x18\x64\xa7\x61\xdd\x62\xbd\x3c\x5b\x24\x6a\x18\x82\xb9\x8e\xb3\x96\x5c\xc7\x17\x59\xd3\x58\x54\xe2\x9b\x59\x03\xa5\x96\xee\x6a\x35\x30\x2b\xab\x74\xd6\x59\x81\x34\x5b\xb9\x51\xa3\xdc\x76\x68\xbb\x53\x33\x8a\x69\x2c\x92\xb5\xef\x0e\xbd\x71\x13\xf4\xb5\x73\xe8\x97\xe6\x26\x5b\x5f\xc8\x31\x08\x44\x52\x1b\xcc\x33\xc9\x95\x51\x8a\x0f\x46\xdd\xf1\x46\x9a\xeb\x54\x56\x9b\xf1\x6d\x06\xc8\x58\x0c\xd7\x57\x4f\xd0\x77\x6f\x85\x3e\x68\x46\x8b\x7a\x6c\x39\x4b\x73\xdf\x71\x17\x66\x07\x57\x78\xce\xa1\x71\xb3\xa5\x75\xaa\x84\xc3\xbc\x1a\xe9\xda\xac\x9e\x2f\xf2\xdc\xc8\x5e\xb7\xf4\x69\x71\x93\xe8\x65\x35\xe9\xa3\x2d\x5d\xad\xb3\xab\x4a\xb4\xa9\xd7\x56\x85\x74\xd0\xed\x62\x34\xcf\xa4\xa6\x79\x3f\x66\x27\xbe\x3f\xf4\xe6\x4d\xd0\xb7\xce\xa0\x8f\x07\xde\x6a\xb6\xdc\xae\x03\x3f\x51\xea\x46\x61\x6c\x9c\xa5\x9b\xc5\xbc\x95\x6d\x53\xbd\x9b\x98\x7a\xeb\x71\x35\x55\x9d\xf2\x62\xb3\xa2\xf7\x07\x4f\xd0\x9f\x97\x67\x3f\x0d\x7d\xa1\xd5\xce\x6f\x66\x5d\xb7\x8e\xb5\x5e\xb7\xbc\xec\xa4\x45\xac\x66\x43\xab\xe2\x4c\xb6\xb3\xea\xc6\x19\xbb\x24\xcc\x14\xf5\x64\x7a\x9c\x4b\xb4\x37\xb9\x8c\xce\x81\x67\x90\x6e\x45\xcb\x57\x68\xb5\x65\xc5\x69\x5c\x7a\x0b\xd8\xcd\xf4\xac\x7e\x1e\x90\x74\x26\x9f\x6e\x4e\xfc\x84\xfd\xdd\xa1\x47\x37\x41\xdf\x3b\xb7\x7a\xbc\x9e\xc4\xa2\xb9\xca\x86\x4e\x1a\x7a\x25\xdd\xc8\x90\x42\xdd\x4d\xd1\x68\x27\x95\x75\xd7\xbd\xb1\x59\xd0\xcd\xfe\x78\xae\x66\x38\x95\x3f\x06\x9c\xfe\xad\xd0\x7b\xcd\xf5\xa0\xa5\xcc\x7c\x47\x45\xc7\xc3\xf4\x02\x75\x52\xd0\x75\x07\x8b\x4a\x2a\xb5\x18\xce\x56\xe9\xcc\x32\xa3\xf5\x97\x73\xb4\xdd\x0c\x3b\xa3\xad\x51\x41\x09\x3b\x57\x0a\x12\x68\x8c\x31\x1a\xcc\x63\x50\xab\x8e\x82\x74\x3b\xd6\x72\xea\x33\x2b\x97\x48\x82\xce\x96\xae\x66\x3f\x00\x7a\x7c\x13\xf4\xfc\xdc\xea\xab\x4d\x8e\x12\xb9\x6c\xcc\x58\x19\xe5\x94\x15\xb7\x66\x64\xab\x27\xb3\xd4\x1d\x35\xb5\x49\xc2\x9a\x4d\xb4\x6a\x0b\xfb\x61\x7e\xe0\xb9\x47\xe8\xe9\xcd\xb1\xbe\x9f\xb4\x79\x35\xcc\xf6\x9b\xcb\x12\x6c\x36\xf3\x1e\x6d\xc7\xac\xcd\x1c\xae\x27\x53\x58\xed\x35\x50\x73\x53\x2b\x64\xb0\xda\x66\x4d\xb3\xc9\x17\x4b\xc1\x93\xcd\x35\x5b\xf9\x4b\xb3\x33\x83\x8b\xee\xd8\xb0\x17\xbd\x69\xb6\xe2\xf0\x58\x7d\xc2\xe7\x33\xa7\x03\x92\x33\x61\x7f\xff\x61\x96\xdc\x04\xfd\xe0\x1c\xfa\xc2\x42\xf0\x65\x2f\xb6\x69\x17\x2b\x23\x6f\xd8\xaa\x50\x38\x6a\x8f\xeb\xc1\x38\x5d\x25\x95\xda\xac\xdd\xaa\x7a\xe3\x38\x74\x1b\xe9\x0d\x3d\xc6\x7a\x76\x2b\xf4\xb3\x5c\xb2\xd3\x11\x71\x38\x01\x89\x94\xaa\x54\x2b\xeb\x6c\x52\x83\x4e\x47\xcf\xc8\xf9\x2a\xe9\x84\xa5\x2d\xe9\x76\x96\x63\x3f\x91\x5a\x6f\x58\x63\x13\x0c\x3b\xb3\x38\x59\xf5\x92\xe3\x96\x2c\xa1\x79\xad\xb6\xcd\x8f\x63\xb2\xdb\x2e\xba\xaa\xbe\x25\x9d\xf8\xa6\xb3\x2a\xa6\xba\xf1\xd5\x01\xfa\xd7\xd3\xd9\x4b\x9b\xc1\xaf\x48\x67\x1f\x37\x84\x1f\xf7\x94\x98\x9
a\xc9\x18\x84\xdc\x54\x9a\x66\x72\xc1\x09\xb5\x2c\x44\xb5\xdd\xef\x96\x4e\x24\xb7\xa0\x41\x15\xd1\x38\xb2\x94\x60\x42\x29\xa4\x69\x04\x29\xc8\x30\xb3\x84\xb5\x2f\xe6\x3c\x75\xea\xde\xbe\x53\xfb\x1f\xdb\x6c\x5c\x5a\x4d\x32\x0b\xca\xea\x03\x5a\x28\x77\x8b\xbd\x79\xd3\xc1\x28\x8b\xd2\xb3\xca\xc2\x2f\xa0\x4a\xaf\xb3\x28\xa0\x4a\x3f\xeb\x1d\x56\xb4\xdf\x6a\xfc\xe5\xcd\xdf\x9f\x6e\xfc\x71\x03\xf8\x53\xe3\x1f\x6f\x5c\x3a\x1c\xe7\x8e\xbc\x78\xf6\xb5\x03\xf0\xc2\x27\x2e\x70\x72\x65\xb8\xf2\xfc\xf1\x8c\x06\xc1\x6c\xe8\xd3\x40\x5e\xe0\xd4\x94\x41\x18\x69\x24\xd3\x91\xf2\x81\x38\xf2\x57\xa4\x21\x67\xa1\x9c\x32\xe9\x47\x34\x00\xcd\x8f\x08\x52\x9e\xcf\x65\xc0\x67\x9e\xeb\xca\x75\x38\xa1\x0b\x97\x0f\xcf\x05\xed\x8f\x4b\x7f\x84\xd9\xa1\xe4\xf2\xb0\x1d\x31\xb8\xdc\xfe\xff\x7f\xbf\x7b\xf1\xf7\xd0\x99\xca\xdf\xff\x8c\x80\xc3\x66\xc6\xdf\x1f\xee\x9f\xfa\xfd\xcf\xc8\xe1\xfd\xfe\xc3\x21\x0d\x7e\xff\xf3\x70\x58\x7b\xff\xe1\xff\x3e\x10\x2b\x29\x3f\x46\x38\xa5\xeb\x70\x1d\x38\xdb\x0f\x92\xfb\x32\x90\xfe\xf2\x3d\xe2\xdf\xfe\xf7\x43\x50\xd0\x20\xdc\x9f\x32\x14\x0f\xa7\x7d\x2e\x40\x71\x73\x29\xea\x03\x7a\x3c\x44\x5b\xea\xf3\xa1\xb3\x7c\x78\xf9\x4a\x97\x1c\xfb\x00\x3e\x00\xb2\x87\xc3\xff\xfd\xcf\xc8\xef\x4b\x08\xff\x03\xff\x03\x7e\x7f\x78\xc1\x17\xbe\x2f\xdd\xb0\xb8\x6f\xda\xef\x7f\x46\xc8\xf3\xcf\xe3\xfb\x83\xa0\x3b\xdc\xfe\xdf\x27\x20\x8f\x90\x3e\x51\xee\x18\x03\x29\x4d\x13\x59\x96\x61\x42\x8d\x51\xc8\x25\x17\x44\x03\x88\x62\x1d\x2b\xdd\x14\x94\x83\x5d\xab\x29\x57\x96\x22\x52\x63\x16\x84\x48\xea\x8a\x11\x4e\xa5\x69\x12\xcc\x1f\x34\x7a\xe2\xbb\xb3\xe2\x67\x1d\xf8\xf4\x66\xdf\xf6\x9d\xc9\x3d\x7b\xf5\xbf\x67\xdf\x0f\x5c\x3a\xdb\xeb\x85\x0d\xc2\x35\x44\x0c\xa5\xa8\x49\x0d\x4e\x99\x01\x15\x46\x92\x70\x8a\x74\xa4\x23\x20\x0d\xac\x2b\x86\x74\x02\x85\x29\x81\x65\x68\x80\x33\x53\x87\x1a\xa2\x98\x30\x65\xfd\xfe\xdb\x05\x09\xaf\x60\x20\x21\xa3\x90\x98\x50\x57\x42\x03\x44\x37\x99\xe2\x8c\x21\xd3\x42\x82\x42\x66\x12\x9d\x6a\x44\xc3\xc4\x44\x16\x13\x86\xa4\x18\x99\x8c\x49\x02\x25\x00\x86\x65\x98\x9a\xd2\x3f\x8f\x01\xfc\xfa\xf2\x9d\xb7\x08\x67\x8b\xdd\xf7\x7e\x17\x0c\x6b\xc8\xe0\x44\x98\x06\x47\x94\x63\xca\x4d\x69\x02\xaa\x19\x3a\x11\x82\x2b\x49\x35\x5d\x30\x13\x72\x40\x35\x89\xa0\xe0\x4a\x71\xa4\x31\x84\x24\x45\x50\xe9\xc0\xf8\xfd\x43\x08\x23\x4c\xa1\xb9\x33\x65\x28\x75\x04\x98\x20\x3a\x65\x42\x0a\xae\x4b\x29\x0c\x0b\x4b\x24\x80\xa4\x0a\x72\x5d\x58\x40\x02\x0b\x31\x41\x35\x81\x04\xc3\x0c\x2b\x08\x28\xf8\x0c\xc2\xb7\xee\x52\xbc\x37\xc2\xf7\x6b\xfb\x5b\x08\xdf\xdc\xea\x5f\x1a\xe1\x0f\xfa\xf1\x3f\x18\xfc\x83\xc1\x3f\x18\xfc\x83\xc1\x3f\x18\xfc\x83\xc1\x3f\x18\x7c\x2f\x0c\xf6\x3f\xfd\x7f\x1f\x9f\x1a\x05\x7c\xb6\xbf\xcc\xe5\xd2\xf3\xe5\x71\xfe\x7f\xa8\xea\xb6\x17\xb5\x6d\xa2\x45\xcd\x59\x98\x89\x7b\x1e\xe4\xa1\x33\xab\x14\x97\x9e\xf2\x3a\x63\x84\x02\xdf\x33\x87\xd4\xf0\x3d\x73\x2c\x07\xc7\xcd\x14\x87\x22\x80\xd6\x9a\x2c\xd6\xd9\x34\x71\xba\xe5\x4d\x61\xda\xf1\x82\xee\x66\x6c\xe0\x7c\xba\x10\x4d\xac\x67\x31\xa6\xc7\x7a\xce\x70\x99\xd1\x17\x62\x52\x7c\xaa\x37\xe4\x87\x37\x16\x81\xae\xa9\x57\xc4\xed\x61\x3a\xd9\x6d\x2c\x33\xf5\x41\x32\x89\xe8\x90\x27\x17\xb9\xd9\xb8\x3d\x82\xf3\x56\xda\xa8\xd4\x9b\x5e\x6a\x52\xdb\x86\xdd\x6d\xc3\x5f\xcc\x54\x1e\xda\x7d\x85\xa6\xbc\x30\x8a\x8b\x1c\xa8\xa1\xf4\xb6\x36\x1e\xf2\xfc\x30\xb3\x09\xbb\x7a\xd1\x9b\x8f\xc3\x95\x2e\xe9\x7c\x1c\xc4\x9f\xf4\x49\xf4\x6e\x5c\xea\x8f\x5f\x53\x64\xab\xe5\xea\x35\x08\xfc\xf5\xb8\x67\xaf\xa2\x53\xaf\x6b\x63\xa7\xd0\x4f\xf7\x2c\x3a\x89\xcb\xe4\xb8\x85\xe2\xf9\x56\x09\xb7\x09\x98\xe3\x79\x77\x23\x6a\x99\x6e\x74\xe6\x26\xbc\x55\xb7\x51\x4e\x55\x86\x1c\xe4\x73\xdd\x44\xaf\x1e\x
f8\x6d\x6b\x83\x0a\xb3\x22\x2f\xeb\xd3\xf9\xac\xb8\xef\xa4\xb8\x4d\xb3\xe9\xae\x63\xa0\xbe\x37\x6d\x77\x9d\x61\x69\x59\xb3\x96\xdd\x46\x32\xa5\xa9\x79\x72\xd1\x1d\xb0\xb1\x9c\xe4\x0b\xf3\x5c\x54\xaf\x2f\xf5\x86\xa8\xa0\x05\xaf\x6a\xdb\x95\xbf\xf2\xfc\x22\xaf\x18\x5a\xa3\xc2\x57\xf3\x4a\x50\xad\xd7\x07\xc1\x22\x1a\xb3\xa0\xbb\x1d\x75\x47\x87\x7a\xd3\x0d\xf6\x36\x78\xea\xff\xd5\xad\x5b\x2b\xf6\x4f\xe7\x73\xf6\x5b\x2b\x34\x6c\x38\x71\x6b\xa2\x2f\xec\x61\x76\x3b\xda\x06\xf5\x95\x15\xf6\x26\xe1\xda\x9d\x07\xb6\xe1\x37\x4b\xb1\x34\xf4\xcd\xc2\x6a\x58\xe2\x53\x10\xf8\xb9\xf6\xbc\xa6\x4a\x19\xdb\x4c\x67\x47\x95\xf9\x38\x55\xda\xf4\xab\xfa\x04\x36\x4c\x1d\xf0\x58\x7a\xc0\x27\x41\x62\x9d\x5e\xed\xeb\xd3\xf1\x74\x3e\x1b\x63\xc5\xa8\x6e\xfb\x15\x3b\x55\xc4\x98\xad\x66\x63\xe8\x14\x9a\x4b\x3b\xde\x74\x13\x8d\xf1\x36\x5a\x4a\x3a\x31\xde\xad\x6d\xc7\xc7\xa2\x6a\x32\x1c\x2c\x57\xc9\x45\xa5\x63\xd7\x2c\x5c\x87\xf5\x66\xd8\x12\xab\x72\x32\x3b\x4b\xc6\x12\x2d\x39\xdb\x8a\x5a\xb5\x3b\xf1\x5c\xee\x14\xf7\xcb\xc8\xfd\x23\x94\xa9\xf3\xa2\xea\xb9\x7f\xf9\xf3\x32\x2a\xca\x0a\x1d\x8c\xd6\x25\xda\xaa\x5a\x28\xbe\x55\x81\x25\x01\xf7\xfc\x72\xbf\xbb\x8d\x77\xf2\xe3\xb4\x57\xc0\xe3\xe5\xf8\xe0\xa4\xed\x76\xf9\x81\x67\x02\x2e\xe2\x22\xeb\x35\x17\x83\xd2\xb2\x16\x26\x71\x7c\x98\x2b\xea\x65\x69\x89\x76\x55\x65\x72\xd1\xbc\x63\xe6\x97\xad\x4a\xb4\x6f\x87\xf8\xa1\x3f\xb6\x16\x3e\x59\x55\x4a\x3d\xe9\x5b\xcb\xb2\xe9\xa4\x3a\x6f\xf3\xb5\x97\x9b\x6f\xf3\x69\xba\x11\x22\xbf\xca\xd4\x4b\xa4\xbd\x9e\x82\x78\x6f\xb2\x70\xf4\x62\x7b\x61\x89\x98\x1b\xd7\x86\x2c\x16\x06\xad\xce\xa0\xd5\x2f\x26\x5a\x6e\x79\x19\x4b\x54\x47\x41\xa9\x69\x97\xb5\xe2\xb4\x3d\xee\x55\x4a\x61\x42\xf4\x78\xb4\x5c\x23\x57\xb4\xef\xe1\xe0\xd7\x61\x7b\xe2\x19\x74\x27\x50\x1e\xd6\xe1\x71\xaf\x48\x6c\x3c\x9a\x0c\x52\x55\x09\x44\xab\x85\xdb\x59\x9e\xac\xad\x51\x2d\xb6\x9a\x64\xe7\x5c\x6f\x25\xa1\x49\xf3\x7a\xce\x39\x1c\x1b\x4a\xb7\x5b\xe0\x61\x01\xbf\xd8\x31\xd2\x40\x0e\x2b\xc8\xde\x58\x09\x50\x0d\x32\xa9\xc1\x92\x43\x0c\x61\xcb\x22\xbd\x91\x31\x2d\x8e\xa7\x56\x0d\x9b\xe3\x84\xbe\x3c\xf4\x5a\x51\x4e\xcc\x13\x55\xe8\x93\xbe\xf6\x5c\x96\x67\xfd\xe2\x2c\xd3\xf4\xbb\x0b\xd9\x4c\x76\xc9\xdc\xc8\x5b\xd6\xcc\xd0\x5b\xdb\x02\xc5\xd5\x0d\x03\x99\x05\xab\xa7\xdc\xd6\x70\x00\x66\x9a\x34\x61\x17\x2f\x7b\x72\x95\xdb\xb6\x3b\x22\xaa\xaa\x31\x94\xcb\x7a\xf5\x54\x37\x85\x56\x0d\xb7\xd3\x4b\xc9\x49\x6d\x9d\x4c\x1c\xeb\xc5\xf1\x63\x7b\x3f\xed\xcf\x6f\x15\xce\x5f\xb9\xa2\xe3\xb3\xb5\xe3\x93\x6b\x3a\xee\x71\xa8\x15\x7e\x8d\xdc\x72\xca\xf8\x4b\xbc\x99\xf8\xf2\x35\x62\x69\x9a\xae\x63\x0d\xe8\x88\x98\x06\xc6\x26\x01\xf8\x6b\x04\x02\x60\x11\x8c\x00\x00\x2f\x0f\x14\x9f\x0d\xf5\x17\x1b\x75\xfd\x61\xe2\x43\xa3\xae\xc6\xe4\x4b\x23\x51\x69\xbe\xd1\xaa\xc7\xe7\xe7\x34\xeb\xfa\xbe\x6a\x35\x92\x5f\x9e\x35\xe0\xb1\x19\x1a\x00\x50\x83\x10\x11\x82\xc8\xa1\x55\xe4\xb3\xad\xfa\x79\x16\x78\x68\xd5\xc5\xbe\xb2\x2c\x82\x2d\x82\x0d\x53\xd7\x2e\x37\xeb\x35\x57\x7d\xed\xaa\x91\x4f\x3b\xeb\xe9\x75\x23\xc7\xa5\x1e\xa6\x5b\x44\x32\x83\x4a\x62\x61\x13\xe9\x9a\x89\x0c\x9d\x53\xa1\x41\x6e\x19\x12\xea\x4c\x71\x80\x0d\xa6\x6b\xba\x94\x44\x97\xd0\x80\x4c\x61\x00\xa9\x29\x2c\x60\x28\xc8\x1e\x8e\x82\x3e\x4b\x75\x4b\x67\x43\x5d\xdc\x89\xc5\x41\x11\xe4\x33\x9b\x70\xb8\x2a\xc3\x49\x0f\xd0\xcd\xcc\x83\x56\x39\xbb\x5e\x16\x13\x9b\x8a\x19\xc6\x53\x3c\xd1\x5e\xae\xd2\xd6\x4a\x1f\x84\x7e\xc5\x3d\x5f\x82\xbf\xf8\xbc\x7a\xa4\x78\x1f\x3a\x13\xb7\xc8\xef\xc5\xa2\xfc\xe2\xd0\xf3\xae\xfc\x97\x4b\xc5\x97\x71\x57\x16\xe6\x54\x29\xca\x08\x87\x08\x68\x3a\xd5\x31\x21\x06\x44\x26\x67\x80\xe9\x4a\x41\x4a\x35\x41\x95\x01\x00\x50\x52\x19\x96\
xd0\xa0\x54\x9c\x18\x58\x08\xa6\x98\xa4\x07\xdc\xb5\x97\xb8\xff\xa4\x76\xdf\x0b\x77\x62\x1c\xbf\x9f\xfb\x0e\xb8\x63\xcd\xc4\x26\x62\x50\x31\xa1\x88\xce\x00\x81\x1a\x56\x3a\x31\xa5\x92\x42\x01\x0b\x58\x9c\x93\xc3\x19\x7d\x89\xa1\x82\x1c\x10\x0b\x0b\x68\x1a\x1a\xe6\xcc\xa4\x42\x1c\x70\xd7\xef\x8e\xfb\xb5\xed\xbe\x17\xee\xf8\xe4\xac\x52\xe9\x3b\xe0\xce\x04\x02\x88\x00\x0a\x75\x4c\x94\x92\x1c\xeb\x96\x84\x4a\x51\x4d\x30\x83\x0b\x13\x2a\x93\x30\xca\x14\xc3\xd0\x90\xc0\xa0\xa6\xc6\x84\xc6\x90\xc9\x14\xa1\x3a\x64\x86\x7a\x38\xa0\xfb\x26\xee\x89\x85\xa7\x7b\xa1\x61\xce\x13\xd5\xd4\x7a\x56\x8b\xe9\x5e\xb6\x1c\xdd\x42\x5c\xdf\x38\x01\x9c\xa8\x52\xba\x37\xad\x75\x06\xfe\xa2\x11\x6d\xee\xe9\xf3\xad\x44\xf4\xa1\x51\x83\xd7\xb0\x7d\x17\xf7\xe4\x6d\xf2\x2b\xfc\x3a\xf9\x1f\xc5\x9d\x63\x02\x4c\x64\x71\x03\x68\x5c\xb3\x08\xc3\xcc\x54\xba\x82\x14\x51\x0d\x50\x0e\x35\x68\x51\x80\x84\xd2\x2d\x8a\x89\x62\x3a\xe5\x48\x58\x3a\xa7\xa6\x4e\x85\x21\x4d\xf9\x70\x30\xfa\xed\x38\xf3\xd9\xa9\xc8\xdd\x71\xbf\x42\xfe\xf7\xc6\x5d\x13\x3a\xc4\x82\x4b\x45\x90\x86\x24\xd2\x2d\xc6\x29\x65\xfb\xa3\xee\x9c\x42\x49\x24\x42\x06\x96\xcc\x40\x02\x99\x9c\x12\x61\x11\xa8\x63\x2e\x76\x63\x31\x04\x74\x7f\x58\x52\xbf\x18\x67\x7e\x52\xbb\xef\x85\x7b\xc9\xf8\xbe\xb8\x03\x40\x99\xc5\x15\x97\x1a\x33\xb8\x01\x0d\x28\x08\x63\x08\x23\x22\x04\xb0\x0c\x26\x18\x83\x1c\x1b\x00\x61\x08\x04\x63\x3a\xa3\x40\x2a\x8b\x10\x0c\x14\xd2\xa9\x6e\x3c\x1e\xab\x7e\xd3\xde\x3f\x3b\xf5\xbc\x8b\xbd\xa7\x6e\x93\xff\xbd\xed\xdd\x30\x08\xd2\x2c\x22\x81\xa1\x14\x83\x4a\x47\x1a\xe0\xa6\x06\x15\x81\x54\x30\x53\x03\x58\x99\x02\x6a\x5c\x83\x0c\x10\x0a\x30\x32\x89\x25\x34\x26\x75\x41\x04\x95\xa6\xa1\x1f\x70\xbf\x10\x67\xee\x18\x5f\xcf\xf3\x99\xdc\x27\x71\xbf\x46\xfe\xa9\xbd\x7f\x46\xfe\x47\x71\x87\x02\xe9\x80\x08\x8e\xa4\x85\x20\x66\x52\xea\x16\x45\x96\x42\x04\x62\xc4\x15\x52\x3a\x65\x5c\x19\x42\x47\x10\x0b\xa6\x73\x8c\x76\x71\x87\x09\x68\x11\x6a\x4a\xc3\xd0\x0e\xb8\x5f\x88\x33\x3f\xa9\xdd\xf7\xc2\xbd\xd8\xfa\xbe\xb8\xef\xa2\x37\x65\x14\x2b\x9d\x71\x13\x6b\x48\x70\x08\x0c\x68\x9a\xba\x92\x06\x36\x76\x09\xa3\x14\x80\x6a\x54\x69\x02\x58\x44\x58\x3a\x03\x8c\x72\x93\x21\xa5\x73\xa4\x19\xfc\x80\xbb\xf1\x26\xee\x3f\xd2\xcf\x2f\xe1\x7e\x8d\xfc\xef\x1d\xdf\x75\x86\x10\xa2\x9a\xa9\xeb\x50\x57\x1c\x53\x20\x34\x03\x4a\xa9\x11\x80\x0c\x29\x39\x26\x94\x52\x53\x32\x01\x28\xe6\xbb\x71\x54\x11\x53\x33\x2d\x49\x80\xa2\x02\x68\x96\x7a\xb8\x84\xe0\xee\xe3\xea\x69\xbb\xcf\xf3\xf7\x3d\xa4\xb9\x93\x2b\xee\xe2\x6f\x63\x73\x92\x8a\x7f\xb6\x54\x39\xa9\x15\x8d\xf0\xe4\xec\xc7\x79\x3f\xec\x5f\x0d\x41\xbf\x5b\x06\xc5\x69\x79\xc9\x6a\x17\xe7\x78\x1f\xec\x0b\x61\x09\xc4\x08\x44\x14\x50\x9d\x23\x03\x20\x1d\x0b\x83\x28\xa9\xeb\x8a\x02\xa0\x2c\xa9\x20\x44\x10\xe8\x5c\x03\x06\xd1\x0d\x0a\x39\x24\xd4\xda\xc5\x7e\xa0\x2b\x61\x8a\x43\x5f\x5c\x88\xf9\xe9\xdb\xfa\xe2\xd4\xf7\x7f\x72\x5f\xb8\xf6\x0f\xea\x0b\x44\x77\x8e\x81\x88\xa4\x1a\x01\x50\x48\x44\xb1\xa6\x5b\xdc\x32\x84\x61\x48\x53\x18\xbb\x21\x19\x12\xa8\x21\x80\xb1\x41\xa0\x66\x0a\xaa\x63\x8d\x69\x98\x21\xc4\x85\xce\x0f\x7d\x71\x61\x1c\xb8\xb1\x2f\xf2\xab\x5f\xa6\x2f\x06\xab\x1f\xd4\x17\x5c\x01\x65\x2a\x2e\x0c\x44\x08\x34\x76\x13\x5b\xa4\xe9\xca\x92\x48\x62\x6e\x6a\x06\xb1\x84\xc1\x84\x6e\x61\x8b\x1a\xd0\x42\x04\x5a\x84\x68\x8c\x21\xcd\x00\xcc\x90\xc4\x34\x1f\xae\x9d\xb8\xfb\x98\x7c\xea\x17\xe7\x39\xed\x87\xc6\xe4\xcc\x6d\xf2\x4f\x6d\xe1\x33\xf2\x3f\x3c\xe7\xd2\x01\x40\x00\x68\x06\x51\xc4\x80\x5c\x98\xca\x04\x1a\x46\x8a\xc3\xdd\xac\x0a\x31\x4e\x08\xc5\xba\x21\x31\xd5\x2c\xa0\x34\x82\x29\xd3\xa9\x46\xa9\xa5\x21\x62\x10\x0d\x1d\x70\xbf\x7f\x3c\xca\x95\x8e\xfc
\xce\xaf\x84\xf9\xac\x0f\x9c\x2c\x8b\x5d\xe3\x03\xf4\x78\x56\x24\x7e\x8e\xf5\x5d\x7d\xc0\xa4\x06\xa1\x04\x42\xc9\x09\x36\x14\xb7\xb8\x29\x30\xe6\x98\x53\xc9\x08\x65\x5c\xee\xe6\x00\x1c\x32\x4a\x4d\xc8\xb0\x69\x32\x8d\x12\x4c\x74\x0b\x52\x40\x2c\x8e\x05\x3c\xf4\xc5\x85\x78\x94\xb9\xad\x2f\x32\xde\x2f\xd3\x17\xad\xda\x0f\xea\x0b\x43\x1a\x18\x5b\x0a\x08\x64\x09\x66\x42\xc9\x0d\x85\x19\xd6\x75\x82\x19\x02\x5c\xd3\x2d\x43\xd7\x09\x30\x84\x86\x0d\x2c\xa4\xd2\x20\xc3\x18\x43\x69\x31\x0c\x05\x36\x99\x3c\xf4\xc5\x85\x5c\xf5\xc6\xbe\x48\xa7\x7e\x99\xbe\xa8\xd8\x3f\xa8\x2f\x14\x85\x58\x61\x0e\x88\xae\x84\xe0\xa6\x2e\x89\xd4\x08\x21\x26\x93\x96\x6e\x48\x68\x31\x85\x74\x2c\x35\x82\x39\xb4\x4c\xc8\xa4\x49\xa0\x10\xc0\xe4\x60\x47\x70\xb8\x3a\x0e\xbf\x37\x36\xdc\x9a\xb7\x9f\x9c\xa8\x7f\x81\xc3\xa5\x67\x2f\x3a\x7b\x9b\xfc\x17\x39\xdb\x07\xe5\x7f\xb8\x0e\x6a\x68\xdc\xda\xc5\x7f\x8d\x72\x42\xa5\xd0\x19\xb3\x28\x66\x58\x28\x66\x21\x66\x01\x53\xea\xba\x80\x26\x40\xca\x90\x08\xe9\x3a\x02\x50\x37\x25\xe2\x48\x42\x68\x02\x4c\x0e\xb8\x5f\x18\x1b\xee\x38\x26\xd6\xec\x2b\xc6\xe4\xec\x6d\xf2\x4f\xc7\xa6\xcf\xc8\xff\x70\x7d\xc2\xd0\x84\x2e\x98\x34\x2d\x83\x48\xa6\x5b\x82\x41\x25\x90\x26\x2c\xa8\x71\x89\x30\xd4\x21\x83\x4c\x07\x80\x0a\x06\xa0\x49\x77\xa9\x13\xc4\x62\x37\x0e\x98\xd8\xd0\x2c\xf1\x70\x3d\xcc\x4b\xdc\xef\x68\x6f\x2f\xea\x7b\x1f\xc1\x3d\x77\x9b\xfc\xb7\x72\xa1\xb7\xe4\x7f\x78\x9e\xac\x23\x8d\x5b\x0c\x23\x41\x4c\x4a\x0c\x83\x63\xdd\x62\xba\x4e\x04\x93\xca\xd0\xa0\xce\x25\x32\x24\xa7\x90\x4b\x86\x38\x10\x18\x48\x82\x89\x85\x4d\xca\xa0\x81\x31\x83\x07\xdc\x2f\xd8\xfb\x1d\xed\xad\x65\x5f\x61\xef\xb9\xdb\xe4\xbf\x18\xff\x3f\x65\xef\xaf\xae\xd1\xdf\x67\x81\xfe\x7b\xaf\xce\x67\x8a\x24\x5b\x5b\xd6\xc6\xac\xa0\x65\x6d\xbd\xd3\x1e\xd5\xfd\xc2\x74\xd4\x05\x40\x65\x48\x50\xcc\xe1\x29\x48\xd5\x57\xf9\x4e\xcc\xee\xea\xf6\xd3\x46\xb6\xfd\x73\x3e\xfa\xbe\x18\x8d\x43\x36\xe8\xd6\x51\x0a\x7b\xc9\x22\x28\xd6\xa2\xab\x5e\x23\x61\x6d\xbb\xcb\x6e\xbb\xa9\xaf\x9d\xaa\xd3\x5b\x34\x18\x4c\x2e\xa7\xb5\xa2\xdc\x6f\x0c\x4b\xb4\xed\xe5\xe9\x5d\x73\xf1\xc3\x6a\xe4\xbe\x93\x7b\xa3\x1a\xaf\x36\xb5\x8c\x39\x9c\xbb\xf1\xe9\x20\x93\x91\x03\x2b\x4f\x26\x06\x87\x29\xb7\x35\x59\x8f\x27\xa9\x49\xd6\x0a\xe6\x7d\x1f\x58\x18\xa6\x51\xa5\xd8\x51\x32\x36\x35\xc6\xb3\x74\x98\x8b\x06\x39\xe0\xc0\x79\xd1\x09\x4d\x1b\xe4\x37\x1d\x97\x0d\x7b\xc5\x8e\xe9\x25\xf7\x57\x55\x6d\xa7\x95\xb0\x80\xf2\xa2\x53\x9a\xf4\x93\x9b\x30\xc7\x27\x95\x14\x8e\xd9\xa2\x58\x1a\x61\xaf\x3a\xac\x31\x6b\x95\x99\x68\xb5\xe6\xfa\x78\xa9\xd8\x1b\x18\x1c\x2d\xe3\xd9\x41\xdf\x13\xda\xbd\x8f\xdf\x63\x75\xf8\xda\xd5\x59\xfb\x0e\xab\xc3\xe9\x66\x27\xee\xdf\x20\xdf\xb6\x7f\xde\x6a\xe1\xa5\x68\xf9\xbd\xb7\x62\x5c\xef\x64\xe7\x43\xc0\x0b\x83\xfb\xe4\x6e\xd1\x37\x9c\x2c\x3e\x2d\xcc\x1a\x83\xa5\xbf\x2a\x54\x34\xd0\x4d\x54\x54\x4f\x75\x83\x4c\x2a\xd5\x0a\x57\x3d\x4a\x53\x6a\xde\x58\xa0\xcd\x34\x3f\x9d\x24\xa7\x34\x9a\xeb\xa2\x1c\xce\x0d\x06\xac\xd5\x2f\x79\xbc\x26\xfa\x96\x91\x2b\xd9\xaa\x20\x6a\x76\x79\xde\x65\xb9\x0a\xde\x04\x2b\x29\x4b\x89\x9d\x23\x44\xbb\x9b\x65\xd4\x1f\xc5\x7b\x89\xca\x48\xe6\x6a\x9d\x6e\x31\xb1\x8d\xad\xe7\x8e\xb6\xac\xd9\xc9\xa8\x95\x07\x43\x4c\x86\x59\x9d\x45\xa3\xc7\xfb\x7c\x7f\xbe\x93\xdd\x6a\xe4\x37\x3a\xd9\x1c\xc7\x9a\x49\x76\x4f\x27\xfb\x81\x5b\x21\x3e\xe4\x64\x77\xde\x77\x73\xbd\x93\x25\xcf\xb4\x7f\x61\x70\x15\x2d\x11\xb3\x2b\x86\xd9\x8b\x27\xf5\x30\xdb\x4e\x57\x60\x5d\xb7\x41\x49\x8e\xab\x24\x5f\x47\x6e\x19\xda\x96\xec\x38\x62\x93\x0b\x0f\xf9\xc3\x1b\x4e\x36\x32\x06\xf1\x5a\x2c\x6e\xb3\xc1\x5c\xe1\x79\xc5\x0b\x69\x38\xe8\xb7\xb2\xa3\xe4\x24\xa8\x84\xc9\xb2\x98\x6
1\x3a\xe9\xd7\xcd\x58\x7a\x6c\xa9\xcc\x28\x3a\x59\xa7\x36\x03\xbb\xdf\xd9\xf6\x70\xac\xd8\x83\x7d\xbd\x91\x46\x1c\xcc\x03\x88\xb5\xe2\xd0\x1e\x8f\x97\x6d\x32\x03\xa5\xfd\xc5\xb6\x13\xd1\x09\xd4\x52\x0d\x70\x22\x9e\x8f\x55\xd2\x38\xaa\xaf\xc6\xf9\x6d\x2e\x57\x64\x25\x73\x4d\x78\x22\xaf\xea\xed\x84\x2a\x76\xc2\xe3\x65\x83\x3f\xdd\xc9\x6e\x36\xf2\x5b\x9d\x6c\xe3\xd7\xd2\xc5\x3b\x3a\xd9\x8f\xdc\x07\xf0\x21\x27\xbb\xf3\x26\xab\x02\x1a\x49\x47\x1f\x4d\xbd\x1c\x69\x66\x26\xc9\x98\x1c\x70\x1d\x57\xbb\x61\xb6\x50\xd8\x76\xda\x64\xd5\x76\xfa\x71\x9a\x58\x98\x45\x73\x8f\xe4\xc9\xb9\x87\x37\xd2\xc5\xd6\xa5\x86\xc6\xdf\x05\xe5\x8c\xdf\x53\xe4\x4a\xc5\xc7\x5b\x73\x51\x1f\xb4\xcc\xf4\x7a\x5e\x59\x90\x1e\x1d\xe3\x38\x13\x1c\xc4\xc3\xc1\x32\x95\x07\xe9\xd1\xc2\xef\x6f\x63\xc5\x41\x66\xd5\xd4\x52\x5d\x0b\xf6\x08\xec\xf9\xa2\xd1\x5e\x68\xe5\x6a\x1d\x4c\xd8\xb0\x61\x7b\x61\xa6\x36\x59\x36\xaa\xbd\xd4\xc6\x5c\x96\x11\x9e\x19\xfb\x91\x6c\xd9\x89\x7b\x89\x61\xd9\x88\x45\xd7\x5b\x77\x18\x43\x45\xd6\x2c\xb7\x54\xa7\x38\x8f\x85\xeb\xe6\xa0\x31\x29\x58\x05\xad\x13\x73\x46\x19\x40\x9e\x14\xbb\xec\x64\xad\xe7\x1d\xf9\xcc\xc9\xf6\x78\x1d\x82\xd2\xc9\xb9\x99\x2b\x46\xfa\x91\x7b\xc4\xff\x95\xe7\x95\x0d\x9d\xcf\x9c\xfc\x2e\xf2\x5f\x09\x30\xef\xc8\xff\x21\x9b\x3e\x1e\x9e\x1f\xb6\xa9\xef\xd6\xcd\x07\xdf\x7b\x47\xdf\x27\xab\xa5\x37\x3b\xf9\xeb\xcf\xe1\xce\xcd\x6c\x7d\xc2\xf5\xda\xa2\xa7\x59\xe1\xf9\xfb\xa7\x39\x41\x2a\x9e\x5d\x8d\xfb\x7c\x93\xeb\x4c\x69\x75\x99\x0a\x93\x93\x3a\x49\x09\x43\x24\x61\x49\xce\xb4\xd1\xa2\x18\x66\xd3\xba\x6b\xd6\x33\x60\xb4\xf0\xf3\xc3\xc2\x3c\xa6\x97\xe2\x70\x5b\x47\x2c\x99\xf5\xa3\x2b\x6f\xa5\x9b\xce\xa8\x60\x69\xce\x70\x94\xed\xcc\x9b\xeb\xd1\xb6\x90\x1c\x8e\x2b\x3b\xc7\x5c\x1b\xf1\x8e\x9b\xb2\x83\x42\x2f\xc4\xdd\x2d\xa1\xd4\x49\x04\xa9\xe1\x74\xc0\x54\x85\xca\x6a\x80\xd6\xe1\x98\xcc\x9a\x7e\xcb\x94\xef\x8d\xac\x9f\x77\xfa\x2b\xe6\xd0\xa3\xca\xb1\x3f\x5e\x79\x3e\xee\xf4\x37\xca\xbf\xa7\xd3\xdf\xb8\x03\xe3\x66\xa7\xff\x1e\xf2\xdf\x5d\xcd\xf8\x80\xf3\xdf\x79\x5b\xe9\xf5\xce\xff\xc6\x5c\xf5\x0d\xe7\x7f\xff\xaf\xc0\x3d\xd1\x9f\x38\x3b\x99\x9b\x15\x15\xcd\xd8\x40\x52\xb1\x8a\x0e\x9b\x7e\x33\x91\xb3\x3d\x29\x0a\x9b\xf4\x40\xc6\x0a\xcb\x68\xbd\x55\x0e\xe6\x38\x58\xe2\x59\xc3\x2b\xda\xb5\x74\x67\xbe\x4a\xe7\x96\xeb\x44\x26\xbe\x98\x80\xee\xb0\xb1\xae\x78\xd3\x4c\x6c\xd0\x19\x4c\x73\xce\x6a\x84\x28\x1c\x78\xfb\x34\xba\xd8\x4c\x5b\xdb\xe5\xd0\x61\xd3\x0a\xdb\xcc\xc3\x42\x63\xbd\x19\xa2\x4a\x7f\xac\xfc\x7a\xd8\xe7\xf3\xf2\x34\x5e\x26\x5a\x0f\xad\x13\x8b\xe3\xa9\x88\x5f\xc4\xd9\x2f\x1a\xd0\xb3\xe0\xf9\xc3\x8c\xfd\x62\xf0\xf9\xc4\xd2\xdd\x77\xc1\xe7\x65\x39\xfd\x97\xc1\xe7\x63\x4b\x9b\x1f\x08\x06\x77\xde\xeb\x7c\xfd\x31\xe7\x37\x32\x81\xfd\xba\xec\x75\xc7\x96\xf5\xd8\xd3\x73\x02\xde\xf1\xd8\x72\xb1\xcd\x72\xb9\x28\x2c\x4a\x67\xbb\x1e\xb8\x3c\x99\x33\x50\x21\xbb\x89\x87\xc0\x8c\x66\x2d\xcd\x85\xd1\xba\xae\x91\xbc\xb5\x2d\x4f\x34\x35\xc8\x77\x34\x57\xb9\x7a\x3a\xe7\x15\xdb\x81\x36\x6f\xc3\x68\xa2\x35\xf3\xa1\x06\xa9\x16\xc7\x76\x09\x65\x0b\xbe\xe1\xc6\x8b\x76\xee\xe0\xbc\x85\x05\x8f\x55\x0a\x60\x1d\xaf\x09\x07\xf7\xb5\x91\x48\xb4\x96\x2c\xe0\xc2\xee\xaf\x93\x2c\xc0\x5e\x25\xda\x1b\xae\xac\x91\x07\x8e\x56\x74\x39\x30\x9c\x1e\x63\x7e\x3d\x30\x9c\x02\x78\x45\xfd\xe1\xe6\x2c\xe0\x8e\xf2\xaf\xcb\x02\x8e\xf5\x85\x3b\xae\x63\x5f\x95\x05\xdc\x5b\xfe\x4b\x3c\xde\x1e\x04\x53\x27\x34\x3f\xee\x88\xfb\xa1\x2b\x8e\x0e\x77\x7e\x85\xc3\xc7\xa6\x26\x77\x3e\x04\x70\x7d\xfd\xe1\x8d\xec\xe4\xde\x01\x29\xf3\x24\xbf\x96\x9e\x11\x3f\x48\x1a\xf6\x02\x7a\xb2\x3f\x8e\x37\x61\x3d\x9b\xcb\xd7\x37\xeb\x0e\xdd\xd0\x85\xae\xcc\xd6\x48\xda\x
60\x65\x44\x41\x71\x3b\x2a\xa6\x36\x8e\xe5\x66\x4a\x01\x91\x13\x3b\x6b\x4c\x92\xc3\x61\x32\xbf\x4e\xe4\x49\x76\xb8\x66\x99\x68\xb7\x19\xe3\xd3\x5a\xb0\x80\x35\x7b\x1f\x34\xea\xb9\xde\x6c\x34\x68\xc6\x02\x55\x76\x92\xe9\x9c\x1a\x50\x0d\xe6\xb2\x96\x48\x05\x39\x96\x28\xc4\xb5\x4e\x1e\xf8\x86\x35\xa2\x66\xeb\x58\xd4\xba\x57\x40\xba\xa2\x16\xe0\xc0\x63\x5f\x9c\x3b\x40\xe2\xd4\xce\x2f\x3c\x2f\x02\xd2\x8d\xf2\xcf\x6d\xe1\x43\xf2\x2f\x07\xa4\x5b\x37\x5d\x9e\x07\xe8\x1f\x7d\x10\xe2\xbc\x3f\xde\x93\x7f\x02\x54\xed\x1a\xf9\x3f\x3d\x20\xdd\xf9\x74\xcc\xf5\x01\xe9\x8d\x55\x87\x43\x40\xaa\x4d\xea\x0f\x34\x77\x0c\x48\xc5\xd4\x26\x3b\x69\xd4\xcc\x01\x33\x68\x0d\xcb\x65\x65\x62\x4c\xfb\x68\xd2\x68\xe6\xd2\x78\xdc\x18\x6e\x02\x2f\xe6\x6c\xaa\x60\xb1\xd5\x9d\x59\xd6\xe3\x42\x27\x31\x51\x74\x16\x78\xd2\x5e\x96\x32\xdd\x95\x87\x36\xd3\x3c\x9e\x3a\x6b\x91\x5c\x94\x8b\xb9\x8e\x93\xb3\x9a\xac\x76\xc8\x90\xb2\x9d\x52\x4e\xcf\xa0\x4e\x1a\xe3\xd1\x74\xea\x6a\x83\x2e\xab\xb2\xed\x9c\x58\xe5\xf2\x50\xb1\x6a\xb6\x3f\xad\x63\xd0\x77\x26\x66\xea\x68\xf4\xbf\x48\x40\x3a\xde\x94\xfe\x00\xf8\xa9\x9d\x5f\x78\xee\x1d\x90\x92\xd7\xc8\xff\x3e\x01\xe9\xfc\xa6\x86\x1f\x1d\x90\x3e\xbb\x0b\xfd\x04\xa8\xeb\x02\x52\xa2\x9d\x7a\x18\x20\x7f\x4e\x40\xba\xf3\xb1\xb1\xeb\xa7\x6c\xef\x66\x48\x2d\x50\xae\xb6\x3f\xdb\xc9\xd9\x77\xa6\x6c\xd9\x66\xab\x20\xfb\xb4\xdf\x8d\x6d\x3c\xb7\x26\xb6\x7e\xa6\x07\xc6\x93\xfe\x7c\x35\x6c\xf5\x05\xf6\xdc\xb0\xc5\x46\xb3\xda\xb4\x38\x8e\x76\xeb\x3d\xb2\xe8\x79\x41\xd4\xce\x69\x7d\x7d\x3e\x6f\xe9\xb5\xb4\x86\x53\x6d\xa3\x5d\x0f\xda\xbc\xd5\x5a\x2e\x5d\x13\xab\x79\x5a\x6c\x07\xb5\x7d\xd0\xe8\x67\x91\x3f\x8b\x55\x48\x7b\xc3\x78\x2a\x95\x6e\x57\xa3\x75\x90\x6e\x15\xc3\x84\xe3\x6f\x40\x7e\xd4\xaf\xac\x16\x41\x47\x23\xa4\x37\x3e\xf6\xdc\x2f\x32\x65\x7b\xa5\x36\xf6\xc3\xa6\x6c\xe7\x01\xf1\x43\xf2\xbf\xcf\x94\xed\x95\x29\xe4\x0f\x9b\xb2\x7d\x76\xeb\xf3\x09\x50\xd7\x4d\xd9\x9a\x35\x60\x1d\x02\xe2\x27\x93\x8b\x87\xae\x38\x0b\x48\xa7\x50\x7c\x28\x20\xdd\xf9\x3c\xe5\xf5\x05\xe5\x37\x32\xa4\x43\xf1\xf5\xfa\xab\xf2\x4e\x40\x51\x8f\x05\xc3\xb8\x5d\x44\x7c\xdb\x4b\x2f\x1b\xf1\xa1\x68\xcb\xa4\xa1\x58\xb7\x92\x5d\x74\xd3\x54\x4b\x24\xe7\xc5\x59\x5a\xf1\x68\x2d\xef\x7a\x4e\xb5\x18\xc6\x34\xbd\xd7\x76\x5a\xf5\x4c\x71\xa3\x06\x3a\x21\xe9\x42\xa9\x10\xb0\x72\x3e\x35\x98\xa6\x83\x44\x7e\x14\x0e\x26\xba\x1a\xe1\x95\x1f\xb3\x57\x87\x80\x51\xd1\xfa\xf3\x7c\xb3\xd4\x8c\x65\xbd\x72\x3e\x8e\x9d\x41\xa6\xb9\x58\x1b\xf3\x79\xd7\x07\x05\x65\xfb\x6e\x34\x9d\xef\x23\x3b\x1a\xda\xb3\xf7\x96\x8e\xf9\xf3\x0e\x7d\x2d\x18\x9d\x18\xe8\x15\x85\x53\x27\x79\xec\x87\x8b\x03\xc3\x27\x0a\xa7\x77\x3c\x2b\x75\xbe\x3f\xe3\xb3\x67\x42\xce\x83\xf5\x8f\xbf\x72\x71\xf7\xa8\x27\x0f\x25\x97\x34\x8d\x1f\xfb\xef\x97\xd0\xef\x14\xd3\x8f\x04\x90\x3b\x1f\x02\xbe\x3e\x80\xbc\x71\xd7\xe6\x75\x01\xe4\x38\x25\xbb\x1c\x40\x4a\xb9\x38\x09\x0a\x72\xb2\x6e\xce\xd3\x95\x62\x35\x57\x1c\xc5\x81\x9b\x0b\xb9\xf2\x33\x7e\x62\xb5\xf0\x83\xdc\x30\x18\xb6\x65\x21\xdb\x58\x69\xb9\x84\x31\x9b\x26\xe4\x00\x17\x32\xc9\x4a\x71\xa6\x5a\x89\x22\xd7\x74\xd7\xcc\xca\x66\xb0\xce\x07\x2c\x96\xf0\xb3\xf9\x74\xac\xbb\xff\xbb\x82\x5f\x23\x5f\x34\x41\x8d\xf4\x3c\x31\x5a\x8f\xe3\x23\xd5\x1a\x45\x4b\x31\xcf\x8e\x4a\x52\xef\xd5\xaa\x89\x78\xae\x5c\xc8\xc6\x9d\x99\x98\x0c\x92\xb1\xf6\x7b\xcb\xd0\x3f\x3c\x80\x5c\x0c\xe4\x3f\x27\x80\x9c\x0b\xfa\x39\x01\xe4\xaa\xa9\xfd\x0f\x0c\x20\x77\xd6\xef\xb3\x01\xe4\xce\x27\xd7\xaf\x0f\x20\xe9\x4b\xc0\x1e\x9e\x83\x13\x7d\x32\xcd\x7e\x37\x03\xc1\xa4\xdd\x9f\x2d\x49\xdf\xa2\x7a\xb7\x64\x0d\x96\x68\xb8\x29\x15\x43\x8d\xc5\xfd\xbe\x19\x14\x82\x74\xbd\x65\x64\x0a\xdd\x5e\
xb8\xee\x69\xa5\xb1\x1d\x5a\x79\x2b\x55\xf6\x1b\xf5\xbe\x0b\x4b\x60\x34\x59\x5b\xe9\x74\x26\x4c\x2c\xc3\x74\xba\xef\x90\xe4\x70\xbd\x9c\x27\x37\x34\xfe\x10\x40\xa8\xdf\x9f\xd2\x8a\xe7\xd9\xc2\x9c\xf3\x91\x9b\x6f\xb6\x9a\x13\x18\x27\x5a\x26\x9e\x1f\x08\x30\x68\x74\x69\x79\x13\xb8\xe1\x74\xdb\x7c\x6f\x3a\xf4\xc3\x03\xc8\xc5\x40\xfe\x73\x02\xc8\xad\xa7\x52\x2f\x07\x90\xeb\xed\xe8\xc7\x04\x90\x9f\xac\xdf\x67\x03\xc8\x9d\xaf\x5b\xb8\xbe\xc8\xfb\xee\x6d\xdf\x9f\x05\xf6\xaa\x1a\xcc\x9e\xfe\x64\xab\xf1\xfe\x39\x2d\x0a\x37\x7a\xb5\x44\x02\x46\x93\x99\x1a\x09\xfc\x6c\x71\xed\xa0\x86\x72\xcb\x31\xe1\x92\x10\x4f\x6d\xde\x9d\x24\x5d\xa7\xa5\xe7\xa3\x02\xa5\xb6\x03\xd8\x42\x93\x81\xe5\x80\x51\xa7\x12\x28\x3a\x75\xbc\x5e\x9b\xe5\x2d\x61\x3b\xb5\x78\x7f\x93\x0b\x5c\xdc\x8c\x9b\xbc\xb8\x5a\xed\x03\xc3\x76\x65\xa9\x6d\x8b\xe6\xd2\x69\x07\xf5\x46\x51\xc9\xdc\x75\xa3\x91\x87\x45\x60\x4a\x5a\xeb\xd3\xf8\x34\x57\xc0\xbd\x71\x02\xa0\xf4\xd1\xb1\x2e\x07\x9d\xb3\x79\xec\x6b\x41\xe7\x64\xc7\xf0\x35\x45\x59\x6e\x3f\xe7\x77\xb2\x03\xf9\x63\x45\xe1\xfb\xc9\x4f\x5d\x23\xff\xc5\xf1\xef\x5f\xae\x06\x61\x3f\xbf\x3a\xe4\x47\xeb\x37\xa9\x15\xcd\xdb\x6b\x24\x77\xbe\x57\xe4\xfa\x0c\x25\x73\xd1\x08\x4e\x0c\xe5\x87\xcd\x1d\x2b\xa3\x56\x08\xa3\x27\xaa\x64\x9f\xf4\xad\xe5\xe6\x33\xbd\xae\xaa\x70\x21\xe3\xa0\x59\xcf\xd7\x7b\x5d\x97\x7a\xd1\xf1\x54\x48\x23\x9e\x1c\x92\x39\x48\xcd\xf0\x1c\x1a\x35\xe2\xd5\xcb\x6b\x58\xb7\x16\xa1\xcb\xd5\xcc\xeb\xfa\x38\x5f\xe1\x73\x67\x15\x0e\xa8\xdf\xb4\xc6\xa8\x30\x2d\x6d\x46\x82\x5a\x38\x6b\xd6\x07\xa5\x7d\x10\xc8\xad\xe2\x03\x7b\x3c\x32\x6a\x1c\xaa\x56\x42\x2c\x57\x3d\xb4\xa6\xc4\x29\x70\x60\xa2\x42\x8d\x78\xd2\xaf\x38\xf3\x59\xde\x6b\x24\x8e\x45\xdc\x7b\x05\x98\x2b\xb2\x9a\x81\x71\xec\xbb\x8b\xd9\xe5\x27\xb2\x9a\x1b\xef\xda\x48\x1e\x5a\xf2\xfa\x9f\x56\xb8\x2e\xab\xf9\x35\xeb\x16\xf6\xf3\x00\xfd\x73\x7c\xe3\xc6\xac\xe6\xce\x17\xe8\x5c\x1f\x74\xb2\x97\x80\x3d\x31\x9c\x1f\x39\xdf\x24\xa5\xee\xea\xc4\x25\x4f\x82\x4e\x26\xd5\x9d\xcf\x52\x56\xa5\x50\x59\xd3\x21\xf2\xe2\xc3\xba\x65\x9a\xb6\x11\xef\x4d\x2b\xd1\xcd\xa2\x58\x2e\x95\x42\x6e\x17\x27\xc5\xa0\x30\xd2\x4a\x50\x5a\xb5\xe6\x24\xbe\xb4\xb7\x8b\xd5\x78\x90\x4a\x2c\x07\x9a\xb3\x22\xf3\x6e\x3f\xae\x8d\xb3\xa1\xec\xad\xc8\xdc\xf3\xb6\xfc\x61\xef\x4d\x67\xec\x78\x29\xb9\xc9\x36\x89\x9b\x86\xfa\x9a\xf8\xbe\xe3\x6f\xe7\x71\x6d\xc5\xe6\x93\x8c\xde\x66\x05\x4f\x26\xc7\x34\x91\x5f\x5b\xc7\xa0\xf2\x8b\x04\x9d\x8b\x03\xc6\xcf\x09\x3a\xfc\x12\xff\x1f\x1e\x74\xbe\x63\xad\xc3\xbe\x47\xd0\xb9\xd5\x37\x6e\x0c\x3a\x77\xbe\x29\xea\xfa\xa0\x73\xbe\x53\xea\xe6\xa9\xd4\xd5\xd1\x5c\x64\x66\x70\xd5\x3f\x71\x93\x93\xa0\x93\xad\xa5\x87\x15\x5e\x40\xf9\x52\xb5\xb7\x56\xf5\x4e\x3c\xba\xae\x40\x3d\x35\xae\x26\xe7\xe3\xe5\xb2\x9a\x89\x65\x67\x24\xd5\x49\x36\x73\xa5\xe6\x2c\x9b\x75\x8c\xd2\xdc\xd7\x63\x0d\x5f\xe6\x8b\xad\x75\xc5\xd1\xe7\x99\xc6\xbc\x97\xce\x67\x1d\xdb\x2b\x20\xd3\x4f\xf7\x6a\xb4\x9a\xb2\x0f\xcb\xd9\xcd\xb1\x30\x49\x99\x8a\xb0\x8e\x53\x16\xe6\xdb\x21\xee\x95\x72\x66\x6d\x3b\x48\x01\xb7\x6e\xe1\x4d\x46\x17\x6b\xc4\xb8\x10\xec\xf8\x87\x03\x7e\x91\xa0\x73\x71\xc0\xf8\x39\x41\xe7\xfc\x44\xe9\x7d\x82\xce\x2f\x54\x1f\xb1\x2f\x05\x9d\x1f\xed\x1b\x37\x06\x9d\x3b\x5f\x89\x76\xfd\x9e\x98\x77\x37\xe9\x5d\x09\xd4\x68\xb8\xcc\x2f\x57\xcf\x8c\xfe\x64\x4f\x4c\x39\x47\x1a\x5d\xd6\x8a\x77\xf2\x4e\x6c\xad\x08\xeb\x84\xf1\x78\xc1\x73\x57\x2b\xab\x37\x63\x45\x3e\x86\x31\xcd\x32\xe7\xeb\x7e\xa5\x57\x36\x97\xa3\x74\xb2\x57\x2b\xd2\x8c\xce\x20\xcd\x74\xe6\xdb\x4e\x7f\x4e\xad\x74\x29\x3b\xce\x6b\xa1\x9d\x4a\x56\x47\xa9\x6c\xae\x34\xde\xda\x87\x7a\x4c\x74\xa8\xf4\x55\xae\x6a\xe9\xe9\xaa
\x67\x78\x4e\x7a\x39\x2b\x57\x32\xcc\x1a\x29\x67\x48\x32\xed\x4c\xb4\x13\xd7\x6b\x5d\x7b\x39\xab\xc0\xf7\xa6\x4b\x1f\xdc\x13\x73\x62\xb4\x57\xec\x49\x79\xb9\x49\xef\xc5\x89\xf0\x77\xea\x31\xf7\x93\x7f\x5e\x0f\xfa\x90\xfc\x17\xd7\xc2\xfd\xd8\x6d\xfc\x71\x40\xdd\x6e\x7c\xf2\xa1\x7a\x4c\xf6\xe7\xe9\x67\xdb\x39\x83\x6d\x18\x49\x5d\xc0\xf4\x63\x27\xa0\xef\x7b\x97\xdf\xf5\x05\xdf\x37\x56\x8c\x7e\xf8\x48\x01\xdd\x96\x3a\x31\xd7\x93\x82\xaf\x9d\xe9\x6d\xdc\xf4\x46\x4b\x14\x54\xa1\xbf\x19\x4e\x3b\xa5\xa2\x1a\x4c\x87\x22\x1f\xe6\xb3\xdd\x0e\xf6\x06\x4d\xb1\xe9\x63\xba\x74\x52\x89\x4a\xb6\x97\x6f\xd4\x0a\xd5\x31\x77\x4b\x5e\xc6\x28\x63\xe4\xcc\xc6\x8b\x76\x16\x0d\x47\x8b\x75\x32\x1b\xcf\x13\x3d\x9a\x75\x37\x43\xf6\x90\xa5\xac\x13\x9b\x9e\xe3\x16\x02\x67\x01\xf0\x82\x4a\x5d\xc7\x13\x7f\x9c\x36\xb6\xa0\x1d\x5f\x36\xa7\xbd\x6d\x2f\x5e\x96\x2c\x63\xd4\x5b\xd9\xa3\x6a\x37\x65\x29\xb7\x5d\x51\xf0\x72\x95\xe9\xb3\x05\xdf\xfb\xc9\x4f\x5f\x25\x7f\xff\xe3\xc9\x3d\x7c\xbf\x9c\x03\xdb\x3f\x37\xc0\x08\x0b\x34\x47\xef\xe9\x77\x1e\xa0\x7f\xdc\xb1\x06\x54\x6e\xe8\x5d\xe3\xb3\x01\xfa\x07\xea\x97\x27\x34\x8d\x6e\x2e\x98\xdf\xf9\xd2\xcf\x7f\xfe\xfe\xee\x35\x7f\x7f\x77\x1f\x44\xd3\x09\x60\x2d\x27\x93\x1c\xda\xba\x2c\x6b\xf5\xc2\x71\x63\x3b\xe5\xcd\xcc\x3a\x58\x25\xc2\x20\xde\x09\x48\x35\x6d\x8d\x69\xbc\x5b\x9b\xbd\xb7\x8f\xe8\x63\x01\xfa\x74\xf7\xe0\x35\x19\xd8\x1b\x2b\x72\x17\xf4\x78\xd9\xb7\x77\x94\x7f\x3e\x40\x7c\x48\xfe\xe5\x00\xfd\x2b\x39\xb0\xfd\xfc\x22\xd5\x1f\xae\x5f\x6c\x66\xb9\xc3\x77\xf4\xfb\x89\x03\xdc\x87\x06\x90\x73\xfc\x7e\xa4\x7e\x68\xcc\xe0\xe0\xd6\x0c\xfa\xce\xb7\x03\x5f\x9f\x41\xbf\xbb\xa2\xf9\x03\x33\xe8\x86\x3e\x93\x27\x93\xf4\x93\x0c\xba\xe0\x8e\x68\xa7\x81\xea\x63\x04\xb1\x3b\x5e\xc1\x58\x61\x91\x50\x8a\xca\x32\x2c\x6d\xa6\x8b\x2d\x99\x5a\x71\x7f\x0a\xea\x79\x98\xeb\x95\x0b\xc2\x5d\x88\x76\x14\x6b\xd9\x20\x51\x82\x6d\x57\xb9\xdb\x58\xd4\x89\xa3\x54\x3d\xbd\xae\x04\xa9\x19\x9c\xc6\xb3\xb3\x56\xa9\xb6\x1a\xef\x83\x68\x49\x2b\xe6\x43\x4d\x4b\xcf\x53\xcd\x6c\x85\x95\x0c\x2d\x6a\x25\x52\xc4\x1c\xe3\x42\xb6\x12\x06\xc9\x6e\xd2\x93\x79\x41\x83\x96\xc0\xf7\x59\x5c\x38\x0d\x90\x57\x64\xb0\xc3\xf9\xb1\xef\xae\xca\x60\xef\x28\x3f\x73\x8d\xfc\x93\x00\xfd\x8b\x3a\xb0\xfd\x73\x03\x8c\xca\x24\xe3\x8b\xf7\xf4\x3b\xc7\xef\x57\x1b\x40\x7e\xe2\x00\x97\x03\xc5\xee\xe2\x2d\xfd\xde\xbc\x49\x7c\x31\x1b\xf8\x54\xdc\xe5\xef\x7d\x3f\x67\xf5\x14\xfb\x4f\xef\x03\xdf\x37\x31\x38\xc6\x8a\x0b\x83\xc7\xeb\x5c\x9e\xa2\xff\xca\xae\xd6\xc6\xf6\x0b\x2e\x8f\x2d\x3c\x34\x8c\x72\xee\x2d\xdc\x50\xec\x9a\x7b\xf2\xf3\xb7\xd9\x58\x3e\x35\x33\x51\x29\x37\x9a\x75\x3b\x57\x6e\xbe\xd3\x4c\xbb\xd8\x4c\xd5\x1f\x50\xa9\x94\x8b\xbd\x53\x8e\xbf\x45\x22\x91\x88\x9d\x4c\x9e\x70\x7b\x21\x30\x52\xad\xe7\x4a\x76\xbd\x17\x29\xa4\x7a\x91\x3f\x1e\xde\x3a\xe2\x6b\x64\x47\xe1\xd2\xa9\x7c\x4d\xfd\xe0\xe9\x87\xfb\x2a\x1e\xbc\xa1\x75\xf0\x96\xca\x2f\x14\x65\xd4\xdd\xfd\x77\x27\xf5\x18\x75\x2f\x69\xf6\x28\xe0\xb9\x52\xae\x27\xe4\x05\x8d\x26\x52\x0c\xa4\x3f\x94\x54\x48\x3f\x78\xfe\xdb\xb7\xc3\x6f\x81\x9c\x7f\xbb\x8b\xba\xcf\x98\x5f\x52\xfc\x0d\xe9\x91\x56\x39\x57\x6b\xa5\x22\x7f\x3c\x7d\xfa\xa9\x96\xdc\x07\xef\x4f\x36\xe0\x65\x1f\x3c\xbc\xa7\xc1\xf0\x85\xf6\x9e\x52\x3b\xb5\x0f\xff\xbb\x93\xbe\x07\x66\x97\x14\x3d\x11\xf3\x5c\xc3\xfd\x8b\x0b\x66\x32\x93\x3b\xed\xf6\xff\xde\x49\xb9\x3d\xaf\x4b\xba\x1d\x85\x3c\x57\xcd\x99\x7d\x8d\xcc\x3c\x3f\x7c\xa9\xdb\x4e\x5e\x30\x9c\x2f\xe4\x42\x3e\xfb\xe5\x5e\x9a\x9e\xb0\xbc\xa8\xf0\xb9\xc8\x4b\x9d\x7e\x49\xeb\x60\xc1\x1e\xfe\x77\x3f\x4d\x83\x05\x7b\x45\xc7\x47\x31\xcf\xb5\xf3\x65\x70\xa1\xbb\xe7\x0b\xcf\x5f\x4c\x1d\x57\x79\x27\x3
f\xde\x49\xc9\x23\xc3\x4b\x8a\x9e\x89\xfb\x50\x0c\x0b\xf8\xec\xf0\xb5\xe0\xe4\xc7\x3b\x69\x7b\x64\x78\x49\xdb\x33\x71\xcf\xb5\x9d\x07\x32\xbc\xe8\xeb\xbb\xc1\x5a\x06\x21\x0d\xe5\xc9\x8f\xf7\xd2\xf7\x89\xe1\x45\x7d\x9f\x8b\x7b\xae\xef\xfe\xf3\x8b\xe3\x6b\xe8\x2f\x82\x70\xe2\xb8\x32\x38\xf9\xf1\x4e\x0a\x1f\x19\x5e\x52\xf8\x4c\xdc\xab\xa9\x81\x13\x04\x0b\xe9\x7f\x8d\xd0\x20\x90\x21\xf7\xc4\x85\x36\x9c\xfe\x21\x9e\xd3\x5f\xee\xd5\x8e\x13\x96\x17\x5b\x72\x2e\xf2\x52\x94\x08\xe4\xfc\x6b\x24\x5c\x3b\xae\x90\xeb\x0b\x2d\x38\xaa\x7f\x6f\xdd\xdf\x54\xfc\x26\xad\xcf\x52\xd4\xe7\xbf\xde\x49\xff\xe7\x4c\x2f\x35\xe2\x82\xd8\x57\x5b\xf2\x40\x7b\xb9\x39\x0f\x36\xc7\xe8\x84\xba\x5c\x06\x8f\xba\xe7\xca\xc9\x54\xf7\x1d\xb5\x13\xf5\x94\xdd\x4c\x1d\x48\xcf\xf9\x44\x2a\xe5\x63\x16\xdb\x6a\xe4\xca\x99\x08\x0b\x7d\x29\x23\x7f\x3c\x50\xfc\x2b\xd2\xc9\xa6\xea\xa9\xa7\xdf\x23\xff\xf3\x77\x04\x82\xc7\xe7\x65\xa2\x29\x83\xf0\x30\x96\xef\x5a\x71\xb5\x96\xcf\xd9\xec\x94\x7c\xc8\x53\x9e\xa9\x18\xc8\xc9\xc4\x71\x07\x7b\xdf\xfb\x1a\x61\x8b\xcd\xf1\x97\x99\xef\xf0\x97\xbe\xb8\xeb\x06\xb6\x09\xe4\xfc\x6a\xc5\x9e\x38\xec\x74\x3a\x3a\xc6\x33\xb5\x5e\x4f\x15\x77\xe4\x4a\xca\xdb\x55\x78\x64\x72\xd0\xe2\x24\xc0\x7c\x50\x91\x87\x37\xb7\x29\x72\xca\x64\xa7\xc8\xf3\x44\xf8\x83\x9a\x04\x7c\x26\xdd\xe5\x8d\x9a\x9c\x32\xd9\x69\x12\xf0\xd9\x27\x01\x39\x0e\xa8\x37\x6b\x72\xca\xe7\x41\x99\xc7\x2c\xe1\xb9\x32\x34\x08\x5f\x57\xe8\x24\x70\xdc\xa6\xd1\x39\xa3\x9d\x4a\x67\xb1\xf1\x5d\x8c\xaa\x5e\x10\x0e\x7c\xd9\xa8\x15\xf7\x73\x61\x46\x03\x19\x11\x8b\xe9\x2c\xc2\xbd\xe9\x6c\x22\x43\xb9\x17\xfb\x7f\x02\x00\x00\xff\xff\x32\x5e\x81\x9c\xc4\xef\x00\x00") + +func ingest_asset_statsCoreSqlBytes() ([]byte, error) { + return bindataRead( + _ingest_asset_statsCoreSql, + "ingest_asset_stats-core.sql", + ) +} + +func ingest_asset_statsCoreSql() (*asset, error) { + bytes, err := ingest_asset_statsCoreSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "ingest_asset_stats-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2b, 0x45, 0xdc, 0x5f, 0xf9, 0xe2, 0xc2, 0x67, 0x7d, 0x36, 0x75, 0x28, 0x3a, 0xf5, 0xf6, 0x41, 0xd3, 0x51, 0xb, 0x88, 0x22, 0x87, 0x7b, 0x4c, 0xd9, 0x67, 0x8d, 0x94, 0x68, 0xa5, 0xb4, 0x4e}} + return a, nil +} + +var _ingest_asset_statsHorizonSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x7d\x79\xaf\xe2\xb8\xd2\xf7\xff\xf3\x29\xa2\xd1\x95\xce\x8c\xe8\x6e\xec\x6c\x4e\x7a\xde\x79\xa4\x00\x61\xdf\x77\xb8\x1a\x21\x27\x71\x20\x2c\x09\x24\x61\xbd\x7a\xbe\xfb\x2b\x08\x6b\x0e\x6b\xe0\x9c\xee\xbe\x0f\x1a\xf5\x9c\x10\xa7\x5c\x55\x2e\xd7\xcf\x55\x2e\x87\xaf\x5f\x7f\xfb\xfa\x95\x2a\x5a\x8e\xdb\xb5\x49\xa5\x94\xa5\x34\xec\x62\x05\x3b\x84\xd2\xa6\xa3\xf1\x6f\x5f\xbf\xfe\xb6\xbe\x1f\x9b\x8e\xc6\x44\xa3\x74\xdb\x1a\x1d\x1a\xcc\x88\xed\x18\x96\x49\x89\xdf\xf8\x6f\xf0\xa8\x95\xb2\xa4\xc6\xdd\xce\xfa\x71\x5f\x93\xdf\x2a\x72\x95\x72\x5c\xec\x92\x11\x31\xdd\x8e\x6b\x8c\x88\x35\x75\xa9\xbf\x29\xf0\xd7\xe6\xd6\xd0\x52\x07\xef\xbf\x55\x87\xc6\xba\x35\x31\x55\x4b\x33\xcc\x2e\xf5\x37\xf5\x56\xab\xc6\x85\xb7\xbf\x76\xe4\x4c\x0d\xdb\x5a\x47\xb5\x4c\xdd\xb2\x47\x86\xd9\xed\x38\xae\x6d\x98\x5d\x87\xfa\x9b\xb2\xcc\x2d\x8d\x1e\x51\x07\x1d\x7d\x6a\xaa\xae\x61\x99\x1d\xc5\xd2\x0c\xb2\xbe\xaf\xe3\xa1\x43\x4e\xba\x19\x19\x66\x67\x44\x1c\x07\x77\x37\x0d\xe6\xd8\x36\x0d\xb3\xfb\xd7\x96\x77\x82\x6d\xb5\xd7\x19\x63\xb7\x47\xfd\x4d\x8d\xa7\xca\xd0\x50\xbf\xac\x85\x55\xb1\x8b\x87\xd6\xba\x99\x94\xad\xca\x65\xaa\x2a\x45\xb2\x32\x95\x8a\x53\x72\x33\x55\xa9\x56\xa8\x42\x3e\xdb\xda\xb6\xff\xd6\x33\x1c\xd7\xb2\x97\x1d\xd7\xc6\x1a\x71\xa8\x58\xb9\x50\xa4\xa2\x85\x7c\xa5\x5a\x96\x52\xf9\xea\xd1\x43\xa7\x0d\x3b\xaa\x35\x35\x5d\x62\x77\xb0\xe3\x10\xb7\x63\x68\x1d\x7d\x40\x96\x7f\x7d\x46\x87\xea\xe6\xaf\xcf\xe8\x72\x6d\x57\x9f\x27\xa0\xd7\xdb\xe3\xd2\x79\x0c\xae\x0d\xf9\x5a\x67\x47\xad\x0e\xc4\x37\xcd\x53\xf9\x98\xdc\x3c\x6a\xb9\x25\xbb\xe1\xaa\x43\x74\x9d\xa8\xae\xd3\x51\x96\x1d\xcb\xd6\x88\xdd\x51\x2c\x6b\x70\xfd\x41\xc3\xd4\xc8\xa2\x73\x24\x9c\xe9\xe0\x8d\xa1\x3b\x1d\xcb\xec\x18\xda\x23\x4f\x5b\x63\x62\xe3\xfd\xb3\xee\x72\x4c\x9e\x78\xfa\xc0\xc9\x53\x5c\x3c\xf6\xec\x90\x68\x5d\x62\x6f\x1e\x74\xc8\x64\x4a\x4c\xf5\x21\x11\x8e\x1e\x1f\xdb\x64\x66\x58\x53\x67\xfb\x5d\xa7\x87\x9d\x5e\x40\x52\xcf\x53\x30\x46\x63\xcb\x5e\x4f\xc7\xad\x4f\x0d\x4a\x26\xa8\x2e\xd5\xa1\xe5\x10\xad\x83\xdd\x47\x9e\xdf\x19\x73\x00\x53\xda\xce\xcb\x00\x4c\x1f\x3f\x89\x35\xcd\x26\x8e\x73\xfd\xf1\x9e\x6b\x6b\x1b\xdc\xe9\x0c\x2d\x6b\x30\x1d\xdf\xd1\x7a\x7c\x8b\x25\xaf\x15\x36\xec\x07\x09\xef\x9c\xee\xdd\x0f\xac\xfd\x84\xae\x13\xfb\xbe\xa6\x3b\xf2\x01\x1e\xd9\xaa\xf5\xbe\x87\x36\xae\xf5\x81\x4e\x8e\x5d\xf1\xad\x27\xc6\xeb\x07\x7a\xee\xcd\x11\x70\x4e\x1c\x90\xb2\xbc\x69\x46\xbd\xfd\x4c\xbf\xa7\xb1\xe5\xf1\x61\xdd\x6c\x68\x38\x6e\xc7\x5d\x74\xc6\xb7\x49\xae\x5b\x5a\xe3\x7b\x5b\x92\x7b\x9b\xed\xa0\xe4\x7a\x63\x65\x37\xdd\x6f\x36\xbb\xed\xc5\x94\xe5\x7d\x83\xe9\x61\xe4\x5a\xdb\x8e\x33\xbd\xd5\xf3\xbe\xb1\x6a\x69\xe4\xc1\x75\xc1\xde\x0c\xc6\xd8\x76\x0d\xd5\x18\x63\xf3\x2a\x78\xdf\x7a\xb4\x33\x7e\x70\x6d\xb2\x47\xb4\x47\x39\x38\xff\xe0\xc3\xfd\x6f\x94\x77\x4f\x7f\x5e\xc3\x0f\xa7\xef\x0d\xe6\x7a\x24\xb7\x7f\xae\xf1\x61\xb7\xf4\xdb\x18\x43\xe7\x4e\x0e\xba\x96\x3d\xee\x8c\x8c\xee\x76\xc1\x70\x85\x05\x5f\xcb\xbb\x65\x7c\x7c\xbd\x77\x8d\xf2\xbd\xc6\xe9\x3d\x1d\x2d\x64\x6b\xb9\x3c\x65\x68\x5e\xcf\x31\x39\x2e\xd5\xb2\xd5\x3b\x69\x5f\x30\xba\x17\x50\xde\x0e\xf7\x75\x4a\x9b\xab\xfb\xc5\xdf\xa1\x74\x45\x2e\xd5\xe4\x7c\x34\x80\xce\xd6\xeb\x6c\x87\x4c\x1e\xee\xf9\x84\xc8\xdd\x4f\x6b\xe4\xce\xb6\x87\xd5\xec\xdd\x12\x5e\x98\xf5\x8f\xc8\x77\x9e\xc4\x7d\xcf\x6e\xd7\x7d\xf7\x35\xde\x2e\xf2\xee\x96\x6d\xeb\x01\x1e\x91\xc5\x7b\xe4\xce\xb6\xdb\xe5\xdf\xfd\xfc\xec\xd6\x8b\xf7\x70\xe4\xf3\x21\xd7\x1b\x1f\xb9\x84\x6d\x43\x29\x91\x28\xcb\x09\xa9\x7a\xa6\xf1\xc8\x58\x47\x1c\x86\x4a\xfe\x30\xa7\x23\x62\x1b\xea\xbf\xff\xf9\xf3\x8e\xa7\xf
0\x22\xc0\x53\x43\xec\xb8\x7f\x60\x73\x49\x86\x9b\x54\xcc\x1d\x4f\xe8\x86\x7d\xf6\x91\x78\x2d\x1f\xad\xa6\x0a\xf9\x2b\xf2\x74\x70\xb7\x7b\xe0\xee\x0b\xf5\x8e\xd1\x2b\x34\x76\xd2\x3d\x41\x63\x2d\xeb\xe6\xf1\x03\xf3\x5f\xa8\x47\x04\xd9\x88\x7e\x07\x05\xb9\x59\x95\xf3\x15\x1f\x89\xe1\xb8\xeb\x4c\x86\x3b\x5b\x8c\x26\xe5\x9c\xf4\xae\x87\xbf\x7e\xf3\xb2\x70\x79\x3c\x22\xdf\x77\xdf\x51\xd5\xe5\x98\x7c\xdf\x3e\xf2\x17\x55\x51\x7b\x64\x84\xbf\x53\x5f\xff\xa2\x0a\x73\x93\xd8\xdf\xa9\xaf\x9b\xe4\x5c\xb4\x2c\xaf\xc7\x6b\x4b\x79\x47\xef\xb7\x13\x8a\xa7\x37\xb7\x84\xa3\x85\x5c\x4e\xce\x57\xaf\x50\xf6\x1a\x50\x85\xfc\x29\x01\x2a\x55\xa1\xde\x76\x69\xb7\xdd\x77\xce\x86\xc8\x9b\xbf\xe7\x9d\xf8\xdb\x3e\xf7\x1a\xba\x29\xcf\x89\x2e\xf3\x85\xaa\x4f\x9f\x54\x23\x55\x4d\xee\xd9\x3a\xce\xbf\x9d\x74\x7f\xa0\xe2\x63\xe4\x11\xe1\xdf\x11\xd9\x28\xa0\x98\x0d\x8f\xbb\x95\x52\x96\x1a\xdb\x96\x4a\xb4\xa9\x8d\x87\xd4\x10\x9b\xdd\x29\xee\x92\x8d\x1a\xee\xcc\x17\x1e\xb3\x7b\xdb\xd0\xb6\xec\xef\x6c\xf5\xc0\xff\x6e\x6c\xcf\xe9\x72\x6f\xd9\x37\xe9\x53\x65\xb9\x5a\x2b\xe7\x2b\x47\xdf\xfd\x46\x51\x14\x95\x95\xf2\x89\x9a\x94\x90\xa9\x8d\xf4\xb9\x5c\xcd\xf3\x77\x95\x6a\x39\x15\xad\x6e\x5a\x48\x15\xea\x5f\x9d\x7f\x51\x15\x39\x2b\x47\xab\xd4\xbf\xe0\xfa\xca\x3f\x1a\x37\x27\xe2\x73\xd2\xdd\x22\xff\x32\xe1\xe8\x73\xc2\xdd\xe3\xa9\x9e\x93\xef\x8e\x1e\xf6\x22\xee\xbf\x0a\x24\xe1\x1f\xbf\x51\x54\x54\xaa\xc8\x54\x23\x29\xe7\xa9\x7f\xc1\x7f\xc3\x7f\xc2\xff\x82\xff\xa6\xff\xf9\x9f\x7f\xd1\x9b\xbf\xe9\x7f\xd3\xff\x50\x55\xef\x26\x25\x67\x2b\xf2\x5a\x29\x72\x3e\xf6\xe7\x59\xcd\xdc\x81\x03\x4f\x6a\xe6\x76\x0f\x1f\xad\x99\xff\x17\x44\x33\xef\x31\x75\xab\x87\x3d\x0e\xdf\xa7\x88\x03\x6c\xbf\xa3\xb8\xe1\x98\xa2\x2a\x6b\x5d\x51\x7f\x1f\x3c\xc0\x17\xef\xeb\x6a\xab\x28\x53\x7f\x1f\xcf\x88\x3f\xcf\xcd\xda\x97\xf2\xe8\x27\xe8\x63\x71\x37\x8d\xef\xe7\xf0\xec\x12\xe8\x59\x2e\xcf\x11\xf5\x71\x7a\x32\x21\x4f\xd9\x3d\x58\xd9\x7b\x6e\xcf\x2d\xf3\x9e\xe6\xf6\x0c\x51\x3f\xb7\xc7\x93\xe4\x2a\xb7\x6b\xe4\xd2\x88\x8e\xa7\x43\xb7\xe3\x62\x65\x48\x9c\x31\x56\x09\xf5\x37\xf5\xf6\xf6\xd7\xe9\xdd\xb9\xe1\xf6\x3a\x96\xa1\x1d\x6d\xa5\x9d\xc8\x7a\xbc\xfe\xdd\x8a\xb8\x99\x60\xf7\x89\xe7\xcd\xc5\xe3\xe0\xdb\x93\xc8\xd0\x28\xc5\xe8\x1a\xa6\xbb\x59\x18\xe4\x6b\xd9\xac\x27\x0e\x1e\xad\x97\xf1\x94\xda\xc3\x36\x56\x5d\x62\x53\x33\x6c\x2f\x0d\xb3\xeb\x6b\x66\x4e\x47\xfb\x25\x3f\x65\x98\x2e\xe9\x12\xdb\xd7\x44\x1f\xe2\xae\x43\x39\x23\x3c\x1c\xbe\xef\xc6\xb5\x46\xc3\xf7\x9d\xfc\x41\x73\xdc\x9f\xfb\x96\xef\x87\xdd\x1f\x37\x04\x55\x87\x3f\xdb\xb1\x57\x89\x4b\x16\xef\x14\x32\x1e\x0f\x8d\x4d\xce\x9e\x72\x8d\x11\x71\x5c\x3c\x1a\x53\xeb\x31\xdb\x5c\x52\x2b\xcb\x24\xef\x19\xbd\x14\x15\xed\xd6\xa3\xdb\x70\xea\x3e\x9e\xf7\xc1\xd7\x05\xaa\x5b\x33\x94\xca\x55\x6f\x45\x07\x37\x5f\xa4\xf2\xd1\xb2\xbc\x59\x7e\x45\x5a\xdb\xaf\xf2\x05\x2a\x97\xca\xd7\xa5\x6c\x4d\xde\x5f\x4b\xcd\xc3\x75\x54\x8a\x26\x65\x0a\xde\x12\x26\xb0\xda\xfd\x84\xde\x99\xe2\x36\xe9\x41\x99\x64\xe1\xce\xf0\xf0\x8f\xb7\x0b\x12\xbf\x7d\xff\x6e\x93\xae\x3a\xc4\x8e\xf3\xa7\x7f\xb8\xbc\xbd\x8a\x33\xb6\xc5\xb3\x7f\x5e\x19\x28\x2f\x36\x7e\x5a\x32\x2f\xa3\xb3\x97\xeb\xfc\xcc\x38\xe4\xea\xce\xb3\x79\xb6\xb9\x6a\x69\xe7\x9a\x43\xfa\x7c\x73\x2f\xfd\x77\xe6\x01\x8e\xbf\x36\xc3\xce\xa7\x17\x5e\x64\xb6\xc7\x34\x3f\xcd\x68\xaf\x09\x42\x15\x1a\x79\x39\x46\x45\x5a\x37\x24\xf2\x32\x74\xd7\x05\xda\xd3\xf2\xdd\xfe\x66\x68\x97\x78\xdb\xe5\x7c\x9e\xb5\xba\x2d\x9d\xad\xd9\xf9\xe6\x4c\xe7\x92\xa7\x7f\x9f\xe2\xba\xd4\xf2\xf7\xcd\xc6\xc7\xef\x17\xac\x79\x63\xc7\xe7\x6f\x69\xc4\xc5\xc6\xd0\xa1\xfa\x8e\x65\x2a\x97\x8d\x6d\x97\x28\x7b\x56\x0f\x5b\x3a\x5b\x
3d\xec\xf6\xad\x2f\xf0\x76\xb4\x99\x7c\xd7\x2c\x3c\xb7\x8f\x7d\xfe\xc1\xad\x5a\x8e\x32\xa3\x9b\x81\xd8\xf3\xb1\xf3\x72\xc0\xd7\xc3\x61\x20\xee\x6b\xbf\xdf\x4c\xf6\x01\x93\x35\x75\x0f\xd8\xe4\x7f\xc6\x26\xd8\xbd\xf9\x90\xd7\x76\x3a\xd6\xee\x6e\xbb\x37\x9d\xed\xa5\x6f\x9f\xfd\x9d\x2c\xf0\xdd\x7a\xc0\xc5\xc3\x8e\x6a\x19\xa6\x73\xde\x06\x75\x42\x3a\x63\xcb\x1a\x9e\xbf\xbb\xd9\xf9\xd4\xc9\xa5\xb1\xde\xdc\xb6\x89\x43\xec\xd9\xa5\x26\xeb\x75\xa8\xbb\xe8\x6c\x96\x49\xc6\xea\x52\xab\xb1\x6d\xb9\x96\x6a\x0d\x2f\xca\xe5\x1f\xa3\x9d\xb1\x10\xac\x11\x7b\xb3\xbc\xf0\xbe\x77\xa6\xaa\x4a\x1c\x47\x9f\x0e\x3b\x17\x0d\x65\x2b\x38\x36\x86\x44\xbb\xdc\xea\xf2\xb4\xba\x90\xbb\x7e\x76\x96\x5d\xd8\x0f\xb9\x81\x79\xf7\x7b\x9b\xdb\xfe\xeb\x51\x91\x5f\x0b\x63\x57\xfb\xf8\x2c\x58\x7b\x48\xd0\x27\x61\xee\x6a\x5f\xef\x61\xef\x7c\xf3\x2b\x30\x78\xb4\xb3\xf3\x32\xdb\xbc\x15\xe6\x9c\x56\x55\x5d\x08\x85\xd6\x2b\x7f\xd5\x13\x65\x83\x80\x4f\x02\xe0\x76\xe6\x5b\x53\x5b\xdd\x97\x69\x5c\x80\x9e\x9d\x3b\x79\x7b\xfb\xfe\xfd\x72\x28\x76\x79\x1e\x6c\x37\xd6\x9e\x55\xe7\xb6\x16\xf0\x8f\x97\xae\x17\xb6\x2e\x31\x08\x7a\x6d\x6a\x61\x2e\x76\xeb\xab\x44\xbc\xd6\x68\x5b\x1c\x79\xad\x89\x17\x07\x9f\x6d\xf0\xbe\xa6\xf3\x46\xbb\xab\xdd\xed\x5b\x5d\xe9\x71\xc3\x92\xe1\x74\x1c\x32\x1c\x12\x9b\x52\x2c\x6b\x48\xb0\xb9\xc3\x24\x43\x25\x1d\xf3\x04\x7f\xbd\xef\x4e\x31\xf9\x50\x4d\xd4\xf1\xa1\xf5\x49\x3d\x93\xff\xe6\xd1\x36\xfd\xd9\xca\xcf\x0d\xd7\x9d\x4d\x6d\x30\x15\x4d\xca\xd1\x0c\xf5\xc7\x1f\xc7\x1a\xfc\x1f\x0a\xfc\xf9\xe7\x2d\x52\xe7\x1e\xdf\x29\xed\xff\xbd\xd3\xe3\x1d\xf4\x4e\x74\xea\x23\xef\x53\xf8\x86\xc1\xab\x53\xe9\xfc\x0e\xf7\x0b\x26\xd7\xf9\x9a\x85\x3b\x91\xf4\x1e\x17\xf6\x0c\x96\xde\xaa\x0f\x78\x0d\x9a\xde\xe8\xe5\xb3\xf0\xf4\x41\x61\x9f\x44\xd4\x1b\xbd\xbd\xc7\xd4\x4b\x0f\x5c\x41\xd5\x93\x9a\x90\x17\xda\xea\xce\x3e\x8f\x59\xba\x3b\x88\xda\xfa\xfe\x1b\xa1\xd9\xbd\xc0\x7b\x1d\x43\xcf\xb6\x3d\x74\x7d\x76\xbe\xac\xa3\x80\xcb\x61\xc4\xa5\x00\xed\x87\x84\x58\xee\xa2\x43\xcc\x19\x19\x5a\x63\x72\x2e\x6d\xe9\x2e\xd6\x01\xcf\x74\xe8\x5e\xb8\x39\x22\x2e\xbe\x70\x6b\x1d\x6a\x5d\xba\xed\x18\x5d\x13\xbb\x53\x9b\x9c\xcb\xb0\x89\xfc\x9f\xff\xfe\xe7\xb0\x76\xf9\xcf\xff\x9e\x5b\xbd\xfc\xfb\x1f\xbf\xce\xc9\xc8\xba\x90\x0c\x3b\xd0\x32\x2d\x93\x5c\x5d\x0b\x1d\x68\xbd\x27\xb3\x95\xcc\x18\x91\x8e\x62\x4d\x4d\x6d\x93\xb1\x16\x6c\x6c\x76\x89\x3f\x1a\x3b\x85\xd6\xb5\x26\xd6\xd4\xba\x44\xbb\x1d\x6e\x6d\x73\x7f\x86\xb6\x9b\x6d\xbb\x12\xae\x7b\x5c\x84\x37\xdd\x36\xf5\x72\x37\xaa\xc3\x2a\x72\xf5\x4a\x9e\xf4\x38\x23\x75\x9c\x25\x7d\x2c\x8e\x78\x9d\x10\x77\x16\xcf\x5d\x15\xea\x6a\xfc\x71\x8f\x90\x17\x91\xf6\x65\x62\xde\x5d\x7f\x78\x55\xd0\x1b\xb0\x70\x5e\xd4\x18\x76\x31\xa5\x5b\xf6\x8d\xdd\x22\x2a\x26\x55\xa5\x1b\xe2\xa5\xf2\x15\xb9\x5c\xa5\x52\xf9\x6a\xe1\x64\xc7\x68\x83\xa2\x15\xea\x0f\xf8\x85\x7a\x83\x00\x88\x02\xe2\x01\x00\x6f\x5f\x28\xb8\xf9\xef\xad\xe7\xba\x63\xe7\x7b\x38\xec\x12\xc7\xfd\xa6\x5a\xa3\xf0\xb7\x39\x19\x0e\xbf\x0e\x4c\x6b\x6e\x86\x1d\x97\x0c\x87\xd8\xfe\xe6\x5a\xa3\xe1\xdb\x9f\x7f\xdd\xec\x84\xfe\x42\xbd\x31\x00\x00\x00\x01\xcb\xb0\x5e\x3f\xf4\x07\xf4\xc3\x78\xc2\xec\x3e\x9e\x34\xeb\xce\xdf\x2e\x2b\xf7\xda\xfe\xd3\xa3\x0a\xf6\xef\x41\xed\xf8\x7a\x83\x1d\xc3\x34\x5c\x03\x0f\x3b\x5e\x3d\xd0\x37\x67\x32\x7c\xfb\x42\xbd\xd1\x00\x8a\x5f\x01\xff\x15\x30\x14\x14\xbe\xd3\xc2\x77\x16\x7d\x03\x0c\xcd\x8a\x7c\x08\xd0\x7e\x89\x2f\x52\xa7\x3b\xde\x39\x8f\x13\xfb\x52\x96\x1d\xd7\x32\xb4\xeb\x3d\x89\x3c\x87\x1e\xe9\x89\xe9\x4c\x1d\xb2\xc7\xdb\x8e\x61\xbe\x3b\x5b\x72\xb5\x3f\x96\x05\xac\xf0\x48\x7f\x6c\x07\x6b\x5a\xc7\x9f\xa1\xbb\xda\x07\xc7\x72\x0c\xfd\x48\x1f\x5c\
xc7\x43\xf7\x5d\x9c\xb1\xd9\xd9\xbd\xda\x05\xcf\x00\xfa\x21\x31\xf8\x5d\x17\x5b\x5f\x7e\x47\x17\x02\x0b\xb9\x47\xba\x40\x9d\x91\xa5\x19\xfa\xf2\x7e\x29\x04\xc8\xd3\x0f\x75\x21\x9c\x48\xb1\x2d\xe8\xbe\xa3\x1f\xc4\xf2\xcc\x63\xfd\xac\x07\x1d\x77\xbb\x36\xe9\x62\xd7\xb2\xaf\xdb\x94\x08\x20\x10\x1f\x21\x2f\x6e\xc8\x7b\xd9\xdb\xce\x42\xb3\xaf\x53\xa7\x11\x7c\x68\xa8\x21\xd8\x90\xdf\x8e\xc2\x26\x66\xbf\xde\x01\x27\xa2\x87\xb4\x03\xe1\x71\x07\xfb\x20\x70\xed\x00\xae\x77\x24\xf2\xe2\x63\x92\xd0\x27\x03\xbd\x0d\xbb\xbd\x23\xc4\xd7\x7a\x82\x00\x71\xec\x43\x23\x02\x19\x4f\x9c\x7d\xb2\xe2\xea\x88\x43\x48\x23\xfe\x31\x49\xd8\x8e\x6e\x2c\x76\xc7\x29\xac\xd1\xb0\xa3\x1b\x64\x78\xd5\x35\x42\xc8\x41\xf8\x90\x13\x86\xdc\x6e\x17\x69\x97\xdd\x5f\xdc\x10\x83\x47\x8f\xb9\x79\xc8\x77\x0c\xb3\x4b\x1c\xb7\xf3\x7e\xff\xe0\x46\x57\x48\x14\x1e\x1b\x11\x74\xb2\x70\xd9\x6c\xd4\xe0\xeb\x60\x02\x69\x00\x18\x76\xdb\xc9\x05\xac\xbd\x5a\x75\xf0\x28\xd8\xbe\xab\x3c\x38\x5e\xd2\x24\xa2\xcd\x4c\x82\x2f\xe7\xd9\x42\x3e\x25\x17\xa3\xb9\x7c\x3c\x82\x18\x5a\x62\x19\xbe\xcd\x15\xf3\xb1\x4a\x39\x9b\x68\x64\x50\x22\x92\x8d\xe6\x4a\xd9\x54\xbc\xc0\x56\x90\xdc\x6a\xd4\x6b\x7e\x0d\x5d\xec\x64\xbd\xaa\x48\x48\x5c\x23\x52\x6c\x49\x5c\x8b\x6d\x48\x72\xb2\xd9\x28\xd3\xb5\x4c\x81\xae\x15\xd8\x48\x2d\x91\xac\x95\x10\x2b\xd7\x8a\x99\x42\x9e\x2e\x25\xeb\x6c\xa3\x9c\x2c\xa4\xca\xf9\x4c\x26\xf9\x6e\x18\x2e\x76\xc2\x6c\x24\xa1\x99\x52\x9c\x4e\xd6\x64\x8e\x96\x72\xcd\x5a\xbc\x96\x64\xa4\x56\x5a\x6a\x36\x13\xcd\x66\x9d\xae\x27\x9b\xad\x56\x99\x97\x5b\x4d\xb9\x5a\xcc\xc4\x9a\xed\x8a\xd4\xe0\x51\xb3\xc0\xde\xdd\x09\xbb\xee\x24\x52\x2e\xb6\x92\xa9\x2c\x1d\x4d\x31\xf1\x7c\x89\x8d\x34\xb3\xf1\x5c\x3e\x96\x8d\xa7\x6b\xf9\x62\x8d\x4e\xb6\x98\x76\x2e\x5e\x49\x16\xf2\xb5\xa8\x5c\x90\x2a\x0d\x54\x8a\xa2\x42\x93\x4e\xbe\x05\xad\x92\x59\x2f\x9a\x6f\x8c\xf5\xb6\xb2\xf0\x50\x14\xfc\xcd\x21\xd7\x2b\x48\xbe\x50\xec\x17\xca\xb5\xa7\xe4\x0e\x0b\x7c\x5f\x1b\x12\xd8\xfe\xbc\x98\xee\xd8\xfa\x54\x9b\x68\x86\xdb\xc1\xc3\x71\x0f\x9b\xd3\x11\xbb\x9e\x33\x91\x6a\xf4\xed\x23\x86\xf3\xb4\x77\xfa\x42\xef\xb5\x4a\xec\x13\x7a\x67\x2e\xf4\x5e\x89\x16\xaa\x6f\x4f\xce\xca\x20\xa5\x20\x2f\x31\xb2\x93\xf0\xfb\x0b\xc5\xdc\x6b\x62\xe7\x2a\x41\x82\xda\xd8\xae\x1a\xe4\xc8\xc8\x18\x96\xe1\x44\xc4\xd2\x2c\xcf\xed\xc3\x9c\xff\xfc\xee\xa1\xf4\xef\xdf\xa9\xdf\xe1\x37\x86\xa7\xd7\x41\xd7\xef\x5f\xa8\xdf\x0f\xe5\x46\xeb\x5b\xb5\x4a\xec\xf0\xa5\xbb\x1c\x6f\xbe\xf4\x8f\xdb\xa1\x85\x57\x76\xb4\x6e\x13\xd4\x7c\x7e\xff\xdf\x4b\x06\xe4\x17\x8c\xf6\x09\x46\x6f\x14\xfe\x5f\x25\x18\xcf\xf1\xf0\xec\x88\x31\xf0\x1b\xcf\x21\xc4\x0b\xbf\x96\x64\xd0\x27\xd9\x99\x21\xfb\x45\x25\x5b\x4b\x02\x00\xcf\x22\xc4\x41\x5e\xf4\xc6\xcc\x1b\xb4\xa1\x31\x32\x3c\x63\xdc\x27\x1b\xbe\x6d\xff\xff\x8b\x4a\x28\xd2\x17\xfc\x88\x20\x7e\x13\xb9\x5f\x4f\x32\xe8\x93\xec\x8c\x55\xfe\xa2\x92\xd1\x5f\x28\x9a\x43\xbc\x28\x00\x24\x20\xe6\xbc\xef\xbf\x6a\x8f\x6b\x34\x0e\x2a\x5c\x30\xfc\x7e\x68\xd8\x4e\x84\x3b\xe7\xff\x7f\x7d\xe1\x20\x14\x77\xfe\xe4\x8c\x70\x00\x6c\x82\xa9\x5f\xcd\x2e\x19\x9f\x74\x17\x86\xee\x17\x95\xee\x30\x76\x3c\xb8\xb4\xe2\x02\xe0\xdb\x36\x8f\xfe\x5e\xb4\x48\x35\xfa\xb3\x8a\xc6\xf8\x44\x3b\x3f\x70\xbf\xa4\x68\x7b\x5f\x49\x03\x78\x61\xd5\xe5\xd9\xe2\x37\x91\xde\x6c\x4c\xfc\x52\x36\xc9\xf8\xa4\x3b\x33\x70\xbf\xb8\x74\x90\x45\xac\xc0\x02\x0e\x21\x6f\xec\x3c\xf1\x5c\x7b\xba\x7e\xe4\x29\xa7\xfd\x6b\x6a\x82\xe5\x77\x88\xff\x81\x9a\xf8\x89\xe7\xf3\x41\x13\x02\xda\x21\xe8\x7b\x4d\x04\x4c\xcb\xfd\x52\x36\x41\x7f\xa1\x20\x82\x48\x14\x10\x43\x0b\x5b\xcf\x06\x4e\x63\x13\x91\xa6\x19\x06\xd1\x80\xe1\x05\xee\xdb\x3a\x88\x11\x00\xfa\xa5
\x64\x84\x7b\x19\x11\xb3\xf3\x00\xff\xb5\x32\x0a\x90\xdd\xcd\xed\x40\x32\xfe\xc4\xb3\x76\x6f\xab\x02\xc7\xed\x66\x6d\x20\x19\x7f\xf2\xc5\x3d\xa4\x05\x81\x15\x01\x27\x0a\xde\x02\x91\xf7\xc0\x78\xea\xf6\x3a\x36\x99\x59\x2a\x56\x86\xa4\xa3\x0f\x71\xf7\xf7\xef\x9b\x1c\xe2\x23\x6e\xcf\xa3\x0d\x01\xd8\x4e\x76\x6e\x43\xbb\x67\x8d\x48\x47\xb3\x46\xd8\x30\xd7\x52\xee\x8a\x28\x1e\x72\xa8\x5b\xca\x2c\xdc\x4e\xb1\x13\xae\x27\x53\xc3\x26\x5a\x50\xa6\x05\x4e\x10\x45\x46\xe0\x05\x6f\xd0\xbd\x31\x77\x5c\x6c\xbb\x86\xd9\xed\x28\x78\x88\x4d\x95\xbc\x8b\xe9\xee\xee\x81\x3d\xed\xe1\xee\x58\x71\x67\x38\x26\x76\x8d\x19\x79\x48\x59\xc7\xfd\xad\x75\xe7\x89\x34\x27\x46\xb7\xb7\xee\x10\x7e\xa1\x7e\xf7\x92\xba\x9d\x01\x59\x7e\x9a\x07\xd9\x70\xc5\xd2\x68\x6b\x77\x1f\xa5\xe7\x6d\x0f\x1f\xae\x67\x9f\x44\x77\xea\xf9\xa3\x27\x38\xbd\xe3\x8a\x17\xb6\x73\xf0\xc3\xf4\xec\xf5\xf0\xe1\x7a\xf6\x49\x74\x9f\x9e\x83\x2e\xae\xfe\xf7\x8e\x7d\xe1\x73\x87\x46\x83\x6e\x99\xec\x0e\x8e\xee\x84\x15\xbe\x50\x6f\x0a\xa4\x79\x0e\x41\xac\xab\x58\x87\x22\xe4\x90\xc2\x33\xa2\xce\x11\x05\x0b\x02\x24\x34\x27\x32\x40\x13\x59\xcc\x20\xcc\xd1\x02\xe4\x34\x05\x42\x01\x8a\xaa\xaa\x71\x0c\x0b\x14\x71\xb3\x97\x05\x59\x11\xea\x3a\xaf\x2a\xba\x82\x00\xad\x30\x02\x80\x40\x21\xba\xc2\xeb\x00\xf3\xac\xc6\xd3\xb4\xc8\x28\x2a\x00\x1c\x23\xaa\x34\x8b\x59\x55\x27\x0c\x60\x04\x45\x47\x10\xb1\x0c\x23\x7a\xa5\x6f\xb4\x6f\x17\x9d\xff\xce\xa0\xef\x2c\xf2\x6f\xae\x7b\x5f\x73\xdf\x00\x40\x3c\x73\xfb\xae\x97\x87\x67\x04\x86\x17\xbe\x50\x90\x5f\x8f\xe7\xbb\xcf\x7a\x05\xbd\xfe\x77\xf7\xcf\xee\x5b\xb8\xff\x03\x7e\xa1\xde\x24\x49\x92\xa2\x8b\x4a\x39\x4c\x2f\xc3\xee\x3c\xb3\xea\x46\xa2\xa1\x10\x2d\x64\xda\xf9\x96\x31\x40\x73\xa9\xc6\xcd\x07\xe3\x5c\xb8\x2f\xb1\x33\x26\x0e\x72\x5c\xc5\x36\x9b\x59\xba\x28\x0c\x2b\x2b\x8b\x4f\xd5\x63\x23\xb7\x05\xdb\xb2\x92\x8e\x54\xc3\xb1\x41\xa6\x5e\x8f\xf1\x69\x49\x6b\x83\x9e\x3a\x72\xd6\xa4\xa5\x66\xb1\x9e\xa3\xe7\xd2\xfe\xd3\x5e\x68\xa5\x86\x1e\xcb\xc1\xdc\xa2\x16\x9d\x12\x5d\x1a\x91\xb0\xd2\x9f\x14\xd4\xde\x6a\x90\x94\x05\xb1\xe0\x54\x07\x16\xc7\xab\x6c\x8a\x35\x24\x55\x6d\x46\x2a\xd5\x90\x0e\xa2\x26\x58\xc4\x7b\x85\xc5\x58\x2e\x43\x73\xd8\x9b\xf4\x8d\x54\xca\xc9\x35\x4d\x4c\xd7\x13\xc3\x79\x76\x43\xb9\x9b\x67\xb3\x78\x35\xa6\x4b\x87\xce\xa4\x2e\x90\xce\x7c\xda\x52\x13\xb2\x25\x49\x8a\x81\xf4\xb9\xdb\x3f\xf5\xc7\xb3\x2a\x70\x61\xe2\xfb\xe7\x02\x7a\x8d\x1d\xbf\x11\x56\x50\x75\x5a\x27\x1a\x47\x18\x45\xd0\xd7\xf7\x38\xcc\x42\x41\x17\x69\x5d\x80\x8a\xc0\xf0\xb4\x2a\x08\x80\xd5\x55\x86\x61\x09\x62\x34\x4e\x54\x44\xa2\x8a\x22\x21\x2c\x4d\xb3\xca\xb5\xb9\xc0\x5f\xb4\x76\x06\x20\x00\xaf\xde\xa5\xdf\xf6\xd9\x7f\x08\x10\x7d\x65\x2e\x40\xf1\xce\xb9\x10\xaa\xe4\x84\x19\xaa\xb3\xa8\x8f\xc4\x45\x63\x90\x28\x0e\x66\x5d\xa5\x4b\x67\x53\x5d\x39\x1c\xcb\x17\x54\xb1\xa1\x98\xce\x88\x70\xd1\x74\xb6\x0a\x98\x42\xd7\x88\x84\x43\xb9\x42\xac\x5c\x2c\xe9\x58\xcf\x89\xf3\x05\x3d\x66\x81\x38\x2e\xcd\x70\x5b\xeb\xc3\x02\x8f\x2a\xad\x4c\xda\x33\xc3\xcd\x5c\xe8\x1e\xc6\x72\xcc\x37\x59\x0c\xf5\x52\xa3\x9c\x87\x7a\x95\x2d\xf4\x30\xac\x4b\x0a\xcd\xd8\x8b\xbe\x8d\xf5\x5e\x37\x2c\xcc\xfb\xcb\x42\xb1\xc4\x92\xa1\xd2\xc8\x65\x84\x8c\xce\xd9\x76\x04\xe4\xea\x2e\x6f\xc5\xe8\xea\x90\x9f\x0b\xb3\xa8\x11\xd2\xd9\x2e\x28\x74\xf9\x4a\x2a\x5f\x6d\xc9\xb1\x8d\xf9\xab\x67\xe6\x82\xe6\x9c\xb3\xa7\xff\x3b\x73\x81\x7f\x8d\x1d\xbf\x31\x0c\x43\x54\x8e\x88\x44\x13\x81\xa2\x10\x45\xe1\x44\x88\x68\x91\x16\x81\x42\xb3\x50\xc1\x3c\xc7\x00\x4d\xd3\x45\x9d\x87\x80\x26\x80\x66\x59\x9d\x26\x9c\x82\x44\x44\x43\x15\x12\xd6\xab\x5e\x61\xcf\x9b\xf5\x45\x6b\x67\x19\x46\xb8\x3c\x17\x76\x77\xb7\x5b\x2a\x0
c\x42\xfc\xb5\xb9\x80\xee\x9c\x0b\xab\x5c\x68\x41\x4c\x77\x90\x9d\xd9\x6e\x3b\xbe\x1c\x8c\xa3\xcb\x92\x32\x1e\xe6\xf2\x0c\x27\xf6\xa4\xa9\x34\xa8\x2e\x39\x85\x34\x53\x6a\x72\x30\x15\xda\x7a\xd8\x88\x39\xa3\x71\xcd\x5d\xf5\x6a\xbd\x59\x74\xd0\x5e\x24\x66\xc3\x5e\x84\x13\x1a\xe9\x42\xc5\x0d\x35\xda\x19\x93\x27\x8b\x68\x6d\x33\x76\x9b\xb9\x70\x64\x9e\x4d\x25\x6b\xaf\xfa\xcb\x9e\x0a\xc4\x42\xae\x0c\xec\x44\x4a\xcd\x0f\xd3\xcb\x59\x0d\x3b\x1c\x22\xec\x8a\xae\x8e\x6b\xbd\x61\x76\x9e\x88\xd8\x59\xbd\x37\xcd\x30\xa9\x4a\xce\x89\x4e\xeb\xe9\x61\xcc\xc4\xd2\x08\x2d\x7b\x65\xd0\x2d\x2b\xd8\xc0\xe1\x06\x26\x59\xb9\x24\x0e\xea\xc2\x66\xae\xb5\xce\xcc\x05\x3c\x38\x67\x4f\xbf\xf8\x5c\x60\xef\x9f\x0b\xdc\x6b\xec\xf8\x4d\x05\xbc\x86\x01\xd2\x44\x4d\x67\x20\x50\x54\x5d\x67\x81\xc2\xe9\x1a\xd4\x79\x86\x63\x20\xcf\xd1\x3c\x66\x38\x8c\x69\x88\x00\xa2\x81\x86\x08\xc2\x3a\x8f\x21\xcd\x40\x45\x15\x04\xd1\x2b\xb3\x61\xce\x9a\x35\x7b\xd1\xda\x79\xc0\x5e\x9e\x0a\xdb\x9b\x5e\xf6\x90\xe1\x59\x01\x5c\x9b\x09\xcc\x9d\x33\x41\x88\xb8\x56\x92\xd6\x84\x45\x54\x08\x83\x28\x0c\x97\xc5\x7e\x6d\xd1\xae\xe0\x7e\x63\x92\x6a\x46\x96\xb1\x26\xc7\x8b\x13\xb9\x9f\x10\x8c\xb4\x92\xc9\x65\x14\x41\x50\x32\xf3\x7e\x3c\xec\xea\x5c\x84\x76\xec\xc9\x30\x02\x1b\x2e\x9c\x8f\x27\xb9\x76\x6c\x11\xe3\xcc\x28\x58\xb9\x74\x6b\x33\x72\x9b\x99\x70\x3c\x94\xb3\x6a\xc3\x92\xda\x82\xa3\x30\xa6\xb8\x4a\xe5\x63\x12\xec\x4d\x2c\x6c\x36\x8d\x7c\x78\xac\xf6\x75\xd9\xcd\x64\x63\xf6\xaa\x3e\x8b\xe5\xa7\x5a\xc4\x62\x7b\x22\x4e\x6a\xae\xde\x68\x56\x9d\x6e\xba\x96\x60\x85\x4c\xbc\xea\xb0\xc5\x6a\xa3\x6a\x95\x9b\xd3\x70\x3d\x5b\xd7\x4a\xdc\x66\x05\x56\x3b\x33\x13\x6a\xb5\x73\xd6\xf4\x8b\xcf\x04\xe6\xfe\x99\xc0\xbe\xc6\x8a\xdf\x10\xa3\xb1\x9c\x06\x10\x51\x74\x16\x31\xaa\xc6\x60\x1e\x13\x5a\x63\x31\xe6\x75\x8e\xd7\x00\xd4\x79\x5d\xd7\x10\xd4\x90\xc2\x70\x3c\xc2\xba\xc6\x01\x1e\x28\xbc\xae\x8a\x34\x54\x38\xfe\x1a\x2a\x30\x17\x8d\x1d\xd1\x02\xba\x1c\x2d\xec\xee\x7a\x39\x39\x5e\x84\x02\x7b\x6d\x2e\x80\x3b\xe7\x02\x53\xac\x35\x4b\xba\x2d\x26\x8b\xf9\xc2\xc8\x36\x60\x66\x3c\xab\xbb\x92\xae\x84\xe1\x02\xa2\x7c\x83\xcc\x60\xa9\x95\x55\x84\x41\xcf\x6d\xd8\x21\xb5\x07\x14\x73\x9c\xcf\x26\x4b\xa2\x95\xe9\x75\x85\x42\x4c\x6a\xa5\x20\x97\x33\xa7\x6a\x39\x22\x4e\x9a\xb9\x85\x9b\x9c\x85\x43\x40\xdd\x8c\xdd\x7a\x2e\xc0\xa3\x68\xa1\xcb\x2a\xb1\xec\xa8\x18\x41\xf9\x69\xb1\xa7\xc6\xf3\x53\x86\x29\xf6\x2b\x56\x5f\x2d\x5a\x74\x35\x09\x16\xdc\x32\x9b\x9d\x2a\xbd\x49\xae\xbf\x4a\xcf\x22\x2d\xae\x28\xa5\x66\xc9\x4a\x66\x39\x85\x16\x61\x42\x42\x7a\x9a\x09\x67\xcd\xe4\xb2\x5c\xc7\x7a\x9d\xa1\x49\xa1\x26\x40\x7d\xb8\xa1\x5c\x3a\x33\x17\x8a\xd6\x39\x7b\xfa\xc5\xe7\xc2\x03\xa8\xc0\xbc\xc6\x8e\xdf\x58\xcc\x2a\x50\xd7\x69\xc8\xaa\xac\x00\x91\xae\xd1\x80\x11\x04\x4e\xd7\x74\x8e\x63\x55\x41\x13\x78\x1a\x41\x8d\x47\x82\xc0\x43\x9e\xe3\x39\xc8\x01\x0d\x69\x04\x71\x3c\xaf\x6b\xba\x28\x5c\x43\x05\xfa\xa2\xb5\x0b\x1c\x12\x2f\xc3\xc2\xfa\xee\xfa\xd9\x6d\xea\x16\x0a\xc2\xb5\xc8\x99\xbf\x73\x2a\x80\x71\x36\x19\x4e\xd5\x16\xa9\x78\x18\x74\x0b\xa9\xa6\x28\xd6\xab\x29\x3a\x91\x5e\x40\xd3\x48\xc4\x87\xed\x72\x2d\xaf\x13\xad\xa1\x88\xa1\x96\xe1\x8c\x23\xe5\x56\x3f\x3d\xe8\x8f\xed\xa1\x29\x37\x4b\xa4\xd4\xa3\x17\xed\xc5\x98\xed\xcf\x1a\xc3\x50\x7f\xd2\x5c\x2d\x66\xb3\xbc\x59\x6f\x7b\x6b\x93\xcd\x54\x38\x0a\x16\x56\x2c\x01\x6a\xde\xaa\xd8\x95\x65\x5a\x31\x6a\xe2\x7c\xce\x36\x57\xe1\x28\x8b\xcc\x78\x3d\x15\x91\x93\x83\x32\x84\x83\xee\x88\x84\x62\xab\x38\xac\x35\xc3\x6e\xa6\x11\xce\x1b\x8b\x74\x77\xb1\xcc\x5a\xbc\x90\x75\xe6\xcb\x51\x81\x1f\x5a\x03\xae\x98\x49\xb8\x09\x71\x36\xc2\x6d\x7b\x63\xfd\xb9\x33\x53\x21\x
dd\x3a\x67\x4e\xbf\xf8\x54\x78\x00\x16\xe8\xd7\x98\xf1\x1b\xcf\x68\xa2\xa0\x73\x0c\x4f\x08\x2f\x68\x50\xa1\x91\xc2\x29\x82\xa8\xd3\x0c\xd6\x39\x06\x42\x05\x71\xbc\x88\x69\x56\xc7\x3a\x64\x01\x83\x35\xa0\x70\xb4\xc2\x33\x8c\x02\x90\x42\xc4\xab\x0b\xa4\x8b\xc6\x0e\x01\xa0\x2f\x62\xc6\xee\xa6\x97\xe0\x66\x39\xf1\x5a\xd4\x7c\xef\xf2\x88\x2e\xb6\xfb\x30\x3f\xe5\x2c\xa0\xa4\x51\x83\x35\x97\x85\x59\x6d\x91\x60\xea\x63\x6b\x10\x9a\xc5\xa5\x82\x1b\x85\x19\x3a\x87\x22\x88\x6f\x8b\x89\x79\x93\x9f\x08\x99\xd5\xa2\x59\x8b\x0d\x98\x90\xae\xb5\x9d\x9e\xb3\x8a\x36\x9a\x74\x45\x99\x4a\x66\x3a\x9f\x33\x2a\xd6\x74\x18\x5d\x1e\xcd\x83\x8d\x69\xa6\xf6\xff\x48\x9b\x6b\xe7\x70\x3d\x97\x8a\xa5\xed\x92\x3e\xba\x1c\x84\x49\x23\x21\xb5\xe4\x42\x6f\x88\xc5\xca\xa2\x9d\xec\x0f\x16\x89\x54\x89\x87\xe3\x94\xd1\x1f\x15\xe3\x62\xaf\x3e\x86\xa5\x19\x67\xbb\x85\xa6\x33\x1a\x94\xd3\xea\xb2\x39\x1a\x12\xba\x9d\x89\xf7\x57\xbd\xea\x00\x4c\x19\xa8\xb5\xad\xd4\xa0\xc9\xc1\xa9\x65\xaa\x7a\x91\xdd\x40\x52\xea\xcc\x3c\x91\xff\x1b\x83\xea\x07\xe6\x09\x7c\x8d\x8d\x6f\x5e\x02\x40\x01\x2f\xd3\x0e\x45\x04\xbe\x02\xf8\x15\x40\x0a\x80\xef\x9b\xff\x2e\x1a\x33\x84\x80\x16\xae\xde\x5d\x93\x67\x69\x91\x15\x79\x44\x8b\xd7\xa2\xe2\xf3\x96\xee\xb1\xf4\xa3\x07\xe5\xf2\x27\xd2\xcc\x18\xec\x32\xbc\xac\x64\x22\x28\x66\xc6\xc4\x24\x0d\x16\xfd\x48\xc8\x01\x5d\xd7\x99\xa7\xe6\x2b\xd8\xd4\x2a\x8d\x16\x8e\xa4\x71\x7c\x03\x26\xf2\x19\x23\x3e\xff\xd9\x19\xb1\x24\x45\xce\x46\xcb\x3f\xf3\xe7\xcd\x33\xa6\xdb\x1b\x11\x77\xbc\x66\x2f\xe8\xbe\xc4\x85\xb7\x27\x5c\x3a\xd9\x41\x5f\x98\x71\x37\xc8\xf8\xcf\x51\xc0\x60\x64\x18\xff\xa9\x85\x60\x64\x58\xff\x11\x81\x60\x64\x38\x5f\x3d\x7e\x40\x32\xbc\xbf\x34\x3c\x18\x19\xe4\xaf\x9d\x0f\x46\x46\xf0\x97\x73\x07\x23\x23\xfa\x0b\xa7\x83\x91\x81\xc0\x57\xc5\xcb\x04\xa4\xf3\xae\xd6\x39\x20\x1d\xda\x57\x9c\x1a\x94\x1f\x7f\x91\x6b\x50\x7e\x58\x5f\xcd\x65\x50\x7e\x38\x7f\xed\x66\x40\x3a\xbc\xaf\x4a\x32\x28\x3f\xc8\x5f\x6d\x19\x90\x8e\xe0\xab\x55\x0c\xca\x8f\xe8\xaf\x79\x0c\xe8\x07\x81\xaf\x62\x30\x20\x3f\x34\xf4\xd1\x09\xea\x97\xfd\x75\x7b\x41\xe9\x30\xfe\xda\xb8\x80\x74\x58\x7f\xfd\x59\x40\x3a\x9c\xaf\xc6\x2b\xa8\x5c\xbc\xbf\x8c\x2a\x20\x1d\xe4\x2b\x99\x0a\x3a\xee\x82\xaf\x40\x2a\x28\x1d\xf1\xb4\x76\x88\x0d\x88\xc7\xc0\x57\x82\x14\x90\x8c\xaf\xc2\x26\x28\x37\xb4\xaf\xf4\x28\x20\x19\xe6\xb4\x0e\x25\x28\x37\xfe\xf2\x99\xd7\xbc\x51\xf8\x25\x87\x88\xaf\xbf\xee\x6a\xbd\x3a\xba\xf7\x54\xf1\x85\x17\xeb\x3e\xbd\x1a\x3d\xd6\xe3\xd1\xba\xf1\x70\xc1\xee\x5f\x0d\xf5\x9f\xdf\x5d\xeb\xd9\x8a\x73\xdd\xb6\x46\x4f\xd7\x6a\xff\xac\x47\x82\x3f\xe0\xd5\x10\x97\xc7\xc9\x5b\x51\x1f\x2e\xc0\xfb\x71\x7a\x42\xc7\xfb\x71\x7a\xe6\x74\xc1\x4f\x7b\x0e\xf8\x03\x5e\x14\x72\x6e\xa0\x8e\x63\x96\xc3\x85\x70\x54\x6d\x7b\xe3\x3c\xf1\xe6\x70\x03\x21\x4f\x49\xbb\x23\xf3\xdf\x70\x46\xe2\x93\x26\xd8\x71\x74\x77\xb8\x38\xe3\x08\x7f\x9a\x09\xf6\xb3\x1d\x69\xfe\x9c\x09\x76\x1a\xf8\xee\x2f\xe8\x9f\x78\xa0\x7e\xc2\x43\xcc\x9f\x39\x56\xbb\x23\xc2\xfb\x0b\xe1\x63\x56\x17\x4f\x38\xcb\x9f\xfb\xd4\xf2\x07\xbc\xcb\xe7\xf2\x60\x6d\x8f\x05\xef\x2f\x3e\x68\x29\xf8\xaa\xc1\xfa\xb9\x4e\x2a\x7f\xe6\x48\xed\x72\x49\xfb\x8b\x8f\x5a\x0c\xbe\x62\xa4\x7e\xc6\xa3\xc9\x9f\x34\x58\xc7\x89\xb6\xc3\x05\xbf\x19\x29\x74\x38\xce\xfa\xda\x15\xdf\x33\xb8\x35\x75\x7b\x96\x6d\xac\xc8\xf6\x0c\xd8\xff\xbd\xb1\xf2\x92\x90\x87\x0b\xfa\xd7\x1d\xab\xff\x62\x27\x78\x92\xa0\xdd\x5f\x08\x1f\x3b\x56\xcf\x44\x52\xff\x77\xe7\xd5\xe9\xe1\xf5\xfd\x05\x38\x17\x14\x5f\x3a\x1c\xfc\x93\x0d\xe6\x4f\x31\x72\x1f\x1f\x16\x9f\x6e\x3b\xec\x2f\xf8\x1f\x3c\x72\xbf\xf4\x8b\x40\x3e\x27\xf6\x3a\xdd\xe8\xd9\x5f\xd0\xff\x2d\x23\xf7\
xc3\x90\xed\xb3\x46\x6e\xfb\xfa\x84\xfd\xc5\xd9\x14\xe2\x5d\x23\x17\x5c\xe5\x1f\xe2\x2d\x7f\x58\xc2\xe3\x13\xdc\xe5\xc9\x6e\xe6\xfe\x82\x3d\x7a\x8b\xc3\x5a\xa2\xcd\x2f\xab\xff\xfe\x9d\xfa\x37\xfd\xcf\x17\xea\xf0\x4d\x67\xf3\x9d\xef\x1d\x12\xbf\xff\xf3\x69\x76\x77\xf2\xd6\x89\xfd\x05\xb8\xeb\x15\x14\x9f\xb4\x90\x38\x79\x7d\xc5\xfe\x82\xbf\xa4\x5f\x78\x45\xbf\xde\xdb\x2e\xf6\xea\xfd\x70\xde\x8f\xb7\x72\xf7\x7f\x0b\x47\xef\x16\xd0\xa7\xa6\xb6\xb5\xf3\x80\x6f\xe8\xde\xcc\x19\xef\x3d\xd9\xcf\xfa\xec\x3b\x5e\x74\xf0\xe4\xab\xc4\x1f\x51\xdb\x76\xcf\x79\xff\x37\xfb\xb1\x6a\x0b\xee\x30\x7f\x32\xb5\x79\x9b\xe3\xfb\xbf\xc1\x87\xaa\xed\x09\x80\xf8\x78\xb5\xdd\xd8\x69\x3f\xf3\x7b\xab\xf7\xec\xb2\xdf\xa6\x7a\xfb\xa7\x27\x83\xee\xe6\x5f\xfc\xc9\xaa\xb3\xd5\xa5\xec\xe5\x2a\x9d\x9b\x84\x68\x7f\x69\x40\x50\x42\x8c\x7f\xef\x3a\x28\x21\xd6\x47\x28\xb0\x68\x9c\x6f\x93\x36\x30\x21\xde\xbf\x6b\x18\x94\x10\xf2\x11\x0a\xcc\x91\xe0\xdf\x1e\x0b\x4a\x48\xf4\x11\x0a\xcc\xd1\x69\xbd\xa9\x70\xb9\xa2\xe9\x36\x25\xe8\xdf\x4f\x0a\x4c\x89\xf6\x6f\x76\x04\xa6\xc4\xf8\x28\x05\x97\x8e\xf5\xa5\xf5\x83\x53\xe2\x7c\x94\x82\x8f\x1d\xef\xcb\x5e\x07\xe7\xe9\x5d\x1e\x3c\x30\x25\xc1\x97\xa5\x0d\xce\x93\xe8\xcf\xf7\x06\xf6\x95\xc0\x97\x8d\x0c\xee\x75\xa1\x8f\x52\x60\xe9\x4e\xeb\x50\x9f\xb1\x82\xd3\x4a\xd4\x67\xc6\xee\xb4\x16\xf5\x29\x8d\x73\xbe\x28\x39\xb8\x74\xbc\x3f\x68\x0b\x4c\x09\xf9\x22\xa8\xe0\x63\x27\xf8\x02\x9d\xe0\x94\x7c\x71\xc7\xa5\xca\xcb\xdb\x28\x0e\x4e\x09\x05\xe6\xe8\xa4\x32\x95\x7d\x86\x23\xda\x17\x1b\x04\x26\xc4\x9c\xae\x96\x03\x9b\xd2\x49\x7d\x2a\xf0\x44\x7b\xcd\x0f\x57\xbf\xa2\x42\xf5\xd6\xef\x94\x3e\x52\xa3\x7a\xf1\x67\xaa\x5f\xb0\xae\x3d\xe8\xf3\x0d\xb2\xb4\xc6\x68\x0a\xe1\x44\x56\x20\x0a\x23\x6a\x0a\xd4\x35\x9e\xd6\x44\x48\xab\x84\x47\x90\x81\x0a\x54\x18\x00\xb0\xa6\x00\xc8\x61\x55\x07\x3a\x44\x9a\x08\x31\xe0\x10\x4b\x8b\xda\xdb\x17\xca\x8b\xb5\x83\xa7\x81\x8e\xce\xcb\xf2\xbb\x73\x82\x97\x5f\xd2\x26\xd2\x97\x5f\x6a\xb5\xbb\x7b\xb2\xaa\xf6\x0e\x18\xc6\xdc\xee\x6c\x1e\x9b\x16\x1a\x52\x49\x44\x65\x58\xae\xba\x35\x6d\x9e\x8f\x25\xc7\xb1\x70\xb4\x46\xc6\x2b\xad\x54\x6c\x0e\x2d\x53\x35\xb2\x75\x69\x73\x40\x4f\xda\x1d\x80\x95\x7d\xe7\xdf\x22\x87\x3f\xbd\x5b\xf6\x24\xcf\x67\x49\x01\x77\xfb\x8b\x1c\xae\x15\x45\x3e\xb2\xd2\x1d\x91\x00\xd5\xb2\xf3\xed\xe6\x2a\xd2\x48\x0f\xe2\x56\x06\x0d\x66\x83\xcd\x51\xd7\x48\xbd\x9e\xdf\xd2\x8c\xc2\x69\x44\x4b\x5a\xd5\x69\x37\x37\x2b\xb9\x31\x14\xe9\xa5\xb2\x4c\x9e\x88\x5a\xbd\xa8\x27\x52\xa1\xb4\xc1\xa5\x67\xb5\x42\xa8\x2d\xb9\x68\xfb\xe2\x86\x95\x88\x8e\x4e\x8f\xcb\x7b\x7e\x4b\x49\x65\x34\x2c\x4e\xea\xea\xc2\x4a\x4d\x56\xe9\x38\x5e\x6a\x5a\x7a\x9e\x28\xe7\x84\xfa\x62\x04\x22\xad\xe1\xd4\x60\xb2\xf5\xa9\xa8\x85\xcd\x08\xdd\x53\xc2\xae\x53\x6b\x74\x6b\xed\x6c\xb4\x66\xe6\x67\xe1\x68\xb1\xef\xe4\xaa\x52\x9e\xce\x8e\xea\x83\x56\x21\xe7\x46\xb5\x96\x1a\xca\x97\x84\xbf\xdf\x8e\x0f\x68\x26\x8e\x0e\x36\x96\xce\xe9\x43\x3a\xb4\x3f\x9c\x2a\x8e\x6d\x24\x3d\x6a\x54\xa0\xa3\x61\xa9\xc0\x72\xad\x48\x8c\x71\x93\xf5\x78\x01\x96\x19\x09\xe4\xc8\xa0\x28\xa4\xcb\xbc\x99\x87\x92\x48\x1a\x86\xb6\x4c\xb9\x9b\xf7\xa7\x44\xeb\xd2\xcc\x50\xa5\x53\x7a\x52\xd7\x7f\x2c\xd1\x3f\x4c\xa7\xfc\xbd\xb0\x7f\x39\x48\xff\x9e\x92\x72\xeb\x7f\x92\x07\xfd\x44\xa7\x16\x63\xb9\x2c\x37\x89\x16\xe5\xc5\xb8\x14\x66\xac\x64\x3e\xb4\x82\xa8\xbc\x34\x1c\x38\xd4\x73\xf1\xd6\xa8\xd4\xe8\xda\xd3\x4a\xa8\xba\x79\x28\x5e\xaf\x81\xad\xe2\xb3\x0d\x36\x0e\x48\xaf\xc0\x4b\x4b\x31\x0a\x8a\x4e\x42\xee\xce\x54\x88\x20\xac\x89\x42\xab\xcf\x8e\xb2\x83\x91\x58\x42\xdc\x20\xca\xcc\xd6\xcd\xf9\xb4\x80\xe3\x7c\x24\xbc\xff\xcc\xcf\xf1\x2c\x4b\xa7\xa7\xc0
\x3f\x93\xbf\xf0\x58\x34\x7b\x37\xf8\xf3\xeb\x4f\x42\xad\xac\x20\xa1\xfe\xb0\x2b\x17\x09\xd0\x6a\x35\x54\x4f\xaa\xb1\xd2\x82\x2f\x85\xe7\xc3\xe4\x44\x65\x6a\x31\xc8\xe1\x34\x93\x32\xbc\x53\xee\x81\xf9\x8b\x00\x4d\x04\xd5\xbe\x24\xa5\x58\x65\xa9\x08\xf2\x85\x31\xf7\xeb\xef\x33\xf9\xe3\x07\x0a\xec\x5e\xe1\xef\xef\xc3\xdc\xec\xfa\x74\x29\x05\xe1\x35\x5d\x8b\x86\xb2\x35\x1f\xbd\xe8\x3b\xeb\xbf\x78\x16\xfa\x48\x57\x81\xfb\x4f\xcf\x83\xf5\xef\xe9\xe2\x3f\xda\x14\xd7\x42\xe3\xfa\x2a\xd1\x35\xec\xdc\xa0\x6e\xa7\x21\x30\xa5\xb6\xbc\x5a\x34\x13\x4a\x29\x6e\x34\xf8\xac\xea\x36\x10\x64\x42\x6a\x12\xc7\x67\x21\x7a\x51\xc6\x91\xf2\xc0\x49\x57\x54\x3a\x2c\xa4\x42\x85\xf9\x22\x27\x31\xad\x69\xbb\x51\xee\xb2\x4b\x90\x86\xbd\x15\x0b\x63\xf3\xbf\xff\xde\x24\xd5\x36\x3f\xcc\xbf\x3b\x7d\xef\xfd\xeb\x55\x27\x40\x70\xe9\x05\x00\xe7\x11\x9a\x61\x78\x5a\x15\x15\xc4\x6b\x02\x87\x05\x96\x55\x11\x23\x2a\x0c\x23\x68\x0a\xd1\x59\x1a\x32\x2a\xe1\x59\xa2\x62\xa8\x12\x85\x57\x81\x86\x00\x11\x90\x20\x22\x0e\x2b\x90\x45\x48\x81\x1e\x42\xd3\xcf\xec\x75\x1c\x21\xb4\x70\x13\xa1\x05\x86\xb9\xfc\x52\xa5\xf5\x5d\xf6\xcd\x97\x65\xf2\xac\x32\xc3\xf7\x89\xc1\xf4\x47\x56\x4a\xa8\x26\x86\xb1\x30\xe9\xaa\x0c\x2a\x36\xdd\x64\x26\xb3\x6a\xd4\x85\x79\xdd\x68\x47\x70\x74\xca\x65\xb9\xcd\x50\x1f\x21\x74\xc2\x37\xbc\xef\x10\xfa\x41\x8f\xff\x0c\x42\x57\x98\x31\x39\xb2\xb9\xc4\x9e\xdf\x52\xc6\xec\xe3\x46\x85\x2f\x0f\x78\x88\xcc\xc1\x1c\x86\x33\xd3\xa8\xae\x63\x92\x87\xb9\xe5\x68\xba\x12\x46\x62\xc4\x1e\x81\x72\x1a\xa6\x5a\xf9\x8c\x66\x4e\xb5\x7a\x08\xd1\x49\x27\x9a\x83\x75\x53\x37\x57\xe1\x90\x11\xe1\xe5\x72\x7c\x51\x70\xe4\x31\x1c\x45\x92\xe3\x5a\xae\x34\x1f\x7c\x08\x42\x3f\xb8\xa2\x59\x23\x64\x6f\x72\x18\x8f\xf8\x61\x56\x4a\x47\xa4\xef\x47\xe8\x27\xfb\x4f\x04\xe9\xff\x08\xa1\x7f\x52\x0f\x2e\xfd\x58\x84\xd1\x13\xb1\xc8\xf4\x16\x7f\x7e\xfd\xfd\x6c\x2b\x88\x1f\xb8\xc2\x49\x81\x6c\x73\x7a\x8d\xbf\x5b\x08\xfd\x28\xaf\x6b\x84\x4c\xe5\x0e\xf4\xbc\xd5\xec\x81\xde\x16\x7c\xef\x46\xe8\x20\xfd\x27\xac\x60\xfd\x6f\x11\x7a\x42\xf2\xe3\x76\x76\x9c\xa8\xda\xcd\x29\xa9\xc6\x9a\xc2\x84\x4d\x8b\xe2\x98\x65\x6a\xab\x0c\x46\xc5\xa5\x02\x12\x53\xa5\x2c\x9b\xb5\x5e\x17\x8c\x69\xc2\xc1\x26\x9a\xb5\xc8\x3c\xb5\xaa\x37\xb4\x90\x5e\x0c\xf3\xa9\xa4\x55\x96\x9b\x32\x3f\xaf\x98\x8d\x96\x4c\x86\xa5\x45\x2c\x5a\x7a\x35\x42\xeb\x18\x22\x1d\xa9\x40\x60\x74\x4d\x53\x39\x86\x08\x84\x16\x04\x81\x53\x88\xc8\xb0\x04\x8a\x8a\xce\x33\x88\xd0\x02\x52\xa1\xc8\x41\x85\x70\x02\xd4\x34\xc0\xa9\x60\xdd\xc0\x7b\xaf\x1b\x7a\x5d\x0c\xcd\xdd\x42\x68\x06\x08\xd7\xde\x0b\xbd\xb9\x79\xb2\x79\xf3\x6c\x04\x1d\xf3\x0d\xee\x11\x1e\x6d\x82\xd9\xa0\x78\xdb\xef\xcd\xd2\xb3\xf9\x89\xfd\x1c\x45\xc4\xf9\x94\x50\x69\x2a\xb5\x48\x23\x6d\x84\x17\xba\xa0\x34\xdc\x48\x24\x63\x99\xf3\xb9\xd8\x1a\x2b\x59\x75\x00\xc3\xb4\xc8\x4d\x16\xed\x42\x2b\xcf\xcd\xfa\xf1\x58\xab\x94\xc5\x09\x46\x81\x38\xd1\x98\xac\x1a\xed\x09\x16\xe3\xb9\xe4\x20\x4d\xbb\x92\x1c\x2b\xf6\xe5\x64\x2a\x37\x58\x49\xf3\x7b\xf0\xf6\x78\x3d\x72\x11\x6f\x23\x47\xaf\x62\x0c\x12\x91\xc2\x83\x7e\xa3\x87\x39\x76\xa2\x8b\x73\x13\x6b\xaf\xff\x17\xf6\x1f\x28\x22\x3f\xf2\x71\x89\x23\xfd\x7d\x1e\x9e\x61\xb3\x19\x19\xde\x15\x11\xff\x98\x88\x73\xc3\xdf\xb5\x88\xf3\x2c\x5e\x1c\x37\x08\x10\x51\xe5\xd8\x03\xbd\x23\xdb\x3a\xfe\x5c\xc7\x8b\x17\x46\x94\x8f\xf4\xbf\xc5\x0b\xd0\x5f\xa4\x35\xb7\x14\x6f\x8d\xb2\xe1\x78\x78\xe1\x62\x20\xcb\x13\x43\x8f\xc5\xa0\x31\x74\xdc\x65\xb5\x19\x56\x98\x91\x1d\x1f\x70\x5d\x66\x14\xca\xd5\xf2\x3d\xc9\xb5\x14\x6d\x56\xb7\xda\xd8\xce\xb7\x47\x63\x1b\xd4\xe6\xa4\x64\x62\x20\x95\x72\xa1\x5c\x49\xed\x2e\xab\xb9\x98\xf4\x6a\xbc\x50\x59\x5
a\x15\x05\x01\x23\x1a\xab\x02\x26\x1a\xa3\x28\x22\x46\x0a\xd2\x74\x45\xe4\x15\x11\x70\x84\x61\x34\xc8\x01\x5e\x67\x09\xcf\x33\x0c\x0f\x20\xc3\x11\x5e\xe5\x09\x84\x1c\x40\x82\x87\x17\xaf\x8a\xe8\xd0\x4d\xbc\x80\x5e\x9f\xd7\xef\x9e\x6c\xae\x3f\x1b\xd1\xc5\x7d\xc3\xfb\x23\x23\x3a\x68\xd6\xf4\x23\xdf\x76\x14\xd1\x49\x89\xd6\xd2\x8c\x2f\xe9\x68\x46\xcf\xb4\x97\xbd\x51\x23\x97\xd5\xbb\xa3\x9e\x96\x76\xd3\xc9\x66\x03\x59\xdd\xaa\xb6\x6c\x23\x3c\x33\xe4\x68\x21\xd9\x4a\x57\x4a\x99\xe2\x40\x35\x73\x56\x82\xcd\x23\xde\x18\x0f\xa6\xf5\x24\xdf\xeb\x4f\x17\xb1\x64\x24\x2d\x30\xa1\xa4\xb9\xec\x29\x52\xe9\x65\x11\xdd\xb1\x87\x0f\x10\x51\x19\xb1\xc3\x78\xf8\x73\x9e\xf7\x45\x54\xaf\xeb\x3f\x50\x44\x79\x3e\xe7\xfa\x33\x79\x70\xe9\xc7\x22\xcc\x5d\x39\x4d\x3f\x42\x7f\x62\xc4\x94\xaf\x30\x4d\xf6\x51\x84\xfe\x99\x72\xea\x67\x11\xfa\x68\x75\x18\x24\xa2\x3a\xce\x79\x7a\x02\x1f\xe8\xdd\x15\xd1\xbd\x30\xa2\x7c\xa4\xff\x2d\x42\x4b\xed\x7e\x46\x6d\x66\x5b\xa9\xb1\x30\x36\x33\x89\x76\x6b\xee\x86\xa2\x38\x0e\x46\x60\xa0\x69\xca\xd4\x88\xe4\x99\xb4\xe9\x4e\x42\xa9\x52\x8a\xd5\xba\xc3\xb4\x64\x85\xca\x4b\x75\xd9\x55\xba\xcc\x6c\x92\x19\x55\xb8\x9a\x3e\x49\x14\x90\x9c\x53\x65\x73\xa5\xb3\x44\xcf\xc4\x9d\xc8\xeb\x11\x5a\x07\x3a\xa7\xab\xda\xa6\xd0\x93\xe5\x04\x05\xf3\x34\xa3\x8b\x84\x27\x48\xe5\x68\x56\x10\x35\x56\xd1\x18\x11\x89\x98\x85\x22\x2f\x40\x51\x10\x68\x45\xe1\x69\x16\x28\x2c\x11\x38\xee\x6d\x73\xa0\x00\xbe\x08\xa1\x6f\xee\x8a\xb2\x0c\x77\xe5\x45\xf6\xbb\xbb\x27\xc5\x66\xcf\x22\xf4\xcd\x5d\xd1\x47\x11\xba\x06\xf2\xc5\x7a\x60\xab\x3c\x36\xb9\x23\x84\xce\x56\x5a\xa5\x68\x14\x86\x62\x89\x92\xe0\xd8\xc9\xec\xc2\xe0\x2b\xba\x99\x0f\x6b\xa6\xe0\xa2\x91\xa4\x36\x87\x31\xd3\xa8\x31\xe9\x90\xc6\xcb\xab\x2e\xac\xf1\xc3\xae\x68\x80\x7e\xa3\xe0\xe8\x78\x64\x58\xad\xba\x92\x16\x35\xc9\x28\x45\xda\xcb\x94\x63\xa2\x6a\x84\x53\xb3\xf3\xbb\x62\xc0\xfb\x10\xfa\x68\x5d\x11\x04\x21\xaf\xec\x8a\xde\x87\x90\xaf\xeb\x3f\xd0\x0a\xe1\x5d\xdc\x12\x0c\x01\xab\x25\x20\x7a\x71\xc3\x83\xf6\xbc\x65\xe5\x1e\x84\x79\x26\x46\x0d\xce\xdf\xb0\x94\xe5\xae\xf2\x77\x2b\x06\x0c\x32\x97\xae\xed\xea\xdd\x85\x30\x2f\x44\xb8\x47\xfa\xdf\x22\x8c\x9b\xee\x49\xa9\x6c\x93\xcd\xb5\x63\xcb\xa5\xa3\xce\x12\xd9\x71\x3a\xa4\x32\x22\xad\x2f\x99\x69\x2b\xb2\x20\xb5\x02\x49\x57\x55\x93\x63\xac\x6a\x21\x56\xaf\x4e\x1a\x31\x3a\x5b\xc9\xb7\xf8\x85\x38\xc1\x99\x5e\xaf\xee\x98\x10\x44\xd3\x52\xdc\x4c\x19\x4b\xdd\xcd\x25\x87\xf3\xd9\xeb\x63\x40\x7a\x1d\x2f\x01\x40\xb3\x82\x2e\xb0\x50\xd5\x38\x9d\x03\x34\xe2\x75\x15\x0a\x44\x81\xbc\xa2\xae\xe3\x43\x86\x25\x08\xd3\x22\xd0\x69\x01\x61\x85\xc1\x34\xc6\x22\xcd\x0b\xac\xb0\xa9\x72\xe1\x77\x31\x60\xc0\x23\x36\x8f\xec\xea\xb1\x0c\x02\xe2\x15\x84\x41\x87\x5f\xc8\xda\x56\x0e\x7b\x46\xf9\xe0\x92\xea\x91\x4d\xbd\x4f\x2b\xbb\x29\xf4\x6b\x2e\x0c\x1d\xb1\x92\xdc\xf3\x5b\x4a\x4d\xc6\x4c\x59\x2f\xc2\x29\x89\x80\x6a\x39\x5d\x6e\x35\x4d\x6c\x85\x06\x23\x8d\xb0\x91\x58\x4f\x98\x00\x79\x8c\x26\x90\x2d\x09\x56\x39\xbf\x80\x65\x71\xea\x9a\xaa\x3e\xb6\x9a\x36\x4a\x17\xd4\x89\x31\x77\xbb\xd8\xae\x8a\x03\x3e\x33\xca\x2d\xfb\x1a\x16\x51\x92\x2b\x77\x73\x1f\x02\x30\xae\xd2\x6d\x96\x79\x19\x59\xb1\x2c\xc8\x96\x42\xf3\x56\x25\x2a\xae\x9a\xb3\x66\xbd\xca\x2c\x8c\xa2\xd1\x9a\x56\x14\x18\x9b\x8d\x4a\x59\x22\x48\x5b\x07\xdf\x65\x0f\xe3\x71\x36\x24\x8f\x26\xcb\x43\x95\x29\x4d\x5b\xb4\xe8\x3e\xe0\x14\x1e\x0c\xc1\xd7\x4e\x21\xe6\x49\xe2\xfd\xea\xd8\x39\xfa\x29\x2d\x51\x5f\x69\x51\xae\xaf\xd0\x20\x72\x89\x93\x77\xfa\x29\x49\x3f\xa4\x8c\x6b\xfd\xd1\xf7\x5e\x5d\xf0\x8f\xa7\x74\x0a\xd0\x3f\xc6\xde\xaf\xf0\x77\x0e\x74\xe2\xcf\x8d\xef\xbb\x8d\xaa\x27\xc6\xf7\xc8\x3e\x
1e\xf4\x3f\x6b\xc0\x65\xf1\x86\x89\xe8\xc1\x3e\xfc\xf4\x7b\xa0\xdd\xcc\x83\xec\x28\x3f\x53\x4a\x67\x17\x38\x1e\x10\x19\x13\x53\x8b\x0b\xe1\x06\xc7\x26\xcb\x79\x79\x18\xef\x11\x6d\xd2\xe7\x2b\x6d\x88\xba\x72\x21\xb9\xb4\x4b\x15\x73\x6a\x37\x7b\xb1\x65\x4f\x66\x92\xf5\x24\xcd\xd3\xda\x22\x34\xea\x91\x99\x33\xe0\x96\x93\x6c\x94\x8e\x4c\xdc\x22\x5d\xb5\x26\xad\x55\x26\x0f\x6d\xc6\xd1\x87\x09\xe9\xe5\xe5\x25\x1c\x66\x05\x2c\x40\x48\x54\x01\xb1\xba\x2a\xaa\x9c\x86\x90\x8a\x54\x4c\x14\x01\x2b\x2a\xc1\x00\xf1\x2a\x54\x30\xe6\xa0\x82\x38\x4e\xa1\xb1\x80\x04\x46\x84\x18\x08\xa2\x8a\x34\xe8\x01\x11\xf3\x22\x20\x12\x6f\x03\x91\x70\xe5\x57\x4d\x77\x77\x4f\xce\x8b\x3c\x8b\x44\xc9\x73\x33\xf3\xc8\x32\x1f\x9e\x99\xa5\x61\x39\x16\x6c\xa6\x48\x42\xae\x39\x3f\xf2\xe9\x47\x48\x94\x90\x9b\x93\xb1\x2c\x16\x32\x85\x05\xee\xf1\x56\xa4\x57\x16\x39\x4e\x62\x23\xad\x51\x21\xb4\x9c\x66\xf3\xb9\x9c\xab\x4a\xd9\x61\xd6\xc9\xf4\xe9\x1c\x24\x62\xa9\x3a\x8c\xcc\xa4\xd5\x74\x3e\xe8\xca\xd1\x59\x97\x36\xe6\xc2\xa4\xd9\x8e\xd0\x83\xa4\x4b\x5a\x73\x61\x62\x59\x2b\xb5\x24\xfd\x8c\x48\x74\x76\x65\xf0\x63\x90\x48\x3d\x47\xff\xd3\x91\xe8\x09\x7b\xda\x7c\x3e\x1a\x89\x9e\xb5\xf7\x07\x91\xe8\xc9\xf1\x7d\x57\xb2\xf0\xc4\xf8\x3e\x89\x44\xb5\x43\x82\xed\x29\x24\x6a\x95\xc9\x64\x50\x05\xac\x85\xe2\x93\xe4\xa4\x9b\x88\x27\x19\x73\x10\xeb\xc6\x8d\x16\x42\x19\xd6\x01\xf3\x25\x9c\x4b\xd3\xda\x74\x3e\x29\xb6\x56\x75\x04\x23\x85\x5a\x42\x88\x15\x50\xb4\x22\x95\x32\xa1\x18\xce\xc6\x96\x63\x6d\x90\xc4\x25\x02\xb9\x7e\x6c\x39\x31\xfa\xf9\x85\xf4\xf2\x90\x88\x25\x2c\x42\xa2\x0e\x34\x5e\xd4\x14\x0e\x12\x95\xd5\x91\x82\x18\x46\x40\x0a\x0f\x54\x9a\x11\x59\x86\x11\x00\xab\xd1\x88\x45\x1a\xd1\x69\xa8\x20\x84\x20\x11\x15\x04\x35\xc4\x29\xc4\x43\x22\xf6\x25\x48\x74\xf4\x23\x75\x97\xb1\x46\xbc\xf2\xfb\x90\xbb\xbb\x27\xa7\x04\x9f\x45\xa2\x94\x6f\x74\x7f\xdc\xb6\x98\x96\x18\xc3\x79\xfb\xc8\x26\x8f\x90\x28\x59\x8a\xf7\x0a\x6a\x86\x4f\xe7\x8a\xad\x85\x5e\x6e\x44\x42\x8b\x02\x64\xe4\x41\x31\x36\x19\xcc\x66\xc5\x44\x38\x39\x16\xe4\x46\xac\x9a\xca\x55\xc7\xc9\xa4\xc1\xe6\x26\x36\x13\xae\xd8\x24\x9d\xad\x2d\x0a\x06\x33\x49\x54\x26\xad\x78\x3a\x69\x48\x56\x86\xe7\xec\x78\xab\x84\x8b\xf2\x2b\xb7\xc5\x5e\x87\x44\x67\x57\x06\x3f\x06\x89\xfc\xd5\x17\xaf\x41\xa2\x4f\xdc\x66\xdd\x7c\x1e\x45\xa2\xcf\xb6\xf7\xcf\x45\xa2\xf8\x51\x52\xf5\x07\x23\x51\xc1\x13\x42\x7a\x16\x89\x34\xa9\x21\x73\x73\xdb\x1a\xcc\xc5\x7e\x5c\x8c\xb7\x92\x48\x46\x4d\xb5\x52\x12\x0a\x93\x4a\x48\x10\x14\x81\x98\xf3\x72\x2b\x9f\xeb\x2e\x0a\x03\x95\x64\xbb\xcb\xc9\x4c\x17\x33\x2e\x3b\x70\xab\x32\x9f\x25\x56\x3b\x33\xee\xd6\x06\x24\x15\xed\xda\xf6\xc8\xa9\xf7\x22\x96\x50\x7a\xfd\xf6\x0f\xa3\xf0\x3c\x8f\x69\x8e\x61\x20\xa3\xab\x08\x03\x8d\x66\x21\x21\xb4\x00\x78\x96\x10\x15\x09\x18\x63\x8e\x28\x1a\xc0\x48\x05\x98\x20\x5d\xe0\x68\x4e\x24\x02\xd0\xb1\x06\x68\x51\x7f\xdb\xbc\x34\x07\xbe\x28\x26\xba\x59\xd0\xc7\x03\x9e\xbf\xbc\xfd\xb3\xbb\x7b\x72\x36\xfc\x59\x24\xba\x52\xd2\xe7\xad\x89\x83\x67\x2b\x8e\xac\x47\xdf\x79\xde\x88\x94\xe5\xd5\x55\x2b\x3e\xab\x44\x7a\x5a\x9d\xc4\x58\x5d\x69\x16\x92\xd3\x66\x1c\xd3\xd1\xd8\x24\x3b\x8e\xeb\x6a\xa8\x94\x36\x2d\xa3\x98\x75\xc3\x34\xd3\xaa\x1b\xb5\x72\x22\xbb\xd4\xbb\x8c\x20\xc4\x33\xb9\x8c\xa3\xe4\xd3\x72\x77\x14\x77\xa2\xe9\xbe\xdb\x1d\x32\x7a\x1f\xcd\xed\xf0\x3a\x98\xbe\x03\x55\x92\x7e\x2b\x3e\xdc\xf7\x7b\x4d\xef\x13\x00\x55\x8e\x8b\x1d\xfc\x25\x4f\x8f\xa2\xca\x0b\x33\x31\xb9\x73\xf4\x83\xa1\xca\xe1\xf9\x1f\x9d\x69\xf3\x7f\x22\xd2\xcf\x9b\x09\x3c\x87\x2a\xb1\xe7\xc6\xf7\x5d\x89\xdf\x13\xe3\x7b\x84\x4a\x41\x50\xc5\x3d\x2a\x20\xf0\x2f\x60\x1e\x41\x95\x8f\x72\x0e\xaf\x45\
x15\x4d\xd4\x78\x45\x80\x3c\x06\x98\x51\x79\x16\xf0\x0c\xd2\x58\x41\x27\x0c\xa3\x63\x00\x74\x91\xe8\x10\xf2\x10\x30\x2a\x0d\x58\x81\x61\x31\x54\xa1\x80\x45\xc4\x73\x02\x60\x74\x8d\xd3\x3c\x54\x79\xd5\x96\xcf\xcd\xa2\x82\x35\x2b\x97\xe3\x9b\xdd\xdd\x93\xb7\x7b\x3c\x8b\x2a\x57\x8a\x0a\x82\xa1\xca\x21\xf3\x70\x1e\x55\x72\xa9\x88\xe0\x64\xc8\x70\x51\x9d\xc4\x0b\xd9\x62\x2a\xdb\x8f\x00\x33\xe5\xaa\xba\x9d\xb0\xa3\xf3\xa9\xed\xa4\x7a\x4e\xaf\x4e\x32\xc9\xca\x9c\x4e\x45\xd9\xf1\x28\x4a\xba\x28\x93\x88\x15\xb2\x63\xbd\x16\xcd\xaa\x34\x63\x72\x49\x52\x75\x16\x69\x47\x09\x47\xed\x64\x3a\x1e\x6e\xc6\x4a\x3f\x25\xaa\x9c\x45\xec\x1f\x83\x2a\xfe\x8e\x7e\x0c\xaa\xbc\x38\x6b\xe6\xff\x3c\x8d\x2a\x1f\x98\xd5\xfb\x80\xfd\x9b\x77\x85\xdb\x4f\x8c\xef\x93\xa8\x62\x7a\x42\x1c\xf8\xf1\xd3\xbf\x13\x55\x3e\xca\x39\xbc\x16\x55\x78\xbc\x0e\x56\x78\x81\x60\x5a\x00\x50\x23\x3c\x46\x34\x23\xaa\x22\xab\xb1\x2c\xe1\x34\x16\xea\x0c\x0f\x05\x48\xf3\x00\x21\x56\x80\x34\xa7\x61\x06\xd1\x0a\x8d\x14\x9e\x57\x35\x46\xf5\x50\xe5\x55\xfb\x37\x37\x8b\xc9\x79\xc8\x70\xd7\x50\xc5\xbb\x7b\xf2\x7e\xa6\x67\x51\xe5\x4a\x31\xb9\x87\x2a\xc1\xb3\x08\xe7\x51\x05\x09\xf5\xf6\x78\x26\xb4\x45\xcc\x34\x73\x62\x77\xc6\xf7\x96\xb9\xac\x4b\x2b\x11\xbb\xcd\x39\x19\x27\x5e\xae\xb1\x89\x4c\xb3\xe5\x2e\x5a\x74\x6e\x20\xb9\x62\x5a\x94\xf3\x76\xa5\xdc\x36\x61\x0e\xf4\x87\x0b\x31\x1e\x4f\xb8\xd1\x99\x1b\x8f\xb7\x0d\x21\xd6\x5b\xcc\x26\xb1\x25\x8e\xfc\x9c\xa8\x72\x16\xb1\x7f\x0c\xaa\x3c\x9b\x21\x39\x8f\x2a\x3f\x38\x03\xe6\xff\xbc\x43\x95\x9f\x28\x43\xf7\x01\xa8\x72\x5c\x0a\xf6\x83\x51\xa5\xeb\x95\x3d\x1e\xf8\xf1\xd3\xbf\x13\x55\x3e\xca\x39\xbc\x16\x55\x00\xc0\x8a\xa8\xea\x2a\xa1\x15\x56\x65\x21\x0b\x35\x41\x51\x78\xc4\x0b\x9a\x06\x44\x56\xd1\x14\x05\xaa\x88\x05\x3c\x82\x40\x53\x14\x46\xc1\x80\xe8\xa2\x20\x20\xa0\xf3\x0c\x66\xd8\x35\x26\xb0\xaf\x3b\xd2\xca\xdc\x42\x15\xc4\x00\x9a\xbe\x88\x2a\xbb\xbb\x27\xef\xea\x7b\xf6\x50\xeb\xd9\xdc\xb4\xf7\x79\xe2\x50\x2b\x73\xa8\xf5\x3c\xb2\xf4\xc3\xa1\xd6\x6c\x5d\x49\xa5\x42\x30\x4b\x8c\xd5\xa2\x6b\xaa\xb1\x14\xcb\x67\x92\xcb\x88\x0b\xb8\x50\x52\xa4\x4d\x18\x2a\x33\xb4\x90\x16\x57\xf9\x21\xad\x77\xd3\x0d\xda\xd4\x4d\x26\x9e\xb2\xb2\x75\x87\x9e\xd4\x61\x28\x5a\x1b\xdb\x90\x86\x98\x8e\x20\x29\xc7\x27\x33\x36\x6b\x46\xb2\x52\xea\x75\x87\x5a\x8f\x1a\x05\x38\x54\xda\x2f\x1c\xf4\x7b\xe1\x73\xcf\x91\x9f\x97\xf4\x7f\xc1\x9b\xdc\x28\x68\xde\x38\x89\x57\x1f\xc4\xbc\xd0\xe7\x75\x64\x7d\x75\xff\xef\xf5\x71\xb1\xff\x03\x13\x9f\x7c\xa4\x69\x37\x14\xbe\x82\xe9\xe3\xcf\xd9\x82\xe9\xe8\x73\xba\xda\xf2\xfa\xee\x00\xf4\x3d\xba\x7a\xc5\x58\x15\xd4\x60\xfd\xef\x0a\xa6\xeb\x4e\xb7\x8f\x6a\x2e\x9f\x2d\x26\xa2\xda\x3c\xd7\xef\x5b\x96\x9e\x4a\x30\x55\x93\xd5\x19\xac\x37\xb9\xa4\xae\xcc\x4d\x26\x47\x37\xdb\x21\x69\xa8\x60\x2d\xa4\xab\x35\xa3\xeb\x36\x56\x78\xdc\x6c\xb2\xe9\xca\x68\xd6\x54\xea\x76\x2b\xe9\x48\x2b\x4b\x9d\xcc\x0c\x55\x76\xa4\xee\xcb\xab\x03\x58\x81\xa7\x45\x81\x00\x56\xd7\x95\x75\x50\x43\x03\x95\xa3\xa1\x2e\x40\xac\x29\x1c\x0d\x90\xce\x69\x90\x56\x69\xa8\x00\x01\x03\xc4\x73\x82\xa8\xd1\x0a\x61\x34\x41\xc3\xc4\x3b\x00\xc3\xbe\xee\xd0\x2c\x7b\x1b\x91\x68\x70\x79\x4f\x66\x77\xf7\xe4\x9d\xaf\xcf\x1e\xc9\x39\xbb\x6f\xe0\x7d\x5e\x8d\x48\x47\x47\x6c\xe2\x63\xc1\x76\x62\xac\x34\x85\x16\x69\x0f\x22\x55\x58\x4e\xa6\xd2\xe5\xe5\xa2\x81\x97\x78\xca\xe8\x5c\xad\x4f\x24\x30\x67\x43\x20\xbb\xea\x67\xe5\xa5\x21\x9a\x89\x9c\x23\x90\xa1\x94\x64\x87\xb1\x5e\x2f\x96\x5e\x44\xd3\x42\xb2\xb7\x50\x12\xa1\x66\x35\xac\x8e\x4a\xce\x14\xde\x57\x77\xf6\x30\x22\x05\x39\xe2\x72\xf4\x9a\x03\xbf\x07\x7c\xe0\x10\xea\x4b\xfa\xf7\x8f\xef\x7d\x47\x6c\xce\x22\xd2\xb3
\xc7\x42\xfc\x08\x7d\xd7\xb1\x90\x57\xf7\x1f\xb9\xbf\xff\x23\x45\x7d\xf2\x21\xd1\xed\x50\x3c\x8a\x48\xb1\xe7\x74\x75\x8c\x08\x7e\xfb\xf8\x8c\xb1\xba\xb6\x7a\xb9\xe3\x08\x4f\xc3\x5c\x4e\xe7\xc5\x6e\x14\x35\x7a\xdc\xa8\x14\x2f\xd6\x65\xb5\x3b\x4c\x66\xe3\x6d\x3b\x3d\x41\x5a\x78\x58\xc9\xb3\xb1\x6a\xac\x8f\x4a\xb3\x42\xce\xa9\x66\xb2\xb4\xda\x5a\xae\x16\x7c\x4d\xd2\x8d\x5a\x21\x21\xe7\x4c\x39\x65\xce\x49\x32\xee\x28\x9c\x56\x14\x97\xed\xe8\x12\xd5\x5f\x5f\xaf\x06\x35\x9e\x01\x82\xa6\xf2\x44\xe4\x21\x52\x08\x61\x44\xcc\x8b\x3a\x2f\x40\xc4\xab\x3a\xaf\x33\x58\x51\x75\x56\x63\x78\x88\x34\x85\x51\x11\xcf\xf3\x2c\x52\x34\x28\x0a\x98\x23\x2c\x4b\x7b\x88\xc4\xbc\x08\x91\x6e\x56\x09\x20\xef\xcd\x7b\x57\x6f\x9e\xbc\x39\xfc\x59\x3c\xba\xf9\xda\x9f\x40\x19\xee\x5b\x78\x94\x95\x97\xc9\x61\xa5\xc4\x75\x15\x16\x97\x10\x99\x15\x86\xec\xa8\xcd\x0f\x2b\xd5\x54\x1c\x0d\x2a\xbd\xa5\x63\x85\x8d\x65\x11\x4c\x57\x8c\x31\x4e\x5a\xaa\xc6\x08\x61\x2d\x6b\x4c\xd1\xb0\x3e\xcb\x25\x9a\x73\x8b\x5f\x8e\xd2\x68\x64\x2c\xb4\xd8\x34\x9f\x4d\x35\x8c\x94\x58\x55\x4a\x1f\x13\x21\xbd\x04\x0f\x0e\xfd\x7f\x36\x1e\xc5\x82\xf4\xff\x31\x78\x94\x93\x1e\xc3\x83\x57\xe3\xd1\xa3\xc7\x24\x8f\x14\x15\x0c\x8f\xa2\x75\x79\x5b\x9b\xf1\x39\x78\xf4\x83\xf0\x40\xfa\x80\xb1\x0a\x80\x47\x4e\x35\x45\xea\xe9\xd8\x28\x32\xeb\x8d\x63\x2e\x2f\xf0\x4d\xa3\x6d\x4e\xea\xe9\x5c\xb7\x39\x2d\x67\x92\x99\x65\x3f\x94\x4d\x85\x2b\xa8\x48\x32\x03\xd2\x5b\x30\xfa\x4a\x04\xd3\x0c\x9a\xd6\x1b\xc2\xbc\xad\x47\xfb\x76\xa6\x65\x4e\x71\x36\x0e\x0a\x2c\x70\x7a\x38\xd5\x87\x79\xf7\xf5\x11\x12\xcf\x22\x82\x15\x8c\x74\x46\x51\x39\x44\xf3\x9a\x0a\x01\x0b\x39\x8e\xd1\x09\x8b\x58\xc8\xb1\x34\xd1\x00\xa6\xb1\x4e\x6b\x40\x14\x34\x91\x51\x80\x82\x55\x4e\xe1\x75\x46\xe5\x69\x56\xf5\xf0\x88\x7d\x51\xce\xee\x8e\x08\x89\x45\xd7\x72\x76\xde\xdd\x93\x5f\xa0\x78\x36\x67\x77\x33\x42\x0a\xf4\x12\x82\xe4\x8d\x9c\x5d\xb2\x5a\xcb\x90\x36\x6e\x37\xc3\x4b\xcb\x2c\x69\x2b\x3b\xd1\x02\x83\x61\x7b\x32\xef\xd5\xda\x1a\xb2\x4c\xb7\xa6\xf4\xc7\xa5\x51\x76\x10\x6a\x96\x5b\xc2\xb4\x65\x39\x21\x29\x45\xb7\x99\xc9\xa4\xc6\x94\xe2\x34\x92\xeb\x6c\xbd\xec\xd4\xd5\x5a\x6d\x36\x33\x39\xa4\x4f\xe2\xda\xaa\x7b\x57\x3d\xf4\x67\xe7\xec\x22\xe7\x67\xd2\xa7\xe5\xec\xfc\x88\x78\x57\xff\x1f\x93\xb3\xbb\x90\x43\xfc\xb4\x9c\xdd\xa3\x2f\x6f\x3b\x52\xd4\x4f\xf7\x12\x86\x8f\x78\xd1\x5d\xd0\x9c\x99\xf4\x01\x63\xf5\x48\xff\xbb\x57\x97\xe7\x4b\x63\x6e\x38\x1e\xea\xe1\x8c\xa1\x45\x98\x82\xd3\xee\x57\x2a\xf5\x91\x1d\x8d\x97\x87\xcc\xd4\x20\xb0\x3c\x2d\x0c\xa3\xad\x69\x95\xd3\xe2\xfd\x25\x67\x18\x2b\x56\xea\xb7\x4d\x32\x19\x57\x35\xa9\xa9\x38\xe5\x66\xaf\x9e\x58\xd4\x17\xe5\x4a\x28\xa4\x9b\x6e\xc8\xaa\x33\x85\xd7\xd7\x51\x2b\x1a\x0f\x78\x01\x60\xc8\x20\x41\xd7\x89\x8a\x18\x91\x40\x5d\xc7\xb4\xa6\xb0\xaa\xc6\x41\x9d\x13\x14\xac\xe8\x0a\x82\x2c\x01\x2c\xe6\x68\x45\xa3\x15\x9e\x53\x74\x01\x33\x50\x61\xf5\xb7\x4d\x78\xf4\xaa\xd7\xe8\xdc\xdc\x45\x12\x78\x20\x72\x17\x11\x69\x7d\x97\x7f\xf3\xfd\x92\xd1\xb3\x31\xd2\x95\x5d\xa4\xda\x39\x4b\x88\xdc\xb4\x1a\x1f\xbd\xfd\x3a\x48\x8e\x0c\x56\xdc\xb4\xdc\xad\x71\xf1\xc5\xa4\x30\x15\x5a\x78\x80\x22\x8a\xa6\x82\x88\xdb\x9d\xc9\x69\x10\xef\x4f\xed\xf6\x2a\x9c\xed\x26\xe6\x55\x5a\x6e\x8a\xb0\x25\xc0\x96\xad\x55\xea\x53\x3a\x5f\x2c\x83\xa1\xd2\xab\x48\x96\x9b\x28\x0d\x67\x95\x62\x4b\x5e\x72\xb3\x3c\x8f\xc6\x6c\xf4\x0e\x14\x3a\xae\xbf\xb8\x88\x42\xd2\x73\x2f\x8b\xeb\x9b\x07\x9d\x5e\xf8\xdc\x83\x02\x2f\xe9\x3f\xd8\xce\xd1\xa7\xe4\x7e\xb6\x9f\xeb\x9e\xed\x07\xe5\x9e\x6e\xed\xcc\x3c\xf1\x5a\xa8\xe0\x5e\xfe\x85\xba\x78\xa4\xff\xad\x97\x6f\xe7\x42\xca\xa0\x15\x2f\x94\x55\xdc\xb7\x67\x89\xc4\xb8\x30\x6
f\x30\xcd\xbc\xa4\xb4\xb2\x8b\xa8\x16\x69\xa1\x09\xad\x86\x97\x6c\xc4\x91\xc5\x5e\x3c\xac\x35\x8a\xf9\x46\x86\xa9\x0d\x15\xb7\x5f\x05\x5a\xba\xc1\xd6\xba\x99\x6c\x79\x90\x56\x40\x9f\x8e\xe4\xa6\xca\x8a\x4c\x79\xf2\xfa\xb8\x43\x45\x02\xe0\x78\x51\x65\x01\xad\xd2\xa2\xa0\x20\x85\xd3\x19\x1d\x62\x1e\xd3\x00\xab\x90\x86\x22\x06\xbc\xa6\x33\x22\x46\x82\xae\x30\x58\xe5\x35\x91\x51\x31\xc7\x60\x8d\x25\x1c\xd1\x3c\x2f\xff\xaa\xba\xe6\x3b\xbc\x3c\x03\x2f\xc7\x1d\xbb\xbb\x27\xbf\x32\xf7\x6c\x05\x5a\x30\x2f\x7f\xf9\xf3\xbe\xca\xca\x77\x7f\x5f\x45\x23\x47\x92\xf3\x41\x5b\x5d\xa6\x1a\x23\x5c\x9c\xc9\x6e\x6c\x58\x16\x64\x8d\xd5\x62\x30\x47\xc6\x74\x7f\x9a\x75\x93\x71\xc6\xe4\xca\x09\xd0\x9f\xda\xe9\x5e\x66\x12\x66\x72\x11\xb8\x2a\xf3\x4a\x2c\x69\x87\xe6\xd6\x9c\xe1\x8c\x7e\x46\xa4\x8d\x5e\x3f\xd9\x98\x54\x17\xfd\x55\x26\xd6\x1b\x14\x3e\xc2\xeb\x07\xa8\x44\x7b\xba\x5e\xe0\x85\xfd\xbf\xd2\xeb\x3f\x79\xaa\xe3\x69\xaf\xff\x11\xfd\xdf\xac\xd4\xba\xe1\xfd\x03\xf0\xf4\x52\xef\x1f\xa0\xff\x27\xbd\x7f\x59\x48\x27\x9a\x39\x23\x3e\x1e\xf5\xd1\x22\x5b\xe2\x6a\x7a\x24\x49\x92\x25\xb1\x9a\x9c\x68\x16\x32\x50\x79\xa1\x71\xa1\x5a\xd9\x4d\xa1\xc9\xb2\x55\x99\xcc\xc4\x95\xa4\xa9\x80\x8c\xe6\x0b\x3e\xec\x64\x72\xf9\x50\xcb\x5a\xce\x34\xcb\x6a\x2d\xe0\x64\x20\xb4\x84\x65\x97\x6d\xc7\x5e\xee\xfd\x69\x8d\x81\x48\x53\x89\x2e\xf0\x34\x4f\x78\x46\x54\x54\x8c\x15\x16\xb3\x0a\x54\x31\x24\x02\xe1\x79\x16\x11\x85\xe5\x35\x9e\x53\xb1\xa0\x89\x02\x64\x90\xaa\x89\x02\x51\x20\xc0\x04\x79\xde\xff\x55\xf5\xc7\x37\xb3\x4e\x02\xcf\x8a\xf0\x8a\xf7\xf7\xee\x9e\xfc\x32\xe8\xb3\xde\xff\x4a\xd6\xe9\x8a\xf7\xbf\x1e\xa9\x9f\xd0\x3b\xf2\xf6\xc2\x84\x2b\xe8\xa1\x84\x04\x08\xd6\xe6\xa1\x5e\xd5\xae\x46\x53\x92\x45\xb4\xcc\x32\xde\x25\xe1\xcc\x2c\x54\xae\xe5\x9d\x09\x72\x66\x68\x5c\xb1\xb2\x52\x29\xde\x98\xcc\xe3\xa9\xd9\x22\x9a\x88\x4c\x87\xa0\xd9\xab\x2c\x0a\xd6\x28\x11\xee\x36\xba\xa3\x94\x31\xef\xf3\x18\x76\xad\xdc\x4f\xe8\xed\xcf\x7a\x90\x07\xea\x8e\x3f\xf0\x0c\x9d\xc7\x64\x90\xba\xe3\x17\xea\xe7\x7d\xe6\xe7\xa7\xd1\xcf\x7d\x75\xbb\xaf\x3f\xf7\xf8\x74\xc6\xe7\x85\x3a\x79\xa4\xff\x2d\x1a\xe8\x99\x69\xd5\x0c\xf7\xba\xf9\xe4\xc8\x54\x8b\x56\x8d\x07\x73\x43\xca\x24\xcd\x8a\xd3\x6b\x25\x67\x4b\x3b\x3c\xa8\xc7\x94\xcc\x14\x65\x43\x7c\x6d\x12\x9d\xcb\x91\xfa\xc4\x89\x54\xb2\x42\xa9\xd7\x55\xc6\x5a\x9c\xad\x39\xd5\x49\xba\x37\x63\xe3\x56\xda\x48\x39\xa9\xd0\xc8\x6d\x65\x5e\xff\x5a\x4b\x4d\x61\xd6\x9e\x9d\xc5\x44\x10\x11\xc7\x33\x34\xc7\xb3\x8c\x8a\x35\x1a\xaa\x22\x4b\x20\xa3\xe8\x2a\x40\xac\xc2\xd0\x0c\x21\x02\x43\x20\x0b\x15\x1d\x01\x88\x39\x4d\x04\xac\x0e\x95\xb7\x4d\x20\xb0\xc9\xf8\x44\xca\xc5\x56\x32\x95\xa5\xa3\x29\x26\x9e\x2f\xb1\x91\x66\x36\x9e\xcb\xc7\xb2\xf1\x74\x2d\x5f\xac\xd1\xc9\x16\xd3\xce\xc5\x2b\xc9\x42\xbe\x16\x95\x0b\x52\xa5\x81\x4a\x51\x54\x68\xd2\xc9\xb7\xcd\xe3\x57\x41\x00\x02\xc0\x5e\xdc\x0a\xdf\xdd\x3c\xfe\x25\x66\xcf\x12\x13\x59\x21\x59\x9a\x95\x06\x4a\x86\x4e\x4a\x4c\xa3\xde\x2f\xdb\x99\x51\xbf\x09\x80\x9e\x10\x9c\x6c\x0a\x8d\x80\x5c\x9e\xa7\x1b\x61\xa9\xc9\x48\x7b\x04\xd8\x7c\xae\xac\xff\xbd\x4f\x80\xd9\x3e\x38\xa6\x57\x9f\xcd\xe3\xe2\x06\x01\xa4\x56\xbf\xa4\x16\xab\x74\x82\xeb\x4d\xcc\xc8\xa8\x9b\x48\x90\xae\x98\x16\x86\xac\x0a\x65\xb3\x36\x5c\x0c\x86\xf2\x30\x29\x3a\x93\xb6\x0d\x44\x04\xe3\x7c\x21\xdb\xd0\x49\x78\xc4\x0e\xc6\x71\x37\x15\x72\x52\xc0\x80\x93\xac\xe1\x72\x12\x48\x2f\x1b\xa6\xd2\x6b\x65\x1b\x9c\x15\xbb\x03\x01\x4e\x4c\xf6\x14\x01\x8e\x64\x3e\xe4\xda\x8f\x3c\x54\xc4\x08\x47\x40\x16\xa4\x13\x4b\xb7\x37\xcf\xc3\x61\x0b\xe0\xe5\xd8\x82\x62\x3e\xb9\x98\x65\xa3\xcb\x02\xe7\x46\x64\x35\xea\xc9\xc8\x74\x5d\xbb\x60\xb6\xc2\x
e8\x08\x3a\xfd\xa7\xd4\xaf\xf2\xb6\x9b\xcd\x4f\xf4\x1f\xaf\x36\x22\xf6\x13\xfd\x4b\xd2\x8f\x5b\xdb\x9e\xcd\xb2\x44\x82\xeb\xa2\x60\xb6\xaf\x8a\x79\x4b\x17\xcf\x8e\xc5\xda\x16\x42\xaa\x8f\xde\x43\xba\xf8\x4f\x22\x05\x92\x31\x20\xf6\xa6\x2d\x3c\x9e\xb7\xad\x48\xcf\xb4\x8a\x15\x3d\x4d\x92\xf9\x72\x1a\xa6\xd5\x76\xba\x9c\x2e\x87\x95\xcc\x08\x8b\x45\x22\x96\x49\xdf\x80\x26\x33\xe3\xa6\xe9\x4c\x59\xa9\x14\xed\x68\x3e\xe5\x62\x83\xb5\x49\x29\x1f\x55\x87\x63\x9a\x6d\x44\xe1\x14\xbf\xfe\xf4\xb8\x2e\x22\x15\xeb\x3a\x56\x04\x15\xf2\x80\x66\x30\x83\x04\x81\x85\x3c\xa7\x2a\x40\x61\x74\x1d\x62\x4c\x6b\x58\x67\x01\x00\x3a\xd1\x59\x51\xa3\x21\xd1\x55\x81\x45\x9a\xa6\xe8\x0a\xc1\x9e\x67\xa5\x9f\xf3\xac\xf4\x6d\xcf\x7a\xf9\x4d\x24\xbb\x9b\xc7\xbf\x28\xff\xac\x67\xbd\xb2\xb6\xf6\x3e\x01\x72\xb9\x17\x3c\x6b\x64\x94\x19\x57\xba\x33\x7b\x9e\x29\xd0\xa0\x19\x2d\xe8\x2d\xbd\xe9\x24\x64\xb9\xe6\xce\x5b\x18\xcb\xfa\xa4\x32\xe5\x97\xa3\xf4\x68\x18\x1b\xe1\x50\xaa\xc9\xa7\x50\xaa\xdb\x55\x6a\xed\x9c\xa5\x96\xb4\xb6\xc8\xa6\x72\x92\x9e\xd1\x4a\x52\x7e\xd2\x54\x52\x05\xb4\x74\xe6\x84\xe4\xee\xc9\x9f\x9f\x98\xec\xc7\x7a\xd6\x67\x3d\xdb\x93\xb3\x79\x82\xc2\xd5\x98\xf2\x4a\xcf\xfa\x89\x39\xe3\x5b\xf9\xeb\xcf\xf4\x6c\xd2\x8b\x3c\xab\xc0\x1e\x9e\xbf\x90\x65\xbb\x9a\xbf\x36\x26\x35\x2b\xcb\x0b\xd1\xbe\xeb\xc6\xe7\x7d\x93\x4e\x42\x14\xe9\x45\xe2\x59\x35\x91\x18\xf5\x92\xfc\xc0\x9e\x3a\x63\xa3\x3d\x2e\x71\xa3\x99\x11\x0f\x19\x85\x65\x2a\x95\x80\x89\x6a\x26\x29\x27\x1b\x3a\x89\xc6\xa4\xe4\xd2\xac\x49\x31\x3c\xa4\x97\xb1\xa9\x60\xe7\x92\x66\xff\xf5\xf9\x6b\x44\x73\x88\xe3\x15\xa8\x2b\x9a\x2e\x30\x0a\x10\x20\x8d\x74\x46\xe0\x88\x4e\x34\x1d\x88\x40\x54\x55\x81\x06\x0a\xaf\xaa\x04\x41\x1d\xaa\x40\x10\x91\x06\x39\x96\x46\xaa\xc2\x61\x4d\xf3\xfc\x22\xf3\x9c\x67\xbd\x95\xb6\x86\x00\x20\xe6\xe2\x1b\xd8\xd7\x77\xf7\x8b\x56\x96\x17\x0e\x49\xeb\xe0\xae\xf5\x4a\xf9\xa6\xf7\x09\x50\xac\x71\xc9\xb5\xf6\xd9\x6e\xa4\x14\x8e\x48\x4a\x77\xa2\xa3\x49\xc1\x72\xb1\xdb\x6d\xd7\x92\xfd\xd8\xd0\x29\xb8\xb1\xbc\x36\x46\x78\xd8\x2e\x73\xe1\xf8\x40\xd4\x13\xfd\xd0\x70\x21\x2f\xbb\x52\xbb\xb1\x6a\xa1\x70\xb6\x05\xdb\x4c\x25\xce\xab\x60\xe2\x40\x44\x67\x7b\xd2\x60\x30\xab\x0b\x63\x70\x4f\xda\xe2\xc4\x66\x3f\xd4\xb5\x3e\xed\xda\x9e\x75\xad\x4b\xbb\x14\xcf\xbe\xd0\xb5\x7e\xe6\x41\xa9\x8f\x70\xad\x41\x5d\x9b\xf4\x22\xd7\x1a\x34\x80\xd9\xba\xd6\x56\x28\x25\xd5\x8a\xf3\x52\x82\xcd\x98\x21\xbe\x3f\xc9\x36\xec\x56\xa2\x1e\x61\xe7\x9c\x12\xb3\x4b\xab\x26\x26\xf4\xb8\x51\x23\xfa\xa2\x5d\x6c\x2e\xac\xf0\x58\x2d\xe7\x2c\x29\x51\x77\xe8\x42\x78\x49\xe7\x1b\x1a\xe8\x11\x33\x9f\xb1\xf3\x44\x34\xd8\x52\x3a\x9d\x85\x7a\x46\xbb\x37\x1d\xf0\xdb\x6f\x5f\xbf\xfe\xf6\xf5\x2b\x95\xc7\x23\xf2\x9d\xc2\x8e\x43\xdc\x8e\xe3\x62\xd7\x39\xfe\xbb\x33\x1e\x90\xe5\x5f\x54\x75\x39\x26\xdf\xa9\x68\x21\x5f\xa9\x96\xa5\x54\xbe\xfa\x17\x55\x51\x7b\x64\x84\xbf\x53\xe3\xa9\x32\x34\xd4\xbf\xa8\xc2\xdc\x24\xf6\x77\x6a\x4d\xf1\xb7\xdf\xa4\x6c\x55\x2e\x53\x55\x29\x92\x95\xa9\x42\x3e\xdb\x3a\xa6\xf8\x1b\x45\x51\x94\x14\x8b\x1d\x51\x7b\xd7\x21\x55\x2c\xa7\x72\x52\xb9\x45\x65\xe4\x16\xf5\x87\xa1\xbd\xe3\xb6\x6b\xd9\xe3\xce\xc8\xe8\xda\xd8\x43\x01\xdf\xf5\x8b\xb8\xf6\x51\x3d\xc7\xf9\xb9\x8e\x6f\x72\xbf\x03\xb2\x8d\xd8\x8e\xef\xd2\xfb\x5f\x47\xb5\x34\xb2\xfd\xd3\x5d\x8e\x77\x7f\x1a\x8e\x33\x25\x76\xe7\x25\xd2\x9d\x76\x7b\x4e\xb8\x40\x8c\x51\xb5\x7c\xaa\x54\x93\xa9\x3f\x0e\xcd\xbf\x50\x87\xf6\xbb\xbf\xbd\x07\x1e\x54\xcd\x6b\x86\xf5\x61\xc1\x1f\x1a\x54\x6b\x4c\x3c\x63\xe8\x8c\xb1\xed\x1a\xaa\x31\xc6\xe6\x91\x24\xe7\x6f\xbf\x58\xb2\xf3\x9d\x5c\x93\xf4\x0a\x5b\x77\x4b\x7e\xb4\x2e\x3b\x2f\xfb\xa5\x06\x2f\x96\
xfe\x52\x37\xd7\xe4\xbf\xca\xda\x4d\x0d\x78\x26\xad\x2c\x37\xd6\xbe\x13\x24\x95\x8f\xc9\xcd\x1b\x32\x44\xcb\xb2\x54\x95\xbd\xa6\xa7\x54\xa8\x42\xde\x3f\x19\x6a\x95\x54\x3e\x41\x29\xae\x4d\xc8\xf1\xec\xba\xcc\x8d\x37\xc7\x9e\xe7\xc7\xa3\x73\x1f\x47\x17\xe6\xb5\xb2\xec\x60\x55\xb5\xa6\xa6\x1b\x98\x9d\x03\x89\x63\x4e\x4e\x82\x81\x53\x7e\xbc\xc6\x5f\xa8\xed\x1f\x1d\x87\x4c\xa6\xc4\x54\xdf\x2b\x4c\x59\x76\x7a\xd8\xe9\x3d\xc3\xd9\xfa\xf9\xfb\xd8\x3a\xb6\xb4\xf5\x53\xe7\xb8\x19\x12\xad\xfb\xc4\xc0\xed\x29\xdc\xc7\x91\xd7\x76\xaf\x9e\x2f\x14\x1e\x8f\x87\x86\xea\xb9\x03\xcb\xd6\x2e\xb8\xe9\x0e\x59\xdb\xc6\xe6\x7e\x00\x4e\xb7\x28\xe1\x31\xec\x23\x77\xcc\x36\xd1\x75\xa2\xfa\x4d\xed\xbd\xd7\x32\xb4\x2f\xd4\xef\x9b\x87\x7f\xbf\xc4\xac\xa1\xbd\x88\x4d\x43\xbb\x9b\xc1\x9d\xe9\xad\xd9\x0b\xc0\xb4\x35\xee\x8c\x5f\xc5\xf7\x96\xd6\x31\xeb\x17\xa0\x2a\x90\x24\xe7\x05\x70\x17\xaf\x13\x60\x4b\xeb\x82\x4d\x07\x14\xe1\x98\xc2\x39\x21\xac\xf1\xda\x2a\x7b\x56\x20\x19\xb6\xcc\x1f\x68\x04\x55\xfe\x75\x45\x3b\xdb\xd9\xbe\x71\xd5\xcf\xeb\xfa\x94\xdc\x31\xcb\xde\xf7\x3e\x1e\xcf\x73\x74\xac\xd7\x57\xb1\xf5\x8e\xe6\x7d\xee\xed\x1c\x83\xae\x37\x24\xee\x33\xc3\x7a\xa0\x11\xdc\x24\x6f\x99\x9f\x6b\x6b\xeb\x4e\x14\xec\x90\xa7\xf1\xf3\x1c\x31\x1f\xe7\x1a\xf1\xf1\x79\xdc\xf6\x26\x83\x96\xae\x3f\x01\x5a\xef\x49\xdd\xc5\xdc\xa6\xe5\x35\xd6\x36\xbc\x13\xfb\x65\xea\xf3\xd1\xbb\xc5\xa4\xaf\xf9\x3d\x9c\xbe\x46\x8f\x27\xd4\xee\xe5\xf2\xa6\x36\x5f\xc3\xdb\x5d\x3c\x5d\xe7\x65\xc7\xf1\xd0\xb2\x06\xd3\xf1\x73\x1c\x9d\xd2\xba\x7b\x44\xbd\xf5\xee\x05\xfe\xc6\xd8\xb0\x3b\xae\x31\x22\x2f\xe1\xd0\x4f\xed\xbe\x79\xbb\x65\xf0\x0b\xe5\x67\xf9\x0b\xb5\x75\xf1\xea\xd0\x72\x88\xd6\xc1\xee\x05\x21\x5e\xe0\xb7\xb7\x74\x6e\x71\xfc\xe0\xea\x68\x4d\xf5\x65\xda\x7d\x40\xb1\x37\xf5\x66\x98\x1a\x59\x74\x7c\x4b\x0e\xa7\x63\x99\x1d\xac\x69\x36\x71\x9c\x67\x15\x7a\xb3\x83\x93\x38\x6d\x7b\xdb\x17\x19\x79\x0d\x1f\xe0\xfd\x79\x3b\xb8\x46\xfb\x36\xc7\x67\x66\xd9\x29\xc1\xed\x2a\x7c\x4d\xcf\x5d\x8e\x83\xc7\xe0\x57\xa9\xde\x5c\xf6\xaf\x1b\xdd\x60\x74\xbb\x86\x5a\x93\xdc\x1b\xd1\x8b\xb8\x3d\x47\xfa\xe6\xf2\xed\x5e\x4b\x3e\x22\xfe\x6a\x63\x38\x21\x1d\x64\xbd\x79\x99\xdc\x68\x6c\xd9\x6b\xc7\x37\x23\xb6\x63\x58\xe6\xeb\x15\xed\xef\xe1\x36\xfb\xbe\x07\xee\x17\x66\xeb\x7a\x02\x66\x2a\xee\xd3\xff\x51\x1f\x37\x25\x39\x6a\x7b\xbf\x10\x63\x9b\xcc\x0c\x6b\xea\x7c\x8a\x34\xe7\x3a\xbb\x29\xd6\xb9\x87\xee\x97\x6f\x97\x44\xf9\x30\x99\x76\x1d\xdc\x94\xe3\x62\xb6\xeb\x94\xf4\x1e\x6f\x3f\x64\x6a\xfb\xa9\x9f\x0d\x80\x1f\x9d\xe0\xa7\x44\x4f\x43\xa8\x17\xcd\xf0\x6b\x5d\xdc\x23\xc3\x8d\xb8\xee\x6a\x67\xaf\x83\xaf\xf7\x84\xef\xe2\xfd\x36\x88\x1d\x07\xdb\x1f\x61\x36\xef\xe9\x07\x0e\xf5\x37\x8b\xb8\x3d\x90\xef\x32\x8c\x1d\xc5\xb2\x06\x81\xb5\x7c\x85\xe6\xcd\x25\xc2\x1f\x7f\x68\xc4\xc5\xc6\xd0\xa1\xbe\xfe\xcf\xff\x50\x6f\x8e\x35\xd4\x8e\x76\xd3\xde\xbe\x7f\x77\xc9\xc2\xfd\xf3\xcf\x2f\xd4\xe5\x86\xaa\xa5\xdd\xd7\xd0\xcb\xc5\x5f\x6e\xaa\x58\xd3\x6e\xcf\xbd\xab\xfb\x93\xa6\xd7\x19\x38\x69\xea\x63\xe1\x4f\xaa\x91\x94\xcb\xb2\x67\x64\xd4\xdf\x14\xc3\xdc\xbd\x11\x6d\x68\x1d\xfd\x68\x9b\x28\x9e\xf9\x9c\xed\xe8\x6d\xb7\x54\xbc\x50\x96\x53\x89\xfc\x7e\x0b\x88\x2a\xcb\x71\xb9\x2c\xe7\xa3\x72\xc5\xb7\x2b\xb2\xb9\x5b\xc8\x53\xb5\x62\x6c\x6d\x32\x65\xb9\x52\x2d\xa7\xa2\xd5\xf5\x57\x31\x39\x2b\x57\x65\x2a\x2a\x55\xa2\x52\x4c\xbe\xb2\x8f\xb6\x8e\x3b\x4e\x2f\x3b\xbe\x54\xcc\xeb\x94\x71\xda\xcf\x8d\x4d\xb2\x4b\x9c\x9c\xea\xc7\x9f\x36\x3a\xab\xac\xed\x42\xff\xc6\x8e\xe2\x45\x4d\x6c\x43\xd9\x1f\xae\x87\x63\x3e\xce\x69\x61\x97\x25\xb8\x6e\x30\x8f\x69\xe0\x7d\x52\xe9\x07\xaa\xe1\x02\x33\xa7\xba\x38\x93\x06\x7b\xad
\x51\xf8\x53\x1c\x3f\x83\x42\x2e\x9b\xc6\xbb\x1c\xd2\xbd\xd6\x51\xb4\x1c\xb7\x6b\x93\x4a\x29\x4b\x69\xd8\xc5\x6b\x13\xa3\xb4\xe9\x68\x4c\xa9\xd6\x68\x3c\x24\x2e\xd9\xc8\xf0\xff\x03\x00\x00\xff\xff\x3d\xc9\x6f\x6f\xb1\x55\x01\x00") + +func ingest_asset_statsHorizonSqlBytes() ([]byte, error) { + return bindataRead( + _ingest_asset_statsHorizonSql, + "ingest_asset_stats-horizon.sql", + ) +} + +func ingest_asset_statsHorizonSql() (*asset, error) { + bytes, err := ingest_asset_statsHorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "ingest_asset_stats-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7c, 0xc3, 0x95, 0xa9, 0x3f, 0x82, 0xc5, 0xa, 0x14, 0x8e, 0x83, 0xe, 0x61, 0x73, 0x8e, 0x2e, 0x2e, 0xa3, 0xea, 0xaf, 0x66, 0x51, 0xf, 0x4b, 0x1f, 0x75, 0xf4, 0xf4, 0x60, 0xbe, 0x97, 0x77}} + return a, nil +} + +var _kahunaCoreSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\xbd\x57\x93\xdb\xc8\x92\x80\xfb\x3e\xbf\xa2\xdf\x74\x4e\x70\xce\xb2\xbc\xd9\x73\x67\x23\x40\x12\xf4\x00\x3d\x09\xf0\x65\xa2\x2c\xbd\xf7\xfc\xf5\x37\xc8\x96\xba\xa9\x9e\x96\x39\x1a\xed\xde\x1b\xa1\x8e\x09\x4d\x8b\x48\x26\xb2\xb2\xbe\xca\xca\xaa\x2c\x40\xff\xfa\xd7\x6f\xff\xfa\xd7\x53\x73\xb5\xdb\x8f\xb6\xae\xd3\xaa\x3f\x59\xb5\x57\x5a\xed\xdc\x93\x3d\x2c\xd6\xbf\xfd\xeb\x5f\xbf\xdd\xae\x17\x0e\x8b\xb5\xb3\x4f\x7e\xbb\x5a\xbc\x0a\x1c\xdd\x76\x37\x59\x2d\x9f\xe4\x7f\xb1\xff\x82\x0f\x52\xfa\xf2\xb4\x1e\xfd\x79\xfb\xfa\x1b\x91\xdf\x3a\x61\xf7\x69\xb7\x57\x7b\xb7\x70\xcb\xfd\x9f\xfb\xc9\xc2\xad\x0e\xfb\xa7\x3f\x9e\xc0\xbf\xef\x97\xe6\x2b\x33\xfb\xeb\xa7\x13\x3b\x77\x7f\x4e\x96\x7f\xee\xb7\x6a\xb9\x53\x66\x3f\x59\x2d\xff\xdc\xb9\xdd\x4d\xef\x5f\x85\xcd\x7c\x72\x53\xed\x96\x66\x65\x27\xcb\xd1\xd3\x1f\x4f\x1f\x7a\xdd\xa2\xf8\xf0\xef\x4f\xf7\x5e\x5a\xb5\xb5\x7f\x9a\xd5\xd2\xaf\xb6\x8b\xc9\x72\xf4\xe7\x6e\xbf\x9d\x2c\x47\xbb\xa7\x3f\x9e\x56\xcb\x8f\x3a\xc6\xce\xcc\xfe\xf4\x87\xe5\xf3\xbd\xf4\xca\x4e\xdc\xed\xba\x57\xf3\x9d\xfb\xec\x36\x8b\xc9\xf2\xcf\x85\xdb\xed\xd4\xe8\x2e\x70\x52\xdb\xe5\x64\x39\x7a\x16\xd9\xae\x4e\x7f\xee\x9c\x39\x6c\x27\xfb\xcb\x4d\xb9\xf7\xff\xfe\xe8\x00\xa7\xb6\x66\xfc\xe7\x5a\xed\xc7\x4f\x7f\x3c\xad\x0f\x7a\x3e\x31\xbf\xdf\x3c\x66\xd4\x5e\xcd\x57\xa3\x7f\xff\xf6\x5b\xa1\xdd\x68\x3e\x55\xe2\x42\x98\x3c\x55\x8a\x4f\x61\x52\xe9\x74\x3b\x1f\x25\xff\xeb\xb0\x1e\x6d\x95\x75\xe3\xc9\x6e\xaf\x2f\x3b\xb7\xf9\xf7\x57\xa5\x77\x66\xbd\x39\xac\xb6\x87\xc5\xee\xfb\x84\xdd\xf2\xf8\x3d\x92\x73\x67\x47\x6e\xfb\x3d\x92\x37\x3b\xbd\x73\xdf\x29\xf9\x1d\x62\xda\xed\xf6\x2b\xef\xdd\x76\xb2\xb4\xee\xfc\x75\x59\x65\xcc\xea\xb0\xdc\x6b\x35\x57\x4b\xe3\x76\xff\xfe\x2d\xa8\x77\xc3\xf6\x53\x37\xc8\xd5\xc3\x07\xe9\x46\x5c\x4f\xdf\x71\xef\x6a\x7b\x79\xba\x6b\xcf\x37\xe2\x4e\xb7\x1d\x54\xe2\xee\xc3\x97\x3e\x17\xfc\x73\x3d\x73\x97\xef\xd1\xbf\x3f\x7f\x5b\xf5\x8b\xcc\x7f\xa0\xd5\xbb\xef\xb0\xf9\x51\xec\xfb\x75\x6f\x0f\xbb\xfd\x7c\xb2\x74\xbb\xaf\x69\x7e\x11\xfa\x6e\xbd\x37\x2b\xdc\x3d\x1a\x7c\x45\xef\xab\xd0\xf7\xeb\x7d\x41\xfe\x6b\x7a\x5f\x84\xbe\x5b\xef\xb3\xfc\x64\xe9\x57\x5f\xd1\xfb\x2a\xf4\xdd\x7a\xd7\x07\xbd\x3b\xe8\xaf\xe8\x7c\x16\xf8\x4f\xf4\xcd\x27\xbb\xf1\xe6\xe0\x0e\x5f\xf3\xec\xa3\xd8\xf7\xeb\x76\x6e\xfb\x35\xb7\xde\xaf\x7f\xb7\xb6\xfb\x30\xfe\x9a\xba\x67\x81\xef\xd6\xf7\x1c\x95\xc6\x4e\xd9\xaf\xab\xfd\x4c\xee\x7f\x59\xfb\xc7\x48\xe9\x36\x7f\x7e\xe7\x6d\xb4\x5a\x7e\x45\xb9\x56\xcb\xef\x36\xf8\x63\xf4\xfb\x9a\xad\x9f\x44\xfe\x53\x9d\xb7\x1c\xe0\xdb\x6a\x6f\x52\x1f\x35\xdf\x65\xdf\x2a\x7e\x37\xe4\x7e\x5d\xf6\x2
5\x34\x7e\x4b\xec\x35\xd0\x7d\x43\xf2\x25\x70\x7d\x5d\xee\x35\x10\x7d\x43\xee\x25\xb0\x7c\x53\xee\xbb\xec\x7b\x0d\x28\x5f\x97\x7b\x0e\x12\xdf\x94\x79\x19\xf2\xdf\x90\xbc\x8d\xe3\xaf\x8b\x3c\x8f\xcd\xaf\xcb\x7c\x36\x14\xbe\x2e\xaa\xd5\xf2\xeb\x02\x9f\x50\xfd\x2e\xa9\x1b\x79\x1f\x05\xc3\xa4\x1b\xc6\x9d\x4a\x23\x7e\x14\x9e\xaf\x47\xbb\xcd\xfc\xa3\x44\x27\x5f\x0e\xa3\xe0\x2f\xba\xfe\xfd\xdb\x73\x6e\x1c\xab\x85\xfb\xef\x4f\x9f\x3d\x75\x2f\x6b\xf7\xdf\x1f\xbf\xf2\xef\xa7\x8e\x19\xbb\x85\xfa\xef\xa7\x7f\xfd\xfb\xa9\x71\x5a\xba\xed\x7f\x3f\xfd\xeb\x9e\x32\xe7\xdb\x61\xd0\x0d\x3f\x69\xfe\xa4\xef\xb7\xcf\x34\x7e\x7e\xf1\xa3\xe2\x7c\x23\x8a\xc2\xb8\xfb\x15\xcd\xcf\x02\x4f\x8d\xf8\x73\x05\x4f\x95\xce\xd3\x87\x4f\xf9\xed\xa7\xcf\x76\x77\x25\x1f\xde\xde\xf9\x53\xf3\x3f\xde\xf3\xc5\x43\xdf\x6c\xcf\x67\xbe\x8c\x1b\xdd\x37\xfe\x7c\x1a\x54\xba\xe5\x17\xb3\x1e\x13\xda\xcf\x6e\xff\xaa\xe5\x8d\x21\xff\x49\xe3\xff\xa2\xe4\xee\x80\x66\x3d\xbb\x1e\xdd\x56\x31\xeb\xed\xca\x38\x7b\xd8\xaa\xf9\xd3\x5c\x2d\x47\x07\x35\x72\x77\x37\x7c\x67\x02\x7e\x13\xb3\xce\xab\xc3\x7c\xff\xe7\x5e\xe9\xb9\xdb\xad\x95\x71\xb7\xd5\xc4\x87\x37\x57\x4f\x93\xfd\xf8\xcf\xd5\xc4\x3e\x2c\x10\x3e\x6b\xec\x23\x90\x1f\x9b\x79\x47\xf7\xb5\x91\x9f\x00\x78\xcf\xe1\xcf\x94\x3f\x06\xdd\x7f\xfc\xf6\xf4\xf4\xf4\xe9\x93\x89\x7d\x32\x63\xb5\x55\x66\xef\xb6\x4f\x47\xb5\xbd\x4c\x96\xa3\x7f\x50\xf6\xcf\x7b\xdf\xc4\xbd\x7a\xfd\xf7\xbb\xf4\xed\x8b\x4b\xb5\x70\xef\x08\x0b\xf1\x9e\xf0\x51\xcd\x0f\xef\x49\x43\x88\xde\x8a\xcf\xd5\x6e\xbf\x58\xd9\x89\x9f\x38\xfb\x34\x59\xee\xdd\xc8\x6d\x5f\x44\x7e\xfb\xe7\xdb\xbe\x7f\x19\xc5\x7f\xd3\x17\xbb\x1f\x72\xc4\xc7\x85\xc0\x93\x9e\x8c\x26\xcb\xfd\x9b\x8b\x3b\xb7\x59\x1e\x16\xef\x5f\x5b\x1e\x16\xbb\x83\x76\xcb\xfd\xf6\xb6\x14\x7c\xdb\xcc\x67\x99\xc9\xd2\xcf\xd5\x6d\xc5\x68\xdd\x6e\xff\xbe\x39\xcf\x82\xe3\xd5\xc2\xd9\xd5\x42\x4d\x96\xef\x48\x11\xf2\xd6\xe8\xfd\x78\xeb\x76\xe3\xd5\xdc\xee\x9e\xf6\xee\xfc\xd6\x32\x3f\x57\xa3\x2f\x59\xf4\xd5\xbe\xf9\xe8\x91\xc3\xed\xae\xf3\x89\xd2\x93\xf9\x64\x7f\x6b\xdc\x73\xfb\x3f\xb9\x64\x3e\xff\xda\xe5\xc9\x68\x79\xcb\x85\x6e\x66\x3d\x7f\xf2\x90\x0d\xbc\xa4\x16\x1f\x9d\xfe\xe7\x7d\x59\xfd\x94\x2f\x87\xf9\xda\xd3\x3f\xfe\xf1\xa9\x2b\xfe\xe7\x8f\x27\xf0\xcf\x7f\x7e\xe5\xdb\x6f\x0d\x7c\xab\xe7\x2f\x0d\xf8\x96\xc6\xcf\xfa\xf2\x8d\xb6\xcf\xfb\xf9\x5b\x9a\xfe\xea\x9e\x37\xea\xde\xf1\xdf\xb3\xce\xbf\x0e\x8c\xdb\xfc\xf7\xa3\x63\xe2\x96\x32\x3e\x0f\x87\xe5\xca\xba\xc7\xb1\xf0\xd9\x18\xf8\xeb\x4d\x3f\x9f\x9f\x7f\xf4\xf6\x9f\x27\xc6\xcf\x86\x7c\xfc\x4c\xed\xc6\x0f\xc6\xb0\xbf\xb0\xbd\xde\xba\xe3\x37\x85\xf4\xc1\xcc\xdc\x7e\x3e\xd9\xed\xbf\x29\xfa\x92\x6d\x7f\xc2\xfd\xf9\x63\x33\x5f\xed\xdc\x7e\xb2\xf8\xc2\xc8\xbf\x07\xd6\x77\xc6\xd6\x43\x9f\x7f\x9e\xd4\xbf\xe8\x7b\xd3\xdf\xaf\xf7\xf9\x02\x3a\x5f\x5a\x1b\x7c\xae\xe6\xb5\x15\x5f\xa2\xe5\x63\xf2\xf5\xa3\x3d\xf6\x71\xe1\xf5\x8f\x97\x41\xee\xb6\xdf\x19\x41\x9f\x77\x5e\xec\x97\x22\xe8\x1d\x77\xb5\xdb\xb9\xfd\x7b\xfe\x7c\x1e\xab\x5f\xbc\xac\x16\xb7\x61\xf5\xbe\xea\xf5\x76\x62\xdc\xf2\x0b\x41\xec\x7e\xf1\x4b\x11\xee\x7e\xf1\xc9\xae\x0e\x7a\xee\x6e\xbc\x99\xc9\x7d\x47\xf2\xa7\x46\xd1\x87\x1e\xfe\xb8\x64\x7d\x6e\xcb\x9b\x7e\xfd\xd8\xc0\x2f\xb0\xf1\xf1\x9b\x1f\x3d\xfc\xe6\xab\x9f\xfc\xfe\x25\x20\x9e\x13\xf6\x1f\xe5\xe1\x79\x59\xff\x8c\xc3\x64\xfd\xde\xc4\x4f\xff\x32\x72\x57\xdb\xfd\x8b\x37\x0a\x61\x31\xe8\xd5\xbb\x4f\xe0\xed\xb4\xe9\xce\x7b\xb5\xdf\xbb\xc5\x7a\xff\x74\x1b\x16\xbb\xbd\x5a\xac\x9f\x6e\x29\xd3\xea\xf0\xfc\xc9\xd3\x75\xb5\x74\x7f\x9d\x6c\xbd\x9a\xcc\x0f\xdb\x87\xa9\xf6\x4b\x77\xd8\x5f\xd6\xee\xdb\x9d\xf2\xbc\x2d\xf1\xa0\xf7\xaf\x61\xff\xe5\x8e\x5f\x
[binary payload: a long run of hex-escaped byte data embedded in the diff; not human-readable in this extract]
\x8c\x20\xf7\x62\x7d\x79\x42\x67\x3c\xcd\xb2\x79\x82\x12\x8e\x8d\x69\x9e\x25\x2c\x4b\x67\x31\x45\xa7\x14\xc5\xc6\x2c\x8d\xf9\x24\x67\x32\x0a\xf1\x31\x66\x98\x9c\x00\x92\x22\x4c\xe2\x17\xca\xbe\xcb\x7e\x71\xdf\xb2\x5f\xcc\xf2\x54\xe9\xaa\x79\xd1\xd6\x78\xcd\x02\xda\xb8\xae\xb7\x0e\x1d\xa1\x87\xdc\x79\x05\x3f\xaf\x97\x91\xd6\x4e\x2e\xa2\x5b\x3e\xa5\x85\xbc\x1b\x15\xda\xd8\xa4\x9b\x45\xec\xde\x6a\xa1\xb1\xb6\x29\x00\x99\x57\x14\x45\x95\xb3\x53\xd3\x5a\x83\x83\xf0\x71\x75\x75\xdf\x9e\xb0\x2d\xdf\xf9\xb2\x63\x76\x33\xd6\x49\xa6\x3a\x36\x9b\x9e\xaf\x62\x93\x0b\x0e\x75\x9d\xaa\x1b\xe7\x2d\x93\x76\x5a\x06\x5b\xcf\x10\x36\xed\x52\x71\x5d\xe5\xea\x20\x69\x8a\x89\x1e\xa9\xa7\x05\x35\x6a\x6d\x94\x9c\xc2\xac\x15\x27\x21\x3e\xa9\xcc\x8d\xa6\x94\xd3\xd9\xcf\xd1\x38\xeb\xec\xcb\xf8\x72\xf1\x03\xca\xae\xd5\xd7\xa3\x3f\x20\x65\x5f\x6c\x5b\x1c\x73\x39\x95\x82\x98\x4a\x12\xcc\x02\x40\x43\x40\x43\x1a\x24\x14\x42\x3c\x06\x14\xa1\x10\x4e\xd2\x14\xe1\x98\xcb\x10\x26\x90\xe3\x18\x16\xd0\x90\x67\x33\x02\xc1\xd7\x6d\x31\xcf\xd3\x1e\xc0\x9c\x65\x72\x9a\xca\xb3\x04\x26\x4c\x4e\xb3\x31\x43\xe8\x9c\x86\x09\x83\x08\x0d\x78\x48\xf3\x09\x05\x32\x00\x58\xc2\xd0\x54\xc2\xa5\x71\x9c\x30\x98\x50\x31\xc8\x62\x84\x70\xfe\x1a\xe4\x7a\x97\xfd\xe2\xbe\x65\xbf\xa8\xdd\xde\xbe\xce\x05\xff\x20\x92\x69\x8e\x36\xb5\x02\x83\xdd\x2f\x81\xcc\x14\x2e\x60\x1e\x6a\x72\x43\x37\xba\x28\x46\x7a\x5e\x4c\xbb\x48\x19\x89\xef\x5c\xfc\x2e\x3b\xa0\x76\xb1\xb6\x46\x96\x89\x58\xe1\xba\x55\x9c\x02\x96\x72\x61\x69\x07\xff\x11\x64\x79\x81\xde\x59\xd9\x77\xbe\x2c\x8d\x7a\xd5\x70\xc7\x63\x30\x15\xfc\xfa\x12\xc0\x39\xdd\x7a\xa2\xa0\x91\x51\x64\xa5\xfe\xbc\xb3\xbc\xcb\x96\xdb\x1d\x77\x7d\xc6\xb7\x47\xbe\xe1\x9c\x65\x62\xd3\x5c\x70\x21\xfa\x6c\xd3\x5e\xb9\xf6\xb4\x44\x18\x8c\x13\xe3\x4a\xe8\x7e\x1d\x0e\x68\x6d\xc6\xae\xbe\x7c\x75\x0c\xea\x1f\x50\xd6\xbc\xbc\x1e\xfd\xf1\x28\x4b\x91\x17\x93\x06\x32\x96\x4e\x12\x9c\x00\x2e\x8f\x73\x06\x33\x09\x1f\x43\x86\x62\x30\x9b\xa7\x88\xca\x69\x96\x40\x0e\xd0\x18\x43\x9a\xe2\x59\x04\x79\x94\x24\x00\x64\x84\x65\xb8\xd7\x36\x73\xbf\x05\xed\xf3\x94\xa7\x33\x86\xa7\xe1\xd7\x6a\x30\x38\x4e\x00\x82\x3c\xc4\x88\xe7\x30\x7e\x6d\x7d\x07\x40\x0e\x28\x9a\xc7\x84\xc9\xa8\x38\x01\x7c\x92\x11\x36\x46\x79\x86\x19\x8a\x7e\x8d\xed\x32\xef\xb2\x5f\xdc\xb7\xec\x17\xc5\x0f\xa7\xeb\x71\x9a\xae\x12\xce\x2a\xc3\x33\x3c\xa5\x13\x95\xe6\x2e\x17\x2e\x7e\x28\xa4\xa5\x75\x84\xca\xc0\xf0\x9a\x01\xdd\x99\x26\x8e\xc9\x1c\x55\x50\x24\xed\x2a\x11\xc3\xb5\xa2\xfb\x6e\x7c\x70\xc4\x47\xf4\x38\xc8\xc7\xae\x7a\x9c\xc4\x28\xd6\x00\x17\xbc\x55\x3e\xac\xde\x41\xae\x59\xb9\x23\xaa\xcb\xf0\x1a\x92\x4c\xd9\x6f\xce\xce\x58\x35\x8f\xb1\x7e\x98\xab\x2d\xf1\x3b\x97\x37\xd8\x2b\x4c\xe2\x8d\xc6\xc1\x33\x57\x67\x81\xbb\xbd\xba\x79\xc6\xf6\xb5\xed\x65\xab\x12\x16\x13\x2d\x99\x77\x21\xca\x1b\x6a\xdd\x5f\x6e\x24\xae\x97\x87\xbd\xd8\xbd\x0c\x2c\xb7\x3f\xa0\xec\xe6\xeb\xe3\x3c\x7f\x3c\xca\x72\x0c\xc1\x80\x65\x51\x9a\xd1\x1c\x64\x00\x60\x01\xc7\xb2\x39\xc5\x40\x86\x41\x14\x0d\x59\x90\xc4\x5c\x4a\x62\x36\x06\x39\xc2\x04\x60\x9a\x45\x98\x8e\x13\x8e\xca\x70\xcc\x01\xea\x75\x17\xd6\x6f\x41\x7b\x92\xd1\x59\xce\xe5\x3c\x93\xa7\x14\x97\x30\x2c\x87\x78\x82\x30\xcb\x01\x3e\xa7\x28\x2a\xe6\x68\x08\x00\x4d\x50\x9c\x41\xcc\xc1\x84\x8f\x49\x46\x48\x8c\x78\xc0\x26\x74\xfa\x72\xde\x17\xca\xbe\xcb\x7e\x71\xdf\xb2\x5f\x13\xeb\x46\x15\x5d\x8e\xb0\x6f\x17\x37\x9f\x1e\xe3\x8d\xe3\x83\xa5\x22\xcf\xc9\xb1\x28\xb3\x87\xb2\x8a\xe4\xc6\xec\x2b\x78\x0a\xcc\x85\x21\x9d\xa1\x68\x3a\x07\xb7\xc7\x79\x83\xfa\x2a\x50\x25\xd0\x64\x32\x59\x60\x3a\xcb\xa3\x85\x3d\xb0\xbb\xc7\xe8\xae\x93\x9f\xfb\x1a\xbe\x52\xf6\x9d\x2
f\xdb\x6c\xac\x49\x94\xf7\xca\x75\xa8\x14\xef\xb4\x5f\xc4\x63\x53\x5b\x9d\x3c\xfa\x83\x98\xe7\x26\xd7\xfb\xe4\x7a\xa2\xe6\xd3\xe2\xae\x3e\x06\x3d\x3f\x32\x07\x4e\x32\xfd\x76\x47\x5c\x50\x5c\x38\x41\x6a\x95\xbb\x41\x76\xc3\x06\xd9\x4c\x37\xe0\xeb\x6e\x17\xcf\x73\xf9\xda\x91\x46\xee\x7f\x40\x59\x7b\x7e\x3d\xfa\xe3\x51\x36\x4f\x39\x04\x33\x40\x23\x98\x62\x36\xc9\x00\x4f\x12\x98\xd0\x79\x9e\x65\x29\xcc\x10\x97\xb1\x54\x8a\x63\x3a\x4b\xf3\x98\x86\x71\xf6\x5a\x29\x2e\x8b\x29\x88\x29\x1e\xe4\x34\xc8\xbe\x76\x57\xfc\x0d\x68\x9f\xa5\x1c\x93\xbc\x30\x9a\xd0\x98\xc7\x30\x27\x69\x9e\xe4\x71\xf6\x5a\x40\x31\x21\x88\x65\x41\xca\x70\x88\x4d\x00\xe1\x20\x97\xc4\xd4\x0b\x5d\x11\xcb\x67\x80\x65\x69\x16\x65\x2f\x56\x96\x7d\x97\xfd\xe2\xbe\x65\xbf\xd0\xe1\xbc\x3a\x79\x1b\x4d\xde\x57\x61\x1b\xdd\x44\xcf\xdf\x0b\x5e\xd8\x5d\x2d\xf6\xd4\xde\xdc\xc5\x6a\x8e\x74\xad\x2b\x36\xa4\x35\x0c\x86\xcb\xfd\x5b\xd3\x79\x65\x6a\xf6\x6e\x19\x2d\xad\xa0\xb4\xac\x7c\xb9\x0f\x3b\x60\x3f\x6a\xb7\xf6\xd6\x6a\x18\xc9\x9a\xc3\x90\xb7\x88\x41\xf5\xce\x97\x8d\x44\x70\xc0\x4d\x2a\x25\x18\x83\x66\x8b\x9d\x66\x30\xa6\x1c\x0b\xf9\x42\xd9\x55\xfd\xba\x6f\xd5\xcb\x9e\x4e\xc5\xe8\xb6\x4b\x71\x5a\x26\x2a\xdc\x96\xe4\xb0\x5d\x3c\xf0\x62\xe8\x57\x4c\xc2\x68\x61\x1f\x28\xf1\x9d\x95\x2b\x56\xaa\xeb\x3a\xce\x56\x27\xb5\x86\x2f\x03\xcb\xd3\x0f\x28\xeb\xfc\xd3\x6d\x31\xff\x87\x52\x16\xa3\x94\xcd\xf2\x0c\x72\x29\x9b\xa4\x28\xa1\xa9\x8c\x64\x59\xf2\xe2\x6c\x66\x4c\x4c\x23\x18\x73\x34\x66\x19\x1a\x40\xc2\x92\x84\x85\x00\x65\x39\xc9\xa9\x2c\xcd\x50\xcc\x30\xf9\xd7\xc6\xf4\xbf\x05\xed\x73\x2e\x26\x39\x4c\x31\x05\xe9\x2c\x8b\x5f\x0c\x30\xcb\x11\x98\x40\x2a\xcf\x30\x1b\xa7\x19\xc5\x53\x88\xe3\x18\x9a\xa1\x08\xcc\x18\x26\xce\xb2\x24\x49\x33\x4c\x12\x44\x40\x0a\x71\xfe\x42\xd9\x77\xd9\x2f\xee\x5b\xf6\x6b\x79\x57\xb6\x3b\x55\x9e\xc1\x6d\x77\xee\xec\x79\xc9\x43\x99\x36\xe8\x62\x66\xf5\x74\x51\xda\x43\x25\xd6\x59\x2b\xa7\x2b\x73\x36\x9d\xe9\x9c\xb4\x72\x74\xb7\x8b\xf4\x9e\x27\xa8\x26\xcc\x64\xc0\x5d\xdb\xfb\x11\x1b\x3a\xe3\x6c\x9c\x37\x60\xcf\x66\x62\x86\xf8\xf2\xcd\x97\xad\xde\xf9\xb2\x6d\xaf\x94\x37\xd7\x93\x6e\x71\x66\x2c\x57\x7c\xb7\xe0\x6a\x71\xd1\x84\x02\x6b\x5f\x4a\xef\xa8\xeb\x4b\xa6\x05\x8b\xaa\x6e\x0c\xb4\x45\xdd\x6d\xab\x0b\x33\x8c\xfb\x8a\x6e\x11\xb6\xb6\x34\xe8\x36\x92\xb3\x33\xf6\xbe\x7c\xa4\xfd\x2e\x1b\x39\x7a\x31\xd8\xa6\xfc\xba\x87\x41\xa6\x7e\x40\x59\xf7\x9f\x6e\x8b\xf9\xff\x23\x65\xff\xf4\xa7\xff\xfc\xcf\x3f\xfd\xe7\x7f\x7e\x91\xe3\x21\xfe\x92\xb7\xdd\x97\x4d\x7c\x22\x7f\xf9\xd2\xe6\x39\xe9\xfa\xff\xfe\xe2\xdd\x2f\xe4\x2f\x5f\x3c\x41\x34\x95\x2f\xb2\xe0\x09\xff\xfd\x65\x97\x96\xe4\x14\xff\xe5\xcb\x65\x4c\x9a\x2a\xfd\xef\x2f\xf6\x74\x26\xdd\x5f\xbe\xbc\x0c\xf2\xa7\xbf\xb9\x00\xbe\x0e\xf1\xc6\x7c\x4d\x38\x58\x2a\x82\x9e\x16\xe9\x01\x74\x36\xd0\x55\xec\xbd\x00\xf6\xcc\x36\x60\x44\x4f\x70\x0f\x9a\x16\xae\xe0\xca\x50\xf5\x00\x86\x1e\xeb\xb8\x6b\xd7\x84\xd2\x76\xbb\xff\x8f\x3f\x7f\xf9\x6b\x38\x4a\x10\xfe\xe7\x7f\xfe\xe3\xaf\xc7\xbe\xef\xb9\x3f\x4f\xc4\xec\xcd\x47\xfa\x88\xb6\x49\xaa\xe1\xca\xa5\x85\xf8\x42\xf3\x63\x79\x92\x36\x0f\x6d\x3a\xc7\x7b\x75\x9b\x90\xd5\xa2\x54\xf7\x3e\x56\xfe\xe7\xb5\xc7\xe2\x5f\x5f\xaf\x43\x83\x3f\x7f\xa1\xfe\x8b\xf9\xf3\x97\x97\xdf\xf8\x0f\x57\xf1\x77\x93\x10\x81\x63\x84\x1e\x30\x84\xbd\x6a\xb9\x87\xb5\xb9\x33\xcd\xad\xab\x48\x12\x6b\x4b\xb6\xb6\xb2\x04\x79\xb7\xf5\xdc\x35\xf6\xb4\x8d\x07\x76\xaa\x0b\x42\x2d\x90\x05\x57\xfe\x8f\x3f\x7f\x41\x3f\xf8\xe2\x99\xe3\x56\xa4\x16\x5a\xbf\x4e\xae\x87\x55\x0c\x67\xc3\x38\x9c\xa5\x34\x32\x05\xb9\x5e\xa4\x7e\x7e\x8a\x7a\x7e\x67\xea\x81\x1c\x28\x6f\x13\xff\x0a\x02\xfd\x37\x73\xf8\xfa\x
43\xfd\xf9\x0b\x40\xff\x74\x06\xac\xb6\x81\xe6\x6a\x1f\xb0\xab\xd0\xc5\x8a\x6c\xaf\x10\xf6\x45\x1d\xcb\x88\xb1\xd0\x2a\xd0\x0f\xda\x9a\x35\xf5\x95\x2b\x28\x06\xb3\x12\x77\x1b\x20\x8b\x4e\x88\x9d\x17\xb7\xea\x07\x33\xc8\xb5\x64\x88\xe3\x80\x3b\x2b\x79\xad\x31\xdb\x2d\x2d\xe5\x68\x91\x45\xf3\xe6\x08\xad\xee\xfe\xb8\x54\x42\x65\x5f\xe4\xd5\x58\x96\xc5\xc7\x19\x7c\x58\x86\x6f\x3f\x80\xfd\xfd\xa6\xc0\xfe\x13\x26\xfd\xca\xe9\xfc\xc3\x29\xfc\x9d\x4b\xeb\x42\xfe\x95\x2b\xeb\xef\x0d\xf6\xf2\xd9\xbe\xbc\x8e\x64\x24\xbf\xe1\x98\xfd\x98\xfc\x56\xa3\x5d\xc7\xb6\x1b\x4f\xd5\x39\x6f\x7f\xab\x11\xfb\xf4\x52\x56\xfd\xd0\x76\xf7\xa7\xec\xd3\xdb\x30\xef\x6d\x14\xdc\x01\xdb\x91\xdd\x83\x2d\xad\x3c\x55\x11\x91\xbc\x59\x23\xcf\xc4\x20\xd2\x0d\x85\x8d\x0e\x9e\x11\xe1\xad\x1d\xb1\xc0\x40\xb2\x6a\xdb\x2b\x73\xb7\x17\xa4\xcd\xcb\xe5\xfd\xd7\xf8\xa3\x20\xa7\x83\x5d\xd8\x1b\x28\x79\xd6\x45\xe0\x34\xfa\xb0\xea\x96\xdd\x8a\x52\xf8\x13\x47\x45\x0b\x1e\xe4\xb1\x3b\x45\x07\x7b\x37\xfa\xef\x4c\xf1\x6b\x7c\xf1\xb5\x2b\xdf\xeb\x0d\x46\xa9\x98\x46\x21\x37\xda\xa5\xd3\xcb\x60\x5d\x57\x77\x9f\x63\xb7\xe6\x2d\x2a\xeb\x7d\xb3\x73\x6a\xde\x9f\x2e\xdb\x93\x06\x16\xd5\x98\x7c\x1d\x45\x4c\xf9\xfd\xfc\xf5\xd6\x54\x7c\xfb\x47\x7c\x1d\x74\xfa\xf6\xbb\x2c\x08\xbc\xf4\x7e\x3f\xfd\x64\x4e\xd7\x5d\x10\x15\x03\x75\x2e\xcc\x81\x7a\xd4\x27\x5b\xb8\x9c\x03\xb2\xd0\x76\x0b\x37\x71\x0f\xd6\x45\x26\x4d\xb0\xd9\x9e\x4e\x5f\x87\xb3\x0d\xc1\x71\xed\xf5\xa9\x78\x08\xc2\x49\x80\x62\xdd\x7b\x75\x8b\x36\xc5\x5e\x5f\x17\x27\x18\xec\xea\x88\xb5\x4e\xc6\x3a\xee\x74\x3e\x32\x4f\xe0\x70\x1b\x1f\x7b\x21\xe0\x76\x53\xf4\xa0\xe4\x59\x23\xcc\x5c\x87\x73\x57\x2c\x26\x12\x71\xab\x41\x84\x7a\x91\xee\x44\xe7\x7f\xfe\xe7\xa3\xef\xf4\x1b\x2f\x0d\x7c\x6a\x69\xac\xbf\x5d\x1a\x59\xd8\xd4\x91\xa0\x78\x7e\x75\xaf\xa8\x6a\x50\x89\x27\xdf\xf4\x8e\x9f\x5a\x1e\x26\x16\x32\x34\xbc\xed\xf3\x48\xf3\xb5\xd2\x88\xbe\xee\xd4\x79\x5d\x1a\xf7\xc3\xad\xf7\x57\x43\xef\x78\x46\x66\x79\x87\x95\x4a\xb9\xd6\x2d\x00\xb7\x81\x8c\x93\x54\xd8\x84\xaa\x74\x67\x80\xa7\x7a\x4d\x33\x32\x7f\xe9\x9d\x1e\xcb\xba\x42\x2c\xcf\xd8\x35\x77\x63\xd7\xdb\xbe\x95\xac\x8e\x0b\x91\x54\x8e\xc5\xcf\x4e\x4d\x98\xea\xa1\xa5\x00\xdf\xc2\xb4\x88\x25\xe1\x77\x87\x1e\x3d\x05\xbd\xf3\x01\x7a\xd1\x64\x32\x66\x59\xf8\x59\x5a\x6e\x63\xf7\xe1\x67\xf3\xd2\x4e\xf1\x5e\x2c\x96\x54\x0d\x90\x65\x1c\xf7\xb5\x9f\x92\x53\xe6\x51\x5f\xdd\xc4\x57\xe8\x77\xcf\x42\x6f\x2f\xdb\xfd\xcc\xf5\xee\xa5\x59\x8b\xea\x26\x1e\xce\xd1\x18\xdc\xb4\x30\x5d\xe7\x52\x72\xf4\x24\x9a\xb4\x67\x97\xeb\xd5\xe1\x96\xee\x83\xed\xb0\x5f\xb7\x3b\x92\x43\xa6\xb6\xee\x87\x8c\x1c\xb6\xb7\xf4\xda\xf9\xa9\xb6\xb3\xd7\xdd\xf2\xb4\xae\xa9\x23\x5f\xf4\x86\x29\xfc\xfe\xd0\x33\x4f\x41\xbf\xff\x00\xbd\x6c\x6e\xc1\xea\x18\xf7\x44\x97\x98\x1d\x87\x02\xd4\x7b\xd7\x50\xe3\x2d\x7c\xbc\x2a\x47\x26\x57\xba\xf0\xdc\x3c\x02\x80\xc6\xfb\xea\x8d\xf5\xde\xb3\xd0\x0b\x61\x82\xe0\xf1\x54\xd2\x86\x6c\x5c\xe5\x51\x07\x55\xee\xd5\x72\x71\x5b\x54\x9d\xe2\x94\x5d\x72\x2e\x85\x93\x9a\x36\xf7\xd6\x24\xca\xfe\x21\x7a\xe6\xba\x6e\x79\x4e\x3d\x9a\xaa\x4b\x8c\xa3\x83\x3b\x8f\xde\x97\x47\xaa\xbe\xc0\xe5\x6e\x19\xf9\x4e\x03\x74\x4f\x2c\x7e\x77\xe8\xd9\xa7\xa0\x0f\x3f\x1a\x1c\x68\xc2\xd1\x5b\x3a\x87\x50\x0c\xf3\x90\x5b\x5e\x2d\x5d\x2b\xa9\x32\xbd\x4b\x76\xa4\xf4\x67\x49\xb1\x29\x7c\xa7\xc5\x93\xb8\xbc\xbf\xb1\x7e\xff\x2c\xf4\xfb\xe8\xe0\xaf\xcf\xb9\x04\x57\x03\x35\x0f\xdb\x2c\xaa\x74\xa3\x3e\xef\x4a\xf6\x8a\x8e\xcb\xca\x92\x95\x2a\xc2\xd1\x43\xdc\xdd\xc5\xa5\x76\xe4\xd2\x89\xca\x16\x47\x25\xa2\x02\xdf\x5f\xed\x21\xbb\xcf\x97\xe2\x3c\x89\x2d\xd7\x1c\xea\x3b\xfd\xb0\x1e\x01\xea\xf9\x59\x98\x7e\x77\xe8\
xf1\x53\xd0\xa7\x1f\xa0\x97\x06\x2f\xd4\xf2\xf2\x62\x6f\x8a\x69\xac\x37\x3c\xcf\x1d\x79\xdc\xa6\xdb\x34\x5a\x29\x86\x73\x3a\x16\xd2\x50\x01\x13\x16\x4c\x7c\x29\xbe\x41\xef\x3f\x0b\xbd\x6e\x91\x31\x3e\x26\xd1\x99\x2b\xaf\xc2\xb0\x97\xbb\x46\xb2\x68\x8d\x91\x6f\x7d\xa2\x4e\x8e\x5d\xc9\xe9\xf2\x44\xda\x9d\xd3\x8b\xf2\x00\x7b\x85\xad\x5b\x7b\xbd\xb7\xcc\x50\xba\xb0\xec\x9a\xce\x48\xcf\xcf\x17\x77\xd9\x9f\x8f\x28\xbb\x1c\xdc\xd6\x9f\xda\x49\xfe\x04\x5b\xcf\x3d\x05\x7d\xf1\xd1\xd6\x9f\x17\xe2\x9e\xe8\x85\x13\x97\x2a\xd9\x2a\xda\xbe\x21\x22\x7d\xbb\x1e\xcf\x0d\xae\x7b\x4e\x7f\x2c\xfa\x8a\xce\x59\x43\xb9\xc7\xf2\x9b\xc1\x09\x9e\x85\x5e\xcc\x89\x91\xf1\x13\x2e\xc5\xe2\xbe\x91\xcc\x52\x4a\xa4\xdb\x3a\x4a\xc8\x76\x99\xb4\x78\xad\x4f\xb9\x7a\xd2\x04\x98\x0b\x3d\x58\x98\xe7\xe1\x9e\xdb\x99\xd0\x9f\x8a\xe9\x86\x70\x34\xdb\x1b\xb4\x53\x89\x18\x75\xc1\x46\xae\x4f\xeb\x4b\xbf\x36\xb6\x53\x4a\x73\x9f\x60\x70\xf8\xa7\xa0\xaf\x3f\x1a\x9c\xa3\xdf\x32\x1e\x22\xd0\x5e\xfa\xc7\x75\xc1\x24\x88\x76\x49\x75\xb7\x3c\xf9\xa2\xd6\x81\x2e\x51\x78\xcf\xdd\x53\x51\x91\x00\xfb\x06\xfd\xe1\x59\xe8\xcd\x43\xc1\x66\xdd\xe0\x55\xf4\x32\xdc\x82\xb5\x36\x82\x48\x89\x15\x46\x9f\x91\xb5\x5c\x72\xb4\x74\x3c\x62\x75\xf9\x58\x5c\xb9\xa3\x38\x81\x6d\xc8\x70\x21\x3c\x31\x7e\x4f\x39\x62\x1d\x4c\x8c\x6a\x77\x9d\xe0\x2f\xb5\x30\x97\xfb\xf1\xb4\xc1\x82\xa5\xb4\x61\xff\x09\xb7\x59\x9a\x7a\x0a\xfb\xf6\xa3\xc5\x39\xd3\xde\xf9\xa2\xae\xcb\x59\x2f\x4f\x4d\x96\x9d\x1e\x0b\xc9\xbf\xa7\xd1\x3e\x75\x57\xfb\xbd\xef\xb5\xa3\xd1\xd1\x4b\x11\xf5\x43\xf5\x86\xfd\xc7\xc7\x62\x7f\x35\xf6\x78\xb3\x24\xcc\x71\x85\xc4\x9a\xb3\xa8\x3b\xbb\x3e\x08\x94\xc1\x75\x52\x5f\x39\xad\x78\xf4\xb9\x6d\x71\x5f\x76\xa8\x74\xae\x77\x46\xc4\xf9\xd2\xbe\x62\x6a\x0d\xdc\x73\x80\x2f\x32\x6f\x5f\x12\x4d\xb3\x7d\xb4\x0c\x0f\xad\xe1\xaa\xc5\x39\xb9\xf3\x45\x56\x08\xaa\xf4\xfb\x1b\x7b\x9a\x7e\x0a\xfb\xfe\x03\xf6\xf2\xad\x9f\x0e\xcd\x6d\x5f\xb2\x66\x6e\xa5\x07\xca\x91\xe2\x69\xd8\x3e\xaa\xc3\x50\xf6\xa2\x6b\xe4\xcb\xfe\xb0\x94\xd7\x91\xd6\xc1\xaf\x09\x97\x57\xec\xa3\x67\xb1\x1f\xbc\x23\x06\x7e\x79\x64\x94\xf4\x7a\x37\xf5\xe3\xe5\x6a\x91\xb3\xe5\xc2\x43\x63\x5c\xc6\x34\x70\xfd\xbd\xa3\xc6\x70\xa3\x57\xa3\xe4\x07\x47\x6b\x49\xc9\x42\x01\x06\x65\x9d\x1e\x6f\x5e\xe6\x1f\x57\xab\x87\xdb\xbb\x10\x77\x1e\x27\x87\x8c\x61\xa2\x73\xc3\x29\xc2\xef\x2f\xaa\xe8\xe7\x04\xef\xf4\xd1\xe6\xf8\x46\xbe\xba\x1b\x02\x8f\xaa\xc4\x11\x2f\xac\xd6\x31\x64\x50\x07\x4e\x5d\x46\x65\x0c\xe3\x2a\xd9\xf0\x52\x3a\xaf\xd3\xf9\xda\xbc\x39\x39\xf1\xb3\xd8\xb3\x94\x08\xb4\x43\xe7\x98\x93\xad\xec\x47\x66\x93\x9f\xf9\x9a\xe4\xe3\x6d\x45\x6e\xe9\x1e\x38\xe2\x32\x94\xc5\x1e\xaf\x57\x65\xb5\xae\x49\x56\xeb\x8f\x20\xba\xea\x83\x9a\xce\xf5\x48\xdf\x33\x83\x3e\x46\x8a\x26\xec\x55\x3d\x21\x30\x65\xf8\x6c\xb0\xb4\xd9\xfb\x84\x3b\x2d\xfd\x9c\xa2\xa5\xbe\xc3\x7e\xc1\x43\x1b\x6a\x43\x91\x2e\xa1\xbe\x01\x97\xdb\x12\x34\xd3\xb5\x52\x4a\xb2\xb2\x09\x9d\x6c\x19\x53\x59\xc6\x47\xb3\xd6\xc9\x1b\xf6\xc9\xb3\xd8\xd7\xe9\x65\x1d\x5c\x0a\x8a\x78\xbc\x96\x85\x3a\x11\x93\x03\x3f\x5b\x48\x96\x56\x6b\x2b\x3a\x68\x87\x30\xbf\xab\x71\x95\x09\xf1\xa1\x38\xfb\x9e\xb9\x53\x72\x7c\x60\x89\x1d\x9d\xbc\xc3\x75\x1f\x30\xd5\x8d\x72\xd7\x0f\x66\xda\x68\xcb\x84\x17\x27\x46\x60\xea\x2e\xf9\x0c\x7b\xff\x9c\xa4\x45\x1f\xed\xfd\xb2\xb9\xd3\x25\x3d\xcd\xc6\x2e\x79\x90\x79\xbd\xa8\x76\xe7\x3c\x34\x7d\x93\x13\x2a\x71\x18\xee\x74\x13\x5d\x62\xde\x53\x60\x28\xbc\x61\xff\xb1\x39\xe3\xaf\xd7\x55\xfb\xeb\x0d\x8f\x1a\x7a\xd0\xc0\x01\xbb\x8c\xb1\x8d\x0a\xa8\x08\x4d\x6a\xd6\xec\xf1\xe9\xd0\x94\x8f\x8d\x9a\x52\x20\x1c\xc9\x45\x58\x5f\x99\x74\xb5\xd8\xb1\x7b\x41\x00\x40\xd8
\x1e\x20\xab\x98\x5b\x0f\x86\x2b\x47\xd7\x53\xc6\x3d\xc5\x63\x77\x3e\xb6\x76\xf3\x19\xf6\xfe\x39\x4d\xcb\x7d\xc0\x5e\x84\xb1\x48\x77\xf4\x85\x6c\x74\x7d\x17\x75\x2c\x4a\xea\x2a\xb5\x38\xc7\x8d\x6e\xf9\xb0\xd8\x9c\x0d\x5a\x36\x96\xc2\x62\x25\xec\xb8\xb7\x7b\x6d\xf6\x2c\xf6\xd7\x5d\x7d\x71\xbb\x29\x50\x84\x11\x6d\xb6\xd3\x66\x2b\x07\x27\xd4\x33\x57\xda\xce\x17\x34\x64\x1b\x31\x84\x4b\xc5\x84\x2a\x50\x1a\x3a\xf0\x24\x51\x99\xd5\x29\x4c\xcc\x5e\x63\x2e\xd6\x45\xe1\x43\x26\xd1\x46\x97\x8e\xbd\x8d\xd6\x01\xd9\x15\xfa\x6b\x1a\x53\xd2\xef\xef\x62\xd2\x4f\x89\xda\xd7\x50\xe4\xdf\x60\x7f\x0c\x04\xd1\x0d\x95\x44\x14\x05\xee\x7e\x39\xb6\xdc\x72\xda\x1f\xd6\xa7\x93\x43\xaa\xe9\xa2\x97\xd6\xf6\x20\xcd\x25\x5c\x72\x47\xe6\x8d\xf7\xe4\x59\xec\x65\xfa\xc6\xf5\x69\xab\x2c\x1b\xc1\xa6\x1e\x96\xe4\x34\x6c\x2a\x2d\xab\xa6\x0f\xb7\x83\x40\x15\x8c\xc1\xca\x1b\x76\xb9\xaf\x29\xc8\x3a\x1d\xc3\x07\x83\x76\x4e\xe0\x0d\x9c\x76\x93\x94\xf7\x3a\x4f\xa9\x0b\x3e\xb7\x69\x93\x91\x4b\xe4\x5a\xcd\x66\x03\xae\x1c\xff\x19\xf6\xfe\x29\x55\x2b\x2a\x1f\xb0\x97\x57\x13\x4e\xcf\x1c\x39\x47\xd3\x6e\x71\x3b\xe6\x69\xc8\xb3\x5d\x46\xa8\x3a\xe5\x0f\x0f\xbc\xc9\xa0\x32\xa8\xaa\xa7\x1f\x2e\x92\xf2\xc6\xfb\xfc\x59\xec\x8d\x51\xdc\x1f\x8f\xa1\x2f\x4e\x84\xee\xce\x87\x29\xb2\xd7\x8a\xa4\x2b\xf4\xf1\x26\x47\x85\xb1\x4a\x6f\x37\x44\x27\x9c\x07\x5d\x10\x16\x2a\xd5\x2b\xcb\xf5\xe5\x0e\x35\xca\xa5\x55\xb4\xa6\x6c\xbe\xd4\x13\x63\x43\x0a\x96\xd9\xe1\x74\xd4\xee\x5e\x9c\xa4\x4c\xf7\x19\xf6\xfe\x29\x59\x2b\x1a\x1f\xed\xfd\xb9\x27\x26\xeb\x58\x8a\x33\xcc\x6e\xb5\xc7\xbc\x8f\x4e\x27\xee\x76\x5d\x9e\x8a\x52\x92\x36\x9c\x92\x03\x18\x05\x73\xe9\xed\xf8\xb7\x88\xc2\xc7\xa6\x0f\xbf\x3e\x84\xf9\x58\xd7\xb3\x28\xcf\xba\x41\x82\xb6\xae\xcd\xbe\xdb\x5e\xae\xf7\x7d\x23\x4e\xf5\x6d\x87\xb1\x39\xea\x97\xab\xd6\x2d\x9a\xeb\x3a\x6f\xcc\x43\xb3\x77\x54\x3f\xbf\x2e\xe0\x41\x5b\x58\xd0\xd4\x75\xf9\x54\x36\x6e\x70\x5e\x0f\x73\xd3\x71\xb2\x7e\x83\x27\x2f\x5a\x7f\x86\xbd\x7f\x4a\xd7\x8a\x1f\x23\xf7\xe2\xe5\x48\x1b\xd5\x43\xc7\x71\x62\xfb\xa3\xc7\x2c\x69\x63\x57\xf1\x2c\xa9\xc7\x23\xd7\x9f\x57\xce\x42\x6f\x65\x56\x66\xf6\x87\xfb\xf9\x0d\xfb\xf2\x59\xec\xb5\xf0\xe0\xae\x56\xa6\xed\xa8\x86\xd2\x45\x62\x39\xd7\x94\x58\x3a\xd1\x5a\xf7\x0d\xd7\xa5\xec\x95\xaa\x42\x46\xd9\xec\xe7\xc5\x72\x58\x1e\x82\x6b\x06\x16\xc5\xb6\x3f\x3f\xc4\x0d\xdf\xeb\x2e\xef\xd9\x57\xe9\xd6\x66\x42\x67\x8f\x8a\x0f\xdc\x9b\xe6\x4d\x69\x18\x88\xbf\x3f\xef\xc1\x53\xba\x56\xfc\x18\xba\x97\x99\xc5\x51\x9a\x86\xa9\xdb\xf6\x67\x9f\xf2\xf2\x93\x70\xcd\x4a\x65\xee\x12\x4b\xce\xee\x97\x05\x5b\xd0\xce\xc3\xe7\xd9\x55\xbd\xc0\x6f\xf6\xbe\x7a\x16\xfb\xb2\x5d\x87\xe3\x46\xf5\x6f\xa7\x47\x8d\x65\x09\x12\x5e\x74\x06\x0b\xf2\x97\xf3\xb1\xf7\xd3\x72\xe6\x50\xe1\xcd\xe1\x75\x0c\xf8\x24\x57\x24\xd0\x9c\x0c\xc4\x95\x6b\xa5\x33\xdc\x80\x3a\xf1\xe7\x20\x55\xfd\xc1\x02\x2e\xa6\xa0\x93\x14\xe4\x54\x2f\xa5\x78\x96\x7f\x7f\xde\x83\xa7\x74\xad\xf8\x31\x76\x2f\x04\xc7\x1b\x8a\xfd\xeb\x23\xb6\xd7\xa7\x22\xf6\x3b\xb0\xe8\xfb\xbe\xdb\x55\x85\x64\x4c\xc7\x3a\x12\x11\xeb\x0c\x87\x05\xbb\xa5\xa7\x37\xde\x1f\x9f\xc5\x7e\x18\x01\x48\x48\x36\xf7\xe6\x9c\xe6\xdc\x64\x58\xed\x59\xdb\x33\xea\xed\xbe\x0a\x1c\xc8\x04\x14\xcc\xfc\xc2\x47\x7a\x76\x6f\x0a\x46\xb9\x2a\xcc\xa8\x1e\xbc\x6a\x1b\x58\x9a\x6f\x0b\x61\x65\x9d\xd4\x4b\x3a\xeb\x89\xb6\x37\x86\x0e\x19\x14\x75\x63\x96\xf3\xf9\x13\x6c\x0e\x78\x4a\xd7\x8a\x1f\x83\xf7\x72\xb1\xdd\xda\xbc\xcf\x2d\x36\xf8\xc4\x99\x97\xbb\xa4\x18\xfe\x62\x38\x0b\x53\x3a\x39\xd6\x74\x08\x4e\xba\x74\x5e\x38\x95\x8d\x97\xb7\x37\xde\x7f\x2c\xd9\xfb\xab\xb1\x0f\xc5\xf0\x71\x41\x48\xc9\x3a\x60\xe8\xb7\xf6\xc6\x06\x07\x7
d\xb2\x5a\x4e\xd2\xe6\x20\x65\x6b\xe2\x38\x67\x70\x94\xa6\x98\xf4\x20\x9b\xc0\xc4\x29\x39\x77\x07\x0f\x52\x24\x61\xf9\xb8\x2b\x4b\xd7\x7d\x6c\xdb\x20\xb2\x9a\x83\x1e\x1a\x8b\x41\x66\x07\xb8\x59\x7f\x42\xa2\x16\x3c\xa5\x6b\xc5\x8f\xd1\x7b\x01\x2d\xe4\x5a\xaa\x0c\x7f\x27\xc1\x34\x4c\xdc\x10\x9d\x6e\x85\x99\x36\x17\x81\x24\xfd\xc4\xa9\xd2\x7a\xa6\xcc\x4a\xf3\xb6\xaa\xf9\x86\x7d\xf3\x2c\xf6\xd2\x39\x5d\x0b\x8f\x36\x6d\x99\xad\x9d\x04\x01\x3e\xb9\x05\x60\xc9\x2d\x46\xdc\x42\x55\x01\x2a\x8c\x7d\x2c\xaf\x36\x50\x3d\x26\xdb\x4d\xb9\x31\x70\x56\x88\xad\x5d\xed\x79\xfa\x56\x49\xd7\xa3\x59\x08\xad\x30\xdf\xd6\xae\x27\xa5\x54\x69\xbb\xdb\x21\x13\x3a\x49\xfe\x04\xec\x9f\xd2\xb5\xe2\xc7\xf0\xbd\x4c\x94\xc7\xe5\x71\x31\x4d\x41\x54\xd7\x29\x7f\xcb\xee\xd4\xe1\xb2\x28\x7d\x7a\x94\x4d\x4d\x3b\xf3\xfa\x71\xf1\x60\x42\x8f\xa5\x71\x2d\x7c\xc3\xfe\xf4\x2c\xf6\x33\xe9\xf6\x9b\x48\xbb\x45\x61\x31\x10\x56\x8d\x37\x0f\x01\xc3\x1b\xbe\xa0\x68\xb7\x68\xf3\x92\x58\xc7\x55\xab\xdc\xd2\xa4\x43\x86\x45\xef\xb9\x23\xc7\x6d\xc5\x53\xd6\xdc\x95\x1d\x80\xfd\xb4\xcd\x5c\x20\x6b\x8d\x98\x9f\xb6\x0f\x4d\x68\x25\xd7\xe8\x71\xac\x7d\x06\xef\x9f\xd2\xb5\xe2\x77\xf1\x7b\xb3\x01\x9b\xa3\x88\xd6\xee\x82\x19\x98\xca\x5c\xf7\xd2\x52\xdb\x2f\x62\xc6\x21\xb2\x3a\x7a\xec\xc9\xcd\x8f\xe7\x9d\x3f\xae\x1c\xe5\xcd\xde\x9f\x9f\xc5\xbe\xc4\x07\x15\xe2\xc3\x3d\x38\xa4\xb4\xb9\x7b\x98\x33\x5c\x78\x50\x03\x9d\xbf\xdd\xc3\xb9\x92\x17\x14\xdc\x1c\x8f\x22\xb8\xcf\xd3\x7a\x5b\xf8\x7a\x58\x8b\x66\x58\x3f\xa2\x4a\x74\x68\x93\xac\xf0\xb5\xbc\xe9\x69\xb6\x7c\xb0\x42\x2a\x2c\x10\x4c\x46\xb8\x4a\xb4\x4f\xf0\xef\xc1\x73\xba\xf6\x63\xfc\x5e\x60\x7a\xed\x32\xb3\x8f\xc7\x26\xab\xa5\x0d\xd7\x47\x17\x1d\xcd\x70\x27\xe4\x02\x7f\x28\x6e\x08\x30\xd4\x42\x5c\x09\x0d\x98\xbc\xf3\x9b\xcd\x69\x9f\xb6\x39\xdc\xf9\xa2\x5b\xdd\x5e\xdc\x1f\xeb\x42\x61\xb1\xe8\xe4\x11\x5f\x14\xab\x74\x68\x78\xe3\x02\x61\x70\x01\x8d\x21\xed\x47\x3d\xbd\x48\xa9\x2f\x8d\x76\x1f\xd9\xf6\x3a\x1a\x0f\xd2\x8a\xcb\xcc\xdc\x7f\x34\xee\x06\x5d\xc1\xd5\x5d\x96\x7d\xe3\x44\xd9\x22\x13\xa9\xcf\xb8\xd7\x3e\xa7\x6b\x3f\xc6\xef\x45\x79\x45\xd5\x65\xea\x48\x8f\x00\xa1\x6d\x5b\xb9\xce\xfd\x31\xc5\x41\x6e\x70\x9b\xad\xaa\x5d\x4b\xe5\x92\x1c\x97\x92\xd8\x34\x2d\x7c\xb3\x39\x97\x67\xb1\xa7\x67\x58\x56\x5e\x14\x06\x32\xdf\x24\x7a\xb4\x73\x39\x81\x6a\x18\xad\xa6\x4f\x46\x5c\x81\xc0\x99\x0e\xec\x62\x45\x49\x72\xb3\xa3\xc8\xad\xcd\x99\x29\x76\x8b\x82\x07\xba\x72\x0b\x06\xb9\x32\x78\x0c\x21\x58\xf8\xd0\x2f\x05\xf3\x18\x9e\x86\xf2\xfc\x38\xe3\xcf\xf0\x31\x9f\xd3\xb5\x1f\xe3\xf7\x52\x4a\x6d\x85\xa1\xc8\xf0\x28\x14\x59\xf1\x30\xd5\x6a\x15\x72\xb7\x56\x04\x35\x1b\xa2\x70\xa5\x14\x20\xe4\xa7\xdd\x22\xd4\xf2\xd3\x9b\xcd\xb9\x3e\x6d\x73\xfc\x1a\x42\x51\x33\x19\xf1\xd1\x5f\xe2\xf1\xf4\xd0\x15\x87\x5f\x62\xe1\xc2\x47\xf7\x6d\x92\xad\x29\x61\x49\xb4\x58\xec\x57\x1b\x24\x66\xc6\xa8\xef\xb6\x97\xa3\xb8\xf0\x9b\xdb\x6a\x7d\x5e\x1b\x85\x66\xde\xe9\xfb\x51\x46\xd6\x92\xbe\x5d\xb7\xfb\x85\x21\x09\x42\xf2\x19\xd8\x3f\xa7\x6b\xbf\x8b\xdf\x4f\xa9\x7d\x0f\x4f\xa5\x7d\x5e\x71\xf7\xdb\x2e\x1f\x1a\x6d\xbb\x5f\xd0\x7e\x79\x37\x45\xb3\x3e\x24\x67\x9e\x19\x0e\xf5\x35\xe4\x72\xe3\x0d\xfb\xee\x59\xec\x95\xd9\x6d\x55\x75\xc5\x67\xc2\x31\x99\x41\xb2\x69\x6f\xcd\x22\x5c\x57\xe6\x81\xd6\x7c\x85\x7d\x64\x52\x62\xe9\xbd\xd9\xf3\x69\x61\x15\x35\x59\x54\xbb\xfd\xe2\x9c\x83\xd9\x4d\xb6\xd2\xad\xca\x76\x97\xd3\x7d\x11\x71\x33\xb7\xb9\xe5\x78\x53\xc9\xee\x0a\x1f\x0f\xdd\x27\xa4\xca\xe1\x73\xba\xf6\xbb\xf8\xbd\x67\x31\xf9\xf6\x14\x2d\xc3\x51\xac\xb2\x6b\x21\xd1\xb9\x42\xd7\xa3\xc7\xb8\xfc\x76\x2e\x75\x18\x89\x37\xfa\x3c\x14\x7c\x7f\x4b\xdf\xec\xfd\xc7\x62\x
f1\xbf\xde\xe6\x3c\x96\xfb\x41\x8d\xea\xdb\x39\xd2\x79\x03\x2e\x44\x27\xb1\xb7\xa4\x64\xa2\x20\xb8\x3c\xf8\xb1\xe8\x5b\x25\xa5\xad\x8d\x01\xea\x76\x97\x96\xbc\x7a\x48\x9b\xad\x72\x3b\x9a\x02\xde\x25\x7a\x05\x32\x52\x0a\xd2\x15\x3e\xc8\xbc\x95\xee\x64\xd0\x08\x72\x61\xfb\x09\x31\x64\xf8\x9c\xae\xfd\x2e\x7e\x3f\x55\x97\x20\xde\xae\x17\x6d\xe5\xf7\x3e\x88\xc3\xf5\x3c\x5b\xb9\x84\x0e\x07\x75\xb9\x3c\x24\xd3\x35\xae\x6a\xa5\x9f\xc5\x10\x74\x6f\xf6\x7e\x78\x16\xfb\x22\x11\xe4\x33\x2d\xcb\x75\x6c\x67\xfd\xca\x3d\x2c\xe8\x71\xb1\xc9\xeb\xb0\xb6\xed\x1d\x70\x6f\x6a\x7e\x70\xc5\xeb\x2c\xd4\x8e\x91\x93\xa3\xb2\x8c\x1a\xcf\x78\xd4\x91\xb9\x96\xfc\x72\xa3\xf1\xfb\xbd\x7f\xd7\xc7\xcd\xf1\xe8\x02\x86\x33\x39\x8d\xae\x96\x43\x78\xf8\x04\x9b\x03\x9f\xd2\xb5\x92\xf0\x11\xfb\x01\xea\xea\xc0\xed\x37\x07\xf6\x6e\xb6\x42\xa4\x71\xb7\xd3\x83\x2d\xfd\xfb\x7c\x9c\xcf\x92\x73\x38\x39\x37\xca\xd1\xd7\x27\x73\xf4\xde\x78\x3f\x3e\x8b\xfd\x89\xd0\x51\x90\x30\x51\xd5\x6f\xec\x56\xe4\xef\x0e\xb9\x52\xeb\x18\x2c\x8e\x82\xe5\x45\x03\xd1\xaa\xc0\x64\xa4\xad\x02\xed\xe5\x66\x46\xd6\xea\xc2\x5e\x51\xb4\x6c\x57\xe7\x53\x98\x4f\xfb\xf4\x74\xb6\x80\x63\x86\x95\x4f\x79\x65\x7e\x2a\xf8\x95\xea\x96\xf6\x43\xfe\x04\xde\x3f\xa5\x6b\xa5\x8f\xf1\x7b\x89\x41\x53\x22\x5c\x3c\x33\xdf\x50\x46\xed\x0e\xf7\x74\xcc\xb5\xb2\x76\xbd\xec\x70\xa6\x2f\xd5\xed\xc6\x5e\x06\xee\x6e\x3a\x5b\x89\xbc\xf1\xfe\xf6\x2c\xf6\xbe\xbf\xb1\x11\x4e\xfc\xe0\x72\xdb\x88\x6c\x6f\xc2\xa8\x6a\xc7\x24\xe1\x7a\x79\x3e\x0c\xe6\xea\x54\x3b\x19\xa4\x88\xa6\x9d\xcc\x1d\x3a\x3f\xc4\xdb\xf5\xa8\x92\x84\xd4\xd3\x36\x3d\x06\x51\xc2\xf5\x67\xa8\x57\xd7\xac\x5d\xed\x46\x74\x76\x79\x84\x73\x13\x12\xf1\x13\x78\xff\x94\xae\x95\x3e\xc6\xef\xe5\x35\x45\xd5\x27\xd4\x55\x8c\x56\x6b\x2d\xe5\xb5\x96\xbc\x80\xc2\x1a\x3c\x8c\x87\x98\x73\x0a\xd8\x58\x5d\xbc\xdd\xcb\xd1\x34\xd6\x6f\xf7\xda\x8f\xbd\x19\x7f\x35\xf6\x9d\xc3\x3e\x62\xcc\x8d\xc3\xf1\x7a\x5b\x91\x32\xdb\x9e\xf4\x44\x4c\x5a\x77\xb7\x92\xd4\x95\x75\x38\xa7\x73\x3b\x29\x5d\xea\x34\x8a\xa6\x2c\x1e\x9c\x44\x94\x3b\x59\xea\x73\xbb\x49\x6b\x23\xc9\xc3\x15\xaf\x76\x68\xb8\x56\xf4\xc5\x94\x1e\x7b\x67\x37\x6f\x93\x87\xf8\x09\xda\x0a\x3e\xa5\x6b\xa5\xef\xe2\xf7\xf6\x10\xb9\xb0\xc5\xfd\xa3\x34\x5d\x1e\xcf\x8e\x3b\x03\xcd\xbf\x47\xe5\x63\x05\x3d\xb6\x1c\xc5\x0b\x2d\x6d\xca\x5a\xe6\x9d\xee\x0d\xfb\xf9\x59\xec\x95\xb8\x7a\x44\x61\xcb\xdb\x8a\xde\x94\x98\x5e\xb0\xeb\xf9\xbc\xee\xb9\x1b\x59\x87\x4b\x3a\x09\x96\xfe\x7a\x99\x8e\xf4\xd2\x53\x71\x73\x3e\x14\x99\xe5\x69\xcc\x8e\x1b\x4b\x76\x3f\x90\xf2\xb4\x60\x0f\x22\x74\xdb\x00\x26\xe6\x64\xe4\x76\xb7\x60\x2d\xc4\x03\x51\xfe\x04\xec\x9f\xd2\xb5\xd2\xc7\xf8\xbd\x04\x8f\xee\xfd\xb4\xbc\xb4\x06\x88\x9a\xec\x1e\x6c\x9d\xf8\x1c\xc8\x0f\x5e\xdc\xdc\xd8\x7e\xf7\xd8\xf6\xf8\x22\xaa\x2c\xb7\x3b\x8f\xa7\xb7\x9c\xe1\xfd\x59\xec\xb3\xe3\x9a\x4c\xa7\xdd\x58\x6b\x52\x83\xad\xce\x9a\xf9\xa6\x12\x44\xb3\xd9\x8f\x94\xb8\xd4\xe9\x00\x8f\xe7\x19\x1a\x37\x1f\x50\xbb\x51\x14\x8d\x8e\xcb\xd9\x93\x69\x08\xf3\x6e\x56\xf4\xa2\xf2\x38\x79\x57\xdf\x35\xa7\x21\xf3\x51\xcf\xd4\xb8\xb6\xf5\x44\x69\x3f\x21\x77\x02\x9f\xd2\xb5\xd2\xc7\xf8\xbd\xb4\x61\xb2\xc5\xa3\x51\x5b\x73\xd1\xac\xfa\x59\x76\x77\x0e\x10\x10\xe9\xda\x36\xa8\x83\xb8\x3b\x2f\x95\x85\xa6\x76\x8b\xb6\x1a\xd7\x6f\xd8\x3f\x9e\xc5\x3e\xed\xa3\x4d\x73\xbc\x78\x27\x5b\x58\x05\xbc\x47\xef\x16\xbb\x10\xc9\x09\xdf\x86\xb5\xbd\xc3\xfe\x41\x89\xc0\x51\x5a\x2e\x7a\x3f\xc6\xd5\x78\xb2\x50\xb4\x7d\xd8\xcc\x72\xd1\x5e\xd2\x2d\x57\xce\x05\xb3\x65\x8a\xfe\x7e\xda\xa3\x0e\xad\xd5\x8a\x8e\x47\xa2\x9f\x84\xcf\xb8\xd7\x3e\xa5\x6b\xa5\x8f\xf1\x7b\x89\xa7\xd0\x9c\xcd\x8d\xb4\x8c\x38\x63\x96\
xb2\xfd\xec\x72\xad\x72\x1e\x32\xc3\x8f\x46\x41\x5f\x0a\x5e\xaa\x3d\x06\xf6\xf6\x18\xbf\xd6\x41\x7f\xc5\xfe\xe3\xe3\xbd\xbf\x1a\xfb\x9d\xb9\x38\x1f\xf9\xc5\x99\x1f\x77\xb2\x7f\xbd\xd9\x5b\xb0\xab\xa7\xfd\xc5\xb8\xcc\xdd\x70\x99\x77\x54\xdd\xa4\x07\x0e\x85\xa7\xdd\xe6\x11\xc0\xdd\x63\xf7\x20\x38\xd2\x47\x76\x92\xe9\x73\xbe\x90\xe8\x9d\x79\xb4\xf5\xd5\x54\x94\xe9\xb6\x01\x9b\xe5\xdd\x3d\x9e\x1f\xc6\x67\x60\xff\x94\xae\x95\xbe\x8b\xdf\x93\x35\x77\xd3\x8d\xe5\x74\xc4\x57\xaf\x38\xd5\x7a\x21\xa7\x4b\x05\x5d\x67\xc5\x6d\x9c\x7b\xbe\x29\xb4\x42\x22\xf2\x7c\x42\x0e\xf5\x86\x3d\xfd\x2c\xf6\x09\x1f\x96\xbb\xfd\x8d\x3d\x38\xda\x81\xb3\x12\x84\x17\x45\xb4\x3b\x2c\xf7\x9d\xe6\x74\x96\x14\x4e\x1e\x57\x2b\xab\x79\x50\x9d\x76\xa9\x88\x37\x41\x71\x40\xb0\x4b\xe4\xdd\x66\x7d\xbd\xcf\xb0\x57\x87\x78\x8d\x15\xf6\x5a\xa7\x14\x23\xd6\xab\x6b\xf0\xd8\xb1\xb5\xf3\x09\xba\x16\x3d\xa5\x6b\xa5\xef\xe2\xf7\x8b\x75\x5a\xa2\xb0\x0e\x86\x83\xaf\xfa\x1e\xd6\x4e\x7d\xd9\x28\xf9\x74\xf1\xdb\xe3\xad\x3a\xf0\x96\x04\xf1\xa2\x6f\x50\x7a\x3e\xbf\xd9\x1c\xf0\x2c\xf6\xb6\x27\x5f\x30\x31\xba\x75\x46\x4f\x87\xbb\xe7\x8a\x49\xc6\x66\xfb\x83\x31\x52\x66\x79\xc4\x41\x7a\xef\x03\x52\x34\x66\x08\xb0\x7c\x3c\x4f\xf9\xa6\x1b\x7c\x7f\x93\x76\x59\x3b\xeb\x98\x8e\xa2\xc3\x78\xf4\x96\x1b\x0f\x86\x67\x47\xf7\x5a\xfd\xe1\xdd\xc2\x3a\xfb\x84\xdc\x09\x7a\x4a\xd7\x4a\xdf\xc5\xef\x5b\xb8\x2a\x7b\x2c\xdc\x06\x75\x5c\x5d\xad\xa2\xca\xa1\x74\xf4\xd3\xaa\xd3\x6e\xca\x85\xa8\xab\xd9\x68\x4e\xec\x5d\x93\x72\xff\x9d\xb6\x82\xcf\x62\xcf\x79\x19\xee\xd6\xda\x42\xe2\x8a\x65\xb9\x60\x2f\xb7\xb5\x15\xaf\x57\x77\x33\xa4\x38\xd8\xfb\xcb\xe1\x2e\x9e\x15\x9a\x3a\x67\xe1\xbd\x6a\xfc\x26\x4b\x92\xb1\x3a\xb9\x33\x6d\x66\x89\xa8\x59\xdc\xbd\x5b\x3e\xca\x35\xc7\x9e\x1c\x69\x7b\x5b\xd6\x42\x71\x52\x27\xea\x33\x62\xc8\xe8\x39\x5d\xfb\x5d\xfc\xfe\x78\xb0\xb2\x2b\x64\xac\x6c\x6d\x24\x53\x60\x34\x95\x9a\x87\x95\x7d\x99\xb1\xf0\x58\x5f\xcf\x03\x2a\x06\x8d\xba\x69\x41\x2b\x1e\xde\x78\xff\xb1\x91\xf5\xaf\xc6\x3e\xb0\x9d\xa3\x65\x80\x92\x70\x66\x22\x6c\xab\x38\x69\x8d\x36\xf4\xb6\x8b\xcc\xd9\x2d\x2d\x39\xe7\x46\x07\x0a\xd0\xb2\xec\x88\xda\x3b\xc5\x18\x30\xe2\xdd\xe6\x02\xac\x77\xc0\xe4\xf4\xe4\x61\x2e\x1b\x34\xe0\x0a\x33\xea\x4d\x2c\x17\x27\x02\xb6\x64\x33\xb8\x9f\xe0\xdf\xa3\xe7\x74\xed\x77\xf1\xfb\x9a\x36\xd7\x6a\xba\xe1\x5a\x6f\xe5\x5d\x6d\x6f\x97\xdc\x73\x45\xf0\x5a\xce\x68\xda\x44\x91\x0a\x2c\x5d\xf9\x49\x34\xee\xb3\x6b\xbe\xf9\xf7\xcc\xb3\xd8\x87\x3b\x0c\xd6\xfa\xe1\x9e\x42\xf1\x76\x5f\x87\x14\x6b\x55\x21\xee\x84\xed\xe0\x33\x7b\x10\x55\x8f\xae\xd9\xac\x6d\xb9\xe2\x23\xfd\x8c\x16\xbd\xd2\x36\x5e\x43\xf8\x51\x54\x58\x77\x6b\xf7\x02\xb2\xcd\x9d\xe9\xad\x67\x9d\xae\xd6\x3b\x5d\xb1\x39\x2e\x2b\x8e\xf3\x27\xdc\x6b\xd1\x73\xba\xf6\xbb\xf8\xfd\x78\x14\x5d\xea\xa2\x5b\x32\xdf\x68\x2e\xcd\x1e\xa7\x18\xc7\x97\x43\x69\xab\xde\xe1\x02\xc1\x79\x82\xb7\x22\xbb\x12\xdc\xec\xde\x3d\x64\xc8\x3e\x8d\xfd\x70\xc4\xa5\xc0\x6d\xc6\xa8\xda\xf8\x6d\x73\x89\xcf\x15\xe9\xa5\x7a\x65\x77\x83\xad\x4d\x56\xc6\xa8\x66\x30\xcb\xe4\x8c\x96\xa0\x28\x2e\x07\x2b\xbf\x97\xcb\x29\xb3\xbb\xe0\x5c\x22\x12\x33\xd7\x6c\x2d\xcc\x0f\x3f\xa2\x8e\x76\xc2\xf4\x81\xe3\x2e\x57\x6d\xf3\x09\xfe\x3d\x7a\x4e\xd7\x7e\x8c\xdf\x4b\x4e\x68\xa4\x24\x95\xdb\x96\x01\x05\x3f\xaf\x17\xe7\x8b\xae\x69\x5e\xac\xba\xda\x3e\x3c\xf8\xfb\x65\x5c\xae\xf6\x94\xe6\x58\x47\xfa\x0d\x7b\xfc\x34\xf6\xbc\x3b\x7a\xb7\xfc\x24\x8c\x42\x7e\x86\x10\x33\x96\xe1\x70\x5d\x59\x16\x0f\x8f\x86\xa3\x20\x03\x69\xcf\xae\x73\xdf\xc4\x8b\x61\xa7\xc3\x39\xb9\x59\xa7\xe5\x20\x95\xe3\xbc\x08\xaf\xf1\x28\x5c\x64\x27\x62\xbd\xc7\x6d\xb9\x5a\xcf\xcb\x68\x4d\x67\xc8\x7b
\xf8\x9f\x81\xfd\x73\xba\xf6\x63\xfc\x5e\x90\x44\xab\xe0\xfa\x10\xaf\x09\x23\x09\x4c\x6e\x47\x99\x78\xea\x6f\x46\x64\x09\x61\xb1\x09\x51\xda\xa8\xc2\xe9\xe2\xf2\xbd\xf4\xee\xb9\x66\xee\x59\xec\x77\x61\x5e\xc6\xfd\xe2\x61\xed\xe1\x76\xba\x2a\xae\xb5\xa1\xab\x31\x94\xb6\x99\xde\x83\xf3\xe2\xb8\x29\xf3\x68\xbb\xab\x4e\xfb\x69\xf3\xa0\xf4\xf5\x18\xb3\x59\x97\x49\xbd\x1c\xf4\xd7\xc7\x5a\x3d\x9f\x47\x3a\x81\xb7\x76\xd3\x14\xf7\xe0\x60\x75\xde\x39\x19\xf8\x9d\xf0\x19\x7e\xce\x73\xba\xf6\x63\xfc\x5e\xde\xbb\x87\x6e\xbf\x3c\x15\x4e\x12\xd8\xa7\x65\x51\x76\x5c\xb6\x13\xc2\x75\xdd\x89\x1b\xb9\x3c\x9d\xf6\x44\x83\xe3\x71\xac\xe5\xdb\xd7\xe7\x3a\x5f\xb1\xe7\x9f\xc5\xbe\x8b\x22\xa8\x9d\xd2\x26\x84\x0b\xfe\x28\xae\xd6\xa6\x2b\x79\xe2\x44\xd1\x9b\xb9\x62\xe2\x83\x3e\x1d\x44\x89\x96\x16\xcb\xb1\x39\xec\xf9\xa1\x59\x83\xb0\x1b\x35\x7f\x38\x45\xf0\x41\x35\x69\x5d\x43\xad\xc0\xb6\x52\xa6\x5d\x91\x5e\x2d\x63\x71\xba\x99\x66\x74\xfa\x84\x3d\x22\xe8\x29\x5d\x2b\x0b\x1f\xb1\xbf\xb4\x9b\x24\xed\xf9\x21\x92\x88\x2b\xb5\xb2\x41\x37\xf5\x69\xbf\x89\x2e\x7b\xe1\x46\xc5\xd1\xfe\xba\x9f\xfd\x9e\x94\xad\xb8\x35\xdf\xb0\x5f\x3c\x8b\xfd\x76\xd8\x49\x72\x0e\x56\xdd\xb8\x5a\xb6\xe6\x98\x9f\xc6\xbd\x34\x9d\xca\x97\xaf\x7b\xe3\xad\xdc\x43\xa5\xe0\x1f\xaf\xe7\x4d\x0d\x45\xbe\x2b\x1e\xf5\x5d\x36\x46\x1b\x27\xb7\x39\x2a\x65\x97\x9b\xc4\xb9\x35\x13\xcb\x2f\xae\xa7\x36\xd9\x2a\x77\x59\x98\xb8\xd3\xf9\x33\xfc\x9c\xa7\x74\xad\xfc\x31\x7e\x2f\x32\xbe\x72\x40\xc1\x2a\xf1\x25\xff\x81\x09\x0e\x42\x75\x3e\x3f\x4a\x67\x50\xc1\x31\x67\x66\xfb\x78\x6c\x4a\xa5\xb6\x30\xe2\xdf\xf9\x39\xcb\x67\xb1\x9f\xe8\xd2\x5f\xfa\x1e\x5f\x57\x21\x4b\x6b\xfe\xd6\x14\xb6\x17\xc7\x39\x6f\x63\x52\xdc\xb2\x25\x23\xa9\xc5\x86\xeb\x6b\x2f\x6f\x53\xb3\xab\xea\x91\x4a\x2c\xc7\x3a\x2e\x0c\x41\x1d\xee\x3d\xbd\xa8\x93\x44\xaa\x9b\x1d\x00\x42\x1b\x56\xac\x1a\x48\x17\xf2\xb8\x7d\xc6\xb3\x0f\xcc\x53\xba\x56\xfe\x6e\xff\xbd\xe9\x0e\xf6\xa4\x0e\x2b\xca\x42\x41\xa5\x63\xb3\x59\xce\xf7\x03\x81\x6c\x38\x34\x4b\x7b\x54\xd2\x64\x79\xb9\x29\x9a\xaf\xef\x1f\xdf\x78\xff\x31\x8c\xf9\xeb\xb1\x87\x8b\xc7\x6d\xa6\xa9\x40\xaf\xf6\xad\x7d\x67\x2e\x96\x7b\x40\xc1\x3c\x8a\x40\x6b\xb2\x65\xe4\xa0\x9d\x35\xa7\x5b\x68\x15\x9c\xd1\x9d\x0c\x35\xd6\x8f\xf3\x42\xbb\xdf\x47\xcf\x2b\xaf\x57\x8b\x50\x96\x93\x9c\x6f\xa7\x98\xf2\xaf\x77\xd7\xdf\x93\x80\x9b\xef\x07\xed\x13\x72\x86\xcc\x53\xba\x56\xfe\x18\xbf\x97\x60\xcc\x35\x16\x99\x3a\x63\xca\xe8\xbd\x91\x2f\xa4\x87\xfb\xc8\x3c\x9e\x11\xae\x97\x4b\xa4\x22\x0f\x57\x5b\x33\x96\x20\xcb\x7a\x6f\xd8\x8b\x4f\xdb\x7b\xab\x3a\x51\x71\x80\x7c\x46\x40\xd7\xe8\x38\x5d\xc2\x61\xe1\x97\x57\xf1\x68\xd8\xe2\x2c\x2d\x0d\x8d\x83\xa7\xd5\xb5\x10\xa5\xb5\xab\x5e\xcb\xc8\x1f\x2a\xb3\xde\xae\xca\xe6\x7e\xe0\x46\x09\x4c\x07\x67\x77\xa3\x93\xc5\xdc\x8c\xf7\xe5\x48\xee\xa6\x73\x29\x02\xea\x13\x0a\x59\x30\x4f\xe9\x5a\xf9\xbb\xfd\xf7\x1b\xb4\x77\xbb\x78\x7b\xa5\xc2\xe4\x2e\xf6\x8f\xe5\x7e\xd6\xe7\x56\x50\x8f\xdc\x52\x43\x98\x3e\x6f\xe3\x07\x48\x94\x38\x1d\x82\xf2\x0d\x7b\xe9\x59\xec\x9b\xa3\x0d\x86\xd4\xb6\xe6\xe8\x4e\x79\x73\xce\x63\x3f\xe0\x1a\xaf\x2d\xc8\x29\xe1\x91\x42\x4d\x06\x6b\x3d\x82\xf1\x91\x19\xcb\xdd\x68\xab\x67\x0f\x76\x0b\x19\x1c\x98\xf5\x61\xe8\xae\x52\x24\xdb\x57\x80\x29\x1e\x04\x2a\xb8\xf4\x05\x5f\xed\x26\x55\x8b\x17\x9f\x10\x53\x60\x9e\xd2\xb5\xf2\x77\xfb\xef\x6d\x5e\x33\xf3\x0a\x37\x9a\xe0\x08\xf6\x6a\x6c\x26\x5c\x3e\x6a\xa4\xf6\xfd\xb1\x4e\xfd\x44\x0f\xe0\x76\x8f\x74\x59\xef\x8b\xf9\x9b\xbd\xbf\xcb\xcf\x62\x6f\x6b\x4a\xa7\xad\xbb\x87\x1f\xe9\x3b\xc4\x2f\xcf\x90\x36\xc1\xa2\xda\xcb\xf9\x8e\x33\xc4\x49\x9a\xb7\x38\x21\x25\x75\x30\xf8\x8d\x73\xea\x5c\xde\x5c\x6
5\x69\xe9\x5d\x59\x9a\x33\x64\x76\x38\xca\x8b\x6b\x0f\x5d\x7e\x5a\x4f\xf5\xb6\x6f\xa3\x65\xe9\x77\x51\xb7\xff\x0c\x9b\xf3\x94\xae\x95\x3f\xc6\xef\x45\x4a\xb6\x4f\xd1\xb9\x0c\x62\xa3\x16\xc2\x4a\xcb\xf7\xe6\x64\x76\x67\x43\xda\xc5\x94\x3a\xad\x1d\x3f\x4d\xb7\x92\x6b\x2d\xca\xea\x9d\xcd\x51\x9e\xb6\xf7\x4e\x19\x67\xf4\x3d\x48\x38\xe9\x70\x19\x8b\xd3\x92\x09\xa5\xd2\x87\xf6\xb8\x72\xc4\x20\x55\xf0\x2d\xe4\xd8\xf0\xaa\x53\x25\x2a\x96\xdd\x3d\xe5\xf9\xf0\x18\xde\xc3\x31\x5e\x34\xd0\xd9\x93\xa3\x8a\xb1\x74\xa7\xfb\xc7\x43\x1a\x0c\x66\x33\x15\x64\x9b\x87\x37\xe1\xf7\x8f\x29\x30\x4f\xe9\x5a\xf9\xbb\xf8\x7d\xe8\xa2\xd0\xa3\x74\x02\x0e\xe3\x91\x5a\x71\x57\x8a\xdf\x3f\x8a\x0d\x13\x0d\xee\x92\xd3\x8e\x39\x3d\x59\x99\xdd\x3b\xe5\x35\x7b\xc7\x7b\xf5\x59\xec\xc9\x29\x3d\xd5\xc8\x90\x72\xb4\xc1\xd3\x78\xb6\x3a\x2a\xa6\xa3\xa9\xb9\xd4\xb1\xbe\x5e\x2f\x1a\x62\xd5\x6a\xd0\x68\x0d\x1a\xa2\x24\xb7\x79\xf3\x34\x8e\x68\x1b\xad\x36\x38\x4c\xd5\x2e\xaf\x82\x7b\x52\xf2\xc7\x89\x18\x29\xb0\x44\x21\x30\x59\xcd\x3c\x34\xf2\x27\xe4\x6b\x99\xa7\x74\xad\xfc\x31\x7e\x2f\x88\x8b\x11\x79\xfb\x82\x9e\x20\x73\xd1\x16\x8e\xb4\x9f\xf8\x47\x12\xd1\xb6\x24\x49\xab\x65\x27\x05\xc5\x9d\x96\x8f\xa3\x75\xba\xbc\xed\x01\xbf\x6b\x4f\xf3\x7e\xab\x17\x91\x22\x1e\xab\xbb\x78\xbf\xab\x20\xd4\xf8\x80\xe0\xa3\x55\x03\x2f\xce\x58\xa5\x9f\x56\xfa\xc2\x47\x8b\xfd\xcb\xd9\x16\xd5\x82\x30\x21\x20\xf1\x1d\xed\x35\xc3\xd2\xb7\xf4\x19\x8f\x4c\xe1\xa8\xdc\x10\x9d\x30\xc6\x53\xb2\xe5\x03\x5d\x9d\xe1\x67\x60\xff\x94\xae\x95\x3f\xc6\xef\xc5\x55\xbd\x63\x18\x5f\x6a\x82\x81\xce\xab\x23\x6a\xef\xb8\x6b\xce\xad\xd4\x72\xae\x24\x13\xc5\x16\xa6\x28\x3b\x6c\x22\x7a\x21\x97\x6f\xd8\xeb\xcf\x62\x0f\xf2\x93\x18\xec\xd4\xbb\xc0\x4e\xd4\x83\xbd\xf1\xf6\xde\xc2\xe7\x68\x58\xae\xfd\xe5\xfe\xb2\xdf\xf2\x07\x5a\xb3\xd2\x76\xa9\x53\x48\x16\x63\xd1\x9f\xe9\xad\xfe\x98\xdb\x86\xde\xdd\x76\x75\xd7\xcd\xed\x55\x3a\x4f\xf5\x39\x5a\x93\x7e\x5d\x19\xdc\x32\xde\x17\xc9\xa7\xf0\xfe\x39\x5d\xfb\x31\x7e\x2f\x9f\xfc\xcb\x05\xab\x27\x33\x55\xd9\xa2\xe9\xd5\xa2\x75\x32\x5f\xbd\x64\x82\xbe\x39\x6f\x52\xdb\x1e\x4d\x3b\x54\x9b\x36\x49\xe6\xf8\x0d\x7b\xe3\x59\xec\x83\x88\x3c\x56\x5b\x4a\x67\x10\xa7\x7b\x13\xba\x5c\xa3\xa0\x68\xc3\x4d\xba\xbd\xe0\x66\x28\xd7\x0d\x2f\x1f\x46\xad\xde\xf4\xf5\xb0\xe1\xa7\xe3\xf1\x21\x05\x78\xc9\xb3\x9a\x59\x45\x85\x5c\x2e\x23\xfa\xd8\x06\xc2\x1d\x31\xae\x32\x25\x7c\xce\xf9\x52\x76\x17\xf5\xcf\xd0\x56\xcf\xe9\xda\xef\xea\xe7\xa0\x62\x9d\xa7\xca\x20\x66\xda\x63\x58\x1c\xaa\xfb\x2a\xdc\x9b\xb1\x4f\x6d\x30\xbe\xf9\x34\x7f\x46\x2c\xbc\x98\x36\xa3\x5c\xc6\xaf\x0f\x34\xbf\x62\xff\xb1\x35\xf7\xaf\xf7\x31\x69\x61\x7c\x68\x6b\x75\xb3\x65\x60\xac\xea\x16\x4c\x1e\xfe\xc4\xc4\xab\x50\x0e\x23\xf6\x18\x00\xb9\x8b\x08\x93\xba\x7e\x8b\x90\x79\xd9\x34\xc3\xda\xda\x86\x81\x5b\xee\xba\x90\xb9\x43\x3e\x62\x52\x78\x58\x9f\x96\x6b\xe1\x50\x8b\xd4\x42\x14\x06\x2b\x12\xb8\x4f\xd8\x0b\xcb\x3e\xa7\x6b\x3f\xc6\xef\xe5\xf3\xcc\x07\x8b\x60\xb0\xd5\x69\x9c\x35\xf5\xcc\xcd\xb1\x32\xcd\x4b\xde\xad\x6e\x21\xb6\x5d\xcd\xb5\x5a\xbf\xac\xad\x48\x6e\xf0\x1b\xf6\xeb\x67\xb1\xd7\xf8\x80\x9e\x36\x78\x07\xfc\x71\x14\x64\x78\x8b\x8c\xd8\xb9\x6c\x4c\x5d\xb8\xe5\xe5\xd9\x56\x0b\xc5\x17\x0c\x13\x44\x41\x34\x3d\x68\x94\xaf\xea\xcd\x92\xe1\xf7\x88\x90\xf1\xd4\x44\x87\xce\x8a\xee\x64\xeb\x9c\x4b\x69\x7d\x5c\x75\x11\x9a\x6b\x2f\x52\x23\xed\x13\xfc\x7b\xf6\x39\x5d\xfb\x31\x7e\x2f\x4b\xd9\x58\x6c\xce\x06\x2b\x3e\x56\xfc\xad\x72\xf1\x59\x38\x66\xeb\x6a\xf6\xcf\x97\x52\xc2\x4a\x95\xc2\xc8\xbb\x50\x2a\x3c\x02\xf5\xcd\xe6\x98\xcf\x62\xcf\x2d\x9d\xae\xac\x47\xde\x32\xf9\x24\x71\xe4\x6c\x2f\xb0\x0d\x93\x57\x35\x43\x
d5\xd6\xf6\x14\x7b\xfa\x2e\x58\x40\x17\xdc\xfc\x86\xc4\x87\xa1\xa3\x5b\xf9\x1a\x3e\xc8\x23\x4d\xe5\xb1\x5a\x27\x3d\x26\xaa\xa2\xb6\x18\xd4\x73\x7d\x6a\xca\x5a\xaf\xb4\xbd\x46\xff\x35\x96\xf6\xf7\x2b\x2f\x7f\x2d\xe7\xfc\x5c\x65\xf8\xb7\x61\xde\x56\x33\xa5\x92\x14\xc4\x88\x67\x59\x96\x4a\x30\xe2\x09\x05\x48\x96\x41\xc8\x03\x16\x72\x54\xcc\x62\x86\x27\x39\xc7\xa2\x84\x20\x26\x41\x0c\x86\x80\xa7\x32\xc2\x33\x2c\x44\x39\x1f\xb3\xd4\xdf\xac\xea\xab\xf8\xfd\xea\xc4\x6f\xee\x14\x2b\x20\x98\xae\xac\x7b\x2d\x4f\x89\x9f\x9e\x6e\x8b\xfe\xec\x78\x20\x79\xb8\x67\x0c\x23\xfe\xa2\x8b\x4d\xca\xac\x99\x9f\x0b\x66\xfe\xa3\xd9\x0f\x6d\x47\xfa\x21\x1e\x7e\x7d\xa5\xed\xbf\x99\xfd\xb7\x61\xde\x66\xdf\xc4\xfd\x90\x36\x6d\x4f\xb2\xaf\x6d\x23\xbe\x7c\x78\xfd\xc7\x9f\xbf\x3c\xdf\x39\xe2\xe3\x35\xf5\x83\xef\xf1\xf3\x45\x16\x77\x69\x59\xdd\x7e\x7e\xf3\xe3\xf7\xf8\x5f\x7f\x7a\x39\xfa\xe9\x46\xba\xbe\x6a\xcf\x3f\xfd\xe5\x0b\xfd\xe7\xaf\xff\xd3\x93\xee\x46\xba\x9f\xfe\xf2\xe5\xa7\x1b\x4d\xff\x17\xfd\x5f\xd4\x4f\x3f\xbf\x91\x8e\x5d\x47\xce\x83\xf9\x3a\xb5\x9f\xfe\xf2\xb2\x50\x7f\xf3\x86\x38\xa6\x35\x19\xfa\x9f\xfe\xf2\xe5\xff\xfa\xd3\x5f\xcf\xf4\xbf\xfe\xf4\xfe\xbc\xaf\x9f\x7c\x19\x39\xcd\x78\x18\x33\x34\x0b\x98\x9c\xf0\x04\x91\x1c\x63\x2e\x61\x59\xcc\x27\x08\x52\x14\x80\x88\xe5\x19\x9e\x87\x09\xcc\x29\xc4\xa7\x29\xcf\xe3\x84\xc6\x84\xe1\x48\x86\x39\x1a\x41\xfa\xe7\xaf\xf4\x6d\xdc\x33\x99\x87\x9f\xfe\xf2\xe1\x6c\x5f\x67\xf3\x32\xf9\x9f\xfe\xf2\x85\xfa\x9b\xb7\xfe\xf7\x87\xbf\xef\xcf\xf1\xe5\xe5\x7b\xc5\x84\xa7\x38\x94\xa4\x90\x25\x20\x67\x53\x96\xa6\x18\xcc\xa6\x84\x24\x71\xce\x65\x80\xcb\x73\x2e\xe7\xa8\x94\xcf\x52\xcc\x12\x1e\xb3\x88\x67\x71\x9e\xb0\x88\xb0\x39\x97\x11\x3e\xa1\x7f\xfa\xd3\x0f\xce\xf0\x77\x30\xc8\x59\x84\x69\x96\xc2\x39\xcc\x32\x3e\x67\x20\x0b\x21\x97\x42\x2a\x27\x98\x70\x09\x4c\x39\x80\x93\x84\x66\xf8\x2c\x66\x38\x1a\xa3\x17\xac\xb8\x98\x4d\x49\xc2\x03\x2e\x49\x52\x82\x7f\x3d\x06\xf4\x9f\xbf\x7f\xaf\x1d\x87\xcb\xf8\xf2\x77\x3f\xe1\x38\x4d\x21\x4e\x30\xa0\x41\xc2\xc7\x71\xcc\x27\x14\x0f\x09\xe1\xe3\x14\x64\x2c\x61\x09\x66\x00\x9d\x91\x1c\xe6\x98\xf0\x3c\x9d\x13\x08\xa8\x84\xa2\x01\x4a\x00\xa0\x08\x42\xc9\x4f\xbf\x08\x61\x9e\x43\x79\x0c\x32\x3a\x8d\x59\x96\x63\xf2\x8c\x70\x71\x9a\xbf\x30\x3b\x7f\x19\x85\xe7\x09\x46\x39\xc5\x60\xc2\x60\x8e\xc2\x39\x49\x38\x16\xc0\x98\x4b\x33\x86\x80\x18\x51\x31\xf7\x6b\x10\xc6\x84\xa2\x33\x0e\x72\x14\x97\xa4\x2c\x1f\xa7\x49\x4c\xa5\x6c\x06\x21\x9d\xc5\x5c\x9e\x20\x9c\xa7\x39\xcf\xa7\x09\x66\x72\x86\x8b\x53\x80\x69\x92\x24\x0c\x9f\xa5\x28\x26\x0c\xc5\xc4\xe9\x6f\x8d\x70\x9a\xe5\x59\x12\xb3\x79\x8e\x49\xfc\x72\x39\xe3\x34\x47\x79\x9a\xf0\x31\xe6\x99\x9c\xc7\x49\x92\x91\x38\xa5\x72\xc2\xb1\x1c\xc1\x09\x0f\x33\x9e\xe5\x38\x82\x18\x9e\x01\x49\x4c\x91\xf4\x97\x21\x0c\x93\x1c\x24\x98\x50\x39\x4d\x65\x2c\x4d\x00\x83\xb9\x94\xcf\xd8\x3c\xe7\x21\xa6\x32\xcc\x52\x2f\xe7\xa1\x50\x4e\x21\x8e\xc3\x19\xc0\x7c\xc2\x80\x98\xe5\x68\x86\xe3\x59\x8e\xa5\xa8\x5f\x83\x30\xf5\xe4\xeb\xb7\x46\xf8\xb7\x9b\xfb\x3f\x42\xf8\xe9\x59\xff\x7f\x1a\xe1\x5f\x66\x29\xff\x8d\xc1\xbf\x31\xf8\x37\x06\xff\xc6\xe0\xdf\x18\xfc\x1b\x83\xdf\x09\x83\xd7\xa3\xff\xfb\x4f\xff\xfb\x97\x28\x9e\x2c\x1e\xe2\x24\xee\x49\xff\x2a\xdf\x3e\xce\xe1\xab\xe2\xa1\xa9\x5f\x32\xd2\x99\x0c\x53\xdb\xd5\x97\xb8\xef\x2f\x65\x17\xf7\xe4\x07\x23\x79\xa4\x1f\xbe\xec\x64\xf5\xcb\xe6\xeb\x87\xbf\xfc\xf7\x97\x1d\xb9\x0c\xe4\x94\x90\xee\x0b\xa0\x68\xe6\x97\x9c\x28\x6f\xbb\x94\xf4\xe9\xa5\x3d\xbf\x60\xdf\xc4\xe3\x39\x2d\x3f\x9e\x28\x8f\x9b\x9e\xfc\x92\xc1\xbe\xea\xcd\xf1\x52\x74\x71\x46\xfa\x1f\xcf\xff\x67\xc5\x37\x54\xa7\xd7\xe5\xfc\xf3\x47\x01\
xf8\xb6\xf6\x3f\x95\xf1\x8b\x8c\x7b\x3d\xfb\x9f\xde\xad\xf6\x4f\x39\x21\xbf\xec\x83\xa7\x78\x1e\xe6\xbe\x7a\xfc\xc2\x8f\x77\xe4\x55\x75\xfe\x93\x0f\xff\x32\x2a\xbc\x88\xf0\x3e\xbd\xbc\x30\xe2\x7b\x1c\x7e\x86\xe2\x35\x46\xf0\x35\x7a\x09\x4d\xaf\x95\x8f\xd9\x54\x3f\xd6\xce\x56\x1c\xfc\x7b\xbc\x64\xef\x99\xb8\x8d\x6e\x1b\x2d\xbf\x65\x67\x50\xa7\x9a\xff\xa8\xbb\xe6\x5d\xc7\xa5\xaf\x9b\xd7\x38\x89\x5b\xd7\xcd\x29\x34\xb3\x15\x92\x70\x66\x31\xab\x63\xb1\x8e\xe8\xf3\xad\xac\x6f\x75\x40\xf9\x8f\xb5\x03\x2f\xfe\xd1\x63\xbe\x36\x95\x78\x0d\x67\xac\x8e\x4f\x06\x99\xfe\x95\x70\x88\x28\x78\x63\x63\xf8\xd5\xc0\x46\x3d\x44\x8e\xb9\xaf\x61\x34\x1c\x12\x2b\x67\xc4\x15\xa0\x66\x3f\x3f\xdc\x82\x73\xe6\xfb\xbd\xfd\x50\x06\x4f\x2f\xe7\xe2\x1e\x67\xdc\x51\x5c\x64\xb0\xf1\xcd\xab\x53\x07\xd9\xe3\x8e\x8f\x66\x99\xe8\xfd\x9e\x49\x99\x85\x17\x6f\xbc\x52\xfa\x16\x9e\x91\xc2\x27\x5b\x40\x8a\xff\x4a\x10\xcf\x51\xf0\x65\xa7\x86\x1d\x39\x25\x9b\x85\x22\xd1\xab\x0d\x48\x7c\xf0\xd0\x17\xce\x2e\x1b\x2c\xff\x40\x63\xfa\x02\x7d\x3f\x91\xfb\xd9\xdc\xcf\x28\x0d\xad\xd3\x39\x37\xa6\xfc\x9c\x31\xfe\x8e\xad\x57\xea\x39\xe5\x46\x74\x47\xc1\x8c\x55\xfb\x60\xe7\x35\x38\xee\x91\x53\x7f\xfd\x3a\x2d\x7f\xb1\x76\xda\x5c\x03\x99\xc3\xa4\xd8\x13\x6e\x3d\x6e\xce\x36\xa7\x6d\x07\xf3\xac\x5f\x19\x2e\xb6\x87\xd2\x29\xb6\x89\xe2\xc8\xf2\x14\x73\xd1\x56\x64\x4b\x6d\x77\x54\xc2\xab\x56\xa2\x6b\x98\x2f\xfb\xda\xa4\xd7\x1e\x95\xfa\x55\x11\xa5\xba\xb8\x4a\x1f\x5f\x0b\x3f\x3f\xc1\xb7\xe2\xdb\xfa\x4f\xcf\xb6\xdc\x7c\x7d\xa5\xbf\x8e\xbf\xce\x76\x4b\xad\x51\x64\xde\x3c\x69\x19\x00\x4a\x80\xbe\x60\x5f\xc8\x01\x5d\xec\x6c\x25\xcb\xcc\xa9\x9e\xa9\xe6\x36\x50\x59\x47\x47\x87\x53\x83\x63\x3e\x16\xd8\x93\x05\x7b\xa8\x0b\xb8\xad\x4e\xd8\x86\xa5\x2b\xc6\x36\x7f\xb1\xa2\xd5\x25\x8c\x44\x54\x36\xe2\xf0\x75\x92\xe2\xd2\x70\x78\x63\xbf\xd5\x63\x7e\x4d\x7a\xac\x2f\x61\xba\xdd\x1f\x03\x59\x6f\xb3\x07\xab\xb0\xc0\xd0\x5c\x50\x6c\xa6\xfa\xa6\x0c\xef\xf8\xb2\xd6\x0e\xd8\xf5\x78\x96\x18\x67\x40\x31\x63\xbb\xd7\xc3\xf3\xd5\x4c\x06\x90\x6e\xdd\x8d\xed\x76\xb1\xd2\x12\x4f\x48\x3b\x77\xfd\x7a\x15\xbc\x56\xf8\x57\x0e\x51\x64\x16\xdf\x6d\xca\x78\x3b\xec\x5f\x7e\x55\x41\xb0\x43\xc2\x3b\x7e\xea\x6b\x6a\xf7\x5a\x51\x4e\x91\x52\x83\x25\x33\x25\x5f\xd9\x50\x28\xd9\x78\x4d\xa3\xa3\x3e\x8f\xc2\xad\xbc\xad\x35\xb0\xd8\x8c\x8f\xa4\x15\xd6\x5d\x1e\x4a\xb1\x44\x47\x3b\x07\x63\x31\xd4\x97\x8c\x55\x94\xdb\x03\x8f\x57\x36\x1f\x1c\x7c\x1a\x93\x72\xb3\xc6\x19\x35\x9e\x66\xb1\x88\x57\xc7\x9f\xf7\x21\xbf\x85\x3b\xff\xa5\xeb\xe1\x9f\x04\x7d\x87\x6e\xec\x87\xa6\x3a\x93\xe7\x82\xbe\x6f\xc3\xbc\x0f\xe1\x6f\x22\x6d\xbb\x0e\x99\x60\xa7\x07\x1a\x13\xda\x91\xb5\xd1\x18\xa0\x49\x6b\x66\x27\xad\x50\xa8\xed\x03\x6b\xb5\xf2\xb5\x68\xb7\x06\xea\x16\x89\x06\x58\x19\xab\x0d\x90\x7e\x6e\x09\xaa\x49\xba\x84\x64\x20\xed\x10\x23\xad\xdc\xcd\x06\x39\x82\xee\x01\x53\x8d\x84\x95\xb1\x67\xb6\x02\xd6\x19\xb8\x86\xbe\xbd\x65\x03\x65\xc5\x1e\x02\x7d\xb3\xdb\x44\x6b\x47\x7b\x31\xe4\xfb\xdd\x6b\xdf\x14\x00\x20\xc4\x80\x82\x2c\xc7\x20\x8c\x19\x8e\xc2\xdf\x75\xd6\x64\xfe\xfc\x65\xb3\x37\xcd\xaf\xff\x7e\xb8\x8d\xfc\x70\x52\xb2\x1b\x40\xcc\x58\x42\xe8\x22\xd6\x96\x35\x15\x04\x9a\xb0\x71\x24\xe0\xba\x91\x89\x6d\x80\x58\x39\xd4\x75\x49\x0b\x3c\x1f\xb8\x0a\x36\x74\x05\x38\x7b\xc7\x94\xff\x3a\xa9\x7f\xbd\x4d\xea\x3f\x9a\xd4\xcf\x93\xe1\x7f\xed\x64\x04\x49\x70\x81\xa0\x84\xca\xda\xf0\x14\x60\xae\x0d\xc6\xb5\x0e\x2a\x63\x19\x7e\xc4\x3a\xf8\x60\x98\xae\xad\xc9\x1e\x00\x36\x5e\x1d\x90\xbc\x0b\xd4\x1d\x56\x65\x79\xfb\x1b\x4c\x46\xd9\xbb\x7f\x6f\x32\x90\xfa\x9b\x25\x02\xd4\xaf\x9d\x95\x68\xaf\xb1\x68\xef\x77\x76\xb0\xd5\x45\x61\x23\x86
\x16\x6b\x19\x6e\x18\xad\x0c\xd9\xd8\xee\xc3\xd5\xf6\xa0\x7b\xba\x20\xab\x98\xd9\x2b\xbe\xe1\x87\x81\xae\xdb\x1b\xe7\x1b\xef\xfe\xf5\x26\xb0\xff\x68\x89\xc0\xdf\x4e\x0a\xbd\x2e\x1a\xf5\x4b\xa6\x23\xf9\x01\x63\x4a\x6e\xa4\x6e\xf1\x56\xd9\x1c\x3c\x41\xb3\x7d\xc3\x39\x08\xb2\xbc\xd9\x1f\x0e\xd1\x4a\xda\xac\x55\xe4\x3b\x22\x30\xd6\x38\x00\xe6\x76\x15\xa8\x18\xee\xb5\xbf\x4e\x47\x46\x3b\xcb\x56\xa0\xbf\xdd\xa9\x38\x72\xa1\xe4\x29\x0e\xdc\x32\xfb\x8d\x27\x5a\xca\x4a\x16\x80\x66\x1e\x3c\xdd\xc5\x96\x25\xb8\xc2\x66\xbd\x5e\xc9\x11\x76\xb7\x9a\xfa\x8b\x18\x87\xe8\x5f\xbb\x36\xff\xef\x4d\xe6\x1f\x30\x8e\x7a\xfd\x41\xe0\xc3\x64\xfe\x9e\xc9\x9c\x73\x42\x7e\x8b\x1e\xa5\xef\x07\x7a\x83\x08\xb1\x0c\xce\x71\x9c\x80\x3c\xe3\x40\x4c\x00\x05\x73\x0a\x65\x80\xe2\x09\x1b\x67\x24\xa5\x08\x9b\xa4\x28\xa7\x78\x0e\x26\x28\x4f\x61\x1e\xb3\x98\xe7\x00\x45\x32\x84\x09\xe0\x33\xfc\xb5\x25\x26\xfd\x37\xee\xac\xf5\xe1\x76\x26\x56\x4b\x91\x32\xa9\x95\x76\x1f\xca\x69\x43\x37\x21\x15\xdf\x2f\x2d\xcd\x6f\xf4\xf9\x66\x4a\x77\x9b\x19\x44\x25\x95\xfc\xdb\xa4\xf2\x13\x2c\x86\xce\x3e\x7f\x6c\x84\xf3\xc3\x97\xf3\xf7\xde\x78\xbd\x7d\xc9\xcf\x9c\x3f\x5c\x2e\x3e\xf6\xc5\xf8\x85\xe7\xff\x3e\xdf\xfc\x63\xdc\x59\x96\x80\xff\x87\xbc\xf7\x68\x72\x2b\xd7\xb2\x85\xe7\xfd\x2b\x6a\x56\x03\xde\x0e\xc2\x9b\x7e\xf1\x06\x87\xde\x7b\x3f\x83\xa5\xf7\x9e\xbf\xfe\x8b\x24\x53\x4a\x16\x45\xa5\xe8\x52\xaa\x7e\xdf\x89\x7b\x23\x2a\x45\xf2\x2c\x60\x61\x63\x61\x63\x6f\x18\xee\x35\x12\x82\x03\xa3\x29\xa2\xcc\x29\x89\x38\x23\xda\x68\x84\x90\xb2\x5a\x2b\x45\xbd\x33\xc8\x0a\xa9\x18\x52\x52\x29\x61\xa4\x11\x0e\x39\x27\x9d\x3c\xdd\x87\x79\x85\xf7\x27\xea\x9d\xa8\x35\x23\x6b\x73\xc1\xe3\xbd\xbc\xc7\x9f\xc3\x5f\x92\xc7\xf0\x6f\xe5\xdd\x49\x0b\xd5\x9b\xe6\x2b\xa7\x30\xe3\x04\x43\x82\x3c\x72\xd6\x41\xc0\x2c\x36\x1a\x73\x83\x2d\x77\x42\x50\x20\x1c\xf6\x08\x6a\x22\x19\xe6\x40\x53\xc1\xb0\xa0\xe2\x74\x19\xe6\x15\xde\xcf\x0a\x1b\x5b\xd7\x23\xc3\xfe\x30\x93\x8a\x64\x61\x97\x6d\xda\xfd\x65\x6a\x36\xcf\x98\xa5\x0e\x47\xb7\xb0\x59\x36\x89\x3e\x88\x4f\xc7\x59\x47\xba\xf0\x58\x97\x4c\x3d\x1a\x7a\xf7\x41\x7f\x46\xed\xaf\x79\x4f\x3c\x87\x5f\x34\x8f\xe1\xdf\xca\xbb\x94\x54\x4b\xc4\xa4\x97\x5e\x1a\x62\xa0\x53\xc6\x71\x4a\x01\x84\x42\x70\xc6\x2c\x73\x42\x39\x02\x0c\x25\x10\x43\x04\x04\x84\x8a\x02\x82\x39\x13\xd8\x68\x4e\xcc\x89\x77\xf4\x23\xef\x7f\xa8\xde\xaf\xe2\x3d\x4f\xbe\x96\x77\xcf\x85\x35\x4a\x22\x46\x28\xa5\x5c\x6a\xa2\x08\x06\x40\x7a\xef\x30\x55\x00\x21\xa9\x98\x55\x44\x3b\x8b\x91\xc5\x46\x4a\x69\xb9\x02\x4c\x59\xc4\x18\xa0\x0a\xd1\x13\xef\xf8\xe5\xbc\x3f\x5a\xef\x57\xf1\x9e\xab\x7f\x2d\xef\x4a\x02\x41\x1d\xc4\x5e\x3b\xe9\x05\x71\x80\x3b\x8c\x80\x02\x96\x10\x4a\x31\xb3\x0e\x2a\x6f\x90\xf1\x56\x18\xee\x08\x14\x8c\x09\x2e\x00\x77\xd6\x22\x4d\x84\xe4\xa7\x9b\x5f\xaf\xe8\xcc\x0b\xeb\x7d\x39\x4e\x7f\xab\x77\x3a\x7a\x36\xcd\x0f\xe4\x30\x64\x6b\xe9\xd5\x84\xd0\xee\xb8\x98\xf4\x43\x57\xad\x0d\x6c\x95\xea\xf9\x92\x51\xd6\x19\x8a\x46\xb4\x01\x98\xdf\x16\x76\xf5\xab\x5d\xe9\xf8\x77\xe4\xec\xd6\x11\x3e\x0a\x3a\xe9\x59\xb4\xb2\xad\xae\xda\xc5\x05\x4a\x2f\x22\x4c\xf5\x5b\xb9\x64\x78\xbb\x2c\x34\x86\xa9\x48\x7b\x15\xc9\x66\xfa\x93\x75\xba\x70\xda\xfb\xd0\x08\x36\x7d\xf3\xd1\x20\xb1\xeb\x6d\x13\x8f\x06\xdd\xf4\x07\x7e\xa9\x53\x9a\xd4\xf7\xb9\x4e\xa8\xd8\xce\x0c\x7a\x13\x9a\x1a\x0e\x09\xae\xaf\x9b\x6c\x51\x5c\xbb\x71\xa7\x94\x28\x8f\x6c\x71\x22\x62\xa6\x91\xba\x7c\xdf\x6d\xed\x0b\xbc\x96\x06\x39\x04\x24\x61\x08\x21\x8d\xb0\x83\x16\x5a\xc6\x80\x75\xd2\x72\x42\x39\x33\x04\x1a\x6f\xa5\x86\x52\x7a\x69\x29\xa3\x0a\x40\x6c\xa0\xf3\x42\x1a\x75\xba\x5e\xf6\xf3\x71\xe4\xce\xf1\x73\xce\xc3\xb5\x98\x3
9\x1b\x3f\x2f\x97\xa2\x7e\x6b\xdf\xeb\xff\xfc\x0e\x9d\x7a\x0e\x5f\xd7\x1f\xc3\xbf\xb5\x5f\x59\xcb\x89\x73\x0e\x71\x45\xa5\x20\x58\x23\xa1\x00\x55\x16\x08\xa5\x3d\xa3\x4e\x29\x84\xa1\xe6\xd6\x69\x47\x2c\xa0\x94\x02\x03\x14\x57\x04\x11\x63\x14\x95\x48\x9e\xee\x96\xbd\xc2\xfb\xf9\x9a\x69\xca\x96\x8d\x48\x7c\x95\x5c\x84\x52\xd6\x86\xbb\xe5\xea\xb2\xba\x28\xad\xbb\x25\x1c\xad\xe9\x5a\xc1\x53\x1c\x4a\xf5\xa0\x3d\x2c\x0b\xc5\xe3\xef\xcf\xc6\xef\xcb\xf0\xd6\xed\xbc\xa7\x9f\xc3\x7f\x1f\xc7\xee\xc6\xbf\x95\x77\x44\x29\x74\x9c\x29\x6c\x1c\x11\x02\x6a\xae\x0d\xc7\xde\x7a\x2b\xa4\x65\x1c\x58\x0a\xa1\x53\xdc\x12\x47\x19\xa4\x0c\x51\xa4\x29\xe4\x9c\x03\x84\x11\x40\xd6\xf1\x13\xef\x57\xc6\xef\x3f\x54\xef\x57\xf1\xfe\x3e\x8e\x7d\x19\xef\x58\x63\xe6\x8c\xd7\xc6\xbc\xcd\x09\x80\xf1\xde\x03\xa1\x9c\x60\x6f\xaa\xc2\x88\x83\x48\x0a\xe2\x81\x20\x5a\x03\xac\x9d\xd4\x1a\x11\xc9\x20\xb4\x5e\x20\x8c\x90\x3e\xf1\x7e\x65\xfc\xfe\x43\xf5\x7e\x15\xef\xef\xe3\xd8\x97\xf1\xee\x20\x11\x82\x1a\xcd\x98\xf2\xd4\x73\x4f\x9d\x94\x50\x03\x48\x9c\x33\x84\x53\xc3\xa0\x31\x02\x43\x24\x91\xf1\x94\x22\x66\xac\x05\xc6\x2a\x48\x28\x06\x40\xe0\x77\x7b\x27\x2f\xe7\xfd\xd1\x7a\xbf\x8a\xf7\xcc\xf6\x6b\x79\x67\xcc\x20\x61\x80\x31\x96\x2a\xe4\x08\x07\x88\x29\x65\x94\x57\xc8\x19\x8b\x0d\x05\xd0\x3b\x6a\x1d\x26\xd2\x79\xcc\x99\x01\x5e\x28\xaf\x85\xc4\x86\x6b\x4d\xa9\x3d\x5d\x60\x7d\x45\xdf\xcf\xea\x7d\xef\xb8\xb6\x5f\x4d\xe7\x87\xb3\x71\x2d\xff\x08\xef\x99\xe7\xf0\xcf\xcf\x7d\xbd\x07\xff\xe6\x79\x82\xd5\x4c\x32\xc5\x25\xd7\x9c\x49\xc8\x99\xd1\xca\x59\xac\x28\x70\x44\x31\x25\x18\x87\x10\x4a\x86\xa0\x67\x54\x61\x2f\x29\x51\x58\x1b\x0f\x01\x30\xdc\x79\x60\xde\x6f\xaf\xbe\x42\xfc\xf9\xa6\x8c\x02\xae\x73\xd7\x0d\x4a\xb3\xb1\xda\x01\xc8\xe1\x60\x50\x00\xbd\x88\x6e\xae\x8a\xa2\x27\x82\x4a\x3f\x17\xae\x66\x5b\xa8\xdc\xe8\xb7\x2e\x06\xd6\xe8\x4f\xc9\xfd\x25\xf1\xd9\xe7\xf0\x4f\x42\x77\x3f\xfe\xcd\x06\xcf\x19\x80\x0a\x19\x85\x20\xd2\x82\x00\x89\x14\x7f\x33\x74\x04\x21\x47\x9a\x71\x4f\xb4\xc4\x96\x23\xcd\x11\x53\x80\x19\x4a\x80\x84\xdc\x22\xa5\x05\x36\x50\xc1\xf7\xab\xab\xaf\x10\x7f\x56\xf1\x7b\x2d\x6e\x36\xea\x4e\x67\xdb\x5f\x32\xfc\x39\xf1\xb9\xe7\xf0\xa7\xf9\xc7\xf0\x6f\x25\x1e\x38\x88\x04\x23\x5c\x23\x2d\x31\x17\x4c\x33\xcd\x99\x83\x02\x59\x63\x15\x80\x1c\x73\xca\x01\x63\x5e\x18\xe5\x01\x45\xd8\x42\xad\xb0\x26\xdc\x5b\xe6\x39\x02\xf6\xfd\xde\xea\x2b\xc4\x9f\x55\x3c\x96\x68\xb6\x6b\xd5\x74\x75\x02\x73\x9b\x4a\x0f\x55\xd7\xf1\x28\x9f\xc1\x52\xbf\x90\x35\x21\xb9\xaa\xf2\x62\x16\xc6\xfa\xdd\xc4\x4e\x4e\xf3\x47\xba\xcf\x2c\xfe\x71\x89\xcf\x3f\x87\x7f\x72\xa9\xee\xc7\xbf\x95\x78\x63\x85\x12\x4e\x3a\x4d\xb1\xb7\x88\x09\x0b\x91\x24\x0e\x21\x21\x25\x35\x88\x7b\x82\x90\x95\x80\x0b\x6c\x08\xa0\xc4\x11\xa2\x34\x00\x48\x78\x03\xad\x42\x10\xa8\xf7\x4b\xab\x3f\x27\xfe\x5e\x8b\xeb\x56\xec\xbc\x7d\x46\x5c\xfd\x11\xe2\x0b\xcf\xe1\x37\xa7\x8f\xe1\xdf\x4a\xbc\xf6\xda\x52\x27\x09\xe5\x96\x43\xae\x8d\x17\x84\x23\x09\x15\x33\x88\x68\x6e\xac\xb0\x9a\x78\xcf\x05\x71\xc6\x12\x2a\x91\x76\x18\x58\x20\x20\x61\x06\x18\xc4\xc8\x3b\xf1\x57\x9c\xf8\x3f\x54\xf1\x57\x11\xdf\x88\x7f\x2d\xf1\x18\x08\x01\xac\x25\xc8\x0a\x47\x00\x52\x18\x58\xa1\x30\x45\x5c\x53\x66\xa0\xc3\xd8\x41\xe1\x04\x37\x84\x39\x88\x31\x72\x8a\x1a\x6f\x20\x47\xd0\x5b\xc1\x8d\xd7\xef\x57\x85\x5f\xb1\xf8\xb3\x8a\x47\x8b\xb9\xae\x8c\x8e\x0e\xd3\xe1\x4e\xe1\x61\x90\x9a\xcc\x3a\xa6\x5b\xed\x4f\x5a\xdd\x90\x5f\x43\x3d\xa7\xe1\x45\xbc\xc6\xe4\x60\x35\x3c\x1d\xef\xf9\x21\x35\xb1\xc7\x07\xd7\xe2\x73\xf8\x27\xa9\xb9\x1f\xff\xe6\x68\x81\xb6\x92\x11\x6f\xbc\xd5\x18\x33\x85\x81\x47\xd0\x20\x02\xbc\xf7\x56\x79\x8e\x2d\x37\x9c\x0a\x
\xfb\x82\x45\xe0\xdf\xd2\x7f\x9f\x3b\x97\x8a\xf5\xdf\xb5\xb7\xe0\xe5\x9b\xf7\xa2\x77\xec\xe3\xa3\xf1\xe8\x6b\xe8\x6d\x5b\x96\xe1\x18\x0b\x81\x2d\x4d\xb9\x82\x92\x38\x8e\x43\x09\x95\x0a\x6a\x5b\x38\xca\xa1\x4a\x23\x85\x08\xb1\x05\x25\x88\x6b\xea\xd8\x18\x53\x0a\x6c\xa2\x28\x66\xf0\x94\xd7\xcc\x7e\xc8\x8e\xbc\x7a\xdd\xc7\xd7\x9d\x3e\xce\x96\xfc\xf4\x68\x55\x23\xf9\xe4\xb0\x0d\x0e\xd5\xee\xa2\xe4\x4e\x2a\x43\x6b\x3b\xc5\x29\x56\xde\xd7\x61\x9b\xa0\xf9\x31\xad\xcb\x67\x07\xdc\xf7\x76\xe4\x0b\xe4\xd7\x32\x6d\x5d\xaf\x99\xdd\x62\xdd\xc1\xc4\xef\x55\x86\x2d\x15\xb0\x11\xf5\x58\x65\x75\x4c\x46\xe5\xb0\x23\x3b\x99\x8e\x9f\xee\xb9\x99\xc6\xca\x1b\x7a\xb3\x2e\x5f\x82\x5c\x27\x5b\xa4\x6a\x39\x5f\xaf\x7c\x2f\x62\x35\x9a\x67\xc6\x4e\x43\x3f\x55\x14\x38\x0c\xca\x4f\x08\x25\xdd\xc4\xb8\x61\x07\x55\xbe\x2e\x65\x87\xf9\x5e\xe7\xd8\x20\xf6\x28\x14\xba\xd6\x6c\x77\xd7\x51\x75\xc3\x73\x75\x3f\xec\x76\x3b\x8d\xc6\x47\xb9\xcc\xe2\xe5\x9d\x1d\x79\xec\x14\xfb\x8b\x3b\xe2\xb6\xac\xeb\xf4\xf0\xf2\xf3\x9b\x76\xc4\x0f\xa4\x7f\x3d\xfe\x9f\xa2\x1f\xd3\x35\x45\xec\xd5\x0d\x3b\xe2\x62\xcc\x60\x8a\x5f\xa7\xf8\xbe\xae\x79\x07\xfd\xb2\x4a\x13\x7a\x07\x7d\xf7\x8a\x7e\x3a\x68\xad\xc2\x62\x75\x97\x1c\x8f\x12\x85\x6e\x37\x4f\xf4\x6c\x95\x0e\x61\xa9\x91\x30\xfd\x9c\x75\xcc\xe6\x7a\xdd\xa9\x9e\x96\x0b\x78\x77\x86\x8f\xd8\x8e\xfc\x0d\x97\xa7\xb7\xe9\x7f\x6a\x47\x6e\x34\xb2\x2d\xdb\x08\x45\xe4\xd3\x06\x9b\x11\x49\x98\x63\x10\x50\xd2\x06\x4c\x52\x00\x81\xa3\xb0\xd4\x1c\x6b\x47\x29\xc5\x95\xa0\x36\x42\x06\x3b\x94\x5a\x0e\x03\x96\xf5\x84\x32\xf1\xfc\xac\x85\x54\x63\xdf\x9f\xd9\x99\x44\xa3\x86\x23\x25\x47\x11\x1d\xee\x0a\xa3\x70\x5d\x22\x09\xd7\x34\xcb\x2d\x15\x69\x45\xe5\x6e\x3a\x3a\xef\x05\xcf\x3a\xe2\xc9\xe4\xf1\xce\x8e\x3c\x76\xca\xf4\x45\x14\x8b\xc7\xd4\xbd\x3b\x46\xbf\x97\xd4\xcb\x28\x78\x6e\xd2\xcf\xf7\x3d\xb6\x0a\x6b\xfe\x4e\xa4\x87\x79\x55\x52\x83\x99\xb7\x81\x24\x7d\x48\x59\x3d\x57\x4e\x46\xc1\x6c\x1a\xe4\xd7\x1b\xda\xf0\xac\x3e\x6f\xa4\xda\xcd\x64\xa4\x11\x21\x6e\x52\xed\x40\xa5\xc6\x67\xdd\x90\xd5\x92\x1b\x6b\xb6\x61\x19\x7b\x40\xda\x02\x18\x79\xba\x25\x2c\x39\x92\xb9\x04\xeb\xe6\xeb\x99\x48\x76\x42\x67\x8f\x72\xbe\x0c\x1b\x7e\xa5\x59\x9d\x57\x47\x0a\xae\xd7\xd3\x72\xb2\xe1\x87\xf9\x71\x6c\x84\x5f\x47\xa5\xd6\xaf\x23\xff\x06\x2a\xc5\xaf\xf6\xba\x01\xb5\x07\xf5\xcb\x78\xbd\x51\xde\x47\x85\x07\xd2\xbf\x37\x27\x6b\xec\x28\xe8\x16\xa9\x3c\xdf\x36\xef\xbf\x1a\xe1\xf3\x5c\xde\x47\xa5\xef\xa1\x7f\xd6\xe1\xbe\x80\x72\x1f\xf1\xfb\xb9\xdb\xe1\x31\xd1\xc6\x36\x0e\x91\x54\x73\xc8\x6c\x8d\x0c\x63\xd8\x68\x6d\xa0\xcd\x04\x45\x0e\x80\x14\x3a\xd4\x01\xe2\x74\xe3\x0e\x30\x14\x10\xe0\x10\x8e\x11\x75\x34\x76\xe0\x33\x8a\xc0\xfb\x51\xe4\x9d\x9d\x66\xeb\xb5\x96\xbe\x73\x96\xf4\x6a\x89\xa1\x42\x26\xdc\x2f\xd0\x71\xce\xb3\xf9\x34\x2c\xc8\x4d\xa1\x25\x96\x01\x2a\x67\xbb\xc7\xc5\x76\x93\x20\xbe\xdd\x6d\x0d\x71\x65\x9a\x8b\x8c\x28\x6d\xd6\x53\x9b\x1e\xa2\x40\x8e\x36\xbb\x52\x5f\xb4\x7c\x18\x2d\x64\x60\x22\x9b\xa8\x59\xad\xd5\x3b\x6c\x96\xc5\x75\xb6\x06\x96\xe3\x72\xf5\x94\x4f\x2b\x6b\xa3\x44\x76\xbf\x99\xcd\xcd\x28\xd1\x6b\x26\xec\x83\x16\x21\xf6\x26\x45\xbf\x2f\xac\x6e\x75\xc0\x06\xad\xb0\x33\xae\xcc\x48\xfe\xb2\x34\xfd\x25\xa8\x10\xdf\x79\x9f\xfe\xfd\x82\xee\xe1\x5e\xff\xf6\xb5\xfe\x7f\x30\xbf\xe9\x3f\xc1\xef\xf7\xa0\xce\x75\xec\xee\x23\xa5\xde\xfd\x06\x94\xba\x97\x5f\xef\x1d\x7e\x3f\x85\x52\x90\x03\x6c\x23\xa2\x6c\x61\x11\x60\x31\x44\x18\xc6\x98\x4b\xa5\x1c\x5b\xdb\x46\x33\x28\x99\xa3\x19\x64\x44\x41\xc1\x29\x10\x16\xd1\x90\x28\x60\x6c\xad\x81\x65\x14\x7a\x46\x29\x74\x3f\x4a\xbd\x13\xd5\xfe\x0e\x4a\xbd\x71\x19\xe4\x2b\x25\xae\xbb\x14\x37\x01\xd0\xf
b\xf2\xc6\x19\x28\xe3\xa8\x6a\x4d\xb7\x4a\x5d\x98\xf1\xed\x4c\x6f\x4f\x3a\x9d\x7c\x2b\x9b\xeb\x16\xca\x1d\xdf\x67\xc5\x49\x7b\x19\xcc\x7a\x99\x35\x6c\x50\x9e\x04\x9b\xd0\x6b\x17\xa6\xdb\x6d\x98\xf4\x66\x85\x32\xa9\xcf\xdb\xa3\x01\xec\xa5\x57\xc8\x6a\x87\xa7\x08\xe9\xe2\xa0\x7e\xec\x76\x57\x59\xaf\x50\xe9\xd5\xbd\x65\x05\xe6\x48\x92\x36\x0e\x21\x6d\x44\xaa\x48\x43\x91\xd2\x05\x95\x43\xf6\x20\xc3\x3e\xb2\x79\xfd\x71\x94\xba\x57\xea\x53\xf1\xdf\xbe\xd6\xff\x0f\xe6\x37\xf3\x27\xf8\xfd\x1e\x94\xfa\xcd\x83\xf0\x81\x52\xef\x7e\x03\x4a\xdd\xcb\x6f\xf9\x1d\x7e\x3f\x85\x52\xd8\x30\xc2\x1c\x68\x51\x2e\x80\x2d\x0d\xb4\x05\x91\x48\x0b\xa6\x99\x45\x15\x45\x5a\x68\x0d\x29\xa4\x94\x60\x00\xa0\xe6\x42\x29\x28\xa4\xb6\x85\x01\x50\x4a\x69\x7e\xea\x52\xf8\x7e\x94\xba\x9e\x2e\x9f\x43\xa9\xcb\xa8\xbc\xe5\x39\xfc\x6b\x69\xfe\x9c\xf5\x29\xd7\x55\xb4\x7c\xcc\x65\x46\x2d\x55\x5f\xe7\x66\xc7\x63\x66\x9a\x41\xa3\x82\x9b\xd8\xd4\x33\x69\xbe\x44\xb5\x60\xbd\xc9\x25\x31\x9a\x4f\xeb\xd5\xe3\x18\xe4\xc6\xb3\x6d\xa1\x16\x80\xf0\x70\x94\xf3\xaa\x48\x56\xf4\xa6\x95\x2c\x15\x2b\xc7\x95\x2e\x59\xb5\x42\x85\xaf\xda\x5d\x10\xfd\xf4\x89\x6d\x92\x4e\x98\xf7\x17\x7b\x53\x2b\xd8\xdd\x0c\x25\x34\x10\x9d\xda\xd2\x1e\x86\x93\x5e\xcb\x6b\xd0\x25\x6a\x6c\xab\x3a\xe9\xad\x8e\xb5\xbf\x4e\xa7\xba\x57\xfa\x33\xf1\xdf\xbe\x52\x1e\x8d\x56\xde\x9f\xe0\xf7\x7b\xd0\x2a\x26\xb6\x0f\x97\x7e\xf7\x1b\xd0\xea\x41\xfc\xbe\xaa\x59\x7c\x0a\xad\x1c\x02\x05\x87\x44\x5b\x1a\x73\x0e\x30\xe3\x10\x2a\x09\x18\x12\x52\x50\x4c\xb8\x66\x5a\x70\x88\xb4\x6d\x6c\x8c\xa0\x6d\xd9\x06\x09\x06\x2c\x6c\x63\xcd\x04\xe5\x44\x3c\xa3\x15\xb9\x1f\xad\xb2\xaf\xce\x92\x53\xf9\x08\xad\xae\x9f\x63\xe8\x75\x91\xea\xeb\x12\xd7\xb1\xa6\xd9\xa3\xa4\xe3\xc5\x4e\x1f\xf2\x25\xdc\x3c\xe6\x33\x62\x99\x70\xfc\x56\xd5\xf7\x57\xa0\x43\x46\xb2\x4b\x86\x8d\xca\x0e\x67\xf4\x6c\x7a\x0c\xdc\x7a\xa4\x47\x61\x81\x55\xaa\xd3\x64\x77\xbc\x94\xb3\x34\xcb\x6d\x95\x33\x90\x24\xaa\xec\x13\x60\xc0\x18\xf0\xf7\x85\x56\xe1\x09\x59\x06\x8d\xcd\xb0\xdb\x27\xc5\x59\x6b\x32\x2c\x49\xbf\xb9\x2d\xf5\xb2\xcb\x39\xa5\xf9\x84\x41\xc1\x41\xe6\x1c\x7b\x1c\xd4\x2d\x2a\x78\xf4\xd1\xfd\xce\x7f\x1c\xb5\x1e\x84\x02\x6f\x7a\x23\x3f\x1a\xb5\xb2\x7f\x82\xdf\xef\x41\xad\xeb\xf9\xfd\x48\x14\x70\xbf\x01\xb5\x1e\xc6\x6f\xfa\x77\x9e\x3f\xe7\x87\xe6\x20\x68\x0c\xb5\x2d\x87\x48\xc1\xb4\xa0\x06\x48\x2c\x6c\x6c\x19\xaa\x2d\x84\x04\x67\x82\x58\x0e\x37\x36\x24\x46\x18\x68\x21\x4c\x99\x0d\x19\xa7\x98\x03\xc6\x8c\xf9\xd7\x3f\x3f\x70\x3c\xa6\xbf\x36\x9e\x54\x0b\x9b\x25\xc9\x26\x27\xd5\x7d\xb3\x90\x11\x09\x4b\x94\x4b\xe9\x82\x9f\xa9\x47\x68\xa2\x6b\xb0\x9c\xf2\x32\xed\x62\xa3\x56\x3b\x1d\x20\x9c\x24\x34\x1d\xba\xef\xef\x04\xcf\x1d\xbe\x80\x74\xd5\x92\x91\x61\xc4\x47\x13\xf7\xb8\x98\x0d\xa4\xbb\x5f\xcf\x75\xa2\x35\xe4\xb3\x6d\x71\x97\x99\xa7\xb7\xd0\x5a\x53\x91\x3d\xfb\xf2\x5f\xf2\x99\xc5\x04\xb0\x35\xcf\x3b\xc9\xfe\xa9\x7a\x10\x68\x67\xda\xdf\xb9\x95\x23\x36\xa5\x45\x98\x1e\xec\xea\xa3\x3a\xd9\x4f\xfb\x54\xe6\x17\x2d\xd2\x49\xad\x47\xe3\x5e\x3f\x8b\x37\xfd\xf6\x60\xe8\x14\xe8\xb8\x39\x77\xd3\xdb\xe9\xb6\xbb\x9a\xa1\x7c\xfb\x30\xa1\x75\xa3\x68\x41\x05\x1d\x27\x90\xa5\x71\xb6\xd9\x05\x65\x3f\xfd\x9c\x07\x7e\x91\x29\x25\x7b\xbb\x81\xdc\xe8\xf5\x36\xe7\xad\xad\x0c\xc4\x99\x2a\x9d\x80\x74\x91\x1d\xba\xb6\xd3\xaf\xf8\xd1\x61\x32\x69\x0c\x26\xc6\x79\x61\xed\x75\xb4\x92\xbf\x8e\xe8\x1b\x68\x55\x8c\x2d\x7f\x89\x46\x88\x27\x74\x3b\xee\x26\x42\x3b\x5b\x1e\x57\xb6\x76\x7d\xb5\x5b\xe5\xc7\x61\xc5\xef\xea\x1e\x4e\x86\x3b\x2f\xa8\xbc\xf4\x43\x2c\x9f\xce\x79\x1c\xae\x75\xd5\x4f\xe9\x20\x0f\xa4\x7f\x3d\x0f\xbe\xaa\x03\xcd\x2e\xfd\x93\x5e\x8a\xc5\xbe\x7e\x68\x
27\xd1\x58\xe8\x30\x53\x5d\x0c\xb3\xbb\x8c\x0f\x23\x9b\x34\x0a\x8b\xa1\xb3\x74\xab\xe3\x62\x52\x34\xd0\x7c\xd5\x3d\xc7\x64\x5c\xf2\x5d\x7d\x71\x1e\x3f\xb3\x72\x15\x93\x71\x5d\x4e\x6d\x98\xff\x3d\xfc\xc5\xfb\xf4\x73\x3a\x8e\x94\xd2\x92\xd0\x91\x48\x33\xc6\x29\x92\x92\x30\xc1\x95\xad\xb4\x44\x86\x48\x42\xb5\x40\x0c\x43\x49\xb0\xa3\xa5\xc2\x96\x90\x88\x49\x62\x31\x45\x85\xe5\x00\x85\x9f\x75\x1c\x7a\xbf\x8e\xe3\xbf\x3a\x0b\x4e\xe5\x53\x3a\xce\xab\xb3\x28\x0d\xdb\x38\xec\x80\x9d\xf0\x5b\x9b\x10\xb2\xf5\x6f\xdf\x8a\xed\xd0\xc6\xf9\x0c\x4d\x8f\x53\xb9\x1a\xd3\xb3\x6d\x29\xe8\x8e\xcb\x41\xc1\x0e\xc6\x1e\xad\x74\xa2\x89\x2a\xfa\x93\xd2\x9e\x0f\xf3\x20\xe4\xed\x62\x8f\x17\x0b\xfb\x23\xa9\x39\x64\x9f\xa0\xbb\xe3\x3c\xdc\xef\xc5\x8a\xa6\x14\x8c\xf0\x62\xdf\x1e\x35\xec\x00\xd1\xcc\xae\x3e\x5c\xa4\x9e\x25\x7c\x54\xde\x95\x8b\xe5\xda\x7c\x32\xa9\x95\x3b\x62\xd9\x45\xab\x1a\xd8\x77\xe8\x31\x4a\xef\x5b\xa3\x15\x70\xb7\x99\x69\x36\xad\xf8\xae\xaf\x3e\xf2\x99\xff\xe3\xba\xce\xe3\x74\x87\xfe\xdb\xf9\x45\x1f\xc8\xaf\xff\x27\xf8\xfd\x1e\x5d\x27\xe6\x9f\xf3\x70\xdd\xc1\xfd\x06\x5d\xe7\x21\xfc\x96\x7a\xdd\xc1\x40\x74\x53\xab\x5e\x83\x8c\x04\xb4\xee\xd1\x7d\x1c\x04\x2d\x60\x01\xec\x38\x96\x65\x23\xe3\x68\x63\xa8\x81\x80\x6b\x9b\x6a\x02\x24\x94\xd8\x70\x43\x10\xd2\x84\x58\x5a\x2a\x24\x21\xb4\x88\xc0\x0e\xb7\x18\x64\xd4\xe2\xcf\x68\x66\xdf\x8f\x66\xb9\x57\x67\xcd\xa9\xdc\x84\x66\xf1\x5e\x76\xa6\x6a\x5a\x6a\x0f\x53\xb5\xe2\xa2\x56\x70\x65\xa3\x32\xe8\x1d\x25\x9c\xe4\x6c\x13\xd8\xf5\xf5\xd1\x3f\x64\x5d\x67\x92\x1d\x97\x9b\xe3\xcb\xa8\x9f\x4b\x0c\xed\xd6\xe1\x3a\x5a\x2d\xca\x75\x1d\xcc\x40\x6b\xd6\x82\x7e\x38\x12\x68\x40\xe0\xce\x2b\x6e\xa7\xb3\x4d\xba\x51\xa5\xa5\xa0\x0a\xf6\x93\xa4\xbb\x1c\xac\x83\x6a\x72\xd5\x25\x0b\x44\x1a\x6b\x27\x35\xe2\x61\xc7\x1b\x66\xa7\xf3\x44\x23\x18\xc1\x6c\x2f\xed\xb0\xea\x6a\x97\x8e\x94\xbb\x3b\x23\x52\xff\xe0\x06\xd9\x7c\xdf\x95\xc9\xd1\x96\x6d\xf3\x9c\x40\x52\xdf\xad\x9a\x94\x35\x6b\x51\xab\x7a\x6c\xee\x86\x29\xd1\x28\xa6\x1b\x51\xff\xa3\x6c\x6b\x7f\x1c\xed\x1e\x82\x1e\xbf\xaf\x3a\x7f\x0c\xfd\x72\xff\x0e\xfe\xbf\x07\x0d\x63\x55\x7d\x3b\xba\xb8\xdf\x80\x8e\xb1\xd3\x89\x6f\xe1\xff\x3c\xbe\xa6\x80\x7a\x07\x10\xf6\x9b\x87\xc5\xe8\x10\xe4\xbc\x51\xd8\x91\xa8\x32\x1f\x24\x90\x55\xb5\x84\xdc\xaf\x06\x41\xad\x30\xe8\x65\x3c\x12\x1b\xaf\x73\xf9\x0c\xa2\x52\x66\x28\xe6\xda\x32\x8e\xc5\x84\xe5\x30\x46\x25\x30\x88\x61\xad\x6d\x46\x2c\x0b\x70\x6c\x69\xad\x29\x3b\x79\x4f\x11\x2e\x28\x12\x46\x01\x86\xa0\x42\x9c\x1b\x71\xb2\xd8\xb3\x87\xf8\x50\x5d\x4f\xbf\x6f\xb7\xd8\x0f\x29\x5b\x64\x72\x32\x6a\x37\x98\xeb\xf8\x2e\x5f\x1c\xc6\x74\x96\xdf\x0f\xcb\x69\x58\xec\x76\xbd\xa9\xce\x0e\x47\x66\x9f\x23\xed\x43\xc9\xcf\x07\xbd\x8a\xbb\xaf\x56\x65\xba\x36\xcf\xf3\xa8\x45\x6b\x60\x1e\xd6\xbb\xa8\x88\xab\x51\x31\xd7\xc3\x83\xbc\xda\x17\x8e\x19\x9a\x7a\xb6\xd8\x73\xd3\x2b\xe2\x5a\xbf\xb0\xca\x4f\x3a\x3b\x53\x6d\xd9\xd3\x28\x70\x79\x50\xb5\x27\x93\xf5\x7a\xda\x59\x86\x76\xb2\x3e\x3e\x58\x74\x69\x3f\xc8\x62\x1f\xdb\xcd\xdd\x80\x30\x7d\xec\x5e\x21\x4c\x2c\x3a\xe6\xc1\x08\xf3\xec\x0f\x7d\x50\x50\xae\x3b\x79\xcf\x59\xcc\x9c\x71\x6a\x9f\x87\xd9\x69\xe5\xc8\x43\xb3\x66\x99\xb5\x8f\xca\x22\xdf\xca\x1c\xc2\x4e\xb0\xaf\x06\x57\xe3\x7e\x99\x07\xa9\x58\x8c\xe1\x17\xe7\xdd\x29\x22\xfe\xec\x1b\x2a\x2e\x93\x27\xe6\xef\xfc\x55\x5f\xbd\x93\x44\xaf\x23\xed\x67\xd7\xd2\xdf\x47\xa5\x49\x65\x2b\x82\xa7\x6a\xf3\x31\x5b\xec\x1d\x2b\xfb\xe5\x39\xfd\x42\xef\x7f\x66\xfb\xd5\xe5\xf9\x16\x04\xcf\x9c\xb0\xf5\x8c\xe0\x7d\xf7\xff\x03\x08\x2e\x91\x54\xd8\xe6\x08\x00\x6d\x41\x82\x31\x35\x36\x94\xc4\xc2\x90\x0a\x22\x08\xc6\x58\x13\x48\x01\x46\xc0\x48\
x62\x53\x4b\x3b\x9a\xd8\x98\xda\xd2\xb1\xa5\xc2\xcc\xc8\x7f\xfd\xf3\x03\x59\x0f\x41\xf0\xc2\xd5\x70\xfd\x75\x3a\xb1\xdd\x2b\x17\x17\x8e\xb5\x91\x41\x63\x9d\x20\x64\x91\xa0\xed\x60\xe9\xa3\xdc\x72\x4b\x8f\xb9\x75\xb5\xb4\x1b\x1b\x77\xbf\x5f\xf5\x54\x58\xcb\x6f\xc6\xf6\xbe\xb7\xea\x8b\xc3\xa0\x9a\x6e\x77\x47\x03\x5d\x0d\xd8\xa1\xef\x76\xc0\x76\x7d\x10\xaa\xeb\x63\xe0\xee\x1b\xd9\xfa\xfa\xe7\x3d\x92\x3b\x07\xe4\x66\xfb\x5c\xbf\xd0\xaa\x6f\xe5\xaa\xe5\x15\x96\x1d\x50\xf7\x48\x23\x6c\x95\xcd\xbe\x1b\x56\x47\x91\xf2\x4d\xde\x54\x1a\xe6\x62\x68\xba\x0b\xf1\x63\x3a\xce\x2d\x88\x6f\xdc\xab\x95\xf7\x3f\x02\xf1\x63\xa6\xb1\x5b\x10\x2f\x77\xea\xfb\x33\xe2\x5d\x47\x84\xfd\xbd\x88\x17\x43\xfc\xff\x91\xed\xd7\x97\xe7\x5b\x10\x3f\x75\x32\xd8\x9f\x11\xff\x3a\x22\xef\x3f\x12\xf1\x21\x63\x4a\x52\x8a\x80\x22\x0e\xe1\x84\x70\x8d\xa8\x05\x0d\xa5\x96\x96\x44\x10\x6e\x59\x8e\xa3\x20\xe6\x00\x0b\x84\xb1\xb4\xa1\x96\x52\x42\x4e\xa1\x62\xdc\x61\xc8\x79\x42\x7c\xf0\x10\xc4\xbf\xce\x5d\xfe\x6f\x44\xfc\x18\xbd\x18\xe2\x5b\xbb\xe5\x50\x2f\xa7\x8d\x65\x82\xd4\x60\x90\xd6\xb3\x29\xd8\x8c\xc8\x64\x59\x2d\x55\x9a\xd5\x65\x2d\xac\xd5\x86\x64\x39\x94\xa5\x4d\x3f\x39\x5d\x6a\xaf\x35\x58\xf5\x1b\xc9\x31\x8a\x5c\x56\xf0\xe7\x6d\xa1\x12\x6b\x2e\xcb\x5e\x71\xd2\x2d\x4e\xb2\xfb\x5c\xda\x2b\x8f\x3a\xb5\xd4\x33\xe2\x17\x7b\xc8\x0f\xf7\x20\xec\x44\x9d\x4d\xa5\x5f\x82\x61\x75\x2f\x3a\xfd\x74\xde\x1a\xcf\x5a\x2b\xab\x99\x2b\xad\x8e\xe9\xf9\x10\xf2\x61\xf3\xa3\x6c\xd0\x9f\x44\xfc\xd8\x09\xc5\x2d\x88\xef\xbb\x57\x2b\xf5\x7f\x04\xe2\x5f\x4e\xdb\x6e\x42\xbc\x73\x20\xf1\x19\xf1\x2e\x12\xfa\xb7\x23\x5e\x0c\xf1\xff\x47\xb6\xdf\x5c\x9e\x6f\x41\xfc\xb3\xe1\xfa\x8c\xf8\x33\xf7\x2f\x44\xfc\xb8\xbf\xcf\x67\x10\x1f\x28\xdb\x41\x48\x62\xc3\xb9\xb0\x1c\x4d\x21\xb5\x18\x97\x06\xd9\x00\x2b\x20\xb8\xd0\xd0\xa6\x10\x70\x28\x10\x74\xb4\x32\xc8\xc6\x94\x68\xc6\x38\x37\xc8\x61\x92\x8b\x7f\xfd\xf3\x83\xc4\x11\x3f\xe3\x0d\xca\xed\x4c\x6e\x18\x76\xd5\x91\x1c\x9d\xc8\x46\x5d\xff\xb0\x0c\x60\xbd\xe2\x90\x5e\xbd\x03\x36\xa5\x55\xba\x12\x6c\x26\x66\x75\x02\xac\xde\x8b\xcc\xbf\xe7\xa9\x74\xea\xe9\xac\x98\x64\xd7\xbd\xa6\xf7\xcb\xfb\x54\xc6\x3b\x94\x8f\x31\xf1\x6e\xd0\x5e\x62\x3f\x3e\xbd\xce\x77\xf0\x61\x99\x55\x63\x4a\x4d\x3f\x93\x19\x2e\xed\x30\xeb\x36\x3d\x05\xa3\xa8\xba\x11\xc7\x7d\x72\xc8\x0b\xb2\x0c\x77\xe3\x2c\x9a\x6d\x16\x85\xa0\x92\x70\x8f\xc3\x03\x5c\xb6\xd7\x45\xe2\xce\x0e\xf3\xc1\x6c\x3c\xdc\xad\xc2\xee\x36\xd7\xa0\xad\x2c\x2b\x0f\xd4\xb4\xe5\x66\x9e\xad\x30\xb9\x6e\xff\xb8\xaf\x2d\x96\xe9\x6a\x38\x49\xf3\xe3\x51\xb6\x2c\xb1\x48\x14\x60\x9e\x17\x8f\xc5\x2a\x3a\x2a\xbf\x3b\x9f\x2c\x1c\x3c\x2d\x7e\xe4\x81\x14\xbf\x2b\xed\x6d\x84\x2e\xc7\x10\xba\xdc\xf0\xf6\xad\xb2\x2e\x0c\x14\xaa\x4d\x6b\x87\xee\x56\x49\x51\x5c\x67\x7a\x1e\x3a\x46\x51\x56\x74\xf0\x66\x97\x07\xe9\x9f\xfd\x70\x3e\x53\xcf\x5c\xfa\xf9\xdc\x81\x1f\xdd\x91\x73\xc5\xeb\x03\xe9\x67\x6f\xa1\x1f\xb3\x13\x9f\xef\x50\x3a\xf3\xb3\xcf\x37\xb3\xc1\x1e\x4f\xb2\x48\x26\x64\xb2\xa0\x19\xd8\xaf\x16\x56\x65\xdc\x34\xd5\x4e\xab\xbd\x16\x43\xdb\x1d\xb5\x4a\xf3\xa9\x7d\x8e\xaa\x6f\x6f\xc2\x0e\x88\xca\x2f\xd3\x25\xd8\x97\x47\xe5\xeb\x74\x52\xb1\xf6\x96\x6f\x69\xef\xf5\x3c\xf5\xca\xcd\xfc\x31\x1e\xac\xfc\x09\xa9\x14\x82\xa9\xb3\x2f\x33\x30\x5c\x13\x0b\x10\x86\xb1\x65\x29\xc9\x2c\x20\x6d\x4b\x61\x1b\x3b\x8c\x53\x1b\x50\xa4\x20\x47\xd8\x30\x81\x34\x82\x9c\x41\xc0\x89\xa5\x4e\x3b\x6f\xf8\x10\x3d\xec\x9d\x3b\x7c\xae\xf5\xb0\x37\x6e\x38\x7a\xbf\xc4\xfd\x05\xb3\x87\x75\x6b\x3f\xda\x57\xb0\x98\x16\xca\x4b\xaf\x30\x6e\xcc\x97\x53\xaf\xc1\x86\xfe\xbc\xba\x77\x57\x95\x6a\x36\x5c\x36\x6b\xc9\xfd\xda\x6f\x8f\x93\xb5\x1e\x9c\x35
\x07\xad\x4e\x22\x39\x28\xd5\xf3\xe3\xc4\x20\x0c\xbd\x7e\x16\x02\xdf\x10\xa7\x92\xe4\xa2\x50\xcc\xce\x17\xd1\x2a\xff\x24\x51\x1b\x02\x26\xed\x94\x67\x96\x91\xdb\x8e\x3c\xb7\x72\x08\x72\xb9\x4c\x4b\xe5\x27\x13\xd9\xa9\x35\x66\x56\x42\xd4\x46\xc5\x79\x9e\xb7\x2a\x8f\xd9\x41\xc7\x0d\xce\x37\xe8\x53\xa6\xe5\x5e\xe9\xc1\x7f\x99\x3e\x95\x7a\x4d\x9f\x8a\x5b\xc9\x6f\xd0\x27\xd0\xc9\xed\xe7\x59\x9f\xb8\xce\x62\xfa\x57\xe8\x13\xd7\xa9\x35\xae\xf4\xa9\xff\x99\xed\x8f\xe1\xf8\x0d\xfa\x14\x3b\xdf\xd1\x74\xd6\xa7\x62\x91\x92\xb7\x47\xee\xfd\xbb\xf5\x29\x2a\x90\x23\x95\x22\x12\xd8\xca\x40\x0b\x20\x45\x38\xd1\x10\x00\x89\x05\x82\x10\x38\x82\x0a\xcd\x80\x05\x09\x02\x54\x00\xce\x05\x74\x84\x04\x10\x01\x4a\x01\x51\xe4\x19\xb9\x1f\x10\xf3\xfb\xce\xbd\x48\x7f\x7a\x07\x1d\x2b\xb1\x1d\x74\x55\x58\x0b\xbf\x33\x1d\x83\xce\xb2\xd1\xea\x36\xe8\x30\xc8\xce\x24\xcf\x57\xc3\x64\x2a\x33\xe9\xfb\xe0\xd8\x9c\x44\xb5\x43\xdf\x12\xb3\xc6\x50\xa1\x51\x33\x55\x44\x47\xd6\x72\x12\xf9\x9a\xef\x56\xbd\x52\xa5\xd4\x1f\x07\xb9\x75\xdf\x0f\xb3\xfd\xac\x5f\x1e\xce\xc0\x3e\x4c\xbc\x9c\x92\x1d\x47\x15\x20\xb3\xcc\xe9\x7b\xac\x13\xe1\x41\xce\x5b\x1d\xf2\xe1\x7a\x49\xeb\x6e\x54\xee\xac\xf8\xe2\x30\x74\xbc\x51\x76\x92\x55\x97\xbe\xf8\x4b\x10\xbf\x74\xa9\xff\xf6\xd8\xdb\xff\x34\xc4\x8f\xa9\xa6\xb7\x21\xde\xa9\xfc\xe7\x22\xfe\x7f\x50\xfb\x1f\x88\xf8\xbb\x5f\xe7\xd3\xc3\x11\x3f\x5e\x3e\xe5\x05\x46\x95\xed\x18\x60\x1b\x6e\x38\x44\xc0\x56\x94\x1b\x6a\x33\x0d\x34\xc1\xd0\x32\x88\x00\x68\xd9\xc6\x50\x20\x95\x31\x10\x48\x6a\x59\x9a\x3b\xc6\x21\x80\x10\x81\xf4\xe9\xbe\x29\x84\x1e\x92\x5d\xaa\x76\xc5\xfe\xef\xd9\xa5\x16\x4d\x45\xfb\x51\xa0\x8e\x6b\xc7\x86\xc0\xf6\x69\x3e\x94\xa8\x8f\x52\xe1\xbe\xdc\xef\xf8\xb5\x79\xc6\xd4\x6b\x83\x03\xe8\x89\x73\xf6\xf3\xb7\xb3\x4b\xa5\x13\x93\x7e\x31\x7f\xec\x2d\x4c\x14\xe4\x8b\xf9\x45\x67\xca\x4a\x4b\x39\xdc\x1c\xbc\xbd\xdd\xad\x99\xae\xdf\x1a\x6c\x0f\x01\x5a\xe5\x83\xd4\xd4\x57\xd5\xce\x60\x5c\x5d\x13\xbb\xd5\x4a\xda\xa4\xb4\x6e\x1d\xf0\x52\xcb\x66\x62\xb4\x0c\xa1\xcc\x15\x7b\x23\x84\xe6\x9b\x8d\x76\x4e\x77\x67\x8a\xee\x68\x6f\x68\x62\x58\xee\x82\x05\x9b\x4e\x07\xa4\x95\xaf\x55\xea\xf9\x5c\x12\xe6\x2a\xc9\x43\xce\x77\xab\x8b\xda\xa0\xde\x16\xb5\xc5\x65\xf7\x77\x4f\x76\xa9\x7c\x0c\xa5\xbe\x98\xdd\xa9\x99\x0b\x36\x3a\x76\xcc\x78\x53\x76\xa7\x07\xd2\xbf\x1e\xff\x4f\xd1\x8f\x65\x97\x8a\xdd\xc6\x71\x4b\x76\x27\x1c\x9b\x2b\xce\xeb\x14\xdf\x8f\xe6\xbb\x83\x7e\x26\x9f\xb4\x67\x77\xd0\x77\xaf\xe8\xa7\x1b\xf3\x0a\x5a\xa4\xdb\x29\x54\x45\xc7\xad\xe8\xae\xc4\x6c\x30\x38\xea\x8c\xcc\x8e\xbc\x43\x2a\x0c\x13\xe3\x0a\xf3\x12\xb9\x52\x3b\x3a\xdf\xe5\x77\xc9\x2e\x95\x7f\x8f\xc6\xed\xb1\xcc\xdc\x22\x8a\x73\x0d\x91\x25\x80\xb1\x31\xc2\x36\xd6\x0e\xe2\x82\x2a\xec\x3c\xa9\x8d\xc0\x20\x63\xdb\x94\x02\x6e\x2b\x07\x10\x63\x53\x68\x49\xad\x21\x67\x8e\xc5\xb0\x63\x3d\xa1\x4c\xfc\xfe\x9f\x42\x71\x8c\xf4\xac\xd0\xca\x49\xdb\x54\xd9\x5a\xef\x37\x43\x3f\x57\xc1\x95\x5d\xa7\xdc\xcc\x67\x07\x23\x1a\x20\xab\x89\xe5\xba\xd3\x39\xad\x38\xe7\x38\x9b\x57\xf3\xc3\xc6\x9e\xcf\x37\x43\xc5\x32\x64\x7e\x7e\x14\xd1\xd5\xcd\x49\xa7\xe2\xbd\xd0\x0f\x52\x61\x6b\xda\x46\x79\x50\x45\xc4\x73\x0f\xfc\x60\x4d\x56\xa3\x3e\x2a\x1f\x7b\x86\x4f\xd3\xeb\x2d\x8f\x8a\xbe\x9f\x60\x78\xae\x69\x7d\xdf\xf7\x92\xe3\xf4\x18\x8e\x9b\x4d\x96\xeb\xf6\xbb\x23\xb1\xe4\x8b\x64\x32\x48\x5a\xdb\x82\xe5\xce\xd2\x6e\xb7\xa1\x9b\xc0\x69\x8d\xfa\xe3\x13\x2a\xcc\xb2\x70\x81\x47\xe9\x3d\xb3\x2a\xb9\xe6\xac\xba\x14\xad\x7e\x17\xed\x9d\xa4\x82\x21\x37\x61\xd6\xa8\x61\xa6\x8a\x17\x13\x37\x8a\x29\x6c\xaf\x23\x4e\xf8\xeb\xa8\xbe\x85\x38\xf1\xb3\xf2\xaf\x23\xf2\xf9\xbe\x98\xf4\x9b\x52\xfd\x31\xe
2\x3c\x8e\xfe\xbd\xb9\x93\x63\xa9\xf1\x6e\x91\xb8\xf3\xfd\x12\xf9\xe0\x1d\x9a\xef\x23\xce\xa3\xe9\x7f\x94\xbd\xf4\xba\x0c\x2f\xdf\xf9\x2a\xfd\x58\xee\xef\x9b\xf4\xbe\xbb\x63\x79\xb0\x86\x18\x43\xa3\xa1\x76\x94\x72\x24\x75\x28\xb1\x2d\xc9\x0c\x41\x36\x97\x02\x1a\x62\x01\x82\x38\x73\x1c\x64\x1c\x05\x18\x13\xd4\x60\xdb\x02\x5a\x40\x87\x09\xc4\x4f\x88\x44\x1e\x82\x48\xef\x64\xaa\xba\x03\x91\xce\xda\xe2\xe5\xc6\xf7\xf3\xac\xb9\x20\x52\x65\xd3\x5f\x24\x52\xc8\x9a\x2b\x71\xdc\xb6\xdb\xfe\x91\xd5\xab\x28\x91\x29\xeb\xba\xea\x2c\x64\xb2\xbb\x52\xb0\x12\xaa\x3e\xad\xa4\x36\xb6\x5e\x6d\xab\xaa\xd9\x09\xd0\x74\x3b\x4b\x79\x85\x83\xf6\x8f\x8e\xe7\xb1\x63\x3d\x18\x36\x86\x8b\xd9\x72\x97\x8b\x26\x09\xd7\x37\xbb\xfc\x39\xc3\xc2\xb0\x9e\x4e\x0e\x69\x05\x96\xb9\xdf\xde\x1d\x4c\x0b\x2d\x4b\x25\xe0\xb6\xab\xf3\x61\x26\x51\xf1\x7b\xeb\x6d\x7d\x97\x5b\x16\x0b\x2b\x7a\xe1\xec\x2e\x44\x8a\x9f\xe5\xde\x80\x08\x55\xf7\x0a\x11\x7e\x9b\xf5\x1f\x20\xd2\xe3\xe8\xdf\x74\x4f\x65\xec\xa4\xe0\x2f\x95\xc8\x97\xe7\xd1\xbf\x8f\xbf\x73\xa9\x92\x65\xf1\xf7\x3e\xfd\x1c\x62\x60\x29\xa8\x74\xb4\xc2\x4a\x68\x49\x30\xe4\x06\x70\xae\x20\xb2\x2c\xa0\x14\x53\xd4\x76\xa4\x01\x4c\x3a\x12\x53\xc0\x29\x41\xda\xa6\x90\xdb\x90\x20\xad\xb4\x3a\xe5\xb6\x43\xf4\x21\x88\xf1\x4e\xac\xf0\x37\x22\x46\x35\xa1\x9b\xf9\x5a\xb7\xd5\x1b\x27\x7c\x77\x61\x57\x69\xce\x51\x20\xd9\x69\x12\xb1\x99\x59\x7c\x3e\x2a\x0d\xfc\xfe\x3a\xa5\x1b\x87\x08\xd9\xad\xb6\x5d\x4a\xcf\x7b\x26\x3d\xa5\x99\xb0\x2d\xed\x28\xa8\xb6\x2b\x9d\x41\x4a\xda\xc5\x8c\x6b\x57\xb4\x83\xdc\x3a\x06\xe3\xd0\x3d\xeb\x30\xf5\xf2\x7a\x35\x5a\x37\x85\x9d\x0d\x16\x0e\x5f\x36\xca\x6e\x4e\x4d\x3b\xba\xec\xf4\x0e\xe5\xfa\x60\x1e\xd5\xa6\xe1\x42\x46\xb5\xb5\x1d\x7e\xe4\xe1\xfd\x39\xc4\x88\x3b\x57\xde\x20\xb1\xcf\xf1\xba\xb7\x4b\xec\x03\xe9\x67\x6e\xa1\x1f\x43\x8c\xbf\x54\x22\x5f\x9e\xc7\x7f\x27\x7f\x9f\x42\x0c\x02\x91\xe3\x70\xe6\x68\xac\x11\x95\xc0\xb6\x31\xe3\x0c\x23\x1b\x43\xdb\x92\x92\x12\xa1\x84\x0d\x6d\x20\x05\x01\xd0\xb6\x8d\xc5\x08\xe7\xdc\x31\x40\x72\xa6\x10\x84\xa7\x33\x4d\xfb\x21\x88\xf1\x4e\x06\xa7\xbb\x11\xe3\x7a\xd4\x2e\x88\x51\x26\x8d\xf4\xac\x03\x2c\xaf\x30\x2b\xa6\x52\xbb\x5d\x79\x63\xb5\x77\x09\x93\x10\x81\xe5\x8f\x02\x96\xad\xd2\x1d\x8a\x58\x90\xcc\xe6\xe5\x7a\xe2\xd8\x10\x3b\x6b\x36\xea\x36\x45\x64\xd5\x15\xae\xf4\x7d\x39\x9b\x58\x87\x99\xbf\xef\xcb\xc2\xd0\xab\xef\x7a\x7e\xaa\xdc\x0d\xe4\x49\xaa\x5b\xa3\x61\x71\x52\x6d\x8e\x44\x4a\x59\x93\x79\x80\x82\x69\xa6\xdc\x13\x10\x15\xf6\x07\xe0\x4d\xcd\x2e\xe2\x8b\x99\x23\x17\xaa\x3c\xfc\xe8\x7c\xf3\x93\x88\x11\x8f\x09\xb9\x41\x62\xa5\x7b\x9f\xc4\x3e\x90\xbe\x77\x0b\xfd\x73\x87\x9d\x10\xe3\x2f\x95\x48\xf7\xd7\x3b\x18\xbe\x28\x2b\xf1\x1c\xff\xb7\x9c\x37\xff\xbb\xc7\xe7\xe1\xf4\x7f\x2b\xaf\xfe\xf9\x53\xb6\x1f\xa3\x85\x63\x39\x16\x81\x50\x0b\x6e\x03\xc9\x11\x74\x20\x24\x42\x48\x63\x23\x60\x41\xa6\x95\x34\x1c\x12\x2c\x1d\x68\x4b\x4c\x84\x26\x8e\x25\xb8\x66\x44\xda\x10\x9d\x2c\xcc\xce\x43\x2c\xcc\xd7\x7d\xfa\xdb\x7e\xf7\x8b\x39\x56\xde\xb1\x30\x67\xa6\xc7\x6d\xa5\xef\x85\x29\xb0\x41\x29\xbf\x99\xcb\x66\xa2\x7c\x67\x3a\xb6\xfc\xea\xc2\x4e\x95\xe7\x46\x1e\xf4\xae\x50\x5f\x15\x8a\x2c\x6a\xe2\x5c\xb3\xec\xed\xca\x01\x1c\x35\x0a\x87\x65\x67\x92\xb2\xf7\x2a\xb5\xcf\x15\x79\xa5\xdc\x0d\xb8\x9b\xdf\xd3\x74\x63\x50\xea\xce\x83\xdc\x13\x3a\x2d\x13\x1b\xb7\x9f\xcd\x6f\x36\xd3\x5c\xb1\x3c\xcb\x47\x1b\x87\x1d\xbd\xf4\x74\x7d\xa4\x85\xd6\x21\x6d\xbc\x4e\xa2\xe3\x6e\x36\x91\x94\xa3\xc7\x58\x98\x0b\x31\x7b\xcb\x17\x2d\xbc\xc1\xa1\x96\x20\xb1\xab\xeb\x6f\xb2\xf0\x3e\x90\xfe\x1b\xd6\x8c\x4f\xdb\x7b\x26\xb1\x57\x37\x58\x78\x23\x79\x55\xf3\xef\xe5\x7d\x7b\xcf\xed\x
f4\x67\xc4\x72\x58\xfb\x0e\xfa\xee\x15\xfd\xaf\xe6\x6e\xb9\x58\x98\x0b\xef\x65\xaf\xbd\xdd\xc2\x0c\x11\xc1\xca\x38\x16\x04\x00\x60\xe0\x70\xc6\x98\x46\x40\x42\x46\xb1\x02\x54\x38\xda\x68\x47\x63\x06\x85\xad\x6c\xac\x34\xc1\x0c\x31\x07\x12\xc0\x2c\x0d\x2c\x0c\xcc\x33\xca\xc0\xfb\x51\xa6\x7e\xc5\xfe\x6f\x7d\x7d\x43\x86\xa0\xb7\x50\x26\x43\x3b\x5b\x37\x2c\x81\xdd\x30\x5a\x29\xfb\x18\x66\x0a\x6b\x6b\x98\xad\x26\xa3\xf9\x3c\xa0\x7e\xa1\x30\x4d\xba\x4e\xab\x10\xb1\x51\x73\x54\xae\x54\x47\x9b\xa0\x96\x5a\xbb\xb6\x69\xec\xeb\xa4\xd8\xb4\x52\xbd\x92\x58\x07\x07\x67\x91\x8d\x0e\x2b\x2b\x4f\x4d\x53\x27\xe5\xee\x94\x79\x3c\x0f\x9a\xa6\x9f\xf6\xea\x41\xb8\xe8\x4d\x47\xb9\x74\x34\xb6\x3a\x9b\x6a\x72\x46\x0a\x45\x04\x2c\x14\x35\x9a\x93\x74\xab\x97\x19\xa4\x52\x1f\x65\x70\xba\x1a\xc9\xc7\xa3\x4c\x55\xd5\x6a\xed\x7b\xa5\xfc\x81\xf4\xaf\xc7\xff\x53\xf4\x1f\x82\x32\xbf\x49\xf9\x4d\x56\xe5\x3b\xe8\xef\xfa\x24\x99\xbd\x83\xbe\x7b\x45\x3f\x43\x0a\xa3\xa1\x58\x24\xea\x86\x8c\x56\xad\x43\x00\x93\x6b\xff\x98\xde\x0d\xeb\x7d\xab\xa7\xc8\x74\xc4\x46\x6e\xdd\x05\x8d\xf1\xe8\x98\x38\x7d\xff\xdb\x51\x06\x00\xdb\x22\x86\x61\xa9\x28\x51\xb6\xa0\x36\xe2\x00\x62\xee\x10\xa9\x11\x75\xa4\x63\x19\x07\x13\xc7\x18\xae\x95\x31\x36\x60\xc8\xc2\xca\x31\x0e\x66\x04\x11\x81\xf5\xc9\x6a\x1c\x8f\xf0\xbf\x3d\x5f\xdc\x47\xb7\xa4\x5c\xb7\x34\x36\xcb\x3e\x55\x52\x2f\xbd\xea\xa5\x0f\xa3\x0c\x07\x3a\x31\x5a\x77\x37\xe3\xe6\x32\x17\x6d\xaa\xf6\xde\x2a\xef\xed\xce\x08\x4f\xea\xb3\x32\x6e\x90\x96\x9b\x9d\xd4\xdb\x09\xb8\x6c\xcf\xf6\xa3\xf2\x6e\x91\x2d\x0c\xd6\xe1\xd4\x73\x5b\x6d\xc0\x12\x69\x72\x90\x73\x32\xaa\x96\x4a\xf3\xce\x7e\x99\xd6\xf5\x62\x21\x68\xf9\x4f\x48\xe0\x75\x53\x26\x2a\x83\x08\xd8\xbd\xe3\xac\xd1\x98\x75\xab\x95\xd9\xc1\x4d\x78\x9d\x5a\x62\x83\x92\xb2\x57\xf6\x60\x6d\x50\x68\x81\x75\xec\x6c\xfa\x1e\x9f\xa5\x42\xac\xfd\x37\xa0\xf0\xf3\xd9\x51\xf8\x4e\xb7\x7d\x80\x32\x8f\xa3\x7f\xef\xad\x27\xd3\xcb\xab\x5b\xa4\xec\x7c\x76\x54\xe8\xbf\x43\xf3\x7d\x94\xf9\x46\xfa\x77\x67\xde\xa6\x86\x63\x6a\x0b\x82\x0c\x41\x0e\x55\xb6\xcd\x15\x22\x12\x12\x0b\x6a\x05\xa5\x42\x4a\x39\x02\x53\x8b\x13\xa1\x21\x14\x54\x40\x07\x38\xc2\x01\x42\x2a\xa4\x15\x54\xfc\x5f\xff\xfc\xc0\xf1\x28\xf0\xe2\x7a\xb3\xcc\xe6\x0a\xdd\xa4\xae\xaf\x49\xbf\x4c\x17\xf9\xae\x5b\x16\xbc\xbb\x8d\xf2\x93\x45\xe9\xb0\x72\x69\xb0\x64\xeb\x92\x9e\xec\x4f\xb1\x3a\x9f\x93\xf2\x6b\xbb\xcd\x17\x7a\x31\xf7\xda\x69\x75\xf6\x85\x7e\x50\xac\x38\x25\xb3\x76\xb9\xd1\x33\xd7\x9f\x6c\xe7\x2b\x6f\x36\x5e\x4c\xb0\x3d\x72\x37\x8b\x1d\xee\x83\x56\x67\x34\x29\x59\x23\x1a\xd4\x26\x60\xb4\x9f\xf1\xe8\x98\xd9\x65\x1a\xa8\x93\xa8\xc0\x5c\x35\xa7\x46\x8d\x42\xe4\x05\xbc\xb9\xf7\x53\x26\x28\xe6\x06\x4b\x3a\xad\xac\x9e\xef\x87\x14\xb4\x3e\x5d\x37\x59\x73\xb0\x56\xd4\x02\xbb\xc8\xdd\x58\x2b\x8b\xc1\x52\xbd\x3f\x17\x78\xb8\x5e\x16\x53\x43\xec\x3b\x56\xc2\x5a\x7f\x94\xed\xe8\x73\x76\x9b\x78\x50\xe5\x0d\xbb\xbb\xe7\xb3\x99\xdb\x25\xfe\x81\xf4\xef\x3d\xad\x8e\x99\xf5\x6f\xd1\xde\xcf\x77\x35\xde\x21\xf1\x8f\xa6\xff\xd5\xd3\xea\x7b\x32\x63\x5e\xec\x56\x0f\xcf\x8c\xf9\x29\x44\xb2\x84\x14\xd4\xe6\x1c\xd1\x53\x52\x0a\x05\x31\x36\x5a\x1a\x83\xb8\x25\x11\xb3\x8d\x03\x0c\xe2\x4c\x00\x84\xa1\x43\x1d\x87\x51\xa2\x6d\x9b\x2a\x8c\x6d\x69\x09\xa0\x7f\x22\x12\xbc\x1f\x91\x3e\x3c\xad\xbe\xe4\x9d\x7d\x20\x22\x95\x6b\xed\x7e\x68\x12\x3b\x8b\x29\x3d\xcf\x34\xf6\x65\x59\x98\x8e\xda\xbd\x9c\x0b\xb3\xd1\x38\x35\x19\xd4\x26\x43\xab\x42\x69\x76\x33\x70\x8f\x4d\x3d\xee\xc8\x74\xb5\xac\xd2\x79\x4b\xb6\x64\x62\x5f\x18\x1c\x76\xa5\x6e\xa3\x3c\x2c\xba\x74\x57\xa3\x45\x86\x2b\xf5\x28\x1b\x05\xde\x09\x35\xd2\xce\x7a\xba\x18\xf5\x5a\xf5\xad\
x35\x6e\x26\x8e\xc9\x6a\x7f\x47\x0c\xce\x1d\x17\xc2\xb3\x8a\xfe\xb0\xd0\xd3\x0a\x02\xcf\xb8\x2b\xfd\xd7\x21\xd2\x4d\xa7\xd5\x0f\xa4\x7f\xdb\x69\xf5\xb7\x20\xd2\xa7\x6e\xdf\xbd\xe6\xf1\xd1\xf4\x3f\xba\xbd\xf6\xba\xfc\xa5\xb9\x7a\x3f\xe9\x37\x0c\x35\xb2\x8c\x22\x36\x66\x50\x18\xa8\xa1\x00\x08\x6a\x20\x24\x03\x9c\x60\x87\x23\xca\x80\xc3\x84\x16\xc6\xb6\xb5\x80\x02\x29\x07\x32\x00\x01\x67\x0a\x4a\x62\x3d\x67\xce\x7e\xc0\x4e\xe8\x1d\x44\xba\x2d\x73\xf6\x45\xa7\x8a\xcd\xe4\x58\xe6\x6c\x5a\x85\x46\x83\x7e\x90\xda\xcd\xaa\x96\x29\x57\x2a\x5e\xab\x98\xb3\x02\xd5\xee\xb6\x47\x5e\x47\x54\x48\xbb\x44\x40\x66\x52\x6f\x69\x15\x96\xcb\xdd\xb5\xe8\x92\x42\xb1\xdd\x38\x46\x1c\x8e\xf3\xfe\xb6\xd3\x8f\x56\x40\x75\x37\xdd\x5c\x2f\x4c\xe6\xab\x11\x2f\x5a\x60\xf9\x33\x8a\x43\x34\xf0\x2e\xd9\x56\x85\x62\xb2\x34\x5c\x35\x4b\x83\x6d\xc1\x8f\x1a\xc5\xca\xdc\x2f\x6c\x97\x0c\xd1\xc3\x8a\xf5\xe7\xde\x60\xa1\x4a\xb1\x28\x9d\xbf\x24\x73\xf6\xb5\xf4\xfd\xe9\xcc\xd9\x37\x65\xee\x7e\x50\xe6\xec\x6f\xd4\x0f\xdc\x47\x64\xce\x7e\x30\x7f\x5f\x3e\x09\x57\xd4\x96\x46\x40\x1b\x70\xc7\x61\xda\x76\x08\x36\x46\x51\x20\x2c\x47\x73\xa9\xb4\x4d\x15\xa1\xda\x26\x0e\xa6\xd4\x48\x64\x73\x47\x6b\x09\x15\xe2\xd8\xd2\xc6\x11\x00\x3f\xa1\x05\x7c\x08\x5a\xbc\x73\x12\x7e\x6f\x9e\xfd\x58\x89\xa1\x85\x57\x2b\xca\x60\xe2\x36\xfd\xb9\xde\x2e\xd7\x91\x2b\x7b\xd3\x4a\x3e\xe9\x8f\x4c\xa9\x14\x78\x73\xc6\xab\x19\xdf\xaf\x67\xdb\x09\x98\xc6\xad\xaa\xa3\x5a\x89\x96\x5b\x6e\x8e\xd3\x8d\x6e\x50\xca\x24\xf6\x35\x5e\x0f\xf6\xc7\x1d\x43\x0d\xe9\xcd\xc6\xd4\x3f\x86\xe9\xf5\xda\xfe\x99\x35\x05\x4c\x8f\x76\x61\xbf\xc8\x4f\xa7\x83\x56\x92\x85\x22\x5d\x5d\x56\x08\x04\x1d\x02\x06\xbe\x70\x2a\x74\xb4\xd9\xad\xab\xe3\xcc\x76\xb8\xbf\x88\xd9\x5d\x68\x11\xb3\xaa\xdd\x22\xad\xf2\x32\x0e\x37\xe5\xb9\x7f\x20\xfd\x6b\x42\x5f\x45\x8b\xbf\x34\x8f\xfd\xcb\xf3\xe2\xef\xe1\x2f\x5e\x3e\x85\x16\x0c\x13\x41\x81\x0d\x94\xb6\xb1\xcd\x05\x82\x48\x20\xa9\x38\x81\x90\x59\x0c\x21\x65\x10\xd3\x4a\xd9\x16\x05\x86\x6a\x48\x2d\x9b\x42\x4a\x91\x36\x92\x00\x21\xa5\x38\xe9\x16\x8f\x89\x49\xba\x96\xec\xdf\x34\xcc\xd1\x76\xc3\x67\xa0\x56\x02\xad\x44\x9a\xa7\x8d\x9d\x4c\xd0\x44\x39\xb9\x6f\x4d\xbd\x69\xab\xa5\x32\xc3\x72\x71\x3d\x28\xe1\xe5\x08\xeb\x8f\x4e\x8c\x53\x59\xd1\x80\x92\x2c\x97\x61\xc5\x5f\x45\x95\xf6\x2a\x6a\xe6\x72\xce\xc8\xd9\x5b\xfd\xbe\xde\x01\x9d\xcb\x55\x5b\xcd\x0a\x1f\x95\x6b\x07\xc7\x0f\xa7\x04\xd6\x1b\xbb\xfa\x54\xee\x76\xdb\x2d\xd0\x23\xcf\xb8\x89\x7e\x72\xd2\xec\x96\xe7\xdd\x65\xaa\xea\x60\x3b\x8d\x69\x71\x76\xba\x9f\x31\xca\xae\x07\x52\x11\x4b\x2f\xcb\xa3\x95\x03\xa3\xfc\x3c\xdd\xac\x99\xaa\x46\xdd\xd4\x80\x6d\x0a\x7d\x35\xec\x8d\x6c\xa7\x25\x8e\xb1\xcc\x07\xf7\x9c\xe5\x14\x6f\xbf\x71\xbe\x9c\xaf\x9a\x5d\xe2\xf2\xf3\x9b\xce\x52\x1e\x48\xff\x77\x64\xff\x04\xfd\xd8\x0e\x67\x19\x7b\x75\xc3\x59\x4a\xe6\x22\x55\xa9\x37\xee\x13\x7a\x7f\x87\x73\x07\xfd\x69\x19\xb9\xc7\x3b\xe8\xbb\x57\xf4\xd3\xd5\x04\x59\x8c\x5a\xce\xb6\xdd\xc4\x85\x59\x21\xb9\x4d\xda\x4b\x7c\x4c\x66\x1b\xb2\xa1\xea\x75\xab\x9a\xdf\x2d\xa0\xb7\x1d\x6e\x6a\x03\xfb\xea\xc4\xb8\xf8\x3d\x37\xde\x6b\xcb\xb6\x91\xb2\x6c\x2e\x38\x44\x0a\x00\x4d\xb9\x45\x85\x05\x01\x75\x20\x64\x58\x68\x60\x98\x81\x16\xe4\x90\x41\xea\x40\xdb\x82\x90\x20\x46\x29\x94\x1a\x4a\xfb\x94\xeb\x13\xc7\x63\x92\xf2\xb6\x3d\x59\x54\x9a\x07\xd6\xae\xf5\x27\xfd\x69\x22\x99\xdc\x6c\x47\x35\xa7\x5d\xd8\x17\x40\x36\x17\xe0\x51\x7a\x19\x36\x12\x45\x9c\xc8\x6d\xdd\x17\x9d\xe4\xa3\x1b\xef\xcf\x0b\xde\x4d\x77\x11\x87\x3f\x51\x27\xe5\xa2\x51\xc1\xde\xa5\x96\xf5\xf9\x6a\x29\xa7\x8b\x54\xd0\x19\x1d\x8e\xbb\x6d\xfb\x08\xb7\xa4\x55\x20\xd4\xa7\x56\x7e\xd0\x5f\xf1\x4e
\x90\x6d\x98\x84\x3d\x9b\x60\x0a\x8b\xcd\x99\x4c\xe4\x0a\xd0\xe6\xa5\x76\x35\x70\x90\xf2\x07\x60\xb8\x9d\x27\xda\xf9\x4d\xdd\x2b\x2c\x92\x8d\xd4\x73\xfe\x7a\x9c\x93\x2c\x45\x37\xc3\x5a\x3d\x37\x99\xf9\xab\x74\x76\x96\x9f\x37\x12\x39\xe2\xf4\x8b\xc3\xf1\xa4\x3f\x75\x87\x2d\xd2\x1b\x1e\x87\xfb\xe4\x47\x9e\x78\xb1\xc5\x3d\x0a\x4a\x98\x9c\xbf\xff\x16\xca\xc4\xd3\xb4\xdc\x80\xc2\xcf\x67\x29\xb7\xdf\x60\xff\x40\xfa\x37\x59\x76\x63\x1e\x79\xff\x46\x29\x3f\xf1\xb8\xba\x9d\xfe\x13\xca\xa5\x5b\xb7\xd3\x2f\x5f\xd1\xbf\x05\x65\xce\x67\x49\xc5\x9b\x6e\xf0\x8f\xf1\xfb\x05\xf9\xff\x8c\xef\xb0\xc0\x50\xda\xb6\x85\xa1\xb1\xb0\x30\x36\xa0\x94\x30\x6e\x11\x6d\x4b\x8e\x0c\xb6\x2d\xc4\x8d\x85\x1c\x6e\x2b\x6e\x13\x01\x2c\xe1\x10\xe4\xd8\x96\x2d\x99\x31\xc0\x86\xa7\x9b\xc9\xc8\x43\x74\xa0\xe6\x55\x93\x7f\x9b\xad\x98\xa1\x30\xe5\x15\x55\x53\xb7\x2b\x68\xd7\x1a\xf5\x57\x86\xd0\x44\xdb\x0e\xfb\xf3\x51\x43\x2d\x32\xe9\x4e\xb3\x6c\x46\x7e\x7a\x7d\x90\xe9\xfd\x6c\xb9\xdb\xec\x7f\xe9\xce\x78\x5c\xb6\x9f\x8e\xea\xba\x26\x5d\x6f\x1f\x24\x3b\xc5\xe5\x6c\x86\x92\x48\x19\x39\xa8\x25\x0f\x79\xa7\x59\xcf\x2c\x97\xa3\x75\x52\x0e\x7a\x0e\x6a\x95\x86\x4d\x49\x70\x44\x76\xf2\x08\xdc\xb1\x28\x71\x9a\x76\xf9\x5c\x35\xfc\x79\xab\xd3\x1d\x93\xf1\xaa\xb9\xc8\x74\x4b\x53\x7c\x46\x90\x8e\x55\xca\x49\xaf\xb4\x6b\x24\x65\xa7\xd7\x9b\x65\x89\x33\x4b\xd6\x77\xd5\xad\x8b\xe7\x6c\x0e\x56\xc1\x26\x5b\xcd\xa5\x9c\x69\x32\x7b\x78\x8c\x3f\x4b\xe9\xf6\xb8\xe8\x93\x0e\x12\x4b\x65\x72\x93\x0e\xf2\x40\xfa\xd7\xe3\xff\x29\xfa\x31\x1d\x28\x76\x8f\xd4\x2d\xe8\x10\xf3\x59\x48\x5d\x27\x78\x79\x2e\xef\xa3\xd3\x8d\xf4\x57\xbc\x48\xd6\x91\x75\x17\x7d\xf7\x8a\x7e\x66\x84\x54\xdf\x0b\xe6\xa0\x42\x5a\xc8\xa4\x1a\xd5\xf4\x3e\x39\x5d\x92\xee\x7a\x98\x9e\x54\x0a\x70\x56\x2e\xf4\xca\xfb\x44\x39\x2c\xa2\xc2\xae\xe4\x0f\xb7\x69\xfb\xa7\x99\xaf\xf4\x3d\x71\xd9\x36\xb4\x6c\xa5\x89\xb0\x09\xc4\x48\x5b\xc2\xa2\x50\x22\x07\x21\x43\x1c\xdb\x42\x54\xd8\x9a\x19\x8a\x31\x41\x42\x63\x66\x0c\xb1\x0c\x32\xc8\x28\x69\x13\x40\xb5\xa4\xe2\x09\x65\xe2\x31\x4d\xd5\x9a\x82\x6e\x3d\x3d\x6d\xa1\xa8\xa9\x9c\x6c\x1e\x97\x72\x09\xbd\x8d\x34\xcc\x17\x43\x6b\xca\xfb\xbb\x68\x7c\xcc\xe1\x7d\x7f\x29\x4f\xcb\xdf\x59\x07\x3a\xa5\xa3\xfb\xc8\x9f\x25\x75\x4b\xfd\x5f\xce\xf5\x13\xa3\x7f\x1e\x85\xd3\x24\xaa\x8e\xfd\x6a\xad\x59\x4d\x65\x1a\x81\x3d\xed\xc0\xde\x34\x21\xa2\x15\xac\x65\x5a\x5a\x8c\x61\x72\x8c\x92\xfd\x45\xae\x18\x38\x3a\xac\x66\x57\x19\x3a\x4d\x69\x6f\x7b\x28\x67\x1c\xe2\x6c\x67\x24\xc5\xc5\x28\xd8\xa5\xd8\x70\x3a\x72\x53\xf9\xb2\xa3\x57\x99\x81\x2d\xbc\x71\xfa\x09\x39\x64\xdf\xe8\x0e\x6a\x8d\x6b\xfd\x95\x5f\xca\xf4\x8f\x2c\x94\x19\x8e\xa8\xe9\x79\x2d\x9b\x14\x92\x20\x73\x74\x92\x28\xd7\xea\xd8\xfe\x63\x6e\x75\x2d\xc5\x54\xac\xaf\xa3\x76\x2d\x39\xbd\x8c\xd7\x1b\xe5\x03\x54\x7a\x1c\xfd\x7b\xfd\x5f\x36\x97\x57\x5f\x97\x4a\x2f\x99\x38\xe9\x0c\xa5\xdb\x63\xb7\xbf\x87\x7e\xf0\xd5\xfa\x3e\xe2\xf7\x73\xd6\x5d\x88\x0d\x76\xa8\x50\x36\x84\x46\x01\xc7\x26\xc2\xc1\xc8\x41\x4c\x3b\x8e\x25\x94\xd6\x1c\x41\xac\x21\x51\xc2\xb6\x28\x87\x94\x1b\xa2\xb1\xe2\xdc\x91\xc4\xb2\x81\xfe\x89\x22\x0f\xf0\xbd\xbd\x4e\xdf\xf3\x3a\x8a\x7c\x35\xf6\xe4\x0e\x14\x89\xd9\x77\xc2\xbc\xd5\x2c\x07\xed\x4e\x6d\x3b\x4d\xc3\x62\x4d\x64\xe6\x03\x56\x3e\x1c\x49\xab\xbc\x49\xd5\xf3\xe1\x1a\x86\x81\x5a\x47\xb5\x30\x8d\x5b\x39\x3e\x09\xa7\xb9\x55\xb9\x3f\x98\xe7\xcb\x86\x37\xec\x72\x47\xe5\x36\x7e\xe8\x80\x44\x36\x35\x3b\x56\x57\x8d\x44\x32\xe7\xcf\x06\x41\xe6\x49\xd2\xad\x31\xcb\x73\x26\x87\xc9\xfa\x00\x6c\xbc\x6a\x66\x3a\xeb\x97\xd0\x66\x76\x68\x0e\xbb\x42\xfa\xb3\xc9\x12\x90\xca\x62\xb6\x0f\xa4\x7e\x3c\x8a\x7c\x41\xb7\x28\xf9\xc
3\x8d\xe8\xa9\xda\xf6\x78\xf9\xf9\x4d\xba\xc5\x03\xe9\x5f\xcf\x97\x4f\xd1\x7f\x1d\x45\x6e\xd0\x2d\x12\xc9\x8b\x42\xf1\x5b\xba\xb1\xe7\xf2\x69\x14\x79\x20\xfd\xdb\x33\x53\xdd\x83\x22\xd2\xe1\xd0\x31\x02\x12\x85\x6d\x07\x1b\x64\x73\x9b\x5b\xb6\xa3\x81\xd1\xda\x58\x5a\x20\x4b\x72\x8a\x35\x63\xd8\x46\x98\x42\xc2\x25\xd6\xdc\xa2\xb6\x94\x14\x02\x4c\xf1\xbf\xfe\xf9\x41\xe0\x43\xf2\x32\xbf\x73\xbb\xea\x7b\x79\x99\xdd\x5d\x39\x7e\xaa\x11\xcb\xcb\x8c\xbc\x7d\xa1\x92\xb3\xd9\xce\x6f\xd4\xc3\x9e\x5e\xd6\x6a\xe0\x58\x0e\x23\x3d\x2f\xaf\x36\x83\x59\x21\x4b\xb6\xb9\x29\xec\xa7\x57\xb9\x49\x66\xde\x31\xfd\xaa\xd3\x2e\xa6\x51\xaa\x9e\x9f\x54\x13\x53\xcd\x56\xf3\x56\x37\x6a\xe7\xca\x23\x6f\x33\xa8\xe6\x66\xb5\x41\x04\x06\xcb\xf2\xcf\xbb\x96\x0f\xc5\x62\x6d\x15\x81\xdc\xa0\x56\xa1\xba\xaf\xab\xb9\x84\xbd\xa3\xc7\x4c\xb1\x57\x9d\xb4\xa4\x15\x46\x4b\x8f\xf6\x7b\xce\x7e\x98\xf5\x3f\x3a\x03\xfa\x5c\x5e\xe6\x78\x5e\xdf\x1b\xf2\x22\x0f\x16\xee\x7d\x79\x91\x1f\x48\xdf\xbf\x85\x7e\x4c\xfa\xcf\x40\xf2\xcd\x79\x99\xcf\xed\x7d\x50\x5e\xe6\xa7\x39\x1a\x23\xf0\x19\x2b\x29\x67\x48\x1b\xe5\x48\x68\xb0\xc6\x44\x5a\x08\x38\x92\x32\x2d\x19\x71\x28\x44\xdc\xd2\x18\xd9\xd8\x60\x4b\x11\xca\xb4\x96\x00\x62\x85\x91\x74\x38\xc1\x8a\x42\x42\x9f\xd6\x76\xfb\x21\x76\x88\xf6\xab\xa3\x71\x2a\xb1\x23\x95\xf6\x4f\x2b\x68\xca\x4d\xb5\x72\x9b\x6e\x98\x28\x37\xfb\x9d\xe4\x2e\xb3\x25\x89\x4a\xa6\x3d\x60\x66\x87\x27\x7a\x75\xdc\xeb\x6e\x95\x39\x39\x2f\x30\xfd\x6e\x5b\xa7\x7b\xb6\xae\x8a\x74\x1e\xce\x96\x0d\xbf\x55\x71\x93\x8d\x22\xcd\xb5\x59\x72\xd3\x6c\x37\xf6\xf9\x20\xe8\xe4\x57\x33\x10\xcc\xf6\xb9\x9f\x7e\x1a\x74\x5e\xdb\xc2\x72\xc9\x22\xd9\x5d\xc6\x2f\x73\x7d\x68\x0f\x86\xd5\x3e\xc9\xa8\x5a\x50\x69\xcf\xed\x95\x37\x06\x99\x43\xb1\x5d\x09\xa3\x8b\x57\xd8\xeb\x52\x17\xe3\xfd\x7c\x85\xd6\x97\x70\x77\xb8\x6a\x94\x0b\xa3\xd4\xd9\xec\xfb\xb5\xdd\xd1\x70\xd5\xa8\xb0\x3d\x3e\xeb\x04\x6f\xae\xf5\xb7\x9d\xa5\xbc\xac\xb5\xfc\xf2\xf3\xd8\x5a\x7b\x7b\x96\xcf\x0f\x75\x83\xc7\xf1\xdb\xfe\x13\xfc\xc6\xac\xb8\xdb\xd8\xab\x1b\xd6\x72\x76\x81\xef\xd4\x75\x04\xc8\x23\xd7\xf2\x97\x36\xdd\xcc\xef\x3a\x4a\xee\x13\x83\xa4\xf7\xe7\xf8\xfd\xc6\x1d\xdf\xc3\x77\x5c\xaf\xf5\xef\xd7\xf8\x5d\xf2\x9c\x1e\xcb\xb3\x05\xe4\x2f\xd9\x21\x6a\xe1\x10\x87\x4a\x06\x6c\x2a\xb1\x34\x50\x3b\x14\x0b\x48\x20\x60\xd8\xe1\xcc\x86\x4a\x10\x68\x63\xa5\x21\xa6\x36\xa2\x94\x61\x8a\xb4\x25\x6d\x07\x62\xa9\xb5\x75\x5e\x45\x1e\x13\x03\xde\xb9\x62\xff\xb7\x1d\xdd\x0d\x3a\xc5\x5b\x59\x46\xdd\x72\x75\x4a\x7d\x3c\xda\xac\xfa\x73\xd3\x98\xe5\x76\xd3\x75\x2e\x18\x87\xf9\xcc\x3e\x9f\x6c\x4f\x0f\xb9\xe4\x22\x3f\x4c\x08\x55\xee\x1c\x87\xa0\x49\x3b\xd1\x6e\x5a\x4d\x1c\xf2\xcd\xfe\x04\x26\x26\x55\xde\x91\xf4\xd8\xde\xec\x87\xa5\x51\x2e\x8a\x8e\x29\x1d\x24\xf7\xc5\x6d\x50\x39\xad\x3e\xb8\xe3\x1f\xea\x6a\xd5\xac\x95\x66\xbd\x52\xa1\xe1\xb7\x86\x73\xb9\x5e\x37\x54\x05\x8f\x7b\xc7\xde\xbc\x3e\xc2\x65\x9d\x6e\x56\x71\xf6\xa3\xd5\xe7\x6a\x24\xdf\xba\x8b\x23\xf6\xad\x2f\xa1\xea\xb2\x67\x1c\x33\x33\x31\xcf\xe8\x3f\x82\xaa\x0f\xe4\xb7\xf3\x27\xf8\x8d\x59\xcb\x63\xf2\x76\x0b\xaa\xb2\x8b\x36\x9b\xba\xf7\xc6\xf9\xf8\x6f\x5f\x2b\xde\x9d\xfc\x42\x65\x8f\xd6\x7f\x90\x5f\xf7\x8a\xdf\xaf\xea\xec\x17\x8f\x84\x37\xce\x01\xde\xa1\xff\x39\xbf\x27\x63\x0c\x05\x0c\x50\x1b\xd0\xa7\x8d\x30\xb1\x2c\x8c\x0c\x97\x8c\x3a\x5c\x52\x09\x90\x63\x41\x6e\x08\xb0\x29\x07\x96\x91\x90\x01\xcb\x31\xd2\x48\x8c\x89\x36\x04\xa0\x27\x94\x64\x0f\xd9\x01\xbf\x63\x8d\xbf\xed\x66\xa2\x7d\x71\x98\x0b\xc3\x4a\xa9\x30\xec\xa0\x25\x49\x8c\xd3\x85\x89\xb3\x91\x7c\xd6\xb6\x53\x72\xe9\x4d\x83\x6e\x56\xa0\x15\xa8\x1f\xea\x87\x96\x70\xc7\x99\x88\xa7\xb7\x5e\x
a2\x5e\xdc\x95\x7b\xb3\x4a\xcb\x88\x71\xd0\xf2\x97\xb3\xcd\xa1\x5d\x3c\x80\xde\xbc\xa5\xa5\x4c\x1d\x16\x99\xe7\x1d\xf0\x34\xc9\x06\xe3\x8e\xec\x24\x0e\xed\x20\x95\x49\xac\xa2\xd9\xd2\xdf\x35\xfb\xe9\x25\xc8\xc2\x59\x90\x4c\x8f\x83\x91\x93\xf4\xea\x9a\xd5\x2f\xe3\x76\xd7\xcd\x44\xb1\x26\xde\x72\x33\x10\x70\x5f\xf1\x7f\xfc\xa5\x7c\x80\x6e\x8f\xa3\x7f\x6f\x4c\xd9\x3e\xf6\xee\x06\x69\x2a\xb5\x5e\x6a\xba\xcd\x8a\xfe\x68\xfa\x5f\x8d\x29\xfb\xce\x9b\x99\x3e\xa5\x57\x61\x6a\x31\xe0\x58\x36\x01\xda\xb6\x2d\x88\x29\xe5\x8e\x83\x89\x43\x6c\x65\x30\xd2\x58\x01\x82\xb5\x74\xb0\xa0\x82\x2b\x0a\xa5\x26\x96\x6d\x34\x80\x06\x28\x6e\x18\x7b\x46\x0c\x78\x3f\x62\xbc\x13\x85\x11\x47\x8c\xfc\x2f\xef\x53\x99\x16\xac\x1c\x63\x06\x98\x18\x62\x8c\xf6\xfd\x69\xb3\x9e\x4a\x73\x6b\x0a\xb4\xe9\xcd\xe4\x7e\xee\x8d\xba\x3a\xdf\x0c\xc8\xca\x0b\x9b\x6d\xc5\xcc\x6c\xb7\x59\xcf\xe0\xb8\xb6\x23\x3a\xbb\xeb\xb7\xa7\x47\x5a\x5d\x16\x0b\xbb\x74\x7d\x4d\x4a\xce\x21\x9a\x74\x86\x6e\xb3\xdb\xce\xa3\xde\xc1\x0c\xd1\xa1\x59\x5c\xa4\x9e\xa5\x9a\xf8\xe9\x7a\x3f\xd7\xd2\x6a\x57\xf0\xf9\x6c\x90\x1d\x74\x91\x55\x1b\x5b\xed\xe6\x6a\x50\xe2\x14\x0c\x8f\x51\x2a\x89\x1b\x8e\x5a\x92\x8b\x6a\xf7\x97\x20\xc6\x4d\x31\x5f\x0f\xa4\x7f\x6f\xcc\xd7\x03\x25\xf6\xa6\x98\xaf\x47\xd3\xff\x6a\xcc\xd7\xfd\x88\xf1\x22\x2e\x01\xa8\x8c\x64\xcc\xef\xfa\x53\x88\x81\x1d\x0c\x88\x54\x96\x51\x9c\x09\xc9\xb8\x16\xc0\x58\x02\x03\x26\x8c\x80\x8c\xd9\xc6\xe6\x90\x73\x20\x80\x36\x50\x13\xc0\x2d\x6a\x41\x49\x01\xb0\xb0\x91\x10\x49\xf9\x8c\x18\xe8\x7e\xc4\x78\x27\x8b\x69\x0c\x31\x1a\xbf\xbe\xcf\xc0\xc8\x92\x87\xd4\x54\xa0\xa0\x2f\x51\x6a\x10\xc2\xf6\xf1\x97\x9c\xf5\x31\x04\xa9\x4c\xc2\xfe\x70\x1a\x0e\xba\x09\xda\x76\x57\x11\x28\x6c\x26\x66\x3f\x4e\x11\xfb\x00\x4b\xb9\x25\xef\x64\x5a\xe3\x7a\xa2\x98\xd9\x77\x3a\x4e\x44\xe6\xa6\xd9\x1e\x96\xf6\x7b\xbb\xe0\x90\xe2\xb2\xda\xb6\x16\x95\x74\x88\xeb\x86\xaf\xfa\x8b\x7d\xd6\xca\xa6\x91\xb7\x0c\xa5\x4e\x3f\xdb\xff\x1a\x5e\x56\x5a\x35\x38\xdf\x16\x96\xcb\xdc\x6e\x9d\x9a\x24\x57\xf3\x28\xa9\x87\x8b\xc1\x4a\x3b\x9b\x16\x4f\xf5\x4b\xbe\x17\x34\x9d\xbc\xf3\x91\xd7\xe3\x1f\x47\x90\xf3\x8c\xfd\xea\x6d\x88\x8f\xa3\x9f\xb9\x85\xfe\xf7\x20\x48\xd9\xfd\x9a\x04\x3f\x1a\x41\xca\xee\xab\x9a\xe0\x37\x23\xc8\xc5\x67\x91\xcd\x55\xae\xdc\xef\x41\x66\xe5\x73\x95\x5d\xd8\xa9\x44\xf2\xf0\xcb\x38\x7c\x2a\x12\x14\x50\x49\x2d\x66\x61\x5b\x1b\xc7\x30\xc9\x99\xe5\x70\x6c\x3f\xfd\x8f\x63\x21\x2c\xe0\x60\xcc\x24\xa1\x8c\x60\xc6\x15\x94\x9c\x5a\x84\x59\x9c\x0b\x1b\x43\x6a\x6b\xe7\x5f\xff\xfc\x20\xd6\x43\x76\x2d\xef\xc4\x76\xbd\xaa\x83\xc4\x47\xf6\xa5\xbe\xc0\x0d\xeb\xbd\x4a\x6d\x50\x48\x37\x77\xa3\x42\xdf\x2d\x30\xa1\x3d\xd4\xab\x26\x73\x68\x1c\x16\x06\xe1\xa4\x0d\xe6\xe9\xc9\xc1\x96\x4e\x6f\x09\x57\x45\x55\xac\x4e\x78\x69\x8a\xcd\xc4\x25\x0b\x5e\x4a\x44\x41\xb9\xb8\xab\x36\x06\xeb\xd1\x6e\xdc\x73\x0a\xb9\x7d\xad\xa5\x1c\xff\x39\xcf\x71\xd8\xd9\xf7\xc7\x9e\xd9\x8e\xc8\x61\x9e\x2e\xd6\x69\x9d\xce\x4b\xab\xd4\xc0\x9b\x66\x79\xbb\xc1\xad\xd5\xbc\xdf\x09\x16\x4b\x95\xee\x25\x3e\xba\xe5\xfa\x93\xa8\x11\x93\xb2\x5b\xa4\xf6\x92\xdf\xf2\xf9\xdf\xdf\x3c\xf1\x3f\x40\x8d\xc7\xd1\xf7\x6e\xa1\x1f\xb3\xae\x7f\xe7\xba\xeb\xba\xf1\x3b\xb2\xee\xae\xff\xc4\xef\x21\x56\xf7\x0d\x28\x53\x88\xdd\xe6\x1a\xb8\x5f\x43\x19\xef\x3b\xe8\x7f\x12\x65\x3f\x65\x27\x11\x12\x00\x41\x24\x06\x4a\xdb\x0c\x20\xca\x8c\x72\x88\xe1\x40\x4a\x63\x01\x89\x88\x84\x96\x32\xc4\x30\xc7\x18\x68\x6c\xa2\x2c\xa8\xb9\xa2\x40\x18\x45\xa0\xa5\x21\xf8\xd7\x3f\x3f\x68\x3c\xf6\xbc\xe8\x77\xed\x7a\x93\x51\x9d\x9f\x42\x8b\x6c\x66\xad\x5c\x38\x5d\x94\xc4\x1a\xca\x5a\xbd\x52\xad\x2f\xb9\x37\xd3\x4d\x57\x2e\xeb\xc5\x13\
x76\x9f\x24\xd2\xeb\xf6\x7a\xa5\xfe\x3b\x3a\xcc\xea\xe9\x31\x0b\x3b\x0d\x1c\xf7\x20\xcd\x15\xad\x46\x74\xfa\x7d\x5a\xe6\xa9\xde\x5b\x99\x05\x0d\xdd\x01\xe5\x45\x80\x47\xb9\xfd\xc6\xdd\x0e\xb6\x45\x1f\x26\x2a\x9b\xa3\x98\xb9\xc5\xa5\x09\xd3\x3c\x0d\x7a\x8d\xc0\xb6\x53\x61\x2e\x49\xca\xfd\x41\xad\xcb\xec\x42\x95\x75\xba\x6d\x60\xeb\x41\xa5\x68\x2b\x6b\x33\xd9\xa7\xfa\xbc\x30\x72\x4e\xe7\x78\xdb\x94\x0f\xf6\x29\x34\x1d\x37\xb9\x81\xd9\xc4\xe0\x58\x73\x65\x45\xe6\x2b\xcc\x4c\x6a\xce\x56\x29\x77\xb3\xec\xf8\x49\xd0\xc8\x0c\x3f\xca\x10\xba\xfa\x75\x64\xde\x40\x99\x5a\x6c\x24\x67\x3d\xb3\xce\xd6\xd0\x1c\x0f\x8d\x68\x4e\xe8\x20\x50\x43\x3d\x5b\x6f\x50\x6f\xc7\x3c\x8b\xf8\xeb\x59\x63\x40\xca\xa9\xc3\xda\x9b\x9f\x67\xc5\x8b\x94\x9f\xfb\xf3\xf7\x59\x72\x2a\xef\x4b\xf9\x03\xe9\xbf\x61\x95\xf9\x40\x37\xf9\xa4\x97\x9c\x21\xc6\xd2\x8e\x40\x82\x20\xa4\x14\x93\x8a\x5b\xc6\xd8\x04\x5b\x42\x1b\xa4\x99\x06\x0c\x51\x66\x51\xcb\x38\x1c\x0b\x0b\x03\x2c\x2c\x8d\x24\x10\x18\x01\x22\xc0\x93\xe6\x4d\x1e\x13\xd5\xd8\x7d\xbd\x15\xb1\x3f\x14\x74\x73\x52\xac\x83\xc5\x32\x91\xee\xcd\xf2\x87\x91\x6b\xfc\xfd\x32\xdf\xed\x25\xd7\x79\xd0\xce\x2b\x58\x35\xbd\x71\x7d\xb6\xcb\x9c\x43\x47\xde\xcb\x50\x79\xc8\xed\x06\x1d\xe6\x77\x83\x76\xd7\x07\x8b\xa1\x10\x7a\xd1\x18\xed\xc3\xe3\x60\x4b\xf2\x69\xcb\x29\xc2\x6d\x63\xbc\x3f\x86\xcd\x5d\xe1\x30\xde\x06\x4e\x64\x25\x34\x2e\xe3\x01\xec\x6f\x66\x45\x87\xad\x9c\x5c\xcb\xe4\x17\xf9\xea\x31\x33\x99\xf9\xab\x4a\x93\x97\x64\x78\xce\x1d\x87\x80\x53\x1d\x1e\xd7\xcd\x9a\x9a\x56\x66\x35\x06\x82\x92\x7d\xa4\xda\x6f\xf2\x20\xac\x0d\xc7\x2b\x2f\x5b\xca\x8c\x76\x22\xd8\x77\xf6\x1f\x69\xe0\xf1\xf2\x8e\xdf\xcb\x6d\x1e\xf5\x25\x7f\xd9\x53\x53\x8d\x69\x2c\xb1\xcd\x1f\x39\x53\x78\x20\xbf\xdd\x3f\xc1\x6f\x4c\xc3\x07\xb1\x57\x37\x9c\x29\x2c\x2f\x21\x1c\xa9\x18\xfd\xef\x3b\x03\xb9\x83\xdf\x75\x01\x47\xe2\x0f\xf2\xeb\x5e\xf1\xeb\x7a\x11\xa9\x86\xf3\x5c\x67\xb1\xc5\x85\x49\x7f\x54\x2c\xa7\x1c\xe1\xaf\x06\x6a\x9a\xb0\x46\xed\xd6\x00\x85\x84\x4d\xea\xfe\xc8\xad\xd5\x4e\xfa\xc9\xe5\x0c\xa4\xf2\x3d\x11\x09\x0e\x11\x42\x41\x41\x88\x43\x28\x22\xc0\xe1\xc8\xd1\x4c\xd8\x98\x09\x85\x85\x23\xb5\xa2\x96\x60\x46\x08\x61\x73\x2a\xa0\x83\x34\x20\x5c\x33\x87\x60\x07\x59\x90\xcb\x93\x17\x20\xbe\x1d\x25\x63\x6a\xcf\x75\xd2\xb7\xd8\xda\x74\x9e\xc9\x5f\x44\xc9\xe7\xd2\x05\xf8\xdc\x3b\xcf\x08\xd4\xe8\x8e\xc9\x70\xac\xf8\x22\xd9\x9f\xf0\x74\x79\xb6\xcb\xed\x57\xfc\x90\x85\xc6\x6e\x54\xda\x8d\x9c\x1a\x4d\x61\xcf\xe3\x65\xd7\x49\x5c\x7a\xf6\xb6\x59\x91\x32\xdd\x41\xbc\x3d\x31\x94\xae\xd7\x3b\xbb\xf2\x52\x95\x96\x83\x69\x94\xc8\xe6\x6b\x09\x1b\xac\x9b\xb9\x14\x59\x46\xc7\xf4\xaa\xd6\x3e\xf8\xd3\x3a\x5a\xc9\x6d\x3e\xc3\x2a\x93\x7c\x09\x95\x7a\xde\x8c\x31\xb8\x69\x6e\x33\x79\x16\x94\x0e\x2b\x22\xd2\xf0\x50\x97\xa8\x13\xd6\xad\x6d\x76\xd8\xab\xaf\xb0\x95\x7f\x99\x35\x5e\xa6\x2b\xba\x9d\xb6\x3a\x4e\x2a\xb4\xd2\x49\x8d\xc2\x16\xa9\x6e\x53\x08\x75\x9a\x2d\xce\xe1\x2e\xd3\xeb\xa2\xfa\x24\xdb\xec\x05\x41\x32\x61\x17\xb6\x35\x25\xa2\xf2\xba\x92\xde\xcf\x9c\x7c\x35\xd4\xb5\xa0\xcf\xac\x3a\x1f\xb5\xd9\x32\x95\x18\x3b\xb5\x44\xc3\x9b\xd7\x72\xe9\xd6\x29\x1b\xfa\x40\x6c\x1b\xeb\xf6\xa0\x53\xa9\x0f\x47\xa3\xf9\xda\x9a\xb0\x06\x3e\xae\xc3\xd2\xd4\xde\xa0\xf9\xb2\x98\xc1\xed\xe5\x7c\xd0\xea\xef\x8a\xab\xcb\x94\x3c\xbc\x33\xa6\xaf\x3f\xbf\xb3\x2a\xdc\xea\x8b\xbc\xec\x89\x86\x2e\x74\x62\xde\x8c\x7f\x04\x65\x1f\xc8\x6f\xf8\xcd\xfc\xa6\xdd\x5f\x77\x70\xff\x26\xd4\x7a\xe1\x11\xde\x41\x9f\xbb\xb2\x75\x07\xfd\xf2\x15\xfd\x5b\x56\x99\xf0\x72\xb6\x94\xea\x5f\xd1\xfc\x96\x55\xf1\x1e\x7e\x33\x76\xae\xf5\x07\xf9\xbd\xec\x90\xef\x91\x8f\x20\xb1\x07\x7f\x50\x3e
\xfe\xd3\xe4\xf9\xb7\xfe\xfd\xea\x2e\xa3\xd3\xcf\x9d\x3d\xc6\x33\x6f\x84\x6d\x7c\xb1\xbf\x6e\xde\xe5\x7c\x9d\xfe\xa7\x6e\x8b\x12\x42\x68\x65\x04\x81\xda\x28\xa0\x88\x62\x36\xd2\x10\x12\x6c\x59\x18\x73\x68\x0b\x07\x00\x62\xdb\x10\x01\x0c\x19\xd2\x48\x70\x47\x00\x23\x00\xd4\x14\x49\x0e\xf5\x93\xe6\xf3\x98\x88\xef\xde\x6b\x3d\x17\x2f\x7e\x74\xa8\xe6\xeb\x95\x69\x83\xe6\x32\xde\x78\xe2\xbb\x1e\xeb\xcf\x7b\x85\xd6\xb4\xd4\xab\x4c\xc3\xd5\xe8\xb8\x4e\xcc\xbb\xed\x7d\x3d\xc9\x02\xf7\x83\xac\x37\xe9\x72\x79\xd4\x65\xfb\x2a\xea\x16\xe7\x01\xdd\x34\x92\x2d\x90\x5a\xa8\x63\x7d\xd0\x48\x85\x01\x22\xc3\xed\xa4\xb7\xaf\xa5\xfa\x66\x11\xac\x9a\x99\x09\x07\xa9\x63\x75\xb5\x48\x8a\x49\xae\x80\x6b\x09\x33\x0e\x0b\xf5\x42\x4b\xf5\x7a\xfc\xd8\x2d\x94\xe1\x10\x4f\xb2\xf6\xf4\xac\x19\x74\xec\xed\x3a\x01\x4a\x5b\x4b\x75\x7b\x32\x91\x8f\x02\xb7\x5e\x2c\xd8\xfd\x7a\x1b\xe5\xf7\xc1\xbc\x36\xda\xcc\x56\x66\xd5\x20\xa3\xc3\x10\xbf\xd6\xf0\xaf\xef\x0f\x6f\xf5\x3c\x3e\x4b\x62\x3b\x76\xf4\xff\x67\x90\xe3\x71\xfc\xf6\xfe\x04\xbf\xb1\xfd\x61\xfc\xd6\xc9\x1b\x56\xc2\x6e\x2c\x9e\x2f\x36\x2d\xbf\x6f\x25\xbc\x83\xdf\x11\x00\xd3\xdc\x1f\xe4\xd7\xbd\xe2\xd7\xe5\xdd\x3c\x1e\x78\x50\x95\x66\xb2\xdc\x68\x84\xa1\xdb\x84\xe9\x68\x1c\x35\xe4\x7a\x0c\xd5\xf0\x50\xab\x22\x1a\xa9\x6e\x36\x97\x3c\xab\x67\x31\x4d\xeb\x7b\xb2\xf6\x40\xee\x58\xc4\x06\x50\x52\x05\x2c\xc3\xb4\x8d\x85\xb0\xa4\x34\x04\x73\xcd\x20\x87\x02\x0b\xa0\x89\x43\x31\x01\x46\x3b\x00\x21\x0e\x31\x42\x0e\xa0\x86\x0a\x42\xed\x93\x15\x2d\x1e\xb1\x9e\x9a\xcb\xd1\xd0\x6b\xf6\xac\xcd\x60\xb7\x2f\x14\x06\xfd\x54\x2d\x2c\x76\x1a\xed\xc2\x01\x36\x7b\xb0\x94\x77\x6c\xb3\x68\x03\xd9\x32\xc9\x8b\xed\x37\x73\x9a\xb5\xef\xf8\xc8\x9d\x67\xf2\x17\x51\xf2\xb9\xbc\xec\x0f\xcf\xe5\xe5\x7d\xca\x2d\xd2\xb9\xec\x86\xe5\xa3\xeb\x4d\x72\x96\x93\xee\x81\x52\x67\xb2\x5d\x57\x32\x45\xbe\x19\xec\x13\xd5\xfc\x64\x5d\x4b\x96\xc6\x78\xd0\x6e\x96\x0b\xf5\x76\xaa\xaa\x45\x7b\x45\x3a\x5e\x6d\xc4\x86\x8d\x65\xdf\x6f\x5a\xde\x6a\xb7\xc9\xa4\x7b\x70\x48\x5c\xef\xb8\xeb\xfb\x33\xc3\xdc\x67\x1f\xba\xe2\x2c\xd5\xf5\x56\xa2\x9e\x21\xc4\xda\xec\x8e\xb5\x76\x91\x36\x66\xcb\xc6\x9e\x84\xbd\x6c\x2e\xd1\x6f\x56\x87\xc1\xb1\xdf\x61\xcb\xf6\xf4\x23\x6b\xda\xe7\xf6\x4d\xb1\x0b\x1d\x6e\x59\x4d\xce\xd9\x73\x32\xf2\x9d\x29\xf6\x3e\x9a\x3c\x90\xfe\x6d\x3e\x74\x9f\xcc\xfb\xad\x89\x50\x08\x41\xc8\x80\x46\x16\x73\x34\x66\x8e\x43\x95\x81\x52\x09\x26\x08\xa2\x8c\x13\xc3\xb4\xc5\x19\xb3\x91\xb1\x14\xd3\x80\x33\xec\x30\x49\x89\x03\x04\x87\x4f\xb3\xfd\x31\xd1\x57\xfc\xf5\x56\x5c\xca\x0d\x96\xf7\xb7\x6c\xc6\xb9\x16\x23\x1e\xdb\xed\xfb\x5e\x33\xe3\x34\x17\xfb\xd6\x72\xdc\x4f\x5b\x49\xbb\x9b\x3b\x54\x32\xeb\x15\x0d\xc8\x60\x53\xcf\x99\x4c\xfd\x50\x97\xb3\x9c\xda\xda\xbc\x3c\x4f\xae\x6a\xdb\x34\xaa\xd7\xc6\xdd\x51\xb5\x3c\xea\x8b\x62\xa1\xd5\xb5\x1b\x7d\x6f\xdf\xd2\x61\x8a\x18\x27\xfb\x34\x13\x33\xd1\xb6\x59\x1e\x8e\x75\xb9\x30\x1a\x4f\xf2\xc2\x94\x2a\x1b\xd1\x82\x93\x28\xa1\xd3\xf3\xe8\xe8\x54\xa6\x83\x45\x6b\x28\x7b\xa1\x4f\x3f\xba\x67\x36\x5e\xde\x99\xe5\xb7\xdb\x60\x43\xa6\x7a\x7b\xe1\xbe\xc6\xc6\xf7\xad\xb1\x0f\xe4\x97\xff\x09\x7e\x63\x3a\x01\x89\xbd\xba\x61\x8d\xcd\x5e\x8e\xba\x52\xb1\xb3\xf5\xef\xd3\x09\xee\xe0\x97\x0f\x0e\x33\xf0\x07\xf9\x75\xaf\xf8\x4d\x0f\xa2\x84\xd5\x4a\xea\xe9\xb0\xc0\xd6\x55\xb1\xc8\xa6\x60\x81\x0e\xa1\xad\xa6\x19\xab\xd9\x1c\x73\x38\x28\xe6\xc6\xbb\x97\x93\xce\x98\x4e\x50\xfd\x26\x9b\x31\x64\x8a\x20\x03\x95\xd4\x5a\x68\x60\x71\xe3\x58\x96\x4d\x28\x16\x16\xd7\x46\x69\x07\x30\xc1\x88\x8d\x30\x57\x18\x01\x65\x1c\x0c\x6d\x8b\x12\xa0\x6d\xad\x1c\xce\xac\x27\x94\x74\xee\x3c\x0f\xce\x9c\xc0\xec\x1d\x9d\x20\x76\x1
e\x1c\x7b\x7f\x39\x0f\xce\x2c\xac\x42\x7b\x50\xac\xe4\x17\x8c\xf7\xad\x55\x3d\x51\x4f\xca\x2d\x52\x6c\xed\x6d\x78\xe8\x4d\x60\x8a\x1f\xf3\xc7\x7c\xa9\xae\xfc\x11\xe3\x11\x6f\x97\x7b\xa9\xc1\x7e\x56\x40\xbd\xfc\xbc\x86\x96\x4b\x9e\x2e\x1c\x0f\xb3\x62\xaf\x9a\x98\x93\x5c\xaa\xfd\xd4\x0b\x30\x71\x3e\x29\xeb\xcf\x41\xd3\x59\x1f\x37\xdb\x41\x7a\xdb\x77\x55\xa7\xb1\x5b\x6e\xd1\xcc\x17\x51\xb7\x52\x04\x99\x9c\x19\xa4\xa5\xdf\xd2\x66\x3d\x9c\x5f\x18\xbb\xe7\x3c\xb8\x1a\xdb\x63\xdf\xb0\x2a\x3c\xaf\xad\xe3\x77\xa6\xca\x07\x28\xf6\x38\xfa\xf7\x66\x99\xa1\x97\x57\xb7\x48\xcd\x39\x33\x5d\xf5\x3a\x86\x2f\x5e\xde\x47\x99\x87\xd0\xaf\xab\x68\xbc\x79\x8b\xcc\xed\x52\x6b\x63\x41\xa1\x22\x10\x01\xa0\x4f\x29\xed\x10\x61\xc6\xc6\x36\xb3\x08\x23\x08\x23\xc3\x6c\xca\x0d\xc7\x46\x21\x8b\x4a\xce\xb5\xd1\x9a\x68\xa3\x84\x10\xc0\xc2\xc6\x3c\x49\x2d\x7b\x88\x17\xc7\x07\x52\x7b\x2e\x71\x89\xb8\x48\xad\x5b\x5d\x6e\x7b\x8d\xec\xb4\x89\x0a\xdb\x49\x13\xd4\x48\x54\x48\x46\xfd\xfe\xbc\xa7\xf1\x74\x9f\x23\x9d\x6d\x81\x15\x93\x8d\x52\x35\x83\xd9\xce\x2c\x16\x0e\x5e\x6d\xa6\x72\xd6\xcb\x4f\x51\xde\x35\x56\xcd\xdb\x97\x77\x48\xaf\x37\x24\x3b\xcc\xb4\x77\x48\xa2\x51\x18\x0e\xdc\xd2\x93\x64\xa9\x52\xb8\x6e\x8d\x3c\x3d\x4b\xd6\x8f\x1d\xa3\x72\xe3\x6c\xd4\x0c\x92\x11\x5c\xd2\x26\x2b\xfb\x87\x85\x9d\x20\x3a\x29\x36\xfb\x54\xed\xa3\x8c\x75\x9f\x94\xda\xd8\x5a\x70\x8b\xd4\x54\x2f\xfd\xf9\xc6\x54\xf9\x40\x6a\x1f\x47\xff\x3b\x35\x72\xe9\x00\x04\x05\x23\xd2\xb2\x28\x92\xdc\x28\x68\x09\x48\x15\xb4\x8d\x45\x25\x80\x9a\x3a\x0e\xb5\x98\x82\x8a\x29\x64\x43\x2c\x1c\x6c\x39\x10\x68\x87\x02\xc7\xb6\x84\xa3\xff\xf5\xcf\x0f\x6a\x3d\x64\xd6\xbe\x13\x71\x11\x5b\x6b\x62\xb6\x83\x98\xef\x11\xce\xd9\xcd\xcc\x74\x5d\x6d\x75\x33\xe5\x3e\x2e\x9b\x59\xba\x16\x89\x52\x3d\xe8\xe5\xec\xf6\xae\x32\x6f\xe5\xca\xdb\xf5\xb4\xde\x59\x74\xf2\xc3\x30\xb9\x98\x86\xe1\xc6\x12\xdb\x7e\xd8\x6a\xaf\x67\xd9\x6a\x55\x2f\xbc\xa8\x5e\x2c\x85\x8b\x75\xb5\x83\xb2\x47\x36\x2c\x46\x56\x27\x28\x9c\xb2\x91\xa4\xca\xcb\x48\xba\x7e\xad\x98\x6c\xa4\x0f\xa2\x61\x76\x22\x55\x9a\x0c\xfd\xb4\x2a\xf5\x2c\x54\x28\x0d\xdc\xfe\x80\xfa\x7e\x2e\x8d\x3f\xb2\xba\x7d\xd2\xf7\xc8\xbd\x6b\xd6\x9c\xfd\x92\xef\x98\x35\x0f\xa4\x7f\x93\xef\xd3\xcf\x59\xfb\x5f\xff\xf5\xbf\xff\xf7\x7f\xfd\xef\xff\xfd\x23\xc3\xd7\xfc\x87\x99\x2d\x7f\x54\xf8\x44\xff\xf7\x8f\xcd\xbc\xbf\xe4\x4a\x3f\xcf\xe2\xff\xf3\xa3\x79\x98\xeb\xff\xfe\xd1\x74\x53\x25\xef\x47\xc6\x6d\xba\xff\xe7\x47\x43\x0e\xf4\x84\xff\xf7\x8f\xf9\x46\x44\x43\xf9\x7f\x7e\x54\x77\x53\xbd\xfc\xef\x1f\x4f\x95\xfd\xd7\x2f\xb2\xf0\x6b\x55\x2f\x02\x11\xcf\xb8\x73\x6a\xd6\xea\x32\x56\xaf\x48\xd4\xdb\xb5\xbc\xc4\x20\xed\xdc\x5a\x30\x76\x7f\xab\xe5\x67\x0b\xcf\x0d\xe3\x52\xce\x36\xd3\xb5\x7a\x6a\x6e\xec\xf3\xff\x9d\x8f\xf5\x4b\x33\xd3\xd5\x4a\xa3\x59\x77\xf3\x95\xe6\x07\xcd\x74\x4b\x4d\xaf\xfe\xdc\x2b\xd5\x4a\x29\x8c\xd7\xf8\x5f\x3f\x7e\xfc\xf8\xe1\x66\x32\xb1\xda\x7e\x23\xf8\xa3\x56\xcf\x97\xdd\x7a\xf8\xa3\xe8\x85\x3f\xfe\xd7\xf3\xdb\xa1\xfa\xe7\xc7\xd3\x37\xa6\x7c\xa2\xdf\x62\x7f\xf5\xf2\xe1\xb1\x8c\xaf\xde\xe1\x7a\xf5\x1e\xcb\xbf\x31\x2a\xf8\xf4\xe9\xff\x0f\x62\x4f\xf0\xe9\x6b\x9c\xfd\x24\xf0\x2b\x53\xd3\x99\xd2\xaf\x70\x14\x69\xd5\xd7\xcb\x81\xe6\x4a\x2f\x57\xbf\x3e\xfd\xdf\xf3\xd3\x4a\x2f\xfe\xef\x43\xd8\xfd\xa5\xf2\xd7\x18\x7f\x87\xfa\x8f\x56\x25\x1f\xb4\xbc\x1f\xff\xeb\xe5\xaf\x5f\x6a\xc9\x63\xfa\xfb\x8b\x0d\xf8\x7d\x0c\x9e\xdf\xf3\xd5\xe0\x37\xee\x67\xc6\x3c\xb1\x7d\xfe\xcf\x83\xf8\x3d\x57\xf6\x1a\xa3\x31\x32\xbf\x72\x78\x7a\xf1\xca\x34\x99\xeb\x27\xee\x4e\xff\x3e\x88\xb9\x53\x5d\xaf\xf1\x76\x21\xf2\x2b\x6b\xc3\xf9\x3f\x3f\xe6\xb3\xe5\xfa\x77\xde\x9e\xe8\x
ad\x06\x8b\x8d\xde\xe8\x5f\x1e\x1e\xc5\x69\xac\xca\x57\x19\xbe\x26\xf9\xda\xa0\xbf\xc6\xf5\x6a\x23\x9e\xff\xf3\x38\x4e\x57\x1b\xf1\x06\x8f\x3f\xc9\xfc\xca\xdd\x52\xaf\x5e\x19\xee\xc5\x66\xb6\xdc\x4c\x86\x53\x33\x8b\x7d\x7c\x10\x93\x97\x0a\x5f\x63\xf4\x8a\xdc\xa7\x30\x6c\x25\xe7\xe7\x9f\xad\x62\x1f\x1f\xc4\xed\xa5\xc2\xd7\xb8\xbd\x22\xf7\x2b\xb7\x8b\x95\x5e\xbf\x2a\xeb\x4f\x8b\xb5\x5e\xad\xf9\x5a\xc7\x3e\x3e\x8a\xdf\x97\x0a\x5f\xe5\xf7\x57\x72\xbf\xf2\x7b\xfa\xfb\xab\xeb\xeb\x7a\xb9\x59\xad\xa3\xe1\x54\xaf\x62\x1f\x1f\xc4\xf0\xa5\xc2\xd7\x18\xbe\x22\xf7\xa6\x6a\x30\x5c\xad\x36\x7a\xf9\xcf\x0f\xbe\x5a\xe9\xb5\x9c\xa9\x57\xda\xb0\x37\xfa\x45\x51\x8a\x3f\x3c\xaa\x1d\xb1\x2a\x5f\x6d\xc9\x35\xc9\xd7\x50\x62\xa5\x17\xff\xfc\x58\xef\x87\x53\xa5\xf7\xaf\xb4\xe0\xc2\xfe\xa3\x79\x7f\x97\xf1\xbb\xb8\xbe\x52\x51\x7f\x7d\x7c\x10\xff\xbf\x56\xfa\x5a\x23\x5e\x21\xfb\x66\x4b\x9e\xbf\xfb\x7a\x73\x9e\xe7\x9c\xe0\x11\x9f\x4a\xbd\xfa\xc9\x7b\xbe\x92\xf1\xba\x1f\xb0\x9d\xae\x7b\x6e\xd3\x3b\x7f\xf5\xba\x9e\x1f\xd5\xca\x45\x8b\x6d\x35\xf2\x15\xff\x87\x58\x2f\xb5\xfe\xf1\xbf\x9e\xbf\xf1\xff\xfb\xd1\xc9\x79\x75\xef\xe5\xf9\xc7\xff\xf3\xff\xff\x01\xac\x9f\xe5\x77\x45\x53\xaf\xd6\xe7\xb5\xfc\xa9\x15\x37\x73\xf9\x6b\x35\x4f\x4c\x3e\xeb\x29\xbf\xb0\xb8\xd2\x51\x34\x9c\xf6\x4f\xb2\xf7\xcf\x0f\xb1\x39\x5c\x1e\xe6\xcb\xa1\xfc\x5d\x16\x9f\x86\x41\x1c\x56\x7a\x71\x33\x63\x2f\x35\x3c\xf1\x74\x11\x8c\x5f\xd8\x7a\x5b\x55\x7c\xfa\xba\xd1\xfa\x7e\x16\x7e\x56\x72\xe6\x22\x06\x30\x9f\x64\xe4\xf9\xcd\x7d\x8c\xc4\x2b\x79\x62\xe4\x57\x45\xf8\x93\x9c\xac\xe4\x5c\x4f\xb7\x77\x72\x12\xaf\xe4\x89\x93\x95\x9c\x7f\xb1\x43\x2e\x0b\xea\xdd\x9c\xc4\xeb\x79\x66\xe6\xa7\x96\xf0\x2b\x33\x7c\xb5\x7e\x9b\xa1\x18\x70\xdc\xc7\xd1\x75\x45\x4f\x2c\x5d\x61\xe3\x87\x7d\x54\x9b\xad\xd6\xfd\xa5\x6e\x04\xa5\xd3\x5e\x58\xf0\x95\xfe\xa1\x36\x93\xf9\x0f\x39\x9b\xcc\x23\xbd\xd6\x27\xb2\xff\x6f\x00\x00\x00\xff\xff\x7f\x3e\xae\xf4\xbf\x8c\x03\x00") + +func kahunaCoreSqlBytes() ([]byte, error) { + return bindataRead( + _kahunaCoreSql, + "kahuna-core.sql", + ) +} + +func kahunaCoreSql() (*asset, error) { + bytes, err := kahunaCoreSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "kahuna-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfd, 0x62, 0x89, 0xc4, 0xf2, 0xd7, 0xac, 0xdb, 0xb2, 0xad, 0x87, 0xbf, 0x78, 0xf6, 0xb1, 0xe, 0x80, 0x32, 0xa, 0xa7, 0x60, 0x93, 0x97, 0x10, 0x2f, 0x93, 0x5b, 0x7f, 0x43, 0x3d, 0xed, 0xe2}} + return a, nil +} + +var _kahunaHorizonSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\xf9\x6e\x2a\x49\xb6\x37\xfa\x7f\x3d\x05\x2a\xb5\xe4\xaa\xcb\xee\x22\xe6\x61\xd7\xed\x2b\x25\x90\xcc\x24\x90\xcc\x1c\x1d\x59\x39\x44\x42\x32\x25\xf3\x74\xf4\xbd\xfb\x15\x83\x6d\x8c\x01\x83\xb7\xbd\x4f\x77\xd5\x67\x75\x97\xd8\x44\xc4\x9a\x62\xfd\x62\xc5\x8a\x89\x7f\xfe\xf3\x97\x7f\xfe\x33\x54\x0c\xa6\xb3\xf6\x44\x95\x4b\xb9\x90\x6b\xcd\x2c\xdb\x9a\xaa\x90\x3b\x1f\x8c\x7e\xf9\xe7\x3f\x7f\xd9\x96\xc7\xe7\x83\x91\x72\x43\xde\x24\x18\xbc\x54\x58\xa8\xc9\xd4\x0f\x86\x21\xf9\x07\xfb\x03\x1e\xd5\xb2\xd7\xa1\x51\xfb\x71\xdb\xfc\xa4\xca\x2f\x65\xbd\x12\x9a\xce\xac\x99\x1a\xa8\xe1\xec\x71\xe6\x0f\x54\x30\x9f\x85\xfe\x15\x02\x7f\xee\x8a\xfa\x81\xd3\x7b\xfb\xad\xd3\xf7\xb7\xb5\xd5\xd0\x09\x5c\x7f\xd8\x0e\xfd\x2b\xf4\x50\xad\x24\xc4\xc3\x9f\x4f\xe4\x86\xae\x35\x71\x1f\x9d\x60\xe8\x05\x93\x81\x3f\x6c\x3f\x4e\x67\x13\x7f\xd8\x9e\x86\xfe\x15\x0a\x86\x07\x1a\x1d\xe5\xf4\x1e\xbd\xf9\xd0\x99\xf9\xc1\xf0\xd1\x0e\x5c\x5f\x6d\xcb\x3d\xab\x3f\x55\xaf\xd8\x0c\xfc\xe1\xe3\x40\x4d\xa7\x56\x7b\x57\x61\x69\x4d\x86\xfe\xb0\xfd\xe7\x41\x76\x65\x4d\x9c\xce\xe3\xc8\x9a\x75\x42\xff\x0a\x8d\xe6\x76\xdf\x77\xbe\x6d\x95\x75\xac\x99\xd5\x0f\xb6\xd5\xb4\x5c\x45\x37\x43\x15\x2d\x9a\xd3\x43\xe9\x44\x48\x6f\xa4\xcb\x95\x72\xa8\x60\xe4\x9a\x87\xfa\x7f\x74\xfc\xe9\x2c\x98\xac\x1f\x67\x13\xcb\x55\xd3\x50\xdc\x2c\x14\x43\xb1\x82\x51\xae\x98\x5a\xda\xa8\x1c\x35\x7a\x5d\xf1\xd1\x09\xe6\xc3\x99\x9a\x3c\x5a\xd3\xa9\x9a\x3d\xfa\xee\xa3\xd7\x53\xeb\x3f\x7f\x06\x43\x67\xf7\xe9\x67\xb0\xdc\xfa\xd5\xcf\x53\x70\xcf\xed\x7e\xed\xf6\x02\x6e\x1d\xf9\x1a\xb3\xa3\x5a\x2f\xc4\x77\xd5\xd3\x46\x5c\x6f\x1c\xd5\x3c\x90\x9d\x4d\xe6\xd3\xd9\x63\xdf\x1f\x6e\x45\x5b\x3f\xce\xd6\x23\xf5\xe8\x04\xae\x7a\xf4\xa7\xd3\xb9\x9a\xdc\xd5\xf8\x03\x4d\x5e\x0c\xf1\x5e\x33\xcb\x55\x8f\xca\xf3\x94\x33\xdb\x35\x0c\x26\xae\x9a\x3c\xda\x41\xd0\xbb\xde\x70\xea\xb7\x87\x6a\x72\xcc\xeb\x7a\xfd\xc0\xf3\x0e\xd5\xa7\xaa\xdf\xdf\x02\x7b\x67\xd2\x7b\x1a\xbd\x67\x82\x97\xda\x7d\x6b\x3a\x7b\x1c\x04\xae\xef\xf9\xca\x7d\xec\x2b\xb7\x7d\x7b\x5b\x7b\xbe\xbe\x51\x3a\x7f\xe8\xaa\xd5\xe3\x91\x1b\x0e\xa7\xd6\x6e\x48\x9a\x3e\x06\xc3\x77\x2d\xff\xba\x75\x30\x52\x13\xeb\xb9\xed\xd6\x5b\x7e\xa0\xf5\x8b\x24\x3f\x24\xc5\x7d\x6d\xf7\x56\xde\x35\x9c\xaa\xf1\x5c\x0d\x9d\xbb\x54\x38\x6a\x3e\x9a\xa8\x85\x1f\xcc\xa7\x87\xef\x1e\x3b\xd6\xb4\xf3\x41\x52\x3f\x4e\xc1\x1f\x8c\x82\xc9\x76\xe0\x3c\x44\xbf\x8f\x92\xf9\xa8\x2d\x9d\x7e\x30\x55\xee\xa3\x75\x97\x2f\x3e\xe1\xf9\x03\xae\x74\x00\xf3\x07\x84\x3e\x6e\x69\xb9\xee\x44\x4d\xa7\xd7\x9b\x77\x66\x13\x77\x37\x43\x78\xec\x07\x41\x6f\x3e\xba\xa1\xf6\xe8\x3d\x91\xf6\xb5\x2c\x7f\x72\x27\xe1\xa7\xf0\x78\x73\x83\xed\x50\xb9\x1d\x33\x6e\xab\xfa\x44\xfe\x03\x4d\x6e\x1a\x5d\x9f\x1a\xed\x82\xe0\x1d\x4c\x8e\x83\xe6\x7b\x2d\x46\xdb\x06\x9d\xd9\xbb\x3d\x30\x7d\x35\x00\x6d\xc3\xd7\xfb\x2d\x0e\x38\xbd\xa5\x72\xb0\x97\x23\x78\xb7\xa2\x3f\x9d\x3d\xce\x56\x8f\xa3\xf7\x49\x6e\x6b\x06\xa3\x5b\x6b\xaa\x5b\xab\x3d\x45\xd3\xeb\x95\xd5\x6a\xf4\x78\x3c\xbb\xb8\x31\xde\x9f\x69\xb6\x9d\x5e\x5c\x6f\x64\xaf\x6f\x0a\x86\x5b\xfb\xbe\x3b\x62\xde\x1a\xf8\xf7\x42\xde\xa8\xd5\x73\xe5\xf7\x75\x79\x1e\x6e\xfc\xa1\xd7\xdf\x05\xad\x47\x57\x4d\x67\xfe\x70\xf7\xf9\xc6\xb6\x9d\x60\xa0\x1e\xdd\x60\x60\xf9\xb7\xb6\xd8\x26\x4c\xc7\xd3\xcc\xa1\x35\x50\xb7\x4c\x33\x8f\xe6\x67\x57\xa6\x99\xc7\xb3\xb8\xd1\x8d\x13\xd8\xfd\xd4\xe5\x0a\xd1\xc3\xdc\xe6\x56\x7a\x3d\xb5\x7e\x5c\x58\xfd\xb9\x7a\xdc\x8e\xeb\xea\x0a\xe1\x93\x9a\x37\x73\x38\x33\x65\x7a\x1c\x59\x93\x99\xef\xf8\x23\x6b\x78\x75\x1e\xfe\x5e\xd3\xbb\x65\x78\x9e\xf2\xdc\x2b\xc1\xf9\x86\x77\xf3\xdf\x79\xf
c\x2d\xfc\xf6\x15\xbf\x9c\xfe\x1e\x81\xbb\x4c\x65\xff\x71\x97\xb9\x1c\xb2\xb8\x1d\x82\x1f\x6f\x94\xa0\x1d\x4c\x46\x8f\x03\xbf\x7d\x98\x51\x5e\x11\xe1\xa4\xe6\xcd\x3a\x9e\x8c\x81\x57\x38\x9c\x8e\x96\xb7\x72\xb8\x3f\x39\xbc\x99\xf2\xd3\x80\x72\x48\xa4\xae\x91\x3f\xa9\x7a\x37\x8f\x5b\x68\xdf\x2d\xf7\x76\x20\xbc\x85\xf0\x6e\xc0\xbc\x46\xfd\xd6\x41\x61\xdf\x3a\x56\xc8\x55\xf3\x46\xc8\x77\xf7\xbc\xe3\x7a\x42\xab\xe6\x2a\x37\xd2\xbe\x00\xf6\x4f\xa0\x7c\x80\xd9\x75\x4a\xbb\x7f\x5d\x20\x74\x34\xf2\x5f\xaf\xb8\x1f\xcd\xaf\xd7\x39\x19\x98\xaf\x57\x3e\x97\xc0\x1e\x5a\x94\xf5\x52\x55\x37\x62\x1f\xe8\xad\x6d\x68\x9c\xaa\xf1\xdd\x9c\x5f\x11\xb9\xb9\xb5\xab\x6e\x49\x35\xac\x76\x7b\xeb\x01\xcf\x59\xe1\xec\xfd\x39\xe4\x53\x33\x7b\xee\xf4\xd4\xec\x75\x76\x70\x93\x58\x8f\x0c\x00\x00\x0e\x2d\x12\x55\x23\x56\x49\x17\x8c\x33\xbd\x1f\x3c\x0e\xfc\x7e\xdf\x9f\xfe\xb6\x4d\x5b\xa6\x33\x6b\x30\x0a\x2d\xfd\x59\x27\xb4\xfd\x67\x68\x13\x0c\xd5\xb7\xd0\x70\x3e\x50\x13\xdf\xf9\xfd\xc3\xc4\x82\xf9\xec\x0a\xbd\x5b\xa1\x73\xbb\x6f\x5c\x88\x90\xf7\x78\xc6\x79\x12\xb7\xb5\x3d\x24\xd1\xb7\x55\x3e\x64\xcc\x37\xeb\x76\x88\x96\xf7\xe8\xb2\x6f\x72\x63\xdd\xc3\xe8\x79\xbb\x3c\xcf\xb3\xe1\x1b\x24\x3a\x89\xb7\xd7\x2b\x9f\x84\xce\xeb\x95\x6f\xaf\x78\x12\xd3\x6e\xac\xbd\x0d\x26\xb7\x55\xbd\xd7\xbf\xfa\xfe\x78\xee\xbb\xfe\x6c\xfd\x38\x0a\x82\xfe\xed\x43\xcf\xf3\xc0\x75\x9e\x80\x96\x4c\x9a\x7a\x52\xab\x9c\x21\x32\xf0\x87\x8f\xa3\x89\xef\xa8\xdf\x0e\x48\xfc\xaf\xff\xfe\xfd\x86\x56\xd6\xea\x03\xad\xfa\xd6\x74\xf6\x9b\x35\x5c\xab\xfe\x6e\xb7\xe6\x86\x16\x9e\x3f\x39\xdb\xe4\xf2\xb0\xf3\xac\xcf\x76\xc4\x7c\x91\xee\x79\xa0\x79\x11\xf4\x0a\x8d\x27\xed\x7e\x80\xc6\x6e\xf1\x77\xdb\xfc\x45\xf8\x6f\xa1\x7b\x14\xd9\xa9\x7e\x03\x05\xbd\x51\xd1\x8d\xf2\x09\x89\xfe\xa8\x3d\x1d\xf7\x9f\x70\x1b\x4b\xe9\x79\xed\x0d\x87\x3f\x7f\xd9\x6f\xd4\x19\xd6\x40\x7d\x7f\xfa\x2e\x54\x59\x8f\xd4\xf7\x43\x93\x3f\x43\x65\xa7\xa3\x06\xd6\xf7\xd0\x3f\xff\x0c\x15\x96\x43\x35\xf9\x1e\xfa\xe7\x6e\xff\x2e\x66\xea\xdb\xfe\x3a\x50\x7e\xa2\xf7\xcb\x2b\x8a\xaf\x0b\x0f\x84\x63\x85\x7c\x5e\x37\x2a\x57\x28\xef\x2b\x84\x0a\xc6\x6b\x02\xa1\x74\x39\xf4\xf0\xb4\x33\xf7\xf4\xdd\x74\x47\xe4\xe1\x94\xf3\x93\xfa\x07\x9e\xcf\x16\x7a\x57\x9f\x57\xb6\x34\x0a\x95\x13\x7b\x86\xea\xe9\x4a\xea\x59\xac\xe3\x2d\xba\x57\xec\x5f\xa8\x9c\x08\x72\x8f\xf2\x6f\x88\xec\x0c\x50\xcc\x45\x46\xed\x72\x29\x17\x1a\x4d\x02\x47\xb9\xf3\x89\xd5\x0f\xf5\xad\x61\x7b\x6e\xb5\xd5\xce\x0c\x37\x6e\x29\x1e\x8b\xfb\xbe\xa3\x1d\xc4\x7f\xf2\xd5\x17\xf9\x9f\xfa\xf6\x9c\x2d\x9f\x3d\xfb\x5d\xfa\x21\x53\xaf\x54\x4d\xa3\x7c\xf4\xdd\x2f\xa1\x50\x28\x94\xd3\x8c\x64\x55\x4b\xea\xa1\x9d\xf6\xf9\x7c\x75\x3f\x0e\x96\x2b\x66\x3a\x56\xd9\xd5\xd0\xca\xa1\x7f\x3c\xfe\x23\x54\xd6\x73\x7a\xac\x12\xfa\x07\xdc\xfe\xeb\xb4\x37\xde\x05\xe2\x8f\x69\xf7\x1e\xf9\x4f\x53\x0e\x9d\x53\xee\x96\x91\xea\xc7\xf4\xbb\x81\xc3\xb3\x8a\xcf\x5f\x7d\x48\xc3\xdf\x7e\x09\x85\x62\x5a\x59\x0f\xd5\x53\xba\x11\xfa\x07\xfc\x2f\xf8\xdf\x91\x7f\xc0\xff\x42\xff\xfd\xff\xfd\x03\xed\x3e\xa3\xff\x42\xff\x1d\xaa\xec\x0b\x43\x7a\xae\xac\x6f\x8d\xa2\x1b\xf1\xdf\xcf\x5a\xe6\x86\x38\xf0\x83\x96\x79\x9f\xc3\x57\x5b\xe6\xff\xfd\x88\x65\xde\xc6\xd4\x83\x1d\x9e\xe3\xf0\x6d\x86\x78\x09\xdb\x6f\x28\xee\x24\x0e\x85\xca\x5b\x5b\x85\xfe\xf5\x32\x02\x7c\xdb\x7f\x5d\x69\x16\xf5\xd0\xbf\x8e\x11\xf1\xfb\x39\xd4\x7e\xaa\x8c\xa7\x04\x4f\x44\x7c\x82\xf1\xed\x12\x9e\x9d\x02\xfd\xa8\x94\xe7\x88\x9e\x48\xfa\x0a\x90\xaf\xc5\x7d\xf1\xb2\xb7\xd2\x9e\x9b\xe6\xfd\xb0\xb4\x67\x88\x9e\x4a\x7b\x0c\x92\xab\xd2\x6e\x23\x97\xab\x3c\x6b\xde\x9f\x3d\xce\x2c\xbb\xaf\xa6\x23\xcb\x51\xa1\x7f\x85\x1e\x1e\xfe\x7c\x5d\x
ba\xcd\x22\x1f\x03\xdf\x3d\x3a\x6d\xf3\x4a\xd7\xe7\xc9\xf7\x41\xbf\x1d\xba\x6e\xd3\x6d\x0f\xc4\xe7\x15\xab\xbd\x2e\x2f\xeb\xec\x21\xa7\x63\x4d\x2c\x67\xa6\x26\xa1\x85\x35\x59\xfb\xc3\xf6\x6f\x94\xfd\xbe\x9b\x29\x18\xd5\x5c\x6e\xaf\x9f\x6d\xf5\xad\xa1\xa3\x42\xb6\xdf\xf6\x87\xb3\xd3\xc2\xfd\xbe\x7e\xdf\xb7\x6c\xbf\xef\xcf\x7c\x35\x3d\x5f\xef\xe9\x78\xc2\x0d\x15\xf7\xbb\xdc\x8f\xc3\xf9\xc0\x56\x93\xf3\x95\x86\xf3\xc1\xe3\x74\x6e\xab\xe1\x6c\xb2\x25\xe4\x0f\x67\xaa\xad\x26\x27\x95\xce\xee\x60\xdc\xa4\xb1\xd7\xb7\xda\x97\xa8\x1e\xed\x6d\x9c\xa1\x85\xd1\x29\xad\x81\x35\x9d\xa9\xc9\xe3\x52\xf9\xed\xce\x2c\x34\x1d\x58\x5b\x3b\x9c\xea\x33\xeb\x4c\xd4\xb4\x13\xf4\xdd\xc7\x7e\xb0\x7c\xbf\xd2\x40\xb9\xfe\x7c\xf0\x7e\xbd\x8e\xdf\xee\x5c\xaa\x75\xee\x30\xc7\x1b\x95\xdf\xe2\xee\x75\xce\xf8\xa3\x0e\xb9\x5f\xee\xdc\x7b\xe5\x61\xf9\xa8\xa7\xd6\x67\xec\x0a\x29\x38\x35\xec\x9d\x5e\x3c\xb4\x06\xea\x4c\x45\x46\x4e\x2b\xee\x56\xf8\xce\xd4\x94\x6f\x24\xf8\x51\x13\x3e\x25\xe9\x3f\x6c\xc5\xa7\xc5\xee\x1b\xe0\xfd\x56\xdf\x7d\xe3\x9b\xaa\x1e\x9c\xf8\x3c\x30\xa6\xa3\x60\x38\x0d\xce\x11\xa2\xec\xf7\x9d\x15\x0e\xc2\xef\x57\x05\xdf\x2c\xbf\xdb\xeb\xc7\x27\x12\x05\xe3\xad\x6e\xd5\x72\xda\x48\x86\xa2\x15\x53\xd7\x7f\x3b\xd4\x7b\x6b\xd9\xa3\x75\x92\x0f\x1b\xf5\x68\x6f\x62\x6f\x4f\xdf\x3d\x3f\x08\x59\x83\xad\x84\x6f\xf5\x3d\x33\x56\x3d\x0f\xc0\xe7\x4d\xb7\x1f\x6f\x2e\xc1\x39\x18\xf4\xcf\x18\x15\x51\xfa\xfb\x15\x27\x3b\x5d\x5f\xfa\xa8\x39\x4e\x37\x83\x0e\x2e\xf6\xbc\x87\x75\x41\xa3\x97\xfd\xae\x73\x60\x7e\x33\x48\x1e\x6f\x84\xdd\x84\xe6\x83\xed\x67\x6a\x75\x2e\x34\x5c\x34\xf7\x5b\x3b\x9d\x2e\xda\x7d\xd4\x4e\xa7\xdb\x72\xcf\xae\x73\x46\x44\x6b\x34\xea\xfb\xbb\xd3\x47\xa1\x4b\xeb\xd2\x6f\x05\xbd\xb4\x24\xf9\xb4\xc0\x71\x58\xcb\xbc\x4d\xe6\xe7\x95\xcf\x0b\x54\x0f\xf3\x1a\xcd\xac\xec\x97\x08\xe0\xee\x8b\xb4\x11\x33\xf5\x5d\x3e\x1f\x6d\x1e\xbe\x32\x0a\xa1\x7c\xda\xa8\x69\xb9\xaa\xfe\xfc\x6f\xad\xf1\xf2\xef\x98\x16\x4b\xe9\x21\xf8\x9e\x32\x1f\x36\xfb\x29\xa1\x37\x90\x3d\xec\x12\x85\x86\x6a\x35\x5b\x58\xfd\xdf\x1e\x2e\x68\xfc\xf0\xfd\xfb\x44\xb5\x9d\xbe\x35\x9d\xbe\xf1\xb5\xfd\xa9\xab\xf3\x23\xe4\x53\x47\x3d\x4b\xe2\xf4\x2d\x7f\xb0\x9d\xee\x3d\x1e\xe6\x4d\xd3\xd0\x6f\x03\x6b\x38\xb7\xfa\xfd\xf5\x96\x94\x72\x7f\xbf\xd8\x0b\x6f\xdb\x7e\x5d\x7f\x9c\x35\xe3\x39\xe1\x2f\x8c\x81\x97\x2d\x7b\x51\x8b\x63\x1b\xef\x4d\xfb\xa6\xea\xe3\x29\x62\x8e\x03\x47\xd5\x48\x97\xaa\x4f\xf1\xe3\xd7\xd7\xc7\xe4\xce\x30\xdd\x1d\xb5\xfb\x75\x1b\x4c\xae\xa8\xb7\x0f\x2b\xf6\x6c\xa2\x54\xe8\x37\xdf\xfd\xfd\xcf\x8f\x33\x3b\xa7\xcc\x5d\xec\xcf\x11\xf8\xfd\x52\x57\xbd\xac\xb1\x5f\xec\xb4\xb7\x55\x2f\x85\xb2\x8b\x12\x9e\x69\x71\x6b\x8f\x5c\x13\x70\xdf\x37\xd3\x57\xd6\xb9\xaa\xd0\x2b\x3b\x9d\xd5\xeb\xdb\x55\x25\x5e\xfa\xf5\x43\xb2\x1e\x73\xfa\x44\xa1\x2f\x76\xee\xf1\x0e\xc8\xbb\xdd\xfb\xfa\x60\xf2\xcf\xeb\xe0\xeb\x42\x9e\xeb\xe2\x77\xd4\x3a\x6b\xaf\x13\xed\x7e\xa8\x9b\xdf\x97\xf8\x35\xb7\x4f\x16\x7e\x17\x2a\xd2\x46\x59\x37\x2b\xa1\xb4\x51\x29\x5c\x1b\x18\x76\x43\x76\x39\xf4\x1b\xfc\x16\x7a\x00\x87\x3f\xc8\x85\x40\xcc\xb3\x3d\x85\xb1\x54\xd0\xa3\x0e\xc5\x04\x72\x87\x79\xca\xf5\x14\x72\x00\x55\xc2\x56\x0e\x24\x18\x60\x48\xb0\x72\x08\xb3\xb1\x90\x02\xda\x40\x3a\xd8\x93\x0f\xbf\xff\xf9\xcb\x61\x05\xee\x65\xf1\xfc\x8f\xa9\xba\x75\xf8\xfe\x16\x82\xdf\x42\xb3\xc9\x5c\xfd\xfe\xe7\x36\xe2\x55\x3a\x2a\xf4\xb2\x7f\x1d\x39\x3e\xe8\x10\xb2\x26\x2a\xd4\x0e\xb6\x93\xe1\x59\x10\xb2\x55\x68\x3e\x9c\xa8\xbe\x35\x53\xee\xf6\xdf\xcf\x1c\x9e\x96\x16\xa6\xdf\x42\xf6\x7c\x16\xf2\x67\x21\x37\x50\xd3\xe1\xc3\x2c\x34\xb0\x66\xdb\x40\xeb\x05\x93\xd0\x6c\x97\xbc\xb7\xcf\x1a\xee\x2a\xfc\x9e\x4d\
x88\x84\x20\x12\x50\x29\xe8\xb7\x10\xfc\xfd\xcf\x8f\x53\x12\x54\x48\x89\x05\x13\xf2\x32\xa1\x77\xdc\xe4\xad\x50\xe4\x87\x69\x3d\x8b\x25\xf6\xa4\xce\x4f\xb5\xf6\xfb\xe4\x3f\x3c\xd1\xda\x9f\xc8\x79\x9e\x15\x5c\x9b\xfe\xef\xf2\x83\x5b\xf2\xca\xaf\xcb\x16\xae\xcc\xa3\x5f\x1d\x35\xf8\xa4\x59\xf4\x31\xcd\x9f\x36\x87\xbe\xa6\x48\xa8\x50\x37\xf4\x78\x28\xda\x7c\x47\xa3\xfd\x09\xab\xeb\x0a\x3d\xd3\x3a\x29\xfe\xc3\x77\x2f\xc9\xf6\x74\xfe\xe3\x47\xbd\xee\x40\xe7\x24\xf0\x1d\x2d\x74\x5c\x0d\x7a\xef\xcf\x7f\x7e\xdd\x9d\x28\xff\xf5\x82\x37\x5f\xc9\x73\x5d\x35\xb3\xfc\xfe\x34\xd4\x9d\x06\x43\xfb\xb2\xb3\x3d\x1d\x9a\xf9\x51\x3b\x1c\xe8\x1c\xec\xf0\xb4\x54\x7a\x41\xb6\xa3\x5b\x3a\x37\xa1\xf0\xdc\x05\xa1\xf3\x0d\x0f\x66\x39\x1e\x9d\x76\xe9\xf8\x93\x1c\x4f\xa9\x01\x38\xe1\x70\x34\xc8\xde\x54\xff\xf9\x96\x4e\xe8\xca\x91\xab\xd3\x36\x13\xb5\x8d\x33\xef\x34\xda\xd7\x9d\x8f\xdc\x9b\xeb\x3e\xbb\xce\xe1\x9f\x27\x17\x98\xde\xe8\x02\xdf\x2c\xe3\xcc\xac\xfe\xa3\x13\xf8\xc3\x0b\x0b\xdf\x9e\x52\xbb\x33\x2e\x17\xd6\xd9\xad\xa9\x7a\xf4\xd4\xa5\xbe\xde\x15\x4f\xd4\x54\x4d\x16\x97\xaa\x0c\xac\xd5\xe3\x6c\xf5\xb8\x5b\xca\xf1\x37\x97\x6a\x8d\x26\xc1\x2c\x70\x82\xfe\x45\xbd\x4e\xfb\xe8\xc9\x59\x94\xe5\xaa\xc9\x2e\x77\x3b\x2c\x07\xce\x1d\x47\x4d\xa7\xde\xbc\xff\x78\xd1\x51\x0e\x8a\x5b\x7e\x5f\xb9\xef\xd5\x3a\x88\x7e\xc1\x85\x2e\x43\xef\xc2\x59\xb7\x1f\x45\xe2\x85\x33\xaf\xef\xc4\xc5\xfb\x33\xb2\xcb\x63\xdc\xbd\x2a\x7f\x6e\xa8\xbb\xca\xe3\x67\x85\xbe\xbb\x14\xfd\xc1\x50\x78\x95\xd7\xdb\xd0\x78\xbe\xfa\x95\x50\x79\x74\x12\xf4\xd3\x7c\xf3\xbd\x15\xec\x5b\x32\xc7\xdd\x62\xa5\xb3\x57\x65\x17\x25\x7f\x30\x48\x1e\x46\x87\x60\x3e\x71\x9e\xef\xc8\x5d\x08\x4f\x4f\x43\xce\xc3\xc3\xf7\xef\x97\x57\xd9\x2f\xe3\xe0\x70\x84\xf9\x47\xcd\x79\xb8\x32\x7f\xef\x9a\xca\xf5\x39\xc5\x61\xd8\xfc\x48\x84\xdb\x1d\x55\xbf\xc8\xf6\xe4\xc2\xfe\xb5\x4a\x87\x37\x04\xae\x55\xd9\x2f\xb3\x9f\xad\xf0\xf6\xe9\x83\x77\xea\x5d\x65\xf7\x5c\xeb\x0a\xc7\x9d\x48\xfe\xf4\x70\x6b\x3d\x64\x07\x41\x5f\x59\xc3\xa7\xb8\xe5\x3b\xea\x71\xf8\x2a\x46\xef\xbf\x7b\x1d\xb7\x5f\xae\x72\x3e\x9e\x44\xf4\x57\x97\x49\x4f\x0b\x8f\x2e\x63\x9c\x7d\x20\x61\x27\xf5\xe3\xee\x09\x8d\x50\x2c\xa5\xc7\xb2\xa1\xdf\x7e\x3b\xb6\xe0\xff\xf7\xaf\x10\xf8\xfd\xf7\xf7\x68\x9d\x6b\xff\x64\xb5\xff\xf7\x8d\x21\x6f\xa0\xf7\xca\xa8\x27\xe4\x4f\x2c\xbe\x97\xf0\x97\x4b\xe9\xe3\xa7\x62\x69\x7f\x7c\x7f\x87\xa8\x17\xb7\x3f\x74\xf9\x30\x98\x85\x86\xf3\x7e\x7f\xab\xdb\x59\x37\x3d\xae\x70\xc9\xb7\xde\xd4\x79\x46\xe1\x1b\xf2\x8b\xa0\x3f\x1f\xa8\xa7\xc3\x1b\x67\xa9\x5f\xa9\x62\x2d\xda\x67\xbf\xef\xf8\xed\xce\xe3\xf0\x72\x91\x7b\xb6\xa8\x1f\x2c\x2f\x34\xda\x96\x9c\x6f\x73\x7a\xf5\xe2\x9c\x09\x76\x75\xce\x13\xde\x15\x9d\xa7\xbc\x1b\x9b\xde\x23\xbd\xaf\x74\x9e\xf6\xbe\xec\x1c\xf1\x5f\x42\xa1\xa2\x99\xce\x6b\x66\x33\x94\xd5\x9b\xaf\x3d\xfd\xdb\x9b\x5e\xfd\xf6\xe2\x25\xaf\x36\x92\x0b\x66\xc8\xd4\x8b\x39\x2d\x76\x74\x88\xed\xe8\x96\xc6\xd5\x39\x7d\x68\x36\x99\xbf\x48\xfd\x3c\x69\xff\xfd\x97\xd0\xf3\x19\xb7\x83\xb6\x5a\x39\xf4\x8f\x7f\xfc\x12\x0a\x45\xf5\x64\xda\xd8\x01\x6e\x5f\x21\xe4\xfa\x8b\xdf\x1c\x6b\x3a\xfb\xed\x37\xb5\x9a\x6d\x83\xd3\x6f\x6a\x14\x38\x9d\xfd\x0b\x40\xb3\xdf\x43\xff\x4f\x08\x6e\xbd\xfc\xf7\x90\xf5\x34\xe3\xff\x7d\xcf\xf6\xf7\xff\x67\xfb\xdf\x3f\x7f\x09\x85\x74\x23\xfe\xe7\x2f\xff\xf8\xc7\xcb\x01\xba\xe7\xb3\xb1\x4f\x87\xe8\x3e\xaa\xed\xe7\xaa\x7a\xc4\xe7\xfb\xf7\x67\x46\x07\x6d\xee\x55\x64\xbf\x34\x7b\xfe\x12\xd0\xc9\x82\xab\xab\xa6\xbf\x84\x5e\x2f\xad\xbe\x88\x72\x1a\x42\xbf\x85\x1e\x76\xe3\xca\xc3\xf7\xef\x4f\x97\x70\xde\x9c\x3a\xb8\x78\x63\xe9\x2d\xe3\xc3\x20\xf5\x8a\xf9\x69\xab\x6b\x53\x8f\xf3\x77\xaf
\x3e\x61\x00\x3d\x7f\x8f\xef\xc6\xcc\xe3\x9e\xcd\x82\x8f\xe4\x1e\xef\xdd\x5c\xfb\x9c\xec\xe3\x1d\x2e\x3f\x2b\xff\xb8\x53\xd9\x1f\xcc\x40\xde\xe1\xf6\x36\x07\xb9\xd4\xe0\x4a\x16\xf2\xea\xb6\xe2\x47\x7d\xf5\xca\x0d\xc8\x83\x9b\x1e\x4b\x76\xf3\xfa\xd4\x01\x77\xef\xac\x7a\xdd\x9a\xaf\x5c\x4f\x3d\xce\x1f\x35\x7b\x66\x7d\x16\x36\x03\x6b\x75\x65\x85\xe6\xd2\xda\xd7\xff\xca\xea\xd5\x6c\xf5\xa8\x86\x0b\xd5\x0f\x46\xea\xdc\x01\x95\xd9\xea\x71\xa2\xa6\xf3\xfe\xd9\x03\x36\xb3\xd5\xe3\x40\xcd\xac\x0b\x45\x9e\x52\x17\x8b\xa7\x7e\x7b\x68\xcd\xe6\x13\x75\xee\x2c\x85\x64\xbf\xff\xd7\x7f\xbf\xa4\x7c\xff\xf3\x7f\xce\x25\x7d\xff\xf5\xdf\xa7\x36\x57\x83\xe0\xc2\x3e\xc3\x0b\xad\x61\x30\x54\x57\x53\xc8\x17\x5a\x6f\xc9\x1c\x34\xf3\x07\xea\xd1\x0e\xe6\x43\x77\x77\xa8\x48\x4c\xac\x61\x5b\x9d\x2e\x74\xbd\xce\x48\xb6\x96\xd8\x52\x6b\xab\x93\x95\xc3\xe1\x70\x1b\x3d\x6e\x42\xc0\x0b\xa5\xab\xee\x7a\x4c\xf8\x7d\x23\x1f\xce\x49\xa9\xe5\xe3\x93\xc7\xee\xc5\x7b\x33\xf7\x3f\xbd\xef\xfc\xd1\xf1\xe0\xf4\xe9\x8b\xfd\x18\x70\xfe\xa0\xe7\xab\x63\x6d\xd7\x0f\x64\xbe\x73\x02\xee\x70\xa3\xfb\xa3\x42\x1f\xde\xff\x78\x5a\x69\xdf\x66\x9c\xb7\x1e\x35\xbd\x9e\xa0\xbf\x7a\xa8\xed\x1c\x4c\x8e\x9f\x4a\x3b\x7b\x7c\xec\x4a\x8a\xbc\x4b\x79\x87\x17\xd7\x76\x7d\x47\x5d\x9a\x20\xec\x0a\x43\x6e\x30\xb7\xfb\x2a\x34\x9a\x28\xc7\xdf\x2d\xff\xde\x7e\x30\xfa\x83\xa7\x61\x8f\x6f\xe8\x7f\xb4\xaf\x8e\x5f\x81\xf9\x29\x87\x89\x6f\x3c\xff\x78\xcf\x81\xc6\xfb\xb6\x40\xaf\x9e\xc9\x7f\x31\xc7\x63\xdf\x1f\xf8\x97\x56\x53\x3e\xfb\xe4\xfe\xe7\x38\xc7\xe5\x2d\xce\x90\xef\x3e\x79\xc8\xd3\xdb\x0f\xb7\xcc\xa3\xf6\x2e\xb2\x7b\x6c\xe3\x9d\x67\x25\xca\x7a\xe5\xca\x79\xc1\xe3\xad\xd0\xe3\x93\x6c\xf7\x2d\x4e\x7f\x9e\x12\x37\xbe\xba\x71\x55\xa9\xab\x8b\xda\xb7\x28\x79\x31\x1d\xf9\x34\x35\x6f\x7e\xb8\xe4\xaa\xa2\xef\xcc\x9d\xcf\xab\x1a\xb7\x66\xd6\xee\x08\xca\x95\x6b\x38\xa1\xb8\x56\xd1\xde\xd1\xed\x1d\x7a\x6f\xaf\x52\x7c\x06\xd1\x73\x97\x0b\x7e\x84\xee\x85\xb3\xe4\xb7\x90\x3c\x3e\xda\x72\x7c\x96\xfc\xf8\x8c\x13\x7a\x3a\xe4\xb4\x3f\x65\x04\xbe\x85\x1e\x1e\x4e\x4e\xc5\x9c\x6b\x8a\xbe\x85\x1e\xee\x6c\x82\x0f\x4d\xc0\xb7\x10\xbe\xb1\x09\xf9\x16\x7a\xc0\xe0\x63\x12\xd2\x17\x09\x6f\x65\xc7\xbe\x85\x1e\xe0\x79\x6e\x17\x7a\xe7\xda\x29\xf6\x7b\x7b\xe8\xf4\x24\xfb\x93\x54\x0f\xf0\xd1\x1f\xfa\x33\xdf\xea\x3f\xee\xaf\xa9\xff\x31\x1d\xf7\x1f\x76\x5d\x07\xe5\x3f\x21\xf8\x27\x86\x21\x48\xbe\x43\xf9\x9d\xc8\x3f\x00\x16\x18\x87\x01\x3c\x55\xf7\x22\x71\xf4\xb8\x3f\xaa\xf7\x0a\x9c\xf6\x7a\xb7\xca\x72\x95\x11\x41\x8c\xdf\xc3\x08\x3f\xce\xa7\xea\x39\x93\x7b\xf4\x87\x6f\x5e\xda\xbc\xce\x8e\x4a\xc4\xee\xe1\x47\x1e\x2d\xd7\x7d\x3c\xdd\x56\xbf\xca\x83\x12\x48\xee\xd2\x89\x3e\xee\xf3\xc6\xa7\x95\xab\xdd\x75\xc3\xab\x2c\x18\x14\x80\xdc\xc3\x82\x3d\xb1\x38\xc4\xc1\x1b\x58\x70\x20\xef\x72\x01\xbe\x9f\x20\xac\x6f\xd7\x42\x40\x70\x9f\xa1\xc4\xae\x33\xac\x76\x7b\xa2\xda\xd6\x2c\x98\x5c\xef\x6b\x41\x21\x12\xf7\x91\x3f\x36\xd2\xe1\xe9\xb2\x1b\xd4\x90\x94\xdf\xd5\x19\x72\xa7\xc6\xfe\xc8\xc5\xe3\xca\x9d\x5c\xa5\x2e\x11\x66\x77\x79\x2c\x04\x3b\xf2\x87\x5e\xd8\xe5\x05\xd7\x19\x50\xc6\xe1\x5d\x0c\xe0\x31\x83\xe7\xa9\xf7\x16\xff\xd7\x19\x49\x24\xe4\x5d\x8c\xd0\xab\x9e\x38\x6c\x83\xed\x9f\xbe\xbf\xc6\x09\x02\x2a\xd9\x7d\x2a\xe1\xbd\x3a\xcf\xbb\x87\x57\x3d\x0b\x42\xc8\xe9\x5d\x8e\x0b\xc9\xa3\xe7\xaf\x9e\xde\x0e\x0c\x06\xfd\x47\xcf\x57\xfd\xab\x23\x23\x84\x98\xe3\xfb\x3a\x9e\x3e\xad\x6d\x3f\x1d\xc9\x59\xbd\xa3\x06\xa5\xfc\x2e\x80\x40\xf6\xe8\x0f\xdb\x6a\x3a\x7b\x7c\x7b\xe8\xe7\x1d\x56\x4c\xde\x87\x45\xc8\x5f\x4d\xfa\x76\xa7\xab\xac\xeb\xb1\x04\x42\x41\x19\xba\x8b\x89\x78\x76\x5f\x2f\x98\x3c\xcd\xb9\xae\xf2\x40\x58\x6
0\x7a\x17\x0f\xb9\x77\xaa\xeb\x64\x31\x86\xe0\x2e\x8f\x42\xe0\x8c\xe8\xef\x83\x10\x62\x4a\xe4\x5d\x20\x44\xf0\x09\xe9\x13\x35\x08\x16\xea\x71\xa3\x26\xc1\xf3\x96\x74\x30\x9c\xce\x26\x96\xff\x4e\xd8\x85\x58\x00\x7c\x17\x20\x11\x7a\x3c\x5a\x15\xb8\x4a\x9b\x10\x0e\xee\x72\x2d\x84\x1f\x4f\xae\x45\x5e\xa5\x4f\x11\xba\xcb\xa9\x10\xb9\x69\x2a\x02\x19\x10\xe4\xae\xb0\x81\xe8\x56\xee\x03\x00\x27\x6a\x68\x0d\xd4\xa3\x13\xf4\xe7\x83\x77\xb0\xc7\x30\x87\xf7\xcd\xb1\xf0\x53\x5f\xcf\x87\xf3\xa9\x3a\x01\x1d\xfc\x27\x06\x21\x08\x8e\xa9\xdf\x65\x7e\x4c\x76\x68\xb6\xe7\x83\xd1\x95\xf1\x63\xcf\x05\x7e\x9c\x0b\x7d\x74\x27\xc1\xe8\x78\x42\xfa\x78\x3a\x7c\xec\x79\x1c\xdb\xe9\xbe\x31\x0a\xf3\x7d\x20\x3c\x7b\xb4\xf1\x71\x16\x3c\x9d\xf7\x3d\xc7\x15\x7d\x98\x2b\xd9\x87\xdf\xc3\x25\xea\x2d\x9b\xdd\xef\xcd\x1c\x1e\xa9\x38\xc7\x0b\x7f\xd8\x8a\x84\xee\x78\x9d\xb9\x53\x72\x98\x75\xbf\x67\xd0\x3b\xd9\xb1\x1d\xbb\xc1\x7c\xa5\xdc\x0b\x20\x42\x3f\xca\x82\x3f\x8e\x26\xca\x09\x06\xa3\xf9\xd3\x84\xfb\x79\x42\xf9\xd6\x0b\xcf\x70\xbb\x6b\xf0\x24\xe2\x71\xa2\xec\xb9\xdf\x77\xaf\xb2\x42\x70\xcb\x0a\xa0\x10\x80\xdf\x31\xfe\x8e\xf1\x1f\x04\x09\xb2\x65\x05\x6e\x67\xb5\x9f\x55\xda\x13\x7f\x78\xc8\xc5\xee\xe4\x88\xd1\x76\x16\x7b\x3b\x43\x0a\x4e\x5f\xf7\xbb\x4a\x9e\x43\xb1\xcd\x27\xee\xa0\x0f\x9f\x46\xa1\xce\xec\x30\x10\xed\x15\x53\xef\x30\x92\xe0\x89\xcf\x85\x74\xfb\xea\xf5\xe5\x7b\xf3\xed\x37\x57\x98\x8f\x97\x45\x92\xb1\x52\xab\x88\xd3\x55\xde\xa8\x32\x3d\xc3\x70\xa6\xd5\xc8\xc6\x4a\x85\x66\x05\x99\x46\xc3\xc0\xa9\x28\x8d\x19\x29\xdd\x30\xaa\x7a\x35\x55\xce\x6b\xa4\x56\xcd\x64\x32\x65\xdd\x38\x35\xd2\x45\x26\x68\xcb\x24\x6a\x16\x9b\xa9\x74\x0e\xc5\xd2\x38\x61\x94\x48\xb4\x91\x4b\xe4\x8d\x78\x2e\x91\xa9\x1a\xc5\x2a\x4a\x35\x71\x2b\x9f\x28\xa7\x0a\x46\x35\xa6\x17\xb4\x72\x9d\x97\x62\xbc\xd0\x40\xa9\x9b\x99\xe0\x2d\x13\xcd\x48\xb4\xe2\x66\x34\x66\x54\xaa\x8d\x74\x21\x1e\xcb\xe8\xcd\xbc\x16\x2b\xe6\x63\xe5\x96\x5e\xd3\x49\xbd\x95\x6c\xe1\x58\x2b\xde\xc2\x45\x54\x6e\x64\x49\x36\xc5\x69\x3a\xcb\x9a\x37\x33\x21\x3b\x26\xb1\x4c\x51\x27\xcd\xaa\x89\x50\xad\x48\x62\x79\x14\x8d\x27\xe2\x5a\xaa\x89\xe3\x39\x3d\x81\x53\xdc\xd0\x8d\x6c\xb5\x44\x71\xbc\x42\x2b\x7a\x1a\x25\xb5\x54\x85\x1a\xa4\x71\x33\x13\xba\x63\xd2\x2c\xc7\xf2\xd9\x52\x93\xe9\xcd\x5c\xa3\x50\xac\x54\x58\xa6\x58\x2c\x34\xe2\xf9\x9a\x11\xad\xa7\x2b\xc5\x4a\xb9\x95\xae\xd5\xeb\x24\x57\xd7\xcc\x5a\xb4\x50\x49\x51\xb3\x92\xd3\xe2\x37\x33\x61\x5b\x26\x71\x93\xe2\xba\xa6\xa7\xb3\x85\x2a\x6e\x65\x0d\x4c\x62\xe5\x94\x56\x4f\xf1\x54\x8d\x65\x19\x8e\x45\x33\x66\xb5\x52\xaf\xc6\xa3\x89\x72\xbe\xc9\x4d\xb3\x94\xc5\xe5\x62\xb6\x50\xbe\x99\x09\xdf\x79\x57\xaa\xd8\x48\xd6\xe2\xd9\x62\x82\x66\x2b\x24\x66\x68\x15\xde\xe0\xbc\xd0\x68\xb6\x78\xb3\xa6\x93\x4c\x2a\x9b\x88\x57\x53\xb1\x64\xac\x56\x8f\xe5\x48\x96\x14\x4b\x8c\x67\xb3\xad\x9b\x99\x88\x9d\x26\xa4\x9c\x2f\xe8\xb8\x56\x2c\x27\x78\xcb\xc4\xb1\x8a\x5e\xc2\x45\x5a\x35\x2a\xd1\xbc\x9e\x89\x6b\x28\x99\x6b\x54\x52\x26\xcf\xe7\x35\x53\x33\xb2\xd9\x4c\xbc\xc5\xcd\x62\x32\x71\x33\x13\xb9\xd3\xa4\x56\xa7\xb9\x98\xd9\x4a\x14\x79\x51\x37\x1a\x15\x2d\x59\xa8\xa5\x4b\x0d\x2d\x1e\x37\xaa\x8d\x46\x2b\x13\x33\xb2\x09\x52\x2b\x45\x51\x3a\xcb\xeb\x28\x57\xcc\xd4\x13\x1c\x57\x93\x37\x33\x81\x60\xc7\x25\x93\xcd\x34\x8a\xd9\x68\x22\x9d\x2a\x14\x30\xa1\xb4\xde\xa8\x27\x69\x2c\x1e\x6d\x35\x4a\x46\x33\x61\x9a\xc9\x74\xac\x99\x2f\x56\x4b\x98\xc6\x8a\x25\x52\xaf\x95\x71\xb6\x95\xbb\x83\xcb\x1e\xf3\xe9\x44\xc2\x2c\x65\x53\xf9\x14\xcb\xc4\x79\x2c\x4b\x0b\x69\xd2\xa8\xc5\x9a\xb1\xbc\x61\x1a\x09\x56\x6c\x6a\x3c\x53\x89\x99\x38\x51\x4c\x15\x33\xad\x52\xa5\xd9\x4c\x44\xa9\x7e\x
7b\xd7\xc3\x3d\xe8\x59\xd2\xc0\xb9\x4c\xb5\xce\x32\x4d\x93\xeb\xf1\x42\x86\xf0\x5a\x34\xc5\xe3\x84\xe6\x49\xa6\x9e\x6a\x24\xb3\x2c\x97\xca\x98\x9a\x9e\xa6\x99\x68\xd9\x40\xf1\x68\xa9\xc9\x4b\xb7\x73\xd9\xa1\x3e\x5a\xc8\xf2\x68\xa1\x5a\x2e\xd4\x8b\xa9\xa8\x66\x44\x9b\x79\x96\x4f\x9b\xcd\x56\x26\x1d\x4f\x17\xab\xcd\x4c\xb1\x91\xaa\xa4\xb4\x78\x82\xd3\xaa\x5e\x4b\xd7\x9a\xf5\x54\xaa\x60\x94\x62\xb7\x73\xd9\xc1\x3e\x8a\x4a\xe9\x66\x05\xa5\xb5\x6a\x22\x6f\x36\xb2\xb9\x72\x2e\x57\x34\xf5\x58\x8c\x15\x62\x85\x64\x26\xaf\xc5\xcb\xc5\x8a\x99\xe5\x95\xa4\x51\x41\xe5\x84\x89\x9a\xc9\x7a\x5c\x33\x6f\x47\x24\xdc\xe3\xbe\x91\x4f\x10\x5c\x49\xb6\x52\x75\x5c\x32\xb0\xa9\x17\xaa\x1a\xaa\xd2\x62\x9d\x46\x2b\x9a\xd9\x48\x26\x9b\x19\x9c\x49\x27\x52\x75\xdc\xac\xb0\x92\x99\x35\x73\x38\x56\x2c\x56\x6f\xe7\x72\x00\x7e\x1d\x73\x9a\xd7\x9a\x26\x61\x85\x78\x32\x81\xea\x49\xcd\x28\xc5\x90\x69\xb6\x72\xbc\x80\x08\x8b\x37\x53\xa9\x58\xb2\x5e\xa9\x21\x53\xe7\xe9\x94\x8e\x4a\xd5\x52\xee\x0e\x5d\xf8\x7e\xa0\xd4\x4c\xa4\xe9\x4d\x3d\x9b\xae\xe8\x28\x97\x4d\x53\x33\xdf\x48\xd0\x7c\xba\xd6\x62\x25\xde\x48\xe7\xcc\x42\x32\x5e\x41\xa8\xc0\x33\x0d\x12\x2f\xd7\x13\x65\x9e\x88\xc7\x8b\xb7\x73\x11\xfb\x31\xbf\x95\x2c\x66\x9b\xb4\x5e\x4e\xd5\x93\xb4\x59\x68\xe5\x8d\x24\x45\xc9\x58\x96\x96\x63\x19\xd2\x4c\x56\xeb\xf9\x4c\xa6\x96\x6c\x95\xb3\x28\x51\x24\xd1\x34\xca\xa4\x33\x06\xba\xa3\xf7\xf7\xd8\x4f\xc5\x48\x1c\xc5\xca\x84\xc6\x32\xa6\x61\x90\x92\x96\xaa\xa0\x5c\xa2\xa5\x65\xd2\x55\x5a\xd4\x78\x8a\xe2\x2c\xae\x15\x8a\xac\xae\x67\x58\xa3\x9e\x32\xca\x46\x2b\x5b\xba\x1d\x95\x68\x8f\xfd\x28\x4f\x14\x9b\xc9\x5c\x8e\x99\x19\xcc\x53\x59\x53\x6b\xd6\x69\x45\xab\xe7\x13\xd1\x64\x32\x91\xcc\x93\x74\x9e\xe9\x66\x34\xd6\x6a\xa4\x51\xb4\x45\x0a\x85\x42\x03\x55\xb5\xdb\xe3\x17\xda\x61\x3f\x1e\xab\x55\xa2\xc9\xb2\xae\x57\x79\x36\xd7\xa8\xe6\x53\xf9\x72\x23\xaa\xe5\x1a\x19\x54\x21\x69\x94\x2d\x14\x1b\x75\x54\xa6\x15\x33\x97\x8d\x9b\x69\xad\x11\xa7\xd5\x78\x4a\x6b\x16\x6e\xe7\xb2\xc3\xbe\x96\xa4\xa8\x52\x67\x25\x2d\xca\x2a\x49\x23\x5f\xa8\xe4\x30\x6a\x92\x3c\xae\x96\x4a\x39\xc3\x30\x52\x45\x3d\xd5\xd4\xd2\x4d\xb3\xc8\xca\x89\x04\x6b\x69\x35\x33\x41\x5b\xa5\x3b\x74\xd9\x47\x7c\xc2\x6a\x66\x36\x1a\xcb\xa5\x51\x83\xc5\x1b\xb9\x06\xd7\xd2\x7a\xcd\x4c\xe4\x52\xb8\xaa\xf1\x46\x54\xc7\x46\xd2\x28\xb2\x02\x27\xa9\x12\xcd\x35\x52\xf9\x64\xa5\x86\x32\xd1\xdb\xb9\xec\xb0\x1f\x6f\x24\xb4\x64\x26\x56\x8e\xa5\x49\x2c\x8b\x9a\xa9\x2c\x6b\x9a\x39\x8d\x55\xb2\x7a\x23\x61\x36\x78\x34\x9f\xac\xe5\x4b\x85\x68\x3e\x97\xd6\xab\x19\x33\x43\x9b\x25\x23\x97\x4f\xdf\xc1\xe5\x80\xfd\x34\xc6\xd5\x58\xae\x54\x89\x65\xf3\xc8\xc8\x9b\xd1\x32\x6f\x34\xa3\x26\xc5\x34\x97\xd3\x6b\x5a\x2a\x47\x9b\x51\x83\x24\x2a\xb1\x28\x49\xb5\x52\x15\xde\xd2\x68\xac\x96\x7d\xf8\xe8\x3b\x3e\xa1\xb2\xfe\xde\x96\xe9\xfb\x57\xf9\x4f\xdf\xb8\xf9\x16\xda\x2a\x73\xb8\xc1\xff\xde\xdc\xf6\xed\x7d\xf1\x0f\xcf\x6c\xf7\xdb\xed\xc7\xf3\x5a\x67\xa2\x5c\x7f\xf6\x68\xf5\x47\x1d\x6b\x38\x1f\x90\xed\x64\xbc\x5a\x8e\x3f\x7c\xc5\x60\xfe\x9a\x3b\x7a\x87\xfb\x67\x0f\xf2\xaf\xb9\xe3\x0b\xdc\xf5\xaa\xf9\xf0\x15\x73\xa5\xd7\xdc\xc9\x3b\xdc\xbf\x56\x77\xfa\x8e\xe5\xbf\x56\x77\xf6\x0e\xf7\xcf\x0e\x22\xaf\xb9\x6f\x63\xee\xd0\x9a\xf9\x0b\xb5\x65\xf6\x70\xb2\x9b\x7b\xcf\x85\xfe\x4f\x19\x17\x5e\x9d\x65\xf9\x16\xe2\xb7\x0e\x0a\xe7\xee\xf3\x7f\x74\x54\x78\xba\xd3\x7f\x34\x2c\x20\x22\x21\x10\x10\x70\xc4\xe8\x6e\xdb\x9b\x6c\xf1\xf2\x3f\xbf\x0e\xd5\x72\x2b\xe9\xaf\xdf\x43\x2f\xbb\xf0\x00\xfc\x9f\x4b\xb6\x3f\x4b\x99\x08\x88\x21\x96\x92\xc9\xe7\x0d\xf5\xff\xf9\x75\x3a\xb3\x26\x33\x7f\xd8\x7e\x5a\x96\xfa\xf5\x7b\xe8\x57\x08\x00\xf8\xe3\
xc0\xe2\xd7\x9b\x79\xa0\x53\x1e\x68\xbf\xdf\xff\x3f\xbf\xee\x57\xba\xdf\x50\xfe\x16\xfa\xf5\xe5\x14\xd9\xb6\x74\xef\x1e\xb7\x73\x7c\xa3\x15\xfe\x16\x82\x7b\xb5\xf6\xef\x34\xfe\xfa\x7d\xab\xe8\xaf\xfb\x0e\x79\xec\xa9\xf5\x96\xcb\x47\x17\x14\x6e\x97\x0b\x6f\xe5\x02\x14\x0a\xc8\x11\xe3\x78\x67\x6d\xf4\xc6\x12\x9f\x67\x87\x37\xfc\xce\x5a\xfe\x73\xf9\x61\x86\x10\x46\x80\x62\xce\xbf\xcc\x9b\x5e\xf1\xf8\x72\x6f\x7a\xa3\xd5\x6d\xde\xf4\xc1\x45\x9d\xdb\xe5\x22\x5b\xb9\xa0\x44\x02\x61\x01\x04\xfc\x72\x6f\x42\xa7\xfc\xbe\xd8\x9b\x5e\xf3\x43\x5f\xae\x1f\x39\xe5\xf7\xc5\xfa\x6d\xf9\x21\xce\x30\x46\x1c\x70\x41\xbf\x0c\x2d\xaf\x78\x7c\x39\x5a\xde\x68\x75\x23\x5a\x3e\xb6\x3a\x79\xbb\x5c\x5b\xdd\x11\xc6\x58\x60\x80\x89\xd8\x47\x3a\xb2\x77\xa7\xa1\x35\x38\xa8\x3a\x50\x70\x6b\x81\xdd\xb1\xfa\xed\x37\xf9\xb8\xb6\xcc\x6b\xff\xfa\xd7\x7d\x6c\xa0\x04\x04\x63\xcc\xa0\xc4\x37\xb2\xa9\xa4\x37\xc6\xfd\x6c\x08\x27\x02\x33\x21\xe4\x7e\xa4\x25\xf0\x0d\x1b\x74\x27\x45\x40\x28\x26\x00\x32\xb0\x1f\x4d\x08\xf8\x1a\xc1\xf7\x6c\x28\x7b\x12\xfc\x2d\x1b\xf4\x8a\x8d\x51\x69\xe2\xc2\x47\xd9\x48\xfe\xd4\x0d\x6f\xd9\x84\x5e\xb1\xb1\x1a\xe6\x26\x9d\x74\x17\x6e\x2c\xba\x71\x92\x89\x6e\xab\x91\xef\xdc\xc7\x12\x30\x48\x05\xc1\x04\x83\xaf\x83\xf3\x2b\x1e\x5f\x0e\xe7\x37\x5a\xdd\x08\xe7\x8f\xed\x03\xdc\x69\x09\x28\x18\x26\x8c\x03\x20\xcf\x07\x07\x8a\x28\x67\x9c\xfd\x21\x29\x66\x40\xa2\x1f\xb7\x06\x3b\xe5\x8a\xde\x72\xc5\x02\x12\x82\x76\x5d\x00\x09\x94\x9f\xc2\x14\x4a\x4e\x99\x20\x52\x72\xb8\xf7\x65\xbe\x63\x7a\xf6\x19\xff\x9d\xfd\x3f\xb8\x45\x72\x97\xfd\xf7\x42\x51\x80\xc5\xc1\xfe\xef\x09\xf5\xc1\xbd\xb4\x3b\x2d\x85\x11\xc7\x14\x23\x02\xdf\x9b\x7f\xa2\x97\x7c\xe9\x43\x48\x7c\xcd\xea\x0c\x12\xcf\x31\xf8\x1c\x6f\x38\x66\x7c\x1b\x20\xbf\xdc\x21\xb6\xd6\xde\x86\x22\x41\x19\x85\x08\x3e\x9f\xea\x3e\x36\x88\x94\xf2\x0f\xb9\xfd\xfb\xa4\xe9\xe1\x6b\x86\x67\xb0\xf8\xa9\x0c\xdf\x68\xb8\x87\xe2\xc3\xff\xdc\x47\x81\x30\x81\xa9\xe4\x02\x7d\x59\x88\x78\xcd\xe3\xcb\x43\xc4\x1b\xad\x6e\xcc\xb6\x3f\xb6\xc1\x7a\xbb\x5c\x62\x2b\x17\xc0\x42\x30\x0c\x28\x3a\x84\x08\xb2\x93\x6b\x77\xfe\x2d\x98\xec\xe5\xf8\xd8\xf6\xe8\x8b\xdd\x9c\xc0\xdd\xd9\x4d\xaf\x9a\x6f\x8d\x79\xba\xae\xf6\x52\x63\x7f\x03\x6e\x0f\xce\x8f\x2d\xef\xdd\x67\x0a\xce\x80\xc4\x8c\x61\x84\x0f\x0b\x0f\xf8\xeb\x4c\x51\x2d\xc7\xff\x03\x4c\xc1\xf1\x93\x57\x7c\xa1\x29\xfe\x8d\xbd\x62\xab\x3c\x87\x5c\x0a\x26\xa9\xc4\xfb\x70\x89\xf6\xc0\xdd\x5d\x8e\xdc\x8d\xa0\xdb\xa4\x89\x23\x80\x99\xa0\x7f\x10\xce\xa9\x00\xfc\x3f\xaa\xbf\x9f\x95\xe4\x00\x80\x27\xd7\xff\x90\x92\xff\xc6\x3d\xb9\x75\x6a\xc6\x29\xc0\x1c\x09\x46\xf6\xc1\x97\xed\x07\xfd\xf9\xac\xf3\x38\x51\xe3\xb9\x3f\x51\xee\xa3\xd7\xb7\xda\xbf\xee\x2e\x16\xab\xad\x9c\xfb\xb2\x45\xe0\xec\xce\x1f\x1e\x15\xde\x65\x5d\x86\x11\x10\x9c\x41\x4c\xbe\x2e\xa2\xbd\xe2\xf1\xe5\x11\xed\x8d\x56\x37\x46\xb4\x0f\x8e\x19\xf7\x75\xf3\x5e\x2e\x4a\x0e\xdd\xfc\x85\xd6\xde\xf3\xf8\x72\x6b\xbf\xd1\xea\xc6\x19\xed\x57\x83\x6a\x2b\x03\xa4\x42\x42\xcc\x25\x01\xe4\x10\x2a\xe0\xeb\xa1\xe3\x8c\x1d\x7e\x7c\x50\xfc\xf2\x44\x69\xaf\x1a\x61\x50\x08\xc4\x38\x3d\x8c\x8a\xe8\xb5\x6a\xf0\x5c\x27\xff\xc7\x28\x07\x30\xc2\x82\x4a\x42\xf9\x5f\x4f\x39\xc2\x00\x12\x42\x22\xc8\xe0\xff\x4a\xd0\xfe\x49\x5a\x42\x8e\xb1\x44\x44\xb0\xaf\x8b\x2b\xaf\x78\x7c\xfd\xbe\xe4\x1b\xb5\x6e\x0c\x2c\x1f\x3b\x5b\x79\x87\x60\x5b\x3f\xc2\x9c\x60\x21\x29\xa7\x87\x7d\xe0\x2b\x13\x88\xdd\x4f\x71\x5e\x9c\x41\xec\x4a\x3f\xc2\x9b\x41\x76\xe8\x6b\x88\x5e\x1b\x05\x9d\x31\xca\xc7\x8e\x82\x7e\xc8\x28\x07\xc1\xb6\x1e\xb3\x1f\xff\x4f\x64\x89\xb2\x0c\xae\x17\x72\xd9\x66\x49\x67\xd9\x5a\xbc\xa5\x6b\x24\x13\xcf\x27\x2a\x95\x82\x51\x6d
\x16\x71\xb1\x9a\x32\xe2\x46\xcb\xac\xa7\xb3\x1a\xcb\x94\xea\xe9\x7c\xa6\xa5\x55\x12\xfa\xbd\xb2\x60\x48\xb0\x94\x00\x1d\xb6\x8e\xff\x7d\x8c\x74\x2c\x18\x7a\x2b\x18\x7d\x2b\xd8\xcf\xb1\x18\x02\x88\x4a\x20\x04\x3e\x6c\xaf\xfd\xaf\xaf\x50\x1e\x49\xb5\x1d\x02\x6e\x9a\xa9\x7f\x88\x38\xa3\x87\xfd\xf7\x57\xc4\x7f\x64\xaa\xff\x42\x1d\x02\xc0\xfe\xdd\x5c\xf0\x20\x18\xe1\x87\xc1\x6b\xbf\xce\xd3\x0f\x96\x8f\xcf\xbf\xf9\xfa\xeb\xf7\x6d\x08\xf9\x75\xa0\xdc\x57\xdf\x6d\x45\xde\x3d\x3c\xfb\xea\xcb\x0f\x71\x17\x4f\xa1\x8a\xee\xb8\x1f\xfd\x1e\xee\xd6\x12\x6a\x65\x0d\x46\x7d\xf5\x87\x13\x0c\x3e\xa4\x1c\x44\xf2\x10\xef\xff\xcd\xac\x7e\x10\x0c\xdd\x14\xcb\x7e\x0e\xf0\x21\x95\x8c\x40\x04\x81\xf8\xb2\x33\x4d\xaf\x79\x7c\xfd\xdc\xe1\x8d\x5a\x37\xce\x1d\xbe\xdc\x11\x76\xfd\xce\x31\x27\x50\x30\x22\xbf\xec\xd4\xcf\x6b\x1e\x5f\x6f\xef\x37\x2c\x6f\xb3\xf7\x07\xef\x8e\xdc\x21\x18\xd9\xca\x81\x01\x97\x08\x4a\x74\xd8\xa9\xc7\x7b\x63\xec\x1f\xe3\xdb\xcb\xf1\xb1\xdb\x25\x5b\xc3\x3d\x3d\xd4\xf0\xeb\xf7\xad\xce\xbf\x4e\x83\xbe\xfb\x78\xbc\xab\x75\x6c\x64\x3b\x98\xb7\x3b\xb3\x8b\xc5\xfb\xb6\xe7\xd2\x8e\xa3\x92\x6b\xb9\xc7\x13\xfd\x73\x7d\xf9\x9a\xca\x71\x7e\xf2\xb1\x63\xd2\x77\xf4\x01\x3e\xed\x03\x74\xb6\x0f\x3e\x28\xc7\x97\xf5\xc1\x5b\x03\xbe\xb2\xef\x49\x17\x9d\xb5\xfd\xbb\x9d\xf4\xb3\xfb\x41\x12\x22\x24\x12\x84\x3d\x9d\x02\xf8\xd9\xc9\xf0\x4f\x51\x12\x40\x49\x30\xe4\xf8\xb0\xff\xff\x05\xe3\xeb\x2b\x16\x5f\x3f\xbc\x9e\x2a\x75\xe3\xe8\xfa\xc1\x51\xed\xae\xd1\x75\x2f\x17\x82\x04\x7c\xd9\xe4\xe1\x15\x8b\xaf\x37\xf6\xa9\x52\x37\x1a\xfb\xcb\x3d\x9b\x7f\x0b\xed\x7f\xa1\x91\x02\x70\x38\xaf\xf1\xf6\x84\xe7\x95\x85\xba\x8f\x6f\xc9\x7c\xf0\x3e\xc7\x1d\xba\xb1\x13\xdd\xce\xf7\xf2\x27\x76\xf2\x29\x43\x7c\x2e\x24\x7d\x54\xef\xd7\x21\x09\xbd\x0d\x49\x27\xba\xbc\x89\x49\x27\xe5\xf7\x07\xa5\x43\x5f\xff\x70\x50\xfa\xfa\x9e\xa7\x27\x1d\x41\xce\x75\xc4\x07\xef\xb2\x7e\x65\x47\x9c\x58\xfa\x2b\x67\x68\x5f\xdf\x09\xdb\xa1\x05\x32\x40\xb0\x40\x1c\x9d\x3f\x1c\x8f\xfe\x93\x87\x96\x63\xdd\x6e\x1d\x5a\x7e\x7c\xb2\xf3\xf3\x75\xfb\xc2\x51\x0c\xde\x0f\x1e\xf4\x2e\x78\x7e\x42\x7a\xf3\x73\x46\xb0\xe3\x4e\xf8\xc2\x11\xec\x4c\x27\xa0\xaf\x0e\x25\x9f\x95\xdf\xfc\x7c\x34\xd0\x9f\x1a\xd3\xdf\xeb\x88\xcb\x68\xf8\x4b\xc6\xf4\xe3\x8e\x60\x3f\x35\xa6\x7f\xbc\x23\xfe\x52\x31\x9d\x7d\x0b\x71\x8e\x81\x24\x90\x92\xc3\xf1\xd9\xbb\xd2\x85\x7f\xe7\xb8\x47\x4f\x74\xfb\x8b\xc5\x74\x8e\x01\x24\x84\x08\x88\xfe\x77\x56\x69\x7e\xca\x84\x73\xaf\x24\x45\xe8\xc9\x39\x7f\xf2\x39\xc3\x9f\xd2\x93\x4c\x70\x28\x89\x00\x02\x7f\xd9\x52\xd4\x2b\x16\x5f\xbf\x3a\x72\xaa\xd4\xad\x37\x2a\x3e\x36\xd8\xdf\xe5\x51\x07\xb9\x88\x44\x5f\xb6\x14\xf5\x8a\xc5\xd7\x1b\xfb\x54\xa9\x5b\xaf\x87\x7e\xe8\x4d\xa6\xfb\xc6\xdf\x83\x5c\x12\x7c\xdd\xd5\x8c\x57\x2c\xbe\xde\xd8\xa7\x4a\xdd\x68\xec\x2f\x1f\x46\xc4\xb7\x10\x23\x04\x11\x0a\x31\xc5\x97\xd6\xfd\xbe\x22\xd6\x7d\xf0\x29\x93\x3b\x54\x93\x27\xaa\xdd\x78\x89\xfc\x3f\x41\xb5\x6d\xaf\x01\x88\x24\x25\x0c\x91\xaf\x7f\x6f\x60\x67\xca\x63\x7e\x5f\x7c\x1f\xff\x45\x3f\x0a\xf0\xff\xd2\x75\x88\x9f\xe2\x9f\x94\x0a\x4c\x28\x17\xf2\xeb\x0e\xec\xbf\x62\xf1\xf5\xe3\xdc\xa9\x52\xb7\xde\x40\xfb\x09\x88\xd9\xcb\x25\x30\xf8\xba\xf3\xfa\xaf\x58\x7c\xbd\xb1\x4f\x95\xba\xf5\x39\x94\x0f\xbd\x77\x78\x87\x21\xc0\xb7\x10\x85\x14\x4b\x06\x21\x23\xef\x4d\x97\xe8\x47\x4c\x0d\x4f\x38\x9c\xb1\x35\xfd\xcc\x07\x50\x4e\x35\xba\xd1\xad\x3f\xf6\x1a\xe3\x7d\x76\x20\x1c\x11\xc2\x08\xc1\xf4\xcb\xb2\x80\x57\x2c\xbe\xdc\xad\xdf\x28\x75\x63\x16\xf0\xb1\x47\x29\xef\x33\x04\x92\x44\x32\xce\x01\xbd\x7d\xaa\xf4\x61\x33\x9c\xf2\xfb\xfa\x47\x7e\x5e\xd8\x89\x9f\xac\x9e\xf8\x7a\xf5\xb6\xf4\x05\xa3\x84\x0
3\xc9\xd9\x97\x45\x80\x57\x2c\xbe\x1e\x2a\xa7\x1c\x6f\x8c\x00\x1f\x7b\x59\xf5\x3e\x43\x10\x4c\x25\x27\x88\xb0\x0b\xcb\x83\x9f\x68\x05\x7c\xc2\xee\x9c\xdd\x3f\x15\x28\x07\x6e\x8c\xb2\x0b\x93\xef\x2f\x51\x6e\xcf\xee\x27\x29\x47\x01\xbb\x34\xc8\x7d\x85\x72\x07\x76\x3f\x4b\x39\xf2\x7c\xd5\xee\xa7\x28\xb7\x67\xf7\xc5\xca\x6d\xd9\x01\xc0\xb6\xb9\x10\xfc\xba\x07\x24\x5f\xb1\xf8\xfa\x01\xee\x54\xa9\x1b\x07\xb8\x8f\x3d\xea\x7c\x87\x5c\xe4\x5b\x08\x51\xce\xa4\x00\x5c\xdc\x7e\x03\xe3\x83\xcf\x40\xff\x80\x5c\x67\xee\x00\x9d\x9b\x3b\x61\x9d\xa7\xb2\x79\xd3\xa8\xb0\x54\x32\x9a\x4c\x45\x2b\x2c\xcd\x32\x3a\x31\x50\x99\xd4\x69\xb6\x85\x08\xab\x24\x33\x24\x5b\x2a\x37\x32\xa8\x48\xa2\x8d\x58\xb5\x58\xca\xdf\xb1\xa8\xb7\x93\x6b\xf7\x4c\x1a\x21\x97\x6e\xac\x9c\x93\xeb\xa7\xd8\xeb\x58\xae\xdb\xae\x4e\xfc\x4c\x7b\x89\xeb\x17\x69\xd0\xe7\x5f\xa4\xd9\x32\x3f\xbc\xd4\x80\x91\xf8\xba\xcb\xed\xc7\x2c\xbe\x7e\x24\x39\x55\xea\xc6\xac\xe2\xcb\x3d\x70\x3b\x3d\x42\x42\x10\x09\xa8\xfc\xba\xb7\x27\x5f\xb1\xf8\x7a\x63\x9f\x2a\x75\xeb\x72\xf7\x87\xde\xaf\xdf\xcb\xf5\xce\xab\xd0\x87\x5f\x29\xfb\x94\x57\xa1\x0f\xb4\x5e\x5e\x02\x83\xdf\x42\x0f\x9c\x38\xcc\xf5\x5c\x2c\x1c\x66\x3b\xc4\x86\xc0\x55\xae\x6b\x2b\xe4\xb9\x2e\xb5\x20\xc1\x96\x80\x9c\x51\x88\xb0\x62\xca\x66\x18\x11\xd7\x53\x1e\x70\x1d\x97\x58\x94\x7a\x36\x7c\xf8\x16\x7a\xf0\x1c\x41\xb0\x8b\x20\xc1\x0e\x67\xb6\x8b\xa4\xb2\xb1\x0d\x3d\xcf\x75\x1d\xec\x12\xe1\x32\xe0\x70\x0b\xba\x8e\x67\x41\x6c\xb9\x42\x40\x09\x5d\x0b\x60\x0e\x24\xf2\x20\x72\x0f\xbf\xc3\x0c\x9f\x7e\xd9\x0b\xb0\x7f\x02\x1c\x82\xec\x3b\x66\xdf\x11\x7f\x78\xfb\x35\xfd\x8e\xf1\x1f\xbb\xb7\x99\xd1\xbb\xa5\x88\x41\x29\x31\x00\x14\x50\xf6\x2d\x04\xb7\xff\x07\x00\x40\x09\x38\x7a\xfe\x1d\x68\xb0\x8d\xd6\x84\x43\xb4\xbb\xec\xb5\xfd\x27\x7c\x29\x81\xcf\x1f\xb6\x02\x6a\x9a\xa6\xc5\x22\x6b\xbd\x58\x4e\xc4\x57\x68\x51\x1e\x4e\x0a\xab\x88\xc4\x71\x98\x86\xed\x15\x4b\x39\xe1\x4e\x61\xe6\x47\x7b\x6e\x10\x77\x32\xb9\x55\xae\xb4\x1c\xda\x41\xbc\xb5\x2e\xb4\x9d\xb5\x67\x93\x9e\xa2\xcb\x34\x2e\x07\xd3\x5a\x8b\x35\x4b\xf3\x55\x7a\x68\xa0\x2a\x73\xa3\x2e\x91\x9d\xe6\x96\xb4\xd6\x28\xd6\xf2\xfe\x52\x7b\xfe\x0b\xa6\x7a\x67\x61\x56\x62\x0b\xcb\x4d\x47\x32\x72\x12\x16\xbd\x68\xb8\xdf\xd4\x58\x61\xd4\xa9\x74\x53\xa9\x08\x0d\x50\xd8\xef\xf5\xd3\xa4\x48\x26\x8b\x62\x4a\x5b\x61\x6b\xea\xc3\x80\xf0\x7c\x11\xa2\x89\x11\x2b\x95\xd3\xd5\x5a\xbc\x0b\x6b\x13\x77\x2e\x60\x78\x56\xc8\xc5\xbd\x2d\xfd\x38\x30\x48\xbd\x34\xaa\x1b\xdd\xec\x81\x55\x14\x14\x4d\xba\xfb\x54\x7a\x66\x9f\xdc\xfe\xa7\xa5\x35\x20\x29\x6d\x9b\x64\xb4\x2c\x43\x63\xaf\x9b\x6c\xc4\xd7\xdd\x30\x1c\x35\x2a\x39\x4d\x4d\x59\x24\x52\x74\xfd\x54\x3e\xdd\xa7\xed\x54\xa1\x57\xe9\xaa\x46\x21\xb2\x04\xda\xbf\xc3\xdf\xf3\x4f\x7c\x1b\xd5\x5c\xee\xc2\x30\xf0\x06\x17\xe0\x73\x7c\xfa\x41\x50\xc5\x11\x63\xc4\x71\xa1\xc0\x14\x21\x86\x04\x63\x1e\xa0\x98\x52\x02\x20\x66\xc8\xb6\x84\xa3\x2c\x66\x21\x8f\x70\x85\x38\x64\x84\x43\xcb\x16\xc0\xe5\x96\x40\x40\x5c\xc3\x05\xbb\xe8\xf9\x98\x12\x04\xdf\x2d\x3d\x4c\xb7\x30\xe7\x5b\x75\xaf\xe3\x02\xde\x88\x0b\xd2\x18\x66\x06\x15\x23\x19\xaf\xfa\xcd\xa0\xb5\x88\x56\x6a\x55\xad\xd2\x9c\x8c\xf3\x6c\x10\x2c\xcc\x70\x66\xd5\x4a\x25\x27\x6d\x43\x05\xe9\x34\x15\x5e\x6d\xd1\x9f\x54\x3a\x4e\x6e\x6a\x76\x5a\x91\x7c\xbd\x93\xcf\x7b\x91\x6a\x73\x82\x0a\x9b\x9e\xd9\xab\x64\x13\xcd\x56\x3c\x59\xa2\x6a\x0f\x86\x1d\x2e\xda\x2f\xfd\xda\x8a\xa2\x06\xef\x3b\x31\x9b\x73\xd4\x2f\xf2\x52\x7f\x96\x5e\x7a\x5c\xf3\xc2\x7a\xd9\x9f\x66\xa7\x41\x62\x54\x85\x4e\xb4\xb5\x28\x3b\xdc\xe9\xd8\x09\x5c\xec\xa8\x46\x31\xbc\xe1\xe1\xd9\x34\x43\x6d\x9a\x6c\x4e\xeb\xba\xb5\x66\x71\x9f\xc5\x7a\xbd\x
9e\xe5\x66\x06\x89\x1e\xde\x12\x8e\x2f\xcf\xe0\xa2\x54\xdb\x7d\xfa\x9b\xe3\x82\xca\xcf\xf1\xe9\x07\xa0\x24\x64\x0c\xb9\x0c\xda\x36\xb7\x91\xf0\x2c\x8f\x72\x6a\x4b\x0b\x53\x40\x39\xf3\x1c\x02\x3c\xc8\x14\x16\x08\x72\x8e\x21\x90\x8c\x60\x49\x6c\x1b\x21\x57\x31\x2a\xec\x6b\xb8\xa0\x17\x3d\x9f\x30\xc0\xae\x97\xb2\x1d\x2e\x30\x01\x18\x70\x40\x18\x79\x0f\x17\xe0\x46\x5c\x2c\x99\xd9\xf2\x61\x67\x8e\xa7\x41\x78\x51\x83\x73\xcb\x28\xd5\x50\x44\x8f\xaf\xec\x6e\xbb\xe3\x6e\xf4\x4c\x2b\xde\xcf\x4d\x7d\x3c\xa8\xe7\xc2\xe9\xd8\x10\x47\x73\xa5\x86\x39\xe5\x5e\x9f\x4c\xfd\x7a\x22\x86\xfa\x6e\x5c\x85\x39\x74\xbd\x56\xb8\x30\x63\xe5\xcd\xdc\xcc\xda\xc1\xbe\x1f\x77\xb8\x78\xf1\x4b\xad\x6f\xe4\x97\xd1\x78\x55\x1f\xcf\x7c\xbd\x32\xa8\x86\xad\x79\xbf\x97\x9f\xc4\xe7\xb5\x59\xd4\xf3\x72\x62\x5a\x53\xe3\x01\x58\x0d\xc2\xeb\xc4\x66\x96\xf2\xba\xb4\x21\x62\xb9\x5a\x50\x56\x26\x6a\x8f\x84\x16\x0b\xf4\x75\x5a\x95\x67\x06\x29\xd0\xc9\x8c\x8f\xcb\x65\x6b\xb5\xea\xcc\x77\xf1\x62\x7a\x06\x17\x85\xd5\xee\xd3\xdf\x1d\x17\xe2\x73\x7c\xfa\x81\x78\xcc\x95\x96\x25\x3c\xe0\x20\x0b\xd8\x36\x67\x08\x41\x8c\x20\x86\xc8\x06\x84\x48\x8e\x80\x02\x84\xdb\x8e\x43\xb8\x25\x5c\xc2\x15\x16\x82\x32\x04\xb1\x64\xae\xc2\xe8\x1a\x2e\xc8\x45\xcf\xa7\x1c\xd1\xcb\xf3\xa8\xa7\xd2\xa7\xdf\x0f\xc1\x90\x89\x77\x70\x01\xe4\x8d\xb8\x00\x72\x36\x18\x77\x97\xcb\x71\x8c\xbb\x7e\xba\x92\xae\xe8\x93\xa8\xde\x5f\xc7\xdb\x26\xdf\xe8\x2a\x80\x29\x42\x3a\xf5\x74\xa5\x3f\x23\x6b\xda\xb7\x2c\xb5\x6a\xf9\x38\xaa\x82\x8c\x1d\x6d\x66\xf5\x54\xcd\xb4\x1a\xa5\xe8\xa6\xb5\x69\xc4\xbb\x13\x7f\x33\x88\xb6\xac\x24\x12\xf5\x3d\x30\x76\xb8\x38\xea\xd7\x64\xde\x2b\x45\x13\x91\xe6\xb8\xa9\x5c\xbd\x6a\x0c\x4b\x73\xbf\xbf\x99\xf7\x36\xb9\x4c\x51\xd5\x26\xa6\x4c\xb3\x31\xb6\x2d\x23\x29\xf0\x50\xf4\xdc\xba\x59\x1c\x9b\x9e\xcb\xa6\xbd\x42\xc5\xcd\x74\x70\x7b\x09\x63\xb9\xb5\xd6\xf2\xfa\x20\x3b\x1d\x2d\x94\xd5\x8b\x34\xaa\xd1\xc9\x96\x70\x3c\x38\x83\x0b\xc3\xd8\x7d\xfa\xbb\xe3\x82\x7f\x8e\x4f\x3f\x10\x22\x09\xb6\x2c\x87\x01\x8f\x63\x2e\x10\x72\x6c\x5b\x28\x17\x48\xd7\x72\xb1\xe5\x78\x1e\x03\x9e\x14\x10\xba\xb6\x74\x1c\xca\x94\xb7\x4d\x5f\x18\x10\x48\xb9\x1c\x7a\xf6\xd5\xfc\x02\x5f\xf4\x7c\x26\x08\xb9\x5e\x4a\x1e\x5e\x7e\x27\x86\x0a\x8e\xde\xc3\x85\xb8\x15\x17\xe5\x6a\x61\xbc\x6a\xcb\x8d\x8a\xe5\x72\xf3\x02\xc8\xa0\x59\x61\x1a\x41\x71\xda\x36\x11\xdd\x24\xec\x05\x59\xc0\x76\x7b\x0e\x57\xe1\x65\xb9\xa5\xcf\x55\xad\x34\xaa\x4d\xdc\x06\x09\xc2\xd9\xfc\x9c\xd1\x16\xd3\xc6\x45\xbd\xd4\xc6\x9d\x78\x3b\x9f\x6c\xd4\x36\x75\xd7\x6b\x93\x97\xfc\xa2\x73\x94\x5f\x40\x32\x4d\xa4\xcd\x79\xb7\xbe\x6c\xcb\xec\xa8\x8e\x57\x4e\xb1\x12\xd5\x92\x6a\x1e\x65\xb1\xe9\xb0\x9c\xaf\x8c\x8a\xa2\xdc\x2d\x4f\x5d\x19\x74\x65\x5f\x94\x22\x76\x01\x8a\xfa\x48\xa5\x56\x05\x58\xe9\x64\x2b\x49\x5b\x9b\xa5\x07\xd4\x8c\x91\xf5\x78\xd6\x20\xd9\x9c\x65\xa6\x22\x5b\xbf\x8f\xf7\xce\xe0\x22\x37\xda\x7d\xfa\xbb\xe3\x82\x7d\x8e\x4f\x3f\x48\x4f\x31\x21\x38\x24\xb6\x45\x18\xb2\x6d\x64\x5b\xae\x4d\x11\xb2\x90\xf0\x08\xa6\x8c\x0b\xc7\xc1\x96\x8b\x05\x76\x1d\x62\x79\xae\x84\x96\x70\x20\x54\x98\x5a\xd8\xe5\x98\x5e\xc3\xc5\xe5\x88\x20\x00\x91\x97\xa3\xc9\xb6\x94\x3e\x3c\xff\x6e\x10\x13\x94\xbf\x97\x77\x03\x7e\x23\x2e\x68\x64\xe0\xa7\x12\xb9\x51\x32\xcb\xb3\x0c\xc1\xf4\x38\x5b\x2c\xc1\x16\xd9\xc4\x27\x95\xb6\x5c\x65\x23\xad\x64\xb0\x34\x09\x0c\xa4\x03\x35\x6f\x32\xd7\x61\xda\x70\x8c\xb0\x65\x2e\x7a\x5a\x3f\x5f\x74\x10\x72\x2b\xed\x76\xdb\xf7\xd8\xb2\x1f\xe4\x67\x25\xc2\xbb\x99\xb1\x99\xde\xf5\xe3\x0e\x17\x47\xf9\xc5\xdc\x5d\xa4\xb3\xb6\x9b\x28\x15\x98\x33\x1c\x47\xfb\x9e\x56\x02\xe3\xa5\xbf\x10\x95\x88\x1d\x2c\x23\xf5\x62\x25\xad\x19\
x41\x44\x37\x4d\x7d\x5c\x22\xb1\xa5\xa5\x52\xad\xc4\x20\x0c\xe6\xc9\xa0\x65\x0f\x9a\x6e\x10\x5d\x6a\xd6\x20\x41\x17\x10\xe8\x83\x61\xcd\x23\xf3\x55\x8a\x6d\xe9\xc7\xdb\x67\x70\x91\x4d\xec\x3e\xfd\xdd\x71\x41\x3f\xc7\xa7\x1f\x6c\x21\x81\xe3\xb9\x1c\x23\x06\x04\xb6\x5c\xca\xa5\x05\x29\x64\x4c\x22\x88\x3c\x0a\x11\xc7\x0e\x10\x40\x21\x9b\x71\x0b\x33\x46\x94\xcd\x1d\xdb\xb1\xa5\x07\x39\x87\xe4\x6a\xbc\xb8\x9c\x59\x4b\x44\xd8\xf5\xd2\xdd\x3c\xea\xf0\x4b\x50\x10\x89\xf7\xf2\x6e\xc0\x6e\xc4\x05\xf7\x4b\x1b\xe9\xac\x9b\xe9\xc2\xac\x46\x83\x5a\x62\xd0\xeb\xe4\xa0\x3e\x2c\x6a\xed\x42\x16\xa9\x6e\xab\xc7\xf0\x5a\xcc\xbd\x04\x4e\x68\x49\x5d\x25\x75\x19\xc5\xb3\x3e\x0f\x64\xcc\xcb\xce\x8a\xb5\x3c\x89\xab\xba\x5d\xf5\x22\xd1\x00\x4b\x27\x9e\xaa\x4c\xf4\xb4\x35\x74\x8e\xe2\xc5\x51\x7e\x91\x2a\xcc\x41\xd4\x0c\x23\x5a\x89\xaf\x9d\x26\x77\x1d\xa3\xbc\xae\x39\x3d\x38\x5c\xb7\xa2\xb1\x9e\x16\x4d\xb4\x1a\xcb\x6e\xcf\x77\x28\x2a\xd3\x22\x42\xad\x41\x35\xdc\xee\x16\xe3\xe9\x6e\x47\xab\xb7\x5b\xd1\xfa\xa6\xd5\x02\x4b\xb3\xc3\xd7\x05\x73\x22\x87\x7e\x77\x14\xc5\xcb\xfa\x2e\xbf\x70\xce\xe0\x22\xdd\xd9\x7d\xfa\xbb\xe3\x82\x7c\x8e\x4f\x3f\x10\xc1\x08\xb5\x5c\x41\x30\x50\x54\x52\x44\x29\x76\x5c\x26\x29\x93\x16\xe5\x10\xda\x96\x23\x31\x61\xd0\x86\x8e\x74\x00\x06\x36\x03\x36\x60\xc8\x23\x82\x62\x9b\xd9\x90\x3c\x3c\xef\x3d\xbf\xc5\x05\xb8\xe4\xf9\x04\x50\x01\x2f\x46\x93\xe7\xd2\xa7\x5f\xce\xc2\x52\xbc\x97\x77\x03\x7a\xeb\x3c\xaa\xd3\xab\xcf\x3a\xf1\x78\xbf\x5f\xae\x55\x36\xf5\x7e\x7d\xd0\x77\xcc\xf9\xb4\x07\x9a\x76\x2a\xb3\x8c\xc7\x50\x6c\xd9\x5c\x94\x13\x05\x34\xad\xba\xda\x66\x34\x68\x91\xda\xc0\xcf\x68\xc9\x74\x67\x08\x63\x22\xc6\xe8\x5a\xeb\x0d\x66\x51\x27\xd6\x4b\x34\x52\xf1\x65\xaf\x52\x0c\x9a\xbd\x97\xf5\xa8\xce\x51\xbf\x7a\x8d\x9a\x57\x2c\xcb\x6e\x74\xdc\xa9\x78\x1b\x92\xf3\x17\xcd\x66\x51\xda\xb3\xe1\x3c\x3c\x5e\x7a\x91\xf1\xa8\xd6\xcc\xf1\x54\xdb\xd6\xf2\x88\x16\xdc\x5c\x8a\xe4\x1a\xf3\xe5\xd0\x90\xf1\xd1\x3c\x53\xe8\x89\xa8\xee\x90\xb4\xef\xf8\x5d\xb3\xab\x4d\x96\x4d\xdf\xa9\xe1\x1e\x59\xed\xe6\x51\xcd\x33\xb8\x48\xca\xdd\xa7\xbf\x3b\x2e\xf0\xe7\xf8\xf4\x83\xed\x72\x9b\x20\xe9\xda\xae\xf0\x84\xf2\x5c\x68\x53\xa6\x5c\x0e\x90\xed\x41\x81\x15\xe1\x18\x43\xaa\x90\x07\x08\x42\xae\x4d\xa9\x94\x94\x08\x87\x00\xe2\x28\x75\x58\x36\xba\x14\x2f\xa0\xbc\xe8\xf9\x08\x20\x29\xde\x2d\x3d\xfc\x16\x1a\x63\x4c\xbc\x9b\x77\xe3\x5b\xe3\x05\xe4\xa5\x11\x92\x5d\xc2\x65\x12\xda\x33\x27\xb6\x58\xb5\xc3\xe6\x26\x5f\x23\x8b\x68\x2c\x67\xd7\x5a\xfd\x6a\x57\x8f\xe6\xf9\x20\x3a\x88\x2f\x4c\x1f\x93\x39\x35\xdb\xba\x16\xf7\xed\x91\x53\x20\x0e\x2d\x44\xed\x5c\x9a\xa6\x12\xf5\x15\x9c\x6d\x60\x21\xba\x5c\xf1\x74\xfe\x25\x5e\xb4\x8f\xf2\x8b\xa6\x00\xd6\x74\x15\xa4\x6a\x69\x97\x83\x55\x4f\xd2\x5c\x67\x6a\x1a\x3d\x1f\xcf\xcb\xcd\x4e\x3b\x33\x59\xe4\x47\x1b\x56\x52\x98\x2e\x4b\x2c\x2f\xd7\xe1\x51\x64\x34\x33\x5a\x6c\x13\x45\x63\x9f\x8d\xc6\xcb\x54\x62\x4d\xbd\x88\x9b\xef\x63\x92\x80\xa3\x95\xb7\x6c\x2f\xa2\xee\x2e\x5e\x54\xcf\xe0\x22\x0e\x77\x9f\xfe\xee\xb8\x40\x9f\xe3\xd3\x0f\x12\x50\x4c\x30\x05\x1e\xf6\x94\x05\x2c\x66\x51\x2a\x5d\x85\x00\x67\x92\x2a\x24\x90\x12\x52\x48\x20\x29\x21\x02\x61\xcb\xa1\x0e\x07\x00\x78\x9c\x59\xb6\xe5\x3a\x40\xa8\x6b\xb8\xb8\xec\xf9\x18\x70\x70\x71\x1e\xf5\x5c\xba\xff\x2d\x3e\x24\x25\x96\xef\xe6\xdd\xb7\xee\xeb\xd1\x68\xa5\x04\xe3\x9b\xc8\x38\x36\x18\xd5\x86\x2a\x9d\xb2\x6a\x24\xc8\x05\x83\x4c\xac\x66\xa6\xd3\x6c\xe5\x38\x5a\x1c\x5b\x93\x89\x13\x2b\x6c\x54\x42\xb7\x50\x97\xcd\x92\x49\xd1\xb6\xf3\x11\x98\x37\x85\xa5\x45\x9b\x91\xe2\x6a\x5e\x90\xad\x0d\x9a\xca\xc1\x2a\x39\xcc\xd5\x82\xbd\x33\xee\x70\x71\x94\x5f\xb8\x89\x29
\x98\x2f\xdc\xde\x1c\x29\x48\x5a\xb9\x96\xd1\x8c\xf8\xc3\x5e\x1c\xa4\xdb\x49\x39\x08\xcb\x0e\x10\x49\xbb\x37\x6c\x4d\xa5\xbe\x69\x6d\xe6\x23\xad\xe1\x6f\xbc\x68\x63\xd0\x91\x72\x98\xaa\xcc\x79\x9d\xa6\xe0\x66\x31\xea\xd5\xcb\x28\x96\x80\xb3\x71\x54\xcf\x2e\xe7\xb1\x5d\xbc\x28\x9d\xc1\x45\xcc\xdc\x7d\xfa\xbb\xe3\x02\x7e\x8e\x4f\x3f\xd8\xd0\xe1\x2e\x87\x96\xb2\x2d\xc1\x81\x0d\x19\xb2\x01\x42\x0e\x86\x96\xab\x6c\xc4\x18\x24\x8a\x6d\xfd\x54\x22\xc1\x08\xb7\xa0\x2d\x1d\x66\x3b\x14\x5a\xb6\x25\xb0\x0b\xdc\x6b\xb8\xb8\xb8\xdf\x4d\xb6\x52\x5f\x8e\x26\x4f\xa5\x4f\x3f\x1e\x89\x80\x7c\x37\xef\xbe\x75\x5f\x8f\xa7\x52\x70\x35\x99\x8c\x53\xb1\x55\x73\xa2\xf9\xcb\xd5\x44\x4d\x07\xcd\x2a\xed\x46\x97\xbd\xa0\x95\x0a\xec\x61\x72\x91\x48\x4e\xc6\x71\x60\xcc\xd0\x22\x53\x49\x4d\xb3\xeb\x7c\xca\xad\xc5\x52\x91\xf6\x14\x38\xb8\x1a\x51\xa5\xec\xd8\xaa\x9b\x4a\x0f\x93\xee\x1a\xb5\x67\xe1\xf9\xd1\x3c\xaa\x7d\x94\x5f\x24\x65\x6e\xa1\x02\x91\x2d\xe4\xa3\xd3\x79\xa5\xb1\x5a\x59\x13\xcb\x6b\x96\x36\xa9\x6c\x7f\xbc\xec\x34\xea\x33\xc7\xec\x8e\x06\x95\x82\x13\x2e\x44\xc3\x23\x35\x37\x8a\xd3\x66\xdf\x53\x69\x23\x12\x2d\xe1\xf0\xaa\x50\x6a\x71\x37\x62\xd7\x22\xb3\x61\xa6\x9d\xd3\x47\x0d\x6f\x58\x1e\x90\x1d\x2e\xf2\x67\x70\xa1\xcd\x76\x9f\xfe\xee\xb8\x00\x9f\xe3\xd3\x0f\xc0\x75\x04\x83\x40\x39\x8c\x48\xc5\xa1\x70\x09\x05\x02\xdb\x4c\x42\x40\x21\xc5\x36\x65\x42\xda\xca\x61\x9c\x20\x6e\x71\x04\xa8\xb0\x25\xa6\x5c\x29\x1b\x58\x8e\xb2\x9c\x6b\xb8\xb8\xb8\xdf\x4d\x28\x87\xf4\x32\x6a\x9e\x4a\x9f\x7e\xed\x94\x08\xf0\x6e\xde\x7d\xf3\xbe\x1e\x4e\x37\x4b\x3c\x59\x56\x4e\x13\x26\x62\xf1\xd9\x48\x4f\x24\x2a\xb3\xba\x6f\xf3\xa4\x1b\x53\xc3\x74\xc2\xa7\x46\x83\x4f\x62\x53\x36\xf5\x81\x5d\x99\x46\xed\xb2\x1b\x2f\x24\x82\x4e\x78\x4d\x3d\xe1\xf4\xf1\x2c\x3c\xc8\xb5\xbc\xcd\xa4\x93\x4a\x46\x98\xbd\x8a\xf6\x4d\x98\x3f\xc2\xc5\x51\xbf\x2a\xb4\xc9\x57\xeb\x35\x14\x78\xab\x55\x39\x53\x75\x41\xa7\xad\x8f\xea\x61\xb7\x17\xce\x53\x80\xab\x46\x67\x3a\x6e\xe4\xb2\x39\x50\xc9\xb7\x27\x13\x6b\x14\xce\xb7\x9c\x62\xb6\xc8\xeb\x7d\x37\x3f\x8d\xb2\xcd\x24\x22\x24\x4a\x3b\x4b\xbf\x31\xd0\x1c\x66\x16\x0c\xea\xf0\x48\x7c\x87\x8b\xf4\x5b\x5c\x14\x22\x99\xdd\xa7\xbf\x39\x2e\x88\xfc\x1c\x9f\x7e\x20\xd0\xa6\x50\x28\x1b\x53\x8e\x21\x43\xae\x6d\x61\x49\x84\x74\xa9\xe2\xd2\x81\x0e\x20\x4a\x31\x87\xb9\x40\x38\x50\x62\x08\x6d\x6a\x7b\xdc\x95\xb6\xf0\xa4\x65\x41\xee\x6e\x5d\x7f\x7f\x9f\xe8\x0c\x2e\x2e\xee\x68\x13\x8e\x00\x7e\xbf\x74\xff\x2b\xb7\x58\x72\x0a\xde\xc9\xbb\x99\xbc\x79\x5f\x2f\x09\x93\x85\x29\x74\x56\xcd\xd9\xbc\xdb\x4f\x0f\x6b\x94\x2e\x9d\x68\x81\x4e\xed\x52\x37\xda\x5b\x25\xe1\x02\x23\xbb\x4b\xc7\x1d\xaf\xae\x6a\x66\x22\x9c\xf0\xeb\xb0\xd4\x77\xc2\x78\x0e\x07\x51\xa7\x25\x48\x35\x67\xba\x01\x0e\x3b\x95\x80\xd0\xa6\x99\x89\x17\xc2\xc5\x32\xd9\xf5\xe3\x16\x17\xde\x51\x7e\x31\x2d\x17\x72\xd1\x70\xa2\xc3\x8b\x69\x18\x75\x07\x5a\x78\xd8\x5f\xd1\xb8\xab\x1a\x65\xad\xd5\xe4\x85\xe1\x3a\xe5\xcc\xd7\x2a\xaf\x0f\x32\x1a\xb6\x7b\x04\x2e\x67\x93\x75\x3e\x82\xfb\xed\xce\x50\xd3\x6a\xc3\x62\xc4\x8f\x6e\x15\x03\xe3\xf4\x2c\xee\x0d\xc2\xb3\x7e\xa1\x6e\x58\x3b\x5c\xe8\x67\x70\x21\xfb\xbb\x4f\x57\x70\xf1\x9f\xf6\xb7\x77\xad\x3b\x71\x21\x3e\xc7\xa7\x1f\x6c\x22\x95\x05\x94\x0d\x3d\xc5\x90\xf4\x1c\xe4\x48\xd7\x43\x8e\xe4\xcc\x76\xa4\xe4\xd0\x53\x2e\xf3\x5c\xe1\x22\xca\x95\xa3\x38\x16\x8a\x50\x07\x7b\x42\x32\xee\x42\xca\xe4\xb5\x78\x71\x71\x87\x82\x08\x44\xd0\xe5\x79\x94\x40\xbb\x4d\xbf\xa7\xdf\x2c\x06\x08\xbc\x93\x76\x33\x79\xf3\x32\x6d\x59\x05\x85\xa9\x47\xfd\xa1\xc8\x65\xf0\x7a\xdd\xb0\xc4\xa0\x91\x0a\xc3\x05\x32\x32\x0d\x5e\x70\x08\x4d\x2c\x23\x7e\xdd\x33\x6b\x23\x66\xc5\xe
b\x38\x57\xb4\xeb\xa5\x61\xaf\x34\xd6\xd6\x46\xab\x95\xe9\xc7\xeb\x56\x4d\xcb\xc9\xe4\xa0\x96\xed\xe7\x6b\x76\x2a\xb0\xb4\xca\xfa\x25\x5c\x78\x47\xf1\xa2\x64\xb4\x89\x33\x21\xf1\x80\x8f\xe7\x0d\x8c\x03\x32\xb1\xad\x86\xc5\xc3\xd1\xb8\x9b\x72\x44\xd1\xae\xe8\x8d\xa8\x33\x2f\xe9\x95\xa2\x98\x54\xda\x8e\xcd\x87\xb8\xda\xd0\x93\xc0\x5a\x08\x7f\xbe\xe8\xac\xea\xfd\x58\x2b\xe3\x31\x92\x8f\xa5\xc2\xb3\x55\xb3\x5d\x6a\x64\xeb\xc5\x2d\xe1\xb8\x76\x06\x16\xe4\xdd\x63\x83\xff\x69\x7f\x1f\x0a\x17\xfc\x73\x5c\xfa\x81\x03\xe6\x4a\x22\x2c\x46\x2d\x00\x29\x95\x0e\xc6\x88\x11\xc8\x84\x92\x10\x7b\xc4\x26\x12\x00\x4c\x24\x41\x1e\x97\x96\xb2\x24\xf3\x18\x64\x16\xc0\x4a\x0a\x24\xb9\x83\xaf\xc1\xe2\xe2\x86\x36\x91\x14\xb2\xcb\x49\xf9\xb6\x74\x97\x5e\x1c\x7e\xbd\x1a\x49\xf8\x4e\xda\xcd\xe4\xad\xcb\xb4\x38\x3a\xeb\xa7\x47\xfd\x20\x5a\x6b\x2d\x37\x99\x5e\x22\x60\x66\x11\xcc\x32\x5a\xbc\x5c\xcd\xe1\xc1\x7c\x5c\x97\x1d\xab\x5d\x1c\xb5\x47\x5e\x1c\xea\x35\x06\x8b\x34\xd0\x93\xb0\x3b\x5a\x90\xb4\x15\x49\x55\xdb\xc9\xd8\x28\xbb\xac\x94\x48\x6b\xd4\x4f\x75\x66\x3c\xe0\x23\x8d\xc3\x3d\x18\x76\xb8\x38\x4a\x2f\x44\x21\xcf\x63\x46\xb7\x1f\xac\xea\xe3\x65\xdf\xb2\x86\x83\xa8\x35\x31\x37\x03\x4b\xd7\xba\x65\x68\x56\x2a\x3c\x9e\x8f\x56\x3b\x91\x42\xca\x5f\xe4\x1b\x1d\x1f\x8a\x4e\x72\xa5\x35\x39\xf7\x99\x16\xab\xe8\x63\x6b\x19\x17\x8b\x46\x6f\xd1\xc8\x75\xa3\x1b\xa5\x9a\xf1\x76\xac\x58\xd9\xd2\x8f\x89\x33\xb8\xc0\xef\x1e\x1b\xfc\x4f\xfb\xfb\x10\x2e\xd8\xe7\xf8\xf4\x03\x93\x82\x00\x25\xa0\xa7\xb8\xa2\x82\x58\x96\x0b\x3c\x84\x5c\xcf\x12\x52\x29\xce\xb8\xe3\x62\x8a\x08\x91\x9c\x32\xac\x08\x87\x96\x27\x2c\x8f\x71\x9b\x62\x4f\xb8\x84\xef\x56\x8d\x2e\x6c\x5f\x5c\xde\xa0\xa0\x80\x51\x72\x31\xf9\x78\x2e\x7d\xfa\x81\x75\xca\xe0\x51\xda\x7d\xf2\xf7\x2d\xc4\x9f\x30\xf0\x1e\x26\xd0\x40\x8f\x07\xa9\xb0\xd7\xef\x64\x27\x25\xe1\x63\xe6\x2b\x8a\x3c\xc3\x28\x9b\x19\xb7\x5e\xec\x39\x56\x38\x2b\x87\xb3\x4a\xd8\x30\x57\x5a\xbb\x92\x2e\xe6\x92\x85\xf5\x70\x5e\x6a\x17\x1a\x9b\x41\xa3\xd4\x9a\xe4\xd6\xc9\x8a\x96\x4c\xc7\xeb\x85\x54\xc6\x2c\x65\xc6\x55\xcf\xd6\xfc\xea\xae\x0f\x77\x98\x38\xea\xd3\x4e\xaf\xd9\xae\x34\x6b\x09\xcf\x37\x56\xeb\x65\x2b\xb1\xce\x56\xbb\x6c\x52\x55\x26\x41\x65\x73\x63\x6c\x0a\x43\x6b\xda\xae\x45\xea\xf3\xf8\x62\xb6\x34\x7a\x96\xed\x96\xc2\xb3\x76\x4b\x11\x9c\x9a\x35\xf4\x69\xcc\x2e\xc9\x76\xbd\x30\x37\x1b\x2c\x47\x9d\x45\x41\xb4\x96\x51\x73\xb4\xc3\x04\x31\x48\xce\xda\x8c\xd0\x11\x00\xa3\xb3\xe6\x89\x4b\xfd\x15\x30\x81\xee\xc6\x04\xfd\x1c\x7f\x7e\xa0\x4a\x79\x10\x30\x6e\x01\xcf\x76\x6d\x02\x31\x82\x8a\x70\x64\x41\x07\x12\x4f\xb9\x0e\xa5\xd8\x85\x98\xe2\xed\xf8\xed\x2a\x8b\x78\xae\x8b\xa1\x74\x81\x63\x71\x17\x59\xde\xb5\x23\xe6\xf0\xe2\x22\x2c\x85\x82\x92\x8b\x29\xf7\x73\xe9\xe1\x67\xe6\x91\xc0\x08\x5c\xc1\x04\x13\x37\x62\x02\xf2\x85\x9e\x54\xed\xb0\x9c\x45\xf3\x1d\x9a\xca\x76\x96\xd5\xc8\x6c\x55\x2b\x9a\x46\xa5\x6f\xda\x6a\x54\xc4\x95\xa4\x1b\xcf\x7a\xe5\x09\xe9\x25\x63\xa9\xd4\x50\x63\xd9\x82\x1b\x14\x9d\xf2\x62\xc4\xcc\x55\xa7\x87\x3a\x55\xb3\x9f\x4c\xc0\x5a\x11\x05\xe5\x9a\x11\xed\xc5\xd3\xd2\xd9\xf5\xe1\x16\x13\xea\x28\xad\x40\xd3\x70\xd3\xc5\x2c\x59\x8d\x0e\x12\x93\xd8\x50\x0c\x57\x1b\x13\x27\x79\x92\x45\xfb\xf5\x65\xbc\x5b\x4f\x2f\xca\xf3\xd2\x2a\x99\x4f\xc7\x9a\x12\x99\xdd\x06\x8d\xdb\x85\x55\xb3\x93\xc9\xe7\x23\x7a\xc9\x81\xc1\x38\xb9\x04\xed\x69\xc2\x9e\x8d\xa7\x85\x46\xb4\x9b\xcb\xd5\xa4\xde\xd8\x29\x01\xce\x60\x62\x5c\x3a\x71\xa9\xbf\x02\x26\xee\x8f\x13\xe4\x73\xfc\xf9\x81\x10\x20\x2d\xc1\xa1\x4b\xb7\xc3\x33\xb0\x99\x05\x2c\x86\x01\xc4\x4c\x09\xe0\xb8\x88\x4a\x0b\x63\xec\x58\x94\x22\x80\x5d\x1b\x52\x22\x5c\x0c\xa9\x
07\xa5\xf2\x88\x45\xaf\x2e\x43\x5d\xdc\xe6\xa6\x48\xc8\x2b\x88\x79\x2a\x85\x42\x48\x2e\x28\x83\x00\x91\x6b\x98\xe0\x37\x62\x02\x94\x32\xe3\x54\xaa\x56\x4d\x56\x73\x56\x7b\xd4\xd5\x2a\x76\x10\x37\x32\xad\x60\x53\xcc\xd6\xca\x9a\x9c\xd6\xca\x46\xbe\x26\x5a\x1c\x8c\x9c\x49\xb0\xac\xba\x59\x73\xa3\x45\x5a\x66\xcf\x55\x81\x48\xce\xd1\xb8\x46\x2a\x9d\x2a\x64\xae\x1c\x09\x83\x93\x94\x35\x9c\xd3\xea\x74\xba\xeb\xc3\x1d\x26\x8e\x72\x8a\x7a\xdd\x81\xa3\x66\xa7\x56\x37\x03\x3d\x63\xb1\xa4\x93\x4a\x4f\xad\x2e\x5c\x4f\x57\x69\x12\x2b\x85\xa3\x4e\x66\xec\x27\xeb\x78\xd9\x1d\x64\xc2\x0b\x2f\x61\xe6\x7c\x9a\x41\x7e\x3c\xdf\x30\x2a\xf5\xb1\x31\x18\x7a\x33\xb1\x49\x25\x0a\x84\x91\x66\x3b\xd3\xaa\xf0\xb9\x0d\xc6\x93\x5d\x9c\x58\x9e\xc1\x44\x30\x3d\x71\xa9\xbf\x27\x26\xf0\xe7\xf8\xf3\x83\x25\x99\x63\x7b\xd2\xc3\x0e\x72\xb1\x43\x99\xed\x42\x62\x13\x40\x01\x22\xcc\xa2\x0a\x4b\x45\x25\x02\x16\x02\x50\x49\x97\x31\x87\x00\xdb\xa1\x96\xc5\x04\x17\xc8\x53\xf4\x5a\x4e\x71\x79\x53\x82\x12\x88\xae\xcc\x9d\x9e\x4a\xa1\x20\x4c\x60\x2a\x31\x47\xe2\x1a\x26\xd8\x8d\x98\x60\xfd\xe9\x82\x0a\x91\xab\xac\xea\x0b\xb3\x0c\xa2\xa5\x4c\x72\xa4\x0a\xaa\x5e\x8e\xf9\x9a\x1a\xd6\x6d\x3d\x26\xea\x63\x2b\xd5\x5e\xd0\x4a\xcb\x28\xaf\x3b\x8d\xb8\x97\xd5\xd7\x55\xd6\xed\x81\x81\x18\xae\x34\x9d\x15\x63\x19\x2b\xb9\xd2\x82\xc2\x72\xbc\x70\xb4\x72\x7e\x5a\x3d\x5a\x7e\x52\x47\x6e\x2a\x73\x68\x39\x8e\x5a\x03\xb1\x5a\x31\x3d\xdf\x2f\x41\x15\x31\x79\xa6\xd8\xe9\x4d\x33\xe3\x44\xd0\x8a\x0f\xb2\x09\x27\x11\x5d\x7a\x78\x15\x5b\x3a\x28\x93\xeb\x07\xe9\xcd\x68\xd6\xec\x04\x46\x8c\xa7\x40\x21\x1b\x57\x71\x19\x2c\x5b\x5a\x8f\xa6\xad\xe1\x70\xa2\xba\x13\x50\xd0\xb6\x71\x28\x36\x3d\x83\x89\x61\xfa\xc4\xa5\xfe\x9e\x98\x40\x9f\xe3\xcf\x0f\x08\xd9\x80\xd9\x04\x48\x42\x98\x03\x3d\xe0\x20\x2e\x2c\x0f\x38\x0a\x52\xcb\x95\x92\xd1\x6d\x14\xf2\x10\x94\xcc\xa2\xb6\xa7\x5c\xe5\x52\xe6\x32\xe9\x62\xc6\xb9\x74\xe4\xb5\x6d\x3c\x70\x31\x93\xa6\x14\x89\xcb\x59\xf8\x73\x29\x14\x00\x0b\xc1\x10\x23\x18\x5d\xc3\x04\xbd\x11\x13\xeb\xec\xd2\x02\xb1\xaa\x39\x4d\x2d\x97\x43\x7f\x11\x2f\x24\x2c\xd4\x6a\x35\x82\xa2\xe8\xf4\xc7\xf5\x08\x57\xb0\x0e\x47\x00\xb9\x74\xed\xa6\xe1\x26\x65\xcd\xc2\x95\x54\xd9\x4f\x8a\x84\x9f\x69\x76\x1a\x28\xdd\x1d\x3b\x6a\x99\x5f\x8f\x47\x5c\xa5\x73\xe6\x2c\xb7\xea\x8f\xb5\xda\x7e\x69\x7f\x87\x89\xa3\x3e\x25\x83\xd2\x6a\x9e\x5e\x27\x7b\xf9\x61\xd5\x18\x4d\x56\x01\x9c\x33\x1b\x1b\x85\x6c\xbb\x1c\x89\x8d\x4d\x13\xf4\x64\x90\x2f\x15\x73\xf5\xa9\x08\xd7\x98\x18\x0f\x63\x56\x22\xa1\xaf\x48\x16\xb6\x83\xba\x85\xcd\x78\x4a\xa4\x5b\xd3\x26\xf7\x51\xb4\x51\x2f\xc7\xf2\xf5\xf1\x66\xb4\xda\xc6\xa1\x58\x70\x06\x13\xfd\xde\x89\x4b\xfd\x3d\x31\x01\x3f\xc7\x9f\x1f\x2c\xe6\x51\xdb\xe1\xc4\xb5\x24\x61\x92\x03\x49\xa4\xe3\x08\x8b\x0a\x9b\x7b\x0e\x70\x2c\x8a\x2c\xc9\x10\xf7\x2c\x68\x11\xea\x39\x8e\x47\x19\xb7\x98\xe5\x22\x0f\x43\x68\x49\x70\x25\xc7\x06\x97\x33\x06\x46\xe4\xe5\xab\x79\xcf\xa5\x90\x33\x20\x31\xa3\x12\xe2\x6b\x39\x36\x23\xb7\xc6\x09\x1b\x2e\x52\x15\xab\x9f\xec\x2f\xfb\x99\x4d\x7a\xd4\x9c\x45\x96\xf9\x51\x79\x5c\xcf\x44\x02\xcb\xf4\x36\xc5\xda\x70\x34\x06\x8b\xbc\x39\x2e\x65\x8d\x75\xd3\x2e\x2c\x73\xdc\xb4\x7d\xab\x9b\x4e\x0f\xe5\x32\x80\x8d\xf4\x78\x35\x59\x65\x1b\x9d\xb2\x53\xce\xb4\xe6\xd3\x4e\x7b\x08\xab\x62\xd7\x87\x5b\x4c\xb8\x47\xf9\x04\x9e\xa9\x55\xaa\xde\x53\x8d\xfe\xb4\x23\xd6\x7d\x8a\x32\x1b\x1d\xe6\xf9\x28\x22\xe6\x9b\x1e\x66\x71\xb7\x6f\x2f\x96\xee\xb8\xbe\x84\x4d\xc0\x2b\xa9\x69\xa0\x4f\xa3\x3c\xb2\x14\x66\xa9\x9b\x21\xa0\x3f\xca\xd3\x2c\xd4\xed\x20\xa6\x2a\x62\x1d\xe9\x46\x22\x91\x2e\xd6\xac\x1d\x26\x7a\x67\x30\x71\x0a\x89\xbf\x04\x26\xee\xcf\xb1\
xc1\xe7\xf8\xf3\x83\xed\x60\x8b\x5a\x92\x62\x41\x84\xa7\x90\x63\x13\xa6\x5c\x4e\x95\xa3\x6c\xc5\x6c\x0b\x48\x47\x50\xe6\x12\xee\x32\x4f\x59\x00\x51\x84\xb0\xa0\xc2\x15\xae\xc3\x2d\xcb\xdd\xdf\x86\xb8\x84\x89\xcb\xb3\x23\x01\xf0\xe5\x4d\xef\xe7\xd2\xfd\xdb\x20\x4c\x42\x41\xae\xe6\xd8\xe8\x46\x4c\xf0\x25\xab\x8f\xaa\xa4\x5c\x24\xd3\x59\x92\x37\x1a\x53\x16\xb6\x58\xcc\xe9\xd4\x61\x0a\x2e\x58\x3b\x53\x4e\x93\xba\xd1\x75\xd4\x18\xd5\x22\xfe\x30\xad\x92\x99\xc4\xa4\x06\xcd\x5a\x35\xbc\x1a\x4d\xd2\x2d\x33\x25\xb2\x35\x2b\xcd\x49\x3f\x52\xd1\x66\xe1\xc8\x24\xa3\x52\x19\xfa\x12\x27\xdc\xe3\x7c\x22\xd3\x0f\x24\x9f\x78\xa5\x6e\x37\x6d\xcc\x35\xbd\xa7\x62\x05\x1b\x9b\xcb\x3e\x33\x9a\xd5\x9e\xa4\x0d\x8b\x61\xc3\x01\xd5\xd8\x02\x82\x2a\x9e\x8e\x8c\x52\x4c\x0c\x68\xa9\x66\xf5\x51\xbe\x8e\x78\x29\x10\x85\x65\x54\x1b\xa6\xe6\x1d\xb1\xd4\xeb\xd9\x7c\xa7\x39\x4b\xef\xc8\xc7\xda\x67\x30\xd1\x26\x27\x2e\xf5\xb7\xc4\x04\x96\x9f\xe3\xcf\x0f\xb6\x40\x92\x59\x02\x3a\x36\x76\x08\x13\x0e\xe1\x1e\x04\x80\x31\x64\x39\x36\xc6\x5c\x41\x26\x04\x83\x1c\x23\x47\x21\x08\x5d\xd7\xb5\x30\x40\x50\x00\x29\x94\x00\xcc\xb9\x76\x34\x10\x5c\x8e\x04\x12\x71\x72\x39\xdb\x78\x2a\x85\x8c\x53\x80\x39\x22\x94\x5c\xcd\xb1\x6f\x5d\x8b\xe5\xed\x91\x15\xa4\x72\x45\xdd\xca\x9b\x11\x5d\x6b\xf9\x93\x5c\x1e\x93\xba\x9f\x4c\x6c\x72\x85\xb4\x89\xdd\x60\x99\x6e\xc6\x9a\x4c\xb3\x0b\x29\x7f\x91\x5b\xad\x8b\x22\x1d\x66\x3d\x92\x19\x45\x89\x86\x8b\xab\x42\x76\x6a\xea\xad\x6a\x7e\xb8\x69\x46\x03\x6d\xd8\x16\xad\xb9\xbe\xbf\x20\xba\xc7\xc4\x91\x9b\x96\x55\x2a\x3a\x99\xc7\xdb\x46\x31\x63\xcb\x9e\xe7\x64\x6a\xf9\x75\xac\xcd\x06\xca\x06\xbd\x60\x41\xdd\xba\x2c\xf4\x84\xae\xab\x11\xb2\x75\x58\xe9\x72\x73\x1c\x87\x11\x50\x62\xe1\x14\x98\xe7\x7b\xf9\x5a\x61\x59\xaf\x4f\x2a\x09\xc3\xc3\x4d\xe1\xd4\x36\x32\xe8\xc6\x32\x70\x97\x63\x3b\x67\x30\x71\x1c\xa1\x76\x7f\x7f\x05\x4c\xdc\x3d\x77\xc2\xe2\x73\xfc\xf9\xc1\xc3\x52\x32\xa8\xb8\xab\x28\x06\x00\xbb\xdb\xa4\x5d\x20\xce\x31\xc1\x40\x42\x87\x01\x05\x95\xe5\x2a\xcf\xb6\x30\x46\x00\xd9\x0a\x08\x8c\x89\x63\x29\xc6\x05\xb7\x95\x77\x2d\x4e\x5c\xdc\xce\x66\x00\xd3\x8b\x61\xe2\xa9\x10\x32\x8c\x80\xe0\x94\x23\x72\x2d\xc3\xa6\xf2\x46\x44\x44\x0a\xad\xa6\xf2\x54\x75\xa9\xc9\x41\xa0\x62\xee\xa6\xb4\xec\x39\xcd\x42\x6a\x8c\x17\xf3\x6e\xbe\x9d\x0d\xc7\xe2\x46\x7e\x32\x50\xa9\xc5\x62\xe1\x16\xf2\x0d\xa7\x55\x5a\x20\x2f\x96\x2f\x0d\x6b\xf9\xaa\x97\x8d\x66\x78\xa3\x9c\xa8\xdb\xed\x68\x64\xa9\xe3\xe8\x94\xab\x89\xe0\xf3\x7d\x0f\xee\x10\x71\xd4\xa3\x46\x24\xec\xc6\xa6\x8d\x72\xcd\xf1\x47\x99\x5c\x83\x7a\xe5\x5a\x3f\xe2\xce\xe2\x62\x94\x50\x61\xbf\x35\xf4\x86\x46\x0b\xb5\xf9\xa2\xd1\x8d\xe8\xb6\x3f\x02\xeb\xdc\xa2\x6e\x8a\x16\x9f\x8f\x9c\xa2\x31\x19\xc7\x1b\x2a\xb7\xa8\xad\x5a\xfe\x26\x4a\x40\xc7\x47\xbd\xb4\xdf\x19\x0e\x77\x33\xa7\xe6\x19\x44\x38\xf9\x13\x87\xfa\x2b\x20\xe2\xfe\x28\xc1\x3f\xc7\x9b\x1f\x3c\xc9\xa8\x47\x15\x67\x0a\xb9\x1e\x97\x44\x72\xaa\xb6\xe3\xb2\xc0\xd0\x96\xd4\x16\x4c\x39\x8e\xe4\x10\x7a\xdc\xe6\x9e\x0d\x09\x71\x3d\x69\x3b\x96\x83\x1d\x61\x71\x0b\x59\xd7\xa2\xc4\xc5\x1c\x9a\x41\x4a\xe5\xc5\x95\xd8\xe7\x52\x48\x85\x84\x98\x0b\x29\xe9\xb5\x0c\x9b\xde\xba\x12\x1b\xe9\xf7\xa5\x72\xe7\x39\x4f\x55\xfb\x90\xae\xcd\x66\xde\xee\xdb\x9d\xf9\x26\xe3\x98\x92\x7b\x3c\xa1\x63\xba\xc8\x2e\x85\x3f\x0c\xc6\x5d\xd5\xf0\xa6\xd4\xb4\x62\x91\x51\xd9\xce\x97\x40\xb5\x67\xb4\x0b\xa9\x31\xcb\x26\x46\x89\xc1\x98\x46\x56\xc5\x4e\xc7\x8a\x8c\xd3\x13\x7f\x3f\x93\xdf\x62\xc2\x39\x1a\xab\x8b\x05\xd3\x9e\x2f\x52\x86\x91\xf0\xa6\xe5\xf2\xc0\x75\xc7\x83\x6a\x51\x24\x7a\xc2\xaf\xcb\x8c\x0d\xd7\xa5\x5c\x64\xae\xaf\x9b\x49\x07\xcd\xd3\xcd\xc0\xb5\x8d
4f\xb4\x7b\x71\xfd\x0e\x5b\xde\xd7\x90\xe4\xb6\xf7\x3f\x7a\x1c\x9f\x7f\xff\x93\xc6\xfb\x2a\xa1\xbd\xaf\xc6\xa3\x9e\xe7\x85\x41\xe4\x51\x14\x46\x01\x0c\x68\x20\x39\x0e\x11\xa2\x04\x00\x42\x5c\xc4\x3d\x01\x21\xe5\x1c\x61\x48\x90\xc4\x21\xf6\x5c\xe1\xc1\xc8\x83\x28\x64\xd8\x77\xd1\x9e\x88\x43\x1e\xda\x8d\x1e\x07\xfd\x99\x9e\x93\x88\xbf\xe3\x71\x3c\x3f\x3d\x2f\x49\x72\xab\xc7\xd1\xba\x18\xd8\x57\x73\x23\xc6\x5c\xbb\xe2\x71\xa4\xd4\x42\xa1\xd7\x94\x9b\x12\x6e\xe6\x26\x65\xb6\xac\x26\x6b\x30\x35\x0d\x76\x95\x4e\x35\xe5\x94\x31\xed\xae\x86\xad\x8d\x95\x6a\x47\xd3\xf2\xdc\x4e\x0f\x5d\x98\xda\x95\xe6\xd3\xa4\x37\xd4\x4d\x62\x25\xa2\xbe\x63\x56\xcc\x5a\xd0\x6a\xb9\xbb\xa6\x59\x40\x5d\x32\xcc\xf0\x51\x4d\xbf\x97\xc7\x71\x3a\x07\x3e\xb9\x82\xf7\xaa\xa1\x59\x07\x27\x5f\x3f\xfe\xf8\x75\x2b\xf8\x3b\xd2\xfb\x2d\x1e\xd2\x89\xc7\x71\xc3\x2a\x74\x61\x92\x41\xf3\xbb\x57\xa1\x7f\xa3\x55\xb3\xa2\xfc\x75\x36\xe9\x4d\xfb\x88\xe2\xf3\x6e\x3f\xd6\xce\x51\x4c\x53\xed\x2f\xe6\xdd\xad\x63\x7d\x29\x9b\x5f\x49\xef\x93\xfd\x2d\xf7\xd2\x06\x4c\xfa\xf6\x02\x6e\xa7\xb5\xd2\xac\xbf\x82\xc5\xb2\x3b\xf2\x81\x53\x6b\x37\xb4\x62\xc2\x99\xd1\xa1\x6f\xaf\x1d\x9d\xf5\xf3\xda\x9a\x0e\x17\x65\x7f\x37\xcb\xad\x10\xed\x6c\xc3\x74\x72\x44\xb3\x6a\xad\x5a\xd7\x1b\x8d\xc6\x1c\x6e\xed\xe2\x78\x69\x76\x42\x0a\x53\xf7\x3f\xab\xa1\x9e\x17\x20\x8f\x52\x41\x19\xa6\x50\xb8\x58\x84\xd2\xe3\x44\x7a\x01\xf1\x84\x1f\x06\x0c\x78\x32\xf2\x3c\x8f\xbb\xcc\x43\x02\x87\x90\xba\xa1\x14\x94\x08\x0c\x90\xeb\x93\xbd\xfd\x25\xb7\xdb\xdf\xc7\x12\x78\x8f\x57\x11\xbf\x65\x61\x09\x60\x00\x5d\xf5\x33\x5e\x9e\x9e\x17\xdf\x8a\x65\x7f\xb7\xc7\x81\x74\x2e\x06\x56\xbd\x90\x47\x25\x65\x86\xf6\x30\x57\x81\xd3\x59\x42\x6d\x8d\x8d\x6d\x4f\x89\xb2\x9b\x99\xd1\x6c\x25\x17\x06\xac\x1b\x01\x2a\x45\xad\x7e\x65\xbc\x4e\xef\x4e\x97\x82\x2f\x7e\xc6\x93\x3d\xaf\x36\xfb\xb4\xdb\x0f\xdc\x69\xb2\x3d\x74\xd5\xc2\x78\xad\x6f\xe6\xee\x36\x83\x22\x5e\x2d\xd6\xab\x7a\xd0\x1b\xa1\x96\xe6\x16\x14\x91\xb8\x55\x5e\x53\x51\xb3\x73\xda\x9f\x93\x1d\xc7\x4a\xa5\xb1\x2e\xcc\x82\xfc\xac\x33\x1a\x24\x32\x86\x95\xe0\x70\x61\xeb\x29\x3a\x1b\xec\xd4\xb9\x55\xdf\x66\x47\x15\x3c\xf7\x57\x46\x5a\x16\x87\x46\x1e\xe7\x5b\xda\x58\x4a\xb4\xb4\x57\x69\x43\x96\xf3\xdb\x39\xf5\x54\xb4\xad\xf8\xb8\xe1\x54\xc0\x2a\xd3\x6d\x55\xe6\x04\x18\xd9\x9e\x62\x59\xed\x03\x7e\xd3\x6b\x36\xea\xc1\x6e\x58\x64\xc5\x46\xaa\xe7\xd4\x68\x69\x95\xc2\xb8\x61\xd7\x5c\x17\xad\xd3\xad\x26\xae\x0c\x33\x76\xab\x5c\x4e\x26\xb8\xb9\xb2\x02\x6f\x50\x58\x14\xd5\xcd\x58\x18\x25\x27\xb4\xca\x6d\x09\x2a\x6e\xaf\x2e\x67\xa9\x44\x5f\x58\x89\xaa\x36\xb1\x74\xf5\x62\x7d\x51\x38\x99\xd3\xc7\x1f\xdf\xf1\x15\xdf\x59\x6f\x9c\x40\xc5\xb0\xdf\x8d\xe2\xc9\x60\x5f\xca\xcc\x97\xac\x37\xee\x47\xef\x57\xaf\x8f\xd4\xe3\x20\x1c\x6c\x10\x3c\xfd\xd2\x80\x96\x9c\x89\xde\x98\xae\x88\x39\x6c\xf7\x72\x85\x94\xf0\xb2\xf3\x4e\x30\x4a\x80\x5e\xbd\xd6\xc1\x0e\x95\xc3\xca\x8b\x4c\x9d\xda\xc3\xf8\xa7\x42\xe8\x86\xf7\xbb\x8a\x5f\xbb\xe1\xfd\x7f\x4b\x1b\x7c\x0b\xbd\x69\xae\xd7\xbe\x91\xde\xc3\x83\x9b\xe7\x73\x39\xb1\x81\xdf\x38\x3f\xfe\x6e\xf3\xf9\x15\x7f\x3f\x69\x13\xd5\x46\x5b\xcf\x3c\x22\xd5\xae\xbd\xe3\x53\xfc\xfa\xec\xfb\x5f\x7c\xe2\xcf\xbf\xff\xad\x35\xf5\x89\x3e\x8b\x33\x9f\xbd\x63\x64\x64\xea\x84\xff\xbf\xe6\x7c\xbe\xd0\x3f\x5f\x49\xef\xd3\x9a\x1a\xd4\xea\xf3\x74\x0e\x57\x19\x69\xd1\xa8\x53\x4d\x27\x97\xd2\xab\x6d\xd6\xa1\x4b\xfd\xf5\x2c\x0d\xfd\xce\x04\x04\x5c\xcf\x6f\x95\xa4\xdd\xea\xaa\x72\x8b\x2a\x39\x2b\x0a\x66\x7d\xbe\xde\x5a\x35\x55\xcc\x4a\x8d\xf5\x62\xde\x6f\x16\xe1\x50\x0b\xf2\x1b\x67\x58\xf3\x4a\x45\x55\xf9\xef\xff\xfe\x1d\x22\x08\x07\x4d\x9f\xf6\xc2\x5e\xbd\
xed\xa0\x4c\xc9\x9e\xad\x03\x09\xfb\x30\x3b\x5c\xcc\x15\x54\x97\xa0\xd5\xa9\xa1\x9a\x66\x25\x97\x5b\x6f\x87\x1b\xd4\xce\xdb\xe5\xb9\x6b\xa9\xe9\xa1\xde\x03\x86\x15\x68\x8d\x31\x8c\xdc\x75\x34\xb1\xd2\xc9\x7e\x27\x67\x6f\xd6\x83\x9f\x2f\xd6\xd1\x27\x17\xeb\x41\x44\x23\x10\x0a\x0f\x7b\x14\xe3\x20\x90\x7e\xe0\x82\x28\xe2\x94\x00\x2f\x8c\x70\x28\x43\x28\x31\x93\x80\x81\x48\xb8\xc4\x03\x04\x12\x0f\x84\xd8\x87\x1e\xc1\x90\x7a\xf0\x70\x3c\x80\x6f\x5f\xac\xff\x2c\x9c\x8a\x20\x40\x28\xba\xba\x58\x7f\x7e\x7a\x5e\x93\xf6\xd6\xcd\xb2\xe6\x85\xc4\xbc\xda\x2c\x8b\xad\x18\x9e\xbe\x7e\xb2\x58\xde\xea\xeb\x4e\x43\x66\x9b\xe5\x7a\x33\x0b\xa7\x5d\xcf\x0b\xa7\xd5\xde\xc6\xd9\x75\x56\xd4\x50\x81\xc8\xa1\x55\xb5\xbf\xd9\x39\xf6\xda\xdc\xf6\x57\x65\x31\x00\x89\x90\x14\x48\x07\xb5\x97\xe3\x9c\x90\x73\xa1\xd7\x22\x63\x6a\x94\x76\xe9\xe1\x38\x3b\x2f\xda\x6e\xde\x77\x3e\x14\x7e\x75\x36\x23\xae\x2e\x5e\xe3\x1f\x77\x07\xa3\x90\xb0\xe4\xf1\xeb\x8d\xe3\x8f\x5f\x68\xec\xee\x47\xef\xb7\x2c\xb6\x4f\x36\xcb\x6e\x50\xf6\x28\xe0\xbd\xd9\x77\x2b\xfb\xbf\x91\x71\x52\x94\xbf\xce\x39\x78\x73\xb3\x6c\x1d\x9f\x77\xfb\xb1\x5e\x9c\xb8\xa4\xce\x17\xf3\xee\xd6\xb1\xbe\x94\xcd\xaf\xa4\xf7\xc9\xb0\x8b\x8e\x30\xea\xab\xca\x00\x64\xea\x9b\x85\x3b\x1e\x22\x3c\xed\x8f\x45\xa3\x40\xbc\x44\x55\x59\x58\xea\x62\x06\x26\x05\x1f\x69\x42\xf5\x27\x79\x60\x99\x81\x35\x5a\xa6\x4b\x46\x30\xce\x4f\xd5\x95\xe5\x59\x29\xa8\x6f\x73\xdd\xf4\x7c\x4d\xa7\xa9\x59\xba\x86\xba\x38\x9b\xb9\x7f\x78\x9c\x2f\x5c\x24\x22\x0f\xd1\x80\x70\x41\x22\xcc\x5d\xee\x02\x2e\x42\x18\x85\x61\x04\x42\x0f\x03\xdf\x65\x24\x94\x92\xf0\x43\xba\x83\xeb\x93\xd0\x05\x8c\xfb\x3e\x43\x90\xb0\xc3\x66\x19\x7a\x3e\x9e\x77\xaa\x6a\x21\x57\x76\x98\xe6\xe4\x9b\x25\xcb\xb6\x99\x69\x59\xa5\x66\xba\x50\x2f\xa6\x1a\x86\x6d\xd9\xd5\x96\x51\x6f\x34\x48\xbe\xa1\x54\xea\xa9\x92\xad\xd3\x8a\x9d\x57\xd2\x0f\xc7\x9a\xdb\x00\x41\xf2\x33\x4b\x8c\x81\xa0\x57\x8f\xad\x0e\x4f\x0f\xc9\x64\x67\x25\xdd\x1f\x25\x3f\xad\x75\x0a\xf5\xb4\xde\x75\x9a\xc1\x8e\xee\xc4\x80\xe3\x66\x76\x3b\x2b\xa3\x4a\x51\xd0\x56\xa5\x01\x97\xf9\xb9\x5a\x2c\x2f\x87\xd1\xfc\x60\x84\x1f\x8f\xe7\x0f\xdf\xcc\x5e\x0c\xf1\x89\x25\x3e\x04\xf5\x64\xbc\x61\x66\xd1\xb2\xb5\xb3\xe7\xa9\xb4\xb2\x2e\x9c\x26\x67\x56\x59\x2b\xb1\xe9\x1f\x1e\x63\x6d\x63\x16\x75\x2e\xd7\xd9\x6a\xc5\x69\x85\x33\xcb\x82\xbb\x82\x33\x08\x27\x85\xf9\xb2\x33\x36\x33\x74\xa5\x8f\x50\x5b\x9d\xeb\xc3\xf4\xa4\x11\xb5\x4b\xa2\x9e\x53\x71\xaa\x62\x0c\x4b\x89\x51\x28\xe7\x93\x5a\x73\x50\xd7\x0b\x3d\x6d\xd9\x29\xe9\x63\xab\x33\x80\x9d\x59\x41\xfd\xd8\xf1\x7b\xee\x54\x54\xaf\x5a\xdc\x93\x3f\x2a\x54\xb5\x4d\xad\x10\x9a\x9d\x00\x5b\x23\x6b\xdb\x5c\x05\xbe\x97\x5b\xa4\x5b\x1a\xde\x0d\x06\x19\xaf\x41\x96\x6b\x03\xaa\xcf\x7d\x3b\xac\x32\x3a\xd3\x23\xef\x1e\x5d\xa1\x57\x19\x1b\x3f\xb1\xa0\xf7\x7b\x7f\x36\xce\xfb\x4f\xb4\xe4\xee\xd0\x7e\x9c\xf5\x1b\xc3\xce\x94\x37\x64\x98\xc1\x7e\xc2\x4f\x9a\xa1\x84\x9b\xf9\x14\x14\xfb\x76\x54\x6a\xd4\xea\x0b\xaf\xcb\x95\x5e\x2d\x3f\x19\xf1\x47\xd6\xd7\x97\x4e\x03\x0e\x0a\x2f\xbc\x2f\x6f\x0a\xbd\x02\xb8\xde\xdf\x42\x9c\xfe\x5e\xca\x9e\x56\xd8\xcb\xdd\xc9\x0b\xde\xd0\xfc\xbb\x93\xc7\x9f\xec\xd3\xde\x0a\x19\x85\x23\xb5\x07\x17\xf3\x75\x22\xdf\xfb\x9a\x1c\xdc\xf6\xfe\xec\x38\xde\xfb\x9f\x34\xf3\x57\x4d\xbe\xfb\x6a\x66\x18\x70\x81\xb1\x4f\x22\xd7\xf5\x80\x08\x19\x62\x40\xba\x7e\x84\x39\x24\x01\xf4\x5c\x2f\x44\x9c\x21\xe8\x22\x0f\x23\x11\x06\x11\xe6\x84\xd1\x50\x4a\xd7\x8d\xb0\x90\xbe\xeb\xed\x35\x33\xbc\xa7\x66\xfe\xa9\x8f\x44\x30\x47\xd7\x35\xf3\xfe\xe9\x41\x33\x9f\xdd\x90\x71\xab\x66\xce\x5c\x0c\xf1\x07\x35\xb3\xb6\x2d\xec\x4e\x44\xe4\x44\x33\x1b\x0d\xb2\x9d\x65
\x82\x3e\x63\x51\x3b\x9d\xee\xce\xb8\x93\x51\x6c\x2d\x40\x83\x41\x69\xe9\xed\x36\xc9\xae\x6b\xfa\x05\xb4\xee\x67\xf0\x78\x39\x35\xcb\xc5\x84\xb2\xeb\x6e\xd1\xac\xbe\xc8\x51\x65\xbc\x9d\x74\xc6\xfd\xee\x7a\xee\x34\x57\x7a\x95\xd5\x32\xb2\xd0\x09\x46\x35\x25\x5d\xbe\x9f\x66\x2e\x9c\xac\x3f\x62\x68\xc6\x6e\xfa\xc8\xbb\x47\xa6\x7c\x52\x33\xde\xf1\xfd\xb1\x2c\xc3\x89\x16\xdb\x1c\xda\x5f\xac\x99\x0b\x47\xdf\xe8\x76\xcd\x6c\x1b\xbb\x53\xcb\xf6\x86\x66\x3e\x39\xab\x8b\xa3\x19\xcd\x13\x6a\x1f\x03\x64\x5e\x71\xf1\x7d\xcd\x7c\x47\xcb\xf0\x99\xf7\x3f\x69\xe6\xaf\x9a\x7c\xf7\xd5\xcc\x0c\x32\x9f\x01\x09\x08\x0f\x23\x11\x49\xdf\x95\x40\xb8\x84\xef\xff\xb9\xc4\xf3\x00\x14\x84\x48\x9f\x32\x49\x89\x74\x03\xe4\xbb\x0c\x50\x09\x5c\xd7\xe3\x04\x31\x1e\x8a\xbd\x66\x06\xf7\xd4\xcc\x3f\x2b\xc0\x40\x28\xc7\x80\x5e\xd5\xcc\xcf\x4f\xcf\x2f\x1c\xba\x55\x33\x5f\x4e\xe3\x2b\x9a\xf9\xad\x32\x1d\xda\x0b\x5e\x59\x71\x2a\xad\xa2\xd5\x31\x55\x7b\xdd\x33\xdb\x8a\x29\xbd\x50\xc3\xad\x52\x52\xc7\x7d\xc7\xec\x38\xc3\x3a\x9c\xa8\xc3\x2d\xf7\x45\x6b\x86\xe6\xb9\x20\x57\x1a\xba\xf9\x11\x89\x86\x0a\x9d\xba\xf9\xc4\xa0\x5c\xc8\xad\x4b\xd5\xce\xa2\xb7\xee\xb7\x84\xa9\x6f\xac\x5a\x20\xb2\x4a\xff\x7e\xda\xf8\x44\x7b\xc5\xd1\x86\xfe\x91\x5f\x8f\xff\xbf\x0a\x83\xf9\x89\x36\xbe\xdf\xfb\xb5\x38\xef\x3f\x39\xf6\xbc\x5d\x1b\xbf\xf4\xa5\x0c\x8b\x3d\x9f\x5c\xbc\x4f\xbd\x1f\xfe\x97\x68\xda\xd7\x65\x78\xde\xd7\xb4\x7f\x91\xa6\x7f\x73\x27\x68\x73\x1b\x2d\x8f\xc5\x2b\x0a\x47\xde\xfe\x85\xbc\xf8\xcc\xfb\x9f\xac\x4e\xaa\x93\xe9\x83\x84\xd6\x37\x2d\xb5\x30\x54\x94\x11\x5c\x10\x3b\xe8\x73\x11\x85\x95\xf6\x30\xd3\x6d\x35\x1b\xfd\x9c\x97\x5f\xec\x36\xc3\x95\xbb\x9e\x80\x31\x6b\x8d\x97\x51\x27\xe1\xa4\x87\x93\xf1\x8a\xd5\x95\xf5\x4c\xa1\xb9\x2c\xb2\xd4\xca\x68\xdd\x8f\x34\x59\xc1\x6b\xe7\xfe\xc5\x33\x64\x14\x45\x0c\x4a\xc8\x38\x64\x91\x87\x28\x05\x80\xe0\xc8\xf5\x25\x13\xae\xcf\x7c\x88\x05\x40\x6e\x44\x21\x67\x2e\x04\x91\x8f\x24\x04\x22\xf2\x23\x9f\x10\x1a\x46\x14\xee\x17\xe6\x44\xde\xd1\xea\x00\xf9\x33\xab\xc3\x11\xbc\x5e\xf6\xe7\xe5\xe9\xf9\xdd\x74\xb7\x5a\x9d\x77\x82\x63\xe2\xf9\x03\x9b\x5c\x57\x77\x9c\x62\xde\xec\x36\xf0\x8c\x26\xfa\xaa\x39\x14\x4b\xdf\x1d\xd7\x79\xca\x9f\x69\xa3\x72\x33\xe3\xe1\x39\xac\x6c\x2b\xdb\x9a\xa7\xf4\xd3\x03\x57\x5d\x69\x89\x4a\x6e\x5d\x68\x8d\x8b\xb5\xc8\xeb\x97\x6b\xd9\xd9\x78\xb9\xad\xe7\xb6\xb0\x35\xa9\x85\xbe\x9f\xda\x4e\xd3\x77\xdc\xa9\x29\x9c\x7c\x35\x8e\x05\x80\x6f\xcf\x97\x93\xcf\x4f\x2c\xd0\xfd\xde\x1f\x2f\x51\xe2\x38\xf7\xef\xa8\xc5\x62\xa5\xa6\xdf\xfb\xfd\xaf\xf9\xf1\xfe\xd9\xc2\x57\xfa\x43\x3f\x39\x3f\x88\xd3\xdf\xc7\x97\x15\xde\xeb\xd1\x97\xf2\xfb\x31\x19\xe5\xf3\xef\x7f\x4e\xbf\xfb\x22\xc5\x70\x5f\xab\x11\x12\x06\x24\x14\x80\x53\x18\x72\x0e\x10\x61\xcc\x15\x82\x50\x41\x79\x10\x11\x1c\x92\x00\x52\x12\xfa\x82\x78\xcc\x73\x03\x86\xfc\x90\x02\x1e\x85\x10\x45\x30\x70\xa3\x43\x8d\x90\xbd\xd5\x40\x77\xf3\x55\xc0\xcf\xad\x06\x7e\x67\x17\x69\xff\x94\x3c\x5c\x5e\x43\x7a\xab\xd5\x78\xa7\x58\xdc\x9b\xbe\xca\x93\xd5\xa8\xa1\xe2\xee\x64\x01\x7e\x62\x35\x7a\x9b\xf6\xc8\xae\xa4\x54\x17\x8c\x60\x18\xb5\xc6\xfe\x66\xa2\xf5\x9a\xa1\x61\x97\xe9\x5c\x73\xec\x7a\x20\xa3\xf1\x7a\xb9\x18\xa3\xbe\xb5\xa6\x61\x66\xdd\xae\x8f\x76\xac\x34\xcb\x99\x6b\xb5\xb2\xa0\x79\xb1\x1d\x0c\x1b\x5d\xc5\x6e\xd6\x0d\xdc\xda\x46\x5d\xbc\xb5\x73\xd3\x94\xf2\x4b\x5a\x8d\xd4\xdb\x33\xe5\xdb\xac\x86\x1a\xe7\xfd\x5f\x63\x35\x0c\xe5\x73\x5a\xfb\xde\x56\xc3\x50\x3e\xb7\xe3\xa3\x28\x5f\xec\xb7\x7d\x81\xaf\x11\x57\x6b\x2b\x77\xe2\xf7\x63\xb9\xab\xd8\x56\xe3\xab\x14\xc3\x7d\xad\x06\x11\x04\x52\x3f\x00\x51\xe0\x4a\xcf\x97\x6e\xe8\xc1\x08\x78\x04\x4a\x2f\xf2\x90\x9
4\x3c\xe2\x2e\x72\x5d\xe8\xc1\x30\x42\x21\x85\x2e\x60\x00\xf9\x0c\x42\x40\x22\x1f\x61\xdf\x7f\xb2\x1a\xf8\x6e\x56\xe3\x67\x49\xdb\x84\x23\x82\xd9\x3b\x56\xe3\xf1\xe9\xf9\x8d\xd3\xb7\x5a\x8d\x77\x8a\x23\x9f\x58\x8d\xea\xf9\xf3\x34\x1a\x00\x7f\x9b\x1a\x79\xb8\xdc\xf6\x71\xaa\xe3\xa0\xfa\xce\x38\x95\x98\x13\x2b\x52\x1c\x3a\xed\xee\xc8\xe9\x34\x13\xac\xae\xcc\x07\xd0\x5c\x0e\xa3\x4d\x3f\x45\xf9\x16\xe5\xf5\x99\xdb\x48\xd7\xfa\x95\x44\x2e\xbd\x69\x34\xc4\x80\x4e\x22\xbb\xde\xcd\x6f\x36\xdc\x14\x34\x37\x2b\xd5\xc1\xb4\xa8\x3a\xa4\x12\xb9\xf3\xf6\x74\x93\x01\x19\x15\x6b\x33\xc7\x0f\xd5\x7b\x9e\x45\xdc\x5b\x8b\x7f\xf6\x2c\xe2\x7e\xef\x4f\xc7\x79\xff\xd7\x58\x91\x82\x12\x63\xdf\xfe\x2f\xdc\xc1\x51\x94\x7b\x58\x91\xe3\x3e\xb0\x9c\x04\x7a\xa1\xdd\x42\x12\x18\x7a\x71\xed\x34\x8a\x03\x7f\x7b\x36\x0e\x5f\x60\x55\xe2\x6a\x75\xe5\x0b\xf8\xff\x99\xf7\x3f\x59\x95\xaf\x52\x14\x77\xf6\x45\x3c\x41\x05\xf3\x25\xe4\xcc\x27\x7e\x84\x42\xc1\x88\x87\x28\x82\x92\x08\x57\x72\x14\x78\x14\x71\x12\x84\x88\x30\x8e\x19\x93\x84\xe1\x10\xf8\x5c\x20\xe2\x87\x21\xa0\x7b\x05\x4e\xc4\xed\xb1\xbe\x3f\x3d\x2d\x11\x88\x8a\xab\x45\x5f\x5f\x9e\x3e\x5b\x27\x82\xc1\x1d\x62\x7d\x1b\x17\x03\xfb\x6a\x15\x19\x43\xbf\x5d\x89\xf5\x55\x95\x42\x69\xc4\xb2\xa4\xb7\x9c\xb7\x27\x51\x75\xac\xaf\x47\x0b\xbd\xdc\x77\x8c\xf4\xc6\x48\xd6\x47\x5b\x3d\x39\x35\xba\x09\x2f\x28\x34\x76\x5d\x68\xb3\xc6\x60\x3d\x2a\x25\xb6\x86\xdd\x1e\xa2\xc4\xb0\xe4\x36\x7c\xb6\xab\x2f\x37\xdd\x7c\x4f\x1f\x0c\x76\xa9\xb0\x9c\xdc\xe4\x56\xe5\xe2\xbd\x62\x7d\x4f\xe7\xc0\x27\x63\x67\x23\x11\x8d\xa3\x93\xc8\xbe\xfa\xf1\xc7\xaf\x8b\x9d\xbd\x23\xbd\xdf\x12\x9b\x7c\x62\xb3\x6e\x88\xff\x4c\x6e\x12\x1d\xf9\xdd\xf1\x9f\x7f\xa3\x78\x55\x45\xf9\xeb\xf6\xc7\xde\x3c\xe1\x59\xc5\xe7\xdd\x7e\xac\x93\x47\xa1\x4a\xd5\xbe\x98\x77\xb7\x8e\xf5\xa5\x6c\x7e\x25\xbd\x4f\xf6\xb7\x9d\x4e\x4f\x12\x9d\xa5\xc1\x67\x46\xae\x09\xa6\x29\x61\xf2\x0a\x30\xb3\xaa\x22\xaa\x16\x6c\x89\x4e\x92\x75\x8d\xd5\x10\xdb\x99\x39\x59\xd4\x12\x83\x89\x6f\xee\x56\xf3\xae\x46\xcc\xc5\x6a\xd2\x1b\x0e\x46\xa5\x39\xf4\xe6\x4e\x97\x6c\x68\xcb\x5f\x37\xfb\x96\xa8\xce\xc0\xfd\xe3\x16\x42\x57\xe2\x30\x0a\x84\x8f\x22\x12\x12\xea\x03\x0c\x85\xcf\x64\xe8\x4b\x2a\x18\xc2\x2e\x08\x09\xe6\x24\x22\x20\xa0\x4c\x86\xa1\x0f\x11\x09\x08\xf6\x85\x4b\x49\xc0\xd0\xa3\xfd\xbd\x43\x29\xcc\x9f\xfa\x72\x92\x12\x78\x3d\xd7\x66\xff\xf4\x70\x6e\x04\xa0\x60\x98\x30\x0e\xc0\x1d\x4a\x61\xd6\x2f\x06\xf6\xc4\xfe\x9e\x98\xd1\xfa\xb3\xae\x4d\x29\xa9\x9a\xbe\x6c\x3a\x89\x82\xdd\x6e\x24\xd7\xe9\x15\x4d\x14\xd3\xf5\x8e\x8c\xd6\x64\x18\xce\x77\x9b\xb0\x59\x92\x42\xd7\xca\x51\xbb\x59\x0f\xd5\x16\x0f\x4b\x9e\x6a\xa0\xf1\xac\x9a\xad\x15\x95\x64\x35\xc7\xf4\xba\x4c\x2e\xed\x7a\x75\x63\x94\xcb\x0d\x63\x3e\x86\xe5\xf1\x46\x4f\x7d\xcc\x37\x33\x8f\xbf\x7c\x5c\x35\x7f\xca\x4e\x18\x33\xad\xa7\x3a\xeb\xc7\xc4\x92\x74\x0f\x07\x6d\xad\x3c\x81\x45\x5a\xc3\x51\xaa\x5a\x52\x37\xc9\xd1\x8c\x36\x17\x5d\x75\x58\x34\xd1\xb8\x60\xb6\x0a\x9b\x44\xc1\xc9\x61\x73\xfd\xf8\xd5\xc8\x0f\xf3\xa7\xb2\x7f\xa4\xed\x68\xc3\xf3\x71\x8b\xc5\x74\x97\x5e\x2b\xb0\x56\x27\x35\x29\x4f\x26\xef\xd7\xd9\xc4\x3b\xd2\xfb\x2d\x6b\x8e\x93\xa8\x8b\xd8\x7a\x3d\x47\x17\x03\x90\x90\xc7\x9a\x94\xdf\xa3\xd7\xff\x46\x76\xe8\xc0\xdf\x93\x2a\xd1\x9f\x99\x2b\xf9\x6c\x77\xa5\x72\x2d\x99\x38\x0c\x6a\xbe\xac\x9c\x8f\x69\xf9\xb3\x78\x71\xf8\xfb\x39\x7a\x67\xae\x1e\xf6\x7d\x7f\xf4\x3d\xf4\xbe\xb9\x46\x39\xe1\x75\x1c\x59\x4e\x1e\x97\x4a\xa9\x4b\xbd\xf9\x6b\xc9\xf2\xeb\xb9\xf7\x95\xf4\x3e\x47\xb9\x7c\x91\xc1\xba\xf3\x15\x31\x08\xf0\x20\xa4\x1e\xa7\x88\xe0\x10\x78\x80\x21\x1f\x0b\x8c\x23\x2a\x38\xc0\xcc\xe3\xa1\x8c\x18\x21\x
14\x7b\x21\x91\x51\x44\x41\x84\x23\x1c\x05\x3e\xa7\x90\x85\x3e\xf3\xf6\x6b\x94\xe7\x72\xa1\xe9\x0a\xc5\x0d\x45\x33\x72\xa5\x1a\x6e\xe5\x8a\x98\xa8\x55\x5d\x69\xe8\x5c\xaf\xb3\x1c\xc3\x6a\xca\xac\xd4\xec\x46\x2d\x9d\xca\x54\x0b\x0e\xaf\x54\xca\x39\x5c\xb5\x72\xa5\xea\xc3\xef\x3f\xa0\xc4\x88\x63\x8a\x04\xfe\xe9\x6a\x85\x02\xc6\xe4\xd5\xf3\xca\x97\xa7\x50\x72\xca\x04\x91\x92\xc3\x97\x9d\xe7\x92\x15\x20\xa5\xa2\x8e\x6a\x78\x60\x07\x22\x63\x90\xbc\x9e\x08\x57\x83\x10\x19\x39\x07\x8c\xdc\xf6\x7a\xd0\xdf\xe9\x64\xd3\x9e\x3d\xce\xca\xc3\x6a\x45\x3d\x1c\xca\xbf\x13\xe5\x52\x3b\xb6\x3f\x89\x7f\xa5\x44\xe7\x47\x3e\xa9\x97\x59\xaf\x29\xa5\x7e\xb6\x64\xd9\xa5\x54\xba\x5a\xe6\xa3\x06\x6a\x8d\x12\xde\x60\x8e\xac\x74\x2d\xf4\xfa\x28\xd9\xc7\xc9\xf6\x54\xcf\x95\x45\xe8\x94\x32\xf3\x34\x1b\xa5\x42\x6d\xb5\x2d\xa4\x05\x15\xab\x31\x4d\xb9\x5e\xaf\xbc\x4e\xc9\xee\xa8\xa7\xa4\x8c\x82\x08\xe7\xe9\x0e\xf7\xb4\xbe\xfa\x81\x55\xd1\x69\x26\xc0\xf5\x95\xc9\xc9\x0e\x2f\x91\xd8\x49\x69\xb9\xc0\x0e\xeb\x45\xbc\xae\xf5\xda\xf3\x88\xb2\x44\x9d\x3b\xed\x49\xaf\x1a\x4c\xd3\x6a\xc3\x2e\x44\xbd\xac\xba\xd8\xfa\xea\x66\x3c\x5b\x2f\x37\x56\x72\x74\x1c\x83\x2b\x9f\x9f\xac\x34\xee\xf7\xfe\x5b\x2f\x72\xf8\x1a\xcb\x76\xfa\x79\x5f\x7b\xfe\x4d\x2c\xeb\x9b\x96\x6a\x71\x0b\xed\xcf\x82\x9a\x7f\x6f\xf7\xe0\xdb\x78\xf7\x99\xf7\x3f\x5f\xd9\x33\x49\xf5\x7a\x60\xd7\x2e\x83\x7e\x89\xc2\xc5\xb0\x95\x1c\x52\xaf\xd8\x5b\x43\x7d\x48\x8b\x2b\x5a\x4c\x92\x5c\x67\xdb\xb7\xf4\x61\xba\xe3\x29\x89\xc9\xba\xd9\xc9\x8b\x9e\x92\x2c\x45\x79\x56\x2a\x37\x86\x04\x14\x94\xc8\x99\x90\xb5\x52\x4d\x5b\xfa\x4c\x21\x21\xda\x98\xf7\x2f\xc3\x1a\x20\x12\x11\xc1\xbc\x80\x23\x14\x05\x50\x70\xea\x09\x82\x05\x96\xa1\x10\xc0\x0b\xc2\xd0\xc5\x88\x84\x88\x06\x1e\x07\xcc\x45\xcc\x8d\x68\x48\x02\xd7\x15\x3e\x05\x1c\x86\xcf\x96\x04\xdd\xe8\xed\xfe\x2c\xde\x85\x02\x26\xe8\xd5\x93\xcb\x97\xa7\x8f\xf6\x83\x02\x2c\xee\xe1\xed\x5e\xd6\x85\x79\xdb\x7e\x7c\x12\xff\x16\xfb\x71\x52\xb6\xd5\x31\x80\x5d\x28\xd7\x1b\xd6\x6a\xa4\xa2\x9c\xe5\xa5\x27\x1d\x59\xd8\xee\x68\xad\xb0\x4c\x55\x0c\x67\x81\x9c\x72\xb0\x18\x58\x8e\x4a\x6a\xba\x3b\x74\x46\xfa\xbc\xd0\xee\x4c\x8c\x42\xe4\x56\x79\xa1\x11\xe8\xcb\xac\x23\x60\x22\x93\x1a\xef\x4a\xf3\x6a\x22\xa9\x67\xc7\x9d\x72\xfa\x2b\xec\x47\x1c\x4f\xf1\x24\xbd\xc6\x7e\x9b\x15\x1f\xb7\x1f\x37\xbe\xff\x4a\x6d\xa0\x38\xf6\xe3\x8e\xab\xf5\xd3\xcf\x87\x75\xe0\xaf\xec\x2d\xfc\xcc\x7e\xc4\xa0\x3d\x79\x7a\xe1\xf2\x95\xfd\xe1\x6f\xe3\xdd\x67\xde\xff\x64\x3f\x1a\x6a\x51\xdb\x69\x99\x7a\x8f\xd3\xf5\xff\x4f\xde\x7b\x34\x27\xce\x74\xfd\xc3\x5f\xe5\xbf\x7b\x16\x3c\x55\x74\x0e\x8b\x77\x21\x94\xc8\x20\x32\xec\xa4\x56\x8b\x9c\x33\x9f\xfe\xad\x01\xdb\xe8\xf6\xd8\xc6\x80\x3c\x33\x4f\xdd\xbd\xb8\xea\xf2\x34\x70\x5a\xdd\xad\xf3\x3b\xf9\x6c\x86\x23\xb4\x67\xdd\xa8\x51\x5a\xa7\x5a\x56\xb1\xdf\xac\xbb\xc5\x70\x6a\x67\x83\x5e\x03\x19\x5b\x27\x83\x16\xd3\xfc\x81\x5b\xb9\x9c\x5f\xb7\xb2\x68\xbc\x3d\x39\xb3\x1a\x1f\x4c\xab\xe5\x68\xe0\xf9\xa5\xd3\xca\xde\xed\x0e\x87\x65\xae\x69\x24\x5e\x46\x94\x06\x04\x29\xce\x01\x41\x11\x20\x41\xc4\x21\x63\x54\xfa\x80\x6a\xae\xce\xbd\x2b\x01\xf6\x23\x80\x85\xcf\x43\x9f\xd3\x00\x82\x40\x50\x2c\x38\xe0\x4a\x46\x11\xe4\x28\xfa\x85\x1f\xcf\x97\xf1\x86\xb7\xa2\xec\x29\x14\x9c\x7f\xda\xe8\xed\xd7\xac\xb8\x2a\x34\x18\x11\x98\x40\x15\xef\xf7\xac\xeb\x37\x76\x7f\xbf\x68\xfb\x1f\xf7\x25\xee\xac\x74\xcd\x49\x4d\x57\x95\x61\x1f\xbc\x74\xbb\xb0\x9a\xcf\x71\x1a\x87\x91\x1a\x54\xd3\xc7\x9c\x68\xd4\xac\xd5\x6a\xb4\x49\xab\x41\x4f\xe0\x66\x71\xd8\x50\x94\x4c\xe8\x5e\x9d\xa0\x31\x0e\x8a\x3e\x33\x0d\x7f\x11\xd6\xdd\x45\xb3\xdd\x19\xd3\xf1\xba\xb1\xb4\x3a\xc5\x19\x31\
xbe\xd3\x45\xfc\x3f\xee\xed\xa7\x70\x10\xfb\xfc\x1d\xec\xb8\x4c\x8a\x7e\x29\x57\x89\xe2\x69\xfa\xf5\x0f\x29\xdf\x82\x83\xe4\xe8\x3f\x04\x47\x31\x67\xe2\x83\x2c\x0d\xf7\x37\xf3\x7d\x9f\x3e\xcf\xd2\xfe\x22\x4b\x35\x8c\xbf\xa7\x12\x7c\x08\x2f\xeb\xc7\xf6\xe2\xf5\x2c\xcc\xab\x68\x90\x79\xa8\x8b\x7b\x82\x77\xe1\x81\x2e\xee\x83\xfe\xa2\xd9\x19\x61\x23\x53\x6a\x56\xe1\x70\xc9\x0a\xe5\xb4\xc4\x1d\x9c\x6b\xa4\xc5\xb0\x6a\x03\x8f\x31\xc2\x23\x9c\x6b\x47\x65\x68\x92\x31\xae\xe8\xbc\x56\xd9\x92\xf4\xf2\xee\x11\x6d\xfb\x7d\x77\xd9\x69\x0e\x96\x2d\x67\x42\x2b\xbd\xa2\xcd\xfa\xf0\x48\x53\x46\xf2\x5d\xdc\x35\xe0\x1c\x87\x80\xfb\x81\x8f\x70\x08\xa1\x66\x3e\x60\x01\x40\x90\x09\x84\x24\x09\x34\x8c\x64\x84\x00\xf2\x91\x44\x4c\x20\x0e\x10\xa2\x58\x32\x86\x94\x46\x8a\x47\xe7\x10\xcb\xd7\x2a\xd5\x66\xb6\xda\x71\x5b\x56\xa1\xea\xd0\x42\x83\x98\x65\xa3\xc1\x3b\x9c\x57\x3a\xdd\x1e\xef\xb6\x6c\x92\xcf\x16\x1c\xab\x99\x35\x5d\xb3\xd5\x36\x8b\xa4\x40\xaa\x1e\xe3\x85\x42\xef\x17\x2e\x08\xc2\x04\xa6\x12\x73\x74\x13\x68\x90\x04\xf4\xd3\x24\xe2\xb7\x59\x28\x84\xe4\x82\x32\x0a\xd1\x5b\x3a\x57\x8e\xf3\xe9\xb2\xdc\x38\xca\x56\xb5\x3f\xed\xcf\x52\xe9\xf4\x76\x37\xaa\x8a\x56\xfe\x90\x87\x4e\xd6\x23\x23\x73\xd5\xad\xa7\x0a\x24\x95\x3d\x9b\x54\x2f\x86\xae\xf3\x25\xfe\xc2\xd0\x75\x91\x84\x1f\x72\xaf\x74\x57\x23\xa2\xc5\xa5\x10\xcf\x28\xcf\xf7\x99\x55\x6d\xb1\x5e\xa9\xd9\x32\xe3\xb5\x47\xc7\xd3\x7e\xd7\x3a\xa1\x1d\x6d\xe6\x29\x73\x19\xc8\x0d\xfa\x6b\xbf\xed\x39\xf5\x28\xc5\xe7\x53\xc2\x50\xa1\x31\x57\xa9\x6c\x1e\x71\xbf\xd8\xaa\x78\x02\x87\xee\x00\x0e\x77\x8b\x54\x2b\xb7\xad\xd9\xf9\x65\xba\x9e\xd9\x7f\xcb\x8d\x17\xbb\xda\xd7\xc0\xe6\x2f\xdd\x6a\xb1\x8d\x18\xed\xb6\xfe\x1c\x56\x8b\xb0\x99\x32\x7d\x33\xe2\xe9\x14\x4b\x95\xd2\x87\xe6\xcc\x9e\x35\x9b\xa1\x35\x2c\x15\x36\x83\x22\x79\x7d\xce\x58\x4f\x1a\x73\xfd\xf1\x1b\x75\x1e\x37\xd0\x26\x39\xfa\x0f\x19\xaf\x62\x6e\xaf\x55\x6c\xea\x4e\x0e\x33\x2b\x61\x23\x56\xbc\xfc\x31\x0e\xf7\x17\x39\xec\x7b\x0e\x6f\x56\x52\x74\x39\x6a\x8a\x5d\xab\x41\xf2\xf3\x7c\x7a\x97\xe6\x2b\x72\x4a\x3b\x75\x55\x0f\x6b\x35\x50\xc9\xed\x97\xc8\xde\x0d\xb7\xd5\x01\x3f\xf3\xd5\xeb\x5d\x2b\xec\x8d\x07\x8c\x77\xb1\xf5\xde\xf1\x5e\x7f\x80\x52\xab\xe7\x9e\xe1\xb2\xc0\xc2\xe3\xbd\x8c\x12\xdc\xc3\x7b\xe8\xbf\x16\x21\xfa\x21\xc6\x93\x70\xd2\x31\xa1\x01\x83\x1c\x86\x9a\x13\xee\x07\x18\xe1\x00\xab\xd0\xa7\x08\x49\x20\x31\x0e\x23\x2c\x75\x18\x72\xc0\x60\xc4\x34\x62\x80\x33\xc4\x18\xd6\x91\xa2\x30\x50\x2a\x00\xbf\x50\xea\xf9\xf2\xac\x50\xdc\xc2\x26\x02\x31\xff\x34\x64\xf3\x6d\xf6\x15\xed\xb8\x40\x09\x84\x6c\xbe\x97\xd8\x7f\xe3\x69\x0f\xf0\xc8\xcf\x7a\x19\x39\x41\x1d\x29\xba\x5a\x75\xcb\xee\x7a\x52\x6e\xad\x27\x8d\x6c\x56\x8c\xc4\x01\xf4\xfb\x7a\x0f\x75\x36\x5b\x69\x36\xca\xfe\xa8\x54\x3d\x0a\xb7\x3b\xa3\xa8\x56\xdf\xd7\x66\x6a\xbf\xdf\xed\xa0\x1e\xd9\x91\x91\xea\xa7\xa7\x8d\x4e\x69\xd1\x59\x65\x2a\x82\x70\x93\xb0\xc2\x3c\x31\x2d\xa8\xf0\x58\xf8\xc4\x9b\x16\x92\xba\x7e\xbd\xf6\x21\xe5\x1b\xb8\x90\x20\xfd\x87\xb4\xb0\x98\x16\xf4\x04\x2e\xfd\xc2\x05\xeb\x59\x5c\xfa\x8b\xb8\x68\x18\x7f\x8f\xa7\x7f\xa8\x05\x4d\x1f\xdf\x8b\x5f\x67\xe1\x5c\x05\xc6\xcc\x43\x0e\xae\x04\xef\xc2\x3d\xf4\x5f\xf0\xa5\xd6\x06\x9b\x59\xc5\x67\xc8\xaa\xad\xf2\x8d\x56\x90\x6f\x1e\x0e\xe9\x5c\xba\x14\xe6\x72\xd9\x75\xb9\x33\x38\x9c\x26\xf6\xa9\x3d\x3f\x59\x62\x97\x99\xe6\x53\xe1\xb8\xb9\xb6\x7b\xb8\x54\x2a\x72\xd9\x99\x1f\xb2\xfb\xea\xbc\x4a\xc7\xf0\x54\x68\xb1\x7d\x63\x57\xa9\xec\xb7\x95\xe1\x32\x79\x2d\x28\x64\x5c\x45\x01\xe2\xd0\x17\x42\x6a\x2e\x28\x89\xa2\x90\xc1\x00\x08\xed\xab\x50\x73\x16\x52\xa6\x39\x15\x84\xb1\x48\x61\xee\x0b\xad\x15\x0a\xb1\x4f\x80\x8e
\x44\x00\xc9\x2f\x7c\x79\x2d\x3f\x6a\x91\x7a\xa9\x62\xe3\x56\xb5\xee\xf0\x5e\x0d\x9b\x0d\xdb\xc3\x55\xda\x2c\x37\x32\x25\x3b\x6f\x19\xc8\x2d\x76\x1a\xd9\x1a\x2f\x95\x8c\x9a\x51\x2e\x14\xf2\x56\x8f\xd7\xaa\xae\xf3\x0b\x17\x18\x46\x40\x70\xca\x11\xbd\x95\x1c\x40\x29\x92\xec\x73\x2d\xe8\x75\x16\x0a\x80\x85\x60\x18\x50\xf4\xe6\xae\xa9\x8e\xa7\x95\xfc\x76\x45\x9d\xf4\xb4\x72\x68\xe4\xad\x20\x05\x82\x52\xd1\xcc\xbb\x56\x6d\x82\xa7\xba\x8a\x4a\x19\xdb\x6a\x15\xea\xd5\xea\xf9\xe4\x2f\x5a\x50\xd7\xf8\xba\x94\xd2\xe5\x92\x2e\x11\x5b\x37\xd5\x24\x92\xd4\xc5\x53\xe3\xb4\x9c\x0f\x94\x71\xd8\x2c\x74\xaa\x39\xf4\xe7\xbb\xc2\xde\x5a\x98\x3b\x04\x36\x2c\x70\xce\x48\x93\xa9\xb5\x5a\xbf\x73\xba\xe6\x22\x27\xd2\xfd\xf3\xcf\xdb\xd5\x82\xf2\xa6\x46\xc3\x5d\xe8\xdd\x6a\x33\x31\x54\x6f\x56\xce\xa5\xdd\x51\x54\x2c\x7a\xf6\x42\xfa\x15\xcb\x75\x6b\x4e\x2b\x85\x4c\xd2\xac\x88\xb0\x99\x6a\x1a\xa5\xc6\xd8\xac\x77\xbc\xa2\x95\x3a\x54\xfd\x9a\x77\x38\xed\x25\xae\x2b\x7b\x3e\x66\xee\xa9\x6b\x6e\x36\x3c\xf3\xbd\xa2\x17\xd9\xf8\x55\xfe\x14\x71\x62\x6e\x90\x54\xbd\x4b\xa6\x6c\x37\xee\xa4\xba\xdc\x29\x8d\xcb\x3b\x5e\x5b\xef\xd7\xb9\x71\xb7\xec\x76\x74\x8f\xa4\xbb\x7b\xdb\x2b\xbf\x3d\x5b\xac\xec\xd1\x65\x6f\xdf\x27\x3a\xbd\xfc\x7d\x03\x71\x92\xa3\xff\x9e\xd0\xb7\xe8\xc7\xb8\xdc\xe2\xba\x3f\xe6\x2a\x58\x1e\x6a\xc7\x56\x1a\x8f\x03\xdd\xb5\x2a\xcb\xa1\xb3\xb7\x5c\x34\xe1\xb4\x9e\x5f\x0e\xc5\xca\xa8\x8c\x0b\xe9\xa0\x8e\x17\xeb\xce\x99\xc3\x3a\x4e\xab\xf5\xc2\xe5\xef\xbc\x9b\x2f\x4b\x49\xbf\x8d\x0f\x35\x89\xf3\xdf\xcb\x7f\x67\x7d\xf1\xf1\x21\x62\x2c\x62\xc7\x40\xf3\xa3\x61\xb0\x4c\xd5\x22\x3a\x5a\x37\x8f\x1e\x4a\x6f\xdc\x93\xb9\x1f\xd6\xfa\xa0\x17\xd2\xd9\x48\x8e\x8c\x9a\x01\xeb\xe3\xd1\xe9\x22\x3d\x5c\x93\xb8\xf2\xd7\xdf\x8b\x8d\x97\xbf\xbf\x46\x8c\xe5\x73\xf4\x2f\xc1\x0c\xf7\xd3\x7f\x41\x8c\x9f\x7a\xe9\x13\x2e\xbe\x17\x22\x8d\x41\x14\x52\x4e\x24\x0a\x22\xa4\x51\x00\x31\xd2\x30\x50\x12\xfa\x94\x08\x1f\x33\x09\x85\x0c\x74\x10\x71\xae\x03\x14\xe0\x50\x20\x09\x11\xf4\x65\x88\x14\x3d\x6b\x24\x30\x49\xc4\xb8\xe9\xe0\x67\x14\x9e\xeb\x64\x7f\x3d\x0b\x39\x03\x12\x33\x86\x11\x46\xcf\x23\xc6\x17\x05\x2d\x1e\x43\x8c\x56\xab\x6c\xc7\x6e\xea\x65\xc4\x10\x83\x55\x50\xa4\x61\xdf\xcb\xec\xe7\x15\x10\x95\xca\x65\xbb\x59\xc8\x02\x2f\x6c\x75\x5a\x23\xbb\x1d\x94\x69\xab\x48\xa1\x35\xad\x35\x75\xd8\x2d\x95\x3a\x9b\xa0\x43\xf3\x85\x56\xfd\x34\xf1\xd1\x38\xe7\xee\xda\xfd\xc9\x1a\x86\x9d\x6d\x27\xdb\xeb\xa6\x73\x95\x89\x5f\x00\x70\xf5\xcd\x70\xf8\xef\x21\x46\xec\xab\x8f\x70\x6c\x78\xdd\xdb\xf7\x7a\xde\xf7\x10\x23\x39\xfa\xef\xcf\xf6\x5e\xc4\x98\x5f\xf7\xe7\x6e\x8e\xdc\x6a\x82\x97\x07\x49\x9c\x23\xbf\x3d\xc3\x33\x88\x96\xf0\xfa\xe2\x7b\xfa\x21\x62\xcc\x62\xc7\xf0\x00\xc7\xbe\xd8\x90\x2e\x1c\xfb\xfd\xf9\x7d\x0b\x31\x9e\x44\xac\x4b\xda\xf3\xfd\xf4\x5f\x10\xe3\xa7\x5e\xfa\x64\x11\xc3\x17\x18\x45\x11\xe3\x40\x50\x15\x48\x1d\xb0\x08\x2a\x12\x70\x02\x22\xa6\x01\xc6\x81\x2f\x03\x0a\x84\x1f\x71\x44\xa3\x20\x42\x00\x13\x26\x39\x92\x3e\x23\x3e\x94\x32\x8a\x5e\x10\x03\x25\x86\x18\x37\x43\x8a\x19\x25\x9f\x77\x06\x3d\xcf\xca\x18\x62\x70\x9c\x84\x8e\xf1\x45\x31\x8b\x67\x75\x8c\x8f\x11\x03\x7a\x5a\xcc\xfa\x7b\xa3\x7c\x22\x51\x71\xd9\x35\x07\xfb\xda\xa8\x46\x0f\xb3\x3e\x53\xb9\x65\x93\xb6\x33\x9b\xd1\xb8\xd7\x77\xc8\xb6\xdf\x1a\x0c\x45\x9e\x8d\x1b\x0b\xc3\xdc\xcd\x76\x9d\xf5\x1c\xe7\x5a\xc7\x29\xab\x45\x21\xcb\x87\x5e\x5b\x78\xaa\x38\x76\x1a\x1d\x58\x72\xbf\xd9\x02\xe1\x4f\x23\xc6\x43\x1c\x3b\x41\xfa\x0f\xe9\x38\x09\x21\xc6\x0f\xca\xf0\x46\x12\x88\xf1\x83\x3a\xd0\x4f\xe8\x18\x8f\x72\x6c\x23\x21\xc4\x88\xeb\x38\x0f\x20\xc6\x4f\xbd\xf4\x09\xeb\x18\x91\x4f\x18\x0f\x28\x8e\x2
8\x16\x2c\xe4\xdc\x0f\x31\x55\x88\x02\xa4\x43\xa4\x42\x1c\x86\x22\x20\x0c\xf8\x34\xd0\x08\x05\x2c\x40\x02\x8a\x40\xc0\x40\x85\x58\x87\x28\xf4\x7f\x21\xc6\x6b\x81\x6f\xb3\xd5\xa6\x45\xb3\xd6\x73\xaa\xbc\x6a\x97\x3b\x0d\xc3\xad\xb4\x72\x5e\xc7\xb0\xac\x72\xb3\xd3\xe9\xe5\xcd\x72\xc1\x21\x2d\x2f\x83\x72\x05\xde\x46\xc5\x6a\xbe\xed\x70\x7c\x6e\x9d\x76\x45\x0c\x72\xd3\x37\x2f\x00\x11\x9f\x5b\xa5\x5e\x67\x21\x87\x5c\x0a\x26\xa9\xc4\x6f\x51\x60\x85\xcd\x76\xe5\x64\xf3\x9d\xb4\xae\x6d\x68\xbf\xc4\x96\xb9\x8e\x51\x0a\xfc\xce\x6e\x92\x9b\x2e\x8b\xc7\xb5\xc1\xbc\x95\xdc\x14\xf5\xf4\x10\x19\xff\x89\x18\x5f\xf8\xe6\x2f\xf3\x57\x9d\xe1\x8e\x9b\x96\xbd\xbe\x50\xd7\xdf\x73\xde\xe8\x7b\x85\xb2\x28\x46\x1b\xc3\x8f\xf4\xdc\x70\xa7\xbb\xc5\xda\x9e\x8f\x97\x53\xc2\x47\xc6\x76\xb9\x27\x7d\xd8\x6c\x8f\xa6\x45\x30\x62\x5e\x75\x0a\x47\x87\xb9\x3f\x39\x59\x7b\xab\x8e\xdb\xa9\x32\xca\x56\xb2\xe1\xa8\x9e\x9f\xd8\x9e\xdf\x38\xb8\x99\xc8\x2b\x64\x07\x2b\x36\x2b\xaf\xe3\x28\x60\x7c\x8a\x1a\xf1\x26\x40\x9f\xa3\x46\xec\x43\x77\x22\xe6\xd9\x47\x5e\xb9\xee\xef\x27\xe3\x06\x6a\x24\x47\xff\xd9\x72\xac\xd7\xf4\xb6\xbb\xb9\xf2\x2d\x4e\xf7\x32\xbe\xe6\x74\x49\xd3\xbf\xb7\x1c\xeb\x3f\xaa\x67\xdd\xf2\x95\x3c\xb2\x57\x97\xb5\xe6\x7f\x2b\x9f\xf5\x9d\xbd\x4a\xe2\xac\xe2\x7a\xd4\x3d\xf4\x5f\x50\x69\x8e\xf7\x9b\x14\xc8\x2c\x24\xeb\x1b\xbe\x9f\x9a\xee\xeb\xc3\xfa\x32\x18\x6d\x4b\x26\x5b\x59\x43\xab\x55\xf3\x2b\xdd\x5d\xbd\xba\x30\x52\x41\xbb\xea\x0e\x96\x9d\x52\xd5\x28\x17\xc3\x80\xe0\xae\x22\x21\x2a\xe7\xa6\xcd\x5a\x66\x51\xb5\xbb\x8e\x34\xe6\xda\xdd\x2d\x15\xda\x27\xef\x2b\x01\x81\x0a\x18\xf7\x7d\xcc\x28\x21\x2c\x0a\x11\x21\x91\x56\x51\x84\x7d\xa0\xb0\xe4\x91\x80\x11\xf6\x65\x00\x31\x41\xe2\xdc\xc3\x81\x6a\xce\x59\x48\x08\x57\x20\x80\xfa\x15\x95\x50\x52\xa8\x74\xdb\xf2\x25\xc0\x17\x86\xaf\x97\xc9\x0b\x26\x71\x00\xc0\xd5\xee\xf5\x38\x26\x7d\x61\xf7\xba\xf0\xb4\xab\x56\x92\x20\x26\x95\xaa\xad\x7e\x37\x4a\xed\x81\x0c\xf5\xc2\xaa\x1f\x4a\x2a\x3f\x1b\xb5\x7a\x59\x03\x39\x93\x71\x66\x3a\xa8\x4e\x87\xa0\xcc\x98\xb3\x1d\x18\xa7\x86\x1e\xb7\x95\x59\x29\x85\x66\x0e\xa8\xa6\x4a\x1d\xf2\x83\xe3\xbe\xd8\xa9\x97\x86\x05\x83\xed\xab\xac\x20\x49\xb9\x36\x71\x26\x9e\xfd\x2f\x62\xd2\x43\xc5\x5e\x13\xa4\xff\x6c\xb1\xd7\x04\x31\xe1\xa1\x62\xaf\x49\xd3\xbf\xb7\xd8\xeb\x3f\xaa\xc9\x7d\x88\x49\x7f\x09\x13\x8c\x1f\x38\xab\x7b\xe8\xbf\x60\xd2\x5e\xb6\x33\x83\x34\xb7\x1a\x10\xb2\x71\xb9\x68\x1f\xf6\x53\x5d\x6e\x8d\x95\xd5\x6d\xb7\x3d\xd7\xb7\x53\x7e\xd1\xc3\xab\x79\x9b\xd9\x56\xb9\xcc\x6a\xbd\x7d\x8e\x1c\x40\x6e\x54\x3b\xd4\x4e\xdc\x9e\x66\x8b\xc6\x26\xcc\x1d\x73\x73\x6b\x65\xa4\xe7\x2b\xdc\x87\x6e\xb3\xdd\x32\x12\xb7\xad\x41\xc8\x01\x8d\x24\x51\x21\xa3\x21\x0f\x18\xc7\x3e\x44\xc4\x17\x54\x69\xcc\x84\x12\x20\x12\x84\x8a\x28\xf2\x75\x18\x45\x1c\x4a\x0c\x48\x28\x22\x41\x24\xc5\x34\x20\x1a\xfc\xcf\xff\xfe\x3f\x2c\x13\xf4\xc6\xdc\xd6\x94\x24\xfa\x3c\x59\xe6\x32\x07\x19\xa7\x00\x73\x24\x18\x01\xcf\xdb\xd5\x6e\xa5\xea\xbf\xbf\x09\x31\x8e\xf5\xad\x91\x79\xd3\xcf\x6d\xf3\x38\xb2\x7c\xa8\x53\xa3\x4d\x67\x3b\x6e\xac\xb2\x93\x6d\x85\x1f\x40\xe9\xc0\xdb\x23\x32\xad\xcd\x4b\xa4\x4e\x9b\x86\x33\xad\xb5\x52\x68\xd5\x9a\x1f\x46\xa5\xfd\xd2\xc9\x0f\x36\xdd\x99\x6d\x34\x5b\x50\xa6\x4c\x7a\x54\x0b\x3a\xaa\x14\x8b\x8b\xf6\x61\x65\xea\x5a\x21\xef\x7d\xab\x89\xf7\xb7\x52\x27\xe3\xa9\xd6\x0f\xd8\xb3\x5e\xa2\x87\x1f\xd7\x8c\x12\xa4\xff\x6c\xea\xfd\x0f\x7a\x0d\x5e\xc6\xd7\x9c\xed\x2f\x79\x2d\x6e\x69\x1e\x8f\xac\xe5\x69\xcd\x23\xc1\xbd\xb8\x87\xfe\x0b\x97\x5f\xcf\xf7\x68\xd3\xd9\xcd\x79\x8b\x2f\x6c\x76\xa0\xc1\x68\x2b\x42\xab\xa4\xe1\x3c\x95\x1f\xfb\x56\xc5\xde\xb6\x8c\x4c\x6f\xdc\x8a\x
36\x4b\xe8\x97\xba\xa7\x52\x61\x50\xef\x06\xed\xfc\xc1\x70\x9c\x30\x8a\xfa\x5b\x35\x2b\xe8\xee\x69\x78\x64\xad\xb5\xbf\x9f\x8d\xeb\xc3\xe6\x0f\x34\x89\xf6\x23\x1d\x08\x20\x00\x45\x48\x07\x3e\x87\xca\xc7\x48\x20\x44\x83\x40\x45\x1c\x43\x80\xa4\x0e\x55\xe4\x23\x4a\x94\x40\x5c\x11\x1a\x68\x2a\x40\xe0\x6b\x49\x15\x47\x18\xfe\xe2\xf2\xcf\x17\x6e\x85\xec\x06\x6f\x67\x00\x73\x01\x3f\xe3\xee\x6f\xb3\x2f\x68\xc1\x20\x26\x09\x44\x01\xbf\x3f\xdd\xdf\x78\xc3\x03\x12\xef\x27\x51\xc0\xd6\xec\xb4\x2b\xf7\xed\x6e\x06\x6e\x71\xc6\x6d\x64\x1d\x6b\x92\x6b\xcf\xc6\xc0\xad\x2c\x79\xa6\xb4\x88\xd4\x51\xef\xf3\xb5\x75\xbe\x20\x27\x0d\x92\x6d\x94\xec\x7d\xc9\x43\xa3\x7a\xfe\xb8\x6a\x4f\x33\xfc\x10\x66\x0e\xd9\x82\x5f\x2e\x75\x3c\xdf\xc8\x1d\x98\x59\x1f\x14\x3b\x0b\x2f\x9b\x54\x14\x70\xfe\xb1\xd4\xf4\x32\x29\xfa\xde\xb1\x9a\xa2\x9d\xeb\xd7\xab\x1f\x52\xbe\xc5\xdf\x93\xa3\xff\x09\xc7\xf8\xb6\x96\xf1\x78\xe4\xe9\xc2\xca\xa5\xf9\x44\xbd\xfb\xe5\xdf\xc7\xd7\x3c\xed\x89\xc8\x57\x0a\x84\x6c\x3d\x41\xdf\x30\xfe\x9e\x35\xe9\x43\x7c\x19\x3c\xbc\x17\xe7\xb3\x98\xc7\xde\x45\x71\xe7\x5e\x3c\x79\x16\x67\xfa\x31\x78\xba\x8b\xfe\x0b\xbe\x50\xc1\x4f\xdd\x8c\x6b\x84\xc1\x66\x5f\x1b\x1f\x0e\x1e\xad\x3b\x0b\x5a\xce\x8c\x96\xdb\x7d\xa3\xd0\xc1\xa5\xd9\xda\x6c\x06\x66\x7d\xd7\x6b\xa6\x32\xe0\x94\x29\x59\x76\x79\x0e\x86\x6a\x09\x17\x7d\x2d\x3a\x9e\xaa\x1d\xa7\xa3\x86\x03\xdc\xbe\x59\xd2\xfd\xf1\xbc\x0e\x59\x33\xf9\x2c\x13\x84\x29\x09\x23\x01\x10\x84\x90\x40\xe1\x4b\x29\x35\x86\x0a\x49\x46\x42\xc8\x02\xa1\x23\x2d\x34\x91\x28\xe0\x21\x27\xa1\xa6\x44\x62\x29\x10\x85\x12\x68\x08\x08\x8c\x5e\xf0\xe5\xc9\x52\x2d\x90\xdf\xc6\x17\x29\x3f\xf5\xb2\xbc\xcd\xbe\xe2\x0b\x25\x57\xfd\xe1\x71\x7c\x79\x9f\x11\xf1\xdb\x1b\xf9\x80\x2c\xfb\x19\xbe\x58\xac\xbd\x33\xba\x45\xb8\x1f\x4e\xd6\x21\x3f\x75\xad\xfc\x06\x0c\x9d\x4a\x7a\xb2\x58\x78\xcc\xcd\xe7\x67\x69\x43\x34\xf3\x13\x39\x6a\x8c\x4a\xe5\xca\x68\xeb\x55\x33\x1b\x83\x47\xf5\x43\x8d\x16\x1a\x20\xd3\x2b\x06\x1b\xef\x28\x96\xce\xe4\xb8\x06\x39\x16\x35\x74\x5a\xed\x2b\xff\x00\xbe\x54\xc2\x6a\xb5\xf5\x2c\x7f\x4f\x90\xfe\x43\x59\x2e\x89\xe0\xcb\x6f\xfc\xfd\x21\xfd\xe1\x2f\x66\x56\x18\xc6\xdf\xd3\x19\x12\xce\x32\x79\x8a\xbf\x1b\x09\xe1\x4b\x4c\xd6\x78\x04\x5f\xf6\xa9\xc9\x6a\xef\x9a\x52\xe5\x16\x41\x56\x0b\x64\x1c\x83\x30\x37\x68\xec\xa8\xef\x37\x2b\x83\x61\xbd\x97\xde\x57\x1d\xb3\x17\x75\x01\x39\x59\x8d\x39\x1f\x5b\xfb\xc0\xab\x60\xb0\x6e\xea\xfa\x58\x1a\xed\x3a\xe2\xcd\xd2\xae\x30\xe8\xa9\x55\xd9\xac\xcc\x00\xde\xe1\x52\xf2\x4d\xf0\x28\xc2\x42\xf8\x52\x68\xa2\x31\x53\x90\x73\x22\x7d\x49\x30\x27\x88\x03\xa5\x18\x0d\xc2\x80\x23\x0e\x55\x40\x21\xe2\x3c\x02\x92\xfa\xbe\x2f\x22\xa8\x7c\x19\x62\x84\xd4\x2f\x7c\x79\x2d\x7c\x6d\xe6\x0b\xf9\x4e\xb5\x90\x71\x72\xd9\x4a\x05\x13\x4a\xdb\x9d\xb6\x4b\x4d\x2b\xd3\xeb\x78\xe5\xae\x53\xab\xb9\x39\xb3\x5b\xaa\x36\x3d\x4c\xcd\xaa\x47\xda\xad\x3a\x2e\xf4\x8a\x67\xcf\x09\x81\x1c\x63\x89\x00\xe7\xb7\xb2\x4c\x18\xa4\x02\x7e\xae\xc9\xbc\xce\x42\x2a\x24\xc4\x5c\x12\x40\xde\x22\xc0\xf2\x85\x31\xd6\xf3\x7c\x33\xab\x78\x54\x91\x1b\x7d\xd8\x0e\xdd\x6c\x99\x94\xf7\xed\x52\x23\xe7\x0c\x46\xcc\xc3\xa0\x41\xd4\xa6\xdd\x3e\x07\x08\x5d\x2c\x55\xb6\xf1\x75\x96\xc9\x7b\x7f\xfe\x83\x05\x92\xae\x37\xf7\x8d\xbe\x57\xa2\x75\x73\xde\x86\xc0\xce\xcf\x0b\x99\xcc\x7e\x5f\xda\x82\xd6\x3e\x15\xa5\x02\x0f\xb8\x23\x4f\x3a\x15\xb6\xc7\x13\xe9\xa5\x9d\x9c\xda\x4c\x05\x47\x44\x6c\xe4\xa8\xd3\x08\x26\xa0\x16\x92\x72\xdf\x55\xf3\x29\x38\xce\xdd\x43\x5f\xe5\x87\x76\x6d\xdf\x73\x33\xa5\x8e\xa7\x12\xf3\x9d\xe4\x63\x5f\x1d\x2f\x1b\x21\xeb\x4f\xbc\xf0\xb4\x11\x1c\x41\xee\xb2\x5c\x57\xe1\x3e\xce\x74\x0f\xa5\x7e\xdb\
xad\x2e\xac\xa8\x56\x1d\x1c\x61\x2f\x38\x5b\xf6\x62\x99\x1e\xf6\xdb\xef\xdd\xeb\xbb\x48\x90\xbe\xfd\x08\xfd\x58\xce\xfd\xf8\xfa\x7d\xb3\xbe\x28\xe3\xa5\xd9\xca\xe0\x0a\x3e\xed\x82\xce\x3a\x98\x0f\x06\x27\x6d\x29\x67\x64\x1f\x33\xdd\x6e\x6a\x5c\x96\x76\x2a\x5b\x6c\x4d\x36\xe7\xcf\xc7\xfc\xd9\xf7\x16\x95\x8b\x8d\x0a\x5d\x15\x3e\x5e\x73\xcc\x17\x73\xe7\xfd\x37\x5a\xad\xc6\x2b\x1c\xdf\x59\x4b\x22\xe9\xfb\xf1\xd0\xf9\x24\x4e\xff\xb7\xf1\xe1\x3f\x7f\x84\x84\x31\xa9\xf2\xde\xfb\x71\x8d\x2c\xbb\xb4\x42\x2b\x19\x0f\xf8\xd6\x26\xcf\xd1\xbf\xd8\x87\xef\xa7\xff\x82\x84\xa7\x49\xae\x30\x08\x3a\x9e\x37\xed\xcf\xed\xac\x65\x1c\x79\xad\x63\xd1\x34\xdd\x58\x9e\x5f\xb6\x40\x93\xaf\x2c\xdd\x81\x96\x68\x0e\x8e\x28\x38\xed\x82\xd1\x11\xa3\x72\x58\xde\x76\x6a\x0e\xee\x23\xa3\x77\x1c\x06\x8d\x42\x3f\x70\x33\xfb\x69\xae\x96\xcd\x8c\xbb\xf6\x21\x4c\x5e\xd3\x22\x44\x05\x4c\x09\x1d\x92\x30\xd0\x8a\x12\xe4\x47\xd0\xf7\x43\x84\x01\x80\x61\x28\x43\xc6\x85\x8a\xa0\x54\x42\x11\x06\x7d\x46\xb1\xe6\x0c\xf9\x1c\x51\xac\x43\x1d\x0a\xf4\x0b\x09\x59\x92\x48\x78\x2b\x16\x9a\x61\xca\xe9\xa7\xe5\x95\xdf\x66\x21\x25\x0c\x0a\x81\x18\xa7\xe8\x79\x24\xfc\x22\x16\xfa\x69\x24\xe4\xd3\xf5\x3c\x7e\x73\xaf\x48\x58\x49\xe9\x46\xae\xda\x69\xf6\xc6\x29\xd7\x58\xf2\x0a\xcb\x8a\x10\xa6\xdb\x0d\x1a\x6c\xe7\xc0\x5f\x8c\x8a\x03\xb7\xbf\xc9\xe8\xfa\x71\x82\x79\xb3\xc5\x8b\xe6\xa2\x17\x99\x33\x66\x75\x5b\x8a\x4f\xbc\x4a\xab\xdc\x1e\x64\x14\x2f\x58\x06\x2f\x6b\x81\x8d\x1a\x81\xe3\xae\x31\x4e\x0e\x09\x63\x1f\x7a\x84\xd3\xc0\xeb\xfe\x3e\xe4\xc5\x4f\x90\xfe\x43\x48\x1c\xe3\x74\xa3\xeb\xf7\xff\x35\x24\x3c\xff\xfd\x8f\x22\xf5\x87\xfa\xd3\xe8\x3a\xff\x08\xd7\xbe\x78\xd9\x73\xd7\xbb\x7a\x2f\x6a\x24\x88\x5a\xf7\xd0\x7f\x41\x0d\x4e\x69\xa9\x2f\xc3\xda\xb4\x41\xba\xe6\x6a\xcd\xe7\xd1\x01\x77\x64\xaf\x3a\xd9\xb2\x51\x6d\x39\x2d\x6d\x6d\xdf\x04\x0e\x2d\x16\x3a\xd1\xa6\xd6\x59\xaf\x0b\x93\x89\x28\x44\xf3\xf2\xa0\x75\xda\xb6\x0c\xda\x84\xbe\xed\x9c\xd6\xf3\x92\xb5\x86\xfc\xa4\x32\xd9\x51\xbb\xd6\x4f\xbe\x71\x1f\xd1\x88\x10\x14\x69\xa4\x45\x18\x0a\xc5\x04\xa3\x1c\x28\x19\x51\xcc\x7d\x15\xa0\x88\x02\x48\xb1\x2f\x85\xc0\x91\x08\xa1\x94\x01\x8b\x08\x07\x50\x07\x48\xc8\x00\xfb\x67\x2f\x3f\x4d\x12\x35\x6e\x45\x9e\x31\x42\x11\xfb\x1c\x35\x5e\x67\x21\x05\x18\x61\x41\x25\xa1\xec\x79\xd4\xb8\x19\x7b\xf6\x23\xa8\x51\xde\xf6\x97\xa9\x0c\x06\x8b\x30\x38\xed\x5a\x2d\xf7\x24\x6b\x15\x9c\xb2\x4a\xba\x16\xb6\x97\x2a\xdd\x59\x87\xa8\xdc\x0d\xfb\xac\x9c\xd9\x72\xbd\xde\x55\xc2\x46\xdb\xc3\xb3\xdd\x3c\x63\xe7\x8f\xda\x3d\x09\xdb\x96\xa7\x9a\x37\xac\x0f\x97\xf3\xd5\x3e\x3b\x99\xa6\x0c\x37\xda\xe7\x12\x43\x8d\x5c\x4c\xff\x7b\x80\x6b\xbf\xc4\x7e\xc5\xe2\x6f\xef\xe4\xda\x09\xd2\x7f\x2c\xf6\xec\xca\xe9\x86\xd7\xef\xff\x69\xae\xfc\xad\x4c\xfd\x7f\x14\xd5\x3e\x44\x8d\xe1\x75\xfe\x11\xae\x7d\xf1\xda\x5f\xb8\xf6\xbd\xb1\xdd\xb1\xbd\x7a\x98\x7e\x1c\xb5\xee\xa1\xff\x82\x1a\x88\x99\x2b\x92\x8d\xea\x13\xb8\xab\xc8\x56\x33\xa8\x42\x9b\x47\xa4\x74\xa0\x0e\xf4\x17\x27\xa9\x0f\xb8\x0b\x07\xc8\x9a\x03\x97\x6f\xd8\x51\x50\x58\xee\x59\x5a\xa7\xfa\xb6\x37\xcb\x93\xa0\xba\xf7\x1a\xb8\xec\x98\xf9\x42\x61\x39\xdc\x19\xba\x1d\x10\xa3\x97\x4e\xbe\x00\xbf\x0f\x68\xe8\xfb\x1a\x61\x10\xc0\x88\x13\x4c\x38\xd1\x02\xfb\x01\x0b\x89\x80\x3c\x8c\x60\x84\x23\xce\x19\x83\x3e\x0f\x05\xa4\x11\x67\x08\x28\xad\x91\x2f\x05\x90\x44\x9c\x51\x83\x24\x88\x1a\xec\x56\x6c\x18\xa3\x4c\x7e\x9e\x45\xf3\x36\x0b\x09\x03\x48\x08\x89\x20\x03\xcf\xa3\xc6\x1d\x59\x34\x77\xa0\x06\xfe\x28\x62\x39\x86\x1a\x99\x6e\x73\xd6\xc2\x39\x58\xc1\xd4\x36\x8e\xfe\x11\x4c\xd7\xa3\x3e\x2e\x9d\x7a\x91\x3f\x33\x37\x3b\x7f\x52\x70\xdd\x94\x24\x0b\xcd\x6a\x87
\xbe\x9d\x1e\x9b\x63\x34\x6e\x34\x64\xb6\xd3\xef\x8c\x82\x95\xbf\x4c\xa7\xbd\x34\xd8\xe5\x81\x31\x37\x8d\x4e\x5d\x37\xa0\x68\x8e\xfa\xc9\xe9\x1a\xb9\x98\x6d\xfe\x11\xae\x3d\xbb\xee\xef\x27\xe3\x06\x6a\x24\x47\xff\xd9\x2c\x9a\xa4\x39\xdd\xef\xe3\x6b\x4e\xf7\x17\x39\xad\x61\xfc\xbb\xa8\x79\x2b\xd6\xe0\x91\xbd\xba\xac\x35\xf7\xd5\x7e\xfc\xb1\xb3\xba\x87\xfe\x0b\x2a\x39\x83\x7a\xd8\x51\xa3\x26\x8f\xc6\x9e\x95\x5f\x15\xeb\xfe\xb1\x62\xa9\x53\x79\x22\x97\xaa\x80\xe4\xb2\x39\xef\x06\x1c\x0f\x27\x7c\x91\x75\x8d\x46\xca\x2b\x34\xfc\x5a\xb9\x0a\x42\x9d\xd1\xe5\xed\x66\xb9\x4a\x4b\x2b\x5d\x17\x33\xcf\xec\xe7\x32\x61\x9e\x56\x3b\x87\x7a\x25\x79\x5d\x86\x85\x5c\x44\x90\x47\x7e\xe4\x23\x0c\x79\xc8\xfc\x88\x71\xa9\xa1\xa6\x04\x81\x08\x53\x88\x00\x8f\x22\x06\x55\x18\x45\x08\x2a\x06\x80\xf6\x45\x24\x28\xa4\x34\xc0\xda\x3f\xc7\xb2\x25\x50\xd1\x92\xde\xc2\x22\x46\xf9\xe7\x4d\x50\xdf\x66\x5f\xd1\x8d\x08\x96\x40\x2c\xdb\xfb\xb8\xab\xdf\xde\xd5\x07\x78\xe1\x67\x4d\xc8\x53\xd3\x7e\x21\x77\xea\x2d\xa3\x89\x97\x2b\xe4\x96\xed\x99\x2c\xae\xd4\x70\x7b\xb4\x0f\xbc\x53\x8d\x3a\x6e\x73\xb0\x3b\x7a\x78\x9d\xf3\x32\x33\x37\xac\xb4\x07\xe3\xca\x86\xf2\x66\x33\xcd\x69\x71\xd3\x3c\x92\x95\x56\x8d\xd4\x68\xd5\x45\x2a\x5b\xe8\x8d\x30\x5e\x6c\xb7\x5a\x38\x49\xc5\x1a\xc4\xdf\x81\x3b\x7d\xfd\x8d\xac\xb7\xd5\x31\x5b\x55\xe5\x43\xca\xb7\xf0\x27\x39\xfa\x0f\xc5\xd2\xc5\xf0\xe7\x89\xf8\xa9\x52\x68\x52\xf2\x6c\xfc\xd4\x5f\x8c\xdf\x32\x8c\xbf\xc7\xd3\x3f\xc4\x97\xe0\xf1\xbd\xf8\x75\x16\x2c\x56\xd2\x93\xdc\xb9\x17\xcf\x9e\xc5\xfb\xbb\x70\x0f\xfd\x17\x7c\xd9\x2d\xe6\xe6\xb0\x84\xfc\x19\x6d\x9a\xe6\xb0\x30\x59\xa4\xeb\x0c\xe7\x56\xeb\x41\x49\xc3\x13\x86\x87\x49\x2e\x10\xe3\x32\x37\x6d\xa3\x77\x98\x35\x26\xf3\x9e\xb5\xd2\x1a\x3a\xd5\x54\xaa\xce\x5b\xa5\x6d\x01\xcf\xec\xf4\x9c\xa1\xf2\x2c\x73\x9c\x76\xa5\x64\xc1\x6a\xb6\x4b\xbe\xda\x4c\x10\xc8\x90\x31\x4a\x20\x84\x91\xaf\x29\x80\x54\x12\x02\x40\xa8\x24\x80\x8a\x83\x90\x70\x22\xa4\xcf\x38\x64\x38\x44\x3e\x26\x91\x0c\xb0\xc6\xc8\x97\x08\xfa\x14\x84\xe7\x58\x83\xd7\x8a\x96\x66\xce\x71\x6a\x5e\x21\x5b\xca\xb2\xbc\xc5\xcd\x02\xad\xe4\x48\xa7\x65\x76\xcd\x52\xb9\x56\x76\x58\xb5\x6b\xf0\x7c\xc3\xac\x61\xa7\x9a\xad\xe6\x7b\x5e\xa3\xdb\x75\x32\xd4\x3e\x37\xb0\x84\x54\x32\x02\x21\x07\x00\xdf\x42\x1a\xce\x09\xfe\xb4\x01\xd9\xdb\x2c\xc4\x9c\x60\x21\x29\xa7\xec\x1a\x6b\x90\xa9\x1f\xfa\x73\x6e\xa5\xea\x55\x32\x09\xd5\x68\xc2\x86\xfb\xfc\xa8\xbb\x29\xd2\x94\x11\x35\x4a\xcd\x70\xa2\x43\xa6\xf6\xb3\xd1\xa5\xc3\xd2\x19\x69\x32\xe7\xb2\xdb\xc5\x77\x47\xfc\x45\x56\xcc\x27\xbe\xb8\xaf\x47\xa6\x94\x27\xfb\xf3\x2f\xd9\x86\x73\xdc\x34\x0f\xa3\x43\x99\x04\xb3\x7c\x69\x65\xe7\xc7\xf5\xc5\x6a\x66\xd7\xe5\xd0\x5d\x54\x0e\xc6\xba\x5c\x71\xba\xab\x46\x35\x7d\xd8\xb8\xad\x71\xba\xda\x43\xf3\xc6\xa0\xd9\x4e\xa5\x07\xc5\x5a\x6e\x9c\x1a\x74\xbb\x76\xdf\x41\xd0\x8d\xa8\x28\xa7\xfd\x20\x5f\x70\x16\xcb\xc9\x3a\x97\x54\x56\x4c\xbc\xca\xee\xd8\x29\xba\xe6\x68\x5d\xa5\xb9\xf4\xb0\x05\x8f\x95\xce\xb2\x68\x4c\xcb\x43\xb0\x9b\x91\x8c\x2c\x1d\x6a\xa8\x45\xf1\xe2\x64\xea\xd2\xe5\x97\x7f\xa1\x6b\xd4\xbc\xee\x69\xe1\xfa\xfb\x17\xee\xfe\x88\x3f\xdb\x44\x2d\xd2\x6d\xc3\x7d\xe0\x36\xb7\x5d\x24\x37\xe7\x15\xf6\x63\x96\xed\xec\x31\x44\x6a\xd3\xce\xd9\x62\x39\x17\xe3\xcc\x21\x87\x9c\x59\xf9\xe4\x77\xa3\x8d\xb4\x36\x2e\x2e\x05\xb9\xa6\x75\xec\xb6\xbd\x43\xe5\xbc\x92\xdf\x7a\x7b\x79\xef\x2c\xa5\x77\xde\x21\x63\xe2\x15\xf1\xc5\x73\x7f\xe6\x86\xe6\x55\x32\x7f\xac\x69\xdd\xf9\xfb\x9b\x89\x76\x9d\x8d\x72\x0f\x93\xe2\xb4\xbc\x0b\xbc\x5f\x2b\xcc\xc5\xec\x7d\x19\x31\x0b\x67\xc5\xd6\x30\x53\x2d\x2c\xab\x79\x43\xd5\xcb\x83\xde\x49\xa1\x69\x96\x47\x1e\xa
f\x6d\x4e\xee\xd1\x31\xc4\xd4\x19\x97\x1a\x17\x16\xf7\xbe\xb7\x5a\xcc\x7e\xb8\xff\xaf\x7d\xfe\x98\x4e\x6d\x7a\xcd\x75\xb7\x50\xd9\xa7\xc7\xa3\x54\xbe\xd3\xc9\x51\x3d\x5f\x9b\x5d\x54\xac\xa7\xa2\x7e\x16\x9c\x9c\x6c\xaf\x33\xd3\xb3\xb7\x77\xf8\x17\x92\x4a\x79\x5e\x89\x7b\x5e\x6f\xac\x87\xc4\x65\x7d\x8f\xc4\x23\x15\x7b\x9d\xc1\x20\xe8\x64\xd6\xbd\x3a\x1d\x05\x08\xfc\x5a\xa1\x69\xc4\xb2\x9a\xa2\x3c\xee\x1d\x61\xb7\xdf\x38\x2e\x47\x47\x2f\x6b\x8f\xba\x6d\x85\xcb\x8b\x41\x0a\x83\x0a\x08\xd4\x61\x3d\xf0\xaa\xf9\x41\xcf\xb2\xe9\xf9\xe3\x1f\xbf\xef\x57\xf4\x8e\x9e\x7a\xfe\xd4\xc5\x1c\x74\x79\xfe\xf9\x93\xcf\x5f\xfa\xe9\xe7\x7f\x5b\xdf\xb3\xfc\xed\xdc\x4a\xe8\xff\x08\x7f\xbb\xb4\xd9\x56\xab\x56\xa9\x2b\x1a\xda\xa5\xc7\x46\xe1\x60\xf6\xec\xa5\x4f\x0f\xf6\xae\x3b\x58\x36\xd6\x5e\xd1\x6a\x0c\xda\x05\x70\x4a\x8b\xa0\x36\xe9\x55\x4f\x28\x5c\xda\xa4\xe5\xec\x76\xa4\x0e\xea\xf9\xea\xbc\xed\x66\x72\x99\x70\x53\x9b\xa5\x22\x2b\x42\xd3\x63\x7d\xe0\xfb\x8b\xc0\x4c\xbc\x96\x04\x0b\xb0\x50\x61\x48\x15\xe4\x61\x84\x00\xc4\x21\xf5\xa9\x46\x10\x2a\x12\x60\x84\xa0\x08\x58\xa0\x25\x04\x88\x62\xc8\x02\xe8\xfb\x01\x12\x81\x82\x08\x43\xc6\x20\x0d\xe9\x8b\x94\x82\x12\x93\x52\xc8\x6d\x29\x85\x7d\xde\x7d\xe8\x3c\x1b\x93\x52\x18\x64\x57\x7d\xf8\x71\x29\xe5\x7d\x11\xdd\xef\xe5\xee\xde\x1a\x0f\x71\xd4\xd8\x68\xbc\xbe\x21\x19\xa3\x12\x80\xa5\xdb\x9e\x8d\x61\x7b\x55\x6f\x76\xea\x6c\xe8\x39\x73\xe5\xe7\x2a\xdd\x74\xc6\x9a\xf6\x5d\x78\x6a\x4c\x27\xd5\x63\x1f\x04\xf3\xfa\x30\xc4\xa3\x46\xa6\x80\x4f\xb2\x29\x52\xb9\xaa\x6b\x54\xec\x62\xb9\xd8\x1f\x7b\xd9\x4d\xdf\xed\x3a\x7d\xc7\x2d\x0d\xe7\xf0\xd0\x4d\x7d\xb3\xea\xea\x9f\x96\x6e\x8a\xd7\xdf\x7f\xe8\xed\xbf\xfc\xe8\xff\x31\xe9\xe6\x12\xa8\xf2\x04\xba\x9f\xc7\xff\x5d\xe9\xe6\xff\xd0\xf3\x27\x28\xdd\xec\xff\xf3\x3e\x25\x2e\xdd\xc4\xc7\x87\x76\x86\x27\x9f\x25\x05\x8c\xff\x42\x49\xe5\xff\x92\x26\x76\x96\x54\x7e\x0a\x3c\x12\xce\x0d\x94\x32\x54\x8c\x61\x18\x52\x41\x7d\x4a\x7d\x8d\x19\x40\x11\x63\x40\x2b\x1a\x50\x1f\x00\x21\x42\x44\x7c\x48\x02\x4c\x88\xe2\x48\x2b\xa5\x90\xcf\x50\x28\x7d\x21\xb1\xf8\x25\xa9\xc0\x24\xed\x29\x37\x73\x37\x24\xc1\xf4\xd3\x1a\x23\x6f\xb3\x10\x63\x48\xb0\x94\x00\x71\xf4\xbc\xa4\x52\xf8\xe8\x46\x5f\xc6\x9f\x96\x54\x62\xf4\x62\x92\x0a\xd8\xaf\x86\x7a\x35\xab\xaf\x52\xb4\x8a\x3c\x53\xcf\x67\x70\x3b\xa2\xd3\x55\xa5\x58\x6e\x54\x56\xd5\x6e\xb5\x3a\xa4\xab\xa1\x2a\x6e\xfb\xe9\xd9\x4a\xdb\xcd\xc1\xba\x5f\x4f\x8f\xf1\xc4\x90\x79\x77\xd1\x0a\xc2\xd4\xc6\x57\x25\xbb\x30\xed\x14\xa6\xce\x21\x6b\xda\xa5\x51\xbb\xfa\xcd\x8e\x22\xdf\x92\x54\xb2\xf1\x38\xa1\xfb\xdf\xfe\xbe\x7b\x3d\x8b\xfc\xf5\xf7\xff\x9d\xb7\xff\x37\xcb\xaa\x77\x3e\xda\xeb\xdf\x0f\x20\x35\xc9\x9c\x59\xf0\xc5\x0e\xf1\xbe\x4e\xe2\x3f\x61\x87\xf8\x30\xce\xe9\x2a\xa9\xfc\x77\x3e\xff\x7f\x93\x1d\xe2\x23\xe9\x46\x3f\xf7\xfc\x97\x4b\x73\x79\xfe\xf1\xbf\xf8\xfc\x1f\x46\x14\xfd\x37\xf1\xb7\xb3\x74\xf3\x53\x80\x93\xac\x74\xa3\xb0\x0a\x09\xf7\x31\x84\x1a\xa0\x73\x59\x4f\x8e\x14\x05\x04\xb1\x80\x06\x94\x10\xa2\x29\x62\x90\x60\x18\x29\xca\x19\xd0\x42\x53\x4e\x18\x57\x82\xab\x90\xc8\x73\x17\x68\x0c\x92\x94\x6e\x6e\xe5\xe3\x70\x20\xf8\xe7\xde\xa2\xf3\xec\xb9\xd3\x26\x12\x82\x48\x80\x90\xe4\xec\x79\xe9\x26\xff\xd1\x8d\xbe\x8c\x3f\x2d\xdd\xc4\x38\x6a\x4c\xba\xe1\xbd\x52\x61\x29\xc0\x56\x79\xf5\x4d\x8a\xd2\x65\x8a\xb5\xbc\x95\x8b\xb3\xab\x1d\x3b\x65\x37\x95\xe2\x7e\x1c\x19\x87\xc3\xba\x17\x76\xab\xb9\xed\x98\x1f\x7a\xeb\x7e\x70\x1c\x54\xcc\x56\x67\x34\xd0\x15\x4f\x1e\xfb\x46\x1b\xee\x36\xc7\x20\xec\xb8\x04\x1a\x87\xba\x53\xdb\x18\x49\x4a\x37\xf1\x78\xb6\x07\xde\xfe\xe8\x7a\x16\xb9\xeb\xef\xff\x9b\x6f\xff\xdb\xdf\x99\x98\x8f\xf9\x
11\x74\xbf\xb0\xcc\x0b\xba\xbf\x8f\x5f\xfb\x77\xd1\x3d\x26\xdd\xfc\x57\x3e\xff\x7f\x13\xba\x7f\x24\xdd\x84\xcf\x3d\xbf\xd5\xbf\x3e\x7f\xff\x5f\x7f\xfe\xb7\xbf\xff\x9b\xf8\xdb\x25\x6f\xec\x87\x00\x27\x61\x2f\x93\x8c\x18\xf1\x35\x88\x04\x90\x01\x10\x52\x32\x05\x23\x2c\x89\xd6\x5c\x52\x00\xa0\x4f\x80\xd6\x9a\x49\x3f\x0c\x18\xa7\x7e\xc0\x70\x10\x85\x50\x62\x14\x62\xdf\x8f\x02\xf4\x3f\xff\xfb\xff\x90\x4c\x52\xba\xb9\x95\x37\xc6\xa1\x60\x9f\xd7\xdd\x38\xcf\x9e\xa3\x2e\x11\xa1\x94\x00\x4a\x99\x00\xcf\x4b\x37\xef\x1b\x2d\xdf\x21\xdd\x7c\x98\x77\xf6\xf1\x88\x49\x2b\x43\x26\x97\x56\x56\x4d\x5a\x75\x69\x08\xd7\xf0\x97\xc7\x31\x9b\xe7\x0e\xc3\x92\x89\x0a\x9d\x8e\x3d\xd3\xce\x70\x14\x1d\xb2\xb4\x75\x2c\xba\x39\xaf\x57\x36\x0e\x95\x8a\x32\xab\x8b\x9c\x3f\x69\xb2\x2a\x5c\x74\x6b\x1d\x5c\x20\x95\x49\x21\xdb\x23\x83\x5c\x78\xc8\x9f\x2c\x96\xa4\xd7\x28\x1b\xfb\xea\x23\x6f\x33\xb9\xee\x6d\xf6\xfa\xfb\xff\xe6\xdb\xfc\xf6\x77\x06\x5c\xff\x7e\x04\xad\x2f\x71\xa6\xc1\xf5\x42\xfc\x9f\x40\xeb\x98\xb4\xf2\x5f\xf9\xfc\xff\x4d\x68\xfd\x91\xb4\xa2\x9e\x7b\x7e\xc7\xbe\x3e\xbf\xfa\xd7\x9f\xff\xed\xef\xff\x26\xfe\x76\x96\x56\x7e\x0a\x70\x92\x95\x56\x22\x8d\x39\xe0\x51\x10\x52\x25\x08\xc1\x92\x2a\x2a\x45\x84\x61\xa8\x38\x94\x8a\x41\x04\x45\x48\x94\xf6\x89\x16\x61\x18\x9e\x65\x16\x8c\x23\x22\x18\x03\x42\x42\x00\x7e\x49\x2b\x22\x31\x69\x85\x49\x79\x2b\x72\x97\x23\xc9\xd8\xa7\x9e\xa6\xb7\x59\x88\x00\xa2\x12\x08\x81\x45\x02\x31\x31\xb7\xea\xd9\x3f\xc6\x71\xef\xb5\xdd\xc4\xe8\xc7\x22\x7d\xd3\x6e\xae\x6f\xcb\x75\xb7\xea\xee\x03\x73\x98\x0b\x8b\xe1\x60\x6e\x6f\x11\x35\x8f\x19\xd0\x33\xd4\x74\xe4\xcd\x67\x5e\x6e\xb3\x65\x75\x1b\xf4\xfd\x7a\xa6\xd5\x48\x4f\x34\xa6\xd4\x48\x87\x7b\x58\xae\xfa\xf3\x4e\x57\x56\xd3\x5b\x30\xdf\x4a\x8b\x0f\x68\x2b\x80\x91\x2a\x24\x26\xd5\xc4\x3e\xf4\xc0\x5b\x3f\xa8\x5d\xcf\xe0\x93\xf1\x75\x4e\x47\x82\xf4\x9f\xad\x7f\x9f\x20\xd7\x7f\x28\xa7\xf1\x67\xe8\x3f\x1e\xab\xf1\x60\x0e\xc8\x23\x6b\xbf\x10\x72\xbf\xea\x2f\xf1\xa3\x7b\x77\xc9\x31\xbc\x9f\xfe\x4b\x0e\x48\xf5\x30\xcc\x65\xaa\x81\x6b\x1d\xd6\xee\x7e\x3e\xcc\xd6\x71\xd7\xcf\x14\x37\x5b\x6f\xdd\x0f\x5d\x2f\x9b\x9f\x83\x42\x1e\x98\xc5\x60\x3b\x1e\x94\x73\xee\xb8\xdf\x6a\xa6\x69\x67\x93\xd2\x5e\x15\xab\x72\xa3\x81\x96\x0e\xaa\x36\xd2\x7c\x53\x28\xee\xbc\x14\xd1\xad\xb6\xec\xe0\xe4\xfb\x47\x62\x42\x75\xc4\x23\x41\x15\xd3\x3e\x92\x5c\xe3\x48\x4a\x12\x69\x1d\x21\x2e\x03\x86\x05\x44\x0c\x09\x26\x60\x10\x42\x8e\x7d\x18\x31\x48\xa1\xa0\x3e\xc1\x4c\x68\x72\xae\xb2\x85\x44\x62\xd1\x95\x4c\xca\x5b\xd1\x95\x1c\x49\x21\x3e\x6d\xd5\xf5\x36\xfb\x82\x24\x12\x91\x24\xf4\xde\x2f\xea\xa5\x7c\xa1\xf7\x7e\x9d\xad\xf5\x1f\xbf\x77\x45\x06\xab\x7b\x58\xe2\xd3\xc2\x77\x72\x26\xca\xab\x6d\xbe\x19\xac\x3c\x5c\x72\x3a\xa7\xe5\x6e\x9b\xa2\x2e\xef\x34\x87\xa4\x3c\xcb\x4e\xa2\xa0\xb8\xdd\xcc\x38\x3b\x4e\x3c\x35\xda\xee\x8b\xfd\xa0\xe9\xa2\xc9\x52\x79\xd1\x84\xd3\x70\x5e\x6d\xf6\x8e\xdb\x55\x61\xe3\x54\xe1\x6a\x5c\xfa\x4e\x65\xe3\x3f\x8d\x0c\xef\x6d\x03\x0f\x45\x49\xfe\x41\x24\x31\xff\xc4\x7a\x7f\x06\x79\x62\x66\x9b\xc4\x39\xbf\xf1\x03\x48\xf5\xec\x7a\xed\x2f\xd6\xfb\x21\x52\xfd\x25\xa4\x48\x6a\xef\x2e\x35\x5a\x1e\x46\x2a\x54\xf2\x37\x82\xb5\x07\x75\x63\x33\xec\x14\x87\x2d\x97\x81\xf2\x29\xd3\x12\x6c\xc5\x77\xdb\xda\x16\xc2\xdc\xd6\xea\x1d\x68\x07\x1d\xf9\xaa\x97\x4a\xad\x17\xcd\x6c\x8e\xaf\x4d\xe2\xc2\xcc\x66\xe1\xcf\xec\x2c\xd5\xa9\x8e\x6f\x41\x57\xad\x96\xc7\x55\xad\x21\x7d\x6a\x25\x5f\x19\x19\xf9\x90\x70\x4c\x43\x1e\x00\x0a\x81\xc4\x54\x12\x42\x7c\x15\x86\x82\x6b\x1e\x69\x89\x94\x14\x5a\x22\x49\x43\x14\xf8\x0c\x06\x80\x6a\x44\x43\x18\x71\xad\x21\x88\x42\xfc\x82\x54\x38\x31\xa4\xba\
x95\x17\xcf\x31\x00\x5f\xe8\x3c\x18\x00\x1e\x03\x2a\x46\x93\x70\x3f\x7f\x51\x0e\xf2\x0b\xa0\xfa\xaa\xbc\xf8\xbb\xdf\x8b\xa9\x30\x85\xad\x07\xf5\xa1\xb4\x15\x83\x30\x12\x61\xa5\xaa\x9b\xc5\x0e\xb2\x5c\xfe\xeb\xae\xb4\xdb\xb9\xa6\x93\xed\xe4\x4b\x6d\xd7\x95\x85\x69\x6b\xe5\xcd\x7b\xd6\x06\xd5\x99\x9f\x86\xdb\xae\xdd\xca\xcf\x76\xbb\x6e\xda\x9e\xe7\x4b\xb4\xb6\x68\x8d\x06\xa8\x67\xae\x31\x68\x75\x8d\x7f\x10\xa8\x9e\x65\xfc\x99\xf8\x77\x3f\xda\xd3\x84\xd7\x6b\xfd\x89\xf5\xfe\x0c\x50\xc5\xb2\x58\x12\x67\xfc\xc6\x0f\x00\xd5\xb3\xeb\xbd\xbb\x05\xd9\x5f\x02\x8a\xa4\xf6\xee\x52\x82\xf2\x61\xa0\x32\x57\xe3\x72\x67\xdd\x28\xee\x72\x9d\x4e\x3a\xdb\x18\xe1\x89\x09\x65\xff\xb0\xdd\x23\xa5\x5b\x93\xa1\x93\xa9\xc1\x63\xc3\x19\x0c\x76\xf5\xc5\xc4\x07\x05\xd7\xe3\xe1\x78\x3b\xdd\xc9\x60\x9a\x71\xea\x3d\xce\x51\xf5\x50\x30\x1b\x95\xa6\xdf\xea\x92\x70\xea\xad\xca\xd2\x69\x27\xdf\xfc\x98\x44\x92\x4a\x81\x00\xf3\x03\xc8\x55\x84\x78\x40\x15\xd6\x81\xd4\x12\xb0\x90\x61\x1d\x68\x8d\x18\xba\x24\xde\x23\xed\x07\x61\x88\x02\xa5\x79\x10\x41\xa4\x94\x8a\x5e\x55\x2a\x92\x18\x50\xdd\x6a\x46\xc6\x31\x40\xf4\xf3\x40\xa9\xd7\xd9\x0b\x52\x41\x00\x58\x12\x61\xe0\x5f\x94\xf0\xff\x19\x57\xa2\x11\xb2\xd2\x29\x6b\x8d\x9a\x61\x6d\x93\x9d\x9f\x4e\xd6\xcc\xc2\xa3\xbc\x91\xda\xd6\x2c\xd3\x5f\xe1\xaa\xb7\xd9\x66\xd3\x04\x2f\x66\xb5\xca\x69\x0c\xb3\xe3\xf9\x2e\x5f\xf5\x60\xf7\x78\x52\x8b\x4a\x90\x2e\xeb\x6d\x33\x5d\x2c\x94\x4f\x6b\x5d\x04\xd5\x7c\xd9\x5f\xb7\x3a\x70\x62\x24\xe9\x4a\x8c\x7d\xe8\x2f\x23\x80\x15\xff\xee\x07\x23\x69\xc4\xb2\xff\xc4\x7a\x7f\x06\xb1\xde\x9f\x77\x92\x08\x60\xfc\x00\x62\x25\xb4\xde\x0f\x25\xc6\x9f\x40\xac\x47\x11\x23\xa9\xbd\xbb\x94\xda\x7f\x18\xb1\x7e\x8a\xe9\x24\x8b\x58\x82\xa2\xc0\x47\x54\x03\x4d\x7c\x1f\x12\xe9\x23\x14\x2a\x28\x71\xa0\x02\x46\xa8\xaf\xa5\x0e\x7c\x84\x35\x8f\x38\xc1\x88\x03\x1e\xe1\x40\x42\x40\x38\xd1\x32\x60\x3e\x0d\x5e\x10\x8b\x26\x86\x58\xb7\xda\x9b\x71\xfc\x8b\xfa\x17\x88\x75\x99\x7d\x45\x2c\xc2\x93\x28\x04\xf3\x61\x2a\xde\x65\x7c\x2b\xb4\xf7\x63\x04\xbb\xa2\xc0\xfb\x11\xd7\xb5\x66\xce\x49\xb1\xf1\x72\xaf\x8f\xb9\x22\x69\x9c\x72\x56\xb0\x4a\x09\xb7\x59\x71\xdd\x35\x6c\xd3\x91\xea\xd0\x61\xbd\xbc\x27\x96\x9e\xcf\x4e\x9e\x51\x9b\xe8\x51\x37\x2f\xcb\x95\x59\xba\x33\x5e\xa9\xb9\x29\xb3\xbb\x50\x0c\x14\x9d\x94\x0f\x29\x38\x90\x12\xba\x87\x7c\x33\xff\x0f\x22\x57\x42\x48\xf0\xe1\x7e\x1a\x3f\x80\x5c\xce\x9f\x58\xef\xcf\x20\xd7\xfb\x3b\x9b\x24\x12\x18\x3f\x80\x5c\x89\xad\xf7\xbb\xa9\xc5\x7f\x09\x39\x92\xda\xbb\x5c\xe9\x31\xfa\x2f\xc8\x95\x3f\x28\x5c\x59\x14\xd6\xd9\xf5\xd0\x4c\xd9\xe2\x68\x20\x96\xde\x65\x26\xd6\x60\xb0\x2a\xb7\xb6\x5d\xec\xa4\xba\xe3\x91\xaa\x18\x78\x99\x17\xc0\xb0\x7b\x9d\x39\x32\xa3\x46\x63\x91\x86\xb4\x80\x0b\xc6\xae\xc6\x71\x94\xeb\x54\x7a\x0d\x15\x6d\xba\x51\x14\xd6\xd6\xf5\x56\xf2\x25\x32\x85\x52\x0a\x28\x24\x14\xd6\x52\xfa\x0c\x2b\x45\x65\xe0\x87\x3c\xd4\x0a\x47\x54\x51\xa6\x03\x2c\x09\x52\x94\x08\xad\x42\x02\x02\x85\xa5\xa2\x40\x86\x2c\x00\x02\x86\xe4\x05\xb9\x58\x62\xc8\x25\x6e\x23\x17\xfb\xbc\xdc\xff\x79\x36\x8e\x5c\x42\x24\x51\x1c\xc4\x7d\x77\xc4\xcf\x26\xa5\x9c\xe7\x7f\x0f\xc5\x79\x37\x62\xba\xd7\x38\x67\x31\x73\x9c\xc9\x56\xa5\x9e\xef\x8a\x5e\x67\x5c\xf2\xf2\xdc\x1b\xdb\xac\xdc\x9e\x4c\xc3\x82\x3b\x2d\x1e\xfc\x61\x0e\x76\xfd\x56\xa1\xe7\x17\xf2\x87\x13\xad\x0a\x7a\x48\xb1\xfd\x69\xd1\x3d\x1c\x82\x35\xcb\x84\x68\x42\x96\x87\xd6\xa8\xce\x3d\xcc\xac\x7d\x6d\xb8\xcc\x18\xff\xa4\xee\x95\x1c\x22\xf4\x3f\x96\x0a\x12\x5e\xaf\xfb\x27\xd6\xfb\x33\x08\xd6\x7d\x77\xde\x49\x22\x82\xf1\x03\x08\x96\xc8\x7a\x9f\x29\x96\xf1\x97\x10\x25\xa9\xbd\x74\xe7\x8f\xd1\x7f\x41\xb4\x9f\x62\x42
1c\xef\x59\x08\xe5\x6c\x27\x44\x87\x79\xfc\xe5\x9d\xe3\xeb\x7a\x78\x5b\x4f\x5e\x36\xa7\xdb\xe3\xe8\xe3\xb9\x62\x55\x28\x36\x72\x3f\xe2\xc5\xfe\x95\x03\x7f\x9c\x04\x04\x13\xcd\x4f\x7a\xb8\x24\x94\xb9\x5e\x53\xc9\x81\x93\x98\x1f\xd1\xb2\xff\xfa\xe7\x87\x88\x53\x07\xf4\x51\x80\xfc\x35\x0a\x72\xd3\xc0\x75\xf4\x18\xcc\x9b\x11\x89\xdb\x35\x32\xd1\x1c\x85\xc8\xcc\xe9\xae\x3b\x47\x9a\x73\x8d\xc6\xd2\xe6\x01\x3a\x95\x8d\xef\x4e\xf8\xed\xd0\x3a\x17\x1d\x20\xd6\x2e\xc7\x89\xcf\x25\x5c\x49\xbc\x6a\xbc\x53\xbb\x8f\x6d\xe7\xc7\xad\xaa\xbd\x79\x5a\x38\x79\x73\xbd\xe0\x8f\xbf\xe3\xba\xd6\xe8\x2d\x2e\x2b\x51\xfc\xd1\xe3\xe5\x7e\x9c\xab\xfb\xf3\xa6\x89\x22\xd5\x91\x85\x4e\xef\x40\x87\x9f\xb1\xdf\x58\xcd\x6c\xf3\x2d\x40\xcb\xa3\x59\x64\x85\xeb\x2a\xb9\x41\x4d\xa9\xe6\xb0\xe3\x1f\x6a\x62\xb1\x2b\xb4\x04\x51\xc9\xf1\xea\xf9\x4f\xa2\x94\x6b\xd5\xc4\xe3\x5e\xf2\x8f\xdf\xdc\xf0\xf3\x77\x91\x2b\x56\x85\x1c\xf6\x09\xe0\xf2\xcd\x4c\xec\xef\x49\x7d\xc8\x41\xdb\x70\xad\xcd\x7f\xfc\x7e\x1f\xff\xef\x7f\xfd\xe5\xa3\xb1\x31\xd7\x82\xe0\xdc\xbb\x2e\xcb\xbd\xc5\x5f\xa9\x68\x93\xdf\x0b\xa4\xc7\xb4\x5d\x2f\x39\xdd\x6c\xa4\x68\x78\x31\x95\x92\x6b\xf4\xcf\xea\xbc\x7a\xf2\x8f\xa4\xb6\xf8\xa4\xc5\xdb\xad\x72\x6b\x85\x3d\xde\x4e\x3f\x2f\x4a\x5e\xa3\x8e\x15\x7f\xd2\xea\x89\xc8\x4e\xf6\x0f\x6e\xa7\xc5\xdd\x35\xcb\xcd\xca\xfc\x4c\x46\x1e\x7f\x49\xbf\x00\xf0\x42\x45\x36\x48\x13\xdb\x30\x3a\x34\xb9\xdf\x8a\xb1\x35\x89\xff\x48\x3b\xde\x41\x77\xa3\x25\xef\xd5\xe5\x89\x64\xb8\xf4\xad\xf9\x00\x66\xf2\x6a\xce\x8b\x88\x1f\x0d\xc4\x6f\xbb\xb0\xfb\x43\xf2\x9b\x8e\x23\x69\x70\x7e\xd3\x57\x44\x86\x88\x97\x49\x5c\x57\x77\x62\x29\x55\x89\x43\xc8\xc3\x34\xf3\xf8\xec\x7b\x86\xd6\xa9\xb7\x25\x5c\x27\x21\xb1\xd0\xc7\xe9\xf9\xcb\xde\x7a\xf7\xf9\xeb\x89\xf2\x57\x44\x94\x4a\x27\x00\x49\x2f\xc2\xc3\xa7\x3f\x4c\x93\x56\x40\xec\xc1\xf3\x48\x2e\x3e\x4c\xb9\xc8\x81\xbc\xcb\x19\x17\xb9\x49\xa7\xd6\xbf\xfd\x50\xfc\x55\x89\xa7\x07\x7e\x55\x4b\xe2\x73\xef\x23\x62\x2c\xda\xaf\x6a\xe5\xf7\x41\x77\x64\x2d\x24\xb1\x40\x74\xa3\xd1\x7b\x90\xa3\x7d\xda\xf5\x22\x77\x9a\xb9\xc3\x04\xf8\x50\xc9\x9c\xb9\x5a\x78\x39\xc3\x5d\x78\x87\x31\xd3\xa1\xdc\xff\x0f\x00\x00\xff\xff\x19\x33\xb7\x32\xb0\xde\x04\x00") + +func kahunaHorizonSqlBytes() ([]byte, error) { + return bindataRead( + _kahunaHorizonSql, + "kahuna-horizon.sql", + ) +} + +func kahunaHorizonSql() (*asset, error) { + bytes, err := kahunaHorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "kahuna-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xed, 0x81, 0xcb, 0xee, 0xa, 0x69, 0x55, 0xe, 0x28, 0xb4, 0xb2, 0x9b, 0x5e, 0x40, 0x6c, 0x8f, 0xe1, 0x8e, 0x58, 0xe, 0x53, 0xcb, 0xb7, 0xb, 0x67, 0x32, 0xa7, 0x26, 0x96, 0xa4, 0xa8, 0xc6}} + return a, nil +} + +var _offer_idsCoreSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x67\xb3\xea\x48\xd2\x3f\xf8\xbe\x3f\x05\xd1\x6f\x6e\x4f\x70\x67\x28\x55\xc9\x76\x6f\x3f\x11\xc2\x0a\x10\x12\xde\x6d\x6c\xdc\x28\x27\x10\x46\x02\x19\xdc\xc6\xf3\xdd\x37\x30\xe7\xc0\xe1\xc0\x31\xc0\x35\xff\x9d\x56\xcc\xdc\x3e\xa0\x54\x66\xd6\xaf\x32\xb3\x32\xcb\x88\x7f\xff\xfb\xb7\x7f\xff\x3b\x51\xf5\xc3\x68\x10\xf0\x46\xcd\x4c\x30\x1c\x61\x82\x43\x9e\x60\xf1\x74\xf6\xdb\xbf\xff\xfd\xdb\xf6\x7e\x36\x9e\xce\x38\x4b\x38\x81\x3f\x3d\x12\x2c\x78\x10\xba\xbe\x97\xd0\xfe\x23\xff\x47\x38\xa1\x22\xeb\xc4\x6c\xf0\x6d\xfb\xf8\x19\xc9\x6f\x8d\x5c\x33\x11\x46\x38\xe2\x53\xee\x45\xdf\x22\x77\xca\xfd\x38\x4a\xfc\x9d\x00\x7f\xed\x6e\x4d\x7c\x3a\x7e\xfd\xad\xcb\x26\xfc\x9b\xeb\x7d\x8b\x02\xec\x85\x98\x46\xae\xef\x7d\x0b\x79\xb8\xe5\xfb\x9a\x98\x4e\xdc\x2d\x6b\xee\x51\x9f\xb9\xde\x20\xf1\x77\xe2\x4b\xab\x99\x57\xbf\xfc\xf5\x24\xdb\x63\x38\x60\xdf\xa8\xef\x39\x7e\x30\x75\xbd\xc1\xb7\x30\x0a\x5c\x6f\x10\x26\xfe\x4e\xf8\xde\x81\xc7\x90\xd3\xf1\x37\x27\xf6\xf6\xb2\x88\xcf\x5c\xbe\xbd\xef\xe0\x49\xc8\x5f\x88\x99\xba\xde\xb7\x29\x0f\x43\x3c\xd8\x11\x2c\x71\xe0\xb9\xde\x60\x4f\x12\xf8\xcb\x6f\x21\xa7\x71\xe0\x46\xeb\x2d\x73\xc7\xf9\xeb\x00\x00\xc7\x01\x1d\x7e\x9b\xe1\x68\x98\xf8\x3b\x31\x8b\xc9\xc4\xa5\x5f\xb7\x88\x51\x1c\xe1\x89\x3f\xf8\xeb\xb7\xdf\xb2\x75\xbb\x9a\x28\x5a\xd9\x5c\x37\x51\xcc\x27\x72\xdd\x62\xa3\xd9\x38\x50\xfe\x27\x9e\x0d\x02\xcc\xf8\xd0\x0d\x23\xb2\x0e\xf9\xfc\xaf\x37\xa9\x43\x3a\x9b\xc7\x7e\x10\x4f\xc3\x8f\x11\x73\x6f\xf1\x11\xca\x09\x67\x03\x1e\x7c\x84\x72\xab\xa7\xc3\xf9\x07\x29\x3f\x40\x46\x78\x18\xf9\x8e\xc3\x03\xd7\x63\x7c\xf5\x36\x2d\xa6\xd4\x8f\xbd\x88\xe0\x09\xf6\x28\x0f\xff\xfa\x4d\x37\x9b\xb9\x7a\xa2\xa9\xa7\xcd\xdc\x09\xb5\x6d\x99\xbd\x0b\xf0\xfa\xc1\x3a\xb1\xe3\x9e\xb1\xad\x46\xb3\xae\x17\xad\xe6\xc9\x43\x2f\x09\xbf\xcd\xc6\x7c\xfd\x11\xfe\xd1\xea\x7d\xd6\xcf\x34\x9f\xe0\xea\xf0\x0f\xe8\x7c\x4a\xf6\x71\xde\x41\x1c\x46\x13\xd7\xe3\xe1\x5b\x9c\x9f\x89\x3e\xcc\x77\xab\x05\xdf\x45\x83\x37\xf8\x1e\x89\x3e\xce\xf7\xd9\xe4\xdf\xe2\xfb\x4c\xf4\x61\xbe\x7b\x7a\xd7\x73\xfc\x37\xf8\x1e\x89\x3e\xcc\x77\x16\x93\x30\x26\x6f\xf0\xdc\x13\x7c\x86\xdf\xc4\x0d\x87\xf3\x98\xc7\x6f\x21\x7b\x4a\xf6\x71\xde\x9c\x07\x6f\xc1\xba\xbb\xff\x61\x6e\x3b\x37\x7e\x8b\xdd\x9e\xe0\xc3\xfc\xf6\x51\x69\xc8\x31\x7b\x9b\xed\x0b\xba\xef\xcc\xfd\x10\x29\xf9\xfc\xdb\x07\xc5\x10\xec\xbd\xc1\x9c\x60\xef\xc3\x0a\x1f\xa2\xdf\x5b\xba\x3e\x91\x7c\x96\xe7\x36\x07\x78\x9f\xed\x96\xea\xc0\x79\x47\x7b\xce\xf8\x62\xc8\x7d\x9b\xf6\x39\x34\xbe\x47\x76\x0c\x74\xef\x50\x3e\x07\xae\xb7\xe9\x8e\x81\xe8\x1d\xba\xe7\xc0\xf2\x2e\xdd\x87\xf4\x3b\x06\x94\xb7\xe9\xf6\x41\xe2\x5d\x9a\x67\x97\x7f\x87\x72\xeb\xc7\x6f\x93\xec\x7d\xf3\x6d\x9a\x17\xae\xf0\x36\x29\xc1\xde\xdb\x04\x4f\xa6\xfa\x21\xaa\xad\xe5\x1d\x08\x73\xdd\x66\xce\x6a\x14\x6d\xeb\x94\x78\x32\x1b\x84\xf3\xc9\x81\xa2\x91\x31\x72\x15\xfd\x15\xaf\xbf\x7e\xdb\xe7\xc6\x16\x9e\xf2\x3f\x9f\xbe\x4b\x34\xd7\x33\xfe\xe7\xe1\x91\xbf\x12\x0d\x3a\xe4\x53\xfc\x67\xe2\xdf\x7f\x25\xec\xa5\xc7\x83\x3f\x13\xff\xde\xa5\xcc\x99\x7a\x4e\x6f\xe6\x9e\x38\x3f\xf1\xfb\xed\x05\xc7\x97\x37\x0f\x8c\x33\x76\xa5\x92\xb3\x9a\x6f\x70\xde\x13\x24\x6c\xeb\x25\x83\x44\xb1\x91\xf8\xf2\x94\xdf\x3e\x7d\x17\xee\x98\x7c\x39\x97\xfc\xd4\xfc\x83\xcc\x67\x84\xde\x6d\xcf\x0b\x2c\x2d\xbb\x79\x86\x67\xa2\x53\x6c\x1a\xcf\x6a\x9d\x26\xb4\x2f\xc4\x1f\xb9\x9c\x29\xf2\x99\xc6\xbf\x62\xb2\x03\xa0\x6a\xa6\x66\x83\x6d\x15\x33\x0b\x7c\xca\x59\x1c\xe0\x49\x62\x82\xbd\x41\x8c\x07\x7c\x07\xc3\x07\x13\xf0\x2d\x19\xe3\x0e\x8e\x27\xd1\xb7\x08\x93\x09\x0f\x67\x98\xf2\x6d\x35\xf1\xe5\xec\xee\xd
2\x8d\x86\xdf\x7c\x97\x9d\x14\x08\x2f\x1a\x7b\x6a\x90\x87\x66\xee\x4c\xf7\xd8\xc8\x27\x03\xb8\x04\xf8\xde\xca\x4f\x83\xee\x1f\xbf\x25\x12\x89\xa7\x6f\x5c\x96\xa0\x43\x1c\x60\x1a\xf1\x20\xb1\xc0\xc1\xda\xf5\x06\x7f\x48\xf2\xbf\x76\x7d\x63\xb5\x4c\xf3\xeb\x8e\x7a\xfb\xa0\x87\xa7\xfc\x02\xb1\xaa\x5e\x22\x5e\xe0\x49\x7c\x89\x5a\x10\xe0\x39\xf9\x04\x87\xd1\xd4\x67\xae\xe3\x72\x96\x70\xbd\x88\x0f\x78\xf0\x4c\xf2\xdb\xbf\xce\xfb\xfe\xd9\x8b\xef\xc4\x22\xbc\x09\x88\x43\x21\x90\x20\xee\xc0\xf5\xa2\xb3\x9b\x21\x9f\x7b\xf1\xf4\xf2\x3d\x2f\x9e\x86\x31\xe1\x5e\x14\x6c\x4b\xc1\xf3\x66\xee\x69\x5c\xcf\x99\xe0\x6d\xc5\xc8\x78\x18\x5d\x56\x67\x4f\x38\xf4\xa7\x9c\xf9\x53\xec\x7a\x17\xa8\x44\xf1\x5c\xe9\x68\x18\xf0\x70\xe8\x4f\x58\x98\x88\xf8\xea\x5c\x33\x67\x82\x07\xd7\x34\x7a\xb3\x6f\x0e\x88\xc4\x5b\xa9\x13\x17\x13\x77\xe2\x46\xdb\xc6\xed\xdb\xff\x04\xc9\x64\xf2\xd6\x6d\x77\xe0\x6d\x73\xa1\xad\x5a\xfb\x6f\x4e\xb2\x81\xe7\xd4\xe2\x00\xfa\xb7\x5d\x59\x9d\xc8\x18\xb9\x4c\x39\xf1\xc7\x1f\x4f\x5d\xf1\x3f\x7f\x27\xc0\xbf\xfe\xf5\xc6\xd3\xe7\x0a\x9e\xf3\x79\xd5\x80\xf7\x38\xbe\xe8\xcb\x33\x6e\x2f\xfb\xf9\x3d\x4e\xaf\xe1\x39\x63\x77\x01\xbf\x3d\xcf\xd7\x8e\xb1\x1d\xff\x6e\xf5\x89\x6d\xca\xb8\x77\x07\xcf\x67\xfc\xd4\x17\x5e\xf8\xc0\x6b\xa1\x2f\xc7\xe7\x5b\xc5\xbf\x4c\x8c\xf7\x8a\x1c\xbe\xc3\xe1\xf0\x44\x19\xf9\x95\x6d\xcf\x02\xbe\x78\x97\x88\xc4\x74\xcc\xa3\x89\x1b\x46\xef\x92\x3e\x67\xdb\x4f\xe6\xbe\xff\x9a\x4e\xfc\x90\x47\xee\xf4\x8a\xe7\xef\x02\xeb\x05\xdf\x3a\xe9\xf3\x97\x49\xfd\x33\xbf\xb3\xfe\x3e\xca\xb9\x62\x3a\xd7\x6a\x83\x97\x6c\x8e\xad\xb8\x66\x2d\x87\xe4\xeb\xd6\x1e\x3b\x14\x5e\x7f\x3c\x3b\x39\x0f\x3e\x18\x41\xf7\x33\x2f\xec\x5a\x04\xdd\x99\x3b\x0e\x43\x1e\x5d\xc2\x73\xef\xab\x57\x6f\xe3\xe9\xd6\xad\x2e\xb3\x9e\x05\x2e\xe5\xde\x95\x20\xb6\xbb\x79\x2d\xc2\xed\x6e\x26\x98\x1f\x93\x09\xdf\xda\x1b\x75\x77\x33\x92\x0f\x8d\xa2\x27\x3d\x7c\x28\x59\xf7\x6d\x39\xeb\xd7\x43\x03\xaf\xd8\xc6\xe1\xc9\x03\xc2\x67\x8f\x3e\xe1\x7e\xcd\x20\xf6\x09\xfb\xad\xf6\xb0\x2f\xeb\xf7\xe6\xe0\xce\x2e\x0d\xfc\xd2\x2b\xcf\xf5\x83\xe8\x19\x8d\x6c\x2e\xaf\xb7\xcc\x66\x02\x9c\x0f\x9b\x7c\x15\xe1\x28\xe2\xd3\x59\x94\xd8\xba\x45\x18\xe1\xe9\x2c\xb1\x4d\x99\xfc\x78\xff\x4d\x62\xe3\x7b\xfc\xf5\x60\xeb\x60\x77\x12\x07\x27\x43\xed\x35\x09\xd1\x7a\xc6\xdf\xef\x94\xfd\xb4\xc4\x09\xdf\xd7\x61\xff\x59\xe2\x95\xde\x39\xcc\x6c\xf8\xc1\x79\xa7\xfe\xb1\x43\xe2\x7f\x12\xe0\x5f\x09\xdd\xca\x26\xf6\x1f\xff\xaf\xbf\x13\xb2\x24\x21\xe9\x5f\x17\xfb\xea\xb4\x0c\xbb\xb9\xcb\x4e\x67\x79\x4e\x63\xee\x15\x34\xf6\x13\x6d\x5b\xaf\xbb\xa8\xd0\xb6\x76\xbc\x43\x95\x30\x26\x07\x25\x02\x1e\xbe\x18\x80\xd0\xc5\x8c\x31\xe0\xf8\xd9\x97\x5e\xeb\x73\x52\xf3\xde\xaa\xd3\xc9\x64\xdd\x07\x46\xc6\xbd\x62\xf3\x90\xbf\x35\xc2\xbc\xd6\xf3\xa4\x86\xbf\x55\xcf\x23\x8b\x8f\xeb\xf9\x6a\x90\x3b\xbb\xcf\xbd\x05\x9f\xf8\x33\xfe\xce\x90\x76\x14\x7d\xc7\x40\x74\x32\xdd\x71\x07\x04\x4f\xf3\xb5\x7f\x7c\xa4\x1f\x8e\x56\xf4\x1e\x10\xf3\x2b\x03\xcd\x4b\x10\x9e\xe6\x81\x5f\x70\x3c\x07\xe2\x85\xb4\xab\x60\x1c\xe7\x88\x6e\x06\xe3\x38\x29\xfe\xc7\xd1\x6f\x5f\x16\x6f\x17\x7c\xea\x2d\xef\x3e\x99\xe1\xba\x55\xab\x93\x25\x80\x5b\xca\xae\xdd\x88\xff\x46\xa4\x76\xc3\x30\xe6\xc1\xc7\x59\x51\x9f\x5d\xac\x4e\x5f\xc1\x12\x4d\xdc\xa9\x7b\x25\xa3\x78\xb3\x16\xfc\x99\x55\xd5\x89\x75\x9e\xac\xaa\xdc\x54\x45\x9d\x3e\xff\xa8\x3a\xea\x84\xe7\xed\xf5\xcf\x5b\x5c\xf7\x9d\x76\xc6\xe9\xd0\x93\xff\x73\xd9\xf1\x5e\x4c\xf7\xde\x6c\xe4\xa7\x6b\x68\x7b\x33\x8f\x56\x2f\x42\xf1\x07\xea\x8d\x73\x03\x5c\xed\x56\x29\xaf\xde\xa5\x43\xec\x0d\xf8\xc5\xc2\xfe\x14\x9c\xd3\x65\xbb\xdb\x63\xf5\x71\xee\xfc\x76\x88\x7e\x30\x3e\xc4\x67\x
eb\x4b\xe0\x44\xab\x80\x87\xf1\xe4\x62\x74\x8f\x56\x53\xfe\x6e\x3d\x77\x5c\x62\xbd\x1d\xcf\xb3\x75\x8b\x5b\x41\x3d\x5b\x71\xfe\xe3\x43\xc0\x1d\x1e\x7a\x0b\xbd\x03\xc9\x25\x20\x3e\x66\x76\x67\x2b\xdc\xb7\x00\x95\xdd\x56\xd6\x8e\x1f\xbc\x33\x19\x9a\xc8\xea\x4d\xfd\x1d\xcc\xde\x66\x19\x7e\x9a\x5f\xd1\x6a\xe4\xea\xcd\x44\xd1\x6a\xda\xc7\x49\xc5\xb6\x6e\xb6\x72\x8d\xc4\x1f\x5f\x0a\xe9\x7a\xb5\x67\x14\x4d\x98\x29\xa2\xbc\x55\x13\xd3\x5d\x33\x5f\xb1\xb2\x66\xbe\xd4\xb2\xaa\x2d\x68\xf4\x50\xbf\x92\x6f\x18\xb6\xd5\xca\xe4\x6c\xbd\xd1\x51\x6a\x19\xc5\xee\x42\xe3\xcb\xd7\x84\x76\xb8\xa4\xfd\x7f\x64\x00\xbe\x26\xc4\xaf\x09\xf0\x75\x8f\x72\xe2\xcb\x97\xaf\x89\x2f\x7a\x4d\xd7\x75\xfd\xef\xbf\xbf\xec\x6e\xc0\xa7\x7b\xc7\x7f\xff\xf5\xd7\x7b\x1a\x66\x6a\xd5\x5e\xc1\x10\xcb\x92\xd2\x4d\x67\x73\x56\xb9\xdc\x95\xa4\x72\xb6\xd9\xb1\x9b\x65\xa9\x93\xed\xd4\x6b\xb6\x01\x4d\x23\x97\xed\xa2\x5c\xb9\x5d\xac\xd5\xcd\x4a\xae\x51\x48\x17\x8e\x1a\x6e\x15\x53\x25\x55\xd3\x90\x28\x69\xe8\x6d\x0d\xc5\x9b\x34\x84\xa8\x96\x87\x46\x2b\x27\x41\xbd\xd2\x6d\xe5\x5b\x06\xd2\x7b\x25\xbd\xdb\x2d\x74\xbb\x6d\xd8\x36\xba\xbd\x5e\x5d\xce\xf5\xba\xb9\x66\xb5\x9c\xed\xf6\x1b\x7a\x47\x56\xba\xb6\xf8\x23\x35\xec\x96\x0b\x72\xdd\x12\x6d\xab\x98\xab\x66\x2a\x56\x3e\xad\x20\xa8\x8b\x48\xee\x4b\x55\x2b\xdb\xa8\x9b\x85\x4e\x59\x29\xa4\xcd\x4c\xa5\x66\x16\xf3\xb6\xd8\x50\x72\xbd\x4e\xbb\xf5\xac\xa1\xfc\x42\x43\xf9\x6b\x02\xbd\xa1\xa1\x76\x8b\x86\xba\xd4\x49\x57\x7b\xba\xd4\x13\x3b\x7a\xce\xe8\x76\xea\xb0\x55\xb6\x61\xcb\x16\xd3\xad\x82\xd1\xaa\x29\x62\xae\x55\x2d\xdb\x16\xac\x19\x6d\xb1\x53\x37\xec\x62\xdd\x2a\x97\x0d\xf8\xac\xa1\xf4\x42\x43\xe5\xc4\xd6\x3e\xac\xe1\x15\xcf\x3b\x9f\xb1\xbc\xc3\x89\xaf\xcf\x43\x7e\xd6\x93\x5f\xce\x45\x3e\xc3\x28\x23\xa6\xa9\x8e\x84\x64\xce\x65\x95\x09\x04\x2a\x44\x22\xaa\xe6\x40\x84\x1d\x09\x09\x02\x51\x24\x59\xc3\x50\x74\xb0\x23\x88\x00\x61\x06\x88\x04\x89\x8c\x10\x01\x0a\xe1\x9a\xb6\x85\x0a\xdc\x79\x6d\x79\x48\x0a\xc4\x90\x23\xe8\x38\x50\x54\x31\x50\x08\xe0\x0a\x70\x98\xe0\xc8\x0c\x09\x2a\x15\x1c\x4c\x19\x04\x44\xa6\x14\xa8\x14\x21\x26\x29\x8a\x04\x25\x4d\x95\x55\x01\x4a\x58\x90\xbf\x7c\x4d\x08\xbb\x9e\xfa\xa2\xff\xb2\x57\xba\x5b\x76\xc5\x75\x6a\xdd\x28\xa7\x95\xac\x97\xd5\x0c\x08\x56\xa3\x74\x32\x04\x83\x28\x5c\x16\x97\x1b\xa1\xcb\x1a\x9d\x1e\x4e\x97\x70\x7e\xb0\xa5\xcf\x59\xa2\x89\x37\x33\x58\x7b\x97\x73\x5f\xef\x0a\xe2\x8e\x2c\x3d\xfe\x01\x0d\x79\xe8\xf5\xe5\xcc\xd5\xaf\x18\xaa\x44\x64\x19\x89\x94\x6a\x5c\x13\x34\xcd\x61\xc8\x21\x04\x3b\x40\x96\x24\x2a\x89\x14\x4b\x5c\x21\xaa\x00\x99\x24\x43\x2e\x23\x4e\x31\x70\x08\x24\x1a\x56\x35\x22\xa8\x80\x8a\xce\xd6\xc8\x1e\x61\xec\x98\x43\x45\xd0\x98\xc8\xb9\x44\x09\xa7\x8c\x53\x49\x23\x98\x4a\x8e\xc0\x00\x83\x4c\xa1\x58\x85\x44\x23\x4c\x12\x81\x4a\x28\xa2\x14\x69\x0e\x17\x64\x59\x06\x12\xd2\xb8\xba\x35\x54\xf8\x35\x21\x48\x92\x26\x29\x9a\x22\x08\x07\x8b\xcd\xc0\x6a\x7f\x24\x58\xb1\xe4\x03\x52\x52\x3a\xa2\xb7\xb6\x17\xad\x55\x01\xb5\x67\xfe\x38\xb9\xc8\xeb\x76\x94\x11\xca\xb0\xa2\xa4\x15\xb9\x3f\x54\x61\x6d\x10\xc6\x7a\x63\xda\x00\xab\x41\x7e\x26\x38\x23\xb7\x09\x97\xd6\x60\xd1\xee\xd4\x33\x93\x46\x47\x8a\x2b\x55\x24\x8a\x8d\x8a\xbf\x43\xb8\x5b\x6d\x57\xaa\xcb\xed\x5f\xc5\xe7\x7f\xf6\xe1\x2d\x3c\x7e\x5e\xea\xd5\xda\xc1\x76\x86\x4a\x60\x18\x88\x0f\xe7\x8b\x56\xd8\xc9\x55\x1d\x7d\xb2\x68\x87\x33\x5d\x87\xa9\x7e\x1d\x94\x46\x55\x3d\x5f\x64\x06\xa9\xc1\xb0\xe1\xc6\xa5\x95\xd7\x52\x26\xeb\xe4\x86\xaf\xfa\x71\xd8\x5d\x01\xcb\xec\xae\xfd\xb2\xb4\x68\xd7\x8a\x8b\xec\x26\xeb\x25\xf3\xd3\x5e\xde\xe6\xc3\x9d\x45\x17\x2f\x58\x74\xe1\xa2\x79\x3f\x59\x74\x16\x94\xbe\xa3\xed\x7d\x9f\
xeb\x83\x16\x8d\xb1\x82\x10\x91\xb9\xa0\x31\x8d\x09\xcc\x01\x2a\x26\x2a\xa6\x02\x21\x0a\xa0\x5c\x13\x05\xcc\x88\x46\x91\xa0\x29\x82\xcc\x08\xe1\x0c\x10\x15\x61\xd1\xd1\x14\x41\xc1\x22\x54\x76\x61\xf3\x01\x5e\xa1\x32\x51\x24\xaa\xa6\x00\x45\x93\x55\x45\xa1\x18\x00\x59\xc2\x84\x51\x85\xca\xaa\x26\x01\x11\x09\x2a\x82\x54\x52\x80\xc6\x54\xea\x50\x0d\x73\x4d\x93\x14\x24\x6b\x92\xa8\x4a\x3b\x1e\xe8\xc4\xa2\xe1\x93\x45\x0b\xd1\xd4\xaa\xac\xf9\xb8\xef\x69\x55\x25\x5e\xa4\x27\xdd\x7c\xb3\xdc\xf5\xe2\xb4\xd9\xee\xc5\xbd\xe4\x7a\x90\x5c\xc7\xd8\x25\x05\x7d\x55\xcd\x49\xba\x1d\x25\x21\x42\x4d\xa3\x1a\x35\x2d\xd2\xf2\x46\x8d\x65\x75\x31\xf5\xe5\x01\xda\xac\xf3\x34\x5f\xf5\xa9\xda\x36\xab\x85\xc2\x1e\xe1\xad\x45\x9f\x9a\x4d\x63\xb8\x99\x17\x35\x6c\xf6\x92\x4a\x79\x26\xf5\x97\x48\xee\x71\x05\x20\xb7\xbf\xf2\x0c\x5d\xcc\x2e\x85\xa9\x54\xce\x8b\x75\xa3\x3c\xb5\xad\xba\xe9\x76\xd3\x12\x36\x9c\xb2\x5e\xe8\x04\xc8\x58\xf9\x93\x5a\x6d\x35\x58\xaf\xba\x19\x36\xaa\xae\xb1\xdc\x6f\x6f\x70\xbb\x91\x6f\xee\x3c\xa6\x72\xc1\x62\x2b\x83\x4b\xbd\xfe\x5f\x60\xb1\x0a\x25\x0c\x52\x49\xc6\x32\x41\x22\x72\x30\x00\x8e\xaa\x70\x28\x42\x59\x64\x5c\x92\x1c\x07\x28\xdb\x21\x1c\x11\x41\xd4\x80\x2a\x32\x4d\xe3\x92\x2c\x71\x07\x50\x26\x3a\x00\xe2\x5d\xfc\x7c\x80\xd5\x6f\x19\x33\x40\x01\x95\x90\xc2\x14\xcd\xc1\x1c\xcb\x8e\xa8\xaa\xa2\xc8\x04\x45\xc3\x92\xaa\x61\x88\x1d\x4d\xe0\x10\x6a\x48\xd4\x14\x07\x39\x22\x46\x8c\x60\x00\xa0\xa0\x21\xf2\x65\x97\x14\x3f\x5b\x2c\x7a\xb2\x58\x79\xb6\xb1\xa1\xd8\x87\xcc\x70\x32\xe5\xb8\xbc\x24\xd1\x72\x33\xab\x61\x28\x2d\x57\x93\x15\x21\x8b\x28\x23\xda\xe3\x64\x37\x2f\xd7\xbc\xd1\x78\xd8\x8d\x9b\xe3\x6c\x27\x9a\x88\xa6\x45\x52\x3d\xab\xd6\x1c\xcd\x4a\xb9\x62\x21\x9b\xd4\xc2\xe1\x28\xdc\xa0\xd2\x26\x9c\x2f\xc4\x54\x6f\x87\xf0\xce\x62\x4f\x8c\xc8\x92\xd2\x52\x47\x2b\x87\x2b\xdf\xee\xda\xb3\x90\x5a\x16\xcb\xba\x1e\x8e\xad\x55\x26\x55\x5d\xc2\xf1\x58\xc9\x66\x5b\x3e\x68\x74\xed\x6e\xdf\x8b\xb2\xfa\xaa\xe9\x74\x3d\x59\x9e\x6a\xc5\x61\x0e\x74\xa7\x13\x17\x97\x17\x63\x5e\x9c\x59\x25\x67\xa3\x95\xab\xc4\xd7\x8b\x7d\x7b\x67\xb1\xb5\x0b\x16\x5b\xf5\x2f\xf5\xfa\x7f\x81\xc5\x12\xca\x31\x90\x14\x2e\x50\x06\x05\x81\x41\xea\x10\x85\x23\x87\x68\x9a\xe4\x30\x51\xa2\x4c\xc6\xa2\x08\xa1\xe0\x20\x15\x53\x0a\x24\x08\x89\x28\x11\x95\xaa\x02\xd3\x64\x09\x6e\x2b\xae\x87\x58\x3d\x42\x48\x71\xb0\x4a\x55\x59\x52\xa1\x20\x51\x8d\x40\x41\xa5\xa2\x8a\x08\xc6\xb2\x23\x48\x0c\x30\x2a\x2a\x40\x46\x5c\xd0\x10\x86\x1a\xe0\x92\x23\x8a\x64\x6b\xf0\x08\x21\x71\xab\x87\x74\x62\xb1\xe2\x93\xc5\xa2\xb5\x66\xe6\xf1\x34\x04\x55\x59\xaf\x0e\x93\xa5\x4c\xdf\x92\xda\xa9\xa5\x33\x32\x37\x61\x6b\x5c\x6c\xf6\xbd\x49\x9f\xab\x15\xa1\xaa\xcf\x8b\xa9\x52\xde\xdb\xb8\xe3\xda\x72\xa2\x77\xd2\x70\x96\xd1\xb4\x1e\x4e\xb5\x49\x0d\x96\x1a\x93\x6e\x30\x5b\x59\x5a\x8e\x69\x66\x2a\xc8\x56\x76\x08\xef\x2c\xf6\x24\xd2\x49\x51\x1e\x8f\x53\xce\x50\xa9\xe4\x6c\x37\x1a\x8e\x26\x6b\x96\x0b\xd2\x2d\x90\xaa\x00\x77\x82\xb9\x13\x0b\x2d\x02\x32\x4b\x81\x92\xda\xc6\x4a\xcd\x2b\xc3\xc9\x60\xd8\x2d\x85\xc3\x51\x6e\xa0\xcc\xa7\x6a\x1b\x58\x2b\x23\x3d\x12\xfb\xb6\x3b\xb6\xbb\xa0\x21\xd7\xbc\xca\xa6\xbe\xe3\xdc\xba\x60\xb1\xf5\xca\x59\x87\xa7\xf5\xff\x0e\x8b\xa5\x08\x6b\x94\x21\x28\x3b\x32\xc0\x9a\x28\x42\x85\x51\xcc\x38\x91\x35\x8d\x21\x47\xe6\x94\x21\x0d\x49\x88\x52\x0d\x41\x4c\xa9\xc2\xa0\x22\x4a\xc2\xd6\x4c\xa0\x4c\x1c\xaa\x6e\xad\xed\x11\x56\x0f\x00\xd0\x98\xa3\x6a\x32\xa4\x88\x40\x49\x14\x11\x24\x2a\x10\x09\xe5\x90\x60\x55\x05\x90\x51\x41\xe2\x92\x46\x20\x56\x10\x86\x44\x75\xa0\x44\x35\x99\xef\xb3\x0d\x67\x9b\xe7\xca\x27\x16\x2b\x3d\x59\xac\xb2\x99\xa7\xbb\x22\x05\x43\xa3
\xb1\x51\x9c\x51\x52\x9a\x38\x42\x7e\xd3\x99\xe5\x8a\x43\x55\x0c\x2a\xe9\x46\x19\x74\xc4\x75\x1a\x76\x5a\xe3\xd1\xb4\x32\x8f\x92\x30\xb7\x58\xa4\x73\x7d\x6c\x16\xe4\x88\x4c\x3b\xc1\xc4\xc6\xe9\x82\x35\x9b\x37\xb4\x7a\xd2\x69\x2b\x59\x21\x23\x90\xdc\x0e\xe1\x9d\xc5\x2e\x8f\x88\xc7\xb5\xa0\x5a\x0b\xe0\x7c\x62\xb5\x67\x99\xe2\x7c\x58\xc3\xbc\x56\x09\xb3\x5a\xa9\xd3\xb1\xf0\xaa\xd8\xb0\x8b\xcb\x4a\xb8\x5a\xb8\x93\x62\x4b\xcf\xb0\x64\xa9\x17\xda\xeb\x76\xae\x12\x0c\x72\x0b\xbb\x2c\xbb\xba\x49\xf3\xbc\x43\xca\x9e\xed\xc6\xd5\x12\x9d\xc4\xb9\xa8\xed\x6a\x3b\x8f\xe8\x5d\xb0\xd8\xc6\xf2\xac\xc3\x33\xfa\x7f\x87\xc5\x02\x86\x1c\x24\x22\x02\x1d\xcd\x01\x14\x73\xc2\x20\xd7\x88\x84\x99\x04\x11\x62\x48\x15\x19\xe3\x8e\x24\x2b\x1c\x08\x0e\xa3\x4c\xe5\x8a\x46\x10\x96\x1d\x20\x00\x2e\x88\x94\x6d\xad\xed\x11\x56\xaf\x00\x48\x55\x11\x02\x11\xa8\x82\x44\x44\x07\x51\x2a\x08\x22\xc6\x84\x40\x2e\x39\x58\x64\x9a\xcc\xb0\xa3\xc8\x48\xd0\x00\x11\x00\x15\x38\x95\x1c\x82\x91\x2c\x48\x80\x88\x0c\x7c\xf9\x9a\x50\x4e\x2c\x56\x7e\xb2\x58\xd5\x9e\x6d\x9a\xa5\x45\xaf\x3c\xc9\x95\xb4\x75\x24\xc3\x29\xab\x42\xc5\xb2\x9b\x2d\x75\xdd\x2c\x87\x2b\xad\x04\x5a\x0d\xba\x29\x05\xa9\x62\x9c\x5e\xfb\x35\x7b\x3e\x6d\xea\x95\x56\x73\x25\x0f\xd6\x60\x34\xf4\x0b\x05\x21\xef\x9a\x1e\x8a\x8b\xb5\x6c\x2e\x96\x60\x33\x14\x90\x23\xee\x10\xde\x5a\x6c\xfd\x04\x71\x15\x08\x42\x51\xa5\x4b\xee\xb6\x5b\x66\x7d\xac\x0c\x92\x45\xc4\x3b\x95\xd4\x38\xd9\x4a\x86\x0b\xd3\x5c\x94\x4c\xb5\xab\xe5\xac\xb4\x30\x5b\x9a\xc5\xda\xa0\x56\xcc\x93\xe6\x66\x93\xce\x97\xe7\x8a\x39\x49\x8e\x61\x07\x2e\xd8\xa8\x50\x0b\x6b\x4b\xbe\x72\xe2\x51\xaf\x9d\x01\x60\xc7\x99\x5e\xb0\xd8\x56\xeb\xac\xc3\xb3\xfa\x7f\x87\xc5\x12\xbe\xcd\x18\x11\x54\x00\x64\x5c\x02\xdb\xff\x09\x2a\x54\x29\x15\x1d\xd9\x81\x0e\x96\x04\x45\x51\x24\x80\x25\xe8\x38\x44\x71\x34\x15\x70\xc0\x39\xa1\x8a\x26\x70\x59\x63\xbb\x11\xfd\x11\x56\x2f\x3b\x70\xfb\x04\x21\x8e\x2a\x50\x51\x53\x34\x99\x42\x80\x30\x96\x24\x42\x11\x42\x80\xcb\xb2\x80\x30\x94\xa8\xc2\xa8\x28\x41\x04\xa0\x26\x10\xc6\x39\x47\x80\x69\x0a\x12\xbe\x7c\x4d\xa8\x27\x16\xab\x3c\x59\xec\x12\xa4\xac\x6c\xb8\xf0\x96\xeb\x78\xd1\x90\x49\x27\x6a\x8d\xaa\xcd\xa1\x85\x16\x6d\x4f\x4c\xa7\x28\xb4\x39\xb1\xa7\x6a\x3a\x3b\x6c\x5a\xf9\x24\x17\x52\x42\x79\xd5\xda\xd8\x8a\xe8\x19\x05\x34\x9e\x8f\x0b\xb3\xc6\x34\xcf\xf0\xb0\x5f\xf2\x86\x34\x97\x19\xcc\x8b\x62\x27\x9e\xa8\x3b\x84\x77\x16\x7b\x6a\x44\x76\xa5\xb0\xe6\x2b\x3b\xe6\x8b\xb1\x58\x2b\xf6\x32\xe5\x14\x87\x4d\xb3\xd6\xe5\x34\x97\x6a\xa6\xe3\x58\x60\x8b\x42\xcd\x9d\x34\x43\x6b\x51\x16\x02\xb2\x10\x8d\xdc\x44\x22\x19\x5d\x9e\xb5\x17\xd9\xca\x52\x9a\xe6\xe4\x12\x75\x68\xbd\x51\xc9\x8c\x09\x8a\xc5\x25\xec\xee\x4b\xbb\xc1\x05\x8b\x6d\x8b\x67\x1d\xfe\xdf\x62\xb1\xa2\x2a\x53\x02\x1d\x8e\x11\x16\x65\x55\x13\x18\x02\x02\x96\x19\x13\x18\x90\x1d\xa0\x31\x44\x1d\x2e\x0b\xa2\xc8\x1c\xa0\x49\xaa\xa0\xaa\x2a\x06\x8e\x24\x40\x4d\xe3\x14\x8b\x74\x37\x73\xf5\x08\xab\x27\x80\x2b\x10\x31\x51\x73\x90\xca\x38\x45\x90\x23\x8e\x1d\x95\x0a\x9c\x38\x4c\x82\xa2\x48\x08\x62\x92\x88\x1c\x01\x62\xc0\x55\x28\x88\x2a\x42\x0e\x15\x14\xa2\x48\xa2\xa4\x6d\x79\x68\x27\x16\xab\x3e\x67\x05\x72\x3c\x1b\x95\x96\x26\x6f\xd9\x2d\xbb\x90\x19\x55\x9a\x50\x5d\xcc\xea\x0c\xb5\xca\xad\x45\x12\x25\x7b\xd9\x81\x12\xac\x24\x63\xea\xe1\xb1\xb6\xc9\xa8\x85\x6a\x49\xee\x6f\x0a\x55\x29\x59\x6d\xf9\x29\xc9\x33\xf0\xa4\xb4\xae\x66\xcb\xc2\xca\x2f\x28\xfe\x4c\x09\x36\x6a\xb6\x78\x12\x63\x4f\xf3\xd8\x58\x31\xec\xb4\x4a\xbb\xa0\xe3\xcd\xdd\x95\xb7\xd8\xd8\x93\x1e\xca\xa2\x1e\xf2\xab\x80\xf5\x36\x9b\x8e\xb6\xee\x78\x6d\x32\x05\x78\x29\xad\xab\x2d\x6f\x33\xe2\xcb\xb
5\x98\x0c\xc4\x25\x5f\x68\xad\x71\x43\xa9\xb6\x6b\xa9\xdc\x3c\xeb\x17\x5b\x83\x4d\x2a\xcd\x51\x3b\x3f\xdd\xf1\x1f\x5f\xb0\xd8\x2e\x3d\xeb\xf0\x5d\xaa\xf2\x7f\xba\xc5\x5e\x59\xad\xb8\xb0\xf9\xf6\xb3\xcb\x14\x87\x0d\xb8\x8f\x58\x88\x7a\x2a\x92\xf4\x76\xbb\xf9\x34\xc8\x46\x64\xd0\xad\xcb\x39\xc5\xcf\x9a\xc0\xac\x25\x97\xbd\x46\x46\xdb\x74\x17\xdd\x76\x13\xad\xdc\xaa\xdb\x8b\x1b\x44\xc8\x2e\xa6\x35\x93\xab\x7f\x7f\x79\x7a\xbe\xd5\x6e\xb7\x0e\x06\xe4\x57\x97\xbd\x54\x9e\xa5\x68\x71\x6d\xb5\xe6\x29\x3e\x37\xec\xcd\x6c\xae\xf4\x98\xbf\xa4\x4e\x63\x33\x36\x9c\x7e\xbe\x5d\xcf\x94\x61\x6e\xbc\x7d\x5e\x38\xac\x70\xec\x94\x91\xbe\x26\xc0\x7f\xd4\xb3\xb5\x0f\x71\xbf\x00\xaa\x5d\x07\xf5\xf5\x06\xd6\x3b\xd6\x93\xae\xee\xb0\xbc\x8f\xe7\xf9\x26\xc9\x3b\xb8\x5d\xd9\xe2\x78\x07\xc7\x2b\x9b\x11\x3f\x6b\x99\x27\x1b\x12\x4f\x16\x21\x9b\xed\x42\xb3\xa0\xf4\x72\x85\x8a\x91\x86\x7a\xde\xec\x36\x8d\x7e\xbf\x99\x6f\xa4\xb3\x92\x24\x76\xbb\x69\x68\x74\x94\xba\x6c\x40\x31\x8b\x8c\x66\xad\xd3\x2c\x98\xdd\x5a\x59\xd9\x2f\x02\xec\xad\x2b\xc3\x9a\x4d\x2b\x95\xce\x56\x6c\xa3\x96\x09\x16\x95\x24\xdd\x98\xc5\x62\x52\x93\xb8\xc8\x8c\x28\x59\x1d\xc9\x62\xb5\x2f\x2d\xa3\xca\x5a\x39\x3a\xe1\x6e\xce\x7e\x57\x60\xec\x26\xf2\x73\xae\xb1\xe9\x67\x32\x6b\x31\x5d\xee\x9b\xcd\x82\xde\xf1\xda\x49\xbb\x54\x25\x3a\xcc\x68\xed\x7e\xae\xdc\x2e\x91\xa9\xb8\x4c\x39\xa3\x61\x71\xbd\xb3\xe4\x34\xd5\x5a\xcb\xd4\xee\xf9\xc1\xf3\x3f\xbb\x12\x3b\xb3\x7c\xfe\x9c\xd5\x75\x2d\x73\x12\xcc\xd2\x6b\x3c\xf5\x63\x27\x02\x76\xb3\x4a\x79\x58\x9a\x92\xf5\x7a\x90\xcc\x85\xb5\xa8\x3a\x8d\x33\xc9\x92\x11\x12\xd6\xcb\xb3\xde\xd4\xc9\xb7\x76\xec\x56\x0d\xd7\xf3\x7b\x6b\xa5\x14\xce\x19\xae\x32\x2c\xf5\xe5\x28\xad\x47\xcb\x9e\x96\xec\x94\xcb\x83\x88\xf0\x1c\x98\x16\x7b\x8a\xd0\x80\x45\x2b\x9b\xab\x69\x08\x17\x69\xd3\x5a\x13\x09\x65\x4c\x7b\x02\x17\xb6\x36\x4b\x8b\x71\x39\x08\xe4\x71\x77\xd0\x40\x7a\xaa\xd2\x4c\xd7\xfe\xfe\xfb\x7c\xd0\x7c\x70\xd7\xa0\xbb\xba\xa6\xf2\xb2\x6b\xb2\x7a\x73\xac\xcb\x48\x21\x0e\xab\xd0\xa4\x50\x11\xa2\x06\xb7\x4c\x3d\x99\xc4\xa3\x79\xd6\xa9\x16\xdb\xcb\x56\x72\xb8\x59\xb5\x42\xb5\xb7\x9f\x29\xdc\x76\xcd\xea\x3c\xe8\x7e\x1a\x7a\xea\x3a\x45\x6a\xcd\x06\x2c\xb3\xc8\xe4\xf3\x5d\xda\x69\x8d\xa2\x8e\x63\x35\xea\x6c\xdd\xc9\xb3\x56\xd0\x73\xe5\xa9\x3e\xe2\x36\x9b\xa4\xa4\x61\x91\xcd\x88\xa0\xcf\x4b\xf6\x12\xae\x26\xf5\x51\x2e\xdf\xd9\xa4\xac\x51\xa5\x9b\x09\x6d\xc5\x13\x79\xdb\xf6\x2a\x39\x90\xf6\xb3\xdf\x1f\x7a\xf1\x2e\xe8\x6b\x67\xd0\x67\xec\x46\x9e\x4b\x76\xcd\xc2\xb0\x3b\x08\x85\x85\x36\x10\xd2\xb6\x3d\x1e\xd7\x06\xbd\xaa\x02\xd7\x05\x7b\x53\xa5\x9e\xbd\x9e\xa7\x46\xda\xb3\x57\xac\xd2\xf7\x42\xcf\x5b\x41\xb0\x2c\x2e\xaa\xb5\x3c\x52\xe7\x2d\x50\x22\x99\x25\xb2\xe0\x20\x19\x5a\x1b\x54\x1c\x94\xca\x26\x37\x26\xf2\x62\x1d\x74\x60\xa1\x5b\x5f\x95\x78\xaf\x86\x8a\x49\x6d\x51\xb5\x0c\xa3\x5a\xce\x0f\xcc\xe9\xb0\x29\x1a\x85\x46\xad\xdc\x4e\x76\xc6\xfd\x74\x0d\x9a\x4c\x4f\xeb\xdf\x1d\x7a\xe9\x2e\xe8\x5b\xe7\x56\x3f\x52\xc7\x1d\xc7\x2e\xd7\xb3\x99\x56\xba\x67\xe0\xb1\x89\x84\x61\xa0\xb5\xa3\x6c\x6f\x52\x6e\xf3\xd8\xcb\x21\x50\x47\x60\x91\x0c\x2b\xcb\x67\xe8\x33\xf7\x42\x0f\x27\x83\x75\x1f\x1a\x25\xb7\x9e\x1d\xe1\xd8\xcb\xf7\x67\xd6\x68\x94\x6e\xad\x2b\xed\x94\x82\xa5\xb1\xcb\xa7\x15\xd1\x14\xd2\x7d\x58\x16\x14\x63\x3c\x5e\xa8\xf5\xb8\x10\xe5\x2c\x17\xc9\x3e\xe3\xee\xba\x83\x17\x3d\x23\x53\x2f\xa7\xf2\xd9\xf6\xca\xaa\x86\xa2\x3c\x34\x3a\xe9\xc1\x77\x87\x5e\xbe\x0b\xfa\xde\x2b\xab\xef\xad\xe7\x48\xe9\x35\x92\x6a\xae\x3e\xf1\x43\x32\x17\xe2\x3e\x8e\x5b\x92\x9f\xeb\xc1\xe9\xcc\x14\x0c\x49\xeb\x86\xd5\x96\xd9\x0e\x6b\xcf\xd0\x67\xef\x85\x7e\xcc\x4b\x29\x6a\x
8f\xea\xad\x1c\xe8\x57\x58\x57\xed\x17\x37\xd8\x4e\xa6\xd7\xad\xa5\xe4\x38\x42\xce\x2c\x0d\xfb\x2e\xa6\xc6\x08\x66\x9b\xf9\x8e\xb5\x89\xeb\x72\x6f\x60\x2e\xb2\xc5\x78\x11\x55\x9c\x31\x25\xdd\xd0\x48\xc5\x6e\x0f\x7a\x4d\x16\x29\x9b\x01\x5f\x8d\x3a\xbd\xec\xf2\xbb\x43\xaf\xdc\x05\x3d\x3d\x87\x5e\x34\xca\x43\x5d\x9e\xf7\x2b\xfa\xaa\x5e\x35\xe6\x59\xb3\x61\x17\x06\x3d\xd2\xea\x14\x63\x87\x8b\x43\xbd\xd2\x50\xbc\xbe\xbd\xe9\xb2\xd4\x31\xe0\xe4\xee\x85\xbe\x60\xa9\xe1\x6c\xd2\x6e\x8b\xf3\xe5\x64\xde\xb5\x95\xae\x18\x0b\x65\xa3\x8d\x9c\x6e\xa7\xae\xf7\xac\xba\xed\xe4\x66\xc2\x7a\x25\x2b\x41\x53\x0b\x82\x81\x1b\x6b\x2d\x27\x68\x56\x36\x0d\x67\xa8\x75\x32\x93\xfe\xa0\x59\x1a\x19\xa3\xa2\x63\x0c\xad\xce\x6c\x08\x17\xe6\x86\xac\x33\xdf\xdf\xea\xd5\xbb\xa0\x1f\x9c\x07\x9c\xae\xa4\x74\x53\xad\x20\xdf\xac\x28\x0b\x97\x52\xc2\x1b\xf3\x1a\x9e\x94\x7b\x6d\x61\x9e\x1f\x4f\x79\x7e\x59\x2b\x67\xfc\xd1\x10\xcb\xdd\x63\xc0\xc9\xdf\x0b\xfd\x0a\x95\x3a\x8b\xf6\x8a\x94\x5a\x45\x3a\xcb\x17\xa6\x5e\x4f\x4d\x57\xac\xa6\xcb\xf2\x8d\x6e\x3d\x4c\x05\x9d\x62\x6c\xca\xcb\x38\xde\x88\xc0\x53\x6a\x75\xd5\x1a\x56\x5a\x95\xb2\xca\xbc\xe4\xac\xb4\x8c\x55\x96\x2a\x4e\x9b\x69\xaf\x3f\xf2\x35\xd1\xe7\xa4\xba\xb6\xe5\x34\xcc\x7c\xff\x58\xaf\xdd\x05\xfd\xf8\xdc\xea\x9b\xa8\x62\x2e\x7b\xaa\x37\xf3\x2a\xbd\x94\x27\x6a\x8d\x51\x6a\x4a\xd9\xbc\xe5\x15\xd5\x4a\xd0\x2d\x0c\x48\xec\x7a\xf1\xa2\xba\xac\x8c\x8e\x56\x5f\xb8\x17\xfa\x70\x3c\x58\xb6\x71\x7f\x23\xcd\x79\x6b\x25\x9b\xfd\x85\x26\x37\x73\xb9\x6c\x6d\xca\xa6\x76\x75\x45\x19\x35\xa7\xd9\x70\x6e\xd0\xe4\x20\x94\xc0\xa8\x97\x9f\x00\xb7\x0b\x9b\x38\x6b\x6d\x8a\x8b\x40\xdc\x88\xa9\xae\xa6\x94\xb0\x2f\x0b\xed\x95\x57\x6d\xf6\x46\xe9\xa5\x97\x7d\x1a\x66\xaf\x57\x12\x97\xce\xf4\xdc\x50\x49\x3c\x9d\xeb\x39\x2e\xfb\x68\x58\xc3\x2a\xd1\x1c\xa2\x88\x48\x43\x88\x51\x05\x53\x28\xcb\xb2\x03\x29\x06\x8e\x2a\x12\x01\x10\xd1\xd1\x30\x07\x84\x43\x51\x21\x02\x51\x24\x55\x90\x98\x0c\x65\x85\x4a\xd2\x69\xa7\xee\xa2\xfa\x3e\xaa\x94\x04\xab\x82\xd4\x5c\x65\x29\x32\xbd\x1c\xab\x1b\x69\x53\x09\x07\x23\x05\x4d\x94\x21\xe0\x48\x4c\x56\x83\x81\xe6\x79\x19\xb0\x31\x77\x5b\x81\xde\x2a\xee\xaf\x9c\xe1\xf9\x74\xe3\x8f\xe7\x78\x9e\x1b\x3f\xc1\x61\x14\xd2\xd9\xee\xb4\xf1\xa5\xeb\xb9\x12\xdf\x57\xe1\x5e\x0b\x6c\x9c\x65\x6d\x93\x1d\x02\x7d\xae\x6c\xaa\x5e\x65\xbd\xce\x54\x17\xbc\x1b\x1b\x75\xc5\x19\x89\xc9\x38\x0b\x39\x35\x9b\x95\xf8\xb8\x24\xb3\x9f\x67\x51\xa7\xb3\xb9\xe9\x29\x2c\x3b\xde\x20\x23\xc8\xf4\xa7\x6a\xe8\x57\x87\x66\xce\x6c\x4a\xc1\x60\xe1\xd6\x95\x02\xea\xa4\xbb\x9d\x92\xb7\x6a\x3f\x23\x59\x72\xef\x34\xef\x5b\x7a\x22\xad\xa7\x67\x75\x55\x60\xe3\xcd\xb8\xd2\xed\x6c\x6a\xab\x3c\x58\x06\x6d\xd2\xe5\x69\x69\x83\x2b\x95\x55\xd8\x14\xed\x6c\x7b\xec\x1a\x65\xe8\x35\x16\x0b\x8b\x68\x05\xd5\x4f\xe2\x7a\xaa\x93\x1c\x2c\x60\x4a\x9d\xce\x69\xbc\x91\x5d\xb3\xd3\x9e\xa9\x46\xa0\xcd\x8a\x0a\xc5\xcd\xd1\x1e\xbf\xad\x3e\x99\xde\x9d\xd3\x68\xe9\x5b\xc2\x47\x4d\xc7\x2d\xc7\xea\xf6\x2b\x52\x36\x2f\x84\xa0\x52\x67\x95\xb2\xd0\x11\xd0\x80\x53\x38\xca\x56\x48\x2e\x99\x1d\x08\xfd\xe2\x70\x1d\xcd\x40\xa0\x6c\x3a\xa9\xfa\xa2\x5c\x9d\x8e\x1d\x61\xe1\x9b\xda\x22\x55\x9a\x7b\x66\x98\xf4\x5d\x61\x82\x9d\xb4\x9c\xc2\x0d\x1b\x15\xc6\xe2\xb2\xb8\x53\x27\x97\x9c\x19\xe3\x8a\x9a\xe9\x8c\xa3\x1a\x4e\x85\xed\xae\xed\xa6\xe1\xa8\x18\xb2\x61\x4a\x88\xb9\x94\x8d\x65\xbd\x50\xc2\x52\xb3\xec\xb9\xeb\x9e\xe3\x6b\xd3\xec\xa2\x38\x5d\x08\x79\xbb\x5e\xc8\x1b\x69\x3b\x36\xf4\x9e\xeb\xa6\x61\xb6\xef\x08\x93\x9c\xd0\x4f\xcd\x5a\xf3\x45\x7a\x79\xa7\xbd\x3d\xe3\xad\x2f\xef\x9d\xb6\xdc\x5d\xf4\x73\xf6\x5b\x33\x4b\xc5\x4a\xbe\x33\xa5\x49\xec\x4d\x2a\xdc\x85\
x24\xc5\xc7\xab\xb4\x0e\x4a\x5e\x7f\x34\x52\xbb\x46\x37\x23\x0d\x94\xf2\x10\x55\x7d\xd3\x66\x45\x98\xee\xb3\xe2\x44\x1b\xc3\xc1\x86\xae\x4d\x39\x59\x49\x56\x85\x14\x5f\x77\xe6\x76\xd4\xa6\xfd\x0d\x80\xc5\x25\x2d\x2d\x97\xfb\xa4\x71\x31\x97\xa7\x15\x4f\x8f\xa4\x9a\x54\x13\x7b\xe5\xca\xaa\x4a\xd6\xc9\x49\x1e\xb1\xda\xac\x91\x52\x1c\x69\x60\x67\xe3\x85\x31\xe6\x98\xf9\x47\x7b\x29\xcb\x23\xee\xa2\xd1\xd4\x2f\xaa\xcd\xc2\x24\x9b\xe2\x03\x8a\x94\x6a\x37\x32\xca\xe5\x4d\xa7\xad\x2e\xdb\x6e\x3f\x8d\x33\xb1\x64\x4a\xbb\x2a\xba\xbf\x7b\xa8\x78\x74\x22\xfd\xcc\xfe\x76\xd7\x8e\x34\xdd\x6e\x5b\x07\x9a\x8c\x10\xa7\x99\xe1\x37\xe3\x41\x65\x51\x8b\xb2\x4a\x7a\x58\x34\x91\xc5\x35\xd6\xae\x3a\x85\x62\xb2\xe4\x4a\xa5\x45\xcb\x4e\xf6\xf5\x48\xd9\xf5\x57\x3e\xdf\x6e\xef\xf7\x40\xea\xe5\xac\x5a\xa8\xae\xba\x0e\xca\x54\x46\xed\xf2\x02\xcd\x87\x9b\x50\xc6\x31\x34\x70\xc5\x30\x40\x28\xa5\x11\xac\xb7\x5b\x35\x37\x1a\xee\xfb\x37\xef\xeb\xab\xde\xd1\xc9\xd3\x17\xb7\x32\x16\x9e\xdb\x53\xb3\x70\x3c\x2c\x4c\xc7\x5a\x03\xf5\x94\xb1\x2c\xf5\x58\xbd\x0c\xeb\xba\x31\x32\x96\x3d\x37\x5a\x8c\x79\x5c\x49\x22\xaf\x9a\xa9\xa2\x61\x61\xd0\x1a\x6f\xb4\x4e\x01\x8b\xd5\x9e\x2a\x66\x78\x65\x30\x1d\x4f\x04\xc9\xa5\xe3\xe4\xa4\x97\x0f\xba\x0e\x11\x4d\x3f\x3b\x1f\xc6\xcd\xda\x09\xbe\x77\x46\xfe\xf7\xe2\xf4\xd3\x0b\x4e\xf7\x6f\x4f\xba\x1c\xa7\x05\xf0\x11\x4e\x1e\x8f\x96\x7e\x30\x9e\xe1\x30\x9c\x0d\x03\x1c\xf2\x0b\x9c\x9a\x3c\x8c\x12\x8d\x6c\x3e\x61\xed\x89\x13\x7f\x25\x1a\x7c\x16\xf1\x29\xe1\x41\x02\x02\x41\xfa\x88\x20\xc7\x0f\x28\x0f\xe9\xcc\xf7\x3c\xbe\x8a\x26\x38\xf6\xe8\xf0\x5c\xd0\xee\xed\x44\x1f\x61\xb6\x5f\x9c\x39\x9c\xfe\x09\x2f\xb7\xff\xff\xdd\x1d\x16\xfa\x3d\x72\xa7\xfc\xf7\x3f\x13\x60\x7f\x76\xe8\xf7\xc3\xeb\x5e\x7f\xff\x33\xb1\xbf\xbf\xfb\x72\x88\xc3\xdf\xff\xdc\xbf\x1b\x69\xf7\xe5\xff\x1e\x88\x1d\xce\x3f\x46\x38\xc5\xab\x68\x15\xba\x9b\x0f\x92\x07\x3c\xe4\xc1\xe2\x3d\xe2\xdf\xfe\xf7\x43\x50\xe0\x30\xda\xbd\xd4\x83\x1d\x0e\xd7\x5f\x80\xe2\xee\x45\xab\x0f\xe8\x71\xc8\x8a\x71\x40\x87\xee\xe2\x70\xf3\x4a\x97\x1c\xfb\x40\x38\x00\xb2\x83\x23\xf8\xfd\xcf\xc4\xef\x0b\x41\xf8\x8f\xf0\x1f\xf0\xfb\xe1\x06\x8d\x83\x80\x7b\x91\xb9\x6b\xda\xef\x7f\x26\xb4\x97\xdf\xa7\x77\xef\x5d\xd9\xe2\xf6\x7f\x3f\x03\x79\x84\xf4\x99\x72\xcb\x58\x55\x54\x26\x73\x47\x72\x38\x47\x50\x11\x19\x47\x8c\x41\xa6\x2a\xaa\xac\x70\x51\xc2\xb2\x24\x30\x24\x49\x58\x53\x1c\xc7\x71\xa8\xa6\x22\x26\x02\xa2\x61\x55\x94\x15\x8e\xb8\x40\x0f\x1a\x3d\xf3\xdd\x5a\xf1\x8b\x0e\x7c\xbe\xb3\x6b\xfb\xd6\xe4\x5e\xdc\xfa\xdf\xb3\xe7\x43\x0f\xcf\xb6\x7a\x21\x59\xe0\x1c\x49\x9a\x48\x15\x20\x53\x09\x70\xc2\x30\x93\x44\xc6\x28\x54\x18\xc7\x12\x53\x98\xec\x40\x20\x62\x0e\x15\x2c\xa8\x44\x81\x0e\x00\x8c\x69\x12\xe5\x2a\x60\x1a\xfe\xfd\xb7\x0b\x12\xae\x60\xe0\x08\x50\x25\x58\x92\xa8\xa4\x30\xc8\x28\x46\x40\xd6\x00\xe2\x1a\x95\x31\x94\x35\xa4\x42\x07\x10\xa6\xc8\x8e\x8c\xa8\xca\x45\x4d\x13\x14\x05\x70\x8c\xb7\x72\x15\x00\x31\xfa\x3c\x06\xc2\xd7\xd7\xf7\xfc\x38\x9a\xc5\xdb\xe7\x7e\xc7\xdc\x51\x35\xa8\x12\x89\x88\x10\x51\xe8\x10\x05\x0b\x40\x26\x08\x6c\xf5\xa4\x8a\xc6\x45\x51\x61\xaa\xa6\x08\x40\x86\xaa\x84\x20\x84\x9a\x04\x24\xc2\x88\xc8\x24\x81\x52\xf4\xfb\x87\x10\x06\x98\x38\x40\x84\x1a\x17\x20\x52\x01\x62\x1a\xe0\x14\x68\x18\x71\x24\xaa\x84\x43\x8c\x65\x51\x16\x31\x97\x45\xae\x31\x55\x84\xaa\x20\x09\x50\x95\x88\x88\xb0\x22\x00\x45\x63\x9f\x41\xf8\xde\x13\x38\x8f\x46\xf8\x71\x6d\x7f\x13\xe1\x7b\x5b\xfd\x4b\x23\xfc\x31\x3f\xfe\x07\x83\x7f\x30\xf8\x07\x83\x7f\x30\xf8\x07\x83\x7f\x30\xf8\x07\x83\xef\x86\xc1\xee\xaf\xff\x67\x5f\x1a\x5d\x99\x1f\xbd\xf2\x36\xa1\xcf\xce\x8f\x9e\xbc\x51\xe8\x11\x9b
\xa0\x84\xaf\x89\x7b\x5e\x88\xf0\x25\xd7\xaa\x7f\xf9\x9a\xd0\x20\x44\x48\x81\x00\xc9\xaa\x24\x2a\x8a\xa4\x02\xe5\x6b\x02\x2a\x4f\x40\xed\xc4\x68\x5f\x13\xea\xd3\x47\x70\x56\xb9\x5d\x6c\xd4\xed\x07\xf8\xf7\x8d\xba\xf9\x1d\x0a\x5f\x5a\x8d\xec\xd5\x46\x3d\xbd\xa4\x42\x7b\x6a\xd4\x8f\x6a\xce\xf7\xe9\x23\x74\xde\x47\x1f\x6d\xce\xbd\x26\xf7\x7d\x7a\xe7\xa9\x35\xc2\x49\x73\x9e\x36\xd9\xbd\xe1\x9a\xd7\xde\x82\xf4\x69\xe7\x3c\x7d\x13\xd2\x33\x56\x10\x29\x22\xd7\x34\x24\x6a\x44\xe3\x8e\xc2\x08\xd6\xb0\xc4\x08\x42\x48\x23\x8a\xea\x30\xac\x3a\x48\x54\x14\x85\x08\xd8\x41\x88\x60\x51\x56\x31\x93\x28\x60\x8e\x26\xca\x4c\x64\x87\x23\xe1\x2f\x16\x39\x2a\x67\x93\x9c\x69\x37\x95\x06\x26\x28\x15\xd6\xd1\x70\x69\x09\x93\x1e\xc0\xeb\x99\x2f\x68\x96\xb1\x5a\x98\x99\xb5\x2d\x45\xe9\x1c\xcd\xb4\x17\xcb\xbc\xb6\x44\x83\x28\xb0\xbd\xfe\xa5\xb9\xc8\xf3\xeb\xea\xab\x05\x76\x93\x88\x99\x7b\xe4\xf7\x52\x49\x7a\xc6\xef\x83\xf2\x5f\x2f\x7f\x5e\xc6\x5d\x90\x45\x2c\x01\x59\xe4\x04\xcb\xa2\x03\x29\x23\x98\x11\x55\x92\x89\x83\x44\x51\x15\x55\xc9\xa1\x32\x94\xa1\xa8\x60\x86\x11\x67\x48\xa3\x8c\x39\xc0\x91\x35\x00\x05\x84\xc8\xe1\x28\x3e\x7c\x8d\xfb\x4f\x6a\xf7\xa3\x70\x57\x4f\x76\xc9\x17\xbf\x03\xee\x90\x21\x59\xa2\x88\x8a\x9a\x83\x14\x59\x52\x00\x73\xb6\xa8\x53\x2c\x43\x09\x20\x4d\x96\x1d\xc0\xa1\xac\x61\xe8\x48\x02\x65\x44\x56\x29\x87\x9c\x0b\x1a\x56\x1c\x15\x8a\x12\xde\xe3\x8e\x1e\x8e\xfb\xad\xed\x7e\x14\xee\xca\xc9\x79\x9a\xf3\xe3\x8b\x8f\xc0\x1d\x00\xc4\x35\x41\x00\x02\x13\x34\x8c\x09\x11\xa1\x26\x6a\x82\x26\x21\x15\xc8\xaa\x2a\x33\x45\x51\x28\x94\x01\x44\x88\xca\xa2\xa2\x3a\x02\xd5\xa0\x8a\x15\x8d\x53\x91\x43\x45\x44\x7b\xdc\xc5\x87\xe3\x7e\x6b\xbb\x1f\x85\xbb\xb4\x7c\x57\xd0\x5d\xb8\x13\x26\xaa\x32\x23\x8c\x01\xc8\x44\x19\xa8\x82\x22\x2b\x02\x15\xb1\x84\x15\xae\x31\x99\xab\xb2\x44\x31\xd4\x28\x11\x05\x2e\x43\xa6\x60\xec\x28\x00\x43\x87\x73\x89\x20\x99\xf1\xc3\x0b\x12\xde\xc4\x3d\x13\xfb\xc8\x8f\x44\x69\x9e\xa9\xe6\x56\xb3\x5a\x0a\xf9\x86\x95\xdc\x08\x4a\x7d\xed\x86\xc2\xc4\xa9\xe4\x7b\xd3\x5a\x67\x10\xc4\x8d\x64\x73\xff\x80\x32\x0d\x0f\x87\xba\x2f\xbe\x8d\xe0\x43\xb8\x67\xef\x93\x3f\xa5\xb7\xc9\xff\xb8\xbd\x63\xa2\x51\x87\x72\x48\x44\x2a\x0a\xa2\xc0\x54\x42\x64\x45\x56\x19\x03\x9a\x48\x18\x21\x02\x55\x44\x20\x2b\x02\x60\x84\x20\x82\x01\x77\x34\x55\x55\x80\x23\x23\xbc\x3f\x34\x8d\xde\x8b\xef\xba\xd2\x33\x55\x5d\x19\x4d\x06\xb9\x2a\x07\xac\xd5\x52\xda\x06\xcd\xd6\x56\x72\x2d\xb5\x9c\x18\x73\x8a\x5a\x59\x41\xc2\x25\x54\x74\x85\xda\xf7\xc0\xfd\x16\xf9\xdf\x1b\x77\xaa\x60\x82\x54\x11\x39\x00\x03\x2a\x3a\x5c\xd1\x18\xa7\x1c\x38\x8e\x40\x54\xa4\x09\x8e\x82\x35\x86\x44\x2a\x2a\x94\x30\x85\x23\x09\x20\xd1\x51\x08\x54\x1d\x19\x30\x87\xee\xde\xe7\x84\x2e\xc6\xf7\x9f\x64\x6f\x97\x70\xbf\x45\xfe\x58\xfc\xbe\xb8\x4b\xdc\xd1\x28\x90\x09\x91\xa1\xc4\x20\xc3\x10\xaa\x82\x20\xa8\x84\x39\x82\xc8\x54\xa0\x2a\xb2\x8a\x24\xc4\x39\x74\x28\x56\x44\x49\xa1\x2a\x97\x80\xec\x10\x4e\x34\xc1\xd9\xef\x01\x42\x17\xe3\xfb\x4f\xb2\xb7\x4b\xb8\xdf\x22\xff\x7b\xe3\x0e\x55\xa4\x60\x81\x20\xc6\x1d\x48\x21\x63\x0e\xa3\x8c\x4b\x44\x14\x89\x03\x54\xa6\x60\x41\xc0\x1a\x82\x2a\x53\xb7\x99\x8e\xa0\x38\x18\x12\x22\xcb\x4c\xa4\x2a\x82\x32\xa0\x87\x7d\xeb\xef\xc4\xf7\x4f\x6e\x4a\x78\x48\x9c\xc9\xdd\x27\xff\x7b\xc7\x19\x2e\x3a\x9c\x10\x19\x63\x22\x39\x2a\x62\x8e\x48\x05\x8d\x88\x8e\xe0\xc8\x44\x95\x55\xc5\x11\x04\x55\x95\x05\x59\xe3\x9a\x26\x73\x41\x26\x18\x2b\x12\xc6\x14\x32\x15\x03\x8d\xca\xea\x1e\xf7\xb7\xe3\x7b\x66\x90\x4a\x8f\xd4\x36\xd2\x96\xa3\xa2\xd0\x08\x34\xd9\xa7\x8a\x3d\x0d\xa2\x21\x1c\xa5\x57\x9a\x69\xd7\x98\x36\x6e\xb7\x73\xc5\xa0\xd7\x78\x5c\x7c\xcf\xd
d\x27\xff\x7b\xe3\x8e\x24\x99\x73\x2a\x38\x40\x44\x90\x10\x42\x30\xd0\x18\xa3\x54\xd0\x90\xc2\xa1\x0a\x25\x84\x45\x46\x55\xae\xa9\x8e\xa3\x28\x4c\xe2\x1a\xe3\x5c\x10\x01\xe1\x12\xd0\x10\x56\xc0\xe1\x65\x24\x0f\x8f\x33\xa7\x7e\x5e\x3c\xeb\xc7\x0f\xe1\x9e\xbf\x4f\xfe\xa8\x75\x9b\xfc\x8f\xd7\x4d\x80\x41\x59\x64\x1a\x87\x1c\x21\x59\xa4\x50\x73\xb0\x23\x63\x99\x53\x55\x90\xb0\x8a\x55\x2e\x63\x51\xe6\x10\x01\x4d\x66\x58\x10\x05\xc1\x81\xda\xf6\x13\x53\x54\x45\x80\x87\x57\x6a\xbc\xc6\xfd\x81\xed\x7e\xd5\x8f\x1f\xc1\xbd\x70\x9f\xfc\xe1\xf2\x36\xf9\x1f\xc5\x5d\xc4\x00\x53\x41\xe6\x9a\xa8\x2a\x02\x10\x34\x4d\x96\x34\x02\xb8\x2a\x20\x81\x70\x20\x33\x47\xd2\x44\x91\x4b\x8a\x0a\x64\x0a\x18\x51\x20\x54\x25\x8d\x8b\x5c\xc5\x98\x09\x7c\x97\x47\x2a\x17\x71\x7f\x60\xbb\x4f\x76\x6e\x7d\x1c\x77\xe3\x3e\xf9\x83\xca\x6d\xf2\x3f\x3c\x3f\xe3\x60\x4d\xc6\x84\x73\x02\x38\x24\xca\xf6\xa3\xa8\x30\xd5\x51\x00\x40\x14\x4b\x0c\x71\x4d\x52\x90\x28\x4b\x90\x22\x95\x20\xa8\x62\x2e\x43\xac\x52\x81\x6c\x07\x63\x05\x1d\x5e\x6f\xf0\xf0\x3c\xf2\xee\x38\x53\xbc\x4f\xfe\xf7\x8e\x33\xd2\xb6\xdc\xe7\x88\x29\x54\x15\x1d\x46\x14\x8d\xc9\x0a\xd0\x34\x08\xa9\x04\xf1\x76\x10\x85\x0e\x93\x80\x0a\x18\x65\x22\x42\xd8\xd1\x80\x40\x99\xa0\xa8\x32\xc0\x9a\xa4\x10\x78\x38\xa4\xff\x1a\xf7\x07\xb6\xfb\x55\x1d\xf6\x11\xdc\x4b\xf7\xc9\x7f\x15\x67\x3e\x85\xfb\xd5\xb9\xe5\xc7\x4c\x2c\x7f\xef\x59\xe5\x82\xa9\x1a\xb5\x45\x6d\x4c\xca\xd0\xd0\x51\xa7\x3d\xaa\x07\xe5\xe9\xa8\x0b\x80\x53\x50\x43\xb3\xa8\x4c\x41\xae\xbe\x2c\x75\x52\x7a\x17\xe9\xcf\x5b\x6f\x77\xd7\xab\x03\x91\xe7\xf8\x04\x73\x4b\x36\xb9\x8d\x07\xa3\x55\x05\xb7\xaa\x9a\x9c\xde\x38\xa1\xc6\x01\xf5\x03\xab\xdf\xdd\xa4\x3b\xa5\x71\xde\x2f\x2b\xe3\xc5\x78\xdf\x01\xb6\x14\x94\x4f\xf9\xed\x67\x73\xb6\x9d\x9c\xc9\x6e\xe6\x8b\x71\x2d\x5d\xf3\x2d\xbd\xe4\x3a\xd5\x7a\x37\xeb\x9b\xc3\x45\xb4\xa6\x4d\x34\xc9\x57\x33\x35\x49\x18\x8c\x59\x98\x37\x70\xda\xea\x2c\x81\xd4\x48\xb5\x87\x1d\xd0\x1d\x8c\x03\x90\x49\x57\x73\xa2\x85\xf3\x6d\x58\x9e\xd2\x10\xf5\x97\xe6\xd4\x25\x62\xb3\x1e\x54\xcc\x2f\x5f\x13\x5f\x8a\xa8\x39\x1b\x83\xa9\xa4\x20\x28\x4f\x27\x70\x53\x91\x22\x31\x85\x47\x9b\x3a\xe2\xab\x60\x63\xcb\x75\x3f\x68\x2f\xad\x64\x8b\xb4\xc0\xb3\x62\x6f\x60\x70\xb4\x8c\x17\x87\x2e\x4e\x68\x97\x97\xac\xec\x47\xce\x6e\xe9\x0f\x98\x5d\xb3\x82\x75\xb3\x7e\x87\x7c\x5d\xff\x79\xb3\x5c\x97\xa2\xe5\xf7\x5e\x42\xb8\xdd\xc9\x5e\x1d\x7d\x3d\x6f\x8e\x0d\x33\x29\xdd\x16\xa5\x5e\x3a\x8b\x22\xa3\x9d\xb7\x85\x3a\xd2\x41\x85\x8f\xab\x6a\xa9\x2e\x7b\x96\xa0\x6b\xbc\xe3\xb2\x75\x31\xda\x47\xd9\xeb\x4e\xa6\x37\x72\x7d\xb7\x4f\x78\x7e\x99\x09\x83\x72\xda\x2b\x17\xe3\x30\x05\xa4\x76\x54\xca\xa6\x83\x81\x1f\xc6\x43\xb3\x96\x6a\xc9\xdd\xd6\x48\x8c\x96\x9d\xf5\x30\x54\x5a\x51\x43\xcc\x54\xf8\xca\xae\xc8\xa5\x39\x75\xe6\xa5\xb2\x00\x3a\x93\xf4\x78\xbc\xf4\xc4\x81\x5a\x2d\x3a\xa3\xe2\x6e\x21\x32\x3f\x9e\xd5\xfa\x76\x3c\x6d\xae\x37\x24\x20\xc3\xce\x62\x53\x2f\x36\xf2\xa9\x42\xc9\xad\xcb\x70\xa4\x34\x3d\x0b\x2d\xb5\x99\x3e\xac\x28\xbd\x67\xc5\x7e\xbe\x93\xdd\x6b\xe4\xf7\x3a\x59\x65\x69\x4e\x67\x0f\x74\xb2\x1f\x39\xa5\xf9\x21\x27\x7b\xf0\x7a\xd1\xed\x4e\xf6\xea\x90\xf3\x79\x73\x3e\xf1\x7a\x98\x1d\xfd\x75\x27\x4b\xa7\x4a\xe9\xc1\xa2\x58\x51\x84\x81\x99\x2e\x0e\x41\xd3\xb6\x34\x92\x9c\x24\xf5\x59\x7f\x23\x64\xca\xd9\x9a\xdb\xca\x37\xda\x20\xa4\xf5\x41\x9c\x16\xb0\xb7\xae\x2c\x2b\x75\x39\x94\x1a\x2e\xce\x2c\xa3\xac\xb7\x0a\xab\x1e\x16\x60\x9d\xb5\x6a\xe5\x89\x01\x31\xaf\xe8\x6b\xd5\xd8\x3a\x82\xd9\xec\xd3\x6a\xce\xdb\xb0\x09\xb5\x92\x79\xac\xce\xdc\x56\x76\x32\x55\xed\xd2\xcc\x5f\xb4\x28\x84\x23\xdb\x8c\x0b\xd8\x11\x4b\xf9\xe3\x4b\x5f\x7f\xbe\x
93\xdd\x6b\xe4\xf7\x3a\x99\x09\xc6\x72\xfa\x91\x23\xd9\x0f\x9c\xcf\xfb\x90\x93\x3d\x78\x71\xf0\x76\x27\x7b\xe3\xa4\xd6\xfe\xfa\xc4\x3b\x94\x76\xf4\xd7\x9d\x2c\x5b\xb4\x63\xb6\x89\x47\x8e\x9f\x64\x8d\x62\xa9\xdb\x25\x40\x6e\x8c\xcc\xb4\x69\x76\xc3\xfc\xca\x6b\x7b\x75\xc1\x28\xf1\x91\x33\x87\xd6\x20\xdf\x8a\xcc\xb8\x0b\xcb\x41\xdb\x6a\xf4\xd7\xf5\x74\x21\xd9\x59\x38\xcc\xef\x2e\x33\x55\x3a\x53\xa5\x61\xb6\x4e\x32\xd3\x11\xa9\xf6\x2a\x3b\x47\xc8\xca\xf5\x5c\x1a\xf4\xe7\x41\x94\x99\x94\x0a\x6d\x5b\xc7\x45\xd2\x65\x6a\x69\x50\xdc\xac\xc6\xbc\x6a\x94\xca\x65\xce\x57\x76\x09\x1c\x17\x3c\x7f\xbe\x93\xdd\x6b\xe4\xf7\x3a\x59\x59\x4c\x5a\xfd\x47\x3a\xd9\x0f\x9c\xbc\xfd\x90\x93\x3d\x78\x25\xf8\xf6\xe3\x90\x6f\xd4\x64\xbb\xd4\xea\xb6\xe3\x90\x28\xf5\x7c\x1d\xf9\x9d\x1c\x5f\x4c\x6b\xe3\x69\xb9\x03\xe7\x68\xa1\xd4\x9c\xb5\x5a\xad\xf0\x71\x8e\x08\xcd\x66\x51\x72\x57\xf3\x71\x11\xa4\xfd\x41\x37\xb0\x23\x65\x60\x0b\x32\xac\x91\xf1\x10\xb2\x46\xb3\xe5\xf0\xac\xbf\xa0\xa0\xaa\x63\x67\x98\xed\xae\xa2\x61\x5b\x9f\x84\x66\x3c\x9a\xa4\xa7\xeb\x51\x5a\xef\xed\x9c\x62\xd1\x1a\x46\x0b\xa6\x47\xf5\xc1\xa0\xdb\x5f\xad\xca\x1d\x4f\x66\xc4\xef\x53\x7f\xb6\x06\xc6\xd4\xec\xce\x17\xb4\x6c\x26\x25\x62\x45\xc7\x4d\x1e\x97\x1d\xae\xf7\xb2\x53\x5f\x38\xdc\x0e\xbb\x7d\x16\x70\x62\xa6\x37\xd4\xaf\x25\xfb\xd8\x17\x57\xae\x2b\x3b\x7f\x5e\x38\xfc\x43\xe4\x5f\x09\x36\x1f\x91\xff\xe8\xd9\xb4\x9b\x76\x3b\x3d\x5a\xfe\x6b\x3c\xde\x0e\x38\xd9\x23\xcd\x67\xe5\xe7\xdb\x2d\x70\x60\x6e\x76\xc4\x3c\xe0\x43\x5b\xd6\xd7\x5a\x06\x54\xc3\x42\x6e\xb0\xa0\x82\x22\x08\x2d\x4d\xed\x8d\xc4\xa9\x39\x9e\x6a\x35\x45\x1a\x67\xd0\xe2\xa4\x2b\x8e\x0e\xb7\x3c\xc7\xef\x83\xa3\xfe\x43\xb7\x48\x64\xa3\xc1\x62\x99\x8d\xed\x8e\x5e\xd3\x94\xba\x50\x6f\x46\x2d\xb6\xb4\xb2\xc6\x2c\x9b\xca\xb4\xf8\x6c\xc3\x6a\xd5\xee\xc4\xf7\xa8\x6b\xee\xce\x97\xff\x9c\x80\x94\x7b\x96\x5f\x33\xdb\xa4\x58\x4c\x0a\x26\x77\x37\xab\x81\x47\xb3\x45\x51\x2e\x1b\xeb\x74\x04\xa4\xa4\xa1\x41\x4f\x48\xd6\x11\x54\x4b\xda\xc6\x9a\x40\x67\x50\xea\x40\xcf\xf1\x50\xbe\xe8\x9b\xed\x10\xce\xdb\x42\x32\xd3\x9a\x05\x02\x14\x30\x4c\x2b\x7a\x45\x36\xca\x81\xe8\xa5\x4d\xbd\xb8\x0f\x1a\xe5\x98\xa6\xec\x32\x58\xa5\x6b\xcc\x55\xfa\x70\xc4\x32\xad\x05\x09\x29\xd3\xfb\xab\x2c\x09\x15\xdf\x4e\xf6\x86\x4b\x6d\xe4\x83\xa3\x75\x3d\x2a\x20\xdd\x50\xeb\x3f\x34\x20\xdd\x29\xff\x81\x01\xe9\xde\x65\xc4\x7b\x03\xd2\x43\xe4\xdf\x11\x90\x3e\x2b\xff\xa7\x07\xa4\x07\xef\x1d\xba\x3d\x43\x7a\x63\x42\x6d\x1f\x90\xea\xed\x76\x63\xff\xf9\x13\x69\x66\xf7\x9d\x0c\xc9\x4e\x79\x64\xe2\xab\x65\x7d\xdc\xb0\x1d\x95\xd6\x6c\xdb\xd5\xb3\xdd\xf5\x60\x95\x2c\x82\x3e\xeb\xf8\x15\xb1\x3d\x10\x73\xe5\xaa\xae\x97\xba\x79\x2f\xa2\xd1\x28\x53\x5c\xd7\x44\x1a\xb6\x5b\xeb\x25\xee\x20\x58\x4a\x8d\x79\xad\xd3\x23\x6b\x09\xa6\x47\xae\x35\x6c\xa6\xe4\x9a\xbf\x0b\x1a\x2b\x39\x14\x6b\xa9\xcc\x60\x55\x95\x24\x14\x8a\x55\x95\x0c\x24\x03\xcd\x99\x55\xa9\xab\x8b\xae\x28\xe8\x40\x53\xca\xd5\x9e\x75\xa2\xd8\xaf\x92\x21\x9d\xbc\x9c\xe2\xf4\xfa\x61\x19\x52\xe6\x76\xf9\xdf\x65\xbd\xf1\x13\x01\xe1\xd1\x19\xd2\x67\xd7\x3b\x75\xfd\xce\x0c\xe9\xf6\x97\xab\xec\xbb\xe2\xde\x80\xf4\xe0\x4d\x75\xb7\x67\x48\x3f\x30\x20\x9d\x64\x48\x76\x01\x16\xca\xb6\xe6\xca\x80\x56\x84\x5a\x19\xb6\x2c\x61\xd0\x0d\x8c\x5c\x6f\xc4\x0b\x66\xbd\x9b\x6f\x4a\xa1\x97\x99\xdb\x52\xde\xab\xe6\x9a\x12\x6d\x87\x08\x58\x4a\xb1\x59\xed\xcb\x86\x21\xd7\x68\x1f\x16\x59\x46\xa0\xb2\xbc\x34\x07\x5c\x2c\xac\xeb\xea\x62\xb6\x5c\x82\x5d\xd0\xe8\x2e\x3c\x1d\x2b\xbd\x09\x88\x7c\x77\x50\x2f\x98\x68\xd5\xec\x65\x0c\x9c\x6d\x25\xc5\xc5\xda\xab\x77\xd7\x76\xab\x90\x4c\xc6\xf5\x54\xfe\xb8\x3f\xfc\x17\xc9\x90\xee\
x0e\x48\x77\xca\x7f\x60\x40\x7a\xc8\x46\xab\x3b\x02\xd2\x8f\xde\xe8\xa5\xeb\x77\x66\x48\x3f\x3b\x20\x3d\x78\xb7\xe9\x27\xd3\xbc\x8f\x95\x6c\xfb\x46\x7d\x72\xd4\xbd\xe3\x15\x5c\xb9\x79\xda\x39\x2d\x5d\x8c\x67\x7d\x6b\x05\xbf\x56\x75\xb3\xc0\xc8\xa5\x5d\xd0\x52\xa9\x61\xc9\xde\xa4\x8f\xfc\x71\xce\x61\xd3\x48\x99\xce\xc7\x35\xa3\x18\xc1\x28\xae\x9b\xb8\xdf\x17\xdc\x4a\x6d\xd9\x12\x51\x45\x5d\x24\xbd\xa8\x54\x0b\x75\x91\x42\x9e\xee\x46\x5a\xa1\x37\x4b\x41\x2d\x6f\x8e\x3c\x82\x85\xda\x72\x17\x64\xca\x59\x3e\x0c\x35\x65\xbd\x8c\x90\xb6\xe1\x1d\xd0\x48\x65\x2c\x3e\x2c\xcc\x2a\x3e\x34\xb2\x4e\x3e\xe5\x07\xd1\x54\xa8\x0c\xd6\xbd\xca\xf1\x40\xd7\xe5\x00\x76\x66\x04\x57\x02\xd8\x29\xe0\x37\xac\x34\x95\xbc\x63\xdf\x5d\xb9\xde\x0e\x20\x0f\x94\x7f\x5b\x89\x77\xbe\x83\xeb\x97\x9b\x73\xd1\x5f\xee\xe0\xfd\xd1\xfa\x35\xfc\xbc\x3a\xb8\x3b\xc0\x3c\x78\x5b\xf5\x27\xa3\xe4\x67\x02\xcc\x27\x47\xd1\xdb\x32\xa4\xfd\xce\xc6\xb3\x00\x93\x7b\xd6\xb7\x96\x1b\x0c\x3b\x69\x33\x3b\x9a\x5a\xe6\x86\x56\xf3\xb2\xaf\xb2\x79\xcb\xa8\x84\x45\xd0\x19\x46\x51\x27\x97\xd2\xd3\x8d\x32\x36\x2c\x9a\x04\xf9\x79\x98\x4f\xc6\xae\x94\xe4\xe3\xb5\x98\xab\xaf\x63\x4d\xeb\xd5\xe5\x74\xce\xc8\x8b\xd6\x64\xb0\x36\xbd\xc6\x1c\x6d\x68\x36\xd0\x0e\x25\x9b\x54\x95\x15\x3c\x8f\x9c\x81\xa6\x55\x0a\xa4\xb9\xd2\x82\xa1\xef\xac\x0a\xc5\x5e\xc7\x9b\x4d\xe2\x7c\x30\xf7\x3a\xf3\x65\xe4\x0e\xbc\xc2\x71\xb9\xe2\x51\x01\xe6\x86\x55\xb6\x87\x06\x98\x3b\xe5\x3f\x30\xc0\xfc\x4a\x19\x82\x7e\x21\xc0\xfc\x40\xfd\x1e\x13\x60\x1e\x7c\x7e\xe0\xf6\x92\xea\x8d\xfd\x1c\xfb\xf3\xf3\xb7\x07\x8c\x3b\xa2\x78\x36\xc6\xfb\x1f\x59\xb9\x56\x66\xe8\x2f\x4b\xb4\x9c\xac\x8f\xd7\xd9\x64\x17\x55\x7d\x2a\xe7\x47\xf2\x04\x4f\x7a\x8b\xa9\xd1\x62\xc4\x42\x6a\x1c\x7b\x72\xa6\x2b\x55\x68\x8a\x96\xbc\x7c\x0d\xcf\xa3\x7e\xda\xf6\xf5\x65\xd6\x8b\x9a\x13\xa1\xb0\x49\x39\x4a\xc0\xeb\xb3\x2e\x2d\xf5\x1a\xac\x96\x9c\xe5\x68\xcb\x4b\xc1\xaa\x9e\xdb\x05\x09\xab\xa3\x84\xc6\xb2\xb6\x8e\x95\x78\xe0\xb1\x4d\x7a\x8c\x44\xbf\xd4\x9c\x59\x6b\x7b\x5a\xd5\x90\xc0\x3d\x2e\xb6\x32\xc9\x56\xc9\xf6\x8e\x9a\x5d\x0e\x40\x97\xcf\xf5\x7e\xb6\x0c\x3a\xe2\x71\x87\x11\xdf\xf6\x73\x4e\x7b\x7d\x9f\xb7\x12\x5c\x9e\x98\xbd\x1a\x58\x6b\xb7\xb7\xd9\x96\x82\xa2\xa0\xeb\x2f\x4b\x9d\x57\xcb\xd3\xef\x04\xd6\xc7\xc9\xcf\xde\x24\xff\x58\x7a\x3e\xfa\xac\xc9\x4f\x3c\xe3\xf3\xd9\xb3\x2e\xfa\x51\xfe\xe0\x26\xf9\xcf\x78\xde\xf8\x93\x62\xfa\x7d\x55\xd6\xc9\xf6\x88\x2b\xc7\x3d\x5e\x26\x12\x3f\x27\x49\x73\x9e\x47\x27\x55\xbf\x96\x44\xa6\x6b\xbf\x98\x7e\xe7\x36\x73\x1c\x03\x4e\xae\xbb\x13\x93\x1f\x51\xf9\xe4\x7f\x31\xfd\xd2\xd7\xf1\xdc\x5f\x1f\xda\x80\xfa\xd0\x83\x77\xb7\x27\x2c\xef\xbe\xc5\xfc\xe7\x24\x2c\x9d\xb1\x5c\x4b\x67\xec\x51\xd7\x38\xdb\x6b\x77\x6a\x1c\xc7\x84\xc5\xf0\x1d\x8e\xc8\x80\x8d\xd2\x4c\x96\x45\x3d\x98\x07\x65\x73\x04\x53\x2e\x16\x89\x89\xa5\xde\xa4\x90\xaa\x98\x4a\x9c\x6f\xd7\xcb\x9d\x26\x99\x81\xc9\x8c\x94\x33\x18\x90\x7c\x5d\xd2\x73\x5e\xc6\xc8\xa6\xf2\xeb\xd2\xa0\x3c\x44\x4d\xd7\xb2\x67\x65\x64\xe4\xeb\xe3\x4e\x6d\xff\x7b\x8b\x66\x0d\x4c\x9b\x7d\x31\xae\xc0\x66\xc6\x93\xb5\x39\x09\x07\xed\x79\x79\x34\x9d\x15\xc4\x51\xa6\x03\x87\xb5\xba\x3a\x2b\x2e\xbd\xee\x70\x75\xac\x52\xbe\x7b\xc2\xb2\x33\xbc\x9f\x93\xb0\x58\xb8\x39\xd3\x73\x45\xd9\xa2\xf4\x18\xbc\x4f\x8d\xff\x5a\xc2\x32\xb8\xbd\xcd\xdb\x84\x61\x3f\xe1\x75\x79\xee\xfd\x60\x0f\x97\xcd\xe4\x29\x40\x3f\x4e\x7e\xee\x26\xf9\xc7\x84\xe5\xd1\x87\x34\x7f\xe2\xe1\xd8\xcf\x1e\x12\xd5\x8f\xf2\x6f\x4f\x58\xf6\x09\xda\xcf\x49\x58\x84\x99\x3d\xd6\x6b\x23\x5f\x58\x6d\xae\x0e\x00\x27\x09\xfa\x6d\x09\xc1\x1d\xfa\xed\xae\x37\x12\x82\x8b\x3e\x7b\xbc\x4e\xec\xe3\x57\x1b\x70\x0d\x2a
\x8c\xf7\xed\xb9\x7c\xed\xe8\x5b\x47\xfa\x4f\x8e\x89\xf7\xd8\x53\x79\xd0\x5d\xeb\xc6\xd9\xba\xe7\x65\xd7\xb8\x84\xff\x49\x7c\xfa\x85\x13\x48\x23\x97\x0a\x5f\xc4\xbc\x0f\xcd\xcc\x3c\xf8\xa4\xfb\xed\x89\xce\xab\x1f\x56\x3b\xfe\xf9\x13\x13\x9d\x4c\x72\x71\x28\x37\xf3\xfa\xd5\x39\xc6\x93\x44\xa7\x90\x59\x98\x75\x7b\x69\x44\x85\x82\x3c\x85\xd5\x12\x28\x16\x37\x28\x5f\x37\x0a\xad\x95\xd6\x59\x0f\xe6\x56\xd7\xb0\x6a\x55\x0b\xf4\x66\xe3\x94\xdf\xb4\x26\x51\x49\x8f\xab\x9e\x04\xf3\xfd\xa4\xad\x38\xc5\xe9\x82\x1a\x8e\x53\x31\xdb\x95\x4c\x3e\x6b\x65\x9b\x83\xbc\x17\x14\x6b\x95\x5d\xa2\xd3\x18\x04\x69\x32\x6b\x18\xb9\xf4\xb4\xd3\x21\x59\x3f\xb7\x5a\x0c\x88\xd3\x69\x35\x27\x5c\x27\x3a\x44\x45\xbf\xc3\x9b\xfe\x3c\x32\xc6\xef\x6d\x2f\x7c\x60\xa2\x73\xef\xcf\xe1\xdc\x91\xe8\x98\xb2\x7a\x10\xba\xd3\xe7\x95\x27\x5f\x4d\x74\xee\x5b\x94\x2f\xee\x47\xf3\x63\xa2\xf1\xda\x26\x3e\x92\x68\x3c\x44\xfe\x15\x9b\xfc\x70\xa2\xf3\xc0\xb7\x41\x5c\xee\x83\x1f\xf7\x36\x8a\x96\x7e\x21\x86\xbc\x97\xe8\x18\xc7\xe7\x6f\x4a\x74\xf6\x5d\xf1\x73\x12\x9d\x45\xb0\x3c\x4c\x19\xd7\xae\x36\xf4\x24\xb1\xff\xd5\x12\x9d\x34\xb2\xfa\xd5\xd4\xf5\xae\x39\xb1\x8f\x5f\x2d\xd1\x31\xcd\x5a\xd7\x79\x2f\xd1\xe9\xe9\xbf\x76\xa2\x93\x5e\x55\x95\x57\xe3\xc0\x49\x7c\xf8\x85\x13\x9d\x72\x36\x8e\x3f\x9f\xe8\x3c\xf8\xd5\x32\xb7\x6f\x33\x7e\x77\x09\xea\xe7\xfc\x2e\xdd\xc4\x37\xf7\xa0\x96\xf6\xdd\x77\xc9\xb2\x4f\xb6\x2d\x17\xc5\x8d\x6a\xb0\x55\xa6\x42\xb5\xde\x48\x29\x56\x7a\x22\x4a\x0e\xbc\xba\x29\x4d\x79\xab\xd2\x2c\xd8\x52\xc5\x9a\x1b\x61\x52\x70\xb0\xdf\xc9\x78\x99\x24\x28\x66\x50\x50\xef\x8e\x7a\x1d\xdf\x1d\x54\xbd\x5c\x3a\x3b\x6d\x5a\xab\x5a\xcf\xb1\x74\xcd\xac\xd5\x2c\x43\x6a\xb3\xe5\x7e\x93\x8d\x21\x4f\xe6\x8b\x38\xb4\xcb\xc8\x90\x27\x06\xac\x52\xbd\x5a\xee\x36\xe5\x36\x05\xfd\xc6\x52\x0c\xd7\x6e\xdc\x9b\x8f\xd2\x21\xa9\x76\xdf\x3b\x49\x79\x62\xe0\x3f\x6f\xfa\xfd\x64\x09\xe9\x21\x4b\x84\x3f\x67\x46\x2a\x1d\x34\xd8\xfe\xd1\x7b\x06\x05\xb6\xe9\xb4\x4e\xfb\xe3\xea\xbe\x84\xd3\x35\x9e\x1b\xb6\x72\xdf\xbd\x7c\xf6\x40\xf9\x37\x2d\x9f\x65\x8f\xcb\x4d\xf7\x0c\x82\xdf\x63\xdd\xff\xf5\xe9\x6c\x5b\x56\x5e\xb5\xa2\xf6\xdc\xe8\x1f\x3d\x08\xea\x40\x72\x37\x83\x8f\x0d\x82\xbb\xf1\xf2\xec\x3a\x99\x1d\xda\x63\xf7\xff\xb7\x25\x6b\x5d\x7f\xb1\x93\xf6\x8e\xf6\xdd\xbd\x31\xef\x47\xec\x9b\x29\xfe\x14\xfd\xf2\xfd\xa8\xea\xbf\xa7\xdf\xdd\xfe\xfd\xf3\x92\xdc\x7b\xfd\xfb\x0e\xfb\xee\x74\x06\x8b\xb7\x92\xdc\x4e\x67\x10\xa7\x5e\xcd\xfe\x3f\x5f\x27\x45\xd1\x1d\xf6\xff\x0b\xaf\xf0\x9c\xf5\x4f\xe6\xa6\xfe\xd1\x9f\xf1\xf9\x39\x13\x6f\xbb\x7c\xe3\xbd\x15\xc6\xef\x32\x89\xf1\x89\x49\x84\xf3\x57\x3b\xfe\xe8\x49\x94\x8a\xfe\xeb\x6e\x3c\xd6\x1f\x11\x7f\x6f\xd7\xcf\xca\xb0\xe5\xf2\xfe\xa3\x60\x8f\x7d\x2f\xe6\x3f\x3f\x66\xfe\x3d\x7e\xcc\x7c\x57\x44\xb4\xed\x52\xb2\xda\x5d\xe7\x52\x84\xb3\xfe\x72\xda\x30\xf3\xe5\xf9\x4c\x30\xb5\x56\x31\x4b\x85\xac\xbd\x18\xa7\x37\x75\x5e\xc8\xcc\xda\xca\x27\xb6\x01\xfc\xbc\x25\x50\xbd\x10\x95\xc0\xe0\x5e\x27\xd8\x4d\xb2\x1d\x92\xc2\x9f\x31\xbb\x7f\x5c\x3d\x79\xcc\x1e\xb4\xfd\xf3\x9f\x3d\xcf\xfa\x6c\x9f\x77\x0c\xba\x77\x0c\x82\xba\x3e\x2d\xe1\x13\x23\x6b\x9d\x3b\xc8\xf5\x22\xf8\xc4\x9f\x6e\x29\x42\x5f\x6d\xc9\xf8\x6c\x11\xfc\x38\xf9\xb9\x5b\xe4\xe7\x8e\x45\x46\xf1\xe4\xf9\xff\xd3\xb6\x24\x60\xe0\x35\xde\xdb\x92\x90\x79\x48\xfb\x96\xfa\xcb\xa4\xfc\x93\xe3\xcd\x3d\xf1\xaa\x3d\x05\xc9\xcb\x45\xf7\xc9\xb5\xfb\x3c\xfe\x29\xfa\xed\x26\x29\xde\x9a\x14\x30\xfc\xc6\x35\x63\x7c\x39\x29\xf5\xa3\x57\x52\xd2\x7d\x9c\x51\xdf\x5a\x49\xe9\xe3\x8c\xf2\xde\x4a\x4a\xe9\xa4\x3f\x7e\x5c\x91\xb9\x2b\x22\xb5\x0f\x4d\x22\x5d\xbc\x76\xf4\x54\xbf\xaf\x88\xf9\xd5\x77\x0f\x14\x1f\xd
1\xbe\xbb\xed\xf3\x76\x7c\xb2\xcd\xa9\x69\x7f\x6c\xe5\xe6\xfc\x4c\xdd\x51\xdf\x7b\xec\xf3\xf6\xfc\xa4\x38\xee\x5a\x97\x8b\x84\x4f\xd8\xe7\x49\x40\xff\x6c\xff\x65\x63\xbc\xde\x33\xd9\x75\xdd\x95\x95\xdc\xeb\xba\x1c\xe3\xe9\x03\xe5\xbf\xea\xa0\xeb\xf2\xef\x1d\x6f\xee\x98\x04\x01\x25\x94\xbd\x1c\xcf\x2f\xe2\xf3\xc3\xf5\xdb\xad\x74\x9c\xbf\x6a\xe3\xca\x6e\xcc\xa7\xfc\xf0\xe4\x7a\x74\xfe\x77\x53\xfe\xf5\x70\xf9\x1f\xdf\x92\xab\x1f\xe5\x17\x6f\x92\xff\xdc\x88\x7b\xe2\xf7\x1d\xf9\x86\x5e\x72\x3b\xfb\x47\xf7\x41\xea\xbc\x6d\x6f\xfc\x3e\x44\x3c\x1b\x04\x98\x3d\xe4\xd7\x87\x5f\xb2\x7a\x9e\xd7\x38\xfd\x95\x87\x9d\x7a\xe1\xb1\x06\xb9\x30\x31\x72\x9d\xcb\xf3\x91\xef\xa5\x5e\xad\x8d\xf5\x57\x5c\x9e\x5a\xb8\x6f\x18\xa6\xd4\x8f\xbd\x88\x6d\x9b\x7b\xf2\xf7\xb7\xd9\x98\x3f\x37\x33\x63\x5b\x8d\x66\x5d\x2f\x5a\xcd\x77\x9a\xa9\x9b\xcd\x5c\xfd\x80\x8a\x6d\x99\xbd\x53\x8e\xbb\xdf\x5e\xd7\xb3\xd9\x13\x6e\xaf\x04\x26\xaa\xf5\x62\x45\xaf\xf7\x12\xe5\x5c\x2f\xf1\xc7\xe1\xae\xcb\xbe\x26\xb6\x14\x1e\x9e\xf2\x6b\xea\x87\xcf\x7f\x3c\x56\xf1\xf0\x0d\xad\xc3\xb7\x54\x7e\xa5\x28\xc1\xde\xf6\xff\x0f\x52\x8f\x60\xef\x92\x66\x4f\x02\x5e\x2a\xe5\xf9\x8c\x5f\xd0\x68\xc2\xd9\x80\x07\x43\x8e\x19\x0f\xc2\x97\x9f\xbe\xed\x3f\x85\x7c\xfe\xed\x21\xea\xbe\x60\x7e\x49\xf1\x37\xa4\x27\x5a\x56\xb1\xd6\xca\x25\xfe\x78\xfe\xf6\x53\x2d\x79\x0c\xde\x9f\x6c\xc0\xeb\x3e\x38\xdc\xc7\xe1\xf0\x95\xf6\xbe\xe3\x6c\xd5\xde\xff\xe7\x41\xfa\xee\x99\x5d\x52\xf4\x44\xcc\x4b\x0d\x77\x37\x2e\x98\xc9\x8c\x6f\xb5\xdb\xfd\xfb\x20\xe5\x76\xbc\x2e\xe9\x76\x14\xf2\x52\x35\x77\xf6\x35\x31\xf3\x83\xe8\xb5\x6e\x5b\x79\xe1\x70\x1e\xf3\x98\xbf\xf8\xf0\x28\x4d\x4f\x58\x5e\x54\xf8\x5c\xe4\xa5\x4e\xbf\xa4\x75\x18\x93\xc3\x7f\x1e\xa7\x69\x18\x93\x2b\x3a\x3e\x89\x79\xa9\x5d\xc0\xc3\x0b\xdd\x3d\x8f\xfd\x20\x9e\xba\x9e\xe3\x9f\xfc\xf9\x20\x25\x8f\x0c\x2f\x29\x7a\x26\xee\x43\x31\x2c\xa4\xb3\xfd\x63\xe1\xc9\x9f\x0f\xd2\xf6\xc8\xf0\x92\xb6\x67\xe2\x5e\x6a\x3b\x0f\x79\x74\xd1\xd7\xb7\x83\x35\x0f\x23\x1c\xf1\x93\x3f\x1f\xa5\xef\x33\xc3\x8b\xfa\xbe\x14\xf7\x52\xdf\xdd\xf7\x17\xc7\xd7\x28\x88\xc3\x68\xe2\x7a\x3c\x3c\xf9\xf3\x41\x0a\x1f\x19\x5e\x52\xf8\x4c\xdc\xd5\xd4\xc0\x0d\xc3\x98\x07\x5f\x13\x38\x0c\x79\x44\x7d\x76\xa1\x0d\xa7\x3f\xaf\x76\xfa\xe1\x51\xed\x38\x61\x79\xb1\x25\xe7\x22\x2f\x45\x89\x90\xcf\xbf\x26\xa2\x95\xeb\x31\xbe\xba\xd0\x82\xa3\xfa\x8f\xd6\xfd\x4d\xc5\xef\xd2\xfa\x2c\x45\x7d\xf9\xf1\x41\xfa\xbf\x64\x7a\xa9\x11\x17\xc4\x5e\x6d\xc9\x81\xf6\x72\x73\x0e\x36\x47\xf0\x04\x7b\x94\x87\x4f\xba\x17\xad\x6c\xae\xfb\x8e\xda\x99\x7a\x4e\x6f\xe6\xf6\xa4\xe7\x7c\x12\xb6\x75\xcc\x62\x5b\x8d\xa2\x55\x48\x90\x28\xe0\x3c\xf1\xc7\x81\xe2\x5f\x89\x8e\x91\xab\xe7\x9e\x3f\x27\xfe\xe7\xef\x84\x00\x9e\xae\xd7\x89\x26\x0f\xa3\xfd\x58\xbe\x6d\xc5\xcd\x5a\xbe\x64\xb3\x55\xf2\x90\xa7\xbc\x50\x31\xe4\x93\x89\xeb\x0d\x76\xbe\xf7\x35\x41\xe2\xf5\xf1\xc3\x2c\x70\xe9\x6b\x5f\xdc\x76\x03\x59\x87\x7c\x7e\xb3\x62\xcf\x1c\xb6\x3a\x1d\x1d\xe3\x85\x5a\xd7\x53\xc5\x2d\xb9\xc3\xf9\xfd\x2a\x3c\x31\xd9\x6b\x71\x12\x60\x3e\xa8\xc8\xe1\xce\x7d\x8a\x9c\x32\xd9\x2a\xf2\x32\x11\xfe\xa0\x26\x21\x9d\x71\x6f\x71\xa7\x26\xa7\x4c\xb6\x9a\x84\x74\xf6\x49\x40\x8e\x03\xea\xdd\x9a\x9c\xf2\x39\x28\xf3\x94\x25\xbc\x54\x06\x87\xd1\x75\x85\x4e\x02\xc7\x7d\x1a\x9d\x33\xda\xaa\x74\x16\x1b\xdf\xc5\xa8\xea\x87\xd1\x20\xe0\x8d\x9a\xb9\xab\x85\x09\x0e\x79\x82\xc5\xd3\x59\x82\xfa\xd3\xd9\x84\x47\x7c\x27\xf6\xff\x0b\x00\x00\xff\xff\x46\x54\xa4\x4c\xed\xf0\x00\x00") + +func offer_idsCoreSqlBytes() ([]byte, error) { + return bindataRead( + _offer_idsCoreSql, + "offer_ids-core.sql", + ) +} + +func offer_idsCoreSql() (*asset, error) 
{ + bytes, err := offer_idsCoreSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "offer_ids-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x92, 0x1b, 0xfb, 0xe, 0xea, 0x46, 0x2e, 0x96, 0x2a, 0x34, 0x8f, 0xac, 0xab, 0x46, 0xe, 0x62, 0x40, 0x4c, 0xbb, 0x7e, 0x7f, 0xce, 0xfd, 0xcc, 0xf3, 0xf, 0x8b, 0xed, 0xfb, 0x39, 0x96, 0xe8}} + return a, nil +} + +var _offer_idsHorizonSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\xf7\x8f\xa3\x48\xf6\xf8\xef\xf3\x57\xa0\xd1\x49\xbd\x23\xf7\xac\x29\x32\xb3\xdf\x3d\x09\xdb\x38\x1b\xe7\xd4\xa7\x93\x55\x40\x81\x71\x00\x1b\x70\x3c\x7d\xfe\xf7\xaf\x0c\x0e\x98\x76\xc4\xee\x9e\xd9\xbb\x45\xab\xd9\xb6\x29\x5e\xaa\x97\xea\xd5\x2b\xfc\xfd\xfb\x97\xef\xdf\xb1\x8a\xe5\xb8\xba\x8d\xea\xd5\x22\xa6\x42\x17\xca\xd0\x41\x98\x3a\x1b\x4f\xbe\x7c\xff\xfe\x65\x73\x3f\x35\x1b\x4f\x90\x8a\x69\xb6\x35\x3e\x0c\x98\x23\xdb\x31\x2c\x13\xe3\x7f\x67\x7e\x07\x81\x51\xf2\x0a\x9b\xe8\xbd\xcd\xe3\xa1\x21\x5f\xea\x62\x03\x73\x5c\xe8\xa2\x31\x32\xdd\x9e\x6b\x8c\x91\x35\x73\xb1\x3f\x31\xfc\x0f\xef\xd6\xc8\x52\x86\xef\xbf\x55\x46\xc6\x66\x34\x32\x15\x4b\x35\x4c\x1d\xfb\x13\x7b\x69\x36\xd2\xdc\xcb\x1f\x3b\x70\xa6\x0a\x6d\xb5\xa7\x58\xa6\x66\xd9\x63\xc3\xd4\x7b\x8e\x6b\x1b\xa6\xee\x60\x7f\x62\x96\xb9\x85\xd1\x47\xca\xb0\xa7\xcd\x4c\xc5\x35\x2c\xb3\x27\x5b\xaa\x81\x36\xf7\x35\x38\x72\xd0\x11\x9a\xb1\x61\xf6\xc6\xc8\x71\xa0\xee\x0d\x58\x40\xdb\x34\x4c\xfd\x8f\x2d\xed\x08\xda\x4a\xbf\x37\x81\x6e\x1f\xfb\x13\x9b\xcc\xe4\x91\xa1\xbc\x6e\x98\x55\xa0\x0b\x47\xd6\x66\x98\x50\x6c\x88\x35\xac\x21\x24\x8a\x22\x96\x4b\x63\x62\x27\x57\x6f\xd4\xb1\xb2\x54\xec\x6e\xc7\xff\xde\x37\x1c\xd7\xb2\x57\x3d\xd7\x86\x2a\x72\xb0\x54\xad\x5c\xc1\x92\x65\xa9\xde\xa8\x09\x39\xa9\x11\x78\xe8\x78\x60\x4f\xb1\x66\xa6\x8b\xec\x1e\x74\x1c\xe4\xf6\x0c\xb5\xa7\x0d\xd1\xea\x8f\xcf\x40\xa8\x78\x7f\x7d\x06\xca\x8d\x5e\x7d\x1e\x83\x3e\xb6\xfb\xb9\xf3\x09\xdc\x28\xf2\x25\x64\x81\x51\x07\xe0\xde\xf0\x9c\x94\x12\x3b\x81\x91\x5b\xb0\x1e\x55\x3d\xa4\x69\x48\x71\x9d\x9e\xbc\xea\x59\xb6\x8a\xec\x9e\x6c\x59\xc3\xcb\x0f\x1a\xa6\x8a\x96\xbd\x00\x73\xa6\x03\x3d\x45\x77\x7a\x96\xd9\x33\xd4\x7b\x9e\xb6\x26\xc8\x86\xfb\x67\xdd\xd5\x04\x3d\xf0\xf4\x81\x92\x87\xa8\xb8\xef\xd9\x11\x52\x75\x64\x7b\x0f\x3a\x68\x3a\x43\xa6\x72\x17\x0b\x81\xc7\x27\x36\x9a\x1b\xd6\xcc\xd9\x7e\xd7\xeb\x43\xa7\x1f\x11\xd4\xe3\x10\x8c\xf1\xc4\xb2\x37\xe6\xb8\xf5\xa9\x51\xc1\x44\x95\xa5\x32\xb2\x1c\xa4\xf6\xa0\x7b\xcf\xf3\x3b\x65\x8e\xa0\x4a\x5b\xbb\x8c\x40\x74\xf0\x49\xa8\xaa\x36\x72\x9c\xcb\x8f\xf7\x5d\x5b\xf5\xe2\x4e\x6f\x64\x59\xc3\xd9\xe4\x86\xd1\x93\x6b\x24\xf9\xa3\xa0\x61\xdf\x09\x78\xe7\x74\x6f\x7e\x60\xe3\x27\x34\x0d\xd9\xb7\x0d\xdd\x81\x8f\xf0\xc8\x56\xac\xb7\x3d\xe4\xb9\xd6\x3b\x90\x04\x5d\xf1\xb5\x27\x26\x9b\x07\xfa\xee\xd5\x19\x70\x8e\x1c\x90\xbc\xba\xaa\x46\xfd\xbd\xa5\xdf\x32\xd8\xf2\xe9\xb0\xae\x0e\x34\x1c\xb7\xe7\x2e\x7b\x93\xeb\x20\x37\x23\xad\xc9\xad\x23\xd1\xad\xc3\x76\xa1\xe4\xf2\x60\x79\x67\xee\x57\x87\x5d\xf7\x62\xf2\xea\xb6\xc9\xf4\x63\xe4\x46\xda\x8e\x33\xbb\x86\x79\x3f\x58\xb1\x54\x74\x67\x5e\xb0\x57\x83\x09\xb4\x5d\x43\x31\x26\xd0\xbc\x18\xbc\xaf\x3d\xda\x9b\xdc\x99\x9b\xec\x23\xda\xbd\x14\x9c\x7e\xf0\x6e\xfc\x9e\xf0\x6e\xc1\xe7\x0f\xfc\x70\xf8\xfe\x64\x6e\x66\x72\xfb\xe7\x26\x3e\xec\x52\x3f\x4f\x19\x7a\x37\x52\xa0\x5b\xf6\xa4\x37\x36\xf4\x6d\xc2\x70\x81\x84\xd0\xc8\x9b\x79\xbc\x3f\xdf\xbb\x04\xf9\x56\xe5\xf4\x9f\x4e\x96\x8b\xcd\x92\x84\x19\xaa\x8f\x39\x25\xa6\x85\x66\xb1\x71\x23\xec\x33\x4a\xf7\x04\xc8\xdb\xe9\xbe\x0c\xc
9\xfb\x74\x3b\xfb\xbb\x28\x5d\x17\xab\x4d\x51\x4a\x46\x90\xd9\x26\xcf\x76\xd0\xf4\x6e\xcc\x47\x40\x6e\x7e\x5a\x45\x37\x8e\x3d\x64\xb3\x37\x73\x78\xc6\xea\xef\xe1\xef\x34\x88\xdb\x9e\xdd\xe6\x7d\xb7\x0d\xde\x26\x79\x37\xf3\xb6\xf5\x00\xf7\xf0\xe2\x3f\x72\xe3\xd8\x6d\xfa\x77\x3b\x3d\xbb\x7c\xf1\x16\x8a\x42\x3e\xe4\xf2\xe0\x80\x4b\xd8\x0e\x14\x32\x99\x9a\x98\x11\x1a\x27\x06\x8f\x8d\xcd\x8a\xc3\x50\xd0\x6f\xe6\x6c\x8c\x6c\x43\xf9\xd7\xbf\xbf\xdd\xf0\x14\x5c\x46\x78\x6a\x04\x1d\xf7\x37\x68\xae\xd0\xc8\x2b\xc5\xdc\xf0\x84\x66\xd8\x27\x1f\x49\x37\xa5\x64\x23\x57\x96\x2e\xf0\xd3\x83\xba\x7e\xa0\xee\x15\x7b\x47\xe8\x05\x18\x3b\xee\x1e\x80\xb1\xe1\xd5\x7b\xfc\x40\xfc\x2b\x76\x0f\x23\x1e\xeb\x37\x40\x10\x3b\x0d\x51\xaa\x87\x40\x8c\x26\xba\x33\x1d\xed\x74\x31\x99\x15\x4b\xc2\x3b\x0c\x7f\x7c\xf1\xab\x70\x12\x1c\xa3\x1f\xbb\xef\xb0\xc6\x6a\x82\x7e\x6c\x1f\xf9\x03\xab\x2b\x7d\x34\x86\x3f\xb0\xef\x7f\x60\xe5\x85\x89\xec\x1f\xd8\x77\xaf\x38\x97\xac\x89\x9b\xf9\xda\x42\xde\xc1\xfb\x72\x04\xf1\xf8\xe6\x16\x70\xb2\x5c\x2a\x89\x52\xe3\x02\x64\x7f\x00\x56\x96\x8e\x01\x60\xb9\x3a\xf6\xb2\x2b\xbb\xed\xbe\x73\x3c\x20\x2f\x61\xcc\x3b\xf6\xb7\x38\xf7\x12\xba\xca\xcf\x91\x2c\xa5\x72\x23\x24\x4f\xac\x9d\x6b\x64\xf7\x64\x05\xeb\x6f\x47\xe8\x0f\x50\x42\x84\xdc\xc3\xfc\x3b\x20\x9e\x00\x2a\xc5\xf8\x44\xaf\x57\x8b\xd8\xc4\xb6\x14\xa4\xce\x6c\x38\xc2\x46\xd0\xd4\x67\x50\x47\x9e\x18\x6e\xac\x17\x06\xc9\xbd\xae\x68\x5b\xf2\x77\xba\x7a\xa0\x7f\x37\xb7\xa7\x64\xb9\xd7\xec\xab\xf0\xb1\x9a\xd8\x68\xd6\xa4\x7a\xe0\xbb\x2f\x18\x86\x61\x45\x41\xca\x34\x85\x8c\x88\x79\xdc\x97\x4a\x4d\xdf\xdf\xd5\x1b\xb5\x5c\xb2\xe1\x8d\x10\xea\xd8\x3f\x7a\xff\xc0\xea\x62\x51\x4c\x36\xb0\x7f\x80\xcd\xa7\xf0\x6c\x5c\x35\xc4\xc7\xb8\xbb\x06\xfe\x69\xcc\x11\xa7\x98\xbb\xc5\x53\x3d\xc6\xdf\x0d\x18\xf6\x2c\xee\xbf\x8a\xc4\xe1\x6f\x5f\x30\x2c\x29\xd4\x45\xac\x9d\x15\x25\xec\x1f\xe0\x5f\xe0\xdf\xf1\x7f\x80\x7f\x11\xff\xfe\xe7\x3f\x08\xef\x6f\xe2\x5f\xc4\xbf\xb1\x86\x7f\x13\x13\x8b\x75\x71\x23\x14\x51\x4a\x7d\x3b\x29\x99\x1b\xe2\xc0\x83\x92\xb9\x8e\xe1\xa3\x25\xf3\xff\xa2\x48\xe6\x7d\x4c\xdd\xca\x61\x1f\x87\x6f\x13\xc4\x21\x6c\xbf\x83\xe8\x51\x8c\x61\xf5\x8d\xac\xb0\x3f\x0f\x1e\xe0\xd5\xff\xba\xd1\xad\x88\xd8\x9f\x41\x8b\xf8\x76\xca\x6a\x9f\x4a\x63\x18\x60\x88\xc4\x9d\x19\xdf\x4e\xe1\xc9\x14\xe8\x51\x2a\x4f\x01\x0d\x51\x7a\x64\x90\xc7\xe4\x1e\xb4\xec\x3d\xb5\xa7\xd2\xbc\x87\xa9\x3d\x01\x34\x4c\x6d\xd0\x48\x2e\x52\xbb\x89\x5c\x2a\xd2\xe0\x6c\xe4\xf6\x5c\x28\x8f\x90\x33\x81\x0a\xc2\xfe\xc4\x5e\x5e\xfe\x38\xbe\xbb\x30\xdc\x7e\xcf\x32\xd4\xc0\x56\xda\x11\xaf\xc1\xfc\x77\xcb\xa2\x67\x60\xb7\xb1\xe7\xdb\x62\x70\xf1\xed\x73\x64\xa8\x98\x6c\xe8\x86\xe9\x7a\x89\x81\xd4\x2c\x16\x7d\x76\xe0\x78\x93\xc6\x63\x4a\x1f\xda\x50\x71\x91\x8d\xcd\xa1\xbd\x32\x4c\x3d\x34\xcc\x9c\x8d\xf7\x29\x3f\x66\x98\x2e\xd2\x91\x1d\x1a\xa2\x8d\xa0\xee\x60\xce\x18\x8e\x46\xef\xd1\xb8\xd6\x78\xf4\x1e\xc9\x6f\x04\x4d\x7f\xdb\x8f\x7c\x3f\xed\xe1\x75\x43\x54\x71\x84\xab\x1d\x7b\x91\xb8\x68\xf9\x4e\x20\x93\xc9\xc8\xf0\x6a\xf6\x98\x6b\x8c\x91\xe3\xc2\xf1\x04\xdb\xcc\x99\xf7\x11\x5b\x5b\x26\x7a\x4f\xe8\xb9\x55\xd1\x2e\x1f\xdd\x2e\xa7\x6e\xa3\x79\xbf\xf8\x3a\x03\x75\xab\x86\x42\xad\xe1\x67\x74\xc0\xfb\x22\x27\x25\x6b\xa2\x97\x7e\x25\xba\xdb\xaf\xa4\x32\x56\xca\x49\x2d\xa1\xd8\x14\xf7\x9f\x85\xce\xe1\x73\x52\x48\x66\x45\x0c\x5c\x63\x26\xb2\xd8\xc3\x80\xde\xa9\xe2\xb6\xe8\x81\x99\x68\xe9\xce\xe1\xe8\xb7\x97\x33\x1c\xbf\xfc\xf8\x61\x23\x5d\x19\x41\xc7\xf9\x16\x9e\x2e\x7f\xaf\xe2\x84\x6e\x31\xd4\xb7\x0b\x13\xe5\xaf\x8d\x1f\xe6\xcc\xaf\xe8\xec\xf9\x3a\x6d\x19\x87\x5a\xdd\x69\x32\x4f\x0e\x57\x2c\xf5\xd4\x70\x
40\x9c\x1e\xee\x97\xff\x4e\x3c\x40\x33\x97\x2c\xec\x74\x79\xe1\x49\x6a\x1b\x84\xf9\x69\x4a\x7b\x89\x11\xac\xdc\x96\xc4\x14\x96\xe8\x5e\xe1\xc8\xaf\xd0\x5d\x66\x68\x0f\x2b\x74\xfb\x77\x43\x3d\x47\xdb\xae\xe6\xf3\xa8\xd6\x6d\xe1\x6c\xd5\x2e\x64\x33\xbd\x73\x9e\xfe\x7d\x89\xeb\xdc\xc8\xaf\xde\xc6\xc7\xd7\x33\xda\xec\xe9\xf1\xe9\x5b\x2a\x72\xa1\x31\x72\xb0\x81\x63\x99\xf2\x79\x65\xdb\x15\xca\x1e\x95\xc3\x16\xce\x56\x0e\xbb\x7d\xeb\x33\xb4\x05\x36\x93\x6f\xb2\xc2\x53\xfb\xd8\xa7\x1f\xdc\x8a\x25\x50\x19\xf5\x26\x62\x4f\xc7\xce\xcb\xe1\x21\x0c\x87\x89\xb8\x6d\xfc\x7e\x33\x39\x14\x98\xac\x99\x7b\x88\x4d\xe1\x67\x6c\x04\xdd\xab\x0f\xf9\x63\x67\x13\xf5\xe6\xb1\x7b\xd5\xd9\x7e\x0c\xed\xb3\xbf\xe3\x05\xbc\xcb\x07\x5c\x38\xea\x29\x96\x61\x3a\xa7\x75\x50\x43\xa8\x37\xb1\xac\xd1\xe9\xbb\xde\xce\xa7\x86\xce\xcd\xb5\x77\xdb\x46\x0e\xb2\xe7\xe7\x86\x6c\xf2\x50\x77\xd9\xf3\xd2\x24\x63\x7d\x6e\xd4\xc4\xb6\x5c\x4b\xb1\x46\x67\xf9\x0a\xcf\xd1\x4e\x59\x10\x54\x91\xed\xa5\x17\xfe\xf7\xce\x4c\x51\x90\xe3\x68\xb3\x51\xef\xac\xa2\x6c\x19\x87\xc6\x08\xa9\xe7\x47\x9d\x37\xab\x33\xb5\xeb\x47\xad\xec\xcc\x7e\xc8\x95\x98\x77\xbb\xb7\xb9\xee\xbf\xee\x65\xf9\xb9\x61\xec\x22\x8e\xcf\x0a\x6b\x77\x31\xfa\x60\x98\xbb\x88\xeb\x7d\xd8\x3b\x3d\xfc\x42\x18\x0c\xec\xec\x3c\x4d\x37\xaf\x2d\x73\x8e\xbb\xaa\xce\x2c\x85\x36\x99\xbf\xe2\xb3\xe2\x45\xc0\x07\x03\xe0\xd6\xf2\xad\x99\xad\xec\xdb\x34\xce\x84\x9e\x9d\x3b\x79\x79\xf9\xf1\xe3\xfc\x52\xec\xbc\x1d\x6c\x37\xd6\x1e\x15\xe7\xb6\x17\xf0\xb7\xa7\xe6\x0b\x5b\x97\x18\x25\x7a\x79\xbd\x30\x67\xd1\x86\x3a\x11\x2f\x0d\xda\x36\x47\x5e\x1a\xe2\xaf\x83\x4f\x0e\x78\xdf\xd3\x79\x65\xdc\x45\x74\xfb\x51\x17\x30\x7a\x24\x19\x4e\xcf\x41\xa3\x11\xb2\x31\xd9\xb2\x46\x08\x9a\xbb\x98\x64\x28\xa8\x67\x1e\xc5\x5f\xff\xbb\xe3\x98\x7c\xe8\x26\xea\x85\xa2\xf5\x51\x3f\x53\xf8\x66\x60\x9b\xfe\x64\xe7\xa7\x47\x75\xcf\xeb\x0d\xc6\x92\x59\x31\x59\xc0\x7e\xfb\x2d\x28\xc1\x7f\x62\xf8\xb7\x6f\xd7\x40\x9d\x7a\x7c\x27\xb4\xff\xf7\x4e\x8e\x37\xc0\x3b\x92\x69\x08\x7c\x48\xe0\x1e\x81\x17\x4d\xe9\xf4\x0e\xf7\x13\x8c\xeb\x74\xcf\xc2\x8d\x91\xf4\x16\x17\xf6\x48\x2c\xbd\xd6\x1f\xf0\x9c\x68\x7a\x05\xcb\x67\xc5\xd3\x3b\x99\x7d\x30\xa2\x5e\xc1\xf6\x3e\xa6\x9e\x7b\xe0\x42\x54\x3d\xea\x09\x79\xa2\xae\xee\xf4\x33\x48\xd2\xcd\x8b\xa8\xad\xef\xbf\xb2\x34\xbb\x35\xf0\x5e\x8e\xa1\x27\xc7\x1e\x50\x9f\xb4\x97\xcd\x2a\xe0\xfc\x32\xe2\xdc\x02\xed\xa7\x2c\xb1\xdc\x65\x0f\x99\x73\x34\xb2\x26\xe8\x54\xd9\xd2\x5d\x6e\x16\x3c\xb3\x91\x7b\xe6\xe6\x18\xb9\xf0\xcc\xad\xcd\x52\xeb\xdc\x6d\xc7\xd0\x4d\xe8\xce\x6c\x74\xaa\xc2\xc6\x33\xdf\xfe\xf5\xef\x43\xee\xf2\x9f\xff\x3b\x95\xbd\xfc\xeb\xdf\x61\x99\xa3\xb1\x75\xa6\x18\x76\x80\x65\x5a\x26\xba\x98\x0b\x1d\x60\xbd\x07\xb3\xe5\xcc\x18\xa3\x9e\x6c\xcd\x4c\xd5\xab\x58\x73\x36\x34\x75\x14\x5e\x8d\x1d\x87\xd6\x8d\x24\x36\xd0\x74\xa4\x5e\x5f\x6e\x6d\x6b\x7f\x86\xba\xb3\xb6\x5d\x0b\xd7\x2d\x2e\xc2\x37\x37\xaf\x5f\xee\x4a\x77\x58\x5d\x6c\x5c\xa8\x93\x06\x2b\x52\xc1\x2a\xe9\x7d\xeb\x88\xe7\x31\x71\x63\xf3\xdc\x45\xa6\x2e\xae\x3f\x6e\x61\xf2\x6c\xa4\x7d\x1a\x9b\x37\xf7\x1f\x5e\x64\xf4\x4a\x58\x38\xcd\x6a\x0a\xba\x10\xd3\x2c\xfb\xca\x6e\x11\x96\x12\x1a\xc2\x15\xf6\x72\x52\x5d\xac\x35\xb0\x9c\xd4\x28\x1f\xed\x18\x79\x51\xb4\x8e\xfd\x06\x5e\xb1\x17\x1a\xdf\x5d\x2f\xaf\x18\xf1\x8a\xe1\xaf\xd8\xcb\xcb\xb7\x3f\xae\x3e\x4b\x5c\x78\xf6\x0c\x2b\x97\x76\x7b\xee\x65\x27\xbc\xe3\xb3\x23\xeb\x05\xf4\x0c\xd3\x70\x0d\x38\xea\xf9\xdd\x37\xbf\x3b\xd3\xd1\xcb\x2b\xf6\x42\xe0\x80\xff\x8e\x33\xdf\x71\x12\x03\xdc\x0f\x82\xfb\x41\xb1\xbf\xe3\x24\x41\xf1\x4c\x0c\x27\xc2\x0c\x9f\x85\x4e\xf4\xfc\x53\x15\x47\xb3\x29\xaf\x7a\xae\x65\xa8\
x97\x31\xf1\x0c\xcd\xde\x83\x89\xec\xcd\x1c\xb4\x8f\x6e\x3d\xc3\x7c\x77\x92\xe3\x22\x3e\x8a\xc2\x29\xee\x1e\x7c\x54\x0f\xaa\x6a\x2f\x5c\x0f\xbb\x88\x83\xa6\x68\x92\xb8\x07\x07\xdd\xf3\x63\xe9\x2e\xab\xf7\xf6\x51\x2f\xa2\x60\x48\x9c\xb8\x8b\x0d\x66\x87\x62\xeb\x39\x6f\x40\xc1\x51\x80\xbe\x07\x05\xdb\x1b\x5b\xaa\xa1\xad\x6e\xe7\x82\x03\x0c\x71\x17\x0a\xee\x88\x8b\x6d\xfb\xf4\x0d\x78\x58\x8a\x21\xef\xc3\xb3\x99\x74\xa8\xeb\x36\xd2\xa1\x6b\xd9\x97\x75\x8a\xc7\x01\xce\xdf\x03\x9e\xf7\xc0\xfb\xb5\xd2\xde\x52\xb5\x2f\x43\x27\x58\x70\xd7\x54\x03\xdc\x03\xbf\x9d\x05\x6f\x85\x7c\x19\x01\xcd\xb3\x77\x49\x07\x80\x20\x82\xfd\x92\x6b\xe3\x00\x2e\x23\xe2\x19\xfe\x3e\x4e\x88\xa3\x89\xde\x2e\x72\xfd\x03\xbb\x97\x30\x01\x9c\xa5\xa9\xbb\x66\x04\x90\x3e\x3b\xfb\xd2\xc0\xc5\x19\x07\x80\x60\x99\xfb\x38\xa1\x7a\x9a\xb1\xdc\x1d\x5e\xb0\xc6\xa3\x9e\x66\xa0\xd1\x45\xd7\x08\x00\x0d\xc0\x5d\x4e\x18\xd0\xbb\x3d\x9b\x5d\x2d\x7d\x79\x85\x0d\x86\xbd\xcf\xcd\x03\xa6\x67\x98\x3a\x72\xdc\xde\xfb\x6a\xfd\x15\x54\x2c\xcf\xdd\x37\x23\xec\x51\x9a\xe0\x6d\x8b\xc0\xcb\xc1\x04\x10\x38\x4e\x52\x77\x21\xe1\xf6\xea\xab\x59\x76\x6f\x93\xf4\xa3\x90\xb1\x03\xfc\x3b\x09\x30\x40\xfd\x00\xfc\x0f\x8a\xff\x1d\x10\x24\x47\xd2\x31\x1c\x5c\x88\xe7\x17\xfb\x08\xee\x0d\xe8\xef\x7a\x09\x82\x49\x4a\x46\xa0\xdb\x89\x4a\x57\xa0\xbb\x54\x5b\x10\xb3\x9d\x76\x8d\x68\x16\xca\x44\xb3\x4c\x25\x9a\x99\x6c\xb3\xca\x52\x62\xb3\x52\x28\x4b\x44\x35\xdb\xa2\xda\xb5\x6c\x39\x57\x93\x0a\x85\xec\x3b\x01\x9d\x45\xb2\xc9\x66\x32\xc9\x4e\x21\xc3\xd4\x24\xaa\x2c\xe5\xc4\x4a\xb2\x24\xa5\x13\x2c\x49\x08\x14\xc9\xbc\xd1\x15\x29\x55\xaf\x15\x33\xed\x02\x9b\x49\x14\x93\xa5\x6a\x31\x97\x2e\x53\x75\x56\xec\xb6\x5b\xcd\x9b\x91\x90\x1e\x12\x82\xac\xa6\x89\x6c\x53\xa4\x09\xa1\xd4\x69\xa6\x9b\x59\x52\xe8\xe6\x85\x4e\x27\xd3\xe9\xb4\x88\x56\xb6\xd3\xed\xd6\x18\xb1\xdb\x11\x1b\x95\x42\xaa\xf3\x56\x17\xda\x0c\xdb\x29\x53\x37\x23\xa1\x3c\x24\xd5\x4a\x37\x93\xa5\x0a\x34\xdb\x49\xa4\x44\xa9\x50\xe8\xd0\x74\x21\xd5\x68\x97\x1b\x05\xba\x9d\x6a\xd7\xaa\xe5\x2c\x51\xcc\x8a\xa9\x0e\x29\x16\x5a\xb9\x6a\xad\x58\x12\xeb\x99\x44\xe6\x66\x24\xf4\x06\x49\xa2\x56\xe9\x66\x73\x45\x22\x99\x23\xd3\x52\x95\x4a\x74\x8a\xe9\x92\x94\x2a\xa6\xf3\x4d\xa9\xd2\x24\xb2\x5d\xf2\xad\x94\xae\x67\xcb\x52\x33\x29\x96\x85\x7a\x9b\xad\x26\xd9\x72\x87\xc8\xbe\x44\x6d\xae\xd9\xe4\xda\x57\x14\x6a\xdb\x90\x78\xe8\x25\xfe\xdd\x41\x97\x1b\x4f\x5e\x31\xfa\x15\x73\xed\x19\xba\x41\xcd\xdf\xb7\x94\x44\x56\x72\x7f\x29\x18\x54\x71\xc5\x46\xaa\xe1\xf6\xe0\x68\xd2\x87\xe6\x6c\x4c\x6d\x0c\xb3\x59\x4f\xbd\x7c\x84\xce\x1c\x63\x27\xce\x60\x17\x9b\xb5\x97\x07\x95\x29\x4a\x0b\xc7\x53\x66\xf9\x68\xd9\xec\xad\x4d\x6e\x9b\xe3\x53\x1d\x1c\x51\x27\x79\xd7\xc5\x11\x90\x33\xc9\x31\x34\xc5\xe2\x3c\xcb\x80\x57\x0c\xbc\x62\xe4\xc6\x25\xfc\xe7\xab\xbf\x0b\xf0\xf5\x07\xf6\x35\xaa\xa3\xfb\xfa\x8a\x7d\xdd\x85\xf4\xaf\x3f\x36\xfc\x7e\x75\xac\x91\xba\x4d\x25\x36\x90\x29\xfe\x77\xde\xbf\x36\x63\x65\x6b\xa6\xf7\xdd\xe0\x6d\xfa\xf7\xed\x52\xee\xeb\xfe\xd9\x7d\x9b\xd2\x66\x40\xb3\x9e\x0a\xdd\x71\x57\x13\xef\x4e\x58\x77\x82\xf0\x8f\x40\x88\xcd\xda\xbb\x7b\x97\x80\x04\x70\xf9\xdd\x4f\x9e\x84\x22\x1a\xc3\x3b\xcc\x41\x90\xd1\x34\xfc\xeb\xff\x9d\xb3\xb0\xf0\xd4\x83\xd0\xd4\x13\xa7\xa6\x3e\x62\xf8\xb9\x61\xea\x8f\xe6\xf6\xfd\xd4\x1f\x69\xc6\x89\xa9\xdf\xce\xdb\x23\x53\xbf\xd5\x9e\x47\xa7\x3e\xe2\x3c\x5d\x9a\xfa\x88\xda\x74\xf3\xd4\x87\xad\x9e\xfc\x38\xab\x27\xdf\x4f\x3d\xc0\xf1\x8b\x73\xcf\xe1\x7f\x9b\xfd\xe7\x99\x3d\xf5\x71\x66\x7f\x62\xee\xb9\xcb\x53\x1f\x52\x8d\xbf\xed\xfe\xf9\x76\x4f\x91\x34\xcf\x52\x04\xc5\xd0\x1f\x1a\xed\xc1\xfd\x76\x7f\x7d\xf2\xff\x36\xfc
\x07\x0d\x3f\x38\xf9\x1f\x18\xef\x3f\x64\xf2\xff\xb6\xfc\x27\x5a\xfe\x07\x46\xfc\x13\xc9\x1e\x7d\x79\xee\xff\xce\xf3\x3f\xd3\xee\x3f\x30\xe0\xdf\x9f\xe7\xd3\x7f\x5b\xfd\xc7\x5a\x3d\x60\x01\xcb\x73\x2c\x49\x70\xfe\xea\x9e\xf0\x66\x3e\x38\x01\x47\x33\x70\x52\x72\xd7\x45\xf6\xf9\xac\x91\x21\xd6\x08\xcf\xa7\xfd\x57\xb0\x06\xf6\xac\xb1\x24\xcb\xde\x3f\x6b\x5b\x93\x89\xc2\xda\x47\xfb\x22\x2a\xc4\xda\xbd\xb3\xf6\x0b\xb3\xb6\xb1\x35\x82\xe3\x28\x1e\xa7\x79\xce\xcf\xad\x09\xdc\xe3\x6d\x64\x8c\x0d\x8f\x35\x9e\x20\x48\x92\x25\x70\x92\xe1\xe8\xdf\x29\x96\xa5\x39\x9c\xfd\xcb\x69\xa6\xc7\x23\xc0\xf1\x9d\x3f\xf9\x6f\xe3\x71\x3f\x8f\x80\x02\x3b\xeb\x8b\xc4\xe3\x2f\xac\xab\x87\x79\xe4\x08\x96\xfc\xef\xe4\x91\x78\xc5\x38\x9a\xe3\x79\x92\x63\x38\xde\x63\xd1\xe7\xd0\x71\xa1\xed\x1a\xa6\xde\x93\xe1\x08\x9a\x0a\x0a\x2f\x3e\x6e\x46\x40\x1f\x23\x38\xe1\xca\xc0\x29\x57\xb6\x93\x92\x09\x5d\x63\x8e\x22\xf3\xb3\x99\x35\x9f\xa1\x05\x32\xf4\xbe\xbb\x5d\x75\xf9\x1b\x01\xbd\x21\x5a\x3d\x94\xd6\xdd\xa3\x49\x1e\x55\x14\xc1\x6e\x9d\xde\x07\x49\x79\x8b\xe0\xa3\xa5\x1c\xe2\xe7\x36\x29\x47\x5d\x37\xdd\x93\xec\xf8\x54\x31\xdc\xd6\xed\x7e\x94\x94\x7d\x04\x1f\x2d\xe5\x10\x3f\x37\xea\xf2\x47\x7b\x7e\x6a\x4b\x15\x8d\xf3\x5b\xc7\xff\x41\x52\xde\x22\xf8\x68\x29\x87\xf8\xb9\x51\xca\x8f\xf8\xe5\x2b\x1b\x99\xa7\x8e\x60\x47\xdd\xc8\xdc\x1d\xc3\xde\x31\xcb\xbf\x62\x2f\x14\xc7\x28\x32\xa1\x21\x48\x42\x8a\xe1\x78\xa0\x92\x38\x80\x8c\xaa\x02\x15\x67\x34\x9c\x57\x49\x45\x43\x0c\xa0\x28\x55\xc3\x79\x9a\x03\x1c\xc7\x41\x5c\xa3\x01\xc1\xf3\x48\x81\x94\xc2\xbf\xbc\x62\x2f\x32\x82\x08\x32\x24\xc1\xe2\x84\x8a\x68\x7c\xf3\x1f\xe0\x08\x4e\x51\x28\x8d\xd1\x08\x0d\xd2\x80\x65\x59\x1a\x87\x34\xa1\x69\x32\xab\xf1\x1c\x8e\x70\x84\x64\x85\xe5\x01\x62\x78\x15\xbe\x78\x8a\x03\x42\x5d\x32\xcc\x0f\x92\xfe\x01\xb8\x70\xf3\x8c\xff\x35\xf5\x3b\xcd\x00\x92\xa3\xae\xde\xdd\xd6\xed\x69\x86\xd9\xe4\xd1\xcc\x66\x3e\xdf\x5d\xaf\x18\xa0\xbd\x7f\xb7\xff\xec\xbf\xdd\xff\xb1\xa1\x4d\x10\x04\x21\xc9\x32\xb3\xc9\x20\xbf\x28\xa2\x66\xb9\x59\xce\x24\x07\xa5\x06\xc1\xcd\x27\x35\x95\x6c\x16\x9a\xf3\x18\x19\xeb\xa6\x74\xd6\x5e\xd2\xd9\xb1\x09\x87\xfc\x3a\xc9\x65\x2a\x79\xe6\x6d\x9d\xa9\xd0\xb1\x4a\xd3\x8a\xd3\x66\x16\x8e\xf2\xab\x4a\xaa\x00\x96\x56\x86\xb5\x26\xac\xbd\xe6\x52\x39\x6a\x03\x5a\xe8\x54\x5a\xa5\x9a\x2e\xec\x2f\x7a\xc6\x66\xcb\x09\x4e\xe9\xe0\x6d\x73\x6a\x2c\xcd\xf9\xba\x3c\xea\x92\x29\xb2\x4b\x5a\x15\x5c\xed\xae\xd7\x6d\x7e\xd5\x36\x5b\xf2\x18\x87\x0b\x7a\x55\x69\x9a\xeb\x01\x5a\xac\xa8\x98\x4d\x2d\xd0\x9c\x6f\x0e\xeb\x6c\xa5\x55\x8d\x8b\xd3\x94\x95\x6b\xea\xeb\x78\x02\x91\xad\xf4\xd8\x83\x3f\x94\xa8\x22\x5c\x4f\x88\xea\x01\x99\xd0\x51\x84\xe3\x4b\xdc\xfc\xf3\x26\x74\x00\x55\x15\x84\x14\x9e\x17\xfe\x6a\x97\xaf\x55\xf8\x19\xc3\x0f\xdb\x02\xf7\x1c\x3d\x7e\xc1\x55\x52\x23\x29\x52\x26\x34\x5e\xc3\x15\x88\x64\x95\x40\xbc\x4c\x43\x95\x26\x48\x52\x25\x39\x4a\x55\x91\x46\x33\x2c\xc2\x81\xa6\x2a\x2a\x87\x58\x5e\x26\x21\xa3\xe1\x00\x47\x80\x52\xd4\x4b\xb6\xc0\x9e\xd3\x76\x06\x67\x70\x9c\xbf\x7a\xd7\x2f\x69\x91\x1c\xc9\x70\x97\x6c\x81\xba\xd1\x16\x16\x78\x5c\x4a\x39\x73\x73\xb1\x9a\xcd\xeb\x8c\xdc\x76\x9b\x83\x4a\xa3\x2f\x91\xf3\x96\x49\x25\xe2\x0a\x51\x46\x72\x79\xcc\x25\x52\xfd\x86\x94\x8e\x21\x10\x07\x85\x65\x73\x5d\x66\x29\x33\x9b\x21\x87\xd3\x61\x66\x52\x1f\xa7\x55\xd8\x7f\xcb\x9b\x7d\x45\x4c\xea\xd3\x1c\xd5\x9e\x8d\x38\x6f\xee\x3c\x5b\x08\xaa\x67\xb9\x94\x59\xa1\x65\x79\x86\xe6\x43\xaa\x9a\xeb\x26\x0b\x71\x44\x34\x8a\xd5\x0e\x52\xc4\x78\x23\x31\x9b\x01\x75\x9e\xa9\x1a\xa3\x86\x23\xcd\x0b\xc0\x96\xe7\x54\x56\x1c\xd1\x72\x52\x60\x26\xad\x79\xaa\xb4\xa0\xc7\x22\x93\x57\x34\xa
5\x56\x2f\x25\x87\x32\x39\xa3\x16\x44\xa7\xe4\xc1\xd7\x4f\xd8\x42\x8b\x0a\xa9\x52\x4a\xf8\x9f\xb2\x05\xf6\x39\x7a\xfc\xa2\x90\x90\x57\x54\x92\x60\x34\x06\x87\x3c\x45\x11\xac\xaa\x40\x15\xc9\x0c\xcf\xab\xa4\xc6\x20\x45\x25\x79\x92\x26\x15\x85\x27\x09\xa8\x28\xac\x4a\xb0\x14\x0d\x08\x96\x24\x09\x46\xd6\x14\xee\x92\x2d\x30\x67\xb5\x9d\xa0\x59\xfc\xfa\x5d\x12\xc7\x19\x8a\x65\x01\xce\x12\x97\x6c\x81\xbc\xd1\x16\xb8\xf2\x64\xdd\xc8\xcf\xbb\x85\x91\x98\xe7\x57\x2e\x43\x8c\xd5\x0a\xc1\x4a\xe5\x46\x93\x5b\x35\x0a\xce\x92\xcf\xe3\xcd\xba\xb2\xce\xdb\xf1\xdc\x2c\xb1\xb2\xaa\xe5\xe9\xb8\x21\x94\x9a\x8d\x25\xa3\xaf\xf0\x41\xdf\xca\x64\x40\xda\x28\x9a\xe4\x2c\x57\x4d\x89\x33\x9a\x68\x38\x80\xd4\x02\x71\x21\x30\x97\x1c\x0e\x40\x8e\x53\x16\xc8\x68\x35\x8b\xb5\x21\xab\xc7\x72\x24\x6a\x97\xe2\xc3\x58\x33\xe6\xcc\x8b\xc5\x79\xbe\xc8\x75\x78\x51\x4a\x80\xc9\xa2\x98\xab\xea\xd5\x5c\x5a\x6e\xac\xd7\x89\x74\x61\xca\x16\x47\xb1\x21\xd1\x26\xe6\xea\x20\x53\x75\xaa\x0b\xb4\xd4\x66\x83\x6e\x2b\x89\xe3\x1e\x64\xe5\x84\x2d\x34\x9b\x21\x55\xfa\x1f\xb3\x05\xe6\x39\x7a\xfc\x22\x2b\x08\xe2\x34\x8b\x80\xa2\x12\x00\xa8\x84\xa2\xc9\x2c\x22\x35\x99\xe7\x69\x4d\xa5\x68\x45\x65\x20\x45\x11\x04\xd0\x48\x0e\x2a\x0a\x4e\x13\x84\x4c\xd1\x32\xa7\x70\x40\xe5\x19\x9a\xa0\x2e\xd9\x02\x7d\x56\xdb\x49\x0e\xa7\x89\xab\x77\x09\x9a\x65\x78\x0e\x27\x59\x96\xb9\x64\x0b\xc4\xad\x39\xd2\x7a\x9a\xe8\x50\x0a\xde\xcf\xd6\xd7\xac\x36\x88\xd1\x23\x0d\xa4\xd7\xed\x89\x98\xeb\x73\x94\x5d\x4a\xd4\x0b\x78\x9b\x5a\x25\x88\x76\x73\x38\x18\x97\xa6\x6e\x8c\x10\xe7\xf3\x84\xf8\x06\x8b\x19\xc6\x95\xc7\x6d\x7b\x54\x86\x89\x8c\x34\x99\xd6\xf9\x5a\x4c\x6b\xb1\x29\x90\x04\xb2\x97\x8e\x78\xb6\x50\x5d\x1c\xe6\x72\x56\xb5\x2b\x55\x9b\x98\x8e\xa4\xd6\x24\x99\x9b\xf6\xab\x10\x55\x4b\x4e\x8a\xcf\xb7\xdb\x12\x5c\xe6\xea\xe5\xdc\xa2\xe4\x2c\xe7\xc6\x28\xd7\x14\x92\x6a\x2c\xdf\x75\xca\xab\x96\x58\xb2\x75\x71\x5e\x2e\x30\x86\x50\x54\xd2\xa8\x2d\x17\xcc\xb2\x31\xab\xe4\x95\xd1\x4c\x74\x5b\x06\xef\xe5\x48\xdd\x13\xb6\x50\x5f\x84\x54\x29\x29\xfc\x4f\xd9\x02\xfd\x1c\x3d\x7e\x61\x15\x59\x25\x14\x9a\x81\x8c\x4c\x52\xa4\x06\x71\x5c\xe3\x58\x44\x50\x04\x43\xa9\x88\xa6\x35\x0d\x67\x11\x49\x68\xa4\x0c\x28\x1e\xe7\x28\x95\xe7\x11\xcd\xd0\x48\xc3\x15\x95\xd2\x70\xe2\xe2\x7a\xe1\xec\x8a\x80\xa1\x58\x8e\x07\x57\xef\x12\x80\x62\x29\x8e\x64\x28\x0e\xbf\x64\x0b\xe0\x46\x5b\x20\x57\x7c\x31\x0d\xc7\x0e\x5e\x61\x84\x4a\x3f\x96\x4f\xbe\x49\x74\x2b\xbe\xd0\x06\xc5\xb5\xd3\x1c\xe6\x1a\x6f\xe6\xe8\x0d\x71\x25\x50\x11\xa6\xb9\x78\x3e\x6d\xae\x8d\x61\x75\x31\x12\xda\x09\x62\x92\xe4\xf9\x2e\x8c\xb7\xe4\x2a\x91\xaf\x8f\x3a\xf6\x64\x29\xf1\xa2\xca\x17\xe3\x76\xaa\xe4\xcd\x9d\x67\x0b\xc1\xf5\x82\x9b\x86\xc3\xb8\xd6\x67\x4b\x62\xd9\x70\xfb\x83\xd1\x4a\x15\xed\x44\x13\x8f\x97\x70\x63\x04\x91\x36\x03\x4d\x19\x4f\x2e\x80\x22\x57\xd7\x52\x7c\x5a\xea\x8f\xf4\x7e\x27\xef\xf4\x07\xa2\xce\x4e\xc7\x5c\x0b\x97\x96\xd9\xc4\x80\x7a\x2b\x1b\xc3\x72\x07\xaf\x33\x55\xb3\xb4\xf6\xe3\x4e\xf3\x84\x2d\xd4\x4a\x21\x55\x4a\x08\xff\x53\xb6\x40\x3d\x47\x8f\x5f\x20\x64\x49\x52\x66\x10\xe0\x55\x5e\x05\xaa\x86\x73\x50\xe6\xa0\x02\x64\x99\xc5\x15\xc4\x53\x00\xaa\x32\xaf\x90\x80\x67\x01\xa3\xca\x32\x52\x71\x99\x23\x21\xa5\xf1\x2c\x60\x21\x45\xb0\x7e\x53\x34\x71\x52\xad\xc9\xb3\xda\xce\xe0\x38\x79\xfd\xae\xbf\xed\xc4\xf0\x80\xbb\xb8\x76\xc6\x6f\xb4\x05\x66\xb2\x2e\x13\xd4\x1b\xa1\x66\xb5\x64\x61\x56\x58\xc8\xee\x62\x3d\xa9\x42\x82\x5e\x2c\x47\x4b\x59\x9e\xbb\x49\xaa\x3c\x8c\x75\xd2\x4c\xd5\x1c\x0c\xfb\x9d\x59\x63\x98\x6a\xbb\x23\xaa\x28\xc9\xf1\xae\x54\x6d\x0c\x26\x79\x31\x97\x49\xc5\x78\xa7\x3f\x70\xd6\x64\x7e\xed\x4c\xe7\x54\xbc\x
eb\xcd\x9d\x67\x0b\x01\xf5\x94\xe8\x04\xdd\xe6\x0b\xce\xd2\x2a\x77\xca\x13\x47\x91\x24\x35\x65\x98\x70\x26\x2d\x93\xf1\xca\x82\x18\x0e\xd9\x54\xaa\x69\xe1\xf5\x4e\xb9\xf3\x66\xba\x29\x61\xd9\xd0\x3a\x26\xc3\x8c\xf9\x5c\x5f\xc4\x3b\xe3\x91\x01\x0b\xf3\x21\xca\x4d\xa4\xbc\xb6\xe6\x0b\x15\xd9\x12\x72\x6f\x65\xcf\xfb\x57\x4f\xd8\x42\xc5\x3a\xa5\x4f\x7f\x71\x5b\x20\x6e\xb7\x05\xf2\x39\x7a\xfc\x42\xcb\x0c\x43\x52\x8a\xc2\x23\x1e\xf0\xbc\xa6\x92\x9a\x2c\x43\x0d\x67\x68\x5a\xa1\x29\x05\xd2\x88\x95\x39\x40\xa8\x34\x43\x20\x86\x44\x0a\xc4\x35\x99\x90\x79\xc8\xf1\x32\xe0\x70\x85\xd2\x5e\xbc\x46\x0d\xea\xa4\x5a\x9f\xcf\x82\x58\x82\xa0\xce\xc7\x85\xdd\xdd\xed\x3e\x25\xe0\xb8\x4b\x6b\x67\xee\x46\x53\x00\xee\x58\x2a\xad\xd0\xf0\xcd\xe4\x2b\xec\x6c\x9e\x18\x75\xd2\x8d\x42\xc7\x9c\x25\x8a\xad\xee\xac\x1b\x5b\xe9\xb1\xd5\x0c\x1a\x72\x46\x58\x56\x44\x5a\x28\xbb\x31\x82\x24\x1b\xd9\x8a\xdb\x90\xe4\xa6\x39\xa8\x2f\x2a\xf3\xb1\xc5\xe8\xe4\x7a\x95\x56\xd2\x15\x4b\xe1\x5a\xc5\x4a\x26\xe3\x4f\x9d\x67\x0a\x81\xa9\xac\xf7\xd7\xd3\x1c\x0f\x8b\xdd\x18\x5b\x98\xd0\x6f\x0b\x92\xe9\x22\x16\x27\x8d\xb7\xa5\x99\x15\xa8\xd4\x02\x8c\xe9\x42\x9a\xaa\x65\x0b\xe3\xb2\x54\x2b\x1a\x9d\x04\x0d\xb3\x5a\x41\xc8\xb4\x6d\x32\xbb\xb4\x46\xd5\xea\x52\x5f\x2d\x3b\x49\x75\x50\x59\x41\xe6\xad\xb5\x86\xad\x7a\xba\xe1\x99\x42\xe9\x84\x29\x94\xf4\x53\xea\xf4\x17\x37\x05\xea\x76\x53\x20\x9e\xa3\xc6\x2f\x0c\xa9\xf2\x9c\x46\x93\x0c\x42\x0c\xa7\x02\x99\x60\x65\x5a\xe6\x78\x8d\x20\xa1\x46\x93\x00\xc8\x2c\xcd\xf0\x90\xa0\x34\xa8\x01\x0a\x27\xa1\x8a\xcb\x34\x21\x33\x24\x29\xe3\xac\x8c\x78\xfe\x92\x29\x9c\x57\x76\x8e\xb9\x76\xcf\xdf\x4e\xa3\x68\xfe\xd2\xaa\xf9\xd6\x02\x12\x51\x79\x1b\x00\x69\x46\x5b\xb8\x9c\x67\xdb\x94\xb9\x2a\xcf\x9b\xcb\x0c\xd9\x9a\x58\xc3\xd8\x3c\x2d\x94\xdd\x24\x28\x10\x25\x36\xc1\x32\x6f\x7d\x8e\xa8\xea\xce\x4c\xa8\x8f\xeb\xf8\x52\x4f\x4f\x80\x36\x30\x1a\xc4\x42\xd2\xe7\xad\x76\x2d\x39\xaa\xb7\xe9\x59\xa9\x42\x52\x54\xbd\xe4\xfb\xe2\x8d\x15\x54\x3c\x2d\xcd\xed\xff\x11\x3c\x45\x75\x0e\x9f\x17\x42\xa5\x3a\xf4\x67\xb9\xcf\xda\xd9\x2c\x89\xfa\xd3\x79\xd3\x69\x8b\x15\x4d\x18\xcd\x5b\xce\x44\x10\x88\xf8\x5b\x0d\xcf\x0f\x2a\x42\x3a\xa7\x66\xe5\x2a\xe1\xd4\x8d\x59\x7e\x69\x36\xd9\xd1\x2a\xb6\x46\xcb\xb7\x99\xd3\x59\xe2\x52\xb1\xb3\xb2\x0a\xf4\xbc\x55\xcd\xcd\x53\xeb\x94\x19\x4b\x8f\xbb\xe9\x32\xea\x7b\xb6\x90\x3b\x61\x25\x99\xea\x29\x4d\xfb\xdf\xb1\x12\xf0\x1c\x0d\xf7\x5e\x2f\x83\xe1\xfe\x9e\x13\xe0\x59\xfc\x3b\x0e\xbe\xe3\x00\xc3\xf1\x1f\xde\x7f\x67\x75\x99\xe7\x18\xf2\x7c\x40\xd8\xdd\xa5\x08\x9e\xe2\x19\x96\xe0\x2f\xad\x8a\x4f\x6b\xba\x4f\xd2\xcf\x9e\x94\xf3\x57\xa2\x53\x30\xa8\x55\x7c\x55\x2f\x24\xd8\x94\x99\xe2\xb3\x04\xbe\x1c\x24\x62\x0e\xae\xbb\xce\x22\xb7\x58\x83\x8e\x5a\x6f\x77\x61\x22\x0f\xd3\x9e\x12\x8b\x27\x94\xf8\xf4\xb5\x53\x62\x41\x48\x0c\x3f\x81\x91\xa7\x5e\x2f\xbe\x32\x5d\xdf\x94\xbb\xe1\x05\xae\x51\xf7\xe8\xce\xbc\x97\xe7\xec\x01\xb4\x33\x16\x77\x05\x4c\xb8\xb5\x39\x22\x18\x72\x5f\x47\xa5\x01\xc3\xbf\x62\x20\x1a\x18\x6a\x5f\x82\x62\x39\xaf\xb5\x26\x1a\x18\x7a\xb7\x7a\xa7\x70\xda\xdb\x8c\x8e\x06\x86\x09\x75\x49\x92\xd1\xc0\xb0\xe1\x66\xcb\x68\x60\xb8\x50\xf7\x1f\x15\x0d\x0c\x1f\xee\x8f\x8c\x06\x06\xe0\xa1\x8e\xbd\x88\x5c\x81\x77\x5d\x71\x11\xe1\x84\x3b\xcf\xa2\xd2\x43\x86\xbb\xbb\x22\xc2\xa1\x8e\x3b\x8e\xe8\x88\x60\xc2\x7d\x52\x11\xc1\x30\xc7\x9d\x39\x51\xa9\x61\x43\x0d\x4b\x11\xc1\x70\xc7\x1d\x2c\x51\xa9\xe1\x43\x8d\x30\x11\x3d\x20\x7e\xdc\xe9\x11\x91\x1a\x02\x1c\x83\xa1\x9e\xf3\x06\xec\xa7\x1c\x9e\xbf\xfc\x7a\xb6\x8d\xb3\xbc\xf5\x34\xfd\x99\x17\x41\x3f\x1c\xe3\x02\x91\xe4\xe8\x68\xed\xfe\x03\xee\x9f\xb7\xf3\x3a\x70\xbc\
x17\x03\x7d\xfd\x81\x7d\xc5\x7f\xe7\xfc\x54\xcb\x6b\xf7\x39\xf4\x01\xd1\x20\xd8\x07\xe4\xbf\x69\xd7\xfe\xfa\x03\xfb\xcf\x57\xf5\xeb\x0f\x8c\x7e\xc5\xbe\x9a\x5f\x7f\x60\xd4\xff\x1d\x9f\xbf\xc0\x5f\xb1\xaf\xf2\x6c\x65\x98\xfa\x99\xd3\x2f\xc1\x7b\x17\x8f\x40\xa0\xd1\xe8\x3d\x94\xdd\x31\x9c\xa3\x9b\x17\x8f\x63\x04\xf1\x3d\xe7\x2c\xc5\x31\xf2\xa7\x34\x06\x7f\xc0\x6b\x56\x4e\x69\xc5\xd1\xf1\xab\xfd\x07\xea\xb4\x56\xf0\xa7\xb5\x02\xbf\xa4\x15\x9b\x80\xe6\xa9\x05\xff\xb7\x5a\xfc\x75\xd4\x22\x98\x73\x1e\x3e\x70\xa7\xd4\x02\xfc\x4e\xd0\x27\xd4\x82\xbb\xa8\x15\xd4\x56\x29\xe8\x5b\x95\x62\x77\x5c\xea\x31\xa5\xd8\x9d\xdb\x7a\x5c\x29\xa2\x1f\xb9\x3b\xaf\x14\x91\x9b\x1c\x3f\xe0\xbd\x4f\x27\x94\xe2\x68\x05\x71\xf8\x40\x9c\x56\x0a\xe0\x5f\xc7\x4a\xc1\x5f\x71\x15\x87\x25\xbe\xa7\x1d\x60\x7f\xfd\xad\x26\x7f\x1d\x35\x09\xae\x10\x0f\x1f\x98\xd3\x6a\x82\x9f\x0c\x29\xf8\x65\x3d\xd9\xa9\xc7\xdf\x5a\xf1\x97\xd1\x8a\xe3\x13\x9f\xfb\x0f\xf8\xbe\xd1\xe0\x3f\x5f\x5d\xeb\xd1\x43\xbf\x9a\x6d\x8d\x1f\x95\xf0\x2f\x7c\x52\xf3\x03\x5e\x70\x76\x7e\xa6\xb6\x55\x8c\xfd\x07\xe6\xfd\x4c\x3d\x70\x32\xff\x30\x53\xd1\xb3\xab\x5f\xf8\x74\xe6\x07\xbc\x59\xf0\xd4\x4c\x1d\x9d\xec\xdc\x7f\xf0\x93\x77\xe6\xb6\x93\x73\xae\x3d\x73\x5c\x84\x1e\xb5\x1a\x0f\x8c\xf5\xf0\xa9\xfd\x5f\xc4\xc6\x3e\x3c\xbf\x3e\xae\xcc\xed\x3f\xe0\x3f\x79\xe6\x1e\x30\xe8\x5f\x63\xe6\x3e\x21\x8e\x1d\x9d\xc2\xdd\x7f\x60\x22\xcf\x5c\x74\xff\xf7\x21\x36\xf7\xd3\xbc\xe5\x67\xd9\xdc\xb6\xfa\xbc\xff\x40\xfc\xe4\x99\x7b\x9a\xcd\xfd\xac\x99\xfb\x78\x9b\x0b\x16\xea\xf7\x7f\x73\x81\xb3\x90\xda\xcc\x54\xb7\xbc\x44\x7c\xd5\xae\x27\x17\xff\x85\xb7\x8f\xda\xd3\xf5\x83\x99\x0f\xbe\x12\xf8\x1e\xa9\x6d\x37\x14\xf6\x7f\x53\x1f\x2a\xb5\x07\x74\xf9\xd7\x92\x9a\xbf\xf1\xb1\xff\x1b\xff\x58\x5d\x7b\x60\x39\xf8\x2b\x49\x6d\xbb\x41\xb3\xff\x9b\xf9\x58\xa9\x3d\x50\x96\xfd\x68\xa9\x5d\xd9\xec\x39\xf1\x13\x95\x51\x37\x7a\xb6\x3f\x53\x79\x72\x93\x07\x3f\x7b\xdc\xd7\xef\x67\x06\xaf\x18\xb5\x7d\x9d\x2d\xbf\x7b\x9f\x0f\x45\xef\x2b\x70\x1a\x1c\x39\xe8\xf5\xa8\x28\x77\xa8\xc7\x79\xcd\x48\xe7\x36\x4d\x2f\x11\x75\xfe\x0c\x32\xb9\x23\xea\x08\xa3\xf7\x25\x87\x87\xa9\xa2\xbc\xb7\x91\x53\xe7\x37\x27\xc3\x34\x04\xf7\x39\x4e\x0b\x86\x7d\xd9\xa1\x3b\x49\x43\xf0\x9b\x9d\x68\x7c\x19\x32\x00\x30\x1c\x83\xd3\x04\xcb\xb1\x80\xc4\xc9\x4b\x7d\x1a\x97\xc8\x3a\x7f\x24\x75\x3f\x5f\x74\x98\xaa\x3b\xe6\xeb\x14\x99\xc4\x4d\xca\x7a\xfd\x47\x20\x1f\x50\xdf\xd3\x3f\x1e\x75\xb2\x1b\x07\xbf\xa8\x72\x97\x01\x11\xa1\xed\xad\xc8\x80\xc8\xf0\x86\x48\x54\x40\x54\xb8\x88\x1e\x15\x10\x1d\x2e\xb3\x46\x05\xc4\x84\x2a\x73\x17\x6c\xeb\x32\x20\x36\x04\x28\xb2\xb0\xb9\x50\x05\xea\x5c\x6f\xce\x55\x40\x7c\xb8\x94\x15\x15\xd0\x71\x7f\xce\x23\x8a\x04\xde\x2d\xfd\x23\x43\x22\x42\x4b\xd1\xe8\x34\xbd\x5b\x1a\x45\x86\x44\x1d\xe7\xea\xe7\xba\x3f\xae\x03\xa2\x8f\x01\x45\xe7\x8d\x39\xce\x83\xa3\x53\xc4\x86\x12\xea\xc8\x80\xb8\xe3\x1c\x33\x3a\x45\xfc\x31\xa0\xc8\x86\x7b\xd4\xb3\xc3\x3c\x40\xd1\x51\xd7\x0e\x73\xa9\x6b\xe7\xde\x9f\x9f\x7d\x46\xdf\xce\xb5\x5f\x1b\xbc\xa7\x73\xe7\xec\x8f\xcd\x3e\x21\x26\x06\x7f\x10\x8e\x42\x04\x8b\x48\x95\x55\x38\x4a\x53\x65\x96\x57\x19\x16\xe7\x79\x82\x50\x68\x02\x42\xc8\xd2\x84\xa6\xd2\x38\x87\xab\x8a\x4a\x91\x24\xd4\x78\x1c\x28\x2a\x60\x39\x06\x87\x3c\xcd\xca\xc4\xcb\x2b\xe6\x27\x79\xd1\x4b\x2d\x81\xb3\x09\xcc\xae\x27\xfb\xfc\xcb\x61\x58\xea\xd2\xab\x63\x58\x8a\x7e\x09\x85\x76\xbf\x99\xbb\xc0\x0c\x90\x41\x0e\xc6\x56\x8e\x6b\x64\x46\xa9\x38\xd2\x15\x92\xad\x74\xdc\x6c\xa1\xb0\x6e\xb7\xb8\x45\xcb\x78\x4b\xc0\xe4\x8c\x2e\xd2\xde\xf1\xc8\x37\xef\x21\xef\xb0\x81\x18\xea\x35\x4e\x1c\xfe\xf4\x86\x26\x5a\x2d\x69\x3b\x26\x09\x66
\x09\x35\x6b\x35\x66\x7a\x69\x5e\x75\x53\x6c\xa2\x9f\x2b\x92\x12\xe2\xd5\x56\x45\xcb\xe4\x62\x79\x83\xce\xcf\x9b\xe5\xd8\x9b\xe0\xb2\xde\x09\x87\x74\xba\xd5\xf2\x0f\x34\x08\x85\x14\x97\xa9\x2c\x3b\x1a\x99\x2c\x0d\x5a\x85\x39\x39\xed\xaf\x1d\x06\xce\x88\x2c\x2c\x65\xb3\xb8\x43\x27\x48\xa2\xd6\x6a\x56\x0d\xb7\xef\x1f\x31\x48\x5b\xc2\xd2\x3f\x41\xe7\xe1\x4e\x9c\xec\xef\xce\xec\xf9\xa9\x4a\x70\xd6\xcf\x8c\x87\x7c\x9d\xec\xb2\x43\x86\xee\xaa\xb5\x02\x51\x13\xb2\x83\xec\xa2\x6b\xb8\xf3\x21\x9a\x95\x62\xa4\x59\x49\x56\xc8\x7e\x46\x6f\x0e\xd7\x7c\x3b\x03\xa9\x4a\x97\xa3\x92\xa8\xa4\x8f\x87\x23\x40\x1b\xca\x30\x36\xea\xa6\xed\x8e\x26\x53\x45\x2b\x35\xed\xcf\x1a\x55\xeb\xcf\x97\x60\xb3\x7c\xf0\x70\xc4\xe1\xcf\xd4\xe1\x4b\x9f\xd7\x32\x91\x8c\x0b\x65\x8a\xee\x26\x52\xa4\x9b\x6d\xa5\xcb\xa0\x46\x0a\x78\x09\x0d\x2b\x5c\xbe\xc6\x98\x12\x10\x78\xd4\x36\xd4\x55\xce\x0d\xbc\xc1\x40\x7f\x50\x5e\x09\xbb\xae\xfa\x8f\x3e\x30\x5f\x82\xba\x6e\x37\xe2\x3e\x57\xae\x3e\x5f\xa4\x66\xe5\xb6\x50\xe5\xd9\x1a\xa8\x35\xdc\xa6\xba\x90\x52\xd9\x49\x2a\x9e\x6c\xa2\xc9\x5a\xad\x56\x3a\x23\xcb\x54\x8c\x62\x2b\xac\x2f\xb5\x56\xab\xbe\xc5\xaf\xc7\x13\x03\xae\x45\xf2\x8b\x41\x0e\xd4\x6d\x9e\xb1\x14\xb6\x3c\xb6\xdd\x3e\x31\x48\x2c\xf9\x62\xb9\xaa\xf2\xc3\x56\x4b\xcc\xd9\xdd\xba\x2f\xcf\xb9\xbd\xd8\x1e\xd3\x6a\xb5\x1a\xbb\x57\x4b\xb8\xb2\xde\xa9\x31\x22\x6b\xa5\x8a\x78\xb1\x1a\x5b\x74\xeb\x49\x7e\xdd\x99\x77\x5a\x0d\x72\x69\x54\x8c\xee\xac\x2e\x83\xd4\x7c\x5c\x2d\x22\xff\x15\x2d\x65\xda\x2e\x04\xb5\xc4\x9e\x4a\x4c\x11\x95\xa1\x3e\x58\x96\x60\xb3\xc2\x33\x89\xb5\xe6\xf0\x08\x57\x2c\x5b\x7a\xeb\xac\x13\xed\xfc\x30\x6d\x15\xd8\xe1\x7c\xb8\x08\x19\x41\xba\xd5\xc4\xb7\x13\x5d\x6c\x53\x69\x1c\xf5\xcb\x8c\xb0\xe2\x93\x78\xc5\xc9\x88\xfa\x5c\x01\x2c\x00\x4d\x9e\xeb\x0e\xa8\x71\x71\x38\xe6\xab\x2c\x3d\x4c\x92\x73\x6f\x7c\xb3\xd5\x6a\x6e\x59\xb1\x2a\x8b\x6e\x3c\xad\xc6\x95\xdc\x4a\x6a\x4e\xe3\x68\x9a\x2d\xaf\x27\x53\xb6\xab\x5a\x0b\x45\xab\xaf\x87\x59\xed\x2d\xdd\xaa\x25\x0b\x84\xb8\x3d\x97\x90\x37\xda\xfa\x41\xc9\xd2\x61\xad\xdf\xeb\x64\x75\xaf\x73\x9e\x0e\x26\xab\xd1\x79\x2e\xd3\x76\x4e\x11\x8e\xe1\x09\xef\x4e\xcc\x85\x5d\xc5\xb1\xb8\x9e\x88\x5f\x8c\x82\x5f\xf4\xf0\x97\xf6\x40\xb6\xcf\xb3\xdd\x22\x27\xb0\x83\x91\x2e\x56\x10\xae\x36\x9b\x6c\x2b\xab\xa4\xaa\x4b\xa6\x1a\x5f\x8c\xb2\x53\x85\x6c\xa6\x00\x0d\xf3\x64\xce\x00\x07\xfa\xbd\xd7\x27\x3c\x30\x87\x8f\xd8\x20\xc4\xcd\xba\x50\x1d\x58\x60\xb9\x9e\xc1\xd5\xc9\x53\x8b\x5b\xfa\x1e\xe5\x6f\xb1\x07\xe2\xab\xfb\x9d\x31\xe4\x11\x1d\x6f\x8d\xf1\x98\x9e\x8d\xef\x2f\xef\xcb\xb0\x8f\xf7\x3e\x0f\x7f\x0a\x7d\x56\x99\x61\x85\x93\xf4\x6d\xa7\x35\x6b\xd5\xcf\x29\xe3\xde\x1e\x85\xc3\xf8\x3b\x63\xc2\x03\xfa\x93\x78\x83\x49\x4e\xd0\xf6\x94\x73\x07\xf9\x55\x77\xf7\xd9\xf8\x79\xd2\x3d\xb4\xf9\xc0\x7c\xdc\xa9\x5f\x0f\xf8\xcc\x62\xb1\xda\xd1\xf8\x83\xd0\x17\x7b\x7a\x12\x17\x64\x7d\xb8\xbc\xf1\xca\x41\xf4\x3f\x25\x86\x3d\xc0\x7f\x32\x36\xf7\x09\xf7\x7d\xfe\x69\xa6\x73\xcf\xe0\xef\x61\xfd\x8c\x2e\x9f\x54\x63\x5c\x2c\x5f\xd0\xcf\xa0\x2a\x4e\x13\x5a\x2e\xcc\xff\xc3\xfa\x19\x3d\xc7\xca\x0d\x3b\xd2\x22\xf1\xa0\x7e\x06\x1c\xfa\xbd\xf3\x97\x9a\xc1\x95\x0f\xc4\x9b\x3a\x3f\x27\x7e\x97\x18\x9f\xa7\xe5\xe0\x4f\x9f\x88\xff\xdd\x04\x9d\xc7\xff\x68\xbc\x79\x20\x27\xc4\xf3\x64\xea\xb4\x3f\x3f\x29\x9f\x4f\xa7\x4f\x5d\xb7\x9b\x42\xe7\x42\xbc\x09\x5c\xe3\x3c\x0c\x7f\xf5\xec\xfc\x2f\x52\xfe\xf5\x74\xfc\xe1\x97\x5a\x9d\xc7\x2f\x1c\xf0\xe7\x22\xe1\x17\xfe\x2a\x39\xbf\x37\x28\x90\xe3\xfa\x9c\xcf\x2c\xd2\x72\x29\x7a\x9a\xac\x88\xcb\x49\x35\x4e\x5a\x59\x29\xb6\x06\x6c\x6d\x65\x38\x60\xa4\x95\xd2\xdd\x71\xb5\xad\xdb\xb3\x7a\xac\xe1\x3f\xc
0\x8e\x9d\x41\x33\x04\x2f\xf9\x4e\xa0\x67\xcf\x54\x07\x7c\x70\x64\xfc\xfd\x45\x34\xfc\x7f\x7a\x6b\xf2\xff\x80\x29\x23\x42\xd8\x00\x45\x75\x30\x6b\xd8\xa3\x3e\x10\xed\x37\x51\x40\x25\x2d\x61\x14\x88\x58\x8d\xa1\xd6\x2c\x52\xb8\x5c\x1c\x89\x30\x51\x6f\x54\x40\xf7\xcd\xd6\xf9\xc1\x5a\xcf\x53\xab\x24\xac\x77\x3a\xe3\xfc\xaa\xc1\xb4\xf4\xb6\xab\xf2\x73\x7d\xa6\x97\xa7\x19\x5a\x4a\xea\x7f\xfe\xe9\xed\x61\x9a\x96\x89\xf6\xa7\xf8\xfd\x7f\x5d\x7b\xe6\x6f\x12\x5d\xaf\xec\x05\x7f\x15\x5b\x83\x3c\x03\x65\x84\x64\x1c\x11\x32\xbb\xf9\x48\xb1\x2a\xa7\xb1\x38\x4e\x2a\x90\x56\x49\xc4\xd3\x2c\x49\x31\x34\xa1\x90\x9c\x4c\x12\x1c\x44\x0c\x01\x39\x05\xc8\x40\x26\x55\x96\x7c\x79\xc5\xb8\xe7\x55\x9f\xe8\x2b\xd5\x27\x06\x67\xc8\xf3\xd5\x27\xef\x2e\xfd\x12\xda\x0f\x7a\xb4\xfa\x94\x0a\x4d\xef\x2f\x52\x7d\x1a\x59\x45\x5f\x3f\xbd\xcf\xc9\x93\xab\xb0\x40\xf5\x29\x47\xad\xb9\xac\xba\x4c\x96\x14\xbe\x3b\x60\x73\xa5\x2e\x45\xc6\x74\xb3\x56\xa4\xc7\xa8\x59\x6a\x64\xca\x74\x49\x9a\x66\x9d\x18\xd0\xa0\xd5\x4e\x9a\xc9\x18\x9e\x4b\x92\x76\xad\x33\xe8\xb6\x2d\x43\xaf\x98\x62\x22\x35\x6e\x48\xcb\x6a\x57\x93\x04\xbe\x58\xad\x4a\x59\xba\xa5\x2e\x16\x9f\x57\x7d\x7a\x34\x33\x4a\xed\x57\xaa\x8f\xac\x7c\xd9\xb1\xb3\x7d\x5f\x55\xa4\xcc\xd6\xa3\xfd\x91\xea\x93\xb7\xf2\xf6\xae\x07\x22\x79\x56\x01\xa3\xe0\x7c\x9c\xad\xd6\x04\x34\x29\x4a\xb4\x04\x07\x1c\xc1\x6a\x40\xe0\xba\x12\xad\x9f\x87\x3f\x52\xb5\x28\x75\xf0\xfc\xd9\x83\x7c\x3e\x31\x9b\xaf\x5b\x69\x4e\xbf\x2d\x9b\xf7\xaa\x01\xe1\xab\x2a\x1c\x67\xb3\xf7\xea\xec\x03\xd9\x02\x4e\x1b\xeb\xd3\xd5\x93\x77\xc5\xfc\x82\xde\x59\x85\x29\xf7\xe6\x2b\x98\xbd\xff\x14\x9f\xf1\x94\x0a\xef\x85\xd5\x97\xfe\x0c\xfe\x0e\xab\x65\xff\xf3\xbd\x99\xce\x03\xf2\xf1\xe7\xef\x94\x7e\x06\x2e\x51\x38\x7e\x87\xd4\x27\xd2\x97\x7e\x73\x2b\xd6\x35\xfa\x1e\xb6\xef\xe8\xd9\xf8\x79\xf9\xf9\xfa\xe2\x55\x9b\xb8\x93\xaa\x73\xa0\xf7\x01\xfb\x7e\x40\xbf\xdb\x6d\x7d\x7e\xa9\xfa\xd8\x6e\xeb\xb3\xf8\xbb\xd5\xd1\xfe\xf2\xed\xfb\x51\xfd\x7f\x74\x47\xea\x01\xfe\x25\xd8\x98\x08\x62\x8e\x91\x14\xe5\x90\x13\x9c\x9f\x9f\x07\xaa\x61\x8f\xe6\x0c\x0f\xe8\xa7\xb7\x63\x97\x48\x96\x07\x9d\x6c\x78\xc7\xea\x70\x05\xec\xc7\xbf\xee\xad\xf6\xb3\x63\x47\x2f\x1d\xe6\xd3\x9b\xdf\x77\xab\xcb\x2b\x2b\xbd\xdc\x93\xf1\xbf\x4f\x0c\xce\xe2\x2f\x1d\xc6\x47\xf3\x6f\xd1\xe7\xe7\x7c\x7e\x70\x5a\x3e\x9f\x4d\x9f\x94\x54\x17\xa7\xab\x91\xdb\xeb\xcf\x3f\x4f\x54\x0d\x02\xeb\x85\x28\xab\xf6\x21\x75\x80\x17\xc8\x3d\x6f\x9a\xcb\xb0\x2e\x3d\x5a\xb5\xb8\x07\xff\xb6\x6a\x30\x18\x54\x16\x2a\x29\xe6\x96\x6b\x60\x54\x1c\x7d\x39\x18\x68\x4c\x52\x15\xe7\xe3\x37\xba\xba\x2c\x75\xd9\xe1\x82\xb0\xd0\x9a\x6d\xf1\xd3\x7e\xb7\xa0\x14\xd9\xaa\x5e\x44\x6e\x1a\x49\x7d\x68\x14\x84\x98\x52\x15\xcb\x6f\x25\x32\x9d\xe8\x73\x78\x0a\x77\x13\x02\xae\x8d\x5a\x64\x4a\x78\x76\xd5\x80\x82\x38\x54\x00\x83\x78\x8a\x63\x01\x0e\x78\x9e\xa1\x79\x19\x47\x1c\x20\x81\x8c\x70\x46\xd5\x68\x9e\xa2\x10\xcd\x72\x38\xa3\xe0\xaa\xcc\x12\x04\x47\xf3\x88\x42\x1c\x84\x2a\x40\xde\x0a\x9e\xdd\x56\x0d\xa2\x1e\x32\x09\x54\x0d\xd8\x6b\x55\x03\x82\xe6\xf9\xf3\x2f\xfa\xdf\xdc\x0d\xfc\xac\x85\xdf\xfc\xe9\x6b\xe5\x9d\x1e\x3b\x50\x35\x08\x7b\xb0\x77\x55\x83\x5f\x77\xff\x46\xdc\xf3\x53\xcd\x24\xe7\xc5\x5a\x79\x91\x75\x33\x19\x66\x4c\x54\xf2\x78\x2e\xb7\x26\xd3\xb5\x6c\xa6\xb9\xe4\xdb\x2b\x7d\x2a\x75\xb2\x52\xb5\x22\xe1\xdd\xc9\x30\x6e\x35\xa4\x91\x9b\x17\x66\x15\x93\x26\xd2\x6f\xb1\x32\xab\xe5\xc6\x73\x25\xab\x69\xa5\x62\xab\x94\x4c\xa7\xa4\x54\x43\x4f\x9b\x76\xae\x5a\xba\xb3\x6a\x10\xb8\xfe\xeb\xf6\xcb\x02\x1e\x30\xb4\xfa\x4e\x04\xb2\xa7\x7b\xb3\xa3\xcd\xea\xd7\x17\xe0\xa1\x56\xfd\x1e\xf7\xe5\xd5\xef\x13\xf1\x9f\xe1\xfd\x
16\xfc\x1f\x92\x5d\xdc\x11\xdd\xc5\x8f\xc0\x7f\x47\x76\x23\x1c\xf0\x47\xef\xa5\xf0\xa7\xe2\xe7\xf4\x8a\xec\xfb\xa5\xce\xec\x05\x0a\xc7\xab\x81\xcf\xee\x45\xf0\xaf\x0b\xbd\x08\xa4\xf4\x56\xb9\xd6\x8b\xf0\xd7\x5c\x3d\x7a\xe3\xbb\xc2\x4f\xa8\x0e\xf9\x25\x9f\xdb\xaa\x43\xcb\x0a\x1b\xfe\xb5\x8c\xa0\x7f\xf8\xec\xde\x00\x7f\xc3\xff\xa6\xde\x80\x42\x6a\x36\x3b\x5a\x01\x9f\xcc\x76\x33\x81\x01\x11\x7c\x4b\x70\x8f\xea\xcc\x66\xec\xa7\xf9\xb6\x7b\xf0\x6f\xb3\xdd\x6e\x81\x73\x45\x56\x40\x78\x57\x9e\xca\x5d\xce\xac\xea\x83\x8a\xd2\x12\x95\xb7\x46\x16\xc8\x85\xa4\x05\x14\x05\x08\x1c\x59\x33\xc6\x8d\x58\xbf\x44\xb4\xf1\x61\x92\x8a\x69\x66\xb7\x65\x52\xac\xcb\x19\x90\x5f\xa8\x3c\xb7\x70\x9b\x8b\x5c\xb3\x84\x4b\x65\xa1\x8d\x9c\xbe\xb0\x78\x76\xb6\x4b\xa8\xb8\x4a\x30\x94\xca\x23\x02\x91\x24\x43\x29\x04\xaf\x41\x8d\x81\x0c\x52\x38\x40\x43\x0e\x72\x88\x81\x14\x83\x08\x12\xe7\x19\x15\x02\x0a\x00\x8d\xe0\x37\x9f\x54\x96\x63\xbd\x9f\x4f\x60\x9e\x97\xed\x5e\xeb\xd0\x66\x48\x6e\xfb\x9b\x10\x17\xef\x1e\x9d\x50\x7a\x34\xdb\xbd\xda\xa1\xfd\x53\xb2\xaf\x54\x7b\xc8\x54\xaf\xd5\x2f\x02\xd9\x6e\xd6\xd2\x10\x29\xeb\xea\x20\xa1\x32\x0c\x25\xd8\x53\xbb\x50\x1c\x10\x71\x03\x52\x72\x11\xd2\xdd\x51\x26\x5e\x2a\xb2\xb3\x74\xab\x56\x68\x37\xe4\x09\x3e\x9a\xc8\x85\x24\xc4\xe5\x74\x8d\x16\x44\x33\x99\x4d\xc5\xd3\xab\xbc\x5e\xe8\x93\x0d\x43\x2a\x4f\x0a\x64\x36\x5d\x1b\xb6\xab\xdc\x27\x67\xbb\x3f\xb1\x5e\x74\x55\xde\x17\xb2\xdd\xc0\xde\x4c\x94\x6c\xf3\x5d\x67\xf0\xed\x9d\x19\x89\x27\xe3\xbf\xb7\x33\x24\x9c\xed\x3e\x3b\x22\xbc\xd7\xad\xcb\x11\xe1\x27\x46\x24\xe1\x80\xff\xaf\xd9\x19\x0d\x26\xe5\xe1\xb5\xce\xe8\xc0\xde\xd6\xaf\x96\xed\x0a\xe7\x6b\xd9\x82\x70\xac\x1f\xbf\x5a\xb6\x9b\x55\xc0\x30\x7e\x2d\xdb\x6d\x0a\xbf\x76\xb6\x7b\x52\xfe\x01\xff\xf4\x0b\x67\xbb\x59\x31\xee\x5c\xcf\x76\x83\x2b\xdf\x08\xbe\xe5\x5d\x47\xd8\x4f\xf4\x6d\xf7\xe0\xdf\x66\xbb\xa8\xcf\xb3\xea\x2c\x41\x94\xd2\xa4\x6d\xeb\xc9\xd9\xd4\xb1\x66\x15\x39\x96\xb7\xfb\x8e\x6b\x8f\x8c\xa6\xcc\x2d\xe6\x33\xaa\xd5\x12\x27\x6f\xd2\xcc\xac\xb7\x47\x8e\x95\xb7\x6b\x4e\x2b\x3b\x14\xea\x4a\x4e\xa9\x70\x9d\xdc\x58\x98\x66\xd5\x72\x0e\x67\x86\xb6\xa2\xb4\x32\xb5\xb7\xd4\xd3\xb3\x5d\x85\x85\x32\xc9\x51\xa4\x86\x43\x5c\xa1\x34\xc4\xf2\x2a\x52\x10\xae\x69\x40\xe6\x48\x1e\x68\x2c\xe4\x55\x92\x52\x28\x56\x91\x55\x16\x91\x34\x4e\x52\x1a\x2b\x13\x9c\xc6\xe0\xaa\xa6\x78\xbf\x1c\x43\xfa\xaf\x03\x7d\x46\x47\x18\x75\x2d\xdb\x65\x09\x0e\x3f\xfb\x43\xf7\xfb\xbb\x47\x87\xa8\x1f\xed\x08\x0b\x57\xf4\x03\xd9\xae\xbf\x9e\x8e\x64\x71\xe1\xde\x5a\xef\x0a\x74\x70\x95\xe3\xa6\x3c\xb2\xb8\x82\x30\xac\x97\x35\x4e\xa9\x96\xcb\x86\x90\xea\xac\xf4\x65\x2c\x87\xbf\xa9\x6d\xab\x44\xb5\x74\x4a\x2c\x54\x04\x21\xdf\x49\x9b\xae\xe2\x0e\x92\xb9\x55\x95\x52\x9c\x56\x73\xb5\x80\x6d\x92\xc8\xc7\x87\xa8\xda\xee\xca\x2b\x9a\x48\x0c\x0c\xa9\xdf\x88\x33\xb7\x9d\x1f\x0c\x5a\xcd\xd9\x0c\x2e\x98\xf1\x44\xe8\xd6\xc9\x97\x0f\xf2\x3d\xed\x61\xae\x64\x50\x4f\xc4\x9f\x8c\x8e\xff\x43\x76\xb0\xde\x97\x4e\x2f\x7b\xb9\x9f\xb8\x83\x26\x08\xbf\x6e\x37\xca\x47\xec\x36\x8e\x95\x03\xbc\x33\xd7\xa7\xcd\xd5\x3d\xf8\xb7\x11\x89\x8d\xa9\xb3\xf6\x60\x61\x25\x6b\x39\x3a\xbe\x5c\x0a\x14\x93\x13\x24\xad\x90\xca\x52\x83\xda\x08\x40\x7d\xdd\x6f\xa7\xf4\xaa\xc5\x09\xc2\x48\x69\x23\xb0\x22\x4a\xb9\x41\x3e\xd5\x5f\x2d\x5b\x8d\x62\x62\x22\x6b\x5d\x33\x56\xa3\x13\x6f\xfd\x79\xd1\xec\x66\xca\x39\x22\x5d\x89\x4f\x9e\xdf\xa3\x4c\xd2\x0c\x42\x0a\xd0\x70\x8a\x24\x64\x59\x96\x21\xce\xab\xaa\xa2\x00\x9e\x64\x11\xc1\x11\x34\x09\x29\x55\xe1\x10\xcf\x69\x1a\xcb\xaa\x34\xe2\x55\x84\x00\x85\xcb\x88\xc6\x79\x12\xb2\x9b\x88\x44\x3f\xaf\xfe\x72\xb5\x47\x99\xe2\x88\x0b\
x3f\xb1\xbf\xbb\x7b\xf4\x62\x97\x47\xeb\x2f\x57\x7b\x94\x7f\x4e\x3d\xe0\xd0\x73\x7b\x5e\x13\x03\xf5\x17\x91\x11\x86\xab\x54\xac\x43\x56\x2c\x85\x49\x0f\x98\x11\x1c\x75\xe7\xe3\x6c\x53\x95\x25\x92\x9b\xcd\x4c\x26\xd9\xa1\x4b\x4a\x5c\xc9\x9b\xe9\x2a\x9c\xba\x6f\x89\xb2\x25\x2c\x52\xa6\xdb\x18\x81\xcc\x3a\xae\xb1\x36\xaa\x4d\x3a\x4a\xbe\x5b\x57\xab\xb1\x89\xa8\x34\xcd\x38\x51\x11\xc4\x4f\xae\xbf\x88\xbf\xb0\xbc\x2f\xd4\x5f\x02\x63\xa3\xd4\x3f\x1e\xed\xf5\x7d\x22\xfe\x48\xbd\xbe\x81\xfa\xcb\xb3\xd7\x28\xf7\xf6\x9f\xfc\xc4\x35\x92\x70\xc0\x1f\xbd\xfe\xe2\xeb\xde\xcf\xa9\xbf\x1c\xd6\xcf\x67\x4a\x4f\x7b\x7d\x3f\xc8\xfb\xd7\x58\x4f\x0b\xc7\xf2\x7b\xa4\x3e\xf4\x29\xeb\xfd\xd4\xa9\xfa\x56\xb8\x17\xf9\x97\xa8\x0f\x05\xae\x80\x7d\xfd\x3a\xf4\x25\xce\xcb\xd3\xbf\x2e\x67\xad\x51\x7c\xc4\xc3\x3d\x72\x4f\xf4\x51\x11\x7a\xe4\x1a\x56\xb2\x91\xab\xd0\x9a\x12\xeb\xaf\xad\x76\x65\xda\x9a\xb6\x8c\x58\x57\xad\x01\x87\xd4\x56\x0c\xa3\x59\x79\x6d\xb8\x5c\xf3\x8b\xb1\xd2\x4a\x4c\xa7\x60\x28\x32\x7a\x4a\x28\x23\x50\xee\x34\xe5\x0a\x1f\x9f\xb9\x74\x66\xa4\x2e\x46\xfd\x3c\x48\x31\xc3\xda\xb2\xae\xc5\xbb\x9c\x50\x7d\xfa\xae\x21\x47\xb2\x10\xc8\xa4\x8a\x34\x42\x21\x54\x55\x53\x15\x15\xd1\x32\x45\xc9\x1a\xce\xa9\x2c\x04\x00\xf2\x24\xc1\xa9\x1c\x8b\xab\x1a\x60\x35\x48\xc8\x32\xc3\xa8\x94\xc2\x91\x04\x83\x2b\xac\xff\xb3\xc0\xe0\x91\x9f\x86\x08\x64\xad\xe4\xb5\xac\x95\xc1\x01\xc7\x9f\xcd\x5a\x77\x77\x8f\x5e\xfe\xe7\x6b\xe5\x9d\x96\x10\xc8\x5a\x4f\x7a\xbe\x80\x96\xdd\xbb\x8e\x7f\x20\x72\x84\x0f\xf2\x67\xf7\xf4\x56\x33\x56\xb5\x62\xa4\xf0\xac\x98\x30\xf0\x26\xa7\x64\x25\xc6\x1c\xbd\x91\xd6\x50\xd4\xd4\xb1\xcb\x8e\xa7\xc3\x6a\x36\xe7\x12\xee\xac\x56\x84\x6f\x6f\xc0\x28\x55\x17\x4d\x8a\x2c\x71\xf3\x98\xe9\xe6\xab\x8e\x40\x29\x04\x4a\x74\x5c\x3e\xd3\x9d\xc4\x09\x3e\x5d\x1c\x98\x32\x04\xd5\x9b\x4e\xce\x05\xe5\x73\x3e\x73\x0b\x0c\x8a\x70\x4a\x25\x6f\x1e\xe6\xe3\xcc\x75\x25\x73\x7a\x1e\xfe\x33\xe7\xf2\xaf\xd4\x5d\xc2\x5e\xf1\xb3\xbb\x9c\x7d\x52\x6e\x89\x3c\xbf\x68\x97\xf8\xc9\xba\x48\xc0\x23\x3f\x90\x85\x45\xaf\x8b\x88\x8f\xe1\x8f\x5a\x97\xd9\x46\x18\xd8\x17\x62\xb9\x4a\x55\xa9\x66\x8a\xb5\xc6\x72\xa1\x90\x53\xd4\x32\x91\x51\xad\xf1\x04\x24\x67\x70\x5a\x13\x14\x83\x84\x04\x2d\xba\x93\x91\xd9\xce\x2d\x53\x89\xc6\x40\x59\xaf\xe2\x0c\xc2\x47\xc9\x45\xaa\xbf\x7e\xa3\xd2\x88\xc4\xdf\x0c\x33\x2e\xe3\xcd\x59\x59\x75\xed\xd6\xf3\xbb\xb0\x11\xa5\x21\x59\x66\x20\x94\x69\x8d\x23\x55\x8d\x52\x00\x2f\x53\x1a\xd0\x18\x99\x63\x38\x56\x03\x80\xe3\x18\xc0\xf0\x88\xe7\x19\x04\x18\x19\x42\x96\x86\x50\x21\x54\x0e\xe2\xbc\xc2\x70\x7e\x84\x21\x1e\xf9\x49\x9b\xfb\x22\x0c\xc9\x9c\x3f\xbb\xbd\xb9\x4b\xbf\x84\x5e\xe6\xba\xad\xd4\xdf\x57\x09\xbc\x27\xc2\x7c\x76\x6e\x1d\xc0\xbf\xa7\xb7\x2a\xea\xfd\x76\xa2\x98\x1a\x8c\xa5\xe2\x5a\xa9\xa4\x19\x8b\x53\xa7\xcd\x6c\xc9\xc9\xe1\xed\xbe\xeb\xb6\xc5\xb8\x90\xa8\x17\x60\x56\x52\x62\x78\x7a\xea\xa4\x63\x33\x83\x8e\xa1\xe1\x8a\x12\x6b\xab\x19\xcf\x77\x6b\x4c\x42\xcc\xa6\x29\x69\xa4\xaf\x8a\x66\x7d\x4a\xae\x95\x94\xcd\xdf\x56\xd9\xbf\x3b\xc2\xdc\x7b\x0e\xf4\xd9\x11\xe6\x41\xfc\x4f\x8c\x30\x9f\x78\x0e\xd8\x27\xe5\xce\x08\xf3\x4b\x9c\x53\xde\x5e\x57\x23\xcc\xbd\xe7\xe0\x9f\x1d\x61\x22\xe0\x7f\x30\xc2\xd4\x93\xe9\xae\xe8\x94\xcb\x5d\x7c\x2e\x2d\xb8\xce\x74\xb0\x24\x26\x55\x65\xb5\x18\xd4\x60\x86\x00\xdd\x06\x27\xa4\x73\x13\x4b\x01\x6b\xb6\xda\x9e\x2e\x3a\x0c\x53\x34\x69\xa6\x51\xd4\x6b\xd9\x02\x4b\x82\x7e\xd6\x12\xab\x4a\x47\x27\xda\xa9\xdc\x4c\x2d\xd8\x9a\xb4\x28\xcf\xc1\xf3\x2b\xef\xb2\x4a\x71\x8c\x2a\xab\x2a\x4e\xa8\x14\x83\x73\x80\x65\x58\xa0\x50\x90\x86\x2c\xe2\x55\x06\x71\x0c\xad\x40\x82\x57\x64\x0a\x20\x86\x50\x59\x08
\x35\x16\x87\x84\x86\x10\x2d\x93\x8c\x8a\xfc\xbd\xe0\x67\xbd\x1d\xe4\x6a\x84\x61\x09\x8a\x3c\xbf\x86\x61\x09\xef\xe0\xd1\xd1\x3b\xbe\x1f\xdd\x0a\xbe\x10\x60\xfc\xad\xe0\x48\x4b\x12\xf2\xca\x56\x70\x82\x1f\x8e\x0b\x6d\x62\x4a\xce\xd9\xaa\xb6\xe2\x2a\x25\x34\x14\x65\xd0\x68\xe4\x68\x63\x39\x1d\xe6\xf0\x84\xa5\x77\xec\xb2\xcb\xea\x65\xc0\x10\x55\x79\xd8\x27\xd4\x7a\xa3\xa9\xa1\x94\x35\x57\xf0\x8a\x00\xb5\x7e\xaa\xb3\x74\xfb\x2d\x61\xe4\x14\x67\x83\x51\x62\xbc\x1a\x24\x84\xee\xaf\xb8\x15\x7c\xe6\xfa\xb4\xad\xe0\x68\x01\xe3\x43\xb6\x82\xcf\xe0\xfc\xb4\xed\xc5\x33\xa7\x88\x3e\x6e\x2b\xf8\x03\x97\x64\x57\x03\x52\x04\x59\x3d\x1c\x90\x7e\xd2\x56\xf4\x36\x20\x65\x89\x7a\x77\x22\x43\x1b\xc5\xdd\x44\xbc\xb8\xe0\x96\x4c\xb5\x36\x6f\x49\xa5\xc1\xb8\x98\x99\x56\x07\xd5\x8c\x91\x40\x0e\x43\xce\x04\xb6\x63\xbf\x25\x66\xf5\xec\x1b\xc8\x4b\x35\x9e\x2a\x1b\xfc\xba\xca\x25\x26\x31\x51\xd2\x32\x44\xba\x99\x6c\x2f\x66\x4c\xb9\x99\x91\x0b\x25\x31\xf1\xf4\x80\x84\xe3\x50\xe6\x15\x4d\x41\x84\x4c\x29\x14\xa0\x80\xca\xc9\x32\xc3\x32\x9c\xaa\xe2\x3c\x25\xab\xb2\x0c\x14\x96\xc2\x19\x16\xe0\xaa\x2c\x93\x32\xc4\x91\xc6\x73\x1c\x8b\x6b\x0c\x09\xfd\x57\x47\x91\xdb\x25\xcf\x13\xb6\x82\x6f\x08\x48\xcc\xf9\x9d\xe0\xed\xcd\xa3\x5f\x8a\x78\x74\x23\xf8\x13\xe3\x51\x60\xe3\xb6\xd8\x92\x73\xb9\x18\x28\x22\x63\xbd\xd4\x4d\x25\x95\xa3\x98\x42\x76\x95\x70\x71\x3a\x96\xe5\x09\x13\xc4\x6a\x24\xc1\xe5\xf9\xb5\x34\x22\x34\x3d\xdf\x26\x4c\xcd\x24\xd3\x39\xab\xd8\x72\x88\x69\x0b\xc4\x92\xcd\x89\x0d\x08\x00\x89\x04\x2b\x94\x98\x6c\xc1\xa6\xcc\x44\x51\xc8\x7d\x48\x3c\x8a\xb0\xb9\xf8\xd4\x78\xf4\x20\xfe\x27\xc6\xa3\x47\x37\x0e\x1e\x8d\x47\x4f\xc1\xff\x40\x3c\xfa\x25\x36\x7f\xb6\xd7\xb5\x78\x14\x45\x56\xcf\x8c\x47\x51\xf0\x3f\x18\x8f\xdc\x96\xa3\x0f\xd8\xa6\xcb\x14\x2b\x99\xa4\xba\x28\x0d\x06\x96\xa5\xe5\x32\x64\xc3\xa4\x34\x12\x6a\x1d\x3a\xab\xc9\x0b\x93\x2c\x11\x9d\xb7\x98\x30\x92\xa1\x1a\xd3\x94\xa6\xa1\xbb\xed\x35\x9c\x74\x3a\x54\xbe\x3e\x9e\x77\xe4\x96\xdd\xcd\x3a\xc2\xda\x52\xa6\x73\x43\x11\x1d\xe1\xe9\xf1\x88\x46\x1a\xaf\xe0\x8c\x2c\x33\x04\xad\x12\x2a\x24\x08\x0e\x00\xc0\xc9\xaa\x06\x28\x95\xc3\x39\x96\xe1\x48\x9a\x44\x88\xd0\x14\xc8\x52\x34\xab\x70\x88\xc6\x19\x4d\x46\x32\x0f\x34\x85\xa6\xfd\x78\x44\x3d\x29\x1e\xdd\xd0\x2c\xcb\x53\xe0\x42\x40\xe2\x29\xe2\x25\xf4\x3b\x41\x8f\x46\xa4\x4f\x6c\x96\x0d\x44\xa4\x72\x86\xc8\x14\xca\xbc\xc1\xe0\x4a\x09\x54\x0b\x44\x53\x02\x7a\xc7\xce\x8a\xdd\x01\xca\x14\x6b\x9d\x74\x83\x76\xcc\xe4\xb4\x4c\xa7\xcd\x8a\xd8\xa0\x95\x96\x43\xe2\x12\x9b\x6b\x54\xde\x98\x6c\x96\xa9\x2a\x6f\x44\x4e\x4d\x02\x85\x61\x16\x45\x1d\x51\x99\x55\x8d\x9b\x4f\x16\x0b\xfc\x57\x8c\x48\x0f\x37\xcb\x3e\x88\xff\x89\xcd\xb2\x4f\xd9\xca\x7e\xa0\x59\xf6\xb3\xb7\xd2\x05\xe1\xd7\x2d\x29\x5e\x6b\x96\xfd\xcc\x88\x20\x7c\xc0\x5c\xdd\x83\x7f\x1b\x91\x28\xb9\xdb\xb5\x58\xa2\x68\xd7\x16\xeb\x96\x60\xbf\x55\xc9\x76\x02\x39\x4a\xcd\x90\xa8\xae\x9b\x56\x5a\x95\xf1\x4a\x29\x58\xec\xb0\xad\x70\xb5\xca\x68\xd9\x5e\x6b\x55\xd2\xe9\x97\x78\xd3\x52\xb4\x49\x62\x69\x76\xfb\x78\xb1\xb5\xb6\x6d\x61\x96\x60\x75\x39\x9f\x5d\xc5\xcc\x54\xea\xf9\x6d\x07\x24\x4b\xa1\x4d\x38\xe0\x65\x1e\x69\xac\x2a\x43\x1e\xd2\xaa\x4c\x92\x24\x2f\xb3\x9c\xa6\x42\x4e\x23\x29\x96\x65\x65\x00\x35\x92\x94\x21\xc5\x70\x50\xa5\x15\x5c\xd5\x78\x8a\x51\x29\xf5\x65\xf7\x8b\x94\xd1\x7f\x27\x75\xf7\x3b\x9b\x17\x02\x11\xc7\x10\x04\x71\x36\x10\x6d\xee\x92\x2f\xc7\xbf\x0e\xe7\xeb\x62\xa6\xc8\x65\xab\xf3\xea\x50\x2e\x10\x59\x81\x6c\xb7\x06\x35\xbb\x30\x1e\x74\x70\x5c\xcb\x70\x4e\x31\xc7\x8e\x71\xb1\xb6\xc8\xb7\xe3\x42\x87\x14\xf6\x71\xc8\xbb\x2e\xac\x8c\xfc\x2b\x42\xa5\x28\x78\xc4\x35\xd1\x9
a\x2f\xd2\xfc\xe6\x96\x98\x4c\xad\xa7\xf3\x61\x35\x51\xb5\x24\x21\x6f\x68\x95\x5a\x27\x65\x15\xfb\x73\x77\xa5\x34\xc8\x51\xba\x92\xac\xd2\x40\x1f\xaa\x4e\x3a\x0b\x13\x52\x7b\x81\xd3\xf5\x78\xab\xdf\xc6\x3b\xfa\xd0\xc6\x93\x89\x8a\x48\x49\x30\xdd\x22\x0a\x63\xc5\x21\xdf\x16\xc5\xb1\x21\x53\x8d\x9a\x5d\x2a\xde\x10\x7b\x8e\x94\xf6\x38\xf6\x04\x78\x3e\xf8\xde\x80\xaf\x4a\x18\xf1\x04\x5e\xc4\xf3\x99\x95\xdb\x5f\x48\x60\xd4\xc5\xe1\x6a\x62\x01\x5e\xca\x2e\xe7\xc5\xe4\xaa\x4c\xbb\x09\x51\x49\xfa\x3c\x92\xba\x6b\x97\xcd\x6e\x9c\x5e\x5c\x23\xe2\x8a\x3d\x3f\x80\x5f\xb2\x57\x8d\xda\x03\xf8\x05\xe1\xe7\x55\x7c\x4e\xfa\xd6\x44\x74\x59\x94\xcd\xb7\x8b\x6c\x5e\x93\xc5\xa3\x73\xb1\xd1\x85\x98\x12\x82\x77\x97\x2c\xfe\xa3\x73\x8c\x4d\x8b\x42\xb3\x90\xaa\x26\xbb\xe6\x1a\x6f\x2d\x98\x24\x25\xb3\x8a\x29\xf2\x74\xad\xb1\x18\x96\xd5\x6e\x3e\x2b\x27\x6a\x84\xde\x68\x39\x52\xb9\x39\x07\xdd\x96\x9b\xa6\xf2\x05\x5e\xd0\x1b\xcb\x72\xaa\xdd\x6f\xa9\xc6\xc4\x2c\x4a\x84\x92\xa4\xad\x71\x4c\xc4\xe1\x3a\xf9\xf4\xa3\x71\x80\xa1\x20\x8d\x33\x14\x92\x21\x43\x69\x84\xa2\xca\x50\x95\x39\x9a\x91\x35\x92\xa2\x38\x8a\xa3\x35\x85\x21\x18\x82\x62\xa1\x0a\x49\xa4\x92\xbc\xa2\xaa\x1a\xae\x31\x3c\x4e\x00\x92\x94\x99\xfd\x0f\xfe\x3e\xe2\x5b\x89\xeb\xbe\xf5\xd2\x3e\xfb\xee\x6e\xf0\x77\x2e\x1f\xf5\xad\x17\x72\x7c\xff\x8a\x90\x63\x9e\xf1\xad\x42\x5d\x7c\x33\xde\x64\x94\x5e\x24\x1d\xbb\x90\x30\x0b\xb9\x99\x13\xc7\xe9\x96\x9b\x4f\x25\x6c\xdd\x72\x66\xfd\x62\x35\xde\x64\x3a\xcd\x01\xe5\x2e\xda\xab\xbe\xc3\x36\xdd\x3a\x95\x2c\xa1\x65\xb9\xc4\xe4\xa7\x8a\x36\xcd\x17\x00\xde\x1e\x25\x86\xc3\x85\x49\xe9\x5c\x25\xa7\x0d\x72\x99\x5f\xcb\xb7\x3e\xea\xdb\x1e\xb5\xe7\xd2\xa2\x38\x9e\x3c\xd1\xb7\x7e\x66\xf5\xe2\x5a\x25\xe5\x33\x7d\x9b\xf0\x24\xdf\xca\x51\x87\xe7\xcf\x54\x21\x2f\xf9\x56\xb1\x9f\xe9\x8e\xdb\x64\x5f\x11\xec\xc2\x4a\x7f\x5b\x19\x45\xbb\xc2\x97\x5b\x72\xbd\xba\x80\x54\xa1\x58\xb4\xea\x78\x05\x94\x47\x20\x17\x2b\x2a\x69\xc7\x92\xcb\xa0\xd8\x9c\x09\x83\xac\xd3\x18\x94\x0d\x68\x66\x19\xa3\xee\xaa\xe9\x49\xf5\x2d\x5f\xca\xc7\x72\x95\xd4\x2a\x4b\xad\x9e\x5f\xd9\x27\x54\x92\xa1\x15\x52\xa1\x78\x8d\x64\x19\x9a\xc5\x55\x6d\xe3\x59\x15\xc8\x10\x34\x4e\xf2\x0c\xa3\xe1\x88\x60\x78\x48\x68\x34\x50\x54\x99\xe1\x14\x44\x20\x04\x78\xc8\x6a\x1c\x41\xd1\xd0\xf7\x8c\xe4\x63\xbe\xf5\x6a\x41\x9f\x63\x28\xe6\xfc\x0e\xf3\xee\x6e\xf0\x17\x7b\x1f\xf5\xad\x17\x8e\x76\xf9\xd7\x83\x3f\x3e\x19\xf0\xad\x89\x78\x3e\xa1\xcf\x73\x25\x16\xe8\xc5\x44\xae\x8f\x37\xca\x12\x2f\xc7\x46\x31\x61\xf2\xb6\x06\xc9\x42\xaa\x6a\x34\xd3\xf5\x16\xee\x28\x35\x7d\x96\x00\xd0\x5c\x95\x16\xa5\x1a\xe3\xd0\x75\x03\x26\x17\x6e\xca\x5c\x3a\x15\x13\x02\xa2\xa6\x36\xab\x85\x51\x96\x80\xa8\x24\xac\xb8\xec\xaf\xe5\x5b\x1f\xf5\x6d\x8f\xda\x73\x11\x1f\x32\x89\x67\xe6\xad\x9f\xd8\x9c\xf9\x11\xbe\x35\xaa\x6f\x13\x9e\xe4\x5b\xd9\xc0\xfb\xc4\xdf\xbd\x68\xee\xaa\x2c\xfe\xa3\xad\xaa\xdd\xe2\x2a\x55\xe6\xbb\xc9\x45\x3d\xa7\x8a\xeb\x81\xd6\x8e\x4f\x34\xbd\xd0\x56\xf8\xaa\xa1\xe3\xb9\x51\xa2\x39\x52\x8b\x59\xb1\x5b\xd4\xd5\xf6\x84\x1b\x94\x52\x22\xb2\xcb\x4d\x6b\xac\x3b\xc5\x2a\xcd\xc9\x29\x9a\x70\xd5\x61\xa7\x25\x26\x27\x35\x7e\x6c\x0e\x84\xd2\x3c\xf1\xf4\xbc\x15\xc7\x49\xc4\x03\x80\x03\x15\xf0\x10\xca\x32\x45\xf0\x14\x0f\x78\x9a\xe4\x70\x86\xe3\x18\x95\x65\x59\x85\x60\x70\x82\x24\x15\x86\x62\x39\x0d\x28\x3c\xc1\x41\x96\x47\x0a\x85\x08\x96\x22\x7d\xdf\x4a\x3d\xe6\x5b\xaf\x16\xa7\x39\x86\x66\x2f\xf9\x56\xff\x6e\xf0\xb7\xc7\x1f\xf5\xad\x17\x5e\x5b\xe6\x5f\x11\xda\x0d\xcf\xf8\xd6\x54\xae\x3c\x53\xd7\xb3\x81\x66\xc5\xd4\x7a\x2e\xdf\xe9\xc8\x38\x53\x1f\x14\x13\xc5\x62\xc7\x49\x2f\xcd\x96\x59\x03\xd9\x3c\x1a\x68\x
53\x42\xd2\xd3\x4d\xb7\x38\xeb\x10\x05\xbb\x25\xd5\xdf\x56\xb5\x44\x26\xd6\x9e\x6b\xaa\xd5\x59\x24\x2b\xca\x84\xa3\xfb\xa9\x9a\x9c\x1c\x0f\xe4\x4a\xb7\xf4\x6b\xf9\xd6\x47\x7d\xdb\xa3\xf6\x5c\xa0\x62\xd2\xdb\x33\x7d\xeb\x27\xb6\x25\x7e\x84\x6f\x8d\xea\xdb\x84\x27\xf9\xd6\xa8\xf5\xa1\xad\x6f\x5d\xa5\x6c\x53\x61\x2d\x92\xa9\x98\x4d\x23\xd9\x02\x04\x5f\x1e\x5a\xab\x45\x7d\x05\xd8\x84\xf2\x06\xde\x70\xb5\xb6\xea\x58\x24\xe3\x0e\xba\x89\x56\xb1\xce\x8e\x78\x63\x0a\x1a\xcd\xb1\x32\xac\xd6\xe6\x23\x9b\x24\x61\x9a\x13\x06\x64\x41\x2b\x77\xab\x78\x7b\x31\xb1\x88\x35\x71\x6b\x13\xfe\x97\x2f\xdf\xbf\x7f\xf9\xfe\x1d\x93\xe0\x18\xfd\xc0\xa0\xe3\x20\xb7\xe7\xb8\xd0\x75\x82\x7f\xf7\x26\x43\xb4\xfa\x03\x6b\xac\x26\xe8\x07\x96\x2c\x4b\xf5\x46\x4d\xc8\x49\x8d\x3f\xb0\xba\xd2\x47\x63\xf8\x03\x9b\xcc\xe4\x91\xa1\xfc\x81\x95\x17\x26\xb2\x7f\x60\x1b\x88\x5f\xbe\x08\xc5\x86\x58\xc3\x1a\x42\xa2\x28\x62\x65\xa9\xd8\x0d\x42\xfc\x82\x61\x18\x26\xa4\x52\x01\x68\xef\x10\x62\x95\x5a\xae\x24\xd4\xba\x58\x41\xec\x62\xbf\x19\xea\x3b\x6a\x75\xcb\x9e\xf4\xc6\x86\x6e\x43\x3f\x0c\x84\x3e\x3f\x89\xea\x10\xd4\x53\x94\x9f\x42\x7c\x95\xfa\x5d\x24\xf3\xd8\x76\x42\x1f\xfd\xff\xf5\x14\x4b\x45\xdb\x3f\xdd\xd5\x64\xf7\xa7\xe1\x38\x33\x64\xf7\x9e\xc2\xdd\x31\xda\x53\xcc\x45\x22\x0c\x6b\x4a\xb9\x6a\x53\xc4\x7e\x3b\x0c\x7f\xc5\x0e\xe3\x77\x7f\xfb\x0f\xdc\x29\x9a\xe7\x4c\xeb\xdd\x8c\xdf\x35\xa9\xd6\x04\xf9\xca\xd0\x9b\x40\xdb\x35\x14\x63\x02\xcd\x00\x27\xa7\x6f\x3f\x99\xb3\xd3\x48\x2e\x71\x7a\x81\xac\x9b\x39\x0f\x24\x66\xa7\x79\x3f\x37\xe0\xc9\xdc\x9f\x43\x73\x89\xff\x8b\xa4\x5d\x95\x80\xaf\xd2\xf2\xca\xd3\xf6\x1d\x23\x39\x29\x25\x76\xae\xf0\x90\xac\x89\x42\x43\xf4\x87\x1e\x43\xc1\xca\x52\xd8\x18\x9a\xf5\x9c\x94\xc1\x64\xd7\x46\x28\x68\x5d\xe7\xa9\xf1\x6d\xec\x71\x7a\x7c\x38\xb7\x51\x74\xc6\xae\xe5\x55\x0f\x2a\x8a\x35\x33\xdd\xc8\xe4\x1c\x40\x04\x29\x39\x5a\x0d\x1c\xd3\xe3\x0f\x7e\xc5\xb6\x7f\xf4\x1c\x34\x9d\x21\x53\x79\x2f\x30\x79\xd5\xeb\x43\xa7\xff\x08\x65\x9b\xe7\x6f\x23\x2b\xa8\x69\x9b\xa7\x4e\x51\x33\x42\xaa\xfe\xc0\xc4\xed\x21\xdc\x46\x91\x3f\x76\x2f\x9e\x57\x0c\x4e\x26\x23\x43\xf1\xdd\x81\x65\xab\x67\xdc\x74\x0f\x6d\x74\xc3\xbb\x1f\x81\xd2\x6d\x94\xf0\x09\x0e\x81\x0b\x92\x8d\x34\x0d\x29\x61\x55\x7b\xef\xb5\x0c\xf5\x15\xfb\xea\x3d\xfc\xf5\x1c\xb1\x86\xfa\x24\x32\x0d\xf5\x66\x02\x77\xaa\xb7\x21\x2f\x02\xd1\xd6\xa4\x37\x79\x16\xdd\x5b\x58\x41\xd2\xcf\x84\xaa\x48\x9c\x9c\x66\xc0\x5d\x3e\x8f\x81\x2d\xac\x33\x3a\x1d\x91\x85\x20\x84\x53\x4c\x58\x93\x8d\x56\xf6\xad\x48\x3c\x6c\x89\x3f\xc0\x88\x2a\xfc\xcb\x82\x76\xb6\xd6\xee\xb9\xea\xc7\x65\x7d\x0c\x2e\x48\xb2\xff\x7d\x88\xc6\xd3\x14\x05\xe5\xfa\x2c\xb2\xde\xc1\xbc\xcd\xbd\x9d\x22\xd0\xf5\xa7\xc4\x7d\x64\x5a\x0f\x30\xa2\xab\xe4\x35\xf5\x73\x6d\x75\x83\x44\x86\x0e\x7a\x38\x7e\x9e\x02\x16\xa2\x5c\x45\x21\x3a\x83\x63\xaf\x12\x68\x69\xda\x03\x41\xeb\x3d\xa8\x9b\x88\xf3\x46\x5e\x22\xcd\xa3\x1d\xd9\x4f\x13\x5f\x08\xde\x35\x22\x43\xc3\x6f\xa1\xf4\x39\x72\x3c\x82\x76\x2b\x95\x57\xa5\xf9\x1c\xda\x6e\xa2\xe9\x32\x2d\x3b\x8a\x47\x96\x35\x9c\x4d\x1e\xa3\xe8\x18\xd6\xcd\x33\xea\xe7\xbb\x67\xe8\x9b\x40\xc3\xee\xb9\xc6\x18\x3d\x85\xc2\x30\xb4\xdb\xec\x76\x4b\xe0\x2b\x16\x26\xf9\x15\xdb\xba\x78\x65\x64\x39\x48\xed\x41\xf7\x0c\x13\x4f\xf0\xdb\x5b\x38\xd7\x28\xbe\x33\x3b\xda\x40\x7d\x9a\x74\xef\x10\xec\x55\xb9\x19\xa6\x8a\x96\xbd\x50\xca\xe1\xf4\x2c\xb3\x07\x55\xd5\x46\x8e\xf3\xa8\x40\xaf\x22\x38\x5a\xa7\x6d\x6f\x87\x56\x46\xfe\xc0\x3b\x68\x7f\x5c\x0f\x2e\xc1\xbe\x4e\xf1\x09\x2b\x3b\x06\xb8\xcd\xc2\x37\xf0\xdc\xd5\x24\xfa\x1a\xfc\x22\xd4\xab\x69\xff\x66\xd0\x15\x42\xb7\x39\xd4\x06\xe4\x5e\x89\x9e\x44\xed\x29\
xd0\x57\xd3\xb7\x5b\x35\x39\x00\xfc\xd9\xca\x70\x04\x3a\x4a\xbe\x79\x1e\xdc\x78\x62\xd9\x1b\xc7\x37\x47\xb6\x63\x58\xe6\xf3\x05\x1d\xc6\x70\x9d\xfc\xd0\x03\xb7\x33\xb3\x75\x3d\x11\x2b\x15\xb7\xc9\x3f\x80\xe3\x2a\x27\x81\xb1\xb7\x33\x31\xb1\xd1\xdc\xb0\x66\xce\xa7\x70\x73\x0a\xd9\x55\xb6\x4e\x3d\x74\x3b\x7f\xbb\x22\xca\x87\xf1\xb4\x43\x70\x95\x8f\xb3\xd5\xae\x63\xd0\xfb\x78\xfb\x21\xa6\x1d\x86\x7e\x72\x01\x7c\xaf\x81\x1f\x03\x3d\x5e\x42\x3d\xc9\xc2\x2f\xa1\xb8\x85\x87\x2b\xeb\xba\x8b\xc8\x9e\x17\xbe\xde\x03\xbe\x89\xf6\xeb\x41\x2c\xb8\xd8\xfe\x08\xb5\x79\x0f\x3f\xf2\x52\xdf\x4b\xe2\xf6\x81\x7c\x57\x61\xec\xc9\x96\x35\x8c\x2c\xe5\x0b\x30\xaf\xa6\x08\xbf\xfd\xa6\x22\x17\x1a\x23\x07\xfb\xfe\xcf\x7f\x62\x2f\x8e\x35\x52\x03\xbb\x69\x2f\x3f\x7e\xb8\x68\xe9\x7e\xfb\xf6\x8a\x9d\x1f\xa8\x58\xea\x6d\x03\xfd\x5a\xfc\xf9\xa1\xb2\x35\xd3\xfb\xee\x4d\xe8\x8f\x86\x5e\x26\xe0\x68\x68\x88\x84\x6f\x58\x3b\x2b\xd6\x44\x5f\xc9\xb0\x3f\x31\x92\xbc\x79\x23\xda\x50\x7b\x5a\x60\x9b\x28\x5d\xf8\x9c\xed\xe8\x2d\x5a\x2c\x5d\xae\x89\xb9\x8c\xb4\xdf\x02\xc2\x6a\x62\x5a\xac\x89\x52\x52\xac\x87\x76\x45\xbc\xbb\x65\x09\x6b\x56\x52\x1b\x95\xa9\x89\xf5\x46\x2d\x97\x6c\x6c\xbe\x4a\x89\x45\xb1\x21\x62\x49\xa1\x9e\x14\x52\xe2\x85\x7d\xb4\xcd\xba\xe3\xf8\x63\x2f\x54\x8a\x79\x9e\x30\x8e\xf1\x5c\xd9\x24\x3b\x47\xc9\xb1\x7c\xc2\x65\xa3\x93\xc2\xda\x26\xfa\x57\x76\x14\xcf\x4a\x62\xbb\x94\xfd\xe9\x72\x08\xd2\x71\x4a\x0a\xbb\x2a\xc1\x65\x85\xb9\x4f\x02\xef\x8b\x4a\x3f\x51\x0c\x67\x88\x39\x96\xc5\x89\x32\xd8\x73\x95\x22\x5c\xe2\xf8\x15\x04\x72\x5e\x35\xde\xd5\x90\xae\x6a\xc7\xf7\xef\x18\x54\x55\xa4\x62\x63\x68\xce\xe0\x68\xb4\x3a\x22\x34\x97\xc6\xc4\x4e\xae\xde\xa8\xfb\x24\xfb\x8c\xfd\x3e\x44\xab\xde\x1c\x8e\x66\xa8\xb7\x01\x88\xb0\x54\xad\x5c\x09\xd2\x7d\x78\x2a\x34\xd2\xdf\x90\xff\xe2\x8d\x0f\x23\x38\x0d\xfb\x8f\x5d\x84\xf4\x87\x87\x31\xff\xe6\x89\xed\xe2\xb5\x91\x91\xd2\x87\x36\x54\x5c\x64\x63\x73\x68\xaf\x0c\x53\xff\x8d\xa0\xe9\x6f\x98\x54\x6e\xf8\xfd\x4b\xd7\xa1\x78\x48\xaf\xc1\xf9\x12\xea\x2c\x0d\x53\x7b\x78\xfb\xe8\x72\xd2\x33\x4c\x1d\x39\x6e\x6f\x04\x37\xff\x78\x89\xf6\xcb\x2b\xf6\x82\xbf\x7c\xfb\xe3\xbd\xaa\x84\x00\x9d\xd2\x95\x53\x92\x3e\xee\x2f\x18\xa2\xd5\x66\xc2\x8f\xe4\xb9\xaf\x97\x38\x86\x6e\x6e\x52\xfd\x1b\x04\xba\x7b\xe8\x84\x34\x18\xea\xdb\x0d\xb2\xc4\x7c\x64\xd1\x9f\x5f\x20\x43\xef\xbb\x98\x61\xba\x48\x47\x76\x68\x1a\xbf\x7f\xc7\x16\x08\x5b\x18\xa3\x11\x36\x9d\x21\x7b\x85\xc9\xab\x1d\x42\xc7\xc2\xdc\x3e\x74\x31\xc3\xc1\x16\xfd\xfd\xb7\x86\x83\xb9\x7d\x84\x69\x86\xed\xb8\x98\xe1\xa2\x31\x66\x98\xde\x37\x8a\x35\x9e\x58\x8e\xe1\xa2\x8d\x70\x6f\x20\xeb\x48\xda\x3e\xf0\x7d\x93\xc0\xb7\x2f\xbe\xb1\x6d\xec\xad\x62\x39\xae\x6e\xa3\x7a\xb5\x88\xa9\xd0\x85\x1b\x77\x8e\xa9\xb3\xf1\xc4\x43\x38\x42\x2e\xf2\xfc\xc5\xff\x0f\x00\x00\xff\xff\xaa\x18\x0c\x0f\x44\x4e\x01\x00") + +func offer_idsHorizonSqlBytes() ([]byte, error) { + return bindataRead( + _offer_idsHorizonSql, + "offer_ids-horizon.sql", + ) +} + +func offer_idsHorizonSql() (*asset, error) { + bytes, err := offer_idsHorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "offer_ids-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4e, 0x24, 0x45, 0x3, 0x34, 0x27, 0x77, 0xb2, 0x26, 0xf5, 0x69, 0xdd, 0x48, 0x7b, 0x17, 0x9a, 0x84, 0x69, 0x1e, 0x91, 0x72, 0x24, 0xc, 0x37, 0x24, 0x27, 0xde, 0x70, 0x6e, 0x36, 0x5e, 0xcb}} + return a, nil +} + +var _operation_fee_stats_1CoreSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x6b\x8f\xe2\xb8\xd6\xee\xf7\xf9\x15\x68\xbe\x74\x8f\xe8\xd9\x38\x71\xe2\x24\x33\x67\xb6\x14\x20\xdc\x09\x77\x08\x1c\x1d\xb5\x6c\xc7\x09\x81\x90\x40\x2e\xdc\x8e\xf6\x7f\x3f\xe2\x52\xdc\x8a\xaa\xa6\x80\x9a\x7e\x8f\xf6\x20\x75\x75\x20\x8b\xb5\x96\x1f\x2f\x3f\xb6\x97\xed\xf0\xfb\xef\xbf\xfc\xfe\x7b\xa2\xee\x87\x91\x1d\xb0\x56\xa3\x92\x30\x71\x84\x09\x0e\x59\xc2\x8c\x27\xd3\x5f\x7e\xff\xfd\x97\xcd\xfd\x6c\x3c\x99\x32\x33\x61\x05\xfe\xe4\x28\x30\x67\x41\xe8\xf8\x5e\x42\xf9\x17\xfa\x17\x77\x22\x45\x56\x89\xa9\xfd\x7d\xf3\xf5\x0b\x91\x5f\x5a\x5a\x3b\x11\x46\x38\x62\x13\xe6\x45\xdf\x23\x67\xc2\xfc\x38\x4a\xfc\x95\x00\x7f\x6e\x6f\xb9\x3e\x1d\xbf\xfe\xd4\x31\x5d\xf6\xdd\xf1\xbe\x47\x01\xf6\x42\x4c\x23\xc7\xf7\xbe\x87\x2c\xdc\xe8\x7d\x2d\x4c\x5d\x67\xa3\x9a\x79\xd4\x37\x1d\xcf\x4e\xfc\x95\xf8\xd2\x69\xe7\xe4\x2f\x7f\xbe\xd8\xf6\x4c\x1c\x98\xdf\xa9\xef\x59\x7e\x30\x71\x3c\xfb\x7b\x18\x05\x8e\x67\x87\x89\xbf\x12\xbe\xb7\xd7\x31\x64\x74\xfc\xdd\x8a\xbd\x9d\x2d\xe2\x9b\x0e\xdb\xdc\xb7\xb0\x1b\xb2\x33\x33\x13\xc7\xfb\x3e\x61\x61\x88\xed\xad\xc0\x02\x07\x9e\xe3\xd9\x3b\x91\xc0\x5f\x7c\x0f\x19\x8d\x03\x27\x5a\x6d\x94\x5b\xd6\x9f\x7b\x00\x18\x0e\xe8\xf0\xfb\x14\x47\xc3\xc4\x5f\x89\x69\x4c\x5c\x87\x7e\xdb\x20\x46\x71\x84\x5d\xdf\xfe\xf3\x97\x5f\xb2\xcd\x5a\x3d\x51\xd4\xb3\x9a\x91\x28\xe6\x12\x9a\x51\x6c\xb5\x5b\x7b\xc9\x7f\xc5\x53\x3b\xc0\x26\x1b\x3a\x61\x44\x56\x21\x9b\xfd\xf9\xae\x74\x48\xa7\xb3\xd8\x0f\xe2\x49\x78\x9b\x30\xf3\xe6\xb7\x48\xba\xcc\xb4\x59\x70\x8b\xe4\xc6\x4f\x8b\xb1\x1b\x25\x6f\x10\x23\x2c\x8c\x7c\xcb\x62\x81\xe3\x99\x6c\xf9\xbe\x2c\xa6\xd4\x8f\xbd\x88\x60\x17\x7b\x94\x85\x7f\xfe\xa2\x56\xda\x5a\x33\xd1\x56\xd3\x15\xed\x44\xba\xa6\x57\xfa\x57\xe0\xf5\x83\x55\x62\xab\x3d\x53\xd3\x5b\xed\xa6\x5a\xd4\xdb\x27\x5f\x3a\x17\xfc\x3e\x1d\xb3\xd5\x2d\xfa\xa3\xe5\x8f\x55\x1f\x64\x3e\xa0\xd5\x62\x37\xf8\x7c\x2a\x76\xbb\xee\x20\x0e\x23\xd7\xf1\x58\xf8\x9e\xe6\x83\xd0\xcd\x7a\x37\x5e\xb0\x2d\x1b\xbc\xa3\xf7\x28\x74\xbb\xde\x43\xc8\xbf\xa7\xf7\x20\x74\xb3\xde\x9d\xbc\xe3\x59\xfe\x3b\x7a\x8f\x42\x37\xeb\x9d\xc6\x24\x8c\xc9\x3b\x3a\x77\x02\x1f\xd1\xe7\x3a\xe1\x70\x16\xb3\xf8\x3d\x64\x4f\xc5\x6e\xd7\xcd\x58\xf0\x1e\xac\xdb\xfb\x37\x6b\xdb\x36\xe3\xf7\xd4\xed\x04\x6e\xd6\xb7\x63\xa5\x21\xc3\xe6\xfb\x6a\xcf\xe4\x3e\x59\xfb\x9e\x29\xd9\xec\xfb\x8d\x66\x08\xf6\xde\x51\x4e\xb0\x77\xb3\xc3\x7b\xf6\x7b\xcf\xd7\x17\x91\x8f\xea\xdc\x8c\x01\x7e\xac\x76\x23\xb5\xd7\xbc\x95\xbd\x54\x7c\x95\x72\xdf\x97\x3d\x50\xe3\x8f\xc4\x8e\x44\xf7\x03\xc9\x03\x71\xbd\x2f\x77\x24\xa2\x1f\xc8\x1d\x88\xe5\x87\x72\x37\xf9\x77\x24\x94\xf7\xe5\x76\x24\xf1\x43\x99\x43\x93\xff\x81\xe4\xa6\x1d\xbf\x2f\xb2\x6b\x9b\xef\xcb\x9c\x35\x85\xf7\x45\x09\xf6\xde\x17\x78\x09\xd5\x9b\xa4\x36\x91\xb7\x17\xd4\x8c\xb6\xa6\xb7\x8a\x35\xfd\x54\xd8\x9d\xda\xe1\xcc\xdd\x4b\xb4\x32\x05\xad\xaa\xbe\xd2\xf5\xe7\x2f\xbb\xb1\xb1\x8e\x27\xec\x8f\x97\xcf\x12\xed\xd5\x94\xfd\xb1\xff\xca\x9f\x89\x16\x1d\xb2\x09\xfe\x23\xf1\xfb\x9f\x89\xda\xc2\x63\xc1\x1f\x89\xdf\xb7\x43\xe6\x4c\x53\x53\xdb\xda\x8b\xe6\x17\x7d\xbf\x9c\x69\x3c\xbf\xb9\x57\x9c\xa9\x55\xab\x9a\xde\x7e\x47\xf3\x4e\x20\x51\xd3\xcf\x15\x24\x8a\xad\xc4\x97\x97\xf1\xed\xcb\x67\xe1\x56\xc9\x97\x4b\xcb\x2f\xc5\xdf\xdb\x3c\x20\xf4\xc3\xf2\x9c\x61\xa9\xd7\xda\x17\x78\x26\x7a\xc5\x76\xe1\xe0\xd6\xe9\x80\xf6\xcc\xfc\x51\xcb\x85\x23\x1f\x29\xfc\x2b\x25\x5b\x00\xea\x95\xd4\xd4\xde\xcc\x62\xa6\x81\x4f\x99\x19\x07\xd8\x4d\xb8\xd8\xb3\x63\x6c\xb3\x2d\x0c\x37\x0e\xc0\x37\x62\x26\xb3\x70\xec\x46\xdf\x23\x4c\x5c\x16\x4e\x31\x65\x9b\xd9\xc4\x97\x8b\xbb\x0b\x27\x1a\x7e\xf7\x1
d\xf3\x64\x82\x70\x56\xd8\xd3\x80\xdc\x17\x73\x1b\xba\xc7\x42\xbe\x04\xc0\x35\xc0\x77\x51\x7e\x4a\xba\x5f\x7f\x49\x24\x12\x2f\x9f\x38\x66\x82\x0e\x71\x80\x69\xc4\x82\xc4\x1c\x07\x2b\xc7\xb3\xbf\x8a\xe8\xb7\x6d\xdd\xe8\x9d\x4a\xe5\xdb\x56\x7a\xf3\x45\x0f\x4f\xd8\x15\x61\x59\xbe\x26\x3c\xc7\x6e\x7c\x4d\x9a\xe3\xf8\x4b\x71\x17\x87\xd1\xc4\x37\x1d\xcb\x61\x66\xc2\xf1\x22\x66\xb3\xe0\x20\xf2\xcb\x6f\x97\x75\x7f\x68\xc5\x0f\x62\x11\xde\x05\xc4\x7e\x22\x90\x20\x8e\xed\x78\xd1\xc5\xcd\x90\xcd\xbc\x78\x72\xfd\x9e\x17\x4f\xc2\x98\x30\x2f\x0a\x36\x53\xc1\xcb\x62\xee\x64\x1c\xcf\x72\xf1\x66\xc6\x68\xb2\x30\xba\xee\xce\x4e\x70\xe8\x4f\x98\xe9\x4f\xb0\xe3\x5d\x91\x12\x84\x4b\xa7\xa3\x61\xc0\xc2\xa1\xef\x9a\x61\x22\x62\xcb\x4b\xcf\x2c\x17\xdb\x6f\x79\xf4\x6e\xdd\xec\x11\x89\x37\x56\x5d\x07\x13\xc7\x75\xa2\x4d\xe1\x76\xe5\x7f\x81\xc4\x75\xdf\xbb\xed\xd8\xde\x66\x2c\xb4\x71\x6b\xf7\xc9\xc9\x68\xe0\x30\xb4\xd8\x83\xfe\x7d\x3b\xad\x4e\x64\x0a\x5a\xa6\x9c\xf8\xfa\xf5\xa5\x2a\xfe\xfd\x57\x02\xfc\xf6\xdb\x3b\xdf\xbe\x74\xf0\x52\xcf\xab\x02\xfc\x48\xe3\x59\x5d\x5e\x68\x3b\xaf\xe7\x1f\x69\x7a\x0d\xcf\x85\xba\x2b\xf8\xed\x74\xbe\x6e\x18\x9b\xfe\xef\xde\x36\xb1\x19\x32\xee\x9a\x83\xe7\x9b\xec\xb4\x2d\x9c\xb5\x81\xd7\x46\xcf\xfb\xe7\x7b\xcd\x9f\x0f\x8c\x77\x8e\xec\x3f\xc3\xe1\xf0\xc4\x19\xf4\x2a\xb6\xa7\x01\x9b\xff\x50\x88\xc4\x74\xcc\x22\xd7\x09\xa3\x1f\x8a\x1e\x46\xdb\x2f\xe1\xbe\xfb\x98\xba\x7e\xc8\x22\x67\xf2\x46\xcb\xdf\x12\xeb\x95\xb6\x75\x52\xe7\xe7\x83\xfa\x83\xbe\x8b\xfa\x3e\xda\x79\x23\x74\xde\x9a\x1b\x9c\xab\x39\x96\xe2\xad\x68\xd9\x0f\xbe\xee\xad\xb1\xfd\xc4\xeb\xeb\xa1\x91\xb3\xe0\x46\x06\xdd\x65\x5e\xcc\xb7\x18\x74\x1b\xee\x38\x0c\x59\x74\x0d\xcf\x5d\x5b\x7d\xf3\x36\x9e\x6c\x9a\xd5\x75\xd5\xd3\xc0\xa1\xcc\x7b\x83\xc4\xb6\x37\xdf\x62\xb8\xed\xcd\x84\xe9\xc7\xc4\x65\x9b\x78\xa3\xce\x36\x23\xf9\x54\x16\x3d\xa9\xe1\xfd\x94\x75\x57\x96\x8b\x7a\xdd\x17\xf0\x8d\xd8\xd8\x7f\x73\x8f\xf0\xc5\x57\x5f\x70\x7f\x2b\x20\x76\x03\xf6\x7b\xe3\x61\x37\xad\xdf\x85\x83\x33\xbd\xd6\xf1\x8b\xaf\x5a\xae\x1f\x44\x07\x34\xb2\x5a\x4e\xed\x54\xda\x09\x70\xd9\x6d\xb2\x65\x84\xa3\x88\x4d\xa6\x51\x62\xd3\x2c\xc2\x08\x4f\xa6\x89\xcd\x90\xc9\x8f\x77\x9f\x24\xd6\xbe\xc7\x5e\x77\xb6\x16\x76\xdc\x38\x38\xe9\x6a\xdf\xb2\x10\xad\xa6\xec\xc7\x95\xb2\x4b\x4b\x9c\xe8\x7d\x4d\xfb\x07\x8b\x6f\xd4\xce\x3e\xb3\xe1\x07\x97\x95\xfa\x75\x8b\xc4\xbf\x13\xe0\xb7\x84\xaa\x67\x13\xbb\xb7\xff\xeb\xaf\x04\x12\x45\x28\xfe\x76\xb5\xae\x4e\xa7\x61\x77\x57\xd9\x69\x96\xe7\x94\x73\xdf\x40\x63\x97\x68\xdb\xb4\xba\xab\x0e\x6d\xe6\x8e\x0f\xb8\x12\xc6\x64\xef\x44\xc0\xc2\xb3\x0e\x08\x5e\x1d\x31\x06\x0c\x1f\xda\xd2\x6b\x7f\x4e\xe6\xbc\xf7\xfa\x74\x92\xac\xbb\xa1\x67\xdc\x39\x36\x0b\xd9\x7b\x3d\xcc\x6b\x3f\x4f\xe6\xf0\xf7\xfa\x79\x54\x71\xbb\x9f\xaf\x3a\xb9\x8b\xfb\xcc\x9b\x33\xd7\x9f\xb2\x1f\x74\x69\x47\xd3\x0f\x74\x44\x27\xe9\x8e\x07\x20\x78\xc9\xd7\x7e\xbd\xa5\x1e\x8e\x51\xf4\x23\x20\x66\x6f\x74\x34\xe7\x20\xbc\xe4\x81\xcf\x34\x5e\x02\x71\x66\xed\x4d\x30\x8e\x39\xa2\xbb\xc1\x38\x26\xc5\xbf\x1e\xdb\xed\xf9\xe4\xed\x4a\x9b\x7a\xaf\x75\x9f\x64\xb8\xee\xf5\xea\x64\x09\xe0\x9e\x69\xd7\xb6\xc7\x7f\x87\xa9\x9d\x30\x8c\x59\x70\xbb\x2a\xea\x9b\x57\x67\xa7\xaf\x60\x89\x5c\x67\xe2\xbc\x31\xa2\x78\x77\x2e\xf8\x33\x67\x55\x27\xd1\x79\xb2\xaa\x72\xd7\x2c\xea\xf4\xfb\xcf\x9a\x47\x9d\xe8\xbc\x7f\xfe\xf3\x9e\xd6\x5d\xa5\x5d\x68\xda\xd7\xe4\xbf\xaf\x37\xbc\xb3\x74\xef\xdd\x41\x7e\xba\x86\xb6\x0b\xf3\x68\x79\x46\xc5\x37\xcc\x37\x2e\x03\x70\xb9\x5d\xa5\x7c\xf3\x2e\x1d\x62\xcf\x66\x57\x27\xf6\xa7\xe0\x9c\x2e\xdb\xdd\xcf\xd5\xc7\xdc\xf9\xfd\x10\xfd\xcd\xf8\x10\xdf\x5c\x5d\x03\x27\x5a\x
06\x2c\x8c\xdd\xab\xec\x1e\x2d\x27\xec\x87\xf3\xb9\xe3\x12\xeb\xfd\x78\x5e\xac\x5b\xdc\x0b\xea\xc5\x8a\xf3\xd7\x9b\x80\xdb\x7f\xe9\x3d\xf4\xf6\x22\xd7\x80\xb8\x2d\xec\x2e\x56\xb8\xef\x01\x2a\xbb\x99\x59\x5b\x7e\xf0\x83\x64\x68\x22\xab\xb6\xd5\x1f\x60\xf6\xbe\xca\xf0\xc3\xfa\x8a\x7a\x4b\x6b\xb6\x13\x45\xbd\x5d\x3b\x26\x15\xbb\x6a\xa5\xa3\xb5\x12\x5f\xbf\xe4\xd3\xcd\x7a\xbf\x50\xac\xf0\x99\x22\xcc\xe9\x0d\x21\x6d\x54\x72\x55\x3d\x5b\xc9\x95\x3a\x7a\xbd\xc3\x17\xfa\x70\x50\xcd\xb5\x0a\x35\xbd\x93\xd1\x6a\x6a\xab\x27\x35\x32\x52\xcd\xe0\x0b\x5f\xbe\x25\x94\xfd\x4b\xda\xfd\x27\x03\xf0\x2d\xc1\x7f\x4b\x80\x6f\x3b\x94\x13\x5f\xbe\x7c\x4b\x7c\x51\x1b\xaa\xaa\xaa\x7f\xfd\xf5\x65\x7b\x83\x7f\xb9\x77\xfc\xfb\xdb\x9f\x3f\xf4\xd0\xc8\x37\x4a\xbd\x6e\xa5\x57\xeb\x17\x72\x95\x6e\xbb\xdc\xeb\x8a\xb9\x7c\x41\x85\x15\xbd\xdf\xe7\x4b\x8d\x72\x55\xaa\xa9\x25\xb5\xa3\x35\x72\x1d\x54\xa9\x67\x5a\x5a\xae\x6b\xd4\xf4\x2f\xdf\x12\xfc\xd6\x31\x6e\xe3\x98\x2c\xca\x8a\x02\x05\x04\xb8\xf7\x3d\x94\xee\xf1\x30\x63\x94\xf3\xa8\xa9\x0b\x35\xbd\xa8\xd5\x33\x55\x3d\x97\x96\x20\xaf\x0a\x10\x0d\xc4\xba\x9e\x6d\x35\x2b\xf9\x5e\x59\xca\xa7\x2b\x99\x6a\xa3\x52\xcc\xd5\x84\x96\xa4\xf5\x7b\xdd\xce\x97\x6f\x09\x0e\x1d\xb1\xdb\xbb\x28\x2a\xc2\x3d\x2e\xbe\x11\x36\x97\xe9\xb6\x07\x22\xf0\xed\x24\xda\x47\xc3\xf0\x3c\x91\x76\xc0\x11\x41\x53\x91\x2d\x11\x22\xc6\x90\x6c\x72\x84\x97\x88\x48\x64\xc5\xe2\x21\xb6\x44\xc8\x71\x44\x12\x91\x82\x79\xc1\xc2\x16\x27\x00\x88\x4d\x40\x44\x9e\x20\x08\x09\x90\x08\x53\x94\x0d\x54\xe0\xc1\xd7\x46\x87\x28\xf1\x98\x67\x90\xb7\x2c\x5e\x90\x31\x90\x08\x60\x12\xb0\x4c\xce\x42\x26\xe4\x64\xca\x59\x98\x9a\x3c\x20\x88\x52\x20\x53\x08\x4d\x51\x92\x44\x5e\x54\x64\x24\x73\xbc\x88\x39\xb4\xa9\xd7\x6d\x4d\x7d\x51\xff\xc7\xbe\xd2\x46\xd9\x11\x56\xa9\x55\xab\x9c\x96\xb2\x5e\x56\x29\xf0\x60\x39\x4a\x27\x43\x60\x47\xe1\xa2\xb8\x58\x73\x86\xd9\xea\xf5\x71\xba\x84\x73\xf6\x46\x5e\xd3\x85\x0a\x5e\x4f\xf9\xc6\x0f\x35\x0f\x54\x83\x13\xb6\x62\xe9\xf1\xdf\x50\x90\xa7\xbe\xbe\x5c\xb4\xf5\x37\x02\x95\xf0\x0a\xb2\x00\x12\x45\x91\x72\x90\x59\x22\xaf\x98\x58\x50\x78\x9e\x88\x96\x0c\x14\xde\x92\x08\x54\x18\x95\x21\x12\x28\x53\x20\xa6\x32\x06\x80\x48\x98\x72\x90\xa3\x58\xc6\x70\x13\x64\xcf\x08\x76\x0a\x81\x28\x70\x96\xa2\xc8\x22\xa4\x88\xb3\x24\xd3\x12\x00\xe1\x65\x0e\xc9\x3c\xe4\xa1\x29\x30\x89\x40\xc4\x03\x02\x31\x82\x88\x0a\x50\x44\x26\x60\x3c\xc5\x04\x62\x5e\xe6\x36\x7e\xf0\xdf\x12\x9c\x28\x2a\xa2\xa4\xc8\x1b\x6a\xdc\x46\x6c\x86\xaf\x0f\x46\x9c\x1e\x8b\x3e\x20\x25\xa9\x27\x78\xab\xda\xbc\xb3\xcc\xc3\xee\xd4\x1f\x27\xe7\x39\xb5\x16\x65\xb8\x32\x5f\x95\xd2\x12\x1a\x34\x07\x2d\x2d\x58\x69\x76\xbe\xe3\xc8\xcd\xd8\x23\x1c\x1f\xf7\x64\xb3\x3c\x19\xe0\xde\x84\xab\x98\x85\x06\x9f\x8c\xd6\x8e\x97\x8b\xf0\x0e\x61\xa3\xde\xad\x4e\xb6\xd1\x51\x3c\xfc\xd9\xd1\x5b\x78\x7c\xbf\x50\xeb\x8d\x7d\xec\x24\x95\x6e\x9e\xd5\xf3\x19\x87\xfa\x9e\x57\x5e\xe4\x7b\x0d\xbb\x15\xa4\x6b\xde\x30\x17\x16\x4b\xb5\x49\xbf\x54\x4b\xc9\x05\x9f\x3a\x79\xd0\xce\xa6\x3b\x05\x71\x92\xab\xe7\x0b\x10\x82\xcc\xca\xee\xd9\xa3\xa2\xd2\xf6\x42\xbe\x58\xa9\x4d\xf4\xb0\xc1\x91\x86\x10\xce\xd6\xbe\xaf\x2d\xb6\xa6\xae\x44\x74\xb6\x78\x2d\x2a\x5e\x22\x3a\x0b\x4a\x9f\x17\x7a\x9f\xf4\xba\x31\xa2\x19\x27\x99\x02\x15\x05\x89\xc9\x94\x17\x4d\xac\x88\xb2\x0c\x37\xe1\x67\x22\xd1\xc2\xa6\xc0\x0b\x22\x05\x88\x23\x9c\x0c\x44\x48\x01\x81\x50\x14\x99\x20\x03\x4c\x08\xc6\x9c\xcc\x6d\xa2\xf1\x19\xad\x82\x2a\xb2\x80\x15\xc9\xe2\x4c\x13\x62\xc8\x99\x78\x43\xc0\x4c\x34\x2d\x62\xf2\x4c\xe2\x25\x68\xc9\x1c\x94\x64\x60\xf1\x1c\x64\x3c\xa6\xcc\xa2\x1c\x91\x04\x0b\x88\xbc\x4c\x05\x5e\xfa\xf2\x2d\x01\x4f\
x22\x9a\x7f\x89\x68\xa9\xdc\x93\xf3\xdd\xee\xa2\x2d\x71\x65\x73\x5a\x2a\x06\x86\x90\x69\x29\x52\x8d\xad\xb2\x83\x2a\x6a\x07\x45\x5f\x8d\xd0\xa2\x5d\x28\x3b\xa3\xfe\xc8\x98\xb6\xd7\x7d\xb5\x0d\x3b\x5d\x53\x4c\xca\x60\x12\x13\x29\xbb\xb2\xa2\xa0\xea\x2b\xf5\x5c\x21\x85\x07\xba\xde\x1c\x82\xd6\xbc\xba\x45\x78\x1b\xd1\xf6\x11\xf1\xf5\xb4\xb7\xb4\x3a\xe6\x78\x31\x2d\x26\x27\x78\x58\xed\xe4\xbb\x0b\x30\xac\xf3\x56\x59\xf2\x73\x85\xbc\xe2\x00\xae\xde\x66\xf3\xf9\x3a\x48\x95\x86\x65\x37\x55\x30\x6b\xa3\x02\x6e\x95\xb3\x2e\x94\x40\x4c\xbd\xba\xa0\x41\xbb\x5e\x6c\x0b\xb3\xf5\x7c\x41\xcc\x7a\xba\x35\xd2\x4a\xdb\x88\xad\x5e\x89\x58\x2d\xbc\x56\xeb\xff\x05\x11\xcb\x21\x8e\x72\xc4\x44\x84\x17\x31\xe3\x30\x07\x25\x81\xc7\xb2\x44\x11\x25\x0a\x4f\x65\x24\x62\x88\x28\x94\x65\x8b\xc8\x32\x35\x25\x45\x91\x89\x8c\x38\xcc\x08\x46\x48\x41\xe6\x96\x3f\x9f\x11\xf5\x4c\x42\x00\x53\x8e\x12\xc8\x11\x24\x0a\x98\x93\x09\xc0\x54\x01\x3c\x14\x79\x53\x14\x89\x44\x19\x34\x65\x4b\x92\x99\x24\x73\x92\x00\x04\x26\x22\x26\x4b\x04\x11\x05\xf0\x44\x36\xbf\x7c\x4b\x08\x27\x11\x0b\x5f\x22\x36\x99\x53\xda\xb9\x66\x72\x94\x31\xb0\xdb\xb7\x45\x7f\xcc\xb9\xc9\xa8\x31\x36\xd4\x3e\xc9\xab\x1d\x39\xb3\xd6\xbb\x62\x31\x13\xc4\xb3\x7c\x7a\x2e\xd9\x85\x64\xd5\x09\x0c\x9a\x77\xe7\x56\xd0\x5a\x79\x68\x14\xf6\x44\xb2\xac\xe5\x04\x9c\x0f\x2b\x46\xa7\xd7\x47\x04\xe4\xbb\x59\xba\x45\x78\x1b\xb1\x8b\x23\xe2\x8a\x6e\x49\xa2\x6a\xad\xcb\x05\x6a\xad\x79\xcf\x82\x0b\x2a\x39\x5c\x75\x4a\xda\x4e\x38\x18\x86\xee\x4a\xaa\x6a\x59\xc1\xe7\x0a\xd0\xf3\xfa\xe5\x05\x0d\x97\x91\xdb\x1a\x3a\x8b\xa0\xa4\x8e\x3a\x51\x97\x58\xb5\x7a\x5f\x11\x44\x21\x07\xd2\xb5\x6e\x3c\x94\xb0\xa8\x06\xa3\x6d\x90\x36\xae\x44\x6c\xfe\xea\x10\xe2\xbf\x20\x62\x65\xca\xf3\x94\xc8\x54\xd9\x44\x92\x44\x2d\x45\xb6\x24\x8b\x21\xc9\xa2\x10\xcb\x08\x48\x48\xa4\x90\x21\x99\x52\xc0\x20\xc5\x12\x14\x30\xcf\x0b\xa2\x89\x10\xda\x44\x24\xb6\x36\xd1\xf6\x8c\xa8\x87\x0a\x62\x82\x64\x4a\x8a\x4c\x00\x05\x44\x14\x91\x05\x78\xb4\xb1\x2e\xc9\x18\x20\x4e\xe2\x21\x31\x01\x50\x36\xb1\x2f\x21\x01\x63\xca\x03\x93\xb3\x30\x36\x39\x22\x0b\x1c\xf9\xf2\x2d\x21\x9e\x44\xac\xf0\x12\xb1\xcb\x3e\xcd\x2b\x78\xd5\x8b\xf3\x43\x1d\x94\x7d\x2b\xbf\x12\x2b\xc5\x01\xd6\xc3\x5a\x31\x29\x8c\x74\x36\x70\x84\x3e\x46\x68\x30\x25\x83\xb9\xb2\xea\x56\x8c\xca\x80\x13\xd2\xea\x88\x18\xcd\x72\xaf\x3c\xab\x16\xec\xba\x60\xa6\x56\x4e\xbb\xd2\xcf\xfb\x95\xe1\xa0\x3f\xa5\xe1\x3e\x58\x36\x11\xeb\x9d\x20\x5e\x96\x06\xb9\xc2\xb4\x33\x17\x97\x76\x5d\xef\x8a\xf5\xae\x2e\xb3\x9e\xb2\x5a\x05\x25\x9a\xee\x0c\xb2\x65\x3e\xd3\x2a\x17\xcc\x79\xdd\x8e\x6c\x91\x8c\x2d\x63\x52\xc9\xaa\x51\x97\x2c\x4a\x13\x23\xcb\xca\xe9\x21\x1d\xcd\x1b\x99\xee\x22\x6f\x8e\x67\x61\x51\x2f\xa0\xa0\x19\x6b\xf9\x6d\x8b\xe8\x5c\x89\xd8\xaa\x7d\xad\xd6\xff\x0b\x22\x96\xe3\x2d\x4e\xa6\x50\x20\x44\x40\x12\xe2\x64\x6c\x49\x26\x0f\x14\x88\x4c\x86\x28\x16\x30\x34\x39\x8a\xb1\x2c\x73\x14\x22\x4c\x15\x2c\x11\x59\xa1\x4c\xe2\xa0\x20\x0a\xcc\x14\xf8\x4d\xb4\x3d\x23\xea\x05\x85\x12\x85\x48\x80\xca\x48\x96\x4d\x4a\x98\xc5\x24\x00\x39\x13\x11\x46\x45\x49\x62\x96\x80\x28\xe4\x4c\xd1\x12\x65\x26\xc8\x54\x01\x48\x32\x25\xaa\x20\x80\x4d\x2a\x02\x8c\xbf\x7c\x4b\xa0\x93\x88\x15\x5f\x22\x56\x58\x38\x2b\x61\x45\x33\x6b\xab\x3e\xa9\x5b\xc9\x41\x6a\x81\x86\xb6\xe9\x2e\x92\xb8\xba\xa8\xd5\xcb\x14\xf8\x4e\xd3\x1c\x4c\xfc\x31\x3f\x77\x26\xba\x34\x5d\xd9\x41\xd3\x85\xca\xc2\x17\x82\x7c\x13\xfa\x23\x36\x59\x73\x23\x65\x9e\x6f\x4e\xf3\x86\x3d\x2f\xcf\x73\xc3\x2e\x68\xef\x86\x90\xdb\x88\x3d\x09\xa2\x64\x3e\x37\x28\xcc\x3a\xc5\xb1\xd7\x82\x53\x31\x1c\x10\x81\xae\xa9\x5c\x1e\x01\x4c\xf2\xad\x6a\xd7\xd2\xf1
\x2c\xa9\xac\xd7\x6e\xc4\x37\x4b\x2b\x31\x5a\xac\xf2\x8e\xbe\x4a\xa6\x3c\x75\xc9\x05\x52\xce\x4c\x2a\xf9\xc5\x92\xb3\x7a\xb5\x56\x75\x9c\xb7\x8c\x55\x2f\x13\x2d\x1b\xb3\x6d\x5c\xf6\xaf\x44\x6c\xdd\xbf\x56\xeb\xff\x0d\x11\x8b\x2c\x64\x99\x44\xa4\x3c\x10\x14\x19\x30\x28\x30\x2a\x42\x28\x50\x01\x28\x44\x10\x79\x22\x2a\x9b\x11\x2a\x30\x11\xe3\x09\x15\x19\x12\x89\x24\x03\x08\x45\xc6\xf1\xb2\xc5\x8b\x5b\x8e\x7d\x42\xd4\x9b\x3c\x56\x38\xc8\x0b\x84\x53\x64\x68\x99\x88\x5a\x26\x84\x12\x82\x1c\x34\x81\x25\x50\xc0\x33\x0b\x49\x40\x22\x82\x8c\x08\x6f\x59\x84\x32\x0b\x61\x4a\x28\x30\x25\x1e\xf2\xb2\xf0\x65\x9b\xe6\x39\x44\x2c\x3a\x70\x6c\x65\x39\xca\xb6\xa4\xa6\xd7\xef\x07\x10\xd8\x63\x1e\x4e\x56\x63\x5f\x29\x94\x67\xc5\x42\x16\x87\x13\x2f\x2e\xad\x3d\x0d\x74\x6b\x5c\xa6\xa4\x6a\x78\xaa\xac\x75\x25\x76\x83\xcc\x2c\x94\x56\xac\x3c\x2b\xa7\xa9\xd3\xae\x4c\x0a\x35\x7d\x92\x54\xd5\xd6\x2c\x3f\xce\x0f\x23\xad\xb3\x45\x78\x1b\xb1\x27\x4c\x57\xe1\x44\x90\x1a\x2b\x86\x15\x82\x76\xa9\x91\xb1\x8a\xb1\x92\xcd\x84\x35\x6d\xec\x70\x65\x2f\x39\xac\xf0\xc5\xdc\x6a\x55\xe9\x56\x4d\xa3\xd2\x9a\x35\xab\xe3\x70\x60\xa7\xb8\x30\xd5\xd6\xf9\x6a\x1b\xd4\xdb\x6a\x65\x3e\x58\x30\x30\x0c\x2a\xa9\x79\x4d\x99\xad\xe4\xac\x51\x5c\x0d\xb7\x9a\xe9\x95\x88\xed\x74\xae\xd5\xfa\xff\xef\x11\xfb\x46\x26\xed\xca\xae\xa6\x07\xf2\x72\xaf\xb7\xc4\x3c\xa2\xec\xad\x3d\x1b\x8f\xe9\xbc\xdc\x76\xf1\x80\xb6\x37\x36\x4d\x3c\xa0\xf1\x8d\xed\x0d\x1f\xcd\x6a\x9e\x6c\x71\x38\x49\x0d\x17\xf3\x4d\xd4\xd0\x24\x3d\xab\xc1\x46\xb5\xa7\xf6\xda\x86\xd1\x6d\x76\x1b\xf9\x6a\xbe\xd4\xed\x35\x0a\x83\x6a\x99\x17\xdb\xad\x5e\x5d\xd3\x2b\x52\xbb\x93\xcd\x4b\xe9\xf6\xa0\x5f\xdc\x65\x66\x76\xf9\xc3\x52\xda\x4f\xda\x6d\xa9\x39\x86\x69\xd7\xee\x79\x4c\xe9\x73\xf6\x60\x38\xe6\x22\x35\xd9\x2b\x73\x3c\x0d\xea\x45\xce\x12\x41\x9e\x08\x83\x63\xf4\x6d\x7b\xa1\xcc\xf6\x72\xd3\xcc\xb4\x61\xce\x2d\xb6\xe6\xc5\x96\x3a\x68\x55\x96\x79\x64\xce\x0d\x2c\x92\x25\x98\x0d\xdc\x29\x26\x9d\x08\x98\x59\x82\x60\xad\x4c\x7b\xdc\xae\xeb\x48\x53\xa5\xb3\xda\xe9\xb3\x0f\x7f\xd2\x5b\xa5\x8b\xc3\xfb\xac\xaa\x2a\x99\x93\x56\x9c\x26\x59\x56\x33\x72\x0e\xe2\xd2\x7c\x9d\xa1\x70\x4d\xd2\xed\x89\xba\x5e\xe4\x48\x56\x61\x76\x66\xd6\x59\x9a\xe5\xa8\x39\x34\xd4\x05\xae\x6e\xd5\x69\xd9\xb1\x51\xab\x7a\x8b\x59\xab\x39\x99\x64\x8d\xb5\x93\x51\x07\xbe\x27\x37\xa7\xcd\x49\xb1\xd3\x04\x0d\xbb\x1e\xab\x30\xea\x17\xf1\xd0\x80\x8d\xb8\xd0\xed\x28\xa3\xd0\xe9\x14\x59\x60\xf0\xf5\x7e\x32\x5b\xb6\xf0\x62\xa8\x9a\xdd\xba\x2a\x4a\x72\x7f\x8e\x52\x5e\x53\xd4\x33\x8b\xbf\xfe\xba\xec\x2d\x9e\x5c\x35\xf0\xa1\xaa\xa9\x9e\x57\x4d\x36\xed\xe8\xcc\xad\xeb\x76\xba\x6e\x36\xbb\xd0\x93\xd6\x2d\x2c\xce\xc3\x7a\x29\xc9\x87\xab\x11\x90\x3b\x96\x32\x1d\x03\x2e\x5f\x68\x96\xe5\xc5\xa1\x6a\xf0\x05\xdb\x7c\x18\xfa\x59\xd7\x2c\x46\x25\x79\x98\x6b\xa0\x78\x34\x74\x0b\x0e\xd7\x0d\x01\xc6\xa5\x1e\x50\x35\x8f\x55\xda\xc3\x45\x6f\x1e\xcd\xb4\x6c\x72\xd0\x9a\xc8\x35\xac\x66\x88\xdd\x5c\x47\xe9\x40\x6a\xa1\x61\x0f\xb7\xa3\xe6\x3a\xb3\xcc\x46\x5e\xc1\x19\xac\x8b\x95\xca\x52\x68\xab\x61\x4e\x6d\x7c\x3a\xf4\xc2\x43\xd0\x37\x2e\xa0\xcf\xa4\x62\xd5\x12\x56\xe5\xc8\x5c\xe0\x9e\x92\x8c\x2a\x65\x6b\x56\x5b\x12\x77\xae\x09\xc6\xd0\xc7\x8b\xc8\x6c\x0e\x46\xd3\x79\x63\xd0\xd1\x8f\xd0\x93\x47\xa1\x2f\x8d\x16\x4b\x02\xa5\x38\x27\xe5\x5d\xc9\x5c\x44\x84\x94\x69\x6e\xad\xa5\xc5\x96\xda\xd1\x8a\x9e\x6e\x2f\x17\x83\x39\x57\xa2\xa1\x82\xdb\x49\xab\x03\x17\xae\x11\x2f\x9b\xd0\xd7\xe8\xbc\xa4\x54\xc6\x25\xbb\xd4\x5b\x2c\x52\x92\x9c\xd7\x5d\xbf\xdb\x73\xa4\xb8\x24\x35\xd2\x9f\x1f\xf5\xe2\x43\xd0\x77\x5e\x41\x0f\x4b\x9d\x88\x46\x9e\x61\x6b\x19\x3d\x32\xb5\x69\x7f\xe
6\x2f\x98\x9a\x1a\xc2\x54\xb9\x54\x8d\x6c\x6c\xc7\x39\xd7\x71\x57\x8b\xc1\x91\x90\xe8\xc3\x51\x3f\x1c\x01\xc1\x9d\xa2\x71\xdb\x98\xb5\x7c\x7f\x3a\x2d\x5b\x53\x5f\xec\xa5\xb0\xd4\x93\x74\x69\x0c\x3a\xe9\xa8\x25\x8d\x38\x8b\x37\x40\xc3\x91\x92\xab\x59\xd1\xa9\xb7\x33\xc1\xdc\xeb\x57\x27\x11\xcf\xb0\xae\x90\x81\x59\xea\x18\x93\x06\xd7\xe3\x52\x2e\x2e\x4a\xfd\x45\xc6\xfe\x74\xe8\xd1\x43\xd0\xf7\x2f\xa1\x2f\xf7\x61\xec\x95\x33\x51\xde\xb0\x60\x66\xe4\x84\x83\x02\x73\x74\x44\xea\xbd\x3a\x2f\x0f\xf2\xe3\x01\xcb\xc8\x33\xb9\x97\x33\x9a\xbb\x99\xef\x16\x7a\xf3\x51\xe8\xe9\x48\x53\x63\x9f\x0a\x75\x9c\xee\x94\x93\x9e\xa4\x26\xc5\x99\xc1\xcf\x0b\x71\x21\xca\x44\x58\x1a\x44\xcd\xf1\x32\xe5\x65\xec\xec\x48\xaa\xf7\x2a\xf3\x60\x38\x18\xd6\xb2\x0e\x5d\x33\xd3\xa2\x61\xc3\x1e\xd5\xab\x92\xc3\x69\xa5\x42\xaf\x20\xa8\x93\x52\xbb\xb3\xee\x17\xd7\xdb\x45\xd1\xcf\x85\x5e\x7a\x08\x7a\x7a\x01\xbd\x3a\x56\x9b\x33\x0f\x56\x21\x8f\x7a\x61\x79\xb6\x9e\x97\x84\x99\xef\xe7\x56\xa5\x6a\xdc\xa7\x02\x4f\x04\x35\x5d\xf6\x71\x03\xe7\x41\xb3\x71\x80\x9e\x3d\x0a\xfd\x80\xf5\x5b\x3d\xb0\x84\xa0\xb2\x4c\xc6\xb9\x64\xd9\x94\x46\x5d\x2d\x04\x62\xb5\x14\xe5\x5a\x76\x4a\xaf\x74\x86\xea\xba\x50\xb5\x39\x2b\x58\x05\x53\x64\x85\x24\x9e\xdb\x8c\xd3\x44\xaa\x30\x63\x85\x0a\x6e\xbd\x99\x9f\x97\xa6\x28\x9e\xad\x89\xa7\xd6\x4c\xbd\x38\x3a\x74\xb3\x6f\x8f\xa9\xae\xed\x97\xbc\x63\x4c\xf5\xb2\x67\xf2\xb8\x4c\x4c\xa1\x24\x33\x91\x8a\x32\xc1\xa6\xc0\x99\xb2\x25\x11\x4c\x20\x44\x94\x13\x18\x02\x10\x52\x20\x22\x0a\x2d\x91\xc9\x80\xc7\x8a\x40\x45\x93\xc7\xa6\x80\x38\x91\x02\xca\x6d\xd7\x19\x0e\x95\xba\x05\x59\xdb\x5e\x8e\xf3\x23\x94\xae\x47\xf9\x36\xcd\xf7\xd2\xd8\x94\xb8\x51\x2f\x3d\xc9\xb7\x7b\x20\x2b\xf6\x03\x63\xb0\x0a\xe5\x51\x37\xe5\x35\x06\xf3\xe1\x6e\x69\xea\xbd\xc2\x5f\xdf\x1f\xf9\xe1\xc2\x1f\xf7\x48\x1e\x0a\xef\xe2\x30\x0a\xe9\x74\x7b\x92\xe3\xda\xeb\xcb\x4b\xc9\xb6\x2d\x37\xd3\xc0\x75\x5f\x4b\x82\x81\xbe\x18\xf4\x73\x53\x38\xef\xe9\xfd\x7c\x7f\xa0\x93\x46\xdd\x75\x22\xd3\x2b\xaf\x57\xba\x91\x34\xd3\x93\xe4\x49\x56\xb6\xb0\xfd\xcb\x2f\xe0\xc8\x5d\xf6\xe3\xa8\x61\x8e\x60\x1c\x54\xf9\x45\x47\xb4\xab\x72\xba\xb7\x48\x19\xbe\x3a\x73\xab\x46\x2b\xe8\xf4\xbb\x8b\x6a\x7e\x74\x40\xb2\x64\x3f\x18\xde\xf7\xd4\x44\x5a\xa5\x5e\xda\xac\x91\x8a\x1a\xa8\x8b\xb1\xb9\x60\x5a\x52\xe8\xd6\x62\x2d\xd9\xcb\x4e\xd2\x32\x0e\x15\x3a\x5a\xe4\x9b\x71\x5a\xb7\x9d\xb5\xba\xcc\xb4\xf3\x73\x56\x2c\xae\x16\xcd\xac\x59\x18\x68\xa3\x54\xa0\xc7\x28\x66\xf1\x92\xcc\xd3\x48\x5c\x30\x2e\x63\x79\x95\xd5\xbc\x90\x3d\xf8\x93\xe9\x3f\x38\x93\x4e\xdf\x43\x1f\x8d\x42\x69\x61\xb4\x27\xab\x45\x79\x51\x2d\x15\x68\x61\x58\x8f\x73\xed\x60\x58\x77\x6d\xd1\xb6\xf2\x41\xdd\x28\xca\xe9\x31\xb1\xdb\xfd\x62\xb8\xa8\x36\xc6\xcb\x00\x3a\x99\x6a\xa8\x35\x60\x93\x6f\x16\x53\x68\x4d\xe2\xc0\x0b\xc2\x9e\xb4\x60\x31\x2d\x44\x0d\x4f\x5c\xc9\xc1\x72\xb1\xd8\xba\x03\x40\x8f\x13\x32\xac\xbe\x92\x78\x4f\xcb\x19\xcd\xe4\x22\x76\x64\x2e\x5c\x15\x6d\x9d\xcd\xc8\x44\xf2\xb3\x9d\x55\xdc\x6b\xa6\x05\x3c\x52\x59\xc9\xae\x89\xcc\xee\x2e\x7b\x94\x7a\x15\xb1\x5f\xad\x8d\xdb\x85\xd4\x1c\xe9\x6a\xb1\x5f\x57\x0b\x8e\xc7\xf7\xdc\x25\xeb\xd4\x1f\x8e\x37\xfb\x50\xff\x8b\x47\x33\x17\xdb\x57\xef\x63\xf1\xdb\xc8\x1b\x13\xcd\x8d\xaa\xa6\x92\x91\xad\x60\x68\x39\x1e\x13\xb8\x66\x45\xaf\xb5\x33\xa4\xd9\xf1\xeb\xeb\x16\x57\x6c\x54\x97\xeb\xa2\x6e\x20\x79\x86\x98\x27\xe5\xa5\x40\x28\x44\xcd\x9a\x51\x37\x5c\x39\x1e\x8a\x6d\xd0\x0c\x56\x98\x05\xb3\x90\x17\x17\x59\xaf\xdd\x2a\xae\x1b\xdb\xc5\xb9\xb4\x36\xcf\x57\xf5\x4a\x94\x37\x87\xce\xdc\x6a\x65\xda\x84\x91\x72\x6b\x04\xe8\xcc\xb7\xa9\x3e\x5b\x61\x26\x78\x35\xda\x6e\x76\xa4\xce\x
76\xd6\xb6\x0b\xbd\xbc\xe8\x97\xa2\xae\xe9\xf5\x6b\x5d\x73\x30\x8b\x8c\x69\xbb\x90\x8e\x08\xed\x83\x49\x66\x62\xd1\x74\xb1\xac\xd9\x3d\xcf\x9d\xe7\x8a\xc3\xed\x54\x60\xd7\xe7\x6c\xbf\x7f\x99\x2a\x49\x5f\xb6\xaf\x60\xa6\xa3\x0a\xab\x61\x7b\xb4\xac\xe2\x4e\x5d\x41\xe9\xb5\x15\x2a\x0c\x50\x3f\xd0\x07\xc6\x3a\xdd\x2b\x8d\x73\x7e\x59\x1a\xcf\xc7\x27\x8b\x44\x87\x5d\x1d\x2f\xf8\xca\x1d\x27\xe7\x6f\xd5\xaf\x29\x5c\x26\xe7\x61\xc4\xe5\xbd\x92\x91\x8a\x97\xcd\xfe\xa2\x1b\xb5\x65\x49\x5a\x15\x9d\x82\xe6\x0c\x1a\x2b\xd5\xee\xe7\x93\xf5\xa4\x30\xf5\x66\x5a\x15\x0d\x53\x49\x45\xd7\x6d\x3f\x8a\x5b\x46\xa6\x4b\x64\xd3\x22\x79\xec\xa5\xb3\x8d\xae\x96\x9a\x05\x93\x4e\xd1\xeb\x3b\xe9\xdd\x78\x3b\xc6\x19\xd2\x35\x06\x7c\xd6\x35\x7a\x38\xe8\xa2\xce\x72\x41\x7a\x30\xaf\x97\xec\xa9\x07\xd5\x56\x66\x58\xcc\x4d\x45\xb2\x6c\x15\x7b\xf6\x61\x69\x29\x73\xf8\x73\xfa\x6a\x5c\xe0\x51\x46\x23\xe6\xc0\xd1\xc4\x2f\xca\xed\xbc\x9b\x4d\x31\x9b\x42\xa9\x6e\x44\x85\x72\x79\xdd\xeb\xca\x8b\xae\x33\x48\xe3\x4c\x2c\x56\xc4\xea\x89\x96\x9c\xc2\xb4\xd3\xb7\x07\xfc\x1b\xd9\xce\xba\x9e\xe9\xb8\x1c\x8f\x56\xb1\xda\x20\xf2\x3c\x19\xce\xb5\x9a\x5b\xa5\x35\x29\x6e\x42\x97\xe5\x0d\x81\xf7\x84\xa1\xc8\x41\x63\x66\x74\x56\x52\x4e\x21\xeb\x09\x28\xa6\x17\xab\x30\xdd\x19\xeb\x3d\x8e\x34\xd6\x63\x6c\x23\xa1\x65\x0d\xfa\xc0\x88\xd3\x42\x71\xdc\xd8\xc5\x37\x99\xd8\x13\xae\xcb\x9b\xb6\xd8\xe5\x26\x33\x8e\xb9\x55\x9a\xe7\xa2\xe5\xa8\xd5\x2f\x0f\x94\x85\x66\xfb\xad\x34\x66\xc7\xfa\x18\x1f\xdb\xd3\xe5\x78\x59\xbb\xc0\x23\x13\xfb\xd0\x8f\x04\x71\x96\xa9\x6b\xcb\x69\x23\x05\xfd\x82\x9e\x5c\x73\x52\x73\xe5\x84\x9c\x6b\x55\x73\xfd\x49\xa3\x67\x07\x71\x2b\xd9\x3e\x0d\x28\xcb\x18\x9e\xbe\x3d\xe0\xaf\x65\xdd\x2e\x68\x9a\xb5\x42\xdc\x40\x3d\xd6\xc9\x0c\xbc\x72\x73\x3e\x0a\x06\xa0\x33\x0f\xc4\x66\x2e\x99\x8a\x5b\x16\x21\x83\x7e\x3d\x46\x43\x9a\x87\xa3\x9a\xd7\x70\xf5\x62\x12\x14\x68\x95\x94\xf3\xba\x9c\xe1\x2c\x32\x09\x3b\xee\x72\x1d\x0d\x39\xca\xa3\x5c\x3a\x2a\x82\x6a\xf6\xe8\xaf\xa6\xde\xcb\xa7\x57\x47\x8a\x57\xfa\xd7\x97\x87\x7e\xed\x9e\x28\x70\xbd\x7f\xe5\xc0\x2d\x9a\x3c\x16\x2d\xfc\x60\x3c\xc5\x61\x38\x1d\x06\x38\x64\x57\x34\xb5\x59\x18\x25\x5a\xd9\x5c\x42\xdf\x09\x27\xfe\x4c\xb4\xd8\x34\x62\x13\xc2\x82\x04\x0f\x38\xf1\x16\x43\x96\x1f\x50\x16\xd2\xa9\xef\x79\x6c\x19\xb9\x38\xf6\xe8\xf0\xd2\xd0\xf6\xc4\xfe\x2d\xca\x76\x79\xf5\xfd\x8e\xd8\xf0\x7a\xf9\xff\xef\x76\x03\xed\xaf\x91\x33\x61\xbf\xfe\x91\x00\xbb\xfd\xb4\xbf\xee\x1f\x81\xf6\xeb\x1f\x89\xdd\xfd\xed\x87\x43\x1c\xfe\xfa\xc7\xee\x79\x01\xdb\x0f\xff\xb3\x17\xb6\x18\xbb\x4d\x70\x82\x97\xd1\x32\x74\xd6\x37\x8a\x07\x2c\x64\xc1\xfc\x47\xc2\xbf\xfc\xe7\x26\x28\x70\x18\x6d\x0f\xba\x9a\xfb\x03\x67\xd7\x42\xe1\xd1\xf5\x86\x1b\xfc\xd8\xcf\x66\x70\x40\x87\xce\x7c\x7f\xf3\x8d\x2a\x39\xd6\x01\xb7\x07\x64\x0b\x47\xf0\xeb\x1f\x89\x5f\xe7\x1c\xf7\x2f\xee\x5f\xe0\xd7\xfd\x0d\x1a\x07\x01\xf3\xa2\xca\xb6\x68\xbf\xfe\x91\x90\xce\x3f\x4f\x6f\xcf\x22\x6f\x70\xfb\xdf\x07\x20\x8f\x90\x1e\x24\x37\x8a\x79\x05\x5b\x0c\x73\x18\x21\x59\xe1\x4c\x85\x07\x58\x36\x25\x1e\x8b\x08\x61\x04\xa0\xa0\x48\x82\xc5\x08\x16\x29\x2f\x09\xa2\x4c\x19\x12\x28\x0f\x14\x00\xa1\x2c\x32\x45\x92\x21\xbf\xf7\xe8\xa0\x77\x13\xc5\x67\x15\x78\xb8\xb3\x2d\xfb\x26\xe4\xce\x6e\xfd\xe7\xe2\xfb\xa1\x87\xa7\x1b\xbf\x2c\x06\x21\x6f\x22\x48\x64\x09\x52\x44\x21\x0f\x44\x28\x20\x44\x20\x00\x84\xe7\x4d\x20\x03\x44\x2d\x62\x12\x09\x22\x00\xa8\x44\x44\xc1\x94\x10\x53\x44\x85\x03\x80\x30\x0e\xfd\xfa\xcb\x15\x0b\x6f\x60\x00\xa0\x89\x39\x8a\x14\x85\x08\x8c\x10\xb6\x61\x1e\x6c\x31\x26\x33\x4b\x26\x4c\xa6\x4c\x62\x54\x11\xb1\x64\x9a\x0a\xb4\x04\x0b\x20\x84\x05\x01\x89\x08\
x43\x2a\x5a\x18\x23\xe9\xe3\x18\x70\xdf\x5e\xdf\xf3\xe3\x68\x1a\x47\xcf\x2d\xfb\x7b\x08\x3f\xba\xd9\xf6\x43\x08\x3f\x6a\xeb\x93\xa2\xec\x1f\x0c\xfe\xc1\xe0\x1f\x0c\xfe\xc1\xe0\x1f\x0c\xfe\xc1\xe0\x1f\x0c\xfe\xc1\xe0\x13\x31\xd8\x5e\xfd\x9f\xdd\xe4\xe5\x8d\xcc\xf3\x1b\x67\xe0\x1f\xd8\x1c\xf1\xe6\x91\xe3\x8f\x66\xb3\xcf\x8e\x1d\x1f\x26\x37\x3c\x94\x04\xa6\x28\x50\x50\x88\xc2\x2c\xc9\x24\x58\xc1\xa2\x49\x20\x84\x0a\x91\x64\xcb\xc4\xb2\x05\x05\x49\x92\x08\x87\x2d\x08\x09\x16\x90\x8c\x4d\x91\x02\xd3\x52\x04\x64\x0a\xe6\xfe\x08\xcb\x59\xd6\xbb\x7a\x91\xf5\x4a\x3b\xa9\x34\xa8\x80\x52\x7e\x15\x0d\x17\x3a\xe7\xf6\x01\x5e\x4d\x7d\x4e\xd1\x0b\xcb\x79\x25\xb3\xaa\x89\x51\x5a\xa3\x99\xee\x7c\x91\x53\x16\xd0\x8e\x82\x9a\x37\xb8\xcc\x1e\x5d\x7b\xbd\x79\x14\x6a\x9b\xa5\xc8\x3c\x62\xbf\x9f\x4a\xd2\xab\x59\x9b\x1f\xda\x7f\x9d\xe5\xb8\x8e\x3b\xc7\x4c\x99\x57\x10\x67\x11\xc0\x41\x13\x2a\x96\x82\x30\x96\x98\xc0\xcb\x54\xe0\x24\x81\x62\x2c\x60\x11\x0b\xd0\x24\x14\x89\x12\x07\x31\x94\x4c\x01\x99\x0a\x62\x4c\xa4\x90\x13\x76\xb8\xf3\xaf\x71\xff\x49\xe5\x7e\x16\xee\xb2\x70\xfc\xfe\xd5\xc3\x40\x0f\xe2\x8e\x19\xe1\x11\x07\x81\xc9\x49\x9c\x48\x45\xa6\x10\x40\x65\xd1\x64\x9b\x60\x16\x05\x1e\x00\xca\x28\x8f\x44\x0e\x6e\x66\x8d\x00\x61\x02\x44\xc4\xcb\x00\x29\x84\x61\x20\x29\x70\x7f\xc0\xe5\x7d\xdc\x3f\x96\x35\x55\x55\x69\x12\xee\x37\x8c\x5e\xdd\xe9\x7c\x13\xee\xd9\xc7\xec\x4f\xe8\x7d\xf6\x6f\xc5\xdd\xe4\x24\x99\xf1\x88\x27\x32\xe1\x31\xa6\x08\x61\x49\xb4\x90\xc2\x24\x40\x19\xa4\x44\xb2\x4c\x0b\x70\x18\x01\x04\x04\x08\x11\x44\x54\x22\x02\x86\x98\x93\x64\x1e\x22\x81\x57\xf6\xc7\x34\x5e\xe3\x7e\x52\xee\x8f\x66\x53\xd3\xe9\x71\x39\x7c\x14\x77\xed\x31\xfb\xb3\x4f\xc6\xdd\x02\x02\xe2\x24\xcc\x23\x2a\xf0\x1c\x6f\x71\x22\x96\xa0\x28\xc9\x96\xa8\x30\xac\x28\x1c\x14\x01\xb0\x28\x11\x64\x4e\xb6\x64\x5e\xa6\x9c\xc4\x29\xc0\xe2\x04\x51\xc0\x40\xe0\x64\x44\xf7\x87\x0d\x5e\xe3\x7e\x52\xee\x47\xe3\xed\x2e\x9e\xc9\x3d\x66\x7f\x2c\xdc\x67\xff\x56\xdc\x09\xe4\x4c\xca\x04\x5e\x11\xb1\xc8\x2c\xc0\x64\x8b\x53\x2c\x22\x43\x99\x43\xcc\x94\x65\x60\x8a\x12\x64\x22\xe6\x11\x52\x2c\x59\x24\x4c\xa4\x14\x01\x1e\x2b\xc0\x52\x78\x11\x50\x71\x87\xfb\x15\x7e\xff\x49\xe5\x7e\x16\xee\xa3\xce\x27\xe3\x0e\x4c\x02\x78\x4b\x90\x11\x42\x22\xe0\x2d\x89\x10\x9e\x97\x80\xa0\x30\x41\xb6\x24\x19\x88\x50\x06\x16\x90\x30\x63\x88\x49\x40\x20\x08\x73\x3c\xe1\x45\x53\x90\xa9\x44\x44\x85\xdb\xe1\x0e\x9f\x8e\xfb\xbd\xe5\x7e\x16\xee\xc3\xc5\xe7\xe2\x2e\x98\x1c\x93\x15\x62\x2a\x32\x14\x29\x36\x99\xc9\x71\x00\x9b\x18\x62\xca\x44\x44\x4c\x0e\x2b\x9c\x42\x64\x26\x42\x06\x09\x11\x44\x19\x33\x22\x72\x10\x02\x80\x64\x1e\x29\x14\xed\x70\x17\x9e\x8e\xfb\xbd\xe5\x7e\x16\xee\x76\xf5\x73\x71\xb7\x10\x20\x96\x24\x21\xce\x14\x05\x51\xb1\x38\x20\x4b\x3c\xe2\x29\x16\x24\x41\x46\x54\x01\x9c\x0c\x64\x93\x87\x8a\x20\x23\xa0\x20\xaa\xf0\xa2\xc0\x31\x0b\x0b\x82\x48\xa9\x60\x31\x65\x7f\x34\xe7\x5d\xdc\x3f\xdc\xaf\xb5\xe9\x98\x7f\x94\xdf\xf3\x8f\xd9\x07\x9f\xcc\xef\x0a\x13\x11\x14\x79\x8e\x83\x1b\x06\xe7\x65\x64\x0a\x8a\x08\x98\x28\x9a\xc0\x54\x24\x80\x2c\x53\xe0\x30\x20\x10\x03\x22\x09\xa6\xc8\x44\x8e\x47\x32\x31\x01\xaf\xc8\x4c\x62\x82\xb4\xc3\xfd\xc9\xfc\x3e\x5a\x80\xc6\x49\xbc\x5d\x3d\xd2\xf1\x01\xdc\xef\xb1\x5f\xf3\xef\xb3\x7f\x2b\xee\x12\x0f\x24\x51\x80\x50\x34\x2d\x59\x51\x98\x29\x2a\x26\xa6\x0a\x81\x22\x95\x15\xcc\x99\x0c\x01\x2c\x01\x51\xa0\x00\x20\x93\x08\x32\x65\x1c\x6f\x52\x11\xc8\x1c\x46\x84\x50\xd1\xda\x1f\xec\x79\x8d\xfb\x13\xcb\xdd\xbf\x07\xf7\xc2\x63\xf6\x75\xed\x3e\xfb\x37\x8f\xdf\x2d\x02\xa0\xb0\x19\x1b\x5a\x18\x33\x59\xe2\x81\xa8\xc8\x1c\x8f\x88\x29\x49\x00\x5b\x32\x93\xac\x0d
\xc8\x54\x66\x00\xc9\x90\x32\x2c\x8a\x58\xc6\x18\x8a\x18\x11\x8c\x00\xde\xe1\x7e\x25\xde\x7f\x52\xb9\x9f\x85\x7b\xc5\xfe\x5c\xdc\xe9\xf6\x00\x9a\x80\x4c\xc8\x21\x91\xc9\x96\xc0\x4b\x1c\xcf\xf3\x14\x63\x81\x32\x0e\x98\x02\xc1\x1c\x2f\x08\x80\xe3\xb0\xcc\x03\xc1\xa4\x48\x26\x32\x07\x2c\x20\x8a\xb2\x00\xac\x3d\xee\x57\xc6\x33\x3f\xa9\xdc\xcf\xc2\xbd\x24\x3f\x82\xfb\x9b\xb9\xb6\xe7\x24\xda\x3e\x3b\xcb\x96\xaf\xc8\x85\xc6\xbc\x31\x26\x65\xbe\xa0\xc2\x5e\x77\xd4\x0c\xca\x93\x91\x01\x80\x95\x97\xc3\x4a\x51\x9a\x00\xad\xb9\x28\xf5\x52\xaa\x01\xd5\xc3\xde\xb4\x1d\xd4\x17\x70\x5c\xbe\xff\xf0\xde\xb4\x9a\x18\x94\x4f\xf5\xed\xb2\x39\x9b\x4a\xce\x64\xd7\xb3\xf9\xb8\x91\x6e\xf8\xba\x5a\x72\xac\x7a\xd3\xc8\xfa\x95\xe1\x3c\x5a\xd1\x36\x74\x73\xf5\x4c\x43\xe4\xec\xb1\x19\xe6\x0a\x38\xad\xf7\x16\x40\x6c\xa5\xba\xc3\x1e\x30\xec\x71\x00\x32\xe9\xba\x26\xe8\x38\xd7\xe5\xcb\x13\x1a\xc2\xc1\xa2\x32\x71\x88\xd0\x6e\x06\xd5\xca\x97\x6f\x89\x2f\x45\xd8\x9e\x8e\xc1\x44\x94\x20\x8f\x26\x2e\xbf\xae\x8a\x91\x90\xc2\xa3\x75\x13\xb2\x65\xb0\xae\xa1\xa6\x1f\x74\x17\x7a\xb2\x43\x3a\xe0\xe0\xd8\x3b\x18\x1c\x23\xe3\x6c\x57\xf2\x89\xec\xee\xe9\x1f\xea\xcf\xcb\x6e\xa9\x4f\xc8\xae\xe9\xc1\xaa\xdd\x79\xc0\xbe\x7a\x61\xff\xc3\x7b\xd7\x1e\xc8\x72\x5d\x63\xcb\xcf\x4e\xa9\xde\xdf\xc8\x2e\xf7\x3e\xbe\x0a\xb8\x8f\x6d\x28\x7c\xb7\x91\xc5\x85\x21\x6c\x48\x83\x29\x89\xdd\x45\x72\xb9\x9e\x4a\x3a\xab\xd6\x4d\xab\xaf\x05\xfa\xba\x54\x0b\xe6\x0d\x47\xce\xd7\xc6\xba\xd4\xb3\xad\x45\xab\x6e\xaf\x0b\xb4\xae\xa5\xb4\x54\x28\x67\x2a\x29\xb5\x11\x8c\xd2\xd1\x02\x20\x09\x77\x06\xea\xdc\xc7\x5d\x8b\xe3\xfd\x4c\x23\xbd\x69\x08\x85\xc8\x9e\xf6\xad\x30\x5d\x6f\x7b\x22\x9e\x59\xe3\x72\xb5\x61\xb4\xcb\xd3\x72\x6f\x5c\x27\x4b\x97\xb6\x7d\xc8\xe5\xf9\x9e\xe4\x2e\x96\xc7\x1a\xfb\xf9\x8d\xec\xd1\x20\x7f\xb4\x91\x55\x17\x95\x49\xf8\xc4\x46\xf6\x77\xa6\x92\x6f\x6a\x64\x4f\xce\x9f\xdf\xbf\xcb\xfa\x9d\x9e\xec\x73\x76\x59\x97\x38\xb5\xed\xfb\xd0\xd4\x0a\x4a\x52\x4a\xa5\x6a\xa3\x69\xb1\x10\xf5\x32\xd9\xb0\x92\x02\xd5\x4e\xc3\x25\xa3\x4e\xab\x91\xe1\x93\x45\xd8\xee\x6a\xae\x42\x87\xb3\x60\x3a\x5b\x8a\xf9\x3c\x5a\x8d\x74\xb9\xa8\xbb\xb0\x38\x90\x52\x85\x96\x0a\x34\xab\x99\xe6\x97\x03\xa5\x5b\xdd\x1e\x5d\xfb\x96\xf8\x12\x04\xa5\x61\x55\xcf\x2d\x8d\x1c\x22\xd9\xa2\xc1\x9a\x51\xb3\x53\x54\xd7\x61\xc9\xd5\x92\xa3\x5a\x7a\x16\xe6\xfa\x7e\x7a\x3a\x9f\xa5\x4f\x36\x43\x5f\x6f\x6c\xda\x79\x85\x9e\x35\xb6\x93\xdd\xed\x27\x45\xbe\x83\x8c\x4a\xde\xb1\x1e\xde\x78\xbd\x31\xe7\x3e\x6b\xec\x4f\xb1\xff\x06\xd1\xbc\x6f\x7f\xf7\xa5\x4b\xb2\xf9\x3b\x7b\x34\xf5\xca\xba\xcd\xdf\xbd\x7e\x71\xb9\x7e\xf2\x53\xf2\xf8\x8f\xd8\xe7\xa6\x35\xe7\x4e\xfb\x37\x91\xdd\x93\x17\xad\x3e\x78\xe4\xe1\x23\x64\xf7\xd1\x46\xb4\x7b\x5d\x90\x1d\x7e\x21\xc3\xb4\xaa\xcc\x3c\x1a\x24\x21\xc3\x05\xdc\x9f\x4d\xb3\xe1\x74\xee\x17\x89\xa3\xe9\x5e\x1f\x46\x4c\x31\x67\x41\xa6\x1f\xe5\x89\xd3\xe6\x60\xa6\x37\x4c\x91\x24\x9d\x24\x33\x1d\xa6\xa4\x52\x4b\xa0\x67\x97\x4e\x27\x35\x8d\xfa\xa0\xe1\xf6\x53\x6b\x51\x28\xe6\xa4\x51\x0e\x14\xa4\x4c\x63\x47\x48\xc0\x18\x39\xfd\xc0\x59\xcd\xc2\xc1\xcc\xb4\x30\xa3\x55\x41\x8e\x52\x70\x91\x9f\xa4\xed\xc6\xba\xcf\x2f\xa5\xd6\xc8\x37\xec\xd1\x20\x73\x7c\x7e\xe9\x23\x64\x77\x0a\xde\x47\x3b\x85\xc6\x20\x33\x7b\x94\xec\x9e\x68\xff\x51\xb2\xfb\x49\x8d\x4d\xbd\xb2\x58\xfa\x77\x93\xcd\xe5\xa2\xe5\xa3\x8b\xb5\x8f\x96\xff\x9e\xce\xe6\xde\xf2\xdf\x44\x76\x4f\x5e\x29\xbe\x7f\x64\xf7\xce\xf4\xe9\x73\x46\x76\x79\xb1\x90\xc9\x96\xf9\xa9\x95\x92\xa4\xfa\xd4\xb2\xec\xb9\x38\xc4\xae\xd4\x00\xab\x95\x15\x93\x4e\xa5\xa2\x2b\xe1\x7a\x02\xbd\x52\xbf\xa2\xf4\xdb\x08\x4c\x2b\x61\x5c\xcc\x08\xfd\xd6\x62\xa9\xa
e\xba\xf3\xb0\x53\x58\x35\xa0\x53\x12\x64\xad\x91\x84\xdd\x56\x2a\x2e\x3a\x8e\xba\x1f\xd9\xc9\x5a\xdf\xf0\x27\xcb\x8c\x36\xcf\x61\xca\xb1\x7a\x8f\xcd\x06\x1a\xa7\xd6\x57\xc0\xee\x27\x33\xa3\x34\x1d\x64\x97\xcd\x8e\xaf\xe5\xf3\x47\x5f\x1f\x22\xbb\x93\x32\xde\x31\xb2\x2a\x66\x8f\xf5\x70\x1f\xd9\x3c\xcf\xfe\xab\x23\x84\x37\xd9\xdf\x5e\x3e\xda\xd8\x9f\xbd\x43\xe1\xef\x26\x9b\x67\xac\x9c\x9e\xae\x5c\xde\x35\x8d\x7e\xc4\x3e\x37\xad\xd1\x3b\xed\xdf\x44\x76\x4f\xde\x9e\x71\x3f\xd9\x65\x2f\xbc\xff\x74\xb2\xb3\xd7\x8d\x1c\x94\xc7\x6b\x9b\x15\xca\x96\xd0\x87\xc1\xa0\xdc\x37\xa2\x38\xf0\x3b\xd0\xe3\x2b\xc6\x6a\x15\x17\x5b\x39\xb3\x8c\x52\x59\x31\x64\xed\x60\x54\x33\x0a\x9d\xe4\x44\x18\x3b\xc5\x2e\xeb\x75\xf4\xa8\xb0\x94\xd8\x14\x56\x5b\x59\x6e\x51\x34\xe2\xf2\xa8\x0d\x9a\xec\x85\x90\xc2\x25\xac\x35\xa6\x3d\x57\x5a\xa0\x7a\xde\x8a\xb3\x76\x4f\xea\xdb\x7a\x77\x2d\xe2\xd2\xc4\x13\x7a\x49\xa3\xda\xcf\xcc\x1a\xc9\x56\xa7\x7a\x5c\x1a\xfc\x59\x64\xa7\xe3\xb6\xf7\x30\xd9\x3c\xcf\xfe\x65\x1c\xdc\x66\x7f\x7b\xf9\xf0\x72\xfd\x33\x46\x36\x8f\xd8\x2f\x0c\xfd\xf8\x67\x92\xdd\x05\xd9\x54\x1f\x2c\xff\x87\xed\xcf\x83\x45\xff\x4e\xfb\xb7\x91\xdd\x73\xf7\x44\xdd\x4f\x76\x97\x35\xfb\xe9\x64\x97\x9e\x55\x3b\xb0\x57\x6a\xa3\x38\xe0\x15\x29\x98\x8f\x62\x37\x33\x5b\xb9\x5d\x96\xe1\x88\x5e\x5e\x35\x48\xc6\x9d\xad\xb0\x99\xf4\xe3\xe4\x52\x8e\xa5\x7c\xd4\xcf\x5a\xc8\x4f\xe2\x7a\x2a\xac\x94\x9d\xa8\xdf\xcf\x7b\x6e\xb6\x33\x9f\xb6\x6d\xb3\xb8\x8a\xab\xb3\x96\x47\xfb\xea\x62\x4f\x76\x7a\x98\x51\x8a\x83\x49\x27\xa3\x48\xa1\x43\x35\x6f\x3c\x82\x76\xae\xa6\xca\x85\x20\x26\x5e\xba\x82\x87\xe5\x95\xd1\x19\x15\xa2\xc1\xb1\xd8\x3f\x8b\xec\x2a\x48\xce\x3f\x4c\x36\xcf\xb3\xff\x86\xa1\xbf\x85\xec\x1e\x24\x9b\x87\xc9\x4e\xb7\xf2\xab\x9f\x49\x76\x17\x64\xf3\x86\xa1\xcf\x23\xbb\x69\xdc\xea\xdc\x69\xff\x26\xb2\x7b\xf2\x46\xc4\xfb\xc9\x2e\x77\xe1\xfd\xa7\x93\x5d\x39\xeb\x4d\xfd\xd2\xc2\x10\xaa\x12\x13\xf9\xb4\x19\x0d\xb0\x59\x9c\x71\xad\x8c\x64\x96\x96\xea\xa8\x94\x75\x8c\x75\x55\x2d\xa3\x55\x58\xe9\xf3\xdd\x72\xbe\x3b\x37\x7a\x61\xaa\xd9\x9b\x0c\xfa\x8e\x51\x1c\x67\x6b\x40\xcb\xac\xca\x56\xd1\x9f\x27\xb9\x95\x10\x46\x8d\xd5\x74\x90\xdd\x93\x5d\xbb\x89\x4a\xf3\x81\xcd\xad\x22\xa9\xa9\x95\xf9\x51\x50\xeb\x06\x00\x8f\x07\x71\xad\x93\xac\x49\xcd\x7e\x10\x74\xda\x55\x15\x67\x30\x3d\x6e\xc6\xf8\x59\x64\x57\x26\x63\xf7\x61\xb2\x79\x9e\xfd\xcb\x38\xb8\xcd\xfe\xf6\xf2\x61\xb2\x7b\x90\x6c\x1e\x26\xbb\x07\x37\x82\x3e\x4c\x76\x17\x64\x73\xd7\x86\xc8\x9f\xb4\x21\xf3\xb6\x9c\xdd\x73\x77\xff\xde\xbf\x40\xf1\xc3\x9c\xdd\xb3\x17\x28\x92\xee\x5c\xd2\x8b\x1a\x5c\x05\x45\xc3\xed\xd6\x8d\xa5\xc7\x4d\xfb\x39\x28\x2f\xab\xe1\x6c\x8c\xb1\xc0\x8f\xa7\x41\xa3\xb1\x50\x1b\xae\x6a\xe6\xe5\x62\xa6\x28\x44\xc9\x41\xc5\x10\xa6\xcc\x45\x54\x1d\xe4\xf2\xfd\xa5\x04\xad\xc1\xca\x28\xa7\x0b\x41\xdc\x05\xcd\xb9\x9e\xcf\xec\x09\x49\xb1\xe7\xd0\x1c\x72\x9d\x9e\xa5\x15\xe9\xa4\x52\x6e\x82\x61\x38\x4e\xdb\x45\x30\x72\x8b\xfd\x12\x29\x95\x3a\x85\xf9\x54\x33\xaa\xed\xe4\x93\x16\x28\x4e\x56\xef\x3e\xda\x29\x74\x60\x49\xaf\x1d\xeb\xe1\xbe\x9c\xdd\xf3\xec\x3f\x9a\xb3\xfb\x49\xbb\x8f\xd5\x67\xec\x7e\x9e\xc6\xad\xd6\x03\xf6\x2f\xcb\xff\xe8\xae\xf7\xbb\x72\x76\x8f\xd8\xd7\xad\xfc\xe2\x4e\xfb\x37\x91\xdd\x93\xb7\xdc\xdf\x3f\xb2\xcb\x5f\x78\xff\xe9\x23\xbb\x1c\x63\xe9\x32\xa3\x96\x2f\xce\x01\xe2\xcc\x15\x6c\x58\x24\x27\xaf\xed\x1a\xb2\xb5\x4e\x53\x76\x5c\x73\x5c\x76\x6c\x81\xd7\x41\xdf\x15\x06\x02\x94\xca\xd3\x91\x50\x05\x95\x9e\x95\x75\xc8\x70\x5e\x36\x51\x72\xee\x1b\x55\x31\xd0\x72\x95\x6e\x75\xda\xcf\x05\x29\xf1\x85\x90\x3c\xb7\xcf\x15\x9b\xfa\xc4\x5b\x0d\xa3\x56\x37\xeb\x1a\x0d\xde\x48\xcf\x
b9\xb4\x5f\xa9\xd9\x11\xe0\x58\xa7\x85\x2b\x20\x33\xa9\x59\xe3\xe3\xc9\xe0\x67\x91\xdd\x1d\x23\xab\xf1\xec\x58\x0f\xf7\x8d\xac\x9e\x67\xff\x32\x0e\x6e\xb3\xbf\xbd\x7c\xb8\xb1\x3f\xd0\xd8\xd4\x4f\x38\x62\xf3\x51\xfb\x0f\x93\xfd\x05\xd9\xde\xb5\x05\xfd\x27\x1d\xf9\xb8\x89\xec\x9e\x7c\xce\xe5\x7e\xb2\x2b\x5c\x78\xff\xe9\x64\x27\x76\x4d\xcd\x68\x0f\xa5\x71\xcd\xf5\x5c\x75\x32\x58\x8d\x89\x80\x78\x33\x57\x41\xc9\x4e\xd3\x9a\x4b\x63\x8f\xe7\xf9\x5e\x56\x8a\x7d\x23\x1d\x09\xeb\x29\x28\xb5\x5b\xf5\x28\x0d\xb3\xf9\xd5\x70\x64\xa9\x91\xc1\x8b\x41\xae\x44\xa9\xd4\x37\x0d\x3d\x1e\x36\x48\x4b\xcf\xbe\xe4\xec\xa8\x6d\x76\xaa\x1c\x14\x26\x8c\x1b\xf0\xe1\x64\x6d\x14\x27\x43\x38\xc9\x78\xe9\xce\x42\x25\xa4\x55\x15\x5a\x30\x97\x49\x4f\x83\x79\x4e\x3e\xb8\xf6\x10\xd9\x3d\xb0\xcf\xad\x28\xeb\x19\xeb\x58\x0f\xf7\x91\xcd\xf3\xec\x5f\xc6\xc1\x6d\xf6\xb7\x97\xff\x23\x46\x36\x85\x07\xec\x0f\x70\x46\x78\x94\xec\x9e\x78\xde\xe5\xf2\xa9\xb2\x1f\x2d\xff\x87\xed\x9b\xeb\x5e\xee\x4e\xfb\xb7\xed\xb3\x7b\xee\xe1\xb2\xfb\xc9\xee\xb2\x66\x3f\x9d\xec\xf4\x76\x55\x2e\xb5\x0c\x83\x04\x65\x21\x9d\x9e\xaf\x52\x68\x25\x37\x36\xf5\x21\xc5\x62\x81\x75\x85\x81\x35\xc2\x96\x53\xf0\x0c\x93\x4d\xcd\x76\x25\x34\xb8\x79\x8d\x34\xec\x42\xb6\xbc\xc8\xb5\x1a\x1c\xe9\x46\xd9\x5a\x73\x96\x0d\x86\x25\xc5\x1d\x35\x99\x50\xb0\x9d\x66\x7a\xbf\xcf\x0e\x4a\x2a\x18\xd7\x0b\x28\xf0\xa9\xdd\xeb\x6b\x93\xb9\x41\xcb\xc9\x9a\xb5\xb0\xf1\xaa\x96\xf6\xeb\xb5\x69\x77\x56\xf6\xb9\x69\x30\xb5\x8f\x53\xee\x9f\x45\x76\x05\xca\x0d\x1f\x26\x9b\xe7\xd9\x7f\xa3\x85\xdf\x4c\x76\x3f\x91\x6c\x1e\x26\x3b\xcb\xb0\xe5\x9f\x49\x76\x17\x64\xf3\xc6\xce\xe6\xcf\x23\x3b\x83\x13\xd2\x77\xda\xbf\x89\xec\x9e\x7c\xa2\xf3\xbf\xfe\x39\xf5\x3b\x42\x5a\x75\x59\xbf\x19\x55\x7b\x86\xaf\x64\x68\xab\x18\xce\xc6\xeb\x61\xb6\x13\x0f\x4b\x9a\xda\x9c\x65\xd2\xfa\xd2\x8f\xd3\xd9\x45\xb7\xdf\xa8\xff\x7c\xb2\xcb\x29\x4c\x7d\x98\x6c\x9e\x67\xff\x8d\x9f\x76\xfc\x5b\xc8\xee\x41\xb2\x79\x98\xec\xdc\x4e\x4a\xfd\x99\x64\x77\x41\x36\xe3\x07\xcb\xff\x61\xfb\x4d\x61\xa0\xdc\x69\xff\xdd\x93\xd4\xfb\x07\xd8\x3f\xe3\x38\xf5\xb9\xaa\x03\x8f\x9e\x9e\x87\xde\xfa\x17\x1e\xdb\xea\x15\x22\x7e\x5b\xcb\x61\xd8\xb8\x50\xeb\x8d\xb1\xfa\x4a\xcb\x4b\x09\x77\x05\xc3\x94\xfa\xb1\x17\x6d\x7f\xff\xe7\xe4\xfa\xfb\x74\xcc\x0e\xc5\xcc\xd4\xf4\x56\xbb\xa9\x16\xf5\xf6\x0f\x8a\xa9\x56\xda\x5a\x73\x8f\x4a\x4d\xaf\xf4\x4f\x35\x6e\x1f\x36\xa9\x66\xb3\x27\xda\x5e\x19\x4c\xd4\x9b\xc5\xaa\xda\xec\x27\xca\x5a\x3f\xf1\x75\x7f\xd7\x31\xbf\x25\x36\x12\x1e\x9e\xb0\xb7\xdc\x0f\x0f\x17\xcf\x75\x3c\x7c\xc7\xeb\xf0\x3d\x97\x5f\x39\x4a\xb0\xb7\xf9\xf7\x24\xf7\x08\xf6\xae\x79\xf6\x62\xe0\xdc\x29\xcf\x37\xd9\x15\x8f\xce\x7f\xf1\xf8\xec\xdd\xf7\xdd\xbb\x90\xcd\xbe\x3f\xc5\xdd\x33\xe5\xd7\x1c\x7f\xc7\x7a\xa2\xa3\x17\x1b\x1d\x2d\xf1\xf5\xf0\xe9\x87\x4a\xf2\x1c\xbc\x3f\x58\x80\xd7\x75\xb0\xbf\x8f\xc3\xe1\x2b\xef\x77\x3f\xde\xbb\xff\xef\x49\xfe\xee\x94\x5d\x73\xf4\xc4\xcc\xb9\x87\xdb\x1b\x57\xc2\x64\xfb\x6b\xc0\xbb\xbf\x4f\x72\x6e\xab\xeb\x9a\x6f\x47\x23\xe7\xae\x39\xd3\x6f\x89\xa9\x1f\x44\xaf\x7d\x3b\xf9\x71\xe1\xb3\x37\xcf\xf2\xf4\x44\xe5\x55\x87\x2f\x4d\x5e\xab\xf4\x6b\x5e\x87\x31\xd9\xff\xf7\x3c\x4f\xc3\x98\xbc\xe1\xe3\x8b\x99\x73\xef\x02\x16\x5e\xa9\xee\xe3\xaf\x21\x9f\x5c\x3e\xc9\xc9\xa3\xc2\x6b\x8e\x5e\x98\xbb\x89\xc3\x4e\x7e\xce\xef\x78\xf9\x24\x6f\x8f\x0a\xaf\x79\x7b\x61\xee\xdc\xdb\x59\xc8\xa2\xab\x6d\xfd\xe4\xe7\x58\x8e\x97\xcf\xf2\xf7\xa0\xf0\xaa\xbf\xe7\xe6\xce\xfd\xdd\x7e\x7e\xb5\x7f\x3d\x3e\xff\xf9\xe4\xf2\x49\x0e\x1f\x15\x5e\x73\xf8\xc2\xdc\x9b\x43\x03\x27\x0c\x63\x16\x7c\x4b\xe0\x30\x64\x11\xf5\xcd\x2b\x65\x38\x7d\x10\xd1\xe9\x9b\x67\
x95\xe3\x44\xe5\xd5\x92\x5c\x9a\xbc\xc6\x12\x21\x9b\x7d\x4b\x44\x4b\xc7\x33\xd9\xf2\x4a\x09\x8e\xee\x3f\xdb\xf7\x77\x1d\x7f\xc8\xeb\x8b\x21\xea\xf9\xdb\x27\xf9\x7f\xae\xf4\x5a\x21\xae\x98\x7d\xb3\x24\x7b\xd9\xeb\xc5\xd9\xc7\x1c\xc1\x2e\xf6\xe8\xf1\x79\xe8\x45\x3d\xab\x19\x3f\x70\x3b\xd3\xd4\xd4\xb6\xb6\x13\xbd\xd4\x93\xa8\xe9\xc7\x51\x6c\xa7\x55\xd4\xf3\x09\x12\x05\x8c\x25\xbe\xee\x25\x7e\x4b\xf4\x0a\x5a\x53\x3b\xbc\x4f\xfc\xfb\xaf\x04\x77\x78\x9c\xfb\xeb\x81\x26\x0b\xa3\x5d\x5f\xbe\x29\xc5\xdd\x5e\x9e\xab\xd9\x38\xb9\x1f\xa7\x9c\xb9\x18\x32\xd7\x75\x3c\x7b\xdb\xf6\xbe\x25\x48\xbc\x3a\xbe\x99\x06\x0e\x7d\xdd\x16\x37\xd5\x40\x56\x21\x9b\xdd\xed\xd8\x41\xc3\xc6\xa7\x63\xc3\x38\x73\xeb\xed\xa1\xe2\x46\xdc\x62\xec\x71\x17\x5e\x94\xec\xbc\x38\x21\x98\x1b\x1d\xd9\xdf\x79\xcc\x91\x53\x25\x1b\x47\xce\x07\xc2\x37\x7a\x12\xd2\x29\xf3\xe6\x0f\x7a\x72\xaa\x64\xe3\xc9\xc9\x8f\x26\xdf\xee\xc6\xbe\x43\x7d\xd8\x93\x53\x3d\x7b\x67\x5e\x46\x09\xe7\xce\xe0\x30\x7a\xdb\xa1\x13\xe2\x78\xcc\xa3\x4b\x45\x1b\x97\x2e\xb8\xf1\x87\x18\xd5\xfd\x30\xb2\x03\xd6\x6a\x54\x12\x2f\xbf\x28\x98\x30\xe3\xc9\x34\x41\xfd\xc9\xd4\x65\x11\xdb\x9a\xfd\x7f\x01\x00\x00\xff\xff\x56\x99\xa3\x0e\x94\xbc\x00\x00") + +func operation_fee_stats_1CoreSqlBytes() ([]byte, error) { + return bindataRead( + _operation_fee_stats_1CoreSql, + "operation_fee_stats_1-core.sql", + ) +} + +func operation_fee_stats_1CoreSql() (*asset, error) { + bytes, err := operation_fee_stats_1CoreSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "operation_fee_stats_1-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc3, 0x9a, 0xb4, 0xd0, 0xf, 0x87, 0x5f, 0xa2, 0xfe, 0xd3, 0x59, 0xb0, 0x2e, 0xdd, 0x39, 0x30, 0x1c, 0xed, 0xb0, 0x10, 0x24, 0xad, 0x85, 0xb3, 0x5e, 0x67, 0xf3, 0x5, 0xb9, 0xcb, 0x5f, 0xf0}} + return a, nil +} + +var _operation_fee_stats_1HorizonSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x79\x8f\xaa\x48\xf7\xff\xff\xf3\x2a\xc8\xcd\x24\x7d\x6f\xba\xef\x34\xfb\x72\xe7\x3b\x4f\x82\x8a\x4b\xab\xb8\xaf\x93\x89\x29\xa0\x50\x5a\x05\x1b\x70\xeb\xc9\xf3\xde\x7f\x11\x70\xa3\x41\x11\xb0\xef\x3c\xf9\x4d\x67\x72\x47\xa4\xea\x6c\x75\xea\x7c\xea\x54\x1d\xf0\xfb\xf7\x5f\xbe\x7f\x47\xea\x86\x65\x8f\x4d\xd8\x6a\x54\x10\x05\xd8\x40\x02\x16\x44\x94\xe5\x7c\xf1\xcb\xf7\xef\xbf\xec\xee\xe7\x96\xf3\x05\x54\x10\xd5\x34\xe6\xc7\x06\x2b\x68\x5a\x9a\xa1\x23\xdc\x6f\xf4\x6f\xd8\x49\x2b\x69\x8b\x2c\xc6\xa3\x5d\x77\x5f\x93\x5f\x5a\x42\x1b\xb1\x6c\x60\xc3\x39\xd4\xed\x91\xad\xcd\xa1\xb1\xb4\x91\x3f\x10\xf4\x77\xe7\xd6\xcc\x90\xa7\x1f\xbf\x95\x67\xda\xae\x35\xd4\x65\x43\xd1\xf4\x31\xf2\x07\xf2\xd0\x69\xe7\xd9\x87\xdf\xf7\xe4\x74\x05\x98\xca\x48\x36\x74\xd5\x30\xe7\x9a\x3e\x1e\x59\xb6\xa9\xe9\x63\x0b\xf9\x03\x31\x74\x8f\xc6\x04\xca\xd3\x91\xba\xd4\x65\x5b\x33\xf4\x91\x64\x28\x1a\xdc\xdd\x57\xc1\xcc\x82\x67\x6c\xe6\x9a\x3e\x9a\x43\xcb\x02\x63\xa7\xc1\x1a\x98\xba\xa6\x8f\x7f\xf7\x64\x87\xc0\x94\x27\xa3\x05\xb0\x27\xc8\x1f\xc8\x62\x29\xcd\x34\xf9\x69\xa7\xac\x0c\x6c\x30\x33\x76\xcd\xf8\x4a\x5b\x68\x22\x6d\x3e\x53\x11\x90\x52\x1e\x11\xfa\xa5\x56\xbb\x85\xd4\xc4\xca\xc0\x6b\xff\xdb\x44\xb3\x6c\xc3\xdc\x8e\x6c\x13\x28\xd0\x42\x72\xcd\x5a\x1d\xc9\xd6\xc4\x56\xbb\xc9\x97\xc4\xf6\x49\xa7\xf3\x86\x23\xd9\x58\xea\x36\x34\x47\xc0\xb2\xa0\x3d\xd2\x94\x91\x3a\x85\xdb\xdf\x3f\x83\xa1\xec\x7c\xfa\x0c\x96\x3b\xbf\xfa\x3c\x05\x5d\x6e\xb7\x6b\xe7\x0a\xb8\x73\xe4\x4b\xcc\x4e\x5a\x1d\x89\x3b\xcd\x4b\x62\x4e\xe8\x9f\xb4\xf4\xc8\x3a\x52\x8d\xa0\xaa\x42\xd9\xb6\x46\xd2\x76\x64\x98\x0a\x34\x47\x92\x61\x4c\x2f\x77\xd4\x74\x05\x6e\x46\x27\xca\xe9\x16\x70\x1c\xdd\x1a\x19\xfa\x48\x53\x6e\xe9\x6d\x2c\xa0\x09\x0e\x7d\xed\xed\x02\x26\xe8\x7d\x94\x24\x91\x14\xb7\xf5\x9d\x41\x65\x0c\x4d\xa7\xa3\x05\xdf\x96\x50\x97\x6f\x52\xe1\xa4\xfb\xc2\x84\x2b\xcd\x58\x5a\xde\x77\xa3\x09\xb0\x26\x31\x49\x25\xa7\xa0\xcd\x17\x86\xb9\x9b\x8e\x5e\x4c\x8d\x4b\x26\xae\x2d\xe5\x99\x61\x41\x65\x04\xec\x5b\xfa\xef\x9d\x39\x86\x2b\x79\xf3\x32\x86\xd0\xa7\x3d\x81\xa2\x98\xd0\xb2\x2e\x77\x9f\xd8\xa6\xe2\xe0\xce\x68\x66\x18\xd3\xe5\x22\x42\xeb\xc5\x35\x91\xdc\x56\x40\x33\x6f\x24\xbc\x0f\xba\x91\x3b\xec\xe2\x84\xaa\x42\x33\x5a\xd3\x3d\xf9\x18\x5d\x3c\xb3\x46\xeb\xe4\x84\xd6\x1b\x98\x9c\x86\xe2\x6b\x3d\x16\xbb\x0e\x13\xfb\xea\x08\x58\x67\x01\x48\xda\x5e\x75\xa3\xc9\x61\xa6\x47\x69\x6c\xb8\x72\x18\x57\x1b\x6a\x96\x3d\xb2\x37\xa3\xc5\x75\x92\xbb\x96\xc6\x22\x6a\x4b\x18\xb5\xd9\x1e\x4a\x2e\x37\x96\xf6\xd3\xfd\x6a\xb3\xeb\x51\x4c\xda\x46\x1b\x4c\x17\x23\x77\xd6\xb6\xac\xe5\x35\xce\x87\xc6\xb2\xa1\xc0\x1b\xd7\x05\x07\x37\x58\x00\xd3\xd6\x64\x6d\x01\xf4\x8b\xe0\x7d\xad\xeb\x68\x71\xe3\xda\xe4\x80\x68\xb7\x4a\x10\xdc\xf1\x66\xfe\x8e\xf1\xa2\xf0\x73\x1b\xde\x9d\xbe\x3b\x98\xbb\x91\xf4\x3e\xee\xf0\x61\xbf\xf4\x73\x9c\x61\x14\x51\x82\xb1\x61\x2e\x46\x73\x6d\xec\x2d\x18\x2e\x88\xe0\x6b\x19\x59\xc7\xdb\xd7\x7b\x97\x28\x47\x75\x4e\xb7\x77\xb6\x56\xe9\x54\x45\x44\x53\x5c\xce\x39\x21\xcf\x77\x2a\xed\x88\xb4\x43\x9c\x2e\x05\xca\xde\x70\x5f\xa6\xe4\x5c\x45\x57\x7f\x8f\xd2\x2d\xa1\xd1\x11\xc4\x6c\x0c\x9b\xed\xd6\xd9\x16\x7c\xbb\x99\xf3\x19\x91\xc8\xbd\x15\x18\xb1\xed\x71\x35\x1b\x59\xc3\x90\x59\x7f\x8b\x7e\xc1\x24\xa2\xf5\xf5\xd6\x7d\xd1\x1a\x7b\x8b\xbc\xc8\xba\x79\x11\xe0\x16\x5d\xdc\x2e\x11\xdb\x7a\xcb\xbf\xe8\xf2\xec\xd7\x8b\x51\x24\xf2\xc5\x90\xcb\x8d\x4f\x42\x82\xd7\x90\x2f\x14\x9a\x42\x81\x6f\x07\x34\x9e\x6b\xbb\x8c\x43\x93\xe1\x57\x7d\x39\x87\xa6\x26\xff\xf9\xd7\xb7\x08\xbd\xc0\x26\x46\xaf\x19\xb0\xec\xaf\x40\xdf\xc2\x99\xb
3\x15\x13\xa1\x87\xaa\x99\x81\x5d\xf2\x1d\x31\xdb\x2e\xd5\xc4\x0b\xfa\x8c\xc0\x78\x7c\x94\xee\x09\xf9\x20\xe8\x05\x1a\x7b\xed\x12\xd0\xd8\xe9\xea\x74\x3f\x0a\xff\x84\xdc\xa2\x88\xa3\x7a\x04\x0a\x42\xbf\x2d\x88\x2d\x1f\x89\xd9\x62\x6c\xbd\xcd\xf6\xbe\x98\x2d\x0a\x55\xfe\x03\x87\xdf\x7f\x71\x77\xe1\x44\x30\x87\x3f\xf6\xdf\x21\xed\xed\x02\xfe\xf0\xba\xfc\x8e\xb4\xe4\x09\x9c\x83\x1f\xc8\xf7\xdf\x91\xda\x5a\x87\xe6\x0f\xe4\xbb\xb3\x39\x97\x6d\x0a\xbb\xf1\xf2\x28\xef\xe9\xfd\x72\x46\xf1\xfc\xa6\x47\x38\x5b\xab\x56\x05\xb1\x7d\x81\xb2\xdb\x00\xa9\x89\xe7\x04\x90\x52\x0b\x79\xd8\x6f\xbb\xed\xbf\xb3\x1c\x22\x0f\x7e\xce\x7b\xf5\x3d\x9e\x07\x0b\x5d\xd5\xe7\xcc\x96\x62\xad\xed\xb3\x27\xd2\x2b\xb5\x8b\x07\xb1\x4e\xf7\xdf\xce\xd8\x1f\xa9\xf8\x04\xb9\x45\xf9\x0f\x44\x1c\x03\xd4\x2b\xcf\x8b\x71\xab\x51\x41\x16\xa6\x21\x43\x65\x69\x82\x19\x32\x03\xfa\x78\x09\xc6\xd0\x31\x43\xc4\xfd\xc2\x53\x71\xaf\x3b\x9a\x27\xfe\xde\x57\x8f\xf2\xef\xc7\x36\xc8\x96\x07\xcf\xbe\x4a\x1f\x69\x0a\xed\x4e\x53\x6c\x9d\x7c\xf7\x0b\x82\x20\x48\x85\x17\x0b\x1d\xbe\x20\x20\x8e\xf6\xd5\x6a\xc7\x8d\x77\xad\x76\xb3\x94\x6d\x3b\x2d\xf8\x16\xf2\xeb\xe8\x57\xa4\x25\x54\x84\x6c\x1b\xf9\x15\xdb\x5d\xf9\x47\xe3\xea\x44\x4c\xa6\xdd\x35\xf2\xa9\x29\x87\x07\x29\x17\x25\x52\x25\xd3\x2f\x02\x87\x83\x8a\x87\xaf\x62\x69\xf8\xf5\x17\x04\xc9\xf2\x2d\x01\xe9\x15\x05\x11\xf9\x15\xfb\x13\xfb\xeb\xf9\x57\xec\x4f\xfc\xaf\xff\xfc\x8a\x3b\x9f\xf1\x3f\xf1\xbf\x90\xb6\x7b\x13\x11\x2a\x2d\x61\x67\x14\x41\xcc\x7d\x0b\xb4\x4c\x04\x1c\x48\x68\x99\xeb\x1c\xee\x6d\x99\xff\x8b\x63\x99\x8f\x98\xea\xd9\xe1\x80\xc3\xd1\x0c\x71\x84\xed\x0f\x14\x1d\x89\x11\xa4\xb5\xb3\x15\xf2\xc7\x31\x02\x3c\xb9\x5f\xb7\x07\x75\x01\xf9\xe3\x74\x46\x7c\x0b\x9a\xb5\xa9\xca\xe8\x27\xe8\x13\x71\x3f\x8d\xa3\x4b\x18\xb8\x04\x4a\x2a\x65\x10\x51\x9f\xa4\x67\x13\xf2\x5c\xdc\xa3\x97\x7d\x94\x36\x68\x99\x97\x58\xda\x00\xa2\x7e\x69\x4f\x27\xc9\x45\x69\x77\xc8\xa5\x40\x15\x2c\x67\xf6\xc8\x06\xd2\x0c\x5a\x0b\x20\x43\xe4\x0f\xe4\xe1\xe1\xf7\xf3\xbb\x6b\xcd\x9e\x8c\x0c\x4d\x39\x39\x4a\x3b\xd3\xf5\x74\xfd\xeb\xa9\xe8\x4c\xb0\x68\xea\xb9\x73\xf1\x34\xf9\x76\x35\xd2\x14\x44\xd2\xc6\x9a\x6e\x3b\x0b\x03\xb1\x53\xa9\xb8\xea\x80\xf9\x6e\x19\x8f\xc8\x13\x60\x02\xd9\x86\x26\xb2\x02\xe6\x56\xd3\xc7\xbe\x66\xfa\x72\x7e\x58\xf2\x23\x9a\x6e\xc3\x31\x34\x7d\x4d\xd4\x19\x18\x5b\x88\x35\x07\xb3\xd9\x47\x36\xb6\x31\x9f\x7d\x64\xf2\x15\xa7\xa8\x6f\x87\x96\x1f\x87\xdd\x9f\x37\xc4\x35\x87\x7f\xb7\xe3\x60\x12\x1b\x6e\x3e\x18\x64\xb1\x98\x69\xce\x9e\x3d\x62\x6b\x73\x68\xd9\x60\xbe\x40\x76\x63\xe6\x5c\x22\xef\x86\x0e\x3f\x0a\x1a\x96\x15\xed\xd7\xa3\x5e\x3a\x15\x4d\xe6\x43\xf2\x15\x42\xd5\x73\x43\xbe\xd9\x76\x57\x74\x98\xf3\x45\x49\xcc\x36\x05\x67\xf9\x95\x19\x78\x5f\x89\x35\xa4\x5a\x12\xbb\x7c\xa5\x23\x1c\xae\xf9\xfe\xf1\x3a\xcb\x67\x8b\x02\x82\x5d\x53\x26\xb6\xd9\xfd\x84\x3e\xb8\xa2\xb7\xe9\x81\xe8\x70\x63\xaf\xc0\xec\xeb\x43\x88\xc6\x0f\x3f\x7e\x98\x70\x2c\xcf\x80\x65\x7d\xf3\x0f\x97\x7b\x56\x11\xe0\x5b\x34\xf9\xed\xc2\x40\xb9\xb9\x71\x62\xcd\xdc\x1d\x9d\x83\x5e\xc1\x33\xe3\xb8\x57\x17\x2c\x66\x60\x73\xd9\x50\x82\x9a\x63\x78\x70\x73\x77\xfb\x2f\xa0\x03\x45\x5f\x9a\x61\xc1\xdb\x0b\x29\xb9\xed\x29\xcd\x4f\x73\xda\x4b\x8a\x20\xb5\x9e\x28\xe4\x90\xcc\xe0\x8a\x46\xee\x0e\xdd\x65\x85\x0e\xb4\x7c\xb7\x7f\xd3\x94\x30\xd9\xf6\x7b\x3e\x49\xbd\xce\xa3\xe3\xb9\x9d\x6f\xce\x8c\xc2\x22\xfd\xc7\x2d\xae\xb0\x96\x5f\x9c\x83\x8f\x2f\x21\xde\xec\xf8\x71\xf0\x2d\x05\xda\x40\x9b\x59\xc8\xab\x65\xe8\x52\xb8\xb3\xed\x37\xca\x92\xda\xc1\xa3\xe3\xd9\x61\x7f\x6e\x1d\x22\xdb\xc9\x61\x72\xa4\x59\x18\x74\x8e\x1d\xdc\xd1\x33\xcb\xc9\xce\xa8\x33\x10\x07\x
39\xf6\x51\x0e\xf5\x71\x38\x0e\x44\xb4\xf6\x87\xc3\x64\x1f\x30\x19\x4b\xfb\x88\x4d\xfe\x3e\x26\x04\xf6\xd5\x4e\x6e\xdb\xe5\x42\x89\xdc\xf6\xe0\x3a\xde\xa5\xef\x9c\xfd\x83\x2e\xd8\x87\xf5\x80\x0d\x66\x23\xd9\xd0\x74\x2b\xd8\x07\x55\x08\x47\x0b\xc3\x98\x05\xdf\x75\x4e\x3e\x55\x18\x36\xd6\xce\x6d\x13\x5a\xd0\x5c\x85\x35\xd9\xad\x43\xed\xcd\xc8\x59\x26\x69\xef\x61\xad\x16\xa6\x61\x1b\xb2\x31\x0b\xd5\xcb\x3f\x46\x7b\x67\x81\x40\x81\xa6\xb3\xbc\x70\xbf\xb7\x96\xb2\x0c\x2d\x4b\x5d\xce\x46\xa1\x8e\xe2\x29\x0e\xb4\x19\x54\xc2\x5b\x85\x4f\xab\x90\xbd\xeb\xa4\xb3\x2c\xe4\x3c\xe4\x0a\xe6\x45\x8f\x36\xd7\xe3\xd7\xad\x2a\xa7\x0b\x63\x17\x79\x7c\x16\xac\xdd\xa4\x68\x42\x98\xbb\xc8\xeb\x23\xec\x05\x37\xbf\x00\x83\x27\x27\x3b\xa9\xf9\xe6\xb5\x34\xe7\xbc\xaa\x2a\x24\x15\xda\xad\xfc\x65\x57\x15\x07\x01\x13\x02\xa0\x37\xf3\x8d\xa5\x29\x1f\xca\x34\x42\xa0\x67\x1f\x4e\x1e\x1e\x7e\xfc\x08\x4f\xc5\xc2\xe7\x81\x77\xb0\x96\xd4\x9c\x5e\x2d\xe0\xd7\x54\xd7\x0b\x5e\x48\x8c\x83\x5e\x4e\x2d\x4c\x28\x5b\x5f\x25\xe2\xa5\x46\x5e\x71\xe4\xa5\x26\x6e\x1e\x1c\xd8\xe0\x63\x4d\xe7\x95\x76\x17\xd9\x1d\x5a\x5d\xe0\xe8\x88\xa4\x59\x23\x0b\xce\x66\xd0\x44\x24\xc3\x98\x41\xa0\xef\x31\x49\x93\xe1\x48\x3f\xc3\x5f\xf7\xbb\x73\x4c\x3e\x56\x13\x8d\x7c\x68\x7d\x56\xcf\xe4\xbf\x79\x72\x4c\x1f\x58\xf9\xe9\x48\x3d\x72\x6a\x83\x91\x6c\x51\xc8\x96\x91\xaf\x5f\x4f\x2d\xf8\x1f\x04\xfd\xf6\xed\x1a\xa9\xa0\xee\x7b\xa3\xfd\xdf\x07\x3b\x46\xa0\x77\x66\x53\x1f\x79\x9f\xc1\x1d\x01\x2f\x4e\xa5\xe0\x13\xee\x14\x26\x57\x70\xcd\x42\x44\x24\x8d\x12\xc2\x92\x60\xe9\xb5\xfa\x80\x74\xd0\xf4\x0a\x97\xcf\xc2\xd3\x1b\x95\x4d\x88\xa8\x57\xb8\x7d\xc4\xd4\xb0\x0e\x17\x50\xf5\xac\x26\x24\x45\x5f\xdd\xfb\xe7\xa9\x48\x91\x93\x28\x2f\xf6\x5f\x49\xcd\xa2\x02\xef\x65\x0c\x0d\x6c\x7b\x64\x1d\x38\x5f\x76\x59\x40\x78\x1a\x11\x96\xa0\xfd\x94\x14\xcb\xde\x8c\xa0\xbe\x82\x33\x63\x01\x83\xb6\x2d\xed\xcd\x2e\xe1\x59\xce\xec\x90\x9b\x73\x68\x83\x90\x5b\xbb\x54\x2b\xec\xb6\xa5\x8d\x75\x60\x2f\x4d\x18\xb4\xc3\xc6\xd1\xdf\xfe\xfc\xeb\xb8\x76\xf9\xfb\xbf\x41\xab\x97\x3f\xff\xf2\xdb\x1c\xce\x8d\x90\xcd\xb0\x23\x2d\xdd\xd0\xe1\xc5\xb5\xd0\x91\xd6\x47\x32\x9e\x66\xda\x1c\x8e\x24\x63\xa9\x2b\xce\x8e\x35\x6b\x02\x7d\x0c\xfd\xd9\xd8\x39\xb4\xee\x2c\xb1\xa3\x36\x86\xca\x79\x52\xa6\xc3\xf5\xc8\xe7\x2b\xd7\xf6\xd2\x10\x4d\xd9\x4f\xc3\x7d\x6d\x57\x94\xd8\xe1\xce\x43\xa7\x90\xee\x4a\xd9\x58\x4b\x68\x5f\xd8\x40\x3d\xdd\xaa\x3a\xdd\x3e\xbd\x2d\xc1\x48\x4f\x89\x88\x55\x75\x17\x95\xba\x98\x98\x44\x51\x32\x14\x82\x53\x53\x33\x72\x61\xe2\x45\x45\xaf\xe0\x45\xb0\xaa\x39\x60\x03\x44\x35\xcc\x2b\xc7\x48\x48\x8e\x6f\xf3\x57\xd4\x0b\x21\x79\xe9\x38\x26\x0a\xd9\x92\xd8\x12\x9a\x6d\xa4\x24\xb6\x6b\x1f\x8e\x64\x1c\xe4\x6e\x21\x5f\x1f\xb0\x91\xa6\x6b\xb6\x06\x66\x23\xb7\x3c\xe6\x37\xeb\x6d\xf6\xf0\x84\x3c\xe0\x28\xc6\x7d\x47\xe9\xef\x28\x81\x60\xec\x0f\x9c\xfd\x41\x32\xbf\xa1\x04\x4e\x72\xf4\x23\x8a\x3f\x7c\xfb\x3d\x1a\x75\x7c\xe4\x3e\xf6\x70\x66\x55\x69\x3b\xb2\x0d\x4d\xb9\xcc\x89\xa3\x29\xe6\x16\x4e\xc4\x68\x69\xc1\x03\xfc\x8c\x34\xfd\xc3\xa3\x16\x17\xf9\x91\x24\x4a\xb2\xb7\xf0\x23\x47\x40\x51\x46\xfe\x0d\xab\x8b\x3c\x28\x92\x22\xf0\x5b\x78\x50\x23\x17\xec\xf6\xcb\x6e\xe7\xa0\xf3\x22\x0b\x9a\x40\xf1\x9b\xd4\xa0\xf7\x2c\xbc\x08\x16\x81\x05\x4b\x62\xd4\x2d\x2c\x98\xd1\xdc\x50\x34\x75\x1b\x5d\x0b\x16\xa3\xf1\x9b\x58\xb0\x67\x5a\x78\xf5\xcd\x11\xf8\x30\x24\x4d\xdc\xc6\x67\x37\xe8\x60\x3c\x36\xe1\x18\xd8\x86\x79\xd9\xa7\x38\x14\x43\xb9\x5b\xc8\x73\x0e\x79\x77\x33\x73\xb4\x51\xcc\xcb\xd4\x71\x06\xbb\x69\xa8\x31\xd4\x21\xef\x8d\x82\x93\xc2\x5e\x66\x40\x71\xcc\x4d\xd6\xc1\xb0\x53\x06\x87\x9c\x68\x17\x00\x2e\x33\xe2\
x68\xee\x36\x4d\xf0\xb3\x81\xf6\xb2\x50\xf7\x89\xda\x4b\x9c\x30\x94\xa1\xc8\x9b\x46\x04\x23\x5c\x75\x0e\xb9\xfb\xc5\x11\xc7\x30\x9c\xa1\x6f\xd3\x84\x1c\xa9\xda\x66\xff\x74\x81\x31\x9f\x8d\x54\x0d\xce\x2e\x86\x46\x0c\xa3\x30\xec\xa6\x20\x8c\x51\xfb\x43\x95\xfd\x66\xf7\xe6\x8a\x1a\x34\x73\x5b\x98\xc7\xe8\x91\xa6\x8f\xa1\x65\x8f\x3e\x6e\xa7\x5f\x61\xc5\x70\xec\x6d\x23\xc2\x9c\xc1\xb5\x73\x6e\x01\x2e\x83\x09\x86\xa3\x28\x41\x7a\x4c\x42\xb0\xf6\xe2\x21\xfc\xad\x60\xfb\xe1\x20\x7e\x2f\x3d\xf6\x84\x3c\x14\xb2\xfd\x72\x81\x6e\x8a\x64\x4d\x2c\x09\xf5\x6c\x55\xcc\x67\x18\x02\xe7\x49\x82\x1e\x52\x75\x31\xd7\x6a\x56\x0a\xbd\x32\x53\xc8\x54\xb2\xd5\x46\xa5\x94\xaf\x91\x2d\x46\x18\xf4\xba\x1d\xbf\x85\x42\x99\xe0\x3b\x26\x99\x7e\xa1\xf1\xd2\xeb\x56\x7a\xb5\x41\x31\x5f\xe9\xb6\xcb\xbd\x2e\x95\x2f\x14\x79\xa2\x22\x0e\x06\xf8\x4b\xa3\x5c\x65\x6a\xfc\x0b\xdf\x11\x1a\xf9\x0e\x5d\xa9\x67\x5b\x42\xbe\xdb\xaf\x89\x91\x99\x10\x0e\x93\x66\x7d\x50\x2c\x55\xf0\x6c\x89\xc8\x8b\x0d\x32\xd3\xaf\xe4\xab\x62\xae\x92\x7f\xe9\x88\xf5\x0e\x5e\x1c\x10\xc3\x6a\xbe\x55\xac\x89\x9d\xac\x50\xe3\x5b\x3d\xa6\x91\x65\x6a\x7d\xbc\xf8\x10\xb7\x9e\x63\xb7\x8a\xbb\x32\x0c\x5e\x0d\xdc\xb1\x7c\xf5\x37\x0b\x5e\xae\x75\x78\x42\x88\x27\xc4\x36\x97\x30\x82\x73\x7c\xac\x62\xb8\x65\x79\x77\xcb\xc9\x79\x2a\x9a\x9e\x25\x25\x4f\x08\xf6\xe4\x16\x40\x5d\x57\x34\xe8\xe4\x3c\xee\x24\xd8\x9f\x9e\x9f\xcc\x01\x02\x45\x69\x92\x61\x28\x8c\xe6\x1c\xa9\x76\x1e\xfb\xf7\x17\x37\x8c\x7f\xf9\x81\x7c\xc1\xd0\xdf\x50\xf7\xef\xcb\x13\xf2\xe5\x58\xce\xb1\xbb\xa7\x03\x5b\x5b\xc1\x2f\xff\x0d\xf3\x54\x3f\x3b\xdc\xc7\x0e\x77\xc6\xfb\x6e\xec\x8e\xda\x71\x38\x4d\x7d\xa2\x76\x2e\xbb\xcf\xd2\x8e\x25\x08\x1a\xfb\x3c\xed\x3c\x76\x77\xd6\x0e\x7f\x42\x70\x8a\xa1\x39\x16\x65\x58\x86\xb8\xbb\x76\x98\x8f\xdd\xfd\xc7\xce\x65\x87\x61\xdc\xe7\xcc\xbb\x33\x76\x9f\xa0\x1d\x46\x32\x24\x4b\xa2\x14\xc3\x7c\x8a\x76\xa7\xec\x3e\x4d\x3b\x92\xfe\x04\xcf\xc4\x7d\xec\x3e\x4d\x3b\x96\xf9\x24\xcf\x3c\x65\xf7\x59\xda\x51\x38\xfb\x39\x88\x70\xc6\xee\xfe\x31\x13\x63\x76\x8b\x78\x86\xc0\xd9\xfb\x23\x02\xe6\x63\x77\xff\xb1\xc3\x70\x96\x25\x39\x94\xe2\xd8\x4f\x19\xbb\x33\x76\xf7\xd7\x8e\xa5\x58\x8e\x23\x58\x9a\x75\xa7\x1d\xea\x70\xb3\x6c\x60\xda\x9a\x3e\x1e\x49\x60\x06\x74\x19\xba\x7c\x8f\x8c\x23\x33\x20\xce\x19\x04\xaa\x73\x3f\x7d\x76\x71\xd2\x55\x68\x0d\xb5\xf1\x64\xc7\x0f\x7b\x42\xbe\xb8\x0b\xd7\xd1\x14\x6e\x77\x3c\xe2\x66\x63\x37\x0d\xaa\x23\x15\x89\x33\x9e\x0b\xdd\xc9\xca\x1e\x83\x7b\x5b\xd9\xa7\x4f\x34\x2b\xc7\x4c\x47\x5d\xa9\xae\x24\x2b\x41\xe5\xad\x71\x93\x95\x7d\x89\xeb\x5e\x59\xe6\x09\x79\xc0\x68\x95\x56\x15\x89\x92\x71\x94\xe4\x58\x14\x12\x24\x94\x29\x82\x20\x65\x12\xe5\x24\x92\xc2\x25\x8a\x43\x69\x8a\x42\x15\x1a\xe2\x92\x4c\x41\x9a\x92\x18\x16\x25\x08\x0a\x62\x38\xab\xe2\xd4\xc3\x8e\x06\xae\x62\xac\x4c\x90\x92\x44\xd2\x0c\x8d\xb1\x40\x65\x14\x1c\xe5\x08\x5a\x81\xb4\x0c\x48\x40\x28\x98\x0c\x00\xcb\x62\x32\x41\x03\x99\x03\x8c\xc4\x72\x32\x64\x30\x82\xa4\x48\xa8\x90\xb8\x9b\xa5\x12\xbe\x0d\x0e\xfa\x07\x41\xff\x20\x69\xff\xbe\x87\xfb\x35\xf9\x1b\x81\x91\x0c\xcb\x5e\xbc\xcb\x3d\x1c\x12\x08\x0c\x65\x76\xf1\x87\xde\x8d\xe7\x87\xbf\x27\x04\x23\x9c\x7f\xbd\x7f\x0e\xdf\x1e\x3e\x60\x4f\xc8\x03\xcf\xf3\x7c\x76\x53\xd9\xbc\xe6\x5a\x4c\x53\x1f\x0c\x4c\x02\x1d\x4f\x71\x62\xbe\x9d\x1a\x5c\xb1\xfc\x56\x2a\xe6\x80\x35\xd7\x97\x2f\xef\xba\x80\x76\x6b\x58\xf6\x85\x17\xc0\x82\x7b\x17\xb9\xe5\xcc\xcc\xbe\x59\xcc\x16\x96\xdf\xca\x19\x59\x6b\x57\xe6\xc5\x9a\x38\x7f\xe4\xf9\xd6\x5b\x61\x5a\x98\xd8\x42\x67\x47\x9a\xef\xd7\xbb\x55\x7d\xcc\x1f\xfe\x2a\x18\x85\x3e\x4f\xb9\xbe\x6a\xa1\xed\x97\x46\x56\x2d\x2d\xb9\x5c\xd6
\xaa\x09\x53\x0d\x2b\xeb\x8f\x93\x0a\x5e\xca\x6f\xb7\x95\x6e\x55\xe9\x57\x5a\x6f\xcd\xea\xd4\x1a\x8e\x9f\x31\xeb\xb9\x2d\xe2\xd5\x36\x5a\x6f\xf3\x95\xd5\x70\x0d\xd1\x89\x59\x79\x5e\xd5\xb8\xb7\x2d\x9b\xeb\x97\xb6\x13\x87\xb2\x2c\x92\x15\xf0\xbe\xc0\x1b\x47\x66\x7c\xa7\xc3\x07\xfc\x0d\xf9\x3e\x46\x36\x78\x3e\x87\xbe\x04\xdd\xfe\x47\xff\xb9\x5e\x85\x86\x4c\x7c\xff\x5c\xa0\xd3\xf1\xe3\x07\x56\xc6\x71\x59\x62\x65\x4e\x46\x25\x82\x91\x55\x8e\x55\x19\x15\xd2\x8c\x2a\x13\x80\xa5\x51\x86\xa6\x64\x02\xd2\xac\x2c\xa3\x90\x90\x01\x43\x90\x00\xc7\x49\x4a\xa1\x69\x1a\xe0\xa4\x02\xd4\x07\x27\xc6\xe1\x81\x6e\x4d\x85\x7a\x3b\x49\xe0\x34\x73\xf5\xae\x97\x90\x11\x0c\x43\x5f\x9a\x0b\x68\xc4\xb9\x40\xae\xb5\x2d\xb9\x95\xb3\xef\x6a\x7d\x5e\x57\x1f\x87\xcf\x6b\x7a\x32\x56\x66\xeb\x47\x50\x5d\xd7\xea\x65\x19\x35\xb4\xa6\x32\x9c\x1b\x53\x7c\xa5\xcd\x45\x66\xb1\x1d\x9b\xcd\x19\xc1\xad\x0d\xd2\x2c\x34\x09\xe3\x15\xce\xdf\xb1\x57\x6e\x55\x68\x2e\x0a\xfd\xf1\xaa\xbc\xca\x4f\xba\x68\xbb\xe4\x8c\x9d\x33\x17\x4e\xdc\xf3\xb1\x90\x1f\x16\xdf\x3a\xa5\xa9\xde\x22\x16\x94\x35\x94\x48\xf9\x5d\x66\xcb\xaf\x28\x90\x0a\xad\x6a\x57\x15\xc1\xdb\x23\xf7\xfe\x3e\xb3\xf1\xe6\xcb\x96\xb2\xd7\xdb\x82\x26\x6e\x1f\x9f\x75\x7e\x83\x99\x4c\x5e\x79\xe4\x0a\xeb\x0d\xa6\xf6\x6a\xad\xea\xb4\xa0\xf6\xb7\xbd\xac\xbd\x69\xbc\x39\x73\x6d\x10\x30\x17\xea\x46\x90\x3f\xfd\x8f\xcf\x05\x3c\xfa\x5c\xa0\xd2\xf1\xe3\x07\x8c\xc6\x64\x4c\x52\x68\x09\xa7\x00\xc4\x00\x46\x30\x24\x0e\x58\x46\xa6\x65\x89\xc3\x65\x96\xa6\x00\x41\xcb\x04\xcb\xaa\x12\xcb\xca\x0a\xc3\x71\xac\xc4\xd2\x18\x80\x12\xa0\x69\x8e\x56\x76\xe1\x9b\x74\xfe\x0b\x72\xeb\x50\x6f\xa7\x18\x16\x0b\x9f\x29\xfb\xbb\x6e\x1a\x41\xd0\x24\x8b\x5e\x98\x0b\x6c\x54\x58\x18\xc8\x05\x0e\x6c\x7b\xcb\xc2\x44\x44\xcb\x86\x5a\xd8\x52\x95\xd2\x10\x88\x56\xad\xf4\x48\xbe\x8a\x70\xa8\x91\x03\x40\xd3\xc3\x85\x34\x5c\x71\xdb\x6e\xa5\x5f\x19\x62\x64\x86\x7f\x95\xfa\xcd\x72\xaf\xfc\x56\x2d\x8e\xeb\xa4\xf2\xbc\xd5\xda\x95\x41\xc1\xa8\x4c\x86\x83\x85\x6c\x15\x5c\x97\x74\xa6\xc2\xc9\x50\x96\x99\x61\xbe\xb8\xe8\xac\xa8\xcd\xb8\x2e\x76\xa9\x7a\x57\x64\x61\x8f\xdb\x6e\xcd\x17\x39\xd3\x19\xe6\xca\x78\xb6\x55\x2e\x2a\xab\xfa\xd8\x1e\x53\xd2\x54\xed\xcf\x2b\x39\xde\xee\x4a\xeb\x97\x79\x3f\x07\xcb\x99\x89\xfc\xba\x6a\x64\xbb\xeb\x82\x32\x7d\xb3\x4a\x62\x91\x36\x9b\x4b\xa1\xb0\xde\x51\xee\x04\x4c\x85\xea\x38\xc8\x9d\xfe\xc7\xa7\x02\x19\x7d\x2a\x90\xe9\xb8\xf1\x03\xc4\x18\x85\x94\x29\x92\x81\xac\x8c\x53\x0a\xe0\x28\x96\x25\xb8\xdd\x54\xa1\x29\x15\x28\x24\x4e\x52\x32\x4a\x63\x12\xc6\xa2\x14\xb1\x9b\x72\x04\x45\x41\x92\x45\x81\x24\x01\x80\xb1\x98\xbb\xc1\x8d\x05\x7a\x35\x11\xea\xec\x0c\x4b\x33\xe1\x0b\xa8\xfd\x5d\x37\xe7\xa4\x39\x8c\x25\x2f\x4c\x05\x32\xe2\x54\x78\xcc\x73\xed\x7c\xf3\xf1\x35\xdb\x07\xb3\xc1\x98\x32\xa6\xd8\xec\xd1\x6e\x4c\xfb\xfc\x40\x2a\xf0\x1d\x36\xfb\x2e\x76\xa9\x52\xd6\x5c\xbe\x15\x32\x2b\x66\x5c\x7c\xac\x6a\x66\x5f\x2e\xcc\x56\xaa\xd9\xda\xea\xf4\xab\xd5\xa3\xa4\x4d\x2d\x4f\x82\x82\x55\xe9\x77\x7a\x03\x5a\x42\x0b\xdd\x9c\xec\x0c\xdd\x6e\x2a\xcc\xd7\xc7\xa1\xe4\x44\x95\xa1\x78\xf5\xbd\x5c\x94\xd5\x77\x5c\x57\x89\xb5\xcc\x68\x58\x75\x21\xb5\x35\x6b\x38\xb1\x66\x5b\xa6\x2a\xe4\x48\x03\x2b\x12\xba\x3e\x28\xaf\x65\x6b\x63\xcf\x5a\x13\x6d\x6d\xbe\xf0\xaf\x1d\xbb\x2b\xa9\xb5\xfa\x80\x23\x29\x32\x8f\x66\x6a\xdd\xe5\x84\x01\x14\x6f\xbe\x3a\xde\xdf\x08\x98\x0a\x85\x46\x90\x3b\xfd\x8f\x4f\x05\x2c\xfa\x54\x20\xd2\x71\xe3\x07\x09\xe7\x68\x75\x97\x53\x50\x32\x46\x40\x95\xc2\x39\x05\x90\x1c\x8e\x4b\x94\xca\xa2\x1c\xae\x32\x12\xc1\x41\x79\x17\x9a\x65\xc8\x11\x40\x66\x01\x8a\x4a\x0c\x90\x31\x02\x93\x01\x0b\x88\x4b\x53\x01\x0f\x7
5\x76\x8e\x22\xd1\xf0\x6c\x61\x7f\xd7\xdb\xa0\xc0\x58\x96\xbd\x30\x15\xa2\x26\x0b\x4c\xb9\xc7\x16\xba\xdd\x75\x9b\xc1\xca\xca\xe2\xa5\x64\xf6\xc9\x6c\x8b\x63\x6a\x70\x9b\x1b\x56\xe9\xb6\x59\x32\x78\x9b\x5e\xb7\x8b\x65\xed\x75\xf0\xda\x5f\xb4\xdf\x07\x7c\x9b\xe8\x74\x15\xea\x91\x45\xe7\x4b\x89\xc9\x6d\x55\xdb\xac\x1a\x5c\x3d\x5f\x7c\x06\x43\x51\x6c\x4e\xd0\xd6\xaa\xea\x0c\x9d\x33\x15\x4e\x62\xf3\xfb\xa2\xb7\x51\x3b\xca\x74\xbd\x28\x3d\xce\xc1\xa4\xda\x29\x74\xd7\xe8\xa4\x8e\xab\x65\xc6\xc8\x17\x0b\x9c\x86\x62\xf5\x36\x5c\xad\xde\xcd\xe7\x97\x49\x79\xf6\x5c\x54\x6a\xaf\x45\xd0\x2a\xe7\x66\x04\x83\x2e\x65\xbd\x4e\x0a\xc4\xb8\x5e\x6a\x93\x6f\xef\xab\xb5\xa4\xd4\x33\xad\x57\xe1\xc5\x99\x6a\xd5\x80\xa9\x20\x58\x41\xee\xf4\xff\xcf\x54\xc0\xd3\x71\xe3\x07\x9a\x50\x38\x56\xa5\x08\x1a\x42\x9a\x55\x30\x09\x67\x24\x4a\x62\x39\x15\x27\x80\x4a\x11\x18\x26\x31\x14\xbd\x9b\x5d\x2a\x50\x31\x12\x25\x80\x82\x4a\x14\x2e\xd1\x04\x21\xa1\x8c\x04\x39\xee\x52\xb2\x80\x85\x39\x3b\x89\x52\x2c\x15\x8a\x0a\x87\xbb\xee\x66\x13\x49\x71\x97\xf2\x66\x3c\xe2\x4c\xc0\xeb\xc3\x57\x4c\x5c\x52\x06\x2a\xbd\x30\x3d\x52\xdf\xd6\x56\x9d\x4d\x81\xe8\x2e\x8c\xe9\xe3\x2a\xcf\xd7\xec\x2c\x56\xc6\xab\x4c\x86\xa1\x87\xcd\x61\x4b\x30\xb7\xc2\xb8\xd0\xd1\xd8\xe6\x52\x97\x30\x7c\xd9\x63\x95\xf2\x7c\x08\x7a\x73\xac\xa2\x14\x1b\xf8\xa3\xfd\xae\xe9\x79\x1b\xb8\x23\xe7\xcc\x04\xc7\x39\x4b\x87\x7f\x78\xe7\xda\x3a\x5e\xaf\xf9\x7a\x63\xea\xb6\x7f\xe4\xba\x05\x58\x2f\x64\x35\xd9\xd0\xf5\xf2\xba\xd0\x6b\x8c\x5b\x66\xa6\xa6\x4f\xf2\x56\xe9\xa5\x36\x1f\xbc\xd4\x9e\xd9\xa2\x21\x6b\x05\xb4\x9d\xcb\x74\x8a\xd4\x3c\x5f\x2f\x14\x09\x02\xcd\x6e\xc7\xbd\xf1\x6b\x89\x6b\xeb\x16\x5e\xaa\xd4\xe6\xa2\xd5\xc0\xa4\x06\x69\xbd\xbd\x1b\x86\xe0\xcc\x94\x52\xc0\x4c\xc9\x95\x82\xbc\xed\x7f\x7c\xa6\xdc\x90\x4a\x60\xe9\x78\xb9\x53\xa4\x8d\xa0\xee\xee\x22\xc6\x31\xe8\x77\x14\xfb\x8e\x62\x08\x8a\xfe\x70\xfe\x0b\xf5\x66\x1c\xa7\x99\xf0\x99\xb0\xbb\xbb\x83\x0c\x12\xe7\x48\x8e\x66\x70\xee\x52\x5e\x1c\xec\xe9\xae\x48\x3f\x7b\x50\xc2\xff\x32\xfd\xb2\x46\x6e\x9f\xb7\xad\x72\x86\xc9\xe9\x39\xae\x88\xa3\x9b\xd7\xcc\xa3\x85\x8e\x6d\x6b\x5d\x5a\xbf\x63\x7d\xa5\xd5\x1b\x80\xcc\x0b\xc8\x3b\x70\x22\x04\x38\x71\xf0\xdf\xde\x89\x79\x3e\x33\xfd\x04\x45\x52\xfd\x7b\x70\x9d\xe9\xfa\xf6\x6b\x84\xc7\xa0\xe3\xee\xc6\x86\x14\xb1\x87\x55\x92\xe0\x21\x33\xee\x0a\x19\x7f\x85\x08\x16\x8f\x0c\xe1\xaf\xfc\x88\x47\x86\xf4\x57\x74\xc4\x23\x43\xf9\x4b\x27\xe2\x91\xa1\xfd\x05\x1f\xf1\xc8\x30\xfe\xca\x8a\x78\x64\x58\x7f\xc5\x44\x3c\x32\x9c\xbf\x34\x21\x1e\x19\x0c\xf5\x57\x54\xc4\xa4\xf3\xa1\x76\x21\x26\x9d\x0f\x45\x09\x31\xe9\x10\xfe\xe3\xff\x98\x74\x48\x7f\xd5\x42\x4c\x3a\x94\xff\xc0\x3e\x26\x1d\xda\x5f\x67\x10\x93\x0e\xe3\x3f\x62\x8f\x49\x87\xf5\x57\x06\xc4\xa4\xc3\xf9\xcf\xe0\x63\xc6\x41\xd4\x7f\xb8\x1e\x93\x8e\xff\xd4\x3c\x2e\x1d\xff\x71\x78\x5c\xbd\x7c\xc7\xd0\x44\x4c\x32\xa4\xef\xb8\x3c\x26\x19\xca\x77\x7e\x1a\x93\x0c\xed\x3b\xf5\x4d\xe7\x9d\x1d\xa9\xd4\x9d\x5e\x7e\x6e\xec\x09\xd9\xc9\x1e\xad\xe0\x36\xe4\xd5\x15\x89\xd7\x13\x27\xa8\x7d\x8a\xfc\xc7\x0b\xf6\xb0\x83\xf2\xf7\x17\xdb\x48\x54\x28\xf0\x84\x7c\x51\x4d\x63\x9e\xe8\x18\xfc\x09\xb9\xb5\x04\xe4\x0e\x35\xe0\xe1\xd6\x73\x57\x2a\xc7\x0b\xf2\x5f\xeb\x45\xb7\x9e\xb7\xb2\x3a\x5e\xa0\xff\x5a\x2f\x8a\xf5\xce\x57\x82\x87\x0b\xfc\xa3\xf5\x12\x68\x7e\xb0\x5e\x82\x11\x88\x61\xbd\x94\x1f\x11\x09\xb5\xde\x7e\x01\x7c\xb8\xf8\x37\xee\x45\xb3\xde\xd9\x32\xfb\x70\x41\xff\x6b\xbd\x1b\xac\xe7\x25\x17\x87\x8b\x80\x99\xfb\xaf\xf5\x42\xad\xe7\xa5\x42\x87\x8b\x7f\x67\xee\x0d\xd6\xdb\x27\x6e\x87\x8b\x7f\x57\x2c\x91\xac\x77\x9e\x66\x1e\x2e\x02\x56\x2c\xff\x62\x6e\x80\x
f5\xce\x92\xd9\xc3\xc5\xbf\xbe\x17\xc9\x7a\xa7\xb9\xf7\xe1\x33\x7b\x52\x50\xad\x2e\x75\x05\x9a\xae\xee\xf1\x1e\x4c\x75\x74\x74\x1f\x0f\x4d\x3a\x0c\xd7\xab\xbb\x13\x3e\x40\x7b\x8b\xd5\xbc\x3d\x82\xc3\x67\xf2\xae\x56\x4b\xe0\x78\x77\xb7\xda\x95\xfd\x86\x80\xf7\x3a\x26\x78\x7f\xcb\x4d\xaf\xb8\x8b\xbb\xa7\x11\xfa\x06\x9c\xc0\x53\x12\x36\x7c\xdf\xea\x2a\x21\xdc\x47\x28\x6c\x5b\xef\x2a\x21\xc2\xb7\x57\x10\x9b\x10\xe9\xdf\x74\x88\x4b\x88\xf2\xe5\xdf\xb1\x25\xa2\xfd\x89\x7c\x5c\x42\x8c\x2f\xa7\x8d\x2d\x11\xeb\x4f\x8e\xe3\x12\xfa\x90\x27\xc6\x25\x74\x7e\x6e\x92\xc4\x91\xce\x4f\x4e\xe8\x24\x94\x70\x7f\x1e\x17\x9b\x92\x3f\xa7\x89\x2f\x13\xe9\xcf\x8e\x62\x53\xa2\x7c\x99\x42\x7c\x99\x68\x7f\xce\x11\x9b\x12\xe3\x5b\x7f\xc7\x97\x89\xf5\xaf\xe4\x63\x53\xe2\xfc\xab\xda\xd8\xb1\x12\xf5\x51\x8a\xad\xdd\xf9\x69\x4a\x12\x3b\x9d\x9f\xa7\x24\xb1\xd3\xd9\x89\x0a\x1b\x7e\x86\x71\x9d\x10\xe9\x5b\xcb\xc5\x26\xe4\x5b\xde\xc4\x97\x88\x3e\x27\x14\x7e\xae\x72\xeb\x2b\x4d\xd3\x38\x59\xb9\xf6\xa2\xba\x5b\xce\x56\x42\x5f\x60\x9a\xc2\x4a\xe4\xf4\xed\x5f\x38\xca\x50\x24\x41\x50\x8a\xca\x72\x1c\x54\x28\x4e\x01\x32\x27\x11\x94\xcc\x72\x00\x53\x20\x8d\x02\x06\xa5\x48\x19\x45\x69\x45\x22\x59\x19\x62\xb8\x22\x53\x28\x8b\x01\x5a\x92\x64\x4a\x7d\x78\x42\xdc\xc7\xf9\xe3\xe7\x18\x27\x95\x7a\xdc\xbe\x42\x29\xfc\xa1\x38\xee\x42\x3d\xf8\xfe\xee\xd9\x3a\xc8\x2d\x6d\x2a\x50\xc6\x8b\xdd\x55\xf4\x41\xad\xab\x0c\xdf\xec\xfe\xa2\x5d\xcc\xd8\x92\x3c\x40\xe7\xd9\xb9\x2a\x67\x4a\x65\x61\xdc\xd3\x67\xab\x7c\x69\xe2\x54\xe5\x0d\x9d\x4e\x4e\xe9\x5b\xd1\x57\x79\x93\x39\x7e\x14\x9c\x7f\xcd\x37\x91\xae\xc0\x1a\x18\xbf\x6e\xaa\xa0\x53\xe7\xe8\xcc\xbb\x6a\x71\x10\x95\x0d\x53\x1c\xf6\xdf\x33\xbd\x97\x69\xde\x28\x33\xd3\xd5\xf4\xa4\xf2\xfb\x50\x7a\xe4\xfd\xf5\xd8\x8e\x96\x37\x1c\xf2\x54\x57\x11\xfa\xed\x09\x33\xad\xcd\xf4\x19\x3f\x1f\x6e\xa7\x12\x49\xe3\x4a\xbe\x42\x3f\x76\x9a\xea\x8a\x99\xea\x38\x8e\xf7\x72\xcc\xd2\xe8\x67\x6c\xf2\x7d\x81\xbe\xb4\x5b\x75\x3b\x43\xe4\x0a\xdb\xc9\xab\xca\xdb\x7d\x9c\x32\xf3\x2f\xb2\xcc\x0c\x94\xbe\xb8\x9c\x34\xa4\x96\x98\xe3\xd7\x7f\xfc\xf1\x70\x5a\xe6\x75\x5a\x0b\xde\x08\xd2\x8d\x3f\xb6\x3f\xd6\x26\xe6\x9c\x46\x27\x6a\x48\xf3\xf1\x1c\xeb\xe2\xca\x98\xea\x62\xf3\x37\x0c\xce\xaa\x72\x01\xb3\x37\xaf\xad\x41\x79\xc8\xad\x85\xb1\xd1\xca\x00\x78\xd0\x8d\xe7\x4b\xac\x98\x55\x8f\xb6\x2d\x04\x17\x37\x09\x61\x55\x4f\x99\x94\xf9\xfb\xc7\x36\x1a\x7f\xe7\x63\xd5\x27\x7f\x76\x69\x10\x86\x4d\x52\x6f\xd9\xba\xb0\x59\x34\x9e\x09\xa3\x28\x3e\xbe\x63\x4c\x73\xab\x59\xd8\x4c\xad\xe6\x07\xf3\x46\x6f\x6c\x2e\x5b\x8f\x6d\x97\x54\x5b\x9e\xa2\xa4\xf3\x71\x7c\x90\xe7\xe3\x5f\x68\x15\x9b\xe0\x93\xff\x66\xfe\x43\x90\x25\x13\xf0\xaf\xfa\xf8\x67\x96\x20\x2b\x75\xfb\x43\x3c\x37\xeb\xf7\x80\xd9\xa5\x3b\x9b\xb5\xd4\x23\x0a\xe2\xcb\x78\xa1\x13\x7c\x2b\x3b\x29\xe5\x17\x94\xb4\x69\x95\x7a\x6e\xd5\xf7\xeb\x1a\x7d\x61\x8f\xfc\xe5\x84\xfa\xdf\xcc\x5f\x79\xef\xe5\x63\xf2\x3f\x99\x4b\x63\x3e\xc0\x17\xe2\xd8\xa2\x66\x1c\xe9\x0d\x3e\xdb\x16\xaf\x6b\x54\x14\xe2\xf1\x77\x6d\xf1\xf7\xbd\x82\x96\xb3\xa5\xe0\xbc\xfe\x78\x5f\x43\xeb\xfe\xbb\x03\x51\x07\x2c\xae\xaf\x24\x4e\xd0\x4e\x51\x25\x94\x20\x39\x54\xc5\x54\x00\x20\xcb\xe0\xbb\xa5\x16\x86\xd3\x92\xc2\x30\x28\x50\x59\xc8\xa8\x3b\x68\x93\x59\x88\xd2\x2c\x21\x43\x40\x51\x80\x05\x80\xa0\x00\x2d\x01\x1a\x05\x2e\xda\x25\x7a\x83\xdb\x01\xed\xe8\x63\xe5\x79\x28\x9e\x51\x38\x1e\xfe\x50\xec\xfe\xee\x59\x8e\x9d\x14\xed\xfc\x61\xe0\xee\x68\x27\xb6\xab\xec\x4b\xab\xdf\x97\xcc\x32\x99\xc9\xac\xb6\xcf\xf4\x96\x6d\xec\x1c\x96\x59\x52\x45\xd8\x25\x87\xea\x2b\x50\xb5\xa2\xde\x57\xe0\x42\x69\x57\xac\x3e\xb6\xaa\x49\x8d\x71\x31\x57\x5e\
xe7\x5b\x0d\x4c\xea\xda\xb9\x5a\xf3\x2d\x67\x4e\x5e\xb8\xd9\x6b\x13\x92\xc5\xb1\xd6\xcc\x34\xfe\x19\x68\x57\x94\xb1\x49\x62\xb4\x49\x8f\x7f\x48\x88\x8f\x8c\x76\x3f\x11\x6d\x12\xa3\x9d\xda\x1f\xb3\x3f\x13\xed\x7c\x68\x13\xf8\x50\xea\x3d\x23\x7c\x1f\x23\x33\x31\xf9\x07\xa2\xdd\x4f\x42\x9b\x54\x6c\xf1\xba\x46\x2b\xe3\x78\xfc\x3d\xb4\xbb\x57\xd0\x4a\x17\xed\x64\x8e\x62\x38\x96\xa4\x15\x02\xa3\x29\xc8\xaa\x24\xce\x60\x38\x8e\xcb\x00\x90\x32\xc4\x50\x85\x94\x00\x86\x93\x24\x8a\x61\x80\xc5\x51\x52\x91\x69\x56\x62\x31\x54\x45\x29\x8a\x25\x51\xd5\x43\x3b\x22\x25\xb4\xc3\xae\xa3\x1d\xc1\x5c\x78\x1d\x8a\x77\xf7\x6c\xff\x36\x29\xda\xf9\x1f\x5e\xba\x3b\xda\xbd\xcb\xc4\xe6\x71\x65\xd9\x58\x41\x7f\xe9\x3f\x2f\x37\xcd\xc1\xba\x6b\xb7\x59\x86\xd9\x96\xb4\xa2\xa0\x0d\x1b\x5b\x7e\x3c\x28\x3c\xd6\x1f\xc9\x85\xfe\x26\x54\xe9\xc9\xf3\x23\x27\x8a\x63\xc3\x5e\xb6\xfa\xd9\xae\xc4\x2a\xaa\x54\x00\x7a\x26\xd7\xe8\x0a\xcf\x6f\xe6\xbc\x53\xd2\x07\x5a\xe6\x1f\x92\xdb\xe5\x39\xc8\x27\x46\x9b\xf4\xf8\x87\x3c\x98\xf6\x29\x68\x97\x10\x6d\x12\xa3\xdd\xac\xf3\xcc\xff\x4c\xb4\xf3\xa1\x4d\xc8\xd3\x55\xf7\x8b\xf0\x4d\x72\xc8\xc5\xe4\x7f\x0f\xb4\x8b\x8b\x36\xa9\xd8\xc2\x97\x67\xc7\x40\xbb\x7b\x05\xad\x74\xd1\x4e\xa5\x51\x49\x65\x18\x1a\x53\x28\x92\xe2\x54\x0c\x65\x19\x9c\xc6\x65\x40\x32\x24\x4b\xcb\x1c\x8a\xb1\x28\xab\xe0\x04\x47\xb2\x34\xca\xd1\x32\x87\x53\x24\x06\x55\x40\x92\x94\x2c\x93\x2a\xe4\x1e\x9e\x10\xda\xdb\xc9\x8c\x5b\x6b\x72\xb2\x93\x49\x5e\x43\x3b\x92\x20\xd9\x4b\x2f\x3c\x22\x9d\x57\x83\x9d\x9d\x0d\xba\x5e\x59\xa6\x5f\xa1\x46\xbc\xce\x8d\x12\xdb\x2e\xcc\x72\xcf\x70\x2c\x13\x4c\xbd\x6f\x17\xcb\xe5\xf7\x5e\x97\x5d\x77\xb5\x61\x06\x64\x97\x54\x85\x72\x5c\xf7\x04\xed\xb2\xbe\xe1\xfd\x80\x76\xb7\x46\x5c\xf7\xcf\x87\x76\x60\x8f\x86\x19\xfe\x71\xb6\x62\xc4\x92\x40\x6c\xcd\x52\x7f\xd6\xad\xf7\x37\x3a\xb6\x18\xe4\x09\x76\x53\xb5\xde\xa6\x00\x90\xf8\x74\x61\x36\x1a\x6b\xbe\x31\xe3\x95\x02\x5b\xca\x96\x48\xfb\x71\x58\xe9\x93\x0b\x38\xa3\x65\x7e\x98\x2f\x0c\x36\x0c\xa1\x0e\xb7\xfd\x72\xa6\x68\x2e\xbb\x68\x73\x25\x16\xb2\x7c\x8a\x68\x77\xb2\x0c\xbe\x15\xe9\x3b\xc4\x8b\x58\x3b\xda\x36\x13\x3c\xad\xae\xa0\x4d\x7a\xfc\xfd\x63\x1b\x8d\xbf\xf3\x31\xed\xdd\xab\xc0\xf7\xb0\x5d\x8b\x70\x49\xf8\x2f\x96\xad\x56\x02\xfe\xff\x88\x9d\xdc\x24\xfc\x45\xb5\xb0\x8e\xc9\x3f\x10\xed\xf2\xc9\x6c\x81\xcb\x47\x7a\x21\xfe\x77\x3f\x5b\xf8\xc6\xe2\x16\xfe\x1e\xda\xdd\x2b\x68\xa5\x8b\x76\x1c\xa4\x68\x82\xc2\x31\x8c\xa0\x69\x4e\xc5\x59\x5a\x21\x39\x0a\x85\x14\xa5\xa0\x0a\xc7\xa0\xb4\xaa\x90\x18\x40\x25\x02\xa0\x12\x43\x2a\x14\xa4\x30\x9c\x66\x25\x05\xc5\x39\x16\x32\x90\x64\x5c\xb4\x4b\x67\x27\x93\xe2\xd8\xeb\x68\xc7\x10\xe1\x6f\x79\xda\xdf\x3d\x2b\x3b\x49\x9a\xdb\xf9\xcf\x96\xee\x9e\xdb\xe5\x21\xcc\x94\xa1\xac\x1a\xd4\x0a\xa5\x31\x65\x4b\x34\x54\x29\xcf\xbe\x8f\x6b\xf4\x58\xe8\x34\x59\x6d\xa6\x4c\xcb\xda\x98\xc4\x45\x74\x30\x23\x87\x24\xc1\x94\x17\xaf\x64\x15\xad\xf4\xd4\x9c\x26\x4d\x56\x65\x85\x7e\x5c\x19\xfd\x2a\x65\x0a\xf9\x4a\xb7\xba\x18\xe4\xcd\x67\x8a\xbf\x13\xda\xdd\x8a\xf4\x65\x69\x3a\x7d\x3b\xda\x36\x1f\x3c\xad\xa2\xa3\x5d\x42\xfe\xf1\xce\x0d\x9d\x8f\x89\xa3\x7d\x82\x68\xcb\xdf\x21\xc2\xc5\xca\xed\x52\x44\xdb\x58\xf9\xcc\x4f\x3a\x2b\xbb\x86\x76\x71\x64\x69\x54\x8f\xf4\x3e\x7d\xe5\x93\x60\xe5\xe5\xa1\xdd\xbd\x82\x56\xca\xb9\x1d\x4a\xd2\x18\x03\x70\x5a\x26\x71\x0c\x57\x31\x0a\x30\x04\xc5\xb0\x2a\xc5\x41\xc0\x71\x18\x41\xa1\xa8\x2a\x4b\x24\x8b\xb1\x2a\x8b\xb3\x32\xc6\x60\x1c\xaa\x62\x24\x45\x02\x94\xc4\x58\x5a\x7e\x78\x42\xa8\xf4\xaa\x54\xae\xe6\x76\x14\x8b\x91\xe1\xb9\xdd\xfe\xee\x59\x41\x63\x52\xb4\xbb\x9a\xdb\xa5\x8d\x76\x05\xaa\x98\xcd\x95\xf1\x85
\xfa\xcc\x30\xf5\x85\xaa\x8e\x57\xd4\x04\xcc\x98\x06\xba\xdd\xaa\x4b\xa9\x53\xa9\x88\x9c\xf5\x3e\x27\xf4\x97\x41\x85\x1b\xb4\x69\x74\x51\xb1\x96\xa5\x2c\x39\x68\xad\x37\xfc\xb6\xbb\xb2\x3a\xc5\x6d\x83\xd0\x5e\x48\x56\x68\x3c\x12\xdd\xd6\xf3\xb2\xa4\x69\xfc\x38\x45\xb4\x3b\xe9\x7a\x2b\xda\xd4\x28\xb3\x94\x3b\xda\x36\x5e\x6e\x97\x1e\xff\xa4\xb9\xdd\x49\xc3\x5b\xd1\x86\x67\xe6\xd6\x3c\xe9\x7a\x3e\x49\x3e\x91\x99\x96\xdf\x12\xf0\x4f\x1c\xe1\x99\xb9\x35\x3e\x89\xf0\xb1\xd0\x3e\x09\x7f\x6c\x51\x93\x63\xf2\x0f\x44\x3b\x21\x99\x2d\xd2\xf4\x85\x38\xfc\xa7\xc9\x72\xbb\x7b\x05\xad\x74\xd1\x4e\x22\x30\x45\x86\x24\xce\x51\x80\x82\x2a\x0a\x59\x15\xe3\x54\x89\x25\x58\x8c\x86\x0a\xcb\xa2\x0a\xc5\x10\x90\x02\xf8\x2e\xf3\x63\x29\x09\x52\xb2\x4c\xa3\x38\xe0\x50\x95\xc3\x29\x54\xa6\x5c\xb4\x4b\x2b\xb7\xa3\xae\xa3\xdd\x85\x97\x55\xb3\xc7\x57\x55\x7b\x65\xf2\x49\x91\x2e\xe7\x1b\xda\xbb\x23\xdd\xf8\xbd\x91\x27\xd8\xe9\xfb\x18\x16\xcb\x2a\x39\x20\xcc\x61\x79\xd0\xb7\x97\xa6\xd1\x21\x74\xbc\xd2\xdf\x6e\x97\xa5\x56\x5e\x29\xd3\xcf\x39\xca\x82\x6d\xf3\xb5\xd6\x2f\x76\x1e\xe7\xe4\x54\x2b\x75\x61\xaf\x23\xda\xc5\x0d\x03\x17\x44\xb5\x95\xc3\xd6\xa5\xfe\xb2\xfc\xda\x46\x9b\x30\xd5\xbc\x2e\x01\xd2\x88\xa0\xad\x27\x46\x9a\xf4\xf8\xfb\xc7\x36\x1a\x7f\xe7\x63\xe2\x9d\xab\x84\x48\x93\x18\xe9\x8a\x13\x63\xf9\x33\x91\xce\x87\x34\xd5\x84\xfa\xdf\xcc\x7f\x65\xae\x07\x31\xf9\xa7\x9e\xd7\x25\x40\x9a\x54\x6c\xc1\xcc\xad\xd7\x4e\x3c\xfe\x1e\xd2\xdd\x2b\x68\xa5\x8c\x74\xa8\x22\xa1\xb8\x4a\xb2\x34\x4d\x53\x28\xae\x32\x92\x84\xe3\x0c\x4a\x72\x90\x64\x55\x86\x45\x29\x82\x45\x55\x94\x01\x10\xd2\x90\x41\x49\x89\x06\x18\x2e\xe1\x94\x42\xb2\x32\x23\x51\x1c\xe6\x22\x5d\x3a\x15\x2a\x14\x47\x5f\x45\x3a\x0e\xc3\x2f\xfc\x6c\x83\x77\xf7\xec\x01\xac\xa4\x68\xe7\x1f\xfa\xbb\xa3\x5d\xe6\xad\xda\x21\x7a\x2f\x6d\x7a\x69\xe2\x1c\x63\xae\x5e\x97\xb3\xec\xdb\x76\xd6\x85\x59\x4c\x12\xcb\xdb\x86\x94\x9d\xbd\x6d\x81\xf2\x68\x2c\x1f\x37\xec\x92\x29\xd8\x83\x9c\x4a\x1b\x8f\xa0\xfe\x6c\x55\xca\x9a\x3d\x18\x14\xf4\x59\xae\xb3\x5a\xb4\xc7\x4a\x69\xbb\xac\xbe\xb5\x74\x79\x90\xea\xd3\x07\x09\xd0\xa6\x42\xb3\x85\xc4\x68\x93\x1e\xff\x10\x46\x9f\x82\x76\x09\xd1\x26\x31\xda\x89\x6a\x61\xfb\x33\xd1\xce\x87\x36\x21\x8c\xee\x17\xe1\x17\xcb\x56\x27\x26\xff\x7b\xa0\x5d\x5c\xb4\x49\xc5\x16\xcc\xdc\x9a\xac\xe3\xf1\xf7\xd0\xee\x5e\x41\x2b\x5d\xb4\x23\x15\x0c\xb2\x9c\xa4\x70\x2c\x41\xc9\x40\x81\x0a\x86\xa1\x40\x01\x04\x90\x21\x45\x4b\x0a\x06\x38\x8c\x93\x58\x48\x11\x90\x90\x24\x92\x62\x01\x94\x28\x8c\x20\x50\x94\x66\x71\x9a\x93\x69\x17\xed\xc8\x94\xd0\x8e\xb9\x8e\x76\x14\xce\x5d\x40\x3b\x8a\x78\xf0\x3d\xd9\x9b\x14\xec\xfc\xc7\x4a\x77\x07\xbb\x72\x4e\x5f\x18\x2f\xeb\x3e\x59\x65\x20\x85\x67\x14\x7b\x08\x94\xd2\x1b\xd6\xca\x32\xca\xcb\x86\x7f\x7d\xc9\x69\xfd\xf7\x2a\x5f\xa6\xb7\x56\x65\x80\x77\xcb\x85\xee\xaa\xdf\xb3\x9e\x9b\xbd\xf9\x70\xa0\xf5\x4b\xd3\x5c\x0d\x15\xb2\xdb\xb2\x5a\x32\x56\x8f\xd8\x96\xb4\xec\xc6\x76\x31\xcc\xfd\x43\xc0\xae\x2c\x4d\x67\x89\xc1\x26\x3d\xfe\xf1\x8e\x0c\x9d\x8f\x89\xc1\x2e\x21\xd8\x24\x06\xbb\x84\x45\x11\x89\x03\xbc\x0f\x6c\x62\x1d\x53\xfd\xa4\x63\xb2\x7b\x80\x5d\x5c\xb0\x49\xc5\x16\xbe\x0d\xe5\x18\x60\x77\xaf\xa0\x95\xf2\xa3\x76\x18\xc3\x42\x9c\xc6\x25\x56\xc2\x01\x90\x69\x1a\x30\x94\x4a\x73\x90\x41\x65\x48\xc8\x12\xa3\x2a\x2a\x8a\x01\x1a\xa5\x51\x92\x20\x68\x82\x96\x19\x89\x04\x04\xc0\x18\x16\x27\x68\x12\xf7\x7e\x3a\x2f\xad\x72\x4c\xe2\x1a\xd8\x31\x2c\x7b\xe1\xb7\x58\xf7\x77\xcf\xde\x19\x91\xb4\x1c\xd3\x7f\xac\x74\xf7\x72\x4c\xee\x4d\x97\xcd\x47\x02\x82\x22\x18\xbc\x2d\x72\xd6\x62\x65\x94\x24\x4d\x10\xf5\x01\x61\x43\x4e\x79\x33\xb3\x03\xbb\x20\x69\x6
d\x8c\xc8\xf6\x26\xcf\xd2\xa3\x3c\x7f\xcc\x76\x20\xf7\xfc\xbc\x41\xc5\xdc\x46\xeb\x3c\x2f\xec\x01\xda\x98\x0d\x9e\xdf\x29\xb2\x94\x67\x5e\xf3\x68\x91\xc9\xa6\xf9\xa8\xdd\x49\xa3\x5b\x91\xbe\x31\xcc\xbe\xe9\x47\xdb\x86\xfc\x45\x41\x9b\x54\xf8\xc7\x3b\x32\x74\x3e\x56\xf9\xf3\xd4\x34\xce\x46\x9e\x96\x14\xed\x7e\xe2\x31\x51\xd2\x23\xcb\x54\x36\x72\x7f\xd2\x91\x69\x20\xda\xe5\x92\xd9\xc2\xfb\xa5\xb3\x90\x27\xfe\xee\x6b\x0b\xff\x58\xdc\xc2\xdf\x43\xbb\x7b\x05\xad\x74\xd1\x0e\x40\x09\xa7\x31\x02\x55\x30\x06\xa3\x64\x0a\x72\x12\x2a\xb3\x94\x02\x49\x5a\x21\x29\x12\x47\x51\x19\xca\x38\x4d\x61\x04\x64\x65\x88\xd2\x40\x42\x29\x1a\x67\x51\x9a\x93\x20\x40\x19\x8e\x70\x7f\xea\x39\xad\x02\x95\xab\x68\xc7\x51\xf4\x85\x02\x95\xfd\xdd\xb3\xb7\x11\x25\xcd\xed\xae\xa2\x5d\xda\xb9\xdd\x0b\xc6\xb7\x0d\x83\x50\x84\x22\xf7\xc8\x3c\x3f\xd7\x5e\x17\xa5\xa2\xdd\xcb\xe6\xac\xca\x33\x5a\xed\x34\x66\xd2\x6b\xa7\xd5\xc8\xe2\x8f\x25\xa2\xdd\x15\x66\x9c\x3c\x79\x33\x17\x6f\x1b\xaa\x50\xa0\xb7\xaf\x22\x5b\x12\x67\x44\x69\xc8\x3c\x17\x5b\x3c\x2a\xa8\xcd\x0c\xbe\x19\x72\xdd\x6a\x36\xc5\x02\x15\x3e\xc1\xa3\x6e\x35\xca\x7c\x49\x8a\x76\x29\xf2\x4f\x8a\x76\x27\xc7\x8e\x71\xa2\xad\xb7\xfa\x89\x1f\xe1\x7e\x62\x84\x4d\x8c\xf6\x69\x14\x65\xfc\xa4\xd5\x46\x20\xda\x65\x93\xd9\x22\x4d\x5f\x48\x3a\x16\xb7\xf0\xf7\xd0\xee\x5e\x41\x2b\x5d\xb4\xc3\x09\x86\x84\x3b\xa8\xe1\x24\x0e\xaa\x8c\x22\x01\x0e\x50\x8a\x44\x10\x04\x27\x31\xac\xaa\x00\x56\x25\x48\x86\x61\x24\x0c\xa8\x04\x21\x01\x92\x66\x81\x42\xc9\xa8\xa2\x72\x3b\x40\x54\xdc\x5f\x60\xc4\x92\xbc\x2c\xd6\x7d\x29\xf3\x25\x90\x23\x51\x1a\x25\x42\x2b\x53\x9c\xbb\xc7\x5f\x09\x75\xdf\x6f\xe7\x61\x5c\x85\x2d\x36\x56\x8d\xa9\x54\xc6\x8b\x3c\xd1\xeb\xbe\x36\xcd\xf2\xfc\xb5\x8f\xa2\x6a\x81\xb5\x2a\x25\x66\x8e\x0a\xcd\xf5\x4b\xef\x99\xef\x13\xfc\x01\xe3\x5c\x87\xf1\x0d\xea\x87\xb8\x78\x2b\xc6\xd5\x28\xb3\x7c\x4a\xaf\xbb\x5a\xe7\xb9\xdd\x2d\x21\x9b\x7b\x7f\x5b\x4d\x1b\x99\x86\x21\xf2\x2f\x9a\x5a\x6f\xf6\x73\x46\x65\xb2\xb2\xb7\x72\x9b\x98\xe5\xeb\xd9\x06\x85\x8d\xa7\x8a\x95\x2f\x82\x8c\xd8\x5b\xa3\x54\xeb\xb9\x3b\xe9\xa1\xfd\xf1\xd4\x44\xb3\x99\xba\x40\x8a\x20\xdf\xc5\xcb\x73\xd9\x22\x86\xeb\xca\x5c\x93\xc8\x76\xd3\xac\x56\x22\x60\xdb\x99\xd3\x9e\x63\xdb\x89\xce\xee\x0f\xef\xf2\xbe\xf9\xac\x3d\x67\xd0\x0a\xfa\x52\xd8\xda\x93\xb5\x88\xcd\x06\x28\xd8\x2e\x0c\x8c\x13\x8b\x9b\x55\x25\xbb\xad\x51\x76\x46\x90\xb3\xae\x8e\xc4\xd8\x36\x6b\xfa\xe0\xd9\x7b\x8c\xda\xf9\x8b\x55\x7c\x97\x80\xbf\x68\x6e\xdb\x9d\x04\xfc\x79\xfe\xe7\x61\x5b\x60\x6c\xcd\xc4\xb7\x45\x4d\x1f\x5e\x54\xf3\x9a\x2d\x92\x8e\xc5\xce\x17\x1e\x65\x1f\xbd\x9b\x6c\xf1\xf7\x98\xa5\x4d\x4a\xe0\x3b\xe5\x5c\x23\x3b\xd0\xdf\xd1\xee\x9a\xce\x92\x12\x23\xeb\x02\x47\x35\xdb\xeb\x69\x4d\x19\xbc\x14\xa5\x4c\x13\x1f\xb7\xbb\x96\x58\xeb\xac\xb0\x41\xd7\xce\x93\x2f\x65\x8e\x1f\xb7\x37\xb5\x5c\x6f\xd2\x55\xb4\x85\x5e\x11\x71\x39\x4b\x19\xf3\x47\x01\x05\xef\xd9\xd4\xf7\xcd\x30\xa8\xb0\x38\x47\x63\xaa\x84\x62\x84\x42\x70\x2a\x47\x03\xc0\x40\x12\x67\x65\x12\x63\x48\x19\x00\x12\x50\x80\x24\x14\x49\xa6\x29\x06\x23\x00\xc1\x28\x24\xad\x70\x34\x84\x94\x4c\x60\xe4\xf1\x17\x95\x13\xc4\x56\xfc\x7a\x6c\x0d\x3f\x1a\xda\xdf\x3c\x7d\x4f\x67\xd2\xc8\x7a\xa1\xbc\xdd\xfd\x8b\xb1\x82\x0d\x8b\xac\xcb\xe2\x84\x68\x30\xc3\x85\xb4\x9c\xad\x1f\x37\xef\x0b\x46\x84\xd5\xba\xa2\x0e\x04\x53\x7c\x7f\xa9\x99\xab\x86\xc6\x16\x6a\x53\x91\xe9\x8d\xd5\x75\xab\x3e\x7e\x2f\xca\x75\xe1\x59\x78\xb6\xd8\x6c\xe5\x99\x6f\x98\xaf\x19\x7b\x8d\xd2\x0c\xe8\x0c\xf9\x95\x01\xba\x2a\x86\x1b\xd9\x46\xe6\x9f\x15\x59\x93\x46\xb6\xa4\xb3\xb9\xba\xae\xcc\xad\x14\x23\xeb\x67\xae\x14\xaf\xae\x5a\x3f\x31\xb2\xf1\x
29\x45\xd6\xb8\x28\xeb\x45\x56\x73\x42\x2a\x68\x0d\x07\x3d\x66\x21\xd7\x2d\x99\x86\xef\xfd\xd7\x1c\xd1\xc7\x33\xe5\x77\x79\xdb\x7e\x63\xd0\xd2\x2a\xf3\xba\xc8\x41\xcc\x28\xb2\xc2\x2b\x59\xdd\x10\xb9\x4d\x7d\x53\x67\xea\xbc\xf6\xbc\x16\xca\xe4\x5a\x92\xc5\xda\x12\x9f\x15\x1a\x15\xba\x30\xeb\x73\xca\x1b\x3f\xe5\x23\xee\xd1\x9c\xbf\xfd\xd7\xfd\x09\x0c\xcb\x06\xb6\x75\xfa\x79\xb4\x98\xc2\xed\xfe\x2d\xba\xd9\x9a\xd8\x6a\x37\xf9\x92\x78\xed\x85\xbf\x7c\xa5\x2d\x34\xbd\x97\xee\xd6\xc4\xca\xe0\x94\xe2\x2f\x08\x82\x20\x7c\x2e\x77\x42\xed\x03\x43\xa4\xde\x2c\x55\xf9\xe6\x00\x29\x0b\x03\xe4\xab\xa6\x7c\x90\x76\x6c\x98\x8b\xd1\x5c\x1b\xef\x7f\x15\xc2\x77\x9d\x92\xd4\x3e\xaa\x41\x92\x07\x31\xbe\x2a\xfd\x1e\xc7\x1c\xb5\x2d\xdf\xa5\xfb\xbf\x91\x6c\x28\x70\x74\xfc\x5d\x12\xef\xa3\x66\x59\x4b\x68\x8e\x52\xd1\xee\x9c\x6d\x90\x72\xb1\x04\x43\x3a\x62\xa9\xd1\x11\x90\xaf\xc7\xe6\x4f\xc8\xb1\xfd\xfe\xb3\xdb\xe1\x46\xd3\xa4\x33\xac\x37\x2b\x7e\xd3\xa0\x86\xfc\xbe\xe9\x95\xdf\x10\x4d\x57\xb3\x60\x26\x97\x34\xbd\x20\x56\x64\xcd\x43\xdf\x41\x7e\xf5\x2d\xdf\xe9\x6a\x1f\xc6\xe6\x92\xfe\x17\x45\xbb\x6a\x01\xd7\xa5\xa5\xad\xe3\xed\x7b\x45\x4a\x62\x4e\xe8\x5f\xd1\x21\xdb\x14\xf8\xb6\xe0\x36\x3d\xa7\x82\xd4\x44\xff\x64\xe8\xb4\x4a\x62\x01\x91\x6c\x13\xc2\xd3\xd9\x15\x2e\x8d\x3b\xc7\x92\xcb\xe3\xd2\x89\x26\x51\xc8\xbc\x96\xb6\x23\xef\xa7\x74\x62\x8b\x73\x24\x71\x2a\xc9\x59\x2e\x70\x2e\x8f\xdb\xf8\x09\xf1\x3e\x8c\x2c\xf8\xb6\x84\xba\xfc\xd1\x60\xd2\x76\x34\x01\xd6\x24\x89\x64\xbb\xfe\xd1\xc4\x3a\xf5\xb4\x5d\xaf\x20\x69\x66\x50\x19\x27\x18\xb8\x03\x85\x68\x12\xb9\x6d\x0f\xe6\x79\x42\xc0\x62\x31\xd3\x64\x37\x1c\x18\xa6\x12\x12\xa6\x47\x70\xe7\x1b\xce\xfd\x18\x92\x7a\x28\xe1\x0a\xec\x23\x77\x2a\x36\x54\x55\x28\xfb\x5d\xed\x63\xd4\xd2\x94\x27\xe4\x8b\xd3\xf9\x4b\x98\xb0\x9a\x92\x92\x98\x9a\x12\x59\xc0\xbd\xeb\xed\xc4\x8b\x21\xb4\xb1\x18\x2d\xd2\x92\xdb\xa3\x75\x2a\x7a\x08\x54\xc5\xd2\x24\x58\x01\x7b\x93\x9e\x02\x1e\xad\x10\x9f\x8e\xa9\xc2\x29\x85\x20\x25\x8c\xc5\xce\x2b\x27\x46\x2c\x1d\x3c\xe1\x8f\x34\xe2\x1a\xff\xb2\xa1\x2d\x6f\xb6\x3b\xa1\x3a\xb9\xad\xcf\xc9\x9d\x8a\xec\x7e\xef\x93\x31\x58\xa2\x53\xbb\xa6\x25\xd6\x07\x9a\xd1\xc2\x5b\x90\x80\xb6\x3b\x24\x76\x92\x61\x3d\xd2\x88\xef\x92\xd7\xdc\xcf\x36\x95\x1d\x13\x09\x58\x30\x31\x7e\x06\x11\xf3\x49\xae\x40\x9f\x9c\xa7\x6d\xaf\x0a\x68\xa8\x6a\x02\xd0\xfa\x48\x2a\x92\x70\x4e\xcb\x4b\xa2\x39\xb2\x43\x33\x35\xf3\xf9\xe8\x5d\x13\xd2\xd7\x3c\x8a\xa4\xe9\xd8\xf1\x8c\x5a\x54\x29\xaf\x5a\x33\x1d\xd9\x22\xc9\x74\x59\x96\xbd\xc4\x33\xc3\x98\x2e\x17\xc9\x24\x3a\xa7\x15\x79\x44\xdd\xf5\x6e\x88\x7c\x0b\xa0\x99\x23\x5b\x9b\xc3\x54\x24\xf4\x53\x8b\x36\x6f\x3d\x01\x9f\x10\xbf\xc8\x4f\x88\x17\xe2\xe5\x99\x61\x41\x65\x04\xec\x10\x25\x52\x88\xdb\x1e\x9d\x6b\x12\xdf\xb8\x3a\xda\x51\x4d\xcd\xba\x37\x18\xf6\xaa\xdd\x34\x5d\x81\x9b\x91\x6f\xc9\x61\x8d\x0c\x7d\x04\x14\xc5\x84\x96\x95\xd4\xa0\x57\x19\x9c\xe5\x69\xde\x6d\x5f\x66\xe4\x36\xbc\x41\xf6\xe4\x7e\x70\x89\xf6\x75\x89\x03\x66\xd9\x39\x41\x6f\x15\xbe\xa3\x67\x6f\x17\xf1\x73\xf0\x8b\x54\xaf\x2e\xfb\x77\x8d\xae\x08\xea\xad\xa1\x76\x24\x0f\x4e\x94\x92\xb4\x41\xa4\xaf\x2e\xdf\xa2\x7a\xf2\x09\xf1\xb4\x9d\xe1\x8c\x74\x9c\xf5\x66\x38\xb9\xf9\xc2\x30\x77\x81\x6f\x05\x4d\x4b\x33\xf4\xf4\x0d\xed\xe7\x70\x5d\x7c\x5f\x87\xe8\xca\x78\xa1\x27\xe6\x4e\x45\x34\xfb\x9f\xf0\xb8\xaa\xc9\x49\xdb\xe8\x4a\x2c\x4c\xb8\xd2\x8c\xa5\xf5\x29\xda\x04\x31\xbb\xaa\x56\x50\xa7\xe8\xfa\xed\x37\x51\xee\xa6\xd3\x9e\xc1\x55\x3d\x42\x77\xbb\xce\x49\x1f\x7f\x5a\xfb\x1e\x53\xdb\x4f\x3d\x30\x01\xbe\x75\x82\x9f\x13\x3d\x4f\xa1\x52\x9a\xe1\x97\x58\x44\xd1\xe1\x4a\x5e\x77\x91\x59\x7a\xf0\
xf5\x91\x70\x24\xd9\xaf\x83\xd8\x69\xb2\x7d\x0f\xb7\xf9\x48\x3f\x76\xaa\xef\x2c\xe2\x0e\x40\xbe\xdf\x61\x1c\x49\x86\x31\x8d\x6d\xe5\x0b\x34\xaf\x2e\x11\xbe\x7e\x55\xa0\x0d\xb4\x99\x85\x7c\xff\xcf\x7f\x90\x07\xcb\x98\x29\x27\xa7\x69\x0f\x3f\x7e\xd8\x70\x63\x7f\xfb\xf6\x84\x84\x37\x94\x0d\x25\x5a\x43\x77\x2f\x3e\xbc\xa9\x64\x2c\xc7\x13\x3b\x12\xfb\xb3\xa6\x97\x05\x38\x6b\xea\x13\xe1\x1b\xd2\x2b\x0a\x4d\xc1\x75\x32\xe4\x0f\x84\x20\x22\x1f\x44\x6b\xca\x48\x3d\x39\x26\xca\x97\x3f\xe7\x38\xda\x63\x8b\xe4\x6b\x4d\xa1\x54\x10\x0f\x47\x40\x48\x53\xc8\x0b\x4d\x41\xcc\x0a\x2d\xdf\xa9\x88\x73\xb7\x26\x22\x9d\x7a\x6e\xe7\x32\x4d\xa1\xd5\x6e\x96\xb2\xed\xdd\x57\x39\xa1\x22\xb4\x05\x24\xcb\xb7\xb2\x7c\x4e\xb8\x70\x8e\xb6\xcb\x3b\xce\x2f\x47\xbe\xad\x98\xf4\x8c\x71\xce\xe7\xca\x21\x59\x98\x24\xe7\xf6\xf1\x6f\x1b\x05\x1a\xcb\x5b\xe8\x5f\x39\x51\x0c\xb5\x84\x97\xca\xfe\x74\x3b\x9c\xca\x11\x64\x85\xfd\x2e\xc1\x65\x87\xb9\xcd\x02\x1f\x37\x95\x7e\xa2\x19\x42\x84\x39\xb7\x45\xc0\x36\x58\xba\x4e\xe1\xdf\xe2\xf8\x27\x18\x24\xdc\x35\x3e\xec\x21\x45\xf5\x8e\xba\x61\xd9\x63\x13\xb6\x1a\x15\x44\x01\x36\xd8\xb9\x18\xa2\x2c\xe7\x0b\x44\x36\xe6\x8b\x19\xb4\xa1\xa3\xc3\xff\x0b\x00\x00\xff\xff\x65\xed\xdb\x53\x58\x00\x01\x00") + +func operation_fee_stats_1HorizonSqlBytes() ([]byte, error) { + return bindataRead( + _operation_fee_stats_1HorizonSql, + "operation_fee_stats_1-horizon.sql", + ) +} + +func operation_fee_stats_1HorizonSql() (*asset, error) { + bytes, err := operation_fee_stats_1HorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "operation_fee_stats_1-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2d, 0x5d, 0x8b, 0xd0, 0xa4, 0x29, 0x5, 0x60, 0x85, 0xc6, 0xbe, 0x42, 0x0, 0x53, 0x8d, 0x6c, 0x7a, 0xc4, 0xc0, 0xbe, 0x51, 0xa7, 0xa2, 0x9a, 0xcd, 0x72, 0xe4, 0xdb, 0xf1, 0x13, 0x3b, 0x7f}} + return a, nil +} + +var _operation_fee_stats_2CoreSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x69\x8f\xda\xcc\xba\xed\xf7\xfc\x0a\x2b\x5f\x3a\x11\x9d\xb7\x3d\x0f\xc9\xcd\x96\x8c\x99\x07\x33\x4f\x7d\x75\xd5\x2a\x97\xab\x8c\xc1\xd8\xc6\x03\xd3\xd5\xfe\xef\x47\xb6\x19\x0c\x0d\x34\xdd\x74\xf6\x39\xd2\x09\x8a\x5a\x04\x3f\xac\x5a\xb5\x6a\x55\xb9\xaa\x6c\x3f\xfc\xf8\xf1\xe5\xc7\x0f\xa2\xe9\xf8\x81\xe1\xa1\x4e\xab\x46\xe8\x20\x00\x1a\xf0\x11\xa1\x87\x33\xf7\xcb\x8f\x1f\x5f\xa2\xe3\xb9\x70\xe6\x22\x9d\xc0\x9e\x33\x3b\x04\x2c\x90\xe7\x9b\x8e\x4d\x48\xff\xf0\xff\x50\xa9\x28\x6d\x4d\xb8\xc6\x4b\xf4\xf5\x93\x90\x2f\x9d\x7c\x97\xf0\x03\x10\xa0\x19\xb2\x83\x97\xc0\x9c\x21\x27\x0c\x88\xdf\x04\xf9\x2b\x3e\x64\x39\x70\xfa\xfa\x53\x53\xb7\xd0\x8b\x69\xbf\x04\x1e\xb0\x7d\x00\x03\xd3\xb1\x5f\x7c\xe4\x47\xb8\xaf\x83\xa1\x65\x46\xd0\xc8\x86\x8e\x6e\xda\x06\xf1\x9b\x78\xe8\x75\x0b\xe2\xc3\xaf\x5d\xd9\xb6\x0e\x3c\xfd\x05\x3a\x36\x76\xbc\x99\x69\x1b\x2f\x7e\xe0\x99\xb6\xe1\x13\xbf\x09\xc7\xde\x62\x8c\x11\x9c\xbe\xe0\xd0\x4e\xca\xd2\x1c\xdd\x44\xd1\x71\x0c\x2c\x1f\x1d\x15\x33\x33\xed\x97\x19\xf2\x7d\x60\xc4\x01\x4b\xe0\xd9\xa6\x6d\x24\x21\x9e\xb3\x7c\xf1\x11\x0c\x3d\x33\x58\x47\xe0\x18\xff\xda\x0a\x80\x80\x07\xc7\x2f\x2e\x08\xc6\xc4\x6f\xc2\x0d\x35\xcb\x84\x8f\x91\x62\x10\x04\xc0\x72\x8c\x5f\x5f\xbe\xe4\xda\x8d\x26\x51\x56\x73\xf9\x21\x51\x2e\x10\xf9\x61\xb9\xd3\xed\x6c\x23\xff\x09\x5d\xc3\x03\x3a\x1a\x9b\x7e\xa0\xad\x7d\x34\xff\x75\x35\xda\x87\xee\x3c\x74\xbc\x70\xe6\xdf\x16\x8c\xec\xc5\x2d\x91\x16\xd2\x0d\xe4\xdd\x12\x19\xf1\xc4\x08\xdd\x18\x79\x43\x98\x86\xfc\xc0\xc1\x18\x79\xa6\xad\xa3\xd5\xf5\x58\x00\xa1\x13\xda\x81\x06\x2c\x60\x43\xe4\xff\xfa\x22\xd7\xba\xf9\x36\xd1\x95\xb3\xb5\x7c\x2a\xba\xa1\xd6\x46\x67\xe4\x75\xbc\x35\x11\xa3\x2b\x0d\xb5\xd3\x6d\xcb\x65\xb5\x9b\xfa\xd2\x71\xe0\x8b\x3b\x45\xeb\x5b\xf0\x83\xd5\xdb\xd0\xfb\x98\x77\xa0\x62\x74\x03\xe7\x74\xd8\xed\xd8\x5e\xe8\x07\x96\x69\x23\xff\x1a\xf2\x3e\xe8\x66\xdc\x88\x05\x8a\x47\x83\x2b\xb8\x87\xa0\xdb\x71\xf7\x96\xbf\x86\xbb\x0f\xba\x19\x37\x89\x37\x6d\xec\x5c\xc1\x3d\x04\xdd\x8c\xeb\x86\x9a\x1f\x6a\x57\x30\x93\x80\xf7\xe0\x59\xa6\x3f\x9e\x87\x28\xbc\xa6\x6c\x3a\xec\x76\x6c\x84\xbc\x6b\xb2\xc6\xc7\x6f\x46\x8b\xbb\xf1\x35\xb8\x24\xe0\x66\xbc\x64\x54\x1a\x23\xa0\x5f\x87\x3d\x8a\xfb\xc3\xe8\xdb\x91\x12\xcd\x5f\x6e\x2c\x46\x03\xf6\x15\x70\x0d\xd8\x37\x13\xde\x8e\x7e\xd7\xb8\xee\x42\xde\x8b\x19\xcd\x01\xde\x86\x8d\xa2\xb6\xc8\x71\xec\x29\xf0\xd9\x21\xf7\x7a\xec\x7e\x68\x7c\x2b\xec\x30\xd0\xbd\x11\xb9\x1f\xb8\xae\xc7\x1d\x06\xa2\x37\xe2\xf6\x03\xcb\x9b\x71\x37\xf1\x3b\x0c\x28\xd7\xe3\x92\x41\xe2\xcd\x98\x7d\x97\x7f\x23\x32\xea\xc7\xd7\x43\x92\xbe\x79\x3d\xe6\xa8\x2b\x5c\x0f\xd5\x80\x7d\x3d\x60\x67\xd5\x9b\xa2\x22\xe7\x6d\x03\xf3\xc3\x6e\x5e\xed\x94\x1b\x6a\x3a\xd8\x72\x0d\x7f\x6e\x6d\x23\x3a\x4a\x29\x5f\x97\x5f\x61\xfd\xfa\x92\xcc\x8d\x55\x30\x43\x3f\x77\x9f\x11\xdd\xb5\x8b\x7e\x6e\xbf\xf2\x8b\xe8\xc0\x31\x9a\x81\x9f\xc4\x8f\x5f\x44\x63\x69\x23\xef\x27\xf1\x23\x9e\x32\x2b\xed\xbc\xdc\xcd\xef\x90\x77\x78\x5f\x8e\x10\x8f\x0f\x6e\x81\x95\x46\xbd\x9e\x57\xbb\x57\x90\x93\x00\xa2\xa1\x1e\x03\x10\xe5\x0e\xf1\xb0\x9b\xdf\xee\x3e\xf3\x63\x90\x87\xd3\x92\x77\xd5\xdf\x96\xb9\x57\xe8\xcd\xfa\x1c\x69\xa9\x36\xba\x27\x7a\x12\x83\x72\xb7\xb4\xa7\x95\x9e\xd0\x1e\x15\x7f\x40\x39\x21\xf2\x9e\xca\xbf\x02\x89\x05\x68\xd6\x9e\x5c\x23\x5a\xc5\xb8\x9e\x03\x91\x1e\x7a\xc0\x22\x2c\x60\x1b\x21\x30\x50\x2c\xc3\x8d\x13\xf0\x28\x4c\x47\x18\x84\x56\xf0\x12\x00\xcd\x42\xbe\x0b\x20\x8a\x56\x13\x0f\x27\x47\x97\x66\x30\x7e\x71\x4c\x3d\xb5\x40\x38\xaa\x6c\xda\x90\xdb\x6a\xc6\xd6\x3d\x54\x72\x67\x80\x73\x82\x27\x2e\x4f\x0f\xba\xdf\xb
e\x10\x04\xb1\xfb\xc4\xd4\x09\x38\x06\x1e\x80\x01\xf2\x88\x05\xf0\xd6\xa6\x6d\x7c\xe3\xf8\xef\x71\xdb\xa8\xbd\x5a\xed\x31\x8e\x8e\xbe\x68\x83\x19\x3a\x13\x2c\x8a\xe7\x82\x17\xc0\x0a\xcf\x45\x53\x14\x7d\x1a\x6e\x01\x3f\x98\x39\xba\x89\x4d\xa4\x13\xa6\x1d\x20\x03\x79\xfb\x90\x2f\xdf\x4f\xdb\x7e\xdf\x8b\xef\xd4\xc2\xff\x90\x10\xdb\x85\x00\xa1\x99\x86\x69\x07\x27\x07\x7d\x34\xb7\xc3\xd9\xf9\x63\x76\x38\xf3\x43\x0d\xd9\x81\x17\x2d\x05\x4f\xab\x99\xc4\x98\x36\xb6\x40\xb4\x62\xd4\x91\x1f\x9c\xa7\x93\x04\x8e\x9d\x19\xd2\x9d\x19\x30\xed\x33\x51\x2c\x7b\x4a\x3a\x18\x7b\xc8\x1f\x3b\x96\xee\x13\x01\x5a\x9d\x32\xc3\x16\x30\x2e\x31\xba\xda\x36\x5b\x45\xc2\xa8\x54\xcb\x04\x9a\x69\x99\x41\x54\xb9\xa4\xfe\x3b\x49\x2c\xeb\xda\x61\xd3\xb0\xa3\xb9\x50\x44\x2b\xf9\x24\x35\x1b\xd8\x4f\x2d\xb6\xa2\xbf\xc4\xcb\x6a\x42\x29\xe5\x95\x2a\xf1\xed\xdb\xae\x29\xfe\xf5\x9b\x20\xbf\x7f\xbf\xf2\xed\x53\x82\xa7\x38\xaf\x2a\xf0\x16\xe2\x51\x5b\x9e\xa0\x1d\xb7\xf3\x5b\x48\xaf\xe5\x39\x81\x3b\xa3\x5f\x82\xf9\xba\x63\x44\xe7\xbf\x8f\xf6\x89\x68\xca\x98\x74\x07\xdb\xd1\x51\xba\x2f\x1c\xf5\x81\xd7\x85\x1e\x9f\x9f\x3f\x5a\xfc\xf1\xc4\x38\x21\xb2\xfd\x0c\xf8\xe3\x14\x19\xfe\x95\xb7\x5d\x0f\x2d\xde\x0c\xd2\x42\x38\x45\x81\x65\xfa\xc1\x9b\xa1\xfb\xd9\xf6\xce\xee\xc9\xc7\xd0\x72\x7c\x14\x98\xb3\x0b\x3d\x3f\x1e\x58\xcf\xf4\xad\x54\x9b\x1f\x4f\xea\xf7\x78\x27\xed\x7d\x28\xe7\x82\x75\x2e\xad\x0d\x8e\x61\x0e\xb5\xb8\xe4\x96\xed\xe4\xeb\xa3\x2d\xb6\x5d\x78\x7d\xdb\x77\x72\xe4\xdd\x38\x82\x26\x3b\x2f\xfa\xa5\x11\x34\xb6\x3b\xf0\x7d\x14\x9c\xd3\x33\xe9\xab\x17\x0f\x83\x59\xd4\xad\xce\x43\xbb\x9e\x09\x91\x7d\x61\x10\x8b\x0f\x5e\x1a\xe1\xe2\x83\x84\xee\x84\x9a\x85\x22\xbf\x41\x33\xde\x91\xfc\xd4\x51\x34\xd5\xc2\xdb\x25\x6b\x52\x97\x93\x76\xdd\x56\xf0\x82\x37\xb6\xdf\xdc\x2a\x7c\xf2\xd5\x9d\xee\x97\x0c\x91\x4c\xd8\x3f\xea\x87\x64\x59\x9f\xd8\xc1\x74\xcf\x9d\xf8\xb9\x57\x3d\xd7\xf1\x82\xbd\x1a\xb9\x7c\x41\xee\xd5\xba\x04\x79\x7a\xda\x44\xab\x00\x04\x01\x9a\xb9\x01\x11\x75\x0b\x3f\x00\x33\x97\x88\xa6\x4c\x4e\x98\x7c\x42\x6c\x1c\x1b\xbd\x3e\xd9\x62\x60\x5a\xa1\x97\x3a\xd5\x5e\x2a\x21\x58\xbb\xe8\xed\x46\x49\xb6\x25\x52\xb8\xaf\x87\xfd\x7d\x89\x17\x5a\x67\xbb\xb3\xe1\x78\xa7\x8d\xfa\x2d\x56\xe2\x5f\x04\xf9\x9d\x90\xd5\x1c\x91\xfc\xf7\xff\xfc\x26\x78\x8e\x63\xb8\xef\x67\xdb\x2a\xbd\x0c\xfb\x70\x93\xa5\x77\x79\xd2\x63\xee\x05\x35\x92\x8d\xb6\xa8\xd7\x9d\x25\x14\xad\x1d\xef\xa0\xe2\x87\xda\x96\x84\x87\xfc\xa3\x13\x10\x73\x76\xc6\xe8\x21\xb0\xef\x4b\xaf\xf9\xa4\xd6\xbc\x1f\xe5\x94\xda\xac\xbb\xe1\xcc\x98\x10\x9b\xfb\xe8\xda\x19\xe6\x35\xcf\xd4\x1a\xfe\xa3\x3c\x0f\x10\xb7\xf3\x7c\x75\x92\x3b\x39\x8e\xec\x05\xb2\x1c\x17\xbd\x71\x4a\x3b\x14\x7d\xc7\x89\x28\xb5\xdd\x71\x87\x04\xbb\xfd\xda\x6f\xb7\xb4\xc3\xc1\x45\x6f\x09\x31\xbf\x70\xa2\x39\x16\x61\xb7\x0f\x7c\x84\x78\x2a\xc4\x51\x69\x17\xc5\x38\xec\x11\x7d\x58\x8c\xc3\xa6\xf8\xb7\x43\xbf\x3d\x5e\xbc\x9d\xe9\x53\xd7\x7a\x77\x6a\x87\xeb\xa3\xac\x52\x97\x00\x3e\xb2\xec\x8a\xcf\xf8\x57\x46\x6a\xd3\xf7\x43\xe4\xdd\x0e\x05\x1d\xfd\xec\xea\xf4\x95\x2c\x81\x65\xce\xcc\x0b\x33\x8a\xab\x6b\xc1\xff\xce\x55\x55\xca\x9d\xa9\xab\x2a\x1f\x5a\x45\xa5\xbf\xff\x59\xeb\xa8\x14\xe6\xc7\xd7\x3f\xd7\x50\x93\x46\x3b\x41\xda\xb6\xe4\xbf\xce\x77\xbc\xa3\xed\xde\x0f\x9b\x3c\x7d\x0d\x2d\xb1\x79\xb0\x3a\x1a\x8a\x6f\x58\x6f\x9c\x1a\x70\x15\x5f\xa5\xbc\x78\x14\x8e\x81\x6d\xa0\xb3\x0b\xfb\xb4\x38\xe9\xcb\x76\x1f\x1f\xab\x0f\x7b\xe7\x1f\x97\xe8\x3f\xac\x8f\xe6\xe8\xeb\x73\xe2\x04\x2b\x0f\xf9\xa1\x75\x76\x74\x0f\x56\x33\xf4\xe6\x7a\xee\x70\x89\xf5\xe3\x7a\x9e\x5c\xb7\xf8\xa8\xa8\x27\x57\x9c\xbf\xdd\x24\xdc\xf6\x4b\xd7\xd4\xdb\x86\x9c\x13\x
e2\x36\xdb\x9d\x5c\xe1\xfe\x88\x50\xb9\x68\x65\x8d\x1d\xef\x8d\xcd\x50\x22\x27\x77\xe5\x37\x34\xbb\x0e\xe9\xbf\x1b\xaf\xac\x76\xf2\xed\x2e\x51\x56\xbb\x8d\xc3\xa6\x62\x5f\xae\xf5\xf2\x1d\xe2\xdb\x43\x31\xdb\x6e\x8e\x4a\xe5\x1a\xad\x94\x99\x82\xda\x62\xb3\xc3\x5a\xa1\xae\xe6\x6a\x85\x4a\x4f\x6d\xf6\xe8\xd2\x88\x79\xae\x17\x3a\xa5\x86\xda\x53\xf2\x0d\xb9\x33\x10\x5a\x8a\xd0\x18\xd2\xa5\x87\x47\x82\x22\x5f\xbd\x1e\x89\xe4\x5f\x2c\x33\xf1\xf0\xf0\x48\x3c\xc8\x2d\x59\x96\xe5\xdf\xbf\x1f\xe2\x03\xd4\xee\xd8\xe1\xef\x65\x15\x4f\xb7\x8a\xee\x50\xef\xf2\x06\xd0\x7b\x25\x3c\xde\x04\xda\xeb\xc8\x33\xba\x24\x62\x8e\xe1\x11\xe2\x45\x9d\xd2\x68\x41\xe3\x34\x51\xc2\x34\x03\x30\xc7\x50\x94\x26\x70\xbc\x04\x68\x16\x03\x4c\xb1\x24\x03\x74\x52\xe3\x68\x8d\x67\x18\x8d\x14\x34\x24\x49\x91\x54\xaf\xf5\x7c\xdf\x2b\xc2\xe0\x04\x1a\xd0\x88\xa1\x31\xa6\x59\x11\x90\x82\x46\x22\x81\xc4\x3a\x85\x79\x9d\xa1\x44\x48\x61\x00\x75\x9a\xd4\x78\x08\x49\x11\x32\x8c\xce\x09\x02\x47\x73\x92\xc8\x8b\x14\xcd\x01\x8a\x7f\x88\xdb\x88\x8c\x1a\xee\x7f\xec\x2b\x3b\xac\x9a\xec\xfa\x69\xdd\xa9\x66\x85\x9c\x9d\x93\x4a\x34\xb9\x9a\x64\x33\x3e\x69\x04\xfe\xb2\xbc\xdc\x50\x43\xbd\x33\x18\x81\x6c\x05\x14\x8c\x28\x3e\xaf\xb2\x35\xb0\x71\xe9\xd6\x9b\xc8\xcf\xf2\x90\x62\xe3\xb0\xec\xf4\x3f\x50\x91\x4f\x7d\x3d\x7c\xff\x75\x8b\x51\xb1\xae\xeb\xbc\x4e\xf2\xba\xc0\x22\x52\xe7\x78\x41\x40\x1a\x49\x93\x1a\x89\xb1\xc8\x40\x86\xa6\x00\x45\x23\x0a\x41\x11\xb2\xac\x40\xd3\x9c\x0e\x21\x82\x3c\xc9\x68\x02\xa6\x01\x4d\x6a\x91\xc9\x3e\xc3\xec\x08\x63\x51\xd3\x78\x1d\x63\x5a\x60\x18\x8c\x29\xcc\x60\x0c\x38\x8a\xa5\x30\xc3\x02\x24\x70\x02\x85\x10\xa3\x8b\x10\x20\x5e\xd7\x74\x8a\x96\x34\x00\x39\x8a\xc2\x80\xd4\xb0\xce\xb3\x0f\x8f\x04\xfd\x48\x50\x1c\x27\x71\x82\x24\xd2\xc2\xd6\xb1\x0a\xdd\x7c\x9e\x50\x6a\xc8\x39\xa4\x56\x11\x06\xac\xbd\x6e\x2c\x7a\xab\x22\xd3\x77\x9d\x69\x66\x51\x90\x1b\x81\x42\x55\xe9\xba\x90\x15\xf8\xe7\xf0\xb9\x5d\xf2\xa4\x9e\x3e\xac\x6a\xdd\xaa\x09\x2d\xdc\x98\xac\x05\x7a\xf4\x5c\xa8\xf4\xa6\x95\x66\x1f\x56\xbb\xdc\x38\x58\x38\xde\x8c\x4a\x9c\x33\x6c\xf6\xeb\xfe\x32\x7a\x57\xde\xff\x49\x86\x37\xff\xf0\xff\xa5\xdc\x6c\x6d\xbd\xc3\x6c\xa4\xf1\x4c\xee\xb8\x35\x29\x90\xfb\xab\x69\xb0\xca\x31\xc3\x4e\xc3\x65\xcc\x60\xd5\x59\xe4\x67\x75\x5e\xee\x4d\x97\xd9\x0e\x9b\x6f\xdb\x8b\x4c\x2d\x08\x9e\xd6\x70\xf3\xb4\x12\x9f\x32\x56\x21\x2b\x92\x9e\xdd\x5f\x09\x13\xba\xee\xcd\x68\x29\xef\x86\x7e\xaf\x9d\x31\x16\xd2\x73\x8c\x5c\x7e\xb7\xa3\x73\x64\xe5\x8f\xb8\xee\x4f\xbe\x6e\x74\x34\x03\x75\x80\x04\x4a\x43\x14\x29\xf0\xa2\xc0\x22\xc8\x01\x46\xe2\x38\xc0\x60\x5a\x80\x1c\x10\x39\x9a\x45\x3a\x47\xb1\x40\xe0\xa1\xc0\x0b\x34\x4b\x73\x2c\x94\x80\x04\x28\x5e\x47\x5a\x34\xe4\x7d\x4a\xaf\xf8\x04\x47\x33\x29\x47\x8b\x3b\x47\x3f\x31\xba\xd6\xd2\x86\x5d\x83\x1a\xe8\x19\x5f\x51\x96\x4f\x6c\x33\x57\x06\xf9\xb0\xe4\x4f\xf2\xed\x75\x45\xdf\x34\x56\x46\x83\x11\xe7\xe5\x5a\x43\x6d\xf1\xcb\x50\x56\x4a\xfc\x0a\x8f\x27\xe1\x50\x5a\x7a\xa8\x52\xe9\xac\x87\x81\x5f\xf0\xa8\xbc\x56\x60\x42\xb9\x12\xe6\x82\x6a\x62\xa3\xd8\xd1\x41\x4a\xf1\x3f\xe7\xd8\xfa\x5f\xc7\x1e\x1c\x4b\x21\x4e\xd7\x75\x16\xb0\x14\x03\x30\xcd\x0b\x94\x44\x89\x3a\x2f\x40\x0d\x52\xa2\x00\x28\xa8\x47\xd6\x92\x00\xc9\x90\x02\xcb\x90\xba\x44\x71\x10\x08\x2c\xc7\x43\x8a\x17\x25\x1a\x45\x4e\xf9\x1c\xd7\x0b\x52\x6c\x3e\x16\x93\x12\x4b\x01\x41\xd4\x25\x46\xd3\x10\xe2\x05\x09\xd3\x24\xaf\x8b\x02\xd6\x48\x1d\xd0\xa4\x28\x72\x02\x12\x31\x80\x3c\x44\x3c\x29\xf1\x22\xcb\xb2\x3c\x15\xf1\x60\x53\x8e\x95\x76\x8e\xdd\x6c\x00\xb7\x5a\x8c\xb3\xf4\x98\x14\x0a\x0e\xd7\x07\xcd\x35\x2e\xcc\x0b\x95\x06\xd5\xee\xd8\x5a\xe9\x79\xdd\xb2\xba\
x95\x39\x28\x04\x3c\x3d\xaa\x50\x4d\x8c\xfd\x66\x9e\xcd\x1a\x1a\x5d\x2f\xb6\x82\x3e\x6b\xb3\x6d\xaa\x3d\x06\x4d\x2b\x6f\xab\xfe\x82\x0e\x1a\x85\x79\xc1\xcf\xc5\xa7\xf2\xc4\xb1\x29\x13\xbd\xc3\xb1\x8c\xcd\x2b\x4f\xdd\xa5\x95\x75\x59\xba\x1b\x66\x38\xdb\x5e\x67\x83\xf1\x93\xaf\x3a\x86\x59\xc0\x4e\xc6\xd7\x1a\xa3\x0a\xc8\xb7\x8b\x85\x18\xb9\xf5\xd7\xb1\x07\xc7\xd2\x98\xd4\x00\xcf\x92\x24\x27\x92\xa4\x08\x28\x4e\x17\xb1\x06\x58\x96\xe1\x31\x0b\x69\x56\x17\x24\x89\x91\x24\x16\x4b\x58\x60\x00\x4b\xf3\x50\x40\x34\x23\x0a\x12\x04\x0c\x05\x24\x06\x47\x6e\xfb\x0c\xd7\x7f\x82\x63\xb9\x83\x63\x99\xdd\x3c\x57\x59\x71\x3a\xd3\x73\xb3\x8d\x45\xc5\x2e\xb6\x27\x03\x5c\x5b\x8e\xd0\x78\x03\x47\xb9\x99\x51\x2f\xb5\x96\x74\x7b\x58\xd5\x0b\x5a\x16\x54\x6a\x53\xbe\x25\x35\xa4\xb9\x63\xf3\xc3\xa6\xa3\xe6\xf8\x5a\x49\x2c\xd8\x9d\xb5\x4a\xb3\x5c\x9b\xae\xc1\x0d\x84\xdd\x55\x5e\x0c\x58\xf1\xa9\x9b\x28\x1c\x3b\xd6\x38\x28\xfe\xe7\x1c\xdb\xfb\xeb\xd8\x83\x63\x01\x49\x41\x0d\x21\x80\x34\x4a\x84\x1a\xd2\x49\x84\x24\x8a\x01\x14\xa3\x01\x16\x0b\x3c\xc9\xf3\x24\x26\x01\xcf\xb1\x94\x86\x44\x4c\x49\x24\x4d\x89\x14\xcf\x93\x14\x96\x78\x20\xe8\x62\xe4\xb6\xcf\x70\x3d\x0f\x78\x88\x68\x4c\x52\x08\x08\x50\xd2\x38\x4a\x40\x24\x46\x0c\x23\x40\xc8\xe8\x58\x20\x69\x9d\x21\x31\x43\xd1\x18\x0a\x8c\x28\x21\x88\x34\x49\x87\xac\xa4\x6b\x12\x12\x04\x91\x83\x0f\x8f\x04\x9f\x72\x2c\xb5\x73\xec\x5a\xac\xb9\x53\xb9\x60\xc8\xe6\x78\x38\xca\xf0\xed\x9c\x46\x2e\xa7\x14\x37\xe5\xac\xa6\xcd\x34\xa6\x15\x1f\x9b\x8d\x92\x5d\xad\x83\xe9\x53\x4f\x95\xc7\xde\x66\x62\xe5\x72\x48\xab\x55\x03\x65\x38\x7a\x46\x39\xbf\xea\xe4\x3c\x6c\x0f\x32\xf2\x6c\x02\x31\x72\xa4\xdc\xc8\x44\xf1\x3c\x36\x71\xec\xf2\xa0\xf8\xed\x8e\xb5\xe6\x5a\xa3\xb6\x2c\xcd\x71\x25\x68\x63\xe3\x69\xa2\x8a\x4b\x49\x5a\xd6\xba\xb9\x4d\x7e\xb1\xda\x98\x68\xe3\xd9\xb0\xa3\x87\x48\x67\x87\x31\xf2\xe8\xaf\x63\x53\xb3\x02\xcc\x93\x9a\x08\x79\x44\x6a\xba\xa0\x45\x26\xd5\x38\x92\x44\x3a\xc9\xea\x24\x44\x12\x83\x45\xcc\x50\x22\xc7\x00\x11\xd2\x9a\x8e\x75\x96\xc7\x00\x41\x51\xd3\x00\x94\x00\x0d\xa8\xc8\x6d\x9f\xe1\xfa\x4f\x70\xac\x90\x72\x2c\xbd\x73\x2c\x2f\xc3\x45\xe8\x79\xc5\xfa\x22\xc8\x85\xd3\xae\xd3\x0d\xa7\x12\x9d\x9d\xe5\x96\xae\xd5\xd2\xf8\x66\xb1\x55\x1e\x15\x66\x32\xb6\xe6\x78\x94\x25\xcb\x0d\xd3\xb6\x27\xcb\xa9\x64\x51\x8c\xbc\x1e\x28\x7d\xb2\xdc\xc5\x45\xb3\xb2\xec\x2f\x5c\x47\xb3\x2b\xab\x55\x8e\x04\xc6\x92\x77\x62\x85\x23\xc7\x86\x29\xc5\xff\x9c\x63\xe1\xff\x1a\xc7\x5e\xd8\x49\x3b\x73\x47\xce\x1d\xfb\x72\xaf\x6f\xe7\xb8\x07\xec\xd2\xfd\x06\xf7\x61\x9e\xde\x32\x70\x07\xda\x85\x0b\xfe\x77\x20\x5e\xb8\x34\xff\xde\x5d\xcd\xd4\xe5\xf9\xc3\xd6\x70\x8e\xab\xf1\xad\x9e\x9c\x6f\x34\xfa\x74\x8b\xeb\xb5\xca\x3c\xab\xd6\xd5\x42\xb9\xd3\x50\x6b\xcd\x61\xab\xc4\x96\xb2\x72\x3e\x5f\x28\x75\xfa\xf5\x66\x75\xd4\xeb\x0c\x4b\x4c\xbf\x53\x2c\x26\x3b\x33\xc9\xfe\x61\x73\xfe\xd4\x72\xe4\x89\xdd\xa7\x72\x41\xb9\x2c\xad\xbc\xa2\xd5\xae\x6c\xfa\x8b\x85\xfc\x94\x1b\xcb\xad\x71\x77\x3d\x5f\xf1\x7e\xcf\x82\x19\xfe\xe0\xbe\x78\x23\x45\x89\xdf\x46\xdd\x2c\x6f\x72\x56\x1e\x2d\xa8\x36\x05\xdd\xa0\x3e\xaf\xac\xfb\x22\xdf\xac\x2d\x9e\xc7\xd3\x9e\xd5\x69\x4d\xa5\xfe\xd2\x6d\xce\x8a\x74\xc6\x0c\xb5\x7e\xfc\xa5\x2c\x94\x7a\xeb\x4d\xfc\xd6\xd8\xff\xc9\xc6\xa0\xcb\xfd\xff\x73\xb2\x2c\x29\xa9\x5e\x9c\x9d\x74\x4a\xed\x7e\xcf\xe3\xfb\x75\x2b\x0f\x41\xc8\x94\x38\x69\x5a\xad\x3f\x99\xb6\x1f\x7a\xca\x40\xa4\x86\xf9\xf0\x49\x2d\x32\x4a\x58\xf2\x63\xb8\x89\xeb\xc1\x29\x9d\xab\xf8\xcb\xf1\x1a\xb3\x52\xa7\x3c\x17\xc2\xcd\x42\xf2\x02\xde\xea\x8f\x6c\x5e\x59\x95\xea\x9b\x8e\x97\xcf\xf7\x9a\xda\x04\x59\x6a\x5f\x26\x57\x82\xdc\xa4\x85\x81
\x56\x1f\x5a\xbe\x55\xe8\x2c\x4b\x02\x89\xdb\xb2\xbc\x51\xca\xfd\x89\x6f\x15\x0a\xd9\x79\x4e\xfe\xfd\xfb\xf4\x6c\xf1\xc9\x4d\xc3\xdc\xd5\x34\xf5\xe3\xa6\xc9\xc9\x2c\x95\xf3\x14\x56\x2e\x63\xaf\x90\x29\x36\x38\xcc\x28\x01\x3b\xb5\x6a\x15\x44\x2f\x07\x8b\x5e\xdb\x1f\x22\x56\x9e\xb1\x0d\xd2\x9d\x1e\x9a\x86\x3c\x19\x6d\xde\x2d\xbd\x62\x14\xa7\x60\xa6\x64\x9e\xca\x96\xb9\x02\x75\x35\x6c\x8a\xe2\x93\xb2\xf6\x50\xa7\x96\xad\xd0\xca\x33\x5f\x13\xb0\x1b\x92\xd5\x26\x64\x17\x41\x81\x2a\x85\x55\xbf\xc8\x8d\xb8\x42\x56\x63\xc2\x2e\x5f\xad\x05\x96\x02\x6a\x19\x4e\x6e\xcf\x2a\x36\x10\x8b\xe6\x28\x58\x65\x73\xcb\x3f\x2e\x3d\x7b\x97\xf4\xad\x13\xe9\xb3\x86\xdd\x93\xa4\xcc\x52\xec\x1a\xc5\xec\x62\xb4\x7c\x56\xa8\xa1\x89\xc7\xa5\x7e\xb1\xe0\x64\x7a\x1d\x48\xaf\x9f\x00\xc9\x0e\x9c\xc1\xb2\x71\x90\x9e\xba\x57\xfa\x52\xae\xa3\xc2\x79\xdf\x07\x4c\x85\xd4\x9a\x4d\xb8\x90\x3a\x3e\xa2\x5a\x6e\x6e\xdc\x2d\x18\x9a\x0d\x0b\x34\x60\xb2\x52\x6b\xdd\x42\xcc\x92\xaa\xce\x7c\x7d\xd1\xae\x2d\x07\xbd\x59\xdf\x1b\x52\xf9\xe9\x6a\xd2\xad\xd4\xfb\x6b\x7d\x35\x91\x32\x0e\xe5\xb2\x85\xb9\xf6\xcc\x2f\x65\xe3\x8f\x4b\xcf\xdd\x25\x7d\xef\x44\xfa\x9c\x9b\x23\x05\x7a\x6e\x62\x17\x66\x0c\xb2\xe9\xf8\x78\x39\xd0\x6b\x65\x46\x9b\x58\xa5\x51\xb0\x51\x57\xab\x66\xbe\xbb\x66\x26\x1b\xa9\x7e\x90\x9e\xbe\x57\xfa\xe9\x68\xb4\x54\x16\xcb\x66\x5f\xef\x49\x7c\xb6\x52\x9e\xce\x86\xd3\x76\x90\x29\xcc\x24\xd8\xf0\xaa\x83\x2a\x58\x84\xcd\x5e\xb6\x28\x29\x8a\x60\xb0\x05\xea\x69\xdd\xee\x02\xaa\x29\x95\x24\x2e\xab\xce\x97\xf5\xa9\xcd\xeb\xc2\xe2\xe9\x39\x83\x85\x85\x04\x9e\x33\x95\xbc\xed\xf5\xff\x03\x03\x0e\x7f\x97\xf4\xa3\x53\xd7\xb7\x48\xa5\xb8\x68\x34\x7a\x75\x95\xf3\xfd\x39\x59\xd1\xc7\x16\xdb\x58\xce\x8d\x46\x90\xd1\x35\x56\x01\xea\x4a\xe2\x27\xa4\x6a\x56\xf8\xe5\x5e\x7a\xe6\x6e\xd7\x57\xcd\x67\x9d\xac\x09\x46\x86\x2c\x8b\xdd\xec\x90\x13\x1c\xa5\x29\xab\xb2\xf3\x14\xfa\x35\xd7\x2f\x41\x7a\x58\x5e\x05\xe5\x59\x66\x61\xf4\x84\x52\xa9\x6b\x2e\x2a\x48\xa6\x66\x13\x71\xa4\x4c\x9d\x49\x10\x88\x74\xae\xec\x51\x46\x6b\x50\xae\xbb\xa3\x7c\xe8\x0e\xd4\x5c\x3b\xfb\xe7\xa5\x17\xee\x92\x1e\x9e\x8e\xf5\xa5\x96\xc1\x57\x11\x6a\x2a\x5d\x7a\x38\x84\xb9\xca\xa8\x32\x6c\x8d\x55\x11\x94\xed\xec\x20\x33\x1b\x87\xd0\x2e\xe5\x9b\xed\x79\x2e\x37\x37\xf6\xd2\xb3\xf7\x4a\xbf\x79\x0e\x9a\x86\xce\x3c\x8d\xd4\x92\x5f\x5b\xd7\xdc\x11\xbf\x74\xb2\x63\x83\x1a\xa9\xf2\xa4\x59\x19\xaa\xcf\x24\xb5\x16\x73\xec\x40\x11\x16\x4a\x36\x08\x46\x75\xdb\x5d\x94\xda\xfa\xf3\x93\xf9\xac\x48\x6a\xd0\x98\x66\x57\x2a\xd3\x07\xc3\x9e\xea\xd5\xc0\xaa\xd9\xf0\x78\xb5\xa4\x6c\xc7\xfa\xcb\x73\xaa\x73\xf7\xfa\x7d\x60\x4e\xb5\xbb\xdf\x6f\xdf\x98\xa2\x4e\x53\x3a\xc5\x71\x2c\xc0\x80\x63\xa1\xc4\xb2\x02\x05\x34\x4d\xa0\xb0\x84\x79\x96\x16\x21\x46\xb4\x84\xa0\x06\x34\x52\xe2\x31\xc3\x71\x90\xd5\x34\xcc\xb0\xbc\x0e\x69\x4d\x14\xb4\x74\xa3\xc6\x27\x84\x7c\xfc\x36\xe3\x49\x8a\xa1\x34\xf4\x61\xaf\x41\x1a\x13\xa6\xe8\x83\x7e\xde\x56\x07\x19\x31\x27\x36\xf2\x59\xa5\xd0\xac\x7a\xa5\xf9\xaa\xd3\xdf\x08\xc9\x7a\xe9\x5a\xe5\xcf\xdf\xdb\xf7\xee\xca\x1f\xee\xef\xdb\x57\xde\x02\x7e\xe0\x43\x37\x7e\x0a\xe1\xdc\xeb\x61\x57\xb3\x64\x82\xc6\x2f\xc8\xaa\x5c\xe6\x28\xbd\x25\x74\x94\x26\x04\x2b\xb7\xd7\x81\x94\x26\x2c\x9b\x4b\xb6\x95\x2f\xf7\xc4\xb9\x8f\xbc\x42\x65\xd8\x0c\x0f\xbb\x6b\xa5\xf8\x2f\x4b\x8e\xc9\x7e\xbf\x9a\xb1\xba\x95\x76\xa9\xe8\x05\xab\x0c\x7e\x56\x26\x4d\xd6\x15\x6a\xf3\xa5\xb5\x50\xfb\xab\xda\x62\xd3\x0e\x96\xde\x58\xd8\x2b\x59\x31\xee\xb4\xf7\x47\x5a\x22\xea\xb8\xd9\x7a\x39\xeb\xe6\xcb\x8a\xad\xd6\x94\x3c\xbd\x54\x0a\xec\x74\xd9\xa1\x73\x93\x61\xd1\x83\xc3\x86\x3a\x6f\x76\x07\x2d\x7b\xa8\x14\x5b\x2b\x4f\xc
a\x42\x37\x93\x2d\x90\x6b\x86\xb7\x3b\xb5\x71\x5f\x29\x2c\x5a\xa4\x2e\xb2\x68\xe8\x42\x71\xa4\x68\x4e\x31\x68\xd5\x5d\xe5\xc0\x47\xb9\x77\x25\x9d\xfd\xc8\xf0\xd1\xca\x66\xe5\xae\x22\x83\xb6\x22\xbb\x9b\xce\x72\xac\xfa\xf2\x18\x55\xea\xf9\xc0\x60\xa9\xd5\x9c\x29\x6c\x26\x60\x42\x52\xd3\x0a\xb5\x1c\x4f\xeb\xe0\xa9\x35\xac\x62\xa3\xad\xd7\x83\x8c\x4b\x9a\x6c\xbf\x35\xd6\x48\xb5\x84\x1b\x25\x8b\x1f\x36\x8b\xf2\x8c\xcf\x7a\xbd\x5c\xb5\xb5\x9d\xc4\x3b\x43\x72\x21\xf6\xe6\x9a\x6e\x72\xab\xc5\xa4\x2c\xd4\x33\x9d\xfc\xf4\xa9\xa6\x94\x36\x93\x46\x76\x28\x50\x9b\x82\x84\x66\xcb\xee\x90\xd7\x59\xfe\x59\xd2\xc8\x76\xb9\xc8\x64\x8c\x67\xdd\x25\x9b\x16\x23\x90\x05\xaa\xc5\xe6\x41\x3f\x5f\x1b\xd3\x13\xc9\x1c\x00\xc1\x66\x27\xa3\xa4\x92\x77\xf8\xcd\xd8\xb7\xff\xf2\xde\x9d\x8b\xe4\xa4\xf7\x3e\xff\xb6\xea\xb4\xd6\x65\x4b\xfa\x13\x9d\x6b\x0b\x8a\x68\xf2\x83\x86\x5f\x95\x47\x23\x75\x90\x6b\x95\x37\xeb\xfe\x66\xa0\xab\x70\x21\x67\x0a\x46\x28\x2c\x0d\x4d\x1b\xe4\x2a\xbc\xb6\x22\x87\x03\xcc\xce\x5a\x8b\xae\xd6\x75\x65\xd8\xd5\xa9\x81\x45\xe5\xc0\x9a\xf6\xbb\x9b\x79\x66\xd2\x5e\xc6\x7b\x81\x59\x27\xbb\xce\xf0\xa1\x3f\x5a\x67\xc8\x06\xdf\x6e\x8c\x1b\x7c\x97\x19\x15\x47\x4d\x65\xd6\xcf\x2e\x1c\xf1\x59\x1e\x1b\x83\x51\x36\x33\x70\xa5\xd4\x16\xf7\x61\x64\xfa\x90\x7f\xe4\x73\x67\xc6\x33\xe3\xc9\x2e\x41\x4f\xf2\xf4\xef\xf9\xf1\x84\x22\x6f\x41\xb2\x51\xb0\x74\xbc\xa9\x0b\x7c\xdf\x1d\x7b\xc0\x47\x67\x90\xba\xc8\x0f\x88\x4e\xae\x40\xa8\x49\x30\xf1\x8b\xe8\x20\x37\x40\x33\x0d\x79\x04\x4d\x52\xdc\x2d\x05\x61\xc7\x83\xc8\x87\xae\x63\xdb\x68\x15\x58\x20\xb4\xe1\xf8\xb4\xa0\xf8\xe9\xda\x5b\xc0\x92\x7d\xc4\xed\xdd\x6b\xfe\xf9\xfa\xff\xff\xf8\x66\xb7\xaf\x81\x39\x43\x5f\x7f\x12\x64\x72\xef\xdb\xd7\x6d\xba\xa2\xaf\x3f\x89\xe4\x78\xfc\xe1\x18\xf8\x5f\x7f\x26\xcf\xf6\xc6\x1f\xfe\x7b\x1b\x8c\x11\xba\x2d\x70\x06\x56\xc1\xca\x37\x37\x37\x86\x7b\xc8\x47\xde\xe2\xad\xe0\x2f\xff\xbe\x49\x0a\xe0\x07\xf1\x43\x69\xfa\xf6\xe1\x90\x73\x56\xb8\x77\x7f\xf5\x06\x1e\xdb\xd9\x1b\xf0\xe0\xd8\x5c\x6c\x0f\x5e\x68\x92\x43\x1b\x50\x5b\x41\x62\x39\xbc\xaf\x3f\x89\xaf\x0b\x8a\xfa\x87\xfa\x87\xfc\xba\x3d\x00\x43\xcf\x43\x76\x50\x8b\xab\xf6\xf5\x27\x21\x1c\x7f\x9e\x8d\x9f\x1b\x8c\x74\xfb\xbf\x7b\x21\x0f\x92\xee\x23\x23\x60\x92\xe3\xa1\x4e\x4a\x18\xd0\x22\xaf\xd1\x3a\xa2\x30\x66\x68\x49\xa3\x69\x96\xe6\x24\x4d\xe4\x44\x2c\xb1\x08\x52\x3a\x84\x12\x83\x49\x44\x72\x14\xad\xd1\x98\x17\x29\x89\x61\x45\x89\xa3\xb6\x8c\xf6\xb8\x91\x8b\x8f\x1a\x70\x7f\x24\xae\x7b\x64\xb9\xa3\x43\xff\x3e\xf9\xbe\x6f\x03\xf7\x53\x78\x7d\x39\x53\xc2\x05\x0d\x10\xa3\x41\x9e\xd3\x91\x4e\xb3\x98\x87\x02\x42\x92\x20\x61\x89\xe1\x48\x00\x10\xc4\x92\xc8\xf1\x98\xa3\x91\xc0\xb1\x12\x09\x58\x92\xc2\x14\xe0\x79\x4d\x63\x74\x41\xd0\x05\x5e\xd0\xf5\xf7\x6b\x40\x3d\xbe\x3e\xe6\x84\x81\x1b\x06\x9f\x5b\xf7\xab\x0a\xdf\xf9\x7a\x8f\xc2\x77\x97\xf5\xa7\x5c\xf6\x57\x83\xbf\x1a\xfc\xd5\xe0\xaf\x06\x7f\x35\xf8\xab\xc1\x5f\x0d\xfe\x6a\xf0\xe7\x34\x88\xdf\xfd\xbf\x64\xf1\x72\x61\xa7\xed\xc2\xf3\xaa\x77\x5c\x0c\xbe\xf8\x78\xe0\x5d\x98\x9f\x0c\x78\xe5\x69\xb2\xf7\x6e\x32\x9e\x3c\x51\xb6\x5b\x85\xd1\xf1\x33\x2c\xa9\x4d\x52\xff\xf7\x7e\x6b\xf1\xcc\xd6\xc2\x65\x94\xfd\x65\xec\xe4\xa6\xfe\x57\x28\x17\x33\x60\xbd\x4e\x0c\xb8\xcf\xfc\xb5\x7b\xea\xec\x8d\x6a\xa6\x33\x14\xc6\x79\x09\x53\x88\xb1\xb7\xe4\x5c\xee\x4c\xae\x9e\x7d\x81\x44\xb3\x5d\xae\xcb\xed\x11\x51\xcd\x8f\x88\x6f\xfb\xc7\x98\x1f\xf7\x39\xb2\x2e\x66\xac\x3a\xc9\x95\xf8\x59\xc4\xfd\x2b\xac\xfd\x6b\x94\xcf\x65\x10\x3a\xa4\x87\xbc\x9b\x9e\x06\xec\x73\xcc\x76\x05\x1c\x93\x4a\x92\x16\x5c\x4f\x2f\x74\x35\x2f\xe6\xdd\x74\x8f\xc0\xcf\x11\xbf\x52\x3a\xd1\x53\xcb\x
ad\x5e\x9e\x38\x3c\xc9\xf8\xae\x9a\x7c\x8e\xde\xef\xac\xc0\xeb\x36\x38\x64\x5c\xba\x90\x2d\xe8\x28\x9b\xea\xdd\x7c\x13\xb0\x73\x44\x53\xc5\x1c\x33\xdc\xe6\xb0\x39\x9f\xbb\x26\x9d\x3a\xf6\x6e\x72\x31\xd6\x39\x6e\x87\x42\x8e\xa9\x99\xee\x63\x9c\xd1\xe6\x6a\xae\x96\x33\x29\x73\xef\x67\x9a\x82\x3c\x4b\xf8\xb4\xc8\x73\x8d\x7e\x21\xa1\xcb\x51\xe2\xe0\xcf\x60\xea\x87\xda\x05\x8e\xbb\x62\x8e\xd9\xc5\xb9\x60\xae\x64\x77\x79\x95\x2d\xf9\x6e\x92\x07\xc0\x73\x44\x4f\x8a\xbb\x69\x0c\x4b\x5d\xad\x3c\xcd\x19\x7d\x37\xdb\x03\xe0\x39\xb6\x27\xc5\x1d\xb3\xdd\xe5\x48\xb9\x92\x83\xe4\x55\xee\xec\xfb\xf9\xee\x01\xcf\xf2\x3d\x2e\xee\x98\xef\x3e\x8d\xc9\x95\xf4\x24\xaf\x92\x88\xdf\x4d\xf8\x00\x78\x8e\xf0\x49\x71\x17\xa7\x06\x49\x7a\x92\xc7\x43\xee\x91\xab\xf9\x27\xce\x24\x59\xbf\xbf\x1e\x29\xc8\xb3\x35\x39\x2d\xf2\xdc\x28\xe1\xa3\xf9\xe3\x2e\xe1\xc2\xe5\xdc\x10\xa7\xc9\xe7\x3f\x81\xfb\x55\xe2\x77\xb1\x3e\x99\xa2\x9e\x4b\xc9\x7f\x37\xff\x63\xd0\x73\x95\x38\x53\xec\xc5\x9a\xa4\xf3\x36\x5c\x9a\x6a\xee\x7f\xae\x60\xcb\x3d\xfe\x71\x83\xdb\x32\x4a\x24\xbf\x83\x70\x82\x43\x34\xd4\xc3\x2c\xb6\xd7\x29\xab\x45\x42\x0b\x3c\x84\x88\x5d\xfa\x98\xef\xc4\xa0\x94\x6f\xe7\x89\x74\x3a\x99\x43\x16\x83\xd7\x13\xcd\xe3\x5f\x60\xf8\x28\xcb\x63\x98\x88\xe4\x76\x9e\x72\x44\x31\x9d\x53\xf0\x31\x9d\x41\xf0\x31\xc9\xec\xf7\x8a\xdd\xe1\x67\x24\x3e\x4a\x6c\x8f\x10\x71\x3a\x74\x8c\x23\x5a\x97\xa7\x8a\x47\x3f\x78\x71\x0f\x85\x1d\x48\xc2\x22\x35\xc0\xdc\x48\xe4\xe8\x37\x3a\x3e\x4a\x24\x0d\x12\x11\x39\x9e\x08\xdf\xc8\xe4\xe8\x77\x45\x3e\xca\x24\x0d\x12\x31\x49\xdd\x13\x76\x3b\x8d\xa3\xdf\x42\xb9\x83\x49\x1a\x67\x4b\x66\x37\x4b\x38\x26\x93\xce\x1b\x76\x6d\xf8\xba\x8f\xd1\x29\x50\x44\xe9\x64\x6c\x7c\x53\xa3\x4b\x3f\x01\x44\x40\x67\xe6\x5a\x28\x40\x71\xb1\xff\x15\x00\x00\xff\xff\xd8\xd1\x23\x1e\x2f\x68\x00\x00") + +func operation_fee_stats_2CoreSqlBytes() ([]byte, error) { + return bindataRead( + _operation_fee_stats_2CoreSql, + "operation_fee_stats_2-core.sql", + ) +} + +func operation_fee_stats_2CoreSql() (*asset, error) { + bytes, err := operation_fee_stats_2CoreSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "operation_fee_stats_2-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdd, 0xc7, 0xd6, 0xbc, 0xfc, 0x17, 0xfe, 0x7d, 0x1e, 0x96, 0xdb, 0x72, 0x4, 0xb8, 0xee, 0x80, 0x8a, 0x8, 0xe8, 0xcc, 0x88, 0x26, 0x39, 0x0, 0xc2, 0x52, 0xf5, 0x18, 0xb9, 0x23, 0xb1, 0xf7}} + return a, nil +} + +var _operation_fee_stats_2HorizonSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x6b\x6f\xe2\x48\xb3\xfe\x3e\xbf\xa2\xf5\x6a\x25\x12\x4d\x32\x71\xfb\xee\xcc\x3b\x2b\x39\x60\x12\x12\x02\x09\x90\x99\x64\x56\x2b\xab\x6d\xb7\x89\x13\x63\x13\xdb\x04\xc8\xd1\xf9\xef\x47\xf8\x02\xb6\xf1\x15\x98\xd9\xf3\x61\xa3\xd5\x6a\x70\x57\x57\x3d\x55\x5d\xd5\xd5\x37\xb7\x4f\x4f\x3f\x9d\x9e\x82\x3b\xdb\xf5\xc6\x0e\x1e\xde\x77\x81\x86\x3c\xa4\x20\x17\x03\x6d\x36\x99\x7e\x3a\x3d\xfd\xb4\x2a\x6f\xcd\x26\x53\xac\x01\xdd\xb1\x27\x1b\x82\x77\xec\xb8\x86\x6d\x01\xe1\x0b\xfb\x05\xc6\xa8\x94\x25\x98\x8e\xe5\x55\xf5\x14\xc9\xa7\xa1\x34\x02\xae\x87\x3c\x3c\xc1\x96\x27\x7b\xc6\x04\xdb\x33\x0f\x7c\x03\xc4\x57\xbf\xc8\xb4\xd5\xd7\xed\xa7\xaa\x69\xac\xa8\xb1\xa5\xda\x9a\x61\x8d\xc1\x37\xd0\x78\x18\xb5\xf9\xc6\xd7\x88\x9d\xa5\x21\x47\x93\x55\xdb\xd2\x6d\x67\x62\x58\x63\xd9\xf5\x1c\xc3\x1a\xbb\xe0\x1b\xb0\xad\x90\xc7\x33\x56\x5f\x65\x7d\x66\xa9\x9e\x61\x5b\xb2\x62\x6b\x06\x5e\x95\xeb\xc8\x74\x71\x42\xcc\xc4\xb0\xe4\x09\x76\x5d\x34\xf6\x09\xe6\xc8\xb1\x0c\x6b\xfc\x35\xc4\x8e\x91\xa3\x3e\xcb\x53\xe4\x3d\x83\x6f\x60\x3a\x53\x4c\x43\x3d\x59\x29\xab\x22\x0f\x99\xf6\x8a\x4c\xec\x8e\xa4\x01\x18\x89\x17\x5d\x09\x74\xda\x40\x7a\xec\x0c\x47\x43\xd0\xef\x75\x9f\x42\xfa\x2f\xcf\x86\xeb\xd9\xce\x52\xf6\x1c\xa4\x61\x17\xb4\x06\xfd\x3b\xd0\xec\xf7\x86\xa3\x81\xd8\xe9\x8d\x62\x95\x92\x84\xb2\x6a\xcf\x2c\x0f\x3b\x32\x72\x5d\xec\xc9\x86\x26\xeb\xaf\x78\xf9\xf5\x77\x08\x54\xfd\x7f\xfd\x0e\x91\x2b\xbf\xfa\x7d\x0a\x06\xd2\xea\x6b\x17\x00\x5c\x39\x72\x91\xb0\x18\xd5\x86\xb9\x4f\xde\xe9\xb5\xa4\xc7\x18\x65\xc8\xd6\x47\x25\x63\x5d\xc7\xaa\xe7\xca\xca\x52\xb6\x1d\x0d\x3b\xb2\x62\xdb\xaf\xc5\x15\x0d\x4b\xc3\x0b\x39\xa6\x9c\xe5\x22\xdf\xd1\x5d\xd9\xb6\x64\x43\xab\x53\xdb\x9e\x62\x07\xad\xeb\x7a\xcb\x29\xde\xa3\xf6\x06\xc9\x5e\x28\xea\xd5\x35\xb1\x36\xc6\x8e\x5f\xd1\xc5\x6f\x33\x6c\xa9\xb5\x54\x88\x55\x9f\x3a\xf8\xdd\xb0\x67\x6e\xf8\x4c\x7e\x46\xee\xf3\x8e\xac\xf6\xe7\x60\x4c\xa6\xb6\xb3\x0a\xc7\xb0\x4f\xdd\x95\xcd\xae\xb6\x54\x4d\xdb\xc5\x9a\x8c\xbc\x3a\xf5\x23\x67\xde\xc1\x95\xc2\xb8\xdc\x01\x74\xbc\x26\xd2\x34\x07\xbb\x6e\x71\xf5\x67\xcf\xd1\xfc\xbc\x23\x9b\xb6\xfd\x3a\x9b\x56\xa0\x9e\x96\x41\x0a\xa8\x90\xe1\xd4\x64\x1c\x75\xba\x95\x2b\xac\xfa\x09\x5d\xc7\x4e\x35\xd2\x88\xfd\x0e\x55\x42\xb3\x56\xab\xe4\x77\xad\x35\x84\xc4\xbb\xe2\xb2\x1a\xd3\x55\x85\x67\xaf\xb4\x05\xdc\x44\x07\xa4\x2c\x4b\xdd\xe8\x79\x1d\xe9\x55\x88\xed\x00\x87\x5d\x4a\x68\xb8\x9e\xec\x2d\xe4\x69\x39\xcb\x15\xa5\x3d\xad\x4a\x89\xab\x92\x45\xa9\xa4\x98\x58\x89\xc2\xbd\x94\xac\xbc\x17\x53\x96\xd5\x1a\x33\xc8\x91\x2b\x6b\xbb\xee\xac\x4c\xf2\x9a\x58\xb5\x35\x5c\x73\x5c\xb0\x76\x83\x29\x72\x3c\x43\x35\xa6\xc8\x2a\x4c\xde\x65\x55\xe5\x69\xcd\xb1\xc9\x3a\xa3\xd5\x45\x90\x5d\xb1\xb6\x7c\xdf\x78\x55\xe4\x05\x84\xbf\x9c\x7f\xd0\x98\xab\x96\x0c\xff\xb9\xca\x0f\xd1\xd0\xcf\x77\x06\xb9\x22\x82\xb1\xed\x4c\xe5\x89\x31\x0e\x07\x0c\x05\x10\x52\x94\x95\x75\xac\x3f\xde\x2b\xe2\x5c\xd5\x39\x83\xda\xcd\x7e\xf7\xe1\xb6\x07\x0c\x2d\x90\xdc\x92\xda\xe2\x43\x77\x54\x91\x77\x8e\xd3\x1d\x80\x73\xd8\xdc\xc5\x9c\xfc\x5f\xd5\xd5\x8f\xb2\xf4\x50\xba\x7f\x90\x7a\xcd\x1d\x6c\xb6\x1a\x67\xbb\xf8\xad\xb6\xe4\x04\x93\xca\xb5\x35\x5c\x91\x76\x33\x9a\xad\xac\x61\x4e\xd4\xd7\xd1\x2f\x9b\x45\xb5\xba\xe1\xb8\xaf\x1a\x71\x38\xc8\xab\xac\x5b\xd8\x03\xd4\xd1\x25\xa8\x52\x91\x36\x1c\xfe\x55\xc7\x13\x8d\x17\xab\x20\x4a\xf5\x21\xc5\xc4\xb1\x2e\x21\x24\x14\x2f\x2f\x07\xd2\xa5\x38\xca\x20\x9e\x18\xab\x19\x87\xa1\xe2\x23\x6b\x36\xc1\x8e\xa1\xfe\xf5\xf7\x71\x85\x5a\x68\xb1\x43\x2d\x13\xb9\xde\x11\xb2\x96\xd8\xf4\x97\x62\x2a\xd4\xd0\x0d\x27\xb3\x4a\xfb\xa1\xd
7\x1c\x75\xfa\xbd\x02\x7d\x64\x34\x1e\x6f\xd0\x9d\x80\x2d\xa0\x05\x3c\x22\xed\xf6\xe0\xb1\xd2\xd5\xaf\xbe\x01\x7f\x02\xea\x28\xe2\xab\x5e\x81\x83\xf4\x38\x92\x7a\xc3\x14\x0b\x73\x3a\x76\xdf\xcc\xc8\x17\x9b\x57\xd2\xad\xb8\x25\xe1\xeb\xa7\x60\x15\xae\x87\x26\xf8\x3c\x7a\x06\x46\xcb\x29\x3e\x0f\xab\x7c\x05\x43\xf5\x19\x4f\xd0\x39\x38\xfd\x0a\xfa\x73\x0b\x3b\xe7\xe0\xd4\x5f\x9c\x6b\x0e\xa4\x55\x7b\x85\x9c\x23\x7e\x9f\x12\x1c\x93\x85\x21\xe3\x66\xff\xf6\x56\xea\x8d\x0a\x38\x07\x04\xa0\xdf\x4b\x32\x00\x9d\x21\x68\x44\xcb\x6e\xd1\x33\xd7\x67\xd2\x48\x4b\x8e\xd4\x0f\x65\xae\x2d\x54\xaa\x4f\xc2\x96\xbd\xfe\x28\x65\x4f\xf0\xa3\x33\xba\x5a\xc3\x8a\xaf\xbf\x25\xc4\x6f\xb8\xa4\x80\xd4\x51\x7e\x8b\x89\x6f\x80\xbb\xee\xd9\x74\x3c\xbc\xef\x82\xa9\x63\xab\x58\x9b\x39\xc8\x04\x26\xb2\xc6\x33\x34\xc6\xbe\x19\x2a\xae\x17\xc6\xe1\x96\x3b\x5a\x08\x3f\xf2\xd5\x0d\xfe\xa8\x6d\xb3\x6c\xb9\xf6\xec\x52\xfe\x60\x20\x8d\x1e\x06\xbd\x61\xec\xd9\x27\x00\x00\xe8\x8a\xbd\xcb\x07\xf1\x52\x02\xbe\xf6\xb7\xb7\x0f\x41\x7f\x37\x1c\x0d\x3a\xcd\x91\x4f\x21\x0e\xc1\x1f\xf2\x1f\x60\x28\x75\xa5\xe6\x08\xfc\x01\x57\xbf\xd2\xad\x51\x1a\x88\xfb\x69\x57\xc6\xfe\x60\xca\x91\x59\xca\x55\xe9\xa9\xf6\xd3\xaf\x82\x84\xb5\x8a\xeb\x47\x3b\x69\x78\xf4\x09\x80\xa6\x38\x94\xc0\x8f\x2b\xa9\x07\xfe\x80\x7f\xc1\xbf\xcf\xfe\x80\x7f\x91\x7f\xff\xf9\x07\xe9\xff\x9b\xfc\x8b\xfc\x1b\x8c\x82\x42\x20\x75\x87\xd2\xca\x28\x52\xaf\x75\x9c\x69\x99\x0a\x79\x60\x4f\xcb\x94\x4b\xf8\xd5\x96\xf9\xef\x2e\x96\xd9\xce\xa9\xa1\x1d\xd6\x79\xb8\x9a\x21\x36\x69\x7b\x8b\xa3\x8f\x18\x80\xe1\xca\x56\xe0\xdb\xa6\x07\x38\x09\x1e\x8f\x9e\xee\x24\xf0\x2d\x1e\x11\xc7\x59\x51\x7b\x50\x8c\x69\x86\x29\x88\x51\x18\x57\x47\x98\x39\x04\xda\x17\x65\x16\xd3\x14\xd2\x44\x40\x26\xe1\x6e\xbc\x6c\x1b\x6d\xd6\x30\x6f\x6f\xb4\x19\x4c\xd3\x68\xe3\x41\x52\x88\x76\x95\xb9\x34\xac\xa3\x99\xe9\xc9\x1e\x52\x4c\xec\x4e\x91\x8a\xc1\x37\xd0\x68\x7c\x4d\x96\xce\x0d\xef\x59\xb6\x0d\x2d\xb6\x95\x96\xd0\x35\x3e\xfe\x0d\x55\xf4\x03\xac\x9a\x7a\x41\x2c\xc6\x27\xdf\x81\x46\x86\x06\x14\x63\x6c\x58\x9e\x3f\x30\xe8\x3d\x74\xbb\x81\x3a\x68\xb2\x1a\xc6\x03\xf5\x19\x39\x48\xf5\xb0\x03\xde\x91\xb3\x34\xac\x71\x8a\xcc\x9a\x4d\xd6\x43\x7e\x60\x58\x1e\x1e\x63\x27\x45\xa2\x9b\x68\xec\x02\x77\x82\x4c\x73\x5b\x8c\x67\x4f\xcc\x6d\x21\x47\x24\xc3\x1c\xaf\x29\xb7\x9b\x3d\x3d\x6f\xd8\xd5\x1c\xe9\xd5\x8e\xb5\x49\x3c\xbc\xd8\x32\xc8\x74\x6a\x1a\xfe\x9a\x3d\xf0\x8c\x09\x76\x3d\x34\x99\x82\x55\x9b\xf9\x3f\xc1\x87\x6d\xe1\x6d\xa0\x79\xb3\xa2\x68\x3c\x1a\x4e\xa7\xaa\x61\x5e\x4f\xbe\x72\xb8\x86\x6e\x28\x0e\x46\xc1\x88\x0e\xfa\x0f\x3a\xbd\xe6\x40\xf2\x87\x5f\x17\x4f\xe1\xa3\x5e\x1f\xdc\x76\x7a\xdf\xc5\xee\x83\xb4\xfe\x2d\x3e\x6e\x7e\x37\xc5\xe6\x95\x04\x60\x99\x32\x3b\x9b\x3d\xcd\x68\xcb\x15\xc3\x45\x0f\x60\xe1\x85\xf7\x8e\xcc\xa3\x46\x8e\xc6\x8d\xf3\x73\x07\x8f\x55\x13\xb9\xee\x71\xba\xb9\x82\xbd\x8a\x0c\xdf\x62\xe9\xe3\x82\x86\x0a\xe6\xc6\x7b\x6b\x16\xac\xe8\xac\xf5\xca\x8e\x8c\xcd\x5a\x5d\x36\xcc\x4c\x72\xd5\xd6\xb2\xc8\x21\x99\x4d\x1e\x2c\xff\x65\x54\x60\xd8\xa2\x08\xcb\x5e\x5e\x38\x90\xdb\xc6\x79\xfe\x36\xa7\x2d\x52\x04\xf4\x7f\xf4\xa4\x16\xb8\x78\x2a\xd1\x28\x58\xa1\x2b\x56\x68\xcd\x2b\x55\xfc\xc5\xd0\xf2\xb0\x45\x6b\x3e\xfb\x7a\x5d\xc8\x27\x74\xbb\x54\xcc\xc8\x79\x3d\xfd\xf6\x12\x57\x1e\xe5\x7f\xfc\x8d\x8f\xff\xe4\x78\xb3\xef\xc7\xd9\x45\x1a\xf6\x90\x61\xba\xe0\xc5\xb5\x2d\x25\xdf\xd9\xa2\x85\xb2\x7d\xed\x10\xf2\x09\xed\x10\xed\x5b\xe7\x60\x8b\x6d\x26\x57\x8a\xc2\xac\x7d\xec\xec\x8a\xa1\x59\x62\x2b\xa3\x7e\x43\xac\x71\x44\xbd\x1c\x91\x92\xb0\x69\x88\x6a\xf4\xeb\xcd\xe4\x54\x62\xb2\x67\xde\x26\x37\xa5\xeb\x38\x18\x79\xa5\x95\x
02\xda\xd9\x54\xab\x4c\xbb\x76\x9d\xf0\x67\x6a\x9f\x7d\x4b\x17\xb8\x35\x1e\xf0\x90\x29\xab\xb6\x61\xb9\xd9\x3e\xa8\x63\x2c\x4f\x6d\xdb\xcc\x2e\xf5\x77\x3e\x75\x9c\xd7\xd6\x7e\xb1\x83\x5d\xec\xbc\xe7\x91\xac\xc6\xa1\xde\x42\xf6\x87\x49\xc6\x47\x1e\xd5\xd4\xb1\x3d\x5b\xb5\xcd\x5c\xbd\xd2\x6d\x14\x39\x0b\x46\x1a\x76\xfc\xe1\x45\xf0\xdc\x9d\xa9\x2a\x76\x5d\x7d\x66\xca\xb9\x8e\x12\x2a\x8e\x0c\x13\x6b\xf9\x54\xf9\x61\x95\xb3\x76\xbd\x6f\x94\xe5\xec\x87\x94\xe4\xbc\xea\xbd\x4d\x79\xff\x55\x57\xe5\xc3\xa6\xb1\x42\x19\xbf\x2b\xad\xd5\x52\x74\xcf\x34\x57\x28\x6b\x3b\xed\x65\x93\x17\xa4\xc1\xd8\xce\xce\xc1\x7c\xb3\x6c\x9a\x93\x3c\x55\x95\x33\x15\x5a\x8d\xfc\xd5\x40\x15\x3f\x03\xee\x99\x00\xc3\xc8\xb7\x67\x8e\xba\x3e\xa6\x91\x93\x7a\xa2\xee\xa4\xd1\x38\x3f\xcf\x9f\x8a\xe5\xc7\x41\xb8\xb1\xb6\xaf\x39\xc3\xb3\x80\x47\x07\x1d\x2f\x84\x5d\xe2\x2e\xd9\xcb\x3f\x0b\x93\x2b\x36\x75\x12\xb1\x88\x28\x3c\x1c\x59\x44\x12\xcc\x83\x33\x09\xb6\xcf\x74\x96\xd0\x15\x8a\x5b\x53\x15\x48\xf4\x21\x19\xae\xec\x62\xd3\xc4\x0e\x50\x6c\xdb\xc4\xc8\x8a\x72\x92\xa1\x62\xd9\x4a\xe4\xdf\xe0\x59\x32\x27\x6f\x4e\x13\xc9\xa9\x6c\x9d\x38\xcf\x94\x2e\x8c\x6d\xd3\x67\x9e\xfc\xf4\x51\xcb\xfe\xd9\x60\xd0\xbc\x92\x9a\x37\xe0\xe8\x28\x6e\xc1\x3f\x01\x71\x7c\x5c\xc6\x2a\xab\x7a\x64\xb4\xff\x6e\xd9\xb1\x02\xbf\x84\x4d\x53\xec\x53\x06\xf7\x01\x16\x86\x52\xf6\x0e\xf7\x01\x82\x2b\xfb\xcc\x42\xc5\x4c\x5a\xa5\x0b\xdb\x27\x97\x96\x9d\x0f\x38\x4c\x36\x2d\x91\xf2\xbb\xf2\x69\x4d\x65\xf7\xcc\xa8\x25\xd2\xb6\x73\x6a\x5e\x85\x82\xac\x9a\x38\x13\x72\x40\x5f\x8d\xfc\x33\x0e\xa9\xf2\x24\x2a\xec\xfb\x4b\xa6\x66\x55\x13\x6f\x71\x0e\xcd\xa4\xdd\x88\xce\x8c\x97\xd5\x2c\x20\x7f\x1a\x91\x37\x41\xfb\x47\xa6\x58\xde\x42\xc6\xd6\x3b\x36\xed\x29\xce\x5a\xb6\xf4\x16\xab\x09\xcf\xcc\xf4\x72\x0a\x27\xd8\x43\x39\x45\xab\xa9\x56\x5e\xb1\x6b\x8c\x2d\xe4\xcd\x1c\x9c\xb5\xc2\x26\xb0\xc7\x7f\xfd\xbd\x19\xbb\xfc\xcf\xff\x66\x8d\x5e\xfe\xfa\x3b\x6d\x73\x3c\xb1\x73\x16\xc3\x36\xbc\x2c\xdb\xc2\x85\x63\xa1\x0d\xaf\x6d\x36\xa1\x66\xc6\x04\xcb\x8a\x3d\xb3\x34\x7f\xc5\x9a\x77\x90\x35\xc6\xe9\xd9\x58\x32\xb5\xae\x2c\xb1\xe2\x36\xc6\x5a\x72\x52\x66\xe1\xb9\x9c\xf2\x95\xb2\xb5\x34\x60\x68\x51\x18\x46\x67\xbb\xaa\xf4\x1d\x41\x1c\xfa\x07\xe9\x4a\x8e\x8d\x0d\xa5\x51\xc1\x02\x6a\x7c\xa9\x2a\xbe\x7c\x5a\x6f\x82\x71\x38\x25\x2a\x9e\xaa\x2b\x54\xaa\x70\x62\x52\x45\xc9\xdc\x14\x7c\x30\x35\x2b\x1f\x4c\x2c\x54\xb4\x24\x5f\x64\xab\xda\x42\x1e\x02\xba\xed\x94\x6c\x23\x81\x96\x38\x12\x4b\xd4\xcb\x61\x59\xb4\x1d\x53\x85\x6d\xa7\x37\x94\x06\x23\xd0\xe9\x8d\xfa\x5b\x5b\x32\x7e\xe6\x1e\x82\xa3\x06\x94\x0d\xcb\xf0\x0c\x64\xca\xc1\xf1\x98\x2f\xee\x9b\xd9\x38\x01\x0d\x92\x80\xc2\x29\xc1\x9e\x12\x14\x80\xfc\x39\xc9\x9f\xd3\xdc\x17\x82\x22\x69\x81\xfd\x4c\x90\x8d\xe3\xaf\xd5\xb8\x93\x72\xf0\xda\x43\xc2\xaa\xca\x52\xf6\x6c\x43\x2b\x96\x24\xb0\x0c\x57\x47\x12\x25\xcf\x5c\xbc\x4e\x3f\xb2\x61\x6d\xbd\x6a\x51\x28\x8f\xa6\x09\x9a\xaf\x23\x8f\x96\x91\xa6\xc9\xe9\x05\xab\x42\x19\x0c\xcd\x50\x64\x1d\x19\x8c\x1c\x24\xbb\x68\xd8\xed\x6f\x74\x16\x8a\x60\x29\x82\xac\xa5\x06\x1b\x89\x08\x7b\xb0\x0a\x22\x78\x1a\x32\x75\x44\x70\xf2\xc4\xd6\x0c\x7d\x59\x5d\x0b\x1e\xb2\x64\x2d\x11\x7c\x42\x8b\xf0\x7c\x73\x05\x39\x1c\xcd\x52\xf5\xe4\xac\x1a\x1d\x8d\xc7\x0e\x1e\x23\xcf\x76\x8a\x7d\x4a\x20\x20\x21\xd4\x61\x2f\xf8\xec\x83\xc5\x4c\x79\xa1\x39\xc5\xdc\x49\x0e\xd6\x6a\x6a\x48\xf8\xec\xc3\x56\xf0\xa7\xb0\xc5\x02\x18\x81\xab\x65\x1d\x08\xe3\x02\xd6\x73\xa2\x55\x07\x50\x2c\x48\x60\x85\x7a\x9a\x90\x89\x86\x0e\x67\xa1\xc1\x1b\xb5\x45\x92\x20\xc1\x31\x74\xad\x16\x81\x54\xa0\xce\x7a\xee\x5e\xd8\xe2\x10\x92\x1c\x5b\x4f\x13\x5a\xd6\x8d\x45\
xf4\x76\x81\x3d\x31\x65\xdd\xc0\x66\x61\xd7\x08\x21\x03\x61\xad\x4e\x18\x32\xd1\xa6\x4a\xb4\xd8\xbd\x28\x51\x83\xe5\xea\x75\xf3\x90\x95\x0d\x6b\x8c\x5d\x4f\xde\x5e\x4e\x2f\x11\xc5\x09\x7c\xbd\x16\xe1\x12\xe9\xda\xdf\xb7\x40\xc5\xc9\x04\x92\x04\x41\xd1\xa1\x90\x9c\x5c\x5b\xb8\x09\x5f\x27\x87\xd7\x3a\xa0\xb0\x1a\x96\x94\xf0\x0d\x0f\x75\x6d\xce\x63\x7e\x71\x71\xf1\xe6\xfd\x09\x80\x27\xc1\x49\x97\x0a\xea\x6e\xef\xcb\xef\xa1\x6c\xe1\x5e\xf0\x41\x54\x4d\x0c\xb3\xeb\x28\x9a\xb5\x17\xbc\xc7\xd0\xac\x68\x6b\xb5\xee\xd0\x2c\xbd\xbd\x1a\xb9\x3a\x77\x02\x1a\x50\x67\x09\x85\x57\x59\x4c\x28\x1a\xa7\xd0\x3a\xc7\x2a\x0c\x41\x60\x8d\xa0\x35\x42\xc5\x02\xa5\xf3\x3a\x05\x79\x86\x42\xbc\x4a\x2a\x9a\xae\xd1\xac\x8e\xb0\xca\x2b\x0a\x52\x05\x44\x22\xb8\x0a\x09\x44\x40\x55\xc1\x18\x61\x05\xf2\xaa\x82\x35\x02\x63\x01\x52\x08\x52\x0a\x5a\x31\x24\x58\x96\xd0\x09\xc4\x32\x34\x54\x30\xaf\x43\x81\x20\x21\x0f\x59\x96\x80\xba\xc0\x22\x4e\xe3\x1b\x27\x80\xf0\xff\x4b\x04\x17\x7b\x4e\x71\xe7\x90\x4c\xc7\x5c\xf0\x98\xf8\xc2\x70\x34\x64\xe8\xd2\x52\x8a\x20\x58\x9a\xe3\x20\xc1\x91\x27\x00\xb2\x27\x00\x12\x5b\x7f\xbe\x6c\x48\x10\xb1\xc2\xf5\x3f\x4f\x00\x84\x27\xa0\x21\x8a\xa2\xd8\x64\x45\xf5\x7d\xe6\x38\x97\xb7\xef\x5e\x6b\xf6\x3a\xb2\x47\xb3\x57\x81\xbc\x98\xb4\xe6\x53\xf3\x5e\x61\xef\x2e\xef\x3b\x4f\xed\x89\xa8\x9b\x6f\xfa\xd3\x05\xd1\xe9\x1b\x96\xf5\x32\x7f\x15\x4c\x48\x89\xcb\x1f\xcd\xef\x44\x67\xa4\x5f\x1a\xd7\xf3\xef\xef\x53\x5b\xb1\xae\x17\x8b\x16\x81\xc6\x73\xd6\x5e\xb1\x16\x1f\xef\xbe\xdf\xce\xc4\xcd\x1f\xf5\x21\x3c\x4f\xc4\xe1\xb4\x2b\x78\xe2\xf7\xc5\xab\xb7\x68\x51\x8f\xc3\xfe\x94\x32\xbc\xc5\xf0\x5d\x9a\xdc\xb2\xe2\xc3\xeb\xfc\x62\x48\x4b\x03\xf3\x4d\xe9\x77\xe7\x57\x6f\xfa\xb5\x37\xd0\xc7\x67\x2f\x3d\x7e\x2e\x08\xf3\xee\xa8\xf5\x21\xbd\x2f\x3e\x0c\xfc\xe1\x58\xea\x50\x9b\x61\x8d\x7e\xf4\x39\xab\x3d\xba\x8b\x3e\xa6\xe4\xbd\x58\xf6\xf7\x53\x7c\x84\xf4\xbd\x28\xb6\x88\xeb\x52\xda\xff\x6f\x7f\xa1\x4b\xa5\xfa\xfc\xbc\x40\x60\x0f\xe3\xc4\x0d\x52\x27\x14\xc4\xd2\x04\xc1\xf0\x04\xc1\x23\xc8\x68\xbc\xae\x20\x9a\xa6\x58\x9d\x56\x49\x5a\xe3\x04\x81\x12\x04\x5a\x17\x74\x8e\x42\x34\xc9\xaa\x1c\x26\x29\x9e\x13\x54\x44\x41\x24\x50\x7a\x51\x20\xc0\x5c\x57\xe7\x49\x58\xa1\x94\x64\x38\x56\xe0\x09\x8a\xe3\xd8\xbd\x03\x61\xc9\x77\xa7\xaf\x62\x7b\x2c\x1a\xcf\x8f\x4f\x9f\xd9\x41\x4b\x21\xe6\xaf\x90\x79\x65\xcc\x3b\x8b\xea\xbf\x5e\xbb\xba\xd1\xbf\xb2\x6e\x6e\xd1\xeb\xd9\x43\x4f\x7c\x76\x3e\x5e\xcc\x56\x0b\x2b\xdd\x1b\xaf\xf9\xf8\xf4\x13\xb7\xdc\x1b\xbb\xe5\xe8\xd6\x8f\xcf\xe2\xe4\x45\xd5\xb1\x2d\xb4\x9e\x0c\xec\xfa\x0d\xb7\x0a\x04\x6f\xbe\x69\xc8\x5f\x17\x08\x4f\xff\x06\xc2\x56\x20\x30\x87\x71\xe2\x06\xc4\x8c\xa6\x69\x34\xa2\x21\x85\x74\x92\xe5\xa0\x00\x79\x8d\xe5\x54\x45\x85\x3c\x87\xa0\xaa\xa9\x2c\x41\x09\x88\xa0\x08\x8e\xa6\x08\x4d\x80\x8c\x8a\x38\x9a\x61\x55\xc8\xf2\x02\x89\xe9\xa2\x40\x20\xf2\x5d\x9d\xe3\x28\xb6\xb4\x94\x84\x34\x47\xf3\x14\x4b\xf3\xc4\xde\x81\xb0\x60\x34\xea\x61\x7a\xd1\x7f\xbf\xb6\x2e\x07\x2f\x3f\xf4\xee\xfc\x09\x3f\x7f\xa8\x4f\xad\xc9\xf8\xf6\xea\x7e\x4e\x0e\x1e\x6f\xb4\xb6\x72\x81\xae\xbb\xaf\xec\xbd\xd0\x17\xde\x6c\x8b\x7d\xbc\xb3\x7b\x2d\xb6\x7b\xc5\xb7\xad\xe1\xb2\x47\xd2\xcc\x80\xec\xaa\x1f\xaa\x3a\x5a\x48\xbc\x47\xf3\x67\xa3\xa0\xe1\xfc\x40\x18\x6f\x1a\xb2\x7a\x20\x58\x94\xc5\x36\xcf\x46\x73\xf3\x62\x4a\x93\xa3\xd9\x67\xc6\xb2\x96\x17\xde\xf3\x99\xdb\xb3\xc7\x46\x5b\xb7\x3f\xbb\x4a\xff\xe9\x1a\x49\x83\xcb\xb6\xcf\xf9\xe1\xdf\x40\xd8\x0a\x04\xfa\x30\x4e\xdc\xa0\x54\x0d\x61\x0e\x2a\x18\x12\x1c\xcb\x73\x34\x56\x19\x44\x09\x0c\x83\x28\x9d\xe4\x54\x06\xf1\x0c\x49\x63\x8d\x81\x34\xe2\x58\x95\x63\x39\x92\x26\x19\x5a
\x15\x90\x80\x20\xab\x61\x85\x2d\x08\x04\x42\xc8\x75\x75\x81\x21\x09\xae\xb4\x14\x72\xab\xb9\x11\x2b\x40\x9e\xde\x3b\x10\x3e\x3e\x10\xb3\x78\x7f\xbe\x20\x9f\x09\xae\x6d\x33\xdf\xd1\xdd\x52\x6f\xbf\xb5\xaf\xfb\x70\x30\xb4\x94\xab\x9f\xcb\x7b\x73\x74\xfd\x86\xda\x1e\x4b\x3e\x5d\xc3\x3b\x5d\x77\xef\x24\xfa\x62\xac\x90\xb7\x97\xf7\xde\x77\xda\xa2\x07\x70\xf0\x8c\xee\x4c\xc9\xea\xb9\xef\xa4\xd7\x6f\xbf\xb5\xdd\x56\xe0\xfd\x7e\x20\xc4\x7c\xf3\xd7\x05\xc2\xfd\xbf\x81\xb0\x15\x08\xd4\x61\x9c\xb8\xa1\x6b\x9a\xc6\x6a\x04\xab\x71\x34\x26\x34\x86\xe5\x38\xac\x10\x24\xa1\x10\xba\xce\x53\x2a\x45\x42\x04\x49\x0c\xb1\xca\xab\x34\xcd\x91\x24\xa3\xa9\x2a\x5e\xc5\x97\xc2\xe9\x24\x22\x09\xa5\x28\x10\xf8\x3c\x57\x67\x09\x48\xe4\xcf\x20\xfc\x52\x6a\x15\x08\x24\xcf\xd3\x02\x01\x79\x9e\xdf\x3b\x10\xce\x28\x4d\xb9\x57\x1e\x47\x63\xf8\x43\xfb\xec\x36\x9b\xf3\x33\xfa\xae\xd5\x41\xd2\xec\xca\x7d\x91\x06\xcb\x6b\xed\xa3\xbf\x18\xf7\x29\xfe\xad\xd3\xed\xf7\xee\xd9\xf9\x4c\x6c\x5e\xb1\x0b\xfd\xf9\x65\xf6\x28\xcc\x1d\x7c\x7d\x3d\x5c\x3e\x7a\x6e\xdb\x81\x92\xd2\xa6\x66\xe2\xf5\xac\xe5\xdd\xfc\x0c\x1a\xce\x0f\x84\x58\x43\xd6\x08\x84\xf7\xcf\x5d\xcf\x3b\x5b\xaa\x1f\x67\x0b\xfe\xec\xb3\xd9\xbe\xe0\x09\xc7\xfa\xbe\xe0\x5e\xc8\x5b\x67\x42\x0a\xd2\x74\xe6\x3e\x0c\x3e\x8f\xdf\x85\x40\xd4\xed\xbf\x81\xb0\x15\x08\xe4\x61\x9c\xb8\xc1\x52\x9a\xc0\xeb\x0c\xc5\x62\xcc\xf2\x1a\x54\x48\x4e\x61\x14\x5e\xd0\x49\x0a\xe9\x0c\x05\xa1\xc2\x31\xac\x80\x48\x5a\x47\x3a\xa4\x09\x0a\x69\x84\xc2\x90\x0a\x4b\x51\x0a\xc1\x29\x58\x10\x8a\x02\x21\xb7\xcf\x67\x21\x64\x60\x79\x29\xcf\xf0\x82\x40\xd1\x8c\xb0\xff\x5c\x99\xbc\xfb\xf9\x02\x7b\x33\xc6\x26\x94\x6b\xee\x07\x6d\x2d\xfb\xef\x0f\x8b\x4b\xea\xfb\xd4\x7e\xfd\xfc\xde\x16\xfb\x5e\x13\xde\x90\xb7\xdc\x05\xc7\xfe\x9c\xfd\x1c\x5c\x39\xc2\x83\xf6\x78\xa3\x8c\x6e\x0c\xd5\xd4\xfb\x2f\x4b\x8e\x7c\xfa\xd9\xbe\x7e\x78\xbd\xbe\xfb\xae\xde\x8c\x98\x67\xef\xdd\x76\x26\x30\xf0\xc7\x55\x1c\xb8\xfe\x14\xa1\xb3\xfe\x9f\xe8\x17\xb9\x9b\xdf\x73\xf1\xee\xfe\x35\x68\xe7\x5f\x17\x27\x9d\x7f\xe3\x64\x2b\x4e\xe0\x61\x7c\xdc\x3f\x12\x10\x79\x3a\x14\x38\xe2\x94\x80\xa7\x04\x04\x04\x71\xee\xff\x97\xef\xcb\x02\xcd\x50\xa5\xa5\x34\x29\xd0\x02\xcb\x91\x42\xfd\xc9\x70\x00\xe9\x9f\x6e\x94\xfc\xbf\x8b\xc7\x1b\x83\x5e\x9e\x2d\x87\x37\x17\x5c\xcb\x6a\x09\x57\x24\xb1\x78\xb9\xf8\xec\x12\x63\xcf\x9d\x77\xe6\x1f\xf0\x51\x1b\xfe\x78\x42\x17\xd7\xa8\xed\x0f\xb0\xa4\xda\x4e\x2c\x8a\x17\xaf\xbf\x41\x91\x83\xfe\x6d\x9c\xb8\x64\xc1\xb5\xc2\xa1\xfb\xdd\x17\xb0\xeb\x9d\xfa\x3e\xc4\x82\x76\xf1\x11\x8b\x3a\x0b\xdc\x39\xa7\xbc\x0f\xb0\xc6\x9d\x71\xd8\xf9\x30\x5c\xcb\xcf\x7d\xee\xde\x94\x75\x0f\x1c\x1e\xa2\x31\xcb\x8e\x91\xd4\x69\xce\xdc\xe3\x85\xf5\x4d\x12\x7f\x39\x79\xeb\xfe\xaf\xf5\x5d\x23\xd1\x51\xdf\xba\x27\x71\x62\x1c\x83\xbb\x08\x5a\xad\xf8\xc1\xe1\xb4\x40\x70\x37\xe8\xdc\x8a\x83\x27\x70\x23\x3d\x81\x23\x43\x2b\x7b\x07\x39\xfb\x3e\xb4\xbd\x51\xa7\xb8\x66\x21\xcf\x12\x5c\x8a\x3e\x75\x86\x6c\xb7\xfb\xe4\xf6\xd6\x2e\x29\x36\x4b\xb9\x9d\x80\x81\x87\x5e\xe7\xfe\x41\x02\x47\x1b\xf2\x93\xd8\xcb\xb6\x27\x89\x57\x63\x6b\x9a\xe6\x30\xcd\x5a\x5b\xf1\x5a\x8d\x9a\x73\xa6\xae\xca\x25\x88\x07\xd3\x2c\x5b\x48\x91\xa6\x05\xb0\x2a\x6b\x9e\x7b\xcc\xae\xda\x15\x94\x07\xd3\x3e\x4f\x4c\x91\xfe\x85\xd0\x4a\x2d\x90\xbc\xcf\x33\x54\xc4\xbf\xfb\xb3\xda\x39\xef\xe0\x9a\xd0\x04\x17\xd0\xef\xa5\x83\xe1\x61\xd8\xe9\x5d\x02\xc5\x73\x30\x8e\x47\x57\x3e\x9a\xf0\x2a\xd2\xbd\xf1\x84\xaf\xb1\x57\x42\x94\x13\xd7\xb1\x6b\x54\x77\x85\xb3\x61\x11\x47\x92\x38\x14\x9f\xc4\x13\x10\x9f\x6c\x9d\x3a\xcf\x02\xe7\x5f\x04\xbb\x07\x32\xff\xf0\x7d\x25\x58\xe9\x23\xfb\x59\x68\xc2\xdb\x6b\xf7\xc
0\x13\x70\xa8\x86\x28\xf5\x3e\xc0\xc9\xf6\xd1\xff\xcc\x90\x8f\x5f\xc7\x5b\x1f\x69\x98\x25\x02\xc0\x29\x76\x71\xd8\xd1\x6b\xf5\x09\xc4\x59\x6f\xc1\x9d\x44\x6f\xbc\xe5\x81\xdd\x1c\x27\xde\x13\xa6\xa1\x55\x06\xb8\x79\xe5\xe7\x24\xf3\xd5\xbd\x12\xd0\xd1\x0d\xca\x87\xc0\x1d\xf2\x8a\x43\xcf\x49\x55\x3b\x69\x92\xad\x40\x74\x59\xf4\x21\x14\x08\x79\xe5\xf8\xf4\x8e\x2a\x24\xdf\xdf\xda\x56\x22\x76\x35\xf6\xae\xd1\x18\xe3\xb1\xab\xf1\x8b\x0d\x9d\xba\xeb\x7b\x5f\x5b\x27\xd9\xc5\x21\x47\xcb\x44\x09\x8c\xd9\x88\xb6\xef\x2b\xdf\x1f\xd6\x16\xcf\x6a\xdd\x5b\x16\xc0\xd8\xcd\xeb\x3b\x37\xeb\x86\xc7\xee\x2e\x59\xe6\x7e\x59\x77\xca\xef\x0e\x78\x9b\x59\x0a\xb9\x86\x53\x38\x53\x6f\xf3\x16\x03\x0c\x2e\xc9\x3f\x08\x3c\x9f\x55\x25\x70\xd1\x91\xdb\x5c\x68\xe9\x4b\xff\xf7\xc5\x97\xe2\x57\x06\x72\xfb\x35\xe5\x52\xa4\x87\xb1\x63\x82\x5b\x55\x94\xa5\xd6\x3c\x0c\xb6\x4a\x98\x8a\xb1\xa4\xbe\x2e\xb1\x17\xa2\x24\xaf\xca\x2d\x1a\xbd\x08\x9d\x89\x6f\xeb\x83\x19\x7b\x21\x4c\x73\xab\x16\xb7\x21\xc0\x93\xad\x77\xb7\x4f\xb6\xde\xff\xcf\x51\xe2\x00\xfd\x76\xc8\xa7\x0c\x71\xcd\xd1\x51\xfa\x3b\x27\x7b\x59\xb7\x86\x61\x4b\xed\x56\xfe\x01\x97\x3d\x0d\x5a\x2a\x20\x31\x4f\x8b\x2e\x79\x4b\xce\x8c\x02\xc2\x1a\xd8\xf7\xf7\x83\x22\xde\xe5\x88\x33\xa2\xac\xf8\xf3\x3c\xbb\xfa\x43\x21\xd7\xd2\x61\xff\x8a\xa8\x04\x68\xe6\x77\x88\x0e\x83\x36\x8b\x75\xe9\xf0\xad\xaa\x27\x27\x3f\xbc\x74\x50\x67\x48\xb0\xde\x65\xbc\x59\xfd\x4b\x53\x07\x37\xf4\xd6\x1d\x5b\xa5\xf0\x53\x15\xaa\x2b\x13\xff\xf0\xd6\xaf\xb2\x7f\xfc\x5a\xb5\x32\x4d\x62\xb4\xd5\x95\xc8\xfc\x10\xd9\xaf\xd2\x26\xf3\xb6\xb8\x32\xb5\xb2\x2a\x55\xd7\x6f\xfd\x9d\xb6\x5f\xa5\xd3\xfa\xea\x84\x32\x3d\x72\x57\xbb\x4a\xbe\x4f\x77\x50\xe0\x69\xee\x99\x13\xe0\xba\x01\x5e\xf8\x69\xbe\xc3\x44\x78\x91\x88\x2a\x3a\x94\xcc\xeb\x4a\x3f\x54\xf8\x4b\xb4\x48\x65\xb0\x5c\xec\xe5\x49\x2c\xe3\xc3\x8c\x07\x75\x9b\x6d\xfe\x3b\x4f\xf5\x8b\x3e\x45\xb9\xab\x95\x0b\x78\x96\x0e\x11\x8e\x8e\xa2\xeb\xc8\x4e\xff\xfc\x13\x34\x5c\xdb\xd4\x62\xbb\x69\x8d\xf3\x73\x0f\x2f\xbc\xe3\xe3\x13\x90\x4f\xa8\xda\x5a\x35\xc2\x60\x2d\x3e\x9f\x54\xb1\x67\xe3\x67\xaf\x92\xf8\x04\x69\x31\x80\x04\x69\x0a\xc2\x31\xf8\x71\x25\x0d\xa4\xc0\xc9\xc0\x37\x40\x51\x39\xbb\x17\xdb\x1b\xd1\xd1\x87\x47\xa3\x0b\xf6\x6f\x7e\xcf\x76\x74\x28\x16\xb4\xfb\x03\xa9\x73\xd9\x5b\x6f\x01\x81\x81\xd4\x96\x06\x52\xaf\x29\xa5\x3f\x08\xe6\x97\xf6\x7b\xe0\xe1\xae\xb5\x72\x99\x81\x14\xdc\xc1\xbf\x7a\xd4\x92\xba\xd2\x48\x02\x4d\x71\xd8\x14\x5b\x52\xf1\xbd\x71\xd9\x17\x7d\xa5\x3e\xf1\x7a\x08\x63\x24\xe5\x94\x6c\x92\xe5\x21\x49\xda\x27\xbd\x6c\x94\x69\xac\x70\xa0\x5f\xb2\xa3\x98\x6b\x89\xc4\xa7\x75\xff\x41\x3b\xc4\x71\x64\x59\x21\x5a\x25\x28\x76\x98\x7a\x16\xc8\xfb\x9e\xf1\x3f\x62\x86\x1c\x30\x49\x5b\x64\x2c\x83\x1d\xd6\x29\xb2\xbf\x29\xfd\xcf\x1a\x24\xdf\x35\xb6\xd6\x90\xaa\x7a\x47\xde\xe7\xcd\x81\x6a\x4f\xa6\x26\xf6\xb0\xaf\xc3\xff\x05\x00\x00\xff\xff\x0e\xea\xac\x3d\x0b\x7d\x00\x00") + +func operation_fee_stats_2HorizonSqlBytes() ([]byte, error) { + return bindataRead( + _operation_fee_stats_2HorizonSql, + "operation_fee_stats_2-horizon.sql", + ) +} + +func operation_fee_stats_2HorizonSql() (*asset, error) { + bytes, err := operation_fee_stats_2HorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "operation_fee_stats_2-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xbe, 0xe0, 0x9, 0x53, 0xf, 0x1c, 0x4d, 0x3e, 0xda, 0x21, 0x19, 0x9e, 0x55, 0x4d, 0xd5, 0xfd, 0xa7, 0x6a, 0xdc, 0x97, 0xb3, 0x5c, 0x4e, 0x62, 0x1a, 0x95, 0x1c, 0x7c, 0xcb, 0x64, 0xc1, 0x3}} + 
return a, nil +} + +var _operation_fee_stats_3CoreSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x69\x93\xe2\xb8\xf2\x37\xfa\xbe\x3f\x05\x31\x6f\x7a\x26\xe8\x39\xc8\xf2\x26\xcd\xdc\x39\x11\xc6\xec\xfb\xbe\xdd\xb8\xd1\xa1\x15\x0c\xc6\x06\xdb\xac\x4f\xfc\xbf\xfb\x0d\x96\x2a\x28\x8a\xaa\xa6\x0a\xfa\xcc\xff\x89\x33\x8e\xe8\x6a\xc0\xe9\xcc\xd4\x4f\x3f\xa5\x94\x92\x6c\xff\xfe\xfb\x97\xdf\x7f\x8f\xd5\xfc\x30\x1a\x06\xa2\x59\x2f\xc5\x38\x89\x08\x25\xa1\x88\xf1\xc5\x74\xf6\xe5\xf7\xdf\xbf\xec\xce\xa7\x16\xd3\x99\xe0\x31\x19\xf8\xd3\x93\xc0\x52\x04\xa1\xe3\x7b\x31\xfc\x2f\xe3\x5f\xca\x99\x14\xdd\xc4\x66\xc3\xef\xbb\xcb\x2f\x44\xbe\x34\xd3\xad\x58\x18\x91\x48\x4c\x85\x17\x7d\x8f\x9c\xa9\xf0\x17\x51\xec\xaf\x18\xf8\x73\x7f\xca\xf5\xd9\xe4\xf5\xaf\x0e\x77\xc5\x77\xc7\xfb\x1e\x05\xc4\x0b\x09\x8b\x1c\xdf\xfb\x1e\x8a\x70\xa7\xf7\xb5\x30\x73\x9d\x9d\x6a\xe1\x31\x9f\x3b\xde\x30\xf6\x57\xec\x6b\xbb\x95\x41\x5f\xff\x7c\xb2\xed\x71\x12\xf0\xef\xcc\xf7\xa4\x1f\x4c\x1d\x6f\xf8\x3d\x8c\x02\xc7\x1b\x86\xb1\xbf\x62\xbe\x77\xd4\x31\x12\x6c\xf2\x5d\x2e\xbc\x83\x2d\xea\x73\x47\xec\xce\x4b\xe2\x86\xe2\x85\x99\xa9\xe3\x7d\x9f\x8a\x30\x24\xc3\xbd\xc0\x8a\x04\x9e\xe3\x0d\x0f\x22\x81\xbf\xfa\x1e\x0a\xb6\x08\x9c\x68\xb3\x53\x2e\xe5\x9f\x47\x00\x04\x09\xd8\xe8\xfb\x8c\x44\xa3\xd8\x5f\xb1\xd9\x82\xba\x0e\xfb\xb6\x43\x8c\x91\x88\xb8\xfe\xf0\xcf\x2f\x5f\x52\x8d\x6a\x2d\x96\xaf\xa4\xd2\xbd\x58\x3e\x13\x4b\xf7\xf2\xcd\x56\xf3\x28\xf9\xaf\xc5\x6c\x18\x10\x2e\x46\x4e\x18\xd1\x4d\x28\xe6\x7f\xbe\x2b\x1d\xb2\xd9\x7c\xe1\x07\x8b\x69\x78\x9b\xb0\xf0\x96\xb7\x48\xba\x82\x0f\x45\x70\x8b\xe4\xce\x4f\x29\xc4\x8d\x92\x37\x88\x51\x11\x46\xbe\x94\x22\x70\x3c\x2e\xd6\xef\xcb\x12\xc6\xfc\x85\x17\x51\xe2\x12\x8f\x89\xf0\xcf\x2f\x56\xa9\x95\x6e\xc4\x5a\x56\xb2\x94\x3e\x93\xae\x56\x4a\xfd\x2b\xf0\xfa\xc1\x26\xb6\xd7\x6e\x57\x2b\xcd\x56\xc3\xca\x57\x5a\x67\x17\xbd\x14\xfc\x3e\x9b\x88\xcd\x2d\xfa\xa3\xf5\x8f\x55\x3f\xcb\x7c\x40\xab\x14\x37\xf8\x7c\x2e\x76\xbb\xee\x60\x11\x46\xae\xe3\x89\xf0\x3d\xcd\xcf\x42\x37\xeb\xdd\x79\x21\xf6\xd1\xe0\x1d\xbd\x27\xa1\xdb\xf5\x3e\x53\xfe\x3d\xbd\xcf\x42\x37\xeb\x3d\xc8\x3b\x9e\xf4\xdf\xd1\x7b\x12\xba\x59\xef\x6c\x41\xc3\x05\x7d\x47\xe7\x41\xe0\x23\xfa\x5c\x27\x1c\xcd\x17\x62\xf1\x1e\xb2\xe7\x62\xb7\xeb\x16\x22\x78\x0f\xd6\xfd\xf9\x9b\xb5\xed\x9b\xf1\x7b\xea\x0e\x02\x37\xeb\x3b\x44\xa5\x91\x20\xfc\x7d\xb5\x2f\xe4\x7e\xb2\xf6\x63\xa4\x14\xf3\xef\x37\x9a\xa1\xc4\x7b\x47\x39\x25\xde\xcd\x0e\x1f\xa3\xdf\x7b\xbe\x3e\x89\x7c\x54\xe7\x6e\x0c\xf0\x63\xb5\x3b\xa9\xa3\xe6\xbd\xec\xa5\xe2\xab\x21\xf7\x7d\xd9\xe7\xd0\xf8\x23\xb1\x53\xa0\xfb\x81\xe4\x73\xe0\x7a\x5f\xee\x14\x88\x7e\x20\xf7\x1c\x58\x7e\x28\x77\x93\x7f\xa7\x80\xf2\xbe\xdc\x21\x48\xfc\x50\xe6\xb9\xc9\xff\x40\x72\xd7\x8e\xdf\x17\x39\xb4\xcd\xf7\x65\x5e\x34\x85\xf7\x45\x29\xf1\xde\x17\x78\xa2\xea\x4d\x52\x3b\xe6\x1d\x05\xd3\xbd\x56\xba\xd2\xcc\x57\x2b\xe7\xc2\xee\x6c\x18\xce\xdd\xa3\x44\xd3\xce\xa5\xcb\xd6\x2b\x5d\x7f\x7e\x39\x8c\x8d\x2b\x64\x2a\xfe\x78\xfa\x2d\xd6\xda\xcc\xc4\x1f\xc7\x4b\xfe\x8c\x35\xd9\x48\x4c\xc9\x1f\xb1\xdf\xff\x8c\x55\x57\x9e\x08\xfe\x88\xfd\xbe\x1f\x32\xdb\x8d\xb4\xd5\x4a\x3f\x69\x7e\xd2\xf7\xe5\x85\xc6\x97\x27\x8f\x8a\xed\x6a\xb9\x9c\xae\xb4\xde\xd1\x7c\x10\x88\x55\x2b\x2f\x15\xc4\xf2\xcd\xd8\xd7\xa7\xf1\xed\xd3\x6f\xe1\x5e\xc9\xd7\x4b\xcb\x4f\xc5\x3f\xda\x7c\x46\xe8\x87\xe5\x79\x81\x65\xa5\xda\xba\xc0\x33\xd6\xcd\xb7\x72\xcf\x6e\x9d\x0f\x68\x5f\x98\x3f\x69\xb9\x70\xe4\x23\x85\x7f\xa5\x64\x0f\x40\xad\x94\x98\x0d\x77\x59\xcc\x2c\xf0\x99\xe0\x8b\x80\xb8\x31\x97\x78\xc3\x05\x19\x8a\x3d\x0c\x37\x0e\xc0\x77\x62\x5c\x48\xb2\x70\xa3\xe
f\x11\xa1\xae\x08\x67\x84\x89\x5d\x36\xf1\xf5\xe2\xec\xca\x89\x46\xdf\x7d\x87\x9f\x25\x08\x2f\x0a\x7b\x4e\xc8\x63\x31\xf7\xd4\x3d\x15\xf2\x89\x00\xd7\x00\x3f\xb0\xfc\x3c\xe8\xfe\xfa\x25\x16\x8b\x3d\xfd\xe2\xf0\x18\x1b\x91\x80\xb0\x48\x04\xb1\x25\x09\x36\x8e\x37\xfc\x55\x37\x7e\xdb\xd7\x4d\xa5\x5d\x2a\x7d\xdb\x4b\xef\x2e\xf4\xc8\x54\x5c\x11\x46\xe8\x9a\xf0\x92\xb8\x8b\x6b\xd2\x8a\x02\x2f\xc5\x5d\x12\x46\x53\x9f\x3b\xd2\x11\x3c\xe6\x78\x91\x18\x8a\xe0\x59\xe4\xcb\x6f\x97\x75\xff\xdc\x8a\xef\xc4\x22\xfc\x14\x10\xc7\x44\x20\x46\x9d\xa1\xe3\x45\x17\x27\x43\x31\xf7\x16\xd3\xeb\xe7\xbc\xc5\x34\x5c\x50\xe1\x45\xc1\x2e\x15\xbc\x2c\xe6\x41\xc6\xf1\xa4\x4b\x76\x19\x23\x17\x61\x74\xdd\x9d\x83\xe0\xc8\x9f\x0a\xee\x4f\x89\xe3\x5d\x91\xd2\xb4\x4b\xa7\xa3\x51\x20\xc2\x91\xef\xf2\x30\x16\x89\xf5\xa5\x67\xd2\x25\xc3\xb7\x3c\x7a\xb7\x6e\x8e\x88\x2c\x76\x56\x5d\x87\x50\xc7\x75\xa2\x5d\xe1\x0e\xe5\x7f\x82\xc4\x75\xdf\x3b\xed\x0c\xbd\xdd\x58\x68\xe7\xd6\xe1\x97\xb3\xd1\xc0\xf3\xd0\xe2\x08\xfa\xf7\x7d\x5a\x1d\xb3\x73\x69\xbb\x18\xfb\xf5\xd7\xa7\xaa\xf8\xf7\x5f\x31\xf0\xdb\x6f\xef\x5c\x7d\xe9\xe0\xa5\x9e\x57\x05\xf8\x91\xc6\x17\x75\x79\xa1\xed\x65\x3d\xff\x48\xd3\x6b\x78\x2e\xd4\x5d\xc1\xef\xa0\xf3\x75\xc3\xd8\xf5\x7f\x9f\x6d\x13\xbb\x21\xe3\xa1\x39\x78\x3e\x17\xe7\x6d\xe1\x45\x1b\x78\x6d\xf4\x65\xff\xfc\x59\xf3\x2f\x07\xc6\x07\x47\x8e\xbf\x91\x70\x74\xe6\x8c\xf1\x8a\xdb\xb3\x40\x2c\x7f\x28\x44\x17\x6c\x22\x22\xd7\x09\xa3\x1f\x8a\x3e\x8f\xb6\x9f\xe8\x7e\xf8\x99\xb9\x7e\x28\x22\x67\xfa\x46\xcb\xdf\x07\xd6\x2b\x6d\xeb\xac\xce\x5f\x0e\xea\x9f\xf5\x5d\xd4\xf7\xc9\xce\x1b\xd4\x79\x2b\x37\x78\xa9\xe6\x54\x8a\xb7\xd8\x72\x1c\x7c\x7d\xb6\xc6\x8e\x89\xd7\xaf\xcf\x8d\x5c\x04\x37\x46\xd0\xc3\xcc\x0b\x7f\x2b\x82\xee\xe9\x4e\xc2\x50\x44\xd7\xf0\x3c\xb4\xd5\x37\x4f\x93\xe9\xae\x59\x5d\x57\x3d\x0b\x1c\x26\xbc\x37\x82\xd8\xfe\xe4\x5b\x11\x6e\x7f\x32\xc6\xfd\x05\x75\xc5\x8e\x6f\xcc\xd9\xcf\x48\x3e\x34\x8a\x9e\xd5\xf0\x31\x65\x3d\x94\xe5\xa2\x5e\x8f\x05\x7c\x83\x1b\xc7\x2b\x8f\x08\x5f\x5c\xfa\x84\xfb\x5b\x84\x38\x0c\xd8\x3f\xcb\x87\x43\x5a\x7f\xa0\x83\x33\xbb\xd6\xf1\xeb\xaf\x5a\xae\x1f\x44\xcf\x68\xa4\xd2\x19\xab\x5d\x6a\xc5\xc0\x65\xb7\x29\xd6\x11\x89\x22\x31\x9d\x45\xb1\x5d\xb3\x08\x23\x32\x9d\xc5\x76\x43\x26\x7f\x71\xf8\x25\xb6\xf5\x3d\xf1\xba\xb3\x95\xc4\x71\x17\xc1\x59\x57\xfb\x96\x85\x68\x33\x13\x3f\xae\x94\xc3\xb4\xc4\x99\xde\xd7\x61\xff\xd9\xe2\x1b\xb5\x73\x9c\xd9\xf0\x83\xcb\x4a\xfd\x75\x8f\xc4\xbf\x63\xe0\xb7\x98\x55\x49\xc5\x0e\x5f\xff\x9f\xbf\x62\x86\xae\xab\xfa\x6f\x57\xeb\xea\x3c\x0d\xfb\x74\x95\x9d\xcf\xf2\x9c\xc7\xdc\x37\xd0\x38\x4c\xb4\xed\x5a\xdd\x55\x87\x76\xb9\xe3\x1d\xae\x84\x0b\x7a\x74\x22\x10\xe1\x8b\x0e\x48\xbd\x3a\x62\x0c\x04\x79\x6e\x4b\xaf\xfd\x39\xcb\x79\x3f\xeb\xd3\xd9\x64\xdd\x0d\x3d\xe3\xc1\xb1\x79\x28\xde\xeb\x61\x5e\xfb\x79\x96\xc3\x7f\xd6\xcf\x93\x8a\xdb\xfd\x7c\xd5\xc9\x5d\x9c\x17\xde\x52\xb8\xfe\x4c\xfc\xa0\x4b\x3b\x99\xbe\xa3\x23\x3a\x9b\xee\xb8\x03\x82\xa7\xf9\xda\x5f\x6f\xa9\x87\x13\x8b\x7e\x04\xc4\xfc\x8d\x8e\xe6\x25\x08\x4f\xf3\xc0\x2f\x34\x5e\x02\xf1\xc2\xda\x9b\x60\x9c\xe6\x88\x3e\x0d\xc6\x69\x52\xfc\xd7\x53\xbb\x7d\x99\xbc\x5d\x69\x53\xef\xb5\xee\xb3\x19\xae\xcf\x7a\x75\xb6\x04\xf0\x99\xb4\x6b\xdf\xe3\xbf\x13\xa9\x9d\x30\x5c\x88\xe0\x76\x55\xcc\xe7\x57\xb3\xd3\x57\xb0\x44\xae\x33\x75\xde\x18\x51\xbc\x9b\x0b\xfe\x9d\x59\xd5\x19\x3b\xcf\x56\x55\x3e\x95\x45\x9d\x5f\xff\xa8\x3c\xea\x4c\xe7\xe7\xf3\x9f\xf7\xb4\x1e\x2a\xed\x42\xd3\xb1\x26\xff\x7d\xbd\xe1\xbd\x98\xee\xfd\x34\xc9\xcf\xd7\xd0\x0e\x34\x8f\xd6\x2f\x42\xf1\x0d\xf9\xc6\x25\x01\xd7\xfb\x55\xca\x37\xcf\xb2\x11\xf1\x86\xe2\x6a\x62\x7f\x
0e\xce\xf9\xb2\xdd\xe7\x63\xf5\x69\xee\xfc\xf3\x10\xfd\x87\xf1\xa1\x3e\xdf\x5c\x03\x27\x5a\x07\x22\x5c\xb8\x57\xa3\x7b\xb4\x9e\x8a\x1f\xe6\x73\xa7\x25\xd6\xcf\xe3\x79\xb1\x6e\xf1\x59\x50\x2f\x56\x9c\x7f\xbd\x09\xb8\xe3\x45\xef\xa1\x77\x14\xb9\x06\xc4\x6d\xb4\xbb\x58\xe1\xfe\x0c\x50\xa9\x5d\x66\x2d\xfd\xe0\x07\x93\xa1\xb1\x94\xd5\xb2\x7e\x80\xd9\xfb\x2a\xc3\x0f\xeb\xcb\x57\x9a\xe9\x46\x2b\x96\xaf\xb4\xaa\xa7\x49\xc5\x8e\x55\x6a\xa7\x9b\xb1\x5f\xbf\x66\xad\xac\x0e\x5b\x5d\xa3\x6e\x25\x8d\x56\xb6\x52\xae\xb6\x4a\x2a\xec\x6b\x65\xb5\x5d\xaf\x97\x2a\x95\x4a\xae\x96\xce\xf5\xad\x7c\xbf\x51\x33\x9a\x99\x8c\x31\xb0\x3a\x8d\x8c\x3e\xa8\xf7\xbf\x7e\x8b\x61\x80\x77\x87\x02\xc0\xb7\x98\x02\x11\xd2\x30\x50\x10\x36\xbe\xc5\xc0\xb7\x03\xce\xb1\xaf\x5f\xbf\xc5\xbe\x5a\x75\xcb\xb2\xac\xbf\xfe\xfa\xba\x3f\x81\x9f\xce\x9d\xfe\xfe\xf6\xe7\x8f\x7c\x4c\x36\x6a\xfd\x5c\xbe\x04\xed\xbc\x9a\xa9\xd4\xb5\x64\xaf\x94\x29\x57\x52\xa5\x4c\xa1\x5d\xa9\xb5\x61\xae\xaf\x0e\xca\x99\x66\xae\x5a\x69\xdb\xe9\xaa\xd5\xec\x9a\x75\xdb\xac\xf6\x60\x6e\xe7\xe3\xf1\x00\xe8\xf8\xff\xce\xd7\xcf\x78\xf8\x46\xad\x5c\xce\x66\xdd\x51\xc1\x6f\xcf\x51\x7d\xb4\x96\x5f\xce\x53\x3d\xc3\x68\xa8\x1c\x23\xa9\xab\x86\x10\x06\xe2\x0a\x85\x26\xd5\x29\xc2\x12\xaa\x44\xea\xaa\xa2\x50\x53\x37\x30\x81\x9a\x24\x52\xd1\x80\x4a\x38\xa0\x3a\xa4\x86\xaa\x52\x60\x52\x81\xf1\x0e\x2a\x70\xe7\xb1\xd3\xa1\x9b\x90\x40\xa1\x42\x29\xa1\x86\x08\x30\x29\x10\x26\x90\x5c\x91\x06\x57\x15\xc4\x14\x49\x18\x87\x80\x1a\x8c\x01\xc4\x54\x95\xeb\xa6\xa9\x43\x1d\x23\x03\x29\x50\x27\x8a\xf1\xf5\xa9\x0a\xbf\x5a\xff\x6b\x8f\x64\xaf\xe8\x68\x9b\xc4\xa6\x59\x4c\x9a\x29\x2f\x85\x73\x10\xac\xc7\xc9\x78\x08\x86\x51\xb8\xca\xaf\xb6\x4a\x8f\x37\xbb\x7d\x92\x2c\x90\xcc\x70\x27\x9f\xae\x68\x25\xb2\x9d\xc1\xfa\x0f\x35\x0f\xac\x9e\xa2\xed\xc5\x92\x93\xff\x40\x41\x1e\x7a\x7c\xbd\x68\xea\x6f\x10\x15\x99\x5c\x35\x39\xa3\x94\x70\x8d\x32\xc1\x0d\xc2\x24\xd1\x11\x96\x18\x19\x98\x0b\x82\x54\xac\x62\x29\x81\x54\x38\x95\x42\x62\x6c\x42\x93\x61\x8a\x04\xa3\x40\xa8\x52\xd9\x91\xec\x11\x64\x17\x52\x22\x4a\x0d\x2e\x25\x34\x55\x55\x4a\x45\xaa\x52\x12\x5d\xd1\x14\xa9\x6a\x44\x98\xba\xa9\x08\xa1\x72\xc4\x88\x30\x38\xe5\x0a\xc4\x94\x30\x5d\x51\x24\x01\x54\x72\x43\xfb\xfa\x2d\x06\xbf\xc5\x14\x5d\xc7\xba\x89\x91\xa6\x1d\x19\x6b\xc3\xda\x60\xac\x54\x16\xba\x0f\x68\xc1\xec\x6a\xde\xa6\xba\x6c\xaf\xb3\x6a\x67\xe6\x4f\xe2\xcb\x8c\x55\x8d\x6c\xa5\x08\xcb\x66\xd2\x34\x06\x8b\x41\x23\x17\xe0\x36\xef\x15\x69\xab\xe8\x30\x57\x56\xc7\x1b\x13\xf6\x07\x99\x42\x7b\x52\xa8\x75\x58\xb1\xa5\x8f\xa2\xa5\x1f\x4c\x95\x03\x73\x7a\xb5\x4e\x79\xbd\xff\x94\x7f\xfe\x73\x08\x6f\xe1\xe9\xfb\xca\xaa\xd5\x8f\xdc\x51\xb7\x78\x34\xb5\x9a\xb3\x12\x8e\xac\xce\x7a\x12\xad\x53\x6a\xaf\x59\x9d\xa9\x4e\xb4\x6e\x2e\xd3\xd3\xb2\x61\xb5\x27\xab\x64\x53\x4b\x37\xbc\x65\xbc\x14\x45\x89\x0d\xdb\x26\xd6\x28\x11\x77\x33\x49\x04\x02\xaf\xb3\x36\xc7\xb0\x1c\x4c\x21\x4e\xcf\x16\x61\xbb\x11\x1f\x2e\xf1\xe0\x60\xff\xc3\x8c\x4e\x81\xc2\xcf\x20\xdd\x4f\x3d\x6e\x64\xb4\xa1\x99\x9c\x0a\x4d\xc7\xa6\x49\x85\x09\x29\x33\xa4\x30\x55\xaa\xa8\x54\xd7\x4c\x83\x60\x95\x6a\x40\x67\xdc\x24\xba\x01\x89\xdc\xf5\xa2\x10\x31\x93\x51\xae\x29\x82\x21\xbc\x67\xf4\x23\x5a\x05\x43\x1a\xd4\x01\x12\x82\x23\x9d\x43\xa0\x2b\xd8\x60\x50\x83\x1c\x29\x18\x43\xa8\x41\x81\x74\x82\x74\x2a\x14\xc6\x35\x41\x09\x65\x26\x86\x50\xd5\x19\x05\x3a\x55\x09\xda\x31\x5a\x3d\x63\xb4\xfe\xc4\x68\x4d\xb6\x24\x5b\x18\x4a\x69\x1b\x91\x30\xee\x3a\x72\x9a\xf5\xc4\x3c\x35\xd1\x13\x2b\xc4\x97\xf1\xf8\x80\x79\x1b\x3a\x0e\xc3\x6a\x6d\x6d\xcc\xcc\xe2\x74\xeb\x97\xe6\x46\x2b\xe7\xae\xa2\x2d\x6f\xf4\x1c\x5e\xc1\x75\
x25\x8c\xe7\xfd\xf6\x52\xf8\x6b\xb8\xca\x36\xad\x8e\xd7\x3f\x50\x74\xcf\xe8\x33\x12\xc9\x8c\xd7\x1e\x94\xd7\x33\x46\x1a\x99\x61\xd7\x40\x5a\xc1\x73\xda\xd9\x04\xdf\x0e\xf4\xb1\xa7\x89\x1a\x9c\x0e\x50\x29\x9f\xf5\xbd\x66\x90\xaf\xbb\x76\xd5\xec\xf7\x9a\xc9\x86\x1b\x16\x6c\x98\x9c\x36\x0b\xb6\x91\x99\x67\x96\xa3\x6d\xdb\x30\x96\xb9\x89\x53\x61\x61\x26\x34\x46\x7b\xcd\xe5\x2b\x8c\xbd\x1e\x63\xff\x0b\x18\xab\x00\x6a\x28\x86\x82\x34\x55\x50\xcc\x0d\x1d\x0a\x80\x55\x0d\x19\x92\x20\xc5\xd4\x24\x56\x25\xd3\x34\xa2\xab\x08\x99\x00\x41\x2c\x4d\x09\x08\xd3\x4c\x4e\x90\x24\xc4\xd0\x0f\x31\xf8\x01\xac\xd7\xb1\x0e\x18\x43\xd8\x50\x74\xa2\x20\x22\x54\xa8\x62\x4c\x14\xce\xa5\x6e\x6a\x40\x27\x5c\x10\x43\xd7\x15\x41\xa5\xd0\xe1\x2e\xee\x4a\x13\x6a\x5c\xd3\x4d\xc6\x81\x89\xc4\x6e\xb0\xa0\x9d\x31\xd6\x78\x8e\xc1\x0d\xbc\x9c\x74\x7b\x22\xce\x82\xf5\x52\xdf\x86\xad\xa8\xcd\xe7\x13\xb3\x9e\xd9\xf6\x66\xdd\xe2\x32\x5b\xe8\xfb\x6b\xb4\x6c\xe7\x42\xa7\xe1\xd4\xb4\x6e\x96\xe8\x69\x45\xdf\xe4\x68\x21\x91\x19\x87\x26\xf1\x9c\x14\xab\x2d\x73\xcb\xfa\xb2\xd1\x68\x91\xe1\x4c\x2c\x36\xb0\x34\x60\x7b\x84\xf7\x8c\x1d\x9e\x10\x5f\xb6\x1d\x34\x33\x0a\xd6\xb4\xd7\x20\x33\x3d\x67\x20\xb3\x1a\xaf\xf6\x98\x55\xac\xc0\x44\xc2\x0e\x4b\x03\xba\x59\x77\x4b\x1d\x77\x40\xf1\xa0\x5d\xde\x14\xfa\x1d\xbf\x1f\x8c\xf3\xfa\x74\xa4\xca\x0e\x48\x12\x75\x3e\xe8\xe4\x82\x84\x5b\x1c\x2e\x13\x6c\xa2\x64\x64\x25\xa9\xe9\x7b\xfd\xf5\x2b\x8c\x4d\x87\xd7\x6a\xfd\xbf\x80\xb1\x3a\x11\x08\xe9\xc0\x14\x92\x49\x43\x33\x0c\x64\x70\x55\x70\xa0\xaa\x50\x51\xa0\xa6\xef\x38\x4a\x31\x86\x88\x4a\xa0\xab\x8a\xe4\x42\x55\x08\xd1\x75\x4a\x88\x4a\x81\x14\x6c\xc7\xb6\x47\xb0\x1e\x11\x13\x71\xd5\x54\xb1\xa2\xeb\x04\x52\x06\x25\x64\xaa\x81\x38\xe7\xc4\x10\x86\xc0\xba\x20\x84\xee\xa2\xb6\x80\x94\xea\x80\x08\x53\x31\x4c\x62\x72\x8d\x69\x58\x0a\x2c\xbe\x7e\x8b\xe9\x67\x8c\x35\x9f\x18\xbb\xb6\x61\x66\x34\x4a\x19\xba\xe2\x96\x86\x13\x30\x5a\xce\x93\xbc\x36\x49\xac\xd3\xb3\x96\x93\xb3\x6d\x4f\x45\xc5\x75\x0e\xfa\xf1\xf9\xa0\x91\x32\x81\xd5\x00\x49\x50\x9d\x26\x60\xa2\x90\x1a\x46\xbe\x32\x50\x73\xbc\x5a\x49\xea\x2d\xde\xb1\xba\x28\xbe\x68\x4d\x02\x9d\x68\x43\xb0\x47\x78\xcf\xd8\xd5\x09\x71\x39\xd0\x83\x84\x63\xe5\x39\xc8\xf5\x6b\x89\x02\xd2\xfd\xae\xbf\x9c\x75\x97\x8b\x99\x54\xb7\x25\xbe\x01\xfd\x51\x9a\x27\x6b\x5e\x17\x1a\x45\x51\xa9\x6c\x27\x9d\xae\xb3\xb4\x51\x58\xf1\x55\x3e\x5b\xd0\x59\x6f\x3e\xd7\xe3\x7d\x2d\x88\xea\x81\x97\xf1\x66\xb8\x95\xf6\xe2\xde\x9e\xb1\xed\x2b\x8c\xcd\x5e\x1d\x22\xfc\x17\x30\x56\xea\x5c\x17\x0c\x1a\x08\x73\x05\x99\x92\x30\xc2\x75\xc8\xa0\x90\x58\x03\x12\xec\x3a\x5c\x03\x41\xc8\xa0\x26\x90\xe4\x44\x10\x26\x0d\x69\xaa\x8a\xc9\x55\x43\x51\xb5\x7d\x6c\x7b\x08\xeb\x09\x90\x3a\x82\xd0\x14\x48\x6a\x48\x33\x19\xd5\x0d\x86\x05\x80\xcc\xd0\x35\x09\x01\x50\x15\x0a\x74\xa4\x61\x28\x05\x12\x0c\x0b\xc5\x54\x54\x83\x9b\x58\x9a\x18\x52\x02\xe9\xd7\x6f\x31\xe3\x8c\xb1\xe8\x89\xb1\x4a\xe0\x8f\xea\x72\xb9\x85\x8d\xe9\x28\xaa\x45\xd6\x36\xdf\x28\x64\x8a\x52\xe8\x13\x7f\xb9\x6a\xaf\x13\xa2\x4c\x66\x9d\xc5\xbc\xba\x4a\x84\x5e\x22\x3e\x8b\xd3\x96\x67\x2e\x7c\x77\xd3\x5f\x6c\xf4\x42\x11\x96\x6c\x25\xe8\xe5\x94\x55\xdf\xa5\x43\x25\x1d\x24\x52\x4d\xe8\xb9\xf5\x13\x63\x37\x67\x88\xcf\xe7\xa3\x64\x65\xd4\x8c\xfb\x7d\x90\x99\x27\x1a\xcd\x59\x3a\x3d\xf1\xe3\x13\xaf\x63\x18\x33\xd0\x77\xed\x78\xca\xe9\x48\x90\xed\xbb\x76\x76\x88\xfb\x79\xcf\xa8\x35\xd3\xb2\xd4\x09\xbd\x61\x29\xdb\xa9\xe5\xad\x32\x4d\xf6\x9b\xcd\x84\x6f\x16\xb4\x5e\x1a\x2a\x3a\xd6\x8b\x46\x71\xdf\x22\xfa\x57\x18\x9b\x03\xd7\x6a\xfd\xbf\x80\xb1\x26\xd3\x05\xa6\xba\x60\x48\x08\x45\x17\xba\xa4\x5c\x00\x0d\x18\x1a\xc6
\xd8\x54\x09\xd2\x09\xe0\x18\x50\xa8\xa9\x84\x73\x26\x00\x53\xa4\x54\x0c\x82\x74\x05\x60\x13\x88\x7d\xfa\xff\x08\xd6\x1b\x42\xd5\x0d\x22\x34\xa0\xe8\xd0\xd0\x85\x64\x18\xab\x44\x20\x21\x01\x81\x08\x01\xa6\x70\x1d\x53\x88\x80\x22\x14\x09\x8d\xdd\x18\x96\x62\x40\xa4\x86\x81\xa9\x1b\x48\xdf\xf9\x61\x9e\x31\x16\x3f\x31\x36\xd1\xeb\x98\x36\x29\x80\xbe\x34\x36\x51\x3b\x2c\x2d\x5d\x0b\x95\xaa\x3e\xb1\x4b\x76\xcb\x4f\x10\x23\xc4\x4b\xb6\x96\xad\x7e\xab\x35\x75\xd4\xec\x20\x0c\x6b\x8b\x49\xa4\x27\xb5\x59\x17\x7b\x7d\x61\x39\x98\x69\x32\x59\x48\x64\x21\x00\x38\xd3\x6b\xc5\x89\x2f\x6b\xde\x19\x63\xcf\x48\x54\x1d\x36\xa7\x79\xdb\xad\xae\x71\xa1\x87\x19\x1c\x66\xc6\xc6\x94\x5a\xdd\x05\xdc\x28\x53\xb7\x0a\x26\x59\xee\xae\xf4\x51\x86\xf5\xb7\xca\xa2\xd2\x0d\x26\x56\xa7\xe0\x9a\x68\xda\x0a\x7c\x73\xe5\xfb\xc3\x72\xae\x33\xdd\x0c\x93\x9a\x2c\xcc\xf2\xe3\x45\x3d\x00\x93\x5c\xc7\x6f\xef\x35\xb3\x2b\x8c\x2d\xf4\xaf\xd5\xfa\x7f\x01\x63\x39\xd1\x4c\x6a\x28\x90\x33\xc4\x75\x0a\x25\x31\xa9\xc4\x54\x12\x80\x39\x55\x14\xa9\x72\xca\x54\x4a\xb8\x84\x1a\x36\x01\x52\x89\xae\x60\x6c\x22\x03\x23\x84\x00\x31\x91\xdc\xb1\xed\x11\xac\x37\x88\x2a\x21\x50\x21\x93\x9a\xc1\x18\xd0\x29\xa7\x84\xca\xdd\xd8\x41\x35\xa1\xa2\x73\x09\x14\x0d\x52\x81\x76\x59\x9e\x2e\xb0\xae\x48\x02\x89\x4e\x20\x31\x15\x53\xa7\x00\x7d\xfd\x16\x43\x27\xc6\xea\x4f\xb3\x5f\xb6\xba\x16\x53\xc5\xcc\x9b\xa3\x9e\x1b\xc7\x5a\x3a\x39\x99\x0e\x98\x31\x22\xa9\x81\xbd\xa9\x1b\x2a\xd3\xca\xb9\x44\x66\x3e\x6a\xd8\xbd\xd4\xb0\xd8\x5c\x36\x36\x0e\xb4\x47\x89\xa0\x6d\x0f\x6d\x73\xba\xdc\xa4\x87\x13\xbf\x39\x5e\x95\xfc\x19\x4c\x7b\x71\x59\xec\x69\x0b\xd3\x6c\xe2\xc3\x10\x72\xcf\xd8\xb3\x71\xac\xd2\x1d\x6c\xf4\x5c\x65\x5b\xcf\x1a\x4b\x98\xd8\xd4\x25\xf7\x13\x0a\x6a\xd7\x60\xd9\x51\x4b\xd5\x5e\x3c\x17\xe6\x33\xab\x71\x54\xeb\xab\x7c\x5e\xdb\x58\x9b\x2d\xa0\xe5\x24\x85\xc6\x12\x35\x70\x65\x93\xe1\x28\x59\x0f\x8c\xbc\x6c\xf5\x66\x6d\xe9\xcf\xbb\xce\x6c\xcd\xa9\xbd\xd7\x3c\xbc\xc2\xd8\x22\xba\x56\xeb\xff\x05\x8c\x85\x54\xaa\x1a\xc1\x88\x62\x84\x14\x4d\xa1\x48\x57\x28\x13\xd2\x80\x98\x9a\x42\xaa\x8a\xe0\xa6\x30\x35\x46\x54\x0c\x00\x60\x5c\x62\xc8\x01\x80\x04\x2b\x5c\x61\x06\x56\xf5\x1d\xdb\x1e\xc1\x7a\x5d\x72\x0e\x19\xd6\x75\x1d\x9b\xc8\x34\x54\xcc\x21\xd5\x08\x42\x40\xe1\x58\xc5\x1a\x33\x34\x62\xaa\x4c\xc1\x0c\x60\x48\xa0\xa1\xea\x86\xa4\x94\x08\x2c\x18\x37\x0d\x86\xcd\xaf\xfb\xa9\xf4\x67\xc6\x2a\x4f\x8c\xc5\xb3\x5c\x34\x2a\xb1\x71\x27\x5c\xce\xcc\x38\x8d\x0f\x3d\x9a\x96\x35\xba\x32\x03\x59\x28\xb0\x7c\xd5\xcd\x0e\x44\x76\x9a\x1f\x16\x45\x6d\x52\xd0\xfb\xb9\x42\x72\x39\x68\x56\xd5\x4c\xd7\xc3\xb2\xa3\x06\x56\xbe\x0e\xe8\x14\x0f\xc3\xdc\xb6\x27\xdb\x66\x94\x26\xdb\xdc\x30\x71\x20\xcb\x9e\xb1\x67\xe3\x58\xde\xdd\x36\x73\x23\x69\x6f\x7b\xd9\x52\x7e\x52\xad\x8e\x7d\x5b\xef\x4f\x51\x35\x39\x0b\x66\x39\x9d\xa5\xab\xc5\x54\x25\x33\xce\x98\xd3\x9c\x22\xd5\xe6\xa6\xd3\x19\x88\x5c\x5f\x07\x41\xd3\x1f\x26\x61\xcb\x2d\x0f\x8a\x0c\x65\x99\xdd\xf4\xa7\x95\x6e\xdc\x0c\x66\x66\x85\x87\xee\x5e\xff\xe4\x0a\x63\x6b\xfe\xb5\x5a\xff\xbf\x9d\xb1\x6f\xac\x56\x5c\xd9\x98\x79\xc7\xda\xc7\xeb\x5d\x7d\xf7\x28\x7b\x6b\xdb\xd9\x7d\x3a\x2f\x77\x8e\xdd\xa1\xed\x8d\x7d\x5f\x77\x68\x7c\x63\x87\xd6\x47\x57\x8e\xce\x76\x69\x9d\x56\xdf\xec\xec\x20\xa3\x16\xda\xd0\xd6\x2a\xa5\x6a\x36\x97\x32\x73\xa9\x9e\x5d\xb1\x2b\xed\x5c\xae\x97\xef\xd6\x7b\x8d\x7c\xb2\xd1\xaf\x5b\x9d\x72\xb6\xdd\x6c\x96\x60\x39\x35\x30\xd2\x8d\x16\x3c\xcc\x7e\x1f\xd6\x68\xf2\xb0\x69\xb6\x5a\xf5\x45\xa5\xab\xad\xd7\x89\x6a\x55\x9b\x3a\xb4\xae\x9b\x7e\x64\xa3\x76\x72\xec\x58\xc5\xb0\xd2\x6c\xbb\x46\xbf\x56\x73\x4e\xec\xdb\x4f\x56\x1f\x3a\x88\x5
d\x33\x4b\x3b\xba\x9b\x16\x4b\xa5\xa1\xb0\x59\x54\x9e\x17\x36\x1d\x64\xd4\x4a\xcb\xc1\x68\xd2\x76\x9b\xf5\x09\xee\xac\x66\xb5\x69\x16\xc6\x9d\x05\xed\xec\x2f\x4a\x32\xdc\xde\xa6\xf7\x1f\x87\xcf\x7f\x92\x7b\xa5\xab\xe7\xef\x29\xcb\xc2\xf6\x59\x2b\x4e\xae\xcd\x0a\x37\xfb\x75\xb5\x0f\x8d\x56\x2a\x00\xcc\xca\xf5\x8d\x8a\x5f\x56\x58\x15\x98\xf1\x54\x12\x6d\xa5\x55\x8f\xba\xdb\x56\xb9\xae\xed\xd5\xb5\x8b\xe9\x92\x23\xbc\x61\xb6\x66\x2e\x5d\x77\x6d\x6f\x33\x8d\x14\xed\xd5\x97\xcb\x09\xea\xcd\xfb\x68\x86\xd8\x26\x59\x1b\x56\x23\x2e\xc3\x89\x5b\x8c\x5c\x53\xc4\x59\xc3\x53\xd0\xc4\x19\x60\x77\x60\xd4\xe2\x9e\x95\xa9\x06\x9c\x15\x7b\x23\x9a\x25\xa0\x2d\x3d\x8d\x27\x87\x7f\xfd\x75\xd9\x5b\x3c\xb8\x6a\xd4\xbb\xaa\xa6\xfc\xb2\x6a\x52\xa9\xb9\x17\xce\x69\x75\xb8\x08\x66\x65\xd1\xb3\xd5\x8a\x92\x11\x05\xa0\x2a\xa9\xee\x56\x73\x46\x4d\x6c\x8c\x73\xd4\x1a\xe4\x93\x5d\xee\xd4\x9f\xab\x26\x73\x11\x6d\x3e\x0c\x7d\x67\x44\x0c\x0c\xd0\x28\xb7\x41\xd4\xd2\xd2\xad\x48\x77\x73\xe5\xf9\xaa\xe4\x56\x91\x17\x54\x38\x48\x27\x5a\x49\x27\x61\x67\x46\x75\x7f\x9b\xb3\x99\x09\xac\xb8\x97\xc4\x9a\x4e\xd8\x20\xe5\x27\x6d\x34\x23\x45\xd2\xaa\xd5\xab\xa1\x0d\x52\x22\xdf\xd4\xd4\xbc\x6e\x27\xad\x9f\x0e\xbd\x76\x17\xf4\xf5\x0b\xe8\xed\x7c\x62\xd4\x1f\x04\x93\x56\xcf\xcb\xf3\xd0\x43\xdd\xea\x36\x9a\x8b\x7c\x65\x15\x47\x02\xdb\x38\x93\xa9\xcc\x6d\xd7\x30\x4b\xfd\xe8\xd0\x73\xed\xa1\xcf\xde\x0b\x3d\x64\xb6\x92\x6a\x39\xca\xa2\xdc\xad\x8b\x92\x4f\x66\xed\x90\x2d\x42\xbb\xd8\xab\x60\xab\xd4\x08\xa2\x7e\x8f\x88\x68\xdd\xb1\x7d\xb2\xec\x56\x9b\x8d\x7e\xb7\xa4\xd1\x5e\xcd\xaa\xb7\x13\xc1\x10\x5a\x7a\x71\x03\xa7\xc1\x06\x6c\x37\x73\x3b\x5e\xcb\x41\x60\x14\x7a\xaa\x9b\xaa\xff\x74\xe8\xf5\xbb\xa0\x6f\x5f\x40\x6f\xd5\x96\xf5\x64\xae\x9e\xab\x1b\x34\x41\xd1\xa4\x6a\xc3\x71\xc7\x63\x1c\x68\x20\xe7\x56\x94\x76\x92\x6e\x0d\xbd\xda\x5c\xba\x81\x93\x3a\xb1\x3e\x77\x2f\xf4\x4c\x34\xb0\xd7\xd3\x25\x6a\xc1\x4e\x7d\xe6\x6e\x2a\x78\x54\xf2\xe7\xab\x7a\x30\xaf\x54\xcd\x69\x6a\x5b\x6d\x3a\xbd\x8d\x86\xdb\xd5\x90\xdb\xc4\x8a\x6a\x41\xa3\x4a\x66\xe9\x5c\x9b\x36\x8a\xae\x92\x8b\xe3\x40\x8f\x9c\x62\x23\xd1\x2f\xcc\xe0\x22\x70\x57\xf6\x36\x5b\x5a\xa4\x56\x3f\x1d\x7a\xe3\x2e\xe8\xfb\x97\xac\x97\x09\xc3\xd3\xa3\xaa\x5c\x18\x4e\xaf\xe0\x98\xa5\xc9\x24\xe8\x87\xa5\x6e\xc4\x64\x2f\xe9\x74\x16\xa9\x76\x73\x89\x2a\x25\x22\x3a\x67\xd0\xe7\xef\x85\xbe\x19\xcf\xb9\xdc\xf2\x0a\x85\xea\x5c\xb3\x32\xd3\xcc\x4a\x64\xb3\xa9\x15\x5c\xe0\x4c\x67\x4e\x72\xab\x46\xa6\x87\xd7\xa1\xb3\x71\x32\xd5\x44\x46\x8b\xfb\x6e\x7e\xcc\x16\x04\xe5\x78\xb4\x9e\x74\x72\xca\xa2\xd5\x97\x1a\xa9\xaf\x96\xdd\x66\xa2\xea\xb5\xf9\x24\xe1\xdb\x2b\xd5\xfe\xf9\xac\x37\xef\x82\x9e\x5d\x42\x5f\x62\x83\xe9\x66\x15\x37\x9a\xea\x24\xe7\xb8\x14\xf2\x91\x66\x97\x94\xed\x08\xa5\x3d\x44\x69\xab\x05\x3a\xbc\xa6\xcf\x47\x28\x2e\x4f\xd0\x5f\x8e\x7d\x3f\x0c\xfd\xb4\xe3\x97\x66\x69\xd2\x6a\xda\xee\x70\xd4\x92\xf1\x1e\x8a\xa8\x1c\x6e\xec\x4d\xd5\x37\x1a\x6b\x99\xa9\xa3\xad\x9e\x8e\xa7\x5a\xe9\xe4\x4c\x1f\xda\x09\xb0\x5e\x0e\x45\x54\xac\x94\x30\x59\x3b\x55\x7f\x92\x89\x77\x9a\x83\x9a\xd0\xda\xab\x24\xcb\x94\x6d\xb3\x56\xac\x76\xb4\xff\x00\xeb\xd1\x5d\xd0\x0f\x2f\x03\xce\xac\x88\x73\xc5\x52\xbf\x98\x8b\x47\xf5\xa2\x55\x5a\x90\x44\xbe\x69\x37\x47\xc5\x9a\xb5\x70\xbc\x7e\x53\xea\x68\x26\x1d\x73\x19\x95\xe0\x29\xd6\x17\xef\x85\xde\x1b\x66\xe2\x7a\x3e\x9d\x6a\x98\x8d\x9e\xbe\xd2\x60\x7c\xb0\xe8\x2c\x92\x68\x02\x66\xe6\x40\xe7\x5b\xb0\xee\x0e\xd7\xd3\x65\xdd\x72\x41\xba\x36\x34\xc9\x74\x3b\x6d\x9b\x29\xbc\xd0\x72\x5e\xa3\x1d\x6c\x60\x6e\x35\x9f\x24\x50\x76\xda\x9b\x2d\x85\x69\x0f\xc6\x99\xd2\x3c\x1a\xa5\x7e\xfe\x08\x07\xdf\x
05\xfd\xe4\x92\xf5\x75\xcf\x1d\xb2\x49\x16\xbb\x79\x93\x75\x88\x54\x70\x4f\x84\xd6\x28\xd5\x58\x50\x68\xaf\x64\x85\xe3\xd6\x02\x34\x82\xb2\x48\x25\x4e\xd0\x97\xee\x86\x7e\x9b\x9c\xc7\x09\x4c\x05\x49\xb9\xee\xda\x80\xc0\x54\x6a\xb6\x98\x87\x8d\x5c\x35\x4d\x1d\x5b\x6c\x47\x8d\xc4\x3c\xb1\x32\xd9\xa4\x16\x34\x03\x9b\x8b\x6c\x79\xaa\xd4\xec\x6e\x57\xab\x89\xf1\x0c\xe1\x5e\xa2\x81\xf5\xa5\x12\x6c\x22\xad\xb6\x2c\xfa\xa5\x65\x7b\xe0\xc2\x27\xd6\xbf\x9d\x49\x5c\xbb\xd1\xe1\x13\x99\xc4\xd3\xcd\x0e\xcf\x95\xc9\x4c\xaa\xea\x5c\x70\xa4\xa9\xa6\xa1\x1a\x02\xab\x40\x50\xae\x30\x00\x4c\x43\x25\xaa\x81\x54\x55\x37\x81\xe0\x2a\x95\x48\x05\x26\x63\x5c\x02\x05\x50\xdd\x60\x5c\x55\x15\x20\xce\x2b\x75\x1f\x5a\x0e\x83\xfb\xf1\xa0\x14\x55\x2a\xb6\xa6\xd0\x71\x2e\x87\x34\xd3\x21\x85\x28\xe5\x2d\x1c\x50\x5a\xd7\xb3\xd5\xbc\x35\x5f\x29\x85\x66\x6f\x36\x44\xf1\x43\x07\xf0\x5e\xe1\xaf\xdf\xd8\xf0\xe1\xc2\x9f\x6e\x6e\x78\x2e\xbc\x4b\xc2\x68\x7f\xdb\x24\x3f\xde\xbe\x74\x71\x7c\xfd\x16\xbb\x7f\xea\xe7\xb2\x45\x5d\xf1\xe3\xd8\xc4\x48\xc0\x46\xce\xf2\x78\xf2\xd2\x8f\xff\xb3\xdf\x21\xfb\xcb\xf1\xa1\x66\xbf\xfc\x11\x53\x0e\x7b\x66\x7f\x09\x45\xb0\x14\xc1\x2f\x7f\xc4\x7e\x59\x2a\xca\xbf\x94\x7f\x81\x5f\x8e\x27\xd8\x22\x08\x84\x17\x95\xf6\x45\xfb\xe5\x8f\x18\x7e\xf9\x7b\x72\x7f\x67\x6b\xf8\xcb\x1f\xb1\xff\xf7\xcb\x93\xa1\xff\xf3\xe5\xdc\xec\x5e\x72\xa7\x18\xa8\x12\x98\x0a\x10\x12\x02\x93\x32\x0a\x21\x65\x98\x49\xa2\x48\x48\x15\xae\x98\x3a\x87\xd2\x94\x42\x01\x04\x6b\x48\x55\x24\x14\xd0\x80\x06\x27\x54\x6a\x2a\x35\x0d\xf8\xe4\xd1\xb3\x5e\x4f\xac\xa3\x5f\xfe\xb8\xb0\x76\x28\xcc\xae\xec\xbf\xfc\x11\x03\x2f\x4e\xfd\xcf\xc5\xf5\xa1\x47\x66\x3b\xbf\x24\xe7\x08\x70\x60\x18\x4c\x70\x8d\x69\x1a\x65\x86\xae\x02\xd3\x30\x18\x86\x02\x10\x41\x75\x93\x20\x93\x43\xae\x63\xa0\x52\x48\x11\x55\x30\x93\x06\x96\xa6\x26\x10\xd2\xc5\x2f\x5f\xae\x58\x78\x03\x03\x68\x08\x45\x57\x35\xaa\x69\x00\x98\x4c\x85\x58\x25\x90\x0a\xac\x72\x41\x99\x09\x99\xaa\x9a\x06\xd2\x28\x16\x4c\xe1\x8a\x54\x75\x48\x55\x95\x08\xa2\x98\x4c\x07\x06\xa5\x42\xf9\x38\x06\xca\xb7\xd7\xe7\xfc\x45\x34\x5b\x44\x8f\x2d\xfb\x7b\x08\x0b\x41\x98\x44\x90\x32\xa1\xa8\x8a\x02\x99\x0e\x11\xe4\x4c\x31\x14\x86\x24\xde\x55\x3f\xc3\x08\x50\xa1\x18\xaa\x09\x4d\x43\xd3\x0c\xa0\x30\x0d\x6b\x1a\x35\x0d\x84\x75\x66\x7e\x04\xe1\x7b\xf7\xb1\x3e\x1a\x61\x6e\x12\xc4\x98\x0a\x28\x22\x82\xeb\x18\x32\xc8\x85\x8e\x0c\x48\xa4\x6a\x22\xa6\x09\x8d\x99\xba\x89\x91\xd0\x81\x01\x05\xc2\x00\x60\x0d\x70\x41\x39\x24\x82\x63\x6c\xe0\xdb\x10\xbe\xbb\xd4\xff\xab\x11\xbe\xad\x1d\xff\x83\xc1\x3f\x18\xfc\x83\xc1\x3f\x18\xfc\x83\xc1\x3f\x18\xfc\x83\xc1\x4f\xc3\x60\xff\xe9\xff\xfb\xf2\x3f\xb7\xa4\x24\x4f\x0f\x5a\x3e\x3c\xc5\xed\xb2\x0c\x87\x94\x44\x01\xb7\x68\xf2\x44\xb4\xf2\x83\xc9\x8c\x84\xe1\x6c\x14\x90\x50\x5c\xd1\xd4\x12\x61\x14\x6b\xa6\x32\xb1\xca\x41\x38\xf6\x67\xac\x29\x66\x91\x98\x52\x11\xc4\x20\x50\x6e\xca\xa2\xa4\x1f\x30\x11\xb2\x99\xef\xed\xb0\x77\xc9\xc2\x63\xa3\x4b\x43\xfb\xa7\xa4\xdd\xa2\xec\x90\x10\x1e\xef\x42\x0c\xaf\x97\xff\x98\x92\x45\xce\x74\x5f\x9d\xdf\x2e\x33\xb4\x53\xdd\xff\x32\x22\xbb\x44\x6b\x6f\xfd\xcb\x59\x6d\xff\x22\x85\xb8\x4d\x70\x4a\xd6\xd1\x3a\x74\xb6\x37\x8a\x07\x62\x9f\x16\xfe\x40\xf8\x36\x2a\xec\xb2\xe4\x90\xcd\xf6\x0f\x2a\xba\x76\x7c\x7d\xca\xff\x0f\x53\x63\x95\xc9\x02\x00\x50\x1a\x77\x16\x65\x26\xb7\xe3\x45\xc1\x9f\x82\xaa\x30\x4a\xf5\x65\xc6\xea\x6b\x43\x3b\x48\xb5\xdd\x82\x98\xa6\xb6\xda\x69\x1f\xcb\x61\x2a\x12\x89\x6d\x4f\xc0\x74\x05\x56\x16\x93\x95\x81\x73\x56\x12\x56\xc7\x24\x55\xe9\xa5\xa2\x6a\x62\xb8\x92\x65\x75\x95\x2e\x75\x42\xb0\x4d\x57\x9f\xe7\x1b\x0a\xce\x9d\x93\x40\x9f\x99\
xaf\x48\x5a\xd3\xae\x1c\xe1\x69\xb6\xa4\xa9\xc9\x46\xd8\x56\xd4\x42\xaa\xe1\xbb\x5e\x3c\x6c\xa8\x59\x9a\x8e\xca\x64\x60\x25\xf4\x76\x86\xd4\xea\x78\x36\x29\x57\x2d\xa7\xdf\x63\x20\x81\x7b\xd8\xcc\xcc\x93\x23\xb4\xb2\xa4\x15\x0f\x8a\x55\x90\xe7\x2c\x9d\x37\xf0\x62\x93\xf1\xd3\x4a\x6a\xf8\xe4\x8f\xdd\xbf\x73\x97\x45\xf2\x33\x93\x6c\xf5\x82\xeb\x69\x72\x30\x72\xe2\x95\x55\x9b\x66\x2a\x7c\x53\x07\xa4\x30\x48\x04\x69\xbe\x9e\xae\x4b\xad\xec\xb4\x5e\x8b\x77\x92\xdd\x31\xa8\x91\x41\x2a\x35\xcc\x4f\x33\x6a\xa5\x96\xe8\x24\xc4\x9a\x0c\xfb\xb2\x6c\xe5\x56\xb5\xf9\x66\x1c\xd9\xb9\x5e\xd2\xae\x2e\x69\x38\x22\xc9\xca\x71\xd2\x6c\x6c\x0f\x47\xa3\xbc\x51\x77\x7a\x5b\x51\x52\x80\xf0\xd0\x62\xbc\xf4\xa3\xc4\x2a\x3d\x5e\x28\x3d\x58\xca\xe6\xc6\x75\x5e\xb3\x47\xb0\xb2\xb5\x15\x0d\xe0\xde\x7c\x56\x6b\xae\xab\x65\xc7\x55\x51\x45\x5f\x64\x56\x95\x62\x31\xde\x1f\xb6\xd5\x2e\x73\xdc\x38\x63\x8b\x55\xb6\x9a\xba\x97\x6f\xcf\x78\x5b\xab\x7b\x77\xb5\xec\x0f\xf6\x31\xfe\xd6\x0b\x68\x45\x96\xd3\x68\x68\xac\x7a\xa8\x33\x8c\xb2\xd1\x70\x65\xd0\x79\x90\x6e\x6c\x47\x59\x6d\xe8\x85\x5a\x5b\x1a\x4b\x54\x55\x0b\x29\x03\xcc\x57\x5e\x6f\x34\x2e\x44\xad\x95\xeb\x2e\x52\xaa\x6f\xc8\x5a\x27\x01\xa4\xa0\x98\xa0\x40\xa4\xcc\xcd\xdc\x36\x95\xec\x80\x0f\xf7\xfe\x25\xe1\x44\xc0\x74\xb4\xa9\x74\x37\x71\x6f\xa9\x2f\x0d\x9b\x87\x0d\x84\x97\xa9\x45\x84\x26\xee\x6a\x68\xb4\x07\xae\x46\xfa\x8e\x35\xd3\xf6\xf2\xa9\x43\x01\x54\xde\x8a\x0c\xcb\x8a\x4f\x07\x41\xae\xd5\x5b\x76\xab\x65\xb5\x99\xce\x44\x04\x98\x9b\xdc\x2a\x99\xcd\x2f\x71\x7e\x26\x07\xc9\x6e\x41\x9c\x6e\xc3\xd9\x2f\x3b\xbf\x5a\xc5\xb9\x6c\x5f\xfd\xe5\x8a\xdb\xd8\x6e\x84\x41\x9f\xa5\xf8\xa0\x5b\xc9\x86\x73\x52\x91\xad\x06\xa2\x9b\x55\x38\x5e\xd0\x7a\x23\x67\xb9\x14\x25\x25\x3b\x53\xf3\x74\x57\xe5\xf1\xe0\x93\x4c\xdf\x3d\xf0\x89\xc0\x79\xb0\xd2\xb5\x9a\xe2\x2e\x71\x3e\x5b\x2e\x8e\x7b\x59\x69\x57\x5c\xc6\xed\x46\xcf\x55\xd5\xa5\x6e\x76\xe2\x0b\x6b\x3a\xe8\x4b\xa5\xbd\xcc\x96\x43\x32\xe1\x15\x3a\xc8\xe4\xd2\x95\xa1\xa9\x2f\xb9\xb7\x2e\x6b\x24\xb7\xb1\x58\xd0\x2a\xfb\xcd\xd6\x7c\xb3\x2c\x33\xf3\xb8\xe5\xa0\xa2\xb6\x4d\x31\xb4\x6a\xb3\x29\x59\x03\xc5\x54\xc6\xe3\x0a\x18\x25\x69\x37\xaa\xa2\x11\xb2\x1a\x4e\x29\xd1\x2c\xf6\x60\xbd\xe3\xf4\x86\xfb\xad\x44\xcf\xf8\x25\x87\x17\x28\xd4\x2f\xf0\xc8\x96\x50\xae\xbe\xac\x4f\x68\x11\xe6\x2c\xb5\xdb\x19\x37\x82\xe2\x74\xdc\x03\x40\x66\x51\x58\xca\x9b\x53\x90\x6e\xac\x0a\xdd\x84\xd5\x53\xcf\xb4\x64\xb0\x48\x9f\x7d\xcd\x3d\xe1\x6f\xd5\x4b\xeb\xc8\x49\x16\xad\xbc\x6d\x69\x39\x30\xad\x47\x56\xdc\x9a\xd5\x53\x05\xaa\xd4\x8a\xab\xd9\x0c\x37\x69\xbf\xa8\x07\xf5\x5a\x03\xf8\x1d\x5a\x9c\xa7\x52\x6a\x38\x5c\x16\xa1\x62\x2e\x87\x5a\x68\x2c\xe2\xe5\x65\x1b\xcc\xb8\x4c\xb4\x70\x1c\x66\x6b\x83\x8c\xa7\x54\x6a\xe4\xb0\xb4\x64\xa5\xb8\x52\x55\x7d\x2b\x65\x0c\xa6\x21\xaf\x08\xdc\xd7\xb6\x3c\x5f\xef\x2a\x41\x6b\x99\x97\x56\xba\xef\x24\x80\xe3\xe2\xa7\xfa\x38\xde\x2d\xb0\x6f\x0f\xec\x02\x84\xf4\x05\x1e\x49\x27\x91\x04\x25\x50\xc8\x6e\xa2\xd1\xaa\xa2\xb8\x7d\x40\x36\x33\x5f\xc1\x95\xdc\x7a\x59\xb2\x37\x55\x3d\x4a\xa6\x99\xdd\x59\xae\x32\xf8\xac\x7d\x25\x65\x6f\x74\xce\xaf\x67\xfc\xd3\xa9\x34\xe5\x6e\x58\x1a\xc1\x6a\x3b\x23\x93\x03\xbb\xa0\x54\x34\x77\x99\x88\x52\x83\xf8\xb2\xde\x47\xb5\x2e\xcd\x25\x06\x61\xcf\xd4\x58\x4b\xd7\xd2\xf1\xd0\x4f\x34\x9d\x5e\xa7\x93\x57\x1a\xc9\xce\x8c\x2e\xfa\xeb\x35\xe8\x56\x92\xeb\x32\x2f\x2c\x0d\x2d\xeb\x04\x46\x43\xe5\xa0\x78\xf2\x37\x6d\x7d\x36\x9e\x5a\xd6\xbb\x93\xfa\x6f\x3c\x1a\xe7\x8e\x0d\x47\x6f\x3e\x89\xe4\xa3\x73\xe5\x2f\x9e\x46\x72\x1a\x5f\x29\xdc\x50\x19\xa0\x08\x11\x85\x50\x03\x11\x4d\xa3\x8c\x03\x28\x4c\x86\x39\x37\x99\xc9\x09
\x52\x10\x61\x8a\x94\xc8\x34\x0d\x28\x30\x84\x84\x11\x86\xb1\x8e\x4c\xc3\x10\xc7\x1b\x15\x5f\x8c\x16\xca\x17\xd1\xe2\x23\xec\x50\x87\x51\x50\xf5\x06\xd6\x0d\xc7\x9b\x37\xbc\xee\x6b\x37\x75\x8f\xfd\x7e\x22\x7e\x49\xf9\x1b\xed\xbf\x5e\x6d\xbb\x8e\x3b\x25\x2a\x12\x26\x83\x40\xa3\xaa\x44\x84\x22\x0c\x4c\xa2\x51\x6c\x28\x48\x53\x4c\xa4\x6b\x94\x31\xaa\x6b\xc4\x94\x44\xc3\x1a\x51\x0d\xa6\x72\x05\xe9\x94\x6a\x3a\x07\x26\x7f\xba\xdd\xee\x35\xee\x67\xe5\xfe\x60\xd4\xb3\x0a\x6d\x3b\x7e\x2c\xd4\x65\x2f\x78\x3b\xee\xe9\xfb\xec\x97\xb5\xcf\xd9\xbf\x19\x77\x24\xb9\x2e\x0c\xc1\x55\x0e\x0d\x1d\x11\x62\x18\x40\x03\xaa\x82\x05\x30\x0d\xa1\x02\x00\x0c\x69\x62\x1d\x08\x05\x09\x4c\x80\x2a\x14\xc1\xb9\x10\x5c\x32\x46\xa9\xa6\xa0\xe3\x4d\x63\xaf\x71\xbf\xa7\xdc\xf9\xbc\x9d\x3e\x2b\x77\xfa\x33\xb8\x67\xee\xb3\x9f\x6a\x7f\xce\xfe\xcd\xb8\x6b\x1a\xc6\x8c\x6b\x94\x99\x08\x1a\x50\x95\x98\x30\x6c\xe8\x1a\xd0\x00\xd7\x30\x53\x74\x4d\x12\x6a\x10\x8a\x38\x52\x29\x54\x14\x00\x00\x64\x06\x04\x44\x17\xd4\xd4\x80\x79\xbc\xf5\xe9\x35\xee\xf7\x94\xdb\x2e\x8e\x13\x67\xe5\xbe\x1c\xf0\xdc\x84\x7b\xf6\x3e\xfb\xc7\xfe\xf0\xc3\xf6\x6f\xc5\x9d\x43\x62\x40\x2a\x4d\x8a\x85\xa2\x20\xaa\x20\xc8\x54\x95\x42\xc9\xb1\x4a\x21\x63\x10\x28\x2a\xa7\x02\x13\xc4\x4d\x53\xaa\x3a\x81\x2a\xa6\x26\x60\x88\x18\x86\x29\x74\xa1\x1f\x6f\xe0\x79\x8d\xfb\x1d\xe5\xce\xa3\x4a\x46\x3f\x2b\x77\xf9\x33\xb8\xe7\xee\xb3\xaf\x95\x3f\x67\xff\x56\xdc\x25\x15\x74\x17\xc5\x75\x93\x99\x82\x20\x5d\x05\x10\x49\xa8\x0a\x2a\x4c\x85\x0b\x43\x61\x8a\x60\x94\x23\x62\x68\xd2\xd0\x54\x93\x13\x53\xd5\x85\x50\x4d\x84\x54\x26\x74\xfd\xe9\x36\x94\xd7\xb8\xdf\x53\x6e\x58\xf3\xc0\x59\xb9\xdf\x00\xf8\x7d\xdc\xf3\xf7\xd9\xdf\xf8\x9f\xb3\x7f\x2b\xee\x06\x51\x35\x2c\x4c\x55\x55\x04\x56\x09\xd4\x15\xd5\x30\x21\x32\x05\x34\x35\x49\x15\x03\x63\x42\x89\x34\xa1\x4a\xb9\x50\x4d\x22\xa8\xc0\x06\xd4\x10\x33\x0d\xc9\x55\x60\x2a\xe4\x78\x33\xc5\x6b\xdc\xef\x29\xf7\xaa\x59\x58\x9c\x95\xbb\xfd\x19\xdc\x0b\xf7\xd9\x8f\xd2\x9f\xb3\x7f\x2b\xee\x98\x98\x0a\x16\x04\x50\x66\x48\xae\x20\x80\x20\xa3\x44\x30\xc4\x15\x09\x0c\x06\x4c\x4d\x18\xcc\x20\xc4\x44\x9a\x24\x58\x08\x2c\x01\x05\x8a\xce\x24\x22\x2a\x46\x94\xeb\x07\xdc\xe1\x6b\xdc\xff\xa6\x72\x3f\x0a\xf7\x60\xf8\x73\x71\x87\x3a\x17\x5c\x87\x1c\xab\x8a\xa6\x60\x5d\x18\x2a\x62\xa6\x8e\xa8\x21\x84\x90\xfb\x67\x33\x30\xc0\xa4\xc6\x10\x36\x0c\x6c\x4a\x83\xeb\x8c\x42\x88\x98\xa6\x71\xc2\x38\x47\x07\xdc\xd5\x87\xe3\xfe\xd9\x72\x3f\x0a\xf7\x19\xba\x07\xf7\x37\x73\xb1\xc7\x24\x62\x3f\x3b\x0b\xfb\xe0\x9c\xc4\x59\xb6\x95\xbc\x80\xe3\xf2\xfb\x07\xe7\x10\x2c\xbb\x63\x2d\xcf\x9f\xec\x92\x7c\x9e\x03\x48\xa7\xda\x5d\xcb\xf3\x0c\x9a\x1c\xa2\x55\x03\x6d\x12\x29\xd3\x90\x23\xa3\x1c\xcf\x4c\xa7\xeb\x22\xb1\xeb\x25\x55\x4d\x6f\x1a\xdd\x6e\x3f\xb3\x76\x33\x15\x6d\x05\x83\x19\x19\xa8\x8b\xae\x81\x87\x9d\xa1\xea\xfb\x65\x54\xb2\x27\x99\x78\x0d\x59\x5d\xb2\x66\xc5\x64\x79\x1c\x24\xcb\xf6\xfe\xc1\x10\xbc\x8f\x6c\xcd\x19\xcd\xa1\x33\x69\x22\x60\xeb\xc8\xe3\x32\x07\xfd\x6c\x71\x25\xe3\x39\xee\x18\xcd\x62\x18\x14\xdc\x3e\x9f\x6a\xcf\x8e\xbd\x83\xc1\x89\x19\x2f\xf6\xc4\xed\x3b\x84\xd4\x89\x65\x87\xe3\x03\x73\x62\xc7\x3b\xdb\x6a\x09\xef\xba\xc1\xb3\xe3\x8d\x11\xf9\x93\xaf\x8f\xb3\xff\x46\xd9\x6f\xb1\x7f\x99\x7d\xde\x9b\x75\x7f\x2a\x0b\xba\xc3\x7e\xa6\xd5\x4d\x2e\xee\xb0\x6f\x59\x7f\x5f\xf6\x7d\xad\xb7\xf8\xd9\x53\x0e\x1f\x9b\x58\xb6\x0e\x0f\x91\xb8\x9c\x1a\xda\x1f\xf6\x25\xe8\x0f\x99\x58\xfe\x29\x13\x93\xc9\x6d\x4b\xf5\x6a\x53\xb0\x8e\x16\x86\x3d\x69\xeb\x63\xa7\xb7\x48\x66\x32\xee\x00\x6f\x5a\x3d\xcf\x4d\x17\x37\x7a\xae\xc4\xfc\x8e\x8f\x37\x53\x6d\x36\x2a\xa3\x31\x51\x27\xde\x80\x96\x0
6\xda\xb6\xe0\x54\x26\x6e\x58\x71\x31\x9e\x4e\x9b\x9d\x49\xa1\xb8\x35\x7b\xbd\x61\xb5\x97\xce\xed\x02\xcb\x62\x3c\xf6\x56\xc3\xe6\x36\x5e\x5c\x14\x92\x46\x53\xef\xf7\xeb\xbd\x51\x7b\x59\x8e\xda\xb3\xc4\xac\xd0\x1c\xd3\x14\xe8\x77\xcd\x06\x4f\xe2\xd3\x13\x06\x36\xef\x60\x78\xfd\xfb\xdb\x41\xec\xbc\x52\x3e\x11\xe4\xc7\xd5\x53\xfd\xbe\x71\xbc\x1f\x44\x1e\x68\xff\xa3\x41\xcc\x3e\x5d\x74\x6f\x10\xbb\x37\x88\x5c\x4e\xa1\x7d\xd8\xfe\xa0\x25\x37\x77\xd8\xbf\x7b\x2a\xeb\x62\x0a\xef\xde\xf2\x7f\xd8\x7e\x35\x33\xcc\xdf\x61\x7f\x7f\xe2\x55\x7b\xf8\x60\xa7\xba\xee\xb6\x55\x74\x6f\xa7\x7a\x8f\xfd\x1e\xa8\xf1\x3b\xec\xdf\x1d\x0f\x5a\xa3\xbe\x7d\x47\x7b\xbc\x3b\x1e\x34\x6d\x6b\xf4\x49\xfb\x37\x75\xa4\x8f\x9d\x43\xfe\x7c\x47\x6a\x5f\x03\xee\xbc\x70\x8f\x5e\xa1\xed\xa3\x6d\x5d\xb4\xdc\x89\x31\x5f\x28\x52\x8c\x12\x50\x8d\x70\xba\xbc\xf6\xfc\x6a\x37\x6e\x4c\xfd\xec\x74\xdc\x2b\x2e\xa6\xba\x99\x9c\x4c\xea\x73\x63\xeb\x77\x12\x1d\xe6\x14\xcc\x7c\x27\x3f\x73\xe2\xe3\x5a\x67\xd0\x9c\xc4\x8b\xcd\xc0\xae\x73\x6b\x6a\xa4\x3b\xda\x38\x49\x97\xc9\x63\xa7\xb4\xa8\x29\x82\x46\xb5\xe6\xa0\x3f\x9f\xf6\xd3\xd6\xda\x1b\xf2\x45\xd9\x4a\x2e\x45\x27\x35\x1a\xcf\xfc\x94\x26\x54\x61\xca\x6d\x29\x3a\x5b\x92\xbd\x3e\x6a\xbf\xb1\xc3\xab\xdf\x45\xb0\xa1\xf2\x39\x82\x3d\xcb\x3f\xd0\xfe\x25\x0f\x6e\xb3\x7f\xba\xfe\x9e\x0e\x47\xd6\x31\xbc\xb7\xc3\xcb\xdc\x61\xdf\xad\x52\xe3\xde\x0e\xef\x81\x6b\x27\x9f\x5a\x43\xf8\x9b\xd6\x30\x6e\xcb\x1a\x1e\xbb\x70\xf3\xf9\xed\x28\xa9\x0b\xef\x7f\x7a\xb0\xb3\x92\x72\x4d\x94\x68\x59\x4a\x0d\x8b\xc5\x86\x17\x76\x56\x53\x6c\x66\xc5\x60\x9a\x8b\x96\xc9\x82\x10\x0a\xac\x6b\x78\x15\x8a\xca\xb2\x98\x1b\xaf\x28\x20\xd3\x79\x2f\x5b\xe8\xf4\x33\x15\xb3\x96\xcd\xe9\x83\x81\x6e\x34\x05\xce\x2e\x37\x4e\x09\xe3\x6d\xa9\xd4\xda\x42\xbc\xbf\x27\xf0\x5b\xec\x6b\x94\x9e\x32\xa5\xb4\xd6\x0a\x4e\x4d\x0f\x0a\x83\x76\xd2\x52\x0a\xab\x4e\xcb\x88\xe6\x8b\xca\xd0\xcc\x37\x2c\xab\x94\xcd\x17\x7b\x01\x3f\xdb\x7a\x71\x57\xb0\x3b\xdb\xc6\xf2\xd1\x60\x53\x77\xfc\x04\x3b\xd5\xc3\xe7\x82\xcd\xe3\xec\x5f\xf2\xe0\x36\xfb\xa7\xeb\xff\xc6\x60\x73\xb9\x60\xf8\x61\xfb\x41\x49\xbf\x67\x8a\xe4\xde\x85\xbb\xcb\x05\xcb\x4f\x2d\xdc\xfd\x4d\x0b\x87\x37\x05\xbb\x07\xaf\x96\x7e\x3e\xd8\x5d\xd6\xec\x4f\x0f\x76\xac\xe8\xf5\x4a\x0a\x0b\x4c\xd2\x9a\xf4\x91\x2a\x75\xbd\x3a\x02\xe5\x44\xad\x32\xae\x35\x5b\x64\xe0\xfb\xa9\xbc\x9c\xfa\x83\xad\x62\x24\x87\xa9\x0a\xd6\x46\xf3\x80\x14\x4c\x75\x39\x6d\xf0\xdc\x74\x1e\x15\x48\xb1\xcf\x57\x11\xab\x0c\xfd\x25\x4f\x2f\x3b\xeb\x4c\x9f\xe4\xf7\x4f\xd7\xf9\x16\xfb\x0a\xe6\xfd\x00\x9b\xde\x28\x5b\xca\xda\xab\x6d\xb8\x84\xad\x30\x5c\x0d\xd3\x78\x39\x9b\x57\x78\xa2\x42\xf2\x7a\xb4\xda\x14\x07\x9e\x2e\x4e\x33\xf5\x77\x05\xbb\xb3\xa9\x9c\x8f\x06\x9b\x5a\xaa\x21\x52\xa7\x7a\xf8\x5c\xb0\x79\x9c\xfd\x37\x0c\xdd\x1c\xec\xfe\xc6\x60\x73\xb9\x4a\xff\x61\xfb\xeb\x42\xdf\xbe\x37\xd8\x3d\x70\x97\xc0\xa7\x56\xcb\xff\xa6\xd5\xfa\x9b\x82\xdd\x83\xb7\x28\x7c\x30\xd8\x95\x4e\x95\xf4\xea\xf9\x54\x17\x20\x3e\x3c\xd8\x05\x96\x95\xef\xcf\x92\x5a\xb6\xda\xaf\xce\xc7\x05\x67\x55\x5c\x36\xc2\x41\xbc\xa3\x5a\x83\x5e\x38\x6a\x95\xea\x54\x2f\x37\x96\xd5\xc5\x42\x24\xdc\x26\x59\x75\x14\xe8\x54\x9a\xad\x74\x32\x3f\xab\x55\xe7\xfd\x36\x03\x23\xd9\x59\xf0\xae\x9c\x96\x38\x24\xdd\x81\xde\xae\x73\xfc\x14\x90\xe2\xf1\x45\xa6\xa9\xc8\x9c\xe1\xb7\x57\xc5\x5a\x3e\xbe\xf4\x72\xa2\xcf\x72\xe1\xb2\x3f\x9b\xe0\x49\x05\xcf\xb6\xbd\x45\x45\x1b\x22\xbd\xf3\x98\x34\xf6\x7c\x38\xf6\xd1\x60\x53\x19\xeb\x68\x7e\xaa\x87\x4f\x05\x9b\x07\xda\xbf\xe4\xc1\x4d\xf6\xcf\x82\xdd\xdf\x18\x6c\x2e\xb7\xc6\x7c\xd8\xbe\x9a\x85\xd9\x7b\x83\xdd\x03\xb7\xe6\x7c\x6a\xc9\xfe\x6f\xda\x
22\x73\x53\xb0\x7b\xf0\xbe\xa0\xcf\x8f\xec\x5e\x3d\x11\xee\x02\xc4\x87\x07\xbb\x65\x16\xe6\xd3\xbe\x35\xcc\xa7\x86\xb2\x39\xb0\x41\x4a\xb3\x5d\xab\xec\x2e\xdb\x28\xb0\xa7\x9e\x52\x88\x86\xc1\x34\xb2\x70\xae\x39\xea\x84\x73\xbf\x5c\x13\x1b\x1b\x05\xb4\xb7\x88\xa7\x9c\xed\xdc\xd4\x36\xb8\xd5\x74\x95\x04\xae\xa9\x66\xbf\x8f\x27\x5d\xd9\x06\xd8\x4f\x1e\xd3\x58\x32\x6e\x0a\xb6\xcd\xcd\xaa\x4e\xbb\x35\xd8\x8c\xe2\x05\x10\x5f\x93\xc1\xdc\xc0\x9b\x2a\xd6\xd4\x20\x30\xba\x85\xfc\x1a\x26\x5a\xc9\xf5\xe9\x91\xad\x77\x05\xbb\x3b\xe6\xcc\xca\x69\x87\xc8\x53\x3d\x7c\x2a\xd8\x3c\xd0\xfe\x25\x0f\x6e\xb2\x7f\x16\xec\xfe\xc6\x60\x73\xb9\x3f\xe7\xc3\xf6\x71\xba\x5d\xbc\x37\xd8\x3d\x70\x7f\xd0\xd5\x47\xb6\xff\xcc\xfd\x49\xf3\x76\xe0\x7e\xd2\xfe\x4d\xc1\xee\xc1\x9b\xf1\x3e\x1f\xec\x5e\x3d\x83\xf1\x02\xc4\x87\x07\xbb\x75\x56\x1d\x50\x5b\xe3\x63\x37\xd9\x5b\x75\xeb\x0e\x6f\x89\x02\x4d\x98\x75\x28\x03\x90\xad\xa5\x94\xe9\x3a\x01\x69\x26\x1e\xcf\xa5\xe3\x22\x59\x0b\x8a\x35\xe0\xbb\x4a\xa7\x59\x69\xd7\x3b\xa4\x6b\x4e\xcb\x8c\x67\xc6\x75\xd6\xca\x35\x69\x7c\x91\xf4\xe7\xf1\x09\x57\xb9\x7d\x4c\x63\xa7\x5e\x56\xf8\xa5\xf5\x12\xf4\x6d\x7b\x63\x98\x79\x20\x93\x61\x12\xe8\x21\x99\x8b\x74\x7c\x66\xce\x50\xc9\xea\x6c\x35\x5f\x77\xf0\x63\xd2\xd8\x7b\x82\x4d\xd1\x2d\xea\xf7\x06\x9b\x07\xda\xbf\xe4\xc1\x4d\xf6\xcf\x82\xdd\xdf\x18\x6c\xee\x0d\x76\xd9\x54\x72\x53\xfd\x3b\x83\xdd\x45\xb0\x79\xe3\xbe\xa6\x9f\x17\xec\x26\xbd\xca\xe8\x93\xf6\x6f\x0a\x76\x0f\xde\x01\xfb\x5f\x7f\xbf\xec\x21\x20\x15\xb8\xd9\x29\x0d\xca\xed\x6c\x47\x1f\x8f\x73\xdd\x12\x5d\x98\xea\xb6\x2e\xad\x2d\xd8\x14\x06\x33\x09\x15\x16\x3a\xe3\x74\x8b\x6c\xf9\x83\xd2\xd8\x3b\x82\x4d\x21\xb3\xed\xdf\x1b\x6c\x1e\x68\xff\x8d\x25\xb8\xff\x44\xb0\xbb\x37\xd8\xdc\x1d\xec\xf2\x89\x7a\xf3\xef\x0c\x76\x17\xc1\xe6\xf2\xe6\xed\x8f\x96\xff\xc3\xf6\xc5\x60\xc9\x3f\x69\xff\xdd\x9d\xe7\xef\xbc\x19\xfe\xa3\xdb\xcf\x2f\xde\x0e\xff\x14\x47\xe1\x59\x76\x7b\x78\x8f\xed\xa9\xad\x5e\x09\xc4\x6f\x6b\x79\x1e\x36\x1e\xde\x7e\xfb\x4a\xcb\xcb\x57\xde\x9f\xbd\xc0\xfd\xfc\xf3\xf7\xd9\x44\x3c\x17\xf3\xf4\x06\xf9\x1f\x14\xd3\x2a\xb5\xd2\x8d\x23\x2a\xd5\x4a\xa9\x7f\xae\x71\xff\x70\x14\x2b\x95\x3a\x7f\x1f\xfd\xa5\xc1\x58\xad\x91\x2f\x5b\x8d\x7e\xac\x98\xee\xc7\x7e\x3d\x9e\x75\xf8\xb7\xd8\x4e\xc2\x23\x53\xf1\x96\xfb\xe1\xf3\x87\xc7\x3a\x1e\xbe\xe3\x75\xf8\x9e\xcb\xaf\x1c\xa5\xc4\xdb\xfd\x7b\x90\x7b\x94\x78\xd7\x3c\x7b\x32\xf0\xd2\x29\xcf\xe7\xe2\x8a\x47\x2f\xdf\xca\xf3\xe2\xdb\xf7\xc3\xb7\x50\xcc\xbf\x3f\xc4\xdd\x17\xca\xaf\x39\xfe\x8e\xf5\x58\xbb\x92\xaf\xb7\xd3\xb1\x5f\x9f\x7f\xfd\x50\x49\x1e\x83\xf7\x07\x0b\xf0\xba\x0e\x8e\xe7\x49\x38\x7a\xe5\xfd\xe1\x05\x33\xc7\xff\x1e\xe4\xef\x41\xd9\x35\x47\xcf\xcc\xbc\xf4\x70\x7f\xe2\x0a\x4d\xf6\x6f\xac\x39\xfc\x7d\x90\x73\x7b\x5d\xd7\x7c\x3b\x19\x79\xe9\x9a\x33\xfb\x16\x9b\xf9\x41\xf4\xda\xb7\xb3\x17\xe0\xbc\xf8\xf2\x28\x4f\xcf\x54\x5e\x75\xf8\xd2\xe4\xb5\x4a\xbf\xe6\x75\xb8\xa0\xc7\xff\x1e\xe7\x69\xb8\xa0\x6f\xf8\xf8\x64\xe6\xa5\x77\x81\x08\xaf\x54\xf7\xe9\x8d\x3d\x67\x1f\x1f\xe4\xe4\x49\xe1\x35\x47\x2f\xcc\xdd\x14\xc3\xce\x1e\xbe\x7d\xfa\xf8\x20\x6f\x4f\x0a\xaf\x79\x7b\x61\xee\xa5\xb7\xf3\x50\x44\x57\xdb\xfa\xd9\x93\xc0\x4e\x1f\x1f\xe5\xef\xb3\xc2\xab\xfe\xbe\x34\xf7\xd2\xdf\xfd\xef\x57\xfb\xd7\xd3\xf3\x54\xce\x3e\x3e\xc8\xe1\x93\xc2\x6b\x0e\x5f\x98\x7b\x73\x68\xe0\x84\xe1\x42\x04\xdf\x62\x24\x0c\x45\xc4\x7c\x7e\xa5\x0c\xe7\x37\x6e\x9e\x7f\x79\x54\x39\xce\x54\x5e\x2d\xc9\xa5\xc9\x6b\x51\x22\x14\xf3\x6f\xb1\x68\xed\x78\x5c\xac\xaf\x94\xe0\xe4\xfe\xa3\x7d\x7f\xd7\xf1\xbb\xbc\xbe\x18\xa2\xbe\xfc\xfa\x20\xff\x5f\x2a\xbd\x56\x88\x2b\x66\xdf\x2c\xc9\x51\xf6\x7a\
x71\x8e\x9c\xa3\xc4\x25\x1e\x3b\x3d\x5f\x28\x5f\x49\xa5\x7b\x3f\x70\xdb\x6e\xa4\xad\x56\xfa\x20\x7a\xa9\x27\x56\xad\x9c\x46\xb1\xed\x66\xbe\x92\x8d\xd1\x28\x10\x22\xf6\xeb\x51\xe2\xb7\x58\x37\x97\x6e\xa4\x9f\xbf\xc7\xfe\xfd\x57\x4c\x79\x7e\x12\xe5\xeb\x81\xa6\x08\xa3\x43\x5f\xbe\x2b\xc5\xa7\xbd\x7c\xa9\x66\xe7\xe4\x71\x9c\xf2\xc2\xc5\x50\xb8\xae\xe3\x0d\xf7\x6d\xef\x5b\x8c\x2e\x36\xa7\x2f\xb3\xc0\x61\xaf\xdb\xe2\xae\x1a\xe8\x26\x14\xf3\x4f\x3b\xf6\xac\x61\xe7\xd3\xa9\x61\xbc\x70\xeb\xed\xa1\xe2\x4e\x5c\x0a\x71\xbf\x0b\x4f\x4a\x0e\x5e\x9c\x05\x98\x1b\x1d\x39\x9e\xb9\xcf\x91\x73\x25\x3b\x47\x5e\x0e\x84\x6f\xf4\x24\x64\x33\xe1\x2d\xef\xf4\xe4\x5c\xc9\xce\x93\xb3\x57\x9c\xdc\xee\xc6\xb1\x43\xbd\xdb\x93\x73\x3d\x47\x67\x9e\x46\x09\x2f\x9d\x21\x61\xf4\xb6\x43\x67\x81\xe3\x3e\x8f\x2e\x15\xed\x5c\xba\x88\x8d\x3f\xc4\xa8\xe6\x87\xd1\x30\x10\xcd\x7a\x29\xf6\xf4\x10\xd9\x18\x5f\x4c\x67\x31\xe6\x4f\x67\xae\x88\xc4\xde\xec\xff\x1f\x00\x00\xff\xff\x6f\xb3\xb1\x3d\xfb\xaf\x00\x00") + +func operation_fee_stats_3CoreSqlBytes() ([]byte, error) { + return bindataRead( + _operation_fee_stats_3CoreSql, + "operation_fee_stats_3-core.sql", + ) +} + +func operation_fee_stats_3CoreSql() (*asset, error) { + bytes, err := operation_fee_stats_3CoreSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "operation_fee_stats_3-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3d, 0xdd, 0xf, 0x58, 0x32, 0xe8, 0xc1, 0x6c, 0xc9, 0xc7, 0x4, 0x1b, 0xd3, 0xd0, 0xa0, 0x27, 0xfd, 0x2, 0xba, 0x15, 0xbe, 0xe5, 0xe, 0x92, 0x13, 0x93, 0xa7, 0x7e, 0xec, 0xaa, 0x98, 0x92}} + return a, nil +} + +var _operation_fee_stats_3HorizonSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x79\x8f\xe2\xb8\xb6\xff\x7f\x3e\x45\xd4\x1a\xa9\xba\x45\xf5\x90\x7d\xe9\x79\x7d\xa5\x00\x61\xdf\x77\x18\x8d\x90\x93\x38\x90\x22\x5b\x25\x61\xab\xab\xf7\xdd\x9f\x08\x5b\x48\x25\x10\x08\x55\x7d\x9f\xee\xa0\x51\x0f\xc1\xf6\xd9\x7c\x8e\x7f\xf6\xb1\xe3\xfa\xfe\xfd\xb7\xef\xdf\x91\xa6\xe9\xb8\x53\x1b\x76\x5a\x55\x44\x06\x2e\x10\x81\x03\x11\x79\xa1\x5b\xbf\x7d\xff\xfe\xdb\xb6\x3c\xb7\xd0\x2d\x28\x23\x8a\x6d\xea\xa7\x0a\x4b\x68\x3b\xaa\x69\x20\xdc\x1f\xf4\x1f\x98\xaf\x96\xb8\x41\xac\xe9\x64\xdb\x3c\x50\xe5\xb7\x8e\xd0\x45\x1c\x17\xb8\x50\x87\x86\x3b\x71\x55\x1d\x9a\x0b\x17\xf9\x89\xa0\x7f\x7a\x45\x9a\x29\xcd\xdf\xff\x2a\x69\xea\xb6\x36\x34\x24\x53\x56\x8d\x29\xf2\x13\x79\xea\x75\xf3\xec\xd3\x9f\x07\x72\x86\x0c\x6c\x79\x22\x99\x86\x62\xda\xba\x6a\x4c\x27\x8e\x6b\xab\xc6\xd4\x41\x7e\x22\xa6\xb1\xa7\x31\x83\xd2\x7c\xa2\x2c\x0c\xc9\x55\x4d\x63\x22\x9a\xb2\x0a\xb7\xe5\x0a\xd0\x1c\x78\xc6\x46\x57\x8d\x89\x0e\x1d\x07\x4c\xbd\x0a\x2b\x60\x1b\xaa\x31\xfd\x73\x2f\x3b\x04\xb6\x34\x9b\x58\xc0\x9d\x21\x3f\x11\x6b\x21\x6a\xaa\xf4\xbc\x55\x56\x02\x2e\xd0\xcc\x6d\x35\xbe\xda\x15\xda\x48\x97\xcf\x54\x05\xa4\x94\x47\x84\x61\xa9\xd3\xed\x20\x8d\x7a\x75\xb4\xaf\xff\xc7\x4c\x75\x5c\xd3\xde\x4c\x5c\x1b\xc8\xd0\x41\x72\xed\x46\x13\xc9\x36\xea\x9d\x6e\x9b\x2f\xd5\xbb\xbe\x46\xe7\x15\x27\x92\xb9\x30\x5c\x68\x4f\x80\xe3\x40\x77\xa2\xca\x13\x65\x0e\x37\x7f\x7e\x06\x43\xc9\xfb\xf6\x19\x2c\xb7\x7e\xf5\x79\x0a\xee\xb8\xdd\xae\xdd\x4e\xc0\xad\x23\x5f\x62\xe6\xab\x75\x22\xee\x55\x2f\xd5\x73\xc2\xd0\x57\x73\x4f\xd6\x93\x6a\x02\x15\x05\x4a\xae\x33\x11\x37\x13\xd3\x96\xa1\x3d\x11\x4d\x73\x7e\xb9\xa1\x6a\xc8\x70\x3d\xf1\x29\x67\x38\xc0\x73\x74\x67\x62\x1a\x13\x55\xbe\xa5\xb5\x69\x41\x1b\x1c\xdb\xba\x1b\x0b\x26\x68\x7d\x92\x24\x91\x14\xb7\xb5\xd5\xa0\x3c\x85\xb6\xd7\xd0\x81\xaf\x0b\x68\x4
8\x37\xa9\xe0\x6b\x6e\xd9\x70\xa9\x9a\x0b\x67\xff\xdb\x64\x06\x9c\xd9\x9d\xa4\x92\x53\x50\x75\xcb\xb4\xb7\xe1\xb8\x1f\x53\xef\x25\x73\xaf\x2d\x25\xcd\x74\xa0\x3c\x01\xee\x2d\xed\x0f\xce\x7c\x87\x2b\xed\xe3\xf2\x0e\xa1\xfd\x2d\x81\x2c\xdb\xd0\x71\x2e\x37\x9f\xb9\xb6\xec\xe1\xce\x44\x33\xcd\xf9\xc2\x8a\x51\xdb\xba\x26\xd2\xae\x16\x50\xed\x1b\x09\x1f\x06\xdd\xd8\x0d\xb6\xe3\x84\xa2\x40\x3b\x5e\xd5\x03\xf9\x3b\x9a\xec\xcd\x1a\xaf\x91\x37\xb4\xde\xc0\xc4\x3f\x14\x5f\x6b\x61\x6d\x1b\xcc\xdc\xab\x3d\xe0\x9c\x0d\x40\xe2\xe6\xaa\x1b\xcd\x8e\x91\x1e\xa7\xb2\xb9\x93\xc3\xbc\x5a\x51\x75\xdc\x89\xbb\x9e\x58\xd7\x49\x6e\x6b\x9a\x56\xdc\x9a\x30\x6e\xb5\x03\x94\x5c\xae\x2c\x1e\xc2\xfd\x6a\xb5\xeb\xa3\x98\xb8\x89\xd7\x99\x3b\x8c\xdc\x5a\xdb\x71\x16\xd7\x38\x1f\x2b\x4b\xa6\x0c\x6f\x9c\x17\x1c\xdd\xc0\x02\xb6\xab\x4a\xaa\x05\x8c\x8b\xe0\x7d\xad\xe9\xc4\xba\x71\x6e\x72\x44\xb4\x5b\x25\x08\x6f\x78\x33\x7f\xcf\x78\x71\xf8\xed\x2a\x7e\x38\xfd\x5d\x67\x6e\x7b\x72\xff\x75\x8b\x0f\x87\xa9\x9f\xe7\x0c\x93\x98\x12\x4c\x4d\xdb\x9a\xe8\xea\x74\x3f\x61\xb8\x20\x42\xa0\x66\x6c\x1d\x6f\x9f\xef\x5d\xa2\x1c\xd7\x39\x77\xad\xb3\x8d\x6a\xaf\x56\x47\x54\x79\xc7\x39\x27\xe4\xf9\x5e\xb5\x1b\x93\x76\x84\xd3\x3d\x80\xf2\xbe\xbb\x2f\x53\xf2\x9e\xe2\xab\x7f\x40\xe9\x8e\xd0\xea\x09\xf5\xec\x1d\x36\xdb\xce\xb3\x1d\xf8\x7a\x33\xe7\x33\x22\xb1\x5b\xcb\x30\x66\xdd\xd3\x6c\x36\xb6\x86\x11\x51\x7f\x8b\x7e\xe1\x24\xe2\xb5\xdd\xcf\xfb\xe2\x55\xde\x4f\xf2\x62\xeb\xb6\x1f\x01\x6e\xd1\x65\xd7\x24\x66\xdd\xfd\xf4\x2f\xbe\x3c\x87\xf9\x62\x1c\x89\x02\x63\xc8\xe5\xca\xbe\x21\x61\x5f\x91\x2f\x14\xda\x42\x81\xef\x86\x54\xd6\xd5\xed\x8a\x43\x95\xe0\x57\x63\xa1\x43\x5b\x95\xfe\xfa\xfb\x5b\x8c\x56\x60\x7d\x47\x2b\x0d\x38\xee\x57\x60\x6c\xa0\xe6\xa5\x62\x62\xb4\x50\x54\x3b\xb4\x49\xbe\x57\xcf\x76\x4b\x8d\xfa\x05\x7d\x26\x60\x3a\x3d\x49\xf7\x8c\xbc\x13\xf4\x02\x8d\x83\x76\x09\x68\x6c\x75\xf5\x9a\x9f\x84\x7f\x46\x6e\x51\xc4\x53\x3d\x06\x05\x61\xd8\x15\xea\x9d\x00\x09\xcd\x9a\x3a\xaf\xda\xc1\x17\xb3\x45\xa1\xc6\xbf\xe3\xf0\xe7\x6f\xbb\x2c\x5c\x1d\xe8\xf0\xc7\xe1\x37\xa4\xbb\xb1\xe0\x8f\x7d\x93\x3f\x91\x8e\x34\x83\x3a\xf8\x81\x7c\xff\x13\x69\xac\x0c\x68\xff\x40\xbe\x7b\xc9\xb9\x6c\x5b\xd8\xf6\xd7\x9e\xf2\x81\xde\x6f\x67\x14\xcf\x0b\xf7\x84\xb3\x8d\x5a\x4d\xa8\x77\x2f\x50\xde\x55\x40\x1a\xf5\x73\x02\x48\xa9\x83\x3c\x1d\xd2\x6e\x87\xdf\x1c\x8f\xc8\x53\x90\xf3\x41\xfd\x3d\xcf\xa3\x85\xae\xea\x73\x66\xcb\x7a\xa3\x1b\xb0\x27\x32\x28\x75\x8b\x47\xb1\xfc\xf9\xb7\x33\xf6\x27\x2a\x01\x41\x6e\x51\xfe\x1d\x11\xcf\x00\xcd\x6a\xda\x9a\x76\x5a\x55\xc4\xb2\x4d\x09\xca\x0b\x1b\x68\x88\x06\x8c\xe9\x02\x4c\xa1\x67\x86\x98\xf9\x42\xbf\xb8\xd7\x1d\x6d\x2f\xfe\xc1\x57\x4f\xf2\x1f\xfa\x36\xcc\x96\x47\xcf\xbe\x4a\x1f\x69\x0b\xdd\x5e\xbb\xde\xf1\xfd\xf6\x1b\x82\x20\x48\x95\xaf\x17\x7a\x7c\x41\x40\x3c\xed\x6b\xb5\xde\x6e\xbc\xeb\x74\xdb\xa5\x6c\xd7\xab\xc1\x77\x90\xdf\x27\xbf\x23\x1d\xa1\x2a\x64\xbb\xc8\xef\xd8\xf6\x29\xd8\x1b\x57\x03\x31\x99\x76\xd7\xc8\x3f\x4c\x39\x3c\x4c\xb9\x38\x23\x55\x32\xfd\x62\x70\x38\xaa\x78\xfc\xe9\x2e\x0d\xbf\xfe\x86\x20\x59\xbe\x23\x20\x83\xa2\x50\x47\x7e\xc7\xfe\xc2\xfe\x4e\xff\x8e\xfd\x85\xff\xfd\xaf\xdf\x71\xef\x3b\xfe\x17\xfe\x37\xd2\xdd\x15\x22\x42\xb5\x23\x6c\x8d\x22\xd4\x73\xdf\x42\x2d\x13\x03\x07\x12\x5a\xe6\x3a\x87\x8f\xb6\xcc\xff\xdc\x63\x99\xf7\x98\xba\xb7\xc3\x11\x87\xe3\x19\xe2\x04\xdb\xef\x28\x7a\x12\x23\x48\x67\x6b\x2b\xe4\xe7\x69\x04\x78\xde\xfd\xdc\x1d\x35\x05\xe4\xa7\x3f\x22\xbe\x85\x45\xed\x43\x65\x0c\x12\x0c\x88\x78\x08\xe3\xf8\x12\x86\x4e\x81\x92\x4a\x19\x46\x34\x20\xe9\x59\x40\x9e\x8b\x7b\xf2\xb2\xf7\xd2\x86\x4d\xf3\x12\x4b\x1b\x42\x
34\x28\xad\x3f\x48\x2e\x4a\xbb\x45\x2e\x19\x2a\x60\xa1\xb9\x13\x17\x88\x1a\x74\x2c\x20\x41\xe4\x27\xf2\xf4\xf4\xe7\x79\xe9\x4a\x75\x67\x13\x53\x95\x7d\x5b\x69\x67\xba\xfa\xe7\xbf\x7b\x15\xbd\x00\x8b\xa7\xde\x2e\x16\xfd\x8b\xef\x9d\x46\xaa\x8c\x88\xea\x54\x35\x5c\x6f\x62\x50\xef\x55\xab\x3b\x75\x80\xbe\x9d\xc6\x23\xd2\x0c\xd8\x40\x72\xa1\x8d\x2c\x81\xbd\x51\x8d\x69\xa0\x9a\xb1\xd0\x8f\x53\x7e\x44\x35\x5c\x38\x85\x76\xa0\x8a\xa2\x81\xa9\x83\x38\x3a\xd0\xb4\xf7\x6c\x5c\x53\xd7\xde\x33\xf9\x8a\x53\xd4\xb7\x63\xcd\xf7\xdd\x1e\x5c\x37\xdc\x6b\x8e\x60\xb6\xe3\x68\x12\x17\xae\xdf\x19\xc4\xb2\x34\xd5\xcb\xd9\x23\xae\xaa\x43\xc7\x05\xba\x85\x6c\xfb\xcc\x7b\x44\xde\x4c\x03\xbe\x17\x34\x6a\x55\x74\x98\x8f\xee\x97\x53\xf1\x64\x3e\x2e\xbe\x22\xa8\xee\xdd\x90\x6f\x77\x77\x33\x3a\xcc\xfb\xa1\x54\xcf\xb6\x05\x6f\xfa\x95\x19\xed\x7f\xaa\x37\x90\x5a\xa9\xde\xe7\xab\x3d\xe1\xf8\xcc\x0f\x4f\xcf\x59\x3e\x5b\x14\x10\xec\x9a\x32\x77\x9b\x3d\x48\xe8\x9d\x2b\xee\x93\x1e\x88\x01\xd7\xee\x12\x68\x5f\x9f\x22\x34\x7e\xfa\xf1\xc3\x86\x53\x49\x03\x8e\xf3\x2d\xd8\x5d\xbb\xbd\x8a\x10\xdf\xa2\xc9\x6f\x17\x3a\x6a\xb7\x36\x4e\xac\xd9\x2e\xa3\x73\xd4\x2b\x3c\x32\x4e\xb9\xba\x70\x31\x43\xab\x4b\xa6\x1c\x56\x1d\xc3\xc3\xab\xef\xd2\x7f\x21\x0d\x28\xfa\x52\x84\x85\xa7\x17\x1e\xe4\xb6\x7e\x9a\x9f\xe6\xb4\x97\x14\x41\x1a\x83\xba\x90\x43\x32\xa3\x2b\x1a\xed\x32\x74\x97\x15\x3a\xd2\x0a\x14\xff\xa1\xca\x51\xb2\x1d\x72\x3e\x49\xbd\x6e\x4f\x67\xef\x76\x81\x98\x99\x44\x8d\xf4\xef\x53\x5c\x51\x35\xbf\x78\x1b\x1f\x5f\x22\xbc\xd9\xf3\xe3\xf0\x22\x19\xba\x40\xd5\x1c\xe4\xc5\x31\x0d\x31\xda\xd9\x0e\x89\xb2\xa4\x76\xd8\xd3\xd9\xdb\xe1\xb0\x6f\x1d\x21\x9b\x6f\x33\x39\x56\x14\x86\xed\x63\x87\x37\xdc\x9b\xc5\x97\x19\xf5\x3a\xe2\x28\xc7\x61\x94\x43\x03\x1c\x4e\x1d\x11\xaf\xfe\x71\x33\x39\x00\x4c\xe6\xc2\x3d\x61\x53\xb0\x8d\x0d\x81\x7b\xb5\xd1\xae\xee\xc2\x92\x63\xd7\x3d\xba\xce\xfe\x31\xb0\xcf\xfe\x4e\x17\xec\xdd\x7c\xc0\x05\xda\x44\x32\x55\xc3\x09\xf7\x41\x05\xc2\x89\x65\x9a\x5a\x78\xa9\xb7\xf3\xa9\xc0\xa8\xbe\xf6\x8a\x6d\xe8\x40\x7b\x19\x55\x65\x3b\x0f\x75\xd7\x13\x6f\x9a\xa4\xbe\x45\xd5\xb2\x6c\xd3\x35\x25\x53\x8b\xd4\x2b\xd8\x47\x07\x67\x81\x40\x86\xb6\x37\xbd\xd8\xfd\xee\x2c\x24\x09\x3a\x8e\xb2\xd0\x26\x91\x8e\xb2\x57\x1c\xa8\x1a\x94\xa3\x6b\x45\x87\x55\x44\xee\x3a\x69\x94\x45\xec\x87\x5c\xc1\xbc\xf8\xa3\xcd\xf5\xf1\xeb\x56\x95\x1f\x0b\x63\x17\x79\x7c\x16\xac\xdd\xa4\x68\x42\x98\xbb\xc8\xeb\x3d\xec\x85\x57\xbf\x00\x83\xbe\x9d\x9d\x87\xf9\xe6\xb5\x65\xce\xf9\xa9\xaa\x88\xa5\xd0\x76\xe6\x2f\xed\x54\xf1\x10\x30\x21\x00\xee\x23\xdf\x5c\xd8\xd2\xf1\x98\x46\x04\xf4\x1c\x86\x93\xa7\xa7\x1f\x3f\xa2\x97\x62\xd1\x71\xb0\xdf\x58\x4b\x6a\xce\xfd\x59\xc0\xaf\x0f\x9d\x2f\xec\x87\xc4\x7b\xd0\xcb\x3b\x0b\x13\xc9\x36\x70\x12\xf1\x52\xa5\xfd\xe1\xc8\x4b\x55\x76\xeb\xe0\xd0\x0a\xef\xcf\x74\x5e\xa9\x77\x91\xdd\xb1\xd6\x05\x8e\x9e\x48\xaa\x33\x71\xa0\xa6\x41\x1b\x11\x4d\x53\x83\xc0\x38\x60\x92\x2a\xc1\x89\x71\x86\xbf\xbb\xdf\xce\x31\xf9\x74\x9a\x68\x12\x40\xeb\xb3\xf3\x4c\xc1\x42\xdf\x36\x7d\xe8\xc9\x4f\x4f\xea\x89\x77\x36\x18\xc9\x16\x85\x6c\x05\xf9\xfa\xd5\x6f\xc1\x7f\x21\xe8\xb7\x6f\xd7\x48\x85\x35\x3f\x18\xed\x7f\xde\xd9\x31\x06\xbd\x33\x9b\x06\xc8\x07\x0c\xee\x09\x78\x31\x94\xc2\x77\xb8\x1f\x10\x5c\xe1\x67\x16\x62\x22\x69\x9c\x21\x2c\x09\x96\x5e\x3b\x1f\xf0\x18\x34\xbd\xc2\xe5\xb3\xf0\xf4\x46\x65\x13\x22\xea\x15\x6e\xef\x31\x35\xaa\xc1\x05\x54\x3d\x3b\x13\xf2\x40\x5f\x3d\xf8\xa7\x5f\xa4\xd8\x8b\xa8\xfd\xd8\x7f\x65\x69\x16\x17\x78\x2f\x63\x68\x68\xdd\x13\xeb\xd0\x78\xd9\xae\x02\xa2\x97\x11\x51\x0b\xb4\x5f\xb2\xc4\x72\xd7\x13\x68\x2c\xa1\x66\x5a\x30\x2c\x6d\xe9\
xae\xb7\x0b\x9e\x85\xe6\x46\x14\xea\xd0\x05\x11\x45\xdb\xa5\x56\x54\xb1\xa3\x4e\x0d\xe0\x2e\x6c\x18\x96\x61\xe3\xe8\x6f\x7f\xfd\x7d\x9a\xbb\xfc\xfb\x7f\xc3\x66\x2f\x7f\xfd\x1d\xb4\x39\xd4\xcd\x88\x64\xd8\x89\x96\x61\x1a\xf0\xe2\x5c\xe8\x44\xeb\x3d\x99\xbd\x66\xaa\x0e\x27\xa2\xb9\x30\x64\x2f\x63\xcd\xda\xc0\x98\xc2\xe0\x6a\xec\x1c\x5a\xb7\x96\xd8\x52\x9b\x42\xf9\x7c\x51\x66\xc0\xd5\x24\xe0\x2b\xd7\x72\x69\x88\x2a\x1f\xc2\xf0\x70\xb6\x2b\xce\xd8\xb1\x8b\x43\xef\x20\xdd\x95\x63\x63\x1d\xa1\x7b\x21\x81\xea\x4f\x55\xf9\xd3\xa7\xb7\x2d\x30\x1e\xa7\x44\xcc\x53\x75\x17\x95\xba\xb8\x30\x89\xa3\x64\x24\x04\x3f\x4c\xcd\xd8\x07\x13\x2f\x2a\x7a\x05\x2f\xc2\x55\xcd\x01\x17\x20\x8a\x69\x5f\xd9\x46\x42\x72\x7c\x97\xbf\xa2\x5e\x04\xc9\x4b\xdb\x31\x71\xc8\x96\xea\x1d\xa1\xdd\x45\x4a\xf5\x6e\xe3\xdd\x96\x8c\x87\xdc\x1d\xe4\xeb\x13\x36\x51\x0d\xd5\x55\x81\x36\xd9\x1d\x8f\xf9\xc3\x79\xd5\x9e\x9e\x91\x27\x1c\xc5\xb8\xef\x28\xfd\x1d\x25\x10\x8c\xfd\x81\xb3\x3f\x48\xe6\x0f\x94\xc0\x49\x8e\x4e\xa1\xf8\xd3\xb7\x3f\xe3\x51\xc7\x27\xbb\xd7\x1e\xce\xac\x2a\x6e\x26\xae\xa9\xca\x97\x39\x71\x34\xc5\xdc\xc2\x89\x98\x2c\x1c\x78\x84\x9f\x89\x6a\xbc\x7b\xd5\xe2\x22\x3f\x92\x44\x49\xf6\x16\x7e\xe4\x04\xc8\xf2\x24\x98\xb0\xba\xc8\x83\x22\x29\x02\xbf\x85\x07\x35\xd9\x81\xdd\x61\xda\xed\x6d\x74\x5e\x64\x41\x13\x28\x7e\x93\x1a\xf4\x81\xc5\x7e\x04\x8b\xc1\x82\x25\x31\xea\x16\x16\xcc\x44\x37\x65\x55\xd9\xc4\xd7\x82\xc5\x68\xfc\x26\x16\xec\x99\x16\xfb\xf3\xcd\x31\xf8\x30\x24\x4d\xdc\xc6\x67\xdb\xe9\x60\x3a\xb5\xe1\x14\xb8\xa6\x7d\xd9\xa7\x38\x14\x43\xb9\x5b\xc8\x73\x1e\xf9\x5d\x32\x73\xb2\x96\xed\xcb\xd4\x71\x06\xbb\xa9\xab\x31\xd4\x23\xbf\xef\x05\x6f\x09\x7b\x99\x01\xc5\x31\x37\x59\x07\xc3\xfc\x0c\x8e\x6b\xa2\xed\x00\x70\x99\x11\x47\x73\xb7\x69\x82\x9f\x75\xf4\x7e\x15\xba\x7b\xa3\xf6\x12\x27\x0c\x65\x28\xf2\xa6\x1e\xc1\x88\x9d\x3a\xc7\xb5\xfb\xc5\x1e\xc7\x30\x9c\xa1\x6f\xd3\x84\x9c\x28\xea\xfa\xf0\x76\x81\xa9\x6b\x13\x45\x85\xda\xc5\xa1\x11\xc3\x28\x0c\xbb\x69\x10\xc6\xa8\xc3\xa6\xca\x21\xd9\xbd\xbe\xa2\x06\xcd\xdc\x36\xcc\x63\xf4\x44\x35\xa6\xd0\x71\x27\xef\xd3\xe9\x57\x58\x31\x1c\x7b\x5b\x8f\x30\x67\x70\xed\xed\x5b\x80\xcb\x60\x82\xe1\x28\x4a\x90\x7b\x26\x11\x58\x7b\x71\x13\xfe\x56\xb0\x7d\xb7\x11\x7f\x90\x1e\x7b\x46\x9e\x0a\x99\x76\x73\x54\x2c\x55\xf1\x6c\x89\xc8\xd7\x5b\x64\x66\x58\xcd\xd7\xea\xb9\x6a\xbe\xdc\xab\x37\x7b\x78\x71\x44\x8c\x6b\xf9\x4e\xb1\x51\xef\x65\x85\x06\xdf\x19\x30\xad\x2c\xd3\x18\xe2\xc5\xa0\x85\x22\x99\xe0\x5b\x26\x7c\x81\xc2\xbb\x03\xba\xc5\x67\xe8\x6e\xa1\x5e\x6b\x74\xab\x04\x3e\x22\x6b\x44\xaf\xd5\xaa\xd6\xeb\xf5\x62\x53\x28\x8e\xf8\xd2\xa8\xdd\xa4\x3b\xf9\x3c\x3d\xe6\xfb\xed\x3c\x35\x6e\x8d\x9e\xee\x3d\x6a\xb1\x9d\x60\x5d\xb1\xd0\xfe\x78\xda\xe9\x64\xe9\x1f\x0e\xbc\x7c\x0c\xe1\x19\xc1\x9f\x11\xd7\x5e\xc0\x18\xfd\xf6\xfe\x80\xc1\x2d\x33\xaf\x5b\x36\xb5\x1f\xa2\xe9\xd9\x7a\xe1\x19\xc1\x9e\x77\x67\x93\xae\x2b\x1a\xb6\xa9\x7d\xaf\x7f\x1e\x36\xb6\x7d\xee\x49\xb0\x34\x45\x32\x28\xc7\xd0\x98\x27\xd5\xd6\x99\xfe\xfd\x65\x37\xc2\x7e\xf9\x81\x7c\xc1\xd0\x3f\xd0\xdd\xe7\xcb\x33\xf2\xe5\x74\xd2\x62\x5b\x66\x00\x57\x5d\xc2\x2f\xff\x1b\xe5\xa9\x41\x76\x78\x80\xdd\xf6\xf9\x03\xd9\x1d\xb5\xc3\x08\x96\x62\x3e\x4f\xbb\x3d\xbb\x4f\xd3\x8e\xe1\x28\xe2\x13\xb5\xdb\xb1\xfb\x04\xed\x48\x82\xe2\x18\x12\x27\x69\xea\x53\xb4\xf3\xb3\xfb\x04\xed\x50\x94\x26\x19\x86\xc2\x68\xee\x53\xb4\xf3\xb3\xfb\x78\xed\x70\x8a\xa1\x39\x16\x65\x58\xe6\x53\x3c\xf3\x8c\xdd\x27\x68\x87\x91\x0c\xc9\x92\x28\xc5\x7c\xca\xa8\x72\xc6\xee\xe3\xb5\xc3\xb6\x21\xce\x32\x04\xce\x7e\x0a\x22\x9c\xb1\xfb\x4c\xed\xf0\xcf\xd5\x0e\xff\x70\xed\xb6\xec\x70\x96\x25\x39
\x94\xe2\xd8\xdd\x98\x89\x7a\xec\x1c\x17\xd8\xae\x6a\x4c\x27\x22\xd0\x80\x21\xc1\x1d\x63\xf4\xc4\xfa\x26\x03\xfa\x59\x84\x6a\x84\x7e\xa0\x4e\xdb\xe1\x64\xa7\xd4\x0a\xaa\xd3\xd9\x96\x23\xf6\x8c\x7c\xd9\xcd\x80\x26\x73\xb8\xd9\x32\xb9\x77\x32\xbc\x13\xeb\xca\x7c\x2c\xec\x70\xdd\xbd\xf3\xb1\xc3\x01\xbb\x83\xb6\xdc\x76\x41\x23\x2a\x04\x09\x38\x56\xe4\x58\x16\x23\x31\x91\xa5\x30\x51\x82\x0a\x8d\x73\x22\x03\x15\x02\x83\x32\x03\x19\x52\x02\x04\x87\xa2\xa8\x24\x2b\x1c\x2e\xa3\x28\x0e\x38\x4c\xc6\x24\x9a\x23\xa8\xed\xa2\x48\x06\x24\x23\xd2\x18\x2e\x4b\xac\x4c\x89\xb8\x02\x18\x51\xe1\x44\x05\xa0\x9c\x2c\x62\x98\x42\xc8\xa2\x44\x88\x40\x56\x70\x92\x63\x50\x96\x00\x14\xc6\x71\x0c\x4b\x73\x2c\xcb\xa2\x80\x61\x95\x27\xcf\xd0\x44\x60\x79\x45\xff\x20\x98\x1f\x04\x16\x5c\x75\x79\x3f\xe3\xcc\x1f\x2c\x83\xd2\x1c\x7d\xb5\x74\x3f\x03\xa4\x68\x9a\x7c\x46\x30\x7a\xdb\x9f\xef\x3e\xbb\x1f\xbd\x7f\x7d\xe5\xc7\xaf\xcf\x08\xb6\x5d\x58\xf1\x3c\xcf\x67\x39\xab\xe8\xce\xaa\xd2\x4b\xdf\x59\x5a\x4c\x4a\x4c\x4d\x0d\x51\x50\x9a\xe2\x8a\xb1\x95\x72\x59\x2a\x35\xb4\xc2\x18\x16\xf4\xd2\xb4\x02\x9b\xf3\x32\x35\x2a\x96\x33\xcb\x71\xa7\x41\xe4\x07\x06\xa7\xf4\x09\x9b\x2f\xb5\x50\x51\xe7\xa6\x4e\xf1\x6d\xa8\xf4\x18\x57\x00\x6f\xc5\x69\x9a\xdd\x92\xe6\x87\xcd\x7e\x6d\xb3\xe2\x8f\x1f\x79\xf0\xd6\x29\xce\x94\xec\xdb\xb0\x50\x2d\xcd\x1b\x8d\x17\x33\x4b\x8d\x74\xb6\x91\xb1\x6c\xab\x48\x49\x42\xa3\x92\xab\xe7\x5f\xf2\x8c\x5e\xc4\x14\xa2\xb3\xe9\xf7\xc7\xb0\x38\xa2\x50\xbb\x63\x4e\x33\x78\x57\xab\x8d\x2b\x12\x5b\x90\xb2\x1d\x53\xaf\x0f\x52\x8c\x6d\x31\x75\xd9\xd1\x3c\xfa\xf3\x3a\x59\x05\x6f\x16\xde\x3a\x31\xe3\x9b\x26\x1f\xf2\x19\xf3\x43\x8c\x6c\xf1\x7c\x0e\x2d\x87\x15\xff\x47\x7f\x76\x5e\x85\x46\x44\x7e\x30\x16\xd8\xc7\xf8\xf1\x13\x23\x51\x90\x13\x29\x28\xb1\x10\x62\x14\xa4\x14\x51\x86\x28\x89\xd2\x24\xc7\x71\x0c\x01\x58\x0a\xa0\x32\x87\x8a\x38\x49\x00\x59\x96\x20\x2a\x61\x8a\x82\xd1\x80\xa5\x30\x94\x63\x50\x88\xee\x16\x6b\x58\x68\x2c\xa0\x91\xde\xce\x72\x18\x81\x5f\x2d\xdd\xcd\x39\x09\x96\xa0\xd9\x0b\xb1\xc0\xc4\x0c\x05\x62\x0d\x75\x8c\x29\x31\xb3\xa1\x96\xe2\x48\x21\x33\xd7\xc7\x12\x3d\x03\xb9\x71\x76\xd3\xa2\x09\x89\xac\x15\xd3\xf9\xd7\x59\x3b\x3b\xcc\x4d\x2b\x9d\x65\x7b\xa3\xe2\xd9\x59\xda\xee\x65\xa7\x59\x46\x5f\x6e\x84\xe9\xdc\xec\xbc\xac\xaa\xa6\x85\x0b\x46\x4a\xa9\x0c\xc9\x05\xc3\x74\x38\xc7\xeb\x3a\x2f\x14\xa6\xa7\xae\xc4\x06\xe3\x0d\x55\xac\xbf\xb5\x0a\xf4\x12\x4f\x6f\x5a\x8a\x6c\xa6\x31\xb6\xd7\xc4\x6b\x2a\x51\x6d\x0c\x53\x45\xa7\x94\x5f\xbd\xb8\xcd\x11\x21\xbf\x36\x37\xfc\xe6\x0d\x15\x6b\x19\x11\xa7\x97\x6c\x9b\xab\x6f\xf2\x32\x9b\x69\xd9\x74\x49\xe9\x0e\xad\x9e\x62\xbe\x0e\x54\x6b\x2d\x8b\x59\x8f\xf2\x34\x24\x14\x2a\x6c\x98\x3b\xfd\x3f\x0f\x05\x2c\x7e\x28\x30\x8f\x71\xe3\x27\x85\x92\x29\x28\xe1\x34\xcb\xc9\x18\xcb\x28\x40\x02\x32\x85\x4b\x38\x54\x38\x12\x55\x50\x91\x00\x2c\xcd\xe2\xb8\x84\x93\x90\x55\x64\x00\x81\xa4\xd0\x0a\x43\x60\x8c\x4c\xd0\x18\x41\x42\xfa\x42\x28\xe0\x5c\x94\xb3\x6f\x61\x0a\x8b\x84\x05\xaf\x94\x79\x3a\x2e\x50\x30\x94\xc1\x2f\x84\x02\x1d\x33\x14\xd2\xc3\x3e\x93\x05\x65\x74\xa4\xd0\x1b\xb7\xe7\x54\x97\x1a\xcf\x56\x1b\x26\xc8\x56\xb3\x5d\x33\x0d\x68\x87\x5b\x4a\x6b\xa5\x3b\xea\x76\x75\x95\x28\x8c\x1d\xa7\xb9\x98\xbb\x54\x86\xb4\x06\x9c\x31\x82\xbc\xca\x49\xa4\x92\x29\xa7\x0b\x38\x8a\x72\xf9\x61\x37\x05\x4c\xa5\x69\xa0\x5e\xd7\x79\xa1\xe0\xf3\xce\xc6\xb4\xa3\x97\xb2\x5a\x63\xcd\x95\x87\x9c\x84\x4f\xf3\x2f\xb4\x2e\xf2\x83\x05\xbe\xc1\x74\xad\x81\xce\x0b\xb2\xb6\xa2\x66\x79\x69\xf4\x86\x2d\xea\x03\x7b\xce\xf7\xcb\x1a\xc3\xea\x5d\xdb\x64\x56\xa6\x39\xad\x15\xfb\xfa\x66\x9a\x21\x95\xb2\x55\x7a\x59\xb4\x6c\x74\x5
e\xec\x9b\x3d\x8f\xb2\x14\x12\x0a\xe5\x51\x98\x3b\xfd\xf7\x84\x02\xfd\x18\x37\x7e\xa2\x00\x64\x59\x0a\x65\xa0\x22\x29\x34\x49\xd3\x2c\x2d\x13\x50\x46\x09\x02\xc7\x30\x9c\xa4\x70\x4e\x61\x44\x8e\xc3\x59\x51\x41\x29\x02\x53\x64\x48\x60\x00\x50\x94\x08\x00\x21\xa2\x0a\x94\x2e\x85\x02\x1b\xe9\xec\x18\x8a\xe2\xd4\xd5\xd2\xfd\x6a\x96\x60\x18\xfa\x42\x28\x50\x31\x43\x01\xb3\xcd\x59\x4b\x59\xbe\xe1\x6d\x7d\xe6\x36\x5d\xfe\xad\xd4\x2e\xe7\x2b\x0a\xa4\xe6\xe6\x72\xd5\x5b\xa7\x61\x0d\x58\xfd\xc5\x6b\x63\x95\x76\x8c\x74\xca\x4a\x89\x5d\x83\x59\x98\xda\x66\xb4\xd8\x50\xe5\x0a\x5e\xcd\x62\xf6\xb0\x88\xad\x46\x9a\x38\xc5\x04\x3b\x9d\xeb\xe0\x86\xd6\xf2\x85\x82\xaf\x2b\x5f\x5f\x67\x99\xfa\xac\x93\x32\x47\x68\xfe\x35\xdd\xee\x58\x82\x30\x37\x53\x73\xa3\x4f\xd3\x16\x3a\xd2\xb2\xa9\x9c\xda\x57\xd0\xc2\x48\xcb\x16\xa6\xdc\xa8\x64\xd0\xcd\x8e\xa0\x54\xfb\x8e\x31\xad\x16\xfa\xcd\x12\x5f\x13\x33\xa3\x4e\x27\x6d\x32\x65\x72\x28\xe0\x18\xc5\x51\x15\xba\xe2\x4d\x90\x46\x21\xa1\x50\x44\xc3\xdc\xe9\xbf\x27\x14\xa8\xc7\xb8\xf1\x13\x86\x8a\x34\x46\x63\x2c\x49\x40\x91\x93\x69\x0a\x87\x28\x47\x90\x2c\xad\x00\x16\x63\x48\x85\x23\x14\x89\x24\x01\x45\xb0\x2c\x83\xb2\x5b\x8a\x0a\x0a\x24\x92\x91\x01\xab\x00\x40\x53\xd8\xa5\x50\x60\x22\x9d\x1d\x47\x29\x3a\x3a\x50\x0e\xa5\xbb\xd4\x07\x41\x93\x2c\x7a\x21\x14\xc8\x98\xa1\xb0\xce\xe2\xf9\xd9\x2c\x47\x53\x98\x56\x9d\xce\xd1\xd9\xf2\x35\x23\x37\xe7\xe9\xb5\x60\x75\xd5\x62\x36\x6b\x10\x6c\x65\x5d\xc4\xcd\xd4\xeb\xb8\x9d\x63\x50\xbe\x8d\x66\xd0\x86\x9e\xc6\xd3\xe5\xdc\xd4\x35\xb1\x31\x51\x94\x1b\xf5\x0c\xd5\x95\xfb\xfc\x80\x4d\x2d\xba\x73\x9b\x02\xe4\xf4\x14\x0a\x6b\xdf\x5a\x41\x19\x53\x76\x5a\xe5\x4b\x32\x5a\x1c\x35\xd3\x65\x96\x32\x07\xe6\xd2\x1a\x2c\x17\x96\x42\xbc\x55\xe5\x0d\x3a\x9a\x09\x72\xa6\x69\x0c\x70\xba\x02\xeb\xf5\xb7\x79\x7f\xa0\x2e\xb3\xac\x53\x37\x09\xd9\x5a\x88\xd6\xf0\xf5\x95\x4a\x8d\x48\xdb\x6d\xd9\x46\xde\xb0\xb8\xae\x60\xa4\x0c\x6f\x02\xd6\x0b\x09\x85\x42\x2b\xcc\x9d\xfe\x7b\x42\x81\x7c\x8c\x1b\x3f\xd1\x24\x23\x8b\x90\xa4\x38\x86\x11\x21\x83\x8b\x12\xad\x40\x86\x10\x31\x42\xa4\x48\x86\x06\x1c\x21\x92\x28\x25\xc9\x0c\xa0\x68\x1c\x28\x18\xcb\xd1\x38\x2b\x31\x92\x28\x93\x18\x94\x58\x6e\x1f\x0a\x78\xa8\x57\x47\x4f\x81\x08\x8c\xa2\xa3\xa7\x4f\x87\xd2\x5d\x26\x89\xe6\x30\xf6\xd2\xba\x99\x88\x19\x0a\x78\x9b\x5b\xce\x07\x43\x98\x92\xec\xf5\x92\x7a\x73\xba\x6e\x4f\x7e\x9d\x33\xad\xfc\xdb\xd0\x1a\x54\x96\x85\xf2\xc8\x5c\xb3\xcb\x5e\xd1\x51\xdb\x6a\x93\x1c\x14\x00\x25\x60\xd4\xa6\x28\x96\xd3\xf9\x17\x87\x01\x86\x9a\x93\x9a\xcb\xe2\xb2\xb5\x6c\xb7\xbb\x60\x6a\xc1\xc5\x06\xaf\x8e\x25\xaf\xeb\xbc\x50\xf0\xad\x15\x96\x3d\x95\xb5\xe8\x32\xaf\x0f\xdb\xc0\xa2\x8a\x34\xcb\x34\x52\x8d\xa1\xc4\x57\xea\x78\x3a\x9d\x75\xaa\x63\x71\xb3\x1e\x54\xfb\xda\x58\xe4\xc6\xbd\xda\xa6\x3c\xea\x9b\x23\xfb\xa5\x44\xe9\x33\x42\xe9\xa3\x19\x40\xbc\x8e\xfb\x45\x3b\xad\x55\xa6\xcb\xb4\x34\xc7\xf2\x4a\x3d\x43\x52\x1e\xfd\x56\x48\x28\x08\x4e\x98\x3b\xfd\xf7\x84\x02\xf1\x18\x37\x7e\x62\x19\x99\x60\x64\x49\x14\x81\x4c\x8a\x12\x94\x69\x20\x29\x80\x62\x39\x65\xeb\x83\x32\x04\x2c\xc1\x11\x9c\xa2\xa0\x0a\x26\x8b\x0a\x54\x38\x8e\xc1\x19\x89\x13\x59\x28\x89\x28\x24\x94\x8b\xa8\x10\x3d\x05\x22\x71\x96\x88\x46\x85\x6d\xa9\x17\x0a\xbb\x8c\x20\xc6\xb2\x97\x96\xcd\x58\xcc\x50\x20\x95\xae\x22\x2d\x68\xac\xfa\xe6\x02\x27\xa5\xa9\x8a\x5e\x30\xe0\x6b\x6e\x4e\xa5\x57\xac\xbc\x4c\xa5\xc6\x92\xb1\x11\x5f\x1c\xa7\xd1\x5c\xd3\x16\x53\xd1\xdf\xcc\xea\x2b\xdd\x2d\x6a\x2b\xf7\x4d\x6e\x0f\x55\xb9\xce\xb5\x30\x27\x55\x32\x7b\x4b\x68\xae\xf1\x55\xa1\xc3\xf7\x8d\xd1\xdc\xeb\x3a\x2f\x14\x7c\xde\xa9\xe4\x8d\xde\xb8\xb6\xb6\x24\xd0\xce\x
4f\x07\x34\x4b\x96\x0d\xb5\x57\x48\xcb\x6f\x63\xea\xc5\x20\x61\x13\xd7\xc7\x6c\xb5\x54\x30\x8d\x8e\x5d\x6a\x69\xd9\x06\x33\x1a\x76\x32\x6d\xcd\x29\x67\xf1\x8c\xde\x29\x67\xe9\xfc\x6b\x7e\x39\x7b\xeb\xd1\xf4\xb2\x38\x57\xeb\x92\x93\x77\xe8\x99\x47\xb9\x16\x12\x0a\x99\x79\x98\x3b\xfd\xf7\x84\x02\xfe\x18\x37\x7e\xa2\x09\x99\x63\x15\x8a\xa0\x21\xa4\x59\x19\x13\x71\x46\xa4\x44\x96\x53\x70\x02\x28\x14\x81\x61\x22\x43\xd1\x1c\xc0\x49\x05\x28\x18\x89\x12\x40\x46\x45\x0a\x17\x69\x82\x10\x51\x46\x84\xdc\xd6\x63\xd1\x5d\x2a\xfe\xbd\x57\x93\x91\xce\x4e\xe1\x2c\x1d\x3d\x7d\x3a\x94\xb2\x14\xcb\x71\x04\x49\x71\x97\x56\xcd\x31\x21\xa1\x39\x7e\xc1\xea\x0b\xca\x44\xc5\x32\x33\x20\x8d\x4d\x63\xd9\x5b\x17\x88\xbe\x65\xce\x53\xcb\x3c\xdf\x70\xb3\x58\x05\xaf\x31\x19\x86\x1e\x2f\xc6\xed\xa2\xcd\xf5\xe4\x61\x45\xec\x56\x54\x49\x53\x1a\x2f\x1b\x06\x1f\x8d\xf3\xe5\xde\xbc\xdc\xec\x4b\x95\x2e\x35\x73\x97\xa6\xad\x63\x3b\x7f\xf4\xe2\xc0\xfb\x56\x3a\xfe\xc3\x7b\x45\xce\xe9\x79\xc5\x37\x5b\x7b\x8f\x25\xde\xb8\x99\xce\x77\xac\x2a\xe7\xf2\xfd\xf5\xdc\x5d\xe7\x88\x61\xa7\x61\x11\xaa\xbb\xee\x2c\x05\xbd\x46\xf3\xbd\xf9\x2a\xd3\x21\x85\xb6\xb1\x4c\x55\x5d\x37\xbd\x91\xde\xd2\x6b\x36\x9d\xd2\xf2\x19\x16\xb5\x8d\xfe\x9a\x79\xc1\x6b\xb6\x8e\x73\x82\xb5\x70\x7a\xed\xd4\x74\xc9\x8d\x77\xfc\x43\xe2\x24\xfc\xf3\xff\x3c\x4e\xd0\xf8\x71\x82\x3d\xc6\xc7\xbd\xb7\x06\x0e\x9e\x8e\x71\x0c\xfa\x1d\xc5\xbe\xa3\x18\x82\xa2\x3f\xbc\xff\xa2\x7d\x99\x65\xb9\xe8\x41\xff\x50\x4a\xe2\x1c\xc9\xd1\x0c\xce\x5d\x5a\x14\x87\x7b\xfa\x4e\xa4\x5f\xdd\x29\xd1\x9f\xcc\xb0\xa2\x92\x9b\xf4\xa6\x53\xc9\x30\x39\x23\xc7\x15\x71\x74\xfd\x92\x49\x39\xe8\xd4\x75\x56\xa5\xd5\x1b\x36\x94\x3b\x83\x11\xc8\x94\x41\xde\x9b\xf7\x08\x37\x3b\x71\x14\x18\xfc\x27\x7f\x4e\x4e\x7c\x65\x47\x2e\xc6\x7b\xf9\xf7\x6e\xd0\x45\xbc\x55\x11\x75\x7e\x0a\x8f\x88\xb8\x2b\x64\x82\xe7\xa2\xb0\xfb\xc8\x10\xc1\x03\x48\xf7\x91\x21\x83\xc7\xa6\xee\x23\x43\x05\x0f\x0c\xdd\x47\x86\x0e\x1e\x73\xba\x8f\x0c\x13\x3c\xe0\x73\x1f\x19\x36\x78\x2c\xe9\x3e\x32\x5c\xf0\x40\xce\x7d\x64\x30\x34\x78\x8e\xe8\x4e\x3a\xc1\x13\x3b\xf7\xca\x13\x3c\x8a\x73\xaf\x3c\x44\xf0\xd0\xcb\x9d\x74\xc8\xe0\x59\x9d\x3b\xe9\x50\xc1\x63\x2a\x77\xd2\xa1\x83\xa7\x6b\xee\xa4\xc3\x04\x0f\x96\xdc\x49\x87\x0d\x9e\x87\xb9\x93\x0e\x17\x3c\x79\x72\xe7\x38\x88\x06\x8f\x97\x3c\xe6\x62\x92\x87\x9c\xe0\xbd\xfc\x72\xdc\x33\xb2\x15\x3e\xde\xd1\xe5\x88\xfb\x39\x12\x63\x94\x0f\x09\xfc\x68\x72\x7a\x40\x8f\x6b\xf2\x7f\x7f\x71\x4d\xef\xa4\xcc\x9d\x67\xd3\xbf\x3c\x23\x5f\x14\xdb\xd4\x13\x9d\xb6\x79\x46\x6e\x3d\x08\x95\xf0\xa0\xfb\x4d\xd6\xdb\xa3\xdf\xe9\x81\xfe\xc7\x7a\x37\x58\x6f\x87\xd6\xa7\x07\xfc\x1f\xeb\xc5\xb2\x9e\x7f\x76\x71\x7a\x20\xff\xb1\x5e\x2c\xeb\xf9\x27\x43\xa7\x07\xf6\x1f\xeb\xc5\xb1\xde\xf9\x14\xf0\xf8\xf0\x4f\xe4\xc6\xb3\xde\xd9\x84\xf5\xf8\xf0\x0f\x6a\xc4\xb2\xde\xf9\xb4\xf8\xf8\xf0\xcf\x8c\xe5\x46\xeb\xe1\x01\xeb\xe1\xff\x58\xef\xba\xf5\xce\x96\x2e\xc7\x07\xd2\x77\x82\x5e\x59\x18\x32\xb4\x93\x5a\x70\xff\xba\x63\x52\x23\xc6\x38\xce\x9f\xf0\xb5\xd3\x2b\x0b\x98\x90\xdb\xf0\x12\xdc\x7a\x71\xd3\xc5\x60\xf7\x2e\x92\x22\xef\x0d\x09\x4d\xe5\xa1\xd1\x4b\xea\xab\x84\xf0\xe0\x8a\xeb\x5e\x42\x44\x60\xf1\x71\xb7\x44\x64\x70\x15\x73\x2f\x21\x2a\x30\xa1\xbf\x5b\x22\x3a\xb8\x32\xb8\x97\x10\x13\x98\x24\xdf\x2d\x11\x1b\x9c\x6d\xdf\x4b\x28\x38\xf1\xbc\x5b\xa2\xf3\xe4\x1e\x9b\x40\xa4\xf3\xf4\x5e\x92\x7e\x3b\x4f\xf0\x25\xe9\xb8\xf3\x14\x5f\x12\xef\x3e\x4f\xf2\x25\x71\xef\xf3\x34\x5f\x92\x31\xe0\x3c\xd1\x97\x64\x10\xf0\x52\x7d\x67\x90\x74\x37\x25\x36\x40\x29\x3a\xb9\x76\xeb\xe5\x7d\x8f\x48\xaf\x5d\xbb\x92\xe9\x19\xd9\x2a\x10\x2f\xc1\x16\x79\x55\xdf\x03\
xd0\xc3\x7f\x95\x0e\x20\x48\x0e\x32\x04\x81\x41\x8e\x00\x38\x85\x11\x34\x83\xb3\x0c\xc4\x19\x52\x11\x31\x9a\xe3\x80\x08\x14\x06\x27\x44\x19\x12\x0c\x80\x22\xe4\x68\x9c\x64\x25\x86\x56\x64\x02\x65\x30\xf0\xf4\x8c\xec\xde\xfd\xbd\x7f\x92\xe3\x3b\x0b\xc3\x91\x87\x83\x90\xd1\x6f\x60\xb1\x6c\xe4\xd1\x9b\x63\xe9\x19\x78\xed\x37\x4d\x09\xb9\xeb\xd2\x3c\x9f\xd2\xc7\x76\xb1\x3b\x5c\x0e\x1a\x35\xa2\x23\xe4\x5d\x80\x32\x9b\xe2\x2a\x53\x28\x2d\xb9\x92\xa5\x8c\x33\x83\x32\x3c\xed\x34\xd6\xb6\xff\x14\x02\x7b\x7a\x99\xd3\x57\xc1\xfb\x77\xb4\x5c\xc9\x59\x2e\xdb\x76\xec\x91\x94\x93\xc7\x83\x7a\xc1\x79\x05\x75\xa5\xdb\x66\xc5\xcd\xca\x79\x59\x88\xad\x76\x91\xd7\x44\x36\xa3\x48\x3e\x32\x87\x4d\xcd\xfd\x47\x9e\xe7\x47\x1a\xe9\x91\x5f\x16\xf0\x92\x60\xf2\xd3\x52\x6e\xaa\x74\xc6\x59\x34\x47\x66\x35\xbe\xa6\x2d\x7b\xac\x9d\xd5\x0d\xac\xec\x4e\x6d\xdd\xe5\xb9\x62\x67\xd6\x77\x5e\xcd\x5a\x13\x6e\xb2\xac\x2d\x0e\x17\xa9\x9c\xfa\xf6\xca\x90\x1b\xae\xdb\xd1\xb0\x34\xd7\x24\x98\xd1\x88\x9b\x0f\x94\x1e\xca\x99\x99\xd5\xcf\x9f\x4f\xfe\x0d\x64\xff\x71\xcb\x56\x98\x6e\xfc\xa9\xbe\x57\xee\x1d\x70\xc8\x6d\xff\xc9\xfa\x9a\xe6\x64\xac\x41\x98\x7c\x8e\x1e\xeb\x8e\x5c\x87\xdc\x88\x7c\x93\x4b\xad\x01\x66\x77\x97\x25\x85\x17\x46\x6a\x1a\x55\x35\xee\xa0\x1b\x9f\xad\x09\x2a\x50\x4e\xb6\xcd\x87\x6f\x9b\x0a\x51\xfb\xa9\x99\x07\xf3\x0f\xf6\x6d\x2c\xfe\x99\x53\xfb\x92\xaf\x48\x4d\x67\xd0\x2a\x5a\x2e\x6c\xdc\xd9\xaa\x8e\x69\x23\x14\x6c\x2c\x13\xe3\xea\xc5\xf5\xb2\x9a\xdd\x34\x28\x37\x23\x48\xd9\xfe\x72\x95\xe7\x56\xc4\xd4\xb5\xf3\x44\x01\x2f\x48\x57\x79\x46\xee\x8f\x7b\xf5\xcb\x09\xf8\x73\x42\xaf\x92\x80\x7f\x2d\xc0\x9f\xaf\x13\x3d\x06\x4e\xf9\xa6\xa5\x83\x35\x8a\x31\xd8\xcb\x4b\x1d\x9d\x65\xc4\x81\xdb\x60\x67\x2c\xdf\x56\xab\xe9\x4e\x65\x88\xb7\xfa\xea\xd0\x3b\x00\x50\x5a\x75\xca\xd6\xee\xa5\xa8\xdd\xf1\xf8\x84\xfa\xdf\xcc\xff\xb5\x67\x6b\x77\xf2\xf7\xc5\xd2\x94\x0f\xf1\x85\x7b\x6c\xb1\x30\x4f\xb2\xf4\x3e\xdb\x16\xab\x4e\xd9\x15\xee\xe3\xbf\xb3\xc5\xbf\x3f\x6a\xd0\xf2\xd6\x81\xde\x4d\x9f\x87\xd3\x39\xbb\x7f\xb7\x28\xea\x1d\x8c\xb9\x3e\x97\xf0\x5f\x56\x06\x18\x8c\x83\x00\x15\x25\x5a\x91\x31\x16\x65\x71\x49\x04\x50\x62\x65\x4c\x41\x69\x09\x65\x48\x48\x4b\x34\x00\x0c\x4b\x2a\x80\x83\x90\x53\x50\x11\xc5\x28\x49\x61\x01\xc1\xb1\xa2\x4c\xed\xe0\x2e\xd1\x65\x45\x7e\xb8\xa3\xae\xc2\x1d\x16\xfd\xda\x99\x57\x88\x3d\x05\x16\x46\x49\xd1\xae\x18\xe8\xde\x0f\x47\xbb\x75\x81\x18\x8b\x59\x52\x7e\xd1\x32\xc3\xd5\xa0\xa5\xca\x5d\x58\x16\xd3\x4c\x0b\x57\x6c\xb4\xd0\xcc\x61\xfa\x3a\x8d\x8b\xf9\x54\xaa\x28\xa4\x60\xa6\x69\x57\x9a\xa8\xa9\x61\xfd\x4e\xbd\xd7\xea\x83\x01\xa3\xd7\x24\x39\xff\xd2\x92\xba\xc5\x8e\x98\x5a\x64\xcc\xd7\xd4\x5c\x26\xe4\xec\xf4\x3f\x03\xed\x2a\x5a\x85\x4a\x8a\x36\x0f\xe4\x1f\xec\xdb\x58\xfc\x7d\x68\xf7\x0b\xd1\x26\x29\xda\x15\x72\x99\x4d\xe3\x57\xa2\x5d\x00\x6d\xa4\x84\xfa\xdf\xcc\x7f\x3e\xac\xcf\xee\xe4\x1f\x8a\x76\xbf\x08\x6d\x1e\x62\x8b\x55\xa7\x6c\x4f\xef\xe3\xbf\x47\xbb\x8f\x1a\xb4\x1e\x8b\x76\x38\x25\x43\x99\xc2\x65\x8e\xc0\x48\x8c\xa3\x20\x4d\xb0\x12\x43\xb1\x22\x0d\x21\x54\x18\x49\x46\x19\x09\x95\x14\x52\x62\x39\x9a\xe6\x18\x85\x96\x29\x49\xc4\x71\x56\x22\x49\x19\x48\xb2\xcc\xee\xd0\x8e\x78\x14\xda\xd1\xd7\xd1\x8e\xc0\xa3\x2f\x1c\xd8\x96\x12\x4f\x81\xac\x5b\x52\xb8\x2b\x05\xfa\xf7\xc3\xe1\x0e\xe0\xaf\xf6\x8a\x22\x9b\x98\xb6\xe4\x4a\x85\x5a\xe5\x65\x58\x50\xb2\x75\x4d\x92\xb3\xed\xa1\x46\x10\x4b\x8a\xe9\xa7\x16\xbc\x3e\x1e\x29\x58\x6f\x59\xa8\x39\x60\x2e\xd7\xc5\x71\xbe\x28\xd4\xa7\x0c\xb5\x94\x8d\x75\x8d\x04\xc5\x0d\x2f\xd9\xdd\x9a\xd9\xe9\xbe\x6e\x96\x35\x89\xc9\xb6\xfe\x33\xe0\xae\x9c\x7f\x1b\x25\x85\x9b\x07\xf2\x0f\xf6\x6d
\x2c\xfe\x8f\x81\xbb\xa4\x70\x93\x18\xee\x4a\xe9\x56\xe7\x57\xc2\x5d\x00\x6e\xa6\x09\xf5\xbf\x99\x3f\x1c\x2f\xe5\x3b\xf9\x7f\x04\xdc\xdd\x0b\x37\x0f\xb1\x45\x60\xa1\x7d\x07\xdc\x7d\xd4\xa0\xf5\x58\xb8\x53\x44\x28\xb2\x14\x29\x52\x8c\xc4\x40\xc0\x52\x04\x8a\xb3\x0a\x4e\x40\x11\x32\x98\x0c\x69\x4c\xc2\xa0\x24\xca\x2c\xa0\x49\x85\x26\x09\x46\x06\x0c\x41\x41\x48\x30\x2c\x4b\x48\x90\xa2\xb6\x70\xc7\x3e\x30\x97\x49\x1c\xde\x64\x8d\xbe\x41\x87\xc0\xa3\x6f\xa2\x3a\x94\x9e\x6d\xe9\xdc\x05\x77\x55\x9e\x8f\xca\xb7\x7d\x38\xdc\xd9\x3c\x5f\x1a\x59\x19\xb2\xd0\x18\x35\x5e\x5f\xca\xea\xaa\xb2\x6c\x3b\xe3\x54\x9f\xe0\xc7\x43\x67\xd6\xad\xb6\x44\xaa\xd6\x5e\x36\x16\x0b\x98\xd6\x3a\x60\xd5\xc7\x70\xb5\xde\xe9\x0a\x99\x92\xd5\x6c\xbc\x8e\x7a\x12\x3a\x53\xfa\x0b\x79\xa0\xe8\x55\x19\x07\x83\x31\xd5\x6b\xc9\xdc\x59\x88\xf2\x09\xe1\xce\x57\xe9\x56\xb8\xa9\xbf\x50\xec\xeb\xc9\xb6\x11\x63\xec\x15\xb8\x7b\x1c\xff\xbb\x72\xa9\x3e\xb8\xf3\xc1\xf5\xcd\xab\xbb\x75\x79\x94\x4d\x0a\x77\xbf\x30\x97\x9a\x38\x7f\x87\x37\x8d\x4d\xd2\xfc\xdd\x2f\xca\x1f\x86\xc2\x5d\x31\x99\x2d\xd0\xda\x49\x96\x08\xa5\x3f\xce\x16\x81\xbe\xb8\x85\xff\x1e\xee\x3e\x6a\xd0\x7a\x2c\xdc\xc9\x38\xa0\x71\x51\x61\x44\x0e\x62\x18\x2b\x62\x2c\x2e\x11\x84\x88\x2b\x32\x47\x88\xb8\x24\xe1\x28\x46\xc8\x22\xe4\x00\x2b\x33\x8c\x42\x50\x00\x27\x38\x91\x41\x25\x16\xd0\x34\x03\x29\x48\x3d\x3d\x23\xcc\x03\xe1\x0e\xbf\xb2\xba\xe3\x50\x14\x63\xa2\x5f\x91\x3c\x94\x9e\x1d\x17\x48\xba\xba\x0b\x8e\x03\x1f\x0e\x77\x52\xc5\x18\x56\x31\xc9\x66\x40\x77\x3e\x62\x09\x85\xa2\x1a\x33\xb4\x96\x6e\xd6\x5f\x9a\x9d\x2e\x18\x9b\x66\xae\xa4\xe8\xe6\xf8\x0d\xa3\x33\xd3\x5c\x9d\x23\x67\xaf\x36\x28\x33\xc4\x52\x6f\xcb\x45\xfd\xd5\x2d\x83\xca\x48\x5e\xb9\x52\x7d\x6a\x2e\x65\x61\xd9\x5f\xe7\x47\xa0\x94\x79\x60\x32\x33\xe3\xbb\xde\xe5\x56\xb8\x69\xe6\xda\x30\x77\xb2\x6d\x2e\x3c\xae\xae\xc0\xcd\xe3\xf8\xdf\x05\xb7\x3e\xb8\xf3\x25\x63\x6f\x86\x1b\xbb\x4a\xa5\x92\xc2\xdd\x2f\x84\xdb\xc4\x43\x3c\x5b\xcf\x93\x49\x87\xf8\x5f\x04\x31\xa1\x70\x57\x48\x66\x0b\x6a\x75\x92\xa5\xf6\xd9\xb6\x08\xf4\xc5\x2d\xfc\xf7\x70\xf7\x51\x83\xd6\x63\xe1\x4e\x24\x49\x8e\x93\x64\x52\x94\x18\x16\xa7\x71\x42\xe1\x80\xc4\xd1\x14\x89\x92\xa8\x4c\x72\x12\x46\x91\x0a\x10\x69\x20\xb2\x32\x4b\x88\x38\x86\xa1\x28\x8a\x4b\x34\x8e\x02\x0a\x8a\x0c\x89\x32\x4f\xcf\x08\xfd\x40\xb8\xc3\xae\xc1\x1d\x86\xe2\x14\x71\xe1\x9e\x3c\x9c\x22\x9f\x02\x27\xd1\x92\xc2\x5d\x70\x48\xfe\x70\xb8\xe3\x33\xca\x1a\x60\xee\xb2\x9a\x9b\x56\x2a\x6d\xc3\xe9\xaf\x74\x8e\x29\xc0\xb1\x5e\x74\x97\x99\x32\x84\x18\xde\x22\xb9\x95\x03\xeb\xcb\x4a\xf1\x65\x25\xa2\x40\x7f\x1d\x16\xca\xfd\x51\xbe\xce\x34\x0b\x45\x6a\x3c\xa6\xe8\x0e\xe4\x0a\xcb\x8d\x5a\xe5\xb8\xb7\x6a\xb5\xfb\x86\x73\xb9\x07\x9e\x54\xc9\xf8\xf2\x3e\xb7\xc2\x4d\x4b\x35\xd3\xd2\xc9\xb6\xd9\xf0\xb8\xba\x02\x37\x8f\xe3\x7f\x1f\xdc\x9e\xda\xfb\x56\x87\x37\xc3\x8d\xd6\x10\xe9\xa4\x70\xf7\x0b\xe1\x36\xe9\x10\x5f\xce\x56\x5e\xb8\xa4\x43\xfc\x2f\x82\x98\x50\xb8\xcb\x27\xb3\x45\xba\x77\x92\x25\x22\xc7\xfe\x61\xb6\x08\xf6\xc5\x2d\xfc\xf7\x70\xf7\x51\x83\xd6\x83\xe1\x8e\x55\x64\x0a\xd2\x50\x26\x64\x9c\xa6\x58\x00\x68\x1a\x25\x51\x02\xe3\x20\xca\xd0\x90\x40\x51\x94\x56\x18\x8e\x42\x21\xc6\x42\x0e\xa0\x04\xc4\xa0\x2c\x43\x28\x2b\x92\x24\x8a\x24\xb6\x5d\x4a\x51\x0f\x84\x3b\xf4\x19\xc1\x2f\xc2\x1d\x8e\x32\xcc\xa5\x9b\x32\x77\xa5\x67\x87\x9c\xef\x81\x3b\x7e\x77\x57\x6b\xe8\x90\xfc\xe1\x70\x37\x62\xdf\x5a\xb0\xab\xcd\xe9\xd7\x05\xa6\xc0\x59\x1a\x27\x5c\x4e\xa8\xad\x0d\xb3\x31\x48\xd1\xba\x59\xd0\x5f\x86\x95\x85\x4e\x31\x99\xf9\xbc\xf5\x4a\xbf\x99\xfd\x74\x5f\x52\xcb\x4c\xa9\x5f\xb2\xd4\xd4\x4b\xb3\x3f\xee\xcc\x53\x9
5\x8e\x9d\x6d\xc9\xbc\x4e\x0b\x7d\xf2\x25\x23\x2e\x33\x0f\x4c\x66\x66\x12\xec\x9d\x75\xb2\xfc\x14\x3b\xd9\x36\x13\x1e\x57\x57\xe0\xe6\x71\xfc\xef\x83\xdb\x53\x7b\x5f\xc5\x9b\xe1\x46\x69\x71\x78\x52\xb8\xfb\x85\x70\x9b\x78\x88\x2f\x95\xb2\xb9\xa4\x43\xfc\x2f\x82\x98\x50\xb8\x13\x92\xd9\x42\x20\x4f\xb2\x24\xf5\x85\xa4\x7d\x71\x0b\xff\x3d\xdc\x7d\xd4\xa0\xf5\x60\xb8\x03\x04\x0b\x19\x09\x47\x49\x91\x50\x58\x20\xb2\x1c\xca\x00\x52\xe4\x68\x8c\x25\x31\x86\xa5\x48\x51\x92\x44\x8a\x04\x8c\x02\x48\x8e\x04\x04\x2d\x11\x32\xc6\x52\xa2\x48\x52\x32\xca\xc8\xf4\xd3\x33\x42\x3e\x0e\xee\x58\x6e\x0f\x77\xe1\xd7\xe0\x7a\x37\xda\x32\xd1\xef\x21\x1c\x4b\xcf\xde\x9f\x49\x0a\x77\xc1\x21\x39\x1b\x70\xb3\xc7\xc0\xdd\xee\xa2\xb5\x1b\x86\xac\x53\xd3\x8c\x32\x9c\xf9\x28\x65\x8e\x6e\x2c\x64\xde\xba\x84\xd1\xd4\xd1\xb5\xbb\xa0\xb3\xf3\x1e\xf5\xa2\x0e\x17\x99\x7c\x5e\x1b\x73\x9b\xee\xd0\xd0\x84\xca\x86\x2a\x56\x25\xb3\x6f\x72\x1b\x9d\xb4\x66\x35\xf6\x05\x10\x73\x63\x2c\x56\xc7\xe4\x5b\x59\xad\xcf\x35\xa7\xae\x71\x9c\xae\x77\xfa\xf3\x72\xe5\x8d\x19\x0e\xa7\x8d\xa1\x50\x3c\x83\xcd\x9a\x6f\xa9\x75\xfa\x1a\xb4\x59\x3c\x18\xf5\x55\xba\x15\xc6\xfa\xfc\xf2\xa5\x71\xea\xb3\x88\x4f\x1c\x18\x7b\x08\xff\x5b\x61\x3c\xcb\x9f\xc3\xa8\x6f\xd5\x79\x33\x8c\x75\x07\x99\x45\x52\x18\x4d\x02\xe3\xe3\xae\xb2\x49\x0a\xa3\x49\xa0\xa3\x97\x4d\xd5\x92\x42\x47\x12\xfe\x8d\xfc\xb4\x94\x80\xbf\x57\xf0\x2e\x1e\x6e\x18\x63\xea\x64\x15\xac\x07\x3d\x82\x35\x4e\xcd\xef\x9b\x56\x26\xe0\x3f\x44\x9b\x72\x02\xfe\x89\xc7\x83\xee\x6c\x94\x4d\x10\x8f\x89\xc7\x83\x4e\x96\x9f\xdd\xc9\x3f\x6c\x2a\xe5\xcf\x42\xdd\x11\x0f\x7b\x67\x5b\x45\xa9\xfb\xa1\xf1\x10\x88\xc7\x5b\xf8\x1f\x12\xe5\x28\x47\xbd\x51\x6e\x4d\x14\x17\x53\x2b\xdf\x18\x91\x1a\x33\x6d\xb7\xc7\x03\x45\x9a\x63\x14\xd5\xce\x3a\x8b\xf6\x86\xa8\xe4\x41\x53\xb2\x16\x95\x51\xb7\x59\xc2\x5d\xaa\x8c\x0f\x36\x38\xac\x6d\x46\x2f\xe3\xb2\x98\x1b\x2b\xca\xd8\x9a\x6b\xe3\x6c\xc7\x49\x61\x18\x99\xd3\xd6\xd7\xdf\x71\xc1\x6f\x3d\x06\x85\xc9\x34\x21\xa1\x22\xcb\x02\x0c\x88\x34\x0b\x48\x52\x94\x64\x14\x87\x8c\xc4\xc9\x32\x23\x31\x32\x60\x31\x16\x48\x98\xa2\xb0\x0c\x43\xe3\x90\xc3\x71\x20\x01\x89\xe3\x28\x96\xa1\x69\xb8\xfb\x13\x67\x89\xfe\xea\xf7\xee\xc6\x93\x8b\xf9\x02\x92\xc0\xd8\xe8\x7c\xc1\xb6\x94\x7d\x0a\xbc\xec\xbb\x73\xc6\x42\x95\x2d\xb6\x96\xad\xb9\x58\xc1\x8b\x3c\x31\xe8\xbf\xb4\xed\x8a\xfe\x32\x44\x51\xa5\xc0\x3a\xd5\x12\xa3\xa3\x42\x7b\x55\x1e\xa4\xf9\x21\xc1\x7b\xb7\xc7\x1e\x3f\x97\x26\x03\xde\xe7\x0e\xb0\xf5\xdf\x48\x9b\x39\x4e\x88\x84\x5c\x6f\xc0\x1b\x06\x2d\x66\xa6\xec\xaa\xcd\x6e\xd2\x39\x86\x56\x66\x74\x2d\x95\xd7\xf5\x75\x05\x64\x5b\x55\x82\x10\x36\xed\xc1\x60\x94\x5f\x6b\xf9\x3a\xb9\xc2\x6d\x0b\x8c\x89\xc5\x80\xe6\xa6\xfd\x29\x61\x9a\x35\xb6\x9a\x9d\xe7\x53\x4d\x96\x1f\x80\xb5\x54\xc9\xd4\x5e\xec\x4c\x2d\x1b\x23\x4f\x70\xe6\xb5\x51\x13\x1c\x7f\x00\xdc\x38\xa0\xbe\x59\x78\x33\x6d\x44\x87\xcf\xfe\x73\x79\x40\x7b\x20\xff\xbb\x06\x54\x5f\x1a\x35\xc1\x04\xa7\x61\x8c\xd2\x89\x77\x81\x7f\xe1\x04\x8b\xe7\x7f\xdd\x80\x1e\xba\x4e\xcf\xdc\x6f\x8b\x86\x31\xbe\xa8\xe6\x35\x5b\x24\xed\x8b\xa0\x2f\xdc\x6e\x8b\x7f\x63\xf9\x69\x99\x4a\xe9\xab\x51\xb3\x26\x28\xb5\xe5\x2a\x95\x32\x48\xf8\xd2\x9c\x8d\x2d\xa7\xa3\x4f\xe7\x59\x8e\x5b\xd7\xe6\x7d\x4d\xcf\x48\xe3\x76\x17\xd6\xea\xaf\xf4\x40\x97\x19\x6d\xb1\x1c\xe5\x47\x75\xba\x92\x6b\x66\x57\x56\x46\x79\x49\xf3\x79\xdd\x19\x66\xa7\xdd\x12\xbd\xea\xf2\x31\x77\x61\xcf\xaf\x50\xd8\x5d\x64\xe4\xb8\xc0\x75\xfc\xdf\x27\xd6\x1c\x6e\x0e\x57\x11\x64\x1b\xf5\x4e\xb7\xcd\x97\xea\xd7\x6e\x4d\xe0\xab\x5d\xa1\xbd\xbf\xb9\xa0\x51\xaf\x8e\xfc\x14\x7f\x43\x10\x04\xe1\x73\x39\x1f\xb5\x77\x0c\x
91\x66\xbb\x54\xe3\xdb\x23\xa4\x22\x8c\x90\xaf\xaa\xfc\x4e\xda\xa9\x69\x5b\x13\x5d\x9d\x1e\x6e\x44\x0a\x3c\x3f\x48\xea\x00\xd5\x30\xc9\xc3\x18\x5f\x95\xfe\x00\xe5\x9e\xda\x4e\xe0\x71\xf7\xbf\x89\x64\xca\x70\x72\xba\x5d\x6a\xff\x55\x75\x9c\x05\xb4\x27\x0f\xd1\xee\x9c\x6d\x98\x72\x77\x09\x86\xf4\xea\xa5\x56\x4f\x40\xbe\x9e\xaa\x3f\x23\xa7\xfa\x87\xef\xbb\x06\x37\x9a\xe6\x31\xdd\x7a\xb3\xe2\x37\x75\x6a\xc4\x55\xc1\x57\x6e\xe3\x7d\xac\x66\xe1\x4c\x2e\x69\x7a\x41\xac\xd8\x9a\x47\xde\xe4\x72\xf5\xaa\x94\xc7\x6a\x1f\xc5\xe6\x92\xfe\x17\x45\xbb\x6a\x81\x9d\x4b\x8b\x1b\xcf\xdb\x0f\x8a\x94\xea\x39\x61\x78\x45\x87\x6c\x5b\xe0\xbb\xc2\xae\xea\x39\x15\xa4\x51\x0f\x06\x43\xaf\x53\xaa\x17\x10\xd1\xb5\x21\xf4\x47\x57\xb4\x34\xbb\x18\x4b\x2e\xcf\x8e\x4e\x3c\x89\x22\xe2\x5a\xdc\x4c\xf6\x97\xc8\xdd\x2d\xce\x89\x84\x5f\x92\xb3\xe5\xd0\xb9\x3c\xbb\xca\xcf\xc8\xfe\xcb\xc4\x81\xaf\x0b\x68\x48\xef\x0d\x26\x6e\x26\x33\xe0\xcc\x92\x48\xb6\x6d\x1f\x4f\x2c\xbf\xa7\x6d\x5b\x85\x49\xb3\xfb\x43\x27\x49\xe4\xd9\x51\x88\x27\xd1\xae\xee\xd1\x3c\xcf\x08\xb0\x2c\x4d\x95\x76\xc3\x81\x69\xcb\x11\xc3\xf4\x04\x6e\x7d\xc3\x2b\xbf\x43\xd2\x3d\x4a\xec\x04\x0e\x90\xf3\x8b\x7d\xf8\xbb\xfa\x67\x12\xbf\x1f\xb5\x54\xf9\x19\xf9\xe2\x35\xfe\x12\x25\xac\x2a\x3f\x48\x4c\x55\x8e\x2d\xe0\xc1\xf5\xb6\xe2\xdd\x21\xb4\x69\x4d\xac\x47\xc9\xbd\xa7\xe5\x17\x3d\x02\xaa\xee\xd2\x24\x5c\x01\x77\xfd\x38\x05\xf6\xb4\x22\x7c\xfa\x4e\x15\xfc\x14\xc2\x94\x30\xad\xad\x57\xce\xcc\xbb\x74\xd8\x0b\x7f\xa2\x71\xaf\xf1\x2f\x1b\xda\xd9\x47\xbb\x37\x54\x27\xb7\xf5\x39\x39\xbf\xc8\x87\xbf\xbf\x74\x26\x63\xb8\x44\x7e\xbb\x3e\x4a\xac\x77\x34\xe3\x0d\x6f\x61\x02\xba\xbb\x2e\x71\x93\x74\xeb\x89\xc6\xfd\x2e\x79\xcd\xfd\x5c\x5b\xde\x32\x11\x81\x03\x13\xe3\x67\x18\xb1\x80\xe4\x32\x0c\xc8\xe9\xaf\x7b\x55\x40\x53\x51\x12\x80\xd6\x7b\x52\xb1\x84\xf3\x6a\x5e\x12\xcd\x93\x1d\xda\x0f\x33\x5f\x80\xde\x35\x21\x03\xd5\xe3\x48\xfa\x18\x3b\x9e\x51\x8b\x2b\xe5\x55\x6b\x3e\x46\xb6\x58\x32\x5d\x96\xe5\x20\xb1\x66\x9a\xf3\x85\x95\x4c\xa2\x73\x5a\xb1\x7b\x74\x37\xdf\x8d\x90\xcf\x02\xaa\x3d\x71\x55\x1d\x3e\x44\xc2\x20\xb5\x78\x71\xbb\x17\xf0\x19\x09\x8a\xfc\x8c\xec\x87\x78\x49\x33\x1d\x28\x4f\x80\x1b\xa1\xc4\x03\xc6\xed\x3d\x9d\x6b\x12\xdf\x38\x3b\xda\x52\x7d\x98\x75\x6f\x30\xec\x55\xbb\xa9\x86\x0c\xd7\x93\xc0\x94\xc3\x99\x98\xc6\x04\xc8\xb2\x0d\x1d\x27\xa9\x41\xaf\x32\x38\x5b\xa7\xed\x8b\x03\x2b\xa3\x5d\xc5\x1b\x64\x4f\xee\x07\x97\x68\x5f\x97\x38\x24\xca\xce\x09\xee\x67\xe1\x5b\x7a\xee\xc6\xba\x7f\x0d\x7e\x91\xea\xd5\x69\xff\xb6\xd2\x15\x41\xf7\x73\xa8\x2d\xc9\xa3\x13\x3d\x48\xda\x30\xd2\x57\xa7\x6f\x71\x3d\xd9\x47\xfc\xd1\xce\x70\x46\xfa\x9e\xf9\x66\x34\x39\xdd\x32\xed\xed\xc0\xb7\x84\xb6\xa3\x9a\xc6\xe3\x0d\x1d\xe4\x70\x5d\xfc\x40\x83\xf8\xca\xec\x87\x9e\x3b\x33\x15\xf1\xec\xef\xe3\x71\x55\x13\x5f\xdd\xf8\x4a\x58\x36\x5c\xaa\xe6\xc2\xf9\x14\x6d\xc2\x98\x5d\x55\x2b\xac\x51\x7c\xfd\x0e\x49\x94\x0f\xd3\xe9\xc0\xe0\xaa\x1e\x91\xd9\xae\x73\xd2\xa7\x3f\x2b\xf1\x11\xa1\x1d\xa4\x1e\xba\x00\xbe\x35\xc0\xcf\x89\x9e\x2f\xa1\x1e\x14\xe1\x97\x58\xc4\xd1\xe1\xca\xba\xee\x22\xb3\xc7\xc1\xd7\x7b\xc2\xb1\x64\xbf\x0e\x62\xfe\xc5\xf6\x47\xb8\xcd\x7b\xfa\x77\x2f\xf5\xbd\x49\xdc\x11\xc8\x0f\x19\xc6\x89\x68\x9a\xf3\xbb\xad\x7c\x81\xe6\xd5\x29\xc2\xd7\xaf\x32\x74\x81\xaa\x39\xc8\xf7\x7f\xfd\x0b\x79\x72\x4c\x4d\xf6\xed\xa6\x3d\xfd\xf8\xe1\xc2\xb5\xfb\xed\xdb\x33\x12\x5d\x51\x32\xe5\x78\x15\x77\xb9\xf8\xe8\xaa\xa2\xb9\x98\xce\xdc\x58\xec\xcf\xaa\x5e\x16\xe0\xac\x6a\x40\x84\x6f\xc8\xa0\x28\xb4\x85\x9d\x93\x21\x3f\x11\x82\x88\xbd\x11\xad\xca\x13\xc5\xb7\x4d\x94\xaf\x7c\xce\x76\xf4\x9e\x2d\x92\x6f\xb4\x85\x52\xa1\
x7e\xdc\x02\x42\xda\x42\x5e\x68\x0b\xf5\xac\xd0\x09\xec\x8a\x78\xa5\x8d\x3a\xd2\x6b\xe6\xb6\x2e\xd3\x16\x3a\xdd\x76\x29\xdb\xdd\xfe\x94\x13\xaa\x42\x57\x40\xb2\x7c\x27\xcb\xe7\x84\x0b\xfb\x68\xdb\x75\xc7\xf9\xe3\x24\x90\x8a\x79\x9c\x31\xce\xf9\x5c\xd9\x24\x8b\x92\xe4\xdc\x3e\xc1\xb4\x51\xa8\xb1\xf6\x13\xfd\x2b\x3b\x8a\x91\x96\xd8\x2f\x65\x7f\xb9\x1d\xfc\x72\x84\x59\xe1\x90\x25\xb8\xec\x30\xb7\x59\xe0\x7d\x52\xe9\x17\x9a\x21\x42\x98\x73\x5b\x84\xa4\xc1\x1e\xeb\x14\xc1\x14\xc7\x7f\x82\x41\xa2\x5d\xe3\x5d\x0e\x29\xae\x77\x34\x4d\xc7\x9d\xda\xb0\xd3\xaa\x22\x32\x70\xc1\xd6\xc5\x10\x79\xa1\x5b\x88\x64\xea\x96\x06\x5d\xe8\xe9\xf0\x7f\x01\x00\x00\xff\xff\xc1\xf2\x7d\x83\x87\xe4\x00\x00") + +func operation_fee_stats_3HorizonSqlBytes() ([]byte, error) { + return bindataRead( + _operation_fee_stats_3HorizonSql, + "operation_fee_stats_3-horizon.sql", + ) +} + +func operation_fee_stats_3HorizonSql() (*asset, error) { + bytes, err := operation_fee_stats_3HorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "operation_fee_stats_3-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9a, 0x77, 0x1b, 0x10, 0xa, 0x8a, 0x57, 0xe, 0x60, 0x23, 0xcb, 0xd, 0x53, 0x7b, 0x4, 0xee, 0xb3, 0xaa, 0xf1, 0xb3, 0x8d, 0x27, 0xb6, 0x16, 0xc0, 0xee, 0x76, 0x90, 0xc4, 0xe0, 0x2e, 0x1}} + return a, nil +} + +var _pathed_paymentCoreSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x69\x8f\xe2\xc8\xb6\x36\xfa\xbd\x7e\x05\xea\x2f\xd5\x5b\xd4\xde\x84\x1d\x0e\x0f\xdd\xb7\x8f\x64\x6c\x03\x66\x36\x83\x19\xae\xae\x4a\xe1\x70\xd8\x18\xb0\x0d\x1e\x98\x5e\xbd\xff\xfd\x8a\x21\x13\x92\x24\xc9\x4c\xa0\xaa\xfa\x9c\xd3\x96\xba\x3a\xc1\xcb\x6b\x3d\xf1\xc4\x5a\x2b\x66\xf3\xef\x7f\x7f\xf9\xf7\xbf\x33\xcd\x30\x4e\xdc\x88\xb6\x8d\x6a\xc6\xc6\x09\xb6\x70\x4c\x33\x76\xea\xcf\xbe\xfc\xfb\xdf\x5f\xb6\xf7\xd5\xd4\x9f\x51\x3b\xe3\x44\xa1\x7f\x14\x58\xd0\x28\xf6\xc2\x20\x23\xfd\x87\xff\x0f\x73\x22\x65\xad\x33\x33\xf7\xfb\xf6\xf1\x33\x91\x2f\x6d\xad\x93\x89\x13\x9c\x50\x9f\x06\xc9\xf7\xc4\xf3\x69\x98\x26\x99\xbf\x32\xe0\xcf\xdd\xad\x69\x48\x26\xaf\xbf\xf5\xec\x29\xfd\xee\x05\xdf\x93\x08\x07\x31\x26\x89\x17\x06\xdf\x63\x1a\x6f\xf5\xbe\x16\x26\x53\x6f\xab\x9a\x06\x24\xb4\xbd\xc0\xcd\xfc\x95\xf9\xda\xed\x14\xc4\xaf\x7f\x3e\xd9\x0e\x6c\x1c\xd9\xdf\x49\x18\x38\x61\xe4\x7b\x81\xfb\x3d\x4e\x22\x2f\x70\xe3\xcc\x5f\x99\x30\x38\xe8\x18\x51\x32\xf9\xee\xa4\xc1\xde\x96\x15\xda\x1e\xdd\xde\x77\xf0\x34\xa6\x2f\xcc\xf8\x5e\xf0\xdd\xa7\x71\x8c\xdd\x9d\xc0\x12\x47\x81\x17\xb8\x7b\x91\x28\x5c\x7e\x8f\x29\x49\x23\x2f\x59\x6f\x95\x3b\xce\x9f\x07\x02\x28\x8e\xc8\xe8\xfb\x0c\x27\xa3\xcc\x5f\x99\x59\x6a\x4d\x3d\xf2\x6d\xcb\x18\xc1\x09\x9e\x86\xee\x9f\x5f\xbe\xa8\xad\x46\x33\xa3\xd7\x55\xad\x9f\xd1\x0b\x19\xad\xaf\xb7\x3b\xed\x83\xe4\x7f\xd2\x99\x1b\x61\x9b\x8e\xbc\x38\xb1\xd6\x31\x9d\xff\x79\x55\x3a\x26\xb3\x79\x1a\x46\xa9\x1f\x7f\x4c\x98\x06\x8b\x8f\x48\x4e\xa9\xed\xd2\xe8\x23\x92\x5b\x9c\x0e\xa5\x1f\x94\xfc\x80\x98\x45\xe3\x24\x74\x1c\x1a\x79\x81\x4d\x57\xd7\x65\x31\x21\x61\x1a\x24\x16\x9e\xe2\x80\xd0\xf8\xcf\x2f\x72\xb5\xa3\xb5\x32\x1d\x39\x5f\xd5\x4e\xa4\x1b\xf5\xea\xe0\x02\xbd\x61\xb4\xce\xec\xb4\x2b\x8d\x7a\xbb\xd3\x92\xf5\x7a\xe7\xe4\xa1\x97\x82\xdf\x67\x13\xba\xfe\x88\xfe\x64\xf5\xbe\xea\x67\x99\x4f\x68\x75\xe8\x07\x30\x9f\x8a\x7d\x5c\x77\x94\xc6\xc9\xd4\x0b\x68\x7c\x4d\xf3\xb3\xd0\x87\xf5\x6e\x51\xd0\x5d\x36\xb8\xa2\xf7\x28\xf4\x71\xbd\xcf\x2e\x7f\x4d\xef\xb3\xd0\x87\xf5\xee\xe5\xbd\xc0\x09\xaf\xe8\x3d\x0a\x7d\x58\xef\x2c\xb5\xe2\xd4\xba\xa2
\x73\x2f\xf0\x19\x7d\x53\x2f\x1e\xcd\x53\x9a\x5e\x63\xf6\x54\xec\xe3\xba\x29\x8d\xae\xd1\xba\xbb\xff\x61\x6d\xbb\x30\xbe\xa6\x6e\x2f\xf0\x61\x7d\xfb\xac\x34\xa2\xd8\xbe\xae\xf6\x85\xdc\x0f\xd6\x7e\xc8\x94\x74\xfe\xfd\x83\x66\x2c\x1c\x5c\x51\x6e\xe1\xe0\xc3\x80\x0f\xd9\xef\x1a\xd6\x27\x91\xcf\xea\xdc\xf6\x01\xde\x57\xbb\x95\x3a\x68\xde\xc9\x9e\x2b\xbe\x98\x72\xaf\xcb\x3e\xa7\xc6\xf7\xc4\x8e\x89\xee\x1d\xc9\xe7\xc4\x75\x5d\xee\x98\x88\xde\x91\x7b\x4e\x2c\xef\xca\x7d\x08\xdf\x31\xa1\x5c\x97\xdb\x27\x89\x77\x65\x9e\x43\xfe\x1d\xc9\x6d\x1c\x5f\x17\xd9\xc7\xe6\x75\x99\x17\xa1\x70\x5d\xd4\xc2\xc1\x75\x81\x27\x57\xfd\x90\xd4\xd6\xf3\x0e\x82\x5a\xbf\xa3\xd5\xdb\x7a\xa3\x7e\x2a\x3c\x9d\xb9\xf1\x7c\x7a\x90\x68\x2b\x25\xad\x26\xbf\xd2\xf5\xe7\x97\x7d\xdf\xb8\x8e\x7d\xfa\xc7\xd3\x77\x99\xce\x7a\x46\xff\x38\x3c\xf2\x67\xa6\x4d\x46\xd4\xc7\x7f\x64\xfe\xfd\x67\xa6\xb1\x0c\x68\xf4\x47\xe6\xdf\xbb\x2e\xb3\xd2\xd2\xe4\x8e\xf6\xa4\xf9\x49\xdf\x97\x17\x1a\x5f\xde\x3c\x28\x56\x1a\xb5\x9a\x56\xef\x5c\xd1\xbc\x17\xc8\x34\xea\x2f\x15\x64\xf4\x76\xe6\xeb\x53\xff\xf6\xe9\xbb\x78\xa7\xe4\xeb\xb9\xe5\xa7\xe2\x1f\x6c\x3e\x33\xf4\x6e\x79\x5e\x70\x59\x6f\x74\xce\xf8\xcc\xf4\xf4\x4e\xe9\x19\xd6\x69\x87\xf6\x85\xf9\xa3\x96\x33\x20\x9f\x29\xfc\x2b\x25\x3b\x02\x9a\xd5\xdc\xcc\xdd\x8e\x62\x66\x51\x48\xa8\x9d\x46\x78\x9a\x99\xe2\xc0\x4d\xb1\x4b\x77\x34\x7c\xb0\x03\xbe\x15\xb3\xa9\x83\xd3\x69\xf2\x3d\xc1\xd6\x94\xc6\x33\x4c\xe8\x76\x34\xf1\xf5\xec\xee\xd2\x4b\x46\xdf\x43\xcf\x3e\x19\x20\xbc\x28\xec\xa9\x43\x1e\x8a\xb9\x73\xdd\x63\x21\x9f\x1c\xe0\x12\xe1\x7b\x2f\x3f\x4d\xba\xbf\x7f\xc9\x64\x32\x4f\xdf\x78\x76\x86\x8c\x70\x84\x49\x42\xa3\xcc\x02\x47\x6b\x2f\x70\x7f\x47\xfc\xbf\x76\x75\x53\xef\x56\xab\xdf\x76\xd2\xdb\x07\x03\xec\xd3\x0b\xc2\xa2\x78\x49\x78\x81\xa7\xe9\x25\x69\x86\x61\xcf\xc5\xa7\x38\x4e\xfc\xd0\xf6\x1c\x8f\xda\x19\x2f\x48\xa8\x4b\xa3\x67\x91\x2f\xff\x3a\xaf\xfb\xe7\x28\xbe\x93\x8b\xf8\x26\x22\x0e\x03\x81\x8c\xe5\xb9\x5e\x90\x9c\xdd\x8c\xe9\x3c\x48\xfd\xcb\xf7\x82\xd4\x8f\x53\x8b\x06\x49\xb4\x1d\x0a\x9e\x17\x73\x2f\xe3\x05\xce\x14\x6f\x47\x8c\x36\x8d\x93\xcb\x70\xf6\x82\xa3\xd0\xa7\x76\xe8\x63\x2f\xb8\x20\xc5\x71\xe7\xa0\x93\x51\x44\xe3\x51\x38\xb5\xe3\x4c\x42\x57\xe7\xc8\x9c\x29\x76\xdf\x42\x74\xb5\x6e\x0e\x8c\xa4\x5b\xab\x53\x0f\x5b\xde\xd4\x4b\xb6\x85\xdb\x97\xff\x89\x92\xe9\xf4\xda\x6d\xcf\x0d\xb6\x7d\xa1\x2d\xac\xfd\x37\x27\xbd\x81\xe7\xae\xc5\x81\xf4\xef\xbb\x61\x75\x46\x29\x69\x4a\x25\xf3\xfb\xef\x4f\x55\xf1\x5f\x7f\x65\xc0\xbf\xfe\x75\xe5\xe9\x73\x80\xe7\x7a\x5e\x15\xe0\x3d\x8d\x2f\xea\xf2\x4c\xdb\xcb\x7a\x7e\x4f\xd3\x6b\x7a\xce\xd4\x5d\xe0\x6f\xaf\xf3\x75\x60\x6c\xdb\xbf\x5b\x63\x62\xdb\x65\xdc\x87\x43\x10\xda\xf4\x34\x16\x5e\xc4\xc0\x6b\xa3\x2f\xdb\xe7\x5b\xcd\xbf\xec\x18\xef\x81\x1c\xbe\xc3\xf1\xe8\x04\x0c\xff\xca\xb7\x67\x11\x5d\xbc\x2b\x64\xa5\x64\x42\x93\xa9\x17\x27\xef\x8a\x3e\xf7\xb6\x9f\xdc\x7d\xff\x35\x99\x86\x31\x4d\x3c\xff\x8d\xc8\xdf\x25\xd6\x0b\xb1\x75\x52\xe7\x2f\x3b\xf5\xcf\xfa\xce\xea\xfb\x68\xe7\x0d\xd7\x79\x6b\x6c\xf0\x52\xcd\xb1\x14\x6f\x79\xcb\xa1\xf3\x75\x6b\x8d\x1d\x06\x5e\xbf\x3f\x07\x39\x8d\x3e\x98\x41\xf7\x33\x2f\xf6\x5b\x19\x74\xe7\xee\x38\x8e\x69\x72\x89\xcf\x7d\xac\xbe\x79\x1b\xfb\xdb\xb0\xba\xac\x7a\x16\x79\x84\x06\x6f\x24\xb1\xdd\xcd\xb7\x32\xdc\xee\x66\xc6\x0e\x53\x6b\x4a\xb7\xfe\x46\xbc\xdd\x8c\xe4\x43\xb3\xe8\x49\x0d\x1f\x86\xac\xfb\xb2\x9c\xd5\xeb\xa1\x80\x6f\xf8\xc6\xe1\xc9\x03\xc3\x67\x8f\x3e\xf1\xfe\x96\x43\xec\x3b\xec\xb7\xfa\xc3\x7e\x58\xbf\x77\x07\x6f\x76\xa9\xe1\x47\xaf\x22\x37\x8c\x92\x67\x36\x54\xad\x20\x77\xab\x9d\x0c\x38\x6f\x36\xe9\x2a\xc1\x49\x42\xfd\x59\x92\xd
9\x86\x45\x9c\x60\x7f\x96\xd9\x76\x99\xc2\x74\xff\x4d\x66\x13\x06\xf4\x75\x63\xeb\x60\x6f\x9a\x46\x27\x4d\xed\x5b\x16\x92\xf5\x8c\xbe\x5f\x29\xfb\x69\x89\x13\xbd\xaf\xd3\xfe\xb3\xc5\x37\x6a\xe7\x30\xb3\x11\x46\xe7\x95\xfa\xfb\x8e\x89\xff\xca\x80\x7f\x65\xe4\xba\x9a\xd9\x7f\xfc\x7f\xfe\xca\xf0\x08\x41\xf4\xaf\x8b\x75\x75\x3a\x0c\xbb\xb9\xca\x4e\x67\x79\x4e\x73\xee\x1b\x6c\xec\x27\xda\xb6\x51\x77\x11\xd0\x76\xec\x78\x07\x94\x38\xb5\x0e\x20\x22\x1a\xbf\x68\x80\xe0\xc5\x1e\x63\x44\xf1\x73\x2c\xbd\xc6\x73\x32\xe6\xbd\x15\xd3\xc9\x64\xdd\x07\x5a\xc6\x3d\xb0\x79\x4c\xaf\xb5\x30\xaf\x71\x9e\x8c\xe1\x6f\xc5\x79\x54\xf1\x71\x9c\xaf\x1a\xb9\xb3\xfb\x34\x58\xd0\x69\x38\xa3\xef\x34\x69\x47\xd3\x77\x34\x44\x27\xd3\x1d\x77\x50\xf0\x34\x5f\xfb\xfb\x47\xea\xe1\xe8\x45\xef\x11\x31\x7f\xa3\xa1\x79\x49\xc2\xd3\x3c\xf0\x0b\x8d\xe7\x44\xbc\xb0\xf6\x26\x19\xc7\x39\xa2\x9b\xc9\x38\x4e\x8a\xff\x7e\x8c\xdb\x97\x83\xb7\x0b\x31\x75\x2d\xba\x4f\x66\xb8\x6e\x45\x75\xb2\x04\x70\xcb\xb0\x6b\xd7\xe2\x5f\xc9\xd4\x5e\x1c\xa7\x34\xfa\xb8\x2a\x12\xda\x17\x47\xa7\xaf\x68\x49\xa6\x9e\xef\xbd\xd1\xa3\xb8\x3a\x16\xfc\x95\xa3\xaa\x13\xef\x3c\x59\x55\xb9\x69\x14\x75\xfa\xfc\xa3\xc6\x51\x27\x3a\x6f\x1f\xff\x5c\xd3\xba\xaf\xb4\x33\x4d\x87\x9a\xfc\xaf\xcb\x81\xf7\x62\xba\xf7\x66\x27\x3f\x5d\x43\xdb\xbb\x79\xb2\x7a\x91\x8a\x3f\x30\xde\x38\x77\xc0\xd5\x6e\x95\xf2\xcd\xbb\x64\x84\x03\x97\x5e\x1c\xd8\x9f\x92\x73\xba\x6c\x77\x7b\xae\x3e\xce\x9d\xdf\x4e\xd1\x4f\xe6\xc7\x0a\xed\xf5\x25\x72\x92\x55\x44\xe3\x74\x7a\x31\xbb\x27\x2b\x9f\xbe\x3b\x9e\x3b\x2e\xb1\xde\xce\xe7\xd9\xba\xc5\xad\xa4\x9e\xad\x38\xff\xfe\x21\xe2\x0e\x0f\x5d\x63\xef\x20\x72\x89\x88\x8f\xb9\xdd\xd9\x0a\xf7\x2d\x44\xa9\xdb\x91\xb5\x13\x46\xef\x4c\x86\x66\x54\xb9\x23\xbf\xc3\xd9\x75\x95\xf1\xa7\xf5\xe9\xf5\xb6\xd6\xea\x64\xf4\x7a\xa7\x71\x9c\x54\x34\xe5\x6a\x57\x6b\x67\x7e\xff\x5a\xcc\xb7\x9a\x83\x92\x5e\x65\x15\x1d\x16\xea\x06\x97\xef\x57\x0b\xb5\xba\x5a\x2d\x94\xbb\xf5\x66\x97\x2d\x0d\xe0\xb0\x56\x68\x97\x1a\xf5\xae\xa2\x35\xe4\x76\x4f\x30\x14\xa1\xd1\x67\x4b\x5f\xbf\x65\xa4\xc3\xc5\xed\xff\x87\x00\xf8\x96\x41\xdf\x32\xe0\xdb\x9e\xe5\xcc\xd7\xaf\xdf\x32\x5f\x65\x43\x96\x65\xf9\xaf\xbf\xbe\xee\x6e\xb0\x4f\xf7\x8e\xff\xfe\xeb\xcf\xf7\x10\xca\xa8\x97\x6f\x0e\x64\x34\xe0\x7a\xb2\x56\xea\xf7\x5a\x6c\xb7\xd2\x60\xbb\x0d\x2e\xdf\x2d\x96\xba\x86\xc0\x69\xdd\x66\xa5\x51\x67\x8d\x92\xc9\xf5\x5a\xa5\x86\xde\xaa\x57\x2a\x25\xf6\x88\x70\x0b\x4c\x44\xa2\x24\x41\x0e\x49\xf0\x5b\x86\xb9\x82\x10\xde\x82\x50\x31\x9a\x83\x62\x89\xab\x20\xa1\x9f\x57\xb5\x7a\xa5\xd2\x47\xa8\xa2\x76\x7a\x8d\x4e\x05\xf5\xd4\x5e\xcb\x68\x94\xd8\x6a\x49\x53\xfb\x50\xab\x98\xba\xd1\xaa\xd6\xb4\x76\x31\x5f\x7c\x46\x28\xbe\x40\xc8\x5d\xe7\x90\xbb\x09\x21\x0b\x8d\x02\x5b\xea\x6a\x88\x95\x6b\xfd\x6e\xa1\x5b\x82\xf2\xa0\x2c\xf7\xfb\xc5\x7e\xdf\x64\xcd\x52\x7f\x30\x68\xf1\xda\xa0\xaf\x75\x9a\x15\xb5\x3f\x6c\xcb\x3d\x5e\xe8\x37\xb8\x9f\x88\x30\xdf\x2f\x1a\xe5\x9e\x59\xed\x35\x06\xa5\x42\xd5\xec\x54\x7a\x26\x2a\x14\x4b\x32\xac\xd6\x07\x03\xb6\x6c\x54\x6a\x42\x43\x2e\xcb\x5d\xcd\x28\x74\xf9\x6a\x53\x69\x6b\x05\xb3\xdf\xa8\x3f\x23\x14\x5e\x20\x44\x27\x35\x79\x01\x21\xba\x89\xc3\x7e\xa5\xc8\xb7\xea\x5c\xa3\xae\x6b\x4d\xa5\x56\x2f\xe4\x05\xc8\xca\x1c\xe4\x87\xa8\x59\x57\xdb\xad\x6a\xb1\x57\x11\x8a\xf9\xaa\x52\x33\xaa\x7a\xa1\xc1\xb5\x05\x6d\xd0\x33\xbb\x6f\x73\x78\xcd\x0f\xf9\x8b\x08\xdf\xc8\x0d\xe7\x73\xaa\x77\xa4\x99\xb7\x67\x4a\x3f\x9b\x6b\x5e\xce\x96\x3e\xd3\xc8\x43\x5b\x12\x1d\x04\x79\x4a\x79\xd1\x66\x2c\x56\xb0\x90\x25\x4a\x0e\x0b\xb1\x83\x20\xc3\x58\x02\xe2\x25\xcc\x72\x0e\x76\x18\x0e\x40\x6c\x03\x0b\xb1\x16\x0f\xa1\x05\x
04\x8b\x4a\xd2\x96\x2a\x70\xe7\xb5\xd5\x81\x04\x16\xb3\x14\xb2\x8e\xc3\x72\x22\x06\x82\x05\xa8\x00\x1c\x9b\x71\x78\x1b\x32\x22\x61\x1c\x4c\x6c\x16\x58\x3c\x21\x40\x24\x10\xda\x48\x10\x10\x8b\x24\x91\x17\x19\x16\x61\x86\xff\xba\xab\x3f\xb0\xad\xb8\xbf\xed\x95\xef\x57\x3c\x6e\x9d\x5b\xb7\x2b\x79\x41\x0d\x54\xa9\xc4\x82\xd5\x38\x9f\x8d\x81\x9b\xc4\x4b\x7d\xb9\x61\xfa\x76\xbb\x37\xc0\xf9\x32\x2e\xb8\x5b\x79\xad\xce\x55\xf1\x66\xc6\x1a\xef\x6a\x1e\xca\x7d\x86\xdb\x89\xe5\x27\x3f\xa1\x20\x0f\xbd\xbe\x9e\x85\xfa\x1b\x8e\x0a\x1d\x09\x11\xe2\x10\xc6\xb6\x00\x10\xa1\xe8\x40\x1e\x01\xc2\xb0\x44\x60\x08\x10\x78\xc4\x10\x47\xe4\xa9\x44\xa1\x6d\x41\x82\x88\xe8\x88\x2c\x10\x91\xc8\x31\x94\xc3\x0c\x03\xb7\xed\xcf\x43\x9c\xdd\x22\x22\x82\x96\xc0\xf3\x2c\xe0\x78\x81\x23\x22\x47\x44\xcb\x82\x94\xc5\x10\x49\x0c\x94\xb0\x48\x20\x82\xc4\xe1\x30\xc7\xd9\x50\xb4\x11\x12\x18\x0e\x71\x3c\x61\x79\x24\x61\x04\xbf\xee\x9a\x5d\x06\x21\x09\x09\x92\xc0\xf3\x07\x8f\x55\xd8\xe6\x70\xcc\xd4\x53\x14\x02\xab\x2c\xf4\xb8\x60\xdd\x58\x74\x57\x45\x68\xce\xc2\x49\x76\x51\x90\x1b\x89\xc2\x54\xd8\x9a\x90\x17\xf8\xe1\x28\x70\x2c\x4d\x9d\x95\x9a\x03\x9b\x43\x81\x41\x3a\x42\x7f\xe2\xe8\xb5\x65\xbb\x2f\xb9\x05\x55\xca\x3a\x3d\x9c\x08\x0c\x6f\xe8\x3d\xb2\x63\xb8\xdf\x34\x6b\xf6\xce\xa3\xf4\xe7\x7f\xf6\xe9\x2d\x3e\x7e\x5e\xca\x4d\xe3\xe0\x3b\x86\xe4\x8b\xd5\x76\x51\xb4\x50\x0f\x58\xe9\x4a\x4b\xe6\x71\x62\x1a\x1b\x41\x37\x73\xa5\x48\xf5\x8b\x5a\x62\xcb\x9c\xaf\x68\x6a\x7f\x2d\x8e\x3a\x09\x3b\x70\x5b\x41\x47\xef\xe8\xe9\x86\x9b\xd7\x87\x1a\x9a\xd7\xea\x9d\x0d\x98\x69\x80\x63\x4c\xd2\x35\x8b\x4b\xbf\x87\xbb\xcb\x9d\xa9\x0b\x1e\x5d\x02\x97\xbc\xe2\xc9\xa3\x55\x50\xfe\x81\xbe\xf7\x63\xae\x0f\x7a\x34\x95\x24\x81\x08\x3c\x86\x9c\x20\x59\xa2\x44\x58\x87\x50\x11\x43\x2a\x30\x90\x73\x20\x74\x28\x45\x9c\x2d\xd8\x88\x81\x3c\x6f\x59\x92\xc3\x10\x09\x72\x44\x64\x21\x62\x6c\x8b\x05\x78\xeb\x8d\x8f\x88\x0a\x02\xb6\x29\xd6\xe6\x31\x22\x88\x5a\x12\xe2\x81\xc4\xf3\x84\xe5\x6c\x28\x38\x94\xe5\x2d\x56\x84\x94\xb5\x24\x96\xb5\xb0\x20\x31\x16\x20\xd4\xb6\x18\x86\xb2\x0e\x00\x8e\x23\xed\xd2\x37\x3c\xf1\x68\xe1\xc9\xa3\x37\x59\x73\xd3\x5c\xda\xb1\xec\x72\x22\xdb\xad\x69\x31\x21\x79\xb6\x4b\xb2\x45\x9e\x36\xad\x66\x61\xdd\x74\xdd\xd1\xc0\xa0\xed\x91\xb6\x1e\x42\x6d\x82\x86\x7a\x85\x71\xaa\x5a\xbb\x4f\xaa\x53\x2d\x07\xc0\x46\x8e\x01\xaf\x48\x74\x32\x63\xca\x56\x8e\xa5\x51\x7d\x4d\xd2\x9d\x1b\xed\x3d\x7a\x79\x64\xbc\xed\xf7\x06\x03\xa0\x75\x27\xf9\x5e\x8d\x73\x73\xd9\x49\xe0\x76\xbd\xf1\x52\x37\x85\x46\xa4\xa0\xe1\x24\x57\x2b\x0d\x8a\xe5\xb1\xa6\xc5\x50\x06\xab\x71\x6f\x46\xfa\xd1\xb4\xa7\xf4\x2c\xa5\xd3\x71\xb2\xe5\xa8\xa2\x72\xd1\xc4\x4b\x83\x89\xa5\x36\xd8\x55\xc9\x13\xe5\x1c\xda\x6b\xae\x5d\xf0\xd8\x86\x76\xa9\xd6\xff\x17\x78\x2c\x07\xa9\x85\x81\xc3\x52\x89\x93\x30\x11\x25\x07\x60\x56\x72\x20\xb6\x04\x16\x70\x18\x61\xd6\x61\xa1\xcd\x00\x8c\x25\x8c\x28\xa0\xa2\x85\xa1\x6d\xd9\x14\xf2\x98\x17\x25\x9e\xdb\xe5\xcf\x47\x78\x3d\x61\x39\x24\x70\xdb\x4c\x2b\x52\xc2\x11\x6a\x49\x3c\x63\x13\x8a\x19\x07\x39\x00\x70\x02\x96\x90\x8d\x24\x4c\x2c\x0b\x8b\x94\xa7\x8c\xc4\x63\x20\x4a\xc8\x61\x6d\x87\x25\xa2\xb8\xf5\x7a\xee\xc4\x63\xc5\x27\x8f\xcd\xfa\xfd\x15\x1b\x02\x6a\x79\xa4\x2a\xf2\x95\x66\xa0\x01\x71\x93\x4b\xcd\xba\x63\x6a\x2c\x16\x82\xd5\xba\xd3\xd1\x75\xa6\x64\xe9\x95\xd6\x4a\x5b\x27\x52\xdb\x9e\xa0\x65\x76\x2d\xb5\x48\x3d\x46\xc5\x54\x5f\xc1\x39\x6a\x2b\x26\x4b\xaa\xdd\x99\x10\x26\x5d\x50\xce\x76\x06\x3b\x86\xb7\x1e\x4b\x4f\x18\x1f\xd2\xa9\xde\x6c\xc3\xd8\x18\xc9\x4a\x5a\xd1\x27\x31\xe4\x78\x06\x89\x2d\x58\x96\x98\x49\x34\xcf\x96\xc5\x1c\xf5\x8b\x93\xd5\xa2\x1d\x29\xad\
x7e\x3b\x5c\x72\x82\xb6\x89\xa6\x23\xd8\x08\x9d\xfe\x32\x5f\x9a\x9b\xcc\x30\xaa\xa6\x21\x4a\x8b\xbd\x50\x9f\x3a\x55\xa7\xaa\xbb\xbb\x1c\x6f\x5c\xf0\xd8\x6e\xf7\x52\xad\xff\x2f\xf0\x58\xc4\x4b\x82\x48\x6d\x24\xf1\x0c\xb5\x1d\x28\x8a\x2c\x43\x2c\x16\x48\x90\xb5\x6c\x87\x15\x1c\x8a\x00\x66\x19\x8e\x61\x19\x96\x03\xac\x8d\x2c\x07\x61\x16\x08\x58\x60\x6d\x44\x24\x1e\x6d\xbd\xed\x11\x5e\x4f\xa8\x2d\x3a\x22\x16\x28\xb5\x05\x0a\x18\x8e\x05\x80\xd9\x66\x63\x69\xeb\xd9\xac\xc0\x43\xc4\x42\xc8\x70\xc8\x16\x10\x4f\x2d\x1b\xb0\x90\xa5\x36\x81\x58\x60\x30\x2f\x31\xfc\xb6\x7b\x8b\x4e\x3c\x56\x7a\xf2\x58\xd0\x8c\xc2\x66\x75\xd6\xc6\xeb\xb2\x58\xa9\x38\x8d\x88\xb8\xed\x69\xb8\xd0\x25\xad\x32\xc7\x7d\x97\xaf\x86\xd2\x82\xd6\xe7\xb8\x3c\x2c\x77\xda\xf2\xb0\x86\x26\x65\xac\x8f\x61\xc1\x9f\xfb\x8a\x46\xbc\x9e\xd6\x69\x65\x8d\x5c\x23\x96\x04\x47\x37\x39\xb9\x81\xcd\x56\x79\xb1\x4f\x6f\x3b\x8f\x3d\x71\xa2\x9a\x37\x33\x5d\xd5\xaa\x36\xf2\xcb\x56\x38\x2d\xe5\xe3\x4e\x3b\xf6\x57\x85\xfa\x86\x76\xfa\x50\xb6\xe7\xc3\x76\x3f\x81\xd5\x62\xad\xe0\x8f\x1b\x6c\xd3\x0b\x84\xc4\x71\x0b\x8a\x9c\x5f\x37\x3d\xe2\x8e\xcb\x6c\xbd\x5d\x5b\xb5\x6c\xbb\xc7\x4b\xf2\xb8\x9a\x2c\x79\x82\x71\x6b\xb8\xf3\xd8\xee\x05\x8f\x35\xb9\xb3\x0a\xcf\xcb\xff\x3b\x3c\x56\x62\x79\xdb\x61\x19\x01\x61\x4e\xa0\x82\xc3\x11\x44\x59\x01\x42\x41\xe2\x89\x65\x71\x98\xf0\x96\x68\x21\x80\x28\x67\x63\x11\xf1\x12\x01\x36\xcf\x8a\x9c\xc4\x58\x92\xcd\x08\x0c\x72\x76\x83\xa9\x07\x78\x3d\x11\x11\x02\x92\xe5\x60\x01\x70\x16\x8b\x2d\x51\xb4\x1d\x01\x3b\x12\x85\x70\x9b\x87\x05\x5b\x14\x05\x46\x00\x1c\x03\x6d\xe8\x58\x90\x73\x18\x81\x17\x45\x49\xb4\xa8\x23\xf1\x58\xa0\x5f\x77\x83\xe6\x27\x8f\x15\x9e\x46\x66\x0a\x83\xfb\xe3\xa4\xd7\x2b\x25\x22\xe7\x8e\xd6\x6b\xa5\x53\x91\xc4\x20\x37\x55\xbc\x42\x5e\x6f\x1b\x0a\x63\x49\x58\x2f\xcd\xd6\x0c\x99\x4e\xa7\x9e\xcf\xcc\x59\x83\x81\xd6\x68\x64\x0a\xec\xa0\xec\x8a\x93\x38\x32\x84\x51\x43\x64\x67\xeb\xec\x60\xee\x4d\x55\x5d\x16\xa5\xb5\xb6\x73\xa3\xbd\xc7\xba\x47\xc6\xed\x70\x56\x9c\x0f\x73\x05\x77\x23\xae\x5c\x89\x71\x94\x65\x27\x5e\xac\x74\x3f\x96\xd2\x49\x4f\x80\x4a\x89\x2a\x49\x3e\xad\xfb\xf5\x5e\x55\x37\x0d\x3f\x37\x5b\xb6\xd7\x73\x6e\xec\xd0\x45\x30\x6e\x5a\x7a\x89\xd5\x49\x3f\x9f\x6f\x76\xb2\x9b\xce\xca\x0e\xbd\xc1\x62\x31\x9d\x3b\x3b\xfd\x83\x0b\x1e\xdb\x27\x67\x15\xfe\x3f\xc2\x63\xdf\x98\x97\xb8\xb0\x11\xe8\xb3\x13\x12\x87\xcd\x40\x8f\x98\x72\x62\x9e\x86\xfd\x5d\xd3\xec\x1e\x1c\x20\x6c\x2e\x07\xb9\x82\x9d\x23\xfa\xba\xde\x9d\xe7\xe8\xbc\xd4\xd8\xcc\xe6\xc2\xc0\x0e\x97\xc4\x69\x6f\x26\x25\x67\x58\x30\x5b\x4a\x85\xd5\x26\x7f\x7d\x7d\x7a\xde\x34\x3b\xad\x43\xe1\x13\xcb\xed\xb7\x78\x4d\x08\xd5\x2a\xa8\x1a\xd9\xe5\xa0\xad\x48\x9b\xfe\xa2\x6f\x76\xe0\xca\x6b\x7a\x83\xb4\x6d\x31\xea\xc2\x37\xaa\x54\xdc\x3e\xcf\x3c\x4d\x66\xec\xd0\x30\x4f\xb3\x11\xfc\xdb\x14\xbe\xde\x3a\x73\xc7\x3c\xd1\x9b\x7b\x3b\xee\xd3\x79\xbe\x3d\xe3\x0e\x6d\x6f\x6c\xae\xb8\x43\xe3\x1b\xdb\x20\x3e\xeb\x87\x27\x5b\x21\x4e\x26\x17\x3b\xad\xd6\xc0\x90\xfb\x85\x7c\xad\xd8\xab\x9b\x4d\xc5\x2c\x09\x72\x57\xcb\x23\x41\xe9\xb5\x58\xbd\x63\x36\xbb\x95\xee\x40\xab\x2b\xed\x2a\xdb\x10\x1a\x6c\xb5\xab\xd5\xf2\x0a\xbb\x1f\xdc\xef\x7d\xa9\x42\x06\x9c\xab\x20\x23\xae\x0f\x93\xbe\x37\x77\xb8\x8a\xa6\x42\x2e\x6a\x01\xcf\xee\x03\xb3\x93\x2f\x7a\x93\x45\x67\x91\x82\x74\x54\x3c\x86\xdc\x6e\x2c\xae\xec\xfe\xdc\xe6\x16\xcd\x2b\xda\x52\x6c\x34\x26\x44\x1a\xc1\xb1\x6f\xe7\x57\xcd\x84\xb6\x44\x77\x93\x2f\x3b\xac\xdc\x6d\x42\x24\x0d\xe7\x70\xd1\x9f\xc9\xa3\xe1\x6e\x38\x94\x27\x52\x77\xc5\xee\x9e\x77\x9f\xff\xd9\x25\x22\x65\xf9\xfc\x59\x95\x65\x49\x39\x49\x5d\x79\x69\xa0\xc0\xb6\xaa\xc8
\xed\x42\x90\x1b\x25\x95\xa9\x14\x4c\x2c\x6e\x83\xe0\xa2\xaf\x74\x20\x27\x19\xad\x72\x4e\x2f\xf0\x53\x6d\x60\xd6\x76\xea\x42\x65\x19\x17\x04\xa5\x04\xb0\xed\x2b\xeb\x54\x60\xf8\xba\xb2\x6e\x56\x87\x8d\x76\x3d\xd0\x26\xd5\xe6\x90\x13\x7a\xfc\xcc\xcd\xad\xa7\x40\x6a\x75\xfb\x6c\x4e\x2e\x7b\xed\xca\x52\x8f\x96\x92\x38\xb2\x43\x14\xd5\xa6\x20\x98\x12\x5f\xd7\x72\x2b\x12\x17\xf9\x7e\x14\x6d\xf2\xee\x5f\x7f\x9d\x37\x91\x0f\xae\x1a\x78\x57\xd5\xd4\x5e\x56\x8d\x9a\x0f\x48\xbb\x33\x9d\xb8\x91\x29\xc6\xad\xb2\xbd\x4c\xbb\x9d\x5c\xa7\x53\x53\x36\x9d\xb0\xca\xf0\xed\xa0\x3b\x5d\xe4\x86\x7c\x0c\xcb\x6b\x41\x7e\xae\x1a\x78\x96\x62\x3f\x4d\x3d\x5c\x7a\x72\xa5\x4b\x7c\x87\x19\xdb\xac\x4f\xb9\xfc\x22\x5f\x2a\xb6\xcc\x4a\xd2\x9f\xa8\x4b\x73\x99\x88\x26\xcd\x85\x12\xd2\xf2\xeb\xd0\x4c\xad\x89\x07\x0a\xe3\x89\x3d\x34\x56\x31\x6f\x54\x8c\xf2\x50\x09\x47\x59\xab\x50\x57\x04\x7e\x4a\xdc\xa5\xd5\x69\xe6\x1c\x30\xf8\x09\xd4\x73\x77\x51\x6f\x9c\x53\x5f\xd2\x3a\x15\xc8\x94\xd9\x4e\xa0\x0a\x55\xa6\xb0\x64\x37\x13\xcc\x8d\x4b\x34\x9a\xea\xe5\xfe\x70\x99\xb4\x83\xd4\x63\x3a\x46\x80\xea\xee\x33\xf5\xaf\x3a\x94\x9f\xa5\xbe\x5a\x1c\x41\x52\x5e\x8b\xd2\x20\x9e\x69\x9d\x0d\x35\x2c\x54\x18\xb2\x86\x10\x6f\xe4\x76\xdf\xcf\xd5\x41\x98\x48\x92\xbb\x52\xb4\x51\xd3\x2f\xe4\xea\x82\xaf\x76\xd5\x61\x05\x56\x10\xcf\x08\x6d\xab\x69\xd8\x12\xb3\x28\xe1\xd8\x0a\x9d\x99\xdb\x04\xc6\x20\xdb\x5f\x38\x8a\xf1\xc3\xa9\x47\x77\x51\xdf\x3d\xa7\xbe\xae\xe7\x27\x1b\xdf\x98\x86\x5e\x93\xf4\xf0\x7c\xa0\xb7\xd6\xe5\x41\xab\x5e\x42\xaa\xc8\x6f\x60\x22\x8e\xfa\xae\x8c\x66\xa6\xe6\x8b\xc6\x33\xf5\xe8\x5e\xea\x37\x9a\x07\x07\xa0\x58\x89\xbc\x41\xce\xa1\xa3\x1e\x9a\xf7\xd5\xaa\x10\x0c\x72\x4a\x81\x2c\x7c\xd6\x80\x35\x69\x9c\x8c\xad\x16\x5d\x6b\xba\x3c\x92\xb0\x34\xe3\xe5\xcd\x22\x22\xf5\x4d\x35\x19\xf0\xee\x82\x31\x9b\x4a\x77\xac\x34\x88\x12\xe9\xee\xa6\x06\x40\xe0\xd4\x94\xe5\x0f\xa7\x9e\xbf\x8b\xfa\xc1\x19\xf5\x4a\xaf\x62\xf5\xa2\xa1\xda\xb7\xd3\x62\xa1\xbf\x18\xba\xbe\xba\x6e\xaf\x13\x35\xd5\x84\x0d\x0e\xaa\xc8\x9b\x57\xba\x35\x57\xdd\x40\xbd\x7d\x4c\x38\xfc\xbd\xd4\x87\x63\x3e\x59\x96\xe8\xaa\xd4\x9f\x45\x15\xa9\x50\xf6\xf2\x7a\x7d\x51\x97\xa0\xeb\xa1\x49\x58\x23\x4a\x67\xd4\x4d\xd6\x0c\x18\xd1\x5a\xa9\xa5\xa9\x6b\xd0\x71\xd4\x4e\xb3\xda\x5b\xc7\xbc\xb4\xa9\x7a\x41\x09\x72\xb8\x9b\x4a\x4d\x86\x96\xe7\xde\xaa\x8c\xeb\xa6\x0f\x03\xf9\x40\xfd\xdb\x0d\xfb\xa5\xcd\x7d\x37\x34\xec\x4f\x1b\xfc\x9e\x2b\xd3\x41\x22\xb0\x04\x4e\x84\x40\x04\x9c\x88\x24\xc7\x11\x79\x9b\xc5\xc8\xe1\x05\x89\xb1\x28\x84\x54\x10\x2c\x5b\x00\x12\xb4\x1d\xd1\x41\x80\xe3\x24\x87\x88\x8c\x80\x25\x8e\xe3\x99\xdd\xec\xfd\x73\xa5\xee\xfc\x7b\x3f\x56\x9e\xad\xc6\x9e\x5c\x9d\x2a\x4b\xc6\x67\x68\x65\x96\x73\x43\xa3\xe9\x78\x49\xa9\x5d\x66\x9c\x96\x59\xd3\x70\xa5\x2d\x35\xb2\x42\x9b\xd7\xf6\xb3\x41\xd7\x0a\x7f\x79\x33\xdf\xa7\x0b\x7f\xdc\xd0\xf7\x5c\xf8\x29\x8e\x93\xdd\x51\x01\xfb\xb0\x65\xf7\xec\xda\x76\x83\xef\x1e\x7e\x9e\x47\xd4\x05\x1c\x87\x10\xc3\x11\x19\x79\x8b\xc3\xcd\x73\x1c\xff\x67\xb7\x2b\xe4\xb7\xc3\x8b\x3c\x7e\xfb\x23\xc3\xec\xf7\x89\xfc\x16\xd3\x68\x41\xa3\xdf\xfe\xc8\xfc\xb6\x60\x98\xff\x30\xff\x01\xbf\x1d\x6e\x90\x34\x8a\x68\x90\x54\x77\x45\xfb\xed\x8f\x0c\xff\xf2\xfb\xfc\xee\x34\x47\xfc\xdb\x1f\x99\xff\xf7\xcb\x93\xa1\xff\xf3\xe5\xd4\xec\x4e\x72\xab\x98\x02\xc0\x31\xa2\xcd\x8b\x16\xa5\x82\x05\x58\x0b\x89\x22\x2b\x21\x89\x01\x12\x63\x43\xc1\x86\x36\x0f\x88\xe4\x08\x9c\x20\x51\x28\x58\x1c\x24\x1c\xc7\x63\x91\xf2\xa2\xe8\x38\x16\xc2\x07\x44\xcf\x7a\x03\xba\x4a\x7e\xfb\xe3\xcc\xda\xbe\x30\xdb\xb2\xff\xf6\x47\x06\xbc\xb8\xf5\x7f\xcf\x9e\x8f\x03\x3c\xdb\xe2\x02\x12\x8f\x6c\xe4\x08\x4
8\xe4\x90\xcd\x32\x84\x85\xbc\x03\x39\x22\xf1\x84\xe1\x21\x72\x10\xc7\x58\xd8\xa1\x44\xe0\x01\xa1\x22\xe4\x05\x1b\xb0\xbc\xe4\x48\x84\x48\x80\x22\x60\xd9\xbf\x7d\xb9\x60\xe1\x0d\x0e\x80\x04\x05\x48\x19\x44\x89\x60\x61\xcc\x31\x98\x75\x80\x88\x24\xc6\xc1\x2c\xcb\x8b\x8e\x48\x05\x91\x07\x82\x48\x78\x22\x02\x41\x94\xa0\x03\x19\x42\x29\x44\x02\x6f\x71\x9c\x44\xad\xcf\x73\xc0\x7c\x7b\x7d\x2f\x4c\x93\x59\x9a\x3c\xb6\xec\x57\x19\xbe\xf3\xfa\x14\xc3\xf7\xda\xfa\x51\x5e\xf6\x0f\x07\xff\x70\xf0\x0f\x07\xff\x70\xf0\x0f\x07\xff\x70\xf0\x0f\x07\xff\x70\xf0\xe3\x38\xd8\xfd\xf5\xff\x7d\xf9\xbf\x1f\x19\x34\x3c\xbd\xfe\x6f\xff\x6e\x91\xf3\x32\xec\x07\x0d\x0c\xf8\x88\xa6\x80\x26\xcb\x30\x9a\xcc\x70\x1c\xcf\x46\x11\x8e\xe9\x05\x4d\x1d\x1a\x27\x99\xb6\x5a\xc8\xd4\xf7\xc2\x99\x3f\x33\x6d\x3a\x4b\xa8\x6f\xd1\x28\xc3\x02\x06\x7d\xc4\x90\x13\x46\x84\xc6\x64\x16\x06\x5b\xee\xa7\x38\x0d\xc8\xe8\xdc\xd0\xee\xdd\x1d\x1f\x51\xb6\x1f\xb2\x1d\xf6\xc6\xc7\x97\xcb\x7f\x18\x34\x25\x9e\xbf\xab\xce\x6f\xe7\x63\xa8\x63\xdd\xff\x36\xc2\xdb\xa1\xd0\xce\xfa\x97\x93\xda\xfe\xcd\xa1\xf4\x63\x82\x3e\x5e\x25\xab\xd8\xdb\x7c\x50\x3c\xa2\xbb\x81\xdb\x3b\xc2\x1f\x73\x85\xed\x38\x36\x26\xb3\xdd\xf1\xf9\x4b\xd7\xf3\x72\xce\x6e\xf2\x4f\x09\x8a\x0d\x5d\x4e\xbb\x55\xb5\x67\x99\xdc\x3c\xc8\x2a\x23\x59\xca\x56\x80\xad\x07\xa6\x54\xe8\x2e\x5b\xe1\xac\x0a\x04\x21\xa9\x86\xad\xe3\xda\xe1\x7e\x1a\x26\xd7\x93\x13\xe0\x2e\x5d\x6d\x34\xcc\x71\x56\x7b\xe6\x0c\x51\x31\x5b\xcb\xda\x02\xb3\x9c\x48\xd9\x66\x57\x6b\x3b\xeb\x3c\x9d\xb5\x8a\x85\xce\xf3\x8c\x40\xd9\xbb\x73\x9a\xe6\x96\x19\x85\xbc\xbc\x68\xcf\x3a\x4c\xa3\x53\x0b\x17\xd0\xe1\x4b\x8d\x3a\x1b\xd2\x41\xb0\x59\x94\x4c\xab\xd9\xe7\xb3\xf5\x0e\x58\x17\xdb\x6d\x35\x72\x56\x85\xe1\x3a\x5a\x7b\x6b\xa3\x32\xd2\x02\xc9\x9d\xf0\x33\xa4\xd7\x0a\x83\xb5\x2d\x54\x98\x12\xd3\x47\xf5\x06\xdf\x07\x79\xd3\xe9\x14\x1c\x5f\x7d\x9e\xe1\x50\x06\x77\xae\xc5\xe6\x6f\x99\x06\x33\xaa\x60\xde\x95\x3a\x93\x4d\xa5\x2a\xc1\xec\x68\x33\xb6\xe7\x25\xbf\x2c\x0a\x2b\xa6\xb7\x61\xb2\x8b\x71\x57\xaa\x8d\x26\x13\x97\x87\x62\xab\x47\xe6\x62\x18\x4f\x94\xb0\x55\xce\x0d\xca\x8d\x39\x6d\xab\xf9\x5e\x2d\xa0\xeb\xa4\x25\x99\xd9\x4e\x27\x9d\x4a\xb2\xd9\x07\xab\x3e\x72\xc1\x0e\xce\xb8\x32\x5e\xaf\xc7\xa3\x5a\xde\x34\x0d\x28\x3b\xab\x85\xb8\x42\x9d\x6c\xdb\xaf\x56\xec\xd9\xd2\xeb\xa2\x70\xe8\x4f\xe2\xb4\x40\x04\xc6\x30\x40\x54\x2a\xf9\x68\x38\x2e\x16\xd3\x4a\x33\x1b\x38\x53\xb1\xed\xa5\x0b\x58\x57\x3b\x72\xcd\xcc\xd7\x4b\x8c\x59\xb0\x2a\x72\xb9\xa1\xca\x77\xfa\x9b\xfb\x5c\xff\xcb\x7b\xd7\xbe\x77\x97\xf3\x39\xff\x35\x2a\x7a\x36\x22\x79\x18\xb7\x18\x1e\xaf\x17\xad\xf6\xc0\x6d\xab\xd6\xc6\xb1\x39\x3d\x1d\x56\xd4\x92\x3c\xe1\xcc\x2a\x49\x6c\xbd\x3f\xce\x83\x96\x2c\x26\x1a\x5c\x82\xcd\x9a\x89\xa3\xc6\x82\xac\xb9\xd9\x4a\xca\x16\xa7\xd5\x45\x47\xea\x7b\x38\x8c\xdb\xbd\xb1\x39\x4c\xd0\x72\xb7\x6e\x92\x37\x67\xb4\xc1\x0c\x07\x14\x6e\x3c\xa5\x54\xd5\xcb\xb5\x08\xae\x9d\x6c\xb7\xa2\x77\xb5\x51\x59\xae\x9a\x0b\x26\x74\x69\x50\x35\xd7\xbd\xee\x89\xbf\xf0\x63\xea\xc1\xb1\x1f\xea\x62\xa7\x38\x55\x73\xd4\x25\x50\x68\xf6\x93\x52\xa5\xb2\xe9\x99\xe2\xd2\xf4\x86\x79\xac\xa4\xa8\x8a\x76\x46\x86\xbb\x87\x4e\x96\xd0\xe4\x33\xff\x3b\xde\xcf\x9b\x66\xfd\xb0\x8b\x4f\x61\xd2\xbc\x5d\x0a\x3b\xa9\x5b\x5b\x18\x89\x2a\xe4\x47\x7a\x15\xd6\xa9\x64\x9b\x4d\xa7\xa8\x67\xcb\x1e\x2a\x2f\xba\x8d\xec\x50\x4e\x84\xfd\x8e\xc3\x82\x44\x0f\x8f\xaa\x89\xbb\x58\xaa\x69\xa3\x27\x1b\x92\xd0\x62\x5a\x9d\xa4\x6b\x2f\xeb\x6a\x69\xa6\xe6\x94\x2e\x9d\x6d\x6c\xa3\xd9\x9f\x86\x01\xf1\xaa\xe6\x4e\xfe\xb6\xe5\xe9\x7d\x01\x9c\xfe\x48\x3e\xbf\xb0\x30\x59\x4c\x96\xbb\xe2\xa9\x9c\x84\x5b\xdd\x74\x
d3\xef\xd3\x7a\x29\x65\x0a\x8e\x3e\xc8\x5b\xb3\x69\x3e\x6c\x66\x2d\x26\xcf\xa5\xb5\x62\xc2\x76\x8b\xc3\x84\x1f\x37\x2b\x8b\xa5\xd9\x68\xf9\xb5\x8d\xd3\x57\x87\xb8\x94\x5f\xe8\x5e\x14\xeb\x22\xad\x3b\x59\xa9\xc0\x68\x3a\x28\x8c\xa4\x5a\x8e\x1d\xfb\xca\xd1\x1f\xf3\xcf\xf9\xed\xf3\xfe\x7d\x6d\xaa\xf7\x8d\x43\xc2\x9f\x9d\xed\x3c\x39\x28\xfc\x88\x83\x4a\xcc\xb7\xcc\x3d\xa7\x88\xbe\x6a\xdd\xd6\xd7\x6f\x19\x89\x65\x21\x14\x58\x00\x79\x11\x71\x82\x80\x44\x20\x7c\xcb\x30\x2f\x37\x0a\xf0\x57\xce\xbc\x5c\x2c\xd4\x7d\x9b\x24\xee\x38\x78\xf4\xb5\xdb\x56\xdf\x2a\x14\x3c\x2f\xd3\x49\x21\xc1\x47\x4a\x75\xfb\x59\x9e\x1f\x58\x2a\xe9\xd7\xd6\xd4\x8f\x71\xbf\xf3\x32\x81\x93\xef\xae\x84\xe9\x5b\x07\x9d\x3f\x1d\xa8\xa7\x87\x9d\x8f\x87\x90\x78\x9e\x47\x3c\xe6\x29\xb6\x29\x0b\x44\x96\x20\x41\x04\x48\x60\x58\x5e\xb0\x25\xca\x71\x08\x52\x4a\x91\x20\x32\x04\x4b\x18\x89\x18\x43\x46\xa2\x16\x70\xa8\x08\x39\x84\x1c\xfb\x70\xa6\xe2\x45\xb7\xaf\x76\x96\xf6\xf3\x5e\x2e\x0f\xaa\xa0\x5c\x5c\x27\xa3\x65\x9d\x99\x0e\x00\x5e\xcf\x42\x46\xaa\x97\x56\x8b\xaa\xb2\x6e\xa0\x24\xaf\x11\xc5\x5c\x2c\x0b\xd2\x12\xba\x49\xd4\x08\x86\xaf\xf2\xed\x85\xeb\xcd\xb3\x39\xda\x59\x33\xf4\x79\xfb\x83\x5c\x96\x9c\xe9\xfb\xa0\xfd\xd7\x0b\x9b\x97\x79\xb7\x18\x47\x64\x45\x28\x72\x04\xf1\x94\xb3\x00\xcb\x39\x1c\x2b\x20\x87\xe5\x78\x24\x02\x6c\x61\xcb\x71\x18\x4c\xa1\x25\x59\x98\x67\x2c\x00\x45\x49\x80\x48\xa0\x48\xa0\xd4\x22\x2c\xd8\xf3\xce\xbe\xe6\xfd\x17\x95\xfb\x51\xbc\x8b\x27\x7b\x05\xf4\x1f\xc0\x3b\x6b\xb1\x54\x64\x6d\x0b\x5b\x5b\xce\x2d\x56\xc0\x80\x40\x86\x03\x04\x0b\x8c\x2d\x62\x22\x59\x44\x60\x44\xc8\x38\x92\x83\x30\xb4\x6c\x5e\xa2\x04\x43\x5b\x14\x1d\x0b\x50\x82\xc8\x9e\x77\xf8\x70\xde\x6f\x2d\xf7\xa3\x78\x17\x4e\xb6\xa9\xd7\x7e\x00\xef\x94\x11\x2c\x4c\x11\x62\x6d\x0c\x18\x80\xb0\x0d\x59\x07\x58\x12\xc6\xb6\x63\x03\x87\x67\x21\x75\xa0\x40\x2d\x4e\xe4\x2d\x06\x58\x80\xe3\x08\x23\xb1\x50\x84\x3c\xa0\x1c\x42\xb6\xb0\xe7\x9d\x7b\x38\xef\xb7\x96\xfb\x51\xbc\xa3\xe5\xbb\x86\xee\xe2\x9d\x38\xb6\x28\x32\x3c\xb5\x91\x28\x10\x44\x6d\x51\xb4\x29\x06\xd4\x02\x40\x64\x44\x82\x1d\x28\x12\x20\x20\x40\x05\x81\x03\x14\x20\x9b\x42\xc2\x0a\x8c\xc0\x53\x09\x53\x2a\x52\xba\xe7\x1d\x3d\x9c\xf7\x5b\xcb\xfd\x28\xde\xb9\x93\xca\xbe\x78\x42\xe3\xde\xfc\x8e\x08\xb6\x09\x05\xc8\x21\x00\xdb\xc8\xe6\x1d\x0a\x80\x64\x89\x16\xb0\xa9\x45\x80\x0d\x6d\xc7\x86\x2c\xc5\x1c\x6b\x89\xd4\xc2\x90\x4a\x14\xf3\x22\x61\x05\x8e\xa7\x1c\x03\x9c\xc3\xc9\xae\xab\xbc\xcb\xc2\xa0\x2a\xca\xc2\x78\xea\x6a\x4d\x0a\xec\x6e\x57\x30\x4b\x44\x35\x56\xbc\x91\x5b\x4e\x4b\x73\x02\xbb\x2a\x83\x70\x19\xea\x1e\xb3\x2f\x8b\xe0\xc7\xa1\x7c\xd4\xf7\xc6\x75\x9d\x77\xf5\x3e\xfb\x3e\xb9\xcd\xfe\x87\x79\xb7\x39\x91\xb7\x2d\xdb\x06\xac\xcd\xf1\x40\x64\x04\x5e\x60\x08\x87\x11\x16\xa8\x64\xf3\x54\xe4\x11\xc1\xac\x44\x2c\x8e\xa1\x3c\x6b\x0b\x18\x3b\x02\xc0\xac\x43\x29\xb2\x20\x6f\xd3\x3d\xef\xd7\xdb\x55\x25\x0d\x61\x98\x70\x68\xae\x34\xb5\xd5\xcc\xc8\xc1\xb0\x54\xcf\x6e\x18\xa1\xb5\xf6\x62\x66\xea\xd4\x0a\x03\xdf\xe8\xb9\x51\xda\xce\x76\xe4\x1f\xc1\xfb\x2d\xf6\x7f\x34\xef\x22\xc3\x30\x12\x4b\xa0\xc8\x73\xd0\x76\x04\x48\x00\x83\x30\xc2\x8c\x2d\x08\x96\x08\x58\xdb\x71\x00\xb2\x39\x07\x01\x87\xf0\x36\x03\x44\x86\xc7\x58\x40\x04\x60\x9e\x01\x92\x23\xe1\x3d\xef\xef\xb4\xab\x29\x56\x2c\xb3\x3f\x64\xd5\x69\xbf\x87\x23\x93\xef\xae\x96\x56\x0f\x16\xeb\x65\x77\x16\x40\xb9\xad\x8c\xf4\xc2\x0c\x59\xab\xb6\xde\x73\x7f\x04\xef\xb7\xd8\xff\xd1\xbc\x43\x96\x72\x16\x66\x1c\x96\x11\x2d\x1e\x63\x96\x63\x81\xc5\x49\x02\x87\x78\xcc\x58\x40\x02\x12\xa0\x50\x84\x02\xe5\x79\x0b\x72\x12\x02\x10\xd8\x1c\xb5\x6d\x1e\xd8\xc0\x01\x48\x38\x9c\
x20\xbd\xd0\xae\xfe\xa2\x72\x3f\x8a\xf7\x09\xf7\x63\x79\x07\x12\x01\x0c\x27\x60\x9e\x25\xdb\x7e\x3c\xcb\x50\x2a\x4a\x92\x0d\x08\x21\xb6\xc4\x12\x91\x41\x2c\xa2\x14\x0b\x0c\x8b\x80\xc0\x02\x96\x07\x90\x65\x2c\x88\x59\x4e\x92\x10\x15\xad\xc3\x39\xc8\xeb\x79\xc6\xcd\xe5\xc7\xa2\x09\xa5\xe5\x58\x67\xda\x91\xc4\x87\x44\x68\xf8\x51\x32\x62\xc7\xf9\x95\x54\x6d\x18\xb6\x34\x31\x4d\x4d\x8f\x06\xed\xc7\xe5\x77\xed\x3e\xfb\x3f\xda\xdf\x05\x01\xf2\x0e\xb0\x44\x5e\xb2\x29\x70\x04\x0e\x23\x6a\x0b\x8e\x68\xf3\x88\x95\x24\x6e\xdb\x65\xdc\x8e\x4e\x01\xc7\x32\x0e\x74\x08\x6b\x59\x84\x83\x22\x10\x39\x87\x65\x88\x08\x10\xda\xf3\xfe\x4e\x7e\xff\xec\xb4\xe4\xa3\x79\xbf\xc1\xfe\x8f\xe6\xdd\x11\x38\x62\x23\x4e\x04\xc0\x46\x50\x20\x80\xb7\x1d\x07\x22\x42\x38\x41\x84\x16\x15\x45\x86\xb0\x36\x2f\x00\xc2\x30\x88\xc1\x88\x27\x58\x70\x24\xc4\x08\x48\xb4\x89\xc0\x31\xc8\xde\xf3\x7e\x21\xbf\xff\xa2\x72\x3f\x8a\xf7\x1f\x9d\x67\x88\x04\x1d\x11\xf0\xbc\xe0\x40\xc1\xde\x75\x55\x24\x4a\x80\x4d\x25\xde\x71\x20\x14\x19\x9e\xe3\xf0\x36\x03\x61\x8e\x50\xcc\x10\x8b\x27\xd4\x62\x2c\x42\x09\xef\x6c\x9b\x5c\x71\xcf\xfb\x85\xfc\xfe\x8b\xe2\xfc\x22\xef\x37\xd8\xff\xd1\xbc\x5b\x88\x75\x18\xde\x71\x2c\x49\x24\x80\x13\x28\x84\x96\x44\x58\x4a\x20\x10\x45\x00\x21\x20\x36\x15\x18\x47\x44\x16\xe4\x38\x68\x63\xca\x12\x44\x2c\x91\x27\x82\xcd\xd9\x22\x87\x0e\xbb\xff\x2f\xe4\xf7\x07\xb6\x6b\x17\x97\x5b\xde\xe3\xbd\x70\x9f\xfd\x71\xf7\x36\xfb\x1f\xe5\x9d\xb1\x59\xcc\x59\x54\x60\x39\x01\xf0\x48\x74\x78\xd1\xb6\x10\xa0\xd4\x61\x25\x8a\x01\x76\xa0\x23\x89\x88\x50\x46\xb4\x10\x61\x19\xd1\x01\x90\xe3\x19\x9b\x03\x84\x13\x6c\xc2\x4a\x87\xad\xff\xd7\x79\xbf\xb7\xff\x7c\xb2\x2c\xf3\x71\xde\x8b\xf7\xd9\x3f\xad\xf7\xcf\xd8\xbf\xba\xdb\xff\xf2\xeb\x21\x3f\x3f\xb1\xfc\xa3\x67\x95\x8b\x55\xb1\x64\x2c\x8c\x89\x55\x61\x4b\x32\xec\x99\xe3\x56\x54\xf1\xc7\x7d\x00\x9c\xa2\x18\x57\x75\xc1\x07\x5a\x6b\x59\xee\xe5\xe4\xfe\xee\x90\xd7\xc9\xec\xf1\x39\x4b\xaf\x58\xfb\xc4\x59\xd3\x9d\x7c\x03\x45\x95\x53\x7d\xfb\x59\x85\x6d\xa5\xe4\x6d\xb5\x4f\x59\xd8\xe5\xa8\xa4\xb0\xed\xd5\x2c\x5f\x1d\xb6\x56\x4c\x54\xde\xb4\x0b\xd5\x32\x58\xa9\x2a\x48\x2b\xc5\x99\x3f\xb7\x9c\xa9\xda\xc9\xf6\xfb\xfa\x9c\xef\x7a\x6a\x7e\xe3\x17\x56\x49\xb6\xd8\xc8\xf6\xe7\x85\xd0\x68\xda\x51\xbf\x23\xcc\x98\xb0\x3a\xd4\x4b\xf3\x4e\xad\xf9\xf5\x5b\xe6\xeb\xd0\x1f\xf6\x66\x69\x44\x75\xbd\x5a\xa0\xb2\xb9\x2a\x07\x2c\x6d\x15\x9a\x29\xea\xbb\x64\x8e\x7b\x95\x70\x15\x44\x6a\xd6\x05\xa6\x73\x7c\x9f\xcf\x15\x0e\x8e\x9e\xf1\xe2\x38\xc5\x89\xec\xae\x75\x7b\xc4\x2c\xcf\xad\xb3\x2c\xf2\x03\x66\x79\xea\xd1\xba\x63\xdc\x61\x5f\x96\x7f\x5d\xaf\xf0\x52\xb6\xfc\xd1\x4b\x08\xb7\x07\xd9\x95\x15\xff\xfd\xf5\xd9\x15\xf7\x6b\x41\xe6\x04\x85\xf6\x66\x48\x47\x8c\xb0\x49\x44\x36\xac\xd9\x73\xca\x65\x73\xab\x20\xce\x4d\x4a\x79\xbb\xd8\x77\xa4\xbc\x9e\x6f\x0d\x1c\x73\x08\x55\xa3\x03\xf1\x72\x39\x0c\x50\x75\xee\xea\x45\xa9\x3c\x9f\x2e\x6a\x7e\x8b\xe9\x54\xf0\x8a\x84\x65\x43\x4d\xe7\x45\xa2\xb4\x49\x6d\x1b\x08\xb1\xe3\x86\x0d\x6d\xd5\x43\x55\x79\x22\x29\xb6\x53\x2e\xf6\xe4\x39\xbf\xc8\x15\xd3\x06\x4a\xfd\xe2\xb2\x51\x26\x8c\x33\x75\xd2\xc5\x49\x69\x7f\x7d\x90\xdd\xeb\xe4\xf7\x06\x59\x6d\x59\xf5\xc3\x47\x06\xd9\x4f\x1c\xf2\x7e\x28\xc8\x1e\xbc\x5e\x74\x7b\x90\xa9\x67\xe8\x5f\x39\x9c\xe5\xbb\x3e\x63\xb2\xb6\x8b\x4c\xc6\x9f\x33\x74\x5a\x23\x45\x26\x59\x8d\xdb\x83\xca\x50\x5a\x6a\x6e\xd8\xce\x63\xda\x13\xbb\x5e\x61\x4f\xd8\xdb\x41\xa6\x96\xd3\x29\x93\x54\x8b\xd5\x02\x67\xae\x96\x09\xb0\x55\xc5\xd4\x1c\x3e\xb1\xd0\x94\xb3\xd6\xb5\xa8\xe8\x2a\xb3\xec\xd4\x1c\xd6\xfc\x15\x49\x10\xe7\xd5\x1d\xd6\x5f\x25\xe3\x15\x5f\xb3\xd1\xb0\xcc\x69\x9c\x3a
\x25\xb1\xc3\xf1\x9a\x3c\xca\x17\xdb\xdd\x66\x1c\x88\xce\x40\xdd\x06\x42\x65\xcd\x2b\x2c\x9f\x2e\xcb\xd5\x32\xaf\xaa\x05\x79\x1d\x94\x06\x51\x79\x51\x2a\xaa\x25\x24\xe1\x86\x84\xe9\x7a\xcc\xea\xd9\xa5\x50\x38\x2e\x0f\xfc\xfa\x20\xbb\xd7\xc9\xef\x0d\xb2\x2a\x98\xf0\xf2\x03\x83\xec\x67\xce\xa3\x7e\x28\xc8\x1e\xbc\x38\x78\x7b\x90\x9d\xaf\x78\xbf\x72\xb8\x06\xab\xe4\xe4\x06\x87\x06\x79\x15\x26\x25\xb3\xd0\x60\x5a\x50\x06\x35\x3a\x69\x8a\xe5\x16\x1f\xd4\x19\x59\xa2\x3d\xcf\x5e\xeb\xc9\xbe\x92\xae\x04\xd9\x60\xe0\xac\x1b\x91\xdf\x1c\x39\x13\x8f\x9f\x46\xcd\x6c\xa8\x38\x69\x50\xf6\x5b\x29\xeb\x3b\xab\x29\x07\x42\x34\xef\xb5\x84\x35\x33\xf0\x9b\xe2\x2c\x2c\x66\xf9\xfe\xdc\xe5\x98\x71\x45\xe9\xd5\x97\x66\xd8\x14\x18\x41\x31\xb5\x26\x15\x80\x0e\x12\xda\x5b\x74\xe8\x78\x52\xde\xbd\x34\xab\x9f\xa6\x66\x9b\x75\xb5\x42\xd4\xa9\x2e\x53\x1c\x39\xa0\x39\x18\x0b\x1b\x27\x6a\x17\x63\x23\xd6\x3a\xf9\xc9\xd8\x65\xd5\x89\x69\x1f\xd7\xfa\x7f\x7d\x90\xdd\xeb\xe4\xf7\x06\x59\x85\xcb\xd6\x07\x0f\x0c\xb2\x9f\xb9\x38\xf7\xa1\x20\x7b\xf0\x4a\xf0\xed\x41\x56\x38\x43\xff\xca\xe1\xa2\x79\x9d\xaf\xd2\x06\x76\xc7\xab\x1a\xee\x36\x25\x3e\xbf\x71\x62\x89\x02\x12\x46\xf5\x61\x7f\x93\xef\x95\x27\x85\xb0\xf2\xb4\x01\xf3\x6a\x90\xd5\x7d\x63\x64\x1b\x74\x5d\x23\xbd\x42\xaf\x29\xda\x66\x6b\xa2\x26\x85\x36\x97\x94\x74\x65\x5d\xb1\x71\x73\xd2\xe2\xb3\xb9\xaa\x90\x4d\x6a\x9b\x4a\x2f\xec\x8e\x2d\x55\xee\xc7\xa4\x35\xd0\xb3\x63\xbe\x60\xe7\xcc\x42\x17\xab\x1b\x7b\x10\xf7\x14\x39\x2e\x41\xd0\x4b\xc7\x7a\x75\x1b\x08\x1b\xc9\xcb\x5b\x49\xaf\xb4\xa2\xac\x0e\xe7\x6a\x24\xeb\xc5\x5a\xb4\x19\xcb\xb6\x81\xa0\xd1\xc8\xdb\x9c\x58\x5e\xd9\xa9\x9f\xf2\x8d\xe3\xc6\x8e\x5f\x1f\x64\xf7\x3a\xf9\xbd\x41\x56\x96\xfa\xee\xf2\x91\xdd\xc5\x9f\xb8\x12\xfb\xb1\x31\xd9\x63\x97\xfd\x3f\xb9\xcb\xf9\x64\x17\xf6\x95\x89\x8f\xdd\x7b\x10\xf2\x2d\xd3\x6c\x1f\x48\xfc\x78\x9f\xbb\x9f\x7b\xbe\x8e\xfa\xb4\x67\xfb\x86\x5c\x37\xda\x9b\xc5\x4c\xcb\x2b\x72\x7f\x11\x33\x4d\xd7\x2f\xe5\xba\x05\xcb\x91\x07\x09\x6c\xc8\xae\x3b\x68\x9a\x1d\x1b\xcc\xc6\x26\x29\x63\x13\x4e\x55\x2d\x37\x6e\x0c\x6b\x41\xb5\x30\xac\x4d\x0a\xda\xc8\xe5\xec\x69\x6a\x1a\x2b\xaa\x0e\xe5\x65\xa7\xd2\x6d\xe2\xc0\x8e\x97\xee\x2e\x28\x12\x12\x11\xae\x90\x93\x23\x93\x59\x70\xb2\xcf\xc5\x75\x5e\x54\x3b\x50\xaa\xa5\x33\x25\x6d\xa4\x63\x9e\xce\xc2\x65\xd0\x4a\x0d\x43\x7c\x46\x76\x39\xe0\x06\x2f\x2b\xf5\x45\xc0\xed\xb8\xdb\x77\xb5\x4f\x36\xa8\xdc\xd0\xea\x97\x83\x63\x5d\xbc\x71\xbd\xb1\xbd\xee\x45\xc0\x3f\xc4\xfe\x1b\xc9\xe6\x23\xf6\xcf\xa7\x4e\xef\xdd\xf2\x71\xd3\x96\xc2\x47\xdb\xff\xc4\xd4\xad\x7c\xb4\x6f\xdc\x62\xbf\x50\x30\xcd\x83\x0f\x54\x54\xb1\xd8\x5c\xf5\x1d\xa8\xd4\xc6\x66\x65\x01\xe7\xa3\x4d\xcc\xe3\x94\x2d\xe1\x5a\xa9\x04\x62\x94\x87\x6c\xcb\xec\x1a\x5e\x32\x3a\x79\x5d\x61\xfe\x18\x70\xcb\x73\xfe\x3e\x96\x90\x1e\xbb\x1f\xe6\xf6\x63\x21\xef\x26\xa4\x9b\x8e\x85\xc0\x4b\x09\xa9\xf8\x6c\xdf\xc8\x4b\x13\xbf\xd2\x63\xe7\x70\x21\x18\xce\x5a\x6c\xd6\xe8\x44\xb3\x98\x4e\x47\x47\xde\x6a\x3e\xd1\x41\x3e\x74\xfb\x51\x23\x11\xdc\x06\xc3\xb3\x86\x35\x19\xb1\x76\xbb\xd3\x75\xa8\x1a\x2e\x08\x68\xca\xd8\x19\xa9\xfd\x55\x32\x32\xe5\x69\x5c\x4d\xc7\xd3\xbc\xbf\x1e\xe7\xe5\xc1\x2e\x69\x2c\xba\xa3\x64\x61\xcb\x49\xcb\x75\xfb\xc3\xd5\xaa\xd2\x0b\x78\xdb\x0a\x87\x24\x9c\xad\x41\xc9\xaf\xf6\xe7\x0b\x52\xa9\x66\x91\x55\x4f\xde\xeb\x01\x7c\x3e\x21\xdd\xd0\x43\x7a\x68\x42\xba\xd3\xfe\x03\x13\xd2\x43\xd6\x72\x5e\x5f\x3f\x6d\x2f\xd6\xbd\x09\xe9\xb3\xf6\x0b\x66\x17\x1c\x94\x57\x7b\x5c\x01\xd0\x51\x83\x97\xd7\x92\x02\x9a\x71\x51\x73\x17\x84\x11\x18\xa6\x2b\x89\x83\x31\xe7\x57\x27\xbe\x64\x08\x68\xa2\xc0\xc5\x49\x55\xdc\x9b\x90\x1e\xb
c\x51\xac\x88\xc2\x72\x62\xda\xc1\xa0\x61\xda\xc3\x79\xd2\x9f\x75\x4a\xf9\xc4\x22\x03\xe0\x2b\xbe\x43\xf2\x7a\x45\x73\x7b\xc1\x74\x51\xd0\x47\x58\xfe\x65\x09\xa9\xf0\x6c\xdf\x68\x4e\x5d\xa1\x58\x1d\x95\x5d\xb0\x12\xc6\x33\x79\xc9\x68\x4b\xbe\xc4\xf6\x0b\xfc\xba\xa5\xfb\x4e\x59\x5f\x16\x9c\x15\x90\xea\xa9\x87\xac\xa4\x51\x96\x0b\x74\x59\x90\xeb\x4e\x03\xcb\x79\xb1\x60\x0c\xa7\x68\x06\x65\xe4\xc2\x09\xaf\x96\x54\xcf\x75\xba\xf5\xae\xca\xf0\x2e\xd9\x25\x0d\xb7\x55\x6c\x2f\xb9\xa1\x0a\x61\x53\x2e\xe0\xde\x88\x81\xa9\x5c\x75\xc4\x02\xd3\xec\x36\x57\x89\xa6\x17\xe6\x33\x66\x59\x19\x18\x01\x0a\x9f\x91\x3d\x2a\x21\xdd\x30\xf9\x58\x6e\x1c\xeb\xe2\x8d\xeb\xe3\x09\xe9\x4e\xfb\x0f\x4c\x48\xf7\x2e\xea\xdf\x9b\x90\x1e\x62\xff\x8e\x84\xf4\x59\xfb\xbf\x3c\x21\x3d\x78\x07\xe5\xed\x09\xe9\xca\x32\xda\xa3\x87\x6c\x27\x09\xa9\x56\xf6\x17\xd9\xe9\xa8\x80\x8c\xe1\xd4\xb5\xf5\x79\xbe\xaf\xb6\xed\x91\x96\xb8\x11\xee\x44\xad\xf6\xd2\x8c\x90\x9d\xcb\x47\x75\x85\x15\xa9\x53\xaa\x85\xab\x41\x1d\x67\xd9\x54\x4a\xda\x9a\xbd\xea\x66\x47\xc5\x3e\x5f\x6e\xf5\x84\xa5\x5c\x80\x56\x63\x26\x8b\xd1\x6a\xb5\x92\xb5\x5d\xd2\xa8\xa5\x6d\xbe\xb4\x2e\x56\xf1\xbc\xac\x28\x60\x0a\xcc\x79\x71\x39\x29\xab\x63\x17\x22\x3f\x06\x53\xa3\x56\xef\x24\x8c\x0b\x9a\xf9\xfe\x11\xd9\xdf\x24\x21\x9d\x1c\xba\x3d\xbd\x7e\x5a\x42\x52\x6e\xb7\xff\x43\x76\x19\x7d\x76\xb7\xcb\x2f\xdc\xe5\x24\xcb\x77\x26\xa4\x5f\x3d\x64\x7b\xf0\xd6\xe2\x4f\x16\xe2\x63\x3d\xa4\x7d\xa1\x3e\xe9\xe4\xb7\x25\xb0\x7d\x45\x57\x79\xf1\xb4\xa7\xa0\x3d\xe3\x35\x14\x5c\x1a\xcd\x3c\xb3\x2e\x69\x1c\x14\xf4\x7e\x81\x94\x66\x8e\x19\x85\x2b\xa6\xdd\xc8\xd5\xcb\x89\xe2\xe7\xd9\x91\x8b\xdd\x9a\x51\x52\x5b\x6a\xd1\xa8\x95\xea\x70\x3e\x6e\x93\x4e\x63\x3e\xa7\x5a\x5c\xd7\xd2\x45\x25\x1b\x08\x3a\x67\x65\xa5\x42\x04\x5a\x30\x59\xfa\xc4\x8d\x77\x49\x46\x21\x72\x97\xfa\x55\xbd\xa2\xea\x94\x2f\x07\xc6\xa6\xc6\xb6\xd7\xf9\xee\x54\x98\x93\x76\xb7\xa4\x2b\x03\x55\xb7\x1a\x5e\x7b\xd8\x0f\xe3\x67\x64\x97\x13\xd8\x99\x13\xbc\x91\xc0\x4e\x09\xbf\x61\xcf\xc4\xdd\x3d\xaa\x07\xda\xbf\xad\x47\x75\xbe\x5d\xef\x6f\x17\xc0\xf2\xcb\xed\xab\x3f\x1d\xdf\x22\x5a\xba\x77\x27\x98\x07\xef\xa1\xff\x64\xb7\xed\x33\x09\xe6\x93\xf3\x0c\xf7\xbc\x5a\xe4\xb8\x32\xb0\xbb\x4a\xcf\x78\x0d\x75\x9c\x6f\xcb\xe9\xb4\x42\xa4\xa4\x35\x2f\xba\xd9\xc6\xc4\xb4\x2a\xcd\x0d\x17\x4e\x5b\xc6\xa0\x4b\x17\x6b\xc5\x59\x80\xaa\x3c\x9d\x5b\xfd\x22\xbf\x6e\x5a\xb3\x16\xc8\xb7\xf8\x50\xf0\x23\xcf\x6f\xf9\x62\x83\x33\xc3\x56\xbe\xa8\xcb\x1d\x23\xcf\xb1\xf5\x46\x8f\x14\x36\xb6\xb1\xdc\x25\x01\x7b\x63\x2d\xd3\x62\x40\x55\xd8\x9e\x0a\xfd\x6c\x7d\xd8\xf6\xbb\x93\xf1\x38\x52\x97\xc6\x48\xcc\x2d\xa3\x85\xd6\x90\x47\x4d\x9d\xb8\x85\xe3\x4a\xc9\xa3\x12\xcc\x0d\x3b\x1f\x1f\x9a\x60\xee\xb4\xff\xc0\x04\xf3\x77\x9a\x43\x91\x2f\x24\x98\x9f\x88\x4f\x4d\xf1\xfa\xfe\x04\xf3\xe0\xc3\x22\xb7\x27\x98\x2b\x43\xaa\xdb\x7a\x30\x77\x24\x98\xb3\x1e\xcc\x49\x82\x69\xe8\x95\xf6\x54\x35\x7c\xa9\x1b\xcd\xd9\x74\x1c\x2c\x56\xc3\x71\x71\xce\x6f\xca\x46\x40\x9a\x9d\x99\xb8\x98\x72\xb1\xa2\x70\x2d\xcc\xd5\xba\x01\x2e\x0d\x54\x3c\x69\xf5\x3b\x85\xd0\x98\xea\x05\x39\x42\x09\x08\xcb\x4b\x3b\xdf\x8e\xf9\x4e\x59\xac\x21\x93\x16\xdd\xbc\x65\xec\x57\xcd\x24\xb0\x31\xdb\xb2\x6a\xd6\xc5\x7c\x92\xdb\xf4\x6b\x2d\xae\x91\xf5\xf2\xcb\x64\xb8\x5c\xb6\xba\xd8\x8c\x67\xb9\x69\xcb\x1e\xc0\x92\x61\xbe\xb7\x75\xf8\xa7\x27\x98\x9b\x02\xfc\x81\xf6\x5f\x0f\x39\x3e\x60\xff\x41\x3d\x98\x9f\x9c\x60\x7e\x26\xbe\x87\xf4\x60\x1e\x7c\x2a\xea\xf6\x21\xd2\xbb\x09\xe6\x93\x4b\xb7\xf7\x0c\x91\xce\x7a\x30\x27\x43\xa4\x22\x37\x15\x26\x8a\x3c\x47\x78\xbe\x68\xb3\x76\xae\xd4\x49\x06\x44\xa8\xca\x58\x98\xb5\xbb\x9e\x
d7\xe0\x2a\xeb\x4a\xd9\x9a\xfb\xb1\xea\x2e\xc8\xa4\xa8\xb0\xb6\x35\x22\x51\x28\x25\x64\xa1\x0c\x4b\xce\x72\xde\x31\xb3\xde\x64\x21\x96\x7c\x71\xe8\x60\x1e\x4e\x06\xcd\x89\xb1\x5f\x05\x5b\x77\xb2\xf2\x30\x10\x21\x84\x8a\x17\xc4\x2a\x9d\x2e\xc4\x8d\xdb\x6b\x55\xd6\x8a\x37\xd9\xcc\x4b\x55\xab\x11\x5b\x9b\xd8\xca\x89\x85\xe3\x86\x8b\xbf\xc9\x10\xe9\xee\x04\x73\xa7\xfd\x07\x26\x98\xbf\xd3\xb2\xb4\x7c\x21\xc1\xfc\x44\x7c\x8f\xe9\xc1\x3c\xf8\xf8\xdf\xed\x93\xc2\x57\xb6\x7d\xef\x5f\xb3\x75\x7b\xc2\xb8\x37\x8b\x9f\xe0\xb9\x38\x19\x77\x32\xc9\x5c\x98\xf6\x97\x38\xf4\x85\xf5\xb4\xdd\xb1\xd7\xb8\x21\xcc\xeb\x35\x81\x1b\x64\xcb\xdd\x89\xcc\x86\x20\x25\x42\x8f\x2d\x6c\xf2\xd3\x89\x49\xd9\x98\x6c\x82\x5a\x0d\x67\x5b\xe2\xc8\xec\x58\xd3\x06\x9a\x1a\x7c\xb6\x40\x3a\x35\x34\xd6\xa3\x9e\x31\x5f\xcd\x0b\x85\xa1\xd5\x98\xc8\xfb\x21\x54\xd2\x16\x7b\x39\xc1\xaf\xe5\x4b\xdc\x26\x25\xd5\xb8\xa6\xbb\x6a\xad\x06\x83\x12\xd7\xdb\xb4\x54\x36\xad\x16\xd6\x9c\x55\x62\xea\xa3\xc2\xb1\xf7\x75\x39\x01\x5d\x7e\xfd\xcf\x67\x27\x72\x8f\x7c\xdc\xe1\xc4\xb7\xfd\x18\xe2\xde\x0b\xb2\x4f\x27\x54\x2e\xaf\xe4\xbc\x99\x58\x8d\xdb\xcb\xdc\x40\x91\xce\xc8\xf2\xcb\xc4\xf6\x6a\x83\xdd\x3b\x89\xf5\x71\xf6\xd5\x9b\xec\x1f\x27\xcf\x1f\x78\x44\xf6\xd5\x2b\x7b\xf6\xd7\x4f\x3b\xa2\xfb\x6a\xff\xd6\x3b\xf6\x4f\x8e\x86\xff\xed\xe6\xb6\xce\xf8\xf9\xfb\xe0\x7b\x15\x63\xc7\x18\x3c\xb9\x96\x47\xd0\xbb\xcf\x9f\x6c\x13\xee\xce\x09\xa5\xb3\x85\xc0\x73\x1f\xd8\x7d\xee\xfe\xcd\xf0\x69\x6f\xf3\x79\x76\xe5\x8f\xf9\xec\x7f\x5e\xce\x7e\xbf\xc3\xf2\xe0\x73\xf3\xff\xbc\xfe\xf9\x17\xbf\xfe\x79\x3f\x02\x2a\xb5\x67\x55\xb4\x6e\x2d\x87\x03\x29\x64\x19\x35\x15\x67\xbc\x1b\x6d\xb2\x83\x7e\xa3\x58\xed\x2b\xc5\xa6\x0c\x06\xb6\x51\x6b\x49\xcb\xd9\x7b\x9d\x1b\xfd\xcc\xd1\x3f\xdd\xb0\x1c\xf5\x3d\x84\xcb\x7b\x3a\x9f\xfb\x9f\xf5\x96\x7f\xee\xe8\xfa\xe8\x7b\xd7\x46\xa7\x27\x9d\x8e\x5b\xf6\x48\xde\xbb\x03\xe1\x81\xf6\x6f\xda\x81\xa0\x9c\xbf\x5f\xe6\x6f\x37\xfa\x93\x5f\xbe\x8f\xe3\x67\xe3\xd3\x8a\xc6\x7c\xf9\x1e\xbe\x73\xfe\xfe\x4e\xf3\xff\x97\xf8\xfb\x99\xf8\x7a\x13\xde\xf8\x10\x7f\xfb\x4e\xe2\xfe\x37\x88\x7f\x45\xae\x7b\xcc\x52\x81\x71\xa9\x7c\x47\x7c\xf9\xe3\x0e\x99\xff\x76\x9d\x9e\x63\x5b\xf0\xc6\x96\x43\xf9\xa9\xd3\xfc\xdf\xb2\x53\x7a\x52\x3f\x3f\x7d\xa9\xa9\xc5\xe1\x9a\xec\x3c\x23\x17\xe5\xb3\xf6\xe4\xa4\x2d\xbb\x74\x9d\xc4\xcf\x7f\xb7\x41\xd6\x0e\xef\xe0\x76\x7f\xb9\xb7\x6f\x73\xc5\x5f\x4e\xae\x73\xfe\xaf\xbd\xa3\xe9\xf0\xb3\x53\x8f\x78\x51\xd3\x4b\x55\xcf\x63\x87\xd3\x37\x2d\xed\xf0\xc6\xc7\xfe\xcd\x85\xd9\xd2\xb7\xb5\x3c\xef\x08\x59\xca\x4d\x63\x22\xbf\xd2\xf2\x54\xc2\x7d\xc1\x30\x21\x61\x1a\x24\xbb\x9f\x92\x3a\xf9\xfb\xfb\x6c\x42\x9f\x8b\xa9\x34\xea\xed\x4e\x4b\xd6\xeb\x9d\x77\x8a\x29\x57\x3b\x5a\xeb\xc0\x4a\xa3\x5e\x1d\x9c\x6a\xdc\xfd\xbe\x95\xac\xaa\x27\xda\x5e\x19\xcc\x34\x5b\x7a\x4d\x6e\x0d\x32\x15\x6d\x90\xf9\xfd\x70\xd7\xb3\xbf\x65\xb6\x12\x01\xf6\xe9\x5b\xf0\xe3\xe7\x3f\x1e\x0b\x3c\xbe\x82\x3a\xbe\x06\xf9\x15\x50\x0b\x07\xdb\xff\x1e\x04\xcf\xc2\xc1\x25\x64\x4f\x06\x5e\x82\x0a\x42\x9b\x5e\x40\xb4\xff\x3d\xb5\x11\xc5\x36\x8d\xe2\x97\x9f\xbe\xef\x3f\xc5\x74\xfe\xfd\x21\x70\x5f\x28\xbf\x04\xfc\x8a\xf5\x4c\xb7\xae\x1b\x5d\x2d\xf3\xfb\xf3\xb7\x9f\x2a\xc9\x63\xf8\xfe\x64\x01\x5e\xd7\xc1\xe1\x3e\x8e\x47\xaf\xd0\x87\x8e\xb3\x85\xbd\xff\xdf\x83\xf0\xee\x95\x5d\x02\x7a\x62\xe6\x25\xc2\xdd\x8d\x0b\x6e\x32\xa3\x5b\x74\xbb\x7f\x1f\x04\x6e\xa7\xeb\x12\xb6\xa3\x91\x97\xd0\xbc\xd9\xb7\xcc\x2c\x8c\x92\xd7\xd8\xb6\xf6\xe2\xd1\x3c\xa5\x29\x7d\xf1\xe1\x51\x48\x4f\x54\x5e\x04\x7c\x6e\xf2\x52\xa5\x5f\x42\x1d\xa7\xd6\xe1\x7f\x8f\x43\x1a\xa7\xd6\x1b\x18\x9f\xcc\xbc\x44\x17\xd1\xf8\x42\
x75\xcf\xd3\x30\x4a\x7d\x2f\x70\xc2\x93\x3f\x1f\x04\xf2\xa8\xf0\x12\xd0\x33\x73\x1f\xca\x61\x31\x99\xed\x1f\x8b\x4f\xfe\x7c\x10\xda\xa3\xc2\x4b\x68\xcf\xcc\xbd\x44\x3b\x8f\x69\x72\x31\xd6\x4f\x7e\xcc\xf1\xf8\xe7\xa3\xf0\x3e\x2b\xbc\x88\xf7\xa5\xb9\x97\x78\x77\xdf\x5f\x6c\x5f\x4f\x7e\xd8\xe9\xf8\xe7\x83\x00\x1f\x15\x5e\x02\x7c\x66\xee\xcd\xae\x81\x17\xc7\x29\x8d\xbe\x65\x70\x1c\xd3\x84\x84\xf6\x85\x32\x9c\xbe\xe2\xf4\xf4\xc3\xa3\xca\x71\xa2\xf2\x62\x49\xce\x4d\x5e\xca\x12\x31\x9d\x7f\xcb\x24\x2b\x2f\xb0\xe9\xea\x42\x09\x8e\xf0\x1f\x8d\xfd\x2a\xf0\xbb\x50\x9f\x75\x51\x5f\x7e\x7c\x10\xfe\x97\x4a\x2f\x15\xe2\x82\xd9\x37\x4b\x72\x90\xbd\x5c\x9c\x83\xcf\x59\x78\x8a\x03\x72\xfc\xb9\x3e\xbd\xae\x6a\xfd\x77\x60\x2b\x2d\x4d\xee\x68\x7b\xd1\x73\x3d\x99\x46\xfd\xd8\x8b\xed\xb6\xf5\x7a\x31\x63\x25\x11\xa5\x99\xdf\x0f\x12\xff\xca\xf4\x4a\x5a\x4b\x7b\xfe\x9c\xf9\xaf\xbf\x8e\xbf\x58\x06\x5e\x77\x34\x69\x9c\xec\xdb\xf2\x6d\x29\x6e\x46\xf9\x52\xcd\x16\xe4\xa1\x9f\xf2\x02\x62\x4c\xa7\x53\x2f\x70\x77\xb1\xf7\x2d\x63\xa5\xeb\xe3\x87\x59\xe4\x91\xd7\xb1\xb8\xad\x06\x6b\x1d\xd3\xf9\xcd\xc0\x9e\x35\x6c\x31\x1d\x03\xe3\x05\xac\xb7\xbb\x8a\x5b\x71\x87\xd2\xfb\x21\x3c\x29\xd9\xa3\x38\x49\x30\x1f\x04\x72\xb8\x73\x1f\x90\x53\x25\x5b\x20\x2f\x3b\xc2\x1f\x44\x12\x93\x19\x0d\x16\x77\x22\x39\x55\xb2\x45\x12\x93\xd9\x27\x09\x39\x36\xa8\x77\x23\x39\xd5\x73\x00\xf3\xd4\x4b\x78\x09\x06\xc7\xc9\xdb\x80\x4e\x12\xc7\x7d\x88\xce\x15\x6d\x21\x9d\xe5\xc6\x77\x39\x6a\x86\x71\xe2\x46\xb4\x6d\x54\x33\x4f\xbf\x03\x9e\xb1\x53\x7f\x96\x21\xa1\x3f\x9b\xd2\x84\xee\xcc\xfe\xff\x01\x00\x00\xff\xff\x73\x11\x8c\x9e\x54\xcc\x00\x00")
+
+func pathed_paymentCoreSqlBytes() ([]byte, error) {
+	return bindataRead(
+		_pathed_paymentCoreSql,
+		"pathed_payment-core.sql",
+	)
+}
+
+func pathed_paymentCoreSql() (*asset, error) {
+	bytes, err := pathed_paymentCoreSqlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "pathed_payment-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x80, 0x88, 0xda, 0x2e, 0x31, 0x6, 0xa7, 0x5a, 0x61, 0xf6, 0x8, 0x67, 0x65, 0x55, 0x70, 0x60, 0xd1, 0x77, 0x3a, 0x68, 0xb3, 0x91, 0x3c, 0x4e, 0x18, 0x19, 0x6d, 0xed, 0x33, 0x65, 0xf7, 0x11}}
+	return a, nil
+}
+
+var _pathed_paymentHorizonSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x7d\x69\x93\xaa\xc8\x12\xe8\xf7\xf9\x15\xc4\x89\x1b\xd1\xe7\x84\x7d\xa6\x29\x76\xce\xbc\xb9\x11\xa8\xb8\xef\xbb\xde\x98\x30\x0a\x28\x10\x45\x51\xc0\xad\x6f\xdc\xff\xfe\x42\x70\x41\x14\x17\xb4\x7b\x7a\xde\x23\xce\xf4\xa8\x54\xe5\x56\x59\x99\x59\x59\x59\xf0\xf3\xe7\x6f\x3f\x7f\x62\x15\xd3\x76\x34\x0b\xd5\xab\x05\x4c\x81\x0e\x94\xa0\x8d\x30\x65\x3e\x9e\xfe\xf6\xf3\xe7\x6f\x9b\xfb\xc9\xf9\x78\x8a\x14\x4c\xb5\xcc\xf1\xa1\xc1\x02\x59\xb6\x6e\x4e\x30\xfe\x77\xe6\x77\xe0\x6b\x25\xad\xb1\xa9\xd6\xdf\x74\x0f\x34\xf9\xad\x2e\x36\x30\xdb\x81\x0e\x1a\xa3\x89\xd3\x77\xf4\x31\x32\xe7\x0e\xf6\x27\x86\xff\xe1\xde\x32\x4c\x79\x74\xfa\xab\x6c\xe8\x9b\xd6\x68\x22\x9b\x8a\x3e\xd1\xb0\x3f\xb1\x97\x66\x23\xc5\xbd\xfc\xb1\x03\x37\x51\xa0\xa5\xf4\x65\x73\xa2\x9a\xd6\x58\x9f\x68\x7d\xdb\xb1\xf4\x89\x66\x63\x7f\x62\xe6\x64\x0b\x63\x80\xe4\x51\x5f\x9d\x4f\x64\x47\x37\x27\x7d\xc9\x54\x74\xb4\xb9\xaf\x42\xc3\x46\x47\x68\xc6\xfa\xa4\x3f\x46\xb6\x0d\x35\xb7\xc1\x12\x5a\x13\x7d\xa2\xfd\xb1\xa5\x1d\x41\x4b\x1e\xf4\xa7\xd0\x19\x60\x7f\x62\xd3\xb9\x64\xe8\xf2\xeb\x86\x59\x19\x3a\xd0\x30\x37\xcd\x84\x42\x43\xac\x61\x0d\x21\x5e\x10\xb1\x6c\x0a\x13\x3b\xd9\x7a\xa3\x8e\x95\x4b\x85\xee\xb6\xfd\xef\x03\xdd\x76\x4c\x6b\xdd\x77\x2c\xa8\x20\x1b\x4b\xd6\xca\x15\x2c\x51\x2e\xd5\x1b\x35\x21\x5b\x6a\xf8\x3a\x1d\x37\xec\xcb\xe6\x7c\xe2\x20\xab\x0f\x6d\x1b\x39\x7d\x5d\xe9\xab\x23\xb4\xfe\xe3\x33\x10\xca\xee\xa7\xcf\x40\xb9\xd1\xab\xcf\x63\xd0\xc3\x76\x3f\x77\x1e\x81\x1b\x45\xbe\x84\xcc\xd7\xea\x00\xdc\x6d\x9e\x2d\x25\xc5\x8e\xaf\xe5\x16\xac\x4b\x55\x1f\xa9\x2a\x92\x1d\xbb\x2f\xad\xfb\xa6\xa5\x20\xab\x2f\x99\xe6\xe8\x72\x47\x7d\xa2\xa0\x55\xdf\xc7\xdc\xc4\x86\xae\xa2\xdb\x7d\x73\xd2\xd7\x95\x7b\x7a\x9b\x53\x64\xc1\x7d\x5f\x67\x3d\x45\x0f\xf4\x3e\x50\xf2\x10\x15\xf7\xf5\x35\x90\xa2\x21\xcb\xed\x68\xa3\xd9\x1c\x4d\xe4\xbb\x58\xf0\x75\x9f\x5a\x68\xa1\x9b\x73\x7b\xfb\x5b\x7f\x00\xed\x41\x44\x50\x8f\x43\xd0\xc7\x53\xd3\xda\x4c\xc7\xad\x4d\x8d\x0a\x26\xaa\x2c\x65\xc3\xb4\x91\xd2\x87\xce\x3d\xfd\x77\xca\x1c\x41\x95\xb6\xf3\x32\x02\xd1\xfe\x9e\x50\x51\x2c\x64\xdb\x97\xbb\x0f\x1c\x4b\x71\xfd\x4e\xdf\x30\xcd\xd1\x7c\x7a\x43\xeb\xe9\x35\x92\xbc\x56\x50\xb7\xee\x04\xbc\x33\xba\x37\x77\xd8\xd8\x09\x55\x45\xd6\x6d\x4d\x77\xe0\x23\x74\xd9\x8a\xf5\xb6\x4e\xae\x69\xbd\x03\x89\xdf\x14\x5f\xeb\x31\xdd\x74\x18\x38\x57\x47\xc0\x3e\x32\x40\xd2\xfa\xaa\x1a\x0d\xf6\x33\xfd\x96\xc6\xa6\x47\x87\x79\xb5\xa1\x6e\x3b\x7d\x67\xd5\x9f\x5e\x07\xb9\x69\x69\x4e\x6f\x6d\x89\x6e\x6d\xb6\x73\x25\x97\x1b\x4b\xbb\xe9\x7e\xb5\xd9\x75\x2b\x26\xad\x6f\x1b\x4c\xcf\x47\x6e\xa4\x6d\xdb\xf3\x6b\x98\xf7\x8d\x65\x53\x41\x77\xc6\x05\x7b\x35\x98\x42\xcb\xd1\x65\x7d\x0a\x27\x17\x9d\xf7\xb5\xae\xfd\xe9\x9d\xb1\xc9\xde\xa3\xdd\x4b\xc1\xf9\x8e\x77\xe3\x77\x85\x77\x0b\x3e\xaf\xe1\x87\xc3\xf7\x06\x73\x33\x92\xdb\x8f\x1b\xff\xb0\x0b\xfd\x5c\x65\xe8\xdf\x48\x81\x66\x5a\xd3\xfe\x58\xd7\xb6\x01\xc3\x05\x12\x02\x2d\x6f\xe6\xf1\xfe\x78\xef\x12\xe4\x5b\x95\xd3\xeb\x9d\x28\x17\x9a\xc5\x12\xa6\x2b\x1e\xe6\xa4\x98\x12\x9a\x85\xc6\x8d\xb0\x43\x94\xee\x09\x90\xb7\xc3\x7d\x19\x92\xfb\xed\x76\xf6\x77\x5e\xba\x2e\x56\x9b\x62\x29\x11\x41\x66\x9b\x38\xdb\x46\xb3\xbb\x31\x1f\x01\xb9\xb9\xb7\x82\x6e\x6c\x7b\x88\x66\x6f\xe6\x30\x64\xd6\xdf\xc3\xdf\x79\x10\xb7\xf5\xdd\xc6\x7d\xb7\x35\xde\x06\x79\x37\xf3\xb6\xb5\x00\xf7\xf0\xe2\x75\xb9\xb1\xed\x36\xfc\xbb\x9d\x9e\x5d\xbc\x78\x0b\x45\x01\x1b\x72\xb9\xb1\xcf\x24\x6c\x1b\x0a\xe9\x74\x4d\x4c\x0b\x8d\x33\x8d\xc7\xfa\x66\xc5\xa1\xcb\xe8\xfb\x64\x3e\x46\x96\x2e\xff\xe7\xaf\x1f\x37\xf4\x82\xab\x08\xbd\x0c\x68\x3b\xdf\xe1\x6
4\x8d\x0c\x37\x15\x73\x43\x0f\x55\xb7\xce\x76\x49\x35\x4b\x89\x46\xb6\x5c\xba\xc0\x4f\x1f\x6a\xda\x81\xba\x57\xec\x84\xd0\x0b\x30\x76\xdc\x3d\x00\x63\xc3\xab\xdb\xfd\x40\xfc\x2b\x76\x0f\x23\x2e\xeb\x37\x40\x10\x3b\x0d\xb1\x54\x0f\x80\x30\xa6\x9a\x3d\x33\x76\xba\x98\xc8\x88\x45\xe1\x04\xc3\x1f\xbf\x79\x59\xb8\x12\x1c\xa3\x5f\xbb\xdf\xb0\xc6\x7a\x8a\x7e\x6d\xbb\xfc\x81\xd5\xe5\x01\x1a\xc3\x5f\xd8\xcf\x3f\xb0\xf2\x72\x82\xac\x5f\xd8\x4f\x37\x39\x97\xa8\x89\x9b\xf1\xda\x42\xde\xc1\xfb\xed\x08\xe2\xf1\xcd\x2d\xe0\x44\xb9\x58\x14\x4b\x8d\x0b\x90\xbd\x06\x58\xb9\x74\x0c\x00\xcb\xd6\xb1\x97\x5d\xda\x6d\xf7\x9b\xed\x02\x79\x09\x62\xde\xb1\xbf\xc5\xb9\x97\xd0\x55\x7e\x8e\x64\x59\x2a\x37\x02\xf2\xc4\xda\xd9\x46\x66\x4f\x96\x3f\xff\x76\x84\xfe\x00\x25\x40\xc8\x3d\xcc\x9f\x00\x71\x05\x50\x29\xbc\x4d\xb5\x7a\xb5\x80\x4d\x2d\x53\x46\xca\xdc\x82\x06\x66\xc0\x89\x36\x87\x1a\x72\xc5\x70\x63\xbe\xd0\x4f\xee\x75\x45\xdb\x92\xbf\xd3\xd5\x03\xfd\xbb\xb1\x3d\x27\xcb\xbd\x66\x5f\x85\x8f\xd5\xc4\x46\xb3\x56\xaa\xfb\x7e\xfb\x0d\xc3\x30\xac\x20\x94\xd2\x4d\x21\x2d\x62\x2e\xf7\xc5\x62\xd3\xb3\x77\xf5\x46\x2d\x9b\x68\xb8\x2d\x84\x3a\xf6\xaf\xfe\xbf\xb0\xba\x58\x10\x13\x0d\xec\x5f\x60\xf3\x2d\x38\x1a\x57\x27\xe2\x63\xdc\x5d\x03\xff\x34\xe6\x88\x73\xcc\xdd\x62\xa9\x1e\xe3\xef\x06\x0c\x7b\x16\xf7\x3f\x45\xe2\xf0\xfb\x6f\x18\x96\x10\xea\x22\xd6\xce\x88\x25\xec\x5f\xe0\x3f\xe0\xaf\xb7\x7f\x81\xff\x10\x7f\xfd\xfb\x5f\x84\xfb\x99\xf8\x0f\xf1\x17\xd6\xf0\x6e\x62\x62\xa1\x2e\x6e\x84\x22\x96\x92\x3f\xce\x4a\xe6\x06\x3f\xf0\xa0\x64\xae\x63\xf8\x68\xc9\xfc\x9f\x28\x92\x39\xf5\xa9\x5b\x39\xec\xfd\xf0\x6d\x82\x38\xb8\xed\x13\x88\x2e\xc5\x18\x56\xdf\xc8\x0a\xfb\xf3\x60\x01\x5e\xbd\x9f\x1b\xdd\x8a\x88\xfd\xe9\x9f\x11\x3f\xce\xcd\xda\xa7\xd2\x18\x04\x18\x20\x71\x37\x8d\x6f\xa7\xf0\x6c\x08\xf4\x28\x95\xe7\x80\x06\x28\x3d\x9a\x90\xc7\xe4\x1e\xb4\xec\x94\xda\x73\x61\xde\xc3\xd4\x9e\x01\x1a\xa4\xd6\x3f\x49\x2e\x52\xbb\xf1\x5c\x0a\x52\xe1\xdc\x70\xfa\x0e\x94\x0c\x64\x4f\xa1\x8c\xb0\x3f\xb1\x97\x97\x3f\x8e\xef\x2e\x75\x67\xd0\x37\x75\xc5\xb7\x95\x76\xc4\xab\x3f\xfe\xdd\xb2\xe8\x4e\xb0\xdb\xd8\xf3\xe6\xa2\x7f\xf1\xed\x71\xa4\x2b\x98\xa4\x6b\xfa\xc4\x71\x03\x83\x52\xb3\x50\xf0\xd8\x81\xe3\x4d\x18\x8f\xc9\x03\x68\x41\xd9\x41\x16\xb6\x80\xd6\x5a\x9f\x68\x81\x66\x93\xf9\x78\x1f\xf2\x63\xfa\xc4\x41\x1a\xb2\x02\x4d\x54\x03\x6a\x36\x66\x8f\xa1\x61\x9c\xa2\x71\xcc\xb1\x71\x8a\xe4\x3b\x41\xd3\x3f\xf6\x2d\x4f\x87\x3d\xb8\x6e\x88\x2a\x8e\x60\xb6\x63\x2f\x12\x07\xad\x4e\x04\x32\x9d\x1a\xba\x9b\xb3\xc7\x1c\x7d\x8c\x6c\x07\x8e\xa7\xd8\x66\xcc\xdc\xaf\xd8\xbb\x39\x41\xa7\x84\x86\xad\x8a\x76\xf1\xe8\x76\x39\x75\x1b\xcd\xfb\xc5\x57\x08\xd4\xad\x1a\x0a\xb5\x86\x17\xd1\x01\xf7\x87\x6c\x29\x51\x13\xdd\xf0\x2b\xde\xdd\xfe\x54\x2a\x63\xc5\x6c\xa9\x25\x14\x9a\xe2\xfe\xbb\xd0\x39\x7c\x4f\x08\x89\x8c\x88\x81\x6b\xcc\x44\x16\x7b\x10\xd0\x89\x2a\x6e\x93\x1e\xd8\x04\xad\x9c\x05\x34\xbe\xbf\x84\x70\xfc\xf2\xeb\x97\x85\x34\xd9\x80\xb6\xfd\x23\x38\x5c\xde\x5e\xc5\x19\xdd\x62\xa8\x1f\x17\x06\xca\x5b\x1b\x3f\xcc\x99\x97\xd1\xd9\xf3\x75\x7e\x66\x1c\x72\x75\xe7\xc9\x3c\xdb\x5c\x36\x95\x73\xcd\x01\x71\xbe\xb9\x97\xfe\x3b\xd3\x81\x66\x2e\xcd\xb0\xf3\xe9\x85\x27\xa9\xad\x1f\xe6\xa7\x29\xed\x25\x46\xb0\x72\xbb\x24\x26\xb1\x78\xf7\x0a\x47\x5e\x86\xee\x32\x43\x7b\x58\x81\xdb\xbf\xeb\x4a\x18\x6d\xbb\x9c\xcf\xa3\x5a\xb7\x85\xb3\x55\xbb\xc0\x9c\xe9\x87\x59\xfa\xd3\x14\x57\x58\xcb\x6f\xee\xc6\xc7\xb7\x10\x6d\x76\xf5\xf8\xfc\x2d\x05\x39\x50\x37\x6c\x6c\x68\x9b\x13\x29\x5c\xd9\x76\x89\xb2\x47\xe5\xb0\x85\xb3\x95\xc3\x6e\xdf\x3a\x84\x36\xdf\x66\xf2\x4d\xb3\xf0\xdc\x3e\xf6\xf9\x8e\x5b\xb1\x
f8\x32\xa3\xee\x40\xec\xe9\xd8\x59\x39\x3c\x80\xe1\x30\x10\xb7\xb5\xdf\x6f\x26\x07\x1c\x93\x39\x77\x0e\xbe\x29\xd8\xc7\x42\xd0\xb9\xda\xc9\x6b\x3b\x9f\x2a\x37\xb7\xdd\xab\xce\xf6\x6b\x60\x9f\xfd\x84\x17\x70\x12\x0f\x38\xd0\xe8\xcb\xa6\x3e\xb1\xcf\xeb\xa0\x8a\x50\x7f\x6a\x9a\xc6\xf9\xbb\xee\xce\xa7\x8a\xc2\xc6\xda\xbd\x6d\x21\x1b\x59\x8b\xb0\x26\x9b\x38\xd4\x59\xf5\xdd\x30\x49\x7f\x0f\x6b\x35\xb5\x4c\xc7\x94\x4d\x23\x94\xaf\xe0\x18\xed\x94\x05\x41\x05\x59\x6e\x78\xe1\xfd\x6e\xcf\x65\x19\xd9\xb6\x3a\x37\xfa\xa1\x8a\xb2\x65\x1c\xea\x06\x52\xae\xb5\xda\x92\x1e\xa2\x42\xe1\x53\x2f\x24\xbf\xfd\xe8\x4c\x0c\xd9\x33\xb9\xe2\x17\x6f\xb7\x48\xd7\x6d\xdc\xbd\x2c\x3f\xd7\xd5\x5d\xc4\xf1\x59\xae\xef\x2e\x46\x1f\x74\x85\x17\x71\x9d\xba\xc6\xf3\xcd\x2f\xb8\x4a\xdf\xee\xcf\xd3\x74\xf3\xda\x52\xe8\xb8\xf2\x2a\x64\xb9\xb4\x59\x1d\xc8\x1e\x2b\xae\x97\x7c\xd0\x49\x6e\xad\x83\x39\xb7\xe4\x7d\x29\x47\x88\x7b\xda\x99\x9c\x97\x97\x5f\xbf\xc2\x97\x6b\xe1\xf3\x60\xbb\xf9\xf6\xa8\x38\xb7\xf5\x82\xdf\x9f\x1a\x53\x6c\xcd\x66\x14\x0f\xe7\xd6\xcb\x84\xa2\x0d\x54\x2b\x5e\x6a\xb4\x2d\xa0\xbc\xd4\xc4\x5b\x2b\x9f\x6d\x70\x5a\xf7\x79\xa5\xdd\x45\x74\xfb\x56\x17\x30\xba\x24\xe9\x76\xdf\x46\x86\x81\x2c\x4c\x32\x4d\x03\xc1\xc9\xce\x6f\xe9\x32\xea\x4f\x8e\x7c\xb4\xf7\xdb\xb1\xdf\x3e\x54\x1c\xf5\x03\x1e\xfd\xa8\xe6\x29\x78\xd3\xb7\x95\x7f\xb6\x3a\xd4\xa5\xba\xef\xd6\x0f\x63\x89\x8c\x98\xc8\x63\xdf\xbf\xfb\x25\xf8\x6f\x0c\xff\xf1\xe3\x1a\xa8\x73\xdd\x77\x42\xfb\x3f\x27\x72\xbc\x01\xde\x91\x4c\x03\xe0\x03\x02\x77\x09\xbc\x38\x95\xce\xef\x82\x3f\x61\x72\x9d\xaf\x6b\xb8\xd1\x93\xde\x62\xc2\x1e\xf1\xa5\xd7\x6a\x08\x9e\xe3\x4d\xaf\x60\xf9\x2c\x7f\x7a\x27\xb3\x0f\x7a\xd4\x2b\xd8\x4e\x7d\x6a\x58\x87\x0b\x5e\xf5\xa8\x6e\xe4\x89\xba\xba\xd3\x4f\x3f\x49\x37\x2f\xb4\xb6\xb6\xff\xca\xf2\xed\x56\xc7\x7b\xd9\x87\x9e\x6d\x7b\x40\x7d\x76\xbe\x6c\x56\x0a\xe1\x4b\x8d\xb0\x45\xdc\xdf\xb2\x0c\x73\x56\x7d\x34\x59\x20\xc3\x9c\xa2\x73\xa9\x4d\x67\xb5\x59\x14\xcd\x0d\x27\xe4\xe6\x18\x39\x30\xe4\xd6\x66\x39\x16\x76\xdb\xd6\xb5\x09\x74\xe6\x16\x3a\x97\x85\xe3\x99\x1f\xff\xf9\xeb\x10\xbb\xfc\xf7\x7f\xe7\xa2\x97\xff\xfc\x15\x94\x39\x1a\x9b\x21\x09\xb3\x03\xac\x89\x39\x41\x17\x63\xa1\x03\xac\x53\x30\x5b\xce\xf4\x31\xea\x4b\xe6\x7c\xa2\xb8\x59\x6d\xce\x82\x13\x0d\x05\x57\x6c\xc7\xae\x75\x23\x89\x0d\x34\x0d\x05\x96\xc0\x93\x09\xb2\xfa\xb7\xcd\x80\x03\xa4\x8b\xea\xea\x07\x7c\x5d\xc8\xdb\x44\x3d\x5a\xf6\x77\x1a\xeb\x91\x77\x2d\xe9\x87\xe9\xca\xce\x16\xec\x8a\xd0\x6e\x31\x60\x9e\x31\x70\x2b\xfe\xae\xd4\xb7\xd5\xc5\xc6\x85\x4c\xaf\x3f\xa7\xe6\xcf\xf3\xde\xb7\xca\x79\x1e\x13\x37\x96\xff\x5d\x64\xea\xe2\xea\xe8\x16\x26\x43\xe3\x80\xa7\xb1\x79\x73\x05\xe5\x45\x46\xaf\x38\xad\xf3\xac\x26\xa1\x03\x31\xd5\xb4\xae\xec\x77\x61\x49\xa1\x21\x5c\x61\x2f\x5b\xaa\x8b\xb5\x06\x96\x2d\x35\xca\x47\x7b\x5e\xae\x8f\xaf\x63\xdf\xc1\x2b\xf6\x02\x08\x7c\x7b\xbd\xbc\x62\xc4\x2b\x86\xbf\x62\x2f\x2f\x3f\xfe\xb8\xda\x97\xb8\xd0\x37\x84\x95\x4b\xfb\x55\xf7\xb2\x13\xdc\xb3\xda\x91\xf5\x02\xfa\xfa\x44\x77\x74\x68\xf4\xbd\xfa\xa1\xdf\xed\x99\xf1\xf2\x8a\xbd\x10\x38\xe0\x7f\xe2\xcc\x4f\x9c\xc4\x00\xf7\x8b\xe0\x7e\x51\xec\xef\x38\x49\x50\x3c\x13\xc3\x89\x20\xc3\xa1\xd0\x89\xbe\x77\x2e\xe4\x68\x34\xa5\x75\xdf\x31\x75\xe5\x32\x26\x9e\xa1\xd9\x7b\x30\x91\xfd\xb9\x8d\xf6\xbe\xb7\xaf\x4f\x4e\xce\xa2\x5c\xc4\x47\x51\x38\xc5\xdd\x83\x8f\xea\x43\x45\xe9\x07\x33\x7a\x17\x71\xd0\x14\x4d\x12\xf7\xe0\xa0\xfb\x9e\xa7\xdf\xad\x39\xdc\x9d\xe0\x8b\x28\x18\x12\x27\xee\x62\x83\xd9\xa1\xd8\x5a\xce\x1b\x50\x70\x14\xa0\xef\x41\xc1\xf6\xc7\xa6\xa2\xab\xeb\xdb\xb9\xe0\x00\x43\xdc\x85\x82\x3b\xe2\x62\x5b\x00\x7e\x03\x1e\
x96\x62\xc8\xfb\xf0\x6c\x06\x1d\x6a\x9a\x85\x34\xe8\x98\xd6\x65\x9d\xe2\x71\x80\xf3\xf7\x80\xe7\x5d\xf0\x5e\xb6\xb7\xbf\x52\xac\xcb\xd0\x09\x16\xdc\x35\xd4\x00\x77\xc1\x6f\x47\xc1\x5d\xbf\x5f\x46\x40\xf3\xec\x5d\xd2\x01\xc0\x8f\x60\xbf\x20\xdc\x18\x80\xcb\x88\x78\x86\xbf\x8f\x13\xe2\x68\xa0\xb7\x4b\x70\xef\xc8\xf1\x25\x4c\x00\x67\x69\xea\xae\x11\x01\xa4\xc7\xce\x3e\x71\x71\x71\xc4\x01\x20\x58\xe6\x3e\x4e\xa8\xbe\xaa\xaf\x76\xc7\x2f\xcc\xb1\xd1\x57\x75\x64\x5c\x34\x8d\x00\xd0\x00\xdc\x65\x84\x01\xbd\xdb\x75\xda\xed\x06\xac\xae\xb0\xc1\xb0\xf7\x99\x79\xc0\xf4\xf5\x89\x86\x6c\xa7\x7f\xba\xdf\x70\x05\x15\xcb\x73\xf7\x8d\x08\x7b\x14\x26\xb8\x1b\x3b\xf0\xb2\x33\xd9\xb8\x5d\x92\xba\x0b\x09\xb7\x57\x5f\xd5\xf4\xa2\x65\x14\x98\xec\x00\xff\x49\x02\x0c\x50\xbf\x00\xff\x8b\xe2\x7f\x07\x04\xc9\x91\x74\x0c\x07\x17\xfc\xf9\xc5\x4a\x88\x7b\x1d\xfa\x49\x35\x84\x3f\x48\x49\xc7\x3b\xe9\x6a\xae\xdd\x2a\xb4\xcb\xdd\x4c\xaa\xd0\x6a\xe4\xdb\x2d\x3a\x95\xce\x08\x64\xa1\xd4\xed\x12\xb9\x6a\xbe\xc8\x96\x85\x9c\xd0\x14\xab\xa9\x26\x53\xa8\x24\xea\x62\xaa\xd5\x29\x97\x82\x02\x0a\x45\xb2\x89\x66\xd2\x89\x4e\x3e\xcd\xd4\x4a\x54\xb9\x94\x15\x2b\x89\x62\x29\x15\x67\x49\x42\xa0\x48\xa6\x47\x57\x4a\xc9\x7a\xad\x90\x6e\xe7\xd9\x74\xbc\x90\x28\x56\x0b\xd9\x54\x99\xaa\xb3\x62\xb7\xdd\x6a\xde\x8c\x84\xdc\x20\x11\xe8\x76\xbc\xd2\x15\xe8\x2e\xd5\x16\xc4\x4c\xa7\x5d\x23\x9a\xf9\x32\xd1\x2c\x53\xf1\x66\x3a\xd3\xac\xb2\x94\xd8\xac\xe4\xcb\x25\xa2\x9a\x69\x51\xed\x5a\xa6\x9c\xad\x95\xf2\xf9\xcc\xc9\x50\x87\x22\xa1\x5c\x4e\xaa\x95\x6e\x3a\x43\xe5\x69\xb6\x13\x4f\x8a\xa5\x7c\xbe\x43\xd3\xf9\x64\xa3\x5d\x6e\xe4\xe9\x76\xb2\x5d\xab\x96\x33\x44\x21\x23\x26\x3b\xa4\x98\x6f\x65\xab\xb5\x42\x51\xac\xa7\xe3\xe9\x9b\x91\xd0\x2e\x12\x82\xac\xa6\x88\x4c\x53\xa4\x09\xa1\xd8\x69\xa6\x9a\x19\x52\xe8\xe6\x84\x4e\x27\xdd\xe9\xb4\x88\x56\xa6\xd3\xed\xd6\x18\xb1\xdb\x11\x1b\x95\x7c\xb2\xd3\xab\x0b\x6d\x86\xed\x94\xa9\x9b\x91\x30\xee\xc0\xd7\x2a\xdd\x4c\xb6\x40\x24\xb2\x64\xaa\x54\xa5\xe2\x9d\x42\xaa\x58\x4a\x16\x52\xb9\x66\xa9\xd2\x24\x32\x5d\xb2\x57\x4c\xd5\x33\xe5\x52\x33\x21\x96\x85\x7a\x9b\xad\x26\xd8\x72\x87\xc8\xbc\x44\xad\x41\xda\x04\xf4\x57\xb4\x76\x5b\xb7\x79\x28\xb9\xfe\xdd\x46\x97\xeb\x73\x5e\x31\xe6\x15\x73\xac\x39\xba\x61\x2e\x9d\x56\xde\x44\x9e\x49\xde\x7a\xd3\x3f\x8f\x64\x0b\x29\xba\xd3\x87\xc6\x74\x00\x27\xf3\x31\xb5\x99\xfd\xcd\x7a\xf2\xe5\x23\x86\xf3\x18\x3b\x11\x82\x5d\x6c\xd6\x5e\x1e\xd4\xd8\x28\x95\x2e\x4f\x19\xe5\xa3\xb5\xb9\xbb\x00\xba\x6d\x8c\xcf\x15\xba\x44\x1d\xe4\x5d\xb1\x8b\xcf\xc6\x10\x34\xcb\xf0\x1c\xce\x72\x2c\xf9\x8a\x01\x97\xae\x97\xff\x7e\xf3\x82\x8a\x6f\xbf\xb0\x6f\x00\xff\x7d\xbb\x6a\xfb\xf6\x8a\x7d\x3b\x94\x53\x6d\xee\x89\xcd\xda\xe1\x47\x67\x3d\x75\x7f\x0c\x8e\xdb\xa1\x85\x57\x56\xb5\x69\x13\x75\x00\xbf\xfd\x2f\x4c\x81\x82\x9c\x11\x01\xce\x88\x57\x8c\xbc\x83\xb3\x66\x3d\x19\x95\xb3\x68\x13\x23\x3a\x67\x9b\x7f\x1e\x6b\xde\xe6\x95\x4b\x45\x44\x0f\xb8\xe1\x68\x17\xeb\x7d\xfb\xb5\x51\x87\x6f\xb6\x69\x28\xfd\x30\xa1\x49\xe6\x5c\x1b\x38\xa1\xb7\xbd\xbe\xe7\x04\xeb\xbb\x73\x49\xba\x3b\xf8\xe7\xb4\xee\xe8\xde\x25\x20\x3e\x5c\x4f\x18\xa7\x20\xe6\xcf\x54\x6a\x10\x18\x7a\xea\xdc\xd0\x47\x8c\x4b\x3e\x7e\xe8\xb7\xe3\xf6\xc8\xd0\x6f\xb5\xe7\xd1\xa1\x8f\x38\x4e\x97\x86\xfe\xa3\x67\x3d\x78\xc5\x00\xbb\x59\x2e\xb0\x24\xc1\x81\xb3\x96\x9a\xf8\x67\x5a\x6a\x2a\xc0\xd9\x19\x4b\xfd\x0f\xe5\x8c\xd8\x73\xc6\x92\x2c\x1b\xe2\x5d\xff\x99\x4e\x88\x0e\xb0\x76\xd6\xbd\xfe\x33\x59\xdb\xcf\x34\x0e\x50\x21\x31\xd1\x25\x7d\xfc\xc2\x9c\xd1\x01\xce\xee\x9c\x69\x5f\x98\x33\x72\xcf\x19\x4d\x33\xfc\xfd\x33\xed\x0b\x1b\x11\x2a\xc0\xda\xbd\x33\xed\x0b\xb3
\xb6\x19\x35\x82\xe3\x28\x1e\xa7\x79\x8e\xf6\x46\x0d\x77\x79\x33\xf4\xb1\xee\xb2\xc6\x13\x04\x49\xb2\x04\x4e\x32\x1c\xfd\x3b\xc5\xb2\x34\x87\xb3\xff\x28\x1e\x89\x1d\x8f\x00\xc7\x77\x7e\x3b\x12\x8f\x5f\x78\xf6\x81\x3d\x8f\x14\xd8\xf9\xb9\xff\x77\x79\xe4\x88\x9d\x57\xf8\x7f\x4d\x57\xe9\x57\x8c\xa3\x39\x9e\x27\x39\x86\xf3\x8c\xa8\xc7\xa1\xed\x40\xcb\xd1\x27\x5a\x5f\x82\x06\x9c\xc8\x28\x68\x73\x6e\x46\xc0\x1c\x23\xb8\xd5\x94\xed\xa4\x34\x81\x8e\xbe\x40\x91\xf9\xd9\x8c\x9a\xc7\xd0\x12\xe9\xda\xc0\xd9\xae\x72\xbc\xe4\x49\x7f\x84\xd6\x9f\xa3\x49\xd4\x96\x2a\x8a\x60\xb7\x46\xef\x83\xa4\xbc\x45\xf0\xd1\x52\x0e\xf0\x73\xa3\x94\x3f\x61\xa9\xec\x51\xc5\x70\x5b\xb3\xfb\x51\x52\xf6\x10\x7c\xb4\x94\x03\xfc\xdc\x26\xe5\xa8\x59\xa0\x7b\x3c\xb8\x4b\x15\x8d\xf3\x5b\xc3\xff\x41\x52\xde\x22\xf8\x68\x29\x07\xf8\xb9\x4d\xca\x11\xf7\x68\xee\x8a\x21\x3c\xaa\x68\x7c\xeb\x7a\x3e\x4a\xca\x1e\x82\x8f\x96\x72\x80\x9f\x1b\x2d\x46\xc4\xb4\xd6\xff\x6e\xd8\x92\x3c\x77\x86\x32\x6a\x8a\x7d\x77\x8e\xd2\xbf\x2f\xc5\x13\x8c\xa2\x12\x80\xa5\x21\xc5\x22\x56\xa5\x64\x1a\x11\x2c\x49\xb2\x3c\x23\x4b\x12\x05\x65\x46\xe2\x24\x1a\xa7\x11\xa5\x40\x8e\x66\x78\x19\x57\x18\x62\x13\x6a\x48\xbc\x02\x58\x40\xab\x2f\xaf\xd8\x0b\xcd\xf0\x2c\x87\x14\x9a\x67\x00\x52\x54\x92\xe3\x08\x20\x4b\x04\xce\x93\x84\xa4\xa8\x04\xab\x22\x1a\x87\x04\xa0\x00\x01\x08\x0a\x27\x14\x5a\x52\x69\x48\xe0\x2c\x64\x09\x85\x96\x79\x86\x7e\x71\x15\x07\x04\x36\x89\x99\x5f\x24\xf3\x0b\xe0\xc1\xbd\x63\xf7\x67\x9c\xff\x9d\x21\x29\x86\xe1\xae\xde\xdd\x26\x25\x49\x96\x65\x5e\x31\xb0\xf9\x0f\x3f\xb9\x5e\x31\x40\xbb\x7f\xb7\x7f\xf6\xbf\xee\x3f\x6c\x68\x13\x04\x41\x48\x00\xd8\x19\x3a\xed\x76\xc6\xe1\x28\x6d\xb0\x5e\x27\x1a\x79\x9e\x9b\xbc\x19\x09\x3d\x15\xcf\xd6\xab\x09\x20\xf1\x30\x9b\x99\xae\x81\x6c\x18\x86\x3e\x06\x33\xa2\x0a\x48\x69\x30\x68\xb1\x44\x37\xa7\x71\x23\xdb\xaa\xb2\x83\x32\x47\x4c\xd7\xb1\xee\x4c\x37\x92\x59\x81\xe3\xd7\xa2\xb6\x01\x2d\x74\x2a\xad\x22\xf2\x3e\xba\x97\x62\x4e\xd3\xb3\xde\x5b\x4a\x7b\xe7\x56\x1a\x0f\xd4\xc4\xb2\x61\x2f\x56\xd9\xb1\xcd\xcf\x47\x6d\x96\x4c\x64\x50\xc2\x89\xcf\x4b\xe3\x52\xbb\x90\x6d\x55\xc7\x6f\xd3\x65\x7d\x3d\xa3\x86\x2a\x5a\x4c\x86\x15\x29\x9b\x21\xb2\x72\x27\x1e\xaf\x34\x62\xef\x8d\x95\x62\xea\xdd\xc5\xc2\x98\xa9\x2e\xfc\x6e\x89\x2a\xc0\xf7\x29\x51\x3d\x20\x13\x3a\xb2\x70\x7c\xc5\x37\x7f\x7a\x42\x07\x50\x55\x41\x48\xe2\x39\xe1\x9f\x76\xbd\xec\xcc\x51\xa9\x59\x28\x84\x4c\xff\xe0\x8c\xa0\x9f\xa3\xcd\x2f\x14\x89\x24\x88\xab\x04\xe2\x29\x1e\xca\x1c\xaf\xe2\x90\xe0\x55\x12\x4a\x2c\x81\x53\x90\x86\x84\x4a\x90\x0a\xc0\x21\xe4\x21\x8d\x70\xc4\x49\x90\x54\x24\x05\x91\x0c\x64\x38\x9e\xa1\xf8\x0b\x33\x02\xe7\x43\x75\x9e\x61\x19\x9c\xbc\x7a\x97\x00\x14\x4b\x71\x24\x43\x71\xf8\xa5\x19\x41\xdd\x38\x23\xf0\x8a\x65\x56\x0a\xd3\x3a\x5c\xe7\xb8\x7c\x5e\x2d\x5b\xb2\x56\x37\xcc\x45\x96\x17\xf3\x33\xd8\xd1\x98\x82\xc9\x2f\x50\x69\x06\x73\xbd\x5c\xa3\x2e\xf4\x8a\xf4\x28\x07\xb3\x43\x32\x35\x9e\x8d\x13\xa2\xac\xb7\xc5\x46\x2d\x56\x7d\x2b\xdb\x3c\xab\x66\x5b\x94\x50\x86\xad\x5a\x6e\x21\xba\x23\xe8\xce\x08\x9f\x92\x16\xf5\x69\x4b\x4b\x4a\x85\x72\x7c\x59\x33\x8d\x4c\xdc\x6e\xd4\xed\xf1\x2a\x55\x7a\x47\x8d\x0e\x29\x28\xb3\x5e\xbd\xe3\x90\x85\x74\x31\x35\x1e\x96\x89\x8a\x3e\x61\x1d\x55\x4b\x25\x84\xf8\xba\xa2\xcb\xda\x30\x47\x94\xea\xc5\x55\x4d\x51\xda\x0c\x2f\x0c\x0b\xce\x92\x91\x21\xac\xf5\xdc\x19\xd1\x3c\x33\x23\x5a\x54\x40\xa1\xfe\xbf\x9c\x11\xd4\x73\xb4\xf9\x05\xf1\x3c\x2b\xb3\x0c\x24\x29\x96\x97\x38\x5e\x26\x54\x19\x71\x90\x44\x2c\x20\x29\x95\x24\x55\x84\x68\x4a\x61\x15\x1a\x90\x0c\x23\x49\xbc\x0a\x64\x9e\xa4\x64\x8e\x20\x69\xa0\x48\x04\x0e\x5f\xdc\x2d\x2
5\xea\xac\x72\x87\x7b\x01\x8e\x22\x69\xea\xe2\x5d\xd7\xf9\xb8\x49\x2c\x86\x07\x1c\x75\x69\x46\x90\x37\xce\x88\xd8\xb8\xb3\x22\x4c\x1c\x49\xba\x5c\xe0\x98\x7c\x65\x22\xe2\xdc\xfb\xdb\xbc\x55\x52\x5b\x22\x01\xd9\xc9\x6a\xdd\x68\x64\xb3\x20\x23\x65\xf3\xb5\x95\xb8\x76\xf8\xba\x32\xa2\x97\xb1\x35\x5f\x93\x4b\x36\x9d\x9e\x67\x57\xe4\x8c\xae\x27\x5a\x84\x5c\x68\x4e\x59\xd3\x69\xe2\xb9\x58\xa3\xeb\x8e\xa0\x3b\x23\x7c\x23\xda\x43\x46\xb6\x52\x27\xed\xea\x40\x48\xcc\xf3\xd9\x91\x4d\x52\x0c\xa0\xb9\x1a\x99\xe3\xc1\xc8\x9a\xc5\x72\xdc\x1b\x1a\xa7\x47\xab\x45\xdd\x4a\xd4\x3a\x75\x73\x49\xb1\xe2\xbb\x65\x0c\xc8\xb2\xa9\x76\x96\xf1\xcc\xac\x05\x7a\x56\x61\x6e\xd2\xf3\x74\xdb\xcc\x1a\x6a\x41\x2d\x64\x35\x77\x46\x54\xcf\xcc\x88\x66\xf3\x9c\x56\xfd\xc3\x67\x04\x75\xef\x8c\x20\x9f\xa3\xcd\x2f\xa4\xca\xd3\xb2\xac\xca\x40\x91\x70\x9c\x23\x39\x95\x64\x68\x5c\x06\x84\xcc\x02\x19\x67\x19\x1a\xc8\x2a\xc7\x20\x1e\x91\x8a\x44\xca\xb4\xcc\xa9\x1c\x81\x73\x34\x47\x01\x44\x41\x00\x48\xe2\xd2\x8c\x60\x43\x75\x7e\x13\xda\x86\xcf\x88\xdd\xdd\x6d\xee\x13\x70\x1c\x77\x61\x46\xf0\x37\x4e\x88\xf7\x58\xeb\xbd\xb2\x54\x6c\x41\xa3\x38\xa2\x59\x14\x6d\x59\x8e\x13\x4d\x39\x96\x66\x50\x45\xaa\xa4\xd6\x15\x4d\x1b\x74\xab\xa8\x3e\x10\xd7\x3d\x52\x1c\xd1\xbd\x6c\x1e\xa8\x05\xb1\xde\x91\x0b\x86\xf8\x86\xe3\xef\x82\x8d\x33\x09\x1e\x8d\xa6\x20\x27\xbd\x11\xc8\x2a\xad\xe5\xf9\xd2\x1d\xc0\xcd\x84\x50\x96\x87\x01\xad\x8f\xdb\xdd\x2e\x2e\x36\x47\xf1\x76\x91\xd2\xde\x62\xa3\x89\xd6\xd4\x87\xcb\x6c\x8b\x2d\x5b\x09\xba\x37\x7a\x2b\x66\xba\xe9\xdc\x50\x14\x6d\x52\xc0\x57\xc3\xf6\x54\xee\x58\x46\x3b\xd1\x96\x12\x8d\x86\x1a\xcb\x59\xf9\x24\x65\x8d\xf4\xf9\x64\x24\x25\xcb\xc4\x2a\xa3\x73\xc2\x1b\xed\x41\x2e\x9e\x99\x10\x65\xf1\x9c\x52\xfd\xff\x36\x21\x88\xe7\x28\xf3\x0b\x43\x2a\x3c\xa7\xd2\x24\x83\x10\xc3\x29\x40\x22\x58\x89\x96\x38\x5e\x25\x48\xa8\xd2\x24\x00\x12\x4b\x33\x3c\x24\x28\x15\xaa\x80\xc2\x49\xa8\xe0\x12\x4d\x48\x0c\x49\x4a\x38\x2b\x21\x7e\xe3\x66\x68\xf7\xdf\x19\xdd\x66\xc2\x54\x9e\x05\x0c\xe9\xa1\x0f\xbd\xbb\x09\x9a\xbc\x64\x1d\x45\xf3\xc4\x85\xf9\x70\xeb\x22\x82\xa8\xf4\x86\xa0\x34\xa7\x4d\x5c\xca\xb1\x6d\x6a\xb2\x2e\x2f\x9a\xab\x34\xd9\x9a\x9a\xa3\xd8\x22\x25\x94\x9d\x04\xc8\x13\x45\x36\xce\x32\xbd\xc1\x44\x95\xc4\xe4\x34\x53\xe9\x2a\x14\x3d\xa9\xca\x0d\xb6\x33\x52\xb3\xc5\x65\xbd\xc3\x6b\xa9\x24\x1f\x53\xdb\xd0\x61\x01\x53\xcd\xb6\xbd\xf8\xdd\x9d\x0f\xae\x01\xcf\xee\xff\x08\xae\xca\xda\x87\xef\x4b\xa1\x52\x1d\x79\xe3\x5d\xe5\xc7\x5c\xa1\x9e\xe6\x24\xba\x8d\x4b\xf3\x95\xe8\xcc\x6c\xa7\x55\x7d\x67\xb3\xad\xb7\x8c\x95\x1c\xa7\x45\x47\x11\xa8\x71\x42\x4c\x76\xd6\xdc\xa0\xe1\x10\x5d\xad\x36\x69\x64\x1b\xd9\xf9\x3b\x35\x2b\xf5\x44\x7a\x56\x2c\x35\xde\xf1\xa9\x88\x53\xa0\x25\x37\x5b\xe9\xe5\xb8\x0d\x9b\xee\x7c\xcc\x9e\x99\x2f\x19\xfc\x9c\xce\xfd\xc3\xe7\x0b\x7d\xef\x7c\x01\xcf\xd1\x75\xf7\xe4\xe3\x06\x35\xfe\x8a\xbd\x00\x9e\xc5\x7f\xe2\xe0\x27\x0e\x30\x1c\xff\xe5\xfe\x0b\xd5\x69\x92\xf3\xc2\x9e\xcb\x77\x29\x82\xa7\x78\x86\x25\xf8\x4b\xeb\xe6\xf3\xfa\xee\x91\xf4\x77\x0f\x4d\xf8\x15\xef\xe4\x75\x6a\xfd\xb6\xae\xe7\xe3\x6c\x72\x92\xe4\x33\x04\xbe\x1a\xc6\x63\x36\xae\x39\xf6\x32\xbb\x7c\x07\x1d\xa5\xde\xee\xc2\x78\x0e\xa6\xdc\xa9\x24\x9e\x51\xe5\xf3\xd7\x4e\x95\x05\x21\x3e\xfa\x04\x46\x9e\x7a\xbd\xec\x94\x69\xab\xca\x57\x12\x53\x37\x3c\x61\x28\x6a\x9e\x2a\xe4\x68\x66\x58\xa9\x19\x11\x32\xef\xae\x80\x39\x29\x56\x8c\x06\x86\xdc\xad\xa8\x29\x9c\x76\x13\xcd\xd1\xc0\x04\x4b\x8d\xa8\x68\x60\xe8\x60\x2d\x56\x34\x30\x4c\xa0\x86\x86\x8e\x06\x86\x0d\x96\xe2\x44\x03\xc3\x05\x8a\x43\x22\x52\xc3\x07\xab\x67\xa2\x81\x01\x78\xa0\xec\x21\xe2\x50\x01\x10\x80\x
13\x51\x01\x01\x11\xa8\x55\x88\x0a\x87\x0c\xd4\x03\x44\x1c\x2d\x40\x05\xf7\xdc\x23\xc2\xa1\x83\xfb\xda\x11\xe1\x04\xb6\x76\x99\x88\x60\xd8\x63\x30\x11\x95\x10\x70\xc7\x7b\x92\x51\xa9\xe1\x8f\xc1\x44\xd4\x41\x02\x3f\xde\xbb\x8b\x48\x0d\x11\xdc\xd2\x8c\x08\x86\x38\xde\xe3\x8a\x4a\x4d\x70\xab\x2c\x22\x18\xea\x78\x2f\x28\x2a\x35\x74\x60\x07\xeb\x39\x0f\xb0\x7b\xca\xc1\x8f\xcb\xcf\x2f\xd8\xf8\xc7\x5b\x4f\x82\x84\x3c\xc7\xed\xe1\x08\xc0\x2f\x47\xbf\xcb\xdf\x7f\x21\x7c\x15\x75\x8e\xf9\xd0\xa6\xe7\x2b\xf6\x4d\xb5\xcc\xf1\xa3\x95\xea\x53\xe8\x0c\xbe\xfd\xc2\xfe\xf3\xd7\x2b\xf6\xec\xb3\x2a\xdb\xe7\xc7\x8d\xe1\x2a\x0c\xe4\x73\x4a\xc9\x77\xcf\xa9\x0b\x2d\xa0\xf7\x6e\x9f\x3f\x3d\xe1\xbb\x77\x03\x2f\x4f\x2c\x97\xfa\x80\x93\x8f\xe7\xf4\xd0\x1f\xec\x1d\xbe\x30\xae\x1e\x7a\x7b\xd3\xee\x09\x6e\x57\x6c\x47\x03\x14\x52\xc9\xea\x3d\xaf\x6d\xc3\xfc\x7f\xbf\xed\x8e\x37\x4c\x36\xff\xff\xdf\xf1\xb1\x07\xfc\x15\xfb\x26\xcd\xd7\xfa\x44\x0b\x39\x79\xe0\xbf\x77\x51\xf2\xc8\x30\x4e\xa1\xec\x8e\x40\x1c\xdd\xbc\x78\x0a\xc2\x8f\xef\x39\xa7\x57\x8e\x91\x3f\xa5\xbe\xec\x03\xce\xdc\x9e\x51\x8a\xe3\x98\x7b\xff\x05\xdf\x6f\xe3\xed\x8d\xd3\x03\xa7\x9f\x0e\xc6\x29\xfa\xcc\xfe\xba\x07\x17\x3e\xe0\xb4\x6f\xf8\x40\x6d\x0f\x3d\xec\xbf\x30\xa7\x03\xf5\x80\x07\x38\x0c\x54\xf4\xa9\xf0\x85\x0f\x2b\x7c\xc0\x19\xdb\xd0\x91\xda\x2d\xd5\xf6\x5f\x88\x0f\x9a\x52\xcf\x18\xa9\xaf\x76\x42\xe1\x53\x07\x6a\x7b\xba\x61\xff\x85\x3b\x1d\xa8\xa7\x04\x66\xcf\xb0\x7d\x5f\xee\x54\xc2\x27\x19\xbf\xa3\x13\x0d\xfb\x2f\x94\x3b\x4c\xcc\x6d\x15\xe3\x8e\x35\xb7\x1d\x84\x1e\x1d\x0b\x17\x8c\x69\x3d\xaa\x16\x5f\x63\xe4\x9e\xfc\x24\x8c\xd0\x91\xdb\x9d\xd3\xd8\x7f\xc1\x23\x8f\x5c\x74\x73\xe7\x1f\xb9\x07\x7c\xe4\x17\xb1\x8e\x1f\xbe\x5c\x38\xce\x84\xed\xbf\x30\x7f\xf3\xc8\x3d\xe0\x33\xbf\xc6\xc8\x7d\x42\x4c\x7f\x94\x7b\xdc\x7f\x21\xfe\x66\x6b\xf9\xb4\x91\xfb\xbb\xac\xe5\xc7\x8f\x9c\x3f\x4d\xbb\xff\xcc\xf9\xaa\xd3\xd5\xf9\x44\xd9\x3d\x8d\x22\xda\x63\x79\x5c\xb9\x78\x0f\xc7\x79\x78\xd1\x7b\xb5\x54\xfe\xc1\xc7\x07\xdd\x23\xb5\x6d\x3a\x79\xff\x99\xfa\x58\xa9\x3d\x90\xa2\xfa\x52\x52\xf3\xd2\xde\xfb\xcf\xf8\x87\x4a\xed\x01\x0b\xf0\xa5\xa4\xb6\x4d\xcf\xef\x3f\x33\x1f\x2a\xb5\x07\xa2\xcc\xaf\x25\x35\x6f\x1b\x61\xff\x99\xf8\xd8\x19\x1a\x3d\xc2\xfb\x70\xa9\x5d\xd9\x92\x38\xf3\x1e\x94\xa8\xdb\x11\xdb\x77\xa1\x9c\xdd\x8a\xc0\x43\x8f\xad\x78\xfb\x12\xe0\xb8\x3a\xc6\xfb\xed\xf0\x83\xfb\x66\xd0\xdd\x52\x99\x62\x00\x60\x38\x06\xa7\x28\xc0\xb3\x80\xa7\xd9\xdd\xf6\xe9\x75\x56\xaf\xbf\xa7\xe2\x01\xe6\xcf\x3f\x41\xfa\x6c\x3d\x06\x11\xbe\x6f\x7c\x15\x10\x11\x00\x14\xb6\x71\x77\x15\x10\x19\xcc\xcc\x47\x05\x44\x05\xb2\xb9\x61\xdb\xac\x57\x01\xd1\xc1\xb4\x70\x54\x40\x4c\x20\x6d\x19\xb6\x0d\x7d\x15\x50\x30\xff\x19\x79\xd4\xb8\x40\x7a\x2e\x32\x45\x7c\x30\xcf\x17\x15\xd0\x71\x85\x06\xf7\xc0\xb0\x1d\xd7\x68\x70\x0f\xa8\xe4\x71\x95\x06\xf5\x08\x24\x32\x90\x02\x88\x3c\x72\xc7\x95\x1a\x8f\xcc\x93\xe3\x5a\x8d\x87\xc6\x8e\x39\x8e\xd9\xc3\x76\xdd\xaf\x03\x62\x8f\x01\x45\x56\xcb\xa3\x8a\x0d\xea\x11\x8a\xf8\x63\x40\x91\xb5\xf2\xa8\x6a\x03\x7f\x80\x22\x22\x18\xb4\x46\x06\x44\x1c\xc7\x71\xd1\x29\x22\x8f\x01\x45\x9e\x25\x47\xd5\x1b\xc4\x23\x14\xd1\xc7\x80\xc2\xeb\x37\xee\x7d\x8f\xd0\x33\x2a\x38\xae\xbd\x98\xe1\x9e\x1a\x8e\xd0\xb7\x06\x3d\x21\x72\xf0\x3f\xc2\x59\x21\x20\x25\x21\x96\xa0\x58\x9c\xa1\x39\x95\xe1\x14\x89\xc6\x11\x52\x09\x1e\x41\x1c\xaa\xa4\xca\x73\xb4\x8c\x00\x27\xd1\x32\x01\x38\x15\x27\x29\x06\x28\x14\x2e\x53\xac\x22\x13\x3c\xe1\x3d\x84\x16\x3c\x92\xbe\xf3\xd5\xf3\x53\xbb\x0a\xe6\xd0\x83\xc4\x34\xce\x86\x56\x4b\xef\xef\x1e\xc5\x2d\x5e\xe9\x73\x9e\x19\x22\x9d\x1c\x8e\xcd\x2c\xd7\x48\x1b\xc9\x37\
xa4\xc9\x24\x5b\xe9\x38\x99\x7c\xfe\xbd\xdd\xe2\x96\x2d\xbd\x17\x87\x89\x39\x5d\xa0\x8b\x82\x5b\x3a\x2c\xec\x0a\xf4\x13\x81\xca\xdc\xf8\xe1\xa3\x7b\x3f\xde\x6a\x95\xb6\x27\x4f\x12\x60\x1e\x57\x32\x66\x63\xae\x15\x17\x55\x27\xc9\xc6\x07\xd9\x02\x59\x42\xbc\xd2\xaa\xa8\xe9\x6c\x2c\xa7\xd3\xb9\x45\xb3\x1c\xeb\x09\x0e\xeb\x9d\x92\x49\xf1\x68\xdb\x35\xe9\x68\x8b\x65\x72\x5e\x6e\x0b\x55\x9e\xad\x81\x5a\xc3\x69\x2a\xcb\x52\x32\x33\x4d\xbe\x25\x9a\x68\xfa\xae\x54\x2b\x1d\xc3\x9c\xc8\x7a\xa1\xe5\xb6\x6f\xb6\x5a\xcd\xed\xf1\x64\xb3\xb2\xec\xbe\xa5\x94\x37\x39\xbb\x2e\x35\x67\x6f\x68\x96\x29\xbf\x4f\x67\x6c\x57\x31\x97\xb2\x5a\x7f\x1f\x65\xd4\x5e\xaa\x55\x4b\xe4\x09\xd1\xab\x82\x8e\xab\x9d\xc1\x49\xbd\x31\x64\x47\x8b\xd1\xd2\x65\x2f\x49\xf1\xb0\xd6\x9c\xbf\x77\x3a\xa8\x94\x99\x83\x94\x9a\xed\xc6\xa5\xa9\x11\x37\x2b\x31\x09\xc4\xa9\x79\x31\xed\x10\xcd\x74\xcf\x61\x86\x95\xfc\x62\xd9\x2a\xd7\xc6\xc5\x77\xb5\x93\xec\xc1\x4c\x7c\x91\xd5\x2d\x3b\xcb\xa1\x92\x1a\xe3\x53\x40\xcc\xe2\xa9\x01\x5f\x7c\x23\x86\xe3\x84\xf6\xe7\x9f\x2f\xfe\x32\xf4\xb4\xaf\x7c\xfb\xf0\xd1\x27\x6b\x4f\x2e\xd2\x58\x1b\x83\x16\xa1\x68\x74\x0b\x8c\x67\x00\x19\x45\x39\x0d\x9c\xd5\xb0\xde\xcd\xf7\xf8\xa5\xa8\x99\xf5\x38\x44\x6d\xae\xa9\xa7\xcc\x00\xbc\x54\xaa\xd5\xf2\x0e\x58\x08\xf9\x24\x97\xae\xac\x3a\x2a\x99\x28\x0e\x5b\xf9\x05\x39\x1b\xbc\xdb\x0c\x9c\x13\x19\x58\xcc\x64\x70\x9b\x8e\x93\x44\xad\xd5\xac\xea\xce\x60\x7b\xe4\x61\x57\x36\xfe\x8f\x1e\xdb\x83\xbc\xab\x7b\x5d\x4d\xba\x6d\x7c\xc7\xda\xad\x59\x89\x29\xa0\x32\xd4\x86\xab\x22\x6c\x56\x78\x26\xfe\xae\xda\x3c\xc2\x65\xd3\x2a\xf5\x3a\xef\xf1\x76\x6e\x94\x32\xf3\x3b\xdd\x10\x84\x32\x6d\xe5\xca\x3e\xdd\x0f\x8c\xdf\xd1\xd8\x9d\xb9\xe2\x4f\xc6\x9f\x88\x82\x3f\xe1\xe2\x2f\x1e\x1a\x7a\xfd\xd9\x6e\x81\x13\xd8\xa1\xa1\x89\x15\x84\x2b\xcd\x26\xdb\xca\xc8\xc9\xea\x8a\xa9\xbe\x2d\x8d\xcc\x4c\x26\x9b\x49\x40\xc3\x1c\x99\xd5\xc1\xa3\xfa\x95\x9c\xc3\xb5\x16\x7f\xdb\x5f\xcb\x73\x34\xbb\xdf\xd3\x7f\x0b\x7d\x62\xba\x3a\x5b\x5e\xa3\x2f\x28\xbf\xc4\xdc\x24\x4d\x87\xa2\x67\x89\x8a\xb8\x9a\x56\xdf\x48\x33\x53\x8a\xbd\x03\xb6\xb6\xd6\x6d\x60\xa8\xc5\x54\x77\x5c\x6d\x6b\xd6\xbc\x1e\x6b\xb8\x9d\x52\xad\x26\xbe\x1d\xb8\x42\x9b\x4a\xe1\x68\x50\x66\x84\x35\x9f\xc0\x2b\x76\x5a\xd4\x16\x32\x60\x01\x68\xf2\x5c\x77\x48\x8d\x0b\xa3\x31\x5f\x65\xe9\x51\x82\x5c\x44\x97\xdf\x67\xd2\xd7\x1e\x31\xd5\x9b\xe4\x97\x72\x3f\xba\xfa\x18\x9f\xc3\x84\xd4\xea\xf4\x88\xa4\xd1\x69\x43\xab\xc5\x34\x57\x4b\xa9\x4d\xa6\x4b\x39\x6d\x3a\x21\x85\x7a\x62\x90\x4d\x4d\x69\x69\x55\xcf\xb6\xb5\x63\x7d\x7e\xc0\x5e\x3c\x62\xdf\x0a\x0c\xb7\xd5\xaf\xea\x39\xfe\x0e\xf4\xc5\x0f\x27\xe0\xfe\x1e\x7b\xde\x6a\x35\x6a\x5b\x50\x8e\xa4\x75\x6a\x8c\xc8\x9a\xc9\x02\x5e\xa8\xc6\x96\xdd\x7a\x82\x7f\xef\x2c\x3a\xad\x06\xb9\xd2\x2b\x7a\x77\x5e\x97\x40\x72\x31\xae\x16\x10\xe7\x31\xb0\xb7\xa7\xa2\x70\xec\xfb\x7d\x97\x2b\x8f\xe6\xe1\x7e\x9a\x36\x73\x4e\x4b\x99\x74\xcb\x2d\xa5\x37\x73\x3a\xd3\x46\x26\xee\x48\x72\x17\x1f\x27\xc6\xaa\x1c\xcf\xe6\x45\xad\x3d\x31\x16\xa9\xec\x00\x3e\x4a\x5f\x22\xb6\x48\x08\x99\x83\xaa\x05\xc7\xc3\xbd\x1f\x7e\xf9\xc6\x27\x92\xbf\x7d\x44\x7f\x6a\x14\x2c\x0a\xea\x9e\x72\x4e\x08\xf8\x93\x83\xff\x3c\x7b\xf9\xe6\x8f\x77\xc4\xeb\xce\xf9\xf3\x48\x7c\xb0\xb0\x96\xe7\xed\xcf\x89\x7e\x9c\x95\xbf\x4b\x6f\x37\xba\xbe\x3c\x1a\x1f\x5c\xd0\x17\xdf\x15\x94\xff\x21\x9e\x38\xf8\xcf\xa4\x8f\xd3\x3b\xed\xab\xc0\x8e\xed\xb1\x7c\x80\x17\x32\xbf\x42\x4f\xf9\xf9\xec\x7b\x64\xfc\x23\x2a\x1a\x7e\x2f\x96\xfd\xef\x47\x05\xca\x6e\x2e\xde\x7d\xcf\xe6\xee\x5c\xa9\xf7\x77\xb3\x70\x74\x17\x48\xd7\x57\xcf\xbe\x15\x9e\x44\x13\x2a\x60\x54\x55\xe2\x39\x19\xa7\x58\x44\x92
\x12\x2f\x13\x48\x26\x71\x8e\xc3\x49\x12\x97\x15\xc4\x02\x95\xa3\x25\x92\xa2\x48\x05\x22\x42\xa6\x65\x89\x63\x64\x56\xa1\x14\x8e\xa2\x09\xef\x50\xed\x43\xaf\xcb\xf1\xad\xf0\xe8\x6b\x2b\x3c\x86\x03\x4c\xf8\x03\x13\x76\x77\x8f\x12\xca\x9e\x56\xde\x39\x8b\x7c\x2b\xbc\x64\x60\x78\x7d\x5a\xe0\x6a\x79\xbc\xd6\x6a\xd5\xb7\x5a\xa6\xbd\xc5\x87\x5c\x8b\xe4\x97\xc3\x2c\xa8\x5b\x3c\x63\xca\x6c\x79\x6c\x39\x03\x62\x18\x5f\xf1\x85\x72\x55\xe1\x47\xad\x96\x98\xb5\xba\x75\xcf\x6b\x45\x8f\x22\x5c\x2b\xe3\xa3\xe7\xac\x26\xa6\xf6\xfc\x54\x53\x46\x67\x09\xcd\x31\xbb\x36\xea\x0d\x65\x0d\xcb\xec\xac\x54\x64\xa9\x6e\x2c\xd7\x1c\x09\x84\x89\xcf\x65\xb6\x4d\xa4\xde\xe3\xc6\xa8\x85\x08\x5b\x7e\x9f\x14\x8b\x30\x56\xe3\x06\xad\x86\x64\x94\x69\xa3\xca\xc4\x52\x72\xa3\x48\x0f\xb3\x56\xbb\x3a\x5b\xcd\x52\xa9\x9e\x54\x1e\x09\xcb\x5b\x56\x6d\x41\xf9\x09\x51\xc6\xc3\xbb\xc4\x2f\x2c\xef\x3f\xc3\x57\x54\xbe\xb6\xf7\x46\x38\x65\xda\xca\x02\x41\x38\x5e\xd1\x68\x41\xdc\x57\x56\x54\xcf\xc3\x9f\x8c\x84\xdf\xf5\x80\x3e\x8f\xec\xdd\xba\x37\xa2\x65\xc7\xf6\xb0\x79\xc0\x5f\x14\xce\x64\x5f\xae\x79\x84\x67\xe3\x3f\xd5\xed\x50\xfc\xc1\x15\xd1\x97\x88\x48\xce\xcb\xe7\xeb\xd0\x77\x5b\xc4\xb4\x3c\x10\xed\x7e\xff\x12\x11\x76\x50\x27\xfe\x91\x2b\x00\xf7\x8a\x1f\xec\xd9\x3f\x6e\x85\x76\xe0\xef\xbc\x3e\x5d\x8e\x5a\xa3\xd8\x08\x7f\xd4\x78\x36\x43\xfc\x89\x36\xea\x1e\xfc\xdb\xa8\xb5\xdd\x52\xe3\x33\x5d\x5a\xe4\x5b\xb9\x12\x99\x33\xd9\xb9\x89\xbf\x2f\x06\x43\xca\xa8\x57\x93\x70\x58\xa7\xdf\x1d\xa9\xdb\x29\xa6\xdb\xb5\x16\x0b\x57\xef\x65\x79\xb9\xb2\xe8\xcc\x3a\xd5\x2a\xcd\x9b\xec\xb8\x95\xb4\xa8\xd6\xaa\xf8\x3e\x2e\xea\x4e\x2f\x61\xa5\xcd\x66\xcb\xb0\x99\x6a\x52\x78\x76\xd4\x8a\xf3\x32\x0e\x28\x16\x32\x84\xcc\x11\x1c\x49\x00\x84\x38\x9e\x57\x70\x59\x96\x15\x9e\x90\x39\x40\x13\x34\x42\x90\x05\x04\x8d\xb3\x04\x4e\x30\x38\x49\x00\x89\x84\x04\xc5\xf3\x34\xe2\x24\xef\xd1\x49\xe0\x91\x52\x7e\x5f\xd4\x4a\x5e\x8b\x5a\x39\x8a\x01\xe1\x0f\x7b\xdc\xdd\x3d\x2a\x3a\x78\x89\x32\x53\x7c\x51\x6b\x50\xd3\xe3\x01\x2d\xbb\x7b\x2d\x1f\x3d\xea\xf2\xe5\x82\x3c\xfc\x7b\x7a\xab\x09\x98\x19\x4c\xf5\x56\x89\x17\x29\x92\xcd\x76\x52\x72\x66\xaa\xb6\x2c\x73\x05\xea\xe5\xb7\x52\xce\x49\x8c\xe3\xc4\x40\x83\x5a\xb1\x9a\x49\xd6\x92\xe9\x6a\x31\x53\x22\x67\xc3\xba\xdc\x28\xcf\x66\x48\xb4\x4b\xe2\x7c\x91\x8f\x4d\xd8\x2c\x25\xc5\xf8\x94\x85\xd7\x48\x67\x39\x96\x35\xfb\x96\xa8\xd4\x2f\x9f\xf0\xc8\xcd\xd7\xe8\xde\xb5\x74\x20\x17\x1d\x72\x5d\x89\x9c\x9e\x87\xff\x7c\x6e\xea\x0a\xfe\x13\xab\xf8\xe9\x91\x81\x8b\xef\x96\xc8\xe5\x8b\x46\x56\x7f\x9e\xcb\x8b\xf8\x2c\xf2\xdd\x73\x89\x1d\xdb\x5b\xcf\x7a\x12\x85\xfb\xae\xcb\x1e\x46\x7c\x0c\xbf\x3f\x2f\x73\x0f\xfe\xad\x87\xc9\x99\x28\x3d\xce\x35\x49\xbc\x31\x54\xed\x81\xdc\x5a\x22\x83\x6f\xcf\x87\x99\x56\x96\xe5\xf0\x31\x9e\xef\x66\x60\x3a\x3e\x13\x56\x82\x5c\x12\x8b\x3d\x61\x29\x93\x68\x56\xca\xad\x8a\xcc\x6c\x4a\xd5\x97\x78\x9d\xe1\x2c\x46\xb5\x87\x83\x05\x8b\xb7\x17\xb5\x0c\x22\x13\xbd\x75\x62\xf9\x6c\x0f\xc3\xb2\x24\xa3\xe2\x12\xc7\xf0\x0a\xc2\x55\x96\x82\x34\x52\x58\x95\x53\x18\x9a\xe0\x79\x8a\x27\x48\x0e\x49\xb8\x8a\x53\x04\x50\x49\x55\x26\x24\x49\xa6\x48\x0e\xe7\x28\x95\x00\x32\x87\x7b\x4f\xe5\xf2\x9e\x75\x10\xf9\x58\xdf\x7d\x1e\x86\xa3\xc2\xf3\x22\xbb\xbb\x47\x45\x64\x9e\x56\xde\xb9\x0e\xbe\xc7\xc3\xdc\xb9\x7b\xf7\x48\xb6\xf8\x30\x2b\xdc\x2b\xb3\xa7\xb7\x9a\x1c\xc6\xeb\xc2\xdc\xc8\xcb\xbc\x53\x9b\xa5\xb5\x58\x79\xd4\x92\xf2\x95\x77\xca\x34\x6a\xd5\x6e\x13\x2d\xd6\x09\x75\x81\x17\x04\x63\x26\x75\xd2\xcc\xba\x22\x4d\x6b\x78\xbc\xc6\x98\xec\xd8\xd2\xc7\xb5\x31\x57\xa6\x5a\x66\x2d\x9e\xce\x0a\x8d\x6a\x9c\x22\x4a\xe5\xb6\x9c\x7a\x57\xaa\x37\xe
5\x3d\xee\xf6\x30\xf7\xc6\xb6\xcf\xf6\x30\x0f\xe2\x7f\xa2\x87\xf9\xc4\xdd\x38\x8f\x94\x3b\x3d\xcc\x57\xda\xcd\xbc\xea\x61\xa2\xcf\xa5\xe7\x78\x98\x08\xf8\x1f\xf4\x30\xe5\x62\x2a\x9b\x60\x9a\xd3\x77\x02\xa4\x4d\x98\xa4\x98\x5a\xcb\x36\xdf\x2a\xba\xde\x4a\xc5\x07\x35\xe6\x2d\x9b\x8b\xbd\x55\xed\x44\x7b\xea\xc8\x92\x95\xe5\xe7\x46\xa6\x9a\xca\xcc\x86\x73\x38\xcf\xf7\xd2\xd2\x92\x1d\xb4\x07\xa2\xd8\xd5\xe2\x25\x21\x33\xec\xe2\x74\x6f\xd9\x29\x81\xe7\xaf\x61\x54\x96\x92\x15\x9a\xe2\x70\x5c\xa1\x49\x56\xc6\x19\x45\x55\x49\x5a\x96\x29\x96\x23\x25\xc4\x71\x40\x26\x14\x86\xc5\x65\x00\x68\x00\x69\x46\x86\xac\xca\xd3\x80\xa5\x39\x45\x66\x29\x40\x2b\x9e\x87\x21\x9f\xe4\x61\xae\xd6\x56\x71\x34\xce\x5c\x78\x3c\x37\x8d\x33\x87\xc7\x73\x6f\x8b\x82\x1f\xf5\x30\x17\x6a\xab\x3e\x7d\x3f\x32\xb0\x86\xf1\x79\x98\x72\x36\x5f\x37\x92\xd5\x31\xdf\xb4\x66\xc4\x7c\x38\x59\xac\x7a\xc3\xf4\x8c\x79\xcf\x55\x27\x72\xa5\x31\xe5\x16\x06\x65\x27\x12\x54\x0d\x52\xc5\xe6\x04\x66\xba\x49\x38\xaa\x75\x1a\x29\xb3\x6a\x64\x53\x82\x45\x3b\xb8\x99\x5b\x2a\xf1\xba\xcd\x34\x72\x5c\x91\x6e\xa1\xb4\x16\x97\xaa\xda\x57\xf4\x30\x91\x2c\xfc\x13\xf1\x87\x64\xaa\x3e\x65\x0d\xf3\xc9\x1e\xe6\x33\xe9\x8b\xb4\x86\xf9\x9b\x2c\xbc\xf0\x24\x0f\xe3\xcf\xd2\xdd\x83\x7f\xeb\x61\x28\x6d\x9a\x6f\x96\x12\x12\xa8\xcf\x2d\xc8\x94\xd1\x5b\x6a\x5c\x84\x96\x55\x34\x12\xca\x92\x2f\x4f\xd6\xb1\x8e\xbe\xcc\x16\x06\x29\x4b\x5b\xd5\x15\x53\xd1\x4a\xb3\x5a\x4a\x29\xb6\x07\x89\xa6\xd6\x4c\x2c\xc6\x64\x5d\x9b\xc4\xf1\x54\xfe\x7d\x5a\x9c\x2c\xdf\x8d\x16\x05\x85\x94\x93\x78\xba\x87\x91\x79\x52\xe5\x70\x86\x61\x55\x92\x55\x54\x16\x87\x04\x8f\x64\x5c\x41\x3c\xa3\xaa\x24\xc9\x01\x86\xa2\xa0\xcc\x11\x1c\xa4\x64\x04\x81\x2c\x31\x32\x92\x80\x24\x23\x99\x51\x55\x15\xa7\xb9\xc3\x53\xeb\x9f\x91\x25\xbb\xc1\xc3\x10\x17\xf6\x76\x77\x77\x8f\x8e\x78\x3c\x9a\x25\xbb\xea\x61\xca\x44\xe2\x4d\x28\x53\x74\x37\x9e\x24\x9d\x4c\x2b\x55\x06\x35\x52\xc0\x8b\x68\x54\xe1\x72\x35\x66\x52\x02\x02\x8f\xda\xba\xb2\xce\x3a\x5e\x7e\xff\x81\x2c\x59\x60\x0d\xe3\xcb\x92\xa5\x29\x83\x1d\x25\x84\x19\x0d\x67\x8b\x3a\xa1\xbc\x65\x1a\x4e\x57\x66\x0b\x02\x64\xa7\xf5\xa6\xae\x97\xa9\xfc\x3a\x9f\x93\x66\x63\x3b\xa9\x2d\xe4\x51\x3a\x41\x28\xd2\x40\xb6\x4c\xde\x91\x17\x89\x5e\x46\x5d\xce\x1a\xad\x98\x3e\x5a\x70\x99\x31\xd7\x53\x21\x43\x8e\xba\x95\x51\xb5\xfb\x15\xb3\x64\x0f\x7b\x98\x07\xf1\x3f\xd1\xc3\x7c\x62\x45\xa6\x47\xca\x9d\x1e\xe6\x2b\x55\xb4\x5e\xf5\x30\x9f\x98\xa5\x12\x9e\x94\x25\x7b\xd0\xc3\x48\x7a\x67\x5e\xcd\x26\x2c\x63\x36\xe3\x0b\x3d\x92\x53\xca\x60\xf0\x6e\xdb\x71\x6b\x6e\xe4\xea\xf9\x2c\xab\x59\x59\xd3\x98\xcf\xe0\xb2\x9c\xe0\xd7\xd5\x6e\xa1\x07\xe6\xa9\xf5\x7c\x48\x80\x35\x97\x1b\x29\x6f\x89\x69\xa9\xc3\xe4\xeb\x6f\x4b\x24\xad\x0c\xde\xb2\x50\x4d\x8b\xd5\xe2\x1f\x50\x3d\x24\x43\x45\x46\x38\xad\xca\x38\x54\x68\x85\x51\x11\x8e\xf3\x12\x27\xe1\x0a\x92\x64\x5c\x21\x15\x55\x21\x09\x04\x29\x42\xe2\x90\x04\x49\xc4\x23\xc8\x70\x32\xc1\x52\x0c\xa2\x00\xae\xbe\x78\xaf\xed\x7b\xe4\xc1\x3c\xf7\x64\xc9\x78\x9e\xbd\xf4\xb6\x15\xf7\xe6\xd1\xc1\x3f\x4f\x27\xef\xac\xd0\xbf\x2d\x47\xe6\x55\xf8\x45\xf2\x17\x9d\xc0\x7e\xae\xa7\xb3\x7b\xfc\x55\xa1\x54\xad\xbf\x2f\xa6\x62\x3c\x21\x74\x16\x36\xa8\x68\xe3\xcc\x5b\x33\x25\xa9\x42\xd7\x21\xcb\x82\xa6\x75\x2b\xad\x86\x82\x4f\x87\x2d\x39\x07\x5b\xa4\x91\x14\xdf\x86\xe5\x5e\x71\x52\x48\xf5\x8a\xa3\x94\x38\xd0\x28\xc5\x98\xb7\xaa\x2b\x94\xec\x09\xcb\x46\xbe\x59\x81\x13\xc5\x5e\xde\xb4\x22\x49\xfb\x15\x39\xcc\x5f\x08\xbe\x77\xc0\xdc\xe9\x5f\x5d\x7b\x3d\x39\xc8\x37\xe4\xba\x62\xaf\x9f\x87\x3f\x5a\xce\xeb\x50\x0f\xe3\xaf\x1f\xb9\xd3\x1e\x9f\xad\x90\x3c\xbd\x2e\xdb\xb8\x67\xe3\x
bf\xa3\x42\x53\x10\xbe\xae\xbf\xbc\x96\x53\x8b\x22\xab\x87\x73\x6a\x4f\x1c\xab\x7b\xf0\x6f\xfd\x91\x00\xe2\x85\x72\x6c\x54\x15\xb3\x71\x14\x7b\x6f\xc6\x12\x5d\x95\xaf\xb6\x1c\x2e\xae\x93\x32\x95\x48\xc4\x35\xbe\x55\x22\xeb\xe3\x52\x6b\x69\x4c\x3b\xa8\x59\x6c\xc4\x8a\xf4\x68\x2d\xdb\xad\xd1\xba\xda\xac\xa7\x93\x03\xa2\x4d\xb7\x92\x29\xaa\x34\x4a\xc6\x8b\xd3\x2a\x3f\x53\x88\xf7\xe7\xaf\x78\x24\x85\xe2\x18\x45\x52\x14\x9c\x50\x28\x06\xe7\x00\xcb\xb0\x40\xa6\x20\x0d\x59\xc4\x2b\x0c\xe2\x18\x5a\x86\x04\x2f\x4b\x14\x40\x0c\xa1\xb0\x10\xba\xcb\x22\x15\x21\x5a\x22\x19\x05\x79\xfe\x88\x78\xd2\x79\xc5\x6b\xfe\x88\xc5\x71\x9c\x0a\x7f\xdb\xd1\xe6\xee\xc1\x23\x6d\x0f\x90\x3f\x7a\x5e\xf1\xaa\x47\x8a\x94\x23\x23\xcf\x79\xa4\xf4\x1e\x7f\x35\xce\x8f\xc6\xf9\x36\x31\x23\x17\x6c\x55\x5d\x73\x95\x22\x1a\x89\x12\x68\x34\xb2\xb4\xbe\x9a\x8d\xb2\x78\xdc\xd4\x3a\x56\xd9\x61\xb5\x32\x60\x88\xaa\x34\x1a\x10\x4a\xbd\xd1\x54\x51\xd2\x5c\xc8\x78\x45\x80\xea\x20\xd9\x59\x39\x83\x96\x60\xd8\x85\xf9\xd0\x88\x8f\xd7\xc3\xb8\x70\xd3\x0a\xe6\x6e\x8f\x14\xe5\xcc\xd9\x33\x3d\xd2\x83\xf8\x9f\xe8\x91\x9e\x52\xb3\x7f\x7a\xdd\x6c\xe5\x3e\xfb\xcc\x80\x20\x7c\xdd\x5d\xa8\xab\xbb\x3c\x11\x64\xf5\x4c\x8f\xf4\xe8\x58\xdd\x83\x7f\xeb\x91\x32\x44\xbd\x3b\x95\xa0\x85\xde\x9c\xf8\x5b\x61\xc9\xad\x98\x6a\x6d\xd1\x2a\x15\x87\xe3\x42\x7a\x56\x1d\x56\xd3\x7a\x1c\xd9\x0c\x39\x17\xd8\x8e\xd5\x8b\xcf\xeb\x99\x1e\xc8\x95\x6a\x3c\x55\xd6\xf9\xf7\x2a\x17\x9f\xc6\xc4\x92\x9a\x26\x52\xcd\x44\x7b\x39\x67\xca\xcd\xb4\x94\x2f\x8a\xcf\x5f\x21\x71\x00\x00\x9e\x90\x49\x8e\xa1\x48\x45\x65\x49\x19\x07\x34\xa4\x21\x50\x58\x56\xe2\x70\x42\x51\x55\x9c\x56\x28\x95\xc6\x55\x99\x51\x00\xce\x01\x06\x42\x96\x96\x71\xc8\x00\x9c\x57\x79\xe8\x79\x24\xf2\x49\xe7\x2b\x6e\xf0\x48\x44\xe8\x3b\x8b\x77\x37\x8f\x1e\x43\xf2\xe8\xe9\x8a\x4f\xf4\x47\xbe\xd3\x10\x15\x43\x63\xd3\x85\x41\x4e\xc3\x57\xec\x70\x2a\x2c\x81\xb8\x64\x32\x44\x27\xc5\xac\x6b\xd9\xb1\x9a\xcb\x2e\x53\xea\x0a\xe7\x4b\x73\x9d\x96\x9c\x72\x4e\x48\xa1\x65\x4a\x28\xa9\x65\x28\xc4\xb9\x54\xb5\x67\xd0\x53\x52\xa0\x35\x72\xc4\x24\x33\x49\x5d\x53\x9b\xa5\x66\x12\x30\x9a\xfc\x21\xfe\x28\x42\xc5\xfe\xc3\x55\x01\x4f\xc4\xff\x44\x7f\xf4\x68\x35\xee\xa3\xfe\xe8\x29\xf8\x1f\xf0\x47\x5f\x69\xcf\xea\x9a\x3f\x8a\x22\xab\x67\xfa\xa3\x28\xf8\x1f\xf4\x47\xb1\x76\xd2\xee\xce\xc5\x71\xb2\x91\x99\x97\x47\xc9\x64\xb3\x91\x34\xd5\x9e\xdc\xb1\x72\xa2\xde\xe3\x46\x43\xa1\xf5\x96\x69\xe0\x04\x53\x30\xe6\x38\x35\x12\x5a\xac\xd0\x14\x00\x47\x9b\x42\x66\xd9\x8a\x8f\x3b\xe3\x89\x9c\x1c\x27\x51\xc3\x2c\xca\xe5\x7c\x9c\xaf\x82\x6a\xa5\x33\x8b\x3f\xbd\xae\x8d\x24\x10\x25\x41\xa0\x12\x80\x93\x18\x08\x09\x8a\xc0\x25\x8a\x67\x29\x9a\x81\x40\xc2\x79\x9c\xc7\x11\xc9\x91\x2c\x62\x18\x89\xa4\x78\x1a\x27\x71\x85\x42\x8a\xc2\xe0\x0a\xae\xe2\x34\x8b\x7b\xfe\x88\x7a\x92\x3f\xba\xb6\x27\xc4\xe2\x38\xcd\x84\xbf\xff\x72\x77\xf7\xe8\x71\x56\x8f\x7a\xa4\x0b\x7b\x42\xcf\xce\xd9\xf9\x3c\x52\x31\x37\x5e\xc4\x8c\x41\x8a\xae\xf6\x0c\x4d\xc9\xce\xe2\x9d\x64\x5d\x19\x88\x8e\x66\xc1\x86\x55\xab\x2f\x5b\x16\xad\xbc\xc5\xad\x52\x82\xe0\x90\x9a\x29\x9a\xab\x6e\x09\xc6\x88\x39\xef\xd4\x45\x65\xd5\x8c\x0d\xd2\x1d\x26\x57\x6b\xb3\x4b\x21\x45\x4a\xe5\xa9\xc0\x59\xab\xd5\x4a\x10\xbf\xa2\x47\x8a\xf4\x54\x90\x27\xe2\x8f\xf4\x54\x90\x8f\xf1\x48\x21\x1e\xf2\xd3\x3c\xd2\xbd\xe7\x53\x04\xe1\xeb\x56\x82\x9f\xf5\x48\x7f\x93\x47\x10\x3e\x60\xac\xee\xc1\xbf\xf5\x48\xcb\x31\x7c\x63\xda\x62\xc7\x88\x8f\xdb\x71\x5c\x37\x53\x72\x29\x47\x88\x75\x22\xe1\x4c\xcb\x4e\xaa\x10\x6f\x2f\x0c\x92\x4b\xdb\x78\x41\x5a\xd1\x9c\xbc\x1e\xa6\x34\x60\xb1\x90\x25\x40\xb6\x46\xa6\x1a\x8c\xd8\x53\x4d\x23\x25\
x2d\x04\xa1\xa3\xd8\xcc\x28\xb9\x5e\x64\x32\xa2\x50\x7d\xb6\x47\x62\x18\x86\xa1\x19\xc8\x20\xa8\x20\x02\xe7\x08\x99\x66\x39\x9c\x66\x01\xc1\xb0\x0a\x8f\x28\x8a\x26\x11\x42\x34\xcb\x01\x19\xf2\x90\xe6\x20\x24\x01\x8f\x24\x5c\x45\x1c\x49\xd1\xb4\xaa\xbc\xec\x1e\xdd\x1a\xfd\x01\xb5\xbb\x27\xbf\x5e\x70\x44\x80\xa1\x2f\xbd\x79\x9c\x3e\x6c\x44\x79\x8f\x30\xdc\x7a\xa1\x02\x97\xa9\x2e\xaa\x23\x29\x4f\x64\x04\xb2\xdd\x1a\xd6\xac\xfc\x78\xd8\xc1\x71\x35\xcd\xd9\x85\x2c\x3b\xc6\xc5\xda\x32\xd7\x7e\x13\x3a\xa4\xb0\xf7\x42\x9e\xba\x04\x86\xf4\xc4\x4a\x44\xa8\xa5\xca\xfb\xe1\xb5\x16\xcb\x14\xbf\x99\x45\x62\x5c\x49\x76\x10\x41\x36\x29\xc4\x27\x88\xfa\x6a\x1a\x2f\xf4\x6a\x2b\x60\xe5\xde\xeb\xa9\x42\x0e\x5f\x25\x93\xf8\x3c\x9f\x9e\x8e\x67\x92\x6a\x24\x1b\xb1\x4e\x27\x3b\x63\x9a\x7a\x32\xfe\x3e\x4e\xad\x9c\x58\xba\x1c\xeb\xcc\x52\x66\xb5\xa2\x58\x9d\x06\x3b\x05\x66\xa1\x97\xcd\xcc\x1a\xc5\xca\x0d\x9e\xe7\x48\x65\x8f\x3d\x8f\x8f\xe7\x83\xe5\xf5\xc7\x97\xfa\x5b\x1c\x2f\xe0\xb9\xf4\xda\x19\x2c\x4b\xc0\xe8\xe2\x70\x3d\x35\x01\x5f\xca\xac\x16\x85\xc4\xba\x4c\x3b\x71\x51\x4e\x78\x3c\x92\x9a\x63\x95\x27\xdd\x37\xaa\x78\xe8\xdf\x3c\x4b\xc4\x95\xd9\xfc\x00\xfe\x92\xb5\x6e\x54\x1f\xc0\x2f\x08\x7f\x5f\x55\xef\x59\xcb\x1a\x8f\x2e\x8b\xf2\xa4\x77\x91\xcd\x6b\xb2\x78\x74\x2c\x36\xba\x10\x93\x03\xf0\xee\x92\xc5\x7f\x3b\x55\x40\x3a\x0e\x28\x67\x16\x55\x67\x64\xc3\x6a\x9d\x68\xca\x0a\x5c\xcb\xf8\xa0\xbe\x56\x8a\xd5\x25\x5f\xd0\x07\xb3\xde\x6c\x4c\xd2\x55\xfc\xcd\x00\xeb\xfc\xdc\xc8\x6a\x4b\x99\x1e\xc8\x92\x3a\x18\x2e\x0c\x66\x00\xc5\x24\x09\x01\x1e\x63\x14\x98\x20\xea\x71\x66\xf4\x9e\x7c\x7a\xac\x2f\x01\x95\x23\x38\x92\xa3\x64\x9a\x41\x94\x84\x13\x94\x4a\x11\x2c\xad\x12\x14\x43\x73\x38\x94\xa0\xa4\xaa\x00\x22\x52\xe2\x25\xc8\x00\x09\x27\x39\x9e\x25\x69\x16\xd1\x2c\x42\x92\x4c\xe0\x9e\x65\x25\x1e\xb3\xac\xc4\x75\xcb\xca\x52\x97\x4c\xab\x77\xd7\xff\x30\xd6\x47\x6d\xeb\x85\x08\xdf\xbb\x22\x54\x11\x85\xda\x56\x75\x92\xaa\xbf\xf7\xd0\x00\xb0\xef\x0e\x47\x98\x45\x65\x86\xa8\xd8\xdb\x6a\x62\xbf\x8d\x32\x71\x25\xdd\x51\xf9\x78\x36\x5e\xeb\xaa\xad\x1e\x99\xac\x36\x48\xb8\x5c\xf6\x26\x74\x61\xa6\x65\xd3\x7c\x6e\x66\x2c\x8a\xe3\x1a\x68\xe4\xe1\x4a\x36\x73\xd5\xe4\x7c\x96\x96\x13\x75\xb9\xf8\xb5\x6c\xeb\xa3\xb6\xed\xd1\xf9\x5c\x5c\x16\xc6\xe6\x33\x6d\xeb\x27\x9e\xc9\xbb\x9a\x47\xf9\x44\xdb\x26\x3c\xc9\xb6\x72\xd4\xa1\x7f\x48\x0e\xf2\xa2\x6d\xed\xd0\xab\xa6\x4d\x74\x4c\x05\x71\xac\x5a\x9a\x25\x33\x70\x32\xaf\x2c\xb8\x1e\x5b\xa1\xe3\xcb\x4e\xcd\x20\xdf\xaa\x75\xa1\xd9\xce\x80\xb6\xb2\xc4\x45\x9e\xb0\x8b\xe9\x5e\xac\xce\x98\x89\xf8\xa2\x0e\xa7\xd2\x7b\x6e\xa4\x34\xd7\x63\xbb\x93\x4f\x34\x05\x76\x36\x98\x08\xa3\xc9\xf3\x4f\x6f\x10\x12\x81\x38\x42\x91\xa0\xb4\xb1\xab\x12\xc1\x42\x5c\x26\x01\x85\xcb\x90\x05\x0a\x07\x65\x5e\x92\x59\xc0\x91\x40\xe5\x55\x1a\x92\x92\xc2\xf0\x48\x86\xa4\xc2\x71\xaa\x84\x23\x99\x96\x3d\xcb\x48\x3e\x66\x5b\xaf\xa6\xf3\x01\xc3\x93\xa1\x07\xcf\xf7\x77\xfd\x8f\x95\x7e\xd4\xb6\x5e\x78\x5a\x92\x77\x45\x58\xbd\x87\xd8\xd6\x64\x6e\x6e\x00\xa7\x90\x2e\xa4\xa8\xd6\x6a\xe9\xe0\x4a\x32\xd1\x12\x55\xc6\x91\x68\x83\x92\xd6\x45\x2b\xad\x25\xa6\x31\xa3\xd5\x2b\x8e\x57\xb2\x43\x53\x7a\x49\x25\xc6\x2b\x67\xb8\x62\x8a\x0a\xdd\xcb\x51\x22\x95\x34\x64\x5b\xa5\x18\x51\x18\xc4\xd3\xf5\x66\xc5\x9e\x70\x6a\x37\xf9\xb5\x6c\xeb\xa3\xb6\xed\xd1\xf9\x5c\xc0\x47\x8c\xf0\x44\xdb\xfa\x99\x79\xe1\x8f\xb0\xad\x51\x6d\x9b\xf0\x24\xdb\xca\xfa\x06\xa0\x78\xb7\x2c\xfe\xbb\x96\xa6\x8a\x54\x5f\xe9\x2b\x94\x92\xe5\x82\x92\xa9\x2e\x8d\x5a\x26\x66\xb5\x63\x3d\x94\xe6\x86\xf9\x95\x29\xcc\xd4\x69\xab\xdd\xc8\xd9\x9d\x02\x42\xd9\x61\x87\x9f\xda\x52\x97\x43\xc3
\x0c\x6a\xd7\x51\xbc\x2c\xd0\x9d\x42\x26\x56\x1e\x08\xd9\x6a\x6d\x64\x24\xd9\xdc\x5b\x86\x10\x9e\x1e\xb7\x22\xc0\x4a\x10\xd1\x34\xa1\x40\x1c\xe0\x34\x54\x48\x42\xc5\x25\x1e\x42\x45\x55\x70\x95\x21\x48\xa4\x92\x2c\x92\x28\x8e\x91\x00\x2e\xe1\x14\x25\x03\x9e\x20\x39\x92\xc1\x11\x45\xd3\x0a\xeb\xd9\x56\xea\x31\xdb\x7a\x35\x35\x0d\x58\x00\xc2\x4d\xab\x77\xd3\xff\x78\xfc\x47\x2d\x6b\xd0\x95\x9f\x58\xd6\x08\xb5\x8c\x61\x96\xb5\xdb\x55\xd7\x65\x6b\x5c\x19\xa8\x23\x9d\x31\xac\x4a\xcc\x4c\xa8\xf3\x49\x6e\x5c\x9b\x13\x63\x75\x65\x50\xb8\x49\xcf\xda\x35\x76\x0d\xba\xe3\x0a\x37\x35\xd3\x31\xa6\x33\xd3\x28\x30\xcc\x27\xda\xa5\x65\xcb\xac\xb0\x80\x4d\xb4\xc4\x0a\x62\xf1\x2c\xee\xa0\xf6\xa2\x81\x86\xa3\xdc\xd7\xb2\xac\x8f\x5a\xb6\x47\x67\x73\x9e\x8a\x95\xba\x4f\xb4\xac\x9f\x59\x93\xf8\x11\x96\x35\xaa\x65\x13\x9e\x64\x59\xe9\xe5\x55\x44\x97\x2c\x2b\x91\xce\x70\xc3\x19\x3d\xa4\x3a\x74\x76\x3e\x85\xef\x6f\x33\x61\xc2\x4e\xd7\xbd\x91\xe4\x4c\x27\x5c\x0f\x95\xf2\x65\x68\x8c\x10\xe7\xb4\x73\xc3\xb7\x3c\x8c\x2f\xe6\x06\x63\x96\x4b\xdd\xb5\x66\x0c\xe5\x14\x4c\xc6\x78\xb4\x34\x6a\x49\x72\xce\x27\x4a\x85\x8e\x61\xe1\xa4\x49\x27\x9e\x9e\x6b\x95\x55\x85\xe3\x00\x83\x14\x9a\x63\x65\x1a\x29\x1c\xa7\x20\x88\x23\x09\xc7\x39\xc0\xc9\x50\x25\x39\x19\x67\x69\x1c\xb1\x2c\x85\x23\x9c\x56\x10\x29\x13\x2c\x60\x19\xc4\x43\x84\x38\x84\x3c\xcb\x4a\x3f\x66\x59\xaf\x3d\xe4\x93\x05\x2c\xcf\x72\x17\x4c\x2b\xcf\xf2\x2f\xc7\x2f\xfa\x78\xd4\xb6\xa6\x02\x83\x7a\x62\x5b\x23\x54\xe5\x85\xd9\xd6\xd2\xb8\x3a\x50\xaa\x68\x5d\x94\xdb\xa9\x76\x85\x53\x5a\xb5\x51\xd2\x49\xd5\x29\x27\x93\x4d\xac\xf3\x0a\xac\x8c\x6a\x4c\xec\xad\xc0\xc6\x9c\xe2\x7b\xbe\x6d\x36\x87\x52\x52\xe8\xd8\x72\xad\x9b\x8d\x0d\x99\x94\xf2\xd6\x4a\x35\x61\xf2\x5d\xe9\xda\xed\x84\x60\x67\x48\xbc\x3d\x1f\x66\x0b\x5f\xcb\xb6\x3e\x6a\xdb\x1e\x9d\xcf\x39\xbe\xa3\x2d\x9f\x99\x11\xf8\xc4\xea\xba\x8f\xb0\xad\x51\x6d\x9b\xf0\x24\xdb\x1a\x35\xf3\xbe\xb5\xad\xef\xbd\x51\xb6\xd3\xcc\xd8\xc3\x4c\x6a\xd0\x1a\xbe\x65\x5a\xcd\x9e\xc0\xd6\x9a\xf3\x42\x6d\x2d\xd8\xfa\xa4\x3d\xa4\x45\xb4\x58\xac\x63\x6f\x56\xa3\xb8\x36\x66\xa9\x2c\xb1\x5c\xa6\xd8\x8c\xd8\x4e\x54\xcc\xd8\xa0\xa3\x82\x5a\x2b\xad\x71\x64\xbb\x90\xd2\xb4\x42\x9c\xe7\x53\x96\x79\xf3\x13\x83\x8e\x5f\x1f\xe4\xbd\x5e\xd7\x76\xa0\x63\xfb\x3f\xf7\xa7\x23\xb4\xde\xbd\x86\x27\x51\x2e\xd5\x1b\x35\x21\x5b\xba\xf6\xc6\x20\xa1\xd0\x10\x6b\xdb\xb7\xf6\x94\x4b\x85\xae\x1f\xe2\x6f\x18\x86\x61\x42\x32\xe9\x83\x76\x82\x10\xab\xd4\xb2\x45\xa1\xd6\xc5\xf2\x62\x17\xfb\xae\x2b\x27\xd4\x6a\xa6\x35\xed\x8f\x75\x6d\xf7\x6e\xca\xc0\xf7\x27\x51\x1d\x80\x7a\x8e\xf2\x73\x88\xaf\x52\xbf\xf3\x64\x2e\xdb\x76\xe0\x6b\xff\xf0\x7e\xe4\xfe\xe1\xad\xc8\x7d\xff\xeb\x8f\xfb\x4f\xe1\xee\x18\xed\x39\xe6\x22\x11\x86\x35\x4b\xd9\x6a\x53\xc4\xbe\x1f\x9a\xbf\x62\x87\xf6\xbb\xcf\x5e\x87\x3b\x45\xf3\x9c\x61\xbd\x9b\xf1\xbb\x06\x75\xff\xc6\xd4\xe3\x17\x79\x5d\xbe\xfd\x64\xce\xce\x23\xb9\xc4\xe9\x05\xb2\x6e\xe6\x3c\xf4\x25\x66\x57\x5f\x13\xf6\x5c\xee\xc3\xd0\x5c\xe2\xff\x22\x69\x57\x25\xe0\xa9\xb4\xb4\x76\xb5\x7d\xc7\x48\xb6\x94\x14\x3b\x57\x78\x48\xd4\x44\xa1\x21\x7a\x4d\x8f\xa1\x60\xe5\x52\x70\x32\x34\xeb\xd9\x52\x1a\x93\x1c\x0b\x21\xff\xec\x0a\xa7\xc6\x9b\x63\x8f\xd3\xe3\xc1\xb9\x8d\xa2\x90\x79\x2d\xad\xfb\xdb\x17\xfa\x46\x26\xe7\x00\xc2\x4f\xc9\xd1\x6a\xe0\x98\x1e\xaf\xf1\x2b\xb6\xfd\xd0\xb7\xd1\x6c\x8e\x26\xf2\xa9\xc0\xa4\x75\x5f\x45\xe8\x3e\x02\xa7\xa6\xed\x68\x16\xb2\xcf\xd2\xe9\x83\x76\x1b\xad\xbe\x0e\x3f\xb0\x76\x46\xac\x89\x98\x1f\x46\xb6\x8e\x95\xca\x0d\xd7\x87\xff\x71\x42\xfa\x00\xda\x83\x27\xd0\xbc\x01\x73\x1b\xb1\xfe\xb9\xb2\xe9\x75\x4e\x9e\xfa\x64\x82\xac\x67\x91\x7
6\x00\x76\x1b\x81\x5e\xfb\x13\x32\xb7\x82\x3d\x7f\xf7\xb2\x8c\x0d\xa4\x68\x0f\xcc\xa4\x3d\x84\xdb\xe8\xf7\xda\xee\xf5\xf5\x15\x83\xd3\xa9\xa1\xcb\x9e\x7d\x36\x2d\x25\xc4\x6f\xf6\xd1\x66\xb2\xba\xf7\x23\x50\xba\x75\xdb\x1e\xc1\x01\x70\x7e\xb2\x91\xaa\x22\x39\x38\xf7\x4f\xdd\x88\xae\xbc\x62\xdf\xdc\xce\xdf\xc2\x88\xd5\x95\x27\x91\xa9\x2b\x37\x13\xb8\xb3\x05\x1b\xf2\x22\x10\x6d\x4e\xfb\xd3\x67\xd1\xbd\x85\xe5\x27\x3d\x24\x76\x88\xc4\xc9\x79\x06\x9c\xd5\xf3\x18\xd8\xc2\x0a\xd1\xe9\x88\x2c\xf8\x21\x9c\x63\xc2\x9c\x6e\xb4\x72\x60\x46\xe2\x61\x4b\xfc\x01\x46\x54\xe1\x5f\x16\xb4\xbd\x9d\xed\xae\xef\x7c\x5c\xd6\xc7\xe0\xfc\x24\x7b\xbf\x07\xad\xdf\x59\x8a\xfc\x72\x7d\x16\x59\x27\x30\x6f\x34\xcf\x67\x08\x74\xbc\x21\x71\x1e\x19\xd6\x03\x8c\xe8\x2a\x79\x4d\xfd\x1c\x4b\xd9\x20\x91\xa0\x7d\x67\xbc\x70\x96\xe0\x53\x60\x01\xca\x15\x14\xa0\xd3\xdf\xf6\x2a\x81\xa6\xaa\x3e\xe0\xb4\x4e\x41\xdd\x44\x9c\xdb\xf2\x12\x69\x2e\xed\xc8\x7a\x9a\xf8\x02\xf0\xae\x11\x19\x68\x7e\x0b\xa5\xcf\x91\xe3\x11\xb4\x5b\xa9\xbc\x2a\xcd\xe7\xd0\x76\x13\x4d\x97\x69\xd9\x51\x6c\x98\xe6\x68\x3e\x7d\x8c\xa2\x63\x58\x37\x8f\xa8\xb7\x00\x09\xa1\x6f\x0a\x75\xab\xef\xe8\x63\xf4\x14\x0a\x83\xd0\x6e\x9b\xb7\x5b\x02\x5f\xb1\x20\xc9\xaf\xd8\xd6\xc4\xcb\x86\x69\x23\xa5\x0f\x9d\x10\x26\x9e\x60\xb7\xb7\x70\xae\x51\x7c\x67\x74\xb4\x81\xfa\x34\xe9\xde\x21\xd8\xab\x72\xd3\x27\x0a\x5a\xf5\x03\x21\x87\xdd\x37\x27\x7d\xa8\x28\x16\xb2\xed\x47\x05\x7a\x15\xc1\xd1\xc2\x79\x7b\x3b\xb0\x54\xf5\x1a\xde\x41\xfb\xe3\x7a\x70\x09\xf6\x75\x8a\xcf\xcc\xb2\x63\x80\xdb\x28\x7c\x03\xcf\x59\x4f\xa3\x27\x45\x2e\x42\xbd\x1a\xf6\x6f\x1a\x5d\x21\x74\x1b\x43\x6d\x40\xee\x95\xe8\x49\xd4\x9e\x03\x7d\x35\x7c\xbb\x55\x93\x7d\xc0\x9f\xad\x0c\x47\xa0\xa3\xc4\x9b\xe1\xe0\xc6\x53\xd3\xda\x18\xbe\x05\xb2\x6c\xdd\x9c\x3c\x5f\xd0\x41\x0c\xd7\xc9\x0f\x74\xb8\x9d\x99\xad\xe9\xb9\x3d\xc9\x11\x41\xfe\x3e\x1c\x57\x39\xf1\xb5\xbd\x9d\x89\xa9\x85\x16\xba\x39\xb7\x3f\x85\x9b\x73\xc8\xae\xb2\x75\xae\xd3\xed\xfc\xed\x92\x28\x1f\xc6\xd3\x0e\xc1\x55\x3e\x42\xd3\x8f\xc7\xa0\xf7\xfe\xf6\x43\xa6\x76\x10\xfa\xd9\x05\xf0\xbd\x13\xfc\x18\xe8\xf1\x12\xea\x49\x33\xfc\x12\x8a\x5b\x78\xb8\xb2\xae\xbb\x88\xec\x79\xee\xeb\x14\xf0\x4d\xb4\x5f\x77\x62\xfe\xc5\xf6\x47\xa8\xcd\x29\xfc\xc8\x4b\x7d\x37\x88\xdb\x3b\xf2\x5d\x86\xb1\x2f\x99\xe6\x28\xb2\x94\x2f\xc0\xbc\x1a\x22\x7c\xff\xae\x20\x07\xea\x86\x8d\xfd\xfc\xf7\xbf\xb1\x17\xdb\x34\x14\xdf\xf6\xe6\xcb\xaf\x5f\x0e\x5a\x39\x3f\x7e\xbc\x62\xe1\x0d\x65\x53\xb9\xad\xa1\xb7\x39\x12\xde\x54\x32\xe7\xda\xc0\xb9\x09\xfd\x51\xd3\xcb\x04\x1c\x35\x0d\x90\xb0\x4b\x8a\xbb\x4a\x86\xfd\x89\x91\xe4\xcd\x95\x01\xba\xd2\x57\x7d\xfb\x76\xa9\xfc\xe7\xd4\x07\x6c\xd1\x62\xa9\x72\x4d\xcc\xa6\x4b\xfb\x3d\x39\xac\x26\xa6\xc4\x9a\x58\x4a\x88\xf5\xc0\x36\x95\x7b\xb7\x5c\xc2\x9a\x95\xe4\x46\x65\x6a\x62\xbd\x51\xcb\x26\x1a\x9b\x9f\x92\x62\x41\x6c\x88\x58\x42\xa8\x27\x84\xa4\x78\x61\x63\x73\xb3\xee\x38\xfe\xda\x0f\xa4\x62\x9e\x27\x8c\x63\x3c\x57\x76\x2d\xc3\x28\x39\x96\x4f\x30\x6d\x74\x56\x58\xdb\x40\xff\xca\x16\x6f\xa8\x24\xb6\x4b\xd9\xbf\x5d\x0e\x7e\x3a\xce\x49\x61\x97\x25\xb8\xac\x30\xf7\x49\xe0\x34\xa9\xf4\x37\x8a\x21\x84\x98\x63\x59\x9c\x49\x83\x3d\x57\x29\x82\x29\x8e\xaf\x20\x90\x70\xd5\x38\xc9\x21\xdd\xa2\x1d\x18\x54\x14\xa4\x60\x63\x38\x99\x43\xc3\x58\x1f\x51\x9a\x4d\x61\x62\x27\x5b\x6f\xd4\x3d\x9a\x3d\xce\x7e\x1f\xa1\x75\x7f\x01\x8d\x39\xea\x6f\x20\x22\x2c\x59\x2b\x57\xfc\x84\x1f\x7a\x05\x5a\x7a\x25\x12\xbf\xb9\xed\x83\x08\xce\xc3\xfe\x63\xe7\x22\xbd\xe6\x41\xcc\xdf\x5d\xb9\x5d\xbc\x36\x42\x92\x07\xd0\x82\xb2\x83\x2c\x6c\x01\xad\xb5\x3e\xd1\xbe\x13\x34\xfd\x63\xbf\x53\xfa\x7a\x1d\x
8a\x8b\xf4\x1a\x9c\xdf\x02\xb5\xbe\x41\x6a\x0f\x07\x28\x56\xd3\xbe\x3e\xd1\x90\xed\xf4\x0d\xb8\xf9\xe3\x46\xda\x2f\xaf\xd8\x0b\xfe\xf2\xe3\x8f\x53\x5d\x09\x00\x3a\xa7\x2c\xe7\x24\x7d\x5c\xf1\x31\x42\xeb\xcd\x88\x1f\xc9\x73\x9f\x30\xb1\x75\x6d\xb2\x89\xf5\x6f\x10\xe8\xae\xd3\x19\x69\x30\xd4\x8f\x1b\x64\x89\x79\xc8\xa2\xf7\x5f\x22\x5d\x1b\x38\x98\x3e\x71\x90\x86\xac\xc0\x30\xfe\xfc\x89\x2d\x11\xb6\xd4\x0d\x03\x9b\xcd\x91\xb5\xc6\xa4\xf5\x0e\xa1\x6d\x62\xce\x00\x3a\x98\x6e\x63\xcb\xc1\xfe\x57\xdd\xc6\x9c\x01\xc2\x54\xdd\xb2\x1d\x4c\x77\xd0\x18\xd3\x27\xee\x2f\xb2\x39\x9e\x9a\xb6\xee\xa0\x8d\x70\x6f\x20\xeb\x48\xda\x1e\xf0\x7d\xd9\xc6\x8f\xdf\x02\xe3\x7a\x36\xea\xdc\x8c\xe9\xce\x94\x8d\xe7\x2b\xa4\x6c\x24\xb3\x11\xd3\x77\x86\xff\xb1\x2d\x7e\xdc\xb4\xf1\x55\x55\x84\xb4\x3b\x8f\xcc\x17\x9b\x6f\xc0\xd8\xe6\xdc\x92\xa3\x41\xda\x05\xa1\x2e\xc5\x5e\xde\x2f\xb4\xff\xd6\xdc\x56\xbc\xe2\x88\x7a\xb5\x80\x29\xd0\x81\x1b\x37\x86\x29\xf3\xf1\xd4\x95\xb3\x81\x1c\xe4\xda\xc9\xff\x1b\x00\x00\xff\xff\xe6\xc2\x51\x59\x27\x2a\x01\x00") + +func pathed_paymentHorizonSqlBytes() ([]byte, error) { + return bindataRead( + _pathed_paymentHorizonSql, + "pathed_payment-horizon.sql", + ) +} + +func pathed_paymentHorizonSql() (*asset, error) { + bytes, err := pathed_paymentHorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pathed_payment-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7, 0x51, 0xab, 0x17, 0x3b, 0x30, 0x1c, 0xe, 0x44, 0x69, 0x42, 0xdf, 0x58, 0xbb, 0x36, 0xd1, 0xc2, 0xe5, 0x90, 0x33, 0x7b, 0x5b, 0xb6, 0xb9, 0x5c, 0x78, 0x36, 0x2f, 0x37, 0x4a, 0xd7, 0xd9}} + return a, nil +} + +var _paths_strict_sendCoreSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x69\x93\xaa\xc8\xd6\x30\xfa\xbd\x7f\x85\xd1\x5f\x76\x9f\x70\xf7\x91\x39\xa1\xfb\xf6\x13\x01\x22\xa0\xe2\x80\x0a\x0e\x37\x6e\xec\x48\x92\x04\x51\x01\x65\x10\xf5\xc6\xf3\xdf\xdf\x70\xa8\xd2\xb2\xac\x49\xad\xda\x7d\xde\xd3\x74\x74\x6d\x95\xc5\xca\x95\x2b\xd7\x94\x2b\x57\x26\xbf\xff\xfe\xcb\xef\xbf\x17\xda\x51\x92\x7a\x31\xee\x1a\x7a\xc1\x81\x29\xb4\x61\x82\x0b\x4e\x16\xcc\x7f\xf9\xfd\xf7\x5f\xb6\xf7\xe5\x2c\x98\x63\xa7\xe0\xc6\x51\x70\x04\x58\xe2\x38\xf1\xa3\xb0\x20\xfc\x9b\xfb\x37\x79\x02\x65\xaf\x0b\x73\xef\xc7\xf6\xf1\x33\x90\x5f\xba\x95\x5e\x21\x49\x61\x8a\x03\x1c\xa6\x3f\x52\x3f\xc0\x51\x96\x16\xfe\x2a\x10\x7f\xee\x6e\xcd\x22\x34\x7d\xfe\xab\xef\xcc\xf0\x0f\x3f\xfc\x91\xc6\x30\x4c\x20\x4a\xfd\x28\xfc\x91\xe0\x64\x8b\xf7\x39\x30\x9a\xf9\x5b\xd4\x38\x44\x91\xe3\x87\x5e\xe1\xaf\xc2\x37\xb3\xa7\xf0\xdf\xfe\x7c\x68\x3b\x74\x60\xec\xfc\x40\x51\xe8\x46\x71\xe0\x87\xde\x8f\x24\x8d\xfd\xd0\x4b\x0a\x7f\x15\xa2\xf0\x80\x63\x8c\xd1\xf4\x87\x9b\x85\xfb\xb6\xec\xc8\xf1\xf1\xf6\xbe\x0b\x67\x09\x7e\xd2\x4c\xe0\x87\x3f\x02\x9c\x24\xd0\xdb\x01\xe4\x30\x0e\xfd\xd0\xdb\x83\xc4\x51\xfe\x23\xc1\x28\x8b\xfd\x74\xbd\x45\xee\xba\x7f\x1e\x18\x80\x61\x8c\xc6\x3f\xe6\x30\x1d\x17\xfe\x2a\xcc\x33\x7b\xe6\xa3\xef\x5b\x8e\x21\x98\xc2\x59\xe4\xfd\xf9\xcb\x2f\x72\xa7\xd5\x2e\x54\x9b\x72\x65\x50\xa8\x2a\x85\xca\xa0\xda\xed\x75\x0f\x90\xff\xce\xe6\x5e\x0c\x1d\x3c\xf6\x93\xd4\x5e\x27\x78\xf1\xe7\xab\xd0\x09\x9a\x2f\xb2\x28\xce\x82\xe4\x7d\xc0\x38\x5c\xbe\x07\x72\x86\x1d\x0f\xc7\xef\x81\xdc\xd2\xe9\x62\xfc\x4e\xc8\x77\x80\xd9\x38\x49\x23\xd7\xc5\xb1\x1f\x3a\x78\xf5\x3a\x2c\x44\x28\xca\xc2\xd4\x86\x33\x18\x22\x9c\xfc\xf9\x8b\xa8\xf7\x2a\x9d\x42\x4f\x94\xf4\xca\x09\x74\xab\xa9\x0f\x2f\xb0\x37\x8a\xd7\x85\x1d\xf6\x72\xab\xd9\xed\x75\xc4\x6a\xb3\x77\xf2\xd0\x53\xc0\
x1f\xf3\x29\x5e\xbf\x07\x7f\xba\x7a\x1b\xf5\x23\xcc\x07\xb0\xba\xf8\x1d\x34\x9f\x82\xbd\x1f\x77\x9c\x25\xe9\xcc\x0f\x71\xf2\x1a\xe6\x47\xa0\x77\xe3\xdd\x52\x81\x77\xd6\xe0\x15\xbc\x47\xa0\xf7\xe3\x7d\x14\xf9\xd7\xf0\x3e\x02\xbd\x1b\xef\x1e\xde\x0f\xdd\xe8\x15\xbc\x47\xa0\x77\xe3\x9d\x67\x76\x92\xd9\xaf\xe0\xdc\x03\x7c\x04\xdf\xcc\x4f\xc6\x8b\x0c\x67\xaf\x71\xf6\x14\xec\xfd\xb8\x31\x8e\x5f\x63\xeb\xee\xfe\xbb\xb1\xed\xd4\xf8\x35\x74\x7b\x80\x77\xe3\xdb\x5b\xa5\x31\x86\xce\xeb\x68\x9f\xc0\x7d\x32\xf6\x83\xa5\xc4\x8b\x1f\xef\x6c\xc6\x86\xe1\x2b\xc8\x6d\x18\xbe\x9b\xe0\x83\xf5\x7b\x8d\xd6\x07\x90\x8f\xe2\xdc\xc6\x00\x6f\xa3\xdd\x42\x1d\x30\xef\x60\xcf\x11\x5f\x34\xb9\xaf\xc3\x3e\x9a\xc6\xb7\xc0\x8e\x86\xee\x0d\xc8\x47\xc3\xf5\x3a\xdc\xd1\x10\xbd\x01\xf7\x68\x58\xde\x84\x7b\x17\x7d\x47\x83\xf2\x3a\xdc\xde\x48\xbc\x09\xf3\xa8\xf2\x6f\x40\x6e\xf5\xf8\x75\x90\xbd\x6e\xbe\x0e\xf3\x44\x15\x5e\x07\xb5\x61\xf8\x3a\xc0\x83\xa8\xbe\x0b\x6a\x2b\x79\x07\xc0\xca\xa0\x57\x69\x76\xab\xad\xe6\x29\xf0\x6c\xee\x25\x8b\xd9\x01\xa2\x5b\xd6\x2a\x0d\xf1\x19\xae\x3f\x7f\xd9\xc7\xc6\x4d\x18\xe0\x3f\x1e\x7e\x2b\xf4\xd6\x73\xfc\xc7\xe1\x91\x3f\x0b\x5d\x34\xc6\x01\xfc\xa3\xf0\xfb\x9f\x85\x56\x1e\xe2\xf8\x8f\xc2\xef\xbb\x90\xb9\xdc\xa9\x88\xbd\xca\x03\xe6\x07\x7c\xbf\x3c\xc1\xf8\xf4\xe6\x01\x71\xb9\xd5\x68\x54\x9a\xbd\x57\x30\xef\x01\x0a\xad\xe6\x53\x04\x85\x6a\xb7\xf0\xed\x21\xbe\x7d\xf8\x2d\xd9\x21\xf9\x76\xde\xf2\x43\xf7\x0f\x6d\x3e\x72\xe8\xcd\xfe\x3c\xe1\x65\xb3\xd5\x3b\xe3\x67\xa1\x5f\xed\x69\x8f\x64\x9d\x06\xb4\x4f\x9a\x3f\x62\x39\x23\xe4\x23\x9d\x7f\x86\x64\xc7\x80\xb6\x5e\x9a\x7b\xdb\x59\xcc\x3c\x8e\x10\x76\xb2\x18\xce\x0a\x33\x18\x7a\x19\xf4\xf0\x8e\x0d\xef\x0c\xc0\xb7\x60\x0e\x76\x61\x36\x4b\x7f\xa4\xd0\x9e\xe1\x64\x0e\x11\xde\xce\x26\xbe\x9d\xdd\xcd\xfd\x74\xfc\x23\xf2\x9d\x93\x09\xc2\x93\xce\x9e\x0a\xe4\xa1\x9b\x3b\xd1\x3d\x76\xf2\x41\x00\x2e\x31\x7c\x2f\xe5\xa7\x46\xf7\xb7\x5f\x0a\x85\xc2\xc3\x2f\xbe\x53\x40\x63\x18\x43\x94\xe2\xb8\xb0\x84\xf1\xda\x0f\xbd\xdf\x58\xee\x5f\xbb\xb1\x69\x9a\xba\xfe\x7d\x07\xbd\x7d\x30\x84\x01\xbe\x00\xcc\xf3\x97\x80\x97\x70\x96\x5d\x82\x26\x49\xea\x1c\x7c\x06\x93\x34\x88\x1c\xdf\xf5\xb1\x53\xf0\xc3\x14\x7b\x38\x7e\x04\xf9\xe5\x5f\xe7\x63\xff\xa8\xc5\x37\xf2\x22\xb9\x8a\x11\x87\x89\x40\xc1\xf6\x3d\x3f\x4c\xcf\x6f\x66\xdb\xa7\x66\x3e\xb4\xfd\x99\x9f\x6e\xa7\x7c\x7b\xb0\xfd\xdd\x04\xcf\x66\xaf\xde\x5e\x84\x59\x70\x19\x71\x98\x05\x49\x66\xe3\x30\x8d\xb7\x4f\x9d\xf3\x68\x0f\xe3\x87\xee\x0c\x6e\xa7\x9b\x0e\x4e\xd2\xcb\x7d\xd9\x03\x8e\xa3\x00\x3b\x51\x00\xfd\xf0\x02\x14\xc3\x9c\xf7\x38\x1d\xc7\x38\x19\x47\x33\x27\x29\xa4\x78\x75\x4e\x99\x3b\x83\xde\x4b\x14\x25\xbe\x17\x6e\x03\x9d\xed\x63\xef\x18\xea\x3d\xc8\x49\x34\xf0\x18\x5a\x1c\x98\xfe\x63\x37\xad\x2e\x94\xb5\x4a\xb9\x5e\xf8\xed\xb7\x87\xa1\xf8\x9f\xbf\x0a\xc4\xbf\xfe\xf5\xca\xd3\xe7\xa3\x72\x8e\xe7\xd9\xa8\xbd\x85\xf1\xc9\x70\x9c\x61\x7b\x3a\x54\x6f\x61\x7a\x2e\x13\x67\xe8\x2e\x08\xcd\x1e\xe7\x73\xc5\xd8\xfa\xbf\x6b\x75\x62\x1b\x32\xee\xd5\x21\x8c\x1c\x7c\xaa\x0b\x4f\x74\xe0\x79\xa3\x4f\xfd\xf3\xb5\xcd\x3f\x0d\x8c\xf7\x84\x1c\x7e\x83\xc9\xf8\x84\x18\xee\x99\x78\xce\x63\xbc\x7c\x13\xc8\xce\xd0\x14\xa7\x33\x3f\x49\xdf\x04\x7d\x8c\xb6\x1f\xe4\x73\xff\x33\x9a\x45\x09\x4e\xfd\xe0\x05\xcd\xdf\x19\xd6\x0b\xea\x71\x32\xe6\x4f\x83\xfa\x47\x7c\x67\xe3\x7d\x6c\xe7\x05\xd1\x79\x69\x6e\xf0\x14\xcd\xb1\x17\x2f\x49\xcb\x21\xf8\xba\x76\xc4\x0e\x13\xaf\xdf\x1e\x2d\x1b\x8e\xdf\x69\x41\xf7\x99\x17\xe7\x32\x1f\x0f\xe2\x0e\x93\x04\xa7\x97\xf8\xb9\xd7\xd5\x17\x6f\xc3\x60\xab\x56\x97\x51\xcf\x63\x1f\xe1\xf0\x05\xab\xb3\xbb\xf9\x92\x49\xda\xdd\x2c\x38\x51
\x66\xcf\xf0\x56\xde\x90\xbf\xcb\x48\xbe\xdf\x10\x7e\xcc\xec\x1d\xa6\xac\xfb\xbe\x9c\x8d\xeb\xa1\x83\x2f\xc8\xc6\xe1\xc9\x03\x87\xcf\x1e\x7d\xe0\xfb\x4b\x02\xb1\x0f\xd8\xaf\x95\x87\xfd\xb4\x7e\x2f\x0e\xfe\xfc\x92\xe3\x67\x9f\x69\x6e\x14\xa7\x8f\xdc\x90\x2b\x8a\x68\xea\xbd\x02\x71\xee\xf9\xf0\x2a\x85\x69\x8a\x83\x79\x5a\xd8\xaa\x45\x92\xc2\x60\x5e\xd8\x86\x4c\x51\xb6\xff\xa5\xb0\x89\x42\xfc\xdc\x5f\xba\xd0\x9f\x65\xf1\x89\xb7\x7c\xa9\x85\x74\x3d\xc7\x6f\x0f\xca\x3e\x2d\x71\x82\xf7\xb9\xd9\x7f\x6c\xf1\x85\xd1\x39\x64\x36\xa2\xf8\x7c\x50\x7f\xdb\x71\xe2\x7f\x0a\xc4\xbf\x0a\x62\x53\x2e\xec\xbf\xfe\x3f\x7f\x15\x38\x96\xa5\xd9\x7f\x5d\x1c\xab\xd3\x69\xd8\xd5\x43\x76\x9a\xe5\x39\xb5\xb9\x2f\xb9\xf3\x5d\xa2\x6d\xab\x75\x17\x09\xda\xce\x1d\x6f\x20\x25\xc9\xec\x03\x11\x31\x4e\x9e\x38\x20\xfa\x62\xc4\x18\x63\xf8\xa8\x4b\xcf\xe9\x39\x99\xf3\x5e\x4b\xd3\x49\xb2\xee\x1d\x9e\x71\x4f\xd8\x22\xc1\xaf\x79\x98\xe7\x74\x9e\xcc\xe1\xaf\xa5\xf3\x88\xe2\xfd\x74\x3e\x73\x72\x67\xf7\x71\xb8\xc4\xb3\x68\x8e\xdf\x70\x69\xc7\xa6\x6f\x70\x44\x27\xe9\x8e\x1b\x58\xf0\x90\xaf\xfd\xed\x3d\xe3\x70\x94\xa2\xb7\x18\xb1\x78\xc1\xd1\x3c\x65\xc2\x43\x1e\xf8\x09\xc6\x73\x46\x3c\x69\xed\x45\x66\x1c\x73\x44\x57\x33\xe3\x98\x14\xff\xed\xa8\xb7\x4f\x27\x6f\x17\x74\xea\x35\xed\x3e\xc9\x70\x5d\x4b\xd5\xc9\x12\xc0\x35\xd3\xae\x9d\xc7\x7f\xc5\x52\xfb\x49\x92\xe1\xf8\xfd\xa8\x50\xe4\x5c\x9c\x9d\x3e\x63\x4b\x3a\xf3\x03\xff\x85\x88\xe2\xf3\xe6\x82\xf7\x0b\x26\x4e\x56\x55\xae\x9a\x45\x9d\x3e\x7f\xaf\x79\xd4\x09\xce\xeb\xe7\x3f\xaf\x61\xdd\x0f\xda\x19\xa6\xc3\x48\xfe\xcf\x65\xc5\x7b\x92\xee\xbd\x5a\xc8\x4f\xd7\xd0\xf6\x62\x9e\xae\x9e\x98\xe2\x77\xcc\x37\xce\x05\x70\xb5\x5b\xa5\x7c\xf1\x2e\x1a\xc3\xd0\xc3\x17\xe7\xe6\xa7\xcc\x39\x5d\xb6\xbb\xde\x56\x1f\x73\xe7\xd7\xb3\xe8\x8b\xf9\x63\x47\xce\xfa\x12\x73\xd2\x55\x8c\x93\x6c\x76\xd1\xba\xa7\xab\x00\xbf\x39\x9f\x3b\x2e\xb1\x5e\xcf\xcf\xb3\x75\x8b\x6b\x99\x7a\xb6\xe2\xfc\xdb\xbb\x18\x77\x78\xe8\x35\xee\x1d\x40\x2e\x31\xe2\x7d\x62\x77\xb6\xc2\x7d\x0d\xa3\xe4\xed\xcc\xda\x8d\xe2\x37\x92\xa1\x05\x59\xec\x89\x6f\xf0\xec\x75\x94\xc9\x87\xf1\x55\x9b\xdd\x4a\xa7\x57\xa8\x36\x7b\xad\x63\x52\xd1\x12\x75\xb3\xd2\x2d\xfc\xf6\x4d\x95\x3a\xed\xa1\x56\xd5\xa9\x72\x95\x56\x9a\x06\x23\x0d\x74\xa5\xd1\x94\x75\xa5\x66\x36\xdb\x26\xa5\x0d\xe9\x51\x43\xe9\x6a\xad\xa6\x59\xae\xb4\xc4\x6e\x1f\x18\x65\xd0\x1a\x50\xda\xb7\xef\x05\xe1\x70\x31\xfb\x7f\x58\x82\xf8\xbe\xe7\xef\xe1\x2f\xfb\xbd\xf0\xf8\xcb\xb7\x6f\xdf\x0b\xdf\x44\x43\x14\x45\xf1\xaf\xbf\xbe\x9d\xdc\xa0\xfe\xf5\xe7\x5b\x14\x8a\x6c\x5f\x6a\x0f\x45\x76\xc8\xf4\xc5\x8a\x36\xe8\x77\x28\xb3\xde\xa2\xcc\x16\x23\x99\xaa\x66\x1a\x80\xa9\x98\xed\x7a\xab\x49\x19\x9a\xc5\xf4\x3b\x5a\xab\xda\x69\xd6\xeb\x1a\x75\xa4\xf0\x9c\x30\x9e\xe5\x05\x81\x66\x58\x81\xfe\x5e\x20\xdf\xa4\x90\x7e\x9b\xc2\xb2\xd1\x1e\xaa\x1a\x53\x67\xc1\x40\x92\x2b\xcd\x7a\x7d\xc0\xb2\x75\xb9\xd7\x6f\xf5\xea\x6c\x5f\xee\x77\x8c\x96\x46\xe9\x5a\x45\x1e\xd0\x95\xba\x55\x35\x3a\x7a\xa3\xd2\x55\x25\xf5\x91\x42\xfe\x45\x0a\x99\xf7\xf0\x90\x79\x07\x85\x14\x6d\x28\x94\x66\x56\x58\x4a\x6c\x0c\x4c\xc5\xd4\x68\x71\x58\x13\x07\x03\x75\x30\xb0\x28\x4b\x1b\x0c\x87\x1d\xae\x32\x1c\x54\x7a\xed\xba\x3c\x18\x75\xc5\x3e\x07\x06\x2d\xe6\x2b\x29\x1c\xd4\x55\xae\xd3\x64\x5a\xcd\x6a\xa5\x5d\x6e\x34\x15\x09\xd0\x94\xc8\xd0\xdc\x88\x6d\x37\xe5\x6e\x47\x57\xfb\x75\xa0\x4a\x7a\xb9\x61\xe8\x55\xa5\xc5\x74\x41\x65\xd8\xb7\xcc\x47\x0a\xb9\x17\x29\xe4\xde\x33\xca\xdc\xdb\x14\x4a\x03\xd5\xa8\xf5\x2d\xbd\xdf\x1a\x6a\x8a\x6e\xf5\xea\x7d\x8b\x55\x54\x4d\xa4\xf5\xe6\x70\x48\xd5\x8c\x7a\x03\xb4\xc4\x9a\x68\x56\x0c\xc5\xe4\xf4\x76\xb9\x5b\x5
1\xac\x41\xab\xf9\x48\xe1\x4e\x41\x38\x9e\xd8\x5d\xdf\x0b\xfc\xf1\xd3\x03\xa5\x60\xa7\x34\xef\xa0\xf4\x05\x1b\x71\x9e\x5b\xbd\xc1\xdc\xbc\x9c\x31\xfd\xa8\xcd\x79\x9a\x35\x7d\x64\x27\x47\x3b\x02\xef\xb2\x34\x87\x31\xc7\x3b\xa4\x4d\x01\x9b\xb5\x79\xc1\xa5\x68\xe8\xb2\x34\x49\xda\x80\xe5\x04\x48\x31\x2e\x74\x49\x86\xa0\xa1\x43\xd8\x2c\x65\x73\x34\x6d\x13\xc0\xc6\x82\xb0\x65\x0f\x71\xe3\xb5\xc5\xc1\x02\x0a\x52\x98\xa6\x5c\x97\x62\x78\x48\x00\x9b\xc0\x80\x70\x1d\xd2\xe5\x1c\x9a\xe4\x11\xe9\x42\xe4\x50\x84\xcd\x21\x44\xf0\x88\xa6\x1d\x16\x00\x96\x62\x05\x9e\xe3\x49\x8a\x85\x24\xf7\x6d\x27\x5d\xc4\x76\xb0\xfe\xb6\x97\x34\xa8\xfb\xcc\xba\xb4\xee\xd6\x25\x20\x87\xb2\xa0\x51\xc4\x6a\x22\x15\x13\xc2\x4b\x93\xbc\x9a\x6f\xc8\x81\xd3\xed\x0f\xa1\x54\x83\x8a\xb7\x85\xaf\x34\x19\x1d\x6e\xe6\x94\xf1\x26\xe6\x91\x38\x20\x99\x1d\x98\x34\xfd\x82\x8e\xdc\xf5\xfa\x76\xa6\xf2\x2f\x08\x2a\xc1\x60\x1a\xdb\x18\xba\x82\x43\xda\x3c\x26\x5c\x96\x61\x79\x87\xa5\x09\x92\x13\x5c\x4c\x38\xbc\xe0\x92\xb6\xeb\x38\xc0\x76\x18\x07\xf1\x0e\x63\x13\x2c\xcf\xd8\x14\xcd\x39\x24\xc0\x24\xbb\x15\xb2\x7b\x08\x3b\x25\x38\x90\x10\x28\xde\x11\x20\x47\x60\xde\xa5\x48\xdb\x61\x79\x47\x00\x90\xe0\x09\x12\xf3\x34\x2d\x38\x0c\x44\x8c\x40\x32\xd0\xe5\x08\x87\xe2\x69\x88\x29\x17\x08\x14\x72\x29\x8a\xc5\xdf\xbe\x17\xa8\xef\x05\x92\x05\x04\x03\x08\x92\xa7\x1e\x24\x56\x6d\x8f\x26\x64\x33\x63\x23\xc2\xae\x81\x3e\x13\xae\x5b\x4b\x73\xa5\xd2\xd6\x3c\x9a\x16\x97\x8a\xd8\x4a\xcb\x64\x9d\x6a\x00\x09\x70\xa3\x71\xe8\xda\x15\x79\xae\xb5\x87\x0e\xc3\x86\x06\xea\x81\xc1\xd4\xad\x36\xf2\xee\x40\xf0\x14\x59\x28\xba\x7d\x98\x02\x92\x33\xaa\x7d\xb4\x43\x3d\x18\xa5\x6c\xcd\x3b\x72\xdc\x10\x02\x5e\xef\xaa\xbc\xcd\xf6\x09\x3b\x5b\x55\xd2\x45\x92\x5a\xc6\x06\x54\xad\x92\x16\xcb\x81\x5a\x49\x1d\x91\x09\xca\x15\x79\x90\xcf\x29\x6f\x1a\x51\x70\xd8\x9a\xac\x55\xa1\xdf\x9c\x71\x65\x51\x8b\x1a\x2c\x59\x5f\xd5\x94\xba\xe0\x11\x51\x2b\xd3\xe9\x69\xc2\xfb\x83\x1d\xfe\xea\x05\x89\xd5\x88\x4b\xa3\xfe\x5f\x20\xb1\x02\xa0\x58\x1b\x39\x8e\xed\x3a\x98\x82\x14\x10\x80\x03\x58\x1a\x01\x5a\x60\xb1\xc3\x6d\xfd\x0e\x66\x1c\x97\x02\x58\xa0\x68\x1b\x0a\x0c\x25\x50\x2c\xcb\xd1\x98\x06\x2c\x09\x20\x8f\x76\xa6\xf5\x0e\x52\x8f\x5d\x17\xf3\x36\x74\x29\x68\xdb\x2e\xef\xd2\x0c\xc3\xd2\x14\x06\x34\x41\x93\x0c\x60\x58\x8e\xa6\x30\xa2\x31\x65\x73\x2e\x62\x79\x81\xa6\x09\x92\xc0\x04\xc2\xc0\x15\x58\x1e\x01\x17\x7e\xfb\x5e\xa0\x4f\x24\x96\x3e\x48\xac\x2c\xf6\x26\x5c\x31\x66\x09\x7b\xe2\x09\x66\xbf\x69\xe6\xca\xbc\xe4\x51\x35\xde\x2e\x39\x58\xe8\xa1\x89\x99\x28\x63\xbd\x4a\x11\x03\xc6\x9a\x64\x9b\x99\x3e\x13\xdb\x6e\x91\x4c\x72\x93\x72\x57\x24\x4c\xb3\x9e\x4b\xba\xeb\x16\x56\x56\xb2\x30\x59\x45\xbd\x74\xac\x0d\xa4\x79\xb4\xe3\xf0\x4e\x62\xf3\xed\xa7\xea\xe3\x9f\xbd\x13\x3e\xfe\x28\xe6\x62\xdb\x38\xc8\xce\x1a\x18\x03\xdc\x9a\x1a\x86\x67\xaa\x9e\x66\x4c\x11\x2c\x2e\x9c\x66\xa6\xc9\xa3\xac\x5f\x29\x4f\xd3\xca\x68\xd8\x52\xcb\x79\xa6\xf8\xa5\x65\x29\xca\x96\xf5\x78\xc9\xf0\x44\xa7\xa7\x87\x0d\x59\xd1\x3a\xfd\x46\x96\x17\xeb\x54\x49\xf1\x7b\x0d\x49\xf6\x36\x61\xd1\x9a\x68\xc5\x9d\x44\x37\x2e\x48\x74\xab\x72\x49\x2a\x1e\x24\x5a\x26\x6a\x9f\x28\x7b\x9f\x73\xbd\x53\xa2\x11\x01\x49\x9a\xe2\x11\xe4\x5c\xc7\xb6\x5d\x16\xd2\x8c\xed\xd0\x24\xa4\x00\x70\x59\x20\x00\x92\x71\x6d\x92\x77\x04\xd6\xa6\x09\x06\x43\x02\x39\xbc\xed\xd8\x82\x83\x00\xe6\x79\xd7\xd9\xc6\xb1\x77\xd1\x0a\x12\xf2\x14\xcb\xb1\x2e\x47\x39\x02\x4f\x0b\x3c\xe5\xda\x10\x12\x04\x81\x59\x64\x23\x4c\x23\xc7\xc5\x8e\xc0\x0a\xd0\x01\x88\x71\x1c\xe8\x90\x14\x63\x3b\x24\x02\x14\xe0\x79\x48\xb1\xdb\x60\x81\x39\x91\x68\xe6\x41\xa2\x6b\x68\xb6\x6c\x52\x25\x
da\x8f\xc2\x99\xe0\xf4\x56\x9b\x19\x26\x23\x4f\x64\x9b\x7c\xc8\x75\x5b\xdc\xac\x3c\x9d\x59\x13\x86\x36\x07\x8b\x86\xd4\xaa\x63\x5c\x14\xa6\x5e\x7d\xaa\x50\xd4\x70\x3a\x00\x9d\x7e\x4d\xe9\xc9\x46\xc4\x68\x84\x18\x93\x23\x33\x1f\x27\x12\xac\x79\xee\xde\xf0\x6e\x25\xba\x7e\xc2\xf1\xc6\x44\x6d\x99\x41\xee\x0d\xc1\xa2\x63\x13\x56\x4a\x9b\x59\x87\x1d\x2d\x02\xcc\x4e\xa9\xc5\x44\xe3\xc1\x42\x1b\x8b\x1d\x9c\x13\x8b\x1c\x7a\xb3\x91\x3b\x4c\x03\x39\x28\x17\xb9\x48\x94\x67\x6b\x9e\xe1\xe9\x22\x65\x05\xa9\x5b\xa1\x21\xd1\xed\x0a\x9a\x56\x63\x7c\xdf\xda\x35\x65\x5c\x90\x58\xd3\xbc\x34\xea\xff\x05\x12\xeb\x60\x2c\x70\xc0\x66\x78\x1b\x11\xac\x8b\x1d\x40\x11\x0e\x43\x39\x04\x01\x48\x9a\x77\x78\x01\x39\x04\x26\x38\x1a\x38\x8e\xcb\x30\x02\x01\x30\xb5\x75\xdc\x34\xe7\x10\x0e\x40\x1c\xbf\x95\x94\xbb\x48\x3d\x61\x53\x82\x4d\x09\x34\xe1\x20\x47\x80\xfc\xd6\x4a\x53\x02\x26\x08\x9e\x20\x38\x9e\x42\x1c\xc6\x04\xa0\x80\x4d\x43\x9b\x45\xc0\xb6\x49\x1a\x01\x8e\x21\x29\x97\x73\x69\x9e\x17\xb8\x2d\x0e\xf6\x44\x62\xd9\x07\x89\x6d\x94\xc7\x8d\x68\x1d\x94\xec\x25\x19\x11\xcb\x9e\xea\x3b\x25\x8b\x45\x66\x71\x35\x19\xf5\x37\x52\x2b\x6a\x50\x3a\xc5\xd2\x1a\x57\x2d\x99\x3d\xbb\x33\x2d\xad\x06\xad\x31\xec\x52\x0b\x75\x45\x03\x45\x1b\x37\x37\x79\x80\x27\x4b\x27\x70\xb2\x3c\xea\x3a\x4c\xce\xf2\xbe\x08\xf8\x1d\x87\x77\x12\x7b\x22\x44\x33\xbf\xa9\x22\x49\x93\x46\x40\x31\xe4\xb9\x48\x84\x45\x1a\xcc\x56\x8e\xe3\x82\x6a\xcc\x4e\x47\xa4\x01\x16\x21\xea\x84\xab\x4d\x7f\xaa\xd7\xed\x7a\x4f\x46\x14\xf4\xf3\x7e\x39\xf4\xca\xa2\x14\xe9\x2a\xf0\x50\x98\x70\x29\xc2\xa0\xc2\x3b\xd3\xca\xd2\xde\xf8\xb5\xd1\x0e\xb3\x79\x41\x62\x47\xe7\x03\x2e\x8b\xff\x25\x12\xcb\x33\x1c\x4d\x03\xd7\x11\x30\x05\x38\x86\x16\x18\x86\x73\x28\xc8\x32\x94\x6b\x0b\xc0\x46\x1c\x84\x24\xc7\x12\x04\x86\x0e\x8b\x1c\x0e\x0b\x24\xc5\x03\x00\x90\x83\x1d\x4c\x12\x60\x67\x1f\xef\x21\xf5\x90\x84\x8e\x8d\x68\x9a\x76\x6d\x17\x38\x1c\xe5\x02\x04\x05\xc1\x71\x11\x16\x6c\x16\x93\x2e\xa0\x39\x97\x71\x01\x0f\x05\x87\xe1\x08\x82\x07\x0e\xe5\x3a\x2c\x41\x32\x36\x6f\x33\x80\xda\xc6\xca\xdc\x89\xc4\x72\x0f\x12\xdb\x04\xf3\x11\xe8\xea\xb9\x52\x4a\x91\x47\x96\x09\x11\xf5\x26\xc3\xb0\x29\x7b\x43\x9a\x76\x3b\x35\xa9\x58\xab\x29\xb2\x6d\x90\x3c\x54\x65\xb6\x26\xe6\x14\x8f\xe5\x72\x20\x53\x64\x2f\x5b\xb5\xe3\xb0\xcf\xa9\x23\x21\xea\xd8\x4d\x75\xda\x1e\x57\x4b\x0e\x0c\x04\x8f\x5c\x37\x76\x1c\xde\x49\xec\x49\x9c\x8b\x45\x4b\xdf\xac\x96\xf9\x46\x9c\xe6\xa6\x44\x73\xd9\x3a\x1b\x70\x64\x4b\xda\xac\x46\xb5\xc4\x37\x66\xa5\x61\x27\xd2\x54\x30\x1b\xd6\xa9\x71\x6c\xe7\x9b\x36\x10\x84\xe1\xd2\xad\x07\x4e\xa9\x15\x90\x8c\x8b\x28\xa1\x8d\xeb\xa1\x39\x14\xc7\x82\x2e\x98\x52\x97\x49\xb5\xbd\x46\x0c\x2f\x48\xac\x93\x9c\x0d\xf8\xff\x15\x12\xfb\x42\x5e\xe2\x42\x41\xd0\x47\x13\x12\x87\xa2\xa0\x7b\x24\x76\x1e\x27\x51\xa6\x65\x99\x07\x01\x88\xda\xf9\xb0\xa4\x38\x25\x54\x5d\x37\xcd\x45\x09\x2f\xb4\xd6\x66\xbe\x00\x43\x27\xca\x91\xdb\xdd\x4c\x35\x77\xa4\x58\x9d\x72\x9d\xaa\x4c\xff\xfa\xf6\x98\x36\xd8\xe5\x78\x78\x92\x3b\xa4\x84\xd8\x9d\x10\x13\xff\xe6\xe9\xa7\x17\xd8\xa5\x1a\xce\x93\x55\x77\xec\x11\x7d\xaf\x1e\x59\x56\xaf\x73\x18\xce\xd4\xf6\x06\x1d\xae\x02\x22\x59\x27\x74\xa3\x98\x0f\xbb\x65\x61\x33\x58\x0e\xac\x1e\xbd\xf2\xdb\xfe\x30\xeb\xda\xa4\xbc\x0c\x0c\x1d\xf3\xdb\xe7\x49\x9a\x78\xc8\x8c\x91\xdb\xff\xe9\x2d\x23\x00\x27\x50\x34\x71\xfc\x0b\xe8\xcf\xe6\x04\xf9\x74\x6c\x6e\xed\xd5\x31\xdd\xb7\xeb\x14\xb9\xed\x94\x40\x3c\xfd\x8f\x23\x1f\x3b\xf5\x82\xf8\x3f\x2f\x7f\xba\x21\xc7\xf7\x62\x7d\xce\x6d\x38\xcf\x4b\x6c\x6e\xc0\xf6\x42\x81\xcc\x0d\x18\x5f\x28\x65\xf9\xa8\x0d\x39\x29\x67\x39\x91\xb5\xa1\x5e\xe9\x73\x1d\xa6\
xc6\x34\x59\x85\x6e\x2a\xb5\x06\x35\xd4\x38\x71\x50\x01\x15\xb1\x27\x0e\xeb\xa2\x2e\xd6\xe8\x7e\x57\xa2\x2c\xb6\x61\x0d\xd4\x01\xad\x89\x12\x2d\xe9\xa7\x76\x44\x2b\xaf\xfb\x82\xe6\xe3\x26\x37\xb0\xbb\xf3\x51\x2c\x95\xc4\xac\xc7\x8a\x3d\x69\x6c\x28\x5e\x2f\x23\x3c\x2e\x26\x67\x76\xb0\x4c\x98\x13\x73\xb9\x9b\x95\x96\x77\x1f\xb7\x7e\x41\x2e\xab\x8e\x90\x18\xad\x29\x12\xc6\xf4\x24\x70\xa4\x55\x3b\xc5\x1d\xde\xdb\x48\x35\x97\x12\xcd\x36\xcd\x0a\xa3\x05\xbd\x1c\xcc\xc5\xf1\x68\x37\xab\x95\x9c\x80\x9e\x06\x67\x36\x58\x12\xc5\xa9\x47\x46\x4e\xc6\x0e\x5a\x32\xa1\x28\xfc\x74\x38\x0b\x94\x8a\xbb\xd6\x2d\xc0\x4e\xb5\xa0\x3e\x2a\x49\x25\xd1\x64\xa4\x7e\x7b\x0f\x2e\x80\xbe\x9a\x37\x7b\x03\xda\xb6\xdc\x16\xae\x94\x7a\xcd\x78\x69\xf9\xa5\x81\xbf\xd2\x59\x26\xb0\x1d\xd2\xe9\xcc\x45\x69\x3c\xe8\x94\x56\x70\xe1\xf6\x26\x72\x29\x18\x8e\xba\xe2\xba\x5c\x76\xdb\xa2\xd9\x2d\xc1\x44\x91\x50\x6b\x2e\xca\x8d\x25\xa3\xa8\x4e\x64\x8c\xa1\x27\x7b\x7f\xfd\x75\x1e\xbe\xdc\x99\xf5\xf4\x4d\xac\x6f\x3c\x65\x7d\xc5\x6f\x81\x96\x99\x99\xa2\x50\x02\xfd\x8d\xd4\x1b\x95\x34\x6b\x41\xb1\xcd\x92\x55\xaa\xb2\xcc\xa0\xd2\xa6\xda\xea\xb8\x45\x55\x1c\xa4\x06\xde\x23\xeb\xc3\xdd\xf3\xde\xe3\x1f\x49\x7c\x70\xe2\xfb\xef\xb2\x28\x0a\x65\xe3\x86\xa1\xe1\x09\x9d\xb5\x7d\x90\x2f\xb1\x92\x75\x21\x47\x4b\xf5\x5c\xa1\x8c\x36\xdb\x51\xc7\x7a\x5d\x9e\x08\x81\x62\x4c\x06\x5c\x31\x27\x2d\xa9\xd3\x32\x56\xb1\xb8\xd4\x7d\x3c\x1e\xad\x6a\xa3\xc5\xa0\x41\xdb\x9e\x1e\x89\xc9\x22\xb0\x82\x9e\x3a\x1f\x42\x3b\xe4\xd8\x11\x3d\x95\x8c\x4f\x1f\x1a\xe6\xa6\xa1\x31\xce\xb4\x42\xac\x30\x73\x16\x00\xaa\x2b\x2e\x8c\x81\x3d\xf2\x3b\x6e\xaa\x0c\xa7\x56\xa3\x59\x9e\x78\xae\x51\x5e\x5a\xb3\x5e\x59\xcd\x95\x28\x90\x8a\xe2\xe3\xd0\x44\xb7\x6a\x85\x0c\xd4\xf6\x2a\x05\x42\x5f\x43\xae\x28\xda\x89\xde\x0c\xd5\x45\xcf\x22\x8b\xbd\x5e\xce\x8e\xe2\xf1\x70\xbd\xee\x2d\xfa\xa5\x8a\x13\x99\x33\x75\x4e\xf5\x8c\x76\x96\x8c\x1a\xfc\x10\x25\xab\x89\xc7\xe6\xeb\x72\x71\xb0\xc2\x43\x61\xa8\x09\x74\xa8\x27\x1b\xb0\x06\x95\x48\xca\x3f\x9d\xf5\xec\x4d\xac\x37\xcf\x58\x2f\x35\x53\xb5\x57\x52\x10\xa7\xcc\x75\x18\xd9\x9a\x9b\x98\xb8\x42\xb7\xcb\x23\xae\x25\x50\x23\x0a\x94\xc7\x35\x7a\x22\x87\xeb\xaa\xbc\x3c\x1a\xa4\xf9\xad\xac\xc7\x90\x61\x37\x60\xa4\x77\x44\xb9\x57\xec\x6b\x8c\x95\xcc\x41\xac\xfa\x35\x48\xcb\x93\x99\x44\xd4\x96\x6b\xce\x87\xd8\xb5\xc6\xf5\x12\xf0\x2d\xa5\xa7\x25\x49\x59\xa8\xa9\xab\xe9\x62\x3a\x8e\x40\xae\x8e\x9a\x0b\x42\x26\xd6\x03\x2a\xef\x33\x7a\xa3\x5c\x33\xd1\xbc\x2f\x7f\x3e\xeb\xb9\x9b\x58\x3f\x3c\x97\xfa\xf6\x74\x2a\xcb\xf6\x8a\x69\xd4\x87\x6d\xdb\x6a\x81\x4a\x31\x73\xec\x68\x14\x52\x63\x25\x21\xa0\x51\xac\x4c\x04\x72\x61\x53\xf2\xa0\x7a\x64\xfd\xe2\x56\xd6\xb7\xc8\x25\xa4\x6a\xa3\x58\x1f\x29\xf3\x8e\x3f\x32\x37\xcb\x55\x90\x2e\x65\xaa\x24\x08\x0b\x22\x9b\x8c\x1a\x7e\x08\xf9\xb1\xc3\x4d\x13\x45\x50\x8c\xf1\x84\x25\x5b\x93\x34\x9d\x44\xb1\x4a\x76\xf5\x7e\x4a\x4f\x4b\x15\xd9\x6d\x96\x5a\x96\x3e\x5a\xda\x03\xbe\xa2\xd5\x4b\xf4\x83\xc1\x79\xd9\xb1\x5f\x2a\xd0\xbc\xc2\xb1\x3f\x14\x69\x1e\x57\x80\x08\x42\xa0\x08\x87\xe3\x01\x87\x31\x0b\x68\x9e\x76\x08\x96\x04\x48\x20\x79\x81\xe3\x48\x86\x01\x2e\x45\x39\x2c\x76\x05\x81\x00\x02\x0f\x39\xe0\x12\xc0\x25\x48\x06\x13\x24\xcb\xbb\xc4\xe9\xa0\xee\xac\xd2\x3e\x8d\x8b\xf4\x9a\x4d\xe0\x1a\x43\xcf\x9d\xb4\x3e\x0b\x12\x8d\x2f\xb3\xed\xa9\xd4\x50\x15\xb1\x2f\xb5\x40\x57\x5e\x2c\xfb\x56\x06\x8b\x9b\x43\x18\xfe\x5a\xe7\x2f\x17\x64\x7e\xb8\xf3\xc7\xa2\xcc\x63\x5a\xe0\x70\x0a\xca\x7e\x8b\x65\xe1\xd9\xb5\x4b\x8b\x12\xcf\x74\xe2\x02\xa6\x10\xa7\x79\x14\x4f\xe7\x30\x49\xe6\xe3\x18\x26\xf8\x02\xa6\x1e\x4e\xd2\x42\x57\x56\x0a\xcd\x3d\x70\xe1\xcf
\x42\x17\xcf\x53\x1c\xd8\x38\x2e\x50\x04\xc9\xbe\xa7\x21\x37\x8a\x11\x4e\xd0\x3c\x0a\x43\xbc\x4a\x67\x30\x0b\xd1\xf8\xbc\xa1\xdd\x16\xc6\xf7\x20\x9b\xc1\x24\x4d\xd0\x7c\xcb\x06\xfa\x79\xef\xf7\xc8\x8e\xae\x5a\xca\x93\xd9\xb2\xc3\x84\x13\x3c\xa3\x88\x45\x1f\xe6\x6e\xae\x4f\x8b\x46\x25\x1f\x9a\xd2\xb0\x02\xd2\x6a\x6b\x21\x8c\xfa\x6c\x0c\x5a\x27\xea\xb5\x77\xf5\xb9\x58\xab\x36\xa1\x66\x67\xd6\xc6\x13\xa4\x8e\x5b\x53\x6b\xa3\x71\x47\xe3\x7d\x12\x17\x47\x12\xeb\x07\x6e\xee\xe6\x4a\x4b\xb4\x26\xf9\xd7\xc7\x1b\xa2\xd8\xad\x82\x84\xed\xb2\x86\x4c\x97\x52\xbb\xa1\x34\x43\xde\xe9\xc7\xf6\x94\x16\x06\xfc\x24\x1c\x23\xa3\x34\xe4\x61\x05\x0c\x3b\x64\x0e\xe1\xee\x29\x85\xb2\x71\x17\x3d\x9a\xa3\xbd\x4d\xda\x49\xfe\x49\xd0\xd4\x78\x9a\x9f\xa8\x94\xd7\xeb\x9e\xd8\xb4\x53\x5c\x1a\x28\x6e\x4f\x84\xd3\x60\x4a\x06\x62\x27\x9a\x6d\x06\xab\x74\xb5\x6c\x0e\xb5\x3c\x41\xce\xb2\xeb\x19\xf6\xb4\xdb\x8a\x47\x3d\x27\x1b\xe8\x59\xde\xc8\xab\xb5\x91\x6e\x34\x2c\xd2\x96\xdb\xf2\x3c\x0d\x3b\x71\x55\x1d\xf7\x6a\x5a\x53\x9a\x8f\xc5\xfa\xf5\xe6\xf4\xbf\x39\xbe\xdb\x0d\x48\x85\x29\x2e\x17\x45\x47\x65\x98\xb6\xa5\x4c\xac\x86\x04\x5d\xa6\xe9\xbb\x6a\x89\x1e\x2c\x4d\xba\x4a\xea\xd2\xb0\xbb\x6e\xa6\x4a\x51\x31\x1e\xcd\x9b\x68\x07\x5e\x40\x5a\x94\xe3\xb1\x16\x19\x2c\x48\x3c\x6b\x20\x95\x4c\x57\x93\xee\xb0\x3e\x12\xf2\x8a\x17\x75\x25\x88\xfb\xbc\xe9\x2b\xd1\x71\x1d\xd6\x3b\x5a\xc6\x13\x81\x38\x32\x65\x77\x5f\xb1\x4c\xe2\xc0\x29\xbd\xcf\x28\x04\x1e\xb7\x38\x71\x2d\x94\x89\x76\xa2\x56\xbc\x25\x22\x01\x49\x9a\x02\x3f\x9c\x30\x81\x3e\x0d\x04\x03\xb0\xd3\x32\xbd\x74\x4b\x0f\x17\x7f\x44\x7d\x6c\x5f\x2c\xf6\xe5\x64\x98\x55\x02\xb9\xa7\x65\xad\xa9\x2c\x9b\x3d\x39\x72\x47\x68\x10\xd7\x2a\xfe\x88\x9f\x4e\x44\xab\xa4\xf5\x08\x8a\xd3\x67\x19\xc1\x4c\x45\x0b\x88\xa6\x48\xf2\x6c\x24\x6a\xb9\x25\x05\x83\x20\x44\x72\x20\xe3\x5e\xd4\x40\xad\xba\x24\x18\xa4\xd1\x1e\x2c\xa4\xbd\x3b\xcd\x60\xd9\xb6\x06\x23\x4a\x9e\x0d\xfa\x30\xb6\x38\x73\x95\xdb\x7d\x5a\x6d\xd6\xbc\x79\x48\x8b\xdd\xf2\xb8\xaa\xcc\x59\x7b\xd5\xad\xf6\xb7\xfd\x53\x4f\x82\x56\xef\x8c\x15\x47\x01\x51\x77\x7f\xaf\x4a\xd0\xcc\x1e\x39\x51\xca\x8f\xac\x7d\x6c\xbf\x22\x97\x47\x71\x69\x3e\xec\x60\x53\x1d\x0d\xb5\x6e\xdd\xb3\x72\x22\x1c\x76\xf4\x61\x9d\x9a\x72\x84\x99\x28\xb0\x38\x70\x73\xb8\x31\xd2\xa5\x16\xae\x36\xf5\x46\x5f\xee\x2f\xd3\x78\x69\x9b\x63\x0d\x59\xed\xa8\x33\x2b\xfa\xa6\x95\xf1\xa2\xe4\x50\x9b\x85\xd1\xae\xf3\x08\x19\x3b\x79\x17\xeb\xdc\x04\xfb\xf4\x24\x88\xaa\x7c\x4f\x9d\xc9\x25\xec\x21\x1a\xb4\x07\xa9\x56\xaf\x6f\xfa\x16\x9f\x5b\xfe\x48\x82\xe5\x8c\xd5\xd9\x9d\xd2\xed\xf3\xf0\xd5\x47\x7d\x11\x9f\xe8\xc7\xc3\xb5\x0b\x77\x24\xcb\x6a\x1e\x04\xa5\x4c\x66\x92\xa3\x45\xbd\xcc\x6b\x2c\x8d\x54\x06\xd2\xb8\xaa\xd3\x4d\x2c\x38\x56\xdb\x55\xab\xc5\x9a\xcf\xd6\x96\x66\xab\x38\x12\x53\x40\x1f\x59\x71\xc4\xa7\x3e\xb6\x6f\x48\xc2\x34\xa8\xf7\xa9\x05\xbd\x04\x86\xbb\xe6\xdb\x0d\x3c\xad\xd8\x64\xaf\x57\x65\xfd\xd5\x62\x5a\x25\xa4\xc8\x1b\xc4\xad\x14\x78\x2d\x92\xa3\x0c\x7b\x3a\xa6\x9c\x6e\xcf\x74\xb1\x1c\x2d\x11\xd1\x16\xa1\x3b\x96\x07\xab\x74\x6c\x89\xb3\x44\xcf\x26\x33\x29\x58\x4f\xa4\x3d\xbd\x62\x8b\x2a\x97\xc4\x16\xc3\x0e\x25\x99\x4e\x35\x4b\x69\x91\x1d\x5a\x24\x1a\x78\xda\xe6\x6b\x1d\x2e\x6c\x92\xa2\x80\xfb\xbe\xb3\xae\xa6\xe6\x47\xf5\x43\xb1\xac\xfd\x22\xb5\x58\x97\x79\xb5\xbd\x1a\xb8\x74\xb9\x31\xb1\xea\x4b\x7a\x31\xde\x24\x1c\xcc\x28\x0d\x36\x34\x8d\x48\x58\x89\xa6\x3a\x96\x69\xf8\xe9\xb8\x76\x51\x3f\xba\x8f\xed\x8b\x22\x29\xe9\xad\xe2\xd4\xa8\x54\x25\x5c\xdc\x98\xc5\xf2\xd0\x15\x0c\x2b\xe5\x25\x9f\x46\x4c\xb9\x2c\x79\x82\xd5\xa4\xbb\x41\xd3\xca\x67\xf3\x01\x36\x1b\xbd\x62\x83\x9d\xae\x5
1\x62\x4d\xd7\x86\xd9\x55\xe5\x31\xd5\x67\x2d\x59\x61\x9a\x53\x59\x6a\xcc\x0d\x61\xe1\x50\x9b\x13\x7f\x20\x1d\xe5\xfb\xc3\xfe\xf3\x79\x34\xfe\x82\x0f\xdf\x6d\x59\x75\x0e\x5b\xc7\x2e\xf8\xf0\x9b\x97\x3f\x3e\x16\x4b\x50\x9f\x14\x4b\xec\xd9\x7a\x4d\x2c\x71\xb7\xb4\x91\x28\x36\xaa\x23\x9a\x5a\x89\x5c\x67\x43\x69\xb8\x35\x22\xb4\x4a\x91\x64\xb5\xb5\xdc\xa8\xcc\x4a\x43\xc9\x28\xb9\x21\x19\xc4\x58\xc0\xd3\xb2\xb2\xf7\x8d\xbb\x58\x61\x78\x26\xda\xb2\xb4\x8c\xbb\x4a\x9d\x68\xce\x14\x94\xb3\x1b\xd2\xcb\xc8\xda\xb0\x1b\x05\xa8\xa9\x36\xdd\x52\x3a\x70\x5d\xa1\x9b\x75\xf2\xbc\x85\xad\xb5\xba\x22\x66\x2b\xce\x9c\xce\x38\x33\x33\x51\x9d\xe8\x52\xaa\x91\x13\xda\xc2\xf5\x49\x6b\x53\x0c\x3b\x2b\x3e\x2f\x89\x80\x2a\x4f\xb5\x1d\xee\x7f\xd2\x6c\x67\x69\xb6\xdd\x20\x4c\xa8\xa1\xd0\xb3\x33\xd8\x54\xd7\x98\xcc\x6a\xfc\x24\x26\x1b\x9d\x94\x84\xb0\xd6\x8e\x57\x86\x1c\x1b\x69\x37\x9d\xb4\x72\x5c\x0c\x76\xba\xba\x5f\xda\x1f\x2e\x73\xa7\x2c\x94\x3b\x49\x3c\x44\xb2\x33\xea\x37\xd5\x64\x01\x9b\x6e\xaf\xc3\xdb\xeb\x3c\x99\x64\xb6\xd1\xd1\xc4\x99\xcd\x4b\x2e\x3a\xab\xb9\x3a\xaf\x2e\x7c\x56\xdc\x02\x86\x3a\x2f\x82\xc9\xcc\xab\xb4\x31\xe1\x98\x26\xb0\x34\x24\x1b\x2b\xce\x28\xe5\x33\x6d\x81\x68\x53\x26\x59\x58\xa3\xab\x3e\xb9\x47\x05\x82\xe4\x34\x5f\xa3\xf4\x4b\xe2\x80\xde\xb5\xd4\x1c\x8f\x4b\x55\x2e\x1b\x16\x95\x62\x57\x5f\xf4\x93\x12\xe7\xd5\x8a\x1c\x0a\x46\x2a\x80\xa3\x92\x3a\x98\x74\x27\xc1\x7c\xa4\x2d\x75\xcb\x1f\x96\xd6\x81\x67\x83\x39\x5e\xc8\x93\x7e\x23\xaa\x0d\x69\xa9\xef\x95\x96\x83\xa4\x66\x1a\x02\x58\x1a\x93\x2e\xc9\x42\xa1\xc9\xb5\x0e\x25\x40\xd7\xf7\xff\x65\x5b\xbe\xbf\x3e\xe0\xcb\xf2\x37\xfa\xaf\x10\x4d\x07\xd8\x4e\x6f\x0c\x08\x7d\xa4\xab\xd3\x4a\x3a\xd3\xb4\x7e\x12\x36\xab\x66\x12\xf6\x2a\x8d\x76\x97\x89\x60\x00\xe7\x69\xd1\x6c\xb6\x59\x07\xf9\xf1\xbc\x5b\x6d\x68\xad\xe1\x40\xa5\x99\x21\x60\x71\xd4\x1f\x8b\x02\x95\x3a\xed\x2c\xec\x7b\xe9\xd4\xc3\xf3\x46\xce\xdf\xd8\xff\xf3\xd2\x90\x67\xfd\xcf\x22\x3a\x4a\x19\x76\x51\x6e\x57\x56\x73\xa3\x44\x47\x5a\xb3\xb8\x21\x41\x67\xed\x27\xe4\xcc\x6d\x28\xc3\xc0\xe8\x7b\x71\xd6\x2d\xf6\xc4\x37\xfa\xdf\xa0\x46\x65\x85\x94\x40\x75\xb5\x1a\x5a\xc3\xd2\x8a\xb4\x54\xa3\x45\x58\xba\x4f\x20\x4f\xaf\xce\xc9\xa8\xd8\xd1\x62\xc0\x2f\x97\x1c\xb1\x69\xcc\xe1\xb8\xdb\x4c\x1a\x12\x5e\xad\x14\x6f\xc2\xb5\x23\x8b\x16\x4c\xab\x13\xb5\x9b\xa4\xbf\x1a\x56\xcb\xb9\xeb\x76\x20\xd7\xf0\x92\x1b\xfb\xdf\x78\xa3\xff\x1f\x8c\x1d\x5f\xed\x7f\xc0\x0d\xfa\x44\x32\x4c\x06\xe3\x81\x56\xa6\x3b\x44\xa3\x66\x76\x4a\x0b\x72\x19\x0c\xc6\xcb\xea\x3a\x81\x62\x3d\xe4\x2c\x6b\xba\xb6\x95\x35\x1d\xfa\x55\xb2\x04\x6d\x95\x6a\x6b\xd1\x8a\x9e\x4d\x43\xaf\xe7\xb5\xac\xf5\xaa\x34\x89\x8c\x72\xa5\x32\xaa\x19\xc5\xb5\xbb\x12\xbc\xc6\x8d\xfd\xaf\xbe\x35\xfe\x5e\x49\x9a\xf0\x16\x2d\xe4\x93\x2a\xd9\x8d\x05\x2e\x42\xa0\x15\xc4\xe9\x98\x9a\x48\x2b\x41\x6f\x19\x8e\x30\xb5\xac\x4a\x35\x1e\x76\xdf\xd2\x7f\x85\x2c\x22\x4b\x6f\xce\x38\x6d\xb0\x6c\xd1\x1b\xe8\xad\xa8\x39\x98\x80\x92\x8a\x37\x45\x03\x29\xc4\xc8\x29\x11\x15\x4f\x51\xc6\x82\x15\xa2\xa6\xd4\x76\x62\x59\x0a\xdc\x69\xb6\x10\x3d\x9b\x08\x16\x7d\x7e\x3d\xd2\xac\xc6\x3c\x56\xd6\xde\x4c\x6c\x71\xd1\x28\xaf\xd5\xf2\xbd\x51\xdf\x35\x75\x63\x56\xe7\xcd\xb8\x61\x17\xb5\x1c\xb6\x29\x25\x97\xe3\x86\xff\x7f\xb7\xab\xe9\xd7\xd4\x0f\xf0\xaf\x7f\x9c\x96\xf9\xee\x7f\x3f\x1c\x50\xfb\xeb\x1f\x85\x3d\xe0\xee\xc7\x31\x4c\x7e\xfd\x63\x7f\x9a\xd3\xee\xc7\xff\x3d\x00\xbb\x18\xbf\x0f\x30\x80\xab\x74\x95\xf8\x9b\x77\x82\xc7\x38\xc1\xf1\xf2\x2d\xe0\x5f\xfe\xf7\x3d\x3c\x39\xa4\x5c\x61\x8c\xc6\xfe\xf2\x70\xf3\x05\x9e\x1c\xfb\x4e\x1e\x08\xd9\x91\x11\xff\xfa\x47\xe1\xd7\x25\x49\xfd\x9b\xf8\x37\x11\x
23\xea\xd7\xc3\x3d\x94\xc5\x31\x0e\x53\x7d\xc7\xf3\x5f\xff\x28\x70\x4f\x7f\x97\x76\x87\xb4\x6c\x49\xfe\x7f\x1f\xfb\x70\xec\xcd\x23\xe4\x16\x37\x63\x23\x8c\x49\x96\xe1\x04\x97\x70\x68\xc8\x08\xc0\x25\x48\xc8\xd1\x14\xc9\x09\x34\xe3\x72\x0c\x01\x79\xd2\x11\x20\xc3\x43\x8a\x84\xae\x4d\x03\xc0\x00\x9e\x42\x80\x75\x09\x82\xb6\xd9\x03\x45\x8f\x78\x43\xbc\x4a\x9f\xf0\xee\xf1\xce\xae\xfb\xbf\xfe\x51\x20\x9e\xdc\xfa\xdf\xb3\xe7\x93\x10\xce\xb7\x74\x11\x36\xed\x50\x36\x72\x59\xec\x3a\xc0\x05\x04\x2d\x70\x24\x89\x00\x0d\x11\x05\x20\x74\x5c\x86\xb6\x39\xca\xc6\x98\x43\x82\x43\x33\x0e\x4d\xbb\x14\x60\x29\x8a\x03\x04\xc2\xc8\x76\xd1\xaf\xbf\x5c\x68\xe1\x05\x1e\xd0\x0c\x70\x20\x80\x0e\x4f\x00\x1a\x50\x10\x33\xac\xc3\x42\x96\x61\x01\x83\x01\x2b\x60\x1b\xda\x04\x41\x51\x0c\xcf\x13\x2e\x89\x38\x17\xb0\x00\x3a\xb6\x80\x79\x82\x22\x59\x84\x58\xfa\xe3\x3c\x20\xbf\x3f\xbf\x17\x65\xe9\x3c\x4b\xef\xdb\xf7\x57\x39\x7c\xe3\xf5\x11\x0e\xdf\xdc\xd6\x67\x49\xd9\x3f\x3c\xf8\x87\x07\xff\xf0\xe0\x1f\x1e\xfc\xc3\x83\x7f\x78\xf0\x0f\x0f\xfe\xe1\xc1\xe7\xf1\x60\xf7\xe9\xff\x7b\xdf\xbc\xe1\x24\x07\xcb\x9c\xf7\xa0\x70\x9f\x1c\xec\x7e\x22\x78\x4d\x0e\xb6\xe6\xdd\x58\xa4\x74\xcd\x1c\x54\x12\x8b\x38\x6e\x71\x1d\xa9\x84\xeb\x90\x50\x17\x8c\xde\xec\x2d\x97\xb4\x3b\x8f\x93\xca\xa0\x3b\xeb\x8c\x34\x57\x64\x43\xa5\xdd\xe5\xb5\x55\x3e\x77\xf1\xa4\x25\xd5\xb2\xb9\xa5\x8d\x6a\xae\xd2\x35\x5b\xa2\x3a\xf7\xd5\x2a\x6f\x29\x49\x7b\x2e\x68\xc4\xc6\x64\x9b\xfd\x70\x62\xcb\xf9\x03\x3d\xe5\xe1\x8d\x7b\x96\xf6\x73\xec\x0f\xe6\x6c\x8d\x76\xb8\xd8\x64\x53\xc3\xa5\xfd\x20\x95\x16\x59\x79\x63\x02\x20\xd0\x1c\x04\x92\x42\xcc\xcd\x7e\x87\xce\x5b\xa3\x55\x8f\x58\x4a\x3c\xaa\x0f\xe8\x68\xe3\x75\xed\x85\xd5\xa1\xba\x83\x95\x39\x93\x3d\x69\x31\x1c\xfb\x6d\xa5\x63\xcb\x9c\xdb\x11\x1a\x64\xab\x67\xcd\x19\x6a\x97\xf3\x92\xc4\xa5\xcd\x8f\x16\x51\x2b\x2b\x6b\x0e\x6f\xd5\x84\x70\x80\xb9\x4c\x6f\x83\x3a\xa5\x62\xbe\xc3\x34\xf9\x8d\x47\x0b\x89\x3a\x58\x88\xec\x54\xad\x14\xdb\xae\x26\x85\x3a\xec\x99\x55\xce\xe8\x6f\x22\xa8\x33\xc5\x41\xbf\xef\x2e\x86\x19\x99\x85\x53\xd8\xad\xd4\x53\xa9\xae\xee\xf9\x75\x83\xbc\x1d\xd7\x87\xf2\x5b\xf7\x88\x1d\xe4\xf1\x43\xf2\x6b\x88\xc5\xd5\x84\xb7\x71\xc9\x1a\xd3\x5a\x2e\xaa\xa0\xbc\x19\xad\x16\x53\xd2\x71\xa7\x04\xdf\xea\x43\xa6\xdf\x48\xa6\xdc\x6c\xb9\xd2\xa0\x52\xeb\x2c\x9c\x29\x21\x83\x58\xed\xb5\x55\x4d\x6f\x0c\x99\x16\x6a\x78\xcb\x19\x3f\x08\xda\x7d\xa9\xe4\xb0\x6b\xd0\x28\xf2\xd9\xaa\x2e\xee\xea\x07\xa4\xd9\xba\xcf\xd3\x76\x09\xd7\x7d\x3c\x20\xc9\xb6\xd6\xb2\x40\xdf\x17\xe5\x29\xbd\x76\xe7\x55\xb0\x30\xeb\x5d\xab\xdf\x9e\x38\x1d\x1c\xe5\x47\x7e\x7c\x60\x7d\x58\x7c\xf7\x7a\xe3\x1e\xf5\xc7\xd6\xb7\xaf\x5c\x9f\xbc\x98\xd3\xd5\x1e\xe9\x35\x1a\x0b\xaa\x32\xaf\x13\xfa\x20\x1f\xf9\xdd\x38\x1f\x24\x81\x58\x8e\xa5\x85\x37\xe8\x94\x8a\xd3\x76\x75\xae\x92\x66\xc3\x1b\xb5\x99\x22\xb4\x98\x65\x8f\x6b\x55\xf2\xa5\xe8\x75\xa4\xa8\x0e\x3b\xed\x49\xcd\x61\x5b\x03\xde\x6b\x49\xad\xda\x92\xe8\xd4\xdb\xd8\x68\xd9\xa3\x80\x31\xf6\x8d\x7e\xa0\xb4\xfd\x6c\x7d\xf2\x95\x9c\xde\x7e\x69\xfb\x83\x39\xdd\xeb\xd6\xfb\x77\xf0\x72\x06\xd7\xa7\x6b\xe8\xee\x23\xbd\x62\x55\x57\x15\x46\xf3\x6a\x41\xea\x57\x2d\x3e\x97\xec\xf9\xba\xdb\x2e\x36\x1b\x61\x03\xd7\xb5\xa4\x4e\x6a\xab\x46\xdb\x18\xad\x05\x6b\x19\x97\x22\x05\xea\xe4\x00\x22\xa5\xcc\xb7\xe8\x92\x8e\x28\xa4\xd2\xf5\x55\x62\x1a\xcc\xc6\x1c\x0d\xe2\xb8\xd2\xa7\xc0\xca\x34\x87\x7b\x21\xf9\x60\xce\x52\x3d\x2b\x52\x7d\xaa\x7f\x4f\xe5\x4f\x65\xa3\x5a\x6a\x39\xe1\xb0\x65\x39\xa3\x45\x3a\x98\xf7\x34\x29\xb5\xd1\x90\x08\xca\x81\x8b\xa4\x6a\xbd\xe2\xf5\xc3\xd9\x52\xa9\x8e\xf7\xf5\x37\xd7\x6d\xe0\xd8\xc1\xb7\xd8\xf8\
xd4\x1c\x48\x8f\xf4\x56\x64\x65\xc9\x45\xcb\x89\xb7\x58\x97\xea\x23\xba\x01\x16\x6b\xec\x0d\xe9\x9a\xd9\x27\xf3\xa0\x21\x36\xc5\x6e\xaf\x9f\x94\x03\x32\x6c\xd0\x79\xa3\x62\x47\x50\x17\xd2\xf9\x4c\x15\x06\x54\xea\x75\x7c\x76\x38\xa6\x4b\x15\x98\x4d\x87\x7e\x54\xe9\xe5\x5d\x20\x74\xb8\xf5\x78\xdd\xd0\x77\xad\x7c\x60\xbd\xfa\x4c\x5f\xcb\x6f\xe9\xeb\xc7\xd6\xdb\x45\xa9\x63\x59\x5d\xf1\x9a\xf1\x7c\xa6\xaf\x95\x47\x7a\x0d\x95\x99\x81\x69\x59\x5c\xb0\x70\xb1\xec\x52\x4e\x49\xeb\xa5\x43\x04\x74\x11\x82\x79\xd7\xf4\xfd\x16\x53\x5f\xd7\x6b\xf6\x22\x48\x64\x6f\x89\xa6\x6a\x99\x72\xec\x31\x8a\x23\x21\x45\xcb\xf2\x48\x73\xf3\x45\xcf\x2a\xfa\xd3\x25\xaf\x05\xfc\xc8\x85\x1c\x3d\x1d\xb6\xa7\xc6\x50\xfc\xb2\x1c\xf4\x31\x6e\x62\x3f\x29\x6e\x52\x76\x7f\xaf\x8a\x9b\xc6\x37\x56\x18\x5f\x17\x37\xe5\xcb\x41\xb7\x92\xae\xcd\x16\x54\xa3\x4c\x46\xf9\x62\xa1\xcb\x5d\x7a\xcd\xea\x16\xd0\x2d\xbc\xd4\x1d\x61\xce\xf6\x70\xa6\x81\xe2\x10\x8f\x60\x6d\x5a\x54\x26\x93\xf9\x2a\xde\x34\x2a\x4e\x9c\xf6\xd2\x41\x71\xea\x58\xb8\x23\x9a\x1b\x5d\x53\x93\xde\x62\x30\xc3\x9a\x2a\x3f\xfa\xf1\xf2\xf0\xc6\x9d\xb3\xd7\xc5\x4d\x0d\x9d\x24\xc6\x3a\x9a\xc9\xc1\x78\xa1\x7b\x74\xa3\xbe\xf0\x73\x22\xe5\xb3\x2e\x89\xd7\xe4\x20\x5e\xd3\x2e\xc4\x26\x1d\x8f\x8b\xa5\x40\x0b\xfa\xfe\xa8\x3d\x1e\x32\x1c\x82\xfc\x46\xd2\x20\x30\x81\x55\x9a\x6b\xd6\x60\x6a\x28\x8d\xf5\x6a\x15\x57\xb8\x19\x3b\x18\xaf\x3c\x66\x47\x4e\x4c\x0e\x31\x37\x66\x17\x41\x85\x98\xd8\x6d\x89\x54\x4d\x6f\xe9\x5b\x7d\xda\x2d\xc5\x6c\x2a\xc4\xc1\x7a\xd2\xe7\xea\x1c\xd3\x5b\x68\x0e\xdd\x00\x9b\x30\xe6\xcc\xc6\x28\x6c\x4a\xce\x6c\xd0\xab\x1a\x8d\x11\x6f\x08\x93\xe5\x48\xc4\x73\x52\xe8\x77\x6b\x9e\x34\xf3\x72\xf2\xca\xba\x91\xa3\xbc\x9d\xc4\x4d\xb7\xee\x54\x3e\xc8\xe3\xc7\xe2\x26\x2d\xc8\x5a\xa8\x48\x75\x09\x43\x24\x4a\xb3\x71\x51\xb1\xeb\x98\x5b\x45\x7e\x3f\xcd\x19\xd6\x70\xca\x36\x9f\x45\x41\x18\x92\xc3\xee\xb2\xc8\xcc\x3a\xe6\x0a\xe8\xe2\xb2\xdb\x49\x46\xf5\x79\x15\xb6\x78\x29\xe8\xc1\x54\x14\x1a\x33\x21\x51\xb2\xf2\x46\xf4\x2d\xad\x6e\x79\xfc\x9e\x29\xf5\xca\x7a\x52\xb7\x85\xac\xd4\x9f\x74\x85\x06\xac\xd1\xc2\x60\xb6\xea\x01\xb5\x39\xb3\x1b\x15\xce\xdb\x0c\x97\x76\x88\xdc\x68\x22\xec\x3a\xb1\x2f\x49\xfc\xa0\x9f\x39\xb1\xc3\xcf\x16\x30\x8f\x1f\x1b\xb7\xda\xd5\xdd\x25\x4f\x99\xfd\xc1\x32\xfb\xb5\x63\x55\xbc\x74\x9d\xd4\xf5\xf9\x03\x6d\x53\xe5\xe2\x09\x4d\x95\x46\x4e\xd6\xaa\xcd\xaa\x63\xbe\xd8\x57\xbb\x4d\x3d\x99\xcf\x6b\x24\xd5\x9e\x94\xf0\x34\x0d\x29\xc2\x92\x64\x30\x25\x56\xfd\x89\x40\x95\xf1\xc6\xe9\xc6\x9e\xc8\x02\x6a\x60\xd0\x63\x73\xd0\x1f\x9a\x51\x1b\xb4\xd5\xe5\x74\x3d\x90\x87\x9b\xf2\xad\x75\x7d\xf9\x33\x79\x79\xec\xde\x89\x68\x5e\x1f\xf7\x88\x04\x3a\xc4\x8e\x3b\x1f\x5e\x3e\x6f\x6f\x8f\xff\x71\xfc\x8c\x2a\x9a\xb6\xc6\x4d\xbd\xd4\x91\x73\x79\x50\x95\x29\x35\x76\x9b\x52\x6f\xce\xf6\x29\xd3\x9b\x75\xd8\x78\x31\x35\x7b\x42\x98\x98\x35\x97\x0b\x2a\x5e\x2d\x8c\xda\xc9\x1c\x6b\x42\xbb\x9a\xfb\x86\x1f\xb4\xfa\x58\xf6\xba\x24\x5a\x35\xed\xb2\x3b\x2e\x5a\x29\x99\x92\x49\xd5\xbb\x26\x4e\x3e\x8d\x23\x5f\xa9\x0d\xd8\xf5\xe7\xba\x3a\xb7\x1d\xfc\x75\x5b\x38\xf7\x03\x44\x0b\x87\x62\xe5\x9d\x8f\x6f\xbe\x25\x7f\xe5\x7c\x44\xfb\xe3\x4d\x56\x9a\xbb\x22\xbd\x90\x84\x15\x9f\xba\xa5\x91\x37\x29\x37\x2b\x0d\x8b\xe1\xc5\x4d\x83\xed\x48\x1b\x46\x1d\x8e\x2d\xd6\xd2\x97\x83\x32\x08\xd7\x9b\x28\xb1\xb3\xda\xb8\xc3\x8f\xc4\xb2\x1e\x37\x8d\xc9\xc2\x4f\xba\xa3\xcd\x50\xc4\x0d\xb9\xaa\x37\x6a\xd2\xd1\xcf\x7f\x49\xdd\xdc\xc1\xe7\x73\x9f\xe4\xf3\xd5\xc3\x98\x5e\x93\x2b\xb9\x71\x6b\xcb\x75\x3e\x9f\x4c\x5c\x4a\x51\xbb\x44\x0f\xc9\xd5\x31\xbb\x08\xb5\xb2\xc3\xaa\x1d\x45\xd5\x66\xa3\x79\xb9\x57\xe9\x62\xd3\xc6\x1d
\x6d\xb4\x62\xb9\x60\xec\x56\x8d\x9e\xd8\x19\x72\x78\x15\xd7\x15\x3f\x64\xd6\x95\xc4\x86\x0b\xda\x5a\x6f\x9a\xd9\x90\x52\x9b\x6d\xcf\xee\xe6\x13\x72\x72\x9a\x2b\xb9\xf1\xec\x81\xeb\x7c\x7e\xd3\xd6\x84\x71\x67\x9a\x56\x68\x71\x5d\xc5\x70\xbe\xca\x43\xdc\x99\x76\x3a\x63\xb6\x0f\x8d\xe9\xaa\x12\xce\x54\x7a\xda\xa1\x10\xc6\xf3\x68\xb0\xae\x54\xf2\x4a\xbf\x15\x26\x70\x3d\x1e\xce\x8b\x8d\xb1\xae\x52\x8b\x94\x44\xc9\xc6\x0e\x9a\xe3\x49\x8f\x51\x89\xa4\x2a\x0c\x0f\xb9\x92\xb1\x3f\x2e\x35\x34\xb9\x16\x32\xc0\x69\x68\xf9\xa6\x32\xf4\xa2\xa1\x6b\x72\xed\x85\x98\x97\x24\xd4\x0f\x65\xac\x97\xd7\x13\xd5\xd1\x3c\xb9\xa4\xcf\x42\x3a\xc0\xb5\xb6\x1b\xea\x25\x28\x50\x66\x24\x75\x15\x63\x89\xb9\x64\xc5\x2f\xd9\xa6\x38\x92\x7c\x26\x63\x38\xba\x7c\x7d\xae\x44\x3d\xca\xeb\xde\xe7\xdf\x7a\xd6\xc3\x41\x1e\x3f\xe6\xf3\xe5\xd4\xa6\x52\xbf\x0f\xd7\x54\x07\x9a\xc3\xc0\x6a\x00\x7e\x14\xdb\xb9\xb0\x2c\xb9\x30\xd5\x23\xaa\x57\x9d\x53\xcb\xea\x00\xcf\x75\xc9\xed\x98\xd5\x61\xd1\xe9\x45\xc0\x1e\x72\xf5\x55\x6a\xfa\x64\xec\xb0\xed\x95\x48\x6f\xdc\xcd\xac\x4b\xd9\x14\x59\x92\xa4\xf5\x52\xc8\xf7\x73\x17\x3a\x9b\x85\x69\x75\x29\x0e\x8a\xe4\x5a\x36\x75\x43\x5a\xb5\x9a\x3e\x22\x5a\xd2\xc4\x75\x84\xca\x54\x63\xa6\x53\xb3\x99\xca\x83\x55\xb4\x9b\x3b\xec\x7d\xce\xf5\xb5\xd9\xf2\x19\x27\x4e\x7c\xfe\xee\xf8\xb5\x5b\x72\x1f\x5a\x2d\x3f\xd4\x5f\xca\xa9\xb7\xcc\xe5\xac\xd5\x17\x0d\x01\x74\xc8\x4e\x2f\x35\x9d\xbc\x29\x6b\x73\xb9\x54\x36\xf1\x7c\xe3\x18\xed\xc1\x2c\x0a\x91\xaf\x5b\x3b\xf8\x1b\xe6\xbe\x62\xaf\x92\x9e\xda\x87\x13\xd1\x39\xd6\x86\x6b\x5c\x17\xac\x4a\x86\x5e\x9e\x7b\x69\x89\x1a\x8f\x9a\xa3\xf1\xa0\x32\x57\xe6\x70\x8d\x4c\xae\x3f\xc1\x5d\x7a\xa3\x37\x48\x75\x82\xdd\xa6\x0a\xec\x54\x66\x6c\xbd\x9c\x0d\xd7\xfd\xda\x6a\x89\xec\xa6\x0b\x38\x7e\xd4\xde\xd4\x06\xed\x96\x53\xe9\x81\x61\x39\x1f\xb3\x73\xd1\xdb\xfb\xbc\x78\xd1\xe4\x74\xdc\x82\xde\x64\xd5\x80\x66\x5b\xe0\xa4\x8d\x9b\x08\x98\x40\x51\xdc\x1c\x0d\x36\x52\xbf\x36\x55\xa2\x3a\x98\x2e\xa7\xf9\xbb\x73\x2f\xf2\xce\x47\xdc\x12\x53\x3c\x9c\x99\xf7\xa5\x73\x69\x31\xa8\x41\xf1\x5c\xbe\x1e\x73\x43\x15\xb9\x5a\x97\x5b\x0e\x28\xc9\xc3\x88\x5b\xb0\x60\x25\xac\xb8\xf1\x40\x5e\x39\xd1\xa0\xda\xe9\x55\x65\x4b\x5a\x97\x87\x4b\xb6\x96\x91\x4d\x8d\xd2\x6b\xe3\x4c\x95\xb2\x71\x4c\x27\x4d\x6e\x6a\xb6\x37\xcd\xa6\xb5\xe9\xce\x7b\xd2\x9c\x4a\x38\x6b\xa9\x6d\x48\xb2\x35\x44\xde\x30\xa9\xdc\xa8\x0f\xaf\xc4\xc0\xb7\xeb\xc3\x92\xeb\x7e\xb9\x3e\x9c\xe5\x82\xf6\x17\x7c\x94\x3f\x51\xac\x16\xe7\x9c\x8f\x5a\xfa\xac\x3b\x5a\x9b\x5d\xb3\x26\x16\x13\x42\x1e\xeb\xc5\x46\x4f\xae\x8b\x34\xee\x2f\x31\x4b\xa8\x6d\xcd\xcd\x23\x1b\x62\x8c\xb8\x41\x39\x40\x04\x9e\x76\x53\x82\x0c\xf3\x2e\xd3\xd4\x03\x77\xed\xf6\xd4\x51\xc8\x3b\x9d\xce\xa8\xec\x19\x26\x2d\xde\x31\xc6\x79\x61\xc7\xe4\x0b\x2f\x8b\xf8\xe8\x8e\xc9\x93\x17\x46\xdc\xe3\x28\x63\xf2\x7b\xe1\x96\xb3\x9a\xbf\x99\x5d\xf9\xdb\xf7\x82\x40\x51\x34\x0d\x28\x82\xe6\x78\x96\x01\x80\xe5\x09\xf0\xbd\x00\x0e\xe7\x8f\x9c\x9d\x92\x4c\x3e\x3f\x43\xe5\x62\x9f\x6e\x3b\x47\xe5\x93\xfa\x44\x52\xd4\x63\xa7\xc8\x63\xff\x88\xaf\xeb\xd6\xd5\x07\x7f\x7f\xab\x98\x9d\x17\x87\x8a\x7c\x3c\x07\x88\xf8\x5e\xa0\x8e\xdf\xde\xdb\xab\xeb\x4f\x4c\xff\xc4\x5e\x91\x14\xcf\x33\x2f\x4b\xe0\x4b\x7a\xfa\xd2\x1b\x2f\x3e\xac\xa9\xa7\x6f\xbd\x38\x9e\x42\xcd\x71\x1c\xcb\x41\x0e\x43\x07\x53\x04\x4f\x21\x16\xf0\x04\x0b\x48\x8a\x03\x8e\x80\x19\x86\xa5\x31\xc6\x2c\xe0\x49\x04\x05\xc8\xf2\x10\xd2\xa4\x80\x6d\xc2\xc5\x3c\xcd\xb0\xac\xeb\x1c\x0e\xd5\x7d\x32\x0f\x6a\x9c\xd9\x7d\xc9\x2f\x49\x84\x4e\xd4\xd4\x75\x3a\xce\x9b\xe4\x6c\x48\xc0\xf5\x3c\x22\x85\xa6\xb6\x5a\xea\xe5\x75\x8b\x4d\xa5\x0a\x2a\x5b\xcb\x5c\x1
1\x72\xda\x4b\xe3\x56\xf8\xec\x0c\xba\x4b\xd7\x8b\x87\x33\xef\xec\x66\xf9\x96\xf6\x87\xa5\x22\x3a\xc3\xf7\xce\xf6\x9f\x9f\x8e\x70\x99\xef\x36\xe9\xf2\x14\x4f\xf3\x0c\x62\x39\xcc\xd8\x04\xc5\xb8\x0c\x05\x58\x97\x62\x38\x96\x27\xa0\x0d\x6d\xd7\x25\x21\xa6\x6d\xc1\x86\x1c\x69\x13\x34\x2f\x00\x9a\x05\x98\x05\x18\xdb\x88\x22\xf6\x7c\xa7\x9e\xf3\xfd\x27\xf5\xfb\x5e\x7c\xe7\x4f\x36\x1c\x9d\xc7\x73\xf7\xe0\x3b\x65\x53\x98\xa7\x1c\x1b\xda\x5b\x9e\xdb\x14\x80\x04\xa2\x49\x86\x40\x10\x90\x0e\x0f\x91\x60\x23\x40\xf2\x34\xe9\x0a\x2e\x0b\x69\xdb\xe1\x04\x8c\x20\xed\xf0\xbc\x6b\x13\x18\xb1\x68\xcf\x77\xfa\xee\x7c\xbf\xb6\xdf\xf7\xe2\x3b\x38\x49\x38\x9d\xef\xcb\xb8\x07\xdf\x31\x09\x6c\x88\x59\x96\x72\x20\x41\x12\x2c\x74\x68\xca\x25\x6c\x01\x42\xc7\x75\x08\x97\xa3\x68\xec\xd2\x00\xdb\x0c\xcf\xd9\x24\x61\x13\x0c\x83\x48\x81\xa2\x79\x9a\x23\x30\xc3\xb2\x0e\xd8\xf3\x9d\xb9\x3b\xdf\xaf\xed\xf7\xbd\xf8\xce\xe6\x6f\x36\x74\x13\xdf\x91\xeb\xf0\x3c\xc9\x61\x87\xe5\x01\x62\xb1\xc3\xf3\x0e\x86\x04\xb6\x09\x82\x27\x79\x04\x5d\x9a\x47\x04\x60\x09\x0c\x00\x43\x60\x82\x75\x30\x8d\x28\x40\x02\x0e\x0b\x10\x63\x1e\x1f\x0e\x4d\x67\xef\xce\xf7\x6b\xfb\x7d\x2f\xbe\x33\x27\x83\x7d\xf1\x88\xde\x1b\xf9\xce\x93\x24\x29\x50\x88\xe6\x39\x86\x76\x5c\x40\x23\x82\x64\x21\x0b\x49\x07\x00\x9b\x27\x28\xc7\x75\x09\xd6\x61\x5c\x96\x70\x11\xe7\x90\x04\x4f\x72\x10\x02\x16\x11\x90\x23\x09\xc1\x15\x1e\x8e\xfe\x7e\x9d\xef\xd7\xef\x8f\x3a\xcf\xdf\xbc\x9f\xef\xf2\x6d\xed\x07\xe8\xd8\xfe\xa7\xf8\x55\x87\xe1\x39\xc7\x76\x1c\x82\x72\x18\x8e\xe0\x49\xc0\x01\x12\x31\x90\x85\x00\x0b\x0e\x87\x79\x8e\x45\x90\x12\x90\xcd\x90\x98\xa3\x1c\x00\xa1\x0b\x08\x48\xb9\x18\xb3\x36\xcd\x39\x78\xcf\xf7\xd7\xfd\xea\x0d\xfb\xf2\xee\xc2\xf7\x6b\xda\xff\x74\xbe\xb3\x08\x3a\x08\x13\xac\x8b\x08\xe8\xb0\x0e\xe7\x62\x82\x10\x6c\xde\x26\x1c\x6c\x23\xc2\xa1\x1d\xd7\xa1\x29\x0c\x19\xca\xe6\xb1\x0d\x69\x2c\x60\xc8\xf1\x88\x02\x0c\x87\x19\x92\x70\xf7\x7c\x7f\xdd\xaf\xde\xb0\x1f\xf6\x2e\x7c\xbf\xa6\xfd\xcf\xe6\x3b\x4d\x61\xc6\x86\xa4\x4b\x91\xbc\xcd\x41\x48\x31\x14\x61\x33\x02\x60\x58\x0e\x92\xf6\xee\x4c\x4d\x4c\xf3\x34\xc0\x1c\x67\xd3\x8c\xc0\x12\x34\xe1\x30\xd8\x71\x38\xc2\x21\x5c\x82\x05\xc4\x9e\xef\x17\xfc\xea\x4f\xd2\xf3\x4b\x7c\xbf\xa6\xfd\x29\x73\x6c\xff\x33\xe2\x48\x81\x71\x81\x60\x03\xca\x85\x8e\x83\x21\xcb\x0b\x82\x40\x09\x50\xb0\x09\x12\x20\x12\xb1\x24\x03\x58\xd2\x75\x21\x01\x81\xe3\x0a\xae\x63\x43\x80\x30\x4f\x22\x96\x70\x01\x0b\xf9\x5d\xfc\xce\xbc\x65\xdf\x6f\xa8\x95\xbb\x5e\xde\x2b\xb7\xb5\x7f\x3a\xee\x1f\x69\xff\xdd\xf2\xee\x12\xbc\xed\x42\x12\x23\xdb\x15\x68\x9b\xc4\xb4\xc0\x42\xc6\x16\x48\x48\xf1\x14\x66\x19\x1a\xb9\x36\xe9\x38\x00\x91\x2e\xc3\x61\x20\x50\xc4\x16\x8e\xa2\x79\xcc\x53\x8c\x60\xa3\x3d\xdf\xdf\xb0\xef\xd7\xd7\x3c\xdd\x87\xef\x57\xb4\xff\xd9\x7c\x07\x02\x41\xd9\x04\xe3\xb8\x84\x6b\xd3\xb4\x0b\x21\x2b\xd8\x3c\xed\x0a\x24\x6f\xbb\x24\x04\x02\x0d\x49\x8e\xb2\x1d\xca\x41\xb4\xc3\x30\xb6\x20\xd0\x02\x06\x2c\x40\x2e\x40\xf6\xd6\x05\xef\xf9\x7e\xc1\xbe\xff\x24\x79\xbb\xc8\xf7\x2b\xda\x3f\xb5\x33\x9f\xc1\x77\x24\xd0\x2e\x4f\x70\x1c\x70\x69\xe0\xec\x42\x15\x01\x23\xc2\xc1\x02\xe7\xba\x34\xcd\x93\x1c\xc3\x40\xc4\x53\x3c\x64\x10\x86\x24\xb2\x39\x84\x6d\xd2\x46\x18\x71\xee\x36\xc4\xe4\xf7\x7c\xbf\x60\xdf\x7f\x92\xbc\x5d\xe4\xfb\x15\xed\x7f\x36\xdf\x79\x9a\x73\x18\xce\x71\x68\x48\x71\x0c\xcb\x12\x88\xb2\x1d\x86\x20\x00\x22\x09\x02\x12\x48\x60\x6d\x96\x46\x18\xf0\x1c\x47\x00\x0c\x19\x4e\x20\x11\x09\x09\x9e\x65\x29\x97\xc7\x24\x84\x87\xd7\x46\xdc\xdd\xaf\x3e\xf3\x6b\xe7\xb5\x9f\x6f\xf1\x5d\xb9\xad\xfd\x89\x79\x5d\xfb\xef\xe5\x3b\x09\x20\x70\x18\x0e\x39\x34\x2f\x50\x
34\xc1\xdb\x18\xbb\xd0\x26\x11\x45\xb2\x80\x22\x29\x64\x33\x94\x40\x60\x8a\xe1\x28\x48\x73\x0c\xef\x12\x04\x63\x33\x02\xa2\x21\xa0\x01\x07\xb9\x03\xdf\x2f\xd8\xf7\x9f\xd4\xef\x7b\xf1\x7d\x9c\x7f\x2e\xdf\x5d\x1b\x73\x0e\x70\x19\x01\x51\x14\x64\x09\xc2\x76\x69\x4c\x11\x04\x20\x01\x23\x20\x48\x52\x34\x20\x48\x0a\x3a\xc8\x66\x19\xd7\x41\x88\x65\x30\x82\x5b\x7b\x84\x5c\xc8\x63\x9e\x15\xf6\x7c\xbf\x60\xdf\x7f\x52\xbf\xef\xc5\xf7\xc3\x21\x1f\x9f\xc6\x77\x8e\xc1\x90\x73\xd0\x36\x50\x47\xbc\x43\x52\x14\x45\x02\xc4\x40\x44\x20\x1a\x0a\x14\x60\x5c\x8e\x25\x18\x1a\x09\x14\x24\x1c\x9b\x73\x30\xcf\xb0\x80\xc2\xbc\xc3\x08\xc8\x21\x58\x8a\x3b\xbc\xec\xe3\x55\x3b\x73\x97\xf9\xe2\xf9\x9e\x90\xb7\xf8\xae\xde\xd6\xfe\xa9\x9d\xfb\x48\xfb\xef\x8e\xdf\x01\xe6\x68\x8e\x82\x3c\x69\x23\xce\x86\xd0\xe1\x20\x45\xb9\xd8\xc6\x18\x52\x98\xe6\xb0\xbb\x8d\x19\x6d\x04\x00\xcf\x3a\x34\xe5\xb8\x90\xa0\xed\xad\x59\xe2\x11\x8f\x29\xc0\xd8\x7b\xbe\x5f\xb0\x33\x3f\xa9\xdf\xf7\xe2\xfb\xa9\x9d\xfb\x0c\xbe\x13\x90\xb4\x6d\xc6\x45\x3c\xa6\x05\x88\x04\x01\xd0\x04\x42\x34\x47\x53\x1c\x22\x28\x5e\xe0\x28\x52\x60\x39\xc8\xb9\x82\x43\x61\x81\xc2\x58\xa0\x80\x03\x38\x0a\x72\x80\xe0\x19\x02\xa1\x3d\xdf\x2f\xd8\x99\x9f\xd4\xef\x7b\xf1\xfd\xd4\xce\x7d\x9c\xef\x2f\xae\x1d\xde\x67\xe1\xf0\xb3\x57\x0d\x55\x9d\xd7\x8c\xa5\x31\xb5\xeb\x94\x26\xd2\x7d\x6b\xd2\x89\xeb\xc1\x64\x40\x10\xae\xca\x27\x7a\x15\x04\x44\xa5\x93\xd7\x1e\x4f\x33\x3a\x59\x1d\x7c\x65\xa7\xda\xfe\xfa\x68\x85\xeb\xf9\xce\x9f\x7d\xd6\x78\x3b\x28\x92\x23\x0f\x30\x45\x9b\x0c\x16\xca\x54\x77\x35\x97\xf4\x51\x67\x45\xc6\xb5\x4d\x57\xd1\x6b\xc4\x4a\x96\x89\xac\xae\xce\x83\x85\xed\xce\xe4\x5e\x71\x30\xa8\x2e\x38\xd3\x97\xa5\x4d\xa0\xac\xd2\xa2\xda\x2a\x0e\x16\x4a\x64\xb4\x9d\x78\xd0\x03\x73\x32\xd2\x47\x55\x6d\xd1\x6b\xb4\xbf\x7d\x2f\x7c\x1b\x05\xa3\xfe\x3c\x8b\x71\xb5\xaa\x2b\x58\xb4\x56\xb5\x90\xc2\x1d\xa5\x9d\xb1\x03\x0f\x2d\x60\xbf\x1e\xad\xc2\x58\x2e\x7a\x84\xe5\x1e\x5f\xe8\xf9\x0a\x0f\x8e\x92\xf1\xe4\xcc\xed\x13\xd8\x9d\xac\xdd\x23\x8b\x7f\x6d\x16\x5d\xbc\x43\x16\xbf\x19\xaf\x7b\xc6\x0d\xed\x8b\xe2\xcf\xcb\xb6\x5c\xb2\x96\x9f\xbd\x44\x7c\xbd\x92\xbd\xb2\xbd\x6c\x7f\xdd\xba\xbd\xee\x54\xc9\xdc\x50\xe9\x6e\x46\x78\x4c\x82\x4d\xca\x53\x51\xc3\x59\x60\xa6\x58\x5a\x85\x49\x69\xaa\x49\x8e\x3a\x70\x05\xa9\x2a\x75\x86\xae\x35\xa2\x65\xa3\x47\xc3\x3c\x1f\x85\xac\xbe\xf0\xaa\xaa\x50\x5b\xcc\x96\x8d\xa0\x43\xf6\xea\x70\x85\xa2\x9a\x21\x67\x0b\x15\x95\xbb\xa8\xb1\x55\x84\xc4\xf5\xa2\x56\x65\xd5\x67\x75\x71\x2a\x94\x1d\xb7\xa6\xf6\xc5\x05\xb7\x2c\xa9\x59\x8b\xcd\x02\x35\x6f\xd5\x10\xe9\xce\xdc\x6c\x79\xd2\xdb\x9f\xaf\x64\xb7\x0a\xf9\xad\x4a\xd6\xc8\xf5\x20\xba\xa7\x92\x7d\x61\x6a\xed\x5d\x4a\x76\xe7\x7a\x80\xeb\x95\xec\x95\x3a\xe2\xfd\xf5\xc1\x3d\xd7\xaf\x28\x99\x5c\xcb\x66\x64\xaa\xab\xba\xc2\x58\xab\x3c\x25\x1c\xb9\x6c\x55\x5c\x2e\xb5\xd9\x19\x63\xaf\x1b\xb1\xea\x95\xe7\xc5\x99\x35\x6a\x04\x2b\x94\xb2\x8c\xdf\x74\xa9\x60\x95\x4e\x56\x5c\xc3\x61\x47\x35\xa6\xc2\xc8\x33\x94\xb8\x0c\x57\x11\xc7\x92\xda\x35\xdb\x49\xc8\xbb\x43\x79\xab\x08\xf5\x35\x57\xa6\xb8\x2c\xaf\xe9\x35\x4e\x96\x15\x71\x1d\x6a\xc3\xb8\xb6\xd4\x54\x59\x63\x05\xd8\x12\x20\x5e\x4f\xa8\x6a\x31\x07\xca\x71\xf9\xf7\xe7\x2b\xd9\xad\x42\x7e\xab\x92\xe9\xc4\x94\x13\xef\xa8\x64\x5f\xb9\x2e\xfc\x2e\x25\xbb\x73\xf1\xc7\xf5\x4a\xf6\x4a\x71\xf2\xfe\xfa\x60\x71\xf7\x6b\x4a\x36\x1c\xba\xeb\x56\x1c\xb4\xc7\xee\xd4\xe7\x66\x71\xbb\x18\x95\xdd\x2c\xac\x05\x9d\x8c\x0a\xdc\xd5\x8c\x21\x22\x76\xd1\xef\x80\x35\x39\x0c\xda\xfc\x3c\x52\x8b\xdc\x60\xe1\x31\xe4\xa4\x5e\xee\x37\x73\x2b\x6a\x03\x12\x94\xad\x4a\x1b\x03\xa2\x4a\xa4\xb8\xbf\xec\xe1\xc9\xb4\
xb6\x55\x04\x66\x90\x65\x56\x97\xf2\x2a\x4a\xdc\xd3\xf3\x0c\xc6\x2e\xd1\x1e\x4e\xc0\xc6\x8d\xbb\x6a\x62\x24\x95\x9e\x34\x9d\x78\x94\x3c\xb5\x9c\x63\x2d\xd7\xcf\x57\xb2\x5b\x85\xfc\x56\x25\xab\x33\xc5\xe6\xf0\x8e\x4a\xf6\x95\x8b\xd1\xef\x52\xb2\x3b\x57\xfa\x5c\xaf\x64\xca\x19\xf5\xcf\x04\xee\x63\x3b\x3c\x5e\x55\xb2\x66\x60\x8c\x1d\x03\xaf\x1b\xa8\xaf\xf4\xdb\xbc\x63\x75\xa6\x72\xaa\x74\x99\x54\xab\x96\xd7\x75\x07\xb6\xa7\x1d\xae\x58\xd2\x41\x31\x6d\x6c\xea\xfd\xc8\x9c\xd8\xb2\x38\x48\x50\x67\x58\x2d\x4e\x38\xc5\x29\x59\x8a\x09\xe5\x8d\x33\x4c\xfa\x65\x31\xd1\x68\xa2\x9f\x4d\xaa\xfa\x56\x11\x36\x82\x2f\xd9\x69\x5f\x5b\x61\xaa\x4a\x2f\xe4\x58\xac\xaa\x8d\x78\x33\x11\x1d\x83\xa5\x8d\x96\xe4\x30\x7c\x6d\xe5\x64\x41\xc6\xb5\x8e\x85\x7b\x3f\x5f\xc9\x6e\x15\xf2\x5b\x95\xac\x26\x0c\xbc\xfc\x9e\xe1\xe2\x17\x56\xda\xbc\x4b\xc9\xee\x5c\xd6\x75\xfd\x56\xf3\x2f\x7c\x25\xc8\xc9\x56\xe9\xf6\xcc\x03\xaa\x3e\xae\x79\xc4\x0a\x4c\xe6\x62\x4e\x56\x72\x4e\xa3\x06\x0a\xb7\xee\x54\x03\xb7\x56\xcd\x15\x77\x45\x08\xcd\xcc\x67\xed\xb4\x55\x13\x15\x9c\x2b\x62\xd3\x6d\x41\x51\xe2\x15\x63\x34\x63\xe7\xb4\xc8\x7a\xf4\x94\x93\x35\xd9\xf7\x5c\xb3\x69\xca\x24\xe7\xa1\x9d\x52\x78\x1d\xb5\x9b\x33\x23\x99\xa6\xdb\xa2\x02\xfb\x63\x92\xce\x44\xdd\xe5\x15\xb2\x6d\xb6\x57\x69\xa5\xaa\x2c\xe6\x64\x5e\x1f\x1a\x21\x7b\x3c\x5e\xe4\xb2\xc2\x0d\x9f\x0e\xea\xdb\x0a\xf7\x93\x4a\x4d\xc4\xcf\x2a\x75\xf9\x40\xea\x52\x3c\xb6\x6f\x5c\xd3\xfe\x2d\x5b\xea\x76\xed\x9d\xbd\x8e\xe7\x34\x40\x7b\x5f\x92\xe4\xbe\xf5\x7e\xff\xbc\xa3\x67\x22\x89\xc3\x9d\xd2\x2c\xcd\x71\xba\x74\xc4\xb4\xe3\x79\x83\xd1\x6a\x55\xef\x87\x9c\x63\x47\x23\x14\xcd\xd7\x84\x16\xe8\x83\xc5\x12\xd5\xf5\x22\x6b\x37\xd3\xb7\x3c\xe0\x2d\x0a\xf9\x95\xb5\x8e\xe2\x67\xd5\x5a\xde\xa0\x90\x5f\x78\x5e\xd8\x7e\x28\x6e\x56\xc8\xfb\x16\x82\x7e\x70\xa3\xe7\x47\x14\xf2\xaa\x8d\xb9\x83\x4b\x0a\x59\x79\x6c\xdf\x10\x9b\x46\x77\xb3\x9c\x57\xa4\xb2\x38\x58\x26\x64\xdb\x0b\xb4\x92\xa9\xd8\xae\x38\x4c\xe9\x96\xe8\x79\xc3\xb6\xd5\x73\x88\xf9\xc4\x42\x35\x68\xd1\x33\xb9\x52\x9a\xb4\x46\x8d\x50\x57\x46\x8d\xa9\x52\x19\x7b\x8c\x33\xcb\x2c\x63\x85\xe5\x91\x98\xf7\xea\x66\x1b\x86\x4e\x92\x7b\x3b\xa5\x49\x51\x8c\x18\xa5\x24\xc6\x16\xb9\x64\xc4\x80\x49\x9a\x1c\x2f\xf7\x68\xa1\x91\xcd\xcb\x59\x2b\x9b\x70\x78\x1e\xe5\x61\x27\x33\x8c\xe3\x4b\xac\xee\xaf\x90\x5f\x59\x04\x2b\x7e\x56\x11\xee\x0d\x0a\xf9\xd1\xf6\x6f\x38\xa8\x65\x3f\x14\xb7\x2a\xe4\x9d\x2b\x84\xaf\x0f\x59\x5f\x59\x46\xb8\xb7\x42\x9e\x84\xac\x8d\x5a\xb0\x2c\xce\xc6\x0a\x6b\x8c\x66\x9e\x53\x5d\x48\x03\xb9\xeb\x8c\x2b\xa9\x17\xc3\x5e\xdc\xe9\xe6\x56\xcc\x3a\x25\x29\x6e\x96\x29\x1e\xbb\x5a\x23\x5a\x0d\x9b\xb0\x48\x65\x42\xda\xad\x38\x2b\xb3\x38\x56\x07\x5c\xad\xd3\x07\xb9\xa8\xd0\x76\x6b\x2e\xf2\xf1\x6a\xb5\x12\x2b\x3b\xa5\x69\x64\x5d\x4e\x5b\xab\x3a\x5c\xd4\xca\x65\x62\x46\x58\x0b\x35\x9f\xd6\xe4\x89\x47\xb3\x41\x42\xcc\x8c\x46\xb3\x97\x92\x1e\xd1\x96\x06\x47\xca\xfe\xa6\x21\xeb\x47\x57\xbb\x7f\x62\x15\x9b\x28\xde\x18\xb2\xfe\x6c\x85\xbc\x73\xe9\xf8\x3f\xc7\xbc\x5e\x73\xcc\xeb\x4e\xc9\x66\x6d\x6c\xa3\x65\xec\x70\x33\x7f\x34\x9d\x2f\x6c\x71\x90\x23\xd3\x74\x3a\x25\xae\x1c\xd2\x6c\xc9\x9e\xf3\x9c\xb4\x32\x04\x72\xf1\xe6\x9a\xe0\x99\x10\x3c\x51\xe0\xe3\x31\x31\xa7\x0c\xbf\xa2\x30\xa1\xd6\x3a\x8e\xdd\x0b\xd7\x0b\xde\xf5\xd0\xf4\x1d\xdb\x7f\x21\xc1\xf5\x7a\xfb\xcf\xca\xd3\xfe\x76\x73\x4e\xf1\x69\x79\xf2\x57\xd3\xb7\x3b\xb2\xf7\x76\x8f\x7f\xdf\x3d\x12\xd7\x9f\x4b\x7b\x77\x03\xf3\x29\xe7\xd2\x36\xfa\xa5\x85\x5f\x6c\x95\x63\x9d\x9f\x87\x68\x93\xc5\x35\x4e\x9a\xa0\x59\xc7\x1e\x94\x87\xb9\x48\x48\xb5\x26\xcc\xeb\x76\x1f\x6d\x5c\x31\xef\x64\xe3\x68\x49\x51\x81\x69
\x93\x2e\xa4\x24\x55\x9f\xf9\x9a\xcb\x77\x16\x5c\xc7\xaf\x7b\x9d\xb6\xa4\x2f\x49\x2d\xae\x6b\xd5\x3c\xd9\x19\x81\x76\xee\x97\x22\xbc\x2e\x4d\x81\x36\x99\xc1\x2e\xab\xfa\x5e\x66\xc9\x1b\xa0\x39\xae\x24\xa8\x2c\x5b\xd5\x8b\xbd\x2a\xc3\x95\xbb\xf6\x5b\xeb\xa1\x1f\x37\x30\x57\x14\x65\xdc\xd5\xc0\xdc\xd8\xfe\xcf\x34\x30\x9f\x18\x21\x88\xf7\x30\x30\xd7\xd3\x77\x1f\x03\x73\xe7\xcd\x40\xd7\x47\x30\x6f\x1e\x7c\xfd\xc1\xa5\xa6\xcf\x89\x60\xca\xe5\xd5\x18\x4b\x4c\x79\x14\x0f\x7d\xc5\x6d\x88\x7d\x0e\x4d\x27\xa5\x49\xaf\xb6\xd1\xfc\x31\x28\xa7\x1d\xbe\x27\x13\x2a\x5a\x5a\x36\x57\xe2\xa4\xbe\x2f\x58\x54\x28\x19\x4b\x39\x2d\xad\xe9\x66\x28\xa5\xeb\xc4\x56\x2a\xad\x06\xa9\x5a\x1c\xb7\x52\xd2\x8c\xb7\x14\x55\x34\x76\x46\x00\x1b\xf5\xbc\xe7\xca\xa0\x51\x5a\xcc\x02\xa6\xcd\xaa\x3a\x0f\x71\x2f\xea\xd7\x05\x1d\xb5\xcd\x2e\x3b\x65\x59\xd2\x6d\xbb\xba\x13\x1e\x07\xf9\x6f\x12\xc1\x5c\xa5\xe0\x77\x6c\xff\xf9\x94\xe3\x1d\xed\x5f\x36\x30\x7f\xa7\x24\x9d\x78\xc1\xc0\x7c\xf5\x4b\x07\x6e\x36\x30\x77\xde\xf5\xf6\xcf\xc9\xfa\xd7\x9c\xac\xbf\x33\x02\xeb\x5e\x51\x1c\x85\x3c\x4d\xd3\x65\x3f\x4c\x64\x3c\x5b\xf2\x1b\xaf\xdf\xa9\xaf\xcb\xfe\x74\xb3\xd0\x74\xbb\x95\xd8\x9b\xc4\x2e\xf1\xca\x71\xc1\xf5\x6f\x12\xc1\xdc\x6c\x60\x6e\x6c\xff\x8e\x06\xe6\xef\x94\x74\x14\x2f\x18\x98\x2f\xa4\xef\x3e\x06\xe6\xce\xdb\x3b\xaf\x4f\x8a\xbe\x52\xf6\x79\x52\x47\x71\xcb\x91\xdd\x9d\xcd\xc1\x73\xec\x8c\x87\x7e\x51\xf4\x4e\x8e\xec\x1e\xaf\x0d\xae\x42\x2c\x85\x4a\x5b\x6c\x22\xaf\x3d\x84\x29\x4f\x28\xad\x70\x66\x8f\xba\x65\x4b\x0b\xb2\x45\xa7\xd3\xa6\xf0\xca\x98\x95\x16\xc3\xae\x18\x60\xaf\xb8\x9e\x31\x2e\xc1\x4f\xca\xb5\x72\x7d\xc8\x8e\x98\x96\xa4\x5b\x9b\x0a\x99\xd4\x8a\x1a\xdb\xa7\xfb\x74\x3f\xf7\x45\x63\xaf\xf0\x1e\x45\xaa\x74\x2f\x0a\x3a\x96\x5c\x5e\x9a\xa2\x96\x1b\x65\x6f\x6d\xa5\xbd\x4d\x38\x0e\xa4\xe2\x5c\x9d\x22\xd5\x1b\x5b\x3a\xc3\xc0\xb7\xd6\xf8\x2f\x1f\xd5\xf4\xd1\x12\xd9\x13\x01\x16\x6f\x8c\xfe\x9a\x5a\x6b\xcf\xef\xf2\x4e\x81\x93\x73\xd2\x5e\x36\x78\xc6\xf5\xf4\xb7\xd8\xb8\xba\x17\xa0\x13\x83\xf3\xac\xf0\xe5\x0d\x83\x77\xbf\xf6\xe5\xeb\xdb\xbf\xc7\x56\xd1\xd3\xad\x9a\xcf\x8e\x8e\xd9\x5f\x5f\xb6\x55\xf4\xd9\xaa\xd9\xa5\xf6\x9f\x39\xa8\xa3\xae\x9e\x5f\xe6\xb1\x3f\x1f\x7d\x0d\xd2\xb1\x7f\xfb\xeb\xb3\x6d\x49\xfe\xd8\xd1\xeb\x5e\xdb\x74\x03\x7d\xbb\x42\x41\xed\x6c\x41\xe8\x7c\xcc\x8d\x47\x7e\xfe\x8d\xe8\xdb\x8f\x4f\xb9\xb8\x7c\x21\x68\x78\xfe\xba\x82\x0b\x4e\xed\xce\x7b\xe7\xff\x79\x0f\xca\x1b\xef\x41\xd9\x3b\x35\x85\x73\x4d\xbb\xd9\xaa\x55\x73\xbf\x08\xb8\x04\x55\x2d\x34\xd6\x75\x63\x2e\xfb\x1d\x3f\xa2\xba\x6d\xb1\x92\xd6\x72\x0e\xd1\x30\xf8\x42\xa7\x76\x97\x3c\x96\xd8\xea\x1d\xb6\xa5\xee\x8d\xf4\xf0\x9c\xb4\xaf\x70\x6a\xfb\x8f\xcf\xde\xe1\x72\x26\x62\xcf\x0c\xfb\xe3\x3b\x60\xce\x9f\xb9\xa3\x91\x37\xc4\xbb\x19\xf9\xa3\x51\xba\x37\xff\xf6\x8c\xfa\xfb\xf2\xef\x79\x74\x70\x91\x7f\xcb\x38\xf7\x5e\xe1\xdf\xee\xba\xd5\x49\xee\x67\x8d\x5f\x62\x9b\x6e\x75\x92\xb7\x9e\x67\xff\xd9\x4e\xf2\x53\xe8\x7b\x26\x2a\x8f\xf6\xe9\x70\xbd\xc7\x49\xde\xf9\xa0\x93\xeb\x9d\xe4\x2b\xdb\x24\x6e\x75\x92\xb7\xe4\x17\x85\xd2\x41\x98\x77\x5b\x2e\xe4\x8b\x11\xf3\x49\x79\x8d\x98\xa8\x0e\x13\x21\xb0\xe4\x06\x79\x93\xf3\x5c\xe4\xea\x83\x12\x35\xac\xe6\x93\x8e\xac\xe0\xb6\xd8\xd8\xb4\x4c\x03\x15\xa5\xa0\x6a\x61\xab\x0b\xc8\x3c\x63\xf9\x84\xd3\x55\xe0\x0f\x4d\x57\x35\x44\x9f\xdb\x98\x55\x2e\x8a\x2b\x01\xa2\x44\x6d\x22\xae\xcb\x9b\xb2\x61\xee\x1c\x4b\xb1\x68\x0f\x84\x5a\xee\xcf\x8d\x72\xa9\xed\x8b\x9a\x42\x84\xf5\xca\x04\x49\xf5\x74\x4d\xf6\x84\x8d\xd2\x4b\x62\x17\x35\x8a\x11\xa7\x4c\x1f\x29\xfb\x74\xc7\xfa\x37\x7e\xf9\xd6\x17\x38\xe3\x17\x8a\xe7\x2e\x3a\x13\x9d\xe3\x1f\x16\x1
2\xbf\xc2\x99\x98\x17\x08\xb9\xd2\x99\xec\x9e\x41\x9f\xc1\x3f\xe5\x6f\xce\xbf\x73\x9b\x74\x1d\xff\x72\xf1\x3f\xd1\x79\x88\x4f\x33\x12\x3f\xa7\x40\xc1\x7d\xa4\x9c\x3f\xf2\xef\xdc\x06\x4b\x74\x73\x74\x39\x93\x71\xf2\x42\xcf\x0f\xcb\x83\xf8\x18\xcc\x5e\xcf\xff\x5b\x32\x68\x6d\x97\x98\x1e\x38\x27\x3e\xbc\x2c\xe8\xfc\x92\x6f\x1d\x9f\x5b\xd7\x77\x5f\x19\x9f\x13\x7d\xbd\x74\x9d\xe8\xeb\xdf\xa7\xaa\x68\x4f\xbf\xe4\x0e\xc6\x2f\x50\xfd\x8e\x80\xea\xce\x27\x98\x5d\xbf\x03\xe7\x95\xb5\xba\x9b\xdf\x3c\xa5\x08\xf8\xf0\xe8\x57\xbe\x89\xad\xe6\xf7\x9f\x65\x53\x4f\xde\x3c\xb5\x2e\x7b\x9b\x10\x97\x72\xaa\xde\x5a\x64\x98\x77\x11\x8e\xac\x9c\x1f\x40\x65\x5d\x31\xd7\x22\x69\x20\x2f\xd0\x8b\x5d\x3b\xed\x75\x04\x7f\x3d\xb4\xc7\x9e\x1d\x41\x01\xc8\x78\xae\xc8\xfc\xa6\x67\x21\x62\x61\xe6\xd0\x89\x5b\x33\x7b\xc5\x0b\x4e\x2f\xd0\xaa\xaa\x2e\x1d\xd6\xe6\x46\xad\x79\xba\x36\xa6\x8d\x49\xa7\xea\x2b\x7c\x37\x91\xe5\x45\xd7\x69\x8f\x0c\x83\x9f\x2e\x64\xdb\xc6\x63\x05\x65\x13\x33\x6c\x4a\xdd\x63\x56\xe0\x72\xf0\x75\xa2\xc8\x57\x09\xfe\x9e\xeb\xb7\xf2\x52\x02\xc5\xe9\xfe\xd1\xff\xb8\xb7\xc0\x1d\xdf\x7a\xf6\xda\xba\xe9\x89\xa0\x5c\xb1\xbf\xba\x8a\x8e\x7a\x74\xd9\xe9\xbc\x95\xc6\xbf\x5f\xfb\xe5\x6b\xda\x2f\x9f\x9f\x6c\xfb\xb7\x2b\x7c\x10\x9f\x9e\x14\xf7\xe5\xf4\xf5\xa7\x9c\xf1\x16\x7d\x27\xcb\x30\xff\xf7\x06\x11\x27\xf2\xba\x77\xd8\xff\x69\x13\xc1\xa3\x2d\x7b\xe5\x2d\xcc\xb7\x06\xe1\x9f\xb8\xcc\x71\x74\xa6\x17\xaf\x93\xf1\xf9\xea\x20\x4f\x92\xa6\xf5\xe4\xb5\x20\xaf\xc3\xc0\x17\xde\x12\xb4\xbb\x4e\xf4\xe7\xef\x53\xd9\xf9\x6c\xbe\xa9\x21\x72\xf6\x8c\xf4\x63\x86\xff\xcb\x27\x6d\x0d\x21\x99\x79\xef\x9b\xb4\xf5\x65\xf4\x6c\x85\xec\xa4\x6e\xe8\xe7\xd4\x64\x5d\x94\x97\xe3\xf5\x4c\x9e\xbf\x8e\x3e\x49\xee\x07\xdc\x6b\xf4\xbd\x6b\xbf\xd2\x7d\x8f\xca\xfd\xe7\x55\xcb\x7f\x87\x57\x2d\xef\xf7\x3f\x15\x87\x54\x3d\x52\x79\xc8\xc5\xfd\xc8\x2f\xc5\x60\xa1\x4f\xec\x65\x63\xe2\xf3\x0e\x33\xe8\xe9\x6e\x24\xa7\xea\x66\xb8\x6e\xd5\x88\x63\x3d\xcb\x9b\x41\xfe\xf5\x81\xc3\xa9\x60\x6a\xb4\x7f\xe0\xd1\x2d\x8e\x72\x8d\x0e\xaf\xc2\xbd\x8a\x9e\xf2\xad\xe3\x5b\x1e\xc8\xc5\x27\x02\x2b\x30\xf3\xc3\xc7\x2f\x2c\xe8\x9b\xe5\xa5\x03\xfd\x5f\x30\x79\xb8\x2a\x78\xbf\x63\xfb\xf2\x35\xed\xcb\x77\x3b\xae\xfe\x93\x32\xaa\xe7\xcb\xef\x1f\x75\xce\xdb\xe0\x60\x1f\x11\xee\xc6\x47\x3a\x2e\xef\x5d\xa0\xe5\x29\xcf\x24\x91\x7a\x50\x79\xa5\x9d\x3c\x2b\x01\x38\x91\x9f\xff\xb4\x60\xed\x7c\x32\xf6\x85\x19\xb9\x8a\x4f\x67\x97\x27\x63\x87\x31\x58\x70\xb3\xd7\x36\x40\x1f\x83\xfb\xff\x8c\x9a\xb0\xa3\xfc\xfe\x74\x7a\xa5\x1e\x50\xdf\xa4\x37\x17\x9f\x06\xc3\x1f\x8c\x59\x6e\xa1\xaf\x09\x7b\xf3\xf7\x2d\xcf\xff\x1c\xfa\xf4\x90\xb6\xdf\xa4\xef\x2e\xf2\xf9\x85\xe5\x18\x77\x91\xcf\xfb\xd0\x2b\x35\x09\xfc\x26\xbd\xe7\xf2\xf9\x9f\x33\x59\xbb\x79\x72\x7f\x03\x7f\x17\xd0\xc7\xef\x5b\x61\x13\xd7\x28\x8f\xce\x7f\x3b\x79\xcd\xc4\x57\x6f\x7a\xa8\x34\xa1\x1d\x7d\x34\xb9\xf8\x95\xf4\x6d\x74\xbb\x7a\xf3\xa6\x8c\x3b\xbf\x1b\xe4\xfa\x89\xe6\x2b\xf5\xab\xb7\x4f\x34\x1f\xb3\xf9\x5f\x38\xd1\x3c\x3b\x41\x75\x7f\x9d\xac\x24\x89\xd5\xe2\x9c\xf3\x51\x4b\x9f\x75\x47\x6b\xb3\x6b\xd6\xc4\x62\x42\xc8\x63\xbd\xd8\xe8\xc9\x75\x91\xc6\xfd\x25\x66\x09\xb5\xad\xb9\x79\x64\x43\x8c\x11\x37\x28\x07\x88\xc0\xd3\x6e\x4a\x90\x61\xde\x65\x9a\x7a\xe0\xae\xdd\x9e\x3a\x0a\x79\xa7\xd3\x19\x95\x3d\xc3\xa4\x1f\x36\x7d\x94\xc7\x19\x51\xaa\x32\x6c\x5c\x9b\x6d\xe4\x46\x73\x52\x4b\xc4\x68\xe6\xab\x16\x0c\x8a\x8e\x3e\xd7\xb3\x69\x48\x52\xf5\x00\x55\x8d\xc6\xc9\x1e\xf8\xf6\xa5\x6c\xc5\xee\x30\x3c\xea\x51\xa0\x3e\x7f\x32\x73\xd5\x64\xe2\x8e\xed\x57\x3e\xd8\xbe\xf8\xca\xbb\x60\xb2\xb9\x17\x43\x07\xdf\xe3\x85\x30\x4f\x51\x3d\xea\xef\xe9\xc1\x
a6\x3b\x9a\xf3\xe3\x18\x5d\x30\x00\x2f\x63\x79\x4c\x15\xe5\x62\xdb\x98\x8a\xcf\xb0\x3c\xf4\x70\xdf\x31\x88\x50\x94\x85\xa9\xb3\xed\xee\xc9\xe7\x1f\xf3\x29\x7e\xec\x66\xb9\xd5\xec\xf6\x3a\x62\xb5\xd9\x7b\xa3\x9b\xa2\xde\xab\x74\x0e\x5c\x69\x35\xf5\xe1\x29\xc6\x5f\x0a\x85\x42\x41\x94\xe5\x13\x6c\xcf\x1a\x2c\xb4\x3b\xd5\x86\xd8\x19\x16\xea\x95\x61\xe1\xb7\xc3\x5d\xdf\xf9\x5e\xd8\x42\x84\x30\xc0\x2f\x91\x9f\x3c\x7e\xb8\x2f\xe1\xc9\x2b\x54\x27\xaf\x91\xfc\x8c\x50\x1b\x86\xdb\xff\xef\x44\x9e\x0d\xc3\x4b\x94\x3d\x34\xf0\x94\xa8\x30\x72\xf0\x05\x8a\x66\xd8\xf1\x70\x3c\xc6\xd0\xc1\x71\xf2\xf4\xdb\x8f\xfd\xb7\x04\x2f\x7e\xdc\x85\xdc\x27\xc8\x2f\x11\xfe\x4a\xeb\x05\xb3\x59\x35\xcc\x4a\xe1\xb7\xc7\x5f\x3f\xd4\x93\xfb\xf0\xfb\x83\x1d\x78\x3e\x06\x87\xfb\x30\x19\x3f\xa3\x3e\x72\xdd\x2d\xd9\xfb\x7f\xee\x44\xef\x1e\xd9\x25\x42\x4f\x9a\x79\x4a\xe1\xee\xc6\x05\x31\x99\xe3\x2d\x75\xbb\xbf\x77\x22\x6e\x87\xeb\x12\x6d\xc7\x46\x9e\x92\xe6\xcf\xbf\x17\xe6\x51\x9c\x3e\xa7\x6d\xdb\x5e\x32\x5e\x64\x38\xc3\x4f\xbe\xdc\x8b\xd2\x13\x94\x17\x09\x3e\x6f\xf2\xd2\xa0\x5f\xa2\x3a\xc9\xec\xc3\x3f\xf7\xa3\x34\xc9\xec\x17\x68\x7c\x68\xe6\x29\x75\x31\x4e\x2e\x0c\xf7\x22\x8b\xe2\x2c\xf0\x43\x37\x3a\xf9\x78\x27\x22\x8f\x08\x2f\x11\x7a\xd6\xdc\xbb\x6c\x58\x82\xe6\xfb\xc7\x92\x93\x8f\x77\xa2\xf6\x88\xf0\x12\xb5\x67\xcd\x3d\xa5\x76\x91\xe0\xf4\xa2\xae\x6f\x9d\x35\x4e\x52\x98\xe2\x93\x8f\xf7\xa2\xf7\x11\xe1\x45\x7a\x9f\x36\xf7\x94\xde\xdd\xef\x17\xfd\x6b\x1a\x67\x49\x3a\xf3\x43\x9c\x9c\x7c\xbc\x13\xc1\x47\x84\x97\x08\x3e\x6b\xee\xc5\xd0\xc0\x4f\x92\x0c\xc7\xdf\x0b\x30\x49\x70\x8a\x22\xe7\x42\x1f\x4e\x5f\xa5\x78\xfa\xe5\x5e\xfd\x38\x41\x79\xb1\x27\xe7\x4d\x5e\xb2\x12\x09\x5e\x7c\x2f\xa4\x2b\x3f\x74\xf0\xea\x42\x0f\x8e\xe4\xdf\x9b\xf6\x57\x09\xbf\x89\xea\xb3\x10\xf5\xe9\xd7\x3b\xd1\xff\x14\xe9\xa5\x4e\x5c\x68\xf6\xc5\x9e\x1c\x60\x2f\x77\xe7\x20\x73\x36\x9c\xc1\x10\xe1\xe4\x81\xf6\x6a\x53\xae\x0c\xde\x20\xbb\xdc\xa9\x88\xbd\xca\x1e\xf4\x1c\x4f\xa1\xd5\x3c\x46\xb1\x66\xb7\xda\x54\x0b\x76\x1a\x63\x5c\xf8\xed\x00\xf1\xaf\x42\x5f\xab\x74\x2a\x8f\xdf\x0b\xff\xf3\x57\x81\x24\x1e\xae\xe7\x81\x26\x4e\xd2\xbd\x2f\xdf\xf6\xe2\x6a\x2a\x9f\xa2\xd9\x12\x79\x88\x53\x9e\x90\x98\xe0\xd9\xcc\x0f\xbd\x9d\xee\x7d\x2f\xd8\xd9\xfa\xf8\x65\x1e\xfb\xe8\xb9\x2e\x6e\x87\xc1\x5e\x27\x78\x71\x35\x61\x8f\x18\xb6\x34\x1d\x15\xe3\x09\x59\x2f\x87\x8a\x5b\x70\x17\xe3\xdb\x49\x78\x40\xb2\xa7\xe2\xc4\xc0\xbc\x93\x90\xc3\x9d\xdb\x08\x39\x45\xb2\x25\xe4\x69\x20\xfc\x4e\x4a\x12\x34\xc7\xe1\xf2\x46\x4a\x4e\x91\x6c\x29\x49\xd0\xfc\x83\x0c\x39\x3a\xd4\x9b\x29\x39\xc5\x73\x20\xe6\x21\x4a\x78\x4a\x0c\x4c\xd2\x97\x09\x3a\x31\x1c\xb7\x51\x74\x8e\x68\x4b\xd2\x99\x6d\x7c\x93\x47\xed\x28\x49\xbd\x18\x77\x0d\x7d\x37\x17\xb6\x61\x82\x0b\x4e\x16\xcc\x0b\x28\x0a\xe6\x33\x9c\xe2\x5d\xb3\xff\x27\x00\x00\xff\xff\x6b\xc6\x84\x82\xa5\x14\x01\x00") + +func paths_strict_sendCoreSqlBytes() ([]byte, error) { + return bindataRead( + _paths_strict_sendCoreSql, + "paths_strict_send-core.sql", + ) +} + +func paths_strict_sendCoreSql() (*asset, error) { + bytes, err := paths_strict_sendCoreSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "paths_strict_send-core.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8c, 0x85, 0x75, 0x4, 0x43, 0xc0, 0x16, 0x2b, 0x12, 0x7a, 0x4f, 0xcd, 0xc0, 0x70, 0xe5, 0x9c, 0xcc, 0xc9, 0xd3, 0xff, 0xd1, 0x9d, 0x32, 0xbb, 0xcc, 0x3c, 0xd1, 0xb0, 0x28, 0x7, 0xea, 0x89}} + return a, nil +} + +var _paths_strict_sendHorizonSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xbd\x69\xb3\xa2\x48\xf6\x38\xfc\xbe\x3f\x05\x51\x31\x11\xd5\x1d\x56\xb5\x90\xec\xd5\xff\xfe\x45\xa0\xe2\xbe\xef\x3a\x31\x61\x24\x90\x28\x8a\xa2\x80\xdb\x9d\x98\xef\xfe\x84\x80\x1b\xe2\x86\xdc\x5b\xb7\xe6\x19\xa6\xe6\xb6\x4a\x72\xb6\x3c\xe7\xe4\xc9\x93\x27\x93\xef\xdf\x7f\xfb\xfe\x1d\xab\x1a\x96\x3d\x34\x51\xa3\x56\xc4\x14\x68\x43\x09\x5a\x08\x53\x96\xd3\xf9\x6f\xdf\xbf\xff\xb6\xbb\x9f\x5a\x4e\xe7\x48\xc1\x54\xd3\x98\x1e\x1b\xac\x90\x69\x69\xc6\x0c\xe3\xff\x64\xfe\x24\x4e\x5a\x49\x5b\x6c\x3e\x1c\xec\x1e\xf7\x35\xf9\xad\x21\x36\x31\xcb\x86\x36\x9a\xa2\x99\x3d\xb0\xb5\x29\x32\x96\x36\xf6\x37\x86\xff\xe5\xdc\xd2\x0d\x79\x72\xf9\xab\xac\x6b\xbb\xd6\x68\x26\x1b\x8a\x36\x1b\x62\x7f\x63\x5f\x5b\xcd\x34\xf7\xf5\xaf\x3d\xb8\x99\x02\x4d\x65\x20\x1b\x33\xd5\x30\xa7\xda\x6c\x38\xb0\x6c\x53\x9b\x0d\x2d\xec\x6f\xcc\x98\x79\x30\x46\x48\x9e\x0c\xd4\xe5\x4c\xb6\x35\x63\x36\x90\x0c\x45\x43\xbb\xfb\x2a\xd4\x2d\x74\x86\x66\xaa\xcd\x06\x53\x64\x59\x70\xe8\x34\x58\x43\x73\xa6\xcd\x86\x7f\x79\xb4\x23\x68\xca\xa3\xc1\x1c\xda\x23\xec\x6f\x6c\xbe\x94\x74\x4d\xfe\xb6\x63\x56\x86\x36\xd4\x8d\x5d\x33\xa1\xd8\x14\xeb\x58\x53\x48\x14\x45\x2c\x97\xc6\xc4\x6e\xae\xd1\x6c\x60\x95\x72\xb1\xe7\xb5\xff\x73\xa4\x59\xb6\x61\x6e\x07\xb6\x09\x15\x64\x61\xa9\x7a\xa5\x8a\x25\x2b\xe5\x46\xb3\x2e\xe4\xca\xcd\x93\x87\xce\x1b\x0e\x64\x63\x39\xb3\x91\x39\x80\x96\x85\xec\x81\xa6\x0c\xd4\x09\xda\xfe\xf5\x11\x08\x65\xe7\xd3\x47\xa0\xdc\xe9\xd5\xc7\x31\xe8\x62\x7b\x9e\x3b\x97\xc0\x9d\x22\xdf\x42\x76\xd2\xea\x08\xdc\x69\x9e\x2b\xa7\xc4\xee\x49\x4b\x0f\xac\x43\xd5\x00\xa9\x2a\x92\x6d\x6b\x20\x6d\x07\x86\xa9\x20\x73\x20\x19\xc6\xe4\xf6\x83\x96\x36\x9c\x21\xd3\x79\xc4\xe3\xe6\x76\x7b\x43\x55\xbd\xe6\x16\xd2\xf5\x9d\xc5\x38\xb4\x3e\xf3\x10\x32\x1f\x6d\xad\x43\xcb\x1e\x4c\x0d\x45\x53\x35\xa4\x0c\x74\xa4\x0c\x1f\x7f\x56\x5a\x6e\x1f\xa4\x4e\x9b\x29\x68\x33\x38\xe9\xdf\x99\x05\x1d\x5b\xb7\x06\xc6\x6c\xa0\x29\xcf\x3c\x6d\xcc\x91\x09\x0f\xcf\xda\xdb\x39\x7a\xe1\xe9\x23\x25\x2f\x51\xf1\xdc\xb3\xae\x94\x9d\x07\x2d\xb4\x58\xa2\x99\xfc\x14\x0b\x27\x8f\xcf\x4d\xb4\xd2\x8c\xa5\xe5\xfd\x36\x18\x41\x6b\x14\x12\xd4\xeb\x10\xb4\xe9\xdc\x30\x77\x1e\xc9\x1b\x56\xc2\x82\x09\x2b\x4b\x59\x37\x2c\xa4\x0c\xe0\x53\xba\xb8\xb7\xe7\x10\xaa\xe4\x19\x73\x08\xa2\x4f\x9f\x84\x8a\x62\x22\xcb\xba\xfd\xf8\xc8\x36\x15\x67\xe8\x1d\xe8\x86\x31\x59\xce\x1f\x68\x3d\xbf\x47\x92\xdb\x0a\x6a\xe6\x93\x80\xf7\xe3\xce\xc3\x0f\xec\x5c\xe5\xce\x67\x3c\xd6\x74\x0f\x3e\xc4\x23\x0f\x79\xd7\xfd\x43\xce\xe8\xf2\x04\x92\xd3\xd1\xe8\xde\x13\xf3\xdd\x03\x23\xfb\x6e\x0f\x58\x67\x0e\x48\xda\xde\x55\xa3\xd1\xc1\xd2\x1f\x69\x6c\xb8\x74\x18\x77\x1b\x6a\x96\x3d\xb0\x37\x83\xf9\x7d\x90\xbb\x96\xc6\xfc\xd1\x96\xe8\xd1\x66\xfb\xd1\xf4\x76\x63\x69\xfb\xd0\x00\xb5\xe3\xf9\xae\x17\x7b\x74\x30\x76\xc3\x84\x9d\xb4\x2d\x6b\x79\x0f\xf3\xa1\xb1\x6c\x28\xe8\x91\x50\xc5\x1d\x4b\x6f\x44\x29\xde\x60\x3b\x7f\x30\xf4\x99\xa0\xed\x60\x05\xf5\x25\x1a\xec\x1c\x0d\xba\x01\xd8\xd7\xf2\x61\x0c\x01\x63\xf8\x60\x0e\x4d\x5b\x93\xb5\x39\x9c\xdd\x8c\xb8\xee\x3d\xfa\x34\x0d\x87\x31\xf8\x59\x0a\x82\x1f\x7c\x1a\xbf\xd3\xdd\x8f\xe0\x73\x1b\xbe\x3b\x7c\x57\xfd\x76\xba\xe7\x7d\xdc\x8d\x68\xfb\x78\xdd\x51\xdf\xc1\x83\x14\x0c\x0d\x73\x3e\x98\x6a\x43\x2f\xc4\xb9\x41\x82\xaf\xe5\xc3\x3c\x3e\x1f\xa4\x3f\x0c\x79\x3f\xb8\x7a\x71\xf7\x2d\xf0\xbe\xa6\x37\x71\x3c\x6a\x00\xee\xd3\xc9\x4a\xb1\x55\x2a\x63\x9a\xe2\xa2\x4f\x89\x69\xa1\x55\x6c\x3e\x08\xfb\x8a\x62\x47\x00\xd9\x53\xa9\xdb\x90\x9c\x6f\x57\x00\xb9\x0e\xe9\x76\x1b\x9f\x6f\xb9\xdd\x38\x68\x52\xe0\x3d\xd1\x10\x6b\x2d\xb1\x9c\x0c\xd1\x09\xbb\x1
9\x9d\x85\x16\x4f\x63\x3e\x03\xf2\xf0\xd3\x0a\x7a\xb0\xed\x71\xd2\xf0\x30\x87\x57\x5c\xd5\x33\xfc\x05\x83\x78\xec\x59\x2f\xbc\x7e\xac\xb1\x17\x4b\x3f\xcc\x9b\xe7\xb6\x9e\xe1\xc5\x7d\xe4\xc1\xb6\x9e\x75\x3f\x4e\xcf\xde\x1d\x3c\x42\x91\xcf\xf1\xdd\x6e\x7c\xe2\xc7\xee\x34\xf4\x79\x24\xaf\xb5\x90\xc9\xd4\xc5\x8c\xd0\x0c\x78\x62\xaa\xed\xa6\x81\x9a\x8c\x7e\x9f\x2d\xa7\xc8\xd4\xe4\x7f\xfe\xeb\x8f\x07\x9e\x82\x9b\x10\x4f\xe9\xd0\xb2\x7f\x87\xb3\x2d\xd2\x9d\x14\xe1\x03\x4f\xa8\x9a\x19\xf8\x48\xba\x55\x4e\x36\x73\x95\xf2\x0d\x7e\x06\x70\x38\x3c\x52\xf7\x0d\xbb\x20\xf4\x06\x8c\x3d\x77\x2f\xc0\x70\x12\x23\xbb\xc7\x8f\xc4\x7f\xc3\x9e\x61\xc4\x61\xfd\x01\x08\x62\xb7\x29\x96\x1b\x3e\x10\xfa\x7c\x68\x2d\xf4\xbd\xe6\x26\xb3\x62\x49\xb8\xc0\xf0\xd7\x6f\x6e\x76\xb8\x0c\xa7\xe8\xc7\xfe\x37\xac\xb9\x9d\xa3\x1f\xde\x23\x7f\x61\x0d\x79\x84\xa6\xf0\x07\xf6\xfd\x2f\xac\xb2\x9e\x21\xf3\x07\xf6\xdd\x49\x1a\x27\xeb\xe2\xae\xbf\x3c\xc8\x7b\x78\xbf\x9d\x41\x3c\xbf\xe9\x01\x4e\x56\x4a\x25\xb1\xdc\xbc\x01\xd9\x6d\x80\x55\xca\xe7\x00\xb0\x5c\x03\xfb\xba\x4f\x07\xef\x7f\xb3\x1c\x20\x5f\xfd\x98\xf7\xec\x7b\x38\x0f\x12\xba\xcb\xcf\x99\x2c\xcb\x95\xa6\x4f\x9e\x58\x27\xd7\xcc\x1e\xc8\x3a\xcd\x0b\x9f\xa1\x3f\x42\xf1\x11\xf2\x0c\xf3\x17\x40\x1c\x01\x54\x8b\xf1\xf9\xb0\x51\x2b\x62\x73\xd3\x90\x91\xb2\x34\xa1\x8e\xe9\x70\x36\x5c\xc2\x21\x72\xc4\xf0\x60\x1e\xfb\x94\xdc\xfb\x8a\xe6\x91\xbf\xd7\xd5\x23\xfd\xfb\xbe\x0d\x92\xe5\x41\xb3\xef\xc2\xc7\xea\x62\xb3\x55\x2f\x37\x4e\x7e\xfb\x0d\xc3\x30\xac\x28\x94\x33\x2d\x21\x23\x62\x0e\xf7\xa5\x52\xcb\x75\x7a\x8d\x66\x3d\x97\x6c\x3a\x2d\x84\x06\xf6\x8f\xc1\x3f\xb0\x86\x58\x14\x93\x4d\xec\x1f\xc4\xee\x9b\xbf\x37\xee\x1a\xe2\x6b\xdc\xdd\x03\x1f\x19\x73\x20\x88\xb9\x47\x3c\xd5\x6b\xfc\x3d\x80\xe1\xc0\xe2\xe1\xa7\x50\x1c\xfe\xfe\x1b\x86\x25\x85\x86\x88\x75\xb2\x62\x19\xfb\x07\xf1\x4f\xe2\x5f\xf1\x7f\x10\xff\x04\xff\xfa\xbf\x7f\x00\xe7\x33\xf8\x27\xf8\x17\xd6\x74\x6f\x62\x62\xb1\x21\xee\x84\x22\x96\x53\x7f\x04\x4a\xe6\x81\x71\xe0\x45\xc9\xdc\xc7\xf0\xde\x92\xf9\x7f\x61\x24\x73\x39\xa6\x7a\x72\x38\x8c\xc3\x8f\x09\xe2\x38\x6c\x5f\x40\x74\x28\xc6\xb0\xc6\x4e\x56\xd8\xdf\x47\x0f\xf0\xcd\xfd\xb9\xd9\xab\x8a\xd8\xdf\xa7\x16\xf1\x47\x90\xd5\x46\x4a\xa3\x1f\xa0\x8f\xc4\xbd\x19\x3f\x4e\x61\x60\x08\xf4\x2a\x95\x41\x40\x7d\x94\x9e\x19\xe4\x39\xb9\x47\x2d\xbb\xa4\x36\x28\xcc\x7b\x99\xda\x00\xa0\x7e\x6a\x4f\x8d\xe4\x26\xb5\xbb\x91\x4b\x41\x2a\x5c\xea\xf6\xc0\x86\x92\x8e\xac\x39\x94\x11\xf6\x37\xf6\xf5\xeb\x5f\xe7\x77\xd7\x9a\x3d\x1a\x18\x9a\x72\xb2\xc4\x7b\xc6\xeb\x45\x10\xec\xf1\xe9\x58\xd9\x63\x3c\xba\x06\x79\x91\x0a\x70\x79\xf3\x7e\xc6\xe4\x11\x34\xa1\x6c\x23\x13\x5b\x41\x73\xab\xcd\x86\xbf\x33\xd4\x1f\x4e\xd8\x50\x6e\x15\x8b\x2e\xb3\xee\x93\x0f\x35\x5d\x23\x6d\x38\xb2\x31\x6d\x66\xa3\x21\x32\x0f\x37\x2f\xfb\xf2\x74\x36\x10\x9a\xb5\x93\xfc\x89\xcb\x95\xa6\x60\x92\x36\xd4\x66\xb6\x8f\x2c\x38\x0d\x66\xd6\xd7\x6c\xb6\x9c\x1e\x26\x40\x17\x3c\xb8\x4d\x54\x1d\x0e\x2d\xcc\x9a\x42\x5d\xbf\x44\x63\x1b\x53\x3d\x40\x4c\x80\xa6\xff\xb8\x21\x0a\xff\x2c\x2a\xac\x38\xfc\x09\xab\x83\x48\x6c\xb4\xb9\x10\xc8\x7c\xae\x6b\xce\x42\x11\x66\x6b\x53\x64\xd9\x70\x3a\xc7\x76\x3a\xe9\x7c\xc5\xde\x8c\x19\xba\x24\xf4\xda\x1c\x71\x1f\x6f\x7b\x93\xcb\xc7\x68\x3e\x4c\x45\xaf\x40\xf5\xcc\x4c\xa8\x37\xdd\x88\x95\x70\x7e\xc8\x95\x93\x75\xd1\x09\x2f\x13\x3d\xef\xa7\x72\x05\x2b\xe5\xca\x6d\xa1\xd8\x12\x0f\xdf\x85\xee\xf1\x7b\x52\x48\x66\x45\x8c\xb8\xc7\x4c\x68\xb1\xfb\x01\x5d\xa8\xa2\x97\x53\xc2\x66\x68\x63\xaf\xa0\xfe\xfb\xd7\x2b\x1c\x7f\xfd\xf1\xc3\x44\x43\x59\x87\x96\xe5\x37\x2b\x6f\x81\x2c\xd8\x04\x6f\x74\x94\x9b\x29\x78\x99\x33\x37\x61\x
76\xe0\x2b\xd8\x32\x8e\xe9\xd6\x87\x3c\xc5\x31\x51\x1b\xd0\x9c\x00\xc1\xcd\xdd\x0c\x6e\xc0\x03\x34\x73\xcb\xc2\x82\x93\x2d\x11\xa9\xed\x29\xcc\x0f\x53\xda\x5b\x8c\x60\x95\x4e\x59\x4c\x61\x89\xde\x1d\x8e\xdc\x04\xe8\x6d\x86\x0e\xb0\x7c\xb7\xff\xd4\x94\x6b\xb4\xed\x33\x60\xaf\x6a\x9d\x07\xc7\x53\x3b\x9f\xcd\x0c\xae\x79\xfa\xcb\x84\xdf\xb5\x96\x5f\x9c\xd5\xb6\x2f\x57\xb4\xd9\xd1\xe3\xe0\x5b\x0a\xb2\xa1\xa6\x5b\xd8\xd8\x32\x66\xd2\x75\x65\xdb\xa7\x0d\x5f\x95\x83\x07\xc7\x93\xc3\xbe\x58\xe2\x0a\x6d\x27\x15\x0c\x0f\x59\x61\x50\xf1\x44\xf0\x83\x9e\x58\x4e\xf2\xc4\x6e\x00\xb1\xa7\x63\xef\xe5\x70\x1f\x86\x63\x47\x3c\xd6\xfe\x50\xc1\xe0\x1b\x98\x8c\xa5\x7d\x1c\x9b\xfc\xcf\x98\x08\xda\x77\x1f\x72\xdb\x2e\xe7\xca\xc3\x6d\x0f\xaa\xe3\x7d\xf5\x15\x77\x5c\xf0\x42\x5c\xc4\x03\x36\xd4\x07\xb2\xa1\xcd\xac\x60\x1d\x54\x11\x1a\xcc\x0d\x43\x0f\xbe\xeb\x2c\xb7\xab\xe8\x5a\x5f\x3b\xb7\x4d\x64\x21\x73\x75\xad\xc9\x2e\xce\xb6\x37\x03\x27\x4c\xd2\xde\xae\xb5\x9a\x9b\x86\x6d\xc8\x86\x7e\x95\x2f\x7f\x1f\xed\x95\x05\x41\x05\x99\x4e\x78\xe1\x05\x8a\x4b\x59\x46\x96\xa5\x2e\xf5\xc1\x55\x45\xf1\x18\x87\x9a\x8e\x94\x7b\xad\x3c\xd2\xaf\xa8\xd0\x75\xd3\xbb\x92\xed\x7f\xd5\x12\xaf\x2c\x49\xdd\x19\x17\x1f\xf7\x48\xf7\x7d\xdc\xb3\x2c\x47\x3b\xd4\xdd\xc4\xf1\x51\x43\xdf\x53\x8c\xbe\x38\x14\xde\xc4\x75\x39\x34\x06\x37\xbf\x31\x54\x9e\xac\x85\x45\xa6\x9b\xf7\xa6\x42\xe7\xe5\x7e\x57\xa6\x4b\xbb\xd9\x81\xec\xb2\xe2\x8c\x92\x2f\x0e\x92\x9e\x77\x30\x96\xa6\x7c\xa8\x1f\xba\x32\x3c\xed\x5d\xce\xd7\xaf\x3f\x7e\x5c\x9f\xae\x5d\xb7\x03\x6f\x29\xf2\x55\x71\x7a\x75\xba\xbf\x47\x1a\x53\x78\x6e\x33\xcc\x08\xe7\x2c\x39\x5f\x45\xeb\xab\x12\xbe\xd5\xc8\x2b\x5c\xbe\xd5\xc4\x9d\x2b\x07\x36\xb8\xac\xb7\xbe\xd3\xee\x26\xba\x43\xab\x1b\x18\x1d\x92\x34\xcb\xab\xe8\xc5\x24\xc3\xd0\x11\x9c\xed\xc7\x2d\x4d\x46\x83\xd9\xd9\x18\xed\xfe\x76\x3e\x6e\x1f\xcb\xdc\x06\xbe\x11\xfd\xac\xd0\xce\x7f\xf3\xa4\x5c\x22\xb0\x2a\xdb\xa1\x7a\xe0\xd4\xed\x63\xc9\xac\x98\x2c\x60\xbf\xff\x7e\x2a\xc1\xff\xfb\x1b\xc3\xff\xf8\xe3\x1e\xac\xa0\xe7\xf7\x52\xfb\x7f\x17\x82\x7c\x00\xde\x99\x50\x7d\xe0\x7d\x12\x77\x29\xbc\x69\x4c\xc1\x55\x01\x11\x98\x57\x70\xe1\xc8\x83\x63\xe9\x23\x4e\xec\x95\xd1\xf4\x5e\x4d\x45\x34\xe3\xe9\x1d\x2c\x1f\x35\xa2\x3e\xc9\xec\x8b\x63\xea\x1d\x6c\x97\xa3\xea\xb5\x07\x6e\x8c\xab\x67\x75\x34\x11\xea\xea\x5e\x3f\x4f\x49\x7a\x78\xaa\xe5\x79\xff\x3b\x13\xb8\x47\x87\xde\x67\xf2\xb7\x7b\x0b\x38\xa0\x0e\xb4\x97\xdd\x5c\xe1\xfa\x64\xe3\xda\x34\xee\xa7\x4c\xc4\xec\xcd\x00\xcd\x56\x48\x37\xe6\x28\x28\xb9\x69\x6f\x76\xd3\xa2\xa5\x6e\x5f\xb9\x39\x45\x36\xbc\x72\x6b\x37\x21\xbb\x76\xdb\xd2\x86\x33\x68\x2f\x4d\x14\x94\x87\xe3\x99\x3f\xfe\xf9\xaf\x63\xf4\xf2\xef\xff\x04\xc5\x2f\xff\xfc\x97\x5f\xe6\x68\x6a\x5c\x49\x99\x1d\x61\xcd\x8c\x19\xba\x19\x0d\x1d\x61\x5d\x82\xf1\x38\xd3\xa6\x68\x20\x19\xcb\x99\xe2\xe4\xb5\x39\x13\xce\x86\xc8\x3f\x67\x3b\x1f\x5c\x77\x92\xd8\x41\x1b\x22\xe5\xfa\x84\xcb\x5f\xe5\x16\xd6\xd6\xfc\x35\xbb\xae\x99\x4d\xd0\xf6\x5e\x36\xdd\xa5\xd5\x79\xf4\xf9\xc4\xbb\x57\xc7\x17\x96\x68\xaf\x70\x79\x9f\x97\xd9\xc5\x27\x9a\x72\x27\x37\x79\x12\xcd\x5d\x1b\xb7\xbc\x0d\x4f\xce\x88\x1f\xa4\x86\xee\x8e\xa3\xab\xb7\x6f\x05\x53\x4e\x70\x34\xbb\x9a\x05\xd0\x64\x74\x6d\xe0\x75\x6e\x62\x8a\xb1\x94\x74\x84\xcd\x4d\x24\x6b\x4e\xa2\x20\x68\x6d\xe4\x4a\xec\x1b\xb0\xc7\xea\x81\x65\x22\x5f\x0a\x5a\x53\xf6\xbd\xb5\x2f\xd4\x7c\x64\x0c\x72\xbb\xcb\xa9\x8f\xbd\x53\x03\xda\x10\x9b\x37\xd2\xf5\xa7\x89\xd1\xd3\x64\xfd\x73\x53\xd5\xe8\x98\x78\xb0\x44\xf6\x26\x53\x37\xa7\xb8\x8f\x30\x79\x35\x94\x8b\x8c\xcd\x87\xab\x8c\x6f\x32\x7a\x27\xee\x08\x66\x35\x05\x6d\x88\xa9\x86\xf9\xc8\xa2\x2c\
x96\x12\x9a\xc2\x1d\x1e\xaf\xc1\xbd\xb2\x18\xfa\x02\xc8\x5b\x8b\x8a\x8f\x80\xcd\x95\x1b\x62\xbd\x89\xe5\xca\xcd\xca\xc5\xc2\xa2\x13\x59\x36\xb0\xdf\xbf\x12\x03\x6d\xa6\xd9\x1a\xd4\x07\x6e\x11\xdb\x9f\xd6\x42\xff\xfa\x0d\xfb\x0a\x70\x82\xff\x4e\xe0\xdf\x71\x16\x23\xf8\x1f\x14\xf9\x03\xa7\xff\x24\x09\x9a\x26\xd9\x18\x0e\xbe\xfe\xf1\xd7\x63\xd0\xc1\xc0\xdd\x31\x76\xd6\x5b\xd2\x76\x60\x1b\x9a\x72\x13\x13\xa0\x39\x8a\x7f\x06\x13\x39\x58\x5a\xe8\x10\x1e\x0d\xb4\xd9\xc5\x2e\xb5\xdb\xf8\x78\x8e\x21\x9f\xc1\x47\x0d\xa0\xa2\x0c\xfc\x69\xd7\x9b\x38\x48\x8e\xa3\x9f\x92\x1e\x3d\x70\x83\xb1\xfd\xbc\xd0\x29\x47\xb8\x89\x82\x62\x69\xfe\x29\x36\x98\x3d\x0a\xcf\x33\xde\x47\x41\x03\x8e\x7a\x06\x03\xeb\x0e\x19\xdb\xc7\x99\x60\x68\x92\x79\x4a\x4e\x9c\xd3\x17\x70\x38\x34\xd1\x10\xda\x86\x79\xbb\xab\x59\x9c\xc2\x9f\x52\x2d\xee\x4c\x46\xde\x36\x8e\xfb\x6c\xb0\x34\xcb\x3f\xc5\x06\xef\xb0\xe1\x66\xe4\x07\x1b\xc5\xbc\x0d\x9d\xe3\x59\xf0\x0c\x74\x02\x77\xc0\x7b\xbd\xe0\x04\x03\x37\x11\x70\x04\xc7\x70\x4f\x21\x20\x4e\x11\x1c\xa6\xec\x3b\xfb\xbf\x8d\x88\x26\x9e\xea\x0e\x02\x9c\x75\x84\x97\x24\x71\x4f\x63\xb8\x89\x88\x27\x28\x9a\x78\x0a\x13\xe9\x72\x73\xc8\x2d\xdd\x56\x2c\x9e\xc5\x79\xe6\x29\xf8\xd4\x40\xd5\x36\xfb\x4d\x4e\xc6\x54\x1f\xa8\x1a\xd2\x6f\x3b\x46\x9e\xe3\x9e\xb3\x3f\x82\xde\x2f\x0c\xee\x17\x6c\x36\x37\xd9\xa0\x70\x9c\x65\x9f\xf2\x21\x04\x33\xd0\x66\x43\x64\xd9\x83\xcb\x25\xa1\x3b\xa8\x00\x78\xce\x14\x09\xf6\x2c\x08\x70\xd6\xde\xe0\xed\xa1\x84\xc2\x49\x9e\x7a\x8e\x1f\xee\xa0\xbd\xaa\x61\xee\x03\x85\x9b\x38\x08\x9c\xa3\x9e\xeb\x15\xde\x55\xaa\xdb\x60\x01\xa0\xc0\x53\x1a\x05\xf0\x00\xd2\xef\xdb\x20\x05\x28\x1e\x7f\x0e\x11\xb1\x37\x74\x13\x4d\x8d\x15\x1a\xbc\x21\xd3\x38\x24\x2c\x8d\x99\x65\x9b\x50\xbb\x33\xe8\x52\x80\x3b\x1a\xcc\x95\x18\xe8\x66\x89\xcf\xb3\x41\xd0\x45\x99\xcf\x9e\x1d\xe2\x1b\xf6\x35\x93\xe8\x66\x6a\xf9\x4e\xbb\xd8\xa9\xf4\xb2\xe9\x62\xbb\x59\xe8\xb4\xe9\x74\x26\x2b\x90\xc5\x72\xaf\x07\xf2\xb5\x42\x89\xad\x08\x79\xa1\x25\xd6\xd2\x2d\xa6\x58\x4d\x36\xc4\x74\xbb\x5b\x29\xfb\x45\x76\x15\x09\xd8\x21\x49\x76\x0b\x19\xa6\x5e\xa6\x2a\xe5\x9c\x58\x4d\x96\xca\xe9\x04\x4b\x02\x81\x22\x99\x3e\x5d\x2d\xa7\x1a\xf5\x62\xa6\x53\x60\x33\x89\x62\xb2\x54\x2b\xe6\xd2\x15\xaa\xc1\x8a\xbd\x4e\xbb\xf5\x30\x12\x72\x87\x44\xa0\x3b\x89\x6a\x4f\xa0\x7b\x54\x47\x10\xb3\xdd\x4e\x1d\xb4\x0a\x15\xd0\xaa\x50\x89\x56\x26\xdb\xaa\xb1\x94\xd8\xaa\x16\x2a\x65\x50\xcb\xb6\xa9\x4e\x3d\x5b\xc9\xd5\xcb\x85\x42\xf6\xa2\xf3\xaf\x22\xa1\x1c\x4e\x00\x59\x4b\x83\x6c\x4b\xa4\x81\x50\xea\xb6\xd2\xad\x2c\x29\xf4\xf2\x42\xb7\x9b\xe9\x76\xdb\xa0\x9d\xed\xf6\x7a\x75\x46\xec\x75\xc5\x66\xb5\x90\xea\xf6\x1b\x42\x87\x61\xbb\x15\xea\x61\x24\xb4\x83\xa4\x56\xed\x65\xb2\x54\x81\x66\xbb\x89\x94\x58\x2e\x14\xba\x34\x5d\x48\x35\x3b\x95\x66\x81\xee\xa4\x3a\xf5\x5a\x25\x0b\x8a\x59\x31\xd5\x25\xc5\x42\x3b\x57\xab\x17\x4b\x62\x23\x93\xc8\x3c\x8c\x84\x71\x3a\xbe\x5e\xed\x65\x73\x45\x90\xcc\x91\xe9\x72\x8d\x4a\x74\x8b\xe9\x52\x39\x55\x4c\xe7\x5b\xe5\x6a\x0b\x64\x7b\x64\xbf\x94\x6e\x64\x2b\xe5\x56\x52\xac\x08\x8d\x0e\x5b\x4b\xb2\x95\x2e\xc8\x7e\x0d\x5b\x5c\xb7\x9b\xe4\xdc\xd1\x5a\xaf\xe0\xfa\xb8\x57\xe2\x4f\x0b\xdd\x2e\x3c\xfb\x86\x31\xdf\x30\xdb\x5c\xa2\x07\x6c\xe9\xb2\xa4\x2c\xb4\x25\xb9\x73\xf0\x53\x3b\x92\x4d\xa4\x68\xf6\x00\xea\xf3\x11\x9c\x2d\xa7\xd4\xce\xfa\xc5\x56\xfd\xeb\x7b\x74\xe7\x39\x76\x70\x05\x7b\xab\x91\xfa\xfa\x1e\x1a\x7b\x8e\x7d\x67\x79\x33\x68\x6b\x2b\xb4\x43\xe6\xfc\x3f\x5c\x11\x5b\x34\xea\x71\x96\xe8\xf8\x86\x91\x8f\x2a\x47\x50\xe9\x57\x58\xed\xd8\x97\x7f\x9d\x88\x08\xd0\x2c\xc3\x73\x38\xcb\xb1\xe4\x37\x8c\xf8\x86\xed\xfa\xec\xdf\x5f\xdc\x71
\xe3\xcb\x0f\xec\x0b\x41\xfe\x89\xbb\xd7\x97\x6f\xd8\x97\x63\x81\xe1\xee\x9e\xd8\xaa\x1f\x7f\xb4\xb7\x73\xe7\x47\x7f\x87\x1f\x5b\xb8\x85\x86\xbb\x36\x61\xf5\xee\xcb\x7f\xae\xf5\xbd\x9f\x33\xe0\xe3\x0c\x38\x12\x3f\xe7\x0c\xbf\xce\x59\xab\x91\x0a\xcb\x59\x38\x9d\x0e\xcf\xd9\xee\x9f\xcb\x9a\x9b\x2e\x75\xa8\x08\x39\x74\xee\x38\xda\x87\xd6\x5f\x7e\xec\x40\x7f\xb1\x0c\x5d\x19\x5c\x13\x9a\x64\x2c\x87\x23\x7b\x70\x4d\x5b\xdc\x67\x83\x04\x7b\x72\xe7\x96\x74\xf7\xf0\x83\xb4\xee\xec\xde\x2d\x20\x27\xb8\x22\xe8\x27\x3f\xe6\x8f\x54\x6a\xc2\xd7\xf5\x54\x50\xd7\x87\x0c\x68\x1e\xe8\x7a\xf2\x76\xd7\xe3\x77\xba\xde\xeb\xb7\x57\xba\xde\xd3\x9e\x57\xbb\x3e\x64\x3f\xdd\xea\xfa\xf7\xb6\xfa\x83\xa7\x26\x08\x9e\xe1\x83\x3d\x35\xfd\x27\x47\xfd\xc2\x9e\xda\xe3\x2c\xc8\x53\x83\x5f\xdb\x53\x7b\x9c\xbd\xa3\xa7\x26\x02\xcc\x15\xdc\xf3\xd4\xe0\xbd\x3c\xf5\xbe\xa1\x1b\x7d\x45\xef\x83\x9f\x77\x98\x5e\x0f\xbc\xa3\xc3\x0c\xea\x81\x33\x11\x5f\xf6\x00\xb8\xe2\x30\x2f\xc5\x17\x89\x33\xfc\x69\xbe\xcb\x6f\x07\xf4\xfb\xd9\x01\x78\xbe\x17\xce\xbc\xe6\xf3\xbd\xf0\x6c\x34\xf2\xf3\x83\x07\xaf\x17\x98\xf7\xb3\x85\xa0\x5e\x38\x13\xf3\xf3\xde\xe8\x85\xe0\xe1\x61\x6f\xf4\x01\x3d\x40\xb0\x04\xcb\x73\x2c\x09\x38\x22\x78\x0c\xc7\x7f\xcd\x49\x09\xe5\x63\x2d\x70\xba\xf5\x6b\xb2\x76\xec\x35\x96\x64\xd9\xe7\x7b\xed\x13\x87\x5e\xb4\x8f\xb5\x67\x7b\xed\x13\xb3\x06\xf6\xac\x71\x04\x75\x2d\xb3\xf1\x8b\x2a\x24\xe5\x63\xed\xbf\xc8\xd6\xc8\x03\x6b\x34\x7d\x6d\x96\xf3\x8b\x2a\x24\xed\x63\xed\xbf\xc8\xd6\x76\x1e\x12\x70\x1c\xc5\xe3\x34\xcf\xd1\x6e\xaf\xe1\x0e\x6f\xba\x36\xd5\x1c\xd6\x78\x00\x48\x92\x05\x38\xc9\x70\xf4\x9f\x14\xcb\xd2\x1c\xce\xfe\x52\x9a\x09\xf6\x3c\x12\x38\xbe\x1f\xbb\xff\xdb\x78\x24\x0f\x3c\x52\xc4\x7e\xa4\x0b\xc5\xe3\xaf\xa0\xab\x04\x07\xf6\xe3\xc2\x7f\x1b\x8f\xd4\x37\x8c\xa3\x39\x9e\x27\x39\x86\x73\x9d\xa8\xcb\xa1\x65\x43\xd3\xd6\x66\xc3\x81\x04\x75\x38\x93\x91\xdf\xe7\x3c\x8c\x80\x39\x47\xf0\xa8\x2b\xf3\x05\xe2\x61\xf9\xd9\xf5\x9a\xcb\x90\x7b\x30\x8a\x37\xf9\x76\x17\x41\x06\x13\xb4\xfd\x18\x6b\xa1\x3d\xaa\x28\xc0\x7a\x4e\xef\x9d\xa4\xec\x21\x78\x6f\x29\xfb\xf8\x79\x50\xca\x1f\x60\xaf\x2e\x55\x0c\xe7\xb9\xdd\xf7\x92\xb2\x8b\xe0\xbd\xa5\xec\xe3\xe7\x31\x29\x87\xcd\x8d\x3c\xe3\xf9\x1d\xaa\x68\x9c\xf7\x1c\xff\x3b\x49\xd9\x43\xf0\xde\x52\xf6\xf1\xf3\x98\x94\x43\x16\x69\x3c\x15\x43\xb8\x54\xd1\xb8\x37\xf4\xbc\x97\x94\x5d\x04\xef\x2d\x65\x1f\x3f\x0f\x7a\x8c\x90\x19\xa6\xff\x3c\x50\x93\x14\x74\x3a\x48\xd8\xa5\xf2\xfd\x09\x21\xa7\x85\x29\x0a\x47\x31\x24\xc9\xaa\x0a\x8f\x00\xcb\x50\x24\x4f\x51\x8c\x02\x20\x4d\x01\x55\xe2\x59\x49\x66\x20\x24\x18\x1a\xc7\x11\x54\x68\x59\x61\x10\x4f\x00\x8e\x65\x59\x59\x41\x0a\x22\x70\x56\xfe\xba\x83\x81\x10\xcf\xb0\x12\xc5\x49\x32\x4e\xab\x48\x61\x01\xae\x50\x40\xc1\x71\x96\x20\x39\x85\xe3\x65\x05\x47\x38\x43\xb2\x8a\xa2\x52\x14\x8f\xb3\x08\xec\xe2\x31\x92\x51\x70\x85\x95\x19\x8e\xf9\xea\xf4\x2b\xf0\x55\x70\xb1\x4e\x05\x17\xe3\x2f\xec\x62\xbd\xc2\x2e\x86\xe7\x19\xfa\xfe\x5d\x6f\x71\x91\x64\x59\xe6\x1b\x46\xec\xfe\x8f\x5f\x5c\xdf\x30\x82\x77\xfe\x7a\x7f\x0e\xbf\x1e\x3e\xec\x68\x13\x04\x41\x48\x95\xd9\x79\x9f\x6d\x14\xd7\xe9\xb8\x2d\x0f\x89\x24\x2e\xc8\xcd\x71\x6f\x56\x4e\x0d\x7b\x24\xa9\xd6\xf3\x89\x58\x3e\x9f\x4e\x49\x35\x82\x83\x99\x14\x9d\x17\xd6\x80\x43\xa9\xe4\x34\x05\x88\xe6\x72\x53\x35\x67\x1d\x26\xd3\xe7\x8d\xba\x54\xce\x4c\xaa\xa3\x5c\x5c\x81\x53\x7e\x48\x6c\x4b\x3b\xd0\x42\xb7\x6f\xd3\x85\xa1\x70\xb8\x90\xd0\x2e\xbe\x6d\x56\xeb\x37\x61\xb2\x6e\x25\x48\x66\xb9\x5d\x76\x19\xa2\x92\x78\xdb\xf4\xf3\x96\x56\xd3\xe3\xbd\xba\x91\xcd\xb0\x7a\xaf\x00\x46\xa6\xb4\x7e\xab\xb2\x3c\xdf\x5b\xa9\x85\xa9\x12\xaf\x4c\x0
9\x4a\x95\x01\x5f\x45\x85\x59\xab\x27\x8c\xf8\x22\xdf\x4a\x34\x28\x3b\x5b\xa8\xed\x20\xf7\xca\x54\x11\xbe\xcd\x41\xed\x88\x4c\x50\x2c\xe1\xfc\x4a\xed\xfe\xf4\x85\x2e\x41\xd5\x04\x21\x85\xe7\x85\x5f\xed\x72\xb5\x8a\xf8\xe6\xec\x18\xba\x62\xfe\x7e\x8b\xa0\xa3\xd1\xe6\xaf\x32\x0e\x09\x12\x70\x32\x64\x54\x45\x92\x54\x1a\x92\x94\xa4\x90\x04\x04\x2c\xab\xd2\x2c\xcf\x12\x94\x2a\x11\x9c\xc2\xd3\x12\x89\x53\x08\xe2\xb2\xc2\x49\x8a\xc4\x2b\x32\x8b\x38\x4e\x55\x28\xb7\x98\x86\x0c\x56\xee\x6b\x3a\xcf\x92\x1c\x4b\xdc\xbb\x09\x08\x8a\xa5\x38\x92\xa1\x38\xfc\x96\x3d\x30\x0f\xda\x43\x29\x39\x2a\x19\xdb\x69\x5c\x5a\x11\x06\xbe\x6a\x66\x34\x25\xde\xa6\xe5\x56\x6c\x33\xee\x77\xde\x12\x15\xa3\x04\x8a\x80\x26\xb3\x4c\x2e\xde\x6a\x4a\xf5\x49\x7c\xd3\xad\x8c\x60\x03\x2c\x32\x1b\x92\x4d\x67\x47\xe5\xb7\xf5\x14\x8d\x57\xca\x54\x59\xae\x8d\x86\x42\xad\x69\x4e\x13\x58\xce\xe9\x3f\xc7\x1e\x4e\x54\x54\xd7\xca\x19\x39\x91\x4d\xf4\xd9\x74\x2d\x35\x17\xf0\x59\x8c\x64\xf5\x8d\xa2\xa8\x6c\xce\xa4\x27\x7d\xa2\xc6\x2e\x66\x72\x7d\xb6\x79\xeb\x4c\x8a\x05\xa9\xd0\x4c\xc9\x00\x6a\xeb\x4e\x72\x36\x4c\x0a\x09\xa3\x98\x61\x87\xf2\xcc\x62\x6c\x19\xb1\x22\xa7\x4c\xc4\x95\xf4\xa6\xe5\xfb\x0e\xe4\x56\x80\x3d\xf4\xfd\xea\xf4\xdf\x60\x0f\xa4\x33\x3c\x3f\x61\x0f\x54\x34\xba\xfc\x95\x67\x01\x2d\xc9\x8a\x22\xa9\x0a\x02\x10\xb0\x3c\xab\xb0\x34\x29\xb3\x24\x4f\x23\x85\xe1\x38\x1c\x47\x94\xa2\x02\x16\xf1\x80\x94\x20\x4f\x01\x1e\xd0\x34\x43\x22\x92\xa5\x09\x16\x72\xbb\x51\x86\x72\xfe\x05\xe8\x36\x75\x55\xe5\x69\x1c\xe7\xae\x1b\xc4\xfe\xae\x9b\xc2\x62\x78\x82\xa3\x6e\x59\x04\xf9\xa0\x45\xe4\x65\x7d\x55\x06\x71\x52\x33\x66\x3a\xaf\x34\x37\x6f\x3a\x22\x8c\xa1\x40\x97\xb9\x19\xd3\xa8\x30\x7a\x72\xa2\xb7\xc7\x14\xd9\xea\x2e\x4a\x89\x4a\x01\xa1\x18\x3f\x19\x16\x26\x69\x00\x7a\x93\x2e\x5b\xef\xe4\xd3\xcd\x54\xcd\xa0\xb2\xb8\x60\x12\xfd\xd6\x7a\x64\x25\x60\x7e\xa8\xba\xc3\x82\x63\x11\x27\x3d\x5a\x1a\x67\x2a\xad\xe9\x7a\xd8\x63\x17\x75\x09\x6f\xdb\x64\x6b\x59\xa7\xfb\x8b\x29\xa2\x27\x60\x31\xce\x72\xec\x22\x3b\x12\xea\x68\x8d\x2f\xd6\x70\xa8\xf7\xd5\x9e\x3d\x4d\x4d\x93\x31\xc6\x10\x52\xfa\x96\xa3\x38\x32\x06\xda\x53\x5b\x15\x49\x88\x37\x1a\x7c\x36\x9b\xa7\x34\xad\xed\xa0\xaa\x05\x58\x44\xab\x15\xa4\x55\xbf\xb8\x45\x50\xcf\x5a\x04\x19\x8d\x36\x7f\xc5\x29\x44\x22\x09\x41\x95\x57\x08\x89\x43\xb8\x4a\x53\x34\xa7\xd0\x24\x4e\x30\xbc\x8a\x70\x85\xe3\x55\x42\x52\x15\x85\x95\x14\x4a\x91\x39\x85\x92\x70\x9a\xa3\x24\x40\x32\x0a\xc1\x22\x82\xbe\x65\x11\xe4\x55\x9d\x67\x28\x8e\x67\xef\xde\xf5\x32\x9f\x04\xc7\x71\x37\x2c\xe2\xd1\x90\x49\x68\x8e\x99\x98\x49\xe3\xd2\x78\xc8\xb7\x3a\xe5\xd6\x3a\x3d\x8f\x0f\x41\x9e\x93\xe2\x0a\xe2\x9b\xf2\xb8\x65\xa5\x47\xc5\x1c\xc0\xbb\x54\x7b\xbc\x7c\xd3\x8b\xba\x50\x55\x63\x84\xb5\x6e\x01\x75\x43\x40\x7b\xd9\x54\x09\x75\x5b\x41\xe9\x4d\x8a\x1f\x6f\x8c\xa6\x3d\xca\x76\x13\x73\xc3\xe9\xc0\x9d\x41\xe4\xd7\xbb\x4f\xb9\xc3\x1f\xc1\xd1\xd9\xe3\x8f\xc2\x5a\xa8\xd6\x26\x6e\x87\x6f\xd9\x5a\x17\x55\x26\xb5\xda\xb0\x95\x19\x66\x6b\x13\x19\xc6\x16\x4a\x79\x99\x4d\xf5\x97\x1d\x31\x39\xb1\xc5\x7e\xaf\x92\x49\xae\x97\x69\x2d\xbe\x8a\x1b\xcb\x55\xc1\x5c\x51\x1c\x5e\x6f\x16\x67\xa5\x54\x3a\x5b\xef\x94\x96\xeb\x58\x01\xc4\xd3\x5a\xb3\x94\x48\x0d\xdf\x66\xb1\xf6\x38\x1b\x73\x0c\xa6\x14\x60\x30\x15\x31\x48\xe9\xfe\xff\x66\x30\x20\x1a\x65\xff\xca\x90\x0a\xcf\xa9\x34\xc9\x20\xc4\x70\x0a\x21\x01\x56\xa2\x25\x8e\x57\x01\x09\x55\x9a\x24\x08\x89\xa5\x19\x1e\x02\x4a\x85\x2a\x41\xe1\x24\x54\x70\x89\x06\x12\x43\x92\x12\xce\x4a\x88\xe7\xbf\x3a\x45\x1a\x74\xa0\xee\x83\xab\x26\xc1\x32\x2c\x77\xf3\xe6\x6e\x04\x71\x33\x79\x14\xcd\x83\x1b\xe6\x42\x5f\x31\x17\x77\xc6\x
ec\x88\x37\x53\xed\x8f\x89\xf2\x92\x36\x70\x29\xcf\x76\xa8\xd9\xb6\xb2\x6a\x6d\x32\x64\x7b\x6e\x4c\x62\xab\xb4\x50\xb1\x93\x44\x01\x94\xd8\x04\xcb\xf4\x47\x33\x55\x12\x53\xf3\x6c\xb5\xa7\x50\xf4\xac\x26\x37\xd9\xee\x44\xcd\x95\xd6\x8d\x2e\x3f\x4c\xa7\xf8\x98\xda\x81\x36\x4b\x30\xb5\x5c\x47\x76\x40\x3b\x96\x72\x32\xb9\xa8\xf1\x53\xae\xd8\xc8\x70\x12\xdd\xc1\xa5\xe5\x46\xb4\x17\x96\xdd\xae\xbd\xb1\xb9\x76\x3c\x6b\xa6\xa6\x19\xd1\x56\x04\x6a\x9a\x14\x53\xdd\xf5\x1c\x0c\x27\x06\x80\xbd\xca\x78\x9b\xe1\x3b\x65\x9d\x49\x0a\x59\xa3\x44\x13\x85\x4d\x3e\x5d\xe0\x87\xb8\x51\x59\x16\xc9\x89\xc5\x69\x5d\x07\x7e\x2e\xc0\x12\xb2\x78\x90\x36\xed\x2d\x41\x10\x12\x93\x77\x55\xdb\x77\xb8\x5c\x6d\x7a\xce\x12\x88\x68\xb4\xd8\x39\x2b\x01\xf3\xf4\x86\xe0\x59\xfc\x3b\x4e\x7c\xc7\x09\x0c\xc7\x7f\x38\xff\xae\xaa\x2b\x8f\x13\xe4\xf5\x60\x69\x7f\x97\x02\x3c\xc5\x33\x2c\xe0\x6f\xcd\x97\xef\xaa\xf2\xa7\xbc\x12\xdd\x82\x46\x6d\xe3\xdb\x46\x21\xc1\xa6\x66\x29\x3e\x0b\xf0\xcd\x38\x11\xb3\xf0\xa1\x6d\xad\x73\xeb\x37\xa2\xab\x34\x3a\x3d\x98\xc8\xc3\xb4\xa3\xca\x62\x80\x2a\x07\x5f\xbf\xb8\x2a\xe3\xa7\xaa\x7c\x27\x21\xf5\xc0\x99\x99\x61\xf3\x53\x57\xce\xa9\xb8\x56\x2a\x0e\xae\xd8\xdd\x1d\x30\x17\x9b\x0d\xc2\x81\xf1\x57\x2f\x87\xa4\x86\xf2\x17\xd4\x86\x03\x43\x1f\xc0\x30\x38\x43\x87\xa6\x86\xf1\x81\x09\x49\x0d\xbb\xcf\x33\x50\x38\xed\x24\xdf\xc3\x81\xe1\x0e\x60\x28\xc6\xc9\x2e\x87\x03\xc3\x1f\xc0\x70\xac\x53\x81\x11\x0e\x0c\x81\xfb\x0a\xc2\xa8\x90\x70\x2e\x6a\xe6\x42\xc2\x01\xbe\x52\x27\x3a\x24\x1c\xd2\x5f\x0d\x16\x12\x8e\xbf\x88\x27\xac\x7c\x68\x7f\x31\x50\x48\x38\x8c\xaf\x3c\x25\xac\x7c\x58\x1f\x9c\x90\x46\x41\x70\xfe\x9a\x92\x90\x70\x78\x5f\xdd\x46\x58\x3f\x88\xfb\x6a\x23\x42\xf2\x05\x2e\xea\x0f\x42\xc2\x01\xe7\x6b\xe2\x4c\x48\x30\xe4\x39\x98\x90\x5a\x08\xa8\xf3\xb5\xe3\xb0\xd4\xf8\x96\xa0\x43\xea\x20\xf0\x2d\xe9\x86\xa5\x86\xf5\x2d\x3d\x87\x04\xc3\xf9\xd6\x22\x43\x82\xe1\xcf\xc1\x84\x64\x8a\xc4\xcf\xd7\xec\xc2\x82\x21\x7c\x2b\x8d\xd1\x1c\xa1\x1d\xc9\x46\xdb\xdb\x87\x6f\x7d\xc3\x76\xb4\x3f\xb6\xf3\xf6\xca\x49\xd2\x2f\x47\x6c\xa7\x1a\x7f\x1a\xa2\x1d\xbe\xb8\x4b\x31\x84\xbb\x78\x6b\x1b\x2f\xad\x4e\x7f\xc3\xbe\xa8\xa6\x31\x7d\xb5\xba\x7f\x0e\xed\xd1\x97\x1f\xd8\x3f\xff\xf5\x0d\xfb\xb4\x9b\x83\x9d\xca\x7e\xf7\x38\xec\x2b\xbb\x12\x15\x64\xd9\xda\xcc\xd5\x8e\xa9\x36\x73\x1a\x9c\x6f\xc2\x71\x1f\x0f\xde\x08\x75\x72\xef\xf6\xa6\xc3\x93\x86\x91\xd4\xef\xbd\xc3\x39\x15\x57\x95\x71\x1f\xa1\x1f\xbe\x70\x9f\x57\x19\xff\x7d\xa5\x80\xc1\xa7\xa4\x9f\x6a\x5f\x64\xa0\x92\x82\x3b\x4a\x0a\xfe\xa7\xa4\xe7\x4a\xea\x4d\xdc\x0e\x5f\xa8\xcf\xab\xa4\xe7\xca\xf8\xa9\x2a\xbc\x03\x75\x91\xbc\xe7\x30\x71\xfc\x7f\xda\x78\x08\xe7\x4f\x26\xee\xc7\x2f\x8c\xa3\x8d\xae\x32\x3a\xe7\xba\xb9\x3d\xcf\xe3\xce\xff\xbe\x9c\x29\x04\x38\xb3\x6d\xf7\xa4\xfd\x1d\xf3\xff\xfe\xe2\xec\x2a\x25\xbe\x61\x5f\x76\x62\x27\xf0\xff\x9c\xef\xb1\xc3\xbf\x61\x5f\xdc\x53\x6b\xaf\x6c\x0b\x3d\xbd\x77\x53\xf6\xee\xd1\xb8\xd7\x77\x3d\x9e\x02\x8a\xac\x6f\x22\x3e\xdb\xe9\x7a\xdf\xb8\xd9\x90\xe3\x17\x10\xdc\x37\x1c\xe9\x5c\xbe\xbe\xa1\x6e\xf5\x0d\xe3\x75\x0d\x7d\xaf\x67\x02\x36\x20\x9e\xc9\xdc\xbf\xc7\x31\xb0\x43\xee\x77\x5d\x24\x25\xe6\x1f\xda\x37\x6e\x8a\xe9\xf8\x85\x0b\xee\x1b\x96\xe1\x01\x89\x73\xbe\xbe\x61\x6e\xda\x0d\xf9\xc1\x76\x13\xba\x0f\xa3\x36\xaf\x5f\x59\x2d\xce\x33\x7d\x87\x2f\xb8\x3b\xb8\x9f\x8e\xed\x2f\xec\x16\x3f\x8e\xed\xe1\x25\xfc\x89\x77\xda\xbd\xc3\xb9\x5a\xd7\x7b\xca\xcb\x81\x1e\xbe\x30\xef\xd4\x53\xe1\xe3\x9b\x4f\xbc\xbb\xee\x1d\xce\x5f\xbb\xda\x53\xfb\x5d\xb0\x87\x2f\xe0\xb2\xa7\x5e\x88\x75\xff\x67\x53\x91\xd9\xd4\x7e\xe7\xeb\xe1\x0b\x77\xd9\x53\x91\xcc\x6c\xfe\x67\x53\
xe1\x6d\xea\x6c\x65\xe2\xf0\xc5\x9d\x84\x32\x8f\xed\x26\xb4\xcd\xa5\x65\x23\xf4\xaa\xd5\x38\x60\x8c\x97\x8f\x50\xf9\x1c\x36\xf6\x01\x11\xc6\xd9\x1e\xde\xc3\x17\xfc\x27\xf7\xdc\x0b\xae\xf7\x73\xf4\xdc\xfb\x4f\xb5\xcf\x77\x26\x1f\xbe\x30\xa1\x7b\x2e\xbc\xff\x3b\xed\xb9\x17\x5c\xf1\xe7\xf0\x96\x11\x1f\x4d\x7c\xbd\xe7\xbc\xf5\xce\xc3\x17\xf0\x93\x7b\x2e\x32\x6f\xf9\xb3\x7a\xee\xfd\xbd\xe5\xe9\xd2\xf0\xe1\x33\x77\xb2\x73\x51\x5d\xce\x94\xfd\xf9\x5d\xe1\xce\x6c\x76\xe4\xe2\x9e\x9c\xfc\xf2\xb4\xf7\xee\x36\xca\x17\xcf\x96\x7e\x46\x6a\xde\x12\xf6\xe1\x33\xf5\xbe\x52\x7b\x21\x01\xfc\xa9\xa4\xe6\x2e\xb5\x1f\x3e\xe3\xef\x2a\xb5\x17\x3c\xc0\xa7\x92\x9a\x57\x12\x70\xf8\xcc\xbc\xab\xd4\x5e\x18\xf1\x3e\x97\xd4\xdc\xd2\x85\xc3\x67\xf0\xbe\x16\x1a\x3e\xc2\x7b\x77\xa9\xdd\x29\x83\x08\x78\xfb\x73\xd8\x12\x08\xef\x0d\xd0\x81\xe5\x0f\xf8\xd5\x2d\xcd\xe4\x7e\x06\x4c\x90\x87\x0a\x6a\x77\x0f\xf4\x49\x49\xb5\x6d\x2e\x91\xbb\x01\x9d\x70\x77\x83\x52\x0c\x41\x30\x1c\x83\x53\x14\xc1\xb3\x04\x4f\xb3\x2c\x7b\x45\x43\x02\xa9\xf2\xd6\xc1\xaf\x53\x45\x1c\x88\x00\xc7\xba\x6e\x97\x58\x12\xec\x7f\x50\xa1\x6e\xed\xc8\x22\x5c\xd2\x7c\x34\xf1\xdc\xad\x12\xb0\x5b\x64\x11\x57\xc9\x3a\x24\x76\x08\xda\x5b\xf5\xde\x1f\x04\x70\x42\x96\x2b\x2c\xda\x09\xbf\x40\x20\x59\x0f\xa9\xc5\xfd\xf7\x18\xbf\xa0\x28\xc1\xaf\xa7\x0b\xac\x6f\x06\xd7\xeb\xfa\xee\x02\x02\x3e\x40\xd7\xea\xb3\xee\x02\x22\x7d\x45\x13\xa1\x01\x51\x3e\x40\xa1\x59\xf3\xaf\x90\x87\x06\xc4\xf8\x00\x85\x66\xed\x62\x95\x34\x2c\x20\xce\xbf\xa4\x17\x16\xd0\xc5\xfa\x53\x58\x40\xe7\x15\xcf\xf8\xf5\x5a\xca\xfb\x90\x2e\x16\x3f\x42\x43\x02\xbe\xe4\xfc\xb5\x8a\xca\xfb\x90\x48\x7f\x9a\x3f\x34\x24\xca\x97\x86\x0e\xad\x95\xe7\xb5\xcf\xe0\x15\x89\x33\xbe\x84\x6b\x78\x39\xb1\x3e\x48\xa1\x4d\xe5\xbc\x02\x9a\x7a\x45\xe2\xbc\x2f\xd5\x15\xde\x57\xe2\xbe\xd4\x4b\x68\xee\xce\xeb\xa0\x5f\x31\xe0\xb3\x4a\x68\xee\x7a\x45\xeb\x7d\x40\xe4\x39\xa0\xd0\xda\x74\x56\x0d\x4d\xbd\x42\x91\x6f\x02\x19\x5a\x2b\xcf\x2a\xa2\xf1\x57\x28\x62\x7d\x93\xb3\xd0\x80\xb8\xf3\xf9\x4a\x78\x8a\xf8\x73\x40\xe1\x87\x6f\xfc\x7c\x2e\x10\xda\x48\xce\x6a\xa3\x81\xcb\xda\x73\x6f\xf9\x7d\xcf\xea\xe8\x7b\x6f\xec\x7d\xa6\x3e\xfa\xf4\xad\x82\x51\x47\x7d\x47\x79\x7e\x65\x28\x04\x19\x45\xe6\x71\x1e\x97\x39\x85\x00\x00\x10\xac\x4c\x41\x19\x97\x49\xc8\x03\x96\x52\x19\x1a\xa7\x48\x99\x07\x10\x57\x24\x46\x41\x1c\x45\xb3\x00\x71\x0a\xc5\xcb\x0a\x4e\x03\xc6\x7d\x13\x17\xf1\x4a\x9a\xfa\x64\xdb\x32\xb5\xdf\xcd\x79\xfd\x30\x25\x9e\xe6\x6f\x1c\xb5\xe4\xde\x3d\x8b\x39\xdd\x6d\xa0\x05\x66\x8c\x34\x72\x3c\x35\x72\x5c\x33\xa3\xa7\xe2\x68\x28\x93\x6c\xb5\x6b\x67\x0b\x85\xb7\x4e\x9b\x5b\xb7\xb5\x7e\x02\x26\x97\x74\x91\x76\xce\x40\x72\x0f\x5d\x71\xb6\xed\x27\x7d\xbb\x14\x13\xc7\x8f\xce\x66\xe2\x44\xbb\x5d\xf6\xf6\xd7\x27\x89\x65\x42\xc9\x1a\xcd\xe5\xb0\xb4\xaa\xd9\x29\x36\x31\xca\x15\xc9\x32\xe2\x95\x76\x55\xcd\xe4\x62\x79\x8d\xce\xaf\x5a\x95\x58\x5f\xb0\x59\xe7\x58\x00\x21\xcd\x23\xef\xd1\x94\x3d\x5c\xad\x53\xcb\x4a\x47\xa8\xf1\x6c\x9d\xa8\x37\xed\x96\xb2\x2e\xa7\xb2\xf3\x54\x3c\xd9\x42\xf3\x37\xa5\x56\xed\xea\xc6\x4c\xd6\x8a\x6d\xa7\x7d\xab\xdd\x6e\x79\xbb\xa8\x8d\xea\xba\x17\x4f\x2b\x71\x39\xb7\x2d\xb7\x16\x71\xb4\xc8\x56\xde\xe6\x0b\xb6\xa7\x18\x6b\x59\x6d\xbc\x4d\xb2\x6a\x3f\xdd\xae\x27\x0b\x40\xf4\x76\x84\xe6\xb5\xce\xd0\xbf\xf7\x12\xb2\x93\xd5\x64\xed\xb0\xb7\x4d\x0e\xdf\x66\x28\xbe\x06\x85\xca\x62\x89\x38\x55\x46\x46\x7b\xcd\x75\x61\x7a\x2b\xb6\xb6\x02\x51\x93\x87\xd3\x62\xac\x21\xd9\xcd\x3a\xaf\x6d\x7b\xd2\x68\x28\x19\x90\x67\x53\x68\x9e\x4e\x71\x6f\xcd\xb6\x8c\x2f\x5a\x6b\xa8\x98\x15\x5d\xda\x70\xbc\xd2\x9c\x66\x73\x99\x62\x42\xf8\xfb\xef\xaf\xa7
\x5b\x72\x33\x27\x5b\x59\x8f\x1f\xcb\xc7\x1f\x5d\xb9\x48\xd3\xe1\x94\x68\x03\x65\x48\xb7\x89\xe9\x82\x40\x7a\x49\xce\x10\xf6\x66\xdc\xe8\x15\xfa\xfc\x5a\x1c\x1a\x8d\x04\x44\x1d\xae\xa5\xa5\x8d\xe3\xa3\x8e\x6c\xd3\xe9\x76\xdb\x3d\x76\x41\x28\xa4\xb8\x4c\x75\xd3\x55\xc9\x64\x69\xdc\x2e\xac\xc8\xc5\xe8\xcd\x62\xe0\x12\x64\x61\x29\x9b\xc5\x2d\x3a\x41\x82\x7a\xbb\x55\xd3\xec\x91\x77\x10\x02\x8a\xcf\xdd\x47\x7f\xb9\xbe\x4d\xb0\xb1\x89\xf7\xe8\x51\xde\xb5\x83\x2e\x3b\x47\x07\x25\x4e\x3a\xdf\x5c\x94\x99\x22\xaa\xc0\xe1\x78\x53\x82\xad\x2a\xcf\x24\xde\x54\x8b\x47\xb8\x6c\x98\xe5\x7e\xf7\x2d\xd1\xc9\x4f\xd2\x46\x61\xaf\x1b\x82\x50\xa1\xcd\x9c\x7c\xb4\x8d\x84\xaf\xff\xce\xfa\x2e\xe0\x4a\x44\x8c\x3f\x19\x06\x7f\xd2\x3d\x27\xe3\xd8\xd0\x79\x3e\xb9\x34\x48\xc3\xa6\xe8\x45\xb2\x2a\x6e\xe6\xb5\x38\x69\x64\xcb\xb1\x37\x82\xad\x6f\x35\x8b\xd0\xd5\x52\xba\x37\xad\x75\x86\xe6\xb2\x11\x6b\xba\xdd\xdb\x6e\xe1\x1e\xe2\x62\x87\x4a\xe3\x68\x54\x61\x84\x2d\x9f\xc4\xab\x56\x46\x1c\xae\x64\x82\x25\x88\x16\xcf\xf5\xc6\xd4\xb4\x38\x99\xf2\x35\x96\x9e\x24\xc9\x95\xab\x0f\x4b\xb8\x1d\x26\xe2\x87\x6b\x1d\x44\xb3\xf3\x3d\xf3\x73\xe8\xeb\x4c\x98\xda\x3d\xfa\x1c\xf9\xa5\x9d\x8f\x8e\x3c\x13\x4b\x98\x94\xda\xdd\x3e\x48\xe9\xdd\x0e\x34\xdb\x4c\x6b\xb3\x96\x3a\x64\xa6\x9c\x1f\xce\x67\xa4\xd0\x48\x8e\x72\xe9\x39\x2d\x6d\x1a\xb9\x13\xe7\xe3\xe8\xe3\x0b\xfa\xfe\x8a\x7d\x56\x55\xdc\xb5\x19\x57\x1f\x02\x0f\x86\x38\xd1\x57\xd7\x21\xfc\x14\x7f\xd4\x6e\x37\xeb\x1e\x28\x5b\x1a\x76\xeb\x8c\xc8\x1a\xa9\x22\x5e\xac\xc5\xd6\xbd\x46\x92\x7f\xeb\xae\xba\xed\x26\xb9\xd1\xaa\x5a\x6f\xd9\x90\x88\xd4\x6a\x5a\x2b\x22\xf7\xe8\xb2\xa3\x3f\x70\xe8\x29\x07\x31\xe9\xd2\xd7\x3a\xf0\x2b\x64\x68\x23\x6f\xb7\x95\x59\xaf\xd2\x56\xfa\x0b\xbb\x3b\x6f\x66\x13\xb6\x24\xf7\xf0\x69\x72\xaa\xca\x89\x5c\x41\x1c\x76\x66\xfa\x2a\x9d\x1b\xc1\x57\xe9\xab\xd0\x66\x41\xc8\x1e\x55\xed\xa8\xef\x6e\xb7\x1e\x9d\x68\xe0\x75\xd2\x3f\xa1\xc6\x8b\x17\xf4\x27\x91\x98\x14\x2c\x41\x3d\x50\xce\x09\x3e\x7f\x58\xa7\x60\xe9\x06\xe9\x27\xf6\xe3\x1e\xd7\xf0\xa4\xfd\xbc\xa0\x4f\xd7\xfd\x4f\xc2\x4f\x65\x56\x26\xf4\x0b\xd2\xdd\xa3\x1b\xc3\xeb\xcb\x0b\xe3\x5b\x89\xb7\xf4\xe1\x0d\x7d\x39\xb9\x3a\x29\xd9\x1f\xba\x79\xe3\x9f\x70\x6c\x5f\x01\xc9\xb8\x50\xa1\xe8\x5e\x22\x45\xda\xd9\x76\xba\x42\xd4\x49\x01\x2f\xa1\x49\x95\xcb\xd7\x99\x59\x99\x10\x78\xd4\xd1\x94\x6d\xce\x76\xed\xa3\xde\x6e\x37\x3c\x7d\x19\xc6\x13\x63\xae\x4d\xf2\xeb\x71\x8e\x68\x98\x3c\x63\xc8\x6c\x65\x6a\xda\x23\x30\x4e\x6c\xf8\x62\xa5\xa6\xf0\x93\x76\x5b\xcc\x99\xbd\x86\xab\x0f\xec\xd4\x32\x82\xf5\xe5\x78\x5d\xe8\xf3\xc7\xd1\x97\x48\x75\xa6\xcc\x2d\xfa\x8e\xf1\xc4\x71\xfc\x4c\x1d\x6f\x3f\x3b\x3e\xed\xe4\x31\x95\x8f\xf0\x82\xf5\xef\x72\x6c\xdf\x5f\x27\xe3\x63\x68\xfc\x13\x2a\x1c\x7e\x37\x96\xfd\xf7\x7b\x05\xca\xce\x9a\xd3\xcc\x98\xa1\xc3\x19\x3b\xee\xdf\xfd\x2a\x0c\x7e\x7f\xf6\x7c\x32\xc3\xe3\x59\xc4\x90\x0c\x80\x1c\x21\xc9\x8c\x04\xa1\xc2\x40\x00\x54\x24\x21\x04\x01\x22\x19\xa4\x92\x80\xe4\x24\x99\x65\x39\x5a\x21\x81\xa2\x42\x9c\x94\x28\x46\x56\x38\x99\x43\x80\xa5\xa4\xaf\xfb\x85\x8c\x48\x66\x78\xf4\x9d\x19\x1e\x4b\x00\x9a\xb9\x7e\xd0\xd5\xee\xee\x71\x86\xe7\xe5\xf0\x5f\x9d\xe1\xa5\x7c\xdd\x1b\xe5\x0c\x2f\x9b\x5f\xbb\x4a\xf6\xa1\x33\xbc\xa6\x68\x9f\x58\xc9\xc9\x95\x39\xf0\x5f\xcb\x32\x0d\x76\x13\xaf\x15\x93\xf3\xa1\x1d\x07\xa3\x7e\xb9\x3f\xea\x8a\xf3\xf4\x1c\x6e\xe5\x16\xd3\x19\xa3\x06\xf9\x56\x2c\x11\x99\x31\x52\xcb\x19\x56\xb2\x53\x94\x54\x4c\x2e\x7b\xdb\x4e\x7e\xb3\x92\xa5\xb2\xca\x32\x5c\xbf\xfa\x96\xef\x56\x2b\x8a\xd8\x64\x7b\xc9\xf5\x88\x9e\x0b\xc3\xd6\x93\xb3\xbc\x17\xa2\xaa\x13\xd0\x09\x9e\x9a\xbb\x9f\x5e\x88\x7a\x05\x79\x9
6\xf2\xe4\x1d\x8a\x9e\x57\x47\x65\x41\x5f\xc7\xcf\xa6\xe5\x2a\xda\x1f\x3a\xfb\x81\xa3\x54\x5e\xae\x7a\xfa\x7a\x3a\x63\x7f\xbf\x19\x64\xa8\x19\x5c\x84\xf8\x53\x61\xf0\xa7\x8e\x23\x60\xfa\xe4\xd6\xb3\x33\x20\x76\x6a\x0d\xdd\x10\xd1\xf9\xde\xf2\xc1\xf3\xae\xda\x25\x7e\x41\x58\x99\x6b\x4f\x06\xa9\xfa\xdb\x2a\xf8\x99\xde\xf1\xfb\xb3\x11\xda\x2e\x42\x74\xa7\x05\x4e\xff\x24\x6a\x07\xfa\x02\x68\x39\x97\x59\x42\x00\xfb\x13\x8b\xd3\x55\xab\xe7\x7f\xe6\x44\x7f\x7e\xb5\x88\xdd\x3f\x23\x7f\x3a\x62\x0f\xef\x9b\x44\x8d\x5c\x06\xcf\xc8\xbd\x3e\x58\x30\xfa\xad\xb3\xe1\x8e\x33\x3c\xd7\xde\x9e\xd5\x87\x93\xfe\x75\xae\x17\x66\x7c\x47\x7d\x75\xfa\xb3\x18\x48\xef\x51\x7f\x7f\x3a\xbd\x89\x26\x9b\xb9\x4b\xef\x5a\x38\x9f\x11\x3d\x19\x8b\xbc\x42\x5f\x19\x36\xe7\xc1\x33\xe8\x40\x79\x7e\x38\x7d\xc5\x19\x29\xdd\xa5\x2f\x12\xfd\x74\x33\x92\x2f\x8c\x7d\xce\x95\x9a\x50\xee\xf0\xe0\xd2\x93\x09\xec\xef\x48\xf4\x33\x1a\x7a\x13\x65\x1c\xdd\xa5\xd7\xaf\x9f\xbf\xce\x8c\xfd\xe5\x0c\xcf\x0b\xf2\x5d\x40\x0d\xdd\x1a\x2f\x4e\xae\xad\xbc\x36\xfc\xbf\x95\x84\xf3\xf1\x42\x60\x7b\x45\x4e\x60\xc7\xfa\x50\xac\x22\x5c\x69\xb5\xd8\x76\x56\x4e\xd5\x36\x4c\x2d\xbe\xd6\xb3\x0b\x99\x6c\xa5\x08\x1a\xe6\xc9\x9c\x46\x38\xed\x5f\x88\x25\xc5\x32\x94\x8c\x67\x33\xcc\x1f\x49\xdf\x5b\x51\xca\xdd\xa2\xef\xef\xa0\x6c\xc3\x4f\x9a\xed\x0b\x11\x65\x1b\xc6\xad\x70\xf8\xbd\x6c\x83\x3a\x2f\xae\xb2\xbc\x60\x15\xa6\x49\x32\x0e\xd3\x13\x62\x9a\x96\x1b\x93\x8e\x6e\xe6\x37\xcd\x39\x2c\xd3\x45\xb5\x64\xbd\xb5\x60\x99\xe6\x70\xc9\x5e\xe2\xd5\x91\x65\x15\xe8\x71\xbe\x37\xcb\xf0\x1b\x8b\x88\xaf\xcc\xcd\x24\x5e\xd2\x65\x8e\xc6\xeb\x55\x7b\x58\x4c\x66\xa7\x93\x64\xa2\x16\x75\xb6\x01\x87\x84\x24\x51\xaa\xcc\x21\x92\x87\x32\xcf\xb3\x24\x2e\xcb\x24\x43\x02\x46\xc6\x01\xc7\x33\x80\xe0\x69\x06\x32\x2a\xaf\x00\xc4\x03\x84\x78\xc0\x2a\x2c\x03\x20\xc3\xe2\x1c\x85\xcb\xb2\x9b\x6d\x20\x23\xca\x36\x30\xf7\xb2\x0d\x80\x60\xc8\xeb\x67\x6e\xef\xef\x9e\xd5\xe7\xbd\x9a\x6d\xf0\x9b\x61\xa4\xd9\x86\x15\xe3\x39\xbb\x0f\xcc\x36\x38\x39\x7c\xff\x75\xb2\x9e\x2c\xe4\x62\x73\x46\x93\x2b\x45\xbd\xd1\xdf\xb6\x1a\xad\xbc\x10\xb3\xf0\xd4\xa8\x18\x2b\x35\x53\x05\x81\x44\x9d\x15\xa2\xf1\x4c\x35\xab\xae\x0d\x09\x22\x24\x33\xdd\xe4\x54\xc6\xd1\xa4\x61\xe3\xc4\x6c\xdd\xa0\xca\xc5\xa9\xba\x55\x9b\x99\xfe\x8c\x53\xea\xf5\x7e\x72\x58\x6b\x91\x42\xcd\xbf\x9e\xdc\x0c\x8a\x2e\xca\xbb\xdf\x78\x97\xaf\x8f\x98\xcd\x86\x9a\x4d\x46\x88\x5f\x7c\x12\xbf\x10\xbd\x97\x0d\xeb\xe5\x84\x88\xbc\xec\x68\x1d\x0e\xbf\xe7\x65\xdf\x4b\x59\xaf\x79\xd9\x7d\x0d\xfb\x93\x6e\x96\x23\x19\x85\x62\x14\x85\x84\x80\xa1\x68\x1a\x97\x81\xa4\x50\x38\xce\xca\x04\x8e\x43\x5c\xe6\x69\x89\x26\x65\xc4\x72\x0c\x83\xb3\x08\x52\x0c\x4f\xc8\x04\xc4\x39\x9a\x06\x2a\x87\x08\x08\xdd\x53\xe3\x89\x57\x76\xba\x3d\x93\xd4\x25\x39\x8e\xbb\xfe\xb6\x8f\xfd\xdd\xb3\xea\x65\xef\x45\x04\xcf\x05\xaa\x8f\x25\x75\x4f\x26\xf9\xef\x3d\x91\x3d\x04\xa2\x09\x61\xb4\xad\x31\x22\xbe\xe2\xc5\xaa\x50\x96\x87\xd5\x1e\xb4\x39\x3c\x5d\x99\xe9\x52\xbf\x91\x6c\x67\xa7\xcb\x45\xbd\x5e\x05\x68\x53\xd3\xe3\x8b\x5e\x43\x98\xa2\x61\x6c\xab\x53\x2a\xce\x8d\x93\xf9\x64\xa1\x47\xf7\xa9\x4a\xa2\xd8\x7e\x13\x09\x2b\x1f\xcb\xd2\x1d\xb2\x43\x76\xd6\xda\xa5\x1b\x0c\x4c\xb8\xfa\x65\x21\x84\x91\xad\x7b\x7d\x4c\x12\xe0\xaa\xab\x3e\x61\xef\xd9\x24\xed\xce\x55\xba\x82\x38\x49\x3c\x5e\xd4\x41\xdd\x71\xd5\xd1\xe1\x4f\x85\xc7\x1f\x75\xe2\xb1\x24\x04\x94\xb9\xdd\x73\xd3\x51\xe3\xbf\xd4\xd1\xc0\xc4\xe7\xf1\x0a\x4c\x7c\x3a\xd7\x2f\x94\xe8\x5a\x1f\x18\xfd\x44\xa5\x17\xfe\x3e\xf8\x9c\xa5\x21\xc9\xd8\xea\x52\x67\xdd\xeb\xce\x52\x75\x18\x7d\x3d\x9d\x3c\x06\x96\x85\xbe\xb3\xbd\x9c\x86\x55\xcf\xe0\xf7\xc2\x
9a\xf7\x1a\x7c\xa2\x9d\x3c\x12\x2c\x64\x15\x8a\x91\x15\x92\x73\x0e\xa5\x93\x10\x52\xa1\x44\xc8\x80\xa0\x59\x40\x00\x59\xa2\x00\x8f\x23\x40\x31\x00\x92\x0c\xc5\xa9\x38\x4e\x49\x14\x2f\x93\x90\x25\x59\x06\x32\x5e\x54\x03\x22\x8a\x6a\xee\x4e\x1e\x49\x1e\xe7\xaf\xbf\xc6\x66\x7f\xf7\x6c\x2b\xd5\xab\x51\xcd\x8d\xc9\xa3\xa3\xe5\x1f\x93\xfe\x3c\x89\x6a\xb4\x6e\xf6\x2d\xc7\x98\x63\x12\xc4\xfb\xca\xb2\x92\xd7\x73\x23\x2e\xd6\xc9\x34\xca\x45\x6b\x3e\xcf\x13\xa0\x3a\x8e\xa3\x89\x3d\x03\x78\x3b\x91\x62\x27\xf8\xa6\x33\xe6\x41\x12\xbd\x29\x0d\x73\x28\xd0\x2c\xe8\xd6\xc8\x51\xab\xdb\xe9\xb5\x8c\x2a\x5b\xcd\xac\x26\xdb\x6e\xaa\xf7\x96\x5c\x7f\x74\x54\xf3\x31\xa9\xe3\x0f\x88\x6a\xdc\x8f\x6b\x3f\x66\x9f\xda\x5c\x78\x0a\x5c\x66\x83\x9f\x89\x70\x94\xaf\x09\x91\x8d\xf2\xc7\x51\x29\x6a\xf9\xb9\x82\xfa\xbc\xf2\xbb\x0c\x0f\xc3\x2c\x0f\x3b\xd7\x2f\xb4\xdc\xf2\x6a\x94\xf4\x6a\x72\xeb\xbd\xa3\xa4\x77\xa1\xef\x42\x55\x2a\xcd\xd1\x99\x92\x06\x45\x49\x3f\x29\x4a\x11\x22\xb2\x97\xd3\xe4\x4f\x88\x28\xe9\xbd\x06\xb3\x68\xa3\x24\x86\x61\x18\x9a\x81\x0c\x82\x0a\x02\x38\x07\x64\x9a\xe5\x70\x9a\x25\x00\xc3\x2a\x3c\xa2\x28\x9a\x44\x08\xd1\x2c\x47\xc8\x90\x87\x34\x07\x21\x49\xf0\x48\xc2\x55\xc4\x91\x14\x4d\xab\xca\xe1\x1c\x83\xf0\xe7\x5a\xb8\x67\x20\xdc\x0c\x8e\x58\x86\xe3\xae\xbe\x14\xff\x70\xf7\x74\x4f\xa8\x17\x1b\x15\xb9\x6c\x6d\x55\x9b\x48\x05\x90\x15\xc8\x4e\x7b\x5c\x37\x0b\xd3\x71\x17\xc7\xd5\x0c\x67\x15\x73\xec\x14\x17\xeb\xeb\x7c\x27\x2e\x74\x49\xe1\xfc\xed\xd8\x81\xc5\xb4\xa7\x57\xa8\x19\xc8\x09\xbc\xf6\x6a\x9d\xe6\x77\x1a\x26\x26\x94\x54\x17\x01\xb2\x45\x21\x3e\x09\x1a\x9b\x79\xa2\xd8\xaf\x6f\x08\x33\xff\xd6\x48\x17\xf3\xf8\x26\x95\xc2\x97\x85\xcc\x7c\xba\x90\x54\x3d\xd5\x8c\x75\xbb\xb9\x05\xd3\xd2\x52\x89\xb7\x69\x7a\x63\xc7\x32\x95\x58\x77\x91\x36\x6a\x55\xc5\xec\x36\xd9\x39\x61\x14\xfb\xb9\xec\xa2\x59\xaa\x3e\x10\xf3\x9c\x29\xed\x79\x3c\x71\xc2\xb3\x63\x05\xfe\x59\x7d\x42\x8b\x27\xf0\x22\x9e\xcf\x6c\xed\xd1\xba\x4c\xe8\x3d\x1c\x6e\xe7\x06\xc1\x97\xb3\x9b\x55\x31\xb9\xad\xd0\x76\x42\x94\x93\x2e\x8f\xe4\xd0\x36\x2b\xb3\x5e\x9c\x3a\x49\x95\x05\xbe\x74\xf9\x9e\x3d\xbf\x80\xbf\x6c\x6e\x9b\xb5\x17\xf0\x0b\x3e\xfc\x4f\x2f\xa6\x38\x05\xdb\xce\x75\x91\xa5\xb9\x87\x3f\x70\xf9\x32\x11\x5e\x16\x95\xd9\xc5\x5b\xe0\x9f\x92\xc5\xab\x7d\xb1\xd3\x85\x98\xec\x83\xf7\x94\x2c\xfe\xdd\xad\x11\xa4\x6d\x13\x95\xec\xaa\x66\x4f\x2c\x58\x6b\x80\x96\xac\xc0\xad\x8c\x8f\x1a\x5b\xa5\x54\x5b\xf3\x45\x6d\xb4\xe8\x2f\xa6\x24\x5d\xc3\xe3\x3a\xb1\x2d\x2c\xf5\xdc\x70\x2d\xd3\x23\x59\x52\x47\xe3\x95\xce\x8c\xa0\x98\x22\x21\x81\xc7\x18\x05\x26\x41\x23\xc1\x4c\xde\x52\x91\xfb\x56\x55\x42\x8c\xc2\xaa\x14\x2f\x03\x00\x69\x1c\x97\x54\x12\x01\x1c\x67\x09\x96\xe2\x65\x48\x00\x92\xc5\x09\x00\x15\x59\xa2\x29\x55\x91\x65\x9a\x42\x32\x54\x54\x16\x97\x55\xc8\x21\x8e\xf6\xde\xed\x4b\x46\x34\x03\x65\xef\xcf\x40\x01\x7b\x6b\x06\xea\xde\x3d\x3b\x83\xe3\xd5\x19\xa8\xbf\x40\x31\xc2\x19\xe8\x2b\xc5\xbb\x7c\xdc\x8b\x2a\x1d\x8f\x9d\x0a\xd4\xc4\xf4\x81\x9f\x9a\x60\x65\x14\xca\x90\xd9\x15\xd3\x5d\x97\x99\xa1\x2a\xab\xc5\x6e\x1c\xf4\x72\xeb\x71\x3d\x95\x46\x55\xa1\xf4\x56\x69\xd5\xe4\x58\x62\x9a\x6b\xa3\x76\x83\x25\xd6\x4b\x9a\xb3\x98\x62\x86\xd5\x7a\x2d\x35\x53\x13\x34\xe6\xad\x95\x63\x0c\x53\x9c\xca\x40\xc8\x8e\x85\x6d\xf2\x2d\x59\x7b\xa8\xf8\x39\xc2\x59\xeb\x67\x96\xf7\x8d\xe2\xe1\x88\x67\x6b\x97\xeb\x7b\x81\xb3\xb5\x22\xc3\x79\xc5\x2a\x1f\x32\x5b\x6b\x05\x10\xf2\x4a\x31\xaf\xfc\x1e\xf2\x4b\x7f\x72\xf9\x45\x53\x0c\xfd\xea\xf6\xc6\x9f\x33\x3b\x13\xce\xd7\x7c\x3e\xbc\xf8\xed\xfa\x76\x35\xbf\xad\x27\xc8\x72\x3f\x38\xf2\xf8\x6f\xdf\x1e\x9c\x7a\xb5\x7f\x5e\xa0\xef\x5e\xff\x9c\
xd8\x6b\xd0\x75\x62\xaf\x1f\x5d\xcc\x7e\x6f\xfb\x69\x42\xed\x8e\xae\x50\x1d\x7d\xc6\x22\x6c\xc6\x40\x78\x07\x9f\x17\x22\x63\x91\x5c\xf7\x49\x6d\xf4\xb6\x8c\xcf\x55\x81\x5c\x24\xf8\x0d\x67\xab\xf1\xfe\x70\x9c\x2c\x8b\xa5\x36\xc5\x09\x6f\x25\xba\x9e\x78\xa3\x32\xbd\x51\x9b\x6e\x17\x57\xdd\x24\x3b\xdb\xbe\x19\x96\xb4\xcc\x8f\xea\x5c\x5f\x48\x16\xcd\x72\x6d\xbc\xd0\xac\x46\xff\xad\x27\xa0\x52\x2a\x57\x2c\xe5\xa3\x2f\x0a\xe4\x29\x95\xe5\x25\x16\xa8\x50\x51\x10\xdc\x45\xb7\x3c\xe0\x21\x2f\xe1\x04\x2b\x13\x32\x4d\x50\x2c\x4d\xa8\x2a\xc4\x21\xab\xa8\xbc\xaa\x48\x90\x95\x11\x47\xc8\x34\xae\xb2\x34\xe4\x00\xfe\xf5\x1b\x46\xed\x0f\x99\x09\xf9\xa6\x88\x93\xa8\x9a\xbc\x17\x55\xd3\x38\x00\xd7\x53\x17\xfb\xbb\x67\xa7\xc8\x7d\x0d\x63\x09\x27\x51\xf5\x8d\xdc\xc5\xcf\xf1\x2c\xc7\x2b\x7b\xa0\xb7\x56\x5a\x00\x71\x5e\xc0\x8b\xdd\x75\x5f\x6b\x98\xeb\xae\x35\x15\x92\x66\x62\x31\xec\xd6\xe3\xb1\x49\x35\x37\xcf\x10\xad\xd2\xb0\x5f\xa5\x62\xb0\x4d\xad\x9a\x4c\x45\x5c\xaf\x84\x61\x3d\x61\x14\x60\xbd\x3a\xce\x2b\x74\xa5\xcb\x0d\x2b\x89\x4a\x7e\x85\xd7\x0b\x55\x54\xab\x48\xfd\x29\x55\x5b\x3f\x12\x35\x07\x6f\x44\xf6\x47\x96\x27\x8d\x42\xe4\x7a\xf2\x95\x63\x7f\x5c\xb9\xee\x54\x67\x44\x87\xff\x32\x28\x7b\x00\xff\xc5\x6a\xf7\x47\x7b\x76\x97\x94\x47\xca\xce\xc5\x9f\x42\xdf\xdd\x83\x57\x02\xf3\x36\x3f\x29\x87\xe4\x93\x55\x68\xfc\xa7\x9b\xec\x9f\xc1\xbf\xdf\xe4\x6e\xf6\x1a\x13\xb3\x66\xab\x89\x69\xbe\xb0\x4a\xa0\x6d\x4f\x28\x58\x99\x45\x42\xc9\xc6\x99\x1a\xa7\x4d\xa4\x76\x6d\x9b\x98\xc4\xc7\xf4\xbc\xab\xf1\x55\x83\x6a\x26\xb9\x64\x42\xcc\x0c\xe7\x73\x31\x56\xd2\xc9\x09\xad\x6e\x05\x4a\xa4\xa6\xf1\xba\x68\xf0\xb4\x40\xdb\x53\x69\x94\x8a\x7c\x93\x3b\xa9\xe2\x9c\xa4\x42\x02\xc9\x92\xca\x93\x12\x81\x48\x9e\x86\x94\xc4\x13\x10\x70\x00\xd1\x14\x29\xab\x12\xa1\x28\xac\x4c\xa8\x14\x83\x58\x1e\xe0\xbb\x76\x80\xe4\x10\x07\x28\x5e\x92\xdd\x11\x06\xbc\xf2\x86\x9b\xe7\x46\x18\x92\xbd\x35\xc2\xb8\x77\xcf\xce\x04\xf5\xca\xce\x9f\xdb\x68\xf1\x9e\x23\xcc\xab\x73\x8b\x13\xfc\x07\x7a\x6b\xa5\x4e\x7c\xa1\xc5\x2a\x49\xb3\xc8\xcd\x67\xf2\xdb\xd2\xcc\x33\x89\xb1\xac\xd7\xa5\x6e\xb2\xb7\x16\xf0\x44\xbe\x0c\xd7\x05\xa9\x23\xbf\xa9\xc2\xba\xbe\x1c\x19\x2b\x00\xa6\x2d\x89\x50\x21\x48\x64\x8a\xba\x96\x55\xb9\xfa\x82\xa9\x6b\x85\x61\xbd\x9a\x28\xae\x88\xac\x59\xc8\xe6\xd6\xd6\xbb\x8c\x30\x21\xe6\x82\x91\x8e\x30\x2f\xe2\xff\x99\x23\xcc\x0b\x9b\xe4\x85\x8f\x18\x61\xde\xe3\x68\x1d\xef\xba\x3b\xc2\x84\xb7\xa5\x68\x46\x98\x10\xf8\x5f\x1c\x61\x36\x52\x6c\x51\xa4\xa8\x82\xb5\xda\x4e\x95\xb7\x0a\x63\xcd\x8c\x4c\x79\xdb\x4e\xdb\x72\x7e\x2c\xa4\x6a\xe2\x84\x30\x85\xb9\xdd\x7f\x2b\x73\xa9\x44\x86\xc9\x68\x71\x09\xf6\xeb\xab\x36\x6f\xf6\xc4\xde\xb2\x93\x53\xe2\x9b\x8c\x39\xcf\xe4\x16\x09\x91\x13\x97\xf1\x16\xb2\x0c\x79\x1c\xfd\xaa\x2b\xcb\xe3\x40\xc2\x29\x45\xc5\x55\x89\x24\x55\x08\x69\x5e\xe2\x48\x95\x27\x38\x49\x25\x20\xcb\x93\x90\x60\x80\xa4\x00\x45\x26\x15\x8a\x92\x78\x9e\xe4\x11\x4b\xb3\xb2\xca\xca\x12\xcb\xb0\x84\x3b\xc2\x90\x11\xcd\x61\xee\x1d\x94\xc9\xd2\x38\xc5\x5d\x5f\x19\xd8\xdf\x3d\x3b\xe1\xf9\xd5\x39\xcc\x8d\x83\x32\x5d\x05\x7b\x72\x73\xcb\xfb\xcc\x61\x92\xc9\xcd\x08\x25\xa8\x64\xdf\xec\x69\x69\xb5\x24\x74\x18\x79\x32\x8e\x8f\x9b\xf9\xb7\xac\x36\x62\x93\x76\x9d\x6b\xa6\xf0\x8c\xbc\x6a\x4b\x4c\x9c\x49\x74\x34\xbe\x0d\x66\x89\xda\x2a\x65\xc7\xb7\x64\x79\x96\xb0\xb7\x96\x94\x16\x2b\x25\x22\xd3\x66\x98\x4d\xda\x5e\x72\xed\x74\x46\xa8\x7d\xc6\x39\x4c\x28\x0f\x1f\x21\xfe\x2b\xd5\xb9\x61\x46\x98\x0f\x3c\xfc\xd0\x25\xe5\xc9\x11\xe6\x33\x1d\x1e\x19\x38\xc2\xfc\xa4\x39\x84\x10\xd1\x1c\xe6\xb4\xfa\xfa\x19\xfc\xde\x08\x93\x2b\x66\xd2\x54\x76\x98\x9f\xda\x5a\xae\xcd
\xad\x13\xd2\x7c\xdb\xa8\xc6\xca\xa5\x59\x09\x15\xb2\x56\x81\xc8\x6e\x4a\xd5\x5a\x7f\xcb\xb7\x57\x66\xdc\x48\xc3\x22\xd1\x85\x72\x3a\xc9\x55\xc8\x78\x51\x06\x72\x86\x2c\x6c\xac\x56\x8d\x7a\x6b\xf5\xbb\xa6\x29\x76\x00\xbb\x69\xb5\x7a\xd1\x1f\xd4\x25\x11\x2a\x07\x38\x92\xa3\x64\x9a\x41\x94\x84\x03\x4a\xa5\x00\x4b\xab\x80\x62\x68\x0e\x87\x12\x94\x54\x95\x80\x88\x94\x78\x09\x32\x84\x84\x93\x1c\xcf\x92\x34\x8b\x68\x16\x21\x49\x76\xb2\x64\x60\x5f\xfd\x1c\xba\xae\x07\xdc\xad\xeb\x61\x71\xea\x46\x5d\x8f\x77\xf7\xf4\x64\xf5\x57\xeb\x7a\x6e\x8c\x2b\xee\xf5\xe2\x86\xd5\xb3\xba\x1e\x75\x96\x6e\xbc\xf5\xd1\x88\x60\xdf\x6c\x0e\x18\x25\x65\x81\xa8\x58\x7c\x33\xb3\xe2\x93\x6c\x42\xc9\x74\x55\x3e\x91\x4b\xd4\x7b\x6a\xbb\x4f\xa6\x6a\x4d\x12\xae\xd7\xfd\x19\x5d\x5c\x0c\x73\x19\x3e\xbf\xd0\x57\xa5\x69\x9d\x68\x16\xe0\x46\x36\xf2\xb5\xd4\x72\x91\x91\x93\x0d\xb9\xf4\xb9\xea\x7a\x5e\xad\xab\x79\xb5\x96\xa4\xb4\x2e\x4e\x8d\x28\xeb\x7a\x3e\x30\x62\xbe\x17\xbd\x7f\x64\x5d\x8d\x10\x51\x5d\x0f\x47\x1d\x9f\xbf\x32\x33\xbd\x59\xd7\xd3\xa5\x37\x2d\x0b\x74\x0d\x05\x71\xac\x5a\x5e\xa4\xb2\x70\xb6\xac\xae\xb8\x3e\x5b\xa5\x13\xeb\x6e\x5d\x27\xe3\xb5\x86\xd0\xea\x64\x89\x8e\xb2\xc6\x45\x1e\x58\xa5\x4c\x3f\xd6\x60\x8c\x64\x62\xd5\x80\x73\xe9\x2d\x3f\x51\x5a\xdb\xa9\xd5\x2d\x24\x5b\x02\xbb\x18\xcd\x84\xc9\x2c\xfa\xfc\x90\xcc\x93\x2a\x87\x33\x0c\xab\x92\xac\xa2\xb2\x38\x04\x3c\x92\x71\x05\xf1\x8c\xaa\x92\x24\x47\x30\x14\x05\x65\x0e\x70\x90\x92\x11\x24\x64\x89\x91\x91\x44\x48\x32\x92\x19\x55\x55\x71\x9a\x73\xa3\x77\x2a\xa2\xfc\xd0\x03\xd1\x3b\xcd\x13\x37\xa2\x77\xf7\xee\xd9\xbb\x50\x5e\xcd\x0f\xdd\x8d\xde\x3f\xfa\xa8\xd4\x13\xfc\xc7\xfc\x50\x86\xd2\xd9\x49\x52\x58\xd0\x70\xb1\x6a\x00\x25\x9e\x6d\xda\x3d\x99\x2d\x0a\x90\x9d\x37\x5a\x9a\x56\xa1\x0a\xdb\x42\x5e\x5a\x4c\xad\xd4\x70\x25\x4f\x32\x49\xa0\x48\x23\xd9\x34\x78\x5b\x5e\x25\xfb\x59\x75\xbd\x68\xb6\x63\xda\x64\xc5\x65\xa7\x5c\x5f\x85\x0c\x39\xe9\x55\x27\xb5\xde\x67\xcc\x0f\xbd\x1c\xbd\xbf\x88\x3f\xc2\xe8\xfd\x03\x0f\x96\x71\x49\x79\x32\x7a\xff\x40\xfa\x5e\x8f\xde\x3f\x30\x3f\x23\x44\x94\x1f\x7a\x31\x7a\x97\xb4\xee\xb2\x96\x4b\x9a\xfa\x62\xc1\x17\xfb\x24\xa7\x54\x88\xd1\x9b\x65\x25\xcc\xa5\x9e\x6f\x14\x72\xec\xd0\xcc\x19\xfa\x72\x01\xd7\x95\x24\xbf\xad\xf5\x8a\x7d\x62\x99\xde\x2e\xc7\x80\xd8\x72\xf9\x89\x12\x4f\xce\xcb\x5d\xa6\xd0\x88\xaf\x91\xb4\xd1\x79\xd3\x44\xf5\x61\xac\x9e\x18\x46\x3d\xc2\x70\x04\x41\xf0\x40\x26\x39\x86\x22\x15\x95\x25\x65\x9c\xa0\x21\x0d\x09\x85\x65\x25\x0e\x07\xca\x6e\x18\x51\x28\x95\xc6\x55\x99\x51\x08\x9c\x23\x18\x08\x59\x5a\xc6\x21\x43\xe0\xbc\xca\xc3\xc3\xab\x18\x23\xa9\x1c\xbd\xbb\x02\xc1\xd0\x38\x71\x7d\x84\xd9\xdf\x3d\x7b\x47\xd6\xab\x95\xa3\x37\x56\x20\xdc\xa3\xc9\x42\xe5\x7b\x48\x5f\x75\x95\x73\x9d\x54\x7a\x56\xf5\x21\x9b\x29\x8e\xf2\x43\x7c\xc3\x8e\xe7\xc2\x9a\x10\xd7\x4c\x16\x74\xd3\xcc\xb6\x9e\x9b\xaa\xf9\xdc\x3a\xad\x6e\x70\xbe\xbc\xd4\x68\xc9\xae\xe4\x85\x34\x5a\xa7\x85\xb2\x5a\x81\x42\x82\x4b\xd7\xfa\x3a\x3d\x27\x05\x7a\x48\x4e\x98\x54\x36\xa5\x0d\xd5\x56\xb9\x95\x22\x98\xa1\xfc\xc8\x88\x71\xba\x97\xe9\x91\x98\x3e\xea\x1d\xca\x0f\x5a\x99\xb0\x37\xf0\xa8\xf1\x3f\x71\xf0\x8b\x20\x7c\xde\x35\xeb\xbb\x31\x7f\x08\x59\xbd\x9c\xb1\x7f\xb1\xaf\x2e\x0e\x5e\x7f\x10\xbf\xe7\x91\x63\x9d\x94\xd5\x5b\x8a\xd3\x54\x33\xbb\xac\x4c\x52\xa9\x56\x33\x65\xa8\x7d\xb9\x6b\xe6\x45\xad\xcf\x4d\xc6\x42\x3b\x9e\x6d\xe2\x80\x29\xea\x4b\x9c\x9a\x08\x6d\x56\x68\x09\x04\x47\x1b\x42\x76\xdd\x4e\x4c\xbb\xd3\x99\x9c\x9a\xa6\x50\xd3\x28\xc9\x95\x42\x82\xaf\x11\xb5\x6a\x77\x91\x88\x3c\x63\x2f\x29\x14\xc7\x28\x92\xa2\xe0\x40\xa1\x18\x9c\x23\x58\x86\x25\x64\x0a\xd2\x90\x45\xb
c\xc2\x20\x8e\xa1\x65\x08\x78\x59\xa2\x08\xc4\x00\x85\x85\xd0\x99\x18\xa8\x08\xd1\x12\xc9\x28\xc8\xf5\xc8\x51\x1d\x7c\xfe\x80\x47\x26\x18\xf2\x86\x47\x76\xef\x9e\xbd\x6b\xf0\xd5\xa3\xc8\x3e\xd0\x23\x9f\x1c\x3c\x9e\xe0\x27\xd3\x42\x07\x2c\xc8\x15\x5b\x53\xb7\x5c\xb5\x84\x26\xa2\x44\x34\x9b\x39\x5a\xdb\x2c\x26\x39\x3c\x61\x0c\xbb\x66\xc5\x66\x87\x15\x82\x01\x35\x69\x32\x02\x4a\xa3\xd9\x52\x51\xca\x58\xc9\x78\x55\x80\xea\x28\xd5\xdd\xd8\xa3\xb6\xa0\x5b\xc5\xe5\x58\x4f\x4c\xb7\xe3\x84\xf0\x50\x0c\xff\x82\x47\x8e\xe4\xf5\x06\x0f\x5a\x99\x10\x60\xe5\x1f\xfd\x7a\x05\x41\xf8\xbc\x19\xf8\xbb\x6b\xa8\x21\x64\x15\xa5\x47\xfe\x48\x5d\xf1\x3c\x72\x16\x34\x7a\x73\x09\x9a\x28\x6e\x27\xe2\xc5\x35\xb7\x61\x6a\xf5\x55\xbb\x5c\x1a\x4f\x8b\x99\x45\x6d\x5c\xcb\x68\x09\x64\x31\xe4\x52\x60\xbb\x66\x3f\xb1\x6c\x64\xfb\x44\xbe\x5c\xe7\xa9\x8a\xc6\xbf\xd5\xb8\xc4\x3c\x26\x96\xd5\x0c\x48\xb7\x92\x9d\xf5\x92\xa9\xb4\x32\x52\xa1\x24\x46\x1f\x23\x4b\xb4\x0c\x15\x19\xe1\xb4\x2a\xe3\x50\xa1\x15\x46\x45\x38\xce\x4b\x9c\x84\x2b\x48\x92\x71\x85\x54\x54\x85\x04\x08\x52\x40\xe2\x90\x04\x49\xc4\x23\xc8\x70\x32\x60\x29\x06\x51\x04\xae\xba\x1e\xd9\x59\x43\x0d\xf9\xa2\xfc\x27\x3d\x32\xe0\xae\xbf\x8a\x62\x7f\xf7\xec\x9d\xad\xae\x56\x3e\x79\xf8\xe2\x33\x1e\x39\x54\x56\xa5\x1b\xe4\x91\xc5\x03\xfe\x9a\x50\xae\x35\xde\x56\x73\x31\x91\x14\xba\x2b\x8b\xa8\x0e\xa7\xd9\x78\x2b\x2d\xa9\x42\xcf\x26\x2b\xc2\x70\xd8\xab\xb6\x9b\x0a\x3e\x1f\xb7\xe5\x3c\x6c\x93\x7a\x4a\x8c\x8f\x2b\xfd\xd2\xac\x98\xee\x97\x26\x69\x71\x34\xa4\x14\x7d\xd9\xae\x6d\x50\xaa\x2f\xac\x9b\x85\x56\x15\xce\x14\x6b\x3d\x7c\x67\x8f\xfc\xec\xac\x3d\x6a\x8f\x1c\x09\xfe\x17\x3c\xf2\x67\xca\xaa\xdc\xf3\xc8\x61\x64\x15\xa5\x47\xfe\x48\x5d\xd9\x1f\x24\x49\x24\x8a\x95\xd8\xa4\x26\xe6\x12\x28\xf6\xd6\x8a\x25\x7b\x2a\x5f\x6b\xdb\x5c\x42\x23\x65\x2a\x99\x4c\x0c\xf9\x76\x99\x6c\x4c\xcb\xed\xb5\x3e\xef\xa2\x56\xa9\x19\x2b\xd1\x93\xad\x6c\xb5\x27\xdb\x5a\xab\x91\x49\x8d\x40\x87\x6e\xa7\xd2\x54\x79\x92\x4a\x94\xe6\x35\x7e\xa1\x80\xb7\x64\xf4\x75\x93\x00\x51\x12\x24\x54\x40\x70\x12\x03\x21\xa0\x00\x2e\x51\x3c\x4b\xd1\x0c\x24\x24\x9c\xc7\x79\x1c\x91\x1c\xc9\x22\x86\x91\x48\x8a\xa7\x71\x12\x57\x28\xa4\x28\x0c\xae\xe0\x2a\x4e\xb3\xb8\xeb\x91\xa9\x88\xb2\x16\x77\xf3\xe2\x0c\x4d\xf2\xd7\xcf\x91\xdc\xdf\x3d\x7b\xf7\xf5\xab\x59\x8b\x1b\x79\xf1\xa8\x3d\xf2\x49\xd6\xa2\x94\x9f\xae\x62\xfa\x28\x4d\xd7\xfa\xfa\x50\xc9\x2d\x12\xdd\x54\x43\x19\x89\xf6\xd0\x84\x4d\xb3\xde\x58\xb7\x4d\x5a\x89\x27\xcc\x72\x12\x70\x48\xcd\x96\x8c\x4d\xaf\x0c\x63\x60\xc9\xdb\x0d\x51\xd9\xb4\x62\xa3\x4c\x97\xc9\xd7\x3b\xec\x5a\x48\x93\x52\x65\x2e\x70\xe6\x66\xb3\x11\xc4\x5f\x21\x6b\xf1\xec\x71\xb1\x3f\xf1\x5c\x37\x41\xf8\xbc\x75\x90\x81\x1e\xf9\x27\x65\x0d\x84\xf7\xea\xab\x07\xf1\x7b\x1e\x79\x3d\x85\x71\xa6\x23\x76\xf5\xc4\xb4\x93\xc0\x35\x23\x2d\x97\xf3\x40\x6c\x80\xa4\x3d\xaf\xd8\xe9\x62\xa2\xb3\xd2\x49\x2e\x63\xe1\x45\x69\x43\x73\xf2\x76\x9c\x1e\x12\x26\x0b\x59\x40\xe4\xea\x64\xba\xc9\x88\x7d\xd5\xd0\xd3\xd2\x4a\x10\xba\x8a\xc5\x4c\x52\xdb\x55\x36\x2b\x46\x7f\x06\x1e\x90\x00\xe2\x80\x22\x41\x49\xc2\x01\x25\x01\x16\xe2\x32\x49\x50\xb8\x0c\x59\x42\xe1\xa0\xcc\x4b\x32\x4b\x70\x24\xa1\xf2\x2a\x0d\x49\x49\x61\x78\x24\x43\x52\xe1\x38\x55\xc2\x91\x4c\xcb\x6e\x0d\x07\xf9\x5a\x15\xc8\xdd\xd0\x98\x65\x09\xe6\x7a\x79\xe1\xfe\xee\xe9\xdb\xec\x5f\xad\x02\xb9\x71\x9e\xaf\x7b\x85\xd8\x0b\x7d\xa5\x0a\x24\x95\x5f\xea\x84\x5d\xcc\x14\xd3\x54\x7b\xb3\xb6\x71\x25\x95\x6c\x8b\x2a\x63\x4b\xb4\x4e\x49\xdb\x92\x99\x19\x26\xe7\x31\xbd\xdd\x2f\x4d\x37\xb2\x4d\x53\x5a\x59\x05\xd3\x8d\x3d\xde\x30\x25\x85\xee\xe7\x29\x91\x4a\xe9\xb2\xa5\x52\x8c\x28\x
8c\x12\x99\x46\xab\x6a\xcd\x38\xb5\x97\xfa\x5c\x55\x20\xaf\x56\x61\xbc\x5a\x79\x50\xc4\x27\x8c\x10\x61\x15\xc8\x47\x66\x61\xdf\xa3\x0a\x24\x6c\x15\x86\x10\x51\x15\x08\x7b\xd2\x01\x57\x5e\x82\x75\x73\x97\x90\x34\x57\xa4\xc6\x46\xdb\xa0\xb4\x2c\x17\x95\x6c\x6d\xad\xd7\xb3\x31\xb3\x13\xeb\xa3\x0c\x37\x2e\x6c\x0c\x61\xa1\xce\xdb\x9d\x66\xde\xea\x16\x11\xca\x8d\xbb\xfc\xdc\x92\x7a\x1c\x1a\x67\x51\xa7\x81\x12\x15\x81\xee\x16\xb3\xb1\xca\x48\xc8\xd5\xea\x13\x3d\xc5\xe6\xe3\x59\x20\x44\x9e\x11\x46\x04\x2b\x41\x44\xd3\x40\x81\x38\x81\xd3\x50\x21\x81\x8a\x4b\x3c\x84\x8a\xaa\xe0\x2a\x03\x48\xa4\x92\x2c\x92\x28\x8e\x91\x08\x5c\xc2\x29\x4a\x26\x78\x40\x72\x24\x83\x23\x8a\xa6\x15\xd6\xf5\xad\xd4\x6b\xbe\xf5\x6e\x90\xcb\xb2\x80\xbd\xfe\x4e\x8a\xfd\x5d\xc7\xb7\xd2\x38\x7f\x72\x56\x7a\x68\xdf\x7a\xe3\x54\x51\xf7\x7a\xb2\xf6\xe3\x96\x6f\xed\xf5\xd4\x6d\xc5\x9c\x56\x47\xea\x44\x63\x74\xb3\x1a\x33\x92\xea\x72\x96\x9f\xd6\x97\x60\xaa\x6e\x74\x0a\x37\xe8\x45\xa7\xce\x6e\x89\xde\xb4\xca\xcd\x8d\x4c\x8c\xe9\x2e\x86\x14\x31\x2e\x24\x3b\xe5\x75\xdb\xa8\xb2\x04\x9b\x6c\x8b\x55\xc4\xe2\x39\xdc\x46\x9d\x55\x13\x8d\x27\xf9\xcf\xe5\x5b\x5f\xf5\x6d\xaf\xda\x73\x81\x8a\x95\x7b\x11\xfa\xd6\x8f\x9c\xbd\xbf\x87\x6f\x0d\xeb\xdb\x84\x88\x7c\x2b\xbd\xbe\x8b\xe8\x96\x6f\x05\x99\x2c\x37\x5e\xd0\x63\xaa\x4b\xe7\x96\x73\xf8\x16\x5f\x08\x33\x76\xbe\xed\x4f\x24\x7b\x3e\xe3\xfa\xa8\x5c\xa8\x40\x7d\x82\x38\xbb\x93\x1f\xc7\x0b\x30\xb1\x5a\xea\x8c\x51\x29\xf7\xb6\x43\x7d\x2c\xa7\x61\x2a\xc6\xa3\xb5\x5e\x4f\x91\x4b\x3e\x59\x2e\x76\x75\x13\x27\x0d\x3a\x19\x79\xdc\x2a\xab\x0a\xc7\x11\x0c\x52\x68\x8e\x95\x69\xa4\x70\x9c\x82\x20\x8e\x24\x1c\xe7\x08\x4e\x86\x2a\xc9\xc9\x38\x4b\xe3\x88\x65\x29\x1c\xe1\xb4\x82\x48\x19\xb0\x04\xcb\x20\x1e\x22\xc4\x21\xe4\xfa\x56\xfa\x35\xdf\x7a\xf7\x45\x14\xce\x66\xca\x1b\xbe\xd5\xbd\xeb\xfa\x56\x1a\x3f\xc9\x1f\x84\xf6\xad\x37\xce\xcb\x72\xaf\x10\xaf\x7c\xb9\xe6\x5b\xcb\xd3\xda\x48\xa9\xa1\x6d\x49\xee\xa4\x3b\x55\x4e\x69\xd7\x27\x29\x3b\xdd\xa0\xec\x6c\x2e\xb9\x2d\x28\xb0\x3a\xa9\x33\xb1\x78\x91\x8d\xd9\xa5\xb7\x42\xc7\x68\x8d\xa5\x94\xd0\xb5\xe4\x7a\x2f\x17\x1b\x33\x69\x25\xde\x4e\xb7\x60\xea\x4d\xe9\x59\x9d\xa4\x60\x65\x49\xbc\xb3\x1c\xe7\x8a\x9f\xcb\xb7\xbe\xea\xdb\x5e\xb5\xe7\x3c\xdf\x1d\xae\xa3\xac\x5e\xfe\xc0\xb5\xaa\xf7\xf0\xad\x61\x7d\x9b\x10\x91\x6f\x0d\x7b\x42\xa5\xe7\x5b\xdf\xfa\x93\x5c\xb7\x95\xb5\xc6\xd9\xf4\xa8\x3d\x8e\x67\xdb\xad\xbe\xc0\xd6\x5b\xcb\x62\x7d\x2b\x58\xda\xac\x33\xa6\x45\xb4\x5a\x6d\x63\x71\xb3\x59\xda\xea\x8b\x74\x0e\xac\xd7\x69\x36\x2b\x76\x92\x55\x23\x36\xea\xaa\x44\xbd\x9d\x19\x72\x64\xa7\x98\x1e\x0e\x8b\x09\x9e\x4f\x9b\xc6\xf6\xd1\xbd\x87\xbf\xfd\xf6\xfd\xfb\x6f\xdf\xbf\x63\x29\x68\x43\x4c\x35\x4c\xac\x0c\xa7\xe8\x07\x36\x41\xdb\xc1\x0a\xea\x4b\x34\xd8\xb9\x5b\xf4\x17\xd6\xdc\xce\xd1\x0f\xac\x29\x24\x8a\x22\x96\x12\x9a\xc2\x5f\x58\x43\x1e\xa1\x29\xfc\x81\xcd\x97\x92\xae\xc9\x7f\x61\x95\xf5\x0c\x99\x3f\xb0\x1d\xb4\xdf\xce\x5c\xb6\x0f\xd6\x49\x24\xbc\x99\x0f\xb4\xd9\x10\x59\xf6\x40\x87\xbb\x3f\x48\x19\x22\x73\x47\x34\xfe\xf5\x3a\x69\x86\xaa\x22\xd3\x7a\x9a\xa2\x3d\x34\x17\x08\x94\x65\x63\x39\xb3\xad\x81\xa5\x0d\x67\xc8\xb4\x2e\x7e\x18\xcc\x27\x68\xbb\x47\x92\xac\x94\x1b\xcd\xba\x90\x2b\x37\xef\x20\x11\x8a\x4d\xb1\xee\xd1\x54\x29\x17\x7b\x17\x60\x7f\xc3\x30\x0c\x13\x52\xa9\x13\x90\xc1\xa8\xb1\x6a\x3d\x57\x12\xea\x3d\xac\x20\xf6\xb0\xdf\xdd\x3b\xdf\xf6\x4d\x4f\x84\xe3\xb1\x63\x59\xc8\x1e\x58\x36\xb4\xad\xd3\xcf\x51\x31\x71\x84\x18\x48\xbf\x0f\xe1\x39\xe9\x9a\x72\x41\xed\xd0\x30\xe7\x83\xa9\x36\x34\xa1\x3b\x7c\xfb\xbe\x47\x44\xb5\x0f\x6a\x10\xe5\x41\x88\xef\x52\xbf\x8f\x40\x1c\xb6\x2d\
xdf\x57\xf7\x3f\x03\xd9\x50\x90\xf7\xd1\xde\xce\xf7\x1f\x35\xcb\x5a\x22\x73\x10\x09\x77\xe7\x68\x83\x98\x0b\x45\x18\xd6\x2a\xe7\x6a\x2d\x11\xfb\xfd\xd8\xfc\x1b\x76\x6c\xbf\xff\xec\x3e\xf0\xa4\x68\xa2\xe9\xd6\xa7\x19\x7f\xaa\x53\x8d\x39\x72\x95\x61\x30\x87\xa6\xad\xc9\xda\x1c\xce\x4e\x38\x09\xbe\x1d\x31\x67\xc1\x48\x6e\x71\x7a\x83\xac\x87\x39\x3f\x09\xa8\x83\x79\xbf\xd6\x20\x62\xee\xaf\xa1\xb9\xc5\xff\x4d\xd2\xee\x4a\xc0\x3f\x3e\xf9\xbe\x47\xc4\x9f\x0f\x6a\x10\x3b\x41\x88\xcf\xa9\x9f\xa0\xed\x05\xf9\xee\x78\xe8\xfd\x27\x22\x62\x5d\x60\x41\x34\x9e\xa0\x39\x27\xcd\xb9\x11\x20\x5d\xd7\x61\x48\x5b\xc7\x97\xec\x29\xcb\x95\x53\x62\xf7\x0e\x51\xc9\xba\x28\x34\x45\xb7\xe9\x39\x14\xac\x52\xf6\xbb\x9a\x56\x23\x57\xce\x60\x92\x6d\x22\x74\xea\xbb\xae\x53\xe3\x7a\xb0\xd7\xe9\x71\xe1\x3c\x46\xd1\x15\xaf\x29\x6d\x07\xde\xb8\x1e\x9a\x9c\x23\x88\x53\x4a\xce\xe6\xc8\xe7\xf4\xb8\x8d\x0f\x01\xc5\xc0\x42\x8b\x25\x9a\xc9\x97\x02\x93\xb6\x83\x11\xb4\x46\xaf\x50\xb6\x7b\xfe\x31\xb2\x4e\xed\x78\xf7\x54\x10\x35\x6e\x90\xf8\x0a\x3d\x2e\x84\xc7\x28\x72\xdb\x1e\xc4\xf3\x0d\x83\xf3\xb9\xae\xc9\xae\xb3\x35\x4c\xe5\xca\x20\x38\x40\x3b\xdd\x70\xee\x87\xa0\xd4\x1b\x83\x5d\x82\x7d\xe0\x4e\xc9\x46\xaa\x8a\x64\xbf\xaa\x5d\x8e\x09\x9a\xf2\x0d\xfb\xe2\x3c\xfc\xe5\x1a\xb1\x9a\x12\x11\x99\x9a\xf2\x30\x81\x7b\xd5\xdb\x91\x17\x82\x68\x63\x3e\x98\x47\x45\xb7\x07\xeb\x94\xf4\x2b\x81\x40\x28\x4e\x82\x19\xb0\x37\xd1\x31\xe0\xc1\xba\xa2\xd3\x21\x59\x38\x85\x10\xc4\x84\x31\xdf\x69\xe5\xc8\x08\xc5\x83\x47\xfc\x11\x46\x58\xe1\xdf\x16\xb4\xe5\x59\xbb\xe3\xaa\x5f\x97\xf5\x39\xb8\x53\x92\xdd\xdf\x7d\x34\x06\x53\x74\x2a\xd7\xa8\xc8\xba\x80\xf9\x98\x7b\x0b\x22\xd0\x76\xbb\xc4\x7e\xa5\x5b\x8f\x30\xc2\xab\xe4\x3d\xf5\xb3\x4d\x65\x87\x44\x82\x16\x7a\x79\xfc\x0c\x02\xe6\xa3\x5c\x41\x3e\x3a\x4f\xdb\xde\x25\xd0\x89\x8e\xa2\x21\xcf\x01\xf5\x10\x71\x4e\xcb\x5b\xa4\x39\xb4\x23\x33\x32\xf1\xf9\xe0\xdd\x23\xd2\xd7\xfc\x11\x4a\xa3\x91\xe3\x19\xb4\x47\xa9\xbc\x2b\xcd\x68\x68\x7b\x88\xa6\xdb\xb4\xec\x29\xd6\x0d\x63\xb2\x9c\xbf\x46\xd1\x39\xac\x87\x7b\xd4\x8d\x77\xaf\xd0\x37\x87\x9a\x39\xb0\xb5\x29\x8a\x84\x42\x3f\xb4\xc7\xec\xd6\x23\xf0\x1b\xe6\x27\xf9\x1b\xe6\xb9\x78\x59\x37\x2c\xa4\x0c\xe0\x65\xb6\xcd\x45\x1b\x81\xdf\xf6\xe0\xdc\xa3\xf8\xc9\xe8\x68\x07\x35\x32\xe9\x3e\x21\xd8\xbb\x72\xd3\x66\x0a\xda\x0c\x7c\x21\x87\x35\x30\x66\x03\xa8\x28\x26\xb2\xac\x57\x05\x7a\x17\xc1\xd9\x3c\xcd\xbb\xed\x9b\x19\xb9\x0d\x9f\xa0\xfd\x75\x3d\xb8\x05\xfb\x3e\xc5\x01\x56\x76\x0e\xd0\x8b\xc2\x77\xf0\xec\xed\x3c\xfc\x1c\xfc\x26\xd4\xbb\x61\xff\xae\xd1\x1d\x42\xbd\x18\x6a\x07\xf2\xa0\x44\x11\x51\x1b\x04\xfa\x6e\xf8\xf6\xa8\x26\x9f\x00\x8f\x5a\x19\xce\x40\x87\x89\x37\xaf\x83\x9b\xce\x0d\x73\xe7\xf8\x56\xc8\xb4\x34\x63\x16\xbd\xa0\xfd\x18\xee\x93\xef\x7b\xe0\x71\x66\x3c\xd7\x13\x32\x53\xf1\x98\xfc\x4f\x70\xdc\xe5\xe4\xa4\xed\xe3\x4c\xcc\x4d\xb4\xd2\x8c\xa5\xf5\x21\xdc\x04\x21\xbb\xcb\x56\xd0\x43\x8f\xf3\xb7\x4f\xa2\xbc\x1b\x4f\x7b\x04\x77\xf9\xb8\x9a\xed\x3a\x07\x7d\x18\x6f\xdf\xc5\xb4\xfd\xd0\x03\x27\xc0\xcf\x1a\xf8\x39\xd0\xf3\x29\x54\x44\x16\x7e\x0b\xc5\x23\x3c\xdc\x99\xd7\xdd\x44\x16\xdd\xf0\x75\x09\xf8\x21\xda\xef\x0f\x62\xa7\x93\xed\xf7\x50\x9b\x4b\xf8\xa1\xa7\xfa\x5e\x2e\x7f\x37\xb1\x5c\x6e\xb5\xd9\xd0\x8d\x81\x43\x0b\x38\x18\xdc\x8e\x3a\x6f\x89\xe2\x3c\x0c\x77\xda\x38\x4d\x6e\x10\xe6\xd4\x05\x4c\x0d\x45\x53\x35\xa4\xbc\x9a\xfb\xbd\x0d\xf6\x1a\xa1\x41\x6d\x6f\x50\x6c\x21\x5d\x8f\x84\x46\x17\xd0\x35\xaa\xdc\xbb\x37\x3b\x75\xd7\x24\xca\x5e\x3d\x83\x77\x8b\xae\xab\xfd\xba\x2f\x70\x88\x60\xc5\xe3\x12\xd4\x8e\xa2\x8b\xb2\x8e\xa0\x55\x8f\x0b\xb2\x9c\xc9\xcc\x21\xa0\xdd\x67\xda\x07
\x92\x61\x4c\x42\xd3\x77\x03\xe6\xdd\x50\xf9\xf7\xdf\x15\x64\x43\x4d\xb7\xb0\xef\xff\xf7\x7f\xd8\x57\xcb\xd0\x95\x93\x35\xfb\xaf\x3f\x7e\xd8\x68\x63\xff\xf1\xc7\x37\xec\x7a\x43\xd9\x50\x1e\x6b\xe8\xae\x49\x5d\x6f\x2a\x19\xcb\xe1\xc8\x7e\x08\xfd\x59\xd3\xdb\x04\x9c\x35\xf5\x91\xf0\x07\xd6\xc9\x8a\x75\xd1\x75\xb6\xd8\xdf\x18\x49\x3e\x5c\xee\xa2\x29\x03\xf5\x64\xfd\x33\x5d\xf8\x98\xa2\x17\x0f\x2d\x96\xae\xd4\xc5\x5c\xa6\x7c\x58\x68\xc6\xea\x62\x5a\xac\x8b\xe5\xa4\xd8\xf0\xad\x0e\x3a\x77\x2b\x65\xac\x55\x4d\xed\x54\xa6\x2e\x36\x9a\xf5\x5c\xb2\xb9\xfb\x29\x25\x16\xc5\xa6\x88\x25\x85\x46\x52\x48\x89\x37\x56\xeb\x77\xf3\xef\xf3\xaf\x03\x5f\x4a\x32\x3a\x61\x9c\xe3\xb9\xb3\x14\x7f\x8d\x92\x73\xf9\xf8\xd3\xa7\x81\xc2\xf2\xcc\xf9\x4e\xdd\xc2\x55\x49\x78\x29\x9d\x9f\x2e\x87\x53\x3a\x82\xa4\xb0\xcf\x96\xdd\x56\x98\xe7\x24\x70\x99\x5c\xfd\x89\x62\xb8\x42\xcc\xb9\x2c\x02\xd2\xc1\xd1\x2a\x85\x3f\xd5\xf7\x19\x04\x72\x5d\x35\x2e\x72\xa9\x77\xb5\xe3\xfb\x77\x0c\x2a\x0a\x52\xb0\x29\x9c\x2d\xa1\xae\x6f\xcf\x08\x0d\x0c\x11\x77\x54\xee\xe5\x3d\x5d\x6e\x90\x82\xad\xa0\x29\x8f\xa0\xf9\x3b\xc3\xff\xe1\x95\x8b\xee\xda\xa8\xe8\x68\xac\xc1\xed\xfe\x0a\x44\x76\x12\x48\xef\xc0\x58\xc6\xd2\x94\xc3\x41\xda\x8f\x94\x0e\xc5\x6e\x92\xee\xea\xf3\x9e\x4e\x54\x0d\xcb\x1e\x9a\xa8\x51\x2b\x62\x0a\xb4\xe1\xce\xd6\x30\x65\x39\x9d\x63\xb2\x31\x9d\xeb\xc8\x46\x4e\x67\xfe\x7f\x01\x00\x00\xff\xff\xab\x33\xed\x40\x28\x6b\x01\x00") + +func paths_strict_sendHorizonSqlBytes() ([]byte, error) { + return bindataRead( + _paths_strict_sendHorizonSql, + "paths_strict_send-horizon.sql", + ) +} + +func paths_strict_sendHorizonSql() (*asset, error) { + bytes, err := paths_strict_sendHorizonSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "paths_strict_send-horizon.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb9, 0xbf, 0xdf, 0x6a, 0xd7, 0xf4, 0xc5, 0x71, 0xa6, 0xd9, 0x3c, 0x39, 0xf1, 0x43, 0x96, 0xea, 0x34, 0x84, 0xb3, 0xee, 0x11, 0x2e, 0xd1, 0x42, 0x26, 0xa3, 0x10, 0x7d, 0x88, 0x47, 0xf6, 0xce}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "account_merge-core.sql": account_mergeCoreSql, + "account_merge-horizon.sql": account_mergeHorizonSql, + "base-core.sql": baseCoreSql, + "base-horizon.sql": baseHorizonSql, + "failed_transactions-core.sql": failed_transactionsCoreSql, + "failed_transactions-horizon.sql": failed_transactionsHorizonSql, + "ingest_asset_stats-core.sql": ingest_asset_statsCoreSql, + "ingest_asset_stats-horizon.sql": ingest_asset_statsHorizonSql, + "kahuna-core.sql": kahunaCoreSql, + "kahuna-horizon.sql": kahunaHorizonSql, + "offer_ids-core.sql": offer_idsCoreSql, + "offer_ids-horizon.sql": offer_idsHorizonSql, + "operation_fee_stats_1-core.sql": operation_fee_stats_1CoreSql, + "operation_fee_stats_1-horizon.sql": operation_fee_stats_1HorizonSql, + "operation_fee_stats_2-core.sql": operation_fee_stats_2CoreSql, + "operation_fee_stats_2-horizon.sql": operation_fee_stats_2HorizonSql, + "operation_fee_stats_3-core.sql": operation_fee_stats_3CoreSql, + "operation_fee_stats_3-horizon.sql": operation_fee_stats_3HorizonSql, + "pathed_payment-core.sql": pathed_paymentCoreSql, + "pathed_payment-horizon.sql": pathed_paymentHorizonSql, + "paths_strict_send-core.sql": paths_strict_sendCoreSql, + "paths_strict_send-horizon.sql": paths_strict_sendHorizonSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "account_merge-core.sql": &bintree{account_mergeCoreSql, map[string]*bintree{}}, + "account_merge-horizon.sql": &bintree{account_mergeHorizonSql, map[string]*bintree{}}, + "base-core.sql": &bintree{baseCoreSql, map[string]*bintree{}}, + "base-horizon.sql": &bintree{baseHorizonSql, map[string]*bintree{}}, + "failed_transactions-core.sql": &bintree{failed_transactionsCoreSql, map[string]*bintree{}}, + "failed_transactions-horizon.sql": &bintree{failed_transactionsHorizonSql, map[string]*bintree{}}, + "ingest_asset_stats-core.sql": &bintree{ingest_asset_statsCoreSql, map[string]*bintree{}}, + "ingest_asset_stats-horizon.sql": &bintree{ingest_asset_statsHorizonSql, map[string]*bintree{}}, + "kahuna-core.sql": &bintree{kahunaCoreSql, map[string]*bintree{}}, + "kahuna-horizon.sql": &bintree{kahunaHorizonSql, map[string]*bintree{}}, + "offer_ids-core.sql": &bintree{offer_idsCoreSql, map[string]*bintree{}}, + "offer_ids-horizon.sql": &bintree{offer_idsHorizonSql, map[string]*bintree{}}, + "operation_fee_stats_1-core.sql": &bintree{operation_fee_stats_1CoreSql, map[string]*bintree{}}, + "operation_fee_stats_1-horizon.sql": &bintree{operation_fee_stats_1HorizonSql, map[string]*bintree{}}, + "operation_fee_stats_2-core.sql": &bintree{operation_fee_stats_2CoreSql, map[string]*bintree{}}, + "operation_fee_stats_2-horizon.sql": &bintree{operation_fee_stats_2HorizonSql, map[string]*bintree{}}, + "operation_fee_stats_3-core.sql": &bintree{operation_fee_stats_3CoreSql, map[string]*bintree{}}, + "operation_fee_stats_3-horizon.sql": &bintree{operation_fee_stats_3HorizonSql, map[string]*bintree{}}, + "pathed_payment-core.sql": &bintree{pathed_paymentCoreSql, map[string]*bintree{}}, + "pathed_payment-horizon.sql": &bintree{pathed_paymentHorizonSql, map[string]*bintree{}}, + "paths_strict_send-core.sql": &bintree{paths_strict_sendCoreSql, map[string]*bintree{}}, + "paths_strict_send-horizon.sql": &bintree{paths_strict_sendHorizonSql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. 
+func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) +} diff --git a/services/horizon/internal/test/scenarios/failed_transactions-core.sql b/services/horizon/internal/test/scenarios/failed_transactions-core.sql new file mode 100644 index 0000000000..bffd3ab38f --- /dev/null +++ b/services/horizon/internal/test/scenarios/failed_transactions-core.sql @@ -0,0 +1,743 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS 
public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + signers text, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) 
NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 999999969999999700, 3, 0, NULL, '', 'AQAAAA==', 0, 2, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 9999999900, 8589934593, 1, NULL, '', 'AQAAAA==', 0, 3, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 9999999700, 8589934595, 1, NULL, '', 'AQAAAA==', 0, 4, 0, 4000000000, NULL); +INSERT INTO accounts VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 9999999800, 8589934594, 1, NULL, '', 'AQAAAA==', 0, 5, NULL, NULL, NULL); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders 
VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('75068cac2f1e6a0b2cafa2d5dbbccc5c196717f6fff8799cc3ab6a695b07b91a', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 'a765aea154b2d40441dabbf7b127a90d308cf33d5bf55cef5ec291f15d50053e', 2, 1559579676, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76Z/M5ISCqR3+pzFWKQOzEgiglt4JSAeD7YrosEaL/5ILEAAAAAXPVMHAAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA0VQ8umpMO+RJ/CzI0VZJe+5ehw/k4T4dhwPQwwEV77mnZa6hVLLUBEHau/exJ6kNMIzzPVv1XO9ewpHxXVAFPgAAAAIN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('a2492d997ac133f9013efc258fb5bc53bef098bf998dc58cb50fc6f88b7fa800', '75068cac2f1e6a0b2cafa2d5dbbccc5c196717f6fff8799cc3ab6a695b07b91a', 'e3b59a2525fc5a7ce6b0fd3df7514c25ccc74e38a5e078d0c1d537aa103f6861', 3, 1559579677, 'AAAAC3UGjKwvHmoLLK+i1du8zFwZZxf2//h5nMOramlbB7kabzv0QGPkGXSc2cgBCDpoiiEGR4f2mr0pB9Ga7Yg+XbIAAAAAXPVMHQAAAAAAAAAASRLUUHsB//zHsjHAo1ZAUBko8x4SRXNyaftZqVnF5MHjtZolJfxafOaw/T33UUwlzMdOOKXgeNDB1TeqED9oYQAAAAMN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('bf1986c2bbfdb2c8674d34b9ee1307bff4c0c1abb052e9f7afa0d76cec32b5f8', 'a2492d997ac133f9013efc258fb5bc53bef098bf998dc58cb50fc6f88b7fa800', 'bc276d95b1adfdb92b6ddf80fd4283234f9ff88146c56adfc895668d81d9791d', 4, 1559579678, 'AAAAC6JJLZl6wTP5AT78JY+1vFO+8Ji/mY3FjLUPxviLf6gAUlLur1iXGHmYU5SmRHdfFQ9yODq9lFMyFq406Jsb7XwAAAAAXPVMHgAAAAAAAAAA/rqTjILWTvdE+20btzdaa93EPtUkF4WSO8KIRAy9IIC8J22Vsa39uStt34D9QoMjT5/4gUbFat/IlWaNgdl5HQAAAAQN4Lazp2QAAAAAAAAAAAMgAAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('66c8b3359da7efdd61f5f2b37062916254c6418633cd212143740c7cd0be0855', 'bf1986c2bbfdb2c8674d34b9ee1307bff4c0c1abb052e9f7afa0d76cec32b5f8', '935b20697525381bc629b5d7472b096f3ec05c616c5d224c922608dde72c0c16', 5, 1559579679, 'AAAAC78ZhsK7/bLIZ000ue4TB7/0wMGrsFLp96+g12zsMrX4qoxNrxj1nF8+UII6WNkDzljRRer3VuRkzSJZAQQ47TQAAAAAXPVMHwAAAAAAAAAAedLFm9CgXV79AuZN9agFNxrZDQgV3bflGAqP7+xIfZCTWyBpdSU4G8YptddHKwlvPsBcYWxdIkySJgjd5ywMFgAAAAUN4Lazp2QAAAAAAAAAAAOEAAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO offers 
VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 1, 'AAAAAA==', 'AAAAAVVTRAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkw=', 4000000000, 1, 2, 0.5, 0, 4); + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GB5ETNF4QI5ZUGYI3JVIZRWQTBQ7L3PLXNPUNIUEC3R7R3C6V43BMVGE', 2, 'AAAAAHpJtLyCO5obCNpqjMbQmGH17eu7X0aihBbj+OxerzYWAAAAAAAAAAIAAAACAAAAAQAAAEj8zkhIKpHf6nMVYpA7MSCKCW3glIB4PtiuiwRov/kgsQAAAABc9UwcAAAAAgAAAAgAAAABAAAACwAAAAgAAAADAA9CQAAAAAAAAAABkhCAcZaHL0qdGywPXBoNiY+knLkn7QvwWzutNwD6Au8AAABAaYo3bNj4V+Q+wOw+zfOpnzKSR5MCLlnmha4F7O4UbZFqyOR0DFkfLbh4TvEB7j8rZgqROb5fa159VKJQmPzLBQ=='); +INSERT INTO scphistory VALUES ('GB5ETNF4QI5ZUGYI3JVIZRWQTBQ7L3PLXNPUNIUEC3R7R3C6V43BMVGE', 3, 'AAAAAHpJtLyCO5obCNpqjMbQmGH17eu7X0aihBbj+OxerzYWAAAAAAAAAAMAAAACAAAAAQAAADBvO/RAY+QZdJzZyAEIOmiKIQZHh/aavSkH0ZrtiD5dsgAAAABc9UwdAAAAAAAAAAAAAAABkhCAcZaHL0qdGywPXBoNiY+knLkn7QvwWzutNwD6Au8AAABAdcGUmKE4FVBzYYXixh6xdfTNMQKm5KpvaA0gTA3v76kR09sZDbBg4xCvHcokj3nfSsO9X/5lP0IHS8fKSrqrCA=='); +INSERT INTO scphistory VALUES ('GB5ETNF4QI5ZUGYI3JVIZRWQTBQ7L3PLXNPUNIUEC3R7R3C6V43BMVGE', 4, 'AAAAAHpJtLyCO5obCNpqjMbQmGH17eu7X0aihBbj+OxerzYWAAAAAAAAAAQAAAACAAAAAQAAADBSUu6vWJcYeZhTlKZEd18VD3I4Or2UUzIWrjTomxvtfAAAAABc9UweAAAAAAAAAAAAAAABkhCAcZaHL0qdGywPXBoNiY+knLkn7QvwWzutNwD6Au8AAABAMNgaTRx0HqqTMhRBwcG+SzAH1Qu3vnn4RoW0GREr7m0gJ3IZZ0iipmvEKvWfy+m0Y9J3uiQYCBIrlqxslGC6BQ=='); +INSERT INTO scphistory VALUES ('GB5ETNF4QI5ZUGYI3JVIZRWQTBQ7L3PLXNPUNIUEC3R7R3C6V43BMVGE', 5, 'AAAAAHpJtLyCO5obCNpqjMbQmGH17eu7X0aihBbj+OxerzYWAAAAAAAAAAUAAAACAAAAAQAAADCqjE2vGPWcXz5QgjpY2QPOWNFF6vdW5GTNIlkBBDjtNAAAAABc9UwfAAAAAAAAAAAAAAABkhCAcZaHL0qdGywPXBoNiY+knLkn7QvwWzutNwD6Au8AAABASOPv8xyFkClYPYGHw7QUyER+Ez6YhYA2H73q4lvEoOiUYCTQQ9WXqec5h/v0kEDO8DhptImqATeui3fKSXjZBg=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('9210807196872f4a9d1b2c0f5c1a0d898fa49cb927ed0bf05b3bad3700fa02ef', 5, 'AAAAAQAAAAEAAAAAekm0vII7mhsI2mqMxtCYYfXt67tfRqKEFuP47F6vNhYAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('lastclosedledger ', '66c8b3359da7efdd61f5f2b37062916254c6418633cd212143740c7cd0be0855'); +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 0, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 5, + "currentBuckets": [ + { + "curr": "800813b473e21f2e44c033ea50411e0793cf63b6c6f67dc76412c1ec023622b1", + "next": { + "state": 0 + }, + "snap": "3027fab3fa479286a980e7daaf084b9a1742d2e1f54b01d6c06a96ed6cec7470" + }, + { + "curr": "ef31a20a398ee73ce22275ea8177786bac54656f33dcc4f3fec60d55ddf163d9", + "next": { + "state": 1, + "output": 
"3027fab3fa479286a980e7daaf084b9a1742d2e1f54b01d6c06a96ed6cec7470" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); +INSERT INTO storestate VALUES ('lastscpdata ', 'AAAAAgAAAAB6SbS8gjuaGwjaaozG0Jhh9e3ru19GooQW4/jsXq82FgAAAAAAAAAFAAAAA5IQgHGWhy9KnRssD1waDYmPpJy5J+0L8Fs7rTcA+gLvAAAAAQAAAJiqjE2vGPWcXz5QgjpY2QPOWNFF6vdW5GTNIlkBBDjtNAAAAABc9UwfAAAAAAAAAAEAAAAAekm0vII7mhsI2mqMxtCYYfXt67tfRqKEFuP47F6vNhYAAABABMJ4/lG0xV7SfQpk59M0s7iGw6ajpTNs8S5HmKw7EQ/9AoHwfOYY0Ic1EcrwwLi5hAZNpSa9PTMu+oiy4VhCBQAAAAEAAACYqoxNrxj1nF8+UII6WNkDzljRRer3VuRkzSJZAQQ47TQAAAAAXPVMHwAAAAAAAAABAAAAAHpJtLyCO5obCNpqjMbQmGH17eu7X0aihBbj+OxerzYWAAAAQATCeP5RtMVe0n0KZOfTNLO4hsOmo6UzbPEuR5isOxEP/QKB8HzmGNCHNRHK8MC4uYQGTaUmvT0zLvqIsuFYQgUAAABARnoQZFL7eg1G8Q6cQboWq78iqEHgGBqByGbq+ScET0qljlstJUMd0ApkhpwH8YHbAs1wgLLNV2cjp0O1YG2wDwAAAAB6SbS8gjuaGwjaaozG0Jhh9e3ru19GooQW4/jsXq82FgAAAAAAAAAFAAAAAgAAAAEAAAAwqoxNrxj1nF8+UII6WNkDzljRRer3VuRkzSJZAQQ47TQAAAAAXPVMHwAAAAAAAAAAAAAAAZIQgHGWhy9KnRssD1waDYmPpJy5J+0L8Fs7rTcA+gLvAAAAQEjj7/MchZApWD2Bh8O0FMhEfhM+mIWANh+96uJbxKDolGAk0EPVl6nnOYf79JBAzvA4abSJqgE3rot3ykl42QYAAAABvxmGwrv9sshnTTS57hMHv/TAwauwUun3r6DXbOwytfgAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAB3NZQAAAAAAAAAAAFvFIhaAAAAQKcGS9OsVnVHCVIH04C9ZKzzKYBRdCmy+Jwmzld7QcALOxZUcAgkuGfoSdvXpH38mNvrqQiaMsSNmTJWYRzHvgoAAAABAAAAAQAAAAEAAAAAekm0vII7mhsI2mqMxtCYYfXt67tfRqKEFuP47F6vNhYAAAAA'); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO trustlines VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 
'USD', 9223372036854775807, 1000000000, 1, 4, NULL, NULL); +INSERT INTO trustlines VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 'USD', 9223372036854775807, 1000000000, 1, 4, NULL, NULL); + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('b2a227c39c64a44fc7abd4c96819456f0399906d12c476d70b402bfdb296d6a3', 2, 1, 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('4d34e4401553f64a69ca63824598f4d9f0a29b0fe3ce38c4e4d7040daff12fff', 2, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('725756b1fbdf83b08127f385efedf0909cc820b6cce71f1c0897d15427cb5add', 2, 3, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('511a1f25e1f5ea2dc0b019231fe79273fcc27472c236c5980fc9dcdd6d915e20', 3, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('a2dabf4e9d1642722602272e178a37c973c9177b957da86192a99b3e9f3a9aa4', 3, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1', 4, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('1c454630267aa8767ec8c8e30450cea6ba660145e9c924abb75d7a6669b6c28a', 4, 2, 'AAAAAgAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('9ebeedebc52da318d6bd354644393970dd7506bb8bfa86f63c89c5678c07c549', 4, 3, 'AAAAAgAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf', 5, 1, 
'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+OcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+M4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('b2a227c39c64a44fc7abd4c96819456f0399906d12c476d70b402bfdb296d6a3', 2, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDt3KwmaPuPdFSUxdAFeb6OQetyQKIWazlbSMMhmHKNLD4sqhEqUZcQP0l+X/Op+osWmN6+FUYbsz75Q2jG4vMM', 'sqInw5xkpE/Hq9TJaBlFbwOZkG0SxHbXC0Ar/bKW1qMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('4d34e4401553f64a69ca63824598f4d9f0a29b0fe3ce38c4e4d7040daff12fff', 2, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvkAAAAAAAAAAABVvwF9wAAAECdDtG2xmgQ/MAtqqffgBM+UfZVHz9oDxtzFNd58k/m2blPGnIbbueamtpQvC94rRhaw/HsBEfaa9qjZw7YpVkG', 'TTTkQBVT9kppymOCRZj02fCimw/jzjjE5NcEDa/xL/8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('725756b1fbdf83b08127f385efedf0909cc820b6cce71f1c0897d15427cb5add', 2, 3, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBj4gBQ/BAbgqf7qOotatgZUHjDlsOtDNdp7alZR5/Fk9fGj+lxEygAZWzY7/LY1Z3SF6c0qs172LhAkkvV8p0M', 'cldWsfvfg7CBJ/OF7+3wkJzIILbM5x8cCJfRVCfLWt0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyrQFLUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('511a1f25e1f5ea2dc0b019231fe79273fcc27472c236c5980fc9dcdd6d915e20', 3, 1, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TH//////////AAAAAAAAAAEnciLVAAAAQHE1p+5tBPq8pUoGAXqO9S7aw5O9bn87RyPw0X1dK0d7hSR67uG/khAyC3o9TrPT6z9dZkhmX/NAk8nxm9hlYQE=', 'URofJeH16i3AsBkjH+eSc/zCdHLCNsWYD8nc3W2RXiAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('a2dabf4e9d1642722602272e178a37c973c9177b957da86192a99b3e9f3a9aa4', 3, 2, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TH//////////AAAAAAAAAAFvFIhaAAAAQHiLpENW73jcT1Sdkf/eaxjSLGTQCgIne0t34aIeydhplVtW9xDQ6hAT38G9kirKKRIyoKukoUNNhAwdWy/PjQc=', 'otq/Tp0WQnImAicuF4o3yXPJF3uVfahhkqmbPp86mqQAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvjnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvjnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+OcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1', 4, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAAAAAAGu5L5MAAAAQEnKDbDYvKkJjYK0arvhFln+GK0+7Ay6g0a+1hjRRelEAe4wmjeqNcRg2m4Cn7t4AjJzAsDQI0iXahGboJPINAw=', 'VuMhYEXVeb6kDy01oJQG3jqJTstb5w29peycBCeg1aEAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAADuaygB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('1c454630267aa8767ec8c8e30450cea6ba660145e9c924abb75d7a6669b6c28a', 4, 2, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAAAAAAGu5L5MAAAAQDpIk9q30tzfQkpQuCwF7iaP3bN6DRCk+wU3V867tqkLQV3Id452WsKUYpPQrN8ej6fk0uxeemBNsz1N5VMs9gY=', 'HEVGMCZ6qHZ+yMjjBFDOprpmAUXpySSrt116Zmm2wooAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAADuaygB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('9ebeedebc52da318d6bd354644393970dd7506bb8bfa86f63c89c5678c07c549', 4, 3, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAa7kvkwAAABAvsu5f+v7VrJDHKu28WwE2zwDQ5lMWnC7FogSlT/NjxgHxD7kkZHMW2lkjYx/9S45sIJGCO4vj6+gIvxHrw6lBA==', 'nr7t68UtoxjWvTVGRDk5cN11BruL+ob2PInFZ4wHxUkAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAQAAAAAAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAADuaygAAAAAAQAAAAIAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAAAAAAQAAAACAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf', 5, 1, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAB3NZQAAAAAAAAAAAFvFIhaAAAAQKcGS9OsVnVHCVIH04C9ZKzzKYBRdCmy+Jwmzld7QcALOxZUcAgkuGfoSdvXpH38mNvrqQiaMsSNmTJWYRzHvgo=', 'qhaPEhJLfBlsCtrufHOmTTf5lCjKy1mpH/OJYmhF588AAAAAAAAAZP////8AAAABAAAAAAAAAAH////+AAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvjOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvjOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (2, 1, 'AAAAAQAAAAs=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (2, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY 
(nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; 
Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/failed_transactions-horizon.sql b/services/horizon/internal/test/scenarios/failed_transactions-horizon.sql new file mode 100644 index 0000000000..a55231de42 --- /dev/null +++ b/services/horizon/internal/test/scenarios/failed_transactions-horizon.sql @@ -0,0 +1,1038 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS 
ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.asset_stats; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + 
+ +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer, + tx_set_operation_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + 
history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount > 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount > 0)) +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged bigint, + inner_transaction_hash character varying(64), + fee_account character varying(64), + inner_signatures character varying(96)[], + new_max_fee bigint +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT 
nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO asset_stats VALUES (1, '2000000000', 2, 0, ''); + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-06-03 18:28:47.032496+02'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-06-03 18:28:47.039657+02'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-06-03 18:28:47.044048+02'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-06-03 18:28:47.054532+02'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-06-03 18:28:47.063028+02'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-06-03 18:28:47.068415+02'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-06-03 18:28:47.081625+02'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-06-03 18:28:47.087463+02'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-06-03 18:28:47.090109+02'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-06-03 18:28:47.092718+02'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-06-03 18:28:47.095973+02'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-06-03 18:28:47.099698+02'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-06-03 18:28:47.107549+02'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-06-03 18:28:47.112768+02'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-06-03 18:28:47.115116+02'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-06-03 18:28:47.116796+02'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-06-03 18:28:47.117989+02'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-06-03 18:28:47.120034+02'); +INSERT INTO gorp_migrations VALUES ('18_account_for_signers.sql', '2019-10-31 14:19:49.123835+01'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_accounts VALUES (1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_accounts VALUES (2, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_accounts VALUES (3, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_accounts VALUES (4, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 4, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_assets VALUES (1, 'credit_alphanum4', 'USD', 
'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 1, true); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (1, 17179873281, 1, 2, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (3, 17179873281, 2, 3, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (2, 17179877377, 1, 2, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (3, 17179877377, 2, 3, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (2, 12884905985, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (1, 12884910081, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (3, 8589938689, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (4, 8589938689, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (3, 8589938689, 3, 10, '{"weight": 1, "public_key": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (1, 8589942785, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (4, 8589942785, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589942785, 3, 10, '{"weight": 1, "public_key": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON"}'); +INSERT INTO history_effects VALUES (2, 8589946881, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (4, 8589946881, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 8589946881, 3, 10, '{"weight": 1, "public_key": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (5, '66c8b3359da7efdd61f5f2b37062916254c6418633cd212143740c7cd0be0855', 'bf1986c2bbfdb2c8674d34b9ee1307bff4c0c1abb052e9f7afa0d76cec32b5f8', 0, 0, '2019-06-03 16:34:39', '2019-06-03 16:34:39.513495', '2019-06-03 16:34:39.513495', 21474836480, 16, 1000000000000000000, 900, 100, 100000000, 1000000, 11, 
'AAAAC78ZhsK7/bLIZ000ue4TB7/0wMGrsFLp96+g12zsMrX4qoxNrxj1nF8+UII6WNkDzljRRer3VuRkzSJZAQQ47TQAAAAAXPVMHwAAAAAAAAAAedLFm9CgXV79AuZN9agFNxrZDQgV3bflGAqP7+xIfZCTWyBpdSU4G8YptddHKwlvPsBcYWxdIkySJgjd5ywMFgAAAAUN4Lazp2QAAAAAAAAAAAOEAAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 1, NULL); +INSERT INTO history_ledgers VALUES (4, 'bf1986c2bbfdb2c8674d34b9ee1307bff4c0c1abb052e9f7afa0d76cec32b5f8', 'a2492d997ac133f9013efc258fb5bc53bef098bf998dc58cb50fc6f88b7fa800', 3, 3, '2019-06-03 16:34:38', '2019-06-03 16:34:39.530635', '2019-06-03 16:34:39.530635', 17179869184, 16, 1000000000000000000, 800, 100, 100000000, 1000000, 11, 'AAAAC6JJLZl6wTP5AT78JY+1vFO+8Ji/mY3FjLUPxviLf6gAUlLur1iXGHmYU5SmRHdfFQ9yODq9lFMyFq406Jsb7XwAAAAAXPVMHgAAAAAAAAAA/rqTjILWTvdE+20btzdaa93EPtUkF4WSO8KIRAy9IIC8J22Vsa39uStt34D9QoMjT5/4gUbFat/IlWaNgdl5HQAAAAQN4Lazp2QAAAAAAAAAAAMgAAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0, NULL); +INSERT INTO history_ledgers VALUES (3, 'a2492d997ac133f9013efc258fb5bc53bef098bf998dc58cb50fc6f88b7fa800', '75068cac2f1e6a0b2cafa2d5dbbccc5c196717f6fff8799cc3ab6a695b07b91a', 2, 2, '2019-06-03 16:34:37', '2019-06-03 16:34:39.542628', '2019-06-03 16:34:39.542629', 12884901888, 16, 1000000000000000000, 500, 100, 100000000, 1000000, 11, 'AAAAC3UGjKwvHmoLLK+i1du8zFwZZxf2//h5nMOramlbB7kabzv0QGPkGXSc2cgBCDpoiiEGR4f2mr0pB9Ga7Yg+XbIAAAAAXPVMHQAAAAAAAAAASRLUUHsB//zHsjHAo1ZAUBko8x4SRXNyaftZqVnF5MHjtZolJfxafOaw/T33UUwlzMdOOKXgeNDB1TeqED9oYQAAAAMN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (2, '75068cac2f1e6a0b2cafa2d5dbbccc5c196717f6fff8799cc3ab6a695b07b91a', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 3, 3, '2019-06-03 16:34:36', '2019-06-03 16:34:39.554749', '2019-06-03 16:34:39.55475', 8589934592, 16, 1000000000000000000, 300, 100, 100000000, 1000000, 11, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76Z/M5ISCqR3+pzFWKQOzEgiglt4JSAeD7YrosEaL/5ILEAAAAAXPVMHAAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA0VQ8umpMO+RJ/CzI0VZJe+5ehw/k4T4dhwPQwwEV77mnZa6hVLLUBEHau/exJ6kNMIzzPVv1XO9ewpHxXVAFPgAAAAIN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0, NULL); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-06-03 16:34:39.568757', '2019-06-03 16:34:39.568758', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0, NULL); + 
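+-- The bigint IDs in the operation and transaction fixture rows below (e.g.
+-- 21474840577) appear to follow Horizon's packed total-order ID layout, roughly
+-- (ledger_sequence << 32) | (transaction_application_order << 12) | operation_index.
+-- For example, 5 * 2^32 + (1 << 12) + 1 = 21474840577 identifies operation 1 of
+-- transaction 1 in ledger 5, and 21474840576 is the ID of the containing
+-- transaction. This comment is descriptive only and does not alter the fixture.
+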
+ +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 21474840577, 1); +INSERT INTO history_operation_participants VALUES (2, 21474840577, 2); +INSERT INTO history_operation_participants VALUES (3, 17179873281, 3); +INSERT INTO history_operation_participants VALUES (4, 17179873281, 1); +INSERT INTO history_operation_participants VALUES (5, 17179877377, 3); +INSERT INTO history_operation_participants VALUES (6, 17179877377, 2); +INSERT INTO history_operation_participants VALUES (7, 17179881473, 3); +INSERT INTO history_operation_participants VALUES (8, 12884905985, 2); +INSERT INTO history_operation_participants VALUES (9, 12884910081, 1); +INSERT INTO history_operation_participants VALUES (10, 8589938689, 4); +INSERT INTO history_operation_participants VALUES (11, 8589938689, 3); +INSERT INTO history_operation_participants VALUES (12, 8589942785, 4); +INSERT INTO history_operation_participants VALUES (13, 8589942785, 1); +INSERT INTO history_operation_participants VALUES (14, 8589946881, 4); +INSERT INTO history_operation_participants VALUES (15, 8589946881, 2); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 15, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (21474840577, 21474840576, 1, 1, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "200.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (17179873281, 17179873280, 1, 1, '{"to": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (17179877377, 17179877376, 1, 1, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (17179881473, 17179881472, 1, 3, '{"price": "0.5000000", "amount": "400.0000000", "price_r": {"d": 2, "n": 1}, "offer_id": 0, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_type": "native", "buying_asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}', 
'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (12884910081, 12884910080, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "trustor": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (8589938689, 8589938688, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589942785, 8589942784, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589946881, 8589946880, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transaction_participants VALUES (1, 21474840576, 1); +INSERT INTO history_transaction_participants VALUES (2, 21474840576, 2); +INSERT INTO history_transaction_participants VALUES (3, 17179873280, 3); +INSERT INTO history_transaction_participants VALUES (4, 17179873280, 1); +INSERT INTO history_transaction_participants VALUES (5, 17179877376, 3); +INSERT INTO history_transaction_participants VALUES (6, 17179877376, 2); +INSERT INTO history_transaction_participants VALUES (7, 17179881472, 3); +INSERT INTO history_transaction_participants VALUES (8, 12884905984, 2); +INSERT INTO history_transaction_participants VALUES (9, 12884910080, 1); +INSERT INTO history_transaction_participants VALUES (10, 8589938688, 4); +INSERT INTO history_transaction_participants VALUES (11, 8589938688, 3); +INSERT INTO history_transaction_participants VALUES (12, 8589942784, 4); +INSERT INTO history_transaction_participants VALUES (13, 8589942784, 1); +INSERT INTO history_transaction_participants VALUES (14, 8589946880, 4); +INSERT INTO history_transaction_participants VALUES (15, 8589946880, 2); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 15, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf', 5, 1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934594, 100, 1, '2019-06-03 16:34:39.51363', '2019-06-03 16:34:39.51363', 21474840576, 
'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAB3NZQAAAAAAAAAAAFvFIhaAAAAQKcGS9OsVnVHCVIH04C9ZKzzKYBRdCmy+Jwmzld7QcALOxZUcAgkuGfoSdvXpH38mNvrqQiaMsSNmTJWYRzHvgo=', 'AAAAAAAAAGT/////AAAAAQAAAAAAAAAB/////gAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvjOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvjOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+OcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+M4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{pwZL06xWdUcJUgfTgL1krPMpgFF0KbL4nCbOV3tBwAs7FlRwCCS4Z+hJ29ekffyY2+upCJoyxI2ZMlZhHMe+Cg==}', 'none', NULL, NULL, false, 100); +INSERT INTO history_transactions VALUES ('56e3216045d579bea40f2d35a09406de3a894ecb5be70dbda5ec9c0427a0d5a1', 4, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934593, 100, 1, '2019-06-03 16:34:39.530791', '2019-06-03 16:34:39.530791', 17179873280, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAAAAAAGu5L5MAAAAQEnKDbDYvKkJjYK0arvhFln+GK0+7Ay6g0a+1hjRRelEAe4wmjeqNcRg2m4Cn7t4AjJzAsDQI0iXahGboJPINAw=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAADuaygB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ScoNsNi8qQmNgrRqu+EWWf4YrT7sDLqDRr7WGNFF6UQB7jCaN6o1xGDabgKfu3gCMnMCwNAjSJdqEZugk8g0DA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('1c454630267aa8767ec8c8e30450cea6ba660145e9c924abb75d7a6669b6c28a', 4, 2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934594, 100, 1, '2019-06-03 16:34:39.53099', '2019-06-03 16:34:39.53099', 17179877376, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAAAAAAGu5L5MAAAAQDpIk9q30tzfQkpQuCwF7iaP3bN6DRCk+wU3V867tqkLQV3Id452WsKUYpPQrN8ej6fk0uxeemBNsz1N5VMs9gY=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAADuaygB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{OkiT2rfS3N9CSlC4LAXuJo/ds3oNEKT7BTdXzru2qQtBXch3jnZawpRik9Cs3x6Pp+TS7F56YE2zPU3lUyz2Bg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('9ebeedebc52da318d6bd354644393970dd7506bb8bfa86f63c89c5678c07c549', 4, 3, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934595, 100, 1, '2019-06-03 16:34:39.531178', '2019-06-03 16:34:39.531178', 17179881472, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAa7kvkwAAABAvsu5f+v7VrJDHKu28WwE2zwDQ5lMWnC7FogSlT/NjxgHxD7kkZHMW2lkjYx/9S45sIJGCO4vj6+gIvxHrw6lBA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAAAAAAQAAAACAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{vsu5f+v7VrJDHKu28WwE2zwDQ5lMWnC7FogSlT/NjxgHxD7kkZHMW2lkjYx/9S45sIJGCO4vj6+gIvxHrw6lBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('511a1f25e1f5ea2dc0b019231fe79273fcc27472c236c5980fc9dcdd6d915e20', 3, 1, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934593, 100, 1, '2019-06-03 16:34:39.542842', '2019-06-03 16:34:39.542842', 12884905984, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TH//////////AAAAAAAAAAEnciLVAAAAQHE1p+5tBPq8pUoGAXqO9S7aw5O9bn87RyPw0X1dK0d7hSR67uG/khAyC3o9TrPT6z9dZkhmX/NAk8nxm9hlYQE=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{cTWn7m0E+rylSgYBeo71LtrDk71ufztHI/DRfV0rR3uFJHru4b+SEDILej1Os9PrP11mSGZf80CTyfGb2GVhAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('a2dabf4e9d1642722602272e178a37c973c9177b957da86192a99b3e9f3a9aa4', 3, 2, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934593, 100, 1, '2019-06-03 16:34:39.543101', '2019-06-03 16:34:39.543101', 12884910080, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TH//////////AAAAAAAAAAFvFIhaAAAAQHiLpENW73jcT1Sdkf/eaxjSLGTQCgIne0t34aIeydhplVtW9xDQ6hAT38G9kirKKRIyoKukoUNNhAwdWy/PjQc=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvjnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvjnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+OcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{eIukQ1bveNxPVJ2R/95rGNIsZNAKAid7S3fhoh7J2GmVW1b3ENDqEBPfwb2SKsopEjKgq6ShQ02EDB1bL8+NBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b2a227c39c64a44fc7abd4c96819456f0399906d12c476d70b402bfdb296d6a3', 2, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-06-03 16:34:39.554903', '2019-06-03 16:34:39.554903', 8589938688, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDt3KwmaPuPdFSUxdAFeb6OQetyQKIWazlbSMMhmHKNLD4sqhEqUZcQP0l+X/Op+osWmN6+FUYbsz75Q2jG4vMM', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 
'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{7dysJmj7j3RUlMXQBXm+jkHrckCiFms5W0jDIZhyjSw+LKoRKlGXED9Jfl/zqfqLFpjevhVGG7M++UNoxuLzDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('4d34e4401553f64a69ca63824598f4d9f0a29b0fe3ce38c4e4d7040daff12fff', 2, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 2, 100, 1, '2019-06-03 16:34:39.555138', '2019-06-03 16:34:39.555138', 8589942784, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAACVAvkAAAAAAAAAAABVvwF9wAAAECdDtG2xmgQ/MAtqqffgBM+UfZVHz9oDxtzFNd58k/m2blPGnIbbueamtpQvC94rRhaw/HsBEfaa9qjZw7YpVkG', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{nQ7RtsZoEPzALaqn34ATPlH2VR8/aA8bcxTXefJP5tm5TxpyG27nmpraULwveK0YWsPx7ARH2mvao2cO2KVZBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('725756b1fbdf83b08127f385efedf0909cc820b6cce71f1c0897d15427cb5add', 2, 3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 3, 100, 1, '2019-06-03 16:34:39.55527', '2019-06-03 16:34:39.55527', 8589946880, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBj4gBQ/BAbgqf7qOotatgZUHjDlsOtDNdp7alZR5/Fk9fGj+lxEygAZWzY7/LY1Z3SF6c0qs172LhAkkvV8p0M', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyrQFLUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Y+IAUPwQG4Kn+6jqLWrYGVB4w5bDrQzXae2pWUefxZPXxo/pcRMoAGVs2O/y2NWd0henNKrNe9i4QJJL1fKdDA==}', 'none', NULL, NULL, true, 100); + + 
+-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_fee_account; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_fee_account ON history_transactions USING btree (fee_account) WHERE fee_account IS NOT NULL; + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_inner_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_inner_hash ON history_transactions USING btree (inner_transaction_hash) WHERE inner_transaction_hash IS NOT NULL; + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: 
INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- 
Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + + +-- added manually +ALTER TABLE IF EXISTS ONLY public.key_value_store DROP CONSTRAINT IF EXISTS key_value_store_pkey; +DROP TABLE IF EXISTS public.key_value_store; +CREATE TABLE key_value_store ( + key character varying(255) NOT NULL, + value character varying(255) NOT NULL +); +INSERT INTO key_value_store VALUES ('exp_ingest_last_ledger', '0'); +ALTER TABLE ONLY key_value_store + ADD CONSTRAINT key_value_store_pkey PRIMARY KEY (key); + +CREATE TABLE accounts_signers ( + account character varying(64), + signer character varying(64), + weight integer NOT NULL, + -- we will query by signer so that is why signer is the first item in the composite key + PRIMARY KEY (signer, account) +); +ALTER TABLE history_transactions ADD account_muxed varchar(69) NULL, ADD fee_account_muxed varchar(69) NULL; +ALTER 
TABLE history_operations ADD source_account_muxed varchar(69) NULL; +ALTER TABLE history_effects ADD address_muxed varchar(69) NULL; + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/ingest_asset_stats-core.sql b/services/horizon/internal/test/scenarios/ingest_asset_stats-core.sql new file mode 100644 index 0000000000..61c152eb36 --- /dev/null +++ b/services/horizon/internal/test/scenarios/ingest_asset_stats-core.sql @@ -0,0 +1,775 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- 
+ +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + signers text, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid 
character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 999999969999999700, 3, 0, NULL, '', 'AQAAAA==', 0, 2, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 9999999200, 8589934600, 0, NULL, 'dGVzdC5jb20=', 'AQAAAA==', 1, 6, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 9999999600, 8589934596, 2, NULL, '', 'AQAAAA==', 0, 8, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 9999999400, 8589934598, 2, NULL, '', 'AQAAAA==', 2, 8, NULL, NULL, NULL); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('4a4b1ff214c4817fd203885fdf554c8d86271d6788616565150d7de7566fdf98', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 'ed397b2691125cc979a57b664a163ce14e4d2edf5759a08917e75ba89dc7cfe3', 2, 1559579861, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76Z9GwX6q8KzxXUDk3+fdZshszCWX2SbuAnJNMiSoulCykAAAAAXPVM1QAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAACyk/eWGAYEOhla9SxZHjkxGIQ61pIijmPF9hVp1Qv5rtOXsmkRJcyXmle2ZKFjzhTk0u31dZoIkX51uoncfP4wAAAAIN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('73d45d07ebf473cd3a6ae2d4aa6f56d01f6ffd71d7b3567afd5060b6fc921b56', '4a4b1ff214c4817fd203885fdf554c8d86271d6788616565150d7de7566fdf98', 'c5d545ffb4a5bf362c49831c8ba3af0bb30ca63ba9689393ca1ad1bdbe6699ad', 3, 1559579862, 'AAAAC0pLH/IUxIF/0gOIX99VTI2GJx1niGFlZRUNfedWb9+YispBRYjJkjprlnEXQeQh2xZxp4jvWl+jqXzxvvNnVZkAAAAAXPVM1gAAAAAAAAAAz4e0cNoSrSyJbiU9ww4Xz/C47nFVIBEHkR11kgme+DzF1UX/tKW/NixJgxyLo68LswymO6lok5PKGtG9vmaZrQAAAAMN4Lazp2QAAAAAAAAAAAJYAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('c06da07d9df310bcff40b5fd1f635316526a35aa2170720d7e7af6a1231bc889', '73d45d07ebf473cd3a6ae2d4aa6f56d01f6ffd71d7b3567afd5060b6fc921b56', 'c9bc16393c022f1d22b2bb5a1edfef09b8afcb9c7c9155a7d5df678e53cd5f94', 4, 1559579863, 'AAAAC3PUXQfr9HPNOmri1KpvVtAfb/1x17NWev1QYLb8khtWr+ch0bnpNLHQ9oKhg8ODAYI15MnucRB9qXMxtHv/+0cAAAAAXPVM1wAAAAAAAAAAg4bDLmPB7NuPhcFNu33PjSojcPo2TH0x5yLLubhqMjzJvBY5PAIvHSKyu1oe3+8JuK/LnHyRVafV32eOU81flAAAAAQN4Lazp2QAAAAAAAAAAAPoAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('333ec5e9ed90bbebb591729290b241ba6530ddf9f6102e0244f2e5b79721c1e4', 'c06da07d9df310bcff40b5fd1f635316526a35aa2170720d7e7af6a1231bc889', '6e741a3887d68776d7d65d3b202541b8f0a153b383d3593a115eefd52d5750e7', 5, 1559579864, 'AAAAC8BtoH2d8xC8/0C1/R9jUxZSajWqIXByDX569qEjG8iJbKMKb88bKwjF/tf5B2srqlB1Wt1wpqMZDxD5nC0zt2YAAAAAXPVM2AAAAAAAAAAAAvTWoAZ8sb3n9zINDA1hqoanXiN/pcjfEtKLDrzVvDNudBo4h9aHdtfWXTsgJUG48KFTs4PTWToRXu/VLVdQ5wAAAAUN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('e48cf2fed5e3b8fbf715a418f92f81b8362c8804fc334e73d59b9ec99ee4224b', '333ec5e9ed90bbebb591729290b241ba6530ddf9f6102e0244f2e5b79721c1e4', 'cb7e1b8adc848cb02b952650e76809bbca14748116da89afd669e2c443d915f2', 6, 1559579865, 
'AAAACzM+xentkLvrtZFykpCyQbplMN359hAuAkTy5beXIcHku8Zf/iDsmpUtzhUhvCkZxGvlhB58WJOSt+WZKn6exCUAAAAAXPVM2QAAAAAAAAAAXbLrzjyhc09OMR0rGIcNlJyvUas57e4z2TpUhlLwGBrLfhuK3ISMsCuVJlDnaAm7yhR0gRbaia/WaeLEQ9kV8gAAAAYN4Lazp2QAAAAAAAAAAAakAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('1491ff6cbfb702b38010befb6f0a64d62293bc00539c24a4cfe3038bf7174339', 'e48cf2fed5e3b8fbf715a418f92f81b8362c8804fc334e73d59b9ec99ee4224b', '5b58c2bc29fe6bac1d0c56dea80f64e5eb0f2f0a2f9fe20d0e83a4883536040d', 7, 1559579866, 'AAAAC+SM8v7V47j79xWkGPkvgbg2LIgE/DNOc9Wbnsme5CJLT03OgiB/+MODRPQfafM9wx2p409pQvaZdj1O67SYKJ0AAAAAXPVM2gAAAAAAAAAAp6X4a1fQWRN1fT4Oha1VAb23rxjrafhg/8wjyOPQ4elbWMK8Kf5rrB0MVt6oD2Tl6w8vCi+f4g0Og6SINTYEDQAAAAcN4Lazp2QAAAAAAAAAAAdsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('b126571afcaf19157b639f5eba881e25930d94a37a52815db11819ccd5340b94', '1491ff6cbfb702b38010befb6f0a64d62293bc00539c24a4cfe3038bf7174339', '2201c7170524fe7f40a7d311613b1a444759e586a8e2208b0c5e76b65469702c', 8, 1559579867, 'AAAACxSR/2y/twKzgBC++28KZNYik7wAU5wkpM/jA4v3F0M5SrnXL2P8lSzo6IVDmtY1ZEbJBT/DkKVVD6JAdZ0hcmsAAAAAXPVM2wAAAAAAAAAAZxdQWfDM1MxUCuefAme/bjqOchzkHE89OsTko56c4I4iAccXBST+f0Cn0xFhOxpER1nlhqjiIIsMXna2VGlwLAAAAAgN4Lazp2QAAAAAAAAAAAg0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GBK3SDGCKGXGTNDANCRVOLMKSOF35B7ILJHLVMV2HGC24K5DTEPIEW3J', 2, 'AAAAAFW5DMJRrmm0YGijVy2Kk4u+h+haTrqyujmFriujmR6CAAAAAAAAAAIAAAACAAAAAQAAAEj0bBfqrwrPFdQOTf591myGzMJZfZJu4Cck0yJKi6ULKQAAAABc9UzVAAAAAgAAAAgAAAABAAAACwAAAAgAAAADAA9CQAAAAAAAAAABZSW7EcXyJc3IqZaixfKTjskUr4LGn9vf9iKG8be52UsAAABAl9HPpG86XoCaUSzs+j8l+EdDIRw++X/6DhtP2qCw15ivl0jU40dK7qW0KG+w2+pmpVfbAg1U3h0X8PY3/uUeBg=='); +INSERT INTO scphistory VALUES ('GBK3SDGCKGXGTNDANCRVOLMKSOF35B7ILJHLVMV2HGC24K5DTEPIEW3J', 3, 'AAAAAFW5DMJRrmm0YGijVy2Kk4u+h+haTrqyujmFriujmR6CAAAAAAAAAAMAAAACAAAAAQAAADCKykFFiMmSOmuWcRdB5CHbFnGniO9aX6OpfPG+82dVmQAAAABc9UzWAAAAAAAAAAAAAAABZSW7EcXyJc3IqZaixfKTjskUr4LGn9vf9iKG8be52UsAAABA0VNrzYuvnMwiRWK94gHmtR9bYJbuCQ710q37H9exBuH54uOBypS6Evnwwg5OwFHiw0Fbwn+4NViaVWQ4LF7dDQ=='); +INSERT INTO scphistory VALUES ('GBK3SDGCKGXGTNDANCRVOLMKSOF35B7ILJHLVMV2HGC24K5DTEPIEW3J', 4, 'AAAAAFW5DMJRrmm0YGijVy2Kk4u+h+haTrqyujmFriujmR6CAAAAAAAAAAQAAAACAAAAAQAAADCv5yHRuek0sdD2gqGDw4MBgjXkye5xEH2pczG0e//7RwAAAABc9UzXAAAAAAAAAAAAAAABZSW7EcXyJc3IqZaixfKTjskUr4LGn9vf9iKG8be52UsAAABAsT+L3/vpFcrinu5W7OcIiaB5U2WP8c1Jfj32pRJLcIjAxU3mLyCYH2er6zawxHwO+T3QwKFsXX76qGEmJr/ACQ=='); +INSERT INTO scphistory VALUES 
('GBK3SDGCKGXGTNDANCRVOLMKSOF35B7ILJHLVMV2HGC24K5DTEPIEW3J', 5, 'AAAAAFW5DMJRrmm0YGijVy2Kk4u+h+haTrqyujmFriujmR6CAAAAAAAAAAUAAAACAAAAAQAAADBsowpvzxsrCMX+1/kHayuqUHVa3XCmoxkPEPmcLTO3ZgAAAABc9UzYAAAAAAAAAAAAAAABZSW7EcXyJc3IqZaixfKTjskUr4LGn9vf9iKG8be52UsAAABAKUVJypXnR72YXNvWFd/QA19OilzpPyikn8tGL3DFkICVyIG3c0o48XO2JOaPU9BaBeou1XGY9ZJ08FGJFTlrCA=='); +INSERT INTO scphistory VALUES ('GBK3SDGCKGXGTNDANCRVOLMKSOF35B7ILJHLVMV2HGC24K5DTEPIEW3J', 6, 'AAAAAFW5DMJRrmm0YGijVy2Kk4u+h+haTrqyujmFriujmR6CAAAAAAAAAAYAAAACAAAAAQAAADC7xl/+IOyalS3OFSG8KRnEa+WEHnxYk5K35Zkqfp7EJQAAAABc9UzZAAAAAAAAAAAAAAABZSW7EcXyJc3IqZaixfKTjskUr4LGn9vf9iKG8be52UsAAABAoTxgUf5JWf+khFu6WE1nnguOEEuhpwFGvG2Zvq6zyhWjz4O6CAIMsC6k776gq/12PjsFV/UiRp9ICD0WzawpCA=='); +INSERT INTO scphistory VALUES ('GBK3SDGCKGXGTNDANCRVOLMKSOF35B7ILJHLVMV2HGC24K5DTEPIEW3J', 7, 'AAAAAFW5DMJRrmm0YGijVy2Kk4u+h+haTrqyujmFriujmR6CAAAAAAAAAAcAAAACAAAAAQAAADBPTc6CIH/4w4NE9B9p8z3DHanjT2lC9pl2PU7rtJgonQAAAABc9UzaAAAAAAAAAAAAAAABZSW7EcXyJc3IqZaixfKTjskUr4LGn9vf9iKG8be52UsAAABA0ZDAcPtHZTvM1TTJoaV/9yq1xlm1PYS6TyQKG7fzH55TcuvdcDTxbwrv5Wp1uXk4AuYmHOic/RlcqpiW0DpdAg=='); +INSERT INTO scphistory VALUES ('GBK3SDGCKGXGTNDANCRVOLMKSOF35B7ILJHLVMV2HGC24K5DTEPIEW3J', 8, 'AAAAAFW5DMJRrmm0YGijVy2Kk4u+h+haTrqyujmFriujmR6CAAAAAAAAAAgAAAACAAAAAQAAADBKudcvY/yVLOjohUOa1jVkRskFP8OQpVUPokB1nSFyawAAAABc9UzbAAAAAAAAAAAAAAABZSW7EcXyJc3IqZaixfKTjskUr4LGn9vf9iKG8be52UsAAABApIDWWdB1l0CEfOPOxHD21iW3GeqwDitMz8XWvkrCExybSyshWpB8wYDkUeM6qQQzJk/eXVLnfRz8WByWwLEXBw=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('6525bb11c5f225cdc8a996a2c5f2938ec914af82c69fdbdff62286f1b7b9d94b', 8, 'AAAAAQAAAAEAAAAAVbkMwlGuabRgaKNXLYqTi76H6FpOurK6OYWuK6OZHoIAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 0, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('lastclosedledger ', 'b126571afcaf19157b639f5eba881e25930d94a37a52815db11819ccd5340b94'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 8, + "currentBuckets": [ + { + "curr": "0ee556994512ba1cecd8206a737f35dac0b118acf9f8e2b9116e3fb8cae5587c", + "next": { + "state": 0 + }, + "snap": "0748c2684ffa5a4cab41f76e8ca636360e473fb6381d5e09420cb53126a78bf9" + }, + { + "curr": "e1ba18513fd20835bfcbb6596da1b583a28278569bd4ea765bbe81e0049452f3", + "next": { + "state": 1, + "output": "db7264c8d54c6ac7ac5e50a2438ddcfea23db51c0a2e61dcffc62b66ea61f304" + }, + "snap": "67a15d5341e360bd83abdedc3eed497e6d0eaf1c3d90e096bda2d6db7b7f10a0" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 1, + "output": "67a15d5341e360bd83abdedc3eed497e6d0eaf1c3d90e096bda2d6db7b7f10a0" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + 
"next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); +INSERT INTO storestate VALUES ('lastscpdata ', 'AAAAAgAAAABVuQzCUa5ptGBoo1ctipOLvofoWk66sro5ha4ro5keggAAAAAAAAAIAAAAA2UluxHF8iXNyKmWosXyk47JFK+Cxp/b3/YihvG3udlLAAAAAQAAAJhKudcvY/yVLOjohUOa1jVkRskFP8OQpVUPokB1nSFyawAAAABc9UzbAAAAAAAAAAEAAAAAVbkMwlGuabRgaKNXLYqTi76H6FpOurK6OYWuK6OZHoIAAABAhFDXSvGRgDD6ahcDuIpkVj1qUF4ORToElQztXzSrupfJ1AZf6mcKjBdI0Q6FzQkhcJhGytX3Loqktw3eaqksBAAAAAEAAACYSrnXL2P8lSzo6IVDmtY1ZEbJBT/DkKVVD6JAdZ0hcmsAAAAAXPVM2wAAAAAAAAABAAAAAFW5DMJRrmm0YGijVy2Kk4u+h+haTrqyujmFriujmR6CAAAAQIRQ10rxkYAw+moXA7iKZFY9alBeDkU6BJUM7V80q7qXydQGX+pnCowXSNEOhc0JIXCYRsrV9y6KpLcN3mqpLAQAAABAaHFXi46ZomVXihMvQ9vXSDE2fqDuXgbkelJKqI+3Rv3SdO6ucP2zwrworLcO42SOcwqOsPRRgsu+/91nzjXjAQAAAABVuQzCUa5ptGBoo1ctipOLvofoWk66sro5ha4ro5keggAAAAAAAAAIAAAAAgAAAAEAAAAwSrnXL2P8lSzo6IVDmtY1ZEbJBT/DkKVVD6JAdZ0hcmsAAAAAXPVM2wAAAAAAAAAAAAAAAWUluxHF8iXNyKmWosXyk47JFK+Cxp/b3/YihvG3udlLAAAAQKSA1lnQdZdAhHzjzsRw9tYltxnqsA4rTM/F1r5KwhMcm0srIVqQfMGA5FHjOqkEMyZP3l1S530c/FgclsCxFwcAAAABFJH/bL+3ArOAEL77bwpk1iKTvABTnCSkz+MDi/cXQzkAAAACAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAz97YAAAAAAAAAAEnciLVAAAAQHbmlPqVcxoIqzJFayddJwGRM8Vxm0BYlui3LVu9d/nB2hb/tsUWgUZLCUnNv/CPjsMTAN2LmVkYOMtCdYc+NQ8AAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAABkAAAAAgAAAAYAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAABLel5AAAAAAAAAAAa7kvkwAAABAqeNpZLpGTrXueTDX8q4J99p43UzKa7Pyb0GubREnUhg0p2e51X7vYewIzVWd+fP/6IHoREXE6wSnWYEelQxDCQAAAAEAAAABAAAAAQAAAABVuQzCUa5ptGBoo1ctipOLvofoWk66sro5ha4ro5keggAAAAA='); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO trustlines VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 'BTC', 9223372036854775807, 1009876000, 1, 6, NULL, NULL); +INSERT INTO trustlines VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 'SCOT', 9223372036854775807, 10000000000, 1, 6, NULL, NULL); +INSERT INTO trustlines VALUES 
('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 'USD', 10000000000000000, 2001211688680, 1, 8, NULL, NULL); +INSERT INTO trustlines VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 'USD', 9223372036854775807, 998798745320, 1, 8, NULL, NULL); + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('db398eb4ae89756325643cad21c94e13bfc074b323ee83e141bf701a5d904f1b', 2, 1, 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('f97caffab8c16023a37884165cb0b3ff1aa2daf4000fef49d21efc847ddbfbea', 2, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('725756b1fbdf83b08127f385efedf0909cc820b6cce71f1c0897d15427cb5add', 2, 3, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('bd60680a1378ffec739e1ffa2db4cd51f58babfb714e04a52bd2b65bf8a31b4f', 3, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('c780569c402c298b7b5f3f1a6a20ac1219a06df39a78fb3ac6d93ca53ad4e5ed', 3, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2d317dcef8626e639bcaab4a4b1ca1e8e6647eb46d65ca8d98137cd98eb10ae7', 3, 3, 'AAAAAgAAAAMAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('00ab9cfce2b4c4141d8bb6768dd094bdbb1c7406710dbb3ba0ef98870f63a344', 4, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('4486298e04ffb1f3620c521f81adb5207f5d12c21b08a076589d2be3d8dae543', 4, 2, 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES 
('1d6308dc6e9617bee39a69f68176cf6f3abcf4d3617db3c766647bd198a5e442', 4, 3, 'AAAAAgAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('647eaba7f3bc5726dc1041553fe4741542ed0a2af2d098d93b0bac5b6f3c624c', 4, 4, 'AAAAAgAAAAMAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('3b666a253313fc7a0d241ee28064eec78aaa5ebd0a7c0ae7f85259e80fad029f', 5, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAUAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4tQAAAACAAAAAgAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txfeehistory VALUES ('d9d6b816a0a3c640637d48fe33fa00f9ef116103c204834a1c18a9765803fd5d', 5, 2, 'AAAAAgAAAAMAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+LUAAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAUAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4nAAAAACAAAAAgAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txfeehistory VALUES ('6ab66668ea2801de6a7239c94d44e5d41f361812607748125da372b27b66cd3c', 5, 3, 'AAAAAgAAAAMAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+JwAAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAUAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4gwAAAACAAAAAgAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txfeehistory VALUES ('cf0f5fcd46881458ba623f9e6e7c52489d4bd3979a4196819882bb6240b4e855', 6, 1, 'AAAAAgAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+JwAAAAAgAAAAMAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2300600248f841cd5f50276fc18eb16bc88a734e7a290f287ab3a2aa92684826', 6, 2, 'AAAAAgAAAAMAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+IMAAAAAgAAAAUAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4agAAAACAAAABQAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txfeehistory VALUES ('5a48a811ec874fc9c5d77c7caeb8abcea076c1baa51b755b2a878391a089c7d1', 6, 3, 'AAAAAgAAAAMAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+GoAAAAAgAAAAUAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4UQAAAACAAAABQAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txfeehistory VALUES ('4e4779f0d69db51ec4f7b73387b60c239433804d2747def21b7771e9b71d75be', 6, 4, 'AAAAAgAAAAMAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+FEAAAAAgAAAAUAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4OAAAAACAAAABQAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txfeehistory VALUES 
('fa17f7c083fddc53e8e28885be934e19bf637e287c1951be581dd05c0be93b56', 7, 1, 'AAAAAgAAAAMAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+LUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('c42c988a72ac8aed3bb9a7b7dfb96b905e33d1506f4e663360135e6c6e115078', 7, 2, 'AAAAAgAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+JwAAAAAgAAAAQAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+IMAAAAAgAAAAQAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('142d3dbe5948eb39db1fd62d912ce67131b1b300adb015acf0f17d91a057429d', 8, 1, 'AAAAAgAAAAMAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+LUAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+JwAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('3362c9b76d85a844c739b338dbef4213ce64eca1ceb6c0d70e878975ab1477b1', 8, 2, 'AAAAAgAAAAMAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+IMAAAAAgAAAAUAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+GoAAAAAgAAAAUAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('db398eb4ae89756325643cad21c94e13bfc074b323ee83e141bf701a5d904f1b', 2, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvkAAAAAAAAAAABVvwF9wAAAEAYjQcPT2G5hqnBmgGGeg9J8l4c1EnUlxklElH9sqZr0971F6OLWfe/m4kpFtI+sI0i1qLit5A0JyWnbhYLW5oD', '2zmOtK6JdWMlZDytIclOE7/AdLMj7oPhQb9wGl2QTxsAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('f97caffab8c16023a37884165cb0b3ff1aa2daf4000fef49d21efc847ddbfbea', 2, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBmKpSgvrwKO20XCOfYfXsGEEUtwYaaEfqSu6ymJmlDma+IX6I7IggbUZMocQdZ94IMAfKdQANqXbIO7ysweeMC', '+Xyv+rjBYCOjeIQWXLCz/xqi2vQAD+9J0h78hH3b++oAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('725756b1fbdf83b08127f385efedf0909cc820b6cce71f1c0897d15427cb5add', 2, 3, 
'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBj4gBQ/BAbgqf7qOotatgZUHjDlsOtDNdp7alZR5/Fk9fGj+lxEygAZWzY7/LY1Z3SF6c0qs172LhAkkvV8p0M', 'cldWsfvfg7CBJ/OF7+3wkJzIILbM5x8cCJfRVCfLWt0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyrQFLUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('bd60680a1378ffec739e1ffa2db4cd51f58babfb714e04a52bd2b65bf8a31b4f', 3, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABruS+TAAAAEBkz5uRgU5FxqOu8Yak7Bbdc0BtgvEJ0FjurZz/LgGwT2EX91Y81YrdSVu2NPR0lbhSAotGQlvSPYEy5vN67p4C', 'vWBoChN4/+xznh/6LbTNUfWLq/txTgSlK9K2W/ijG08AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvjnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvjnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('c780569c402c298b7b5f3f1a6a20ac1219a06df39a78fb3ac6d93ca53ad4e5ed', 3, 2, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAAAAAAAAAAAAAAAB+ZAt7wAAAEBHwkZcyIWmaPvEtDlR8Ed4dD1Mep2juLtHF3n5RG0jurJhKq/3MB1zR6bDHr+wow35ijK92ihjHWqTxjzKDhkO', 'x4BWnEAsKYt7Xz8aaiCsEhmgbfOaePs6xtk8pTrU5e0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvjOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvjOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('2d317dcef8626e639bcaab4a4b1ca1e8e6647eb46d65ca8d98137cd98eb10ae7', 3, 3, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB+ZAt7wAAAEB8q5Of+GA0eadw+hTrTCIAoedKyFge/Kv+RUNsq7sv7pSoLAQFWqwFIvxCGBul0XhSxOomG/gWgmIiwj6a1goM', 'LTF9zvhibmObyqtKSxyh6OZkfrRtZcqNmBN82Y6xCucAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvjOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvjOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('00ab9cfce2b4c4141d8bb6768dd094bdbb1c7406710dbb3ba0ef98870f63a344', 4, 1, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAEnciLVAAAAQLVbII+1LeizxgncDI46KHyBt05+H92n1+R328J9zNl2fgJW2nfn3FIoLVs2qV1+CUpr121a2B7AM6HKr4nBLAI=', 'AKuc/OK0xBQdi7Z2jdCUvbscdAZxDbs7oO+Yhw9jo0QAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('4486298e04ffb1f3620c521f81adb5207f5d12c21b08a076589d2be3d8dae543', 4, 2, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQFp8rsD4Au1oeZkBT1RHIJRyxWayau3f5UjeA0w4+0LzjLEyi9nGMs8elAH4lDhhDJxCJ8HhxbG+XT/cmQsu1QA=', 'RIYpjgT/sfNiDFIfga21IH9dEsIbCKB2WJ0r49ja5UMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAABAAAAAAAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAACAAAAAAAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAIAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAIAAAABAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('1d6308dc6e9617bee39a69f68176cf6f3abcf4d3617db3c766647bd198a5e442', 4, 3, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQLEyHlSQ5gb4aQ7evOl4mZ6lSTIF7kShyso/iyP0uz3ipHocd38/dLiu7lVvMGXwo6ymJ7mixdDuNLIWiI9TbQI=', 'HWMI3G6WF77jmmn2gXbPbzq89NNhfbPHZmR70Zil5EIAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAACAAAAAQAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAADAAAAAQAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAABAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFCVEMAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('647eaba7f3bc5726dc1041553fe4741542ed0a2af2d098d93b0bac5b6f3c624c', 4, 4, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABU0NPVAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TH//////////AAAAAAAAAAEnciLVAAAAQHTUKeZaZX/yonQdzrGY0klZqwhUZd7ontUbjpQmLk+XRY8uYos+AI2Z3qqU3QF27EV4VRsVcUUvvn57fqFdzgQ=', 'ZH6rp/O8VybcEEFVP+R0FULtCiry0JjZOwusW288YkwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFTQ09UAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('3b666a253313fc7a0d241ee28064eec78aaa5ebd0a7c0ae7f85259e80fad029f', 5, 1, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAEAAAAAAAAAAfmQLe8AAABAL6czYFvSBhdVeD4fbXOHuXFa2CDqLpFfc+QJnoiPLt/23YViURGLyfg388FKMKsbNJEgmFsCJjtgl3fj7wr/Aw==', 'O2ZqJTMT/HoNJB7igGTux4qqXr0KfArn+FJZ6A+tAp8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAcAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAviDAAAAAIAAAACAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+IMAAAAAgAAAAMAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAAAAAAAAAAAAAAAAAABAAAABQAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('d9d6b816a0a3c640637d48fe33fa00f9ef116103c204834a1c18a9765803fd5d', 5, 2, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAEAAAAAAAAAAfmQLe8AAABAMIB8sKelxTqFOLPILjB0nItcfrGrCwursIhshVeKHSw2IC4pmCeg7KGDOLpfUCLc23n5HeTsxJsb/CrHJF/XDQ==', '2da4FqCjxkBjfUj+M/oA+e8RYQPCBINKHBipdlgD/V0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAcAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAviDAAAAAIAAAADAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+IMAAAAAgAAAAQAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAAAAAAAAAAAAAAAAAABAAAABQAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('6ab66668ea2801de6a7239c94d44e5d41f361812607748125da372b27b66cd3c', 5, 3, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAEAAAAAAAAAAfmQLe8AAABA78VZpv8Z9a3XM9gv6hyMLt2bBrZ5sKsFRU4GKXYtxY2MkAt9J9ENrSRZn1M0jlx9FFGtCvtFFZi8DhxvqDyaBQ==', 'arZmaOooAd5qcjnJTUTl1B82GBJgd0gSXaNysntmzTwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAcAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAviDAAAAAIAAAAEAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+IMAAAAAgAAAAUAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAAAAAAAAAAAAAAAAAABAAAABQAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('cf0f5fcd46881458ba623f9e6e7c52489d4bd3979a4196819882bb6240b4e855', 6, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABU0NPVAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAAAAAAGu5L5MAAAAQLSYQCC1+DGQ8srHLxi6SfnN/dn8t7mAcXlDniU3J+d6Ezg1U6lg9i0jWOsfamioYVbJ9dAiQBZyIsn7TB5cLww=', 'zw9fzUaIFFi6Yj+ebnxSSJ1L05eaQZaBmIK7YkC06FUAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvicAAAAAIAAAADAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvicAAAAAIAAAAEAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFTQ09UAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFTQ09UAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAlQL5AB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2300600248f841cd5f50276fc18eb16bc88a734e7a290f287ab3a2aa92684826', 6, 2, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAOjUt1+AAAAAAAAAAAH5kC3vAAAAQIqp3RfP1ueB0TRJRYXnao+kmde4BDh8q0Ep7q14Q8oRNx1R9utncfpoXr7JOcqiwtgarT9k6KmMyjda97H5RgM=', 'IwBgAkj4Qc1fUCdvwY6xa8iKc056KQ8oerOiqpJoSCYAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvg4AAAAAIAAAAFAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+DgAAAAAgAAAAYAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABQAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAAAAAABAAAABgAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAOjUt1+Af/////////8AAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('5a48a811ec874fc9c5d77c7caeb8abcea076c1baa51b755b2a878391a089c7d1', 6, 3, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA8MXwgAAAAAAAAAAH5kC3vAAAAQGEXqpE9OKOxah6oBhR955A4BYmO+yuLNMMtcALlLsKj2M1e9QTlBvAzuwkgECvg2iw8qXZB2kHteYw8qoozcQA=', 'WkioEeyHT8nF13x8rrirzqB2wbqlG3VbKoeDkaCJx9EAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvg4AAAAAIAAAAGAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+DgAAAAAgAAAAcAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABQAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAAAAAABAAAABgAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA8MXwgf/////////8AAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('4e4779f0d69db51ec4f7b73387b60c239433804d2747def21b7771e9b71d75be', 6, 4, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAdGp1wZQAAAAAAAAAAH5kC3vAAAAQHQFhOcK6JMPYxfRWB+xO13EkPDqkvvPG/Hp8EWDTIMTpHHi4Mqr3/SreJLUxOi3qGSqYFJHiAoK65rFYQaPEAQ=', 'Tkd58NadtR7E97czh7YMI5QzgE0nR97yG3dx6bcddb4AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvg4AAAAAIAAAAHAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+DgAAAAAgAAAAgAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABQAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAdGp1wZQf/////////8AAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('fa17f7c083fddc53e8e28885be934e19bf637e287c1951be581dd05c0be93b56', 7, 1, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAjhvJvwQAAAAAAAAAAAAEnciLVAAAAQNI8SXbUBWJi/xf8bWtBBKonww9YpbLck1/295qxZOYN5vjFDYQLaG3b1aGWqzWZqa9FMHkJ2tAEDPjEHIMkzAw=', '+hf3wIP93FPo4oiFvpNOGb9jfih8GVG+WB3QXAvpO1YAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvi1AAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvi1AAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0anXBlB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0anXBlAAI4byb8EAAAAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('c42c988a72ac8aed3bb9a7b7dfb96b905e33d1506f4e663360135e6c6e115078', 7, 2, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA1nUfgAAAAAAAAAAGu5L5MAAAAQAGYynFy2CKfKZyhmWMLfgmhdJtJHXW7ogTdyZ7aviECOHYJSQKPkcnMoG4N76ipkuVH6hjuxDHBJ83+HnyhbAQ=', 'xCyYinKsiu07uae337lrkF4z0VBvTmYzYBNebG4RUHgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAviDAAAAAIAAAAEAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAviDAAAAAIAAAAFAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0anXBlAAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAEAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0d90TjAAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAMAAAAGAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6NS3X4B//////////wAAAAEAAAAAAAAAAAAAAAEAAAAHAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6J8aF6B//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('142d3dbe5948eb39db1fd62d912ce67131b1b300adb015acf0f17d91a057429d', 8, 1, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAz97YAAAAAAAAAAEnciLVAAAAQHbmlPqVcxoIqzJFayddJwGRM8Vxm0BYlui3LVu9d/nB2hb/tsUWgUZLCUnNv/CPjsMTAN2LmVkYOMtCdYc+NQ8=', 'FC09vllI6znbH9YtkSzmcTGxswCtsBWs8PF9kaBXQp0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvicAAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvicAAAAAIAAAAEAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6J8aF6B//////////wAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6J/p9nh//////////wAAAAEAAAAAAAAAAAAAAAMAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0d90TjAAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0d6kb1gAI4byb8EAAAAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES 
('3362c9b76d85a844c739b338dbef4213ce64eca1ceb6c0d70e878975ab1477b1', 8, 2, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAS3peQAAAAAAAAAAGu5L5MAAAAQKnjaWS6Rk617nkw1/KuCffaeN1Mymuz8m9Brm0RJ1IYNKdnudV+72HsCM1Vnfnz/+iB6ERFxOsEp1mBHpUMQwk=', 'M2LJt22FqETHObM42+9CE85k7KHOtsDXDoeJdasUd7EAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvhqAAAAAIAAAAFAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvhqAAAAAIAAAAGAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0d6kb1gAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0fGDBugAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAMAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6J/p9nh//////////wAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6I0LXuh//////////wAAAAEAAAAAAAAAAA=='); + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (2, 1, 'AAAAAQAAAAs=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (2, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD 
CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/ingest_asset_stats-horizon.sql b/services/horizon/internal/test/scenarios/ingest_asset_stats-horizon.sql new file mode 100644 index 0000000000..46321b3033 --- /dev/null +++ b/services/horizon/internal/test/scenarios/ingest_asset_stats-horizon.sql @@ -0,0 +1,1086 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS 
public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS 
public.gorp_migrations; +DROP TABLE IF EXISTS public.asset_stats; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE 
+ CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id 
bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount > 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount > 0)) +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged integer +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO asset_stats VALUES (1, '1009876000', 1, 1, 'https://test.com/.well-known/stellar.toml'); +INSERT INTO asset_stats VALUES (2, '3000010434000', 2, 1, 'https://test.com/.well-known/stellar.toml'); +INSERT INTO asset_stats VALUES (3, '10000000000', 1, 2, ''); + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-06-03 18:28:47.032496+02'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-06-03 18:28:47.039657+02'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-06-03 18:28:47.044048+02'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-06-03 18:28:47.054532+02'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-06-03 18:28:47.063028+02'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-06-03 18:28:47.068415+02'); +INSERT INTO gorp_migrations 
VALUES ('7_modify_trades_table.sql', '2019-06-03 18:28:47.081625+02'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-06-03 18:28:47.087463+02'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-06-03 18:28:47.090109+02'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-06-03 18:28:47.092718+02'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-06-03 18:28:47.095973+02'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-06-03 18:28:47.099698+02'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-06-03 18:28:47.107549+02'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-06-03 18:28:47.112768+02'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-06-03 18:28:47.115116+02'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-06-03 18:28:47.116796+02'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-06-03 18:28:47.117989+02'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-06-03 18:28:47.120034+02'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_accounts VALUES (1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_accounts VALUES (2, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_accounts VALUES (3, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_accounts VALUES (4, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 4, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_assets VALUES (1, 'credit_alphanum4', 'BTC', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_assets VALUES (2, 'credit_alphanum4', 'USD', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_assets VALUES (3, 'credit_alphanum4', 'SCOT', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 3, true); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (1, 34359742465, 1, 2, '{"amount": "1.3623000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 34359742465, 2, 3, '{"amount": "1.3623000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 34359746561, 1, 2, '{"amount": "31.6577680", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 34359746561, 2, 3, '{"amount": "31.6577680", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 30064775169, 1, 22, '{"limit": "1000000000.0000000", "asset_code": "USD", 
"asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 30064779265, 1, 2, '{"amount": "89.9500000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 30064779265, 2, 3, '{"amount": "89.9500000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 25769807873, 1, 2, '{"amount": "1000.0000000", "asset_code": "SCOT", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (1, 25769807873, 2, 3, '{"amount": "1000.0000000", "asset_code": "SCOT", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (1, 25769811969, 1, 2, '{"amount": "100000.1200000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 25769811969, 2, 3, '{"amount": "100000.1200000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 25769816065, 1, 2, '{"amount": "100.9876000", "asset_code": "BTC", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 25769816065, 2, 3, '{"amount": "100.9876000", "asset_code": "BTC", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 25769820161, 1, 2, '{"amount": "200000.9234000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 25769820161, 2, 3, '{"amount": "200000.9234000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 21474840577, 1, 23, '{"trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 21474844673, 1, 23, '{"trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "asset_code": "BTC", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 21474848769, 1, 23, '{"trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 17179873281, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 17179877377, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": 
"GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 17179881473, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "BTC", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 17179885569, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "SCOT", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (1, 12884905985, 1, 6, '{"auth_revocable_flag": true}'); +INSERT INTO history_effects VALUES (3, 12884910081, 1, 5, '{"home_domain": "test.com"}'); +INSERT INTO history_effects VALUES (3, 12884914177, 1, 6, '{"auth_required_flag": true}'); +INSERT INTO history_effects VALUES (3, 8589938689, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (4, 8589938689, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (3, 8589938689, 3, 10, '{"weight": 1, "public_key": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 8589942785, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (4, 8589942785, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589942785, 3, 10, '{"weight": 1, "public_key": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (2, 8589946881, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (4, 8589946881, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 8589946881, 3, 10, '{"weight": 1, "public_key": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (8, 'b126571afcaf19157b639f5eba881e25930d94a37a52815db11819ccd5340b94', '1491ff6cbfb702b38010befb6f0a64d62293bc00539c24a4cfe3038bf7174339', 2, 2, '2019-06-03 16:37:47', '2019-06-03 16:37:45.007637', '2019-06-03 16:37:45.007637', 34359738368, 16, 1000000000000000000, 2100, 100, 100000000, 1000000, 11, 'AAAACxSR/2y/twKzgBC++28KZNYik7wAU5wkpM/jA4v3F0M5SrnXL2P8lSzo6IVDmtY1ZEbJBT/DkKVVD6JAdZ0hcmsAAAAAXPVM2wAAAAAAAAAAZxdQWfDM1MxUCuefAme/bjqOchzkHE89OsTko56c4I4iAccXBST+f0Cn0xFhOxpER1nlhqjiIIsMXna2VGlwLAAAAAgN4Lazp2QAAAAAAAAAAAg0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0); +INSERT INTO history_ledgers VALUES (7, '1491ff6cbfb702b38010befb6f0a64d62293bc00539c24a4cfe3038bf7174339', 'e48cf2fed5e3b8fbf715a418f92f81b8362c8804fc334e73d59b9ec99ee4224b', 2, 2, '2019-06-03 16:37:46', '2019-06-03 16:37:45.030701', '2019-06-03 16:37:45.030702', 30064771072, 16, 1000000000000000000, 1900, 100, 100000000, 1000000, 11, 'AAAAC+SM8v7V47j79xWkGPkvgbg2LIgE/DNOc9Wbnsme5CJLT03OgiB/+MODRPQfafM9wx2p409pQvaZdj1O67SYKJ0AAAAAXPVM2gAAAAAAAAAAp6X4a1fQWRN1fT4Oha1VAb23rxjrafhg/8wjyOPQ4elbWMK8Kf5rrB0MVt6oD2Tl6w8vCi+f4g0Og6SINTYEDQAAAAcN4Lazp2QAAAAAAAAAAAdsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0); +INSERT INTO 
history_ledgers VALUES (6, 'e48cf2fed5e3b8fbf715a418f92f81b8362c8804fc334e73d59b9ec99ee4224b', '333ec5e9ed90bbebb591729290b241ba6530ddf9f6102e0244f2e5b79721c1e4', 4, 4, '2019-06-03 16:37:45', '2019-06-03 16:37:45.043381', '2019-06-03 16:37:45.043381', 25769803776, 16, 1000000000000000000, 1700, 100, 100000000, 1000000, 11, 'AAAACzM+xentkLvrtZFykpCyQbplMN359hAuAkTy5beXIcHku8Zf/iDsmpUtzhUhvCkZxGvlhB58WJOSt+WZKn6exCUAAAAAXPVM2QAAAAAAAAAAXbLrzjyhc09OMR0rGIcNlJyvUas57e4z2TpUhlLwGBrLfhuK3ISMsCuVJlDnaAm7yhR0gRbaia/WaeLEQ9kV8gAAAAYN4Lazp2QAAAAAAAAAAAakAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0); +INSERT INTO history_ledgers VALUES (5, '333ec5e9ed90bbebb591729290b241ba6530ddf9f6102e0244f2e5b79721c1e4', 'c06da07d9df310bcff40b5fd1f635316526a35aa2170720d7e7af6a1231bc889', 3, 3, '2019-06-03 16:37:44', '2019-06-03 16:37:45.06041', '2019-06-03 16:37:45.06041', 21474836480, 16, 1000000000000000000, 1300, 100, 100000000, 1000000, 11, 'AAAAC8BtoH2d8xC8/0C1/R9jUxZSajWqIXByDX569qEjG8iJbKMKb88bKwjF/tf5B2srqlB1Wt1wpqMZDxD5nC0zt2YAAAAAXPVM2AAAAAAAAAAAAvTWoAZ8sb3n9zINDA1hqoanXiN/pcjfEtKLDrzVvDNudBo4h9aHdtfWXTsgJUG48KFTs4PTWToRXu/VLVdQ5wAAAAUN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0); +INSERT INTO history_ledgers VALUES (4, 'c06da07d9df310bcff40b5fd1f635316526a35aa2170720d7e7af6a1231bc889', '73d45d07ebf473cd3a6ae2d4aa6f56d01f6ffd71d7b3567afd5060b6fc921b56', 4, 4, '2019-06-03 16:37:43', '2019-06-03 16:37:45.072877', '2019-06-03 16:37:45.072877', 17179869184, 16, 1000000000000000000, 1000, 100, 100000000, 1000000, 11, 'AAAAC3PUXQfr9HPNOmri1KpvVtAfb/1x17NWev1QYLb8khtWr+ch0bnpNLHQ9oKhg8ODAYI15MnucRB9qXMxtHv/+0cAAAAAXPVM1wAAAAAAAAAAg4bDLmPB7NuPhcFNu33PjSojcPo2TH0x5yLLubhqMjzJvBY5PAIvHSKyu1oe3+8JuK/LnHyRVafV32eOU81flAAAAAQN4Lazp2QAAAAAAAAAAAPoAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0); +INSERT INTO history_ledgers VALUES (3, '73d45d07ebf473cd3a6ae2d4aa6f56d01f6ffd71d7b3567afd5060b6fc921b56', '4a4b1ff214c4817fd203885fdf554c8d86271d6788616565150d7de7566fdf98', 3, 3, '2019-06-03 16:37:42', '2019-06-03 16:37:45.085791', '2019-06-03 16:37:45.085792', 12884901888, 16, 1000000000000000000, 600, 100, 100000000, 1000000, 11, 'AAAAC0pLH/IUxIF/0gOIX99VTI2GJx1niGFlZRUNfedWb9+YispBRYjJkjprlnEXQeQh2xZxp4jvWl+jqXzxvvNnVZkAAAAAXPVM1gAAAAAAAAAAz4e0cNoSrSyJbiU9ww4Xz/C47nFVIBEHkR11kgme+DzF1UX/tKW/NixJgxyLo68LswymO6lok5PKGtG9vmaZrQAAAAMN4Lazp2QAAAAAAAAAAAJYAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0); +INSERT INTO history_ledgers VALUES (2, '4a4b1ff214c4817fd203885fdf554c8d86271d6788616565150d7de7566fdf98', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 3, 3, '2019-06-03 16:37:41', '2019-06-03 16:37:45.10023', '2019-06-03 16:37:45.10023', 8589934592, 16, 1000000000000000000, 300, 100, 100000000, 1000000, 11, 
'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76Z9GwX6q8KzxXUDk3+fdZshszCWX2SbuAnJNMiSoulCykAAAAAXPVM1QAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAACyk/eWGAYEOhla9SxZHjkxGIQ61pIijmPF9hVp1Qv5rtOXsmkRJcyXmle2ZKFjzhTk0u31dZoIkX51uoncfP4wAAAAIN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-06-03 16:37:45.111028', '2019-06-03 16:37:45.111029', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); + + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 34359742465, 2); +INSERT INTO history_operation_participants VALUES (2, 34359742465, 1); +INSERT INTO history_operation_participants VALUES (3, 34359746561, 1); +INSERT INTO history_operation_participants VALUES (4, 34359746561, 2); +INSERT INTO history_operation_participants VALUES (5, 30064775169, 2); +INSERT INTO history_operation_participants VALUES (6, 30064779265, 2); +INSERT INTO history_operation_participants VALUES (7, 30064779265, 1); +INSERT INTO history_operation_participants VALUES (8, 25769807873, 1); +INSERT INTO history_operation_participants VALUES (9, 25769807873, 2); +INSERT INTO history_operation_participants VALUES (10, 25769811969, 3); +INSERT INTO history_operation_participants VALUES (11, 25769811969, 1); +INSERT INTO history_operation_participants VALUES (12, 25769816065, 3); +INSERT INTO history_operation_participants VALUES (13, 25769816065, 1); +INSERT INTO history_operation_participants VALUES (14, 25769820161, 3); +INSERT INTO history_operation_participants VALUES (15, 25769820161, 2); +INSERT INTO history_operation_participants VALUES (16, 21474840577, 3); +INSERT INTO history_operation_participants VALUES (17, 21474840577, 1); +INSERT INTO history_operation_participants VALUES (18, 21474844673, 3); +INSERT INTO history_operation_participants VALUES (19, 21474844673, 1); +INSERT INTO history_operation_participants VALUES (20, 21474848769, 3); +INSERT INTO history_operation_participants VALUES (21, 21474848769, 2); +INSERT INTO history_operation_participants VALUES (22, 17179873281, 2); +INSERT INTO history_operation_participants VALUES (23, 17179877377, 1); +INSERT INTO history_operation_participants VALUES (24, 17179881473, 1); +INSERT INTO history_operation_participants VALUES (25, 17179885569, 2); +INSERT INTO history_operation_participants VALUES (26, 12884905985, 1); +INSERT INTO history_operation_participants VALUES (27, 12884910081, 3); +INSERT INTO history_operation_participants VALUES (28, 12884914177, 3); +INSERT INTO history_operation_participants VALUES (29, 8589938689, 4); +INSERT INTO history_operation_participants VALUES (30, 8589938689, 3); +INSERT INTO history_operation_participants VALUES (31, 8589942785, 4); +INSERT 
INTO history_operation_participants VALUES (32, 8589942785, 1); +INSERT INTO history_operation_participants VALUES (33, 8589946881, 4); +INSERT INTO history_operation_participants VALUES (34, 8589946881, 2); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 34, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (34359742465, 34359742464, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "amount": "1.3623000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (34359746561, 34359746560, 1, 1, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "amount": "31.6577680", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (30064775169, 30064775168, 1, 6, '{"limit": "1000000000.0000000", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (30064779265, 30064779264, 1, 1, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "amount": "89.9500000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (25769807873, 25769807872, 1, 1, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "amount": "1000.0000000", "asset_code": "SCOT", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (25769811969, 25769811968, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "amount": "100000.1200000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (25769816065, 25769816064, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "amount": "100.9876000", "asset_code": "BTC", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (25769820161, 
25769820160, 1, 1, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "amount": "200000.9234000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (21474840577, 21474840576, 1, 7, '{"trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "authorize": true, "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (21474844673, 21474844672, 1, 7, '{"trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "authorize": true, "asset_code": "BTC", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (21474848769, 21474848768, 1, 7, '{"trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "authorize": true, "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (17179873281, 17179873280, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (17179877377, 17179877376, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (17179881473, 17179881472, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "asset_code": "BTC", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (17179885569, 17179885568, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "asset_code": "SCOT", "asset_type": "credit_alphanum4", "asset_issuer": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 5, '{"set_flags": [2], "set_flags_s": 
["auth_revocable"]}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (12884910081, 12884910080, 1, 5, '{"home_domain": "test.com"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (12884914177, 12884914176, 1, 5, '{"set_flags": [1], "set_flags_s": ["auth_required"]}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (8589938689, 8589938688, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589942785, 8589942784, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589946881, 8589946880, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transaction_participants VALUES (1, 34359742464, 2); +INSERT INTO history_transaction_participants VALUES (2, 34359742464, 1); +INSERT INTO history_transaction_participants VALUES (3, 34359746560, 1); +INSERT INTO history_transaction_participants VALUES (4, 34359746560, 2); +INSERT INTO history_transaction_participants VALUES (5, 30064775168, 2); +INSERT INTO history_transaction_participants VALUES (6, 30064779264, 1); +INSERT INTO history_transaction_participants VALUES (7, 30064779264, 2); +INSERT INTO history_transaction_participants VALUES (8, 25769807872, 1); +INSERT INTO history_transaction_participants VALUES (9, 25769807872, 2); +INSERT INTO history_transaction_participants VALUES (10, 25769811968, 3); +INSERT INTO history_transaction_participants VALUES (11, 25769811968, 1); +INSERT INTO history_transaction_participants VALUES (12, 25769816064, 1); +INSERT INTO history_transaction_participants VALUES (13, 25769816064, 3); +INSERT INTO history_transaction_participants VALUES (14, 25769820160, 3); +INSERT INTO history_transaction_participants VALUES (15, 25769820160, 2); +INSERT INTO history_transaction_participants VALUES (16, 21474840576, 3); +INSERT INTO history_transaction_participants VALUES (17, 21474840576, 1); +INSERT INTO history_transaction_participants VALUES (18, 21474844672, 3); +INSERT INTO history_transaction_participants VALUES (19, 21474844672, 1); +INSERT INTO history_transaction_participants VALUES (20, 21474848768, 2); +INSERT INTO history_transaction_participants VALUES (21, 21474848768, 3); +INSERT INTO history_transaction_participants VALUES (22, 17179873280, 2); +INSERT INTO history_transaction_participants VALUES (23, 17179877376, 1); +INSERT INTO history_transaction_participants VALUES (24, 17179881472, 1); +INSERT INTO history_transaction_participants VALUES (25, 17179885568, 2); +INSERT INTO history_transaction_participants VALUES (26, 12884905984, 1); +INSERT INTO 
history_transaction_participants VALUES (27, 12884910080, 3); +INSERT INTO history_transaction_participants VALUES (28, 12884914176, 3); +INSERT INTO history_transaction_participants VALUES (29, 8589938688, 4); +INSERT INTO history_transaction_participants VALUES (30, 8589938688, 3); +INSERT INTO history_transaction_participants VALUES (31, 8589942784, 4); +INSERT INTO history_transaction_participants VALUES (32, 8589942784, 1); +INSERT INTO history_transaction_participants VALUES (33, 8589946880, 2); +INSERT INTO history_transaction_participants VALUES (34, 8589946880, 4); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 34, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('142d3dbe5948eb39db1fd62d912ce67131b1b300adb015acf0f17d91a057429d', 8, 1, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934596, 100, 1, '2019-06-03 16:37:45.007926', '2019-06-03 16:37:45.007926', 34359742464, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAz97YAAAAAAAAAAEnciLVAAAAQHbmlPqVcxoIqzJFayddJwGRM8Vxm0BYlui3LVu9d/nB2hb/tsUWgUZLCUnNv/CPjsMTAN2LmVkYOMtCdYc+NQ8=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvicAAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvicAAAAAIAAAAEAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6J8aF6B//////////wAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6J/p9nh//////////wAAAAEAAAAAAAAAAAAAAAMAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0d90TjAAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0d6kb1gAI4byb8EAAAAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+LUAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+JwAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{duaU+pVzGgirMkVrJ10nAZEzxXGbQFiW6LctW713+cHaFv+2xRaBRksJSc2/8I+OwxMA3YuZWRg4y0J1hz41Dw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('3362c9b76d85a844c739b338dbef4213ce64eca1ceb6c0d70e878975ab1477b1', 8, 2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934598, 100, 1, '2019-06-03 16:37:45.008333', '2019-06-03 16:37:45.008334', 34359746560, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAS3peQAAAAAAAAAAGu5L5MAAAAQKnjaWS6Rk617nkw1/KuCffaeN1Mymuz8m9Brm0RJ1IYNKdnudV+72HsCM1Vnfnz/+iB6ERFxOsEp1mBHpUMQwk=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvhqAAAAAIAAAAFAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvhqAAAAAIAAAAGAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0d6kb1gAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0fGDBugAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAMAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6J/p9nh//////////wAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6I0LXuh//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+IMAAAAAgAAAAUAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+GoAAAAAgAAAAUAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{qeNpZLpGTrXueTDX8q4J99p43UzKa7Pyb0GubREnUhg0p2e51X7vYewIzVWd+fP/6IHoREXE6wSnWYEelQxDCQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('fa17f7c083fddc53e8e28885be934e19bf637e287c1951be581dd05c0be93b56', 7, 1, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934595, 100, 1, '2019-06-03 16:37:45.03086', '2019-06-03 16:37:45.03086', 30064775168, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAjhvJvwQAAAAAAAAAAAAEnciLVAAAAQNI8SXbUBWJi/xf8bWtBBKonww9YpbLck1/295qxZOYN5vjFDYQLaG3b1aGWqzWZqa9FMHkJ2tAEDPjEHIMkzAw=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvi1AAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvi1AAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0anXBlB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0anXBlAAI4byb8EAAAAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+LUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{0jxJdtQFYmL/F/xta0EEqifDD1ilstyTX/b3mrFk5g3m+MUNhAtobdvVoZarNZmpr0UweQna0AQM+MQcgyTMDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('c42c988a72ac8aed3bb9a7b7dfb96b905e33d1506f4e663360135e6c6e115078', 7, 2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934597, 100, 1, '2019-06-03 16:37:45.031078', '2019-06-03 16:37:45.031078', 30064779264, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA1nUfgAAAAAAAAAAGu5L5MAAAAQAGYynFy2CKfKZyhmWMLfgmhdJtJHXW7ogTdyZ7aviECOHYJSQKPkcnMoG4N76ipkuVH6hjuxDHBJ83+HnyhbAQ=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAviDAAAAAIAAAAEAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAviDAAAAAIAAAAFAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0anXBlAAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAEAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAB0d90TjAAI4byb8EAAAAAAAEAAAAAAAAAAAAAAAMAAAAGAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6NS3X4B//////////wAAAAEAAAAAAAAAAAAAAAEAAAAHAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAA6J8aF6B//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+JwAAAAAgAAAAQAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+IMAAAAAgAAAAQAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{AZjKcXLYIp8pnKGZYwt+CaF0m0kddbuiBN3Jntq+IQI4dglJAo+Rycygbg3vqKmS5UfqGO7EMcEnzf4efKFsBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('cf0f5fcd46881458ba623f9e6e7c52489d4bd3979a4196819882bb6240b4e855', 6, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934596, 100, 1, '2019-06-03 16:37:45.043543', '2019-06-03 16:37:45.043543', 25769807872, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABU0NPVAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAAAAAAGu5L5MAAAAQLSYQCC1+DGQ8srHLxi6SfnN/dn8t7mAcXlDniU3J+d6Ezg1U6lg9i0jWOsfamioYVbJ9dAiQBZyIsn7TB5cLww=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvicAAAAAIAAAADAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvicAAAAAIAAAAEAAAAAgAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFTQ09UAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFTQ09UAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAlQL5AB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+JwAAAAAgAAAAMAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{tJhAILX4MZDyyscvGLpJ+c392fy3uYBxeUOeJTcn53oTODVTqWD2LSNY6x9qaKhhVsn10CJAFnIiyftMHlwvDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2300600248f841cd5f50276fc18eb16bc88a734e7a290f287ab3a2aa92684826', 6, 2, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934598, 100, 1, '2019-06-03 16:37:45.043709', '2019-06-03 16:37:45.04371', 25769811968, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAOjUt1+AAAAAAAAAAAH5kC3vAAAAQIqp3RfP1ueB0TRJRYXnao+kmde4BDh8q0Ep7q14Q8oRNx1R9utncfpoXr7JOcqiwtgarT9k6KmMyjda97H5RgM=', 
'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvg4AAAAAIAAAAFAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+DgAAAAAgAAAAYAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABQAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAAAAAABAAAABgAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAOjUt1+Af/////////8AAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+IMAAAAAgAAAAUAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4agAAAACAAAABQAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA=', '{iqndF8/W54HRNElFhedqj6SZ17gEOHyrQSnurXhDyhE3HVH262dx+mhevsk5yqLC2BqtP2ToqYzKN1r3sflGAw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('5a48a811ec874fc9c5d77c7caeb8abcea076c1baa51b755b2a878391a089c7d1', 6, 3, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934599, 100, 1, '2019-06-03 16:37:45.043846', '2019-06-03 16:37:45.043846', 25769816064, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA8MXwgAAAAAAAAAAH5kC3vAAAAQGEXqpE9OKOxah6oBhR955A4BYmO+yuLNMMtcALlLsKj2M1e9QTlBvAzuwkgECvg2iw8qXZB2kHteYw8qoozcQA=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvg4AAAAAIAAAAGAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+DgAAAAAgAAAAcAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABQAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAAAAAABAAAABgAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA8MXwgf/////////8AAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+GoAAAAAgAAAAUAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4UQAAAACAAAABQAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA=', '{YReqkT04o7FqHqgGFH3nkDgFiY77K4s0wy1wAuUuwqPYzV71BOUG8DO7CSAQK+DaLDypdkHaQe15jDyqijNxAA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('4e4779f0d69db51ec4f7b73387b60c239433804d2747def21b7771e9b71d75be', 6, 4, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934600, 100, 1, '2019-06-03 16:37:45.043977', '2019-06-03 16:37:45.043977', 25769820160, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAdGp1wZQAAAAAAAAAAH5kC3vAAAAQHQFhOcK6JMPYxfRWB+xO13EkPDqkvvPG/Hp8EWDTIMTpHHi4Mqr3/SreJLUxOi3qGSqYFJHiAoK65rFYQaPEAQ=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvg4AAAAAIAAAAHAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+DgAAAAAgAAAAgAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABQAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAdGp1wZQf/////////8AAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAGAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+FEAAAAAgAAAAUAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4OAAAAACAAAABQAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA=', '{dAWE5wrokw9jF9FYH7E7XcSQ8OqS+88b8enwRYNMgxOkceLgyqvf9Kt4ktTE6LeoZKpgUkeICgrrmsVhBo8QBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('3b666a253313fc7a0d241ee28064eec78aaa5ebd0a7c0ae7f85259e80fad029f', 5, 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934595, 100, 1, '2019-06-03 16:37:45.060663', '2019-06-03 16:37:45.060663', 21474840576, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAEAAAAAAAAAAfmQLe8AAABAL6czYFvSBhdVeD4fbXOHuXFa2CDqLpFfc+QJnoiPLt/23YViURGLyfg388FKMKsbNJEgmFsCJjtgl3fj7wr/Aw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAHAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAviDAAAAAIAAAACAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+IMAAAAAgAAAAMAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAAAAAAAAAAAAAAAAAABAAAABQAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAUAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4tQAAAACAAAAAgAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA=', '{L6czYFvSBhdVeD4fbXOHuXFa2CDqLpFfc+QJnoiPLt/23YViURGLyfg388FKMKsbNJEgmFsCJjtgl3fj7wr/Aw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('d9d6b816a0a3c640637d48fe33fa00f9ef116103c204834a1c18a9765803fd5d', 5, 2, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934596, 100, 1, '2019-06-03 16:37:45.061037', '2019-06-03 16:37:45.061037', 21474844672, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAEAAAAAAAAAAfmQLe8AAABAMIB8sKelxTqFOLPILjB0nItcfrGrCwursIhshVeKHSw2IC4pmCeg7KGDOLpfUCLc23n5HeTsxJsb/CrHJF/XDQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAHAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAviDAAAAAIAAAADAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+IMAAAAAgAAAAQAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAAAAAAAAAAAAAAAAAABAAAABQAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+LUAAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAUAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4nAAAAACAAAAAgAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA=', '{MIB8sKelxTqFOLPILjB0nItcfrGrCwursIhshVeKHSw2IC4pmCeg7KGDOLpfUCLc23n5HeTsxJsb/CrHJF/XDQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('6ab66668ea2801de6a7239c94d44e5d41f361812607748125da372b27b66cd3c', 5, 3, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934597, 100, 1, '2019-06-03 16:37:45.061357', '2019-06-03 16:37:45.061357', 21474848768, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAEAAAAAAAAAAfmQLe8AAABA78VZpv8Z9a3XM9gv6hyMLt2bBrZ5sKsFRU4GKXYtxY2MkAt9J9ENrSRZn1M0jlx9FFGtCvtFFZi8DhxvqDyaBQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAHAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAviDAAAAAIAAAAEAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+IMAAAAAgAAAAUAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAABAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAAAAAAAAAAAAAAAAAABAAAABQAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAFAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+JwAAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAUAAAAAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAlQL4gwAAAACAAAAAgAAAAAAAAAAAAAAAQAAAAh0ZXN0LmNvbQEAAAAAAAAAAAAAAAAAAAA=', '{78VZpv8Z9a3XM9gv6hyMLt2bBrZ5sKsFRU4GKXYtxY2MkAt9J9ENrSRZn1M0jlx9FFGtCvtFFZi8DhxvqDyaBQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('00ab9cfce2b4c4141d8bb6768dd094bdbb1c7406710dbb3ba0ef98870f63a344', 4, 1, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934593, 100, 1, '2019-06-03 16:37:45.073022', '2019-06-03 16:37:45.073022', 17179873280, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAEnciLVAAAAQLVbII+1LeizxgncDI46KHyBt05+H92n1+R328J9zNl2fgJW2nfn3FIoLVs2qV1+CUpr121a2B7AM6HKr4nBLAI=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{tVsgj7Ut6LPGCdwMjjoofIG3Tn4f3afX5Hfbwn3M2XZ+Albad+fcUigtWzapXX4JSmvXbVrYHsAzocqvicEsAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('4486298e04ffb1f3620c521f81adb5207f5d12c21b08a076589d2be3d8dae543', 4, 2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934594, 100, 1, '2019-06-03 16:37:45.073203', '2019-06-03 16:37:45.073203', 17179877376, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQFp8rsD4Au1oeZkBT1RHIJRyxWayau3f5UjeA0w4+0LzjLEyi9nGMs8elAH4lDhhDJxCJ8HhxbG+XT/cmQsu1QA=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAABAAAAAAAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAACAAAAAAAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAIAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAIAAAABAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{WnyuwPgC7Wh5mQFPVEcglHLFZrJq7d/lSN4DTDj7QvOMsTKL2cYyzx6UAfiUOGEMnEInweHFsb5dP9yZCy7VAA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('1d6308dc6e9617bee39a69f68176cf6f3abcf4d3617db3c766647bd198a5e442', 4, 3, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934595, 100, 1, '2019-06-03 16:37:45.07334', '2019-06-03 16:37:45.07334', 17179881472, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABQlRDAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQLEyHlSQ5gb4aQ7evOl4mZ6lSTIF7kShyso/iyP0uz3ipHocd38/dLiu7lVvMGXwo6ymJ7mixdDuNLIWiI9TbQI=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAACAAAAAQAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvi1AAAAAIAAAADAAAAAQAAAAAAAAACAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAABAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAMAAAACAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFCVEMAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+LUAAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{sTIeVJDmBvhpDt686XiZnqVJMgXuRKHKyj+LI/S7PeKkehx3fz90uK7uVW8wZfCjrKYnuaLF0O40shaIj1NtAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('647eaba7f3bc5726dc1041553fe4741542ed0a2af2d098d93b0bac5b6f3c624c', 4, 4, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934594, 100, 1, '2019-06-03 16:37:45.073472', '2019-06-03 16:37:45.073472', 17179885568, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABU0NPVAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TH//////////AAAAAAAAAAEnciLVAAAAQHTUKeZaZX/yonQdzrGY0klZqwhUZd7ontUbjpQmLk+XRY8uYos+AI2Z3qqU3QF27EV4VRsVcUUvvn57fqFdzgQ=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvjOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFTQ09UAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+M4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{dNQp5lplf/KidB3OsZjSSVmrCFRl3uie1RuOlCYuT5dFjy5iiz4AjZneqpTdAXbsRXhVGxVxRS++fnt+oV3OBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('bd60680a1378ffec739e1ffa2db4cd51f58babfb714e04a52bd2b65bf8a31b4f', 3, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934593, 100, 1, '2019-06-03 16:37:45.086095', '2019-06-03 16:37:45.086096', 12884905984, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABruS+TAAAAEBkz5uRgU5FxqOu8Yak7Bbdc0BtgvEJ0FjurZz/LgGwT2EX91Y81YrdSVu2NPR0lbhSAotGQlvSPYEy5vN67p4C', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvjnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvjnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAEAAAAAAAAAAAAAAAIAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ZM+bkYFORcajrvGGpOwW3XNAbYLxCdBY7q2c/y4BsE9hF/dWPNWK3UlbtjT0dJW4UgKLRkJb0j2BMubzeu6eAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('c780569c402c298b7b5f3f1a6a20ac1219a06df39a78fb3ac6d93ca53ad4e5ed', 3, 2, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934593, 100, 1, '2019-06-03 16:37:45.086312', '2019-06-03 16:37:45.086312', 12884910080, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAACHRlc3QuY29tAAAAAAAAAAAAAAAB+ZAt7wAAAEBHwkZcyIWmaPvEtDlR8Ed4dD1Mep2juLtHF3n5RG0jurJhKq/3MB1zR6bDHr+wow35ijK92ihjHWqTxjzKDhkO', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvjOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvjOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{R8JGXMiFpmj7xLQ5UfBHeHQ9THqdo7i7Rxd5+URtI7qyYSqv9zAdc0emwx6/sKMN+YoyvdooYx1qk8Y8yg4ZDg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2d317dcef8626e639bcaab4a4b1ca1e8e6647eb46d65ca8d98137cd98eb10ae7', 3, 3, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934594, 100, 1, '2019-06-03 16:37:45.086491', '2019-06-03 16:37:45.086491', 12884914176, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB+ZAt7wAAAEB8q5Of+GA0eadw+hTrTCIAoedKyFge/Kv+RUNsq7sv7pSoLAQFWqwFIvxCGBul0XhSxOomG/gWgmIiwj6a1goM', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvjOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvjOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAACHRlc3QuY29tAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAIdGVzdC5jb20BAAAAAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+OcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+M4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{fKuTn/hgNHmncPoU60wiAKHnSshYHvyr/kVDbKu7L+6UqCwEBVqsBSL8QhgbpdF4UsTqJhv4FoJiIsI+mtYKDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('db398eb4ae89756325643cad21c94e13bfc074b323ee83e141bf701a5d904f1b', 2, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-06-03 16:37:45.10044', '2019-06-03 16:37:45.10044', 8589938688, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAACVAvkAAAAAAAAAAABVvwF9wAAAEAYjQcPT2G5hqnBmgGGeg9J8l4c1EnUlxklElH9sqZr0971F6OLWfe/m4kpFtI+sI0i1qLit5A0JyWnbhYLW5oD', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{GI0HD09huYapwZoBhnoPSfJeHNRJ1JcZJRJR/bKma9Pe9Reji1n3v5uJKRbSPrCNItai4reQNCclp24WC1uaAw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('f97caffab8c16023a37884165cb0b3ff1aa2daf4000fef49d21efc847ddbfbea', 2, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 2, 100, 1, '2019-06-03 16:37:45.10063', '2019-06-03 16:37:45.10063', 8589942784, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBmKpSgvrwKO20XCOfYfXsGEEUtwYaaEfqSu6ymJmlDma+IX6I7IggbUZMocQdZ94IMAfKdQANqXbIO7ysweeMC', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBrUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ZiqUoL68CjttFwjn2H17BhBFLcGGmhH6kruspiZpQ5mviF+iOyIIG1GTKHEHWfeCDAHynUADal2yDu8rMHnjAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('725756b1fbdf83b08127f385efedf0909cc820b6cce71f1c0897d15427cb5add', 2, 3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 3, 100, 1, '2019-06-03 16:37:45.100739', '2019-06-03 16:37:45.10074', 8589946880, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBj4gBQ/BAbgqf7qOotatgZUHjDlsOtDNdp7alZR5/Fk9fGj+lxEygAZWzY7/LY1Z3SF6c0qs172LhAkkvV8p0M', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyrQFLUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAJUC+QAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Y+IAUPwQG4Kn+6jqLWrYGVB4w5bDrQzXae2pWUefxZPXxo/pcRMoAGVs2O/y2NWd0henNKrNe9i4QJJL1fKdDA==}', 'none', NULL, NULL, true, 100); + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON 
history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- 
Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES 
history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/kahuna-core.sql b/services/horizon/internal/test/scenarios/kahuna-core.sql new file mode 100644 index 0000000000..fb22d18ef7 --- /dev/null +++ b/services/horizon/internal/test/scenarios/kahuna-core.sql @@ -0,0 +1,1040 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE 
IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + signers text, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; 
Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accountdata VALUES ('GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD', 'bmFtZSA=', 'aXRzIGdvdCBzcGFjZXMh', 49); +INSERT INTO accountdata VALUES ('GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD', 'bmFtZTE=', 'MDAwMA==', 52); + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GAXI33UCLQTCKM2NMRBS7XYBR535LLEVAHL5YBN4FTCB4HZHT7ZA5CVK', 10000000000, 12884901888, 0, NULL, '', 'AQAAAA==', 0, 3, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB', 9999999600, 17179869188, 1, NULL, '', 'AgICAg==', 0, 6, NULL, NULL, 'AAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAE='); +INSERT INTO accounts VALUES ('GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB', 9959999600, 30064771076, 0, 
NULL, '', 'AQAAAA==', 0, 8, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 9799999800, 38654705665, 0, NULL, '', 'AQAAAA==', 0, 10, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GDCVTBGSEEU7KLXUMHMSXBALXJ2T4I2KOPXW2S5TRLKDRIAXD5UDHAYO', 9499999900, 47244640257, 0, NULL, '', 'AQAAAA==', 0, 12, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCB7FPYGLL6RJ37HKRAYW5TAWMFBGGFGM4IM6ERBCZXI2BZ4OOOX2UAY', 500000000, 51539607552, 0, NULL, '', 'AQAAAA==', 0, 12, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C', 10099999900, 55834574849, 1, NULL, '', 'AQAAAA==', 0, 14, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG', 9899999800, 55834574850, 0, NULL, '', 'AQAAAA==', 0, 15, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP', 9999999900, 68719476737, 1, NULL, '', 'AQAAAA==', 0, 17, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD', 8999999700, 68719476739, 1, NULL, '', 'AQAAAA==', 0, 20, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU', 10999999700, 68719476739, 1, NULL, '', 'AQAAAA==', 0, 20, 0, 2000000000, NULL); +INSERT INTO accounts VALUES ('GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD', 10199999900, 90194313217, 1, NULL, '', 'AQAAAA==', 0, 24, 100000000, 0, NULL); +INSERT INTO accounts VALUES ('GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC', 9799999800, 90194313218, 1, NULL, '', 'AQAAAA==', 0, 24, 0, 0, NULL); +INSERT INTO accounts VALUES ('GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q', 9999999800, 107374182402, 2, NULL, '', 'AQAAAA==', 0, 26, 2000000000, 2000000000, NULL); +INSERT INTO accounts VALUES ('GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 9999998800, 115964117004, 0, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 'ZXhhbXBsZS5jb20=', 'AgACAg==', 0, 32, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG', 9999999600, 141733920772, 0, NULL, '', 'AQAAAA==', 0, 37, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG', 9999999800, 163208757250, 2, NULL, '', 'AQAAAA==', 0, 40, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF', 9999999600, 163208757252, 0, NULL, '', 'AQAAAA==', 3, 42, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS', 200038144200001319, 193273528321, 0, 'GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS', '', 'AQAAAA==', 0, 47, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD', 9999999400, 206158430214, 2, NULL, '', 'AQAAAA==', 0, 52, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GACJPE4YUR22VP4CM2BDFDAHY3DLEF3H7NENKUQ53DT5TEI2GAHT5N4X', 10000000000, 227633266688, 0, NULL, '', 'AQAAAA==', 0, 54, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y', 9999999900, 236223201281, 0, NULL, '', 'AQAAAA==', 0, 56, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 800152367009533292, 26, 0, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', '', 'AQAAAA==', 0, 
57, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN', 9999999600, 300000000003, 0, NULL, '', 'AQAAAA==', 0, 61, NULL, NULL, NULL); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('fd9268d16d7501b6eb7956b6756b28fe43ce5a6010b76e776d91f60e5eed7b2a', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 'eff8bb6dff2733ff1f3ffa5141f34ae7571ee3d8cae6dbd129bac511fa0bfd64', 2, 1559579728, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZuZRHr9UdXKbTKiclfOjy72YZFJUkJPVcKT5htvorm1QAAAAAXPVMUAAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnv+Ltt/ycz/x8/+lFB80rnVx7j2Mrm29EpusUR+gv9ZAAAAAIN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('341ff9c498b9076f8f21aea78a33bc0c639ab5cf986adbc2d8d04d92b5a9d6df', 'fd9268d16d7501b6eb7956b6756b28fe43ce5a6010b76e776d91f60e5eed7b2a', '1900a0f9535471372f28be3fb83a3338ac80743ff0b7710fa108b02b338f60f8', 3, 1559579729, 'AAAAC/2SaNFtdQG263lWtnVrKP5DzlpgELdud22R9g5e7XsqDZGQBE1YsotIrRXkw7x6/cKPd2zOCBuz7H2RlRoSGYEAAAAAXPVMUQAAAAAAAAAA5PnvEvMOE8kyDs3Gbu1hou8cmoww1I7xqYGObt0Zo0oZAKD5U1RxNy8ovj+4OjM4rIB0P/C3cQ+hCLArM49g+AAAAAMN4Lazp2QAAAAAAAAAAABkAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('c26019112e7d1a307b7b6a843ad56fd73b51fbdc2e635fe7b0fcc8bae46e9417', '341ff9c498b9076f8f21aea78a33bc0c639ab5cf986adbc2d8d04d92b5a9d6df', 'fe15fe837e09f81504c6257977b3ec41ab2067faebc43c4630aab3d3acf13f54', 4, 1559579730, 'AAAACzQf+cSYuQdvjyGup4ozvAxjmrXPmGrbwtjQTZK1qdbfS+Xef4FXXIT2kc1XcfznO1AYP9JNuDCI1JFXHpnU9IsAAAAAXPVMUgAAAAAAAAAAI8FeHEzJ0/4FO8/NG/43wTEny1CW/vcH2jcrQkzbbcj+Ff6Dfgn4FQTGJXl3s+xBqyBn+uvEPEYwqrPTrPE/VAAAAAQN4Lazp2QAAAAAAAAAAADIAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('f5f9c1fc700571da5df0d6e0ecedab78fe81e6f66054e4352f97a3f0cf3d5f37', 'c26019112e7d1a307b7b6a843ad56fd73b51fbdc2e635fe7b0fcc8bae46e9417', '97eda606d33dd7985c319abdf83ebc61572f124739543068e14cb59ede2fa524', 5, 1559579731, 
'AAAAC8JgGREufRowe3tqhDrVb9c7UfvcLmNf57D8yLrkbpQXyz9iY2rHhwuUvOFuLE6mBvTO46hI+XxK2J5c1tuLsiUAAAAAXPVMUwAAAAAAAAAAk6vO/KRxwZ0ynJMMrKUpOSf5WhbdkNk0LIO/zZxB+qeX7aYG0z3XmFwxmr34PrxhVy8SRzlUMGjhTLWe3i+lJAAAAAUN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('e56b9392f09492f4084789a6f7eea3274d6991c918d95096009e985fbe6c0d09', 'f5f9c1fc700571da5df0d6e0ecedab78fe81e6f66054e4352f97a3f0cf3d5f37', '705dc9ee1d4d6e5291aa4d4b5b23fc993f5c98bd0127880b8c7f6862c226ad4c', 6, 1559579732, 'AAAAC/X5wfxwBXHaXfDW4Oztq3j+geb2YFTkNS+Xo/DPPV83Ny97k/0F2AWH2PP6jBxodIXMgjmRLJwhDtO8tQZgf8sAAAAAXPVMVAAAAAAAAAAAN8Q2AtCCsel4HIsK4udqXyLQqcraGjcAHVSUN815grFwXcnuHU1uUpGqTUtbI/yZP1yYvQEniAuMf2hiwiatTAAAAAYN4Lazp2QAAAAAAAAAAAJYAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('0cba5d6486b080c20944d8e2078da05854eb4d68b0c44a7a70f06dfbb28c0809', 'e56b9392f09492f4084789a6f7eea3274d6991c918d95096009e985fbe6c0d09', '98dfbf2b3e2b10fcf5912dacf130294bc3c2f652421df68eb8788a6569593cd6', 7, 1559579733, 'AAAAC+Vrk5LwlJL0CEeJpvfuoydNaZHJGNlQlgCemF++bA0JrU2Bn4aTjYMLpDfffI/e6HD3GSRCEJo4ArYti94OWqYAAAAAXPVMVQAAAAAAAAAAk5HOQp3TCIyOUj8VmNGrhs2C+IXj942pn3qPVIAXXx6Y378rPisQ/PWRLazxMClLw8L2UkId9o64eIplaVk81gAAAAcN4Lazp2QAAAAAAAAAAAK8AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('6afd133af73ffbd7ed56c265d6d3b7e374b0d17051cd1cf8e9edb76befcdb47b', '0cba5d6486b080c20944d8e2078da05854eb4d68b0c44a7a70f06dfbb28c0809', '34873390d2d130a1dd162860f990ce8bc151cb29b20ecc199bffd488c04a3807', 8, 1559579734, 'AAAACwy6XWSGsIDCCUTY4geNoFhU601osMRKenDwbfuyjAgJZ/gVHh4EGoRXjxBlJXgdb6o55e5LPB8/rItX+iBMmg0AAAAAXPVMVgAAAAAAAAAA5fh0DRZ+OfeA4iH3GTMbG6cqJGEx0qC57216HbTwQPU0hzOQ0tEwod0WKGD5kM6LwVHLKbIOzBmb/9SIwEo4BwAAAAgN4Lazp2QAAAAAAAAAAARMAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('69f27e72b97f7cb7bfd7f7cd0012b7ba080d3dec181688848626b1e1bad25ea5', '6afd133af73ffbd7ed56c265d6d3b7e374b0d17051cd1cf8e9edb76befcdb47b', 'c60305f9cb41555048c377960389bef087f6cd4b167ad3a60cd03e4ff6f8a04a', 9, 1559579735, 'AAAAC2r9Ezr3P/vX7VbCZdbTt+N0sNFwUc0c+Ontt2vvzbR7I1aOU+Htzv1YyoOW+NUXosjEw6RZFhwtO1PMnARAtukAAAAAXPVMVwAAAAAAAAAAbXQLWug2IZ2G4RnwUrCTvIwO6TMpu21+S2UXhqLKWTHGAwX5y0FVUEjDd5YDib7wh/bNSxZ606YM0D5P9vigSgAAAAkN4Lazp2QAAAAAAAAAAASwAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('62ad4b3aabc2a93e094118968e6114d1e15af1dfb753f5b7048c01be5fb3e9b9', '69f27e72b97f7cb7bfd7f7cd0012b7ba080d3dec181688848626b1e1bad25ea5', '1cd65c4dda5a48f127a0198eadbc06aca6ee1d1f198b55dac49234a5218bdf72', 10, 1559579736, 
'AAAAC2nyfnK5f3y3v9f3zQASt7oIDT3sGBaIhIYmseG60l6lp9U56RSocR4ZpXXZs/glcnGVHESVFFU6LiK9fweLLYkAAAAAXPVMWAAAAAAAAAAA9+jjsWK6v6g0OYMFxTo1+Yogi2yDSjXhJ86N1AxJOvEc1lxN2lpI8SegGY6tvAaspu4dHxmLVdrEkjSlIYvfcgAAAAoN4Lazp2QAAAAAAAAAAAV4AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('5e5e893b0f9078a77603f60689405d24d24247d0e19b00f815d212befb819656', '62ad4b3aabc2a93e094118968e6114d1e15af1dfb753f5b7048c01be5fb3e9b9', 'add66e020da6db1550b0c5a4f5548bc2e160bfea7ea6aeb392be8fce6f99a4d5', 11, 1559579737, 'AAAAC2KtSzqrwqk+CUEYlo5hFNHhWvHft1P1twSMAb5fs+m577MF5b1Iei3zHF9EAmsLT84l7YbAUSH/7F/wymRq98QAAAAAXPVMWQAAAAAAAAAApIxpaTPANJS3XftGXFSYrEJ0MhfN6GN1Oe5+YEP609+t1m4CDabbFVCwxaT1VIvC4WC/6n6mrrOSvo/Ob5mk1QAAAAsN4Lazp2QAAAAAAAAAAAXcAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('039d9303c20c9a58082f15f4875e84c1f0ce4dc20ea237866c59d597cc349a1d', '5e5e893b0f9078a77603f60689405d24d24247d0e19b00f815d212befb819656', 'c34f356a68bf9dc06aedd63092a99edd90f7940fdf1a48a2d93757501d8a1085', 12, 1559579738, 'AAAAC15eiTsPkHindgP2BolAXSTSQkfQ4ZsA+BXSEr77gZZWFSHyciAPeIm0Aaehq+XrRbfBf2YWt2omzfQnMSnMapcAAAAAXPVMWgAAAAAAAAAAc4P3o+BJiJvXXIfj/eVQPF1RRIrnE2nfX0a4J7WFySrDTzVqaL+dwGrt1jCSqZ7dkPeUD98aSKLZN1dQHYoQhQAAAAwN4Lazp2QAAAAAAAAAAAZAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('0c14a465f60001303a566fec15f2cbcda27abc34e9c8b51b88954a2f9b30f6b1', '039d9303c20c9a58082f15f4875e84c1f0ce4dc20ea237866c59d597cc349a1d', '2df8658cf54b4476b9189a6239547628b93df33e56292c23b296a8ead9c891f6', 13, 1559579739, 'AAAACwOdkwPCDJpYCC8V9IdehMHwzk3CDqI3hmxZ1ZfMNJodFfvdztxrYHP9xzdqb/9pcKohIXiTntWz+SxP2oy5B3sAAAAAXPVMWwAAAAAAAAAAFkWUNyF+w67R5n+HgDNebl5snV2YlBsz3xLTqBMQsQEt+GWM9UtEdrkYmmI5VHYouT3zPlYpLCOylqjq2ciR9gAAAA0N4Lazp2QAAAAAAAAAAAcIAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('a64af1c08fb5d28abcd1fc9dae8ccb1bfc9bbec1b7b477e14fca2cbadca4907c', '0c14a465f60001303a566fec15f2cbcda27abc34e9c8b51b88954a2f9b30f6b1', 'dddbba0eefe5564841bcafc7dc38560af24dd97699546f321d7c8c6435a05a29', 14, 1559579740, 'AAAACwwUpGX2AAEwOlZv7BXyy82ierw06ci1G4iVSi+bMPaxv5ctYdcMSEm83sSvokp32C1S/AIgbbctZWaWvUxN2AMAAAAAXPVMXAAAAAAAAAAAl8eMbvLJtOBoTCvorlEKxO6gpnsCesjn+1C1/HG0v1rd27oO7+VWSEG8r8fcOFYK8k3ZdplUbzIdfIxkNaBaKQAAAA4N4Lazp2QAAAAAAAAAAAfQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('8a29232992d69bb61f457c768a745b6fb2d81dadb8d6780f4960b9ee382c9ad5', 'a64af1c08fb5d28abcd1fc9dae8ccb1bfc9bbec1b7b477e14fca2cbadca4907c', '870919158a705ca5e830e5b86f60ca702766dd7de6fdde5d5b4813d55038f0ae', 15, 1559579741, 
'AAAAC6ZK8cCPtdKKvNH8na6Myxv8m77Bt7R34U/KLLrcpJB8d2gda9aXjRx0ma+uG5InDPEEWb37fjZyNQyPwPiQEvEAAAAAXPVMXQAAAAAAAAAAUsjUlWJzXzXDGEu7ZX8NM4G3hXMJgTqm1o4qvEMfb6GHCRkVinBcpegw5bhvYMpwJ2bdfeb93l1bSBPVUDjwrgAAAA8N4Lazp2QAAAAAAAAAAAg0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('6f4b7c957a42a32dd830d43212d5bbafeb9221538e4010a423455330ddb09b2b', '8a29232992d69bb61f457c768a745b6fb2d81dadb8d6780f4960b9ee382c9ad5', 'e5298f508390802317515dbb3f5dc56711bdc96097ff0e4f4ad0d3285b93bc4f', 16, 1559579742, 'AAAAC4opIymS1pu2H0V8dop0W2+y2B2tuNZ4D0lgue44LJrVY1gAUWBGwQQPMqY6PP8FFyppkHosKR4TD1wsYd//I+cAAAAAXPVMXgAAAAAAAAAAYKou7BkPtQtmD4O54/TToUgBAVUCmCl/O+9Tgq2hNHzlKY9Qg5CAIxdRXbs/XcVnEb3JYJf/Dk9K0NMoW5O8TwAAABAN4Lazp2QAAAAAAAAAAAlgAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('8aff00265fb98412902df9d5c30e194a01adbd792490ef58836478db9d8e3156', '6f4b7c957a42a32dd830d43212d5bbafeb9221538e4010a423455330ddb09b2b', 'df8cf2da1111832eb602eed658551ee74d34d4ed39f4e0d7374ceb35de0b02d7', 17, 1559579743, 'AAAAC29LfJV6QqMt2DDUMhLVu6/rkiFTjkAQpCNFUzDdsJsrycO3J/Hp2cEvr433GPeq3XtJHPV8+zXdxLRRUx16QhEAAAAAXPVMXwAAAAAAAAAAqHWOFRdWDweK4ZIEBYTYOkzwi2q8iIoB9xFzAOqPS/ffjPLaERGDLrYC7tZYVR7nTTTU7Tn04Nc3TOs13gsC1wAAABEN4Lazp2QAAAAAAAAAAAooAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('e6f54ea5dcfa926f5e46dd056690fb031dc392717dc0fbac0b2f411e84965210', '8aff00265fb98412902df9d5c30e194a01adbd792490ef58836478db9d8e3156', '6c7ec875d32a5eadb9887ca51ac28a0f60da8dfd00768cae2abb6b405feb486b', 18, 1559579744, 'AAAAC4r/ACZfuYQSkC351cMOGUoBrb15JJDvWINkeNudjjFWp7Hi+kDBELcUYlO/VeJpvL6v5oIQgjfBH9t2VsYU0vYAAAAAXPVMYAAAAAAAAAAANKxyAMoyM/gn59hBYzLxbU28Wbix3QHy7s086vpSY4dsfsh10yperbmIfKUawooPYNqN/QB2jK4qu2tAX+tIawAAABIN4Lazp2QAAAAAAAAAAAtUAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('ce1cd7aab0e22a1e81c89bd171d0bda5d8dec28824143025f6f0d15a1d7acc18', 'e6f54ea5dcfa926f5e46dd056690fb031dc392717dc0fbac0b2f411e84965210', 'e9245b4c9879a9b36700c68424feacdff10a0d381881206004ebc787f7147269', 19, 1559579745, 'AAAAC+b1TqXc+pJvXkbdBWaQ+wMdw5JxfcD7rAsvQR6EllIQaY9SIsx+2mzlbk+f9SEovenpLo/LJyUPh6A+g+VF8p4AAAAAXPVMYQAAAAAAAAAAYzC64fmWjZOtqE4JjsKqwerdnzN6ywnJSvdsDWFPcu3pJFtMmHmps2cAxoQk/qzf8QoNOBiBIGAE68eH9xRyaQAAABMN4Lazp2QAAAAAAAAAAAu4AAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('e0c98a06e3dc47183a4803f947cd00afe37d8b94dc34de429add40fa7c19c618', 'ce1cd7aab0e22a1e81c89bd171d0bda5d8dec28824143025f6f0d15a1d7acc18', 'bf0a1ca276a7ec7bf6b7ec589f2c09432be12e437c46c9aa824b930b9164145e', 20, 1559579746, 
'AAAAC84c16qw4ioegcib0XHQvaXY3sKIJBQwJfbw0VodeswY+fowsLcKz7J1dE35gKnYRMa2zA3cqfuoNUM1feiZPu8AAAAAXPVMYgAAAAAAAAAAx/hxaEW2XNJB3iDf23eYfGcwcQ862pSt7OJ6FF108Ia/Chyidqfse/a37FifLAlDK+EuQ3xGyaqCS5MLkWQUXgAAABQN4Lazp2QAAAAAAAAAAAwcAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('1e73266b9c1d6ed85ed20270b1c9c4081445d5f201189bebb5721c9b2565f749', 'e0c98a06e3dc47183a4803f947cd00afe37d8b94dc34de429add40fa7c19c618', '9051d68a3df017621caf933290284f9361b87774c8fc717d20b5fd3e3a84edbb', 21, 1559579747, 'AAAAC+DJigbj3EcYOkgD+UfNAK/jfYuU3DTeQprdQPp8GcYYFo7+Glas2jipoGla9vrLLK0ooAiMI5GQeOkLV/uj9cIAAAAAXPVMYwAAAAAAAAAA3LAnNjSwmLo474ojoHgysfkXmeeBIXdgWbyOJwbnsx+QUdaKPfAXYhyvkzKQKE+TYbh3dMj8cX0gtf0+OoTtuwAAABUN4Lazp2QAAAAAAAAAAAzkAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('b8d81a94cc9ac1dcd4975e5bae0680a746f41ab923de9b68650422039f8df47a', '1e73266b9c1d6ed85ed20270b1c9c4081445d5f201189bebb5721c9b2565f749', '7468f15db9522c27e9dfcfd20d0894abe7c34e3f13cd413299a934d8cd2e7e3b', 22, 1559579748, 'AAAACx5zJmucHW7YXtICcLHJxAgURdXyARib67VyHJslZfdJ4DzzvVfPje5vC6cghCFfrZwMHMEDMF1phwp/kIju/78AAAAAXPVMZAAAAAAAAAAA0AMiyT4CyDLLvayS0AmP97hen9j1rkIsoYeN0iQrjup0aPFduVIsJ+nfz9INCJSr58NOPxPNQTKZqTTYzS5+OwAAABYN4Lazp2QAAAAAAAAAAA1IAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('d09c2755ea31109767d6ad6ad7155cff1b630edb6368a4b280bb68de24c2ebb4', 'b8d81a94cc9ac1dcd4975e5bae0680a746f41ab923de9b68650422039f8df47a', '2c50ea426feb0e6dca6cccfb4ff7e1f2d0c1e6cf1869cc03ffb13013710a672a', 23, 1559579749, 'AAAAC7jYGpTMmsHc1JdeW64GgKdG9Bq5I96baGUEIgOfjfR6OPg5AoiFUgt3GG0WOJr4C3JaQHm7MPBQisdC4hkzxS8AAAAAXPVMZQAAAAAAAAAAeu6ZLz++eOyW3/HPY616v+oF2X756WCT8G93ApdHifosUOpCb+sObcpszPtP9+Hy0MHmzxhpzAP/sTATcQpnKgAAABcN4Lazp2QAAAAAAAAAAA2sAAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('d71b122bfb10e1193597acf4769f99b716e79e7d1eaccf225adf5fd65cbeee37', 'd09c2755ea31109767d6ad6ad7155cff1b630edb6368a4b280bb68de24c2ebb4', '7729f75e8aa26acb285f93eee6c16501d2b6dbbedeb2eb8d212ff6e7a0f319f6', 24, 1559579750, 'AAAAC9CcJ1XqMRCXZ9atatcVXP8bYw7bY2iksoC7aN4kwuu03hM6c6SywARSnPb3ctF6foVdbgyxhp/R4/s+WE+te5AAAAAAXPVMZgAAAAAAAAAA1ziHajubDrj1Iu5EQ8YZB24Czm29AJPdaJnsX3+A+fV3KfdeiqJqyyhfk+7mwWUB0rbbvt6y640hL/bnoPMZ9gAAABgN4Lazp2QAAAAAAAAAAA4QAAAAAAAAAAAAAAAEAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('f54c0097d441ff76c487d4ebb18f5e5f52c5a51ca96800bf9b818295790a3778', 'd71b122bfb10e1193597acf4769f99b716e79e7d1eaccf225adf5fd65cbeee37', 'ec5881346595c227bd8cb1affa52b51d4c393e9aa34cab629e6552818ddb4c6f', 25, 1559579751, 
'AAAAC9cbEiv7EOEZNZes9HafmbcW5559HqzPIlrfX9Zcvu43C5djYweCkfubeYiyrAvxlPmuUHgxbk+pkX450lbiUBIAAAAAXPVMZwAAAAAAAAAAM9gLVDvjwMuVZ+njRjvvMxlhodRg/VJpuapiCb0y9SDsWIE0ZZXCJ72Msa/6UrUdTDk+mqNMq2KeZVKBjdtMbwAAABkN4Lazp2QAAAAAAAAAAA50AAAAAAAAAAAAAAAEAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('925e48fe8c319e23b85014ddf6e3698dbaeb15bd84e0e4435614f3125d847eaf', 'f54c0097d441ff76c487d4ebb18f5e5f52c5a51ca96800bf9b818295790a3778', '81975999810da40603a50c2df23389355c54a483c9ed79524ca7f2a1a8ec96db', 26, 1559579752, 'AAAAC/VMAJfUQf92xIfU67GPXl9SxaUcqWgAv5uBgpV5Cjd4ObBqces8zXZAjfLGaR+Md0gHwPV4L+NudPgSQJdsE58AAAAAXPVMaAAAAAAAAAAAGTKcjd5tsclF3P+ptDiMdUWp83tuYF+zlADp+X+gt+eBl1mZgQ2kBgOlDC3yM4k1XFSkg8nteVJMp/KhqOyW2wAAABoN4Lazp2QAAAAAAAAAAA88AAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('902b0f5e1ee506b9f9d688e464673bda2faa056876cdfe0ac250ef5c9df0a8a6', '925e48fe8c319e23b85014ddf6e3698dbaeb15bd84e0e4435614f3125d847eaf', '13314f51e696c5401fe9844a7188689c4e4941117512416f17aed5741fd04df8', 27, 1559579753, 'AAAAC5JeSP6MMZ4juFAU3fbjaY266xW9hODkQ1YU8xJdhH6vQydJIXEAs1uOD6IkUMs8GlnyPDTxRqoRKW4/wgZZaNwAAAAAXPVMaQAAAAAAAAAAkpeXdNXfmh1EnUjCYIMebdzV8PlgCe6J6eJq8Mb4SXYTMU9R5pbFQB/phEpxiGicTklBEXUSQW8XrtV0H9BN+AAAABsN4Lazp2QAAAAAAAAAAA+gAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('c14459522fe42e2d10b29a370bbeba513e4aee429282dbe44842835d1b443663', '902b0f5e1ee506b9f9d688e464673bda2faa056876cdfe0ac250ef5c9df0a8a6', '5bb65b22580371a846775012b622cb9814ce1aebcace450c2e75e66c04ad6e55', 28, 1559579754, 'AAAAC5ArD14e5Qa5+daI5GRnO9ovqgVods3+CsJQ71yd8KimnNDwLYHe7gIHYMyxYiWPL6AdpOmOGCRINmPcEvmBn5oAAAAAXPVMagAAAAAAAAAAdQ/T4yJNKn9hiJoTuxDsNbd9ibRWf2hNHJhsiqBKWD1btlsiWANxqEZ3UBK2IsuYFM4a68rORQwudeZsBK1uVQAAABwN4Lazp2QAAAAAAAAAABJcAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('936c1e1f0bf1af96380eb09e59e521d5e3ddd370d21ff4fd1a09ab123a4cfbaa', 'c14459522fe42e2d10b29a370bbeba513e4aee429282dbe44842835d1b443663', '221d4cc449bee6c5681beab0fd8be6ef61a7638713111bf7b1a035a9f3fa1433', 29, 1559579755, 'AAAAC8FEWVIv5C4tELKaNwu+ulE+Su5CkoLb5EhCg10bRDZjMHDsmJoTpyfMr0n7ZRj1PtVYciwS5F25/ebV5KmPHyIAAAAAXPVMawAAAAAAAAAAbaCD4RbfEqSrI+qpIyzzUU+bYYMJsRw3pHcmHwQrvaAiHUzESb7mxWgb6rD9i+bvYadjhxMRG/exoDWp8/oUMwAAAB0N4Lazp2QAAAAAAAAAABLAAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('750fedf881913e508427d7c6f1a31f032cffd8b513eab2fca3c482743c92e16c', '936c1e1f0bf1af96380eb09e59e521d5e3ddd370d21ff4fd1a09ab123a4cfbaa', '91ceaa567e0bf1ee77f9f3b7718c961536681195f7c9133583060c217c99a053', 30, 1559579756, 
'AAAAC5NsHh8L8a+WOA6wnlnlIdXj3dNw0h/0/RoJqxI6TPuqkzOXz5mf2LgYnaoAtXxNZLk+UfT8YR92Qb9Z7YPbL3MAAAAAXPVMbAAAAAAAAAAA1jXLaoK/IOqKNVkib2cNLrjlETHXpZlcJ4BFYO8bM3mRzqpWfgvx7nf587dxjJYVNmgRlffJEzWDBgwhfJmgUwAAAB4N4Lazp2QAAAAAAAAAABMkAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('fe13366c4c53c04a65ec45e5dd7540d332a8c38456f71a46b8c197a8f61614b3', '750fedf881913e508427d7c6f1a31f032cffd8b513eab2fca3c482743c92e16c', '0c29620dbd4ce40e210d9f3300b3596f1fc12211625a470ac946f37462e46fcc', 31, 1559579757, 'AAAAC3UP7fiBkT5QhCfXxvGjHwMs/9i1E+qy/KPEgnQ8kuFscIqVmjyvqIlbFdmmCscTHwuF1xf/128KmopBLMQWNqwAAAAAXPVMbQAAAAAAAAAA8GmZ2mhGWPCcWAd1pU9SBJXfejtD7FOEoLD6oFsTOv4MKWINvUzkDiENnzMAs1lvH8EiEWJaRwrJRvN0YuRvzAAAAB8N4Lazp2QAAAAAAAAAABOIAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('05e77cb96060cb1b543745f88fe36e3246b45b26a9ee597f194fdfa332915ac6', 'fe13366c4c53c04a65ec45e5dd7540d332a8c38456f71a46b8c197a8f61614b3', 'cda986d97a35a6bf0478e055503db30876ed9b8c7ea45f7209ec2862e189a0ba', 32, 1559579758, 'AAAAC/4TNmxMU8BKZexF5d11QNMyqMOEVvcaRrjBl6j2FhSzbdxxbfFDV+si6AGRvL5s+oVcsY8ZwkF5kL9EBypi7k8AAAAAXPVMbgAAAAAAAAAA2zD57d2oUw2L6murtm/oe2Dj6FuzBXnVm7l7HoDsMUzNqYbZejWmvwR44FVQPbMIdu2bjH6kX3IJ7Chi4YmgugAAACAN4Lazp2QAAAAAAAAAABRQAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('5273d226f3ad944db78e740d2ef1736845197a098f8c22c2a1a412838d2f2770', '05e77cb96060cb1b543745f88fe36e3246b45b26a9ee597f194fdfa332915ac6', '0876355c1dbdd5261400b6e9cd3e386986449f87e864e24701c24e2d7cff62eb', 33, 1559579759, 'AAAACwXnfLlgYMsbVDdF+I/jbjJGtFsmqe5ZfxlP36MykVrGueMGwKUy3zdCJEbcnLnxoZEU3V59aYr7+qbfMi0DwngAAAAAXPVMbwAAAAAAAAAAHdP8tBBtpeyL899tLPnNdaqUoHRWmHKjK8mobr412wEIdjVcHb3VJhQAtunNPjhphkSfh+hk4kcBwk4tfP9i6wAAACEN4Lazp2QAAAAAAAAAABS0AAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('e0905bd61697815e7dd80a6f7ad8e83077a5304ab0681825e5a820bfbcc12dbd', '5273d226f3ad944db78e740d2ef1736845197a098f8c22c2a1a412838d2f2770', '7466ac5c732c9dc0a8a0b9525d6c819910648c67df5440b5bd448c8fa1c7224c', 34, 1559579760, 'AAAAC1Jz0ibzrZRNt450DS7xc2hFGXoJj4wiwqGkEoONLydwytNJJuK4uRpBqNE6DA/twCtsyMwX/BNjTK2j1A2cLpIAAAAAXPVMcAAAAAAAAAAA/bE+PeQRbcMvSIfQyaXSWXEgzB7abwreNVQeYYIhwiZ0ZqxccyydwKiguVJdbIGZEGSMZ99UQLW9RIyPocciTAAAACIN4Lazp2QAAAAAAAAAABUYAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('f090655f0d5cee583227d0088cb7af2b780f6fd91e836a4117abb11d479b5ce1', 'e0905bd61697815e7dd80a6f7ad8e83077a5304ab0681825e5a820bfbcc12dbd', '31413b48347e547a0a5d3b4da4668cce7ecebf8d8445a710a9620113cfa49eee', 35, 1559579761, 
'AAAAC+CQW9YWl4FefdgKb3rY6DB3pTBKsGgYJeWoIL+8wS29TrWUd6O7M4S0fe8UEcdhlcmYcyd0+obgadQjYZA/UK4AAAAAXPVMcQAAAAAAAAAAU3lWP9fTSJmRnN7PpNBaZaYziGh9Xk2I2lyAgYUdNswxQTtINH5UegpdO02kZozOfs6/jYRFpxCpYgETz6Se7gAAACMN4Lazp2QAAAAAAAAAABV8AAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('f965f5e76e2df794975e7245831b95b86ecc9711f7b7fb144df9bcac3c8a7a2a', 'f090655f0d5cee583227d0088cb7af2b780f6fd91e836a4117abb11d479b5ce1', '27c5f1168e56c378875d0d7782293efd8e8e1fc32574ca63c0cfaff503de05f4', 36, 1559579762, 'AAAAC/CQZV8NXO5YMifQCIy3ryt4D2/ZHoNqQRersR1Hm1zht40cpv6aCNmZXclj0Gp1g8/QTb+rEsz7O6QRevEp7pkAAAAAXPVMcgAAAAAAAAAADx7u3Tt890DDCAY2PUo3lKiU3M3nylcrqmvMlFLS+fEnxfEWjlbDeIddDXeCKT79jo4fwyV0ymPAz6/1A94F9AAAACQN4Lazp2QAAAAAAAAAABXgAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('f39961e7de53003d9a878277343091c60e1eadefba33202be08334cae6787bef', 'f965f5e76e2df794975e7245831b95b86ecc9711f7b7fb144df9bcac3c8a7a2a', '8862875b350193180b0b4f7c1516ccee401c973693cadac43cd202a0028e91e1', 37, 1559579763, 'AAAAC/ll9eduLfeUl15yRYMblbhuzJcR97f7FE35vKw8inoqjeXfs5RaC/pSbMQ0UkNgOHq6KFpFmq5/xPhha/qIrikAAAAAXPVMcwAAAAAAAAAAPORbuvHNNFfsSSmddqmUP8Fk8iW9Jb1yQL/uEyYGc2uIYodbNQGTGAsLT3wVFszuQByXNpPK2sQ80gKgAo6R4QAAACUN4Lazp2QAAAAAAAAAABZEAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('b8296a81cb3c468c47f100662acb337e168861732ce211ddda30218098e806ce', 'f39961e7de53003d9a878277343091c60e1eadefba33202be08334cae6787bef', 'c46e2a74c8bbd647c67bba970f36ba835de2ef571662cc1e34862da42228679e', 38, 1559579764, 'AAAAC/OZYefeUwA9moeCdzQwkcYOHq3vujMgK+CDNMrmeHvvvdOMXcZQv2fCMQnVMUfKBJ7XSFWbgB/wE3Bs7er87uAAAAAAXPVMdAAAAAAAAAAAN/+dCsXSVcipJLX5fSVl/dtD8pFe+iZnfnNZ2g7vXj/Ebip0yLvWR8Z7upcPNrqDXeLvVxZizB40hi2kIihnngAAACYN4Lazp2QAAAAAAAAAABcMAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('bc3a5a953848fe2cb46ed75ecebe6ba09c856d47d6fea025223858d8dc7aad95', 'b8296a81cb3c468c47f100662acb337e168861732ce211ddda30218098e806ce', 'c4d538fb46a0f5ff443af87d2e3243153b0596ad314d7f763c715cfda23089d5', 39, 1559579765, 'AAAAC7gpaoHLPEaMR/EAZirLM34WiGFzLOIR3dowIYCY6AbOHivLxyP8I+6k4JpB4A3PxOKsREZUMnzYBoAng8ZuENAAAAAAXPVMdQAAAAAAAAAASeHBruDgNPJb9kfcJVMyCg6meb0kov5dW9Ok8EEep2bE1Tj7RqD1/0Q6+H0uMkMVOwWWrTFNf3Y8cVz9ojCJ1QAAACcN4Lazp2QAAAAAAAAAABdwAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('a6f5bc74da946970949cc8a58b7fc0ca52a9627fa1a45fccf567a6ad2f311a90', 'bc3a5a953848fe2cb46ed75ecebe6ba09c856d47d6fea025223858d8dc7aad95', '74537b293500bc9b94156a5d8c5b6ed0a3c3b00409c7ba1f3011628c858b4880', 40, 1559579766, 
'AAAAC7w6WpU4SP4stG7XXs6+a6CchW1H1v6gJSI4WNjceq2V/inIeGJFrV1RVU+xprIZRH8KVaI74l/TAt+/rJeHJ50AAAAAXPVMdgAAAAAAAAAAWJlo97rfQjjINuAEkeCOb3Rwl6NYUk95Xa63Nc0UCv10U3spNQC8m5QVal2MW27Qo8OwBAnHuh8wEWKMhYtIgAAAACgN4Lazp2QAAAAAAAAAABg4AAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('22b06b409446c1f0c278af0ce15ad99657a0ff2196a5bfeded56d69d36779c9d', 'a6f5bc74da946970949cc8a58b7fc0ca52a9627fa1a45fccf567a6ad2f311a90', '34ed31eca04b01effc3c4508c9e349693392b511ba02793f32fe3ffffe3dc06a', 41, 1559579767, 'AAAAC6b1vHTalGlwlJzIpYt/wMpSqWJ/oaRfzPVnpq0vMRqQKNyYbOwL7RbiajIIn9wo1XIqxrxKXhScSJZushgn1U8AAAAAXPVMdwAAAAAAAAAA3texHWkeXlsh8yl52JzE1M7p/8uzk36DdlbvwdqWw1Y07THsoEsB7/w8RQjJ40lpM5K1EboCeT8y/j///j3AagAAACkN4Lazp2QAAAAAAAAAABkAAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('a96cbf9f3c2d3c56bd14b4050246a5e39e5920a201e9d66c40bc5aa68782fe53', '22b06b409446c1f0c278af0ce15ad99657a0ff2196a5bfeded56d69d36779c9d', '3e57af2a9c2685144c782b582859add10c7f0866c63b8b605759208c5aace9c6', 42, 1559579768, 'AAAACyKwa0CURsHwwnivDOFa2ZZXoP8hlqW/7e1W1p02d5ydI1zHat+THSiG8FiJYhX2IjqcewMyqp7eILRtLxlqAV0AAAAAXPVMeAAAAAAAAAAA4mQxuIyGkMnUNprxo1u6b3NOKgS/CqRR0k9oMQPLWs8+V68qnCaFFEx4K1goWa3RDH8IZsY7i2BXWSCMWqzpxgAAACoN4Lazp2QAAAAAAAAAABlkAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('4409a871d550650b6a0a630136e80cd259a333ca55203db1548d315f19ef4a5c', 'a96cbf9f3c2d3c56bd14b4050246a5e39e5920a201e9d66c40bc5aa68782fe53', 'b073624b968233a6d621a0d0bb1f438a0de0fda3064093921a9e7ade8ebd0e03', 43, 1559579769, 'AAAAC6lsv588LTxWvRS0BQJGpeOeWSCiAenWbEC8WqaHgv5TZNSyhXDfKEyU6jk0m8nxAE6PCJaGxAoOwqvcASMsUS4AAAAAXPVMeQAAAAAAAAAA9L2wqBam8xx6EMlQ1e/R7JPhksJqFoZDmKFcFBwf3xCwc2JLloIzptYhoNC7H0OKDeD9owZAk5Iannrejr0OAwAAACsN4Lazp2QAAAAAAAAAABnIAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('5eef1067a0fbdb41321e472a1c14fedc553d13539516dea4fdd319d0ca7d2af8', '4409a871d550650b6a0a630136e80cd259a333ca55203db1548d315f19ef4a5c', '7ebdf1512e2e49da20cc5cd4d6a8d9a77edf331c53b8eb86202594fbb9bd2aad', 44, 1559579770, 'AAAAC0QJqHHVUGULagpjATboDNJZozPKVSA9sVSNMV8Z70pcrowUdKRzA/ZRkdeo8Gu2qV4ThU16d9p8N74Hanu5UssAAAAAXPVMegAAAAAAAAAAWWc1pYhVWRoEJa6GcHIsaj1ysxI4CQ+BcJqiGW3wjmJ+vfFRLi5J2iDMXNTWqNmnft8zHFO464YgJZT7ub0qrQAAACwN4Lazp2QAAAAAAAAAABosAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('69840e81fe7e584aad0f22dfa89ee767cd3524497563e471af8af67b53f8d471', '5eef1067a0fbdb41321e472a1c14fedc553d13539516dea4fdd319d0ca7d2af8', '3dd918d7e436cec5884930cfc441cd68a86c3482c15bb6ab0e5c18cb2d5f445c', 45, 1559579771, 
'AAAAC17vEGeg+9tBMh5HKhwU/txVPRNTlRbepP3TGdDKfSr4kGCHHnA6KOdoPcSvp6Rxhk2hURlGF1VP2oSVNBkDI9cAAAAAXPVMewAAAAAAAAAA2s+Yd36GUBmFrCn8nxzR3G7G6BlWwDjWIvSuQxGMICY92RjX5DbOxYhJMM/EQc1oqGw0gsFbtqsOXBjLLV9EXAAAAC0N4Lazp2QAAAAAAAAAABqQAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('706d948a65a01559c33264168e913f4b490034942f79aea96f616a03e98297c3', '69840e81fe7e584aad0f22dfa89ee767cd3524497563e471af8af67b53f8d471', 'edc0d91a6dd43eb6065ee371ed5c4b026d0f6058eb915fa2f972f3bc670051a5', 46, 1559579772, 'AAAAC2mEDoH+flhKrQ8i36ie52fNNSRJdWPkca+K9ntT+NRxAgTIPLGOynuQgOXzmXQZrLyGTAGIDWOHJRQJqUfbAiUAAAAAXPVMfAAAAAAAAAAAhkYgTYVFfiNxywZFyKUj6rUeR42SRzNzOnasgV/WuDvtwNkabdQ+tgZe43HtXEsCbQ9gWOuRX6L5cvO8ZwBRpQAAAC4N4Lazp2QAAAAAAAAAABtYAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('b49ea0eb1fe629fc2c9df2c976bc9971fed6fd8d257ece738e45c3f8967d1569', '706d948a65a01559c33264168e913f4b490034942f79aea96f616a03e98297c3', 'af317862d7c846c4063bee2e8009312a6b00fcbd792f5cb8c1cde7980e008f4d', 47, 1559579773, 'AAAAC3BtlIploBVZwzJkFo6RP0tJADSUL3muqW9hagPpgpfD1EV61P5oEG1jpv4Ia/HUgGCpKwTQ4ZplHht7o7pA71gAAAAAXPVMfQAAAAAAAAAA8OM7CNjloxWqwlaanmBarRzmaEAjS1RTT7DMBUh/OHivMXhi18hGxAY77i6ACTEqawD8vXkvXLjBzeeYDgCPTQAAAC8N4WQpWNjKAAAAAAAB0O3VAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('41b518eb3573162dba39489d5e79c1c04ee6c6d08c19311b5bf7d9b8f9aa17d6', 'b49ea0eb1fe629fc2c9df2c976bc9971fed6fd8d257ece738e45c3f8967d1569', '2b4e071bee7dd45c41b46aff22baf8715a509925feb83021feb716204172963c', 48, 1559579774, 'AAAAC7SeoOsf5in8LJ3yyXa8mXH+1v2NJX7Oc45Fw/iWfRVp6aDW3LPbWQnkQqAyNZZJlDWaVAL9GmVKlMVbHoaATywAAAAAXPVMfgAAAAAAAAAAQNg4cr4Do7quX33o4rbaXa7+BDdHc8PbTEXBcuQETP8rTgcb7n3UXEG0av8iuvhxWlCZJf64MCH+txYgQXKWPAAAADAN4WQpWNjKAAAAAAAB0O45AAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('0dc8610ec649e718d45083b69105153b5689bec67427a72058b9357eeb0aceac', '41b518eb3573162dba39489d5e79c1c04ee6c6d08c19311b5bf7d9b8f9aa17d6', '376e4e35c2daf233fde58219c00159cffe2078ebdf74a88b437e6fad94e58d69', 49, 1559579775, 'AAAAC0G1GOs1cxYtujlInV55wcBO5sbQjBkxG1v32bj5qhfWeVRF+FiW1Qlc+3u1mBcZ84ULRdo3+cTo45YRJDO+PS4AAAAAXPVMfwAAAAAAAAAAsSOLB+Fh7PI1BdmA+nlx5DdeXSAZY7OnyHcuyeMEmJA3bk41wtryM/3lghnAAVnP/iB46990qItDfm+tlOWNaQAAADEN4WQpWNjKAAAAAAAB0O9lAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('b1c7d71aeba870b162b022c31adeb26614e63070928647a1b9c6bc51aba83d0d', '0dc8610ec649e718d45083b69105153b5689bec67427a72058b9357eeb0aceac', '2badaa7e31970f28fed695d32c07acebffcf7621cc225e601ce9138de5ceff0d', 50, 1559579776, 
'AAAACw3IYQ7GSecY1FCDtpEFFTtWib7GdCenIFi5NX7rCs6si0bTsBbSdDOFoh+y5f8cl3t+mLZfzrhHG/6bxBlR1MwAAAAAXPVMgAAAAAAAAAAAe2zMUWV2ofxxSJUd0hgEpW+dk+M503UNhsqXLKL0TMgrrap+MZcPKP7WldMsB6zr/892IcwiXmAc6RON5c7/DQAAADIN4WQpWNjKAAAAAAAB0O/JAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('90534350f3fea0a6a559de207695e282e898909544823ac5c7000f76abadc08e', 'b1c7d71aeba870b162b022c31adeb26614e63070928647a1b9c6bc51aba83d0d', '81fa97ae34fb1895f78837f050dfec4e419eddfdb57fb672602c4a577e74a6e1', 51, 1559579777, 'AAAAC7HH1xrrqHCxYrAiwxresmYU5jBwkoZHobnGvFGrqD0Nt2vJTHsKyMHdVCH/gs0c3U/eQKqaWReE+4jy2gt+ukwAAAAAXPVMgQAAAAAAAAAAG9Lveo8KOMBsuTXxxarafYQzHKlqwhXWtcRjpmTOc+OB+peuNPsYlfeIN/BQ3+xOQZ7d/bV/tnJgLEpXfnSm4QAAADMN4WQpWNjKAAAAAAAB0PAtAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('bd7b429dbd8f8efd1b56ed702bf183e473315e2f0422db5599548c404cee6066', '90534350f3fea0a6a559de207695e282e898909544823ac5c7000f76abadc08e', 'd9ceea405e2cdf0579a1f7d9c74eeed6e47d73be99164b6085d6da8110ac2e09', 52, 1559579778, 'AAAAC5BTQ1Dz/qCmpVneIHaV4oLomJCVRII6xccAD3arrcCOzeFEa2j6tGG8gbM/1MR8aABY/PxuO9Zz2s9mxGnLVoQAAAAAXPVMggAAAAAAAAAAdFs0uvdku2e14ZLZNY/inkD0IgG9m+9h08GbknZs9EzZzupAXizfBXmh99nHTu7W5H1zvpkWS2CF1tqBEKwuCQAAADQN4WQpWNjKAAAAAAAB0PCRAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('48645ad8430e5952553cd69569a5711bac93461b1c9c030b60b062f4853b6b14', 'bd7b429dbd8f8efd1b56ed702bf183e473315e2f0422db5599548c404cee6066', '3a33dcbea7fa6d359eb3076aa2ea9ab01c5cb97ff74c977e05d69c5fc20bc177', 53, 1559579779, 'AAAAC717Qp29j479G1btcCvxg+RzMV4vBCLbVZlUjEBM7mBmDvRi34u5RgEADibpcO4c5OBbLI5HFWx1tz1OBwx7IMYAAAAAXPVMgwAAAAAAAAAAY80asxoHVId70xk95LhsRNki3uSYhgJrvMpz6Qe35wQ6M9y+p/ptNZ6zB2qi6pqwHFy5f/dMl34F1pxfwgvBdwAAADUN4WQpWNjKAAAAAAAB0PD1AAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('b890cfd7326083ad579a151669212f51273c080e2b67a3664eb7cbcb9f177140', '48645ad8430e5952553cd69569a5711bac93461b1c9c030b60b062f4853b6b14', 'b939d2c7e0b5eec2737d0e9b893a4f0111ce0889c8a346302bc1889c577938c5', 54, 1559579780, 'AAAAC0hkWthDDllSVTzWlWmlcRusk0YbHJwDC2CwYvSFO2sUdAzpmZ4VmiJAGIhn1C8C65yAkmtBcCkFXHDwkTPoYkwAAAAAXPVMhAAAAAAAAAAAfXVfPS9jBqhTfz4LivYYP9btnu+qwf/qpVYL7HgbAM25OdLH4LXuwnN9DpuJOk8BEc4IicijRjArwYicV3k4xQAAADYN4WQpWNjKAAAAAAAB0PG9AAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('9fe688714ba462bb2badb522a28f435678cc3ad383dc4afd91a8c11e35a3d735', 'b890cfd7326083ad579a151669212f51273c080e2b67a3664eb7cbcb9f177140', 'b93f6d99994fa08cf0c88e1016819056cd9674c1187bc8e46bf678a3a41df05b', 55, 1559579781, 
'AAAAC7iQz9cyYIOtV5oVFmkhL1EnPAgOK2ejZk63y8ufF3FAGEeGE9B3tl7o9CfKtPVM4DeWbUf/Bo39cDHTrEIancYAAAAAXPVMhQAAAAAAAAAAHOu0BR+25TDycY7dcNSyVck1nyZBCkABFZXwjkic52S5P22ZmU+gjPDIjhAWgZBWzZZ0wRh7yORr9nijpB3wWwAAADcN4WQpWNjKAAAAAAAB0PIhAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('44943aac60f737822cbb8ed09dad3acff60f9811db9cc56efe2fd6082ed71fb0', '9fe688714ba462bb2badb522a28f435678cc3ad383dc4afd91a8c11e35a3d735', '2a4380b069e1d9166fb4b86a196e661da01c006a6179bf5d049a755fe2ec47ea', 56, 1559579782, 'AAAAC5/miHFLpGK7K621IqKPQ1Z4zDrTg9xK/ZGowR41o9c1AfruE1INcN+aRvkAlMPc22dTgggif6wloMtQ47jJqRIAAAAAXPVMhgAAAAAAAAAAudvIKbdFQO6cnqBlfAQ0qwiv8T/bow/WPTIANo/ERREqQ4CwaeHZFm+0uGoZbmYdoBwAamF5v10EmnVf4uxH6gAAADgN4WQpWNjKAAAAAAAB0PKFAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('4f6d9aa8f0c2a0bb76221321312b0449720e047bcc47a8d47e3885621396de32', '44943aac60f737822cbb8ed09dad3acff60f9811db9cc56efe2fd6082ed71fb0', '23f65f10fdb3b5f16a5e1f13b54e129319b02d226e510b8caab57e0a2da447fd', 57, 1559579783, 'AAAAC0SUOqxg9zeCLLuO0J2tOs/2D5gR25zFbv4v1ggu1x+wSZEueVQpVrdX4o+KMu65Z6AqPEQg3hDgMGXVzWdfg4YAAAAAXPVMhwAAAAAAAAAA14sFIRujWwg9KpW3xcPTBAGeuB6CsnSMTpP8SjSsd9oj9l8Q/bO18WpeHxO1ThKTGbAtIm5RC4yqtX4KLaRH/QAAADkN4WQpWNjKAAAAAAAB0PLpAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('0e91662d61bb7b28faf575b9a350576fc40f16e382177310964394bb22de658b', '4f6d9aa8f0c2a0bb76221321312b0449720e047bcc47a8d47e3885621396de32', 'fc91d5913ea45f77ab24393749877830d422f20197e5d0ab29bde6a4fd7501ac', 58, 1559579784, 'AAAAC09tmqjwwqC7diITITErBElyDgR7zEeo1H44hWITlt4y5laaexZi3BeoJbBYKEHVRaXQBzZzXDjrizmBZaG28WoAAAAAXPVMiAAAAAAAAAAAGMfQBF/YqYedEUNnQuilzukzLJPeVrR9I6q3baNG83n8kdWRPqRfd6skOTdJh3gw1CLyAZfl0Kspveak/XUBrAAAADoN4WQpWNjKAAAAAAAB0PNNAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('85e72664cd18352262866f05355401362ba8cea6a2f47e2716471ab80d7a8208', '0e91662d61bb7b28faf575b9a350576fc40f16e382177310964394bb22de658b', 'ed1df8f95fc08b56849e476829f000a813221e4ad3783b9aedeea4926b1c61bb', 59, 1559579785, 'AAAACw6RZi1hu3so+vV1uaNQV2/EDxbjghdzEJZDlLsi3mWL+ICn3BLQXRs7fl4siWFC2ldDe+71dfZ+Ot6SzuRKboAAAAAAXPVMiQAAAAAAAAAAlNMwBDUEqtiETmU+aulkMrDuVtBffL8sVeqm0xm+yFztHfj5X8CLVoSeR2gp8ACoEyIeStN4O5rt7qSSaxxhuwAAADsN4WQpWNjKAAAAAAAB0POxAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('fc843d2143c76bd29eb3b1ffddc3d48d60c7a1dcfa13ad88191da037092f12d0', '85e72664cd18352262866f05355401362ba8cea6a2f47e2716471ab80d7a8208', 'dc85b1773e17973fecfbfadb09e5be4662c5846b2e838ba0a492469d266164dc', 60, 1559579786, 
'AAAAC4XnJmTNGDUiYoZvBTVUATYrqM6movR+JxZHGrgNeoII58fVvlrThcLsRhZ/MWhMMf/UYr2OzkRkTKFYZDGQ5ewAAAAAXPVMigAAAAAAAAAAZB2X7lcCb772lP7QltIwf7Af+ESisKsoFpU1cBZvSc7chbF3PheXP+z7+tsJ5b5GYsWEay6Di6CkkkadJmFk3AAAADwN4WQpWNjKAAAAAAAB0PQVAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('74c6dfd38c6bc4b10deddbe2fdd5a143a81765123e6eb6324dfef0dcd4a55fb1', 'fc843d2143c76bd29eb3b1ffddc3d48d60c7a1dcfa13ad88191da037092f12d0', 'f8aef3c7031ddab22d68e3b30fd76acd0904885150e3d55addbbcd7eb4e2c37f', 61, 1559579787, 'AAAAC/yEPSFDx2vSnrOx/93D1I1gx6Hc+hOtiBkdoDcJLxLQwnboDZyOgcyfb4ke5wI3SosVZ6YQuxInN2U6dBd49hYAAAAAXPVMiwAAAAAAAAAAosEhvRTCvadI/J9r+8kB+lYA6OphTjHH/5o2+iklI4P4rvPHAx3asi1o47MP12rNCQSIUVDj1Vrdu81+tOLDfwAAAD0N4WQpWNjKAAAAAAAB0PR5AAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO offers VALUES ('GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU', 1, 'AAAAAA==', 'AAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7E=', 2000000000, 1, 2, 0.5, 0, 19); +INSERT INTO offers VALUES ('GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD', 4, 'AAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWE=', 'AAAAAA==', 100000000, 1, 1, 1, 0, 24); +INSERT INTO offers VALUES ('GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q', 5, 'AAAAAVVTRAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhg=', 'AAAAAA==', 2000000000, 1, 1, 1, 1, 26); +INSERT INTO offers VALUES ('GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q', 6, 'AAAAAA==', 'AAAAAVVTRAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhg=', 2000000000, 1, 1, 1, 1, 26); + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 2, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAIAAAACAAAAAQAAAEi5lEev1R1cptMqJyV86PLvZhkUlSQk9VwpPmG2+iubVAAAAABc9UxQAAAAAgAAAAgAAAABAAAACwAAAAgAAAADAA9CQAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAOIAQROKmgzAAmA3BksTko4NgUHKgm3WSkZ6MmIKarH9ZLm2XvuzUAW8SwZz0DxGe5xkYxrg+weZ8JtB3HgcSBQ=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 3, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAMAAAACAAAAAQAAADANkZAETViyi0itFeTDvHr9wo93bM4IG7PsfZGVGhIZgQAAAABc9UxRAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAQTIdMTXJF0RMvW2vteuwCgOe0iHQt3mkK15D9psQs7DHEeMTISlyISsOVMbJj+BeiQM9xQke5izGc27vYcgaCA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 4, 
'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAQAAAACAAAAAQAAADBL5d5/gVdchPaRzVdx/Oc7UBg/0k24MIjUkVcemdT0iwAAAABc9UxSAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAO/oUx8sRplKBFNatnZuWvGYcKfCbjTC1eonR8sFtvcUWPtUKoSef35kMyXdeXPvcqrVcGSOKr/mKk0j9gsILAA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 5, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAUAAAACAAAAAQAAADDLP2JjaseHC5S84W4sTqYG9M7jqEj5fErYnlzW24uyJQAAAABc9UxTAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAAYb43jmh1IDIqDuH2ifTkDgv+irEQhrbnhAmFclyoLeEUzBTLKko98FjLFReIjQ7rT1Uhj0kp3/S/ZVQl2HTBg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 6, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAYAAAACAAAAAQAAADA3L3uT/QXYBYfY8/qMHGh0hcyCOZEsnCEO07y1BmB/ywAAAABc9UxUAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAUZXVKnfC3Jt0xtPdZiHIknSh6q4j/iMDEiZ7ZzBSyB/Gj8cw0d+jEZ0WVVJU36Uf/BxwBo8lXky1zMzW4s9xAw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 7, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAcAAAACAAAAAQAAADCtTYGfhpONgwukN998j97ocPcZJEIQmjgCti2L3g5apgAAAABc9UxVAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAHMeuajbZn8hqAtUDrlCM1G5DvsbFwQOiDc/meoSQsBDt3sE6koOKUMLYCp66K1des9xpR/snj4dpXRoVwowDCA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 8, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAgAAAACAAAAAQAAADBn+BUeHgQahFePEGUleB1vqjnl7ks8Hz+si1f6IEyaDQAAAABc9UxWAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABABfeId9w7hBgyNCLhCbCvKZbeP/bo7KHwfFmGA3fAs2+LntyfOdAsmgwv47ZxON4SFeBZrWNDkmKpsKIPwc18Bg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 9, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAkAAAACAAAAAQAAADAjVo5T4e3O/VjKg5b41ReiyMTDpFkWHC07U8ycBEC26QAAAABc9UxXAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABALXg6drtTi1/YP2KGu2ZEaE5Hx4M//81Cjj7F/z+q8jBw2PY58Y3m5Vs0QBkWw5FOrrAV/GYfDsumN7AMEoYsAA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 10, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAoAAAACAAAAAQAAADCn1TnpFKhxHhmlddmz+CVycZUcRJUUVTouIr1/B4stiQAAAABc9UxYAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA7N/e5jJ4Bk8M0y6KXA0I8rCsiQoBjV8Pgy/r4hQqy5B7f/Oq70K2RnW7pD9OpbGGOV4/YXoIRFgnby9gdgAFCw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 11, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAsAAAACAAAAAQAAADDvswXlvUh6LfMcX0QCawtPziXthsBRIf/sX/DKZGr3xAAAAABc9UxZAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAtTj72Vhj5EcqyLHjpqMenMR3XlIpucWRVUQFa3NHiuCVWjM/0DAg2tEKcjvTdVjJJzRsR37rT8DY5IL4nl8EAQ=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 12, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAAwAAAACAAAAAQAAADAVIfJyIA94ibQBp6Gr5etFt8F/Zha3aibN9CcxKcxqlwAAAABc9UxaAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA60B2GXrQLwOEUu5Nfn9kefuvJevcU2QB/YDBs7KJhiKkedkHzWZqHtFcxku1ydI1jZEGAUFHbe3c59dtMGxTCA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 13, 
'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAA0AAAACAAAAAQAAADAV+93O3Gtgc/3HN2pv/2lwqiEheJOe1bP5LE/ajLkHewAAAABc9UxbAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAkcpKWpg0eT9GdYHeBbX9xM4DCJKMZXGXYfyFaidAaXgnVTLSEf7X6eOZmTXqUW5iv0RKz5wNG/b9Bw5A5krbAA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 14, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAA4AAAACAAAAAQAAADC/ly1h1wxISbzexK+iSnfYLVL8AiBtty1lZpa9TE3YAwAAAABc9UxcAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAAUqv7uG4z12Q2Sd5OIi2F44wFdlU7mXlhzNFc02YuepAKq5cJ+S6UAA22APX36ELPT3YJQHHc5RmaurnjoOlCw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 15, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAA8AAAACAAAAAQAAADB3aB1r1peNHHSZr64bkicM8QRZvft+NnI1DI/A+JAS8QAAAABc9UxdAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAqSkpRrwWEAu4NPwNPDWm4s5q1Of+136lBY3/EL3F2El1WTCBExFwYbLsG5pMpE9Y5bGuR1aTNGr2DRAsqca0Cg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 16, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABAAAAACAAAAAQAAADBjWABRYEbBBA8ypjo8/wUXKmmQeiwpHhMPXCxh3/8j5wAAAABc9UxeAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAD1v8scoE/lAO0zMCQl6cC/ilsYPtA0g5I6DN6/Uk036Qr59WtGnb3v2mSwCfsH90F+9fO1L5Dh4RMlNN2q89CA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 17, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABEAAAACAAAAAQAAADDJw7cn8enZwS+vjfcY96rde0kc9Xz7Nd3EtFFTHXpCEQAAAABc9UxfAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAIuBUjjYVBwe1rnXwZOKECHE1jvDZgIJcvv41b8T3R2YgF0sE/Kpy3G0R1F4K0O9hHbINeg65S7cuGyTabc5rAA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 18, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABIAAAACAAAAAQAAADCnseL6QMEQtxRiU79V4mm8vq/mghCCN8Ef23ZWxhTS9gAAAABc9UxgAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAOzKkxBDxHIeWokkLsrPpqyUlBwkvS77LuHpqGr+lqKflLXlUQFVfq+3XG+M3LHHDmhlRWnKtxlr8DHv3mTZKCw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 19, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABMAAAACAAAAAQAAADBpj1IizH7abOVuT5/1ISi96ekuj8snJQ+HoD6D5UXyngAAAABc9UxhAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAGYXRJJLOQFIErZBhxk0BhQZKHVIRR0OJFF35ENUx+/t/XWqd2+gPsnzBN9sHR9TOqCvodArOuEV2RvGTwcYWBA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 20, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABQAAAACAAAAAQAAADD5+jCwtwrPsnV0TfmAqdhExrbMDdyp+6g1QzV96Jk+7wAAAABc9UxiAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAhoKYuNFVvmzk7DC3e9BQtM39pnjsVchx84gTxYquW9bfEC2lmI48hKErIRW0m9nWcFVtM2R703Qbgemk/CaxDw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 21, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABUAAAACAAAAAQAAADAWjv4aVqzaOKmgaVr2+sssrSigCIwjkZB46QtX+6P1wgAAAABc9UxjAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAtu22bedxsLxcf8wIMonGU5FvyJWQ35W03dVgV4Hdylg5EqE5uFXTiPWMGVOAYiMmFpcxHbGUItr4I00v5/xnCw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 22, 
'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABYAAAACAAAAAQAAADDgPPO9V8+N7m8LpyCEIV+tnAwcwQMwXWmHCn+QiO7/vwAAAABc9UxkAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAYBYzp44Edr2IHvov6WXHwMo8CGxWc6keQQn2jCwaes2dw2w8Ef8y2zegbYhzyE/RRzPoWZMlXHYI+tD6t3NKBQ=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 23, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABcAAAACAAAAAQAAADA4+DkCiIVSC3cYbRY4mvgLclpAebsw8FCKx0LiGTPFLwAAAABc9UxlAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABACncKAzoco5PObWW7mRg26eva48+FF24gIUaDJN3FjbPNhNI7dgBoOiU91viCqjLgAoAxvKRTCc0hORPtdArCDQ=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 24, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABgAAAACAAAAAQAAADDeEzpzpLLABFKc9vdy0Xp+hV1uDLGGn9Hj+z5YT617kAAAAABc9UxmAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAxerUNZGvZYgte6FaNzA73v7p4ZS+ofheMjJoEvcbr4IM1U8j88PBmdlyES23swPdR2DGlBfmPzGAoCRIs7aGBQ=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 25, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABkAAAACAAAAAQAAADALl2NjB4KR+5t5iLKsC/GU+a5QeDFuT6mRfjnSVuJQEgAAAABc9UxnAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAh7XF37XyWXc1LSzLx3+T3G2rVPU3xiD+03NjjB2yxwKPgVHYkBLYkzZiBQ1LeJ7qhvHcd/z6AcA+43bu3JbGAA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 26, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABoAAAACAAAAAQAAADA5sGpx6zzNdkCN8sZpH4x3SAfA9Xgv4250+BJAl2wTnwAAAABc9UxoAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAC8npHMrUBUjkgE67BQfZ9ggJctl9Ip33Wp2lICUuHcpCcVCuOsZOOKZuXCJ8dLfVzlRN4q2qR/hslQZd+dB0Cw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 27, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABsAAAACAAAAAQAAADBDJ0khcQCzW44PoiRQyzwaWfI8NPFGqhEpbj/CBllo3AAAAABc9UxpAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA1x3hiTZYWD9lbHZSR8A0l5Gk1mIai2WQwX6+J0CDlS0evof5waRgg92HEvWtDiI97332+V3VhALjYmthnzn7Dw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 28, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAABwAAAACAAAAAQAAADCc0PAtgd7uAgdgzLFiJY8voB2k6Y4YJEg2Y9wS+YGfmgAAAABc9UxqAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAhVk33BGL5BzspaumzHEQ9/7Ap9ZyPbdK0A/eGaBsJN4BdIuHSPpjB+VlvJKnKIgGLy1yjD4M/1vqPU+ICAAbDw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 29, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAB0AAAACAAAAAQAAADAwcOyYmhOnJ8yvSftlGPU+1VhyLBLkXbn95tXkqY8fIgAAAABc9UxrAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAExRoFFJ9dAjbx2bNovl+YKiLX1GVE6zdCbMHsLs9cgMgke+iSU+nf2xRbPCvidSpmy+Z8x8Nvf7NiDRJ7jXrBg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 30, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAB4AAAACAAAAAQAAADCTM5fPmZ/YuBidqgC1fE1kuT5R9PxhH3ZBv1ntg9svcwAAAABc9UxsAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA1z/UtFZkvnZH9I3+BQbOPeh5ZWWpz9ugsoEc1MNI2koSch9FXclPEvjLA7SbHi2dehACq3zexPCyetGe4R3oCg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 31, 
'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAB8AAAACAAAAAQAAADBwipWaPK+oiVsV2aYKxxMfC4XXF//XbwqaikEsxBY2rAAAAABc9UxtAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAgbADn1DDkaOdsJRX+1u+NfkYkOOS2RvFfXRBqxAkQIfejE/ZlTIzkZLKCVhNG9UUVyHuNjjR258L8G1i/tYXDw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 32, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACAAAAACAAAAAQAAADBt3HFt8UNX6yLoAZG8vmz6hVyxjxnCQXmQv0QHKmLuTwAAAABc9UxuAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAme1ZWb5ZisNOoB9yQeq0Ka2+jAMTZteGiWL5CPE3O/Nx4MJp6q4Z/oJnmYfwUcmnM2QLYiV0Thfmg9JFRhOzDg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 33, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACEAAAACAAAAAQAAADC54wbApTLfN0IkRtycufGhkRTdXn1pivv6pt8yLQPCeAAAAABc9UxvAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAVVNO47bVWpvNB6sL3Zioubb8sDxXtLJmkQd30eGGmLS4nzBvqjFebekwPcjWZb8sn3HiqdoJSu4nR947fL3eBw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 34, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACIAAAACAAAAAQAAADDK00km4ri5GkGo0ToMD+3AK2zIzBf8E2NMraPUDZwukgAAAABc9UxwAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABArQ6za78utjqvJehdPmHbBboRSJCFJMXncxowErcQlEGE+z8CeEye/HxoNckIbfYJ9Fr4tqi1pLCzUQSxPbzBAA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 35, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACMAAAACAAAAAQAAADBOtZR3o7szhLR97xQRx2GVyZhzJ3T6huBp1CNhkD9QrgAAAABc9UxxAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAEaizZYo9OEHlh71+6KxnKs8veKY/1bW/VK/cu1/TF7lnXgdMTG5S8uh6Utehm+6XB3RoW3bLwIfOr+6M492BDA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 36, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACQAAAACAAAAAQAAADC3jRym/poI2ZldyWPQanWDz9BNv6sSzPs7pBF68SnumQAAAABc9UxyAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAdjKewmSukGCl7MrMx9liABLlUu0B/H1W7unx3IvV20SuBBIr8f6mLIAxSxEHgiT8DSkyGQlexjHdFakOHbEoBA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 37, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACUAAAACAAAAAQAAADCN5d+zlFoL+lJsxDRSQ2A4erooWkWarn/E+GFr+oiuKQAAAABc9UxzAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAcsZNljpTmOAJW9T1S+SY4Db9oYkOS7VXEZ2jC/+sVa7iumM4ZPzO5/+opcP8hxg5P5gsymU4r4KFi1aueHmADg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 38, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACYAAAACAAAAAQAAADC904xdxlC/Z8IxCdUxR8oEntdIVZuAH/ATcGzt6vzu4AAAAABc9Ux0AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABASL+nj9+n9uSDVqvOP2SkwUpIpxrtpxS0klcX84YmSNzW3SzSze7ZHu6wD1nf+C1SLjOHJwghcPl2N/yRjnzIDg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 39, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACcAAAACAAAAAQAAADAeK8vHI/wj7qTgmkHgDc/E4qxERlQyfNgGgCeDxm4Q0AAAAABc9Ux1AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAb9YhSUv6XQGX8Mb47+gZSX/UrGQrMCYwT8kEJxtFQo/EBvAEQ2WSbDSNKqyx3sFtaK7E6qkc05BkJqWzS6kQBg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 40, 
'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACgAAAACAAAAAQAAADD+Kch4YkWtXVFVT7GmshlEfwpVojviX9MC37+sl4cnnQAAAABc9Ux2AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAOTDp7eIrKd1wXyTRBbd6dUXIu0Lhj7WcysWeglLY27DjnwfNrtVVNcrdoxH71ZZXujT/NT3YnQHToHzTvYkdDQ=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 41, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACkAAAACAAAAAQAAADAo3Jhs7AvtFuJqMgif3CjVcirGvEpeFJxIlm6yGCfVTwAAAABc9Ux3AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA8Td7rKG+C8g/h+6pvKMaKJyLY083sV/tyBnE10ndYyilVldbbuimRx1LdbBGM8yr/zhK86mQCPv/kAgmFw00Cw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 42, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACoAAAACAAAAAQAAADAjXMdq35MdKIbwWIliFfYiOpx7AzKqnt4gtG0vGWoBXQAAAABc9Ux4AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAWOQjMI2he8LbAPiaboIoYTP+dQS/MDf8uQ3A3MMOZ0UQguW5ByO8W7Hr2L8HbzL/l4t7i75FvBh+me2PeNtRAA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 43, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACsAAAACAAAAAQAAADBk1LKFcN8oTJTqOTSbyfEATo8IlobECg7Cq9wBIyxRLgAAAABc9Ux5AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAYS72KHXyc3BvyKY06MiY7rAPtV5U2ZizrlNKODi9ZHn4+sEolTle9uBE6RPOsA4OLSLTKxH1iKSHEO88dgjxDg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 44, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAACwAAAACAAAAAQAAADCujBR0pHMD9lGR16jwa7apXhOFTXp32nw3vgdqe7lSywAAAABc9Ux6AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAYtj7hA8NuZiNVolpaniesCkJOrtOGwMd5FLWxDen4/2ggpXMfyh/wdOrWnh4ea5qdKAxzVZ0jOb5sWQR/JolBA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 45, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAC0AAAACAAAAAQAAADCQYIcecDoo52g9xK+npHGGTaFRGUYXVU/ahJU0GQMj1wAAAABc9Ux7AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAY9RuTvfmAuAfn3375MIQ8rhhgzT13uAD2CU6KfVL7+tSH3xbvMm/tChux+YqauApDQZ6Tzv/JKx/ZK1d4TzVBA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 46, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAC4AAAACAAAAAQAAADACBMg8sY7Ke5CA5fOZdBmsvIZMAYgNY4clFAmpR9sCJQAAAABc9Ux8AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABASYfhas+zMU3PwqERMN1iuYCPdHs2n+jNhfZPSimUwNz0HKua6drdCsDWsqzKFnnu1b3voNlgyWXMrTnbt9SADQ=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 47, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAC8AAAACAAAAAQAAADDURXrU/mgQbWOm/ghr8dSAYKkrBNDhmmUeG3ujukDvWAAAAABc9Ux9AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABArZZ3GmclY3+9jBJKLRCTBw01Nxi5aXHwXBC1C+/ulXU9tlK2YruGVtmZ3z0lckk3Gg7OEhcrgcqMI+mvLLZmBQ=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 48, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADAAAAACAAAAAQAAADDpoNbcs9tZCeRCoDI1lkmUNZpUAv0aZUqUxVsehoBPLAAAAABc9Ux+AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAPtSCDf2JruJ/oLufmuUCwmhwZXOv9MfT4hAVjqnNk3B9rgzkyDIuO7bvxZhDR8wBxoLbMVgqmobPEyDAw8mnAA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 49, 
'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADEAAAACAAAAAQAAADB5VEX4WJbVCVz7e7WYFxnzhQtF2jf5xOjjlhEkM749LgAAAABc9Ux/AAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAw1hV/VT9kiY61GVPLAPpQQnPaegvd/5CFgN8skTfocLriku0bMQMj+IAFtys1+kbbCklS22AoYi6FWCpezv9CA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 50, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADIAAAACAAAAAQAAADCLRtOwFtJ0M4WiH7Ll/xyXe36Ytl/OuEcb/pvEGVHUzAAAAABc9UyAAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA3+zvx10WHiUoOy5pMRX4WxuB2Gld/ZQ4SMxcP3Mg8IrmIFaHjx+GyyuTThqqMe0MQbnvma0VqyRVUeW8xyXGDw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 51, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADMAAAACAAAAAQAAADC3a8lMewrIwd1UIf+CzRzdT95AqppZF4T7iPLaC366TAAAAABc9UyBAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABArMim0aW4V5A4qZjwpYt+VhqBjIOBxC/IG83mJqgBCKRFqhZVtiLkPJhlyX8uC2wXQSv1b+xluy/ueyLQpgW0Aw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 52, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADQAAAACAAAAAQAAADDN4URraPq0YbyBsz/UxHxoAFj8/G471nPaz2bEactWhAAAAABc9UyCAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAljO2tcOMxZy0Txf97VW8lTogemb94E0wI6MzWuzdI/SuOFnT3r+D2X5KXtrqCZDOq27092WF2psg9iSwFGa+Cw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 53, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADUAAAACAAAAAQAAADAO9GLfi7lGAQAOJulw7hzk4FssjkcVbHW3PU4HDHsgxgAAAABc9UyDAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAOGErGKrzVZHS49/n31L2+iUDfS8IBwCxP7beh0XI9NQmrR9LJdchTq618ID6tjD+qs3R9wKwkPsoZ/hVrZrUDw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 54, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADYAAAACAAAAAQAAADB0DOmZnhWaIkAYiGfULwLrnICSa0FwKQVccPCRM+hiTAAAAABc9UyEAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA3Qhad1yWb8CXpugm/5YChV3OuJQBWcE7vY86YqH0h4g/ryc99YjYyYua+l3QUejF77Cy1szzCtI5NwgePfYvAg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 55, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADcAAAACAAAAAQAAADAYR4YT0He2Xuj0J8q09UzgN5ZtR/8Gjf1wMdOsQhqdxgAAAABc9UyFAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAemcmk4ICf4N7wunMr0a1ZwlpkaHKK+leMkFWlGl4tZbfO9Lmuu4PZJN7YcFrfiWybh9jweIc2MBAWL6GLXlDDA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 56, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADgAAAACAAAAAQAAADAB+u4TUg1w35pG+QCUw9zbZ1OCCCJ/rCWgy1DjuMmpEgAAAABc9UyGAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA3PHgZEBjiyByyF2YG9We7jMk2Tad6EswJH+V4+UABc9+i+e5Y2eay4UGIMHP1n7u5gQF8tZm777wbP9WHFx3DA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 57, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADkAAAACAAAAAQAAADBJkS55VClWt1fij4oy7rlnoCo8RCDeEOAwZdXNZ1+DhgAAAABc9UyHAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA2fmBWSFyA6w0z6v9OUM7nZt/KV/UpUP9X1GMco/H04DBaBVx1PHzxol1SvSkrrxoqCnwknZKesKiI8/aUgbDDA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 58, 
'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADoAAAACAAAAAQAAADDmVpp7FmLcF6glsFgoQdVFpdAHNnNcOOuLOYFlobbxagAAAABc9UyIAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAWZezJP0H548HTw4pqZWgoYNcPp7lthKl9DXuGkNsktN9wjjzCW7/96GLiZgDh/Z1joWAy45REwb9f8VCdyBHCA=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 59, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADsAAAACAAAAAQAAADD4gKfcEtBdGzt+XiyJYULaV0N77vV19n463pLO5EpugAAAAABc9UyJAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAl1AuzGKFNP53aFHM3bzVw5aJYDYZ6jW2DrZe5cRVo44LpNltKMPYWRhSrY5y39Z5c3XKm/KAXkB0+BAtMZA8Cg=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 60, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAADwAAAACAAAAAQAAADDnx9W+WtOFwuxGFn8xaEwx/9RivY7ORGRMoVhkMZDl7AAAAABc9UyKAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABAG9W1wN7S2VuuAD3vZIaQpNLHAvfhnOFgEVAIL2ZWZwz14fJkN/59U4eeumlZXrMZyePQnhCKjJrZ4xkTZFZGCw=='); +INSERT INTO scphistory VALUES ('GA3S2OQDRXOCJTFEB4DNK4TL72ZHIE6ZXTIZ7POZ62I4DFOOJLSUACND', 61, 'AAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAAAAAD0AAAACAAAAAQAAADDCdugNnI6BzJ9viR7nAjdKixVnphC7Eic3ZTp0F3j2FgAAAABc9UyLAAAAAAAAAAAAAAABwLwqSWZgt0ngLt0zkmOApnWe+GS+RbRXMpDelWNPmmAAAABA8/Qrhku9ML9bbQDdUA6l5fik50kMPmaTHSW+3R2vVleaXtr1oDqYzezccDuiKbs7eFEFo72kxkmlhkHiGUG1AA=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('c0bc2a496660b749e02edd33926380a6759ef864be45b4573290de95634f9a60', 61, 'AAAAAQAAAAEAAAAANy06A43cJMykDwbVcmv+snQT2bzRn73Z9pHBlc5K5UAAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('lastclosedledger ', '74c6dfd38c6bc4b10deddbe2fdd5a143a81765123e6eb6324dfef0dcd4a55fb1'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 61, + "currentBuckets": [ + { + "curr": "cd93a51625fe9e4ef778b6679b4300234695993b3f049cc997b17e58ed781431", + "next": { + "state": 0 + }, + "snap": "ae9084bc36e2f6c610576ceebaf8d28ff8f80c9dc76e9764967fb64e6f8de9b1" + }, + { + "curr": "f6471607f3dd9f536338c30fe7e8b3c827bb159da5817416258a6ceb928bbce7", + "next": { + "state": 1, + "output": "7acc37b7212b9aaa9b093ee9ac2d6e6e7521def3f7e991fe320b0124b220e44b" + }, + "snap": "984fa2d1ca6685fde8acfa55ff20e499e74f057e57807feb8623a8cd5e2a40a8" + }, + { + "curr": "7e01d83808bc69acba0c6d331da8fb47fcf99cb75f58ac271ebb59dc4ae505ac", + "next": { + "state": 1, + "output": "cdfdba6ff7ead4a57cf4fcb9a795f97bbdeac0fe868e7b93d9688e45952ba0ec" + }, + "snap": "3bf2b7e0f10d61e2578c9d6ff9370d760bbde04f04887d279b52a68158968600" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 1, + "output": "3bf2b7e0f10d61e2578c9d6ff9370d760bbde04f04887d279b52a68158968600" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + 
}, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 0, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('lastscpdata ', 'AAAAAgAAAAA3LToDjdwkzKQPBtVya/6ydBPZvNGfvdn2kcGVzkrlQAAAAAAAAAA9AAAAA8C8KklmYLdJ4C7dM5JjgKZ1nvhkvkW0VzKQ3pVjT5pgAAAAAQAAAJjCdugNnI6BzJ9viR7nAjdKixVnphC7Eic3ZTp0F3j2FgAAAABc9UyLAAAAAAAAAAEAAAAANy06A43cJMykDwbVcmv+snQT2bzRn73Z9pHBlc5K5UAAAABATulIVit6Zs34QLUk3ZtXbMf5BJ20xVfXvWndVVsOzEtTHhxgyad8jB+d3lVLqQkWdzy7jLhbHsU5c5+TaNThCQAAAAEAAACYwnboDZyOgcyfb4ke5wI3SosVZ6YQuxInN2U6dBd49hYAAAAAXPVMiwAAAAAAAAABAAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAQE7pSFYrembN+EC1JN2bV2zH+QSdtMVX171p3VVbDsxLUx4cYMmnfIwfnd5VS6kJFnc8u4y4Wx7FOXOfk2jU4QkAAABAAo9pMSGxk2D87egUe8KuNnO8GPtLnHq58aOthQgPbEQDDwa8ZPB6hGSjEYqGh4qYf/skL1KT0cVigZcHBJczAwAAAAA3LToDjdwkzKQPBtVya/6ydBPZvNGfvdn2kcGVzkrlQAAAAAAAAAA9AAAAAgAAAAEAAAAwwnboDZyOgcyfb4ke5wI3SosVZ6YQuxInN2U6dBd49hYAAAAAXPVMiwAAAAAAAAAAAAAAAcC8KklmYLdJ4C7dM5JjgKZ1nvhkvkW0VzKQ3pVjT5pgAAAAQPP0K4ZLvTC/W20A3VAOpeX4pOdJDD5mkx0lvt0dr1ZXml7a9aA6mM3s3HA7oim7O3hRBaO9pMZJpYZB4hlBtQAAAAAB/IQ9IUPHa9Kes7H/3cPUjWDHodz6E62IGR2gNwkvEtAAAAABAAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgDAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AwAAAAAAAAABHK0SlAAAAECcI6ex0Dq6YAh6aK14jHxuAvhvKG2+NuzboAKrfYCaC1ZSQ77BYH/5MghPX97JO9WXV17ehNK7d0umxBgaJj8AAAAAAQAAAAEAAAABAAAAADctOgON3CTMpA8G1XJr/rJ0E9m80Z+92faRwZXOSuVAAAAAAA=='); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO trustlines VALUES ('GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C', 1, 'GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG', 'USD', 9223372036854775807, 100000000, 1, 15, NULL, NULL); +INSERT INTO trustlines VALUES ('GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD', 1, 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU', 'USD', 9223372036854775807, 0, 1, 19, NULL, NULL); +INSERT INTO trustlines VALUES ('GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP', 1, 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU', 'EUR', 9223372036854775807, 3000000000, 1, 20, NULL, NULL); +INSERT INTO trustlines VALUES ('GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC', 1, 'GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD', 'USD', 9223372036854775807, 200000000, 1, 24, 0, 0); +INSERT INTO trustlines VALUES 
('GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG', 1, 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF', 'USD', 9223372036854775807, 0, 1, 41, NULL, NULL); +INSERT INTO trustlines VALUES ('GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG', 1, 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF', 'EUR', 9223372036854775807, 0, 0, 42, NULL, NULL); + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('4657f7ab2fd82ae203f04d209e6adec0e6bc4f0983b4fc3fa679820ed47e29d7', 3, 1, 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('66e27fb28870cb5256ea92764bcb222adbbaa5fec2d89a62a9aa8c9c8e2ee9e9', 4, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBucAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBs4AAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('e9d1a3000aea36743142f2ede106d3cb37c3d7e88508e3f21b496370b5863858', 5, 1, 'AAAAAgAAAAMAAAAEAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+QAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+OcAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('995b9269f9f9c4c1eace75501188766d6e8ae40c5413120811a50437683cb74c', 5, 2, 'AAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+OcAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+M4AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('f78dca926455579b4a43009ffe35a0229a6da4bed32d3c999d7a06ad26605a25', 5, 3, 'AAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+M4AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('a9085e13fbe9f84e07e320a0d445536de1afc2cfd8c7e4186687807edd2b4897', 6, 1, 'AAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAMAAAABAAAAAAAAAAAAAAAAAQICAgAAAAEAAAAA9k+dTItn45gmOGfkeSTjdS5bqs656Zk8VCV06fwNxUcAAAABAAAAAAAAAAAAAAABAAAABgAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvicAAAAAQAAAADAAAAAQAAAAAAAAAAAAAAAAECAgIAAAABAAAAAPZPnUyLZ+OYJjhn5Hkk43UuW6rOuemZPFQldOn8DcVHAAAAAQAAAAAAAAAA'); +INSERT INTO txfeehistory VALUES ('0fb9c2e20946222b23e1d1d660de9d74576c41cfd9b199f9d565a013c1ef89ca', 7, 1, 'AAAAAgAAAAMAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDc4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('dd74eee27a59843b28a05ad08abf65eaa231b7debe4d05550c0a7a424cca5929', 8, 1, 
'AAAAAgAAAAMAAAAHAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+QAAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+OcAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2551e76a3ce4881b7bc73fdfd89d670d511ea7d4e56156252b51777023202de7', 8, 2, 'AAAAAgAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+OcAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+M4AAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('3b36ecfbcc2adb0cfff08ae86199f64e12984f084bb03be9bb249611df82322b', 8, 3, 'AAAAAgAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+M4AAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+LUAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('e14885cb66af5f7f5e991b014eec475c61cc831292cf5526cdd0cda145300837', 8, 4, 'AAAAAgAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+LUAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+JwAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('66c28c0ccd5a2e47026aacafa2ecd3c501fe5de349ef376c0f8afb893c7bb55d', 9, 1, 'AAAAAgAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytoqzUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytoqxwAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('fdb696a797b769176cbaed3a50e4a6a8671119621f65a3f954a3bcf100c7ef0c', 10, 1, 'AAAAAgAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+QAAAAACQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAKAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+M4AAAACQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('67601a2ca212b84092a7d3c521172b67f4b93d72b726a06c540917d2ab83c1a1', 11, 1, 'AAAAAgAAAAMAAAAKAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqplgopwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAALAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqplgooMAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('0e128647b2b93786b6b76e182dcda0173757066f8caf0523d1ba3b47fd6f720d', 12, 1, 'AAAAAgAAAAMAAAALAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAJUC+QAAAAACwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAMAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAJUC+OcAAAACwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('cd8a8e9eb53fd268d1294e228995c27f422d90783c4054e44ab0028fc1da210a', 13, 1, 'AAAAAgAAAAMAAAALAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqWoAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('bfbd5e9457d717bcf847291a6c24b7cd8db4ff784ecd4592be30d08146c0c264', 13, 2, 
'AAAAAgAAAAMAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqWoAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqVEAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('30880dd42d8e402a30d8a3527b56c1e33e18e87c46e1332ea5cfc1721fd87cfb', 14, 1, 'AAAAAgAAAAMAAAANAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJUC+QAAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJUC+OcAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('dbd964fcfdb336a30f21c240fffdaf73d7c75880ed1b99375c62f84e3e592570', 14, 2, 'AAAAAgAAAAMAAAANAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJUC+QAAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJUC+OcAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('142c988b1f67984f74a1581de9caecf499e60f1e0eed496661aa2c559238764c', 15, 1, 'AAAAAgAAAAMAAAAOAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJOFgKcAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAPAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJOFgI4AAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('a5a9e3ca63e9cc155359c97337bcb14464cca56b230a4d0c7f27582644d16809', 16, 1, 'AAAAAgAAAAMAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXt1EAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtzgAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('6a056189b45760c607e331c90c5a8b4cd720961df8bc8cecfd4aa388b577a6cb', 16, 2, 'AAAAAgAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtzgAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtx8AAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('18bf6cce20cfbb0f9079c4b8783718949d13bd12d173a60363d2b8e3a07efead', 16, 3, 'AAAAAgAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtx8AAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtwYAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('cdef45dd961d59375351ea7dd7ef6414ff49371a335723e84dafacea1e13665a', 17, 1, 'AAAAAgAAAAMAAAAQAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAARAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('d1f593eb5e14f97027bc79821fa46628c107034fba9a5acef6a9da79e051ee73', 17, 2, 'AAAAAgAAAAMAAAAQAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAARAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('902b90c2322b9e6b335e7543389a7446b86e3039ebf59ec66dffb50eaec0dc85', 18, 1, 
'AAAAAgAAAAMAAAAQAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('ca756d1519ceda79f8722042b12cea7ba004c3bd961adb62b59f88a867f86eb3', 18, 2, 'AAAAAgAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+M4AAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('37bb79f6959c0e8e9b3d31f6c9308d8d084d9c6742cfa56ca094cfa6eae99423', 18, 3, 'AAAAAgAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+M4AAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('198844c8b472daacc5b717695a4ca16ac799a13fb2cf4152d19e2117ae1c56c3', 19, 1, 'AAAAAgAAAAMAAAARAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+OcAAAAEAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAATAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+M4AAAAEAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('f08dc1fec150f276562866ce4f5272f658cf0bd9fd8c1d96a22c196be2e1b25a', 20, 1, 'AAAAAgAAAAMAAAATAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+M4AAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2a6987a6930eab7e3becacf9b76ed7a06802668c1f1eb0f82f5671014b4b636a', 21, 1, 'AAAAAgAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOzAYAAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOy+0AAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('96415ac1d2f79621b26b1568f963fd8dd6c50c20a22c7428cefbfe9dee867588', 21, 2, 'AAAAAgAAAAMAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOy+0AAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOy9QAAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('be05e4bd966d58689e1b6fae013e5aa77bde56e6acd2db9b96870e5e746a4ab7', 22, 1, 'AAAAAgAAAAMAAAAVAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+QAAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAWAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+OcAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('d8b2508123656b1df1ee17c2767829bc22ab41959ad25e6ccc520e849516fba1', 23, 1, 'AAAAAgAAAAMAAAAWAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+OcAAAAFQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAXAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+M4AAAAFQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('01346de1ca30ce03149d9f54945956a22f9cbed3d81f81c62bb59cf8cdd8b893', 24, 1, 
'AAAAAgAAAAMAAAAVAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJUC+QAAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAYAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJUC+OcAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('5065cd7c97cfb6fbf7da8493beed47ed2c7efb3b00b77a4c92692ed487fb86a4', 25, 1, 'AAAAAgAAAAMAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpfFI2dQAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAZAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpfFI2bsAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('a76e0260f6b83c6ea93f545d17de721c079dc31e81ee5edc41f159ec5fb48443', 26, 1, 'AAAAAgAAAAMAAAAZAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+QAAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+OcAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('92a654c76966ac61acc9df0b75f91cbde3b551c9e9766730827af42d1e247cc3', 26, 2, 'AAAAAgAAAAMAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+OcAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('700fa44bb40e6ad2c5888656cd2e7b8d86de3d3557b653ae6874466175d64927', 27, 1, 'AAAAAgAAAAMAAAAZAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpVxF4LsAAAAAAAAAA0AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAbAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpVxF4KIAAAAAAAAAA0AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('fe3707fbd5c844395c598f31dc719c61218d4cea4e8dddadb6733f4866089100', 28, 1, 'AAAAAgAAAAMAAAAbAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+QAAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+OcAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('345ef7f85c6ea297e3f994feef279b63812628681bd173a1f615185a4368e482', 28, 2, 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+OcAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+M4AAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2a14735d7b05109359444acdd87e7fe92c98e9295d2ba61b05e25d1f7ee10fd3', 28, 3, 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+M4AAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+LUAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('4f9598206ab17cf27b5c3eb9e906d63ebee2626654112eabdd2bce7bf12cccf2', 28, 4, 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+LUAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+JwAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('852ba25e0e4aa149a22dc193bcb645ae9eba23e7f7432707f3b910474e9b6a5b', 28, 5, 
'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+JwAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+IMAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('8ccc0c28c3e99a63cc59bad7dec3f5c56eb3942c548ecd40bc39c509d6b081d4', 28, 6, 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+IMAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+GoAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('83201014880073f8eff6f21ae76e51c2c4faf533e550ecd3c2205b48a092960a', 28, 7, 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+GoAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('69f64ae0f809b08996c1f394ee795001a40eee69adb675ab63bfd1932d3aafb2', 29, 1, 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAcAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAAAAAABAAAAHQAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvg4AAAABsAAAAHAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAA'); +INSERT INTO txfeehistory VALUES ('c3cd47a311e025446f72c50426b5b5444e5261431fc5760e8e57467c87cd49fc', 30, 1, 'AAAAAgAAAAMAAAAdAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+DgAAAAGwAAAAgAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAAAAAABAAAAHgAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvgfAAAABsAAAAIAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAA'); +INSERT INTO txfeehistory VALUES ('299dc6631d585a55ae3602f660ec5b5a0088d24a14b344c72eccc2a62d9a8938', 31, 1, 'AAAAAgAAAAMAAAAeAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+B8AAAAGwAAAAkAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAAAAAABAAAAHwAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvgGAAAABsAAAAJAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAA'); +INSERT INTO txfeehistory VALUES ('bb9d6654111fae501594400dc901c70d47489a67163d2a34f9b3e32a921a50dc', 32, 1, 'AAAAAgAAAAMAAAAfAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+AYAAAAGwAAAAoAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAAAAAABAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvftAAAABsAAAAKAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAA'); +INSERT INTO txfeehistory VALUES 
('6b38cdd5c17df2013d5a5e211c4b32218b6be91025316b1aab28bc12316615d5', 32, 2, 'AAAAAgAAAAMAAAAgAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC9+0AAAAGwAAAAoAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAAAAAABAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvfUAAAABsAAAAKAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAA'); +INSERT INTO txfeehistory VALUES ('6d78f17fafa2317d6af679e1e5420f351207ff61cdff21c600ea8f85155b3ea1', 33, 1, 'AAAAAgAAAAMAAAAbAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpMdC56IAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAhAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpMdC54kAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('a05daae230b1f743474e83ab6d4817df1f3f77661a7d815f7620cee2a9809480', 34, 1, 'AAAAAgAAAAMAAAAhAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+QAAAAAIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAiAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+OcAAAAIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('4e2442fe2e8dd8c686570c9f537acb2f50153a9883f8d199b6f4701eb289b3a0', 35, 1, 'AAAAAgAAAAMAAAAiAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+OcAAAAIQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAjAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+M4AAAAIQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('44cb6c8ed4dbec542af1aad23001dd9d678cf19c8c461a653e762a7253eded82', 36, 1, 'AAAAAgAAAAMAAAAjAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+M4AAAAIQAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAkAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+LUAAAAIQAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('52388a98e4e36c17749a94374270cc65bdb7271cb51277f095aaa8f1ca9d322c', 37, 1, 'AAAAAgAAAAMAAAAkAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+LUAAAAIQAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAlAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+JwAAAAIQAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('afeb8080522eba71ca328225bbcf731029edcfa254c827c45be580bae95c7231', 38, 1, 'AAAAAgAAAAMAAAAhAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7okAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7nAAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2354df802111418a999e31c2964d16b8efe8e492b7d74de54939825190e1041f', 38, 2, 'AAAAAgAAAAMAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7nAAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7lcAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('11705f94cd65d7b673a124a85ce368c80f8458ffaedff719304d8f849535b4e0', 39, 1, 
'AAAAAgAAAAMAAAAmAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('6fa467b53f5386d77ad35c2502ed2cd3dd8b460a5be22b6b2818b81bcd3ed2da', 40, 1, 'AAAAAgAAAAMAAAAmAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('0bcb67aa365446fd244fecff3a0c397f81f3a9b13428688965e776d447c0b1ea', 40, 2, 'AAAAAgAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('6d2e30fd57492bf2e2b132e1bc91a548a369189bebf77eb2b3d829121a9d2c50', 41, 1, 'AAAAAgAAAAMAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('a832ff67085cb9eb6f1c4b740f6e033ba9b508af725fbf203469729a64a199ff', 41, 2, 'AAAAAgAAAAMAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+LUAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('d67cfb271a889e7854ffd61b08eacde76d56e758466fc37a8eec2d3a40ef8b14', 42, 1, 'AAAAAgAAAAMAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+LUAAAAJgAAAAMAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAqAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+JwAAAAJgAAAAMAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('945b6171de747ab323b3cda52290933df39edd7061f6e260762663efc51bccb0', 43, 1, 'AAAAAgAAAAMAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/FcAAAAAAAAABEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/D4AAAAAAAAABEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('e0773d07aba23d11e6a06b021682294be1f9f202a2926827022539662ce2c7fc', 44, 1, 'AAAAAgAAAAMAAAArAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+QAAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('5b42c77042f04bf716659a05e7ca3f4703af038a7da75b10b8538707c9ff172f', 45, 1, 'AAAAAgAAAAMAAAAsAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/CUAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAtAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/AwAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('7207de5b75243e0b062c3833f587036b7e9f64453be49ff50f3f3fdc7516ec6b', 46, 1, 
'AAAAAgAAAAMAAAAtAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7FAAAAAAALQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('d24f486bd722fd1875b843839e880bdeea324e25db706a26af5e4daa8c5071eb', 46, 2, 'AAAAAgAAAAMAAAAtAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0/AwAAAAAAAAABMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('ea93efd8c2f4e45c0318c69ec958623a0e4374f40d569eec124d43c8a54d6256', 47, 1, 'AAAAAgAAAAMAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('eb8586c9176c4cf2e864b2521948a972db5274de24673669463e0c7824cee056', 48, 1, 'AAAAAgAAAAMAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h/EAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAwAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h9gAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('9fff61916716fb2550043fac968ac6c13802af5176a10fc29108fcfc445ef513', 49, 1, 'AAAAAgAAAAMAAAAwAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+QAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+OcAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('e4609180751e7702466a8845857df43e4d154ec84b6bad62ce507fe12f1daf99', 49, 2, 'AAAAAgAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+OcAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+M4AAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('48415cd0fda9bc9aeb1f0b419bfb2997f7a2aa1b1ef2e51a0602c61104fc23cc', 49, 3, 'AAAAAgAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+M4AAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('616c609047ef8f9ca908a47a47aa4bb018449c569549ad2ca60590aab74267e8', 50, 1, 'AAAAAgAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAyAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+JwAAAAMAAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('1d7833c4faab08e62609acf3714d1babe27621a2b328edf37465e99aaf389cab', 51, 1, 'AAAAAgAAAAMAAAAyAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+JwAAAAMAAAAAQAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAzAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+IMAAAAMAAAAAQAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES 
('c8a28fb25d4784f37a7a078e1feef0eb30ca64e994734625ac4ea067cc621464', 52, 1, 'AAAAAgAAAAMAAAAzAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+IMAAAAMAAAAAUAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA0AAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+GoAAAAMAAAAAUAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('df5f0e8b3b533dd9cda0ff7540bef3e9e19369060f8a4b0414b0e3c1b4315b1c', 53, 1, 'AAAAAgAAAAMAAAAwAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl2d7jtgAAAAAAAAABYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA1AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl2d7jr8AAAAAAAAABYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('85bbd2b558563518a38e9b749bd4b8ced60b9fbbb7a6b283e15ae98548302ac4', 54, 1, 'AAAAAgAAAAMAAAA1AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lb8AAAAAAAAABcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA2AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lY0AAAAAAAAABcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('5bbbedfb52efd1d5d973e22540044a27b8115772314293e3ba8b1fb12e63ca2e', 55, 1, 'AAAAAgAAAAMAAAA2AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lY0AAAAAAAAABgAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lXQAAAAAAAAABgAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c', 56, 1, 'AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('0e5bd332291e3098e49886df2cdb9b5369a5f9e0a9973f0d9e1a9489c6581ba2', 57, 1, 'AAAAAgAAAAMAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nHQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('829d53f2dceebe10af8007564b0aefde819b95734ad431df84270651e7ed8a90', 58, 1, 'AAAAAgAAAAMAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('74b62d52311ea3f47359f74790595343f976afa4fd306caaefee5efdbbb104ff', 59, 1, 'AAAAAgAAAAMAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAARdlkuAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA7AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+M4AAAARdlkuAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('c8132b95c0063cafd20b26d27f06c12e688609d2d9d3724b840821e861870b8e', 60, 1, 
'AAAAAgAAAAMAAAA7AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+M4AAAARdlkuAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA8AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+LUAAAARdlkuAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('bc11b5c41de791369fd85fa1ccf01c35c20df5f98ff2f75d02ead61bfd520e21', 61, 1, 'AAAAAgAAAAMAAAA8AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+LUAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA9AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+JwAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('4657f7ab2fd82ae203f04d209e6adec0e6bc4f0983b4fc3fa679820ed47e29d7', 3, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAQAAAAAAAABkAAAAAF4L0vAAAAAAAAAAAQAAAAAAAAAAAAAAAC6N7oJcJiUzTWRDL98Bj3fVrJUB19wFvCzEHh8nn/IOAAAAAlQL5AAAAAAAAAAAAVb8BfcAAABA8CyjzEXXVTMwnZTAbHfJeq2HCFzAWkU98ds2ZXFqjXR4EiN0YDSAb/pJwXc0TjMa//SiX83UvUFSqLa8hOXICQ==', 'Rlf3qy/YKuID8E0gnmrewOa8TwmDtPw/pnmCDtR+KdcAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBucAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAAAAAAAuje6CXCYlM01kQy/fAY931ayVAdfcBbwsxB4fJ5/yDgAAAAJUC+QAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('66e27fb28870cb5256ea92764bcb222adbbaa5fec2d89a62a9aa8c9c8e2ee9e9', 4, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvkAAAAAAAAAAABVvwF9wAAAECAUpO+hxiga/YgRsV3rFpBJydgOyn0TPImJCaQCMikkiG+sNXrQBsYXjJrlOiGjGsU3rk4uvGl85AriYD9PNYH', 'ZuJ/sohwy1JW6pJ2S8siKtu6pf7C2JpiqaqMnI4u6ekAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaxU1gbOAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaxU1gbOAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBs4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDc4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+QAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('e9d1a3000aea36743142f2ede106d3cb37c3d7e88508e3f21b496370b5863858', 5, 1, 'AAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAZAAAAAQAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASnuINUAAABASz0AtZeNzXSXkjPkKJfOE8aUTAuPR6pxMMbF337wxE3wzOTDaVcDQ2N5P3E9MKc+fbbFhZ9K+07+J0wMGltRBA==', '6dGjAArqNnQxQvLt4QbTyzfD1+iFCOPyG0ljcLWGOFgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('995b9269f9f9c4c1eace75501188766d6e8ae40c5413120811a50437683cb74c', 5, 2, 'AAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAZAAAAAQAAAACAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAASnuINUAAABADpkMMc7kkkYjDoPwfUlOE9tLYvWHI/m+BBe/gCKN1cVvEF1UBVeCCuGBTjury4TqoxplKl4NZHJST5/Orr4XCA==', 'mVuSafn5xMHqznVQEYh2bW6K5AxUExIIEaUEN2g8t0wAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAEAAAAA9k+dTItn45gmOGfkeSTjdS5bqs656Zk8VCV06fwNxUcAAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('f78dca926455579b4a43009ffe35a0229a6da4bed32d3c999d7a06ad26605a25', 5, 3, 'AAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAZAAAAAQAAAADAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAABAAAAAgAAAAEAAAACAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAABKe4g1QAAAEDglRRymtLjw+ImmGwTiBTKE7X7+2CywlHw8qed+t520SbAggcqboy5KXJaEP51/wRSMxtZUgDOFfaDn9Df04EA', '943KkmRVV5tKQwCf/jWgIpptpL7TLTyZnXoGrSZgWiUAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAABAAAAAPZPnUyLZ+OYJjhn5Hkk43UuW6rOuemZPFQldOn8DcVHAAAAAQAAAAAAAAAAAAAAAQAAAAUAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4tQAAAAEAAAAAwAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAAUAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4tQAAAAEAAAAAwAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAMAAAABAAAAAAAAAAAAAAAAAQICAgAAAAEAAAAA9k+dTItn45gmOGfkeSTjdS5bqs656Zk8VCV06fwNxUcAAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('a9085e13fbe9f84e07e320a0d445536de1afc2cfd8c7e4186687807edd2b4897', 6, 1, 'AAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAZAAAAAQAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAinuINUAAABA4PRAe0en/05ZH2leCeTOsxbT0cUu3wgUiWUcuDk4ya8G/gI90hlV6pzOYyAB6Zt5fN7pRrPRL/tTlnjgUAjaBvwNxUcAAABAFmdGR6JZukKJUC3Vr2YEJ/24G3tesqTv4cV5UcAozRhS2+w0PYVVqe7QTmOMNSGX/C3LxP1tSvpXdU/OhYsODw==', 'qQheE/vp+E4H4yCg1EVTbeGvws/Yx+QYZoeAft0rSJcAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvicAAAAAQAAAADAAAAAQAAAAAAAAAAAAAAAAECAgIAAAABAAAAAPZPnUyLZ+OYJjhn5Hkk43UuW6rOuemZPFQldOn8DcVHAAAAAQAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4nAAAAAEAAAABAAAAAEAAAAAAAAAAAAAAAABAgICAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAAYAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4nAAAAAEAAAABAAAAAEAAAAAAAAAAAAAAAABAgICAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+JwAAAABAAAAAQAAAABAAAAAAAAAAAAAAAAAgICAgAAAAEAAAAA9k+dTItn45gmOGfkeSTjdS5bqs656Zk8VCV06fwNxUcAAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('0fb9c2e20946222b23e1d1d660de9d74576c41cfd9b199f9d565a013c1ef89ca', 7, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACVAvkAAAAAAAAAAABVvwF9wAAAED8tIFyog9OeCqiaBNfxFdAlneNYTfjoNUMKi6FJCY5BqemnDBxGox3jKS/xx4zpxAToEFp3Y2M+NRJIU4g/H0J', 'D7nC4glGIisj4dHWYN6ddFdsQc/ZsZn51WWgE8HvicoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lau/0w21AAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lau/0w21AAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyrQFLUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+QAAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('dd74eee27a59843b28a05ad08abf65eaa231b7debe4d05550c0a7a424cca5929', 8, 1, 'AAAAADnqxUES0av4d13+BBKxKs+6A/cJNtM1/nf4eHV3Ow06AAAAZAAAAAcAAAABAAAAAAAAAAIAAAAAAAAAewAAAAEAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAJiWgAAAAAAAAAABdzsNOgAAAEBjk5EFqV8GiL9xU62OUCKeScXxGMTMqJoD7ryiGf5jLPZJRSphbWC3ZycHE+pDuu/6EKSqcNUri5AXzQmM+GYB', '3XTu4npZhDsooFrQir9l6qIxt96+TQVVDAp6QkzKWSkAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACVAvicAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACVAvicAAAAAcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyrQFLUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyr2OlUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+JwAAAABwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJTc0vwAAAABwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2551e76a3ce4881b7bc73fdfd89d670d511ea7d4e56156252b51777023202de7', 8, 2, 'AAAAADnqxUES0av4d13+BBKxKs+6A/cJNtM1/nf4eHV3Ow06AAAAZAAAAAcAAAACAAAAAAAAAAEAAAAFaGVsbG8AAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAACYloAAAAAAAAAAAXc7DToAAABAS2+MaPA79AjD0B7qjl0qEz0N6CkDmoS4kgnXjZfbvdc9IkqNm0S+vKBNgV80pSfixY147L+jvS/ganovqbLiAQ==', 
'JVHnajzkiBt7xz/f2J1nDVEep9TlYVYlK1F3cCMgLecAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACU3NL8AAAAAcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACU3NL8AAAAAcAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyr2OlUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyscX/UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJTc0vwAAAABwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJS2rVwAAAABwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('3b36ecfbcc2adb0cfff08ae86199f64e12984f084bb03be9bb249611df82322b', 8, 3, 'AAAAADnqxUES0av4d13+BBKxKs+6A/cJNtM1/nf4eHV3Ow06AAAAZAAAAAcAAAADAAAAAAAAAAMBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAEAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAJiWgAAAAAAAAAABdzsNOgAAAEDC9hMtMYZ6hbx1iAdXngRcCYQmf8eu4zcB9SLH2998tVYca6QYig5Dsgy2oCMD1J7khIL9jz/VWjcPhvTVvC8L', 'Ozbs+8wq2wz/8IroYZn2ThKYTwhLsDvpuySWEd+CMisAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACUtq1cAAAAAcAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACUtq1cAAAAAcAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyscX/UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytChZUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJS2rVwAAAABwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJSQh7wAAAABwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('e14885cb66af5f7f5e991b014eec475c61cc831292cf5526cdd0cda145300837', 8, 4, 'AAAAADnqxUES0av4d13+BBKxKs+6A/cJNtM1/nf4eHV3Ow06AAAAZAAAAAcAAAAEAAAAAAAAAAQCAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgAAAAEAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAJiWgAAAAAAAAAABdzsNOgAAAEBOfq9PQ8EGcpjRWEaqGxvhBjSVuk6K5A2rthLYHnmAXmQ1JjJD3EddjiES3bPZUF5efGQvRjoEKgiB2dU3f2wF', '4UiFy2avX39emRsBTuxHXGHMgxKSz1UmzdDNoUUwCDcAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACUkIe8AAAAAcAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACUkIe8AAAAAcAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytChZUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytoqzUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJSQh7wAAAABwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJRqYhwAAAABwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES 
('66c28c0ccd5a2e47026aacafa2ecd3c501fe5de349ef376c0f8afb893c7bb55d', 9, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvkAAAAAAAAAAABVvwF9wAAAEARD6MVWgEASusfhr6JdF9K3Rie2XCRJKl/NoKyJcrd1kGs3ygpp55xu80YlFwgNVErZ/cEAHYOq06CwNfnE2sC', 'ZsKMDM1aLkcCaqyvouzTxQH+XeNJ7zdsD4r7iTx7tV0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LasraKscAAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LasraKscAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytoqxwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqpZlshwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+QAAAAACQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('fdb696a797b769176cbaed3a50e4a6a8671119621f65a3f954a3bcf100c7ef0c', 10, 1, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAAAyAAAAAkAAAABAAAAAAAAAAAAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAABfXhAAAAAAAAAAAB2QViXgAAAEAxyl5gvCCDC7l0pq9b/Btd3cOUUcY9Rv0ALxVjul4EVSL1Vygr107GjDo11+YswdmlCuWf7KItU0chlogpns4L', '/baWp5e3aRdsuu06UOSmqGcRGWIfZaP5VKO88QDH7wwAAAAAAAAAyAAAAAAAAAACAAAAAAAAAAEAAAAAAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvjOAAAAAkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvjOAAAAAkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAACAAAABAAAAAMAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqpZlshwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAKAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqpfjKlwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAKAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+M4AAAACQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAKAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJOFgI4AAAACQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAQAAAADAAAACgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaqX4ypcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaqZYKKcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAACgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACThYCOAAAAAkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACSCAhOAAAAAkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('67601a2ca212b84092a7d3c521172b67f4b93d72b726a06c540917d2ab83c1a1', 11, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAxVmE0iEp9S70YdkrhAu6dT4jSnPvbUuzitQ4oBcfaDMAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBHLko6/Tbv0v/5CWHkixXnbyoU6qQ6yewZGqPHFSzNxMfud86eYGkN0j4msMCXfLAou7iKOVn0MWyzlpvYRA0B', 'Z2AaLKISuECSp9PFIRcrZ/S5PXK3JqBsVAkX0quDwaEAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAACwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaqZYKKDAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaqZYKKDAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAALAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqplgooMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAALAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAALAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAJUC+QAAAAACwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('0e128647b2b93786b6b76e182dcda0173757066f8caf0523d1ba3b47fd6f720d', 12, 1, 'AAAAAMVZhNIhKfUu9GHZK4QLunU+I0pz721Ls4rUOKAXH2gzAAAAZAAAAAsAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAg/K/Blr9FO/nVEGLdmCzChMYpmcQzxIhFm6NBzxznX0AAAAAHc1lAAAAAAAAAAABFx9oMwAAAEBwY9HQAR2SMPe3JPvmBBtBk2jfog0GFEFYkLNFzQNqvYl7iZitmO5FQmkKlv/NO5ZcaWBqXcHhOQpk0s2XSBQF', 'DhKGR7K5N4a2t24YLc2gFzdXBm+MrwUj0bo7R/1vcg0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAADAAAAAAAAAAAxVmE0iEp9S70YdkrhAu6dT4jSnPvbUuzitQ4oBcfaDMAAAACVAvjnAAAAAsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADAAAAAAAAAAAxVmE0iEp9S70YdkrhAu6dT4jSnPvbUuzitQ4oBcfaDMAAAACVAvjnAAAAAsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAMAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAJUC+OcAAAACwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAMAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAI2Pn6cAAAACwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAACD8r8GWv0U7+dUQYt2YLMKEximZxDPEiEWbo0HPHOdfQAAAAAdzWUAAAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('cd8a8e9eb53fd268d1294e228995c27f422d90783c4054e44ab0028fc1da210a', 13, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACVAvkAAAAAAAAAAABVvwF9wAAAEAUtdYWyr64yv/rKPr0/vV4vYyonfsWxpxHsiYLHKJ3bm6k+ypiAByc8t0K+7bzxSLPjmjKKN5Prw7AdenlC7MB', 'zYqOnrU/0mjRKU4iiZXCf0ItkHg8QFTkSrACj8HaIQoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAADQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaoEXalRAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaoEXalRAAAAAAAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqVEAAAAAAAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqW9asFEAAAAAAAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAANAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJUC+QAAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('bfbd5e9457d717bcf847291a6c24b7cd8db4ff784ecd4592be30d08146c0c264', 13, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDY1TiMj+qj8+zYb2Vb60h+qWxZtFfSGwb0kvKttSFAHQhGOjIddiVQopx9LDRO6UgPmLLxFvQpIzeGnagh3vQD', 'v71elFfXF7z4RykabCS3zY20/3hOzUWSvjDQgUbAwmQAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAADQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LalvWrBRAAAAAAAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LalvWrBRAAAAAAAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqW9asFEAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXt1EAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAANAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJUC+QAAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('30880dd42d8e402a30d8a3527b56c1e33e18e87c46e1332ea5cfc1721fd87cfb', 14, 1, 'AAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAZAAAAA0AAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAAAAAAAAAX14QAAAAAAAAAAAfY7ZNwAAABAieZSSuOZqlwtyjnj5d/S0GUSGiQvy0ipPLynpl4UvO8qc7CDz3vsLROlN2g50qXirydSOdao56hvRhrEfRsGCA==', 'MIgN1C2OQCow2KNSe1bB4z4Y6HxG4TMupc/Bch/YfPsAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAADgAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACVAvjnAAAAA0AAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADgAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACVAvjnAAAAA0AAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJUC+OcAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJaAcScAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAOAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJUC+OcAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJOFgKcAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('dbd964fcfdb336a30f21c240fffdaf73d7c75880ed1b99375c62f84e3e592570', 14, 2, 'AAAAABuTPVjtpHsbuHZY03dGErshJ5g1LMSmpsyVor+BRpKEAAAAZAAAAA0AAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3H//////////AAAAAAAAAAGBRpKEAAAAQGDAV/5Op2DmFUP84dmyT5G/gxn1WzgdMrkSSU7wfpu39ycq36Sg+gs2ypRjw5hxxeMUj/GVEKipcDGndei38Aw=', '29lk/P2zNqMPIcJA//2vc9fHWIDtG5k3XGL4Tj5ZJXAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAADgAAAAAAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAACWgHEnAAAAA0AAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADgAAAAAAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAACWgHEnAAAAA0AAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJaAcScAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJaAcScAAAADQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAQAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAFVU0QAAAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('142c988b1f67984f74a1581de9caecf499e60f1e0eed496661aa2c559238764c', 15, 1, 'AAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAZAAAAA0AAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAABVVNEAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAAF9eEAAAAAAAAAAAH2O2TcAAAAQJBUx5tWfjAwXxab9U5HOjZvBRv3u95jXbyzuqeZ/kjsyMsU0jO/g03Rf1zgect1hj4hDYGN8mW4oEot0sSTZgw=', 
'FCyYix9nmE90oVgd6crs9JnmDx4O7UlmYaosVZI4dkwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAADwAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACThYCOAAAAA0AAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADwAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACThYCOAAAAA0AAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAOAAAAAQAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAFVU0QAAAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAPAAAAAQAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAFVU0QAAAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAAAX14QB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('a5a9e3ca63e9cc155359c97337bcb14464cca56b230a4d0c7f27582644d16809', 16, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBhFD/bYaTZZJ3VJ9xJqXoW5eeLK0AeFaATBH92cRfx0WUTFqp6rXx47fMBUxkWYq8bAHMfYCS5XXPRg86sAGUK', 'panjymPpzBVTWclzN7yxRGTMpWsjCk0MfydYJkTRaAkAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LajaV7cGAAAAAAAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LajaV7cGAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtwYAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqEVUvgYAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('6a056189b45760c607e331c90c5a8b4cd720961df8bc8cecfd4aa388b577a6cb', 16, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAJAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAACVAvkAAAAAAAAAAABVvwF9wAAAEAxC5cl7tkjQI0cfFZTiIFDuo0SwyYnNqTUH2hxDBtm7h/vUkBG3cgwGXS87ninVkhmvdIpTWfeIeGiw7kgefUA', 'agVhibRXYMYH4zHJDFqLTNcglh34vIzs/UqjiLV3pssAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LahFVL4GAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LahFVL4GAAAAAAAAAAJAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqEVUvgYAAAAAAAAAAkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtp7BRxQYAAAAAAAAAAkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('18bf6cce20cfbb0f9079c4b8783718949d13bd12d173a60363d2b8e3a07efead', 16, 3, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAKAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvkAAAAAAAAAAABVvwF9wAAAEC/RVto6ytAqHpd6ZFWjwXQyXopKORz8QSvz0d8RoPrOEBgNEuAj8+kbyhS7QieOqwbiJrS0AU8YWaBQQ4zc+wL', 'GL9sziDPuw+QecS4eDcYlJ0TvRLRc6YDY9K446B+/q0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaewUcUGAAAAAAAAAAJAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaewUcUGAAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtp7BRxQYAAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOzAYAAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('cdef45dd961d59375351ea7dd7ef6414ff49371a335723e84dafacea1e13665a', 17, 1, 'AAAAAONt/6wGI884Zi6sYDYC1GOV/drnh4OcRrTrqJPoOTUKAAAAZAAAABAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsX//////////AAAAAAAAAAHoOTUKAAAAQIjLqcYXE8EAsH6Dx2hwPjiEfHGZ4jsMNZZc7PynNiJi9kFXjfvvLDlWizGAr2B9MFDrfDRDvjnBxKKhJifEcQM=', 'ze9F3ZYdWTdTUep91+9kFP9JNxozVyPoTa+s6h4TZloAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAEQAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvjnAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEQAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvjnAAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAARAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+OcAAAAEAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAARAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+OcAAAAEAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAARAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('d1f593eb5e14f97027bc79821fa46628c107034fba9a5acef6a9da79e051ee73', 17, 2, 'AAAAAAQI6ATBFImTS1I7Fly9YiufQ/dC4uMOetO+m/BysWXyAAAAZAAAABAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsX//////////AAAAAAAAAAFysWXyAAAAQI7hbwZc1+KWfheVnYAq5TXFX9ancHJmJq0wV0c9ONIfG6U8trhIVeVoiED2eUFFmhx+bBtF9TPSvifF/mfDlQk=', '0fWT614U+XAnvHmCH6RmKMEHA0+6mlrO9qnaeeBR7nMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAEQAAAAAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAACVAvjnAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEQAAAAAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAACVAvjnAAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAARAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+OcAAAAEAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAARAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+OcAAAAEAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAARAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('902b90c2322b9e6b335e7543389a7446b86e3039ebf59ec66dffb50eaec0dc85', 18, 1, 'AAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAZAAAABAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAAAAAAERVFexAAAAQC9X2I3Zz1x3AQMqL4XCzePTlwnokv2BQnWGmT007oH59gai3eNu7/WVoHtW8hsgHjs1mZK709FzzRF2cbD2tQE=', 'kCuQwjIrnmszXnVDOJp0RrhuMDnr9Z7Gbf+1Dq7A3IUAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAARAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAASAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('be05e4bd966d58689e1b6fae013e5aa77bde56e6acd2db9b96870e5e746a4ab7', 22, 1, 'AAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAZAAAABUAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYX//////////AAAAAAAAAAEVxY53AAAAQDMCWfC0eGNJuYIX3s5AUNLernpcHTn8O6ygq/Nw3S5vny/W42O5G4G6UsihVU1xd5bR4im2+VzQlQYQhe0jhwg=', 'vgXkvZZtWGieG2+uAT5ap3veVuas0tublocOXnRqSrcAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAFgAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvjnAAAABUAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFgAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvjnAAAABUAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAWAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+OcAAAAFQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAWAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+OcAAAAFQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWAAAAAQAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAFVU0QAAAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('ca756d1519ceda79f8722042b12cea7ba004c3bd961adb62b59f88a867f86eb3', 18, 2, 'AAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAZAAAABAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAARFUV7EAAABALuai5QxceFbtAiC5nkntNVnvSPeWR+C+FgplPAdRgRS+PPESpUiSCyuiwuhmvuDw7kwxn+A6E0M4ca1s2qzMAg==', 'ynVtFRnO2nn4ciBCsSzqe6AEw72WGttitZ+IqGf4brMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAQAAAAAAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAADuaygAAAAAAQAAAAIAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAAAAABIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('37bb79f6959c0e8e9b3d31f6c9308d8d084d9c6742cfa56ca094cfa6eae99423', 18, 3, 
'AAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAZAAAABAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAAAstBeAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAARFUV7EAAABArzp9Fxxql+yoysglDjXm9+rsJeNX2GsSa7TOy3AzHOu4Y5Z8ICx52Q885gQGQWMtEP0w6yh83d6+o6kjC/WuAg==', 'N7t59pWcDo6bPTH2yTCNjQhNnGdCz6VsoJTPpurplCMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAgAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAACy0F4AAAAAAQAAAAEAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAO5rKAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAQAAAAMAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAO5rKAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAALLQXgAAAAAA7msoAAAAAAAAAAAAAAAAAAAAABIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAAAstBeAAAAAAEAAAABAAAAAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('198844c8b472daacc5b717695a4ca16ac799a13fb2cf4152d19e2117ae1c56c3', 19, 1, 'AAAAAONt/6wGI884Zi6sYDYC1GOV/drnh4OcRrTrqJPoOTUKAAAAZAAAABAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAIAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAAQI6ATBFImTS1I7Fly9YiufQ/dC4uMOetO+m/BysWXyAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAdzWUAAAAAAEAAAAAAAAAAAAAAAHoOTUKAAAAQMs9vNZ518oYUMp38TakovW//DDTbs/9oPj1RAix5ElC/d7gbWaaNNJxKQR7eMNO6rB+ntGqee4WurTJgA4k2ws=', 'GYhEyLRy2qzFtxdpWkyhaseZoT+yz0FS0Z4hF64cVsMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAIAAAAAAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAABAAAAAAAAAAB3NZQAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAO5rKAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAdzWUAAAAAAAAAAAAdzWUAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAAAAAAA', 
'AAAAAQAAAAIAAAADAAAAEwAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvjOAAAABAAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEwAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvjOAAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAMAAAARAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAATAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAB//////////wAAAAEAAAAAAAAAAAAAAAMAAAASAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygB//////////wAAAAEAAAAAAAAAAAAAAAEAAAATAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAMAAAASAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAALLQXgAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAATAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAALLQXgAAAAAA7msoAAAAAAAAAAAAAAAAAQAAABMAAAAAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAlQL4tQAAAAQAAAAAwAAAAIAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAO5rKAAAAAAB3NZQAAAAAAAAAAAAAAAADAAAAEgAAAAIAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAQAAAAAAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAADuaygAAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAABAAAAEwAAAAIAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAQAAAAAAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAB3NZQAAAAAAQAAAAIAAAAAAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('f08dc1fec150f276562866ce4f5272f658cf0bd9fd8c1d96a22c196be2e1b25a', 20, 1, 'AAAAAONt/6wGI884Zi6sYDYC1GOV/drnh4OcRrTrqJPoOTUKAAAAZAAAABAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAIAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAAAAAAAAAAAB6Dk1CgAAAEB+7jxesBKKrF343onyycjp2tiQLZiGH2ETl+9fuOqotveY2rIgvt9ng+QJ2aDP3+PnDsYEa9ZUaA+Zne2nIGgE', '8I3B/sFQ8nZWKGbOT1Jy9ljPC9n9jB2WoiwZa+LhsloAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAIAAAAAAAAAAQAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAO5rKAAAAAAAAAAAAO5rKAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygAAAAAA', 
'AAAAAQAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAMAAAATAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAUAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAALLQXgB//////////wAAAAEAAAAAAAAAAAAAAAMAAAATAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACGHEY1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAAEwAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAA7msoAAAAAAHc1lAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAKPpqzUAAAAEAAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAdzWUAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('2a6987a6930eab7e3becacf9b76ed7a06802668c1f1eb0f82f5671014b4b636a', 21, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAALAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDfpUesb4kQ/RfBx1UxqNOtZ2+4R4S0XxzggPR1C3YyhZAr/K8KyZCg4ejDTFnhu9qAh4GLZLkbBraGncT9DcYF', 'KmmHppMOq3477Kz5t27XoGgCZowfHrD4L1ZxAUtLY2oAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAFQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LacbTsvUAAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LacbTsvUAAAAAAAAAALAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOy9QAAAAAAAAAAsAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpoZL0tQAAAAAAAAAAsAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+QAAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('96415ac1d2f79621b26b1568f963fd8dd6c50c20a22c7428cefbfe9dee867588', 21, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAMAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDdJGdvdZ2S4QoXdO+Odt8ZRdeVu7mBvq7FtP9okqr98pGD/jSAraklQvaRmCyMALIMD2kG8R2KjhKvy7oIL6IB', 'lkFawdL3liGyaxVo+WP9jdbFDCCiLHQozvv+ne6GdYgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAFQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaaGS9LUAAAAAAAAAALAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaaGS9LUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpoZL0tQAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpfFI2dQAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJUC+QAAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('d8b2508123656b1df1ee17c2767829bc22ab41959ad25e6ccc520e849516fba1', 23, 1, 'AAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAZAAAABUAAAACAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAC+vCAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAARXFjncAAABATR48xYiKbu8AOoXFwvcvILZ0/pQkfGuwwAoIZNefr7ydIwlcuL44XPM7pJ/6jDSbqBudTNWdE2JRjuq7HI7IAA==', '2LJQgSNlax3x7hfCdngpvCKrQZWa0l5szFIOhJUW+6EAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAAAAAAAAwAAAAAAAAABVVNEAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAAL68IAAAAAAQAAAAEAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAFwAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvjOAAAABUAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFwAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvjOAAAABUAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAMAAAAXAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+M4AAAAFQAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAXAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+M4AAAAFQAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAC+vCAAAAAAAAAAAAAAAAAAAAABcAAAACAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAC+vCAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAwAAABYAAAABAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAAAAAAH//////////AAAAAQAAAAAAAAAAAAAAAQAAABcAAAABAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAAAAAAH//////////AAAAAQAAAAEAAAAAC+vCAAAAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('01346de1ca30ce03149d9f54945956a22f9cbed3d81f81c62bb59cf8cdd8b893', 24, 1, 'AAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAZAAAABUAAAABAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABVVNEAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAAAAAAAEeGjAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAbHWDWEAAABA0L+69D1hxpytAkX6cvPiBuO80ql8SQKZ15POVxx9wYl6mZrL+6UWGab/+6ng2M+a29E7ON+Xs46Y9MNqTh91AQ==', 'ATRt4cowzgMUnZ9UlFlWoi+cvtPYH4HGK7Wc+M3YuJMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAQAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAAAAAADAAAAAAAAAAAL68IAAAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAC+vCAAAAAAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAAAAABAAAAAFVU0QAAAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAAAAAAAAF9eEAAAAAAQAAAAEAAAAAAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAGAAAAAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAACVAvjnAAAABUAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGAAAAAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAACVAvjnAAAABUAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAMAAAAXAAAAAgAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAAAAAADAAAAAAAAAAFVU0QAAAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAAAvrwgAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAIAAAACAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAAAAAAMAAAADAAAAFwAAAAEAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAABVVNEAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAAAAAAAf/////////8AAAABAAAAAQAAAAAL68IAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAYAAAAAQAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAFVU0QAAAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAAAvrwgB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAABcAAAAAAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAlQL4zgAAAAVAAAAAgAAAAIAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAL68IAAAAAAAAAAAAAAAABAAAAGAAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACSCAhOAAAABUAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAAgAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAAAAAAEAAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAAAAAAAX14QAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAMAAAAYAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJUC+OcAAAAFQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAYAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJf96WcAAAAFQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAX14QAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('5065cd7c97cfb6fbf7da8493beed47ed2c7efb3b00b77a4c92692ed487fb86a4', 25, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAANAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBthwT3JCg5IZkKRNK3pHBa/eG8zq8Af9gFPWlYvEdRo6jzA5D9fYOcDpKD3dEAuPLNNAHj9tNbZUJA3rwxN94B', 'UGXNfJfPtvv32oSTvu1H7Sx++zsAt3pMkmku1If7hqQAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAGQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaXxSNm7AAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaXxSNm7AAAAAAAAAANAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAZAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpfFI2bsAAAAAAAAAA0AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAZAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpVxF4LsAAAAAAAAAA0AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+QAAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('a76e0260f6b83c6ea93f545d17de721c079dc31e81ee5edc41f159ec5fb48443', 26, 1, 'AAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAZAAAABkAAAABAAAAAAAAAAAAAAABAAAAAAAAAAQAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAAAAAAEyboYYAAAAQBqzCYDuLYn/jXhfEVxEGigMCJGoOBCK92lUb3Um15PgwSJ63tNl+FpH8+y5c+mCs/rzcvdyo9uXdodd4LXWiQg=', 'p24CYPa4PG6pP1RdF95yHAedwx6B7l7cQfFZ7F+0hEMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAAAAAAABQAAAAFVU0QAAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAB3NZQAAAAAAQAAAAEAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAHc1lAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABoAAAACAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAUAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('92a654c76966ac61acc9df0b75f91cbde3b551c9e9766730827af42d1e247cc3', 26, 2, 'AAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAZAAAABkAAAACAAAAAAAAAAAAAAABAAAAAAAAAAQAAAAAAAAAAVVTRAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAAAdzWUAAAAAAEAAAABAAAAAAAAAAEyboYYAAAAQBbE9T7oBKoN0/S3AV7GoSRe+xT79SlWNCYEtL1RPExL8FLhw5EDsXLoAvIBbBvHIr9NKcPtWDyhcHlIuaZKIg8=', 'kqZUx2lmrGGsyd8LdfkcveO1Ucnpdmcwgnr0LR4kfMMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAAAAAAABgAAAAAAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAB3NZQAAAAAAQAAAAEAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAB3NZQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAHc1lAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAMAAAADAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAB3NZQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAHc1lAAAAAAAdzWUAAAAAAAAAAAAAAAAAAAAABoAAAACAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAYAAAAAAAAAAVVTRAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('700fa44bb40e6ad2c5888656cd2e7b8d86de3d3557b653ae6874466175d64927', 27, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAOAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBq3GPDVeRPfwqtW45GZNiUdQ9j6E9Nsz/lMYWcWDWGCZADSsEiEoXar1HWFK6drptsGEl9P6I9f7C2GBKb4YQM', 'cA+kS7QOatLFiIZWzS57jYbePTVXtlOuaHRGYXXWSScAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAGwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaVcReCiAAAAAAAAAANAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaVcReCiAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAbAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpVxF4KIAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAbAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpMdC56IAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+QAAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('fe3707fbd5c844395c598f31dc719c61218d4cea4e8dddadb6733f4866089100', 28, 1, 
'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEA/GIgE9sYPGwbCiIdLdhoEu25CyB0ZAcmjQonQItu6SE0gaSBVT/le355A/dw1NPaoXY9P/u0ou9D7h5Vb1fcK', '/jcH+9XIRDlcWY8x3HGcYSGNTOpOjd2ttnM/SGYIkQAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAEAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('345ef7f85c6ea297e3f994feef279b63812628681bd173a1f615185a4368e482', 28, 2, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAACAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEDYxq3zpaFIC2JcuJUbrQ3MFXzqvu+5G7XUi4NnHlfbLutn76ylQcjuwLgbUG2lqcQfl75doPUZyurKtFP1rkMO', 'NF73+Fxuopfj+ZT+7yebY4EmKGgb0XOh9hUYWkNo5IIAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAIAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAIAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2a14735d7b05109359444acdd87e7fe92c98e9295d2ba61b05e25d1f7ee10fd3', 28, 3, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAADAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEAKuQ1exMu8hdf8dOPeULX2DG7DZx5WWIUFHXJMWGG9KmVrQoZDt2S6a/1uYEVJnvvY/EoJM5RpVjh2ZCs30VYA', 'KhRzXXsFEJNZRErN2H5/6SyY6SldK6YbBeJdH37hD9MAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAADAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAMAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAMAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('4f9598206ab17cf27b5c3eb9e906d63ebee2626654112eabdd2bce7bf12cccf2', 28, 4, 
'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATCeMFAAAABAAd6MzHDjUdRtHozzDnD3jJA+uRDCar3PQtuH/43pnROzk1HkovJPQ1YyzcpOb/NeuU/LKNzseL0PJNasVX1lAQ==', 'T5WYIGqxfPJ7XD656QbWPr7iYmZUES6r3SvOe/EszPIAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAADAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAEAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('852ba25e0e4aa149a22dc193bcb645ae9eba23e7f7432707f3b910474e9b6a5b', 28, 5, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAACAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEAnFzc6kqweyIL4TzIDbr+8GUOGGs1W5jcX5iSNw4DeonzQARlejYJ9NOn/XkrcoC9Hvd8hc5lNx+1h991GxJUJ', 'hSuiXg5KoUmiLcGTvLZFrp66I+f3QycH87kQR06balsAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAEAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAFAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgACAgAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('a832ff67085cb9eb6f1c4b740f6e033ba9b508af725fbf203469729a64a199ff', 41, 2, 'AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAADAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABRVVSAAAAAAEAAAAAAAAAAUpI8/gAAABA1Qe8ngwANz4fLqYChwRjR5xng6cIqU5WBtjkZgF4ugVhi8J6kTpACvnvXso3IVym6Rfd6JdQW8QcLkFTX1MGCg==', 'qDL/ZwhcuetvHEt0D24DO6m1CK9yX78gNGlymmShmf8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAcAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAACAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAADAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('8ccc0c28c3e99a63cc59bad7dec3f5c56eb3942c548ecd40bc39c509d6b081d4', 28, 6, 
'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAC2V4YW1wbGUuY29tAAAAAAAAAAAAAAAAATCeMFAAAABAkID6CkBHP9eovLQXkMQJ7QkE6NWlmdKGmLxaiI1YaVKZaKJxz5P85x+6wzpYxxbs6Bd2l4qxVjS7Q36DwRiqBA==', 'jMwMKMPpmmPMWbrX3sP1xW6zlCxUjs1AvDnFCdawgdQAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAFAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAGAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgACAgAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('83201014880073f8eff6f21ae76e51c2c4faf533e550ecd3c2205b48a092960a', 28, 7, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAATCeMFAAAABAtYtlsqMReQo1UoU2GYjb3h52wEKvnouCSO6LQO1xm/ArhtQO/sX5q35St8BjaYWEiFnp+SQj2FZC89OswCldAw==', 'gyAQFIgAc/jv9vIa525RwsT69TPlUOzTwiBbSKCSlgoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAGAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAHAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAcAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('69f64ae0f809b08996c1f394ee795001a40eee69adb675ab63bfd1932d3aafb2', 29, 1, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATCeMFAAAABAi69qDHclVS9A8GAaqyk6oIxiMC2KXXEneFijfxH5VyLGIQZNAxOOcCPpIalU6P1pYRX3K4OlKHZ4hIdxJzD6BQ==', 'afZK4PgJsImWwfOU7nlQAaQO7mmttnWrY7/Rky06r7IAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAHQAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvg4AAAABsAAAAHAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAAAAAAAQAAAB0AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4OAAAAAbAAAACAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAB0AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4OAAAAAbAAAACAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAAdAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+DgAAAAGwAAAAgAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('c3cd47a311e025446f72c50426b5b5444e5261431fc5760e8e57467c87cd49fc', 30, 1, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAJAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAATCeMFAAAABA7ZMKq80ucQSt+55q+6VQrG3Hrv6zHtOLwkfAxxsZdYPIuk7xZsgbyhOCVXjheOQ9ygAW1vtybdXG41AxSFRtAg==', 'w81HoxHgJURvcsUEJrW1RE5SYUMfxXYOjldGfIfNSfwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAHgAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvgfAAAABsAAAAIAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAAAAAAAQAAAB4AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4HwAAAAbAAAACQAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAB4AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4HwAAAAbAAAACQAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAAeAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+B8AAAAGwAAAAkAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('299dc6631d585a55ae3602f660ec5b5a0088d24a14b344c72eccc2a62d9a8938', 31, 1, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAKAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAATCeMFAAAABA0wriernSr+5P2QCeon1uj5mrOLNTOrPYPPi5ricLug/nreEUhsgS/k3lA9JGpVbd+tacMEKmXKmFxHCEMjWPBg==', 'KZ3GYx1YWlWuNgL2YOxbWgCI0koUs0THLszCpi2aiTgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAHwAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvgGAAAABsAAAAJAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAAAAAAAQAAAB8AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4BgAAAAbAAAACgAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAB8AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4BgAAAAbAAAACgAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAAfAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+AYAAAAGwAAAAoAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('1d7833c4faab08e62609acf3714d1babe27621a2b328edf37465e99aaf389cab', 51, 1, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTEAAAAAAAABAAAABDEyMzQAAAAAAAAAAS6Z+xkAAABAIW4yrFdk66fgDDir7YFATEd2llOubzx/iaJcM2wkF3ouqJQN+Aziy2rVtK5AoyphokiwsYXvHS6UF9MhdnUADQ==', 'HXgzxPqrCOYmCazzcU0bq+J2IaKzKO3zdGXpmq84nKsAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAoAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAMwAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAviDAAAADAAAAAEAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMwAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAviDAAAADAAAAAFAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMQAAAAAAAAQxMjM0AAAAAAAAAAAAAAABAAAAMwAAAAMAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAAFbmFtZTEAAAAAAAAEMTIzNAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('bb9d6654111fae501594400dc901c70d47489a67163d2a34f9b3e32a921a50dc', 32, 1, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAALAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEAFytUxjxN4bnJMrEJkSprnES9iGpOxAsNOFYrTP/xtGVk/PZ2oThUW+/hLRIk+hYYEgF21Gf58N/abJKFpqlsI', 'u51mVBEfrlAVlEANyQHHDUdImmcWPSo0+bPjKpIaUNwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvfUAAAABsAAAAKAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAAAAAAAQAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAACwAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAACAAAAAwAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAACwAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAAgAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC99QAAAAGwAAAAsAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('6b38cdd5c17df2013d5a5e211c4b32218b6be91025316b1aab28bc12316615d5', 32, 2, 
'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAMAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAAAAAAAAAAAATCeMFAAAABAOb0qGWnk1WrSUXS6iQFocaIOY/BDmgG1zTmlPyg0boSid3jTBK3z9U8+IPGAOELNLgkQHtgGYFgFGMio1xY+BQ==', 'azjN1cF98gE9Wl4hHEsyIYtr6RAlMWsaqyi8EjFmFdUAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvfUAAAABsAAAALAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAAAAAAAQAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAADAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAACAAAAAwAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAADAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAAgAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC99QAAAAGwAAAAwAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAALZXhhbXBsZS5jb20AAgACAgAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('6d78f17fafa2317d6af679e1e5420f351207ff61cdff21c600ea8f85155b3ea1', 33, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAPAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvkAAAAAAAAAAABVvwF9wAAAEC+mgKIzZqflQIKIqWn9LrciuyEx7XPfXGUhvyQ3sIQBnGdOWhkOt57UU/75LtUy4recT+jrY2cHKZj33puue8F', 'bXjxf6+iMX1q9nnh5UIPNRIH/2HN/yHGAOqPhRVbPqEAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAIQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaTHQueJAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAIQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaTHQueJAAAAAAAAAAPAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAhAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpMdC54kAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAhAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7okAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAhAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+QAAAAAIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('a05daae230b1f743474e83ab6d4817df1f3f77661a7d815f7620cee2a9809480', 34, 1, 'AAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAZAAAACEAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF93//////////AAAAAAAAAAEctWW1AAAAQBYUnV3I1O35EAyay0msjg3MzZfanCtvalKGG+94pe6RxgE/kCk2kTT9HXgXjbraq//Q/0vJ0AoCAXSeT18Ujgk=', 'oF2q4jCx90NHToOrbUgX3x8/d2YafYFfdiDO4qmAlIAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAIgAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvjnAAAACEAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAIgAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvjnAAAACEAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAiAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+OcAAAAIQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAiAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+OcAAAAIQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('4e2442fe2e8dd8c686570c9f537acb2f50153a9883f8d199b6f4701eb289b3a0', 35, 1, 'AAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAZAAAACEAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAA7msoAAAAAAAAAAAEctWW1AAAAQNugq+B30pdbzvVVGz9RO3+DMeRdWqc/Xsd2NYdg6NBu7esvOdTWQ3nvoBEJyeGz8EE9zRQiSiqorwHlm+AGfwI=', 'TiRC/i6N2MaGVwyfU3rLL1AVOpiD+NGZtvRwHrKJs6AAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAIwAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvjOAAAACEAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAIwAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvjOAAAACEAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAiAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAjAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAAAAAAAO5rKAAAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('44cb6c8ed4dbec542af1aad23001dd9d678cf19c8c461a653e762a7253eded82', 36, 1, 'AAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAZAAAACEAAAADAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAA7msoAAAAAAAAAAAEctWW1AAAAQO+eTIPXUZk+GAq7O6H8d1/WT5buo0apjLhGgtBeSyl37UV7LCpZfCn6DYVc7lQOVNWhBc7KDA7Ne83AR41kYAk=', 'RMtsjtTb7FQq8arSMAHdnWeM8ZyMRhplPnYqclPt7YIAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAJAAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvi1AAAACEAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJAAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvi1AAAACEAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAjAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAAAAAAAO5rKAAAAAAEAAAAAAAAAAAAAAAEAAAAkAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAAAAAAAO5rKAAAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('52388a98e4e36c17749a94374270cc65bdb7271cb51277f095aaa8f1ca9d322c', 37, 1, 'AAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAZAAAACEAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAAAAAAAAAAEctWW1AAAAQM5SCoW10EJoKBBwwMu0Vw+f+bQ0GjQ9FO6w3l9Q/FIctm87248t9jXTbl0Rd4NgGcom0yoGxgcJiERwZGBMXQc=', 'UjiKmOTjbBd0mpQ3QnDMZb23Jxy1Enfwlaqo8cqdMiwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAJQAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvicAAAACEAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJQAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvicAAAACEAAAAEAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAkAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAAAAAAAO5rKAAAAAAEAAAAAAAAAAAAAAAIAAAABAAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAAVVTRAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAJQAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvicAAAACEAAAAEAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJQAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvicAAAACEAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('afeb8080522eba71ca328225bbcf731029edcfa254c827c45be580bae95c7231', 38, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAQAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvkAAAAAAAAAAABVvwF9wAAAEDnzvNgEYB1u3BGTHFDlIWnk0GOq7BMpfcyewJRsJK9lT4HTMEwMQ2jSJyrWmB7xdBxHKaNMXQaAIx6CShLXpQH', 'r+uAgFIuunHKMoIlu89zECntz6JUyCfEW+WAuulccjEAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAJgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaQyP+5XAAAAAAAAAAPAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaQyP+5XAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7lcAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gto5089VcAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2354df802111418a999e31c2964d16b8efe8e492b7d74de54939825190e1041f', 38, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAARAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDD6WvAYL1wilsd7zYDJt0iFO/lppQ6GJJn/A8UJl9jTjMNOjuQPBtA7fSxR5KT0BZLbtQy8qFlys0I6fTe/cwO', 'I1TfgCERQYqZnjHClk0WuO/o5JK3103lSTmCUZDhBB8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAJgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaOdPPVXAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaOdPPVXAAAAAAAAAARAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gto5089VcAAAAAAAAABEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/FcAAAAAAAAABEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('11705f94cd65d7b673a124a85ce368c80f8458ffaedff719304d8f849535b4e0', 39, 1, 'AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABSkjz+AAAAECyjDa1e+jtXukTrHluO7x0Mx7Wj4mRoM4S5UAFmRV+2rVoxjMwqFJhtYnEAUV19+C5ycp5jOLLpWxrCeRKJQUG', 'EXBflM1l17ZzoSSoXONoyA+EWP+u3/cZME2PhJU1tOAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAJwAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvjnAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJwAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvjnAAAACYAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('6fa467b53f5386d77ad35c2502ed2cd3dd8b460a5be22b6b2818b81bcd3ed2da', 40, 1, 'AAAAAKturFHJX/eRt5gM6qIXAMbaXvlImqLysA6Qr9tLemxfAAAAZAAAACYAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+H//////////AAAAAAAAAAFLemxfAAAAQKN8LftAafeoAGmvpsEokqm47jAuqw4g1UWjmL0j6QPm1jxoalzDwDS3W+N2HOHdjSJlEQaTxGBfQKHhr6nNsAA=', 'b6RntT9Thtd601wlAu0s092LRgpb4itrKBi4G80+0toAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('0bcb67aa365446fd244fecff3a0c397f81f3a9b13428688965e776d447c0b1ea', 40, 2, 'AAAAAKturFHJX/eRt5gM6qIXAMbaXvlImqLysA6Qr9tLemxfAAAAZAAAACYAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+H//////////AAAAAAAAAAFLemxfAAAAQMPVgYf+w09depDSxMcJnjVZHA2FlkBmhPmi0N66FuhAzTekWcCOMdCI0cUc+xJhywLXSMiKA6wP6K94NRlFlQE=', 'C8tnqjZURv0kT+z/Ogw5f4HzqbE0KGiJZed21EfAseoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('6d2e30fd57492bf2e2b132e1bc91a548a369189bebf77eb2b3d829121a9d2c50', 41, 1, 'AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAACAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABVVNEAAAAAAEAAAAAAAAAAUpI8/gAAABA6O2fe1gQBwoO0fMNNEUKH0QdVXVjEWbN5VL51DmRUedYMMXtbX5JKVSzla2kIGvWgls1dXuXHZY/IOlaK01rBQ==', 'bS4w/VdJK/LisTLhvJGlSKNpGJvr936ys9gpEhqdLFAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAcAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAABAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAACAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('d67cfb271a889e7854ffd61b08eacde76d56e758466fc37a8eec2d3a40ef8b14', 42, 1, 'AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABRVVSAAAAAAAAAAAAAAAAAUpI8/gAAABAEPKcQmATGpevrtlAcZnNI/GjfLLQEp9aODGGRFV+2C4UO8dU+UAMTkCSXQLD+xPaRQxzw93ScEok6GzYCtt7Bg==', '1nz7JxqInnhU/9YbCOrN521W51hGb8N6juwtOkDvixQAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAcAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAKgAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvicAAAACYAAAADAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKgAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvicAAAACYAAAAEAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAqAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('945b6171de747ab323b3cda52290933df39edd7061f6e260762663efc51bccb0', 43, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAASAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvkAAAAAAAAAAABVvwF9wAAAEBFbS2c5rrYNGslNVslTHH8j8x0ggew1eHHOUTNajMPy8GYn52RSwRncwwvv1ejEfA+g/mTXMpXrBO847C46KoA', 'lFthcd50erMjs82lIpCTPfOe3XBh9uJgdiZj78UbzLAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAKwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaMIOfw+AAAAAAAAAARAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaMIOfw+AAAAAAAAAASAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/D4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtonM3Az4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAArAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+QAAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('e0773d07aba23d11e6a06b021682294be1f9f202a2926827022539662ce2c7fc', 44, 1, 'AAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+HvAAAAZAAAACsAAAABAAAAAAAAAAAAAAABAAAAAAAAAAgAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAYrj4e8AAABA3jJ7wBrRpsrcnqBQWjyzwvVz2v5UJ56G60IhgsaWQFSf+7om462KToc+HJ27aLVOQ83dGh1ivp+VIuREJq/SBw==', '4Hc9B6uiPRHmoGsCFoIpS+H58gKikmgnAiU5Zizix/wAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAgAAAAAAAAAAlQL45wAAAAA', 
'AAAAAQAAAAIAAAADAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtonM3Az4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/CUAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+Hv'); +INSERT INTO txhistory VALUES ('5b42c77042f04bf716659a05e7ca3f4703af038a7da75b10b8538707c9ff172f', 45, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAATAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA493YBEKdTeVN3wUjgsf56+V7YgpjSdqDCWTMfjGCtycCxorwuxQAAAAAAAAAAAABVvwF9wAAAECGClRePcAExQ/WKroo3/3dfchP/yI8TRDrrjt/chZ83ULiTc54l5wcz1AkbLa6CAapdSGpUWXk5ksTqDXLn4AA', 'W0LHcELwS/cWZZoF58o/RwOvA4p9p1sQuFOHB8n/Fy8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAALQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaMIOfwMAAAAAAAAAASAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaMIOfwMAAAAAAAAAATAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAtAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/AwAAAAAAAAABMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAtAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0/AwAAAAAAAAABMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7FAAAAAAALQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('7207de5b75243e0b062c3833f587036b7e9f64453be49ff50f3f3fdc7516ec6b', 46, 1, 'AAAAAOPd2ARCnU3lTd8FI4LH+evle2IKY0nagwlkzH4xgrcnAAAAZAAAAC0AAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAABAAAAAOPd2ARCnU3lTd8FI4LH+evle2IKY0nagwlkzH4xgrcnAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMYK3JwAAAEAOkGOPTOBDSQ7nW2Zn+bls2PDUebk2/k3/gqHKQ8eYOFsD6nBeEvyMD858vo5BabjQwB9injABIM8esDh7bEkC', 'cgfeW3UkPgsGLDgz9YcDa36fZEU75J/1Dz8/3HUW7GsAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAALgAAAAAAAAAA493YBEKdTeVN3wUjgsf56+V7YgpjSdqDCWTMfjGCtycCxorwuxP/nAAAAC0AAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALgAAAAAAAAAA493YBEKdTeVN3wUjgsf56+V7YgpjSdqDCWTMfjGCtycCxorwuxP/nAAAAC0AAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('d24f486bd722fd1875b843839e880bdeea324e25db706a26af5e4daa8c5071eb', 46, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAUAAAAAAAAAAAAAAABAAAAAAAAAAUAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABVvwF9wAAAEBYI0TMQVWPvnC2KPbDph9Myz5UMuBRIYt2YQdtlPYC4UHamYnHsMghpIMfaS7MWdHuGY81+FBozOsS+/HGohQD', '0k9Ia9ci/Rh1uEODnogL3uoyTiXbcGomr15NqoxQcesAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAALgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvzAAAAAAAAAATAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvzAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('c8a28fb25d4784f37a7a078e1feef0eb30ca64e994734625ac4ea067cc621464', 52, 1, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTEAAAAAAAABAAAABDAwMDAAAAAAAAAAAS6Z+xkAAABA3ExJNH79wGSRYZerPP1zMYlepMsuhoJF5vHn2gCsHmDpWfgO8VKC3BRImO+ne9spUXlVHMjEuhOHoPhl1hrMCg==', 'yKKPsl1HhPN6egeOH+7w6zDKZOmUc0YlrE6gZ8xiFGQAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAoAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAANAAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvhqAAAADAAAAAFAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANAAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvhqAAAADAAAAAGAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAzAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMQAAAAAAAAQxMjM0AAAAAAAAAAAAAAABAAAANAAAAAMAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAAFbmFtZTEAAAAAAAAEMDAwMAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('ea93efd8c2f4e45c0318c69ec958623a0e4374f40d569eec124d43c8a54d6256', 47, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAVAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAAAVb8BfcAAABABUHuXY+MTgW/wDv5+NDVh9fw4meszxeXO98HEQfgXVeCZ7eObCI2orSGUNA/SK6HV9/uTVSxIQQWIso1QoxHBQ==', '6pPv2ML05FwDGMaeyVhiOg5DdPQNVp7sEk1DyKVNYlYAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAkAAAAAAAAAAgAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAisSMJjBcAAAAAOPd2ARCnU3lTd8FI4LH+evle2IKY0nagwlkzH4xgrcnAAAisSN9x4sAAAAA', 'AAAAAQAAAAIAAAADAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAUAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAVAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h/EAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGraHekccnAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('eb8586c9176c4cf2e864b2521948a972db5274de24673669463e0c7824cee056', 48, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAWAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvkAAAAAAAAAAABVvwF9wAAAECAMOn6G4jusgpfSoHwntHQkYIDxI/VnyH/qIi+bdMWzi1T6WlwnO+yITgm2+mOaWc6zVuxiLjHllzBeQ/xKvQN', 
'64WGyRdsTPLoZLJSGUipcttSdN4kZzZpRj4MeCTO4FYAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAMAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZf8fofYAAAAAAAAAAVAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZf8fofYAAAAAAAAAAWAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAwAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h9gAAAAAAAAABYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAwAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl2d7jtgAAAAAAAAABYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+QAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('9fff61916716fb2550043fac968ac6c13802af5176a10fc29108fcfc445ef513', 49, 1, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTEAAAAAAAABAAAABDEyMzQAAAAAAAAAAS6Z+xkAAABAxKiHYYNLJiW3r5+kCJm8ucaoV7BcrEnQXFb3s1RyRyUbAkDlaCvE+RKwMZoNUfbkQUGrouyVKy1ZpUeccByqDg==', 'n/9hkWcW+yVQBD+slorGwTgCr1F2oQ/CkQj8/ERe9RMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAoAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMQAAAAAAAAQxMjM0AAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('e4609180751e7702466a8845857df43e4d154ec84b6bad62ce507fe12f1daf99', 49, 2, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTIAAAAAAAABAAAABDU2NzgAAAAAAAAAAS6Z+xkAAABAjxgnTRBCa0n1efZocxpEjXeITQ5sEYTVd9fowuto2kPw5eFwgVnz6OrKJwCRt5L8ylmWiATXVI3Zyfi3yTKqBA==', '5GCRgHUedwJGaohFhX30Pk0VTshLa61izlB/4S8dr5kAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAoAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMgAAAAAAAAQ1Njc4AAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('48415cd0fda9bc9aeb1f0b419bfb2997f7a2aa1b1ef2e51a0602c61104fc23cc', 49, 3, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZSAAAAAAAAABAAAAD2l0cyBnb3Qgc3BhY2VzIQAAAAAAAAAAAS6Z+xkAAABANmYginYhX+6VAsl1JumfxkB57y2LHraWDUkR+KDxWW8l5pfTViLxx7J85KrOV0qNCY4RfasgqxF0FC3ErYceCQ==', 'SEFc0P2pvJrrHwtBm/spl/eiqhse8uUaBgLGEQT8I8wAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAoAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lIAAAAAAAAA9pdHMgZ290IHNwYWNlcyEAAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('616c609047ef8f9ca908a47a47aa4bb018449c569549ad2ca60590aab74267e8', 50, 1, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTIAAAAAAAAAAAAAAAAAAAEumfsZAAAAQAYRZNPhJCTwjJgAJ9beE3ZO/H3kYJhYmV1pCmy7c8Zr2sKdKOmaLn4fmA5qaL+lQMKwOShtjwkZ8JHxPUd8GAk=', 'YWxgkEfvj5ypCKR6R6pLsBhEnFaVSa0spgWQqrdCZ+gAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAoAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAMgAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvicAAAADAAAAADAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMgAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvicAAAADAAAAAEAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMgAAAAAAAAQ1Njc4AAAAAAAAAAAAAAACAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMgAAAAAAAAMAAAAyAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+JwAAAAMAAAAAQAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAyAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+JwAAAAMAAAAAQAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('bc11b5c41de791369fd85fa1ccf01c35c20df5f98ff2f75d02ead61bfd520e21', 61, 1, 'AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgDAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AwAAAAAAAAABHK0SlAAAAECcI6ex0Dq6YAh6aK14jHxuAvhvKG2+NuzboAKrfYCaC1ZSQ77BYH/5MghPX97JO9WXV17ehNK7d0umxBgaJj8A', 'vBG1xB3nkTaf2F+hzPAcNcIN9fmP8vddAurWG/1SDiEAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAsAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgCAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgDAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA=='); +INSERT INTO txhistory VALUES ('df5f0e8b3b533dd9cda0ff7540bef3e9e19369060f8a4b0414b0e3c1b4315b1c', 53, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAXAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAABJeTmKR1qr+CZoIyjAfGxrIXZ/tI1VId2OfZkRowDz4AAAACVAvkAAAAAAAAAAABVvwF9wAAAEDyHwhW9GXQVXG1qibbeqSjxYzhv5IC08K2vSkxzYTwJykvQ8l0+e4M4h2guoK89s8HUfIqIOzDmoGsNTaLcYUG', '318OiztTPdnNoP91QL7z6eGTaQYPiksEFLDjwbQxWxwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAANQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZdne46/AAAAAAAAAAWAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZdne46/AAAAAAAAAAXAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA1AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl2d7jr8AAAAAAAAABcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA1AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lb8AAAAAAAAABcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1AAAAAAAAAAAEl5OYpHWqv4JmgjKMB8bGshdn+0jVUh3Y59mRGjAPPgAAAAJUC+QAAAAANQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('85bbd2b558563518a38e9b749bd4b8ced60b9fbbb7a6b283e15ae98548302ac4', 54, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAyAAAAAAAAAAYAAAAAAAAAAAAAAACAAAAAAAAAAEAAAAABJeTmKR1qr+CZoIyjAfGxrIXZ/tI1VId2OfZkRowDz4AAAAAAAAAAAX14QAAAAABAAAAAASXk5ikdaq/gmaCMowHxsayF2f7SNVSHdjn2ZEaMA8+AAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAABfXhAAAAAAAAAAACVvwF9wAAAEDRRWwMrdLrhnl+FIP+71tTHB5rlzCsPVyGnR3scvID9NmIL3LZEo992uTvDI9QLys5bC2yRc3WYR0vFiZRs40IGjAPPgAAAEDXbXWVdzmN6NWBjYU5OvB33WTUaa2wDZX3RmFTZQQ/+7JvPdblMtNCxo8IOYePQg90RajV9rB+k8P+SEpPHCUH', 'hbvStVhWNRijjpt0m9S4ztYLn7u3prKD4VrphUgwKsQAAAAAAAAAyAAAAAAAAAACAAAAAAAAAAEAAAAAAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAANgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJWNAAAAAAAAAAXAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJWNAAAAAAAAAAYAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAACAAAABAAAAAMAAAA1AAAAAAAAAAAEl5OYpHWqv4JmgjKMB8bGshdn+0jVUh3Y59mRGjAPPgAAAAJUC+QAAAAANQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA2AAAAAAAAAAAEl5OYpHWqv4JmgjKMB8bGshdn+0jVUh3Y59mRGjAPPgAAAAJaAcUAAAAANQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAA2AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lY0AAAAAAAAABgAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA2AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltD7HU0AAAAAAAAABgAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAQAAAADAAAANgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbQ+x1NAAAAAAAAAAYAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJWNAAAAAAAAAAYAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAANgAAAAAAAAAABJeTmKR1qr+CZoIyjAfGxrIXZ/tI1VId2OfZkRowDz4AAAACWgHFAAAAADUAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANgAAAAAAAAAABJeTmKR1qr+CZoIyjAfGxrIXZ/tI1VId2OfZkRowDz4AAAACVAvkAAAAADUAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('5bbbedfb52efd1d5d973e22540044a27b8115772314293e3ba8b1fb12e63ca2e', 55, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAZAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBCMMjX9xO3XKpQ6uS/U1BqdzRhSBYQ35ivmZxPBgfqQsTDma1BzOsq/bmHJ4P+fkYJRJUdZZazXJM2i4mF7nUH', 
'W7vt+1Lv0dXZc+IlQARKJ7gRV3IxQpPjuosfsS5jyi4AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAANwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJV0AAAAAAAAAAYAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJV0AAAAAAAAAAZAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lXQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nHQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c', 56, 1, 'AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==', 'KoBXEsbRD550uwzPVK6SorSx5YZFH+gTOiQzgW9rVnwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA=='); +INSERT INTO txhistory VALUES ('0e5bd332291e3098e49886df2cdb9b5369a5f9e0a9973f0d9e1a9489c6581ba2', 57, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDHU95E9wxgETD8TqxUrkgC0/7XHyNDts6Q5huRHfDRyRcoHdv7aMp/sPvC3RPkXjOMjgbKJUX7SgExUeYB5f8F', 'DlvTMikeMJjkmIbfLNubU2ml+eCplz8NnhqUicZYG6IAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAZAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAaAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlahyo1sAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('829d53f2dceebe10af8007564b0aefde819b95734ad431df84270651e7ed8a90', 58, 1, 'AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAADkAAAABAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AAAAAAAAAAABHK0SlAAAAEDq0JVhKNIq9ag0sR+R/cv3d9tEuaYEm2BazIzILRdGj9alaVMZBhxoJ3ZIpP3rraCJzyoKZO+p5HBVe10a2+UG', 'gp1T8tzuvhCvgAdWSwrv3oGblXNK1DHfhCcGUeftipAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAsAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAOgAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjnAAAADkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOgAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjnAAAADkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAAOQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAARdlkuAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('74b62d52311ea3f47359f74790595343f976afa4fd306caaefee5efdbbb104ff', 59, 1, 'AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgBAAAAAAAAAAAAAAABAAAAAAAAAAsAAAAAAAAAZAAAAAAAAAABHK0SlAAAAEAOrvZSFnT3JvmT1P5lJ/lggpZe4nxH5WvJ9K/SLOD49wfqq84suncoZIn3IAf0PExMw3etu5FiDVw3c3jYYhAL', 'dLYtUjEeo/RzWfdHkFlTQ/l2r6T9MGyq7+5e/buxBP8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAsAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAOwAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjOAAAAEXZZLgAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOwAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjOAAAAEXZZLgBAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA=='); +INSERT INTO txhistory VALUES ('c8132b95c0063cafd20b26d27f06c12e688609d2d9d3724b840821e861870b8e', 60, 1, 'AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgCAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AQAAAAAAAAABHK0SlAAAAEC4H7TDntOUXDMg4MfoCPlbLRQZH7VwNpUHMvtnRWqWIiY/qnYYu0bvgYUVtoFOOeqElRKLYqtOW3Fz9iKl0WQJ', 'yBMrlcAGPK/SCybSfwbBLmiGCdLZ03JLhAgh6GGHC44AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAsAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAPAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvi1AAAAEXZZLgBAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAPAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvi1AAAAEXZZLgCAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA=='); + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (2, 1, 'AAAAAQAAAAs=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (2, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD 
CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/kahuna-horizon.sql b/services/horizon/internal/test/scenarios/kahuna-horizon.sql new file mode 100644 index 0000000000..687c299b59 --- /dev/null +++ b/services/horizon/internal/test/scenarios/kahuna-horizon.sql @@ -0,0 +1,2117 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS 
history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trust_lines_by_type_code_issuer; +DROP INDEX IF EXISTS public.trust_lines_by_issuer; +DROP INDEX IF EXISTS public.trust_lines_by_account_id; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.signers_by_account; +DROP INDEX IF EXISTS public.offers_by_selling_asset; +DROP INDEX IF EXISTS public.offers_by_seller; +DROP INDEX IF EXISTS public.offers_by_last_modified_ledger; +DROP INDEX IF EXISTS public.offers_by_buying_asset; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.exp_asset_stats_by_issuer; +DROP INDEX IF EXISTS public.exp_asset_stats_by_code; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +DROP INDEX IF EXISTS public.accounts_inflation_destination; +DROP INDEX IF EXISTS public.accounts_home_domain; +DROP INDEX IF EXISTS public.accounts_data_account_id_name; +ALTER TABLE IF EXISTS ONLY public.trust_lines DROP CONSTRAINT IF EXISTS trust_lines_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.key_value_store DROP CONSTRAINT IF EXISTS key_value_store_pkey; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY 
public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.exp_asset_stats DROP CONSTRAINT IF EXISTS exp_asset_stats_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts_signers DROP CONSTRAINT IF EXISTS accounts_signers_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts_data DROP CONSTRAINT IF EXISTS accounts_data_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.trust_lines; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.key_value_store; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP INDEX IF EXISTS public.htrd_agg_open_ledger_toid; +DROP INDEX IF EXISTS public.htrd_agg_bucket_lookup; +DROP TABLE IF EXISTS public.history_trades_60000; +DROP FUNCTION IF EXISTS public.to_millis(timestamp with time zone, numeric); +DROP FUNCTION IF EXISTS public.to_millis(timestamp without time zone, numeric); +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.exp_asset_stats; +DROP TABLE IF EXISTS public.asset_stats; +DROP TABLE IF EXISTS public.accounts_signers; +DROP TABLE IF EXISTS public.accounts_data; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.history_operation_liquidity_pools; +DROP TABLE IF EXISTS public.history_transaction_liquidity_pools; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE 
EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + account_id character varying(56) NOT NULL, + balance bigint NOT NULL, + buying_liabilities bigint NOT NULL, + selling_liabilities bigint NOT NULL, + sequence_number bigint NOT NULL, + num_subentries integer NOT NULL, + inflation_destination character varying(56) NOT NULL, + flags integer NOT NULL, + home_domain character varying(32) NOT NULL, + master_weight smallint NOT NULL, + threshold_low smallint NOT NULL, + threshold_medium smallint NOT NULL, + threshold_high smallint NOT NULL, + last_modified_ledger integer NOT NULL +); + + +-- +-- Name: accounts_data; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts_data ( + ledger_key character varying(150) NOT NULL, + account_id character varying(56) NOT NULL, + name character varying(64) NOT NULL, + value character varying(90) NOT NULL, + last_modified_ledger integer NOT NULL +); + + +-- +-- Name: accounts_signers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts_signers ( + account_id character varying(64) NOT NULL, + signer character varying(64) NOT NULL, + weight integer NOT NULL, + sponsor character varying(56) +); + +CREATE INDEX accounts_signers_by_sponsor ON accounts_signers USING BTREE(sponsor); + + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml 
character varying(255) NOT NULL +); + + +-- +-- Name: exp_asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE exp_asset_stats ( + asset_type integer NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL, + amount text NOT NULL, + num_accounts integer NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- history_claimable_balances (manually added) +CREATE SEQUENCE history_claimable_balances_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + +CREATE TABLE history_claimable_balances ( + id bigint NOT NULL DEFAULT nextval('history_claimable_balances_id_seq'::regclass), + claimable_balance_id text NOT NULL +); + +CREATE UNIQUE INDEX "index_history_claimable_balances_on_id" ON history_claimable_balances USING btree (id); +CREATE UNIQUE INDEX "index_history_claimable_balances_on_claimable_balance_id" ON history_claimable_balances USING btree (claimable_balance_id); + +CREATE TABLE history_operation_claimable_balances ( + history_operation_id bigint NOT NULL, + history_claimable_balance_id bigint NOT NULL +); + +CREATE UNIQUE INDEX "index_history_operation_claimable_balances_on_ids" ON history_operation_claimable_balances USING btree (history_operation_id , history_claimable_balance_id); +CREATE INDEX "index_history_operation_claimable_balances_on_operation_id" ON history_operation_claimable_balances USING btree (history_operation_id); + +CREATE TABLE history_transaction_claimable_balances ( + history_transaction_id bigint NOT NULL, + history_claimable_balance_id bigint NOT NULL +); + +CREATE UNIQUE INDEX "index_history_transaction_claimable_balances_on_ids" ON history_transaction_claimable_balances USING btree (history_transaction_id , history_claimable_balance_id); +CREATE INDEX "index_history_transaction_claimable_balances_on_transaction_id" ON history_transaction_claimable_balances USING btree (history_transaction_id); + + +INSERT INTO history_claimable_balances VALUES (1, '00000000178826fbfe339e1f5c53417c6fedfe2c05e8bec14303143ec46b38981b09c3f9'); +SELECT pg_catalog.setval('history_claimable_balances_id_seq', 1, true); +-- The operations/transactions are going to be unrelated to claimable balances, but it doesn't matter for testing +INSERT INTO history_operation_claimable_balances VALUES (12884905985, 1); +INSERT INTO history_operation_claimable_balances VALUES (8589938689, 1); +INSERT INTO history_transaction_claimable_balances VALUES (12884905984, 1); +INSERT INTO history_transaction_claimable_balances VALUES (8589938688, 1); + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT 
BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer, + tx_set_operation_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount >= 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount >= 0)) +); + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades_60000 ( + timestamp bigint not null, + base_asset_id bigint not null, + counter_asset_id bigint not null, + count integer not null, + base_volume numeric not null, + counter_volume numeric not null, + avg numeric not null, + high_n numeric not null, + high_d numeric not null, + 
low_n numeric not null, + low_d numeric not null, + open_ledger_toid bigint not null, + open_n numeric not null, + open_d numeric not null, + close_ledger_toid bigint not null, + close_n numeric not null, + close_d numeric not null, + + PRIMARY KEY(base_asset_id, counter_asset_id, timestamp) +); + +CREATE OR REPLACE FUNCTION to_millis(t timestamp without time zone, trun numeric DEFAULT 1) + RETURNS bigint AS $$ + BEGIN + RETURN div(cast((extract(epoch from t) * 1000 ) as bigint), trun)*trun; + END; +$$ LANGUAGE plpgsql IMMUTABLE; + +CREATE OR REPLACE FUNCTION to_millis(t timestamp with time zone, trun numeric DEFAULT 1) + RETURNS bigint AS $$ + BEGIN + RETURN to_millis(t::timestamp, trun); + END; +$$ LANGUAGE plpgsql IMMUTABLE; + +CREATE INDEX htrd_agg_bucket_lookup ON history_trades + USING btree (to_millis(ledger_closed_at, '60000'::numeric)); + +CREATE INDEX htrd_agg_open_ledger_toid ON history_trades_60000 USING btree (open_ledger_toid); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged bigint, + inner_transaction_hash character varying(64), + fee_account character varying(64), + inner_signatures character varying(96)[], + new_max_fee bigint +); + +-- +-- Name: key_value_store; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE key_value_store ( + key character varying(255) NOT NULL, + value character varying(255) NOT NULL +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + seller_id character varying(56) NOT NULL, + offer_id bigint NOT NULL, + selling_asset text NOT NULL, + buying_asset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + last_modified_ledger integer NOT NULL +); + + +-- +-- Name: trust_lines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trust_lines ( + ledger_key character varying(150) NOT NULL, + account_id character varying(56) NOT NULL, + asset_type integer NOT NULL, + asset_issuer character varying(56) NOT NULL, + asset_code 
character varying(12) NOT NULL, + balance bigint NOT NULL, + trust_line_limit bigint NOT NULL, + buying_liabilities bigint NOT NULL, + selling_liabilities bigint NOT NULL, + flags integer NOT NULL, + last_modified_ledger integer NOT NULL +); + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts_data; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts_signers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO asset_stats VALUES (1, '200000000', 1, 0, ''); +INSERT INTO asset_stats VALUES (2, '0', 1, 0, ''); +INSERT INTO asset_stats VALUES (3, '0', 0, 3, ''); +INSERT INTO asset_stats VALUES (4, '3000000000', 1, 0, ''); +INSERT INTO asset_stats VALUES (5, '0', 1, 3, ''); +INSERT INTO asset_stats VALUES (6, '100000000', 1, 0, ''); + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-10-31 14:19:49.03833+01'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-10-31 14:19:49.04267+01'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-10-31 14:19:49.045926+01'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-10-31 14:19:49.054147+01'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-10-31 14:19:49.061804+01'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-10-31 14:19:49.067093+01'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-10-31 14:19:49.081047+01'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-10-31 14:19:49.085128+01'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-10-31 14:19:49.089574+01'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-10-31 14:19:49.092366+01'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-10-31 14:19:49.095671+01'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-10-31 14:19:49.099289+01'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-10-31 14:19:49.105961+01'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-10-31 14:19:49.111757+01'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-10-31 14:19:49.113736+01'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-10-31 14:19:49.115578+01'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-10-31 14:19:49.116928+01'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-10-31 14:19:49.118562+01'); +INSERT INTO gorp_migrations 
VALUES ('18_account_for_signers.sql', '2019-10-31 14:19:49.123835+01'); +INSERT INTO gorp_migrations VALUES ('19_offers.sql', '2019-10-31 14:19:49.133107+01'); +INSERT INTO gorp_migrations VALUES ('20_account_for_signer_index.sql', '2019-10-31 14:19:49.135499+01'); +INSERT INTO gorp_migrations VALUES ('21_trades_remove_zero_amount_constraints.sql', '2019-10-31 14:19:49.138031+01'); +INSERT INTO gorp_migrations VALUES ('22_trust_lines.sql', '2019-10-31 14:19:49.144708+01'); +INSERT INTO gorp_migrations VALUES ('23_exp_asset_stats.sql', '2019-10-31 14:19:49.15222+01'); +INSERT INTO gorp_migrations VALUES ('24_accounts.sql', '2019-10-31 14:19:49.160844+01'); +INSERT INTO gorp_migrations VALUES ('25_expingest_rename_columns.sql', '2019-10-31 14:19:49.163717+01'); +INSERT INTO gorp_migrations VALUES ('33_remove_unused.sql', '2019-11-30 10:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('34_fee_bump_transactions.sql', '2019-11-30 11:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('35_drop_participant_id.sql', '2019-11-30 14:19:49.163728+01'); +INSERT INTO gorp_migrations VALUES ('37_add_tx_set_operation_count_to_ledgers.sql', '2019-11-30 12:19:49.163728+01'); +INSERT INTO gorp_migrations VALUES ('41_add_sponsor_to_state_tables.sql', '2019-11-30 13:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('45_add_claimable_balances_history.sql', '2019-11-30 14:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('46_add_muxed_accounts.sql', '2019-12-30 14:19:49.163718+01'); +INSERT INTO gorp_migrations VALUES ('47_precompute_trade_aggregations.sql', '2019-12-30 14:19:49.163719+01'); +INSERT INTO gorp_migrations VALUES ('48_rebuild_trade_aggregations.sql', '2021-12-02 01:33:33.428419+00'); +INSERT INTO gorp_migrations VALUES ('49_add_brin_index_trade_aggregations.sql', '2021-12-02 01:33:33.43274+00'); +INSERT INTO gorp_migrations VALUES ('50_liquidity_pools.sql', '2021-12-02 01:33:33.471893+00'); +INSERT INTO gorp_migrations VALUES ('51_remove_ht_unused_indexes.sql', '2021-12-02 01:33:33.47903+00'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_accounts VALUES (1, 'GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN'); +INSERT INTO history_accounts VALUES (2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_accounts VALUES (3, 'GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y'); +INSERT INTO history_accounts VALUES (4, 'GACJPE4YUR22VP4CM2BDFDAHY3DLEF3H7NENKUQ53DT5TEI2GAHT5N4X'); +INSERT INTO history_accounts VALUES (5, 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD'); +INSERT INTO history_accounts VALUES (6, 'GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS'); +INSERT INTO history_accounts VALUES (7, 'GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ'); +INSERT INTO history_accounts VALUES (8, 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF'); +INSERT INTO history_accounts VALUES (9, 'GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG'); +INSERT INTO history_accounts VALUES (10, 'GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG'); +INSERT INTO history_accounts VALUES (11, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_accounts VALUES (12, 'GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q'); +INSERT INTO history_accounts VALUES (13, 'GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC'); +INSERT INTO history_accounts VALUES (14, 
'GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD'); +INSERT INTO history_accounts VALUES (15, 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU'); +INSERT INTO history_accounts VALUES (16, 'GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD'); +INSERT INTO history_accounts VALUES (17, 'GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP'); +INSERT INTO history_accounts VALUES (18, 'GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C'); +INSERT INTO history_accounts VALUES (19, 'GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG'); +INSERT INTO history_accounts VALUES (20, 'GCB7FPYGLL6RJ37HKRAYW5TAWMFBGGFGM4IM6ERBCZXI2BZ4OOOX2UAY'); +INSERT INTO history_accounts VALUES (21, 'GDCVTBGSEEU7KLXUMHMSXBALXJ2T4I2KOPXW2S5TRLKDRIAXD5UDHAYO'); +INSERT INTO history_accounts VALUES (22, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_accounts VALUES (23, 'GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB'); +INSERT INTO history_accounts VALUES (24, 'GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB'); +INSERT INTO history_accounts VALUES (25, 'GAXI33UCLQTCKM2NMRBS7XYBR535LLEVAHL5YBN4FTCB4HZHT7ZA5CVK'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 25, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_assets VALUES (1, 'credit_alphanum4', 'USD', 'GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD'); +INSERT INTO history_assets VALUES (2, 'credit_alphanum4', 'USD', 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU'); +INSERT INTO history_assets VALUES (3, 'credit_alphanum4', 'EUR', 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF'); +INSERT INTO history_assets VALUES (4, 'credit_alphanum4', 'EUR', 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU'); +INSERT INTO history_assets VALUES (5, 'credit_alphanum4', 'USD', 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF'); +INSERT INTO history_assets VALUES (6, 'credit_alphanum4', 'USD', 'GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG'); +INSERT INTO history_assets VALUES (7, 'native', '', ''); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 7, true); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (1, 249108107265, 1, 43, '{"new_seq": 300000000000}'); +INSERT INTO history_effects VALUES (1, 244813139969, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 244813139969, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 244813139969, 3, 10, '{"weight": 1, "public_key": "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN"}'); +INSERT INTO history_effects VALUES (3, 240518172673, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (3, 240518172673, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (3, 236223205377, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 236223205377, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (3, 236223205377, 3, 10, '{"weight": 1, "public_key": 
"GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y"}'); +INSERT INTO history_effects VALUES (4, 231928238081, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 231928238081, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 231928238082, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (4, 231928238082, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (4, 227633270785, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 227633270785, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (4, 227633270785, 3, 10, '{"weight": 1, "public_key": "GACJPE4YUR22VP4CM2BDFDAHY3DLEF3H7NENKUQ53DT5TEI2GAHT5N4X"}'); +INSERT INTO history_effects VALUES (5, 223338303489, 1, 42, '{"name": "name1", "value": "MDAwMA=="}'); +INSERT INTO history_effects VALUES (5, 219043336193, 1, 42, '{"name": "name1", "value": "MTIzNA=="}'); +INSERT INTO history_effects VALUES (5, 214748368897, 1, 41, '{"name": "name2"}'); +INSERT INTO history_effects VALUES (5, 210453401601, 1, 40, '{"name": "name1", "value": "MTIzNA=="}'); +INSERT INTO history_effects VALUES (5, 210453405697, 1, 40, '{"name": "name2", "value": "NTY3OA=="}'); +INSERT INTO history_effects VALUES (5, 210453409793, 1, 40, '{"name": "name ", "value": "aXRzIGdvdCBzcGFjZXMh"}'); +INSERT INTO history_effects VALUES (5, 206158434305, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 206158434305, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (5, 206158434305, 3, 10, '{"weight": 1, "public_key": "GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD"}'); +INSERT INTO history_effects VALUES (2, 201863467009, 1, 2, '{"amount": "15257676.9536092", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (6, 201863467009, 2, 2, '{"amount": "3814420.0001419", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (6, 197568499713, 1, 7, '{"inflation_destination": "GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS"}'); +INSERT INTO history_effects VALUES (2, 197568503809, 1, 7, '{"inflation_destination": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}'); +INSERT INTO history_effects VALUES (6, 193273532417, 1, 0, '{"starting_balance": "20000000000.0000000"}'); +INSERT INTO history_effects VALUES (2, 193273532417, 2, 3, '{"amount": "20000000000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (6, 193273532417, 3, 10, '{"weight": 1, "public_key": "GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS"}'); +INSERT INTO history_effects VALUES (7, 188978565121, 1, 3, '{"amount": "999.9999900", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 188978565121, 2, 2, '{"amount": "999.9999900", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (7, 188978565121, 3, 1, '{}'); +INSERT INTO history_effects VALUES (7, 184683597825, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 184683597825, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (7, 184683597825, 3, 10, '{"weight": 1, "public_key": "GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ"}'); +INSERT INTO history_effects VALUES (8, 180388630529, 1, 24, '{"trustor": 
"GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}'); +INSERT INTO history_effects VALUES (8, 176093663233, 1, 23, '{"trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}'); +INSERT INTO history_effects VALUES (8, 176093667329, 1, 23, '{"trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}'); +INSERT INTO history_effects VALUES (9, 171798695937, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}'); +INSERT INTO history_effects VALUES (9, 171798700033, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}'); +INSERT INTO history_effects VALUES (8, 167503728641, 1, 6, '{"auth_required_flag": true, "auth_revocable_flag": true}'); +INSERT INTO history_effects VALUES (9, 163208761345, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 163208761345, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (9, 163208761345, 3, 10, '{"weight": 1, "public_key": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG"}'); +INSERT INTO history_effects VALUES (8, 163208765441, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 163208765441, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (8, 163208765441, 3, 10, '{"weight": 1, "public_key": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}'); +INSERT INTO history_effects VALUES (10, 158913794049, 1, 21, '{"limit": "0.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}'); +INSERT INTO history_effects VALUES (10, 154618826753, 1, 22, '{"limit": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}'); +INSERT INTO history_effects VALUES (10, 150323859457, 1, 22, '{"limit": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}'); +INSERT INTO history_effects VALUES (10, 146028892161, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}'); +INSERT INTO history_effects VALUES (10, 141733924865, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 141733924865, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (10, 141733924865, 3, 10, '{"weight": 1, "public_key": "GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG"}'); +INSERT INTO history_effects VALUES (11, 137438957569, 1, 6, '{"auth_required_flag": false, "auth_revocable_flag": false}'); +INSERT INTO history_effects VALUES (11, 137438961665, 1, 12, '{"weight": 2, "public_key": 
"GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES"}'); +INSERT INTO history_effects VALUES (11, 137438961665, 2, 11, '{"public_key": "GB6J3WOLKYQE6KVDZEA4JDMFTTONUYP3PUHNDNZRWIKA6JQWIMJZATFE"}'); +INSERT INTO history_effects VALUES (11, 133143990273, 1, 12, '{"weight": 2, "public_key": "GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES"}'); +INSERT INTO history_effects VALUES (11, 133143990273, 2, 12, '{"weight": 5, "public_key": "GB6J3WOLKYQE6KVDZEA4JDMFTTONUYP3PUHNDNZRWIKA6JQWIMJZATFE"}'); +INSERT INTO history_effects VALUES (11, 120259088385, 1, 7, '{"inflation_destination": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}'); +INSERT INTO history_effects VALUES (11, 120259092481, 1, 6, '{"auth_required_flag": true}'); +INSERT INTO history_effects VALUES (11, 120259096577, 1, 6, '{"auth_revocable_flag": true}'); +INSERT INTO history_effects VALUES (11, 120259100673, 1, 12, '{"weight": 2, "public_key": "GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES"}'); +INSERT INTO history_effects VALUES (11, 120259104769, 1, 4, '{"low_threshold": 0, "med_threshold": 2, "high_threshold": 2}'); +INSERT INTO history_effects VALUES (11, 120259108865, 1, 5, '{"home_domain": "example.com"}'); +INSERT INTO history_effects VALUES (11, 120259112961, 1, 12, '{"weight": 2, "public_key": "GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES"}'); +INSERT INTO history_effects VALUES (11, 120259112961, 2, 10, '{"weight": 1, "public_key": "GB6J3WOLKYQE6KVDZEA4JDMFTTONUYP3PUHNDNZRWIKA6JQWIMJZATFE"}'); +INSERT INTO history_effects VALUES (11, 115964121089, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 115964121089, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (11, 115964121089, 3, 10, '{"weight": 1, "public_key": "GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES"}'); +INSERT INTO history_effects VALUES (12, 107374186497, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 107374186497, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (12, 107374186497, 3, 10, '{"weight": 1, "public_key": "GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q"}'); +INSERT INTO history_effects VALUES (14, 103079219201, 1, 33, '{"seller": "GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC", "offer_id": 3, "sold_amount": "20.0000000", "bought_amount": "20.0000000", "sold_asset_code": "USD", "sold_asset_type": "credit_alphanum4", "bought_asset_type": "native", "sold_asset_issuer": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD"}'); +INSERT INTO history_effects VALUES (13, 103079219201, 2, 33, '{"seller": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD", "offer_id": 3, "sold_amount": "20.0000000", "bought_amount": "20.0000000", "sold_asset_type": "native", "bought_asset_code": "USD", "bought_asset_type": "credit_alphanum4", "bought_asset_issuer": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD"}'); +INSERT INTO history_effects VALUES (13, 94489284609, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD"}'); +INSERT INTO history_effects VALUES (13, 90194317313, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 90194317313, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (13, 
90194317313, 3, 10, '{"weight": 1, "public_key": "GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC"}'); +INSERT INTO history_effects VALUES (14, 90194321409, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 90194321409, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (14, 90194321409, 3, 10, '{"weight": 1, "public_key": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD"}'); +INSERT INTO history_effects VALUES (17, 85899350017, 1, 2, '{"amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (16, 85899350017, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (16, 85899350017, 3, 33, '{"seller": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", "offer_id": 2, "sold_amount": "100.0000000", "bought_amount": "100.0000000", "sold_asset_type": "native", "bought_asset_code": "EUR", "bought_asset_type": "credit_alphanum4", "bought_asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (15, 85899350017, 4, 33, '{"seller": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", "offer_id": 2, "sold_amount": "100.0000000", "bought_amount": "100.0000000", "sold_asset_code": "EUR", "sold_asset_type": "credit_alphanum4", "bought_asset_type": "native", "sold_asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (17, 81604382721, 1, 2, '{"amount": "200.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (16, 81604382721, 2, 3, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (16, 81604382721, 3, 33, '{"seller": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", "offer_id": 1, "sold_amount": "100.0000000", "bought_amount": "200.0000000", "sold_asset_code": "USD", "sold_asset_type": "credit_alphanum4", "bought_asset_type": "native", "sold_asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (15, 81604382721, 4, 33, '{"seller": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", "offer_id": 1, "sold_amount": "200.0000000", "bought_amount": "100.0000000", "sold_asset_type": "native", "bought_asset_code": "USD", "bought_asset_type": "credit_alphanum4", "bought_asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (16, 81604382721, 5, 33, '{"seller": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", "offer_id": 2, "sold_amount": "200.0000000", "bought_amount": "200.0000000", "sold_asset_type": "native", "bought_asset_code": "EUR", "bought_asset_type": "credit_alphanum4", "bought_asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (15, 81604382721, 6, 33, '{"seller": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", "offer_id": 2, "sold_amount": "200.0000000", "bought_amount": "200.0000000", "sold_asset_code": "EUR", "sold_asset_type": "credit_alphanum4", "bought_asset_type": "native", "sold_asset_issuer": 
"GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (16, 77309415425, 1, 2, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (15, 77309415425, 2, 3, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (16, 73014448129, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (17, 73014452225, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (16, 68719480833, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 68719480833, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (16, 68719480833, 3, 10, '{"weight": 1, "public_key": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD"}'); +INSERT INTO history_effects VALUES (17, 68719484929, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 68719484929, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (17, 68719484929, 3, 10, '{"weight": 1, "public_key": "GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP"}'); +INSERT INTO history_effects VALUES (15, 68719489025, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 68719489025, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (15, 68719489025, 3, 10, '{"weight": 1, "public_key": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}'); +INSERT INTO history_effects VALUES (18, 64424513537, 1, 2, '{"amount": "10.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG"}'); +INSERT INTO history_effects VALUES (19, 64424513537, 2, 3, '{"amount": "10.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG"}'); +INSERT INTO history_effects VALUES (18, 60129546241, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (19, 60129546241, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (18, 60129550337, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG"}'); +INSERT INTO history_effects VALUES (19, 55834578945, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 55834578945, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (19, 55834578945, 3, 10, '{"weight": 1, "public_key": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG"}'); +INSERT INTO history_effects VALUES (18, 55834583041, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 55834583041, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); 
+INSERT INTO history_effects VALUES (18, 55834583041, 3, 10, '{"weight": 1, "public_key": "GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C"}'); +INSERT INTO history_effects VALUES (20, 51539611649, 1, 0, '{"starting_balance": "50.0000000"}'); +INSERT INTO history_effects VALUES (21, 51539611649, 2, 3, '{"amount": "50.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (20, 51539611649, 3, 10, '{"weight": 1, "public_key": "GCB7FPYGLL6RJ37HKRAYW5TAWMFBGGFGM4IM6ERBCZXI2BZ4OOOX2UAY"}'); +INSERT INTO history_effects VALUES (21, 47244644353, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 47244644353, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (21, 47244644353, 3, 10, '{"weight": 1, "public_key": "GDCVTBGSEEU7KLXUMHMSXBALXJ2T4I2KOPXW2S5TRLKDRIAXD5UDHAYO"}'); +INSERT INTO history_effects VALUES (2, 42949677057, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (22, 42949677057, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 42949677058, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (22, 42949677058, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (22, 38654709761, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 38654709761, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (22, 38654709761, 3, 10, '{"weight": 1, "public_key": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY"}'); +INSERT INTO history_effects VALUES (2, 34359742465, 1, 2, '{"amount": "1.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (23, 34359742465, 2, 3, '{"amount": "1.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 34359746561, 1, 2, '{"amount": "1.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (23, 34359746561, 2, 3, '{"amount": "1.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 34359750657, 1, 2, '{"amount": "1.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (23, 34359750657, 2, 3, '{"amount": "1.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 34359754753, 1, 2, '{"amount": "1.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (23, 34359754753, 2, 3, '{"amount": "1.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (23, 30064775169, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 30064775169, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (23, 30064775169, 3, 10, '{"weight": 1, "public_key": "GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB"}'); +INSERT INTO history_effects VALUES (24, 25769807873, 1, 12, '{"weight": 2, "public_key": "GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB"}'); +INSERT INTO history_effects VALUES (24, 25769807873, 2, 12, '{"weight": 1, "public_key": "GD3E7HKMRNT6HGBGHBT6I6JE4N2S4W5KZ246TGJ4KQSXJ2P4BXCUPQMP"}'); +INSERT INTO history_effects VALUES (24, 21474844673, 1, 12, '{"weight": 1, "public_key": "GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB"}'); +INSERT INTO history_effects VALUES (24, 21474844673, 2, 10, '{"weight": 1, "public_key": 
"GD3E7HKMRNT6HGBGHBT6I6JE4N2S4W5KZ246TGJ4KQSXJ2P4BXCUPQMP"}'); +INSERT INTO history_effects VALUES (24, 21474848769, 1, 4, '{"low_threshold": 2, "med_threshold": 2, "high_threshold": 2}'); +INSERT INTO history_effects VALUES (24, 17179873281, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 17179873281, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (24, 17179873281, 3, 10, '{"weight": 1, "public_key": "GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB"}'); +INSERT INTO history_effects VALUES (25, 12884905985, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (2, 12884905985, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (25, 12884905985, 3, 10, '{"weight": 1, "public_key": "GAXI33UCLQTCKM2NMRBS7XYBR535LLEVAHL5YBN4FTCB4HZHT7ZA5CVK"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (61, '74c6dfd38c6bc4b10deddbe2fdd5a143a81765123e6eb6324dfef0dcd4a55fb1', 'fc843d2143c76bd29eb3b1ffddc3d48d60c7a1dcfa13ad88191da037092f12d0', 1, 1, '2019-06-03 16:36:27', '2019-06-03 16:35:33.313992', '2019-06-03 16:35:33.313992', 261993005056, 16, 1000190721000000000, 30471289, 100, 100000000, 1000000, 11, 'AAAAC/yEPSFDx2vSnrOx/93D1I1gx6Hc+hOtiBkdoDcJLxLQwnboDZyOgcyfb4ke5wI3SosVZ6YQuxInN2U6dBd49hYAAAAAXPVMiwAAAAAAAAAAosEhvRTCvadI/J9r+8kB+lYA6OphTjHH/5o2+iklI4P4rvPHAx3asi1o47MP12rNCQSIUVDj1Vrdu81+tOLDfwAAAD0N4WQpWNjKAAAAAAAB0PR5AAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (60, 'fc843d2143c76bd29eb3b1ffddc3d48d60c7a1dcfa13ad88191da037092f12d0', '85e72664cd18352262866f05355401362ba8cea6a2f47e2716471ab80d7a8208', 1, 1, '2019-06-03 16:36:26', '2019-06-03 16:35:33.335421', '2019-06-03 16:35:33.335421', 257698037760, 16, 1000190721000000000, 30471189, 100, 100000000, 1000000, 11, 'AAAAC4XnJmTNGDUiYoZvBTVUATYrqM6movR+JxZHGrgNeoII58fVvlrThcLsRhZ/MWhMMf/UYr2OzkRkTKFYZDGQ5ewAAAAAXPVMigAAAAAAAAAAZB2X7lcCb772lP7QltIwf7Af+ESisKsoFpU1cBZvSc7chbF3PheXP+z7+tsJ5b5GYsWEay6Di6CkkkadJmFk3AAAADwN4WQpWNjKAAAAAAAB0PQVAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (59, '85e72664cd18352262866f05355401362ba8cea6a2f47e2716471ab80d7a8208', '0e91662d61bb7b28faf575b9a350576fc40f16e382177310964394bb22de658b', 1, 1, '2019-06-03 16:36:25', '2019-06-03 16:35:33.346065', '2019-06-03 16:35:33.346066', 253403070464, 16, 1000190721000000000, 30471089, 100, 100000000, 1000000, 11, 'AAAACw6RZi1hu3so+vV1uaNQV2/EDxbjghdzEJZDlLsi3mWL+ICn3BLQXRs7fl4siWFC2ldDe+71dfZ+Ot6SzuRKboAAAAAAXPVMiQAAAAAAAAAAlNMwBDUEqtiETmU+aulkMrDuVtBffL8sVeqm0xm+yFztHfj5X8CLVoSeR2gp8ACoEyIeStN4O5rt7qSSaxxhuwAAADsN4WQpWNjKAAAAAAAB0POxAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (58, '0e91662d61bb7b28faf575b9a350576fc40f16e382177310964394bb22de658b', 
'4f6d9aa8f0c2a0bb76221321312b0449720e047bcc47a8d47e3885621396de32', 1, 1, '2019-06-03 16:36:24', '2019-06-03 16:35:33.357252', '2019-06-03 16:35:33.357252', 249108103168, 16, 1000190721000000000, 30470989, 100, 100000000, 1000000, 11, 'AAAAC09tmqjwwqC7diITITErBElyDgR7zEeo1H44hWITlt4y5laaexZi3BeoJbBYKEHVRaXQBzZzXDjrizmBZaG28WoAAAAAXPVMiAAAAAAAAAAAGMfQBF/YqYedEUNnQuilzukzLJPeVrR9I6q3baNG83n8kdWRPqRfd6skOTdJh3gw1CLyAZfl0Kspveak/XUBrAAAADoN4WQpWNjKAAAAAAAB0PNNAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (57, '4f6d9aa8f0c2a0bb76221321312b0449720e047bcc47a8d47e3885621396de32', '44943aac60f737822cbb8ed09dad3acff60f9811db9cc56efe2fd6082ed71fb0', 1, 1, '2019-06-03 16:36:23', '2019-06-03 16:35:33.368443', '2019-06-03 16:35:33.368444', 244813135872, 16, 1000190721000000000, 30470889, 100, 100000000, 1000000, 11, 'AAAAC0SUOqxg9zeCLLuO0J2tOs/2D5gR25zFbv4v1ggu1x+wSZEueVQpVrdX4o+KMu65Z6AqPEQg3hDgMGXVzWdfg4YAAAAAXPVMhwAAAAAAAAAA14sFIRujWwg9KpW3xcPTBAGeuB6CsnSMTpP8SjSsd9oj9l8Q/bO18WpeHxO1ThKTGbAtIm5RC4yqtX4KLaRH/QAAADkN4WQpWNjKAAAAAAAB0PLpAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (56, '44943aac60f737822cbb8ed09dad3acff60f9811db9cc56efe2fd6082ed71fb0', '9fe688714ba462bb2badb522a28f435678cc3ad383dc4afd91a8c11e35a3d735', 1, 1, '2019-06-03 16:36:22', '2019-06-03 16:35:33.380494', '2019-06-03 16:35:33.380495', 240518168576, 16, 1000190721000000000, 30470789, 100, 100000000, 1000000, 11, 'AAAAC5/miHFLpGK7K621IqKPQ1Z4zDrTg9xK/ZGowR41o9c1AfruE1INcN+aRvkAlMPc22dTgggif6wloMtQ47jJqRIAAAAAXPVMhgAAAAAAAAAAudvIKbdFQO6cnqBlfAQ0qwiv8T/bow/WPTIANo/ERREqQ4CwaeHZFm+0uGoZbmYdoBwAamF5v10EmnVf4uxH6gAAADgN4WQpWNjKAAAAAAAB0PKFAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (55, '9fe688714ba462bb2badb522a28f435678cc3ad383dc4afd91a8c11e35a3d735', 'b890cfd7326083ad579a151669212f51273c080e2b67a3664eb7cbcb9f177140', 1, 1, '2019-06-03 16:36:21', '2019-06-03 16:35:33.392461', '2019-06-03 16:35:33.392462', 236223201280, 16, 1000190721000000000, 30470689, 100, 100000000, 1000000, 11, 'AAAAC7iQz9cyYIOtV5oVFmkhL1EnPAgOK2ejZk63y8ufF3FAGEeGE9B3tl7o9CfKtPVM4DeWbUf/Bo39cDHTrEIancYAAAAAXPVMhQAAAAAAAAAAHOu0BR+25TDycY7dcNSyVck1nyZBCkABFZXwjkic52S5P22ZmU+gjPDIjhAWgZBWzZZ0wRh7yORr9nijpB3wWwAAADcN4WQpWNjKAAAAAAAB0PIhAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (54, 'b890cfd7326083ad579a151669212f51273c080e2b67a3664eb7cbcb9f177140', '48645ad8430e5952553cd69569a5711bac93461b1c9c030b60b062f4853b6b14', 1, 2, '2019-06-03 16:36:20', '2019-06-03 16:35:33.405812', '2019-06-03 16:35:33.405812', 231928233984, 16, 1000190721000000000, 30470589, 100, 100000000, 1000000, 11, 
'AAAAC0hkWthDDllSVTzWlWmlcRusk0YbHJwDC2CwYvSFO2sUdAzpmZ4VmiJAGIhn1C8C65yAkmtBcCkFXHDwkTPoYkwAAAAAXPVMhAAAAAAAAAAAfXVfPS9jBqhTfz4LivYYP9btnu+qwf/qpVYL7HgbAM25OdLH4LXuwnN9DpuJOk8BEc4IicijRjArwYicV3k4xQAAADYN4WQpWNjKAAAAAAAB0PG9AAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (53, '48645ad8430e5952553cd69569a5711bac93461b1c9c030b60b062f4853b6b14', 'bd7b429dbd8f8efd1b56ed702bf183e473315e2f0422db5599548c404cee6066', 1, 1, '2019-06-03 16:36:19', '2019-06-03 16:35:33.420298', '2019-06-03 16:35:33.420298', 227633266688, 16, 1000190721000000000, 30470389, 100, 100000000, 1000000, 11, 'AAAAC717Qp29j479G1btcCvxg+RzMV4vBCLbVZlUjEBM7mBmDvRi34u5RgEADibpcO4c5OBbLI5HFWx1tz1OBwx7IMYAAAAAXPVMgwAAAAAAAAAAY80asxoHVId70xk95LhsRNki3uSYhgJrvMpz6Qe35wQ6M9y+p/ptNZ6zB2qi6pqwHFy5f/dMl34F1pxfwgvBdwAAADUN4WQpWNjKAAAAAAAB0PD1AAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (52, 'bd7b429dbd8f8efd1b56ed702bf183e473315e2f0422db5599548c404cee6066', '90534350f3fea0a6a559de207695e282e898909544823ac5c7000f76abadc08e', 1, 1, '2019-06-03 16:36:18', '2019-06-03 16:35:33.430701', '2019-06-03 16:35:33.430701', 223338299392, 16, 1000190721000000000, 30470289, 100, 100000000, 1000000, 11, 'AAAAC5BTQ1Dz/qCmpVneIHaV4oLomJCVRII6xccAD3arrcCOzeFEa2j6tGG8gbM/1MR8aABY/PxuO9Zz2s9mxGnLVoQAAAAAXPVMggAAAAAAAAAAdFs0uvdku2e14ZLZNY/inkD0IgG9m+9h08GbknZs9EzZzupAXizfBXmh99nHTu7W5H1zvpkWS2CF1tqBEKwuCQAAADQN4WQpWNjKAAAAAAAB0PCRAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (51, '90534350f3fea0a6a559de207695e282e898909544823ac5c7000f76abadc08e', 'b1c7d71aeba870b162b022c31adeb26614e63070928647a1b9c6bc51aba83d0d', 1, 1, '2019-06-03 16:36:17', '2019-06-03 16:35:33.443509', '2019-06-03 16:35:33.443509', 219043332096, 16, 1000190721000000000, 30470189, 100, 100000000, 1000000, 11, 'AAAAC7HH1xrrqHCxYrAiwxresmYU5jBwkoZHobnGvFGrqD0Nt2vJTHsKyMHdVCH/gs0c3U/eQKqaWReE+4jy2gt+ukwAAAAAXPVMgQAAAAAAAAAAG9Lveo8KOMBsuTXxxarafYQzHKlqwhXWtcRjpmTOc+OB+peuNPsYlfeIN/BQ3+xOQZ7d/bV/tnJgLEpXfnSm4QAAADMN4WQpWNjKAAAAAAAB0PAtAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (50, 'b1c7d71aeba870b162b022c31adeb26614e63070928647a1b9c6bc51aba83d0d', '0dc8610ec649e718d45083b69105153b5689bec67427a72058b9357eeb0aceac', 1, 1, '2019-06-03 16:36:16', '2019-06-03 16:35:33.457157', '2019-06-03 16:35:33.457157', 214748364800, 16, 1000190721000000000, 30470089, 100, 100000000, 1000000, 11, 
'AAAACw3IYQ7GSecY1FCDtpEFFTtWib7GdCenIFi5NX7rCs6si0bTsBbSdDOFoh+y5f8cl3t+mLZfzrhHG/6bxBlR1MwAAAAAXPVMgAAAAAAAAAAAe2zMUWV2ofxxSJUd0hgEpW+dk+M503UNhsqXLKL0TMgrrap+MZcPKP7WldMsB6zr/892IcwiXmAc6RON5c7/DQAAADIN4WQpWNjKAAAAAAAB0O/JAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAK62qfjGXDyj+1pXTLAes6//PdiHMIl5gHOkTjeXO/w0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (49, '0dc8610ec649e718d45083b69105153b5689bec67427a72058b9357eeb0aceac', '41b518eb3573162dba39489d5e79c1c04ee6c6d08c19311b5bf7d9b8f9aa17d6', 3, 3, '2019-06-03 16:36:15', '2019-06-03 16:35:33.472035', '2019-06-03 16:35:33.472035', 210453397504, 16, 1000190721000000000, 30469989, 100, 100000000, 1000000, 11, 'AAAAC0G1GOs1cxYtujlInV55wcBO5sbQjBkxG1v32bj5qhfWeVRF+FiW1Qlc+3u1mBcZ84ULRdo3+cTo45YRJDO+PS4AAAAAXPVMfwAAAAAAAAAAsSOLB+Fh7PI1BdmA+nlx5DdeXSAZY7OnyHcuyeMEmJA3bk41wtryM/3lghnAAVnP/iB46990qItDfm+tlOWNaQAAADEN4WQpWNjKAAAAAAAB0O9lAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0, NULL); +INSERT INTO history_ledgers VALUES (48, '41b518eb3573162dba39489d5e79c1c04ee6c6d08c19311b5bf7d9b8f9aa17d6', 'b49ea0eb1fe629fc2c9df2c976bc9971fed6fd8d257ece738e45c3f8967d1569', 1, 1, '2019-06-03 16:36:14', '2019-06-03 16:35:33.482429', '2019-06-03 16:35:33.48243', 206158430208, 16, 1000190721000000000, 30469689, 100, 100000000, 1000000, 11, 'AAAAC7SeoOsf5in8LJ3yyXa8mXH+1v2NJX7Oc45Fw/iWfRVp6aDW3LPbWQnkQqAyNZZJlDWaVAL9GmVKlMVbHoaATywAAAAAXPVMfgAAAAAAAAAAQNg4cr4Do7quX33o4rbaXa7+BDdHc8PbTEXBcuQETP8rTgcb7n3UXEG0av8iuvhxWlCZJf64MCH+txYgQXKWPAAAADAN4WQpWNjKAAAAAAAB0O45AAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (47, 'b49ea0eb1fe629fc2c9df2c976bc9971fed6fd8d257ece738e45c3f8967d1569', '706d948a65a01559c33264168e913f4b490034942f79aea96f616a03e98297c3', 1, 1, '2019-06-03 16:36:13', '2019-06-03 16:35:33.495168', '2019-06-03 16:35:33.495169', 201863462912, 16, 1000190721000000000, 30469589, 100, 100000000, 1000000, 11, 'AAAAC3BtlIploBVZwzJkFo6RP0tJADSUL3muqW9hagPpgpfD1EV61P5oEG1jpv4Ia/HUgGCpKwTQ4ZplHht7o7pA71gAAAAAXPVMfQAAAAAAAAAA8OM7CNjloxWqwlaanmBarRzmaEAjS1RTT7DMBUh/OHivMXhi18hGxAY77i6ACTEqawD8vXkvXLjBzeeYDgCPTQAAAC8N4WQpWNjKAAAAAAAB0O3VAAAAAQAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (46, '706d948a65a01559c33264168e913f4b490034942f79aea96f616a03e98297c3', '69840e81fe7e584aad0f22dfa89ee767cd3524497563e471af8af67b53f8d471', 2, 2, '2019-06-03 16:36:12', '2019-06-03 16:35:33.506546', '2019-06-03 16:35:33.506546', 197568495616, 16, 1000000000000000000, 7000, 100, 100000000, 1000000, 11, 
'AAAAC2mEDoH+flhKrQ8i36ie52fNNSRJdWPkca+K9ntT+NRxAgTIPLGOynuQgOXzmXQZrLyGTAGIDWOHJRQJqUfbAiUAAAAAXPVMfAAAAAAAAAAAhkYgTYVFfiNxywZFyKUj6rUeR42SRzNzOnasgV/WuDvtwNkabdQ+tgZe43HtXEsCbQ9gWOuRX6L5cvO8ZwBRpQAAAC4N4Lazp2QAAAAAAAAAABtYAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (45, '69840e81fe7e584aad0f22dfa89ee767cd3524497563e471af8af67b53f8d471', '5eef1067a0fbdb41321e472a1c14fedc553d13539516dea4fdd319d0ca7d2af8', 1, 1, '2019-06-03 16:36:11', '2019-06-03 16:35:33.518547', '2019-06-03 16:35:33.518547', 193273528320, 16, 1000000000000000000, 6800, 100, 100000000, 1000000, 11, 'AAAAC17vEGeg+9tBMh5HKhwU/txVPRNTlRbepP3TGdDKfSr4kGCHHnA6KOdoPcSvp6Rxhk2hURlGF1VP2oSVNBkDI9cAAAAAXPVMewAAAAAAAAAA2s+Yd36GUBmFrCn8nxzR3G7G6BlWwDjWIvSuQxGMICY92RjX5DbOxYhJMM/EQc1oqGw0gsFbtqsOXBjLLV9EXAAAAC0N4Lazp2QAAAAAAAAAABqQAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (44, '5eef1067a0fbdb41321e472a1c14fedc553d13539516dea4fdd319d0ca7d2af8', '4409a871d550650b6a0a630136e80cd259a333ca55203db1548d315f19ef4a5c', 1, 1, '2019-06-03 16:36:10', '2019-06-03 16:35:33.528911', '2019-06-03 16:35:33.528911', 188978561024, 16, 1000000000000000000, 6700, 100, 100000000, 1000000, 11, 'AAAAC0QJqHHVUGULagpjATboDNJZozPKVSA9sVSNMV8Z70pcrowUdKRzA/ZRkdeo8Gu2qV4ThU16d9p8N74Hanu5UssAAAAAXPVMegAAAAAAAAAAWWc1pYhVWRoEJa6GcHIsaj1ysxI4CQ+BcJqiGW3wjmJ+vfFRLi5J2iDMXNTWqNmnft8zHFO464YgJZT7ub0qrQAAACwN4Lazp2QAAAAAAAAAABosAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (43, '4409a871d550650b6a0a630136e80cd259a333ca55203db1548d315f19ef4a5c', 'a96cbf9f3c2d3c56bd14b4050246a5e39e5920a201e9d66c40bc5aa68782fe53', 1, 1, '2019-06-03 16:36:09', '2019-06-03 16:35:33.541246', '2019-06-03 16:35:33.541246', 184683593728, 16, 1000000000000000000, 6600, 100, 100000000, 1000000, 11, 'AAAAC6lsv588LTxWvRS0BQJGpeOeWSCiAenWbEC8WqaHgv5TZNSyhXDfKEyU6jk0m8nxAE6PCJaGxAoOwqvcASMsUS4AAAAAXPVMeQAAAAAAAAAA9L2wqBam8xx6EMlQ1e/R7JPhksJqFoZDmKFcFBwf3xCwc2JLloIzptYhoNC7H0OKDeD9owZAk5Iannrejr0OAwAAACsN4Lazp2QAAAAAAAAAABnIAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (42, 'a96cbf9f3c2d3c56bd14b4050246a5e39e5920a201e9d66c40bc5aa68782fe53', '22b06b409446c1f0c278af0ce15ad99657a0ff2196a5bfeded56d69d36779c9d', 1, 1, '2019-06-03 16:36:08', '2019-06-03 16:35:33.552813', '2019-06-03 16:35:33.552813', 180388626432, 16, 1000000000000000000, 6500, 100, 100000000, 1000000, 11, 
'AAAACyKwa0CURsHwwnivDOFa2ZZXoP8hlqW/7e1W1p02d5ydI1zHat+THSiG8FiJYhX2IjqcewMyqp7eILRtLxlqAV0AAAAAXPVMeAAAAAAAAAAA4mQxuIyGkMnUNprxo1u6b3NOKgS/CqRR0k9oMQPLWs8+V68qnCaFFEx4K1goWa3RDH8IZsY7i2BXWSCMWqzpxgAAACoN4Lazp2QAAAAAAAAAABlkAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (41, '22b06b409446c1f0c278af0ce15ad99657a0ff2196a5bfeded56d69d36779c9d', 'a6f5bc74da946970949cc8a58b7fc0ca52a9627fa1a45fccf567a6ad2f311a90', 2, 2, '2019-06-03 16:36:07', '2019-06-03 16:35:33.564965', '2019-06-03 16:35:33.564965', 176093659136, 16, 1000000000000000000, 6400, 100, 100000000, 1000000, 11, 'AAAAC6b1vHTalGlwlJzIpYt/wMpSqWJ/oaRfzPVnpq0vMRqQKNyYbOwL7RbiajIIn9wo1XIqxrxKXhScSJZushgn1U8AAAAAXPVMdwAAAAAAAAAA3texHWkeXlsh8yl52JzE1M7p/8uzk36DdlbvwdqWw1Y07THsoEsB7/w8RQjJ40lpM5K1EboCeT8y/j///j3AagAAACkN4Lazp2QAAAAAAAAAABkAAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (40, 'a6f5bc74da946970949cc8a58b7fc0ca52a9627fa1a45fccf567a6ad2f311a90', 'bc3a5a953848fe2cb46ed75ecebe6ba09c856d47d6fea025223858d8dc7aad95', 2, 2, '2019-06-03 16:36:06', '2019-06-03 16:35:33.580316', '2019-06-03 16:35:33.580316', 171798691840, 16, 1000000000000000000, 6200, 100, 100000000, 1000000, 11, 'AAAAC7w6WpU4SP4stG7XXs6+a6CchW1H1v6gJSI4WNjceq2V/inIeGJFrV1RVU+xprIZRH8KVaI74l/TAt+/rJeHJ50AAAAAXPVMdgAAAAAAAAAAWJlo97rfQjjINuAEkeCOb3Rwl6NYUk95Xa63Nc0UCv10U3spNQC8m5QVal2MW27Qo8OwBAnHuh8wEWKMhYtIgAAAACgN4Lazp2QAAAAAAAAAABg4AAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (39, 'bc3a5a953848fe2cb46ed75ecebe6ba09c856d47d6fea025223858d8dc7aad95', 'b8296a81cb3c468c47f100662acb337e168861732ce211ddda30218098e806ce', 1, 1, '2019-06-03 16:36:05', '2019-06-03 16:35:33.592749', '2019-06-03 16:35:33.592749', 167503724544, 16, 1000000000000000000, 6000, 100, 100000000, 1000000, 11, 'AAAAC7gpaoHLPEaMR/EAZirLM34WiGFzLOIR3dowIYCY6AbOHivLxyP8I+6k4JpB4A3PxOKsREZUMnzYBoAng8ZuENAAAAAAXPVMdQAAAAAAAAAASeHBruDgNPJb9kfcJVMyCg6meb0kov5dW9Ok8EEep2bE1Tj7RqD1/0Q6+H0uMkMVOwWWrTFNf3Y8cVz9ojCJ1QAAACcN4Lazp2QAAAAAAAAAABdwAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (38, 'b8296a81cb3c468c47f100662acb337e168861732ce211ddda30218098e806ce', 'f39961e7de53003d9a878277343091c60e1eadefba33202be08334cae6787bef', 2, 2, '2019-06-03 16:36:04', '2019-06-03 16:35:33.60356', '2019-06-03 16:35:33.60356', 163208757248, 16, 1000000000000000000, 5900, 100, 100000000, 1000000, 11, 
'AAAAC/OZYefeUwA9moeCdzQwkcYOHq3vujMgK+CDNMrmeHvvvdOMXcZQv2fCMQnVMUfKBJ7XSFWbgB/wE3Bs7er87uAAAAAAXPVMdAAAAAAAAAAAN/+dCsXSVcipJLX5fSVl/dtD8pFe+iZnfnNZ2g7vXj/Ebip0yLvWR8Z7upcPNrqDXeLvVxZizB40hi2kIihnngAAACYN4Lazp2QAAAAAAAAAABcMAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (37, 'f39961e7de53003d9a878277343091c60e1eadefba33202be08334cae6787bef', 'f965f5e76e2df794975e7245831b95b86ecc9711f7b7fb144df9bcac3c8a7a2a', 1, 1, '2019-06-03 16:36:03', '2019-06-03 16:35:33.615591', '2019-06-03 16:35:33.615591', 158913789952, 16, 1000000000000000000, 5700, 100, 100000000, 1000000, 11, 'AAAAC/ll9eduLfeUl15yRYMblbhuzJcR97f7FE35vKw8inoqjeXfs5RaC/pSbMQ0UkNgOHq6KFpFmq5/xPhha/qIrikAAAAAXPVMcwAAAAAAAAAAPORbuvHNNFfsSSmddqmUP8Fk8iW9Jb1yQL/uEyYGc2uIYodbNQGTGAsLT3wVFszuQByXNpPK2sQ80gKgAo6R4QAAACUN4Lazp2QAAAAAAAAAABZEAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (36, 'f965f5e76e2df794975e7245831b95b86ecc9711f7b7fb144df9bcac3c8a7a2a', 'f090655f0d5cee583227d0088cb7af2b780f6fd91e836a4117abb11d479b5ce1', 1, 1, '2019-06-03 16:36:02', '2019-06-03 16:35:33.635589', '2019-06-03 16:35:33.63559', 154618822656, 16, 1000000000000000000, 5600, 100, 100000000, 1000000, 11, 'AAAAC/CQZV8NXO5YMifQCIy3ryt4D2/ZHoNqQRersR1Hm1zht40cpv6aCNmZXclj0Gp1g8/QTb+rEsz7O6QRevEp7pkAAAAAXPVMcgAAAAAAAAAADx7u3Tt890DDCAY2PUo3lKiU3M3nylcrqmvMlFLS+fEnxfEWjlbDeIddDXeCKT79jo4fwyV0ymPAz6/1A94F9AAAACQN4Lazp2QAAAAAAAAAABXgAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (35, 'f090655f0d5cee583227d0088cb7af2b780f6fd91e836a4117abb11d479b5ce1', 'e0905bd61697815e7dd80a6f7ad8e83077a5304ab0681825e5a820bfbcc12dbd', 1, 1, '2019-06-03 16:36:01', '2019-06-03 16:35:33.645127', '2019-06-03 16:35:33.645127', 150323855360, 16, 1000000000000000000, 5500, 100, 100000000, 1000000, 11, 'AAAAC+CQW9YWl4FefdgKb3rY6DB3pTBKsGgYJeWoIL+8wS29TrWUd6O7M4S0fe8UEcdhlcmYcyd0+obgadQjYZA/UK4AAAAAXPVMcQAAAAAAAAAAU3lWP9fTSJmRnN7PpNBaZaYziGh9Xk2I2lyAgYUdNswxQTtINH5UegpdO02kZozOfs6/jYRFpxCpYgETz6Se7gAAACMN4Lazp2QAAAAAAAAAABV8AAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (34, 'e0905bd61697815e7dd80a6f7ad8e83077a5304ab0681825e5a820bfbcc12dbd', '5273d226f3ad944db78e740d2ef1736845197a098f8c22c2a1a412838d2f2770', 1, 1, '2019-06-03 16:36:00', '2019-06-03 16:35:33.656811', '2019-06-03 16:35:33.656811', 146028888064, 16, 1000000000000000000, 5400, 100, 100000000, 1000000, 11, 
'AAAAC1Jz0ibzrZRNt450DS7xc2hFGXoJj4wiwqGkEoONLydwytNJJuK4uRpBqNE6DA/twCtsyMwX/BNjTK2j1A2cLpIAAAAAXPVMcAAAAAAAAAAA/bE+PeQRbcMvSIfQyaXSWXEgzB7abwreNVQeYYIhwiZ0ZqxccyydwKiguVJdbIGZEGSMZ99UQLW9RIyPocciTAAAACIN4Lazp2QAAAAAAAAAABUYAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (33, '5273d226f3ad944db78e740d2ef1736845197a098f8c22c2a1a412838d2f2770', '05e77cb96060cb1b543745f88fe36e3246b45b26a9ee597f194fdfa332915ac6', 1, 1, '2019-06-03 16:35:59', '2019-06-03 16:35:33.665558', '2019-06-03 16:35:33.665558', 141733920768, 16, 1000000000000000000, 5300, 100, 100000000, 1000000, 11, 'AAAACwXnfLlgYMsbVDdF+I/jbjJGtFsmqe5ZfxlP36MykVrGueMGwKUy3zdCJEbcnLnxoZEU3V59aYr7+qbfMi0DwngAAAAAXPVMbwAAAAAAAAAAHdP8tBBtpeyL899tLPnNdaqUoHRWmHKjK8mobr412wEIdjVcHb3VJhQAtunNPjhphkSfh+hk4kcBwk4tfP9i6wAAACEN4Lazp2QAAAAAAAAAABS0AAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (32, '05e77cb96060cb1b543745f88fe36e3246b45b26a9ee597f194fdfa332915ac6', 'fe13366c4c53c04a65ec45e5dd7540d332a8c38456f71a46b8c197a8f61614b3', 2, 2, '2019-06-03 16:35:58', '2019-06-03 16:35:33.677273', '2019-06-03 16:35:33.677273', 137438953472, 16, 1000000000000000000, 5200, 100, 100000000, 1000000, 11, 'AAAAC/4TNmxMU8BKZexF5d11QNMyqMOEVvcaRrjBl6j2FhSzbdxxbfFDV+si6AGRvL5s+oVcsY8ZwkF5kL9EBypi7k8AAAAAXPVMbgAAAAAAAAAA2zD57d2oUw2L6murtm/oe2Dj6FuzBXnVm7l7HoDsMUzNqYbZejWmvwR44FVQPbMIdu2bjH6kX3IJ7Chi4YmgugAAACAN4Lazp2QAAAAAAAAAABRQAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (31, 'fe13366c4c53c04a65ec45e5dd7540d332a8c38456f71a46b8c197a8f61614b3', '750fedf881913e508427d7c6f1a31f032cffd8b513eab2fca3c482743c92e16c', 1, 1, '2019-06-03 16:35:57', '2019-06-03 16:35:33.694173', '2019-06-03 16:35:33.694173', 133143986176, 16, 1000000000000000000, 5000, 100, 100000000, 1000000, 11, 'AAAAC3UP7fiBkT5QhCfXxvGjHwMs/9i1E+qy/KPEgnQ8kuFscIqVmjyvqIlbFdmmCscTHwuF1xf/128KmopBLMQWNqwAAAAAXPVMbQAAAAAAAAAA8GmZ2mhGWPCcWAd1pU9SBJXfejtD7FOEoLD6oFsTOv4MKWINvUzkDiENnzMAs1lvH8EiEWJaRwrJRvN0YuRvzAAAAB8N4Lazp2QAAAAAAAAAABOIAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (30, '750fedf881913e508427d7c6f1a31f032cffd8b513eab2fca3c482743c92e16c', '936c1e1f0bf1af96380eb09e59e521d5e3ddd370d21ff4fd1a09ab123a4cfbaa', 1, 1, '2019-06-03 16:35:56', '2019-06-03 16:35:33.708481', '2019-06-03 16:35:33.708481', 128849018880, 16, 1000000000000000000, 4900, 100, 100000000, 1000000, 11, 
'AAAAC5NsHh8L8a+WOA6wnlnlIdXj3dNw0h/0/RoJqxI6TPuqkzOXz5mf2LgYnaoAtXxNZLk+UfT8YR92Qb9Z7YPbL3MAAAAAXPVMbAAAAAAAAAAA1jXLaoK/IOqKNVkib2cNLrjlETHXpZlcJ4BFYO8bM3mRzqpWfgvx7nf587dxjJYVNmgRlffJEzWDBgwhfJmgUwAAAB4N4Lazp2QAAAAAAAAAABMkAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (29, '936c1e1f0bf1af96380eb09e59e521d5e3ddd370d21ff4fd1a09ab123a4cfbaa', 'c14459522fe42e2d10b29a370bbeba513e4aee429282dbe44842835d1b443663', 1, 1, '2019-06-03 16:35:55', '2019-06-03 16:35:33.718442', '2019-06-03 16:35:33.718442', 124554051584, 16, 1000000000000000000, 4800, 100, 100000000, 1000000, 11, 'AAAAC8FEWVIv5C4tELKaNwu+ulE+Su5CkoLb5EhCg10bRDZjMHDsmJoTpyfMr0n7ZRj1PtVYciwS5F25/ebV5KmPHyIAAAAAXPVMawAAAAAAAAAAbaCD4RbfEqSrI+qpIyzzUU+bYYMJsRw3pHcmHwQrvaAiHUzESb7mxWgb6rD9i+bvYadjhxMRG/exoDWp8/oUMwAAAB0N4Lazp2QAAAAAAAAAABLAAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (28, 'c14459522fe42e2d10b29a370bbeba513e4aee429282dbe44842835d1b443663', '902b0f5e1ee506b9f9d688e464673bda2faa056876cdfe0ac250ef5c9df0a8a6', 7, 7, '2019-06-03 16:35:54', '2019-06-03 16:35:33.729461', '2019-06-03 16:35:33.729461', 120259084288, 16, 1000000000000000000, 4700, 100, 100000000, 1000000, 11, 'AAAAC5ArD14e5Qa5+daI5GRnO9ovqgVods3+CsJQ71yd8KimnNDwLYHe7gIHYMyxYiWPL6AdpOmOGCRINmPcEvmBn5oAAAAAXPVMagAAAAAAAAAAdQ/T4yJNKn9hiJoTuxDsNbd9ibRWf2hNHJhsiqBKWD1btlsiWANxqEZ3UBK2IsuYFM4a68rORQwudeZsBK1uVQAAABwN4Lazp2QAAAAAAAAAABJcAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 7, 0, NULL); +INSERT INTO history_ledgers VALUES (27, '902b0f5e1ee506b9f9d688e464673bda2faa056876cdfe0ac250ef5c9df0a8a6', '925e48fe8c319e23b85014ddf6e3698dbaeb15bd84e0e4435614f3125d847eaf', 1, 1, '2019-06-03 16:35:53', '2019-06-03 16:35:33.756347', '2019-06-03 16:35:33.756348', 115964116992, 16, 1000000000000000000, 4000, 100, 100000000, 1000000, 11, 'AAAAC5JeSP6MMZ4juFAU3fbjaY266xW9hODkQ1YU8xJdhH6vQydJIXEAs1uOD6IkUMs8GlnyPDTxRqoRKW4/wgZZaNwAAAAAXPVMaQAAAAAAAAAAkpeXdNXfmh1EnUjCYIMebdzV8PlgCe6J6eJq8Mb4SXYTMU9R5pbFQB/phEpxiGicTklBEXUSQW8XrtV0H9BN+AAAABsN4Lazp2QAAAAAAAAAAA+gAAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (26, '925e48fe8c319e23b85014ddf6e3698dbaeb15bd84e0e4435614f3125d847eaf', 'f54c0097d441ff76c487d4ebb18f5e5f52c5a51ca96800bf9b818295790a3778', 2, 2, '2019-06-03 16:35:52', '2019-06-03 16:35:33.767845', '2019-06-03 16:35:33.767845', 111669149696, 16, 1000000000000000000, 3900, 100, 100000000, 1000000, 11, 
'AAAAC/VMAJfUQf92xIfU67GPXl9SxaUcqWgAv5uBgpV5Cjd4ObBqces8zXZAjfLGaR+Md0gHwPV4L+NudPgSQJdsE58AAAAAXPVMaAAAAAAAAAAAGTKcjd5tsclF3P+ptDiMdUWp83tuYF+zlADp+X+gt+eBl1mZgQ2kBgOlDC3yM4k1XFSkg8nteVJMp/KhqOyW2wAAABoN4Lazp2QAAAAAAAAAAA88AAAAAAAAAAAAAAAGAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (25, 'f54c0097d441ff76c487d4ebb18f5e5f52c5a51ca96800bf9b818295790a3778', 'd71b122bfb10e1193597acf4769f99b716e79e7d1eaccf225adf5fd65cbeee37', 1, 1, '2019-06-03 16:35:51', '2019-06-03 16:35:33.779704', '2019-06-03 16:35:33.779704', 107374182400, 16, 1000000000000000000, 3700, 100, 100000000, 1000000, 11, 'AAAAC9cbEiv7EOEZNZes9HafmbcW5559HqzPIlrfX9Zcvu43C5djYweCkfubeYiyrAvxlPmuUHgxbk+pkX450lbiUBIAAAAAXPVMZwAAAAAAAAAAM9gLVDvjwMuVZ+njRjvvMxlhodRg/VJpuapiCb0y9SDsWIE0ZZXCJ72Msa/6UrUdTDk+mqNMq2KeZVKBjdtMbwAAABkN4Lazp2QAAAAAAAAAAA50AAAAAAAAAAAAAAAEAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (24, 'd71b122bfb10e1193597acf4769f99b716e79e7d1eaccf225adf5fd65cbeee37', 'd09c2755ea31109767d6ad6ad7155cff1b630edb6368a4b280bb68de24c2ebb4', 1, 1, '2019-06-03 16:35:50', '2019-06-03 16:35:33.791139', '2019-06-03 16:35:33.791139', 103079215104, 16, 1000000000000000000, 3600, 100, 100000000, 1000000, 11, 'AAAAC9CcJ1XqMRCXZ9atatcVXP8bYw7bY2iksoC7aN4kwuu03hM6c6SywARSnPb3ctF6foVdbgyxhp/R4/s+WE+te5AAAAAAXPVMZgAAAAAAAAAA1ziHajubDrj1Iu5EQ8YZB24Czm29AJPdaJnsX3+A+fV3KfdeiqJqyyhfk+7mwWUB0rbbvt6y640hL/bnoPMZ9gAAABgN4Lazp2QAAAAAAAAAAA4QAAAAAAAAAAAAAAAEAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (23, 'd09c2755ea31109767d6ad6ad7155cff1b630edb6368a4b280bb68de24c2ebb4', 'b8d81a94cc9ac1dcd4975e5bae0680a746f41ab923de9b68650422039f8df47a', 1, 1, '2019-06-03 16:35:49', '2019-06-03 16:35:33.815281', '2019-06-03 16:35:33.815282', 98784247808, 16, 1000000000000000000, 3500, 100, 100000000, 1000000, 11, 'AAAAC7jYGpTMmsHc1JdeW64GgKdG9Bq5I96baGUEIgOfjfR6OPg5AoiFUgt3GG0WOJr4C3JaQHm7MPBQisdC4hkzxS8AAAAAXPVMZQAAAAAAAAAAeu6ZLz++eOyW3/HPY616v+oF2X756WCT8G93ApdHifosUOpCb+sObcpszPtP9+Hy0MHmzxhpzAP/sTATcQpnKgAAABcN4Lazp2QAAAAAAAAAAA2sAAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (22, 'b8d81a94cc9ac1dcd4975e5bae0680a746f41ab923de9b68650422039f8df47a', '1e73266b9c1d6ed85ed20270b1c9c4081445d5f201189bebb5721c9b2565f749', 1, 1, '2019-06-03 16:35:48', '2019-06-03 16:35:33.827411', '2019-06-03 16:35:33.827411', 94489280512, 16, 1000000000000000000, 3400, 100, 100000000, 1000000, 11, 
'AAAACx5zJmucHW7YXtICcLHJxAgURdXyARib67VyHJslZfdJ4DzzvVfPje5vC6cghCFfrZwMHMEDMF1phwp/kIju/78AAAAAXPVMZAAAAAAAAAAA0AMiyT4CyDLLvayS0AmP97hen9j1rkIsoYeN0iQrjup0aPFduVIsJ+nfz9INCJSr58NOPxPNQTKZqTTYzS5+OwAAABYN4Lazp2QAAAAAAAAAAA1IAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (21, '1e73266b9c1d6ed85ed20270b1c9c4081445d5f201189bebb5721c9b2565f749', 'e0c98a06e3dc47183a4803f947cd00afe37d8b94dc34de429add40fa7c19c618', 2, 2, '2019-06-03 16:35:47', '2019-06-03 16:35:33.838148', '2019-06-03 16:35:33.838148', 90194313216, 16, 1000000000000000000, 3300, 100, 100000000, 1000000, 11, 'AAAAC+DJigbj3EcYOkgD+UfNAK/jfYuU3DTeQprdQPp8GcYYFo7+Glas2jipoGla9vrLLK0ooAiMI5GQeOkLV/uj9cIAAAAAXPVMYwAAAAAAAAAA3LAnNjSwmLo474ojoHgysfkXmeeBIXdgWbyOJwbnsx+QUdaKPfAXYhyvkzKQKE+TYbh3dMj8cX0gtf0+OoTtuwAAABUN4Lazp2QAAAAAAAAAAAzkAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (20, 'e0c98a06e3dc47183a4803f947cd00afe37d8b94dc34de429add40fa7c19c618', 'ce1cd7aab0e22a1e81c89bd171d0bda5d8dec28824143025f6f0d15a1d7acc18', 1, 1, '2019-06-03 16:35:46', '2019-06-03 16:35:33.847808', '2019-06-03 16:35:33.847808', 85899345920, 16, 1000000000000000000, 3100, 100, 100000000, 1000000, 11, 'AAAAC84c16qw4ioegcib0XHQvaXY3sKIJBQwJfbw0VodeswY+fowsLcKz7J1dE35gKnYRMa2zA3cqfuoNUM1feiZPu8AAAAAXPVMYgAAAAAAAAAAx/hxaEW2XNJB3iDf23eYfGcwcQ862pSt7OJ6FF108Ia/Chyidqfse/a37FifLAlDK+EuQ3xGyaqCS5MLkWQUXgAAABQN4Lazp2QAAAAAAAAAAAwcAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (19, 'ce1cd7aab0e22a1e81c89bd171d0bda5d8dec28824143025f6f0d15a1d7acc18', 'e6f54ea5dcfa926f5e46dd056690fb031dc392717dc0fbac0b2f411e84965210', 1, 1, '2019-06-03 16:35:45', '2019-06-03 16:35:33.86558', '2019-06-03 16:35:33.86558', 81604378624, 16, 1000000000000000000, 3000, 100, 100000000, 1000000, 11, 'AAAAC+b1TqXc+pJvXkbdBWaQ+wMdw5JxfcD7rAsvQR6EllIQaY9SIsx+2mzlbk+f9SEovenpLo/LJyUPh6A+g+VF8p4AAAAAXPVMYQAAAAAAAAAAYzC64fmWjZOtqE4JjsKqwerdnzN6ywnJSvdsDWFPcu3pJFtMmHmps2cAxoQk/qzf8QoNOBiBIGAE68eH9xRyaQAAABMN4Lazp2QAAAAAAAAAAAu4AAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (18, 'e6f54ea5dcfa926f5e46dd056690fb031dc392717dc0fbac0b2f411e84965210', '8aff00265fb98412902df9d5c30e194a01adbd792490ef58836478db9d8e3156', 3, 3, '2019-06-03 16:35:44', '2019-06-03 16:35:33.883905', '2019-06-03 16:35:33.883906', 77309411328, 16, 1000000000000000000, 2900, 100, 100000000, 1000000, 11, 
'AAAAC4r/ACZfuYQSkC351cMOGUoBrb15JJDvWINkeNudjjFWp7Hi+kDBELcUYlO/VeJpvL6v5oIQgjfBH9t2VsYU0vYAAAAAXPVMYAAAAAAAAAAANKxyAMoyM/gn59hBYzLxbU28Wbix3QHy7s086vpSY4dsfsh10yperbmIfKUawooPYNqN/QB2jK4qu2tAX+tIawAAABIN4Lazp2QAAAAAAAAAAAtUAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0, NULL); +INSERT INTO history_ledgers VALUES (17, '8aff00265fb98412902df9d5c30e194a01adbd792490ef58836478db9d8e3156', '6f4b7c957a42a32dd830d43212d5bbafeb9221538e4010a423455330ddb09b2b', 2, 2, '2019-06-03 16:35:43', '2019-06-03 16:35:33.897912', '2019-06-03 16:35:33.897912', 73014444032, 16, 1000000000000000000, 2600, 100, 100000000, 1000000, 11, 'AAAAC29LfJV6QqMt2DDUMhLVu6/rkiFTjkAQpCNFUzDdsJsrycO3J/Hp2cEvr433GPeq3XtJHPV8+zXdxLRRUx16QhEAAAAAXPVMXwAAAAAAAAAAqHWOFRdWDweK4ZIEBYTYOkzwi2q8iIoB9xFzAOqPS/ffjPLaERGDLrYC7tZYVR7nTTTU7Tn04Nc3TOs13gsC1wAAABEN4Lazp2QAAAAAAAAAAAooAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (16, '6f4b7c957a42a32dd830d43212d5bbafeb9221538e4010a423455330ddb09b2b', '8a29232992d69bb61f457c768a745b6fb2d81dadb8d6780f4960b9ee382c9ad5', 3, 3, '2019-06-03 16:35:42', '2019-06-03 16:35:33.910541', '2019-06-03 16:35:33.910541', 68719476736, 16, 1000000000000000000, 2400, 100, 100000000, 1000000, 11, 'AAAAC4opIymS1pu2H0V8dop0W2+y2B2tuNZ4D0lgue44LJrVY1gAUWBGwQQPMqY6PP8FFyppkHosKR4TD1wsYd//I+cAAAAAXPVMXgAAAAAAAAAAYKou7BkPtQtmD4O54/TToUgBAVUCmCl/O+9Tgq2hNHzlKY9Qg5CAIxdRXbs/XcVnEb3JYJf/Dk9K0NMoW5O8TwAAABAN4Lazp2QAAAAAAAAAAAlgAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0, NULL); +INSERT INTO history_ledgers VALUES (15, '8a29232992d69bb61f457c768a745b6fb2d81dadb8d6780f4960b9ee382c9ad5', 'a64af1c08fb5d28abcd1fc9dae8ccb1bfc9bbec1b7b477e14fca2cbadca4907c', 1, 1, '2019-06-03 16:35:41', '2019-06-03 16:35:33.920642', '2019-06-03 16:35:33.920642', 64424509440, 16, 1000000000000000000, 2100, 100, 100000000, 1000000, 11, 'AAAAC6ZK8cCPtdKKvNH8na6Myxv8m77Bt7R34U/KLLrcpJB8d2gda9aXjRx0ma+uG5InDPEEWb37fjZyNQyPwPiQEvEAAAAAXPVMXQAAAAAAAAAAUsjUlWJzXzXDGEu7ZX8NM4G3hXMJgTqm1o4qvEMfb6GHCRkVinBcpegw5bhvYMpwJ2bdfeb93l1bSBPVUDjwrgAAAA8N4Lazp2QAAAAAAAAAAAg0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (14, 'a64af1c08fb5d28abcd1fc9dae8ccb1bfc9bbec1b7b477e14fca2cbadca4907c', '0c14a465f60001303a566fec15f2cbcda27abc34e9c8b51b88954a2f9b30f6b1', 2, 2, '2019-06-03 16:35:40', '2019-06-03 16:35:33.933196', '2019-06-03 16:35:33.933196', 60129542144, 16, 1000000000000000000, 2000, 100, 100000000, 1000000, 11, 
'AAAACwwUpGX2AAEwOlZv7BXyy82ierw06ci1G4iVSi+bMPaxv5ctYdcMSEm83sSvokp32C1S/AIgbbctZWaWvUxN2AMAAAAAXPVMXAAAAAAAAAAAl8eMbvLJtOBoTCvorlEKxO6gpnsCesjn+1C1/HG0v1rd27oO7+VWSEG8r8fcOFYK8k3ZdplUbzIdfIxkNaBaKQAAAA4N4Lazp2QAAAAAAAAAAAfQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (13, '0c14a465f60001303a566fec15f2cbcda27abc34e9c8b51b88954a2f9b30f6b1', '039d9303c20c9a58082f15f4875e84c1f0ce4dc20ea237866c59d597cc349a1d', 2, 2, '2019-06-03 16:35:39', '2019-06-03 16:35:33.94895', '2019-06-03 16:35:33.94895', 55834574848, 16, 1000000000000000000, 1800, 100, 100000000, 1000000, 11, 'AAAACwOdkwPCDJpYCC8V9IdehMHwzk3CDqI3hmxZ1ZfMNJodFfvdztxrYHP9xzdqb/9pcKohIXiTntWz+SxP2oy5B3sAAAAAXPVMWwAAAAAAAAAAFkWUNyF+w67R5n+HgDNebl5snV2YlBsz3xLTqBMQsQEt+GWM9UtEdrkYmmI5VHYouT3zPlYpLCOylqjq2ciR9gAAAA0N4Lazp2QAAAAAAAAAAAcIAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0, NULL); +INSERT INTO history_ledgers VALUES (12, '039d9303c20c9a58082f15f4875e84c1f0ce4dc20ea237866c59d597cc349a1d', '5e5e893b0f9078a77603f60689405d24d24247d0e19b00f815d212befb819656', 1, 1, '2019-06-03 16:35:38', '2019-06-03 16:35:33.960179', '2019-06-03 16:35:33.960179', 51539607552, 16, 1000000000000000000, 1600, 100, 100000000, 1000000, 11, 'AAAAC15eiTsPkHindgP2BolAXSTSQkfQ4ZsA+BXSEr77gZZWFSHyciAPeIm0Aaehq+XrRbfBf2YWt2omzfQnMSnMapcAAAAAXPVMWgAAAAAAAAAAc4P3o+BJiJvXXIfj/eVQPF1RRIrnE2nfX0a4J7WFySrDTzVqaL+dwGrt1jCSqZ7dkPeUD98aSKLZN1dQHYoQhQAAAAwN4Lazp2QAAAAAAAAAAAZAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (11, '5e5e893b0f9078a77603f60689405d24d24247d0e19b00f815d212befb819656', '62ad4b3aabc2a93e094118968e6114d1e15af1dfb753f5b7048c01be5fb3e9b9', 1, 1, '2019-06-03 16:35:37', '2019-06-03 16:35:33.971637', '2019-06-03 16:35:33.971637', 47244640256, 16, 1000000000000000000, 1500, 100, 100000000, 1000000, 11, 'AAAAC2KtSzqrwqk+CUEYlo5hFNHhWvHft1P1twSMAb5fs+m577MF5b1Iei3zHF9EAmsLT84l7YbAUSH/7F/wymRq98QAAAAAXPVMWQAAAAAAAAAApIxpaTPANJS3XftGXFSYrEJ0MhfN6GN1Oe5+YEP609+t1m4CDabbFVCwxaT1VIvC4WC/6n6mrrOSvo/Ob5mk1QAAAAsN4Lazp2QAAAAAAAAAAAXcAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (10, '62ad4b3aabc2a93e094118968e6114d1e15af1dfb753f5b7048c01be5fb3e9b9', '69f27e72b97f7cb7bfd7f7cd0012b7ba080d3dec181688848626b1e1bad25ea5', 1, 2, '2019-06-03 16:35:36', '2019-06-03 16:35:33.980663', '2019-06-03 16:35:33.980663', 42949672960, 16, 1000000000000000000, 1400, 100, 100000000, 1000000, 11, 
'AAAAC2nyfnK5f3y3v9f3zQASt7oIDT3sGBaIhIYmseG60l6lp9U56RSocR4ZpXXZs/glcnGVHESVFFU6LiK9fweLLYkAAAAAXPVMWAAAAAAAAAAA9+jjsWK6v6g0OYMFxTo1+Yogi2yDSjXhJ86N1AxJOvEc1lxN2lpI8SegGY6tvAaspu4dHxmLVdrEkjSlIYvfcgAAAAoN4Lazp2QAAAAAAAAAAAV4AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (9, '69f27e72b97f7cb7bfd7f7cd0012b7ba080d3dec181688848626b1e1bad25ea5', '6afd133af73ffbd7ed56c265d6d3b7e374b0d17051cd1cf8e9edb76befcdb47b', 1, 1, '2019-06-03 16:35:35', '2019-06-03 16:35:33.989863', '2019-06-03 16:35:33.989863', 38654705664, 16, 1000000000000000000, 1200, 100, 100000000, 1000000, 11, 'AAAAC2r9Ezr3P/vX7VbCZdbTt+N0sNFwUc0c+Ontt2vvzbR7I1aOU+Htzv1YyoOW+NUXosjEw6RZFhwtO1PMnARAtukAAAAAXPVMVwAAAAAAAAAAbXQLWug2IZ2G4RnwUrCTvIwO6TMpu21+S2UXhqLKWTHGAwX5y0FVUEjDd5YDib7wh/bNSxZ606YM0D5P9vigSgAAAAkN4Lazp2QAAAAAAAAAAASwAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (8, '6afd133af73ffbd7ed56c265d6d3b7e374b0d17051cd1cf8e9edb76befcdb47b', '0cba5d6486b080c20944d8e2078da05854eb4d68b0c44a7a70f06dfbb28c0809', 4, 4, '2019-06-03 16:35:34', '2019-06-03 16:35:33.999565', '2019-06-03 16:35:33.999565', 34359738368, 16, 1000000000000000000, 1100, 100, 100000000, 1000000, 11, 'AAAACwy6XWSGsIDCCUTY4geNoFhU601osMRKenDwbfuyjAgJZ/gVHh4EGoRXjxBlJXgdb6o55e5LPB8/rItX+iBMmg0AAAAAXPVMVgAAAAAAAAAA5fh0DRZ+OfeA4iH3GTMbG6cqJGEx0qC57216HbTwQPU0hzOQ0tEwod0WKGD5kM6LwVHLKbIOzBmb/9SIwEo4BwAAAAgN4Lazp2QAAAAAAAAAAARMAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0, NULL); +INSERT INTO history_ledgers VALUES (7, '0cba5d6486b080c20944d8e2078da05854eb4d68b0c44a7a70f06dfbb28c0809', 'e56b9392f09492f4084789a6f7eea3274d6991c918d95096009e985fbe6c0d09', 1, 1, '2019-06-03 16:35:33', '2019-06-03 16:35:34.011488', '2019-06-03 16:35:34.011488', 30064771072, 16, 1000000000000000000, 700, 100, 100000000, 1000000, 11, 'AAAAC+Vrk5LwlJL0CEeJpvfuoydNaZHJGNlQlgCemF++bA0JrU2Bn4aTjYMLpDfffI/e6HD3GSRCEJo4ArYti94OWqYAAAAAXPVMVQAAAAAAAAAAk5HOQp3TCIyOUj8VmNGrhs2C+IXj942pn3qPVIAXXx6Y378rPisQ/PWRLazxMClLw8L2UkId9o64eIplaVk81gAAAAcN4Lazp2QAAAAAAAAAAAK8AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (6, 'e56b9392f09492f4084789a6f7eea3274d6991c918d95096009e985fbe6c0d09', 'f5f9c1fc700571da5df0d6e0ecedab78fe81e6f66054e4352f97a3f0cf3d5f37', 1, 1, '2019-06-03 16:35:32', '2019-06-03 16:35:34.0218', '2019-06-03 16:35:34.0218', 25769803776, 16, 1000000000000000000, 600, 100, 100000000, 1000000, 11, 
'AAAAC/X5wfxwBXHaXfDW4Oztq3j+geb2YFTkNS+Xo/DPPV83Ny97k/0F2AWH2PP6jBxodIXMgjmRLJwhDtO8tQZgf8sAAAAAXPVMVAAAAAAAAAAAN8Q2AtCCsel4HIsK4udqXyLQqcraGjcAHVSUN815grFwXcnuHU1uUpGqTUtbI/yZP1yYvQEniAuMf2hiwiatTAAAAAYN4Lazp2QAAAAAAAAAAAJYAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (5, 'f5f9c1fc700571da5df0d6e0ecedab78fe81e6f66054e4352f97a3f0cf3d5f37', 'c26019112e7d1a307b7b6a843ad56fd73b51fbdc2e635fe7b0fcc8bae46e9417', 3, 3, '2019-06-03 16:35:31', '2019-06-03 16:35:34.03588', '2019-06-03 16:35:34.035881', 21474836480, 16, 1000000000000000000, 500, 100, 100000000, 1000000, 11, 'AAAAC8JgGREufRowe3tqhDrVb9c7UfvcLmNf57D8yLrkbpQXyz9iY2rHhwuUvOFuLE6mBvTO46hI+XxK2J5c1tuLsiUAAAAAXPVMUwAAAAAAAAAAk6vO/KRxwZ0ynJMMrKUpOSf5WhbdkNk0LIO/zZxB+qeX7aYG0z3XmFwxmr34PrxhVy8SRzlUMGjhTLWe3i+lJAAAAAUN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0, NULL); +INSERT INTO history_ledgers VALUES (4, 'c26019112e7d1a307b7b6a843ad56fd73b51fbdc2e635fe7b0fcc8bae46e9417', '341ff9c498b9076f8f21aea78a33bc0c639ab5cf986adbc2d8d04d92b5a9d6df', 1, 1, '2019-06-03 16:35:30', '2019-06-03 16:35:34.052795', '2019-06-03 16:35:34.052795', 17179869184, 16, 1000000000000000000, 200, 100, 100000000, 1000000, 11, 'AAAACzQf+cSYuQdvjyGup4ozvAxjmrXPmGrbwtjQTZK1qdbfS+Xef4FXXIT2kc1XcfznO1AYP9JNuDCI1JFXHpnU9IsAAAAAXPVMUgAAAAAAAAAAI8FeHEzJ0/4FO8/NG/43wTEny1CW/vcH2jcrQkzbbcj+Ff6Dfgn4FQTGJXl3s+xBqyBn+uvEPEYwqrPTrPE/VAAAAAQN4Lazp2QAAAAAAAAAAADIAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (3, '341ff9c498b9076f8f21aea78a33bc0c639ab5cf986adbc2d8d04d92b5a9d6df', 'fd9268d16d7501b6eb7956b6756b28fe43ce5a6010b76e776d91f60e5eed7b2a', 1, 1, '2019-06-03 16:35:29', '2019-06-03 16:35:34.065174', '2019-06-03 16:35:34.065175', 12884901888, 16, 1000000000000000000, 100, 100, 100000000, 1000000, 11, 'AAAAC/2SaNFtdQG263lWtnVrKP5DzlpgELdud22R9g5e7XsqDZGQBE1YsotIrRXkw7x6/cKPd2zOCBuz7H2RlRoSGYEAAAAAXPVMUQAAAAAAAAAA5PnvEvMOE8kyDs3Gbu1hou8cmoww1I7xqYGObt0Zo0oZAKD5U1RxNy8ovj+4OjM4rIB0P/C3cQ+hCLArM49g+AAAAAMN4Lazp2QAAAAAAAAAAABkAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (2, 'fd9268d16d7501b6eb7956b6756b28fe43ce5a6010b76e776d91f60e5eed7b2a', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 0, 0, '2019-06-03 16:35:28', '2019-06-03 16:35:34.076291', '2019-06-03 16:35:34.076291', 8589934592, 16, 1000000000000000000, 0, 100, 100000000, 1000000, 11, 
'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZuZRHr9UdXKbTKiclfOjy72YZFJUkJPVcKT5htvorm1QAAAAAXPVMUAAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnv+Ltt/ycz/x8/+lFB80rnVx7j2Mrm29EpusUR+gv9ZAAAAAIN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0, NULL); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-06-03 16:35:34.082668', '2019-06-03 16:35:34.082668', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0, NULL); + + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 261993009153, 1); +INSERT INTO history_operation_participants VALUES (2, 257698041857, 1); +INSERT INTO history_operation_participants VALUES (3, 253403074561, 1); +INSERT INTO history_operation_participants VALUES (4, 249108107265, 1); +INSERT INTO history_operation_participants VALUES (5, 244813139969, 1); +INSERT INTO history_operation_participants VALUES (6, 244813139969, 2); +INSERT INTO history_operation_participants VALUES (7, 240518172673, 3); +INSERT INTO history_operation_participants VALUES (8, 236223205377, 2); +INSERT INTO history_operation_participants VALUES (9, 236223205377, 3); +INSERT INTO history_operation_participants VALUES (10, 231928238081, 2); +INSERT INTO history_operation_participants VALUES (11, 231928238081, 4); +INSERT INTO history_operation_participants VALUES (12, 231928238082, 4); +INSERT INTO history_operation_participants VALUES (13, 231928238082, 2); +INSERT INTO history_operation_participants VALUES (14, 227633270785, 2); +INSERT INTO history_operation_participants VALUES (15, 227633270785, 4); +INSERT INTO history_operation_participants VALUES (16, 223338303489, 5); +INSERT INTO history_operation_participants VALUES (17, 219043336193, 5); +INSERT INTO history_operation_participants VALUES (18, 214748368897, 5); +INSERT INTO history_operation_participants VALUES (19, 210453401601, 5); +INSERT INTO history_operation_participants VALUES (20, 210453405697, 5); +INSERT INTO history_operation_participants VALUES (21, 210453409793, 5); +INSERT INTO history_operation_participants VALUES (22, 206158434305, 2); +INSERT INTO history_operation_participants VALUES (23, 206158434305, 5); +INSERT INTO history_operation_participants VALUES (24, 201863467009, 2); +INSERT INTO history_operation_participants VALUES (25, 197568499713, 6); +INSERT INTO history_operation_participants VALUES (26, 197568503809, 2); +INSERT INTO history_operation_participants VALUES (27, 193273532417, 6); +INSERT INTO history_operation_participants VALUES (28, 193273532417, 2); +INSERT INTO history_operation_participants VALUES (29, 188978565121, 2); +INSERT INTO history_operation_participants VALUES (30, 188978565121, 7); +INSERT INTO 
history_operation_participants VALUES (31, 184683597825, 7); +INSERT INTO history_operation_participants VALUES (32, 184683597825, 2); +INSERT INTO history_operation_participants VALUES (33, 180388630529, 8); +INSERT INTO history_operation_participants VALUES (34, 180388630529, 9); +INSERT INTO history_operation_participants VALUES (35, 176093663233, 8); +INSERT INTO history_operation_participants VALUES (36, 176093663233, 9); +INSERT INTO history_operation_participants VALUES (37, 176093667329, 8); +INSERT INTO history_operation_participants VALUES (38, 176093667329, 9); +INSERT INTO history_operation_participants VALUES (39, 171798695937, 9); +INSERT INTO history_operation_participants VALUES (40, 171798700033, 9); +INSERT INTO history_operation_participants VALUES (41, 167503728641, 8); +INSERT INTO history_operation_participants VALUES (42, 163208761345, 2); +INSERT INTO history_operation_participants VALUES (43, 163208761345, 9); +INSERT INTO history_operation_participants VALUES (44, 163208765441, 2); +INSERT INTO history_operation_participants VALUES (45, 163208765441, 8); +INSERT INTO history_operation_participants VALUES (46, 158913794049, 10); +INSERT INTO history_operation_participants VALUES (47, 154618826753, 10); +INSERT INTO history_operation_participants VALUES (48, 150323859457, 10); +INSERT INTO history_operation_participants VALUES (49, 146028892161, 10); +INSERT INTO history_operation_participants VALUES (50, 141733924865, 2); +INSERT INTO history_operation_participants VALUES (51, 141733924865, 10); +INSERT INTO history_operation_participants VALUES (52, 137438957569, 11); +INSERT INTO history_operation_participants VALUES (53, 137438961665, 11); +INSERT INTO history_operation_participants VALUES (54, 133143990273, 11); +INSERT INTO history_operation_participants VALUES (55, 128849022977, 11); +INSERT INTO history_operation_participants VALUES (56, 124554055681, 11); +INSERT INTO history_operation_participants VALUES (57, 120259088385, 11); +INSERT INTO history_operation_participants VALUES (58, 120259092481, 11); +INSERT INTO history_operation_participants VALUES (59, 120259096577, 11); +INSERT INTO history_operation_participants VALUES (60, 120259100673, 11); +INSERT INTO history_operation_participants VALUES (61, 120259104769, 11); +INSERT INTO history_operation_participants VALUES (62, 120259108865, 11); +INSERT INTO history_operation_participants VALUES (63, 120259112961, 11); +INSERT INTO history_operation_participants VALUES (64, 115964121089, 2); +INSERT INTO history_operation_participants VALUES (65, 115964121089, 11); +INSERT INTO history_operation_participants VALUES (66, 111669153793, 12); +INSERT INTO history_operation_participants VALUES (67, 111669157889, 12); +INSERT INTO history_operation_participants VALUES (68, 107374186497, 2); +INSERT INTO history_operation_participants VALUES (69, 107374186497, 12); +INSERT INTO history_operation_participants VALUES (70, 103079219201, 14); +INSERT INTO history_operation_participants VALUES (71, 98784251905, 13); +INSERT INTO history_operation_participants VALUES (72, 94489284609, 13); +INSERT INTO history_operation_participants VALUES (73, 90194317313, 2); +INSERT INTO history_operation_participants VALUES (74, 90194317313, 13); +INSERT INTO history_operation_participants VALUES (75, 90194321409, 2); +INSERT INTO history_operation_participants VALUES (76, 90194321409, 14); +INSERT INTO history_operation_participants VALUES (77, 85899350017, 16); +INSERT INTO history_operation_participants VALUES (78, 85899350017, 
17); +INSERT INTO history_operation_participants VALUES (79, 81604382721, 16); +INSERT INTO history_operation_participants VALUES (80, 81604382721, 17); +INSERT INTO history_operation_participants VALUES (81, 77309415425, 15); +INSERT INTO history_operation_participants VALUES (82, 77309415425, 16); +INSERT INTO history_operation_participants VALUES (83, 77309419521, 15); +INSERT INTO history_operation_participants VALUES (84, 77309423617, 15); +INSERT INTO history_operation_participants VALUES (85, 73014448129, 16); +INSERT INTO history_operation_participants VALUES (86, 73014452225, 17); +INSERT INTO history_operation_participants VALUES (87, 68719480833, 2); +INSERT INTO history_operation_participants VALUES (88, 68719480833, 16); +INSERT INTO history_operation_participants VALUES (89, 68719484929, 2); +INSERT INTO history_operation_participants VALUES (90, 68719484929, 17); +INSERT INTO history_operation_participants VALUES (91, 68719489025, 2); +INSERT INTO history_operation_participants VALUES (92, 68719489025, 15); +INSERT INTO history_operation_participants VALUES (93, 64424513537, 19); +INSERT INTO history_operation_participants VALUES (94, 64424513537, 18); +INSERT INTO history_operation_participants VALUES (95, 60129546241, 19); +INSERT INTO history_operation_participants VALUES (96, 60129546241, 18); +INSERT INTO history_operation_participants VALUES (97, 60129550337, 18); +INSERT INTO history_operation_participants VALUES (98, 55834578945, 2); +INSERT INTO history_operation_participants VALUES (99, 55834578945, 19); +INSERT INTO history_operation_participants VALUES (100, 55834583041, 2); +INSERT INTO history_operation_participants VALUES (101, 55834583041, 18); +INSERT INTO history_operation_participants VALUES (102, 51539611649, 21); +INSERT INTO history_operation_participants VALUES (103, 51539611649, 20); +INSERT INTO history_operation_participants VALUES (104, 47244644353, 2); +INSERT INTO history_operation_participants VALUES (105, 47244644353, 21); +INSERT INTO history_operation_participants VALUES (106, 42949677057, 22); +INSERT INTO history_operation_participants VALUES (107, 42949677057, 2); +INSERT INTO history_operation_participants VALUES (108, 42949677058, 22); +INSERT INTO history_operation_participants VALUES (109, 42949677058, 2); +INSERT INTO history_operation_participants VALUES (110, 38654709761, 2); +INSERT INTO history_operation_participants VALUES (111, 38654709761, 22); +INSERT INTO history_operation_participants VALUES (112, 34359742465, 23); +INSERT INTO history_operation_participants VALUES (113, 34359742465, 2); +INSERT INTO history_operation_participants VALUES (114, 34359746561, 23); +INSERT INTO history_operation_participants VALUES (115, 34359746561, 2); +INSERT INTO history_operation_participants VALUES (116, 34359750657, 23); +INSERT INTO history_operation_participants VALUES (117, 34359750657, 2); +INSERT INTO history_operation_participants VALUES (118, 34359754753, 23); +INSERT INTO history_operation_participants VALUES (119, 34359754753, 2); +INSERT INTO history_operation_participants VALUES (120, 30064775169, 2); +INSERT INTO history_operation_participants VALUES (121, 30064775169, 23); +INSERT INTO history_operation_participants VALUES (122, 25769807873, 24); +INSERT INTO history_operation_participants VALUES (123, 21474840577, 24); +INSERT INTO history_operation_participants VALUES (124, 21474844673, 24); +INSERT INTO history_operation_participants VALUES (125, 21474848769, 24); +INSERT INTO history_operation_participants VALUES (126, 
17179873281, 24); +INSERT INTO history_operation_participants VALUES (127, 17179873281, 2); +INSERT INTO history_operation_participants VALUES (128, 12884905985, 2); +INSERT INTO history_operation_participants VALUES (129, 12884905985, 25); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 129, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (261993009153, 261993009152, 1, 11, '{"bump_to": "300000000003"}', 'GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN'); +INSERT INTO history_operations VALUES (257698041857, 257698041856, 1, 11, '{"bump_to": "300000000001"}', 'GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN'); +INSERT INTO history_operations VALUES (253403074561, 253403074560, 1, 11, '{"bump_to": "100"}', 'GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN'); +INSERT INTO history_operations VALUES (249108107265, 249108107264, 1, 11, '{"bump_to": "300000000000"}', 'GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN'); +INSERT INTO history_operations VALUES (244813139969, 244813139968, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (240518172673, 240518172672, 1, 1, '{"to": "GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", "from": "GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", "amount": "10.0000000", "asset_type": "native"}', 'GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y'); +INSERT INTO history_operations VALUES (236223205377, 236223205376, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (231928238081, 231928238080, 1, 1, '{"to": "GACJPE4YUR22VP4CM2BDFDAHY3DLEF3H7NENKUQ53DT5TEI2GAHT5N4X", "from": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "amount": "10.0000000", "asset_type": "native"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (231928238082, 231928238080, 2, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GACJPE4YUR22VP4CM2BDFDAHY3DLEF3H7NENKUQ53DT5TEI2GAHT5N4X", "amount": "10.0000000", "asset_type": "native"}', 'GACJPE4YUR22VP4CM2BDFDAHY3DLEF3H7NENKUQ53DT5TEI2GAHT5N4X'); +INSERT INTO history_operations VALUES (227633270785, 227633270784, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GACJPE4YUR22VP4CM2BDFDAHY3DLEF3H7NENKUQ53DT5TEI2GAHT5N4X", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (223338303489, 223338303488, 1, 10, '{"name": "name1", "value": "MDAwMA=="}', 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD'); +INSERT INTO history_operations VALUES (219043336193, 219043336192, 1, 10, '{"name": "name1", "value": "MTIzNA=="}', 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD'); +INSERT INTO history_operations VALUES (214748368897, 214748368896, 1, 10, '{"name": "name2", "value": 
null}', 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD'); +INSERT INTO history_operations VALUES (210453401601, 210453401600, 1, 10, '{"name": "name1", "value": "MTIzNA=="}', 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD'); +INSERT INTO history_operations VALUES (210453405697, 210453405696, 1, 10, '{"name": "name2", "value": "NTY3OA=="}', 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD'); +INSERT INTO history_operations VALUES (210453409793, 210453409792, 1, 10, '{"name": "name ", "value": "aXRzIGdvdCBzcGFjZXMh"}', 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD'); +INSERT INTO history_operations VALUES (206158434305, 206158434304, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (201863467009, 201863467008, 1, 9, '{}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (197568499713, 197568499712, 1, 5, '{"inflation_dest": "GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS"}', 'GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS'); +INSERT INTO history_operations VALUES (197568503809, 197568503808, 1, 5, '{"inflation_dest": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (193273532417, 193273532416, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS", "starting_balance": "20000000000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (188978565121, 188978565120, 1, 8, '{"into": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ"}', 'GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ'); +INSERT INTO history_operations VALUES (184683597825, 184683597824, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (180388630529, 180388630528, 1, 7, '{"trustee": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", "trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", "authorize": false, "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}', 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF'); +INSERT INTO history_operations VALUES (176093663233, 176093663232, 1, 7, '{"trustee": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", "trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", "authorize": true, "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}', 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF'); +INSERT INTO history_operations VALUES (176093667329, 176093667328, 1, 7, '{"trustee": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", "trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", "authorize": true, "asset_code": "EUR", 
"asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}', 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF'); +INSERT INTO history_operations VALUES (171798695937, 171798695936, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", "trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}', 'GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG'); +INSERT INTO history_operations VALUES (171798700033, 171798700032, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", "trustor": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF"}', 'GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG'); +INSERT INTO history_operations VALUES (167503728641, 167503728640, 1, 5, '{"set_flags": [1, 2], "set_flags_s": ["auth_required", "auth_revocable"]}', 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF'); +INSERT INTO history_operations VALUES (163208761345, 163208761344, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (163208765441, 163208765440, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (158913794049, 158913794048, 1, 6, '{"limit": "0.0000000", "trustee": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "trustor": "GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}', 'GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG'); +INSERT INTO history_operations VALUES (154618826753, 154618826752, 1, 6, '{"limit": "100.0000000", "trustee": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "trustor": "GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}', 'GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG'); +INSERT INTO history_operations VALUES (150323859457, 150323859456, 1, 6, '{"limit": "100.0000000", "trustee": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "trustor": "GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}', 'GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG'); +INSERT INTO history_operations VALUES (146028892161, 146028892160, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "trustor": "GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": 
"GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}', 'GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG'); +INSERT INTO history_operations VALUES (141733924865, 141733924864, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (137438957569, 137438957568, 1, 5, '{"clear_flags": [1, 2], "clear_flags_s": ["auth_required", "auth_revocable"]}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (137438961665, 137438961664, 1, 5, '{"signer_key": "GB6J3WOLKYQE6KVDZEA4JDMFTTONUYP3PUHNDNZRWIKA6JQWIMJZATFE", "signer_weight": 0}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (133143990273, 133143990272, 1, 5, '{"signer_key": "GB6J3WOLKYQE6KVDZEA4JDMFTTONUYP3PUHNDNZRWIKA6JQWIMJZATFE", "signer_weight": 5}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (128849022977, 128849022976, 1, 5, '{"signer_key": "GB6J3WOLKYQE6KVDZEA4JDMFTTONUYP3PUHNDNZRWIKA6JQWIMJZATFE", "signer_weight": 1}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (124554055681, 124554055680, 1, 5, '{"master_key_weight": 2}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (120259088385, 120259088384, 1, 5, '{"inflation_dest": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H"}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (120259092481, 120259092480, 1, 5, '{"set_flags": [1], "set_flags_s": ["auth_required"]}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (120259096577, 120259096576, 1, 5, '{"set_flags": [2], "set_flags_s": ["auth_revocable"]}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (120259100673, 120259100672, 1, 5, '{"master_key_weight": 2}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (120259104769, 120259104768, 1, 5, '{"low_threshold": 0, "med_threshold": 2, "high_threshold": 2}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (120259108865, 120259108864, 1, 5, '{"home_domain": "example.com"}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (120259112961, 120259112960, 1, 5, '{"signer_key": "GB6J3WOLKYQE6KVDZEA4JDMFTTONUYP3PUHNDNZRWIKA6JQWIMJZATFE", "signer_weight": 1}', 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES'); +INSERT INTO history_operations VALUES (115964121089, 115964121088, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (111669153793, 111669153792, 1, 4, '{"price": "1.0000000", "amount": "200.0000000", "price_r": {"d": 1, "n": 1}, "buying_asset_type": "native", "selling_asset_code": "USD", "selling_asset_type": "credit_alphanum4", "selling_asset_issuer": "GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q"}', 
'GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q'); +INSERT INTO history_operations VALUES (111669157889, 111669157888, 1, 4, '{"price": "1.0000000", "amount": "200.0000000", "price_r": {"d": 1, "n": 1}, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_type": "native", "buying_asset_issuer": "GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q"}', 'GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q'); +INSERT INTO history_operations VALUES (107374186497, 107374186496, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (103079219201, 103079219200, 1, 3, '{"price": "1.0000000", "amount": "30.0000000", "price_r": {"d": 1, "n": 1}, "offer_id": 0, "buying_asset_type": "native", "selling_asset_code": "USD", "selling_asset_type": "credit_alphanum4", "selling_asset_issuer": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD"}', 'GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD'); +INSERT INTO history_operations VALUES (98784251905, 98784251904, 1, 3, '{"price": "1.0000000", "amount": "20.0000000", "price_r": {"d": 1, "n": 1}, "offer_id": 0, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_type": "native", "buying_asset_issuer": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD"}', 'GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC'); +INSERT INTO history_operations VALUES (94489284609, 94489284608, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD", "trustor": "GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD"}', 'GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC'); +INSERT INTO history_operations VALUES (90194317313, 90194317312, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (90194321409, 90194321408, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (85899350017, 85899350016, 1, 2, '{"to": "GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP", "from": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", "path": [], "amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "source_max": "100.0000000", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", "source_amount": "100.0000000", "source_asset_type": "native"}', 'GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD'); +INSERT INTO history_operations VALUES (81604382721, 81604382720, 1, 2, '{"to": "GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP", "from": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", "path": [{"asset_type": "native"}], "amount": "200.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "source_max": 
"100.0000000", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", "source_amount": "100.0000000", "source_asset_code": "USD", "source_asset_type": "credit_alphanum4", "source_asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}', 'GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD'); +INSERT INTO history_operations VALUES (77309415425, 77309415424, 1, 1, '{"to": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", "from": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", "amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}', 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU'); +INSERT INTO history_operations VALUES (77309419521, 77309419520, 1, 3, '{"price": "0.5000000", "amount": "400.0000000", "price_r": {"d": 2, "n": 1}, "offer_id": 0, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_type": "native", "buying_asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}', 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU'); +INSERT INTO history_operations VALUES (77309423617, 77309423616, 1, 3, '{"price": "1.0000000", "amount": "300.0000000", "price_r": {"d": 1, "n": 1}, "offer_id": 0, "buying_asset_type": "native", "selling_asset_code": "EUR", "selling_asset_type": "credit_alphanum4", "selling_asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}', 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU'); +INSERT INTO history_operations VALUES (73014448129, 73014448128, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", "trustor": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}', 'GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD'); +INSERT INTO history_operations VALUES (73014452225, 73014452224, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", "trustor": "GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU"}', 'GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP'); +INSERT INTO history_operations VALUES (68719480833, 68719480832, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (68719484929, 68719484928, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (68719489025, 68719489024, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (64424513537, 64424513536, 1, 1, '{"to": 
"GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C", "from": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG", "amount": "10.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG"}', 'GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG'); +INSERT INTO history_operations VALUES (60129546241, 60129546240, 1, 1, '{"to": "GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C", "from": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG", "amount": "10.0000000", "asset_type": "native"}', 'GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG'); +INSERT INTO history_operations VALUES (60129550337, 60129550336, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG", "trustor": "GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG"}', 'GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C'); +INSERT INTO history_operations VALUES (55834578945, 55834578944, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (55834583041, 55834583040, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (51539611649, 51539611648, 1, 0, '{"funder": "GDCVTBGSEEU7KLXUMHMSXBALXJ2T4I2KOPXW2S5TRLKDRIAXD5UDHAYO", "account": "GCB7FPYGLL6RJ37HKRAYW5TAWMFBGGFGM4IM6ERBCZXI2BZ4OOOX2UAY", "starting_balance": "50.0000000"}', 'GDCVTBGSEEU7KLXUMHMSXBALXJ2T4I2KOPXW2S5TRLKDRIAXD5UDHAYO'); +INSERT INTO history_operations VALUES (47244644353, 47244644352, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GDCVTBGSEEU7KLXUMHMSXBALXJ2T4I2KOPXW2S5TRLKDRIAXD5UDHAYO", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (42949677057, 42949677056, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (42949677058, 42949677056, 2, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (38654709761, 38654709760, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (34359742465, 34359742464, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB", "amount": "1.0000000", 
"asset_type": "native"}', 'GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB'); +INSERT INTO history_operations VALUES (34359746561, 34359746560, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB", "amount": "1.0000000", "asset_type": "native"}', 'GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB'); +INSERT INTO history_operations VALUES (34359750657, 34359750656, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB", "amount": "1.0000000", "asset_type": "native"}', 'GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB'); +INSERT INTO history_operations VALUES (34359754753, 34359754752, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB", "amount": "1.0000000", "asset_type": "native"}', 'GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB'); +INSERT INTO history_operations VALUES (30064775169, 30064775168, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (25769807873, 25769807872, 1, 5, '{"master_key_weight": 2}', 'GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB'); +INSERT INTO history_operations VALUES (21474840577, 21474840576, 1, 5, '{"master_key_weight": 1}', 'GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB'); +INSERT INTO history_operations VALUES (21474844673, 21474844672, 1, 5, '{"signer_key": "GD3E7HKMRNT6HGBGHBT6I6JE4N2S4W5KZ246TGJ4KQSXJ2P4BXCUPQMP", "signer_weight": 1}', 'GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB'); +INSERT INTO history_operations VALUES (21474848769, 21474848768, 1, 5, '{"low_threshold": 2, "med_threshold": 2, "high_threshold": 2}', 'GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB'); +INSERT INTO history_operations VALUES (17179873281, 17179873280, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GAXI33UCLQTCKM2NMRBS7XYBR535LLEVAHL5YBN4FTCB4HZHT7ZA5CVK", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_trades VALUES (103079219201, 0, '2019-06-03 16:35:50', 3, 14, 1, 200000000, 13, 7, 200000000, false, 1, 1, 4, 3); +INSERT INTO history_trades VALUES (85899350017, 0, '2019-06-03 16:35:46', 2, 15, 4, 1000000000, 16, 7, 1000000000, true, 1, 1, 2, 4611686104326737921); +INSERT INTO history_trades VALUES (81604382721, 0, '2019-06-03 16:35:45', 1, 16, 2, 1000000000, 15, 7, 2000000000, false, 2, 1, 4611686100031770625, 1); +INSERT INTO history_trades VALUES (81604382721, 1, '2019-06-03 16:35:45', 2, 15, 4, 2000000000, 16, 7, 2000000000, true, 1, 1, 2, 4611686100031770625); + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO 
history_transaction_participants VALUES (1, 261993009152, 1); +INSERT INTO history_transaction_participants VALUES (2, 257698041856, 1); +INSERT INTO history_transaction_participants VALUES (3, 253403074560, 1); +INSERT INTO history_transaction_participants VALUES (4, 249108107264, 1); +INSERT INTO history_transaction_participants VALUES (5, 244813139968, 2); +INSERT INTO history_transaction_participants VALUES (6, 244813139968, 1); +INSERT INTO history_transaction_participants VALUES (7, 240518172672, 3); +INSERT INTO history_transaction_participants VALUES (8, 236223205376, 2); +INSERT INTO history_transaction_participants VALUES (9, 236223205376, 3); +INSERT INTO history_transaction_participants VALUES (10, 231928238080, 2); +INSERT INTO history_transaction_participants VALUES (11, 231928238080, 4); +INSERT INTO history_transaction_participants VALUES (12, 227633270784, 2); +INSERT INTO history_transaction_participants VALUES (13, 227633270784, 4); +INSERT INTO history_transaction_participants VALUES (14, 223338303488, 5); +INSERT INTO history_transaction_participants VALUES (15, 219043336192, 5); +INSERT INTO history_transaction_participants VALUES (16, 214748368896, 5); +INSERT INTO history_transaction_participants VALUES (17, 210453401600, 5); +INSERT INTO history_transaction_participants VALUES (18, 210453405696, 5); +INSERT INTO history_transaction_participants VALUES (19, 210453409792, 5); +INSERT INTO history_transaction_participants VALUES (20, 206158434304, 2); +INSERT INTO history_transaction_participants VALUES (21, 206158434304, 5); +INSERT INTO history_transaction_participants VALUES (22, 201863467008, 2); +INSERT INTO history_transaction_participants VALUES (23, 197568499712, 6); +INSERT INTO history_transaction_participants VALUES (24, 197568503808, 2); +INSERT INTO history_transaction_participants VALUES (25, 193273532416, 2); +INSERT INTO history_transaction_participants VALUES (26, 193273532416, 6); +INSERT INTO history_transaction_participants VALUES (27, 188978565120, 7); +INSERT INTO history_transaction_participants VALUES (28, 188978565120, 2); +INSERT INTO history_transaction_participants VALUES (29, 184683597824, 2); +INSERT INTO history_transaction_participants VALUES (30, 184683597824, 7); +INSERT INTO history_transaction_participants VALUES (31, 180388630528, 8); +INSERT INTO history_transaction_participants VALUES (32, 180388630528, 9); +INSERT INTO history_transaction_participants VALUES (33, 176093663232, 8); +INSERT INTO history_transaction_participants VALUES (34, 176093663232, 9); +INSERT INTO history_transaction_participants VALUES (35, 176093667328, 9); +INSERT INTO history_transaction_participants VALUES (36, 176093667328, 8); +INSERT INTO history_transaction_participants VALUES (37, 171798695936, 9); +INSERT INTO history_transaction_participants VALUES (38, 171798700032, 9); +INSERT INTO history_transaction_participants VALUES (39, 167503728640, 8); +INSERT INTO history_transaction_participants VALUES (40, 163208761344, 2); +INSERT INTO history_transaction_participants VALUES (41, 163208761344, 9); +INSERT INTO history_transaction_participants VALUES (42, 163208765440, 8); +INSERT INTO history_transaction_participants VALUES (43, 163208765440, 2); +INSERT INTO history_transaction_participants VALUES (44, 158913794048, 10); +INSERT INTO history_transaction_participants VALUES (45, 154618826752, 10); +INSERT INTO history_transaction_participants VALUES (46, 150323859456, 10); +INSERT INTO history_transaction_participants VALUES (47, 146028892160, 10); 
+INSERT INTO history_transaction_participants VALUES (48, 141733924864, 2); +INSERT INTO history_transaction_participants VALUES (49, 141733924864, 10); +INSERT INTO history_transaction_participants VALUES (50, 137438957568, 11); +INSERT INTO history_transaction_participants VALUES (51, 137438961664, 11); +INSERT INTO history_transaction_participants VALUES (52, 133143990272, 11); +INSERT INTO history_transaction_participants VALUES (53, 128849022976, 11); +INSERT INTO history_transaction_participants VALUES (54, 124554055680, 11); +INSERT INTO history_transaction_participants VALUES (55, 120259088384, 11); +INSERT INTO history_transaction_participants VALUES (56, 120259092480, 11); +INSERT INTO history_transaction_participants VALUES (57, 120259096576, 11); +INSERT INTO history_transaction_participants VALUES (58, 120259100672, 11); +INSERT INTO history_transaction_participants VALUES (59, 120259104768, 11); +INSERT INTO history_transaction_participants VALUES (60, 120259108864, 11); +INSERT INTO history_transaction_participants VALUES (61, 120259112960, 11); +INSERT INTO history_transaction_participants VALUES (62, 115964121088, 11); +INSERT INTO history_transaction_participants VALUES (63, 115964121088, 2); +INSERT INTO history_transaction_participants VALUES (64, 111669153792, 12); +INSERT INTO history_transaction_participants VALUES (65, 111669157888, 12); +INSERT INTO history_transaction_participants VALUES (66, 107374186496, 2); +INSERT INTO history_transaction_participants VALUES (67, 107374186496, 12); +INSERT INTO history_transaction_participants VALUES (68, 103079219200, 14); +INSERT INTO history_transaction_participants VALUES (69, 98784251904, 13); +INSERT INTO history_transaction_participants VALUES (70, 94489284608, 13); +INSERT INTO history_transaction_participants VALUES (71, 90194317312, 2); +INSERT INTO history_transaction_participants VALUES (72, 90194317312, 13); +INSERT INTO history_transaction_participants VALUES (73, 90194321408, 2); +INSERT INTO history_transaction_participants VALUES (74, 90194321408, 14); +INSERT INTO history_transaction_participants VALUES (75, 85899350016, 16); +INSERT INTO history_transaction_participants VALUES (76, 85899350016, 17); +INSERT INTO history_transaction_participants VALUES (77, 81604382720, 16); +INSERT INTO history_transaction_participants VALUES (78, 81604382720, 17); +INSERT INTO history_transaction_participants VALUES (79, 77309415424, 15); +INSERT INTO history_transaction_participants VALUES (80, 77309415424, 16); +INSERT INTO history_transaction_participants VALUES (81, 77309419520, 15); +INSERT INTO history_transaction_participants VALUES (82, 77309423616, 15); +INSERT INTO history_transaction_participants VALUES (83, 73014448128, 16); +INSERT INTO history_transaction_participants VALUES (84, 73014452224, 17); +INSERT INTO history_transaction_participants VALUES (85, 68719480832, 2); +INSERT INTO history_transaction_participants VALUES (86, 68719480832, 16); +INSERT INTO history_transaction_participants VALUES (87, 68719484928, 17); +INSERT INTO history_transaction_participants VALUES (88, 68719484928, 2); +INSERT INTO history_transaction_participants VALUES (89, 68719489024, 2); +INSERT INTO history_transaction_participants VALUES (90, 68719489024, 15); +INSERT INTO history_transaction_participants VALUES (91, 64424513536, 19); +INSERT INTO history_transaction_participants VALUES (92, 64424513536, 18); +INSERT INTO history_transaction_participants VALUES (93, 60129546240, 19); +INSERT INTO history_transaction_participants 
VALUES (94, 60129546240, 18); +INSERT INTO history_transaction_participants VALUES (95, 60129550336, 18); +INSERT INTO history_transaction_participants VALUES (96, 55834578944, 2); +INSERT INTO history_transaction_participants VALUES (97, 55834578944, 19); +INSERT INTO history_transaction_participants VALUES (98, 55834583040, 2); +INSERT INTO history_transaction_participants VALUES (99, 55834583040, 18); +INSERT INTO history_transaction_participants VALUES (100, 51539611648, 21); +INSERT INTO history_transaction_participants VALUES (101, 51539611648, 20); +INSERT INTO history_transaction_participants VALUES (102, 47244644352, 2); +INSERT INTO history_transaction_participants VALUES (103, 47244644352, 21); +INSERT INTO history_transaction_participants VALUES (104, 42949677056, 22); +INSERT INTO history_transaction_participants VALUES (105, 42949677056, 2); +INSERT INTO history_transaction_participants VALUES (106, 38654709760, 2); +INSERT INTO history_transaction_participants VALUES (107, 38654709760, 22); +INSERT INTO history_transaction_participants VALUES (108, 34359742464, 23); +INSERT INTO history_transaction_participants VALUES (109, 34359742464, 2); +INSERT INTO history_transaction_participants VALUES (110, 34359746560, 23); +INSERT INTO history_transaction_participants VALUES (111, 34359746560, 2); +INSERT INTO history_transaction_participants VALUES (112, 34359750656, 23); +INSERT INTO history_transaction_participants VALUES (113, 34359750656, 2); +INSERT INTO history_transaction_participants VALUES (114, 34359754752, 23); +INSERT INTO history_transaction_participants VALUES (115, 34359754752, 2); +INSERT INTO history_transaction_participants VALUES (116, 30064775168, 2); +INSERT INTO history_transaction_participants VALUES (117, 30064775168, 23); +INSERT INTO history_transaction_participants VALUES (118, 25769807872, 24); +INSERT INTO history_transaction_participants VALUES (119, 21474840576, 24); +INSERT INTO history_transaction_participants VALUES (120, 21474844672, 24); +INSERT INTO history_transaction_participants VALUES (121, 21474848768, 24); +INSERT INTO history_transaction_participants VALUES (122, 17179873280, 2); +INSERT INTO history_transaction_participants VALUES (123, 17179873280, 24); +INSERT INTO history_transaction_participants VALUES (124, 12884905984, 2); +INSERT INTO history_transaction_participants VALUES (125, 12884905984, 25); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 125, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('bc11b5c41de791369fd85fa1ccf01c35c20df5f98ff2f75d02ead61bfd520e21', 61, 1, 'GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN', 300000000003, 100, 1, '2019-06-03 16:35:33.314201', '2019-06-03 16:35:33.314201', 261993009152, 'AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgDAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AwAAAAAAAAABHK0SlAAAAECcI6ex0Dq6YAh6aK14jHxuAvhvKG2+NuzboAKrfYCaC1ZSQ77BYH/5MghPX97JO9WXV17ehNK7d0umxBgaJj8A', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgCAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAPQAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvicAAAAEXZZLgDAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==', 
'AAAAAgAAAAMAAAA8AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+LUAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA9AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+JwAAAARdlkuAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{nCOnsdA6umAIemiteIx8bgL4byhtvjbs26ACq32AmgtWUkO+wWB/+TIIT1/eyTvVl1de3oTSu3dLpsQYGiY/AA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('c8132b95c0063cafd20b26d27f06c12e688609d2d9d3724b840821e861870b8e', 60, 1, 'GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN', 300000000002, 100, 1, '2019-06-03 16:35:33.335583', '2019-06-03 16:35:33.335584', 257698041856, 'AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgCAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AQAAAAAAAAABHK0SlAAAAEC4H7TDntOUXDMg4MfoCPlbLRQZH7VwNpUHMvtnRWqWIiY/qnYYu0bvgYUVtoFOOeqElRKLYqtOW3Fz9iKl0WQJ', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAPAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvi1AAAAEXZZLgBAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAPAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvi1AAAAEXZZLgCAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==', 'AAAAAgAAAAMAAAA7AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+M4AAAARdlkuAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA8AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+LUAAAARdlkuAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{uB+0w57TlFwzIODH6Aj5Wy0UGR+1cDaVBzL7Z0VqliImP6p2GLtG74GFFbaBTjnqhJUSi2KrTltxc/YipdFkCQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('74b62d52311ea3f47359f74790595343f976afa4fd306caaefee5efdbbb104ff', 59, 1, 'GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN', 300000000001, 100, 1, '2019-06-03 16:35:33.346257', '2019-06-03 16:35:33.346257', 253403074560, 'AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAAEXZZLgBAAAAAAAAAAAAAAABAAAAAAAAAAsAAAAAAAAAZAAAAAAAAAABHK0SlAAAAEAOrvZSFnT3JvmT1P5lJ/lggpZe4nxH5WvJ9K/SLOD49wfqq84suncoZIn3IAf0PExMw3etu5FiDVw3c3jYYhAL', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAOwAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjOAAAAEXZZLgAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOwAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjOAAAAEXZZLgBAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==', 'AAAAAgAAAAMAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAARdlkuAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA7AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+M4AAAARdlkuAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Dq72UhZ09yb5k9T+ZSf5YIKWXuJ8R+VryfSv0izg+PcH6qvOLLp3KGSJ9yAH9DxMTMN3rbuRYg1cN3N42GIQCw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('829d53f2dceebe10af8007564b0aefde819b95734ad431df84270651e7ed8a90', 58, 1, 'GCQZP3IU7XU6EJ63JZXKCQOYT2RNXN3HB5CNHENNUEUHSMA4VUJJJSEN', 244813135873, 100, 1, '2019-06-03 16:35:33.357399', '2019-06-03 16:35:33.357399', 249108107264, 'AAAAAKGX7RT96eIn205uoUHYnqLbt2cPRNORraEoeTAcrRKUAAAAZAAAADkAAAABAAAAAAAAAAAAAAABAAAAAAAAAAsAAABF2WS4AAAAAAAAAAABHK0SlAAAAEDq0JVhKNIq9ag0sR+R/cv3d9tEuaYEm2BazIzILRdGj9alaVMZBhxoJ3ZIpP3rraCJzyoKZO+p5HBVe10a2+UG', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAOgAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjnAAAADkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOgAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvjnAAAADkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAAOQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAARdlkuAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA6AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+OcAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{6tCVYSjSKvWoNLEfkf3L93fbRLmmBJtgWsyMyC0XRo/WpWlTGQYcaCd2SKT9662gic8qCmTvqeRwVXtdGtvlBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('0e5bd332291e3098e49886df2cdb9b5369a5f9e0a9973f0d9e1a9489c6581ba2', 57, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 26, 100, 1, '2019-06-03 16:35:33.368603', '2019-06-03 16:35:33.368603', 244813139968, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAaAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoZftFP3p4ifbTm6hQdieotu3Zw9E05GtoSh5MBytEpQAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDHU95E9wxgETD8TqxUrkgC0/7XHyNDts6Q5huRHfDRyRcoHdv7aMp/sPvC3RPkXjOMjgbKJUX7SgExUeYB5f8F', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAZAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZY9dZxbAAAAAAAAAAaAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlahyo1sAAAAAAAAABoAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5AAAAAAAAAAChl+0U/eniJ9tObqFB2J6i27dnD0TTka2hKHkwHK0SlAAAAAJUC+QAAAAAOQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nHQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA5AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nFsAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{x1PeRPcMYBEw/E6sVK5IAtP+1x8jQ7bOkOYbkR3w0ckXKB3b+2jKf7D7wt0T5F4zjI4GyiVF+0oBMVHmAeX/BQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2a805712c6d10f9e74bb0ccf54ae92a2b4b1e586451fe8133a2433816f6b567c', 56, 1, 'GANFZDRBCNTUXIODCJEYMACPMCSZEVE4WZGZ3CZDZ3P2SXK4KH75IK6Y', 236223201281, 100, 1, '2019-06-03 16:35:33.380688', '2019-06-03 16:35:33.380688', 240518172672, 'AAAAABpcjiETZ0uhwxJJhgBPYKWSVJy2TZ2LI87fqV1cUf/UAAAAZAAAADcAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAAAAAAAAAX14QAAAAAAAAAAAVxR/9QAAABAK6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAOAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvjnAAAADcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAA==', 'AAAAAgAAAAMAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA4AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+OcAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{K6pcXYMzAEmH08CZ1LWmvtNDKauhx+OImtP/Lk4hVTMJRVBOebVs5WEPj9iSrgGT0EswuDCZ2i5AEzwgGof9Ag==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('5bbbedfb52efd1d5d973e22540044a27b8115772314293e3ba8b1fb12e63ca2e', 55, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 25, 100, 1, '2019-06-03 16:35:33.392703', '2019-06-03 16:35:33.392703', 236223205376, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAZAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAGlyOIRNnS6HDEkmGAE9gpZJUnLZNnYsjzt+pXVxR/9QAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBCMMjX9xO3XKpQ6uS/U1BqdzRhSBYQ35ivmZxPBgfqQsTDma1BzOsq/bmHJ4P+fkYJRJUdZZazXJM2i4mF7nUH', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAANwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJV0AAAAAAAAAAYAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJV0AAAAAAAAAAZAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lXQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatlj11nHQAAAAAAAAABkAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA3AAAAAAAAAAAaXI4hE2dLocMSSYYAT2ClklSctk2diyPO36ldXFH/1AAAAAJUC+QAAAAANwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAA2AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lY0AAAAAAAAABgAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA3AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lXQAAAAAAAAABgAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{QjDI1/cTt1yqUOrkv1NQanc0YUgWEN+Yr5mcTwYH6kLEw5mtQczrKv25hyeD/n5GCUSVHWWWs1yTNouJhe51Bw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('85bbd2b558563518a38e9b749bd4b8ced60b9fbbb7a6b283e15ae98548302ac4', 54, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 24, 200, 2, '2019-06-03 16:35:33.406028', '2019-06-03 16:35:33.406028', 231928238080, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAyAAAAAAAAAAYAAAAAAAAAAAAAAACAAAAAAAAAAEAAAAABJeTmKR1qr+CZoIyjAfGxrIXZ/tI1VId2OfZkRowDz4AAAAAAAAAAAX14QAAAAABAAAAAASXk5ikdaq/gmaCMowHxsayF2f7SNVSHdjn2ZEaMA8+AAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAABfXhAAAAAAAAAAACVvwF9wAAAEDRRWwMrdLrhnl+FIP+71tTHB5rlzCsPVyGnR3scvID9NmIL3LZEo992uTvDI9QLys5bC2yRc3WYR0vFiZRs40IGjAPPgAAAEDXbXWVdzmN6NWBjYU5OvB33WTUaa2wDZX3RmFTZQQ/+7JvPdblMtNCxo8IOYePQg90RajV9rB+k8P+SEpPHCUH', 'AAAAAAAAAMgAAAAAAAAAAgAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAANgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJWNAAAAAAAAAAXAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJWNAAAAAAAAAAYAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAACAAAABAAAAAMAAAA1AAAAAAAAAAAEl5OYpHWqv4JmgjKMB8bGshdn+0jVUh3Y59mRGjAPPgAAAAJUC+QAAAAANQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA2AAAAAAAAAAAEl5OYpHWqv4JmgjKMB8bGshdn+0jVUh3Y59mRGjAPPgAAAAJaAcUAAAAANQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAA2AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lY0AAAAAAAAABgAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA2AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltD7HU0AAAAAAAAABgAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAQAAAADAAAANgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbQ+x1NAAAAAAAAAAYAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZbSeJWNAAAAAAAAAAYAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAANgAAAAAAAAAABJeTmKR1qr+CZoIyjAfGxrIXZ/tI1VId2OfZkRowDz4AAAACWgHFAAAAADUAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANgAAAAAAAAAABJeTmKR1qr+CZoIyjAfGxrIXZ/tI1VId2OfZkRowDz4AAAACVAvkAAAAADUAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAA=', 'AAAAAgAAAAMAAAA1AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lb8AAAAAAAAABcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA2AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lY0AAAAAAAAABcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{0UVsDK3S64Z5fhSD/u9bUxwea5cwrD1chp0d7HLyA/TZiC9y2RKPfdrk7wyPUC8rOWwtskXN1mEdLxYmUbONCA==,1211lXc5jejVgY2FOTrwd91k1GmtsA2V90ZhU2UEP/uybz3W5TLTQsaPCDmHj0IPdEWo1fawfpPD/khKTxwlBw==}', 'none', NULL, NULL, true, 200); +INSERT INTO history_transactions VALUES ('df5f0e8b3b533dd9cda0ff7540bef3e9e19369060f8a4b0414b0e3c1b4315b1c', 53, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 23, 100, 1, '2019-06-03 16:35:33.420452', '2019-06-03 16:35:33.420452', 227633270784, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAXAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAABJeTmKR1qr+CZoIyjAfGxrIXZ/tI1VId2OfZkRowDz4AAAACVAvkAAAAAAAAAAABVvwF9wAAAEDyHwhW9GXQVXG1qibbeqSjxYzhv5IC08K2vSkxzYTwJykvQ8l0+e4M4h2guoK89s8HUfIqIOzDmoGsNTaLcYUG', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAANQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZdne46/AAAAAAAAAAWAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZdne46/AAAAAAAAAAXAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAA1AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl2d7jr8AAAAAAAAABcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA1AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatltJ4lb8AAAAAAAAABcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1AAAAAAAAAAAEl5OYpHWqv4JmgjKMB8bGshdn+0jVUh3Y59mRGjAPPgAAAAJUC+QAAAAANQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAwAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl2d7jtgAAAAAAAAABYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA1AAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl2d7jr8AAAAAAAAABYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{8h8IVvRl0FVxtaom23qko8WM4b+SAtPCtr0pMc2E8CcpL0PJdPnuDOIdoLqCvPbPB1HyKiDsw5qBrDU2i3GFBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('c8a28fb25d4784f37a7a078e1feef0eb30ca64e994734625ac4ea067cc621464', 52, 1, 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD', 206158430214, 100, 1, '2019-06-03 16:35:33.430853', '2019-06-03 16:35:33.430854', 223338303488, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTEAAAAAAAABAAAABDAwMDAAAAAAAAAAAS6Z+xkAAABA3ExJNH79wGSRYZerPP1zMYlepMsuhoJF5vHn2gCsHmDpWfgO8VKC3BRImO+ne9spUXlVHMjEuhOHoPhl1hrMCg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAANAAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvhqAAAADAAAAAFAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAANAAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvhqAAAADAAAAAGAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAzAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMQAAAAAAAAQxMjM0AAAAAAAAAAAAAAABAAAANAAAAAMAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAAFbmFtZTEAAAAAAAAEMDAwMAAAAAAAAAAA', 'AAAAAgAAAAMAAAAzAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+IMAAAAMAAAAAUAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAA0AAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+GoAAAAMAAAAAUAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{3ExJNH79wGSRYZerPP1zMYlepMsuhoJF5vHn2gCsHmDpWfgO8VKC3BRImO+ne9spUXlVHMjEuhOHoPhl1hrMCg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('1d7833c4faab08e62609acf3714d1babe27621a2b328edf37465e99aaf389cab', 51, 1, 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD', 206158430213, 100, 1, '2019-06-03 16:35:33.443723', '2019-06-03 16:35:33.443724', 219043336192, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTEAAAAAAAABAAAABDEyMzQAAAAAAAAAAS6Z+xkAAABAIW4yrFdk66fgDDir7YFATEd2llOubzx/iaJcM2wkF3ouqJQN+Aziy2rVtK5AoyphokiwsYXvHS6UF9MhdnUADQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAMwAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAviDAAAADAAAAAEAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMwAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAviDAAAADAAAAAFAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMQAAAAAAAAQxMjM0AAAAAAAAAAAAAAABAAAAMwAAAAMAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAAFbmFtZTEAAAAAAAAEMTIzNAAAAAAAAAAA', 'AAAAAgAAAAMAAAAyAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+JwAAAAMAAAAAQAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAzAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+IMAAAAMAAAAAQAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{IW4yrFdk66fgDDir7YFATEd2llOubzx/iaJcM2wkF3ouqJQN+Aziy2rVtK5AoyphokiwsYXvHS6UF9MhdnUADQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('616c609047ef8f9ca908a47a47aa4bb018449c569549ad2ca60590aab74267e8', 50, 1, 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD', 206158430212, 100, 1, '2019-06-03 16:35:33.457305', '2019-06-03 16:35:33.457305', 214748368896, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTIAAAAAAAAAAAAAAAAAAAEumfsZAAAAQAYRZNPhJCTwjJgAJ9beE3ZO/H3kYJhYmV1pCmy7c8Zr2sKdKOmaLn4fmA5qaL+lQMKwOShtjwkZ8JHxPUd8GAk=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAMgAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvicAAAADAAAAADAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMgAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvicAAAADAAAAAEAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMgAAAAAAAAQ1Njc4AAAAAAAAAAAAAAACAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMgAAAAAAAAMAAAAyAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+JwAAAAMAAAAAQAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAyAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+JwAAAAMAAAAAQAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAyAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+JwAAAAMAAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{BhFk0+EkJPCMmAAn1t4Tdk78feRgmFiZXWkKbLtzxmvawp0o6Zoufh+YDmpov6VAwrA5KG2PCRnwkfE9R3wYCQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('9fff61916716fb2550043fac968ac6c13802af5176a10fc29108fcfc445ef513', 49, 1, 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD', 206158430209, 100, 1, '2019-06-03 16:35:33.472183', '2019-06-03 16:35:33.472183', 210453401600, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTEAAAAAAAABAAAABDEyMzQAAAAAAAAAAS6Z+xkAAABAxKiHYYNLJiW3r5+kCJm8ucaoV7BcrEnQXFb3s1RyRyUbAkDlaCvE+RKwMZoNUfbkQUGrouyVKy1ZpUeccByqDg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMQAAAAAAAAQxMjM0AAAAAAAAAAA=', 'AAAAAgAAAAMAAAAwAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+QAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+OcAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{xKiHYYNLJiW3r5+kCJm8ucaoV7BcrEnQXFb3s1RyRyUbAkDlaCvE+RKwMZoNUfbkQUGrouyVKy1ZpUeccByqDg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('e4609180751e7702466a8845857df43e4d154ec84b6bad62ce507fe12f1daf99', 49, 2, 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD', 206158430210, 100, 1, '2019-06-03 16:35:33.472323', '2019-06-03 16:35:33.472324', 210453405696, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZTIAAAAAAAABAAAABDU2NzgAAAAAAAAAAS6Z+xkAAABAjxgnTRBCa0n1efZocxpEjXeITQ5sEYTVd9fowuto2kPw5eFwgVnz6OrKJwCRt5L8ylmWiATXVI3Zyfi3yTKqBA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lMgAAAAAAAAQ1Njc4AAAAAAAAAAA=', 'AAAAAgAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+OcAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+M4AAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{jxgnTRBCa0n1efZocxpEjXeITQ5sEYTVd9fowuto2kPw5eFwgVnz6OrKJwCRt5L8ylmWiATXVI3Zyfi3yTKqBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('48415cd0fda9bc9aeb1f0b419bfb2997f7a2aa1b1ef2e51a0602c61104fc23cc', 49, 3, 'GAYSCMKQY6EYLXOPTT6JPPOXDMVNBWITPTSZIVWW4LWARVBOTH5RTLAD', 206158430211, 100, 1, '2019-06-03 16:35:33.472436', '2019-06-03 16:35:33.472436', 210453409792, 'AAAAADEhMVDHiYXdz5z8l73XGyrQ2RN85ZRW1uLsCNQumfsZAAAAZAAAADAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAoAAAAFbmFtZSAAAAAAAAABAAAAD2l0cyBnb3Qgc3BhY2VzIQAAAAAAAAAAAS6Z+xkAAABANmYginYhX+6VAsl1JumfxkB57y2LHraWDUkR+KDxWW8l5pfTViLxx7J85KrOV0qNCY4RfasgqxF0FC3ErYceCQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAKAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMQAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvi1AAAADAAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxAAAAAwAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAVuYW1lIAAAAAAAAA9pdHMgZ290IHNwYWNlcyEAAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+M4AAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAxAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+LUAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{NmYginYhX+6VAsl1JumfxkB57y2LHraWDUkR+KDxWW8l5pfTViLxx7J85KrOV0qNCY4RfasgqxF0FC3ErYceCQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('eb8586c9176c4cf2e864b2521948a972db5274de24673669463e0c7824cee056', 48, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 22, 100, 1, '2019-06-03 16:35:33.482587', '2019-06-03 16:35:33.482587', 206158434304, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAWAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAMSExUMeJhd3PnPyXvdcbKtDZE3zllFbW4uwI1C6Z+xkAAAACVAvkAAAAAAAAAAABVvwF9wAAAECAMOn6G4jusgpfSoHwntHQkYIDxI/VnyH/qIi+bdMWzi1T6WlwnO+yITgm2+mOaWc6zVuxiLjHllzBeQ/xKvQN', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAMAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZf8fofYAAAAAAAAAAVAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAMAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGrZf8fofYAAAAAAAAAAWAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAwAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h9gAAAAAAAAABYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAwAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl2d7jtgAAAAAAAAABYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAAAAAxITFQx4mF3c+c/Je91xsq0NkTfOWUVtbi7AjULpn7GQAAAAJUC+QAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h/EAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAwAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h9gAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{gDDp+huI7rIKX0qB8J7R0JGCA8SP1Z8h/6iIvm3TFs4tU+lpcJzvsiE4JtvpjmlnOs1bsYi4x5ZcwXkP8Sr0DQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('ea93efd8c2f4e45c0318c69ec958623a0e4374f40d569eec124d43c8a54d6256', 47, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 21, 100, 1, '2019-06-03 16:35:33.495412', '2019-06-03 16:35:33.495413', 201863467008, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAVAAAAAAAAAAAAAAABAAAAAAAAAAkAAAAAAAAAAVb8BfcAAABABUHuXY+MTgW/wDv5+NDVh9fw4meszxeXO98HEQfgXVeCZ7eObCI2orSGUNA/SK6HV9/uTVSxIQQWIso1QoxHBQ==', 
'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAJAAAAAAAAAAIAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAIrEjCYwXAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAIrEjfceLAAAAAA==', 'AAAAAQAAAAIAAAADAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAUAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvaAAAAAAAAAAVAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsatl/x+h/EAAAAAAAAABUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGraHekccnAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAvAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+9oAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{BUHuXY+MTgW/wDv5+NDVh9fw4meszxeXO98HEQfgXVeCZ7eObCI2orSGUNA/SK6HV9/uTVSxIQQWIso1QoxHBQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('7207de5b75243e0b062c3833f587036b7e9f64453be49ff50f3f3fdc7516ec6b', 46, 1, 'GDR53WAEIKOU3ZKN34CSHAWH7HV6K63CBJRUTWUDBFSMY7RRQK3SPKOS', 193273528321, 100, 1, '2019-06-03 16:35:33.506693', '2019-06-03 16:35:33.506693', 197568499712, 'AAAAAOPd2ARCnU3lTd8FI4LH+evle2IKY0nagwlkzH4xgrcnAAAAZAAAAC0AAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAABAAAAAOPd2ARCnU3lTd8FI4LH+evle2IKY0nagwlkzH4xgrcnAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMYK3JwAAAEAOkGOPTOBDSQ7nW2Zn+bls2PDUebk2/k3/gqHKQ8eYOFsD6nBeEvyMD858vo5BabjQwB9injABIM8esDh7bEkC', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAALgAAAAAAAAAA493YBEKdTeVN3wUjgsf56+V7YgpjSdqDCWTMfjGCtycCxorwuxP/nAAAAC0AAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALgAAAAAAAAAA493YBEKdTeVN3wUjgsf56+V7YgpjSdqDCWTMfjGCtycCxorwuxP/nAAAAC0AAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAEAAAAAAAAAAQAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAtAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7FAAAAAAALQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7E/+cAAAALQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{DpBjj0zgQ0kO51tmZ/m5bNjw1Hm5Nv5N/4KhykPHmDhbA+pwXhL8jA/OfL6OQWm40MAfYp4wASDPHrA4e2xJAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('d24f486bd722fd1875b843839e880bdeea324e25db706a26af5e4daa8c5071eb', 46, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 20, 100, 1, 
'2019-06-03 16:35:33.506856', '2019-06-03 16:35:33.506856', 197568503808, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAUAAAAAAAAAAAAAAABAAAAAAAAAAUAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABVvwF9wAAAEBYI0TMQVWPvnC2KPbDph9Myz5UMuBRIYt2YQdtlPYC4UHamYnHsMghpIMfaS7MWdHuGY81+FBozOsS+/HGohQD', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAALgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvzAAAAAAAAAATAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcLGiubZdPvzAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAtAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0/AwAAAAAAAAABMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAuAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0+/MAAAAAAAAABMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{WCNEzEFVj75wtij2w6YfTMs+VDLgUSGLdmEHbZT2AuFB2pmJx7DIIaSDH2kuzFnR7hmPNfhQaMzrEvvxxqIUAw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('5b42c77042f04bf716659a05e7ca3f4703af038a7da75b10b8538707c9ff172f', 45, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 19, 100, 1, '2019-06-03 16:35:33.518779', '2019-06-03 16:35:33.51878', 193273532416, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAATAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA493YBEKdTeVN3wUjgsf56+V7YgpjSdqDCWTMfjGCtycCxorwuxQAAAAAAAAAAAABVvwF9wAAAECGClRePcAExQ/WKroo3/3dfchP/yI8TRDrrjt/chZ83ULiTc54l5wcz1AkbLa6CAapdSGpUWXk5ksTqDXLn4AA', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAALQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaMIOfwMAAAAAAAAAASAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaMIOfwMAAAAAAAAAATAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAtAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/AwAAAAAAAAABMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAtAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wsaK5tl0/AwAAAAAAAAABMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAtAAAAAAAAAADj3dgEQp1N5U3fBSOCx/nr5XtiCmNJ2oMJZMx+MYK3JwLGivC7FAAAAAAALQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAsAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/CUAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAtAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/AwAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{hgpUXj3ABMUP1iq6KN/93X3IT/8iPE0Q6647f3IWfN1C4k3OeJecHM9QJGy2uggGqXUhqVFl5OZLE6g1y5+AAA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('e0773d07aba23d11e6a06b021682294be1f9f202a2926827022539662ce2c7fc', 44, 1, 'GCHPXGVDKPF5KT4CNAT7X77OXYZ7YVE4JHKFDUHCGCVWCL4K4PQ67KKZ', 184683593729, 100, 1, '2019-06-03 16:35:33.529055', '2019-06-03 16:35:33.529055', 188978565120, 
'AAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+HvAAAAZAAAACsAAAABAAAAAAAAAAAAAAABAAAAAAAAAAgAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAYrj4e8AAABA3jJ7wBrRpsrcnqBQWjyzwvVz2v5UJ56G60IhgsaWQFSf+7om462KToc+HJ27aLVOQ83dGh1ivp+VIuREJq/SBw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAIAAAAAAAAAAJUC+OcAAAAAA==', 'AAAAAQAAAAIAAAADAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAALAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvjnAAAACsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtonM3Az4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/CUAAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAI77mqNTy9VPgmgn+//uvjP8VJxJ1FHQ4jCrYS+K4+Hv', 'AAAAAgAAAAMAAAArAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+QAAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAsAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+OcAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{3jJ7wBrRpsrcnqBQWjyzwvVz2v5UJ56G60IhgsaWQFSf+7om462KToc+HJ27aLVOQ83dGh1ivp+VIuREJq/SBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('945b6171de747ab323b3cda52290933df39edd7061f6e260762663efc51bccb0', 43, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 18, 100, 1, '2019-06-03 16:35:33.541377', '2019-06-03 16:35:33.541377', 184683597824, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAASAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAjvuao1PL1U+CaCf7/+6+M/xUnEnUUdDiMKthL4rj4e8AAAACVAvkAAAAAAAAAAABVvwF9wAAAEBFbS2c5rrYNGslNVslTHH8j8x0ggew1eHHOUTNajMPy8GYn52RSwRncwwvv1ejEfA+g/mTXMpXrBO847C46KoA', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAKwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaMIOfw+AAAAAAAAAARAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaMIOfw+AAAAAAAAAASAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/D4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtonM3Az4AAAAAAAAABIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAArAAAAAAAAAACO+5qjU8vVT4JoJ/v/7r4z/FScSdRR0OIwq2EviuPh7wAAAAJUC+QAAAAAKwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/FcAAAAAAAAABEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAArAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/D4AAAAAAAAABEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{RW0tnOa62DRrJTVbJUxx/I/MdIIHsNXhxzlEzWozD8vBmJ+dkUsEZ3MML79XoxHwPoP5k1zKV6wTvOOwuOiqAA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('d67cfb271a889e7854ffd61b08eacde76d56e758466fc37a8eec2d3a40ef8b14', 42, 1, 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF', 163208757252, 100, 1, '2019-06-03 16:35:33.552965', '2019-06-03 16:35:33.552965', 180388630528, 
'AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABRVVSAAAAAAAAAAAAAAAAAUpI8/gAAABAEPKcQmATGpevrtlAcZnNI/GjfLLQEp9aODGGRFV+2C4UO8dU+UAMTkCSXQLD+xPaRQxzw93ScEok6GzYCtt7Bg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAHAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAKgAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvicAAAACYAAAADAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKgAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvicAAAACYAAAAEAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAqAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+LUAAAAJgAAAAMAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAqAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+JwAAAAJgAAAAMAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{EPKcQmATGpevrtlAcZnNI/GjfLLQEp9aODGGRFV+2C4UO8dU+UAMTkCSXQLD+xPaRQxzw93ScEok6GzYCtt7Bg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('6d2e30fd57492bf2e2b132e1bc91a548a369189bebf77eb2b3d829121a9d2c50', 41, 1, 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF', 163208757250, 100, 1, '2019-06-03 16:35:33.565164', '2019-06-03 16:35:33.565164', 176093663232, 'AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAACAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABVVNEAAAAAAEAAAAAAAAAAUpI8/gAAABA6O2fe1gQBwoO0fMNNEUKH0QdVXVjEWbN5VL51DmRUedYMMXtbX5JKVSzla2kIGvWgls1dXuXHZY/IOlaK01rBQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAHAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAABAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAACAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{6O2fe1gQBwoO0fMNNEUKH0QdVXVjEWbN5VL51DmRUedYMMXtbX5JKVSzla2kIGvWgls1dXuXHZY/IOlaK01rBQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('a832ff67085cb9eb6f1c4b740f6e033ba9b508af725fbf203469729a64a199ff', 41, 2, 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF', 163208757251, 100, 1, '2019-06-03 16:35:33.565428', '2019-06-03 16:35:33.565429', 176093667328, 'AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAADAAAAAAAAAAAAAAABAAAAAAAAAAcAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAABRVVSAAAAAAEAAAAAAAAAAUpI8/gAAABA1Qe8ngwANz4fLqYChwRjR5xng6cIqU5WBtjkZgF4ugVhi8J6kTpACvnvXso3IVym6Rfd6JdQW8QcLkFTX1MGCg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAHAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAACAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKQAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvi1AAAACYAAAADAAAAAAAAAAAAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAApAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+LUAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{1Qe8ngwANz4fLqYChwRjR5xng6cIqU5WBtjkZgF4ugVhi8J6kTpACvnvXso3IVym6Rfd6JdQW8QcLkFTX1MGCg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('6fa467b53f5386d77ad35c2502ed2cd3dd8b460a5be22b6b2818b81bcd3ed2da', 40, 1, 'GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG', 163208757249, 100, 1, '2019-06-03 16:35:33.580485', '2019-06-03 16:35:33.580485', 171798695936, 'AAAAAKturFHJX/eRt5gM6qIXAMbaXvlImqLysA6Qr9tLemxfAAAAZAAAACYAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+H//////////AAAAAAAAAAFLemxfAAAAQKN8LftAafeoAGmvpsEokqm47jAuqw4g1UWjmL0j6QPm1jxoalzDwDS3W+N2HOHdjSJlEQaTxGBfQKHhr6nNsAA=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFVU0QAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAmAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{o3wt+0Bp96gAaa+mwSiSqbjuMC6rDiDVRaOYvSPpA+bWPGhqXMPANLdb43Yc4d2NImURBpPEYF9AoeGvqc2wAA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('0bcb67aa365446fd244fecff3a0c397f81f3a9b13428688965e776d447c0b1ea', 40, 2, 'GCVW5LCRZFP7PENXTAGOVIQXADDNUXXZJCNKF4VQB2IK7W2LPJWF73UG', 163208757250, 100, 1, '2019-06-03 16:35:33.58064', '2019-06-03 16:35:33.58064', 171798700032, 'AAAAAKturFHJX/eRt5gM6qIXAMbaXvlImqLysA6Qr9tLemxfAAAAZAAAACYAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+H//////////AAAAAAAAAAFLemxfAAAAQMPVgYf+w09depDSxMcJnjVZHA2FlkBmhPmi0N66FuhAzTekWcCOMdCI0cUc+xJhywLXSMiKA6wP6K94NRlFlQE=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAKAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvjOAAAACYAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAoAAAAAQAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAFFVVIAAAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAAAAAAAB//////////wAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAoAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+M4AAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{w9WBh/7DT116kNLExwmeNVkcDYWWQGaE+aLQ3roW6EDNN6RZwI4x0IjRxRz7EmHLAtdIyIoDrA/or3g1GUWVAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('11705f94cd65d7b673a124a85ce368c80f8458ffaedff719304d8f849535b4e0', 39, 1, 'GD4SMOE3VPSF7ZR3CTEQ3P5UNTBMEJDA2GLXTHR7MMARANKKJDZ7RPGF', 163208757249, 100, 1, '2019-06-03 16:35:33.5929', '2019-06-03 16:35:33.5929', 167503728640, 'AAAAAPkmOJur5F/mOxTJDb+0bMLCJGDRl3meP2MBEDVKSPP4AAAAZAAAACYAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABSkjz+AAAAECyjDa1e+jtXukTrHluO7x0Mx7Wj4mRoM4S5UAFmRV+2rVoxjMwqFJhtYnEAUV19+C5ycp5jOLLpWxrCeRKJQUG', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAJwAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvjnAAAACYAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJwAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvjnAAAACYAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAEAAAAAAAAAAAAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAmAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAnAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+OcAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{sow2tXvo7V7pE6x5bju8dDMe1o+JkaDOEuVABZkVftq1aMYzMKhSYbWJxAFFdffgucnKeYziy6VsawnkSiUFBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('afeb8080522eba71ca328225bbcf731029edcfa254c827c45be580bae95c7231', 38, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 16, 100, 1, '2019-06-03 16:35:33.603781', '2019-06-03 16:35:33.603781', 163208761344, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAQAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAq26sUclf95G3mAzqohcAxtpe+UiaovKwDpCv20t6bF8AAAACVAvkAAAAAAAAAAABVvwF9wAAAEDnzvNgEYB1u3BGTHFDlIWnk0GOq7BMpfcyewJRsJK9lT4HTMEwMQ2jSJyrWmB7xdBxHKaNMXQaAIx6CShLXpQH', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAJgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaQyP+5XAAAAAAAAAAPAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaQyP+5XAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7lcAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gto5089VcAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmAAAAAAAAAACrbqxRyV/3kbeYDOqiFwDG2l75SJqi8rAOkK/bS3psXwAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAhAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7okAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7nAAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{587zYBGAdbtwRkxxQ5SFp5NBjquwTKX3MnsCUbCSvZU+B0zBMDENo0icq1pge8XQcRymjTF0GgCMegkoS16UBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2354df802111418a999e31c2964d16b8efe8e492b7d74de54939825190e1041f', 38, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 17, 100, 1, '2019-06-03 16:35:33.603995', '2019-06-03 16:35:33.603995', 163208765440, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAARAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA+SY4m6vkX+Y7FMkNv7RswsIkYNGXeZ4/YwEQNUpI8/gAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDD6WvAYL1wilsd7zYDJt0iFO/lppQ6GJJn/A8UJl9jTjMNOjuQPBtA7fSxR5KT0BZLbtQy8qFlys0I6fTe/cwO', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAJgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaOdPPVXAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaOdPPVXAAAAAAAAAARAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gto5089VcAAAAAAAAABEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtowg5/FcAAAAAAAAABEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmAAAAAAAAAAD5Jjibq+Rf5jsUyQ2/tGzCwiRg0Zd5nj9jARA1Skjz+AAAAAJUC+QAAAAAJgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7nAAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAmAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7lcAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{w+lrwGC9cIpbHe82AybdIhTv5aaUOhiSZ/wPFCZfY04zDTo7kDwbQO30sUeSk9AWS27UMvKhZcrNCOn03v3MDg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('52388a98e4e36c17749a94374270cc65bdb7271cb51277f095aaa8f1ca9d322c', 37, 1, 'GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG', 141733920772, 100, 1, '2019-06-03 16:35:33.615811', '2019-06-03 16:35:33.615811', 158913794048, 'AAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAZAAAACEAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAAAAAAAAAAEctWW1AAAAQM5SCoW10EJoKBBwwMu0Vw+f+bQ0GjQ9FO6w3l9Q/FIctm87248t9jXTbl0Rd4NgGcom0yoGxgcJiERwZGBMXQc=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAJQAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvicAAAACEAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJQAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvicAAAACEAAAAEAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAkAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAAAAAAAO5rKAAAAAAEAAAAAAAAAAAAAAAIAAAABAAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAAVVTRAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAJQAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvicAAAACEAAAAEAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJQAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvicAAAACEAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAkAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+LUAAAAIQAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAlAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+JwAAAAIQAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{zlIKhbXQQmgoEHDAy7RXD5/5tDQaND0U7rDeX1D8Uhy2bzvbjy32NdNuXRF3g2AZyibTKgbGBwmIRHBkYExdBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('44cb6c8ed4dbec542af1aad23001dd9d678cf19c8c461a653e762a7253eded82', 36, 1, 'GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG', 141733920771, 100, 1, '2019-06-03 16:35:33.635753', '2019-06-03 16:35:33.635753', 154618826752, 'AAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAZAAAACEAAAADAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAA7msoAAAAAAAAAAAEctWW1AAAAQO+eTIPXUZk+GAq7O6H8d1/WT5buo0apjLhGgtBeSyl37UV7LCpZfCn6DYVc7lQOVNWhBc7KDA7Ne83AR41kYAk=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAJAAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvi1AAAACEAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAJAAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvi1AAAACEAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAjAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAAAAAAAO5rKAAAAAAEAAAAAAAAAAAAAAAEAAAAkAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAAAAAAAO5rKAAAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAjAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+M4AAAAIQAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAkAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+LUAAAAIQAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{755Mg9dRmT4YCrs7ofx3X9ZPlu6jRqmMuEaC0F5LKXftRXssKll8KfoNhVzuVA5U1aEFzsoMDs17zcBHjWRgCQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('4e2442fe2e8dd8c686570c9f537acb2f50153a9883f8d199b6f4701eb289b3a0', 35, 1, 'GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG', 141733920770, 100, 1, '2019-06-03 16:35:33.645263', '2019-06-03 16:35:33.645263', 150323859456, 'AAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAZAAAACEAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAA7msoAAAAAAAAAAAEctWW1AAAAQNugq+B30pdbzvVVGz9RO3+DMeRdWqc/Xsd2NYdg6NBu7esvOdTWQ3nvoBEJyeGz8EE9zRQiSiqorwHlm+AGfwI=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAIwAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvjOAAAACEAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAIwAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvjOAAAACEAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAiAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAjAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAAAAAAAO5rKAAAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAiAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+OcAAAAIQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAjAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+M4AAAAIQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{26Cr4HfSl1vO9VUbP1E7f4Mx5F1apz9ex3Y1h2Do0G7t6y851NZDee+gEQnJ4bPwQT3NFCJKKqivAeWb4AZ/Ag==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('a05daae230b1f743474e83ab6d4817df1f3f77661a7d815f7620cee2a9809480', 34, 1, 'GCJKJXPKBFIHOO3455WXWG5CDBZXQNYFRRGICYMPUQ35CPQ4WVS3KZLG', 141733920769, 100, 1, '2019-06-03 16:35:33.656985', '2019-06-03 16:35:33.656985', 146028892160, 'AAAAAJKk3eoJUHc7fO9texuiGHN4NwWMTIFhj6Q30T4ctWW1AAAAZAAAACEAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF93//////////AAAAAAAAAAEctWW1AAAAQBYUnV3I1O35EAyay0msjg3MzZfanCtvalKGG+94pe6RxgE/kCk2kTT9HXgXjbraq//Q/0vJ0AoCAXSeT18Ujgk=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAIgAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvjnAAAACEAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAIgAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvjnAAAACEAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAiAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+OcAAAAIQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAiAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+OcAAAAIQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiAAAAAQAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAFVU0QAAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAhAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+QAAAAAIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAiAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+OcAAAAIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{FhSdXcjU7fkQDJrLSayODczNl9qcK29qUoYb73il7pHGAT+QKTaRNP0deBeNutqr/9D/S8nQCgIBdJ5PXxSOCQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('6d78f17fafa2317d6af679e1e5420f351207ff61cdff21c600ea8f85155b3ea1', 33, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 15, 100, 1, '2019-06-03 16:35:33.665712', '2019-06-03 16:35:33.665712', 141733924864, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAPAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAkqTd6glQdzt87217G6IYc3g3BYxMgWGPpDfRPhy1ZbUAAAACVAvkAAAAAAAAAAABVvwF9wAAAEC+mgKIzZqflQIKIqWn9LrciuyEx7XPfXGUhvyQ3sIQBnGdOWhkOt57UU/75LtUy4recT+jrY2cHKZj33puue8F', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAIQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaTHQueJAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAIQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaTHQueJAAAAAAAAAAPAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAhAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpMdC54kAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAhAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpDI/7okAAAAAAAAAA8AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAhAAAAAAAAAACSpN3qCVB3O3zvbXsbohhzeDcFjEyBYY+kN9E+HLVltQAAAAJUC+QAAAAAIQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAbAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpMdC56IAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAhAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpMdC54kAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{vpoCiM2an5UCCiKlp/S63IrshMe1z31xlIb8kN7CEAZxnTloZDree1FP++S7VMuK3nE/o62NnBymY996brnvBQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('bb9d6654111fae501594400dc901c70d47489a67163d2a34f9b3e32a921a50dc', 32, 1, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964117003, 100, 1, '2019-06-03 16:35:33.677436', '2019-06-03 16:35:33.677436', 137438957568, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAALAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEAFytUxjxN4bnJMrEJkSprnES9iGpOxAsNOFYrTP/xtGVk/PZ2oThUW+/hLRIk+hYYEgF21Gf58N/abJKFpqlsI', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvfUAAAABsAAAAKAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAAAAAAAQAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAACwAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAACAAAAAwAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAACwAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAAgAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC99QAAAAGwAAAAsAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAfAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+AYAAAAGwAAAAoAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAAAAAABAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvftAAAABsAAAAKAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAA', '{BcrVMY8TeG5yTKxCZEqa5xEvYhqTsQLDThWK0z/8bRlZPz2dqE4VFvv4S0SJPoWGBIBdtRn+fDf2myShaapbCA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('6b38cdd5c17df2013d5a5e211c4b32218b6be91025316b1aab28bc12316615d5', 32, 2, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964117004, 100, 1, '2019-06-03 16:35:33.677655', '2019-06-03 16:35:33.677656', 137438961664, 
'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAMAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAAAAAAAAAAAATCeMFAAAABAOb0qGWnk1WrSUXS6iQFocaIOY/BDmgG1zTmlPyg0boSid3jTBK3z9U8+IPGAOELNLgkQHtgGYFgFGMio1xY+BQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvfUAAAABsAAAALAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAAAAAAAQAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAADAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAACAAAAAwAAACAAAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL31AAAAAbAAAADAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAAAAAAAEAAAAgAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC99QAAAAGwAAAAwAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAALZXhhbXBsZS5jb20AAgACAgAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAgAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC9+0AAAAGwAAAAoAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAAAAAABAAAAIAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvfUAAAABsAAAAKAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAABQAAAAAAAAAA', '{Ob0qGWnk1WrSUXS6iQFocaIOY/BDmgG1zTmlPyg0boSid3jTBK3z9U8+IPGAOELNLgkQHtgGYFgFGMio1xY+BQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('299dc6631d585a55ae3602f660ec5b5a0088d24a14b344c72eccc2a62d9a8938', 31, 1, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964117002, 100, 1, '2019-06-03 16:35:33.694359', '2019-06-03 16:35:33.694359', 133143990272, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAKAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAUAAAAAAAAAATCeMFAAAABA0wriernSr+5P2QCeon1uj5mrOLNTOrPYPPi5ricLug/nreEUhsgS/k3lA9JGpVbd+tacMEKmXKmFxHCEMjWPBg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAHwAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvgGAAAABsAAAAJAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAAAAAAAQAAAB8AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4BgAAAAbAAAACgAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAB8AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4BgAAAAbAAAACgAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAAfAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+AYAAAAGwAAAAoAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAAFAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAeAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+B8AAAAGwAAAAkAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAAAAAABAAAAHwAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvgGAAAABsAAAAJAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAA', '{0wriernSr+5P2QCeon1uj5mrOLNTOrPYPPi5ricLug/nreEUhsgS/k3lA9JGpVbd+tacMEKmXKmFxHCEMjWPBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('c3cd47a311e025446f72c50426b5b5444e5261431fc5760e8e57467c87cd49fc', 30, 1, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964117001, 100, 1, '2019-06-03 16:35:33.708736', '2019-06-03 16:35:33.708737', 128849022976, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAJAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAATCeMFAAAABA7ZMKq80ucQSt+55q+6VQrG3Hrv6zHtOLwkfAxxsZdYPIuk7xZsgbyhOCVXjheOQ9ygAW1vtybdXG41AxSFRtAg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAHgAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvgfAAAABsAAAAIAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAAAAAAAQAAAB4AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4HwAAAAbAAAACQAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAB4AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4HwAAAAbAAAACQAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAAeAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+B8AAAAGwAAAAkAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAA=', 
'AAAAAgAAAAMAAAAdAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+DgAAAAGwAAAAgAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAAAAAABAAAAHgAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvgfAAAABsAAAAIAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAA', '{7ZMKq80ucQSt+55q+6VQrG3Hrv6zHtOLwkfAxxsZdYPIuk7xZsgbyhOCVXjheOQ9ygAW1vtybdXG41AxSFRtAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('69f64ae0f809b08996c1f394ee795001a40eee69adb675ab63bfd1932d3aafb2', 29, 1, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964117000, 100, 1, '2019-06-03 16:35:33.718611', '2019-06-03 16:35:33.718612', 124554055680, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATCeMFAAAABAi69qDHclVS9A8GAaqyk6oIxiMC2KXXEneFijfxH5VyLGIQZNAxOOcCPpIalU6P1pYRX3K4OlKHZ4hIdxJzD6BQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAHQAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvg4AAAABsAAAAHAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAAAAAAAQAAAB0AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4OAAAAAbAAAACAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAB0AAAAAAAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAAlQL4OAAAAAbAAAACAAAAAEAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAwAAAAtleGFtcGxlLmNvbQACAAICAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAAAAAAAEAAAAdAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+DgAAAAGwAAAAgAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAcAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAAAAAABAAAAHQAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvg4AAAABsAAAAHAAAAAQAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAABAAAAAHyd2ctWIE8qo8kBxI2FnNzaYft9DtG3MbIUDyYWQxOQAAAAAQAAAAAAAAAA', '{i69qDHclVS9A8GAaqyk6oIxiMC2KXXEneFijfxH5VyLGIQZNAxOOcCPpIalU6P1pYRX3K4OlKHZ4hIdxJzD6BQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('fe3707fbd5c844395c598f31dc719c61218d4cea4e8dddadb6733f4866089100', 28, 1, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964116993, 100, 1, '2019-06-03 16:35:33.729669', '2019-06-03 16:35:33.729669', 120259088384, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEA/GIgE9sYPGwbCiIdLdhoEu25CyB0ZAcmjQonQItu6SE0gaSBVT/le355A/dw1NPaoXY9P/u0ou9D7h5Vb1fcK', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAEAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAbAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+QAAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+OcAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{PxiIBPbGDxsGwoiHS3YaBLtuQsgdGQHJo0KJ0CLbukhNIGkgVU/5Xt+eQP3cNTT2qF2PT/7tKLvQ+4eVW9X3Cg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('345ef7f85c6ea297e3f994feef279b63812628681bd173a1f615185a4368e482', 28, 2, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964116994, 100, 1, '2019-06-03 16:35:33.729884', '2019-06-03 16:35:33.729884', 120259092480, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAACAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEDYxq3zpaFIC2JcuJUbrQ3MFXzqvu+5G7XUi4NnHlfbLutn76ylQcjuwLgbUG2lqcQfl75doPUZyurKtFP1rkMO', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAIAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAIAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+OcAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+M4AAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{2Mat86WhSAtiXLiVG60NzBV86r7vuRu11IuDZx5X2y7rZ++spUHI7sC4G1BtpanEH5e+XaD1GcrqyrRT9a5DDg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2a14735d7b05109359444acdd87e7fe92c98e9295d2ba61b05e25d1f7ee10fd3', 28, 3, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964116995, 100, 1, '2019-06-03 16:35:33.730069', '2019-06-03 16:35:33.73007', 120259096576, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAADAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEAKuQ1exMu8hdf8dOPeULX2DG7DZx5WWIUFHXJMWGG9KmVrQoZDt2S6a/1uYEVJnvvY/EoJM5RpVjh2ZCs30VYA', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAADAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAABAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAMAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAEAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAMAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+M4AAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+LUAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{CrkNXsTLvIXX/HTj3lC19gxuw2ceVliFBR1yTFhhvSpla0KGQ7dkumv9bmBFSZ772PxKCTOUaVY4dmQrN9FWAA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('4f9598206ab17cf27b5c3eb9e906d63ebee2626654112eabdd2bce7bf12cccf2', 28, 4, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964116996, 100, 1, '2019-06-03 16:35:33.730256', '2019-06-03 16:35:33.730256', 120259100672, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATCeMFAAAABAAd6MzHDjUdRtHozzDnD3jJA+uRDCar3PQtuH/43pnROzk1HkovJPQ1YyzcpOb/NeuU/LKNzseL0PJNasVX1lAQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAADAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAEAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAQAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+LUAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+JwAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Ad6MzHDjUdRtHozzDnD3jJA+uRDCar3PQtuH/43pnROzk1HkovJPQ1YyzcpOb/NeuU/LKNzseL0PJNasVX1lAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('852ba25e0e4aa149a22dc193bcb645ae9eba23e7f7432707f3b910474e9b6a5b', 28, 5, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964116997, 100, 1, '2019-06-03 16:35:33.730474', '2019-06-03 16:35:33.730474', 120259104768, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAACAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAABMJ4wUAAAAEAnFzc6kqweyIL4TzIDbr+8GUOGGs1W5jcX5iSNw4DeonzQARlejYJ9NOn/XkrcoC9Hvd8hc5lNx+1h991GxJUJ', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAEAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAFAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAUAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgACAgAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+JwAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+IMAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Jxc3OpKsHsiC+E8yA26/vBlDhhrNVuY3F+YkjcOA3qJ80AEZXo2CfTTp/15K3KAvR73fIXOZTcftYffdRsSVCQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('8ccc0c28c3e99a63cc59bad7dec3f5c56eb3942c548ecd40bc39c509d6b081d4', 28, 6, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964116998, 100, 1, '2019-06-03 16:35:33.730663', '2019-06-03 16:35:33.730664', 120259108864, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAC2V4YW1wbGUuY29tAAAAAAAAAAAAAAAAATCeMFAAAABAkID6CkBHP9eovLQXkMQJ7QkE6NWlmdKGmLxaiI1YaVKZaKJxz5P85x+6wzpYxxbs6Bd2l4qxVjS7Q36DwRiqBA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAFAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAGAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAAAAgACAgAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAYAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+IMAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+GoAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{kID6CkBHP9eovLQXkMQJ7QkE6NWlmdKGmLxaiI1YaVKZaKJxz5P85x+6wzpYxxbs6Bd2l4qxVjS7Q36DwRiqBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('83201014880073f8eff6f21ae76e51c2c4faf533e550ecd3c2205b48a092960a', 28, 7, 'GCIFFRQKHMH6JD7CK5OI4XVCYCMNRNF6PYA7JTCR3FPHPJZQTYYFB5ES', 115964116999, 100, 1, '2019-06-03 16:35:33.730949', '2019-06-03 16:35:33.73095', 120259112960, 'AAAAAJBSxgo7D+SP4ldcjl6iwJjYtL5+AfTMUdled6cwnjBQAAAAZAAAABsAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAB8ndnLViBPKqPJAcSNhZzc2mH7fQ7RtzGyFA8mFkMTkAAAAAEAAAAAAAAAATCeMFAAAABAtYtlsqMReQo1UoU2GYjb3h52wEKvnouCSO6LQO1xm/ArhtQO/sX5q35St8BjaYWEiFnp+SQj2FZC89OswCldAw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAGAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAHAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvhRAAAABsAAAAHAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAADAAAAC2V4YW1wbGUuY29tAAIAAgIAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAcAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAcAAAABAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAMAAAALZXhhbXBsZS5jb20AAgACAgAAAAEAAAAAfJ3Zy1YgTyqjyQHEjYWc3Nph+30O0bcxshQPJhZDE5AAAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+GoAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAcAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+FEAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{tYtlsqMReQo1UoU2GYjb3h52wEKvnouCSO6LQO1xm/ArhtQO/sX5q35St8BjaYWEiFnp+SQj2FZC89OswCldAw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('700fa44bb40e6ad2c5888656cd2e7b8d86de3d3557b653ae6874466175d64927', 27, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 14, 100, 1, '2019-06-03 16:35:33.756542', '2019-06-03 16:35:33.756543', 115964121088, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAOAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAkFLGCjsP5I/iV1yOXqLAmNi0vn4B9MxR2V53pzCeMFAAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBq3GPDVeRPfwqtW45GZNiUdQ9j6E9Nsz/lMYWcWDWGCZADSsEiEoXar1HWFK6drptsGEl9P6I9f7C2GBKb4YQM', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAGwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaVcReCiAAAAAAAAAANAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaVcReCiAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAbAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpVxF4KIAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAbAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpMdC56IAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbAAAAAAAAAACQUsYKOw/kj+JXXI5eosCY2LS+fgH0zFHZXnenMJ4wUAAAAAJUC+QAAAAAGwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAZAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpVxF4LsAAAAAAAAAA0AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAbAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpVxF4KIAAAAAAAAAA0AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{atxjw1XkT38KrVuORmTYlHUPY+hPTbM/5TGFnFg1hgmQA0rBIhKF2q9R1hSuna6bbBhJfT+iPX+wthgSm+GEDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('a76e0260f6b83c6ea93f545d17de721c079dc31e81ee5edc41f159ec5fb48443', 26, 1, 'GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q', 107374182401, 100, 1, '2019-06-03 16:35:33.768436', '2019-06-03 16:35:33.768437', 111669153792, 'AAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAZAAAABkAAAABAAAAAAAAAAAAAAABAAAAAAAAAAQAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAAAAAAEyboYYAAAAQBqzCYDuLYn/jXhfEVxEGigMCJGoOBCK92lUb3Um15PgwSJ63tNl+FpH8+y5c+mCs/rzcvdyo9uXdodd4LXWiQg=', 
'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAUAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA', 'AAAAAQAAAAIAAAADAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAHc1lAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABoAAAACAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAUAAAABVVNEAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAAAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA', 'AAAAAgAAAAMAAAAZAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+QAAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+OcAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{GrMJgO4tif+NeF8RXEQaKAwIkag4EIr3aVRvdSbXk+DBInre02X4Wkfz7Llz6YKz+vNy93Kj25d2h13gtdaJCA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('92a654c76966ac61acc9df0b75f91cbde3b551c9e9766730827af42d1e247cc3', 26, 2, 'GB6GN3LJUW6JYR7EDOJ47VBH7D45M4JWHXGK6LHJRAEI5JBSN2DBQY7Q', 107374182402, 100, 1, '2019-06-03 16:35:33.76906', '2019-06-03 16:35:33.76906', 111669157888, 'AAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAZAAAABkAAAACAAAAAAAAAAAAAAABAAAAAAAAAAQAAAAAAAAAAVVTRAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAAAdzWUAAAAAAEAAAABAAAAAAAAAAEyboYYAAAAQBbE9T7oBKoN0/S3AV7GoSRe+xT79SlWNCYEtL1RPExL8FLhw5EDsXLoAvIBbBvHIr9NKcPtWDyhcHlIuaZKIg8=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAYAAAAAAAAAAVVTRAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA', 'AAAAAQAAAAIAAAADAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAB3NZQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAHc1lAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAMAAAADAAAAGgAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvjOAAAABkAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAB3NZQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAHc1lAAAAAAAdzWUAAAAAAAAAAAAAAAAAAAAABoAAAACAAAAAHxm7WmlvJxH5BuTz9Qn+PnWcTY9zK8s6YgIjqQyboYYAAAAAAAAAAYAAAAAAAAAAVVTRAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAAAdzWUAAAAAAEAAAABAAAAAQAAAAAAAAAA', 'AAAAAgAAAAMAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+OcAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAaAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+M4AAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{FsT1PugEqg3T9LcBXsahJF77FPv1KVY0JgS0vVE8TEvwUuHDkQOxcugC8gFsG8civ00pw+1YPKFweUi5pkoiDw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('5065cd7c97cfb6fbf7da8493beed47ed2c7efb3b00b77a4c92692ed487fb86a4', 25, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 13, 100, 1, '2019-06-03 16:35:33.779849', 
'2019-06-03 16:35:33.779849', 107374186496, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAANAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAfGbtaaW8nEfkG5PP1Cf4+dZxNj3MryzpiAiOpDJuhhgAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBthwT3JCg5IZkKRNK3pHBa/eG8zq8Af9gFPWlYvEdRo6jzA5D9fYOcDpKD3dEAuPLNNAHj9tNbZUJA3rwxN94B', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAGQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaXxSNm7AAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaXxSNm7AAAAAAAAAANAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAZAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpfFI2bsAAAAAAAAAA0AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAZAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpVxF4LsAAAAAAAAAA0AAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAAAAAAAAAAB8Zu1ppbycR+Qbk8/UJ/j51nE2PcyvLOmICI6kMm6GGAAAAAJUC+QAAAAAGQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpfFI2dQAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAZAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpfFI2bsAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{bYcE9yQoOSGZCkTSt6RwWv3hvM6vAH/YBT1pWLxHUaOo8wOQ/X2DnA6Sg93RALjyzTQB4/bTW2VCQN68MTfeAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('01346de1ca30ce03149d9f54945956a22f9cbed3d81f81c62bb59cf8cdd8b893', 24, 1, 'GB2QIYT2IAUFMRXKLSLLPRECC6OCOGJMADSPTRK7TGNT2SFR2YGWDARD', 90194313217, 100, 1, '2019-06-03 16:35:33.79149', '2019-06-03 16:35:33.79149', 103079219200, 'AAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAZAAAABUAAAABAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABVVNEAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAAAAAAAEeGjAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAbHWDWEAAABA0L+69D1hxpytAkX6cvPiBuO80ql8SQKZ15POVxx9wYl6mZrL+6UWGab/+6ng2M+a29E7ON+Xs46Y9MNqTh91AQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAEAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAAAAAAAAwAAAAAAAAAAC+vCAAAAAAFVU0QAAAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAAAvrwgAAAAAAAAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAAAAAAAQAAAABVVNEAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAAAAAAABfXhAAAAAAEAAAABAAAAAAAAAAAAAAAA', 
'AAAAAQAAAAIAAAADAAAAGAAAAAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAACVAvjnAAAABUAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAGAAAAAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAACVAvjnAAAABUAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAMAAAAXAAAAAgAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAAAAAADAAAAAAAAAAFVU0QAAAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAAAvrwgAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAIAAAACAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAAAAAAMAAAADAAAAFwAAAAEAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAABVVNEAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAAAAAAAf/////////8AAAABAAAAAQAAAAAL68IAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAYAAAAAQAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAFVU0QAAAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAAAvrwgB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAABcAAAAAAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAlQL4zgAAAAVAAAAAgAAAAIAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAL68IAAAAAAAAAAAAAAAABAAAAGAAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACSCAhOAAAABUAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAAgAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAAAAAAEAAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAAAAAAAX14QAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAMAAAAYAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJUC+OcAAAAFQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAYAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJf96WcAAAAFQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAX14QAAAAAAAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAAVAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJUC+QAAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAYAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJUC+OcAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{0L+69D1hxpytAkX6cvPiBuO80ql8SQKZ15POVxx9wYl6mZrL+6UWGab/+6ng2M+a29E7ON+Xs46Y9MNqTh91AQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('d8b2508123656b1df1ee17c2767829bc22ab41959ad25e6ccc520e849516fba1', 23, 1, 'GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC', 90194313218, 100, 1, '2019-06-03 16:35:33.815586', '2019-06-03 16:35:33.815586', 98784251904, 'AAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAZAAAABUAAAACAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAC+vCAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAARXFjncAAABATR48xYiKbu8AOoXFwvcvILZ0/pQkfGuwwAoIZNefr7ydIwlcuL44XPM7pJ/6jDSbqBudTNWdE2JRjuq7HI7IAA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAC+vCAAAAAAEAAAABAAAAAAAAAAAAAAAA', 
'AAAAAQAAAAIAAAADAAAAFwAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvjOAAAABUAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFwAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvjOAAAABUAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAMAAAAXAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+M4AAAAFQAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAXAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+M4AAAAFQAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAC+vCAAAAAAAAAAAAAAAAAAAAABcAAAACAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAC+vCAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAwAAABYAAAABAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAAAAAAH//////////AAAAAQAAAAAAAAAAAAAAAQAAABcAAAABAAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAAVVTRAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAAAAAAAAH//////////AAAAAQAAAAEAAAAAC+vCAAAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAWAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+OcAAAAFQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAXAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+M4AAAAFQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{TR48xYiKbu8AOoXFwvcvILZ0/pQkfGuwwAoIZNefr7ydIwlcuL44XPM7pJ/6jDSbqBudTNWdE2JRjuq7HI7IAA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('be05e4bd966d58689e1b6fae013e5aa77bde56e6acd2db9b96870e5e746a4ab7', 22, 1, 'GBOK7BOUSOWPHBANBYM6MIRYZJIDIPUYJPXHTHADF75UEVIVYWHHONQC', 90194313217, 100, 1, '2019-06-03 16:35:33.827572', '2019-06-03 16:35:33.827572', 94489284608, 'AAAAAFyvhdSTrPOEDQ4Z5iI4ylA0PphL7nmcAy/7QlUVxY53AAAAZAAAABUAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYX//////////AAAAAAAAAAEVxY53AAAAQDMCWfC0eGNJuYIX3s5AUNLernpcHTn8O6ygq/Nw3S5vny/W42O5G4G6UsihVU1xd5bR4im2+VzQlQYQhe0jhwg=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAFgAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvjnAAAABUAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFgAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvjnAAAABUAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAWAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+OcAAAAFQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAWAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+OcAAAAFQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWAAAAAQAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAFVU0QAAAAAAHUEYnpAKFZG6lyWt8SCF5wnGSwA5PnFX5mbPUix1g1hAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAVAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+QAAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAWAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+OcAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{MwJZ8LR4Y0m5ghfezkBQ0t6uelwdOfw7rKCr83DdLm+fL9bjY7kbgbpSyKFVTXF3ltHiKbb5XNCVBhCF7SOHCA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2a6987a6930eab7e3becacf9b76ed7a06802668c1f1eb0f82f5671014b4b636a', 21, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 11, 100, 1, '2019-06-03 16:35:33.838303', '2019-06-03 16:35:33.838303', 90194317312, 
'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAALAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAXK+F1JOs84QNDhnmIjjKUDQ+mEvueZwDL/tCVRXFjncAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDfpUesb4kQ/RfBx1UxqNOtZ2+4R4S0XxzggPR1C3YyhZAr/K8KyZCg4ejDTFnhu9qAh4GLZLkbBraGncT9DcYF', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAFQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LacbTsvUAAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LacbTsvUAAAAAAAAAALAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOy9QAAAAAAAAAAsAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpoZL0tQAAAAAAAAAAsAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVAAAAAAAAAABcr4XUk6zzhA0OGeYiOMpQND6YS+55nAMv+0JVFcWOdwAAAAJUC+QAAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOzAYAAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOy+0AAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{36VHrG+JEP0XwcdVMajTrWdvuEeEtF8c4ID0dQt2MoWQK/yvCsmQoOHow0xZ4bvagIeBi2S5Gwa2hp3E/Q3GBQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('96415ac1d2f79621b26b1568f963fd8dd6c50c20a22c7428cefbfe9dee867588', 21, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 12, 100, 1, '2019-06-03 16:35:33.838461', '2019-06-03 16:35:33.838461', 90194321408, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAMAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAdQRiekAoVkbqXJa3xIIXnCcZLADk+cVfmZs9SLHWDWEAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDdJGdvdZ2S4QoXdO+Odt8ZRdeVu7mBvq7FtP9okqr98pGD/jSAraklQvaRmCyMALIMD2kG8R2KjhKvy7oIL6IB', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAFQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaaGS9LUAAAAAAAAAALAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaaGS9LUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpoZL0tQAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpfFI2dQAAAAAAAAAAwAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVAAAAAAAAAAB1BGJ6QChWRupclrfEghecJxksAOT5xV+Zmz1IsdYNYQAAAAJUC+QAAAAAFQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOy+0AAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAVAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOy9QAAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{3SRnb3WdkuEKF3TvjnbfGUXXlbu5gb6uxbT/aJKq/fKRg/40gK2pJUL2kZgsjACyDA9pBvEdio4Sr8u6CC+iAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('f08dc1fec150f276562866ce4f5272f658cf0bd9fd8c1d96a22c196be2e1b25a', 20, 1, 'GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD', 68719476739, 100, 1, '2019-06-03 16:35:33.848132', '2019-06-03 16:35:33.848132', 85899350016, 
'AAAAAONt/6wGI884Zi6sYDYC1GOV/drnh4OcRrTrqJPoOTUKAAAAZAAAABAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAIAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAAAAAAAAAAAB6Dk1CgAAAEB+7jxesBKKrF343onyycjp2tiQLZiGH2ETl+9fuOqotveY2rIgvt9ng+QJ2aDP3+PnDsYEa9ZUaA+Zne2nIGgE', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAACAAAAAAAAAAEAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAgAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygAAAAAAAAAAADuaygAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAMAAAATAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAUAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAALLQXgB//////////wAAAAEAAAAAAAAAAAAAAAMAAAATAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAIAAAADAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAFAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACGHEY1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAAEwAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAA7msoAAAAAAHc1lAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAKPpqzUAAAAEAAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAAdzWUAAAAAAAAAAAA', 'AAAAAgAAAAMAAAATAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+M4AAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAUAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{fu48XrASiqxd+N6J8snI6drYkC2Yhh9hE5fvX7jqqLb3mNqyIL7fZ4PkCdmgz9/j5w7GBGvWVGgPmZ3tpyBoBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('198844c8b472daacc5b717695a4ca16ac799a13fb2cf4152d19e2117ae1c56c3', 19, 1, 'GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD', 68719476738, 100, 1, '2019-06-03 16:35:33.865967', '2019-06-03 16:35:33.865967', 81604382720, 'AAAAAONt/6wGI884Zi6sYDYC1GOV/drnh4OcRrTrqJPoOTUKAAAAZAAAABAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAIAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAAQI6ATBFImTS1I7Fly9YiufQ/dC4uMOetO+m/BysWXyAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAdzWUAAAAAAEAAAAAAAAAAAAAAAHoOTUKAAAAQMs9vNZ518oYUMp38TakovW//DDTbs/9oPj1RAix5ElC/d7gbWaaNNJxKQR7eMNO6rB+ntGqee4WurTJgA4k2ws=', 
'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAACAAAAAAAAAAIAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAQAAAAAAAAAAdzWUAAAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAgAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAAAAAAAAAAAAHc1lAAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAB3NZQAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAEwAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvjOAAAABAAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEwAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvjOAAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAMAAAARAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAATAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAHc1lAB//////////wAAAAEAAAAAAAAAAAAAAAMAAAASAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygB//////////wAAAAEAAAAAAAAAAAAAAAEAAAATAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAMAAAASAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAALLQXgAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAATAAAAAgAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAACAAAAAUVVUgAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAALLQXgAAAAAA7msoAAAAAAAAAAAAAAAAAQAAABMAAAAAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAlQL4tQAAAAQAAAAAwAAAAIAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAAO5rKAAAAAAB3NZQAAAAAAAAAAAAAAAADAAAAEgAAAAIAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAQAAAAAAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAADuaygAAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAABAAAAEwAAAAIAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAAAAAAAQAAAAAAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAB3NZQAAAAAAQAAAAIAAAAAAAAAAAAAAAA=', 'AAAAAgAAAAMAAAARAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+OcAAAAEAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAATAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+M4AAAAEAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{yz281nnXyhhQynfxNqSi9b/8MNNuz/2g+PVECLHkSUL93uBtZpo00nEpBHt4w07qsH6e0ap57ha6tMmADiTbCw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('902b90c2322b9e6b335e7543389a7446b86e3039ebf59ec66dffb50eaec0dc85', 18, 1, 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU', 68719476737, 100, 1, '2019-06-03 16:35:33.884097', '2019-06-03 16:35:33.884097', 77309415424, 'AAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAZAAAABAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAA7msoAAAAAAAAAAAERVFexAAAAQC9X2I3Zz1x3AQMqL4XCzePTlwnokv2BQnWGmT007oH59gai3eNu7/WVoHtW8hsgHjs1mZK709FzzRF2cbD2tQE=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAARAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAASAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAADuaygB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAQAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{L1fYjdnPXHcBAyovhcLN49OXCeiS/YFCdYaZPTTugfn2BqLd427v9ZWge1byGyAeOzWZkrvT0XPNEXZxsPa1AQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('ca756d1519ceda79f8722042b12cea7ba004c3bd961adb62b59f88a867f86eb3', 18, 2, 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU', 68719476738, 100, 1, '2019-06-03 16:35:33.884375', '2019-06-03 16:35:33.884376', 77309419520, 'AAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAZAAAABAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAARFUV7EAAABALuai5QxceFbtAiC5nkntNVnvSPeWR+C+FgplPAdRgRS+PPESpUiSCyuiwuhmvuDw7kwxn+A6E0M4ca1s2qzMAg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA', 'AAAAAQAAAAIAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAAAAABIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAAA7msoAAAAAAEAAAACAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+M4AAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Luai5QxceFbtAiC5nkntNVnvSPeWR+C+FgplPAdRgRS+PPESpUiSCyuiwuhmvuDw7kwxn+A6E0M4ca1s2qzMAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('37bb79f6959c0e8e9b3d31f6c9308d8d084d9c6742cfa56ca094cfa6eae99423', 18, 3, 'GAXMF43TGZHW3QN3REOUA2U5PW5BTARXGGYJ3JIFHW3YT6QRKRL3CPPU', 68719476739, 100, 1, '2019-06-03 16:35:33.884772', '2019-06-03 16:35:33.884772', 77309423616, 'AAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAZAAAABAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAAAstBeAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAARFUV7EAAABArzp9Fxxql+yoysglDjXm9+rsJeNX2GsSa7TOy3AzHOu4Y5Z8ICx52Q885gQGQWMtEP0w6yh83d6+o6kjC/WuAg==', 
'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAAAstBeAAAAAAEAAAABAAAAAAAAAAAAAAAA', 'AAAAAQAAAAIAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAO5rKAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAMAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAQAAAAMAAAADAAAAEgAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvi1AAAABAAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAO5rKAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAALLQXgAAAAAA7msoAAAAAAAAAAAAAAAAAAAAABIAAAACAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAIAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAAAAAAAstBeAAAAAAEAAAABAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+M4AAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAASAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+LUAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{rzp9Fxxql+yoysglDjXm9+rsJeNX2GsSa7TOy3AzHOu4Y5Z8ICx52Q885gQGQWMtEP0w6yh83d6+o6kjC/WuAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('cdef45dd961d59375351ea7dd7ef6414ff49371a335723e84dafacea1e13665a', 17, 1, 'GDRW375MAYR46ODGF2WGANQC2RRZL7O246DYHHCGWTV2RE7IHE2QUQLD', 68719476737, 100, 1, '2019-06-03 16:35:33.898077', '2019-06-03 16:35:33.898077', 73014448128, 'AAAAAONt/6wGI884Zi6sYDYC1GOV/drnh4OcRrTrqJPoOTUKAAAAZAAAABAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsX//////////AAAAAAAAAAHoOTUKAAAAQIjLqcYXE8EAsH6Dx2hwPjiEfHGZ4jsMNZZc7PynNiJi9kFXjfvvLDlWizGAr2B9MFDrfDRDvjnBxKKhJifEcQM=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAEQAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvjnAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEQAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvjnAAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAARAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+OcAAAAEAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAARAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+OcAAAAEAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAARAAAAAQAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAFVU0QAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAQAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAARAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{iMupxhcTwQCwfoPHaHA+OIR8cZniOww1llzs/Kc2ImL2QVeN++8sOVaLMYCvYH0wUOt8NEO+OcHEoqEmJ8RxAw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('d1f593eb5e14f97027bc79821fa46628c107034fba9a5acef6a9da79e051ee73', 17, 2, 'GACAR2AEYEKITE2LKI5RMXF5MIVZ6Q7XILROGDT22O7JX4DSWFS7FDDP', 68719476737, 100, 1, '2019-06-03 16:35:33.898272', '2019-06-03 16:35:33.898273', 73014452224, 
'AAAAAAQI6ATBFImTS1I7Fly9YiufQ/dC4uMOetO+m/BysWXyAAAAZAAAABAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsX//////////AAAAAAAAAAFysWXyAAAAQI7hbwZc1+KWfheVnYAq5TXFX9ancHJmJq0wV0c9ONIfG6U8trhIVeVoiED2eUFFmhx+bBtF9TPSvifF/mfDlQk=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAEQAAAAAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAACVAvjnAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEQAAAAAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAACVAvjnAAAABAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAARAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+OcAAAAEAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAARAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+OcAAAAEAAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAARAAAAAQAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAFFVVIAAAAAAC7C83M2T23Bu4kdQGqdfboZgjcxsJ2lBT23ifoRVFexAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAQAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAARAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+OcAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{juFvBlzX4pZ+F5WdgCrlNcVf1qdwcmYmrTBXRz040h8bpTy2uEhV5WiIQPZ5QUWaHH5sG0X1M9K+J8X+Z8OVCQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('a5a9e3ca63e9cc155359c97337bcb14464cca56b230a4d0c7f27582644d16809', 16, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 8, 100, 1, '2019-06-03 16:35:33.910688', '2019-06-03 16:35:33.910688', 68719480832, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA423/rAYjzzhmLqxgNgLUY5X92ueHg5xGtOuok+g5NQoAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBhFD/bYaTZZJ3VJ9xJqXoW5eeLK0AeFaATBH92cRfx0WUTFqp6rXx47fMBUxkWYq8bAHMfYCS5XXPRg86sAGUK', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LajaV7cGAAAAAAAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LajaV7cGAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtwYAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqEVUvgYAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAADjbf+sBiPPOGYurGA2AtRjlf3a54eDnEa066iT6Dk1CgAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXt1EAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtzgAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{YRQ/22Gk2WSd1SfcSal6FuXniytAHhWgEwR/dnEX8dFlExaqeq18eO3zAVMZFmKvGwBzH2AkuV1z0YPOrABlCg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('6a056189b45760c607e331c90c5a8b4cd720961df8bc8cecfd4aa388b577a6cb', 16, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 9, 100, 1, '2019-06-03 16:35:33.910845', '2019-06-03 16:35:33.910845', 68719484928, 
'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAJAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAABAjoBMEUiZNLUjsWXL1iK59D90Li4w56076b8HKxZfIAAAACVAvkAAAAAAAAAAABVvwF9wAAAEAxC5cl7tkjQI0cfFZTiIFDuo0SwyYnNqTUH2hxDBtm7h/vUkBG3cgwGXS87ninVkhmvdIpTWfeIeGiw7kgefUA', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LahFVL4GAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LahFVL4GAAAAAAAAAAJAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqEVUvgYAAAAAAAAAAkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtp7BRxQYAAAAAAAAAAkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAECOgEwRSJk0tSOxZcvWIrn0P3QuLjDnrTvpvwcrFl8gAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtzgAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtx8AAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{MQuXJe7ZI0CNHHxWU4iBQ7qNEsMmJzak1B9ocQwbZu4f71JARt3IMBl0vO54p1ZIZr3SKU1n3iHhosO5IHn1AA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('18bf6cce20cfbb0f9079c4b8783718949d13bd12d173a60363d2b8e3a07efead', 16, 3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 10, 100, 1, '2019-06-03 16:35:33.910968', '2019-06-03 16:35:33.910968', 68719489024, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAKAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAALsLzczZPbcG7iR1Aap19uhmCNzGwnaUFPbeJ+hFUV7EAAAACVAvkAAAAAAAAAAABVvwF9wAAAEC/RVto6ytAqHpd6ZFWjwXQyXopKORz8QSvz0d8RoPrOEBgNEuAj8+kbyhS7QieOqwbiJrS0AU8YWaBQQ4zc+wL', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaewUcUGAAAAAAAAAAJAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaewUcUGAAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtp7BRxQYAAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtpxtOzAYAAAAAAAAAAoAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAuwvNzNk9twbuJHUBqnX26GYI3MbCdpQU9t4n6EVRXsQAAAAJUC+QAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtx8AAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAQAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXtwYAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{v0VbaOsrQKh6XemRVo8F0Ml6KSjkc/EEr89HfEaD6zhAYDRLgI/PpG8oUu0InjqsG4ia0tAFPGFmgUEOM3PsCw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('142c988b1f67984f74a1581de9caecf499e60f1e0eed496661aa2c559238764c', 15, 1, 'GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG', 55834574850, 100, 1, '2019-06-03 16:35:33.920804', '2019-06-03 16:35:33.920804', 64424513536, 
'AAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAZAAAAA0AAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAABVVNEAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAAF9eEAAAAAAAAAAAH2O2TcAAAAQJBUx5tWfjAwXxab9U5HOjZvBRv3u95jXbyzuqeZ/kjsyMsU0jO/g03Rf1zgect1hj4hDYGN8mW4oEot0sSTZgw=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAADwAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACThYCOAAAAA0AAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADwAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACThYCOAAAAA0AAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAOAAAAAQAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAFVU0QAAAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAPAAAAAQAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAFVU0QAAAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAAAX14QB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAOAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJOFgKcAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAPAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJOFgI4AAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{kFTHm1Z+MDBfFpv1Tkc6Nm8FG/e73mNdvLO6p5n+SOzIyxTSM7+DTdF/XOB5y3WGPiENgY3yZbigSi3SxJNmDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('30880dd42d8e402a30d8a3527b56c1e33e18e87c46e1332ea5cfc1721fd87cfb', 14, 1, 'GCHC4D2CS45CJRNN4QAHT2LFZAJIU5PA7H53K3VOP6WEJ6XWHNSNZKQG', 55834574849, 100, 1, '2019-06-03 16:35:33.933624', '2019-06-03 16:35:33.933624', 60129546240, 'AAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAZAAAAA0AAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAAAAAAAAAX14QAAAAAAAAAAAfY7ZNwAAABAieZSSuOZqlwtyjnj5d/S0GUSGiQvy0ipPLynpl4UvO8qc7CDz3vsLROlN2g50qXirydSOdao56hvRhrEfRsGCA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAADgAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACVAvjnAAAAA0AAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADgAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACVAvjnAAAAA0AAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJUC+OcAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJaAcScAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAOAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJUC+OcAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJOFgKcAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAANAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJUC+QAAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJUC+OcAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ieZSSuOZqlwtyjnj5d/S0GUSGiQvy0ipPLynpl4UvO8qc7CDz3vsLROlN2g50qXirydSOdao56hvRhrEfRsGCA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('dbd964fcfdb336a30f21c240fffdaf73d7c75880ed1b99375c62f84e3e592570', 14, 2, 'GANZGPKY5WSHWG5YOZMNG52GCK5SCJ4YGUWMJJVGZSK2FP4BI2JIJN2C', 55834574849, 100, 1, '2019-06-03 16:35:33.934033', '2019-06-03 16:35:33.934033', 60129550336, 
'AAAAABuTPVjtpHsbuHZY03dGErshJ5g1LMSmpsyVor+BRpKEAAAAZAAAAA0AAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3H//////////AAAAAAAAAAGBRpKEAAAAQGDAV/5Op2DmFUP84dmyT5G/gxn1WzgdMrkSSU7wfpu39ycq36Sg+gs2ypRjw5hxxeMUj/GVEKipcDGndei38Aw=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAADgAAAAAAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAACWgHEnAAAAA0AAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADgAAAAAAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAACWgHEnAAAAA0AAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJaAcScAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJaAcScAAAADQAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAQAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAFVU0QAAAAAAI4uD0KXOiTFreQAeellyBKKdeD5+7Vurn+sRPr2O2TcAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAANAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJUC+QAAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAOAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJUC+OcAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{YMBX/k6nYOYVQ/zh2bJPkb+DGfVbOB0yuRJJTvB+m7f3JyrfpKD6CzbKlGPDmHHF4xSP8ZUQqKlwMad16LfwDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('cd8a8e9eb53fd268d1294e228995c27f422d90783c4054e44ab0028fc1da210a', 13, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 6, 100, 1, '2019-06-03 16:35:33.949136', '2019-06-03 16:35:33.949137', 55834578944, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAji4PQpc6JMWt5AB56WXIEop14Pn7tW6uf6xE+vY7ZNwAAAACVAvkAAAAAAAAAAABVvwF9wAAAEAUtdYWyr64yv/rKPr0/vV4vYyonfsWxpxHsiYLHKJ3bm6k+ypiAByc8t0K+7bzxSLPjmjKKN5Prw7AdenlC7MB', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAADQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaoEXalRAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaoEXalRAAAAAAAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqVEAAAAAAAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqW9asFEAAAAAAAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAANAAAAAAAAAACOLg9Clzokxa3kAHnpZcgSinXg+fu1bq5/rET69jtk3AAAAAJUC+QAAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAALAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqWoAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{FLXWFsq+uMr/6yj69P71eL2MqJ37FsacR7ImCxyid25upPsqYgAcnPLdCvu288Uiz45oyijeT68OwHXp5QuzAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('bfbd5e9457d717bcf847291a6c24b7cd8db4ff784ecd4592be30d08146c0c264', 13, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 7, 100, 1, '2019-06-03 16:35:33.949341', '2019-06-03 16:35:33.949341', 55834583040, 
'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAG5M9WO2kexu4dljTd0YSuyEnmDUsxKamzJWiv4FGkoQAAAACVAvkAAAAAAAAAAABVvwF9wAAAEDY1TiMj+qj8+zYb2Vb60h+qWxZtFfSGwb0kvKttSFAHQhGOjIddiVQopx9LDRO6UgPmLLxFvQpIzeGnagh3vQD', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAADQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LalvWrBRAAAAAAAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LalvWrBRAAAAAAAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqW9asFEAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqNpXt1EAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAANAAAAAAAAAAAbkz1Y7aR7G7h2WNN3RhK7ISeYNSzEpqbMlaK/gUaShAAAAAJUC+QAAAAADQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqWoAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAANAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqVEAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{2NU4jI/qo/Ps2G9lW+tIfqlsWbRX0hsG9JLyrbUhQB0IRjoyHXYlUKKcfSw0TulID5iy8Rb0KSM3hp2oId70Aw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('0e128647b2b93786b6b76e182dcda0173757066f8caf0523d1ba3b47fd6f720d', 12, 1, 'GDCVTBGSEEU7KLXUMHMSXBALXJ2T4I2KOPXW2S5TRLKDRIAXD5UDHAYO', 47244640257, 100, 1, '2019-06-03 16:35:33.960369', '2019-06-03 16:35:33.960369', 51539611648, 'AAAAAMVZhNIhKfUu9GHZK4QLunU+I0pz721Ls4rUOKAXH2gzAAAAZAAAAAsAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAg/K/Blr9FO/nVEGLdmCzChMYpmcQzxIhFm6NBzxznX0AAAAAHc1lAAAAAAAAAAABFx9oMwAAAEBwY9HQAR2SMPe3JPvmBBtBk2jfog0GFEFYkLNFzQNqvYl7iZitmO5FQmkKlv/NO5ZcaWBqXcHhOQpk0s2XSBQF', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAADAAAAAAAAAAAxVmE0iEp9S70YdkrhAu6dT4jSnPvbUuzitQ4oBcfaDMAAAACVAvjnAAAAAsAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADAAAAAAAAAAAxVmE0iEp9S70YdkrhAu6dT4jSnPvbUuzitQ4oBcfaDMAAAACVAvjnAAAAAsAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAMAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAJUC+OcAAAACwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAMAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAI2Pn6cAAAACwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAACD8r8GWv0U7+dUQYt2YLMKEximZxDPEiEWbo0HPHOdfQAAAAAdzWUAAAAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAALAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAJUC+QAAAAACwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAMAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAJUC+OcAAAACwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{cGPR0AEdkjD3tyT75gQbQZNo36INBhRBWJCzRc0Dar2Je4mYrZjuRUJpCpb/zTuWXGlgal3B4TkKZNLNl0gUBQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('67601a2ca212b84092a7d3c521172b67f4b93d72b726a06c540917d2ab83c1a1', 11, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 5, 100, 1, '2019-06-03 16:35:33.97177', '2019-06-03 16:35:33.97177', 47244644352, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAxVmE0iEp9S70YdkrhAu6dT4jSnPvbUuzitQ4oBcfaDMAAAACVAvkAAAAAAAAAAABVvwF9wAAAEBHLko6/Tbv0v/5CWHkixXnbyoU6qQ6yewZGqPHFSzNxMfud86eYGkN0j4msMCXfLAou7iKOVn0MWyzlpvYRA0B', 
'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaqZYKKDAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaqZYKKDAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAALAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqplgooMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAALAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqgRdqYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAALAAAAAAAAAADFWYTSISn1LvRh2SuEC7p1PiNKc+9tS7OK1DigFx9oMwAAAAJUC+QAAAAACwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAKAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqplgopwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAALAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqplgooMAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Ry5KOv0279L/+Qlh5IsV528qFOqkOsnsGRqjxxUszcTH7nfOnmBpDdI+JrDAl3ywKLu4ijlZ9DFss5ab2EQNAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('fdb696a797b769176cbaed3a50e4a6a8671119621f65a3f954a3bcf100c7ef0c', 10, 1, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 38654705665, 200, 2, '2019-06-03 16:35:33.980859', '2019-06-03 16:35:33.980859', 42949677056, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAAAyAAAAAkAAAABAAAAAAAAAAAAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAABfXhAAAAAAAAAAAB2QViXgAAAEAxyl5gvCCDC7l0pq9b/Btd3cOUUcY9Rv0ALxVjul4EVSL1Vygr107GjDo11+YswdmlCuWf7KItU0chlogpns4L', 'AAAAAAAAAMgAAAAAAAAAAgAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvjOAAAAAkAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvjOAAAAAkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAACAAAABAAAAAMAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqpZlshwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAKAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqpfjKlwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAKAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+M4AAAACQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAKAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJOFgI4AAAACQAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAQAAAADAAAACgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaqX4ypcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaqZYKKcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAACgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACThYCOAAAAAkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACSCAhOAAAAAkAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+QAAAAACQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAKAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+M4AAAACQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{McpeYLwggwu5dKavW/wbXd3DlFHGPUb9AC8VY7peBFUi9VcoK9dOxow6NdfmLMHZpQrln+yiLVNHIZaIKZ7OCw==}', 'none', NULL, NULL, true, 200); +INSERT INTO history_transactions VALUES 
('66c28c0ccd5a2e47026aacafa2ecd3c501fe5de349ef376c0f8afb893c7bb55d', 9, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 4, 100, 1, '2019-06-03 16:35:33.989997', '2019-06-03 16:35:33.989997', 38654709760, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvkAAAAAAAAAAABVvwF9wAAAEARD6MVWgEASusfhr6JdF9K3Rie2XCRJKl/NoKyJcrd1kGs3ygpp55xu80YlFwgNVErZ/cEAHYOq06CwNfnE2sC', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LasraKscAAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LasraKscAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytoqxwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqpZlshwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+QAAAAACQAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytoqzUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytoqxwAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{EQ+jFVoBAErrH4a+iXRfSt0YntlwkSSpfzaCsiXK3dZBrN8oKaeecbvNGJRcIDVRK2f3BAB2DqtOgsDX5xNrAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('dd74eee27a59843b28a05ad08abf65eaa231b7debe4d05550c0a7a424cca5929', 8, 1, 'GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB', 30064771073, 100, 1, '2019-06-03 16:35:33.999724', '2019-06-03 16:35:33.999725', 34359742464, 'AAAAADnqxUES0av4d13+BBKxKs+6A/cJNtM1/nf4eHV3Ow06AAAAZAAAAAcAAAABAAAAAAAAAAIAAAAAAAAAewAAAAEAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAJiWgAAAAAAAAAABdzsNOgAAAEBjk5EFqV8GiL9xU62OUCKeScXxGMTMqJoD7ryiGf5jLPZJRSphbWC3ZycHE+pDuu/6EKSqcNUri5AXzQmM+GYB', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACVAvicAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACVAvicAAAAAcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyrQFLUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyr2OlUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+JwAAAABwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJTc0vwAAAABwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAHAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+QAAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+OcAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Y5ORBalfBoi/cVOtjlAinknF8RjEzKiaA+68ohn+Yyz2SUUqYW1gt2cnBxPqQ7rv+hCkqnDVK4uQF80JjPhmAQ==}', 'id', '123', NULL, true, 100); +INSERT INTO history_transactions VALUES ('2551e76a3ce4881b7bc73fdfd89d670d511ea7d4e56156252b51777023202de7', 8, 2, 'GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB', 
30064771074, 100, 1, '2019-06-03 16:35:34.000003', '2019-06-03 16:35:34.000004', 34359746560, 'AAAAADnqxUES0av4d13+BBKxKs+6A/cJNtM1/nf4eHV3Ow06AAAAZAAAAAcAAAACAAAAAAAAAAEAAAAFaGVsbG8AAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAACYloAAAAAAAAAAAXc7DToAAABAS2+MaPA79AjD0B7qjl0qEz0N6CkDmoS4kgnXjZfbvdc9IkqNm0S+vKBNgV80pSfixY147L+jvS/ganovqbLiAQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACU3NL8AAAAAcAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACU3NL8AAAAAcAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyr2OlUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyscX/UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJTc0vwAAAABwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJS2rVwAAAABwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+OcAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+M4AAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{S2+MaPA79AjD0B7qjl0qEz0N6CkDmoS4kgnXjZfbvdc9IkqNm0S+vKBNgV80pSfixY147L+jvS/ganovqbLiAQ==}', 'text', 'hello', NULL, true, 100); +INSERT INTO history_transactions VALUES ('3b36ecfbcc2adb0cfff08ae86199f64e12984f084bb03be9bb249611df82322b', 8, 3, 'GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB', 30064771075, 100, 1, '2019-06-03 16:35:34.000216', '2019-06-03 16:35:34.000216', 34359750656, 'AAAAADnqxUES0av4d13+BBKxKs+6A/cJNtM1/nf4eHV3Ow06AAAAZAAAAAcAAAADAAAAAAAAAAMBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQAAAAEAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAJiWgAAAAAAAAAABdzsNOgAAAEDC9hMtMYZ6hbx1iAdXngRcCYQmf8eu4zcB9SLH2998tVYca6QYig5Dsgy2oCMD1J7khIL9jz/VWjcPhvTVvC8L', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACUtq1cAAAAAcAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACUtq1cAAAAAcAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyscX/UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytChZUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJS2rVwAAAABwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJSQh7wAAAABwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+M4AAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+LUAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{wvYTLTGGeoW8dYgHV54EXAmEJn/HruM3AfUix9vffLVWHGukGIoOQ7IMtqAjA9Se5ISC/Y8/1Vo3D4b01bwvCw==}', 'hash', 'AQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQE=', NULL, true, 100); +INSERT INTO history_transactions VALUES 
('e14885cb66af5f7f5e991b014eec475c61cc831292cf5526cdd0cda145300837', 8, 4, 'GA46VRKBCLI2X6DXLX7AIEVRFLH3UA7XBE3NGNP6O74HQ5LXHMGTV2JB', 30064771076, 100, 1, '2019-06-03 16:35:34.000411', '2019-06-03 16:35:34.000411', 34359754752, 'AAAAADnqxUES0av4d13+BBKxKs+6A/cJNtM1/nf4eHV3Ow06AAAAZAAAAAcAAAAEAAAAAAAAAAQCAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgAAAAEAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAAAJiWgAAAAAAAAAABdzsNOgAAAEBOfq9PQ8EGcpjRWEaqGxvhBjSVuk6K5A2rthLYHnmAXmQ1JjJD3EddjiES3bPZUF5efGQvRjoEKgiB2dU3f2wF', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACUkIe8AAAAAcAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACUkIe8AAAAAcAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytChZUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqytoqzUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJSQh7wAAAABwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJRqYhwAAAABwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+LUAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+JwAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Tn6vT0PBBnKY0VhGqhsb4QY0lbpOiuQNq7YS2B55gF5kNSYyQ9xHXY4hEt2z2VBeXnxkL0Y6BCoIgdnVN39sBQ==}', 'return', 'AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI=', NULL, true, 100); +INSERT INTO history_transactions VALUES ('0fb9c2e20946222b23e1d1d660de9d74576c41cfd9b199f9d565a013c1ef89ca', 7, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 3, 100, 1, '2019-06-03 16:35:34.011682', '2019-06-03 16:35:34.011682', 30064775168, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAOerFQRLRq/h3Xf4EErEqz7oD9wk20zX+d/h4dXc7DToAAAACVAvkAAAAAAAAAAABVvwF9wAAAED8tIFyog9OeCqiaBNfxFdAlneNYTfjoNUMKi6FJCY5BqemnDBxGox3jKS/xx4zpxAToEFp3Y2M+NRJIU4g/H0J', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lau/0w21AAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lau/0w21AAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtqyrQFLUAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAAAAAAA56sVBEtGr+Hdd/gQSsSrPugP3CTbTNf53+Hh1dzsNOgAAAAJUC+QAAAAABwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDc4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDbUAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{/LSBcqIPTngqomgTX8RXQJZ3jWE346DVDCouhSQmOQanppwwcRqMd4ykv8ceM6cQE6BBad2NjPjUSSFOIPx9CQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES 
('a9085e13fbe9f84e07e320a0d445536de1afc2cfd8c7e4186687807edd2b4897', 6, 1, 'GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB', 17179869188, 100, 1, '2019-06-03 16:35:34.021962', '2019-06-03 16:35:34.021962', 25769807872, 'AAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAZAAAAAQAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAinuINUAAABA4PRAe0en/05ZH2leCeTOsxbT0cUu3wgUiWUcuDk4ya8G/gI90hlV6pzOYyAB6Zt5fN7pRrPRL/tTlnjgUAjaBvwNxUcAAABAFmdGR6JZukKJUC3Vr2YEJ/24G3tesqTv4cV5UcAozRhS2+w0PYVVqe7QTmOMNSGX/C3LxP1tSvpXdU/OhYsODw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvicAAAAAQAAAADAAAAAQAAAAAAAAAAAAAAAAECAgIAAAABAAAAAPZPnUyLZ+OYJjhn5Hkk43UuW6rOuemZPFQldOn8DcVHAAAAAQAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4nAAAAAEAAAABAAAAAEAAAAAAAAAAAAAAAABAgICAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAAYAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4nAAAAAEAAAABAAAAAEAAAAAAAAAAAAAAAABAgICAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+JwAAAABAAAAAQAAAABAAAAAAAAAAAAAAAAAgICAgAAAAEAAAAA9k+dTItn45gmOGfkeSTjdS5bqs656Zk8VCV06fwNxUcAAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAMAAAABAAAAAAAAAAAAAAAAAQICAgAAAAEAAAAA9k+dTItn45gmOGfkeSTjdS5bqs656Zk8VCV06fwNxUcAAAABAAAAAAAAAAAAAAABAAAABgAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvicAAAAAQAAAADAAAAAQAAAAAAAAAAAAAAAAECAgIAAAABAAAAAPZPnUyLZ+OYJjhn5Hkk43UuW6rOuemZPFQldOn8DcVHAAAAAQAAAAAAAAAA', '{4PRAe0en/05ZH2leCeTOsxbT0cUu3wgUiWUcuDk4ya8G/gI90hlV6pzOYyAB6Zt5fN7pRrPRL/tTlnjgUAjaBg==,FmdGR6JZukKJUC3Vr2YEJ/24G3tesqTv4cV5UcAozRhS2+w0PYVVqe7QTmOMNSGX/C3LxP1tSvpXdU/OhYsODw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('e9d1a3000aea36743142f2ede106d3cb37c3d7e88508e3f21b496370b5863858', 5, 1, 'GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB', 17179869185, 100, 1, '2019-06-03 16:35:34.03624', '2019-06-03 16:35:34.03624', 21474840576, 'AAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAZAAAAAQAAAABAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASnuINUAAABASz0AtZeNzXSXkjPkKJfOE8aUTAuPR6pxMMbF337wxE3wzOTDaVcDQ2N5P3E9MKc+fbbFhZ9K+07+J0wMGltRBA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+QAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+OcAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Sz0AtZeNzXSXkjPkKJfOE8aUTAuPR6pxMMbF337wxE3wzOTDaVcDQ2N5P3E9MKc+fbbFhZ9K+07+J0wMGltRBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES 
('995b9269f9f9c4c1eace75501188766d6e8ae40c5413120811a50437683cb74c', 5, 2, 'GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB', 17179869186, 100, 1, '2019-06-03 16:35:34.036593', '2019-06-03 16:35:34.036593', 21474844672, 'AAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAZAAAAAQAAAACAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAASnuINUAAABADpkMMc7kkkYjDoPwfUlOE9tLYvWHI/m+BBe/gCKN1cVvEF1UBVeCCuGBTjury4TqoxplKl4NZHJST5/Orr4XCA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAEAAAAA9k+dTItn45gmOGfkeSTjdS5bqs656Zk8VCV06fwNxUcAAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+OcAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+M4AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{DpkMMc7kkkYjDoPwfUlOE9tLYvWHI/m+BBe/gCKN1cVvEF1UBVeCCuGBTjury4TqoxplKl4NZHJST5/Orr4XCA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('f78dca926455579b4a43009ffe35a0229a6da4bed32d3c999d7a06ad26605a25', 5, 3, 'GDXFAGJCSCI4CK2YHK6YRLA6TKEXFRX7BMGVMQOBMLIEUJRJ5YQNLMIB', 17179869187, 100, 1, '2019-06-03 16:35:34.036966', '2019-06-03 16:35:34.036966', 21474848768, 'AAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAZAAAAAQAAAADAAAAAAAAAAAAAAABAAAAAAAAAAUAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAAAAAABAAAAAgAAAAEAAAACAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAABKe4g1QAAAEDglRRymtLjw+ImmGwTiBTKE7X7+2CywlHw8qed+t520SbAggcqboy5KXJaEP51/wRSMxtZUgDOFfaDn9Df04EA', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAFAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvi1AAAAAQAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAABAAAAAPZPnUyLZ+OYJjhn5Hkk43UuW6rOuemZPFQldOn8DcVHAAAAAQAAAAAAAAAAAAAAAQAAAAUAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4tQAAAAEAAAAAwAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAACAAAAAwAAAAUAAAAAAAAAAO5QGSKQkcErWDq9iKwemolyxv8LDVZBwWLQSiYp7iDVAAAAAlQL4tQAAAAEAAAAAwAAAAEAAAAAAAAAAAAAAAABAAAAAAAAAQAAAAD2T51Mi2fjmCY4Z+R5JON1LluqzrnpmTxUJXTp/A3FRwAAAAEAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAMAAAABAAAAAAAAAAAAAAAAAQICAgAAAAEAAAAA9k+dTItn45gmOGfkeSTjdS5bqs656Zk8VCV06fwNxUcAAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+M4AAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+LUAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{4JUUcprS48PiJphsE4gUyhO1+/tgssJR8PKnnfredtEmwIIHKm6MuSlyWhD+df8EUjMbWVIAzhX2g5/Q39OBAA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('66e27fb28870cb5256ea92764bcb222adbbaa5fec2d89a62a9aa8c9c8e2ee9e9', 4, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 2, 100, 1, '2019-06-03 16:35:34.052993', '2019-06-03 
16:35:34.052994', 17179873280, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA7lAZIpCRwStYOr2IrB6aiXLG/wsNVkHBYtBKJinuINUAAAACVAvkAAAAAAAAAAABVvwF9wAAAECAUpO+hxiga/YgRsV3rFpBJydgOyn0TPImJCaQCMikkiG+sNXrQBsYXjJrlOiGjGsU3rk4uvGl85AriYD9PNYH', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaxU1gbOAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaxU1gbOAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBs4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtq7/TDc4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAADuUBkikJHBK1g6vYisHpqJcsb/Cw1WQcFi0EomKe4g1QAAAAJUC+QAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBucAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBs4AAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{gFKTvocYoGv2IEbFd6xaQScnYDsp9EzyJiQmkAjIpJIhvrDV60AbGF4ya5TohoxrFN65OLrxpfOQK4mA/TzWBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('4657f7ab2fd82ae203f04d209e6adec0e6bc4f0983b4fc3fa679820ed47e29d7', 3, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-06-03 16:35:34.065384', '2019-06-03 16:35:34.065384', 12884905984, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAQAAAAAAAABkAAAAAF4L0vAAAAAAAAAAAQAAAAAAAAAAAAAAAC6N7oJcJiUzTWRDL98Bj3fVrJUB19wFvCzEHh8nn/IOAAAAAlQL5AAAAAAAAAAAAVb8BfcAAABA8CyjzEXXVTMwnZTAbHfJeq2HCFzAWkU98ds2ZXFqjXR4EiN0YDSAb/pJwXc0TjMa//SiX83UvUFSqLa8hOXICQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBucAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAAAAAAAuje6CXCYlM01kQy/fAY931ayVAdfcBbwsxB4fJ5/yDgAAAAJUC+QAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{8CyjzEXXVTMwnZTAbHfJeq2HCFzAWkU98ds2ZXFqjXR4EiN0YDSAb/pJwXc0TjMa//SiX83UvUFSqLa8hOXICQ==}', 'none', NULL, '[100,1577833200)', true, 100); + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: exp_asset_stats exp_asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY exp_asset_stats + ADD CONSTRAINT exp_asset_stats_pkey PRIMARY KEY (asset_code, asset_issuer, asset_type); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; 
Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: key_value_store key_value_store_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY key_value_store + ADD CONSTRAINT key_value_store_pkey PRIMARY KEY (key); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offer_id); + + +-- +-- Name: trust_lines trust_lines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trust_lines + ADD CONSTRAINT trust_lines_pkey PRIMARY KEY (ledger_key); + + +-- +-- Name: accounts_data_account_id_name; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX accounts_data_account_id_name ON accounts_data USING btree (account_id, name); + + +-- +-- Name: accounts_home_domain; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accounts_home_domain ON accounts USING btree (home_domain); + + +-- +-- Name: accounts_inflation_destination; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accounts_inflation_destination ON accounts USING btree (inflation_destination); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_fee_account; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_fee_account ON history_transactions USING btree (fee_account) WHERE fee_account IS NOT NULL; + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_hash ON public.history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_inner_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_inner_hash ON history_transactions USING btree (inner_transaction_hash) WHERE inner_transaction_hash IS NOT NULL; + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: exp_asset_stats_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX exp_asset_stats_by_code ON 
exp_asset_stats USING btree (asset_code); + + +-- +-- Name: exp_asset_stats_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX exp_asset_stats_by_issuer ON exp_asset_stats USING btree (asset_issuer); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); 
+ + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: offers_by_buying_asset; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_buying_asset ON offers USING btree (buying_asset); + + +-- +-- Name: offers_by_last_modified_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_last_modified_ledger ON offers USING btree (last_modified_ledger); + + +-- +-- Name: offers_by_seller; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_seller ON offers USING btree (seller_id); + + +-- +-- Name: offers_by_selling_asset; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_selling_asset ON offers USING btree (selling_asset); + + +-- +-- Name: signers_by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX signers_by_account ON accounts_signers USING btree (account_id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: trust_lines_by_account_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trust_lines_by_account_id ON 
trust_lines USING btree (account_id); + + +-- +-- Name: trust_lines_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trust_lines_by_issuer ON trust_lines USING btree (asset_issuer); + + +-- +-- Name: trust_lines_by_type_code_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trust_lines_by_type_code_issuer ON trust_lines USING btree (asset_type, asset_code, asset_issuer); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + + +-- needed for migrations to work +ALTER TABLE accounts ADD sponsor TEXT; +CREATE INDEX accounts_by_sponsor ON accounts USING BTREE(sponsor); + +ALTER TABLE accounts_data ADD sponsor TEXT; +CREATE INDEX accounts_data_by_sponsor ON accounts_data USING BTREE(sponsor); + +ALTER TABLE trust_lines ADD sponsor TEXT; +CREATE INDEX trust_lines_by_sponsor ON trust_lines USING BTREE(sponsor); + +ALTER TABLE offers ADD sponsor TEXT; +CREATE INDEX offers_by_sponsor ON offers USING BTREE(sponsor); + +ALTER TABLE history_operation_participants + DROP COLUMN id; + +ALTER TABLE history_transaction_participants + DROP COLUMN id; + +DROP TABLE asset_stats cascade; + +DROP INDEX exp_asset_stats_by_code; + +DROP INDEX index_history_transactions_on_id; + +DROP INDEX index_history_ledgers_on_id; + +DROP INDEX asset_by_code; + +ALTER TABLE history_transactions ADD account_muxed varchar(69) NULL, ADD fee_account_muxed varchar(69) NULL; +ALTER TABLE history_operations ADD source_account_muxed varchar(69) NULL; +ALTER TABLE history_effects ADD address_muxed varchar(69) NULL; + + +-- 49 +CREATE INDEX IF NOT EXISTS htrd_agg_timestamp_brin ON history_trades_60000 USING brin(timestamp); + +-- 50 +CREATE TABLE liquidity_pools ( + id text NOT NULL, -- hex-encoded PoolID + type smallint NOT NULL, + fee integer NOT NULL, + trustline_count bigint NOT NULL CHECK (trustline_count > 0), + share_count bigint NOT NULL DEFAULT 0 CHECK(share_count >= 0), + asset_reserves jsonb NOT NULL, + last_modified_ledger integer NOT NULL, + deleted boolean NOT NULL DEFAULT false, + PRIMARY KEY (id) +); + +CREATE INDEX liquidity_pools_by_asset_reserves ON liquidity_pools USING gin(asset_reserves jsonb_path_ops); +CREATE INDEX live_liquidity_pools ON liquidity_pools USING BTREE (deleted, last_modified_ledger); + +CREATE 
SEQUENCE history_liquidity_pools_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + +CREATE TABLE history_liquidity_pools ( + id bigint NOT NULL DEFAULT nextval('history_liquidity_pools_id_seq'::regclass), + liquidity_pool_id text NOT NULL +); + +CREATE UNIQUE INDEX index_history_liquidity_pools_on_id ON history_liquidity_pools USING btree (id); +CREATE UNIQUE INDEX index_history_liquidity_pools_on_liquidity_pool_id ON history_liquidity_pools USING btree (liquidity_pool_id); + +CREATE TABLE history_operation_liquidity_pools ( + history_operation_id bigint NOT NULL, + history_liquidity_pool_id bigint NOT NULL +); + +CREATE UNIQUE INDEX index_history_operation_liquidity_pools_on_ids ON history_operation_liquidity_pools USING btree (history_operation_id , history_liquidity_pool_id); +CREATE INDEX index_history_operation_liquidity_pools_on_operation_id ON history_operation_liquidity_pools USING btree (history_operation_id); + +CREATE TABLE history_transaction_liquidity_pools ( + history_transaction_id bigint NOT NULL, + history_liquidity_pool_id bigint NOT NULL +); + +CREATE UNIQUE INDEX index_history_transaction_liquidity_pools_on_ids ON history_transaction_liquidity_pools USING btree (history_transaction_id , history_liquidity_pool_id); +CREATE INDEX index_history_transaction_liquidity_pools_on_transaction_id ON history_transaction_liquidity_pools USING btree (history_transaction_id); + +ALTER TABLE trust_lines ADD liquidity_pool_id text; +CREATE INDEX trust_lines_by_liquidity_pool_id ON trust_lines USING BTREE(liquidity_pool_id); + +DROP INDEX htrd_by_offer; +DROP INDEX htrd_counter_lookup; + +ALTER TABLE history_trades DROP offer_id, + ALTER base_account_id DROP NOT NULL, + ALTER counter_account_id DROP NOT NULL, + ADD base_liquidity_pool_id bigint, + ADD counter_liquidity_pool_id bigint, + ADD liquidity_pool_fee int; + +CREATE INDEX htrd_by_base_liquidity_pool_id ON history_trades USING BTREE(base_liquidity_pool_id); +CREATE INDEX htrd_by_counter_liquidity_pool_id ON history_trades USING BTREE(counter_liquidity_pool_id); + +-- 51 +DROP INDEX IF EXISTS by_account; +DROP INDEX IF EXISTS by_fee_account; + +-- +-- PostgreSQL database dump complete +-- diff --git a/services/horizon/internal/test/scenarios/main.go b/services/horizon/internal/test/scenarios/main.go new file mode 100644 index 0000000000..df9b032ae1 --- /dev/null +++ b/services/horizon/internal/test/scenarios/main.go @@ -0,0 +1,36 @@ +package scenarios + +import ( + "log" + + "github.com/jmoiron/sqlx" +) + +//go:generate go run github.com/kevinburke/go-bindata/go-bindata@v3.18.0+incompatible -nometadata -ignore (go|rb)$ -pkg scenarios . 
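+
+// The go:generate directive above rebuilds the bindata file that provides the
+// Asset/AssetDir/AssetInfo functions used by Load below and by
+// TestGeneratedAssets; rerun it whenever the .sql scenario files in this
+// directory change (Go and Ruby files are excluded by the -ignore flag).
+//
+// Example usage (a sketch; the connection URL is an assumption and will be
+// environment-specific):
+//
+//	scenarios.Load("postgres://localhost:5432/test?sslmode=disable", "offer_ids-core.sql")
+//
+// Note that sqlx.Open("postgres", ...) inside Load requires a registered
+// postgres driver; a blank import such as _ "github.com/lib/pq" is assumed to
+// be present in this package or in the importing binary, since it does not
+// appear in the import block above.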
+ +// Load executes the sql script at `path` on postgres database at `url` +func Load(url string, path string) { + sql, err := Asset(path) + if err != nil { + log.Panic(err) + } + + db, err := sqlx.Open("postgres", url) + if err != nil { + log.Fatalf("could not exec open postgres connection: %v\n", err) + } + defer db.Close() + + // clear out existing schema before applying scenario + // otherwise, applying the scenario will result in the following error: + // pq: cannot drop schema public because other objects depend on it + _, err = db.Exec("DROP SCHEMA IF EXISTS public cascade") + if err != nil { + log.Fatalf("could not drop public schema: %v\n", err) + } + + _, err = db.Exec(string(sql)) + if err != nil { + log.Fatalf("could not exec scenario %v: %v\n", path, err) + } +} diff --git a/services/horizon/internal/test/scenarios/main_test.go b/services/horizon/internal/test/scenarios/main_test.go new file mode 100644 index 0000000000..4980cdedfc --- /dev/null +++ b/services/horizon/internal/test/scenarios/main_test.go @@ -0,0 +1,24 @@ +package scenarios + +import ( + "net/http" + "os" + "strings" + "testing" + + assetfs "github.com/elazarl/go-bindata-assetfs" + "github.com/shurcooL/httpfs/filter" + + supportHttp "github.com/stellar/go/support/http" +) + +func TestGeneratedAssets(t *testing.T) { + var localAssets http.FileSystem = filter.Keep(http.Dir("."), func(path string, fi os.FileInfo) bool { + return fi.IsDir() || strings.HasSuffix(path, ".sql") + }) + generatedAssets := &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo} + + if !supportHttp.EqualFileSystems(localAssets, generatedAssets, "/") { + t.Fatalf("generated migrations does not match local migrations") + } +} diff --git a/services/horizon/internal/test/scenarios/offer_ids-core.sql b/services/horizon/internal/test/scenarios/offer_ids-core.sql new file mode 100644 index 0000000000..b188928cf5 --- /dev/null +++ b/services/horizon/internal/test/scenarios/offer_ids-core.sql @@ -0,0 +1,767 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP 
CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + signers text, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); 
+ + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq 
integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 999999995999999600, 4, 0, NULL, '', 'AQAAAA==', 0, 2, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 999999900, 8589934593, 0, NULL, '', 'AQAAAA==', 0, 4, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 999999900, 8589934593, 0, NULL, '', 'AQAAAA==', 0, 4, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 999999600, 8589934596, 3, NULL, '', 'AQAAAA==', 0, 9, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 999999500, 8589934597, 2, NULL, '', 'AQAAAA==', 0, 9, NULL, NULL, NULL); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('5b6634cc9e9199fd3fbbaf0655c54ca5e7b812d562e63eca0fb2b9a89b180c4f', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 'ae2719d4ee5cbecdec59bac5f1d0d2d7ca82b9bd5408bc3cc39fe16660539e86', 2, 1559579711, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76Zh82QgsuASmS0xgFp1fjiT2wNgvVWRClSW5uMP344SMoAAAAAXPVMPwAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAAh7rHH3ehqvUsWEPfAlvVspAA2/ZR0JjPAFIdHbQ2sSiuJxnU7ly+zexZusXx0NLXyoK5vVQIvDzDn+FmYFOehgAAAAIN4Lazp2QAAAAAAAAAAAGQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('aa733b6e19d9d1df08ab8ac1bb70ce941adb9c319716dbbed0b83a4f9717a427', '5b6634cc9e9199fd3fbbaf0655c54ca5e7b812d562e63eca0fb2b9a89b180c4f', '8d44b8970796877ca0065abdc7c68950431832c5709d8cfc9ae995736954854f', 3, 1559579712, 'AAAAC1tmNMyekZn9P7uvBlXFTKXnuBLVYuY+yg+yuaibGAxPE5AOt+233THPtTNbUnjSwPvmo6g3zyFcFPoc8VLPGGAAAAAAXPVMQAAAAAAAAAAAShzqI9aLY+7Kp5Zw36Ye703iZxnHA4Dw1m5KF4RHKmONRLiXB5aHfKAGWr3HxolQQxgyxXCdjPya6ZVzaVSFTwAAAAMN4Lazp2QAAAAAAAAAAAMgAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('7cbd2c56a6b343fa00f87e24264de55ff07e32f3b149084d99e565ef0cd4f02a', 
'aa733b6e19d9d1df08ab8ac1bb70ce941adb9c319716dbbed0b83a4f9717a427', 'd99ed0c0c537d79faea6f48844d179a589a2af91e2293497f3f4a3dba002193b', 4, 1559579713, 'AAAAC6pzO24Z2dHfCKuKwbtwzpQa25wxlxbbvtC4Ok+XF6QnjkhXuTkDWtl4LNb/YNQTjpJEIGD+9shjsz3Jzsqv4/YAAAAAXPVMQQAAAAAAAAAAN5B5W9KsxoOXOpscNNdDinauNxC/Pw2kk7DDUo0SXOXZntDAxTfXn66m9IhE0XmliaKvkeIpNJfz9KPboAIZOwAAAAQN4Lazp2QAAAAAAAAAAAPoAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('bcea057e1cd211d2cfb7e3fb995fd45cd6a44221f38acc0522b45b8c81d96524', '7cbd2c56a6b343fa00f87e24264de55ff07e32f3b149084d99e565ef0cd4f02a', '3337fa8c8658215c9b218c483baa6f15d0dc47063e193a290e5f44ba42733344', 5, 1559579714, 'AAAAC3y9LFams0P6APh+JCZN5V/wfjLzsUkITZnlZe8M1PAqI/JFnzikQwlAWB2pC99Ya/VbQ2JSlXrpxN9Ed9L/rDMAAAAAXPVMQgAAAAAAAAAA5tFak/fh7MEOithjlydErBU0/M0ilaefu1Ub0Cw1cbQzN/qMhlghXJshjEg7qm8V0NxHBj4ZOikOX0S6QnMzRAAAAAUN4Lazp2QAAAAAAAAAAARMAAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('c3a9cd326f60a94427dcadeb699d3f6ecd39353cc932acc7d27451273326bfc8', 'bcea057e1cd211d2cfb7e3fb995fd45cd6a44221f38acc0522b45b8c81d96524', '0009df8962c3b254432b804bce2ba8802dc15e59b2a73a2b8f25c96e12d562f6', 6, 1559579715, 'AAAAC7zqBX4c0hHSz7fj+5lf1FzWpEIh84rMBSK0W4yB2WUkjmMqt+2EvvBEZaLG6tbmWrlOaBGNpqS9R+fV7D1C1bEAAAAAXPVMQwAAAAAAAAAAuQrPQr2qlNVpCIqhQaeQMsD9JWWNaxISOIwMsxvilIUACd+JYsOyVEMrgEvOK6iALcFeWbKnOiuPJcluEtVi9gAAAAYN4Lazp2QAAAAAAAAAAASwAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('0d3f343b2f9f0caebd2e9b5ad5233d384ddef567e01fdcd8e79b3a6f010e14cd', 'c3a9cd326f60a94427dcadeb699d3f6ecd39353cc932acc7d27451273326bfc8', '702c842040815b4f3cc114aabb2e5fa4d96daf763190b10c1ec5fba36150b4d0', 7, 1559579716, 'AAAAC8OpzTJvYKlEJ9yt62mdP27NOTU8yTKsx9J0USczJr/IuByoQOqmTAMUTx6gy0jhoGG1FiLn3uIQDEu52Ts13f4AAAAAXPVMRAAAAAAAAAAA8011I8cweiVULRk7g+I3eWM/k+U+svLLvJL8X9ENB1pwLIQgQIFbTzzBFKq7Ll+k2W2vdjGQsQwexfujYVC00AAAAAcN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('beaea632702de50e50e1828cc4f6f2fa5177750a52ffb7f980e0eebc791e69da', '0d3f343b2f9f0caebd2e9b5ad5233d384ddef567e01fdcd8e79b3a6f010e14cd', '6f2b5adbbf81c49796c203aa55bc3330e6613a25c7dc45230291bdeee30d9731', 8, 1559579717, 'AAAACw0/NDsvnwyuvS6bWtUjPThN3vVn4B/c2OebOm8BDhTNF+e1/1KxUzO74nHG3kqkGpSmFdahZJnhcECgqI4Wul8AAAAAXPVMRQAAAAAAAAAAAOMGyexOuevk4QIYCK/e2TLQXecE/TBuu1dvGQilTsNvK1rbv4HEl5bCA6pVvDMw5mE6JcfcRSMCkb3u4w2XMQAAAAgN4Lazp2QAAAAAAAAAAAV4AAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('486cb2fea3a46891d301a6dd1d06f09d3cfe6144df09581888a0f51299eca4c9', 'beaea632702de50e50e1828cc4f6f2fa5177750a52ffb7f980e0eebc791e69da', 
'b0e723d49f38dec32e3eaf8c1ebfd5244bb3d543f12a0e8214833fc17b75459a', 9, 1559579718, 'AAAAC76upjJwLeUOUOGCjMT28vpRd3UKUv+3+YDg7rx5Hmnak9zC8GPJ6ZzGP5+PUo/5nHalJyPDK1xoG7op7rz8DI4AAAAAXPVMRgAAAAAAAAAA5u7HOB8cX0WnqixnvzOlY3D3Y3oP0dYzzW9yWnVbm0aw5yPUnzjewy4+r4wev9UkS7PVQ/EqDoIUgz/Be3VFmgAAAAkN4Lazp2QAAAAAAAAAAAXcAAAAAAAAAAAAAAAEAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO offers VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 4, 'AAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8=', 'AAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2Ek=', 10000000, 4, 5, 0.800000000000000044, 0, 9); + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GATVGTG7YEGMHB2AFLXTHZZTFSBD554XXB2HW7R6H24D3HTQWTGLXQK7', 2, 'AAAAACdTTN/BDMOHQCrvM+czLII+95e4dHt+Pj64PZ5wtMy7AAAAAAAAAAIAAAACAAAAAQAAAEiHzZCCy4BKZLTGAWnV+OJPbA2C9VZEKVJbm4w/fjhIygAAAABc9Uw/AAAAAgAAAAgAAAABAAAACwAAAAgAAAADAA9CQAAAAAAAAAAByamouft0OTPcesJmbyyg+EsQtPmuC+JHsbdYFdYmfFUAAABAxSinoYy7JsqdaPda5Z6tBAtwY9+WKKgtbeE0mIY71S2INDEQ93aIcTNyb53CLOl2vO9pB4uKrr6kXgS3A/MTBQ=='); +INSERT INTO scphistory VALUES ('GATVGTG7YEGMHB2AFLXTHZZTFSBD554XXB2HW7R6H24D3HTQWTGLXQK7', 3, 'AAAAACdTTN/BDMOHQCrvM+czLII+95e4dHt+Pj64PZ5wtMy7AAAAAAAAAAMAAAACAAAAAQAAADATkA637bfdMc+1M1tSeNLA++ajqDfPIVwU+hzxUs8YYAAAAABc9UxAAAAAAAAAAAAAAAAByamouft0OTPcesJmbyyg+EsQtPmuC+JHsbdYFdYmfFUAAABAcifIcNpgdCvCFFXcWUjtWfNSRdyWFdUrYi6mAjeOdl/5hIdpb1AqJOw2xlRjEFWz/NjMXCsO7n4eVOnME0BoDQ=='); +INSERT INTO scphistory VALUES ('GATVGTG7YEGMHB2AFLXTHZZTFSBD554XXB2HW7R6H24D3HTQWTGLXQK7', 4, 'AAAAACdTTN/BDMOHQCrvM+czLII+95e4dHt+Pj64PZ5wtMy7AAAAAAAAAAQAAAACAAAAAQAAADCOSFe5OQNa2Xgs1v9g1BOOkkQgYP72yGOzPcnOyq/j9gAAAABc9UxBAAAAAAAAAAAAAAAByamouft0OTPcesJmbyyg+EsQtPmuC+JHsbdYFdYmfFUAAABAeUrrwIvPQF38qU0JbCw3N2g+sNz3IgJKLeHl6vyrW2GXRxJeYQ3I+9vPNHHPKFgLmhT4HGSQKV+WkZBQ2LdABA=='); +INSERT INTO scphistory VALUES ('GATVGTG7YEGMHB2AFLXTHZZTFSBD554XXB2HW7R6H24D3HTQWTGLXQK7', 5, 'AAAAACdTTN/BDMOHQCrvM+czLII+95e4dHt+Pj64PZ5wtMy7AAAAAAAAAAUAAAACAAAAAQAAADAj8kWfOKRDCUBYHakL31hr9VtDYlKVeunE30R30v+sMwAAAABc9UxCAAAAAAAAAAAAAAAByamouft0OTPcesJmbyyg+EsQtPmuC+JHsbdYFdYmfFUAAABA2lgyZ2HJiRDjaunFZpNjjBUyMV/7a5kiemM4L1BZ2K17Hkkv8RuGtENi36odeiyWavYHCRK/FDVxNPs46hHWBg=='); +INSERT INTO scphistory VALUES ('GATVGTG7YEGMHB2AFLXTHZZTFSBD554XXB2HW7R6H24D3HTQWTGLXQK7', 6, 'AAAAACdTTN/BDMOHQCrvM+czLII+95e4dHt+Pj64PZ5wtMy7AAAAAAAAAAYAAAACAAAAAQAAADCOYyq37YS+8ERlosbq1uZauU5oEY2mpL1H59XsPULVsQAAAABc9UxDAAAAAAAAAAAAAAAByamouft0OTPcesJmbyyg+EsQtPmuC+JHsbdYFdYmfFUAAABAkeJ/cOjRUE0ZMdX8ZIzaO+ByUw5ff1ELJhZiacHj2DTFWNzuR6YgLvDIuvtMfkcbXsH/uiY2nTdt7zgexjWYDw=='); +INSERT INTO scphistory VALUES ('GATVGTG7YEGMHB2AFLXTHZZTFSBD554XXB2HW7R6H24D3HTQWTGLXQK7', 7, 
'AAAAACdTTN/BDMOHQCrvM+czLII+95e4dHt+Pj64PZ5wtMy7AAAAAAAAAAcAAAACAAAAAQAAADC4HKhA6qZMAxRPHqDLSOGgYbUWIufe4hAMS7nZOzXd/gAAAABc9UxEAAAAAAAAAAAAAAAByamouft0OTPcesJmbyyg+EsQtPmuC+JHsbdYFdYmfFUAAABAGN8splVV4qwlqXO7X4u1KHV3fXWRAYNROfEp1yx67rT9rrgiu9UfrTMzSfh9WClZgTJjHjIfHhNWph2vLzbyCg=='); +INSERT INTO scphistory VALUES ('GATVGTG7YEGMHB2AFLXTHZZTFSBD554XXB2HW7R6H24D3HTQWTGLXQK7', 8, 'AAAAACdTTN/BDMOHQCrvM+czLII+95e4dHt+Pj64PZ5wtMy7AAAAAAAAAAgAAAACAAAAAQAAADAX57X/UrFTM7viccbeSqQalKYV1qFkmeFwQKCojha6XwAAAABc9UxFAAAAAAAAAAAAAAAByamouft0OTPcesJmbyyg+EsQtPmuC+JHsbdYFdYmfFUAAABAx3JWvVxbJUIcpFGmnY8BMNTidFSXRs/rWIuL6wuuz40n7QR8NhMUMK8dn+pJwu8d/ImTBnZjo94oebPyO6B2CA=='); +INSERT INTO scphistory VALUES ('GATVGTG7YEGMHB2AFLXTHZZTFSBD554XXB2HW7R6H24D3HTQWTGLXQK7', 9, 'AAAAACdTTN/BDMOHQCrvM+czLII+95e4dHt+Pj64PZ5wtMy7AAAAAAAAAAkAAAACAAAAAQAAADCT3MLwY8npnMY/n49Sj/mcdqUnI8MrXGgbuinuvPwMjgAAAABc9UxGAAAAAAAAAAAAAAAByamouft0OTPcesJmbyyg+EsQtPmuC+JHsbdYFdYmfFUAAABAskgwVaZz5qeUx6LZv96TEEDQmdmOPxcdcLmDsqHc+gs50jYFl0iX2TaDNzIvr4z4/X97Jao61VxnPTYjBwnDBA=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('c9a9a8b9fb743933dc7ac2666f2ca0f84b10b4f9ae0be247b1b75815d6267c55', 9, 'AAAAAQAAAAEAAAAAJ1NM38EMw4dAKu8z5zMsgj73l7h0e34+Prg9nnC0zLsAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('lastscpdata ', 'AAAAAgAAAAAnU0zfwQzDh0Aq7zPnMyyCPveXuHR7fj4+uD2ecLTMuwAAAAAAAAAJAAAAA8mpqLn7dDkz3HrCZm8soPhLELT5rgviR7G3WBXWJnxVAAAAAQAAAJiT3MLwY8npnMY/n49Sj/mcdqUnI8MrXGgbuinuvPwMjgAAAABc9UxGAAAAAAAAAAEAAAAAJ1NM38EMw4dAKu8z5zMsgj73l7h0e34+Prg9nnC0zLsAAABABpR81dkzkMXWzQxF0wrVbXeB5zaMMxsT4ODVkiHK2nSvvNb9G8o+aR/W+gv2/8mqcuz6iLWVp8Hr9pI7caTjAgAAAAEAAACYk9zC8GPJ6ZzGP5+PUo/5nHalJyPDK1xoG7op7rz8DI4AAAAAXPVMRgAAAAAAAAABAAAAACdTTN/BDMOHQCrvM+czLII+95e4dHt+Pj64PZ5wtMy7AAAAQAaUfNXZM5DF1s0MRdMK1W13gec2jDMbE+Dg1ZIhytp0r7zW/RvKPmkf1voL9v/JqnLs+oi1lafB6/aSO3Gk4wIAAABAE+pHkM8CWktQa/sVXOiB2jIsdh/1ue5Du6AGJa5TKniyYfo9mDvImv1FORGFHBOuHAYiiB2DZf1lE1Z/pUqvBwAAAAAnU0zfwQzDh0Aq7zPnMyyCPveXuHR7fj4+uD2ecLTMuwAAAAAAAAAJAAAAAgAAAAEAAAAwk9zC8GPJ6ZzGP5+PUo/5nHalJyPDK1xoG7op7rz8DI4AAAAAXPVMRgAAAAAAAAAAAAAAAcmpqLn7dDkz3HrCZm8soPhLELT5rgviR7G3WBXWJnxVAAAAQLJIMFWmc+anlMei2b/ekxBA0JnZjj8XHXC5g7Kh3PoLOdI2BZdIl9k2gzcyL6+M+P1/eyWqOtVcZz02IwcJwwQAAAABvq6mMnAt5Q5Q4YKMxPby+lF3dQpS/7f5gODuvHkeadoAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAFoAxYAAAAAEAAAABQAAAAAAAAAAAAAAAAAAAAGu5L5MAAAAQNauhGmk9S3Y7k65YdRK2RAHjHwYitvkeuM+3nPCP3hGgUkz9WGa4PY84CeMgmkl15ick+lYFrXfb4LoDqhuTQoAAAABAAAAAQAAAAEAAAAAJ1NM38EMw4dAKu8z5zMsgj73l7h0e34+Prg9nnC0zLsAAAAA'); +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 0, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('lastclosedledger ', '486cb2fea3a46891d301a6dd1d06f09d3cfe6144df09581888a0f51299eca4c9'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 9, + "currentBuckets": [ + { + "curr": 
"878d6ef5fee3274de3dd2d87867e45a651d355a97ffffc983d40b9a8467e3e1c", + "next": { + "state": 0 + }, + "snap": "361ee3594c706c50ebdad54ddc27dea5d7d6f204ae27a18b72f00dd95ce80d9a" + }, + { + "curr": "f128ba55c57d2dca306903e9c6a269382f0bd76f63c8e4991770eaa7a18702a3", + "next": { + "state": 1, + "output": "aef8928b5b423c2fb7a106b30128bc79e447d89710628532229505bdb4d51cc3" + }, + "snap": "0abf0429e123803d90ec09a3e348be2aa6464ae64e9d8428151285b43a71079d" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 1, + "output": "0abf0429e123803d90ec09a3e348be2aa6464ae64e9d8428151285b43a71079d" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO trustlines VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 1, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 'EUR', 9223372036854775807, 2700000000, 1, 9, 8000000, 0); +INSERT INTO trustlines VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 'USD', 9223372036854775807, 2999999999, 1, 9, 0, 0); +INSERT INTO trustlines VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 1, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 'EUR', 9223372036854775807, 2300000000, 1, 9, 0, 0); +INSERT INTO trustlines VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 'USD', 9223372036854775807, 2000000001, 1, 9, 0, 10000000); + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d', 2, 1, 
'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('164a5064eba64f2cdbadb856bf3448485fc626247ada3ed39cddf0f6902133b6', 2, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2d365c3c49f376570df856bca62503966f0e269a2f51cdb68ce2ee19a7f8245a', 2, 3, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('003e91101d19aabb429491953806886d777c260233c6478f1c928a79ec4e2743', 2, 4, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('bd486dbdd02d460817671c4a5a7e9d6e865ca29cb41e62d7aaf70a2fee5b36de', 3, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('00ab9cfce2b4c4141d8bb6768dd094bdbb1c7406710dbb3ba0ef98870f63a344', 3, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('c7ab3843f0a0c4fe79dece0ff1b8391f7a9d34c47cbd7e35034f7b28f60dfc00', 3, 3, 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('5ef9c06bb625d2da2281118bdf14d808768353ee2fca7457c8e506fbeb91fc55', 3, 4, 'AAAAAgAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2837a1b3def2c2ddfdcde5b44bf08d7a11a9328d870df17fa2bb66d4c83260c7', 4, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('e4febb6aab5f83df4c19b4f1f6b8687f11886169e996e16baa75aac2d8a09c68', 4, 2, 
'AAAAAgAAAAMAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('356eec1f0432bbbba09ddcc1937e28253a4dc8e98ff77d5e9dee140be5093a70', 5, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2d0d264d9e2e3364c29faf6a6ec815a8a8e6a46e23096da1411f292309d78712', 6, 1, 'AAAAAgAAAAMAAAAFAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msjUAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7mshwAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('4a0ac16e948710199659b0e8131be06df5944e57806c0db722859e4e8aad1e44', 7, 1, 'AAAAAgAAAAMAAAAGAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7mshwAAAAAgAAAAQAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAQAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('1fa96abeeb0e2b71fa947d8f7003ca5d3e95734652c38b328ae62a8c1b1b3d73', 8, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('54e27e3d7c84fdb79d6709922c52aaa752fd5080dcd433af901cd17860a957b2', 9, 1, 'AAAAAgAAAAMAAAAIAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msjUAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7mshwAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d', 2, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAECDzqvkQBQoNAJifPRXDoLhvtycT3lFPCQ51gkdsFHaBNWw05S/VhW0Xgkr0CBPE4NaFV2Kmcs3ZwLmib4TRrML', 'I3Tpk0m57326ml2zM5t4/ajzR3exrzO6RorVwN+UbU0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTRwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('164a5064eba64f2cdbadb856bf3448485fc626247ada3ed39cddf0f6902133b6', 2, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEASEZiZbeFwCsrKBnKIus/05VtJDBrgosuhLQ/U6XUj4twWyhs7UtS4CMexOM6JqcfqJK10WlBkkwn4g8PIfjIG', 
'FkpQZOumTyzbrbhWvzRISF/GJiR62j7TnN3w9pAhM7YAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTRwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmpwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2d365c3c49f376570df856bca62503966f0e269a2f51cdb68ce2ee19a7f8245a', 2, 3, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAAAAAABVvwF9wAAAEB/JBgvIM71gLBIh0TON9b+l+ApZz1CKDQiUFSV0scRguB1anyMwMR6s5SiaCwtDnxsPna12RdUQKlH2aeMAy8H', 'LTZcPEnzdlcN+Fa8piUDlm8OJpovUc22jOLuGaf4JFoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmpwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6BwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('003e91101d19aabb429491953806886d777c260233c6478f1c928a79ec4e2743', 2, 4, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDIOudzujfo+dSIJXXb06SjLBLLXsFxnVnR1HJejfq2NgFUtLuX2KrVNSZyRBG+WvfdoXwCPcp85hDRbCmjbPYM', 'AD6REB0ZqrtClJGVOAaIbXd8JgIzxkePHJKKeexOJ0MAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6BwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NZwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('bd486dbdd02d460817671c4a5a7e9d6e865ca29cb41e62d7aaf70a2fee5b36de', 3, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQB9kmKW2q3v7Qfy8PMekEb1TTI5ixqkI0BogXrOt7gO162Qbkh2dSTUfeDovc0PAafhDXxthVAlsLujlBmyjBAY=', 'vUhtvdAtRggXZxxKWn6dboZcopy0HmLXqvcKL+5bNt4AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('00ab9cfce2b4c4141d8bb6768dd094bdbb1c7406710dbb3ba0ef98870f63a344', 3, 2, 
'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAEnciLVAAAAQLVbII+1LeizxgncDI46KHyBt05+H92n1+R328J9zNl2fgJW2nfn3FIoLVs2qV1+CUpr121a2B7AM6HKr4nBLAI=', 'AKuc/OK0xBQdi7Z2jdCUvbscdAZxDbs7oO+Yhw9jo0QAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('c7ab3843f0a0c4fe79dece0ff1b8391f7a9d34c47cbd7e35034f7b28f60dfc00', 3, 3, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAGu5L5MAAAAQO/nblo8KAkSOf8cQOOiADXygx+I0ZdWoM4Vg4EKPAAJXFntctjCIyQ4csVUywaW32J/keQWYby52BjiNhT/6Qo=', 'x6s4Q/CgxP553s4P8bg5H3qdNMR8vX41A097KPYN/AAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('5ef9c06bb625d2da2281118bdf14d808768353ee2fca7457c8e506fbeb91fc55', 3, 4, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAEnciLVAAAAQOG2GKO9i60cM1QK2UN1gXrHEYjeGLRXFT5snCqO5FnPET5cVs30N7ITPZ6HH6QcZ2IdC1c66wLge4GyR8vpww0=', 'XvnAa7Yl0toigRGL3xTYCHaDU+4vynRXyOUG++uR/FUAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2837a1b3def2c2ddfdcde5b44bf08d7a11a9328d870df17fa2bb66d4c83260c7', 4, 1, 
'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAEqBfIAAAAAAAAAAAH5kC3vAAAAQGoQPiD0HEBi0U8cHN6nlZ3okEfdmt7mqkQHIt2tuRLaZZ1iMQwU43M8v+ntJQsA4c2eBXt9GYp/29FLjnba1Qw=', 'KDehs97ywt39zeW0S/CNehGpMo2HDfF/ortm1MgyYMcAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAASoF8gB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('e4febb6aab5f83df4c19b4f1f6b8687f11886169e996e16baa75aac2d8a09c68', 4, 2, 'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAAAAAAAAAAAEQithJAAAAQEghWBLDjmNLzcPF6o8dqUHMsI0WhttWE/ABSKaHNc+0FqsF+ui5+eky4ERyu99YR6BEHF4NlgyLnSq3zcDr9Qo=', '5P67aqtfg99MGbTx9rhofxGIYWnpluFrqnWqwtignGgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAASoF8gB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('356eec1f0432bbbba09ddcc1937e28253a4dc8e98ff77d5e9dee140be5093a70', 5, 1, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAEnciLVAAAAQE6AkyD+X3Poc6Fj6lalYvmHUdbN38uun6CX5Mc/cJnFQaqtZBOoAwDntTl1Gz/f7reRpXcJYSdQ+pEcUn/2PAE=', 'NW7sHwQyu7ugndzBk34oJTpNyOmP931ene4UC+UJOnAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAEAAAABAAAAAAAAAAAAAAAA', 
'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rI1AAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rI1AAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAMAAAAFAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msjUAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msjUAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAABAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAADAAAABAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAf/////////8AAAABAAAAAAAAAAAAAAABAAAABQAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAADuaygAAAAAAAAAAAAAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAFAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAABAAAAADuaygAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('2d0d264d9e2e3364c29faf6a6ec815a8a8e6a46e23096da1411f292309d78712', 6, 1, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADWk6QBCOjXHO5rKAAAAAAAAAAAAAAAAAAAAAAEnciLVAAAAQHofe3bgdjBd664ArqrKLj2/ia4bLa5YlG/ML7uFVRKWTbp0lpbKCa0bFR5AEnCHD/FyJgKh3TiNOpK3HFRkWQ8=', 'LQ0mTZ4uM2TCn69qbsgVqKjmpG4jCW2hQR8pIwnXhxIAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAANaTpAEI6Ncc7msoAAAAAAAAAAAAAAAAA', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rIcAAAAAIAAAADAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rIcAAAAAIAAAAEAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAMAAAAGAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7mshwAAAAAgAAAAQAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7mshwAAAAAgAAAAQAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAACAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA1pOkAQjo1xzuaygAAAAAAAAAAAAAAAAAAAAADAAAABQAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAQAAAAA7msoAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAABAAAAAHc1k/8AAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAUAAAABAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABKgXyAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAHE/swAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('4a0ac16e948710199659b0e8131be06df5944e57806c0db722859e4e8aad1e44', 7, 1, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAC+vCAAAAAAFAAAABAAAAAAAAAAAAAAAAAAAAAEnciLVAAAAQGCvLROwHtGG6m2PJ0IIz3FRHGUx9WygqNXHNQPN0Ypk/oTNltJAuPn52FZ+O7fImvcHffMLVMCFDNDTgFnrIQM=', 'SgrBbpSHEBmWWbDoExvgbfWUTleAbA23IoWeToqtHkQAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAwAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAL68IAAAAAAUAAAAEAAAAAAAAAAAAAAAA', 
'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rIDAAAAAIAAAAEAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rIDAAAAAIAAAAFAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAMAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAUAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAUAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAADAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAvrwgAAAAABQAAAAQAAAAAAAAAAAAAAAAAAAADAAAABgAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAQAAAAB3NZP/AAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAABAAAAALLQXf8AAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAYAAAABAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABKgXyAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAABxP7MAAAAAAAAAAAAAAAABAAAABwAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAKDuuwAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('1fa96abeeb0e2b71fa947d8f7003ca5d3e95734652c38b328ae62a8c1b1b3d73', 8, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAFloLwAAAAAJAAAACgAAAAAAAAAAAAAAAAAAAAGu5L5MAAAAQI4z8HdxCMc9Yj7IMY43+gnRL5meUMTGO5MNqHs+1faoWCnC+0IC3rRXjYWoigPnEBDmTNxQYfNA9LQQNH5Vdww=', 'H6lqvusOK3H6lH2PcAPKXT6Vc0ZSw4syiuYqjBsbPXMAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAABAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAABrSdIAAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAdzWUAAAAAAgAAAAA=', 
'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rI1AAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rI1AAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADgAAAAMAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAASoF8gB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAoO67AAAAAAAAAAAAAAAAAQAAAAgAAAABAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAA05izgH//////////AAAAAQAAAAEAAAAAAAAAAAAAAABKgXyAAAAAAAAAAAAAAAADAAAABQAAAAIAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAgAAAAIAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAQAAAAMAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAFZtPoB//////////wAAAAEAAAAAAAAAAAAAAAMAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAABAAAAALLQXf8AAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAgAAAABAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAWWgvAH//////////AAAAAQAAAAEAAAAAWWgu/wAAAAAAAAAAAAAAAAAAAAAAAAADAAAABgAAAAIAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAANaTpAEI6Ncc7msoAAAAAAAAAAAAAAAAAAAAAAQAAAAgAAAACAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAABrSdIBCOjXHO5rKAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAUAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAUAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAASoF8gB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAANCdwwB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('54e27e3d7c84fdb79d6709922c52aaa752fd5080dcd433af901cd17860a957b2', 9, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAFoAxYAAAAAEAAAABQAAAAAAAAAAAAAAAAAAAAGu5L5MAAAAQNauhGmk9S3Y7k65YdRK2RAHjHwYitvkeuM+3nPCP3hGgUkz9WGa4PY84CeMgmkl15ick+lYFrXfb4LoDqhuTQo=', 
'VOJ+PXyE/bedZwmSLFKqp1L9UIDc1DOvkBzReGCpV7IAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAACAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAGtJ0gAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAB3NZP8AAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAwAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAC+vCAAAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAAAAAAEAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAmJaAAAAABAAAAAUAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAAEAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEQAAAAMAAAAIAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAACAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAa0nSAQjo1xzuaygAAAAAAAAAAAAAAAAAAAAACAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAACAAAAAwAAAAgAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAVm0+gH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAkAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAoO67AH//////////AAAAAQAAAAEAAAAAAHoSAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAACAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAABZaC8Af/////////8AAAABAAAAAQAAAABZaC7/AAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAALLQXf9//////////wAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAcAAAACAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAC+vCAAAAAAFAAAABAAAAAAAAAAAAAAAAAAAAAIAAAACAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAMAAAADAAAACAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAADTmLOAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAEqBfIAAAAAAAAAAAAAAAAEAAAAJAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAIkXNwB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAgAAAAAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAADuayAwAAAACAAAABQAAAAQAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAkAAAAAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAADuayAwAAAACAAAABQAAAAIAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAgAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAA0J3DAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAkAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAdzWUAX//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAAmJaAAAAAAAAAAAAAAAADAAAACQAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAAEAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAAEAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAACQAAAAIAAAAArq
N6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAABAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAAJiWgAAAAAQAAAAFAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (2, 1, 'AAAAAQAAAAs=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (2, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 
1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/offer_ids-horizon.sql b/services/horizon/internal/test/scenarios/offer_ids-horizon.sql new file mode 100644 index 0000000000..c9e6388cd1 --- /dev/null +++ b/services/horizon/internal/test/scenarios/offer_ids-horizon.sql @@ -0,0 +1,1065 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF 
EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.asset_stats; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; 
Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE 
history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount > 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount > 0)) +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT 
NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged integer +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO asset_stats VALUES (1, '5000000000', 2, 0, ''); +INSERT INTO asset_stats VALUES (2, '5000000000', 2, 0, ''); + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-06-03 18:28:47.032496+02'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-06-03 18:28:47.039657+02'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-06-03 18:28:47.044048+02'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-06-03 18:28:47.054532+02'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-06-03 18:28:47.063028+02'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-06-03 18:28:47.068415+02'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-06-03 18:28:47.081625+02'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-06-03 18:28:47.087463+02'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-06-03 18:28:47.090109+02'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-06-03 18:28:47.092718+02'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-06-03 18:28:47.095973+02'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-06-03 18:28:47.099698+02'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-06-03 18:28:47.107549+02'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-06-03 18:28:47.112768+02'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-06-03 18:28:47.115116+02'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-06-03 18:28:47.116796+02'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-06-03 18:28:47.117989+02'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-06-03 18:28:47.120034+02'); +INSERT INTO gorp_migrations VALUES ('18_account_for_signers.sql', '2019-10-31 14:19:49.123835+01'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: 
- +-- + +INSERT INTO history_accounts VALUES (1, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_accounts VALUES (2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_accounts VALUES (3, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_accounts VALUES (4, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); +INSERT INTO history_accounts VALUES (5, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 5, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_assets VALUES (1, 'credit_alphanum4', 'USD', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_assets VALUES (2, 'credit_alphanum4', 'EUR', 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 2, true); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (2, 38654709761, 1, 33, '{"seller": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "offer_id": 2, "sold_amount": "49.9999999", "bought_amount": "45.0000000", "sold_asset_code": "USD", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "EUR", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "bought_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 38654709761, 2, 33, '{"seller": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "offer_id": 2, "sold_amount": "45.0000000", "bought_amount": "49.9999999", "sold_asset_code": "EUR", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "USD", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "bought_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 38654709761, 3, 33, '{"seller": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "offer_id": 3, "sold_amount": "100.0000000", "bought_amount": "80.0000000", "sold_asset_code": "USD", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "EUR", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "bought_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 38654709761, 4, 33, '{"seller": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "offer_id": 3, "sold_amount": "80.0000000", "bought_amount": "100.0000000", "sold_asset_code": "EUR", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "USD", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "bought_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 34359742465, 1, 33, '{"seller": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "offer_id": 1, "sold_amount": "100.0000000", "bought_amount": "100.0000000", "sold_asset_code": "USD", "sold_asset_type": 
"credit_alphanum4", "bought_asset_code": "EUR", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "bought_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 34359742465, 2, 33, '{"seller": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "offer_id": 1, "sold_amount": "100.0000000", "bought_amount": "100.0000000", "sold_asset_code": "EUR", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "USD", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "bought_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 34359742465, 3, 33, '{"seller": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "offer_id": 2, "sold_amount": "50.0000000", "bought_amount": "45.0000000", "sold_asset_code": "USD", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "EUR", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "bought_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 34359742465, 4, 33, '{"seller": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "offer_id": 2, "sold_amount": "45.0000000", "bought_amount": "50.0000000", "sold_asset_code": "EUR", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "USD", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "bought_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 17179873281, 1, 2, '{"amount": "500.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 17179873281, 2, 3, '{"amount": "500.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 17179877377, 1, 2, '{"amount": "500.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (4, 17179877377, 2, 3, '{"amount": "500.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (2, 12884905985, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 12884910081, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 12884914177, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 12884918273, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": 
"GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (2, 8589938689, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (5, 8589938689, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 8589938689, 3, 10, '{"weight": 1, "public_key": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (1, 8589942785, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (5, 8589942785, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589942785, 3, 10, '{"weight": 1, "public_key": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"}'); +INSERT INTO history_effects VALUES (3, 8589946881, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (5, 8589946881, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (3, 8589946881, 3, 10, '{"weight": 1, "public_key": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (4, 8589950977, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (5, 8589950977, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (4, 8589950977, 3, 10, '{"weight": 1, "public_key": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (9, '486cb2fea3a46891d301a6dd1d06f09d3cfe6144df09581888a0f51299eca4c9', 'beaea632702de50e50e1828cc4f6f2fa5177750a52ffb7f980e0eebc791e69da', 1, 1, '2019-06-03 16:35:18', '2019-06-03 16:35:14.561384', '2019-06-03 16:35:14.561384', 38654705664, 16, 1000000000000000000, 1500, 100, 100000000, 1000000, 11, 'AAAAC76upjJwLeUOUOGCjMT28vpRd3UKUv+3+YDg7rx5Hmnak9zC8GPJ6ZzGP5+PUo/5nHalJyPDK1xoG7op7rz8DI4AAAAAXPVMRgAAAAAAAAAA5u7HOB8cX0WnqixnvzOlY3D3Y3oP0dYzzW9yWnVbm0aw5yPUnzjewy4+r4wev9UkS7PVQ/EqDoIUgz/Be3VFmgAAAAkN4Lazp2QAAAAAAAAAAAXcAAAAAAAAAAAAAAAEAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (8, 'beaea632702de50e50e1828cc4f6f2fa5177750a52ffb7f980e0eebc791e69da', '0d3f343b2f9f0caebd2e9b5ad5233d384ddef567e01fdcd8e79b3a6f010e14cd', 1, 1, '2019-06-03 16:35:17', '2019-06-03 16:35:14.606009', '2019-06-03 16:35:14.606009', 34359738368, 16, 1000000000000000000, 1400, 100, 100000000, 1000000, 11, 'AAAACw0/NDsvnwyuvS6bWtUjPThN3vVn4B/c2OebOm8BDhTNF+e1/1KxUzO74nHG3kqkGpSmFdahZJnhcECgqI4Wul8AAAAAXPVMRQAAAAAAAAAAAOMGyexOuevk4QIYCK/e2TLQXecE/TBuu1dvGQilTsNvK1rbv4HEl5bCA6pVvDMw5mE6JcfcRSMCkb3u4w2XMQAAAAgN4Lazp2QAAAAAAAAAAAV4AAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (7, '0d3f343b2f9f0caebd2e9b5ad5233d384ddef567e01fdcd8e79b3a6f010e14cd', 'c3a9cd326f60a94427dcadeb699d3f6ecd39353cc932acc7d27451273326bfc8', 1, 1, '2019-06-03 16:35:16', '2019-06-03 16:35:14.625706', '2019-06-03 16:35:14.625706', 30064771072, 16, 1000000000000000000, 1300, 100, 100000000, 1000000, 11, 
'AAAAC8OpzTJvYKlEJ9yt62mdP27NOTU8yTKsx9J0USczJr/IuByoQOqmTAMUTx6gy0jhoGG1FiLn3uIQDEu52Ts13f4AAAAAXPVMRAAAAAAAAAAA8011I8cweiVULRk7g+I3eWM/k+U+svLLvJL8X9ENB1pwLIQgQIFbTzzBFKq7Ll+k2W2vdjGQsQwexfujYVC00AAAAAcN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (6, 'c3a9cd326f60a94427dcadeb699d3f6ecd39353cc932acc7d27451273326bfc8', 'bcea057e1cd211d2cfb7e3fb995fd45cd6a44221f38acc0522b45b8c81d96524', 1, 1, '2019-06-03 16:35:15', '2019-06-03 16:35:14.638052', '2019-06-03 16:35:14.638052', 25769803776, 16, 1000000000000000000, 1200, 100, 100000000, 1000000, 11, 'AAAAC7zqBX4c0hHSz7fj+5lf1FzWpEIh84rMBSK0W4yB2WUkjmMqt+2EvvBEZaLG6tbmWrlOaBGNpqS9R+fV7D1C1bEAAAAAXPVMQwAAAAAAAAAAuQrPQr2qlNVpCIqhQaeQMsD9JWWNaxISOIwMsxvilIUACd+JYsOyVEMrgEvOK6iALcFeWbKnOiuPJcluEtVi9gAAAAYN4Lazp2QAAAAAAAAAAASwAAAAAAAAAAAAAAACAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (5, 'bcea057e1cd211d2cfb7e3fb995fd45cd6a44221f38acc0522b45b8c81d96524', '7cbd2c56a6b343fa00f87e24264de55ff07e32f3b149084d99e565ef0cd4f02a', 1, 1, '2019-06-03 16:35:14', '2019-06-03 16:35:14.647891', '2019-06-03 16:35:14.647891', 21474836480, 16, 1000000000000000000, 1100, 100, 100000000, 1000000, 11, 'AAAAC3y9LFams0P6APh+JCZN5V/wfjLzsUkITZnlZe8M1PAqI/JFnzikQwlAWB2pC99Ya/VbQ2JSlXrpxN9Ed9L/rDMAAAAAXPVMQgAAAAAAAAAA5tFak/fh7MEOithjlydErBU0/M0ilaefu1Ub0Cw1cbQzN/qMhlghXJshjEg7qm8V0NxHBj4ZOikOX0S6QnMzRAAAAAUN4Lazp2QAAAAAAAAAAARMAAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (4, '7cbd2c56a6b343fa00f87e24264de55ff07e32f3b149084d99e565ef0cd4f02a', 'aa733b6e19d9d1df08ab8ac1bb70ce941adb9c319716dbbed0b83a4f9717a427', 2, 2, '2019-06-03 16:35:13', '2019-06-03 16:35:14.660033', '2019-06-03 16:35:14.660033', 17179869184, 16, 1000000000000000000, 1000, 100, 100000000, 1000000, 11, 'AAAAC6pzO24Z2dHfCKuKwbtwzpQa25wxlxbbvtC4Ok+XF6QnjkhXuTkDWtl4LNb/YNQTjpJEIGD+9shjsz3Jzsqv4/YAAAAAXPVMQQAAAAAAAAAAN5B5W9KsxoOXOpscNNdDinauNxC/Pw2kk7DDUo0SXOXZntDAxTfXn66m9IhE0XmliaKvkeIpNJfz9KPboAIZOwAAAAQN4Lazp2QAAAAAAAAAAAPoAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0); +INSERT INTO history_ledgers VALUES (3, 'aa733b6e19d9d1df08ab8ac1bb70ce941adb9c319716dbbed0b83a4f9717a427', '5b6634cc9e9199fd3fbbaf0655c54ca5e7b812d562e63eca0fb2b9a89b180c4f', 4, 4, '2019-06-03 16:35:12', '2019-06-03 16:35:14.672241', '2019-06-03 16:35:14.672241', 12884901888, 16, 1000000000000000000, 800, 100, 100000000, 1000000, 11, 
'AAAAC1tmNMyekZn9P7uvBlXFTKXnuBLVYuY+yg+yuaibGAxPE5AOt+233THPtTNbUnjSwPvmo6g3zyFcFPoc8VLPGGAAAAAAXPVMQAAAAAAAAAAAShzqI9aLY+7Kp5Zw36Ye703iZxnHA4Dw1m5KF4RHKmONRLiXB5aHfKAGWr3HxolQQxgyxXCdjPya6ZVzaVSFTwAAAAMN4Lazp2QAAAAAAAAAAAMgAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0); +INSERT INTO history_ledgers VALUES (2, '5b6634cc9e9199fd3fbbaf0655c54ca5e7b812d562e63eca0fb2b9a89b180c4f', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 4, 4, '2019-06-03 16:35:11', '2019-06-03 16:35:14.6861', '2019-06-03 16:35:14.6861', 8589934592, 16, 1000000000000000000, 400, 100, 100000000, 1000000, 11, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76Zh82QgsuASmS0xgFp1fjiT2wNgvVWRClSW5uMP344SMoAAAAAXPVMPwAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAAh7rHH3ehqvUsWEPfAlvVspAA2/ZR0JjPAFIdHbQ2sSiuJxnU7ly+zexZusXx0NLXyoK5vVQIvDzDn+FmYFOehgAAAAIN4Lazp2QAAAAAAAAAAAGQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-06-03 16:35:14.698632', '2019-06-03 16:35:14.698632', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); + + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 38654709761, 2); +INSERT INTO history_operation_participants VALUES (2, 34359742465, 2); +INSERT INTO history_operation_participants VALUES (3, 30064775169, 1); +INSERT INTO history_operation_participants VALUES (4, 25769807873, 1); +INSERT INTO history_operation_participants VALUES (5, 21474840577, 1); +INSERT INTO history_operation_participants VALUES (6, 17179873281, 3); +INSERT INTO history_operation_participants VALUES (7, 17179873281, 2); +INSERT INTO history_operation_participants VALUES (8, 17179877377, 4); +INSERT INTO history_operation_participants VALUES (9, 17179877377, 1); +INSERT INTO history_operation_participants VALUES (10, 12884905985, 2); +INSERT INTO history_operation_participants VALUES (11, 12884910081, 1); +INSERT INTO history_operation_participants VALUES (12, 12884914177, 2); +INSERT INTO history_operation_participants VALUES (13, 12884918273, 1); +INSERT INTO history_operation_participants VALUES (14, 8589938689, 5); +INSERT INTO history_operation_participants VALUES (15, 8589938689, 2); +INSERT INTO history_operation_participants VALUES (16, 8589942785, 5); +INSERT INTO history_operation_participants VALUES (17, 8589942785, 1); +INSERT INTO history_operation_participants VALUES (18, 8589946881, 5); +INSERT INTO history_operation_participants VALUES (19, 8589946881, 3); +INSERT INTO history_operation_participants VALUES (20, 8589950977, 5); +INSERT INTO 
history_operation_participants VALUES (21, 8589950977, 4); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 21, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (38654709761, 38654709760, 1, 3, '{"price": "0.8000000", "amount": "151.0000000", "price_r": {"d": 5, "n": 4}, "offer_id": 0, "buying_asset_code": "EUR", "buying_asset_type": "credit_alphanum4", "selling_asset_code": "USD", "selling_asset_type": "credit_alphanum4", "buying_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "selling_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (34359742465, 34359742464, 1, 3, '{"price": "0.9000000", "amount": "150.0000000", "price_r": {"d": 10, "n": 9}, "offer_id": 0, "buying_asset_code": "EUR", "buying_asset_type": "credit_alphanum4", "selling_asset_code": "USD", "selling_asset_type": "credit_alphanum4", "buying_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "selling_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (30064775169, 30064775168, 1, 3, '{"price": "1.2500000", "amount": "80.0000000", "price_r": {"d": 4, "n": 5}, "offer_id": 0, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_code": "EUR", "selling_asset_type": "credit_alphanum4", "buying_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "selling_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (25769807873, 25769807872, 1, 3, '{"price": "1.1111111", "amount": "90.0000000", "price_r": {"d": 1000000000, "n": 1111111111}, "offer_id": 0, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_code": "EUR", "selling_asset_type": "credit_alphanum4", "buying_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "selling_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (21474840577, 21474840576, 1, 3, '{"price": "1.0000000", "amount": "100.0000000", "price_r": {"d": 1, "n": 1}, "offer_id": 0, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_code": "EUR", "selling_asset_type": "credit_alphanum4", "buying_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "selling_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (17179873281, 17179873280, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "amount": "500.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (17179877377, 17179877376, 1, 1, '{"to": 
"GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "amount": "500.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (12884910081, 12884910080, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (12884914177, 12884914176, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (12884918273, 12884918272, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (8589938689, 8589938688, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589942785, 8589942784, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589946881, 8589946880, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589950977, 8589950976, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_trades VALUES (38654709761, 0, '2019-06-03 16:35:18', 2, 2, 1, 499999999, 1, 
2, 450000000, false, 1000000000, 1111111111, 4, 2); +INSERT INTO history_trades VALUES (38654709761, 1, '2019-06-03 16:35:18', 3, 2, 1, 1000000000, 1, 2, 800000000, false, 4, 5, 4, 3); +INSERT INTO history_trades VALUES (34359742465, 0, '2019-06-03 16:35:17', 1, 2, 1, 1000000000, 1, 2, 1000000000, false, 1, 1, 4611686052787130369, 1); +INSERT INTO history_trades VALUES (34359742465, 1, '2019-06-03 16:35:17', 2, 2, 1, 500000000, 1, 2, 450000000, false, 1000000000, 1111111111, 4611686052787130369, 2); + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transaction_participants VALUES (1, 38654709760, 2); +INSERT INTO history_transaction_participants VALUES (2, 34359742464, 2); +INSERT INTO history_transaction_participants VALUES (3, 30064775168, 1); +INSERT INTO history_transaction_participants VALUES (4, 25769807872, 1); +INSERT INTO history_transaction_participants VALUES (5, 21474840576, 1); +INSERT INTO history_transaction_participants VALUES (6, 17179873280, 3); +INSERT INTO history_transaction_participants VALUES (7, 17179873280, 2); +INSERT INTO history_transaction_participants VALUES (8, 17179877376, 4); +INSERT INTO history_transaction_participants VALUES (9, 17179877376, 1); +INSERT INTO history_transaction_participants VALUES (10, 12884905984, 2); +INSERT INTO history_transaction_participants VALUES (11, 12884910080, 1); +INSERT INTO history_transaction_participants VALUES (12, 12884914176, 2); +INSERT INTO history_transaction_participants VALUES (13, 12884918272, 1); +INSERT INTO history_transaction_participants VALUES (14, 8589938688, 5); +INSERT INTO history_transaction_participants VALUES (15, 8589938688, 2); +INSERT INTO history_transaction_participants VALUES (16, 8589942784, 5); +INSERT INTO history_transaction_participants VALUES (17, 8589942784, 1); +INSERT INTO history_transaction_participants VALUES (18, 8589946880, 5); +INSERT INTO history_transaction_participants VALUES (19, 8589946880, 3); +INSERT INTO history_transaction_participants VALUES (20, 8589950976, 5); +INSERT INTO history_transaction_participants VALUES (21, 8589950976, 4); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 21, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('54e27e3d7c84fdb79d6709922c52aaa752fd5080dcd433af901cd17860a957b2', 9, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934596, 100, 1, '2019-06-03 16:35:14.561744', '2019-06-03 16:35:14.561745', 38654709760, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAFoAxYAAAAAEAAAABQAAAAAAAAAAAAAAAAAAAAGu5L5MAAAAQNauhGmk9S3Y7k65YdRK2RAHjHwYitvkeuM+3nPCP3hGgUkz9WGa4PY84CeMgmkl15ick+lYFrXfb4LoDqhuTQo=', 
'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAIAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAABrSdIAAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAdzWT/AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAvrwgAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAABAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAAJiWgAAAAAQAAAAFAAAAAAAAAAAAAAAA', 'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAAEAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAEQAAAAMAAAAIAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAACAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAa0nSAQjo1xzuaygAAAAAAAAAAAAAAAAAAAAACAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAACAAAAAwAAAAgAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAVm0+gH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAkAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAoO67AH//////////AAAAAQAAAAEAAAAAAHoSAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAACAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAABZaC8Af/////////8AAAABAAAAAQAAAABZaC7/AAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAALLQXf9//////////wAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAcAAAACAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAC+vCAAAAAAFAAAABAAAAAAAAAAAAAAAAAAAAAIAAAACAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAMAAAADAAAACAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAADTmLOAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAEqBfIAAAAAAAAAAAAAAAAEAAAAJAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAIkXNwB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAgAAAAAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAADuayAwAAAACAAAABQAAAAQAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAkAAAAAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAADuayAwAAAACAAAABQAAAAIAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAgAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAA0J3DAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAkAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAdzWUAX//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAAmJaAAAAAAAAAAAAAAAADAAAACQAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAAEAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAAEAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAACQAAAAIAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAA
AAAAAABAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAAJiWgAAAAAQAAAAFAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAAIAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msjUAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7mshwAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{1q6EaaT1LdjuTrlh1ErZEAeMfBiK2+R64z7ec8I/eEaBSTP1YZrg9jzgJ4yCaSXXmJyT6VgWtd9vgugOqG5NCg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('1fa96abeeb0e2b71fa947d8f7003ca5d3e95734652c38b328ae62a8c1b1b3d73', 8, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934595, 100, 1, '2019-06-03 16:35:14.606344', '2019-06-03 16:35:14.606345', 34359742464, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAFloLwAAAAAJAAAACgAAAAAAAAAAAAAAAAAAAAGu5L5MAAAAQI4z8HdxCMc9Yj7IMY43+gnRL5meUMTGO5MNqHs+1faoWCnC+0IC3rRXjYWoigPnEBDmTNxQYfNA9LQQNH5Vdww=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAIAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygAAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAa0nSAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAHc1lAAAAAAIAAAAA', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rI1AAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rI1AAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADgAAAAMAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAASoF8gB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAoO67AAAAAAAAAAAAAAAAAQAAAAgAAAABAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAA05izgH//////////AAAAAQAAAAEAAAAAAAAAAAAAAABKgXyAAAAAAAAAAAAAAAADAAAABQAAAAIAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAgAAAAIAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAQAAAAMAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAFZtPoB//////////wAAAAEAAAAAAAAAAAAAAAMAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAABAAAAALLQXf8AAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAgAAAABAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAWWgvAH//////////AAAAAQAAAAEAAAAAWWgu/wAAAAAAAAAAAAAAAAAAAAAAAAADAAAABgAAAAIAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAAAAAAgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAANaTpAEI6Ncc7msoAAAAAAAAAAAAAAAAAAAAAAQAAAAgAAAACAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CU
epzdQPXlonciLVAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAABrSdIBCOjXHO5rKAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAUAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAUAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAASoF8gB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAIAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAANCdwwB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{jjPwd3EIxz1iPsgxjjf6CdEvmZ5QxMY7kw2oez7V9qhYKcL7QgLetFeNhaiKA+cQEOZM3FBh80D0tBA0flV3DA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('4a0ac16e948710199659b0e8131be06df5944e57806c0db722859e4e8aad1e44', 7, 1, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934597, 100, 1, '2019-06-03 16:35:14.625995', '2019-06-03 16:35:14.625996', 30064775168, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAC+vCAAAAAAFAAAABAAAAAAAAAAAAAAAAAAAAAEnciLVAAAAQGCvLROwHtGG6m2PJ0IIz3FRHGUx9WygqNXHNQPN0Ypk/oTNltJAuPn52FZ+O7fImvcHffMLVMCFDNDTgFnrIQM=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAC+vCAAAAAAFAAAABAAAAAAAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rIDAAAAAIAAAAEAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rIDAAAAAIAAAAFAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAMAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAUAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAUAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAADAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAvrwgAAAAABQAAAAQAAAAAAAAAAAAAAAAAAAADAAAABgAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAQAAAAB3NZP/AAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAABAAAAALLQXf8AAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAYAAAABAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABKgXyAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAABxP7MAAAAAAAAAAAAAAAABAAAABwAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAKDuuwAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAGAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7mshwAAAAAgAAAAQAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msgMAAAAAgAAAAQAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{YK8tE7Ae0YbqbY8nQgjPcVEcZTH1bKCo1cc1A83RimT+hM2W0kC4+fnYVn47t8ia9wd98wtUwIUM0NOAWeshAw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2d0d264d9e2e3364c29faf6a6ec815a8a8e6a46e23096da1411f292309d78712', 6, 1, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934596, 100, 1, '2019-06-03 16:35:14.638427', '2019-06-03 16:35:14.638427', 25769807872, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADWk6QBCOjXHO5rKAAAAAAAAAAAAAAAAAAAAAAEnciLVAAAAQHofe3bgdjBd664ArqrKLj2/ia4bLa5YlG/ML7uFVRKWTbp0lpbKCa0bFR5AEnCHD/FyJgKh3TiNOpK3HFRkWQ8=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADWk6QBCOjXHO5rKAAAAAAAAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rIcAAAAAIAAAADAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rIcAAAAAIAAAAEAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAMAAAAGAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7mshwAAAAAgAAAAQAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7mshwAAAAAgAAAAQAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAACAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA1pOkAQjo1xzuaygAAAAAAAAAAAAAAAAAAAAADAAAABQAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAAAAAAf/////////8AAAABAAAAAQAAAAA7msoAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAABAAAAAHc1k/8AAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAUAAAABAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABKgXyAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAHE/swAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAFAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msjUAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7mshwAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{eh97duB2MF3rrgCuqsouPb+JrhstrliUb8wvu4VVEpZNunSWlsoJrRsVHkAScIcP8XImAqHdOI06krccVGRZDw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('c7ab3843f0a0c4fe79dece0ff1b8391f7a9d34c47cbd7e35034f7b28f60dfc00', 3, 3, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934594, 100, 1, '2019-06-03 16:35:14.672808', '2019-06-03 16:35:14.672808', 12884914176, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAGu5L5MAAAAQO/nblo8KAkSOf8cQOOiADXygx+I0ZdWoM4Vg4EKPAAJXFntctjCIyQ4csVUywaW32J/keQWYby52BjiNhT/6Qo=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 
'{7+duWjwoCRI5/xxA46IANfKDH4jRl1agzhWDgQo8AAlcWe1y2MIjJDhyxVTLBpbfYn+R5BZhvLnYGOI2FP/pCg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('356eec1f0432bbbba09ddcc1937e28253a4dc8e98ff77d5e9dee140be5093a70', 5, 1, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934595, 100, 1, '2019-06-03 16:35:14.648216', '2019-06-03 16:35:14.648216', 21474840576, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAEnciLVAAAAQE6AkyD+X3Poc6Fj6lalYvmHUdbN38uun6CX5Mc/cJnFQaqtZBOoAwDntTl1Gz/f7reRpXcJYSdQ+pEcUn/2PAE=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAAAAAAEAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygAAAAABAAAAAQAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rI1AAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rI1AAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAMAAAAFAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msjUAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msjUAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAgAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAAAAAABAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAADAAAABAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAf/////////8AAAABAAAAAAAAAAAAAAABAAAABQAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAADuaygAAAAAAAAAAAAAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAFAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAABAAAAADuaygAAAAAAAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ToCTIP5fc+hzoWPqVqVi+YdR1s3fy66foJfkxz9wmcVBqq1kE6gDAOe1OXUbP9/ut5GldwlhJ1D6kRxSf/Y8AQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2837a1b3def2c2ddfdcde5b44bf08d7a11a9328d870df17fa2bb66d4c83260c7', 4, 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934593, 100, 1, '2019-06-03 16:35:14.660189', '2019-06-03 16:35:14.660189', 17179873280, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAEqBfIAAAAAAAAAAAH5kC3vAAAAQGoQPiD0HEBi0U8cHN6nlZ3okEfdmt7mqkQHIt2tuRLaZZ1iMQwU43M8v+ntJQsA4c2eBXt9GYp/29FLjnba1Qw=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAASoF8gB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ahA+IPQcQGLRTxwc3qeVneiQR92a3uaqRAci3a25EtplnWIxDBTjczy/6e0lCwDhzZ4Fe30Zin/b0UuOdtrVDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('e4febb6aab5f83df4c19b4f1f6b8687f11886169e996e16baa75aac2d8a09c68', 4, 2, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 8589934593, 100, 1, '2019-06-03 16:35:14.660364', '2019-06-03 16:35:14.660365', 17179877376, 'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAEqBfIAAAAAAAAAAAEQithJAAAAQEghWBLDjmNLzcPF6o8dqUHMsI0WhttWE/ABSKaHNc+0FqsF+ui5+eky4ERyu99YR6BEHF4NlgyLnSq3zcDr9Qo=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAASoF8gB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{SCFYEsOOY0vNw8Xqjx2pQcywjRaG21YT8AFIpoc1z7QWqwX66Ln56TLgRHK731hHoEQcXg2WDIudKrfNwOv1Cg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('bd486dbdd02d460817671c4a5a7e9d6e865ca29cb41e62d7aaf70a2fee5b36de', 3, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934593, 100, 1, '2019-06-03 16:35:14.672439', '2019-06-03 16:35:14.67244', 12884905984, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQB9kmKW2q3v7Qfy8PMekEb1TTI5ixqkI0BogXrOt7gO162Qbkh2dSTUfeDovc0PAafhDXxthVAlsLujlBmyjBAY=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{H2SYpbare/tB/Lw8x6QRvVNMjmLGqQjQGiBes63uA7XrZBuSHZ1JNR94Oi9zQ8Bp+ENfG2FUCWwu6OUGbKMEBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('00ab9cfce2b4c4141d8bb6768dd094bdbb1c7406710dbb3ba0ef98870f63a344', 3, 2, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934593, 100, 1, '2019-06-03 16:35:14.67266', '2019-06-03 16:35:14.67266', 12884910080, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAEnciLVAAAAQLVbII+1LeizxgncDI46KHyBt05+H92n1+R328J9zNl2fgJW2nfn3FIoLVs2qV1+CUpr121a2B7AM6HKr4nBLAI=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{tVsgj7Ut6LPGCdwMjjoofIG3Tn4f3afX5Hfbwn3M2XZ+Albad+fcUigtWzapXX4JSmvXbVrYHsAzocqvicEsAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('5ef9c06bb625d2da2281118bdf14d808768353ee2fca7457c8e506fbeb91fc55', 3, 4, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934594, 100, 1, '2019-06-03 16:35:14.672941', '2019-06-03 16:35:14.672942', 12884918272, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAEnciLVAAAAQOG2GKO9i60cM1QK2UN1gXrHEYjeGLRXFT5snCqO5FnPET5cVs30N7ITPZ6HH6QcZ2IdC1c66wLge4GyR8vpww0=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{4bYYo72LrRwzVArZQ3WBescRiN4YtFcVPmycKo7kWc8RPlxWzfQ3shM9nocfpBxnYh0LVzrrAuB7gbJHy+nDDQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d', 2, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-06-03 16:35:14.686222', '2019-06-03 16:35:14.686223', 8589938688, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAECDzqvkQBQoNAJifPRXDoLhvtycT3lFPCQ51gkdsFHaBNWw05S/VhW0Xgkr0CBPE4NaFV2Kmcs3ZwLmib4TRrML', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTRwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{g86r5EAUKDQCYnz0Vw6C4b7cnE95RTwkOdYJHbBR2gTVsNOUv1YVtF4JK9AgTxODWhVdipnLN2cC5om+E0azCw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('164a5064eba64f2cdbadb856bf3448485fc626247ada3ed39cddf0f6902133b6', 2, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 2, 100, 1, '2019-06-03 16:35:14.686364', '2019-06-03 16:35:14.686364', 8589942784, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEASEZiZbeFwCsrKBnKIus/05VtJDBrgosuhLQ/U6XUj4twWyhs7UtS4CMexOM6JqcfqJK10WlBkkwn4g8PIfjIG', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTRwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmpwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{EhGYmW3hcArKygZyiLrP9OVbSQwa4KLLoS0P1Ol1I+LcFsobO1LUuAjHsTjOianH6iStdFpQZJMJ+IPDyH4yBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2d365c3c49f376570df856bca62503966f0e269a2f51cdb68ce2ee19a7f8245a', 2, 3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 3, 100, 1, '2019-06-03 16:35:14.686469', '2019-06-03 16:35:14.686469', 8589946880, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAAAAAABVvwF9wAAAEB/JBgvIM71gLBIh0TON9b+l+ApZz1CKDQiUFSV0scRguB1anyMwMR6s5SiaCwtDnxsPna12RdUQKlH2aeMAy8H', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmpwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6BwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{fyQYLyDO9YCwSIdEzjfW/pfgKWc9Qig0IlBUldLHEYLgdWp8jMDEerOUomgsLQ58bD52tdkXVECpR9mnjAMvBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('003e91101d19aabb429491953806886d777c260233c6478f1c928a79ec4e2743', 2, 4, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 4, 100, 1, '2019-06-03 16:35:14.686579', '2019-06-03 16:35:14.686579', 8589950976, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDIOudzujfo+dSIJXXb06SjLBLLXsFxnVnR1HJejfq2NgFUtLuX2KrVNSZyRBG+WvfdoXwCPcp85hDRbCmjbPYM', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6BwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NZwAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{yDrnc7o36PnUiCV129OkoywSy17BcZ1Z0dRyXo36tjYBVLS7l9iq1TUmckQRvlr33aF8Aj3KfOYQ0Wwpo2z2DA==}', 'none', NULL, NULL, true, 100); + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets 
history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: 
htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: 
index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + +-- added manually +ALTER TABLE IF EXISTS ONLY public.key_value_store DROP CONSTRAINT IF EXISTS key_value_store_pkey; +DROP TABLE IF EXISTS public.key_value_store; +CREATE TABLE key_value_store ( + key character varying(255) NOT NULL, + value character varying(255) NOT NULL +); +INSERT INTO key_value_store VALUES ('exp_ingest_last_ledger', '0'); +ALTER TABLE ONLY key_value_store + ADD CONSTRAINT key_value_store_pkey PRIMARY KEY (key); + +CREATE TABLE accounts_signers ( + account character varying(64), + signer character varying(64), + weight integer NOT NULL, + -- we will query by signer so that is why signer is the first item in the composite key + PRIMARY KEY (signer, account) +); + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/operation_fee_stats_1-core.sql b/services/horizon/internal/test/scenarios/operation_fee_stats_1-core.sql new file mode 100644 index 0000000000..d214fbbfbe --- /dev/null +++ b/services/horizon/internal/test/scenarios/operation_fee_stats_1-core.sql @@ -0,0 +1,751 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF 
EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, 
+ thresholds text NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + signers text, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + CONSTRAINT 
trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 999999997999999800, 2, 0, NULL, '', 'AQAAAA==', 0, 2, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 299999100, 8589934601, 0, NULL, '', 'AQAAAA==', 0, 7, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 1699999800, 8589934594, 0, NULL, '', 'AQAAAA==', 0, 7, NULL, NULL, NULL); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('b296f06555c13ef529da4922b5f8092f7b39ec8364ce93ac8a00b7ac131ca8a3', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 'c30541f99853c61f7df40b281682323d4e7b3620b3a636c4356d0e2cab3a2813', 2, 1559579801, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZRZSEryEgGUi8Runb12uW8dKmZaWm1LdHQ2+tzinFtaAAAAAAXPVMmQAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA+9VGePGCiconnKwGWQgSrBOnhFsIJOmYJO/8HociG0TDBUH5mFPGH330CygWgjI9Tns2ILOmNsQ1bQ4sqzooEwAAAAIN4Lazp2QAAAAAAAAAAADIAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('e17d4c547e8c25da958839a24d65fad4245c061b18053c0b3355e480abbaa181', 
'b296f06555c13ef529da4922b5f8092f7b39ec8364ce93ac8a00b7ac131ca8a3', 'c984a97f1dd3a31da48a0e5dfbd2e7273f813780f213e2acefc1b74f0528c427', 3, 1559579802, 'AAAAC7KW8GVVwT71KdpJIrX4CS97OeyDZM6TrIoAt6wTHKijYjXpTzYAT3UVd5+80mub7DyftrMo9PFH/aZNNRh0SvMAAAAAXPVMmgAAAAAAAAAAzpWxfUdkwpI+mahMUGVw0hP2fK7oFHG9i01PTevvzr/JhKl/HdOjHaSKDl370ucnP4E3gPIT4qzvwbdPBSjEJwAAAAMN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('161c1bd6b25ae1a13742a87c6cb92c865a36c388fb88cd7998b861aeba6696d9', 'e17d4c547e8c25da958839a24d65fad4245c061b18053c0b3355e480abbaa181', 'e760ac1cb31b654a18b0ac902352d55b7ce3d8f78e7817404e56e87b6b902b8d', 4, 1559579803, 'AAAAC+F9TFR+jCXalYg5ok1l+tQkXAYbGAU8CzNV5ICruqGBv7gH+MirXcGlvfrSyn6jsW5bxOF4aGsLXUWY6b0GVDcAAAAAXPVMmwAAAAAAAAAA9Nf75AfzKHcfz2nf3wc7i1MpbTisZhsly7MED4o1H3nnYKwcsxtlShiwrJAjUtVbfOPY9454F0BOVuh7a5ArjQAAAAQN4Lazp2QAAAAAAAAAAAGQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('8c22cb8c9c0b37cf98f7fe67fc3a860765c3e68cc0e3ca734a2245d666a24daf', '161c1bd6b25ae1a13742a87c6cb92c865a36c388fb88cd7998b861aeba6696d9', '396e47d798b0c0b556f02665c378a061723bd0095c06764aac20d1faad1b841b', 5, 1559579804, 'AAAACxYcG9ayWuGhN0KofGy5LIZaNsOI+4jNeZi4Ya66ZpbZv9yVLXLZ14BAjbXRKWKqMHgP4d/yiTLYGoLhZYpcsGQAAAAAXPVMnAAAAAAAAAAAK7ZFHpUv5xgPNV5PVN8eW9yyrJcBUZDK2CSKHdvPgtg5bkfXmLDAtVbwJmXDeKBhcjvQCVwGdkqsINH6rRuEGwAAAAUN4Lazp2QAAAAAAAAAAAMgAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('12f18c34bb467618af7d20936de6ca4a3d1caa881c36ac9a7b89ce713454ed42', '8c22cb8c9c0b37cf98f7fe67fc3a860765c3e68cc0e3ca734a2245d666a24daf', '49cb9b70c8688dcbefe7031d6bec577ef46c31d5f58e48c9067d7c960adc50aa', 6, 1559579805, 'AAAAC4wiy4ycCzfPmPf+Z/w6hgdlw+aMwOPKc0oiRdZmok2vimN7pygrRl39wo4rGR3ojemz1j9vGRpGXgvKvFhV0TIAAAAAXPVMnQAAAAAAAAAA+GFZHqUIknS3p5sZb4czc8Kj0abGSMVfNaq+9zzlt2RJy5twyGiNy+/nAx1r7Fd+9Gwx1fWOSMkGfXyWCtxQqgAAAAYN4Lazp2QAAAAAAAAAAAPoAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('16f6fdb5c204980e34ec5334c409b452b5906550d6e2bc5e65b780335e128f25', '12f18c34bb467618af7d20936de6ca4a3d1caa881c36ac9a7b89ce713454ed42', 'd2a91324b1983fd6cfd3376313d0f4c02ef6707b486b2ffbcef6acbc0d723284', 7, 1559579806, 'AAAACxLxjDS7RnYYr30gk23myko9HKqIHDasmnuJznE0VO1CJAEap9zN9ulrCqs7yeKqKBciTLmHONm+AASqGkGhtEUAAAAAXPVMngAAAAAAAAAAL150/k9Xfs0TJQCfIu9DCsOEki1Kn+hL2IFyyLVMdXLSqRMksZg/1s/TN2MT0PTALvZwe0hrL/vO9qy8DXIyhAAAAAcN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: 
publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GCIGR6QE7NDE3QMWAWTXXVRVQGMGJVWQHZMK25TSWPENL7TUDG7BTZYI', 2, 'AAAAAJBo+gT7Rk3BlgWne9Y1gZhk1tA+WK12crPI1f50Gb4ZAAAAAAAAAAIAAAACAAAAAQAAAEhFlISvISAZSLxG6dvXa5bx0qZlpabUt0dDb63OKcW1oAAAAABc9UyZAAAAAgAAAAgAAAABAAAACwAAAAgAAAADAA9CQAAAAAAAAAABbDeOXFi61B2Pe6szbBTmAzwFbD9egCqUxdKtRhXAwaMAAABAEDkXOMnwqSRmmDXziCAZon8RpRmIUR0QgPuA3tYIahX3QuHVU9jsiUIerX2PY+DKfawhAdVPA578Yv6/nR5NCw=='); +INSERT INTO scphistory VALUES ('GCIGR6QE7NDE3QMWAWTXXVRVQGMGJVWQHZMK25TSWPENL7TUDG7BTZYI', 3, 'AAAAAJBo+gT7Rk3BlgWne9Y1gZhk1tA+WK12crPI1f50Gb4ZAAAAAAAAAAMAAAACAAAAAQAAADBiNelPNgBPdRV3n7zSa5vsPJ+2syj08Uf9pk01GHRK8wAAAABc9UyaAAAAAAAAAAAAAAABbDeOXFi61B2Pe6szbBTmAzwFbD9egCqUxdKtRhXAwaMAAABAqVdItJ8hFQ6ujhlHi1Vs0aaJW0AEneLThwWvtqED+ZSm8OaACbgRztBr7S6hWaTtRzCxDtnHiZzILLx4TAsFAQ=='); +INSERT INTO scphistory VALUES ('GCIGR6QE7NDE3QMWAWTXXVRVQGMGJVWQHZMK25TSWPENL7TUDG7BTZYI', 4, 'AAAAAJBo+gT7Rk3BlgWne9Y1gZhk1tA+WK12crPI1f50Gb4ZAAAAAAAAAAQAAAACAAAAAQAAADC/uAf4yKtdwaW9+tLKfqOxblvE4XhoawtdRZjpvQZUNwAAAABc9UybAAAAAAAAAAAAAAABbDeOXFi61B2Pe6szbBTmAzwFbD9egCqUxdKtRhXAwaMAAABAJjwxb37uF7Gl7dwtbbKcFzEB5SAUEInNgxwZv1Jcs9aT+fU3wlXuxR3oEcvJ9LkJgJWww/78GNloVWi7uJ7QBw=='); +INSERT INTO scphistory VALUES ('GCIGR6QE7NDE3QMWAWTXXVRVQGMGJVWQHZMK25TSWPENL7TUDG7BTZYI', 5, 'AAAAAJBo+gT7Rk3BlgWne9Y1gZhk1tA+WK12crPI1f50Gb4ZAAAAAAAAAAUAAAACAAAAAQAAADC/3JUtctnXgECNtdEpYqoweA/h3/KJMtgaguFlilywZAAAAABc9UycAAAAAAAAAAAAAAABbDeOXFi61B2Pe6szbBTmAzwFbD9egCqUxdKtRhXAwaMAAABAqhj04lp6kTXqSooppKfpo5W/a7W7N7k0UBtS7j1f2X0Qi7+yqIiPTCrvnYMmt2eaN9bZdJUXmQ1W1/laI7YwCg=='); +INSERT INTO scphistory VALUES ('GCIGR6QE7NDE3QMWAWTXXVRVQGMGJVWQHZMK25TSWPENL7TUDG7BTZYI', 6, 'AAAAAJBo+gT7Rk3BlgWne9Y1gZhk1tA+WK12crPI1f50Gb4ZAAAAAAAAAAYAAAACAAAAAQAAADCKY3unKCtGXf3CjisZHeiN6bPWP28ZGkZeC8q8WFXRMgAAAABc9UydAAAAAAAAAAAAAAABbDeOXFi61B2Pe6szbBTmAzwFbD9egCqUxdKtRhXAwaMAAABAcjEAuoc4PaBUK+n7A+5qX2vHuHtCta7ZtRkx/nCgDj7PWLvrhZhODiczedfcsQgjPM7i1EJHWH4AmJTUzYIzAA=='); +INSERT INTO scphistory VALUES ('GCIGR6QE7NDE3QMWAWTXXVRVQGMGJVWQHZMK25TSWPENL7TUDG7BTZYI', 7, 'AAAAAJBo+gT7Rk3BlgWne9Y1gZhk1tA+WK12crPI1f50Gb4ZAAAAAAAAAAcAAAACAAAAAQAAADAkARqn3M326WsKqzvJ4qooFyJMuYc42b4ABKoaQaG0RQAAAABc9UyeAAAAAAAAAAAAAAABbDeOXFi61B2Pe6szbBTmAzwFbD9egCqUxdKtRhXAwaMAAABAZeYSW0x30Lx+uF+Kd7jVEs05MJtFSg/NLUhAzHMg1fryrp6fsbuvge1E5c9eXy6HlPRGvJp6uqzbnAOdNIjNCw=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('6c378e5c58bad41d8f7bab336c14e6033c056c3f5e802a94c5d2ad4615c0c1a3', 7, 'AAAAAQAAAAEAAAAAkGj6BPtGTcGWBad71jWBmGTW0D5YrXZys8jV/nQZvhkAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('lastscpdata ', 
'AAAAAgAAAACQaPoE+0ZNwZYFp3vWNYGYZNbQPlitdnKzyNX+dBm+GQAAAAAAAAAHAAAAA2w3jlxYutQdj3urM2wU5gM8BWw/XoAqlMXSrUYVwMGjAAAAAQAAAJgkARqn3M326WsKqzvJ4qooFyJMuYc42b4ABKoaQaG0RQAAAABc9UyeAAAAAAAAAAEAAAAAkGj6BPtGTcGWBad71jWBmGTW0D5YrXZys8jV/nQZvhkAAABAcnBdObLArAwkdweE+4VOuE+WDmB8as9cjwGRuBNgizAxCTGveIIywRDdHZEj/rNu6ueuxbvB65we1CfnLyvHDAAAAAEAAACYJAEap9zN9ulrCqs7yeKqKBciTLmHONm+AASqGkGhtEUAAAAAXPVMngAAAAAAAAABAAAAAJBo+gT7Rk3BlgWne9Y1gZhk1tA+WK12crPI1f50Gb4ZAAAAQHJwXTmywKwMJHcHhPuFTrhPlg5gfGrPXI8BkbgTYIswMQkxr3iCMsEQ3R2RI/6zburnrsW7weucHtQn5y8rxwwAAABA00W14CePy72nEFXR+wui81syIgNeqbm7oDUyuWRB4ajAeJgO5egVxWccnL5YMOkTH/v6NAIYPAHin2WlxeUPAgAAAACQaPoE+0ZNwZYFp3vWNYGYZNbQPlitdnKzyNX+dBm+GQAAAAAAAAAHAAAAAgAAAAEAAAAwJAEap9zN9ulrCqs7yeKqKBciTLmHONm+AASqGkGhtEUAAAAAXPVMngAAAAAAAAAAAAAAAWw3jlxYutQdj3urM2wU5gM8BWw/XoAqlMXSrUYVwMGjAAAAQGXmEltMd9C8frhfine41RLNOTCbRUoPzS1IQMxzINX68q6en7G7r4HtROXPXl8uh5T0Rryaerqs25wDnTSIzQsAAAABEvGMNLtGdhivfSCTbebKSj0cqogcNqyae4nOcTRU7UIAAAADAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAJAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAzc3x+vst1GnJX/uxRYwVtT877yIiHEiZQyAgYG+P+4pnqEM6h/+9NNgotuSXCVb8dfbGanBDQVE/qrmUInYiBwAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAGQAAAACAAAACAAAAAAAAAAAAAAAAQAAAAAAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAAAAAAAF9eEAAAAAAAAAAAFvFIhaAAAAQDUzPCUl126yuAQb8v+svEOlMcO7uR3leGX42n4h513XqXUy7F9bzm0IBwysBUkNW1bQzkag64SfZY0XuB4IkQUAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAABkAAAAAgAAAAcAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAAAAAAABfXhAAAAAAAAAAABbxSIWgAAAEDlV0RdOHuQ6WeUCZnKRvjrZ0Uvr5RF+/uSfbbZYPu6hcG3jOnQlNI+0HcMbKGN8C1fbmsUlxzth1c26FBtI0MDAAAAAQAAAAEAAAABAAAAAJBo+gT7Rk3BlgWne9Y1gZhk1tA+WK12crPI1f50Gb4ZAAAAAA=='); +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 0, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('lastclosedledger ', '16f6fdb5c204980e34ec5334c409b452b5906550d6e2bc5e65b780335e128f25'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 7, + "currentBuckets": [ + { + "curr": "29afea1a66891d920a8d72a566a6034974feba5c27458ce64c20903385e97832", + "next": { + "state": 0 + }, + "snap": "fe332d63b873c6c32053466b300b22d0806cfbdb73600c7b54d76e959100be16" + }, + { + "curr": "03da1c699b4ebbe1f50afee8ef8be8ce7ec95a7dd93f4f066a44656a3c5faa67", + "next": { + "state": 1, + "output": "fe332d63b873c6c32053466b300b22d0806cfbdb73600c7b54d76e959100be16" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": 
"0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d', 2, 1, 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('1ed82961fb013d39f96aa7e428c4174caa4a5a43dbc65713a37d46d96ee5c314', 2, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('aeb26130d1715c5e9b0c85de46d454200cec26513e8ce06ab05628069bea0793', 3, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('d178e262b8b2aac66a75f69e70ce3cb7fdf01a6060433636c7b4a3a178236429', 4, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKsAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKqcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('f04617a26c4212f15a73578f59ea9913500fcb4818f828c17190f1454a04186c', 5, 1, 'AAAAAgAAAAMAAAAEAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('b31dce4295a5ef0e8f19fb83816ed880d573e5a2669f85be5cc602a90f9250c5', 5, 2, 
'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('b0db02f48666502f7bb227049e48f7805380f07aee6e704b6a12b25d48c7b591', 5, 3, 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7mshwAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('4d1e89bd9835caded110ada3ace56bd1a919b8e53e3bb458aeb51330068269c6', 5, 4, 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7mshwAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('f60bf7761d5459f1087262ca47486c901808d239486096c92541efa445cc4fe9', 6, 1, 'AAAAAgAAAAMAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck2cAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck04AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('9e56352113669f286d4950e55d0d9706fd41a0b3a0b74d5e51268bd0298e7e47', 6, 2, 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0QMAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0OoAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('720754335df899ed59dac9b35c89a1de60a7054c006db48ce12dc5081a6bbc5f', 7, 1, 'AAAAAgAAAAMAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0OoAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0NEAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('dfb03490f1faae8720598126bd770af8e7f081ac8e0683cea55a8aa35a6ba60a', 7, 2, 'AAAAAgAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0NEAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0LgAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('c9579846d3165e8f4271222caa4ce10d4ba1244011a8204dc68b810f055840fa', 7, 3, 'AAAAAgAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0LgAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0J8AAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d', 2, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAECDzqvkQBQoNAJifPRXDoLhvtycT3lFPCQ51gkdsFHaBNWw05S/VhW0Xgkr0CBPE4NaFV2Kmcs3ZwLmib4TRrML', 'I3Tpk0m57326ml2zM5t4/ajzR3exrzO6RorVwN+UbU0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTU4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('1ed82961fb013d39f96aa7e428c4174caa4a5a43dbc65713a37d46d96ee5c314', 2, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rKAAAAAAAAAAABVvwF9wAAAECuHh3Q7Zpbulw+xzp7NeMPdfYErNzJOrvQi8GOkN7WgfwSPgzHcPE/E/s8CL/AQrjBtw067aUZAvoaVf12oCQB', 'HtgpYfsBPTn5aqfkKMQXTKpKWkPbxlcTo31G2W7lwxQAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTU4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLms4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('aeb26130d1715c5e9b0c85de46d454200cec26513e8ce06ab05628069bea0793', 3, 1, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAJ1AToo3dEH9+7//OjpIHtWCDsL/0MUQlbjUSQC2+I3TVEl9chqrpqx5GG6yjN8INl3IZ7/HSA0EfRB2xZ9VMCg==', 'rrJhMNFxXF6bDIXeRtRUIAzsJlE+jOBqsFYoBpvqB5MAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKsAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA1pOicAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('d178e262b8b2aac66a75f69e70ce3cb7fdf01a6060433636c7b4a3a178236429', 4, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAAX14QAAAAAAAAAAAa7kvkwAAABA9qncr+3eaHaYqpDspvoIbiENnY3te9dqrCYtGbiT13CWh/b+cm+CUe9//x0NDxiU/ptY0QlY/z54IF7jF0H7CQ==', '0XjiYriyqsZqdfaecM48t/3wGmBgQzY2x7SjoXgjZCkAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAQZCqnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAQZCqnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA1pOicAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKqcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('f04617a26c4212f15a73578f59ea9913500fcb4818f828c17190f1454a04186c', 5, 1, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAG5HCDK2pf/77Ppffgv5hal7Q0yyfubULLN9szm3nJYL9YT60pLsuIC4YSwxAyVvsUHyQ3iJ48EQ+3VS/uIiiAg==', '8EYXomxCEvFac1ePWeqZE1APy0gY+CjBcZDxRUoEGGwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKqcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA1pOcMAAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('b31dce4295a5ef0e8f19fb83816ed880d573e5a2669f85be5cc602a90f9250c5', 5, 2, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAgzQF38kzgeHKf4Y3rZKYXturoU3n2LXyyuISFdK6/D5seTrjOXHU+m4kiIVeWUNtHx7ep3MSD1wIXuKjT0ReAA==', 'sx3OQpWl7w6PGfuDgW7YgNVz5aJmn4W+XMYCqQ+SUMUAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAANaTnDAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAANaTnDAAAAAIAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKqcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABHhoucAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA1pOcMAAAAAgAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAvrwYMAAAAAgAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('b0db02f48666502f7bb227049e48f7805380f07aee6e704b6a12b25d48c7b591', 5, 3, 
'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABABqMU3WJT6ur297rvjulCqylVeC1bNKyQbClqyad+ou+x8u7GtYDf6o+aP/sLKitYYGnlDUvpTgdIyuMqSncYAw==', 'sNsC9IZmUC97sicEnkj3gFOA8HrubnBLahKyXUjHtZEAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAL68GDAAAAAIAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAL68GDAAAAAIAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABHhoucAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABNfGycAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAvrwYMAAAAAgAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAApuSUMAAAAAgAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('4d1e89bd9835caded110ada3ace56bd1a919b8e53e3bb458aeb51330068269c6', 5, 4, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAKDnpoJwX4M7e52BdtZadIq1SC7dJxAjJDiXzMAK6ysLY2VKGVvXWs/RWmZYiXIkDO0ECyKfIov+1y4stQypZDw==', 'TR6JvZg1yt7REK2jrOVr0akZuOU+O7RYrrUTMAaCacYAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAKbklDAAAAAIAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAKbklDAAAAAIAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABNfGycAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck2cAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAApuSUMAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0QMAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('f60bf7761d5459f1087262ca47486c901808d239486096c92541efa445cc4fe9', 6, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAAX14QAAAAAAAAAAAa7kvkwAAABA+lv7NIE3yrIXlVPXxn1pYF38xMsqkaa42kprQQwAQlAdG8ICI4t+ZLX4pel6cAZFGYx73fZyXKBHruV0RvNGCA==', '9gv3dh1UWfEIcmLKR0hskBgI0jlIYJbJJUHvpEXMT+kAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAU3JNOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAU3JNOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0OoAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAApuSSoAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck04AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABNfGw4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('9e56352113669f286d4950e55d0d9706fd41a0b3a0b74d5e51268bd0298e7e47', 6, 2, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAFeeBKecfo5v061dy3QfbF8zgO6gEUR8ildkKig42N0Yl4Z437Kpj4M0LWfDibhvKd6+voXM5rEFLVMpYFr/5AA==', 'nlY1IRNmnyhtSVDlXQ2XBv1BoLOgt01eUSaL0CmOfkcAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAKbkkqAAAAAIAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAKbkkqAAAAAIAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABNfGw4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck04AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAApuSSoAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0OoAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('720754335df899ed59dac9b35c89a1de60a7054c006db48ce12dc5081a6bbc5f', 7, 1, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABA5VdEXTh7kOlnlAmZykb462dFL6+URfv7kn222WD7uoXBt4zp0JTSPtB3DGyhjfAtX25rFJcc7YdXNuhQbSNDAw==', 'cgdUM134me1Z2smzXImh3mCnBUwAbbSM4S3FCBprvF8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAI8NCfAAAAAIAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAI8NCfAAAAAIAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck04AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABZaC44AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0J8AAAAAgAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAdzWF8AAAAAgAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('dfb03490f1faae8720598126bd770af8e7f081ac8e0683cea55a8aa35a6ba60a', 7, 2, 
'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABANTM8JSXXbrK4BBvy/6y8Q6Uxw7u5HeV4ZfjafiHnXdepdTLsX1vObQgHDKwFSQ1bVtDORqDrhJ9ljRe4HgiRBQ==', '37A0kPH6rocgWYEmvXcK+OfwgayOBoPOpVqKo1prpgoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAHc1hfAAAAAIAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAHc1hfAAAAAIAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABZaC44AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABfXg84AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAdzWF8AAAAAgAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAX14B8AAAAAgAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('c9579846d3165e8f4271222caa4ce10d4ba1244011a8204dc68b810f055840fa', 7, 3, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAJAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAzc3x+vst1GnJX/uxRYwVtT877yIiHEiZQyAgYG+P+4pnqEM6h/+9NNgotuSXCVb8dfbGanBDQVE/qrmUInYiBw==', 'yVeYRtMWXo9CcSIsqkzhDUuhJEARqCBNxouBDwVYQPoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAF9eAfAAAAAIAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAF9eAfAAAAAIAAAAJAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABfXg84AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABlU/A4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAX14B8AAAAAgAAAAkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAR4Z98AAAAAgAAAAkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (2, 1, 'AAAAAQAAAAs=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (2, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey 
PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/operation_fee_stats_1-horizon.sql b/services/horizon/internal/test/scenarios/operation_fee_stats_1-horizon.sql new file mode 100644 index 0000000000..6760d00d0b --- /dev/null +++ 
b/services/horizon/internal/test/scenarios/operation_fee_stats_1-horizon.sql @@ -0,0 +1,1041 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF 
EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.asset_stats; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: 
max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: 
public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount > 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount > 0)) +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged integer, + new_max_fee integer +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: asset_stats; 
Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-06-03 18:28:47.032496+02'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-06-03 18:28:47.039657+02'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-06-03 18:28:47.044048+02'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-06-03 18:28:47.054532+02'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-06-03 18:28:47.063028+02'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-06-03 18:28:47.068415+02'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-06-03 18:28:47.081625+02'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-06-03 18:28:47.087463+02'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-06-03 18:28:47.090109+02'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-06-03 18:28:47.092718+02'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-06-03 18:28:47.095973+02'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-06-03 18:28:47.099698+02'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-06-03 18:28:47.107549+02'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-06-03 18:28:47.112768+02'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-06-03 18:28:47.115116+02'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-06-03 18:28:47.116796+02'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-06-03 18:28:47.117989+02'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-06-03 18:28:47.120034+02'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_accounts VALUES (1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_accounts VALUES (2, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_accounts VALUES (3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 3, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 1, false); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (1, 30064775169, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 30064775169, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 30064779265, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 30064779265, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 30064783361, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 30064783361, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO 
history_effects VALUES (2, 25769807873, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 25769807873, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 25769811969, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 25769811969, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 21474840577, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 21474840577, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 21474844673, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 21474844673, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 21474848769, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 21474848769, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 21474852865, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 21474852865, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 17179873281, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 17179873281, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 12884905985, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 12884905985, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589938689, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (3, 8589938689, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589938689, 3, 10, '{"weight": 1, "public_key": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); +INSERT INTO history_effects VALUES (2, 8589942785, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (3, 8589942785, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 8589942785, 3, 10, '{"weight": 1, "public_key": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (7, '16f6fdb5c204980e34ec5334c409b452b5906550d6e2bc5e65b780335e128f25', '12f18c34bb467618af7d20936de6ca4a3d1caa881c36ac9a7b89ce713454ed42', 3, 3, '2019-06-03 16:36:46', '2019-06-03 16:36:44.314788', '2019-06-03 16:36:44.314789', 30064771072, 16, 1000000000000000000, 1300, 100, 100000000, 1000000, 11, 'AAAACxLxjDS7RnYYr30gk23myko9HKqIHDasmnuJznE0VO1CJAEap9zN9ulrCqs7yeKqKBciTLmHONm+AASqGkGhtEUAAAAAXPVMngAAAAAAAAAAL150/k9Xfs0TJQCfIu9DCsOEki1Kn+hL2IFyyLVMdXLSqRMksZg/1s/TN2MT0PTALvZwe0hrL/vO9qy8DXIyhAAAAAcN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0); +INSERT INTO history_ledgers VALUES (6, '12f18c34bb467618af7d20936de6ca4a3d1caa881c36ac9a7b89ce713454ed42', '8c22cb8c9c0b37cf98f7fe67fc3a860765c3e68cc0e3ca734a2245d666a24daf', 2, 
2, '2019-06-03 16:36:45', '2019-06-03 16:36:44.343267', '2019-06-03 16:36:44.343267', 25769803776, 16, 1000000000000000000, 1000, 100, 100000000, 1000000, 11, 'AAAAC4wiy4ycCzfPmPf+Z/w6hgdlw+aMwOPKc0oiRdZmok2vimN7pygrRl39wo4rGR3ojemz1j9vGRpGXgvKvFhV0TIAAAAAXPVMnQAAAAAAAAAA+GFZHqUIknS3p5sZb4czc8Kj0abGSMVfNaq+9zzlt2RJy5twyGiNy+/nAx1r7Fd+9Gwx1fWOSMkGfXyWCtxQqgAAAAYN4Lazp2QAAAAAAAAAAAPoAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0); +INSERT INTO history_ledgers VALUES (5, '8c22cb8c9c0b37cf98f7fe67fc3a860765c3e68cc0e3ca734a2245d666a24daf', '161c1bd6b25ae1a13742a87c6cb92c865a36c388fb88cd7998b861aeba6696d9', 4, 4, '2019-06-03 16:36:44', '2019-06-03 16:36:44.357815', '2019-06-03 16:36:44.357815', 21474836480, 16, 1000000000000000000, 800, 100, 100000000, 1000000, 11, 'AAAACxYcG9ayWuGhN0KofGy5LIZaNsOI+4jNeZi4Ya66ZpbZv9yVLXLZ14BAjbXRKWKqMHgP4d/yiTLYGoLhZYpcsGQAAAAAXPVMnAAAAAAAAAAAK7ZFHpUv5xgPNV5PVN8eW9yyrJcBUZDK2CSKHdvPgtg5bkfXmLDAtVbwJmXDeKBhcjvQCVwGdkqsINH6rRuEGwAAAAUN4Lazp2QAAAAAAAAAAAMgAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0); +INSERT INTO history_ledgers VALUES (4, '161c1bd6b25ae1a13742a87c6cb92c865a36c388fb88cd7998b861aeba6696d9', 'e17d4c547e8c25da958839a24d65fad4245c061b18053c0b3355e480abbaa181', 1, 1, '2019-06-03 16:36:43', '2019-06-03 16:36:44.378676', '2019-06-03 16:36:44.378676', 17179869184, 16, 1000000000000000000, 400, 100, 100000000, 1000000, 11, 'AAAAC+F9TFR+jCXalYg5ok1l+tQkXAYbGAU8CzNV5ICruqGBv7gH+MirXcGlvfrSyn6jsW5bxOF4aGsLXUWY6b0GVDcAAAAAXPVMmwAAAAAAAAAA9Nf75AfzKHcfz2nf3wc7i1MpbTisZhsly7MED4o1H3nnYKwcsxtlShiwrJAjUtVbfOPY9454F0BOVuh7a5ArjQAAAAQN4Lazp2QAAAAAAAAAAAGQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (3, 'e17d4c547e8c25da958839a24d65fad4245c061b18053c0b3355e480abbaa181', 'b296f06555c13ef529da4922b5f8092f7b39ec8364ce93ac8a00b7ac131ca8a3', 1, 1, '2019-06-03 16:36:42', '2019-06-03 16:36:44.395408', '2019-06-03 16:36:44.395408', 12884901888, 16, 1000000000000000000, 300, 100, 100000000, 1000000, 11, 'AAAAC7KW8GVVwT71KdpJIrX4CS97OeyDZM6TrIoAt6wTHKijYjXpTzYAT3UVd5+80mub7DyftrMo9PFH/aZNNRh0SvMAAAAAXPVMmgAAAAAAAAAAzpWxfUdkwpI+mahMUGVw0hP2fK7oFHG9i01PTevvzr/JhKl/HdOjHaSKDl370ucnP4E3gPIT4qzvwbdPBSjEJwAAAAMN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (2, 'b296f06555c13ef529da4922b5f8092f7b39ec8364ce93ac8a00b7ac131ca8a3', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 2, 2, '2019-06-03 16:36:41', '2019-06-03 16:36:44.405856', '2019-06-03 16:36:44.405856', 8589934592, 16, 1000000000000000000, 200, 100, 100000000, 1000000, 11, 
'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZRZSEryEgGUi8Runb12uW8dKmZaWm1LdHQ2+tzinFtaAAAAAAXPVMmQAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA+9VGePGCiconnKwGWQgSrBOnhFsIJOmYJO/8HociG0TDBUH5mFPGH330CygWgjI9Tns2ILOmNsQ1bQ4sqzooEwAAAAIN4Lazp2QAAAAAAAAAAADIAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 0); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-06-03 16:36:44.422671', '2019-06-03 16:36:44.422672', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); + + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 30064775169, 2); +INSERT INTO history_operation_participants VALUES (2, 30064775169, 1); +INSERT INTO history_operation_participants VALUES (3, 30064779265, 1); +INSERT INTO history_operation_participants VALUES (4, 30064779265, 2); +INSERT INTO history_operation_participants VALUES (5, 30064783361, 2); +INSERT INTO history_operation_participants VALUES (6, 30064783361, 1); +INSERT INTO history_operation_participants VALUES (7, 25769807873, 1); +INSERT INTO history_operation_participants VALUES (8, 25769807873, 2); +INSERT INTO history_operation_participants VALUES (9, 25769811969, 2); +INSERT INTO history_operation_participants VALUES (10, 25769811969, 1); +INSERT INTO history_operation_participants VALUES (11, 21474840577, 1); +INSERT INTO history_operation_participants VALUES (12, 21474840577, 2); +INSERT INTO history_operation_participants VALUES (13, 21474844673, 2); +INSERT INTO history_operation_participants VALUES (14, 21474844673, 1); +INSERT INTO history_operation_participants VALUES (15, 21474848769, 2); +INSERT INTO history_operation_participants VALUES (16, 21474848769, 1); +INSERT INTO history_operation_participants VALUES (17, 21474852865, 2); +INSERT INTO history_operation_participants VALUES (18, 21474852865, 1); +INSERT INTO history_operation_participants VALUES (19, 17179873281, 1); +INSERT INTO history_operation_participants VALUES (20, 17179873281, 2); +INSERT INTO history_operation_participants VALUES (21, 12884905985, 2); +INSERT INTO history_operation_participants VALUES (22, 12884905985, 1); +INSERT INTO history_operation_participants VALUES (23, 8589938689, 3); +INSERT INTO history_operation_participants VALUES (24, 8589938689, 1); +INSERT INTO history_operation_participants VALUES (25, 8589942785, 3); +INSERT INTO history_operation_participants VALUES (26, 8589942785, 2); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 26, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (30064775169, 30064775168, 1, 1, '{"to": 
"GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "10.0000000", "asset_type": "native"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (30064779265, 30064779264, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "10.0000000", "asset_type": "native"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (30064783361, 30064783360, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "10.0000000", "asset_type": "native"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (25769807873, 25769807872, 1, 1, '{"to": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "amount": "10.0000000", "asset_type": "native"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (25769811969, 25769811968, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "10.0000000", "asset_type": "native"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (21474840577, 21474840576, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "10.0000000", "asset_type": "native"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (21474844673, 21474844672, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "10.0000000", "asset_type": "native"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (21474848769, 21474848768, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "10.0000000", "asset_type": "native"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (21474852865, 21474852864, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "10.0000000", "asset_type": "native"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (17179873281, 17179873280, 1, 1, '{"to": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "amount": "10.0000000", "asset_type": "native"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "amount": "10.0000000", "asset_type": "native"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (8589938689, 8589938688, 1, 0, '{"funder": 
"GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589942785, 8589942784, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transaction_participants VALUES (1, 30064775168, 1); +INSERT INTO history_transaction_participants VALUES (2, 30064775168, 2); +INSERT INTO history_transaction_participants VALUES (3, 30064779264, 2); +INSERT INTO history_transaction_participants VALUES (4, 30064779264, 1); +INSERT INTO history_transaction_participants VALUES (5, 30064783360, 2); +INSERT INTO history_transaction_participants VALUES (6, 30064783360, 1); +INSERT INTO history_transaction_participants VALUES (7, 25769807872, 2); +INSERT INTO history_transaction_participants VALUES (8, 25769807872, 1); +INSERT INTO history_transaction_participants VALUES (9, 25769811968, 1); +INSERT INTO history_transaction_participants VALUES (10, 25769811968, 2); +INSERT INTO history_transaction_participants VALUES (11, 21474840576, 2); +INSERT INTO history_transaction_participants VALUES (12, 21474840576, 1); +INSERT INTO history_transaction_participants VALUES (13, 21474844672, 2); +INSERT INTO history_transaction_participants VALUES (14, 21474844672, 1); +INSERT INTO history_transaction_participants VALUES (15, 21474848768, 2); +INSERT INTO history_transaction_participants VALUES (16, 21474848768, 1); +INSERT INTO history_transaction_participants VALUES (17, 21474852864, 2); +INSERT INTO history_transaction_participants VALUES (18, 21474852864, 1); +INSERT INTO history_transaction_participants VALUES (19, 17179873280, 1); +INSERT INTO history_transaction_participants VALUES (20, 17179873280, 2); +INSERT INTO history_transaction_participants VALUES (21, 12884905984, 2); +INSERT INTO history_transaction_participants VALUES (22, 12884905984, 1); +INSERT INTO history_transaction_participants VALUES (23, 8589938688, 3); +INSERT INTO history_transaction_participants VALUES (24, 8589938688, 1); +INSERT INTO history_transaction_participants VALUES (25, 8589942784, 3); +INSERT INTO history_transaction_participants VALUES (26, 8589942784, 2); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 26, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('720754335df899ed59dac9b35c89a1de60a7054c006db48ce12dc5081a6bbc5f', 7, 1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934599, 100, 1, '2019-06-03 16:36:44.314976', '2019-06-03 16:36:44.314976', 30064775168, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABA5VdEXTh7kOlnlAmZykb462dFL6+URfv7kn222WD7uoXBt4zp0JTSPtB3DGyhjfAtX25rFJcc7YdXNuhQbSNDAw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAI8NCfAAAAAIAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAI8NCfAAAAAIAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck04AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABZaC44AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0J8AAAAAgAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAdzWF8AAAAAgAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0OoAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0NEAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{5VdEXTh7kOlnlAmZykb462dFL6+URfv7kn222WD7uoXBt4zp0JTSPtB3DGyhjfAtX25rFJcc7YdXNuhQbSNDAw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('dfb03490f1faae8720598126bd770af8e7f081ac8e0683cea55a8aa35a6ba60a', 7, 2, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934600, 100, 1, '2019-06-03 16:36:44.315227', '2019-06-03 16:36:44.315227', 30064779264, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABANTM8JSXXbrK4BBvy/6y8Q6Uxw7u5HeV4ZfjafiHnXdepdTLsX1vObQgHDKwFSQ1bVtDORqDrhJ9ljRe4HgiRBQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAHc1hfAAAAAIAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAHc1hfAAAAAIAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABZaC44AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABfXg84AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAdzWF8AAAAAgAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAX14B8AAAAAgAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0NEAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0LgAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{NTM8JSXXbrK4BBvy/6y8Q6Uxw7u5HeV4ZfjafiHnXdepdTLsX1vObQgHDKwFSQ1bVtDORqDrhJ9ljRe4HgiRBQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('c9579846d3165e8f4271222caa4ce10d4ba1244011a8204dc68b810f055840fa', 7, 3, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934601, 100, 1, '2019-06-03 16:36:44.315378', '2019-06-03 16:36:44.315378', 30064783360, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAJAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAzc3x+vst1GnJX/uxRYwVtT877yIiHEiZQyAgYG+P+4pnqEM6h/+9NNgotuSXCVb8dfbGanBDQVE/qrmUInYiBw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAF9eAfAAAAAIAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAF9eAfAAAAAIAAAAJAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABfXg84AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABlU/A4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAX14B8AAAAAgAAAAkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAR4Z98AAAAAgAAAAkAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0LgAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0J8AAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{zc3x+vst1GnJX/uxRYwVtT877yIiHEiZQyAgYG+P+4pnqEM6h/+9NNgotuSXCVb8dfbGanBDQVE/qrmUInYiBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('f60bf7761d5459f1087262ca47486c901808d239486096c92541efa445cc4fe9', 6, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934594, 100, 1, '2019-06-03 16:36:44.343487', '2019-06-03 16:36:44.343488', 25769807872, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAAX14QAAAAAAAAAAAa7kvkwAAABA+lv7NIE3yrIXlVPXxn1pYF38xMsqkaa42kprQQwAQlAdG8ICI4t+ZLX4pel6cAZFGYx73fZyXKBHruV0RvNGCA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAU3JNOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAU3JNOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0OoAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAApuSSoAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck04AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABNfGw4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck2cAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck04AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{+lv7NIE3yrIXlVPXxn1pYF38xMsqkaa42kprQQwAQlAdG8ICI4t+ZLX4pel6cAZFGYx73fZyXKBHruV0RvNGCA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('9e56352113669f286d4950e55d0d9706fd41a0b3a0b74d5e51268bd0298e7e47', 6, 2, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934598, 100, 1, '2019-06-03 16:36:44.343733', '2019-06-03 16:36:44.343733', 25769811968, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAFeeBKecfo5v061dy3QfbF8zgO6gEUR8ildkKig42N0Yl4Z437Kpj4M0LWfDibhvKd6+voXM5rEFLVMpYFr/5AA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAKbkkqAAAAAIAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAKbkkqAAAAAIAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABNfGw4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck04AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAApuSSoAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0OoAAAAAgAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0QMAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0OoAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{FeeBKecfo5v061dy3QfbF8zgO6gEUR8ildkKig42N0Yl4Z437Kpj4M0LWfDibhvKd6+voXM5rEFLVMpYFr/5AA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('f04617a26c4212f15a73578f59ea9913500fcb4818f828c17190f1454a04186c', 5, 1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934594, 100, 1, '2019-06-03 16:36:44.358147', '2019-06-03 16:36:44.358147', 21474840576, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAG5HCDK2pf/77Ppffgv5hal7Q0yyfubULLN9szm3nJYL9YT60pLsuIC4YSwxAyVvsUHyQ3iJ48EQ+3VS/uIiiAg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKqcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA1pOcMAAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{G5HCDK2pf/77Ppffgv5hal7Q0yyfubULLN9szm3nJYL9YT60pLsuIC4YSwxAyVvsUHyQ3iJ48EQ+3VS/uIiiAg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b31dce4295a5ef0e8f19fb83816ed880d573e5a2669f85be5cc602a90f9250c5', 5, 2, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934595, 100, 1, '2019-06-03 16:36:44.3585', '2019-06-03 16:36:44.3585', 21474844672, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAgzQF38kzgeHKf4Y3rZKYXturoU3n2LXyyuISFdK6/D5seTrjOXHU+m4kiIVeWUNtHx7ep3MSD1wIXuKjT0ReAA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAANaTnDAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAANaTnDAAAAAIAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKqcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABHhoucAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA1pOcMAAAAAgAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAvrwYMAAAAAgAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{gzQF38kzgeHKf4Y3rZKYXturoU3n2LXyyuISFdK6/D5seTrjOXHU+m4kiIVeWUNtHx7ep3MSD1wIXuKjT0ReAA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b0db02f48666502f7bb227049e48f7805380f07aee6e704b6a12b25d48c7b591', 5, 3, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934596, 100, 1, '2019-06-03 16:36:44.359124', '2019-06-03 16:36:44.359124', 21474848768, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABABqMU3WJT6ur297rvjulCqylVeC1bNKyQbClqyad+ou+x8u7GtYDf6o+aP/sLKitYYGnlDUvpTgdIyuMqSncYAw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAL68GDAAAAAIAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAL68GDAAAAAIAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABHhoucAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABNfGycAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAvrwYMAAAAAgAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAApuSUMAAAAAgAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7mshwAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{BqMU3WJT6ur297rvjulCqylVeC1bNKyQbClqyad+ou+x8u7GtYDf6o+aP/sLKitYYGnlDUvpTgdIyuMqSncYAw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('4d1e89bd9835caded110ada3ace56bd1a919b8e53e3bb458aeb51330068269c6', 5, 4, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934597, 100, 1, '2019-06-03 16:36:44.359529', '2019-06-03 16:36:44.35953', 21474852864, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAKDnpoJwX4M7e52BdtZadIq1SC7dJxAjJDiXzMAK6ysLY2VKGVvXWs/RWmZYiXIkDO0ECyKfIov+1y4stQypZDw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAKbklDAAAAAIAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAKbklDAAAAAIAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABNfGycAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABTck2cAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAApuSUMAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAjw0QMAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7mshwAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{KDnpoJwX4M7e52BdtZadIq1SC7dJxAjJDiXzMAK6ysLY2VKGVvXWs/RWmZYiXIkDO0ECyKfIov+1y4stQypZDw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('d178e262b8b2aac66a75f69e70ce3cb7fdf01a6060433636c7b4a3a178236429', 4, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934593, 100, 1, '2019-06-03 16:36:44.378888', '2019-06-03 16:36:44.378888', 17179873280, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAAX14QAAAAAAAAAAAa7kvkwAAABA9qncr+3eaHaYqpDspvoIbiENnY3te9dqrCYtGbiT13CWh/b+cm+CUe9//x0NDxiU/ptY0QlY/z54IF7jF0H7CQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAQZCqnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAQZCqnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA1pOicAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKqcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKsAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKqcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{9qncr+3eaHaYqpDspvoIbiENnY3te9dqrCYtGbiT13CWh/b+cm+CUe9//x0NDxiU/ptY0QlY/z54IF7jF0H7CQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('aeb26130d1715c5e9b0c85de46d454200cec26513e8ce06ab05628069bea0793', 3, 1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934593, 100, 1, '2019-06-03 16:36:44.395647', '2019-06-03 16:36:44.395647', 12884905984, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAAAAAAAX14QAAAAAAAAAAAW8UiFoAAABAJ1AToo3dEH9+7//OjpIHtWCDsL/0MUQlbjUSQC2+I3TVEl9chqrpqx5GG6yjN8INl3IZ7/HSA0EfRB2xZ9VMCg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAABBkKsAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA1pOicAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{J1AToo3dEH9+7//OjpIHtWCDsL/0MUQlbjUSQC2+I3TVEl9chqrpqx5GG6yjN8INl3IZ7/HSA0EfRB2xZ9VMCg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d', 2, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-06-03 16:36:44.406035', '2019-06-03 16:36:44.406036', 8589938688, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAECDzqvkQBQoNAJifPRXDoLhvtycT3lFPCQ51gkdsFHaBNWw05S/VhW0Xgkr0CBPE4NaFV2Kmcs3ZwLmib4TRrML', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTU4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{g86r5EAUKDQCYnz0Vw6C4b7cnE95RTwkOdYJHbBR2gTVsNOUv1YVtF4JK9AgTxODWhVdipnLN2cC5om+E0azCw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('1ed82961fb013d39f96aa7e428c4174caa4a5a43dbc65713a37d46d96ee5c314', 2, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 2, 100, 1, '2019-06-03 16:36:44.40629', '2019-06-03 16:36:44.40629', 8589942784, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rKAAAAAAAAAAABVvwF9wAAAECuHh3Q7Zpbulw+xzp7NeMPdfYErNzJOrvQi8GOkN7WgfwSPgzHcPE/E/s8CL/AQrjBtw067aUZAvoaVf12oCQB', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTU4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLms4AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{rh4d0O2aW7pcPsc6ezXjD3X2BKzcyTq70IvBjpDe1oH8Ej4Mx3DxPxP7PAi/wEK4wbcNOu2lGQL6GlX9dqAkAQ==}', 'none', NULL, NULL, true, 100); + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: 
public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: 
index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/operation_fee_stats_2-core.sql b/services/horizon/internal/test/scenarios/operation_fee_stats_2-core.sql new file mode 100644 index 0000000000..7987381e33 --- /dev/null +++ b/services/horizon/internal/test/scenarios/operation_fee_stats_2-core.sql @@ -0,0 +1,723 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS 
public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + seqnum bigint NOT NULL, + numsubentries integer NOT 
NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + signers text, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT 
NULL, + buyingliabilities bigint, + sellingliabilities bigint, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1000000000000000000, 0, 0, NULL, '', 'AQAAAA==', 0, 1, NULL, NULL, NULL); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('fddd6d06d74e0d5677eb020b0ff83c321a12e1ec8c447225dccec603b7f2a20b', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 'eff8bb6dff2733ff1f3ffa5141f34ae7571ee3d8cae6dbd129bac511fa0bfd64', 2, 1559579827, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZuZRHr9UdXKbTKiclfOjy72YZFJUkJPVcKT5htvorm1QAAAAAXPVMswAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnv+Ltt/ycz/x8/+lFB80rnVx7j2Mrm29EpusUR+gv9ZAAAAAIN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('3cdae71be1076874ec5a3955a3f27c5a8524ed514a76c76724254c9a9a16deb6', 'fddd6d06d74e0d5677eb020b0ff83c321a12e1ec8c447225dccec603b7f2a20b', 'eff8bb6dff2733ff1f3ffa5141f34ae7571ee3d8cae6dbd129bac511fa0bfd64', 3, 1559579828, 
'AAAAC/3dbQbXTg1Wd+sCCw/4PDIaEuHsjERyJdzOxgO38qILONQ6wuACH6xfhjuX9wreJJSyXtsFr1EbF3uAJuDtKZAAAAAAXPVMtAAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnv+Ltt/ycz/x8/+lFB80rnVx7j2Mrm29EpusUR+gv9ZAAAAAMN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('1e5ddd4a413af2671918d67cbc187a1cdc6039a0307430d915ca7456c16892e4', '3cdae71be1076874ec5a3955a3f27c5a8524ed514a76c76724254c9a9a16deb6', 'f79fa0bf4f0941a78d93bbee679f206d87fb0da208857e8fac6ce60968444614', 4, 1559579829, 'AAAACzza5xvhB2h07Fo5VaPyfFqFJO1RSnbHZyQlTJqaFt62YJ1PffsPE4Bgb2MGQtV4n4R1RhaPlEnNsv2tOFqFsDgAAAAAXPVMtQAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERn3n6C/TwlBp42Tu+5nnyBth/sNogiFfo+sbOYJaERGFAAAAAQN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('2f0ba640058008a15d8fba4436f4c24d7993994f9f73a426c7e23879ca31a93f', '1e5ddd4a413af2671918d67cbc187a1cdc6039a0307430d915ca7456c16892e4', 'f79fa0bf4f0941a78d93bbee679f206d87fb0da208857e8fac6ce60968444614', 5, 1559579830, 'AAAACx5d3UpBOvJnGRjWfLwYehzcYDmgMHQw2RXKdFbBaJLk6Q9O9qon6XPoND6LH8FnSyN245R2LczccTxE8t48/TAAAAAAXPVMtgAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERn3n6C/TwlBp42Tu+5nnyBth/sNogiFfo+sbOYJaERGFAAAAAUN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('a01cbeeaeb18cbed0ee913a13ba4f760660f0a6541be8f19021816601f96a7d8', '2f0ba640058008a15d8fba4436f4c24d7993994f9f73a426c7e23879ca31a93f', '6a6ce2f01ea7c9b517e0fe337cc3df702d30f312fc7389eceb9dc49db9e7785c', 6, 1559579831, 'AAAACy8LpkAFgAihXY+6RDb0wk15k5lPn3OkJsfiOHnKMak/UNAhrzjlDDebLKtCXYZeDsKoDrfnW+Amjcfeo9DYiesAAAAAXPVMtwAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERlqbOLwHqfJtRfg/jN8w99wLTDzEvxziezrncSdued4XAAAAAYN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('1f60b8c6e0bd7b4f76b500ed04d0ce93f8f31853a8c2bdfd46faec8bbac9a2a1', 'a01cbeeaeb18cbed0ee913a13ba4f760660f0a6541be8f19021816601f96a7d8', '6a6ce2f01ea7c9b517e0fe337cc3df702d30f312fc7389eceb9dc49db9e7785c', 7, 1559579832, 'AAAAC6AcvurrGMvtDukToTuk92BmDwplQb6PGQIYFmAflqfYB0IOinnjwk9l13AyWCV0ITfGiJwVvpobnJxxD0agw6oAAAAAXPVMuAAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERlqbOLwHqfJtRfg/jN8w99wLTDzEvxziezrncSdued4XAAAAAcN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- 
Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GD5L6QUAEOOV2Q5UQI64NMNFISONLPXQH4HBAEEFHSVMPKYUSXH3VSGG', 2, 'AAAAAPq/QoAjnV1DtII9xrGlRJzVvvA/DhAQhTyqx6sUlc+6AAAAAAAAAAIAAAACAAAAAQAAAEi5lEev1R1cptMqJyV86PLvZhkUlSQk9VwpPmG2+iubVAAAAABc9UyzAAAAAgAAAAgAAAABAAAACwAAAAgAAAADAA9CQAAAAAAAAAABjSHRVUr6VMlEcau3H59kKM/insurCW81XEu/NG3CuHsAAABAjprck2DJswhyf49SIq7uzv9rt6lVYn6CxHMzSrEEUPbjelNVA0x7AP27WbMXlslFSwH70fRAAzCIVjslFFBqDA=='); +INSERT INTO scphistory VALUES ('GD5L6QUAEOOV2Q5UQI64NMNFISONLPXQH4HBAEEFHSVMPKYUSXH3VSGG', 3, 'AAAAAPq/QoAjnV1DtII9xrGlRJzVvvA/DhAQhTyqx6sUlc+6AAAAAAAAAAMAAAACAAAAAQAAADA41DrC4AIfrF+GO5f3Ct4klLJe2wWvURsXe4Am4O0pkAAAAABc9Uy0AAAAAAAAAAAAAAABjSHRVUr6VMlEcau3H59kKM/insurCW81XEu/NG3CuHsAAABACgGkamC+/IlixaMNuP88/CyreSLBJ2CZ6L7fpu0KPc4vtF1HuKsG5Y5FBb3uT6KLtlCaL+5ARmJna8GiYtxBDw=='); +INSERT INTO scphistory VALUES ('GD5L6QUAEOOV2Q5UQI64NMNFISONLPXQH4HBAEEFHSVMPKYUSXH3VSGG', 4, 'AAAAAPq/QoAjnV1DtII9xrGlRJzVvvA/DhAQhTyqx6sUlc+6AAAAAAAAAAQAAAACAAAAAQAAADBgnU99+w8TgGBvYwZC1XifhHVGFo+USc2y/a04WoWwOAAAAABc9Uy1AAAAAAAAAAAAAAABjSHRVUr6VMlEcau3H59kKM/insurCW81XEu/NG3CuHsAAABAHDSNcqVsa3J0bPPcv9Sse1QpDhTFgbncF2a3B9QyQe3w1KmsdvRLwWUmVrX1EkxjTJMVydxj9+o1p4FqbZ6wAg=='); +INSERT INTO scphistory VALUES ('GD5L6QUAEOOV2Q5UQI64NMNFISONLPXQH4HBAEEFHSVMPKYUSXH3VSGG', 5, 'AAAAAPq/QoAjnV1DtII9xrGlRJzVvvA/DhAQhTyqx6sUlc+6AAAAAAAAAAUAAAACAAAAAQAAADDpD072qifpc+g0PosfwWdLI3bjlHYtzNxxPETy3jz9MAAAAABc9Uy2AAAAAAAAAAAAAAABjSHRVUr6VMlEcau3H59kKM/insurCW81XEu/NG3CuHsAAABAkYYwCvwPVdU96BJIkmXkRt+Fm9cOrKWKavuPUBG9CC7g4F1/yRTa1P9H95BNqwMkn6d7v/Z+f7v9aZ+JEnrVDA=='); +INSERT INTO scphistory VALUES ('GD5L6QUAEOOV2Q5UQI64NMNFISONLPXQH4HBAEEFHSVMPKYUSXH3VSGG', 6, 'AAAAAPq/QoAjnV1DtII9xrGlRJzVvvA/DhAQhTyqx6sUlc+6AAAAAAAAAAYAAAACAAAAAQAAADBQ0CGvOOUMN5ssq0Jdhl4OwqgOt+db4CaNx96j0NiJ6wAAAABc9Uy3AAAAAAAAAAAAAAABjSHRVUr6VMlEcau3H59kKM/insurCW81XEu/NG3CuHsAAABAHKiZd0L7g+0I8TBX57oCPANAo/usLpsHc2XIxtIm+vgU7HHTivJeA1mj8YCkojtt82DIr1gQWIMpYEupWNDRBA=='); +INSERT INTO scphistory VALUES ('GD5L6QUAEOOV2Q5UQI64NMNFISONLPXQH4HBAEEFHSVMPKYUSXH3VSGG', 7, 'AAAAAPq/QoAjnV1DtII9xrGlRJzVvvA/DhAQhTyqx6sUlc+6AAAAAAAAAAcAAAACAAAAAQAAADAHQg6KeePCT2XXcDJYJXQhN8aInBW+mhucnHEPRqDDqgAAAABc9Uy4AAAAAAAAAAAAAAABjSHRVUr6VMlEcau3H59kKM/insurCW81XEu/NG3CuHsAAABAzZtPgd3/YNHsLyLpY6woBhg1YNAjPJXNZ01y8D4WC7vCBttYMnpvHRdZ/iZC9NtOkBxN3VaXUNrLaxPOr6NHCw=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('8d21d1554afa54c94471abb71f9f6428cfe29ecbab096f355c4bbf346dc2b87b', 7, 'AAAAAQAAAAEAAAAA+r9CgCOdXUO0gj3GsaVEnNW+8D8OEBCFPKrHqxSVz7oAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('lastscpdata ', 
'AAAAAgAAAAD6v0KAI51dQ7SCPcaxpUSc1b7wPw4QEIU8qserFJXPugAAAAAAAAAHAAAAA40h0VVK+lTJRHGrtx+fZCjP4p7LqwlvNVxLvzRtwrh7AAAAAQAAAJgHQg6KeePCT2XXcDJYJXQhN8aInBW+mhucnHEPRqDDqgAAAABc9Uy4AAAAAAAAAAEAAAAA+r9CgCOdXUO0gj3GsaVEnNW+8D8OEBCFPKrHqxSVz7oAAABAEEBMIBpEICnNLCE2wCF4kwS2DjXGrcXONqPTWQnXCGQxr9Bcp+BF0y36nSLhVCFvQ0d84eXpc8YCboGtQMpCAAAAAAEAAACYB0IOinnjwk9l13AyWCV0ITfGiJwVvpobnJxxD0agw6oAAAAAXPVMuAAAAAAAAAABAAAAAPq/QoAjnV1DtII9xrGlRJzVvvA/DhAQhTyqx6sUlc+6AAAAQBBATCAaRCApzSwhNsAheJMEtg41xq3Fzjaj01kJ1whkMa/QXKfgRdMt+p0i4VQhb0NHfOHl6XPGAm6BrUDKQgAAAABAoX0v8Uqbdi5xvjI7M+SEk/LCHzjOBX71zF9emwTX6d46Z9b0RIG3+gZdp0Pl370F1Q4EaVELh2j9iWa7n4jYBAAAAAD6v0KAI51dQ7SCPcaxpUSc1b7wPw4QEIU8qserFJXPugAAAAAAAAAHAAAAAgAAAAEAAAAwB0IOinnjwk9l13AyWCV0ITfGiJwVvpobnJxxD0agw6oAAAAAXPVMuAAAAAAAAAAAAAAAAY0h0VVK+lTJRHGrtx+fZCjP4p7LqwlvNVxLvzRtwrh7AAAAQM2bT4Hd/2DR7C8i6WOsKAYYNWDQIzyVzWdNcvA+Fgu7wgbbWDJ6bx0XWf4mQvTbTpAcTd1Wl1Day2sTzq+jRwsAAAABoBy+6usYy+0O6ROhO6T3YGYPCmVBvo8ZAhgWYB+Wp9gAAAAAAAAAAQAAAAEAAAABAAAAAPq/QoAjnV1DtII9xrGlRJzVvvA/DhAQhTyqx6sUlc+6AAAAAA=='); +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 0, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('lastclosedledger ', '1f60b8c6e0bd7b4f76b500ed04d0ce93f8f31853a8c2bdfd46faec8bbac9a2a1'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 7, + "currentBuckets": [ + { + "curr": "056cd09fa286b2de1ff329b224259b858f94ec1dcc93f0e0512b2f6819348951", + "next": { + "state": 0 + }, + "snap": "056cd09fa286b2de1ff329b224259b858f94ec1dcc93f0e0512b2f6819348951" + }, + { + "curr": "e3bc65ded24f6c7ee979f9350aaecf9856f52e75490a401f1a66bb3d77d767dd", + "next": { + "state": 1, + "output": "056cd09fa286b2de1ff329b224259b858f94ec1dcc93f0e0512b2f6819348951" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, 
+ { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (2, 1, 'AAAAAQAAAAs=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (2, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY 
txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/operation_fee_stats_2-horizon.sql b/services/horizon/internal/test/scenarios/operation_fee_stats_2-horizon.sql new file mode 100644 index 0000000000..b1134990f0 --- /dev/null +++ b/services/horizon/internal/test/scenarios/operation_fee_stats_2-horizon.sql @@ -0,0 +1,932 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS 
public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.asset_stats; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: 
SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- 
Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount > 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount > 0)) +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + 
CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged integer, + new_max_fee integer +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-06-03 18:28:47.032496+02'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-06-03 18:28:47.039657+02'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-06-03 18:28:47.044048+02'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-06-03 18:28:47.054532+02'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-06-03 18:28:47.063028+02'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-06-03 18:28:47.068415+02'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-06-03 18:28:47.081625+02'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-06-03 18:28:47.087463+02'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-06-03 18:28:47.090109+02'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-06-03 18:28:47.092718+02'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-06-03 18:28:47.095973+02'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-06-03 18:28:47.099698+02'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-06-03 18:28:47.107549+02'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-06-03 18:28:47.112768+02'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-06-03 18:28:47.115116+02'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-06-03 
18:28:47.116796+02'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-06-03 18:28:47.117989+02'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-06-03 18:28:47.120034+02'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 1, false); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 1, false); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (7, '1f60b8c6e0bd7b4f76b500ed04d0ce93f8f31853a8c2bdfd46faec8bbac9a2a1', 'a01cbeeaeb18cbed0ee913a13ba4f760660f0a6541be8f19021816601f96a7d8', 0, 0, '2019-06-03 16:37:12', '2019-06-03 16:37:10.574154', '2019-06-03 16:37:10.574154', 30064771072, 16, 1000000000000000000, 0, 100, 100000000, 1000000, 11, 'AAAAC6AcvurrGMvtDukToTuk92BmDwplQb6PGQIYFmAflqfYB0IOinnjwk9l13AyWCV0ITfGiJwVvpobnJxxD0agw6oAAAAAXPVMuAAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERlqbOLwHqfJtRfg/jN8w99wLTDzEvxziezrncSdued4XAAAAAcN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); +INSERT INTO history_ledgers VALUES (6, 'a01cbeeaeb18cbed0ee913a13ba4f760660f0a6541be8f19021816601f96a7d8', '2f0ba640058008a15d8fba4436f4c24d7993994f9f73a426c7e23879ca31a93f', 0, 0, '2019-06-03 16:37:11', '2019-06-03 16:37:10.582111', '2019-06-03 16:37:10.582111', 25769803776, 16, 1000000000000000000, 0, 100, 100000000, 1000000, 11, 'AAAACy8LpkAFgAihXY+6RDb0wk15k5lPn3OkJsfiOHnKMak/UNAhrzjlDDebLKtCXYZeDsKoDrfnW+Amjcfeo9DYiesAAAAAXPVMtwAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERlqbOLwHqfJtRfg/jN8w99wLTDzEvxziezrncSdued4XAAAAAYN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); +INSERT INTO history_ledgers VALUES (5, '2f0ba640058008a15d8fba4436f4c24d7993994f9f73a426c7e23879ca31a93f', '1e5ddd4a413af2671918d67cbc187a1cdc6039a0307430d915ca7456c16892e4', 0, 0, '2019-06-03 16:37:10', '2019-06-03 16:37:10.587736', '2019-06-03 16:37:10.587736', 21474836480, 16, 1000000000000000000, 0, 100, 100000000, 1000000, 11, 'AAAACx5d3UpBOvJnGRjWfLwYehzcYDmgMHQw2RXKdFbBaJLk6Q9O9qon6XPoND6LH8FnSyN245R2LczccTxE8t48/TAAAAAAXPVMtgAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERn3n6C/TwlBp42Tu+5nnyBth/sNogiFfo+sbOYJaERGFAAAAAUN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); +INSERT INTO history_ledgers VALUES (4, '1e5ddd4a413af2671918d67cbc187a1cdc6039a0307430d915ca7456c16892e4', '3cdae71be1076874ec5a3955a3f27c5a8524ed514a76c76724254c9a9a16deb6', 0, 0, '2019-06-03 16:37:09', '2019-06-03 16:37:10.595207', '2019-06-03 16:37:10.595207', 17179869184, 16, 1000000000000000000, 
0, 100, 100000000, 1000000, 11, 'AAAACzza5xvhB2h07Fo5VaPyfFqFJO1RSnbHZyQlTJqaFt62YJ1PffsPE4Bgb2MGQtV4n4R1RhaPlEnNsv2tOFqFsDgAAAAAXPVMtQAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERn3n6C/TwlBp42Tu+5nnyBth/sNogiFfo+sbOYJaERGFAAAAAQN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); +INSERT INTO history_ledgers VALUES (3, '3cdae71be1076874ec5a3955a3f27c5a8524ed514a76c76724254c9a9a16deb6', 'fddd6d06d74e0d5677eb020b0ff83c321a12e1ec8c447225dccec603b7f2a20b', 0, 0, '2019-06-03 16:37:08', '2019-06-03 16:37:10.601012', '2019-06-03 16:37:10.601013', 12884901888, 16, 1000000000000000000, 0, 100, 100000000, 1000000, 11, 'AAAAC/3dbQbXTg1Wd+sCCw/4PDIaEuHsjERyJdzOxgO38qILONQ6wuACH6xfhjuX9wreJJSyXtsFr1EbF3uAJuDtKZAAAAAAXPVMtAAAAAAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnv+Ltt/ycz/x8/+lFB80rnVx7j2Mrm29EpusUR+gv9ZAAAAAMN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); +INSERT INTO history_ledgers VALUES (2, 'fddd6d06d74e0d5677eb020b0ff83c321a12e1ec8c447225dccec603b7f2a20b', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 0, 0, '2019-06-03 16:37:07', '2019-06-03 16:37:10.611517', '2019-06-03 16:37:10.611517', 8589934592, 16, 1000000000000000000, 0, 100, 100000000, 1000000, 11, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZuZRHr9UdXKbTKiclfOjy72YZFJUkJPVcKT5htvorm1QAAAAAXPVMswAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnv+Ltt/ycz/x8/+lFB80rnVx7j2Mrm29EpusUR+gv9ZAAAAAIN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-06-03 16:37:10.619453', '2019-06-03 16:37:10.619453', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); + + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 1, false); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 1, false); 
+ + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; 
Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX 
index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/operation_fee_stats_3-core.sql b/services/horizon/internal/test/scenarios/operation_fee_stats_3-core.sql new file mode 100644 index 0000000000..887779c0db --- /dev/null +++ b/services/horizon/internal/test/scenarios/operation_fee_stats_3-core.sql @@ -0,0 +1,747 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP 
INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + signers text, + CONSTRAINT accounts_balance_check CHECK 
((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), 
+ CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 9099999100, 12884901896, 0, NULL, '', 'AQAAAA==', 0, 9, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 999999990899999900, 1, 0, NULL, '', 'AQAAAA==', 0, 9, NULL, NULL, NULL); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('87d37dcbbad4bced6acfa589f9869dea83939ff0f1dbfef99727c9b8ecb0e3f1', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 'eff8bb6dff2733ff1f3ffa5141f34ae7571ee3d8cae6dbd129bac511fa0bfd64', 2, 1559579844, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZuZRHr9UdXKbTKiclfOjy72YZFJUkJPVcKT5htvorm1QAAAAAXPVMxAAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnv+Ltt/ycz/x8/+lFB80rnVx7j2Mrm29EpusUR+gv9ZAAAAAIN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('647dbe45977be72bc6fe73b13b5476a93b405cd7a562af189628c7cbd41ec891', '87d37dcbbad4bced6acfa589f9869dea83939ff0f1dbfef99727c9b8ecb0e3f1', 'c842508eed85d205196c242d81992242e85a85be1cd4ebabc792235cb05b3a84', 3, 1559579845, 
'AAAAC4fTfcu61Lztas+lifmGneqDk5/w8dv++ZcnybjssOPx6p7KmzoLq6THlwtzdRXidN9Q1s+IoUveox2wGSAVnYkAAAAAXPVMxQAAAAAAAAAAfFnUZMxpcaRFgW684JniUG/dzZ5jn4eP2mZ8LIGonSrIQlCO7YXSBRlsJC2BmSJC6FqFvhzU66vHkiNcsFs6hAAAAAMN4Lazp2QAAAAAAAAAAABkAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('10b6161843eb9d652e093486fa8174f93fc44a538870829f7f0ac47da8faa651', '647dbe45977be72bc6fe73b13b5476a93b405cd7a562af189628c7cbd41ec891', '5950cc89615a18ae32399a1ddf57405adea6551ebfe52a0bff724d457cd078e6', 4, 1559579846, 'AAAAC2R9vkWXe+crxv5zsTtUdqk7QFzXpWKvGJYox8vUHsiRiP4WGa5E15yHbJ/Fjs7aniDcPvHvQvRRTagpeuy2LZcAAAAAXPVMxgAAAAAAAAAAvUi8p6JAmXRap5H687O+OXcAKN2//CsLZbyxWLVlZb9ZUMyJYVoYrjI5mh3fV0Ba3qZVHr/lKgv/ck1FfNB45gAAAAQN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('5ae88507efcf646686d3ed03321124529f7b9928bf0531fde31aa55baa3b0fec', '10b6161843eb9d652e093486fa8174f93fc44a538870829f7f0ac47da8faa651', '8a78d3739155a2bc2f2c368ddda6e6e95eaab9f98e2bb50ae7167a7d4c49fe9e', 5, 1559579847, 'AAAACxC2FhhD651lLgk0hvqBdPk/xEpTiHCCn38KxH2o+qZRD70AR0B0Om/2/JDgto1Z3HdONB5TdVAW8+uTkr5a4g0AAAAAXPVMxwAAAAAAAAAAfZ5r/iAId0HYP/J85oWovpWvupf3zLdy0YhEdBPnW26KeNNzkVWivC8sNo3dpubpXqq5+Y4rtQrnFnp9TEn+ngAAAAUN4Lazp2QAAAAAAAAAAAGQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('f5d5ec2689d187facad52c2ef940f0b3a86822c24e8fdaeacf6f7317d36134e6', '5ae88507efcf646686d3ed03321124529f7b9928bf0531fde31aa55baa3b0fec', 'a0f58227e8f4847cb56c9e02c654f20031b058492fe8ec9e17136d79f792ba2b', 6, 1559579848, 'AAAAC1rohQfvz2RmhtPtAzIRJFKfe5kovwUx/eMapVuqOw/sn/+p+bTn7uolyYuy5JK2LC1rXH1wYlbg1Er/DS2nlQ0AAAAAXPVMyAAAAAAAAAAAqqhBNhS+oY0Fq/RSpEEko+knV66p0YlC+DiVf0GYlCGg9YIn6PSEfLVsngLGVPIAMbBYSS/o7J4XE21595K6KwAAAAYN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('7c5e9b5ec8ee15e5fbde0406499973a85a0d90b243addce0c1ff16a8510970e0', 'f5d5ec2689d187facad52c2ef940f0b3a86822c24e8fdaeacf6f7317d36134e6', '6e356ae4015265efc993ae8ef0a2880c1d59b2801e1f269223b90af490756850', 7, 1559579849, 'AAAAC/XV7CaJ0Yf6ytUsLvlA8LOoaCLCTo/a6s9vcxfTYTTmi3GZssPukt5B4pW9nYeAi9c4fBJ/G2009FXT+aofPn0AAAAAXPVMyQAAAAAAAAAAOgSmIClOx9JX9c2gFj6mbAWu2y1mlO0kGdlw5hFcYz1uNWrkAVJl78mTro7woogMHVmygB4fJpIjuQr0kHVoUAAAAAcN4Lazp2QAAAAAAAAAAAJYAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('da47b612dc8d5b2fa7bf9bfa09db11f3dbc3badf2497083a519978698880a78f', '7c5e9b5ec8ee15e5fbde0406499973a85a0d90b243addce0c1ff16a8510970e0', '6a3f2032cf46cc05bdbabfc47d37215df0142be887d35e951fa2a5a2a7175b08', 8, 1559579850, 
'AAAAC3xem17I7hXl+94EBkmZc6haDZCyQ63c4MH/FqhRCXDgKSvRyi2Ch/rUCgC7mvyEgkoSjwLop2En+fKX4u77S9sAAAAAXPVMygAAAAAAAAAA1WZy5HNzQG6v2/yQfdo/18UP2Mi3LOX+HsIFwjtPY3dqPyAyz0bMBb26v8R9NyFd8BQr6IfTXpUfoqWipxdbCAAAAAgN4Lazp2QAAAAAAAAAAAK8AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('2bf34a98b988141b851bcef629b7ef31ed7e74ca39000cdf92d002a91d1c6935', 'da47b612dc8d5b2fa7bf9bfa09db11f3dbc3badf2497083a519978698880a78f', '5fdd2c95559787639d2b4a8801d9394c64a73c19c092a26356fbbae9ecd76c97', 9, 1559579851, 'AAAAC9pHthLcjVsvp7+b+gnbEfPbw7rfJJcIOlGZeGmIgKePkJ5YHJBvZSO3FWn9fV3rAIQ0bm9gsHzXfU7tEazHg/8AAAAAXPVMywAAAAAAAAAAdWzSHhfCzXGLIkOOjoC5Ym8OBprpH5cEOKDNFjF7mH1f3SyVVZeHY50rSogB2TlMZKc8GcCSomNW+7rp7NdslwAAAAkN4Lazp2QAAAAAAAAAAAPoAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GCGZF3JU2C4NLOGHD7HDXCNCNUHHXIWQXRIBRYQAVMGUSSL2MDZ6ERT2', 2, 'AAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAAAAAAAIAAAACAAAAAQAAAEi5lEev1R1cptMqJyV86PLvZhkUlSQk9VwpPmG2+iubVAAAAABc9UzEAAAAAgAAAAgAAAABAAAACwAAAAgAAAADAA9CQAAAAAAAAAABx7Nd7YQ3Y26TDr0cAHY6NoM1cO07+DB8zfAQtWzTMQ4AAABAUKELiengGP7vllxCzFRDbXQvvk8XqY8p8cyBPgOtdfsklKtl7e+cRn18kiZ9lZ6P+nAFOrdcKXhbGa0Ufn4dBg=='); +INSERT INTO scphistory VALUES ('GCGZF3JU2C4NLOGHD7HDXCNCNUHHXIWQXRIBRYQAVMGUSSL2MDZ6ERT2', 3, 'AAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAAAAAAAMAAAACAAAAAQAAADDqnsqbOgurpMeXC3N1FeJ031DWz4ihS96jHbAZIBWdiQAAAABc9UzFAAAAAAAAAAAAAAABx7Nd7YQ3Y26TDr0cAHY6NoM1cO07+DB8zfAQtWzTMQ4AAABAVha6908hHy8bA4ETt5lHMqwLlO8nrNd0E/TBi/CFhQozHCc70A+nB945acZDoBC8paKaTPQOsC0DeIS43I5CBA=='); +INSERT INTO scphistory VALUES ('GCGZF3JU2C4NLOGHD7HDXCNCNUHHXIWQXRIBRYQAVMGUSSL2MDZ6ERT2', 4, 'AAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAAAAAAAQAAAACAAAAAQAAADCI/hYZrkTXnIdsn8WOztqeINw+8e9C9FFNqCl67LYtlwAAAABc9UzGAAAAAAAAAAAAAAABx7Nd7YQ3Y26TDr0cAHY6NoM1cO07+DB8zfAQtWzTMQ4AAABA2cC1DTi1uMWQeLoapUscusCKXN9ALRrtYXaetxVCoavWOSRYWL4bXPAQU/rg2A5Ky2mry0zyqC+PH206JX3lDQ=='); +INSERT INTO scphistory VALUES ('GCGZF3JU2C4NLOGHD7HDXCNCNUHHXIWQXRIBRYQAVMGUSSL2MDZ6ERT2', 5, 'AAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAAAAAAAUAAAACAAAAAQAAADAPvQBHQHQ6b/b8kOC2jVncd040HlN1UBbz65OSvlriDQAAAABc9UzHAAAAAAAAAAAAAAABx7Nd7YQ3Y26TDr0cAHY6NoM1cO07+DB8zfAQtWzTMQ4AAABAceR9nX5f8T2VQplyN9hLoqwQrqNO7mDzOSiXy49UOsdCaAtPrROapEHUbRKl1H+9r5tiKR/YJp2urlwCzGLuDw=='); +INSERT INTO scphistory VALUES ('GCGZF3JU2C4NLOGHD7HDXCNCNUHHXIWQXRIBRYQAVMGUSSL2MDZ6ERT2', 6, 
'AAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAAAAAAAYAAAACAAAAAQAAADCf/6n5tOfu6iXJi7LkkrYsLWtcfXBiVuDUSv8NLaeVDQAAAABc9UzIAAAAAAAAAAAAAAABx7Nd7YQ3Y26TDr0cAHY6NoM1cO07+DB8zfAQtWzTMQ4AAABAS+HldAnJJOq4AFmFweGGDw2u9FVqaHwRFX9xsiyiFO/F4+olIjcua8HdtxkVH1uTYf4aQwvWS/OnUdk/oCw3CQ=='); +INSERT INTO scphistory VALUES ('GCGZF3JU2C4NLOGHD7HDXCNCNUHHXIWQXRIBRYQAVMGUSSL2MDZ6ERT2', 7, 'AAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAAAAAAAcAAAACAAAAAQAAADCLcZmyw+6S3kHilb2dh4CL1zh8En8bbTT0VdP5qh8+fQAAAABc9UzJAAAAAAAAAAAAAAABx7Nd7YQ3Y26TDr0cAHY6NoM1cO07+DB8zfAQtWzTMQ4AAABAmVoLpEaTSClghTf+X8tbfgyCyOo6RxfFQ8z5E+DTEBp5gC/0xvgetKNL9axiOokF+VSZPe4UwBcFMC7PKOV4Dw=='); +INSERT INTO scphistory VALUES ('GCGZF3JU2C4NLOGHD7HDXCNCNUHHXIWQXRIBRYQAVMGUSSL2MDZ6ERT2', 8, 'AAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAAAAAAAgAAAACAAAAAQAAADApK9HKLYKH+tQKALua/ISCShKPAuinYSf58pfi7vtL2wAAAABc9UzKAAAAAAAAAAAAAAABx7Nd7YQ3Y26TDr0cAHY6NoM1cO07+DB8zfAQtWzTMQ4AAABAngF+5IEDR7RX5w42+ZuVuB8k0p7Z5dz0xWgxmvQAl0EPg7amzmU7D9u4HnRUry2Hwqk/8GmXpve7CZjFLqthDg=='); +INSERT INTO scphistory VALUES ('GCGZF3JU2C4NLOGHD7HDXCNCNUHHXIWQXRIBRYQAVMGUSSL2MDZ6ERT2', 9, 'AAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAAAAAAAkAAAACAAAAAQAAADCQnlgckG9lI7cVaf19XesAhDRub2CwfNd9Tu0RrMeD/wAAAABc9UzLAAAAAAAAAAAAAAABx7Nd7YQ3Y26TDr0cAHY6NoM1cO07+DB8zfAQtWzTMQ4AAABAnzBq+a2DrBfxWC0a2DDpuqsRHOEbiCezhR/q/w7ckPrSrCdeGMm1PCWW4Pejp89X/R95v1ryt4PvKoLvUZl2Dw=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('c7b35ded8437636e930ebd1c00763a36833570ed3bf8307ccdf010b56cd3310e', 9, 'AAAAAQAAAAEAAAAAjZLtNNC41bjHH847iaJtDnui0LxQGOIAqw1JSXpg8+IAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('lastclosedledger ', '2bf34a98b988141b851bcef629b7ef31ed7e74ca39000cdf92d002a91d1c6935'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 9, + "currentBuckets": [ + { + "curr": "03f0710ef207bcb22bc9cfa1f2b1d175d2f7fe10a94831f2e2626dabf43b7620", + "next": { + "state": 0 + }, + "snap": "fdd80d066ced4c44bc6530766c92e0aeb57a87d2d5903b2b8b19cf69f74e885e" + }, + { + "curr": "26e1534b44007c3293a2be93debc72c337684b9ec1d1f352b33aea17c506bbe1", + "next": { + "state": 1, + "output": "fdd80d066ced4c44bc6530766c92e0aeb57a87d2d5903b2b8b19cf69f74e885e" + }, + "snap": "eeacf82bce13112c5282dc161c8f9f2b1c980be163727644601c4944b76895c7" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 1, + "output": "d7a8cc30b8aed592c2de5862af378c4e4c75798e5062e8900940debd2aed9969" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, 
+ { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 0, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('lastscpdata ', 'AAAAAgAAAACNku000LjVuMcfzjuJom0Oe6LQvFAY4gCrDUlJemDz4gAAAAAAAAAJAAAAA8ezXe2EN2Nukw69HAB2OjaDNXDtO/gwfM3wELVs0zEOAAAAAQAAAJiQnlgckG9lI7cVaf19XesAhDRub2CwfNd9Tu0RrMeD/wAAAABc9UzLAAAAAAAAAAEAAAAAjZLtNNC41bjHH847iaJtDnui0LxQGOIAqw1JSXpg8+IAAABAmWfh9mGL43BRsU13JDRoln+sR3GbEtMaZA/5UFaPQ9pkMOAiYXc0/9X97FqBh8wAfA+rKO0IdcEI69uyFoE1DgAAAAEAAACYkJ5YHJBvZSO3FWn9fV3rAIQ0bm9gsHzXfU7tEazHg/8AAAAAXPVMywAAAAAAAAABAAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAQJln4fZhi+NwUbFNdyQ0aJZ/rEdxmxLTGmQP+VBWj0PaZDDgImF3NP/V/exagYfMAHwPqyjtCHXBCOvbshaBNQ4AAABAjCghhI6QiXzeL10en8ujvot/wEju1X2LGHjQdPCh2NzC1409XqpPSxOMil38N5uFwNKK+YgU3Wcil+ccuwGODgAAAACNku000LjVuMcfzjuJom0Oe6LQvFAY4gCrDUlJemDz4gAAAAAAAAAJAAAAAgAAAAEAAAAwkJ5YHJBvZSO3FWn9fV3rAIQ0bm9gsHzXfU7tEazHg/8AAAAAXPVMywAAAAAAAAAAAAAAAcezXe2EN2Nukw69HAB2OjaDNXDtO/gwfM3wELVs0zEOAAAAQJ8wavmtg6wX8VgtGtgw6bqrERzhG4gns4Uf6v8O3JD60qwnXhjJtTwlluD3o6fPV/0feb9a8reD7yqC71GZdg8AAAAB2ke2EtyNWy+nv5v6CdsR89vDut8klwg6UZl4aYiAp48AAAADAAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAa2qrw54P1lv9IGMKjXGfCNlcdCRXl33v57V+uAmZYf1UvGMsakdNbZFHENg75vdnxM4aHyAcrTMoSTqyvMc7CQAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAZAAAAADAAAABgAAAAAAAAAAAAAAAQAAAAAAAAABAAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAAAAAAAAF9eEAAAAAAAAAAAHZBWJeAAAAQLxtiBKAICA4H0mQtA+ApQDJb1PKwpp9SbYK5rQPR0oVbKqDD3sgvK217vg4s6u+MvU0pdf/T9+2GPZFn1NPaAcAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAGQAAAAAwAAAAcAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAABfXhAAAAAAAAAAAB2QViXgAAAEDEbdlsLh2OUFfBZCJ1N4lv/tDZ+vQY8PWbH/ZsX74cT54E+so/SiXVVI1RBVpbuYxx0WNBxMdJv64Gir6R3d0KAAAAAQAAAAEAAAABAAAAAI2S7TTQuNW4xx/OO4mibQ57otC8UBjiAKsNSUl6YPPiAAAAAA=='); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('f1d63c0b88a1ab68a44bcd02e7c9dd7c7da818ac1ff87762e922acac9958766e', 3, 1, 
'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('ba38e7c204b3f8ab8907a4b9618417854bccb54a7fa494a36c3d185bb45d07d6', 4, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+QAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+M4AAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('b8fd5e6ed3d2658aa66040319e076e30006f7950e18e9a03e1eddeedfccbb418', 5, 1, 'AAAAAgAAAAMAAAAEAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJIICE4AAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJIICDUAAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('b4499cd4bc782623f9ac9654040d49c154fab6ab8d83b2110002c620a5eb7407', 6, 1, 'AAAAAgAAAAMAAAAFAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJCKj/UAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJCKj9wAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('d2a62bf7b9e118b182c33b2fd93b2cc2013dbe9a8d77f35a239b70c8a667e5e5', 7, 1, 'AAAAAgAAAAMAAAAGAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI8NF5wAAAAAwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI8NF4MAAAAAwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('fbeb854b57c7ea853028f23ebe71de61c1ecbd8a64f6437da735ee37883ce558', 8, 1, 'AAAAAgAAAAMAAAAHAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI2Pn0MAAAAAwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI2PnyoAAAAAwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('6a349e7331e93a251367287e274fb1699abaf723bde37aebe96248c76fd3071a', 9, 1, 'AAAAAgAAAAMAAAAIAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJuoAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJtEAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('9a719ea0bc6fd18082cbaec8d1f06c074e6c6aa784fa9ee9f0b015cf8a398bd5', 9, 2, 'AAAAAgAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJtEAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJrgAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('25ded52d9314195e638c758b6eeef7cd07c0cf4c896697f6d5cb228c44dacdd8', 9, 3, 'AAAAAgAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJrgAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJp8AAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('f1d63c0b88a1ab68a44bcd02e7c9dd7c7da818ac1ff87762e922acac9958766e', 3, 1, 
'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvkAAAAAAAAAAABVvwF9wAAAEDUWAnn6bBg8wR8y/D76fh6M+FmmxKaCQL33EyRWWYFxlFN4w2rpaZ3uW69gVg3ooM8LCkF+P8AWaxcKBMjrBMC', '8dY8C4ihq2ikS80C58ndfH2oGKwf+Hdi6SKsrJlYdm4AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBucAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+QAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('ba38e7c204b3f8ab8907a4b9618417854bccb54a7fa494a36c3d185bb45d07d6', 4, 1, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAAAyAAAAAMAAAABAAAAAAAAAAAAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAABfXhAAAAAAAAAAAB2QViXgAAAEBzT3nPm0xtu6CkU5jiXuBFFlZ9yTXnlEKy5HLcoVo9ym4phM8ja3knZbLZ4zJiNklsNl99mmSVkJKz7XXgOXEH', 'ujjnwgSz+KuJB6S5YYQXhUvMtUp/pJSjbD0YW7RdB9YAAAAAAAAAyAAAAAAAAAACAAAAAAAAAAEAAAAAAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvjOAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvjOAAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAACAAAABAAAAAMAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBucAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFZTfycAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAEAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+M4AAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJOFgI4AAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAQAAAADAAAABAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaxWU38nAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaxX0PdnAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAABAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACThYCOAAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACSCAhOAAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('b8fd5e6ed3d2658aa66040319e076e30006f7950e18e9a03e1eddeedfccbb418', 5, 1, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAAAyAAAAAMAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAY8zQeTlk6qu1feh/23t9EMxnoOW+6moGmjXKum57BkkQq6zoV/VciJ7IVIpi+jPVZSk+KSrCQdAm6EV4jBbvBA==', 'uP1ebtPSZYqmYEAxngduMABveVDhjpoD4e3e7fzLtBgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACSCAg1AAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACSCAg1AAAAAMAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFfQ92cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFlOb6cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJIICDUAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJCKj/UAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('b4499cd4bc782623f9ac9654040d49c154fab6ab8d83b2110002c620a5eb7407', 6, 1, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAADAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAABfxa1tvLDgKKRnsVwm97GeZmHtvBJee12Q49wseNvKHjwb0amqXGJVYFN7PGH5ZZ56Se9GvyiL99zLLTz29Dw==', 'tEmc1Lx4JiP5rJZUBA1JwVT6tquNg7IRAALGIKXrdAcAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACQio/cAAAAAMAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACQio/cAAAAAMAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAFAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFlOb6cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFrL5+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAGAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJCKj9wAAAAAwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI8NF5wAAAAAwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('d2a62bf7b9e118b182c33b2fd93b2cc2013dbe9a8d77f35a239b70c8a667e5e5', 7, 1, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAcKnXL1cr7aTkY83f55Oh0M/PNjPSTaZooDIfmoZz16BgDN94hqraJ73vmRdHmqtJaKYdwtcNgovdEvVxFYaIBg==', '0qYr97nhGLGCwzsv2TsswgE9vpqNd/NaI5twyKZn5eUAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACPDReDAAAAAMAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACPDReDAAAAAMAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAGAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFrL5+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFxJYCcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI8NF4MAAAAAwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI2Pn0MAAAAAwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('fbeb854b57c7ea853028f23ebe71de61c1ecbd8a64f6437da735ee37883ce558', 8, 1, 
'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABLAAAAAMAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABArAAIYpB4GOYOqjJiwKvRsZ+V3AZXshTLQb5MRvOuue/lSawV12iNSTEBIpPOqYUc0hfVudWfmLd2aWZ5UQd9AA==', '++uFS1fH6oUwKPI+vnHeYcHsvYpk9kN9pzXuN4g85VgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACNj58qAAAAAMAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACNj58qAAAAAMAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFxJYCcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrF3G2GcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI2PnyoAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJuoAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('6a349e7331e93a251367287e274fb1699abaf723bde37aebe96248c76fd3071a', 9, 1, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAvG2IEoAgIDgfSZC0D4ClAMlvU8rCmn1JtgrmtA9HShVsqoMPeyC8rbXu+Dizq74y9TSl1/9P37YY9kWfU09oBw==', 'ajSeczHpOiUTZyh+J0+xaZq69yO943rr6WJIx2/TBxoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACMEiafAAAAAMAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACMEiafAAAAAMAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrF3G2GcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrF9EUKcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJp8AAAAAwAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIqUrl8AAAAAwAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('9a719ea0bc6fd18082cbaec8d1f06c074e6c6aa784fa9ee9f0b015cf8a398bd5', 9, 2, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAxG3ZbC4djlBXwWQidTeJb/7Q2fr0GPD1mx/2bF++HE+eBPrKP0ol1VSNUQVaW7mMcdFjQcTHSb+uBoq+kd3dCg==', 'mnGeoLxv0YCCy67I0fBsB05saqeE+p7p8LAVz4o5i9UAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACKlK5fAAAAAMAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACKlK5fAAAAAMAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrF9EUKcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrGDByOcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIqUrl8AAAAAwAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIkXNh8AAAAAwAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('25ded52d9314195e638c758b6eeef7cd07c0cf4c896697f6d5cb228c44dacdd8', 9, 3, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAa2qrw54P1lv9IGMKjXGfCNlcdCRXl33v57V+uAmZYf1UvGMsakdNbZFHENg75vdnxM4aHyAcrTMoSTqyvMc7CQ==', 'Jd7VLZMUGV5jjHWLbu73zQfAz0yJZpf21csijETazdgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACJFzYfAAAAAMAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACJFzYfAAAAAMAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrGDByOcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrGI/QScAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIkXNh8AAAAAwAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIeZvd8AAAAAwAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (2, 1, 'AAAAAQAAAAs=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (2, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- 
Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/operation_fee_stats_3-horizon.sql b/services/horizon/internal/test/scenarios/operation_fee_stats_3-horizon.sql new file mode 100644 index 0000000000..b4e5c1feae --- /dev/null +++ b/services/horizon/internal/test/scenarios/operation_fee_stats_3-horizon.sql @@ -0,0 +1,1014 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET 
client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS 
public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.asset_stats; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET 
default_with_oids = false; + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + 
source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount > 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount > 0)) +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged integer, + new_max_fee integer +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-06-03 18:28:47.032496+02'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-06-03 18:28:47.039657+02'); +INSERT INTO gorp_migrations VALUES 
('3_use_sequence_in_history_accounts.sql', '2019-06-03 18:28:47.044048+02'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-06-03 18:28:47.054532+02'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-06-03 18:28:47.063028+02'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-06-03 18:28:47.068415+02'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-06-03 18:28:47.081625+02'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-06-03 18:28:47.087463+02'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-06-03 18:28:47.090109+02'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-06-03 18:28:47.092718+02'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-06-03 18:28:47.095973+02'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-06-03 18:28:47.099698+02'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-06-03 18:28:47.107549+02'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-06-03 18:28:47.112768+02'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-06-03 18:28:47.115116+02'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-06-03 18:28:47.116796+02'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-06-03 18:28:47.117989+02'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-06-03 18:28:47.120034+02'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_accounts VALUES (1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_accounts VALUES (2, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 2, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 1, false); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (1, 38654709761, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 38654709761, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 38654713857, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 38654713857, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 38654717953, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 38654717953, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 34359742465, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 34359742465, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 30064775169, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 30064775169, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 
25769807873, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 25769807873, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 21474840577, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 21474840577, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 17179873281, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 17179873281, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 17179873282, 1, 2, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 17179873282, 2, 3, '{"amount": "10.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 12884905985, 1, 0, '{"starting_balance": "1000.0000000"}'); +INSERT INTO history_effects VALUES (1, 12884905985, 2, 3, '{"amount": "1000.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 12884905985, 3, 10, '{"weight": 1, "public_key": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (9, '2bf34a98b988141b851bcef629b7ef31ed7e74ca39000cdf92d002a91d1c6935', 'da47b612dc8d5b2fa7bf9bfa09db11f3dbc3badf2497083a519978698880a78f', 3, 3, '2019-06-03 16:37:31', '2019-06-03 16:37:27.870696', '2019-06-03 16:37:27.870696', 38654705664, 16, 1000000000000000000, 1000, 100, 100000000, 1000000, 11, 'AAAAC9pHthLcjVsvp7+b+gnbEfPbw7rfJJcIOlGZeGmIgKePkJ5YHJBvZSO3FWn9fV3rAIQ0bm9gsHzXfU7tEazHg/8AAAAAXPVMywAAAAAAAAAAdWzSHhfCzXGLIkOOjoC5Ym8OBprpH5cEOKDNFjF7mH1f3SyVVZeHY50rSogB2TlMZKc8GcCSomNW+7rp7NdslwAAAAkN4Lazp2QAAAAAAAAAAAPoAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0); +INSERT INTO history_ledgers VALUES (8, 'da47b612dc8d5b2fa7bf9bfa09db11f3dbc3badf2497083a519978698880a78f', '7c5e9b5ec8ee15e5fbde0406499973a85a0d90b243addce0c1ff16a8510970e0', 1, 1, '2019-06-03 16:37:30', '2019-06-03 16:37:27.889132', '2019-06-03 16:37:27.889132', 34359738368, 16, 1000000000000000000, 700, 100, 100000000, 1000000, 11, 'AAAAC3xem17I7hXl+94EBkmZc6haDZCyQ63c4MH/FqhRCXDgKSvRyi2Ch/rUCgC7mvyEgkoSjwLop2En+fKX4u77S9sAAAAAXPVMygAAAAAAAAAA1WZy5HNzQG6v2/yQfdo/18UP2Mi3LOX+HsIFwjtPY3dqPyAyz0bMBb26v8R9NyFd8BQr6IfTXpUfoqWipxdbCAAAAAgN4Lazp2QAAAAAAAAAAAK8AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (7, '7c5e9b5ec8ee15e5fbde0406499973a85a0d90b243addce0c1ff16a8510970e0', 'f5d5ec2689d187facad52c2ef940f0b3a86822c24e8fdaeacf6f7317d36134e6', 1, 1, '2019-06-03 16:37:29', '2019-06-03 16:37:27.900016', '2019-06-03 16:37:27.900017', 30064771072, 16, 1000000000000000000, 600, 100, 100000000, 1000000, 11, 
'AAAAC/XV7CaJ0Yf6ytUsLvlA8LOoaCLCTo/a6s9vcxfTYTTmi3GZssPukt5B4pW9nYeAi9c4fBJ/G2009FXT+aofPn0AAAAAXPVMyQAAAAAAAAAAOgSmIClOx9JX9c2gFj6mbAWu2y1mlO0kGdlw5hFcYz1uNWrkAVJl78mTro7woogMHVmygB4fJpIjuQr0kHVoUAAAAAcN4Lazp2QAAAAAAAAAAAJYAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (6, 'f5d5ec2689d187facad52c2ef940f0b3a86822c24e8fdaeacf6f7317d36134e6', '5ae88507efcf646686d3ed03321124529f7b9928bf0531fde31aa55baa3b0fec', 1, 1, '2019-06-03 16:37:28', '2019-06-03 16:37:27.910025', '2019-06-03 16:37:27.910025', 25769803776, 16, 1000000000000000000, 500, 100, 100000000, 1000000, 11, 'AAAAC1rohQfvz2RmhtPtAzIRJFKfe5kovwUx/eMapVuqOw/sn/+p+bTn7uolyYuy5JK2LC1rXH1wYlbg1Er/DS2nlQ0AAAAAXPVMyAAAAAAAAAAAqqhBNhS+oY0Fq/RSpEEko+knV66p0YlC+DiVf0GYlCGg9YIn6PSEfLVsngLGVPIAMbBYSS/o7J4XE21595K6KwAAAAYN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (5, '5ae88507efcf646686d3ed03321124529f7b9928bf0531fde31aa55baa3b0fec', '10b6161843eb9d652e093486fa8174f93fc44a538870829f7f0ac47da8faa651', 1, 1, '2019-06-03 16:37:27', '2019-06-03 16:37:27.920568', '2019-06-03 16:37:27.920568', 21474836480, 16, 1000000000000000000, 400, 100, 100000000, 1000000, 11, 'AAAACxC2FhhD651lLgk0hvqBdPk/xEpTiHCCn38KxH2o+qZRD70AR0B0Om/2/JDgto1Z3HdONB5TdVAW8+uTkr5a4g0AAAAAXPVMxwAAAAAAAAAAfZ5r/iAId0HYP/J85oWovpWvupf3zLdy0YhEdBPnW26KeNNzkVWivC8sNo3dpubpXqq5+Y4rtQrnFnp9TEn+ngAAAAUN4Lazp2QAAAAAAAAAAAGQAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (4, '10b6161843eb9d652e093486fa8174f93fc44a538870829f7f0ac47da8faa651', '647dbe45977be72bc6fe73b13b5476a93b405cd7a562af189628c7cbd41ec891', 1, 2, '2019-06-03 16:37:26', '2019-06-03 16:37:27.931569', '2019-06-03 16:37:27.931569', 17179869184, 16, 1000000000000000000, 300, 100, 100000000, 1000000, 11, 'AAAAC2R9vkWXe+crxv5zsTtUdqk7QFzXpWKvGJYox8vUHsiRiP4WGa5E15yHbJ/Fjs7aniDcPvHvQvRRTagpeuy2LZcAAAAAXPVMxgAAAAAAAAAAvUi8p6JAmXRap5H687O+OXcAKN2//CsLZbyxWLVlZb9ZUMyJYVoYrjI5mh3fV0Ba3qZVHr/lKgv/ck1FfNB45gAAAAQN4Lazp2QAAAAAAAAAAAEsAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (3, '647dbe45977be72bc6fe73b13b5476a93b405cd7a562af189628c7cbd41ec891', '87d37dcbbad4bced6acfa589f9869dea83939ff0f1dbfef99727c9b8ecb0e3f1', 1, 1, '2019-06-03 16:37:25', '2019-06-03 16:37:27.942838', '2019-06-03 16:37:27.942839', 12884901888, 16, 1000000000000000000, 100, 100, 100000000, 1000000, 11, 
'AAAAC4fTfcu61Lztas+lifmGneqDk5/w8dv++ZcnybjssOPx6p7KmzoLq6THlwtzdRXidN9Q1s+IoUveox2wGSAVnYkAAAAAXPVMxQAAAAAAAAAAfFnUZMxpcaRFgW684JniUG/dzZ5jn4eP2mZ8LIGonSrIQlCO7YXSBRlsJC2BmSJC6FqFvhzU66vHkiNcsFs6hAAAAAMN4Lazp2QAAAAAAAAAAABkAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0); +INSERT INTO history_ledgers VALUES (2, '87d37dcbbad4bced6acfa589f9869dea83939ff0f1dbfef99727c9b8ecb0e3f1', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 0, 0, '2019-06-03 16:37:24', '2019-06-03 16:37:27.952867', '2019-06-03 16:37:27.952867', 8589934592, 16, 1000000000000000000, 0, 100, 100000000, 1000000, 11, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZuZRHr9UdXKbTKiclfOjy72YZFJUkJPVcKT5htvorm1QAAAAAXPVMxAAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAA3z9hmASpL9tAVxktxD3XSOp3itxSvEmM6AUkwBS4ERnv+Ltt/ycz/x8/+lFB80rnVx7j2Mrm29EpusUR+gv9ZAAAAAIN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-06-03 16:37:27.958898', '2019-06-03 16:37:27.958898', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0); + + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 38654709761, 2); +INSERT INTO history_operation_participants VALUES (2, 38654709761, 1); +INSERT INTO history_operation_participants VALUES (3, 38654713857, 2); +INSERT INTO history_operation_participants VALUES (4, 38654713857, 1); +INSERT INTO history_operation_participants VALUES (5, 38654717953, 2); +INSERT INTO history_operation_participants VALUES (6, 38654717953, 1); +INSERT INTO history_operation_participants VALUES (7, 34359742465, 2); +INSERT INTO history_operation_participants VALUES (8, 34359742465, 1); +INSERT INTO history_operation_participants VALUES (9, 30064775169, 2); +INSERT INTO history_operation_participants VALUES (10, 30064775169, 1); +INSERT INTO history_operation_participants VALUES (11, 25769807873, 2); +INSERT INTO history_operation_participants VALUES (12, 25769807873, 1); +INSERT INTO history_operation_participants VALUES (13, 21474840577, 2); +INSERT INTO history_operation_participants VALUES (14, 21474840577, 1); +INSERT INTO history_operation_participants VALUES (15, 17179873281, 2); +INSERT INTO history_operation_participants VALUES (16, 17179873281, 1); +INSERT INTO history_operation_participants VALUES (17, 17179873282, 2); +INSERT INTO history_operation_participants VALUES (18, 17179873282, 1); +INSERT INTO history_operation_participants VALUES (19, 12884905985, 1); +INSERT INTO history_operation_participants VALUES (20, 12884905985, 2); + + +-- +-- Name: 
history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 20, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (38654709761, 38654709760, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (38654713857, 38654713856, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (38654717953, 38654717952, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (34359742465, 34359742464, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (30064775169, 30064775168, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (25769807873, 25769807872, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (21474840577, 21474840576, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (17179873281, 17179873280, 1, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (17179873282, 17179873280, 2, 1, '{"to": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "from": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "amount": "10.0000000", "asset_type": "native"}', 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY'); +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY", "starting_balance": "1000.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- 
Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transaction_participants VALUES (1, 38654709760, 2); +INSERT INTO history_transaction_participants VALUES (2, 38654709760, 1); +INSERT INTO history_transaction_participants VALUES (3, 38654713856, 2); +INSERT INTO history_transaction_participants VALUES (4, 38654713856, 1); +INSERT INTO history_transaction_participants VALUES (5, 38654717952, 2); +INSERT INTO history_transaction_participants VALUES (6, 38654717952, 1); +INSERT INTO history_transaction_participants VALUES (7, 34359742464, 2); +INSERT INTO history_transaction_participants VALUES (8, 34359742464, 1); +INSERT INTO history_transaction_participants VALUES (9, 30064775168, 2); +INSERT INTO history_transaction_participants VALUES (10, 30064775168, 1); +INSERT INTO history_transaction_participants VALUES (11, 25769807872, 2); +INSERT INTO history_transaction_participants VALUES (12, 25769807872, 1); +INSERT INTO history_transaction_participants VALUES (13, 21474840576, 2); +INSERT INTO history_transaction_participants VALUES (14, 21474840576, 1); +INSERT INTO history_transaction_participants VALUES (15, 17179873280, 2); +INSERT INTO history_transaction_participants VALUES (16, 17179873280, 1); +INSERT INTO history_transaction_participants VALUES (17, 12884905984, 1); +INSERT INTO history_transaction_participants VALUES (18, 12884905984, 2); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 18, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('6a349e7331e93a251367287e274fb1699abaf723bde37aebe96248c76fd3071a', 9, 1, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 12884901894, 400, 1, '2019-06-03 16:37:27.870885', '2019-06-03 16:37:27.870885', 38654709760, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAAGAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAvG2IEoAgIDgfSZC0D4ClAMlvU8rCmn1JtgrmtA9HShVsqoMPeyC8rbXu+Dizq74y9TSl1/9P37YY9kWfU09oBw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACMEiafAAAAAMAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACMEiafAAAAAMAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrF3G2GcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrF9EUKcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJp8AAAAAwAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIqUrl8AAAAAwAAAAYAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAIAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJuoAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJtEAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{vG2IEoAgIDgfSZC0D4ClAMlvU8rCmn1JtgrmtA9HShVsqoMPeyC8rbXu+Dizq74y9TSl1/9P37YY9kWfU09oBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES 
('9a719ea0bc6fd18082cbaec8d1f06c074e6c6aa784fa9ee9f0b015cf8a398bd5', 9, 2, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 12884901895, 400, 1, '2019-06-03 16:37:27.87116', '2019-06-03 16:37:27.871161', 38654713856, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAAHAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAxG3ZbC4djlBXwWQidTeJb/7Q2fr0GPD1mx/2bF++HE+eBPrKP0ol1VSNUQVaW7mMcdFjQcTHSb+uBoq+kd3dCg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACKlK5fAAAAAMAAAAGAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACKlK5fAAAAAMAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrF9EUKcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrGDByOcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIqUrl8AAAAAwAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIkXNh8AAAAAwAAAAcAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJtEAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJrgAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{xG3ZbC4djlBXwWQidTeJb/7Q2fr0GPD1mx/2bF++HE+eBPrKP0ol1VSNUQVaW7mMcdFjQcTHSb+uBoq+kd3dCg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('25ded52d9314195e638c758b6eeef7cd07c0cf4c896697f6d5cb228c44dacdd8', 9, 3, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 12884901896, 400, 1, '2019-06-03 16:37:27.871322', '2019-06-03 16:37:27.871323', 38654717952, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAAIAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAa2qrw54P1lv9IGMKjXGfCNlcdCRXl33v57V+uAmZYf1UvGMsakdNbZFHENg75vdnxM4aHyAcrTMoSTqyvMc7CQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACJFzYfAAAAAMAAAAHAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACJFzYfAAAAAMAAAAIAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrGDByOcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrGI/QScAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIkXNh8AAAAAwAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIeZvd8AAAAAwAAAAgAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJrgAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAJAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJp8AAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{a2qrw54P1lv9IGMKjXGfCNlcdCRXl33v57V+uAmZYf1UvGMsakdNbZFHENg75vdnxM4aHyAcrTMoSTqyvMc7CQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES 
('fbeb854b57c7ea853028f23ebe71de61c1ecbd8a64f6437da735ee37883ce558', 8, 1, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 12884901893, 300, 1, '2019-06-03 16:37:27.889321', '2019-06-03 16:37:27.889321', 34359742464, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABLAAAAAMAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABArAAIYpB4GOYOqjJiwKvRsZ+V3AZXshTLQb5MRvOuue/lSawV12iNSTEBIpPOqYUc0hfVudWfmLd2aWZ5UQd9AA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAACAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACNj58qAAAAAMAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACNj58qAAAAAMAAAAFAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFxJYCcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrF3G2GcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAIAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI2PnyoAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAIwSJuoAAAAAwAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAHAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI2Pn0MAAAAAwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAIAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI2PnyoAAAAAwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{rAAIYpB4GOYOqjJiwKvRsZ+V3AZXshTLQb5MRvOuue/lSawV12iNSTEBIpPOqYUc0hfVudWfmLd2aWZ5UQd9AA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('d2a62bf7b9e118b182c33b2fd93b2cc2013dbe9a8d77f35a239b70c8a667e5e5', 7, 1, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 12884901892, 400, 1, '2019-06-03 16:37:27.900178', '2019-06-03 16:37:27.900178', 30064775168, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAcKnXL1cr7aTkY83f55Oh0M/PNjPSTaZooDIfmoZz16BgDN94hqraJ73vmRdHmqtJaKYdwtcNgovdEvVxFYaIBg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABwAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACPDReDAAAAAMAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACPDReDAAAAAMAAAAEAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAGAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFrL5+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFxJYCcAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAHAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI8NF4MAAAAAwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI2Pn0MAAAAAwAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAGAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI8NF5wAAAAAwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAHAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI8NF4MAAAAAwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{cKnXL1cr7aTkY83f55Oh0M/PNjPSTaZooDIfmoZz16BgDN94hqraJ73vmRdHmqtJaKYdwtcNgovdEvVxFYaIBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES 
('b4499cd4bc782623f9ac9654040d49c154fab6ab8d83b2110002c620a5eb7407', 6, 1, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 12884901891, 400, 1, '2019-06-03 16:37:27.910253', '2019-06-03 16:37:27.910254', 25769807872, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAABkAAAAAMAAAADAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAABfxa1tvLDgKKRnsVwm97GeZmHtvBJee12Q49wseNvKHjwb0amqXGJVYFN7PGH5ZZ56Se9GvyiL99zLLTz29Dw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACQio/cAAAAAMAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACQio/cAAAAAMAAAADAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAFAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFlOb6cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFrL5+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAGAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJCKj9wAAAAAwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAI8NF5wAAAAAwAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAFAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJCKj/UAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJCKj9wAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ABfxa1tvLDgKKRnsVwm97GeZmHtvBJee12Q49wseNvKHjwb0amqXGJVYFN7PGH5ZZ56Se9GvyiL99zLLTz29Dw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b8fd5e6ed3d2658aa66040319e076e30006f7950e18e9a03e1eddeedfccbb418', 5, 1, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 12884901890, 200, 1, '2019-06-03 16:37:27.920777', '2019-06-03 16:37:27.920777', 21474840576, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAAAyAAAAAMAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAdkFYl4AAABAY8zQeTlk6qu1feh/23t9EMxnoOW+6moGmjXKum57BkkQq6zoV/VciJ7IVIpi+jPVZSk+KSrCQdAm6EV4jBbvBA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACSCAg1AAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACSCAg1AAAAAMAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAMAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFfQ92cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFlOb6cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJIICDUAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJCKj/UAAAAAwAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJIICE4AAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJIICDUAAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{Y8zQeTlk6qu1feh/23t9EMxnoOW+6moGmjXKum57BkkQq6zoV/VciJ7IVIpi+jPVZSk+KSrCQdAm6EV4jBbvBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES 
('ba38e7c204b3f8ab8907a4b9618417854bccb54a7fa494a36c3d185bb45d07d6', 4, 1, 'GAG52TW6QAB6TGNMOTL32Y4M3UQQLNNNHPEHYAIYRP6SFF6ZAVRF5ZQY', 12884901889, 200, 2, '2019-06-03 16:37:27.931785', '2019-06-03 16:37:27.931785', 17179873280, 'AAAAAA3dTt6AA+mZrHTXvWOM3SEFta07yHwBGIv9IpfZBWJeAAAAyAAAAAMAAAABAAAAAAAAAAAAAAACAAAAAAAAAAEAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAAAAAAAAAAX14QAAAAAAAAAAAQAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAAAAAAABfXhAAAAAAAAAAAB2QViXgAAAEBzT3nPm0xtu6CkU5jiXuBFFlZ9yTXnlEKy5HLcoVo9ym4phM8ja3knZbLZ4zJiNklsNl99mmSVkJKz7XXgOXEH', 'AAAAAAAAAMgAAAAAAAAAAgAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvjOAAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvjOAAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAACAAAABAAAAAMAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBucAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFZTfycAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAEAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+M4AAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJOFgI4AAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAQAAAADAAAABAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaxWU38nAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4LaxX0PdnAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAADAAAABAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACThYCOAAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACSCAhOAAAAAMAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAA=', 'AAAAAgAAAAMAAAADAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+QAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+M4AAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{c095z5tMbbugpFOY4l7gRRZWfck155RCsuRy3KFaPcpuKYTPI2t5J2Wy2eMyYjZJbDZffZpklZCSs+114DlxBw==}', 'none', NULL, NULL, true, 200); +INSERT INTO history_transactions VALUES ('f1d63c0b88a1ab68a44bcd02e7c9dd7c7da818ac1ff87762e922acac9958766e', 3, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-06-03 16:37:27.943187', '2019-06-03 16:37:27.943188', 12884905984, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAADd1O3oAD6ZmsdNe9Y4zdIQW1rTvIfAEYi/0il9kFYl4AAAACVAvkAAAAAAAAAAABVvwF9wAAAEDUWAnn6bBg8wR8y/D76fh6M+FmmxKaCQL33EyRWWYFxlFN4w2rpaZ3uW69gVg3ooM8LCkF+P8AWaxcKBMjrBMC', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcN4Lazp2P/nAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrFTWBucAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAAAAAAAN3U7egAPpmax0171jjN0hBbWtO8h8ARiL/SKX2QViXgAAAAJUC+QAAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{1FgJ5+mwYPMEfMvw++n4ejPhZpsSmgkC99xMkVlmBcZRTeMNq6Wmd7luvYFYN6KDPCwpBfj/AFmsXCgTI6wTAg==}', 'none', NULL, NULL, true, 100); + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree 
(history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING 
btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/pathed_payment-core.sql 
b/services/horizon/internal/test/scenarios/pathed_payment-core.sql new file mode 100644 index 0000000000..8478081340 --- /dev/null +++ b/services/horizon/internal/test/scenarios/pathed_payment-core.sql @@ -0,0 +1,761 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + 
+COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + signers text, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash 
character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 999999994999999500, 5, 0, NULL, '', 'AQAAAA==', 0, 2, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 999999900, 8589934593, 1, NULL, '', 'AQAAAA==', 0, 3, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 999999800, 8589934594, 0, NULL, '', 'AQAAAA==', 0, 4, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 999999800, 8589934594, 0, NULL, '', 'AQAAAA==', 0, 4, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 999999700, 8589934595, 3, NULL, '', 'AQAAAA==', 0, 5, NULL, NULL, NULL); +INSERT INTO accounts VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 999999800, 8589934594, 1, NULL, '', 'AQAAAA==', 0, 6, NULL, NULL, NULL); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('3f95ccfc1db00838f3650c12c71c07651cf86e9e3db3c5c8f82085841e4a1132', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 'bc853b766204674c84c8bb3e2a359139a8c353cf4a44d38d55714546c2659a53', 2, 1559579766, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZhnfbEDpHPYd45nQcT7XkfIMwSX9gFD9+fWat716QIWcAAAAAXPVMdgAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAAQ9m8LSG8b5W0buxEtqstVQz7IV/HrDmGEtdA4mCEDXy8hTt2YgRnTITIuz4qNZE5qMNTz0pE041VcUVGwmWaUwAAAAIN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('e997c76a3479b89c2fce8a3e7134f33fee54d7d51366bb9f1c934c82351db20a', '3f95ccfc1db00838f3650c12c71c07651cf86e9e3db3c5c8f82085841e4a1132', 'c0d318d6a5c5eb9560966c24d37fe26b283e2b922ba791b0cedb11e2f00ff900', 3, 1559579767, 'AAAACz+VzPwdsAg482UMEsccB2Uc+G6ePbPFyPgghYQeShEyZ3Ek5ZIK1fLESXcLlE/00zAs06C9ekp1Jb/2erNycuwAAAAAXPVMdwAAAAAAAAAASmWYY0EUkBWM4g/+kngUijwIV7OrC5Zk/MHYGJjEEs3A0xjWpcXrlWCWbCTTf+JrKD4rkiunkbDO2xHi8A/5AAAAAAMN4Lazp2QAAAAAAAAAAAOEAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('43eba0f2e949ac89f0a29f3ab7204a5a2f23d10aa9a5e0e8ba3dbde36a689649', 'e997c76a3479b89c2fce8a3e7134f33fee54d7d51366bb9f1c934c82351db20a', 'c24574a8c38ec4ceb961dcea1f5f0047a95d59acbba8e6e196a0895f2df2c882', 4, 1559579768, 'AAAAC+mXx2o0ebicL86KPnE08z/uVNfVE2a7nxyTTII1HbIKRxEyt9Sdk5w+y9RcNs5GuIx3q5SCV2cLUp7otU0J+TYAAAAAXPVMeAAAAAAAAAAAZelIPS3sQhACuKIks346158R3J91krq+J8/emGkxvSrCRXSow47Ezrlh3OofXwBHqV1ZrLuo5uGWoIlfLfLIggAAAAQN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('56978ed5961edf38821cb20932bdf27fe50a2141212402d5bf5a207a72d5c965', '43eba0f2e949ac89f0a29f3ab7204a5a2f23d10aa9a5e0e8ba3dbde36a689649', 'ced8f8a7eed7e0142001c8f89c82327635233145d756ebd0232edc3a71a69166', 5, 1559579769, 'AAAAC0ProPLpSayJ8KKfOrcgSlovI9EKqaXg6Lo9veNqaJZJTSAZM5kJaIj3FmqmCEciWETR+Q/Os97fIV4AOaVRJvEAAAAAXPVMeQAAAAAAAAAAMipVgDbLOBwRolHBsTSsmxFNzeTX3AdqZSXt3LGMFmjO2Pin7tfgFCAByPicgjJ2NSMxRddW69AjLtw6caaRZgAAAAUN4Lazp2QAAAAAAAAAAAV4AAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('926df2175a47e7f4c5e2733796cbb4ac6b8b505e4da8569c0d628491b9d1715f', '56978ed5961edf38821cb20932bdf27fe50a2141212402d5bf5a207a72d5c965', 'c85509bfa704b2ab88df7af9e33db207d887170413d3fb34f1768898bef96a7e', 6, 1559579770, 
'AAAAC1aXjtWWHt84ghyyCTK98n/lCiFBISQC1b9aIHpy1clllim1q2Q13bhhV72YJg8ksrQ7hO82py+YqilDIA89yEgAAAAAXPVMegAAAAAAAAAAdopGqZ/Fgz8xg91fCwTsvxIms9ukW73CHeCtBuNmNWLIVQm/pwSyq4jfevnjPbIH2IcXBBPT+zTxdoiYvvlqfgAAAAYN4Lazp2QAAAAAAAAAAAXcAAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO offers VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 1, 'AAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2Ek=', 'AAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8=', 100000000, 1, 1, 1, 0, 6); + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GCTRRYQAXFBMGWNVPCVH7AUEB57CWR2ITVPUKUYENCSL2O7O2LUEMBC2', 2, 'AAAAAKcY4gC5QsNZtXiqf4KED34rR0idX0VTBGikvTvu0uhGAAAAAAAAAAIAAAACAAAAAQAAAEiGd9sQOkc9h3jmdBxPteR8gzBJf2AUP359Zq3vXpAhZwAAAABc9Ux2AAAAAgAAAAgAAAABAAAACwAAAAgAAAADAA9CQAAAAAAAAAAB9YC3SDCASFn/htKl9nkb4z53vXCT349QRJ/IF6lEYVMAAABAoCwsF7CH0admCyu716NCyPLZOSNnEkLPZ47W6pg/yl09RUX2/AJiSKwIrw98hdo5rMl0nlcmIE/xcsG6XrrzBg=='); +INSERT INTO scphistory VALUES ('GCTRRYQAXFBMGWNVPCVH7AUEB57CWR2ITVPUKUYENCSL2O7O2LUEMBC2', 3, 'AAAAAKcY4gC5QsNZtXiqf4KED34rR0idX0VTBGikvTvu0uhGAAAAAAAAAAMAAAACAAAAAQAAADBncSTlkgrV8sRJdwuUT/TTMCzToL16SnUlv/Z6s3Jy7AAAAABc9Ux3AAAAAAAAAAAAAAAB9YC3SDCASFn/htKl9nkb4z53vXCT349QRJ/IF6lEYVMAAABA3wiAKUcmf1jd2me4BvBHGRVKtXkDwVwt8Ve/o95EByoVubki0FjkdZQxs6QKQJZCoh+bFNC76lcgwbTP/f0YBg=='); +INSERT INTO scphistory VALUES ('GCTRRYQAXFBMGWNVPCVH7AUEB57CWR2ITVPUKUYENCSL2O7O2LUEMBC2', 4, 'AAAAAKcY4gC5QsNZtXiqf4KED34rR0idX0VTBGikvTvu0uhGAAAAAAAAAAQAAAACAAAAAQAAADBHETK31J2TnD7L1Fw2zka4jHerlIJXZwtSnui1TQn5NgAAAABc9Ux4AAAAAAAAAAAAAAAB9YC3SDCASFn/htKl9nkb4z53vXCT349QRJ/IF6lEYVMAAABALGh3cJy89YspETzeQb5FZ2Q7szASXm/N0ot99gxCEhPmF/N7mDUDZK3K5617SbPQd91vHasbofpgP0QY+XvfCQ=='); +INSERT INTO scphistory VALUES ('GCTRRYQAXFBMGWNVPCVH7AUEB57CWR2ITVPUKUYENCSL2O7O2LUEMBC2', 5, 'AAAAAKcY4gC5QsNZtXiqf4KED34rR0idX0VTBGikvTvu0uhGAAAAAAAAAAUAAAACAAAAAQAAADBNIBkzmQloiPcWaqYIRyJYRNH5D86z3t8hXgA5pVEm8QAAAABc9Ux5AAAAAAAAAAAAAAAB9YC3SDCASFn/htKl9nkb4z53vXCT349QRJ/IF6lEYVMAAABAzEi3Y0GKriY/fehW5qXDL7nY/CFcvm2Q3M9jtjbReyEIAh9a9p6AzvrcNzLtY6gv1VPCUjCOcCrIgzM00nfMCw=='); +INSERT INTO scphistory VALUES ('GCTRRYQAXFBMGWNVPCVH7AUEB57CWR2ITVPUKUYENCSL2O7O2LUEMBC2', 6, 'AAAAAKcY4gC5QsNZtXiqf4KED34rR0idX0VTBGikvTvu0uhGAAAAAAAAAAYAAAACAAAAAQAAADCWKbWrZDXduGFXvZgmDySytDuE7zanL5iqKUMgDz3ISAAAAABc9Ux6AAAAAAAAAAAAAAAB9YC3SDCASFn/htKl9nkb4z53vXCT349QRJ/IF6lEYVMAAABAoj6twHexHXprK9FJiBINvN93gi5koMcCThUty10heMHREDy0TfDTPLWys69zLinH34aUu9P1eJqixJaNVm3nAw=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('f580b74830804859ff86d2a5f6791be33e77bd7093df8f50449fc817a9446153', 6, 'AAAAAQAAAAEAAAAApxjiALlCw1m1eKp/goQPfitHSJ1fRVMEaKS9O+7S6EYAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('lastclosedledger ', 
'926df2175a47e7f4c5e2733796cbb4ac6b8b505e4da8569c0d628491b9d1715f'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v11.1.0", + "currentLedger": 6, + "currentBuckets": [ + { + "curr": "e00418d68bee7b02b58829591091d37d3d60c9f7479e37b43c446a8e688ffb5a", + "next": { + "state": 0 + }, + "snap": "0965d5f75845d21c236f34c96c1635f541bafec760ce8367d0269f9cc90e50bd" + }, + { + "curr": "09373e15ec7baa41a2f08591fa2268f8e786078c6c807893f31cee3576b449eb", + "next": { + "state": 1, + "output": "0965d5f75845d21c236f34c96c1635f541bafec760ce8367d0269f9cc90e50bd" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 0, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('lastscpdata ', 
'AAAAAgAAAACnGOIAuULDWbV4qn+ChA9+K0dInV9FUwRopL077tLoRgAAAAAAAAAGAAAAA/WAt0gwgEhZ/4bSpfZ5G+M+d71wk9+PUESfyBepRGFTAAAAAQAAAJiWKbWrZDXduGFXvZgmDySytDuE7zanL5iqKUMgDz3ISAAAAABc9Ux6AAAAAAAAAAEAAAAApxjiALlCw1m1eKp/goQPfitHSJ1fRVMEaKS9O+7S6EYAAABAvSpT1OTMov3f6HON2oeYnzvHVbPX6+NT0yGSSDrfxFZyryiyQKhEn9gk6p5IMFYyd7K1H1X5NO6X0BVfTFfmDQAAAAEAAACYlim1q2Q13bhhV72YJg8ksrQ7hO82py+YqilDIA89yEgAAAAAXPVMegAAAAAAAAABAAAAAKcY4gC5QsNZtXiqf4KED34rR0idX0VTBGikvTvu0uhGAAAAQL0qU9TkzKL93+hzjdqHmJ87x1Wz1+vjU9Mhkkg638RWcq8oskCoRJ/YJOqeSDBWMneytR9V+TTul9AVX0xX5g0AAABAjKjyyjhMBVVQ3Afxv8x5T+SmLKdpwiU5oZmksuFc71QQ0rHHm5ZjGGuKP+nfl8Siuv3NDTAMVBNH1VFbKAJODAAAAACnGOIAuULDWbV4qn+ChA9+K0dInV9FUwRopL077tLoRgAAAAAAAAAGAAAAAgAAAAEAAAAwlim1q2Q13bhhV72YJg8ksrQ7hO82py+YqilDIA89yEgAAAAAXPVMegAAAAAAAAAAAAAAAfWAt0gwgEhZ/4bSpfZ5G+M+d71wk9+PUESfyBepRGFTAAAAQKI+rcB3sR16ayvRSYgSDbzfd4IuZKDHAk4VLctdIXjB0RA8tE3w0zy1srOvcy4px9+GlLvT9XiaosSWjVZt5wMAAAABVpeO1ZYe3ziCHLIJMr3yf+UKIUEhJALVv1ogenLVyWUAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAIAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAF9eEAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAABfXhAAAAAAAAAAAAAAAAAa7kvkwAAABAD49aRUuzXXeNHu1FfIYBbplBoP+b1B4uMGt2UGZt6jPKvwVORmMzfXDZaHBvIirsI8eNf+9F1EI0Fh9M/2jmCgAAAAEAAAABAAAAAQAAAACnGOIAuULDWbV4qn+ChA9+K0dInV9FUwRopL077tLoRgAAAAA='); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO trustlines VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 1, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 'EUR', 9223372036854775807, 1100000000, 1, 6, NULL, NULL); +INSERT INTO trustlines VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 'USD', 9223372036854775807, 300000000, 1, 6, 100000000, 0); +INSERT INTO trustlines VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 'USD', 9223372036854775807, 900000000, 1, 6, NULL, NULL); +INSERT INTO trustlines VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 1, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 'EUR', 9223372036854775807, 100000000, 1, 6, 0, 100000000); + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('666656a6eade2082c5780571267d9e4453eee5781ca9a58aa319eb0fe83455fd', 2, 1, 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('b1f828384c56e4b024f4275f246580ababff1ae3b9ba61b03897357e57eebc20', 2, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2b2e82dbabb024b27a0c3140ca71d8ac9bc71831f9f5a3bd69eca3d88fb0ec5c', 2, 3, 
'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('e17bae552da0105ad32f0b9aadfd0f623ef37eb486b10b044c19238360e455d7', 2, 4, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('cfd8816ed587c5ed88dea0eb00818caf38c0750e7740e05de3c27176e9aee8ee', 2, 5, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/4MAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('b5cadce05fc0ad5d6fe009b8b0debc0d3dfd32ea42b8eba3e9ea68c2746e410f', 3, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('bd486dbdd02d460817671c4a5a7e9d6e865ca29cb41e62d7aaf70a2fee5b36de', 3, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('811192c38643df73c015a5a1d77b802dff05d4f50fc6d10816aa75c0a6109f9a', 3, 3, 'AAAAAgAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('32e4ba1f218b6aa2420b497456a1b09090e3837e66b3495030d4edd60d0f0570', 3, 4, 'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('09c0147a62c828321ee899d0cccd92c81525eea71250720260321b3a24995e8b', 4, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('7736f0b869de0f74a5ed7f8d6529949238eb0f0421f3fc2bbc438084f21c8055', 4, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('f74cd54800d537c06dff35cc4783be881c2d670c1151a56ca7f951758dc7415d', 4, 3, 
'AAAAAgAAAAMAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('c93f80667f37df70a29ec0de96ff3381644ac828a4cea1cb6ceb1bcec6fff058', 4, 4, 'AAAAAgAAAAMAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('b52f16ffb98c047e33b9c2ec30880330cde71f85b3443dae2c5cb86c7d4d8452', 5, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc292', 6, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('666656a6eade2082c5780571267d9e4453eee5781ca9a58aa319eb0fe83455fd', 2, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAAAAAABVvwF9wAAAEBdDXe23U4e9C2SxpBLZRx1rJzSFLJ0xDD0uKGpmqbflDT+XXIq6UiDBzmFxt+GO+XqFoQPdrXT7p1oLZIHqTMP', 'ZmZWpureIILFeAVxJn2eRFPu5XgcqaWKoxnrD+g0Vf0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/4MAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTQMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('b1f828384c56e4b024f4275f246580ababff1ae3b9ba61b03897357e57eebc20', 2, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEBdfnFSzZeh17zt82oMdqe4+/xns/kHBdGXf9BIBRYfVZ3DQT3awwZn5LqgIG9JqlvMmR1TKaxcoJQDuqGcCScM', 'sfgoOExW5LAk9CdfJGWAq6v/GuO5umGwOJc1flfuvCAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTQMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmoMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2b2e82dbabb024b27a0c3140ca71d8ac9bc71831f9f5a3bd69eca3d88fb0ec5c', 2, 3, 
'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDJul1tLGLF4Vxwt0dDCVEf6tb5l4byMrGgCp+lVZMmxct54iNf2mxtjx6Md5ZJ4E4Dlcsf46EAhBGSUPsn8fYD', 'Ky6C26uwJLJ6DDFAynHYrJvHGDH59aO9aeyj2I+w7FwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmoMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6AMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('e17bae552da0105ad32f0b9aadfd0f623ef37eb486b10b044c19238360e455d7', 2, 4, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDYYfyOrmPhfki6lrP+oCfunJmRu2mfxl40o5qWR7y1YmP8poG+6Xqg41jKCWNwVoP717CVEPe70I0teWvTejkJ', '4XuuVS2gEFrTLwuarf0PYj7zfrSGsQsETBkjg2DkVdcAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6AMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('cfd8816ed587c5ed88dea0eb00818caf38c0750e7740e05de3c27176e9aee8ee', 2, 5, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDNmQhdQeyMcWFWP8dVRkDtFS4tHICyKdaPkR6+/L7+tMzKWoUjbDAXscRYI+j6Fd/VFUaDzdYsWCAsH30WujIL', 'z9iBbtWHxe2I3qDrAIGMrzjAdQ53QOBd48Jxdumu6O4AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrJ9XgwMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('b5cadce05fc0ad5d6fe009b8b0debc0d3dfd32ea42b8eba3e9ea68c2746e410f', 3, 1, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAEnciLVAAAAQANQSzvpEBCAXvs1PgmH/UFbfAYt3OAggYPVTd0pjVcJaV3lDE/jOZMnLFZMkFEhg4dluVQxeDZAwTKUPandswg=', 'tcrc4F/ArV1v4Am4sN68DT39MupCuOuj6epownRuQQ8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('bd486dbdd02d460817671c4a5a7e9d6e865ca29cb41e62d7aaf70a2fee5b36de', 3, 2, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQB9kmKW2q3v7Qfy8PMekEb1TTI5ixqkI0BogXrOt7gO162Qbkh2dSTUfeDovc0PAafhDXxthVAlsLujlBmyjBAY=', 'vUhtvdAtRggXZxxKWn6dboZcopy0HmLXqvcKL+5bNt4AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('811192c38643df73c015a5a1d77b802dff05d4f50fc6d10816aa75c0a6109f9a', 3, 3, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAFvFIhaAAAAQPlg7GLhJg0x7jpAw1Ew6H2XF6yRImfJIwFfx09Nui5btOJAFewFANfOaAB8FQZl5p3A5g3k6DHDigfUNUD16gc=', 'gRGSw4ZD33PAFaWh13uALf8F1PUPxtEIFqp1wKYQn5oAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('32e4ba1f218b6aa2420b497456a1b09090e3837e66b3495030d4edd60d0f0570', 3, 4, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAFvFIhaAAAAQMJmv+lhF5QZlgdIqBXDSdhEtgraTrRSwVr5d/BrNC28efHMoxYNa+2u9tSEdxU+hGX6JRW7wAF3bOpA8rxxxAE=', 'MuS6HyGLaqJCC0l0VqGwkJDjg35ms0lQMNTt1g0PBXAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('09c0147a62c828321ee899d0cccd92c81525eea71250720260321b3a24995e8b', 4, 1, 'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAL68IAAAAAAAAAAAEQithJAAAAQCaHhpiVN9E437IXFcHpfVrox1SO/NJtCmB2hgagMQHDRDGQMHN3qjScTOqqeEsNEuvK+n7I4b+9Fr0R3twmcgs=', 'CcAUemLIKDIe6JnQzM2SyBUl7qcSUHICYDIbOiSZXosAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAvrwgB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('7736f0b869de0f74a5ed7f8d6529949238eb0f0421f3fc2bbc438084f21c8055', 4, 2, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAAAAAAH5kC3vAAAAQDjBSAulKc9tRqGg+OkVbKPz4olRQYUevyCfv0LAlqbXG6yPbpR0BR6o7mrimRm8O4VoRBGIATQB42NOWcFzdQw=', 'dzbwuGneD3Sl7X+NZSmUkjjrDwQh8/wrvEOAhPIcgFUAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('f74cd54800d537c06dff35cc4783be881c2d670c1151a56ca7f951758dc7415d', 4, 3, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAL68IAAAAAAAAAAAH5kC3vAAAAQOIKSlDQm9Urq2ujnvxZjGq6zJQncPTp8vl4sCC4Ra4MUnaHYDakRXTFoQlIFAr5t0oJwdBSs6TJ8M5VeGgBbQg=', '90zVSADVN8Bt/zXMR4O+iBwtZwwRUaVsp/lRdY3HQV0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAvrwgB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('c93f80667f37df70a29ec0de96ff3381644ac828a4cea1cb6ceb1bcec6fff058', 4, 4, 'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAAAAAAEQithJAAAAQG4l7kCAq5aqvS2d/HTtYc7LAa7pSUiiO4KyKJbqmsDgvckGC2dbhcro9tcvCZHfwqTV+ikv8Hm8Zfa63kYPkQY=', 'yT+AZn8333CinsDelv8zgWRKyCikzqHLbOsbzsb/8FgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('b52f16ffb98c047e33b9c2ec30880330cde71f85b3443dae2c5cb86c7d4d8452', 5, 1, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAvrwgAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAFvFIhaAAAAQFlXwaom7ylSTdyaO7qNM74Y+JUkA2o0uc7W2FzBlkVe2scznMMa+R8hVTblO5lQ6+FcTM5jIrWQqxqFFZbOkAw=', 'tS8W/7mMBH4zucLsMIgDMM3nH4WzRD2uLFy4bH1NhFIAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAC+vCAAAAAAEAAAABAAAAAAAAAAAAAAAA', 
'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rI1AAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rI1AAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAvrwgB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAFAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAvrwgB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAC+vCAAAAAAAAAAAAAAAAAwAAAAQAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAC+vCAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAC+vCAH//////////AAAAAQAAAAEAAAAAC+vCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAIAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAC+vCAAAAAAEAAAABAAAAAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc292', 6, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAIAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAF9eEAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAABfXhAAAAAAAAAAAAAAAAAa7kvkwAAABAD49aRUuzXXeNHu1FfIYBbplBoP+b1B4uMGt2UGZt6jPKvwVORmMzfXDZaHBvIirsI8eNf+9F1EI0Fh9M/2jmCg==', 'HSpL5yRwZY9o21Du8p6grz+YXOGLXCGPA0YdQMR9wpIAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAIAAAAAAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAABAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAABfXhAAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAX14QAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAF9eEAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAMAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAEGQqwB//////////wAAAAEAAAAAAAAAAAAAAAMAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADWk6QB//////////wAAAAEAAAAAAAAAAAAAAAMAAAAFAAAAAgAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAABAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAL68IAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAABAAAABgAAAAIAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAABfXhAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAwAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAC+vCAH//////////AAAAAQAAAAEAAAAAC+vCAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAR4aMAf/////////8AAAABAAAAAQAAAAAF9eEAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAvrwgB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAC+vCAAAAAAAAAAAAAAAAAQAAAAYAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAABfXhAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAA='); + + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (2, 1, 'AAAAAQAAAAs=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (2, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY 
peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/pathed_payment-horizon.sql b/services/horizon/internal/test/scenarios/pathed_payment-horizon.sql new file mode 100644 index 0000000000..f1466021ee --- /dev/null +++ b/services/horizon/internal/test/scenarios/pathed_payment-horizon.sql @@ -0,0 +1,1092 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = 
on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS 
public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.asset_stats; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = 
numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer, + tx_set_operation_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT 
NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount > 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount > 0)) +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged bigint, + inner_transaction_hash character varying(64), + fee_account character varying(64), + inner_signatures character varying(96)[], + new_max_fee bigint +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO asset_stats VALUES (1, '1200000000', 2, 0, ''); +INSERT INTO asset_stats VALUES (2, '1200000000', 2, 
0, ''); + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-06-03 18:28:47.032496+02'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-06-03 18:28:47.039657+02'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-06-03 18:28:47.044048+02'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-06-03 18:28:47.054532+02'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-06-03 18:28:47.063028+02'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-06-03 18:28:47.068415+02'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-06-03 18:28:47.081625+02'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-06-03 18:28:47.087463+02'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-06-03 18:28:47.090109+02'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-06-03 18:28:47.092718+02'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-06-03 18:28:47.095973+02'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-06-03 18:28:47.099698+02'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-06-03 18:28:47.107549+02'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-06-03 18:28:47.112768+02'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-06-03 18:28:47.115116+02'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-06-03 18:28:47.116796+02'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-06-03 18:28:47.117989+02'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-06-03 18:28:47.120034+02'); +INSERT INTO gorp_migrations VALUES ('18_account_for_signers.sql', '2019-10-31 14:19:49.123835+01'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_accounts VALUES (1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_accounts VALUES (2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_accounts VALUES (3, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_accounts VALUES (4, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); +INSERT INTO history_accounts VALUES (5, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_accounts VALUES (6, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 6, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_assets VALUES (1, 'credit_alphanum4', 'USD', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_assets VALUES (2, 'credit_alphanum4', 'EUR', 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 2, true); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (3, 25769807873, 1, 2, 
'{"amount": "10.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (2, 25769807873, 2, 3, '{"amount": "10.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 25769807873, 3, 33, '{"seller": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "offer_id": 1, "sold_amount": "10.0000000", "bought_amount": "10.0000000", "sold_asset_code": "USD", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "EUR", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "bought_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 25769807873, 4, 33, '{"seller": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "offer_id": 1, "sold_amount": "10.0000000", "bought_amount": "10.0000000", "sold_asset_code": "EUR", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "USD", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "bought_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 17179873281, 1, 2, '{"amount": "20.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (4, 17179873281, 2, 3, '{"amount": "20.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (2, 17179877377, 1, 2, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (5, 17179877377, 2, 3, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 17179881473, 1, 2, '{"amount": "20.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (5, 17179881473, 2, 3, '{"amount": "20.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 17179885569, 1, 2, '{"amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (4, 17179885569, 2, 3, '{"amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (3, 12884905985, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (2, 12884910081, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": 
"GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 12884914177, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 12884918273, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (5, 8589938689, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589938689, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (5, 8589938689, 3, 10, '{"weight": 1, "public_key": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (4, 8589942785, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589942785, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (4, 8589942785, 3, 10, '{"weight": 1, "public_key": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 8589946881, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589946881, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589946881, 3, 10, '{"weight": 1, "public_key": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON"}'); +INSERT INTO history_effects VALUES (3, 8589950977, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589950977, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (3, 8589950977, 3, 10, '{"weight": 1, "public_key": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"}'); +INSERT INTO history_effects VALUES (2, 8589955073, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589955073, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 8589955073, 3, 10, '{"weight": 1, "public_key": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (6, '926df2175a47e7f4c5e2733796cbb4ac6b8b505e4da8569c0d628491b9d1715f', '56978ed5961edf38821cb20932bdf27fe50a2141212402d5bf5a207a72d5c965', 1, 1, '2019-06-03 16:36:10', '2019-06-03 16:36:09.634668', '2019-06-03 16:36:09.634668', 25769803776, 16, 1000000000000000000, 1500, 100, 100000000, 1000000, 11, 'AAAAC1aXjtWWHt84ghyyCTK98n/lCiFBISQC1b9aIHpy1clllim1q2Q13bhhV72YJg8ksrQ7hO82py+YqilDIA89yEgAAAAAXPVMegAAAAAAAAAAdopGqZ/Fgz8xg91fCwTsvxIms9ukW73CHeCtBuNmNWLIVQm/pwSyq4jfevnjPbIH2IcXBBPT+zTxdoiYvvlqfgAAAAYN4Lazp2QAAAAAAAAAAAXcAAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (5, '56978ed5961edf38821cb20932bdf27fe50a2141212402d5bf5a207a72d5c965', '43eba0f2e949ac89f0a29f3ab7204a5a2f23d10aa9a5e0e8ba3dbde36a689649', 1, 1, '2019-06-03 16:36:09', '2019-06-03 16:36:09.667603', '2019-06-03 16:36:09.667603', 21474836480, 16, 1000000000000000000, 1400, 100, 100000000, 1000000, 11, 
'AAAAC0ProPLpSayJ8KKfOrcgSlovI9EKqaXg6Lo9veNqaJZJTSAZM5kJaIj3FmqmCEciWETR+Q/Os97fIV4AOaVRJvEAAAAAXPVMeQAAAAAAAAAAMipVgDbLOBwRolHBsTSsmxFNzeTX3AdqZSXt3LGMFmjO2Pin7tfgFCAByPicgjJ2NSMxRddW69AjLtw6caaRZgAAAAUN4Lazp2QAAAAAAAAAAAV4AAAAAAAAAAAAAAABAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 1, 0, NULL); +INSERT INTO history_ledgers VALUES (4, '43eba0f2e949ac89f0a29f3ab7204a5a2f23d10aa9a5e0e8ba3dbde36a689649', 'e997c76a3479b89c2fce8a3e7134f33fee54d7d51366bb9f1c934c82351db20a', 4, 4, '2019-06-03 16:36:08', '2019-06-03 16:36:09.684354', '2019-06-03 16:36:09.684355', 17179869184, 16, 1000000000000000000, 1300, 100, 100000000, 1000000, 11, 'AAAAC+mXx2o0ebicL86KPnE08z/uVNfVE2a7nxyTTII1HbIKRxEyt9Sdk5w+y9RcNs5GuIx3q5SCV2cLUp7otU0J+TYAAAAAXPVMeAAAAAAAAAAAZelIPS3sQhACuKIks346158R3J91krq+J8/emGkxvSrCRXSow47Ezrlh3OofXwBHqV1ZrLuo5uGWoIlfLfLIggAAAAQN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0, NULL); +INSERT INTO history_ledgers VALUES (3, 'e997c76a3479b89c2fce8a3e7134f33fee54d7d51366bb9f1c934c82351db20a', '3f95ccfc1db00838f3650c12c71c07651cf86e9e3db3c5c8f82085841e4a1132', 4, 4, '2019-06-03 16:36:07', '2019-06-03 16:36:09.699554', '2019-06-03 16:36:09.699554', 12884901888, 16, 1000000000000000000, 900, 100, 100000000, 1000000, 11, 'AAAACz+VzPwdsAg482UMEsccB2Uc+G6ePbPFyPgghYQeShEyZ3Ek5ZIK1fLESXcLlE/00zAs06C9ekp1Jb/2erNycuwAAAAAXPVMdwAAAAAAAAAASmWYY0EUkBWM4g/+kngUijwIV7OrC5Zk/MHYGJjEEs3A0xjWpcXrlWCWbCTTf+JrKD4rkiunkbDO2xHi8A/5AAAAAAMN4Lazp2QAAAAAAAAAAAOEAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0, NULL); +INSERT INTO history_ledgers VALUES (2, '3f95ccfc1db00838f3650c12c71c07651cf86e9e3db3c5c8f82085841e4a1132', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 5, 5, '2019-06-03 16:36:06', '2019-06-03 16:36:09.716332', '2019-06-03 16:36:09.716333', 8589934592, 16, 1000000000000000000, 500, 100, 100000000, 1000000, 11, 'AAAAC2PZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZhnfbEDpHPYd45nQcT7XkfIMwSX9gFD9+fWat716QIWcAAAAAXPVMdgAAAAIAAAAIAAAAAQAAAAsAAAAIAAAAAwAPQkAAAAAAQ9m8LSG8b5W0buxEtqstVQz7IV/HrDmGEtdA4mCEDXy8hTt2YgRnTITIuz4qNZE5qMNTz0pE041VcUVGwmWaUwAAAAIN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 5, 0, NULL); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-06-03 16:36:09.738355', '2019-06-03 16:36:09.738355', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0, NULL); 
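+
+-- Illustrative note (assumed, not emitted by pg_dump): the bigint ids in the
+-- history_* data sections appear to follow Horizon's TOID layout,
+-- ledger_sequence << 32 | tx_application_order << 12 | op_index. For example,
+-- 25769807873 should decompose to ledger 6, transaction 1, operation 1:
+--
+--   SELECT 25769807873 >> 32              AS ledger,  -- 6
+--          (25769807873 >> 12) & 1048575  AS tx,      -- 1
+--          25769807873 & 4095             AS op;      -- 1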
+ + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 25769807873, 2); +INSERT INTO history_operation_participants VALUES (2, 25769807873, 3); +INSERT INTO history_operation_participants VALUES (3, 21474840577, 1); +INSERT INTO history_operation_participants VALUES (4, 17179873281, 4); +INSERT INTO history_operation_participants VALUES (5, 17179873281, 1); +INSERT INTO history_operation_participants VALUES (6, 17179877377, 5); +INSERT INTO history_operation_participants VALUES (7, 17179877377, 2); +INSERT INTO history_operation_participants VALUES (8, 17179881473, 5); +INSERT INTO history_operation_participants VALUES (9, 17179881473, 1); +INSERT INTO history_operation_participants VALUES (10, 17179885569, 4); +INSERT INTO history_operation_participants VALUES (11, 17179885569, 3); +INSERT INTO history_operation_participants VALUES (12, 12884905985, 3); +INSERT INTO history_operation_participants VALUES (13, 12884910081, 2); +INSERT INTO history_operation_participants VALUES (14, 12884914177, 1); +INSERT INTO history_operation_participants VALUES (15, 12884918273, 1); +INSERT INTO history_operation_participants VALUES (16, 8589938689, 6); +INSERT INTO history_operation_participants VALUES (17, 8589938689, 5); +INSERT INTO history_operation_participants VALUES (18, 8589942785, 6); +INSERT INTO history_operation_participants VALUES (19, 8589942785, 4); +INSERT INTO history_operation_participants VALUES (20, 8589946881, 6); +INSERT INTO history_operation_participants VALUES (21, 8589946881, 1); +INSERT INTO history_operation_participants VALUES (22, 8589950977, 6); +INSERT INTO history_operation_participants VALUES (23, 8589950977, 3); +INSERT INTO history_operation_participants VALUES (24, 8589955073, 6); +INSERT INTO history_operation_participants VALUES (25, 8589955073, 2); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 25, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (25769807873, 25769807872, 1, 2, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "path": [], "amount": "10.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "source_max": "10.0000000", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "source_amount": "10.0000000", "source_asset_code": "USD", "source_asset_type": "credit_alphanum4", "source_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (21474840577, 21474840576, 1, 3, '{"price": "1.0000000", "amount": "20.0000000", "price_r": {"d": 1, "n": 1}, "offer_id": 0, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_code": "EUR", "selling_asset_type": "credit_alphanum4", "buying_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "selling_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (17179873281, 17179873280, 1, 1, '{"to": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "from": 
"GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "amount": "20.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); +INSERT INTO history_operations VALUES (17179877377, 17179877376, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (17179881473, 17179881472, 1, 1, '{"to": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "from": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "amount": "20.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (17179885569, 17179885568, 1, 1, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (12884910081, 12884910080, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (12884914177, 12884914176, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (12884918273, 12884918272, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "trustor": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (8589938689, 8589938688, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", 
"starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589942785, 8589942784, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589946881, 8589946880, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589950977, 8589950976, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589955073, 8589955072, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_trades VALUES (25769807873, 0, '2019-06-03 16:36:10', 1, 2, 1, 100000000, 1, 2, 100000000, false, 1, 1, 4611686044197195777, 1); + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transaction_participants VALUES (1, 25769807872, 2); +INSERT INTO history_transaction_participants VALUES (2, 25769807872, 3); +INSERT INTO history_transaction_participants VALUES (3, 21474840576, 1); +INSERT INTO history_transaction_participants VALUES (4, 17179873280, 4); +INSERT INTO history_transaction_participants VALUES (5, 17179873280, 1); +INSERT INTO history_transaction_participants VALUES (6, 17179877376, 5); +INSERT INTO history_transaction_participants VALUES (7, 17179877376, 2); +INSERT INTO history_transaction_participants VALUES (8, 17179881472, 5); +INSERT INTO history_transaction_participants VALUES (9, 17179881472, 1); +INSERT INTO history_transaction_participants VALUES (10, 17179885568, 4); +INSERT INTO history_transaction_participants VALUES (11, 17179885568, 3); +INSERT INTO history_transaction_participants VALUES (12, 12884905984, 3); +INSERT INTO history_transaction_participants VALUES (13, 12884910080, 2); +INSERT INTO history_transaction_participants VALUES (14, 12884914176, 1); +INSERT INTO history_transaction_participants VALUES (15, 12884918272, 1); +INSERT INTO history_transaction_participants VALUES (16, 8589938688, 6); +INSERT INTO history_transaction_participants VALUES (17, 8589938688, 5); +INSERT INTO history_transaction_participants VALUES (18, 8589942784, 6); +INSERT INTO history_transaction_participants VALUES (19, 8589942784, 4); +INSERT INTO history_transaction_participants VALUES (20, 8589946880, 6); +INSERT INTO history_transaction_participants VALUES (21, 8589946880, 1); +INSERT INTO history_transaction_participants VALUES (22, 8589950976, 6); +INSERT INTO history_transaction_participants VALUES (23, 8589950976, 3); +INSERT INTO history_transaction_participants VALUES (24, 8589955072, 6); +INSERT INTO history_transaction_participants VALUES (25, 8589955072, 2); + + 
+-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 25, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('1d2a4be72470658f68db50eef29ea0af3f985ce18b5c218f03461d40c47dc292', 6, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934594, 100, 1, '2019-06-03 16:36:09.635075', '2019-06-03 16:36:09.635075', 25769807872, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAIAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAF9eEAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAABfXhAAAAAAAAAAAAAAAAAa7kvkwAAABAD49aRUuzXXeNHu1FfIYBbplBoP+b1B4uMGt2UGZt6jPKvwVORmMzfXDZaHBvIirsI8eNf+9F1EI0Fh9M/2jmCg==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAACAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAX14QAAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAF9eEAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAABfXhAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAMAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAEGQqwB//////////wAAAAEAAAAAAAAAAAAAAAMAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADWk6QB//////////wAAAAEAAAAAAAAAAAAAAAMAAAAFAAAAAgAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAABAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAL68IAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAABAAAABgAAAAIAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAABfXhAAAAAAEAAAABAAAAAAAAAAAAAAAAAAAAAwAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAC+vCAH//////////AAAAAQAAAAEAAAAAC+vCAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAR4aMAf/////////8AAAABAAAAAQAAAAAF9eEAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAvrwgB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAC+vCAAAAAAAAAAAAAAAAAQAAAAYAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAABfXhAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAF9eEAAAAAAAAAAAA=', 
'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{D49aRUuzXXeNHu1FfIYBbplBoP+b1B4uMGt2UGZt6jPKvwVORmMzfXDZaHBvIirsI8eNf+9F1EI0Fh9M/2jmCg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b52f16ffb98c047e33b9c2ec30880330cde71f85b3443dae2c5cb86c7d4d8452', 5, 1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934595, 100, 1, '2019-06-03 16:36:09.668167', '2019-06-03 16:36:09.668167', 21474840576, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAvrwgAAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAFvFIhaAAAAQFlXwaom7ylSTdyaO7qNM74Y+JUkA2o0uc7W2FzBlkVe2scznMMa+R8hVTblO5lQ6+FcTM5jIrWQqxqFFZbOkAw=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAEAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAvrwgAAAAABAAAAAQAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rI1AAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rI1AAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABwAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAMAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAvrwgB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAFAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAvrwgB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAC+vCAAAAAAAAAAAAAAAAAwAAAAQAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAC+vCAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAC+vCAH//////////AAAAAQAAAAEAAAAAC+vCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAIAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAQAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAC+vCAAAAAAEAAAABAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{WVfBqibvKVJN3Jo7uo0zvhj4lSQDajS5ztbYXMGWRV7axzOcwxr5HyFVNuU7mVDr4VxMzmMitZCrGoUVls6QDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('09c0147a62c828321ee899d0cccd92c81525eea71250720260321b3a24995e8b', 4, 1, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 8589934593, 100, 1, '2019-06-03 16:36:09.684613', '2019-06-03 16:36:09.684613', 17179873280, 
'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAL68IAAAAAAAAAAAEQithJAAAAQCaHhpiVN9E437IXFcHpfVrox1SO/NJtCmB2hgagMQHDRDGQMHN3qjScTOqqeEsNEuvK+n7I4b+9Fr0R3twmcgs=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAvrwgB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{JoeGmJU30TjfshcVwel9WujHVI780m0KYHaGBqAxAcNEMZAwc3eqNJxM6qp4Sw0S68r6fsjhv70WvRHe3CZyCw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('7736f0b869de0f74a5ed7f8d6529949238eb0f0421f3fc2bbc438084f21c8055', 4, 2, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934593, 100, 1, '2019-06-03 16:36:09.684847', '2019-06-03 16:36:09.684847', 17179877376, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAAAAAAH5kC3vAAAAQDjBSAulKc9tRqGg+OkVbKPz4olRQYUevyCfv0LAlqbXG6yPbpR0BR6o7mrimRm8O4VoRBGIATQB42NOWcFzdQw=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{OMFIC6Upz21GoaD46RVso/PiiVFBhR6/IJ+/QsCWptcbrI9ulHQFHqjuauKZGbw7hWhEEYgBNAHjY05ZwXN1DA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('f74cd54800d537c06dff35cc4783be881c2d670c1151a56ca7f951758dc7415d', 4, 3, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934594, 100, 1, '2019-06-03 16:36:09.685064', '2019-06-03 16:36:09.685065', 17179881472, 
'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAL68IAAAAAAAAAAAH5kC3vAAAAQOIKSlDQm9Urq2ujnvxZjGq6zJQncPTp8vl4sCC4Ra4MUnaHYDakRXTFoQlIFAr5t0oJwdBSs6TJ8M5VeGgBbQg=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAvrwgB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{4gpKUNCb1Sura6Oe/FmMarrMlCdw9Ony+XiwILhFrgxSdodgNqRFdMWhCUgUCvm3SgnB0FKzpMnwzlV4aAFtCA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('c93f80667f37df70a29ec0de96ff3381644ac828a4cea1cb6ceb1bcec6fff058', 4, 4, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 8589934594, 100, 1, '2019-06-03 16:36:09.685267', '2019-06-03 16:36:09.685267', 17179885568, 'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAAAAAAEQithJAAAAQG4l7kCAq5aqvS2d/HTtYc7LAa7pSUiiO4KyKJbqmsDgvckGC2dbhcro9tcvCZHfwqTV+ikv8Hm8Zfa63kYPkQY=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{biXuQICrlqq9LZ38dO1hzssBrulJSKI7grIoluqawOC9yQYLZ1uFyuj21y8Jkd/CpNX6KS/webxl9rreRg+RBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b5cadce05fc0ad5d6fe009b8b0debc0d3dfd32ea42b8eba3e9ea68c2746e410f', 3, 1, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934593, 100, 1, '2019-06-03 16:36:09.69977', '2019-06-03 16:36:09.69977', 12884905984, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAEnciLVAAAAQANQSzvpEBCAXvs1PgmH/UFbfAYt3OAggYPVTd0pjVcJaV3lDE/jOZMnLFZMkFEhg4dluVQxeDZAwTKUPandswg=', 
'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{A1BLO+kQEIBe+zU+CYf9QVt8Bi3c4CCBg9VN3SmNVwlpXeUMT+M5kycsVkyQUSGDh2W5VDF4NkDBMpQ9qd2zCA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('bd486dbdd02d460817671c4a5a7e9d6e865ca29cb41e62d7aaf70a2fee5b36de', 3, 2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934593, 100, 1, '2019-06-03 16:36:09.700046', '2019-06-03 16:36:09.700047', 12884910080, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQB9kmKW2q3v7Qfy8PMekEb1TTI5ixqkI0BogXrOt7gO162Qbkh2dSTUfeDovc0PAafhDXxthVAlsLujlBmyjBAY=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJnAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rJnAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{H2SYpbare/tB/Lw8x6QRvVNMjmLGqQjQGiBes63uA7XrZBuSHZ1JNR94Oi9zQ8Bp+ENfG2FUCWwu6OUGbKMEBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('811192c38643df73c015a5a1d77b802dff05d4f50fc6d10816aa75c0a6109f9a', 3, 3, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934593, 100, 1, '2019-06-03 16:36:09.70029', '2019-06-03 16:36:09.70029', 12884914176, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAFvFIhaAAAAQPlg7GLhJg0x7jpAw1Ew6H2XF6yRImfJIwFfx09Nui5btOJAFewFANfOaAB8FQZl5p3A5g3k6DHDigfUNUD16gc=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{+WDsYuEmDTHuOkDDUTDofZcXrJEiZ8kjAV/HT026Llu04kAV7AUA185oAHwVBmXmncDmDeToMcOKB9Q1QPXqBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('32e4ba1f218b6aa2420b497456a1b09090e3837e66b3495030d4edd60d0f0570', 3, 4, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934594, 100, 1, '2019-06-03 16:36:09.700565', '2019-06-03 16:36:09.700565', 12884918272, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAFvFIhaAAAAQMJmv+lhF5QZlgdIqBXDSdhEtgraTrRSwVr5d/BrNC28efHMoxYNa+2u9tSEdxU+hGX6JRW7wAF3bOpA8rxxxAE=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJOAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rJOAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{wma/6WEXlBmWB0ioFcNJ2ES2CtpOtFLBWvl38Gs0Lbx58cyjFg1r7a721IR3FT6EZfolFbvAAXds6kDyvHHEAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('666656a6eade2082c5780571267d9e4453eee5781ca9a58aa319eb0fe83455fd', 2, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-06-03 16:36:09.71652', '2019-06-03 16:36:09.71652', 8589938688, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAAAAAABVvwF9wAAAEBdDXe23U4e9C2SxpBLZRx1rJzSFLJ0xDD0uKGpmqbflDT+XXIq6UiDBzmFxt+GO+XqFoQPdrXT7p1oLZIHqTMP', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 
'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/4MAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTQMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{XQ13tt1OHvQtksaQS2Ucdayc0hSydMQw9LihqZqm35Q0/l1yKulIgwc5hcbfhjvl6haED3a10+6daC2SB6kzDw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b1f828384c56e4b024f4275f246580ababff1ae3b9ba61b03897357e57eebc20', 2, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 2, 100, 1, '2019-06-03 16:36:09.716742', '2019-06-03 16:36:09.716742', 8589942784, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEBdfnFSzZeh17zt82oMdqe4+/xns/kHBdGXf9BIBRYfVZ3DQT3awwZn5LqgIG9JqlvMmR1TKaxcoJQDuqGcCScM', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTQMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmoMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{XX5xUs2Xode87fNqDHanuPv8Z7P5BwXRl3/QSAUWH1Wdw0E92sMGZ+S6oCBvSapbzJkdUymsXKCUA7qhnAknDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2b2e82dbabb024b27a0c3140ca71d8ac9bc71831f9f5a3bd69eca3d88fb0ec5c', 2, 3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 3, 100, 1, '2019-06-03 16:36:09.716933', '2019-06-03 16:36:09.716933', 8589946880, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDJul1tLGLF4Vxwt0dDCVEf6tb5l4byMrGgCp+lVZMmxct54iNf2mxtjx6Md5ZJ4E4Dlcsf46EAhBGSUPsn8fYD', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmoMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6AMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ybpdbSxixeFccLdHQwlRH+rW+ZeG8jKxoAqfpVWTJsXLeeIjX9psbY8ejHeWSeBOA5XLH+OhAIQRklD7J/H2Aw==}', 'none', NULL, NULL, true, 100); 
+INSERT INTO history_transactions VALUES ('e17bae552da0105ad32f0b9aadfd0f623ef37eb486b10b044c19238360e455d7', 2, 4, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 4, 100, 1, '2019-06-03 16:36:09.71711', '2019-06-03 16:36:09.71711', 8589950976, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDYYfyOrmPhfki6lrP+oCfunJmRu2mfxl40o5qWR7y1YmP8poG+6Xqg41jKCWNwVoP717CVEPe70I0teWvTejkJ', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6AMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{2GH8jq5j4X5Iupaz/qAn7pyZkbtpn8ZeNKOalke8tWJj/KaBvul6oONYygljcFaD+9ewlRD3u9CNLXlr03o5CQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('cfd8816ed587c5ed88dea0eb00818caf38c0750e7740e05de3c27176e9aee8ee', 2, 5, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 5, 100, 1, '2019-06-03 16:36:09.717978', '2019-06-03 16:36:09.717979', 8589955072, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDNmQhdQeyMcWFWP8dVRkDtFS4tHICyKdaPkR6+/L7+tMzKWoUjbDAXscRYI+j6Fd/VFUaDzdYsWCAsH30WujIL', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrJ9XgwMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/4MAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{zZkIXUHsjHFhVj/HVUZA7RUuLRyAsinWj5Eevvy+/rTMylqFI2wwF7HEWCPo+hXf1RVGg83WLFggLB99FroyCw==}', 'none', NULL, NULL, true, 100); + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD 
CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_fee_account; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_fee_account ON history_transactions USING btree (fee_account) WHERE fee_account IS NOT NULL; + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_inner_hash; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX by_inner_hash ON history_transactions USING btree (inner_transaction_hash) WHERE inner_transaction_hash IS NOT NULL; + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + +-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: 
htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: 
index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + + +-- added manually +ALTER TABLE IF EXISTS ONLY public.key_value_store DROP CONSTRAINT IF EXISTS key_value_store_pkey; +DROP TABLE IF EXISTS public.key_value_store; +CREATE TABLE key_value_store ( + key character varying(255) NOT NULL, + value character varying(255) NOT NULL +); +INSERT INTO key_value_store VALUES ('exp_ingest_last_ledger', '0'); +ALTER TABLE ONLY key_value_store + ADD CONSTRAINT key_value_store_pkey PRIMARY KEY (key); + +CREATE TABLE accounts_signers ( + account character varying(64), + signer character varying(64), + weight integer NOT NULL, + -- we will query by signer so that is why signer is the first item in the composite key + PRIMARY KEY (signer, account) +); +ALTER TABLE history_transactions ADD account_muxed varchar(69) NULL, ADD fee_account_muxed varchar(69) NULL; +ALTER TABLE history_operations ADD source_account_muxed varchar(69) NULL; +ALTER TABLE history_effects ADD address_muxed varchar(69) NULL; + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/paths_strict_send-core.sql b/services/horizon/internal/test/scenarios/paths_strict_send-core.sql new file mode 100644 index 0000000000..811ad9d34d --- /dev/null +++ b/services/horizon/internal/test/scenarios/paths_strict_send-core.sql @@ -0,0 +1,775 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET 
check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + +SET search_path = public, pg_catalog; + +DROP INDEX IF EXISTS public.upgradehistbyseq; +DROP INDEX IF EXISTS public.scpquorumsbyseq; +DROP INDEX IF EXISTS public.scpenvsbyseq; +DROP INDEX IF EXISTS public.ledgersbyseq; +DROP INDEX IF EXISTS public.histfeebyseq; +DROP INDEX IF EXISTS public.histbyseq; +DROP INDEX IF EXISTS public.bestofferindex; +DROP INDEX IF EXISTS public.accountbalances; +ALTER TABLE IF EXISTS ONLY public.upgradehistory DROP CONSTRAINT IF EXISTS upgradehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txhistory DROP CONSTRAINT IF EXISTS txhistory_pkey; +ALTER TABLE IF EXISTS ONLY public.txfeehistory DROP CONSTRAINT IF EXISTS txfeehistory_pkey; +ALTER TABLE IF EXISTS ONLY public.trustlines DROP CONSTRAINT IF EXISTS trustlines_pkey; +ALTER TABLE IF EXISTS ONLY public.storestate DROP CONSTRAINT IF EXISTS storestate_pkey; +ALTER TABLE IF EXISTS ONLY public.scpquorums DROP CONSTRAINT IF EXISTS scpquorums_pkey; +ALTER TABLE IF EXISTS ONLY public.quoruminfo DROP CONSTRAINT IF EXISTS quoruminfo_pkey; +ALTER TABLE IF EXISTS ONLY public.pubsub DROP CONSTRAINT IF EXISTS pubsub_pkey; +ALTER TABLE IF EXISTS ONLY public.publishqueue DROP CONSTRAINT IF EXISTS publishqueue_pkey; +ALTER TABLE IF EXISTS ONLY public.peers DROP CONSTRAINT IF EXISTS peers_pkey; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_pkey; +ALTER TABLE IF EXISTS ONLY public.ledgerheaders DROP CONSTRAINT IF EXISTS ledgerheaders_ledgerseq_key; +ALTER TABLE IF EXISTS ONLY public.ban DROP CONSTRAINT IF EXISTS ban_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts DROP CONSTRAINT IF EXISTS accounts_pkey; +ALTER TABLE IF EXISTS ONLY public.accountdata DROP CONSTRAINT IF EXISTS accountdata_pkey; +DROP TABLE IF EXISTS public.upgradehistory; +DROP TABLE IF EXISTS public.txhistory; +DROP TABLE IF EXISTS public.txfeehistory; +DROP TABLE IF EXISTS public.trustlines; +DROP TABLE IF EXISTS public.storestate; +DROP TABLE IF EXISTS public.scpquorums; +DROP TABLE IF EXISTS public.scphistory; +DROP TABLE IF EXISTS public.quoruminfo; +DROP TABLE IF EXISTS public.pubsub; +DROP TABLE IF EXISTS public.publishqueue; +DROP TABLE IF EXISTS public.peers; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.ledgerheaders; +DROP TABLE IF EXISTS public.ban; +DROP TABLE IF EXISTS public.accounts; +DROP TABLE IF EXISTS public.accountdata; +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accountdata; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accountdata ( + accountid character varying(56) NOT NULL, + dataname character varying(88) NOT NULL, + datavalue character varying(112) NOT NULL, + lastmodified integer NOT NULL +); + + +-- +-- Name: accounts; Type: TABLE; Schema: public; Owner: - +-- 
+ +CREATE TABLE accounts ( + accountid character varying(56) NOT NULL, + balance bigint NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + seqnum bigint NOT NULL, + numsubentries integer NOT NULL, + inflationdest character varying(56), + homedomain character varying(44) NOT NULL, + thresholds text NOT NULL, + flags integer NOT NULL, + signers text, + lastmodified integer NOT NULL, + CONSTRAINT accounts_balance_check CHECK ((balance >= 0)), + CONSTRAINT accounts_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT accounts_numsubentries_check CHECK ((numsubentries >= 0)), + CONSTRAINT accounts_sellingliabilities_check CHECK ((sellingliabilities >= 0)) +); + + +-- +-- Name: ban; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ban ( + nodeid character(56) NOT NULL +); + + +-- +-- Name: ledgerheaders; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE ledgerheaders ( + ledgerhash character(64) NOT NULL, + prevhash character(64) NOT NULL, + bucketlisthash character(64) NOT NULL, + ledgerseq integer, + closetime bigint NOT NULL, + data text NOT NULL, + CONSTRAINT ledgerheaders_closetime_check CHECK ((closetime >= 0)), + CONSTRAINT ledgerheaders_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT offers_amount_check CHECK ((amount >= 0)), + CONSTRAINT offers_offerid_check CHECK ((offerid >= 0)) +); + + +-- +-- Name: peers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE peers ( + ip character varying(15) NOT NULL, + port integer DEFAULT 0 NOT NULL, + nextattempt timestamp without time zone NOT NULL, + numfailures integer DEFAULT 0 NOT NULL, + type integer NOT NULL, + CONSTRAINT peers_numfailures_check CHECK ((numfailures >= 0)), + CONSTRAINT peers_port_check CHECK (((port > 0) AND (port <= 65535))) +); + + +-- +-- Name: publishqueue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE publishqueue ( + ledger integer NOT NULL, + state text +); + + +-- +-- Name: pubsub; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE pubsub ( + resid character(32) NOT NULL, + lastread integer +); + + +-- +-- Name: quoruminfo; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE quoruminfo ( + nodeid character(56) NOT NULL, + qsethash character(64) NOT NULL +); + + +-- +-- Name: scphistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scphistory ( + nodeid character(56) NOT NULL, + ledgerseq integer NOT NULL, + envelope text NOT NULL, + CONSTRAINT scphistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: scpquorums; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE scpquorums ( + qsethash character(64) NOT NULL, + lastledgerseq integer NOT NULL, + qset text NOT NULL, + CONSTRAINT scpquorums_lastledgerseq_check CHECK ((lastledgerseq >= 0)) +); + + +-- +-- Name: storestate; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE storestate ( + statename character(32) NOT NULL, + state text +); + + +-- +-- Name: trustlines; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE trustlines ( + accountid character varying(56) NOT NULL, + assettype integer NOT NULL, + issuer character varying(56) NOT 
NULL, + assetcode character varying(12) NOT NULL, + tlimit bigint NOT NULL, + balance bigint NOT NULL, + buyingliabilities bigint, + sellingliabilities bigint, + flags integer NOT NULL, + lastmodified integer NOT NULL, + CONSTRAINT trustlines_balance_check CHECK ((balance >= 0)), + CONSTRAINT trustlines_buyingliabilities_check CHECK ((buyingliabilities >= 0)), + CONSTRAINT trustlines_sellingliabilities_check CHECK ((sellingliabilities >= 0)), + CONSTRAINT trustlines_tlimit_check CHECK ((tlimit > 0)) +); + + +-- +-- Name: txfeehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txfeehistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txchanges text NOT NULL, + CONSTRAINT txfeehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: txhistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE txhistory ( + txid character(64) NOT NULL, + ledgerseq integer NOT NULL, + txindex integer NOT NULL, + txbody text NOT NULL, + txresult text NOT NULL, + txmeta text NOT NULL, + CONSTRAINT txhistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Name: upgradehistory; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE upgradehistory ( + ledgerseq integer NOT NULL, + upgradeindex integer NOT NULL, + upgrade text NOT NULL, + changes text NOT NULL, + CONSTRAINT upgradehistory_ledgerseq_check CHECK ((ledgerseq >= 0)) +); + + +-- +-- Data for Name: accountdata; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO accounts VALUES ('GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 999999994999999500, NULL, NULL, 5, 0, NULL, '', 'AQAAAA==', 0, NULL, 2); +INSERT INTO accounts VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 999999900, NULL, NULL, 8589934593, 1, NULL, '', 'AQAAAA==', 0, NULL, 3); +INSERT INTO accounts VALUES ('GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 999999800, NULL, NULL, 8589934594, 0, NULL, '', 'AQAAAA==', 0, NULL, 4); +INSERT INTO accounts VALUES ('GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 999999800, NULL, NULL, 8589934594, 0, NULL, '', 'AQAAAA==', 0, NULL, 4); +INSERT INTO accounts VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 999999600, NULL, NULL, 8589934596, 1, NULL, '', 'AQAAAA==', 0, NULL, 6); +INSERT INTO accounts VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 999999500, 68000000, 88000000, 8589934597, 5, NULL, '', 'AQAAAA==', 0, NULL, 6); + + +-- +-- Data for Name: ban; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: ledgerheaders; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO ledgerheaders VALUES ('63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '0000000000000000000000000000000000000000000000000000000000000000', '572a2e32ff248a07b0e70fd1f6d318c1facd20b6cc08c33d5775259868125a16', 1, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('04e3ebeaf9d1b8e0f5458d530169fe0d89f1bfdd7bd4dc8d4b0584b236d17e15', 
'63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', '29da0928d9a60e8f21bd58d97a0801e8339d4ac4914af60d283ae2f792cf225e', 2, 1570470182, 'AAAAAGPZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZhnfbEDpHPYd45nQcT7XkfIMwSX9gFD9+fWat716QIWcAAAAAXZt5JgAAAAAAAAAAQ9m8LSG8b5W0buxEtqstVQz7IV/HrDmGEtdA4mCEDXwp2gko2aYOjyG9WNl6CAHoM51KxJFK9g0oOuL3ks8iXgAAAAIN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('9725bcddbfde2a2797d753c7395ed68800e4df27e923ba942925563e37517a8c', '04e3ebeaf9d1b8e0f5458d530169fe0d89f1bfdd7bd4dc8d4b0584b236d17e15', 'effe8baf2abbf8f344532e730314745632ec3e2b6fc58933010e0ce7f958c7fa', 3, 1570470183, 'AAAADATj6+r50bjg9UWNUwFp/g2J8b/de9TcjUsFhLI20X4VjuzlLlAPf+1swU2fx1atuTf1fyOeFxD9jxoTthHXBpoAAAAAXZt5JwAAAAIAAAAIAAAAAQAAAAwAAAAIAAAAAwAPQkAAAAAAy7QXeOkQQgUGgHQkca+qdNuHDZuWECktEZYOGCwuFi/v/ouvKrv480RTLnMDFHRWMuw+K2/FiTMBDgzn+VjH+gAAAAMN4Lazp2QAAAAAAAAAAAOEAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('c0a1328ca6fdbbf5a34bd31a277f579714fb18d95b304ea0cd8bdb9dc7e88fd4', '9725bcddbfde2a2797d753c7395ed68800e4df27e923ba942925563e37517a8c', '1a82565f62d983982fbaa000e5cbce3cdfed959ad7c4ddad124bd1c72788a256', 4, 1570470184, 'AAAADJclvN2/3ionl9dTxzle1ogA5N8n6SO6lCklVj43UXqMBOKee+9kgKkF22YkX7RWJFTDQo4H0Ar1ZUwhsBaJgfgAAAAAXZt5KAAAAAAAAAAAMjGOUmwgY7qRb0Vt3UuR5Zqme5k2qjH87qHhARew0qwaglZfYtmDmC+6oADly8483+2VmtfE3a0SS9HHJ4iiVgAAAAQN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('dee967b48bc05fed720d42d007138d89cd0e0637ddf44907e2491436d0d7c686', 'c0a1328ca6fdbbf5a34bd31a277f579714fb18d95b304ea0cd8bdb9dc7e88fd4', '0b29b2930dcd9a8b05829e00800682c6ee0727b3ab5c7bb13c76412f6f388964', 5, 1570470185, 'AAAADMChMoym/bv1o0vTGid/V5cU+xjZWzBOoM2L253H6I/UTbRk/xXOhaS2qGx37FHhNzwmejvdmduwoSd4w58iA78AAAAAXZt5KQAAAAAAAAAAliNGcBHBZ7FQDpA0n+37lxddf7Ir5kZ1Q7qncRnxzWkLKbKTDc2aiwWCngCABoLG7gcns6tce7E8dkEvbziJZAAAAAUN4Lazp2QAAAAAAAAAAAZAAAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO ledgerheaders VALUES ('d846337fd9e276439446d2a542fb97bc6aa16500ead5cd6e9128777cdede107c', 'dee967b48bc05fed720d42d007138d89cd0e0637ddf44907e2491436d0d7c686', 'a1adbc333fbf7d62f7ca99dfce9b5e1f736f4f78a9d460087d2fd5014b8b4729', 6, 1570470186, 'AAAADN7pZ7SLwF/tcg1C0AcTjYnNDgY33fRJB+JJFDbQ18aGD5JAw28eDCmD21TuxPrnW6GZ9oRbNGkPhI/dam9g1yMAAAAAXZt5KgAAAAAAAAAAeAVLzxvwzAkwUB36uyuX61OBzxZJsiQl/YRoHG7lYK2hrbwzP799YvfKmd/Om14fc29PeKnUYAh9L9UBS4tHKQAAAAYN4Lazp2QAAAAAAAAAAAdsAAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO offers VALUES 
('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 2, 'AAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2Ek=', 'AAAAAA==', 81600000, 5, 6, 0.83333333333333337, 0, 6); +INSERT INTO offers VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 3, 'AAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2Ek=', 'AAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8=', 130000000, 10, 13, 0.769230769230769273, 0, 6); +INSERT INTO offers VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 1, 'AAAAAA==', 'AAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8=', 88000000, 10, 11, 0.909090909090909061, 0, 6); + + +-- +-- Data for Name: peers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: publishqueue; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: pubsub; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: quoruminfo; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: scphistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scphistory VALUES ('GBYLEW6R4J4N5F3NFJM2YH6AXE7EATAYKALAJ3WSB2V5MVXGX3HAB3BL', 2, 'AAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAAAAAAAIAAAACAAAAAQAAADCGd9sQOkc9h3jmdBxPteR8gzBJf2AUP359Zq3vXpAhZwAAAABdm3kmAAAAAAAAAAAAAAABAAkg1odu5XOD0FF8kYlmFEfyLV75kHmKZ/B/AU4BWPAAAABA97WGwNTX3bVfOeE/TNrvVi/XixL54mbd1dRpABhXR/xaqfTjD/mYZSAyCCfPAUS/asFBcOpADMv4FGdoQhagDg=='); +INSERT INTO scphistory VALUES ('GBYLEW6R4J4N5F3NFJM2YH6AXE7EATAYKALAJ3WSB2V5MVXGX3HAB3BL', 3, 'AAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAAAAAAAMAAAACAAAAAQAAAEiO7OUuUA9/7WzBTZ/HVq25N/V/I54XEP2PGhO2EdcGmgAAAABdm3knAAAAAgAAAAgAAAABAAAADAAAAAgAAAADAA9CQAAAAAAAAAABAAkg1odu5XOD0FF8kYlmFEfyLV75kHmKZ/B/AU4BWPAAAABA80L5bi7wveFuSa63BKwF2QP5RGhLKDj9mFQjX6+w1VBROQxrAvLiehZxJZqXM3bgLoAsqmVmTGpYabn65Z3kBQ=='); +INSERT INTO scphistory VALUES ('GBYLEW6R4J4N5F3NFJM2YH6AXE7EATAYKALAJ3WSB2V5MVXGX3HAB3BL', 4, 'AAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAAAAAAAQAAAACAAAAAQAAADAE4p5772SAqQXbZiRftFYkVMNCjgfQCvVlTCGwFomB+AAAAABdm3koAAAAAAAAAAAAAAABAAkg1odu5XOD0FF8kYlmFEfyLV75kHmKZ/B/AU4BWPAAAABAD7GPxt79WHcfAAbsLNnGqTV1+TTw5ZrhYyyTqW/EdoUlGp2TQPusZM8Ycsxjg5wyC+XxeY9YH93nLsz7y7EoBw=='); +INSERT INTO scphistory VALUES ('GBYLEW6R4J4N5F3NFJM2YH6AXE7EATAYKALAJ3WSB2V5MVXGX3HAB3BL', 5, 'AAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAAAAAAAUAAAACAAAAAQAAADBNtGT/Fc6FpLaobHfsUeE3PCZ6O92Z27ChJ3jDnyIDvwAAAABdm3kpAAAAAAAAAAAAAAABAAkg1odu5XOD0FF8kYlmFEfyLV75kHmKZ/B/AU4BWPAAAABAea45z7ZLRADT+WH4Vsp7rGiJa3DjlB0Jvy6iaefVhK/7iVFTHssC9JGxkqkho7wGZNq0D0yX2wW4LMCJUcpWDw=='); +INSERT INTO scphistory VALUES ('GBYLEW6R4J4N5F3NFJM2YH6AXE7EATAYKALAJ3WSB2V5MVXGX3HAB3BL', 6, 'AAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAAAAAAAYAAAACAAAAAQAAADAPkkDDbx4MKYPbVO7E+udboZn2hFs0aQ+Ej91qb2DXIwAAAABdm3kqAAAAAAAAAAAAAAABAAkg1odu5XOD0FF8kYlmFEfyLV75kHmKZ/B/AU4BWPAAAABAO1va2JZrLZFpRiZUzvxmtvD2/99q0ujZMina8hd6ksF9FQhj51OjttjorG1SLWt3k/EDfN/OVLZvbX8EHK/3BQ=='); + + +-- +-- Data for Name: scpquorums; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO scpquorums VALUES ('000920d6876ee57383d0517c9189661447f22d5ef990798a67f07f014e0158f0', 6, 'AAAAAQAAAAEAAAAAcLJb0eJ43pdtKlmsH8C5PkBMGFAWBO7SDqvWVua+zgAAAAAA'); + + +-- +-- Data for Name: storestate; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO storestate VALUES ('databaseschema ', '10'); +INSERT INTO storestate VALUES ('networkpassphrase ', 'Test SDF Network ; September 2015'); +INSERT INTO storestate 
VALUES ('forcescponnextlaunch ', 'false'); +INSERT INTO storestate VALUES ('lastscpdata3 ', 'AAAAAgAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAAAAAADAAAAAwAJINaHbuVzg9BRfJGJZhRH8i1e+ZB5imfwfwFOAVjwAAAAAQAAAEiO7OUuUA9/7WzBTZ/HVq25N/V/I54XEP2PGhO2EdcGmgAAAABdm3knAAAAAgAAAAgAAAABAAAADAAAAAgAAAADAA9CQAAAAAAAAAABAAAASI7s5S5QD3/tbMFNn8dWrbk39X8jnhcQ/Y8aE7YR1waaAAAAAF2beScAAAACAAAACAAAAAEAAAAMAAAACAAAAAMAD0JAAAAAAAAAAECyyTANbte/XFfTAakmk1mARolzXxtxvNYHwscdvSgQbkSOrZTduXLuwMwIJZLQMV1bDPDptnRrIGhTJHNBphAKAAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAAAAAAAMAAAACAAAAAQAAAEiO7OUuUA9/7WzBTZ/HVq25N/V/I54XEP2PGhO2EdcGmgAAAABdm3knAAAAAgAAAAgAAAABAAAADAAAAAgAAAADAA9CQAAAAAAAAAABAAkg1odu5XOD0FF8kYlmFEfyLV75kHmKZ/B/AU4BWPAAAABA80L5bi7wveFuSa63BKwF2QP5RGhLKDj9mFQjX6+w1VBROQxrAvLiehZxJZqXM3bgLoAsqmVmTGpYabn65Z3kBQAAAAEE4+vq+dG44PVFjVMBaf4NifG/3XvU3I1LBYSyNtF+FQAAAAQAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAABkAAAAAgAAAAEAAAAAAAAAAAAAAAEAAAAAAAAABgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vf/////////8AAAAAAAAAAW8UiFoAAABA+WDsYuEmDTHuOkDDUTDofZcXrJEiZ8kjAV/HT026Llu04kAV7AUA185oAHwVBmXmncDmDeToMcOKB9Q1QPXqBwAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAGQAAAACAAAAAgAAAAAAAAAAAAAAAQAAAAAAAAAGAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2El//////////wAAAAAAAAABbxSIWgAAAEDCZr/pYReUGZYHSKgVw0nYRLYK2k60UsFa+XfwazQtvHnxzKMWDWvtrvbUhHcVPoRl+iUVu8ABd2zqQPK8ccQBAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQB9kmKW2q3v7Qfy8PMekEb1TTI5ixqkI0BogXrOt7gO162Qbkh2dSTUfeDovc0PAafhDXxthVAlsLujlBmyjBAYAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAABkAAAAAgAAAAEAAAAAAAAAAAAAAAEAAAAAAAAABgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJf/////////8AAAAAAAAAASdyItUAAABAA1BLO+kQEIBe+zU+CYf9QVt8Bi3c4CCBg9VN3SmNVwlpXeUMT+M5kycsVkyQUSGDh2W5VDF4NkDBMpQ9qd2zCAAAAAEAAAABAAAAAQAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAA='); +INSERT INTO storestate VALUES ('lastclosedledger ', 'd846337fd9e276439446d2a542fb97bc6aa16500ead5cd6e9128777cdede107c'); +INSERT INTO storestate VALUES ('lastscpdata2 ', 
'AAAAAgAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAAAAAACAAAAAwAJINaHbuVzg9BRfJGJZhRH8i1e+ZB5imfwfwFOAVjwAAAAAQAAADCGd9sQOkc9h3jmdBxPteR8gzBJf2AUP359Zq3vXpAhZwAAAABdm3kmAAAAAAAAAAAAAAABAAAAMIZ32xA6Rz2HeOZ0HE+15HyDMEl/YBQ/fn1mre9ekCFnAAAAAF2beSYAAAAAAAAAAAAAAEDBvrSFK0NlFcw5z1gu1JYSomcNGNf/tXff9SuRwwOeVyGx0lx6Ukl6UuUcK0S2GQw0Hqfi1Vz+nRx8w/A72CkHAAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAAAAAAAIAAAACAAAAAQAAADCGd9sQOkc9h3jmdBxPteR8gzBJf2AUP359Zq3vXpAhZwAAAABdm3kmAAAAAAAAAAAAAAABAAkg1odu5XOD0FF8kYlmFEfyLV75kHmKZ/B/AU4BWPAAAABA97WGwNTX3bVfOeE/TNrvVi/XixL54mbd1dRpABhXR/xaqfTjD/mYZSAyCCfPAUS/asFBcOpADMv4FGdoQhagDgAAAAFj2Y9TbuaNGye1uJ8jr1MRt1aaJPrxQDrQtStjOwe+mQAAAAUAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAABkAAAAAAAAAAQAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAAAAAAFW/AX3AAAAQNhh/I6uY+F+SLqWs/6gJ+6cmZG7aZ/GXjSjmpZHvLViY/ymgb7peqDjWMoJY3BWg/vXsJUQ97vQjS15a9N6OQkAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAABkAAAAAAAAAAEAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAAAAAAFW/AX3AAAAQF0Nd7bdTh70LZLGkEtlHHWsnNIUsnTEMPS4oamapt+UNP5dcirpSIMHOYXG34Y75eoWhA92tdPunWgtkgepMw8AAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAABkAAAAAAAAAAUAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAAAAAAFW/AX3AAAAQM2ZCF1B7IxxYVY/x1VGQO0VLi0cgLIp1o+RHr78vv60zMpahSNsMBexxFgj6PoV39UVRoPN1ixYICwffRa6MgsAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAABkAAAAAAAAAAMAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAAAAAAFW/AX3AAAAQMm6XW0sYsXhXHC3R0MJUR/q1vmXhvIysaAKn6VVkybFy3niI1/abG2PHox3lkngTgOVyx/joQCEEZJQ+yfx9gMAAAAAYvwdC9CRsrYcDdZWNGsqaNfTR8bywsjubQRHAlb8BfcAAABkAAAAAAAAAAIAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAAAAAAFW/AX3AAAAQF1+cVLNl6HXvO3zagx2p7j7/Gez+QcF0Zd/0EgFFh9VncNBPdrDBmfkuqAgb0mqW8yZHVMprFyglAO6oZwJJwwAAAABAAAAAQAAAAEAAAAAcLJb0eJ43pdtKlmsH8C5PkBMGFAWBO7SDqvWVua+zgAAAAAA'); +INSERT INTO storestate VALUES ('ledgerupgrades ', '{ + "time": 1570470182, + "version": { + "has": false + }, + "fee": { + "has": false + }, + "maxtxsize": { + "has": false + }, + "reserve": { + "has": false + } +}'); +INSERT INTO storestate VALUES ('historyarchivestate ', '{ + "version": 1, + "server": "v12.0.0rc2", + "currentLedger": 6, + "currentBuckets": [ + { + "curr": "4bcee15469f0d3a497f01a63216934f640a81d9a48a21afb3774782c75f003b5", + "next": { + "state": 0 + }, + "snap": "0b3d2bcf5efd7f7039611c73ac27aadf43b62bee6c9d34d33f27522670cecbfc" + }, + { + "curr": "347da7ad807372ae45d5a54574e759ebab00224880f1c6f757adb9e80215cc53", + "next": { + "state": 1, + "output": "0b3d2bcf5efd7f7039611c73ac27aadf43b62bee6c9d34d33f27522670cecbfc" + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + 
"state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "curr": "0000000000000000000000000000000000000000000000000000000000000000", + "next": { + "state": 0 + }, + "snap": "0000000000000000000000000000000000000000000000000000000000000000" + } + ] +}'); +INSERT INTO storestate VALUES ('lastscpdata4 ', 'AAAAAgAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAAAAAAEAAAAAwAJINaHbuVzg9BRfJGJZhRH8i1e+ZB5imfwfwFOAVjwAAAAAQAAAJgE4p5772SAqQXbZiRftFYkVMNCjgfQCvVlTCGwFomB+AAAAABdm3koAAAAAAAAAAEAAAAAcLJb0eJ43pdtKlmsH8C5PkBMGFAWBO7SDqvWVua+zgAAAABA+erO6RB/eKa0Gq4LNTvv3fprsEXSlRZHfA5nFPS8HxwpfejOBJupVHZJfFSUOAGpiGI8VFsPp9H0zU5NWnjbDwAAAAEAAACYBOKee+9kgKkF22YkX7RWJFTDQo4H0Ar1ZUwhsBaJgfgAAAAAXZt5KAAAAAAAAAABAAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAQPnqzukQf3imtBquCzU77936a7BF0pUWR3wOZxT0vB8cKX3ozgSbqVR2SXxUlDgBqYhiPFRbD6fR9M1OTVp42w8AAABAvb8ZqoOuCHd8VJ9nXe6uLP7K2Ge8R4N8zg39sGXqA5kGE+PfHBnLaTUI6QWzoaL4+XWWfqYu1unkaSEKtBKGDwAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAAAAAAEAAAAAgAAAAEAAAAwBOKee+9kgKkF22YkX7RWJFTDQo4H0Ar1ZUwhsBaJgfgAAAAAXZt5KAAAAAAAAAAAAAAAAQAJINaHbuVzg9BRfJGJZhRH8i1e+ZB5imfwfwFOAVjwAAAAQA+xj8be/Vh3HwAG7CzZxqk1dfk08OWa4WMsk6lvxHaFJRqdk0D7rGTPGHLMY4OcMgvl8XmPWB/d5y7M+8uxKAcAAAABlyW83b/eKieX11PHOV7WiADk3yfpI7qUKSVWPjdReowAAAAEAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAAAAAAH5kC3vAAAAQMq2EpK0LXwZiSrwXsmACrBqgXR/+kPIpG1UMgZP4+aV4vT6OEwvAgRBoKaRPjJd5OX8gOBOJv0RKPeQObZm4QwAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAABkAAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygAAAAAAAAAAAfmQLe8AAABAILGF4HgJmtiIV8wBbpySP+NMnMeKHsK1HxMPQZy9Vvr/oFaL1XacFC8O3/Lc2cG3KxsUQ4zUZXrrEW27xUUYBAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAGQAAAACAAAAAQAAAAAAAAAAAAAAAQAAAAAAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAABEIrYSQAAAEDFv6ovjgqy/KZ3M7qyegY3JUW1wmMANASTWsCm1nM3wMEboaL9tplG9X2tgRi5Yh3/EaukYioETwS79R6yhyMLAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAAAAAAEQithJAAAAQG4l7kCAq5aqvS2d/HTtYc7LAa7pSUiiO4KyKJbqmsDgvckGC2dbhcro9tcvCZHfwqTV+ikv8Hm8Zfa63kYPkQYAAAABAAAAAQAAAAEAAAAAcLJb0eJ43pdtKlmsH8C5PkBMGFAWBO7SDqvWVua+zgAAAAAA'); +INSERT INTO storestate VALUES ('lastscpdata5 ', 
'AAAAAgAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAAAAAAFAAAAAwAJINaHbuVzg9BRfJGJZhRH8i1e+ZB5imfwfwFOAVjwAAAAAQAAAJhNtGT/Fc6FpLaobHfsUeE3PCZ6O92Z27ChJ3jDnyIDvwAAAABdm3kpAAAAAAAAAAEAAAAAcLJb0eJ43pdtKlmsH8C5PkBMGFAWBO7SDqvWVua+zgAAAABAwvXSEtyUOaGouDcwqqLDS3y5LV7LVevLd9p5TeuH7+YeZaJk+FjjpxrzMEdrtTtX+kdVeRAUzLHGsTqXleHGDgAAAAEAAACYTbRk/xXOhaS2qGx37FHhNzwmejvdmduwoSd4w58iA78AAAAAXZt5KQAAAAAAAAABAAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAQML10hLclDmhqLg3MKqiw0t8uS1ey1Xry3faeU3rh+/mHmWiZPhY46ca8zBHa7U7V/pHVXkQFMyxxrE6l5Xhxg4AAABAr1Ye6h5qmE0jbPB1GUgviVW3f/r5t9rmyjW6K64TqHd3M7znr6UMZnNBdlXTIQMZ8Q9jvZAep19WSJgBlgw1AQAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAAAAAAFAAAAAgAAAAEAAAAwTbRk/xXOhaS2qGx37FHhNzwmejvdmduwoSd4w58iA78AAAAAXZt5KQAAAAAAAAAAAAAAAQAJINaHbuVzg9BRfJGJZhRH8i1e+ZB5imfwfwFOAVjwAAAAQHmuOc+2S0QA0/lh+FbKe6xoiWtw45QdCb8uomnn1YSv+4lRUx7LAvSRsZKpIaO8BmTatA9Ml9sFuCzAiVHKVg8AAAABwKEyjKb9u/WjS9MaJ39XlxT7GNlbME6gzYvbncfoj9QAAAADAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAADk4cAAAAAAUAAAAGAAAAAAAAAAAAAAAAAAAAAW8UiFoAAABAiXHzI6rj32/ZduOJlIh8+WGSNLsppJ12Pj/ektn20VBD7k0xWj92CezdSrgA572XQ3hUXWYUoP7PGvkyXDYzCwAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAGQAAAACAAAAAwAAAAAAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAA0c7wAAAAAKAAAACwAAAAAAAAAAAAAAAAAAAAFvFIhaAAAAQIckOhNL/RDwDXID2GrfNBTp5W2UglR5rqkUT9nsUJf6mEgJnoPspeH9PIwiQimOWeDgS1cxNbCfh+Vt1t1sIgEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAABkAAAAAgAAAAUAAAAAAAAAAAAAAAEAAAAAAAAAAwAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAD39JAAAAAAoAAAANAAAAAAAAAAAAAAAAAAAAAW8UiFoAAABACwZ3ihzu/pfA3qB9x8tf/ZgjCNEMV48AzM5RBz4GYhV5VLvXC7nyzosbuJhR8ZACLrNQjqisSZzYAeMDILMJBQAAAAEAAAABAAAAAQAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAA='); +INSERT INTO storestate VALUES ('lastscpdata6 ', 
'AAAAAgAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAAAAAAGAAAAAwAJINaHbuVzg9BRfJGJZhRH8i1e+ZB5imfwfwFOAVjwAAAAAQAAAJgPkkDDbx4MKYPbVO7E+udboZn2hFs0aQ+Ej91qb2DXIwAAAABdm3kqAAAAAAAAAAEAAAAAcLJb0eJ43pdtKlmsH8C5PkBMGFAWBO7SDqvWVua+zgAAAABA1sf2FGS0TcDIh5qnHCd5GRFGHlZpCTESeUbeRHZx56mhfIQTARY6exrKFin4yEsbaq3VyzNuY2GNPgbSwj1jDwAAAAEAAACYD5JAw28eDCmD21TuxPrnW6GZ9oRbNGkPhI/dam9g1yMAAAAAXZt5KgAAAAAAAAABAAAAAHCyW9HieN6XbSpZrB/AuT5ATBhQFgTu0g6r1lbmvs4AAAAAQNbH9hRktE3AyIeapxwneRkRRh5WaQkxEnlG3kR2ceepoXyEEwEWOnsayhYp+MhLG2qt1cszbmNhjT4G0sI9Yw8AAABAhih/MHDJn47dMHwzEYgoYfU6PqAw/BcWnDeLCyjGdHgD/Lln3meJPfnL/a92UoBSFQve6sx8v5NAZBi4u463CwAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAAAAAAGAAAAAgAAAAEAAAAwD5JAw28eDCmD21TuxPrnW6GZ9oRbNGkPhI/dam9g1yMAAAAAXZt5KgAAAAAAAAAAAAAAAQAJINaHbuVzg9BRfJGJZhRH8i1e+ZB5imfwfwFOAVjwAAAAQDtb2tiWay2RaUYmVM78Zrbw9v/fatLo2TIp2vIXepLBfRUIY+dTo7bY6KxtUi1rd5PxA3zfzlS2b21/BByv9wUAAAAB3ulntIvAX+1yDULQBxONic0OBjfd9EkH4kkUNtDXxoYAAAADAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAA0AAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAHJw4AAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAATEtAAAAAAEAAAAAAAAAAAAAAAGu5L5MAAAAQH6S7x/QLCpgt/2hZNZhXEpFpaycU6WjeS3zLM1GjefNG7btD4bLCuYyWJxvcbNf768ZPzJXPOdET7YCwh5pAgUAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAABkAAAAAgAAAAIAAAAAAAAAAAAAAAEAAAAAAAAADQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAX14QAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAmJaAAAAAAAAAAAAAAAABruS+TAAAAEDIKDOd7/DYo6q57x9x6hXDxdoXIRTIDVByCYv5Ju1NH2LJhuGBuhr3sN6kUPzNNVzSpTBp2s6VvHz11OYcgYsEAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAA0AAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAHv6SAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAAAAAAAAa7kvkwAAABAAI+p6icOLlSZyUSUJA+s0DhL+MTDKA3eWve50GPHfwobaeec6XCmc0ekSt01nwS4NLmfyfTGZn8dRRZCgQU3AQAAAAEAAAABAAAAAQAAAABwslvR4njel20qWawfwLk+QEwYUBYE7tIOq9ZW5r7OAAAAAAA='); + + +-- +-- Data for Name: trustlines; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO trustlines VALUES ('GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 'USD', 9223372036854775807, 780000000, NULL, NULL, 1, 6); +INSERT INTO trustlines VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 'USD', 9223372036854775807, 1220000000, 180000000, 0, 1, 6); +INSERT INTO trustlines VALUES ('GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 1, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 'EUR', 9223372036854775807, 711600000, 0, 211600000, 1, 6); +INSERT INTO trustlines VALUES ('GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 1, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 'EUR', 9223372036854775807, 1288400000, NULL, NULL, 1, 6); + + +-- +-- Data for Name: txfeehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txfeehistory VALUES ('666656a6eade2082c5780571267d9e4453eee5781ca9a58aa319eb0fe83455fd', 2, 1, 
'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('b1f828384c56e4b024f4275f246580ababff1ae3b9ba61b03897357e57eebc20', 2, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('2b2e82dbabb024b27a0c3140ca71d8ac9bc71831f9f5a3bd69eca3d88fb0ec5c', 2, 3, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('e17bae552da0105ad32f0b9aadfd0f623ef37eb486b10b044c19238360e455d7', 2, 4, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('cfd8816ed587c5ed88dea0eb00818caf38c0750e7740e05de3c27176e9aee8ee', 2, 5, 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/4MAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('811192c38643df73c015a5a1d77b802dff05d4f50fc6d10816aa75c0a6109f9a', 3, 1, 'AAAAAgAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('bd486dbdd02d460817671c4a5a7e9d6e865ca29cb41e62d7aaf70a2fee5b36de', 3, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('b5cadce05fc0ad5d6fe009b8b0debc0d3dfd32ea42b8eba3e9ea68c2746e410f', 3, 3, 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('32e4ba1f218b6aa2420b497456a1b09090e3837e66b3495030d4edd60d0f0570', 3, 4, 'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('94f79b72faddea5899929a9b017c1c514751ffa0a7df9fdba7ce81c50f75a820', 4, 1, 
'AAAAAgAAAAMAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('3f08bfa1ecbf93b1e395a4b91a282e543cfb1dd7c1f46e7920bf93238e8249bc', 4, 2, 'AAAAAgAAAAMAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('7902b04df0fb33faa59b83f918bf1a793a162bd2dc3d44b9939e757cf7cb7671', 4, 3, 'AAAAAgAAAAMAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('c93f80667f37df70a29ec0de96ff3381644ac828a4cea1cb6ceb1bcec6fff058', 4, 4, 'AAAAAgAAAAMAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('836d46dd3a264550c2bd4007c100a0c95b53ce786607ea4691c1a08552f8e1aa', 5, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('17a7d46cd3892308beefab1c2157212cb4290e2462a3648f004b49c3a7376a6a', 5, 2, 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7mshwAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('fbe6d7f49c22a500bf3e20071749ca1237012adcb54fdcc54ecadf70cfa8e859', 5, 3, 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7mshwAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('64ea6dc9090c8d122217c4ac0c3a9274f65043c92a0db6de84572e8d49cd0526', 6, 1, 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('97e6362a81bc6baad6a22febeea2e36ef3238bc7785d32dfa03b46cd8c8e274b', 6, 2, 'AAAAAgAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msjUAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txfeehistory VALUES ('0a1bb4fc8e39ac99730cc36326c0289621956a6f9d2e92ee927d762a670840cc', 6, 3, 
'AAAAAgAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msjUAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7mshwAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); + + +-- +-- Data for Name: txhistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO txhistory VALUES ('666656a6eade2082c5780571267d9e4453eee5781ca9a58aa319eb0fe83455fd', 2, 1, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAAAAAABVvwF9wAAAEBdDXe23U4e9C2SxpBLZRx1rJzSFLJ0xDD0uKGpmqbflDT+XXIq6UiDBzmFxt+GO+XqFoQPdrXT7p1oLZIHqTMP', 'ZmZWpureIILFeAVxJn2eRFPu5XgcqaWKoxnrD+g0Vf0AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/4MAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTQMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('b1f828384c56e4b024f4275f246580ababff1ae3b9ba61b03897357e57eebc20', 2, 2, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEBdfnFSzZeh17zt82oMdqe4+/xns/kHBdGXf9BIBRYfVZ3DQT3awwZn5LqgIG9JqlvMmR1TKaxcoJQDuqGcCScM', 'sfgoOExW5LAk9CdfJGWAq6v/GuO5umGwOJc1flfuvCAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTQMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmoMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('2b2e82dbabb024b27a0c3140ca71d8ac9bc71831f9f5a3bd69eca3d88fb0ec5c', 2, 3, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDJul1tLGLF4Vxwt0dDCVEf6tb5l4byMrGgCp+lVZMmxct54iNf2mxtjx6Md5ZJ4E4Dlcsf46EAhBGSUPsn8fYD', 'Ky6C26uwJLJ6DDFAynHYrJvHGDH59aO9aeyj2I+w7FwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmoMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6AMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('e17bae552da0105ad32f0b9aadfd0f623ef37eb486b10b044c19238360e455d7', 2, 4, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDYYfyOrmPhfki6lrP+oCfunJmRu2mfxl40o5qWR7y1YmP8poG+6Xqg41jKCWNwVoP717CVEPe70I0teWvTejkJ', '4XuuVS2gEFrTLwuarf0PYj7zfrSGsQsETBkjg2DkVdcAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6AMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('cfd8816ed587c5ed88dea0eb00818caf38c0750e7740e05de3c27176e9aee8ee', 2, 5, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDNmQhdQeyMcWFWP8dVRkDtFS4tHICyKdaPkR6+/L7+tMzKWoUjbDAXscRYI+j6Fd/VFUaDzdYsWCAsH30WujIL', 'z9iBbtWHxe2I3qDrAIGMrzjAdQ53QOBd48Jxdumu6O4AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrJ9XgwMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('811192c38643df73c015a5a1d77b802dff05d4f50fc6d10816aa75c0a6109f9a', 3, 1, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAFvFIhaAAAAQPlg7GLhJg0x7jpAw1Ew6H2XF6yRImfJIwFfx09Nui5btOJAFewFANfOaAB8FQZl5p3A5g3k6DHDigfUNUD16gc=', 'gRGSw4ZD33PAFaWh13uALf8F1PUPxtEIFqp1wKYQn5oAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('bd486dbdd02d460817671c4a5a7e9d6e865ca29cb41e62d7aaf70a2fee5b36de', 3, 2, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQB9kmKW2q3v7Qfy8PMekEb1TTI5ixqkI0BogXrOt7gO162Qbkh2dSTUfeDovc0PAafhDXxthVAlsLujlBmyjBAY=', 'vUhtvdAtRggXZxxKWn6dboZcopy0HmLXqvcKL+5bNt4AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('b5cadce05fc0ad5d6fe009b8b0debc0d3dfd32ea42b8eba3e9ea68c2746e410f', 3, 3, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAEnciLVAAAAQANQSzvpEBCAXvs1PgmH/UFbfAYt3OAggYPVTd0pjVcJaV3lDE/jOZMnLFZMkFEhg4dluVQxeDZAwTKUPandswg=', 
'tcrc4F/ArV1v4Am4sN68DT39MupCuOuj6epownRuQQ8AAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('32e4ba1f218b6aa2420b497456a1b09090e3837e66b3495030d4edd60d0f0570', 3, 4, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAFvFIhaAAAAQMJmv+lhF5QZlgdIqBXDSdhEtgraTrRSwVr5d/BrNC28efHMoxYNa+2u9tSEdxU+hGX6JRW7wAF3bOpA8rxxxAE=', 'MuS6HyGLaqJCC0l0VqGwkJDjg35ms0lQMNTt1g0PBXAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAYAAAAAAAAAAA==', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('94f79b72faddea5899929a9b017c1c514751ffa0a7df9fdba7ce81c50f75a820', 4, 1, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAAAAAAH5kC3vAAAAQMq2EpK0LXwZiSrwXsmACrBqgXR/+kPIpG1UMgZP4+aV4vT6OEwvAgRBoKaRPjJd5OX8gOBOJv0RKPeQObZm4Qw=', 'lPebcvrd6liZkpqbAXwcUUdR/6Cn35/bp86BxQ91qCAAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('3f08bfa1ecbf93b1e395a4b91a282e543cfb1dd7c1f46e7920bf93238e8249bc', 4, 2, 'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAAAAAAEQithJAAAAQMW/qi+OCrL8pnczurJ6BjclRbXCYwA0BJNawKbWczfAwRuhov22mUb1fa2BGLliHf8Rq6RiKgRPBLv1HrKHIws=', 'Pwi/oey/k7HjlaS5GiguVDz7HdfB9G55IL+TI46CSbwAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('7902b04df0fb33faa59b83f918bf1a793a162bd2dc3d44b9939e757cf7cb7671', 4, 3, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAAAAAAH5kC3vAAAAQCCxheB4CZrYiFfMAW6ckj/jTJzHih7CtR8TD0GcvVb6/6BWi9V2nBQvDt/y3NnBtysbFEOM1GV66xFtu8VFGAQ=', 'eQKwTfD7M/qlm4P5GL8aeToWK9LcPUS5k551fPfLdnEAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('c93f80667f37df70a29ec0de96ff3381644ac828a4cea1cb6ceb1bcec6fff058', 4, 4, 'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAAAAAAEQithJAAAAQG4l7kCAq5aqvS2d/HTtYc7LAa7pSUiiO4KyKJbqmsDgvckGC2dbhcro9tcvCZHfwqTV+ikv8Hm8Zfa63kYPkQY=', 'yT+AZn8333CinsDelv8zgWRKyCikzqHLbOsbzsb/8FgAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('836d46dd3a264550c2bd4007c100a0c95b53ce786607ea4691c1a08552f8e1aa', 5, 1, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAADRzvAAAAAAoAAAALAAAAAAAAAAAAAAAAAAAAAW8UiFoAAABAhyQ6E0v9EPANcgPYat80FOnlbZSCVHmuqRRP2exQl/qYSAmeg+yl4f08jCJCKY5Z4OBLVzE1sJ+H5W3W3WwiAQ==', 'g21G3TomRVDCvUAHwQCgyVtTznhmB+pGkcGghVL44aoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAQAAAAAAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAANHO8AAAAACgAAAAsAAAAAAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAADRzvAAAAAAAAAAAAAAAAAAAAAAUAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAADRzvAAAAAAoAAAALAAAAAAAAAAAAAAAAAAAAAwAAAAQAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAH//////////AAAAAQAAAAEAAAAAC+vCAAAAAAAAAAAAAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('17a7d46cd3892308beefab1c2157212cb4290e2462a3648f004b49c3a7376a6a', 5, 2, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAADk4cAAAAAAUAAAAGAAAAAAAAAAAAAAAAAAAAAW8UiFoAAABAiXHzI6rj32/ZduOJlIh8+WGSNLsppJ12Pj/ektn20VBD7k0xWj92CezdSrgA572XQ3hUXWYUoP7PGvkyXDYzCw==', 'F6fUbNOJIwi+76scIVchLLQpDiRio2SPAEtJw6c3amoAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAAOThwAAAAABQAAAAYAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAADAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAA0c7wAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAQAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAADRzvAAAAAAAAAAAAAAAAAQAAAAUAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAAEAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAA0c7wAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAQAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAvrwgAAAAAADRzvAAAAAAAAAAAAAAAAAAAAAAUAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAADk4cAAAAAAUAAAAGAAAAAAAAAAAAAAAAAAAAAwAAAAQAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAOThwAAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('fbe6d7f49c22a500bf3e20071749ca1237012adcb54fdcc54ecadf70cfa8e859', 5, 3, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAA9/SQAAAAAKAAAADQAAAAAAAAAAAAAAAAAAAAFvFIhaAAAAQAsGd4oc7v6XwN6gfcfLX/2YIwjRDFePAMzOUQc+BmIVeVS71wu58s6LG7iYUfGQAi6zUI6orEmc2AHjAyCzCQU=', 
'++bX9JwipQC/PiAHF0nKEjcBKty1T9zFTsrfcM+o6FkAAAAAAAAAZAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAwAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAD39JAAAAAAoAAAANAAAAAAAAAAAAAAAA', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAAEAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAL68IAAAAAAA0c7wAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAUAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAvrwgAAAAAADRzvAAAAAAAAAAAAAAAAAQAAAAcAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAAFAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAL68IAAAAAAA0c7wAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAUAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAvrwgAAAAAADRzvAAAAAAAAAAAAAAAAAwAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAOThwAAAAAAAAAAAAAAAABAAAABQAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAB3NZQAAAAAAAAAAAAAAAAAAAAAFAAAAAgAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAADAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAPf0kAAAAACgAAAA0AAAAAAAAAAAAAAAAAAAADAAAABQAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAf/////////8AAAABAAAAAQAAAAAL68IAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAABAAAAABfXhAAAAAAAAAAAAAAAAAAAAAAA'); +INSERT INTO txhistory VALUES ('64ea6dc9090c8d122217c4ac0c3a9274f65043c92a0db6de84572e8d49cd0526', 6, 1, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAA0AAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAF9eEAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAAJiWgAAAAAAAAAAAAAAAAa7kvkwAAABAyCgzne/w2KOque8fceoVw8XaFyEUyA1QcgmL+SbtTR9iyYbhgboa97DepFD8zTVc0qUwadrOlbx89dTmHIGLBA==', 'ZOptyQkMjRIiF8SsDDqSdPZQQ8kqDbbehFcujUnNBSYAAAAAAAAAZAAAAAAAAAABAAAAAAAAAA0AAAAAAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAADAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAB7+kgAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAX14QAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAHv6SAAAAAAA==', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAMAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADWk6QB//////////wAAAAEAAAAAAAAAAAAAAAMAAAAFAAAAAgAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAADAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAPf0kAAAAACgAAAA0AAAAAAAAAAAAAAAAAAAABAAAABgAAAAIAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAwAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAB7+kgAAAAAoAAAANAAAAAAAAAAAAAAAAAAAAAwAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAH//////////AAAAAQAAAAEAAAAAF9eEAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAABBkKsAf/////////8AAAABAAAAAQAAAAAR4aMAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAHc1lAAAAAAAAAAAAAAAAAQAAAAYAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAM9slgH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAWDcCAAAAAAAAAAAAAAAADAAAABAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAf/////////8AAAABAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAABDWm6Af/////////8AAAABAAAAAAAAAAA='); +INSERT INTO txhistory VALUES ('97e6362a81bc6baad6a22febeea2e36ef3238bc7785d32dfa03b46cd8c8e274b', 6, 2, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAA0AAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAHJw4AAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAATEtAAAAAAEAAAAAAAAAAAAAAAGu5L5MAAAAQH6S7x/QLCpgt/2hZNZhXEpFpaycU6WjeS3zLM1GjefNG7btD4bLCuYyWJxvcbNf768ZPzJXPOdET7YCwh5pAgU=', 'l+Y2KoG8a6rWoi/r7qLjbvMji8d4XTLfoDtGzYyOJ0sAAAAAAAAAZAAAAAAAAAABAAAAAAAAAA0AAAAAAAAAAgAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAABAAAAAAAAAAAH3ikAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAABycOAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAACAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAACXD+AAAAAAAAAAAAB94pAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAlw/gAAAAAA', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAUAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAvrwgAAAAAADRzvAAAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAADuayAwAAAACAAAABQAAAAUAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAABA2ZAAAAAAAFPsYAAAAAAAAAAAAAAAADAAAABgAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAABBkKsAf/////////8AAAABAAAAAQAAAAAR4aMAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAEi3uQB//////////wAAAAEAAAABAAAAAAq6lQAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAUAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAADRzvAAAAAAoAAAALAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAABT7GAAAAAAoAAAALAAAAAAAAAAAAAAAAAAAAAwAAAAYAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAANaTpAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAYAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAALn3bAH//////////AAAAAQAAAAAAAAAAAAAAAwAAAAUAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAADk4cAAAAAAUAAAAGAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAABN0eAAAAAAUAAAAGAAAAAAAAAAAAAAAAAAAAAwAAAAYAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAM9slgH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAWDcCAAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAqaieAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAAycwoAAAAAAAAAAAAAAAAMAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAENaboB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAEzLbIB//////////wAAAAEAAAAAAAAAAA=='); +INSERT INTO txhistory VALUES ('0a1bb4fc8e39ac99730cc36326c0289621956a6f9d2e92ee927d762a670840cc', 6, 3, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAA0AAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAHv6SAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAAAAAAAAa7kvkwAAABAAI+p6icOLlSZyUSUJA+s0DhL+MTDKA3eWve50GPHfwobaeec6XCmc0ekSt01nwS4NLmfyfTGZn8dRRZCgQU3AQ==', 'Chu0/I45rJlzDMNjJsAoliGVam+dLpLukn12KmcIQMwAAAAAAAAAZP////8AAAABAAAAAAAAAA3////2AAAAAA==', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAAEAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAA'); 
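The txhistory rows above keep each transaction's envelope, result and meta as base64-encoded XDR blobs, keyed by (ledgerseq, txindex) per the txhistory_pkey constraint and the histbyseq index defined further down. A minimal inspection sketch, assuming a psql session against a database loaded from this fixture (ledger 3 is only an illustrative value):

-- Hypothetical spot-check of the fixture: list the transactions
-- applied in one ledger, in apply order. Uses only columns that the
-- primary key and histbyseq index below confirm exist.
SELECT ledgerseq, txindex
FROM txhistory
WHERE ledgerseq = 3
ORDER BY txindex;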
+ + +-- +-- Data for Name: upgradehistory; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO upgradehistory VALUES (3, 1, 'AAAAAQAAAAw=', 'AAAAAA=='); +INSERT INTO upgradehistory VALUES (3, 2, 'AAAAAwAPQkA=', 'AAAAAA=='); + + +-- +-- Name: accountdata accountdata_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accountdata + ADD CONSTRAINT accountdata_pkey PRIMARY KEY (accountid, dataname); + + +-- +-- Name: accounts accounts_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts + ADD CONSTRAINT accounts_pkey PRIMARY KEY (accountid); + + +-- +-- Name: ban ban_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ban + ADD CONSTRAINT ban_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: ledgerheaders ledgerheaders_ledgerseq_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_ledgerseq_key UNIQUE (ledgerseq); + + +-- +-- Name: ledgerheaders ledgerheaders_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY ledgerheaders + ADD CONSTRAINT ledgerheaders_pkey PRIMARY KEY (ledgerhash); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: peers peers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY peers + ADD CONSTRAINT peers_pkey PRIMARY KEY (ip, port); + + +-- +-- Name: publishqueue publishqueue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY publishqueue + ADD CONSTRAINT publishqueue_pkey PRIMARY KEY (ledger); + + +-- +-- Name: pubsub pubsub_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY pubsub + ADD CONSTRAINT pubsub_pkey PRIMARY KEY (resid); + + +-- +-- Name: quoruminfo quoruminfo_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY quoruminfo + ADD CONSTRAINT quoruminfo_pkey PRIMARY KEY (nodeid); + + +-- +-- Name: scpquorums scpquorums_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY scpquorums + ADD CONSTRAINT scpquorums_pkey PRIMARY KEY (qsethash); + + +-- +-- Name: storestate storestate_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY storestate + ADD CONSTRAINT storestate_pkey PRIMARY KEY (statename); + + +-- +-- Name: trustlines trustlines_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY trustlines + ADD CONSTRAINT trustlines_pkey PRIMARY KEY (accountid, issuer, assetcode); + + +-- +-- Name: txfeehistory txfeehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txfeehistory + ADD CONSTRAINT txfeehistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: txhistory txhistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY txhistory + ADD CONSTRAINT txhistory_pkey PRIMARY KEY (ledgerseq, txindex); + + +-- +-- Name: upgradehistory upgradehistory_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY upgradehistory + ADD CONSTRAINT upgradehistory_pkey PRIMARY KEY (ledgerseq, upgradeindex); + + +-- +-- Name: accountbalances; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX accountbalances ON accounts USING btree (balance) WHERE (balance >= 1000000000); + + +-- +-- Name: bestofferindex; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX bestofferindex ON offers USING btree (sellingasset, buyingasset, price); + + +-- +-- Name: histbyseq; Type: INDEX; 
Schema: public; Owner: - +-- + +CREATE INDEX histbyseq ON txhistory USING btree (ledgerseq); + + +-- +-- Name: histfeebyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX histfeebyseq ON txfeehistory USING btree (ledgerseq); + + +-- +-- Name: ledgersbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ledgersbyseq ON ledgerheaders USING btree (ledgerseq); + + +-- +-- Name: scpenvsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpenvsbyseq ON scphistory USING btree (ledgerseq); + + +-- +-- Name: scpquorumsbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX scpquorumsbyseq ON scpquorums USING btree (lastledgerseq); + + +-- +-- Name: upgradehistbyseq; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX upgradehistbyseq ON upgradehistory USING btree (ledgerseq); + + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/scenarios/paths_strict_send-horizon.sql b/services/horizon/internal/test/scenarios/paths_strict_send-horizon.sql new file mode 100644 index 0000000000..4fab92edf5 --- /dev/null +++ b/services/horizon/internal/test/scenarios/paths_strict_send-horizon.sql @@ -0,0 +1,1216 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 9.6.1 +-- Dumped by pg_dump version 9.6.1 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SET check_function_bodies = false; +SET client_min_messages = warning; + +SET search_path = public, pg_catalog; + +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_counter_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_asset_id_fkey; +ALTER TABLE IF EXISTS ONLY public.history_trades DROP CONSTRAINT IF EXISTS history_trades_base_account_id_fkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_id_fkey; +DROP INDEX IF EXISTS public.trade_effects_by_order_book; +DROP INDEX IF EXISTS public.signers_by_account; +DROP INDEX IF EXISTS public.offers_by_selling_asset; +DROP INDEX IF EXISTS public.offers_by_seller; +DROP INDEX IF EXISTS public.offers_by_last_modified_ledger; +DROP INDEX IF EXISTS public.offers_by_buying_asset; +DROP INDEX IF EXISTS public.index_history_transactions_on_id; +DROP INDEX IF EXISTS public.index_history_operations_on_type; +DROP INDEX IF EXISTS public.index_history_operations_on_transaction_id; +DROP INDEX IF EXISTS public.index_history_operations_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_sequence; +DROP INDEX IF EXISTS public.index_history_ledgers_on_previous_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_ledger_hash; +DROP INDEX IF EXISTS public.index_history_ledgers_on_importer_version; +DROP INDEX IF EXISTS public.index_history_ledgers_on_id; +DROP INDEX IF EXISTS public.index_history_ledgers_on_closed_at; +DROP INDEX IF EXISTS public.index_history_effects_on_type; +DROP INDEX IF EXISTS public.index_history_accounts_on_id; +DROP INDEX IF EXISTS public.index_history_accounts_on_address; +DROP INDEX IF EXISTS public.htrd_time_lookup; +DROP INDEX IF EXISTS public.htrd_pid; +DROP INDEX IF EXISTS public.htrd_pair_time_lookup; +DROP INDEX IF EXISTS public.htrd_counter_lookup; +DROP INDEX IF EXISTS public.htrd_by_offer; +DROP INDEX IF EXISTS public.htrd_by_counter_offer; 
+DROP INDEX IF EXISTS public.htrd_by_counter_account; +DROP INDEX IF EXISTS public.htrd_by_base_offer; +DROP INDEX IF EXISTS public.htrd_by_base_account; +DROP INDEX IF EXISTS public.htp_by_htid; +DROP INDEX IF EXISTS public.hs_transaction_by_id; +DROP INDEX IF EXISTS public.hs_ledger_by_id; +DROP INDEX IF EXISTS public.hop_by_hoid; +DROP INDEX IF EXISTS public.hist_tx_p_id; +DROP INDEX IF EXISTS public.hist_op_p_id; +DROP INDEX IF EXISTS public.hist_e_id; +DROP INDEX IF EXISTS public.hist_e_by_order; +DROP INDEX IF EXISTS public.by_ledger; +DROP INDEX IF EXISTS public.by_hash; +DROP INDEX IF EXISTS public.by_account; +DROP INDEX IF EXISTS public.asset_by_issuer; +DROP INDEX IF EXISTS public.asset_by_code; +ALTER TABLE IF EXISTS ONLY public.offers DROP CONSTRAINT IF EXISTS offers_pkey; +ALTER TABLE IF EXISTS ONLY public.key_value_store DROP CONSTRAINT IF EXISTS key_value_store_pkey; +ALTER TABLE IF EXISTS ONLY public.history_transaction_participants DROP CONSTRAINT IF EXISTS history_transaction_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_operation_participants DROP CONSTRAINT IF EXISTS history_operation_participants_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_pkey; +ALTER TABLE IF EXISTS ONLY public.history_assets DROP CONSTRAINT IF EXISTS history_assets_asset_code_asset_type_asset_issuer_key; +ALTER TABLE IF EXISTS ONLY public.gorp_migrations DROP CONSTRAINT IF EXISTS gorp_migrations_pkey; +ALTER TABLE IF EXISTS ONLY public.asset_stats DROP CONSTRAINT IF EXISTS asset_stats_pkey; +ALTER TABLE IF EXISTS ONLY public.accounts_signers DROP CONSTRAINT IF EXISTS accounts_signers_pkey; +ALTER TABLE IF EXISTS public.history_transaction_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_operation_participants ALTER COLUMN id DROP DEFAULT; +ALTER TABLE IF EXISTS public.history_assets ALTER COLUMN id DROP DEFAULT; +DROP TABLE IF EXISTS public.offers; +DROP TABLE IF EXISTS public.key_value_store; +DROP TABLE IF EXISTS public.history_transactions; +DROP SEQUENCE IF EXISTS public.history_transaction_participants_id_seq; +DROP TABLE IF EXISTS public.history_transaction_participants; +DROP TABLE IF EXISTS public.history_trades; +DROP TABLE IF EXISTS public.history_operations; +DROP SEQUENCE IF EXISTS public.history_operation_participants_id_seq; +DROP TABLE IF EXISTS public.history_operation_participants; +DROP TABLE IF EXISTS public.history_ledgers; +DROP TABLE IF EXISTS public.history_effects; +DROP SEQUENCE IF EXISTS public.history_assets_id_seq; +DROP TABLE IF EXISTS public.history_assets; +DROP TABLE IF EXISTS public.history_accounts; +DROP SEQUENCE IF EXISTS public.history_accounts_id_seq; +DROP TABLE IF EXISTS public.gorp_migrations; +DROP TABLE IF EXISTS public.asset_stats; +DROP TABLE IF EXISTS public.accounts_signers; +DROP AGGREGATE IF EXISTS public.min_price(numeric[]); +DROP AGGREGATE IF EXISTS public.max_price(numeric[]); +DROP AGGREGATE IF EXISTS public.last(anyelement); +DROP AGGREGATE IF EXISTS public.first(anyelement); +DROP FUNCTION IF EXISTS public.min_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.max_price_agg(numeric[], numeric[]); +DROP FUNCTION IF EXISTS public.last_agg(anyelement, anyelement); +DROP FUNCTION IF EXISTS public.first_agg(anyelement, anyelement); +DROP EXTENSION IF EXISTS plpgsql; +DROP SCHEMA IF EXISTS public; +-- +-- Name: public; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA public; + + +-- +-- Name: SCHEMA public; Type: COMMENT; Schema: 
-; Owner: - +-- + +COMMENT ON SCHEMA public IS 'standard public schema'; + + +-- +-- Name: plpgsql; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS plpgsql WITH SCHEMA pg_catalog; + + +-- +-- Name: EXTENSION plpgsql; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION plpgsql IS 'PL/pgSQL procedural language'; + + +SET search_path = public, pg_catalog; + +-- +-- Name: first_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION first_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $1 $_$; + + +-- +-- Name: last_agg(anyelement, anyelement); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION last_agg(anyelement, anyelement) RETURNS anyelement + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT $2 $_$; + + +-- +-- Name: max_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION max_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]>$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: min_price_agg(numeric[], numeric[]); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION min_price_agg(numeric[], numeric[]) RETURNS numeric[] + LANGUAGE sql IMMUTABLE STRICT + AS $_$ SELECT ( + CASE WHEN $1[1]/$1[2]<$2[1]/$2[2] THEN $1 ELSE $2 END) $_$; + + +-- +-- Name: first(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE first(anyelement) ( + SFUNC = first_agg, + STYPE = anyelement +); + + +-- +-- Name: last(anyelement); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE last(anyelement) ( + SFUNC = last_agg, + STYPE = anyelement +); + + +-- +-- Name: max_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE max_price(numeric[]) ( + SFUNC = max_price_agg, + STYPE = numeric[] +); + + +-- +-- Name: min_price(numeric[]); Type: AGGREGATE; Schema: public; Owner: - +-- + +CREATE AGGREGATE min_price(numeric[]) ( + SFUNC = min_price_agg, + STYPE = numeric[] +); + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: accounts_signers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE accounts_signers ( + account character varying(64) NOT NULL, + signer character varying(64) NOT NULL, + weight integer NOT NULL +); + + +-- +-- Name: asset_stats; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE asset_stats ( + id bigint NOT NULL, + amount character varying NOT NULL, + num_accounts integer NOT NULL, + flags smallint NOT NULL, + toml character varying(255) NOT NULL +); + + +-- +-- Name: gorp_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE gorp_migrations ( + id text NOT NULL, + applied_at timestamp with time zone +); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_accounts_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_accounts; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_accounts ( + id bigint DEFAULT nextval('history_accounts_id_seq'::regclass) NOT NULL, + address character varying(64) +); + + +-- +-- Name: history_assets; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_assets ( + id integer NOT NULL, + asset_type character varying(64) NOT NULL, + asset_code character varying(12) NOT NULL, + asset_issuer character varying(56) NOT NULL +); + + +-- +-- Name: 
history_assets_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_assets_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_assets_id_seq OWNED BY history_assets.id; + + +-- +-- Name: history_effects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_effects ( + history_account_id bigint NOT NULL, + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + type integer NOT NULL, + details jsonb +); + + +-- +-- Name: history_ledgers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_ledgers ( + sequence integer NOT NULL, + ledger_hash character varying(64) NOT NULL, + previous_ledger_hash character varying(64), + transaction_count integer DEFAULT 0 NOT NULL, + operation_count integer DEFAULT 0 NOT NULL, + closed_at timestamp without time zone NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + importer_version integer DEFAULT 1 NOT NULL, + total_coins bigint NOT NULL, + fee_pool bigint NOT NULL, + base_fee integer NOT NULL, + base_reserve integer NOT NULL, + max_tx_set_size integer NOT NULL, + protocol_version integer DEFAULT 0 NOT NULL, + ledger_header text, + successful_transaction_count integer, + failed_transaction_count integer, + tx_set_operation_count integer +); + + +-- +-- Name: history_operation_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operation_participants ( + id integer NOT NULL, + history_operation_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_operation_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_operation_participants_id_seq OWNED BY history_operation_participants.id; + + +-- +-- Name: history_operations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_operations ( + id bigint NOT NULL, + transaction_id bigint NOT NULL, + application_order integer NOT NULL, + type integer NOT NULL, + details jsonb, + source_account character varying(64) DEFAULT ''::character varying NOT NULL +); + + +-- +-- Name: history_trades; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_trades ( + history_operation_id bigint NOT NULL, + "order" integer NOT NULL, + ledger_closed_at timestamp without time zone NOT NULL, + offer_id bigint NOT NULL, + base_account_id bigint NOT NULL, + base_asset_id bigint NOT NULL, + base_amount bigint NOT NULL, + counter_account_id bigint NOT NULL, + counter_asset_id bigint NOT NULL, + counter_amount bigint NOT NULL, + base_is_seller boolean, + price_n bigint, + price_d bigint, + base_offer_id bigint, + counter_offer_id bigint, + CONSTRAINT history_trades_base_amount_check CHECK ((base_amount >= 0)), + CONSTRAINT history_trades_check CHECK ((base_asset_id < counter_asset_id)), + CONSTRAINT history_trades_counter_amount_check CHECK ((counter_amount >= 0)) +); + + +-- +-- Name: history_transaction_participants; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transaction_participants ( + id integer NOT NULL, + history_transaction_id bigint NOT NULL, + history_account_id bigint NOT NULL +); + 
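history_trades above records each fill with its raw price as a (price_n, price_d) pair, which is the shape the first/last/min_price/max_price aggregates defined earlier consume. A rough sketch of the kind of OHLC-style bucketing those aggregates enable, not the exact Horizon query: the hourly bucket is an assumption, the asset ids 2 and 3 are illustrative values from this scenario, and the casts to numeric[] are added explicitly to match the aggregate signatures.

-- Sketch: bucket trades for one asset pair into hourly OHLC-style rows.
-- high/low/open/close come back as [price_n, price_d] pairs, not ratios.
SELECT date_trunc('hour', ledger_closed_at)                                  AS bucket,
       count(*)                                                              AS trade_count,
       sum(base_amount)                                                      AS base_volume,
       sum(counter_amount)                                                   AS counter_volume,
       max_price(ARRAY[price_n, price_d]::numeric[])                         AS high,
       min_price(ARRAY[price_n, price_d]::numeric[])                         AS low,
       first(ARRAY[price_n, price_d] ORDER BY history_operation_id, "order") AS open,
       last(ARRAY[price_n, price_d] ORDER BY history_operation_id, "order")  AS close
FROM history_trades
WHERE base_asset_id = 2        -- illustrative pair; the fixture's check
  AND counter_asset_id = 3     -- constraint requires base < counter
GROUP BY bucket
ORDER BY bucket;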
+ +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE history_transaction_participants_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE history_transaction_participants_id_seq OWNED BY history_transaction_participants.id; + + +-- +-- Name: history_transactions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE history_transactions ( + transaction_hash character varying(64) NOT NULL, + ledger_sequence integer NOT NULL, + application_order integer NOT NULL, + account character varying(64) NOT NULL, + account_sequence bigint NOT NULL, + max_fee integer NOT NULL, + operation_count integer NOT NULL, + created_at timestamp without time zone, + updated_at timestamp without time zone, + id bigint, + tx_envelope text NOT NULL, + tx_result text NOT NULL, + tx_meta text NOT NULL, + tx_fee_meta text NOT NULL, + signatures character varying(96)[] DEFAULT '{}'::character varying[] NOT NULL, + memo_type character varying DEFAULT 'none'::character varying NOT NULL, + memo character varying, + time_bounds int8range, + successful boolean, + fee_charged integer +); + + +-- +-- Name: key_value_store; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE key_value_store ( + key character varying(255) NOT NULL, + value character varying(255) NOT NULL +); + + +-- +-- Name: offers; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE offers ( + sellerid character varying(56) NOT NULL, + offerid bigint NOT NULL, + sellingasset text NOT NULL, + buyingasset text NOT NULL, + amount bigint NOT NULL, + pricen integer NOT NULL, + priced integer NOT NULL, + price double precision NOT NULL, + flags integer NOT NULL, + last_modified_ledger integer NOT NULL +); + + +-- +-- Name: history_assets id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets ALTER COLUMN id SET DEFAULT nextval('history_assets_id_seq'::regclass); + + +-- +-- Name: history_operation_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants ALTER COLUMN id SET DEFAULT nextval('history_operation_participants_id_seq'::regclass); + + +-- +-- Name: history_transaction_participants id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants ALTER COLUMN id SET DEFAULT nextval('history_transaction_participants_id_seq'::regclass); + + +-- +-- Data for Name: accounts_signers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: asset_stats; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Data for Name: gorp_migrations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO gorp_migrations VALUES ('1_initial_schema.sql', '2019-10-07 19:43:05.315537+02'); +INSERT INTO gorp_migrations VALUES ('2_index_participants_by_toid.sql', '2019-10-07 19:43:05.325849+02'); +INSERT INTO gorp_migrations VALUES ('3_use_sequence_in_history_accounts.sql', '2019-10-07 19:43:05.329863+02'); +INSERT INTO gorp_migrations VALUES ('4_add_protocol_version.sql', '2019-10-07 19:43:05.338857+02'); +INSERT INTO gorp_migrations VALUES ('5_create_trades_table.sql', '2019-10-07 19:43:05.347593+02'); +INSERT INTO gorp_migrations VALUES ('6_create_assets_table.sql', '2019-10-07 19:43:05.35284+02'); +INSERT INTO gorp_migrations VALUES ('7_modify_trades_table.sql', '2019-10-07 
19:43:05.365367+02'); +INSERT INTO gorp_migrations VALUES ('8_add_aggregators.sql', '2019-10-07 19:43:05.370409+02'); +INSERT INTO gorp_migrations VALUES ('8_create_asset_stats_table.sql', '2019-10-07 19:43:05.375797+02'); +INSERT INTO gorp_migrations VALUES ('9_add_header_xdr.sql', '2019-10-07 19:43:05.378972+02'); +INSERT INTO gorp_migrations VALUES ('10_add_trades_price.sql', '2019-10-07 19:43:05.381868+02'); +INSERT INTO gorp_migrations VALUES ('11_add_trades_account_index.sql', '2019-10-07 19:43:05.38519+02'); +INSERT INTO gorp_migrations VALUES ('12_asset_stats_amount_string.sql', '2019-10-07 19:43:05.391451+02'); +INSERT INTO gorp_migrations VALUES ('13_trade_offer_ids.sql', '2019-10-07 19:43:05.397096+02'); +INSERT INTO gorp_migrations VALUES ('14_fix_asset_toml_field.sql', '2019-10-07 19:43:05.398884+02'); +INSERT INTO gorp_migrations VALUES ('15_ledger_failed_txs.sql', '2019-10-07 19:43:05.400773+02'); +INSERT INTO gorp_migrations VALUES ('16_ingest_failed_transactions.sql', '2019-10-07 19:43:05.402209+02'); +INSERT INTO gorp_migrations VALUES ('17_transaction_fee_paid.sql', '2019-10-07 19:43:05.403943+02'); +INSERT INTO gorp_migrations VALUES ('18_account_for_signers.sql', '2019-10-07 19:43:05.410844+02'); +INSERT INTO gorp_migrations VALUES ('19_offers.sql', '2019-10-07 19:43:05.422426+02'); +INSERT INTO gorp_migrations VALUES ('20_account_for_signer_index.sql', '2019-10-07 19:43:05.424906+02'); +INSERT INTO gorp_migrations VALUES ('21_trades_remove_zero_amount_constraints.sql', '2019-10-07 19:43:05.428096+02'); + + +-- +-- Data for Name: history_accounts; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_accounts VALUES (1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_accounts VALUES (2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_accounts VALUES (3, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_accounts VALUES (4, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_accounts VALUES (5, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); +INSERT INTO history_accounts VALUES (6, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Name: history_accounts_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_accounts_id_seq', 6, true); + + +-- +-- Data for Name: history_assets; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_assets VALUES (1, 'credit_alphanum4', 'EUR', 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); +INSERT INTO history_assets VALUES (2, 'credit_alphanum4', 'USD', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_assets VALUES (3, 'native', '', ''); + + +-- +-- Name: history_assets_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_assets_id_seq', 3, true); + + +-- +-- Data for Name: history_effects; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_effects VALUES (3, 25769807873, 1, 2, '{"amount": "13.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (2, 25769807873, 2, 3, '{"amount": "10.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 25769807873, 
3, 33, '{"seller": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "offer_id": 3, "sold_amount": "10.0000000", "bought_amount": "13.0000000", "sold_asset_code": "USD", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "EUR", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "bought_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 25769807873, 4, 33, '{"seller": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "offer_id": 3, "sold_amount": "13.0000000", "bought_amount": "10.0000000", "sold_asset_code": "EUR", "sold_asset_type": "credit_alphanum4", "bought_asset_code": "USD", "bought_asset_type": "credit_alphanum4", "sold_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "bought_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 25769811969, 1, 2, '{"amount": "15.8400000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (2, 25769811969, 2, 3, '{"amount": "12.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 25769811969, 3, 33, '{"seller": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "offer_id": 1, "sold_amount": "12.0000000", "bought_amount": "13.2000000", "sold_asset_code": "USD", "sold_asset_type": "credit_alphanum4", "bought_asset_type": "native", "sold_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 25769811969, 4, 33, '{"seller": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "offer_id": 1, "sold_amount": "13.2000000", "bought_amount": "12.0000000", "sold_asset_type": "native", "bought_asset_code": "USD", "bought_asset_type": "credit_alphanum4", "bought_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 25769811969, 5, 33, '{"seller": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "offer_id": 2, "sold_amount": "13.2000000", "bought_amount": "15.8400000", "sold_asset_type": "native", "bought_asset_code": "EUR", "bought_asset_type": "credit_alphanum4", "bought_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 25769811969, 6, 33, '{"seller": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "offer_id": 2, "sold_amount": "15.8400000", "bought_amount": "13.2000000", "sold_asset_code": "EUR", "sold_asset_type": "credit_alphanum4", "bought_asset_type": "native", "sold_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 17179873281, 1, 2, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (4, 17179873281, 2, 3, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (1, 17179877377, 1, 2, '{"amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": 
"GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (5, 17179877377, 2, 3, '{"amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (2, 17179881473, 1, 2, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (4, 17179881473, 2, 3, '{"amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 17179885569, 1, 2, '{"amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (5, 17179885569, 2, 3, '{"amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 12884905985, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (2, 12884910081, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (3, 12884914177, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 12884918273, 1, 20, '{"limit": "922337203685.4775807", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (4, 8589938689, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589938689, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (4, 8589938689, 3, 10, '{"weight": 1, "public_key": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}'); +INSERT INTO history_effects VALUES (5, 8589942785, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589942785, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (5, 8589942785, 3, 10, '{"weight": 1, "public_key": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}'); +INSERT INTO history_effects VALUES (1, 8589946881, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589946881, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (1, 8589946881, 3, 10, '{"weight": 1, "public_key": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON"}'); +INSERT INTO history_effects VALUES (3, 8589950977, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589950977, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (3, 8589950977, 3, 10, '{"weight": 1, "public_key": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2"}'); +INSERT INTO history_effects 
VALUES (2, 8589955073, 1, 0, '{"starting_balance": "100.0000000"}'); +INSERT INTO history_effects VALUES (6, 8589955073, 2, 3, '{"amount": "100.0000000", "asset_type": "native"}'); +INSERT INTO history_effects VALUES (2, 8589955073, 3, 10, '{"weight": 1, "public_key": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}'); + + +-- +-- Data for Name: history_ledgers; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_ledgers VALUES (6, 'd846337fd9e276439446d2a542fb97bc6aa16500ead5cd6e9128777cdede107c', 'dee967b48bc05fed720d42d007138d89cd0e0637ddf44907e2491436d0d7c686', 2, 2, '2019-10-07 17:43:06', '2019-10-07 17:43:05.699656', '2019-10-07 17:43:05.699656', 25769803776, 16, 1000000000000000000, 1900, 100, 100000000, 1000000, 12, 'AAAADN7pZ7SLwF/tcg1C0AcTjYnNDgY33fRJB+JJFDbQ18aGD5JAw28eDCmD21TuxPrnW6GZ9oRbNGkPhI/dam9g1yMAAAAAXZt5KgAAAAAAAAAAeAVLzxvwzAkwUB36uyuX61OBzxZJsiQl/YRoHG7lYK2hrbwzP799YvfKmd/Om14fc29PeKnUYAh9L9UBS4tHKQAAAAYN4Lazp2QAAAAAAAAAAAdsAAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 2, 1, NULL); +INSERT INTO history_ledgers VALUES (5, 'dee967b48bc05fed720d42d007138d89cd0e0637ddf44907e2491436d0d7c686', 'c0a1328ca6fdbbf5a34bd31a277f579714fb18d95b304ea0cd8bdb9dc7e88fd4', 3, 3, '2019-10-07 17:43:05', '2019-10-07 17:43:05.73871', '2019-10-07 17:43:05.73871', 21474836480, 16, 1000000000000000000, 1600, 100, 100000000, 1000000, 12, 'AAAADMChMoym/bv1o0vTGid/V5cU+xjZWzBOoM2L253H6I/UTbRk/xXOhaS2qGx37FHhNzwmejvdmduwoSd4w58iA78AAAAAXZt5KQAAAAAAAAAAliNGcBHBZ7FQDpA0n+37lxddf7Ir5kZ1Q7qncRnxzWkLKbKTDc2aiwWCngCABoLG7gcns6tce7E8dkEvbziJZAAAAAUN4Lazp2QAAAAAAAAAAAZAAAAAAAAAAAAAAAADAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 3, 0, NULL); +INSERT INTO history_ledgers VALUES (4, 'c0a1328ca6fdbbf5a34bd31a277f579714fb18d95b304ea0cd8bdb9dc7e88fd4', '9725bcddbfde2a2797d753c7395ed68800e4df27e923ba942925563e37517a8c', 4, 4, '2019-10-07 17:43:04', '2019-10-07 17:43:05.750081', '2019-10-07 17:43:05.750081', 17179869184, 16, 1000000000000000000, 1300, 100, 100000000, 1000000, 12, 'AAAADJclvN2/3ionl9dTxzle1ogA5N8n6SO6lCklVj43UXqMBOKee+9kgKkF22YkX7RWJFTDQo4H0Ar1ZUwhsBaJgfgAAAAAXZt5KAAAAAAAAAAAMjGOUmwgY7qRb0Vt3UuR5Zqme5k2qjH87qHhARew0qwaglZfYtmDmC+6oADly8483+2VmtfE3a0SS9HHJ4iiVgAAAAQN4Lazp2QAAAAAAAAAAAUUAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0, NULL); +INSERT INTO history_ledgers VALUES (3, '9725bcddbfde2a2797d753c7395ed68800e4df27e923ba942925563e37517a8c', '04e3ebeaf9d1b8e0f5458d530169fe0d89f1bfdd7bd4dc8d4b0584b236d17e15', 4, 4, '2019-10-07 17:43:03', '2019-10-07 17:43:05.764897', '2019-10-07 17:43:05.764897', 12884901888, 16, 1000000000000000000, 900, 100, 100000000, 1000000, 12, 
'AAAADATj6+r50bjg9UWNUwFp/g2J8b/de9TcjUsFhLI20X4VjuzlLlAPf+1swU2fx1atuTf1fyOeFxD9jxoTthHXBpoAAAAAXZt5JwAAAAIAAAAIAAAAAQAAAAwAAAAIAAAAAwAPQkAAAAAAy7QXeOkQQgUGgHQkca+qdNuHDZuWECktEZYOGCwuFi/v/ouvKrv480RTLnMDFHRWMuw+K2/FiTMBDgzn+VjH+gAAAAMN4Lazp2QAAAAAAAAAAAOEAAAAAAAAAAAAAAAAAAAAZAX14QAAD0JAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 4, 0, NULL); +INSERT INTO history_ledgers VALUES (2, '04e3ebeaf9d1b8e0f5458d530169fe0d89f1bfdd7bd4dc8d4b0584b236d17e15', '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', 5, 5, '2019-10-07 17:43:02', '2019-10-07 17:43:05.77678', '2019-10-07 17:43:05.776781', 8589934592, 16, 1000000000000000000, 500, 100, 100000000, 100, 0, 'AAAAAGPZj1Nu5o0bJ7W4nyOvUxG3Vpok+vFAOtC1K2M7B76ZhnfbEDpHPYd45nQcT7XkfIMwSX9gFD9+fWat716QIWcAAAAAXZt5JgAAAAAAAAAAQ9m8LSG8b5W0buxEtqstVQz7IV/HrDmGEtdA4mCEDXwp2gko2aYOjyG9WNl6CAHoM51KxJFK9g0oOuL3ks8iXgAAAAIN4Lazp2QAAAAAAAAAAAH0AAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 5, 0, NULL); +INSERT INTO history_ledgers VALUES (1, '63d98f536ee68d1b27b5b89f23af5311b7569a24faf1403ad0b52b633b07be99', NULL, 0, 0, '1970-01-01 00:00:00', '2019-10-07 17:43:05.790134', '2019-10-07 17:43:05.790134', 4294967296, 16, 1000000000000000000, 0, 100, 100000000, 100, 0, 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABXKi4y/ySKB7DnD9H20xjB+s0gtswIwz1XdSWYaBJaFgAAAAEN4Lazp2QAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAX14QAAAABkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA', 0, 0, NULL); + + +-- +-- Data for Name: history_operation_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operation_participants VALUES (1, 25769807873, 2); +INSERT INTO history_operation_participants VALUES (2, 25769807873, 3); +INSERT INTO history_operation_participants VALUES (3, 25769811969, 2); +INSERT INTO history_operation_participants VALUES (4, 25769811969, 3); +INSERT INTO history_operation_participants VALUES (5, 25769816065, 2); +INSERT INTO history_operation_participants VALUES (6, 25769816065, 3); +INSERT INTO history_operation_participants VALUES (7, 21474840577, 1); +INSERT INTO history_operation_participants VALUES (8, 21474844673, 1); +INSERT INTO history_operation_participants VALUES (9, 21474848769, 1); +INSERT INTO history_operation_participants VALUES (10, 17179873281, 4); +INSERT INTO history_operation_participants VALUES (11, 17179873281, 1); +INSERT INTO history_operation_participants VALUES (12, 17179877377, 5); +INSERT INTO history_operation_participants VALUES (13, 17179877377, 1); +INSERT INTO history_operation_participants VALUES (14, 17179881473, 4); +INSERT INTO history_operation_participants VALUES (15, 17179881473, 2); +INSERT INTO history_operation_participants VALUES (16, 17179885569, 5); +INSERT INTO history_operation_participants VALUES (17, 17179885569, 3); +INSERT INTO history_operation_participants VALUES (18, 12884905985, 1); +INSERT INTO history_operation_participants VALUES (19, 12884910081, 2); +INSERT INTO history_operation_participants VALUES (20, 12884914177, 3); +INSERT INTO 
history_operation_participants VALUES (21, 12884918273, 1); +INSERT INTO history_operation_participants VALUES (22, 8589938689, 6); +INSERT INTO history_operation_participants VALUES (23, 8589938689, 4); +INSERT INTO history_operation_participants VALUES (24, 8589942785, 6); +INSERT INTO history_operation_participants VALUES (25, 8589942785, 5); +INSERT INTO history_operation_participants VALUES (26, 8589946881, 6); +INSERT INTO history_operation_participants VALUES (27, 8589946881, 1); +INSERT INTO history_operation_participants VALUES (28, 8589950977, 3); +INSERT INTO history_operation_participants VALUES (29, 8589950977, 6); +INSERT INTO history_operation_participants VALUES (30, 8589955073, 6); +INSERT INTO history_operation_participants VALUES (31, 8589955073, 2); + + +-- +-- Name: history_operation_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_operation_participants_id_seq', 31, true); + + +-- +-- Data for Name: history_operations; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_operations VALUES (25769807873, 25769807872, 1, 13, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "path": [], "amount": "13.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "source_amount": "10.0000000", "destination_min": "1.0000000", "source_asset_code": "USD", "source_asset_type": "credit_alphanum4", "source_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (25769811969, 25769811968, 1, 13, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "path": [{"asset_type": "native"}], "amount": "15.8400000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "source_amount": "12.0000000", "destination_min": "2.0000000", "source_asset_code": "USD", "source_asset_type": "credit_alphanum4", "source_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (25769816065, 25769816064, 1, 13, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "path": [], "amount": "0.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "source_amount": "13.0000000", "destination_min": "100.0000000", "source_asset_code": "USD", "source_asset_type": "credit_alphanum4", "source_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (21474840577, 21474840576, 1, 3, '{"price": "0.9090909", "amount": "22.0000000", "price_r": {"d": 11, "n": 10}, "offer_id": 0, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_type": "native", "buying_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (21474844673, 21474844672, 1, 3, '{"price": 
"0.8333333", "amount": "24.0000000", "price_r": {"d": 6, "n": 5}, "offer_id": 0, "buying_asset_type": "native", "selling_asset_code": "EUR", "selling_asset_type": "credit_alphanum4", "selling_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (21474848769, 21474848768, 1, 3, '{"price": "0.7692308", "amount": "26.0000000", "price_r": {"d": 13, "n": 10}, "offer_id": 0, "buying_asset_code": "USD", "buying_asset_type": "credit_alphanum4", "selling_asset_code": "EUR", "selling_asset_type": "credit_alphanum4", "buying_asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "selling_asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (17179873281, 17179873280, 1, 1, '{"to": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "from": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (17179877377, 17179877376, 1, 1, '{"to": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "from": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); +INSERT INTO history_operations VALUES (17179881473, 17179881472, 1, 1, '{"to": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "from": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "amount": "100.0000000", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4'); +INSERT INTO history_operations VALUES (17179885569, 17179885568, 1, 1, '{"to": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "from": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "amount": "100.0000000", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG'); +INSERT INTO history_operations VALUES (12884905985, 12884905984, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (12884910081, 12884910080, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "trustor": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "asset_code": "USD", "asset_type": "credit_alphanum4", "asset_issuer": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4"}', 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU'); +INSERT INTO history_operations VALUES (12884914177, 12884914176, 1, 6, '{"limit": 
"922337203685.4775807", "trustee": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "trustor": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2'); +INSERT INTO history_operations VALUES (12884918273, 12884918272, 1, 6, '{"limit": "922337203685.4775807", "trustee": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "trustor": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "asset_code": "EUR", "asset_type": "credit_alphanum4", "asset_issuer": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG"}', 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON'); +INSERT INTO history_operations VALUES (8589938689, 8589938688, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589942785, 8589942784, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589946881, 8589946880, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589950977, 8589950976, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); +INSERT INTO history_operations VALUES (8589955073, 8589955072, 1, 0, '{"funder": "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "account": "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU", "starting_balance": "100.0000000"}', 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H'); + + +-- +-- Data for Name: history_trades; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_trades VALUES (25769807873, 0, '2019-10-07 17:43:06', 3, 1, 1, 130000000, 2, 2, 100000000, true, 10, 13, 3, 4611686044197195777); +INSERT INTO history_trades VALUES (25769811969, 0, '2019-10-07 17:43:06', 1, 2, 2, 120000000, 1, 3, 132000000, false, 11, 10, 4611686044197199873, 1); +INSERT INTO history_trades VALUES (25769811969, 1, '2019-10-07 17:43:06', 2, 1, 1, 158400000, 2, 3, 132000000, true, 5, 6, 2, 4611686044197199873); + + +-- +-- Data for Name: history_transaction_participants; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transaction_participants VALUES (1, 25769807872, 2); +INSERT INTO history_transaction_participants VALUES (2, 25769807872, 3); +INSERT INTO history_transaction_participants VALUES (3, 25769811968, 3); +INSERT INTO history_transaction_participants VALUES (4, 25769811968, 2); +INSERT INTO history_transaction_participants VALUES (5, 25769816064, 2); +INSERT INTO history_transaction_participants VALUES (6, 25769816064, 3); +INSERT INTO history_transaction_participants VALUES (7, 21474840576, 1); +INSERT INTO 
history_transaction_participants VALUES (8, 21474844672, 1); +INSERT INTO history_transaction_participants VALUES (9, 21474848768, 1); +INSERT INTO history_transaction_participants VALUES (10, 17179873280, 4); +INSERT INTO history_transaction_participants VALUES (11, 17179873280, 1); +INSERT INTO history_transaction_participants VALUES (12, 17179877376, 5); +INSERT INTO history_transaction_participants VALUES (13, 17179877376, 1); +INSERT INTO history_transaction_participants VALUES (14, 17179881472, 2); +INSERT INTO history_transaction_participants VALUES (15, 17179881472, 4); +INSERT INTO history_transaction_participants VALUES (16, 17179885568, 5); +INSERT INTO history_transaction_participants VALUES (17, 17179885568, 3); +INSERT INTO history_transaction_participants VALUES (18, 12884905984, 1); +INSERT INTO history_transaction_participants VALUES (19, 12884910080, 2); +INSERT INTO history_transaction_participants VALUES (20, 12884914176, 3); +INSERT INTO history_transaction_participants VALUES (21, 12884918272, 1); +INSERT INTO history_transaction_participants VALUES (22, 8589938688, 6); +INSERT INTO history_transaction_participants VALUES (23, 8589938688, 4); +INSERT INTO history_transaction_participants VALUES (24, 8589942784, 6); +INSERT INTO history_transaction_participants VALUES (25, 8589942784, 5); +INSERT INTO history_transaction_participants VALUES (26, 8589946880, 6); +INSERT INTO history_transaction_participants VALUES (27, 8589946880, 1); +INSERT INTO history_transaction_participants VALUES (28, 8589950976, 6); +INSERT INTO history_transaction_participants VALUES (29, 8589950976, 3); +INSERT INTO history_transaction_participants VALUES (30, 8589955072, 2); +INSERT INTO history_transaction_participants VALUES (31, 8589955072, 6); + + +-- +-- Name: history_transaction_participants_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('history_transaction_participants_id_seq', 31, true); + + +-- +-- Data for Name: history_transactions; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO history_transactions VALUES ('64ea6dc9090c8d122217c4ac0c3a9274f65043c92a0db6de84572e8d49cd0526', 6, 1, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934594, 100, 1, '2019-10-07 17:43:05.699959', '2019-10-07 17:43:05.699959', 25769807872, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAA0AAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAF9eEAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAAJiWgAAAAAAAAAAAAAAAAa7kvkwAAABAyCgzne/w2KOque8fceoVw8XaFyEUyA1QcgmL+SbtTR9iyYbhgboa97DepFD8zTVc0qUwadrOlbx89dTmHIGLBA==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAANAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAwAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAe/pIAAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAF9eEAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAB7+kgAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAABAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAACgAAAAMAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADWk6QB//////////wAAAAEAAAAAAAAAAAAAAAMAAAAFAAAAAgAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAADAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAPf0kAAAAACgAAAA0AAAAAAAAAAAAAAAAAAAABAAAABgAAAAIAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAwAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAB7+kgAAAAAoAAAANAAAAAAAAAAAAAAAAAAAAAwAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAH//////////AAAAAQAAAAEAAAAAF9eEAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAABBkKsAf/////////8AAAABAAAAAQAAAAAR4aMAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAFAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAABAAAAAAAAAAAAAAAAHc1lAAAAAAAAAAAAAAAAAQAAAAYAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAM9slgH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAWDcCAAAAAAAAAAAAAAAADAAAABAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAf/////////8AAAABAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAABDWm6Af/////////8AAAABAAAAAAAAAAA=', 'AAAAAgAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{yCgzne/w2KOque8fceoVw8XaFyEUyA1QcgmL+SbtTR9iyYbhgboa97DepFD8zTVc0qUwadrOlbx89dTmHIGLBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('97e6362a81bc6baad6a22febeea2e36ef3238bc7785d32dfa03b46cd8c8e274b', 6, 2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934595, 100, 1, '2019-10-07 17:43:05.712568', '2019-10-07 17:43:05.712569', 25769811968, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAA0AAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAHJw4AAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAATEtAAAAAAEAAAAAAAAAAAAAAAGu5L5MAAAAQH6S7x/QLCpgt/2hZNZhXEpFpaycU6WjeS3zLM1GjefNG7btD4bLCuYyWJxvcbNf768ZPzJXPOdET7YCwh5pAgU=', 
'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAANAAAAAAAAAAIAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAQAAAAAAAAAAB94pAAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAcnDgAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAAAAAAgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAlw/gAAAAAAAAAAAAfeKQAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAJcP4AAAAAAA==', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAACAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAADgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAUAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAvrwgAAAAAADRzvAAAAAAAAAAAAAAAAAQAAAAYAAAAAAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAADuayAwAAAACAAAABQAAAAUAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAEAAAAABA2ZAAAAAAAFPsYAAAAAAAAAAAAAAAADAAAABgAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAABBkKsAf/////////8AAAABAAAAAQAAAAAR4aMAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAEi3uQB//////////wAAAAEAAAABAAAAAAq6lQAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAUAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAADRzvAAAAAAoAAAALAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAABT7GAAAAAAoAAAALAAAAAAAAAAAAAAAAAAAAAwAAAAYAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAANaTpAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAYAAAABAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAALn3bAH//////////AAAAAQAAAAAAAAAAAAAAAwAAAAUAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAADk4cAAAAAAUAAAAGAAAAAAAAAAAAAAAAAAAAAQAAAAYAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAABN0eAAAAAAUAAAAGAAAAAAAAAAAAAAAAAAAAAwAAAAYAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAM9slgH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAWDcCAAAAAAAAAAAAAAAABAAAABgAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAqaieAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAAycwoAAAAAAAAAAAAAAAAMAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAENaboB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAGAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAEzLbIB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msk4AAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msjUAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{fpLvH9AsKmC3/aFk1mFcSkWlrJxTpaN5LfMszUaN580btu0PhssK5jJYnG9xs1/vrxk/Mlc850RPtgLCHmkCBQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO 
history_transactions VALUES ('0a1bb4fc8e39ac99730cc36326c0289621956a6f9d2e92ee927d762a670840cc', 6, 3, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934596, 100, 1, '2019-10-07 17:43:05.721632', '2019-10-07 17:43:05.721632', 25769816064, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAA0AAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAHv6SAAAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAAAAAAAAa7kvkwAAABAAI+p6icOLlSZyUSUJA+s0DhL+MTDKA3eWve50GPHfwobaeec6XCmc0ekSt01nwS4NLmfyfTGZn8dRRZCgQU3AQ==', 'AAAAAAAAAGT/////AAAAAQAAAAAAAAAN////9gAAAAA=', 'AAAAAQAAAAIAAAADAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAADAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABgAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rIcAAAAAIAAAAEAAAAAQAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msjUAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAGAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7mshwAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{AI+p6icOLlSZyUSUJA+s0DhL+MTDKA3eWve50GPHfwobaeec6XCmc0ekSt01nwS4NLmfyfTGZn8dRRZCgQU3AQ==}', 'none', NULL, NULL, false, 100); +INSERT INTO history_transactions VALUES ('836d46dd3a264550c2bd4007c100a0c95b53ce786607ea4691c1a08552f8e1aa', 5, 1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934595, 100, 1, '2019-10-07 17:43:05.738887', '2019-10-07 17:43:05.738887', 21474840576, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAADAAAAAAAAAAAAAAABAAAAAAAAAAMAAAAAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAADRzvAAAAAAoAAAALAAAAAAAAAAAAAAAAAAAAAW8UiFoAAABAhyQ6E0v9EPANcgPYat80FOnlbZSCVHmuqRRP2exQl/qYSAmeg+yl4f08jCJCKY5Z4OBLVzE1sJ+H5W3W3WwiAQ==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAADRzvAAAAAAoAAAALAAAAAAAAAAAAAAAA', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAACAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAADAAAAAgAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABQAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAMAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAMAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAADRzvAAAAAAAAAAAAAAAAAAAAAAUAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAEAAAAAAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAADRzvAAAAAAoAAAALAAAAAAAAAAAAAAAAAAAAAwAAAAQAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAVVTRAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAH//////////AAAAAQAAAAEAAAAAC+vCAAAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 
'{hyQ6E0v9EPANcgPYat80FOnlbZSCVHmuqRRP2exQl/qYSAmeg+yl4f08jCJCKY5Z4OBLVzE1sJ+H5W3W3WwiAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('17a7d46cd3892308beefab1c2157212cb4290e2462a3648f004b49c3a7376a6a', 5, 2, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934596, 100, 1, '2019-10-07 17:43:05.739094', '2019-10-07 17:43:05.739094', 21474844672, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAADk4cAAAAAAUAAAAGAAAAAAAAAAAAAAAAAAAAAW8UiFoAAABAiXHzI6rj32/ZduOJlIh8+WGSNLsppJ12Pj/ektn20VBD7k0xWj92CezdSrgA572XQ3hUXWYUoP7PGvkyXDYzCw==', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAADk4cAAAAAAUAAAAGAAAAAAAAAAAAAAAA', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAADAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAA0c7wAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAQAAAADAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAAAAAAAAAAADRzvAAAAAAAAAAAAAAAAAQAAAAUAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAAEAAAAAwAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAA0c7wAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAQAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAvrwgAAAAAADRzvAAAAAAAAAAAAAAAAAAAAAAUAAAACAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAIAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAAAAAAADk4cAAAAAAUAAAAGAAAAAAAAAAAAAAAAAAAAAwAAAAQAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAH//////////AAAAAQAAAAAAAAAAAAAAAQAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAOThwAAAAAAAAAAAA=', 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msjUAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7mshwAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{iXHzI6rj32/ZduOJlIh8+WGSNLsppJ12Pj/ektn20VBD7k0xWj92CezdSrgA572XQ3hUXWYUoP7PGvkyXDYzCw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('666656a6eade2082c5780571267d9e4453eee5781ca9a58aa319eb0fe83455fd', 2, 1, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 1, 100, 1, '2019-10-07 17:43:05.776886', '2019-10-07 17:43:05.776886', 8589938688, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rKAAAAAAAAAAABVvwF9wAAAEBdDXe23U4e9C2SxpBLZRx1rJzSFLJ0xDD0uKGpmqbflDT+XXIq6UiDBzmFxt+GO+XqFoQPdrXT7p1oLZIHqTMP', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/4MAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTQMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{XQ13tt1OHvQtksaQS2Ucdayc0hSydMQw9LihqZqm35Q0/l1yKulIgwc5hcbfhjvl6haED3a10+6daC2SB6kzDw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('fbe6d7f49c22a500bf3e20071749ca1237012adcb54fdcc54ecadf70cfa8e859', 5, 3, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934597, 100, 1, '2019-10-07 17:43:05.739274', '2019-10-07 17:43:05.739274', 21474848768, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAA9/SQAAAAAKAAAADQAAAAAAAAAAAAAAAAAAAAFvFIhaAAAAQAsGd4oc7v6XwN6gfcfLX/2YIwjRDFePAMzOUQc+BmIVeVS71wu58s6LG7iYUfGQAi6zUI6orEmc2AHjAyCzCQU=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAADAAAAAAAAAAAAAAAAAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAAAAAAMAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAA9/SQAAAAAKAAAADQAAAAAAAAAAAAAAAA==', 'AAAAAQAAAAIAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAAEAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAL68IAAAAAAA0c7wAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAUAAAAEAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAvrwgAAAAAADRzvAAAAAAAAAAAAAAAAAQAAAAcAAAADAAAABQAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rIDAAAAAIAAAAFAAAABAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAQAAAAAL68IAAAAAAA0c7wAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAUAAAAFAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAABAAAAAAvrwgAAAAAADRzvAAAAAAAAAAAAAAAAAwAAAAUAAAABAAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAH//////////AAAAAQAAAAEAAAAAAAAAAAAAAAAOThwAAAAAAAAAAAAAAAABAAAABQAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAf/////////8AAAABAAAAAQAAAAAAAAAAAAAAAB3NZQAAAAAAAAAAAAAAAAAAAAAFAAAAAgAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAAAAAADAAAAAUVVUgAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAAPf0kAAAAACgAAAA0AAAAAAAAAAAAAAAAAAAADAAAABQAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAf/////////8AAAABAAAAAQAAAAAL68IAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAABAAAAABfXhAAAAAAAAAAAAAAAAAAAAAAA', 'AAAAAgAAAAMAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7mshwAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msgMAAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{CwZ3ihzu/pfA3qB9x8tf/ZgjCNEMV48AzM5RBz4GYhV5VLvXC7nyzosbuJhR8ZACLrNQjqisSZzYAeMDILMJBQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('94f79b72faddea5899929a9b017c1c514751ffa0a7df9fdba7ce81c50f75a820', 4, 1, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934593, 100, 1, '2019-10-07 17:43:05.750226', '2019-10-07 17:43:05.750226', 
17179873280, 'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAAAAAAH5kC3vAAAAQMq2EpK0LXwZiSrwXsmACrBqgXR/+kPIpG1UMgZP4+aV4vT6OEwvAgRBoKaRPjJd5OX8gOBOJv0RKPeQObZm4Qw=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{yrYSkrQtfBmJKvBeyYAKsGqBdH/6Q8ikbVQyBk/j5pXi9Po4TC8CBEGgppE+Ml3k5fyA4E4m/REo95A5tmbhDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('3f08bfa1ecbf93b1e395a4b91a282e543cfb1dd7c1f46e7920bf93238e8249bc', 4, 2, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 8589934593, 100, 1, '2019-10-07 17:43:05.750376', '2019-10-07 17:43:05.750376', 17179877376, 'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAAAAAAEQithJAAAAQMW/qi+OCrL8pnczurJ6BjclRbXCYwA0BJNawKbWczfAwRuhov22mUb1fa2BGLliHf8Rq6RiKgRPBLv1HrKHIws=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{xb+qL44KsvymdzO6snoGNyVFtcJjADQEk1rAptZzN8DBG6Gi/baZRvV9rYEYuWId/xGrpGIqBE8Eu/UesocjCw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('7902b04df0fb33faa59b83f918bf1a793a162bd2dc3d44b9939e757cf7cb7671', 4, 3, 'GC23QF2HUE52AMXUFUH3AYJAXXGXXV2VHXYYR6EYXETPKDXZSAW67XO4', 8589934594, 100, 1, '2019-10-07 17:43:05.750484', '2019-10-07 17:43:05.750484', 17179881472, 
'AAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msoAAAAAAAAAAAH5kC3vAAAAQCCxheB4CZrYiFfMAW6ckj/jTJzHih7CtR8TD0GcvVb6/6BWi9V2nBQvDt/y3NnBtysbFEOM1GV66xFtu8VFGAQ=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAtbgXR6E7oDL0LQ+wYSC9zXvXVT3xiPiYuSb1DvmQLe8AAAAAO5rJOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAADuaygB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt7wAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ILGF4HgJmtiIV8wBbpySP+NMnMeKHsK1HxMPQZy9Vvr/oFaL1XacFC8O3/Lc2cG3KxsUQ4zUZXrrEW27xUUYBA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b1f828384c56e4b024f4275f246580ababff1ae3b9ba61b03897357e57eebc20', 2, 2, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 2, 100, 1, '2019-10-07 17:43:05.777046', '2019-10-07 17:43:05.777046', 8589942784, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAACAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEBdfnFSzZeh17zt82oMdqe4+/xns/kHBdGXf9BIBRYfVZ3DQT3awwZn5LqgIG9JqlvMmR1TKaxcoJQDuqGcCScM', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrNryTQMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmoMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{XX5xUs2Xode87fNqDHanuPv8Z7P5BwXRl3/QSAUWH1Wdw0E92sMGZ+S6oCBvSapbzJkdUymsXKCUA7qhnAknDA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('c93f80667f37df70a29ec0de96ff3381644ac828a4cea1cb6ceb1bcec6fff058', 4, 4, 'GCQPYGH4K57XBDENKKX55KDTWOTK5WDWRQOH2LHEDX3EKVIQRLMESGBG', 8589934594, 100, 1, '2019-10-07 17:43:05.750591', '2019-10-07 17:43:05.750591', 17179885568, 'AAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msoAAAAAAAAAAAEQithJAAAAQG4l7kCAq5aqvS2d/HTtYc7LAa7pSUiiO4KyKJbqmsDgvckGC2dbhcro9tcvCZHfwqTV+ikv8Hm8Zfa63kYPkQY=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAABAAAAAAAAAAA=', 
'AAAAAQAAAAIAAAADAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAABAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAABAAAAAAAAAAAoPwY/Fd/cIyNUq/eqHOzpq7YdowcfSzkHfZFVRCK2EkAAAAAO5rJOAAAAAIAAAACAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAMAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAAAAAAEAAAAEAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAADuaygB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msmcAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAAEAAAAAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSQAAAAA7msk4AAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{biXuQICrlqq9LZ38dO1hzssBrulJSKI7grIoluqawOC9yQYLZ1uFyuj21y8Jkd/CpNX6KS/webxl9rreRg+RBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('811192c38643df73c015a5a1d77b802dff05d4f50fc6d10816aa75c0a6109f9a', 3, 1, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934593, 100, 1, '2019-10-07 17:43:05.765011', '2019-10-07 17:43:05.765011', 12884905984, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAFvFIhaAAAAQPlg7GLhJg0x7jpAw1Ew6H2XF6yRImfJIwFfx09Nui5btOJAFewFANfOaAB8FQZl5p3A5g3k6DHDigfUNUD16gc=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{+WDsYuEmDTHuOkDDUTDofZcXrJEiZ8kjAV/HT026Llu04kAV7AUA185oAHwVBmXmncDmDeToMcOKB9Q1QPXqBw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('bd486dbdd02d460817671c4a5a7e9d6e865ca29cb41e62d7aaf70a2fee5b36de', 3, 2, 'GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU', 8589934593, 100, 1, '2019-10-07 17:43:05.765163', '2019-10-07 17:43:05.765163', 12884910080, 'AAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABVVNEAAAAAAC1uBdHoTugMvQtD7BhIL3Ne9dVPfGI+Ji5JvUO+ZAt73//////////AAAAAAAAAAGu5L5MAAAAQB9kmKW2q3v7Qfy8PMekEb1TTI5ixqkI0BogXrOt7gO162Qbkh2dSTUfeDovc0PAafhDXxthVAlsLujlBmyjBAY=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAFVU0QAAAAAALW4F0ehO6Ay9C0PsGEgvc1711U98Yj4mLkm9Q75kC3vAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 
'AAAAAgAAAAMAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{H2SYpbare/tB/Lw8x6QRvVNMjmLGqQjQGiBes63uA7XrZBuSHZ1JNR94Oi9zQ8Bp+ENfG2FUCWwu6OUGbKMEBg==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('b5cadce05fc0ad5d6fe009b8b0debc0d3dfd32ea42b8eba3e9ea68c2746e410f', 3, 3, 'GA5WBPYA5Y4WAEHXWR2UKO2UO4BUGHUQ74EUPKON2QHV4WRHOIRNKKH2', 8589934593, 100, 1, '2019-10-07 17:43:05.765288', '2019-10-07 17:43:05.765288', 12884914176, 'AAAAADtgvwDuOWAQ97R1RTtUdwNDHpD/CUepzdQPXlonciLVAAAAZAAAAAIAAAABAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAEnciLVAAAAQANQSzvpEBCAXvs1PgmH/UFbfAYt3OAggYPVTd0pjVcJaV3lDE/jOZMnLFZMkFEhg4dluVQxeDZAwTKUPandswg=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{A1BLO+kQEIBe+zU+CYf9QVt8Bi3c4CCBg9VN3SmNVwlpXeUMT+M5kycsVkyQUSGDh2W5VDF4NkDBMpQ9qd2zCA==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('32e4ba1f218b6aa2420b497456a1b09090e3837e66b3495030d4edd60d0f0570', 3, 4, 'GBXGQJWVLWOYHFLVTKWV5FGHA3LNYY2JQKM7OAJAUEQFU6LPCSEFVXON', 8589934594, 100, 1, '2019-10-07 17:43:05.765397', '2019-10-07 17:43:05.765397', 12884918272, 'AAAAAG5oJtVdnYOVdZqtXpTHBtbcY0mCmfcBIKEgWnlvFIhaAAAAZAAAAAIAAAACAAAAAAAAAAAAAAABAAAAAAAAAAYAAAABRVVSAAAAAACg/Bj8V39wjI1Sr96oc7Omrth2jBx9LOQd9kVVEIrYSX//////////AAAAAAAAAAFvFIhaAAAAQMJmv+lhF5QZlgdIqBXDSdhEtgraTrRSwVr5d/BrNC28efHMoxYNa+2u9tSEdxU+hGX6JRW7wAF3bOpA8rxxxAE=', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAGAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAACAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAAAAQAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAFFVVIAAAAAAKD8GPxXf3CMjVKv3qhzs6au2HaMHH0s5B32RVUQithJAAAAAAAAAAB//////////wAAAAEAAAAAAAAAAA==', 'AAAAAgAAAAMAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msmcAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAADAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msk4AAAAAgAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{wma/6WEXlBmWB0ioFcNJ2ES2CtpOtFLBWvl38Gs0Lbx58cyjFg1r7a721IR3FT6EZfolFbvAAXds6kDyvHHEAQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('2b2e82dbabb024b27a0c3140ca71d8ac9bc71831f9f5a3bd69eca3d88fb0ec5c', 2, 3, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 3, 100, 1, '2019-10-07 17:43:05.777164', '2019-10-07 17:43:05.777164', 8589946880, 
'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAbmgm1V2dg5V1mq1elMcG1txjSYKZ9wEgoSBaeW8UiFoAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDJul1tLGLF4Vxwt0dDCVEf6tb5l4byMrGgCp+lVZMmxct54iNf2mxtjx6Md5ZJ4E4Dlcsf46EAhBGSUPsn8fYD', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrMwLmoMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6AMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAABuaCbVXZ2DlXWarV6UxwbW3GNJgpn3ASChIFp5bxSIWgAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/84AAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{ybpdbSxixeFccLdHQwlRH+rW+ZeG8jKxoAqfpVWTJsXLeeIjX9psbY8ejHeWSeBOA5XLH+OhAIQRklD7J/H2Aw==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('e17bae552da0105ad32f0b9aadfd0f623ef37eb486b10b044c19238360e455d7', 2, 4, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 4, 100, 1, '2019-10-07 17:43:05.777272', '2019-10-07 17:43:05.777272', 8589950976, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAEAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAO2C/AO45YBD3tHVFO1R3A0MekP8JR6nN1A9eWidyItUAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDYYfyOrmPhfki6lrP+oCfunJmRu2mfxl40o5qWR7y1YmP8poG+6Xqg41jKCWNwVoP717CVEPe70I0teWvTejkJ', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrL0k6AMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAA7YL8A7jlgEPe0dUU7VHcDQx6Q/wlHqc3UD15aJ3Ii1QAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/7UAAAAAAAAAAMAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{2GH8jq5j4X5Iupaz/qAn7pyZkbtpn8ZeNKOalke8tWJj/KaBvul6oONYygljcFaD+9ewlRD3u9CNLXlr03o5CQ==}', 'none', NULL, NULL, true, 100); +INSERT INTO history_transactions VALUES ('cfd8816ed587c5ed88dea0eb00818caf38c0750e7740e05de3c27176e9aee8ee', 2, 5, 'GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H', 5, 100, 1, '2019-10-07 17:43:05.777376', '2019-10-07 17:43:05.777376', 8589955072, 'AAAAAGL8HQvQkbK2HA3WVjRrKmjX00fG8sLI7m0ERwJW/AX3AAAAZAAAAAAAAAAFAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAArqN6LeOagjxMaUP96Bzfs9e0corNZXzBWJkFoK7kvkwAAAAAO5rKAAAAAAAAAAABVvwF9wAAAEDNmQhdQeyMcWFWP8dVRkDtFS4tHICyKdaPkR6+/L7+tMzKWoUjbDAXscRYI+j6Fd/VFUaDzdYsWCAsH30WujIL', 'AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAA=', 'AAAAAQAAAAAAAAABAAAAAwAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrK4+NYMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrJ9XgwMAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAACuo3ot45qCPExpQ/3oHN+z17Ryis1lfMFYmQWgruS+TAAAAAA7msoAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', 
'AAAAAgAAAAMAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/5wAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/4MAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA==', '{zZkIXUHsjHFhVj/HVUZA7RUuLRyAsinWj5Eevvy+/rTMylqFI2wwF7HEWCPo+hXf1RVGg83WLFggLB99FroyCw==}', 'none', NULL, NULL, true, 100); + + +-- +-- Data for Name: key_value_store; Type: TABLE DATA; Schema: public; Owner: - +-- + +INSERT INTO key_value_store VALUES ('exp_ingest_last_ledger', '0'); + + +-- +-- Data for Name: offers; Type: TABLE DATA; Schema: public; Owner: - +-- + + + +-- +-- Name: accounts_signers accounts_signers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY accounts_signers + ADD CONSTRAINT accounts_signers_pkey PRIMARY KEY (signer, account); + + +-- +-- Name: asset_stats asset_stats_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_pkey PRIMARY KEY (id); + + +-- +-- Name: gorp_migrations gorp_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY gorp_migrations + ADD CONSTRAINT gorp_migrations_pkey PRIMARY KEY (id); + + +-- +-- Name: history_assets history_assets_asset_code_asset_type_asset_issuer_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_asset_code_asset_type_asset_issuer_key UNIQUE (asset_code, asset_type, asset_issuer); + + +-- +-- Name: history_assets history_assets_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_assets + ADD CONSTRAINT history_assets_pkey PRIMARY KEY (id); + + +-- +-- Name: history_operation_participants history_operation_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_operation_participants + ADD CONSTRAINT history_operation_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: history_transaction_participants history_transaction_participants_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_transaction_participants + ADD CONSTRAINT history_transaction_participants_pkey PRIMARY KEY (id); + + +-- +-- Name: key_value_store key_value_store_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY key_value_store + ADD CONSTRAINT key_value_store_pkey PRIMARY KEY (key); + + +-- +-- Name: offers offers_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY offers + ADD CONSTRAINT offers_pkey PRIMARY KEY (offerid); + + +-- +-- Name: asset_by_code; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_code ON history_assets USING btree (asset_code); + + +-- +-- Name: asset_by_issuer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX asset_by_issuer ON history_assets USING btree (asset_issuer); + + +-- +-- Name: by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_account ON history_transactions USING btree (account, account_sequence); + + +-- +-- Name: by_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_hash ON history_transactions USING btree (transaction_hash); + + +-- +-- Name: by_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX by_ledger ON history_transactions USING btree (ledger_sequence, application_order); + + +-- +-- Name: hist_e_by_order; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_by_order ON history_effects USING btree (history_operation_id, "order"); + + 
+-- +-- Name: hist_e_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_e_id ON history_effects USING btree (history_account_id, history_operation_id, "order"); + + +-- +-- Name: hist_op_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_op_p_id ON history_operation_participants USING btree (history_account_id, history_operation_id); + + +-- +-- Name: hist_tx_p_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hist_tx_p_id ON history_transaction_participants USING btree (history_account_id, history_transaction_id); + + +-- +-- Name: hop_by_hoid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX hop_by_hoid ON history_operation_participants USING btree (history_operation_id); + + +-- +-- Name: hs_ledger_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_ledger_by_id ON history_ledgers USING btree (id); + + +-- +-- Name: hs_transaction_by_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX hs_transaction_by_id ON history_transactions USING btree (id); + + +-- +-- Name: htp_by_htid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htp_by_htid ON history_transaction_participants USING btree (history_transaction_id); + + +-- +-- Name: htrd_by_base_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_account ON history_trades USING btree (base_account_id); + + +-- +-- Name: htrd_by_base_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_base_offer ON history_trades USING btree (base_offer_id); + + +-- +-- Name: htrd_by_counter_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_account ON history_trades USING btree (counter_account_id); + + +-- +-- Name: htrd_by_counter_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_counter_offer ON history_trades USING btree (counter_offer_id); + + +-- +-- Name: htrd_by_offer; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_by_offer ON history_trades USING btree (offer_id); + + +-- +-- Name: htrd_counter_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_counter_lookup ON history_trades USING btree (counter_asset_id); + + +-- +-- Name: htrd_pair_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_pair_time_lookup ON history_trades USING btree (base_asset_id, counter_asset_id, ledger_closed_at); + + +-- +-- Name: htrd_pid; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX htrd_pid ON history_trades USING btree (history_operation_id, "order"); + + +-- +-- Name: htrd_time_lookup; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX htrd_time_lookup ON history_trades USING btree (ledger_closed_at); + + +-- +-- Name: index_history_accounts_on_address; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_address ON history_accounts USING btree (address); + + +-- +-- Name: index_history_accounts_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_accounts_on_id ON history_accounts USING btree (id); + + +-- +-- Name: index_history_effects_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_effects_on_type ON history_effects USING btree (type); + + +-- +-- Name: index_history_ledgers_on_closed_at; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_closed_at ON history_ledgers USING btree (closed_at); + + +-- +-- Name: 
index_history_ledgers_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_id ON history_ledgers USING btree (id); + + +-- +-- Name: index_history_ledgers_on_importer_version; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_ledgers_on_importer_version ON history_ledgers USING btree (importer_version); + + +-- +-- Name: index_history_ledgers_on_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_ledger_hash ON history_ledgers USING btree (ledger_hash); + + +-- +-- Name: index_history_ledgers_on_previous_ledger_hash; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_previous_ledger_hash ON history_ledgers USING btree (previous_ledger_hash); + + +-- +-- Name: index_history_ledgers_on_sequence; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_ledgers_on_sequence ON history_ledgers USING btree (sequence); + + +-- +-- Name: index_history_operations_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_operations_on_id ON history_operations USING btree (id); + + +-- +-- Name: index_history_operations_on_transaction_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_transaction_id ON history_operations USING btree (transaction_id); + + +-- +-- Name: index_history_operations_on_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX index_history_operations_on_type ON history_operations USING btree (type); + + +-- +-- Name: index_history_transactions_on_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE UNIQUE INDEX index_history_transactions_on_id ON history_transactions USING btree (id); + + +-- +-- Name: offers_by_buying_asset; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_buying_asset ON offers USING btree (buyingasset); + + +-- +-- Name: offers_by_last_modified_ledger; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_last_modified_ledger ON offers USING btree (last_modified_ledger); + + +-- +-- Name: offers_by_seller; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_seller ON offers USING btree (sellerid); + + +-- +-- Name: offers_by_selling_asset; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX offers_by_selling_asset ON offers USING btree (sellingasset); + + +-- +-- Name: signers_by_account; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX signers_by_account ON accounts_signers USING btree (account); + + +-- +-- Name: trade_effects_by_order_book; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX trade_effects_by_order_book ON history_effects USING btree (((details ->> 'sold_asset_type'::text)), ((details ->> 'sold_asset_code'::text)), ((details ->> 'sold_asset_issuer'::text)), ((details ->> 'bought_asset_type'::text)), ((details ->> 'bought_asset_code'::text)), ((details ->> 'bought_asset_issuer'::text))) WHERE (type = 33); + + +-- +-- Name: asset_stats asset_stats_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY asset_stats + ADD CONSTRAINT asset_stats_id_fkey FOREIGN KEY (id) REFERENCES history_assets(id) ON UPDATE RESTRICT ON DELETE CASCADE; + + +-- +-- Name: history_trades history_trades_base_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_account_id_fkey FOREIGN KEY (base_account_id) REFERENCES 
history_accounts(id); + + +-- +-- Name: history_trades history_trades_base_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_base_asset_id_fkey FOREIGN KEY (base_asset_id) REFERENCES history_assets(id); + + +-- +-- Name: history_trades history_trades_counter_account_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_account_id_fkey FOREIGN KEY (counter_account_id) REFERENCES history_accounts(id); + + +-- +-- Name: history_trades history_trades_counter_asset_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY history_trades + ADD CONSTRAINT history_trades_counter_asset_id_fkey FOREIGN KEY (counter_asset_id) REFERENCES history_assets(id); + +-- added manually +ALTER TABLE history_transactions ADD account_muxed varchar(69) NULL, ADD fee_account_muxed varchar(69) NULL; +ALTER TABLE history_operations ADD source_account_muxed varchar(69) NULL; +ALTER TABLE history_effects ADD address_muxed varchar(69) NULL; + +-- +-- PostgreSQL database dump complete +-- + diff --git a/services/horizon/internal/test/static_mock_server.go b/services/horizon/internal/test/static_mock_server.go new file mode 100644 index 0000000000..eb36f2b81c --- /dev/null +++ b/services/horizon/internal/test/static_mock_server.go @@ -0,0 +1,19 @@ +package test + +import ( + "fmt" + "net/http" + "net/http/httptest" +) + +// NewStaticMockServer creates a new mock server that always responds with +// `response` +func NewStaticMockServer(response string) *StaticMockServer { + result := &StaticMockServer{} + result.Server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + result.LastRequest = r + fmt.Fprintln(w, response) + })) + + return result +} diff --git a/services/horizon/internal/test/t.go b/services/horizon/internal/test/t.go new file mode 100644 index 0000000000..cf497b60d2 --- /dev/null +++ b/services/horizon/internal/test/t.go @@ -0,0 +1,176 @@ +//lint:file-ignore U1001 Ignore all unused code, thinks the code is unused because of the test skips +package test + +import ( + "io" + "testing" + + "encoding/json" + + "github.com/jmoiron/sqlx" + "github.com/sirupsen/logrus" + "github.com/stellar/go/services/horizon/internal/db2/schema" + "github.com/stellar/go/services/horizon/internal/ledger" + "github.com/stellar/go/services/horizon/internal/operationfeestats" + "github.com/stellar/go/services/horizon/internal/test/scenarios" + "github.com/stellar/go/support/db" + "github.com/stellar/go/support/render/hal" +) + +// CoreSession returns a db.Session instance pointing at the stellar core test database +func (t *T) CoreSession() *db.Session { + return &db.Session{ + DB: t.CoreDB, + } +} + +// Finish finishes the test, logging any accumulated horizon logs to the logs +// output +func (t *T) Finish() { + logEntries := t.testLogs() + operationfeestats.ResetState() + + for _, entry := range logEntries { + logString, err := entry.String() + if err != nil { + t.T.Logf("Error from entry.String: %v", err) + } else { + t.T.Log(logString) + } + } +} + +// HorizonSession returns a db.Session instance pointing at the horizon test +// database +func (t *T) HorizonSession() *db.Session { + return &db.Session{ + DB: t.HorizonDB, + } +} + +func (t *T) loadScenario(scenarioName string, includeHorizon bool) { + stellarCorePath := scenarioName + "-core.sql" + + scenarios.Load(StellarCoreDatabaseURL(), stellarCorePath) + + if 
includeHorizon { + horizonPath := scenarioName + "-horizon.sql" + scenarios.Load(DatabaseURL(), horizonPath) + } +} + +// Scenario loads the named sql scenario into the database +func (t *T) Scenario(name string) ledger.Status { + clearHorizonDB(t.T, t.HorizonDB) + t.loadScenario(name, true) + return t.LoadLedgerStatus() +} + +// ScenarioWithoutHorizon loads the named sql scenario into the database +func (t *T) ScenarioWithoutHorizon(name string) ledger.Status { + t.loadScenario(name, false) + ResetHorizonDB(t.T, t.HorizonDB) + return t.LoadLedgerStatus() +} + +// ResetHorizonDB sets up a new horizon database with empty tables +func ResetHorizonDB(t *testing.T, db *sqlx.DB) { + clearHorizonDB(t, db) + _, err := schema.Migrate(db.DB, schema.MigrateUp, 0) + if err != nil { + t.Fatalf("could not run migrations up on test db: %v", err) + } +} + +func clearHorizonDB(t *testing.T, db *sqlx.DB) { + _, err := schema.Migrate(db.DB, schema.MigrateDown, 0) + if err != nil { + t.Fatalf("could not run migrations down on test db: %v", err) + } +} + +// UnmarshalPage populates dest with the records contained in the json-encoded page in r +func (t *T) UnmarshalPage(r io.Reader, dest interface{}) hal.Links { + var env struct { + Embedded struct { + Records json.RawMessage `json:"records"` + } `json:"_embedded"` + Links struct { + Self hal.Link `json:"self"` + Next hal.Link `json:"next"` + Prev hal.Link `json:"prev"` + } `json:"_links"` + } + + err := json.NewDecoder(r).Decode(&env) + t.Require.NoError(err, "failed to decode page") + + err = json.Unmarshal(env.Embedded.Records, dest) + t.Require.NoError(err, "failed to decode records") + + return env.Links +} + +// UnmarshalNext extracts and returns the next link +func (t *T) UnmarshalNext(r io.Reader) string { + var env struct { + Links struct { + Next struct { + Href string `json:"href"` + } `json:"next"` + } `json:"_links"` + } + + err := json.NewDecoder(r).Decode(&env) + t.Require.NoError(err, "failed to decode page") + return env.Links.Next.Href +} + +// UnmarshalExtras extracts and returns extras content +func (t *T) UnmarshalExtras(r io.Reader) map[string]string { + var resp struct { + Extras map[string]string `json:"extras"` + } + + err := json.NewDecoder(r).Decode(&resp) + t.Require.NoError(err, "failed to decode page") + + return resp.Extras +} + +// LoadLedgerStatus loads ledger state from the core db(or panicing on failure). 
+func (t *T) LoadLedgerStatus() ledger.Status { + var next ledger.Status + + err := t.CoreSession().GetRaw(t.Ctx, &next, ` + SELECT + COALESCE(MAX(ledgerseq), 0) as core_latest + FROM ledgerheaders + `) + + if err != nil { + panic(err) + } + + err = t.HorizonSession().GetRaw(t.Ctx, &next, ` + SELECT + COALESCE(MIN(sequence), 0) as history_elder, + COALESCE(MAX(sequence), 0) as history_latest + FROM history_ledgers + `) + + if err != nil { + panic(err) + } + + return next +} + +// retrieves entries from test logger instance +func (t *T) testLogs() []logrus.Entry { + if t.EndLogTest == nil { + return []logrus.Entry{} + } + + return t.EndLogTest() +} diff --git a/services/horizon/internal/test/transactions/main.go b/services/horizon/internal/test/transactions/main.go new file mode 100644 index 0000000000..2cf9a9340a --- /dev/null +++ b/services/horizon/internal/test/transactions/main.go @@ -0,0 +1,48 @@ +//Package transactions offers common infrastructure for testing Transactions +package transactions + +import ( + "encoding/hex" + "testing" + + "github.com/stellar/go/ingest" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +// TestTransaction transaction meta +type TestTransaction struct { + Index uint32 + EnvelopeXDR string + ResultXDR string + FeeChangesXDR string + MetaXDR string + Hash string +} + +// BuildLedgerTransaction builds a ledger transaction +func BuildLedgerTransaction(t *testing.T, tx TestTransaction) ingest.LedgerTransaction { + transaction := ingest.LedgerTransaction{ + Index: tx.Index, + Envelope: xdr.TransactionEnvelope{}, + Result: xdr.TransactionResultPair{}, + FeeChanges: xdr.LedgerEntryChanges{}, + UnsafeMeta: xdr.TransactionMeta{}, + } + + tt := assert.New(t) + + err := xdr.SafeUnmarshalBase64(tx.EnvelopeXDR, &transaction.Envelope) + tt.NoError(err) + err = xdr.SafeUnmarshalBase64(tx.ResultXDR, &transaction.Result.Result) + tt.NoError(err) + err = xdr.SafeUnmarshalBase64(tx.MetaXDR, &transaction.UnsafeMeta) + tt.NoError(err) + err = xdr.SafeUnmarshalBase64(tx.FeeChangesXDR, &transaction.FeeChanges) + tt.NoError(err) + + _, err = hex.Decode(transaction.Result.TransactionHash[:], []byte(tx.Hash)) + tt.NoError(err) + + return transaction +} diff --git a/services/horizon/internal/tls/localhost.conf b/services/horizon/internal/tls/localhost.conf new file mode 100644 index 0000000000..30f5f23dbd --- /dev/null +++ b/services/horizon/internal/tls/localhost.conf @@ -0,0 +1,17 @@ +[ req ] +distinguished_name = req_distinguished_name + +[ req_distinguished_name ] +C = US +C_default = US +ST = California +ST_default = California +L = San Francisco +L_default = San Francisco +O = Stellar Development Foundation +O_default = Stellar Development Foundation +OU = Engineering +OU_default = Engineering +CN = localhost:8000 +CN_default = localhost:8000 +emailAddress_default = diff --git a/services/horizon/internal/tls/regen.sh b/services/horizon/internal/tls/regen.sh new file mode 100755 index 0000000000..6027f42a05 --- /dev/null +++ b/services/horizon/internal/tls/regen.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +pushd $DIR + +openssl genrsa -des3 -passout pass:x -out new.pass.key 2048 +openssl rsa -passin pass:x -in new.pass.key -out new.key +rm new.pass.key +openssl req -new -key new.key -out new.csr -config localhost.conf +openssl x509 -req -days 365 -in new.csr -signkey new.key -out new.crt + +mv new.csr server.csr +mv new.crt server.crt +mv new.key server.key diff --git 
a/services/horizon/internal/toid/main.go b/services/horizon/internal/toid/main.go new file mode 100644 index 0000000000..9ce468f5d1 --- /dev/null +++ b/services/horizon/internal/toid/main.go @@ -0,0 +1,174 @@ +package toid + +import ( + "errors" + "fmt" +) + +// +// ID represents the total order of Ledgers, Transactions and +// Operations. +// +// Operations within the stellar network have a total order, expressed by three +// pieces of information: the ledger sequence the operation was validated in, +// the order which the operation's containing transaction was applied in +// that ledger, and the index of the operation within that parent transaction. +// +// We express this order by packing those three pieces of information into a +// single signed 64-bit number (we used a signed number for SQL compatibility). +// +// The follow diagram shows this format: +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Ledger Sequence Number | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Transaction Application Order | Op Index | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// By component: +// +// Ledger Sequence: 32-bits +// +// A complete ledger sequence number in which the operation was validated. +// +// Expressed in network byte order. +// +// Transaction Application Order: 20-bits +// +// The order that the transaction was applied within the ledger it was +// validated. Accommodates up to 1,048,575 transactions in a single ledger. +// +// Expressed in network byte order. +// +// Operation Index: 12-bits +// +// The index of the operation within its parent transaction. Accommodates up +// to 4095 operations per transaction. +// +// Expressed in network byte order. +// +// +// Note: API Clients should not be interpreting this value. We will use it +// as an opaque paging token that clients can parrot back to us after having read +// it within a resource to page from the represented position in time. +// +// Note: This does not uniquely identify an object. Given a ledger, it will +// share its id with its first transaction and the first operation of that +// transaction as well. Given that this ID is only meant for ordering within a +// single type of object, the sharing of ids across object types seems +// acceptable. +// +type ID struct { + LedgerSequence int32 + TransactionOrder int32 + OperationOrder int32 +} + +const ( + // LedgerMask is the bitmask to mask out ledger sequences in a + // TotalOrderID + LedgerMask = (1 << 32) - 1 + // TransactionMask is the bitmask to mask out transaction indexes + TransactionMask = (1 << 20) - 1 + // OperationMask is the bitmask to mask out operation indexes + OperationMask = (1 << 12) - 1 + + // LedgerShift is the number of bits to shift an int64 to target the + // ledger component + LedgerShift = 32 + // TransactionShift is the number of bits to shift an int64 to + // target the transaction component + TransactionShift = 12 + // OperationShift is the number of bits to shift an int64 to target + // the operation component + OperationShift = 0 +) + +// AfterLedger returns a new toid that represents the ledger time _after_ any +// contents (e.g. transactions, operations) that occur within the specified +// ledger. 
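+//
+// For example (an illustrative note, not part of the original code): the
+// paging token for the first operation of the second transaction in ledger 3
+// is
+//
+//	New(3, 2, 1).ToInt64() // (3 << 32) | (2 << 12) | 1 == 12884910081
+//
+// and AfterLedger(3).ToInt64() is greater than every such token in ledger 3.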
+func AfterLedger(seq int32) *ID { + return New(seq, TransactionMask, OperationMask) +} + +// LedgerRangeInclusive returns inclusive range representation between two +// ledgers inclusive. The second value points at the to+1 ledger so when using +// this value make sure < order is used. +func LedgerRangeInclusive(from, to int32) (int64, int64, error) { + if from > to { + return 0, 0, errors.New("Invalid range: from > to") + } + + if from <= 0 || to <= 0 { + return 0, 0, errors.New("Invalid range: from or to negative") + } + + var toidFrom, toidTo int64 + if from == 1 { + toidFrom = 0 + } else { + toidFrom = New(from, 0, 0).ToInt64() + } + + toidTo = New(to+1, 0, 0).ToInt64() + + return toidFrom, toidTo, nil +} + +// IncOperationOrder increments the operation order, rolling over to the next +// ledger if overflow occurs. This allows queries to easily advance a cursor to +// the next operation. +func (id *ID) IncOperationOrder() { + id.OperationOrder++ + + if id.OperationOrder > OperationMask { + id.OperationOrder = 0 + id.LedgerSequence++ + } +} + +// New creates a new total order ID +func New(ledger int32, tx int32, op int32) *ID { + return &ID{ + LedgerSequence: ledger, + TransactionOrder: tx, + OperationOrder: op, + } +} + +// ToInt64 converts this struct back into an int64 +func (id ID) ToInt64() (result int64) { + + if id.LedgerSequence < 0 { + panic("invalid ledger sequence") + } + + if id.TransactionOrder > TransactionMask { + panic("transaction order overflow") + } + + if id.OperationOrder > OperationMask { + panic("operation order overflow") + } + + result = result | ((int64(id.LedgerSequence) & LedgerMask) << LedgerShift) + result = result | ((int64(id.TransactionOrder) & TransactionMask) << TransactionShift) + result = result | ((int64(id.OperationOrder) & OperationMask) << OperationShift) + return +} + +// String returns a string representation of this id +func (id ID) String() string { + return fmt.Sprintf("%d", id.ToInt64()) +} + +// Parse parses an int64 into a TotalOrderID struct +func Parse(id int64) (result ID) { + result.LedgerSequence = int32((id >> LedgerShift) & LedgerMask) + result.TransactionOrder = int32((id >> TransactionShift) & TransactionMask) + result.OperationOrder = int32((id >> OperationShift) & OperationMask) + + return +} diff --git a/services/horizon/internal/toid/main_test.go b/services/horizon/internal/toid/main_test.go new file mode 100644 index 0000000000..70433b1ac9 --- /dev/null +++ b/services/horizon/internal/toid/main_test.go @@ -0,0 +1,181 @@ +package toid + +import ( + "fmt" + "math" + "testing" + + "github.com/stretchr/testify/assert" +) + +var ledger = int64(4294967296) // ledger sequence 1 +var tx = int64(4096) // tx index 1 +var op = int64(1) // op index 1 + +func TestID_ToInt64(t *testing.T) { + testCases := []struct { + id *ID + expected int64 + shouldPanic bool + }{ + // accomodates 12-bits of precision for the operation field + { + id: &ID{0, 0, 1}, + expected: 1, + }, + { + id: &ID{0, 0, 4095}, + expected: 4095, + }, + { + id: &ID{0, 0, 4096}, + shouldPanic: true, + }, + // accomodates 20-bits of precision for the transaction field + { + id: &ID{0, 1, 0}, + expected: 4096, + }, + { + id: &ID{0, 1048575, 0}, + expected: 4294963200, + }, + { + id: &ID{0, 1048576, 0}, + shouldPanic: true, + }, + // accomodates 32-bits of precision for the ledger field + { + id: &ID{1, 0, 0}, + expected: 4294967296, + }, + { + id: &ID{math.MaxInt32, 0, 0}, + expected: 9223372032559808512, + }, + { + id: &ID{-1, 0, 0}, + shouldPanic: true, + }, + { + id: 
&ID{math.MinInt32, 0, 0}, + shouldPanic: true, + }, + // works as expected + { + id: &ID{1, 1, 1}, + expected: ledger + tx + op, + }, + { + id: &ID{1, 1, 0}, + expected: ledger + tx, + }, + { + id: &ID{1, 0, 1}, + expected: ledger + op, + }, + { + id: &ID{1, 0, 0}, + expected: ledger, + }, + { + id: &ID{0, 1, 0}, + expected: tx, + }, + { + id: &ID{0, 0, 1}, + expected: op, + }, + { + id: &ID{0, 0, 0}, + expected: 0, + }, + } + for _, tc := range testCases { + t.Run("Testing ToInt64", func(t *testing.T) { + if tc.shouldPanic { + assert.Panics(t, func() { + tc.id.ToInt64() + }) + return + } + assert.Equal(t, tc.expected, tc.id.ToInt64()) + }) + } +} + +func TestParse(t *testing.T) { + testCases := []struct { + parsed ID + expected ID + }{ + {Parse(ledger + tx + op), ID{1, 1, 1}}, + {Parse(ledger + tx), ID{1, 1, 0}}, + {Parse(ledger + op), ID{1, 0, 1}}, + {Parse(ledger), ID{1, 0, 0}}, + {Parse(tx), ID{0, 1, 0}}, + {Parse(op), ID{0, 0, 1}}, + } + for _, tc := range testCases { + t.Run("Testing Parse", func(t *testing.T) { + assert.Equal(t, tc.expected, tc.parsed) + }) + } +} + +// Test InOperationOrder to make sure it rolls over to the next ledger sequence if overflow occurs. +func TestID_IncOperationOrder(t *testing.T) { + tid := ID{0, 0, 0} + tid.IncOperationOrder() + assert.Equal(t, int32(1), tid.OperationOrder) + tid.OperationOrder = OperationMask + tid.IncOperationOrder() + assert.Equal(t, int32(0), tid.OperationOrder) + assert.Equal(t, int32(1), tid.LedgerSequence) +} + +func ExampleParse() { + toid := Parse(12884910080) + fmt.Printf("ledger:%d, tx:%d, op:%d", toid.LedgerSequence, toid.TransactionOrder, toid.OperationOrder) + // Output: ledger:3, tx:2, op:0 +} + +func TestLedgerRangeInclusive(t *testing.T) { + testCases := []struct { + from int32 + to int32 + + fromLedger int32 + toLedger int32 + }{ + {1, 1, 0, 2}, + {1, 2, 0, 3}, + {2, 2, 2, 3}, + {2, 3, 2, 4}, + } + for _, tc := range testCases { + t.Run("Testing TestLedgerRangeInclusive", func(t *testing.T) { + toidFrom, toidTo, err := LedgerRangeInclusive(tc.from, tc.to) + assert.NoError(t, err) + + id := Parse(toidFrom) + assert.Equal(t, tc.fromLedger, id.LedgerSequence) + assert.Equal(t, int32(0), id.TransactionOrder) + assert.Equal(t, int32(0), id.OperationOrder) + + id = Parse(toidTo) + assert.Equal(t, tc.toLedger, id.LedgerSequence) + assert.Equal(t, int32(0), id.TransactionOrder) + assert.Equal(t, int32(0), id.OperationOrder) + }) + } + + _, _, err := LedgerRangeInclusive(2, 1) + assert.Error(t, err) + + _, _, err = LedgerRangeInclusive(-1, 1) + assert.Error(t, err) + + _, _, err = LedgerRangeInclusive(-3, -5) + assert.Error(t, err) +} diff --git a/services/horizon/internal/txsub/doc.go b/services/horizon/internal/txsub/doc.go new file mode 100644 index 0000000000..1d392fcd0c --- /dev/null +++ b/services/horizon/internal/txsub/doc.go @@ -0,0 +1,12 @@ +// Package txsub provides the machinery that horizon uses to submit transactions to +// the stellar network and track their progress. It also helps to hide some of the +// complex asynchronous nature of transaction submission, waiting to respond to +// submitters when no definitive state is known. 
+package txsub + +// Package layout: +// - main.go: interface and result types +// - errors.go: error definitions exposed by txsub +// - system.go: txsub.System, the struct that ties all the interfaces together +// - open_submission_list.go: A default implementation of the OpenSubmissionList interface +// - submitter.go: A default implementation of the Submitter interface diff --git a/services/horizon/internal/txsub/errors.go b/services/horizon/internal/txsub/errors.go new file mode 100644 index 0000000000..7821537d7f --- /dev/null +++ b/services/horizon/internal/txsub/errors.go @@ -0,0 +1,91 @@ +package txsub + +import ( + "encoding/hex" + "errors" + "fmt" + + "github.com/stellar/go/services/horizon/internal/codes" + "github.com/stellar/go/xdr" +) + +var ( + ErrNoResults = errors.New("No result found") + ErrCanceled = errors.New("canceled") + ErrTimeout = errors.New("timeout") + + // ErrBadSequence is a canned error response for transactions whose sequence + // number is wrong. + ErrBadSequence = &FailedTransactionError{"AAAAAAAAAAD////7AAAAAA=="} + // ErrNoAccount is returned when the source account for the transaction + // cannot be found in the database + ErrNoAccount = &FailedTransactionError{"AAAAAAAAAAD////4AAAAAA=="} +) + +// FailedTransactionError represent an error that occurred because +// stellar-core rejected the transaction. ResultXDR is a base64 +// encoded TransactionResult struct +type FailedTransactionError struct { + ResultXDR string +} + +func (err *FailedTransactionError) Error() string { + return fmt.Sprintf("tx failed: %s", err.ResultXDR) +} + +func (fte *FailedTransactionError) Result() (result xdr.TransactionResult, err error) { + err = xdr.SafeUnmarshalBase64(fte.ResultXDR, &result) + return +} + +// ResultCodes represents the result codes from a request attempting to submit a fee bump transaction. +type ResultCodes struct { + Code string + InnerCode string +} + +func (fte *FailedTransactionError) TransactionResultCodes(transactionHash string) (result ResultCodes, err error) { + r, err := fte.Result() + if err != nil { + return + } + + if innerResultPair, ok := r.Result.GetInnerResultPair(); ok { + // This handles the case of a transaction which was fee bumped by another request. + // The request submitting the inner transaction should have a view of the inner result, + // instead of the fee bump transaction. + if transactionHash == hex.EncodeToString(innerResultPair.TransactionHash[:]) { + result.Code, err = codes.String(innerResultPair.Result.Result.Code) + return + } + result.InnerCode, err = codes.String(innerResultPair.Result.Result.Code) + if err != nil { + return + } + } + result.Code, err = codes.String(r.Result.Code) + return +} + +func (fte *FailedTransactionError) OperationResultCodes() (result []string, err error) { + r, err := fte.Result() + if err != nil { + return + } + oprs, ok := r.OperationResults() + + if !ok { + return + } + + result = make([]string, len(oprs)) + + for i, opr := range oprs { + result[i], err = codes.ForOperationResult(opr) + if err != nil { + return + } + } + + return +} diff --git a/services/horizon/internal/txsub/helpers_test.go b/services/horizon/internal/txsub/helpers_test.go new file mode 100644 index 0000000000..119515ebc2 --- /dev/null +++ b/services/horizon/internal/txsub/helpers_test.go @@ -0,0 +1,69 @@ +package txsub + +// This file provides mock implementations for the txsub interfaces +// which are useful in a testing context. 
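+//
+// For instance (an illustrative sketch; ctx and env are assumed to be
+// defined by the caller), a test can stub out core submission with:
+//
+//	sub := &MockSubmitter{R: SubmissionResult{}}
+//	sr := sub.Submit(ctx, env) // returns sub.R and sets sub.WasSubmittedTo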
+// +// NOTE: this file is not a test file so that other packages may import +// txsub and use these mocks in their own tests + +import ( + "context" + "database/sql" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stretchr/testify/mock" +) + +// MockSubmitter is a test helper that simplements the Submitter interface +type MockSubmitter struct { + R SubmissionResult + WasSubmittedTo bool +} + +// Submit implements `txsub.Submitter` +func (sub *MockSubmitter) Submit(ctx context.Context, env string) SubmissionResult { + sub.WasSubmittedTo = true + return sub.R +} + +type mockDBQ struct { + mock.Mock +} + +func (m *mockDBQ) BeginTx(txOpts *sql.TxOptions) error { + args := m.Called(txOpts) + return args.Error(0) +} + +func (m *mockDBQ) Rollback() error { + args := m.Called() + return args.Error(0) +} + +func (m *mockDBQ) NoRows(err error) bool { + args := m.Called(err) + return args.Bool(0) +} + +func (m *mockDBQ) GetLatestHistoryLedger(ctx context.Context) (uint32, error) { + args := m.Called() + return args.Get(0).(uint32), args.Error(1) +} + +func (m *mockDBQ) GetSequenceNumbers(ctx context.Context, addresses []string) (map[string]uint64, error) { + args := m.Called(ctx, addresses) + return args.Get(0).(map[string]uint64), args.Error(1) +} + +func (m *mockDBQ) TransactionsByHashesSinceLedger(ctx context.Context, hashes []string, sinceLedgerSeq uint32) ([]history.Transaction, error) { + args := m.Called(ctx, hashes, sinceLedgerSeq) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]history.Transaction), args.Error(1) +} + +func (m *mockDBQ) TransactionByHash(ctx context.Context, dest interface{}, hash string) error { + args := m.Called(ctx, dest, hash) + return args.Error(0) +} diff --git a/services/horizon/internal/txsub/main.go b/services/horizon/internal/txsub/main.go new file mode 100644 index 0000000000..ede2df8dd8 --- /dev/null +++ b/services/horizon/internal/txsub/main.go @@ -0,0 +1,89 @@ +package txsub + +import ( + "time" + + "context" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/xdr" +) + +// Listener represents some client who is interested in retrieving the result +// of a specific transaction. +type Listener chan<- Result + +// OpenSubmissionList represents the structure that tracks pending transactions +// and forwards Result structs on to listeners as they become available. +// +// NOTE: An implementation of this interface will be called from multiple go-routines +// concurrently. +// +// NOTE: A Listener must be a buffered channel. A panic will trigger if you +// provide an unbuffered channel +type OpenSubmissionList interface { + // Add registers the provided listener as interested in being notified when a + // result is available for the provided transaction hash. + Add(context.Context, string, Listener) error + + // Finish forwards the provided result on to any listeners and cleans up any + // resources associated with the transaction that this result is for + Finish(context.Context, string, Result) error + + // Clean removes any open submissions over the provided age. + Clean(context.Context, time.Duration) (int, error) + + // Pending return a list of transaction hashes that have at least one + // listener registered to them in this list. + Pending(context.Context) []string +} + +// Submitter represents the low-level "submit a transaction to stellar-core" +// provider. 
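+//
+// A minimal usage sketch (illustrative; the URL and envelope variable are
+// assumptions, not values taken from this change):
+//
+//	sub := NewDefaultSubmitter(http.DefaultClient, "http://localhost:11626")
+//	sr := sub.Submit(ctx, envelopeBase64)
+//	if sr.Err != nil {
+//		// rejected by stellar-core or the submission itself failed
+//	}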
+type Submitter interface { + // Submit sends the provided transaction envelope to stellar-core + Submit(context.Context, string) SubmissionResult +} + +// Result represents the response from a ResultProvider. Given no +// Err is set, the rest of the struct should be populated appropriately. +type Result struct { + // Any error that occurred during the retrieval of this result + Err error + + // The full details of the transaction which was submitted + // to Stellar Core + Transaction history.Transaction +} + +// SubmissionResult gets returned in response to a call to Submitter.Submit. +// It represents a single discrete submission of a transaction envelope to +// the stellar network. +type SubmissionResult struct { + // Any error that occurred during the attempted submission. A nil value + // indicates that the submission will or already is being considered for + // inclusion in the ledger (i.e. A successful submission). + Err error + + // Duration records the time it took to submit a transaction + // to stellar-core + Duration time.Duration +} + +func (s SubmissionResult) IsBadSeq() (bool, error) { + if s.Err == nil { + return false, nil + } + + fte, ok := s.Err.(*FailedTransactionError) + if !ok { + return false, nil + } + + result, err := fte.Result() + if err != nil { + return false, err + } + + return result.Result.Code == xdr.TransactionResultCodeTxBadSeq, nil +} diff --git a/services/horizon/internal/txsub/open_submission_list.go b/services/horizon/internal/txsub/open_submission_list.go new file mode 100644 index 0000000000..9b64915732 --- /dev/null +++ b/services/horizon/internal/txsub/open_submission_list.go @@ -0,0 +1,123 @@ +package txsub + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/go-errors/errors" + "github.com/stellar/go/support/log" +) + +// NewDefaultSubmissionList returns a list that manages open submissions purely +// in memory. 
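+//
+// Usage sketch (illustrative; ctx and txHash are assumed, and txHash must be
+// 64 hex characters):
+//
+//	list := NewDefaultSubmissionList()
+//	l := make(chan Result, 1) // must be buffered, or Add panics
+//	_ = list.Add(ctx, txHash, l)
+//	_ = list.Finish(ctx, txHash, Result{})
+//	r := <-l // every registered listener receives the result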
+func NewDefaultSubmissionList() OpenSubmissionList { + return &submissionList{ + submissions: map[string]*openSubmission{}, + log: log.DefaultLogger.WithField("service", "txsub.submissionList"), + } +} + +// openSubmission tracks a slice of channels that should be emitted to when we +// know the result for the transactions with the provided hash +type openSubmission struct { + Hash string + SubmittedAt time.Time + Listeners []Listener +} + +type submissionList struct { + sync.Mutex + submissions map[string]*openSubmission // hash => `*openSubmission` + log *log.Entry +} + +func (s *submissionList) Add(ctx context.Context, hash string, l Listener) error { + s.Lock() + defer s.Unlock() + + if cap(l) == 0 { + panic("Unbuffered listener cannot be added to OpenSubmissionList") + } + + if len(hash) != 64 { + return errors.New("Unexpected transaction hash length: must be 64 hex characters") + } + + os, ok := s.submissions[hash] + + if !ok { + os = &openSubmission{ + Hash: hash, + SubmittedAt: time.Now(), + Listeners: []Listener{}, + } + s.submissions[hash] = os + s.log.WithField("hash", hash).Info("Created a new submission for a transaction") + } else { + s.log.WithField("hash", hash).Info("Adding listener to existing submission") + } + + os.Listeners = append(os.Listeners, l) + + return nil +} + +func (s *submissionList) Finish(ctx context.Context, hash string, r Result) error { + s.Lock() + defer s.Unlock() + + os, ok := s.submissions[hash] + if !ok { + return nil + } + + s.log.WithFields(log.F{ + "hash": hash, + "listeners": len(os.Listeners), + "result": fmt.Sprintf("%+v", r), + }).Info("Sending submission result to listeners") + + for _, l := range os.Listeners { + l <- r + close(l) + } + + delete(s.submissions, hash) + return nil +} + +func (s *submissionList) Clean(ctx context.Context, maxAge time.Duration) (int, error) { + s.Lock() + defer s.Unlock() + + for _, os := range s.submissions { + if time.Since(os.SubmittedAt) > maxAge { + s.log.WithFields(log.F{ + "hash": os.Hash, + "listeners": len(os.Listeners), + }).Warn("Cleared submission due to timeout") + r := Result{Err: ErrTimeout} + delete(s.submissions, os.Hash) + for _, l := range os.Listeners { + l <- r + close(l) + } + } + } + + return len(s.submissions), nil +} + +func (s *submissionList) Pending(ctx context.Context) []string { + s.Lock() + defer s.Unlock() + results := make([]string, 0, len(s.submissions)) + + for hash := range s.submissions { + results = append(results, hash) + } + + return results +} diff --git a/services/horizon/internal/txsub/open_submission_list_test.go b/services/horizon/internal/txsub/open_submission_list_test.go new file mode 100644 index 0000000000..f8155363d1 --- /dev/null +++ b/services/horizon/internal/txsub/open_submission_list_test.go @@ -0,0 +1,151 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package txsub + +import ( + "context" + "testing" + "time" + + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/support/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +type SubmissionListTestSuite struct { + suite.Suite + list OpenSubmissionList + realList *submissionList + listeners []chan Result + hashes []string + ctx context.Context +} + +func (suite *SubmissionListTestSuite) SetupTest() { + suite.list = NewDefaultSubmissionList() + suite.realList = suite.list.(*submissionList) + suite.hashes = []string{ + "0000000000000000000000000000000000000000000000000000000000000000", + 
"0000000000000000000000000000000000000000000000000000000000000001", + } + suite.listeners = []chan Result{ + make(chan Result, 1), + make(chan Result, 1), + } + suite.ctx = test.Context() +} + +func (suite *SubmissionListTestSuite) TestSubmissionList_Add() { + // adds an entry to the submission list when a new hash is used + suite.list.Add(suite.ctx, suite.hashes[0], suite.listeners[0]) + sub := suite.realList.submissions[suite.hashes[0]] + assert.Equal(suite.T(), suite.hashes[0], sub.Hash) + assert.WithinDuration(suite.T(), sub.SubmittedAt, time.Now(), 1*time.Second) + + // drop the send side of the channel by casting to listener + var l Listener = suite.listeners[0] + assert.Equal(suite.T(), l, sub.Listeners[0]) + +} + +func (suite *SubmissionListTestSuite) TestSubmissionList_AddListener() { + // adds an listener to an existing entry when a hash is used with a new listener + suite.list.Add(suite.ctx, suite.hashes[0], suite.listeners[0]) + sub := suite.realList.submissions[suite.hashes[0]] + st := sub.SubmittedAt + <-time.After(20 * time.Millisecond) + suite.list.Add(suite.ctx, suite.hashes[0], suite.listeners[1]) + + // increases the size of the listener + assert.Equal(suite.T(), 2, len(sub.Listeners)) + // doesn't update the submitted at time + assert.Equal(suite.T(), true, st == sub.SubmittedAt) + + // Panics when the listener is not buffered + // panics when the listener is not buffered + assert.Panics(suite.T(), func() { + suite.list.Add(suite.ctx, suite.hashes[0], make(Listener)) + }) + + // errors when the provided hash is not 64-bytes + err := suite.list.Add(suite.ctx, "123", suite.listeners[0]) + assert.NotNil(suite.T(), err) +} + +func (suite *SubmissionListTestSuite) TestSubmissionList_Finish() { + + suite.list.Add(suite.ctx, suite.hashes[0], suite.listeners[0]) + suite.list.Add(suite.ctx, suite.hashes[0], suite.listeners[1]) + r := Result{Err: errors.New("test error")} + suite.list.Finish(suite.ctx, suite.hashes[0], r) + + // Wries to every listener + r1, ok1 := <-suite.listeners[0] + + assert.Equal(suite.T(), r, r1) + assert.True(suite.T(), ok1) + + r2, ok2 := <-suite.listeners[1] + assert.Equal(suite.T(), r, r2) + assert.True(suite.T(), ok2) + + // Removes the entry + _, ok := suite.realList.submissions[suite.hashes[0]] + assert.False(suite.T(), ok) + + // Closes every ledger + _, _ = <-suite.listeners[0] + _, more := <-suite.listeners[0] + assert.False(suite.T(), more) + + _, _ = <-suite.listeners[1] + _, more = <-suite.listeners[1] + assert.False(suite.T(), more) + + // works when no one is waiting for the result + err := suite.list.Finish(suite.ctx, suite.hashes[0], r) + assert.Nil(suite.T(), err) +} + +func (suite *SubmissionListTestSuite) TestSubmissionList_Clean() { + + suite.list.Add(suite.ctx, suite.hashes[0], suite.listeners[0]) + <-time.After(200 * time.Millisecond) + suite.list.Add(suite.ctx, suite.hashes[1], suite.listeners[1]) + left, err := suite.list.Clean(suite.ctx, 200*time.Millisecond) + + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), 1, left) + + // removes submissions older than the maxAge provided + _, ok := suite.realList.submissions[suite.hashes[0]] + assert.False(suite.T(), ok) + + // leaves submissions that are younger than the maxAge provided + _, ok = suite.realList.submissions[suite.hashes[1]] + assert.True(suite.T(), ok) + + // closes any cleaned listeners + assert.Equal(suite.T(), 1, len(suite.listeners[0])) + <-suite.listeners[0] + select { + case _, stillOpen := <-suite.listeners[0]: + assert.False(suite.T(), stillOpen) + default: + 
panic("cleaned listener is still open") + } +} + +//Tests that Pending works as expected +func (suite *SubmissionListTestSuite) TestSubmissionList_Pending() { + assert.Equal(suite.T(), 0, len(suite.list.Pending(suite.ctx))) + suite.list.Add(suite.ctx, suite.hashes[0], suite.listeners[0]) + assert.Equal(suite.T(), 1, len(suite.list.Pending(suite.ctx))) + suite.list.Add(suite.ctx, suite.hashes[1], suite.listeners[1]) + assert.Equal(suite.T(), 2, len(suite.list.Pending(suite.ctx))) +} + +func TestSubmissionListTestSuite(t *testing.T) { + suite.Run(t, new(SubmissionListTestSuite)) +} diff --git a/services/horizon/internal/txsub/results.go b/services/horizon/internal/txsub/results.go new file mode 100644 index 0000000000..575261d061 --- /dev/null +++ b/services/horizon/internal/txsub/results.go @@ -0,0 +1,74 @@ +package txsub + +import ( + "context" + "database/sql" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +func txResultByHash(ctx context.Context, db HorizonDB, hash string) (history.Transaction, error) { + // query history database + var hr history.Transaction + err := db.TransactionByHash(ctx, &hr, hash) + if err == nil { + return txResultFromHistory(hr) + } + + if !db.NoRows(err) { + return hr, errors.Wrap(err, "could not lookup transaction by hash") + } + + // if no result was found in either db, return ErrNoResults + return hr, ErrNoResults +} + +func txResultFromHistory(tx history.Transaction) (history.Transaction, error) { + var txResult xdr.TransactionResult + err := xdr.SafeUnmarshalBase64(tx.TxResult, &txResult) + if err == nil { + if !txResult.Successful() { + err = &FailedTransactionError{ + ResultXDR: tx.TxResult, + } + } + } else { + err = errors.Wrap(err, "could not unmarshall transaction result") + } + + return tx, err +} + +// checkTxAlreadyExists uses a repeatable read transaction to look up both transaction results +// and sequence numbers. Without the repeatable read transaction it is possible that the two database +// queries execute on different ledgers. In this case, txsub can mistakenly respond with a bad_seq error +// because the first query occurs when the tx is not yet ingested and the second query occurs when the tx +// is ingested. 
+func checkTxAlreadyExists(ctx context.Context, db HorizonDB, hash, sourceAddress string) (history.Transaction, uint64, error) { + err := db.BeginTx(&sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }) + if err != nil { + return history.Transaction{}, 0, errors.Wrap(err, "cannot start repeatable read tx") + } + defer db.Rollback() + + tx, err := txResultByHash(ctx, db, hash) + if err == ErrNoResults { + var sequenceNumbers map[string]uint64 + sequenceNumbers, err = db.GetSequenceNumbers(ctx, []string{sourceAddress}) + if err != nil { + return tx, 0, errors.Wrapf(err, "cannot fetch sequence number for %v", sourceAddress) + } + + num, ok := sequenceNumbers[sourceAddress] + if !ok { + return tx, 0, ErrNoAccount + } + return tx, num, ErrNoResults + } + return tx, 0, err +} diff --git a/services/horizon/internal/txsub/results_test.go b/services/horizon/internal/txsub/results_test.go new file mode 100644 index 0000000000..2a04d7ecdd --- /dev/null +++ b/services/horizon/internal/txsub/results_test.go @@ -0,0 +1,52 @@ +package txsub + +import ( + "testing" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestGetIngestedTx(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &history.Q{SessionInterface: tt.HorizonSession()} + hash := "2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d" + tx, err := txResultByHash(tt.Ctx, q, hash) + tt.Assert.NoError(err) + tt.Assert.Equal(hash, tx.TransactionHash) +} + +func TestGetIngestedTxHashes(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &history.Q{SessionInterface: tt.HorizonSession()} + hashes := []string{"2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d"} + txs, err := q.TransactionsByHashesSinceLedger(tt.Ctx, hashes, 0) + tt.Assert.NoError(err) + tt.Assert.Equal(hashes[0], txs[0].TransactionHash) +} + +func TestGetMissingTx(t *testing.T) { + tt := test.Start(t) + tt.Scenario("base") + defer tt.Finish() + q := &history.Q{SessionInterface: tt.HorizonSession()} + hash := "adf1efb9fd253f53cbbe6230c131d2af19830328e52b610464652d67d2fb7195" + + _, err := txResultByHash(tt.Ctx, q, hash) + tt.Assert.Equal(ErrNoResults, err) +} + +func TestGetFailedTx(t *testing.T) { + tt := test.Start(t) + tt.Scenario("failed_transactions") + defer tt.Finish() + q := &history.Q{SessionInterface: tt.HorizonSession()} + hash := "aa168f12124b7c196c0adaee7c73a64d37f99428cacb59a91ff389626845e7cf" + + _, err := txResultByHash(tt.Ctx, q, hash) + tt.Assert.Equal("AAAAAAAAAGT/////AAAAAQAAAAAAAAAB/////gAAAAA=", err.(*FailedTransactionError).ResultXDR) +} diff --git a/services/horizon/internal/txsub/sequence/doc.go b/services/horizon/internal/txsub/sequence/doc.go new file mode 100644 index 0000000000..805e571090 --- /dev/null +++ b/services/horizon/internal/txsub/sequence/doc.go @@ -0,0 +1,3 @@ +// Package sequence providers helpers to manage sequence numbers on behalf of horizon clients. +// See Manager for more details on the api. 
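+//
+// A rough usage sketch (illustrative; the address and sequence values are
+// placeholders):
+//
+//	mgr := sequence.NewManager()
+//	ready := mgr.Push(accountAddress, 101)             // intent to submit seq 101
+//	mgr.Update(map[string]uint64{accountAddress: 100}) // account is now at seq 100
+//	if err := <-ready; err == nil {
+//		// safe to submit the transaction carrying sequence 101
+//	}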
+package sequence diff --git a/services/horizon/internal/txsub/sequence/errors.go b/services/horizon/internal/txsub/sequence/errors.go new file mode 100644 index 0000000000..2a7e1258e5 --- /dev/null +++ b/services/horizon/internal/txsub/sequence/errors.go @@ -0,0 +1,10 @@ +package sequence + +import ( + "errors" +) + +var ( + ErrNoMoreRoom = errors.New("queue full") + ErrBadSequence = errors.New("bad sequence") +) diff --git a/services/horizon/internal/txsub/sequence/manager.go b/services/horizon/internal/txsub/sequence/manager.go new file mode 100644 index 0000000000..356e91301d --- /dev/null +++ b/services/horizon/internal/txsub/sequence/manager.go @@ -0,0 +1,115 @@ +package sequence + +import ( + "fmt" + "strings" + "sync" +) + +// Manager provides a system for tracking the transaction submission queue for +// a set of addresses. Requests to submit at a certain sequence number are +// registered using the Push() method, and as the system is updated with +// account sequence information (through the Update() method) requests are +// notified that they can safely submit to stellar-core. +type Manager struct { + mutex sync.Mutex + MaxSize int + queues map[string]*Queue +} + +// NewManager returns a new manager +func NewManager() *Manager { + return &Manager{ + MaxSize: 1024, //TODO: make MaxSize configurable + queues: map[string]*Queue{}, + } +} + +func (m *Manager) String() string { + m.mutex.Lock() + defer m.mutex.Unlock() + var addys []string + + for addy, q := range m.queues { + addys = append(addys, fmt.Sprintf("%5s:%d", addy, q.nextSequence)) + } + + return "[ " + strings.Join(addys, ",") + " ]" +} + +// Size returns the count of submissions buffered within +// this manager. +func (m *Manager) Size() int { + m.mutex.Lock() + defer m.mutex.Unlock() + return m.size() +} + +func (m *Manager) Addresses() []string { + m.mutex.Lock() + defer m.mutex.Unlock() + addys := make([]string, 0, len(m.queues)) + + for addy := range m.queues { + addys = append(addys, addy) + } + + return addys +} + +// Push registers an intent to submit a transaction for the provided address at +// the provided sequence. A channel is returned that will be written to when +// the requester should attempt the submission. +func (m *Manager) Push(address string, sequence uint64) <-chan error { + m.mutex.Lock() + defer m.mutex.Unlock() + + if m.size() >= m.MaxSize { + return m.getError(ErrNoMoreRoom) + } + + aq, ok := m.queues[address] + if !ok { + aq = NewQueue() + m.queues[address] = aq + } + + return aq.Push(sequence) +} + +// Update notifies the manager of newly loaded account sequence information. The manager uses this information +// to notify requests to submit that they should proceed. See Queue#Update for the actual meat of the logic. +func (m *Manager) Update(updates map[string]uint64) { + m.mutex.Lock() + defer m.mutex.Unlock() + + for address, seq := range updates { + queue, ok := m.queues[address] + if !ok { + continue + } + + queue.Update(seq) + if queue.Size() == 0 { + delete(m.queues, address) + } + } +} + +// size returns the count of submissions buffered within this manager. This +// internal version assumes you have locked the manager previously. 
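+// Callers follow the same pattern Size uses (illustrative):
+//
+//	m.mutex.Lock()
+//	defer m.mutex.Unlock()
+//	n := m.size()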
+func (m *Manager) size() int { + var result int + for _, q := range m.queues { + result += q.Size() + } + + return result +} + +func (m *Manager) getError(err error) <-chan error { + ch := make(chan error, 1) + ch <- err + close(ch) + return ch +} diff --git a/services/horizon/internal/txsub/sequence/manager_test.go b/services/horizon/internal/txsub/sequence/manager_test.go new file mode 100644 index 0000000000..a08b4ebfcb --- /dev/null +++ b/services/horizon/internal/txsub/sequence/manager_test.go @@ -0,0 +1,55 @@ +package sequence + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +// Test the Push method +func TestManager_Push(t *testing.T) { + mgr := NewManager() + + mgr.Push("1", 2) + mgr.Push("1", 2) + mgr.Push("1", 3) + mgr.Push("2", 2) + + assert.Equal(t, 4, mgr.Size()) + assert.Equal(t, 3, mgr.queues["1"].Size()) + assert.Equal(t, 1, mgr.queues["2"].Size()) +} + +// Test the Update method +func TestManager_Update(t *testing.T) { + mgr := NewManager() + results := []<-chan error{ + mgr.Push("1", 2), + mgr.Push("1", 3), + mgr.Push("2", 2), + } + + mgr.Update(map[string]uint64{ + "1": 1, + "2": 1, + }) + + assert.Equal(t, 1, mgr.Size()) + _, ok := mgr.queues["2"] + assert.False(t, ok) + + assert.Equal(t, nil, <-results[0]) + assert.Equal(t, nil, <-results[2]) + assert.Equal(t, 0, len(results[1])) +} + +// Push until maximum queue size is reached and check that another push results in ErrNoMoreRoom +func TestManager_PushNoMoreRoom(t *testing.T) { + mgr := NewManager() + for i := 0; i < mgr.MaxSize; i++ { + mgr.Push("1", 2) + } + + assert.Equal(t, 1024, mgr.Size()) + assert.Equal(t, ErrNoMoreRoom, <-mgr.Push("1", 2)) +} diff --git a/services/horizon/internal/txsub/sequence/queue.go b/services/horizon/internal/txsub/sequence/queue.go new file mode 100644 index 0000000000..35cdf1b087 --- /dev/null +++ b/services/horizon/internal/txsub/sequence/queue.go @@ -0,0 +1,161 @@ +package sequence + +import ( + "container/heap" + "time" +) + +// Queue manages the submission queue for a single source account. The +// transaction system uses Push to enqueue submissions for given sequence +// numbers. +// +// Queue maintains a priority queue of pending submissions, and when updated +// (via the Update() method) with the current sequence number of the account +// being managed, queued submissions that can be acted upon will be unblocked. +// +type Queue struct { + lastActiveAt time.Time + timeout time.Duration + nextSequence uint64 + queue pqueue +} + +// NewQueue creates a new *Queue +func NewQueue() *Queue { + result := &Queue{ + lastActiveAt: time.Now(), + timeout: 10 * time.Second, + queue: nil, + } + + heap.Init(&result.queue) + + return result +} + +// Size returns the count of currently buffered submissions in the queue. +func (q *Queue) Size() int { + return len(q.queue) +} + +// Push enqueues the intent to submit a transaction at the provided sequence +// number and returns a channel that will emit when it is safe for the client +// to do so. +// +// Push does not perform any triggering (which +// occurs in Update(), even if the current sequence number for this queue is +// the same as the provided sequence, to keep internal complexity much lower. +// Given that, the recommended usage pattern is: +// +// 1. Push the submission onto the queue +// 2. Load the current sequence number for the source account from the DB +// 3. 
Call Update() with the result from step 2 to trigger the submission if
+// possible
+func (q *Queue) Push(sequence uint64) <-chan error {
+	ch := make(chan error, 1)
+	heap.Push(&q.queue, item{sequence, ch})
+	return ch
+}
+
+// Update notifies the queue that the provided sequence number is the latest
+// seen value for the account that this queue manages submissions for.
+//
+// This function is monotonic: calling it with a sequence number lower than
+// the latest seen sequence number is a no-op.
+func (q *Queue) Update(sequence uint64) {
+	if q.nextSequence <= sequence {
+		q.nextSequence = sequence + 1
+	}
+
+	wasChanged := false
+
+	for {
+		if q.Size() == 0 {
+			break
+		}
+
+		ch, hseq := q.head()
+		// if the next queued transaction has a sequence higher than the account's
+		// current sequence, stop removing entries
+		if hseq > q.nextSequence {
+			break
+		}
+
+		// since this entry is unlocked (i.e. its sequence is the next available
+		// or in the past), we can remove it and mark the queue as changed
+		q.pop()
+		wasChanged = true
+
+		if hseq < q.nextSequence {
+			ch <- ErrBadSequence
+			close(ch)
+		} else if hseq == q.nextSequence {
+			ch <- nil
+			close(ch)
+		}
+	}
+
+	// if we modified the queue, bump the timeout for this queue
+	if wasChanged {
+		q.lastActiveAt = time.Now()
+		return
+	}
+
+	// if the queue wasn't changed, see if it is too old; if so, clear it
+	// and make room for others
+	if time.Since(q.lastActiveAt) > q.timeout {
+		for q.Size() > 0 {
+			ch, _ := q.pop()
+			ch <- ErrBadSequence
+			close(ch)
+		}
+	}
+}
+
+// head returns the channel and sequence of the lowest queued submission
+// without removing it from the priority queue
+func (q *Queue) head() (chan error, uint64) {
+	if len(q.queue) == 0 {
+		return nil, uint64(0)
+	}
+
+	return q.queue[0].Chan, q.queue[0].Sequence
+}
+
+// pop removes and returns the channel and sequence of the lowest queued
+// submission in the priority queue
+func (q *Queue) pop() (chan error, uint64) {
+	i := heap.Pop(&q.queue).(item)
+
+	return i.Chan, i.Sequence
+}
+
+// item is an element of the priority queue
+type item struct {
+	Sequence uint64
+	Chan     chan error
+}
+
+// pqueue is a priority queue used by Queue to manage buffered submissions. It
+// implements heap.Interface.
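+//
+// Entries come off the heap in ascending Sequence order, e.g. (illustrative):
+//
+//	heap.Push(&q.queue, item{Sequence: 7, Chan: ch})
+//	next := heap.Pop(&q.queue).(item) // lowest pending sequence first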
+type pqueue []item + +func (pq pqueue) Len() int { return len(pq) } + +func (pq pqueue) Less(i, j int) bool { + return pq[i].Sequence < pq[j].Sequence +} + +func (pq pqueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} + +func (pq *pqueue) Push(x interface{}) { + *pq = append(*pq, x.(item)) +} + +func (pq *pqueue) Pop() interface{} { + old := *pq + n := len(old) + result := old[n-1] + *pq = old[0 : n-1] + return result +} diff --git a/services/horizon/internal/txsub/sequence/queue_test.go b/services/horizon/internal/txsub/sequence/queue_test.go new file mode 100644 index 0000000000..736fb3d0be --- /dev/null +++ b/services/horizon/internal/txsub/sequence/queue_test.go @@ -0,0 +1,82 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package sequence + +import ( + "context" + "testing" + "time" + + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +type QueueTestSuite struct { + suite.Suite + ctx context.Context + queue *Queue +} + +func (suite *QueueTestSuite) SetupTest() { + suite.ctx = test.Context() + suite.queue = NewQueue() +} + +//Push adds the provided channel on to the priority queue +func (suite *QueueTestSuite) TestQueue_Push() { + ctx := test.Context() + _ = ctx + + assert.Equal(suite.T(), 0, suite.queue.Size()) + + suite.queue.Push(2) + assert.Equal(suite.T(), 1, suite.queue.Size()) + _, s := suite.queue.head() + assert.Equal(suite.T(), uint64(2), s) + + suite.queue.Push(1) + assert.Equal(suite.T(), 2, suite.queue.Size()) + _, s = suite.queue.head() + assert.Equal(suite.T(), uint64(1), s) +} + +// Tests the update method +func (suite *QueueTestSuite) TestQueue_Update() { + // Update removes sequences that are submittable or in the past + results := []<-chan error{ + suite.queue.Push(1), + suite.queue.Push(2), + suite.queue.Push(3), + suite.queue.Push(4), + } + + suite.queue.Update(2) + + // the update above signifies that 2 is the accounts current sequence, + // meaning that 3 is submittable, and so only 4 should still be queued + assert.Equal(suite.T(), 1, suite.queue.Size()) + _, s := suite.queue.head() + assert.Equal(suite.T(), uint64(4), s) + + suite.queue.Update(4) + assert.Equal(suite.T(), 0, suite.queue.Size()) + + assert.Equal(suite.T(), ErrBadSequence, <-results[0]) + assert.Equal(suite.T(), ErrBadSequence, <-results[1]) + assert.Equal(suite.T(), nil, <-results[2]) + assert.Equal(suite.T(), ErrBadSequence, <-results[3]) + + // Update clears the queue if the head has not been released within the time limit + suite.queue.timeout = 1 * time.Millisecond + result := suite.queue.Push(2) + <-time.After(10 * time.Millisecond) + suite.queue.Update(0) + + assert.Equal(suite.T(), 0, suite.queue.Size()) + assert.Equal(suite.T(), ErrBadSequence, <-result) +} + +func TestQueueTestSuite(t *testing.T) { + suite.Run(t, new(QueueTestSuite)) +} diff --git a/services/horizon/internal/txsub/submitter.go b/services/horizon/internal/txsub/submitter.go new file mode 100644 index 0000000000..511d843025 --- /dev/null +++ b/services/horizon/internal/txsub/submitter.go @@ -0,0 +1,69 @@ +package txsub + +import ( + "context" + "net/http" + "time" + + "github.com/stellar/go/clients/stellarcore" + proto "github.com/stellar/go/protocols/stellarcore" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" +) + +// NewDefaultSubmitter returns a new, simple Submitter implementation +// that submits directly to the stellar-core at `url` using 
the http client +// `h`. +func NewDefaultSubmitter(h *http.Client, url string) Submitter { + return &submitter{ + StellarCore: &stellarcore.Client{ + HTTP: h, + URL: url, + }, + Log: log.DefaultLogger.WithField("service", "txsub.submitter"), + } +} + +// submitter is the default implementation for the Submitter interface. It +// submits directly to the configured stellar-core instance using the +// configured http client. +type submitter struct { + StellarCore *stellarcore.Client + Log *log.Entry +} + +// Submit sends the provided envelope to stellar-core and parses the response into +// a SubmissionResult +func (sub *submitter) Submit(ctx context.Context, env string) (result SubmissionResult) { + start := time.Now() + defer func() { + result.Duration = time.Since(start) + sub.Log.Ctx(ctx).WithFields(log.F{ + "err": result.Err, + "duration": result.Duration.Seconds(), + }).Info("Submitter result") + }() + + cresp, err := sub.StellarCore.SubmitTransaction(ctx, env) + if err != nil { + result.Err = errors.Wrap(err, "failed to submit") + return + } + + // interpet response + if cresp.IsException() { + result.Err = errors.Errorf("stellar-core exception: %s", cresp.Exception) + return + } + + switch cresp.Status { + case proto.TXStatusError: + result.Err = &FailedTransactionError{cresp.Error} + case proto.TXStatusPending, proto.TXStatusDuplicate, proto.TXStatusTryAgainLater: + //noop. A nil Err indicates success + default: + result.Err = errors.Errorf("Unrecognized stellar-core status response: %s", cresp.Status) + } + + return +} diff --git a/services/horizon/internal/txsub/submitter_test.go b/services/horizon/internal/txsub/submitter_test.go new file mode 100644 index 0000000000..4406f46fb8 --- /dev/null +++ b/services/horizon/internal/txsub/submitter_test.go @@ -0,0 +1,89 @@ +package txsub + +import ( + "github.com/stretchr/testify/assert" + "net/http" + "testing" + + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestDefaultSubmitter(t *testing.T) { + ctx := test.Context() + // submits to the configured stellar-core instance correctly + server := test.NewStaticMockServer(`{ + "status": "PENDING", + "error": null + }`) + defer server.Close() + + s := NewDefaultSubmitter(http.DefaultClient, server.URL) + sr := s.Submit(ctx, "hello") + assert.Nil(t, sr.Err) + assert.True(t, sr.Duration > 0) + assert.Equal(t, "hello", server.LastRequest.URL.Query().Get("blob")) + + // Succeeds when stellar-core gives the DUPLICATE response. 
+ server = test.NewStaticMockServer(`{ + "status": "DUPLICATE", + "error": null + }`) + defer server.Close() + + s = NewDefaultSubmitter(http.DefaultClient, server.URL) + sr = s.Submit(ctx, "hello") + assert.Nil(t, sr.Err) + + // Errors when the stellar-core url is empty + + s = NewDefaultSubmitter(http.DefaultClient, "") + sr = s.Submit(ctx, "hello") + assert.NotNil(t, sr.Err) + + //errors when the stellar-core url is not parseable + + s = NewDefaultSubmitter(http.DefaultClient, "http://Not a url") + sr = s.Submit(ctx, "hello") + assert.NotNil(t, sr.Err) + + // errors when the stellar-core url is not reachable + s = NewDefaultSubmitter(http.DefaultClient, "http://127.0.0.1:65535") + sr = s.Submit(ctx, "hello") + assert.NotNil(t, sr.Err) + + // errors when the stellar-core returns an unparseable response + server = test.NewStaticMockServer(`{`) + defer server.Close() + + s = NewDefaultSubmitter(http.DefaultClient, server.URL) + sr = s.Submit(ctx, "hello") + assert.NotNil(t, sr.Err) + + // errors when the stellar-core returns an exception response + server = test.NewStaticMockServer(`{"exception": "Invalid XDR"}`) + defer server.Close() + + s = NewDefaultSubmitter(http.DefaultClient, server.URL) + sr = s.Submit(ctx, "hello") + assert.NotNil(t, sr.Err) + assert.Contains(t, sr.Err.Error(), "Invalid XDR") + + // errors when the stellar-core returns an unrecognized status + server = test.NewStaticMockServer(`{"status": "NOTREAL"}`) + defer server.Close() + + s = NewDefaultSubmitter(http.DefaultClient, server.URL) + sr = s.Submit(ctx, "hello") + assert.NotNil(t, sr.Err) + assert.Contains(t, sr.Err.Error(), "NOTREAL") + + // errors when the stellar-core returns an error response + server = test.NewStaticMockServer(`{"status": "ERROR", "error": "1234"}`) + defer server.Close() + + s = NewDefaultSubmitter(http.DefaultClient, server.URL) + sr = s.Submit(ctx, "hello") + assert.IsType(t, &FailedTransactionError{}, sr.Err) + ferr := sr.Err.(*FailedTransactionError) + assert.Equal(t, "1234", ferr.ResultXDR) +} diff --git a/services/horizon/internal/txsub/system.go b/services/horizon/internal/txsub/system.go new file mode 100644 index 0000000000..9b0557d095 --- /dev/null +++ b/services/horizon/internal/txsub/system.go @@ -0,0 +1,450 @@ +package txsub + +import ( + "context" + "database/sql" + "fmt" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/txsub/sequence" + "github.com/stellar/go/support/log" + "github.com/stellar/go/xdr" +) + +type HorizonDB interface { + GetLatestHistoryLedger(ctx context.Context) (uint32, error) + TransactionByHash(ctx context.Context, dest interface{}, hash string) error + TransactionsByHashesSinceLedger(ctx context.Context, hashes []string, sinceLedgerSeq uint32) ([]history.Transaction, error) + GetSequenceNumbers(ctx context.Context, addresses []string) (map[string]uint64, error) + BeginTx(*sql.TxOptions) error + Rollback() error + NoRows(error) bool +} + +// System represents a completely configured transaction submission system. +// Its methods tie together the various pieces used to reliably submit transactions +// to a stellar-core instance. 
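+//
+// A wiring sketch (illustrative; the concrete values are placeholders, not
+// the defaults Horizon uses):
+//
+//	sys := &System{
+//		DB:                func(ctx context.Context) HorizonDB { return horizonDB },
+//		Pending:           NewDefaultSubmissionList(),
+//		Submitter:         NewDefaultSubmitter(http.DefaultClient, coreURL),
+//		SubmissionQueue:   sequence.NewManager(),
+//		SubmissionTimeout: 30 * time.Second,
+//	}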
+type System struct { + initializer sync.Once + + tickMutex sync.Mutex + tickInProgress bool + + accountSeqPollInterval time.Duration + + DB func(context.Context) HorizonDB + Pending OpenSubmissionList + Submitter Submitter + SubmissionQueue *sequence.Manager + SubmissionTimeout time.Duration + Log *log.Entry + + Metrics struct { + // SubmissionDuration exposes timing metrics about the rate and latency of + // submissions to stellar-core + SubmissionDuration prometheus.Summary + + // BufferedSubmissionGauge tracks the count of submissions buffered + // behind this system's SubmissionQueue + BufferedSubmissionsGauge prometheus.Gauge + + // OpenSubmissionsGauge tracks the count of "open" submissions (i.e. + // submissions whose transactions haven't been confirmed successful or failed + OpenSubmissionsGauge prometheus.Gauge + + // FailedSubmissionsCounter tracks the rate of failed transactions that have + // been submitted to this process + FailedSubmissionsCounter prometheus.Counter + + // SuccessfulSubmissionsCounter tracks the rate of successful transactions that + // have been submitted to this process + SuccessfulSubmissionsCounter prometheus.Counter + + // V0TransactionsCounter tracks the rate of v0 transaction envelopes that + // have been submitted to this process + V0TransactionsCounter prometheus.Counter + + // V1TransactionsCounter tracks the rate of v1 transaction envelopes that + // have been submitted to this process + V1TransactionsCounter prometheus.Counter + + // FeeBumpTransactionsCounter tracks the rate of fee bump transaction envelopes that + // have been submitted to this process + FeeBumpTransactionsCounter prometheus.Counter + } +} + +// RegisterMetrics registers the prometheus metrics +func (sys *System) RegisterMetrics(registry *prometheus.Registry) { + registry.MustRegister(sys.Metrics.SubmissionDuration) + registry.MustRegister(sys.Metrics.BufferedSubmissionsGauge) + registry.MustRegister(sys.Metrics.OpenSubmissionsGauge) + registry.MustRegister(sys.Metrics.FailedSubmissionsCounter) + registry.MustRegister(sys.Metrics.SuccessfulSubmissionsCounter) + registry.MustRegister(sys.Metrics.V0TransactionsCounter) + registry.MustRegister(sys.Metrics.V1TransactionsCounter) + registry.MustRegister(sys.Metrics.FeeBumpTransactionsCounter) +} + +// Submit submits the provided base64 encoded transaction envelope to the +// network using this submission system. 
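+//
+// Callers read a single Result from the returned channel (illustrative; the
+// argument values are assumed to come from request parsing):
+//
+//	r := <-sys.Submit(ctx, rawTx, parsedEnvelope, txHash)
+//	if r.Err != nil {
+//		// the submission failed, timed out, or was canceled
+//	}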
+func (sys *System) Submit( + ctx context.Context, + rawTx string, + envelope xdr.TransactionEnvelope, + hash string, +) (result <-chan Result) { + sys.Init() + response := make(chan Result, 1) + result = response + + db := sys.DB(ctx) + // The database doesn't (yet) store muxed accounts, so we query + // the corresponding AccountId + sourceAccount := envelope.SourceAccount().ToAccountId() + sourceAddress := sourceAccount.Address() + + sys.Log.Ctx(ctx).WithFields(log.F{ + "hash": hash, + "tx_type": envelope.Type.String(), + "tx": rawTx, + }).Info("Processing transaction") + + if envelope.SeqNum() < 0 { + sys.finish(ctx, hash, response, Result{Err: ErrBadSequence}) + return + } + + tx, sequenceNumber, err := checkTxAlreadyExists(ctx, db, hash, sourceAddress) + if err == nil { + sys.Log.Ctx(ctx).WithField("hash", hash).Info("Found submission result in a DB") + sys.finish(ctx, hash, response, Result{Transaction: tx}) + return + } + if err != ErrNoResults { + sys.Log.Ctx(ctx).WithField("hash", hash).Info("Error getting submission result from a DB") + sys.finish(ctx, hash, response, Result{Transaction: tx, Err: err}) + return + } + + // queue the submission and get the channel that will emit when + // submission is valid + seq := sys.SubmissionQueue.Push(sourceAddress, uint64(envelope.SeqNum())) + + // update the submission queue with the source accounts current sequence value + // which will cause the channel returned by Push() to emit if possible. + sys.SubmissionQueue.Update(map[string]uint64{ + sourceAddress: sequenceNumber, + }) + + select { + case err := <-seq: + if err == sequence.ErrBadSequence { + // convert the internal only ErrBadSequence into the FailedTransactionError + err = ErrBadSequence + } + + if err != nil { + sys.finish(ctx, hash, response, Result{Err: err}) + return + } + + sr := sys.submitOnce(ctx, rawTx) + sys.updateTransactionTypeMetrics(envelope) + + // if submission succeeded + if sr.Err == nil { + // add transactions to open list + sys.Pending.Add(ctx, hash, response) + // update the submission queue, allowing the next submission to proceed + sys.SubmissionQueue.Update(map[string]uint64{ + sourceAddress: uint64(envelope.SeqNum()), + }) + return + } + + // any error other than "txBAD_SEQ" is a failure + isBad, err := sr.IsBadSeq() + if err != nil { + sys.finish(ctx, hash, response, Result{Err: err}) + return + } + + if !isBad { + sys.finish(ctx, hash, response, Result{Err: sr.Err}) + return + } + if err = sys.waitUntilAccountSequence(ctx, db, sourceAddress, uint64(envelope.SeqNum())); err != nil { + sys.finish(ctx, hash, response, Result{Err: err}) + return + } + + // If error is txBAD_SEQ, check for the result again + tx, err = txResultByHash(ctx, db, hash) + if err == nil { + // If the found use it as the result + sys.finish(ctx, hash, response, Result{Transaction: tx}) + } else { + // finally, return the bad_seq error if no result was found on 2nd attempt + sys.finish(ctx, hash, response, Result{Err: sr.Err}) + } + + case <-ctx.Done(): + sys.finish(ctx, hash, response, Result{Err: sys.deriveTxSubError(ctx)}) + } + + return +} + +// waitUntilAccountSequence blocks until either the context times out or the sequence number of the +// given source account is greater than or equal to `seq` +func (sys *System) waitUntilAccountSequence(ctx context.Context, db HorizonDB, sourceAddress string, seq uint64) error { + timer := time.NewTimer(sys.accountSeqPollInterval) + defer timer.Stop() + + for { + sequenceNumbers, err := db.GetSequenceNumbers(ctx, []string{sourceAddress}) + 
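+		// A DB error here is treated as transient: log it and retry on the
+		// next poll tick rather than aborting the wait.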
if err != nil { + sys.Log.Ctx(ctx). + WithError(err). + WithField("sourceAddress", sourceAddress). + Warn("cannot fetch sequence number") + } else { + num, ok := sequenceNumbers[sourceAddress] + if !ok { + sys.Log.Ctx(ctx). + WithField("sequenceNumbers", sequenceNumbers). + WithField("sourceAddress", sourceAddress). + Warn("missing sequence number for account") + } + if num >= seq { + return nil + } + } + + select { + case <-ctx.Done(): + return sys.deriveTxSubError(ctx) + case <-timer.C: + timer.Reset(sys.accountSeqPollInterval) + } + } +} + +func (sys *System) deriveTxSubError(ctx context.Context) error { + if ctx.Err() == context.Canceled { + return ErrCanceled + } + return ErrTimeout +} + +// Submit submits the provided base64 encoded transaction envelope to the +// network using this submission system. +func (sys *System) submitOnce(ctx context.Context, env string) SubmissionResult { + // submit to stellar-core + sr := sys.Submitter.Submit(ctx, env) + sys.Metrics.SubmissionDuration.Observe(float64(sr.Duration.Seconds())) + + // if received or duplicate, add to the open submissions list + if sr.Err == nil { + sys.Metrics.SuccessfulSubmissionsCounter.Inc() + } else { + sys.Metrics.FailedSubmissionsCounter.Inc() + } + + return sr +} + +func (sys *System) updateTransactionTypeMetrics(envelope xdr.TransactionEnvelope) { + switch envelope.Type { + case xdr.EnvelopeTypeEnvelopeTypeTxV0: + sys.Metrics.V0TransactionsCounter.Inc() + case xdr.EnvelopeTypeEnvelopeTypeTx: + sys.Metrics.V1TransactionsCounter.Inc() + case xdr.EnvelopeTypeEnvelopeTypeTxFeeBump: + sys.Metrics.FeeBumpTransactionsCounter.Inc() + } +} + +// setTickInProgress sets `tickInProgress` to `true` if it's +// `false`. Returns `true` if `tickInProgress` has been switched +// to `true` inside this method and `Tick()` should continue. +func (sys *System) setTickInProgress(ctx context.Context) bool { + sys.tickMutex.Lock() + defer sys.tickMutex.Unlock() + + if sys.tickInProgress { + logger := log.Ctx(ctx) + logger.Info("ticking in progress") + return false + } + + sys.tickInProgress = true + return true +} + +func (sys *System) unsetTickInProgress() { + sys.tickMutex.Lock() + defer sys.tickMutex.Unlock() + sys.tickInProgress = false +} + +// Tick triggers the system to update itself with any new data available. +func (sys *System) Tick(ctx context.Context) { + sys.Init() + logger := log.Ctx(ctx) + + // Make sure Tick is not run concurrently + if !sys.setTickInProgress(ctx) { + return + } + + defer sys.unsetTickInProgress() + + logger. + WithField("queued", sys.SubmissionQueue.String()). + Debug("ticking txsub system") + + db := sys.DB(ctx) + options := &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + } + if err := db.BeginTx(options); err != nil { + logger.WithError(err).Error("could not start repeatable read transaction for txsub tick") + return + } + defer db.Rollback() + + addys := sys.SubmissionQueue.Addresses() + if len(addys) > 0 { + curSeq, err := db.GetSequenceNumbers(ctx, addys) + if err != nil { + logger.WithStack(err).Error(err) + return + } else { + sys.SubmissionQueue.Update(curSeq) + } + } + + pending := sys.Pending.Pending(ctx) + + if len(pending) > 0 { + latestLedger, err := db.GetLatestHistoryLedger(ctx) + if err != nil { + logger.WithError(err).Error("error getting latest history ledger") + return + } + + // In Tick we only check txs in a queue so those which did not have results before Tick + // so we check for them in the last 5 mins of ledgers: 60. 
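+		// (60 ledgers at an average close time of ~5 seconds each is roughly
+		// 300 seconds, i.e. 5 minutes of history.)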
+ var sinceLedgerSeq int32 = int32(latestLedger) - 60 + if sinceLedgerSeq < 0 { + sinceLedgerSeq = 0 + } + + txs, err := db.TransactionsByHashesSinceLedger(ctx, pending, uint32(sinceLedgerSeq)) + if err != nil && !db.NoRows(err) { + logger.WithError(err).Error("error getting transactions by hashes") + return + } + + txMap := make(map[string]history.Transaction, len(txs)) + for _, tx := range txs { + txMap[tx.TransactionHash] = tx + if tx.InnerTransactionHash.Valid { + txMap[tx.InnerTransactionHash.String] = tx + } + } + + for _, hash := range pending { + tx, found := txMap[hash] + if !found { + continue + } + _, err := txResultFromHistory(tx) + + if err == nil { + logger.WithField("hash", hash).Debug("finishing open submission") + sys.Pending.Finish(ctx, hash, Result{Transaction: tx}) + continue + } + + if _, ok := err.(*FailedTransactionError); ok { + logger.WithField("hash", hash).Debug("finishing open submission") + sys.Pending.Finish(ctx, hash, Result{Transaction: tx, Err: err}) + continue + } + + if err != nil { + logger.WithStack(err).Error(err) + } + } + } + + stillOpen, err := sys.Pending.Clean(ctx, sys.SubmissionTimeout) + if err != nil { + logger.WithStack(err).Error(err) + return + } + + sys.Metrics.OpenSubmissionsGauge.Set(float64(stillOpen)) + sys.Metrics.BufferedSubmissionsGauge.Set(float64(sys.SubmissionQueue.Size())) +} + +// Init initializes `sys` +func (sys *System) Init() { + sys.initializer.Do(func() { + sys.Log = log.DefaultLogger.WithField("service", "txsub.System") + + sys.Metrics.SubmissionDuration = prometheus.NewSummary(prometheus.SummaryOpts{ + Namespace: "horizon", Subsystem: "txsub", Name: "submission_duration_seconds", + Help: "submission durations to Stellar-Core, sliding window = 10m", + }) + sys.Metrics.FailedSubmissionsCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "txsub", Name: "failed", + }) + sys.Metrics.SuccessfulSubmissionsCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "txsub", Name: "succeeded", + }) + sys.Metrics.OpenSubmissionsGauge = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "txsub", Name: "open", + }) + sys.Metrics.BufferedSubmissionsGauge = prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "horizon", Subsystem: "txsub", Name: "buffered", + }) + sys.Metrics.V0TransactionsCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "txsub", Name: "v0", + }) + sys.Metrics.V1TransactionsCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "txsub", Name: "v1", + }) + sys.Metrics.FeeBumpTransactionsCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "horizon", Subsystem: "txsub", Name: "feebump", + }) + + sys.accountSeqPollInterval = time.Second + + if sys.SubmissionTimeout == 0 { + // HTTP clients in SDKs usually timeout in 60 seconds. We want SubmissionTimeout + // to be lower than that to make sure that they read the response before the client + // timeout. + // 30 seconds is 6 ledgers (with avg. close time = 5 sec), enough for stellar-core + // to drop the transaction if not added to the ledger and ask client to try again + // by sending a Timeout response. + sys.SubmissionTimeout = 30 * time.Second + } + }) +} + +func (sys *System) finish(ctx context.Context, hash string, response chan<- Result, r Result) { + sys.Log.Ctx(ctx). + WithField("result", fmt.Sprintf("%+v", r)). + WithField("hash", hash). 
+ Info("Submission system result") + response <- r + close(response) +} diff --git a/services/horizon/internal/txsub/system_test.go b/services/horizon/internal/txsub/system_test.go new file mode 100644 index 0000000000..6f01134f39 --- /dev/null +++ b/services/horizon/internal/txsub/system_test.go @@ -0,0 +1,502 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package txsub + +import ( + "context" + "database/sql" + "errors" + "testing" + "time" + + "github.com/guregu/null" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/suite" + + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/test" + "github.com/stellar/go/services/horizon/internal/txsub/sequence" + "github.com/stellar/go/xdr" +) + +type SystemTestSuite struct { + suite.Suite + ctx context.Context + submitter *MockSubmitter + db *mockDBQ + system *System + noResults Result + successTx Result + successXDR xdr.TransactionEnvelope + badSeq SubmissionResult + unmuxedSource xdr.AccountId +} + +func (suite *SystemTestSuite) SetupTest() { + suite.ctx = test.Context() + suite.submitter = &MockSubmitter{} + suite.db = &mockDBQ{} + + suite.system = &System{ + Pending: NewDefaultSubmissionList(), + Submitter: suite.submitter, + SubmissionQueue: sequence.NewManager(), + DB: func(ctx context.Context) HorizonDB { + return suite.db + }, + } + + suite.unmuxedSource = xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + source := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabe, + Ed25519: *suite.unmuxedSource.Ed25519, + }, + } + + tx := xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: source, + Fee: 100, + SeqNum: 1, + Operations: []xdr.Operation{ + { + Body: xdr.OperationBody{ + CreateAccountOp: &xdr.CreateAccountOp{ + Destination: xdr.MustAddress("GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"), + StartingBalance: 1000000000, + }, + }, + }, + }, + }, + Signatures: []xdr.DecoratedSignature{ + { + Hint: xdr.SignatureHint{86, 252, 5, 247}, + Signature: xdr.Signature{131, 206, 171, 228, 64, 20, 40, 52, 2, 98, 124, 244, 87, 14, 130, 225, 190, 220, 156, 79, 121, 69, 60, 36, 57, 214, 9, 29, 176, 81, 218, 4, 213, 176, 211, 148, 191, 86, 21, 180, 94, 9, 43, 208, 32, 79, 19, 131, 90, 21, 93, 138, 153, 203, 55, 103, 2, 230, 137, 190, 19, 70, 179, 11}, + }, + }, + }, + } + + result := xdr.TransactionResult{ + FeeCharged: 123, + Result: xdr.TransactionResultResult{ + Code: xdr.TransactionResultCodeTxSuccess, + Results: &[]xdr.OperationResult{}, + }, + } + resultXDR, err := xdr.MarshalBase64(result) + suite.Assert().NoError(err) + + suite.noResults = Result{Err: ErrNoResults} + envelopeBase64, _ := xdr.MarshalBase64(tx) + suite.successTx = Result{ + Transaction: history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + TransactionHash: "2374e99349b9ef7dba9a5db3339b78fda8f34777b1af33ba468ad5c0df946d4d", + LedgerSequence: 2, + TxEnvelope: envelopeBase64, + TxResult: resultXDR, + }, + }, + } + assert.NoError(suite.T(), xdr.SafeUnmarshalBase64(suite.successTx.Transaction.TxEnvelope, &suite.successXDR)) + + suite.badSeq = SubmissionResult{ + Err: ErrBadSequence, + } + + 
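+	// GetLatestHistoryLedger is only consulted by Tick when there are pending
+	// submissions, so this expectation is optional (Maybe) rather than required.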
suite.db.On("GetLatestHistoryLedger").Return(uint32(1000), nil).Maybe() +} + +func (suite *SystemTestSuite) TearDownTest() { + t := suite.T() + suite.db.AssertExpectations(t) +} + +// Returns the result provided by the ResultProvider. +func (suite *SystemTestSuite) TestSubmit_Basic() { + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Run(func(args mock.Arguments) { + ptr := args.Get(1).(*history.Transaction) + *ptr = suite.successTx.Transaction + }). + Return(nil).Once() + + r := <-suite.system.Submit( + suite.ctx, + suite.successTx.Transaction.TxEnvelope, + suite.successXDR, + suite.successTx.Transaction.TransactionHash, + ) + + assert.Nil(suite.T(), r.Err) + assert.Equal(suite.T(), suite.successTx, r) + assert.False(suite.T(), suite.submitter.WasSubmittedTo) +} + +func (suite *SystemTestSuite) TestTimeoutDuringSequnceLoop() { + var cancel context.CancelFunc + suite.ctx, cancel = context.WithTimeout(suite.ctx, time.Duration(0)) + defer cancel() + + suite.submitter.R = suite.badSeq + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Return(sql.ErrNoRows).Once() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Once() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). + Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil) + + r := <-suite.system.Submit( + suite.ctx, + suite.successTx.Transaction.TxEnvelope, + suite.successXDR, + suite.successTx.Transaction.TransactionHash, + ) + + assert.NotNil(suite.T(), r.Err) + assert.Equal(suite.T(), ErrTimeout, r.Err) +} + +func (suite *SystemTestSuite) TestClientDisconnectedDuringSequnceLoop() { + var cancel context.CancelFunc + suite.ctx, cancel = context.WithCancel(suite.ctx) + + suite.submitter.R = suite.badSeq + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Return(sql.ErrNoRows).Once() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Once() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). + Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil). + Run(func(args mock.Arguments) { + // simulate client disconnecting while looping on sequnce number check + cancel() + suite.ctx.Deadline() + }). + Once() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). + Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil) + + r := <-suite.system.Submit( + suite.ctx, + suite.successTx.Transaction.TxEnvelope, + suite.successXDR, + suite.successTx.Transaction.TransactionHash, + ) + + assert.NotNil(suite.T(), r.Err) + assert.Equal(suite.T(), ErrCanceled, r.Err) +} + +func getMetricValue(metric prometheus.Metric) *dto.Metric { + value := &dto.Metric{} + err := metric.Write(value) + if err != nil { + panic(err) + } + return value +} + +// Returns the error from submission if no result is found by hash and the suite.submitter returns an error. 
+func (suite *SystemTestSuite) TestSubmit_NotFoundError() { + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Return(sql.ErrNoRows).Once() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Once() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). + Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil). + Once() + + suite.submitter.R.Err = errors.New("busted for some reason") + r := <-suite.system.Submit( + suite.ctx, + suite.successTx.Transaction.TxEnvelope, + suite.successXDR, + suite.successTx.Transaction.TransactionHash, + ) + + assert.NotNil(suite.T(), r.Err) + assert.True(suite.T(), suite.submitter.WasSubmittedTo) + assert.Equal(suite.T(), float64(0), getMetricValue(suite.system.Metrics.SuccessfulSubmissionsCounter).GetCounter().GetValue()) + assert.Equal(suite.T(), float64(1), getMetricValue(suite.system.Metrics.FailedSubmissionsCounter).GetCounter().GetValue()) + assert.Equal(suite.T(), uint64(1), getMetricValue(suite.system.Metrics.SubmissionDuration).GetSummary().GetSampleCount()) +} + +// If the error is bad_seq and the result at the transaction's sequence number is for the same hash, return result. +func (suite *SystemTestSuite) TestSubmit_BadSeq() { + suite.submitter.R = suite.badSeq + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Return(sql.ErrNoRows).Once() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Once() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). + Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil). + Once() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). + Return(map[string]uint64{suite.unmuxedSource.Address(): 1}, nil). + Once() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Run(func(args mock.Arguments) { + ptr := args.Get(1).(*history.Transaction) + *ptr = suite.successTx.Transaction + }). + Return(nil).Once() + + r := <-suite.system.Submit( + suite.ctx, + suite.successTx.Transaction.TxEnvelope, + suite.successXDR, + suite.successTx.Transaction.TransactionHash, + ) + + assert.Nil(suite.T(), r.Err) + assert.Equal(suite.T(), suite.successTx, r) + assert.True(suite.T(), suite.submitter.WasSubmittedTo) +} + +// If error is bad_seq and no result is found, return error. +func (suite *SystemTestSuite) TestSubmit_BadSeqNotFound() { + suite.submitter.R = suite.badSeq + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Return(sql.ErrNoRows).Twice() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Twice() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). + Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil). + Times(3) + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). 
+ Return(map[string]uint64{suite.unmuxedSource.Address(): 1}, nil). + Once() + + // set poll interval to 1ms so we don't need to wait 3 seconds for the test to complete + suite.system.Init() + suite.system.accountSeqPollInterval = time.Millisecond + + r := <-suite.system.Submit( + suite.ctx, + suite.successTx.Transaction.TxEnvelope, + suite.successXDR, + suite.successTx.Transaction.TransactionHash, + ) + + assert.NotNil(suite.T(), r.Err) + assert.True(suite.T(), suite.submitter.WasSubmittedTo) +} + +// If no result found and no error submitting, add to open transaction list. +func (suite *SystemTestSuite) TestSubmit_OpenTransactionList() { + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, suite.successTx.Transaction.TransactionHash). + Return(sql.ErrNoRows).Once() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Once() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{suite.unmuxedSource.Address()}). + Return(map[string]uint64{suite.unmuxedSource.Address(): 0}, nil). + Once() + + suite.system.Submit( + suite.ctx, + suite.successTx.Transaction.TxEnvelope, + suite.successXDR, + suite.successTx.Transaction.TransactionHash, + ) + pending := suite.system.Pending.Pending(suite.ctx) + assert.Equal(suite.T(), 1, len(pending)) + assert.Equal(suite.T(), suite.successTx.Transaction.TransactionHash, pending[0]) + assert.Equal(suite.T(), float64(1), getMetricValue(suite.system.Metrics.SuccessfulSubmissionsCounter).GetCounter().GetValue()) + assert.Equal(suite.T(), float64(0), getMetricValue(suite.system.Metrics.FailedSubmissionsCounter).GetCounter().GetValue()) + assert.Equal(suite.T(), uint64(1), getMetricValue(suite.system.Metrics.SubmissionDuration).GetSummary().GetSampleCount()) +} + +// Tick should be a no-op if there are no open submissions. +func (suite *SystemTestSuite) TestTick_Noop() { + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + + suite.system.Tick(suite.ctx) +} + +// TestTick_Deadlock is a regression test for Tick() deadlock: if for any reason +// call to Tick() takes more time and another Tick() is called. +// This test starts two go routines: both calling Tick() but the call to +// `sys.Sequences.Get(addys)` is delayed by 1 second. It allows to simulate two +// calls to `Tick()` executed at the same time. +func (suite *SystemTestSuite) TestTick_Deadlock() { + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + + // Start first Tick + suite.system.SubmissionQueue.Push("address", 0) + suite.db.On("GetSequenceNumbers", suite.ctx, []string{"address"}). + Return(map[string]uint64{}, nil). + Run(func(args mock.Arguments) { + // Start second tick + suite.system.Tick(suite.ctx) + }). 
+ Once() + + suite.system.Tick(suite.ctx) +} + +// Test that Tick finishes any available transactions, +func (suite *SystemTestSuite) TestTick_FinishesTransactions() { + l := make(chan Result, 1) + suite.system.Pending.Add(suite.ctx, suite.successTx.Transaction.TransactionHash, l) + + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionsByHashesSinceLedger", suite.ctx, []string{suite.successTx.Transaction.TransactionHash}, uint32(940)). + Return(nil, sql.ErrNoRows).Once() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Once() + + suite.system.Tick(suite.ctx) + + assert.Equal(suite.T(), 0, len(l)) + assert.Equal(suite.T(), 1, len(suite.system.Pending.Pending(suite.ctx))) + + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionsByHashesSinceLedger", suite.ctx, []string{suite.successTx.Transaction.TransactionHash}, uint32(940)). + Return([]history.Transaction{suite.successTx.Transaction}, nil).Once() + + suite.system.Tick(suite.ctx) + + assert.Equal(suite.T(), 1, len(l)) + assert.Equal(suite.T(), 0, len(suite.system.Pending.Pending(suite.ctx))) +} + +func (suite *SystemTestSuite) TestTickFinishFeeBumpTransaction() { + innerTxEnvelope := "AAAAAAMDAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYwAAAAAAAABhAAAAAQAAAAAAAAACAAAAAAAAAAQAAAAAAAAAAQAAAAAAAAALAAAAAAAAAGIAAAAAAAAAAQICAgIAAAADFBQUAA==" + innerHash := "e98869bba8bce08c10b78406202127f3888c25454cd37b02600862452751f526" + var parsedInnerTx xdr.TransactionEnvelope + assert.NoError(suite.T(), xdr.SafeUnmarshalBase64(innerTxEnvelope, &parsedInnerTx)) + + feeBumpTx := Result{ + Transaction: history.Transaction{ + TransactionWithoutLedger: history.TransactionWithoutLedger{ + Account: "GABQGAYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB2MX", + TransactionHash: "3dfef7d7226995b504f2827cc63d45ad41e9687bb0a8abcf08ba755fedca0352", + LedgerSequence: 123, + TxEnvelope: "AAAABQAAAAACAgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMIAAAAAgAAAAADAwMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGMAAAAAAAAAYQAAAAEAAAAAAAAAAgAAAAAAAAAEAAAAAAAAAAEAAAAAAAAACwAAAAAAAABiAAAAAAAAAAECAgICAAAAAxQUFAAAAAAAAAAAAQMDAwMAAAADHh4eAA==", + TxResult: "AAAAAAAAAHsAAAAB6Yhpu6i84IwQt4QGICEn84iMJUVM03sCYAhiRSdR9SYAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAsAAAAAAAAAAAAAAAA=", + TxMeta: "AAAAAQAAAAAAAAAA", + InnerTransactionHash: null.StringFrom("e98869bba8bce08c10b78406202127f3888c25454cd37b02600862452751f526"), + }, + }, + } + + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionByHash", suite.ctx, mock.Anything, innerHash). + Return(sql.ErrNoRows).Once() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Once() + suite.db.On("GetSequenceNumbers", suite.ctx, []string{"GABQGAYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB2MX"}). + Return(map[string]uint64{"GABQGAYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB2MX": 96}, nil). 
+ Once() + + l := suite.system.Submit(suite.ctx, innerTxEnvelope, parsedInnerTx, innerHash) + assert.Equal(suite.T(), 0, len(l)) + assert.Equal(suite.T(), 1, len(suite.system.Pending.Pending(suite.ctx))) + + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionsByHashesSinceLedger", suite.ctx, []string{innerHash}, uint32(940)). + Return([]history.Transaction{feeBumpTx.Transaction}, nil).Once() + + suite.system.Tick(suite.ctx) + + assert.Equal(suite.T(), 1, len(l)) + assert.Equal(suite.T(), 0, len(suite.system.Pending.Pending(suite.ctx))) + r := <-l + assert.NoError(suite.T(), r.Err) + assert.Equal(suite.T(), feeBumpTx, r) +} + +// Test that Tick removes old submissions that have timed out. +func (suite *SystemTestSuite) TestTick_RemovesStaleSubmissions() { + l := make(chan Result, 1) + suite.system.SubmissionTimeout = 100 * time.Millisecond + suite.system.Pending.Add(suite.ctx, suite.successTx.Transaction.TransactionHash, l) + <-time.After(101 * time.Millisecond) + + suite.db.On("BeginTx", &sql.TxOptions{ + Isolation: sql.LevelRepeatableRead, + ReadOnly: true, + }).Return(nil).Once() + suite.db.On("Rollback").Return(nil).Once() + suite.db.On("TransactionsByHashesSinceLedger", suite.ctx, []string{suite.successTx.Transaction.TransactionHash}, uint32(940)). + Return(nil, sql.ErrNoRows).Once() + suite.db.On("NoRows", sql.ErrNoRows).Return(true).Once() + + suite.system.Tick(suite.ctx) + + assert.Equal(suite.T(), 0, len(suite.system.Pending.Pending(suite.ctx))) + assert.Equal(suite.T(), 1, len(l)) + <-l + select { + case _, stillOpen := <-l: + assert.False(suite.T(), stillOpen) + default: + panic("could not read from listener") + } +} + +func TestSystemTestSuite(t *testing.T) { + suite.Run(t, new(SystemTestSuite)) +} diff --git a/services/horizon/internal/utf8/main.go b/services/horizon/internal/utf8/main.go new file mode 100644 index 0000000000..cf2ccfd3bc --- /dev/null +++ b/services/horizon/internal/utf8/main.go @@ -0,0 +1,34 @@ +//Package utf8 contains utilities for working with utf8 data. +package utf8 + +import ( + "bytes" + "unicode/utf8" +) + +// Scrub ensures that a given string is valid utf-8, replacing any invalid byte +// sequences with the utf-8 replacement character. 
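+//
+// For example (mirroring main_test.go):
+//
+//	Scrub("scott")                    // returns "scott"; valid input is unchanged
+//	Scrub(string([]byte{0xC3, 0x28})) // returns "\uFFFD("; the bad byte becomes U+FFFD
+//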
+func Scrub(in string) string { + + // First check validity using the stdlib, returning if the string is already + // valid + if utf8.ValidString(in) { + return in + } + + left := []byte(in) + var result bytes.Buffer + + for len(left) > 0 { + r, n := utf8.DecodeRune(left) + + _, err := result.WriteRune(r) + if err != nil { + panic(err) + } + + left = left[n:] + } + + return result.String() +} diff --git a/services/horizon/internal/utf8/main_test.go b/services/horizon/internal/utf8/main_test.go new file mode 100644 index 0000000000..743aa9dbab --- /dev/null +++ b/services/horizon/internal/utf8/main_test.go @@ -0,0 +1,16 @@ +package utf8 + +import ( + "testing" + + "github.com/stellar/go/services/horizon/internal/test" +) + +func TestScrub(t *testing.T) { + tt := test.Start(t) + defer tt.Finish() + + tt.Assert.Equal("scott", Scrub("scott")) + tt.Assert.Equal("scΓΆtt", Scrub("scΓΆtt")) + tt.Assert.Equal("οΏ½(", Scrub(string([]byte{0xC3, 0x28}))) +} diff --git a/services/horizon/main.go b/services/horizon/main.go new file mode 100644 index 0000000000..9365d51148 --- /dev/null +++ b/services/horizon/main.go @@ -0,0 +1,18 @@ +package main + +import ( + "fmt" + "os" + + "github.com/stellar/go/services/horizon/cmd" +) + +func main() { + err := cmd.Execute() + if e, ok := err.(cmd.ErrExitCode); ok { + os.Exit(int(e)) + } else if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/services/keystore/CHANGELOG.md b/services/keystore/CHANGELOG.md new file mode 100644 index 0000000000..9bac4c2a2b --- /dev/null +++ b/services/keystore/CHANGELOG.md @@ -0,0 +1,20 @@ +## Unreleased + +- Dropped support for Go 1.12. +* Dropped support for Go 1.13. + +## [v1.2.0] - 2019-11-20 + +- Add `ReadTimeout` to Keystore HTTP server configuration to fix potential DoS vector. +- Dropped support for Go 1.10, 1.11. + +## [v1.1.0] - 2019-08-21 + +Keystore has new interface for managing keys blob. +Please refer to the [spec](https://github.com/stellar/go/blob/bcaf3d55229df822b155442633adc230294588b4/services/keystore/spec.md) for the new changes. +Note that the data you previously store will be wiped out. Be sure to save the +data that's important to you before upgrading to this version. + +## [v1.0.0] - 2019-06-18 + +Initial release of the keystore. diff --git a/services/keystore/Makefile b/services/keystore/Makefile new file mode 100644 index 0000000000..d091c04836 --- /dev/null +++ b/services/keystore/Makefile @@ -0,0 +1,16 @@ +# Check if we need to prepend docker commands with sudo +SUDO := $(shell docker version >/dev/null 2>&1 || echo "sudo") + +# If TAG is not provided set default value +TAG ?= stellar/keystore:$(shell git rev-parse --short HEAD)$(and $(shell git status -s),-dirty-$(shell id -u -n)) +# https://github.com/opencontainers/image-spec/blob/master/annotations.md +BUILD_DATE := $(shell date -u +%FT%TZ) + +docker-build: + cd ../../ && \ + $(SUDO) docker build --pull --label org.opencontainers.image.created="$(BUILD_DATE)" \ + -f services/keystore/docker/Dockerfile -t $(TAG) . 
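+# The docker-push target below publishes the image built here; override TAG
+# (e.g. TAG=my-registry.example.com/keystore:dev) to target your own registry.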
+ +docker-push: + cd ../../ && \ + $(SUDO) docker push $(TAG) diff --git a/services/keystore/README.md b/services/keystore/README.md new file mode 100644 index 0000000000..b03cd31e76 --- /dev/null +++ b/services/keystore/README.md @@ -0,0 +1 @@ +# Keystore diff --git a/services/keystore/api.go b/services/keystore/api.go new file mode 100644 index 0000000000..f4915f6334 --- /dev/null +++ b/services/keystore/api.go @@ -0,0 +1,191 @@ +package keystore + +import ( + "database/sql" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "strings" + "time" + + "github.com/rs/cors" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/health" + "github.com/stellar/go/support/render/httpjson" + "github.com/stellar/go/support/render/problem" +) + +func init() { + // register errors + problem.RegisterError(httpjson.ErrBadRequest, probInvalidRequest) + problem.RegisterError(sql.ErrNoRows, problem.NotFound) + + // register service host as an empty string + problem.RegisterHost("") +} + +func (s *Service) wrapMiddleware(handler http.Handler) http.Handler { + handler = authHandler(handler, s.authenticator) + handler = recoverHandler(handler) + handler = corsHandler(handler) + return handler +} + +func ServeMux(s *Service) http.Handler { + mux := http.NewServeMux() + mux.Handle("/keys", s.wrapMiddleware(s.keysHTTPMethodHandler())) + mux.Handle("/health", s.wrapMiddleware(health.PassHandler{})) + return mux +} + +func (s *Service) keysHTTPMethodHandler() http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + switch req.Method { + case http.MethodGet: + jsonHandler(s.getKeys).ServeHTTP(rw, req) + + case http.MethodPut: + jsonHandler(s.putKeys).ServeHTTP(rw, req) + + case http.MethodDelete: + jsonHandler(s.deleteKeys).ServeHTTP(rw, req) + + default: + problem.Render(req.Context(), rw, probMethodNotAllowed) + } + }) +} + +type authResponse struct { + UserID string `json:"userID"` +} + +var forwardHeaders = map[string]struct{}{ + "authorization": struct{}{}, + "cookie": struct{}{}, +} + +func authHandler(next http.Handler, authenticator *Authenticator) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + if authenticator == nil { + // to facilitate API testing + next.ServeHTTP(rw, req.WithContext(withUserID(req.Context(), "test-user"))) + return + } + + var ( + proxyReq *http.Request + err error + clientIP string + ) + ctx := req.Context() + // set a 5-second timeout + client := http.Client{Timeout: time.Duration(5 * time.Second)} + + switch authenticator.APIType { + case REST: + proxyReq, err = http.NewRequest("GET", authenticator.URL, nil) + if err != nil { + problem.Render(ctx, rw, errors.Wrap(err, "creating the auth proxy request")) + return + } + + case GraphQL: + // to be implemented later + default: + problem.Render(ctx, rw, probNotAuthorized) + return + } + + proxyReq.Header = make(http.Header) + for k, v := range req.Header { + // http headers are case-insensitive + // https://www.ietf.org/rfc/rfc2616.txt + if _, ok := forwardHeaders[strings.ToLower(k)]; ok { + proxyReq.Header[k] = v + } + } + + if clientIP, _, err = net.SplitHostPort(req.RemoteAddr); err == nil { + proxyReq.Header.Set("X-Forwarded-For", clientIP) + } + proxyReq.Header.Set("Accept-Encoding", "identity") + + resp, err := client.Do(proxyReq) + if err != nil { + problem.Render(ctx, rw, errors.Wrap(err, "sending the auth proxy request")) + return + } + defer 
resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + problem.Render(ctx, rw, probNotAuthorized) + return + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + problem.Render(ctx, rw, errors.Wrap(err, "reading the auth response")) + return + } + + var authResp authResponse + err = json.Unmarshal(body, &authResp) + if err != nil { + log.Ctx(ctx).Infof("Response body as a plain string: %s\n. Response body as a hex dump string: %s\n", string(body), hex.Dump(body)) + problem.Render(ctx, rw, errors.Wrap(err, "unmarshaling the auth response")) + return + } + if authResp.UserID == "" { + problem.Render(ctx, rw, probNotAuthorized) + return + } + + next.ServeHTTP(rw, req.WithContext(withUserID(ctx, authResp.UserID))) + }) +} + +func jsonHandler(f interface{}) http.Handler { + h, err := httpjson.ReqBodyHandler(f, httpjson.JSON) + if err != nil { + panic(err) + } + return h +} + +func recoverHandler(next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + defer func() { + r := recover() + if r == nil { + return + } + err, ok := r.(error) + if !ok { + err = fmt.Errorf("panic: %v", r) + } + if errors.Cause(err) == http.ErrAbortHandler { + panic(err) + } + + ctx := req.Context() + log.Ctx(ctx).WithStack(err).Error(err) + problem.Render(ctx, rw, err) + }() + + next.ServeHTTP(rw, req) + }) +} + +func corsHandler(next http.Handler) http.Handler { + cors := cors.New(cors.Options{ + AllowedOrigins: []string{"*"}, + AllowedHeaders: []string{"*"}, + AllowedMethods: []string{"GET", "PUT", "POST", "PATCH", "DELETE", "HEAD", "OPTIONS"}, + }) + return cors.Handler(next) +} diff --git a/services/keystore/api_test.go b/services/keystore/api_test.go new file mode 100644 index 0000000000..34b945dce9 --- /dev/null +++ b/services/keystore/api_test.go @@ -0,0 +1,201 @@ +package keystore + +import ( + "bytes" + "context" + "database/sql" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/httpjson" +) + +func TestPutKeysAPI(t *testing.T) { + db := openKeystoreDB(t) + defer db.Close() // drop test db + + conn := db.Open() + defer conn.Close() // close db connection + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, `{"userID":"test-user"}`) + })) + defer ts.Close() + + h := ServeMux(&Service{ + db: conn.DB, + authenticator: &Authenticator{ + URL: ts.URL, + APIType: REST, + }, + }) + + blob := `[{ + "id": "test-id", + "salt": "test-salt", + "encrypterName": "test-encrypter-name", + "encryptedBlob": "test-encryptedblob" + }]` + keysBlob := base64.RawURLEncoding.EncodeToString([]byte(blob)) + body, err := json.Marshal(putKeysRequest{KeysBlob: keysBlob}) + if err != nil { + t.Fatal(err) + } + + req := httptest.NewRequest("PUT", "/keys", bytes.NewReader([]byte(body))) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("PUT %s responded with %s, want %s", req.URL, http.StatusText(rr.Code), http.StatusText(http.StatusOK)) + } + got := &encryptedKeysData{} + json.Unmarshal(rr.Body.Bytes(), &got) + if got == nil { + t.Error("Expected to receive an encryptedKeysData response but did not") + } + + verifyKeysBlob(t, got.KeysBlob, keysBlob) + + if got.CreatedAt.Before(time.Now().Add(-time.Hour)) { + t.Errorf("got CreatedAt=%s, want CreatedAt within the last hour", got.CreatedAt) + } +} + +func TestGetKeysAPI(t *testing.T) { 
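+	// Stores a keys blob directly through the service, reads it back over
+	// GET /keys, then deletes it and expects a 404 on the next read.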
+ db := openKeystoreDB(t) + defer db.Close() // drop test db + + conn := db.Open() + defer conn.Close() // close db connection + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, `{"userID":"test-user"}`) + })) + defer ts.Close() + + ctx := withUserID(context.Background(), "test-user") + s := &Service{ + db: conn.DB, + authenticator: &Authenticator{ + URL: ts.URL, + APIType: REST, + }, + } + h := ServeMux(s) + + blob := `[{ + "id": "test-id", + "salt": "test-salt", + "encrypterName": "test-encrypter-name", + "encryptedBlob": "test-encryptedblob" + }]` + keysBlob := base64.RawURLEncoding.EncodeToString([]byte(blob)) + _, err := json.Marshal(putKeysRequest{KeysBlob: keysBlob}) + if err != nil { + t.Fatal(err) + } + + _, err = s.putKeys(ctx, putKeysRequest{KeysBlob: keysBlob}) + if err != nil { + t.Fatal(err) + } + + req := httptest.NewRequest("GET", "/keys", nil) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("GET %s responded with %s, want %s", req.URL, http.StatusText(rr.Code), http.StatusText(http.StatusOK)) + } + got := &encryptedKeysData{} + json.Unmarshal(rr.Body.Bytes(), &got) + if got == nil { + t.Error("Expected to receive an encryptedKeysData response but did not") + } + + verifyKeysBlob(t, got.KeysBlob, keysBlob) + + if got.CreatedAt.Before(time.Now().Add(-time.Hour)) { + t.Errorf("got CreatedAt=%s, want CreatedAt within the last hour", got.CreatedAt) + } + + err = s.deleteKeys(ctx) + if err != nil { + t.Fatal(err) + } + + rr = httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusNotFound { + t.Errorf("GET %s responded with %s, want %s", req.URL, http.StatusText(rr.Code), http.StatusText(http.StatusNotFound)) + } +} + +func TestDeleteKeysAPI(t *testing.T) { + db := openKeystoreDB(t) + defer db.Close() // drop test db + + conn := db.Open() + defer conn.Close() // close db connection + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, `{"userID":"test-user"}`) + })) + defer ts.Close() + + s := &Service{ + db: conn.DB, + authenticator: &Authenticator{ + URL: ts.URL, + APIType: REST, + }, + } + h := ServeMux(s) + + blob := `[{ + "id": "test-id", + "salt": "test-salt", + "encrypterName": "test-encrypter-name", + "encryptedBlob": "test-encryptedblob" + }]` + keysBlob := base64.RawURLEncoding.EncodeToString([]byte(blob)) + _, err := json.Marshal(putKeysRequest{ + KeysBlob: keysBlob, + }) + if err != nil { + t.Fatal(err) + } + + ctx := withUserID(context.Background(), "test-user") + _, err = s.putKeys(ctx, putKeysRequest{KeysBlob: keysBlob}) + if err != nil { + t.Fatal(err) + } + + req := httptest.NewRequest("DELETE", "/keys", nil) + rr := httptest.NewRecorder() + h.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("DELETE %s responded with %s, want %s", req.URL, http.StatusText(rr.Code), http.StatusText(http.StatusOK)) + } + + got := rr.Body.Bytes() + dr, _ := json.MarshalIndent(httpjson.DefaultResponse, "", " ") + if !bytes.Equal(got, dr) { + t.Errorf("got: %s, expected: %s", got, dr) + } + + _, err = s.getKeys(ctx) + if errors.Cause(err) != sql.ErrNoRows { + t.Errorf("expect the keys blob of the user %s to be deleted", userID(ctx)) + } +} diff --git a/services/keystore/attachments/2019-07-10-keystore-auth.png b/services/keystore/attachments/2019-07-10-keystore-auth.png new file mode 100644 index 0000000000..959775a428 Binary files /dev/null and 
b/services/keystore/attachments/2019-07-10-keystore-auth.png differ diff --git a/services/keystore/cmd/keystored/README.md b/services/keystore/cmd/keystored/README.md new file mode 100644 index 0000000000..c472a6ecac --- /dev/null +++ b/services/keystore/cmd/keystored/README.md @@ -0,0 +1,99 @@ +# Run keystored in development + +Generate the certificate and the private key pair for localhost +if you haven't done so: + +```sh +cd github.com/stellar/go/exp/services/keystore +./tls/regen.sh +``` +Simply choose all the default options. This will create three files: +tls/server.crt, tls/server.key, and tls/server.csr. + +We will only be using `server.crt` and `server.key`. + +## Install the `keystored` command: + +```sh +cd github.com/stellar/go/exp/services/keystore +go install ./cmd/keystored +``` + +## Set up `keystore` Postgres database locally: + +```sh +createdb keystore +keystored migrate up +``` + +You can undo the last migration by running +```sh +keystored migrate down +``` + +You can redo the last migration by running +```sh +keystored migrate redo +``` + +You can check whether there is any unapplied migrations by running +```sh +keystored migrate status +``` + +## Run `keystored` in development with authentication disabled: + +You might want to set the `KEYSTORE_LISTENER_PORT` environment variable +for the keystored listener. Otherwise, the default value is port 8000. + +```sh +keystored -tls-cert=tls/server.crt -tls-key=tls/server.key -auth=false serve +``` + +Before you have a valid endpoint that can handle your auth token and return a +user id in plaintext, you might want to disable authentication for testing. + +## Run `keystored` in production: + +There are five environment variables used for starting keystored: +`KEYSTORE_DATABASE_URL`, `DB_MAX_IDLE_CONNS`, `DB_MAX_OPEN_CONNS`, +`KEYSTORE_AUTHFORWARDING_URL`, and `KEYSTORE_LISTENER_PORT`. +* `KEYSTORE_DATABASE_URL` is required. +* `KEYSTORE_AUTHFORWARDING_URL` is required if authentication is turned on. +* `DB_MAX_IDLE_CONNS` and `DB_MAX_OPEN_CONNS` are default to 5. +* `KEYSTORE_LISTENER_PORT` is default to 8000. + +```sh +keystored -tls-cert=PATH_TO_TLS_CERT -tls-key=PATH_TO_TLS_KEY serve +``` + +To disable authentication, you can simply add the `-auth=false` flag. 
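+
+For local experiments with auth enabled, the forwarding endpoint only needs to
+answer `200 OK` with a JSON body containing a non-empty `userID` field
+(keystored forwards the client's `Authorization` and `Cookie` headers to it).
+A minimal sketch in Go, purely illustrative; the port and the handler logic are
+assumptions:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+)
+
+func main() {
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		// Validate r.Header.Get("Authorization") or the forwarded cookies
+		// here, then report the authenticated user's ID back to keystored.
+		fmt.Fprintln(w, `{"userID":"test-user"}`)
+	})
+	log.Fatal(http.ListenAndServe(":9000", nil))
+}
+```
+
+Point `KEYSTORE_AUTHFORWARDING_URL` at this endpoint when starting keystored.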
+ +## Build docker image: + +To build docker image: +```sh +cd github.com/stellar/go/services/keystore +make docker-build +``` + +to use custom tag: +```sh +cd github.com/stellar/go/services/keystore +TAG=my-registry.example.com/keystore:dev make docker-build +``` + +to push image built using the command above: +```sh +cd github.com/stellar/go/services/keystore +TAG=my-registry.example.com/keystore:dev make docker-push +``` + +## Logging + +You can put the log messages in a designated file with the `-log-file` flag as well as determine +the log severity level with the `-log-level` flag: + +```sh +keystored -log-file=PATH_TO_YOUR_LOG_FILE -log-level=[debug|info|warn|error] serve +``` diff --git a/services/keystore/cmd/keystored/dev.go b/services/keystore/cmd/keystored/dev.go new file mode 100644 index 0000000000..1d87de9ff1 --- /dev/null +++ b/services/keystore/cmd/keystored/dev.go @@ -0,0 +1,19 @@ +//go:build !aws +// +build !aws + +package main + +import ( + "github.com/stellar/go/services/keystore" + "github.com/stellar/go/support/env" +) + +func getConfig() *keystore.Config { + return &keystore.Config{ + DBURL: env.String("KEYSTORE_DATABASE_URL", "postgres:///keystore?sslmode=disable"), + MaxIdleDBConns: env.Int("DB_MAX_IDLE_CONNS", 5), + MaxOpenDBConns: env.Int("DB_MAX_OPEN_CONNS", 5), + AUTHURL: env.String("KEYSTORE_AUTHFORWARDING_URL", ""), + ListenerPort: env.Int("KEYSTORE_LISTENER_PORT", 8000), + } +} diff --git a/services/keystore/cmd/keystored/main.go b/services/keystore/cmd/keystored/main.go new file mode 100644 index 0000000000..c5daf9d692 --- /dev/null +++ b/services/keystore/cmd/keystored/main.go @@ -0,0 +1,272 @@ +package main + +import ( + "context" + "crypto/tls" + "database/sql" + "flag" + "fmt" + "net" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" + + migrate "github.com/rubenv/sql-migrate" + "github.com/sirupsen/logrus" + "github.com/stellar/go/services/keystore" + "github.com/stellar/go/support/log" + + _ "github.com/lib/pq" +) + +var keystoreMigrations = &migrate.FileMigrationSource{ + Dir: "migrations", +} + +const dbDriverName = "postgres" + +func main() { + ctx := context.Background() + tlsCert := flag.String("tls-cert", "", "TLS certificate file path") + tlsKey := flag.String("tls-key", "", "TLS private key file path") + logFilePath := flag.String("log-file", "", "Log file file path") + logLevel := flag.String("log-level", "info", "Log level used by logrus (debug, info, warn, error)") + auth := flag.Bool("auth", true, "Enable authentication") + apiType := flag.String("api-type", "REST", "Auth Forwarding API Type") + + flag.Parse() + if len(flag.Args()) < 1 { + fmt.Fprintln(os.Stderr, "too few arguments") + os.Exit(1) + } + + if (*tlsCert == "" && *tlsKey != "") || (*tlsCert != "" && *tlsKey == "") { + fmt.Fprintln(os.Stderr, "TLS cert and TLS key have to be presented together") + os.Exit(1) + } + + if *logFilePath != "" { + logFile, err := os.OpenFile(*logFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to open file to log: %v\n", err) + os.Exit(1) + } + + log.DefaultLogger.SetOutput(logFile) + + ll, err := logrus.ParseLevel(*logLevel) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not parse log-level: %v\n", err) + os.Exit(1) + } + log.DefaultLogger.SetLevel(ll) + } + + cfg := getConfig() + if cfg.ListenerPort < 0 { + fmt.Fprintf(os.Stderr, "Port number %d cannot be negative\n", cfg.ListenerPort) + os.Exit(1) + } + + db, err := sql.Open(dbDriverName, cfg.DBURL) + if err != nil { 
+ fmt.Fprintf(os.Stderr, "error opening database: %v\n", err) + os.Exit(1) + } + db.SetMaxOpenConns(cfg.MaxOpenDBConns) + db.SetMaxIdleConns(cfg.MaxIdleDBConns) + + err = db.Ping() + if err != nil { + fmt.Fprintf(os.Stderr, "error accessing database: %v\n", err) + os.Exit(1) + } + + cmd := flag.Arg(0) + switch cmd { + case "serve": + if *auth { + if cfg.AUTHURL == "" { + fmt.Fprintln(os.Stderr, "Auth is enabled but auth forwarding URL is not set") + os.Exit(1) + } + if _, err := url.Parse(cfg.AUTHURL); err != nil { + fmt.Fprintln(os.Stderr, "Invalid auth forwarding URL") + os.Exit(1) + } + } + + aType := strings.ToUpper(*apiType) + if aType != keystore.REST && aType != keystore.GraphQL { + fmt.Fprintln(os.Stderr, `Auth forwarding endpoint type can only be either "REST" or "GRAPHQL"`) + os.Exit(1) + } + + addr := ":" + strconv.Itoa(cfg.ListenerPort) + var authenticator *keystore.Authenticator + if *auth { + authenticator = &keystore.Authenticator{ + URL: cfg.AUTHURL, + APIType: aType, + } + } + + server := &http.Server{ + Addr: addr, + Handler: keystore.ServeMux(keystore.NewService(ctx, db, authenticator)), + ReadTimeout: 5 * time.Second, + } + + listener, err := net.Listen("tcp", addr) + if err != nil { + fmt.Fprintf(os.Stderr, "error listening: %v\n", err) + os.Exit(1) + } + + listener = tcpKeepAliveListener{listener.(*net.TCPListener)} + if *tlsCert != "" { + cer, err := tls.LoadX509KeyPair(*tlsCert, *tlsKey) + if err != nil { + fmt.Fprintf(os.Stderr, "error parsing TLS keypair: %v\n", err) + os.Exit(1) + } + + listener = tls.NewListener(listener, &tls.Config{Certificates: []tls.Certificate{cer}}) + } + + go func() { + err := server.Serve(listener) + if err != nil { + panic(err) + } + }() + fmt.Fprintln(os.Stdout, "Server listening at https://localhost"+addr) + + // block forever without using any resources so this process won't quit while + // the goroutine containing ListenAndServe is still working + select {} + + case "migrate": + migrateCmd := flag.Arg(1) + switch migrateCmd { + case "up": + n, err := migrate.Exec(db, dbDriverName, keystoreMigrations, migrate.Up) + if err != nil { + fmt.Fprintf(os.Stderr, "error applying up migrations: %v\n", err) + os.Exit(1) + } + + fmt.Fprintf(os.Stdout, "Applied %d up migrations!\n", n) + + case "down": + n, err := migrate.ExecMax(db, dbDriverName, keystoreMigrations, migrate.Down, 1) + if err != nil { + fmt.Fprintf(os.Stderr, "error applying down migrations: %v\n", err) + os.Exit(1) + } + + fmt.Fprintf(os.Stdout, "Applied %d down migration!\n", n) + + case "redo": + migrations, _, err := migrate.PlanMigration(db, dbDriverName, keystoreMigrations, migrate.Down, 1) + if err != nil { + fmt.Fprintf(os.Stderr, "error getting migration data: %v\n", err) + os.Exit(1) + } + + if len(migrations) == 0 { + fmt.Fprintln(os.Stdout, "Nothing to do!") + os.Exit(0) + } + + _, err = migrate.ExecMax(db, dbDriverName, keystoreMigrations, migrate.Down, 1) + if err != nil { + fmt.Fprintf(os.Stderr, "error applying the last down migration: %v\n", err) + os.Exit(1) + } + + _, err = migrate.ExecMax(db, dbDriverName, keystoreMigrations, migrate.Up, 1) + if err != nil { + fmt.Fprintf(os.Stderr, "error applying the last up migration: %v\n", err) + os.Exit(1) + } + + fmt.Fprintf(os.Stdout, "Reapplied migration %s.\n", migrations[0].Id) + + case "status": + unappliedMigrations := getUnappliedMigrations(db) + if len(unappliedMigrations) > 0 { + fmt.Fprintf(os.Stdout, "There are %d unapplied migrations:\n", len(unappliedMigrations)) + for _, id := range unappliedMigrations { 
+ fmt.Fprintln(os.Stdout, id) + } + } else { + fmt.Fprintln(os.Stdout, "All migrations have been applied!") + } + + default: + fmt.Fprintf(os.Stderr, "unrecognized migration command: %q\n", migrateCmd) + os.Exit(1) + } + + default: + fmt.Fprintf(os.Stderr, "unrecognized command: %q\n", cmd) + os.Exit(1) + } +} + +// https://github.com/golang/go/blob/c5cf6624076a644906aa7ec5c91c4e01ccd375d3/src/net/http/server.go#L3272-L3288 +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (net.Conn, error) { + tc, err := ln.AcceptTCP() + if err != nil { + return nil, err + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +func getUnappliedMigrations(db *sql.DB) []string { + migrations, err := keystoreMigrations.FindMigrations() + if err != nil { + fmt.Fprintf(os.Stderr, "error getting keystore migrations: %v\n", err) + os.Exit(1) + } + + records, err := migrate.GetMigrationRecords(db, dbDriverName) + if err != nil { + fmt.Fprintf(os.Stderr, "error getting keystore migrations records: %v\n", err) + os.Exit(1) + } + + unappliedMigrations := make(map[string]struct{}) + for _, m := range migrations { + unappliedMigrations[m.Id] = struct{}{} + } + + for _, r := range records { + if _, ok := unappliedMigrations[r.Id]; !ok { + fmt.Fprintf(os.Stdout, "Could not find migration file: %v\n", r.Id) + continue + } + + delete(unappliedMigrations, r.Id) + } + + result := make([]string, 0, len(unappliedMigrations)) + for id := range unappliedMigrations { + result = append(result, id) + } + + sort.Strings(result) + + return result +} diff --git a/services/keystore/context.go b/services/keystore/context.go new file mode 100644 index 0000000000..bdcfbd36fb --- /dev/null +++ b/services/keystore/context.go @@ -0,0 +1,16 @@ +package keystore + +import "context" + +type contextKey int + +const userKey contextKey = iota + +func userID(ctx context.Context) string { + uid, _ := ctx.Value(userKey).(string) + return uid +} + +func withUserID(ctx context.Context, userID string) context.Context { + return context.WithValue(ctx, userKey, userID) +} diff --git a/services/keystore/db_test.go b/services/keystore/db_test.go new file mode 100644 index 0000000000..c2e6ca1a7a --- /dev/null +++ b/services/keystore/db_test.go @@ -0,0 +1,26 @@ +package keystore + +import ( + "testing" + + migrate "github.com/rubenv/sql-migrate" + "github.com/stellar/go/support/db/dbtest" +) + +//TODO: creating a DB for every single test is inefficient. Maybe we can +//improve our dbtest package so that we can just get a transaction. +func openKeystoreDB(t *testing.T) *dbtest.DB { + db := dbtest.Postgres(t) + migrations := &migrate.FileMigrationSource{ + Dir: "migrations", + } + + conn := db.Open() + defer conn.Close() + + _, err := migrate.Exec(conn.DB, "postgres", migrations, migrate.Up) + if err != nil { + t.Fatal(err) + } + return db +} diff --git a/services/keystore/docker/Dockerfile b/services/keystore/docker/Dockerfile new file mode 100644 index 0000000000..7da6e1be33 --- /dev/null +++ b/services/keystore/docker/Dockerfile @@ -0,0 +1,15 @@ +FROM golang:1.17 as build + +ADD . 
/src/keystore +WORKDIR /src/keystore +RUN go build -o /bin/keystored ./services/keystore/cmd/keystored + + +FROM ubuntu:18.04 + +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates +ADD ./services/keystore/migrations/ /app/migrations/ +COPY --from=build /bin/keystored /app/ +EXPOSE 8000 +ENTRYPOINT ["/app/keystored"] +CMD ["serve"] diff --git a/services/keystore/keys.go b/services/keystore/keys.go new file mode 100644 index 0000000000..9da6606659 --- /dev/null +++ b/services/keystore/keys.go @@ -0,0 +1,135 @@ +package keystore + +import ( + "context" + "encoding/base64" + "encoding/json" + "time" + + "github.com/lib/pq" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/problem" +) + +type encryptedKeysData struct { + KeysBlob string `json:"keysBlob"` + CreatedAt time.Time `json:"createdAt"` + ModifiedAt *time.Time `json:"modifiedAt,omitempty"` +} + +type encryptedKeyData struct { + ID string `json:"id"` + Salt string `json:"salt"` + EncrypterName string `json:"encrypterName"` + EncryptedBlob string `json:"encryptedBlob"` +} + +type putKeysRequest struct { + KeysBlob string `json:"keysBlob"` +} + +func (s *Service) putKeys(ctx context.Context, in putKeysRequest) (*encryptedKeysData, error) { + userID := userID(ctx) + if userID == "" { + return nil, probNotAuthorized + } + + if in.KeysBlob == "" { + return nil, problem.MakeInvalidFieldProblem("keysBlob", errRequiredField) + } + + keysData, err := base64.RawURLEncoding.DecodeString(in.KeysBlob) + if err != nil { + // TODO: we need to implement a helper function in the + // support/error package for keeping the stack trace from err + // and substitude the root error for the one we want for better + // debugging experience. + // Thowing away the original err makes it harder for debugging. 
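+		// A possible shape for that helper (an illustrative sketch only; it
+		// does not exist in support/errors today):
+		//
+		//	// ReplaceCause keeps err's stack trace but reports cause instead.
+		//	func ReplaceCause(err, cause error) error
+		//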
+ return nil, probInvalidKeysBlob + } + + var encryptedKeys []encryptedKeyData + err = json.Unmarshal(keysData, &encryptedKeys) + if err != nil { + return nil, probInvalidKeysBlob + } + + for _, ek := range encryptedKeys { + if ek.Salt == "" { + return nil, problem.MakeInvalidFieldProblem("keysBlob", errors.New("salt is required for all the encrypted key data")) + } + if ek.EncrypterName == "" { + return nil, problem.MakeInvalidFieldProblem("keysBlob", errors.New("encrypterName is required for all the encrypted key data")) + } + if ek.EncryptedBlob == "" { + return nil, problem.MakeInvalidFieldProblem("keysBlob", errors.New("encryptedBlob is required for all the encrypted key data")) + } + if ek.ID == "" { + return nil, problem.MakeInvalidFieldProblem("keysBlob", errors.New("id is required for all the encrypted key data")) + } + } + + q := ` + INSERT INTO encrypted_keys (user_id, encrypted_keys_data) + VALUES ($1, $2) + ON CONFLICT (user_id) DO UPDATE SET encrypted_keys_data = excluded.encrypted_keys_data, modified_at = NOW() + RETURNING encrypted_keys_data, created_at, modified_at + ` + var ( + keysBlob []byte + out encryptedKeysData + modifiedAt pq.NullTime + ) + err = s.db.QueryRowContext(ctx, q, userID, keysData).Scan(&keysBlob, &out.CreatedAt, &modifiedAt) + if err != nil { + return nil, errors.Wrap(err, "storing keys blob") + } + + out.KeysBlob = base64.RawURLEncoding.EncodeToString(keysBlob) + if modifiedAt.Valid { + out.ModifiedAt = &modifiedAt.Time + } + return &out, nil +} + +func (s *Service) getKeys(ctx context.Context) (*encryptedKeysData, error) { + userID := userID(ctx) + if userID == "" { + return nil, probNotAuthorized + } + + q := ` + SELECT encrypted_keys_data, created_at, modified_at + FROM encrypted_keys + WHERE user_id = $1 + ` + var ( + keysBlob []byte + out encryptedKeysData + modifiedAt pq.NullTime + ) + err := s.db.QueryRowContext(ctx, q, userID).Scan(&keysBlob, &out.CreatedAt, &modifiedAt) + if err != nil { + return nil, errors.Wrap(err, "getting keys blob") + } + + out.KeysBlob = base64.RawURLEncoding.EncodeToString(keysBlob) + if modifiedAt.Valid { + out.ModifiedAt = &modifiedAt.Time + } + return &out, nil +} + +func (s *Service) deleteKeys(ctx context.Context) error { + userID := userID(ctx) + if userID == "" { + return probNotAuthorized + } + + q := ` + DELETE FROM encrypted_keys + WHERE user_id = $1 + ` + _, err := s.db.ExecContext(ctx, q, userID) + return errors.Wrap(err, "deleting keys blob") +} diff --git a/services/keystore/keys_test.go b/services/keystore/keys_test.go new file mode 100644 index 0000000000..b54f291350 --- /dev/null +++ b/services/keystore/keys_test.go @@ -0,0 +1,137 @@ +package keystore + +import ( + "context" + "database/sql" + "encoding/base64" + "encoding/json" + "reflect" + "testing" + "time" + + "github.com/stellar/go/support/errors" +) + +func TestPutKeys(t *testing.T) { + db := openKeystoreDB(t) + defer db.Close() // drop test db + + conn := db.Open() + defer conn.Close() // close db connection + + ctx := withUserID(context.Background(), "test-user") + s := &Service{conn.DB, nil} + + blob := `[{ + "id": "test-id", + "salt": "test-salt", + "encrypterName": "test-encrypter-name", + "encryptedBlob": "test-encryptedblob" + }]` + keysBlob := base64.RawURLEncoding.EncodeToString([]byte(blob)) + + got, err := s.putKeys(ctx, putKeysRequest{KeysBlob: keysBlob}) + if err != nil { + t.Fatal(err) + } + + verifyKeysBlob(t, got.KeysBlob, keysBlob) + + if got.CreatedAt.Before(time.Now().Add(-time.Hour)) { + t.Errorf("got CreatedAt=%s, want 
CreatedAt within the last hour", got.CreatedAt) + } +} + +func TestGetKeys(t *testing.T) { + db := openKeystoreDB(t) + defer db.Close() // drop test db + + conn := db.Open() + defer conn.Close() // close db connection + + ctx := withUserID(context.Background(), "test-user") + s := &Service{conn.DB, nil} + + blob := `[{ + "id": "test-id", + "salt": "test-salt", + "encrypterName": "test-encrypter-name", + "encryptedBlob": "test-encryptedblob" + }]` + keysBlob := base64.RawURLEncoding.EncodeToString([]byte(blob)) + + _, err := s.putKeys(ctx, putKeysRequest{KeysBlob: keysBlob}) + if err != nil { + t.Fatal(err) + } + + got, err := s.getKeys(ctx) + if err != nil { + t.Fatal(err) + } + + verifyKeysBlob(t, got.KeysBlob, keysBlob) + + if got.CreatedAt.Before(time.Now().Add(-time.Hour)) { + t.Errorf("got CreatedAt=%s, want CreatedAt within the last hour", got.CreatedAt) + } +} + +func TestDeleteKeys(t *testing.T) { + db := openKeystoreDB(t) + defer db.Close() // drop test db + + conn := db.Open() + defer conn.Close() // close db connection + + ctx := withUserID(context.Background(), "test-user") + s := &Service{conn.DB, nil} + + blob := `[{ + "id": "test-id", + "salt": "test-salt", + "encrypterName": "test-encrypter-name", + "encryptedBlob": "test-encryptedblob" + }]` + keysBlob := base64.RawURLEncoding.EncodeToString([]byte(blob)) + + _, err := s.putKeys(ctx, putKeysRequest{KeysBlob: keysBlob}) + if err != nil { + t.Fatal(err) + } + + err = s.deleteKeys(ctx) + if err != nil { + t.Fatal(err) + } + + _, err = s.getKeys(ctx) + if errors.Cause(err) != sql.ErrNoRows { + t.Errorf("expect the keys blob of the user %s to be deleted", userID(ctx)) + } +} + +func verifyKeysBlob(t *testing.T, gotKeysBlob, inKeysBlob string) { + var gotEncryptedKeys, inEncryptedKeys []encryptedKeyData + gotKeysData, err := base64.RawURLEncoding.DecodeString(gotKeysBlob) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal(gotKeysData, &gotEncryptedKeys) + if err != nil { + t.Fatal(err) + } + + inKeysData, err := base64.RawURLEncoding.DecodeString(inKeysBlob) + if err != nil { + t.Fatal(err) + } + err = json.Unmarshal(inKeysData, &inEncryptedKeys) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(gotEncryptedKeys, inEncryptedKeys) { + t.Errorf("got keys: %v, want keys: %v\n", gotEncryptedKeys, inEncryptedKeys) + } +} diff --git a/services/keystore/migrations/2019-05-23.0.initial-migrations.sql b/services/keystore/migrations/2019-05-23.0.initial-migrations.sql new file mode 100644 index 0000000000..c4c4317e17 --- /dev/null +++ b/services/keystore/migrations/2019-05-23.0.initial-migrations.sql @@ -0,0 +1,14 @@ +-- +migrate Up + +CREATE TABLE public.encrypted_keys ( + user_id text NOT NULL PRIMARY KEY, + encrypted_keys_data bytea NOT NULL, + salt text NOT NULL, + encrypter_name text NOT NULL, + created_at timestamp with time zone NOT NULL DEFAULT NOW(), + modified_at timestamp with time zone +); + +-- +migrate Down + +DROP TABLE public.encrypted_keys; diff --git a/services/keystore/migrations/2019-08-20.0.update-encrypted-keys.sql b/services/keystore/migrations/2019-08-20.0.update-encrypted-keys.sql new file mode 100644 index 0000000000..83afc1cd89 --- /dev/null +++ b/services/keystore/migrations/2019-08-20.0.update-encrypted-keys.sql @@ -0,0 +1,21 @@ +-- +migrate Up + +ALTER TABLE public.encrypted_keys + DROP COLUMN salt, + DROP COLUMN encrypter_name, + DROP COLUMN encrypted_keys_data; + +TRUNCATE public.encrypted_keys; + +ALTER TABLE public.encrypted_keys + ADD COLUMN encrypted_keys_data jsonb NOT NULL; + +-- 
+migrate Down + +ALTER TABLE public.encrypted_keys + DROP COLUMN encrypted_keys_data; + +ALTER TABLE public.encrypted_keys + ADD COLUMN salt text NOT NULL, + ADD COLUMN encrypter_name text NOT NULL, + ADD COLUMN encrypted_keys_data bytea NOT NULL; diff --git a/services/keystore/problems.go b/services/keystore/problems.go new file mode 100644 index 0000000000..4e41693641 --- /dev/null +++ b/services/keystore/problems.go @@ -0,0 +1,44 @@ +package keystore + +import ( + "errors" + "net/http" + + "github.com/stellar/go/support/render/problem" +) + +var errRequiredField = errors.New("field value cannot be empty") + +var ( + probInvalidRequest = problem.P{ + Type: "invalid_request_body", + Title: "Invalid Request Body", + Status: 400, + Detail: "Your request body is invalid.", + } + + probMethodNotAllowed = problem.P{ + Type: "method_not_allowed", + Title: "Method Not Allowed", + Status: http.StatusMethodNotAllowed, + Detail: "This endpoint does not support the request method you used. " + + "The server supports HTTP GET/PUT/DELETE for the /keys endpoint.", + } + + probInvalidKeysBlob = problem.P{ + Type: "invalid_keys_blob", + Title: "Invalid Keys Blob", + Status: 400, + Detail: "The keysBlob in your request body is not a valid base64-URL-encoded string or " + + "the decoded content cannt be mapped to EncryptedKeys type." + + "Please encode the keysBlob in your request body as a base64-URL string properly or " + + "make sure the encoded content matches EncryptedKeys type specified in the spec and try again.", + } + + probNotAuthorized = problem.P{ + Type: "not_authorized", + Title: "Not Authorized", + Status: 401, + Detail: "Your request is not authorized.", + } +) diff --git a/services/keystore/service.go b/services/keystore/service.go new file mode 100644 index 0000000000..e6a47ff3f5 --- /dev/null +++ b/services/keystore/service.go @@ -0,0 +1,36 @@ +package keystore + +import ( + "context" + "database/sql" +) + +const ( + REST = "REST" + GraphQL = "GRAPHQL" +) + +type Config struct { + DBURL string + MaxIdleDBConns int + MaxOpenDBConns int + + AUTHURL string + + ListenerPort int +} + +type Authenticator struct { + URL string + APIType string + //GraphQL related fields will be added later +} + +type Service struct { + db *sql.DB + authenticator *Authenticator +} + +func NewService(ctx context.Context, db *sql.DB, authenticator *Authenticator) *Service { + return &Service{db: db, authenticator: authenticator} +} diff --git a/services/keystore/spec.md b/services/keystore/spec.md new file mode 100644 index 0000000000..15c41366ae --- /dev/null +++ b/services/keystore/spec.md @@ -0,0 +1,306 @@ +## Keystore Spec + +### Problem + +We need a keystore service that supports non-custodial applications. +It will make the process of stellarizing any applications easier as +they don't have to implement the logic to create a stellar account +and handle the encrypted private key themselves. + +It is also intended to be the service that wallet SDK talks to. + +### Authentication + +For simplicity we will have each application spin up their own keystore +server, so there won’t be any routing logic in the keystore server that +directs requests to the correct client server to authenticate. Since we +don’t anticipate a lot of requests to the keystore from each user, we +should be able to tolerate having another round trip for relaying the +auth token to the client server. 
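+
+As an illustrative, non-normative sketch, an application embedding this keystore could wire the service to its own authentication endpoint roughly as follows, using the exported `Config`/`Authenticator`/`NewService` types from `service.go`. The database DSN and auth URL are placeholders, and the actual `keystored` command may wire things differently:
+
+```go
+package main
+
+import (
+	"context"
+	"database/sql"
+	"log"
+
+	_ "github.com/lib/pq" // postgres driver
+
+	"github.com/stellar/go/services/keystore"
+)
+
+func main() {
+	// Placeholder DSN; each application runs its own keystore database.
+	db, err := sql.Open("postgres", "postgres://localhost:5432/keystore?sslmode=disable")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Point the keystore at the client application's own auth endpoint; the
+	// keystore relays each request's auth token there to resolve a user ID.
+	auth := &keystore.Authenticator{
+		URL:     "https://client.example.com/authenticate", // placeholder
+		APIType: keystore.REST,
+	}
+
+	svc := keystore.NewService(context.Background(), db, auth)
+	_ = svc // HTTP routing for the /keys endpoints (not shown here) is attached to this service
+}
+```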
+ + + +Keystore will forward two header fields, *Authorization* and *Cookie*, to the +designated endpoint on the client server with an extra header field +*X-Forwarded-For* specifying the request's origin. At this moment, keystore +forwards incoming requests by using HTTP GET method. We plan on adding the +support for clients who use GraphQL to authenticate in the future. + +Clients are expected to put their auth tokens in one of the request header +fields. For example, those who use a bearer token to authenticate should have an +*Authorization* header in the following format: + +``` +Authorization: Bearer +``` + +As mentioned above, clients will have to configure a API endpoint on their +servers used for authentication when booting up the keystore. For those who +choose to autheticate via a REST endpoint, keystore expects to receive a +response in the following json format: + +```json +{ + "userID": "some-user-id" +} +``` + +Requests that the keystore is not able to derive a userID from will +receive the following error: + +*not_authorized:* +```json +{ + "type": "not_authorized", + "title": "Not Authorized", + "status": 401, + "detail": "The request is not authorized." +} +``` + +### Raw Key Data + +*RawKeyData Object:* + +```typescript +interface RawKeyData { + keyType: string; + publicKey: string; + privateKey: string; + path?: string; + extra?: any; +} +``` + +### Encrypted Key Data + +*EncryptedKeysData Object:* + +```typescript +interface EncryptedKeyData { + id: string; + encrypterName: string; + salt: string; + encryptedBlob: string; +} +``` + +Clients will encrypt each `RawKeyData` they want to store on the keystore with +a salt based on the encrypter they use. Clients should assign the resulting +base64-encoded string to the field `encryptedBlob` in the `EncryptedKeyData`. +Please refer to this [encrypt function](https://github.com/stellar/js-stellar-wallets/blob/4a667171df4b22ba9cd15576d022f3e88f3951ff/src/helpers/ScryptEncryption.ts#L71-L108) in our wallet sdk for more details. + +### Encrypted Keys + +```typescript +type EncryptedKeys = EncryptedKeyData[] +``` + +Clients will have to convert `EncryptedKeys` as a base64 URL encoded string +before sending it to the keystore. + +We support three different kinds of HTTP methods to manipulate keys: + +### Encrypted Keys Data + +```typescript +interface EncryptedKeysData { + keysBlob: string; + creationTime: number; + modifiedTime: number; +} +``` + +Note that keysBlob has one global creation time and modified time even though +there could be multiple keys in the blob. + +### PUT /keys + +Put Keys Request: + +```typescript +interface PutKeysRequest { + keysBlob: string; +} +``` + +where the value of the `keysBlob` field is `base64_url_encode(EncryptedKeys)`. + +Put Keys Response: + +```typescript +type PutKeysResponse = EncryptedKeysData; +``` + +
Errors + +*bad_request:* +```json +{ + "keysBlob": "", +} +``` +```json +{ + "type": "bad_request", + "title": "Bad Request", + "status": 400, + "detail": "The request you sent was invalid in some way.", + "extras": { + "invalid_field": "keysBlob", + "reason": "field value cannot be empty" + } +} +``` +
+ +*bad_request:* +```json +{ + "keysBlob": "some-encrypted-key-data-with-no-salt", +} +``` +```json +{ + "type": "bad_request", + "title": "Bad Request", + "status": 400, + "detail": "The request you sent was invalid in some way.", + "extras": { + "invalid_field": "keysBlob", + "reason": "salt is required for all the encrypted key data" + } +} +``` +
+ +*bad_request:* +```json +{ + "keysBlob": "some-encrypted-key-data-with-no-encryptername", +} +``` +```json +{ + "type": "bad_request", + "title": "Bad Request", + "status": 400, + "detail": "The request you sent was invalid in some way.", + "extras": { + "invalid_field": "keysBlob", + "reason": "encrypterName is required for all the encrypted key data" + } +} +``` +
+ +*bad_request:* +```json +{ + "keysBlob": "some-encrypted-key-data-with-no-encryptedblob", +} +``` +```json +{ + "type": "bad_request", + "title": "Bad Request", + "status": 400, + "detail": "The request you sent was invalid in some way.", + "extras": { + "invalid_field": "keysBlob", + "reason": "encryptedBlob is required for all the encrypted key data" + } +} +``` +
+ +*bad_request:* +```json +{ + "keysBlob": "some-encrypted-key-data-with-no-id", +} +``` +```json +{ + "type": "bad_request", + "title": "Bad Request", + "status": 400, + "detail": "The request you sent was invalid in some way.", + "extras": { + "invalid_field": "keysBlob", + "reason": "id is required for all the encrypted key data" + } +} +``` +
+
+*invalid_keys_blob:*
+```json
+{
+  "keysBlob": "some-badly-encoded-blob",
+}
+```
+```json
+{
+  "type": "invalid_keys_blob",
+  "title": "Invalid Keys Blob",
+  "status": 400,
+  "detail": "The keysBlob in your request body is not a valid base64-URL-encoded string or
+      the decoded content cannot be mapped to EncryptedKeys type. Please encode the
+      keysBlob in your request body as a base64-URL string properly or make sure the
+      encoded content matches EncryptedKeys type specified in the spec and try again."
+}
+```
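+
+For reference, here is a minimal, non-normative Go sketch of assembling and submitting a `keysBlob`: each entry carries the four required fields, the array is JSON-marshalled, and the result is encoded with unpadded base64-URL (matching the server's use of `base64.RawURLEncoding`). The keystore URL and bearer token below are placeholders:
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// Mirrors the EncryptedKeyData object defined above.
+type encryptedKeyData struct {
+	ID            string `json:"id"`
+	EncrypterName string `json:"encrypterName"`
+	Salt          string `json:"salt"`
+	EncryptedBlob string `json:"encryptedBlob"`
+}
+
+func main() {
+	// Every field is required; entries missing one are rejected with bad_request.
+	keys := []encryptedKeyData{{
+		ID:            "key-1",                       // placeholder
+		EncrypterName: "example-encrypter",           // placeholder
+		Salt:          "client-generated-salt",       // placeholder
+		EncryptedBlob: "client-encrypted-ciphertext", // output of the client-side encrypt step
+	}}
+
+	raw, err := json.Marshal(keys)
+	if err != nil {
+		panic(err)
+	}
+	keysBlob := base64.RawURLEncoding.EncodeToString(raw) // unpadded base64-URL
+
+	body, err := json.Marshal(map[string]string{"keysBlob": keysBlob})
+	if err != nil {
+		panic(err)
+	}
+
+	req, err := http.NewRequest(http.MethodPut, "https://keystore.example.com/keys", bytes.NewReader(body)) // placeholder URL
+	if err != nil {
+		panic(err)
+	}
+	req.Header.Set("Authorization", "Bearer <token>") // relayed to the client server for auth
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println("status:", resp.Status)
+}
+```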
+ +### GET /keys + +Get Keys Request: + +This endpoint will return the keys blob corresponding to the auth token +in the request header, if the token is valid. This endpoint does not take +any parameter. + +Get Keys Response: + +```typescript +type GetKeysResponse = EncryptedKeysData; +``` +
Errors
+
+*not_found:*
+
+The keystore cannot find any keys associated with the derived userID.
+```json
+{
+  "type": "not_found",
+  "title": "Resource Missing",
+  "status": 404,
+  "detail": "The resource at the url requested was not found. This
+      usually occurs for one of two reasons: The url requested is not valid,
+      or no data in our database could be found with the parameters
+      provided."
+}
+```
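+
+A complementary, non-normative sketch of decoding the `keysBlob` returned by this endpoint: reverse the encoding by base64-URL-decoding (unpadded) and then JSON-unmarshalling into the `EncryptedKeyData` entries defined above:
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+)
+
+// Mirrors the EncryptedKeyData object defined above.
+type encryptedKeyData struct {
+	ID            string `json:"id"`
+	EncrypterName string `json:"encrypterName"`
+	Salt          string `json:"salt"`
+	EncryptedBlob string `json:"encryptedBlob"`
+}
+
+// decodeKeysBlob reverses the keysBlob encoding: unpadded base64-URL decode,
+// then JSON unmarshal into the key entries.
+func decodeKeysBlob(keysBlob string) ([]encryptedKeyData, error) {
+	raw, err := base64.RawURLEncoding.DecodeString(keysBlob)
+	if err != nil {
+		return nil, err
+	}
+	var keys []encryptedKeyData
+	if err := json.Unmarshal(raw, &keys); err != nil {
+		return nil, err
+	}
+	return keys, nil
+}
+
+func main() {
+	// "W10" is the unpadded base64-URL encoding of "[]" (an empty key list).
+	keys, err := decodeKeysBlob("W10")
+	fmt.Println(keys, err)
+}
+```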
+ +### DELETE /keys + +Delete Keys Request: + +This endpoint will delete the keys blob corresponding to the auth token +in the request header, if the token is valid. This endpoint does not take any +parameter. + +Delete Keys Response: + +*Success:* + +```typescript +interface Success { + message: "ok"; +} +``` + +
Errors +
diff --git a/services/keystore/tls/localhost.conf b/services/keystore/tls/localhost.conf new file mode 100644 index 0000000000..54762ecf43 --- /dev/null +++ b/services/keystore/tls/localhost.conf @@ -0,0 +1,17 @@ +[ req ] +distinguished_name = req_distinguished_name + +[ req_distinguished_name ] +C = US +C_default = US +ST = New York +ST_default = New York +L = New York +L_default = New York +O = Stellar Development Foundation +O_default = Stellar Development Foundation +OU = Engineering +OU_default = Engineering +CN = localhost:8443 +CN_default = localhost:8443 +emailAddress_default = diff --git a/services/keystore/tls/regen.sh b/services/keystore/tls/regen.sh new file mode 100755 index 0000000000..6027f42a05 --- /dev/null +++ b/services/keystore/tls/regen.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -e + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +pushd $DIR + +openssl genrsa -des3 -passout pass:x -out new.pass.key 2048 +openssl rsa -passin pass:x -in new.pass.key -out new.key +rm new.pass.key +openssl req -new -key new.key -out new.csr -config localhost.conf +openssl x509 -req -days 365 -in new.csr -signkey new.key -out new.crt + +mv new.csr server.csr +mv new.crt server.crt +mv new.key server.key diff --git a/services/regulated-assets-approval-server/.gitignore b/services/regulated-assets-approval-server/.gitignore new file mode 100644 index 0000000000..4cb512ec1f --- /dev/null +++ b/services/regulated-assets-approval-server/.gitignore @@ -0,0 +1 @@ +/.env \ No newline at end of file diff --git a/services/regulated-assets-approval-server/CHANGELOG.md b/services/regulated-assets-approval-server/CHANGELOG.md new file mode 100644 index 0000000000..5728aba3da --- /dev/null +++ b/services/regulated-assets-approval-server/CHANGELOG.md @@ -0,0 +1,11 @@ +# Changelog + +All notable changes to this project will be documented in this file. This +project adheres to [Semantic Versioning](http://semver.org/). + +As this project is pre 1.0, breaking changes may happen for minor version bumps. +A breaking change will get clearly notified in this log. + +## Unreleased + +Initial release. diff --git a/services/regulated-assets-approval-server/Makefile b/services/regulated-assets-approval-server/Makefile new file mode 100644 index 0000000000..a9e83306c6 --- /dev/null +++ b/services/regulated-assets-approval-server/Makefile @@ -0,0 +1,16 @@ +# Check if we need to prepend docker commands with sudo +SUDO := $(shell docker version >/dev/null 2>&1 || echo "sudo") + +# If TAG is not provided set default value +TAG ?= stellar/regulated-assets-approval-server:$(shell git rev-parse --short HEAD)$(and $(shell git status -s),-dirty-$(shell id -u -n)) +# https://github.com/opencontainers/image-spec/blob/master/annotations.md +BUILD_DATE := $(shell date -u +%FT%TZ) + +docker-build: + cd ../../ && \ + $(SUDO) docker build --pull --label org.opencontainers.image.created="$(BUILD_DATE)" \ + -f services/regulated-assets-approval-server/docker/Dockerfile -t $(TAG) . + +docker-push: + cd ../../ && \ + $(SUDO) docker push $(TAG) diff --git a/services/regulated-assets-approval-server/README.md b/services/regulated-assets-approval-server/README.md new file mode 100644 index 0000000000..717a914a43 --- /dev/null +++ b/services/regulated-assets-approval-server/README.md @@ -0,0 +1,367 @@ +# regulated-assets-approval-server + +```sh +Status: supports SEP-8 transactions revision with a simplified rule: +- only revises transactions containing a single operation of type payment. 
+- payments whose amount does not meet the configured threshold are considered compliant and revised according to the SEP-8 specification. +- payments with an amount exceeding the threshold need further action. +- transactions already compliant with SEP-8 that don't need to be revised will be signed and returned with the "success" SEP-8 status. + +Note: SEP-8 states the service should be able to handle offers in addition to payments, but we're not supporting that at the moment. +``` + +This is a [SEP-8] Approval Server reference implementation based on SEP-8 v1.7.1 +intended for **testing only**. It is being conceived to: + +1. Be used as an example of how regulated assets transactions can be validated + and revised by an anchor. +2. Serve as a demo server where wallets can test and validate their SEP-8 + implementation. + +## Table of Contents + +* [regulated\-assets\-approval\-server](#regulated-assets-approval-server) + * [Table of Contents](#table-of-contents) + * [Usage](#usage) + * [Usage: Configure Issuer](#usage-configure-issuer) + * [Usage: Migrate](#usage-migrate) + * [Migration files](#migration-files) + * [Usage: Serve](#usage-serve) + * [Account Setup](#account-setup) + * [GET /friendbot?addr=\{stellar\_address\}](#get-friendbotaddrstellar_address) + * [API Spec](#api-spec) + * [POST /tx\-approve](#post-tx-approve) + * [POST /kyc\-status/\{CALLBACK\_ID\}](#post-kyc-statuscallback_id) + * [GET /kyc\-status/\{STELLAR\_ADDRESS\_OR\_CALLBACK\_ID\}](#get-kyc-statusstellar_address_or_callback_id) + * [DELETE /kyc\-status/\{STELLAR\_ADDRESS\}](#delete-kyc-statusstellar_address) + +Created by [gh-md-toc](https://github.com/ekalinin/github-markdown-toc.go) + +## Usage + +``` +$ go install +$ regulated-assets-approval-server --help +SEP-8 Approval Server + +Usage: + regulated-assets-approval-server [command] [flags] + regulated-assets-approval-server [command] + +Available Commands: + configure-issuer Configure the Asset Issuer Account for SEP-8 Regulated Assets + migrate Run migrations on the database + serve Serve the SEP-8 Approval Server + +Use "regulated-assets-approval-server [command] --help" for more information about a command. +``` + +### Usage: Configure Issuer + +``` +$ go install +$ regulated-assets-approval-server configure-issuer --help +Configure the Asset Issuer Account for SEP-8 Regulated Assets + +Usage: + regulated-assets-approval-server configure-issuer [flags] + +Flags: + --asset-code string The code of the regulated asset (ASSET_CODE) + --base-url string The base url to the server where the asset home domain should be. For instance, "https://test.example.com/" if your desired asset home domain is "test.example.com". (BASE_URL) + --horizon-url string Horizon URL used for looking up account details (HORIZON_URL) (default "https://horizon-testnet.stellar.org/") + --issuer-account-secret string Secret key of the issuer account. (ISSUER_ACCOUNT_SECRET) + --network-passphrase string Network passphrase of the Stellar network transactions should be signed for (NETWORK_PASSPHRASE) (default "Test SDF Network ; September 2015") +``` + +### Usage: Migrate + +``` +$ go install +$ regulated-assets-approval-server migrate --help +Run migrations on the database + +Usage: + regulated-assets-approval-server migrate [up|down] [count] [flags] + +Flags: + --database-url string Database URL (DATABASE_URL) (default "postgres://localhost:5432/?sslmode=disable") +``` + +#### Migration files + +This project builds the migrations into the binary and embeds it into the built +project. 
If there are any changes to the db schema, generate a new version of +`internal/db/dbmigrate/dbmigrate_generated.go` using the `gogenerate.sh` script +located at the root of the repo. + +```sh +$ ./gogenerate.sh +``` + +### Usage: Serve + +``` +$ go install +$ regulated-assets-approval-server serve --help +Serve the SEP-8 Approval Server + +Usage: + regulated-assets-approval-server serve [flags] + +Flags: + --asset-code string The code of the regulated asset (ASSET_CODE) + --base-url string The base url address to this server (BASE_URL) + --database-url string Database URL (DATABASE_URL) (default "postgres://localhost:5432/?sslmode=disable") + --friendbot-payment-amount int The amount of regulated assets the friendbot will be distributing (FRIENDBOT_PAYMENT_AMOUNT) (default 10000) + --horizon-url string Horizon URL used for looking up account details (HORIZON_URL) (default "https://horizon-testnet.stellar.org/") + --issuer-account-secret string Secret key of the issuer account. (ISSUER_ACCOUNT_SECRET) + --kyc-required-payment-amount-threshold string The amount threshold when KYC is required, may contain decimals and is greater than 0 (KYC_REQUIRED_PAYMENT_AMOUNT_THRESHOLD) (default "500") + --network-passphrase string Network passphrase of the Stellar network transactions should be signed for (NETWORK_PASSPHRASE) (default "Test SDF Network ; September 2015") + --port int Port to listen and serve on (PORT) (default 8000) +``` + +## Account Setup + +In order to properly use this server for regulated assets, the account whose +secret was added in `--issuer-account-secret (ISSUER_ACCOUNT_SECRET)` needs to +be configured according with SEP-8 [authorization flags] by setting both +`Authorization Required` and `Authorization Revocable` flags. This allows the +issuer to grant and revoke authorization to transact the asset at will. + +You can use the command [`$ regulated-assets-approval-server +configure-issuer`](#usage-configure-issuer) or [this Stellar Laboratory +link](https://laboratory.stellar.org/#txbuilder?params=eyJhdHRyaWJ1dGVzIjp7ImZlZSI6IjEwMCIsImJhc2VGZWUiOiIxMDAiLCJtaW5GZWUiOiIxMDAifSwiZmVlQnVtcEF0dHJpYnV0ZXMiOnsibWF4RmVlIjoiMTAwIn0sIm9wZXJhdGlvbnMiOlt7ImlkIjowLCJhdHRyaWJ1dGVzIjp7InNldEZsYWdzIjozfSwibmFtZSI6InNldE9wdGlvbnMifV19) +to set those flags. + +After setting up the issuer account you can send some amount of the regulated +asset to a stellar account using the servers friendbot +`friendbot/?addr={stellar_address}` endpoint. The friendbot endpoint is not part +of the SEP-8 Approval Server specification, it's a debug feature that allows +accounts to test sending transactions containing payments with the issuer's +regulated asset, to the server. + +### `GET /friendbot?addr={stellar_address}` + +This endpoint sends a payment of 10,000 (this value is configurable) regulated +assets to the provided `addr`. Please be aware the address must first establish +a trustline to the regulated asset in order to receive that payment. You can use +[this +link](https://laboratory.stellar.org/#txbuilder?params=eyJhdHRyaWJ1dGVzIjp7ImZlZSI6IjEwMCIsImJhc2VGZWUiOiIxMDAiLCJtaW5GZWUiOiIxMDAifSwiZmVlQnVtcEF0dHJpYnV0ZXMiOnsibWF4RmVlIjoiMTAwIn0sIm9wZXJhdGlvbnMiOlt7ImlkIjowLCJhdHRyaWJ1dGVzIjp7ImFzc2V0Ijp7InR5cGUiOiJjcmVkaXRfYWxwaGFudW00IiwiY29kZSI6IiIsImlzc3VlciI6IiJ9fSwibmFtZSI6ImNoYW5nZVRydXN0In1dfQ%3D%3D&network=test) +to do that in Stellar Laboratory. + +## API Spec + +### `POST /tx-approve` + +This is the core [SEP-8] endpoint used to validate and process regulated assets +transactions. 
Its response will contain one of the following statuses: +[Success], [Revised], [Action Required], or [Rejected]. + +Note: The example responses below have set their `base-url` env var configured +to `"https://example.com"`. + +**Request:** + +```json +{ + "tx": "AAAAAgAAAAA0Nk3++mfFw4Is6OaUJTKe71XNtxdktcjGrPildK84xAAAJxAAAJ3YAAAABwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAAARllVv+K58Rbwzc/2Ti1IsisLC03udNJblQx2sPLfDygAAAAJNWVVTRAAAAAAAAAAAAAAAqjdTmDnuZm4YrIZ3wQVmVXmWSMO4dLk5dOPzUjWDvIgAAAABKp6IgAAAAAAAAAABdK84xAAAAEACHShDhulyTyvFx9lCU2LjAN9P7g6XqZJ6aNKo/NFb+9awp4pE5soK5cTtahhVzx9RsUcH+FSRmOPu4YEqqBsK" +} +``` + +**Responses:** + +_Success:_ means the transaction has been approved and signed by the issuer +without being revised. For more info read the SEP-8 [Success] section. + +```json +{ + "status": "success", + "message": "Transaction is compliant and signed by the issuer.", + "tx": "AAAAAgAAAAA0Nk3++mfFw4Is6OaUJTKe71XNtxdktcjGrPildK84xAAABdwAAJ3YAAAABwAAAAEAAAAAAAAAAAAAAABgXdapAAAAAAAAAAUAAAABAAAAAKo3U5g57mZuGKyGd8EFZlV5lkjDuHS5OXTj81I1g7yIAAAABwAAAAA0Nk3++mfFw4Is6OaUJTKe71XNtxdktcjGrPildK84xAAAAAJNWVVTRAAAAAAAAAAAAAABAAAAAQAAAACqN1OYOe5mbhishnfBBWZVeZZIw7h0uTl04/NSNYO8iAAAAAcAAAAAEZZVb/iufEW8M3P9k4tSLIrCwtN7nTSW5UMdrDy3w8oAAAACTVlVU0QAAAAAAAAAAAAAAQAAAAAAAAABAAAAABGWVW/4rnxFvDNz/ZOLUiyKwsLTe500luVDHaw8t8PKAAAAAk1ZVVNEAAAAAAAAAAAAAACqN1OYOe5mbhishnfBBWZVeZZIw7h0uTl04/NSNYO8iAAAAAEqnoiAAAAAAQAAAACqN1OYOe5mbhishnfBBWZVeZZIw7h0uTl04/NSNYO8iAAAAAcAAAAAEZZVb/iufEW8M3P9k4tSLIrCwtN7nTSW5UMdrDy3w8oAAAACTVlVU0QAAAAAAAAAAAAAAAAAAAEAAAAAqjdTmDnuZm4YrIZ3wQVmVXmWSMO4dLk5dOPzUjWDvIgAAAAHAAAAADQ2Tf76Z8XDgizo5pQlMp7vVc23F2S1yMas+KV0rzjEAAAAAk1ZVVNEAAAAAAAAAAAAAAAAAAAAAAAAATWDvIgAAABAxXindTDbKTpw9B+1aUdTOTE6CUF610A0ZL+ofBVSlcvHYadc3LfO/L4/V22h2FyHNt2ALwncmlEq+3hpojZDDQ==" +} +``` + +_Revised:_ this response means the transaction was revised to be made compliant, +and signed by the issuer. For more info read the SEP-8 [Revised] section. + +```json +{ + "status": "revised", + "message": "Authorization and deauthorization operations were added.", + "tx": "AAAAAgAAAAA0Nk3++mfFw4Is6OaUJTKe71XNtxdktcjGrPildK84xAAABdwAAJ3YAAAABwAAAAEAAAAAAAAAAAAAAABgXdapAAAAAAAAAAUAAAABAAAAAKo3U5g57mZuGKyGd8EFZlV5lkjDuHS5OXTj81I1g7yIAAAABwAAAAA0Nk3++mfFw4Is6OaUJTKe71XNtxdktcjGrPildK84xAAAAAJNWVVTRAAAAAAAAAAAAAABAAAAAQAAAACqN1OYOe5mbhishnfBBWZVeZZIw7h0uTl04/NSNYO8iAAAAAcAAAAAEZZVb/iufEW8M3P9k4tSLIrCwtN7nTSW5UMdrDy3w8oAAAACTVlVU0QAAAAAAAAAAAAAAQAAAAAAAAABAAAAABGWVW/4rnxFvDNz/ZOLUiyKwsLTe500luVDHaw8t8PKAAAAAk1ZVVNEAAAAAAAAAAAAAACqN1OYOe5mbhishnfBBWZVeZZIw7h0uTl04/NSNYO8iAAAAAEqnoiAAAAAAQAAAACqN1OYOe5mbhishnfBBWZVeZZIw7h0uTl04/NSNYO8iAAAAAcAAAAAEZZVb/iufEW8M3P9k4tSLIrCwtN7nTSW5UMdrDy3w8oAAAACTVlVU0QAAAAAAAAAAAAAAAAAAAEAAAAAqjdTmDnuZm4YrIZ3wQVmVXmWSMO4dLk5dOPzUjWDvIgAAAAHAAAAADQ2Tf76Z8XDgizo5pQlMp7vVc23F2S1yMas+KV0rzjEAAAAAk1ZVVNEAAAAAAAAAAAAAAAAAAAAAAAAATWDvIgAAABAxXindTDbKTpw9B+1aUdTOTE6CUF610A0ZL+ofBVSlcvHYadc3LfO/L4/V22h2FyHNt2ALwncmlEq+3hpojZDDQ==" +} +``` + +_Rejected:_ this response means the transaction is not and couldn't be made +compliant. For more info read the SEP-8 [Rejected] section. + +```json +{ + "status": "rejected", + "error": "There is one or more unauthorized operations in the provided transaction." +} +``` + +_Action Required:_ this response means the user must complete an action before this transaction can +be approved. The approval server will provide a URL that facilitates the action. +Upon completion, the user can resubmit the transaction. For more info read the +SEP-8 [Action Required] section. 
+ +```json +{ + "status": "action_required", + "message": "Payments exceeding 500.00 GOAT needs KYC approval. Please provide an email address.", + "action_url": "https://example.com/kyc-status/cf4fe081-5b38-48b6-86ed-1bcfb7171c7d", + "action_method": "POST", + "action_fields": [ + "email_address" + ] +} +``` + +_Pending:_ this response means the user KYC could not be verified as approved +nor rejected and was marked as "pending". As an arbitrary rule, this server is +marking as "pending" all accounts whose email starts with "y". For more info +read the SEP-8 [Pending] section. + +```json +{ + "status": "pending", + "error": "Your account could not be verified as approved nor rejected and was marked as pending. You will need staff authorization for operations above 500.00 GOAT." +} +``` + +### `POST /kyc-status/{CALLBACK_ID}` + +This endpoint is used for the extra action after `/tx-approve`, as described in +the SEP-8 [Action Required] section. + +Currently an arbitrary criteria is implemented: + +* email addresses starting with "x" will have the KYC automatically denied. +* email addresses starting with "y" will have their KYC marked as pending. +* all other emails will be accepted. + +_Note: you'll need to resubmit your transaction to +[`/tx_approve`](#post-tx-approve) in order to verify if your KYC was approved._ + +**Request:** + +```json +{ + "email_address": "foo@bar.com" +} +``` + +**Response:** + +```json +{ + "result": "no_further_action_required", +} +``` + +After the user has been approved or rejected they can POST their transaction to +[`POST /tx-approve`](#post-tx-approve) for revision. + +If their KYC was rejected they should see a rejection response. +**Response (rejected for emails starting with "x"):** + +```json +{ + "status": "rejected", + "error": "Your KYC was rejected and you're not authorized for operations above 500.00 GOAT." +} +``` + +If their KYC was marked as pending they should see a pending response. +**Response (pending for emails starting with "y"):** + +```json +{ + "status": "pending", + "error": "Your account could not be verified as approved nor rejected and was marked as pending. You will need staff authorization for operations above 500.00 GOAT." +} +``` + +### `GET /kyc-status/{STELLAR_ADDRESS_OR_CALLBACK_ID}` + +Returns the detail of an account that requested KYC, as well some metadata about +its status. 
+ +_Note: This functionality is for test/debugging purposes and it's not +part of the [SEP-8] spec._ + +**Response (pending KYC submission):** + +```json +{ + "stellar_address": "GA2DMTP67JT4LQ4CFTUONFBFGKPO6VONW4LWJNOIY2WPRJLUV44MJZOK", + "callback_id":"e0d9243a-40cf-4baa-9575-913e6c98a12e", + "created_at": "2021-03-26T09:35:06.907293-03:00", +} +``` + +**Response (approved KYC):** + +```json +{ + "stellar_address": "GA2DMTP67JT4LQ4CFTUONFBFGKPO6VONW4LWJNOIY2WPRJLUV44MJZOK", + "callback_id":"e0d9243a-40cf-4baa-9575-913e6c98a12e", + "email_address": "test@test.com", + "created_at": "2021-03-26T09:35:06.907293-03:00", + "kyc_submitted_at": "2021-03-26T14:03:43.314334-03:00", + "approved_at": "2021-03-26T14:03:43.314334-03:00", +} +``` + +**Response (rejected KYC):** + +```json +{ + "stellar_address": "GA2DMTP67JT4LQ4CFTUONFBFGKPO6VONW4LWJNOIY2WPRJLUV44MJZOK", + "callback_id":"e0d9243a-40cf-4baa-9575-913e6c98a12e", + "email_address": "xtest@test.com", + "created_at": "2021-03-26T09:35:06.907293-03:00", + "kyc_submitted_at": "2021-03-26T14:03:43.314334-03:00", + "rejected_at": "2021-03-26T14:03:43.314334-03:00", +} +``` + +**Response (pending KYC):** + +```json +{ + "stellar_address": "GA2DMTP67JT4LQ4CFTUONFBFGKPO6VONW4LWJNOIY2WPRJLUV44MJZOK", + "callback_id":"e0d9243a-40cf-4baa-9575-913e6c98a12e", + "email_address": "ytest@test.com", + "created_at": "2021-03-26T09:35:06.907293-03:00", + "kyc_submitted_at": "2021-03-26T14:03:43.314334-03:00", + "pending_at": "2021-03-26T14:03:43.314334-03:00", +} +``` + +### `DELETE /kyc-status/{STELLAR_ADDRESS}` + +Deletes a stellar account from the list of KYCs. If the stellar address is not +in the database to be deleted the server will return with a `404 - Not Found`. + +_Note: This functionality is for test/debugging purposes and it's not part of +the [SEP-8] spec._ + +**Response:** + +```json +{ + "message": "ok" +} +``` + +[SEP-8]: https://github.com/stellar/stellar-protocol/blob/7c795bb9abc606cd1e34764c4ba07900d58fe26e/ecosystem/sep-0008.md +[authorization flags]: https://github.com/stellar/stellar-protocol/blob/7c795bb9abc606cd1e34764c4ba07900d58fe26e/ecosystem/sep-0008.md#authorization-flags +[Action Required]: https://github.com/stellar/stellar-protocol/blob/7c795bb9abc606cd1e34764c4ba07900d58fe26e/ecosystem/sep-0008.md#action-required +[Rejected]: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0008.md#rejected +[Revised]:https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0008.md#revised +[Success]: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0008.md#success +[Pending]: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0008.md#pending diff --git a/services/regulated-assets-approval-server/cmd/configureissuer.go b/services/regulated-assets-approval-server/cmd/configureissuer.go new file mode 100644 index 0000000000..ae9a94e8bd --- /dev/null +++ b/services/regulated-assets-approval-server/cmd/configureissuer.go @@ -0,0 +1,71 @@ +package cmd + +import ( + "go/types" + + "github.com/spf13/cobra" + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/network" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/configureissuer" + "github.com/stellar/go/support/config" +) + +type ConfigureIssuer struct{} + +func (c *ConfigureIssuer) Command() *cobra.Command { + opts := configureissuer.Options{} + configOpts := config.ConfigOptions{ + { + Name: "asset-code", + Usage: "The code of the regulated asset", + OptType: types.String, + 
ConfigKey: &opts.AssetCode, + Required: true, + }, + { + Name: "base-url", + Usage: "The base url to the server where the asset home domain should be. For instance, \"https://test.example.com/\" if your desired asset home domain is \"test.example.com\".", + OptType: types.String, + ConfigKey: &opts.BaseURL, + Required: true, + }, + { + Name: "horizon-url", + Usage: "Horizon URL used for looking up account details", + OptType: types.String, + ConfigKey: &opts.HorizonURL, + FlagDefault: horizonclient.DefaultTestNetClient.HorizonURL, + Required: true, + }, + { + Name: "issuer-account-secret", + Usage: "Secret key of the issuer account.", + OptType: types.String, + ConfigKey: &opts.IssuerAccountSecret, + Required: true, + }, + { + Name: "network-passphrase", + Usage: "Network passphrase of the Stellar network transactions should be signed for", + OptType: types.String, + ConfigKey: &opts.NetworkPassphrase, + FlagDefault: network.TestNetworkPassphrase, + Required: true, + }, + } + cmd := &cobra.Command{ + Use: "configure-issuer", + Short: "Configure the Asset Issuer Account for SEP-8 Regulated Assets", + Run: func(_ *cobra.Command, _ []string) { + configOpts.Require() + configOpts.SetValues() + c.Run(opts) + }, + } + configOpts.Init(cmd) + return cmd +} + +func (c *ConfigureIssuer) Run(opts configureissuer.Options) { + configureissuer.Setup(opts) +} diff --git a/services/regulated-assets-approval-server/cmd/migrate.go b/services/regulated-assets-approval-server/cmd/migrate.go new file mode 100644 index 0000000000..c6801678a1 --- /dev/null +++ b/services/regulated-assets-approval-server/cmd/migrate.go @@ -0,0 +1,103 @@ +package cmd + +import ( + "go/types" + "strconv" + "strings" + + migrate "github.com/rubenv/sql-migrate" + "github.com/spf13/cobra" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db/dbmigrate" + "github.com/stellar/go/support/config" + "github.com/stellar/go/support/log" +) + +type MigrateCommand struct { + DatabaseURL string +} + +func (c *MigrateCommand) Command() *cobra.Command { + configOpts := config.ConfigOptions{ + { + Name: "database-url", + Usage: "Database URL", + OptType: types.String, + ConfigKey: &c.DatabaseURL, + FlagDefault: "postgres://localhost:5432/?sslmode=disable", + Required: true, + }, + } + cmd := &cobra.Command{ + Use: "migrate [up|down] [count]", + Short: "Run migrations on the database", + PersistentPreRun: func(cmd *cobra.Command, args []string) { + configOpts.Require() + configOpts.SetValues() + }, + Run: func(cmd *cobra.Command, args []string) { + c.Migrate(cmd, args) + }, + } + configOpts.Init(cmd) + + return cmd +} + +func (c *MigrateCommand) Migrate(cmd *cobra.Command, args []string) { + db, err := db.Open(c.DatabaseURL) + if err != nil { + log.Errorf("Error opening database: %s", err.Error()) + return + } + + if len(args) < 1 { + cmd.Help() + return + } + dirStr := args[0] + + var dir migrate.MigrationDirection + switch dirStr { + case "down": + dir = migrate.Down + case "up": + dir = migrate.Up + default: + log.Errorf("Invalid migration direction, must be 'up' or 'down'.") + return + } + + var count int + if len(args) >= 2 { + count, err = strconv.Atoi(args[1]) + if err != nil { + log.Errorf("Invalid migration count, must be a number.") + return + } + if count < 1 { + log.Errorf("Invalid migration count, must be a number greater than zero.") + return + } + } + + migrations, err := dbmigrate.PlanMigration(db, dir, count) + if err != nil { + 
log.Errorf("Error planning migration: %s", err.Error()) + return + } + if len(migrations) > 0 { + log.Infof("Migrations to apply %s: %s", dirStr, strings.Join(migrations, ", ")) + } + + n, err := dbmigrate.Migrate(db, dir, count) + if err != nil { + log.Errorf("Error applying migrations: %s", err.Error()) + return + } + if n > 0 { + log.Infof("Successfully applied %d migrations %s.", n, dirStr) + } else { + log.Infof("No migrations applied %s.", dirStr) + } +} diff --git a/services/regulated-assets-approval-server/cmd/serve.go b/services/regulated-assets-approval-server/cmd/serve.go new file mode 100644 index 0000000000..c54f9b3ed2 --- /dev/null +++ b/services/regulated-assets-approval-server/cmd/serve.go @@ -0,0 +1,103 @@ +package cmd + +import ( + "go/types" + + "github.com/spf13/cobra" + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/network" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve" + "github.com/stellar/go/support/config" +) + +type ServeCommand struct{} + +func (c *ServeCommand) Command() *cobra.Command { + opts := serve.Options{} + configOpts := config.ConfigOptions{ + { + Name: "issuer-account-secret", + Usage: "Secret key of the issuer account.", + OptType: types.String, + ConfigKey: &opts.IssuerAccountSecret, + Required: true, + }, + { + Name: "asset-code", + Usage: "The code of the regulated asset", + OptType: types.String, + ConfigKey: &opts.AssetCode, + Required: true, + }, + { + Name: "database-url", + Usage: "Database URL", + OptType: types.String, + ConfigKey: &opts.DatabaseURL, + FlagDefault: "postgres://localhost:5432/?sslmode=disable", + Required: true, + }, + { + Name: "friendbot-payment-amount", + Usage: "The amount of regulated assets the friendbot will be distributing", + OptType: types.Int, + ConfigKey: &opts.FriendbotPaymentAmount, + FlagDefault: 10000, + Required: true, + }, + { + Name: "horizon-url", + Usage: "Horizon URL used for looking up account details", + OptType: types.String, + ConfigKey: &opts.HorizonURL, + FlagDefault: horizonclient.DefaultTestNetClient.HorizonURL, + Required: true, + }, + { + Name: "network-passphrase", + Usage: "Network passphrase of the Stellar network transactions should be signed for", + OptType: types.String, + ConfigKey: &opts.NetworkPassphrase, + FlagDefault: network.TestNetworkPassphrase, + Required: true, + }, + { + Name: "port", + Usage: "Port to listen and serve on", + OptType: types.Int, + ConfigKey: &opts.Port, + FlagDefault: 8000, + Required: true, + }, + { + Name: "base-url", + Usage: "The base url address to this server", + OptType: types.String, + ConfigKey: &opts.BaseURL, + Required: true, + }, + { + Name: "kyc-required-payment-amount-threshold", + Usage: "The amount threshold when KYC is required, may contain decimals and is greater than 0", + OptType: types.String, + ConfigKey: &opts.KYCRequiredPaymentAmountThreshold, + FlagDefault: "500", + Required: true, + }, + } + cmd := &cobra.Command{ + Use: "serve", + Short: "Serve the SEP-8 Approval Server", + Run: func(_ *cobra.Command, _ []string) { + configOpts.Require() + configOpts.SetValues() + c.Run(opts) + }, + } + configOpts.Init(cmd) + return cmd +} + +func (c *ServeCommand) Run(opts serve.Options) { + serve.Serve(opts) +} diff --git a/services/regulated-assets-approval-server/docker/Dockerfile b/services/regulated-assets-approval-server/docker/Dockerfile new file mode 100644 index 0000000000..c0eed3ff97 --- /dev/null +++ b/services/regulated-assets-approval-server/docker/Dockerfile @@ -0,0 +1,14 @@ 
+FROM golang:1.17 as build + +ADD . /src/regulated-assets-approval-server +WORKDIR /src/regulated-assets-approval-server +RUN go build -o /bin/regulated-assets-approval-server ./services/regulated-assets-approval-server + + +FROM ubuntu:20.04 + +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates +COPY --from=build /bin/regulated-assets-approval-server /app/ +EXPOSE 8000 +ENTRYPOINT ["/app/regulated-assets-approval-server"] +CMD ["serve"] diff --git a/services/regulated-assets-approval-server/internal/configureissuer/configureissuer.go b/services/regulated-assets-approval-server/internal/configureissuer/configureissuer.go new file mode 100644 index 0000000000..611c92448d --- /dev/null +++ b/services/regulated-assets-approval-server/internal/configureissuer/configureissuer.go @@ -0,0 +1,168 @@ +package configureissuer + +import ( + "net/http" + "net/url" + "time" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" + "github.com/stellar/go/txnbuild" +) + +type Options struct { + AssetCode string + BaseURL string + HorizonURL string + IssuerAccountSecret string + NetworkPassphrase string +} + +func Setup(opts Options) { + hClient := &horizonclient.Client{ + HorizonURL: opts.HorizonURL, + HTTP: &http.Client{Timeout: 30 * time.Second}, + } + if opts.HorizonURL == horizonclient.DefaultTestNetClient.HorizonURL && opts.NetworkPassphrase == network.TestNetworkPassphrase { + hClient = horizonclient.DefaultTestNetClient + } + + issuerKP := keypair.MustParse(opts.IssuerAccountSecret) + + err := setup(opts, hClient) + if err != nil { + log.Error(errors.Wrap(err, "setting up issuer account")) + log.Fatal("Couldn't complete setup!") + } + + log.Infof("πŸŽ‰πŸŽ‰πŸŽ‰ Successfully configured asset issuer for %s:%s", opts.AssetCode, issuerKP.Address()) +} + +func setup(opts Options, hClient horizonclient.ClientInterface) error { + issuerKP, err := keypair.ParseFull(opts.IssuerAccountSecret) + if err != nil { + log.Fatal(errors.Wrap(err, "parsing secret")) + } + + issuerAcc, err := getOrFundIssuerAccount(issuerKP.Address(), hClient) + if err != nil { + return errors.Wrap(err, "getting or funding issuer account") + } + + asset := txnbuild.CreditAsset{ + Code: opts.AssetCode, + Issuer: issuerKP.Address(), + } + assetResults, err := hClient.Assets(horizonclient.AssetRequest{ + ForAssetCode: asset.Code, + ForAssetIssuer: asset.Issuer, + Limit: 1, + }) + if err != nil { + return errors.Wrap(err, "getting list of assets") + } + + u, err := url.Parse(opts.BaseURL) + if err != nil { + return errors.Wrap(err, "parsing base url") + } + homeDomain := u.Hostname() + + if issuerAcc.Flags.AuthRequired && issuerAcc.Flags.AuthRevocable && issuerAcc.HomeDomain == homeDomain && len(assetResults.Embedded.Records) > 0 { + log.Warn("Account already configured. 
Aborting without performing any action.") + return nil + } + + trustorKP, err := keypair.Random() + if err != nil { + return errors.Wrap(err, "generating keypair") + } + + tx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: issuerAcc, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.SetOptions{ + SetFlags: []txnbuild.AccountFlag{ + txnbuild.AuthRequired, + txnbuild.AuthRevocable, + }, + HomeDomain: &homeDomain, + }, + &txnbuild.BeginSponsoringFutureReserves{ + SponsoredID: trustorKP.Address(), + SourceAccount: issuerKP.Address(), + }, + &txnbuild.CreateAccount{ + Destination: trustorKP.Address(), + Amount: "0", + SourceAccount: asset.Issuer, + }, + // a trustline is generated to the desired so horizon creates entry at `{horizon-url}/assets`. This was added as many Wallets reach that endpoint to check if a given asset exists. + &txnbuild.ChangeTrust{ + Line: asset.MustToChangeTrustAsset(), + SourceAccount: trustorKP.Address(), + }, + &txnbuild.SetOptions{ + MasterWeight: txnbuild.NewThreshold(0), + LowThreshold: txnbuild.NewThreshold(1), + MediumThreshold: txnbuild.NewThreshold(1), + HighThreshold: txnbuild.NewThreshold(1), + Signer: &txnbuild.Signer{Address: issuerKP.Address(), Weight: txnbuild.Threshold(10)}, + SourceAccount: trustorKP.Address(), + }, + &txnbuild.EndSponsoringFutureReserves{ + SourceAccount: trustorKP.Address(), + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + if err != nil { + return errors.Wrap(err, "building transaction") + } + + tx, err = tx.Sign(opts.NetworkPassphrase, issuerKP, trustorKP) + if err != nil { + return errors.Wrap(err, "signing transaction") + } + + _, err = hClient.SubmitTransaction(tx) + if err != nil { + return errors.Wrap(err, "submitting transaction") + } + + return nil +} + +func getOrFundIssuerAccount(issuerAddress string, hClient horizonclient.ClientInterface) (*horizon.Account, error) { + issuerAcc, err := hClient.AccountDetail(horizonclient.AccountRequest{ + AccountID: issuerAddress, + }) + if err != nil { + if !horizonclient.IsNotFoundError(err) || hClient != horizonclient.DefaultTestNetClient { + return nil, errors.Wrapf(err, "getting detail for account %s", issuerAddress) + } + + log.Info("Issuer account not found πŸ‘€ on network, will fund it using friendbot.") + _, err = hClient.Fund(issuerAddress) + if err != nil { + return nil, errors.Wrap(err, "funding account with friendbot") + } + log.Info("πŸŽ‰ Successfully funded account using friendbot.") + } + + // now the account should be funded by the friendbot already + issuerAcc, err = hClient.AccountDetail(horizonclient.AccountRequest{ + AccountID: issuerAddress, + }) + if err != nil { + return nil, errors.Wrapf(err, "getting detail for account %s", issuerAddress) + } + + return &issuerAcc, nil +} diff --git a/services/regulated-assets-approval-server/internal/configureissuer/configureissuer_test.go b/services/regulated-assets-approval-server/internal/configureissuer/configureissuer_test.go new file mode 100644 index 0000000000..ad18da5c0f --- /dev/null +++ b/services/regulated-assets-approval-server/internal/configureissuer/configureissuer_test.go @@ -0,0 +1,200 @@ +package configureissuer + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/problem" + 
"github.com/stellar/go/txnbuild" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestSetup_accountAlreadyConfigured(t *testing.T) { + // declare a logging buffer to validate output logs + buf := new(strings.Builder) + log.DefaultLogger.SetOutput(buf) + log.DefaultLogger.SetLevel(log.InfoLevel) + + issuerKP := keypair.MustRandom() + opts := Options{ + AssetCode: "FOO", + BaseURL: "https://domain.test.com/", + HorizonURL: horizonclient.DefaultTestNetClient.HorizonURL, + IssuerAccountSecret: issuerKP.Seed(), + NetworkPassphrase: network.TestNetworkPassphrase, + } + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: issuerKP.Address()}). + Return(horizon.Account{ + AccountID: issuerKP.Address(), + Flags: horizon.AccountFlags{ + AuthRequired: true, + AuthRevocable: true, + }, + HomeDomain: "domain.test.com", + Sequence: "10", + }, nil) + horizonMock. + On("Assets", horizonclient.AssetRequest{ + ForAssetCode: opts.AssetCode, + ForAssetIssuer: issuerKP.Address(), + Limit: 1, + }). + Return(horizon.AssetsPage{ + Embedded: struct{ Records []horizon.AssetStat }{ + Records: []horizon.AssetStat{ + {Amount: "0.0000001"}, + }, + }, + }, nil) + + err := setup(opts, &horizonMock) + require.NoError(t, err) + + require.Contains(t, buf.String(), "Account already configured. Aborting without performing any action.") +} + +func TestGetOrFundIssuerAccount_failsIfNotDefaultTesntet(t *testing.T) { + issuerKP := keypair.MustRandom() + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: issuerKP.Address()}). + Return(horizon.Account{}, problem.NotFound) + + _, err := getOrFundIssuerAccount(issuerKP.Address(), &horizonMock) + wantErrMsg := fmt.Sprintf("getting detail for account %s: problem: not_found", issuerKP.Address()) + require.EqualError(t, err, wantErrMsg) +} + +func TestSetup(t *testing.T) { + issuerKP := keypair.MustRandom() + opts := Options{ + AssetCode: "FOO", + BaseURL: "https://domain.test.com/", + HorizonURL: horizonclient.DefaultTestNetClient.HorizonURL, + IssuerAccountSecret: issuerKP.Seed(), + NetworkPassphrase: network.TestNetworkPassphrase, + } + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: issuerKP.Address()}). + Return(horizon.Account{ + AccountID: issuerKP.Address(), + Sequence: "10", + }, nil) + horizonMock. + On("Assets", horizonclient.AssetRequest{ + ForAssetCode: opts.AssetCode, + ForAssetIssuer: issuerKP.Address(), + Limit: 1, + }). + Return(horizon.AssetsPage{}, nil) + + var didTestSubmitTransaction bool + horizonMock. + On("SubmitTransaction", mock.AnythingOfType("*txnbuild.Transaction")). 
+ Run(func(args mock.Arguments) { + tx, ok := args.Get(0).(*txnbuild.Transaction) + require.True(t, ok) + + issuerSimpleAcc := txnbuild.SimpleAccount{ + AccountID: issuerKP.Address(), + Sequence: 11, + } + assert.Equal(t, issuerSimpleAcc, tx.SourceAccount()) + + assert.Equal(t, int64(11), tx.SequenceNumber()) + assert.Equal(t, int64(300), tx.BaseFee()) + assert.Equal(t, int64(0), tx.Timebounds().MinTime) + assert.LessOrEqual(t, time.Now().UTC().Unix()+299, tx.Timebounds().MaxTime) + assert.GreaterOrEqual(t, time.Now().UTC().Unix()+301, tx.Timebounds().MaxTime) + + beginSponsorOp, ok := tx.Operations()[1].(*txnbuild.BeginSponsoringFutureReserves) + require.True(t, ok) + trustorAccKP := beginSponsorOp.SponsoredID + homeDomain := "domain.test.com" + testAsset := txnbuild.CreditAsset{ + Code: opts.AssetCode, + Issuer: issuerKP.Address(), + } + + wantOps := []txnbuild.Operation{ + &txnbuild.SetOptions{ + SetFlags: []txnbuild.AccountFlag{ + txnbuild.AuthRequired, + txnbuild.AuthRevocable, + }, + HomeDomain: &homeDomain, + }, + &txnbuild.BeginSponsoringFutureReserves{ + SponsoredID: trustorAccKP, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.CreateAccount{ + Destination: trustorAccKP, + Amount: "0", + SourceAccount: issuerKP.Address(), + }, + // a trustline is generated to the desired so horizon creates entry at `{horizon-url}/assets`. This was added as many Wallets reach that endpoint to check if a given asset exists. + &txnbuild.ChangeTrust{ + Line: testAsset.MustToChangeTrustAsset(), + SourceAccount: trustorAccKP, + Limit: "922337203685.4775807", + }, + &txnbuild.SetOptions{ + MasterWeight: txnbuild.NewThreshold(0), + LowThreshold: txnbuild.NewThreshold(1), + MediumThreshold: txnbuild.NewThreshold(1), + HighThreshold: txnbuild.NewThreshold(1), + Signer: &txnbuild.Signer{Address: issuerKP.Address(), Weight: txnbuild.Threshold(10)}, + SourceAccount: trustorAccKP, + }, + &txnbuild.EndSponsoringFutureReserves{ + SourceAccount: trustorAccKP, + }, + } + // SetOptions operation is validated separatedly because the value returned from tx.Operations()[0] contains the unexported field `xdrOp` that prevents a proper comparision. + require.Equal(t, wantOps[0].(*txnbuild.SetOptions).SetFlags, tx.Operations()[0].(*txnbuild.SetOptions).SetFlags) + require.Equal(t, wantOps[0].(*txnbuild.SetOptions).HomeDomain, tx.Operations()[0].(*txnbuild.SetOptions).HomeDomain) + + require.Equal(t, wantOps[1:4], tx.Operations()[1:4]) + + // SetOptions operation is validated separatedly because the value returned from tx.Operations()[4] contains the unexported field `xdrOp` that prevents a proper comparision. + require.Equal(t, wantOps[4].(*txnbuild.SetOptions).SetFlags, tx.Operations()[4].(*txnbuild.SetOptions).SetFlags) + require.Equal(t, wantOps[4].(*txnbuild.SetOptions).HomeDomain, tx.Operations()[4].(*txnbuild.SetOptions).HomeDomain) + + require.Equal(t, wantOps[5:], tx.Operations()[5:]) + + txHash, err := tx.Hash(opts.NetworkPassphrase) + require.NoError(t, err) + + err = issuerKP.Verify(txHash[:], tx.Signatures()[0].Signature) + require.NoError(t, err) + + trustorKP, err := keypair.ParseAddress(trustorAccKP) + require.NoError(t, err) + err = trustorKP.Verify(txHash[:], tx.Signatures()[1].Signature) + require.NoError(t, err) + + didTestSubmitTransaction = true + }). 
+ Return(horizon.Transaction{}, nil) + + err := setup(opts, &horizonMock) + require.NoError(t, err) + + require.True(t, didTestSubmitTransaction) +} diff --git a/services/regulated-assets-approval-server/internal/db/db.go b/services/regulated-assets-approval-server/internal/db/db.go new file mode 100644 index 0000000000..7b461a9218 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/db.go @@ -0,0 +1,10 @@ +package db + +import ( + "github.com/jmoiron/sqlx" + _ "github.com/lib/pq" +) + +func Open(dataSourceName string) (*sqlx.DB, error) { + return sqlx.Open("postgres", dataSourceName) +} diff --git a/services/regulated-assets-approval-server/internal/db/db_test.go b/services/regulated-assets-approval-server/internal/db/db_test.go new file mode 100644 index 0000000000..91cb6771c0 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/db_test.go @@ -0,0 +1,30 @@ +package db + +import ( + "testing" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOpen_openAndPingSucceeds(t *testing.T) { + db := dbtest.Postgres(t) + + sqlxDB, err := Open(db.DSN) + require.NoError(t, err) + assert.Equal(t, "postgres", sqlxDB.DriverName()) + + err = sqlxDB.Ping() + require.NoError(t, err) +} + +func TestOpen_openAndPingFails(t *testing.T) { + sqlxDB, err := Open("postgres://127.0.0.1:0") + require.NoError(t, err) + assert.Equal(t, "postgres", sqlxDB.DriverName()) + + err = sqlxDB.Ping() + require.Error(t, err) + require.Contains(t, err.Error(), "dial tcp 127.0.0.1:0: connect:") +} diff --git a/services/regulated-assets-approval-server/internal/db/dbmigrate/dbmigrate.go b/services/regulated-assets-approval-server/internal/db/dbmigrate/dbmigrate.go new file mode 100644 index 0000000000..f5cbc5822b --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/dbmigrate/dbmigrate.go @@ -0,0 +1,35 @@ +package dbmigrate + +import ( + "github.com/jmoiron/sqlx" + migrate "github.com/rubenv/sql-migrate" +) + +//go:generate go run github.com/kevinburke/go-bindata/go-bindata@v3.18.0+incompatible -nometadata -ignore .+\.(go|swp)$ -pkg dbmigrate -o dbmigrate_generated.go ./migrations + +var migrationSource = &migrate.AssetMigrationSource{ + Asset: Asset, + AssetDir: AssetDir, + Dir: "migrations", +} + +// PlanMigration finds the migrations that would be applied if Migrate was to +// be run now. +func PlanMigration(db *sqlx.DB, dir migrate.MigrationDirection, count int) ([]string, error) { + migrations, _, err := migrate.PlanMigration(db.DB, db.DriverName(), migrationSource, dir, count) + if err != nil { + return nil, err + } + ids := make([]string, 0, len(migrations)) + for _, m := range migrations { + ids = append(ids, m.Id) + } + return ids, nil +} + +// Migrate runs all the migrations to get the database to the state described +// by the migration files in the direction specified. Count is the maximum +// number of migrations to apply or rollback. 
+func Migrate(db *sqlx.DB, dir migrate.MigrationDirection, count int) (int, error) { + return migrate.ExecMax(db.DB, db.DriverName(), migrationSource, dir, count) +} diff --git a/services/regulated-assets-approval-server/internal/db/dbmigrate/dbmigrate_generated.go b/services/regulated-assets-approval-server/internal/db/dbmigrate/dbmigrate_generated.go new file mode 100644 index 0000000000..4b52521be3 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/dbmigrate/dbmigrate_generated.go @@ -0,0 +1,319 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// migrations/2021-05-05.0.initial.sql (162B) +// migrations/2021-05-18.0.accounts-kyc-status.sql (414B) +// migrations/2021-06-08.0.pending-kyc-status.sql (193B) + +package dbmigrate + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _migrations202105050InitialSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x54\xcc\xd1\x0d\xc2\x30\x0c\x04\xd0\xff\x4c\x71\xff\x28\x4c\xc1\x08\x30\x80\x01\xa7\xb5\xd4\xda\x91\x6d\xa8\xb2\x3d\x8a\xf8\x40\x7c\xde\xdd\xd3\xd5\x8a\xeb\x2a\x81\x5d\x16\xa7\x14\x53\x34\xd9\x18\x12\x10\x4d\xd6\xd9\xd0\xb6\x0d\xf0\xde\x73\x80\xf4\x39\x27\x42\x13\x8f\x44\x24\x79\x8a\x2e\xe8\x26\x9a\x68\xe6\xa5\x56\xd8\xcb\x7f\x77\x81\x3b\x37\x73\xc6\xc1\x18\x9c\x58\xe9\xcd\x20\xc4\x63\xe5\x9d\xce\x65\xfa\xd3\x17\x33\x6e\xfd\x3f\x5f\xec\xd0\x52\x3e\x01\x00\x00\xff\xff\xd3\x79\x21\xda\xa2\x00\x00\x00") + +func migrations202105050InitialSqlBytes() ([]byte, error) { + return bindataRead( + _migrations202105050InitialSql, + "migrations/2021-05-05.0.initial.sql", + ) +} + +func migrations202105050InitialSql() (*asset, error) { + bytes, err := migrations202105050InitialSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/2021-05-05.0.initial.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd1, 0xd1, 0x21, 0xe9, 0x6d, 0xe0, 0xfe, 0xb4, 0x8b, 0x78, 0x2, 0xae, 0x5c, 0xd5, 0x8b, 0x41, 0xb8, 0x4b, 0xaa, 0x3a, 0xea, 0x69, 0xf, 0xf3, 0x2f, 0x6c, 0xae, 0x38, 0x46, 0xb, 0x2, 0xfc}} + return a, nil +} + +var _migrations202105180AccountsKycStatusSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x90\xc1\x4e\x83\x40\x10\x86\xef\xfb\x14\xff\xb1\x8d\xd6\x17\xe8\x09\x05\x13\x23\x42\x43\x20\xa6\x27\x32\x2c\x13\x5d\xbb\x0b\x9b\xdd\xc1\xaa\x4f\x6f\x02\x26\xda\x13\x1e\x27\xf3\xfd\xdf\x4c\xfe\xdd\x0e\x57\xce\xbc\x04\x12\x46\xe3\x95\xba\xab\xb2\xa4\xce\x50\x27\xb7\x79\x06\x3f\x75\xd6\xe8\x1b\xd2\x7a\x9c\x06\x89\xed\xe9\x53\xb7\x51\x48\xa6\x88\x8d\x02\x80\x28\x6c\x2d\x85\x96\xfa\x3e\x70\x8c\x10\xfe\x10\x14\x65\x8d\xa2\xc9\x73\x1c\xaa\x87\xa7\xa4\x3a\xe2\x31\x3b\x5e\xcf\xb8\x26\x6b\x3b\xd2\xa7\xd6\xf4\x97\xe8\xb2\x66\x47\xc6\x5e\xb8\x7e\x62\x81\x49\xb8\x6f\x49\x20\xc6\x71\x14\x72\x1e\x67\x23\xaf\xf3\x88\xaf\x71\xe0\xdf\xa3\x69\x76\x9f\x34\x79\x8d\xa2\x7c\xde\x6c\x97\xfc\xfc\xf6\xd4\x39\x23\x2b\x96\x05\x27\xef\xc3\xf8\xfe\x1f\x32\xf0\x1b\xeb\x15\xa7\xda\xee\x95\xfa\xdb\x72\x3a\x9e\x07\xa5\xd2\xaa\x3c\xac\xb6\xbc\xff\x0e\x00\x00\xff\xff\x68\xde\x80\x57\x9e\x01\x00\x00") + +func migrations202105180AccountsKycStatusSqlBytes() ([]byte, error) { + return bindataRead( + _migrations202105180AccountsKycStatusSql, + "migrations/2021-05-18.0.accounts-kyc-status.sql", + ) +} + +func migrations202105180AccountsKycStatusSql() (*asset, error) { + bytes, err := migrations202105180AccountsKycStatusSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/2021-05-18.0.accounts-kyc-status.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb0, 0x7b, 0x8c, 0x97, 0xe7, 0x6, 0x27, 0x5f, 0x19, 0xe2, 0xbb, 0x98, 0x73, 0x1e, 0x37, 0x74, 0xf0, 0x4a, 0x7, 0xe7, 0x15, 0x66, 0x90, 0x3c, 0x2, 0xab, 0x16, 0x39, 0x65, 0xf2, 0x8a, 0x1f}} + return a, nil +} + +var _migrations202106080PendingKycStatusSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\xcd\x31\x0a\xc2\x30\x14\x06\xe0\xfd\x9d\xe2\xdf\xa5\x5e\xa0\x53\x35\xdd\xa2\x95\xd2\xce\x21\xc6\x50\x83\xe6\x25\x98\x17\x8a\x9e\x5e\x70\x12\x9c\x1c\xbf\xe9\x6b\x1a\x6c\x62\x58\x1e\x56\x3c\xe6\x4c\xd4\xe9\xa9\x1f\x31\x75\x3b\xdd\x23\xd7\xf3\x3d\xb8\xad\x75\x2e\x55\x96\x62\x6e\x4f\x67\x8a\x58\xa9\x85\x00\xa0\x53\x0a\xfb\x41\xcf\x87\x23\xb2\xe7\x4b\xe0\xc5\x58\x81\x84\xe8\x8b\xd8\x98\xb1\x06\xb9\x7e\x88\x57\x62\xdf\x12\x7d\x5f\x2a\xad\xfc\xd7\xa6\xc6\xe1\xf4\xdb\xb5\xf4\x0e\x00\x00\xff\xff\x0b\x35\xb1\x8a\xc1\x00\x00\x00") + +func migrations202106080PendingKycStatusSqlBytes() ([]byte, error) { + return bindataRead( + _migrations202106080PendingKycStatusSql, + "migrations/2021-06-08.0.pending-kyc-status.sql", + ) +} + +func migrations202106080PendingKycStatusSql() (*asset, error) { + bytes, err := migrations202106080PendingKycStatusSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/2021-06-08.0.pending-kyc-status.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x10, 0x1c, 0x6f, 0xa9, 0x5e, 0x89, 0xfa, 0x5b, 0x1f, 0x1e, 0xf2, 0xc6, 0xe0, 0xeb, 0x6f, 0xe5, 0xa5, 0x63, 0x50, 0x6b, 0xd5, 0xdb, 0x54, 0xac, 0xc2, 0x1, 0x82, 0x27, 0xc4, 0x70, 0xcf, 0x9c}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. 
+func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "migrations/2021-05-05.0.initial.sql": migrations202105050InitialSql, + "migrations/2021-05-18.0.accounts-kyc-status.sql": migrations202105180AccountsKycStatusSql, + "migrations/2021-06-08.0.pending-kyc-status.sql": migrations202106080PendingKycStatusSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "migrations": &bintree{nil, map[string]*bintree{ + "2021-05-05.0.initial.sql": &bintree{migrations202105050InitialSql, map[string]*bintree{}}, + "2021-05-18.0.accounts-kyc-status.sql": &bintree{migrations202105180AccountsKycStatusSql, map[string]*bintree{}}, + "2021-06-08.0.pending-kyc-status.sql": &bintree{migrations202106080PendingKycStatusSql, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) 
+} diff --git a/services/regulated-assets-approval-server/internal/db/dbmigrate/dbmigrate_test.go b/services/regulated-assets-approval-server/internal/db/dbmigrate/dbmigrate_test.go new file mode 100644 index 0000000000..22251658e9 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/dbmigrate/dbmigrate_test.go @@ -0,0 +1,247 @@ +package dbmigrate + +import ( + "net/http" + "os" + "strings" + "testing" + + assetfs "github.com/elazarl/go-bindata-assetfs" + migrate "github.com/rubenv/sql-migrate" + "github.com/shurcooL/httpfs/filter" + dbpkg "github.com/stellar/go/services/regulated-assets-approval-server/internal/db" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db/dbtest" + supportHttp "github.com/stellar/go/support/http" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGeneratedAssets(t *testing.T) { + localAssets := http.FileSystem(filter.Keep(http.Dir("."), func(path string, fi os.FileInfo) bool { + return fi.IsDir() || strings.HasSuffix(path, ".sql") + })) + generatedAssets := &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, AssetInfo: AssetInfo} + + if !supportHttp.EqualFileSystems(localAssets, generatedAssets, "/") { + t.Fatalf("generated migrations does not match local migrations") + } +} + +func TestPlanMigration_upApplyOne(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + migrations, err := PlanMigration(session, migrate.Up, 1) + require.NoError(t, err) + wantMigrations := []string{"2021-05-05.0.initial.sql"} + assert.Equal(t, wantMigrations, migrations) +} + +func TestPlanMigration_upApplyAll(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + migrations, err := PlanMigration(session, migrate.Up, 0) + require.NoError(t, err) + require.GreaterOrEqual(t, len(migrations), 3) + wantAtLeastMigrations := []string{ + "2021-05-05.0.initial.sql", + "2021-05-18.0.accounts-kyc-status.sql", + "2021-06-08.0.pending-kyc-status.sql", + } + assert.Equal(t, wantAtLeastMigrations, migrations) +} + +func TestPlanMigration_upApplyNone(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 0) + require.NoError(t, err) + require.Greater(t, n, 1) + + migrations, err := PlanMigration(session, migrate.Up, 0) + require.NoError(t, err) + require.Empty(t, migrations) +} + +func TestPlanMigration_downApplyOne(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + migrations, err := PlanMigration(session, migrate.Down, 1) + require.NoError(t, err) + wantMigrations := []string{"2021-05-18.0.accounts-kyc-status.sql"} + assert.Equal(t, wantMigrations, migrations) +} + +func TestPlanMigration_downApplyTwo(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 3) + require.NoError(t, err) + require.Equal(t, 3, n) + + migrations, err := PlanMigration(session, migrate.Down, 0) + require.NoError(t, err) + wantMigrations := []string{ + "2021-06-08.0.pending-kyc-status.sql", + "2021-05-18.0.accounts-kyc-status.sql", + "2021-05-05.0.initial.sql", + } + assert.Equal(t, wantMigrations, migrations) +} + +func 
TestPlanMigration_downApplyNone(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 0) + require.NoError(t, err) + require.Equal(t, 2, n) + + migrations, err := PlanMigration(session, migrate.Down, 0) + require.NoError(t, err) + assert.Empty(t, migrations) +} + +func TestMigrate_upApplyOne(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 1) + require.NoError(t, err) + assert.Equal(t, 1, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "2021-05-05.0.initial.sql", + } + assert.Equal(t, wantIDs, ids) +} + +func TestMigrate_upApplyAll(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 0) + require.NoError(t, err) + require.Greater(t, n, 1) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "2021-05-05.0.initial.sql", + "2021-05-18.0.accounts-kyc-status.sql", + "2021-06-08.0.pending-kyc-status.sql", + } + assert.Equal(t, wantIDs, ids) +} + +func TestMigrate_upApplyNone(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 0) + require.NoError(t, err) + require.Greater(t, n, 1) + + n, err = Migrate(session, migrate.Up, 0) + require.NoError(t, err) + require.Zero(t, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "2021-05-05.0.initial.sql", + "2021-05-18.0.accounts-kyc-status.sql", + "2021-06-08.0.pending-kyc-status.sql", + } + assert.Equal(t, wantIDs, ids) +} + +func TestMigrate_downApplyOne(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 1) + require.NoError(t, err) + require.Equal(t, 1, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + wantIDs := []string{ + "2021-05-05.0.initial.sql", + } + assert.Equal(t, wantIDs, ids) +} + +func TestMigrate_downApplyAll(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 0) + require.NoError(t, err) + require.Equal(t, 2, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + assert.Empty(t, ids) +} + +func TestMigrate_downApplyNone(t *testing.T) { + db := dbtest.OpenWithoutMigrations(t) + session, err := dbpkg.Open(db.DSN) + require.NoError(t, err) + + n, err := Migrate(session, migrate.Up, 2) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 0) + require.NoError(t, err) + require.Equal(t, 2, n) + + n, err = Migrate(session, migrate.Down, 0) + require.NoError(t, err) + require.Equal(t, 
0, n) + + ids := []string{} + err = session.Select(&ids, `SELECT id FROM gorp_migrations`) + require.NoError(t, err) + assert.Empty(t, ids) +} diff --git a/services/regulated-assets-approval-server/internal/db/dbmigrate/migrations/2021-05-05.0.initial.sql b/services/regulated-assets-approval-server/internal/db/dbmigrate/migrations/2021-05-05.0.initial.sql new file mode 100644 index 0000000000..21884dcac3 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/dbmigrate/migrations/2021-05-05.0.initial.sql @@ -0,0 +1,7 @@ +-- This migration file is intentionally empty and is a first starting point for +-- our migrations before we yet have a schema. + +-- +migrate Up + +-- +migrate Down + diff --git a/services/regulated-assets-approval-server/internal/db/dbmigrate/migrations/2021-05-18.0.accounts-kyc-status.sql b/services/regulated-assets-approval-server/internal/db/dbmigrate/migrations/2021-05-18.0.accounts-kyc-status.sql new file mode 100644 index 0000000000..a40e01892b --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/dbmigrate/migrations/2021-05-18.0.accounts-kyc-status.sql @@ -0,0 +1,15 @@ +-- +migrate Up + +CREATE TABLE public.accounts_kyc_status ( + stellar_address text NOT NULL PRIMARY KEY, + callback_id text NOT NULL, + email_address text, + created_at timestamp with time zone NOT NULL DEFAULT NOW(), + kyc_submitted_at timestamp with time zone, + approved_at timestamp with time zone, + rejected_at timestamp with time zone +); + +-- +migrate Down + +DROP TABLE public.accounts_kyc_status; \ No newline at end of file diff --git a/services/regulated-assets-approval-server/internal/db/dbmigrate/migrations/2021-06-08.0.pending-kyc-status.sql b/services/regulated-assets-approval-server/internal/db/dbmigrate/migrations/2021-06-08.0.pending-kyc-status.sql new file mode 100644 index 0000000000..70bad829bd --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/dbmigrate/migrations/2021-06-08.0.pending-kyc-status.sql @@ -0,0 +1,9 @@ +-- +migrate Up + +ALTER TABLE public.accounts_kyc_status + ADD COLUMN pending_at timestamp with time zone; + +-- +migrate Down + +ALTER TABLE public.accounts_kyc_status + DROP COLUMN pending_at; diff --git a/services/regulated-assets-approval-server/internal/db/dbtest/dbtest.go b/services/regulated-assets-approval-server/internal/db/dbtest/dbtest.go new file mode 100644 index 0000000000..d09610c795 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/dbtest/dbtest.go @@ -0,0 +1,39 @@ +package dbtest + +import ( + "path" + "runtime" + "testing" + + migrate "github.com/rubenv/sql-migrate" + "github.com/stellar/go/support/db/dbtest" +) + +func OpenWithoutMigrations(t *testing.T) *dbtest.DB { + db := dbtest.Postgres(t) + return db +} + +func Open(t *testing.T) *dbtest.DB { + db := OpenWithoutMigrations(t) + + // Get the folder holding the migrations relative to this file. We cannot + // hardcode "../dbmigrate/migrations" because Open is called from tests in + // multiple packages and tests are executed with the current working + // directory set to the package the test lives in. 
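+	// runtime.Caller(0) reports the path of this source file (as recorded at
+	// build time), so the migrations directory is resolved relative to this
+	// file rather than to the calling test's working directory.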
+ _, filename, _, _ := runtime.Caller(0) + migrationsDir := path.Join(path.Dir(filename), "..", "dbmigrate", "migrations") + + migrations := &migrate.FileMigrationSource{ + Dir: migrationsDir, + } + + conn := db.Open() + defer conn.Close() + + _, err := migrate.Exec(conn.DB, "postgres", migrations, migrate.Up) + if err != nil { + t.Fatal(err) + } + return db +} diff --git a/services/regulated-assets-approval-server/internal/db/dbtest/dbtest_test.go b/services/regulated-assets-approval-server/internal/db/dbtest/dbtest_test.go new file mode 100644 index 0000000000..95f6213b13 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/db/dbtest/dbtest_test.go @@ -0,0 +1,18 @@ +package dbtest + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestOpen(t *testing.T) { + db := Open(t) + session := db.Open() + + count := 0 + err := session.Get(&count, `SELECT COUNT(*) FROM gorp_migrations`) + require.NoError(t, err) + assert.Greater(t, count, 0) +} diff --git a/services/regulated-assets-approval-server/internal/serve/api_kyc_status_test.go b/services/regulated-assets-approval-server/internal/serve/api_kyc_status_test.go new file mode 100644 index 0000000000..ab655e07b6 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/api_kyc_status_test.go @@ -0,0 +1,216 @@ +package serve + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/go-chi/chi" + "github.com/google/uuid" + "github.com/stellar/go/keypair" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db/dbtest" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/kycstatus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAPI_postKYCStatus(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + handler := kycstatus.PostHandler{DB: conn} + m := chi.NewMux() + m.Post("/kyc-status/{callback_id}", handler.ServeHTTP) + + q := ` + INSERT INTO accounts_kyc_status (stellar_address, callback_id) + VALUES ($1, $2) + ` + clientKP := keypair.MustRandom() + callbackID := uuid.New().String() + _, err := handler.DB.ExecContext(ctx, q, clientKP.Address(), callbackID) + require.NoError(t, err) + + r := httptest.NewRequest("POST", "/kyc-status/"+callbackID, strings.NewReader(`{"email_address": "email@test.com"}`)) + r = r.WithContext(ctx) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + resp := w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + type kycStatusPOSTResponse struct { + Result string `json:"result"` + } + var kycStatusPOSTResponseApprove kycStatusPOSTResponse + err = json.Unmarshal(body, &kycStatusPOSTResponseApprove) + require.NoError(t, err) + wantPostResponse := kycStatusPOSTResponse{ + Result: "no_further_action_required", + } + require.Equal(t, wantPostResponse, kycStatusPOSTResponseApprove) + + q = ` + SELECT rejected_at, pending_at, approved_at + FROM accounts_kyc_status + WHERE stellar_address = $1 AND callback_id = $2 + ` + var rejectedAt, pendingAt, approvedAt sql.NullTime + err = handler.DB.QueryRowContext(ctx, q, clientKP.Address(), callbackID).Scan(&rejectedAt, &pendingAt, &approvedAt) + 
require.NoError(t, err) + + assert.True(t, approvedAt.Valid) + assert.False(t, rejectedAt.Valid) + assert.False(t, pendingAt.Valid) +} + +func TestAPI_getKYCStatus(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + handler := kycstatus.GetDetailHandler{DB: conn} + m := chi.NewMux() + m.Get("/kyc-status/{stellar_address_or_callback_id}", handler.ServeHTTP) + + // step 1: insert data into database + const q = ` + INSERT INTO accounts_kyc_status (stellar_address, callback_id, email_address, created_at, kyc_submitted_at, rejected_at, pending_at, approved_at) + VALUES + ('rejected-stellar-address', 'rejected-callback-id', 'xrejected@test.com', $1::timestamptz, $2::timestamptz, $2::timestamptz, NULL, NULL), + ('pending-stellar-address', 'pending-callback-id', 'ypending@test.com', $1::timestamptz, $3::timestamptz, NULL, $3::timestamptz, NULL), + ('approved-stellar-address', 'approved-callback-id', 'approved@test.com', $1::timestamptz, $4::timestamptz, NULL, NULL, $4::timestamptz) + ` + rejectedAt := time.Now().Add(-2 * time.Hour).UTC().Truncate(time.Second).Format(time.RFC3339) + pendingAt := time.Now().Add(-1 * time.Hour).UTC().Truncate(time.Second).Format(time.RFC3339) + approvedAt := time.Now().UTC().Truncate(time.Second).Format(time.RFC3339) + createdAt := time.Now().Add(-1 * time.Hour).UTC().Truncate(time.Second).Format(time.RFC3339) + _, err := handler.DB.ExecContext(ctx, q, createdAt, rejectedAt, pendingAt, approvedAt) + require.NoError(t, err) + + // step 2: GET "rejected" response + r := httptest.NewRequest("GET", "/kyc-status/rejected-stellar-address", nil) + r = r.WithContext(ctx) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + resp := w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody := fmt.Sprintf(`{ + "stellar_address": "rejected-stellar-address", + "callback_id": "rejected-callback-id", + "email_address": "xrejected@test.com", + "created_at": "%s", + "kyc_submitted_at": "%s", + "rejected_at": "%s" + }`, createdAt, rejectedAt, rejectedAt) + require.JSONEq(t, wantBody, string(body)) + + // step 2: GET "pending" response + r = httptest.NewRequest("GET", "/kyc-status/pending-stellar-address", nil) + r = r.WithContext(ctx) + w = httptest.NewRecorder() + m.ServeHTTP(w, r) + resp = w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody = fmt.Sprintf(`{ + "stellar_address": "pending-stellar-address", + "callback_id": "pending-callback-id", + "email_address": "ypending@test.com", + "created_at": "%s", + "kyc_submitted_at": "%s", + "pending_at": "%s" + }`, createdAt, pendingAt, pendingAt) + require.JSONEq(t, wantBody, string(body)) + + // step 3: GET "approved" response + r = httptest.NewRequest("GET", "/kyc-status/approved-stellar-address", nil) + r = r.WithContext(ctx) + w = httptest.NewRecorder() + m.ServeHTTP(w, r) + resp = w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody = fmt.Sprintf(`{ + "stellar_address": "approved-stellar-address", + "callback_id": "approved-callback-id", + 
"email_address": "approved@test.com", + "created_at": "%s", + "kyc_submitted_at": "%s", + "approved_at": "%s" + }`, createdAt, approvedAt, approvedAt) + require.JSONEq(t, wantBody, string(body)) +} + +func TestAPI_deleteKYCStatus(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + handler := kycstatus.DeleteHandler{DB: conn} + m := chi.NewMux() + m.Delete("/kyc-status/{stellar_address}", handler.ServeHTTP) + + q := ` + INSERT INTO accounts_kyc_status (stellar_address, callback_id, email_address, kyc_submitted_at, approved_at, rejected_at, pending_at) + VALUES ($1, $2, $3, NOW(), NOW(), NULL, NULL) + ` + approveKP := keypair.MustRandom() + approveCallbackID := uuid.New().String() + approveEmailAddress := "email@test.com" + _, err := handler.DB.ExecContext(ctx, q, approveKP.Address(), approveCallbackID, approveEmailAddress) + require.NoError(t, err) + + r := httptest.NewRequest("DELETE", fmt.Sprintf("/kyc-status/%s", approveKP.Address()), nil) + r = r.WithContext(ctx) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + resp := w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody := `{ + "message":"ok" + }` + require.JSONEq(t, wantBody, string(body)) + + q = ` + SELECT EXISTS( + SELECT stellar_address + FROM accounts_kyc_status + WHERE stellar_address = $1 + ) + ` + var exists bool + err = handler.DB.QueryRowContext(ctx, q, approveKP.Address()).Scan(&exists) + require.NoError(t, err) + require.False(t, exists) +} diff --git a/services/regulated-assets-approval-server/internal/serve/api_tx_approve_test.go b/services/regulated-assets-approval-server/internal/serve/api_tx_approve_test.go new file mode 100644 index 0000000000..bdba6a9825 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/api_tx_approve_test.go @@ -0,0 +1,611 @@ +package serve + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/go-chi/chi" + "github.com/stellar/go/amount" + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db/dbtest" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/kycstatus" + "github.com/stellar/go/txnbuild" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAPI_txApprove_rejected(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + issuerKP := keypair.MustRandom() + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: "FOO", + horizonClient: &horizonclient.MockClient{}, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + // rejected if no transaction "tx" is submitted + m := chi.NewMux() + m.Post("/tx-approve", handler.ServeHTTP) + r := httptest.NewRequest("POST", "/tx-approve", nil) + r = r.WithContext(ctx) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + 
assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "status": "rejected", + "error": "Missing parameter \"tx\"." + }` + require.JSONEq(t, wantBody, string(body)) +} + +func TestAPI_txApprove_revised(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). + Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "5", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "5", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err := tx.Base64() + require.NoError(t, err) + + m := chi.NewMux() + m.Post("/tx-approve", handler.ServeHTTP) + r := httptest.NewRequest("POST", "/tx-approve", strings.NewReader(`{"tx": "`+txe+`"}`)) + r = r.WithContext(ctx) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + var gotResponse txApprovalResponse + err = json.Unmarshal(body, &gotResponse) + require.NoError(t, err) + require.Equal(t, sep8StatusRevised, gotResponse.Status) + require.Equal(t, "Authorization and deauthorization operations were added.", gotResponse.Message) + + gotGenericTx, err := txnbuild.TransactionFromXDR(gotResponse.Tx) + require.NoError(t, err) + gotTx, ok := gotGenericTx.Transaction() + require.True(t, ok) + + require.Len(t, gotTx.Operations(), 5) + // AllowTrust op where issuer fully authorizes sender, asset GOAT + op0, ok := gotTx.Operations()[0].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op0.Trustor, senderKP.Address()) + assert.Equal(t, op0.Type.GetCode(), assetGOAT.GetCode()) + require.True(t, op0.Authorize) + // AllowTrust op where issuer fully authorizes receiver, asset GOAT + op1, ok := gotTx.Operations()[1].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op1.Trustor, receiverKP.Address()) + assert.Equal(t, op1.Type.GetCode(), assetGOAT.GetCode()) + require.True(t, op1.Authorize) + // Payment from sender to receiver + op2, ok := gotTx.Operations()[2].(*txnbuild.Payment) + require.True(t, ok) + assert.Equal(t, op2.Destination, receiverKP.Address()) + assert.Equal(t, op2.Asset, assetGOAT) + // AllowTrust op where issuer fully deauthorizes receiver, asset GOAT + op3, ok := 
gotTx.Operations()[3].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op3.Trustor, receiverKP.Address()) + assert.Equal(t, op3.Type.GetCode(), assetGOAT.GetCode()) + require.False(t, op3.Authorize) + // AllowTrust op where issuer fully deauthorizes sender, asset GOAT + op4, ok := gotTx.Operations()[4].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op4.Trustor, senderKP.Address()) + assert.Equal(t, op4.Type.GetCode(), assetGOAT.GetCode()) + require.False(t, op4.Authorize) +} + +func TestAPI_txAprove_actionRequired(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + // prepare handler dependencies + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). + Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "1", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + // setup route handlers + m := chi.NewMux() + m.Post("/tx-approve", handler.ServeHTTP) + m.Post("/kyc-status/{callback_id}", kycstatus.PostHandler{DB: conn}.ServeHTTP) + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "1", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "501", + Asset: assetGOAT, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err := tx.Base64() + require.NoError(t, err) + + r := httptest.NewRequest("POST", "/tx-approve", strings.NewReader(`{"tx": "`+txe+`"}`)) + r = r.WithContext(ctx) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + resp := w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + var callbackID string + q := `SELECT callback_id FROM accounts_kyc_status WHERE stellar_address = $1` + err = conn.QueryRowContext(ctx, q, senderKP.Address()).Scan(&callbackID) + require.NoError(t, err) + + var gotTxApprovalResponse txApprovalResponse + err = json.Unmarshal(body, &gotTxApprovalResponse) + require.NoError(t, err) + wantTxApprovalResponse := txApprovalResponse{ + Status: sep8Status("action_required"), + Message: "Payments exceeding 500.00 GOAT require KYC approval. 
Please provide an email address.", + ActionURL: "https://example.com/kyc-status/" + callbackID, + ActionMethod: "POST", + ActionFields: []string{"email_address"}, + } + assert.Equal(t, wantTxApprovalResponse, gotTxApprovalResponse) +} + +func TestAPI_txAprove_actionRequiredFlow(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + // prepare handler dependencies + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). + Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "1", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + // setup route handlers + m := chi.NewMux() + m.Post("/tx-approve", handler.ServeHTTP) + m.Post("/kyc-status/{callback_id}", kycstatus.PostHandler{DB: conn}.ServeHTTP) + + // Step 1: client sends payment with 500+ GOAT + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "1", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "501", + Asset: assetGOAT, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err := tx.Base64() + require.NoError(t, err) + + r := httptest.NewRequest("POST", "/tx-approve", strings.NewReader(`{"tx": "`+txe+`"}`)) + r = r.WithContext(ctx) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + resp := w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + var callbackID string + q := `SELECT callback_id FROM accounts_kyc_status WHERE stellar_address = $1` + err = conn.QueryRowContext(ctx, q, senderKP.Address()).Scan(&callbackID) + require.NoError(t, err) + + var gotTxApprovalResponse txApprovalResponse + err = json.Unmarshal(body, &gotTxApprovalResponse) + require.NoError(t, err) + wantTxApprovalResponse := txApprovalResponse{ + Status: sep8Status("action_required"), + Message: "Payments exceeding 500.00 GOAT require KYC approval. Please provide an email address.", + ActionURL: "https://example.com/kyc-status/" + callbackID, + ActionMethod: "POST", + ActionFields: []string{"email_address"}, + } + assert.Equal(t, wantTxApprovalResponse, gotTxApprovalResponse) + + // Step 2: client follows up with action required. 
KYC should get approved for emails not starting with "x" nor "y" + actionMethod := gotTxApprovalResponse.ActionMethod + actionURL := gotTxApprovalResponse.ActionURL + actionFields := strings.NewReader(`{"email_address": "test@email.com"}`) + r = httptest.NewRequest(actionMethod, actionURL, actionFields) + r = r.WithContext(ctx) + w = httptest.NewRecorder() + m.ServeHTTP(w, r) + resp = w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody := `{"result": "no_further_action_required"}` + require.JSONEq(t, wantBody, string(body)) + + // Step 3: verify transactions with 500+ GOAT can now be revised + r = httptest.NewRequest("POST", "/tx-approve", strings.NewReader(`{"tx": "`+txe+`"}`)) + r = r.WithContext(ctx) + w = httptest.NewRecorder() + m.ServeHTTP(w, r) + resp = w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + body, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + gotTxApprovalResponse = txApprovalResponse{} + err = json.Unmarshal(body, &gotTxApprovalResponse) + require.NoError(t, err) + assert.Equal(t, sep8StatusRevised, gotTxApprovalResponse.Status) + assert.Equal(t, "Authorization and deauthorization operations were added.", gotTxApprovalResponse.Message) + require.NotEmpty(t, gotTxApprovalResponse.Tx) + + // Step 4: client follows up with action required again. This time KYC will get rejected as the email starts with "x" + actionFields = strings.NewReader(`{"email_address": "xtest@email.com"}`) + r = httptest.NewRequest(actionMethod, actionURL, actionFields) + r = r.WithContext(ctx) + w = httptest.NewRecorder() + m.ServeHTTP(w, r) + resp = w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody = `{"result": "no_further_action_required"}` + require.JSONEq(t, wantBody, string(body)) + + // Step 5: verify transactions with 500+ GOAT are rejected + r = httptest.NewRequest("POST", "/tx-approve", strings.NewReader(`{"tx": "`+txe+`"}`)) + r = r.WithContext(ctx) + w = httptest.NewRecorder() + m.ServeHTTP(w, r) + resp = w.Result() + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody = `{ + "status": "rejected", + "error": "Your KYC was rejected and you're not authorized for operations above 500.00 GOAT." + }` + require.JSONEq(t, wantBody, string(body)) + + // Step 6: client follows up with action required again. 
This time KYC will be marked as pending as the email starts with "y" + actionFields = strings.NewReader(`{"email_address": "ytest@email.com"}`) + r = httptest.NewRequest(actionMethod, actionURL, actionFields) + r = r.WithContext(ctx) + w = httptest.NewRecorder() + m.ServeHTTP(w, r) + resp = w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody = `{"result": "no_further_action_required"}` + require.JSONEq(t, wantBody, string(body)) + + // Step 7: verify transactions with 500+ GOAT are pending + r = httptest.NewRequest("POST", "/tx-approve", strings.NewReader(`{"tx": "`+txe+`"}`)) + r = r.WithContext(ctx) + w = httptest.NewRecorder() + m.ServeHTTP(w, r) + resp = w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody = `{ + "status": "pending", + "message": "Your account could not be verified as approved nor rejected and was marked as pending. You will need staff authorization for operations above 500.00 GOAT.", + "timeout": 0 + }` + require.JSONEq(t, wantBody, string(body)) +} + +func TestAPI_txApprove_success(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). 
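+		// The handler is expected to look up the sender account on Horizon
+		// before validating and signing the transaction; the stubbed response
+		// below supplies a fixed sequence number so the test stays deterministic.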
+ Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "5", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + m := chi.NewMux() + m.Post("/tx-approve", handler.ServeHTTP) + + // prepare SEP-8 compliant transaction + tx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "5", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.Payment{ + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + txe, err := tx.Base64() + require.NoError(t, err) + + r := httptest.NewRequest("POST", "/tx-approve", strings.NewReader(`{"tx": "`+txe+`"}`)) + r = r.WithContext(ctx) + w := httptest.NewRecorder() + m.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + var gotSuccessResponse txApprovalResponse + err = json.Unmarshal(body, &gotSuccessResponse) + require.NoError(t, err) + wantSuccessResponse := txApprovalResponse{ + Status: sep8Status("success"), + Tx: gotSuccessResponse.Tx, + Message: "Transaction is compliant and signed by the issuer.", + } + assert.Equal(t, wantSuccessResponse, gotSuccessResponse) + + genericTx, err := txnbuild.TransactionFromXDR(gotSuccessResponse.Tx) + require.NoError(t, err) + tx, ok := genericTx.Transaction() + require.True(t, ok) + require.Equal(t, senderKP.Address(), tx.SourceAccount().AccountID) + + require.Len(t, tx.Operations(), 5) + // AllowTrust op where issuer fully authorizes sender, asset GOAT + op0, ok := tx.Operations()[0].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op0.Trustor, senderKP.Address()) + assert.Equal(t, op0.Type.GetCode(), assetGOAT.GetCode()) + require.True(t, op0.Authorize) + // AllowTrust op where issuer fully authorizes receiver, asset GOAT + op1, ok := tx.Operations()[1].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op1.Trustor, receiverKP.Address()) + assert.Equal(t, op1.Type.GetCode(), assetGOAT.GetCode()) + require.True(t, op1.Authorize) + // Payment from sender to receiver + op2, ok := tx.Operations()[2].(*txnbuild.Payment) + require.True(t, ok) + assert.Equal(t, op2.Destination, receiverKP.Address()) + assert.Equal(t, op2.Asset, assetGOAT) + // AllowTrust op where issuer fully deauthorizes receiver, asset GOAT + op3, ok := tx.Operations()[3].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op3.Trustor, receiverKP.Address()) + assert.Equal(t, op3.Type.GetCode(), assetGOAT.GetCode()) + require.False(t, 
op3.Authorize) + // AllowTrust op where issuer fully deauthorizes sender, asset GOAT + op4, ok := tx.Operations()[4].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op4.Trustor, senderKP.Address()) + assert.Equal(t, op4.Type.GetCode(), assetGOAT.GetCode()) + require.False(t, op4.Authorize) + + // check if the transaction contains the issuer's signature + txHash, err := tx.Hash(handler.networkPassphrase) + require.NoError(t, err) + err = handler.issuerKP.Verify(txHash[:], tx.Signatures()[0].Signature) + require.NoError(t, err) +} diff --git a/services/regulated-assets-approval-server/internal/serve/friendbot.go b/services/regulated-assets-approval-server/internal/serve/friendbot.go new file mode 100644 index 0000000000..6da2936461 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/friendbot.go @@ -0,0 +1,184 @@ +package serve + +import ( + "context" + "fmt" + "net/http" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/httperror" + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httpdecode" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" + "github.com/stellar/go/txnbuild" +) + +type friendbotHandler struct { + issuerAccountSecret string + assetCode string + horizonClient horizonclient.ClientInterface + horizonURL string + networkPassphrase string + paymentAmount int +} + +func (h friendbotHandler) validate() error { + if h.issuerAccountSecret == "" { + return errors.New("issuer secret cannot be empty") + } + + if !strkey.IsValidEd25519SecretSeed(h.issuerAccountSecret) { + return errors.Errorf("the provided string %q is not a valid Stellar account seed", h.issuerAccountSecret) + } + + if h.assetCode == "" { + return errors.New("asset code cannot be empty") + } + + if h.horizonClient == nil { + return errors.New("horizon client cannot be nil") + } + + if h.horizonURL == "" { + return errors.New("horizon url cannot be empty") + } + + if h.networkPassphrase == "" { + return errors.New("network passphrase cannot be empty") + } + + if h.paymentAmount == 0 { + return errors.New("payment amount must be greater than zero") + } + + return nil +} + +type friendbotRequest struct { + Address string `query:"addr"` +} + +func (h friendbotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + in := friendbotRequest{} + err := httpdecode.Decode(r, &in) + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "decoding input parameters")) + httpErr := httperror.NewHTTPError(http.StatusBadRequest, "Invalid input parameters") + httpErr.Render(w) + return + } + + err = h.topUpAccountWithRegulatedAsset(ctx, in) + if err != nil { + httpErr, ok := err.(*httperror.Error) + if !ok { + httpErr = httperror.InternalServer + } + httpErr.Render(w) + return + } + + httpjson.Render(w, httpjson.DefaultResponse, httpjson.JSON) +} + +func (h friendbotHandler) topUpAccountWithRegulatedAsset(ctx context.Context, in friendbotRequest) error { + err := h.validate() + if err != nil { + err = errors.Wrap(err, "validating friendbotHandler") + log.Ctx(ctx).Error(err) + return err + } + + if in.Address == "" { + return httperror.NewHTTPError(http.StatusBadRequest, `Missing query paramater "addr".`) + } + + if !strkey.IsValidEd25519PublicKey(in.Address) { + return httperror.NewHTTPError(http.StatusBadRequest, `"addr" is not a valid Stellar address.`) + 
} + + account, err := h.horizonClient.AccountDetail(horizonclient.AccountRequest{AccountID: in.Address}) + if err != nil { + log.Ctx(ctx).Error(errors.Wrapf(err, "getting detail for account %s", in.Address)) + return httperror.NewHTTPError(http.StatusBadRequest, `Please make sure the provided account address already exists in the network.`) + } + + kp, err := keypair.ParseFull(h.issuerAccountSecret) + if err != nil { + err = errors.Wrap(err, "parsing secret") + log.Ctx(ctx).Error(err) + return err + } + + asset := txnbuild.CreditAsset{ + Code: h.assetCode, + Issuer: kp.Address(), + } + + var accountHasTrustline bool + for _, b := range account.Balances { + if b.Asset.Code == asset.Code && b.Asset.Issuer == asset.Issuer { + accountHasTrustline = true + break + } + } + if !accountHasTrustline { + return httperror.NewHTTPError(http.StatusBadRequest, fmt.Sprintf("Account with address %s doesn't have a trustline for %s:%s", in.Address, asset.Code, asset.Issuer)) + } + + issuerAcc, err := h.horizonClient.AccountDetail(horizonclient.AccountRequest{AccountID: kp.Address()}) + if err != nil { + log.Ctx(ctx).Error(errors.Wrapf(err, "getting detail for issuer account %s", kp.Address())) + return httperror.InternalServer + } + + tx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &issuerAcc, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: in.Address, + Type: asset, + Authorize: true, + }, + &txnbuild.Payment{ + Destination: in.Address, + Amount: fmt.Sprintf("%d", h.paymentAmount), + Asset: asset, + }, + &txnbuild.AllowTrust{ + Trustor: in.Address, + Type: asset, + Authorize: false, + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + if err != nil { + err = errors.Wrap(err, "building transaction") + log.Ctx(ctx).Error(err) + return err + } + + tx, err = tx.Sign(h.networkPassphrase, kp) + if err != nil { + err = errors.Wrap(err, "signing transaction") + log.Ctx(ctx).Error(err) + return err + } + + _, err = h.horizonClient.SubmitTransaction(tx) + if err != nil { + err = httperror.ParseHorizonError(err) + log.Ctx(ctx).Error(err) + return err + } + + return nil +} diff --git a/services/regulated-assets-approval-server/internal/serve/friendbot_test.go b/services/regulated-assets-approval-server/internal/serve/friendbot_test.go new file mode 100644 index 0000000000..b28f7ea18e --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/friendbot_test.go @@ -0,0 +1,330 @@ +package serve + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/go-chi/chi" + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/network" + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/protocols/horizon/base" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestFriendbotHandler_validate(t *testing.T) { + // missing secret seed + fh := friendbotHandler{} + err := fh.validate() + require.EqualError(t, err, "issuer secret cannot be empty") + + // invalid secret seed + fh = friendbotHandler{ + issuerAccountSecret: "foo bar", + } + err = fh.validate() + require.EqualError(t, err, "the provided string \"foo bar\" is not a valid Stellar account seed") + + // missing asset code + fh = friendbotHandler{ + issuerAccountSecret: 
"SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + } + err = fh.validate() + require.EqualError(t, err, "asset code cannot be empty") + + // missing horizon client + fh = friendbotHandler{ + issuerAccountSecret: "SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + assetCode: "FOO", + } + err = fh.validate() + require.EqualError(t, err, "horizon client cannot be nil") + + // missing horizon URL + fh = friendbotHandler{ + issuerAccountSecret: "SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + assetCode: "FOO", + horizonClient: horizonclient.DefaultTestNetClient, + } + err = fh.validate() + require.EqualError(t, err, "horizon url cannot be empty") + + // missing network passphrase + fh = friendbotHandler{ + issuerAccountSecret: "SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + assetCode: "FOO", + horizonClient: horizonclient.DefaultTestNetClient, + horizonURL: "https://horizon-testnet.stellar.org/", + } + err = fh.validate() + require.EqualError(t, err, "network passphrase cannot be empty") + + // missing payment amount + fh = friendbotHandler{ + issuerAccountSecret: "SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + assetCode: "FOO", + horizonClient: horizonclient.DefaultTestNetClient, + horizonURL: "https://horizon-testnet.stellar.org/", + networkPassphrase: network.TestNetworkPassphrase, + } + err = fh.validate() + require.EqualError(t, err, "payment amount must be greater than zero") + + // success! + fh = friendbotHandler{ + issuerAccountSecret: "SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + assetCode: "FOO", + horizonClient: horizonclient.DefaultTestNetClient, + horizonURL: "https://horizon-testnet.stellar.org/", + networkPassphrase: network.TestNetworkPassphrase, + paymentAmount: 1, + } + err = fh.validate() + require.NoError(t, err) +} + +func TestFriendbotHandler_serveHTTP_missingAddress(t *testing.T) { + ctx := context.Background() + + handler := friendbotHandler{ + issuerAccountSecret: "SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + assetCode: "FOO", + horizonClient: horizonclient.DefaultTestNetClient, + horizonURL: "https://horizon-testnet.stellar.org/", + networkPassphrase: network.TestNetworkPassphrase, + paymentAmount: 10000, + } + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/friendbot", nil) + r = r.WithContext(ctx) + m := chi.NewMux() + m.Get("/friendbot", handler.ServeHTTP) + m.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody := `{ + "error":"Missing query paramater \"addr\"." 
+ }` + require.JSONEq(t, wantBody, string(body)) +} + +func TestFriendbotHandler_serveHTTP_invalidAddress(t *testing.T) { + ctx := context.Background() + + handler := friendbotHandler{ + issuerAccountSecret: "SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + assetCode: "FOO", + horizonClient: horizonclient.DefaultTestNetClient, + horizonURL: "https://horizon-testnet.stellar.org/", + networkPassphrase: network.TestNetworkPassphrase, + paymentAmount: 10000, + } + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/friendbot?addr=123", nil) + r = r.WithContext(ctx) + m := chi.NewMux() + m.Get("/friendbot", handler.ServeHTTP) + m.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody := `{ + "error":"\"addr\" is not a valid Stellar address." + }` + require.JSONEq(t, wantBody, string(body)) +} + +func TestFriendbotHandler_serveHTTP_accountDoesntExist(t *testing.T) { + ctx := context.Background() + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: "GA2ILZPZAQ4R5PRKZ2X2AFAZK3ND6AGA4VFBQGR66BH36PV3VKMWLLZP"}). + Return(horizon.Account{}, errors.New("something went wrong")) // account doesn't exist on ledger + + handler := friendbotHandler{ + issuerAccountSecret: "SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + assetCode: "FOO", + horizonClient: &horizonMock, + horizonURL: "https://horizon-testnet.stellar.org/", + networkPassphrase: network.TestNetworkPassphrase, + paymentAmount: 10000, + } + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/friendbot?addr=GA2ILZPZAQ4R5PRKZ2X2AFAZK3ND6AGA4VFBQGR66BH36PV3VKMWLLZP", nil) + r = r.WithContext(ctx) + m := chi.NewMux() + m.Get("/friendbot", handler.ServeHTTP) + m.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody := `{ + "error":"Please make sure the provided account address already exists in the network." + }` + require.JSONEq(t, wantBody, string(body)) +} + +func TestFriendbotHandler_serveHTTP_missingTrustline(t *testing.T) { + ctx := context.Background() + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: "GA2ILZPZAQ4R5PRKZ2X2AFAZK3ND6AGA4VFBQGR66BH36PV3VKMWLLZP"}). 
+ Return(horizon.Account{}, nil) + + handler := friendbotHandler{ + issuerAccountSecret: "SB6SFUY6ZJ2ETQHTY456GDAQ547R6NDAU74DTI2CKVVI4JERTUXKB2R4", + assetCode: "FOO", + horizonClient: &horizonMock, + horizonURL: "https://horizon-testnet.stellar.org/", + networkPassphrase: network.TestNetworkPassphrase, + paymentAmount: 10000, + } + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/friendbot?addr=GA2ILZPZAQ4R5PRKZ2X2AFAZK3ND6AGA4VFBQGR66BH36PV3VKMWLLZP", nil) + r = r.WithContext(ctx) + m := chi.NewMux() + m.Get("/friendbot", handler.ServeHTTP) + m.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusBadRequest, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody := `{ + "error":"Account with address GA2ILZPZAQ4R5PRKZ2X2AFAZK3ND6AGA4VFBQGR66BH36PV3VKMWLLZP doesn't have a trustline for FOO:GDCRZMSHZGQYSRXPWDMIUNUQW36SV2NIC3C7R6WWT6XDO267WCI2TTBR" + }` + require.JSONEq(t, wantBody, string(body)) +} + +func TestFriendbotHandler_serveHTTP_issuerAccountDoesntExist(t *testing.T) { + ctx := context.Background() + + // declare a logging buffer to validate output logs + buf := new(strings.Builder) + log.DefaultLogger.SetOutput(buf) + log.DefaultLogger.SetLevel(log.InfoLevel) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: "GA2ILZPZAQ4R5PRKZ2X2AFAZK3ND6AGA4VFBQGR66BH36PV3VKMWLLZP"}). + Return(horizon.Account{ + Balances: []horizon.Balance{ + { + Asset: base.Asset{Code: "FOO", Issuer: "GDDIO6SFRD4SJEQFJOSKPIDYTDM7LM4METFBKN4NFGVR5DTGB7H75N5S"}, + Balance: "0", + }, + }, + }, nil) + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: "GDDIO6SFRD4SJEQFJOSKPIDYTDM7LM4METFBKN4NFGVR5DTGB7H75N5S"}). + Return(horizon.Account{}, errors.New("account doesn't exist")) // issuer account doesn't exist on ledger + + handler := friendbotHandler{ + issuerAccountSecret: "SDVFEIZ3WH5F6GHGK56QITTC5IO6QJ2UIQDWCHE72DAFZFSXYPIHQ6EV", // GDDIO6SFRD4SJEQFJOSKPIDYTDM7LM4METFBKN4NFGVR5DTGB7H75N5S + assetCode: "FOO", + horizonClient: &horizonMock, + horizonURL: "https://horizon-testnet.stellar.org/", + networkPassphrase: network.TestNetworkPassphrase, + paymentAmount: 10000, + } + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/friendbot?addr=GA2ILZPZAQ4R5PRKZ2X2AFAZK3ND6AGA4VFBQGR66BH36PV3VKMWLLZP", nil) + r = r.WithContext(ctx) + m := chi.NewMux() + m.Get("/friendbot", handler.ServeHTTP) + m.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody := `{ + "error":"An error occurred while processing this request." + }` + require.JSONEq(t, wantBody, string(body)) + require.Contains(t, buf.String(), "getting detail for issuer account") +} + +func TestFriendbotHandler_serveHTTP(t *testing.T) { + ctx := context.Background() + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: "GA2ILZPZAQ4R5PRKZ2X2AFAZK3ND6AGA4VFBQGR66BH36PV3VKMWLLZP"}). + Return(horizon.Account{ + Balances: []horizon.Balance{ + { + Asset: base.Asset{Code: "FOO", Issuer: "GDDIO6SFRD4SJEQFJOSKPIDYTDM7LM4METFBKN4NFGVR5DTGB7H75N5S"}, + Balance: "0", + }, + }, + }, nil) + horizonMock. 
+ On("AccountDetail", horizonclient.AccountRequest{AccountID: "GDDIO6SFRD4SJEQFJOSKPIDYTDM7LM4METFBKN4NFGVR5DTGB7H75N5S"}). + Return(horizon.Account{ + AccountID: "GDDIO6SFRD4SJEQFJOSKPIDYTDM7LM4METFBKN4NFGVR5DTGB7H75N5S", + Sequence: "1", + }, nil) + horizonMock. + On("SubmitTransaction", mock.AnythingOfType("*txnbuild.Transaction")). + Return(horizon.Transaction{}, nil) + + handler := friendbotHandler{ + issuerAccountSecret: "SDVFEIZ3WH5F6GHGK56QITTC5IO6QJ2UIQDWCHE72DAFZFSXYPIHQ6EV", // GDDIO6SFRD4SJEQFJOSKPIDYTDM7LM4METFBKN4NFGVR5DTGB7H75N5S + assetCode: "FOO", + horizonClient: &horizonMock, + horizonURL: "https://horizon-testnet.stellar.org/", + networkPassphrase: network.TestNetworkPassphrase, + paymentAmount: 10000, + } + + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/friendbot?addr=GA2ILZPZAQ4R5PRKZ2X2AFAZK3ND6AGA4VFBQGR66BH36PV3VKMWLLZP", nil) + r = r.WithContext(ctx) + m := chi.NewMux() + m.Get("/friendbot", handler.ServeHTTP) + m.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + wantBody := `{ + "message":"ok" + }` + require.JSONEq(t, wantBody, string(body)) +} diff --git a/services/regulated-assets-approval-server/internal/serve/httperror/http_error.go b/services/regulated-assets-approval-server/internal/serve/httperror/http_error.go new file mode 100644 index 0000000000..7f0acd1c80 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/httperror/http_error.go @@ -0,0 +1,54 @@ +package httperror + +import ( + "net/http" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/httpjson" +) + +type Error struct { + ErrorMessage string `json:"error"` + Status int `json:"-"` +} + +func (h *Error) Error() string { + return h.ErrorMessage +} + +func NewHTTPError(status int, errorMessage string) *Error { + return &Error{ + ErrorMessage: errorMessage, + Status: status, + } +} + +func (e *Error) Render(w http.ResponseWriter) { + httpjson.RenderStatus(w, e.Status, e, httpjson.JSON) +} + +var InternalServer = &Error{ + ErrorMessage: "An error occurred while processing this request.", + Status: http.StatusInternalServerError, +} + +var BadRequest = &Error{ + ErrorMessage: "The request was invalid in some way.", + Status: http.StatusBadRequest, +} + +func ParseHorizonError(err error) error { + if err == nil { + return nil + } + + rootErr := errors.Cause(err) + if hError := horizonclient.GetError(rootErr); hError != nil { + resultCode, _ := hError.ResultCodes() + err = errors.Wrapf(err, "error submitting transaction: %+v, %+v\n", hError.Problem, resultCode) + } else { + err = errors.Wrap(err, "error submitting transaction") + } + return err +} diff --git a/services/regulated-assets-approval-server/internal/serve/httperror/http_error_test.go b/services/regulated-assets-approval-server/internal/serve/httperror/http_error_test.go new file mode 100644 index 0000000000..695ada7c78 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/httperror/http_error_test.go @@ -0,0 +1,40 @@ +package httperror + +import ( + "net/http" + "testing" + + "github.com/stellar/go/clients/horizonclient" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/problem" + "github.com/stretchr/testify/require" +) + 
+func TestParseHorizonError(t *testing.T) { + err := ParseHorizonError(nil) + require.Nil(t, err) + + err = ParseHorizonError(errors.New("some error")) + require.EqualError(t, err, "error submitting transaction: some error") + + horizonError := horizonclient.Error{ + Problem: problem.P{ + Type: "bad_request", + Title: "Bad Request", + Status: http.StatusBadRequest, + Extras: map[string]interface{}{ + "result_codes": hProtocol.TransactionResultCodes{ + TransactionCode: "tx_code_here", + InnerTransactionCode: "", + OperationCodes: []string{ + "op_success", + "op_bad_auth", + }, + }, + }, + }, + } + err = ParseHorizonError(horizonError) + require.EqualError(t, err, "error submitting transaction: problem: bad_request, &{TransactionCode:tx_code_here InnerTransactionCode: OperationCodes:[op_success op_bad_auth]}\n: horizon error: \"Bad Request\" (tx_code_here, op_success, op_bad_auth) - check horizon.Error.Problem for more information") +} diff --git a/services/regulated-assets-approval-server/internal/serve/kycstatus/delete_handler.go b/services/regulated-assets-approval-server/internal/serve/kycstatus/delete_handler.go new file mode 100644 index 0000000000..03e1a945ba --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/kycstatus/delete_handler.go @@ -0,0 +1,85 @@ +package kycstatus + +import ( + "context" + "net/http" + + "github.com/jmoiron/sqlx" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/httperror" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httpdecode" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" +) + +type DeleteHandler struct { + DB *sqlx.DB +} + +func (h DeleteHandler) validate() error { + if h.DB == nil { + return errors.New("database cannot be nil") + } + return nil +} + +type deleteRequest struct { + StellarAddress string `path:"stellar_address"` +} + +func (h DeleteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + err := h.validate() + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "validating kyc-status DeleteHandler")) + httperror.InternalServer.Render(w) + return + } + + in := deleteRequest{} + err = httpdecode.Decode(r, &in) + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "decoding kyc-status DELETE Request")) + httperror.BadRequest.Render(w) + return + } + + err = h.handle(ctx, in) + if err != nil { + httpErr, ok := err.(*httperror.Error) + if !ok { + httpErr = httperror.InternalServer + } + httpErr.Render(w) + return + } + + httpjson.Render(w, httpjson.DefaultResponse, httpjson.JSON) +} + +func (h DeleteHandler) handle(ctx context.Context, in deleteRequest) error { + // Check if deleteRequest StellarAddress value is present. 
+ if in.StellarAddress == "" { + return httperror.NewHTTPError(http.StatusBadRequest, "Missing stellar address.") + } + + var existed bool + const q = ` + WITH deleted_rows AS ( + DELETE FROM accounts_kyc_status + WHERE stellar_address = $1 + RETURNING * + ) SELECT EXISTS ( + SELECT * FROM deleted_rows + ) + ` + err := h.DB.QueryRowContext(ctx, q, in.StellarAddress).Scan(&existed) + if err != nil { + return errors.Wrap(err, "querying the database") + } + if !existed { + return httperror.NewHTTPError(http.StatusNotFound, "Not found.") + } + + return nil +} diff --git a/services/regulated-assets-approval-server/internal/serve/kycstatus/delete_handler_test.go b/services/regulated-assets-approval-server/internal/serve/kycstatus/delete_handler_test.go new file mode 100644 index 0000000000..1e7bd3cfad --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/kycstatus/delete_handler_test.go @@ -0,0 +1,87 @@ +package kycstatus + +import ( + "context" + "net/http" + "testing" + + "github.com/google/uuid" + "github.com/stellar/go/keypair" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db/dbtest" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/httperror" + "github.com/stretchr/testify/require" +) + +func TestDeleteHandler_validate(t *testing.T) { + // database is nil + h := DeleteHandler{} + err := h.validate() + require.EqualError(t, err, "database cannot be nil") + + // success + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + h = DeleteHandler{DB: conn} + err = h.validate() + require.NoError(t, err) +} + +func TestDeleteHandler_handle_errors(t *testing.T) { + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + ctx := context.Background() + + h := DeleteHandler{DB: conn} + + // returns "400 - Missing stellar address." if no stellar address is provided + in := deleteRequest{} + err := h.handle(ctx, in) + require.Equal(t, httperror.NewHTTPError(http.StatusBadRequest, "Missing stellar address."), err) + + // returns "404 - Not found." 
if the provided address could not be found + accountKP := keypair.MustRandom() + in = deleteRequest{StellarAddress: accountKP.Address()} + err = h.handle(ctx, in) + require.Equal(t, httperror.NewHTTPError(http.StatusNotFound, "Not found."), err) +} + +func TestDeleteHandler_handle_success(t *testing.T) { + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + ctx := context.Background() + + h := DeleteHandler{DB: conn} + + // tests if the delete handler is really deleting a row from the database + q := ` + INSERT INTO accounts_kyc_status (stellar_address, callback_id, email_address, kyc_submitted_at, approved_at, rejected_at, pending_at) + VALUES ($1, $2, $3, NOW(), NOW(), NULL, NULL) + ` + accountKP := keypair.MustRandom() + callbackID := uuid.New().String() + emailAddress := "email@approved.com" + _, err := h.DB.ExecContext(ctx, q, accountKP.Address(), callbackID, emailAddress) + require.NoError(t, err) + + in := deleteRequest{StellarAddress: accountKP.Address()} + err = h.handle(ctx, in) + require.NoError(t, err) + + q = ` + SELECT EXISTS( + SELECT stellar_address + FROM accounts_kyc_status + WHERE stellar_address = $1 + ) + ` + var exists bool + err = h.DB.QueryRowContext(ctx, q, accountKP.Address()).Scan(&exists) + require.NoError(t, err) + require.False(t, exists) +} diff --git a/services/regulated-assets-approval-server/internal/serve/kycstatus/get_detail_handler.go b/services/regulated-assets-approval-server/internal/serve/kycstatus/get_detail_handler.go new file mode 100644 index 0000000000..ff3accc8fc --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/kycstatus/get_detail_handler.go @@ -0,0 +1,122 @@ +package kycstatus + +import ( + "context" + "database/sql" + "net/http" + "time" + + "github.com/jmoiron/sqlx" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/httperror" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httpdecode" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" +) + +type kycGetResponse struct { + StellarAddress string `json:"stellar_address"` + CallbackID string `json:"callback_id"` + EmailAddress string `json:"email_address,omitempty"` + CreatedAt *time.Time `json:"created_at"` + KYCSubmittedAt *time.Time `json:"kyc_submitted_at,omitempty"` + ApprovedAt *time.Time `json:"approved_at,omitempty"` + RejectedAt *time.Time `json:"rejected_at,omitempty"` + PendingAt *time.Time `json:"pending_at,omitempty"` +} + +func (k *kycGetResponse) Render(w http.ResponseWriter) { + httpjson.Render(w, k, httpjson.JSON) +} + +type GetDetailHandler struct { + DB *sqlx.DB +} + +func (h GetDetailHandler) validate() error { + if h.DB == nil { + return errors.New("database cannot be nil") + } + return nil +} + +type getDetailRequest struct { + StellarAddressOrCallbackID string `path:"stellar_address_or_callback_id"` +} + +func (h GetDetailHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + err := h.validate() + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "validating kyc-status GetDetailHandler")) + httperror.InternalServer.Render(w) + return + } + + in := getDetailRequest{} + err = httpdecode.Decode(r, &in) + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "decoding kyc-status GET Request")) + httperror.BadRequest.Render(w) + return + } + + kycGetResponse, err := h.handle(ctx, in) + if err != nil { + httpErr, ok := err.(*httperror.Error) + if !ok { + httpErr = httperror.InternalServer 
+ } + httpErr.Render(w) + return + } + + kycGetResponse.Render(w) +} + +func (h GetDetailHandler) handle(ctx context.Context, in getDetailRequest) (*kycGetResponse, error) { + // Check if getDetailRequest StellarAddressOrCallbackID value is present. + if in.StellarAddressOrCallbackID == "" { + return nil, httperror.NewHTTPError(http.StatusBadRequest, "Missing stellar address or callbackID.") + } + + // Prepare SELECT query return values. + var ( + stellarAddress, callbackID string + emailAddress sql.NullString + createdAt time.Time + kycSubmittedAt, approvedAt, rejectedAt, pendingAt sql.NullTime + ) + const q = ` + SELECT stellar_address, email_address, created_at, kyc_submitted_at, approved_at, rejected_at, pending_at, callback_id + FROM accounts_kyc_status + WHERE stellar_address = $1 OR callback_id = $1 + ` + err := h.DB.QueryRowContext(ctx, q, in.StellarAddressOrCallbackID).Scan(&stellarAddress, &emailAddress, &createdAt, &kycSubmittedAt, &approvedAt, &rejectedAt, &pendingAt, &callbackID) + if err == sql.ErrNoRows { + return nil, httperror.NewHTTPError(http.StatusNotFound, "Not found.") + } + if err != nil { + return nil, errors.Wrap(err, "querying the database") + } + + return &kycGetResponse{ + StellarAddress: stellarAddress, + CallbackID: callbackID, + EmailAddress: emailAddress.String, + CreatedAt: &createdAt, + KYCSubmittedAt: timePointerIfValid(kycSubmittedAt), + ApprovedAt: timePointerIfValid(approvedAt), + RejectedAt: timePointerIfValid(rejectedAt), + PendingAt: timePointerIfValid(pendingAt), + }, nil +} + +// timePointerIfValid returns a pointer to the date from the provided +// `sql.NullTime` if it's valid or `nil` if it's not. +func timePointerIfValid(nt sql.NullTime) *time.Time { + if nt.Valid { + return &nt.Time + } + return nil +} diff --git a/services/regulated-assets-approval-server/internal/serve/kycstatus/get_detail_handler_test.go b/services/regulated-assets-approval-server/internal/serve/kycstatus/get_detail_handler_test.go new file mode 100644 index 0000000000..f735a0b8a8 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/kycstatus/get_detail_handler_test.go @@ -0,0 +1,159 @@ +package kycstatus + +import ( + "context" + "database/sql" + "net/http" + "testing" + "time" + + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db/dbtest" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/httperror" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetDetailHandler_validate(t *testing.T) { + // database is nil + h := GetDetailHandler{} + err := h.validate() + require.EqualError(t, err, "database cannot be nil") + + // success + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + h = GetDetailHandler{DB: conn} + err = h.validate() + require.NoError(t, err) +} + +func TestTimePointerIfValid(t *testing.T) { + // invalid sql.NullTime should return nil + sqlNullTime := sql.NullTime{} + timePointer := timePointerIfValid(sqlNullTime) + require.Nil(t, timePointer) + + // a valid sql.NullTime should return a time.Time pointer + desiredTime := time.Now() + sqlNullTime = sql.NullTime{ + Valid: true, + Time: desiredTime, + } + timePointer = timePointerIfValid(sqlNullTime) + require.Equal(t, &desiredTime, timePointer) +} + +func TestGetDetailHandler_handle_error(t *testing.T) { + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + ctx := context.Background() + + handler := GetDetailHandler{DB: conn} + 
+ // empty parameter will trigger a "400 - Missing stellar address or callbackID." + in := getDetailRequest{} + kycGetResp, err := handler.handle(ctx, in) + assert.Nil(t, kycGetResp) + require.Equal(t, httperror.NewHTTPError(http.StatusBadRequest, "Missing stellar address or callbackID."), err) + + // nonexistent row will trigger a "404 - Not found.". + in = getDetailRequest{StellarAddressOrCallbackID: "nonexistent"} + kycGetResp, err = handler.handle(ctx, in) + assert.Nil(t, kycGetResp) + require.Equal(t, httperror.NewHTTPError(http.StatusNotFound, "Not found."), err) +} + +func TestGetDetailHandler_handle_success(t *testing.T) { + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + ctx := context.Background() + + handler := GetDetailHandler{DB: conn} + + // step 1: insert test data into database + const q = ` + INSERT INTO accounts_kyc_status (stellar_address, callback_id, email_address, kyc_submitted_at, approved_at, pending_at, rejected_at, created_at) + VALUES + ('rejected-address', 'rejected-callback-id', 'xrejected@test.com', $1::timestamptz, NULL, NULL, $1::timestamptz, $4::timestamptz), + ('pending-address', 'pending-callback-id', 'ypending@test.com', $2::timestamptz, NULL, $2::timestamptz, NULL, $4::timestamptz), + ('approved-address', 'approved-callback-id', 'approved@test.com', $3::timestamptz, $3::timestamptz, NULL, NULL, $4::timestamptz) + ` + rejectedAt := time.Now().Add(-2 * time.Hour).UTC().Truncate(time.Second) + pendingAt := time.Now().Add(-1 * time.Hour).UTC().Truncate(time.Second) + approvedAt := time.Now().UTC().Truncate(time.Second) + createdAt := time.Now().UTC().Truncate(time.Second) + _, err := handler.DB.ExecContext(ctx, q, rejectedAt.Format(time.RFC3339), pendingAt.Format(time.RFC3339), approvedAt.Format(time.RFC3339), createdAt.Format(time.RFC3339)) + require.NoError(t, err) + + // step 2.1: retrieve "rejected" entry with stellar address + in := getDetailRequest{StellarAddressOrCallbackID: "rejected-address"} + kycGetResp, err := handler.handle(ctx, in) + require.NoError(t, err) + wantKYCGetResponse := kycGetResponse{ + StellarAddress: "rejected-address", + CallbackID: "rejected-callback-id", + EmailAddress: "xrejected@test.com", + CreatedAt: &createdAt, + KYCSubmittedAt: &rejectedAt, + RejectedAt: &rejectedAt, + PendingAt: nil, + ApprovedAt: nil, + } + assert.Equal(t, &wantKYCGetResponse, kycGetResp) + + // step 2.2: retrieve "rejected" entry with callbackID + in = getDetailRequest{StellarAddressOrCallbackID: "rejected-callback-id"} + kycGetResp, err = handler.handle(ctx, in) + require.NoError(t, err) + assert.Equal(t, &wantKYCGetResponse, kycGetResp) + + // step 3.1: retrieve "pending" entry with stellar address + in = getDetailRequest{StellarAddressOrCallbackID: "pending-address"} + kycGetResp, err = handler.handle(ctx, in) + require.NoError(t, err) + wantKYCGetResponse = kycGetResponse{ + StellarAddress: "pending-address", + CallbackID: "pending-callback-id", + EmailAddress: "ypending@test.com", + CreatedAt: &createdAt, + KYCSubmittedAt: &pendingAt, + RejectedAt: nil, + PendingAt: &pendingAt, + ApprovedAt: nil, + } + assert.Equal(t, &wantKYCGetResponse, kycGetResp) + + // step 3.2: retrieve "pending" entry with callbackID + in = getDetailRequest{StellarAddressOrCallbackID: "pending-callback-id"} + kycGetResp, err = handler.handle(ctx, in) + require.NoError(t, err) + assert.Equal(t, &wantKYCGetResponse, kycGetResp) + + // step 4.1: retrieve "approved" entry with stellar address + in = 
getDetailRequest{StellarAddressOrCallbackID: "approved-address"} + kycGetResp, err = handler.handle(ctx, in) + require.NoError(t, err) + wantKYCGetResponse = kycGetResponse{ + StellarAddress: "approved-address", + CallbackID: "approved-callback-id", + EmailAddress: "approved@test.com", + CreatedAt: &createdAt, + KYCSubmittedAt: &approvedAt, + RejectedAt: nil, + PendingAt: nil, + ApprovedAt: &approvedAt, + } + assert.Equal(t, &wantKYCGetResponse, kycGetResp) + + // step 4.2: retrieve "approved" entry with callbackID + in = getDetailRequest{StellarAddressOrCallbackID: "approved-callback-id"} + kycGetResp, err = handler.handle(ctx, in) + require.NoError(t, err) + assert.Equal(t, &wantKYCGetResponse, kycGetResp) +} diff --git a/services/regulated-assets-approval-server/internal/serve/kycstatus/post_handler.go b/services/regulated-assets-approval-server/internal/serve/kycstatus/post_handler.go new file mode 100644 index 0000000000..6695f886df --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/kycstatus/post_handler.go @@ -0,0 +1,158 @@ +package kycstatus + +import ( + "context" + "fmt" + "net/http" + "regexp" + "strings" + + "github.com/jmoiron/sqlx" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/httperror" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httpdecode" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/httpjson" +) + +type kycPostRequest struct { + CallbackID string `path:"callback_id"` + EmailAddress string `json:"email_address"` +} + +type kycPostResponse struct { + Result string `json:"result"` + StatusCode int `json:"-"` +} + +func (k *kycPostResponse) Render(w http.ResponseWriter) { + httpjson.RenderStatus(w, k.StatusCode, k, httpjson.JSON) +} + +func NewKYCStatusPostResponse() *kycPostResponse { + return &kycPostResponse{ + Result: "no_further_action_required", + StatusCode: http.StatusOK, + } +} + +type PostHandler struct { + DB *sqlx.DB +} + +func (h PostHandler) validate() error { + if h.DB == nil { + return errors.New("database cannot be nil") + } + return nil +} + +func (h PostHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + err := h.validate() + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "validating kyc-status PostHandler")) + httperror.InternalServer.Render(w) + return + } + + in := kycPostRequest{} + err = httpdecode.Decode(r, &in) + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "decoding kyc-status POST Request")) + httperror.BadRequest.Render(w) + return + } + + kycResponse, err := h.handle(ctx, in) + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "validating the input POST request for kyc-status")) + httpErr, ok := err.(*httperror.Error) + if !ok { + httpErr = httperror.InternalServer + } + httpErr.Render(w) + return + } + + kycResponse.Render(w) +} + +func (h PostHandler) handle(ctx context.Context, in kycPostRequest) (*kycPostResponse, error) { + // Check if kycPostRequest values are present or not malformed. 
+	if in.CallbackID == "" {
+		return nil, httperror.NewHTTPError(http.StatusBadRequest, "Missing callbackID.")
+	}
+	if in.EmailAddress == "" {
+		return nil, httperror.NewHTTPError(http.StatusBadRequest, "Missing email_address.")
+	}
+	if !RxEmail.MatchString(in.EmailAddress) {
+		return nil, httperror.NewHTTPError(http.StatusBadRequest, "The provided email_address is invalid.")
+	}
+
+	var exists bool
+	query, args := in.buildUpdateKYCQuery()
+	err := h.DB.QueryRowContext(ctx, query, args...).Scan(&exists)
+	if err != nil {
+		return nil, errors.Wrap(err, "querying the database")
+	}
+	if !exists {
+		return nil, httperror.NewHTTPError(http.StatusNotFound, "Not found.")
+	}
+
+	return NewKYCStatusPostResponse(), nil
+}
+
+// isKYCRejected reports whether the KYC data should be rejected. As an
+// arbitrary rule, emails starting with "x" are rejected.
+func (in kycPostRequest) isKYCRejected() bool {
+	return strings.HasPrefix(strings.ToLower(in.EmailAddress), "x")
+}
+
+// isKYCPending reports whether the KYC data should be marked as pending. As an
+// arbitrary rule, emails starting with "y" are marked as pending.
+func (in kycPostRequest) isKYCPending() bool {
+	return strings.HasPrefix(strings.ToLower(in.EmailAddress), "y")
+}
+
+// buildUpdateKYCQuery builds the query that marks a Stellar account as approved, rejected or pending in the accounts_kyc_status table.
+// The query also returns a boolean indicating whether a row with the given callback ID exists.
+func (in kycPostRequest) buildUpdateKYCQuery() (string, []interface{}) {
+	var (
+		query strings.Builder
+		args  []interface{}
+	)
+	query.WriteString("WITH updated_row AS (")
+	query.WriteString("UPDATE accounts_kyc_status ")
+	query.WriteString("SET kyc_submitted_at = NOW(), ")
+
+	args = append(args, in.EmailAddress)
+	query.WriteString(fmt.Sprintf("email_address = $%d, ", len(args)))
+
+	// update KYC status to rejected, pending or approved
+	if in.isKYCRejected() {
+		query.WriteString("rejected_at = NOW(), pending_at = NULL, approved_at = NULL ")
+	} else if in.isKYCPending() {
+		query.WriteString("rejected_at = NULL, pending_at = NOW(), approved_at = NULL ")
+	} else {
+		query.WriteString("rejected_at = NULL, pending_at = NULL, approved_at = NOW() ")
+	}
+
+	args = append(args, in.CallbackID)
+	query.WriteString(fmt.Sprintf("WHERE callback_id = $%d ", len(args)))
+
+	query.WriteString("RETURNING * ")
+	query.WriteString(")")
+	query.WriteString(`
+		SELECT EXISTS(
+			SELECT * FROM updated_row
+		)
+	`)
+
+	return query.String(), args
+}
+
+// RxEmail is a regex used to validate e-mail addresses, according to the reference https://www.alexedwards.net/blog/validation-snippets-for-go#email-validation.
+// It's free to use under the [MIT Licence](https://opensource.org/licenses/MIT) +var RxEmail = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$") diff --git a/services/regulated-assets-approval-server/internal/serve/kycstatus/post_handler_test.go b/services/regulated-assets-approval-server/internal/serve/kycstatus/post_handler_test.go new file mode 100644 index 0000000000..51e421816d --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/kycstatus/post_handler_test.go @@ -0,0 +1,230 @@ +package kycstatus + +import ( + "context" + "database/sql" + "net/http" + "testing" + + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db/dbtest" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/httperror" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPostHandler_validate(t *testing.T) { + // database is nil + h := PostHandler{} + err := h.validate() + require.EqualError(t, err, "database cannot be nil") + + // success + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + h = PostHandler{ + DB: conn, + } + err = h.validate() + require.NoError(t, err) +} + +func TestIsKYCRejected(t *testing.T) { + in := kycPostRequest{ + EmailAddress: "test@email.com", + } + isRejected := in.isKYCRejected() + assert.False(t, isRejected) + + // emails starting with "x" should be rejected + in = kycPostRequest{ + EmailAddress: "xtest@email.com", + } + isRejected = in.isKYCRejected() + assert.True(t, isRejected) +} + +func TestIsKYCPending(t *testing.T) { + in := kycPostRequest{ + EmailAddress: "test@email.com", + } + isPending := in.isKYCPending() + assert.False(t, isPending) + + // emails starting with "y" should be marked as pending + in = kycPostRequest{ + EmailAddress: "ytest@email.com", + } + isPending = in.isKYCPending() + assert.True(t, isPending) +} + +func TestBuildUpdateKYCQuery(t *testing.T) { + // test rejected query + in := kycPostRequest{ + CallbackID: "9999999999-9999", + EmailAddress: "xtest@email.com", + } + query, args := in.buildUpdateKYCQuery() + expectedQuery := "WITH updated_row AS (UPDATE accounts_kyc_status SET kyc_submitted_at = NOW(), email_address = $1, rejected_at = NOW(), pending_at = NULL, approved_at = NULL WHERE callback_id = $2 RETURNING * )\n\t\tSELECT EXISTS(\n\t\t\tSELECT * FROM updated_row\n\t\t)\n\t" + expectedArgs := []interface{}{in.EmailAddress, in.CallbackID} + require.Equal(t, expectedQuery, query) + require.Equal(t, expectedArgs, args) + + // test pending query + in = kycPostRequest{ + CallbackID: "1234567890-12345", + EmailAddress: "ytest@email.com", + } + query, args = in.buildUpdateKYCQuery() + expectedQuery = "WITH updated_row AS (UPDATE accounts_kyc_status SET kyc_submitted_at = NOW(), email_address = $1, rejected_at = NULL, pending_at = NOW(), approved_at = NULL WHERE callback_id = $2 RETURNING * )\n\t\tSELECT EXISTS(\n\t\t\tSELECT * FROM updated_row\n\t\t)\n\t" + expectedArgs = []interface{}{in.EmailAddress, in.CallbackID} + require.Equal(t, expectedQuery, query) + require.Equal(t, expectedArgs, args) + + // test approved query + in = kycPostRequest{ + CallbackID: "1234567890-12345", + EmailAddress: "test@email.com", + } + query, args = in.buildUpdateKYCQuery() + expectedQuery = "WITH updated_row AS (UPDATE accounts_kyc_status SET kyc_submitted_at = NOW(), email_address = $1, rejected_at = NULL, pending_at = 
NULL, approved_at = NOW() WHERE callback_id = $2 RETURNING * )\n\t\tSELECT EXISTS(\n\t\t\tSELECT * FROM updated_row\n\t\t)\n\t" + expectedArgs = []interface{}{in.EmailAddress, in.CallbackID} + require.Equal(t, expectedQuery, query) + require.Equal(t, expectedArgs, args) +} + +func TestPostHandler_handle_error(t *testing.T) { + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + ctx := context.Background() + + handler := PostHandler{DB: conn} + + // missing callbackID + in := kycPostRequest{} + kycPostResp, err := handler.handle(ctx, in) + require.Nil(t, kycPostResp) + require.Equal(t, httperror.NewHTTPError(http.StatusBadRequest, "Missing callbackID."), err) + + // missing email_address + in = kycPostRequest{ + CallbackID: "random-callback-id", + } + kycPostResp, err = handler.handle(ctx, in) + require.Nil(t, kycPostResp) + require.Equal(t, httperror.NewHTTPError(http.StatusBadRequest, "Missing email_address."), err) + + // invalid email_address + in = kycPostRequest{ + CallbackID: "random-callback-id", + EmailAddress: "invalidemail", + } + kycPostResp, err = handler.handle(ctx, in) + require.Nil(t, kycPostResp) + require.Equal(t, httperror.NewHTTPError(http.StatusBadRequest, "The provided email_address is invalid."), err) + + // no entry found for the given callbackID + in = kycPostRequest{ + CallbackID: "random-callback-id", + EmailAddress: "email@test.com", + } + kycPostResp, err = handler.handle(ctx, in) + require.Nil(t, kycPostResp) + require.Equal(t, httperror.NewHTTPError(http.StatusNotFound, "Not found."), err) +} + +func TestPostHandler_handle_success(t *testing.T) { + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + ctx := context.Background() + + handler := PostHandler{DB: conn} + + rejectedCallbackID := "rejected-callback-id" + pendingCallbackID := "pending-callback-id" + approvedCallbackID := "approved-callback-id" + q := ` + INSERT INTO accounts_kyc_status (stellar_address, callback_id) + VALUES + ('rejected-address', $1), + ('pending-address', $2), + ('approved-address', $3) + ` + + _, err := conn.DB.ExecContext(ctx, q, rejectedCallbackID, pendingCallbackID, approvedCallbackID) + require.NoError(t, err) + + // should be rejected as email starts with "x" + in := kycPostRequest{ + CallbackID: rejectedCallbackID, + EmailAddress: "xemail@test.com", + } + kycPostResp, err := handler.handle(ctx, in) + assert.NoError(t, err) + require.Equal(t, NewKYCStatusPostResponse(), kycPostResp) + + var rejectedAt, pendingAt, approvedAt sql.NullTime + q = ` + SELECT rejected_at, pending_at, approved_at + FROM accounts_kyc_status + WHERE callback_id = $1 + ` + err = conn.DB.QueryRowContext(ctx, q, rejectedCallbackID).Scan(&rejectedAt, &pendingAt, &approvedAt) + require.NoError(t, err) + + assert.False(t, pendingAt.Valid) + assert.False(t, approvedAt.Valid) + require.True(t, rejectedAt.Valid) + + // should be marked as pending as email starts with "y" + in = kycPostRequest{ + CallbackID: pendingCallbackID, + EmailAddress: "yemail@test.com", + } + kycPostResp, err = handler.handle(ctx, in) + assert.NoError(t, err) + require.Equal(t, NewKYCStatusPostResponse(), kycPostResp) + + err = conn.DB.QueryRowContext(ctx, q, pendingCallbackID).Scan(&rejectedAt, &pendingAt, &approvedAt) + require.NoError(t, err) + + assert.False(t, rejectedAt.Valid) + assert.False(t, approvedAt.Valid) + require.True(t, pendingAt.Valid) + + // should be approved as email doesn't start with "x" nor "y" + in = kycPostRequest{ + CallbackID: pendingCallbackID, + 
EmailAddress: "email@test.com", + } + kycPostResp, err = handler.handle(ctx, in) + assert.NoError(t, err) + require.Equal(t, NewKYCStatusPostResponse(), kycPostResp) + + err = conn.DB.QueryRowContext(ctx, q, pendingCallbackID).Scan(&rejectedAt, &pendingAt, &approvedAt) + require.NoError(t, err) + + assert.False(t, rejectedAt.Valid) + assert.False(t, pendingAt.Valid) + require.True(t, approvedAt.Valid) +} + +func TestRxEmail(t *testing.T) { + // Test empty email string. + assert.NotRegexp(t, RxEmail, "") + + // Test empty prefix. + assert.NotRegexp(t, RxEmail, "email.com") + + // Test only domain given. + assert.NotRegexp(t, RxEmail, "@email.com") + + // Test correct email. + assert.Regexp(t, RxEmail, "t@email.com") +} diff --git a/services/regulated-assets-approval-server/internal/serve/middleware.go b/services/regulated-assets-approval-server/internal/serve/middleware.go new file mode 100644 index 0000000000..ff3001b77d --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/middleware.go @@ -0,0 +1,16 @@ +package serve + +import ( + "net/http" + + "github.com/rs/cors" +) + +func corsHandler(next http.Handler) http.Handler { + cors := cors.New(cors.Options{ + AllowedOrigins: []string{"*"}, + AllowedHeaders: []string{"*"}, + AllowedMethods: []string{"GET", "PUT", "POST", "PATCH", "DELETE", "HEAD", "OPTIONS"}, + }) + return cors.Handler(next) +} diff --git a/services/regulated-assets-approval-server/internal/serve/serve.go b/services/regulated-assets-approval-server/internal/serve/serve.go new file mode 100644 index 0000000000..1cd283441f --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/serve.go @@ -0,0 +1,135 @@ +package serve + +import ( + "fmt" + "net/http" + "net/url" + "path" + "time" + + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" + "github.com/stellar/go/amount" + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/kycstatus" + "github.com/stellar/go/support/errors" + supporthttp "github.com/stellar/go/support/http" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/health" +) + +type Options struct { + AssetCode string + BaseURL string + DatabaseURL string + FriendbotPaymentAmount int + HorizonURL string + IssuerAccountSecret string + KYCRequiredPaymentAmountThreshold string + NetworkPassphrase string + Port int +} + +func Serve(opts Options) { + listenAddr := fmt.Sprintf(":%d", opts.Port) + serverConfig := supporthttp.Config{ + ListenAddr: listenAddr, + Handler: handleHTTP(opts), + TCPKeepAlive: time.Minute * 3, + ShutdownGracePeriod: time.Second * 50, + ReadTimeout: time.Second * 5, + WriteTimeout: time.Second * 35, + IdleTimeout: time.Minute * 2, + OnStarting: func() { + log.Info("Starting SEP-8 Approval Server") + log.Infof("Listening on %s", listenAddr) + }, + OnStopping: func() { + log.Info("Stopping SEP-8 Approval Server") + }, + } + supporthttp.Run(serverConfig) +} + +func handleHTTP(opts Options) http.Handler { + issuerKP, err := keypair.ParseFull(opts.IssuerAccountSecret) + if err != nil { + log.Fatal(errors.Wrap(err, "parsing secret")) + } + parsedKYCRequiredPaymentThreshold, err := amount.ParseInt64(opts.KYCRequiredPaymentAmountThreshold) + if err != nil { + log.Fatal(errors.Wrapf(err, "%s cannot be parsed as a Stellar amount", opts.KYCRequiredPaymentAmountThreshold)) + } + db, err := 
db.Open(opts.DatabaseURL) + if err != nil { + log.Fatal(errors.Wrap(err, "error parsing database url")) + } + db.SetMaxOpenConns(20) + err = db.Ping() + if err != nil { + log.Warn("Error pinging to Database: ", err) + } + mux := chi.NewMux() + + mux.Use(middleware.RequestID) + mux.Use(middleware.RealIP) + mux.Use(supporthttp.LoggingMiddleware) + mux.Use(corsHandler) + + mux.Get("/health", health.PassHandler{}.ServeHTTP) + mux.Get("/.well-known/stellar.toml", stellarTOMLHandler{ + assetCode: opts.AssetCode, + issuerAddress: issuerKP.Address(), + networkPassphrase: opts.NetworkPassphrase, + approvalServer: buildURLString(opts.BaseURL, "tx-approve"), + kycThreshold: parsedKYCRequiredPaymentThreshold, + }.ServeHTTP) + mux.Get("/friendbot", friendbotHandler{ + assetCode: opts.AssetCode, + issuerAccountSecret: opts.IssuerAccountSecret, + horizonClient: opts.horizonClient(), + horizonURL: opts.HorizonURL, + networkPassphrase: opts.NetworkPassphrase, + paymentAmount: opts.FriendbotPaymentAmount, + }.ServeHTTP) + mux.Post("/tx-approve", txApproveHandler{ + assetCode: opts.AssetCode, + issuerKP: issuerKP, + horizonClient: opts.horizonClient(), + networkPassphrase: opts.NetworkPassphrase, + db: db, + kycThreshold: parsedKYCRequiredPaymentThreshold, + baseURL: opts.BaseURL, + }.ServeHTTP) + mux.Route("/kyc-status", func(mux chi.Router) { + mux.Post("/{callback_id}", kycstatus.PostHandler{ + DB: db, + }.ServeHTTP) + mux.Get("/{stellar_address_or_callback_id}", kycstatus.GetDetailHandler{ + DB: db, + }.ServeHTTP) + mux.Delete("/{stellar_address}", kycstatus.DeleteHandler{ + DB: db, + }.ServeHTTP) + }) + + return mux +} + +func (opts Options) horizonClient() horizonclient.ClientInterface { + return &horizonclient.Client{ + HorizonURL: opts.HorizonURL, + HTTP: &http.Client{Timeout: 30 * time.Second}, + } +} + +func buildURLString(baseURL, endpoint string) string { + URL, err := url.Parse(baseURL) + if err != nil { + log.Fatal(errors.Wrapf(err, "Unable to parse URL: %s", baseURL)) + } + URL.Path = path.Join(URL.Path, endpoint) + return URL.String() +} diff --git a/services/regulated-assets-approval-server/internal/serve/serve_test.go b/services/regulated-assets-approval-server/internal/serve/serve_test.go new file mode 100644 index 0000000000..5a4b254602 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/serve_test.go @@ -0,0 +1,23 @@ +package serve + +import ( + "net/http" + "testing" + "time" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stretchr/testify/require" +) + +func TestHorizonClient(t *testing.T) { + opts := Options{HorizonURL: "my-horizon.domain.com"} + horizonClientInterface := opts.horizonClient() + + horizonClient, ok := horizonClientInterface.(*horizonclient.Client) + require.True(t, ok) + require.Equal(t, "my-horizon.domain.com", horizonClient.HorizonURL) + + httpClient, ok := horizonClient.HTTP.(*http.Client) + require.True(t, ok) + require.Equal(t, http.Client{Timeout: 30 * time.Second}, *httpClient) +} diff --git a/services/regulated-assets-approval-server/internal/serve/toml.go b/services/regulated-assets-approval-server/internal/serve/toml.go new file mode 100644 index 0000000000..8f2914c7aa --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/toml.go @@ -0,0 +1,75 @@ +package serve + +import ( + "fmt" + "net/http" + + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/httperror" + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" 
+) + +type stellarTOMLHandler struct { + assetCode string + approvalServer string + issuerAddress string + networkPassphrase string + kycThreshold int64 +} + +func (h stellarTOMLHandler) validate() error { + if h.networkPassphrase == "" { + return errors.New("network passphrase cannot be empty") + } + + if h.assetCode == "" { + return errors.New("asset code cannot be empty") + } + + if h.issuerAddress == "" { + return errors.New("asset issuer address cannot be empty") + } + + if !strkey.IsValidEd25519PublicKey(h.issuerAddress) { + return errors.New("asset issuer address is not a valid public key") + } + + if h.approvalServer == "" { + return errors.New("approval server cannot be empty") + } + + if h.kycThreshold <= 0 { + return errors.New("kyc threshold cannot be less than or equal to zero") + } + + return nil +} + +func (h stellarTOMLHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + err := h.validate() + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "validating tomlHandler")) + httperror.InternalServer.Render(rw) + return + } + + // Convert kycThreshold value to human readable string; from amount package's int64 5000000000 to 500.00. + kycThreshold, err := convertAmountToReadableString(h.kycThreshold) + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "converting kycThreshold value to human readable string")) + httperror.InternalServer.Render(rw) + return + } + + // Generate toml content. + fmt.Fprintf(rw, "NETWORK_PASSPHRASE=%q\n", h.networkPassphrase) + fmt.Fprintf(rw, "[[CURRENCIES]]\n") + fmt.Fprintf(rw, "code=%q\n", h.assetCode) + fmt.Fprintf(rw, "issuer=%q\n", h.issuerAddress) + fmt.Fprintf(rw, "regulated=true\n") + fmt.Fprintf(rw, "approval_server=%q\n", h.approvalServer) + fmt.Fprintf(rw, "approval_criteria=\"The approval server currently only accepts payments. The transaction must have exactly one operation of type payment. 
If the payment amount exceeds %s %s it will need KYC approval if the account hasn’t been previously approved.\"", kycThreshold, h.assetCode) +} diff --git a/services/regulated-assets-approval-server/internal/serve/toml_test.go b/services/regulated-assets-approval-server/internal/serve/toml_test.go new file mode 100644 index 0000000000..db15e8f092 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/toml_test.go @@ -0,0 +1,119 @@ +package serve + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/go-chi/chi" + "github.com/stellar/go/network" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTomlHandler_validate(t *testing.T) { + // empty network passphrase + h := stellarTOMLHandler{} + err := h.validate() + require.EqualError(t, err, "network passphrase cannot be empty") + + // empty asset code + h = stellarTOMLHandler{ + networkPassphrase: network.TestNetworkPassphrase, + } + err = h.validate() + require.EqualError(t, err, "asset code cannot be empty") + + // empty asset issuer address + h = stellarTOMLHandler{ + networkPassphrase: network.TestNetworkPassphrase, + assetCode: "FOOBAR", + } + err = h.validate() + require.EqualError(t, err, "asset issuer address cannot be empty") + + // invalid asset issuer address + h = stellarTOMLHandler{ + networkPassphrase: network.TestNetworkPassphrase, + assetCode: "FOOBAR", + issuerAddress: "foobar", + } + err = h.validate() + require.EqualError(t, err, "asset issuer address is not a valid public key") + + // empty approval server + h = stellarTOMLHandler{ + networkPassphrase: network.TestNetworkPassphrase, + assetCode: "FOOBAR", + issuerAddress: "GCVDOU4YHHXGM3QYVSDHPQIFMZKXTFSIYO4HJOJZOTR7GURVQO6IQ5HM", + } + err = h.validate() + require.EqualError(t, err, "approval server cannot be empty") + + // empty kyc threshold + h = stellarTOMLHandler{ + networkPassphrase: network.TestNetworkPassphrase, + assetCode: "FOOBAR", + issuerAddress: "GCVDOU4YHHXGM3QYVSDHPQIFMZKXTFSIYO4HJOJZOTR7GURVQO6IQ5HM", + approvalServer: "localhost:8000/tx-approve", + } + err = h.validate() + require.EqualError(t, err, "kyc threshold cannot be less than or equal to zero") + + // negative kyc threshold + h = stellarTOMLHandler{ + networkPassphrase: network.TestNetworkPassphrase, + assetCode: "FOOBAR", + issuerAddress: "GCVDOU4YHHXGM3QYVSDHPQIFMZKXTFSIYO4HJOJZOTR7GURVQO6IQ5HM", + approvalServer: "localhost:8000/tx-approve", + kycThreshold: -500, + } + err = h.validate() + require.EqualError(t, err, "kyc threshold cannot be less than or equal to zero") + + // success + h = stellarTOMLHandler{ + networkPassphrase: network.TestNetworkPassphrase, + assetCode: "FOOBAR", + issuerAddress: "GCVDOU4YHHXGM3QYVSDHPQIFMZKXTFSIYO4HJOJZOTR7GURVQO6IQ5HM", + approvalServer: "localhost:8000/tx-approve", + kycThreshold: 500, + } + err = h.validate() + require.NoError(t, err) +} + +func TestTomlHandler_ServeHTTP(t *testing.T) { + mux := chi.NewMux() + mux.Get("/.well-known/stellar.toml", stellarTOMLHandler{ + networkPassphrase: network.TestNetworkPassphrase, + assetCode: "FOO", + issuerAddress: "GCVDOU4YHHXGM3QYVSDHPQIFMZKXTFSIYO4HJOJZOTR7GURVQO6IQ5HM", + approvalServer: "localhost:8000/tx-approve", + kycThreshold: 5000000000, + }.ServeHTTP) + + ctx := context.Background() + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/.well-known/stellar.toml", nil) + r = r.WithContext(ctx) + mux.ServeHTTP(w, r) + + resp := w.Result() + assert.Equal(t, http.StatusOK, 
resp.StatusCode) + assert.Equal(t, "text/plain; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `NETWORK_PASSPHRASE="` + network.TestNetworkPassphrase + `" +[[CURRENCIES]] +code="FOO" +issuer="GCVDOU4YHHXGM3QYVSDHPQIFMZKXTFSIYO4HJOJZOTR7GURVQO6IQ5HM" +regulated=true +approval_server="localhost:8000/tx-approve" +approval_criteria="The approval server currently only accepts payments. The transaction must have exactly one operation of type payment. If the payment amount exceeds 500.00 FOO it will need KYC approval if the account hasn’t been previously approved."` + require.Equal(t, wantBody, string(body)) +} diff --git a/services/regulated-assets-approval-server/internal/serve/tx_approve.go b/services/regulated-assets-approval-server/internal/serve/tx_approve.go new file mode 100644 index 0000000000..47b82ae83c --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/tx_approve.go @@ -0,0 +1,439 @@ +package serve + +import ( + "context" + "database/sql" + "fmt" + "net/http" + "strconv" + + "github.com/google/uuid" + "github.com/jmoiron/sqlx" + "github.com/stellar/go/amount" + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/serve/httperror" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httpdecode" + "github.com/stellar/go/support/log" + "github.com/stellar/go/txnbuild" +) + +type txApproveHandler struct { + issuerKP *keypair.Full + assetCode string + horizonClient horizonclient.ClientInterface + networkPassphrase string + db *sqlx.DB + kycThreshold int64 + baseURL string +} + +type txApproveRequest struct { + Tx string `json:"tx" form:"tx"` +} + +// validate performs some validations on the provided handler data. +func (h txApproveHandler) validate() error { + if h.issuerKP == nil { + return errors.New("issuer keypair cannot be nil") + } + if h.assetCode == "" { + return errors.New("asset code cannot be empty") + } + if h.horizonClient == nil { + return errors.New("horizon client cannot be nil") + } + if h.networkPassphrase == "" { + return errors.New("network passphrase cannot be empty") + } + if h.db == nil { + return errors.New("database cannot be nil") + } + if h.kycThreshold <= 0 { + return errors.New("kyc threshold cannot be less than or equal to zero") + } + if h.baseURL == "" { + return errors.New("base url cannot be empty") + } + return nil +} + +func (h txApproveHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + err := h.validate() + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "validating txApproveHandler")) + httperror.InternalServer.Render(w) + return + } + + in := txApproveRequest{} + err = httpdecode.Decode(r, &in) + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "decoding txApproveRequest")) + httperror.BadRequest.Render(w) + return + } + + txApproveResp, err := h.txApprove(ctx, in) + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "validating the input transaction for approval")) + httperror.InternalServer.Render(w) + return + } + + txApproveResp.Render(w) +} + +// validateInput validates if the input parameters contain a valid transaction +// and if the source account is not set in a way that would harm the issuer. 
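+// The transaction source account must not be the issuer, and only AllowTrust
+// operations may use the issuer as their operation source account; anything
+// else results in a rejected SEP-8 response.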
+func (h txApproveHandler) validateInput(ctx context.Context, in txApproveRequest) (*txApprovalResponse, *txnbuild.Transaction) { + if in.Tx == "" { + log.Ctx(ctx).Error(`request is missing parameter "tx".`) + return NewRejectedTxApprovalResponse(`Missing parameter "tx".`), nil + } + + genericTx, err := txnbuild.TransactionFromXDR(in.Tx) + if err != nil { + log.Ctx(ctx).Error(errors.Wrap(err, "parsing transaction xdr")) + return NewRejectedTxApprovalResponse(`Invalid parameter "tx".`), nil + } + + tx, ok := genericTx.Transaction() + if !ok { + log.Ctx(ctx).Error(`invalid parameter "tx", generic transaction not given.`) + return NewRejectedTxApprovalResponse(`Invalid parameter "tx".`), nil + } + + if tx.SourceAccount().AccountID == h.issuerKP.Address() { + log.Ctx(ctx).Errorf("transaction sourceAccount is the same as the server issuer account %s", h.issuerKP.Address()) + return NewRejectedTxApprovalResponse("Transaction source account is invalid."), nil + } + + // only AllowTrust operations can have the issuer as their source account + for _, op := range tx.Operations() { + if _, ok := op.(*txnbuild.AllowTrust); ok { + continue + } + + if op.GetSourceAccount() == h.issuerKP.Address() { + log.Ctx(ctx).Error("transaction contains one or more unauthorized operations where source account is the issuer account") + return NewRejectedTxApprovalResponse("There are one or more unauthorized operations in the provided transaction."), nil + } + } + + return nil, tx +} + +// txApprove is called to validate the input transaction. +func (h txApproveHandler) txApprove(ctx context.Context, in txApproveRequest) (resp *txApprovalResponse, err error) { + defer func() { + log.Ctx(ctx).Debug("==== will log responses ====") + log.Ctx(ctx).Debugf("req: %+v", in) + log.Ctx(ctx).Debugf("resp: %+v", resp) + log.Ctx(ctx).Debugf("err: %+v", err) + log.Ctx(ctx).Debug("==== did log responses ====") + }() + + rejectedResponse, tx := h.validateInput(ctx, in) + if rejectedResponse != nil { + return rejectedResponse, nil + } + + txSuccessResp, err := h.handleSuccessResponseIfNeeded(ctx, tx) + if err != nil { + return nil, errors.Wrap(err, "checking if transaction in request was compliant") + } + if txSuccessResp != nil { + return txSuccessResp, nil + } + + // validate the revisable transaction has one operation. 
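+	// That single payment is what gets wrapped between the issuer's AllowTrust
+	// authorize/deauthorize operations in the revised transaction built below.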
+ if len(tx.Operations()) != 1 { + return NewRejectedTxApprovalResponse("Please submit a transaction with exactly one operation of type payment."), nil + } + + paymentOp, ok := tx.Operations()[0].(*txnbuild.Payment) + if !ok { + log.Ctx(ctx).Error("transaction does not contain a payment operation") + return NewRejectedTxApprovalResponse("There is one or more unauthorized operations in the provided transaction."), nil + } + paymentSource := paymentOp.SourceAccount + if paymentSource == "" { + paymentSource = tx.SourceAccount().AccountID + } + + if paymentOp.Destination == h.issuerKP.Address() { + return NewRejectedTxApprovalResponse("Can't transfer asset to its issuer."), nil + } + + // validate payment asset is the one supported by the issuer + issuerAddress := h.issuerKP.Address() + if paymentOp.Asset.GetCode() != h.assetCode || paymentOp.Asset.GetIssuer() != issuerAddress { + log.Ctx(ctx).Error(`the payment asset is not supported by this issuer`) + return NewRejectedTxApprovalResponse("The payment asset is not supported by this issuer."), nil + } + + acc, err := h.horizonClient.AccountDetail(horizonclient.AccountRequest{AccountID: paymentSource}) + if err != nil { + return nil, errors.Wrapf(err, "getting detail for payment source account %s", paymentSource) + } + + // validate the sequence number + accountSequence, err := strconv.ParseInt(acc.Sequence, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "parsing account sequence number %q from string to int64", acc.Sequence) + } + if tx.SourceAccount().Sequence != accountSequence+1 { + log.Ctx(ctx).Errorf(`invalid transaction sequence number tx.SourceAccount().Sequence: %d, accountSequence+1: %d`, tx.SourceAccount().Sequence, accountSequence+1) + return NewRejectedTxApprovalResponse("Invalid transaction sequence number."), nil + } + + actionRequiredResponse, err := h.handleActionRequiredResponseIfNeeded(ctx, paymentSource, paymentOp) + if err != nil { + return nil, errors.Wrap(err, "handling KYC required payment") + } + if actionRequiredResponse != nil { + return actionRequiredResponse, nil + } + + // build the transaction + revisedOperations := []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: paymentSource, + Type: paymentOp.Asset, + Authorize: true, + SourceAccount: issuerAddress, + }, + &txnbuild.AllowTrust{ + Trustor: paymentOp.Destination, + Type: paymentOp.Asset, + Authorize: true, + SourceAccount: issuerAddress, + }, + paymentOp, + &txnbuild.AllowTrust{ + Trustor: paymentOp.Destination, + Type: paymentOp.Asset, + Authorize: false, + SourceAccount: issuerAddress, + }, + &txnbuild.AllowTrust{ + Trustor: paymentSource, + Type: paymentOp.Asset, + Authorize: false, + SourceAccount: issuerAddress, + }, + } + revisedTx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &acc, + IncrementSequenceNum: true, + Operations: revisedOperations, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + if err != nil { + return nil, errors.Wrap(err, "building transaction") + } + + revisedTx, err = revisedTx.Sign(h.networkPassphrase, h.issuerKP) + if err != nil { + return nil, errors.Wrap(err, "signing transaction") + } + txe, err := revisedTx.Base64() + if err != nil { + return nil, errors.Wrap(err, "encoding revised transaction") + } + + return NewRevisedTxApprovalResponse(txe), nil +} + +// handleActionRequiredResponseIfNeeded validates and returns an action_required +// response if the payment requires KYC. 
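+// Payments at or below the KYC threshold return (nil, nil). Above the threshold the
+// account is upserted into the accounts_kyc_status table and the result depends on its
+// KYC state: approved accounts pass through (nil, nil), rejected accounts receive a
+// "rejected" response, pending accounts a "pending" response, and accounts with no
+// KYC record receive the "action_required" response pointing at the kyc-status callback.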
+func (h txApproveHandler) handleActionRequiredResponseIfNeeded(ctx context.Context, stellarAddress string, paymentOp *txnbuild.Payment) (*txApprovalResponse, error) { + paymentAmount, err := amount.ParseInt64(paymentOp.Amount) + if err != nil { + return nil, errors.Wrap(err, "parsing payment amount from string to Int64") + } + if paymentAmount <= h.kycThreshold { + return nil, nil + } + + intendedCallbackID := uuid.New().String() + const q = ` + WITH new_row AS ( + INSERT INTO accounts_kyc_status (stellar_address, callback_id) + VALUES ($1, $2) + ON CONFLICT(stellar_address) DO NOTHING + RETURNING * + ) + SELECT callback_id, approved_at, rejected_at, pending_at FROM new_row + UNION + SELECT callback_id, approved_at, rejected_at, pending_at + FROM accounts_kyc_status + WHERE stellar_address = $1 + ` + var ( + callbackID string + approvedAt, rejectedAt, pendingAt sql.NullTime + ) + err = h.db.QueryRowContext(ctx, q, stellarAddress, intendedCallbackID).Scan(&callbackID, &approvedAt, &rejectedAt, &pendingAt) + if err != nil { + return nil, errors.Wrap(err, "inserting new row into accounts_kyc_status table") + } + + if approvedAt.Valid { + return nil, nil + } + + kycThreshold, err := convertAmountToReadableString(h.kycThreshold) + if err != nil { + return nil, errors.Wrap(err, "converting kycThreshold to human readable string") + } + + if rejectedAt.Valid { + return NewRejectedTxApprovalResponse(fmt.Sprintf("Your KYC was rejected and you're not authorized for operations above %s %s.", kycThreshold, h.assetCode)), nil + } + + if pendingAt.Valid { + return NewPendingTxApprovalResponse(fmt.Sprintf("Your account could not be verified as approved nor rejected and was marked as pending. You will need staff authorization for operations above %s %s.", kycThreshold, h.assetCode)), nil + } + + return NewActionRequiredTxApprovalResponse( + fmt.Sprintf(`Payments exceeding %s %s require KYC approval. Please provide an email address.`, kycThreshold, h.assetCode), + fmt.Sprintf("%s/kyc-status/%s", h.baseURL, callbackID), + []string{"email_address"}, + ), nil +} + +// handleSuccessResponseIfNeeded inspects the incoming transaction and returns a +// "success" response if it's already compliant with the SEP-8 authorization spec. 
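+// Transactions that do not have exactly five operations return (nil, nil) so the caller
+// can fall back to the revision flow; transactions with five operations that do not
+// match the expected [AllowTrust, AllowTrust, Payment, AllowTrust, AllowTrust] shape
+// are rejected.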
+func (h txApproveHandler) handleSuccessResponseIfNeeded(ctx context.Context, tx *txnbuild.Transaction) (*txApprovalResponse, error) { + if len(tx.Operations()) != 5 { + return nil, nil + } + + rejectedResp, paymentOp, paymentSource := validateTransactionOperationsForSuccess(ctx, tx, h.issuerKP.Address()) + if rejectedResp != nil { + return rejectedResp, nil + } + + if paymentOp.Destination == h.issuerKP.Address() { + return NewRejectedTxApprovalResponse("Can't transfer asset to its issuer."), nil + } + + // pull current account details from the network then validate the tx sequence number + acc, err := h.horizonClient.AccountDetail(horizonclient.AccountRequest{AccountID: paymentSource}) + if err != nil { + return nil, errors.Wrapf(err, "getting detail for payment source account %s", paymentSource) + } + accountSequence, err := strconv.ParseInt(acc.Sequence, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "parsing account sequence number %q from string to int64", acc.Sequence) + } + if tx.SourceAccount().Sequence != accountSequence+1 { + log.Ctx(ctx).Errorf(`invalid transaction sequence number tx.SourceAccount().Sequence: %d, accountSequence+1: %d`, tx.SourceAccount().Sequence, accountSequence+1) + return NewRejectedTxApprovalResponse("Invalid transaction sequence number."), nil + } + + kycRequiredResponse, err := h.handleActionRequiredResponseIfNeeded(ctx, paymentSource, paymentOp) + if err != nil { + return nil, errors.Wrap(err, "handling KYC required payment") + } + if kycRequiredResponse != nil { + return kycRequiredResponse, nil + } + + // sign transaction with issuer's signature and encode it + tx, err = tx.Sign(h.networkPassphrase, h.issuerKP) + if err != nil { + return nil, errors.Wrap(err, "signing transaction") + } + txe, err := tx.Base64() + if err != nil { + return nil, errors.Wrap(err, "encoding revised transaction") + } + + return NewSuccessTxApprovalResponse(txe, "Transaction is compliant and signed by the issuer."), nil +} + +// validateTransactionOperationsForSuccess checks if the incoming transaction +// operations are compliant with the anchor's SEP-8 policy. +func validateTransactionOperationsForSuccess(ctx context.Context, tx *txnbuild.Transaction, issuerAddress string) (resp *txApprovalResponse, paymentOp *txnbuild.Payment, paymentSource string) { + if len(tx.Operations()) != 5 { + return NewRejectedTxApprovalResponse("Unsupported number of operations."), nil, "" + } + + // extract the payment operation and payment source account. 
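+	// The payment is expected at index 2; if it has no explicit source account, the
+	// transaction's source account is used instead.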
+ paymentOp, ok := tx.Operations()[2].(*txnbuild.Payment) + if !ok { + log.Ctx(ctx).Error(`third operation is not of type payment`) + return NewRejectedTxApprovalResponse("There are one or more unexpected operations in the provided transaction."), nil, "" + } + paymentSource = paymentOp.SourceAccount + if paymentSource == "" { + paymentSource = tx.SourceAccount().AccountID + } + + assetCode := paymentOp.Asset.GetCode() + + operationsValid := func() bool { + op0, ok := tx.Operations()[0].(*txnbuild.AllowTrust) + if !ok || + op0.Trustor != paymentSource || + op0.Type.GetCode() != assetCode || + !op0.Authorize || + op0.SourceAccount != issuerAddress { + return false + } + + op1, ok := tx.Operations()[1].(*txnbuild.AllowTrust) + if !ok || + op1.Trustor != paymentOp.Destination || + op1.Type.GetCode() != assetCode || + !op1.Authorize || + op1.SourceAccount != issuerAddress { + return false + } + + op2, ok := tx.Operations()[2].(*txnbuild.Payment) + if !ok || op2 != paymentOp { + return false + } + + op3, ok := tx.Operations()[3].(*txnbuild.AllowTrust) + if !ok || + op3.Trustor != paymentOp.Destination || + op3.Type.GetCode() != assetCode || + op3.Authorize || + op3.SourceAccount != issuerAddress { + return false + } + + op4, ok := tx.Operations()[4].(*txnbuild.AllowTrust) + if !ok || + op4.Trustor != paymentSource || + op4.Type.GetCode() != assetCode || + op4.Authorize || + op4.SourceAccount != issuerAddress { + return false + } + + return true + }() + if !operationsValid { + return NewRejectedTxApprovalResponse("There are one or more unexpected operations in the provided transaction."), nil, "" + } + + return nil, paymentOp, paymentSource +} + +func convertAmountToReadableString(threshold int64) (string, error) { + amountStr := amount.StringFromInt64(threshold) + amountFloat, err := strconv.ParseFloat(amountStr, 64) + if err != nil { + return "", errors.Wrap(err, "converting threshold amount from string to float") + } + return fmt.Sprintf("%.2f", amountFloat), nil +} diff --git a/services/regulated-assets-approval-server/internal/serve/tx_approve_response.go b/services/regulated-assets-approval-server/internal/serve/tx_approve_response.go new file mode 100644 index 0000000000..d555443475 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/tx_approve_response.go @@ -0,0 +1,80 @@ +package serve + +import ( + "net/http" + + "github.com/stellar/go/support/render/httpjson" +) + +type txApprovalResponse struct { + Error string `json:"error,omitempty"` + Message string `json:"message,omitempty"` + Status sep8Status `json:"status"` + StatusCode int `json:"-"` + Tx string `json:"tx,omitempty"` + ActionURL string `json:"action_url,omitempty"` + ActionMethod string `json:"action_method,omitempty"` + ActionFields []string `json:"action_fields,omitempty"` + Timeout *int64 `json:"timeout,omitempty"` +} + +func (t *txApprovalResponse) Render(w http.ResponseWriter) { + httpjson.RenderStatus(w, t.StatusCode, t, httpjson.JSON) +} + +func NewRejectedTxApprovalResponse(errMessage string) *txApprovalResponse { + return &txApprovalResponse{ + Status: sep8StatusRejected, + Error: errMessage, + StatusCode: http.StatusBadRequest, + } +} + +func NewRevisedTxApprovalResponse(tx string) *txApprovalResponse { + return &txApprovalResponse{ + Status: sep8StatusRevised, + Tx: tx, + StatusCode: http.StatusOK, + Message: "Authorization and deauthorization operations were added.", + } +} + +func NewActionRequiredTxApprovalResponse(message, actionURL string, actionFields []string) *txApprovalResponse { + 
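+	// SEP-8 action_required responses from this server always use the POST action
+	// method and are delivered with a 200 status code.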
return &txApprovalResponse{ + Status: sep8StatusActionRequired, + Message: message, + ActionMethod: "POST", + StatusCode: http.StatusOK, + ActionURL: actionURL, + ActionFields: actionFields, + } +} + +func NewSuccessTxApprovalResponse(tx, message string) *txApprovalResponse { + return &txApprovalResponse{ + Status: sep8StatusSuccess, + Tx: tx, + Message: message, + StatusCode: http.StatusOK, + } +} + +func NewPendingTxApprovalResponse(message string) *txApprovalResponse { + timeout := int64(0) + return &txApprovalResponse{ + Status: sep8StatusPending, + Message: message, + StatusCode: http.StatusOK, + Timeout: &timeout, + } +} + +type sep8Status string + +const ( + sep8StatusRejected sep8Status = "rejected" + sep8StatusRevised sep8Status = "revised" + sep8StatusActionRequired sep8Status = "action_required" + sep8StatusSuccess sep8Status = "success" + sep8StatusPending sep8Status = "pending" +) diff --git a/services/regulated-assets-approval-server/internal/serve/tx_approve_test.go b/services/regulated-assets-approval-server/internal/serve/tx_approve_test.go new file mode 100644 index 0000000000..1b8a19aa94 --- /dev/null +++ b/services/regulated-assets-approval-server/internal/serve/tx_approve_test.go @@ -0,0 +1,1427 @@ +package serve + +import ( + "context" + "net/http" + "testing" + + "github.com/stellar/go/amount" + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/regulated-assets-approval-server/internal/db/dbtest" + "github.com/stellar/go/txnbuild" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTxApproveHandlerValidate(t *testing.T) { + // empty issuer KP. + h := txApproveHandler{} + err := h.validate() + require.EqualError(t, err, "issuer keypair cannot be nil") + + // empty asset code. + issuerAccKeyPair := keypair.MustRandom() + h = txApproveHandler{ + issuerKP: issuerAccKeyPair, + } + err = h.validate() + require.EqualError(t, err, "asset code cannot be empty") + + // No Horizon client. + h = txApproveHandler{ + issuerKP: issuerAccKeyPair, + assetCode: "FOOBAR", + } + err = h.validate() + require.EqualError(t, err, "horizon client cannot be nil") + + // No network passphrase. + horizonMock := horizonclient.MockClient{} + h = txApproveHandler{ + issuerKP: issuerAccKeyPair, + assetCode: "FOOBAR", + horizonClient: &horizonMock, + } + err = h.validate() + require.EqualError(t, err, "network passphrase cannot be empty") + + // No db. + h = txApproveHandler{ + issuerKP: issuerAccKeyPair, + assetCode: "FOOBAR", + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + } + err = h.validate() + require.EqualError(t, err, "database cannot be nil") + + // Empty kycThreshold. + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + h = txApproveHandler{ + issuerKP: issuerAccKeyPair, + assetCode: "FOOBAR", + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + } + err = h.validate() + require.EqualError(t, err, "kyc threshold cannot be less than or equal to zero") + + // Negative kycThreshold. + h = txApproveHandler{ + issuerKP: issuerAccKeyPair, + assetCode: "FOOBAR", + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: -1, + } + err = h.validate() + require.EqualError(t, err, "kyc threshold cannot be less than or equal to zero") + + // no baseURL. 
+ h = txApproveHandler{ + issuerKP: issuerAccKeyPair, + assetCode: "FOOBAR", + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: 1, + } + err = h.validate() + require.EqualError(t, err, "base url cannot be empty") + + // Success. + h = txApproveHandler{ + issuerKP: issuerAccKeyPair, + assetCode: "FOOBAR", + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: 1, + baseURL: "https://example.com", + } + err = h.validate() + require.NoError(t, err) +} + +func TestTxApproveHandler_validateInput(t *testing.T) { + h := txApproveHandler{} + ctx := context.Background() + + // rejects if incoming tx is empty + in := txApproveRequest{} + txApprovalResp, gotTx := h.validateInput(ctx, in) + require.Equal(t, NewRejectedTxApprovalResponse("Missing parameter \"tx\"."), txApprovalResp) + require.Nil(t, gotTx) + + // rejects if incoming tx is invalid + in = txApproveRequest{Tx: "foobar"} + txApprovalResp, gotTx = h.validateInput(ctx, in) + require.Equal(t, NewRejectedTxApprovalResponse("Invalid parameter \"tx\"."), txApprovalResp) + require.Nil(t, gotTx) + + // rejects if incoming tx is a fee bump transaction + in = txApproveRequest{Tx: "AAAABQAAAAAo/cVyQxyGh7F/Vsj0BzfDYuOJvrwgfHGyqYFpHB5RCAAAAAAAAADIAAAAAgAAAAAo/cVyQxyGh7F/Vsj0BzfDYuOJvrwgfHGyqYFpHB5RCAAAAGQAEfDJAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAAAo/cVyQxyGh7F/Vsj0BzfDYuOJvrwgfHGyqYFpHB5RCAAAAAAAAAAAAJiWgAAAAAAAAAAAAAAAAAAAAAA="} + txApprovalResp, gotTx = h.validateInput(ctx, in) + require.Equal(t, NewRejectedTxApprovalResponse("Invalid parameter \"tx\"."), txApprovalResp) + require.Nil(t, gotTx) + + // rejects if tx source account is the issuer + clientKP := keypair.MustRandom() + h.issuerKP = keypair.MustRandom() + + tx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: h.issuerKP.Address(), + Sequence: "1", + }, + IncrementSequenceNum: true, + Timebounds: txnbuild.NewInfiniteTimeout(), + BaseFee: 300, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + Destination: clientKP.Address(), + Amount: "1", + Asset: txnbuild.NativeAsset{}, + }, + }, + }) + require.NoError(t, err) + txe, err := tx.Base64() + require.NoError(t, err) + + in.Tx = txe + txApprovalResp, gotTx = h.validateInput(ctx, in) + require.Equal(t, NewRejectedTxApprovalResponse("Transaction source account is invalid."), txApprovalResp) + require.Nil(t, gotTx) + + // rejects if there are any operations other than Allowtrust where the source account is the issuer + tx, err = txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: clientKP.Address(), + Sequence: "1", + }, + IncrementSequenceNum: true, + Timebounds: txnbuild.NewInfiniteTimeout(), + BaseFee: 300, + Operations: []txnbuild.Operation{ + &txnbuild.BumpSequence{}, + &txnbuild.Payment{ + Destination: clientKP.Address(), + Amount: "1.0000000", + Asset: txnbuild.NativeAsset{}, + SourceAccount: h.issuerKP.Address(), + }, + }, + }) + require.NoError(t, err) + txe, err = tx.Base64() + require.NoError(t, err) + + in.Tx = txe + txApprovalResp, gotTx = h.validateInput(ctx, in) + require.Equal(t, NewRejectedTxApprovalResponse("There are one or more unauthorized operations in the provided transaction."), txApprovalResp) + require.Nil(t, gotTx) + + // validation success + tx, err = txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: clientKP.Address(), + 
Sequence: "1", + }, + IncrementSequenceNum: true, + Timebounds: txnbuild.NewInfiniteTimeout(), + BaseFee: 300, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + Destination: clientKP.Address(), + Amount: "1.0000000", + Asset: txnbuild.NativeAsset{}, + }, + }, + }) + require.NoError(t, err) + txe, err = tx.Base64() + require.NoError(t, err) + + in.Tx = txe + txApprovalResp, gotTx = h.validateInput(ctx, in) + require.Nil(t, txApprovalResp) + require.Equal(t, gotTx, tx) +} + +func TestTxApproveHandler_handleActionRequiredResponseIfNeeded(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + kycThreshold, err := amount.ParseInt64("500") + require.NoError(t, err) + h := txApproveHandler{ + assetCode: "FOO", + baseURL: "https://example.com", + kycThreshold: kycThreshold, + db: conn, + } + + // payments up to the the threshold won't trigger "action_required" + clientKP := keypair.MustRandom() + paymentOp := &txnbuild.Payment{ + Amount: amount.StringFromInt64(kycThreshold), + } + txApprovalResp, err := h.handleActionRequiredResponseIfNeeded(ctx, clientKP.Address(), paymentOp) + require.NoError(t, err) + require.Nil(t, txApprovalResp) + + // payments greater than the threshold will trigger "action_required" + paymentOp = &txnbuild.Payment{ + Amount: amount.StringFromInt64(kycThreshold + 1), + } + txApprovalResp, err = h.handleActionRequiredResponseIfNeeded(ctx, clientKP.Address(), paymentOp) + require.NoError(t, err) + + var callbackID string + q := `SELECT callback_id FROM accounts_kyc_status WHERE stellar_address = $1` + err = conn.QueryRowContext(ctx, q, clientKP.Address()).Scan(&callbackID) + require.NoError(t, err) + + wantResp := &txApprovalResponse{ + Status: sep8StatusActionRequired, + Message: "Payments exceeding 500.00 FOO require KYC approval. 
Please provide an email address.", + ActionMethod: "POST", + StatusCode: http.StatusOK, + ActionURL: "https://example.com/kyc-status/" + callbackID, + ActionFields: []string{"email_address"}, + } + require.Equal(t, wantResp, txApprovalResp) + + // if KYC was previously approved, handleActionRequiredResponseIfNeeded will return nil + q = ` + UPDATE accounts_kyc_status + SET + approved_at = NOW(), + rejected_at = NULL, + pending_at = NULL + WHERE stellar_address = $1 + ` + _, err = conn.ExecContext(ctx, q, clientKP.Address()) + require.NoError(t, err) + txApprovalResp, err = h.handleActionRequiredResponseIfNeeded(ctx, clientKP.Address(), paymentOp) + require.NoError(t, err) + require.Nil(t, txApprovalResp) + + // if KYC was previously rejected, handleActionRequiredResponseIfNeeded will return a "rejected" response + q = ` + UPDATE accounts_kyc_status + SET + approved_at = NULL, + rejected_at = NOW(), + pending_at = NULL + WHERE stellar_address = $1 + ` + _, err = conn.ExecContext(ctx, q, clientKP.Address()) + require.NoError(t, err) + txApprovalResp, err = h.handleActionRequiredResponseIfNeeded(ctx, clientKP.Address(), paymentOp) + require.NoError(t, err) + require.Equal(t, NewRejectedTxApprovalResponse("Your KYC was rejected and you're not authorized for operations above 500.00 FOO."), txApprovalResp) + + // if KYC was previously marked as pending, handleActionRequiredResponseIfNeeded will return a "pending" response + q = ` + UPDATE accounts_kyc_status + SET + approved_at = NULL, + rejected_at = NULL, + pending_at = NOW() + WHERE stellar_address = $1 + ` + _, err = conn.ExecContext(ctx, q, clientKP.Address()) + require.NoError(t, err) + txApprovalResp, err = h.handleActionRequiredResponseIfNeeded(ctx, clientKP.Address(), paymentOp) + require.NoError(t, err) + require.Equal(t, NewPendingTxApprovalResponse("Your account could not be verified as approved nor rejected and was marked as pending. You will need staff authorization for operations above 500.00 FOO."), txApprovalResp) +} + +func TestTxApproveHandler_txApprove_rejected(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). 
+ Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + // "rejected" if tx is empty + rejectedResponse, err := handler.txApprove(ctx, txApproveRequest{}) + require.NoError(t, err) + wantRejectedResponse := txApprovalResponse{ + Status: "rejected", + Error: `Missing parameter "tx".`, + StatusCode: http.StatusBadRequest, + } + assert.Equal(t, &wantRejectedResponse, rejectedResponse) + + // rejected if contains more than one operation + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.BumpSequence{}, + &txnbuild.Payment{ + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err := tx.Base64() + require.NoError(t, err) + + txApprovalResp, err := handler.txApprove(ctx, txApproveRequest{Tx: txe}) + require.NoError(t, err) + assert.Equal(t, NewRejectedTxApprovalResponse("Please submit a transaction with exactly one operation of type payment."), txApprovalResp) + + // rejected if the single operation is not a payment + tx, err = txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.BumpSequence{}, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err = tx.Base64() + require.NoError(t, err) + + txApprovalResp, err = handler.txApprove(ctx, txApproveRequest{Tx: txe}) + require.NoError(t, err) + assert.Equal(t, NewRejectedTxApprovalResponse("There is one or more unauthorized operations in the provided transaction."), txApprovalResp) + + // rejected if attempting to transfer an asset to its own issuer + tx, err = txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + Destination: issuerKP.Address(), // <--- this will trigger the rejection + Amount: "1", + Asset: txnbuild.CreditAsset{ + Code: "FOO", + Issuer: keypair.MustRandom().Address(), + }, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err = tx.Base64() + require.NoError(t, err) + + txApprovalResp, err = handler.txApprove(ctx, txApproveRequest{Tx: txe}) + require.NoError(t, err) + assert.Equal(t, NewRejectedTxApprovalResponse("Can't transfer asset to its issuer."), txApprovalResp) + + // rejected if payment asset is not supported + tx, err = txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + Destination: receiverKP.Address(), + Amount: "1", + Asset: txnbuild.CreditAsset{ + Code: "FOO", + Issuer: keypair.MustRandom().Address(), + }, + }, + }, + BaseFee: 
txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err = tx.Base64() + require.NoError(t, err) + + txApprovalResp, err = handler.txApprove(ctx, txApproveRequest{Tx: txe}) + require.NoError(t, err) + assert.Equal(t, NewRejectedTxApprovalResponse("The payment asset is not supported by this issuer."), txApprovalResp) + + // rejected if sequence number is not incremental + tx, err = txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "20", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err = tx.Base64() + require.NoError(t, err) + + txApprovalResp, err = handler.txApprove(ctx, txApproveRequest{Tx: txe}) + require.NoError(t, err) + assert.Equal(t, NewRejectedTxApprovalResponse("Invalid transaction sequence number."), txApprovalResp) +} + +func TestTxApproveHandler_txApprove_success(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). + Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err := tx.Base64() + require.NoError(t, err) + + txApprovalResp, err := handler.txApprove(ctx, txApproveRequest{Tx: txe}) + require.NoError(t, err) + require.Equal(t, NewSuccessTxApprovalResponse(txApprovalResp.Tx, "Transaction is compliant and signed by the issuer."), txApprovalResp) +} + +func TestTxApproveHandler_txApprove_actionRequired(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + 
defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). + Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + Destination: receiverKP.Address(), + Amount: "501", + Asset: assetGOAT, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err := tx.Base64() + require.NoError(t, err) + + txApprovalResp, err := handler.txApprove(ctx, txApproveRequest{Tx: txe}) + require.NoError(t, err) + + var callbackID string + q := `SELECT callback_id FROM accounts_kyc_status WHERE stellar_address = $1` + err = conn.QueryRowContext(ctx, q, senderKP.Address()).Scan(&callbackID) + require.NoError(t, err) + + wantResp := &txApprovalResponse{ + Status: sep8StatusActionRequired, + Message: "Payments exceeding 500.00 GOAT require KYC approval. Please provide an email address.", + ActionMethod: "POST", + StatusCode: http.StatusOK, + ActionURL: "https://example.com/kyc-status/" + callbackID, + ActionFields: []string{"email_address"}, + } + require.Equal(t, wantResp, txApprovalResp) +} + +func TestTxApproveHandler_txApprove_revised(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). 
+ Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + Destination: receiverKP.Address(), + Amount: "500", + Asset: assetGOAT, + }, + }, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + txe, err := tx.Base64() + require.NoError(t, err) + + txApprovalResp, err := handler.txApprove(ctx, txApproveRequest{Tx: txe}) + require.NoError(t, err) + require.Equal(t, sep8StatusRevised, txApprovalResp.Status) + require.Equal(t, http.StatusOK, txApprovalResp.StatusCode) + require.Equal(t, "Authorization and deauthorization operations were added.", txApprovalResp.Message) + + gotGenericTx, err := txnbuild.TransactionFromXDR(txApprovalResp.Tx) + require.NoError(t, err) + gotTx, ok := gotGenericTx.Transaction() + require.True(t, ok) + require.Equal(t, senderKP.Address(), gotTx.SourceAccount().AccountID) + require.Equal(t, int64(3), gotTx.SourceAccount().Sequence) + + require.Len(t, gotTx.Operations(), 5) + // AllowTrust op where issuer fully authorizes sender, asset GOAT + op0, ok := gotTx.Operations()[0].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op0.Trustor, senderKP.Address()) + assert.Equal(t, op0.Type.GetCode(), assetGOAT.GetCode()) + require.True(t, op0.Authorize) + // AllowTrust op where issuer fully authorizes receiver, asset GOAT + op1, ok := gotTx.Operations()[1].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op1.Trustor, receiverKP.Address()) + assert.Equal(t, op1.Type.GetCode(), assetGOAT.GetCode()) + require.True(t, op1.Authorize) + // Payment from sender to receiver + op2, ok := gotTx.Operations()[2].(*txnbuild.Payment) + require.True(t, ok) + assert.Equal(t, op2.Destination, receiverKP.Address()) + assert.Equal(t, op2.Asset, assetGOAT) + // AllowTrust op where issuer fully deauthorizes receiver, asset GOAT + op3, ok := gotTx.Operations()[3].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op3.Trustor, receiverKP.Address()) + assert.Equal(t, op3.Type.GetCode(), assetGOAT.GetCode()) + require.False(t, op3.Authorize) + // AllowTrust op where issuer fully deauthorizes sender, asset GOAT + op4, ok := gotTx.Operations()[4].(*txnbuild.AllowTrust) + require.True(t, ok) + assert.Equal(t, op4.Trustor, senderKP.Address()) + assert.Equal(t, op4.Type.GetCode(), assetGOAT.GetCode()) + require.False(t, op4.Authorize) +} + +func TestValidateTransactionOperationsForSuccess(t *testing.T) { + ctx := context.Background() + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + + // rejected if number of operations is unsupported + tx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "5", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: 
receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResp, paymentOp, paymentSource := validateTransactionOperationsForSuccess(ctx, tx, issuerKP.Address()) + assert.Equal(t, NewRejectedTxApprovalResponse("Unsupported number of operations."), txApprovalResp) + assert.Nil(t, paymentOp) + assert.Empty(t, paymentSource) + + // rejected if operation at index "2" is not a payment + tx, err = txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "5", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.BumpSequence{}, + &txnbuild.BumpSequence{}, + &txnbuild.BumpSequence{}, + &txnbuild.BumpSequence{}, + &txnbuild.BumpSequence{}, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResp, paymentOp, paymentSource = validateTransactionOperationsForSuccess(ctx, tx, issuerKP.Address()) + assert.Equal(t, NewRejectedTxApprovalResponse("There are one or more unexpected operations in the provided transaction."), txApprovalResp) + assert.Nil(t, paymentOp) + assert.Empty(t, paymentSource) + + // rejected if the operations list don't match the expected format [AllowTrust, AllowTrust, Payment, AllowTrust, AllowTrust] + tx, err = txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "5", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.BumpSequence{}, + &txnbuild.BumpSequence{}, + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + &txnbuild.BumpSequence{}, + &txnbuild.BumpSequence{}, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResp, paymentOp, paymentSource = validateTransactionOperationsForSuccess(ctx, tx, issuerKP.Address()) + assert.Equal(t, NewRejectedTxApprovalResponse("There are one or more unexpected operations in the provided transaction."), txApprovalResp) + assert.Nil(t, paymentOp) + assert.Empty(t, paymentSource) + + // rejected if the values inside the operations list don't match the expected format + tx, err = txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "5", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: true, // <--- this flag is the only wrong value in this transaction + SourceAccount: issuerKP.Address(), + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResp, paymentOp, paymentSource = validateTransactionOperationsForSuccess(ctx, tx, issuerKP.Address()) + assert.Equal(t, 
NewRejectedTxApprovalResponse("There are one or more unexpected operations in the provided transaction."), txApprovalResp) + assert.Nil(t, paymentOp) + assert.Empty(t, paymentSource) + + // success + tx, err = txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "5", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResp, paymentOp, paymentSource = validateTransactionOperationsForSuccess(ctx, tx, issuerKP.Address()) + assert.Nil(t, txApprovalResp) + assert.Equal(t, senderKP.Address(), paymentSource) + wantPaymentOp := &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + } + assert.Equal(t, wantPaymentOp, paymentOp) +} + +func TestTxApproveHandler_handleSuccessResponseIfNeeded_revisable(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). 
+ Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + revisableTx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txSuccessResponse, err := handler.handleSuccessResponseIfNeeded(ctx, revisableTx) + require.NoError(t, err) + assert.Nil(t, txSuccessResponse) +} + +func TestTxApproveHandler_handleSuccessResponseIfNeeded_rejected(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). + Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + // rejected if operations don't match the expected format + tx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.BumpSequence{}, + &txnbuild.BumpSequence{}, + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + &txnbuild.BumpSequence{}, + &txnbuild.BumpSequence{}, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResp, err := handler.handleSuccessResponseIfNeeded(ctx, tx) + require.NoError(t, err) + assert.Equal(t, NewRejectedTxApprovalResponse("There are one or more unexpected operations in the provided transaction."), txApprovalResp) + + // rejected if attempting to transfer an asset to its own issuer + tx, err = txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: issuerKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: issuerKP.Address(), // <--- this will trigger the rejection + Amount: "1", + Asset: assetGOAT, + }, + &txnbuild.AllowTrust{ + Trustor: 
issuerKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResp, err = handler.handleSuccessResponseIfNeeded(ctx, tx) + require.NoError(t, err) + assert.Equal(t, NewRejectedTxApprovalResponse("Can't transfer asset to its issuer."), txApprovalResp) + + // rejected if sequence number is not incremental + compliantOps := []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + } + tx, err = txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "3", + }, + IncrementSequenceNum: true, + Operations: compliantOps, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResp, err = handler.handleSuccessResponseIfNeeded(ctx, tx) + require.NoError(t, err) + assert.Equal(t, NewRejectedTxApprovalResponse("Invalid transaction sequence number."), txApprovalResp) +} + +func TestTxApproveHandler_handleSuccessResponseIfNeeded_actionRequired(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). 
+ Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + // compliant operations with a payment above threshold will return "action_required" if the user hasn't gone through KYC yet + tx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "501", + Asset: assetGOAT, + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResponse, err := handler.handleSuccessResponseIfNeeded(ctx, tx) + require.NoError(t, err) + + var callbackID string + q := `SELECT callback_id FROM accounts_kyc_status WHERE stellar_address = $1` + err = conn.QueryRowContext(ctx, q, senderKP.Address()).Scan(&callbackID) + require.NoError(t, err) + + wantTxApprovalResponse := NewActionRequiredTxApprovalResponse( + "Payments exceeding 500.00 GOAT require KYC approval. Please provide an email address.", + "https://example.com/kyc-status/"+callbackID, + []string{"email_address"}, + ) + assert.Equal(t, wantTxApprovalResponse, txApprovalResponse) + + // compliant operations with a payment above threshold will return "rejected" if the user's KYC was rejected + query := ` + UPDATE accounts_kyc_status + SET + approved_at = NULL, + rejected_at = NOW(), + pending_at = NULL + WHERE stellar_address = $1 + ` + _, err = handler.db.ExecContext(ctx, query, senderKP.Address()) + require.NoError(t, err) + txApprovalResponse, err = handler.handleSuccessResponseIfNeeded(ctx, tx) + require.NoError(t, err) + assert.Equal(t, NewRejectedTxApprovalResponse("Your KYC was rejected and you're not authorized for operations above 500.00 GOAT."), txApprovalResponse) + + // compliant operations with a payment above threshold will return "pending" if the user's KYC was marked as pending + query = ` + UPDATE accounts_kyc_status + SET + approved_at = NULL, + rejected_at = NULL, + pending_at = NOW() + WHERE stellar_address = $1 + ` + _, err = handler.db.ExecContext(ctx, query, senderKP.Address()) + require.NoError(t, err) + txApprovalResponse, err = handler.handleSuccessResponseIfNeeded(ctx, tx) + require.NoError(t, err) + assert.Equal(t, NewPendingTxApprovalResponse("Your account could not be verified as approved nor rejected and was marked as pending. 
You will need staff authorization for operations above 500.00 GOAT."), txApprovalResponse) + + // compliant operations with a payment above threshold will return "success" if the user's KYC was approved + query = ` + UPDATE accounts_kyc_status + SET + approved_at = NOW(), + rejected_at = NULL, + pending_at = NULL + WHERE stellar_address = $1 + ` + _, err = handler.db.ExecContext(ctx, query, senderKP.Address()) + require.NoError(t, err) + txApprovalResponse, err = handler.handleSuccessResponseIfNeeded(ctx, tx) + require.NoError(t, err) + assert.Equal(t, NewSuccessTxApprovalResponse(txApprovalResponse.Tx, "Transaction is compliant and signed by the issuer."), txApprovalResponse) +} + +func TestTxApproveHandler_handleSuccessResponseIfNeeded_success(t *testing.T) { + ctx := context.Background() + db := dbtest.Open(t) + defer db.Close() + conn := db.Open() + defer conn.Close() + + senderKP := keypair.MustRandom() + receiverKP := keypair.MustRandom() + issuerKP := keypair.MustRandom() + assetGOAT := txnbuild.CreditAsset{ + Code: "GOAT", + Issuer: issuerKP.Address(), + } + kycThresholdAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + + horizonMock := horizonclient.MockClient{} + horizonMock. + On("AccountDetail", horizonclient.AccountRequest{AccountID: senderKP.Address()}). + Return(horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, nil) + + handler := txApproveHandler{ + issuerKP: issuerKP, + assetCode: assetGOAT.GetCode(), + horizonClient: &horizonMock, + networkPassphrase: network.TestNetworkPassphrase, + db: conn, + kycThreshold: kycThresholdAmount, + baseURL: "https://example.com", + } + + tx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{ + SourceAccount: &horizon.Account{ + AccountID: senderKP.Address(), + Sequence: "2", + }, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{ + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: true, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.Payment{ + SourceAccount: senderKP.Address(), + Destination: receiverKP.Address(), + Amount: "1", + Asset: assetGOAT, + }, + &txnbuild.AllowTrust{ + Trustor: receiverKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + &txnbuild.AllowTrust{ + Trustor: senderKP.Address(), + Type: assetGOAT, + Authorize: false, + SourceAccount: issuerKP.Address(), + }, + }, + BaseFee: 300, + Timebounds: txnbuild.NewTimeout(300), + }) + require.NoError(t, err) + + txApprovalResponse, err := handler.handleSuccessResponseIfNeeded(ctx, tx) + require.NoError(t, err) + require.Equal(t, NewSuccessTxApprovalResponse(txApprovalResponse.Tx, "Transaction is compliant and signed by the issuer."), txApprovalResponse) + + gotGenericTx, err := txnbuild.TransactionFromXDR(txApprovalResponse.Tx) + require.NoError(t, err) + gotTx, ok := gotGenericTx.Transaction() + require.True(t, ok) + + // test transaction params + assert.Equal(t, tx.SourceAccount(), gotTx.SourceAccount()) + assert.Equal(t, tx.BaseFee(), gotTx.BaseFee()) + assert.Equal(t, tx.Timebounds(), gotTx.Timebounds()) + assert.Equal(t, tx.SequenceNumber(), gotTx.SequenceNumber()) + + // test if the operations are as expected + resp, _, _ := validateTransactionOperationsForSuccess(ctx, gotTx, issuerKP.Address()) + assert.Nil(t, resp) + + // check if the transaction contains the issuer's signature + gotTxHash, 
err := gotTx.Hash(handler.networkPassphrase) + require.NoError(t, err) + err = handler.issuerKP.Verify(gotTxHash[:], gotTx.Signatures()[0].Signature) + require.NoError(t, err) +} + +func TestConvertAmountToReadableString(t *testing.T) { + parsedAmount, err := amount.ParseInt64("500") + require.NoError(t, err) + assert.Equal(t, int64(5000000000), parsedAmount) + + readableAmount, err := convertAmountToReadableString(parsedAmount) + require.NoError(t, err) + assert.Equal(t, "500.00", readableAmount) +} diff --git a/services/regulated-assets-approval-server/main.go b/services/regulated-assets-approval-server/main.go new file mode 100644 index 0000000000..d7f9d9ac29 --- /dev/null +++ b/services/regulated-assets-approval-server/main.go @@ -0,0 +1,30 @@ +package main + +import ( + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/stellar/go/services/regulated-assets-approval-server/cmd" + "github.com/stellar/go/support/log" +) + +func main() { + log.DefaultLogger = log.New() + log.DefaultLogger.SetLevel(logrus.TraceLevel) + + rootCmd := &cobra.Command{ + Use: "regulated-assets-approval-server [command]", + Short: "SEP-8 Approval Server", + Run: func(cmd *cobra.Command, args []string) { + cmd.Help() + }, + } + + rootCmd.AddCommand((&cmd.MigrateCommand{}).Command()) + rootCmd.AddCommand((&cmd.ServeCommand{}).Command()) + rootCmd.AddCommand((&cmd.ConfigureIssuer{}).Command()) + + err := rootCmd.Execute() + if err != nil { + log.Fatal(err) + } +} diff --git a/services/ticker/.gitignore b/services/ticker/.gitignore new file mode 100644 index 0000000000..87748477a2 --- /dev/null +++ b/services/ticker/.gitignore @@ -0,0 +1,24 @@ +.DS_Store +internal/docs/public +/.env +/.go +/tmp/ +/pkg +/bin +/dist +/vendor/pkg +/vendor/bin +/vendor/src +/.vscode/last.sql +/.vscode/temp.sql +/.vscode/* +*.bts +*.swp +*.swo + +/test.go +/tls/*.crt +/tls/*.key +/tls/*.csr + +docker/db diff --git a/services/ticker/.gqlconfig b/services/ticker/.gqlconfig new file mode 100644 index 0000000000..20662eb06b --- /dev/null +++ b/services/ticker/.gqlconfig @@ -0,0 +1,5 @@ +{ + schema: { + files: 'internal/gql/**/*.gql' + } +} diff --git a/services/ticker/CHANGELOG.md b/services/ticker/CHANGELOG.md new file mode 100644 index 0000000000..3400061c18 --- /dev/null +++ b/services/ticker/CHANGELOG.md @@ -0,0 +1,22 @@ +## Unreleased + +* Dropped support for Go 1.12. +* Dropped support for Go 1.13. + + +## [v1.2.0] - 2019-11-20 +- Add `ReadTimeout` to Ticker HTTP server configuration to fix potential DoS vector. +- Added nested `"issuer_detail"` field to `/assets.json`. +- Dropped support for Go 1.10, 1.11. + + +## [v1.1.0] - 2019-07-22 + +- Added support for running the ticker on Stellar's Test Network, by using the `--testnet` CLI flag. +- The ticker now retries requests to Horizon if it gets rate-limited. +- Minor bug fixes and performance improvements. + + +## [v1.0.0] - 2019-05-20 + +Initial release of the ticker. 
diff --git a/services/ticker/Makefile b/services/ticker/Makefile
new file mode 100644
index 0000000000..eb2ef29bff
--- /dev/null
+++ b/services/ticker/Makefile
@@ -0,0 +1,20 @@
+# Check if we need to prepend docker commands with sudo
+SUDO := $(shell docker version >/dev/null 2>&1 || echo "sudo")
+
+# If TAG is not provided set default value
+TAG ?= stellar/ticker:$(shell git rev-parse --short HEAD)$(and $(shell git status -s),-dirty-$(shell id -u -n))
+# https://github.com/opencontainers/image-spec/blob/master/annotations.md
+BUILD_DATE := $(shell date -u +%FT%TZ)
+
+docker-build:
+	cd ../../ && \
+	$(SUDO) docker build --pull --label org.opencontainers.image.created="$(BUILD_DATE)" \
+	-f services/ticker/docker/Dockerfile -t $(TAG) .
+
+docker-push:
+	cd ../../ && \
+	$(SUDO) docker push $(TAG)
+
+docker-build-dev:
+	$(SUDO) docker build --pull --label org.opencontainers.image.created="$(BUILD_DATE)" \
+	-f docker/Dockerfile-dev -t $(TAG) .
diff --git a/services/ticker/README.md b/services/ticker/README.md
new file mode 100644
index 0000000000..6debae0d26
--- /dev/null
+++ b/services/ticker/README.md
@@ -0,0 +1,36 @@
+# Ticker
+This project aims to provide an easy-to-deploy Stellar ticker.
+
+## Quick Start
+This project provides a docker setup that makes it easy to get a Ticker up and running (you can
+check an architecture overview [here](docs/Architecture.md)). To get started, follow these steps:
+
+1. Install [Docker](https://hub.docker.com/editions/community/docker-ce-desktop-mac)
+2. Clone the [monorepo](https://github.com/stellar/go)
+3. Build the Ticker's docker image. At the repo's root, run `$ docker build -t ticker -f services/ticker/docker/Dockerfile-dev .`
+4. Run the Ticker: `$ docker run --rm -it -p "8000:8000" ticker` (you'll be asked to enter a
+   PostgreSQL password)
+5. After the initial setup (after the `supervisord started` message), you should be able to visit
+   the two available endpoints: http://localhost:8000/markets.json and
+   http://localhost:8000/assets.json
+
+### Persisting the data
+The quickstart guide creates an ephemeral database that will be deleted once the Docker image stops
+running. If you wish to have a persisting ticker, you'll have to mount a volume inside the
+container. To do this, replace step `4` with the following steps:
+
+1. Create a folder for the persisting data: `$ mkdir /path/to/data/folder`
+2. Run the ticker with the mounted folder: `$ docker run --rm -it -p "8000:8000" -v
+   "/path/to/data/folder:/opt/stellar/postgresql" ticker` (you'll be asked to enter a
+   PostgreSQL password the first time you run this command, but not on subsequent runs).
+3. Voilà! After the initial setup / population is done, you should be able to visit the two
+   available endpoints: http://localhost:8000/markets.json and http://localhost:8000/assets.json
+
+## Using the CLI
+You can also test the Ticker locally, without the Docker setup. For that, you'll need a PostgreSQL
+instance running. In order to build the Ticker project, follow these steps:
+1. See the details in [README.md](../../../../README.md#dependencies) for installing dependencies.
+2. Run `$ go run main.go --help` to see the list of available commands.
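For a quick smoke test of a running instance, the two endpoints above can be consumed from Go. The sketch below is illustrative only and is not part of the ticker or of this change set; it assumes the quick-start container is serving on localhost:8000 and decodes only the `pairs`, `name`, and `close` fields documented in docs/API.md.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// marketsResponse mirrors only the handful of markets.json fields used below;
// the full field list is documented in services/ticker/docs/API.md.
type marketsResponse struct {
	GeneratedAtRFC3339 string `json:"generated_at_rfc3339"`
	Pairs              []struct {
		Name  string  `json:"name"`
		Close float64 `json:"close"`
	} `json:"pairs"`
}

func main() {
	resp, err := http.Get("http://localhost:8000/markets.json")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var markets marketsResponse
	if err := json.NewDecoder(resp.Body).Decode(&markets); err != nil {
		log.Fatal(err)
	}

	fmt.Println("generated at:", markets.GeneratedAtRFC3339)
	for _, pair := range markets.Pairs {
		fmt.Printf("%s close=%v\n", pair.Name, pair.Close)
	}
}
```

Any JSON-capable HTTP client works equally well; the struct only needs tags for the fields you care about.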
diff --git a/services/ticker/cmd/clean.go b/services/ticker/cmd/clean.go new file mode 100644 index 0000000000..5582029bcb --- /dev/null +++ b/services/ticker/cmd/clean.go @@ -0,0 +1,54 @@ +package cmd + +import ( + "context" + "time" + + "github.com/lib/pq" + "github.com/spf13/cobra" + "github.com/stellar/go/services/ticker/internal/tickerdb" +) + +var DaysToKeep int + +func init() { + rootCmd.AddCommand(cmdClean) + cmdClean.AddCommand(cmdCleanTrades) + + cmdCleanTrades.Flags().IntVarP( + &DaysToKeep, + "keep-days", + "k", + 7, + "Trade entries older than keep-days will be deleted", + ) +} + +var cmdClean = &cobra.Command{ + Use: "clean [data type]", + Short: "Cleans up the database for a given data type", +} + +var cmdCleanTrades = &cobra.Command{ + Use: "trades", + Short: "Cleans up old trades from the database", + Run: func(cmd *cobra.Command, args []string) { + dbInfo, err := pq.ParseURL(DatabaseURL) + if err != nil { + Logger.Fatal("could not parse db-url:", err) + } + + session, err := tickerdb.CreateSession("postgres", dbInfo) + if err != nil { + Logger.Fatal("could not connect to db:", err) + } + + now := time.Now() + minDate := now.AddDate(0, 0, -DaysToKeep) + Logger.Infof("Deleting trade entries older than %d days", DaysToKeep) + err = session.DeleteOldTrades(context.Background(), minDate) + if err != nil { + Logger.Fatal("could not delete trade entries:", err) + } + }, +} diff --git a/services/ticker/cmd/generate.go b/services/ticker/cmd/generate.go new file mode 100644 index 0000000000..8860f5a2a9 --- /dev/null +++ b/services/ticker/cmd/generate.go @@ -0,0 +1,84 @@ +package cmd + +import ( + "context" + + "github.com/lib/pq" + "github.com/spf13/cobra" + ticker "github.com/stellar/go/services/ticker/internal" + "github.com/stellar/go/services/ticker/internal/tickerdb" +) + +var MarketsOutFile string +var AssetsOutFile string + +func init() { + rootCmd.AddCommand(cmdGenerate) + cmdGenerate.AddCommand(cmdGenerateMarketData) + cmdGenerate.AddCommand(cmdGenerateAssetData) + + cmdGenerateMarketData.Flags().StringVarP( + &MarketsOutFile, + "out-file", + "o", + "markets.json", + "Set the name of the output file", + ) + + cmdGenerateAssetData.Flags().StringVarP( + &AssetsOutFile, + "out-file", + "o", + "assets.json", + "Set the name of the output file", + ) +} + +var cmdGenerate = &cobra.Command{ + Use: "generate [data type]", + Short: "Generates reports about assets and markets", +} + +var cmdGenerateMarketData = &cobra.Command{ + Use: "market-data", + Short: "Generate the aggregated market data (for 24h and 7d) and outputs to a file.", + Run: func(cmd *cobra.Command, args []string) { + dbInfo, err := pq.ParseURL(DatabaseURL) + if err != nil { + Logger.Fatal("could not parse db-url:", err) + } + + session, err := tickerdb.CreateSession("postgres", dbInfo) + if err != nil { + Logger.Fatal("could not connect to db:", err) + } + + Logger.Infof("Starting market data generation, outputting to: %s\n", MarketsOutFile) + err = ticker.GenerateMarketSummaryFile(&session, Logger, MarketsOutFile) + if err != nil { + Logger.Fatal("could not generate market data:", err) + } + }, +} + +var cmdGenerateAssetData = &cobra.Command{ + Use: "asset-data", + Short: "Generate the aggregated asset data and outputs to a file.", + Run: func(cmd *cobra.Command, args []string) { + dbInfo, err := pq.ParseURL(DatabaseURL) + if err != nil { + Logger.Fatal("could not parse db-url:", err) + } + + session, err := tickerdb.CreateSession("postgres", dbInfo) + if err != nil { + Logger.Fatal("could not connect to db:", 
err)
+		}
+
+		Logger.Infof("Starting asset data generation, outputting to: %s\n", AssetsOutFile)
+		err = ticker.GenerateAssetsFile(context.Background(), &session, Logger, AssetsOutFile)
+		if err != nil {
+			Logger.Fatal("could not generate asset data:", err)
+		}
+	},
+}
diff --git a/services/ticker/cmd/ingest.go b/services/ticker/cmd/ingest.go
new file mode 100644
index 0000000000..4068a8d7f5
--- /dev/null
+++ b/services/ticker/cmd/ingest.go
@@ -0,0 +1,123 @@
+package cmd
+
+import (
+	"context"
+
+	"github.com/lib/pq"
+	"github.com/spf13/cobra"
+	ticker "github.com/stellar/go/services/ticker/internal"
+	"github.com/stellar/go/services/ticker/internal/tickerdb"
+)
+
+var ShouldStream bool
+var BackfillHours int
+
+func init() {
+	rootCmd.AddCommand(cmdIngest)
+	cmdIngest.AddCommand(cmdIngestAssets)
+	cmdIngest.AddCommand(cmdIngestTrades)
+	cmdIngest.AddCommand(cmdIngestOrderbooks)
+
+	cmdIngestTrades.Flags().BoolVar(
+		&ShouldStream,
+		"stream",
+		false,
+		"Continuously stream new trades from the Horizon Stream API as a daemon",
+	)
+
+	cmdIngestTrades.Flags().IntVar(
+		&BackfillHours,
+		"num-hours",
+		7*24,
+		"Number of past hours to backfill trade data",
+	)
+}
+
+var cmdIngest = &cobra.Command{
+	Use:   "ingest [data type]",
+	Short: "Ingests new data of the given data type into the database.",
+}
+
+var cmdIngestAssets = &cobra.Command{
+	Use:   "assets",
+	Short: "Refreshes the asset database with new data retrieved from Horizon.",
+	Run: func(cmd *cobra.Command, args []string) {
+		Logger.Info("Refreshing the asset database")
+		dbInfo, err := pq.ParseURL(DatabaseURL)
+		if err != nil {
+			Logger.Fatal("could not parse db-url:", err)
+		}
+
+		session, err := tickerdb.CreateSession("postgres", dbInfo)
+		if err != nil {
+			Logger.Fatal("could not connect to db:", err)
+		}
+		defer session.DB.Close()
+
+		ctx := context.Background()
+		err = ticker.RefreshAssets(ctx, &session, Client, Logger)
+		if err != nil {
+			Logger.Fatal("could not refresh asset database:", err)
+		}
+	},
+}
+
+var cmdIngestTrades = &cobra.Command{
+	Use:   "trades",
+	Short: "Fills the trade database with data retrieved from Horizon.",
+	Run: func(cmd *cobra.Command, args []string) {
+		dbInfo, err := pq.ParseURL(DatabaseURL)
+		if err != nil {
+			Logger.Fatal("could not parse db-url:", err)
+		}
+
+		session, err := tickerdb.CreateSession("postgres", dbInfo)
+		if err != nil {
+			Logger.Fatal("could not connect to db:", err)
+		}
+		defer session.DB.Close()
+
+		ctx := context.Background()
+		numDays := float32(BackfillHours) / 24.0
+		Logger.Infof(
+			"Backfilling Trade data for the past %d hour(s) [%.2f days]\n",
+			BackfillHours,
+			numDays,
+		)
+		err = ticker.BackfillTrades(ctx, &session, Client, Logger, BackfillHours, 0)
+		if err != nil {
+			Logger.Fatal("could not refresh trade database:", err)
+		}
+
+		if ShouldStream {
+			Logger.Info("Streaming new data (this is a continuous process)")
+			err = ticker.StreamTrades(ctx, &session, Client, Logger)
+			if err != nil {
+				Logger.Fatal("could not refresh trade database:", err)
+			}
+		}
+	},
+}
+
+var cmdIngestOrderbooks = &cobra.Command{
+	Use:   "orderbooks",
+	Short: "Refreshes the orderbook stats database with new data retrieved from Horizon.",
+	Run: func(cmd *cobra.Command, args []string) {
+		Logger.Info("Refreshing the orderbook stats database")
+		dbInfo, err := pq.ParseURL(DatabaseURL)
+		if err != nil {
+			Logger.Fatal("could not parse db-url:", err)
+		}
+
+		session, err := tickerdb.CreateSession("postgres", dbInfo)
+		if err != nil {
+			Logger.Fatal("could not connect to db:", err)
+		}
+		defer session.DB.Close()
+
+
err = ticker.RefreshOrderbookEntries(&session, Client, Logger) + if err != nil { + Logger.Fatal("could not refresh orderbook database:", err) + } + }, +} diff --git a/services/ticker/cmd/migrate.go b/services/ticker/cmd/migrate.go new file mode 100644 index 0000000000..604b596124 --- /dev/null +++ b/services/ticker/cmd/migrate.go @@ -0,0 +1,36 @@ +package cmd + +import ( + "github.com/lib/pq" + "github.com/spf13/cobra" + "github.com/stellar/go/services/ticker/internal/tickerdb" +) + +func init() { + rootCmd.AddCommand(cmdMigrate) +} + +var cmdMigrate = &cobra.Command{ + Use: "migrate", + Short: "Updates the database to the latest schema version.", + Run: func(cmd *cobra.Command, args []string) { + Logger.Info("Refreshing the asset database") + dbInfo, err := pq.ParseURL(DatabaseURL) + if err != nil { + Logger.Fatal("could not parse db-url:", err) + } + + session, err := tickerdb.CreateSession("postgres", dbInfo) + if err != nil { + Logger.Fatal("could not connect to db:", err) + } + defer session.DB.Close() + + Logger.Info("Upgrading the database") + n, err := tickerdb.MigrateDB(&session) + if err != nil { + Logger.Fatal("could not upgrade the database:", err) + } + Logger.Infof("Database Successfully Upgraded. Applied %d migrations.\n", n) + }, +} diff --git a/services/ticker/cmd/root.go b/services/ticker/cmd/root.go new file mode 100644 index 0000000000..3cc8034bb0 --- /dev/null +++ b/services/ticker/cmd/root.go @@ -0,0 +1,68 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + horizonclient "github.com/stellar/go/clients/horizonclient" + hlog "github.com/stellar/go/support/log" +) + +var DatabaseURL string +var Client *horizonclient.Client +var UseTestNet bool +var Logger = hlog.New() + +var defaultDatabaseURL = getEnv("DB_URL", "postgres://localhost:5432/stellarticker01?sslmode=disable") + +var rootCmd = &cobra.Command{ + Use: "ticker", + Short: "Stellar Development Foundation Ticker.", + Long: `A tool to provide Stellar Asset and Market data.`, +} + +func getEnv(key, fallback string) string { + value, exists := os.LookupEnv(key) + if !exists { + value = fallback + } + return value +} + +func init() { + cobra.OnInitialize(initConfig) + rootCmd.PersistentFlags().StringVarP( + &DatabaseURL, + "db-url", + "d", + defaultDatabaseURL, + "database URL, such as: postgres://user:pass@localhost:5432/ticker", + ) + rootCmd.PersistentFlags().BoolVar( + &UseTestNet, + "testnet", + false, + "use the Stellar Test Network, instead of the Stellar Public Network", + ) + + Logger.SetLevel(logrus.DebugLevel) +} + +func initConfig() { + if UseTestNet { + Logger.Debug("Using Stellar Default Test Network") + Client = horizonclient.DefaultTestNetClient + } else { + Logger.Debug("Using Stellar Default Public Network") + Client = horizonclient.DefaultPublicNetClient + } +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/services/ticker/cmd/serve.go b/services/ticker/cmd/serve.go new file mode 100644 index 0000000000..0f3f51f375 --- /dev/null +++ b/services/ticker/cmd/serve.go @@ -0,0 +1,41 @@ +package cmd + +import ( + "github.com/lib/pq" + "github.com/spf13/cobra" + ticker "github.com/stellar/go/services/ticker/internal" + "github.com/stellar/go/services/ticker/internal/tickerdb" +) + +var ServerAddr string + +func init() { + rootCmd.AddCommand(cmdServe) + + cmdServe.Flags().StringVar( + &ServerAddr, + "address", + "0.0.0.0:3000", + "Server address and port", + ) +} + +var cmdServe = 
&cobra.Command{ + Use: "serve", + Short: "Runs a GraphQL interface to get Ticker data", + Run: func(cmd *cobra.Command, args []string) { + Logger.Info("Starting GraphQL Server") + dbInfo, err := pq.ParseURL(DatabaseURL) + if err != nil { + Logger.Fatal("could not parse db-url:", err) + } + + session, err := tickerdb.CreateSession("postgres", dbInfo) + if err != nil { + Logger.Fatal("could not connect to db:", err) + } + defer session.DB.Close() + + ticker.StartGraphQLServer(&session, Logger, ServerAddr) + }, +} diff --git a/services/ticker/dbconfig.yml b/services/ticker/dbconfig.yml new file mode 100644 index 0000000000..94cdd0781f --- /dev/null +++ b/services/ticker/dbconfig.yml @@ -0,0 +1,5 @@ +development: + dialect: postgres + datasource: dbname=stellarticker01 sslmode=disable + dir: internal/tickerdb/migrations + table: migrations diff --git a/services/ticker/docker/Dockerfile b/services/ticker/docker/Dockerfile new file mode 100644 index 0000000000..8afbabd446 --- /dev/null +++ b/services/ticker/docker/Dockerfile @@ -0,0 +1,13 @@ +FROM golang:1.17 as build + +ADD . /src/ticker +WORKDIR /src/ticker +RUN go build -o /bin/ticker ./services/ticker + + +FROM ubuntu:20.04 + +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends ca-certificates +COPY --from=build /bin/ticker /app/ +EXPOSE 8000 +ENTRYPOINT ["/app/ticker"] diff --git a/services/ticker/docker/Dockerfile-dev b/services/ticker/docker/Dockerfile-dev new file mode 100644 index 0000000000..993c2074a1 --- /dev/null +++ b/services/ticker/docker/Dockerfile-dev @@ -0,0 +1,21 @@ +FROM golang:1.17-stretch as build + +LABEL maintainer="Alex Cordeiro " + +EXPOSE 5432 +EXPOSE 8000 + +ADD . /src/ticker +WORKDIR /src/ticker +RUN go build -o /opt/stellar/bin/ticker ./services/ticker/ + +WORKDIR /src/ticker/services/ticker/docker/ +RUN ["chmod", "+x", "./dependencies"] +RUN ["./dependencies"] +RUN ["chmod", "+x", "setup"] +RUN ["./setup"] +RUN ["cp", "-r", "conf", "/opt/stellar/conf"] +RUN ["crontab", "-u", "stellar", "/opt/stellar/conf/crontab.txt"] +RUN ["chmod", "+x", "start"] + +ENTRYPOINT ["/src/ticker/services/ticker/docker/start"] diff --git a/services/ticker/docker/conf/.pgpass b/services/ticker/docker/conf/.pgpass new file mode 100644 index 0000000000..f5f3e8c5a3 --- /dev/null +++ b/services/ticker/docker/conf/.pgpass @@ -0,0 +1 @@ +*:*:*:stellar:__PGPASS__ diff --git a/services/ticker/docker/conf/crontab.txt b/services/ticker/docker/conf/crontab.txt new file mode 100644 index 0000000000..8d74a67b1f --- /dev/null +++ b/services/ticker/docker/conf/crontab.txt @@ -0,0 +1,17 @@ +# --------------- +# Ticker Crontab +# --------------- +# Refresh the database of assets, hourly: +@hourly /opt/stellar/bin/ticker ingest assets > /home/stellar/last-ingest-assets.log 2>&1 + +# Refresh the database of orderbooks, every 10 minutes: +*/10 * * * * /opt/stellar/bin/ticker ingest orderbooks > /home/stellar/last-ingest-orderbooks.log 2>&1 + +# Backfill the database of trades (including possible new assets), every 6 hours: +0 */6 * * * /opt/stellar/bin/ticker ingest trades > /home/stellar/last-ingest-trades.log 2>&1 + +# Update the assets.json file, hourly: +@hourly /opt/stellar/bin/ticker generate asset-data -o /opt/stellar/www/assets.json > /home/stellar/last-generate-asset-data.log 2>&1 + +# Update the markets.json file, every minute: +* * * * * /opt/stellar/bin/ticker generate market-data -o /opt/stellar/www/markets.json > /home/stellar/last-generate-market-data.log 2>&1 diff --git 
a/services/ticker/docker/conf/nginx.conf b/services/ticker/docker/conf/nginx.conf new file mode 100644 index 0000000000..60df03dedd --- /dev/null +++ b/services/ticker/docker/conf/nginx.conf @@ -0,0 +1,58 @@ +user www-data; +worker_processes auto; +pid /run/nginx.pid; +include /etc/nginx/modules-enabled/*.conf; + +events { + worker_connections 768; +} + +http { + sendfile on; + tcp_nopush on; + tcp_nodelay on; + keepalive_timeout 65; + types_hash_max_size 2048; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_prefer_server_ciphers on; + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + gzip on; + gzip_disable "msie6"; + + + include /etc/nginx/conf.d/*.conf; + + server { + listen 8000 default_server; + listen [::]:8000 default_server; + + root /opt/stellar/www; + + index markets.json; + + server_name _; + + rewrite ^/(.*)/$ /$1 permanent; + + location / { + try_files $uri $uri/ =404; + } + + location ~ ^/(graphql|graphiql) { + proxy_pass http://localhost:8080; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_http_version 1.1; + proxy_set_header Connection ""; + } + } +} diff --git a/services/ticker/docker/conf/pg_hba.conf b/services/ticker/docker/conf/pg_hba.conf new file mode 100644 index 0000000000..7660b495ca --- /dev/null +++ b/services/ticker/docker/conf/pg_hba.conf @@ -0,0 +1,100 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# plain TCP/IP socket. +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. +# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. +# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. 
Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. +# +# METHOD can be "trust", "reject", "md5", "password", "gss", "sspi", +# "ident", "peer", "pam", "ldap", "radius" or "cert". Note that +# "password" sends passwords in clear text; "md5" is preferred since +# it sends encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# This file is read on server startup and when the postmaster receives +# a SIGHUP signal. If you edit the file on a running system, you have +# to SIGHUP the postmaster for the changes to take effect. You can +# use "pg_ctl reload" to do that. + +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. + + + + +# DO NOT DISABLE! +# If you change this first entry you will need to make sure that the +# database superuser can access the database using some other method. +# Noninteractive access to all databases is required during automatic +# maintenance (custom daily cronjobs, replication, and similar tasks). +# +# Database administrative login by Unix domain socket +local all postgres peer + +# TYPE DATABASE USER ADDRESS METHOD + +# "local" is for Unix domain socket connections only +local all all md5 +# IPv4 local connections: +host all all 127.0.0.1/32 md5 +host all all 0.0.0.0/0 md5 +# IPv6 local connections: +host all all ::1/128 md5 +# Allow replication connections from localhost, by a user with the +# replication privilege. +#local replication postgres peer +#host replication postgres 127.0.0.1/32 md5 +#host replication postgres ::1/128 md5 diff --git a/services/ticker/docker/conf/pg_ident.conf b/services/ticker/docker/conf/pg_ident.conf new file mode 100644 index 0000000000..a5870e6448 --- /dev/null +++ b/services/ticker/docker/conf/pg_ident.conf @@ -0,0 +1,42 @@ +# PostgreSQL User Name Maps +# ========================= +# +# Refer to the PostgreSQL documentation, chapter "Client +# Authentication" for a complete description. A short synopsis +# follows. +# +# This file controls PostgreSQL user name mapping. It maps external +# user names to their corresponding PostgreSQL user names. Records +# are of the form: +# +# MAPNAME SYSTEM-USERNAME PG-USERNAME +# +# (The uppercase quantities must be replaced by actual values.) +# +# MAPNAME is the (otherwise freely chosen) map name that was used in +# pg_hba.conf. SYSTEM-USERNAME is the detected user name of the +# client. PG-USERNAME is the requested PostgreSQL user name. The +# existence of a record specifies that SYSTEM-USERNAME may connect as +# PG-USERNAME. 
+# +# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a +# regular expression. Optionally this can contain a capture (a +# parenthesized subexpression). The substring matching the capture +# will be substituted for \1 (backslash-one) if present in +# PG-USERNAME. +# +# Multiple maps may be specified in this file and used by pg_hba.conf. +# +# No map names are defined in the default configuration. If all +# system user names and PostgreSQL user names are the same, you don't +# need anything in this file. +# +# This file is read on server startup and when the postmaster receives +# a SIGHUP signal. If you edit the file on a running system, you have +# to SIGHUP the postmaster for the changes to take effect. You can +# use "pg_ctl reload" to do that. + +# Put your actual configuration here +# ---------------------------------- + +# MAPNAME SYSTEM-USERNAME PG-USERNAME diff --git a/services/ticker/docker/conf/postgresql.conf b/services/ticker/docker/conf/postgresql.conf new file mode 100644 index 0000000000..290d2ebe16 --- /dev/null +++ b/services/ticker/docker/conf/postgresql.conf @@ -0,0 +1,603 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, or use "pg_ctl reload". Some +# parameters, which are marked below, require a server shutdown and restart to +# take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: kB = kilobytes Time units: ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +data_directory = '/opt/stellar/postgresql/data' +hba_file = '/opt/stellar/conf/pg_hba.conf' +ident_file = '/opt/stellar/conf/pg_ident.conf' + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' +port = 5432 # (change requires restart) +max_connections = 100 # (change requires restart) +# Note: Increasing max_connections costs ~400 bytes of shared memory per +# connection slot, plus lock space (see max_locks_per_transaction). 
+#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/opt/stellar/postgresql/run' +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - Security and Authentication - + +#authentication_timeout = 1min # 1s-600s +ssl = true # (change requires restart) +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers + # (change requires restart) +#ssl_prefer_server_ciphers = on # (change requires restart) +#ssl_ecdh_curve = 'prime256v1' # (change requires restart) +#ssl_renegotiation_limit = 0 # amount of data between renegotiations +ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem' # (change requires restart) +ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key' # (change requires restart) +#ssl_ca_file = '' # (change requires restart) +#ssl_crl_file = '' # (change requires restart) +#password_encryption = on +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = '' +#krb_caseins_users = off + +# - TCP Keepalives - +# see "man 7 tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory +# per transaction slot, plus lock space (see max_locks_per_transaction). +# It is not advisable to set max_prepared_transactions nonzero unless you +# actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#maintenance_work_mem = 64MB # min 1MB +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#max_stack_depth = 2MB # min 100kB +dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # use none to disable dynamic shared memory + +# - Disk - + +#temp_file_limit = -1 # limits per-session temp file space + # in kB, or -1 for no limit + +# - Kernel Resource Usage - + +#max_files_per_process = 1000 # min 25 + # (change requires restart) +#shared_preload_libraries = '' # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 10 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round + +# - Asynchronous Behavior - + +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 + + +#------------------------------------------------------------------------------ +# WRITE AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + +#wal_level = minimal # minimal, archive, hot_standby, or logical + # (change requires restart) +#fsync = on # turns forced synchronization on or off +#synchronous_commit = on # synchronization level; + # off, local, remote_write, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +#checkpoint_segments = 3 # in logfile segments, min 1, 16MB each +#checkpoint_timeout = 5min # range 30s-1h +#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_warning = 30s # 0 disables + +# - Archiving - + +#archive_mode = off # allows archiving to be done + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Server(s) - + +# Set these on the master and on any standby that will send replication data. 
+ +#max_wal_senders = 0 # max number of walsender processes + # (change requires restart) +#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables + +#max_replication_slots = 0 # max number of replication slots + # (change requires restart) + +# - Master Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a master server. + +#hot_standby = off # "on" allows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from master + # in milliseconds; 0 disables + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_bitmapscan = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#effective_cache_size = 4GB + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses + + +#------------------------------------------------------------------------------ +# ERROR REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. 
+ # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'pg_log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' + +# This is only relevant when logging to eventlog (win32): +#event_source = 'PostgreSQL' + +# - When to Log - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +log_line_prefix = '%t [%p-%l] %q%u@%d ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %p = process ID + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_statement = 'none' # none, ddl, mod, all +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'UTC' + + +#------------------------------------------------------------------------------ +# RUNTIME STATISTICS +#------------------------------------------------------------------------------ + +# - Query/Index Statistics Collector - + +#track_activities = on +#track_counts = on +#track_io_timing = off +#track_functions = none # none, pl, all +#track_activity_query_size = 1024 # (change requires restart) +#update_process_title = on +# stats_temp_directory = '' + + +# - Statistics Monitoring - + +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off +#log_statement_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM PARAMETERS +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. +#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#search_path = '"$user",public' # schema names +#default_tablespace = '' # a tablespace name, '' uses the default +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_min_age = 50000000 +#vacuum_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_freeze_table_age = 150000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_fuzzy_search_limit = 0 + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. 
Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 0 # min -15, max 3 +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. +lc_messages = 'C' # locale for system error message + # strings +lc_monetary = 'C' # locale for monetary formatting +lc_numeric = 'C' # locale for number formatting +lc_time = 'C' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#local_preload_libraries = '' +#session_preload_libraries = '' + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +# Note: Each lock table slot uses ~270 bytes of shared memory, and there are +# max_locks_per_transaction * (max_connections + max_prepared_transactions) +# lock table slots. +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) + + +#------------------------------------------------------------------------------ +# VERSION/PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#default_with_oids = off +#escape_string_warning = on +#lo_compat_privileges = off +#quote_all_identifiers = off +#sql_inheritance = on +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. 
+ +#include_dir = 'conf.d' # include files ending in '.conf' from + # directory 'conf.d' +#include_if_exists = 'exists.conf' # include file only if it exists +#include = 'special.conf' # include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/services/ticker/docker/conf/supervisord.conf b/services/ticker/docker/conf/supervisord.conf new file mode 100644 index 0000000000..2f0f614ed7 --- /dev/null +++ b/services/ticker/docker/conf/supervisord.conf @@ -0,0 +1,56 @@ +[unix_http_server] +file=/var/run/supervisor.sock +chmod=0700 + + +[supervisord] +logfile=/var/log/supervisor/supervisord.log +pidfile=/var/run/supervisord.pid +childlogdir=/var/log/supervisor + + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + + +[supervisorctl] +serverurl=unix:///var/run/supervisor.sock + + +[program:postgresql] +user=postgres +command=/usr/lib/postgresql/9.5/bin/postgres -D "/opt/stellar/postgresql/data" -c config_file=/opt/stellar/conf/postgresql.conf +stopsignal=INT +autostart=true +autorestart=true +priority=10 + + +[program:nginx] +command=/usr/sbin/nginx -g "daemon off;" -c /opt/stellar/conf/nginx.conf +stopsignal=INT +autostart=true +autorestart=true +priority=20 + + +[program:tradestream] +user=stellar +command=/opt/stellar/bin/ticker ingest trades --stream +autostart=true +autorestart=true +priority=30 + + +[program:graphqlserver] +user=stellar +command=/opt/stellar/bin/ticker serve --address 0.0.0.0:8080 +autostart=true +autorestart=true +priority=30 + + +[program:cron] +command=cron -f -L 15 +autostart=true +autorestart=true diff --git a/services/ticker/docker/dependencies b/services/ticker/docker/dependencies new file mode 100644 index 0000000000..5825997624 --- /dev/null +++ b/services/ticker/docker/dependencies @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -e + +# Required for using a newer PostgreSQL version: +wget -q https://www.postgresql.org/media/keys/ACCC4CF8.asc -O- | apt-key add - +echo "deb http://apt.postgresql.org/pub/repos/apt/ stretch-pgdg main" | tee /etc/apt/sources.list.d/postgresql.list + + +# Install dependencies: +apt-get update +apt-get install -y \ + curl \ + git \ + libpq-dev \ + postgresql-client-9.5 \ + postgresql-9.5 \ + postgresql-contrib-9.5 \ + sudo \ + vim \ + supervisor \ + nginx \ + cron +apt-get clean diff --git a/services/ticker/docker/setup b/services/ticker/docker/setup new file mode 100644 index 0000000000..9603011ec3 --- /dev/null +++ b/services/ticker/docker/setup @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +set -e + +useradd --uid 10011001 --home-dir /home/stellar --no-log-init stellar \ + && mkdir -p /home/stellar \ + && chown -R stellar:stellar /home/stellar + +mkdir -p /opt/stellar/bin +mkdir -p /opt/stellar/www +chown -R stellar:stellar /opt/stellar/www +mkdir -p /opt/stellar/postgresql/data diff --git a/services/ticker/docker/start b/services/ticker/docker/start new file mode 100644 index 0000000000..a0c92daa0c --- /dev/null +++ b/services/ticker/docker/start @@ -0,0 +1,233 @@ +#!/usr/bin/env bash +set -e + + +export STELLAR_HOME="/opt/stellar" +export STELLAR_BIN="$STELLAR_HOME/bin" +export CONF_HOME="$STELLAR_HOME/conf" +export WWW_HOME="$STELLAR_HOME/www" + +export PGHOME="$STELLAR_HOME/postgresql" +export PGBIN="/usr/lib/postgresql/9.5/bin" +export PGDATA="$PGHOME/data" +export PGUSER="stellar" +export 
PGDB="ticker" +export PGURL="postgres://127.0.0.1:5432/$PGDB" + + +function main() { + echo "" + echo "Initializing Ticker" + echo "" + + init_db + copy_pgpass + + start_postgres + migrate_db + + populate_assets + populate_trades + populate_orderbooks + generate_assets_file + generate_markets_file + + stop_postgres + exec_supervisor +} + + +function migrate_db() { + echo "" + echo "Upgrading database to latest version" + echo "" + sudo -E -u stellar $STELLAR_BIN/ticker migrate +} + + +function populate_assets() { + if [ -f $PGHOME/.assets-populated ]; then + echo "ticker: assets already pre-populated" + return 0 + fi + echo "" + echo "Populating initial asset database" + echo "" + sudo -E -u stellar $STELLAR_BIN/ticker ingest assets + touch $PGHOME/.assets-populated +} + + +function populate_trades() { + if [ -f $PGHOME/.trades-populated ]; then + echo "ticker: trades already pre-populated" + return 0 + fi + echo "" + echo "Populating initial trade database" + echo "" + sudo -E -u stellar $STELLAR_BIN/ticker ingest trades + touch $PGHOME/.trades-populated +} + + +function populate_orderbooks() { + if [ -f $PGHOME/.orderbooks-populated ]; then + echo "ticker: orderbooks already pre-populated" + return 0 + fi + echo "" + echo "Populating initial orderbook database" + echo "" + sudo -E -u stellar $STELLAR_BIN/ticker ingest orderbooks + touch $PGHOME/.orderbooks-populated +} + + +function generate_assets_file() { + if [ -f $STELLAR_HOME/www/assets.json ]; then + echo "ticker: assets.json already pre-populated" + return 0 + fi + echo "" + echo "Creating assets.json file" + echo "" + sudo -E -u stellar $STELLAR_BIN/ticker generate asset-data -o $WWW_HOME/assets.json +} + + +function generate_markets_file() { + if [ -f $STELLAR_HOME/www/markets.json ]; then + echo "ticker: markets.json already pre-populated" + return 0 + fi + echo "" + echo "Creating markets.json file" + echo "" + sudo -E -u stellar $STELLAR_BIN/ticker generate market-data -o $WWW_HOME/markets.json +} + + +# run_silent is a utility function that runs a command with an abbreviated +# output provided it succeeds. +function run_silent() { + local LABEL=$1 + shift + local COMMAND=$1 + shift + local ARGS=$@ + local OUTFILE="/tmp/run_silent.out" + + echo -n "$LABEL: " + set +e + + $COMMAND $ARGS &> $OUTFILE + + if [ $? -eq 0 ]; then + echo "ok" + else + echo "failed!" 
+		echo ""
+		cat $OUTFILE
+		exit 1
+	fi
+
+	set -e
+}
+
+
+function set_db_url() {
+	PGPASS="stellar"
+	export DB_URL="postgres://stellar:${PGPASS}@127.0.0.1:5432/ticker"
+	echo "Setting db url: $DB_URL"
+	echo DB_URL="${DB_URL}" >> /etc/environment
+}
+
+function copy_pgpass() {
+	cp $PGHOME/.pgpass /home/stellar
+	chmod 0600 /home/stellar/.pgpass
+	chown stellar:stellar /home/stellar/.pgpass
+}
+
+
+function init_db() {
+	if [ -f $PGHOME/.quickstart-initialized ]; then
+		echo "postgres: already initialized"
+		return 0
+	fi
+	pushd $PGHOME
+
+	echo "postgres user: $PGUSER"
+
+	set_db_url
+
+	run_silent "finalize-pgpass" sed -ri "s/__PGPASS__/$PGPASS/g" $CONF_HOME/.pgpass
+
+	cp $CONF_HOME/.pgpass $PGHOME/.pgpass
+
+	mkdir -p $PGDATA
+	chown postgres:postgres $PGDATA
+	chmod 0700 $PGDATA
+
+	run_silent "init-postgres" sudo -u postgres $PGBIN/initdb -D $PGDATA
+
+	start_postgres
+	run_silent "create-ticker-db" sudo -u postgres createdb $PGDB
+	run_silent "stellar-postgres-user" sudo -u postgres psql <<-SQL
+		CREATE USER $PGUSER WITH PASSWORD '$PGPASS';
+		GRANT ALL PRIVILEGES ON DATABASE $PGDB to $PGUSER;
+	SQL
+
+	touch .quickstart-initialized
+	popd
+}
+
+
+function start_postgres() {
+	if [ ! -z "$CURRENT_POSTGRES_PID" ]; then
+		return 0
+	fi
+
+	sudo -u postgres $PGBIN/postgres -D $PGDATA -c config_file=$CONF_HOME/postgresql.conf &> /dev/null &
+	CURRENT_POSTGRES_PID=$!
+
+	while ! sudo -u postgres psql -c 'select 1' &> /dev/null ; do
+		echo "Waiting for postgres to be available..."
+		sleep 1
+	done
+
+	echo "postgres: up"
+}
+
+
+function stop_postgres() {
+	if [ -z "$CURRENT_POSTGRES_PID" ]; then
+		return 0
+	fi
+
+	killall postgres
+	# wait for postgres to die
+	while kill -0 "$CURRENT_POSTGRES_PID" &> /dev/null; do
+		sleep 0.5
+	done
+	echo "postgres: down"
+}
+
+
+function exec_supervisor() {
+	echo "starting supervisor"
+	exec supervisord -n -c $CONF_HOME/supervisord.conf
+}
+
+
+pushd () {
+	command pushd "$@" > /dev/null
+}
+
+
+popd () {
+	command popd "$@" > /dev/null
+}
+
+
+main $@
diff --git a/services/ticker/docs/API.md b/services/ticker/docs/API.md
new file mode 100644
index 0000000000..456017da0f
--- /dev/null
+++ b/services/ticker/docs/API.md
@@ -0,0 +1,315 @@
+# Ticker API Documentation
+
+## Market (Ticker) Data
+Provides trade data about each trade pair within the last 7-day period. Asset pairs that did not have any activity in the last 7 days are omitted from the response.
+
+Assets from different issuers but with the same code are aggregated, so trades between, for instance:
+- `native` and `BTC:GDT3ZKQZXXHDPJUKNHUMANMNIT4JWSUYXUGN7EQZDVXBO7NPNFVFPBAK`
+- `native` and `BTC:GATEMHCCKCY67ZUCKTROYN24ZYT5GK4EQZ65JJLDHKHRUZI3EUEKMTCH`
+
+are aggregated in the `XLM_BTC` pair.
+
+### Trade Pairs
+
+Trade pairs are ordered `<counter>_<base>`.
+
+Example:
+
+The pair `XLM_ZZZ` has `XLM` as the counter currency and `ZZZ` as the base. For that pair, if the API returns a `close` value of `2`, then the last trade for the pair exchanged `2 XLM` for `1 ZZZ`.
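To make the `<counter>_<base>` ordering concrete, here is a hypothetical helper that converts a base-asset amount into the counter asset using a pair's `close` price (quoted as counter units per one base unit). It is illustrative only and not part of the ticker code.

```go
package main

import "fmt"

// counterFromBase converts an amount of the base asset into the counter asset
// using a pair's close price, which is quoted as counter units per 1 base unit.
// Hypothetical helper, for illustration only.
func counterFromBase(baseAmount, closePrice float64) float64 {
	return baseAmount * closePrice
}

func main() {
	// For the XLM_ZZZ example above (close = 2): 1 ZZZ corresponds to 2 XLM.
	fmt.Println(counterFromBase(1, 2)) // prints 2
}
```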
+ +### Response Fields + +* `generated_at`: UNIX timestamp of when data was generated +* `generated_at_rfc3339 `: RFC 3339 formatted string of when data was generated +* `name`: name of the trade pair +* `base_volume`: accumulated amount of base traded in the last 24h +* `counter_volume`: accumulated amount of counter traded in the last 24h +* `trade_count`: number of trades in the last 24h +* `open`: open price in the last 24h period +* `low`: lowest price in the last 24h +* `high`: highest price in the last 24h +* `change`: price difference between open and low in the last 24h +* `base_volume_7d`: accumulated amount of base traded in the last 7 days +* `counter_volume_7d`: accumulated amount of counter traded in the last 7 days +* `trade_count_7d`: number of trades in the last 7 days +* `open_7d`: open price in the last 7-day period +* `low_7d`: lowest price in the last 7 days +* `high_7d`: highest price in the last 7 days +* `change_7d`: price difference between open and low in the last 7 days +* `price`: (DEPRECATED) price of the most recent trade in this market +* `close`: price of the most recent trade in this market +* `close_time`: ledger close time of the most recent trade in this market +* `bid_count`: number of open bids on order book +* `bid_volume`: volume of open bids on order book +* `bid_max`: maximum open bid price on order book +* `ask_count`: number of open asks on order book +* `ask_volume`: volume of open asks on order book +* `ask_min`: minimum asked price on order book +* `spread`: spread between bid_max an ask_min +* `spread_mid_point`: spread mid point + +### Example +#### Endpoint +GET `https://ticker.stellar.org/markets.json` +#### Response (application/json) + +```json +{ + "generated_at": 1556828634778, + "generated_at_rfc3339": "2019-05-02T17:23:54-03:00", + "pairs": [ + { + "name": "ABDT_DOP", + "base_volume": 27933.1306978, + "counter_volume": 703779.0492835, + "trade_count": 73, + "open": 0.03987601218950153, + "low": 0.038593480963638155, + "high": 0.03989875591737053, + "change": -0.0011995715564988227, + "base_volume_7d": 199598.77306629982, + "counter_volume_7d": 5004887.537185903, + "trade_count_7d": 488, + "open_7d": 0.03988668687332845, + "low_7d": 0.038593480963638155, + "high_7d": 0.04145936964569084, + "change_7d": -0.0012102462403257436, + "price": 0.038676440633002705, + "close": 0.038676440633002705, + "close_time": "2019-05-02T12:23:57-03:00", + "bid_count": 200, + "bid_volume": 229694.35403809912, + "bid_max": 25.8555333333, + "ask_count": 36, + "ask_volume": 149041.62309569685, + "ask_min": 25.902828723, + "spread": 0.0018258774053509135, + "spread_mid_point": 25.856446272002675 + }, + { + "name": "BTC_CNY", + "base_volume": 0.0737282, + "counter_volume": 2686.9835871000005, + "trade_count": 49, + "open": 0.0000276, + "low": 0.0000269, + "high": 0.0000278, + "change": -3.9999999999999956e-7, + "base_volume_7d": 0.37105660000000024, + "counter_volume_7d": 13616.162691900003, + "trade_count_7d": 285, + "open_7d": 0.0000264, + "low_7d": 0.0000263, + "high_7d": 0.000028, + "change_7d": 7.999999999999991e-7, + "price": 0.0000272, + "close": 0.0000272, + "close_time": "2019-05-02T12:48:41-03:00", + "bid_count": 27, + "bid_volume": 22126.4118872, + "bid_max": 36630.03663003663, + "ask_count": 21, + "ask_volume": 4438.404611090742, + "ask_min": 36900.36900369004, + "spread": 0.007326007326007345, + "spread_mid_point": 36630.04029304029 + } + ] +} +``` +## Asset (Currency) Data +Lists all the valid assets within the Stellar network. 
The provided fields are based on the [Currency Documentation of SEP-0001](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0001.md#currency-documentation) and the [Asset fields from Horizon](https://developers.stellar.org/api/resources/assets/).
+### Response Fields
+
+* `generated_at`: UNIX timestamp of when data was generated
+* `generated_at_rfc3339`: RFC 3339 formatted string of when data was generated
+* `code`: code of the asset
+* `issuer`: token issuer Stellar public key
+* `type`: type of the asset (e.g. `native` or `credit_alphanum4`)
+* `num_accounts`: the number of accounts that trust this asset and, if the asset has the `auth_required` flag set, are authorized to hold it
+* `auth_required`: whether an anchor must approve anyone who wants to hold its asset
+* `auth_revocable`: whether an anchor can set the authorize flag of an existing trustline to freeze the assets held by an asset holder
+* `amount`: number of units of credit issued
+* `asset_controlled_by_domain`: whether the reported issuer domain controls the asset
+* `is_asset_anchored`: whether the asset is anchored to another one
+* `anchor_asset`: the associated anchor asset (e.g. USDT is anchored to USD)
+* `anchor_asset_type`: type of the anchor asset
+* `display_decimals`: preference for number of decimals to show when a client displays currency balance
+* `name`: name of the token
+* `desc`: description of the token
+* `conditions`: conditions on the token
+* `fixed_number`: fixed number of tokens, if the number of tokens issued will never change
+* `max_number`: max number of tokens, if there will never be more than `max_number` tokens
+* `is_unlimited`: whether the number of tokens is dilutable at the issuer's discretion
+* `redemption_instructions`: if this is an anchored token, instructions to redeem the underlying asset from tokens
+* `collateral_addresses`: if this is an anchored crypto token, a list of one or more public addresses that hold the assets for which tokens are issued
+* `collateral_address_signatures`: signatures corresponding to the `collateral_addresses`, when provided
+* `countries`: countries in which the asset is available
+* `status`: status of the token
+* `last_valid`: last time the asset info was validated
+
+### Example
+#### Endpoint
+GET `https://ticker.stellar.org/assets.json`
+
+#### Response (application/json)
+
+```json
+{
+  "generated_at": 1556828621410,
+  "generated_at_rfc3339": "2019-05-02T17:23:41-03:00",
+  "assets": [
+    {
+      "code": "AngelXYZ",
+      "issuer": "GANZBUS4726LBT2CBJ3BGF3TP4NJT5MHZMI423NHMXFRWGO2DCBQEXYZ",
+      "type": "credit_alphanum12",
+      "num_accounts": 282,
+      "auth_required": false,
+      "auth_revocable": false,
+      "amount": 4999999999.999953,
+      "asset_controlled_by_domain": true,
+      "anchor_asset": "",
+      "anchor_asset_type": "",
+      "display_decimals": 0,
+      "name": "",
+      "desc": "",
+      "conditions": "",
+      "is_asset_anchored": false,
+      "fixed_number": 0,
+      "max_number": 0,
+      "is_unlimited": false,
+      "redemption_instructions": "",
+      "collateral_addresses": [],
+      "collateral_address_signatures": [],
+      "countries": "",
+      "status": "",
+      "last_valid": 1555509989002
+    },
+    {
+      "code": "PUSH",
+      "issuer": "GBB5TTFQE5KT3TEBCR7Z3FZR3R3WTVD654XL2KHKVONRIOBEI5UGOFQQ",
+      "type": "credit_alphanum4",
+      "num_accounts": 15,
+      "auth_required": false,
+      "auth_revocable": false,
+      "amount": 1000000000,
+      "asset_controlled_by_domain": true,
+      "anchor_asset": "",
+      "anchor_asset_type": "",
+      "display_decimals": 2,
+      "name": "Push",
+      "desc": "1 PUSH token entitles you to access the push API.",
+      "conditions": "Token used to access the PUSH api to send a push request to the stellar network.",
+      "is_asset_anchored": false,
+      "fixed_number": 0,
+      "max_number": 0,
+      "is_unlimited": false,
+      "redemption_instructions": "",
+      "collateral_addresses": [],
+      "collateral_address_signatures": [],
+      "countries": "",
+      "status": "",
+      "last_valid": 1555509990457
+    }
+  ]
+}
+```
+
+## GraphQL interface
+Asset, issuer, markets and ticker data can be queried through a GraphQL interface, which is also provided by the Ticker.
+
+To explore the GraphQL queries, you can access the GraphiQL URL: https://ticker.stellar.org/graphiql
+
+## Orderbook
+Apart from the orderbook data provided by `markets.json`, orderbook data can be retrieved directly from Horizon. To retrieve `ask` and `bid` data, you must provide the following parameters for the asset pair:
+
+- `selling_asset_type`: type of selling asset (e.g. `native` or `credit_alphanum4`)
+- `selling_asset_code`: code of the selling asset. Omit if `selling_asset_type` = `native`
+- `selling_asset_issuer`: selling asset's issuer ID. Omit if `selling_asset_type` = `native`
+- `buying_asset_type`: type of buying asset (e.g. `native` or `credit_alphanum4`)
+- `buying_asset_code`: code of the buying asset. Omit if `buying_asset_type` = `native`
+- `buying_asset_issuer`: buying asset's issuer ID. Omit if `buying_asset_type` = `native`
+
+The `type`, `code` and `issuer` parameters for any given asset can be found in the Ticker's `assets.json` endpoint described in the previous section.
+
+Full documentation on Horizon's Orderbook endpoint can be found [here](https://developers.stellar.org/api/aggregations/order-books/).
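+
+As an illustration (this snippet is not part of the ticker codebase), the following Go sketch builds the same request as the example below, querying the order book for the native asset (XLM) against the `BTC` asset issued by the account used in the example:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// The selling asset is native (XLM), so its code and issuer are omitted.
+	params := url.Values{}
+	params.Set("selling_asset_type", "native")
+	params.Set("buying_asset_type", "credit_alphanum4")
+	params.Set("buying_asset_code", "BTC")
+	params.Set("buying_asset_issuer", "GATEMHCCKCY67ZUCKTROYN24ZYT5GK4EQZ65JJLDHKHRUZI3EUEKMTCH")
+
+	resp, err := http.Get("https://horizon.stellar.org/order_book?" + params.Encode())
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Print the raw JSON order book, as shown in the example response below.
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(body))
+}
+```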
+
+### Example
+#### Endpoint
+GET `https://horizon.stellar.org/order_book?selling_asset_type=native&buying_asset_type=credit_alphanum4&buying_asset_code=BTC&buying_asset_issuer=GATEMHCCKCY67ZUCKTROYN24ZYT5GK4EQZ65JJLDHKHRUZI3EUEKMTCH`
+
+#### Response (application/json)
+```json
+{
+  "bids": [
+    {
+      "price_r": {
+        "n": 223,
+        "d": 10000000
+      },
+      "price": "0.0000223",
+      "amount": "0.0006261"
+    },
+    {
+      "price_r": {
+        "n": 16469,
+        "d": 739692077
+      },
+      "price": "0.0000223",
+      "amount": "0.0037850"
+    },
+    {
+      "price_r": {
+        "n": 16469,
+        "d": 741750702
+      },
+      "price": "0.0000222",
+      "amount": "0.0037745"
+    },
+    {
+      "price_r": {
+        "n": 111,
+        "d": 5000000
+      },
+      "price": "0.0000222",
+      "amount": "0.0040000"
+    }
+  ],
+  "asks": [
+    {
+      "price_r": {
+        "n": 7,
+        "d": 312500
+      },
+      "price": "0.0000224",
+      "amount": "150.8482143"
+    },
+    {
+      "price_r": {
+        "n": 9,
+        "d": 400000
+      },
+      "price": "0.0000225",
+      "amount": "348.4311112"
+    },
+    {
+      "price_r": {
+        "n": 113,
+        "d": 5000000
+      },
+      "price": "0.0000226",
+      "amount": "335.6238939"
+    }
+  ],
+  "base": {
+    "asset_type": "native"
+  },
+  "counter": {
+    "asset_type": "credit_alphanum4",
+    "asset_code": "BTC",
+    "asset_issuer": "GATEMHCCKCY67ZUCKTROYN24ZYT5GK4EQZ65JJLDHKHRUZI3EUEKMTCH"
+  }
+}
+```
diff --git a/services/ticker/docs/Architecture.md b/services/ticker/docs/Architecture.md
new file mode 100644
index 0000000000..1383a08444
--- /dev/null
+++ b/services/ticker/docs/Architecture.md
@@ -0,0 +1,26 @@
+## Architecture
+
+The proposed solution consists of a set of tasks (which run according to a time schedule) and some services (which run continuously, as daemons) that work together to provide a ticker (via https://ticker.stellar.org) that is highly available and serves data that is as fresh as possible.
+
+The setup and tools used are focused on creating an environment that is easy to replicate and doesn't depend on AWS-specific services, so that Stellar users / developers can easily deploy a ticker of their own if they want. All code created should be open-sourced.
+
+The following diagram illustrates the proposed services, tasks, external sources, containers and some of the data flow for the new Stellar Ticker:
+
+![Stellar Ticker Architecture Overview](images/StellarTicker.png)
+
+Here is a quick overview of each of the proposed services, tasks and other components:
+- **Trade Ingester (service):** connects to the Horizon Trade Stream API in order to stream new trades performed on the Stellar Network and ingests them into the PostgreSQL database.
+- **Market & Assets Data Ingester:** connects to other Horizon APIs to retrieve other important data, such as assets.
+- **Trade Aggregator:** provides the logic for querying / aggregating trade and market data from the database and outputting it to either the JSON Generator or the GraphQL server.
+- **JSON Generator:** gets the data provided by the Trade Aggregator, formats it into the desired JSON format (similar to what we have in http://ticker.stellar.org) and outputs it to a file.
+- **GraphQL Endpoint:** provides a GraphQL interface for users to retrieve aggregated trade data from the Postgres DB.
+- **Web Server (nginx):** routes the client requests to either a) serve the JSON file ("/") or b) forward the request to the GraphQL server ("/graphql").
+- **Psql DB:** a PostgreSQL database to store the relational trade / market / asset data.
+- **Database Cleaner:** since the Ticker keeps data for a limited time range only, this task clears old entries so the database's storage usage doesn't grow considerably over time.
+
+### Considerations
+1. All tasks (Market & Assets Data Ingester, JSON Generator, Database Cleaner) and services (Trade Ingester, GraphQL Endpoint, Web Server) would run within a single container supervised by supervisord, similarly to what is done in Horizon, enabling a very simple and fast deployment.
+1. We could also split each of the tasks / services into separate containers and orchestrate them, but this might defeat the purpose of making it easy to deploy.
+1. The Postgres database could run inside the container, or alternatively we could point to an external database and use docker-compose for local development.
+1. In this architecture, the output of the JSON Generator is saved in the filesystem, but we could also think about uploading the output to S3 and figuring out some smart routing / reverse proxy.
+1. CoinMarketCap uses (at least to some degree) ticker.stellar.org to power its Stellar DEX markets page. Ensure that this keeps working. Hopefully that page will even get better by including more markets.
diff --git a/services/ticker/docs/images/StellarTicker.png b/services/ticker/docs/images/StellarTicker.png
new file mode 100644
index 0000000000..3f56ba4eae
Binary files /dev/null and b/services/ticker/docs/images/StellarTicker.png differ
diff --git a/services/ticker/internal/actions_asset.go b/services/ticker/internal/actions_asset.go
new file mode 100644
index 0000000000..d333c87087
--- /dev/null
+++ b/services/ticker/internal/actions_asset.go
@@ -0,0 +1,176 @@
+package ticker
+
+import (
+	"context"
+	"encoding/json"
+	"strings"
+	"time"
+
+	horizonclient "github.com/stellar/go/clients/horizonclient"
+	"github.com/stellar/go/services/ticker/internal/scraper"
+	"github.com/stellar/go/services/ticker/internal/tickerdb"
+	"github.com/stellar/go/services/ticker/internal/utils"
+	hlog "github.com/stellar/go/support/log"
+)
+
+// RefreshAssets scrapes the most recent asset list and ingests them into the db.
+func RefreshAssets(ctx context.Context, s *tickerdb.TickerSession, c *horizonclient.Client, l *hlog.Entry) (err error) {
+	sc := scraper.ScraperConfig{
+		Client: c,
+		Logger: l,
+	}
+	finalAssetList, err := sc.FetchAllAssets(0, 10)
+	if err != nil {
+		return
+	}
+
+	for _, finalAsset := range finalAssetList {
+		dbIssuer := tomlIssuerToDBIssuer(finalAsset.IssuerDetails)
+		if dbIssuer.PublicKey == "" {
+			dbIssuer.PublicKey = finalAsset.Issuer
+		}
+		issuerID, err := s.InsertOrUpdateIssuer(ctx, &dbIssuer, []string{"public_key"})
+		if err != nil {
+			l.Error("Error inserting issuer:", dbIssuer, err)
+			continue
+		}
+
+		dbAsset := finalAssetToDBAsset(finalAsset, issuerID)
+		err = s.InsertOrUpdateAsset(ctx, &dbAsset, []string{"code", "issuer_account", "issuer_id"})
+		if err != nil {
+			l.Error("Error inserting asset:", dbAsset, err)
+		}
+	}
+
+	return
+}
+
+// GenerateAssetsFile generates a file with the info about all valid scraped assets.
+func GenerateAssetsFile(ctx context.Context, s *tickerdb.TickerSession, l *hlog.Entry, filename string) error {
+	l.Info("Retrieving asset data from db...")
+	var assets []Asset
+	validAssets, err := s.GetAssetsWithNestedIssuer(ctx)
+	if err != nil {
+		return err
+	}
+
+	for _, dbAsset := range validAssets {
+		asset := dbAssetToAsset(dbAsset)
+		assets = append(assets, asset)
+	}
+	l.Info("Asset data successfully retrieved! 
Writing to: ", filename) + now := time.Now() + assetSummary := AssetSummary{ + GeneratedAt: utils.TimeToUnixEpoch(now), + GeneratedAtRFC3339: utils.TimeToRFC3339(now), + Assets: assets, + } + numBytes, err := writeAssetSummaryToFile(assetSummary, filename) + if err != nil { + return err + } + l.Infof("Wrote %d bytes to %s\n", numBytes, filename) + return nil +} + +// writeAssetSummaryToFile creates a list of assets exported in a JSON file. +func writeAssetSummaryToFile(assetSummary AssetSummary, filename string) (numBytes int, err error) { + jsonAssets, err := json.MarshalIndent(assetSummary, "", "\t") + if err != nil { + return + } + + numBytes, err = utils.WriteJSONToFile(jsonAssets, filename) + if err != nil { + return + } + return +} + +// finalAssetToDBAsset converts a scraper.TOMLAsset to a tickerdb.Asset. +func finalAssetToDBAsset(asset scraper.FinalAsset, issuerID int32) tickerdb.Asset { + return tickerdb.Asset{ + Code: asset.Code, + IssuerID: issuerID, + IssuerAccount: asset.Issuer, + Type: asset.Type, + NumAccounts: asset.NumAccounts, + AuthRequired: asset.AuthRequired, + AuthRevocable: asset.AuthRevocable, + Amount: asset.Amount, + AssetControlledByDomain: asset.AssetControlledByDomain, + AnchorAssetCode: asset.AnchorAsset, + AnchorAssetType: asset.AnchorAssetType, + IsValid: asset.IsValid, + ValidationError: asset.Error, + LastValid: asset.LastValid, + LastChecked: asset.LastChecked, + DisplayDecimals: asset.DisplayDecimals, + Name: asset.Name, + Desc: asset.Desc, + Conditions: asset.Conditions, + IsAssetAnchored: asset.IsAssetAnchored, + FixedNumber: asset.FixedNumber, + MaxNumber: asset.MaxNumber, + IsUnlimited: asset.IsUnlimited, + RedemptionInstructions: asset.RedemptionInstructions, + CollateralAddresses: strings.Join(asset.CollateralAddresses, ","), + CollateralAddressSignatures: strings.Join(asset.CollateralAddressSignatures, ","), + Countries: asset.Countries, + Status: asset.Status, + } +} + +// dbAssetToAsset converts a tickerdb.Asset to an Asset. 
+func dbAssetToAsset(dbAsset tickerdb.Asset) (a Asset) { + collAddrs := strings.Split(dbAsset.CollateralAddresses, ",") + if len(collAddrs) == 1 && collAddrs[0] == "" { + collAddrs = []string{} + } + + collAddrSigns := strings.Split(dbAsset.CollateralAddressSignatures, ",") + if len(collAddrSigns) == 1 && collAddrSigns[0] == "" { + collAddrSigns = []string{} + } + + a.Code = dbAsset.Code + a.Issuer = dbAsset.IssuerAccount + a.Type = dbAsset.Type + a.NumAccounts = dbAsset.NumAccounts + a.AuthRequired = dbAsset.AuthRequired + a.AuthRevocable = dbAsset.AuthRevocable + a.Amount = dbAsset.Amount + a.AssetControlledByDomain = dbAsset.AssetControlledByDomain + a.AnchorAsset = dbAsset.AnchorAssetCode + a.AnchorAssetType = dbAsset.AnchorAssetType + a.LastValidTimestamp = utils.TimeToRFC3339(dbAsset.LastValid) + a.DisplayDecimals = dbAsset.DisplayDecimals + a.Name = dbAsset.Name + a.Desc = dbAsset.Desc + a.Conditions = dbAsset.Conditions + a.IsAssetAnchored = dbAsset.IsAssetAnchored + a.FixedNumber = dbAsset.FixedNumber + a.MaxNumber = dbAsset.MaxNumber + a.IsUnlimited = dbAsset.IsUnlimited + a.RedemptionInstructions = dbAsset.RedemptionInstructions + a.CollateralAddresses = collAddrs + a.CollateralAddressSignatures = collAddrSigns + a.Countries = dbAsset.Countries + a.Status = dbAsset.Status + + i := Issuer{ + PublicKey: dbAsset.Issuer.PublicKey, + Name: dbAsset.Issuer.Name, + URL: dbAsset.Issuer.URL, + TOMLURL: dbAsset.Issuer.TOMLURL, + FederationServer: dbAsset.Issuer.FederationServer, + AuthServer: dbAsset.Issuer.AuthServer, + TransferServer: dbAsset.Issuer.TransferServer, + WebAuthEndpoint: dbAsset.Issuer.WebAuthEndpoint, + DepositServer: dbAsset.Issuer.DepositServer, + OrgTwitter: dbAsset.Issuer.OrgTwitter, + } + a.IssuerDetail = i + + return +} diff --git a/services/ticker/internal/actions_graphql.go b/services/ticker/internal/actions_graphql.go new file mode 100644 index 0000000000..c812bf0125 --- /dev/null +++ b/services/ticker/internal/actions_graphql.go @@ -0,0 +1,13 @@ +package ticker + +import ( + "github.com/stellar/go/services/ticker/internal/gql" + "github.com/stellar/go/services/ticker/internal/tickerdb" + hlog "github.com/stellar/go/support/log" +) + +func StartGraphQLServer(s *tickerdb.TickerSession, l *hlog.Entry, port string) { + graphql := gql.New(s, l) + + graphql.Serve(port) +} diff --git a/services/ticker/internal/actions_issuer.go b/services/ticker/internal/actions_issuer.go new file mode 100644 index 0000000000..16be516f36 --- /dev/null +++ b/services/ticker/internal/actions_issuer.go @@ -0,0 +1,21 @@ +package ticker + +import ( + "github.com/stellar/go/services/ticker/internal/scraper" + "github.com/stellar/go/services/ticker/internal/tickerdb" +) + +func tomlIssuerToDBIssuer(issuer scraper.TOMLIssuer) tickerdb.Issuer { + return tickerdb.Issuer{ + PublicKey: issuer.SigningKey, + Name: issuer.Documentation.OrgName, + URL: issuer.Documentation.OrgURL, + TOMLURL: issuer.TOMLURL, + FederationServer: issuer.FederationServer, + AuthServer: issuer.AuthServer, + TransferServer: issuer.TransferServer, + WebAuthEndpoint: issuer.WebAuthEndpoint, + DepositServer: issuer.DepositServer, + OrgTwitter: issuer.Documentation.OrgTwitter, + } +} diff --git a/services/ticker/internal/actions_market.go b/services/ticker/internal/actions_market.go new file mode 100644 index 0000000000..38737e7b58 --- /dev/null +++ b/services/ticker/internal/actions_market.go @@ -0,0 +1,96 @@ +package ticker + +import ( + "context" + "encoding/json" + "time" + + 
"github.com/stellar/go/services/ticker/internal/tickerdb" + "github.com/stellar/go/services/ticker/internal/utils" + hlog "github.com/stellar/go/support/log" +) + +// GenerateMarketSummaryFile generates a MarketSummary with the statistics for all +// valid markets within the database and outputs it to . +func GenerateMarketSummaryFile(s *tickerdb.TickerSession, l *hlog.Entry, filename string) error { + l.Info("Generating market data...") + marketSummary, err := GenerateMarketSummary(s) + if err != nil { + return err + } + l.Info("Market data successfully generated!") + + jsonMkt, err := json.MarshalIndent(marketSummary, "", " ") + if err != nil { + return err + } + + l.Info("Writing market data to: ", filename) + numBytes, err := utils.WriteJSONToFile(jsonMkt, filename) + if err != nil { + return err + } + l.Infof("Wrote %d bytes to %s\n", numBytes, filename) + return nil +} + +// GenerateMarketSummary outputs a MarketSummary with the statistics for all +// valid markets within the database. +func GenerateMarketSummary(s *tickerdb.TickerSession) (ms MarketSummary, err error) { + var marketStatsSlice []MarketStats + now := time.Now() + nowMillis := utils.TimeToUnixEpoch(now) + nowRFC339 := utils.TimeToRFC3339(now) + ctx := context.Background() + + dbMarkets, err := s.RetrieveMarketData(ctx) + if err != nil { + return + } + + for _, dbMarket := range dbMarkets { + marketStats := dbMarketToMarketStats(dbMarket) + marketStatsSlice = append(marketStatsSlice, marketStats) + } + + ms = MarketSummary{ + GeneratedAt: nowMillis, + GeneratedAtRFC3339: nowRFC339, + Pairs: marketStatsSlice, + } + return +} + +func dbMarketToMarketStats(m tickerdb.Market) MarketStats { + closeTime := utils.TimeToRFC3339(m.LastPriceCloseTime) + + spread, spreadMidPoint := utils.CalcSpread(m.HighestBid, m.LowestAsk) + return MarketStats{ + TradePairName: m.TradePair, + BaseVolume24h: m.BaseVolume24h, + CounterVolume24h: m.CounterVolume24h, + TradeCount24h: m.TradeCount24h, + Open24h: m.OpenPrice24h, + Low24h: m.LowestPrice24h, + High24h: m.HighestPrice24h, + Change24h: m.PriceChange24h, + BaseVolume7d: m.BaseVolume7d, + CounterVolume7d: m.CounterVolume7d, + TradeCount7d: m.TradeCount7d, + Open7d: m.OpenPrice7d, + Low7d: m.LowestPrice7d, + High7d: m.HighestPrice7d, + Change7d: m.PriceChange7d, + Price: m.LastPrice, + Close: m.LastPrice, + BidCount: m.NumBids, + BidVolume: m.BidVolume, + BidMax: m.HighestBid, + AskCount: m.NumAsks, + AskVolume: m.AskVolume, + AskMin: m.LowestAsk, + Spread: spread, + SpreadMidPoint: spreadMidPoint, + CloseTime: closeTime, + } +} diff --git a/services/ticker/internal/actions_orderbook.go b/services/ticker/internal/actions_orderbook.go new file mode 100644 index 0000000000..51f40ba9d8 --- /dev/null +++ b/services/ticker/internal/actions_orderbook.go @@ -0,0 +1,67 @@ +package ticker + +import ( + "context" + "time" + + horizonclient "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/services/ticker/internal/scraper" + "github.com/stellar/go/services/ticker/internal/tickerdb" + "github.com/stellar/go/support/errors" + hlog "github.com/stellar/go/support/log" +) + +// RefreshOrderbookEntries updates the orderbook entries for the relevant markets that were active +// in the past 7-day interval +func RefreshOrderbookEntries(s *tickerdb.TickerSession, c *horizonclient.Client, l *hlog.Entry) error { + sc := scraper.ScraperConfig{ + Client: c, + Logger: l, + } + ctx := context.Background() + + // Retrieve relevant markets for the past 7 days (168 hours): + mkts, err := 
s.Retrieve7DRelevantMarkets(ctx) + if err != nil { + return errors.Wrap(err, "could not retrieve partial markets") + } + + for _, mkt := range mkts { + ob, err := sc.FetchOrderbookForAssets( + mkt.BaseAssetType, + mkt.BaseAssetCode, + mkt.BaseAssetIssuer, + mkt.CounterAssetType, + mkt.CounterAssetCode, + mkt.CounterAssetIssuer, + ) + if err != nil { + l.Error(errors.Wrap(err, "could not fetch orderbook for assets")) + continue + } + + dbOS := orderbookStatsToDBOrderbookStats(ob, mkt.BaseAssetID, mkt.CounterAssetID) + err = s.InsertOrUpdateOrderbookStats(ctx, &dbOS, []string{"base_asset_id", "counter_asset_id"}) + if err != nil { + l.Error(errors.Wrap(err, "could not insert orderbook stats into db")) + } + } + + return nil +} + +func orderbookStatsToDBOrderbookStats(os scraper.OrderbookStats, bID, cID int32) tickerdb.OrderbookStats { + return tickerdb.OrderbookStats{ + BaseAssetID: bID, + CounterAssetID: cID, + NumBids: os.NumBids, + BidVolume: os.BidVolume, + HighestBid: os.HighestBid, + NumAsks: os.NumAsks, + AskVolume: os.AskVolume, + LowestAsk: os.LowestAsk, + Spread: os.Spread, + SpreadMidPoint: os.SpreadMidPoint, + UpdatedAt: time.Now(), + } +} diff --git a/services/ticker/internal/actions_trade.go b/services/ticker/internal/actions_trade.go new file mode 100644 index 0000000000..5397b35dce --- /dev/null +++ b/services/ticker/internal/actions_trade.go @@ -0,0 +1,172 @@ +package ticker + +import ( + "context" + "errors" + "fmt" + "math/big" + "strconv" + "time" + + horizonclient "github.com/stellar/go/clients/horizonclient" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/ticker/internal/scraper" + "github.com/stellar/go/services/ticker/internal/tickerdb" + hlog "github.com/stellar/go/support/log" +) + +// StreamTrades constantly streams and ingests new trades directly from horizon. +func StreamTrades( + ctx context.Context, + s *tickerdb.TickerSession, + c *horizonclient.Client, + l *hlog.Entry, +) error { + sc := scraper.ScraperConfig{ + Client: c, + Logger: l, + Ctx: &ctx, + } + handler := func(trade hProtocol.Trade) { + l.Infof("New trade arrived. ID: %v; Close Time: %v\n", trade.ID, trade.LedgerCloseTime) + scraper.NormalizeTradeAssets(&trade) + bID, cID, err := findBaseAndCounter(ctx, s, trade) + if err != nil { + l.Error(err) + return + } + dbTrade, err := hProtocolTradeToDBTrade(trade, bID, cID) + if err != nil { + l.Error(err) + return + } + + err = s.BulkInsertTrades(ctx, []tickerdb.Trade{dbTrade}) + if err != nil { + l.Error("Could not insert trade in database: ", trade.ID) + } + } + + // Ensure we start streaming from the last stored trade + lastTrade, err := s.GetLastTrade(ctx) + if err != nil { + return err + } + + cursor := lastTrade.HorizonID + return sc.StreamNewTrades(cursor, handler) +} + +// BackfillTrades ingest the most recent trades (limited to numDays) directly from Horizon +// into the database. 
+func BackfillTrades( + ctx context.Context, + s *tickerdb.TickerSession, + c *horizonclient.Client, + l *hlog.Entry, + numHours int, + limit int, +) error { + sc := scraper.ScraperConfig{ + Client: c, + Logger: l, + } + now := time.Now() + since := now.Add(time.Hour * -time.Duration(numHours)) + trades, err := sc.FetchAllTrades(since, limit) + if err != nil { + return err + } + + var dbTrades []tickerdb.Trade + + for _, trade := range trades { + var bID, cID int32 + bID, cID, err = findBaseAndCounter(ctx, s, trade) + if err != nil { + continue + } + + var dbTrade tickerdb.Trade + dbTrade, err = hProtocolTradeToDBTrade(trade, bID, cID) + if err != nil { + l.Error("Could not convert entry to DB Trade: ", err) + continue + } + dbTrades = append(dbTrades, dbTrade) + } + + l.Infof("Inserting %d entries in the database.\n", len(dbTrades)) + err = s.BulkInsertTrades(ctx, dbTrades) + if err != nil { + fmt.Println(err) + } + + return nil +} + +// findBaseAndCounter tries to find the Base and Counter assets IDs in the database, +// and returns an error if it doesn't find any. +func findBaseAndCounter(ctx context.Context, s *tickerdb.TickerSession, trade hProtocol.Trade) (bID int32, cID int32, err error) { + bFound, bID, err := s.GetAssetByCodeAndIssuerAccount( + ctx, + trade.BaseAssetCode, + trade.BaseAssetIssuer, + ) + if err != nil { + return + } + + cFound, cID, err := s.GetAssetByCodeAndIssuerAccount( + ctx, + trade.CounterAssetCode, + trade.CounterAssetIssuer, + ) + if err != nil { + return + } + + if !bFound || !cFound { + err = errors.New("base or counter asset no found") + return + } + + return +} + +// hProtocolTradeToDBTrade converts from a hProtocol.Trade to a tickerdb.Trade +func hProtocolTradeToDBTrade( + hpt hProtocol.Trade, + baseAssetID int32, + counterAssetID int32, +) (trade tickerdb.Trade, err error) { + fBaseAmount, err := strconv.ParseFloat(hpt.BaseAmount, 64) + if err != nil { + return + } + fCounterAmount, err := strconv.ParseFloat(hpt.CounterAmount, 64) + if err != nil { + return + } + + rPrice := big.NewRat(int64(hpt.Price.D), int64(hpt.Price.N)) + fPrice, _ := rPrice.Float64() + + trade = tickerdb.Trade{ + HorizonID: hpt.ID, + LedgerCloseTime: hpt.LedgerCloseTime, + OfferID: hpt.OfferID, + BaseOfferID: hpt.BaseOfferID, + BaseAccount: hpt.BaseAccount, + BaseAmount: fBaseAmount, + BaseAssetID: baseAssetID, + CounterOfferID: hpt.CounterOfferID, + CounterAccount: hpt.CounterAccount, + CounterAmount: fCounterAmount, + CounterAssetID: counterAssetID, + BaseIsSeller: hpt.BaseIsSeller, + Price: fPrice, + } + + return +} diff --git a/services/ticker/internal/actions_trade_test.go b/services/ticker/internal/actions_trade_test.go new file mode 100644 index 0000000000..6b0147e53c --- /dev/null +++ b/services/ticker/internal/actions_trade_test.go @@ -0,0 +1,38 @@ +package ticker + +import ( + "fmt" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestProtocolTradeToDBTrade_priceMapping(t *testing.T) { + testCases := []struct { + N int64 + D int64 + WantPrice float64 + }{ + {100, 200, 2}, + {1, 2, 2}, + {1187492342, 283724929, 0.23892779680763618}, + } + for _, tc := range testCases { + name := fmt.Sprintf("%d/%d=%f", tc.N, tc.D, tc.WantPrice) + t.Run(name, func(t *testing.T) { + hpt := hProtocol.Trade{ + BaseAmount: "0", + CounterAmount: "0", + Price: hProtocol.TradePrice{ + N: tc.N, + D: tc.D, + }, + } + dbTrade, err := hProtocolTradeToDBTrade(hpt, 0, 0) + require.NoError(t, 
err) + assert.Equal(t, tc.WantPrice, dbTrade.Price) + }) + } +} diff --git a/services/ticker/internal/gql/bigint.go b/services/ticker/internal/gql/bigint.go new file mode 100644 index 0000000000..8a37b737cd --- /dev/null +++ b/services/ticker/internal/gql/bigint.go @@ -0,0 +1,39 @@ +/* +This file implements an interface to this application's +custom BigInt GraphQL scalar type. It can receive values +of type int, int32 and int64. +*/ + +package gql + +import ( + "errors" + "strconv" +) + +type BigInt int + +func (BigInt) ImplementsGraphQLType(name string) bool { + return name == "BigInt" +} + +func (bigInt *BigInt) UnmarshalGraphQL(input interface{}) error { + var err error + + switch input := input.(type) { + case int: + *bigInt = BigInt(input) + case int32: + *bigInt = BigInt(int(input)) + case int64: + *bigInt = BigInt(int(input)) + default: + err = errors.New("wrong type") + } + + return err +} + +func (bigInt BigInt) MarshalJSON() ([]byte, error) { + return strconv.AppendInt(nil, int64(bigInt), 10), nil +} diff --git a/services/ticker/internal/gql/graphiql.go b/services/ticker/internal/gql/graphiql.go new file mode 100644 index 0000000000..6627c80b2a --- /dev/null +++ b/services/ticker/internal/gql/graphiql.go @@ -0,0 +1,28 @@ +package gql + +import ( + "bytes" + "fmt" + "net/http" + + "github.com/stellar/go/services/ticker/internal/gql/static" +) + +// GraphiQL is an in-browser IDE for exploring GraphiQL APIs. +// This handler returns GraphiQL when requested. +// +// For more information, see https://github.com/graphql/graphiql. +type GraphiQL struct{} + +func (h GraphiQL) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + w.WriteHeader(http.StatusMethodNotAllowed) + buf := bytes.Buffer{} + fmt.Fprint(&buf, `{"error": "Only GET is allowed"}`) + _, _ = w.Write(buf.Bytes()) + return + } + + graphiql, _ := static.Asset("graphiql.html") + _, _ = w.Write(graphiql) +} diff --git a/services/ticker/internal/gql/main.go b/services/ticker/internal/gql/main.go new file mode 100644 index 0000000000..bb385a3c7a --- /dev/null +++ b/services/ticker/internal/gql/main.go @@ -0,0 +1,123 @@ +package gql + +import ( + "net/http" + "time" + + "github.com/graph-gophers/graphql-go" + "github.com/graph-gophers/graphql-go/relay" + "github.com/stellar/go/services/ticker/internal/gql/static" + "github.com/stellar/go/services/ticker/internal/tickerdb" + hlog "github.com/stellar/go/support/log" +) + +// asset represents a Stellar asset, with some type +// adaptations to match the GraphQL type system +type asset struct { + Code string + IssuerAccount string + Type string + NumAccounts int32 + AuthRequired bool + AuthRevocable bool + Amount float64 + AssetControlledByDomain bool + AnchorAssetCode string + AnchorAssetType string + IsValid bool + DisplayDecimals BigInt + Name string + Desc string + Conditions string + IsAssetAnchored bool + FixedNumber BigInt + MaxNumber BigInt + IsUnlimited bool + RedemptionInstructions string + CollateralAddresses string + CollateralAddressSignatures string + Countries string + Status string + IssuerID int32 + OrderbookStats orderbookStats +} + +// partialMarket represents the aggregated market data for a +// specific pair of assets since +type partialMarket struct { + TradePair string + BaseAssetCode string + BaseAssetIssuer string + CounterAssetCode string + CounterAssetIssuer string + BaseVolume float64 + CounterVolume float64 + TradeCount int32 + Open float64 + Low float64 + High float64 + Change float64 + Close float64 + IntervalStart graphql.Time + 
FirstLedgerCloseTime graphql.Time + LastLedgerCloseTime graphql.Time + OrderbookStats orderbookStats +} + +// orderbookStats represents the orderbook stats for a +// specific pair of assets (aggregated or not) +type orderbookStats struct { + BidCount BigInt + BidVolume float64 + BidMax float64 + AskCount BigInt + AskVolume float64 + AskMin float64 + Spread float64 + SpreadMidPoint float64 +} + +type resolver struct { + db *tickerdb.TickerSession + logger *hlog.Entry +} + +// New creates a new GraphQL resolver +func New(s *tickerdb.TickerSession, l *hlog.Entry) *resolver { + if s == nil { + panic("A valid database session must be provided for the GraphQL server") + } + return &resolver{db: s, logger: l} +} + +// Serve creates a GraphQL interface on
/graphql and a GraphiQL explorer on /graphiql +func (r *resolver) Serve(address string) { + relayHandler := r.NewRelayHandler() + mux := http.NewServeMux() + mux.Handle("/graphql", http.HandlerFunc(func(wr http.ResponseWriter, re *http.Request) { + r.logger.Infof("%s %s %s\n", re.RemoteAddr, re.Method, re.URL) + relayHandler.ServeHTTP(wr, re) + })) + mux.Handle("/graphiql", GraphiQL{}) + + server := &http.Server{ + Addr: address, + Handler: mux, + ReadTimeout: 5 * time.Second, + } + r.logger.Infof("Starting to serve on address %s\n", address) + + if err := server.ListenAndServe(); err != nil { + r.logger.Error("server.ListenAndServe:", err) + } +} + +// NewRelayHandler sets up the response handler. +func (r *resolver) NewRelayHandler() relay.Handler { + opts := []graphql.SchemaOpt{graphql.UseFieldResolvers()} + r.logger.Info("Validating GraphQL schema") + s := graphql.MustParseSchema(static.Schema(), r, opts...) + r.logger.Infof("Schema Validated!") + relayHandler := relay.Handler{Schema: s} + return relayHandler +} diff --git a/services/ticker/internal/gql/main_test.go b/services/ticker/internal/gql/main_test.go new file mode 100644 index 0000000000..3444ceee3b --- /dev/null +++ b/services/ticker/internal/gql/main_test.go @@ -0,0 +1,14 @@ +package gql + +import ( + "testing" + + "github.com/graph-gophers/graphql-go" + "github.com/stellar/go/services/ticker/internal/gql/static" +) + +func TestValidateSchema(t *testing.T) { + r := resolver{} + opts := []graphql.SchemaOpt{graphql.UseFieldResolvers()} + graphql.MustParseSchema(static.Schema(), &r, opts...) +} diff --git a/services/ticker/internal/gql/resolvers_asset.go b/services/ticker/internal/gql/resolvers_asset.go new file mode 100644 index 0000000000..a385220105 --- /dev/null +++ b/services/ticker/internal/gql/resolvers_asset.go @@ -0,0 +1,55 @@ +package gql + +import ( + "context" + "errors" + + "github.com/stellar/go/services/ticker/internal/tickerdb" +) + +// Assets resolves the assets() GraphQL query. 
+func (r *resolver) Assets(ctx context.Context) (assets []*asset, err error) { + dbAssets, err := r.db.GetAllValidAssets(ctx) + if err != nil { + // obfuscating sql errors to avoid exposing underlying + // implementation + err = errors.New("could not retrieve the requested data") + return + } + + for _, dbAsset := range dbAssets { + assets = append(assets, dbAssetToAsset(dbAsset)) + } + return +} + +// dbAssetToAsset converts a tickerdb.Asset to an *asset +func dbAssetToAsset(dbAsset tickerdb.Asset) *asset { + return &asset{ + Code: dbAsset.Code, + IssuerAccount: dbAsset.IssuerAccount, + Type: dbAsset.Type, + NumAccounts: dbAsset.NumAccounts, + AuthRequired: dbAsset.AuthRequired, + AuthRevocable: dbAsset.AuthRevocable, + Amount: dbAsset.Amount, + AssetControlledByDomain: dbAsset.AssetControlledByDomain, + AnchorAssetCode: dbAsset.AnchorAssetCode, + AnchorAssetType: dbAsset.AnchorAssetType, + IsValid: dbAsset.IsValid, + DisplayDecimals: BigInt(dbAsset.DisplayDecimals), + Name: dbAsset.Name, + Desc: dbAsset.Desc, + Conditions: dbAsset.Conditions, + IsAssetAnchored: dbAsset.IsAssetAnchored, + FixedNumber: BigInt(dbAsset.FixedNumber), + MaxNumber: BigInt(dbAsset.MaxNumber), + IsUnlimited: dbAsset.IsUnlimited, + RedemptionInstructions: dbAsset.RedemptionInstructions, + CollateralAddresses: dbAsset.CollateralAddresses, + CollateralAddressSignatures: dbAsset.CollateralAddressSignatures, + Countries: dbAsset.Countries, + Status: dbAsset.Status, + IssuerID: dbAsset.IssuerID, + } +} diff --git a/services/ticker/internal/gql/resolvers_issuer.go b/services/ticker/internal/gql/resolvers_issuer.go new file mode 100644 index 0000000000..15bca92e5d --- /dev/null +++ b/services/ticker/internal/gql/resolvers_issuer.go @@ -0,0 +1,24 @@ +package gql + +import ( + "context" + "errors" + + "github.com/stellar/go/services/ticker/internal/tickerdb" +) + +// Issuers resolves the issuers() GraphQL query. +func (r *resolver) Issuers(ctx context.Context) (issuers []*tickerdb.Issuer, err error) { + dbIssuers, err := r.db.GetAllIssuers(ctx) + if err != nil { + // obfuscating sql errors to avoid exposing underlying + // implementation + err = errors.New("could not retrieve the requested data") + } + + for i := range dbIssuers { + issuers = append(issuers, &dbIssuers[i]) + } + + return issuers, err +} diff --git a/services/ticker/internal/gql/resolvers_market.go b/services/ticker/internal/gql/resolvers_market.go new file mode 100644 index 0000000000..96b4d5cfdc --- /dev/null +++ b/services/ticker/internal/gql/resolvers_market.go @@ -0,0 +1,120 @@ +package gql + +import ( + "context" + "errors" + + "github.com/graph-gophers/graphql-go" + "github.com/stellar/go/services/ticker/internal/tickerdb" + "github.com/stellar/go/services/ticker/internal/utils" +) + +// Markets resolves the markets() GraphQL query. 
+func (r *resolver) Markets(ctx context.Context, args struct { + BaseAssetCode *string + BaseAssetIssuer *string + CounterAssetCode *string + CounterAssetIssuer *string + NumHoursAgo *int32 +}) (partialMarkets []*partialMarket, err error) { + numHours, err := validateNumHoursAgo(args.NumHoursAgo) + if err != nil { + return + } + + dbMarkets, err := r.db.RetrievePartialMarkets(ctx, + args.BaseAssetCode, + args.BaseAssetIssuer, + args.CounterAssetCode, + args.CounterAssetIssuer, + numHours, + ) + if err != nil { + // obfuscating sql errors to avoid exposing underlying + // implementation + err = errors.New("could not retrieve the requested data") + return + } + + for _, dbMkt := range dbMarkets { + partialMarkets = append(partialMarkets, dbMarketToPartialMarket(dbMkt)) + } + return +} + +// Ticker resolves the ticker() GraphQL query (TODO) +func (r *resolver) Ticker(ctx context.Context, + args struct { + Code *string + PairName *string + NumHoursAgo *int32 + }, +) (partialMarkets []*partialMarket, err error) { + numHours, err := validateNumHoursAgo(args.NumHoursAgo) + if err != nil { + return + } + + dbMarkets, err := r.db.RetrievePartialAggMarkets(ctx, args.PairName, numHours) + if err != nil { + // obfuscating sql errors to avoid exposing underlying + // implementation + err = errors.New("could not retrieve the requested data") + return + } + + for _, dbMkt := range dbMarkets { + partialMarkets = append(partialMarkets, dbMarketToPartialMarket(dbMkt)) + } + return + +} + +// validateNumHoursAgo validates if the numHoursAgo parameter is within an acceptable +// time range (at most 168 hours ago = 7 days) +func validateNumHoursAgo(n *int32) (int, error) { + if n == nil { + return 24, nil // default numHours = 24 + } + + if *n <= 168 { + return int(*n), nil + } + + return 0, errors.New("numHoursAgo cannot be greater than 168 (7 days)") +} + +// dbMarketToPartialMarket converts a tickerdb.PartialMarket to a *partialMarket +func dbMarketToPartialMarket(dbMarket tickerdb.PartialMarket) *partialMarket { + spread, spreadMidPoint := utils.CalcSpread(dbMarket.HighestBid, dbMarket.LowestAsk) + os := orderbookStats{ + BidCount: BigInt(dbMarket.NumBids), + BidVolume: dbMarket.BidVolume, + BidMax: dbMarket.HighestBid, + AskCount: BigInt(dbMarket.NumAsks), + AskVolume: dbMarket.AskVolume, + AskMin: dbMarket.LowestAsk, + Spread: spread, + SpreadMidPoint: spreadMidPoint, + } + + return &partialMarket{ + TradePair: dbMarket.TradePairName, + BaseAssetCode: dbMarket.BaseAssetCode, + BaseAssetIssuer: dbMarket.BaseAssetIssuer, + CounterAssetCode: dbMarket.CounterAssetCode, + CounterAssetIssuer: dbMarket.CounterAssetIssuer, + BaseVolume: dbMarket.BaseVolume, + CounterVolume: dbMarket.CounterVolume, + TradeCount: dbMarket.TradeCount, + Open: dbMarket.Open, + Low: dbMarket.Low, + High: dbMarket.High, + Change: dbMarket.Change, + Close: dbMarket.Close, + IntervalStart: graphql.Time{Time: dbMarket.IntervalStart}, + FirstLedgerCloseTime: graphql.Time{Time: dbMarket.FirstLedgerCloseTime}, + LastLedgerCloseTime: graphql.Time{Time: dbMarket.LastLedgerCloseTime}, + OrderbookStats: os, + } +} diff --git a/services/ticker/internal/gql/static/bindata.go b/services/ticker/internal/gql/static/bindata.go new file mode 100644 index 0000000000..3295ebab36 --- /dev/null +++ b/services/ticker/internal/gql/static/bindata.go @@ -0,0 +1,294 @@ +// Code generated by go-bindata. DO NOT EDIT. 
+// sources: +// graphiql.html (1.182kB) +// schema.gql (2.42kB) + +package static + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _graphiqlHtml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x54\x4f\x6f\x13\x3f\x10\x3d\x6f\x3e\x85\x7f\x96\x7e\xd2\x46\x2a\x76\x52\x24\x0e\x9b\x4d\x0e\xd0\xaa\x02\x15\x4a\x81\x0b\x47\xd7\x9e\x5d\x3b\x78\xed\xed\xd8\x9b\x36\xaa\xf2\xdd\x91\xf7\x4f\x28\x7f\x2a\x21\x04\x17\xaf\x3d\x7e\xf3\xde\xf3\xcc\x68\xcb\xff\xce\xae\x5e\x7d\xfa\xfc\xfe\x9c\xe8\xd8\xd8\xcd\xac\x1c\x3e\x59\xa9\x41\xa8\xcd\x2c\xcb\x4a\x6b\xdc\x17\x82\x60\xd7\x34\xc4\xbd\x85\xa0\x01\x22\x25\x1a\xa1\x5a\x53\x1d\x63\x1b\x0a\xce\xa5\x72\xdb\xc0\xa4\xf5\x9d\xaa\xac\x40\x60\xd2\x37\x5c\x6c\xc5\x3d\xb7\xe6\x26\xf0\x1a\x45\xab\xcd\xad\xe5\x0b\xb6\x5c\xb2\xe5\xf2\x18\x60\x32\x04\xca\x7b\x99\x20\xd1\xb4\x91\x04\x94\xbf\x4d\x5b\x41\x94\x9a\x9f\xb2\x05\x7b\x3e\xec\x59\x63\x1c\xdb\x06\xba\x29\xf9\x40\xf7\xa7\xcc\x08\x42\x46\xbe\x7c\xc1\x4e\xd9\x82\x77\x8d\x1a\x02\xac\x45\xaf\x3a\x19\x8d\x77\x7f\x57\xe9\x99\xf2\xcd\x4f\x6a\x29\xf8\x2f\x14\x9f\x6e\xc6\x2f\x14\x4a\x3e\xce\x41\x79\xe3\xd5\x9e\xf4\x13\xb0\xa6\x77\x46\x45\x5d\x90\xe5\x62\xf1\xff\x8a\x68\x30\xb5\x8e\xd3\xa9\x11\x58\x1b\x57\x90\xc5\x8a\xf8\x1d\x60\x65\xfd\x5d\x41\xb4\x51\x0a\xdc\x8a\xf6\x96\x95\xd9\x11\xa3\xd6\x74\x92\xa5\x13\xeb\x23\xa2\x9d\x5e\xd1\xcd\xa5\x17\xca\xb8\x9a\x31\x56\x72\x65\x76\x8f\xde\x9b\xb6\x59\xd5\xb9\xbe\x30\xa4\x6f\xfd\xc5\xf5\x65\xde\x0a\x14\x4d\x98\x93\x87\x74\x9d\x21\xc4\x0e\xc7\xdb\x9c\x0e\xaf\xbc\xb5\xf4\x64\xbc\xce\x1a\x88\xda\xab\x82\xd0\xd6\x87\x48\x4f\x86\x60\x7a\x65\x41\xde\x7c\xbc\x7a\xc7\x42\x44\xe3\x6a\x53\xed\x27\xde\x11\x22\x11\x14\xb8\x68\x84\x0d\x05\xa1\xc6\x49\xdb\x29\x18\xf3\x0f\x73\x16\x35\xb8\xfc\xe8\x2d\x47\x08\xed\xe4\x68\xb2\x94\x62\x2c\xc2\x7d\xcc\xe7\xab\x27\xd2\x92\x8f\x63\x5a\xc4\xfd\xb4\x9d\x28\x7a\x87\xad\xc0\x00\x03\x74\xe0\xc9\x0e\x44\x8a\x28\x35\xc9\x01\xd1\xe3\xfc\xc7\xac\x04\x9d\x90\xa3\x70\x7f\x3c\xcc\xd2\xfa\x21\x4d\xdd\xd9\xd5\x5b\x86\xe0\x14\x60\xde\x23\xfa\x20\x93\x08\x22\xc2\xb9\x85\x06\x5c\xcc\x2f\xfa\xce\x5d\x5f\x9e\x90\x87\xbe\xba\x80\xc5\xb1\x09\x87\xb1\x4c\xca\xcb\x2e\x81\x59\x0d\x71\xcc\x7b\xb9\x7f\xad\xf2\x6f\x6d\x9f\x27\x5c\x5a\xbe\x1b\xb7\x64\x71\x33\x2b\xf9\xf0\x1b\xfa\x1a\x00\x00\xff\xff\xdb\x8e\x2c\x18\x9e\x04\x00\x00") + +func graphiqlHtmlBytes() ([]byte, error) { + return bindataRead( + _graphiqlHtml, + "graphiql.html", + ) +} + +func graphiqlHtml() (*asset, error) { + bytes, err := 
graphiqlHtmlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "graphiql.html", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x76, 0x8, 0xb4, 0x3a, 0xe7, 0xdb, 0xc8, 0x3d, 0x2d, 0x1f, 0x1c, 0x2d, 0xd3, 0x9b, 0xf2, 0xd8, 0xe5, 0xd6, 0x5f, 0x3a, 0x7c, 0x6d, 0x80, 0xf7, 0x40, 0xdc, 0x58, 0xf1, 0x75, 0xbd, 0xf0, 0xa1}} + return a, nil +} + +var _schemaGql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe4\x54\x51\x6f\xe3\x36\x0c\x7e\xb6\x7f\x05\xdb\xbd\xb4\x2f\x79\x18\xf6\x64\x6c\x03\xd2\x76\xc3\x8a\x35\xb7\xdb\xa5\x37\x0c\x28\x86\x81\xb1\x18\x87\x88\x2c\xf9\x28\x29\x6d\x70\xe8\x7f\x1f\x24\x27\xa9\x6c\xb7\xdd\x0f\xb8\x27\x5b\x24\x3f\x8a\xfc\xf4\x91\xae\xde\x50\x8b\xf0\xb5\x2c\xbe\x04\x92\x7d\x05\xc5\x9f\xf1\x5b\x3e\x97\xa5\xdf\x77\x04\xe9\x14\xdd\xdf\x81\x90\x17\xa6\x1d\x01\x6a\x0d\x3b\xd4\xac\xd0\x93\x02\x74\x8e\xbc\x03\x6b\xc0\x6f\x08\x96\x9e\xb4\x46\x01\x43\xfe\xd1\xca\x76\x56\x16\xbd\xbf\x82\x87\x79\xfc\x39\xfb\xe7\xac\x7c\x27\x19\x3b\x17\x48\xde\xc9\x76\x08\xa8\xe0\xe1\x36\xfd\x4d\xf2\x79\x41\x45\xe0\x3c\x7a\x07\x6b\xb1\x6d\xca\xa3\xd1\x79\xf8\xd1\x84\xf6\x37\x1b\xc4\xcd\x1b\xfb\x33\x6c\xe2\x5f\x44\x5e\x28\x5a\x63\xd0\x1e\x7e\x82\xef\x7f\xe8\xcd\x97\x33\xb0\x9d\x67\x6b\x50\xeb\x3d\x74\x62\x77\xac\x08\x6a\x1b\x8c\x27\x01\x34\x2a\xe2\x56\xe8\xa8\x6f\x1e\xd8\xac\x2d\xac\xad\xc0\x9a\xb5\x27\x61\xd3\xcc\xca\xa2\x45\xd9\x92\x77\x17\x65\x51\xc4\xd0\xd4\xfd\xb5\x55\x54\xc1\xd2\xc7\x90\xdc\xde\xf7\x92\x79\x0e\x77\xbd\x06\xca\x5d\x13\x5c\xd6\x62\x05\xb7\xc6\x97\xc5\x65\x05\x0f\x8b\x54\xca\x84\xf9\xa6\x11\x6a\x12\xed\x03\xd2\xac\xbc\xc1\x59\x44\x27\x7e\x5e\xa5\x07\xa1\x43\x96\x0f\xd8\x12\x5c\xd0\xac\x99\xc1\xf9\xdf\x77\x8b\x7f\xaf\xee\xaf\xcf\xc1\x0a\x20\x44\xb4\x63\xd3\x68\x82\x3a\x88\x90\xa9\xf7\x59\xe0\xf9\xe5\x90\x40\x10\x72\x41\x7b\x37\x2b\x0b\xcf\xf5\x96\x24\xf2\x78\xbc\xe0\x7f\x1b\x9e\x9f\x5a\x3b\xb5\xfe\x5c\x96\xae\xc6\x28\xa6\x2b\x6e\x62\xe0\xe1\x74\xcf\x2d\x1d\xb4\x9e\x28\x8d\x5a\xaf\x33\xc6\xcf\x8e\x9a\x9b\xd7\x89\xf9\xcc\x1e\x41\xd9\xd1\x84\xf6\x10\xe3\x52\x29\x67\x65\x81\xc1\x6f\x3e\xd1\x97\xc0\x42\xaa\x82\x2b\x6b\x35\xa1\x39\xd9\x77\xb6\xc6\x95\xa6\x81\xa3\xed\xef\xf8\x55\x5b\x4c\x09\x7a\x01\x18\x2f\x56\x6b\x52\x57\xfb\x1b\xdb\x22\x9b\x01\xc4\xd4\x1b\x3b\x55\xca\xd0\x73\x3f\x2c\x95\x5d\xb2\xce\x53\xc0\xb0\x34\xc5\xae\xd3\xb8\xbf\xa1\x9a\x5b\xd4\xae\x3a\xd0\x15\xfb\xcb\x98\x8f\x81\xe4\xea\xec\x58\x5b\xa3\x38\x8a\xc2\x65\xc6\x35\x3f\x91\xfa\x10\xda\x55\x14\xe9\x29\x51\x8b\x4f\x13\x1b\xbb\xcf\x46\x73\xcb\x7e\x58\x8d\x90\xa2\x36\x69\xed\xd6\x38\x2f\xa1\x1e\xdf\x50\x5b\xad\xd1\x93\xa0\x9e\x2b\x25\xe4\x1c\xbd\xeb\x5d\x72\x63\xd0\x07\x19\x45\x05\x13\x67\x22\xb7\xc5\x59\x08\x6e\x22\x82\xdb\x9b\xc3\xd3\x1e\xf7\x63\xaf\xaf\x28\x9a\x34\x43\x1f\x91\x25\x03\xbd\x3a\xf8\xb9\x7d\x38\xc0\xc7\x5a\x5e\x19\xfc\x91\x6b\x82\x8b\x19\xff\xb2\x3a\xc4\x27\x3a\x8a\xe7\x00\x18\x9b\x53\xa1\xd7\xbd\xce\x7a\xf2\x6d\x47\xe6\xc5\xaf\xed\xe3\xcb\x61\xc3\xcd\x26\xcb\xb8\x41\xd3\xe4\x37\x68\xeb\xb2\x23\xc7\xeb\x76\xa8\x97\x1e\xc5\x57\x69\xb4\x92\x08\xc4\xf9\x3b\x52\x0d\xc9\x75\x8c\x8f\xe6\x93\x33\x6e\x99\xb7\x7c\x56\x14\xc9\xca\xda\xed\x32\x2e\xa6\x0a\xfe\x18\x9c\x5f\xde\x60\x3c\xed\xef\xbd\xc6\xb7\xca\xd1\xd0\x0e\x5f\x4b\x28\x56\xac\x0e\x1d\x9e\xa6\x70\xc5\x6a\xcc\xc4\x8a\xd5\x02\x9f\xf2\x8d\xb4\x1d\xa3\xd0\x6d\xc7\x28\x74\xdb\x05\x67\x7c\xb9\x4e\x08\xd5\xf8\xbc\x60\xf5\xd1\x72\xb6\xef\x8e\xd5\xf6\xf2\x8e\xef\xd8\x85\x95\xe6\xfa\x77\xda\xe7\x8b\x76\xb8\x88\x82\xe8\x7c\x29\xdb\x56\x7f\xfe\x74\x97\x2f\x21\x52\x24\x18\x
17\xc7\x92\x64\x37\x98\x9a\xb8\x87\x27\x46\x2f\x68\xdc\x9a\x64\xe2\x78\xa4\xd5\x3c\xf8\xcd\x2f\x46\x75\x7d\xd5\xd9\x2e\xec\xac\x63\x3f\x41\x58\x69\xee\x1f\xd9\xfb\xdc\xf8\x5c\xfe\x17\x00\x00\xff\xff\x20\x26\x65\x82\x74\x09\x00\x00") + +func schemaGqlBytes() ([]byte, error) { + return bindataRead( + _schemaGql, + "schema.gql", + ) +} + +func schemaGql() (*asset, error) { + bytes, err := schemaGqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "schema.gql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf7, 0x3c, 0xa2, 0xb0, 0x7, 0x29, 0xef, 0xb6, 0x66, 0x0, 0x23, 0x1e, 0xf8, 0xf4, 0x6a, 0x79, 0x4f, 0xa5, 0xd8, 0xec, 0x24, 0x85, 0xfe, 0xa, 0xfd, 0xe9, 0xc6, 0x27, 0x6c, 0xf1, 0xd8, 0x46}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. 
+func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "graphiql.html": graphiqlHtml, + "schema.gql": schemaGql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "graphiql.html": &bintree{graphiqlHtml, map[string]*bintree{}}, + "schema.gql": &bintree{schemaGql, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) +} diff --git a/services/ticker/internal/gql/static/graphiql.html b/services/ticker/internal/gql/static/graphiql.html new file mode 100644 index 0000000000..a81cb7443a --- /dev/null +++ b/services/ticker/internal/gql/static/graphiql.html @@ -0,0 +1,35 @@ + + + + + + + + + + +
Loading...
+ + + diff --git a/services/ticker/internal/gql/static/schema.go b/services/ticker/internal/gql/static/schema.go new file mode 100644 index 0000000000..c886b2bab3 --- /dev/null +++ b/services/ticker/internal/gql/static/schema.go @@ -0,0 +1,28 @@ +package static + +import ( + "bytes" + "strings" +) + +//go:generate go run github.com/kevinburke/go-bindata/go-bindata@v3.18.0+incompatible -nometadata -ignore=\.go -pkg=static -o=bindata.go ./... + +// Schema reads the .gql schema files from the generated _bindata.go file, concatenating the +// files together into one string. +func Schema() string { + buf := bytes.Buffer{} + + for _, name := range AssetNames() { + if strings.Contains(name, ".gql") { + b := MustAsset(name) + buf.Write(b) + + // Add a newline if the file does not end in a newline. + if len(b) > 0 && b[len(b)-1] != '\n' { + buf.WriteByte('\n') + } + } + } + + return buf.String() +} diff --git a/services/ticker/internal/gql/static/schema.gql b/services/ticker/internal/gql/static/schema.gql new file mode 100644 index 0000000000..4db8674f48 --- /dev/null +++ b/services/ticker/internal/gql/static/schema.gql @@ -0,0 +1,120 @@ +schema { + query: Query +} + +type Query { + # retrieve all validated assets on the Stellar network. + assets: [Asset!]! + + # retrieve all validated issuers on the Stellar network. + issuers: [Issuer!]! + + # retrieve trade stats from the last hours + # (default = 24 hours). optionally provide counter and + # base asset info for filtering. + markets( + baseAssetCode: String + baseAssetIssuer: String + counterAssetCode: String + counterAssetIssuer: String + numHoursAgo: Int + ): [Market]! + + # retrieve aggregated trade stats for the last + # hours. optionally provide a pairName (e.g. "XLM_BTC" or a + # single currency (e.g. "XLM") for filtering results. + ticker( + pairName: String + numHoursAgo: Int + ): [AggregatedMarket]! +} + +scalar BigInt +scalar Time + +type Asset { + code: String! + issuerAccount: String! + type: String! + numAccounts: Int! + authRequired: Boolean! + authRevocable: Boolean! + amount: Float! + assetControlledByDomain: Boolean! + anchorAssetCode: String! + anchorAssetType: String! + isAssetAnchored: Boolean! + displayDecimals: BigInt! + name: String! + desc: String! + conditions: String! + fixedNumber: BigInt! + maxNumber: BigInt! + isUnlimited: Boolean! + redemptionInstructions: String! + collateralAddresses: String! + collateralAddressSignatures: String! + countries: String! + status: String! + issuerID: Int! +} + +type Market { + tradePair: String! + baseAssetCode: String! + baseAssetIssuer: String! + counterAssetCode: String! + counterAssetIssuer: String! + baseVolume: Float! + counterVolume: Float! + tradeCount: Int! + open: Float! + low: Float! + high: Float! + change: Float! + close: Float! + intervalStart: Time! + firstLedgerCloseTime: Time! + lastLedgerCloseTime: Time! + orderbookStats: OrderbookStats! +} + +type AggregatedMarket { + tradePair: String! + baseVolume: Float! + counterVolume: Float! + tradeCount: Int! + open: Float! + low: Float! + high: Float! + change: Float! + close: Float! + intervalStart: Time! + firstLedgerCloseTime: Time! + lastLedgerCloseTime: Time! + orderbookStats: OrderbookStats! +} + +type OrderbookStats { + bidCount: BigInt! + bidVolume: Float! + bidMax: Float! + askCount: BigInt! + askVolume: Float! + askMin: Float! + spread: Float! + spreadMidPoint: Float! +} + +type Issuer { + publicKey: String! + name: String! + url: String! + tomlURL: String! + federationServer: String! + authServer: String! 
+ transferServer: String! + webAuthEndpoint: String! + depositServer: String! + orgTwitter: String! +} diff --git a/services/ticker/internal/gql/static/schema_test.go b/services/ticker/internal/gql/static/schema_test.go new file mode 100644 index 0000000000..5f260a26ca --- /dev/null +++ b/services/ticker/internal/gql/static/schema_test.go @@ -0,0 +1,28 @@ +package static + +import ( + "net/http" + "os" + "strings" + "testing" + + assetfs "github.com/elazarl/go-bindata-assetfs" + "github.com/shurcooL/httpfs/filter" + + supportHttp "github.com/stellar/go/support/http" +) + +func TestGeneratedAssets(t *testing.T) { + var localAssets http.FileSystem = filter.Skip(http.Dir("."), func(path string, fi os.FileInfo) bool { + return !fi.IsDir() && strings.HasSuffix(path, ".go") + }) + generatedAssets := &assetfs.AssetFS{ + Asset: Asset, + AssetDir: AssetDir, + AssetInfo: AssetInfo, + } + + if !supportHttp.EqualFileSystems(localAssets, generatedAssets, "/") { + t.Fatalf("generated migrations does not match local migrations") + } +} diff --git a/services/ticker/internal/graphql_test.go b/services/ticker/internal/graphql_test.go new file mode 100644 index 0000000000..245187530d --- /dev/null +++ b/services/ticker/internal/graphql_test.go @@ -0,0 +1,174 @@ +package ticker + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/go-chi/chi" + + "github.com/stellar/go/services/ticker/internal/gql" + "github.com/stellar/go/services/ticker/internal/tickerdb/tickerdbtest" + hlog "github.com/stellar/go/support/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTicker_btcEth(t *testing.T) { + session := tickerdbtest.SetupTickerTestSession(t, "./tickerdb/migrations") + defer session.DB.Close() + + logger := hlog.New() + resolver := gql.New(&session, logger) + h := resolver.NewRelayHandler() + m := chi.NewMux() + m.Post("/graphql", h.ServeHTTP) + + req := `{ + "query": "query getTicker() { ticker(pairName: \"BTC_ETH\", numHoursAgo: 24) { tradePair, baseVolume, counterVolume, tradeCount, open, close, high, low, orderbookStats { bidCount, bidVolume, bidMax, askCount, askVolume, askMin, spread, spreadMidPoint, } } }", + "operationName": "getTicker", + "variables": {} +}` + r := httptest.NewRequest("POST", "/graphql", strings.NewReader(req)) + w := httptest.NewRecorder() + + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + wantBody := `{ + "data": { + "ticker": [ + { + "tradePair": "BTC_ETH", + "baseVolume": 174, + "counterVolume": 86, + "tradeCount": 3, + "open": 1, + "close": 0.92, + "high": 1.0, + "low": 0.1, + "orderbookStats": { + "bidCount": 16, + "bidVolume": 0.25, + "bidMax": 200, + "askCount": 18, + "askVolume": 45, + "askMin": 0.1, + "spread": -1999, + "spreadMidPoint": -799.5 + } + } + ] + } +}` + assert.JSONEq(t, wantBody, string(body)) +} + +func TestMarkets(t *testing.T) { + session := tickerdbtest.SetupTickerTestSession(t, "./tickerdb/migrations") + defer session.DB.Close() + + logger := hlog.New() + resolver := gql.New(&session, logger) + h := resolver.NewRelayHandler() + m := chi.NewMux() + m.Post("/graphql", h.ServeHTTP) + + issuerPK := "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + + query := fmt.Sprintf(` + query getMarkets() { + markets( + baseAssetCode: \"BTC\", + baseAssetIssuer: \"%s\", + 
counterAssetCode: \"ETH\", + counterAssetIssuer: \"%s\", + numHoursAgo: 24 + ) + { + tradePair, + baseAssetCode, + baseAssetIssuer, + counterAssetCode, + counterAssetIssuer, + baseVolume, + counterVolume, + tradeCount, + open, + low, + high, + close, + change, + orderbookStats { + bidCount, + bidVolume, + bidMax, + askCount, + askVolume, + askMin + } + } + }`, issuerPK, issuerPK) + req := fmt.Sprintf(`{ + "query": "%s", + "operationName": "getMarkets", + "variables": {} + }`, formatMultiline(query)) + t.Log(req) + + wantBody := fmt.Sprintf(` + {"data":{"markets": [{ + "tradePair": "BTC:%s / ETH:%s", + "baseAssetCode": "BTC", + "baseAssetIssuer":"%s", + "counterAssetCode": "ETH", + "counterAssetIssuer":"%s", + "baseVolume": 150, + "counterVolume": 60, + "tradeCount": 2, + "open":1, + "low":0.1, + "high":1, + "close":0.1, + "change":-0.9, + "orderbookStats": { + "bidCount": 15, + "bidVolume": 0.15, + "bidMax": 200, + "askCount": 17, + "askVolume": 30, + "askMin": 0.1 + } + }]}}`, issuerPK, issuerPK, issuerPK, issuerPK) + testRequest(t, m, req, wantBody) +} + +func testRequest(t *testing.T, m *chi.Mux, req, wantBody string) { + r := httptest.NewRequest("POST", "/graphql", strings.NewReader(req)) + w := httptest.NewRecorder() + + m.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + assert.JSONEq(t, wantBody, string(body)) +} + +func formatMultiline(s string) string { + s = strings.ReplaceAll(s, "\n", "") + return strings.ReplaceAll(s, "\t", "") +} diff --git a/services/ticker/internal/main.go b/services/ticker/internal/main.go new file mode 100644 index 0000000000..bc2cfc96d2 --- /dev/null +++ b/services/ticker/internal/main.go @@ -0,0 +1,73 @@ +package ticker + +import ( + "github.com/stellar/go/services/ticker/internal/scraper" +) + +// MarketSummary represents a summary of statistics of all valid markets +// within a given period of time. +type MarketSummary struct { + GeneratedAt int64 `json:"generated_at"` + GeneratedAtRFC3339 string `json:"generated_at_rfc3339"` + Pairs []MarketStats `json:"pairs"` +} + +// Market stats represents the statistics of a specific market (identified by +// a trade pair). +type MarketStats struct { + TradePairName string `json:"name"` + BaseVolume24h float64 `json:"base_volume"` + CounterVolume24h float64 `json:"counter_volume"` + TradeCount24h int64 `json:"trade_count"` + Open24h float64 `json:"open"` + Low24h float64 `json:"low"` + High24h float64 `json:"high"` + Change24h float64 `json:"change"` + BaseVolume7d float64 `json:"base_volume_7d"` + CounterVolume7d float64 `json:"counter_volume_7d"` + TradeCount7d int64 `json:"trade_count_7d"` + Open7d float64 `json:"open_7d"` + Low7d float64 `json:"low_7d"` + High7d float64 `json:"high_7d"` + Change7d float64 `json:"change_7d"` + Price float64 `json:"price"` + Close float64 `json:"close"` + CloseTime string `json:"close_time"` + BidCount int `json:"bid_count"` + BidVolume float64 `json:"bid_volume"` + BidMax float64 `json:"bid_max"` + AskCount int `json:"ask_count"` + AskVolume float64 `json:"ask_volume"` + AskMin float64 `json:"ask_min"` + Spread float64 `json:"spread"` + SpreadMidPoint float64 `json:"spread_mid_point"` +} + +// Asset Sumary represents the collection of valid assets. 
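+// When marshaled to JSON, the asset list is emitted under the "assets" key,
+// alongside the generation timestamps.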
+type AssetSummary struct { + GeneratedAt int64 `json:"generated_at"` + GeneratedAtRFC3339 string `json:"generated_at_rfc3339"` + Assets []Asset `json:"assets"` +} + +// Asset represent the aggregated data for a given asset. +type Asset struct { + scraper.FinalAsset + + IssuerDetail Issuer `json:"issuer_detail"` + LastValidTimestamp string `json:"last_valid"` +} + +// Issuer represents the aggregated data for a given issuer. +type Issuer struct { + PublicKey string `json:"public_key"` + Name string `json:"name"` + URL string `json:"url"` + TOMLURL string `json:"toml_url"` + FederationServer string `json:"federation_server"` + AuthServer string `json:"auth_server"` + TransferServer string `json:"transfer_server"` + WebAuthEndpoint string `json:"web_auth_endpoint"` + DepositServer string `json:"deposit_server"` + OrgTwitter string `json:"org_twitter"` +} diff --git a/services/ticker/internal/scraper/asset_scraper.go b/services/ticker/internal/scraper/asset_scraper.go new file mode 100644 index 0000000000..6fc1c942d3 --- /dev/null +++ b/services/ticker/internal/scraper/asset_scraper.go @@ -0,0 +1,349 @@ +package scraper + +import ( + "fmt" + "io/ioutil" + "math" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/BurntSushi/toml" + + horizonclient "github.com/stellar/go/clients/horizonclient" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/ticker/internal/utils" + "github.com/stellar/go/support/errors" +) + +// shouldDiscardAsset maps the criteria for discarding an asset from the asset index +func shouldDiscardAsset(asset hProtocol.AssetStat, shouldValidateTOML bool) bool { + if asset.Amount == "" { + return true + } + f, _ := strconv.ParseFloat(asset.Amount, 64) + if f == 0.0 { + return true + } + // [StellarX Ticker]: assets need at least some adoption to show up + if asset.NumAccounts < 10 { + return true + } + if asset.Code == "REMOVE" { + return true + } + // [StellarX Ticker]: assets with at least 100 accounts get a pass, + // even with toml issues + if asset.NumAccounts >= 100 { + return false + } + + if shouldValidateTOML { + if asset.Links.Toml.Href == "" { + return true + } + // [StellarX Ticker]: TOML files should be hosted on HTTPS + if !strings.HasPrefix(asset.Links.Toml.Href, "https://") { + return true + } + } + + return false +} + +// decodeTOMLIssuer decodes retrieved TOML issuer data into a TOMLIssuer struct +func decodeTOMLIssuer(tomlData string) (issuer TOMLIssuer, err error) { + _, err = toml.Decode(tomlData, &issuer) + return +} + +// fetchTOMLData fetches the TOML data for a given hProtocol.AssetStat +func fetchTOMLData(asset hProtocol.AssetStat) (data string, err error) { + tomlURL := asset.Links.Toml.Href + + if tomlURL == "" { + err = errors.New("Asset does not have a TOML URL") + return + } + + timeout := time.Duration(10 * time.Second) + client := http.Client{ + Timeout: timeout, + } + + req, err := http.NewRequest("GET", tomlURL, nil) + if err != nil { + err = errors.Wrap(err, "invalid URL or request") + return + } + + req.Header.Set("User-Agent", "Stellar Ticker v1.0") + resp, err := client.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + data = string(body) + return +} + +func domainsMatch(tomlURL *url.URL, orgURL *url.URL) bool { + tomlDomainParts := strings.Split(tomlURL.Host, ".") + orgDomainParts := strings.Split(orgURL.Host, ".") + + if len(orgDomainParts) < len(tomlDomainParts) { + // Org can 
only be a subdomain if it has more (or equal) + // pieces than the TOML domain + return false + } + + lenDiff := len(orgDomainParts) - len(tomlDomainParts) + orgDomainParts = orgDomainParts[lenDiff:] + orgRootDomain := strings.Join(orgDomainParts, ".") + return tomlURL.Host == orgRootDomain +} + +// isDomainVerified performs some checking to ensure we can trust the Asset's domain +func isDomainVerified(orgURL string, tomlURL string, hasCurrency bool) bool { + if tomlURL == "" { + return false + } + + parsedTomlURL, err := url.Parse(tomlURL) + if err != nil || parsedTomlURL.Scheme != "https" { + return false + } + + if !hasCurrency { + return false + } + + if orgURL == "" { + // if no orgURL is provided, we'll simply use tomlURL, so no need + // for domain verification + return true + } + + parsedOrgURL, err := url.Parse(orgURL) + if err != nil || parsedOrgURL.Scheme != "https" { + return false + } + + if !domainsMatch(parsedTomlURL, parsedOrgURL) { + return false + } + return true +} + +// makeTomlAsset aggregates Horizon Data with TOML Data +func makeFinalAsset( + asset hProtocol.AssetStat, + issuer TOMLIssuer, + errors []error, +) (t FinalAsset, err error) { + amount, err := strconv.ParseFloat(asset.Amount, 64) + if err != nil { + return + } + + t = FinalAsset{ + Type: asset.Type, + Code: asset.Code, + Issuer: asset.Issuer, + NumAccounts: asset.NumAccounts, + AuthRequired: asset.Flags.AuthRequired, + AuthRevocable: asset.Flags.AuthRevocable, + Amount: amount, + IssuerDetails: issuer, + } + + t.IssuerDetails.TOMLURL = asset.Links.Toml.Href + + hasCurrency := false + for _, currency := range t.IssuerDetails.Currencies { + if currency.Code == asset.Code && currency.Issuer == asset.Issuer { + hasCurrency = true + t.AnchorAsset = currency.AnchorAsset + t.AnchorAssetType = currency.AnchorAssetType + t.DisplayDecimals = currency.DisplayDecimals + t.Name = currency.Name + t.Desc = currency.Desc + t.Conditions = currency.Conditions + t.IsAssetAnchored = currency.IsAssetAnchored + t.FixedNumber = currency.FixedNumber + t.MaxNumber = currency.MaxNumber + t.IsUnlimited = currency.IsUnlimited + t.RedemptionInstructions = currency.RedemptionInstructions + t.CollateralAddresses = currency.CollateralAddresses + t.CollateralAddressSignatures = currency.CollateralAddressSignatures + t.Status = currency.Status + break + } + } + t.AssetControlledByDomain = isDomainVerified( + t.IssuerDetails.Documentation.OrgURL, + asset.Links.Toml.Href, + hasCurrency, + ) + + if !hasCurrency { + t.AssetControlledByDomain = false + } + + now := time.Now() + if len(errors) > 0 { + t.Error = fmt.Sprintf("%v", errors) + t.IsValid = false + } else { + t.LastValid = now + t.IsValid = true + } + t.LastChecked = now + t.AnchorAssetType = strings.ToLower(t.AnchorAssetType) + + return +} + +// processAsset merges data from an AssetStat with data retrieved from its corresponding TOML file +func processAsset(asset hProtocol.AssetStat, shouldValidateTOML bool) (FinalAsset, error) { + var errors []error + var issuer TOMLIssuer + + if shouldValidateTOML { + tomlData, err := fetchTOMLData(asset) + if err != nil { + errors = append(errors, err) + } + + issuer, err = decodeTOMLIssuer(tomlData) + if err != nil { + errors = append(errors, err) + } + } + + return makeFinalAsset(asset, issuer, errors) +} + +// parallelProcessAssets filters the assets that don't match the shouldDiscardAsset criteria. +// The TOML validation is performed in parallel to improve performance. 
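+// Assets are split into chunks of ceil(len(assets)/parallelism); each worker goroutine
+// processes one chunk, and discarded or failing entries are still sent to the results
+// channel (flagged as IsTrash) so the WaitGroup count matches the total number of assets.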
+func (c *ScraperConfig) parallelProcessAssets(assets []hProtocol.AssetStat, parallelism int) (cleanAssets []FinalAsset, numTrash int) { + queue := make(chan FinalAsset, parallelism) + shouldValidateTOML := c.Client != horizonclient.DefaultTestNetClient // TOMLs shouldn't be validated on TestNet + + var mutex = &sync.Mutex{} + var wg sync.WaitGroup + numAssets := len(assets) + chunkSize := int(math.Ceil(float64(numAssets) / float64(parallelism))) + wg.Add(numAssets) + + // The assets are divided into chunks of chunkSize, and each goroutine is responsible + // for cleaning up one chunk + for i := 0; i < parallelism; i++ { + go func(start int) { + end := start + chunkSize + + if end > numAssets { + end = numAssets + } + + for j := start; j < end; j++ { + if !shouldDiscardAsset(assets[j], shouldValidateTOML) { + finalAsset, err := processAsset(assets[j], shouldValidateTOML) + if err != nil { + mutex.Lock() + numTrash++ + mutex.Unlock() + // Invalid assets are also sent to the queue to preserve + // the WaitGroup count + queue <- FinalAsset{IsTrash: true} + continue + } + queue <- finalAsset + } else { + mutex.Lock() + numTrash++ + mutex.Unlock() + // Discarded assets are also sent to the queue to preserve + // the WaitGroup count + queue <- FinalAsset{IsTrash: true} + } + } + }(i * chunkSize) + } + + // Whenever a new asset is sent to the channel, it is appended to the cleanAssets + // slice. This does not preserve the original order, but shouldn't be an issue + // in this case. + go func() { + count := 0 + for t := range queue { + count++ + if !t.IsTrash { + cleanAssets = append(cleanAssets, t) + } + c.Logger.Debug("Total assets processed:", count) + wg.Done() + } + }() + + wg.Wait() + close(queue) + + return +} + +// retrieveAssets retrieves existing assets from the Horizon API. If limit=0, will fetch all assets. +func (c *ScraperConfig) retrieveAssets(limit int) (assets []hProtocol.AssetStat, err error) { + r := horizonclient.AssetRequest{Limit: 200} + + assetsPage, err := c.Client.Assets(r) + if err != nil { + return + } + + c.Logger.Info("Fetching assets from Horizon") + + for assetsPage.Links.Next.Href != assetsPage.Links.Self.Href { + err = utils.Retry(5, 5*time.Second, c.Logger, func() error { + assetsPage, err = c.Client.Assets(r) + if err != nil { + c.Logger.Info("Horizon rate limit reached!") + } + return err + }) + if err != nil { + return + } + assets = append(assets, assetsPage.Embedded.Records...) 
+ + if limit != 0 { // for performance reasons, only perform these additional checks when limit != 0 + numAssets := len(assets) + if numAssets >= limit { + diff := numAssets - limit + assets = assets[0 : numAssets-diff] + break + } + } + + nextURL := assetsPage.Links.Next.Href + n, err := nextCursor(nextURL) + if err != nil { + return assets, err + } + c.Logger.Debug("Cursor currently at:", n) + + r = horizonclient.AssetRequest{Limit: 200, Cursor: n} + } + + c.Logger.Infof("Fetched: %d assets\n", len(assets)) + return +} diff --git a/services/ticker/internal/scraper/asset_scraper_test.go b/services/ticker/internal/scraper/asset_scraper_test.go new file mode 100644 index 0000000000..8e19717513 --- /dev/null +++ b/services/ticker/internal/scraper/asset_scraper_test.go @@ -0,0 +1,149 @@ +package scraper + +import ( + "net/url" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/render/hal" + "github.com/stretchr/testify/assert" +) + +func TestShouldDiscardAsset(t *testing.T) { + testAsset := hProtocol.AssetStat{ + Amount: "", + } + + assert.Equal(t, shouldDiscardAsset(testAsset, true), true) + + testAsset = hProtocol.AssetStat{ + Amount: "0.0", + } + assert.Equal(t, shouldDiscardAsset(testAsset, true), true) + + testAsset = hProtocol.AssetStat{ + Amount: "0", + } + assert.Equal(t, shouldDiscardAsset(testAsset, true), true) + + testAsset = hProtocol.AssetStat{ + Amount: "123901.0129310", + NumAccounts: 8, + } + assert.Equal(t, shouldDiscardAsset(testAsset, true), true) + + testAsset = hProtocol.AssetStat{ + Amount: "123901.0129310", + NumAccounts: 12, + } + testAsset.Code = "REMOVE" + assert.Equal(t, shouldDiscardAsset(testAsset, true), true) + + testAsset = hProtocol.AssetStat{ + Amount: "123901.0129310", + NumAccounts: 100, + } + testAsset.Code = "SOMETHINGVALID" + testAsset.Links.Toml.Href = "" + assert.Equal(t, shouldDiscardAsset(testAsset, true), false) + + testAsset = hProtocol.AssetStat{ + Amount: "123901.0129310", + NumAccounts: 40, + } + testAsset.Code = "SOMETHINGVALID" + testAsset.Links.Toml.Href = "http://www.stellar.org/.well-known/stellar.toml" + assert.Equal(t, shouldDiscardAsset(testAsset, true), true) + + testAsset = hProtocol.AssetStat{ + Amount: "123901.0129310", + NumAccounts: 40, + } + testAsset.Code = "SOMETHINGVALID" + testAsset.Links.Toml.Href = "" + assert.Equal(t, shouldDiscardAsset(testAsset, true), true) + + testAsset = hProtocol.AssetStat{ + Amount: "123901.0129310", + NumAccounts: 40, + } + testAsset.Code = "SOMETHINGVALID" + testAsset.Links.Toml.Href = "https://www.stellar.org/.well-known/stellar.toml" + assert.Equal(t, shouldDiscardAsset(testAsset, true), false) +} + +func TestDomainsMatch(t *testing.T) { + tomlURL, _ := url.Parse("https://stellar.org/stellar.toml") + orgURL, _ := url.Parse("https://stellar.org/") + assert.True(t, domainsMatch(tomlURL, orgURL)) + + tomlURL, _ = url.Parse("https://assets.stellar.org/stellar.toml") + orgURL, _ = url.Parse("https://stellar.org/") + assert.False(t, domainsMatch(tomlURL, orgURL)) + + tomlURL, _ = url.Parse("https://stellar.org/stellar.toml") + orgURL, _ = url.Parse("https://home.stellar.org/") + assert.True(t, domainsMatch(tomlURL, orgURL)) + + tomlURL, _ = url.Parse("https://stellar.org/stellar.toml") + orgURL, _ = url.Parse("https://home.stellar.com/") + assert.False(t, domainsMatch(tomlURL, orgURL)) + + tomlURL, _ = url.Parse("https://stellar.org/stellar.toml") + orgURL, _ = url.Parse("https://stellar.com/") + assert.False(t, 
domainsMatch(tomlURL, orgURL)) +} + +func TestIsDomainVerified(t *testing.T) { + tomlURL := "https://stellar.org/stellar.toml" + orgURL := "https://stellar.org/" + hasCurrency := true + assert.True(t, isDomainVerified(orgURL, tomlURL, hasCurrency)) + + tomlURL = "https://stellar.org/stellar.toml" + orgURL = "" + hasCurrency = true + assert.True(t, isDomainVerified(orgURL, tomlURL, hasCurrency)) + + tomlURL = "" + orgURL = "" + hasCurrency = true + assert.False(t, isDomainVerified(orgURL, tomlURL, hasCurrency)) + + tomlURL = "https://stellar.org/stellar.toml" + orgURL = "https://stellar.org/" + hasCurrency = false + assert.False(t, isDomainVerified(orgURL, tomlURL, hasCurrency)) + + tomlURL = "http://stellar.org/stellar.toml" + orgURL = "https://stellar.org/" + hasCurrency = true + assert.False(t, isDomainVerified(orgURL, tomlURL, hasCurrency)) + + tomlURL = "https://stellar.org/stellar.toml" + orgURL = "http://stellar.org/" + hasCurrency = true + assert.False(t, isDomainVerified(orgURL, tomlURL, hasCurrency)) + + tomlURL = "https://stellar.org/stellar.toml" + orgURL = "https://stellar.com/" + hasCurrency = true + assert.False(t, isDomainVerified(orgURL, tomlURL, hasCurrency)) +} + +func TestIgnoreInvalidTOMLUrls(t *testing.T) { + invalidURL := "https:// there is something wrong here.com/stellar.toml" + assetStat := hProtocol.AssetStat{} + assetStat.Links.Toml = hal.Link{Href: invalidURL} + + _, err := fetchTOMLData(assetStat) + + urlErr, ok := errors.Cause(err).(*url.Error) + if !ok { + t.Fatalf("err expected to be a url.Error but was %#v", err) + } + assert.Equal(t, "parse", urlErr.Op) + assert.Equal(t, "https:// there is something wrong here.com/stellar.toml", urlErr.URL) + assert.EqualError(t, urlErr.Err, `invalid character " " in host name`) +} diff --git a/services/ticker/internal/scraper/helpers.go b/services/ticker/internal/scraper/helpers.go new file mode 100644 index 0000000000..fcd9170760 --- /dev/null +++ b/services/ticker/internal/scraper/helpers.go @@ -0,0 +1,19 @@ +package scraper + +import "net/url" + +// nextCursor finds the cursor parameter on the "next" link of a hProtocol page. +func nextCursor(nextPageURL string) (cursor string, err error) { + u, err := url.Parse(nextPageURL) + if err != nil { + return + } + + m, err := url.ParseQuery(u.RawQuery) + if err != nil { + return + } + cursor = m["cursor"][0] + + return +} diff --git a/services/ticker/internal/scraper/main.go b/services/ticker/internal/scraper/main.go new file mode 100644 index 0000000000..ce61439bb8 --- /dev/null +++ b/services/ticker/internal/scraper/main.go @@ -0,0 +1,175 @@ +package scraper + +import ( + "context" + "time" + + horizonclient "github.com/stellar/go/clients/horizonclient" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/ticker/internal/utils" + hlog "github.com/stellar/go/support/log" +) + +type ScraperConfig struct { + Client *horizonclient.Client + Logger *hlog.Entry + Ctx *context.Context +} + +// TOMLDoc is the interface for storing TOML Issuer Documentation. +// See: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0001.md#currency-documentation +type TOMLDoc struct { + OrgName string `toml:"ORG_NAME"` + OrgURL string `toml:"ORG_URL"` + OrgTwitter string `toml:"ORG_TWITTER"` +} + +// TOMLCurrency is the interface for storing TOML Currency Information. 
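+// Each entry corresponds to one [[CURRENCIES]] table of an issuer's stellar.toml file.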
+// See: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0001.md#currency-documentation +type TOMLCurrency struct { + Code string `toml:"code"` + Issuer string `toml:"issuer"` + IsAssetAnchored bool `toml:"is_asset_anchored"` + AnchorAsset string `toml:"anchor_asset"` + AnchorAssetType string `toml:"anchor_asset_type"` + DisplayDecimals int `toml:"display_decimals"` + Name string `toml:"name"` + Desc string `toml:"desc"` + Conditions string `toml:"conditions"` + FixedNumber int `toml:"fixed_number"` + MaxNumber int `toml:"max_number"` + IsUnlimited bool `toml:"is_unlimited"` + RedemptionInstructions string `toml:"redemption_instructions"` + CollateralAddresses []string `toml:"collateral_addresses"` + CollateralAddressSignatures []string `toml:"collateral_address_signatures"` + Status string `toml:"status"` +} + +// TOMLIssuer is the interface for storing TOML Issuer Information. +// See: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0001.md#currency-documentation +type TOMLIssuer struct { + FederationServer string `toml:"FEDERATION_SERVER"` + AuthServer string `toml:"AUTH_SERVER"` + TransferServer string `toml:"TRANSFER_SERVER"` + WebAuthEndpoint string `toml:"WEB_AUTH_ENDPOINT"` + SigningKey string `toml:"SIGNING_KEY"` + DepositServer string `toml:"DEPOSIT_SERVER"` // for legacy purposes + Documentation TOMLDoc `toml:"DOCUMENTATION"` + Currencies []TOMLCurrency `toml:"CURRENCIES"` + TOMLURL string `toml:"-"` +} + +// FinalAsset is the interface to represent the aggregated Asset data. +type FinalAsset struct { + Code string `json:"code"` + Issuer string `json:"issuer"` + Type string `json:"type"` + NumAccounts int32 `json:"num_accounts"` + AuthRequired bool `json:"auth_required"` + AuthRevocable bool `json:"auth_revocable"` + Amount float64 `json:"amount"` + IssuerDetails TOMLIssuer `json:"-"` + AssetControlledByDomain bool `json:"asset_controlled_by_domain"` + Error string `json:"-"` + AnchorAsset string `json:"anchor_asset"` + AnchorAssetType string `json:"anchor_asset_type"` + IsValid bool `json:"-"` + LastValid time.Time `json:"-"` + LastChecked time.Time `json:"-"` + IsTrash bool `json:"-"` + DisplayDecimals int `json:"display_decimals"` + Name string `json:"name"` + Desc string `json:"desc"` + Conditions string `json:"conditions"` + IsAssetAnchored bool `json:"is_asset_anchored"` + FixedNumber int `json:"fixed_number"` + MaxNumber int `json:"max_number"` + IsUnlimited bool `json:"is_unlimited"` + RedemptionInstructions string `json:"redemption_instructions"` + CollateralAddresses []string `json:"collateral_addresses"` + CollateralAddressSignatures []string `json:"collateral_address_signatures"` + Countries string `json:"countries"` + Status string `json:"status"` +} + +// OrderbookStats represents the Orderbook stats for a given asset +type OrderbookStats struct { + BaseAssetCode string + BaseAssetType string + BaseAssetIssuer string + CounterAssetCode string + CounterAssetType string + CounterAssetIssuer string + NumBids int + BidVolume float64 + HighestBid float64 + NumAsks int + AskVolume float64 + LowestAsk float64 + Spread float64 + SpreadMidPoint float64 +} + +// FetchAllAssets fetches assets from the Horizon public net. If limit = 0, will fetch all assets. 
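+// The parallelism argument sets how many goroutines validate asset TOML files
+// concurrently (see parallelProcessAssets).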
+func (c *ScraperConfig) FetchAllAssets(limit int, parallelism int) (assets []FinalAsset, err error) { + dirtyAssets, err := c.retrieveAssets(limit) + if err != nil { + return + } + + assets, numTrash := c.parallelProcessAssets(dirtyAssets, parallelism) + + c.Logger.Infof( + "Scanned %d entries. Trash: %d. Non-trash: %d\n", + len(dirtyAssets), + numTrash, + len(assets), + ) + return +} + +// FetchAllTrades fetches all trades for a given period, respecting the limit. If limit = 0, +// will fetch all trades for that given period. +func (c *ScraperConfig) FetchAllTrades(since time.Time, limit int) (trades []hProtocol.Trade, err error) { + c.Logger.Info("Fetching trades from Horizon") + + trades, err = c.retrieveTrades(since, limit) + + c.Logger.Info("Last close time ingested:", trades[len(trades)-1].LedgerCloseTime) + c.Logger.Infof("Fetched: %d trades\n", len(trades)) + return +} + +// StreamNewTrades streams trades directly from horizon and calls the handler function +// whenever a new trade appears. +func (c *ScraperConfig) StreamNewTrades(cursor string, h horizonclient.TradeHandler) error { + c.Logger.Info("Starting to stream trades with cursor at:", cursor) + return c.streamTrades(h, cursor) +} + +// FetchOrderbookForAssets fetches the orderbook stats for the base and counter assets provided in the parameters +func (c *ScraperConfig) FetchOrderbookForAssets(bType, bCode, bIssuer, cType, cCode, cIssuer string) (OrderbookStats, error) { + c.Logger.Infof("Fetching orderbook info for %s:%s / %s:%s\n", bCode, bIssuer, cCode, cIssuer) + return c.fetchOrderbook(bType, bCode, bIssuer, cType, cCode, cIssuer) +} + +// NormalizeTradeAssets enforces the following rules: +// 1. native asset type refers to a "XLM" code and a "native" issuer +// 2. native is always the base asset (and if not, base and counter are swapped) +// 3. when trades are between two non-native, the base is the asset whose string +// comes first alphabetically. 
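+// For example, a trade between the non-native assets "BBB" (base) and "AAA" (counter)
+// has its sides swapped so that "AAA" becomes the base asset.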
+func NormalizeTradeAssets(t *hProtocol.Trade) { + addNativeData(t) + if t.BaseAssetType == "native" { + return + } + if t.CounterAssetType == "native" { + reverseAssets(t) + return + } + bAssetString := utils.GetAssetString(t.BaseAssetType, t.BaseAssetCode, t.BaseAssetIssuer) + cAssetString := utils.GetAssetString(t.CounterAssetType, t.CounterAssetCode, t.CounterAssetIssuer) + if bAssetString > cAssetString { + reverseAssets(t) + } +} diff --git a/services/ticker/internal/scraper/orderbook_scraper.go b/services/ticker/internal/scraper/orderbook_scraper.go new file mode 100644 index 0000000000..9433414cdf --- /dev/null +++ b/services/ticker/internal/scraper/orderbook_scraper.go @@ -0,0 +1,135 @@ +package scraper + +import ( + "math" + "strconv" + "time" + + "github.com/pkg/errors" + horizonclient "github.com/stellar/go/clients/horizonclient" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/services/ticker/internal/utils" +) + +// fetchOrderbook fetches the orderbook stats for the base and counter assets provided in the parameters +func (c *ScraperConfig) fetchOrderbook(bType, bCode, bIssuer, cType, cCode, cIssuer string) (OrderbookStats, error) { + var ( + err error + summary hProtocol.OrderBookSummary + ) + + obStats := OrderbookStats{ + BaseAssetCode: bType, + BaseAssetType: bCode, + BaseAssetIssuer: bIssuer, + CounterAssetCode: cType, + CounterAssetType: cCode, + CounterAssetIssuer: cIssuer, + HighestBid: math.Inf(-1), // start with -Inf to make sure we catch the correct max bid + LowestAsk: math.Inf(1), // start with +Inf to make sure we catch the correct min ask + } + r := createOrderbookRequest(bType, bCode, bIssuer, cType, cCode, cIssuer) + + err = utils.Retry(5, 5*time.Second, c.Logger, func() error { + summary, err = c.Client.OrderBook(r) + if err != nil { + c.Logger.Info("Horizon rate limit reached!") + } + return err + }) + if err != nil { + return obStats, errors.Wrap(err, "could not fetch orderbook summary") + } + + err = calcOrderbookStats(&obStats, summary) + if err != nil { + return obStats, errors.Wrap(err, "could not calculate orderbook stats") + } + return obStats, nil +} + +// calcOrderbookStats calculates the NumBids, BidVolume, BidMax, NumAsks, AskVolume and AskMin +// statistics for a given OrdebookStats instance +func calcOrderbookStats(obStats *OrderbookStats, summary hProtocol.OrderBookSummary) error { + // Calculate Bid Data: + obStats.NumBids = len(summary.Bids) + if obStats.NumBids == 0 { + obStats.HighestBid = 0 + } + for _, bid := range summary.Bids { + pricef := float64(bid.PriceR.N) / float64(bid.PriceR.D) + if pricef > obStats.HighestBid { + obStats.HighestBid = pricef + } + + amountf, err := strconv.ParseFloat(bid.Amount, 64) + if err != nil { + return errors.Wrap(err, "invalid bid amount") + } + obStats.BidVolume += amountf + } + + // Calculate Ask Data: + obStats.NumAsks = len(summary.Asks) + if obStats.NumAsks == 0 { + obStats.LowestAsk = 0 + } + for _, ask := range summary.Asks { + pricef := float64(ask.PriceR.N) / float64(ask.PriceR.D) + amountf, err := strconv.ParseFloat(ask.Amount, 64) + if err != nil { + return errors.Wrap(err, "invalid ask amount") + } + + // On Horizon, Ask prices are in units of counter, but + // amount is in units of base. 
Therefore, real amount = amount * price + // See: https://github.com/stellar/go/issues/612 + obStats.AskVolume += pricef * amountf + if pricef < obStats.LowestAsk { + obStats.LowestAsk = pricef + } + } + + obStats.Spread, obStats.SpreadMidPoint = utils.CalcSpread(obStats.HighestBid, obStats.LowestAsk) + + // Clean up remaining infinity values: + if math.IsInf(obStats.LowestAsk, 0) { + obStats.LowestAsk = 0 + } + + if math.IsInf(obStats.HighestBid, 0) { + obStats.HighestBid = 0 + } + + return nil +} + +// createOrderbookRequest generates a horizonclient.OrderBookRequest based on the base +// and counter asset parameters provided +func createOrderbookRequest(bType, bCode, bIssuer, cType, cCode, cIssuer string) horizonclient.OrderBookRequest { + r := horizonclient.OrderBookRequest{ + SellingAssetType: horizonclient.AssetType(bType), + BuyingAssetType: horizonclient.AssetType(cType), + // NOTE (Alex C, 2019-05-02): + // Orderbook requests are currently not paginated on Horizon. + // This limit has been added to ensure we capture at least 200 + // orderbook entries once pagination is added. + Limit: 200, + } + + // The Horizon API requires *AssetCode and *AssetIssuer fields to be empty + // when an Asset is native. As we store "XLM" as the asset code for native, + // we should only add Code and Issuer info in case we're dealing with + // non-native assets. + // See: https://developers.stellar.org/api/aggregations/order-books/single/ + if bType != string(horizonclient.AssetTypeNative) { + r.SellingAssetCode = bCode + r.SellingAssetIssuer = bIssuer + } + if cType != string(horizonclient.AssetTypeNative) { + r.BuyingAssetCode = cCode + r.BuyingAssetIssuer = cIssuer + } + + return r +} diff --git a/services/ticker/internal/scraper/trade_scraper.go b/services/ticker/internal/scraper/trade_scraper.go new file mode 100644 index 0000000000..e8c71ff4eb --- /dev/null +++ b/services/ticker/internal/scraper/trade_scraper.go @@ -0,0 +1,119 @@ +package scraper + +import ( + "time" + + "github.com/stellar/go/services/ticker/internal/utils" + + horizonclient "github.com/stellar/go/clients/horizonclient" + hProtocol "github.com/stellar/go/protocols/horizon" +) + +// checkRecords check if a list of records contains entries older than minTime. If it does, +// it will return a filtered page with only the passing records and lastPage = true. +func (c *ScraperConfig) checkRecords(trades []hProtocol.Trade, minTime time.Time) (lastPage bool, cleanTrades []hProtocol.Trade) { + lastPage = false + for _, t := range trades { + if t.LedgerCloseTime.After(minTime) { + NormalizeTradeAssets(&t) + cleanTrades = append(cleanTrades, t) + } else { + c.Logger.Debug("Reached entries older than the acceptable time range:", t.LedgerCloseTime) + lastPage = true + return + } + } + return +} + +// retrieveTrades retrieves trades from the Horizon API for the last timeDelta period. +// If limit = 0, will fetch all trades within that period. +func (c *ScraperConfig) retrieveTrades(since time.Time, limit int) (trades []hProtocol.Trade, err error) { + r := horizonclient.TradeRequest{Limit: 200, Order: horizonclient.OrderDesc} + + tradesPage, err := c.Client.Trades(r) + if err != nil { + return + } + + for tradesPage.Links.Next.Href != tradesPage.Links.Self.Href { + // Enforcing time boundaries: + last, cleanTrades := c.checkRecords(tradesPage.Embedded.Records, since) + trades = append(trades, cleanTrades...) 
+ if last { + break + } + + // Enforcing limit of results: + if limit != 0 { + numTrades := len(trades) + if numTrades >= limit { + diff := numTrades - limit + trades = trades[0 : numTrades-diff] + break + } + } + + // Finding next page's params: + nextURL := tradesPage.Links.Next.Href + n, err := nextCursor(nextURL) + if err != nil { + return trades, err + } + c.Logger.Debug("Cursor currently at:", n) + r.Cursor = n + + err = utils.Retry(5, 5*time.Second, c.Logger, func() error { + tradesPage, err = c.Client.Trades(r) + if err != nil { + c.Logger.Info("Horizon rate limit reached!") + } + return err + }) + if err != nil { + return trades, err + } + } + + return +} + +// streamTrades streams trades directly from horizon and calls the handler function +// whenever a new trade appears. +func (c *ScraperConfig) streamTrades(h horizonclient.TradeHandler, cursor string) error { + if cursor == "" { + cursor = "now" + } + + r := horizonclient.TradeRequest{ + Limit: 200, + Cursor: cursor, + } + + return r.StreamTrades(*c.Ctx, c.Client, h) +} + +// addNativeData adds additional fields when one of the assets is native. +func addNativeData(trade *hProtocol.Trade) { + if trade.BaseAssetType == "native" { + trade.BaseAssetCode = "XLM" + trade.BaseAssetIssuer = "native" + } + + if trade.CounterAssetType == "native" { + trade.CounterAssetCode = "XLM" + trade.CounterAssetIssuer = "native" + } +} + +// reverseAssets swaps out the base and counter assets of a trade. +func reverseAssets(trade *hProtocol.Trade) { + trade.BaseAmount, trade.CounterAmount = trade.CounterAmount, trade.BaseAmount + trade.BaseAccount, trade.CounterAccount = trade.CounterAccount, trade.BaseAccount + trade.BaseAssetCode, trade.CounterAssetCode = trade.CounterAssetCode, trade.BaseAssetCode + trade.BaseAssetType, trade.CounterAssetType = trade.CounterAssetType, trade.BaseAssetType + trade.BaseAssetIssuer, trade.CounterAssetIssuer = trade.CounterAssetIssuer, trade.BaseAssetIssuer + + trade.BaseIsSeller = !trade.BaseIsSeller + trade.Price.N, trade.Price.D = trade.Price.D, trade.Price.N +} diff --git a/services/ticker/internal/scraper/trade_scraper_test.go b/services/ticker/internal/scraper/trade_scraper_test.go new file mode 100644 index 0000000000..935424923e --- /dev/null +++ b/services/ticker/internal/scraper/trade_scraper_test.go @@ -0,0 +1,151 @@ +package scraper + +import ( + "fmt" + "testing" + + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stretchr/testify/assert" +) + +func TestReverseAssets(t *testing.T) { + baseAmount := "10.0" + baseAccount := "BASEACCOUNT" + baseAssetCode := "BASECODE" + baseAssetType := "BASEASSETTYPE" + baseAssetIssuer := "BASEASSETISSUER" + + counterAmount := "5.0" + counterAccount := "COUNTERACCOUNT" + counterAssetCode := "COUNTERASSETCODE" + counterAssetType := "COUNTERASSETTYPE" + counterAssetIssuer := "COUNTERASSETISSUER" + + baseIsSeller := true + + n := int64(10) + d := int64(50) + + price := hProtocol.TradePrice{ + N: n, + D: d, + } + + trade1 := hProtocol.Trade{ + BaseAmount: baseAmount, + BaseAccount: baseAccount, + BaseAssetCode: baseAssetCode, + BaseAssetType: baseAssetType, + BaseAssetIssuer: baseAssetIssuer, + CounterAmount: counterAmount, + CounterAccount: counterAccount, + CounterAssetCode: counterAssetCode, + CounterAssetType: counterAssetType, + CounterAssetIssuer: counterAssetIssuer, + BaseIsSeller: baseIsSeller, + Price: price, + } + + fmt.Println(trade1) + + reverseAssets(&trade1) + + assert.Equal(t, counterAmount, trade1.BaseAmount) + assert.Equal(t, 
counterAccount, trade1.BaseAccount) + assert.Equal(t, counterAssetCode, trade1.BaseAssetCode) + assert.Equal(t, counterAssetType, trade1.BaseAssetType) + assert.Equal(t, counterAssetIssuer, trade1.BaseAssetIssuer) + + assert.Equal(t, baseAmount, trade1.CounterAmount) + assert.Equal(t, baseAccount, trade1.CounterAccount) + assert.Equal(t, baseAssetCode, trade1.CounterAssetCode) + assert.Equal(t, baseAssetType, trade1.CounterAssetType) + assert.Equal(t, baseAssetIssuer, trade1.CounterAssetIssuer) + + assert.Equal(t, !baseIsSeller, trade1.BaseIsSeller) + + assert.Equal(t, d, trade1.Price.N) + assert.Equal(t, n, trade1.Price.D) +} + +func TestAddNativeData(t *testing.T) { + trade1 := hProtocol.Trade{ + BaseAssetType: "native", + } + + addNativeData(&trade1) + assert.Equal(t, "XLM", trade1.BaseAssetCode) + assert.Equal(t, "native", trade1.BaseAssetIssuer) + + trade2 := hProtocol.Trade{ + CounterAssetType: "native", + } + addNativeData(&trade2) + assert.Equal(t, "XLM", trade2.CounterAssetCode) + assert.Equal(t, "native", trade2.CounterAssetIssuer) +} + +func TestNormalizeTradeAssets(t *testing.T) { + baseAmount := "10.0" + baseAccount := "BASEACCOUNT" + baseAssetCode := "BASECODE" + baseAssetType := "BASEASSETTYPE" + baseAssetIssuer := "BASEASSETISSUER" + + n := int64(10) + d := int64(50) + + price := hProtocol.TradePrice{ + N: n, + D: d, + } + + trade1 := hProtocol.Trade{ + BaseAmount: baseAmount, + BaseAccount: baseAccount, + BaseAssetCode: baseAssetCode, + BaseAssetType: baseAssetType, + BaseAssetIssuer: baseAssetIssuer, + CounterAssetType: "native", + Price: price, + } + + NormalizeTradeAssets(&trade1) + assert.Equal(t, baseAmount, trade1.CounterAmount) + assert.Equal(t, baseAccount, trade1.CounterAccount) + assert.Equal(t, baseAssetCode, trade1.CounterAssetCode) + assert.Equal(t, baseAssetType, trade1.CounterAssetType) + assert.Equal(t, baseAssetIssuer, trade1.CounterAssetIssuer) + assert.Equal(t, "native", trade1.BaseAssetType) + + counterAmount := "5.0" + counterAccount := "COUNTERACCOUNT" + counterAssetType := "COUNTERASSETTYPE" + counterAssetIssuer := "COUNTERASSETISSUER" + + trade2 := hProtocol.Trade{ + BaseAmount: baseAmount, + BaseAccount: baseAccount, + BaseAssetCode: "BBB", + BaseAssetType: baseAssetType, + BaseAssetIssuer: baseAssetIssuer, + CounterAmount: counterAmount, + CounterAccount: counterAccount, + CounterAssetCode: "AAA", + CounterAssetType: counterAssetType, + CounterAssetIssuer: counterAssetIssuer, + Price: price, + } + NormalizeTradeAssets(&trade2) + assert.Equal(t, baseAmount, trade2.CounterAmount) + assert.Equal(t, baseAccount, trade2.CounterAccount) + assert.Equal(t, "BBB", trade2.CounterAssetCode) + assert.Equal(t, baseAssetType, trade2.CounterAssetType) + assert.Equal(t, baseAssetIssuer, trade2.CounterAssetIssuer) + + assert.Equal(t, counterAmount, trade2.BaseAmount) + assert.Equal(t, counterAccount, trade2.BaseAccount) + assert.Equal(t, "AAA", trade2.BaseAssetCode) + assert.Equal(t, counterAssetType, trade2.BaseAssetType) + assert.Equal(t, counterAssetIssuer, trade2.BaseAssetIssuer) +} diff --git a/services/ticker/internal/tickerdb/helpers.go b/services/ticker/internal/tickerdb/helpers.go new file mode 100644 index 0000000000..4958d61535 --- /dev/null +++ b/services/ticker/internal/tickerdb/helpers.go @@ -0,0 +1,138 @@ +package tickerdb + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + + "github.com/stellar/go/services/ticker/internal/utils" +) + +// getDBFieldTags returns all "db" tags for a given struct, optionally excluding the "id". 
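+// Tag names are returned double-quoted and ready to be interpolated into SQL
+// statements (e.g. a field tagged db:"code" yields "code", quotes included);
+// fields tagged db:"-" are skipped.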
+func getDBFieldTags(model interface{}, excludeID bool) (fields []string) { + r := reflect.ValueOf(model) + for i := 0; i < r.Type().NumField(); i++ { + dbField := r.Type().Field(i).Tag.Get("db") + if (excludeID && dbField == "id") || dbField == "-" { // ensure fields marked with a "-" tag are ignored + continue + } + fields = append(fields, dbField) + } + fields = sanitizeFieldNames(fields) + return +} + +// sanitizeFieldNames adds double quotes to each entry on a slice of field names. +func sanitizeFieldNames(fieldNames []string) (sanitizedFields []string) { + for _, v := range fieldNames { + quotedField := fmt.Sprintf("\"%s\"", v) + sanitizedFields = append(sanitizedFields, quotedField) + } + return +} + +// getDBFieldValues returns all "db"-tagged values, optionally excluding the "id". +func getDBFieldValues(model interface{}, excludeID bool) (values []interface{}) { + r := reflect.ValueOf(model) + for i := 0; i < r.Type().NumField(); i++ { + dbField := r.Type().Field(i).Tag.Get("db") + dbVal := r.Field(i).Interface() + if (excludeID && dbField == "id") || dbField == "-" { // ensure fields marked with a "-" tag are ignored + continue + } + values = append(values, dbVal) + } + return +} + +// createOnConflictFragment generates a ON CONFLICT sql clause for a given constraint, +// preserving the fields listed in the fields param. +func createOnConflictFragment(constraint string, fields []string) (fragment string) { + fragment = fmt.Sprintf("ON CONFLICT ON CONSTRAINT %s DO UPDATE SET ", constraint) + for i, field := range fields { + fragment += fmt.Sprintf("%s = EXCLUDED.%s", field, field) + + if i != len(fields)-1 { + fragment += "," + } + } + + return +} + +// generatePlaceholders generates a string formatted as (?, ?, ?, ?) of length +// equal to the size of the fields param +func generatePlaceholders(fields []interface{}) (placeholder string) { + for i := range fields { + placeholder += "?" + + if i != len(fields)-1 { + placeholder += ", " + } + } + return +} + +// optionalVar is a simple struct to represent a query variable that should +// only be used in a statement if its value is not null +type optionalVar struct { + name string + val *string +} + +// generateWhereClause generates a WHERE clause in the format: +// "WHERE x = ? AND y = ? AND ..." where the number of conditions is equal +// to the number of optVars with val != nil. It also returns the valid vals +// in the args param. This function was created to take advantage of go/sql's +// sanitization and to prevent possible SQL injections. +func generateWhereClause(optVars []optionalVar) (clause string, args []string) { + for _, ov := range optVars { + if ov.val != nil { + if clause == "" { + clause = fmt.Sprintf("WHERE %s = ?", ov.name) + } else { + clause += fmt.Sprintf(" AND %s = ?", ov.name) + } + args = append(args, *ov.val) + } + } + return +} + +// getBaseAndCounterCodes takes an asset pair name string (e.g: XLM_BTC) +// and returns the parsed asset codes (e.g.: XLM, BTC). It also reverses +// the assets, according to the following rules: +// 1. XLM is always the base asset +// 2. 
If XLM is not in the pair, the assets should be ordered alphabetically +func getBaseAndCounterCodes(pairName string) (string, string, error) { + assets := strings.Split(pairName, "_") + if len(assets) != 2 { + return "", "", errors.New("invalid asset pair name") + } + + if (assets[1] == "XLM") || (assets[0] != "XLM" && assets[0] > assets[1]) { + return assets[1], assets[0], nil + } + + return assets[0], assets[1], nil +} + +// performUpsertQuery introspects a dbStruct interface{} and performs an insert query +// (if the conflictConstraint isn't broken), otherwise it updates the instance on the +// db, preserving the old values for the fields in preserveFields +func (s *TickerSession) performUpsertQuery(ctx context.Context, dbStruct interface{}, tableName string, conflictConstraint string, preserveFields []string) error { + dbFields := getDBFieldTags(dbStruct, true) + dbFieldsString := strings.Join(dbFields, ", ") + dbValues := getDBFieldValues(dbStruct, true) + + cleanPreservedFields := sanitizeFieldNames(preserveFields) + toUpdateFields := utils.SliceDiff(dbFields, cleanPreservedFields) + + qs := fmt.Sprintf("INSERT INTO %s (", tableName) + dbFieldsString + ")" + qs += " VALUES (" + generatePlaceholders(dbValues) + ")" + qs += " " + createOnConflictFragment(conflictConstraint, toUpdateFields) + ";" + _, err := s.ExecRaw(ctx, qs, dbValues...) + return err +} diff --git a/services/ticker/internal/tickerdb/helpers_test.go b/services/ticker/internal/tickerdb/helpers_test.go new file mode 100644 index 0000000000..6c09306636 --- /dev/null +++ b/services/ticker/internal/tickerdb/helpers_test.go @@ -0,0 +1,129 @@ +package tickerdb + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/stretchr/testify/assert" +) + +type exampleDBModel struct { + ID int `db:"id"` + Name string `db:"name"` + Counter int `db:"counter"` +} + +func TestGetDBFieldTags(t *testing.T) { + m := exampleDBModel{ + ID: 10, + Name: "John Doe", + Counter: 15, + } + + fieldTags := getDBFieldTags(m, true) + assert.Contains(t, fieldTags, "\"name\"") + assert.Contains(t, fieldTags, "\"counter\"") + assert.NotContains(t, fieldTags, "\"id\"") + assert.Equal(t, 2, len(fieldTags)) + + fieldTagsWithID := getDBFieldTags(m, false) + assert.Contains(t, fieldTagsWithID, "\"name\"") + assert.Contains(t, fieldTagsWithID, "\"counter\"") + assert.Contains(t, fieldTagsWithID, "\"id\"") + assert.Equal(t, 3, len(fieldTagsWithID)) +} + +func TestGetDBFieldValues(t *testing.T) { + m := exampleDBModel{ + ID: 10, + Name: "John Doe", + Counter: 15, + } + + fieldValues := getDBFieldValues(m, true) + assert.Contains(t, fieldValues, 15) + assert.Contains(t, fieldValues, "John Doe") + assert.NotContains(t, fieldValues, 10) + assert.Equal(t, 2, len(fieldValues)) + + fieldTagsWithID := getDBFieldValues(m, false) + assert.Contains(t, fieldTagsWithID, 15) + assert.Contains(t, fieldTagsWithID, "John Doe") + assert.Contains(t, fieldTagsWithID, 10) + assert.Equal(t, 3, len(fieldTagsWithID)) +} + +func TestGeneratePlaceholders(t *testing.T) { + var p []interface{} + p = append(p, 1) + p = append(p, 2) + p = append(p, 3) + placeholder := generatePlaceholders(p) + assert.Equal(t, "?, ?, ?", placeholder) +} + +func TestGenerateWhereClause(t *testing.T) { + baseAssetCode := new(string) + baseAssetIssuer := new(string) + *baseAssetCode = "baseAssetCode" + *baseAssetIssuer = "baseAssetIssuer" + + where1, args1 := generateWhereClause([]optionalVar{ + optionalVar{"t1.base_asset_code", nil}, + optionalVar{"t1.base_asset_issuer", nil}, + 
optionalVar{"t1.counter_asset_code", nil}, + optionalVar{"t1.counter_asset_issuer", nil}, + }) + + assert.Equal(t, "", where1) + assert.Equal(t, 0, len(args1)) + + where2, args2 := generateWhereClause([]optionalVar{ + optionalVar{"t1.base_asset_code", baseAssetCode}, + optionalVar{"t1.base_asset_issuer", nil}, + optionalVar{"t1.counter_asset_code", nil}, + optionalVar{"t1.counter_asset_issuer", nil}, + }) + + assert.Equal(t, "WHERE t1.base_asset_code = ?", where2) + assert.Equal(t, 1, len(args2)) + assert.Equal(t, *baseAssetCode, args2[0]) + + where3, args3 := generateWhereClause([]optionalVar{ + optionalVar{"t1.base_asset_code", baseAssetCode}, + optionalVar{"t1.base_asset_issuer", baseAssetIssuer}, + optionalVar{"t1.counter_asset_code", nil}, + optionalVar{"t1.counter_asset_issuer", nil}, + }) + + assert.Equal(t, "WHERE t1.base_asset_code = ? AND t1.base_asset_issuer = ?", where3) + assert.Equal(t, 2, len(args3)) + assert.Equal(t, *baseAssetCode, args3[0]) + assert.Equal(t, *baseAssetIssuer, args3[1]) +} + +func TestGetBaseAndCounterCodes(t *testing.T) { + a1, a2, err := getBaseAndCounterCodes("XLM_BTC") + require.NoError(t, err) + assert.Equal(t, "XLM", a1) + assert.Equal(t, "BTC", a2) + + a3, a4, err := getBaseAndCounterCodes("BTC_XLM") + require.NoError(t, err) + assert.Equal(t, "XLM", a3) + assert.Equal(t, "BTC", a4) + + a5, a6, err := getBaseAndCounterCodes("BTC_ETH") + require.NoError(t, err) + assert.Equal(t, "BTC", a5) + assert.Equal(t, "ETH", a6) + + a7, a8, err := getBaseAndCounterCodes("ETH_BTC") + require.NoError(t, err) + assert.Equal(t, "BTC", a7) + assert.Equal(t, "ETH", a8) + + _, _, err = getBaseAndCounterCodes("BTC") + require.Error(t, err) +} diff --git a/services/ticker/internal/tickerdb/main.go b/services/ticker/internal/tickerdb/main.go new file mode 100644 index 0000000000..3553273fc3 --- /dev/null +++ b/services/ticker/internal/tickerdb/main.go @@ -0,0 +1,182 @@ +package tickerdb + +import ( + "time" + + "github.com/jmoiron/sqlx" + migrate "github.com/rubenv/sql-migrate" + bdata "github.com/stellar/go/services/ticker/internal/tickerdb/migrations" + "github.com/stellar/go/support/db" +) + +//go:generate go run github.com/kevinburke/go-bindata/go-bindata@v3.18.0+incompatible -nometadata -ignore .+\.go$ -pkg bdata -o migrations/bindata.go ./... + +// TickerSession provides helper methods for making queries against `DB`. 
+type TickerSession struct { + db.Session +} + +// Asset represents an entry on the assets table +type Asset struct { + ID int32 `db:"id"` + Code string `db:"code"` + IssuerAccount string `db:"issuer_account"` + Type string `db:"type"` + NumAccounts int32 `db:"num_accounts"` + AuthRequired bool `db:"auth_required"` + AuthRevocable bool `db:"auth_revocable"` + Amount float64 `db:"amount"` + AssetControlledByDomain bool `db:"asset_controlled_by_domain"` + AnchorAssetCode string `db:"anchor_asset_code"` + AnchorAssetType string `db:"anchor_asset_type"` + IsValid bool `db:"is_valid"` + ValidationError string `db:"validation_error"` + LastValid time.Time `db:"last_valid"` + LastChecked time.Time `db:"last_checked"` + DisplayDecimals int `db:"display_decimals"` + Name string `db:"name"` + Desc string `db:"description"` + Conditions string `db:"conditions"` + IsAssetAnchored bool `db:"is_asset_anchored"` + FixedNumber int `db:"fixed_number"` + MaxNumber int `db:"max_number"` + IsUnlimited bool `db:"is_unlimited"` + RedemptionInstructions string `db:"redemption_instructions"` + CollateralAddresses string `db:"collateral_addresses"` + CollateralAddressSignatures string `db:"collateral_address_signatures"` + Countries string `db:"countries"` + Status string `db:"status"` + IssuerID int32 `db:"issuer_id"` + Issuer Issuer `db:"-"` +} + +// Issuer represents an entry on the issuers table +type Issuer struct { + ID int32 `db:"id"` + PublicKey string `db:"public_key"` + Name string `db:"name"` + URL string `db:"url"` + TOMLURL string `db:"toml_url"` + FederationServer string `db:"federation_server"` + AuthServer string `db:"auth_server"` + TransferServer string `db:"transfer_server"` + WebAuthEndpoint string `db:"web_auth_endpoint"` + DepositServer string `db:"deposit_server"` + OrgTwitter string `db:"org_twitter"` +} + +// Trade represents an entry on the trades table +type Trade struct { + ID int32 `db:"id"` + HorizonID string `db:"horizon_id"` + LedgerCloseTime time.Time `db:"ledger_close_time"` + OfferID string `db:"offer_id"` + BaseOfferID string `db:"base_offer_id"` + BaseAccount string `db:"base_account"` + BaseAmount float64 `db:"base_amount"` + BaseAssetID int32 `db:"base_asset_id"` + CounterOfferID string `db:"counter_offer_id"` + CounterAccount string `db:"counter_account"` + CounterAmount float64 `db:"counter_amount"` + CounterAssetID int32 `db:"counter_asset_id"` + BaseIsSeller bool `db:"base_is_seller"` + Price float64 `db:"price"` +} + +// OrderbookStats represents an entry on the orderbook_stats table +type OrderbookStats struct { + ID int32 `db:"id"` + BaseAssetID int32 `db:"base_asset_id"` + CounterAssetID int32 `db:"counter_asset_id"` + NumBids int `db:"num_bids"` + BidVolume float64 `db:"bid_volume"` + HighestBid float64 `db:"highest_bid"` + NumAsks int `db:"num_asks"` + AskVolume float64 `db:"ask_volume"` + LowestAsk float64 `db:"lowest_ask"` + Spread float64 `db:"spread"` + SpreadMidPoint float64 `db:"spread_mid_point"` + UpdatedAt time.Time `db:"updated_at"` +} + +// Market represent the aggregated market data retrieved from the database. +// Note: this struct does *not* directly map to a db entity. 
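+// Its db tags refer to column aliases produced by the aggregation queries that
+// build it, not to the columns of any single table.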
+type Market struct { + TradePair string `db:"trade_pair_name"` + BaseVolume24h float64 `db:"base_volume_24h"` + CounterVolume24h float64 `db:"counter_volume_24h"` + TradeCount24h int64 `db:"trade_count_24h"` + OpenPrice24h float64 `db:"open_price_24h"` + LowestPrice24h float64 `db:"lowest_price_24h"` + HighestPrice24h float64 `db:"highest_price_24h"` + PriceChange24h float64 `db:"price_change_24h"` + BaseVolume7d float64 `db:"base_volume_7d"` + CounterVolume7d float64 `db:"counter_volume_7d"` + TradeCount7d int64 `db:"trade_count_7d"` + OpenPrice7d float64 `db:"open_price_7d"` + LowestPrice7d float64 `db:"lowest_price_7d"` + HighestPrice7d float64 `db:"highest_price_7d"` + PriceChange7d float64 `db:"price_change_7d"` + LastPriceCloseTime time.Time `db:"close_time"` + LastPrice float64 `db:"last_price"` + NumBids int `db:"num_bids"` + BidVolume float64 `db:"bid_volume"` + HighestBid float64 `db:"highest_bid"` + NumAsks int `db:"num_asks"` + AskVolume float64 `db:"ask_volume"` + LowestAsk float64 `db:"lowest_ask"` +} + +// PartialMarket represents the aggregated market data for a +// specific pair of assets (or asset codes) during an arbitrary +// time range. +// Note: this struct does *not* directly map to a db entity. +type PartialMarket struct { + TradePairName string `db:"trade_pair_name"` + BaseAssetID int32 `db:"base_asset_id"` + BaseAssetCode string `db:"base_asset_code"` + BaseAssetIssuer string `db:"base_asset_issuer"` + BaseAssetType string `db:"base_asset_type"` + CounterAssetID int32 `db:"counter_asset_id"` + CounterAssetCode string `db:"counter_asset_code"` + CounterAssetIssuer string `db:"counter_asset_issuer"` + CounterAssetType string `db:"counter_asset_type"` + BaseVolume float64 `db:"base_volume"` + CounterVolume float64 `db:"counter_volume"` + TradeCount int32 `db:"trade_count"` + Open float64 `db:"open_price"` + Low float64 `db:"lowest_price"` + High float64 `db:"highest_price"` + Change float64 `db:"price_change"` + Close float64 `db:"last_price"` + NumBids int `db:"num_bids"` + BidVolume float64 `db:"bid_volume"` + HighestBid float64 `db:"highest_bid"` + NumAsks int `db:"num_asks"` + AskVolume float64 `db:"ask_volume"` + LowestAsk float64 `db:"lowest_ask"` + IntervalStart time.Time `db:"interval_start"` + FirstLedgerCloseTime time.Time `db:"first_ledger_close_time"` + LastLedgerCloseTime time.Time `db:"last_ledger_close_time"` +} + +// CreateSession returns a new TickerSession that connects to the given db settings +func CreateSession(driverName, dataSourceName string) (session TickerSession, err error) { + dbconn, err := sqlx.Connect(driverName, dataSourceName) + if err != nil { + return + } + + session.DB = dbconn + return +} + +func MigrateDB(s *TickerSession) (int, error) { + migrations := &migrate.AssetMigrationSource{ + Asset: bdata.Asset, + AssetDir: bdata.AssetDir, + Dir: "migrations", + } + migrate.SetTable("migrations") + return migrate.Exec(s.DB.DB, "postgres", migrations, migrate.Up) +} diff --git a/services/ticker/internal/tickerdb/migrations/20190404184050-initial.sql b/services/ticker/internal/tickerdb/migrations/20190404184050-initial.sql new file mode 100644 index 0000000000..3c724a8bb1 --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190404184050-initial.sql @@ -0,0 +1,28 @@ + +-- +migrate Up + +CREATE TABLE public.assets ( + id serial NOT NULL PRIMARY KEY, + code character varying(12) NOT NULL, + issuer text NOT NULL, + type character varying(64) NOT NULL, + num_accounts integer NOT NULL, + auth_required boolean NOT NULL, + 
auth_revocable boolean NOT NULL, + amount double precision NOT NULL, + asset_controlled_by_domain boolean NOT NULL, + anchor_asset_code character varying(12) NOT NULL, + anchor_asset_type character varying(64) NOT NULL, + is_valid boolean NOT NULL, + validation_error text NOT NULL, + last_valid timestamp with time zone NOT NULL, + last_checked timestamp with time zone DEFAULT now() NOT NULL +); + +ALTER TABLE ONLY public.assets + ADD CONSTRAINT assets_code_issuer_key UNIQUE (code, issuer); + + +-- +migrate Down + +DROP TABLE public.assets; diff --git a/services/ticker/internal/tickerdb/migrations/20190405112544-increase_asset_code_size.sql b/services/ticker/internal/tickerdb/migrations/20190405112544-increase_asset_code_size.sql new file mode 100644 index 0000000000..d15146a107 --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190405112544-increase_asset_code_size.sql @@ -0,0 +1,14 @@ + +-- +migrate Up +ALTER TABLE public.assets + ALTER COLUMN code type character varying(64); + +ALTER TABLE public.assets + ALTER COLUMN anchor_asset_code type character varying(64); + +-- +migrate Down +ALTER TABLE public.assets + ALTER COLUMN code type character varying(12); + +ALTER TABLE public.assets + ALTER COLUMN anchor_asset_code type character varying(12); diff --git a/services/ticker/internal/tickerdb/migrations/20190408115724-add_new_asset_fields.sql b/services/ticker/internal/tickerdb/migrations/20190408115724-add_new_asset_fields.sql new file mode 100644 index 0000000000..d7656b46cb --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190408115724-add_new_asset_fields.sql @@ -0,0 +1,38 @@ + +-- +migrate Up +ALTER TABLE public.assets + RENAME COLUMN issuer TO public_key; + +ALTER TABLE public.assets + ADD COLUMN display_decimals integer NOT NULL DEFAULT 7, + ADD COLUMN "name" text NOT NULL DEFAULT '', + ADD COLUMN "desc" text NOT NULL DEFAULT '', + ADD COLUMN conditions text NOT NULL DEFAULT '', + ADD COLUMN is_asset_anchored boolean NOT NULL DEFAULT FALSE, + ADD COLUMN fixed_number bigint NOT NULL DEFAULT 0, + ADD COLUMN max_number bigint NOT NULL DEFAULT 0, + ADD COLUMN is_unlimited boolean NOT NULL DEFAULT TRUE, + ADD COLUMN redemption_instructions text NOT NULL DEFAULT '', + ADD COLUMN collateral_addresses text NOT NULL DEFAULT '', + ADD COLUMN collateral_address_signatures text NOT NULL DEFAULT '', + ADD COLUMN countries text NOT NULL DEFAULT '', + ADD COLUMN "status" text NOT NULL DEFAULT ''; + +-- +migrate Down +ALTER TABLE public.assets + RENAME COLUMN public_key TO issuer; + +ALTER TABLE public.assets + DROP COLUMN display_decimals, + DROP COLUMN "name", + DROP COLUMN "desc", + DROP COLUMN conditions, + DROP COLUMN is_asset_anchored, + DROP COLUMN fixed_number, + DROP COLUMN max_number, + DROP COLUMN is_unlimited, + DROP COLUMN redemption_instructions, + DROP COLUMN collateral_addresses, + DROP COLUMN collateral_address_signatures, + DROP COLUMN countries, + DROP COLUMN "status"; diff --git a/services/ticker/internal/tickerdb/migrations/20190408155841-add_issuers_table.sql b/services/ticker/internal/tickerdb/migrations/20190408155841-add_issuers_table.sql new file mode 100644 index 0000000000..ae0b83b81f --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190408155841-add_issuers_table.sql @@ -0,0 +1,36 @@ + +-- +migrate Up +CREATE TABLE public.issuers ( + id serial NOT NULL PRIMARY KEY, + public_key text NOT NULL, + name text NOT NULL, + url text NOT NULL, + toml_url text NOT NULL, + federation_server text NOT NULL, + auth_server text NOT 
NULL, + transfer_server text NOT NULL, + web_auth_endpoint text NOT NULL, + deposit_server text NOT NULL, + org_twitter text NOT NULL +); + +-- Issuer public key should be unique +ALTER TABLE ONLY public.issuers + ADD CONSTRAINT public_key_unique UNIQUE (public_key); + +-- Add FK from assets to issuers +ALTER TABLE public.assets + ADD COLUMN issuer_id integer NOT NULL; + +ALTER TABLE public.assets + ADD CONSTRAINT fkey_assets_issuers FOREIGN KEY (issuer_id) REFERENCES issuers (id); + +-- Delete Public Key from assets +ALTER TABLE public.assets + DROP COLUMN "public_key"; + +ALTER TABLE ONLY public.assets + ADD CONSTRAINT assets_code_issuer_key UNIQUE (code, issuer_id); + +-- +migrate Down + diff --git a/services/ticker/internal/tickerdb/migrations/20190409152216-add_trades_table.sql b/services/ticker/internal/tickerdb/migrations/20190409152216-add_trades_table.sql new file mode 100644 index 0000000000..17d745cfad --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190409152216-add_trades_table.sql @@ -0,0 +1,25 @@ + +-- +migrate Up +CREATE TABLE trades ( + id serial NOT NULL PRIMARY KEY, + horizon_id text NOT NULL UNIQUE, + + ledger_close_time timestamptz NOT NULL, + offer_id text NOT NULL, + + base_offer_id text NOT NULL, + base_account text NOT NULL, + base_amount double precision NOT NULL, + base_asset_id integer REFERENCES assets (id), + + counter_offer_id text NOT NULL, + counter_account text NOT NULL, + counter_amount double precision NOT NULL, + counter_asset_id integer REFERENCES assets (id), + + base_is_seller boolean NOT NULL, + price double precision NOT NULL +); + +-- +migrate Down +DROP TABLE trades; diff --git a/services/ticker/internal/tickerdb/migrations/20190409172610-rename_assets_desc_description.sql b/services/ticker/internal/tickerdb/migrations/20190409172610-rename_assets_desc_description.sql new file mode 100644 index 0000000000..b9541ef122 --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190409172610-rename_assets_desc_description.sql @@ -0,0 +1,8 @@ + +-- +migrate Up +ALTER TABLE public.assets + RENAME COLUMN "desc" TO description; + +-- +migrate Down +ALTER TABLE public.assets + RENAME COLUMN description TO "desc"; diff --git a/services/ticker/internal/tickerdb/migrations/20190410094830-add_assets_issuer_account_field.sql b/services/ticker/internal/tickerdb/migrations/20190410094830-add_assets_issuer_account_field.sql new file mode 100644 index 0000000000..31236b2da8 --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190410094830-add_assets_issuer_account_field.sql @@ -0,0 +1,15 @@ + +-- +migrate Up +ALTER TABLE public.assets + ADD COLUMN issuer_account text NOT NULL; + +ALTER TABLE public.assets + DROP CONSTRAINT assets_code_issuer_key; + +ALTER TABLE ONLY public.assets + ADD CONSTRAINT assets_code_issuer_account UNIQUE (code, issuer_account); + + +-- +migrate Down +ALTER TABLE public.assets + DROP COLUMN issuer_account; diff --git a/services/ticker/internal/tickerdb/migrations/20190411165735-data_seed_and_indices.sql b/services/ticker/internal/tickerdb/migrations/20190411165735-data_seed_and_indices.sql new file mode 100644 index 0000000000..9fda89ad7f --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190411165735-data_seed_and_indices.sql @@ -0,0 +1,92 @@ + +-- +migrate Up +-- Seed Issuer for 'native' assets +INSERT INTO public.issuers ( + public_key, + name, + url, + toml_url, + federation_server, + auth_server, + transfer_server, + web_auth_endpoint, + deposit_server, + org_twitter +) VALUES ( 
+ 'native', + 'Stellar Development Foundation', + 'http://stellar.org', + '', + '', + '', + '', + '', + '', + 'https://twitter.com/stellarorg' +); + +INSERT INTO public.assets ( + code, + type, + num_accounts, + auth_required, + auth_revocable, + amount, + asset_controlled_by_domain, + anchor_asset_code, + anchor_asset_type, + is_valid, + validation_error, + last_valid, + last_checked, + display_decimals, + name, + description, + conditions, + is_asset_anchored, + fixed_number, + max_number, + is_unlimited, + redemption_instructions, + collateral_addresses, + collateral_address_signatures, + countries, + status, + issuer_id, + issuer_account +) VALUES ( + 'XLM', + 'native', + 0, + FALSE, + FALSE, + 0.0, + TRUE, + '', + '', + TRUE, + '', + now(), + now(), + 7, + 'Stellar Lumens', + '', + '', + FALSE, + 0, + 0, + FALSE, + '', + '', + '', + '', + '', + (SELECT id FROM public.issuers WHERE public_key = 'native' AND org_twitter = 'https://twitter.com/stellarorg'), + 'native' +); + +CREATE INDEX trades_ledger_close_time_idx ON public.trades (ledger_close_time DESC); + + +-- +migrate Down +DROP INDEX trades_ledger_close_time_idx; diff --git a/services/ticker/internal/tickerdb/migrations/20190425110313-add_orderbook_stats.sql b/services/ticker/internal/tickerdb/migrations/20190425110313-add_orderbook_stats.sql new file mode 100644 index 0000000000..5dba26e32b --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190425110313-add_orderbook_stats.sql @@ -0,0 +1,26 @@ + +-- +migrate Up +CREATE TABLE orderbook_stats ( + id serial NOT NULL PRIMARY KEY, + + base_asset_id integer REFERENCES assets (id) NOT NULL, + counter_asset_id integer REFERENCES assets (id) NOT NULL, + + num_bids bigint NOT NULL, + bid_volume double precision NOT NULL, + highest_bid double precision NOT NULL, + + num_asks bigint NOT NULL, + ask_volume double precision NOT NULL, + lowest_ask double precision NOT NULL, + + spread double precision NOT NULL, + spread_mid_point double precision NOT NULL, + + updated_at timestamptz NOT NULL +); +ALTER TABLE ONLY public.orderbook_stats + ADD CONSTRAINT orderbook_stats_base_counter_asset_key UNIQUE (base_asset_id, counter_asset_id); + +-- +migrate Down +DROP TABLE orderbook_stats; diff --git a/services/ticker/internal/tickerdb/migrations/20190426092321-add_aggregated_orderbook_view.sql b/services/ticker/internal/tickerdb/migrations/20190426092321-add_aggregated_orderbook_view.sql new file mode 100644 index 0000000000..844fc41c74 --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/20190426092321-add_aggregated_orderbook_view.sql @@ -0,0 +1,20 @@ + +-- +migrate Up +CREATE OR REPLACE VIEW aggregated_orderbook AS + SELECT + concat(bAsset.code, '_', cAsset.code) as trade_pair_name, + bAsset.code as base_asset_code, + cAsset.code as counter_asset_code, + COALESCE(sum(os.num_bids), 0) AS num_bids, + COALESCE(sum(os.bid_volume), 0.0) AS bid_volume, + COALESCE(max(os.highest_bid), 0.0) AS highest_bid, + COALESCE(sum(os.num_asks), 0) AS num_asks, + COALESCE(sum(os.ask_volume), 0.0) AS ask_volume, + COALESCE(min(os.lowest_ask), 0.0) AS lowest_ask + FROM orderbook_stats AS os + JOIN assets AS bAsset ON os.base_asset_id = bAsset.id + JOIN assets AS cAsset on os.counter_asset_id = cAsset.id + GROUP BY trade_pair_name, base_asset_code, counter_asset_code; + +-- +migrate Down +DROP VIEW IF EXISTS aggregated_orderbook; diff --git a/services/ticker/internal/tickerdb/migrations/bindata.go b/services/ticker/internal/tickerdb/migrations/bindata.go new file mode 100644 index 
0000000000..782c917fd1 --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations/bindata.go @@ -0,0 +1,480 @@ +// Code generated by go-bindata. DO NOT EDIT. +// sources: +// migrations/20190404184050-initial.sql (821B) +// migrations/20190405112544-increase_asset_code_size.sql (366B) +// migrations/20190408115724-add_new_asset_fields.sql (1.371kB) +// migrations/20190408155841-add_issuers_table.sql (950B) +// migrations/20190409152216-add_trades_table.sql (628B) +// migrations/20190409172610-rename_assets_desc_description.sql (168B) +// migrations/20190410094830-add_assets_issuer_account_field.sql (344B) +// migrations/20190411165735-data_seed_and_indices.sql (1.522kB) +// migrations/20190425110313-add_orderbook_stats.sql (749B) +// migrations/20190426092321-add_aggregated_orderbook_view.sql (831B) + +package bdata + +import ( + "bytes" + "compress/gzip" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %q: %w", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo + digest [sha256.Size]byte +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _migrations20190404184050InitialSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x93\x41\x8f\x9b\x30\x10\x85\xef\xfe\x15\x73\x64\xd5\xdd\x4a\xad\xaa\x5e\xf6\x44\x17\x2a\x45\xa5\xb0\xa5\x70\xc8\xc9\x32\x66\x14\x46\x6b\x3c\xd4\x36\x49\xe9\xaf\xaf\x20\x69\xab\xa4\x64\x95\x23\xf8\x7b\x6f\xac\xe7\x37\xe2\xe1\x01\xde\xf4\xb4\x73\x2a\x20\xd4\x83\x10\x4f\x65\x1a\x57\x29\x54\xf1\xa7\x2c\x85\x61\x6c\x0c\xe9\xb7\xca\x7b\x0c\x1e\x22\x01\x00\x40\x2d\x78\x74\xa4\x0c\xe4\x45\x05\x79\x9d\x65\xf0\x5c\x6e\xbe\xc6\xe5\x16\xbe\xa4\xdb\xfb\x85\xd1\xdc\x22\xe8\x4e\x39\xa5\x03\x3a\xd8\x2b\x37\x91\xdd\x45\xef\xde\xdf\xfd\x15\x1d\x41\xf2\x7e\x44\x07\x01\x7f\x86\x8b\x93\x30\x0d\x6b\x16\x1f\x3f\x5c\x5a\xd8\xb1\x97\x4a\x6b\x1e\x6d\xf0\x40\x36\xe0\x0e\xdd\x05\xa2\xc6\xd0\x49\x87\x3f\x46\x72\xd8\x42\xc3\x6c\x50\xd9\x75\x66\xcf\x5a\x35\x06\xaf\x41\xfd\x3c\x06\x5a\x1e\x67\x66\x70\xa8\xc9\x13\xff\x47\xcd\x71\x49\xcd\x36\x38\x36\x06\x5b\xd9\x4c\xb2\xe5\x5e\x91\xbd\x66\x6b\x75\xc7\x4e\xfe\xd1\xdd\x98\xdd\x99\xea\xe6\xb8\xc8\xcb\xbd\x32\x74\x2d\x86\xe5\x4c\x05\x62\x2b\xd1\x39\x5e\x7d\x1a\xa3\x7c\x38\x99\x04\xea\xd1\x07\xd5\x0f\x70\xa0\xd0\x2d\x9f\xf0\x8b\x2d\xae\x29\x74\x87\xfa\x05\x5f\xd1\x24\xe9\xe7\xb8\xce\x2a\xb0\x7c\x88\xfe\x5d\x5b\xdc\x3d\x0a\x11\x67\x55\x5a\x9e\x4a\x59\xe4\xd9\xf6\xbc\x99\xcb\x8c\x38\x49\xe0\xa9\xc8\xbf\x57\x65\xbc\xc9\xab\xe3\x23\xf8\x25\x4d\x79\x6c\x99\x7c\xc1\x09\xea\x7c\xf3\xad\x4e\x21\x9a\xff\xdf\x9f\xea\x37\x0f\x38\xdb\x83\x84\x0f\x56\x88\xa4\x2c\x9e\xd7\xf6\xe0\x51\xfc\x0e\x00\x00\xff\xff\x48\x16\x89\x51\x35\x03\x00\x00") + +func 
migrations20190404184050InitialSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190404184050InitialSql, + "migrations/20190404184050-initial.sql", + ) +} + +func migrations20190404184050InitialSql() (*asset, error) { + bytes, err := migrations20190404184050InitialSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190404184050-initial.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x48, 0xaa, 0xc8, 0x33, 0x90, 0xe0, 0xbb, 0xf3, 0xa8, 0x5c, 0xb6, 0x9b, 0x50, 0xfb, 0xcb, 0x43, 0x16, 0x5d, 0x4d, 0xbd, 0x24, 0xec, 0xda, 0x11, 0x2d, 0x5c, 0x5f, 0xe, 0x7c, 0xaf, 0xd9, 0xb3}} + return a, nil +} + +var _migrations20190405112544Increase_asset_code_sizeSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe2\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x28\x4d\xca\xc9\x4c\xd6\x4b\x2c\x2e\x4e\x2d\x29\xe6\x52\x50\x50\x50\x80\xc8\x3a\xfb\xfb\x84\xfa\xfa\x29\x24\xe7\xa7\xa4\x2a\x94\x54\x16\xa4\x2a\x24\x67\x24\x16\x25\x26\x97\xa4\x16\x29\x94\x25\x16\x55\x66\xe6\xa5\x6b\x98\x99\x68\x5a\x73\x91\x62\x58\x62\x5e\x72\x46\x7e\x51\x3c\x58\x3a\x9e\xb0\xc9\xc8\xae\x76\xc9\x2f\xcf\xa3\x96\xbb\x0d\x8d\x68\xe5\x6e\xb0\xc9\x80\x00\x00\x00\xff\xff\x5e\x84\x69\x2a\x6e\x01\x00\x00") + +func migrations20190405112544Increase_asset_code_sizeSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190405112544Increase_asset_code_sizeSql, + "migrations/20190405112544-increase_asset_code_size.sql", + ) +} + +func migrations20190405112544Increase_asset_code_sizeSql() (*asset, error) { + bytes, err := migrations20190405112544Increase_asset_code_sizeSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190405112544-increase_asset_code_size.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa1, 0x95, 0x4e, 0xf5, 0x5, 0xec, 0xe3, 0x22, 0x80, 0x7a, 0x8e, 0xdc, 0xda, 0x65, 0x26, 0x33, 0xea, 0x37, 0xb7, 0xd0, 0x59, 0x70, 0xe, 0x61, 0x6a, 0xe8, 0x44, 0x7e, 0x9, 0x1, 0x43, 0x46}} + return a, nil +} + +var _migrations20190408115724Add_new_asset_fieldsSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x93\x4d\x8f\x9b\x30\x10\x86\xef\xfc\x8a\x51\x2e\x39\x34\xa9\x7a\xeb\x21\x27\x5a\xc8\x89\x40\x45\xe1\x6c\x19\x3c\xa5\xa3\x1a\x83\x3c\xb6\x9a\xfc\xfb\x2a\x1f\xdd\x64\xd7\x64\x13\xb4\x57\xfb\x7d\x07\x33\x7a\x9e\x68\xbd\x86\x4f\x3d\x75\x56\x3a\x84\x7a\x8c\xe2\xac\x4a\x4b\xa8\xe2\x6f\x59\x0a\xa3\x6f\x34\xb5\x9f\x25\x33\x3a\x8e\x00\x00\xca\x34\x8f\x77\x29\x7c\x2f\xb2\x7a\x97\x03\x31\x7b\xb4\x50\x15\x97\xa4\xf8\x83\x87\x4d\xf4\x60\x44\x9c\x24\xff\xfb\x8a\x78\xd4\xf2\x20\x14\xb6\xd4\x4b\xcd\x40\xc6\x61\x87\x16\xf2\xa2\x82\xbc\xce\x32\x48\xd2\x6d\x5c\x67\x15\x7c\x5d\xbd\xed\x2e\x8c\xec\x71\x01\x0e\xf7\x2e\x8c\x2f\x97\x61\x5e\x21\xb7\x33\xf2\xed\x60\x14\x39\x1a\x0c\x3f\xdf\x21\x16\xa7\x1f\x15\xd2\xb4\xbf\x07\x8b\x0a\x9a\x61\xd0\x28\x4d\xd8\xde\xc6\xd9\xcf\x34\x18\xf0\x8b\xf6\xa8\x84\xf1\x7d\x83\x16\x1a\xea\xc8\x4c\x7c\xf8\x4b\x50\xeb\xe5\x7e\x7e\x89\x58\x78\xa3\xa9\x27\xf7\xde\x3b\xab\xb2\x0e\x9f\x69\x51\x61\x3f\x1e\x97\x23\xc8\xb0\xb3\xbe\x9d\xb9\xa8\x76\xd0\x5a\x3a\xb4\x52\x0b\xa9\x94\x45\x66\xfc\x50\x5b\x30\x75\x46\x3a\x6f\xe7\x8d\xf1\xc6\x59\x9a\x53\x59\xb0\x93\xce\xf3\x7d\x8c\x36\xd1\x2b\x9f\x92\xe1\xaf\x99\x65\xd4\xd5\xa3\xa3\x55\x67\xbf\x1e\x1a\x95\x94\xc5\x8f\x7b\x4a\xad\x82\xc4\x59\x9c\x89\xf3\x93\x20\xe1\xf9\x55\x84\xf0\x2e\x00\x3e\x8c\xdc\x22\x1d\xde\x5e\xc9\x9d\x1c\xfe\x02\x68\x78\x7b\x87\xc1\xa9\xf7\x87\xac\x3d\x93\xba\x61\x6a\x2a\x7e\x61\x67\x62\x8f\x17\x46\x36\xd1\xbf\x00\x00\x00\xff\xff\xd3\x88\x81\xf2\x5b\x05\x00\x00") + +func migrations20190408115724Add_new_asset_fieldsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190408115724Add_new_asset_fieldsSql, + "migrations/20190408115724-add_new_asset_fields.sql", + ) +} + +func migrations20190408115724Add_new_asset_fieldsSql() (*asset, error) { + bytes, err := migrations20190408115724Add_new_asset_fieldsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190408115724-add_new_asset_fields.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc1, 0xe2, 0x30, 0xe, 0x77, 0x54, 0xdb, 0xeb, 0xc5, 0xb0, 0x0, 0xac, 0x7b, 0xf2, 0xc8, 0xc8, 0x77, 0xb8, 0xa3, 0x8f, 0x56, 0xf2, 0x7a, 0xa, 0x7f, 0x11, 0xa6, 0x76, 0x54, 0x87, 0x5, 0x80}} + return a, nil +} + +var _migrations20190408155841Add_issuers_tableSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x53\xc1\x72\xda\x30\x10\xbd\xeb\x2b\x76\x72\x22\xd3\xa4\x3f\x90\x93\x8b\x45\xc7\x83\x23\x53\xc5\x3e\x70\xd2\x98\x68\x21\x9a\x1a\x89\x4a\xeb\xd2\xfc\x7d\xc7\xb6\x30\x1e\x06\x68\xaf\xfb\x9e\x76\xf7\xbd\xb7\x62\xcf\xcf\xf0\x65\x6f\x76\xbe\x26\x84\xea\xc0\xe6\x92\x27\x25\x87\x32\xf9\x96\x73\x38\xb4\x9b\xc6\xbc\x7f\x35\x21\xb4\xe8\x03\xcc\x18\x00\x80\xd1\x10\xd0\x9b\xba\x01\x51\x94\x20\xaa\x3c\x87\x95\xcc\x5e\x13\xb9\x86\x25\x5f\x3f\xf5\x9c\xe1\xa1\xfa\x89\x9f\x40\xf8\x87\x46\xe6\x80\xda\x7a\x8f\xd7\xea\xad\x6f\xae\x95\xc9\xed\x1b\x75\x03\xdb\xa2\x46\x5f\x93\x71\x56\x05\xf4\xbf\xd1\x5f\x23\xd5\x2d\x7d\xdc\x81\xc9\xd7\x36\x6c\xd1\xdf\xa1\x1c\x71\xa3\xfa\x2e\x68\xf5\xc1\x19\x4b\xd7\x48\x1a\x0f\x2e\x18\xba\xd3\xc6\xf9\x9d\xa2\xa3\x21\xba\x84\xd9\xe3\x0b\xeb\x92\xc8\x7a\xa7\xa3\x7d\xd0\xd9\x17\x3e\x5c\xdb\x68\xd8\x20\xb4\xd6\xfc\x6a\x91\x25\x79\xc9\x65\xcc\xa7\x10\xf9\xfa\x22\xa4\x7e\x4e\x92\xa6\x30\x2f\xc4\x5b\x29\x93\x4c\x94\x93\x34\xd4\xd0\x04\x2a\x91\xfd\xa8\x38\xcc\xce\x48\x5c\x20\xd1\x1a\x16\x4b\xd8\x7a\xb7\x87\x3a\x04\xa4\x00\xe4\xe0\xd4\x7b\x3a\x3b\x8e\x1d\x48\x93\xa9\x79\xf5\x2a\xe2\x03\x65\x34\x18\x4b\xb8\x43\x3f\x2a\x7d\x61\xff\xd5\x65\xdc\x7d\xdb\x6d\x3d\xc0\xea\x74\x87\x8b\x42\xf2\xec\xbb\xe8\xce\x0d\x66\xe3\xa8\x47\x90\x7c\xc1\x25\x17\x73\xfe\x06\xe3\xc9\x1a\x1d\x95\xa5\xd8\x20\x21\xac\x06\x6b\x97\xf8\x39\x15\xf9\x8f\x9d\x52\x59\xac\x4e\xd2\x1e\xce\x9e\x3d\x5c\x88\x99\xc6\x71\x5b\x51\x14\xf3\xee\x34\x46\x45\xfd\x3f\x39\x45\xd2\xd5\x9f\xce\x06\xc6\xed\xc7\x2f\x9a\xba\xa3\x65\xec\x6f\x00\x00\x00\xff\xff\xda\x82\x34\x58\xb6\x03\x00\x00") + +func migrations20190408155841Add_issuers_tableSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190408155841Add_issuers_tableSql, + "migrations/20190408155841-add_issuers_table.sql", + ) +} + +func migrations20190408155841Add_issuers_tableSql() (*asset, error) { + bytes, err := migrations20190408155841Add_issuers_tableSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190408155841-add_issuers_table.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7b, 0xc6, 0x44, 0x9b, 0x7f, 0x20, 0x98, 0x1, 0xc2, 0x25, 0x17, 0x73, 0x3b, 0x41, 0xb1, 0xc4, 0xbe, 0xe4, 0x94, 0xd8, 0x54, 0x9c, 0x6e, 0x6e, 0xd0, 0x38, 0x7a, 0xbf, 0x24, 0xd9, 0x26, 0x6e}} + return a, nil +} + +var _migrations20190409152216Add_trades_tableSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x92\x41\x6e\xc2\x30\x10\x45\xf7\x3e\xc5\x2c\x41\x85\x13\xb0\x4a\xc1\x95\x50\xd3\x40\xdd\x64\xc1\x2a\x72\xe2\x21\x1d\xc9\x89\x23\x8f\x51\x2b\x4e\x5f\xe1\xd2\x54\xb4\x0d\x2a\x9b\x6c\xfe\xcb\xff\xcf\xd2\x88\xf9\x1c\xee\x5a\x6a\xbc\x0e\x08\x45\x2f\x96\x4a\x26\xb9\x84\x3c\xb9\x4f\x25\x04\xaf\x0d\x32\x4c\x04\x00\x00\x19\x60\xf4\xa4\x2d\x64\x9b\x1c\xb2\x22\x4d\x61\xab\xd6\x4f\x89\xda\xc1\xa3\xdc\xcd\x22\xf3\xea\x3c\x1d\x5d\x57\x92\x81\x80\xef\xe1\x9b\x2c\xb2\xf5\x73\x21\x67\x22\x52\x16\x4d\x83\xbe\xac\xad\x63\x2c\x03\xb5\x08\xa7\x0f\x07\xdd\xf6\xe1\x38\xfc\xf3\xd9\xe8\xf6\x7b\xf4\xbf\xfa\xce\x45\x95\x66\x2c\xc7\x88\x01\xd0\x75\xed\x0e\x5d\x18\xcf\xdb\x18\x1b\x77\xa8\x2c\x42\xef\xb1\x26\x26\xd7\xfd\x89\x32\x63\x38\x6d\x51\x17\xb0\x41\x0f\x4a\x3e\x48\x25\xb3\xa5\x7c\x81\x98\x31\x4c\xc8\x4c\xcf\x7a\x71\x16\xfd\x55\xc3\x2f\xe6\x8a\xe4\x80\xfc\xcb\x73\xa0\x6f\x52\x8d\xaf\x23\x2e\x19\xad\x45\x0f\x95\x73\x16\xf5\xcf\xea\xde\x53\x8d\xe3\xfb\x62\xba\x10\x17\xe7\xb4\x72\x6f\x9d\x58\xa9\xcd\xf6\xe2\x9c\x16\xe2\x23\x00\x00\xff\xff\xab\x60\xb3\x30\x74\x02\x00\x00") + +func migrations20190409152216Add_trades_tableSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190409152216Add_trades_tableSql, + "migrations/20190409152216-add_trades_table.sql", + ) +} + +func migrations20190409152216Add_trades_tableSql() (*asset, error) { + bytes, err := migrations20190409152216Add_trades_tableSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190409152216-add_trades_table.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x47, 0xc1, 0xda, 0x1d, 0x61, 0x7c, 0xb6, 0x5d, 0x7d, 0xed, 0x2a, 0xd0, 0x8e, 0x4e, 0x24, 0x3d, 0x94, 0xd8, 0x88, 0x47, 0x2d, 0x5b, 0x65, 0xc1, 0x57, 0xf, 0xc6, 0x22, 0x39, 0xf7, 0xe9, 0x81}} + return a, nil +} + +var _migrations20190409172610Rename_assets_desc_descriptionSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xe2\xd2\xd5\x55\xd0\xce\xcd\x4c\x2f\x4a\x2c\x49\x55\x08\x2d\xe0\x72\xf4\x09\x71\x0d\x52\x08\x71\x74\xf2\x71\x55\x28\x28\x4d\xca\xc9\x4c\xd6\x4b\x2c\x2e\x4e\x2d\x29\xe6\x52\x50\x50\x50\x08\x72\xf5\x73\xf4\x75\x55\x70\xf6\xf7\x09\xf5\xf5\x53\x50\x4a\x49\x2d\x4e\x56\x52\x08\xf1\x57\x00\x31\x8a\x32\x0b\x4a\x32\xf3\xf3\xac\xb9\x50\x8c\x74\xc9\x2f\xcf\x23\xc9\x50\x24\xa3\x40\x26\x43\xec\xb0\xe6\x02\x04\x00\x00\xff\xff\x80\x17\x6b\xa4\xa8\x00\x00\x00") + +func migrations20190409172610Rename_assets_desc_descriptionSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190409172610Rename_assets_desc_descriptionSql, + "migrations/20190409172610-rename_assets_desc_description.sql", + ) +} + +func migrations20190409172610Rename_assets_desc_descriptionSql() (*asset, error) { + bytes, err := migrations20190409172610Rename_assets_desc_descriptionSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190409172610-rename_assets_desc_description.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa7, 0x71, 0xc8, 0x2f, 0x71, 0x22, 0xa8, 0x42, 0x2b, 0xa, 0x0, 0x5, 0x51, 0x15, 0x9, 0x8b, 0xd6, 0x45, 0x72, 0x29, 0x8e, 0x38, 0x9d, 0x9f, 0x16, 0x84, 0x7b, 0xf, 0x29, 0x48, 0x3c, 0x3f}} + return a, nil +} + +var _migrations20190410094830Add_assets_issuer_account_fieldSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\xd0\xbf\xee\x82\x30\x1c\x04\xf0\xbd\x4f\x71\xe3\xef\x17\xc5\x17\x60\xaa\x94\x81\xa4\x7e\xab\xd8\x0e\x4e\x04\x6b\x63\x88\x0a\x84\x96\xa8\x6f\x6f\x8c\x3a\x48\xfc\x37\x5f\xf2\xc9\xdd\xb1\x28\xc2\xe8\x50\x6d\xbb\x32\x38\x98\x96\x71\xa9\xd3\x1c\x9a\x4f\x65\x8a\xb6\x5f\xef\x2b\x3b\x29\xbd\x77\xc1\x33\x00\xe0\x42\x20\x51\xd2\xcc\x08\x95\xf7\xbd\xeb\x8a\xd2\xda\xa6\xaf\x03\x82\x3b\x05\x90\xd2\x20\x23\x65\xcc\xbe\x38\x22\x57\x73\x24\x8a\x96\x3a\xe7\x19\x69\xdc\xa2\xc2\x36\x1b\x57\xdc\xe1\x9d\x3b\x0f\x18\x45\x72\xf5\xb6\xd3\x27\xea\xd1\xd1\x50\xb6\x30\x29\xfe\xae\xd9\x78\x30\xe0\x3f\x66\xec\xe9\x0b\xd1\x1c\xeb\xdf\x56\xbc\xb8\x23\x66\x97\x00\x00\x00\xff\xff\xd9\xd1\xc9\x0f\x58\x01\x00\x00") + +func migrations20190410094830Add_assets_issuer_account_fieldSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190410094830Add_assets_issuer_account_fieldSql, + "migrations/20190410094830-add_assets_issuer_account_field.sql", + ) +} + +func migrations20190410094830Add_assets_issuer_account_fieldSql() (*asset, error) { + bytes, err := migrations20190410094830Add_assets_issuer_account_fieldSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190410094830-add_assets_issuer_account_field.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2, 0xed, 0xce, 0xda, 0x59, 0x78, 0xcb, 0xa0, 0x4c, 0xd2, 0x4a, 0x45, 0x42, 0x10, 0x70, 0xc0, 0xbe, 0x2b, 0x11, 0x6c, 0x85, 0x92, 0x0, 0xdd, 0xd9, 0xb1, 0x74, 0x79, 0x30, 0x78, 0xfc, 0x4}} + return a, nil +} + +var _migrations20190411165735Data_seed_and_indicesSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x54\x4d\x6f\x1a\x3d\x10\xbe\xef\xaf\xf0\x2d\xa0\x37\x81\xdc\x5e\x29\x51\x0f\x28\x6c\x54\x24\x02\x15\x90\x36\x37\xcb\xd8\x03\x58\xf1\xc7\xd6\x33\x86\xf0\xef\xab\xdd\xb5\x13\x36\x24\x6a\x7a\xda\xf9\x78\x3c\xf3\xec\x7c\x15\x57\x57\xec\x3f\xab\xb7\x41\x10\xb0\xc7\xaa\x56\x97\x00\x8a\x4d\x10\x23\x04\xb6\xf1\x81\x5d\x38\x41\x7a\x0f\x17\x4c\x20\x02\x61\x31\x99\x2d\xcb\xc5\x8a\x4d\x66\xab\x39\xab\xe2\xda\x68\x39\xd0\x0d\x1a\x59\xaf\x60\x8c\x25\x23\x7f\x86\xe3\x65\xa3\x3b\x61\xa1\x95\x62\x30\xad\x40\xde\x1a\xfe\xaa\x6d\x40\x41\x10\xa4\xbd\xe3\x08\x61\x0f\xa1\x35\x8b\x48\xbb\x8e\x81\x82\x70\xb8\x81\xd0\x31\x1e\x60\xcd\x1b\x24\x38\x55\x79\xed\xa8\x35\x2b\xa8\x3c\x6a\xea\x40\x7d\xd8\x72\x3a\x68\x22\x08\x45\x9f\xfd\x1c\x4d\x1f\xcb\x65\xe2\x9c\x7f\xb2\x05\x5e\x2c\x09\x8c\x11\x81\x8d\x61\x0f\xc6\x57\x16\x1c\xb1\x7b\x1f\x9d\x6a\x58\x66\xd4\x8e\xa8\xba\x19\x0e\xb1\x05\x0f\x7c\xd8\x66\xcf\xbf\x7e\xeb\x48\x78\x33\x1c\x26\x76\x03\xe9\x6d\x0e\x5b\x47\x2d\xfa\xb7\xc5\x47\x75\x6f\x3b\x92\x7e\x41\x7a\x95\xca\x4c\xc7\x2a\x49\x2e\x5a\x2e\xa4\xf4\xd1\x11\x9e\x14\x35\xc0\xef\xa8\x03\xa8\x8e\x69\xef\xa5\x58\x9b\xf4\x50\xd8\xfa\x4d\x92\xeb\x2c\x5c\x7a\x47\xc1\x1b\x03\x8a\xaf\x8f\x5c\x79\x2b\xb4\x4b\x7e\x27\x77\x3e\xf0\x0c\xcb\x2c\x3a\xe6\x37\x4a\x1a\xf9\x5e\x18\x9d\x72\x37\x62\xdb\x7a\x08\xc1\xa7\x46\x19\x81\x74\x8a\x6a\x74\xb9\x03\xf9\x9c\x39\x2b\x8d\x95\x11\x47\xae\x40\x6a\x2b\x0c\xbe\x9f\x34\x05\x28\x83\xae\xea\xc0\x97\xa9\x3a\x4e\xe9\x5a\xc5\x57\x1a\x2d\xb3\x96\x66\x8e\xbb\xd1\x2f\xa0\xb8\x8b\x76\x9d\x87\xc6\x8a\x97\x8e\xae\x91\x47\x67\xb4\xd5\x94\xdf\x04\x50\x60\x9b\x54\x5c\x3b\xa4\x10\xe5\x49\x1e\xe9\x8d\x11\x04\x41\x18\x2e\x94\x0a\x80\x08\x9f\x7a\x38\xea\xad\x13\x14\xc3\x1b\x24\x3a\x0a\x3a\xab\x48\x82\xe2\x2b\xff\x7a\xe5\x78\xae\x50\x52\x53\xaf\xcf\xa6\xfb\x69\xfa\x90\x47\xad\x33\xe8\xd7\xed\xe7\x7e\x34\x5d\x
96\x67\xe2\xf5\x20\xb9\x57\x8b\xc7\xf2\xe3\xc1\x3d\xf7\x38\x7f\xe8\xf5\xcf\xc4\xff\xdf\xed\xd5\x34\x5a\x70\xf8\xc9\x36\x9c\x52\xf8\x8c\xe6\x57\x37\xab\xb7\x2c\xa7\xe5\xdd\x8a\x69\xc5\xee\x17\xf3\x87\xf7\x07\xeb\xd7\xf7\x72\x51\x9e\x1c\x2c\xf6\xed\xed\xdc\x8d\x66\xe3\xd3\x8b\x51\xbb\xfe\xb2\xa6\xfd\x6e\x89\x9b\xad\xbd\x5b\x94\xa3\x55\xc9\x26\xb3\x71\xf9\x54\x1f\x30\x05\xc8\x0d\xa8\x2d\x04\x2e\x8d\x47\xe0\xa4\x2d\x70\xad\x5e\xd8\x7c\x96\xd9\xb5\x30\xd6\x3b\xc3\xb1\x71\xb9\xbc\xab\xa3\x76\xce\xf6\xd8\x1f\x5c\x31\x5e\xcc\x7f\x7c\x21\xcb\x6d\xf1\x27\x00\x00\xff\xff\xa1\x61\x7e\x4c\xf2\x05\x00\x00") + +func migrations20190411165735Data_seed_and_indicesSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190411165735Data_seed_and_indicesSql, + "migrations/20190411165735-data_seed_and_indices.sql", + ) +} + +func migrations20190411165735Data_seed_and_indicesSql() (*asset, error) { + bytes, err := migrations20190411165735Data_seed_and_indicesSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190411165735-data_seed_and_indices.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xa1, 0x54, 0x4a, 0x71, 0x6e, 0x9c, 0xc8, 0x5f, 0xd, 0xd6, 0xfc, 0x34, 0x65, 0x24, 0x53, 0xa1, 0x90, 0xf9, 0x33, 0x66, 0x76, 0x3a, 0x62, 0x69, 0xc6, 0x47, 0xea, 0x4f, 0xfd, 0x4c, 0xe, 0x96}} + return a, nil +} + +var _migrations20190425110313Add_orderbook_statsSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x92\xcd\x6e\xea\x30\x10\x85\xf7\x7e\x8a\x59\x82\x2e\xdc\x17\x60\x95\x4b\x7c\x25\xd4\x34\xa1\x26\x59\xb0\xb2\x1c\x3c\x0a\xa3\xfc\x38\xf2\x38\x45\xed\xd3\x57\x44\x15\x15\x69\x0b\x55\xd7\x3e\xe7\x7c\x23\xf9\x13\xcb\x25\xfc\x69\xa9\xf2\x26\x20\x14\xbd\x58\x2b\x19\xe5\x12\xf2\xe8\x5f\x22\xc1\x79\x8b\xbe\x74\xae\xd6\x1c\x4c\x60\x98\x09\x00\x00\xb2\xc0\xe8\xc9\x34\x90\x66\x39\xa4\x45\x92\xc0\x56\x6d\x1e\x23\xb5\x87\x07\xb9\x5f\x88\x31\x54\x1a\x46\x6d\x98\x31\x68\xb2\x40\x5d\xc0\x0a\x3d\x28\xf9\x5f\x2a\x99\xae\xe5\x0e\xc6\x37\x86\x19\xd9\xf9\x65\x67\x31\x56\x0f\x6e\xe8\x02\xfa\x5f\xb4\xc7\x7a\x37\xb4\xba\x24\xcb\x50\x52\x45\x5d\x98\x8c\x97\x64\xf5\xb3\x6b\x86\x16\xc1\xba\xa1\x6c\x10\x7a\x8f\x07\x62\x72\xdd\x24\x79\xa4\xea\x88\x1c\xce\x5b\xb7\xa2\x17\xa6\xe1\xfa\x1b\xa6\xe1\xfa\x87\xcc\xc6\x9d\xce\x48\xc3\xf5\x5d\x24\xf7\x1e\xcd\xcd\xcb\x3e\x52\xba\x25\xab\x7b\x77\xbe\xec\xde\xec\xd0\x5b\x13\xd0\x6a\x13\x20\x50\x8b\x1c\x4c\xdb\x87\xd7\x4b\x4a\xcc\x57\x22\x4a\x72\xa9\xde\x0d\xc9\xd2\x64\x0f\xfd\x50\x36\x74\xf8\x3b\xb1\x65\x9c\x8b\xe2\x18\xd6\x59\xba\xcb\x55\xb4\x49\xf3\xa9\x50\x7a\xb4\xe4\xfa\xbf\x6b\x7c\x81\x22\xdd\x3c\x15\x12\x66\x57\x12\x2d\x3e\x89\x31\x5f\x89\x2b\x7d\x63\x77\xea\x44\xac\xb2\xed\xd7\xfa\xae\xc4\x5b\x00\x00\x00\xff\xff\x06\x01\x94\xcd\xed\x02\x00\x00") + +func migrations20190425110313Add_orderbook_statsSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190425110313Add_orderbook_statsSql, + "migrations/20190425110313-add_orderbook_stats.sql", + ) +} + +func migrations20190425110313Add_orderbook_statsSql() (*asset, error) { + bytes, err := migrations20190425110313Add_orderbook_statsSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190425110313-add_orderbook_stats.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x87, 0xb0, 0x88, 0x90, 0x9, 0x84, 0x2b, 0xf7, 0x33, 0xa4, 0x8c, 0x91, 0xc8, 0x6a, 0x90, 0x7b, 0x6b, 0x28, 0x70, 0x69, 0xf7, 0x1, 0x8c, 
0x89, 0xa1, 0xe3, 0x11, 0xc0, 0x5e, 0x8f, 0x47, 0x0}} + return a, nil +} + +var _migrations20190426092321Add_aggregated_orderbook_viewSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x93\xcf\x6e\xb2\x40\x14\xc5\xf7\x3c\xc5\xdd\xa9\xf9\xd0\x7c\x7b\xd3\x05\xc5\xb1\xa1\xb1\x62\x40\xfb\x67\x35\xb9\x30\x13\x9c\x20\x8c\xe1\x0e\xb5\x8f\xdf\x0c\x44\x81\x88\x65\x45\x0e\xf7\x77\x39\x99\x73\xc6\x99\xcf\xe1\x5f\xa1\xb2\x0a\x8d\x84\xc3\xd9\xf1\x23\xe6\xed\x19\x84\x11\x44\x6c\xb7\xf1\x7c\x06\xef\x01\xfb\x00\xcc\xb2\x4a\x66\x68\xa4\xe0\xba\x12\xb2\x4a\xb4\xce\xc1\x8b\x1d\x00\x80\x98\x6d\x98\xbf\x6f\x5e\xed\x93\xea\x32\x45\x33\x4d\x3c\x22\x69\x16\xa9\x16\xd2\x85\x09\x9f\xb8\x90\x76\xca\x0c\x90\xc0\x54\x28\x24\x3f\xa3\xaa\x78\x89\x85\x74\x6f\x1b\x7a\xa8\x9d\x4b\x90\x24\x47\x2b\xf1\x66\x5b\xf7\xa7\xe1\x5c\xaa\xeb\xd2\xc8\x6a\x74\xd4\x0f\xbd\x0d\x8b\x7d\x36\xa5\xba\x98\x6a\x5a\x94\x75\xc1\x13\x25\x68\xe6\xc2\xff\x19\x78\x31\x5c\x85\xc7\x48\xa2\x04\xff\xd6\xa7\xba\x90\x16\x5a\xb4\x58\x27\x8e\x80\x05\xfe\x58\xf0\xa8\xb2\xa3\x24\x63\xd7\xf7\xc8\x9e\xfa\xb7\x4d\xa4\x7c\x68\xd3\x0a\x8f\x11\xa4\xfc\xde\x66\x27\x8e\xd9\x54\xa5\x05\x4f\xfa\x62\xfd\x20\xe5\x3d\xb0\x13\x1b\x6e\x1d\x85\x6f\x70\x2b\x00\x27\x83\x86\xec\x98\xa6\xe6\xf3\x6b\x18\x6c\xa1\x39\xfe\x46\x6d\x73\x84\x70\x0b\xf6\xf8\xba\x14\x95\x80\xa7\x6b\xc8\x4a\x8c\x91\x6d\xb2\xa0\x4b\x4b\x0e\x73\x6d\xe0\x74\x00\xbf\x44\xe1\x61\x07\xcf\x5f\x77\x8d\xba\xab\xce\x48\x47\x96\xce\xe0\x0a\xac\xf4\xa5\x74\x56\x51\xb8\x6b\x7b\x1f\xac\x81\x7d\x06\xf1\x3e\x1e\xbd\x01\x4b\xe7\x37\x00\x00\xff\xff\x72\xc3\x7e\xff\x3f\x03\x00\x00") + +func migrations20190426092321Add_aggregated_orderbook_viewSqlBytes() ([]byte, error) { + return bindataRead( + _migrations20190426092321Add_aggregated_orderbook_viewSql, + "migrations/20190426092321-add_aggregated_orderbook_view.sql", + ) +} + +func migrations20190426092321Add_aggregated_orderbook_viewSql() (*asset, error) { + bytes, err := migrations20190426092321Add_aggregated_orderbook_viewSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "migrations/20190426092321-add_aggregated_orderbook_view.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xca, 0xb7, 0xc5, 0x31, 0xb1, 0x11, 0xad, 0xf1, 0x3e, 0x39, 0x2, 0x77, 0x17, 0xb8, 0xb7, 0x6b, 0xb2, 0x37, 0x56, 0x71, 0x49, 0xc9, 0x56, 0x43, 0xc5, 0x18, 0x54, 0xae, 0xd1, 0xbb, 0xa4, 0x5e}} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. 
+func MustAssetString(name string) string { + return string(MustAsset(name)) +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "migrations/20190404184050-initial.sql": migrations20190404184050InitialSql, + "migrations/20190405112544-increase_asset_code_size.sql": migrations20190405112544Increase_asset_code_sizeSql, + "migrations/20190408115724-add_new_asset_fields.sql": migrations20190408115724Add_new_asset_fieldsSql, + "migrations/20190408155841-add_issuers_table.sql": migrations20190408155841Add_issuers_tableSql, + "migrations/20190409152216-add_trades_table.sql": migrations20190409152216Add_trades_tableSql, + "migrations/20190409172610-rename_assets_desc_description.sql": migrations20190409172610Rename_assets_desc_descriptionSql, + "migrations/20190410094830-add_assets_issuer_account_field.sql": migrations20190410094830Add_assets_issuer_account_fieldSql, + "migrations/20190411165735-data_seed_and_indices.sql": migrations20190411165735Data_seed_and_indicesSql, + "migrations/20190425110313-add_orderbook_stats.sql": migrations20190425110313Add_orderbook_statsSql, + "migrations/20190426092321-add_aggregated_orderbook_view.sql": migrations20190426092321Add_aggregated_orderbook_viewSql, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + canonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(canonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "migrations": &bintree{nil, map[string]*bintree{ + "20190404184050-initial.sql": &bintree{migrations20190404184050InitialSql, map[string]*bintree{}}, + "20190405112544-increase_asset_code_size.sql": &bintree{migrations20190405112544Increase_asset_code_sizeSql, map[string]*bintree{}}, + "20190408115724-add_new_asset_fields.sql": &bintree{migrations20190408115724Add_new_asset_fieldsSql, map[string]*bintree{}}, + "20190408155841-add_issuers_table.sql": &bintree{migrations20190408155841Add_issuers_tableSql, map[string]*bintree{}}, + "20190409152216-add_trades_table.sql": &bintree{migrations20190409152216Add_trades_tableSql, map[string]*bintree{}}, + "20190409172610-rename_assets_desc_description.sql": &bintree{migrations20190409172610Rename_assets_desc_descriptionSql, map[string]*bintree{}}, + "20190410094830-add_assets_issuer_account_field.sql": &bintree{migrations20190410094830Add_assets_issuer_account_fieldSql, map[string]*bintree{}}, + "20190411165735-data_seed_and_indices.sql": &bintree{migrations20190411165735Data_seed_and_indicesSql, map[string]*bintree{}}, + "20190425110313-add_orderbook_stats.sql": &bintree{migrations20190425110313Add_orderbook_statsSql, map[string]*bintree{}}, + "20190426092321-add_aggregated_orderbook_view.sql": &bintree{migrations20190426092321Add_aggregated_orderbook_viewSql, map[string]*bintree{}}, + }}, +}} + +// RestoreAsset restores an asset under the given directory. +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) +} + +// RestoreAssets restores an asset under the given directory recursively. +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + canonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) 
+} diff --git a/services/ticker/internal/tickerdb/migrations_test.go b/services/ticker/internal/tickerdb/migrations_test.go new file mode 100644 index 0000000000..5b5c2e0665 --- /dev/null +++ b/services/ticker/internal/tickerdb/migrations_test.go @@ -0,0 +1,30 @@ +package tickerdb + +import ( + "net/http" + "os" + "strings" + "testing" + + assetfs "github.com/elazarl/go-bindata-assetfs" + "github.com/shurcooL/httpfs/filter" + + bdata "github.com/stellar/go/services/ticker/internal/tickerdb/migrations" + supportHttp "github.com/stellar/go/support/http" +) + +func TestGeneratedAssets(t *testing.T) { + var localAssets http.FileSystem = filter.Keep(http.Dir("migrations"), func(path string, fi os.FileInfo) bool { + return fi.IsDir() || strings.HasSuffix(path, ".sql") + }) + generatedAssets := &assetfs.AssetFS{ + Asset: bdata.Asset, + AssetDir: bdata.AssetDir, + AssetInfo: bdata.AssetInfo, + Prefix: "/migrations", + } + + if !supportHttp.EqualFileSystems(localAssets, generatedAssets, "/") { + t.Fatalf("generated migrations does not match local migrations") + } +} diff --git a/services/ticker/internal/tickerdb/queries_asset.go b/services/ticker/internal/tickerdb/queries_asset.go new file mode 100644 index 0000000000..a55e861ebb --- /dev/null +++ b/services/ticker/internal/tickerdb/queries_asset.go @@ -0,0 +1,98 @@ +package tickerdb + +import ( + "context" +) + +// InsertOrUpdateAsset inserts an Asset on the database (if new), +// or updates an existing one +func (s *TickerSession) InsertOrUpdateAsset(ctx context.Context, a *Asset, preserveFields []string) (err error) { + return s.performUpsertQuery(ctx, *a, "assets", "assets_code_issuer_account", preserveFields) +} + +// GetAssetByCodeAndIssuerAccount searches for an Asset with the given code +// and public key, and returns its ID in case it is found. +func (s *TickerSession) GetAssetByCodeAndIssuerAccount(ctx context.Context, + code string, + issuerAccount string, +) (found bool, id int32, err error) { + var assets []Asset + tbl := s.GetTable("assets") + + err = tbl.Select( + &assets, + "assets.code = ? 
AND assets.issuer_account = ?", + code, + issuerAccount, + ).Exec(ctx) + if err != nil { + return + } + + if len(assets) > 0 { + id = assets[0].ID + found = true + } + return +} + +// GetAllValidAssets returns a slice with all assets in the database +// with is_valid = true +func (s *TickerSession) GetAllValidAssets(ctx context.Context) (assets []Asset, err error) { + tbl := s.GetTable("assets") + + err = tbl.Select( + &assets, + "assets.is_valid = TRUE", + ).Exec(ctx) + + return +} + +// GetAssetsWithNestedIssuer returns a slice with all assets in the database +// with is_valid = true, also adding the nested Issuer attribute +func (s *TickerSession) GetAssetsWithNestedIssuer(ctx context.Context) (assets []Asset, err error) { + const q = ` + SELECT + a.code, a.issuer_account, a.type, a.num_accounts, a.auth_required, a.auth_revocable, + a.amount, a.asset_controlled_by_domain, a.anchor_asset_code, a.anchor_asset_type, + a.is_valid, a.validation_error, a.last_valid, a.last_checked, a.display_decimals, + a.name, a.description, a.conditions, a.is_asset_anchored, a.fixed_number, a.max_number, + a.is_unlimited, a.redemption_instructions, a.collateral_addresses, a.collateral_address_signatures, + a.countries, a.status, a.issuer_id, i.public_key, i.name, i.url, i.toml_url, i.federation_server, + i.auth_server, i.transfer_server, i.web_auth_endpoint, i.deposit_server, i.org_twitter + FROM assets AS a + INNER JOIN issuers AS i ON a.issuer_id = i.id + WHERE a.is_valid = TRUE + ` + + rows, err := s.DB.QueryContext(ctx, q) + if err != nil { + return + } + + for rows.Next() { + var ( + a Asset + i Issuer + ) + + err = rows.Scan( + &a.Code, &a.IssuerAccount, &a.Type, &a.NumAccounts, &a.AuthRequired, &a.AuthRevocable, + &a.Amount, &a.AssetControlledByDomain, &a.AnchorAssetCode, &a.AnchorAssetType, + &a.IsValid, &a.ValidationError, &a.LastValid, &a.LastChecked, &a.DisplayDecimals, + &a.Name, &a.Desc, &a.Conditions, &a.IsAssetAnchored, &a.FixedNumber, &a.MaxNumber, + &a.IsUnlimited, &a.RedemptionInstructions, &a.CollateralAddresses, &a.CollateralAddressSignatures, + &a.Countries, &a.Status, &a.IssuerID, &i.PublicKey, &i.Name, &i.URL, &i.TOMLURL, &i.FederationServer, + &i.AuthServer, &i.TransferServer, &i.WebAuthEndpoint, &i.DepositServer, &i.OrgTwitter, + ) + if err != nil { + return + } + + a.Issuer = i + assets = append(assets, a) + } + + return +} diff --git a/services/ticker/internal/tickerdb/queries_asset_test.go b/services/ticker/internal/tickerdb/queries_asset_test.go new file mode 100644 index 0000000000..c6d53a2249 --- /dev/null +++ b/services/ticker/internal/tickerdb/queries_asset_test.go @@ -0,0 +1,232 @@ +package tickerdb + +import ( + "context" + "testing" + "time" + + _ "github.com/lib/pq" + migrate "github.com/rubenv/sql-migrate" + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInsertOrUpdateAsset(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + publicKey := "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + issuerAccount := "AM2FQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + name := "FOO BAR" + 
code := "XLM" + + // Adding a seed issuer to be used later: + issuer := Issuer{ + PublicKey: publicKey, + Name: name, + } + tbl := session.GetTable("issuers") + _, err = tbl.Insert(issuer).IgnoreCols("id").Exec(ctx) + require.NoError(t, err) + var dbIssuer Issuer + err = session.GetRaw(ctx, &dbIssuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Creating first asset: + firstTime := time.Now() + t.Log("firstTime:", firstTime) + a := Asset{ + Code: code, + IssuerAccount: issuerAccount, + IssuerID: dbIssuer.ID, + LastValid: firstTime, + LastChecked: firstTime, + } + err = session.InsertOrUpdateAsset(ctx, &a, []string{"code", "issuer_account", "issuer_id"}) + require.NoError(t, err) + + var dbAsset1 Asset + err = session.GetRaw(ctx, &dbAsset1, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + assert.Equal(t, code, dbAsset1.Code) + assert.Equal(t, issuerAccount, dbAsset1.IssuerAccount) + assert.Equal(t, dbIssuer.ID, dbAsset1.IssuerID) + assert.Equal( + t, + firstTime.Local().Round(time.Millisecond), + dbAsset1.LastValid.Local().Round(time.Millisecond), + ) + assert.Equal( + t, + firstTime.Local().Round(time.Millisecond), + dbAsset1.LastChecked.Local().Round(time.Millisecond), + ) + + // Creating Seconde Asset: + secondTime := time.Now() + t.Log("secondTime:", secondTime) + a.LastValid = secondTime + a.LastChecked = secondTime + err = session.InsertOrUpdateAsset(ctx, &a, []string{"code", "issuer_account", "issuer_id"}) + require.NoError(t, err) + + var dbAsset2 Asset + err = session.GetRaw(ctx, &dbAsset2, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Validating if changes match what was expected: + assert.Equal(t, dbAsset1.ID, dbAsset2.ID) + assert.Equal(t, code, dbAsset2.Code) + assert.Equal(t, issuerAccount, dbAsset1.IssuerAccount) + assert.Equal(t, dbIssuer.ID, dbAsset2.IssuerID) + assert.True(t, dbAsset2.LastValid.After(firstTime)) + assert.True(t, dbAsset2.LastChecked.After(firstTime)) + assert.Equal( + t, + secondTime.Local().Round(time.Millisecond), + dbAsset2.LastValid.Local().Round(time.Millisecond), + ) + assert.Equal( + t, + secondTime.Local().Round(time.Millisecond), + dbAsset2.LastChecked.Local().Round(time.Millisecond), + ) + + // Creating Third Asset: + thirdTime := time.Now() + t.Log("thirdTime:", thirdTime) + a.LastValid = thirdTime + a.LastChecked = thirdTime + err = session.InsertOrUpdateAsset(ctx, &a, []string{"code", "issuer_id", "last_valid", "last_checked", "issuer_account"}) + require.NoError(t, err) + var dbAsset3 Asset + err = session.GetRaw(ctx, &dbAsset3, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Validating if changes match what was expected: + assert.Equal(t, dbAsset2.ID, dbAsset3.ID) + assert.Equal(t, code, dbAsset3.Code) + assert.Equal(t, issuerAccount, dbAsset3.IssuerAccount) + assert.Equal(t, dbIssuer.ID, dbAsset3.IssuerID) + assert.True(t, dbAsset3.LastValid.Before(thirdTime)) + assert.True(t, dbAsset3.LastChecked.Before(thirdTime)) + assert.Equal( + t, + dbAsset2.LastValid.Local().Round(time.Millisecond), + dbAsset3.LastValid.Local().Round(time.Millisecond), + ) + assert.Equal( + t, dbAsset2.LastValid.Local().Round(time.Millisecond), + dbAsset3.LastChecked.Local().Round(time.Millisecond), + ) +} + +func TestGetAssetByCodeAndIssuerAccount(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := 
context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + publicKey := "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + name := "FOO BAR" + code := "XLM" + issuerAccount := "AM2FQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + + // Adding a seed issuer to be used later: + issuer := Issuer{ + PublicKey: publicKey, + Name: name, + } + tbl := session.GetTable("issuers") + _, err = tbl.Insert(issuer).IgnoreCols("id").Exec(ctx) + require.NoError(t, err) + var dbIssuer Issuer + err = session.GetRaw(ctx, &dbIssuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Creating first asset: + firstTime := time.Now() + a := Asset{ + Code: code, + IssuerAccount: issuerAccount, + IssuerID: dbIssuer.ID, + LastValid: firstTime, + LastChecked: firstTime, + } + err = session.InsertOrUpdateAsset(ctx, &a, []string{"code", "issuer_account", "issuer_id"}) + require.NoError(t, err) + + var dbAsset Asset + err = session.GetRaw(ctx, &dbAsset, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Searching for an asset that exists: + found, id, err := session.GetAssetByCodeAndIssuerAccount(ctx, code, issuerAccount) + require.NoError(t, err) + assert.Equal(t, dbAsset.ID, id) + assert.True(t, found) + + // Now searching for an asset that does not exist: + found, _, err = session.GetAssetByCodeAndIssuerAccount(ctx, + "NONEXISTENT CODE", + issuerAccount, + ) + require.NoError(t, err) + assert.False(t, found) +} diff --git a/services/ticker/internal/tickerdb/queries_issuer.go b/services/ticker/internal/tickerdb/queries_issuer.go new file mode 100644 index 0000000000..1b0d0f2d2c --- /dev/null +++ b/services/ticker/internal/tickerdb/queries_issuer.go @@ -0,0 +1,40 @@ +package tickerdb + +import ( + "context" + "strings" + + "github.com/stellar/go/services/ticker/internal/utils" +) + +// InsertOrUpdateIssuer inserts an Issuer on the database (if new), +// or updates an existing one +func (s *TickerSession) InsertOrUpdateIssuer(ctx context.Context, issuer *Issuer, preserveFields []string) (id int32, err error) { + dbFields := getDBFieldTags(*issuer, true) + dbFieldsString := strings.Join(dbFields, ", ") + dbValues := getDBFieldValues(*issuer, true) + + cleanPreservedFields := sanitizeFieldNames(preserveFields) + toUpdateFields := utils.SliceDiff(dbFields, cleanPreservedFields) + + qs := "INSERT INTO issuers (" + dbFieldsString + ")" + qs += " VALUES (" + generatePlaceholders(dbValues) + ")" + qs += " " + createOnConflictFragment("public_key_unique", toUpdateFields) + qs += " RETURNING id;" + + rows, err := s.QueryRaw(ctx, qs, dbValues...) 
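+	// qs is an upsert (built via createOnConflictFragment against the
+	// public_key_unique constraint) and ends with "RETURNING id", so the loop
+	// below scans the id of the inserted or updated issuer.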
+ if err != nil { + return + } + + for rows.Next() { + err = rows.Scan(&id) + } + return +} + +// GetAllIssuers returns a slice with all issuers in the database +func (s *TickerSession) GetAllIssuers(ctx context.Context) (issuers []Issuer, err error) { + err = s.SelectRaw(ctx, &issuers, "SELECT * FROM issuers") + return +} diff --git a/services/ticker/internal/tickerdb/queries_issuer_test.go b/services/ticker/internal/tickerdb/queries_issuer_test.go new file mode 100644 index 0000000000..f4400e497b --- /dev/null +++ b/services/ticker/internal/tickerdb/queries_issuer_test.go @@ -0,0 +1,95 @@ +package tickerdb + +import ( + "context" + "testing" + + _ "github.com/lib/pq" + migrate "github.com/rubenv/sql-migrate" + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInsertOrUpdateIssuer(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + publicKey := "ASOKDASDKMAKSD19023ASDSAD0912309" + name := "FOO BAR" + + // Adding a seed issuer to be used later: + issuer := Issuer{ + PublicKey: publicKey, + Name: name, + } + id, err := session.InsertOrUpdateIssuer(ctx, &issuer, []string{"public_key"}) + + require.NoError(t, err) + var dbIssuer Issuer + err = session.GetRaw(ctx, &dbIssuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + assert.Equal(t, publicKey, dbIssuer.PublicKey) + assert.Equal(t, dbIssuer.ID, id) + + // Adding another issuer to validate we're correctly returning the ID + issuer2 := Issuer{ + PublicKey: "ANOTHERKEY", + Name: "Hello from the other side", + } + id2, err := session.InsertOrUpdateIssuer(ctx, &issuer2, []string{"public_key"}) + + require.NoError(t, err) + var dbIssuer2 Issuer + err = session.GetRaw(ctx, &dbIssuer2, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + assert.Equal(t, issuer2.Name, dbIssuer2.Name) + assert.Equal(t, issuer2.PublicKey, dbIssuer2.PublicKey) + assert.Equal(t, id2, dbIssuer2.ID) + + // Validate if it only changes the un-preserved fields + name3 := "The Dark Side of the Moon" + issuer3 := Issuer{ + PublicKey: publicKey, + Name: name3, + } + id, err = session.InsertOrUpdateIssuer(ctx, &issuer3, []string{"public_key"}) + require.NoError(t, err) + + var dbIssuer3 Issuer + err = session.GetRaw(ctx, + &dbIssuer3, + "SELECT * FROM issuers WHERE id=?", + id, + ) + require.NoError(t, err) + + assert.Equal(t, dbIssuer.ID, dbIssuer3.ID) + assert.Equal(t, dbIssuer.PublicKey, dbIssuer3.PublicKey) + assert.Equal(t, name3, dbIssuer3.Name) +} diff --git a/services/ticker/internal/tickerdb/queries_market.go b/services/ticker/internal/tickerdb/queries_market.go new file mode 100644 index 0000000000..8984fcb3eb --- /dev/null +++ b/services/ticker/internal/tickerdb/queries_market.go @@ -0,0 +1,277 @@ +package tickerdb + +import ( + "context" + "fmt" + "strings" +) + +// RetrieveMarketData retrieves the 24h- and 7d aggregated market data for all +// markets that were active during this period. 
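+// The rows are scanned into the Market struct; the aggregation itself lives in
+// marketQuery below, which combines the 24h and 7d per-pair trade summaries.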
+func (s *TickerSession) RetrieveMarketData(ctx context.Context) (markets []Market, err error) { + err = s.SelectRaw(ctx, &markets, marketQuery) + return +} + +// RetrievePartialAggMarkets retrieves the aggregated market data for all +// markets (or for a specific one if PairName != nil) for a given period. +func (s *TickerSession) RetrievePartialAggMarkets(ctx context.Context, + pairName *string, + numHoursAgo int, +) (partialMkts []PartialMarket, err error) { + var bCode, cCode string + sqlTrue := new(string) + *sqlTrue = "TRUE" + optVars := []optionalVar{ + optionalVar{"bAsset.is_valid", sqlTrue}, + optionalVar{"cAsset.is_valid", sqlTrue}, + } + + // parse base and asset codes and add them as SQL parameters + if pairName != nil { + bCode, cCode, err = getBaseAndCounterCodes(*pairName) + if err != nil { + return + } + optVars = append(optVars, []optionalVar{ + optionalVar{"bAsset.code", &bCode}, + optionalVar{"cAsset.code", &cCode}, + }...) + } + + where, args := generateWhereClause(optVars) + where += fmt.Sprintf( + " AND t.ledger_close_time > now() - interval '%d hours'", + numHoursAgo, + ) + q := strings.Replace(aggMarketQuery, "__WHERECLAUSE__", where, -1) + q = strings.Replace(q, "__NUMHOURS__", fmt.Sprintf("%d", numHoursAgo), -1) + + argsInterface := make([]interface{}, len(args)) + for i, v := range args { + argsInterface[i] = v + } + + err = s.SelectRaw(ctx, &partialMkts, q, argsInterface...) + return +} + +// RetrievePartialMarkets retrieves data in the PartialMarket format from the database. +// It optionally filters the data according to the provided base and counter asset params +// provided, as well as the numHoursAgo time offset. +func (s *TickerSession) RetrievePartialMarkets(ctx context.Context, + baseAssetCode *string, + baseAssetIssuer *string, + counterAssetCode *string, + counterAssetIssuer *string, + numHoursAgo int, +) (partialMkts []PartialMarket, err error) { + sqlTrue := new(string) + *sqlTrue = "TRUE" + + where, args := generateWhereClause([]optionalVar{ + optionalVar{"bAsset.is_valid", sqlTrue}, + optionalVar{"cAsset.is_valid", sqlTrue}, + optionalVar{"bAsset.code", baseAssetCode}, + optionalVar{"bAsset.issuer_account", baseAssetIssuer}, + optionalVar{"cAsset.code", counterAssetCode}, + optionalVar{"cAsset.issuer_account", counterAssetIssuer}, + }) + where += fmt.Sprintf( + " AND t.ledger_close_time > now() - interval '%d hours'", + numHoursAgo, + ) + + q := strings.Replace(partialMarketQuery, "__WHERECLAUSE__", where, -1) + q = strings.Replace(q, "__NUMHOURS__", fmt.Sprintf("%d", numHoursAgo), -1) + + argsInterface := make([]interface{}, len(args)) + for i, v := range args { + argsInterface[i] = v + } + err = s.SelectRaw(ctx, &partialMkts, q, argsInterface...) + return +} + +// Retrieve7DRelevantMarkets retrieves the base and counter asset data of the markets +// that were relevant in the last 7-day period. 
+func (s *TickerSession) Retrieve7DRelevantMarkets(ctx context.Context) (partialMkts []PartialMarket, err error) { + q := ` + SELECT + ba.id as base_asset_id, ba.type AS base_asset_type, ba.code AS base_asset_code, ba.issuer_account AS base_asset_issuer, + ca.id as counter_asset_id, ca.type AS counter_asset_type, ca.code AS counter_asset_code, ca.issuer_account AS counter_asset_issuer + FROM trades as t + JOIN assets AS ba ON t.base_asset_id = ba.id + JOIN assets AS ca ON t.counter_asset_id = ca.id + WHERE ba.is_valid = TRUE AND ca.is_valid = TRUE AND t.ledger_close_time > now() - interval '7 days' + GROUP BY ba.id, ba.type, ba.code, ba.issuer_account, ca.id, ca.type, ca.code, ca.issuer_account + ` + err = s.SelectRaw(ctx, &partialMkts, q) + return +} + +var marketQuery = ` +SELECT + t2.trade_pair_name, + COALESCE(base_volume_24h, 0.0) as base_volume_24h, + COALESCE(counter_volume_24h, 0.0) as counter_volume_24h, + COALESCE(trade_count_24h, 0) as trade_count_24h, + COALESCE(highest_price_24h, last_price_7d, 0.0) as highest_price_24h, + COALESCE(lowest_price_24h, last_price_7d, 0.0) as lowest_price_24h, + COALESCE(price_change_24h, 0.0) as price_change_24h, + COALESCE(open_price_24h, last_price_7d, 0.0) as open_price_24h, + + COALESCE(base_volume_7d, 0) as base_volume_7d, + COALESCE(counter_volume_7d, 0) as counter_volume_7d, + COALESCE(trade_count_7d, 0) as trade_count_7d, + COALESCE(highest_price_7d, 0.0) as highest_price_7d, + COALESCE(lowest_price_7d, 0.0) as lowest_price_7d, + COALESCE(price_change_7d, 0.0) as price_change_7d, + COALESCE(open_price_7d, 0.0) as open_price_7d, + + COALESCE(last_price, last_price_7d, 0.0) as last_price, + COALESCE(last_close_time_24h, last_close_time_7d) as close_time, + + COALESCE(os.num_bids, 0) as num_bids, + COALESCE(os.bid_volume, 0.0) as bid_volume, + COALESCE(os.highest_bid, 0.0) as highest_bid, + COALESCE(os.num_asks, 0) as num_asks, + COALESCE(os.ask_volume, 0.0) as ask_volume, + COALESCE(os.lowest_ask, 0.0) as lowest_ask +FROM ( + SELECT + -- All valid trades for 24h period + concat( + COALESCE(NULLIF(bAsset.anchor_asset_code, ''), bAsset.code), + '_', + COALESCE(NULLIF(cAsset.anchor_asset_code, ''), cAsset.code) + ) as trade_pair_name, + sum(t.base_amount) AS base_volume_24h, + sum(t.counter_amount) AS counter_volume_24h, + count(t.base_amount) AS trade_count_24h, + max(t.price) AS highest_price_24h, + min(t.price) AS lowest_price_24h, + (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1] AS open_price_24h, + (array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] AS last_price, + ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change_24h, + max(t.ledger_close_time) AS last_close_time_24h + FROM trades AS t + JOIN assets AS bAsset ON t.base_asset_id = bAsset.id + JOIN assets AS cAsset on t.counter_asset_id = cAsset.id + WHERE bAsset.is_valid = TRUE + AND cAsset.is_valid = TRUE + AND t.ledger_close_time > now() - interval '1 day' + GROUP BY trade_pair_name + ) t1 RIGHT JOIN ( + SELECT + -- All valid trades for 7d period + concat( + COALESCE(NULLIF(bAsset.anchor_asset_code, ''), bAsset.code), + '_', + COALESCE(NULLIF(cAsset.anchor_asset_code, ''), cAsset.code) + ) as trade_pair_name, + sum(t.base_amount) AS base_volume_7d, + sum(t.counter_amount) AS counter_volume_7d, + count(t.base_amount) AS trade_count_7d, + max(t.price) AS highest_price_7d, + min(t.price) AS lowest_price_7d, + (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1] AS open_price_7d, + 
(array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] AS last_price_7d, + ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change_7d, + max(t.ledger_close_time) AS last_close_time_7d + FROM trades AS t + LEFT JOIN orderbook_stats AS os + ON t.base_asset_id = os.base_asset_id AND t.counter_asset_id = os.counter_asset_id + JOIN assets AS bAsset ON t.base_asset_id = bAsset.id + JOIN assets AS cAsset on t.counter_asset_id = cAsset.id + WHERE bAsset.is_valid = TRUE + AND cAsset.is_valid = TRUE + AND t.ledger_close_time > now() - interval '7 days' + GROUP BY trade_pair_name + ) t2 ON t1.trade_pair_name = t2.trade_pair_name + LEFT JOIN aggregated_orderbook AS os ON t2.trade_pair_name = os.trade_pair_name; +` + +var partialMarketQuery = ` +SELECT + concat(bAsset.code, ':', bAsset.issuer_account, ' / ', cAsset.code, ':', cAsset.issuer_account) as trade_pair_name, + bAsset.id AS base_asset_id, + bAsset.code AS base_asset_code, + bAsset.issuer_account as base_asset_issuer, + bAsset.type as base_asset_type, + cAsset.id AS counter_asset_id, + cAsset.code AS counter_asset_code, + cAsset.issuer_account AS counter_asset_issuer, + cAsset.type as counter_asset_type, + sum(t.base_amount) AS base_volume, + sum(t.counter_amount) AS counter_volume, + count(t.base_amount) AS trade_count, + max(t.price) AS highest_price, + min(t.price) AS lowest_price, + (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1] AS open_price, + (array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] AS last_price, + ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change, + (now() - interval '__NUMHOURS__ hours') AS interval_start, + min(t.ledger_close_time) AS first_ledger_close_time, + max(t.ledger_close_time) AS last_ledger_close_time, + COALESCE((array_agg(os.num_bids))[1], 0) AS num_bids, + COALESCE((array_agg(os.bid_volume))[1], 0.0) AS bid_volume, + COALESCE((array_agg(os.highest_bid))[1], 0.0) AS highest_bid, + COALESCE((array_agg(os.num_asks))[1], 0) AS num_asks, + COALESCE((array_agg(os.ask_volume))[1], 0.0) AS ask_volume, + COALESCE((array_agg(os.lowest_ask))[1], 0.0) AS lowest_ask +FROM trades AS t + LEFT JOIN orderbook_stats AS os ON t.base_asset_id = os.base_asset_id AND t.counter_asset_id = os.counter_asset_id + JOIN assets AS bAsset ON t.base_asset_id = bAsset.id + JOIN assets AS cAsset on t.counter_asset_id = cAsset.id +__WHERECLAUSE__ +GROUP BY bAsset.id, bAsset.code, bAsset.issuer_account, bAsset.type, cAsset.id, cAsset.code, cAsset.issuer_account, cAsset.type; +` + +var aggMarketQuery = ` +SELECT + t1.trade_pair_name, + t1.base_volume, + t1.counter_volume, + t1.trade_count, + t1.highest_price, + t1.lowest_price, + t1.open_price, + t1.last_price, + t1.price_change, + t1.interval_start, + t1.first_ledger_close_time, + t1.last_ledger_close_time, + COALESCE(aob.base_asset_code, '') as base_asset_code, + COALESCE(aob.counter_asset_code, '') as counter_asset_code, + COALESCE(aob.num_bids, 0) AS num_bids, + COALESCE(aob.bid_volume, 0.0) AS bid_volume, + COALESCE(aob.highest_bid, 0.0) AS highest_bid, + COALESCE(aob.num_asks, 0) AS num_asks, + COALESCE(aob.ask_volume, 0.0) AS ask_volume, + COALESCE(aob.lowest_ask, 0.0) AS lowest_ask +FROM ( + SELECT + concat( + COALESCE(NULLIF(bAsset.anchor_asset_code, ''), bAsset.code), + '_', + COALESCE(NULLIF(cAsset.anchor_asset_code, ''), cAsset.code) + ) as trade_pair_name, + sum(t.base_amount) AS base_volume, + 
sum(t.counter_amount) AS counter_volume,
+    count(t.base_amount) AS trade_count,
+    max(t.price) AS highest_price,
+    min(t.price) AS lowest_price,
+    (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1] AS open_price,
+    (array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] AS last_price,
+    ((array_agg(t.price ORDER BY t.ledger_close_time DESC))[1] - (array_agg(t.price ORDER BY t.ledger_close_time ASC))[1]) AS price_change,
+    (now() - interval '__NUMHOURS__ hours') AS interval_start,
+    min(t.ledger_close_time) AS first_ledger_close_time,
+    max(t.ledger_close_time) AS last_ledger_close_time
+    FROM trades AS t
+    LEFT JOIN orderbook_stats AS os ON t.base_asset_id = os.base_asset_id AND t.counter_asset_id = os.counter_asset_id
+    JOIN assets AS bAsset ON t.base_asset_id = bAsset.id
+    JOIN assets AS cAsset on t.counter_asset_id = cAsset.id
+    __WHERECLAUSE__
+    GROUP BY trade_pair_name
+) t1 LEFT JOIN aggregated_orderbook AS aob ON t1.trade_pair_name = aob.trade_pair_name;`
diff --git a/services/ticker/internal/tickerdb/queries_market_test.go b/services/ticker/internal/tickerdb/queries_market_test.go
new file mode 100644
index 0000000000..65a9df2e47
--- /dev/null
+++ b/services/ticker/internal/tickerdb/queries_market_test.go
@@ -0,0 +1,834 @@
+package tickerdb
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"testing"
+	"time"
+
+	migrate "github.com/rubenv/sql-migrate"
+	"github.com/stellar/go/support/db/dbtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestRetrieveMarketData(t *testing.T) {
+	db := dbtest.Postgres(t)
+	defer db.Close()
+
+	var session TickerSession
+	session.DB = db.Open()
+	ctx := context.Background()
+	defer session.DB.Close()
+
+	// Run migrations to make sure the tests are run
+	// on the most updated schema version
+	migrations := &migrate.FileMigrationSource{
+		Dir: "./migrations",
+	}
+	_, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up)
+	require.NoError(t, err)
+
+	// Adding a seed issuer to be used later:
+	tbl := session.GetTable("issuers")
+	_, err = tbl.Insert(Issuer{
+		PublicKey: "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB",
+		Name:      "FOO BAR",
+	}).IgnoreCols("id").Exec(ctx)
+	require.NoError(t, err)
+	var issuer Issuer
+	err = session.GetRaw(ctx, &issuer, `
+		SELECT *
+		FROM issuers
+		ORDER BY id DESC
+		LIMIT 1`,
+	)
+	require.NoError(t, err)
+
+	// Adding a seed asset to be used later:
+	err = session.InsertOrUpdateAsset(ctx, &Asset{
+		Code:     "XLM",
+		IssuerID: issuer.ID,
+		IsValid:  true,
+	}, []string{"code", "issuer_id"})
+	require.NoError(t, err)
+	var xlmAsset Asset
+	err = session.GetRaw(ctx, &xlmAsset, `
+		SELECT *
+		FROM assets
+		ORDER BY id DESC
+		LIMIT 1`,
+	)
+	require.NoError(t, err)
+
+	// Adding another asset to be used later:
+	err = session.InsertOrUpdateAsset(ctx, &Asset{
+		Code:     "BTC",
+		IssuerID: issuer.ID,
+		IsValid:  true,
+	}, []string{"code", "issuer_id"})
+	require.NoError(t, err)
+	var btcAsset Asset
+	err = session.GetRaw(ctx, &btcAsset, `
+		SELECT *
+		FROM assets
+		ORDER BY id DESC
+		LIMIT 1`,
+	)
+	require.NoError(t, err)
+
+	// Adding a third asset:
+	err = session.InsertOrUpdateAsset(ctx, &Asset{
+		Code:     "ETH",
+		IssuerID: issuer.ID,
+		IsValid:  true,
+	}, []string{"code", "issuer_id"})
+	require.NoError(t, err)
+	var ethAsset Asset
+	err = session.GetRaw(ctx, &ethAsset, `
+		SELECT *
+		FROM assets
+		ORDER BY id DESC
+		LIMIT 1`,
+	)
+	require.NoError(t, err)
+
+	// Verify that we actually have three assets:
+	assert.NotEqual(t, xlmAsset.ID, btcAsset.ID)
+	
assert.NotEqual(t, btcAsset.ID, ethAsset.ID) + assert.NotEqual(t, xlmAsset.ID, ethAsset.ID) + + // A few times to be used: + now := time.Now() + oneHourAgo := now.Add(-1 * time.Hour) + threeDaysAgo := now.AddDate(0, 0, -3) + oneMonthAgo := now.AddDate(0, -1, 0) + + // Now let's create the trades: + trades := []Trade{ + Trade{ // XLM_BTC trade + HorizonID: "hrzid1", + BaseAssetID: xlmAsset.ID, + BaseAmount: 100.0, + CounterAssetID: btcAsset.ID, + CounterAmount: 10.0, + Price: 0.1, + LedgerCloseTime: now, + }, + Trade{ // XLM_ETH trade + HorizonID: "hrzid3", + BaseAssetID: xlmAsset.ID, + BaseAmount: 24.0, + CounterAssetID: ethAsset.ID, + CounterAmount: 26.0, + Price: 0.92, + LedgerCloseTime: oneHourAgo, + }, + Trade{ // XLM_ETH trade + HorizonID: "hrzid2", + BaseAssetID: xlmAsset.ID, + BaseAmount: 50.0, + CounterAssetID: ethAsset.ID, + CounterAmount: 50.0, + Price: 1.0, + LedgerCloseTime: now, + }, + Trade{ // XLM_BTC trade + HorizonID: "hrzid4", + BaseAssetID: xlmAsset.ID, + BaseAmount: 50.0, + CounterAssetID: btcAsset.ID, + CounterAmount: 6.0, + Price: 0.12, + LedgerCloseTime: threeDaysAgo, + }, + Trade{ // XLM_ETH trade + HorizonID: "hrzid5", + BaseAssetID: xlmAsset.ID, + BaseAmount: 24.0, + CounterAssetID: ethAsset.ID, + CounterAmount: 28.0, + Price: 1.10, + LedgerCloseTime: oneMonthAgo, + }, + } + err = session.BulkInsertTrades(ctx, trades) + require.NoError(t, err) + + // Adding some orderbook stats: + obTime := time.Now() + orderbookStats := OrderbookStats{ + BaseAssetID: xlmAsset.ID, + CounterAssetID: ethAsset.ID, + NumBids: 15, + BidVolume: 0.15, + HighestBid: 200.0, + NumAsks: 17, + AskVolume: 30.0, + LowestAsk: 0.1, + Spread: 0.93, + SpreadMidPoint: 0.35, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats(ctx, + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obBTCETH1 OrderbookStats + err = session.GetRaw(ctx, &obBTCETH1, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + orderbookStats = OrderbookStats{ + BaseAssetID: xlmAsset.ID, + CounterAssetID: btcAsset.ID, + NumBids: 1, + BidVolume: 0.1, + HighestBid: 20.0, + NumAsks: 1, + AskVolume: 15.0, + LowestAsk: 0.2, + Spread: 0.96, + SpreadMidPoint: 0.36, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats(ctx, + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obBTCETH2 OrderbookStats + err = session.GetRaw(ctx, &obBTCETH2, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + assert.NotEqual(t, obBTCETH1.ID, obBTCETH2.ID) + + markets, err := session.RetrieveMarketData(ctx) + require.NoError(t, err) + assert.Equal(t, 2, len(markets)) + + // Mapping the retrieved markets: + var xlmbtcMkt, xlmethMkt Market + for _, mkt := range markets { + if mkt.TradePair == "XLM_BTC" { + xlmbtcMkt = mkt + } + + if mkt.TradePair == "XLM_ETH" { + xlmethMkt = mkt + } + } + assert.NotEqual(t, "", xlmbtcMkt.TradePair) + assert.NotEqual(t, "", xlmethMkt.TradePair) + + // Validating the aggregated data + assert.Equal(t, 100.0, xlmbtcMkt.BaseVolume24h) + assert.Equal(t, 10.0, xlmbtcMkt.CounterVolume24h) + assert.Equal(t, int64(1), xlmbtcMkt.TradeCount24h) + assert.Equal(t, 0.1, xlmbtcMkt.OpenPrice24h) + assert.Equal(t, 0.1, xlmbtcMkt.LowestPrice24h) + assert.Equal(t, 0.1, xlmbtcMkt.HighestPrice24h) + + assert.Equal(t, 150.0, xlmbtcMkt.BaseVolume7d) + assert.Equal(t, 16.0, xlmbtcMkt.CounterVolume7d) + assert.Equal(t, 
int64(2), xlmbtcMkt.TradeCount7d) + assert.Equal(t, 0.12, xlmbtcMkt.OpenPrice7d) + assert.Equal(t, 0.1, xlmbtcMkt.LowestPrice7d) + assert.Equal(t, 0.12, xlmbtcMkt.HighestPrice7d) + + assert.Equal(t, 0.1, xlmbtcMkt.LastPrice) + assert.WithinDuration(t, now.Local(), xlmbtcMkt.LastPriceCloseTime.Local(), 10*time.Millisecond) + + assert.Equal(t, 0.0, xlmbtcMkt.PriceChange24h) + // There might be some floating point rounding issues, so this test + // needs to be a bit more flexible. Since the change is 0.02, an error + // around 0.0000000000001 is acceptable: + priceChange7dDiff := math.Abs(-0.02 - xlmbtcMkt.PriceChange7d) + assert.True(t, priceChange7dDiff < 0.0000000000001) + + assert.Equal(t, 74.0, xlmethMkt.BaseVolume24h) + assert.Equal(t, 76.0, xlmethMkt.CounterVolume24h) + assert.Equal(t, int64(2), xlmethMkt.TradeCount24h) + assert.Equal(t, 0.92, xlmethMkt.OpenPrice24h) + assert.Equal(t, 0.92, xlmethMkt.LowestPrice24h) + assert.Equal(t, 1.0, xlmethMkt.HighestPrice24h) + + assert.Equal(t, 74.0, xlmethMkt.BaseVolume7d) + assert.Equal(t, 76.0, xlmethMkt.CounterVolume7d) + assert.Equal(t, int64(2), xlmethMkt.TradeCount7d) + assert.Equal(t, 0.92, xlmethMkt.OpenPrice7d) + assert.Equal(t, 0.92, xlmethMkt.LowestPrice7d) + assert.Equal(t, 1.0, xlmethMkt.HighestPrice7d) + + assert.Equal(t, 1.0, xlmethMkt.LastPrice) + assert.WithinDuration(t, now.Local(), xlmbtcMkt.LastPriceCloseTime.Local(), 10*time.Millisecond) + + // There might be some floating point rounding issues, so this test + // needs to be a bit more flexible. Since the change is 0.08, an error + // around 0.0000000000001 is acceptable: + priceChange24hDiff := math.Abs(0.08 - xlmethMkt.PriceChange24h) + assert.True(t, priceChange24hDiff < 0.0000000000001) + + priceChange7dDiff = math.Abs(0.08 - xlmethMkt.PriceChange7d) + assert.True(t, priceChange7dDiff < 0.0000000000001) + + assert.Equal(t, priceChange24hDiff, priceChange7dDiff) + + // Analysing aggregated orderbook data: + assert.Equal(t, 15, xlmethMkt.NumBids) + assert.Equal(t, 0.15, xlmethMkt.BidVolume) + assert.Equal(t, 200.0, xlmethMkt.HighestBid) + assert.Equal(t, 17, xlmethMkt.NumAsks) + assert.Equal(t, 30.0, xlmethMkt.AskVolume) + assert.Equal(t, 0.1, xlmethMkt.LowestAsk) + + assert.Equal(t, 1, xlmbtcMkt.NumBids) + assert.Equal(t, 0.1, xlmbtcMkt.BidVolume) + assert.Equal(t, 20.0, xlmbtcMkt.HighestBid) + assert.Equal(t, 1, xlmbtcMkt.NumAsks) + assert.Equal(t, 15.0, xlmbtcMkt.AskVolume) + assert.Equal(t, 0.2, xlmbtcMkt.LowestAsk) +} + +func TestRetrievePartialMarkets(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + // Adding a seed issuer to be used later: + issuer1PK := "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + tbl := session.GetTable("issuers") + _, err = tbl.Insert(Issuer{ + PublicKey: issuer1PK, + Name: "FOO BAR", + }).IgnoreCols("id").Exec(ctx) + require.NoError(t, err) + var issuer1 Issuer + err = session.GetRaw(ctx, &issuer1, ` + SELECT * + FROM issuers + WHERE public_key = ?`, + issuer1PK, + ) + require.NoError(t, err) + + // Adding another issuer to be used later: + issuer2PK := "ABF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + _, err = 
tbl.Insert(Issuer{
+		PublicKey: issuer2PK,
+		Name:      "FOO BAR",
+	}).IgnoreCols("id").Exec(ctx)
+	require.NoError(t, err)
+	var issuer2 Issuer
+	err = session.GetRaw(ctx, &issuer2, `
+		SELECT *
+		FROM issuers
+		WHERE public_key = ?`,
+		issuer2PK,
+	)
+	require.NoError(t, err)
+
+	// Adding a seed asset to be used later:
+	err = session.InsertOrUpdateAsset(ctx, &Asset{
+		Code:          "ETH",
+		IssuerAccount: issuer1PK,
+		IssuerID:      issuer1.ID,
+		IsValid:       true,
+	}, []string{"code", "issuer_id"})
+	require.NoError(t, err)
+	var ethAsset1 Asset
+	err = session.GetRaw(ctx, &ethAsset1, `
+		SELECT *
+		FROM assets
+		WHERE code = ?
+		AND issuer_account = ?`,
+		"ETH",
+		issuer1PK,
+	)
+	require.NoError(t, err)
+
+	// Adding a seed asset to be used later:
+	err = session.InsertOrUpdateAsset(ctx, &Asset{
+		Code:          "ETH",
+		IssuerAccount: issuer2PK,
+		IssuerID:      issuer2.ID,
+		IsValid:       true,
+	}, []string{"code", "issuer_id"})
+	require.NoError(t, err)
+	var ethAsset2 Asset
+	err = session.GetRaw(ctx, &ethAsset2, `
+		SELECT *
+		FROM assets
+		WHERE code = ?
+		AND issuer_account = ?`,
+		"ETH",
+		issuer2PK,
+	)
+	require.NoError(t, err)
+
+	// Adding another asset to be used later:
+	err = session.InsertOrUpdateAsset(ctx, &Asset{
+		Code:          "BTC",
+		IssuerAccount: issuer1PK,
+		IssuerID:      issuer1.ID,
+		IsValid:       true,
+	}, []string{"code", "issuer_id"})
+	require.NoError(t, err)
+	var btcAsset Asset
+	err = session.GetRaw(ctx, &btcAsset, `
+		SELECT *
+		FROM assets
+		WHERE code = ?
+		AND issuer_account = ?`,
+		"BTC",
+		issuer1PK,
+	)
+	require.NoError(t, err)
+
+	// A few times to be used:
+	now := time.Now()
+	tenMinutesAgo := now.Add(-10 * time.Minute)
+	oneHourAgo := now.Add(-1 * time.Hour)
+	threeDaysAgo := now.AddDate(0, 0, -3)
+
+	// Now let's create the trades:
+	trades := []Trade{
+		Trade{ // BTC_ETH trade (ETH is from issuer 1)
+			HorizonID:       "hrzid1",
+			BaseAssetID:     btcAsset.ID,
+			BaseAmount:      100.0,
+			CounterAssetID:  ethAsset1.ID,
+			CounterAmount:   10.0,
+			Price:           0.1,
+			LedgerCloseTime: tenMinutesAgo,
+		},
+		Trade{ // BTC_ETH trade (ETH is from issuer 2)
+			HorizonID:       "hrzid3",
+			BaseAssetID:     btcAsset.ID,
+			BaseAmount:      24.0,
+			CounterAssetID:  ethAsset2.ID,
+			CounterAmount:   26.0,
+			Price:           0.92,
+			LedgerCloseTime: now,
+		},
+		Trade{ // BTC_ETH trade (ETH is from issuer 1)
+			HorizonID:       "hrzid2",
+			BaseAssetID:     btcAsset.ID,
+			BaseAmount:      50.0,
+			CounterAssetID:  ethAsset1.ID,
+			CounterAmount:   50.0,
+			Price:           1.0,
+			LedgerCloseTime: oneHourAgo,
+		},
+		Trade{ // BTC_ETH trade (ETH is from issuer 1)
+			HorizonID:       "hrzid4",
+			BaseAssetID:     btcAsset.ID,
+			BaseAmount:      50.0,
+			CounterAssetID:  ethAsset1.ID,
+			CounterAmount:   6.0,
+			Price:           0.12,
+			LedgerCloseTime: threeDaysAgo,
+		},
+	}
+	err = session.BulkInsertTrades(ctx, trades)
+	require.NoError(t, err)
+
+	// Adding some orderbook stats:
+	obTime := time.Now()
+	orderbookStats := OrderbookStats{
+		BaseAssetID:    btcAsset.ID,
+		CounterAssetID: ethAsset1.ID,
+		NumBids:        15,
+		BidVolume:      0.15,
+		HighestBid:     200.0,
+		NumAsks:        17,
+		AskVolume:      30.0,
+		LowestAsk:      0.1,
+		Spread:         0.93,
+		SpreadMidPoint: 0.35,
+		UpdatedAt:      obTime,
+	}
+	err = session.InsertOrUpdateOrderbookStats(ctx,
+		&orderbookStats,
+		[]string{"base_asset_id", "counter_asset_id"},
+	)
+	require.NoError(t, err)
+
+	var obBTCETH1 OrderbookStats
+	err = session.GetRaw(ctx, &obBTCETH1, `
+		SELECT *
+		FROM orderbook_stats
+		ORDER BY id DESC
+		LIMIT 1`,
+	)
+	require.NoError(t, err)
+
+	orderbookStats = OrderbookStats{
+		BaseAssetID:    btcAsset.ID,
+		CounterAssetID: ethAsset2.ID,
+		NumBids:        1,
+		BidVolume:      0.1,
+		HighestBid:     20.0,
+		
NumAsks: 1, + AskVolume: 15.0, + LowestAsk: 0.2, + Spread: 0.96, + SpreadMidPoint: 0.36, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats(ctx, + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obBTCETH2 OrderbookStats + err = session.GetRaw(ctx, &obBTCETH2, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + assert.NotEqual(t, obBTCETH1.ID, obBTCETH2.ID) + + partialMkts, err := session.RetrievePartialMarkets(ctx, + nil, nil, nil, nil, 12, + ) + require.NoError(t, err) + assert.Equal(t, 2, len(partialMkts)) + + // Mapping the retrieved markets: + var btceth1Mkt, btceth2Mkt PartialMarket + for _, mkt := range partialMkts { + if mkt.CounterAssetIssuer == issuer1PK { + btceth1Mkt = mkt + } + + if mkt.CounterAssetIssuer == issuer2PK { + btceth2Mkt = mkt + } + } + tradePair1 := fmt.Sprintf("BTC:%s / ETH:%s", issuer1PK, issuer1PK) + tradePair2 := fmt.Sprintf("BTC:%s / ETH:%s", issuer1PK, issuer2PK) + + assert.Equal(t, tradePair1, btceth1Mkt.TradePairName) + assert.Equal(t, tradePair2, btceth2Mkt.TradePairName) + + // Validating the aggregated data + assert.Equal(t, 150.0, btceth1Mkt.BaseVolume) + assert.Equal(t, 60.0, btceth1Mkt.CounterVolume) + assert.Equal(t, int32(2), btceth1Mkt.TradeCount) + assert.Equal(t, 1.0, btceth1Mkt.Open) + assert.Equal(t, 0.1, btceth1Mkt.Close) + assert.Equal(t, -0.9, btceth1Mkt.Change) + assert.Equal(t, 1.0, btceth1Mkt.High) + assert.Equal(t, 0.1, btceth1Mkt.Low) + assert.WithinDuration(t, oneHourAgo.Local(), btceth1Mkt.FirstLedgerCloseTime.Local(), 10*time.Millisecond) + assert.WithinDuration(t, tenMinutesAgo.Local(), btceth1Mkt.LastLedgerCloseTime.Local(), 10*time.Millisecond) + assert.Equal(t, 24.0, btceth2Mkt.BaseVolume) + assert.Equal(t, 26.0, btceth2Mkt.CounterVolume) + assert.Equal(t, int32(1), btceth2Mkt.TradeCount) + assert.Equal(t, 0.92, btceth2Mkt.Open) + assert.Equal(t, 0.92, btceth2Mkt.Close) + assert.Equal(t, 0.0, btceth2Mkt.Change) + assert.Equal(t, 0.92, btceth2Mkt.High) + assert.Equal(t, 0.92, btceth2Mkt.Low) + assert.WithinDuration(t, now.Local(), btceth2Mkt.FirstLedgerCloseTime.Local(), 10*time.Millisecond) + assert.WithinDuration(t, now.Local(), btceth2Mkt.LastLedgerCloseTime.Local(), 10*time.Millisecond) + + // Analyzing non-aggregated orderbook data + assert.Equal(t, 15, btceth1Mkt.NumBids) + assert.Equal(t, 0.15, btceth1Mkt.BidVolume) + assert.Equal(t, 200.0, btceth1Mkt.HighestBid) + assert.Equal(t, 17, btceth1Mkt.NumAsks) + assert.Equal(t, 30.0, btceth1Mkt.AskVolume) + assert.Equal(t, 0.1, btceth1Mkt.LowestAsk) + + assert.Equal(t, 1, btceth2Mkt.NumBids) + assert.Equal(t, 0.1, btceth2Mkt.BidVolume) + assert.Equal(t, 20.0, btceth2Mkt.HighestBid) + assert.Equal(t, 1, btceth2Mkt.NumAsks) + assert.Equal(t, 15.0, btceth2Mkt.AskVolume) + assert.Equal(t, 0.2, btceth2Mkt.LowestAsk) + + // Now let's use the same data, but aggregating by asset pair + partialAggMkts, err := session.RetrievePartialAggMarkets(ctx, nil, 12) + require.NoError(t, err) + assert.Equal(t, 1, len(partialAggMkts)) + + partialAggMkt := partialAggMkts[0] + + assert.Equal(t, "BTC_ETH", partialAggMkt.TradePairName) + assert.Equal(t, 174.0, partialAggMkt.BaseVolume) + assert.Equal(t, 86.0, partialAggMkt.CounterVolume) + assert.Equal(t, int32(3), partialAggMkt.TradeCount) + assert.Equal(t, 1.0, partialAggMkt.Open) + assert.Equal(t, 0.92, partialAggMkt.Close) + assert.Equal(t, 1.0, partialAggMkt.High) + assert.Equal(t, 0.1, partialAggMkt.Low) + 
assert.WithinDuration(t, oneHourAgo.Local(), partialAggMkt.FirstLedgerCloseTime.Local(), 10*time.Millisecond) + assert.WithinDuration(t, now.Local(), partialAggMkt.LastLedgerCloseTime.Local(), 10*time.Millisecond) + + // There might be some floating point rounding issues, so this test + // needs to be a bit more flexible. Since the change is 0.08, an error + // around 0.0000000000001 is acceptable: + priceDiff := math.Abs(-0.08 - partialAggMkt.Change) + assert.True(t, priceDiff < 0.0000000000001) + + // Validate the pair name parsing: + pairName := new(string) + *pairName = "BTC_ETH" + partialAggMkts, err = session.RetrievePartialAggMarkets(ctx, pairName, 12) + require.NoError(t, err) + assert.Equal(t, 1, len(partialAggMkts)) + assert.Equal(t, int32(3), partialAggMkts[0].TradeCount) + + // Analyzing aggregated orderbook data: + assert.Equal(t, 16, partialAggMkt.NumBids) + assert.Equal(t, 0.25, partialAggMkt.BidVolume) + assert.Equal(t, 200.0, partialAggMkt.HighestBid) + assert.Equal(t, 18, partialAggMkt.NumAsks) + assert.Equal(t, 45.0, partialAggMkt.AskVolume) + assert.Equal(t, 0.1, partialAggMkt.LowestAsk) +} + +func Test24hStatsFallback(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + // Adding a seed issuer to be used later: + tbl := session.GetTable("issuers") + _, err = tbl.Insert(Issuer{ + PublicKey: "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB", + Name: "FOO BAR", + }).IgnoreCols("id").Exec(ctx) + require.NoError(t, err) + var issuer Issuer + err = session.GetRaw(ctx, &issuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding a seed asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "XLM", + IssuerID: issuer.ID, + IsValid: true, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var xlmAsset Asset + err = session.GetRaw(ctx, &xlmAsset, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding another asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "BTC", + IssuerID: issuer.ID, + IsValid: true, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var btcAsset Asset + err = session.GetRaw(ctx, &btcAsset, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // A few times to be used: + now := time.Now() + twoDaysAgo := now.AddDate(0, 0, -3) + threeDaysAgo := now.AddDate(0, 0, -3) + + // Now let's create the trades: + trades := []Trade{ + Trade{ + HorizonID: "hrzid1", + BaseAssetID: xlmAsset.ID, + BaseAmount: 1.0, + CounterAssetID: btcAsset.ID, + CounterAmount: 1.0, + Price: 0.5, // close price & lowest price + LedgerCloseTime: twoDaysAgo, + }, + Trade{ // BTC_ETH trade (ETH is from issuer 2) + HorizonID: "hrzid2", + BaseAssetID: xlmAsset.ID, + BaseAmount: 1.0, + CounterAssetID: btcAsset.ID, + CounterAmount: 1.0, + Price: 1.0, // open price & highest price + LedgerCloseTime: threeDaysAgo, + }, + } + err = session.BulkInsertTrades(ctx, trades) + require.NoError(t, err) + + markets, err := session.RetrieveMarketData(ctx) + require.NoError(t, err) + assert.Equal(t, 1, 
len(markets)) + mkt := markets[0] + + // When there are no 24h data, 24h OHLC should fallback to the 7d close value + assert.Equal(t, 0.5, mkt.LastPrice) + assert.Equal(t, 0.5, mkt.LowestPrice24h) + assert.Equal(t, 0.5, mkt.OpenPrice24h) + assert.Equal(t, 0.5, mkt.HighestPrice24h) +} + +func TestPreferAnchorAssetCode(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + // Adding a seed issuer to be used later: + tbl := session.GetTable("issuers") + _, err = tbl.Insert(Issuer{ + PublicKey: "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB", + Name: "FOO BAR", + }).IgnoreCols("id").Exec(ctx) + require.NoError(t, err) + var issuer Issuer + err = session.GetRaw(ctx, &issuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding a seed asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "XLM", + IssuerID: issuer.ID, + IsValid: true, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var xlmAsset Asset + err = session.GetRaw(ctx, &xlmAsset, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding another asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "EURT", + IssuerID: issuer.ID, + IsValid: true, + AnchorAssetCode: "EUR", + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var btcAsset Asset + err = session.GetRaw(ctx, &btcAsset, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // A few times to be used: + now := time.Now() + twoDaysAgo := now.AddDate(0, 0, -3) + threeDaysAgo := now.AddDate(0, 0, -3) + + // Now let's create the trades: + trades := []Trade{ + Trade{ + HorizonID: "hrzid1", + BaseAssetID: xlmAsset.ID, + BaseAmount: 1.0, + CounterAssetID: btcAsset.ID, + CounterAmount: 1.0, + Price: 0.5, // close price & lowest price + LedgerCloseTime: twoDaysAgo, + }, + Trade{ // BTC_ETH trade (ETH is from issuer 2) + HorizonID: "hrzid2", + BaseAssetID: xlmAsset.ID, + BaseAmount: 1.0, + CounterAssetID: btcAsset.ID, + CounterAmount: 1.0, + Price: 1.0, // open price & highest price + LedgerCloseTime: threeDaysAgo, + }, + } + err = session.BulkInsertTrades(ctx, trades) + require.NoError(t, err) + + markets, err := session.RetrieveMarketData(ctx) + require.NoError(t, err) + require.Equal(t, 1, len(markets)) + for _, mkt := range markets { + require.Equal(t, "XLM_EUR", mkt.TradePair) + } + + partialAggMkts, err := session.RetrievePartialAggMarkets(ctx, nil, 168) + require.NoError(t, err) + assert.Equal(t, 1, len(partialAggMkts)) + for _, aggMkt := range partialAggMkts { + require.Equal(t, "XLM_EUR", aggMkt.TradePairName) + } +} diff --git a/services/ticker/internal/tickerdb/queries_orderbook.go b/services/ticker/internal/tickerdb/queries_orderbook.go new file mode 100644 index 0000000000..9b8bc5c855 --- /dev/null +++ b/services/ticker/internal/tickerdb/queries_orderbook.go @@ -0,0 +1,11 @@ +package tickerdb + +import ( + "context" +) + +// InsertOrUpdateOrderbookStats inserts an OrdebookStats entry on the database (if new), +// or updates an existing one +func (s *TickerSession) 
InsertOrUpdateOrderbookStats(ctx context.Context, o *OrderbookStats, preserveFields []string) (err error) { + return s.performUpsertQuery(ctx, *o, "orderbook_stats", "orderbook_stats_base_counter_asset_key", preserveFields) +} diff --git a/services/ticker/internal/tickerdb/queries_orderbook_test.go b/services/ticker/internal/tickerdb/queries_orderbook_test.go new file mode 100644 index 0000000000..b3c9344f0b --- /dev/null +++ b/services/ticker/internal/tickerdb/queries_orderbook_test.go @@ -0,0 +1,181 @@ +package tickerdb + +import ( + "context" + "testing" + "time" + + migrate "github.com/rubenv/sql-migrate" + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInsertOrUpdateOrderbokStats(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + publicKey := "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + issuerAccount := "AM2FQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB" + name := "FOO BAR" + code := "XLM" + + // Adding a seed issuer to be used later: + issuer := Issuer{ + PublicKey: publicKey, + Name: name, + } + tbl := session.GetTable("issuers") + _, err = tbl.Insert(issuer).IgnoreCols("id").Exec(ctx) + require.NoError(t, err) + var dbIssuer Issuer + err = session.GetRaw(ctx, &dbIssuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Creating first asset: + firstTime := time.Now() + a := Asset{ + Code: code, + IssuerAccount: issuerAccount, + IssuerID: dbIssuer.ID, + LastValid: firstTime, + LastChecked: firstTime, + } + err = session.InsertOrUpdateAsset(ctx, &a, []string{"code", "issuer_account", "issuer_id"}) + require.NoError(t, err) + + var dbAsset1 Asset + err = session.GetRaw(ctx, &dbAsset1, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + assert.Equal(t, code, dbAsset1.Code) + assert.Equal(t, issuerAccount, dbAsset1.IssuerAccount) + assert.Equal(t, dbIssuer.ID, dbAsset1.IssuerID) + assert.WithinDuration(t, firstTime.Local(), dbAsset1.LastValid.Local(), 10*time.Millisecond) + assert.WithinDuration(t, firstTime.Local(), dbAsset1.LastChecked.Local(), 10*time.Millisecond) + + // Creating Seconde Asset: + secondTime := time.Now() + a.LastValid = secondTime + a.LastChecked = secondTime + err = session.InsertOrUpdateAsset(ctx, &a, []string{"code", "issuer_account", "issuer_id"}) + require.NoError(t, err) + + var dbAsset2 Asset + err = session.GetRaw(ctx, &dbAsset2, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Creating an orderbook_stats entry: + obTime := time.Now() + orderbookStats := OrderbookStats{ + BaseAssetID: dbAsset1.ID, + CounterAssetID: dbAsset2.ID, + NumBids: 15, + BidVolume: 0.15, + HighestBid: 200.0, + NumAsks: 17, + AskVolume: 30.0, + LowestAsk: 0.1, + Spread: 0.93, + SpreadMidPoint: 0.35, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats(ctx, + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var dbOS OrderbookStats + err = session.GetRaw(ctx, &dbOS, ` + SELECT * + FROM 
orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + assert.Equal(t, dbAsset1.ID, dbOS.BaseAssetID) + assert.Equal(t, dbAsset2.ID, dbOS.CounterAssetID) + assert.Equal(t, 15, dbOS.NumBids) + assert.Equal(t, 0.15, dbOS.BidVolume) + assert.Equal(t, 200.0, dbOS.HighestBid) + assert.Equal(t, 17, dbOS.NumAsks) + assert.Equal(t, 30.0, dbOS.AskVolume) + assert.Equal(t, 0.1, dbOS.LowestAsk) + assert.Equal(t, 0.93, dbOS.Spread) + assert.Equal(t, 0.35, dbOS.SpreadMidPoint) + assert.WithinDuration(t, obTime.Local(), dbOS.UpdatedAt.Local(), 10*time.Millisecond) + + // Making sure we're upserting: + obTime2 := time.Now() + orderbookStats2 := OrderbookStats{ + BaseAssetID: dbAsset1.ID, + CounterAssetID: dbAsset2.ID, + NumBids: 30, + BidVolume: 0.3, + HighestBid: 400.0, + NumAsks: 34, + AskVolume: 60.0, + LowestAsk: 0.2, + Spread: 1.86, + SpreadMidPoint: 0.7, + UpdatedAt: obTime2, + } + err = session.InsertOrUpdateOrderbookStats(ctx, + &orderbookStats2, + []string{"base_asset_id", "counter_asset_id", "lowest_ask"}, + ) + require.NoError(t, err) + + var dbOS2 OrderbookStats + err = session.GetRaw(ctx, &dbOS2, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + assert.Equal(t, dbOS2.ID, dbOS.ID) // shouldn't create another instance + + assert.Equal(t, dbAsset1.ID, dbOS2.BaseAssetID) + assert.Equal(t, dbAsset2.ID, dbOS2.CounterAssetID) + assert.Equal(t, 30, dbOS2.NumBids) + assert.Equal(t, 0.3, dbOS2.BidVolume) + assert.Equal(t, 400.0, dbOS2.HighestBid) + assert.Equal(t, 34, dbOS2.NumAsks) + assert.Equal(t, 60.0, dbOS2.AskVolume) + assert.Equal(t, 0.1, dbOS2.LowestAsk) // should keep the old value, since on preserveFields + assert.Equal(t, 1.86, dbOS2.Spread) + assert.Equal(t, 0.7, dbOS2.SpreadMidPoint) + assert.WithinDuration(t, obTime2.Local(), dbOS2.UpdatedAt.Local(), 10*time.Millisecond) +} diff --git a/services/ticker/internal/tickerdb/queries_trade.go b/services/ticker/internal/tickerdb/queries_trade.go new file mode 100644 index 0000000000..8a5cc5db70 --- /dev/null +++ b/services/ticker/internal/tickerdb/queries_trade.go @@ -0,0 +1,89 @@ +package tickerdb + +import ( + "context" + "math" + "strings" + "time" +) + +// BulkInsertTrades inserts a slice of trades in the database. Trades +// that are already in the database (i.e. horizon_id already exists) +// are ignored. +func (s *TickerSession) BulkInsertTrades(ctx context.Context, trades []Trade) (err error) { + if len(trades) <= 50 { + return performInsertTrades(ctx, s, trades) + } + + chunks := chunkifyDBTrades(trades, 50) + for _, chunk := range chunks { + err = performInsertTrades(ctx, s, chunk) + if err != nil { + return + } + } + + return +} + +// GetLastTrade returns the newest Trade object in the database. +func (s *TickerSession) GetLastTrade(ctx context.Context) (trade Trade, err error) { + err = s.GetRaw(ctx, &trade, "SELECT * FROM trades ORDER BY ledger_close_time DESC LIMIT 1") + return +} + +// DeleteOldTrades deletes trades in the database older than minDate. 
+func (s *TickerSession) DeleteOldTrades(ctx context.Context, minDate time.Time) error { + _, err := s.ExecRaw(ctx, "DELETE FROM trades WHERE ledger_close_time < ?", minDate) + return err +} + +// chunkifyDBTrades transforms a slice into a slice of chunks (also slices) of chunkSize +// e.g.: Chunkify([b, c, d, e, f], 2) = [[b c] [d e] [f]] +func chunkifyDBTrades(sl []Trade, chunkSize int) [][]Trade { + var chunkedSlice [][]Trade + + numChunks := int(math.Ceil(float64(len(sl)) / float64(chunkSize))) + start := 0 + length := len(sl) + + for i := 0; i < numChunks; i++ { + end := start + chunkSize + + if end > length { + end = length + } + chunk := sl[start:end] + chunkedSlice = append(chunkedSlice, chunk) + start = end + + } + + return chunkedSlice +} + +func performInsertTrades(ctx context.Context, s *TickerSession, trades []Trade) (err error) { + var t Trade + var placeholders string + var dbValues []interface{} + + dbFields := getDBFieldTags(t, true) + dbFieldsString := strings.Join(dbFields, ", ") + + for i, trade := range trades { + v := getDBFieldValues(trade, true) + placeholders += "(" + generatePlaceholders(v) + ")" + dbValues = append(dbValues, v...) + + if i != len(trades)-1 { + placeholders += "," + } + } + + qs := "INSERT INTO trades (" + dbFieldsString + ")" + qs += " VALUES " + placeholders + qs += " ON CONFLICT ON CONSTRAINT trades_horizon_id_key DO NOTHING;" + + _, err = s.ExecRaw(ctx, qs, dbValues...) + return +} diff --git a/services/ticker/internal/tickerdb/queries_trade_test.go b/services/ticker/internal/tickerdb/queries_trade_test.go new file mode 100644 index 0000000000..51b67dff8c --- /dev/null +++ b/services/ticker/internal/tickerdb/queries_trade_test.go @@ -0,0 +1,353 @@ +package tickerdb + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + _ "github.com/lib/pq" + migrate "github.com/rubenv/sql-migrate" + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/require" +) + +func TestBulkInsertTrades(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + // Adding a seed issuer to be used later: + tbl := session.GetTable("issuers") + _, err = tbl.Insert(Issuer{ + PublicKey: "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB", + Name: "FOO BAR", + }).IgnoreCols("id").Exec(ctx) + require.NoError(t, err) + var issuer Issuer + err = session.GetRaw(ctx, &issuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding a seed asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "XLM", + IssuerID: issuer.ID, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var asset1 Asset + err = session.GetRaw(ctx, &asset1, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding another asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "BTC", + IssuerID: issuer.ID, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var asset2 Asset + err = session.GetRaw(ctx, &asset2, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Verify 
that we actually have two assets: + assert.NotEqual(t, asset1.ID, asset2.ID) + + // Now let's create the trades: + trades := []Trade{ + Trade{ + HorizonID: "hrzid1", + BaseAssetID: asset1.ID, + CounterAssetID: asset2.ID, + LedgerCloseTime: time.Now(), + }, + Trade{ + HorizonID: "hrzid2", + BaseAssetID: asset2.ID, + CounterAssetID: asset1.ID, + LedgerCloseTime: time.Now(), + }, + } + err = session.BulkInsertTrades(ctx, trades) + require.NoError(t, err) + + // Ensure only two were created: + rows, err := session.QueryRaw(ctx, "SELECT * FROM trades") + require.NoError(t, err) + rowsCount := 0 + for rows.Next() { + rowsCount++ + } + assert.Equal(t, 2, rowsCount) + + // Re-insert the same trades and check if count remains = 2: + err = session.BulkInsertTrades(ctx, trades) + require.NoError(t, err) + + rows, err = session.QueryRaw(ctx, "SELECT * FROM trades") + require.NoError(t, err) + rowsCount2 := 0 + for rows.Next() { + rowsCount2++ + } + assert.Equal(t, 2, rowsCount2) +} + +func TestGetLastTrade(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + // Sanity Check (there are no trades in the database) + _, err = session.GetLastTrade(ctx) + require.Error(t, err) + + // Adding a seed issuer to be used later: + tbl := session.GetTable("issuers") + _, err = tbl.Insert(Issuer{ + PublicKey: "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB", + Name: "FOO BAR", + }).IgnoreCols("id").Exec(ctx) + require.NoError(t, err) + var issuer Issuer + err = session.GetRaw(ctx, &issuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding a seed asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "XLM", + IssuerID: issuer.ID, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var asset1 Asset + err = session.GetRaw(ctx, &asset1, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding another asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "BTC", + IssuerID: issuer.ID, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var asset2 Asset + err = session.GetRaw(ctx, &asset2, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Verify that we actually have two assets: + assert.NotEqual(t, asset1.ID, asset2.ID) + + now := time.Now() + oneYearBefore := now.AddDate(-1, 0, 0) + + // Now let's create the trades: + trades := []Trade{ + Trade{ + HorizonID: "hrzid2", + BaseAssetID: asset2.ID, + CounterAssetID: asset1.ID, + LedgerCloseTime: oneYearBefore, + }, + Trade{ + HorizonID: "hrzid1", + BaseAssetID: asset1.ID, + CounterAssetID: asset2.ID, + LedgerCloseTime: now, + }, + Trade{ + HorizonID: "hrzid2", + BaseAssetID: asset2.ID, + CounterAssetID: asset1.ID, + LedgerCloseTime: oneYearBefore, + }, + } + + // Re-insert the same trades and check if count remains = 2: + err = session.BulkInsertTrades(ctx, trades) + require.NoError(t, err) + + lastTrade, err := session.GetLastTrade(ctx) + require.NoError(t, err) + assert.WithinDuration(t, now.Local(), lastTrade.LedgerCloseTime.Local(), 10*time.Millisecond) +} + 
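Note on the batching exercised above: BulkInsertTrades (queries_trade.go) splits the incoming slice into batches of at most 50 trades via chunkifyDBTrades, and performInsertTrades then builds one multi-row INSERT ... ON CONFLICT ON CONSTRAINT trades_horizon_id_key DO NOTHING statement per batch, so rows whose horizon_id already exists are silently skipped. The snippet below is a minimal, self-contained sketch of the chunking step only; the chunkify function, the string slice, and the batch size of 2 are illustrative and not part of the package.

```go
package main

import "fmt"

// chunkify splits items into consecutive chunks of at most chunkSize elements,
// mirroring the documented behavior of chunkifyDBTrades:
// chunkify([b c d e f], 2) => [[b c] [d e] [f]]
func chunkify(items []string, chunkSize int) [][]string {
	var chunks [][]string
	for start := 0; start < len(items); start += chunkSize {
		end := start + chunkSize
		if end > len(items) {
			end = len(items)
		}
		chunks = append(chunks, items[start:end])
	}
	return chunks
}

func main() {
	fmt.Println(chunkify([]string{"b", "c", "d", "e", "f"}, 2)) // [[b c] [d e] [f]]
}
```

Keeping each batch small bounds the number of bind parameters per statement while still avoiding one round trip per trade.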
+func TestDeleteOldTrades(t *testing.T) { + db := dbtest.Postgres(t) + defer db.Close() + + var session TickerSession + session.DB = db.Open() + ctx := context.Background() + defer session.DB.Close() + + // Run migrations to make sure the tests are run + // on the most updated schema version + migrations := &migrate.FileMigrationSource{ + Dir: "./migrations", + } + _, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up) + require.NoError(t, err) + + // Adding a seed issuer to be used later: + tbl := session.GetTable("issuers") + _, err = tbl.Insert(Issuer{ + PublicKey: "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB", + Name: "FOO BAR", + }).IgnoreCols("id").Exec(ctx) + require.NoError(t, err) + var issuer Issuer + err = session.GetRaw(ctx, &issuer, ` + SELECT * + FROM issuers + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding a seed asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "XLM", + IssuerID: issuer.ID, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var asset1 Asset + err = session.GetRaw(ctx, &asset1, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Adding another asset to be used later: + err = session.InsertOrUpdateAsset(ctx, &Asset{ + Code: "BTC", + IssuerID: issuer.ID, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + var asset2 Asset + err = session.GetRaw(ctx, &asset2, ` + SELECT * + FROM assets + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Verify that we actually have two assets: + assert.NotEqual(t, asset1.ID, asset2.ID) + + // Setting up some times for testing + now := time.Now() + oneDayAgo := now.AddDate(0, 0, -1) + oneMonthAgo := now.AddDate(0, -1, 0) + oneYearAgo := now.AddDate(-1, 0, 0) + + // Now let's create the trades: + trades := []Trade{ + Trade{ + HorizonID: "hrzid1", + BaseAssetID: asset1.ID, + CounterAssetID: asset2.ID, + LedgerCloseTime: now, + }, + Trade{ + HorizonID: "hrzid2", + BaseAssetID: asset2.ID, + CounterAssetID: asset1.ID, + LedgerCloseTime: oneDayAgo, + }, + Trade{ + HorizonID: "hrzid3", + BaseAssetID: asset2.ID, + CounterAssetID: asset1.ID, + LedgerCloseTime: oneMonthAgo, + }, + Trade{ + HorizonID: "hrzid4", + BaseAssetID: asset2.ID, + CounterAssetID: asset1.ID, + LedgerCloseTime: oneYearAgo, + }, + } + err = session.BulkInsertTrades(ctx, trades) + require.NoError(t, err) + + // Deleting trades older than 1 day ago: + err = session.DeleteOldTrades(ctx, oneDayAgo) + require.NoError(t, err) + + var dbTrades []Trade + var trade1, trade2 Trade + err = session.SelectRaw(ctx, &dbTrades, "SELECT * FROM trades") + require.NoError(t, err) + assert.Equal(t, 2, len(dbTrades)) + + // Make sure we're actually deleting the entries we wanted: + for i, trade := range dbTrades { + if trade.HorizonID == "hrzid1" { + trade1 = dbTrades[i] + } + + if trade.HorizonID == "hrzid2" { + trade2 = dbTrades[i] + } + } + + assert.NotEqual(t, trade1.HorizonID, "") + assert.NotEqual(t, trade2.HorizonID, "") + assert.WithinDuration(t, now.Local(), trade1.LedgerCloseTime.Local(), 10*time.Millisecond) + assert.WithinDuration(t, oneDayAgo.Local(), trade2.LedgerCloseTime.Local(), 10*time.Millisecond) +} diff --git a/services/ticker/internal/tickerdb/tickerdbtest/tickerdbtest.go b/services/ticker/internal/tickerdb/tickerdbtest/tickerdbtest.go new file mode 100644 index 0000000000..34bebb6602 --- /dev/null +++ b/services/ticker/internal/tickerdb/tickerdbtest/tickerdbtest.go @@ -0,0 +1,330 @@ +package 
tickerdbtest
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	migrate "github.com/rubenv/sql-migrate"
+	"github.com/stellar/go/services/ticker/internal/tickerdb"
+	"github.com/stellar/go/support/db/dbtest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// SetupTickerTestSession sets up the database for testing the GraphQL endpoints
+// and associated query logic.
+func SetupTickerTestSession(t *testing.T, migrationsDir string) (session tickerdb.TickerSession) {
+	db := dbtest.Postgres(t)
+	session.DB = db.Open()
+	ctx := context.Background()
+
+	// Run migrations to make sure the tests are run
+	// on the most updated schema version
+	migrations := &migrate.FileMigrationSource{
+		Dir: migrationsDir,
+	}
+	_, err := migrate.Exec(session.DB.DB, "postgres", migrations, migrate.Up)
+	require.NoError(t, err)
+
+	// Adding a seed issuer to be used later:
+	issuer1PK := "GCF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB"
+	tbl := session.GetTable("issuers")
+	_, err = tbl.Insert(tickerdb.Issuer{
+		PublicKey: issuer1PK,
+		Name:      "FOO BAR",
+	}).IgnoreCols("id").Exec(ctx)
+	require.NoError(t, err)
+
+	var issuer1 tickerdb.Issuer
+	err = session.GetRaw(ctx, &issuer1, `
+		SELECT *
+		FROM issuers
+		WHERE public_key = ?`,
+		issuer1PK,
+	)
+	require.NoError(t, err)
+
+	// Adding another issuer to be used later:
+	issuer2PK := "ABF3TQXKZJNFJK7HCMNE2O2CUNKCJH2Y2ROISTBPLC7C5EIA5NNG2XZB"
+	_, err = tbl.Insert(tickerdb.Issuer{
+		PublicKey: issuer2PK,
+		Name:      "FOO BAR",
+	}).IgnoreCols("id").Exec(ctx)
+	require.NoError(t, err)
+
+	var issuer2 tickerdb.Issuer
+	err = session.GetRaw(ctx, &issuer2, `
+		SELECT *
+		FROM issuers
+		WHERE public_key = ?`,
+		issuer2PK,
+	)
+	require.NoError(t, err)
+
+	// Adding a seed asset to be used later:
+	err = session.InsertOrUpdateAsset(ctx, &tickerdb.Asset{
+		Code:          "ETH",
+		IssuerAccount: issuer1PK,
+		IssuerID:      issuer1.ID,
+		IsValid:       true,
+	}, []string{"code", "issuer_id"})
+	require.NoError(t, err)
+	var ethAsset1 tickerdb.Asset
+	err = session.GetRaw(ctx, &ethAsset1, `
+		SELECT *
+		FROM assets
+		WHERE code = ?
+		AND issuer_account = ?`,
+		"ETH",
+		issuer1PK,
+	)
+	require.NoError(t, err)
+
+	// Adding a seed asset to be used later:
+	err = session.InsertOrUpdateAsset(ctx, &tickerdb.Asset{
+		Code:          "ETH",
+		IssuerAccount: issuer2PK,
+		IssuerID:      issuer2.ID,
+		IsValid:       true,
+	}, []string{"code", "issuer_id"})
+	require.NoError(t, err)
+
+	var ethAsset2 tickerdb.Asset
+	err = session.GetRaw(ctx, &ethAsset2, `
+		SELECT *
+		FROM assets
+		WHERE code = ?
+		AND issuer_account = ?`,
+		"ETH",
+		issuer2PK,
+	)
+	require.NoError(t, err)
+
+	// Adding another asset to be used later:
+	err = session.InsertOrUpdateAsset(ctx, &tickerdb.Asset{
+		Code:          "BTC",
+		IssuerAccount: issuer1PK,
+		IssuerID:      issuer1.ID,
+		IsValid:       true,
+	}, []string{"code", "issuer_id"})
+	require.NoError(t, err)
+
+	var btcAsset tickerdb.Asset
+	err = session.GetRaw(ctx, &btcAsset, `
+		SELECT *
+		FROM assets
+		WHERE code = ?
+ AND issuer_account = ?`, + "BTC", + issuer1PK, + ) + require.NoError(t, err) + + // A few times to be used: + now := time.Now() + tenMinutesAgo := now.Add(-10 * time.Minute) + oneHourAgo := now.Add(-1 * time.Hour) + threeDaysAgo := now.AddDate(0, 0, -3) + + // Now let's create the trades: + trades := []tickerdb.Trade{ + tickerdb.Trade{ // BTC_ETH trade (ETH is from issuer 1) + HorizonID: "hrzid1", + BaseAssetID: btcAsset.ID, + BaseAmount: 100.0, + CounterAssetID: ethAsset1.ID, + CounterAmount: 10.0, + Price: 0.1, + LedgerCloseTime: tenMinutesAgo, + }, + tickerdb.Trade{ // BTC_ETH trade (ETH is from issuer 2) + HorizonID: "hrzid3", + BaseAssetID: btcAsset.ID, + BaseAmount: 24.0, + CounterAssetID: ethAsset2.ID, + CounterAmount: 26.0, + Price: 0.92, + LedgerCloseTime: now, + }, + tickerdb.Trade{ // BTC_ETH trade (ETH is from issuer 1) + HorizonID: "hrzid2", + BaseAssetID: btcAsset.ID, + BaseAmount: 50.0, + CounterAssetID: ethAsset1.ID, + CounterAmount: 50.0, + Price: 1.0, + LedgerCloseTime: oneHourAgo, + }, + tickerdb.Trade{ // BTC_ETH trade (ETH is from issuer 1) + HorizonID: "hrzid4", + BaseAssetID: btcAsset.ID, + BaseAmount: 50.0, + CounterAssetID: ethAsset1.ID, + CounterAmount: 6.0, + Price: 0.12, + LedgerCloseTime: threeDaysAgo, + }, + } + err = session.BulkInsertTrades(ctx, trades) + require.NoError(t, err) + + // Adding some orderbook stats: + obTime := time.Now() + orderbookStats := tickerdb.OrderbookStats{ + BaseAssetID: btcAsset.ID, + CounterAssetID: ethAsset1.ID, + NumBids: 15, + BidVolume: 0.15, + HighestBid: 200.0, + NumAsks: 17, + AskVolume: 30.0, + LowestAsk: 0.1, + Spread: 0.93, + SpreadMidPoint: 0.35, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats(ctx, + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obBTCETH1 tickerdb.OrderbookStats + err = session.GetRaw(ctx, &obBTCETH1, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + orderbookStats = tickerdb.OrderbookStats{ + BaseAssetID: ethAsset1.ID, + CounterAssetID: btcAsset.ID, + NumBids: 10, + BidVolume: 0.90, + HighestBid: 100.0, + NumAsks: 12, + AskVolume: 25.0, + LowestAsk: 0.2, + Spread: 0.55, + SpreadMidPoint: 0.85, + } + err = session.InsertOrUpdateOrderbookStats(ctx, + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obETH1BTC tickerdb.OrderbookStats + err = session.GetRaw(ctx, &obETH1BTC, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + orderbookStats = tickerdb.OrderbookStats{ + BaseAssetID: btcAsset.ID, + CounterAssetID: ethAsset2.ID, + NumBids: 1, + BidVolume: 0.1, + HighestBid: 20.0, + NumAsks: 1, + AskVolume: 15.0, + LowestAsk: 0.2, + Spread: 0.96, + SpreadMidPoint: 0.36, + UpdatedAt: obTime, + } + err = session.InsertOrUpdateOrderbookStats(ctx, + &orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obBTCETH2 tickerdb.OrderbookStats + err = session.GetRaw(ctx, &obBTCETH2, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + assert.NotEqual(t, obBTCETH1.ID, obBTCETH2.ID) + + orderbookStats = tickerdb.OrderbookStats{ + BaseAssetID: ethAsset2.ID, + CounterAssetID: btcAsset.ID, + NumBids: 20, + BidVolume: 0.60, + HighestBid: 300.0, + NumAsks: 20, + AskVolume: 256.0, + LowestAsk: 0.70, + Spread: 150.0, + SpreadMidPoint: 200.0, + } + err = session.InsertOrUpdateOrderbookStats(ctx, + 
&orderbookStats, + []string{"base_asset_id", "counter_asset_id"}, + ) + require.NoError(t, err) + + var obETH2BTC tickerdb.OrderbookStats + err = session.GetRaw(ctx, &obETH2BTC, ` + SELECT * + FROM orderbook_stats + ORDER BY id DESC + LIMIT 1`, + ) + require.NoError(t, err) + + // Add an XLM asset. + err = session.InsertOrUpdateAsset(ctx, &tickerdb.Asset{ + Code: "XLM", + IssuerAccount: issuer1PK, + IssuerID: issuer1.ID, + IsValid: true, + }, []string{"code", "issuer_id"}) + require.NoError(t, err) + + var xlmAsset tickerdb.Asset + err = session.GetRaw(ctx, &xlmAsset, ` + SELECT * + FROM assets + WHERE code = ? + AND issuer_account = ?`, + "XLM", + issuer1PK, + ) + require.NoError(t, err) + + // Add XLM/BTC trades. + trades = []tickerdb.Trade{ + tickerdb.Trade{ + HorizonID: "hrzid5", + BaseAssetID: xlmAsset.ID, + BaseAmount: 10.0, + CounterAssetID: btcAsset.ID, + CounterAmount: 10.0, + Price: 0.5, // close price & lowest price + LedgerCloseTime: tenMinutesAgo, + }, + tickerdb.Trade{ + HorizonID: "hrzid6", + BaseAssetID: xlmAsset.ID, + BaseAmount: 10.0, + CounterAssetID: btcAsset.ID, + CounterAmount: 10.0, + Price: 1.0, // open price & highest price + LedgerCloseTime: now, + }, + } + err = session.BulkInsertTrades(ctx, trades) + require.NoError(t, err) + + return +} diff --git a/services/ticker/internal/utils/main.go b/services/ticker/internal/utils/main.go new file mode 100644 index 0000000000..3286037855 --- /dev/null +++ b/services/ticker/internal/utils/main.go @@ -0,0 +1,105 @@ +package utils + +import ( + "fmt" + "math/rand" + "os" + "time" + + hlog "github.com/stellar/go/support/log" +) + +// PanicIfError is an utility function that panics if err != nil +func PanicIfError(e error) { + if e != nil { + panic(e) + } +} + +// WriteJSONToFile atomically writes a json []byte dump to +// It ensures atomicity by first creating a tmp file (filename.tmp), writing +// the contents to it, then renaming it to the originally specified filename. +func WriteJSONToFile(jsonBytes []byte, filename string) (numBytes int, err error) { + tmp := fmt.Sprintf("%s.tmp", filename) + f, err := os.Create(tmp) + PanicIfError(err) + defer f.Close() + + numBytes, err = f.Write(jsonBytes) + if err != nil { + return + } + + err = f.Sync() + if err != nil { + return + } + + err = os.Rename(tmp, filename) + return +} + +// SliceDiff returns the elements in `a` that aren't in `b`. 
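+// Illustrative example (mirroring TestSliceDiff below):
+// SliceDiff([]string{"a", "b", "c"}, []string{"a", "b"}) returns []string{"c"};
+// elements that appear only in `b` are not reported.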
+func SliceDiff(a, b []string) (diff []string) {
+ bmap := map[string]bool{}
+ for _, x := range b {
+ bmap[x] = true
+ }
+ for _, x := range a {
+ if _, ok := bmap[x]; !ok {
+ diff = append(diff, x)
+ }
+ }
+ return
+}
+
+// GetAssetString returns a string representation of an asset
+func GetAssetString(assetType string, code string, issuer string) string {
+ if assetType == "native" {
+ return "native"
+ }
+ return fmt.Sprintf("%s:%s", code, issuer)
+}
+
+// TimeToUnixEpoch converts a time.Time into a Unix timestamp in milliseconds
+func TimeToUnixEpoch(t time.Time) int64 {
+ return t.UnixNano() / 1000000
+}
+
+// TimeToRFC3339 converts a time.Time to a string in RFC3339 format
+func TimeToRFC3339(t time.Time) string {
+ return t.Format(time.RFC3339)
+}
+
+// CalcSpread calculates the spread stats for the given bidMax and askMin orderbook values
+func CalcSpread(bidMax float64, askMin float64) (spread float64, midPoint float64) {
+ if askMin == 0 || bidMax == 0 {
+ return 0, 0
+ }
+ spread = (askMin - bidMax) / askMin
+ midPoint = bidMax + spread/2.0
+ return
+}
+
+// Retry runs f up to numRetries times in total, doubling the delay (plus random
+// jitter) between attempts, until f succeeds or the attempts are exhausted.
+func Retry(numRetries int, delay time.Duration, logger *hlog.Entry, f func() error) error {
+ if err := f(); err != nil {
+ if numRetries--; numRetries > 0 {
+ jitter := time.Duration(rand.Int63n(int64(delay)))
+ delay = delay + jitter/2
+
+ logger.Infof("Backing off for %.3f seconds before retrying", delay.Seconds())
+
+ time.Sleep(delay)
+ return Retry(numRetries, 2*delay, logger, f)
+ }
+ return err
+ }
+
+ return nil
+}
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
diff --git a/services/ticker/internal/utils/main_test.go b/services/ticker/internal/utils/main_test.go
new file mode 100644
index 0000000000..086734e030
--- /dev/null
+++ b/services/ticker/internal/utils/main_test.go
@@ -0,0 +1,18 @@
+package utils
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSliceDiff(t *testing.T) {
+ slice1 := []string{"a", "b", "c"}
+ slice2 := []string{"a", "b"}
+
+ diff := SliceDiff(slice1, slice2)
+ assert.Contains(t, diff, "c")
+ assert.NotContains(t, diff, "a")
+ assert.NotContains(t, diff, "b")
+ assert.Equal(t, 1, len(diff))
+}
diff --git a/services/ticker/main.go b/services/ticker/main.go
new file mode 100644
index 0000000000..4e658e80a2
--- /dev/null
+++ b/services/ticker/main.go
@@ -0,0 +1,7 @@
+package main
+
+import "github.com/stellar/go/services/ticker/cmd"
+
+func main() {
+ cmd.Execute()
+}
diff --git a/staticcheck.sh b/staticcheck.sh
new file mode 100755
index 0000000000..8bb3cece69
--- /dev/null
+++ b/staticcheck.sh
@@ -0,0 +1,21 @@
+#! /bin/bash
+set -e
+
+# Check if staticcheck is installed, if not install it.
+command -v staticcheck >/dev/null 2>&1 || (
+ dir=$(mktemp -d)
+ pushd $dir
+ go mod init tool
+ go get honnef.co/go/tools/cmd/staticcheck@2020.1.4
+ popd
+)
+
+printf "Running staticcheck...\n"
+
+ls -d */ \
+ | egrep -v '^vendor|^docs' \
+ | xargs -I {} staticcheck -tests=false -checks="all,-ST1003,-SA1019,-ST1005,-ST1000,-ST1016,-S1039,-ST1021,-ST1020,-ST1019,-SA4022" ./{}...
+
+
+# Check horizon for unused exported symbols (relying on the fact that it should be self-contained)
+staticcheck -unused.whole-program -checks='U*' ./services/horizon/...
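+
+# Illustrative note: a single package can be checked locally in the same way, e.g.
+#   staticcheck -tests=false -checks="all,-ST1003" ./strkey/...
+# (the check list used in CI is the full one above).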
diff --git a/strkey/benchmark_test.go b/strkey/benchmark_test.go new file mode 100644 index 0000000000..a22079719a --- /dev/null +++ b/strkey/benchmark_test.go @@ -0,0 +1,23 @@ +package strkey_test + +import ( + "testing" + + "github.com/stellar/go/strkey" + "github.com/stretchr/testify/require" +) + +func BenchmarkDecode_accountID(b *testing.B) { + accountID, err := strkey.Encode(strkey.VersionByteAccountID, make([]byte, 32)) + require.NoError(b, err) + for i := 0; i < b.N; i++ { + _, _ = strkey.Decode(strkey.VersionByteAccountID, accountID) + } +} + +func BenchmarkEncode_accountID(b *testing.B) { + accountID := make([]byte, 32) + for i := 0; i < b.N; i++ { + _, _ = strkey.Encode(strkey.VersionByteAccountID, accountID) + } +} diff --git a/strkey/decode_test.go b/strkey/decode_test.go index 699677a4c1..431a87d287 100644 --- a/strkey/decode_test.go +++ b/strkey/decode_test.go @@ -1,75 +1,195 @@ -package strkey_test +package strkey import ( - . "github.com/stellar/go/strkey" + "testing" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" ) -var _ = Describe("strkey.Decode", func() { - validAccount := "GA3D5KRYM6CB7OWQ6TWYRR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHES5" - validAccountPayload := []byte{ - 0x36, 0x3e, 0xaa, 0x38, 0x67, 0x84, 0x1f, 0xba, - 0xd0, 0xf4, 0xed, 0x88, 0xc7, 0x79, 0xe4, 0xfe, - 0x66, 0xe5, 0x6a, 0x24, 0x70, 0xdc, 0x98, 0xc0, - 0xec, 0x9c, 0x07, 0x3d, 0x05, 0xc7, 0xb1, 0x03, +func TestDecode(t *testing.T) { + cases := []struct { + Name string + Address string + ExpectedVersionByte VersionByte + ExpectedPayload []byte + }{ + { + Name: "AccountID", + Address: "GA3D5KRYM6CB7OWQ6TWYRR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHES5", + ExpectedVersionByte: VersionByteAccountID, + ExpectedPayload: []byte{ + 0x36, 0x3e, 0xaa, 0x38, 0x67, 0x84, 0x1f, 0xba, + 0xd0, 0xf4, 0xed, 0x88, 0xc7, 0x79, 0xe4, 0xfe, + 0x66, 0xe5, 0x6a, 0x24, 0x70, 0xdc, 0x98, 0xc0, + 0xec, 0x9c, 0x07, 0x3d, 0x05, 0xc7, 0xb1, 0x03, + }, + }, + { + Name: "MuxedAccount", + Address: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + ExpectedVersionByte: VersionByteMuxedAccount, + ExpectedPayload: []byte{ + 0x3f, 0x0c, 0x34, 0xbf, 0x93, 0xad, 0x0d, 0x99, + 0x71, 0xd0, 0x4c, 0xcc, 0x90, 0xf7, 0x05, 0x51, + 0x1c, 0x83, 0x8a, 0xad, 0x97, 0x34, 0xa4, 0xa2, + 0xfb, 0x0d, 0x7a, 0x03, 0xfc, 0x7f, 0xe8, 0x9a, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, + { + Name: "Seed", + Address: "SBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHOKR", + ExpectedVersionByte: VersionByteSeed, + ExpectedPayload: []byte{ + 0x69, 0xa8, 0xc4, 0xcb, 0xb9, 0xf6, 0x4e, 0x8a, + 0x07, 0x98, 0xf6, 0xe1, 0xac, 0x65, 0xd0, 0x6c, + 0x31, 0x62, 0x92, 0x90, 0x56, 0xbc, 0xf4, 0xcd, + 0xb7, 0xd3, 0x73, 0x8d, 0x18, 0x55, 0xf3, 0x63, + }, + }, + { + Name: "HashTx", + Address: "TBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHXL7", + ExpectedVersionByte: VersionByteHashTx, + ExpectedPayload: []byte{ + 0x69, 0xa8, 0xc4, 0xcb, 0xb9, 0xf6, 0x4e, 0x8a, + 0x07, 0x98, 0xf6, 0xe1, 0xac, 0x65, 0xd0, 0x6c, + 0x31, 0x62, 0x92, 0x90, 0x56, 0xbc, 0xf4, 0xcd, + 0xb7, 0xd3, 0x73, 0x8d, 0x18, 0x55, 0xf3, 0x63, + }, + }, + { + Name: "HashX", + Address: "XBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWGTOG", + ExpectedVersionByte: VersionByteHashX, + ExpectedPayload: []byte{ + 0x69, 0xa8, 0xc4, 0xcb, 0xb9, 0xf6, 0x4e, 0x8a, + 0x07, 0x98, 0xf6, 0xe1, 0xac, 0x65, 0xd0, 0x6c, + 0x31, 0x62, 0x92, 0x90, 0x56, 0xbc, 0xf4, 0xcd, + 0xb7, 0xd3, 0x73, 0x8d, 0x18, 0x55, 0xf3, 0x63, + }, + }, } - validSeed := 
"SBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHOKR" - validSeedPayload := []byte{ - 0x69, 0xa8, 0xc4, 0xcb, 0xb9, 0xf6, 0x4e, 0x8a, - 0x07, 0x98, 0xf6, 0xe1, 0xac, 0x65, 0xd0, 0x6c, - 0x31, 0x62, 0x92, 0x90, 0x56, 0xbc, 0xf4, 0xcd, - 0xb7, 0xd3, 0x73, 0x8d, 0x18, 0x55, 0xf3, 0x63, + + for _, kase := range cases { + payload, err := Decode(kase.ExpectedVersionByte, kase.Address) + if assert.NoError(t, err, "An error occured decoding case %s", kase.Name) { + assert.Equal(t, kase.ExpectedPayload, payload, "Output mismatch in case %s", kase.Name) + } } - It("decodes valid values", func() { - payload, err := Decode(VersionByteAccountID, validAccount) - Expect(err).To(BeNil()) - Expect(payload).To(Equal(validAccountPayload)) - - payload, err = Decode(VersionByteSeed, validSeed) - Expect(err).To(BeNil()) - Expect(payload).To(Equal(validSeedPayload)) - }) - - Context("the expected version byte doesn't match the actual version byte", func() { - It("fails", func() { - _, err := Decode(VersionByteAccountID, validSeed) - Expect(err).To(HaveOccurred()) - _, err = Decode(VersionByteSeed, validAccount) - Expect(err).To(HaveOccurred()) - }) - }) + // the expected version byte doesn't match the actual version byte + _, err := Decode(VersionByteSeed, cases[0].Address) + assert.Error(t, err) - Context("the expected version byte isn't a valid constant", func() { - It("fails", func() { - _, err := Decode(VersionByte(2), validAccount) - Expect(err).To(HaveOccurred()) - }) - }) + // invalid version byte + _, err = Decode(VersionByte(2), cases[0].Address) + assert.Error(t, err) - Context("the checksum has been corrupted", func() { - It("fails", func() { - corrupted := "GB" + validAccount[2:] - _, err := Decode(VersionByteAccountID, corrupted) - Expect(err).To(HaveOccurred()) - }) - }) + // empty input + _, err = Decode(VersionByteAccountID, "") + assert.Error(t, err) - Context("the payload has been corrupted", func() { - It("fails", func() { - corrupted := validAccount[0:len(validAccount)-2] + "Z5" - _, err := Decode(VersionByteAccountID, corrupted) - Expect(err).To(HaveOccurred()) - }) - }) + // corrupted checksum + _, err = Decode(VersionByteAccountID, "GA3D5KRYM6CB7OWQ6TWYRR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHE55") + assert.Error(t, err) - Context("the input is empty", func() { - It("fails", func() { - _, err := Decode(VersionByteAccountID, "") - Expect(err).To(HaveOccurred()) - }) - }) + // corrupted payload + _, err = Decode(VersionByteAccountID, "GA3D5KRYM6CB7OWOOOORR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHES5") + assert.Error(t, err) + + // non-canonical representation due to extra character + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLKA") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused leftover character") + } -}) + // non-canonical representation due to leftover bits set to 1 (some of the test strkeys are too short for a muxed account + // but they comply with the test's purpose all the same) + + // 1 unused bit (length 69) + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLH") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUR") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + // 4 unused bits (length 68) + + // 'B' is equivalent to 0b00001 + _, err = 
Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJB") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + // 'C' is equivalent to 0b00010 + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJC") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + // 'E' is equivalent to 0b00100 + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJE") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + // 'I' is equivalent to 0b01000 + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJI") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + // '7' is equivalent to 0b11111 + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJ7") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + // '6' is equivalent to 0b11110 + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJ6") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + // '4' is equivalent to 0b11100 + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJ4") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + // 'Y' is equivalent to 0b11000 + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJY") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "unused bits should be set to 0") + } + + // 'Q' is equivalent to 0b10000, so there should be no error + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJQ") + assert.NotContains(t, err.Error(), "unused bits should be set to 0") + + // Padding bytes are not allowed + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK===") + assert.Contains(t, err.Error(), "illegal base32 data") + + // Invalid algorithm (low 3 bits of version byte are 7) + _, err = Decode(VersionByteMuxedAccount, "M47QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ") + assert.Contains(t, err.Error(), "invalid version byte") + + // Invalid checksum + _, err = Decode(VersionByteMuxedAccount, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUO") + assert.Contains(t, err.Error(), "invalid checksum") +} + +func TestMalformed(t *testing.T) { + // found by go-fuzz + crashers := []string{ + "\n\n5JY", + "UURL\xff\xff\xff\xff", + "\r\r222", + } + + for _, c := range crashers { + t.Run("crashers "+c, func(t *testing.T) { + assert.NotPanics(t, func() { + _, err := Decode(VersionByteAccountID, c) + assert.Error(t, err) + }) + }) + } +} diff --git a/strkey/encode_test.go b/strkey/encode_test.go index efd9a66cd2..77f15e4f9e 100644 --- a/strkey/encode_test.go +++ b/strkey/encode_test.go @@ -1,43 +1,95 @@ -package strkey_test +package strkey import ( - . "github.com/stellar/go/strkey" + "testing" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/stretchr/testify/assert" ) -var _ = Describe("strkey.Encode", func() { - validAccount := "GA3D5KRYM6CB7OWQ6TWYRR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHES5" - validAccountPayload := []byte{ - 0x36, 0x3e, 0xaa, 0x38, 0x67, 0x84, 0x1f, 0xba, - 0xd0, 0xf4, 0xed, 0x88, 0xc7, 0x79, 0xe4, 0xfe, - 0x66, 0xe5, 0x6a, 0x24, 0x70, 0xdc, 0x98, 0xc0, - 0xec, 0x9c, 0x07, 0x3d, 0x05, 0xc7, 0xb1, 0x03, +func TestEncode(t *testing.T) { + cases := []struct { + Name string + VersionByte VersionByte + Payload []byte + Expected string + }{ + { + Name: "AccountID", + VersionByte: VersionByteAccountID, + Payload: []byte{ + 0x36, 0x3e, 0xaa, 0x38, 0x67, 0x84, 0x1f, 0xba, + 0xd0, 0xf4, 0xed, 0x88, 0xc7, 0x79, 0xe4, 0xfe, + 0x66, 0xe5, 0x6a, 0x24, 0x70, 0xdc, 0x98, 0xc0, + 0xec, 0x9c, 0x07, 0x3d, 0x05, 0xc7, 0xb1, 0x03, + }, + Expected: "GA3D5KRYM6CB7OWQ6TWYRR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHES5", + }, + { + Name: "MuxedAccount", + VersionByte: VersionByteMuxedAccount, + Payload: []byte{ + 0x3f, 0x0c, 0x34, 0xbf, 0x93, 0xad, 0x0d, 0x99, + 0x71, 0xd0, 0x4c, 0xcc, 0x90, 0xf7, 0x05, 0x51, + 0x1c, 0x83, 0x8a, 0xad, 0x97, 0x34, 0xa4, 0xa2, + 0xfb, 0x0d, 0x7a, 0x03, 0xfc, 0x7f, 0xe8, 0x9a, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + Expected: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + }, + { + Name: "Seed", + VersionByte: VersionByteSeed, + Payload: []byte{ + 0x69, 0xa8, 0xc4, 0xcb, 0xb9, 0xf6, 0x4e, 0x8a, + 0x07, 0x98, 0xf6, 0xe1, 0xac, 0x65, 0xd0, 0x6c, + 0x31, 0x62, 0x92, 0x90, 0x56, 0xbc, 0xf4, 0xcd, + 0xb7, 0xd3, 0x73, 0x8d, 0x18, 0x55, 0xf3, 0x63, + }, + Expected: "SBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHOKR", + }, + { + Name: "HashTx", + VersionByte: VersionByteHashTx, + Payload: []byte{ + 0x69, 0xa8, 0xc4, 0xcb, 0xb9, 0xf6, 0x4e, 0x8a, + 0x07, 0x98, 0xf6, 0xe1, 0xac, 0x65, 0xd0, 0x6c, + 0x31, 0x62, 0x92, 0x90, 0x56, 0xbc, 0xf4, 0xcd, + 0xb7, 0xd3, 0x73, 0x8d, 0x18, 0x55, 0xf3, 0x63, + }, + Expected: "TBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHXL7", + }, + { + Name: "HashX", + VersionByte: VersionByteHashX, + Payload: []byte{ + 0x69, 0xa8, 0xc4, 0xcb, 0xb9, 0xf6, 0x4e, 0x8a, + 0x07, 0x98, 0xf6, 0xe1, 0xac, 0x65, 0xd0, 0x6c, + 0x31, 0x62, 0x92, 0x90, 0x56, 0xbc, 0xf4, 0xcd, + 0xb7, 0xd3, 0x73, 0x8d, 0x18, 0x55, 0xf3, 0x63, + }, + Expected: "XBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWGTOG", + }, } - validSeed := "SBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHOKR" - validSeedPayload := []byte{ + + for _, kase := range cases { + actual, err := Encode(kase.VersionByte, kase.Payload) + if assert.NoError(t, err, "An error occured in case %s", kase.Name) { + assert.Equal(t, kase.Expected, actual, "Output mismatch in case %s", kase.Name) + } + } + + // test bad version byte + _, err := Encode(VersionByte(2), cases[0].Payload) + assert.Error(t, err) + + // test too long payload + _, err = Encode(VersionByteAccountID, []byte{ 0x69, 0xa8, 0xc4, 0xcb, 0xb9, 0xf6, 0x4e, 0x8a, 0x07, 0x98, 0xf6, 0xe1, 0xac, 0x65, 0xd0, 0x6c, 0x31, 0x62, 0x92, 0x90, 0x56, 0xbc, 0xf4, 0xcd, 0xb7, 0xd3, 0x73, 0x8d, 0x18, 0x55, 0xf3, 0x63, - } - - It("encodes valid values", func() { - payload, err := Encode(VersionByteAccountID, validAccountPayload) - Expect(err).To(BeNil()) - Expect(payload).To(Equal(validAccount)) - - payload, err = Encode(VersionByteSeed, validSeedPayload) - Expect(err).To(BeNil()) - Expect(payload).To(Equal(validSeed)) - }) - - Context("the expected version byte isn't a valid constant", func() { - It("fails", func() { - 
_, err := Encode(VersionByte(2), validAccountPayload) - Expect(err).To(HaveOccurred()) - }) + 0x69, 0xa8, 0xc4, 0xcb, 0xb9, 0xf6, 0x4e, 0x8a, + 0x01, }) - -}) + assert.EqualError(t, err, "data exceeds maximum payload size for strkey") +} diff --git a/crc16/main.go b/strkey/internal/crc16/main.go similarity index 94% rename from crc16/main.go rename to strkey/internal/crc16/main.go index 550b391e03..43be5c5629 100644 --- a/crc16/main.go +++ b/strkey/internal/crc16/main.go @@ -47,8 +47,6 @@ package crc16 import ( - "bytes" - "encoding/binary" "errors" ) @@ -92,29 +90,21 @@ var crc16tab = [256]uint16{ } // Checksum returns the 2-byte checksum for the provided data -func Checksum(data []byte) []byte { +func Checksum(data []byte) uint16 { var crc uint16 - var out bytes.Buffer for _, b := range data { crc = ((crc << 8) & 0xffff) ^ crc16tab[((crc>>8)^uint16(b))&0x00FF] } - - err := binary.Write(&out, binary.LittleEndian, crc) - if err != nil { - panic(err) - } - - return out.Bytes() + return crc } // Validate returns an error if the provided checksum does not match // the calculated checksum of the provided data -func Validate(data []byte, expected []byte) error { - +func Validate(data []byte, expected uint16) error { actual := Checksum(data) // validate the provided checksum against the calculated - if !bytes.Equal(actual, expected) { + if actual != expected { return ErrInvalidChecksum } diff --git a/strkey/internal/crc16/main_test.go b/strkey/internal/crc16/main_test.go new file mode 100644 index 0000000000..bfa1be0d60 --- /dev/null +++ b/strkey/internal/crc16/main_test.go @@ -0,0 +1,20 @@ +package crc16 + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestChecksum(t *testing.T) { + result := Checksum([]byte{0x12, 0x34, 0x56, 0x78, 0x90}) + assert.Equal(t, uint16(0x48e6), result) +} + +func TestValidate(t *testing.T) { + err := Validate([]byte{0x12, 0x34, 0x56, 0x78, 0x90}, 0x48e6) + assert.NoError(t, err) + + err = Validate([]byte{0x12, 0x34, 0x56, 0x78, 0x90}, 0x48e7) + assert.ErrorIs(t, err, ErrInvalidChecksum) +} diff --git a/strkey/main.go b/strkey/main.go index 79791137db..ee56fc237c 100644 --- a/strkey/main.go +++ b/strkey/main.go @@ -1,11 +1,11 @@ package strkey import ( - "bytes" "encoding/base32" "encoding/binary" + "fmt" - "github.com/stellar/go/crc16" + "github.com/stellar/go/strkey/internal/crc16" "github.com/stellar/go/support/errors" ) @@ -20,10 +20,62 @@ type VersionByte byte const ( //VersionByteAccountID is the version byte used for encoded stellar addresses VersionByteAccountID VersionByte = 6 << 3 // Base32-encodes to 'G...' + //VersionByteSeed is the version byte used for encoded stellar seed VersionByteSeed = 18 << 3 // Base32-encodes to 'S...' + + //VersionByteMuxedAccounts is the version byte used for encoded stellar multiplexed addresses + VersionByteMuxedAccount = 12 << 3 // Base32-encodes to 'M...' + + //VersionByteHashTx is the version byte used for encoded stellar hashTx + //signer keys. + VersionByteHashTx = 19 << 3 // Base32-encodes to 'T...' + + //VersionByteHashX is the version byte used for encoded stellar hashX + //signer keys. + VersionByteHashX = 23 << 3 // Base32-encodes to 'X...' ) +// maxPayloadSize is the maximum length of the payload for all versions. +const maxPayloadSize = 40 + +// maxRawSize is the maximum length of a strkey in its raw form not encoded. +const maxRawSize = 1 + maxPayloadSize + 2 + +// maxEncodedSize is the maximum length of a strkey when base32 encoded. 
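+// For illustration: with maxRawSize = 1 + 40 + 2 = 43 bytes, this works out to
+// (43*8 + 4) / 5 = 69 characters, the length of the longest strkeys (M-addresses).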
+const maxEncodedSize = (maxRawSize*8 + 4) / 5 // (8n+4)/5 is the EncodedLen for no padding + +// encoding to use when encoding and decoding a strkey to and from strings. +var encoding = base32.StdEncoding.WithPadding(base32.NoPadding) + +// DecodeAny decodes the provided StrKey into a raw value, checking the checksum +// and if the version byte is one of allowed values. +func DecodeAny(src string) (VersionByte, []byte, error) { + raw, err := decodeString(src) + if err != nil { + return 0, nil, err + } + + // decode into components + version := VersionByte(raw[0]) + vp := raw[0 : len(raw)-2] + payload := raw[1 : len(raw)-2] + checksum := raw[len(raw)-2:] + + // ensure version byte is allowed + if err := checkValidVersionByte(version); err != nil { + return 0, nil, err + } + + // ensure checksum is valid + if err := crc16.Validate(vp, binary.LittleEndian.Uint16(checksum)); err != nil { + return 0, nil, err + } + + // if we made it through the gaunlet, return the decoded value + return version, payload, nil +} + // Decode decodes the provided StrKey into a raw value, checking the checksum // and ensuring the expected VersionByte (the version parameter) is the value // actually encoded into the provided src string. @@ -32,13 +84,14 @@ func Decode(expected VersionByte, src string) ([]byte, error) { return nil, err } - raw, err := base32.StdEncoding.DecodeString(src) + raw, err := decodeString(src) if err != nil { return nil, err } + // check length if len(raw) < 3 { - return nil, errors.Errorf("encoded value is %d bytes; minimum valid length is 3", len(raw)) + return nil, errors.New("decoded string is too short") } // decode into components @@ -53,7 +106,7 @@ func Decode(expected VersionByte, src string) ([]byte, error) { } // ensure checksum is valid - if err := crc16.Validate(vp, checksum); err != nil { + if err := crc16.Validate(vp, binary.LittleEndian.Uint16(checksum)); err != nil { return nil, err } @@ -77,26 +130,32 @@ func Encode(version VersionByte, src []byte) (string, error) { return "", err } - var raw bytes.Buffer + payloadSize := len(src) - // write version byte - if err := binary.Write(&raw, binary.LittleEndian, version); err != nil { - return "", err + // check src does not exceed maximum payload size + if payloadSize > maxPayloadSize { + return "", fmt.Errorf("data exceeds maximum payload size for strkey") } - // write payload - if _, err := raw.Write(src); err != nil { - return "", err - } + // pack + // 1 byte version + // src bytes + // 2 byte crc16 + rawArr := [maxRawSize]byte{} + rawSize := 1 + payloadSize + 2 + raw := rawArr[:rawSize] + raw[0] = byte(version) + copy(raw[1:], src) + crc := crc16.Checksum(raw[:1+payloadSize]) + binary.LittleEndian.PutUint16(raw[1+payloadSize:], crc) - // calculate and write checksum - checksum := crc16.Checksum(raw.Bytes()) - if _, err := raw.Write(checksum); err != nil { - return "", err - } + // base32 encode + encArr := [maxEncodedSize]byte{} + encSize := encoding.EncodedLen(rawSize) + enc := encArr[:encSize] + encoding.Encode(enc, raw) - result := base32.StdEncoding.EncodeToString(raw.Bytes()) - return result, nil + return string(enc), nil } // MustEncode is like Encode, but panics on error @@ -108,15 +167,112 @@ func MustEncode(version VersionByte, src []byte) string { return e } +// Version extracts and returns the version byte from the provided source +// string. 
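+// For example (illustrative, matching TestVersion in main_test.go), the version byte of
+// "GA3D5KRYM6CB7OWQ6TWYRR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHES5" is VersionByteAccountID.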
+func Version(src string) (VersionByte, error) { + raw, err := decodeString(src) + if err != nil { + return VersionByte(0), err + } + + return VersionByte(raw[0]), nil +} + // checkValidVersionByte returns an error if the provided value // is not one of the defined valid version byte constants. func checkValidVersionByte(version VersionByte) error { - if version == VersionByteAccountID { + switch version { + case VersionByteAccountID, VersionByteMuxedAccount, VersionByteSeed, VersionByteHashTx, VersionByteHashX: return nil + default: + return ErrInvalidVersionByte } - if version == VersionByteSeed { - return nil +} + +var decodingTable = initDecodingTable() + +func initDecodingTable() [256]byte { + var localDecodingTable [256]byte + for i := range localDecodingTable { + localDecodingTable[i] = 0xff + } + for i, ch := range []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ234567") { + localDecodingTable[ch] = byte(i) + } + return localDecodingTable +} + +// decodeString decodes a base32 string into the raw bytes, and ensures it could +// potentially be strkey encoded (i.e. it has both a version byte and a +// checksum, neither of which are explicitly checked by this func) +func decodeString(src string) ([]byte, error) { + // operations on strings are expensive since it involves unicode parsing + // so, we use bytes from the beginning + srcBytes := []byte(src) + // The minimal binary decoded length is 3 bytes (version byte and 2-byte CRC) which, + // in unpadded base32 (since each character provides 5 bits) corresponds to ceiling(8*3/5) = 5 + if len(srcBytes) < 5 { + return nil, errors.Errorf("strkey is %d bytes long; minimum valid length is 5", len(srcBytes)) + } + // SEP23 enforces strkeys to be in canonical base32 representation. + // Go's decoder doesn't help us there, so we need to do it ourselves. + // 1. Make sure there is no full unused leftover byte at the end + // (i.e. there shouldn't be 5 or more leftover bits) + leftoverBits := (len(srcBytes) * 5) % 8 + if leftoverBits >= 5 { + return nil, errors.New("non-canonical strkey; unused leftover character") } + // 2. In the last byte of the strkey there may be leftover bits (4 at most, otherwise it would be a full byte, + // which we have for checked above). If there are any leftover bits, they should be set to 0 + if leftoverBits > 0 { + lastChar := srcBytes[len(srcBytes)-1] + decodedLastChar := decodingTable[lastChar] + if decodedLastChar == 0xff { + // The last character from the input wasn't in the expected input alphabet. + // Let's output an error matching the errors from the base32 decoder invocation below + return nil, errors.Wrap(base32.CorruptInputError(len(srcBytes)), "base32 decode failed") + } + leftoverBitsMask := byte(0x0f) >> (4 - leftoverBits) + if decodedLastChar&leftoverBitsMask != 0 { + return nil, errors.New("non-canonical strkey; unused bits should be set to 0") + } + } + n, err := base32.StdEncoding.WithPadding(base32.NoPadding).Decode(srcBytes, srcBytes) + if err != nil { + return nil, errors.Wrap(err, "base32 decode failed") + } + + return srcBytes[:n], nil +} + +// IsValidEd25519PublicKey validates a stellar public key +func IsValidEd25519PublicKey(i interface{}) bool { + enc, ok := i.(string) + + if !ok { + return false + } + + _, err := Decode(VersionByteAccountID, enc) + + return err == nil +} + +// IsValidMuxedAccountEd25519PublicKey validates a Stellar SEP-23 muxed address. 
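+// For example (illustrative, matching the cases in main_test.go), it returns true for
+// "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26" and false for
+// G-addresses, secret seeds, and non-canonical M-addresses.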
+func IsValidMuxedAccountEd25519PublicKey(s string) bool { + _, err := Decode(VersionByteMuxedAccount, s) + return err == nil +} + +// IsValidEd25519SecretSeed validates a stellar secret key +func IsValidEd25519SecretSeed(i interface{}) bool { + enc, ok := i.(string) + + if !ok { + return false + } + + _, err := Decode(VersionByteSeed, enc) - return ErrInvalidVersionByte + return err == nil } diff --git a/strkey/main_test.go b/strkey/main_test.go new file mode 100644 index 0000000000..017d1de9a3 --- /dev/null +++ b/strkey/main_test.go @@ -0,0 +1,152 @@ +package strkey + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMaxEncodedSize(t *testing.T) { + assert.Equal(t, encoding.EncodedLen(maxRawSize), maxEncodedSize) +} + +func TestVersion(t *testing.T) { + cases := []struct { + Name string + Address string + ExpectedVersionByte VersionByte + }{ + { + Name: "AccountID", + Address: "GA3D5KRYM6CB7OWQ6TWYRR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHES5", + ExpectedVersionByte: VersionByteAccountID, + }, + { + Name: "Seed", + Address: "SBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHOKR", + ExpectedVersionByte: VersionByteSeed, + }, + { + Name: "HashTx", + Address: "TBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHXL7", + ExpectedVersionByte: VersionByteHashTx, + }, + { + Name: "HashX", + Address: "XBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWGTOG", + ExpectedVersionByte: VersionByteHashX, + }, + { + Name: "Other (0x60)", + Address: "MBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWGTOG", + ExpectedVersionByte: VersionByte(0x60), + }, + } + + for _, kase := range cases { + actual, err := Version(kase.Address) + if assert.NoError(t, err, "An error occured decoding case %s", kase.Name) { + assert.Equal(t, kase.ExpectedVersionByte, actual, "Output mismatch in case %s", kase.Name) + } + } +} + +func TestIsValidEd25519PublicKey(t *testing.T) { + validKey := "GDWZCOEQRODFCH6ISYQPWY67L3ULLWS5ISXYYL5GH43W7YFMTLB65PYM" + isValid := IsValidEd25519PublicKey(validKey) + assert.Equal(t, true, isValid) + + invalidKey := "GDWZCOEQRODFCH6ISYQPWY67L3ULLWS5ISXYYL5GH43W7Y" + isValid = IsValidEd25519PublicKey(invalidKey) + assert.Equal(t, false, isValid) + + invalidKey = "" + isValid = IsValidEd25519PublicKey(invalidKey) + assert.Equal(t, false, isValid) + + invalidKey = "SBCVMMCBEDB64TVJZFYJOJAERZC4YVVUOE6SYR2Y76CBTENGUSGWRRVO" + isValid = IsValidEd25519PublicKey(invalidKey) + assert.Equal(t, false, isValid) + + invalidKey = "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26" + isValid = IsValidEd25519PublicKey(invalidKey) + assert.False(t, isValid) +} + +func TestIsValidMuxedAccountEd25519PublicKey(t *testing.T) { + validKey := "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26" + isValid := IsValidMuxedAccountEd25519PublicKey(validKey) + assert.True(t, isValid) + + invalidKeys := []struct { + key string + reason string + }{ + { + key: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUR", + reason: "The unused trailing bit must be zero in the encoding of the last three bytes (24 bits) as five base-32 symbols (25 bits)", + }, + { + key: "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZA", + reason: "Invalid length (congruent to 1 mod 8)", + }, + { + key: "G47QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVP2I", + reason: "Invalid algorithm (low 3 bits of version byte are 7)", + }, + { + key: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLKA", + reason: "Invalid length (congruent to 6 mod 
8)", + }, + { + key: "M47QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ", + reason: "Invalid algorithm (low 3 bits of version byte are 7)", + }, + { + key: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUK", + reason: "Padding bytes are not allowed", + }, + { + key: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUO", + reason: "Invalid checksum", + }, + { + key: "", + reason: "Invalid length (string is empty)", + }, + { + key: "SBCVMMCBEDB64TVJZFYJOJAERZC4YVVUOE6SYR2Y76CBTENGUSGWRRVO", + reason: "Invalid key (this is a secret key)", + }, + { + key: "GDWZCOEQRODFCH6ISYQPWY67L3ULLWS5ISXYYL5GH43W7YFMTLB65PYM", + reason: "Invalid key (this is an Ed25519 G-address)", + }, + } + for _, invalidKey := range invalidKeys { + isValid = IsValidMuxedAccountEd25519PublicKey(invalidKey.key) + assert.False(t, isValid, invalidKey.reason) + } +} + +func TestIsValidEd25519SecretSeed(t *testing.T) { + validKey := "SBCVMMCBEDB64TVJZFYJOJAERZC4YVVUOE6SYR2Y76CBTENGUSGWRRVO" + isValid := IsValidEd25519SecretSeed(validKey) + assert.Equal(t, true, isValid) + + invalidKey := "SBCVMMCBEDB64TVJZFYJOJAERZC4YVVUOE6SYR2Y76CBTENGUSG" + isValid = IsValidEd25519SecretSeed(invalidKey) + assert.Equal(t, false, isValid) + + invalidKey = "" + isValid = IsValidEd25519SecretSeed(invalidKey) + assert.Equal(t, false, isValid) + + invalidKey = "GDWZCOEQRODFCH6ISYQPWY67L3ULLWS5ISXYYL5GH43W7YFMTLB65PYM" + isValid = IsValidEd25519SecretSeed(invalidKey) + assert.Equal(t, false, isValid) + + invalidKey = "MAQAA5L65LSYH7CQ3VTJ7F3HHLGCL3DSLAR2Y47263D56MNNGHSQSAAAAAAAAAAE2LP26" + isValid = IsValidEd25519SecretSeed(invalidKey) + assert.False(t, isValid) +} diff --git a/strkey/muxed_account.go b/strkey/muxed_account.go new file mode 100644 index 0000000000..beea0b46de --- /dev/null +++ b/strkey/muxed_account.go @@ -0,0 +1,87 @@ +package strkey + +import ( + "bytes" + "fmt" + + xdr "github.com/stellar/go-xdr/xdr3" + "github.com/stellar/go/support/errors" +) + +type MuxedAccount struct { + id uint64 + ed25519 [32]byte +} + +// SetID populates the muxed account ID. +func (m *MuxedAccount) SetID(id uint64) { + m.id = id +} + +// SetAccountID populates the muxed account G-address. +func (m *MuxedAccount) SetAccountID(address string) error { + raw, err := Decode(VersionByteAccountID, address) + if err != nil { + return errors.New("invalid ed25519 public key") + } + if len(raw) != 32 { + return fmt.Errorf("invalid binary length: %d", len(raw)) + } + + copy(m.ed25519[:], raw) + + return nil +} + +// ID returns the muxed account id according with the SEP-23 definition for +// multiplexed accounts. +func (m *MuxedAccount) ID() uint64 { + return m.id +} + +// AccountID returns the muxed account G-address according with the SEP-23 +// definition for multiplexed accounts. +func (m *MuxedAccount) AccountID() (string, error) { + return Encode(VersionByteAccountID, m.ed25519[:]) +} + +// Address returns the muxed account M-address according with the SEP-23 +// definition for multiplexed accounts. +func (m *MuxedAccount) Address() (string, error) { + if m.ed25519 == [32]byte{} { + return "", errors.New("muxed account has no ed25519 key") + } + + b := new(bytes.Buffer) + _, err := xdr.Marshal(b, m.id) + if err != nil { + return "", errors.Wrap(err, "marshaling muxed address id") + } + + raw := make([]byte, 0, 40) + raw = append(raw, m.ed25519[:]...) + raw = append(raw, b.Bytes()...) 
+ + return Encode(VersionByteMuxedAccount, raw) +} + +// DecodeMuxedAccount receives a muxed account M-address and parses it into a +// MuxedAccount object containing an ed25519 address and an id. +func DecodeMuxedAccount(address string) (*MuxedAccount, error) { + raw, err := Decode(VersionByteMuxedAccount, address) + if err != nil { + return nil, errors.New("invalid muxed account") + } + if len(raw) != 40 { + return nil, errors.Errorf("invalid binary length: %d", len(raw)) + } + + var muxed MuxedAccount + copy(muxed.ed25519[:], raw[:32]) + _, err = xdr.Unmarshal(bytes.NewReader(raw[32:]), &muxed.id) + if err != nil { + return nil, errors.Wrap(err, "can't marshall binary") + } + + return &muxed, nil +} diff --git a/strkey/muxed_account_test.go b/strkey/muxed_account_test.go new file mode 100644 index 0000000000..3a84b779f7 --- /dev/null +++ b/strkey/muxed_account_test.go @@ -0,0 +1,88 @@ +package strkey + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMuxedAccount_ID(t *testing.T) { + muxed := MuxedAccount{} + assert.Equal(t, uint64(0), muxed.ID()) + + muxed = MuxedAccount{id: uint64(9223372036854775808)} + assert.Equal(t, uint64(9223372036854775808), muxed.ID()) +} + +func TestMuxedAccount_SetID(t *testing.T) { + muxed := MuxedAccount{} + muxed.SetID(123) + assert.Equal(t, uint64(123), muxed.ID()) + + muxed.SetID(456) + assert.Equal(t, uint64(456), muxed.ID()) +} + +func TestMuxedAccount_AccountID(t *testing.T) { + muxed := MuxedAccount{} + publicKey, err := muxed.AccountID() + assert.NoError(t, err) + assert.Equal(t, "GAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWHF", publicKey) + + muxed = MuxedAccount{ed25519: [32]byte{63, 12, 52, 191, 147, 173, 13, 153, 113, 208, 76, 204, 144, 247, 5, 81, 28, 131, 138, 173, 151, 52, 164, 162, 251, 13, 122, 3, 252, 127, 232, 154}} + publicKey, err = muxed.AccountID() + assert.NoError(t, err) + assert.Equal(t, "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ", publicKey) +} + +func TestMuxedAccount_SetAccountID(t *testing.T) { + muxed := MuxedAccount{} + err := muxed.SetAccountID("") + assert.EqualError(t, err, "invalid ed25519 public key") + + err = muxed.SetAccountID("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZ") + assert.EqualError(t, err, "invalid ed25519 public key") + + err = muxed.SetAccountID("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ") + assert.NoError(t, err) + publicKey, err := muxed.AccountID() + assert.NoError(t, err) + assert.Equal(t, "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ", publicKey) + wantMuxed := MuxedAccount{ed25519: [32]byte{63, 12, 52, 191, 147, 173, 13, 153, 113, 208, 76, 204, 144, 247, 5, 81, 28, 131, 138, 173, 151, 52, 164, 162, 251, 13, 122, 3, 252, 127, 232, 154}} + assert.Equal(t, wantMuxed, muxed) + + muxed.SetID(123) + wantMuxed = MuxedAccount{ + ed25519: [32]byte{63, 12, 52, 191, 147, 173, 13, 153, 113, 208, 76, 204, 144, 247, 5, 81, 28, 131, 138, 173, 151, 52, 164, 162, 251, 13, 122, 3, 252, 127, 232, 154}, + id: 123, + } + assert.Equal(t, wantMuxed, muxed) +} + +func TestMuxedAccount_Address(t *testing.T) { + muxed := MuxedAccount{} + publicKey, err := muxed.Address() + assert.EqualError(t, err, "muxed account has no ed25519 key") + assert.Empty(t, publicKey) + + muxed = MuxedAccount{ + id: uint64(9223372036854775808), + ed25519: [32]byte{63, 12, 52, 191, 147, 173, 13, 153, 113, 208, 76, 204, 144, 247, 5, 81, 28, 131, 138, 173, 151, 52, 164, 162, 251, 13, 122, 3, 252, 127, 232, 154}, + } + publicKey, err = muxed.Address() + assert.NoError(t, err) + 
assert.Equal(t, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", publicKey) +} + +func TestDecodeMuxedAccount(t *testing.T) { + muxed, err := DecodeMuxedAccount("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ") + assert.EqualError(t, err, "invalid muxed account") + assert.Nil(t, muxed) + + muxed, err = DecodeMuxedAccount("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK") + assert.NoError(t, err) + assert.Equal(t, uint64(9223372036854775808), muxed.ID()) + publicKey, err := muxed.AccountID() + assert.NoError(t, err) + assert.Equal(t, "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ", publicKey) +} diff --git a/strkey/strkey_suite_test.go b/strkey/strkey_suite_test.go deleted file mode 100644 index 6477116d7d..0000000000 --- a/strkey/strkey_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package strkey_test - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "testing" -) - -func TestStrkey(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Strkey Suite") -} diff --git a/support/clock/clocktest/fixed.go b/support/clock/clocktest/fixed.go new file mode 100644 index 0000000000..ae7b6e6783 --- /dev/null +++ b/support/clock/clocktest/fixed.go @@ -0,0 +1,14 @@ +package clocktest + +import ( + "time" +) + +// FixedSource is a clock source that has its current time stopped and fixed at +// a specific time. +type FixedSource time.Time + +// Now returns the fixed source's constant time. +func (s FixedSource) Now() time.Time { + return time.Time(s) +} diff --git a/support/clock/clocktest/fixed_test.go b/support/clock/clocktest/fixed_test.go new file mode 100644 index 0000000000..202bd31773 --- /dev/null +++ b/support/clock/clocktest/fixed_test.go @@ -0,0 +1,28 @@ +package clocktest_test + +import ( + "testing" + "time" + + "github.com/stellar/go/support/clock" + "github.com/stellar/go/support/clock/clocktest" + "github.com/stretchr/testify/assert" +) + +// TestNewFixed tests that the time returned from the fixed clocks Now() +// function is equal to the time given when creating the clock. +func TestFixedSource_Now(t *testing.T) { + timeNow := time.Date(2015, 9, 30, 17, 15, 54, 0, time.UTC) + c := clock.Clock{Source: clocktest.FixedSource(timeNow)} + assert.Equal(t, timeNow, c.Now()) +} + +// TestNewFixed_compose tests that FixedSource can be used easily to change +// time during a test. +func TestFixedSource_compose(t *testing.T) { + timeNow := time.Date(2015, 9, 30, 17, 15, 54, 0, time.UTC) + c := clock.Clock{Source: clocktest.FixedSource(timeNow)} + assert.Equal(t, timeNow, c.Now()) + c.Source = clocktest.FixedSource(timeNow.AddDate(0, 0, 1)) + assert.Equal(t, timeNow.AddDate(0, 0, 1), c.Now()) +} diff --git a/support/clock/main.go b/support/clock/main.go new file mode 100644 index 0000000000..e59fbc2e87 --- /dev/null +++ b/support/clock/main.go @@ -0,0 +1,35 @@ +package clock + +import "time" + +// Clock provides access to the current time from an underlying source. +type Clock struct { + Source Source +} + +func (c *Clock) getSource() Source { + if c == nil || c.Source == nil { + return RealSource{} + } + return c.Source +} + +// Now returns the current time as defined by the Clock's Source. +func (c *Clock) Now() time.Time { + return c.getSource().Now() +} + +// Source is any type providing a Now function that returns the current time. +type Source interface { + // Now returns the current time. 
+ Now() time.Time +} + +// RealSource is a Source that uses the real time as provided by the stdlib +// time.Now() function as the current time. +type RealSource struct{} + +// Now returns the real system time as reported by time.Now(). +func (RealSource) Now() time.Time { + return time.Now() +} diff --git a/support/clock/main_test.go b/support/clock/main_test.go new file mode 100644 index 0000000000..bdbfa7aea3 --- /dev/null +++ b/support/clock/main_test.go @@ -0,0 +1,43 @@ +package clock_test + +import ( + "testing" + "time" + + "github.com/stellar/go/support/clock" + "github.com/stellar/go/support/clock/clocktest" + "github.com/stretchr/testify/assert" +) + +// TestClock_Now_sourceNotSet tests that when the Source field is not set that +// the real time is used when the Clock is asked for the current time. +func TestClock_Now_sourceNotSet(t *testing.T) { + c := clock.Clock{} + before := time.Now() + cNow := c.Now() + after := time.Now() + assert.True(t, cNow.After(before)) + assert.True(t, cNow.Before(after)) +} + +// TestClock_Now_sourceNotSetPtrNil tests that when the identifier is a +// unset/nil pointer to a Clock, it still has default behavior. +func TestClock_Now_sourceNotSetPtrNil(t *testing.T) { + c := (*clock.Clock)(nil) + before := time.Now() + cNow := c.Now() + after := time.Now() + assert.True(t, cNow.After(before)) + assert.True(t, cNow.Before(after)) +} + +// TestClock_Now_sourceSet tests that when the Source field is set that it is +// used when the Clock is asked for the current time. +func TestClock_Now_sourceSet(t *testing.T) { + timeNow := time.Date(2015, 9, 30, 17, 15, 54, 0, time.UTC) + c := clock.Clock{ + Source: clocktest.FixedSource(timeNow), + } + cNow := c.Now() + assert.Equal(t, timeNow, cNow) +} diff --git a/support/config/broken_test.go b/support/config/broken_test.go new file mode 100644 index 0000000000..e4b0384653 --- /dev/null +++ b/support/config/broken_test.go @@ -0,0 +1,25 @@ +package config + +import ( + "testing" + + "github.com/asaskevich/govalidator" + "github.com/stretchr/testify/assert" +) + +// NOTE: this test is presently failing because govalidator doesn't support +// optional fields that also use a custom validator. We'll remove the build tag +// above that disabled it from running during tests when we fix upstream. +func TestOptionalStellarFields(t *testing.T) { + var val struct { + F1 string `valid:"stellar_accountid,optional"` + F2 string `valid:"optional,stellar_accountid"` + F3 string `valid:"stellar_seed,optional"` + F4 string `valid:"optional,stellar_accountid"` + } + + // run the validation + ok, err := govalidator.ValidateStruct(val) + assert.NoError(t, err) + assert.True(t, ok) +} diff --git a/support/config/config_option.go b/support/config/config_option.go new file mode 100644 index 0000000000..fde5825a43 --- /dev/null +++ b/support/config/config_option.go @@ -0,0 +1,236 @@ +package config + +import ( + "fmt" + "go/types" + stdLog "log" + "net/url" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "github.com/spf13/viper" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/strutils" +) + +// ConfigOptions is a group of ConfigOptions that can be for convenience +// initialized and set at the same time. +type ConfigOptions []*ConfigOption + +// Init calls Init on each ConfigOption passing on the cobra.Command. 
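+// Initialization stops at the first option that fails. An illustrative sketch
+// (names and values are hypothetical):
+//
+//   var port uint
+//   opts := ConfigOptions{
+//       {Name: "port", OptType: types.Uint, ConfigKey: &port, FlagDefault: uint(8000), Usage: "port to listen on"},
+//   }
+//   err := opts.Init(rootCmd)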
+func (cos ConfigOptions) Init(cmd *cobra.Command) error { + for _, co := range cos { + err := co.Init(cmd) + if err != nil { + return err + } + } + return nil +} + +// Require calls Require on each ConfigOption. +func (cos ConfigOptions) Require() { + for _, co := range cos { + co.Require() + } +} + +// RequireE is like Require, but returns the error instead of Fatal +func (cos ConfigOptions) RequireE() error { + for _, co := range cos { + if err := co.RequireE(); err != nil { + return err + } + } + return nil +} + +// SetValues calls SetValue on each ConfigOption. +func (cos ConfigOptions) SetValues() error { + for _, co := range cos { + if err := co.SetValue(); err != nil { + return err + } + } + return nil +} + +// ConfigOption is a complete description of the configuration of a command line option +type ConfigOption struct { + Name string // e.g. "db-url" + EnvVar string // e.g. "DATABASE_URL". Defaults to uppercase/underscore representation of name + OptType types.BasicKind // The type of this option, e.g. types.Bool + FlagDefault interface{} // A default if no option is provided. Omit or set to `nil` if no default + Required bool // Whether this option must be set for Horizon to run + Usage string // Help text + CustomSetValue func(*ConfigOption) error // Optional function for custom validation/transformation + ConfigKey interface{} // Pointer to the final key in the linked Config struct + flag *pflag.Flag // The persistent flag that the config option is attached to +} + +// Init handles initialisation steps, including configuring and binding the env variable name. +func (co *ConfigOption) Init(cmd *cobra.Command) error { + // Bind the command line and environment variable name + // Unless overriden, default to a transform like tls-key -> TLS_KEY + if co.EnvVar == "" { + co.EnvVar = strutils.KebabToConstantCase(co.Name) + } + // Initialise and bind the persistent flags + return co.setFlag(cmd) +} + +// Bind binds the config option to viper. +func (co *ConfigOption) Bind() { + viper.BindPFlag(co.Name, co.flag) + viper.BindEnv(co.Name, co.EnvVar) +} + +// Require checks that a required string configuration option is not empty, raising a user error if it is. +func (co *ConfigOption) Require() { + if err := co.RequireE(); err != nil { + stdLog.Fatal(err.Error()) + } +} + +// RequireE is like Require, but returns the error instead of Fatal +func (co *ConfigOption) RequireE() error { + co.Bind() + if co.Required && viper.GetString(co.Name) == "" { + return fmt.Errorf("Invalid config: %s is blank. Please specify --%s on the command line or set the %s environment variable.", co.Name, co.Name, co.EnvVar) + } + return nil +} + +// SetValue sets a value in the global config, using a custom function, if one was provided. +func (co *ConfigOption) SetValue() error { + co.Bind() + + // Use a custom setting function, if one is provided + if co.CustomSetValue != nil { + if err := co.CustomSetValue(co); err != nil { + return err + } + // Otherwise, just set the provided arg directly + } else if co.ConfigKey != nil { + co.setSimpleValue() + } + return nil +} + +// UsageText returns the string to use for the usage text of the option. The +// string returned will be the Usage defined on the ConfigOption, along with +// the environment variable. +func (co *ConfigOption) UsageText() string { + return fmt.Sprintf("%s (%s)", co.Usage, co.EnvVar) +} + +// setSimpleValue sets the value of a ConfigOption's configKey, based on the ConfigOption's default type. 
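+// OptTypes that are not covered by the switch below are silently left untouched.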
+func (co *ConfigOption) setSimpleValue() { + if co.ConfigKey != nil { + switch co.OptType { + case types.String: + *(co.ConfigKey.(*string)) = viper.GetString(co.Name) + case types.Int: + *(co.ConfigKey.(*int)) = viper.GetInt(co.Name) + case types.Bool: + *(co.ConfigKey.(*bool)) = viper.GetBool(co.Name) + case types.Uint: + *(co.ConfigKey.(*uint)) = uint(viper.GetInt(co.Name)) + case types.Uint32: + *(co.ConfigKey.(*uint32)) = uint32(viper.GetInt(co.Name)) + } + } +} + +// setFlag sets the correct pFlag type, based on the ConfigOption's default type. +func (co *ConfigOption) setFlag(cmd *cobra.Command) error { + switch co.OptType { + case types.String: + // Set an empty string if no default was provided, since some value is always required for pflags + if co.FlagDefault == nil { + co.FlagDefault = "" + } + cmd.PersistentFlags().String(co.Name, co.FlagDefault.(string), co.UsageText()) + case types.Int: + cmd.PersistentFlags().Int(co.Name, co.FlagDefault.(int), co.UsageText()) + case types.Bool: + cmd.PersistentFlags().Bool(co.Name, co.FlagDefault.(bool), co.UsageText()) + case types.Uint: + cmd.PersistentFlags().Uint(co.Name, co.FlagDefault.(uint), co.UsageText()) + case types.Uint32: + cmd.PersistentFlags().Uint32(co.Name, co.FlagDefault.(uint32), co.UsageText()) + default: + return errors.New("Unexpected OptType") + } + + co.flag = cmd.PersistentFlags().Lookup(co.Name) + + return nil +} + +// SetDuration converts a command line int to a duration, and stores it in the final config. +func SetDuration(co *ConfigOption) error { + *(co.ConfigKey.(*time.Duration)) = time.Duration(viper.GetInt(co.Name)) * time.Second + return nil +} + +// SetURL converts a command line string to a URL, and stores it in the final config. +func SetURL(co *ConfigOption) error { + urlString := viper.GetString(co.Name) + if urlString != "" { + urlType, err := url.Parse(urlString) + if err != nil { + return fmt.Errorf("Unable to parse URL: %s/%v", urlString, err) + } + *(co.ConfigKey.(**url.URL)) = urlType + } + return nil +} + +// SetOptionalUint converts a command line uint to a *uint where the nil +// value indicates the flag was not explicitly set +func SetOptionalUint(co *ConfigOption) error { + key := co.ConfigKey.(**uint) + if IsExplicitlySet(co) { + *key = new(uint) + **key = uint(viper.GetInt(co.Name)) + } else { + *key = nil + } + return nil +} + +func parseEnvVars(entries []string) map[string]bool { + set := map[string]bool{} + for _, entry := range entries { + key := strings.Split(entry, "=")[0] + set[key] = true + } + return set +} + +var envVars = parseEnvVars(os.Environ()) + +// IsExplicitlySet returns true if and only if the given config option was set explicitly either +// via a command line argument or via an environment variable +func IsExplicitlySet(co *ConfigOption) bool { + // co.flag.Changed is only set to true when the configuration is set via command line parameter. + // In the case where a variable is configured via environment variable we need to check envVars. 
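+ // Note that envVars is captured from os.Environ() when the package is initialized, so
+ // environment variables exported after process start-up are not detected here.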
+ return co.flag.Changed || envVars[co.EnvVar]
+}
+
+// SetOptionalString converts a command line string to a *string where the nil
+// value indicates the flag was not explicitly set
+func SetOptionalString(co *ConfigOption) error {
+ key := co.ConfigKey.(**string)
+ if IsExplicitlySet(co) {
+ *key = new(string)
+ **key = viper.GetString(co.Name)
+ } else {
+ *key = nil
+ }
+ return nil
+}
diff --git a/support/config/config_option_test.go b/support/config/config_option_test.go
new file mode 100644
index 0000000000..30be01424b
--- /dev/null
+++ b/support/config/config_option_test.go
@@ -0,0 +1,374 @@
+package config
+
+import (
+ "go/types"
+ "os"
+ "testing"
+
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestConfigOption_UsageText(t *testing.T) {
+ configOpt := ConfigOption{
+ Usage: "Port to listen and serve on",
+ EnvVar: "PORT",
+ }
+ assert.Equal(t, "Port to listen and serve on (PORT)", configOpt.UsageText())
+}
+
+type testOptions struct {
+ String string
+ Int int
+ Bool bool
+ Uint uint
+ Uint32 uint32
+}
+
+// Test that optional flags are set to nil if they are not configured explicitly.
+func TestConfigOption_optionalFlags_defaults(t *testing.T) {
+ var optUint *uint
+ var optString *string
+ configOpts := ConfigOptions{
+ {Name: "uint", OptType: types.Uint, ConfigKey: &optUint, CustomSetValue: SetOptionalUint, FlagDefault: uint(0)},
+ {Name: "string", OptType: types.String, ConfigKey: &optString, CustomSetValue: SetOptionalString},
+ }
+ cmd := &cobra.Command{
+ Use: "doathing",
+ Run: func(_ *cobra.Command, _ []string) {
+ configOpts.Require()
+ configOpts.SetValues()
+ },
+ }
+ configOpts.Init(cmd)
+
+ cmd.SetArgs([]string{})
+ cmd.Execute()
+ assert.Equal(t, (*string)(nil), optString)
+ assert.Equal(t, (*uint)(nil), optUint)
+}
+
+// Test that optional flags are set to non nil values when they are configured explicitly.
+func TestConfigOption_optionalFlags_set(t *testing.T) {
+ var optUint *uint
+ var optString *string
+ configOpts := ConfigOptions{
+ {Name: "uint", OptType: types.Uint, ConfigKey: &optUint, CustomSetValue: SetOptionalUint, FlagDefault: uint(0)},
+ {Name: "string", OptType: types.String, ConfigKey: &optString, CustomSetValue: SetOptionalString},
+ }
+ cmd := &cobra.Command{
+ Use: "doathing",
+ Run: func(_ *cobra.Command, _ []string) {
+ configOpts.Require()
+ configOpts.SetValues()
+ },
+ }
+ configOpts.Init(cmd)
+
+ cmd.SetArgs([]string{"--uint", "6", "--string", "test-string"})
+ cmd.Execute()
+ assert.Equal(t, "test-string", *optString)
+ assert.Equal(t, uint(6), *optUint)
+}
+
+// Test that optional flags are set to non nil (empty string) values when the
+// corresponding environment variable is present but empty.
+func TestConfigOption_optionalFlags_env_set_empty(t *testing.T) {
+ var optUint *uint
+ var optString *string
+ configOpts := ConfigOptions{
+ {Name: "uint", OptType: types.Uint, ConfigKey: &optUint, CustomSetValue: SetOptionalUint, FlagDefault: uint(0)},
+ {Name: "string", OptType: types.String, ConfigKey: &optString, CustomSetValue: SetOptionalString},
+ }
+ cmd := &cobra.Command{
+ Use: "doathing",
+ Run: func(_ *cobra.Command, _ []string) {
+ configOpts.Require()
+ configOpts.SetValues()
+ },
+ }
+ configOpts.Init(cmd)
+
+ prev := envVars
+ envVars = map[string]bool{
+ "STRING": true,
+ }
+ defer func() {
+ envVars = prev
+ }()
+
+ cmd.Execute()
+ assert.Equal(t, "", *optString)
+ assert.Equal(t, (*uint)(nil), optUint)
+}
+
+// Test that optional flags are set to non nil values when they are configured explicitly via environment variables.
+func TestConfigOption_optionalFlags_env_set(t *testing.T) { + var optUint *uint + var optString *string + configOpts := ConfigOptions{ + {Name: "uint", OptType: types.Uint, ConfigKey: &optUint, CustomSetValue: SetOptionalUint, FlagDefault: uint(0)}, + {Name: "string", OptType: types.String, ConfigKey: &optString, CustomSetValue: SetOptionalString}, + } + cmd := &cobra.Command{ + Use: "doathing", + Run: func(_ *cobra.Command, _ []string) { + configOpts.Require() + configOpts.SetValues() + }, + } + configOpts.Init(cmd) + + prev := envVars + envVars = map[string]bool{ + "STRING": true, + "UINT": true, + } + defer func() { + envVars = prev + }() + + defer os.Setenv("STRING", os.Getenv("STRING")) + defer os.Setenv("UINT", os.Getenv("UINT")) + os.Setenv("STRING", "str") + os.Setenv("UINT", "6") + + cmd.Execute() + assert.Equal(t, "str", *optString) + assert.Equal(t, uint(6), *optUint) +} + +// Test that when there are no args the defaults in the config options are +// used. +func TestConfigOption_getSimpleValue_defaults(t *testing.T) { + opts := testOptions{} + configOpts := ConfigOptions{ + {Name: "string", OptType: types.String, ConfigKey: &opts.String, FlagDefault: "default"}, + {Name: "int", OptType: types.Int, ConfigKey: &opts.Int, FlagDefault: 1}, + {Name: "bool", OptType: types.Bool, ConfigKey: &opts.Bool, FlagDefault: true}, + {Name: "uint", OptType: types.Uint, ConfigKey: &opts.Uint, FlagDefault: uint(2)}, + {Name: "uint32", OptType: types.Uint32, ConfigKey: &opts.Uint32, FlagDefault: uint32(3)}, + } + cmd := &cobra.Command{ + Use: "doathing", + Run: func(_ *cobra.Command, _ []string) { + configOpts.Require() + configOpts.SetValues() + }, + } + configOpts.Init(cmd) + + cmd.SetArgs([]string{}) + cmd.Execute() + assert.Equal(t, "default", opts.String) + assert.Equal(t, 1, opts.Int) + assert.Equal(t, true, opts.Bool) + assert.Equal(t, uint(2), opts.Uint) + assert.Equal(t, uint32(3), opts.Uint32) + for _, opt := range configOpts { + assert.False(t, opt.flag.Changed) + } +} + +// Test that when args are given, their values are used. +func TestConfigOption_getSimpleValue_setFlag(t *testing.T) { + opts := testOptions{} + configOpts := ConfigOptions{ + {Name: "string", OptType: types.String, ConfigKey: &opts.String, FlagDefault: "default"}, + {Name: "int", OptType: types.Int, ConfigKey: &opts.Int, FlagDefault: 1}, + {Name: "bool", OptType: types.Bool, ConfigKey: &opts.Bool, FlagDefault: false}, + {Name: "uint", OptType: types.Uint, ConfigKey: &opts.Uint, FlagDefault: uint(2)}, + {Name: "uint32", OptType: types.Uint32, ConfigKey: &opts.Uint32, FlagDefault: uint32(3)}, + } + cmd := &cobra.Command{ + Use: "doathing", + Run: func(_ *cobra.Command, _ []string) { + configOpts.Require() + configOpts.SetValues() + }, + } + configOpts.Init(cmd) + + cmd.SetArgs([]string{ + "--string", "value", + "--int", "10", + "--bool", + "--uint", "20", + "--uint32", "30", + }) + cmd.Execute() + assert.Equal(t, "value", opts.String) + assert.Equal(t, 10, opts.Int) + assert.Equal(t, true, opts.Bool) + assert.Equal(t, uint(20), opts.Uint) + assert.Equal(t, uint32(30), opts.Uint32) + for _, opt := range configOpts { + assert.True(t, opt.flag.Changed) + } +} + +// Test that when args are not given but env vars are, their values are used. 
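
The tests above pin down the contract that matters to callers: with SetOptionalUint and SetOptionalString the destination pointer stays nil unless the flag or its environment variable was set explicitly. A hypothetical helper (not part of the package) that leans on that contract:

// portOrDefault is a hypothetical helper showing how callers can
// distinguish "not configured" (nil) from an explicitly supplied zero.
func portOrDefault(configured *uint, def uint) uint {
	if configured == nil {
		return def
	}
	return *configured
}
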
+func TestConfigOption_getSimpleValue_setEnv(t *testing.T) { + opts := testOptions{} + configOpts := ConfigOptions{ + {Name: "string", OptType: types.String, ConfigKey: &opts.String, FlagDefault: "default"}, + {Name: "int", OptType: types.Int, ConfigKey: &opts.Int, FlagDefault: 1}, + {Name: "bool", OptType: types.Bool, ConfigKey: &opts.Bool, FlagDefault: false}, + {Name: "uint", OptType: types.Uint, ConfigKey: &opts.Uint, FlagDefault: uint(2)}, + {Name: "uint32", OptType: types.Uint32, ConfigKey: &opts.Uint32, FlagDefault: uint32(3)}, + } + cmd := &cobra.Command{ + Use: "doathing", + Run: func(_ *cobra.Command, _ []string) { + configOpts.Require() + configOpts.SetValues() + }, + } + configOpts.Init(cmd) + + defer os.Setenv("STRING", os.Getenv("STRING")) + defer os.Setenv("INT", os.Getenv("INT")) + defer os.Setenv("BOOL", os.Getenv("BOOL")) + defer os.Setenv("UINT", os.Getenv("UINT")) + defer os.Setenv("UINT32", os.Getenv("UINT32")) + os.Setenv("STRING", "value") + os.Setenv("INT", "10") + os.Setenv("BOOL", "true") + os.Setenv("UINT", "20") + os.Setenv("UINT32", "30") + cmd.Execute() + assert.Equal(t, "value", opts.String) + assert.Equal(t, 10, opts.Int) + assert.Equal(t, true, opts.Bool) + assert.Equal(t, uint(20), opts.Uint) + assert.Equal(t, uint32(30), opts.Uint32) +} + +// Test that when multiple commands register the same option, they can be set +// with flags. +func TestConfigOption_getSimpleValue_setMultipleFlag(t *testing.T) { + opts1 := testOptions{} + configOpts1 := ConfigOptions{ + {Name: "string", OptType: types.String, ConfigKey: &opts1.String, FlagDefault: "default1"}, + {Name: "int", OptType: types.Int, ConfigKey: &opts1.Int, FlagDefault: 11}, + {Name: "bool", OptType: types.Bool, ConfigKey: &opts1.Bool, FlagDefault: false}, + {Name: "uint", OptType: types.Uint, ConfigKey: &opts1.Uint, FlagDefault: uint(12)}, + {Name: "uint32", OptType: types.Uint32, ConfigKey: &opts1.Uint32, FlagDefault: uint32(13)}, + } + cmd1 := &cobra.Command{ + Use: "doathing1", + Run: func(_ *cobra.Command, _ []string) { + configOpts1.Require() + configOpts1.SetValues() + }, + } + configOpts1.Init(cmd1) + + opts2 := testOptions{} + configOpts2 := ConfigOptions{ + {Name: "string", OptType: types.String, ConfigKey: &opts2.String, FlagDefault: "default2"}, + {Name: "int", OptType: types.Int, ConfigKey: &opts2.Int, FlagDefault: 21}, + {Name: "bool", OptType: types.Bool, ConfigKey: &opts2.Bool, FlagDefault: false}, + {Name: "uint", OptType: types.Uint, ConfigKey: &opts2.Uint, FlagDefault: uint(22)}, + {Name: "uint32", OptType: types.Uint32, ConfigKey: &opts2.Uint32, FlagDefault: uint32(23)}, + } + cmd2 := &cobra.Command{ + Use: "doathing2", + Run: func(_ *cobra.Command, _ []string) { + configOpts2.Require() + configOpts2.SetValues() + }, + } + configOpts2.Init(cmd2) + + cmd1.SetArgs([]string{ + "--string", "value1", + "--int", "110", + "--bool", + "--uint", "120", + "--uint32", "130", + }) + cmd1.Execute() + assert.Equal(t, "value1", opts1.String) + assert.Equal(t, 110, opts1.Int) + assert.Equal(t, true, opts1.Bool) + assert.Equal(t, uint(120), opts1.Uint) + assert.Equal(t, uint32(130), opts1.Uint32) + + cmd2.SetArgs([]string{ + "--string", "value2", + "--int", "210", + "--bool", + "--uint", "220", + "--uint32", "230", + }) + cmd2.Execute() + assert.Equal(t, "value2", opts2.String) + assert.Equal(t, 210, opts2.Int) + assert.Equal(t, true, opts2.Bool) + assert.Equal(t, uint(220), opts2.Uint) + assert.Equal(t, uint32(230), opts2.Uint32) +} + +// Test that when multiple commands register the same option, they 
can be set +// with environment variables. +func TestConfigOption_getSimpleValue_setMultipleEnv(t *testing.T) { + opts1 := testOptions{} + configOpts1 := ConfigOptions{ + {Name: "string", OptType: types.String, ConfigKey: &opts1.String, FlagDefault: "default1"}, + {Name: "int", OptType: types.Int, ConfigKey: &opts1.Int, FlagDefault: 11}, + {Name: "bool", OptType: types.Bool, ConfigKey: &opts1.Bool, FlagDefault: false}, + {Name: "uint", OptType: types.Uint, ConfigKey: &opts1.Uint, FlagDefault: uint(12)}, + {Name: "uint32", OptType: types.Uint32, ConfigKey: &opts1.Uint32, FlagDefault: uint32(13)}, + } + cmd1 := &cobra.Command{ + Use: "doathing1", + Run: func(_ *cobra.Command, _ []string) { + configOpts1.Require() + configOpts1.SetValues() + }, + } + configOpts1.Init(cmd1) + + opts2 := testOptions{} + configOpts2 := ConfigOptions{ + {Name: "string", OptType: types.String, ConfigKey: &opts2.String, FlagDefault: "default2"}, + {Name: "int", OptType: types.Int, ConfigKey: &opts2.Int, FlagDefault: 21}, + {Name: "bool", OptType: types.Bool, ConfigKey: &opts2.Bool, FlagDefault: false}, + {Name: "uint", OptType: types.Uint, ConfigKey: &opts2.Uint, FlagDefault: uint(22)}, + {Name: "uint32", OptType: types.Uint32, ConfigKey: &opts2.Uint32, FlagDefault: uint32(23)}, + } + cmd2 := &cobra.Command{ + Use: "doathing2", + Run: func(_ *cobra.Command, _ []string) { + configOpts2.Require() + configOpts2.SetValues() + }, + } + configOpts2.Init(cmd2) + + defer os.Setenv("STRING", os.Getenv("STRING")) + defer os.Setenv("INT", os.Getenv("INT")) + defer os.Setenv("BOOL", os.Getenv("BOOL")) + defer os.Setenv("UINT", os.Getenv("UINT")) + defer os.Setenv("UINT32", os.Getenv("UINT32")) + + os.Setenv("STRING", "value1") + os.Setenv("INT", "110") + os.Setenv("BOOL", "true") + os.Setenv("UINT", "120") + os.Setenv("UINT32", "130") + + cmd1.Execute() + assert.Equal(t, "value1", opts1.String) + assert.Equal(t, 110, opts1.Int) + assert.Equal(t, true, opts1.Bool) + assert.Equal(t, uint(120), opts1.Uint) + assert.Equal(t, uint32(130), opts1.Uint32) + + cmd2.Execute() + assert.Equal(t, "value1", opts2.String) + assert.Equal(t, 110, opts2.Int) + assert.Equal(t, true, opts2.Bool) + assert.Equal(t, uint(120), opts2.Uint) + assert.Equal(t, uint32(130), opts2.Uint32) +} diff --git a/support/config/main.go b/support/config/main.go index 0bf0e908e7..c60b8fc81b 100644 --- a/support/config/main.go +++ b/support/config/main.go @@ -3,11 +3,22 @@ package config import ( + "fmt" + "io/ioutil" + "github.com/BurntSushi/toml" "github.com/asaskevich/govalidator" - "github.com/pkg/errors" + "github.com/stellar/go/amount" + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/errors" ) +// TLS represents a common configuration snippet for configuring TLS in a server process +type TLS struct { + CertificateFile string `toml:"certificate-file" valid:"required"` + PrivateKeyFile string `toml:"private-key-file" valid:"required"` +} + // InvalidConfigError is the error that is returned when an invalid // configuration is encountered by the `Read` func. type InvalidConfigError struct { @@ -17,11 +28,26 @@ type InvalidConfigError struct { // Read takes the TOML configuration file at `path`, parses it into `dest` and // then uses github.com/asaskevich/govalidator to validate the struct. 
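
A sketch of how Read and the new TLS snippet are typically consumed, assuming the import path github.com/stellar/go/support/config; the struct, tags and file name are illustrative, and validation relies on the govalidator setup and unknown-key check implemented just below.

package main

import (
	"log"

	"github.com/stellar/go/support/config"
)

// AppConfig is an illustrative configuration struct using the same tag
// conventions as the tests in this package.
type AppConfig struct {
	BindAddr string     `toml:"bind_addr" valid:"required"`
	TLS      config.TLS `toml:"tls" valid:"optional"`
}

func main() {
	var cfg AppConfig
	// Read decodes the TOML file into cfg and validates it; unknown keys
	// and missing required fields surface as errors.
	if err := config.Read("app.toml", &cfg); err != nil {
		log.Fatal(err)
	}
	log.Printf("binding to %s (cert: %s)", cfg.BindAddr, cfg.TLS.CertificateFile)
}
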
func Read(path string, dest interface{}) error { - _, err := toml.DecodeFile(path, dest) + bs, err := ioutil.ReadFile(path) + if err != nil { + return err + } + return decode(string(bs), dest) +} + +func decode(content string, dest interface{}) error { + metadata, err := toml.Decode(content, dest) if err != nil { return errors.Wrap(err, "decode-file failed") } + // Undecoded keys correspond to keys in the TOML document + // that do not have a concrete type in config struct. + undecoded := metadata.Undecoded() + if len(undecoded) > 0 { + return errors.New("Unknown fields: " + fmt.Sprintf("%+v", undecoded)) + } + valid, err := govalidator.ValidateStruct(dest) if valid { @@ -37,4 +63,44 @@ func Read(path string, dest interface{}) error { func init() { govalidator.SetFieldsRequiredByDefault(true) + govalidator.CustomTypeTagMap.Set("stellar_accountid", govalidator.CustomTypeValidator(isStellarAccountID)) + govalidator.CustomTypeTagMap.Set("stellar_seed", govalidator.CustomTypeValidator(isStellarSeed)) + govalidator.CustomTypeTagMap.Set("stellar_amount", govalidator.CustomTypeValidator(isStellarAmount)) + +} + +func isStellarAmount(i interface{}, context interface{}) bool { + enc, ok := i.(string) + + if !ok { + return false + } + + _, err := amount.Parse(enc) + + return err == nil +} + +func isStellarAccountID(i interface{}, context interface{}) bool { + enc, ok := i.(string) + + if !ok { + return false + } + + _, err := strkey.Decode(strkey.VersionByteAccountID, enc) + + return err == nil +} + +func isStellarSeed(i interface{}, context interface{}) bool { + enc, ok := i.(string) + + if !ok { + return false + } + + _, err := strkey.Decode(strkey.VersionByteSeed, enc) + + return err == nil } diff --git a/support/config/main_test.go b/support/config/main_test.go new file mode 100644 index 0000000000..105e83c74b --- /dev/null +++ b/support/config/main_test.go @@ -0,0 +1,124 @@ +package config + +import ( + "testing" + + "github.com/asaskevich/govalidator" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccountIDValidator(t *testing.T) { + var val struct { + Empty string `valid:"stellar_accountid"` + NotSTRKey string `valid:"stellar_accountid"` + NotAccountID string `valid:"stellar_accountid"` + Valid string `valid:"stellar_accountid"` + WrongType int `valid:"stellar_accountid"` + } + + val.NotSTRKey = "hello" + val.NotAccountID = "SA5MATAU4RNJDKCTIC6VVSYSGB7MFFBVU3OKWOA5K67S62EYB5ESKLTV" + val.Valid = "GBXS6WTZNRS7LOGHM3SCMAJD6M6JCXB3GATXECCZ3C5NJ3PVSZ23PEWX" + val.WrongType = 100 + + // run the validation + ok, err := govalidator.ValidateStruct(val) + require.False(t, ok) + require.Error(t, err) + + fields := govalidator.ErrorsByField(err) + + // ensure valid is not in the invalid map + _, ok = fields["Valid"] + assert.False(t, ok) + + _, ok = fields["Empty"] + assert.True(t, ok, "Empty is not an invalid field") + + _, ok = fields["NotSTRKey"] + assert.True(t, ok, "NotSTRKey is not an invalid field") + + _, ok = fields["NotAccountID"] + assert.True(t, ok, "NotAccountID is not an invalid field") + + _, ok = fields["WrongType"] + assert.True(t, ok, "WrongType is not an invalid field") +} + +func TestSeedValidator(t *testing.T) { + var val struct { + Empty string `valid:"stellar_seed"` + NotSTRKey string `valid:"stellar_seed"` + NotSeed string `valid:"stellar_seed"` + Valid string `valid:"stellar_seed"` + WrongType int `valid:"stellar_seed"` + } + + val.NotSTRKey = "hello" + val.NotSeed = "GBXS6WTZNRS7LOGHM3SCMAJD6M6JCXB3GATXECCZ3C5NJ3PVSZ23PEWX" + 
val.Valid = "SA5MATAU4RNJDKCTIC6VVSYSGB7MFFBVU3OKWOA5K67S62EYB5ESKLTV" + val.WrongType = 100 + + // run the validation + ok, err := govalidator.ValidateStruct(val) + require.False(t, ok) + require.Error(t, err) + + fields := govalidator.ErrorsByField(err) + + // ensure valid is not in the invalid map + _, ok = fields["Valid"] + assert.False(t, ok) + + _, ok = fields["Empty"] + assert.True(t, ok, "Empty is not an invalid field") + + _, ok = fields["NotSTRKey"] + assert.True(t, ok, "NotSTRKey is not an invalid field") + + _, ok = fields["NotSeed"] + assert.True(t, ok, "NotSeed is not an invalid field") + + _, ok = fields["WrongType"] + assert.True(t, ok, "WrongType is not an invalid field") +} + +func TestUndecoded(t *testing.T) { + var val struct { + Test string `toml:"test" valid:"optional"` + TLS struct { + CertificateFile string `toml:"certificate-file" valid:"required"` + PrivateKeyFile string `toml:"private-key-file" valid:"required"` + } `valid:"optional"` + } + + // Notice _ in certificate_file + toml := `test="abc" +[tls] +certificate_file="hello" +private-key-file="world"` + + err := decode(toml, &val) + require.Error(t, err) + assert.Equal(t, "Unknown fields: [tls.certificate_file]", err.Error()) +} + +func TestCorrect(t *testing.T) { + var val struct { + Test string `toml:"test" valid:"optional"` + TLS struct { + CertificateFile string `toml:"certificate-file" valid:"required"` + PrivateKeyFile string `toml:"private-key-file" valid:"required"` + } `valid:"optional"` + } + + // Notice _ in certificate_file + toml := `test="abc" +[tls] +certificate-file="hello" +private-key-file="world"` + + err := decode(toml, &val) + require.NoError(t, err) +} diff --git a/support/db/batch_insert_builder.go b/support/db/batch_insert_builder.go new file mode 100644 index 0000000000..ee6427286d --- /dev/null +++ b/support/db/batch_insert_builder.go @@ -0,0 +1,142 @@ +package db + +import ( + "context" + "fmt" + "reflect" + "sort" + + sq "github.com/Masterminds/squirrel" + "github.com/stellar/go/support/errors" +) + +// BatchInsertBuilder works like sq.InsertBuilder but has a better support for batching +// large number of rows. +// It is NOT safe for concurrent use. +type BatchInsertBuilder struct { + Table *Table + // MaxBatchSize defines the maximum size of a batch. If this number is + // reached after calling Row() it will call Exec() immediately inserting + // all rows to a DB. + // Zero (default) will not add rows until explicitly calling Exec. + MaxBatchSize int + + // Suffix adds a sql expression to the end of the query (e.g. an ON CONFLICT clause) + Suffix string + + columns []string + rows [][]interface{} + rowStructType reflect.Type +} + +// Row adds a new row to the batch. All rows must have exactly the same columns +// (map keys). Otherwise, error will be returned. Please note that rows are not +// added one by one but in batches when `Exec` is called (or `MaxBatchSize` is +// reached). 
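
For context, a compact sketch of the intended call pattern (Row accumulates, Exec flushes, and MaxBatchSize triggers intermediate flushes automatically); the table name, conflict clause and session are placeholders.

package example

import (
	"context"

	"github.com/stellar/go/support/db"
)

// flushAccounts batches the given rows into a hypothetical "accounts" table.
// Row calls Exec on its own each time MaxBatchSize rows have accumulated;
// the final Exec flushes the remainder.
func flushAccounts(ctx context.Context, sess *db.Session, rows []map[string]interface{}) error {
	builder := &db.BatchInsertBuilder{
		Table:        sess.GetTable("accounts"),
		MaxBatchSize: 1000,
		// Suffix can turn the INSERT into an upsert-style statement.
		Suffix: "ON CONFLICT (id) DO NOTHING",
	}
	for _, row := range rows {
		if err := builder.Row(ctx, row); err != nil {
			return err
		}
	}
	return builder.Exec(ctx)
}
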
+func (b *BatchInsertBuilder) Row(ctx context.Context, row map[string]interface{}) error { + if b.columns == nil { + b.columns = make([]string, 0, len(row)) + b.rows = make([][]interface{}, 0) + + for column := range row { + b.columns = append(b.columns, column) + } + + sort.Strings(b.columns) + } + + if len(b.columns) != len(row) { + return errors.Errorf("invalid number of columns (expected=%d, actual=%d)", len(b.columns), len(row)) + } + + rowSlice := make([]interface{}, 0, len(b.columns)) + for _, column := range b.columns { + val, ok := row[column] + if !ok { + return errors.Errorf(`column "%s" does not exist`, column) + } + rowSlice = append(rowSlice, val) + } + + b.rows = append(b.rows, rowSlice) + + // Call Exec when MaxBatchSize is reached. + if len(b.rows) == b.MaxBatchSize { + return b.Exec(ctx) + } + + return nil +} + +func (b *BatchInsertBuilder) RowStruct(ctx context.Context, row interface{}) error { + if b.columns == nil { + b.columns = ColumnsForStruct(row) + b.rows = make([][]interface{}, 0) + } + + rowType := reflect.TypeOf(row) + if b.rowStructType == nil { + b.rowStructType = rowType + } else if b.rowStructType != rowType { + return errors.Errorf(`expected value of type "%s" but got "%s" value`, b.rowStructType.String(), rowType.String()) + } + + rrow := reflect.ValueOf(row) + rvals := mapper.FieldsByName(rrow, b.columns) + + // convert fields values to interface{} + columnValues := make([]interface{}, len(b.columns)) + for i, rval := range rvals { + columnValues[i] = rval.Interface() + } + + b.rows = append(b.rows, columnValues) + + // Call Exec when MaxBatchSize is reached. + if len(b.rows) == b.MaxBatchSize { + return b.Exec(ctx) + } + + return nil +} + +func (b *BatchInsertBuilder) insertSQL() sq.InsertBuilder { + insertStatement := sq.Insert(b.Table.Name).Columns(b.columns...) + if len(b.Suffix) > 0 { + return insertStatement.Suffix(b.Suffix) + } + return insertStatement +} + +// Exec inserts rows in batches. In case of errors it's possible that some batches +// were added so this should be run in a DB transaction for easy rollbacks. +func (b *BatchInsertBuilder) Exec(ctx context.Context) error { + sql := b.insertSQL() + paramsCount := 0 + + for _, row := range b.rows { + sql = sql.Values(row...) 
+ paramsCount += len(row) + + if paramsCount > postgresQueryMaxParams-2*len(b.columns) { + _, err := b.Table.Session.Exec(ctx, sql) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("error adding values while inserting to %s", b.Table.Name)) + } + paramsCount = 0 + sql = b.insertSQL() + } + } + + // Insert last batch + if paramsCount > 0 { + _, err := b.Table.Session.Exec(ctx, sql) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("error adding values while inserting to %s", b.Table.Name)) + } + } + + // Clear the rows so user can reuse it for batch inserting to a single table + b.rows = make([][]interface{}, 0) + return nil +} diff --git a/support/db/batch_insert_builder_test.go b/support/db/batch_insert_builder_test.go new file mode 100644 index 0000000000..e283e8bf57 --- /dev/null +++ b/support/db/batch_insert_builder_test.go @@ -0,0 +1,193 @@ +package db + +import ( + "context" + "fmt" + "testing" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type hungerRow struct { + Name string `db:"name"` + HungerLevel string `db:"hunger_level"` +} + +type invalidHungerRow struct { + Name string `db:"name"` + HungerLevel string `db:"hunger_level"` + LastName string `db:"last_name"` +} + +func BenchmarkBatchInsertBuilder(b *testing.B) { + // In order to show SQL queries + // log.SetLevel(logrus.DebugLevel) + db := dbtest.Postgres(b).Load(testSchema) + defer db.Close() + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + ctx := context.Background() + maxBatchSize := 1000 + insertBuilder := &BatchInsertBuilder{ + Table: sess.GetTable("people"), + MaxBatchSize: maxBatchSize, + } + + // Do not count the test initialization + b.ResetTimer() + for i := 0; i < b.N; i++ { + for j := 0; j < maxBatchSize; j++ { + err := insertBuilder.RowStruct(ctx, hungerRow{ + Name: fmt.Sprintf("bubba%d", i*maxBatchSize+j), + HungerLevel: "1202", + }) + require.NoError(b, err) + } + } + err := insertBuilder.Exec(ctx) + + // Do not count the test ending + b.StopTimer() + assert.NoError(b, err) + var count []int + err = sess.SelectRaw(ctx, + &count, + "SELECT COUNT(*) FROM people", + ) + assert.NoError(b, err) + preexistingCount := 3 + assert.Equal(b, b.N*maxBatchSize+preexistingCount, count[0]) +} + +func TestBatchInsertBuilder(t *testing.T) { + db := dbtest.Postgres(t).Load(testSchema) + defer db.Close() + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + ctx := context.Background() + + insertBuilder := &BatchInsertBuilder{ + Table: sess.GetTable("people"), + } + + // exec on the empty set should produce no errors + assert.NoError(t, insertBuilder.Exec(ctx)) + + var err error + + err = insertBuilder.Row(ctx, map[string]interface{}{ + "name": "bubba", + "hunger_level": "120", + }) + assert.NoError(t, err) + + err = insertBuilder.RowStruct(ctx, hungerRow{ + Name: "bubba2", + HungerLevel: "1202", + }) + assert.NoError(t, err) + + // Extra column + err = insertBuilder.Row(ctx, map[string]interface{}{ + "name": "bubba", + "hunger_level": "120", + "abc": "def", + }) + assert.EqualError(t, err, "invalid number of columns (expected=2, actual=3)") + + // Not enough columns + err = insertBuilder.Row(ctx, map[string]interface{}{ + "name": "bubba", + }) + assert.EqualError(t, err, "invalid number of columns (expected=2, actual=1)") + + // Invalid column + err = insertBuilder.Row(ctx, map[string]interface{}{ + "name": "bubba", + "hello": "120", + }) + assert.EqualError(t, err, `column "hunger_level" does not exist`) + + err = 
insertBuilder.RowStruct(ctx, invalidHungerRow{ + Name: "Max", + HungerLevel: "500", + }) + assert.EqualError(t, err, `expected value of type "db.hungerRow" but got "db.invalidHungerRow" value`) + + err = insertBuilder.Exec(ctx) + assert.NoError(t, err) + + // Check rows + var found []person + err = sess.SelectRaw(ctx, &found, `SELECT * FROM people WHERE name like 'bubba%'`) + + require.NoError(t, err) + assert.Equal( + t, + found, + []person{ + {Name: "bubba", HungerLevel: "120"}, + {Name: "bubba2", HungerLevel: "1202"}, + }, + ) + + err = insertBuilder.Row(ctx, map[string]interface{}{ + "name": "bubba", + "hunger_level": "1", + }) + assert.NoError(t, err) + + err = insertBuilder.Exec(ctx) + assert.EqualError( + t, err, "error adding values while inserting to people: exec failed: pq:"+ + " duplicate key value violates unique constraint \"people_pkey\"", + ) + + insertBuilder.Suffix = "ON CONFLICT (name) DO NOTHING" + + err = insertBuilder.Row(ctx, map[string]interface{}{ + "name": "bubba", + "hunger_level": "1", + }) + assert.NoError(t, err) + + err = insertBuilder.Exec(ctx) + assert.NoError(t, err) + + err = sess.SelectRaw(ctx, &found, `SELECT * FROM people WHERE name like 'bubba%'`) + + require.NoError(t, err) + assert.Equal( + t, + found, + []person{ + {Name: "bubba", HungerLevel: "120"}, + {Name: "bubba2", HungerLevel: "1202"}, + }, + ) + + insertBuilder.Suffix = "ON CONFLICT (name) DO UPDATE SET hunger_level = EXCLUDED.hunger_level" + + err = insertBuilder.Row(ctx, map[string]interface{}{ + "name": "bubba", + "hunger_level": "1", + }) + assert.NoError(t, err) + + err = insertBuilder.Exec(ctx) + assert.NoError(t, err) + + err = sess.SelectRaw(ctx, &found, `SELECT * FROM people WHERE name like 'bubba%' order by name desc`) + + require.NoError(t, err) + assert.Equal( + t, + found, + []person{ + {Name: "bubba2", HungerLevel: "1202"}, + {Name: "bubba", HungerLevel: "1"}, + }, + ) +} diff --git a/support/db/dbtest/db.go b/support/db/dbtest/db.go index 6f9c45c1da..bda67c49e8 100644 --- a/support/db/dbtest/db.go +++ b/support/db/dbtest/db.go @@ -1,11 +1,49 @@ package dbtest import ( + "crypto/rand" + "encoding/hex" + "fmt" + "os" + "strconv" + "strings" + "testing" + "github.com/jmoiron/sqlx" - "github.com/pkg/errors" + "github.com/lib/pq" "github.com/stellar/go/support/db/sqlutils" + "github.com/stellar/go/support/errors" + "github.com/stretchr/testify/require" ) +// DB represents an ephemeral database that starts blank and can be used +// to run tests against. +type DB struct { + Dialect string + DSN string + dbName string + t testing.TB + closer func() + closed bool +} + +// randomName returns a new psuedo-random name that is sufficient for naming a +// test database. In the event that reading from the source of randomness +// fails, a panic will occur. 
+func randomName() string { + raw := make([]byte, 6) + + _, err := rand.Read(raw) + if err != nil { + err = errors.Wrap(err, "read from rand failed") + panic(err) + } + + enc := hex.EncodeToString(raw) + + return fmt.Sprintf("test_%s", enc) +} + // Close closes and deletes the database represented by `db` func (db *DB) Close() { if db.closed { @@ -24,26 +62,17 @@ func (db *DB) Load(sql string) *DB { defer conn.Close() tx, err := conn.Begin() - if err != nil { - err = errors.Wrap(err, "begin failed") - panic(err) - } + require.NoError(db.t, err) defer tx.Rollback() for i, cmd := range sqlutils.AllStatements(sql) { _, err = tx.Exec(cmd) - if err != nil { - err = errors.Wrapf(err, "failed execing statement: %d", i) - panic(err) - } + require.NoError(db.t, err, "failed execing statement: %d", i) } err = tx.Commit() - if err != nil { - err = errors.Wrap(err, "commit failed") - panic(err) - } + require.NoError(db.t, err) return db } @@ -51,10 +80,60 @@ func (db *DB) Load(sql string) *DB { // Open opens a sqlx connection to the db. func (db *DB) Open() *sqlx.DB { conn, err := sqlx.Open(db.Dialect, db.DSN) - if err != nil { - err = errors.Wrap(err, "open failed") - panic(err) - } + require.NoError(db.t, err) return conn } + +func (db *DB) Version() (major int) { + conn := db.Open() + defer conn.Close() + + versionFull := "" + err := conn.Get(&versionFull, "SHOW server_version") + require.NoError(db.t, err) + + version := strings.Fields(versionFull) + parts := strings.Split(version[0], ".") + major, err = strconv.Atoi(parts[0]) + require.NoError(db.t, err) + + return major +} + +func execStatement(t testing.TB, pguser, query string) { + db, err := sqlx.Open("postgres", fmt.Sprintf("postgres://%s@localhost/?sslmode=disable", pguser)) + require.NoError(t, err) + _, err = db.Exec(query) + require.NoError(t, err) + require.NoError(t, db.Close()) +} + +// Postgres provisions a new, blank database with a random name on the localhost +// of the running process. It assumes that you have postgres running on the +// default port, have the command line postgres tools installed, and that the +// current user has access to the server. It panics on the event of a failure. +func Postgres(t testing.TB) *DB { + var result DB + result.dbName = randomName() + result.Dialect = "postgres" + result.t = t + + t.Log("Test Database:", result.dbName) + + pgUser := os.Getenv("PGUSER") + if len(pgUser) == 0 { + pgUser = "postgres" + } + + // create the db + execStatement(t, pgUser, "CREATE DATABASE "+pq.QuoteIdentifier(result.dbName)) + + result.DSN = fmt.Sprintf("postgres://%s@localhost/%s?sslmode=disable&timezone=UTC", pgUser, result.dbName) + + result.closer = func() { + execStatement(t, pgUser, "DROP DATABASE "+pq.QuoteIdentifier(result.dbName)) + } + + return &result +} diff --git a/support/db/dbtest/main.go b/support/db/dbtest/main.go deleted file mode 100644 index 0e1a07da3a..0000000000 --- a/support/db/dbtest/main.go +++ /dev/null @@ -1,38 +0,0 @@ -// Package dbtest is a package to ease the pain of developing test code that -// works against external databases. It provides helper functions to provision -// temporary databases and load them with test data. -package dbtest - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - - "github.com/stellar/go/support/errors" -) - -// DB represents an ephemeral database that can be starts blank and can be used -// to run tests against. 
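
A sketch of the test-side usage enabled by the reworked helper: provision a throwaway database tied to the test, load a schema, open a connection, and let Close drop the database afterwards. The schema and assertions are placeholders.

package example

import (
	"testing"

	"github.com/stellar/go/support/db/dbtest"
	"github.com/stretchr/testify/require"
)

const widgetSchema = `CREATE TABLE widgets (id integer PRIMARY KEY);`

func TestWidgets(t *testing.T) {
	// Postgres creates a randomly named database and reports failures
	// through t; Close drops the database again.
	tdb := dbtest.Postgres(t).Load(widgetSchema)
	defer tdb.Close()

	conn := tdb.Open()
	defer conn.Close()

	var count int
	require.NoError(t, conn.Get(&count, "SELECT COUNT(*) FROM widgets"))
	require.Equal(t, 0, count)
}
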
-type DB struct { - Dialect string - DSN string - closer func() - closed bool -} - -// randomName returns a new psuedo-random name that is sufficient for naming a -// test database. In the event that reading from the source of randomness -// fails, a panic will occur. -func randomName() string { - raw := make([]byte, 6) - - _, err := rand.Read(raw) - if err != nil { - err = errors.Wrap(err, "read from rand failed") - panic(err) - } - - enc := hex.EncodeToString(raw) - - return fmt.Sprintf("test_%s", enc) -} diff --git a/support/db/dbtest/mysql.go b/support/db/dbtest/mysql.go deleted file mode 100644 index 9d233e7b7c..0000000000 --- a/support/db/dbtest/mysql.go +++ /dev/null @@ -1,37 +0,0 @@ -package dbtest - -import ( - "fmt" - "os/exec" - - _ "github.com/go-sql-driver/mysql" - "github.com/stellar/go/support/errors" -) - -// Mysql provisions a new, blank database with a random name on the localhost of -// the running process. It assumes that you have mysql running and that the -// root user has access with no password. It panics on -// the event of a failure. -func Mysql() *DB { - var result DB - name := randomName() - result.Dialect = "mysql" - result.DSN = fmt.Sprintf("root@/%s", name) - - // create the db - err := exec.Command("mysql", "-e", fmt.Sprintf("CREATE DATABASE %s;", name)).Run() - if err != nil { - err = errors.Wrap(err, "createdb failed") - panic(err) - } - - result.closer = func() { - err := exec.Command("mysql", "-e", fmt.Sprintf("DROP DATABASE %s;", name)).Run() - if err != nil { - err = errors.Wrap(err, "dropdb failed") - panic(err) - } - } - - return &result -} diff --git a/support/db/dbtest/mysql_test.go b/support/db/dbtest/mysql_test.go deleted file mode 100644 index 39cdbea7ad..0000000000 --- a/support/db/dbtest/mysql_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package dbtest - -import ( - "testing" - - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/require" -) - -func TestMysql(t *testing.T) { - db := Mysql() - t.Log("tempdb url", db.DSN) - - conn, err := sqlx.Open("mysql", db.DSN) - require.NoError(t, err) - - _, err = conn.Exec("CREATE TABLE t1 (c1 INT PRIMARY KEY) ;") - require.NoError(t, err) - - db.Close() - _, err = conn.Exec("CREATE TABLE t2 (c1 INT PRIMARY KEY) ;") - require.Error(t, err) - - // ensure Close() can be called multiple times - db.Close() -} diff --git a/support/db/dbtest/postgres.go b/support/db/dbtest/postgres.go deleted file mode 100644 index 32aefd4d75..0000000000 --- a/support/db/dbtest/postgres.go +++ /dev/null @@ -1,37 +0,0 @@ -package dbtest - -import ( - "fmt" - "os/exec" - - _ "github.com/lib/pq" - "github.com/stellar/go/support/errors" -) - -// Postgres provisions a new, blank database with a random name on the localhost -// of the running process. It assumes that you have postgres running on the -// default port, have the command line postgres tools installed, and that the -// current user has access to the server. It panics on the event of a failure. 
-func Postgres() *DB { - var result DB - name := randomName() - result.Dialect = "postgres" - result.DSN = fmt.Sprintf("postgres://localhost/%s?sslmode=disable", name) - - // create the db - err := exec.Command("createdb", name).Run() - if err != nil { - err = errors.Wrap(err, "createdb failed") - panic(err) - } - - result.closer = func() { - err := exec.Command("dropdb", name).Run() - if err != nil { - err = errors.Wrap(err, "dropdb failed") - panic(err) - } - } - - return &result -} diff --git a/support/db/dbtest/postgres_test.go b/support/db/dbtest/postgres_test.go index e9f065fe26..202738b2e1 100644 --- a/support/db/dbtest/postgres_test.go +++ b/support/db/dbtest/postgres_test.go @@ -1,23 +1,49 @@ package dbtest import ( - "os/exec" + "fmt" "testing" + "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestPostgres(t *testing.T) { - db := Postgres() + db := Postgres(t) t.Log("tempdb url", db.DSN) - err := exec.Command("psql", db.DSN, "-c", "SELECT 1").Run() + conn := db.Open() + _, err := conn.Exec("SELECT 1") require.NoError(t, err) + require.NoError(t, conn.Close()) db.Close() - err = exec.Command("psql", db.DSN, "-c", "SELECT 1").Run() - require.Error(t, err) + + conn = db.Open() + _, err = conn.Exec("SELECT 1") + require.EqualError(t, err, fmt.Sprintf("pq: database \"%s\" does not exist", db.dbName)) + require.Contains(t, err.Error(), "data") // ensure Close() can be called multiple times db.Close() } + +func TestPostgres_clientTimezone(t *testing.T) { + db := Postgres(t) + conn := db.Open() + defer conn.Close() + + timestamp := time.Time{} + err := conn.Get(×tamp, "SELECT TO_TIMESTAMP('2020-03-19 16:56:00', 'YYYY-MM-DD HH24:MI:SS')") + require.NoError(t, err) + + wantTimestamp := time.Date(2020, 3, 19, 16, 56, 0, 0, time.UTC) + assert.Equal(t, wantTimestamp, timestamp) +} + +func TestPostgres_Version(t *testing.T) { + db := Postgres(t) + majorVersion := db.Version() + assert.GreaterOrEqual(t, majorVersion, 9) +} diff --git a/support/db/dbtest/sqlite.go b/support/db/dbtest/sqlite.go deleted file mode 100644 index 844f825c55..0000000000 --- a/support/db/dbtest/sqlite.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build cgo - -package dbtest - -import ( - "io/ioutil" - "os" - - _ "github.com/mattn/go-sqlite3" - "github.com/pkg/errors" -) - -// Sqlite provisions a new, blank database sqlite database. It panics on the event of a failure. 
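
Since Version is new in this change, a small sketch of gating a test on the server's major version; the threshold and test name are made up.

package example

import (
	"testing"

	"github.com/stellar/go/support/db/dbtest"
)

func TestNeedsNewerPostgres(t *testing.T) {
	tdb := dbtest.Postgres(t)
	defer tdb.Close()

	// Version reports the Postgres major version, so tests that depend on
	// newer server features can skip themselves on older installations.
	if tdb.Version() < 12 {
		t.Skip("requires PostgreSQL 12 or newer")
	}
	// ...exercise the version-dependent behaviour here...
}
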
-func Sqlite() *DB { - var result DB - - tmpfile, err := ioutil.TempFile("", "test.sqlite") - if err != nil { - err = errors.Wrap(err, "create temp file failed") - panic(err) - } - - tmpfile.Close() - err = os.Remove(tmpfile.Name()) - - if err != nil { - err = errors.Wrap(err, "remove first temp file failed") - panic(err) - } - - result.Dialect = "sqlite3" - result.DSN = tmpfile.Name() - result.closer = func() { - err := os.Remove(tmpfile.Name()) - if err != nil { - err = errors.Wrap(err, "remove db file failed") - panic(err) - } - } - - return &result -} diff --git a/support/db/dbtest/sqlite_test.go b/support/db/dbtest/sqlite_test.go deleted file mode 100644 index cab95ba863..0000000000 --- a/support/db/dbtest/sqlite_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build cgo - -package dbtest - -import ( - "testing" - - "github.com/jmoiron/sqlx" - "github.com/stretchr/testify/require" -) - -func TestSqlite(t *testing.T) { - tdb := Sqlite() - t.Log(tdb.DSN) - - db, err := sqlx.Open("sqlite3", tdb.DSN) - require.NoError(t, err) - _, err = db.Exec("SELECT 1") - require.NoError(t, err) - - db.Close() - tdb.Close() - db, err = sqlx.Open("sqlite", tdb.DSN) - require.Error(t, err) - - tdb.Close() -} diff --git a/support/db/delete_builder.go b/support/db/delete_builder.go new file mode 100644 index 0000000000..e467f6da91 --- /dev/null +++ b/support/db/delete_builder.go @@ -0,0 +1,28 @@ +package db + +import ( + "context" + "database/sql" + + "github.com/pkg/errors" +) + +// Exec executes the query represented by the builder, deleting any rows that +// match the queries where clauses. +func (delb *DeleteBuilder) Exec(ctx context.Context) (sql.Result, error) { + r, err := delb.Table.Session.Exec(ctx, delb.sql) + if err != nil { + return nil, errors.Wrap(err, "delete failed") + } + return r, nil +} + +// Where is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#DeleteBuilder.Where +func (delb *DeleteBuilder) Where( + pred interface{}, + args ...interface{}, +) *DeleteBuilder { + delb.sql = delb.sql.Where(pred, args...) + return delb +} diff --git a/support/db/delete_builder_test.go b/support/db/delete_builder_test.go new file mode 100644 index 0000000000..cf0eb40594 --- /dev/null +++ b/support/db/delete_builder_test.go @@ -0,0 +1,32 @@ +package db + +import ( + "context" + "testing" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDeleteBuilder_Exec(t *testing.T) { + db := dbtest.Postgres(t).Load(testSchema) + defer db.Close() + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + + ctx := context.Background() + tbl := sess.GetTable("people") + r, err := tbl.Delete("name = ?", "scott").Exec(ctx) + + if assert.NoError(t, err, "query error") { + actual, err := r.RowsAffected() + require.NoError(t, err) + assert.Equal(t, int64(1), actual) + + var found int + err = sess.GetRaw(ctx, &found, "SELECT COUNT(*) FROM people WHERE name = ?", "scott") + require.NoError(t, err) + assert.Equal(t, 0, found) + } +} diff --git a/support/db/errors.go b/support/db/errors.go new file mode 100644 index 0000000000..9d2966173c --- /dev/null +++ b/support/db/errors.go @@ -0,0 +1,12 @@ +package db + +// NoRowsError is returned when an insert is attempted without providing any +// values to insert. 
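
To round out the delete builder above, a sketch of narrowing the initial predicate with a chained Where before Exec; the table, predicates and session are placeholders.

package example

import (
	"context"

	"github.com/stellar/go/support/db"
)

// pruneHungry deletes rows matching both predicates and reports how many
// rows were removed.
func pruneHungry(ctx context.Context, sess *db.Session) (int64, error) {
	result, err := sess.GetTable("people").
		Delete("hunger_level > ?", 100).
		Where("name != ?", "scott").
		Exec(ctx)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}
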
+type NoRowsError struct { +} + +func (err *NoRowsError) Error() string { + return "no rows provided to insert" +} + +var _ error = &NoRowsError{} diff --git a/support/db/get_builder.go b/support/db/get_builder.go new file mode 100644 index 0000000000..6d5ef64134 --- /dev/null +++ b/support/db/get_builder.go @@ -0,0 +1,64 @@ +package db + +import ( + "context" + "github.com/stellar/go/support/errors" +) + +// Exec executes the query represented by the builder, populating the +// destination with the results returned by running the query against the +// current database session. +func (gb *GetBuilder) Exec(ctx context.Context) error { + err := gb.Table.Session.Get(ctx, gb.dest, gb.sql) + if err != nil { + return errors.Wrap(err, "get failed") + } + + return nil +} + +// Offset is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.Offset +func (gb *GetBuilder) Offset(offset uint64) *GetBuilder { + gb.sql = gb.sql.Offset(offset) + return gb +} + +// OrderBy is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.OrderBy +func (gb *GetBuilder) OrderBy( + orderBys ...string, +) *GetBuilder { + gb.sql = gb.sql.OrderBy(orderBys...) + return gb +} + +// Prefix is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.Prefix +func (gb *GetBuilder) Prefix( + sql string, + args ...interface{}, +) *GetBuilder { + gb.sql = gb.sql.Prefix(sql, args...) + return gb +} + +// Suffix is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.Suffix +func (gb *GetBuilder) Suffix( + sql string, + args ...interface{}, +) *GetBuilder { + gb.sql = gb.sql.Suffix(sql, args...) + return gb +} + +// Where is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#GetBuilder.Where +func (gb *GetBuilder) Where( + pred interface{}, + args ...interface{}, +) *GetBuilder { + gb.sql = gb.sql.Where(pred, args...) + return gb +} diff --git a/support/db/get_builder_test.go b/support/db/get_builder_test.go new file mode 100644 index 0000000000..7af3d47372 --- /dev/null +++ b/support/db/get_builder_test.go @@ -0,0 +1,26 @@ +package db + +import ( + "context" + "testing" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" +) + +func TestGetBuilder_Exec(t *testing.T) { + db := dbtest.Postgres(t).Load(testSchema) + defer db.Close() + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + + var found person + + tbl := sess.GetTable("people") + err := tbl.Get(&found, "name = ?", "scott").Exec(context.Background()) + + if assert.NoError(t, err, "query error") { + assert.Equal(t, "scott", found.Name) + assert.Equal(t, "1000000", found.HungerLevel) + } +} diff --git a/support/db/insert_builder.go b/support/db/insert_builder.go new file mode 100644 index 0000000000..0cb465ef42 --- /dev/null +++ b/support/db/insert_builder.go @@ -0,0 +1,76 @@ +package db + +import ( + "context" + "database/sql" + "reflect" + + "github.com/pkg/errors" +) + +// Exec executes the query represented by the builder, inserting each val +// provided to the builder into the database. 
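
Similarly, a sketch of how the get builder and its passthroughs compose from calling code; the table, predicate and ordering are placeholders, and the struct mirrors the test fixture above.

package example

import (
	"context"

	"github.com/stellar/go/support/db"
)

type person struct {
	Name        string `db:"name"`
	HungerLevel string `db:"hunger_level"`
}

// hungriest loads the single row with the highest hunger level, composing a
// predicate and OrderBy before Exec runs the query.
func hungriest(ctx context.Context, sess *db.Session) (person, error) {
	var p person
	err := sess.GetTable("people").
		Get(&p, "hunger_level > ?", 0).
		OrderBy("hunger_level DESC").
		Exec(ctx)
	return p, err
}
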
+func (ib *InsertBuilder) Exec(ctx context.Context) (sql.Result, error) { + if len(ib.rows) == 0 { + return nil, &NoRowsError{} + } + + template := ib.rows[0] + cols := ColumnsForStruct(template) + + if ib.ignoredCols != nil { + finalCols := make([]string, 0, len(cols)) + for _, col := range cols { + if ib.ignoredCols[col] { + continue + } + finalCols = append(finalCols, col) + } + cols = finalCols + } + + sql := ib.sql.Columns(cols...) + + // add rows onto the builder + for _, row := range ib.rows { + + // extract field values + rrow := reflect.ValueOf(row) + rvals := mapper.FieldsByName(rrow, cols) + + // convert fields values to interface{} + vals := make([]interface{}, len(cols)) + for i, rval := range rvals { + vals[i] = rval.Interface() + } + + // append row to insert statement + sql = sql.Values(vals...) + } + + // TODO: support return inserted id + + r, err := ib.Table.Session.Exec(ctx, sql) + if err != nil { + return nil, errors.Wrap(err, "insert failed") + } + + return r, nil +} + +// IgnoreCols adds colums to ignore list (will not be inserted) +func (ib *InsertBuilder) IgnoreCols(cols ...string) *InsertBuilder { + if ib.ignoredCols == nil { + ib.ignoredCols = make(map[string]bool) + } + for _, col := range cols { + ib.ignoredCols[col] = true + } + return ib +} + +// Rows appends more rows onto the insert statement +func (ib *InsertBuilder) Rows(rows ...interface{}) *InsertBuilder { + ib.rows = append(ib.rows, rows...) + return ib +} diff --git a/support/db/insert_builder_test.go b/support/db/insert_builder_test.go new file mode 100644 index 0000000000..85c2465640 --- /dev/null +++ b/support/db/insert_builder_test.go @@ -0,0 +1,76 @@ +package db + +import ( + "context" + "testing" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInsertBuilder_Exec(t *testing.T) { + ctx := context.Background() + db := dbtest.Postgres(t).Load(testSchema) + defer db.Close() + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + + tbl := sess.GetTable("people") + + _, err := tbl.Insert(person{ + Name: "bubba", + HungerLevel: "120", + }).Exec(ctx) + + if assert.NoError(t, err) { + var found []person + err = sess.SelectRaw(ctx, + &found, + "SELECT * FROM people WHERE name = ?", + "bubba", + ) + + require.NoError(t, err) + + if assert.Len(t, found, 1) { + assert.Equal(t, "bubba", found[0].Name) + assert.Equal(t, "120", found[0].HungerLevel) + } + } + + // no rows + _, err = tbl.Insert().Exec(ctx) + if assert.Error(t, err) { + assert.IsType(t, &NoRowsError{}, err) + assert.EqualError(t, err, "no rows provided to insert") + } + + // multi rows + r, err := tbl.Insert(person{ + Name: "bubba2", + HungerLevel: "120", + }, person{ + Name: "bubba3", + HungerLevel: "120", + }).Exec(ctx) + + if assert.NoError(t, err) { + count, err2 := r.RowsAffected() + require.NoError(t, err2) + assert.Equal(t, int64(2), count) + } + + // invalid columns in struct + _, err = tbl.Insert(struct { + Name string `db:"name"` + HungerLevel string `db:"hunger_level"` + NotAColumn int `db:"not_a_column"` + }{ + Name: "bubba2", + HungerLevel: "120", + NotAColumn: 3, + }).Exec(ctx) + + assert.Error(t, err) +} diff --git a/support/db/internal.go b/support/db/internal.go new file mode 100644 index 0000000000..ab85ed4108 --- /dev/null +++ b/support/db/internal.go @@ -0,0 +1,47 @@ +package db + +import ( + "reflect" + "sort" + "strings" + + "github.com/jmoiron/sqlx/reflectx" +) + +var mapper = reflectx.NewMapper("db") + +// ColumnsForStruct 
returns a slice of column names for the provided value +// (which should be a struct, a slice of structs). +func ColumnsForStruct(dest interface{}) []string { + typ := reflect.TypeOf(dest) + + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + if typ.Kind() == reflect.Slice { + typ = typ.Elem() + } + + typmap := mapper.TypeMap(typ) + + var keys []string + for k := range typmap.Names { + // If a struct contains another struct (ex. sql.NullString) mapper.TypeMap + // will return the fields of an internal struct (like: "payment_id.String", + // "payment_id.Valid"). + // This will break the query so skip these fields. + if strings.Contains(k, ".") { + continue + } + keys = append(keys, k) + } + + // Ensure keys are sorted. keys is populated from a map, which has no + // defined iteration order. Different versions of go or different + // architectures may cause non-deterministic results to occur (and in our CI + // environment, they have). To make testing easier, we sort the keys. + sort.Strings(keys) + + return keys +} diff --git a/support/db/internal_test.go b/support/db/internal_test.go new file mode 100644 index 0000000000..8ce0370a92 --- /dev/null +++ b/support/db/internal_test.go @@ -0,0 +1,85 @@ +package db + +import "testing" +import "github.com/stretchr/testify/assert" + +const testSchema = ` +CREATE TABLE IF NOT EXISTS people ( + name character varying NOT NULL, + hunger_level integer NOT NULL, + PRIMARY KEY (name) +); +DELETE FROM people; +INSERT INTO people (name, hunger_level) VALUES ('scott', 1000000); +INSERT INTO people (name, hunger_level) VALUES ('jed', 10); +INSERT INTO people (name, hunger_level) VALUES ('bartek', 10); +` + +func TestColumnsForStruct(t *testing.T) { + cases := []struct { + Name string + Struct interface{} + Expected []string + }{ + { + Name: "simple", + Struct: struct { + Name string `db:"name"` + }{}, + Expected: []string{"name"}, + }, + { + Name: "simple pointer", + Struct: &struct { + Name string `db:"name"` + }{}, + Expected: []string{"name"}, + }, + { + Name: "slice", + Struct: []struct { + Name string `db:"name"` + }{}, + Expected: []string{"name"}, + }, + { + Name: "slice pointer", + Struct: &[]struct { + Name string `db:"name"` + }{}, + Expected: []string{"name"}, + }, + { + Name: "ignored", + Struct: struct { + Name string `db:"name"` + Ignore string `db:"-"` + Age string `db:"age"` + }{}, + Expected: []string{"age", "name"}, + }, + { + Name: "unannotated", + Struct: struct { + Name string `db:"name"` + Age string + Level int `json:"level"` + }{}, + Expected: []string{"Age", "Level", "name"}, + }, + { + Name: "private", + Struct: struct { + Name string `db:"name"` + age string + }{}, + Expected: []string{"name"}, + }, + } + + for _, kase := range cases { + actual := ColumnsForStruct(kase.Struct) + + assert.Equal(t, kase.Expected, actual, "case '%s' failed", kase.Name) + } +} diff --git a/support/db/main.go b/support/db/main.go index ac3830f80c..5a316899c5 100644 --- a/support/db/main.go +++ b/support/db/main.go @@ -1,61 +1,187 @@ // Package db is the base package for database access at stellar. It primarily -// exposes Repo which is a lightweight wrapper around a *sqlx.DB that provides -// utility methods (See the repo tests for examples). +// exposes Session which is a lightweight wrapper around a *sqlx.DB that +// provides utility methods (See the repo tests for examples). // // In addition to the query methods, this package provides query logging and // stateful transaction management. 
+// +// In addition to the lower-level access facilities, this package exposes a +// system to build queries more dynamically using the help of +// https://github.com/Masterminds/squirrel. These builder method are access +// through the `Table` type. package db import ( + "context" "database/sql" + "time" + "github.com/Masterminds/squirrel" "github.com/jmoiron/sqlx" "github.com/stellar/go/support/errors" - "golang.org/x/net/context" - // Enable mysql - _ "github.com/go-sql-driver/mysql" // Enable postgres _ "github.com/lib/pq" ) +const ( + // postgresQueryMaxParams defines the maximum number of parameters in a query. + postgresQueryMaxParams = 65535 + maxDBPingAttempts = 30 +) + +var ( + // ErrTimeout is an error returned by Session methods when request has + // taken longer than context's deadline max duration + ErrTimeout = errors.New("canceling statement due to lack of response within timeout period") + // ErrCancelled is an error returned by Session methods when request has + // been cancelled (ex. context cancelled). + ErrCancelled = errors.New("canceling statement due to user request") + // ErrConflictWithRecovery is an error returned by Session methods when + // read replica cancels the query due to conflict with about-to-be-applied + // WAL entries (https://www.postgresql.org/docs/current/hot-standby.html). + ErrConflictWithRecovery = errors.New("canceling statement due to conflict with recovery") + // ErrBadConnection is an error returned when driver returns `bad connection` + // error. + ErrBadConnection = errors.New("bad connection") +) + // Conn represents a connection to a single database. type Conn interface { - Exec(query string, args ...interface{}) (sql.Result, error) - Get(dest interface{}, query string, args ...interface{}) error + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error Rebind(sql string) string - Queryx(query string, args ...interface{}) (*sqlx.Rows, error) - Select(dest interface{}, query string, args ...interface{}) error + QueryxContext(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) + SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error +} + +// DeleteBuilder is a helper struct used to construct sql queries of the DELETE +// variety. +type DeleteBuilder struct { + Table *Table + sql squirrel.DeleteBuilder +} + +// InsertBuilder is a helper struct used to construct sql queries of the INSERT +// variety. +// NOTE: InsertBuilder will use "zero" value of a type in case of nil pointer values. +// If you need to insert `NULL` use sql.Null* or build your own type that implements +// database/sql/driver.Valuer. +type InsertBuilder struct { + Table *Table + + rows []interface{} + ignoredCols map[string]bool + sql squirrel.InsertBuilder +} + +// GetBuilder is a helper struct used to construct sql queries of the SELECT +// variety. +type GetBuilder struct { + Table *Table + + dest interface{} + sql squirrel.SelectBuilder +} + +// SelectBuilder is a helper struct used to construct sql queries of the SELECT +// variety. +type SelectBuilder struct { + Table *Table + + dest interface{} + sql squirrel.SelectBuilder } -// Repo provides helper methods for making queries against `DB` and provides -// utilities such as automatic query logging and transaction management. 
NOTE: -// A Repo is designed to be lightweight and temporarily lived (usually request -// scoped) which is one reason it is acceptable for it to store a context. -type Repo struct { +// UpdateBuilder is a helper struct used to construct sql queries of the UPDATE +// variety. +type UpdateBuilder struct { + Table *Table + + sql squirrel.UpdateBuilder +} + +// Session provides helper methods for making queries against `DB` and provides +// utilities such as automatic query logging and transaction management. NOTE: +// Because transaction-handling is stateful, it is not presently intended to +// cross goroutine boundaries and is not concurrency safe. +type Session struct { // DB is the database connection that queries should be executed against. DB *sqlx.DB - // Ctx is the optional context in which the repo is operating under. - Ctx context.Context + tx *sqlx.Tx + txOptions *sql.TxOptions +} + +type SessionInterface interface { + BeginTx(opts *sql.TxOptions) error + Begin() error + Rollback() error + Commit() error + GetTx() *sqlx.Tx + GetTxOptions() *sql.TxOptions + TruncateTables(ctx context.Context, tables []string) error + Clone() SessionInterface + Close() error + Get(ctx context.Context, dest interface{}, query squirrel.Sqlizer) error + GetRaw(ctx context.Context, dest interface{}, query string, args ...interface{}) error + Select(ctx context.Context, dest interface{}, query squirrel.Sqlizer) error + SelectRaw(ctx context.Context, dest interface{}, query string, args ...interface{}) error + Query(ctx context.Context, query squirrel.Sqlizer) (*sqlx.Rows, error) + QueryRaw(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) + GetTable(name string) *Table + Exec(ctx context.Context, query squirrel.Sqlizer) (sql.Result, error) + ExecRaw(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + NoRows(err error) bool + Ping(ctx context.Context, timeout time.Duration) error + DeleteRange( + ctx context.Context, + start, end int64, + table string, + idCol string, + ) error +} + +// Table helps to build sql queries against a given table. It logically +// represents a SQL table on the database that `Session` is connected to. +type Table struct { + // Name is the name of the table + Name string - tx *sqlx.Tx + Session *Session } -// Open the postgres database at `url` and returns a new *Repo using it. -func Open(dialect, url string) (*Repo, error) { - db, err := sqlx.Connect(dialect, url) +func pingDB(db *sqlx.DB) error { + var err error + for attempt := 0; attempt < maxDBPingAttempts; attempt++ { + if err = db.Ping(); err == nil { + return nil + } + time.Sleep(time.Second) + } + + return errors.Wrapf(err, "failed to connect to DB after %v attempts", maxDBPingAttempts) +} + +// Open the database at `dsn` and returns a new *Session using it. +func Open(dialect, dsn string) (*Session, error) { + db, err := sqlx.Open(dialect, dsn) if err != nil { - return nil, errors.Wrap(err, "connect failed") + return nil, errors.Wrap(err, "open failed") + } + if err = pingDB(db); err != nil { + return nil, errors.Wrap(err, "ping failed") } - return &Repo{DB: db}, nil + return &Session{DB: db}, nil } // Wrap wraps a bare *sql.DB (from the database/sql stdlib package) in a -// *db.Repo instance. -func Wrap(base *sql.DB, dialect string) *Repo { - return &Repo{DB: sqlx.NewDb(base, dialect)} +// *db.Session instance. 
It is meant to be used in cases where you do not +// control the instantiation of the database connection, but would still like to +// leverage the facilities provided in Session. +func Wrap(base *sql.DB, dialect string) *Session { + return &Session{DB: sqlx.NewDb(base, dialect)} } // ensure various types conform to Conn interface diff --git a/support/db/main_test.go b/support/db/main_test.go new file mode 100644 index 0000000000..8ca94f1e3b --- /dev/null +++ b/support/db/main_test.go @@ -0,0 +1,29 @@ +package db + +import ( + "testing" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" +) + +type person struct { + Name string `db:"name"` + HungerLevel string `db:"hunger_level"` + + SomethingIgnored int `db:"-"` +} + +func TestGetTable(t *testing.T) { + db := dbtest.Postgres(t).Load(testSchema) + defer db.Close() + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + + tbl := sess.GetTable("users") + if assert.NotNil(t, tbl) { + assert.Equal(t, "users", tbl.Name) + assert.Equal(t, sess, tbl.Session) + } + +} diff --git a/support/db/metrics.go b/support/db/metrics.go new file mode 100644 index 0000000000..5e893f026e --- /dev/null +++ b/support/db/metrics.go @@ -0,0 +1,481 @@ +package db + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/Masterminds/squirrel" + "github.com/prometheus/client_golang/prometheus" +) + +type CtxKey string + +var RouteContextKey = CtxKey("route") +var QueryTypeContextKey = CtxKey("query_type") + +type Subservice string + +var CoreSubservice = Subservice("core") +var HistoryPrimarySubservice = Subservice("history_primary") +var HistorySubservice = Subservice("history") +var IngestSubservice = Subservice("ingest") + +type QueryType string + +var DeleteQueryType = QueryType("delete") +var InsertQueryType = QueryType("insert") +var SelectQueryType = QueryType("select") +var UndefinedQueryType = QueryType("undefined") +var UpdateQueryType = QueryType("update") +var UpsertQueryType = QueryType("upsert") + +// contextRoute returns a string representing the request endpoint, or "undefined" if it wasn't found +func contextRoute(ctx context.Context) string { + if endpoint, ok := ctx.Value(&RouteContextKey).(string); ok { + return endpoint + } + return "undefined" +} + +type SessionWithMetrics struct { + SessionInterface + + registry *prometheus.Registry + queryCounter *prometheus.CounterVec + queryDurationSummary *prometheus.SummaryVec + maxOpenConnectionsGauge prometheus.GaugeFunc + openConnectionsGauge prometheus.GaugeFunc + inUseConnectionsGauge prometheus.GaugeFunc + idleConnectionsGauge prometheus.GaugeFunc + waitCountCounter prometheus.CounterFunc + waitDurationCounter prometheus.CounterFunc + maxIdleClosedCounter prometheus.CounterFunc + maxIdleTimeClosedCounter prometheus.CounterFunc + maxLifetimeClosedCounter prometheus.CounterFunc + roundTripProbe *roundTripProbe + roundTripTimeSummary prometheus.Summary +} + +func RegisterMetrics(base *Session, namespace string, sub Subservice, registry *prometheus.Registry) SessionInterface { + s := &SessionWithMetrics{ + SessionInterface: base, + registry: registry, + } + + s.queryCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "query_total", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + []string{"query_type", "error", "route"}, + ) + registry.MustRegister(s.queryCounter) + + s.queryDurationSummary = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Namespace: 
namespace, Subsystem: "db", + Name: "query_duration_seconds", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + []string{"query_type", "error", "route"}, + ) + registry.MustRegister(s.queryDurationSummary) + + // txnCounter: prometheus.NewCounter( + // prometheus.CounterOpts{Namespace: namespace, Subsystem: "db", Name: "transaction_total"}, + // ), + // registry.MustRegister(s.txnCounter) + // txnDuration: prometheus.NewHistogram( + // prometheus.HistogramOpts{ + // Namespace: namespace, Subsystem: "db", + // Name: "transaction_duration_seconds", + // Buckets: prometheus.ExponentialBuckets(0.1, 3, 5), + // }, + // ), + // registry.MustRegister(s.txnDuration) + + s.maxOpenConnectionsGauge = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "max_open_connections", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + func() float64 { + // Right now MaxOpenConnections in Horizon is static however it's possible that + // it will change one day. In such case, using GaugeFunc is very cheap and will + // prevent issues with this metric in the future. + return float64(base.DB.Stats().MaxOpenConnections) + }, + ) + registry.MustRegister(s.maxOpenConnectionsGauge) + + s.openConnectionsGauge = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "open_connections", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + func() float64 { + return float64(base.DB.Stats().OpenConnections) + }, + ) + registry.MustRegister(s.openConnectionsGauge) + + s.inUseConnectionsGauge = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "in_use_connections", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + func() float64 { + return float64(base.DB.Stats().InUse) + }, + ) + registry.MustRegister(s.inUseConnectionsGauge) + + s.idleConnectionsGauge = prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "idle_connections", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + func() float64 { + return float64(base.DB.Stats().Idle) + }, + ) + registry.MustRegister(s.idleConnectionsGauge) + + s.waitCountCounter = prometheus.NewCounterFunc( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "wait_count_total", + Help: "total number of connections waited for", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + func() float64 { + return float64(base.DB.Stats().WaitCount) + }, + ) + registry.MustRegister(s.waitCountCounter) + + s.waitDurationCounter = prometheus.NewCounterFunc( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "wait_duration_seconds_total", + Help: "total time blocked waiting for a new connection", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + func() float64 { + return base.DB.Stats().WaitDuration.Seconds() + }, + ) + registry.MustRegister(s.waitDurationCounter) + + s.maxIdleClosedCounter = prometheus.NewCounterFunc( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "max_idle_closed_total", + Help: "total number of connections closed due to SetMaxIdleConns", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + func() float64 { + return float64(base.DB.Stats().MaxIdleClosed) + }, + ) + registry.MustRegister(s.maxIdleClosedCounter) + + s.maxIdleTimeClosedCounter = prometheus.NewCounterFunc( + 
prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "max_idle_time_closed_total", + Help: "total number of connections closed due to SetConnMaxIdleTime", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + func() float64 { + return float64(base.DB.Stats().MaxIdleTimeClosed) + }, + ) + registry.MustRegister(s.maxIdleTimeClosedCounter) + + s.maxLifetimeClosedCounter = prometheus.NewCounterFunc( + prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "max_lifetime_closed_total", + Help: "total number of connections closed due to SetConnMaxLifetime", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + func() float64 { + return float64(base.DB.Stats().MaxLifetimeClosed) + }, + ) + registry.MustRegister(s.maxLifetimeClosedCounter) + + s.roundTripTimeSummary = prometheus.NewSummary( + prometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: "db", + Name: "round_trip_time_seconds", + Help: "time required to run `select 1` query in a DB - effectively measures round trip time, if time exceeds 1s it will be recorded as 1", + ConstLabels: prometheus.Labels{"subservice": string(sub)}, + }, + ) + registry.MustRegister(s.roundTripTimeSummary) + + s.roundTripProbe = &roundTripProbe{ + session: base, + roundTripTimeSummary: s.roundTripTimeSummary, + } + s.roundTripProbe.start() + return s +} + +func (s *SessionWithMetrics) Close() error { + s.roundTripProbe.close() + + s.registry.Unregister(s.queryCounter) + s.registry.Unregister(s.queryDurationSummary) + // s.registry.Unregister(s.txnCounter) + // s.registry.Unregister(s.txnDurationSummary) + s.registry.Unregister(s.maxOpenConnectionsGauge) + s.registry.Unregister(s.openConnectionsGauge) + s.registry.Unregister(s.inUseConnectionsGauge) + s.registry.Unregister(s.idleConnectionsGauge) + s.registry.Unregister(s.waitCountCounter) + s.registry.Unregister(s.waitDurationCounter) + s.registry.Unregister(s.maxIdleClosedCounter) + s.registry.Unregister(s.maxIdleTimeClosedCounter) + s.registry.Unregister(s.maxLifetimeClosedCounter) + return s.SessionInterface.Close() +} + +// TODO: Implement these +// func (s *SessionWithMetrics) BeginTx(ctx context.Context, opts *sql.TxOptions) error { +// func (s *SessionWithMetrics) Begin(ctx context.Context) error { +// func (s *SessionWithMetrics) Commit(ctx context.Context) error +// func (s *SessionWithMetrics) Rollback(ctx context.Context) error + +func (s *SessionWithMetrics) TruncateTables(ctx context.Context, tables []string) (err error) { + timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { + s.queryDurationSummary.With(prometheus.Labels{ + "query_type": "truncate_tables", + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Observe(v) + })) + defer func() { + timer.ObserveDuration() + s.queryCounter.With(prometheus.Labels{ + "query_type": "truncate_tables", + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Inc() + }() + + err = s.SessionInterface.TruncateTables(ctx, tables) + return err +} + +func (s *SessionWithMetrics) Clone() SessionInterface { + return &SessionWithMetrics{ + SessionInterface: s.SessionInterface.Clone(), + + // Note that the cloned Session will point at the same roundTripProbe + // to avoid starting multiple goroutines.
+ roundTripProbe: s.roundTripProbe, + + registry: s.registry, + queryCounter: s.queryCounter, + queryDurationSummary: s.queryDurationSummary, + // txnCounter: s.txnCounter, + // txnDurationSummary: s.txnDurationSummary, + maxOpenConnectionsGauge: s.maxOpenConnectionsGauge, + openConnectionsGauge: s.openConnectionsGauge, + inUseConnectionsGauge: s.inUseConnectionsGauge, + idleConnectionsGauge: s.idleConnectionsGauge, + waitCountCounter: s.waitCountCounter, + waitDurationCounter: s.waitDurationCounter, + maxIdleClosedCounter: s.maxIdleClosedCounter, + maxIdleTimeClosedCounter: s.maxIdleTimeClosedCounter, + maxLifetimeClosedCounter: s.maxLifetimeClosedCounter, + } +} + +func getQueryType(ctx context.Context, query squirrel.Sqlizer) QueryType { + // Do we have an explicit query type set in the context? For raw execs, in + // lieu of better detection. e.g. "upsert" + if q, ok := ctx.Value(&QueryTypeContextKey).(QueryType); ok { + return q + } + + // is it a squirrel builder? + if _, ok := query.(squirrel.DeleteBuilder); ok { + return DeleteQueryType + } + if _, ok := query.(squirrel.InsertBuilder); ok { + return InsertQueryType + } + if _, ok := query.(squirrel.SelectBuilder); ok { + return SelectQueryType + } + if _, ok := query.(squirrel.UpdateBuilder); ok { + return UpdateQueryType + } + + // Try to guess based on the first word of the string. + // e.g. "SELECT * FROM table" + str, _, err := query.ToSql() + words := strings.Fields(strings.TrimSpace(strings.ToLower(str))) + if err == nil && len(words) > 0 { + // Only accept known keywords here, in case it's a more + // complex query. + for _, word := range []string{"delete", "insert", "select", "update"} { + if word == words[0] { + return QueryType(word) + } + } + } + + // Fresh out of ideas.
+ return UndefinedQueryType +} + +func (s *SessionWithMetrics) Get(ctx context.Context, dest interface{}, query squirrel.Sqlizer) (err error) { + queryType := string(getQueryType(ctx, query)) + timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { + s.queryDurationSummary.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Observe(v) + })) + defer func() { + timer.ObserveDuration() + s.queryCounter.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Inc() + }() + + err = s.SessionInterface.Get(ctx, dest, query) + return err +} + +func (s *SessionWithMetrics) GetRaw(ctx context.Context, dest interface{}, query string, args ...interface{}) (err error) { + return s.Get(ctx, dest, squirrel.Expr(query, args...)) +} + +func (s *SessionWithMetrics) Select(ctx context.Context, dest interface{}, query squirrel.Sqlizer) (err error) { + queryType := string(getQueryType(ctx, query)) + timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { + s.queryDurationSummary.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Observe(v) + })) + defer func() { + timer.ObserveDuration() + s.queryCounter.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Inc() + }() + + err = s.SessionInterface.Select(ctx, dest, query) + return err +} + +func (s *SessionWithMetrics) SelectRaw(ctx context.Context, dest interface{}, query string, args ...interface{}) (err error) { + return s.Select(ctx, dest, squirrel.Expr(query, args...)) +} + +func (s *SessionWithMetrics) Exec(ctx context.Context, query squirrel.Sqlizer) (result sql.Result, err error) { + queryType := string(getQueryType(ctx, query)) + timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { + s.queryDurationSummary.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Observe(v) + })) + defer func() { + timer.ObserveDuration() + s.queryCounter.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Inc() + }() + + result, err = s.SessionInterface.Exec(ctx, query) + return result, err +} + +func (s *SessionWithMetrics) ExecRaw(ctx context.Context, query string, args ...interface{}) (result sql.Result, err error) { + return s.Exec(ctx, squirrel.Expr(query, args...)) +} + +func (s *SessionWithMetrics) Ping(ctx context.Context, timeout time.Duration) (err error) { + queryType := "ping" + timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { + s.queryDurationSummary.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Observe(v) + })) + defer func() { + timer.ObserveDuration() + s.queryCounter.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Inc() + }() + + err = s.SessionInterface.Ping(ctx, timeout) + return err +} + +func (s *SessionWithMetrics) DeleteRange( + ctx context.Context, + start, end int64, + table string, + idCol string, +) (err error) { + queryType := "delete" + timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) { + s.queryDurationSummary.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": 
contextRoute(ctx), + }).Observe(v) + })) + defer func() { + timer.ObserveDuration() + s.queryCounter.With(prometheus.Labels{ + "query_type": queryType, + "error": fmt.Sprint(err != nil), + "route": contextRoute(ctx), + }).Inc() + }() + + err = s.SessionInterface.DeleteRange(ctx, start, end, table, idCol) + return err +} diff --git a/support/db/mock_session.go b/support/db/mock_session.go new file mode 100644 index 0000000000..9c3c4e7861 --- /dev/null +++ b/support/db/mock_session.go @@ -0,0 +1,130 @@ +package db + +import ( + "context" + "database/sql" + "time" + + "github.com/Masterminds/squirrel" + sq "github.com/Masterminds/squirrel" + "github.com/jmoiron/sqlx" + "github.com/stretchr/testify/mock" +) + +var _ SessionInterface = (*MockSession)(nil) + +type MockSession struct { + mock.Mock +} + +func (m *MockSession) Begin() error { + args := m.Called() + return args.Error(0) +} + +func (m *MockSession) BeginTx(opts *sql.TxOptions) error { + args := m.Called(opts) + return args.Error(0) +} + +func (m *MockSession) Rollback() error { + args := m.Called() + return args.Error(0) +} + +func (m *MockSession) Commit() error { + args := m.Called() + return args.Error(0) +} + +func (m *MockSession) GetTx() *sqlx.Tx { + args := m.Called() + return args.Get(0).(*sqlx.Tx) +} + +func (m *MockSession) GetTxOptions() *sql.TxOptions { + args := m.Called() + return args.Get(0).(*sql.TxOptions) +} + +func (m *MockSession) TruncateTables(ctx context.Context, tables []string) error { + args := m.Called(ctx, tables) + return args.Error(0) +} + +func (m *MockSession) Clone() SessionInterface { + args := m.Called() + return args.Get(0).(SessionInterface) +} + +func (m *MockSession) Close() error { + args := m.Called() + return args.Error(0) +} + +func (m *MockSession) Get(ctx context.Context, dest interface{}, query sq.Sqlizer) error { + args := m.Called(ctx, dest, query) + return args.Error(0) +} + +func (m *MockSession) GetRaw(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + argss := m.Called(ctx, dest, query, args) + return argss.Error(0) +} + +func (m *MockSession) Query(ctx context.Context, query squirrel.Sqlizer) (*sqlx.Rows, error) { + args := m.Called(ctx, query) + return args.Get(0).(*sqlx.Rows), args.Error(1) +} + +func (m *MockSession) QueryRaw(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { + argss := m.Called(ctx, query, args) + return argss.Get(0).(*sqlx.Rows), argss.Error(1) +} + +func (m *MockSession) Select(ctx context.Context, dest interface{}, query squirrel.Sqlizer) error { + argss := m.Called(ctx, dest, query) + return argss.Error(0) +} + +func (m *MockSession) SelectRaw(ctx context.Context, + dest interface{}, + query string, + args ...interface{}, +) error { + argss := m.Called(ctx, dest, query, args) + return argss.Error(0) +} + +func (m *MockSession) GetTable(name string) *Table { + args := m.Called(name) + return args.Get(0).(*Table) +} + +func (m *MockSession) Exec(ctx context.Context, query squirrel.Sqlizer) (sql.Result, error) { + args := m.Called(ctx, query) + return args.Get(0).(sql.Result), args.Error(1) +} + +func (m *MockSession) ExecRaw(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + argss := m.Called(ctx, query, args) + return argss.Get(0).(sql.Result), argss.Error(1) +} + +func (m *MockSession) NoRows(err error) bool { + args := m.Called(err) + return args.Get(0).(bool) +} + +func (m *MockSession) Ping(ctx context.Context, timeout time.Duration) error { + return m.Called(ctx, 
timeout).Error(0) +} + +func (m *MockSession) DeleteRange( + ctx context.Context, + start, end int64, + table string, + idCol string, +) (err error) { + return m.Called(ctx, start, end, table, idCol).Error(0) +} diff --git a/support/db/pg/pg.go b/support/db/pg/pg.go new file mode 100644 index 0000000000..b8c591ce2e --- /dev/null +++ b/support/db/pg/pg.go @@ -0,0 +1,15 @@ +package pg + +import ( + "github.com/lib/pq" + "github.com/stellar/go/support/errors" +) + +func IsUniqueViolation(err error) bool { + switch pgerr := errors.Cause(err).(type) { + case *pq.Error: + return string(pgerr.Code) == "23505" + default: + return false + } +} diff --git a/support/db/repo.go b/support/db/repo.go deleted file mode 100644 index 27ccc17c55..0000000000 --- a/support/db/repo.go +++ /dev/null @@ -1,289 +0,0 @@ -package db - -import ( - "database/sql" - "fmt" - "reflect" - "time" - - "github.com/jmoiron/sqlx" - sq "github.com/lann/squirrel" - "github.com/stellar/go/support/db/sqlutils" - "github.com/stellar/go/support/errors" - "github.com/stellar/go/support/log" - "golang.org/x/net/context" -) - -// Begin binds this repo to a new transaction. -func (r *Repo) Begin() error { - if r.tx != nil { - return errors.New("already in transaction") - } - - tx, err := r.DB.Beginx() - if err != nil { - return errors.Wrap(err, "beginx failed") - } - r.logBegin() - - r.tx = tx - return nil -} - -// Clone clones the receiver, returning a new instance backed by the same -// context and db. The result will not be bound to any transaction that the -// source is currently within. -func (r *Repo) Clone() *Repo { - return &Repo{ - DB: r.DB, - Ctx: r.Ctx, - } -} - -// Commit commits the current transaction -func (r *Repo) Commit() error { - if r.tx == nil { - return errors.New("not in transaction") - } - - err := r.tx.Commit() - r.logCommit() - r.tx = nil - return err -} - -func (r *Repo) DeleteRange( - start, end int64, - table string, - idCol string, -) error { - del := sq.Delete(table).Where( - fmt.Sprintf("%s >= ? AND %s < ?", idCol, idCol), - start, - end, - ) - _, err := r.Exec(del) - return err -} - -// Get runs `query`, setting the first result found on `dest`, if -// any. -func (r *Repo) Get(dest interface{}, query sq.Sqlizer) error { - sql, args, err := r.build(query) - if err != nil { - return err - } - return r.GetRaw(dest, sql, args...) -} - -// GetRaw runs `query` with `args`, setting the first result found on -// `dest`, if any. -func (r *Repo) GetRaw(dest interface{}, query string, args ...interface{}) error { - query = r.conn().Rebind(query) - start := time.Now() - err := r.conn().Get(dest, query, args...) - r.log("get", start, query, args) - - if err == nil { - return nil - } - - if r.NoRows(err) { - return err - } - - return errors.Wrap(err, "get failed") -} - -// Exec runs `query` -func (r *Repo) Exec(query sq.Sqlizer) (sql.Result, error) { - sql, args, err := r.build(query) - if err != nil { - return nil, err - } - return r.ExecRaw(sql, args...) -} - -// ExecAll runs all sql commands in `script` against `r` within a single -// transaction. 
-func (r *Repo) ExecAll(script string) error { - err := r.Begin() - if err != nil { - return err - } - - defer r.Rollback() - - for _, cmd := range sqlutils.AllStatements(script) { - _, err = r.ExecRaw(cmd) - if err != nil { - return err - } - } - - return r.Commit() -} - -// ExecRaw runs `query` with `args` -func (r *Repo) ExecRaw(query string, args ...interface{}) (sql.Result, error) { - query = r.conn().Rebind(query) - start := time.Now() - result, err := r.conn().Exec(query, args...) - r.log("exec", start, query, args) - - if err == nil { - return result, nil - } - - if r.NoRows(err) { - return nil, err - } - - return nil, errors.Wrap(err, "exec failed") -} - -// NoRows returns true if the provided error resulted from a query that found -// no results. -func (r *Repo) NoRows(err error) bool { - return err == sql.ErrNoRows -} - -// Query runs `query`, returns a *sqlx.Rows instance -func (r *Repo) Query(query sq.Sqlizer) (*sqlx.Rows, error) { - sql, args, err := r.build(query) - if err != nil { - return nil, err - } - return r.QueryRaw(sql, args...) -} - -// QueryRaw runs `query` with `args` -func (r *Repo) QueryRaw(query string, args ...interface{}) (*sqlx.Rows, error) { - query = r.conn().Rebind(query) - start := time.Now() - result, err := r.conn().Queryx(query, args...) - r.log("query", start, query, args) - - if err == nil { - return result, nil - } - - if r.NoRows(err) { - return nil, err - } - - return nil, errors.Wrap(err, "query failed") -} - -// Rollback rolls back the current transaction -func (r *Repo) Rollback() error { - if r.tx == nil { - return errors.New("not in transaction") - } - - err := r.tx.Rollback() - r.logRollback() - r.tx = nil - return err -} - -// Select runs `query`, setting the results found on `dest`. -func (r *Repo) Select(dest interface{}, query sq.Sqlizer) error { - sql, args, err := r.build(query) - if err != nil { - return err - } - return r.SelectRaw(dest, sql, args...) -} - -// SelectRaw runs `query` with `args`, setting the results found on `dest`. -func (r *Repo) SelectRaw( - dest interface{}, - query string, - args ...interface{}, -) error { - r.clearSliceIfPossible(dest) - query = r.conn().Rebind(query) - start := time.Now() - err := r.conn().Select(dest, query, args...) - r.log("select", start, query, args) - - if err == nil { - return nil - } - - if r.NoRows(err) { - return err - } - - return errors.Wrap(err, "select failed") -} - -// build converts the provided sql builder `b` into the sql and args to execute -// against the raw database connections. -func (r *Repo) build(b sq.Sqlizer) (sql string, args []interface{}, err error) { - sql, args, err = b.ToSql() - - if err != nil { - err = errors.Wrap(err, "to-sql failed") - } - return -} - -// clearSliceIfPossible is a utility function that clears a slice if the -// provided interface wraps one. In the event that `dest` is not a pointer to a -// slice this func will fail with a warning, this allowing the forthcoming db -// select fail more concretely due to an incompatible destination. 
-func (r *Repo) clearSliceIfPossible(dest interface{}) { - v := reflect.ValueOf(dest) - vt := v.Type() - - if vt.Kind() != reflect.Ptr { - log.Warn("cannot clear slice: dest is not pointer") - return - } - - if vt.Elem().Kind() != reflect.Slice { - log.Warn("cannot clear slice: dest is a pointer, but not to a slice") - return - } - - reflect.Indirect(v).SetLen(0) -} - -func (r *Repo) conn() Conn { - if r.tx != nil { - return r.tx - } - - return r.DB -} - -func (r *Repo) log(typ string, start time.Time, query string, args []interface{}) { - log. - Ctx(r.logCtx()). - WithField("args", args). - WithField("sql", query). - WithField("dur", time.Since(start).String()). - Debugf("sql: %s", typ) -} - -func (r *Repo) logBegin() { - log.Ctx(r.logCtx()).Debug("sql: begin") -} - -func (r *Repo) logCommit() { - log.Ctx(r.logCtx()).Debug("sql: commit") -} - -func (r *Repo) logRollback() { - log.Ctx(r.logCtx()).Debug("sql: rollback") -} - -func (r *Repo) logCtx() context.Context { - if r.Ctx != nil { - return r.Ctx - } - - return context.Background() -} diff --git a/support/db/repo_test.go b/support/db/repo_test.go deleted file mode 100644 index 81979066c2..0000000000 --- a/support/db/repo_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package db - -import ( - "testing" - - "github.com/stellar/go/support/db/dbtest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestRepo(t *testing.T) { - db := dbtest.Postgres().Load(testSchema) - defer db.Close() - - assert := assert.New(t) - require := require.New(t) - repo := &Repo{DB: db.Open()} - defer repo.DB.Close() - - var count int - err := repo.GetRaw(&count, "SELECT COUNT(*) FROM people") - assert.NoError(err) - assert.Equal(3, count) - - var names []string - err = repo.SelectRaw(&names, "SELECT name FROM people") - assert.NoError(err) - assert.Len(names, 3) - - ret, err := repo.ExecRaw("DELETE FROM people") - assert.NoError(err) - deleted, err := ret.RowsAffected() - assert.NoError(err) - assert.Equal(int64(3), deleted) - - // Test args - db.Load(testSchema) - var name string - err = repo.GetRaw( - &name, - "SELECT name FROM people WHERE hunger_level = ?", - 1000000, - ) - assert.NoError(err) - assert.Equal("scott", name) - - // Test NoRows - err = repo.GetRaw( - &name, - "SELECT name FROM people WHERE hunger_level = ?", - 1234, - ) - assert.True(repo.NoRows(err)) - - // Test transactions - db.Load(testSchema) - require.NoError(repo.Begin(), "begin failed") - err = repo.GetRaw(&count, "SELECT COUNT(*) FROM people") - assert.NoError(err) - assert.Equal(3, count) - _, err = repo.ExecRaw("DELETE FROM people") - assert.NoError(err) - err = repo.GetRaw(&count, "SELECT COUNT(*) FROM people") - assert.NoError(err) - assert.Equal(0, count, "people did not appear deleted inside transaction") - assert.NoError(repo.Rollback(), "rollback failed") - - // Ensure commit works - require.NoError(repo.Begin(), "begin failed") - repo.ExecRaw("DELETE FROM people") - assert.NoError(repo.Commit(), "commit failed") - err = repo.GetRaw(&count, "SELECT COUNT(*) FROM people") - assert.NoError(err) - assert.Equal(0, count) - - // ensure that selecting into a populated slice clears the slice first - db.Load(testSchema) - require.Len(names, 3, "ids slice was not preloaded with data") - err = repo.SelectRaw(&names, "SELECT name FROM people limit 2") - assert.NoError(err) - assert.Len(names, 2) - -} - -const testSchema = ` -CREATE TABLE IF NOT EXISTS people ( - name character varying NOT NULL, - hunger_level integer NOT NULL -); -DELETE FROM people; 
-INSERT INTO people (name, hunger_level) VALUES ('scott', 1000000); -INSERT INTO people (name, hunger_level) VALUES ('jed', 10); -INSERT INTO people (name, hunger_level) VALUES ('bartek', 10); -` diff --git a/support/db/round_trip_probe.go b/support/db/round_trip_probe.go new file mode 100644 index 0000000000..180702bd2e --- /dev/null +++ b/support/db/round_trip_probe.go @@ -0,0 +1,51 @@ +package db + +import ( + "context" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +type roundTripProbe struct { + session SessionInterface + roundTripTimeSummary prometheus.Summary + + closeChan chan struct{} + closeOnce sync.Once +} + +func (p *roundTripProbe) start() { + p.closeChan = make(chan struct{}) + // session must be cloned because it will be used concurrently in a + // separate goroutine in roundTripProbe + p.session = p.session.Clone() + ticker := time.NewTicker(time.Second) + + go func() { + for { + select { + case <-ticker.C: + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + startTime := time.Now() + _, err := p.session.ExecRaw(ctx, "select 1") + duration := time.Since(startTime).Seconds() + if err != nil { + duration = 1 + } + p.roundTripTimeSummary.Observe(duration) + cancel() + case <-p.closeChan: + ticker.Stop() + return + } + } + }() +} + +func (p *roundTripProbe) close() { + p.closeOnce.Do(func() { + close(p.closeChan) + }) +} diff --git a/support/db/schema/main.go b/support/db/schema/main.go new file mode 100644 index 0000000000..8eff0ff8f4 --- /dev/null +++ b/support/db/schema/main.go @@ -0,0 +1,62 @@ +package schema + +import ( + "context" + "database/sql" + "errors" + + migrate "github.com/rubenv/sql-migrate" + "github.com/stellar/go/support/db" +) + +// MigrateDir represents a direction in which to perform schema migrations. +type MigrateDir string + +const ( + // MigrateUp causes migrations to be run in the "up" direction. + MigrateUp MigrateDir = "up" + // MigrateDown causes migrations to be run in the "down" direction. + MigrateDown MigrateDir = "down" + // MigrateRedo causes migrations to be run down, then up + MigrateRedo MigrateDir = "redo" +) + +// Init installs the latest schema into db after clearing it first +func Init(ctx context.Context, db *db.Session, latest []byte) error { + return db.ExecAll(ctx, string(latest)) +} + +// Migrate performs schema migration. Migrations can occur in one of three +// ways: +// +// - up: migrations are performed from the currently installed version upwards. +// If count is 0, all unapplied migrations will be run. +// +// - down: migrations are performed from the current version downward. If count +// is 0, all applied migrations will be run in a downward direction. +// +// - redo: migrations are first run downward `count` times, and then are run +// upward back to the current version at the start of the process. If count is +// 0, a count of 1 will be assumed.
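+//
+// For illustration only, a minimal usage sketch that applies all pending "up"
+// migrations; the DSN and migrations directory are placeholders, and
+// FileMigrationSource is assumed to come from the sql-migrate package:
+//
+//	rawDB, err := sql.Open("postgres", "postgres://localhost/example?sslmode=disable")
+//	if err != nil {
+//		// handle the connection error
+//	}
+//	migrations := &migrate.FileMigrationSource{Dir: "db/migrations"}
+//	applied, err := schema.Migrate(rawDB, migrations, schema.MigrateUp, 0)
+//	// applied is the number of migrations that were run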
+func Migrate(db *sql.DB, migrations migrate.MigrationSource, dir MigrateDir, count int) (int, error) { + switch dir { + case MigrateUp: + return migrate.ExecMax(db, "postgres", migrations, migrate.Up, count) + case MigrateDown: + return migrate.ExecMax(db, "postgres", migrations, migrate.Down, count) + case MigrateRedo: + + if count == 0 { + count = 1 + } + + down, err := migrate.ExecMax(db, "postgres", migrations, migrate.Down, count) + if err != nil { + return down, err + } + + return migrate.ExecMax(db, "postgres", migrations, migrate.Up, down) + default: + return 0, errors.New("Invalid migration direction") + } +} diff --git a/support/db/select_builder.go b/support/db/select_builder.go new file mode 100644 index 0000000000..968c4b58c2 --- /dev/null +++ b/support/db/select_builder.go @@ -0,0 +1,71 @@ +package db + +import ( + "context" + "github.com/stellar/go/support/errors" +) + +// Exec executes the query represented by the builder, populating the +// destination with the results returned by running the query against the +// current database session. +func (sb *SelectBuilder) Exec(ctx context.Context) error { + err := sb.Table.Session.Select(ctx, sb.dest, sb.sql) + if err != nil { + return errors.Wrap(err, "select failed") + } + + return nil +} + +// Limit is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.Limit +func (sb *SelectBuilder) Limit(limit uint64) *SelectBuilder { + sb.sql = sb.sql.Limit(limit) + return sb +} + +// Offset is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.Offset +func (sb *SelectBuilder) Offset(offset uint64) *SelectBuilder { + sb.sql = sb.sql.Offset(offset) + return sb +} + +// OrderBy is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.OrderBy +func (sb *SelectBuilder) OrderBy( + orderBys ...string, +) *SelectBuilder { + sb.sql = sb.sql.OrderBy(orderBys...) + return sb +} + +// Prefix is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.Prefix +func (sb *SelectBuilder) Prefix( + sql string, + args ...interface{}, +) *SelectBuilder { + sb.sql = sb.sql.Prefix(sql, args...) + return sb +} + +// Suffix is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.Suffix +func (sb *SelectBuilder) Suffix( + sql string, + args ...interface{}, +) *SelectBuilder { + sb.sql = sb.sql.Suffix(sql, args...) + return sb +} + +// Where is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#SelectBuilder.Where +func (sb *SelectBuilder) Where( + pred interface{}, + args ...interface{}, +) *SelectBuilder { + sb.sql = sb.sql.Where(pred, args...) 
+ return sb +} diff --git a/support/db/select_builder_test.go b/support/db/select_builder_test.go new file mode 100644 index 0000000000..93cc888aeb --- /dev/null +++ b/support/db/select_builder_test.go @@ -0,0 +1,41 @@ +package db + +import ( + "context" + "testing" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSelectBuilder_Exec(t *testing.T) { + db := dbtest.Postgres(t).Load(testSchema) + defer db.Close() + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + + var results []person + + tbl := sess.GetTable("people") + sb := tbl.Select(&results, "name = ?", "scott") + sql, args, err := sb.sql.ToSql() + require.NoError(t, err) + + assert.Contains(t, sql, "name") + assert.Contains(t, sql, "hunger_level") + assert.NotContains(t, sql, "-") + + if assert.Len(t, args, 1) { + assert.Equal(t, "scott", args[0]) + } + + err = sb.Exec(context.Background()) + + if assert.NoError(t, err, "query error") { + if assert.Len(t, results, 1) { + assert.Equal(t, "scott", results[0].Name) + assert.Equal(t, "1000000", results[0].HungerLevel) + } + } +} diff --git a/support/db/session.go b/support/db/session.go new file mode 100644 index 0000000000..4bc0218f90 --- /dev/null +++ b/support/db/session.go @@ -0,0 +1,406 @@ +package db + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "strings" + "time" + + sq "github.com/Masterminds/squirrel" + "github.com/jmoiron/sqlx" + "github.com/stellar/go/support/db/sqlutils" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" +) + +// Begin binds this session to a new transaction. +func (s *Session) Begin() error { + if s.tx != nil { + return errors.New("already in transaction") + } + + tx, err := s.DB.BeginTxx(context.Background(), nil) + if err != nil { + if knownErr := s.replaceWithKnownError(err, context.Background()); knownErr != nil { + return knownErr + } + + return errors.Wrap(err, "beginx failed") + } + log.Debug("sql: begin") + s.tx = tx + s.txOptions = nil + return nil +} + +// BeginTx binds this session to a new transaction which is configured with the +// given transaction options +func (s *Session) BeginTx(opts *sql.TxOptions) error { + if s.tx != nil { + return errors.New("already in transaction") + } + + tx, err := s.DB.BeginTxx(context.Background(), opts) + if err != nil { + if knownErr := s.replaceWithKnownError(err, context.Background()); knownErr != nil { + return knownErr + } + + return errors.Wrap(err, "beginTx failed") + } + log.Debug("sql: begin") + + s.tx = tx + s.txOptions = opts + return nil +} + +func (s *Session) GetTx() *sqlx.Tx { + return s.tx +} + +func (s *Session) GetTxOptions() *sql.TxOptions { + return s.txOptions +} + +// Clone clones the receiver, returning a new instance backed by the same +// context and db. The result will not be bound to any transaction that the +// source is currently within. +func (s *Session) Clone() SessionInterface { + return &Session{ + DB: s.DB, + } +} + +// Close delegates to the underlying database Close method, closing the database +// and releasing any resources. It is rare to Close a DB, as the DB handle is meant +// to be long-lived and shared between many goroutines. 
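+//
+// For illustration, a sketch of the usual session lifetime (the DSN is a
+// placeholder):
+//
+//	sess, err := db.Open("postgres", "postgres://localhost/example?sslmode=disable")
+//	if err != nil {
+//		// handle the open error
+//	}
+//	defer sess.Close()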
+func (s *Session) Close() error { + return s.DB.Close() +} + +// Commit commits the current transaction +func (s *Session) Commit() error { + if s.tx == nil { + return errors.New("not in transaction") + } + + err := s.tx.Commit() + log.Debug("sql: commit") + s.tx = nil + s.txOptions = nil + return err +} + +// Dialect returns the SQL dialect that this session is configured to use +func (s *Session) Dialect() string { + return s.DB.DriverName() +} + +// DeleteRange deletes a range of rows from a sql table between `start` and +// `end` (exclusive). +func (s *Session) DeleteRange( + ctx context.Context, + start, end int64, + table string, + idCol string, +) error { + del := sq.Delete(table).Where( + fmt.Sprintf("%s >= ? AND %s < ?", idCol, idCol), + start, + end, + ) + _, err := s.Exec(ctx, del) + return err +} + +// Get runs `query`, setting the first result found on `dest`, if +// any. +func (s *Session) Get(ctx context.Context, dest interface{}, query sq.Sqlizer) error { + sql, args, err := s.build(query) + if err != nil { + return err + } + return s.GetRaw(ctx, dest, sql, args...) +} + +// GetRaw runs `query` with `args`, setting the first result found on +// `dest`, if any. +func (s *Session) GetRaw(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + query, err := s.ReplacePlaceholders(query) + if err != nil { + return errors.Wrap(err, "replace placeholders failed") + } + + start := time.Now() + err = s.conn().GetContext(ctx, dest, query, args...) + s.log(ctx, "get", start, query, args) + + if err == nil { + return nil + } + + if knownErr := s.replaceWithKnownError(err, ctx); knownErr != nil { + return knownErr + } + + if s.NoRows(err) { + return err + } + + return errors.Wrap(err, "get failed") +} + +// GetTable translates the provided struct into a Table, +func (s *Session) GetTable(name string) *Table { + return &Table{ + Name: name, + Session: s, + } +} + +func (s *Session) TruncateTables(ctx context.Context, tables []string) error { + truncateCmd := fmt.Sprintf("truncate %s restart identity cascade", strings.Join(tables[:], ",")) + _, err := s.ExecRaw(ctx, truncateCmd) + return err +} + +// Exec runs `query` +func (s *Session) Exec(ctx context.Context, query sq.Sqlizer) (sql.Result, error) { + sql, args, err := s.build(query) + if err != nil { + return nil, err + } + return s.ExecRaw(ctx, sql, args...) +} + +// ExecAll runs all sql commands in `script` against `r` within a single +// transaction. +func (s *Session) ExecAll(ctx context.Context, script string) error { + err := s.Begin() + if err != nil { + return err + } + + defer s.Rollback() + + for _, cmd := range sqlutils.AllStatements(script) { + _, err = s.ExecRaw(ctx, cmd) + if err != nil { + return err + } + } + + return s.Commit() +} + +// ExecRaw runs `query` with `args` +func (s *Session) ExecRaw(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + query, err := s.ReplacePlaceholders(query) + if err != nil { + return nil, errors.Wrap(err, "replace placeholders failed") + } + + start := time.Now() + result, err := s.conn().ExecContext(ctx, query, args...) + s.log(ctx, "exec", start, query, args) + + if err == nil { + return result, nil + } + + if knownErr := s.replaceWithKnownError(err, ctx); knownErr != nil { + return nil, knownErr + } + + if s.NoRows(err) { + return nil, err + } + + return nil, errors.Wrap(err, "exec failed") +} + +// NoRows returns true if the provided error resulted from a query that found +// no results. 
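+//
+// For illustration, a sketch of the intended call pattern; `row` and `q` are
+// placeholder values:
+//
+//	err := sess.Get(ctx, &row, q)
+//	switch {
+//	case sess.NoRows(err):
+//		// no matching record, frequently not a failure
+//	case err != nil:
+//		// a real database error
+//	}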
+func (s *Session) NoRows(err error) bool { + return err == sql.ErrNoRows +} + +// replaceWithKnownError tries to replace Postgres error with package error. +// Returns a new error if the err is known. +func (s *Session) replaceWithKnownError(err error, ctx context.Context) error { + switch { + case ctx.Err() == context.Canceled: + return ErrCancelled + case ctx.Err() == context.DeadlineExceeded: + // if libpq waits too long to obtain conn from pool, can get ctx timeout before server trip + return ErrTimeout + case strings.Contains(err.Error(), "pq: canceling statement due to user request"): + return ErrTimeout + case strings.Contains(err.Error(), "pq: canceling statement due to conflict with recovery"): + return ErrConflictWithRecovery + case strings.Contains(err.Error(), "driver: bad connection"): + return ErrBadConnection + default: + return nil + } +} + +// Query runs `query`, returns a *sqlx.Rows instance +func (s *Session) Query(ctx context.Context, query sq.Sqlizer) (*sqlx.Rows, error) { + sql, args, err := s.build(query) + if err != nil { + return nil, err + } + return s.QueryRaw(ctx, sql, args...) +} + +// QueryRaw runs `query` with `args` +func (s *Session) QueryRaw(ctx context.Context, query string, args ...interface{}) (*sqlx.Rows, error) { + query, err := s.ReplacePlaceholders(query) + if err != nil { + return nil, errors.Wrap(err, "replace placeholders failed") + } + + start := time.Now() + result, err := s.conn().QueryxContext(ctx, query, args...) + s.log(ctx, "query", start, query, args) + + if err == nil { + return result, nil + } + + if knownErr := s.replaceWithKnownError(err, ctx); knownErr != nil { + return nil, knownErr + } + + if s.NoRows(err) { + return nil, err + } + + return nil, errors.Wrap(err, "query failed") +} + +// ReplacePlaceholders replaces the '?' parameter placeholders in the provided +// sql query with a sql dialect appropriate version. Use '??' to escape a +// placeholder. +func (s *Session) ReplacePlaceholders(query string) (string, error) { + var format sq.PlaceholderFormat = sq.Question + + if s.DB.DriverName() == "postgres" { + format = sq.Dollar + } + return format.ReplacePlaceholders(query) +} + +// Rollback rolls back the current transaction +func (s *Session) Rollback() error { + if s.tx == nil { + return errors.New("not in transaction") + } + + err := s.tx.Rollback() + log.Debug("sql: rollback") + s.tx = nil + s.txOptions = nil + return err +} + +// Ping verifies a connection to the database is still alive, +// establishing a connection if necessary. +func (s *Session) Ping(ctx context.Context, timeout time.Duration) error { + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + return s.DB.PingContext(ctx) +} + +// Select runs `query`, setting the results found on `dest`. +func (s *Session) Select(ctx context.Context, dest interface{}, query sq.Sqlizer) error { + sql, args, err := s.build(query) + if err != nil { + return err + } + return s.SelectRaw(ctx, dest, sql, args...) +} + +// SelectRaw runs `query` with `args`, setting the results found on `dest`. +func (s *Session) SelectRaw( + ctx context.Context, + dest interface{}, + query string, + args ...interface{}, +) error { + s.clearSliceIfPossible(dest) + query, err := s.ReplacePlaceholders(query) + if err != nil { + return errors.Wrap(err, "replace placeholders failed") + } + + start := time.Now() + err = s.conn().SelectContext(ctx, dest, query, args...) 
+ s.log(ctx, "select", start, query, args) + + if err == nil { + return nil + } + + if knownErr := s.replaceWithKnownError(err, ctx); knownErr != nil { + return knownErr + } + + if s.NoRows(err) { + return err + } + + return errors.Wrap(err, "select failed") +} + +// build converts the provided sql builder `b` into the sql and args to execute +// against the raw database connections. +func (s *Session) build(b sq.Sqlizer) (sql string, args []interface{}, err error) { + sql, args, err = b.ToSql() + + if err != nil { + err = errors.Wrap(err, "to-sql failed") + } + return +} + +// clearSliceIfPossible is a utility function that clears a slice if the +// provided interface wraps one. In the event that `dest` is not a pointer to a +// slice this func will fail with a warning, this allowing the forthcoming db +// select fail more concretely due to an incompatible destination. +func (s *Session) clearSliceIfPossible(dest interface{}) { + v := reflect.ValueOf(dest) + vt := v.Type() + + if vt.Kind() != reflect.Ptr { + log.Warn("cannot clear slice: dest is not pointer") + return + } + + if vt.Elem().Kind() != reflect.Slice { + log.Warn("cannot clear slice: dest is a pointer, but not to a slice") + return + } + + reflect.Indirect(v).SetLen(0) +} + +func (s *Session) conn() Conn { + if s.tx != nil { + return s.tx + } + + return s.DB +} + +func (s *Session) log(ctx context.Context, typ string, start time.Time, query string, args []interface{}) { + log. + WithField("args", args). + WithField("sql", query). + WithField("dur", time.Since(start).String()). + Debugf("sql: %s", typ) +} diff --git a/support/db/session_test.go b/support/db/session_test.go new file mode 100644 index 0000000000..742167bee0 --- /dev/null +++ b/support/db/session_test.go @@ -0,0 +1,131 @@ +package db + +import ( + "context" + "testing" + "time" + + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestServerTimeout(t *testing.T) { + db := dbtest.Postgres(t).Load(testSchema) + defer db.Close() + + var cancel context.CancelFunc + ctx := context.Background() + ctx, cancel = context.WithTimeout(ctx, time.Duration(1)) + assert := assert.New(t) + + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + defer cancel() + + var count int + err := sess.GetRaw(ctx, &count, "SELECT pg_sleep(2), COUNT(*) FROM people") + assert.ErrorIs(err, ErrTimeout, "long running db server operation past context timeout, should return timeout") +} + +func TestUserCancel(t *testing.T) { + db := dbtest.Postgres(t).Load(testSchema) + defer db.Close() + + var cancel context.CancelFunc + ctx := context.Background() + ctx, cancel = context.WithCancel(ctx) + assert := assert.New(t) + + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + defer cancel() + + var count int + cancel() + err := sess.GetRaw(ctx, &count, "SELECT pg_sleep(2), COUNT(*) FROM people") + assert.ErrorIs(err, ErrCancelled, "any ongoing db server operation should return error immediately after user cancel") +} + +func TestSession(t *testing.T) { + db := dbtest.Postgres(t).Load(testSchema) + defer db.Close() + + ctx := context.Background() + assert := assert.New(t) + require := require.New(t) + sess := &Session{DB: db.Open()} + defer sess.DB.Close() + + assert.Equal("postgres", sess.Dialect()) + + var count int + err := sess.GetRaw(ctx, &count, "SELECT COUNT(*) FROM people") + assert.NoError(err) + assert.Equal(3, count) + + var names []string + err = sess.SelectRaw(ctx, &names, "SELECT name FROM people") + 
assert.NoError(err) + assert.Len(names, 3) + + ret, err := sess.ExecRaw(ctx, "DELETE FROM people") + assert.NoError(err) + deleted, err := ret.RowsAffected() + assert.NoError(err) + assert.Equal(int64(3), deleted) + + // Test args (NOTE: there is a simple escaped arg to ensure no error is raised + // during execution) + db.Load(testSchema) + var name string + err = sess.GetRaw(ctx, + &name, + "SELECT name FROM people WHERE hunger_level = ? AND name != '??'", + 1000000, + ) + assert.NoError(err) + assert.Equal("scott", name) + + // Test NoRows + err = sess.GetRaw(ctx, + &name, + "SELECT name FROM people WHERE hunger_level = ?", + 1234, + ) + assert.True(sess.NoRows(err)) + + // Test transactions + db.Load(testSchema) + require.NoError(sess.Begin(), "begin failed") + err = sess.GetRaw(ctx, &count, "SELECT COUNT(*) FROM people") + assert.NoError(err) + assert.Equal(3, count) + _, err = sess.ExecRaw(ctx, "DELETE FROM people") + assert.NoError(err) + err = sess.GetRaw(ctx, &count, "SELECT COUNT(*) FROM people") + assert.NoError(err) + assert.Equal(0, count, "people did not appear deleted inside transaction") + assert.NoError(sess.Rollback(), "rollback failed") + + // Ensure commit works + require.NoError(sess.Begin(), "begin failed") + sess.ExecRaw(ctx, "DELETE FROM people") + assert.NoError(sess.Commit(), "commit failed") + err = sess.GetRaw(ctx, &count, "SELECT COUNT(*) FROM people") + assert.NoError(err) + assert.Equal(0, count) + + // ensure that selecting into a populated slice clears the slice first + db.Load(testSchema) + require.Len(names, 3, "names slice was not preloaded with data") + err = sess.SelectRaw(ctx, &names, "SELECT name FROM people limit 2") + assert.NoError(err) + assert.Len(names, 2) + + // Test ReplacePlaceholders + out, err := sess.ReplacePlaceholders("? = ? = ? = ??") + if assert.NoError(err) { + assert.Equal("$1 = $2 = $3 = ?", out) + } +} diff --git a/support/db/sqlite.go b/support/db/sqlite.go deleted file mode 100644 index 6f596758a2..0000000000 --- a/support/db/sqlite.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build cgo - -package db - -import ( - _ "github.com/mattn/go-sqlite3" -) - -// This file simply includes the sqlite3 driver when in a cgo environment, -// enabling it for use when using the db package -var _ = 0 diff --git a/support/db/table.go b/support/db/table.go new file mode 100644 index 0000000000..8327bef0e9 --- /dev/null +++ b/support/db/table.go @@ -0,0 +1,88 @@ +package db + +import sq "github.com/Masterminds/squirrel" + +// Delete returns a new query builder configured to delete rows from the table. +// +func (tbl *Table) Delete( + pred interface{}, + args ...interface{}, +) *DeleteBuilder { + return &DeleteBuilder{ + Table: tbl, + sql: sq.Delete(tbl.Name).Where(pred, args...), + } +} + +// Get returns a new query builder configured to select into the provided +// `dest`. +// +// Get behaves the same way as Select, but automatically limits the query +// generated to a single value and only populates a single struct. +func (tbl *Table) Get( + dest interface{}, + pred interface{}, + args ...interface{}, +) *GetBuilder { + + cols := ColumnsForStruct(dest) + sql := sq.Select(cols...).From(tbl.Name).Where(pred, args...).Limit(1) + + return &GetBuilder{ + Table: tbl, + dest: dest, + sql: sql, + } +} + +// Insert returns a new query builder configured to insert structs into the +// table. +// +// Insert takes one or more struct (or pointer to struct) values, each of which +// represents a single row to be created in the table.
The first value provided +// in a call to this function will operate as the template for the insert and +// will determine what columns are populated in the query. For this reason, it +// is highly recommended that you always use the same struct type for any +// single call to this function. +// +// An InsertBuilder uses the "db" struct tag to determine the column names that +// a given struct should be mapped to, and by default the unmodified name of +// the field will be used. Similar to other struct tags, the value "-" will +// cause the field to be skipped. +// +// NOTE: using the omitempty option, such as used with json struct tags, is not +// supported. +func (tbl *Table) Insert(rows ...interface{}) *InsertBuilder { + return &InsertBuilder{ + Table: tbl, + sql: sq.Insert(tbl.Name), + rows: rows, + } +} + +// Select returns a new query builder configured to select into the provided +// `dest`. +func (tbl *Table) Select( + dest interface{}, + pred interface{}, + args ...interface{}, +) *SelectBuilder { + + cols := ColumnsForStruct(dest) + sql := sq.Select(cols...).From(tbl.Name).Where(pred, args...) + + return &SelectBuilder{ + Table: tbl, + dest: dest, + sql: sql, + } +} + +// Update returns a new query builder configured to update the table. +// See docs for `UpdateBuilder.Exec` for more documentation. +func (tbl *Table) Update() *UpdateBuilder { + return &UpdateBuilder{ + Table: tbl, + sql: sq.Update(tbl.Name), + } +} diff --git a/support/db/update_builder.go b/support/db/update_builder.go new file mode 100644 index 0000000000..f0ea779dfc --- /dev/null +++ b/support/db/update_builder.go @@ -0,0 +1,109 @@ +package db + +import ( + "context" + "database/sql" + "reflect" + + "github.com/stellar/go/support/errors" +) + +// Exec executes the query that has been previously configured on the +// UpdateBuilder. +func (ub *UpdateBuilder) Exec(ctx context.Context) (sql.Result, error) { + r, err := ub.Table.Session.Exec(ctx, ub.sql) + if err != nil { + return nil, errors.Wrap(err, "update failed") + } + + return r, nil +} + +// Limit is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#UpdateBuilder.Limit
func (ub *UpdateBuilder) Limit(limit uint64) *UpdateBuilder { + ub.sql = ub.sql.Limit(limit) + return ub +} + +// Offset is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#UpdateBuilder.Offset +func (ub *UpdateBuilder) Offset(offset uint64) *UpdateBuilder { + ub.sql = ub.sql.Offset(offset) + return ub +} + +// OrderBy is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#UpdateBuilder.OrderBy +func (ub *UpdateBuilder) OrderBy( + orderBys ...string, +) *UpdateBuilder { + ub.sql = ub.sql.OrderBy(orderBys...) + return ub +} + +// Prefix is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#UpdateBuilder.Prefix +func (ub *UpdateBuilder) Prefix( + sql string, + args ...interface{}, +) *UpdateBuilder { + ub.sql = ub.sql.Prefix(sql, args...) + return ub +} + +// Set is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#UpdateBuilder.Set +func (ub *UpdateBuilder) Set(column string, value interface{}) *UpdateBuilder { + ub.sql = ub.sql.Set(column, value) + return ub +} + +// SetMap is a passthrough call to the squirrel.
See +// https://godoc.org/github.com/Masterminds/squirrel#UpdateBuilder.SetMap +func (ub *UpdateBuilder) SetMap(clauses map[string]interface{}) *UpdateBuilder { + ub.sql = ub.sql.SetMap(clauses) + return ub +} + +// SetStruct is using `db` tag on the struct and updates the query with struct +// values for each field (except `ignored` fields). +func (ub *UpdateBuilder) SetStruct(s interface{}, ignored []string) *UpdateBuilder { + ignoredMap := map[string]bool{} + for _, ig := range ignored { + ignoredMap[ig] = true + } + + cols := ColumnsForStruct(s) + row := reflect.ValueOf(s) + rvals := mapper.FieldsByName(row, cols) + + for i, col := range cols { + if ignoredMap[col] { + continue + } + ub.Set(col, rvals[i].Interface()) + } + + return ub +} + +// Suffix is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#UpdateBuilder.Suffix +func (ub *UpdateBuilder) Suffix( + sql string, + args ...interface{}, +) *UpdateBuilder { + ub.sql = ub.sql.Suffix(sql, args...) + return ub +} + +// Where is a passthrough call to the squirrel. See +// https://godoc.org/github.com/Masterminds/squirrel#UpdateBuilder.Where +func (ub *UpdateBuilder) Where( + pred interface{}, + args ...interface{}, +) *UpdateBuilder { + ub.sql = ub.sql.Where(pred, args...) + return ub +} diff --git a/support/env/env.go b/support/env/env.go new file mode 100644 index 0000000000..010dca70f5 --- /dev/null +++ b/support/env/env.go @@ -0,0 +1,47 @@ +package env + +import ( + "os" + "strconv" + "time" + + "github.com/stellar/go/support/errors" +) + +// String returns the value of the environment variable "name". +// If name is not set, it returns value. +func String(name string, value string) string { + if s := os.Getenv(name); s != "" { + value = s + } + return value +} + +// Int returns the value of the environment variable "name" as an int. +// If name is not set, it returns value. +func Int(name string, value int) int { + if s := os.Getenv(name); s != "" { + var err error + value, err = strconv.Atoi(s) + if err != nil { + panic(errors.Wrapf(err, "env var %q cannot be parsed as int", name)) + } + } + return value +} + +// Duration returns the value of the environment variable "name" as a +// time.Duration where the value of the environment variable is parsed as a +// duration string as defined in the Go stdlib time documentation. e.g. 5m30s. +// If name is not set, it returns value. +// Ref: https://golang.org/pkg/time/#ParseDuration +func Duration(name string, value time.Duration) time.Duration { + if s := os.Getenv(name); s != "" { + var err error + value, err = time.ParseDuration(s) + if err != nil { + panic(errors.Wrapf(err, "env var %q cannot be parsed as time.Duration", name)) + } + } + return value +} diff --git a/support/env/env_test.go b/support/env/env_test.go new file mode 100644 index 0000000000..e2461cfd34 --- /dev/null +++ b/support/env/env_test.go @@ -0,0 +1,119 @@ +package env_test + +import ( + "crypto/rand" + "encoding/hex" + "errors" + "os" + "testing" + "time" + + "github.com/stellar/go/support/env" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// randomEnvName generates a random name for an environment variable. Calls +// t.Fatal if sufficient randomness is unavailable. 
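+//
+// For illustration, the env API exercised by these tests is typically used as
+// in this sketch (the variable names and defaults are placeholders):
+//
+//	port := env.Int("PORT", 8080)
+//	dsn := env.String("DATABASE_URL", "postgres://localhost/example")
+//	timeout := env.Duration("SHUTDOWN_TIMEOUT", 10*time.Second)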
+func randomEnvName(t *testing.T) string { + raw := make([]byte, 5) + _, err := rand.Read(raw) + require.NoError(t, err) + return t.Name() + "_" + hex.EncodeToString(raw) +} + +// TestString_set tests that env.String will return the value of the +// environment variable when the environment variable is set. +func TestString_set(t *testing.T) { + envVar := randomEnvName(t) + err := os.Setenv(envVar, "value") + require.NoError(t, err) + defer os.Unsetenv(envVar) + + value := env.String(envVar, "default") + assert.Equal(t, "value", value) +} + +// TestString_notSet tests that env.String will return the default value given +// when the environment variable is not set. +func TestString_notSet(t *testing.T) { + envVar := randomEnvName(t) + value := env.String(envVar, "default") + assert.Equal(t, "default", value) +} + +// TestInt_set tests that env.Int will return the value of the environment +// variable as an int when the environment variable is set. +func TestInt_set(t *testing.T) { + envVar := randomEnvName(t) + err := os.Setenv(envVar, "12345") + require.NoError(t, err) + defer os.Unsetenv(envVar) + + value := env.Int(envVar, 67890) + assert.Equal(t, 12345, value) +} + +// TestInt_notSet tests that env.Int will return the default value given when the +// environment variable is not set. +func TestInt_notSet(t *testing.T) { + envVar := randomEnvName(t) + value := env.Int(envVar, 67890) + assert.Equal(t, 67890, value) +} + +// TestInt_setInvalid tests that env.Int will panic if the set value cannot be +// parsed as an integer. +func TestInt_setInvalid(t *testing.T) { + envVar := randomEnvName(t) + err := os.Setenv(envVar, "1a345") + require.NoError(t, err) + defer os.Unsetenv(envVar) + + wantPanic := errors.New(`env var "` + envVar + `" cannot be parsed as int: strconv.Atoi: parsing "1a345": invalid syntax`) + defer func() { + r := recover() + assert.Error(t, wantPanic, r) + }() + env.Int(envVar, 67890) +} + +// TestDuration_set tests that env.Duration will return the value of the +// environment variable as a time.Duration when the environment variable is +// set to a duration string. +func TestDuration_set(t *testing.T) { + envVar := randomEnvName(t) + err := os.Setenv(envVar, "5m30s") + require.NoError(t, err) + defer os.Unsetenv(envVar) + + setValue := 5*time.Minute + 30*time.Second + defaultValue := 2 * time.Minute + value := env.Duration(envVar, defaultValue) + assert.Equal(t, setValue, value) +} + +// TestDuration_notSet tests that env.Duration will return the default value given +// when the environment variable is not set. +func TestDuration_notSet(t *testing.T) { + envVar := randomEnvName(t) + defaultValue := 5*time.Minute + 30*time.Second + value := env.Duration(envVar, defaultValue) + assert.Equal(t, defaultValue, value) +} + +// TestDuration_setInvalid tests that env.Duration will panic if the value set +// cannot be parsed as a duration.
+func TestDuration_setInvalid(t *testing.T) { + envVar := randomEnvName(t) + err := os.Setenv(envVar, "5q30s") + require.NoError(t, err) + defer os.Unsetenv(envVar) + + wantPanic := errors.New(`env var "` + envVar + `" cannot be parsed as time.Duration: time: unknown unit q in duration 5q30s`) + defer func() { + r := recover() + assert.Error(t, wantPanic, r) + }() + env.Duration(envVar, 5*time.Minute+30*time.Second) +} diff --git a/support/errors/main.go b/support/errors/main.go index 759f7f08e8..91d0ae3f54 100644 --- a/support/errors/main.go +++ b/support/errors/main.go @@ -8,9 +8,6 @@ package errors import ( - "net/http" - - "github.com/getsentry/raven-go" "github.com/pkg/errors" ) @@ -39,24 +36,6 @@ func New(message string) error { return errors.New(message) } -// ReportToSentry reports err to the configured sentry server. Optionally, -// specifying a non-nil `r` will include information in the report about the -// current http request. -func ReportToSentry(err error, r *http.Request) { - st := raven.NewStacktrace(4, 3, []string{"github.org/stellar"}) - exc := raven.NewException(err, st) - - var packet *raven.Packet - if r != nil { - h := raven.NewHttp(r) - packet = raven.NewPacket(err.Error(), exc, h) - } else { - packet = raven.NewPacket(err.Error(), exc) - } - - raven.Capture(packet, nil) -} - // Wrap returns an error annotating err with message. If err is nil, Wrap // returns nil. See https://godoc.org/github.com/pkg/errors#Wrap for more // details. diff --git a/support/http/fs.go b/support/http/fs.go new file mode 100644 index 0000000000..864ac353af --- /dev/null +++ b/support/http/fs.go @@ -0,0 +1,87 @@ +package http + +import ( + "bytes" + "io/ioutil" + "net/http" + "path/filepath" + "sort" +) + +// EqualFileSystems traverses two http.FileSystem instances and returns true +// if they are equal +func EqualFileSystems(fs, otherFS http.FileSystem, currentPath string) bool { + file, err := fs.Open(currentPath) + if err != nil { + return false + } + + otherFile, err := otherFS.Open(currentPath) + if err != nil { + return false + } + + stat, err := file.Stat() + if err != nil { + return false + } + + otherStat, err := otherFile.Stat() + if err != nil { + return false + } + + if stat.IsDir() != otherStat.IsDir() { + return false + } + + if !stat.IsDir() { + var fileBytes, otherFileBytes []byte + fileBytes, err = ioutil.ReadAll(file) + if err != nil { + return false + } + + otherFileBytes, err = ioutil.ReadAll(otherFile) + if err != nil { + return false + } + + return bytes.Equal(fileBytes, otherFileBytes) + } + + files, err := file.Readdir(0) + if err != nil { + return false + } + + otherFiles, err := otherFile.Readdir(0) + if err != nil { + return false + } + + if len(files) != len(otherFiles) { + return false + } + + sort.Slice(files, func(i, j int) bool { + return files[i].Name() < files[j].Name() + }) + sort.Slice(otherFiles, func(i, j int) bool { + return otherFiles[i].Name() < otherFiles[j].Name() + }) + + for i := range files { + if files[i].Name() != otherFiles[i].Name() { + return false + } + + nextPath := filepath.Join(currentPath, files[i].Name()) + + if !EqualFileSystems(fs, otherFS, nextPath) { + return false + } + } + + return true +} diff --git a/support/http/headers_middleware.go b/support/http/headers_middleware.go new file mode 100644 index 0000000000..10ad203cef --- /dev/null +++ b/support/http/headers_middleware.go @@ -0,0 +1,28 @@ +package http + +import ( + stdhttp "net/http" + "strings" +) + +// HeadersMiddleware sends headers +func HeadersMiddleware(headers 
stdhttp.Header, ignoredPrefixes ...string) func(next stdhttp.Handler) stdhttp.Handler { + return func(next stdhttp.Handler) stdhttp.Handler { + fn := func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + // Do not change ignored prefixes + for _, prefix := range ignoredPrefixes { + if strings.HasPrefix(r.URL.Path, prefix) { + next.ServeHTTP(w, r) + return + } + } + + // headers.Write(w) + for key := range headers { + w.Header().Set(key, headers.Get(key)) + } + next.ServeHTTP(w, r) + } + return stdhttp.HandlerFunc(fn) + } +} diff --git a/support/http/httpauthz/httpauthz.go b/support/http/httpauthz/httpauthz.go new file mode 100644 index 0000000000..a48002b717 --- /dev/null +++ b/support/http/httpauthz/httpauthz.go @@ -0,0 +1,24 @@ +// Package httpauthz contains helper functions for +// parsing the 'Authorization' header in HTTP requests. +package httpauthz + +import "strings" + +// ParseBearerToken parses a bearer token's value from a HTTP Authorization +// header. If the prefix of the authorization header value is 'Bearer ' (case +// ignored) the rest of the value that follows the prefix is returned, +// otherwise an empty string is returned. +func ParseBearerToken(authorizationHeaderValue string) string { + const prefix = "Bearer " + if hasPrefixFold(authorizationHeaderValue, prefix) { + return authorizationHeaderValue[len(prefix):] + } + return "" +} + +func hasPrefixFold(s string, prefix string) bool { + if len(s) < len(prefix) { + return false + } + return strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/support/http/httpauthz/httpauthz_test.go b/support/http/httpauthz/httpauthz_test.go new file mode 100644 index 0000000000..cf1dc301fd --- /dev/null +++ b/support/http/httpauthz/httpauthz_test.go @@ -0,0 +1,37 @@ +package httpauthz + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseBearerToken(t *testing.T) { + testCases := []struct { + AuthorizationHeaderValue string + WantToken string + }{ + // An empty header results in no token. + {"", ""}, + + // Other schemes are ignored. + {"Basic dXNlcm5hbWU6cGFzc3dvcmQ=", ""}, + + // Bearer is the classic example. + {"Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjEyMzM3NDk1Mzh9.eyJpYXQiOjEyMzM3NDk1Mzh9", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjEyMzM3NDk1Mzh9.eyJpYXQiOjEyMzM3NDk1Mzh9"}, + + // Bearer may be any case. + {"bEARER eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjEyMzM3NDk1Mzh9.eyJpYXQiOjEyMzM3NDk1Mzh9", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjEyMzM3NDk1Mzh9.eyJpYXQiOjEyMzM3NDk1Mzh9"}, + + // Bearer is required. + {"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpYXQiOjEyMzM3NDk1Mzh9.eyJpYXQiOjEyMzM3NDk1Mzh9", ""}, + } + + for i, tc := range testCases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + token := ParseBearerToken(tc.AuthorizationHeaderValue) + assert.Equal(t, tc.WantToken, token) + }) + } +} diff --git a/support/http/httpdecode/httpdecode.go b/support/http/httpdecode/httpdecode.go new file mode 100644 index 0000000000..df27dafcf3 --- /dev/null +++ b/support/http/httpdecode/httpdecode.go @@ -0,0 +1,130 @@ +package httpdecode + +import ( + "encoding/json" + "mime" + "net/http" + + "github.com/go-chi/chi" + "github.com/gorilla/schema" + "github.com/stellar/go/support/errors" +) + +// DecodePath decodes parameters from the path in a request used with the +// github.com/go-chi/chi muxing module. 
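+//
+// A hypothetical usage sketch (the route, struct and handler below are
+// illustrative, not part of this package):
+//
+//	type pathParams struct {
+//		ID string `path:"id"`
+//	}
+//
+//	mux := chi.NewMux()
+//	mux.Get("/accounts/{id}", func(w http.ResponseWriter, r *http.Request) {
+//		var p pathParams
+//		if err := httpdecode.DecodePath(r, &p); err != nil {
+//			http.Error(w, "bad request", http.StatusBadRequest)
+//			return
+//		}
+//		// p.ID now holds the value of the {id} path segment.
+//	})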
+func DecodePath(r *http.Request, v interface{}) error { + rctx := chi.RouteContext(r.Context()) + if rctx == nil { + return nil + } + params := rctx.URLParams + paramMap := map[string][]string{} + for i, k := range params.Keys { + if i >= len(params.Values) { + break + } + v := params.Values[i] + paramMap[k] = append(paramMap[k], v) + } + dec := schema.NewDecoder() + dec.SetAliasTag("path") + dec.IgnoreUnknownKeys(true) + return dec.Decode(v, paramMap) +} + +// DecodeQuery decodes the query string from r into v. +func DecodeQuery(r *http.Request, v interface{}) error { + dec := schema.NewDecoder() + dec.SetAliasTag("query") + dec.IgnoreUnknownKeys(true) + return dec.Decode(v, r.URL.Query()) +} + +// DecodeJSON decodes JSON request from r into v. +func DecodeJSON(r *http.Request, v interface{}) error { + dec := json.NewDecoder(r.Body) + dec.UseNumber() + return dec.Decode(v) +} + +// DecodeForm decodes form URL encoded requests from r into v. +// +// The type of the value given can use `form` tags on fields in the same way as +// the `json` tag to name fields. +// +// An error will be returned if the request is not a POST, PUT, or PATCH +// request. +// +// An error will be returned if the request has a media type in the +// Content-Type not equal to application/x-www-form-urlencoded, or if the +// Content-Type header cannot be parsed. +func DecodeForm(r *http.Request, v interface{}) error { + if r.Method != "POST" && r.Method != "PUT" && r.Method != "PATCH" { + return errors.Errorf("method POST, PUT, or PATCH required for form decoding: request has method %q", r.Method) + } + + contentType := r.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return errors.Wrap(err, "content type application/x-www-form-urlencoded required for form decoding") + } + if mediaType != "application/x-www-form-urlencoded" { + return errors.Errorf("content type application/x-www-form-urlencoded required for form decoding: received content type %q", mediaType) + } + + err = r.ParseForm() + if err != nil { + return err + } + + dec := schema.NewDecoder() + dec.SetAliasTag("form") + dec.IgnoreUnknownKeys(true) + return dec.Decode(v, r.PostForm) +} + +// Decode decodes form URL encoded requests and JSON requests from r into v. +// Also decodes path (chi only) and query parameters. +// +// The requests Content-Type header informs if the request should be decoded +// using a form URL encoded decoder or using a JSON decoder. +// +// A Content-Type of application/x-www-form-urlencoded will result in form +// decoding. Any other content type will result in JSON decoding because it is +// common to make JSON requests without a Content-Type where-as correctly +// formatted form URL encoded requests are more often accompanied by the +// appropriate Content-Type. +// +// An error is returned if the Content-Type cannot be parsed by a mime +// media-type parser. +// +// See DecodePath, DecodeQuery, DecodeForm and DecodeJSON for details about +// the types of errors that may occur. 
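+//
+// A hypothetical usage sketch (the route and struct below are illustrative
+// only): a single struct can carry all of the supported tags so that one call
+// decodes the path, query string and body together:
+//
+//	type createParams struct {
+//		ID    string `path:"id"`
+//		Page  int    `query:"page"`
+//		Email string `json:"email" form:"email"`
+//	}
+//
+//	mux.Post("/accounts/{id}", func(w http.ResponseWriter, r *http.Request) {
+//		var p createParams
+//		if err := httpdecode.Decode(r, &p); err != nil {
+//			http.Error(w, "bad request", http.StatusBadRequest)
+//			return
+//		}
+//		// p is now populated from the URL and the request body.
+//	})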
+func Decode(r *http.Request, v interface{}) error { + err := DecodePath(r, v) + if err != nil { + return errors.Wrap(err, "path params could not be parsed") + } + err = DecodeQuery(r, v) + if err != nil { + return errors.Wrap(err, "query could not be parsed") + } + contentType := r.Header.Get("Content-Type") + if contentType != "" { + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return errors.Wrap(err, "content type could not be parsed") + } + if mediaType == "application/x-www-form-urlencoded" { + return DecodeForm(r, v) + } + } + + // A nil body means the request has no body, such as a GET request. + // Calling DecodeJSON when receiving GET requests will result in EOF. + if r.Body != nil && r.Body != http.NoBody { + return DecodeJSON(r, v) + } + + return nil +} diff --git a/support/http/httpdecode/httpdecode_test.go b/support/http/httpdecode/httpdecode_test.go new file mode 100644 index 0000000000..5989b27f73 --- /dev/null +++ b/support/http/httpdecode/httpdecode_test.go @@ -0,0 +1,488 @@ +package httpdecode + +import ( + "bufio" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/go-chi/chi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDecodePath_valid(t *testing.T) { + pathDecoded := struct { + Foo string `path:"foo"` + }{} + mux := chi.NewMux() + mux.Post("/path/{foo}/path", func(w http.ResponseWriter, r *http.Request) { + err := DecodePath(r, &pathDecoded) + require.NoError(t, err) + }) + w := httptest.NewRecorder() + r, _ := http.NewRequest("POST", "/path/bar/path", nil) + mux.ServeHTTP(w, r) + + assert.Equal(t, "bar", pathDecoded.Foo) +} + +func TestDecodePath_empty(t *testing.T) { + pathDecoded := struct { + Foo string `path:"foo"` + }{} + mux := chi.NewMux() + mux.Post("/path/{foo}/path", func(w http.ResponseWriter, r *http.Request) { + err := DecodePath(r, &pathDecoded) + require.NoError(t, err) + }) + w := httptest.NewRecorder() + r, _ := http.NewRequest("POST", "/path//path", nil) + mux.ServeHTTP(w, r) + + assert.Equal(t, "", pathDecoded.Foo) +} + +func TestDecodePath_notUsingChi(t *testing.T) { + pathDecoded := struct { + Foo string `path:"foo"` + }{} + r, _ := http.NewRequest("POST", "/path/bar/path", nil) + err := DecodePath(r, &pathDecoded) + require.NoError(t, err) + + assert.Equal(t, "", pathDecoded.Foo) +} + +func TestDecodeQuery_valid(t *testing.T) { + q := "foo=bar&list=a&list=b&enc=%2B+-%2F" + r, _ := http.NewRequest("POST", "/?"+q, nil) + + queryDecoded := struct { + Foo string `query:"foo"` + List []string `query:"list"` + Enc string `query:"enc"` + }{} + err := DecodeQuery(r, &queryDecoded) + require.NoError(t, err) + + assert.Equal(t, "bar", queryDecoded.Foo) + assert.ElementsMatch(t, []string{"a", "b"}, queryDecoded.List) + assert.Equal(t, "+ -/", queryDecoded.Enc) +} + +func TestDecodeQuery_validNone(t *testing.T) { + r, _ := http.NewRequest("POST", "/", nil) + + queryDecoded := struct { + Foo string `query:"foo"` + List []string `query:"list"` + Enc string `query:"enc"` + }{} + err := DecodeQuery(r, &queryDecoded) + require.NoError(t, err) + + assert.Equal(t, "", queryDecoded.Foo) + assert.Empty(t, queryDecoded.List) + assert.Equal(t, "", queryDecoded.Enc) +} + +// Test that DecodeQuery ignores query parameters that are invalid in the same +// way that reading out query parameters that are invalid is normally ignored +// with the built-in net/http package. 
+func TestDecodeQuery_invalid(t *testing.T) { + req := `GET /?far=baf&enc=%2%B+-%2F&foo=bar HTTP/1.1 + +` + r, err := http.ReadRequest(bufio.NewReader(strings.NewReader(req))) + require.NoError(t, err) + + queryDecoded := struct { + Far string `query:"far"` + Enc string `query:"enc"` + Foo string `query:"foo"` + }{} + err = DecodeQuery(r, &queryDecoded) + require.NoError(t, err) + + assert.Equal(t, "baf", queryDecoded.Far) + assert.Equal(t, "", queryDecoded.Enc) + assert.Equal(t, "bar", queryDecoded.Foo) +} + +func TestDecodeJSON_valid(t *testing.T) { + body := `{"foo":"bar"}` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + + bodyDecoded := struct { + Foo string `json:"foo"` + }{} + err := DecodeJSON(r, &bodyDecoded) + require.NoError(t, err) + + assert.Equal(t, "bar", bodyDecoded.Foo) +} + +func TestDecodeJSON_invalid(t *testing.T) { + body := `{"foo:"bar"}` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + + bodyDecoded := struct { + Foo string `json:"foo"` + }{} + err := DecodeJSON(r, &bodyDecoded) + require.EqualError(t, err, "invalid character 'b' after object key") +} + +func TestDecodeForm_valid(t *testing.T) { + body := `foo=bar` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + bodyDecoded := struct { + Foo string + }{} + err := DecodeForm(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.Foo) +} + +func TestDecodeForm_validTags(t *testing.T) { + body := `foo=bar` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + bodyDecoded := struct { + FooName string `form:"foo"` + }{} + err := DecodeForm(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.FooName) +} + +func TestDecodeForm_validIgnoresUnkownKeys(t *testing.T) { + body := `foo=bar&foz=baz` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + + bodyDecoded := struct { + Foo string + }{} + err := DecodeForm(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.Foo) +} + +func TestDecodeForm_validContentTypeWithOptions(t *testing.T) { + body := `foo=bar` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + + bodyDecoded := struct { + Foo string + }{} + err := DecodeForm(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.Foo) +} + +func TestDecodeForm_invalidBody(t *testing.T) { + body := `foo=%=` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + bodyDecoded := struct { + Foo string + }{} + err := DecodeForm(r, &bodyDecoded) + assert.EqualError(t, err, `invalid URL escape "%="`) + assert.Equal(t, "", bodyDecoded.Foo) +} + +func TestDecodeForm_invalidNoContentType(t *testing.T) { + body := `foo=bar` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + + bodyDecoded := struct { + Foo string + }{} + err := DecodeForm(r, &bodyDecoded) + assert.EqualError(t, err, `content type application/x-www-form-urlencoded required for form decoding: mime: no media type`) + assert.Equal(t, "", bodyDecoded.Foo) +} + +func TestDecodeForm_invalidUnrecognizedContentType(t *testing.T) { + body := `foo=bar` + r, _ := http.NewRequest("POST", "/", 
strings.NewReader(body)) + r.Header.Set("Content-Type", "application/xwwwformurlencoded") + + bodyDecoded := struct { + Foo string + }{} + err := DecodeForm(r, &bodyDecoded) + assert.EqualError(t, err, `content type application/x-www-form-urlencoded required for form decoding: received content type "application/xwwwformurlencoded"`) + assert.Equal(t, "", bodyDecoded.Foo) +} + +func TestDecodeForm_invalidMethodType(t *testing.T) { + body := `foo=bar` + r, _ := http.NewRequest("GET", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + bodyDecoded := struct { + Foo string + }{} + err := DecodeForm(r, &bodyDecoded) + assert.EqualError(t, err, `method POST, PUT, or PATCH required for form decoding: request has method "GET"`) + assert.Equal(t, "", bodyDecoded.Foo) +} + +func TestDecode_validJSONNoContentType(t *testing.T) { + body := `{"foo":"bar"}` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + }{} + err := Decode(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.FooName) +} + +func TestDecode_validJSONWithContentType(t *testing.T) { + body := `{"foo":"bar"}` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/json") + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + }{} + err := Decode(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.FooName) +} + +func TestDecode_validJSONWithContentTypeOptions(t *testing.T) { + body := `{"foo":"bar"}` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/json; charset=utf-8") + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + }{} + err := Decode(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.FooName) +} + +func TestDecode_validForm(t *testing.T) { + body := `foo=bar` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + }{} + err := Decode(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.FooName) +} + +func TestDecode_validFormWithContentTypeOptions(t *testing.T) { + body := `foo=bar` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + }{} + err := Decode(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.FooName) +} + +func TestDecode_cannotParseContentType(t *testing.T) { + body := `{"foo":"bar"}` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application=json") + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + }{} + err := Decode(r, &bodyDecoded) + assert.EqualError(t, err, "content type could not be parsed: mime: expected slash after first token") + assert.Equal(t, "", bodyDecoded.FooName) +} + +func TestDecode_invalidJSON(t *testing.T) { + body := `{"foo""bar"}` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + }{} + err := Decode(r, &bodyDecoded) + assert.EqualError(t, err, `invalid character '"' after object key`) + assert.Equal(t, "", bodyDecoded.FooName) +} + 
+func TestDecode_clientReqNoBody(t *testing.T) { + pathDecoded := struct { + Foo string `path:"foo"` + }{} + mux := chi.NewMux() + mux.Get("/path/{foo}/path", func(w http.ResponseWriter, r *http.Request) { + err := Decode(r, &pathDecoded) + require.NoError(t, err) + }) + w := httptest.NewRecorder() + r, _ := http.NewRequest("GET", "/path/bar/path", nil) + mux.ServeHTTP(w, r) + + assert.Equal(t, "bar", pathDecoded.Foo) +} + +func TestDecode_serverReqNoBody(t *testing.T) { + pathDecoded := struct { + Foo string `path:"foo"` + }{} + mux := chi.NewMux() + mux.Get("/path/{foo}/path", func(w http.ResponseWriter, r *http.Request) { + err := Decode(r, &pathDecoded) + require.NoError(t, err) + }) + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/path/bar/path", nil) + mux.ServeHTTP(w, r) + + assert.Equal(t, "bar", pathDecoded.Foo) +} + +func TestDecode_invalidForm(t *testing.T) { + body := `foo=%=bar` + r, _ := http.NewRequest("POST", "/", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + }{} + err := Decode(r, &bodyDecoded) + assert.EqualError(t, err, `invalid URL escape "%=b"`) + assert.Equal(t, "", bodyDecoded.FooName) +} + +func TestDecode_validFormAndQuery(t *testing.T) { + body := `foo=bar` + r, _ := http.NewRequest("POST", "/?far=boo&foo=ba2", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + FarName string `query:"far"` + }{} + err := Decode(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.FooName) + assert.Equal(t, "boo", bodyDecoded.FarName) +} + +func TestDecode_validJSONAndQuery(t *testing.T) { + body := `{"foo":"bar"}` + r, _ := http.NewRequest("POST", "/?far=boo&foo=ba2", strings.NewReader(body)) + + bodyDecoded := struct { + FooName string `json:"foo" form:"foo"` + FarName string `query:"far"` + }{} + err := Decode(r, &bodyDecoded) + assert.NoError(t, err) + assert.Equal(t, "bar", bodyDecoded.FooName) + assert.Equal(t, "boo", bodyDecoded.FarName) +} + +func TestDecode_validFormAndPath(t *testing.T) { + decoded := struct { + FooName string `json:"foo" form:"foo"` + FarName string `path:"foo"` + }{} + + mux := chi.NewMux() + mux.Post("/path/{foo}/path", func(w http.ResponseWriter, r *http.Request) { + err := Decode(r, &decoded) + require.NoError(t, err) + }) + w := httptest.NewRecorder() + body := `foo=bar` + r, _ := http.NewRequest("POST", "/path/boo/path", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + mux.ServeHTTP(w, r) + + assert.Equal(t, "bar", decoded.FooName) + assert.Equal(t, "boo", decoded.FarName) +} + +func TestDecode_validJSONAndPath(t *testing.T) { + decoded := struct { + FooName string `json:"foo" form:"foo"` + FarName string `path:"foo"` + }{} + + mux := chi.NewMux() + mux.Post("/path/{foo}/path", func(w http.ResponseWriter, r *http.Request) { + err := Decode(r, &decoded) + require.NoError(t, err) + }) + w := httptest.NewRecorder() + body := `{"foo":"bar"}` + r, _ := http.NewRequest("POST", "/path/boo/path", strings.NewReader(body)) + mux.ServeHTTP(w, r) + + assert.Equal(t, "bar", decoded.FooName) + assert.Equal(t, "boo", decoded.FarName) +} + +func TestDecode_validFormAndPathAndQuery(t *testing.T) { + decoded := struct { + FooName string `json:"foo" form:"foo"` + FarName string `path:"foo"` + QueryName string `query:"q"` + }{} + + mux := chi.NewMux() + 
mux.Post("/path/{foo}/path", func(w http.ResponseWriter, r *http.Request) { + err := Decode(r, &decoded) + require.NoError(t, err) + }) + w := httptest.NewRecorder() + body := `foo=bar` + r, _ := http.NewRequest("POST", "/path/boo/path?q=search+value", strings.NewReader(body)) + r.Header.Set("Content-Type", "application/x-www-form-urlencoded") + mux.ServeHTTP(w, r) + + assert.Equal(t, "bar", decoded.FooName) + assert.Equal(t, "boo", decoded.FarName) + assert.Equal(t, "search value", decoded.QueryName) +} + +func TestDecode_validJSONAndPathAndQuery(t *testing.T) { + decoded := struct { + FooName string `json:"foo" form:"foo"` + FarName string `path:"foo"` + QueryName string `query:"q"` + }{} + + mux := chi.NewMux() + mux.Post("/path/{foo}/path", func(w http.ResponseWriter, r *http.Request) { + err := Decode(r, &decoded) + require.NoError(t, err) + }) + w := httptest.NewRecorder() + body := `{"foo":"bar"}` + r, _ := http.NewRequest("POST", "/path/boo/path?q=search+value", strings.NewReader(body)) + mux.ServeHTTP(w, r) + + assert.Equal(t, "bar", decoded.FooName) + assert.Equal(t, "boo", decoded.FarName) + assert.Equal(t, "search value", decoded.QueryName) +} diff --git a/support/http/httptest/client.go b/support/http/httptest/client.go new file mode 100644 index 0000000000..32fcf069fd --- /dev/null +++ b/support/http/httptest/client.go @@ -0,0 +1,10 @@ +package httptest + +// On is the entrypoint method into this packages "client" mocking system. +func (c *Client) On(method string, url string) *ClientExpectation { + return &ClientExpectation{ + Method: method, + URL: url, + Client: c, + } +} diff --git a/support/http/httptest/client_expectation.go b/support/http/httptest/client_expectation.go new file mode 100644 index 0000000000..1d5ca6d33e --- /dev/null +++ b/support/http/httptest/client_expectation.go @@ -0,0 +1,124 @@ +package httptest + +import ( + "net/http" + "net/url" + "strconv" + + "github.com/jarcoal/httpmock" + "github.com/stellar/go/support/errors" +) + +// Return specifies the response for a ClientExpectation, which is then +// committed to the connected mock client. +func (ce *ClientExpectation) Return(r httpmock.Responder) *ClientExpectation { + ce.Client.MockTransport.RegisterResponder( + ce.Method, + ce.URL, + r, + ) + return ce +} + +// ReturnError causes this expectation to resolve to an error. +func (ce *ClientExpectation) ReturnError(msg string) *ClientExpectation { + return ce.Return(func(*http.Request) (*http.Response, error) { + return nil, errors.New(msg) + }) +} + +// ReturnString causes this expectation to resolve to a string-based body with +// the provided status code. +func (ce *ClientExpectation) ReturnString( + status int, + body string, +) *ClientExpectation { + return ce.Return( + httpmock.NewStringResponder(status, body), + ) +} + +// ReturnJSON causes this expectation to resolve to a json-based body with the +// provided status code. Panics when the provided body cannot be encoded to +// JSON. +func (ce *ClientExpectation) ReturnJSON( + status int, + body interface{}, +) *ClientExpectation { + + r, err := httpmock.NewJsonResponder(status, body) + if err != nil { + panic(err) + } + + return ce.Return(r) +} + +// ReturnNotFound is a simple helper that causes this expectation to resolve to +// a 404 error. If a customized body is needed, use something like +// `ReturnString`` instead. 
+func (ce *ClientExpectation) ReturnNotFound() *ClientExpectation {
+	return ce.ReturnString(http.StatusNotFound, "not found")
+}
+
+// ReturnStringWithHeader causes this expectation to resolve to a string-based body with
+// the provided status code and response header.
+func (ce *ClientExpectation) ReturnStringWithHeader(
+	status int,
+	body string,
+	header http.Header,
+) *ClientExpectation {
+
+	req, err := ce.clientRequest()
+	if err != nil {
+		panic(err)
+	}
+
+	cResp := http.Response{
+		Status:     strconv.Itoa(status),
+		StatusCode: status,
+		Body:       httpmock.NewRespBodyFromString(body),
+		Header:     header,
+		Request:    req,
+	}
+
+	return ce.Return(httpmock.ResponderFromResponse(&cResp))
+}
+
+// ReturnJSONWithHeader causes this expectation to resolve to a json-based body with the provided
+// status code and response header. Panics when the provided body cannot be encoded to JSON.
+func (ce *ClientExpectation) ReturnJSONWithHeader(
+	status int,
+	body interface{},
+	header http.Header,
+) *ClientExpectation {
+
+	r, err := httpmock.NewJsonResponse(status, body)
+	if err != nil {
+		panic(err)
+	}
+
+	req, err := ce.clientRequest()
+	if err != nil {
+		panic(err)
+	}
+
+	r.Header = header
+	r.Request = req
+	return ce.Return(httpmock.ResponderFromResponse(r))
+}
+
+// clientRequest builds a http.Request struct from the supplied request parameters.
+func (ce *ClientExpectation) clientRequest() (*http.Request, error) {
+	rurl, err := url.Parse(ce.URL)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to parse request url")
+	}
+
+	req := http.Request{
+		Method: ce.Method,
+		URL:    rurl,
+		Host:   rurl.Host,
+	}
+	return &req, nil
+}
diff --git a/support/http/httptest/main.go b/support/http/httptest/main.go
index bf1040a330..47a00b1991 100644
--- a/support/http/httptest/main.go
+++ b/support/http/httptest/main.go
@@ -1,6 +1,21 @@
 // Package httptest enhances the stdlib net/http/httptest package by integrating
 // it with gopkg.in/gavv/httpexpect.v1, reducing the boilerplate needed for http
-// tests
+// tests. In addition to the functions that make it easier to stand up a test
+// server, this package also provides a set of tools to make it easier to mock
+// out http client responses.
+//
+// Test Servers vs. Client mocking
+//
+// When building a testing fixture for HTTP, you can approach the problem in two
+// ways: Use a mocked server and make real http requests to the server, or use
+// a mocked client and have _no_ server. While this package provides facilities
+// for both, we recommend that you follow the conventions for deciding which to
+// use in your tests.
+//
+// The test server system should be used when the object under test is our own
+// server code; usually that means our own http.Handler implementations. The
+// mocked client system should be used when the object under test is making http
+// requests.
 package httptest
 
 import (
@@ -8,14 +23,43 @@ import (
 	stdtest "net/http/httptest"
 	"testing"
 
+	"github.com/jarcoal/httpmock"
 	"gopkg.in/gavv/httpexpect.v1"
 )
 
+// Client represents an easier way to mock http client behavior. It assumes
+// that your packages use interfaces to store an http client instead of a
+// concrete *http.Client.
+type Client struct {
+	*http.Client
+	*httpmock.MockTransport
+}
+
+// ClientExpectation represents an in-process-of-being-built http client mocking
+// operation. The `On` method of `Client` returns an instance of this struct
+// upon which you can call further methods to customize the response.
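+//
+// For example, a hypothetical expectation that returns a canned JSON body (the
+// URL and body shown are illustrative only):
+//
+//	client := httptest.NewClient()
+//	client.On("GET", "https://example.org/status").
+//		ReturnJSON(http.StatusOK, map[string]string{"status": "ok"})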
+type ClientExpectation struct { + Method string + URL string + Client *Client +} + type Server struct { *httpexpect.Expect *stdtest.Server } +// NewClient returns a new mocked http client. A value being tested can be +// configured to use this http client allowing the tester to control the server +// responses without needing to run an actual server. +func NewClient() *Client { + var result Client + result.MockTransport = httpmock.NewMockTransport() + result.Client = &http.Client{Transport: result.MockTransport} + return &result +} + +// NewServer returns a new test server instance using the provided handler. func NewServer(t *testing.T, handler http.Handler) *Server { server := stdtest.NewServer(handler) return &Server{ diff --git a/support/http/logging_middleware.go b/support/http/logging_middleware.go new file mode 100644 index 0000000000..0a2f784051 --- /dev/null +++ b/support/http/logging_middleware.go @@ -0,0 +1,102 @@ +package http + +import ( + stdhttp "net/http" + "strings" + "time" + + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" + "github.com/stellar/go/support/http/mutil" + "github.com/stellar/go/support/log" +) + +// Options allow the middleware logger to accept additional information. +type Options struct { + ExtraHeaders []string +} + +// SetLogger is a middleware that sets a logger on the context. +func SetLoggerMiddleware(l *log.Entry) func(stdhttp.Handler) stdhttp.Handler { + return func(next stdhttp.Handler) stdhttp.Handler { + return stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + ctx := r.Context() + ctx = log.Set(ctx, l) + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + }) + } +} + +// LoggingMiddleware is a middleware that logs requests to the logger. +func LoggingMiddleware(next stdhttp.Handler) stdhttp.Handler { + return LoggingMiddlewareWithOptions(Options{})(next) +} + +// LoggingMiddlewareWithOptions is a middleware that logs requests to the logger. +// Requires an Options struct to accept additional information. +func LoggingMiddlewareWithOptions(options Options) func(stdhttp.Handler) stdhttp.Handler { + return func(next stdhttp.Handler) stdhttp.Handler { + return stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + mw := mutil.WrapWriter(w) + ctx := log.PushContext(r.Context(), func(l *log.Entry) *log.Entry { + return l.WithFields(log.F{ + "req": middleware.GetReqID(r.Context()), + }) + }) + r = r.WithContext(ctx) + + logStartOfRequest(r, options.ExtraHeaders) + + then := time.Now() + next.ServeHTTP(mw, r) + duration := time.Since(then) + + logEndOfRequest(r, duration, mw) + }) + } +} + +// logStartOfRequest emits the logline that reports that an http request is +// beginning processing. +func logStartOfRequest( + r *stdhttp.Request, + extraHeaders []string, +) { + fields := log.F{} + for _, header := range extraHeaders { + // Strips "-" characters and lowercases new logrus.Fields keys to be uniform with the other keys in the logger. + // Simplifies querying extended fields. 
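+		// e.g. "X-Forwarded-For" is logged under the key "xforwardedfor".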
+ var headerkey = strings.ToLower(strings.ReplaceAll(header, "-", "")) + fields[headerkey] = r.Header.Get(header) + } + fields["subsys"] = "http" + fields["path"] = r.URL.String() + fields["method"] = r.Method + fields["ip"] = r.RemoteAddr + fields["host"] = r.Host + fields["useragent"] = r.Header.Get("User-Agent") + l := log.Ctx(r.Context()).WithFields(fields) + + l.Info("starting request") +} + +// logEndOfRequest emits the logline for the end of the request +func logEndOfRequest( + r *stdhttp.Request, + duration time.Duration, + mw mutil.WriterProxy, +) { + l := log.Ctx(r.Context()).WithFields(log.F{ + "subsys": "http", + "path": r.URL.String(), + "method": r.Method, + "status": mw.Status(), + "bytes": mw.BytesWritten(), + "duration": duration, + }) + if routeContext := chi.RouteContext(r.Context()); routeContext != nil { + l = l.WithField("route", routeContext.RoutePattern()) + } + l.Info("finished request") +} diff --git a/support/http/logging_middleware_test.go b/support/http/logging_middleware_test.go new file mode 100644 index 0000000000..0e2eb45bb2 --- /dev/null +++ b/support/http/logging_middleware_test.go @@ -0,0 +1,326 @@ +package http + +import ( + stdhttp "net/http" + "testing" + + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" + "github.com/stellar/go/support/http/httptest" + "github.com/stellar/go/support/log" + "github.com/stretchr/testify/assert" +) + +// setXFFMiddleware sets "X-Forwarded-For" header to test LoggingMiddlewareWithOptions. +func setXFFMiddleware(next stdhttp.Handler) stdhttp.Handler { + return stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + r.Header.Set("X-Forwarded-For", "203.0.113.195") + next.ServeHTTP(w, r) + }) +} + +// setContentMD5MiddleWare sets header to test LoggingMiddlewareWithOptions. 
+func setContentMD5Middleware(next stdhttp.Handler) stdhttp.Handler { + return stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + r.Header.Set("Content-MD5", "U3RlbGxhciBpcyBBd2Vzb21lIQ==") + next.ServeHTTP(w, r) + }) +} + +func TestHTTPMiddleware(t *testing.T) { + done := log.DefaultLogger.StartTest(log.InfoLevel) + mux := chi.NewMux() + + mux.Use(middleware.RequestID) + mux.Use(LoggingMiddleware) + + mux.Get("/path/{value}", stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + log.Ctx(r.Context()).Info("handler log line") + })) + mux.Handle("/not_found", stdhttp.NotFoundHandler()) + + src := httptest.NewServer(t, mux) + src.GET("/path/1234").Expect().Status(stdhttp.StatusOK) + src.GET("/not_found").Expect().Status(stdhttp.StatusNotFound) + src.GET("/really_not_found").Expect().Status(stdhttp.StatusNotFound) + + // get the log buffer and ensure it has both the start and end log lines for + // each request + logged := done() + if assert.Len(t, logged, 7, "unexpected log line count") { + assert.Equal(t, "starting request", logged[0].Message) + assert.Equal(t, "http", logged[0].Data["subsys"]) + assert.Equal(t, "GET", logged[0].Data["method"]) + assert.NotEmpty(t, logged[0].Data["req"]) + assert.Equal(t, "/path/1234", logged[0].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[0].Data["useragent"]) + req1 := logged[0].Data["req"] + + assert.Equal(t, "handler log line", logged[1].Message) + assert.Equal(t, req1, logged[1].Data["req"]) + + assert.Equal(t, "finished request", logged[2].Message) + assert.Equal(t, "http", logged[2].Data["subsys"]) + assert.Equal(t, "GET", logged[2].Data["method"]) + assert.Equal(t, req1, logged[2].Data["req"]) + assert.Equal(t, "/path/1234", logged[2].Data["path"]) + assert.Equal(t, "/path/{value}", logged[2].Data["route"]) + + assert.Equal(t, "starting request", logged[3].Message) + assert.Equal(t, "http", logged[3].Data["subsys"]) + assert.Equal(t, "GET", logged[3].Data["method"]) + assert.NotEmpty(t, logged[3].Data["req"]) + assert.NotEmpty(t, logged[3].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[3].Data["useragent"]) + req2 := logged[3].Data["req"] + + assert.Equal(t, "finished request", logged[4].Message) + assert.Equal(t, "http", logged[4].Data["subsys"]) + assert.Equal(t, "GET", logged[4].Data["method"]) + assert.Equal(t, req2, logged[4].Data["req"]) + assert.Equal(t, "/not_found", logged[4].Data["path"]) + assert.Equal(t, "/not_found", logged[4].Data["route"]) + + assert.Equal(t, "starting request", logged[5].Message) + assert.Equal(t, "http", logged[5].Data["subsys"]) + assert.Equal(t, "GET", logged[5].Data["method"]) + assert.NotEmpty(t, logged[5].Data["req"]) + assert.NotEmpty(t, logged[5].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[5].Data["useragent"]) + req3 := logged[5].Data["req"] + + assert.Equal(t, "finished request", logged[6].Message) + assert.Equal(t, "http", logged[6].Data["subsys"]) + assert.Equal(t, "GET", logged[6].Data["method"]) + assert.Equal(t, req3, logged[6].Data["req"]) + assert.Equal(t, "/really_not_found", logged[6].Data["path"]) + assert.Equal(t, "", logged[6].Data["route"]) + } +} + +func TestHTTPMiddlewareWithOptions(t *testing.T) { + done := log.DefaultLogger.StartTest(log.InfoLevel) + mux := chi.NewMux() + + mux.Use(setXFFMiddleware) + mux.Use(setContentMD5Middleware) + mux.Use(middleware.RequestID) + options := Options{ExtraHeaders: []string{"X-Forwarded-For", "Content-MD5"}} + mux.Use(LoggingMiddlewareWithOptions(options)) + + 
mux.Get("/path/{value}", stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + log.Ctx(r.Context()).Info("handler log line") + })) + mux.Handle("/not_found", stdhttp.NotFoundHandler()) + + src := httptest.NewServer(t, mux) + src.GET("/path/1234").Expect().Status(stdhttp.StatusOK) + src.GET("/not_found").Expect().Status(stdhttp.StatusNotFound) + src.GET("/really_not_found").Expect().Status(stdhttp.StatusNotFound) + + // get the log buffer and ensure it has both the start and end log lines for + // each request + logged := done() + if assert.Len(t, logged, 7, "unexpected log line count") { + assert.Equal(t, "starting request", logged[0].Message) + assert.Equal(t, "http", logged[0].Data["subsys"]) + assert.Equal(t, "GET", logged[0].Data["method"]) + assert.NotEmpty(t, logged[0].Data["req"]) + assert.Equal(t, "/path/1234", logged[0].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[0].Data["useragent"]) + assert.Equal(t, "203.0.113.195", logged[0].Data["xforwardedfor"]) + assert.Equal(t, "U3RlbGxhciBpcyBBd2Vzb21lIQ==", logged[0].Data["contentmd5"]) + assert.Equal(t, 10, len(logged[0].Data)) + req1 := logged[0].Data["req"] + + assert.Equal(t, "handler log line", logged[1].Message) + assert.Equal(t, req1, logged[1].Data["req"]) + assert.Equal(t, 2, len(logged[1].Data)) + + assert.Equal(t, "finished request", logged[2].Message) + assert.Equal(t, "http", logged[2].Data["subsys"]) + assert.Equal(t, "GET", logged[2].Data["method"]) + assert.Equal(t, req1, logged[2].Data["req"]) + assert.Equal(t, "/path/1234", logged[2].Data["path"]) + assert.Equal(t, "/path/{value}", logged[2].Data["route"]) + assert.Equal(t, 9, len(logged[2].Data)) + + assert.Equal(t, "starting request", logged[3].Message) + assert.Equal(t, "http", logged[3].Data["subsys"]) + assert.Equal(t, "GET", logged[3].Data["method"]) + assert.NotEmpty(t, logged[3].Data["req"]) + assert.NotEmpty(t, logged[3].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[3].Data["useragent"]) + assert.Equal(t, "203.0.113.195", logged[3].Data["xforwardedfor"]) + assert.Equal(t, "U3RlbGxhciBpcyBBd2Vzb21lIQ==", logged[3].Data["contentmd5"]) + assert.Equal(t, 10, len(logged[3].Data)) + req2 := logged[3].Data["req"] + + assert.Equal(t, "finished request", logged[4].Message) + assert.Equal(t, "http", logged[4].Data["subsys"]) + assert.Equal(t, "GET", logged[4].Data["method"]) + assert.Equal(t, req2, logged[4].Data["req"]) + assert.Equal(t, "/not_found", logged[4].Data["path"]) + assert.Equal(t, "/not_found", logged[4].Data["route"]) + assert.Equal(t, 9, len(logged[4].Data)) + + assert.Equal(t, "starting request", logged[5].Message) + assert.Equal(t, "http", logged[5].Data["subsys"]) + assert.Equal(t, "GET", logged[5].Data["method"]) + assert.NotEmpty(t, logged[5].Data["req"]) + assert.NotEmpty(t, logged[5].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[5].Data["useragent"]) + assert.Equal(t, "203.0.113.195", logged[5].Data["xforwardedfor"]) + assert.Equal(t, "U3RlbGxhciBpcyBBd2Vzb21lIQ==", logged[5].Data["contentmd5"]) + assert.Equal(t, 10, len(logged[5].Data)) + req3 := logged[5].Data["req"] + + assert.Equal(t, "finished request", logged[6].Message) + assert.Equal(t, "http", logged[6].Data["subsys"]) + assert.Equal(t, "GET", logged[6].Data["method"]) + assert.Equal(t, req3, logged[6].Data["req"]) + assert.Equal(t, "/really_not_found", logged[6].Data["path"]) + assert.Equal(t, "", logged[6].Data["route"]) + assert.Equal(t, 9, len(logged[6].Data)) + } +} + +func TestHTTPMiddleware_stdlibServeMux(t 
*testing.T) { + done := log.DefaultLogger.StartTest(log.InfoLevel) + + mux := stdhttp.ServeMux{} + mux.Handle( + "/path/1234", + middleware.RequestID( + LoggingMiddleware( + stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + log.Ctx(r.Context()).Info("handler log line") + }), + ), + ), + ) + mux.Handle( + "/not_found", + middleware.RequestID( + LoggingMiddleware( + stdhttp.NotFoundHandler(), + ), + ), + ) + + src := httptest.NewServer(t, &mux) + src.GET("/path/1234").Expect().Status(stdhttp.StatusOK) + src.GET("/not_found").Expect().Status(stdhttp.StatusNotFound) + src.GET("/really_not_found").Expect().Status(stdhttp.StatusNotFound) + + // get the log buffer and ensure it has both the start and end log lines for + // each request + logged := done() + if assert.Len(t, logged, 5, "unexpected log line count") { + assert.Equal(t, "starting request", logged[0].Message) + assert.Equal(t, "http", logged[0].Data["subsys"]) + assert.Equal(t, "GET", logged[0].Data["method"]) + assert.NotEmpty(t, logged[0].Data["req"]) + assert.Equal(t, "/path/1234", logged[0].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[0].Data["useragent"]) + req1 := logged[0].Data["req"] + + assert.Equal(t, "handler log line", logged[1].Message) + assert.Equal(t, req1, logged[1].Data["req"]) + + assert.Equal(t, "finished request", logged[2].Message) + assert.Equal(t, "http", logged[2].Data["subsys"]) + assert.Equal(t, "GET", logged[2].Data["method"]) + assert.Equal(t, req1, logged[2].Data["req"]) + assert.Equal(t, "/path/1234", logged[2].Data["path"]) + assert.Equal(t, nil, logged[2].Data["route"]) + + assert.Equal(t, "starting request", logged[3].Message) + assert.Equal(t, "http", logged[3].Data["subsys"]) + assert.Equal(t, "GET", logged[3].Data["method"]) + assert.NotEmpty(t, logged[3].Data["req"]) + assert.NotEmpty(t, logged[3].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[3].Data["useragent"]) + req2 := logged[3].Data["req"] + + assert.Equal(t, "finished request", logged[4].Message) + assert.Equal(t, "http", logged[4].Data["subsys"]) + assert.Equal(t, "GET", logged[4].Data["method"]) + assert.Equal(t, req2, logged[4].Data["req"]) + assert.Equal(t, "/not_found", logged[4].Data["path"]) + assert.Equal(t, nil, logged[4].Data["route"]) + } +} + +func TestHTTPMiddlewareWithLoggerSet(t *testing.T) { + logger := log.New() + done := logger.StartTest(log.InfoLevel) + mux := chi.NewMux() + + mux.Use(middleware.RequestID) + mux.Use(SetLoggerMiddleware(logger)) + mux.Use(LoggingMiddleware) + + mux.Get("/path/{value}", stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + log.Ctx(r.Context()).Info("handler log line") + })) + mux.Handle("/not_found", stdhttp.NotFoundHandler()) + + src := httptest.NewServer(t, mux) + src.GET("/path/1234").Expect().Status(stdhttp.StatusOK) + src.GET("/not_found").Expect().Status(stdhttp.StatusNotFound) + src.GET("/really_not_found").Expect().Status(stdhttp.StatusNotFound) + + // get the log buffer and ensure it has both the start and end log lines for + // each request + logged := done() + if assert.Len(t, logged, 7, "unexpected log line count") { + assert.Equal(t, "starting request", logged[0].Message) + assert.Equal(t, "http", logged[0].Data["subsys"]) + assert.Equal(t, "GET", logged[0].Data["method"]) + assert.NotEmpty(t, logged[0].Data["req"]) + assert.Equal(t, "/path/1234", logged[0].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[0].Data["useragent"]) + req1 := logged[0].Data["req"] + + assert.Equal(t, "handler log 
line", logged[1].Message) + assert.Equal(t, req1, logged[1].Data["req"]) + + assert.Equal(t, "finished request", logged[2].Message) + assert.Equal(t, "http", logged[2].Data["subsys"]) + assert.Equal(t, "GET", logged[2].Data["method"]) + assert.Equal(t, req1, logged[2].Data["req"]) + assert.Equal(t, "/path/1234", logged[2].Data["path"]) + assert.Equal(t, "/path/{value}", logged[2].Data["route"]) + + assert.Equal(t, "starting request", logged[3].Message) + assert.Equal(t, "http", logged[3].Data["subsys"]) + assert.Equal(t, "GET", logged[3].Data["method"]) + assert.NotEmpty(t, logged[3].Data["req"]) + assert.NotEmpty(t, logged[3].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[3].Data["useragent"]) + req2 := logged[3].Data["req"] + + assert.Equal(t, "finished request", logged[4].Message) + assert.Equal(t, "http", logged[4].Data["subsys"]) + assert.Equal(t, "GET", logged[4].Data["method"]) + assert.Equal(t, req2, logged[4].Data["req"]) + assert.Equal(t, "/not_found", logged[4].Data["path"]) + assert.Equal(t, "/not_found", logged[4].Data["route"]) + + assert.Equal(t, "starting request", logged[5].Message) + assert.Equal(t, "http", logged[5].Data["subsys"]) + assert.Equal(t, "GET", logged[5].Data["method"]) + assert.NotEmpty(t, logged[5].Data["req"]) + assert.NotEmpty(t, logged[5].Data["path"]) + assert.Equal(t, "Go-http-client/1.1", logged[5].Data["useragent"]) + req3 := logged[5].Data["req"] + + assert.Equal(t, "finished request", logged[6].Message) + assert.Equal(t, "http", logged[6].Data["subsys"]) + assert.Equal(t, "GET", logged[6].Data["method"]) + assert.Equal(t, req3, logged[6].Data["req"]) + assert.Equal(t, "/really_not_found", logged[6].Data["path"]) + assert.Equal(t, "", logged[6].Data["route"]) + } +} diff --git a/support/http/main.go b/support/http/main.go index 273486b97a..48232753f3 100644 --- a/support/http/main.go +++ b/support/http/main.go @@ -7,21 +7,22 @@ package http import ( stdhttp "net/http" + "net/url" "os" "time" + "github.com/stellar/go/support/config" "github.com/stellar/go/support/errors" "github.com/stellar/go/support/log" - "golang.org/x/net/http2" "gopkg.in/tylerb/graceful.v1" ) -// DefaultListenAddr represents the default address and port on which a server +// defaultListenAddr represents the default address and port on which a server // will listen, provided it is not overridden by setting the `ListenAddr` field // on a `Config` struct. -const DefaultListenAddr = "0.0.0.0:8080" +const defaultListenAddr = "0.0.0.0:8080" -// DefaultShutdownGracePeriod represents the default time in which the running +// defaultShutdownGracePeriod represents the default time in which the running // process will allow outstanding http requests to complete before aborting // them. It will be used when a grace period of 0 is used, which normally // signifies "no timeout" to our graceful shutdown package. We choose not to @@ -29,16 +30,27 @@ const DefaultListenAddr = "0.0.0.0:8080" // or something if you need a timeout that is effectively "no timeout"; We // believe that most servers should use a sane timeout and prefer one for the // default configuration. 
-const DefaultShutdownGracePeriod = 10 * time.Second +const defaultShutdownGracePeriod = 10 * time.Second + +const defaultReadTimeout = 5 * time.Second + +// SimpleHTTPClientInterface helps mocking http.Client in tests +type SimpleHTTPClientInterface interface { + PostForm(url string, data url.Values) (*stdhttp.Response, error) + Get(url string) (*stdhttp.Response, error) +} // Config represents the configuration of an http server that can be provided to // `Run`. type Config struct { Handler stdhttp.Handler ListenAddr string - TLSCert string - TLSKey string + TLS *config.TLS ShutdownGracePeriod time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + IdleTimeout time.Duration + TCPKeepAlive time.Duration OnStarting func() OnStopping func() OnStopped func() @@ -52,15 +64,13 @@ type Config struct { func Run(conf Config) { srv := setup(conf) - http2.ConfigureServer(srv.Server, nil) - if conf.OnStarting != nil { conf.OnStarting() } var err error - if conf.TLSCert != "" { - err = srv.ListenAndServeTLS(conf.TLSCert, conf.TLSKey) + if conf.TLS != nil { + err = srv.ListenAndServeTLS(conf.TLS.CertificateFile, conf.TLS.PrivateKeyFile) } else { err = srv.ListenAndServe() } @@ -85,20 +95,27 @@ func setup(conf Config) *graceful.Server { } if conf.ListenAddr == "" { - conf.ListenAddr = DefaultListenAddr + conf.ListenAddr = defaultListenAddr + } + + if conf.ShutdownGracePeriod == time.Duration(0) { + conf.ShutdownGracePeriod = defaultShutdownGracePeriod } - timeout := DefaultShutdownGracePeriod - if conf.ShutdownGracePeriod != 0 { - timeout = conf.ShutdownGracePeriod + if conf.ReadTimeout == time.Duration(0) { + conf.ReadTimeout = defaultReadTimeout } return &graceful.Server{ - Timeout: timeout, + Timeout: conf.ShutdownGracePeriod, + TCPKeepAlive: conf.TCPKeepAlive, Server: &stdhttp.Server{ - Addr: conf.ListenAddr, - Handler: conf.Handler, + Addr: conf.ListenAddr, + Handler: conf.Handler, + ReadTimeout: conf.ReadTimeout, + WriteTimeout: conf.WriteTimeout, + IdleTimeout: conf.IdleTimeout, }, ShutdownInitiated: func() { diff --git a/support/http/main_test.go b/support/http/main_test.go index 655d05722b..7f05255174 100644 --- a/support/http/main_test.go +++ b/support/http/main_test.go @@ -1,13 +1,15 @@ package http import ( + "fmt" stdhttp "net/http" "testing" + "time" "github.com/stretchr/testify/assert" ) -func TestRun_setup(t *testing.T) { +func TestRun_setupDefault(t *testing.T) { // test that using no handler panics assert.Panics(t, func() { @@ -21,6 +23,45 @@ func TestRun_setup(t *testing.T) { Handler: stdhttp.NotFoundHandler(), }) - assert.Equal(t, DefaultShutdownGracePeriod, srv.Timeout) - assert.Equal(t, DefaultListenAddr, srv.Server.Addr) + assert.Equal(t, defaultShutdownGracePeriod, srv.Timeout) + assert.Equal(t, defaultReadTimeout, srv.ReadTimeout) + assert.Equal(t, time.Duration(0), srv.WriteTimeout) + assert.Equal(t, time.Duration(0), srv.IdleTimeout) + assert.Equal(t, defaultListenAddr, srv.Server.Addr) + assert.Equal(t, time.Duration(0), srv.TCPKeepAlive) +} + +func TestRun_setupNonDefault(t *testing.T) { + + testHandler := stdhttp.HandlerFunc(func(w stdhttp.ResponseWriter, r *stdhttp.Request) {}) + + onStarting := func() { + fmt.Println("starting server") + } + onStopping := func() { + fmt.Println("stopping server") + } + onStopped := func() { + fmt.Println("stopped server") + } + + srv := setup(Config{ + Handler: testHandler, + ListenAddr: "1234", + ShutdownGracePeriod: 25 * time.Second, + ReadTimeout: 5 * time.Second, + WriteTimeout: 35 * time.Second, + IdleTimeout: 2 * 
time.Minute, + TCPKeepAlive: 3 * time.Minute, + OnStarting: onStarting, + OnStopping: onStopping, + OnStopped: onStopped, + }) + + assert.Equal(t, "1234", srv.Addr) + assert.Equal(t, 25*time.Second, srv.Timeout) + assert.Equal(t, 5*time.Second, srv.ReadTimeout) + assert.Equal(t, 35*time.Second, srv.WriteTimeout) + assert.Equal(t, 2*time.Minute, srv.IdleTimeout) + assert.Equal(t, 3*time.Minute, srv.TCPKeepAlive) } diff --git a/support/http/mutil/LICENSE.txt b/support/http/mutil/LICENSE.txt new file mode 100644 index 0000000000..f8e7bcdea7 --- /dev/null +++ b/support/http/mutil/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2014, 2015, 2016 Carl Jackson (carl@avtok.com) + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/support/http/mutil/main.go b/support/http/mutil/main.go new file mode 100644 index 0000000000..e8d5b28d31 --- /dev/null +++ b/support/http/mutil/main.go @@ -0,0 +1,3 @@ +// Package mutil contains various functions that are helpful when writing http +// middleware. +package mutil diff --git a/support/http/mutil/writer_proxy.go b/support/http/mutil/writer_proxy.go new file mode 100644 index 0000000000..9f6d776b42 --- /dev/null +++ b/support/http/mutil/writer_proxy.go @@ -0,0 +1,139 @@ +package mutil + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// WriterProxy is a proxy around an http.ResponseWriter that allows you to hook +// into various parts of the response process. +type WriterProxy interface { + http.ResponseWriter + // Status returns the HTTP status of the request, or 0 if one has not + // yet been sent. + Status() int + // BytesWritten returns the total number of bytes sent to the client. + BytesWritten() int + // Tee causes the response body to be written to the given io.Writer in + // addition to proxying the writes through. Only one io.Writer can be + // tee'd to at once: setting a second one will overwrite the first. + // Writes will be sent to the proxy before being written to this + // io.Writer. It is illegal for the tee'd writer to be modified + // concurrently with writes. + Tee(io.Writer) + // Unwrap returns the original proxied target. + Unwrap() http.ResponseWriter +} + +// WrapWriter wraps an http.ResponseWriter, returning a proxy that allows you to +// hook into various parts of the response process. 
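+//
+// A hypothetical middleware sketch showing the intended use (the log call is
+// illustrative only):
+//
+//	func statusLogger(next http.Handler) http.Handler {
+//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//			ww := mutil.WrapWriter(w)
+//			next.ServeHTTP(ww, r)
+//			log.Printf("status=%d bytes=%d", ww.Status(), ww.BytesWritten())
+//		})
+//	}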
+func WrapWriter(w http.ResponseWriter) WriterProxy { + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + + bw := basicWriter{ResponseWriter: w} + if cn && fl && hj && rf { + return &fancyWriter{bw} + } + if fl { + return &flushWriter{bw} + } + return &bw +} + +// basicWriter wraps a http.ResponseWriter that implements the minimal +// http.ResponseWriter interface. +type basicWriter struct { + http.ResponseWriter + wroteHeader bool + code int + bytes int + tee io.Writer +} + +func (b *basicWriter) WriteHeader(code int) { + if !b.wroteHeader { + b.code = code + b.wroteHeader = true + b.ResponseWriter.WriteHeader(code) + } +} +func (b *basicWriter) Write(buf []byte) (int, error) { + b.WriteHeader(http.StatusOK) + n, err := b.ResponseWriter.Write(buf) + if b.tee != nil { + _, err2 := b.tee.Write(buf[:n]) + // Prefer errors generated by the proxied writer. + if err == nil { + err = err2 + } + } + b.bytes += n + return n, err +} +func (b *basicWriter) maybeWriteHeader() { + if !b.wroteHeader { + b.WriteHeader(http.StatusOK) + } +} +func (b *basicWriter) Status() int { + return b.code +} +func (b *basicWriter) BytesWritten() int { + return b.bytes +} +func (b *basicWriter) Tee(w io.Writer) { + b.tee = w +} +func (b *basicWriter) Unwrap() http.ResponseWriter { + return b.ResponseWriter +} + +// fancyWriter is a writer that additionally satisfies http.CloseNotifier, +// http.Flusher, http.Hijacker, and io.ReaderFrom. It exists for the common case +// of wrapping the http.ResponseWriter that package http gives you, in order to +// make the proxied object support the full method set of the proxied object. +type fancyWriter struct { + basicWriter +} + +func (f *fancyWriter) CloseNotify() <-chan bool { + cn := f.basicWriter.ResponseWriter.(http.CloseNotifier) + return cn.CloseNotify() +} +func (f *fancyWriter) Flush() { + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} +func (f *fancyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj := f.basicWriter.ResponseWriter.(http.Hijacker) + return hj.Hijack() +} +func (f *fancyWriter) ReadFrom(r io.Reader) (int64, error) { + if f.basicWriter.tee != nil { + return io.Copy(&f.basicWriter, r) + } + rf := f.basicWriter.ResponseWriter.(io.ReaderFrom) + f.basicWriter.maybeWriteHeader() + return rf.ReadFrom(r) +} + +var _ http.CloseNotifier = &fancyWriter{} +var _ http.Flusher = &fancyWriter{} +var _ http.Hijacker = &fancyWriter{} +var _ io.ReaderFrom = &fancyWriter{} + +type flushWriter struct { + basicWriter +} + +func (f *flushWriter) Flush() { + fl := f.basicWriter.ResponseWriter.(http.Flusher) + fl.Flush() +} + +var _ http.Flusher = &flushWriter{} diff --git a/support/http/mux.go b/support/http/mux.go new file mode 100644 index 0000000000..ca041d3797 --- /dev/null +++ b/support/http/mux.go @@ -0,0 +1,36 @@ +package http + +import ( + "github.com/go-chi/chi" + "github.com/go-chi/chi/middleware" + "github.com/rs/cors" + "github.com/stellar/go/support/log" +) + +// NewMux returns a new server mux configured with the common defaults used across all +// stellar services. +func NewMux(l *log.Entry) *chi.Mux { + mux := chi.NewMux() + + mux.Use(middleware.RequestID) + mux.Use(middleware.Recoverer) + mux.Use(SetLoggerMiddleware(l)) + mux.Use(LoggingMiddleware) + + return mux +} + +// NewAPIMux returns a new server mux configured with the common defaults used for a web API in +// stellar. 
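+//
+// A hypothetical usage sketch (the handler and listen address are illustrative
+// only):
+//
+//	mux := NewAPIMux(log.DefaultLogger)
+//	mux.Get("/health", func(w stdhttp.ResponseWriter, r *stdhttp.Request) {
+//		w.WriteHeader(stdhttp.StatusNoContent)
+//	})
+//	Run(Config{Handler: mux, ListenAddr: "0.0.0.0:8000"})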
+func NewAPIMux(l *log.Entry) *chi.Mux { + mux := NewMux(l) + + c := cors.New(cors.Options{ + AllowedOrigins: []string{"*"}, + AllowedHeaders: []string{"*"}, + AllowedMethods: []string{"GET", "PUT", "POST", "PATCH", "DELETE", "HEAD", "OPTIONS"}, + }) + + mux.Use(c.Handler) + return mux +} diff --git a/support/http/strip_trailing_slash_middleware.go b/support/http/strip_trailing_slash_middleware.go new file mode 100644 index 0000000000..5867130836 --- /dev/null +++ b/support/http/strip_trailing_slash_middleware.go @@ -0,0 +1,34 @@ +package http + +import ( + stdhttp "net/http" + "strings" +) + +// StripTrailingSlashMiddleware strips trailing slash. +func StripTrailingSlashMiddleware(ignoredPrefixes ...string) func(next stdhttp.Handler) stdhttp.Handler { + return func(next stdhttp.Handler) stdhttp.Handler { + fn := func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + path := r.URL.Path + + // Do not change ignored prefixes + for _, prefix := range ignoredPrefixes { + if strings.HasPrefix(path, prefix) { + next.ServeHTTP(w, r) + return + } + } + + l := len(path) + + // if the path is longer than 1 char (i.e., not '/') + // and has a trailing slash, remove it. + if l > 1 && path[l-1] == '/' { + r.URL.Path = path[0 : l-1] + } + + next.ServeHTTP(w, r) + } + return stdhttp.HandlerFunc(fn) + } +} diff --git a/support/http/xff_middleware.go b/support/http/xff_middleware.go new file mode 100644 index 0000000000..4e09e7cd43 --- /dev/null +++ b/support/http/xff_middleware.go @@ -0,0 +1,55 @@ +package http + +import ( + stdhttp "net/http" + "strings" +) + +// XFFMiddlewareConfig provides a configuration for XFFMiddleware. +type XFFMiddlewareConfig struct { + BehindCloudflare bool + BehindAWSLoadBalancer bool +} + +// XFFMiddleware is a middleware that replaces http.Request.RemoteAddr with a +// visitor value based on a given config: +// +// * If BehindCloudflare is true CF-Connecting-IP header is used. +// * If BehindAWSLoadBalancer is true the last value of X-Forwarded-For header +// is used. +// * If none of above is set the first value of X-Forwarded-For header is +// used. Note: it's easy to spoof the real IP address if the application is +// not behind a proxy that maintains a X-Forwarded-For header. +// +// Please note that the new RemoteAddr value may not contain the port part! 
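A minimal wiring sketch for the two middlewares in this package, assuming the service sits behind an AWS load balancer; the ignored /static prefix is illustrative.

package example

import (
	stdhttp "net/http"

	supporthttp "github.com/stellar/go/support/http"
	"github.com/stellar/go/support/log"
)

func newRouter() stdhttp.Handler {
	mux := supporthttp.NewMux(log.DefaultLogger)

	// Trust the last X-Forwarded-For value, which an AWS load balancer
	// appends; only one of the two Behind* flags may be set at a time.
	mux.Use(supporthttp.XFFMiddleware(supporthttp.XFFMiddlewareConfig{
		BehindAWSLoadBalancer: true,
	}))

	// Treat "/foo/" and "/foo" as the same route, but leave paths under the
	// (illustrative) /static prefix untouched.
	mux.Use(supporthttp.StripTrailingSlashMiddleware("/static"))

	return mux
}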
+func XFFMiddleware(config XFFMiddlewareConfig) func(next stdhttp.Handler) stdhttp.Handler { + if config.BehindCloudflare && config.BehindAWSLoadBalancer { + panic("Only one of BehindCloudflare and BehindAWSLoadBalancer options can be selected") + } + + return func(next stdhttp.Handler) stdhttp.Handler { + fn := func(w stdhttp.ResponseWriter, r *stdhttp.Request) { + var newRemoteAddr string + + if config.BehindCloudflare { + newRemoteAddr = r.Header.Get("CF-Connecting-IP") + } else { + ips := strings.Split(r.Header.Get("X-Forwarded-For"), ",") + if len(ips) > 0 { + if config.BehindAWSLoadBalancer { + newRemoteAddr = ips[len(ips)-1] + } else { + newRemoteAddr = ips[0] + } + } + } + + newRemoteAddr = strings.TrimSpace(newRemoteAddr) + if newRemoteAddr != "" { + r.RemoteAddr = newRemoteAddr + } + next.ServeHTTP(w, r) + } + return stdhttp.HandlerFunc(fn) + } +} diff --git a/support/http/xff_middleware_test.go b/support/http/xff_middleware_test.go new file mode 100644 index 0000000000..2c772fe7fd --- /dev/null +++ b/support/http/xff_middleware_test.go @@ -0,0 +1,84 @@ +package http + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +type MockHandler struct { + mock.Mock +} + +func (m *MockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + m.Called(w, r) +} + +func TestXFFMiddlewareWrongConfig(t *testing.T) { + assert.Panics(t, func() { + XFFMiddleware(XFFMiddlewareConfig{ + BehindCloudflare: true, + BehindAWSLoadBalancer: true, + }) + }) +} + +func TestXFFMiddlewareCloudFlare(t *testing.T) { + xff := XFFMiddleware(XFFMiddlewareConfig{ + BehindCloudflare: true, + }) + + mockHandler := &MockHandler{} + mockHandler.On("ServeHTTP", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + r := args.Get(1).(*http.Request) + assert.Equal(t, "2.2.2.2", r.RemoteAddr) + }).Once() + handler := xff(mockHandler) + handler.ServeHTTP(nil, &http.Request{ + RemoteAddr: "127.0.0.1", + Header: xffHeaders("CF-Connecting-IP", "2.2.2.2"), + }) +} + +func TestXFFMiddlewareAWS(t *testing.T) { + xff := XFFMiddleware(XFFMiddlewareConfig{ + BehindAWSLoadBalancer: true, + }) + + mockHandler := &MockHandler{} + mockHandler.On("ServeHTTP", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + r := args.Get(1).(*http.Request) + assert.Equal(t, "2.2.2.2", r.RemoteAddr) + }).Once() + handler := xff(mockHandler) + handler.ServeHTTP(nil, &http.Request{ + RemoteAddr: "127.0.0.1", + Header: xffHeaders("X-Forwarded-For", "1.1.1.1,2.2.2.2"), + }) +} + +func TestXFFMiddlewareNormalProxy(t *testing.T) { + xff := XFFMiddleware(XFFMiddlewareConfig{}) + + mockHandler := &MockHandler{} + mockHandler.On("ServeHTTP", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + r := args.Get(1).(*http.Request) + assert.Equal(t, "1.1.1.1", r.RemoteAddr) + }).Once() + handler := xff(mockHandler) + handler.ServeHTTP(nil, &http.Request{ + RemoteAddr: "127.0.0.1", + Header: xffHeaders("X-Forwarded-For", "1.1.1.1,2.2.2.2"), + }) +} + +func xffHeaders(name, value string) http.Header { + headers := http.Header{} + headers.Add(name, value) + return headers +} diff --git a/support/keypairgen/keypairgen.go b/support/keypairgen/keypairgen.go new file mode 100644 index 0000000000..ef55564787 --- /dev/null +++ b/support/keypairgen/keypairgen.go @@ -0,0 +1,35 @@ +package keypairgen + +import "github.com/stellar/go/keypair" + +// Generator generates new keys with the underlying source. 
The underlying +// source defaults to the RandomSource if not specified. +type Generator struct { + Source Source +} + +func (g *Generator) getSource() Source { + if g == nil || g.Source == nil { + return RandomSource{} + } + return g.Source +} + +// Generate returns a new key using the underlying source. +func (g *Generator) Generate() (*keypair.Full, error) { + return g.getSource().Generate() +} + +// Source provides keys. +type Source interface { + Generate() (*keypair.Full, error) +} + +// RandomSource provides new keys that are randomly generated using the +// keypair.Random function. +type RandomSource struct{} + +// Generated returns a new key using keypair.Random. +func (RandomSource) Generate() (*keypair.Full, error) { + return keypair.Random() +} diff --git a/support/keypairgen/keypairgen_test.go b/support/keypairgen/keypairgen_test.go new file mode 100644 index 0000000000..81d28aa613 --- /dev/null +++ b/support/keypairgen/keypairgen_test.go @@ -0,0 +1,51 @@ +package keypairgen_test + +import ( + "testing" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/keypairgen" + "github.com/stellar/go/support/keypairgen/keypairgentest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGenerator_Generate_sourceNotSet(t *testing.T) { + g := keypairgen.Generator{} + k1, err := g.Generate() + require.NoError(t, err) + t.Log("k1", k1.Address(), k1.Seed()) + k2, err := g.Generate() + require.NoError(t, err) + t.Log("k2", k2.Address(), k2.Seed()) + assert.NotEqual(t, k2.Address(), k1.Address()) +} + +func TestGenerator_Generate_sourceNotSetPtrNil(t *testing.T) { + g := (*keypairgen.Generator)(nil) + k1, err := g.Generate() + require.NoError(t, err) + t.Log("k1", k1.Address(), k1.Seed()) + k2, err := g.Generate() + require.NoError(t, err) + t.Log("k2", k2.Address(), k2.Seed()) + assert.NotEqual(t, k2.Address(), k1.Address()) +} + +func TestGenerator_Generate_sourceSet(t *testing.T) { + s := keypairgentest.SliceSource{ + keypair.MustRandom(), + keypair.MustRandom(), + keypair.MustRandom(), + } + g := keypairgen.Generator{ + Source: &s, + } + k1, err := g.Generate() + require.NoError(t, err) + t.Log("k1", k1.Address(), k1.Seed()) + k2, err := g.Generate() + require.NoError(t, err) + t.Log("k2", k2.Address(), k2.Seed()) + assert.NotEqual(t, k2.Address(), k1.Address()) +} diff --git a/support/keypairgen/keypairgentest/slice.go b/support/keypairgen/keypairgentest/slice.go new file mode 100644 index 0000000000..c2b2cae475 --- /dev/null +++ b/support/keypairgen/keypairgentest/slice.go @@ -0,0 +1,19 @@ +package keypairgentest + +import ( + "github.com/stellar/go/keypair" +) + +// SliceSource is a keypairgen.Generator source that has the values returned +// from a slice of keys that are provided at generation one at a time. +type SliceSource []*keypair.Full + +// Generate returns the first key in the slice, and then shortens the slice +// removing the returned key, so that each call returns the next key in the +// original source. If called when no keys are available the function will +// panic. 
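A small sketch of the intended split between production and test wiring, mirroring the tests in this change; the printed values are illustrative.

package main

import (
	"fmt"

	"github.com/stellar/go/keypair"
	"github.com/stellar/go/support/keypairgen"
	"github.com/stellar/go/support/keypairgen/keypairgentest"
)

func main() {
	// Production: a zero-value (or nil) Generator falls back to RandomSource.
	g := &keypairgen.Generator{}
	kp, err := g.Generate()
	if err != nil {
		panic(err)
	}
	fmt.Println("random key:", kp.Address())

	// Tests: a SliceSource hands out pre-built keys deterministically.
	fixed := keypair.MustRandom()
	src := keypairgentest.SliceSource{fixed}
	tg := &keypairgen.Generator{Source: &src}
	got, _ := tg.Generate()
	fmt.Println(got.Address() == fixed.Address()) // true
}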
+func (s *SliceSource) Generate() (*keypair.Full, error) { + kp := (*s)[0] + *s = (*s)[1:] + return kp, nil +} diff --git a/support/keypairgen/keypairgentest/slice_test.go b/support/keypairgen/keypairgentest/slice_test.go new file mode 100644 index 0000000000..fc4005043e --- /dev/null +++ b/support/keypairgen/keypairgentest/slice_test.go @@ -0,0 +1,44 @@ +package keypairgentest_test + +import ( + "testing" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/keypairgen/keypairgentest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestSliceSource_Generate tests that the key returned from the slice source +// is equal to the next key in the slice provided when creating the slice +// source, and each subsequent call returns the key after it and so on. +func TestSliceSource_Generate(t *testing.T) { + kp1 := keypair.MustRandom() + kp2 := keypair.MustRandom() + s := keypairgentest.SliceSource{kp1, kp2} + + gkp1, err := s.Generate() + require.NoError(t, err) + assert.Equal(t, gkp1, kp1) + + gkp2, err := s.Generate() + require.NoError(t, err) + assert.Equal(t, gkp2, kp2) +} + +// TestSliceSource_Generate_noMoreAvailable tests that when Generate is called +// by the slice has been exhausted of values the function panics. +func TestSliceSource_Generate_noMoreAvailable(t *testing.T) { + kp := keypair.MustRandom() + s := keypairgentest.SliceSource{kp} + + _, err := s.Generate() + require.NoError(t, err) + + defer func() { + r := recover() + assert.NotNil(t, r) + assert.EqualError(t, r.(error), "runtime error: index out of range [0] with length 0") + }() + _, _ = s.Generate() +} diff --git a/support/log/entry.go b/support/log/entry.go index 717ffc7875..5cd8b7718d 100644 --- a/support/log/entry.go +++ b/support/log/entry.go @@ -1,14 +1,49 @@ package log import ( + "context" "fmt" + "io" + "io/ioutil" - "github.com/Sirupsen/logrus" + gerr "github.com/go-errors/errors" + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" "github.com/stellar/go/support/errors" ) +// Ctx appends all fields from `e` to the new logger created from `ctx` +// logger and returns it. +func (e *Entry) Ctx(ctx context.Context) *Entry { + if ctx == nil { + return e + } + + found := ctx.Value(&loggerContextKey) + if found == nil { + return e + } + + entry := found.(*Entry) + + // Copy all fields from e to entry + for key, value := range e.entry.Data { + entry = entry.WithField(key, value) + } + + return entry +} + func (e *Entry) SetLevel(level logrus.Level) { - e.Logger.Level = level + e.entry.Logger.SetLevel(level) +} + +func (e *Entry) DisableColors() { + e.entry.Logger.Formatter.(*logrus.TextFormatter).DisableColors = true +} + +func (e *Entry) DisableTimestamp() { + e.entry.Logger.Formatter.(*logrus.TextFormatter).DisableTimestamp = true } // WithField creates a child logger annotated with the provided key value pair. @@ -16,13 +51,27 @@ func (e *Entry) SetLevel(level logrus.Level) { // the return value from this function will cause the emitted log line to // include the provided value. func (e *Entry) WithField(key string, value interface{}) *Entry { - return &Entry{*e.Entry.WithField(key, value)} + return &Entry{ + entry: *e.entry.WithField(key, value), + } } // WithFields creates a child logger annotated with the provided key value // pairs. 
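A usage sketch for the field helpers above, with illustrative field names; New starts at WARN, so the level is lowered before logging at Info.

package main

import (
	"github.com/sirupsen/logrus"

	"github.com/stellar/go/support/log"
)

func main() {
	l := log.New()
	// New defaults to WARN; lower the level so the Info lines below are emitted.
	l.SetLevel(logrus.InfoLevel)

	// Child loggers carry their annotations on every line they emit.
	reqLog := l.WithField("request_id", "abc123").
		WithFields(log.F{"method": "GET", "path": "/accounts"})

	reqLog.Info("request started")
	reqLog.WithField("status", 200).Info("request finished")
}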
func (e *Entry) WithFields(fields F) *Entry { - return &Entry{*e.Entry.WithFields(logrus.Fields(fields))} + return &Entry{ + entry: *e.entry.WithFields(logrus.Fields(fields)), + } +} + +// AddHook adds a hook to the logger hooks. +func (e *Entry) AddHook(hook logrus.Hook) { + e.entry.Logger.AddHook(hook) +} + +// SetOutput sets the logger output. +func (e *Entry) SetOutput(output io.Writer) { + e.entry.Logger.SetOutput(output) } // WithStack annotates this error with a stack trace from `stackProvider`, if @@ -31,59 +80,134 @@ func (e *Entry) WithFields(fields F) *Entry { func (e *Entry) WithStack(stackProvider interface{}) *Entry { stack := "unknown" - if stackProvider, ok := stackProvider.(errors.StackTracer); ok { - stack = fmt.Sprint(stackProvider.StackTrace()) + if sp1, ok := stackProvider.(errors.StackTracer); ok { + stack = fmt.Sprint(sp1.StackTrace()) + } else if sp2, ok := stackProvider.(*gerr.Error); ok { + stack = fmt.Sprint(sp2.ErrorStack()) } return e.WithField("stack", stack) } +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (e *Entry) WithError(err error) *Entry { + return &Entry{ + entry: *e.entry.WithError(err), + } +} + +// Add a context to the log entry. +func (e *Entry) WithContext(ctx context.Context) *Entry { + return &Entry{ + entry: *e.entry.WithContext(ctx), + } +} + // Debugf logs a message at the debug severity. func (e *Entry) Debugf(format string, args ...interface{}) { - e.Entry.Debugf(format, args...) + e.entry.Debugf(format, args...) } // Debug logs a message at the debug severity. func (e *Entry) Debug(args ...interface{}) { - e.Entry.Debug(args...) + e.entry.Debug(args...) } // Infof logs a message at the Info severity. func (e *Entry) Infof(format string, args ...interface{}) { - e.Entry.Infof(format, args...) + e.entry.Infof(format, args...) } // Info logs a message at the Info severity. func (e *Entry) Info(args ...interface{}) { - e.Entry.Info(args...) + e.entry.Info(args...) } // Warnf logs a message at the Warn severity. func (e *Entry) Warnf(format string, args ...interface{}) { - e.Entry.Warnf(format, args...) + e.entry.Warnf(format, args...) } // Warn logs a message at the Warn severity. func (e *Entry) Warn(args ...interface{}) { - e.Entry.Warn(args...) + e.entry.Warn(args...) } // Errorf logs a message at the Error severity. func (e *Entry) Errorf(format string, args ...interface{}) { - e.Entry.Errorf(format, args...) + e.entry.Errorf(format, args...) } // Error logs a message at the Error severity. func (e *Entry) Error(args ...interface{}) { - e.Entry.Error(args...) + e.entry.Error(args...) +} + +// Fatalf logs a message at the Fatal severity. +func (e *Entry) Fatalf(format string, args ...interface{}) { + e.entry.Fatalf(format, args...) +} + +// Fatal logs a message at the Fatal severity. +func (e *Entry) Fatal(args ...interface{}) { + e.entry.Fatal(args...) } // Panicf logs a message at the Panic severity. func (e *Entry) Panicf(format string, args ...interface{}) { - e.Entry.Panicf(format, args...) + e.entry.Panicf(format, args...) } // Panic logs a message at the Panic severity. func (e *Entry) Panic(args ...interface{}) { - e.Entry.Panic(args...) + e.entry.Panic(args...) +} + +func (e *Entry) Print(args ...interface{}) { + e.entry.Print(args...) +} + +// StartTest shifts this logger into "test" mode, ensuring that log lines will +// be recorded (rather than outputted). 
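A short error-logging sketch combining WithError and WithStack; doWork and its error text are illustrative.

package main

import (
	"github.com/stellar/go/support/errors"
	"github.com/stellar/go/support/log"
)

func doWork() error {
	return errors.New("db connection refused")
}

func main() {
	if err := doWork(); err != nil {
		// WithError attaches the error as a field; WithStack adds a "stack"
		// field when the error carries stack information and falls back to
		// "unknown" otherwise.
		log.DefaultLogger.WithError(err).WithStack(err).Error("work failed")
	}
}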
The returned function concludes the +// test, switches the logger back into normal mode and returns a slice of all +// raw logrus entries that were created during the test. +func (e *Entry) StartTest(level logrus.Level) func() []logrus.Entry { + if e.isTesting { + panic("cannot start logger test: already testing") + } + + e.isTesting = true + + hook := &test.Hook{} + e.entry.Logger.AddHook(hook) + + old := e.entry.Logger.Out + e.entry.Logger.Out = ioutil.Discard + + oldLevel := e.entry.Logger.GetLevel() + e.entry.Logger.SetLevel(level) + + return func() []logrus.Entry { + e.entry.Logger.SetLevel(oldLevel) + e.entry.Logger.SetOutput(old) + e.removeHook(hook) + e.isTesting = false + return hook.Entries + } +} + +// removeHook removes a hook, in the most complicated way possible. +func (e *Entry) removeHook(target logrus.Hook) { + for lvl, hooks := range e.entry.Logger.Hooks { + kept := []logrus.Hook{} + + for _, hook := range hooks { + if hook != target { + kept = append(kept, hook) + } + } + + e.entry.Logger.Hooks[lvl] = kept + } } diff --git a/support/log/entry_test.go b/support/log/entry_test.go new file mode 100644 index 0000000000..603281cb61 --- /dev/null +++ b/support/log/entry_test.go @@ -0,0 +1,27 @@ +package log + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEntry_StartTest(t *testing.T) { + var out bytes.Buffer + e := New() + e.SetOutput(&out) + + // when in test mode, out gets no output + done := e.StartTest(WarnLevel) + e.Warn("hello") + logged := done() + + assert.Empty(t, out.String()) + if assert.Len(t, logged, 1) { + assert.Equal(t, "hello", logged[0].Message) + } + + e.Warn("goodbye") + assert.Contains(t, out.String(), "goodbye", "output was not logged after test") +} diff --git a/support/log/loggly_hook.go b/support/log/loggly_hook.go index 393fe16e0c..0130123a9e 100644 --- a/support/log/loggly_hook.go +++ b/support/log/loggly_hook.go @@ -4,13 +4,13 @@ import ( "os" "time" - "github.com/Sirupsen/logrus" "github.com/segmentio/go-loggly" + "github.com/sirupsen/logrus" ) // NewLogglyHook creates a new hook -func NewLogglyHook(token string) *LogglyHook { - client := loggly.New(token, "horizon") +func NewLogglyHook(token, tag string) *LogglyHook { + client := loggly.New(token, tag) host, err := os.Hostname() if err != nil { diff --git a/support/log/main.go b/support/log/main.go index 29a89ab0bc..1e9893c34a 100644 --- a/support/log/main.go +++ b/support/log/main.go @@ -1,11 +1,11 @@ package log import ( + "context" "os" - "github.com/Sirupsen/logrus" - "github.com/segmentio/go-loggly" - "golang.org/x/net/context" + loggly "github.com/segmentio/go-loggly" + "github.com/sirupsen/logrus" ) // DefaultLogger represents the default logger that is not bound to any specific @@ -22,7 +22,9 @@ const ( // Entry repre type Entry struct { - logrus.Entry + entry logrus.Entry + + isTesting bool } // F wraps the logrus.Fields type for the convenience of typing less. @@ -37,24 +39,27 @@ type LogglyHook struct { // New creates a new logger, starting at a WARN level and including the current // processes pid as a field. 
-func New() (result *Entry) { +func New() *Entry { l := logrus.New() l.Level = logrus.WarnLevel - - result = &Entry{*logrus.NewEntry(l).WithField("pid", os.Getpid())} - return + l.Formatter.(*logrus.TextFormatter).FullTimestamp = true + l.Formatter.(*logrus.TextFormatter).TimestampFormat = "2006-01-02T15:04:05.000Z07:00" + return &Entry{entry: *logrus.NewEntry(l).WithField("pid", os.Getpid())} } // Set establishes a new context to which the provided sub-logger is bound func Set(parent context.Context, logger *Entry) context.Context { - return context.WithValue(parent, &contextKey, logger) + return context.WithValue(parent, &loggerContextKey, logger) } // Ctx returns the logger bound to the provided context, otherwise // providing the default logger. func Ctx(ctx context.Context) *Entry { - found := ctx.Value(&contextKey) + if ctx == nil { + return DefaultLogger + } + found := ctx.Value(&loggerContextKey) if found == nil { return DefaultLogger } @@ -130,6 +135,16 @@ func Error(args ...interface{}) { DefaultLogger.Error(args...) } +// Fatalf logs a message at the Fatal severity. +func Fatalf(format string, args ...interface{}) { + DefaultLogger.Fatalf(format, args...) +} + +// Fatal logs a message at the Fatal severity. +func Fatal(args ...interface{}) { + DefaultLogger.Fatal(args...) +} + // Panicf logs a message at the Panic severity. func Panicf(format string, args ...interface{}) { DefaultLogger.Panicf(format, args...) @@ -140,7 +155,9 @@ func Panic(args ...interface{}) { DefaultLogger.Panic(args...) } -var contextKey = 0 +type contextKey string + +var loggerContextKey = contextKey("logger") func init() { DefaultLogger = New() diff --git a/support/log/main_test.go b/support/log/main_test.go index 803b816976..1a429fe9dc 100644 --- a/support/log/main_test.go +++ b/support/log/main_test.go @@ -2,20 +2,20 @@ package log import ( "bytes" + "context" "errors" "testing" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" serr "github.com/stellar/go/support/errors" "github.com/stretchr/testify/assert" - "golang.org/x/net/context" ) func TestSet(t *testing.T) { - assert.Nil(t, context.Background().Value(&contextKey)) + assert.Nil(t, context.Background().Value(&loggerContextKey)) l := New() ctx := Set(context.Background(), l) - assert.Equal(t, l, ctx.Value(&contextKey)) + assert.Equal(t, l, ctx.Value(&loggerContextKey)) } func TestCtx(t *testing.T) { @@ -37,8 +37,8 @@ func TestPushCtx(t *testing.T) { output := new(bytes.Buffer) l := New() - l.Logger.Formatter.(*logrus.TextFormatter).DisableColors = true - l.Logger.Out = output + l.DisableColors() + l.entry.Logger.Out = output ctx := Set(context.Background(), l.WithField("foo", "bar")) Ctx(ctx).Warn("hello") @@ -56,8 +56,8 @@ func TestPushCtx(t *testing.T) { func TestLoggingStatements(t *testing.T) { output := new(bytes.Buffer) l := New() - l.Logger.Formatter.(*logrus.TextFormatter).DisableColors = true - l.Logger.Out = output + l.DisableColors() + l.entry.Logger.Out = output // level defaults to warn l.Debug("debug") @@ -71,7 +71,7 @@ func TestLoggingStatements(t *testing.T) { // when on debug level, all statements are logged output.Reset() assert.Empty(t, output.String()) - l.Logger.Level = logrus.DebugLevel + l.SetLevel(logrus.DebugLevel) l.Debug("1") l.Info("1") l.Warn("1") @@ -89,8 +89,8 @@ func TestLoggingStatements(t *testing.T) { func TestWithStack(t *testing.T) { output := new(bytes.Buffer) l := New() - l.Logger.Formatter.(*logrus.TextFormatter).DisableColors = true - l.Logger.Out = output + l.DisableColors() + l.entry.Logger.Out 
= output // Adds stack=unknown when the provided err has not stack info l.WithStack(errors.New("broken")).Error("test") diff --git a/support/render/hal/handler.go b/support/render/hal/handler.go new file mode 100644 index 0000000000..2eaa4b1e2c --- /dev/null +++ b/support/render/hal/handler.go @@ -0,0 +1,16 @@ +package hal + +import ( + "context" + "net/http" + + "github.com/stellar/go/support/render/httpjson" +) + +func Handler(fn, param interface{}) (http.Handler, error) { + return httpjson.Handler(fn, param, httpjson.HALJSON) +} + +func ExecuteFunc(ctx context.Context, fn, param interface{}) (interface{}, bool, error) { + return httpjson.ExecuteFunc(ctx, fn, param, httpjson.HALJSON) +} diff --git a/support/render/hal/io.go b/support/render/hal/io.go new file mode 100644 index 0000000000..b5164b4419 --- /dev/null +++ b/support/render/hal/io.go @@ -0,0 +1,12 @@ +package hal + +import ( + "net/http" + + "github.com/stellar/go/support/render/httpjson" +) + +// Render write data to w, after marshalling to json +func Render(w http.ResponseWriter, data interface{}) { + httpjson.Render(w, data, httpjson.HALJSON) +} diff --git a/support/render/hal/link.go b/support/render/hal/link.go new file mode 100644 index 0000000000..51f81b4408 --- /dev/null +++ b/support/render/hal/link.go @@ -0,0 +1,20 @@ +package hal + +import ( + "strings" +) + +type Link struct { + Href string `json:"href"` + Templated bool `json:"templated,omitempty"` +} + +func (l *Link) PopulateTemplated() { + l.Templated = strings.Contains(l.Href, "{") +} + +func NewLink(href string) Link { + l := Link{Href: href} + l.PopulateTemplated() + return l +} diff --git a/support/render/hal/link_builder.go b/support/render/hal/link_builder.go new file mode 100644 index 0000000000..dd0b7d0771 --- /dev/null +++ b/support/render/hal/link_builder.go @@ -0,0 +1,69 @@ +package hal + +import ( + "fmt" + "net/url" + "strings" +) + +// StandardPagingOptions is a helper string to make creating paged collection +// URIs simpler. +const StandardPagingOptions = "{?cursor,limit,order}" + +// LinkBuilder is a helper for constructing URLs in horizon. +type LinkBuilder struct { + Base *url.URL +} + +// Link returns a hal.Link whose href is each of the +// provided parts joined by '/' +func (lb *LinkBuilder) Link(parts ...string) Link { + path := strings.Join(parts, "/") + + href := lb.expandLink(path) + + return NewLink(href) +} + +// PagedLink creates a link using the `Link` method and +// appends the common paging options +func (lb *LinkBuilder) PagedLink(parts ...string) Link { + nl := lb.Link(parts...) + nl.Href += StandardPagingOptions + nl.PopulateTemplated() + return nl +} + +// Linkf provides a helper function that returns a link with an +// href created by passing the arguments into fmt.Sprintf +func (lb *LinkBuilder) Linkf(format string, args ...interface{}) Link { + return lb.Link(fmt.Sprintf(format, args...)) +} + +// expandLink takes an href and resolves it against the LinkBuilders base url, +// if set. NOTE: this method panics if the input href cannot be parsed. It is +// meant to be used by developer author ed links, not with external data. +func (lb *LinkBuilder) expandLink(href string) string { + if lb.Base == nil { + return href + } + + u, err := url.Parse(href) + if err != nil { + panic(err) + } + + if u.Host == "" { + u.Host = lb.Base.Host + + if u.Scheme == "" { + u.Scheme = lb.Base.Scheme + } + } + + //HACK: replace the encoded path with the un-encoded path, which preserves + //the uritemplate parameters. 
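A usage sketch for LinkBuilder, assuming an illustrative base URL and account ID.

package main

import (
	"fmt"
	"net/url"

	"github.com/stellar/go/support/render/hal"
)

func main() {
	base, _ := url.Parse("https://horizon.example.org")
	lb := hal.LinkBuilder{Base: base}

	self := lb.Linkf("/accounts/%s", "GABC123")
	txs := lb.PagedLink("/accounts", "GABC123", "transactions")

	fmt.Println(self.Href)     // https://horizon.example.org/accounts/GABC123
	fmt.Println(txs.Href)      // .../accounts/GABC123/transactions{?cursor,limit,order}
	fmt.Println(txs.Templated) // true, because the href contains "{"
}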
+ result := strings.Replace(u.String(), u.EscapedPath(), u.Path, -1) + + return result +} diff --git a/support/render/hal/link_builder_test.go b/support/render/hal/link_builder_test.go new file mode 100644 index 0000000000..f2a6779329 --- /dev/null +++ b/support/render/hal/link_builder_test.go @@ -0,0 +1,42 @@ +package hal + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLinkBuilder(t *testing.T) { + // Link expansion test + check := func(href string, base string, expectedResult string) { + lb := LinkBuilder{mustParseURL(base)} + result := lb.expandLink(href) + assert.Equal(t, expectedResult, result) + } + + check("/root", "", "/root") + check("/root", "//stellar.org", "//stellar.org/root") + check("/root", "https://stellar.org", "https://stellar.org/root") + check("//else.org/root", "", "//else.org/root") + check("//else.org/root", "//stellar.org", "//else.org/root") + check("//else.org/root", "https://stellar.org", "//else.org/root") + check("https://else.org/root", "", "https://else.org/root") + check("https://else.org/root", "//stellar.org", "https://else.org/root") + check("https://else.org/root", "https://stellar.org", "https://else.org/root") + + // Regression: ensure that parameters are not escaped + check("/accounts/{id}", "https://stellar.org", "https://stellar.org/accounts/{id}") +} + +func mustParseURL(base string) *url.URL { + if base == "" { + return nil + } + + u, err := url.Parse(base) + if err != nil { + panic(err) + } + return u +} diff --git a/support/render/hal/page.go b/support/render/hal/page.go new file mode 100644 index 0000000000..0907633dec --- /dev/null +++ b/support/render/hal/page.go @@ -0,0 +1,94 @@ +package hal + +import ( + "net/url" + "strconv" + + sUrl "github.com/stellar/go/support/url" +) + +// BasePage represents the simplest page: one with no links and only embedded records. +// Can be used to build custom page-like resources +type BasePage struct { + FullURL *url.URL `json:"-"` + Embedded struct { + Records []Pageable `json:"records"` + } `json:"_embedded"` +} + +// Add appends the provided record onto the page +func (p *BasePage) Add(rec Pageable) { + p.Embedded.Records = append(p.Embedded.Records, rec) +} + +// Init initialized the Records slice. This ensures that an empty page +// renders its records as an empty array, rather than `null` +func (p *BasePage) Init() { + if p.Embedded.Records == nil { + p.Embedded.Records = make([]Pageable, 0, 1) + } +} + +// Links represents the Links in a Page +type Links struct { + Self Link `json:"self"` + Next Link `json:"next"` + Prev Link `json:"prev"` +} + +// Page represents the common page configuration (i.e. has self, next, and prev +// links) and has a helper method `PopulateLinks` to automate their +// initialization. +type Page struct { + Links Links `json:"_links"` + BasePage + Order string `json:"-"` + Limit uint64 `json:"-"` + Cursor string `json:"-"` +} + +// PopulateLinks sets the common links for a page. +func (p *Page) PopulateLinks() { + p.Init() + + rec := p.Embedded.Records + + //verify paging params + var selfUrl sUrl.URL + if p.FullURL != nil { + selfUrl = sUrl.URL(*p.FullURL). + SetParam("cursor", p.Cursor). + SetParam("order", p.Order). 
+ SetParam("limit", strconv.FormatInt(int64(p.Limit), 10)) + } + + //self: re-encode existing query params + p.Links.Self = NewLink(selfUrl.String()) + + //next: update cursor to last record (if any) + nextUrl := selfUrl + if len(rec) > 0 { + nextUrl = nextUrl.SetParam("cursor", rec[len(rec)-1].PagingToken()) + } + p.Links.Next = NewLink(nextUrl.String()) + + //prev: inverse order and update cursor to first record (if any) + prevUrl := selfUrl.SetParam("order", p.InvertedOrder()) + if len(rec) > 0 { + prevUrl = prevUrl.SetParam("cursor", rec[0].PagingToken()) + } + p.Links.Prev = NewLink(prevUrl.String()) +} + +// InvertedOrder returns the inversion of the page's current order. Used to +// populate the prev link +func (p *Page) InvertedOrder() string { + switch p.Order { + case "asc": + return "desc" + case "desc": + return "asc" + default: + return "asc" + } +} diff --git a/support/render/hal/paging_token.go b/support/render/hal/paging_token.go new file mode 100644 index 0000000000..b83fb2c115 --- /dev/null +++ b/support/render/hal/paging_token.go @@ -0,0 +1,6 @@ +package hal + +// Pageable implementors can be added to hal.Page collections +type Pageable interface { + PagingToken() string +} diff --git a/support/render/health/doc.go b/support/render/health/doc.go new file mode 100644 index 0000000000..8a3fef0c13 --- /dev/null +++ b/support/render/health/doc.go @@ -0,0 +1,6 @@ +// Package health contains simple utilities for implementing a /health endpoint +// that adheres to the requirements defined in the draft IETF network working +// group standard, Health Check Response Format for HTTP APIs. +// +// https://tools.ietf.org/id/draft-inadarei-api-health-check-01.html +package health diff --git a/support/render/health/example_test.go b/support/render/health/example_test.go new file mode 100644 index 0000000000..3ff0cc136b --- /dev/null +++ b/support/render/health/example_test.go @@ -0,0 +1,68 @@ +package health_test + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + + supporthttp "github.com/stellar/go/support/http" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/render/health" + "github.com/stellar/go/support/render/httpjson" +) + +func ExampleResponse() { + mux := supporthttp.NewAPIMux(log.DefaultLogger) + + mux.Get("/health", func(w http.ResponseWriter, r *http.Request) { + healthCheckResult := false + response := health.Response{} + if healthCheckResult { + response.Status = health.StatusPass + } else { + response.Status = health.StatusFail + } + httpjson.Render(w, response, httpjson.HEALTHJSON) + }) + + r := httptest.NewRequest("GET", "/health", nil) + w := httptest.NewRecorder() + mux.ServeHTTP(w, r) + resp := w.Result() + + fmt.Println("Content Type:", resp.Header.Get("Content-Type")) + fmt.Println("Status Code:", resp.StatusCode) + body, _ := ioutil.ReadAll(resp.Body) + fmt.Println("Body:", string(body)) + + // Output: + // Content Type: application/health+json; charset=utf-8 + // Status Code: 200 + // Body: { + // "status": "fail" + // } +} + +func ExampleHandler() { + mux := supporthttp.NewAPIMux(log.DefaultLogger) + + mux.Get("/health", health.PassHandler{}.ServeHTTP) + + r := httptest.NewRequest("GET", "/health", nil) + w := httptest.NewRecorder() + mux.ServeHTTP(w, r) + resp := w.Result() + + fmt.Println("Content Type:", resp.Header.Get("Content-Type")) + fmt.Println("Status Code:", resp.StatusCode) + body, _ := ioutil.ReadAll(resp.Body) + fmt.Println("Body:", string(body)) + + // Output: + // Content Type: application/health+json; 
charset=utf-8 + // Status Code: 200 + // Body: { + // "status": "pass" + // } +} diff --git a/support/render/health/handler.go b/support/render/health/handler.go new file mode 100644 index 0000000000..1b5ff4f82a --- /dev/null +++ b/support/render/health/handler.go @@ -0,0 +1,18 @@ +package health + +import ( + "net/http" + + "github.com/stellar/go/support/render/httpjson" +) + +// PassHandler implements a simple handler that returns the most basic health +// response with a status of 'pass'. +type PassHandler struct{} + +func (h PassHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + response := Response{ + Status: StatusPass, + } + httpjson.Render(w, response, httpjson.HEALTHJSON) +} diff --git a/support/render/health/handler_test.go b/support/render/health/handler_test.go new file mode 100644 index 0000000000..6d2f589ef5 --- /dev/null +++ b/support/render/health/handler_test.go @@ -0,0 +1,28 @@ +package health + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestHealth(t *testing.T) { + h := PassHandler{} + + r := httptest.NewRequest("GET", "/", nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + resp := w.Result() + + require.Equal(t, http.StatusOK, resp.StatusCode) + assert.Equal(t, "application/health+json; charset=utf-8", resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + assert.JSONEq(t, `{"status":"pass"}`, string(body)) +} diff --git a/support/render/health/response.go b/support/render/health/response.go new file mode 100644 index 0000000000..d64c4b9d1f --- /dev/null +++ b/support/render/health/response.go @@ -0,0 +1,10 @@ +package health + +// Response implements the most basic required fields for the health response +// based on the format defined in the draft IETF network working group +// standard, Health Check Response Format for HTTP APIs. +// +// https://tools.ietf.org/id/draft-inadarei-api-health-check-01.html +type Response struct { + Status Status `json:"status"` +} diff --git a/support/render/health/status.go b/support/render/health/status.go new file mode 100644 index 0000000000..1f4aac8713 --- /dev/null +++ b/support/render/health/status.go @@ -0,0 +1,11 @@ +package health + +// Status indicates whether the service is health or not. +type Status string + +const ( + // StatusPass indicates that the service is healthy. + StatusPass Status = "pass" + // StatusFail indicates that the service is unhealthy. + StatusFail Status = "fail" +) diff --git a/support/render/httpjson/encoding.go b/support/render/httpjson/encoding.go new file mode 100644 index 0000000000..3a0195368f --- /dev/null +++ b/support/render/httpjson/encoding.go @@ -0,0 +1,86 @@ +package httpjson + +import ( + "encoding/json" + + "github.com/stellar/go/support/errors" +) + +// ErrNotJSONObject is returned when Object.UnmarshalJSON is called +// with bytes not representing a valid json object. +// A valid json object means it starts with `null` or `{`, not `[`. +var ErrNotJSONObject = errors.New("input is not a json object") + +// RawObject can be used directly to make sure that what's in the request body +// is not a json array: +// +// func example(ctx context.Context, in httpjson.RawObject) +// +// It can also be used as a field in a struct: +// +// type example struct { +// name string +// extra httpjson.RawObject +// } +// +// In this case, Unmarshaler will check whether extra is a json object ot not. 
+// It will error if extra is a json number/string/array/boolean. +// +// RawObject also implements Marshaler so that we would populate an empty json +// object is extra is not set. +type RawObject []byte + +func (o RawObject) MarshalJSON() ([]byte, error) { + if len(o) == 0 { + return []byte("{}"), nil + } + return o, nil +} + +func (o *RawObject) UnmarshalJSON(in []byte) error { + var first byte + for _, c := range in { + if !isSpace(c) { + first = c + break + } + } + // input does not start with 'n' ("null") or '{' + if first != 'n' && first != '{' { + return ErrNotJSONObject + } + + *o = in + return nil +} + +// https://github.com/golang/go/blob/9f193fbe31d7ffa5f6e71a6387cbcf4636306660/src/encoding/json/scanner.go#L160-L162 +func isSpace(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' +} + +// This type is used to tell whether a JSON key is presented with its value +// being a JSON null value or is not presented. +type OptString struct { + Value string + Valid bool + IsSet bool +} + +func (s *OptString) UnmarshalJSON(in []byte) error { + s.IsSet = true + + if string(in) == "null" { + s.Valid = false + return nil + } + + var val string + if err := json.Unmarshal(in, &val); err != nil { + return err + } + + s.Value = val + s.Valid = true + return nil +} diff --git a/support/render/httpjson/encoding_test.go b/support/render/httpjson/encoding_test.go new file mode 100644 index 0000000000..4fb5356dfd --- /dev/null +++ b/support/render/httpjson/encoding_test.go @@ -0,0 +1,99 @@ +package httpjson + +import ( + "bytes" + "encoding/json" + "testing" +) + +func TestRawObjectMarshaler(t *testing.T) { + var in RawObject + got, err := json.Marshal(in) + if err != nil { + t.Fatal(err) + } + + want := []byte("{}") + if !bytes.Equal(got, want) { + t.Errorf("got: %s, want: %s", string(got), string(want)) + } + + var inField struct { + Input RawObject `json:"input"` + } + + got, err = json.Marshal(inField) + if err != nil { + t.Fatal(err) + } + + want = []byte(`{"input":{}}`) + if !bytes.Equal(got, want) { + t.Errorf("got: %s, want: %s", string(got), string(want)) + } +} + +func TestRawObjectUnmarshaler(t *testing.T) { + cases := []struct { + input []byte + wantErr bool + }{ + {[]byte(`{"input":{}}`), false}, // empty object + {[]byte(`{"input":{"key":"value"}}`), false}, // object + {[]byte(`{"input":null}`), false}, // null + {[]byte(`{"input":[]}`), true}, // empty array + {[]byte(`{"input":"json string"}`), true}, // string + {[]byte(`{"input":10}`), true}, // positive number + {[]byte(`{"input":-10}`), true}, // negative number + {[]byte(`{"input":false}`), true}, // boolean + {[]byte(`{"input":true}`), true}, // boolean + } + + for _, tc := range cases { + var out struct { + Input RawObject `json:"input"` + } + + err := json.Unmarshal(tc.input, &out) + if tc.wantErr { + if err != ErrNotJSONObject { + t.Errorf("case %s wanted error but did not", string(tc.input)) + } + continue + } + if err != nil { + t.Errorf("case %s got error %v but shouldn't", string(tc.input), err) + } + } +} + +func TestOptStringUnmarshaler(t *testing.T) { + cases := []struct { + input []byte + isSet bool + valid bool + }{ + {[]byte(`{}`), false, false}, + {[]byte(`{"input":null}`), true, false}, + {[]byte(`{"input":"a string"}`), true, true}, + } + + for _, tc := range cases { + var out struct { + Input OptString `json:"input"` + } + + err := json.Unmarshal(tc.input, &out) + if err != nil { + t.Errorf("case %s got error %v but shouldn't", string(tc.input), err) + continue + } + + if out.Input.IsSet != 
tc.isSet { + t.Errorf("case %s got IsSet: %t, want: %t ", tc.input, out.Input.IsSet, tc.isSet) + } + if out.Input.Valid != tc.valid { + t.Errorf("case %s got Valid: %t, want: %t ", tc.input, out.Input.Valid, tc.valid) + } + } +} diff --git a/support/render/httpjson/handler.go b/support/render/httpjson/handler.go new file mode 100644 index 0000000000..d31b4d6d9a --- /dev/null +++ b/support/render/httpjson/handler.go @@ -0,0 +1,167 @@ +package httpjson + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "reflect" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/http/httpdecode" + "github.com/stellar/go/support/render/problem" +) + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + contextType = reflect.TypeOf((*context.Context)(nil)).Elem() +) + +var DefaultResponse = json.RawMessage(`{"message":"ok"}`) + +type handler struct { + fv reflect.Value + inType reflect.Type + inValue reflect.Value + readFromBody bool + cType contentType +} + +// ReqBodyHandler returns an HTTP Handler for function fn. +// If fn has an input type, it will try to decode the request body into the +// function's input type. +// If fn returns a non-nil error, the handler will use problem.Render. +// Please refer to funcParamType for the allowed function signature. +// The caller of this function should probably panic on the returned error, if +// any. +func ReqBodyHandler(fn interface{}, cType contentType) (http.Handler, error) { + fv := reflect.ValueOf(fn) + inType, err := funcParamType(fv) + if err != nil { + return nil, errors.Wrap(err, "parsing function prototype") + } + + return &handler{fv, inType, reflect.Value{}, inType != nil, cType}, nil +} + +// Handler returns an HTTP Handler for function fn. +// If fn returns a non-nil error, the handler will use problem.Render. +// Please refer to funcParamType for the allowed function signature. +// The caller of this function should probably panic on the returned error, if +// any. +func Handler(fn, param interface{}, cType contentType) (http.Handler, error) { + fv := reflect.ValueOf(fn) + inType, err := funcParamType(fv) + if err != nil { + return nil, errors.Wrap(err, "parsing function prototype") + } + + var inValue reflect.Value + if inType != nil { + inValue = reflect.ValueOf(param) + } + + return &handler{fv, inType, inValue, false, cType}, nil +} + +func (h *handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + ctx := req.Context() + res, err := h.executeFunc(ctx, req) + if err != nil { + problem.Render(ctx, w, err) + return + } + + Render(w, res, h.cType) +} + +// executeFunc executes the function provided in the handler together with the +// provided param value, if any, in the handler. 
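A hedged sketch of registering a JSON endpoint with ReqBodyHandler; the createRequest/createResponse types and the /things route are illustrative.

package example

import (
	"context"
	"net/http"

	supporthttp "github.com/stellar/go/support/http"
	"github.com/stellar/go/support/log"
	"github.com/stellar/go/support/render/httpjson"
)

type createRequest struct {
	Name string `json:"name"`
}

type createResponse struct {
	ID string `json:"id"`
}

// create matches the accepted signature: a context plus one optional input,
// which ReqBodyHandler decodes from the JSON request body.
func create(ctx context.Context, in createRequest) (createResponse, error) {
	return createResponse{ID: "new-" + in.Name}, nil
}

func newRouter() http.Handler {
	mux := supporthttp.NewAPIMux(log.DefaultLogger)

	h, err := httpjson.ReqBodyHandler(create, httpjson.JSON)
	if err != nil {
		// The error only fires for an unsupported function signature, so the
		// documented expectation is to panic at startup.
		panic(err)
	}
	mux.Post("/things", h.ServeHTTP)

	return mux
}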
+func (h *handler) executeFunc(ctx context.Context, req *http.Request) (interface{}, error) { + var a []reflect.Value + a = append(a, reflect.ValueOf(ctx)) + if h.inType != nil { + if h.readFromBody { + inPtr := reflect.New(h.inType) + err := httpdecode.DecodeJSON(req, inPtr.Interface()) + if err != nil { + return nil, ErrBadRequest + } + a = append(a, inPtr.Elem()) + } else { + a = append(a, h.inValue) + } + } + + var ( + res interface{} + err error + ) + rv := h.fv.Call(a) + switch n := len(rv); { + case n == 0: + res = &DefaultResponse + case n == 1: + if h.fv.Type().Out(0).Implements(errorType) { + res = &DefaultResponse + err, _ = rv[0].Interface().(error) + } else { + res = rv[0].Interface() + } + case n == 2: + res = rv[0].Interface() + err, _ = rv[1].Interface().(error) + } + + return res, err +} + +// ExecuteFunc executes the fn with the param after checking whether the +// function signature is valid or not by calling Handler. +// The first return value is the result that fn returns. +// The second return value is a boolean indicating whether the caller should +// panic on the err or not. If it's true, it means the caller can process the +// error normally; if it's false, it means the caller should probably panic on +// the error. +// The third return value is an error either from Handler() or from fn, if any. +func ExecuteFunc(ctx context.Context, fn, param interface{}, cType contentType) (interface{}, bool, error) { + dontPanic := true + h, err := Handler(fn, param, cType) + if err != nil { + dontPanic = false + return nil, dontPanic, err + } + + res, err := h.(*handler).executeFunc(ctx, nil) + return res, dontPanic, err +} + +// funcParamType checks whether fv is valid. We only accept nonvariadic +// functions with certain signatures. +// The allowed function signature is as following: +// +// func fn(ctx context.Context, an_optional_param) (at_most_two_return_values) +// +// The caller must provide a function with at least 1 input (request context) +// and up to 2 inputs, and up to 2 return values. If there are two return +// values, the second value has to be error type. 
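A small sketch of ExecuteFunc and of the signature contract funcParamType enforces; the action closure is illustrative.

package main

import (
	"context"
	"fmt"

	"github.com/stellar/go/support/render/httpjson"
)

func main() {
	// Accepted shape: context first, one optional input, up to two return
	// values with the second being an error.
	action := func(ctx context.Context, name string) (string, error) {
		return "hello " + name, nil
	}

	res, ok, err := httpjson.ExecuteFunc(context.Background(), action, "stellar", httpjson.JSON)
	if !ok {
		// false means the function signature itself was rejected; the package
		// suggests callers panic in that case.
		panic(err)
	}
	fmt.Println(res, err) // hello stellar <nil>
}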
+func funcParamType(fv reflect.Value) (reflect.Type, error) { + ft := fv.Type() + + if ft.Kind() != reflect.Func || ft.IsVariadic() || ft.NumIn() > 2 || ft.NumIn() == 0 || !ft.In(0).Implements(contextType) { + return nil, fmt.Errorf("%s must be nonvariadic func and has at most one parameter other than context", ft.String()) + } + + var paramType reflect.Type + if ft.NumIn() == 2 { + paramType = ft.In(1) + } + + if n := ft.NumOut(); n == 2 && !ft.Out(1).Implements(errorType) { + return nil, fmt.Errorf("%s: second return value must be an error", ft.String()) + } else if n > 2 { + return nil, fmt.Errorf("%s can have at most two return values", ft.String()) + } + + return paramType, nil +} diff --git a/support/render/httpjson/handler_test.go b/support/render/httpjson/handler_test.go new file mode 100644 index 0000000000..4881f9454d --- /dev/null +++ b/support/render/httpjson/handler_test.go @@ -0,0 +1,146 @@ +package httpjson + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" +) + +func TestHandler(t *testing.T) { + cases := []struct { + input interface{} + output string + f interface{} + cType contentType + wantErr bool + }{ + {`foo`, `"foo"`, func(ctx context.Context, s string) (string, error) { return s, nil }, JSON, false}, + {struct{ Foo int }{1}, `1`, func(ctx context.Context, param struct{ Foo int }) (int, error) { return param.Foo, nil }, HALJSON, false}, + {``, ``, func(ctx context.Context) (int, error) { return 0, errors.New("test") }, JSON, true}, + } + + for _, tc := range cases { + h, err := Handler(tc.f, tc.input, tc.cType) + if err != nil { + t.Errorf("Handler(%v) got err %v", tc.f, err) + continue + } + + resp := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/", nil) + h.ServeHTTP(resp, req.WithContext(context.Background())) + if tc.wantErr { + if resp.Code != 500 { + t.Errorf("%T response code = %d want 200", tc.f, resp.Code) + } + continue + } + + if resp.Code != 200 { + t.Errorf("%T response code = %d want 200", tc.f, resp.Code) + } + + if tc.cType == JSON { + want := "application/json; charset=utf-8" + if ct := resp.Header().Get("Content-Type"); ct != want { + t.Errorf(`Content-Type = %s, want %s`, ct, want) + } + } else if tc.cType == HALJSON { + want := "application/hal+json; charset=utf-8" + if ct := resp.Header().Get("Content-Type"); ct != want { + t.Errorf(`Content-Type = %s, want %s`, ct, want) + } + } + + got := resp.Body.String() + if got != tc.output { + t.Errorf("%T response body = %#q want %#q", tc.f, got, tc.output) + } + } +} + +func TestReqBodyHandler(t *testing.T) { + cases := []struct { + input string + output string + f interface{} + cType contentType + wantErr bool + }{ + {`{"Foo":1}`, `1`, func(ctx context.Context, param struct{ Foo int }) (int, error) { return param.Foo, nil }, JSON, false}, + {``, ``, func(ctx context.Context) (int, error) { return 0, errors.New("test") }, JSON, true}, + } + + for _, tc := range cases { + h, err := ReqBodyHandler(tc.f, tc.cType) + if err != nil { + t.Errorf("Handler(%v) got err %v", tc.f, err) + continue + } + + resp := httptest.NewRecorder() + req, _ := http.NewRequest("POST", "/", strings.NewReader(tc.input)) + h.ServeHTTP(resp, req.WithContext(context.Background())) + if tc.wantErr { + if resp.Code != 500 { + t.Errorf("%T response code = %d want 200", tc.f, resp.Code) + } + continue + } + + if resp.Code != 200 { + t.Errorf("%T response code = %d want 200", tc.f, resp.Code) + } + + if tc.cType == JSON { + want := "application/json; charset=utf-8" + if 
ct := resp.Header().Get("Content-Type"); ct != want { + t.Errorf(`Content-Type = %s, want %s`, ct, want) + } + } + + got := resp.Body.String() + if got != tc.output { + t.Errorf("%T response body = %#q want %#q", tc.f, got, tc.output) + } + } +} + +func TestFuncParamTypeError(t *testing.T) { + cases := []interface{}{ + 0, // not a function + "a string", // not a function + func() (int, error) { return 0, nil }, // no inputs + func(int) (int, error) { return 0, nil }, // first input is not context + func(context.Context, int, int) (int, error) { return 0, nil }, // too many inputs + func(context.Context, int) (int, int) { return 0, 0 }, // second return value is not an error + func() (int, int, error) { return 0, 0, nil }, // too many return values + } + + for _, tc := range cases { + _, err := funcParamType(reflect.ValueOf(tc)) + if err == nil { + t.Errorf("funcParamType(%T) wants error", tc) + } + } +} + +func TestFuncParamTypeNoError(t *testing.T) { + cases := []interface{}{ + func(context.Context) {}, // no return value + func(context.Context) int { return 0 }, // one non-error type return value + func(context.Context) error { return nil }, // one error type return value + func(context.Context, int) (int, error) { return 0, nil }, // two return values + } + + for _, tc := range cases { + _, err := funcParamType(reflect.ValueOf(tc)) + if err != nil { + t.Errorf("funcParamType(%T) got error %v", tc, err) + } + } +} diff --git a/support/render/httpjson/io.go b/support/render/httpjson/io.go new file mode 100644 index 0000000000..79ad907ead --- /dev/null +++ b/support/render/httpjson/io.go @@ -0,0 +1,56 @@ +package httpjson + +import ( + "encoding/json" + "net/http" + + "github.com/stellar/go/support/errors" +) + +type contentType int + +const ( + JSON contentType = iota + HALJSON + HEALTHJSON +) + +// renderToString renders the provided data as a json string +func renderToString(data interface{}, pretty bool) ([]byte, error) { + if pretty { + return json.MarshalIndent(data, "", " ") + } + + return json.Marshal(data) +} + +// Render write data to w, after marshalling to json. The response header is +// set based on cType. +func Render(w http.ResponseWriter, data interface{}, cType contentType) { + RenderStatus(w, http.StatusOK, data, cType) +} + +// RenderStatus write data to w, after marshalling to json. +// The response header is set based on cType. +// The response status code is set to the statusCode. 
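Usage sketch for the two render helpers; the handler names and payloads are illustrative.

package example

import (
	"net/http"

	"github.com/stellar/go/support/render/httpjson"
)

func listHandler(w http.ResponseWriter, r *http.Request) {
	payload := map[string]interface{}{"records": []string{}}

	// Render writes a 200 with the Content-Type chosen from the contentType
	// constant; RenderStatus does the same with an explicit status code.
	httpjson.Render(w, payload, httpjson.HALJSON)
}

func createdHandler(w http.ResponseWriter, r *http.Request) {
	httpjson.RenderStatus(w, http.StatusCreated, map[string]string{"id": "1"}, httpjson.JSON)
}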
+func RenderStatus(w http.ResponseWriter, statusCode int, data interface{}, cType contentType) { + js, err := renderToString(data, true) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Disposition", "inline") + switch cType { + case HALJSON: + w.Header().Set("Content-Type", "application/hal+json; charset=utf-8") + case HEALTHJSON: + w.Header().Set("Content-Type", "application/health+json; charset=utf-8") + default: + w.Header().Set("Content-Type", "application/json; charset=utf-8") + } + w.WriteHeader(statusCode) + w.Write(js) +} + +var ErrBadRequest = errors.New("bad request") diff --git a/support/render/httpjson/io_test.go b/support/render/httpjson/io_test.go new file mode 100644 index 0000000000..9a9a284e0f --- /dev/null +++ b/support/render/httpjson/io_test.go @@ -0,0 +1,101 @@ +package httpjson + +import ( + "io/ioutil" + "net/http/httptest" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRender(t *testing.T) { + cases := []struct { + data interface{} + contentType contentType + wantContentType string + wantBody string + }{ + { + data: map[string]interface{}{"key": "value"}, + contentType: JSON, + wantContentType: "application/json; charset=utf-8", + wantBody: `{"key":"value"}`, + }, + { + data: map[string]interface{}{"key": "value"}, + contentType: HALJSON, + wantContentType: "application/hal+json; charset=utf-8", + wantBody: `{"key":"value"}`, + }, + { + data: map[string]interface{}{"key": "value"}, + contentType: HEALTHJSON, + wantContentType: "application/health+json; charset=utf-8", + wantBody: `{"key":"value"}`, + }, + } + + for i, tc := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + w := httptest.NewRecorder() + Render(w, tc.data, tc.contentType) + resp := w.Result() + + assert.Equal(t, 200, resp.StatusCode) + assert.Equal(t, tc.wantContentType, resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.JSONEq(t, tc.wantBody, string(body)) + }) + } +} + +func TestRenderStatus(t *testing.T) { + cases := []struct { + data interface{} + status int + contentType contentType + wantContentType string + wantBody string + }{ + { + data: map[string]interface{}{"key": "value"}, + status: 200, + contentType: JSON, + wantContentType: "application/json; charset=utf-8", + wantBody: `{"key":"value"}`, + }, + { + data: map[string]interface{}{"key": "value"}, + status: 400, + contentType: HALJSON, + wantContentType: "application/hal+json; charset=utf-8", + wantBody: `{"key":"value"}`, + }, + { + data: map[string]interface{}{"key": "value"}, + status: 400, + contentType: HEALTHJSON, + wantContentType: "application/health+json; charset=utf-8", + wantBody: `{"key":"value"}`, + }, + } + + for i, tc := range cases { + t.Run(strconv.Itoa(i), func(t *testing.T) { + w := httptest.NewRecorder() + RenderStatus(w, tc.status, tc.data, tc.contentType) + resp := w.Result() + + assert.Equal(t, tc.status, resp.StatusCode) + assert.Equal(t, tc.wantContentType, resp.Header.Get("Content-Type")) + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + assert.JSONEq(t, tc.wantBody, string(body)) + }) + } +} diff --git a/support/render/problem/default.go b/support/render/problem/default.go new file mode 100644 index 0000000000..d4af6ac5a3 --- /dev/null +++ b/support/render/problem/default.go @@ -0,0 +1,70 @@ +package problem + +import ( + "context" + "net/http" + + 
"github.com/stellar/go/support/log" +) + +// DefaultServiceHost is the default service host used with the default problem +// instance. +var DefaultServiceHost = "https://stellar.org/horizon-errors/" + +// DefaultLogger is the default logger used with the default problem instance. +var DefaultLogger = log.DefaultLogger + +// Default is the problem instance used by the package functions providing a +// global state registry and rendering of problems for an application. For a +// non-global state registry instantiate a new Problem with New. +var Default = New(DefaultServiceHost, DefaultLogger, LogAllErrors) + +// RegisterError records an error -> P mapping, allowing the app to register +// specific errors that may occur in other packages to be rendered as a specific +// P instance. +// +// For example, you might want to render any sql.ErrNoRows errors as a +// problem.NotFound, and you would do so by calling: +// +// problem.RegisterError(sql.ErrNoRows, problem.NotFound) in you application +// initialization sequence +func RegisterError(err error, p P) { + Default.RegisterError(err, p) +} + +// IsKnownError maps an error to a list of known errors +func IsKnownError(err error) error { + return Default.IsKnownError(err) +} + +// SetLogFilter sets log filter of the default Problem +func SetLogFilter(filter LogFilter) { + Default.SetLogFilter(filter) +} + +// UnRegisterErrors removes all registered errors +func UnRegisterErrors() { + Default.UnRegisterErrors() +} + +// RegisterHost registers the service host url. It is used to prepend the host +// url to the error type. If you don't wish to prepend anything to the error +// type, register host as an empty string. +// The default service host points to `https://stellar.org/horizon-errors/`. +func RegisterHost(host string) { + Default.RegisterHost(host) +} + +// RegisterReportFunc registers the report function that you want to use to +// report errors. Once reportFn is initialzied, it will be used to report +// unexpected errors. 
+func RegisterReportFunc(fn ReportFunc) { + Default.RegisterReportFunc(fn) +} + +// Render writes a http response to `w`, compliant with the "Problem +// Details for HTTP APIs" RFC: +// https://tools.ietf.org/html/draft-ietf-appsawg-http-problem-00 +func Render(ctx context.Context, w http.ResponseWriter, err error) { + Default.Render(ctx, w, err) +} diff --git a/support/render/problem/default_test.go b/support/render/problem/default_test.go new file mode 100644 index 0000000000..f99e90a5a5 --- /dev/null +++ b/support/render/problem/default_test.go @@ -0,0 +1,173 @@ +package problem + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http/httptest" + "strings" + "testing" + + ge "github.com/go-errors/errors" + "github.com/stellar/go/support/test" + "github.com/stretchr/testify/assert" +) + +// TestRender tests the render cases +func TestRender(t *testing.T) { + testCases := []struct { + name string + p P + wantList []string + wantCode int + }{ + { + "server error", + ServerError, + []string{"500"}, + 500, + }, { + "renders the type correctly", + P{Type: "foo", Status: 500}, + []string{"foo"}, + 500, + }, { + "renders the status correctly", + P{Status: 201}, + []string{"201"}, + 201, + }, { + "renders the extras correctly", + P{Extras: map[string]interface{}{"hello": "stellar"}, Status: 500}, + []string{"hello", "stellar"}, + 500, + }, + } + + for _, kase := range testCases { + t.Run(kase.name, func(t *testing.T) { + w := testRender(context.Background(), kase.p) + for _, wantItem := range kase.wantList { + assert.True(t, strings.Contains(w.Body.String(), wantItem), w.Body.String()) + assert.Equal(t, kase.wantCode, w.Code) + } + }) + } +} + +// TestServerErrorConversion tests that we convert errors to ServerError problems and also log the +// stacktrace as unknown for non-rich errors +func TestServerErrorConversion(t *testing.T) { + testCases := []struct { + name string + err error + wantSubstring string + }{ + { + "non-rich errors", + errors.New("broke"), + "stack=unknown", // logs the stacktrace as unknown for non-rich errors + }, { + "rich errors", + ge.New("broke"), + "default_test.go:", + }, + } + + for _, kase := range testCases { + t.Run(kase.name, func(t *testing.T) { + ctx, buf := test.ContextWithLogBuffer() + w := testRender(ctx, kase.err) + logged := buf.String() + + assert.True(t, strings.Contains(w.Body.String(), "server_error"), w.Body.String()) + assert.Equal(t, 500, w.Code) + + // don't expose private error info in the response body + assert.False(t, strings.Contains(w.Body.String(), "broke"), w.Body.String()) + + // log additional information about the error + assert.True( + t, + strings.Contains(logged, kase.wantSubstring), + fmt.Sprintf("expecting substring: '%s' in '%s'", kase.wantSubstring, logged), + ) + }) + } +} + +// TestInflate test errors that come inflated from horizon +func TestInflate(t *testing.T) { + testCase := struct { + name string + p P + want string + }{ + "renders the type correctly", + P{Type: "https://stellar.org/horizon-errors/not_found", Status: 404}, + "https://stellar.org/horizon-errors/not_found", + } + + t.Run(testCase.name, func(t *testing.T) { + w := testRender(context.Background(), testCase.p) + var payload P + err := json.Unmarshal([]byte(w.Body.String()), &payload) + if assert.NoError(t, err) { + assert.Equal(t, testCase.want, payload.Type) + } + }) +} + +func testRender(ctx context.Context, err error) *httptest.ResponseRecorder { + w := httptest.NewRecorder() + Render(ctx, w, err) + return w +} + +func 
TestRegisterReportFunc(t *testing.T) { + var buf strings.Builder + ctx := context.Background() + + reportFunc := func(ctx context.Context, err error) { + buf.WriteString("captured ") + buf.WriteString(err.Error()) + } + + err := errors.New("an unexpected error") + + w := httptest.NewRecorder() + + // before register the reportFunc + Render(ctx, w, err) + assert.Equal(t, "", buf.String()) + + RegisterReportFunc(reportFunc) + defer RegisterReportFunc(nil) + + // after register the reportFunc + want := "captured an unexpected error" + Render(ctx, w, err) + assert.Equal(t, want, buf.String()) +} + +func TestUnRegisterErrors(t *testing.T) { + RegisterError(context.DeadlineExceeded, ServerError) + err := IsKnownError(context.DeadlineExceeded) + assert.Error(t, err, ServerError.Error()) + + UnRegisterErrors() + + err = IsKnownError(context.DeadlineExceeded) + assert.NoError(t, err) +} + +func TestIsKnownError(t *testing.T) { + RegisterError(context.DeadlineExceeded, ServerError) + defer UnRegisterErrors() + err := IsKnownError(context.DeadlineExceeded) + assert.Error(t, err, ServerError.Error()) + + err = IsKnownError(errors.New("foo")) + assert.NoError(t, err) +} diff --git a/support/render/problem/problem.go b/support/render/problem/problem.go new file mode 100644 index 0000000000..23d61d8d6c --- /dev/null +++ b/support/render/problem/problem.go @@ -0,0 +1,232 @@ +// Package problem provides utility functions for rendering errors as RFC7807 +// compatible responses. +// +// RFC7807: https://tools.ietf.org/html/rfc7807 +// +// The P type is used to define application problems. +// The Render function is used to serialize problems in a HTTP response. +package problem + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/support/log" +) + +var ( + // ServerError is a well-known problem type. Use it as a shortcut. + ServerError = P{ + Type: "server_error", + Title: "Internal Server Error", + Status: http.StatusInternalServerError, + Detail: "An error occurred while processing this request. This is usually due " + + "to a bug within the server software. Trying this request again may " + + "succeed if the bug is transient. Otherwise, please contact the system " + + "administrator.", + } + + // NotFound is a well-known problem type. Use it as a shortcut in your actions + NotFound = P{ + Type: "not_found", + Title: "Resource Missing", + Status: http.StatusNotFound, + Detail: "The resource at the url requested was not found. This usually " + + "occurs for one of two reasons: The url requested is not valid, or no " + + "data in our database could be found with the parameters provided.", + } + + // BadRequest is a well-known problem type. Use it as a shortcut + // in your actions. + BadRequest = P{ + Type: "bad_request", + Title: "Bad Request", + Status: http.StatusBadRequest, + Detail: "The request you sent was invalid in some way.", + } +) + +// P is a struct that represents an error response to be rendered to a connected +// client. 
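Applications can define their own well-known problems in the same style as ServerError, NotFound and BadRequest above; a hypothetical example (not part of this change), written from a consumer package's point of view:

```go
package myapp

import (
	"net/http"

	"github.com/stellar/go/support/render/problem"
)

// RateLimitExceeded is a hypothetical application-defined problem. When
// rendered it produces a 429 response and, if a service host has been
// registered, the host is prepended to its Type.
var RateLimitExceeded = problem.P{
	Type:   "rate_limit_exceeded",
	Title:  "Rate Limit Exceeded",
	Status: http.StatusTooManyRequests,
	Detail: "The client has exceeded the allowed request rate. Please retry later.",
}
```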
+type P struct { + Type string `json:"type"` + Title string `json:"title"` + Status int `json:"status"` + Detail string `json:"detail,omitempty"` + Extras map[string]interface{} `json:"extras,omitempty"` +} + +func (p P) Error() string { + return fmt.Sprintf("problem: %s", p.Type) +} + +// LogFilter describes which errors should be logged when terminating requests in +// Problem.Render() +type LogFilter int + +const ( + _ = iota + // LogNoErrors indicates that the Problem instance should not log any errors + LogNoErrors = LogFilter(iota) + // LogUnknownErrors indicates that the Problem instance should only log errors + // which are not registered + LogUnknownErrors = LogFilter(iota) + // LogAllErrors indicates that the Problem instance should log all errors + LogAllErrors = LogFilter(iota) +) + +// Problem is an instance of the functionality served by the problem package. +type Problem struct { + serviceHost string + log *log.Entry + errToProblemMap map[error]P + reportFn ReportFunc + filter LogFilter +} + +// New returns a new instance of Problem. +func New(serviceHost string, log *log.Entry, filter LogFilter) *Problem { + return &Problem{ + serviceHost: serviceHost, + log: log, + errToProblemMap: map[error]P{}, + filter: filter, + } +} + +// ServiceHost returns the service host the Problem instance is configured with. +func (ps *Problem) ServiceHost() string { + return ps.serviceHost +} + +// RegisterError records an error -> P mapping, allowing the app to register +// specific errors that may occur in other packages to be rendered as a specific +// P instance. +// +// For example, you might want to render any sql.ErrNoRows errors as a +// problem.NotFound, and you would do so by calling: +// +// problem.RegisterError(sql.ErrNoRows, problem.NotFound) in you application +// initialization sequence +func (ps *Problem) RegisterError(err error, p P) { + ps.errToProblemMap[err] = p +} + +// IsKnownError maps an error to a list of known errors +func (ps *Problem) IsKnownError(err error) error { + origErr := errors.Cause(err) + + switch origErr.(type) { + case error: + if err, ok := ps.errToProblemMap[origErr]; ok { + return err + } + return nil + default: + return nil + } +} + +// SetLogFilter sets log filter +func (ps *Problem) SetLogFilter(filter LogFilter) { + ps.filter = filter +} + +// UnRegisterErrors removes all registered errors +func (ps *Problem) UnRegisterErrors() { + ps.errToProblemMap = map[error]P{} +} + +// RegisterHost registers the service host url. It is used to prepend the host +// url to the error type. If you don't wish to prepend anything to the error +// type, register host as an empty string. +func (ps *Problem) RegisterHost(host string) { + ps.serviceHost = host +} + +// ReportFunc is a function type used to report unexpected errors. +type ReportFunc func(context.Context, error) + +// RegisterReportFunc registers the report function that you want to use to +// report errors. Once reportFn is initialzied, it will be used to report +// unexpected errors. 
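For services that prefer not to share the package-level Default registry, a minimal sketch of a dedicated Problem instance built with New (the constructor below and its host URL are assumptions for illustration):

```go
package myapp

import (
	"context"
	"database/sql"
	"net/http"

	"github.com/stellar/go/support/log"
	"github.com/stellar/go/support/render/problem"
)

// newProblemRenderer is a hypothetical constructor showing how a service
// could keep its own error registry instead of using the global Default.
func newProblemRenderer() *problem.Problem {
	p := problem.New("https://example.org/errors/", log.DefaultLogger, problem.LogUnknownErrors)
	p.RegisterError(sql.ErrNoRows, problem.NotFound)
	return p
}

// renderErr shows the call site: registered errors render as their mapped P,
// everything else becomes ServerError and, with LogUnknownErrors, is logged.
func renderErr(ctx context.Context, p *problem.Problem, w http.ResponseWriter, err error) {
	p.Render(ctx, w, err)
}
```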
+func (ps *Problem) RegisterReportFunc(fn ReportFunc) { + ps.reportFn = fn +} + +// Render writes a http response to `w`, compliant with the "Problem +// Details for HTTP APIs" RFC: https://www.rfc-editor.org/rfc/rfc7807.txt +func (ps *Problem) Render(ctx context.Context, w http.ResponseWriter, err error) { + origErr := errors.Cause(err) + + if ps.filter == LogAllErrors { + ps.log.Ctx(ctx).WithStack(err).WithError(err).Info("request failed due to error") + } + + var problem P + switch p := origErr.(type) { + case P: + problem = p + case *P: + problem = *p + case error: + var ok bool + problem, ok = ps.errToProblemMap[origErr] + + // If this error is not a registered error + // log it and replace it with a 500 error + if !ok { + if ps.filter != LogNoErrors { + ps.log.Ctx(ctx).WithStack(err).Error(err) + } + if ps.reportFn != nil { + ps.reportFn(ctx, err) + } + problem = ServerError + } + } + + ps.renderProblem(ctx, w, problem) +} + +func (ps *Problem) renderProblem(ctx context.Context, w http.ResponseWriter, p P) { + if ps.serviceHost != "" && !strings.HasPrefix(p.Type, ps.serviceHost) { + p.Type = ps.serviceHost + p.Type + } + + w.Header().Set("Content-Type", "application/problem+json; charset=utf-8") + + js, err := json.MarshalIndent(p, "", " ") + if err != nil { + err = errors.Wrap(err, "failed to encode problem") + ps.log.Ctx(ctx).WithStack(err).Error(err) + http.Error(w, "error rendering problem", http.StatusInternalServerError) + return + } + + w.WriteHeader(p.Status) + w.Write(js) +} + +// MakeInvalidFieldProblem is a helper function to make a BadRequest with extras +func MakeInvalidFieldProblem(name string, reason error) *P { + return NewProblemWithInvalidField( + BadRequest, + name, + reason, + ) +} + +// NewProblemWithInvalidField creates a copy of the given problem, setting the +// invalid_field key in extras with the given reason. 
+func NewProblemWithInvalidField(p P, name string, reason error) *P { + p.Extras = map[string]interface{}{ + "invalid_field": name, + "reason": reason.Error(), + } + return &p +} diff --git a/support/render/problem/problem_test.go b/support/render/problem/problem_test.go new file mode 100644 index 0000000000..2b142b3deb --- /dev/null +++ b/support/render/problem/problem_test.go @@ -0,0 +1,186 @@ +package problem + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http/httptest" + "strings" + "testing" + + ge "github.com/go-errors/errors" + "github.com/stellar/go/support/log" + "github.com/stellar/go/support/test" + "github.com/stretchr/testify/assert" +) + +// TestProblemRender tests the render cases +func TestProblemRender(t *testing.T) { + problem := New("", log.DefaultLogger, LogNoErrors) + + testCases := []struct { + name string + p P + wantList []string + wantCode int + }{ + { + "server error", + ServerError, + []string{"500"}, + 500, + }, { + "renders the type correctly", + P{Type: "foo", Status: 500}, + []string{"foo"}, + 500, + }, { + "renders the status correctly", + P{Status: 201}, + []string{"201"}, + 201, + }, { + "renders the extras correctly", + P{Extras: map[string]interface{}{"hello": "stellar"}, Status: 500}, + []string{"hello", "stellar"}, + 500, + }, + } + + for _, kase := range testCases { + t.Run(kase.name, func(t *testing.T) { + w := testProblemRender(context.Background(), problem, kase.p) + for _, wantItem := range kase.wantList { + assert.True(t, strings.Contains(w.Body.String(), wantItem), w.Body.String()) + assert.Equal(t, kase.wantCode, w.Code) + } + }) + } +} + +// TestProblemServerErrorConversion tests that we convert errors to ServerError problems and also log the +// stacktrace as unknown for non-rich errors +func TestProblemServerErrorConversion(t *testing.T) { + problem := New("", log.DefaultLogger, LogUnknownErrors) + + testCases := []struct { + name string + err error + wantSubstring string + }{ + { + "non-rich errors", + errors.New("broke"), + "stack=unknown", // logs the stacktrace as unknown for non-rich errors + }, { + "rich errors", + ge.New("broke"), + "problem_test.go:", + }, + } + + for _, kase := range testCases { + t.Run(kase.name, func(t *testing.T) { + ctx, buf := test.ContextWithLogBuffer() + w := testProblemRender(ctx, problem, kase.err) + logged := buf.String() + + assert.True(t, strings.Contains(w.Body.String(), "server_error"), w.Body.String()) + assert.Equal(t, 500, w.Code) + + // don't expose private error info in the response body + assert.False(t, strings.Contains(w.Body.String(), "broke"), w.Body.String()) + + // log additional information about the error + assert.True( + t, + strings.Contains(logged, kase.wantSubstring), + fmt.Sprintf("expecting substring: '%s' in '%s'", kase.wantSubstring, logged), + ) + }) + } +} + +// TestProblemInflate test errors that come inflated from horizon +func TestProblemInflate(t *testing.T) { + problem := New("", log.DefaultLogger, LogNoErrors) + + testCase := struct { + name string + p P + want string + }{ + "renders the type correctly", + P{Type: "https://stellar.org/horizon-errors/not_found", Status: 404}, + "https://stellar.org/horizon-errors/not_found", + } + + t.Run(testCase.name, func(t *testing.T) { + w := testProblemRender(context.Background(), problem, testCase.p) + var payload P + err := json.Unmarshal([]byte(w.Body.String()), &payload) + if assert.NoError(t, err) { + assert.Equal(t, testCase.want, payload.Type) + } + }) +} + +func testProblemRender(ctx context.Context, 
problem *Problem, err error) *httptest.ResponseRecorder { + w := httptest.NewRecorder() + problem.Render(ctx, w, err) + return w +} + +func TestProblemRegisterReportFunc(t *testing.T) { + problem := New("", log.DefaultLogger, LogAllErrors) + + var buf strings.Builder + ctx := context.Background() + + reportFunc := func(ctx context.Context, err error) { + buf.WriteString("captured ") + buf.WriteString(err.Error()) + } + + err := errors.New("an unexpected error") + + w := httptest.NewRecorder() + + // before register the reportFunc + problem.Render(ctx, w, err) + assert.Equal(t, "", buf.String()) + + problem.RegisterReportFunc(reportFunc) + defer problem.RegisterReportFunc(nil) + + // after register the reportFunc + want := "captured an unexpected error" + problem.Render(ctx, w, err) + assert.Equal(t, want, buf.String()) +} + +func TestProblemUnRegisterErrors(t *testing.T) { + problem := New("", log.DefaultLogger, LogNoErrors) + + problem.RegisterError(context.DeadlineExceeded, ServerError) + err := problem.IsKnownError(context.DeadlineExceeded) + assert.Error(t, err, ServerError.Error()) + + problem.UnRegisterErrors() + + err = problem.IsKnownError(context.DeadlineExceeded) + assert.NoError(t, err) +} + +func TestProblemIsKnownError(t *testing.T) { + problem := New("", log.DefaultLogger, LogNoErrors) + + problem.RegisterError(context.DeadlineExceeded, ServerError) + defer problem.UnRegisterErrors() + err := problem.IsKnownError(context.DeadlineExceeded) + assert.Error(t, err, ServerError.Error()) + + err = problem.IsKnownError(errors.New("foo")) + assert.NoError(t, err) +} diff --git a/support/scripts/build_release_artifacts/main.go b/support/scripts/build_release_artifacts/main.go index a4eeb0bf50..f0d4cdd9bb 100644 --- a/support/scripts/build_release_artifacts/main.go +++ b/support/scripts/build_release_artifacts/main.go @@ -1,82 +1,102 @@ package main -// See README.md for a description of this script +// This is a build script that Travis uses to build Stellar release packages. 
import ( + "flag" "fmt" + "io/ioutil" "os" "os/exec" "path/filepath" "regexp" "strings" - "time" - "github.com/stellar/go/support/errors" "github.com/stellar/go/support/log" ) -var extractBinName = regexp.MustCompile(`^(?P[a-z-]+)-(?P.+)$`) +var extractBinName = regexp.MustCompile(`^(?P[a-z0-9-]+)-(?P.+)$`) -var builds = []struct { - OS string - Arch string -}{ +var builds = []buildConfig{ {"darwin", "amd64"}, {"linux", "amd64"}, {"linux", "arm"}, {"windows", "amd64"}, } +var binFilter = flag.String("bin", "", "restrict build to single binary") +var osFilter = flag.String("os", "", "restrict build to single os") +var archFilter = flag.String("arch", "", "restrict build to single arch") +var keepDir = flag.Bool("keep", false, "when true, artifact directories are not removed after packaging") + +type buildConfig struct { + OS string + Arch string +} + func main() { + flag.Parse() log.SetLevel(log.InfoLevel) - bin, version := extractFromTag(os.Getenv("TRAVIS_TAG")) - pkg := packageName(bin) - run("rm", "-rf", "dist/*") - if bin == "" { - log.Info("could not extract info from TRAVIS_TAG: skipping artifact packaging") + if os.Getenv("TRAVIS_EVENT_TYPE") == "cron" { + buildNightlies() + os.Exit(0) + } else if os.Getenv("CIRCLE_TAG") != "" { + buildByTag() + os.Exit(0) + } else { + buildSnapshots() os.Exit(0) } - for _, cfg := range builds { - name := fmt.Sprintf("%s-%s-%s-%s", bin, version, cfg.OS, cfg.Arch) - dest := filepath.Join("dist", name) + log.Info("nothing to do") +} - // make destination directories - run("mkdir", "-p", dest) - run("cp", "LICENSE-APACHE.txt", dest) - run("cp", "COPYING", dest) - run("cp", filepath.Join(pkg, "README.md"), dest) - run("cp", filepath.Join(pkg, "CHANGELOG.md"), dest) +// package searches the `tools` and `services` packages of this repo to find +// the source directory. This is used within the script to find the README and +// other files that should be packaged with the binary. +func binPkgNames() []string { + result := []string{} + result = append(result, binNamesForDir("services")...) + result = append(result, binNamesForDir("tools")...) + return result +} - // rebuild the binary with the version variable set - build( - fmt.Sprintf("github.com/stellar/go/%s", pkg), - filepath.Join(dest, bin), - version, - cfg.OS, - cfg.Arch, - ) +func binNamesForDir(dir string) []string { + files, err := ioutil.ReadDir(dir) + if err != nil { + panic(errors.Wrap(err, "read-dir failed")) + } - packageArchive(dest, cfg.OS) + result := []string{} + for _, file := range files { + if file.IsDir() && file.Name() != "internal" { + result = append(result, filepath.Join(dir, file.Name())) + } } + + return result } func build(pkg, dest, version, buildOS, buildArch string) { - buildTime := time.Now().Format(time.RFC3339) - - timeFlag := fmt.Sprintf("-X github.com/stellar/go/support/app.buildTime=%s", buildTime) - versionFlag := fmt.Sprintf("-X github.com/stellar/go/support/app.version=%s", version) + // Note: verison string should match other build pipelines to create + // reproducible builds for Horizon (and other projects in the future). 
+ rev := runOutput("git", "rev-parse", "HEAD") + versionString := version[1:] // Remove letter `v` + versionFlag := fmt.Sprintf( + "-X=github.com/stellar/go/support/app.version=%s-%s", + versionString, rev, + ) if buildOS == "windows" { dest = dest + ".exe" } cmd := exec.Command("go", "build", + "-trimpath", "-o", dest, - "-ldflags", fmt.Sprintf("%s %s", timeFlag, versionFlag), pkg, ) cmd.Stderr = os.Stderr @@ -84,6 +104,8 @@ func build(pkg, dest, version, buildOS, buildArch string) { cmd.Env = append( os.Environ(), + "CGO_ENABLED=0", + fmt.Sprintf("GOFLAGS=-ldflags=%s", versionFlag), fmt.Sprintf("GOOS=%s", buildOS), fmt.Sprintf("GOARCH=%s", buildArch), ) @@ -96,24 +118,100 @@ func build(pkg, dest, version, buildOS, buildArch string) { } } -// enableCgo replaces any CGO_ENABLED flags in `env` with CGO_ENABLED=1 -func enableCgo(env []string) (ret []string) { - for _, e := range env { - if !strings.HasPrefix(e, "CGO_ENABLED") { - ret = append(ret, e) +func buildNightlies() { + version := runOutput("git", "describe", "--always", "--dirty", "--tags") + repo := repoName() + + for _, pkg := range binPkgNames() { + bin := filepath.Base(pkg) + + if *binFilter != "" && *binFilter != bin { + continue + } + + for _, cfg := range getBuildConfigs() { + dest := prepareDest(pkg, bin, "nightly", cfg.OS, cfg.Arch) + + build( + fmt.Sprintf("%s/%s", repo, pkg), + filepath.Join(dest, bin), + version, + cfg.OS, + cfg.Arch, + ) + + packageArchive(dest, cfg.OS) + } + } +} + +func buildByTag() { + bin, version := extractFromTag(os.Getenv("CIRCLE_TAG")) + pkg := packageName(bin) + repo := repoName() + + if bin == "" { + log.Info("could not extract info from CIRCLE_TAG: skipping artifact packaging") + os.Exit(0) + } + + // Don't build anything if no package can be found + if pkg == "" { + log.Infof("could not find `%s` in expected binary locations: skipping artifact packaging", bin) + os.Exit(0) + } + + for _, cfg := range getBuildConfigs() { + dest := prepareDest(pkg, bin, version, cfg.OS, cfg.Arch) + + // rebuild the binary with the version variable set + build( + fmt.Sprintf("%s/%s", repo, pkg), + filepath.Join(dest, bin), + version, + cfg.OS, + cfg.Arch, + ) + + packageArchive(dest, cfg.OS) + } +} + +func buildSnapshots() { + rev := runOutput("git", "describe", "--always", "--dirty") + version := fmt.Sprintf("snapshot-%s", rev) + repo := repoName() + + for _, pkg := range binPkgNames() { + bin := filepath.Base(pkg) + + if *binFilter != "" && *binFilter != bin { + continue + } + + for _, cfg := range getBuildConfigs() { + dest := prepareDest(pkg, bin, "snapshot", cfg.OS, cfg.Arch) + + build( + fmt.Sprintf("%s/%s", repo, pkg), + filepath.Join(dest, bin), + version, + cfg.OS, + cfg.Arch, + ) + + packageArchive(dest, cfg.OS) } } - ret = append(ret, "CGO_ENABLED=1") - return } // extractFromTag extracts the name of the binary that should be packaged in the // course of execution this script as well as the version it should be packaged -// as, based on the name of the tag in the TRAVIS_TAG environment variable. +// as, based on the name of the tag in the CIRCLE_TAG environment variable. // Tags must be of the form `NAME-vSEMVER`, such as `horizon-v1.0.0` to be // matched by this function. // -// In the event that the TRAVIS_TAG is missing or the match fails, an empty +// In the event that the CIRCLE_TAG is missing or the match fails, an empty // string will be returned. 
func extractFromTag(tag string) (string, string) { match := extractBinName.FindStringSubmatch(tag) @@ -124,8 +222,24 @@ func extractFromTag(tag string) (string, string) { return match[1], match[2] } +func getBuildConfigs() (result []buildConfig) { + for _, cfg := range builds { + + if *osFilter != "" && *osFilter != cfg.OS { + continue + } + + if *archFilter != "" && *archFilter != cfg.Arch { + continue + } + + result = append(result, cfg) + } + return +} + // packageArchive tars or zips `dest`, depending upon the OS, then removes -// `dest`, in preparation of travis uploading all artifacts to github releases. +// `dest`, in preparation of Circle uploading all artifacts to github releases. func packageArchive(dest, buildOS string) { release := filepath.Base(dest) dir := filepath.Dir(dest) @@ -140,7 +254,9 @@ func packageArchive(dest, buildOS string) { run("tar", "-czf", dest+".tar.gz", "-C", dir, release) } - run("rm", "-rf", dest) + if !*keepDir { + run("rm", "-rf", dest) + } } // package searches the `tools` and `services` packages of this repo to find @@ -169,7 +285,8 @@ func packageName(binName string) string { } if result != "" { - panic("sourceDir() found multiple results!") + msg := fmt.Sprintf("sourceDir() found multiple results!: binName: %s", binName) + panic(msg) } result = t @@ -178,6 +295,24 @@ func packageName(binName string) string { return result } +func prepareDest(pkg, bin, version, os, arch string) string { + name := fmt.Sprintf("%s-%s-%s-%s", bin, version, os, arch) + dest := filepath.Join("dist", name) + + // make destination directories + run("mkdir", "-p", dest) + run("cp", "LICENSE-APACHE.txt", dest) + run("cp", "COPYING", dest) + run("cp", filepath.Join(pkg, "README.md"), dest) + run("cp", filepath.Join(pkg, "CHANGELOG.md"), dest) + if bin == "horizon" { + // Add default config files for Captive-Core + run("cp", filepath.Join(pkg, "configs/captive-core-pubnet.cfg"), dest) + run("cp", filepath.Join(pkg, "configs/captive-core-testnet.cfg"), dest) + } + return dest +} + // pushdir is a utility function to temporarily change directories. It returns // a func that can be called to restore the current working directory to the // state it was in when first calling pushdir. @@ -200,6 +335,14 @@ func pushdir(dir string) func() { } } +func repoName() string { + if os.Getenv("REPO") != "" { + return os.Getenv("REPO") + } + return "github.com/stellar/go" + +} + // utility command to run the provided command that echoes any output. A failed // command will trigger a panic. func run(name string, args ...string) { @@ -214,3 +357,19 @@ func run(name string, args ...string) { panic(err) } } + +// utility command to run the provided command that returns the output. A +// failed command will trigger a panic. +func runOutput(name string, args ...string) string { + cmd := exec.Command(name, args...) + cmd.Stderr = os.Stderr + + log.Infof("running: %s %s", name, strings.Join(args, " ")) + out, err := cmd.Output() + + if err != nil { + panic(err) + } + + return strings.TrimSpace(string(out)) +} diff --git a/support/strutils/main.go b/support/strutils/main.go new file mode 100644 index 0000000000..5e702ab8ec --- /dev/null +++ b/support/strutils/main.go @@ -0,0 +1,11 @@ +package strutils + +import "strings" + +// KebabToConstantCase converts a string from lower-case-dashes to UPPER_CASE_UNDERSCORES. 
+func KebabToConstantCase(kebab string) (constantCase string) { + constantCase = strings.ToUpper(kebab) + constantCase = strings.Replace(constantCase, "-", "_", -1) + + return constantCase +} diff --git a/support/strutils/main_test.go b/support/strutils/main_test.go new file mode 100644 index 0000000000..60ec388c0a --- /dev/null +++ b/support/strutils/main_test.go @@ -0,0 +1,12 @@ +package strutils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestKebabToConstantCase(t *testing.T) { + assert.Equal(t, "ENABLE_ASSET_STATS", KebabToConstantCase("enable-asset-stats"), "ordinary use") + assert.Equal(t, "ABC_DEF", KebabToConstantCase("ABC_DEF"), "ignores uppercase and underscores") +} diff --git a/support/test/main.go b/support/test/main.go new file mode 100644 index 0000000000..34ca4db772 --- /dev/null +++ b/support/test/main.go @@ -0,0 +1,26 @@ +// Package test contains simple test helpers that should not +// have any service-specific dependencies. +// think constants, custom matchers, generic helpers etc. +package test + +import ( + "bytes" + "context" + + "github.com/sirupsen/logrus" + "github.com/stellar/go/support/log" +) + +// ContextWithLogBuffer returns a context and a buffer into which the new, bound +// logger will write into. This method allows you to inspect what data was +// logged more easily in your tests. +func ContextWithLogBuffer() (context.Context, *bytes.Buffer) { + output := new(bytes.Buffer) + l := log.New() + l.SetOutput(output) + l.DisableColors() + l.SetLevel(logrus.DebugLevel) + + ctx := log.Set(context.Background(), l) + return ctx, output +} diff --git a/support/time/main.go b/support/time/main.go new file mode 100644 index 0000000000..07991866d2 --- /dev/null +++ b/support/time/main.go @@ -0,0 +1,79 @@ +package time + +import ( + "strconv" + goTime "time" +) + +//Millis represents time as milliseconds since epoch without any timezone adjustments +type Millis int64 + +//MillisFromString generates a Millis struct from a string representing an int64 +func MillisFromString(s string) (Millis, error) { + millis, err := strconv.ParseInt(s, 10, 64) + return Millis(int64(millis)), err +} + +//MillisFromInt64 generates a Millis struct from given millis int64 +func MillisFromInt64(millis int64) Millis { + return Millis(millis) +} + +//MillisFromTime generates a Millis struct from given go time +func MillisFromTime(t goTime.Time) Millis { + return Millis(t.UTC().UnixNano() / int64(goTime.Millisecond)) +} + +//MillisFromSeconds generates a Millis struct from given seconds int64 +func MillisFromSeconds(seconds int64) Millis { + return Millis(seconds * 1000) +} + +func (t Millis) increment(millisToAdd int64) Millis { + return Millis(int64(t) + millisToAdd) +} + +//IsNil returns true if the timeMillis has not been initialized to a date other then 0 from epoch +func (t Millis) IsNil() bool { + return t == 0 +} + +//RoundUp returns a new Millis instance with a rounded up to d millis +func (t Millis) RoundUp(d int64) Millis { + if d == 0 { + return t + } + if int64(t)%d != 0 { + return t.RoundDown(d).increment(d) + } + return t +} + +//RoundUp returns a new ToInt64 instance with a down to d millis +func (t Millis) RoundDown(d int64) Millis { + //round down to the nearest d + return Millis(int64(int64(t)/d) * d) +} + +//ToInt64 returns the actual int64 millis since epoch +func (t Millis) ToInt64() int64 { + return int64(t) +} + +//ToTime returns a go time.Time timestamp, UTC adjusted +func (t Millis) ToTime() goTime.Time { + // Milliseconds 1510831636149 + 
// Nanoseconds 1510831636149000000 + // Unix sec arg 1510831636 + // Unix nsec arg 149000000 + return goTime.Unix(int64(t)/1000, int64(t)%1000*int64(goTime.Millisecond)).UTC() +} + +//Now returns current time in millis +func Now() Millis { + return MillisFromTime(goTime.Now()) +} + +func (t Millis) String() string { + return strconv.FormatInt(t.ToInt64(), 10) +} diff --git a/support/time/main_test.go b/support/time/main_test.go new file mode 100644 index 0000000000..886970d9b0 --- /dev/null +++ b/support/time/main_test.go @@ -0,0 +1,33 @@ +package time + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMillisRoundUp(t *testing.T) { + assert.Equal(t, MillisFromInt64(20), MillisFromInt64(13).RoundUp(10)) + assert.Equal(t, MillisFromInt64(10), MillisFromInt64(10).RoundUp(10)) + assert.Equal(t, MillisFromInt64(15), MillisFromInt64(15).RoundUp(0)) +} + +func TestMillisRoundDown(t *testing.T) { + assert.Equal(t, MillisFromInt64(10), MillisFromInt64(13).RoundDown(10)) + assert.Equal(t, MillisFromInt64(10), MillisFromInt64(10).RoundDown(10)) +} + +func TestMillisParsing(t *testing.T) { + expected := Now() + actual, err := MillisFromString(expected.String()) + assert.NoError(t, err) + assert.Equal(t, actual, expected) +} + +func TestMillisToTime(t *testing.T) { + assert.Equal(t, int64(1510831636149000000), Millis(1510831636149).ToTime().UnixNano()) +} + +func TestMillisFromSeconds(t *testing.T) { + assert.Equal(t, MillisFromInt64(10000), MillisFromSeconds(10)) +} diff --git a/support/url/main.go b/support/url/main.go new file mode 100644 index 0000000000..4fd6adb39b --- /dev/null +++ b/support/url/main.go @@ -0,0 +1,34 @@ +package url + +import ( + gUrl "net/url" +) + +// URL wraps around the native golang URL struct to allow for custom methods +type URL gUrl.URL + +// SetParam returns a new URL with the given param created or modified if already exists +func (u URL) SetParam(key string, val string) URL { + gu := gUrl.URL(u) + q := gu.Query() + q.Del(key) + q.Add(key, val) + gu.RawQuery = q.Encode() + return URL(gu) +} + +// String encodes all URL segments to a fully qualified URL string +func (u URL) String() string { + gu := gUrl.URL(u) + return gu.String() +} + +// Parse decodes a URL string. 
Returns error if string is not a legal +func Parse(s string) (u URL, err error) { + gu, err := gUrl.Parse(s) + if err != nil { + return + } + u = URL(*gu) + return +} diff --git a/support/url/main_test.go b/support/url/main_test.go new file mode 100644 index 0000000000..d6da734f75 --- /dev/null +++ b/support/url/main_test.go @@ -0,0 +1,27 @@ +package url + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSetUrlParam(t *testing.T) { + testCases := []struct { + input string + key string + val string + expected string + }{ + {"http://localhost/a", "x", "1", "http://localhost/a?x=1"}, + {"http://localhost/a?x=1", "y", "2", "http://localhost/a?x=1&y=2"}, + {"http://localhost/a?x=1", "x", "2", "http://localhost/a?x=2"}, + } + + for _, kase := range testCases { + u, err := Parse(kase.input) + if assert.NoError(t, err) { + actual := u.SetParam(kase.key, kase.val).String() + assert.Equal(t, kase.expected, actual) + } + } +} diff --git a/tools/alb-replay/CHANGELOG.md b/tools/alb-replay/CHANGELOG.md new file mode 100644 index 0000000000..2b5b7a61bc --- /dev/null +++ b/tools/alb-replay/CHANGELOG.md @@ -0,0 +1,3 @@ +## 2021-08-04 + +Initial version diff --git a/tools/alb-replay/README.md b/tools/alb-replay/README.md new file mode 100644 index 0000000000..e767ee7319 --- /dev/null +++ b/tools/alb-replay/README.md @@ -0,0 +1,39 @@ +# ALB log replayer + +Tool that replays the successful GET requests found in an [AWS Application Load Balancer log file](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html). + +## Install + +Compile the `alb-replay` binary: + +```bash +go install ./tools/alb-replay +``` + +## Usage + +``` +Usage of ./alb-replay: + alb-replay + -start-from int + What URL number to start from (default 1) + -timeout duration + HTTP request timeout (default 5s) + -workers int + How many parallel workers to use (default 1) +``` + +## Example + +``` +alb-replay --workers 100 746476062914_elasticloadbalancing_us-east-1_app.stellar002-prd-horizon2.d65c0ca4271aa022_20210628T0000Z_54.208.185.115_567ts68u.log https://horizon.stellar.org +2021/08/04 16:36:13 (4) 506.607706ms https://horizon.stellar.org/ledgers?limit=1&order=desc&c=0.344613637344948 +2021/08/04 16:36:13 (1) 517.0601ms https://horizon.stellar.org/ +2021/08/04 16:36:13 (5) 518.765858ms https://horizon.stellar.org/trades?cursor=155070734820388867-0&X-Client-Name=js-stellar-sdk&X-Client-Version=7.0.0 +2021/08/04 16:36:13 (10) 518.778775ms https://horizon.stellar.org/offers?seller=GDYYFHJ34WSXDSNTPGQMS3NIS6PJR5WPZKSVZPADAR43RKDHZRIU7A5V +2021/08/04 16:36:13 (3) 519.966962ms https://horizon.stellar.org/order_book?selling_asset_type=credit_alphanum12&selling_asset_code=DOGET&selling_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&buying_asset_type=native&X-Client-Name=js-stellar-sdk&X-Client-Version=8.0.0 +2021/08/04 16:36:13 (2) 520.147353ms https://horizon.stellar.org/accounts/GDNXSZSF7HIGVRL2LG6VWXN5PWV3KHI77DQTHLLPKNPLUZFKRRDQJBXP?c=0.020849836853811032 +2021/08/04 16:36:13 (6) 520.645528ms https://horizon.stellar.org/trades?base_asset_type=native&counter_asset_type=credit_alphanum12&counter_asset_code=DOGET&counter_asset_issuer=GDOEVDDBU6OBWKL7VHDAOKD77UP4DKHQYKOKJJT5PR3WRDBTX35HUEUX&limit=50&order=desc&c=0.433731649711667 +2021/08/04 16:36:13 (8) 523.850289ms 
https://horizon.stellar.org/order_book?selling_asset_type=native&buying_asset_type=credit_alphanum12&buying_asset_code=Falcon9&buying_asset_issuer=GCHG35QMNQ6MOZEIQNHKGABUWOEVF7STLOBYEPQXARI7QAIV6ZVVPNKQ&limit=200&c=0.7329191653797233 +[...] +``` diff --git a/tools/alb-replay/main.go b/tools/alb-replay/main.go new file mode 100644 index 0000000000..692c83429a --- /dev/null +++ b/tools/alb-replay/main.go @@ -0,0 +1,199 @@ +// alb-replay replays the successful GET requests found in an AWS ALB log file +// see https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html +package main + +import ( + "context" + "encoding/csv" + "errors" + "flag" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "os/signal" + "strconv" + "strings" + "sync" + "syscall" + "time" +) + +const ( + albLogEntryCount = 29 + albTargetStatusCodeRecordIndex = 9 + albRequestIndex = 12 +) + +type NumberedURL struct { + Number int + URL string +} + +func isSuccesfulStatusCode(statusCode int) bool { + // consider all 2XX HTTP errors a success + return statusCode/100 == 2 +} + +type ALBLogEntryReader csv.Reader + +func newALBLogEntryReader(input io.Reader) *ALBLogEntryReader { + reader := csv.NewReader(input) + reader.Comma = ' ' + reader.FieldsPerRecord = albLogEntryCount + reader.ReuseRecord = true + return (*ALBLogEntryReader)(reader) +} + +func (r *ALBLogEntryReader) GetRequestURI() (string, error) { + records, err := (*csv.Reader)(r).Read() + if err != nil { + return "", err + } + + statusCodeStr := records[albTargetStatusCodeRecordIndex] + // discard requests with unknown status code + if statusCodeStr == "-" { + return "", nil + } + statusCode, err := strconv.Atoi(statusCodeStr) + if err != nil { + return "", fmt.Errorf("error parsing target status code %q: %v", statusCodeStr, err) + } + + // discard unsuccesful requests + if !isSuccesfulStatusCode(statusCode) { + return "", nil + } + + reqStr := records[albRequestIndex] + reqFields := strings.Split(reqStr, " ") + if len(reqFields) != 3 { + return "", fmt.Errorf("error parsing request %q: 3 fields exepcted, found %d", reqStr, len(reqFields)) + } + method := reqFields[0] + + // discard non-get requests + if method != http.MethodGet { + return "", nil + } + + urlStr := reqFields[1] + parsed, err := url.Parse(urlStr) + if err != nil { + return "", fmt.Errorf("error parsing url %q: %v", urlStr, err) + } + + return parsed.RequestURI(), nil +} + +func parseURLs(ctx context.Context, startFrom int, baseURL string, logReader *ALBLogEntryReader, urlChan chan NumberedURL) { + counter := 0 + for { + uri, err := logReader.GetRequestURI() + if err != nil { + if err == io.EOF { + // we are done + return + } + log.Fatal(err.Error()) + } + if uri == "" { + // no usable URL found in the current log line + continue + } + counter++ + if counter < startFrom { + // we haven't yet reached the expected start point + continue + } + url := NumberedURL{ + Number: counter, + URL: baseURL + uri, + } + select { + case <-ctx.Done(): + return + case urlChan <- url: + } + } +} + +func queryURLs(ctx context.Context, timeout time.Duration, urlChan chan NumberedURL) { + client := http.Client{ + Timeout: timeout, + } + for numURL := range urlChan { + if ctx.Err() != nil { + return + } + + req, err := http.NewRequest(http.MethodGet, numURL.URL, nil) + if err != nil { + log.Printf("(%d) unexpected error creating request: %v", numURL.Number, err) + continue + } + req = req.WithContext(ctx) + start := time.Now() + resp, err := client.Do(req) + if err != nil { + // we 
don't want to print cancel errors due to a signal interrupt + if errors.Unwrap(err) != context.Canceled { + log.Printf("(%d) unexpected request error: %v %q", numURL.Number, errors.Unwrap(err), numURL.URL) + } + continue + } + resp.Body.Close() + if !isSuccesfulStatusCode(resp.StatusCode) { + log.Printf("(%d) unexpected status code: %d %q", numURL.Number, resp.StatusCode, numURL.URL) + continue + } + log.Printf("(%d) %s %s", numURL.Number, time.Since(start), numURL.URL) + } +} + +func main() { + workers := flag.Int("workers", 1, "How many parallel workers to use") + startFromURLNum := flag.Int("start-from", 1, "What URL number to start from") + timeout := flag.Duration("timeout", time.Second*5, "HTTP request timeout") + flag.Parse() + if *workers < 1 { + log.Fatal("--workers parameter must be > 0") + } + if *startFromURLNum < 1 { + log.Fatal("--start-from must be > 0") + } + if flag.NArg() != 2 { + log.Fatalf("usage: %s ", os.Args[0]) + } + + file, err := os.Open(flag.Args()[0]) + if err != nil { + log.Fatalf("error opening file %q: %v", os.Args[1], err) + } + baseURL := flag.Args()[1] + logReader := newALBLogEntryReader(file) + urlChan := make(chan NumberedURL, *workers) + var wg sync.WaitGroup + + // setup interrupt cleanup code + ctx, stopped := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer stopped() + + // spawn url consumers + for i := 0; i < *workers; i++ { + wg.Add(1) + go func() { + queryURLs(ctx, *timeout, urlChan) + wg.Done() + }() + } + + parseURLs(ctx, *startFromURLNum, baseURL, logReader, urlChan) + // signal the consumers there won't be more urls + close(urlChan) + // wait for to consumers to be done + wg.Wait() +} diff --git a/tools/archive-reader/CHANGELOG.md b/tools/archive-reader/CHANGELOG.md new file mode 100644 index 0000000000..8a802039f7 --- /dev/null +++ b/tools/archive-reader/CHANGELOG.md @@ -0,0 +1 @@ +## Changelog diff --git a/tools/archive-reader/README.md b/tools/archive-reader/README.md new file mode 100644 index 0000000000..ae001ee24c --- /dev/null +++ b/tools/archive-reader/README.md @@ -0,0 +1 @@ +# Archive Reader diff --git a/tools/archive-reader/archive_reader.go b/tools/archive-reader/archive_reader.go new file mode 100644 index 0000000000..06991f9c68 --- /dev/null +++ b/tools/archive-reader/archive_reader.go @@ -0,0 +1,72 @@ +package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/ingest" +) + +func main() { + ledgerPtr := flag.Uint64("ledger", 0, "`ledger to analyze` (tip: has to be of the form `ledger = 64*n - 1`, where n is > 0)") + flag.Parse() + var seqNum uint32 = uint32(*ledgerPtr) + + if seqNum == 0 { + flag.Usage() + return + } + + archive, e := archive() + if e != nil { + panic(e) + } + + sr, e := ingest.NewCheckpointChangeReader(context.Background(), archive, seqNum) + if e != nil { + panic(e) + } + + accounts := map[string]bool{} + var i uint64 = 0 + var count uint64 = 0 + for { + le, e := sr.Read() + if e != nil { + panic(e) + } + if e == io.EOF { + log.Printf("total seen %d entries of which %d were accounts", i, count) + return + } + + if ae, valid := le.Post.Data.GetAccount(); valid { + addr := ae.AccountId.Address() + if _, exists := accounts[addr]; exists { + log.Fatalf("error, total seen %d entries of which %d were unique accounts; repeated account: %s", i, count, addr) + } + + accounts[addr] = true + count += 1 + } + i += 1 + + if i%1000 == 0 { + log.Printf("seen %d entries of which %d were accounts", i, count) + } + } 
+} + +func archive() (*historyarchive.Archive, error) { + return historyarchive.Connect( + fmt.Sprintf("s3://history.stellar.org/prd/core-live/core_live_001/"), + historyarchive.ConnectOptions{ + S3Region: "eu-west-1", + UnsignedRequests: true, + }, + ) +} diff --git a/tools/horizon-cmp/CHANGELOG.md b/tools/horizon-cmp/CHANGELOG.md new file mode 100644 index 0000000000..8c32c333b7 --- /dev/null +++ b/tools/horizon-cmp/CHANGELOG.md @@ -0,0 +1,3 @@ +## 2019-04-25 + +Initial version diff --git a/tools/horizon-cmp/README.md b/tools/horizon-cmp/README.md new file mode 100644 index 0000000000..0d0aee6442 --- /dev/null +++ b/tools/horizon-cmp/README.md @@ -0,0 +1,68 @@ +# Horizon cmp + +Tool that compares the responses of two Horizon servers and shows the diffs. +Useful for checking for regressions. + +## Install + +Compile the `horizon-cmp` binary: + +```bash +go install ./tools/horizon-cmp +``` + +## Usage + +`horizon-cmp` can be run in two modes: + +- Crawling: start with a set of paths (defined in [init_paths.go](https://github.com/stellar/go/blob/master/tools/horizon-cmp/init_paths.go)) and then uses `_links` to find new paths. +- ELB access log: send requests found in a provided ELB access log. + +### Crawling mode + +To run in crawling mode specify a `base` and `test` URL, where `base` is the current version of Horizon and `test` is the version you want to test. + +```bash +horizon-cmp -t https://new-horizon.host.org -b https://horizon.stellar.org +``` + +The paths to be tested can be found in [init_paths.go](https://github.com/stellar/go/blob/master/tools/horizon-cmp/init_paths.go). + +### ELB access log + +To run using an ELB access log, use the flag `-a`. + +```bash +horizon-cmp -t https://new-horizon.host.org -b https://horizon.stellar.org -a ./elb_access.log +``` + +Additionally you can specify which line to start in by using the flag `-s`. + +### History + +You can use the `history` command to compare the history endpoints for a given range of ledgers. + +``` +horizon-cmp history -t https://new-horizon.domain.org -b https://base-horizon.domain.org +``` + +By default this command will check the last 120 ledgers (~10 minutes), but you can specify `--from` and `--to`. + +``` +horizon-cmp history -t https://new-horizon.domain.org -b https://base-horizon.domain.org --count 20 +``` + +or + +``` +horizon-cmp history -t https://new-horizon.domain.org -b https://base-horizon.domain.org --from 10 --to 20 +``` + + +### Request per second + +By default `horizon-cmp` will send 1 request per second, however, you can change this value using the `--rps` flag. The following will run `10` request per second. Please note that sending too many requests to a production server can result in rate limiting of requests. 
+ +```bash +horizon-cmp -t https://new-horizon.host.org -b https://horizon.stellar.org --rps 10 +``` diff --git a/tools/horizon-cmp/history.go b/tools/horizon-cmp/history.go new file mode 100644 index 0000000000..6092b3ca9d --- /dev/null +++ b/tools/horizon-cmp/history.go @@ -0,0 +1,140 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/spf13/cobra" + slog "github.com/stellar/go/support/log" + cmp "github.com/stellar/go/tools/horizon-cmp/internal" +) + +var ( + count uint32 + from uint32 + to uint32 +) + +func init() { + historyCmd.Flags().Uint32Var(&count, "count", 0, "number of last ledgers to check (if from/to not set)") + historyCmd.Flags().Uint32Var(&from, "from", 0, "start of the range") + historyCmd.Flags().Uint32Var(&to, "to", 0, "end of the range") +} + +func runHistoryCmp(cmd *cobra.Command) { + if count == 0 && from == 0 && to == 0 { + // Defaults to checking the last 120 ledgers = ~10 minutes. + count = 120 + } + + if count != 0 && (from != 0 || to != 0) { + log.Error("--count and --from/--to are mutually exclusive") + cmd.Help() + os.Exit(1) + } + + if count != 0 { + ledger := getLatestLedger(horizonBase) + to = uint32(ledger.Sequence) + from = uint32(ledger.Sequence) - count + 1 + } + + // Check this after all calculations to catch possible underflow + if from > to || from == 0 || to == 0 { + log.Error("Invalid --from/--to range") + cmd.Help() + os.Exit(1) + } + + for cur := from; cur <= to; cur++ { + log.Infof("Getting paths for %d...", cur) + paths := getAllPathsForLedger(cur) + checkPaths(paths) + } +} + +func checkPaths(paths []string) { + for _, path := range paths { + a := cmp.NewResponse(horizonBase, path, false) + b := cmp.NewResponse(horizonTest, path, false) + + log = log.WithFields(slog.F{ + "status_code": a.StatusCode, + "size_base": a.Size(), + "size_test": b.Size(), + }) + + if a.Equal(b) { + log.Info(path) + } else { + log.Error("DIFF " + path) + os.Exit(1) + } + } +} + +func getAllPathsForLedger(sequence uint32) []string { + var paths []string + ledgerPath := fmt.Sprintf("/ledgers/%d", sequence) + paths = append(paths, ledgerPath) + paths = append(paths, getAllPagesPaths(ledgerPath+"/transactions?limit=200")...) + paths = append(paths, getAllPagesPaths(ledgerPath+"/operations?limit=200")...) + paths = append(paths, getAllPagesPaths(ledgerPath+"/payments?limit=200")...) + paths = append(paths, getAllPagesPaths(ledgerPath+"/effects?limit=200")...) + return paths +} + +func getAllPagesPaths(page string) []string { + pageBody := struct { + Links struct { + Self struct { + Href string `json:"href"` + } `json:"self"` + Next struct { + Href string `json:"href"` + } `json:"next"` + } `json:"_links"` + Embedded struct { + Records []interface{} `json:"records"` + } `json:"_embedded"` + }{} + + var paths []string + + for { + resp, err := http.Get(horizonBase + page) + if err != nil { + panic(err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + panic(err) + } + err = json.Unmarshal(body, &pageBody) + if err != nil { + panic(err) + } + + // Add current page + page = pageBody.Links.Self.Href + page = strings.Replace(page, horizonBase, "", -1) + paths = append(paths, page) + + // Check next page + page = pageBody.Links.Next.Href + page = strings.Replace(page, horizonBase, "", -1) + + // We always add the last empty page (above) to check if there are + // no extra objects in the Horizon we are testing. 
+ if len(pageBody.Embedded.Records) == 0 { + break + } + } + + return paths +} diff --git a/tools/horizon-cmp/init_paths.go b/tools/horizon-cmp/init_paths.go new file mode 100644 index 0000000000..e4a1e761b0 --- /dev/null +++ b/tools/horizon-cmp/init_paths.go @@ -0,0 +1,50 @@ +package main + +// Starting corpus of paths to test. You may want to extend this with a list of +// paths that you want to ensure are tested. +var initPaths = []string{ + "/transactions?order=desc", + "/transactions?order=desc", + "/transactions?order=desc&include_failed=false", + "/transactions?order=desc&include_failed=true", + + "/operations?order=desc", + "/operations?order=desc&include_failed=false", + "/operations?order=desc&include_failed=true", + + "/operations?join=transactions&order=desc", + "/operations?join=transactions&order=desc&include_failed=false", + "/operations?join=transactions&order=desc&include_failed=true", + + "/payments?order=desc", + "/payments?order=desc&include_failed=false", + "/payments?order=desc&include_failed=true", + + "/payments?join=transactions&order=desc", + "/payments?join=transactions&order=desc&include_failed=false", + "/payments?join=transactions&order=desc&include_failed=true", + + "/ledgers?order=desc", + "/effects?order=desc", + "/trades?order=desc", + + "/accounts/GAKLCFRTFDXKOEEUSBS23FBSUUVJRMDQHGCHNGGGJZQRK7BCPIMHUC4P/transactions?limit=200", + "/accounts/GAKLCFRTFDXKOEEUSBS23FBSUUVJRMDQHGCHNGGGJZQRK7BCPIMHUC4P/transactions?limit=200&include_failed=false", + "/accounts/GAKLCFRTFDXKOEEUSBS23FBSUUVJRMDQHGCHNGGGJZQRK7BCPIMHUC4P/transactions?limit=200&include_failed=true", + + "/accounts/GAKLCFRTFDXKOEEUSBS23FBSUUVJRMDQHGCHNGGGJZQRK7BCPIMHUC4P/operations?limit=200", + "/accounts/GAKLCFRTFDXKOEEUSBS23FBSUUVJRMDQHGCHNGGGJZQRK7BCPIMHUC4P/payments?limit=200", + "/accounts/GAKLCFRTFDXKOEEUSBS23FBSUUVJRMDQHGCHNGGGJZQRK7BCPIMHUC4P/effects?limit=200", + + "/accounts/GC2ZV6KGGFLQIMDVDWBWCP6LTODUDXYBLUPTUZCFHIMDCWHR43ULZITJ/trades?limit=200", + "/accounts/GC2ZV6KGGFLQIMDVDWBWCP6LTODUDXYBLUPTUZCFHIMDCWHR43ULZITJ/offers?limit=200", + + // Pubnet markets + "/order_book?selling_asset_type=native&buying_asset_type=credit_alphanum4&buying_asset_code=LTC&buying_asset_issuer=GCSTRLTC73UVXIYPHYTTQUUSDTQU2KQW5VKCE4YCMEHWF44JKDMQAL23", + "/order_book?selling_asset_type=native&buying_asset_type=credit_alphanum4&buying_asset_code=XRP&buying_asset_issuer=GCSTRLTC73UVXIYPHYTTQUUSDTQU2KQW5VKCE4YCMEHWF44JKDMQAL23", + "/order_book?selling_asset_type=native&buying_asset_type=credit_alphanum4&buying_asset_code=BTC&buying_asset_issuer=GCSTRLTC73UVXIYPHYTTQUUSDTQU2KQW5VKCE4YCMEHWF44JKDMQAL23", + "/order_book?selling_asset_type=native&buying_asset_type=credit_alphanum4&buying_asset_code=USD&buying_asset_issuer=GBSTRUSD7IRX73RQZBL3RQUH6KS3O4NYFY3QCALDLZD77XMZOPWAVTUK", + "/order_book?selling_asset_type=native&buying_asset_type=credit_alphanum4&buying_asset_code=SLT&buying_asset_issuer=GCKA6K5PCQ6PNF5RQBF7PQDJWRHO6UOGFMRLK3DYHDOI244V47XKQ4GP", + + "/trade_aggregations?base_asset_type=native&counter_asset_code=USD&counter_asset_issuer=GBSTRUSD7IRX73RQZBL3RQUH6KS3O4NYFY3QCALDLZD77XMZOPWAVTUK&counter_asset_type=credit_alphanum4&end_time=1551866400000&limit=200&order=desc&resolution=900000&start_time=1514764800", +} diff --git a/tools/horizon-cmp/init_routes.go b/tools/horizon-cmp/init_routes.go new file mode 100644 index 0000000000..c43293f954 --- /dev/null +++ b/tools/horizon-cmp/init_routes.go @@ -0,0 +1,56 @@ +package main + +import ( + cmp "github.com/stellar/go/tools/horizon-cmp/internal" +) + +var routes 
= cmp.Routes{ + List: []*cmp.Route{ + // The order of the list is important because some regexps can match + // a wider route. + cmp.MakeRoute(`/accounts/*/effects`), + cmp.MakeRoute(`/accounts/*/payments`), + cmp.MakeRoute(`/accounts/*/operations`), + cmp.MakeRoute(`/accounts/*/trades`), + cmp.MakeRoute(`/accounts/*/transactions`), + cmp.MakeRoute(`/accounts/*/offers`), + cmp.MakeRoute(`/accounts/*/data/*`), + cmp.MakeRoute(`/accounts/*`), + cmp.MakeRoute(`/accounts`), + + cmp.MakeRoute(`/ledgers/*/transactions`), + cmp.MakeRoute(`/ledgers/*/operations`), + cmp.MakeRoute(`/ledgers/*/payments`), + cmp.MakeRoute(`/ledgers/*/effects`), + cmp.MakeRoute(`/ledgers/*`), + cmp.MakeRoute(`/ledgers`), + + cmp.MakeRoute(`/operations/*/effects`), + cmp.MakeRoute(`/operations/*`), + cmp.MakeRoute(`/operations`), + + cmp.MakeRoute(`/transactions/*/effects`), + cmp.MakeRoute(`/transactions/*/operations`), + cmp.MakeRoute(`/transactions/*/payments`), + cmp.MakeRoute(`/transactions/*`), + cmp.MakeRoute(`/transactions`), + + cmp.MakeRoute(`/offers/*/trades`), + cmp.MakeRoute(`/offers/*`), + cmp.MakeRoute(`/offers`), + + cmp.MakeRoute(`/payments`), + cmp.MakeRoute(`/effects`), + cmp.MakeRoute(`/trades`), + cmp.MakeRoute(`/trade_aggregations`), + cmp.MakeRoute(`/order_book`), + cmp.MakeRoute(`/assets`), + cmp.MakeRoute(`/fee_stats`), + + cmp.MakeRoute(`/paths/strict-receive`), + cmp.MakeRoute(`/paths/strict-send`), + cmp.MakeRoute(`/paths`), + + cmp.MakeRoute(`/`), + }, +} diff --git a/tools/horizon-cmp/internal/paths.go b/tools/horizon-cmp/internal/paths.go new file mode 100644 index 0000000000..458e6a7df2 --- /dev/null +++ b/tools/horizon-cmp/internal/paths.go @@ -0,0 +1,35 @@ +package cmp + +import ( + "fmt" + "net/url" +) + +type Path struct { + Path string + Level int + Line int + Stream bool +} + +// ID returns a path identifier. It is saved to not repeat the same requests +// over again. +func (p Path) ID() string { + path := removeRandomC(p.Path) + return fmt.Sprintf("%t%s", p.Stream, path) +} + +// removeRandomC removes random `c` param that is part of many requests +// and originates in js-stellar-sdk +func removeRandomC(path string) string { + urlObj, err := url.Parse(path) + if err != nil { + panic(err) + } + + q := urlObj.Query() + q.Del("c") + + urlObj.RawQuery = q.Encode() + return urlObj.String() +} diff --git a/tools/horizon-cmp/internal/response.go b/tools/horizon-cmp/internal/response.go new file mode 100644 index 0000000000..97e9da0a24 --- /dev/null +++ b/tools/horizon-cmp/internal/response.go @@ -0,0 +1,253 @@ +package cmp + +import ( + "fmt" + "io/ioutil" + "math/rand" + "net/http" + "os/exec" + "regexp" + "strings" + "time" +) + +const fileLengthLimit = 100 + +var findResultMetaXDR = regexp.MustCompile(`"result_meta_xdr":[ ]?"([^"]*)",`) + +// removeRegexps contains a list of regular expressions that, when matched, +// will be changed to an empty string. This is done to exclude known +// differences in responses between two Horizon version. +// +// Let's say that next Horizon version adds a new bool field: +// `is_authorized` on account balances list. You want to remove this +// field so it's not reported for each `/accounts/{id}` response. +var removeRegexps = []*regexp.Regexp{ + // regexp.MustCompile(`This (is ){0,1}usually`), + // Removes joined transaction (join=transactions) added in Horizon 0.19.0. + // Remove for future versions. 
+ // regexp.MustCompile(`(?msU)"transaction":\s*{\s*("memo|"_links)[\n\s\S]*][\n\s\S]*}(,\s{9}|,)`), + // regexp.MustCompile(`\s*"is_authorized": true,`), + // regexp.MustCompile(`\s*"is_authorized": false,`), + // regexp.MustCompile(`\s*"successful": true,`), + // regexp.MustCompile(`\s*"transaction_count": [0-9]+,`), + // regexp.MustCompile(`\s*"last_modified_ledger": [0-9]+,`), + // regexp.MustCompile(`\s*"public_key": "G.*",`), + // regexp.MustCompile(`,\s*"paging_token": ?""`), + // Removes last_modified_time field, introduced in horizon 1.3.0 + // regexp.MustCompile(`\s*"last_modified_time": ?"[^"]*",`), +} + +type replace struct { + regexp *regexp.Regexp + repl string +} + +var replaceRegexps = []replace{ + // Offer ID in /offers + // {regexp.MustCompile(`"id":( ?)([0-9]+)`), `"id":${1}"${2}"`}, + // {regexp.MustCompile(`"timestamp":( ?)([0-9]+)`), `"timestamp":${1}"${2}"`}, + // {regexp.MustCompile(`"trade_count":( ?)([0-9]+)`), `"trade_count":${1}"${2}"`}, + // {regexp.MustCompile(`"type":( ?)"manage_offer",`), `"type":${1}"manage_sell_offer",`}, + // {regexp.MustCompile(`"type":( ?)"path_payment",`), `"type":${1}"path_payment_strict_receive",`}, + // {regexp.MustCompile(`"type":( ?)"create_passive_offer",`), `"type":${1}"create_passive_sell_offer",`}, + // {regexp.MustCompile( + // // Removes paging_token from /accounts/* + // `"data":( ?){([^}]*)},\s*"paging_token":( ?)"([0-9A-Z]*)"`), + // `"data":${1}{${2}},"paging_token":${3}""`, + // }, + // {regexp.MustCompile( + // // fee_charged is a string since horizon 1.3.0 + // `"fee_charged":( ?)([\d]+),`), + // `"fee_charged":${1}"${2}",`, + // }, + // {regexp.MustCompile( + // // max_fee is a string since horizon 1.3.0 + // `"max_fee":( ?)([\d]+),`), + // `"max_fee":${1}"${2}",`, + // }, + // // Removes trailing SSE data, fixed in horizon 1.7.0 + // {regexp.MustCompile( + // `\nretry:.*\nevent:.*\ndata:.*\n`), + // ``, + // }, + // // Removes clawback, fixed in horizon 2.1.0 + // {regexp.MustCompile( + // `,\s*"auth_clawback_enabled":\s*false`), + // ``, + // }, +} + +var newAccountDetailsPathWithLastestLedger = regexp.MustCompile(`^/accounts/[A-Z0-9]+/(transactions|operations|payments|effects|trades)/?`) + +type Response struct { + Domain string + Path string + Stream bool + + StatusCode int + LatestLedger string + Body string + // NormalizedBody is body without parts that identify a single + // server (ex. domain) and fields known to be different between + // instances (ex. `result_meta_xdr`). + NormalizedBody string +} + +func NewResponse(domain, path string, stream bool) *Response { + response := &Response{ + Domain: domain, + Path: path, + Stream: stream, + } + + req, err := http.NewRequest("GET", domain+path, nil) + if err != nil { + panic(err) + } + + client := &http.Client{} + + if stream { + req.Header.Add("Accept", "text/event-stream") + // Since requests are made in separate go routines we can + // set timeout to one minute. 
+ client.Timeout = time.Minute + } + + resp, err := client.Do(req) + if err != nil { + response.Body = err.Error() + response.NormalizedBody = err.Error() + return response + } + + response.StatusCode = resp.StatusCode + + if resp.StatusCode != http.StatusOK && + resp.StatusCode != http.StatusNotFound && + resp.StatusCode != http.StatusNotAcceptable && + resp.StatusCode != http.StatusBadRequest && + resp.StatusCode != http.StatusGatewayTimeout && + resp.StatusCode != http.StatusGone && + resp.StatusCode != http.StatusServiceUnavailable { + panic(resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + // We ignore the error below to timeout streaming requests. + // net/http: request canceled (Client.Timeout exceeded while reading body) + if err != nil && !stream { + response.Body = err.Error() + response.NormalizedBody = err.Error() + return response + } + + if string(body) == "" { + response.Body = fmt.Sprintf("Empty body [%d]", rand.Uint64()) + } + + response.LatestLedger = resp.Header.Get("Latest-Ledger") + response.Body = string(body) + + normalizedBody := response.Body + // `result_meta_xdr` can differ between core instances (confirmed this with core team) + normalizedBody = findResultMetaXDR.ReplaceAllString(normalizedBody, "") + // Remove Horizon URL from the _links + normalizedBody = strings.Replace(normalizedBody, domain, "", -1) + + for _, reg := range removeRegexps { + normalizedBody = reg.ReplaceAllString(normalizedBody, "") + } + + for _, reg := range replaceRegexps { + normalizedBody = reg.regexp.ReplaceAllString(normalizedBody, reg.repl) + } + + // 1.1.0 - skip Latest-Ledger header in newly incorporated endpoints + if !(newAccountDetailsPathWithLastestLedger.Match([]byte(path)) || + strings.HasPrefix(path, "/ledgers") || + strings.HasPrefix(path, "/transactions") || + strings.HasPrefix(path, "/operations") || + strings.HasPrefix(path, "/payments") || + strings.HasPrefix(path, "/effects") || + strings.HasPrefix(path, "/transactions") || + strings.Contains(path, "/trade")) { + normalizedBody = fmt.Sprintf("Latest-Ledger: %s\n%s", resp.Header.Get("Latest-Ledger"), normalizedBody) + } + response.NormalizedBody = normalizedBody + return response +} + +func (r *Response) Equal(other *Response) bool { + return r.NormalizedBody == other.NormalizedBody +} + +func (r *Response) Size() int { + return len(r.Body) +} + +func (r *Response) SaveDiff(outputDir string, other *Response) { + if r.Path != other.Path { + panic("Paths are different") + } + + fileName := pathToFileName(r.Path, r.Stream) + + if len(fileName) > fileLengthLimit { + fileName = fileName[0:fileLengthLimit] + } + + fileA := fmt.Sprintf("%s/%s.old", outputDir, fileName) + fileB := fmt.Sprintf("%s/%s.new", outputDir, fileName) + fileDiff := fmt.Sprintf("%s/%s.diff", outputDir, fileName) + + // We compare normalized body to see actual differences in the diff instead + // of a lot of domain diffs. + err := ioutil.WriteFile(fileA, []byte(r.Domain+" "+r.Path+"\n\n"+r.NormalizedBody), 0744) + if err != nil { + panic(err) + } + + err = ioutil.WriteFile(fileB, []byte(other.Domain+" "+other.Path+"\n\n"+other.NormalizedBody), 0744) + if err != nil { + panic(err) + } + + // Ignore `err`, user will generate diff manually + out, _ := exec.Command("diff", fileA, fileB).Output() + + if len(out) != 0 { + err = ioutil.WriteFile(fileDiff, out, 0744) + if err != nil { + panic(err) + } + } +} + +// GetPaths finds all URLs in the response body and returns paths +// (without domain). 
+func (r *Response) GetPaths() []string { + // escapedDomain := strings.Replace(r.Domain, `\`, `\\`, -1) + var linksRegexp = regexp.MustCompile(`"` + r.Domain + `(.*?)["{]`) + found := linksRegexp.FindAllSubmatch([]byte(r.Body), -1) + links := make([]string, 0, len(found)) + + for _, link := range found { + l := strings.Replace(string(link[1]), "\\u0026", "&", -1) + links = append(links, l) + } + + return links +} + +func pathToFileName(path string, stream bool) string { + if stream { + path = "stream_" + path + } + path = strings.Replace(path, "/", "_", -1) + path = strings.Replace(path, "?", "_", -1) + path = strings.Replace(path, "&", "_", -1) + path = strings.Replace(path, "=", "_", -1) + return path +} diff --git a/tools/horizon-cmp/internal/route_counter.go b/tools/horizon-cmp/internal/route_counter.go new file mode 100644 index 0000000000..0b192e0fb5 --- /dev/null +++ b/tools/horizon-cmp/internal/route_counter.go @@ -0,0 +1,66 @@ +package cmp + +import ( + "fmt" + "regexp" + "strings" + "sync" +) + +// MakeRoute translates route with * wildcards into a regexp. It adds start/end +// of line asserts, changes * into any characters except when in used for lists +// (like /accounts/*) - in such case it ensures there is no more `/` characters. +// This is not ideal and requires checking routes in correct order but is enough +// for horizon-cmp. +// More here: https://regex101.com/r/EhBRtS/1 +func MakeRoute(route string) *Route { + name := route + route = "^" + route + route = strings.ReplaceAll(route, "*", "[^?/]+") // everything except `/` and `?` + route = route + "[?/]?[^/]*$" // ? or / or nothing and then everything except `/`` + return &Route{ + name: name, + regexp: regexp.MustCompile(route), + } +} + +type Route struct { + name string + regexp *regexp.Regexp + counter int +} + +type Routes struct { + List []*Route + unmatched []string + mutex sync.Mutex +} + +func (r *Routes) Count(path string) { + for _, route := range r.List { + if route.regexp.Match([]byte(path)) { + r.mutex.Lock() + route.counter++ + r.mutex.Unlock() + return + } + } + + r.mutex.Lock() + r.unmatched = append(r.unmatched, path) + r.mutex.Unlock() +} + +func (r *Routes) Print() { + r.mutex.Lock() + defer r.mutex.Unlock() + fmt.Println("Routes coverage:") + for _, route := range r.List { + fmt.Println(route.counter, route.name) + } + + fmt.Println("Unmatched paths:") + for _, path := range r.unmatched { + fmt.Println(path) + } +} diff --git a/tools/horizon-cmp/internal/route_counter_test.go b/tools/horizon-cmp/internal/route_counter_test.go new file mode 100644 index 0000000000..edd0c9c60e --- /dev/null +++ b/tools/horizon-cmp/internal/route_counter_test.go @@ -0,0 +1,25 @@ +package cmp + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMakeRoute(t *testing.T) { + var testCases = []struct { + Input string + Output string + }{ + {"/accounts/*/payments", "^/accounts/[^?/]+/payments[?/]?[^/]*$"}, + {"/accounts/*", "^/accounts/[^?/]+[?/]?[^/]*$"}, + {"/accounts", "^/accounts[?/]?[^/]*$"}, + } + + for _, tc := range testCases { + t.Run(fmt.Sprintf("%s -> %s", tc.Input, tc.Output), func(t *testing.T) { + assert.Equal(t, tc.Output, MakeRoute(tc.Input).regexp.String()) + }) + } +} diff --git a/tools/horizon-cmp/internal/scanner.go b/tools/horizon-cmp/internal/scanner.go new file mode 100644 index 0000000000..072238cbfa --- /dev/null +++ b/tools/horizon-cmp/internal/scanner.go @@ -0,0 +1,20 @@ +package cmp + +import "bufio" + +type Scanner struct { + *bufio.Scanner + linesRead int +} + 
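The route counter introduced above relies on checking routes in order, as the `MakeRoute` comment notes, so more specific patterns must be listed before the wildcard ones they overlap with. A small, hypothetical sketch of wiring it up from outside the package (the route list actually used by horizon-cmp is defined elsewhere in this change; the paths and account ID below are invented):

```go
package main

import (
	cmp "github.com/stellar/go/tools/horizon-cmp/internal"
)

func main() {
	// Order matters: /accounts/*/payments must come before /accounts/*,
	// otherwise the wildcard route would also match the payments path.
	routes := &cmp.Routes{List: []*cmp.Route{
		cmp.MakeRoute("/accounts/*/payments"),
		cmp.MakeRoute("/accounts/*"),
		cmp.MakeRoute("/ledgers"),
	}}

	// GXYZ stands in for an account ID.
	routes.Count("/accounts/GXYZ/payments?cursor=now") // first route
	routes.Count("/accounts/GXYZ")                     // second route
	routes.Count("/fee_stats")                         // ends up unmatched

	// Prints per-route hit counts followed by any unmatched paths.
	routes.Print()
}
```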
+func (s *Scanner) Scan() bool { + ret := s.Scanner.Scan() + if ret { + s.linesRead++ + } + return ret +} + +func (s *Scanner) LinesRead() int { + return s.linesRead +} diff --git a/tools/horizon-cmp/main.go b/tools/horizon-cmp/main.go new file mode 100644 index 0000000000..9b2e10c070 --- /dev/null +++ b/tools/horizon-cmp/main.go @@ -0,0 +1,376 @@ +package main + +import ( + "bufio" + "fmt" + "net/http" + "net/url" + "os" + "os/signal" + "regexp" + "strings" + "sync" + "time" + + "github.com/spf13/cobra" + client "github.com/stellar/go/clients/horizonclient" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/support/errors" + slog "github.com/stellar/go/support/log" + cmp "github.com/stellar/go/tools/horizon-cmp/internal" +) + +// maxLevels defines the maximum number of levels deep the crawler +// should go. Here's an example crawl stack: +// Level 1 = /ledgers?order=desc (finds a link to tx details) +// Level 2 = /transactions/abcdef (finds a link to a list of operations) +// Level 3 = /transactions/abcdef/operations (will not follow any links - at level 3) +const maxLevels = 3 +const pathsQueueCap = 10000 + +const timeFormat = "2006-01-02-15-04-05" + +// pathAccessLog is a regexp that gets path from ELB access log line. Example: +// 2015-05-13T23:39:43.945958Z my-loadbalancer 192.168.131.39:2817 10.0.0.1:80 0.000086 0.001048 0.001337 200 200 0 57 "GET https://www.example.com:443/transactions?order=desc HTTP/1.1" "curl/7.38.0" DHE-RSA-AES128-SHA TLSv1.2 +var pathAccessLog = regexp.MustCompile(`([A-Z]+) https?://[^/]*(/[^ ]*)`) + +var ( + paths = make(chan cmp.Path, pathsQueueCap) + + visitedPathsMutex sync.Mutex + visitedPaths map[string]bool +) + +// CLI params +var ( + horizonBase string + horizonTest string + elbAccessLogFile string + elbAccessLogStartLine int + requestsPerSecond int +) + +var log *slog.Entry + +var rootCmd = &cobra.Command{ + Use: "horizon-cmp", + Short: "horizon-cmp compares two horizon servers' responses", + Run: func(cmd *cobra.Command, args []string) { + run(cmd) + }, +} + +var historyCmd = &cobra.Command{ + Use: "history", + Short: "compares history endpoints for a given range of ledgers", + Run: func(cmd *cobra.Command, args []string) { + runHistoryCmp(cmd) + }, +} + +func init() { + log = slog.New() + log.SetLevel(slog.InfoLevel) + log.DisableTimestamp() + + if cap(paths) < len(initPaths) { + panic("cap(paths) must be higher or equal len(initPaths)") + } + + visitedPaths = make(map[string]bool) + + rootCmd.PersistentFlags().StringVarP(&horizonBase, "base", "b", "", "URL of the base/old version Horizon server") + rootCmd.PersistentFlags().StringVarP(&horizonTest, "test", "t", "", "URL of the test/new version Horizon server") + rootCmd.Flags().StringVarP(&elbAccessLogFile, "elb-access-log-file", "a", "", "ELB access log file to replay") + rootCmd.Flags().IntVarP(&elbAccessLogStartLine, "elb-access-log-start-line", "s", 1, "Start line of ELB access log (useful to continue from a given point)") + rootCmd.Flags().IntVar(&requestsPerSecond, "rps", 1, "Requests per second") + + rootCmd.AddCommand(historyCmd) +} + +func main() { + rootCmd.Execute() +} + +func run(cmd *cobra.Command) { + if horizonBase == "" || horizonTest == "" { + log.Error("--base and --test params are required") + cmd.Help() + os.Exit(1) + } + + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + <-c + routes.Print() + os.Exit(0) + }() + + // Get latest ledger and operate on it's cursor to get responses at a given ledger. 
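Before the crawl below starts, it is worth seeing how replayed paths are extracted when an ELB access log is supplied: the `pathAccessLog` pattern declared near the top of this file pulls out the HTTP method and the request path. A self-contained sketch run against the sample log line from its comment (abridged here); only `GET` lines are kept by `getPathFromAccessLog` further below.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Copy of the pathAccessLog pattern declared above.
	pathAccessLog := regexp.MustCompile(`([A-Z]+) https?://[^/]*(/[^ ]*)`)

	// Abridged version of the sample ELB access-log line from the comment.
	line := `2015-05-13T23:39:43.945958Z my-loadbalancer 192.168.131.39:2817 10.0.0.1:80 0.000086 0.001048 0.001337 200 200 0 57 "GET https://www.example.com:443/transactions?order=desc HTTP/1.1" "curl/7.38.0"`

	m := pathAccessLog.FindStringSubmatch(line)
	if len(m) == 3 {
		fmt.Println(m[1]) // GET
		fmt.Println(m[2]) // /transactions?order=desc
	}
}
```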
+ ledger := getLatestLedger(horizonBase) + cursor := ledger.PagingToken() + + var accessLog *cmp.Scanner + if elbAccessLogFile == "" { + for _, p := range initPaths { + paths <- cmp.Path{Path: getPathWithCursor(p, cursor), Level: 0, Stream: false} + paths <- cmp.Path{Path: getPathWithCursor(p, cursor), Level: 0, Stream: true} + } + } else { + file, err := os.Open(elbAccessLogFile) + if err != nil { + panic(err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + accessLog = &cmp.Scanner{Scanner: scanner} + // Seek + if elbAccessLogStartLine > 1 { + log.Info("Seeking file...") + } + for i := 1; i < elbAccessLogStartLine; i++ { + accessLog.Scan() + } + // Streams lines to channel from another go routine + go streamFile(accessLog) + } + + pwd, err := os.Getwd() + if err != nil { + panic(err) + } + outputDir := fmt.Sprintf("%s/horizon-cmp-diff/%s", pwd, time.Now().Format(timeFormat)) + + log.WithFields(slog.F{ + "base": horizonBase, + "test": horizonTest, + "access_log": elbAccessLogFile, + "ledger": ledger.Sequence, + "cursor": cursor, + "output_dir": outputDir, + }).Info("Starting...") + + err = os.MkdirAll(outputDir, 0744) + if err != nil { + panic(err) + } + + var wg sync.WaitGroup + for { + pl, more := <-paths + if !more { + break + } + + if pl.Level > maxLevels { + continue + } + + visitedPathsMutex.Lock() + if visitedPaths[pl.ID()] { + visitedPathsMutex.Unlock() + continue + } + visitedPaths[pl.ID()] = true + visitedPathsMutex.Unlock() + + time.Sleep(time.Second / time.Duration(requestsPerSecond)) + wg.Add(1) + go func() { + defer wg.Done() + + var requestWg sync.WaitGroup + requestWg.Add(2) + + var a, b *cmp.Response + go func() { + a = cmp.NewResponse(horizonBase, pl.Path, pl.Stream) + requestWg.Done() + }() + go func() { + b = cmp.NewResponse(horizonTest, pl.Path, pl.Stream) + requestWg.Done() + }() + + requestWg.Wait() + + // Retry when LatestLedger not equal but only if not empty because + // older Horizon versions don't send this header. + if a.LatestLedger != "" && b.LatestLedger != "" && + a.LatestLedger != b.LatestLedger { + visitedPathsMutex.Lock() + visitedPaths[pl.ID()] = false + visitedPathsMutex.Unlock() + paths <- pl + log.Warnf("LatestLedger does not match, retry queued: %s", pl.Path) + return + } + + // Retry when any of responses size equals 0. 
+ if a.Size() == 0 || b.Size() == 0 { + paths <- pl + log.Warnf("One of responses' size equals 0, retry queued: %s", pl.Path) + return + } + + var status string + if a.Equal(b) { + status = "ok" + } else { + status = "diff" + a.SaveDiff(outputDir, b) + } + + localLog := log.WithFields(slog.F{ + "status_code": a.StatusCode, + "size_base": a.Size(), + "size_test": b.Size(), + "stream": pl.Stream, + }) + + if accessLog != nil { + localLog = localLog.WithField("access_log_line", pl.Line) + } + + if status == "diff" { + localLog.Error("DIFF " + pl.Path) + } else { + localLog.Info(pl.Path) + } + + routes.Count(pl.Path) + + // Add new paths (only for non-ELB) + if accessLog == nil { + addPathsFromResponse(a, pl.Level+1) + } + }() + } + + wg.Wait() + + routes.Print() +} + +func getLatestLedger(url string) protocol.Ledger { + horizon := client.Client{ + HorizonURL: url, + HTTP: http.DefaultClient, + } + + ledgers, err := horizon.Ledgers(client.LedgerRequest{ + Order: client.OrderDesc, + Limit: 1, + }) + + if err != nil { + panic(err) + } + + return ledgers.Embedded.Records[0] +} + +func getPathWithCursor(path, cursor string) string { + urlObj, err := url.Parse(path) + if err != nil { + panic(err) + } + + // Add cursor if not present + q := urlObj.Query() + if q.Get("cursor") == "" { + q.Set("cursor", cursor) + } + + urlObj.RawQuery = q.Encode() + return urlObj.String() +} + +func getPathFromAccessLog(line string) (string, error) { + matches := pathAccessLog.FindStringSubmatch(line) + if len(matches) != 3 { + return "", errors.Errorf("Can't find match: %s", line) + } + + if matches[1] != "GET" { + return "", nil + } + + // Remove duplicate / + path := "/" + strings.TrimLeft(matches[2], "/") + return path, nil +} + +func streamFile(accessLog *cmp.Scanner) { + for accessLog.Scan() { + p := accessLog.Text() + path, err := getPathFromAccessLog(p) + if err != nil { + log.Error(err) + continue + } + + if path == "" { + continue + } + + paths <- cmp.Path{Path: path, Level: 0, Line: accessLog.LinesRead(), Stream: false} + paths <- cmp.Path{Path: path, Level: 0, Line: accessLog.LinesRead(), Stream: true} + } + + if err := accessLog.Err(); err != nil { + panic("Invalid input: " + err.Error()) + } + + close(paths) +} + +func addPathsFromResponse(a *cmp.Response, level int) { + newPaths := a.GetPaths() + for _, newPath := range newPaths { + // For all indexes with chronological sort ignore order=asc + // without cursor. There will always be a diff if Horizon started + // at a different ledger. + if strings.Contains(newPath, "/ledgers") || + strings.Contains(newPath, "/transactions") || + strings.Contains(newPath, "/operations") || + strings.Contains(newPath, "/payments") || + strings.Contains(newPath, "/effects") || + strings.Contains(newPath, "/trades") { + u, err := url.Parse(newPath) + if err != nil { + panic(err) + } + + if u.Query().Get("cursor") == "" && + (u.Query().Get("order") == "" || u.Query().Get("order") == "asc") { + continue + } + } + + if (strings.Contains(newPath, "/transactions") || + strings.Contains(newPath, "/operations") || + strings.Contains(newPath, "/payments")) && !strings.Contains(newPath, "include_failed") { + prefix := "?" 
+ if strings.Contains(newPath, "?") { + prefix = "&" + } + + paths <- cmp.Path{newPath + prefix + "include_failed=false", level, 0, false} + paths <- cmp.Path{newPath + prefix + "include_failed=false", level, 0, true} + + paths <- cmp.Path{newPath + prefix + "include_failed=true", level, 0, false} + paths <- cmp.Path{newPath + prefix + "include_failed=true", level, 0, true} + continue + } + + paths <- cmp.Path{newPath, level, 0, false} + paths <- cmp.Path{newPath, level, 0, true} + } + + if len(paths) == 0 { + close(paths) + } +} diff --git a/tools/horizon-verify/CHANGELOG.md b/tools/horizon-verify/CHANGELOG.md new file mode 100644 index 0000000000..8c32c333b7 --- /dev/null +++ b/tools/horizon-verify/CHANGELOG.md @@ -0,0 +1,3 @@ +## 2019-04-25 + +Initial version diff --git a/tools/horizon-verify/README.md b/tools/horizon-verify/README.md new file mode 100644 index 0000000000..a04bad962e --- /dev/null +++ b/tools/horizon-verify/README.md @@ -0,0 +1,9 @@ +# Horizon verify + +Tool that checks some invariants about Horizon responses: + +- successful response codes when getting transactions from ledgers +- successful transaction counts are correct +- failed transaction counts are correct + +TODO: add more info diff --git a/tools/horizon-verify/main.go b/tools/horizon-verify/main.go new file mode 100644 index 0000000000..b958a8c71a --- /dev/null +++ b/tools/horizon-verify/main.go @@ -0,0 +1,167 @@ +package main + +import ( + "fmt" + "net/http" + "os" + "sync" + + "github.com/spf13/cobra" + "github.com/stellar/go/clients/horizonclient" + protocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/xdr" +) + +var horizonURL string +var startSequence uint32 +var count uint + +func init() { + rootCmd.PersistentFlags().StringVarP(&horizonURL, "url", "u", "", "Horizon server URL") + rootCmd.PersistentFlags().Uint32VarP(&startSequence, "start", "s", 0, "Sequence number of the ledger to start with (follows descending order, defaults to the latest ledger)") + rootCmd.PersistentFlags().UintVarP(&count, "count", "c", 10000, "Number of ledgers to check") +} + +var rootCmd = &cobra.Command{ + Use: "horizon-verify", + Short: "tool to check horizon data consistency", + Run: func(cmd *cobra.Command, args []string) { + if horizonURL == "" { + cmd.Help() + return + } + + client := horizonclient.Client{ + HorizonURL: horizonURL, + HTTP: http.DefaultClient, + } + + ledgerCursor := "" + + if startSequence != 0 { + startSequence++ + + ledger, err := client.LedgerDetail(startSequence) + if err != nil { + panic(err) + } + + ledgerCursor = ledger.PagingToken() + } + + fmt.Printf("%s: Checking %d ledgers starting from cursor \"%s\"\n\n", horizonURL, count, ledgerCursor) + + for { + ledgersPage, err := client.Ledgers(horizonclient.LedgerRequest{ + Limit: 200, + Order: horizonclient.OrderDesc, + Cursor: ledgerCursor, + }) + + if err != nil { + panic(err) + } + + if len(ledgersPage.Embedded.Records) == 0 { + fmt.Println("Done") + return + } + + for _, ledger := range ledgersPage.Embedded.Records { + fmt.Printf("Checking ledger: %d (successful=%d failed=%d)\n", ledger.Sequence, ledger.SuccessfulTransactionCount, *ledger.FailedTransactionCount) + + ledgerCursor = ledger.PagingToken() + + transactionsPage, err := client.Transactions(horizonclient.TransactionRequest{ + ForLedger: uint(ledger.Sequence), + Limit: 200, + IncludeFailed: true, + }) + + if err != nil { + panic(err) + } + + var ( + wg sync.WaitGroup + successful, failed int32 + ) + + for _, transaction := range transactionsPage.Embedded.Records { + 
wg.Add(1) + + if transaction.Successful { + successful++ + } else { + failed++ + } + + go func(transaction protocol.Transaction) { + defer wg.Done() + + var resultXDR xdr.TransactionResult + err = xdr.SafeUnmarshalBase64(transaction.ResultXdr, &resultXDR) + if err != nil { + panic(err) + } + + if (transaction.Successful && !resultXDR.Successful()) || + (!transaction.Successful && resultXDR.Successful()) { + panic(fmt.Sprintf("Corrupted data! %s %s", transaction.Hash, transaction.ResultXdr)) + } + + operationsPage, err := client.Operations(horizonclient.OperationRequest{ + ForTransaction: transaction.Hash, + Limit: 200, + }) + + if err != nil { + panic(err) + } + + if len(operationsPage.Embedded.Records) != int(transaction.OperationCount) { + panic(fmt.Sprintf("Corrupted data! %s operations count %d vs %d", transaction.Hash, len(operationsPage.Embedded.Records), transaction.OperationCount)) + } + }(transaction) + } + + wg.Wait() + + if successful != ledger.SuccessfulTransactionCount || failed != *ledger.FailedTransactionCount { + panic(fmt.Sprintf("Invalid ledger counters %d", ledger.Sequence)) + } + + count-- + if count == 0 { + fmt.Println("Done") + return + } + } + } + }, +} + +func main() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +// func getBody(url string) []byte { +// resp, err := http.Get(url) +// if err != nil { +// panic(err) +// } + +// if resp.StatusCode != http.StatusOK { +// panic(fmt.Sprintf("%d response for %s", resp.StatusCode, url)) +// } + +// body, err := ioutil.ReadAll(resp.Body) +// if err != nil { +// panic(err) +// } + +// return body +// } diff --git a/tools/stellar-archivist/CHANGELOG.md b/tools/stellar-archivist/CHANGELOG.md index 10add533ea..6df05e2539 100644 --- a/tools/stellar-archivist/CHANGELOG.md +++ b/tools/stellar-archivist/CHANGELOG.md @@ -6,6 +6,13 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). As this project is pre 1.0, breaking changes may happen for minor version bumps. A breaking change will get clearly notified in this log. +## ??? + +* Fix race condition in `mirror` command +* Dropped support for Go 1.10, 1.11, 1.12. +* Add `log` command +* Add `--recent` flag for `mirror` command + ## [v0.1.0] - 2016-08-17 Initial release after import from https://github.com/stellar/archivist diff --git a/tools/stellar-archivist/README.md b/tools/stellar-archivist/README.md index b0e3fc4c85..7b8086f5d3 100644 --- a/tools/stellar-archivist/README.md +++ b/tools/stellar-archivist/README.md @@ -42,7 +42,9 @@ Flags: --last int number of recent ledgers to act on (default -1) --low int first ledger to act on --profile collect and serve profile locally + -r, --recent act on ledger-range difference between achives --s3region string S3 region to connect to (default "us-east-1") + --s3endpoint string S3 endpoint (default to AWS endpoint for selected region) --thorough decode and re-encode all buckets --verify verify file contents @@ -67,6 +69,29 @@ archive; the advantage is that more operations are supported, and the tool can s archives much more quickly. This is necessary to handle bulk operations on archives with many thousands of files efficiently. +### S3 backend + +`stellar-archivist` supports reading from and writing to any S3-compatible storage. 
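For programmatic access, the same S3-compatible configuration can be expressed through the `historyarchive` package that the tool now uses under the hood. This is a hedged sketch, assuming the `ConnectOptions` fields that the `--s3region` and `--s3endpoint` flags are wired to in `main.go`, with an invented bucket name and a DigitalOcean Spaces style endpoint as the placeholder provider:

```go
package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/historyarchive"
)

func main() {
	// Bucket, prefix and endpoint are placeholders; CheckpointFrequency
	// matches the constant used by stellar-archivist.
	arch := historyarchive.MustConnect("s3://bucketname/prefix", historyarchive.ConnectOptions{
		S3Region:            "us-east-1",
		S3Endpoint:          "ams3.digitaloceanspaces.com",
		CheckpointFrequency: 64,
	})

	has, err := arch.GetRootHAS()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("current ledger:", has.CurrentLedger)
}
```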
+ +The following options are specific to S3 backend: + + - `--s3region string` β€” AWS S3 region to connect to (default "us-east-1") + - `--s3endpoint string` β€” S3-compatible endpoint (default to AWS S3 endpoint for selected region) + +For example, to check the current status of an archive in DigitalOcean Spaces (ams3 region): + +``` +$ stellar-archivist status --s3endpoint ams3.digitaloceanspaces.com s3://bucketname/prefix +``` + +In order to use this backend with Google Cloud Storage, you need to enable interoperability access in +the [Cloud Storage Settings](https://console.cloud.google.com/storage/settings) and generate interoperable +storage access keys. + +``` +$ export AWS_ACCESS_KEY_ID= AWS_SECRET_ACCESS_KEY= +$ stellar-archivist status --s3endpoint https://storage.googleapis.com s3://google-storage-bucketname +``` ## Examples of use @@ -97,9 +122,23 @@ $ stellar-archivist mirror http://s3-eu-west-1.amazonaws.com/history.stellar.org ``` -### Incremental update to a mirror +### Incremental update to a mirror with --recent +``` +$ stellar-archivist mirror --recent http://history.stellar.org/prd/core-live/core_live_001 file://local-archive + +2019/08/21 13:53:15 mirroring http://history.stellar.org/prd/core-live/core_live_001 -> file://local-archive +2019/08/21 13:53:15 copying range [0x01843cbf, 0x01843d7f] +2019/08/21 13:53:19 skipping existing bucket/18/43/cc/bucket-1843cce32e1c4d6d0765858c9464a7435a6f46c25c8ab164a0d9a11b3da5098b.xdr.gz +2019/08/21 13:53:20 skipping existing bucket/ff/08/f4/bucket-ff08f47f9d90fdaed40c1d6f0fdb1ea20344cdfac18849690d52aab7125c303c.xdr.gz +... +2019/08/21 13:53:20 skipping existing bucket/1e/1e/97/bucket-1e1e97031a1829fe3dff1875abed509103f5fc0569eef0718e02392d411b9c0a.xdr.gz +2019/08/21 13:53:20 skipping existing bucket/36/e1/0c/bucket-36e10c7087567c3dd230e615851771062acc71cb3e8e7bc08605b1829f49f53c.xdr.gz +2019/08/21 13:53:22 copied 3 checkpoints, 49 buckets, range [0x01843cbf, 0x01843d7f] +``` + +### Incremental update to a mirror with --last N ``` -$ stellar-archivist --last 1024 mirror http://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-testnet/core_testnet_001 file://local-archive +$ stellar-archivist --last 1024 mirror http://history.stellar.org/prd/core-testnet/core_testnet_001 file://local-archive 2016/02/10 19:14:01 mirroring http://s3-eu-west-1.amazonaws.com/history.stellar.org/prd/core-testnet/core_testnet_001 -> file://local-archive 2016/02/10 19:14:02 copying range [0x0025b23f, 0x0025b6bf] diff --git a/tools/stellar-archivist/internal/archive.go b/tools/stellar-archivist/internal/archive.go deleted file mode 100644 index f4dcae9bf3..0000000000 --- a/tools/stellar-archivist/internal/archive.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -import ( - "io" - "io/ioutil" - "path" - "encoding/json" - "regexp" - "strconv" - "net/url" - "errors" - "log" - "bytes" - "sync" -) - -const hexPrefixPat = "/[0-9a-f]{2}/[0-9a-f]{2}/[0-9a-f]{2}/" -const rootHASPath = ".well-known/stellar-history.json" - -type CommandOptions struct { - Concurrency int - Range Range - DryRun bool - Force bool - Verify bool - Thorough bool -} - -type ConnectOptions struct { - S3Region string -} - -type ArchiveBackend interface { - Exists(path string) bool - GetFile(path string) (io.ReadCloser, error) - PutFile(path string, in io.ReadCloser) error - ListFiles(path string) (chan string, chan error) - CanListFiles() bool -} - - -type Archive struct { - mutex sync.Mutex - checkpointFiles map[string](map[uint32]bool) - allBuckets map[Hash]bool - referencedBuckets map[Hash]bool - - expectLedgerHashes map[uint32]Hash - actualLedgerHashes map[uint32]Hash - expectTxSetHashes map[uint32]Hash - actualTxSetHashes map[uint32]Hash - expectTxResultSetHashes map[uint32]Hash - actualTxResultSetHashes map[uint32]Hash - - missingBuckets int - invalidBuckets int - - invalidLedgers int - invalidTxSets int - invalidTxResultSets int - - backend ArchiveBackend -} - -func (a *Archive) GetPathHAS(path string) (HistoryArchiveState, error) { - var has HistoryArchiveState - rdr, err := a.backend.GetFile(path) - if err != nil { - return has, err - } - defer rdr.Close() - dec := json.NewDecoder(rdr) - err = dec.Decode(&has) - return has, err -} - -func (a *Archive) PutPathHAS(path string, has HistoryArchiveState, opts *CommandOptions) error { - if a.backend.Exists(path) && !opts.Force { - log.Printf("skipping existing " + path) - return nil - } - buf, err := json.MarshalIndent(has, "", " ") - if err != nil { - return err - } - return a.backend.PutFile(path, - ioutil.NopCloser(bytes.NewReader(buf))) -} - -func (a *Archive) BucketExists(bucket Hash) bool { - return a.backend.Exists(BucketPath(bucket)) -} - -func (a *Archive) CategoryCheckpointExists(cat string, chk uint32) bool { - return a.backend.Exists(CategoryCheckpointPath(cat, chk)) -} - -func (a *Archive) GetRootHAS() (HistoryArchiveState, error) { - return a.GetPathHAS(rootHASPath) -} - -func (a *Archive) GetCheckpointHAS(chk uint32) (HistoryArchiveState, error) { - return a.GetPathHAS(CategoryCheckpointPath("history", chk)) -} - -func (a *Archive) PutCheckpointHAS(chk uint32, has HistoryArchiveState, opts *CommandOptions) error { - return a.PutPathHAS(CategoryCheckpointPath("history", chk), has, opts) -} - -func (a *Archive) PutRootHAS(has HistoryArchiveState, opts *CommandOptions) error { - force := opts.Force - opts.Force = true - e := a.PutPathHAS(rootHASPath, has, opts) - opts.Force = force - return e -} - -func (a *Archive) ListBucket(dp DirPrefix) (chan string, chan error) { - return a.backend.ListFiles(path.Join("bucket", dp.Path())) -} - -func (a *Archive) ListAllBuckets() (chan string, chan error) { - return a.backend.ListFiles("bucket") -} - -func (a *Archive) ListAllBucketHashes() (chan Hash, chan error) { - sch, errs := a.backend.ListFiles("bucket") - ch := make(chan Hash) - rx := regexp.MustCompile("bucket" + hexPrefixPat + "bucket-([0-9a-f]{64})\\.xdr\\.gz$") - errs = makeErrorPump(errs) - go func() { - for s := range sch { - m := rx.FindStringSubmatch(s) - if m != nil { - ch <- MustDecodeHash(m[1]) - } - } - close(ch) - }() - return ch, errs -} - -func (a *Archive) 
ListCategoryCheckpoints(cat string, pth string) (chan uint32, chan error) { - ext := categoryExt(cat) - rx := regexp.MustCompile(cat + hexPrefixPat + cat + - "-([0-9a-f]{8})\\." + regexp.QuoteMeta(ext) + "$") - sch, errs := a.backend.ListFiles(path.Join(cat, pth)) - ch := make(chan uint32) - errs = makeErrorPump(errs) - - go func() { - for s := range sch { - m := rx.FindStringSubmatch(s) - if m != nil { - i, e := strconv.ParseUint(m[1], 16, 32) - if e == nil { - ch <- uint32(i) - } else { - errs <- errors.New("decoding checkpoint number in filename " + s) - } - } - } - close(ch) - }() - return ch, errs -} - -func Connect(u string, opts *ConnectOptions) (*Archive, error) { - arch := Archive{ - checkpointFiles:make(map[string](map[uint32]bool)), - allBuckets:make(map[Hash]bool), - referencedBuckets:make(map[Hash]bool), - expectLedgerHashes:make(map[uint32]Hash), - actualLedgerHashes:make(map[uint32]Hash), - expectTxSetHashes:make(map[uint32]Hash), - actualTxSetHashes:make(map[uint32]Hash), - expectTxResultSetHashes:make(map[uint32]Hash), - actualTxResultSetHashes:make(map[uint32]Hash), - } - if opts == nil { - opts = new(ConnectOptions) - } - for _, cat := range Categories() { - arch.checkpointFiles[cat] = make(map[uint32]bool) - } - parsed, err := url.Parse(u) - if err != nil { - return &arch, err - } - pth := parsed.Path - if parsed.Scheme == "s3" { - // Inside s3, all paths start _without_ the leading / - if len(pth) > 0 && pth[0] == '/' { - pth = pth[1:] - } - arch.backend = MakeS3Backend(parsed.Host, pth, opts) - } else if parsed.Scheme == "file" { - pth = path.Join(parsed.Host, pth) - arch.backend = MakeFsBackend(pth, opts) - } else if parsed.Scheme == "http" { - arch.backend = MakeHttpBackend(parsed, opts) - } else if parsed.Scheme == "mock" { - arch.backend = MakeMockBackend(opts) - } else { - err = errors.New("unknown URL scheme: '" + parsed.Scheme + "'") - } - return &arch, err -} - -func MustConnect(u string, opts *ConnectOptions) *Archive { - arch, err := Connect(u, opts) - if err != nil { - log.Fatal(err) - } - return arch -} diff --git a/tools/stellar-archivist/internal/archive_test.go b/tools/stellar-archivist/internal/archive_test.go deleted file mode 100644 index 4428a120ba..0000000000 --- a/tools/stellar-archivist/internal/archive_test.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "fmt" - "io/ioutil" - "math/big" - "os" - "testing" - - "github.com/stellar/go/xdr" - "github.com/stretchr/testify/assert" -) - -func GetTestS3Archive() *Archive { - mx := big.NewInt(0xffffffff) - r, e := rand.Int(rand.Reader, mx) - if e != nil { - panic(e) - } - return MustConnect(fmt.Sprintf("s3://history-stg.stellar.org/dev/archivist/test-%s", r), - &ConnectOptions{S3Region: "eu-west-1"}) -} - -func GetTestMockArchive() *Archive { - return MustConnect("mock://test", nil) -} - -var tmpdirs []string - -func GetTestFileArchive() *Archive { - d, e := ioutil.TempDir("/tmp", "archivist") - if e != nil { - panic(e) - } - if tmpdirs == nil { - tmpdirs = []string{d} - } else { - tmpdirs = append(tmpdirs, d) - } - return MustConnect("file://"+d, nil) -} - -func cleanup() { - for _, d := range tmpdirs { - os.RemoveAll(d) - } -} - -func GetTestArchive() *Archive { - ty := os.Getenv("ARCHIVIST_TEST_TYPE") - if ty == "file" { - return GetTestFileArchive() - } else if ty == "s3" { - return GetTestS3Archive() - } else { - return GetTestMockArchive() - } -} - -func (arch *Archive) AddRandomBucket() (Hash, error) { - var h Hash - buf := make([]byte, 1024) - _, e := rand.Read(buf) - if e != nil { - return h, e - } - h = sha256.Sum256(buf) - pth := BucketPath(h) - e = arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf))) - return h, e -} - -func (arch *Archive) AddRandomCheckpointFile(cat string, chk uint32) error { - buf := make([]byte, 1024) - _, e := rand.Read(buf) - if e != nil { - return e - } - pth := CategoryCheckpointPath(cat, chk) - return arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf))) -} - -func (arch *Archive) AddRandomCheckpoint(chk uint32) error { - opts := &CommandOptions{Force: true} - for _, cat := range Categories() { - if cat == "history" { - var has HistoryArchiveState - has.CurrentLedger = chk - for i := 0; i < NumLevels; i++ { - curr, e := arch.AddRandomBucket() - if e != nil { - return e - } - snap, e := arch.AddRandomBucket() - if e != nil { - return e - } - next, e := arch.AddRandomBucket() - if e != nil { - return e - } - has.CurrentBuckets[i].Curr = curr.String() - has.CurrentBuckets[i].Snap = snap.String() - has.CurrentBuckets[i].Next.Output = next.String() - } - arch.PutCheckpointHAS(chk, has, opts) - arch.PutRootHAS(has, opts) - } else { - arch.AddRandomCheckpointFile(cat, chk) - } - } - return nil -} - -func (arch *Archive) PopulateRandomRange(rng Range) error { - for chk := range rng.Checkpoints() { - if e := arch.AddRandomCheckpoint(chk); e != nil { - return e - } - } - return nil -} - -func testRange() Range { - return Range{Low: 63, High: 0x3bf} -} - -func testOptions() *CommandOptions { - return &CommandOptions{Range: testRange(), Concurrency: 16} -} - -func GetRandomPopulatedArchive() *Archive { - a := GetTestArchive() - a.PopulateRandomRange(testRange()) - return a -} - -func TestScan(t *testing.T) { - defer cleanup() - opts := testOptions() - GetRandomPopulatedArchive().Scan(opts) -} - -func countMissing(arch *Archive, opts *CommandOptions) int { - n := 0 - arch.Scan(opts) - for _, missing := range arch.CheckCheckpointFilesMissing(opts) { - n += len(missing) - } - n += len(arch.CheckBucketsMissing()) - return n -} - -func TestMirror(t *testing.T) { - defer cleanup() - opts := testOptions() - src := GetRandomPopulatedArchive() - dst := GetTestArchive() - 
Mirror(src, dst, opts) - assert.Equal(t, 0, countMissing(dst, opts)) -} - -func copyFile(category string, checkpoint uint32, src *Archive, dst *Archive) { - pth := CategoryCheckpointPath(category, checkpoint) - rdr, err := src.backend.GetFile(pth) - if err != nil { - panic(err) - } - if err = dst.backend.PutFile(pth, rdr); err != nil { - panic(err) - } -} - -func TestMirrorThenRepair(t *testing.T) { - defer cleanup() - opts := testOptions() - src := GetRandomPopulatedArchive() - dst := GetTestArchive() - Mirror(src, dst, opts) - assert.Equal(t, 0, countMissing(dst, opts)) - bad := opts.Range.Low + uint32(opts.Range.Size()/2) - src.AddRandomCheckpoint(bad) - copyFile("history", bad, src, dst) - assert.NotEqual(t, 0, countMissing(dst, opts)) - Repair(src, dst, opts) - assert.Equal(t, 0, countMissing(dst, opts)) -} - -func TestDryRunNoRepair(t *testing.T) { - defer cleanup() - opts := testOptions() - src := GetRandomPopulatedArchive() - dst := GetTestArchive() - Mirror(src, dst, opts) - assert.Equal(t, 0, countMissing(dst, opts)) - bad := opts.Range.Low + uint32(opts.Range.Size()/2) - src.AddRandomCheckpoint(bad) - copyFile("history", bad, src, dst) - assert.NotEqual(t, 0, countMissing(dst, opts)) - opts.DryRun = true - Repair(src, dst, opts) - assert.NotEqual(t, 0, countMissing(dst, opts)) -} - -func TestXdrDecode(t *testing.T) { - - xdrbytes := []byte{ - - 0, 0, 0, 0, // entry type 0, liveentry - - 0, 32, 223, 100, // lastmodified 2154340 - - 0, 0, 0, 0, // entry type 0, account - - 0, 0, 0, 0, // key type 0 - 23, 140, 68, 253, // ed25519 key (32 bytes) - 184, 162, 186, 195, - 118, 239, 158, 210, - 100, 241, 174, 254, - 108, 110, 165, 140, - 75, 76, 83, 141, - 104, 212, 227, 80, - 1, 214, 157, 7, - - 0, 0, 0, 29, // 64bit balance: 125339976000 - 46, 216, 65, 64, - - 0, 0, 129, 170, // 64bit seqnum: 142567144423475 - 0, 0, 0, 51, - - 0, 0, 0, 1, // numsubentries: 1 - - 0, 0, 0, 1, // inflationdest type, populated - - 0, 0, 0, 0, // key type 0 - 87, 240, 19, 71, // ed25519 key (32 bytes) - 52, 91, 9, 62, - 213, 239, 178, 85, - 161, 119, 108, 251, - 168, 90, 76, 116, - 12, 48, 134, 248, - 115, 255, 117, 50, - 19, 18, 170, 203, - - 0, 0, 0, 0, // flags - - 0, 0, 0, 19, // homedomain: 19 bytes + 1 null padding - 99, 101, 110, 116, // "centaurus.xcoins.de" - 97, 117, 114, 117, - 115, 46, 120, 99, - 111, 105, 110, 115, - 46, 100, 101, 0, - - 1, 0, 0, 0, // thresholds - 0, 0, 0, 0, // signers (null) - - 0, 0, 0, 0, // entry.account.ext.v: 0 - - 0, 0, 0, 0, // entry.ext.v: 0 - } - - assert.Equal(t, len(xdrbytes), 152) - - var tmp xdr.BucketEntry - n, err := xdr.Unmarshal(bytes.NewReader(xdrbytes[:]), &tmp) - fmt.Printf("Decoded %d bytes\n", n) - if err != nil { - panic(err) - } - assert.Equal(t, len(xdrbytes), n) - - var out bytes.Buffer - n, err = xdr.Marshal(&out, &tmp) - fmt.Printf("Encoded %d bytes\n", n) - if err != nil { - panic(err) - } - - assert.Equal(t, out.Len(), n) - assert.Equal(t, out.Bytes(), xdrbytes) -} diff --git a/tools/stellar-archivist/internal/fs_archive.go b/tools/stellar-archivist/internal/fs_archive.go deleted file mode 100644 index 6900a32f56..0000000000 --- a/tools/stellar-archivist/internal/fs_archive.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -import ( - "io" - "os" - "path" - "path/filepath" -) - -type FsArchiveBackend struct { - prefix string -} - -func (b *FsArchiveBackend) GetFile(pth string) (io.ReadCloser, error) { - return os.Open(path.Join(b.prefix, pth)) -} - -func (b *FsArchiveBackend) Exists(pth string) bool { - pth = path.Join(b.prefix, pth) - _, err := os.Stat(pth) - if err != nil && os.IsNotExist(err) { return false } - return true -} - -func (b *FsArchiveBackend) PutFile(pth string, in io.ReadCloser) error { - dir := path.Join(b.prefix, path.Dir(pth)) - if !b.Exists(dir) { - if e := os.MkdirAll(dir, 0755); e != nil { - return e - } - } - - pth = path.Join(b.prefix, pth) - out, e := os.Create(pth) - if e != nil { - return e - } - defer in.Close() - defer out.Close() - _, e = io.Copy(out, in) - return e -} - -func (b *FsArchiveBackend) ListFiles(pth string) (chan string, chan error) { - ch := make(chan string) - errs := make(chan error) - go func() { - filepath.Walk(path.Join(b.prefix, pth), - func(p string, info os.FileInfo, err error) error { - if err != nil { - errs <- err - return nil - } - if info != nil && ! info.IsDir() { - ch <- p - } - return nil - }) - close(ch) - close(errs) - }() - return ch, errs -} - -func (b *FsArchiveBackend) CanListFiles() bool { - return true -} - -func MakeFsBackend(pth string, opts *ConnectOptions) ArchiveBackend { - return &FsArchiveBackend{ - prefix: pth, - } -} diff --git a/tools/stellar-archivist/internal/history_archive_state.go b/tools/stellar-archivist/internal/history_archive_state.go deleted file mode 100644 index 4b3fcc1b34..0000000000 --- a/tools/stellar-archivist/internal/history_archive_state.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -const NumLevels = 11 - -type HistoryArchiveState struct { - Version int `json:"version"` - Server string `json:"server"` - CurrentLedger uint32 `json:"currentLedger"` - CurrentBuckets [NumLevels] struct { - Curr string `json:"curr"` - Snap string `json:"snap"` - Next struct { - State uint32 `json:"state"` - Output string `json:"output,omitempty"` - } `json:"next"` - } `json:"currentBuckets"` -} - -func (h *HistoryArchiveState) LevelSummary() (string, int) { - summ := "" - nz := 0 - for _, b := range h.CurrentBuckets { - state := '_' - for _, bs := range []string { - b.Curr, b.Snap, b.Next.Output, - } { - h, err := DecodeHash(bs) - if err == nil && ! h.IsZero() { - state = '#' - } - } - if state != '_' { - nz += 1 - } - summ += string(state) - } - return summ, nz -} - -func (h *HistoryArchiveState) Buckets() []Hash { - r := []Hash{} - for _, b := range h.CurrentBuckets { - for _, bs := range []string { - b.Curr, b.Snap, b.Next.Output, - } { - h, err := DecodeHash(bs) - if err == nil && ! 
h.IsZero() { - r = append(r, h) - } - } - } - return r -} - -func (h *HistoryArchiveState) Range() Range { - return Range{Low:63, High: h.CurrentLedger,} -} diff --git a/tools/stellar-archivist/internal/history_archive_state_test.go b/tools/stellar-archivist/internal/history_archive_state_test.go deleted file mode 100644 index d4349875d9..0000000000 --- a/tools/stellar-archivist/internal/history_archive_state_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -import ( - "testing" - "encoding/json" -) - -func TestUnmarshalState(t *testing.T) { - - var jsonBlob = []byte(`{ - "version": 1, - "server": "v0.4.0-34-g2f015f6", - "currentLedger": 2113919, - "currentBuckets": [ - { - "curr": "0000000000000000000000000000000000000000000000000000000000000000", - "next": { - "state": 0 - }, - "snap": "0000000000000000000000000000000000000000000000000000000000000000" - }, - { - "curr": "0000000000000000000000000000000000000000000000000000000000000000", - "next": { - "state": 1, - "output": "0000000000000000000000000000000000000000000000000000000000000000" - }, - "snap": "0000000000000000000000000000000000000000000000000000000000000000" - } - ] - }`) - - var state HistoryArchiveState - err := json.Unmarshal(jsonBlob, &state) - if err != nil { - t.Error(err) - } else if state.CurrentLedger != 2113919 { - t.Error(state) - } -} diff --git a/tools/stellar-archivist/internal/http_archive.go b/tools/stellar-archivist/internal/http_archive.go deleted file mode 100644 index e7668737ad..0000000000 --- a/tools/stellar-archivist/internal/http_archive.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -import ( - "io" - "path" - "fmt" - "net/http" - "net/url" - "errors" -) - -type HttpArchiveBackend struct { - client http.Client - base url.URL -} - -func checkResp(r *http.Response) error { - if r.StatusCode >= 200 && r.StatusCode < 400 { - return nil - } else { - return fmt.Errorf("Bad HTTP response '%s' for GET '%s'", - r.Status, r.Request.URL.String()) - } -} - -func (b *HttpArchiveBackend) GetFile(pth string) (io.ReadCloser, error) { - var derived url.URL = b.base - derived.Path = path.Join(derived.Path, pth) - resp, err := b.client.Get(derived.String()) - if err != nil { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return nil, err - } - err = checkResp(resp) - if err != nil { - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return nil, err - } - return resp.Body, nil -} - -func (b *HttpArchiveBackend) Exists(pth string) bool { - var derived url.URL = b.base - derived.Path = path.Join(derived.Path, pth) - resp, err := b.client.Head(derived.String()) - if resp != nil && resp.Body != nil { - resp.Body.Close() - } - return err == nil && resp != nil && checkResp(resp) == nil -} - -func (b *HttpArchiveBackend) PutFile(pth string, in io.ReadCloser) error { - in.Close() - return errors.New("PutFile not available over HTTP") -} - -func (b *HttpArchiveBackend) ListFiles(pth string) (chan string, chan error) { - ch := make(chan string) - er := make(chan error) - close(ch) - er <- errors.New("ListFiles not available over HTTP") - close(er) - return ch, er -} - -func (b *HttpArchiveBackend) CanListFiles() bool { - return false -} - -func MakeHttpBackend(base *url.URL, opts *ConnectOptions) ArchiveBackend { - return &HttpArchiveBackend{ - base: *base, - } -} diff --git a/tools/stellar-archivist/internal/range.go b/tools/stellar-archivist/internal/range.go deleted file mode 100644 index bf6d503aa7..0000000000 --- a/tools/stellar-archivist/internal/range.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -import ( - "fmt" - "sort" - "strings" -) - -const CheckpointFreq = uint32(64) - -type Range struct { - Low uint32 - High uint32 -} - -func PrevCheckpoint(i uint32) uint32 { - freq := CheckpointFreq - if i < freq { - return freq - 1 - } - return ((i / freq) * freq) - 1; -} - -func NextCheckpoint(i uint32) uint32 { - if i == 0 { - return CheckpointFreq - 1 - } - freq := uint64(CheckpointFreq) - v := uint64(i) - n := (((v + freq - 1) / freq) * freq) - 1 - if n >= 0xffffffff { - return 0xffffffff - } - return uint32(n) -} - -func MakeRange(low uint32, high uint32) Range { - if high < low { - high = low - } - return Range{ - Low:PrevCheckpoint(low), - High:NextCheckpoint(high), - } -} - -func (r Range) Clamp(other Range) Range { - low := r.Low - high := r.High - if low < other.Low { - low = other.Low - } - if high > other.High { - high = other.High - } - return MakeRange(low, high) -} - -func (r Range) String() string { - return fmt.Sprintf("[0x%8.8x, 0x%8.8x]", r.Low, r.High) -} - -func (r Range) Checkpoints() chan uint32 { - ch := make(chan uint32) - go func() { - for i := uint64(r.Low); i < uint64(r.High); i += uint64(CheckpointFreq) { - ch <- uint32(i) - } - close(ch) - }() - return ch -} - -func (r Range) Size() int { - return int(r.High - r.Low) / int(CheckpointFreq) -} - -func (r Range) CollapsedString() string { - if r.Low == r.High { - return fmt.Sprintf("0x%8.8x", r.Low) - } else { - return fmt.Sprintf("[0x%8.8x-0x%8.8x]", r.Low, r.High) - } -} - -type ByUint32 []uint32 -func (a ByUint32) Len() int { return len(a) } -func (a ByUint32) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByUint32) Less(i, j int) bool { return a[i] < a[j] } - - -func fmtRangeList(vs []uint32) string { - - sort.Sort(ByUint32(vs)) - - s := make([]string, 0, 10) - var curr *Range - - for _, t := range vs { - if curr != nil { - if curr.High + CheckpointFreq == t { - curr.High = t - continue - } else { - s = append(s, curr.CollapsedString()) - curr = nil - } - } - curr = &Range{Low:t, High:t} - } - if curr != nil { - s = append(s, curr.CollapsedString()) - } - - return strings.Join(s, ", ") -} diff --git a/tools/stellar-archivist/internal/range_test.go b/tools/stellar-archivist/internal/range_test.go deleted file mode 100644 index f34ccc3cad..0000000000 --- a/tools/stellar-archivist/internal/range_test.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -import ( - "testing" - "github.com/stretchr/testify/assert" -) - -func TestFmtRangeList(t *testing.T) { - - assert.Equal(t, - "", - fmtRangeList([]uint32{})) - - assert.Equal(t, - "0x0000003f", - fmtRangeList([]uint32{0x3f})) - - assert.Equal(t, - "[0x0000003f-0x0000007f]", - fmtRangeList([]uint32{0x3f, 0x7f})) - - assert.Equal(t, - "[0x0000003f-0x000000bf]", - fmtRangeList([]uint32{0x3f, 0x7f, 0xbf})) - - assert.Equal(t, - "[0x0000003f-0x0000007f], 0x000000ff", - fmtRangeList([]uint32{0x3f, 0x7f, 0xff})) - - assert.Equal(t, - "[0x0000003f-0x0000007f], [0x000000ff-0x0000017f]", - fmtRangeList([]uint32{0x3f, 0x7f, 0xff, 0x13f, 0x17f})) - - assert.Equal(t, - "[0x0000003f-0x0000007f], 0x000000ff, [0x0000017f-0x000001bf]", - fmtRangeList([]uint32{0x3f, 0x7f, 0xff, 0x17f, 0x1bf})) -} - diff --git a/tools/stellar-archivist/internal/s3_archive.go b/tools/stellar-archivist/internal/s3_archive.go deleted file mode 100644 index 28dd44d752..0000000000 --- a/tools/stellar-archivist/internal/s3_archive.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -import ( - "io" - "path" - "bytes" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" -) - -type S3ArchiveBackend struct { - svc *s3.S3 - bucket string - prefix string -} - -func (b *S3ArchiveBackend) GetFile(pth string) (io.ReadCloser, error) { - params := &s3.GetObjectInput{ - Bucket: aws.String(b.bucket), - Key: aws.String(path.Join(b.prefix, pth)), - } - resp, err := b.svc.GetObject(params) - if err != nil { - return nil, err - } - return resp.Body, nil -} - -func (b *S3ArchiveBackend) Exists(pth string) bool { - params := &s3.HeadObjectInput{ - Bucket: aws.String(b.bucket), - Key: aws.String(path.Join(b.prefix, pth)), - } - _, err := b.svc.HeadObject(params) - return err == nil -} - -func (b *S3ArchiveBackend) PutFile(pth string, in io.ReadCloser) error { - var buf bytes.Buffer - _, err := buf.ReadFrom(in) - in.Close() - if err != nil { - return err - } - params := &s3.PutObjectInput{ - Bucket: aws.String(b.bucket), - Key: aws.String(path.Join(b.prefix, pth)), - ACL: aws.String(s3.ObjectCannedACLPublicRead), - Body: bytes.NewReader(buf.Bytes()), - } - _, err = b.svc.PutObject(params) - in.Close() - return err -} - -func (b *S3ArchiveBackend) ListFiles(pth string) (chan string, chan error) { - prefix := path.Join(b.prefix, pth) - ch := make(chan string) - errs := make(chan error) - - params := &s3.ListObjectsInput{ - Bucket: aws.String(b.bucket), - MaxKeys: aws.Int64(1000), - Prefix: aws.String(prefix), - } - resp, err := b.svc.ListObjects(params) - if err != nil { - errs <- err - close(ch) - close(errs) - return ch, errs - } - go func() { - for { - for _, c := range resp.Contents { - params.Marker = c.Key - ch <- *c.Key - } - if *resp.IsTruncated { - resp, err = b.svc.ListObjects(params) - if err != nil { - errs <- err - } - } else { - break - } - } - close(ch) - close(errs) - }() - return ch, errs -} - -func (b *S3ArchiveBackend) CanListFiles() bool { - return true -} - -func MakeS3Backend(bucket string, prefix string, opts *ConnectOptions) ArchiveBackend { - cfg := aws.Config{} - if opts != nil && opts.S3Region != "" { - 
cfg.Region = aws.String(opts.S3Region) - } - sess := session.New(&cfg) - return &S3ArchiveBackend{ - svc: s3.New(sess), - bucket: bucket, - prefix: prefix, - } -} diff --git a/tools/stellar-archivist/internal/xdrstream.go b/tools/stellar-archivist/internal/xdrstream.go deleted file mode 100644 index e4214f1ae6..0000000000 --- a/tools/stellar-archivist/internal/xdrstream.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2016 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -package archivist - -import ( - "bytes" - "compress/gzip" - "crypto/sha256" - "encoding/binary" - "errors" - "fmt" - "io" - "strings" - - "github.com/stellar/go/xdr" -) - -type XdrStream struct { - buf bytes.Buffer - rdr io.ReadCloser - rdr2 io.ReadCloser -} - -func NewXdrStream(in io.ReadCloser) *XdrStream { - return &XdrStream{rdr: bufReadCloser(in)} -} - -func NewXdrGzStream(in io.ReadCloser) (*XdrStream, error) { - rdr, err := gzip.NewReader(bufReadCloser(in)) - if err != nil { - in.Close() - return nil, err - } - return &XdrStream{rdr: bufReadCloser(rdr), rdr2: in}, nil -} - -func (a *Archive) GetXdrStream(pth string) (*XdrStream, error) { - if !strings.HasSuffix(pth, ".xdr.gz") { - return nil, errors.New("File has non-.xdr.gz suffix: " + pth) - } - rdr, err := a.backend.GetFile(pth) - if err != nil { - return nil, err - } - return NewXdrGzStream(rdr) -} - -func HashXdr(x interface{}) (Hash, error) { - var msg bytes.Buffer - _, err := xdr.Marshal(&msg, x) - if err != nil { - var zero Hash - return zero, err - } - return Hash(sha256.Sum256(msg.Bytes())), nil -} - -func (x *XdrStream) Close() { - if x.rdr != nil { - x.rdr.Close() - } - if x.rdr2 != nil { - x.rdr2.Close() - } -} - -func (x *XdrStream) ReadOne(in interface{}) error { - var nbytes uint32 - err := binary.Read(x.rdr, binary.BigEndian, &nbytes) - if err != nil { - x.rdr.Close() - if err == io.ErrUnexpectedEOF { - return io.EOF - } else { - return err - } - } - nbytes &= 0x7fffffff - x.buf.Reset() - if nbytes == 0 { - x.rdr.Close() - return io.EOF - } - x.buf.Grow(int(nbytes)) - read, err := x.buf.ReadFrom(io.LimitReader(x.rdr, int64(nbytes))) - if read != int64(nbytes) { - x.rdr.Close() - return errors.New("Read wrong number of bytes from XDR") - } - if err != nil { - x.rdr.Close() - return err - } - - readi, err := xdr.Unmarshal(&x.buf, in) - if int64(readi) != int64(nbytes) { - return fmt.Errorf("Unmarshalled %d bytes from XDR, expected %d)", - readi, nbytes) - } - return err -} - -func WriteFramedXdr(out io.Writer, in interface{}) error { - var tmp bytes.Buffer - n, err := xdr.Marshal(&tmp, in) - un := uint32(n) - if un > 0x7fffffff { - return fmt.Errorf("Overlong write: %d bytes", n) - } - - un = un | 0x80000000 - binary.Write(out, binary.BigEndian, &un) - k, err := tmp.WriteTo(out) - if int64(n) != k { - return fmt.Errorf("Mismatched write length: %d vs. 
%d", n, k) - } - return err -} diff --git a/tools/stellar-archivist/main.go b/tools/stellar-archivist/main.go index debac1f66a..dbcf83c9db 100644 --- a/tools/stellar-archivist/main.go +++ b/tools/stellar-archivist/main.go @@ -6,23 +6,32 @@ package main import ( "fmt" - "log" + log "github.com/sirupsen/logrus" "net/http" _ "net/http/pprof" "os" "github.com/spf13/cobra" - archivist "github.com/stellar/go/tools/stellar-archivist/internal" + "github.com/stellar/go/historyarchive" + "github.com/stellar/go/support/errors" ) +const checkpointFrequency = uint32(64) + func status(a string, opts *Options) { - arch := archivist.MustConnect(a, &opts.ConnectOpts) - state, e := arch.GetRootHAS() - if e != nil { - log.Fatal(e) + arch := historyarchive.MustConnect(a, opts.ConnectOpts) + state, err := arch.GetRootHAS() + if err != nil { + log.Fatal(errors.Wrap(err, "Error getting HAS")) + } + buckets, err := state.Buckets() + if err != nil { + log.Fatal(errors.Wrap(err, "Error getting buckets")) + } + summ, nz, err := state.LevelSummary() + if err != nil { + log.Fatal(errors.Wrap(err, "Error getting level summary")) } - buckets := state.Buckets() - summ, nz := state.LevelSummary() fmt.Printf("\n") fmt.Printf(" Archive: %s\n", a) fmt.Printf(" Server: %s\n", state.Server) @@ -36,24 +45,51 @@ type Options struct { Low int High uint32 Last int + Recent bool Profile bool - CommandOpts archivist.CommandOptions - ConnectOpts archivist.ConnectOptions + Debug bool + Trace bool + CommandOpts historyarchive.CommandOptions + ConnectOpts historyarchive.ConnectOptions } -func (opts *Options) SetRange(arch *archivist.Archive) { - if arch != nil && opts.Last != -1 { - state, e := arch.GetRootHAS() - if e == nil { - low := state.CurrentLedger - uint32(opts.Last) - opts.CommandOpts.Range = - archivist.MakeRange(low, state.CurrentLedger) - return +func (opts *Options) SetRange(srcArch *historyarchive.Archive, dstArch *historyarchive.Archive) { + checkpointMgr := historyarchive.NewCheckpointManager(checkpointFrequency) + if srcArch != nil { + + // If we got a src and dst archive and were passed --recent, we extract + // the range as the sequence-difference between the two. + if dstArch != nil && opts.Recent { + srcState, e1 := srcArch.GetRootHAS() + dstState, e2 := dstArch.GetRootHAS() + if e1 == nil && e2 == nil { + low := dstState.CurrentLedger + high := srcState.CurrentLedger + opts.CommandOpts.Range = + checkpointMgr.MakeRange(low, high) + return + } + + // If we got a src, no dst, and a --last N flag, we extract the + // range as the last N ledgers in the src archive. + } else if opts.Last != -1 { + state, e := srcArch.GetRootHAS() + if e == nil { + low := uint32(0) + if state.CurrentLedger > uint32(opts.Last) { + low = state.CurrentLedger - uint32(opts.Last) + } + opts.CommandOpts.Range = + checkpointMgr.MakeRange(low, state.CurrentLedger) + return + } } } + + // Otherwise we fall back to the provided low and high, which further + // default to the entire numeric range of a uint32. 
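Whichever branch of `SetRange` is taken, the chosen bounds are widened to checkpoint boundaries by the checkpoint manager. A small illustrative sketch of that arithmetic, mirroring the expectations in `main_test.go` further below and assuming the 64-ledger checkpoint frequency used throughout this file:

```go
package main

import (
	"fmt"

	"github.com/stellar/go/historyarchive"
)

func main() {
	mgr := historyarchive.NewCheckpointManager(64)

	// --last 10 with a current ledger of 0xbf: the low bound 0xb5 is widened
	// down to the previous checkpoint ledger and the high bound stays on the
	// checkpoint it already sits on, matching main_test.go.
	r := mgr.MakeRange(0xbf-10, 0xbf)
	fmt.Printf("[0x%x, 0x%x]\n", r.Low, r.High) // [0x7f, 0xbf]
}
```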
opts.CommandOpts.Range = - archivist.MakeRange(uint32(opts.Low), - uint32(opts.High)) + checkpointMgr.MakeRange(uint32(opts.Low), uint32(opts.High)) } @@ -65,12 +101,29 @@ func (opts *Options) MaybeProfile() { } } +func (opts *Options) SetupLogging() { + if opts.Debug { + log.SetLevel(log.DebugLevel) + log.Debug("set logging to DEBUG level") + } + if opts.Trace { + log.SetLevel(log.TraceLevel) + log.Trace("set logging to TRACE level") + } +} + +func logArchive(a string, opts *Options) { + arch := historyarchive.MustConnect(a, opts.ConnectOpts) + opts.SetRange(arch, nil) + arch.Log(&opts.CommandOpts) +} + func scan(a string, opts *Options) { - arch := archivist.MustConnect(a, &opts.ConnectOpts) - opts.SetRange(arch) + arch := historyarchive.MustConnect(a, opts.ConnectOpts) + opts.SetRange(arch, nil) e1 := arch.Scan(&opts.CommandOpts) - e2 := arch.ReportMissing(&opts.CommandOpts) - e3 := arch.ReportInvalid(&opts.CommandOpts) + missing, e2 := arch.ReportMissing(&opts.CommandOpts) + invalid, e3 := arch.ReportInvalid(&opts.CommandOpts) if e1 != nil { log.Fatal(e1) } @@ -80,25 +133,31 @@ func scan(a string, opts *Options) { if e3 != nil { log.Fatal(e3) } + if missing { + log.Fatal("Some objects were missing") + } + if invalid { + log.Fatal("Some objects were invalid") + } } func mirror(src string, dst string, opts *Options) { - srcArch := archivist.MustConnect(src, &opts.ConnectOpts) - dstArch := archivist.MustConnect(dst, &opts.ConnectOpts) - opts.SetRange(srcArch) + srcArch := historyarchive.MustConnect(src, opts.ConnectOpts) + dstArch := historyarchive.MustConnect(dst, opts.ConnectOpts) + opts.SetRange(srcArch, dstArch) log.Printf("mirroring %v -> %v\n", src, dst) - e := archivist.Mirror(srcArch, dstArch, &opts.CommandOpts) + e := historyarchive.Mirror(srcArch, dstArch, &opts.CommandOpts) if e != nil { log.Fatal(e) } } func repair(src string, dst string, opts *Options) { - srcArch := archivist.MustConnect(src, &opts.ConnectOpts) - dstArch := archivist.MustConnect(dst, &opts.ConnectOpts) - opts.SetRange(srcArch) + srcArch := historyarchive.MustConnect(src, opts.ConnectOpts) + dstArch := historyarchive.MustConnect(dst, opts.ConnectOpts) + opts.SetRange(srcArch, dstArch) log.Printf("repairing %v -> %v\n", src, dst) - e := archivist.Repair(srcArch, dstArch, &opts.CommandOpts) + e := historyarchive.Repair(srcArch, dstArch, &opts.CommandOpts) if e != nil { log.Fatal(e) } @@ -107,6 +166,7 @@ func repair(src string, dst string, opts *Options) { func main() { var opts Options + opts.ConnectOpts.CheckpointFrequency = checkpointFrequency rootCmd := &cobra.Command{ Use: "stellar-archivist", @@ -131,6 +191,14 @@ func main() { "last ledger to act on", ) + rootCmd.PersistentFlags().BoolVarP( + &opts.Recent, + "recent", + "r", + false, + "act on ledger-range difference between achives", + ) + rootCmd.PersistentFlags().IntVar( &opts.Last, "last", @@ -153,6 +221,13 @@ func main() { "S3 region to connect to", ) + rootCmd.PersistentFlags().StringVar( + &opts.ConnectOpts.S3Endpoint, + "s3endpoint", + "", + "S3 endpoint to use", + ) + rootCmd.PersistentFlags().BoolVarP( &opts.CommandOpts.DryRun, "dryrun", @@ -190,16 +265,41 @@ func main() { "collect and serve profile locally", ) + rootCmd.PersistentFlags().BoolVar( + &opts.Debug, + "debug", + false, + "set log level to DEBUG", + ) + + rootCmd.PersistentFlags().BoolVar( + &opts.Trace, + "trace", + false, + "set log level to TRACE", + ) + rootCmd.AddCommand(&cobra.Command{ Use: "status", Run: func(cmd *cobra.Command, args []string) { + opts.SetupLogging() 
status(firstArg(args), &opts) }, }) + rootCmd.AddCommand(&cobra.Command{ + Use: "log", + Run: func(cmd *cobra.Command, args []string) { + opts.SetupLogging() + opts.MaybeProfile() + logArchive(firstArg(args), &opts) + }, + }) + rootCmd.AddCommand(&cobra.Command{ Use: "scan", Run: func(cmd *cobra.Command, args []string) { + opts.SetupLogging() opts.MaybeProfile() scan(firstArg(args), &opts) }, @@ -208,6 +308,7 @@ func main() { rootCmd.AddCommand(&cobra.Command{ Use: "mirror", Run: func(cmd *cobra.Command, args []string) { + opts.SetupLogging() opts.MaybeProfile() src, dst := srcDst(args) mirror(src, dst, &opts) @@ -217,6 +318,7 @@ func main() { rootCmd.AddCommand(&cobra.Command{ Use: "repair", Run: func(cmd *cobra.Command, args []string) { + opts.SetupLogging() opts.MaybeProfile() src, dst := srcDst(args) repair(src, dst, &opts) @@ -226,7 +328,8 @@ func main() { rootCmd.AddCommand(&cobra.Command{ Use: "dumpxdr", Run: func(cmd *cobra.Command, args []string) { - err := archivist.DumpXdrAsJson(args) + opts.SetupLogging() + err := historyarchive.DumpXdrAsJson(args) if err != nil { log.Fatal(err) } diff --git a/tools/stellar-archivist/main_test.go b/tools/stellar-archivist/main_test.go new file mode 100644 index 0000000000..55a200562d --- /dev/null +++ b/tools/stellar-archivist/main_test.go @@ -0,0 +1,48 @@ +// Copyright 2019 Stellar Development Foundation and contributors. Licensed +// under the Apache License, Version 2.0. See the COPYING file at the root +// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 + +package main + +import ( + "testing" + + "github.com/stellar/go/historyarchive" + "github.com/stretchr/testify/assert" +) + +func TestLastOption(t *testing.T) { + src_arch := historyarchive.MustConnect("mock://test", historyarchive.ConnectOptions{CheckpointFrequency: 64}) + assert.NotEqual(t, nil, src_arch) + + var src_has historyarchive.HistoryArchiveState + src_has.CurrentLedger = uint32(0xbf) + cmd_opts := &historyarchive.CommandOptions{Force: true} + src_arch.PutRootHAS(src_has, cmd_opts) + + var opts Options + opts.Last = 10 + opts.SetRange(src_arch, nil) + assert.Equal(t, uint32(0x7f), opts.CommandOpts.Range.Low) + assert.Equal(t, uint32(0xbf), opts.CommandOpts.Range.High) +} + +func TestRecentOption(t *testing.T) { + src_arch := historyarchive.MustConnect("mock://test1", historyarchive.ConnectOptions{CheckpointFrequency: 64}) + dst_arch := historyarchive.MustConnect("mock://test2", historyarchive.ConnectOptions{CheckpointFrequency: 64}) + assert.NotEqual(t, nil, src_arch) + assert.NotEqual(t, nil, dst_arch) + + var src_has, dst_has historyarchive.HistoryArchiveState + src_has.CurrentLedger = uint32(0xbf) + dst_has.CurrentLedger = uint32(0x3f) + cmd_opts := &historyarchive.CommandOptions{Force: true} + src_arch.PutRootHAS(src_has, cmd_opts) + dst_arch.PutRootHAS(dst_has, cmd_opts) + + var opts Options + opts.Recent = true + opts.SetRange(src_arch, dst_arch) + assert.Equal(t, uint32(0x3f), opts.CommandOpts.Range.Low) + assert.Equal(t, uint32(0xbf), opts.CommandOpts.Range.High) +} diff --git a/tools/stellar-hd-wallet/CHANGELOG.md b/tools/stellar-hd-wallet/CHANGELOG.md new file mode 100644 index 0000000000..df99057dc5 --- /dev/null +++ b/tools/stellar-hd-wallet/CHANGELOG.md @@ -0,0 +1,15 @@ +# Changelog + +All notable changes to this project will be documented in this +file. This project adheres to [Semantic Versioning](http://semver.org/). + +As this project is pre 1.0, breaking changes may happen for minor version +bumps. 
A breaking change will get clearly notified in this log. + +## Unreleased + +- Dropped support for Go 1.10, 1.11, 1.12. + +## [v0.0.1] - 2017-12-28 + +Initial release. diff --git a/tools/stellar-hd-wallet/README.md b/tools/stellar-hd-wallet/README.md new file mode 100644 index 0000000000..a5353e400e --- /dev/null +++ b/tools/stellar-hd-wallet/README.md @@ -0,0 +1,23 @@ +# stellar-hd-wallet + +Console tool to generate Stellar HD wallet for a given seed. Implements [SEP-0005](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0005.md). + +This is experimental software. Use at your own risk. + +## Usage + +``` +Simple HD wallet for Stellar Lumens. THIS PROGRAM IS STILL EXPERIMENTAL. USE AT YOUR OWN RISK. + +Usage: + stellar-hd-wallet [command] + +Available Commands: + accounts Display accounts for a given mnemonic code + new Generates a new mnemonic code + +Flags: + -h, --help help for stellar-hd-wallet + +Use "stellar-hd-wallet [command] --help" for more information about a command. +``` diff --git a/tools/stellar-hd-wallet/commands/accounts.go b/tools/stellar-hd-wallet/commands/accounts.go new file mode 100644 index 0000000000..f888b8f8f7 --- /dev/null +++ b/tools/stellar-hd-wallet/commands/accounts.go @@ -0,0 +1,85 @@ +package commands + +import ( + "encoding/hex" + "fmt" + "regexp" + "strings" + + "github.com/spf13/cobra" + "github.com/stellar/go/exp/crypto/derivation" + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/errors" + "github.com/tyler-smith/go-bip39" +) + +var wordsRegexp = regexp.MustCompile(`^[a-z]+$`) +var count, startID uint32 + +var allowedNumbers = map[uint32]bool{12: true, 15: true, 18: true, 21: true, 24: true} + +var AccountsCmd = &cobra.Command{ + Use: "accounts", + Short: "Display accounts for a given mnemonic code", + Long: "", + RunE: func(cmd *cobra.Command, args []string) error { + printf("How many words? 
") + wordsCount := readUint() + if _, exist := allowedNumbers[wordsCount]; !exist { + return errors.New("Invalid value, allowed values: 12, 15, 18, 21, 24") + } + + words := make([]string, wordsCount) + for i := uint32(0); i < wordsCount; i++ { + printf("Enter word #%-4d", i+1) + words[i] = readString() + if !wordsRegexp.MatchString(words[i]) { + println("Invalid word, try again.") + i-- + } + } + + printf("Enter password (leave empty if none): ") + password := readString() + + mnemonic := strings.Join(words, " ") + println("Mnemonic:", mnemonic) + + seed, err := bip39.NewSeedWithErrorChecking(mnemonic, password) + if err != nil { + return errors.New("Invalid words or checksum") + } + + println("BIP39 Seed:", hex.EncodeToString(seed)) + + masterKey, err := derivation.DeriveForPath(derivation.StellarAccountPrefix, seed) + if err != nil { + return errors.Wrap(err, "Error deriving master key") + } + + println("m/44'/148' key:", hex.EncodeToString(masterKey.Key)) + + println("") + + for i := uint32(startID); i < startID+count; i++ { + key, err := masterKey.Derive(derivation.FirstHardenedIndex + i) + if err != nil { + return errors.Wrap(err, "Error deriving child key") + } + + kp, err := keypair.FromRawSeed(key.RawSeed()) + if err != nil { + return errors.Wrap(err, "Error creating key pair") + } + + println(fmt.Sprintf(derivation.StellarAccountPathFormat, i), kp.Address(), kp.Seed()) + } + + return nil + }, +} + +func init() { + AccountsCmd.Flags().Uint32VarP(&count, "count", "c", 10, "number of accounts to display") + AccountsCmd.Flags().Uint32VarP(&startID, "start", "s", 0, "ID of the first wallet to display") +} diff --git a/tools/stellar-hd-wallet/commands/accounts_test.go b/tools/stellar-hd-wallet/commands/accounts_test.go new file mode 100644 index 0000000000..e112378a98 --- /dev/null +++ b/tools/stellar-hd-wallet/commands/accounts_test.go @@ -0,0 +1,114 @@ +package commands + +import ( + "bufio" + "bytes" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAccounts(t *testing.T) { + tests := []struct { + Words string + Passphrase string + Error string + Want string + }{ + { + Words: "illness spike retreat truth genius clock brain pass fit cave bargain toe", + Want: `m/44'/148'/0' GDRXE2BQUC3AZNPVFSCEZ76NJ3WWL25FYFK6RGZGIEKWE4SOOHSUJUJ6 SBGWSG6BTNCKCOB3DIFBGCVMUPQFYPA2G4O34RMTB343OYPXU5DJDVMN +m/44'/148'/1' GBAW5XGWORWVFE2XTJYDTLDHXTY2Q2MO73HYCGB3XMFMQ562Q2W2GJQX SCEPFFWGAG5P2VX5DHIYK3XEMZYLTYWIPWYEKXFHSK25RVMIUNJ7CTIS +m/44'/148'/2' GAY5PRAHJ2HIYBYCLZXTHID6SPVELOOYH2LBPH3LD4RUMXUW3DOYTLXW SDAILLEZCSA67DUEP3XUPZJ7NYG7KGVRM46XA7K5QWWUIGADUZCZWTJP +m/44'/148'/3' GAOD5NRAEORFE34G5D4EOSKIJB6V4Z2FGPBCJNQI6MNICVITE6CSYIAE SBMWLNV75BPI2VB4G27RWOMABVRTSSF7352CCYGVELZDSHCXWCYFKXIX +m/44'/148'/4' GBCUXLFLSL2JE3NWLHAWXQZN6SQC6577YMAU3M3BEMWKYPFWXBSRCWV4 SCPCY3CEHMOP2TADSV2ERNNZBNHBGP4V32VGOORIEV6QJLXD5NMCJUXI +m/44'/148'/5' GBRQY5JFN5UBG5PGOSUOL4M6D7VRMAYU6WW2ZWXBMCKB7GPT3YCBU2XZ SCK27SFHI3WUDOEMJREV7ZJQG34SCBR6YWCE6OLEXUS2VVYTSNGCRS6X +m/44'/148'/6' GBY27SJVFEWR3DUACNBSMJB6T4ZPR4C7ZXSTHT6GMZUDL23LAM5S2PQX SDJ4WDPOQAJYR3YIAJOJP3E6E4BMRB7VZ4QAEGCP7EYVDW6NQD3LRJMZ +m/44'/148'/7' GAY7T23Z34DWLSTEAUKVBPHHBUE4E3EMZBAQSLV6ZHS764U3TKUSNJOF SA3HXJUCE2N27TBIZ5JRBLEBF3TLPQEBINP47E6BTMIWW2RJ5UKR2B3L +m/44'/148'/8' GDJTCF62UUYSAFAVIXHPRBR4AUZV6NYJR75INVDXLLRZLZQ62S44443R SCD5OSHUUC75MSJG44BAT3HFZL2HZMMQ5M4GPDL7KA6HJHV3FLMUJAME +m/44'/148'/9' GBTVYYDIYWGUQUTKX6ZMLGSZGMTESJYJKJWAATGZGITA25ZB6T5REF44 SCJGVMJ66WAUHQHNLMWDFGY2E72QKSI3XGSBYV6BANDFUFE7VY4XNXXR`, + }, + { + Words: 
"resource asthma orphan phone ice canvas fire useful arch jewel impose vague theory cushion top", + Want: `m/44'/148'/0' GAVXVW5MCK7Q66RIBWZZKZEDQTRXWCZUP4DIIFXCCENGW2P6W4OA34RH SAKS7I2PNDBE5SJSUSU2XLJ7K5XJ3V3K4UDFAHMSBQYPOKE247VHAGDB +m/44'/148'/1' GDFCYVCICATX5YPJUDS22KM2GW5QU2KKSPPPT2IC5AQIU6TP3BZSLR5K SAZ2H5GLAVWCUWNPQMB6I3OHRI63T2ACUUAWSH7NAGYYPXGIOPLPW3Q4 +m/44'/148'/2' GAUA3XK3SGEQFNCBM423WIM5WCZ4CR4ZDPDFCYSFLCTODGGGJMPOHAAE SDVSSLPL76I33DKAI4LFTOAKCHJNCXUERGPCMVFT655Z4GRLWM6ZZTSC +m/44'/148'/3' GAH3S77QXTAPZ77REY6LGFIJ2XWVXFOKXHCFLA6HQTL3POLVZJDHHUDM SCH56YSGOBYVBC6DO3ZI2PY62GBVXT4SEJSXJOBQYGC2GCEZSB5PEVBZ +m/44'/148'/4' GCSCZVGV2Y3EQ2RATJ7TE6PVWTW5OH5SMG754AF6W6YM3KJF7RMNPB4Y SBWBM73VUNBGBMFD4E2BA7Q756AKVEAAVTQH34RYEUFD6X64VYL5KXQ2 +m/44'/148'/5' GDKWYAJE3W6PWCXDZNMFNFQSPTF6BUDANE6OVRYMJKBYNGL62VKKCNCC SAVS4CDQZI6PSA5DPCC42S5WLKYIPKXPCJSFYY4N3VDK25T2XX2BTGVX +m/44'/148'/6' GCDTVB4XDLNX22HI5GUWHBXJFBCPB6JNU6ZON7E57FA3LFURS74CWDJH SDFC7WZT3GDQVQUQMXN7TC7UWDW5E3GSMFPHUT2TSTQ7RKWTRA4PLBAL +m/44'/148'/7' GBTDPL5S4IOUQHDLCZ7I2UXJ2TEHO6DYIQ3F2P5OOP3IS7JSJI4UMHQJ SA6UO2FIYC6AS2MSDECLR6F7NKCJTG67F7R4LV2GYB4HCZYXJZRLPOBB +m/44'/148'/8' GD3KWA24OIM7V3MZKDAVSLN3NBHGKVURNJ72ZCTAJSDTF7RIGFXPW5FQ SBDNHDDICLLMBIDZ2IF2D3LH44OVUGGAVHQVQ6BZQI5IQO6AB6KNJCOV +m/44'/148'/9' GB3C6RRQB3V7EPDXEDJCMTS45LVDLSZQ46PTIGKZUY37DXXEOAKJIWSV SDHRG2J34MGDAYHMOVKVJC6LX2QZMCTIKRO5I4JQ6BJQ36KVL6QUTT72`, + }, + { + Words: "bench hurt jump file august wise shallow faculty impulse spring exact slush thunder author capable act festival slice deposit sauce coconut afford frown better", + Want: `m/44'/148'/0' GC3MMSXBWHL6CPOAVERSJITX7BH76YU252WGLUOM5CJX3E7UCYZBTPJQ SAEWIVK3VLNEJ3WEJRZXQGDAS5NVG2BYSYDFRSH4GKVTS5RXNVED5AX7 +m/44'/148'/1' GB3MTYFXPBZBUINVG72XR7AQ6P2I32CYSXWNRKJ2PV5H5C7EAM5YYISO SBKSABCPDWXDFSZISAVJ5XKVIEWV4M5O3KBRRLSPY3COQI7ZP423FYB4 +m/44'/148'/2' GDYF7GIHS2TRGJ5WW4MZ4ELIUIBINRNYPPAWVQBPLAZXC2JRDI4DGAKU SD5CCQAFRIPB3BWBHQYQ5SC66IB2AVMFNWWPBYGSUXVRZNCIRJ7IHESQ +m/44'/148'/3' GAFLH7DGM3VXFVUID7JUKSGOYG52ZRAQPZHQASVCEQERYC5I4PPJUWBD SBSGSAIKEF7JYQWQSGXKB4SRHNSKDXTEI33WZDRR6UHYQCQ5I6ZGZQPK +m/44'/148'/4' GAXG3LWEXWCAWUABRO6SMAEUKJXLB5BBX6J2KMHFRIWKAMDJKCFGS3NN SBIZH53PIRFTPI73JG7QYA3YAINOAT2XMNAUARB3QOWWVZVBAROHGXWM +m/44'/148'/5' GA6RUD4DZ2NEMAQY4VZJ4C6K6VSEYEJITNSLUQKLCFHJ2JOGC5UCGCFQ SCVM6ZNVRUOP4NMCMMKLTVBEMAF2THIOMHPYSSMPCD2ZU7VDPARQQ6OY +m/44'/148'/6' GCUDW6ZF5SCGCMS3QUTELZ6LSAH6IVVXNRPRLAUNJ2XYLCA7KH7ZCVQS SBSHUZQNC45IAIRSAHMWJEJ35RY7YNW6SMOEBZHTMMG64NKV7Y52ZEO2 +m/44'/148'/7' GBJ646Q524WGBN5X5NOAPIF5VQCR2WZCN6QZIDOSY6VA2PMHJ2X636G4 SC2QO2K2B4EBNBJMBZIKOYSHEX4EZAZNIF4UNLH63AQYV6BE7SMYWC6E +m/44'/148'/8' GDHX4LU6YBSXGYTR7SX2P4ZYZSN24VXNJBVAFOB2GEBKNN3I54IYSRM4 SCGMC5AHAAVB3D4JXQPCORWW37T44XJZUNPEMLRW6DCOEARY3H5MAQST +m/44'/148'/9' GDXOY6HXPIDT2QD352CH7VWX257PHVFR72COWQ74QE3TEV4PK2KCKZX7 SCPA5OX4EYINOPAUEQCPY6TJMYICUS5M7TVXYKWXR3G5ZRAJXY3C37GF`, + }, + { + Words: "cable spray genius state float twenty onion head street palace net private method loan turn phrase state blanket interest dry amazing dress blast tube", + Passphrase: "p4ssphr4se", + Want: `m/44'/148'/0' GDAHPZ2NSYIIHZXM56Y36SBVTV5QKFIZGYMMBHOU53ETUSWTP62B63EQ SAFWTGXVS7ELMNCXELFWCFZOPMHUZ5LXNBGUVRCY3FHLFPXK4QPXYP2X +m/44'/148'/1' GDY47CJARRHHL66JH3RJURDYXAMIQ5DMXZLP3TDAUJ6IN2GUOFX4OJOC SBQPDFUGLMWJYEYXFRM5TQX3AX2BR47WKI4FDS7EJQUSEUUVY72MZPJF +m/44'/148'/2' GCLAQF5H5LGJ2A6ACOMNEHSWYDJ3VKVBUBHDWFGRBEPAVZ56L4D7JJID SAF2LXRW6FOSVQNC4HHIIDURZL4SCGCG7UEGG23ZQG6Q2DKIGMPZV6BZ +m/44'/148'/3' GBC36J4KG7ZSIQ5UOSJFQNUP4IBRN6LVUFAHQWT2ODEQ7Y3ASWC5ZN3B 
SDCCVBIYZDMXOR4VPC3IYMIPODNEDZCS44LDN7B5ZWECIE57N3BTV4GQ +m/44'/148'/4' GA6NHA4KPH5LFYD6LZH35SIX3DU5CWU3GX6GCKPJPPTQCCQPP627E3CB SA5TRXTO7BG2Z6QTQT3O2LC7A7DLZZ2RBTGUNCTG346PLVSSHXPNDVNT +m/44'/148'/5' GBOWMXTLABFNEWO34UJNSJJNVEF6ESLCNNS36S5SX46UZT2MNYJOLA5L SDEOED2KPHV355YNOLLDLVQB7HDPQVIGKXCAJMA3HTM4325ZHFZSKKUC +m/44'/148'/6' GBL3F5JUZN3SQKZ7SL4XSXEJI2SNSVGO6WZWNJLG666WOJHNDDLEXTSZ SDYNO6TLFNV3IM6THLNGUG5FII4ET2H7NH3KCT6OAHIUSHKR4XBEEI6A +m/44'/148'/7' GA5XPPWXL22HFFL5K5CE37CEPUHXYGSP3NNWGM6IK6K4C3EFHZFKSAND SDXMJXAY45W3WEFWMYEPLPIF4CXAD5ECQ37XKMGY5EKLM472SSRJXCYD +m/44'/148'/8' GDS5I7L7LWFUVSYVAOHXJET2565MGGHJ4VHGVJXIKVKNO5D4JWXIZ3XU SAIZA26BUP55TDCJ4U7I2MSQEAJDPDSZSBKBPWQTD5OQZQSJAGNN2IQB +m/44'/148'/9' GBOSMFQYKWFDHJWCMCZSMGUMWCZOM4KFMXXS64INDHVCJ2A2JAABCYRR SDXDYPDNRMGOF25AWYYKPHFAD3M54IT7LCLG7RWTGR3TS32A4HTUXNOS`, + }, + { + Words: "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about", + Want: `m/44'/148'/0' GB3JDWCQJCWMJ3IILWIGDTQJJC5567PGVEVXSCVPEQOTDN64VJBDQBYX SBUV3MRWKNS6AYKZ6E6MOUVF2OYMON3MIUASWL3JLY5E3ISDJFELYBRZ +m/44'/148'/1' GDVSYYTUAJ3ACHTPQNSTQBDQ4LDHQCMNY4FCEQH5TJUMSSLWQSTG42MV SCHDCVCWGAKGIMTORV6K5DYYV3BY4WG3RA4M6MCBGJLHUCWU2MC6DL66 +m/44'/148'/2' GBFPWBTN4AXHPWPTQVQBP4KRZ2YVYYOGRMV2PEYL2OBPPJDP7LECEVHR SAPLVTLUXSDLFRDGCCFLPDZMTCEVMP3ZXTM74EBJCVKZKM34LGQPF7K3 +m/44'/148'/3' GCCCOWAKYVFY5M6SYHOW33TSNC7Z5IBRUEU2XQVVT34CIZU7CXZ4OQ4O SDQYXOP2EAUZP4YOEQ5BUJIQ3RDSP5XV4ZFI6C5Y3QCD5Y63LWPXT7PW +m/44'/148'/4' GCQ3J35MKPKJX7JDXRHC5YTXTULFMCBMZ5IC63EDR66QA3LO7264ZL7Q SCT7DUHYZD6DRCETT6M73GWKFJI4D56P3SNWNWNJ7ANLJZS6XIFYYXSB +m/44'/148'/5' GDTA7622ZA5PW7F7JL7NOEFGW62M7GW2GY764EQC2TUJ42YJQE2A3QUL SDTWG5AFDI6GRQNLPWOC7IYS7AKOGMI2GX4OXTBTZHHYPMNZ2PX4ONWU +m/44'/148'/6' GD7A7EACTPTBCYCURD43IEZXGIBCEXNBHN3OFWV2FOX67XKUIGRCTBNU SDJMWY4KFRS4PTA5WBFVCPS2GKYLXOMCLQSBNEIBG7KRGHNQOM25KMCP +m/44'/148'/7' GAF4AGPVLQXFKEWQV3DZU5YEFU6YP7XJHAEEQH4G3R664MSF77FLLRK3 SDOJH5JRCNGT57QTPTJEQGBEBZJPXE7XUDYDB24VTOPP7PH3ALKHAHFG +m/44'/148'/8' GABTYCZJMCP55SS6I46SR76IHETZDLG4L37MLZRZKQDGBLS5RMP65TSX SC6N6GYQ2VA4T7CUP2BWGBRT2P6L2HQSZIUNQRHNDLISF6ND7TW4P4ER +m/44'/148'/9' GAKFARYSPI33KUJE7HYLT47DCX2PFWJ77W3LZMRBPSGPGYPMSDBE7W7X SALJ5LPBTXCFML2CQ7ORP7WJNJOZSVBVRQAAODMVHMUF4P4XXFZB7MKY`, + }, + // Invalid: + { + Words: "illness spike retreat truth genius clock brain pass fit cave bargain illness", + Error: "Invalid words or checksum", + }, + } + + for _, test := range tests { + + t.Run(fmt.Sprintf("words %s passphrase %s", test.Words, test.Passphrase), func(t *testing.T) { + words := strings.Split(test.Words, " ") + input := fmt.Sprintf("%d\n%s\n%s\n", len(words), strings.Join(words, "\n"), test.Passphrase) + + // Global variables, AFAIK there is no elegant way to pass it to cobra.Command + reader = bufio.NewReader(bytes.NewBufferString(input)) + out = &bytes.Buffer{} + + err := AccountsCmd.RunE(nil, []string{}) + if test.Error != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), test.Error) + } else { + assert.NoError(t, err) + output := out.(*bytes.Buffer).String() + assert.Contains(t, output, test.Want) + } + }) + } +} diff --git a/tools/stellar-hd-wallet/commands/io.go b/tools/stellar-hd-wallet/commands/io.go new file mode 100644 index 0000000000..13714b7030 --- /dev/null +++ b/tools/stellar-hd-wallet/commands/io.go @@ -0,0 +1,37 @@ +package commands + +import ( + "bufio" + "fmt" + "io" + "log" + "os" + "strconv" + "strings" +) + +var reader = bufio.NewReader(os.Stdin) +var out io.Writer = os.Stdout + +func readString() string { + line, _ := 
reader.ReadString('\n') + return strings.TrimRight(line, "\r\n") +} + +func readUint() uint32 { + line := readString() + number, err := strconv.Atoi(line) + if err != nil { + log.Fatal("Invalid value") + } + + return uint32(number) +} + +func printf(format string, a ...interface{}) { + fmt.Fprintf(out, format, a...) +} + +func println(a ...interface{}) { + fmt.Fprintln(out, a...) +} diff --git a/tools/stellar-hd-wallet/commands/new.go b/tools/stellar-hd-wallet/commands/new.go new file mode 100644 index 0000000000..50f94d4d04 --- /dev/null +++ b/tools/stellar-hd-wallet/commands/new.go @@ -0,0 +1,42 @@ +package commands + +import ( + "strings" + + "github.com/spf13/cobra" + "github.com/stellar/go/support/errors" + "github.com/tyler-smith/go-bip39" +) + +const DefaultEntropySize = 256 + +var NewCmd = &cobra.Command{ + Use: "new", + Short: "Generates a new mnemonic code", + Long: "", + RunE: func(cmd *cobra.Command, args []string) error { + entropy, err := bip39.NewEntropy(DefaultEntropySize) + if err != nil { + return errors.Wrap(err, "Error generating entropy") + } + + mnemonic, err := bip39.NewMnemonic(entropy) + if err != nil { + return errors.Wrap(err, "Error generating mnemonic code") + } + + words := strings.Split(mnemonic, " ") + for i := 0; i < len(words); i++ { + printf("word %02d/24: %10s", i+1, words[i]) + readString() + } + + println("WARNING! Store the words above in a safe place!") + println("WARNING! If you lose your words, you will lose access to funds in all derived accounts!") + println("WARNING! Anyone who has access to these words can spend your funds!") + println("") + println("Use: `stellar-hd-wallet accounts` command to see generated accounts.") + + return nil + }, +} diff --git a/tools/stellar-hd-wallet/main.go b/tools/stellar-hd-wallet/main.go new file mode 100644 index 0000000000..8b84393354 --- /dev/null +++ b/tools/stellar-hd-wallet/main.go @@ -0,0 +1,24 @@ +package main + +import ( + "log" + + "github.com/spf13/cobra" + "github.com/stellar/go/tools/stellar-hd-wallet/commands" +) + +var mainCmd = &cobra.Command{ + Use: "stellar-hd-wallet", + Short: "Simple HD wallet for Stellar Lumens. THIS PROGRAM IS STILL EXPERIMENTAL. USE AT YOUR OWN RISK.", +} + +func init() { + mainCmd.AddCommand(commands.NewCmd) + mainCmd.AddCommand(commands.AccountsCmd) +} + +func main() { + if err := mainCmd.Execute(); err != nil { + log.Fatal(err) + } +} diff --git a/tools/stellar-key-gen/CHANGELOG.md b/tools/stellar-key-gen/CHANGELOG.md new file mode 100644 index 0000000000..df45f696ad --- /dev/null +++ b/tools/stellar-key-gen/CHANGELOG.md @@ -0,0 +1,3 @@ +# Changelog + +Not yet released. diff --git a/tools/stellar-key-gen/README.md b/tools/stellar-key-gen/README.md new file mode 100644 index 0000000000..5777b469c2 --- /dev/null +++ b/tools/stellar-key-gen/README.md @@ -0,0 +1,30 @@ +# stellar-key-gen + +Generate Stellar keys. + +## Usage + +Run the command with no options to get a public and private key: +``` +stellar-key-gen +GB2QRDI4FY2KERQBGPDS36XVWBJ4JBY3KW376H3KVF6YTNB2ROFNYN5L +SCGP6ZACCIPZXLGSMLNC3DE5VFZMS6GZJRCA4E524WFD5SHYQEE7NMK6 +``` + +Run the command with a format option to change the output: +``` +stellar-key-gen -f '{{.SecretKey}}' +SCGP6ZACCIPZXLGSMLNC3DE5VFZMS6GZJRCA4E524WFD5SHYQEE7NMK6 +``` + +Help: +``` +$ stellar-key-gen -h +Generate a Stellar key. 
+ +Usage: + stellar-key-gen [flags] + +Flags: + -f, --format string Format of output (default "{{.PublicKey}}\n{{.SecretKey}}\n") +``` diff --git a/tools/stellar-key-gen/main.go b/tools/stellar-key-gen/main.go new file mode 100644 index 0000000000..22e68cd77a --- /dev/null +++ b/tools/stellar-key-gen/main.go @@ -0,0 +1,62 @@ +package main + +import ( + "html/template" + "io" + "os" + + "github.com/spf13/cobra" + "github.com/stellar/go/keypair" +) + +func main() { + exitCode := run(os.Args[1:], os.Stdout, os.Stderr) + os.Exit(exitCode) +} + +func run(args []string, stdout io.Writer, stderr io.Writer) int { + cmd := &cobra.Command{ + Use: "stellar-key-gen", + Short: "Generate a Stellar key.", + } + cmd.SetArgs(args) + cmd.SetOutput(stderr) + + outFormat := "{{.PublicKey}}\n{{.SecretKey}}\n" + cmd.Flags().StringVarP(&outFormat, "format", "f", outFormat, "Format of output") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + tmpl, err := template.New("").Parse(outFormat) + if err != nil { + return err + } + + key, err := keypair.Random() + if err != nil { + return err + } + + data := outData{ + PublicKey: key.Address(), + SecretKey: key.Seed(), + } + + err = tmpl.Execute(stdout, data) + if err != nil { + return err + } + + return nil + } + + err := cmd.Execute() + if err != nil { + return 1 + } + return 0 +} + +type outData struct { + PublicKey string + SecretKey string +} diff --git a/tools/stellar-key-gen/main_test.go b/tools/stellar-key-gen/main_test.go new file mode 100644 index 0000000000..fe1facf7e2 --- /dev/null +++ b/tools/stellar-key-gen/main_test.go @@ -0,0 +1,124 @@ +package main + +import ( + "strconv" + "strings" + "testing" + + "github.com/stellar/go/keypair" + "github.com/stretchr/testify/assert" +) + +func TestRun_defaultFormat(t *testing.T) { + args := []string{} + stdout := strings.Builder{} + stderr := strings.Builder{} + + exitCode := run(args, &stdout, &stderr) + + t.Logf("exit code: %d", exitCode) + t.Logf("stdout: %q", stdout.String()) + t.Logf("stderr: %q", stderr.String()) + + // Exit code should be zero for success. + assert.Equal(t, 0, exitCode) + + // Stdout should be public key then secret key new line separated. + lines := strings.Split(stdout.String(), "\n") + if assert.Len(t, lines, 3) { + f, err := keypair.ParseFull(lines[1]) + if assert.NoError(t, err) { + assert.Equal(t, f.Address(), lines[0]) + assert.Equal(t, f.Seed(), lines[1]) + assert.Equal(t, "", lines[2]) + } + } + + // Stderr should be empty. + assert.Equal(t, "", stderr.String()) +} + +func TestRun_customFormat(t *testing.T) { + args := []string{ + "-f", + "{{.SecretKey}},{{.PublicKey}}", + } + stdout := strings.Builder{} + stderr := strings.Builder{} + + exitCode := run(args, &stdout, &stderr) + + t.Logf("exit code: %d", exitCode) + t.Logf("stdout: %q", stdout.String()) + t.Logf("stderr: %q", stderr.String()) + + // Exit code should be zero for success. + assert.Equal(t, 0, exitCode) + + // Stdout should be secret key then public key comma separated. + parts := strings.Split(stdout.String(), ",") + if assert.Len(t, parts, 2) { + f, err := keypair.ParseFull(parts[0]) + if assert.NoError(t, err) { + assert.Equal(t, f.Seed(), parts[0]) + assert.Equal(t, f.Address(), parts[1]) + } + } + + // Stderr should be empty. 
+ assert.Equal(t, "", stderr.String()) +} + +func TestRun_invalidFormat(t *testing.T) { + args := []string{ + "-f", + "{{.FooBar}}", + } + stdout := strings.Builder{} + stderr := strings.Builder{} + + exitCode := run(args, &stdout, &stderr) + + t.Logf("exit code: %d", exitCode) + t.Logf("stdout: %q", stdout.String()) + t.Logf("stderr: %q", stderr.String()) + + // Exit code should be one for failure. + assert.Equal(t, 1, exitCode) + + // Stdout should be empty. + assert.Equal(t, "", stdout.String()) + + // Stderr should contain the error. + assert.Contains(t, stderr.String(), "can't evaluate field FooBar") +} + +func TestRun_random(t *testing.T) { + args := []string{ + "-f", + "{{.SecretKey}}", + } + seen := map[string]bool{} + for i := 0; i < 10; i++ { + t.Run(strconv.Itoa(i), func(t *testing.T) { + stdout := strings.Builder{} + stderr := strings.Builder{} + + exitCode := run(args, &stdout, &stderr) + + // Exit code should be zero for success. + assert.Equal(t, 0, exitCode) + + // Stdout will contain the secret, which should not have be seen before. + key := stdout.String() + if seen[key] { + t.Error(key, "seen before") + } else { + t.Log(key, "not seen before") + } + + // Stderr should be empty. + assert.Equal(t, "", stderr.String()) + }) + } +} diff --git a/tools/stellar-sign/CHANGELOG.md b/tools/stellar-sign/CHANGELOG.md index 64de48f2df..c02aa15c29 100644 --- a/tools/stellar-sign/CHANGELOG.md +++ b/tools/stellar-sign/CHANGELOG.md @@ -6,6 +6,10 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). As this project is pre 1.0, breaking changes may happen for minor version bumps. A breaking change will get clearly notified in this log. +## Unreleased + +- Dropped support for Go 1.10, 1.11, 1.12. + ## [v0.2.0] - 2016-08-19 ### Added diff --git a/tools/stellar-sign/main.go b/tools/stellar-sign/main.go index 6659006ae3..5a02ead8ab 100644 --- a/tools/stellar-sign/main.go +++ b/tools/stellar-sign/main.go @@ -6,25 +6,54 @@ package main import ( "bufio" + "flag" "fmt" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "io/ioutil" "log" "os" "strings" "github.com/howeyc/gopass" - "github.com/stellar/go/build" + "github.com/stellar/go/txnbuild" "github.com/stellar/go/xdr" ) var in *bufio.Reader +var infile = flag.String("infile", "", "transaction envelope") + func main() { + flag.Parse() in = bufio.NewReader(os.Stdin) - // read envelope - env, err := readLine("Enter envelope (base64): ", false) - if err != nil { - log.Fatal(err) + var ( + env string + err error + ) + + if *infile == "" { + // read envelope + env, err = readLine("Enter envelope (base64): ", false) + if err != nil { + log.Fatal(err) + } + } else { + var file *os.File + file, err = os.Open(*infile) + if err != nil { + log.Fatal(err) + } + defer file.Close() + + var raw []byte + raw, err = ioutil.ReadAll(file) + if err != nil { + log.Fatal(err) + } + + env = string(raw) } // parse the envelope @@ -36,9 +65,14 @@ func main() { fmt.Println("") fmt.Println("Transaction Summary:") - fmt.Printf(" source: %s\n", txe.Tx.SourceAccount.Address()) - fmt.Printf(" ops: %d\n", len(txe.Tx.Operations)) - fmt.Printf(" sigs: %d\n", len(txe.Signatures)) + sourceAccount := txe.SourceAccount().ToAccountId() + fmt.Printf(" type: %s\n", txe.Type.String()) + fmt.Printf(" source: %s\n", sourceAccount.Address()) + fmt.Printf(" ops: %d\n", len(txe.Operations())) + fmt.Printf(" sigs: %d\n", len(txe.Signatures())) + if txe.IsFeeBump() { + fmt.Printf(" fee bump sigs: %d\n", len(txe.FeeBumpSignatures())) + } fmt.Println("") 
// TODO: add operation details @@ -50,31 +84,53 @@ func main() { } // sign the transaction - b := &build.TransactionEnvelopeBuilder{E: &txe} - b.Init() - b.MutateTX(build.PublicNetwork) - b.Mutate(build.Sign{seed}) - if b.Err != nil { - log.Fatal(b.Err) + kp, err := keypair.ParseFull(seed) + if err != nil { + log.Fatal(err) } - newEnv, err := xdr.MarshalBase64(b.E) + parsed, err := txnbuild.TransactionFromXDR(env) if err != nil { log.Fatal(err) } + var newEnv string + if tx, ok := parsed.Transaction(); ok { + tx, err = tx.Sign(network.PublicNetworkPassphrase, kp) + if err != nil { + log.Fatal(err) + } + newEnv, err = tx.Base64() + if err != nil { + log.Fatal(err) + } + } else { + tx, _ := parsed.FeeBump() + tx, err = tx.Sign(network.PublicNetworkPassphrase, kp) + if err != nil { + log.Fatal(err) + } + newEnv, err = tx.Base64() + if err != nil { + log.Fatal(err) + } + } + fmt.Print("\n==== Result ====\n\n") + fmt.Print("```\n") fmt.Println(newEnv) + fmt.Print("```\n") } func readLine(prompt string, private bool) (string, error) { - fmt.Fprintf(os.Stdout, prompt) + fmt.Println(prompt) var line string var err error if private { - str, err := gopass.GetPasswdMasked() + var str []byte + str, err = gopass.GetPasswdMasked() if err != nil { return "", err } diff --git a/tools/stellar-vanity-gen/CHANGELOG.md b/tools/stellar-vanity-gen/CHANGELOG.md index 826a5e889a..9df1ad88a4 100644 --- a/tools/stellar-vanity-gen/CHANGELOG.md +++ b/tools/stellar-vanity-gen/CHANGELOG.md @@ -6,6 +6,10 @@ file. This project adheres to [Semantic Versioning](http://semver.org/). As this project is pre 1.0, breaking changes may happen for minor version bumps. A breaking change will get clearly notified in this log. +## Unreleased + +- Dropped support for Go 1.10, 1.11, 1.12. + ## [v0.1.0] - 2016-08-17 Initial release after import from https://github.com/stellar/go-stellar-base/cmd/stellar-vanity-gen diff --git a/tools/xdr2go/CHANGELOG.md b/tools/xdr2go/CHANGELOG.md new file mode 100644 index 0000000000..dd2d590eeb --- /dev/null +++ b/tools/xdr2go/CHANGELOG.md @@ -0,0 +1,8 @@ +# Changelog + +All notable changes to this project will be documented in this +file. This project adheres to [Semantic Versioning](http://semver.org/). + +## v0.0.1 + +Initial version. \ No newline at end of file diff --git a/tools/xdr2go/README.md b/tools/xdr2go/README.md new file mode 100644 index 0000000000..dac5739681 --- /dev/null +++ b/tools/xdr2go/README.md @@ -0,0 +1,27 @@ +# xdr2go + +`xdr2go` is a little CLI tool to transform base64 XDR objects into a pretty Go code. This helps in writing mocks and testing. It's using [`fmt.GoStringer`](https://golang.org/pkg/fmt/#GoStringer) interface to print pretty Go code compared to a standard library implementation. + +### Why + +Very often when writing tests we mock objects to make tests independent of other components. There are many ways we can create example XDR objects but they have disadvantages: +* For transactions we can use `txnbuild` package. The problem is that it allows building transactions only. Also, sometimes there's an existing transaction in the network we want to use in tests so building it using `txnbuild` requires extra time. +* `fmt.Printf("%#v", value)` prints a Go-syntax representation of the value. 
However, there are many problems with the way the values are printed using a standard `GoStringer`: + * If the value is a struct with pointers it prints an hexadecimal memory address instead of it's value, ex: + ```go + &xdr.TransactionEnvelope{Type: 2, V0: (*xdr.TransactionV0Envelope)(nil), V1: (*xdr.TransactionV1Envelope)(0xc0000aa2d0), FeeBump: (*xdr.FeeBumpTransactionEnvelope)(nil) + ``` + * It prints redundant `nil` values that could be skipped. + * Binary values like ed25519 keys and unions (ex. `xdr.Asset`) are rendered in a way that's hard to read. + * Union discriminants are printed as decimal, instead of using a type variable. + * `uint32` values are printed in hex. +* I also checked [`https://godoc.org/github.com/kr/pretty`](https://godoc.org/github.com/kr/pretty). While it works fine, it also doesn't skip `nil` values and prints binary values and unions as standard Go printer. + +Standard print: +```go +xdr.TransactionEnvelope{Type: 2, V0: (*xdr.TransactionV0Envelope)(nil), V1: (*xdr.TransactionV1Envelope)(0xc0000aa2d0), FeeBump: (*xdr.FeeBumpTransactionEnvelope)(nil) +``` +With `GoStringer` implementations: +```go +xdr.TransactionEnvelope{Type: xdr.EnvelopeTypeEnvelopeTypeTx, V1: &xdr.TransactionV1Envelope{Tx: xdr.Transaction{SourceAccount: xdr.MustAddress("GAZ3T7HRWDBJ6SNQ7IWVUS65FP6QMCWHCALFYWX552KFV2O2RLOSRLKI"), Fee: 120, SeqNum: 122783846453215886, TimeBounds: &xdr.TimeBounds{MinTime: xdr.TimePoint(0), MaxTime: xdr.TimePoint(1594645065)}, Memo: xdr.Memo{Type: xdr.MemoTypeMemoNone}, Operations: []xdr.Operation{xdr.OperationBody{SourceAccount: &xdr.MustAddress("GBHC6AMZ3FWLYYHXITCIEZI6VXAU4IEMRCHLICXZXHOVSBFSWCRJ7JS7"), Body: &xdr.OperationBody{Type: xdr.OperationTypeManageSellOffer, ManageSellOfferOp: &xdr.ManageSellOfferOp{Selling: xdr.MustNewCreditAsset("LFEC", "GAG6FS3CR64QJHLHJU7HNXUB4KBLXVDFQBDXM5LG22WOM7CA2ITJAVD2"), Buying: xdr.MustNewNativeAsset(), Amount: 6, Price: xdr.Price{N: 9899999, D: 100000000}, OfferId: 0}}}}, Ext: xdr.TransactionExt{V: 0}}, Signatures: []xdr.DecoratedSignature{xdr.DecoratedSignature{Hint: xdr.SignatureHint{0xda, 0x8a, 0xdd, 0x28}, Signature: xdr.Signature{0x55, 0xb, 0xd0, 0x7d, 0xf3, 0x7, 0x71, 0x56, 0x99, 0x3c, 0x34, 0xfc, 0x47, 0xa0, 0xce, 0x2b, 0x39, 0xa, 0xc4, 0x8c, 0xb7, 0x80, 0x9f, 0x4c, 0xc8, 0x22, 0xae, 0xcc, 0xe9, 0x8b, 0x29, 0xb9, 0x80, 0x94, 0xab, 0x15, 0xbd, 0x6b, 0xc6, 0x3e, 0x2d, 0x12, 0x7a, 0x49, 0xa8, 0x83, 0x75, 0xdd, 0x21, 0x0, 0x14, 0x47, 0xdc, 0xf9, 0x4, 0xe7, 0xdb, 0x16, 0x5a, 0x6e, 0xb0, 0xd6, 0xc0, 0xd}}, xdr.DecoratedSignature{Hint: xdr.SignatureHint{0xb2, 0xb0, 0xa2, 0x9f}, Signature: xdr.Signature{0x6c, 0xb8, 0xe4, 0xd6, 0x39, 0xe2, 0x54, 0x3a, 0x51, 0xf1, 0xc1, 0x20, 0xb9, 0x5f, 0x7d, 0x8, 0xae, 0x31, 0x2c, 0xec, 0x19, 0xce, 0xb0, 0x3f, 0xb6, 0xe6, 0xfa, 0x25, 0xeb, 0x58, 0xf0, 0x33, 0xd1, 0x9f, 0x9a, 0xf5, 0xc2, 0x33, 0x7b, 0xa6, 0x45, 0x92, 0x18, 0xc0, 0x4d, 0xbf, 0x9d, 0xdf, 0xa0, 0xa5, 0x43, 0x73, 0x2a, 0x7c, 0x7d, 0xad, 0x67, 0x54, 0xad, 0xa6, 0xae, 0x30, 0xd1, 0xf}}}}} +``` \ No newline at end of file diff --git a/tools/xdr2go/main.go b/tools/xdr2go/main.go new file mode 100644 index 0000000000..797db57207 --- /dev/null +++ b/tools/xdr2go/main.go @@ -0,0 +1,50 @@ +package main + +import ( + "fmt" + "go/format" + + "github.com/spf13/cobra" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +var ( + typ string +) + +var rootCmd = &cobra.Command{ + Use: "xdr2go [base64-encoded XDR object]", + Short: "xdr2go transforms base64 encoded XDR objects into a pretty Go code", + RunE: run, +} + 
+func main() { + rootCmd.Flags().StringVarP(&typ, "type", "t", "TransactionEnvelope", "xdr type, currently only TransactionEnvelope is available") + rootCmd.Execute() +} + +func run(cmd *cobra.Command, args []string) error { + if len(args) != 1 { + return errors.New("Exactly one command argument with XDR object is required.") + } + var object interface{} + switch typ { + case "TransactionEnvelope": + object = &xdr.TransactionEnvelope{} + default: + return errors.New("Unknown type.") + } + err := xdr.SafeUnmarshalBase64(args[0], object) + if err != nil { + return errors.Wrap(err, "Error unmarshalling XDR stucture.") + } + + source := fmt.Sprintf("%#v\n", object) + formatted, err := format.Source([]byte(source)) + if err != nil { + return errors.Wrap(err, "Error formatting code.") + } + fmt.Println(string(formatted)) + return nil +} diff --git a/txnbuild/CHANGELOG.md b/txnbuild/CHANGELOG.md new file mode 100644 index 0000000000..8f5f88a083 --- /dev/null +++ b/txnbuild/CHANGELOG.md @@ -0,0 +1,257 @@ +# Changelog + +All notable changes to this project will be documented in this +file. This project adheres to [Semantic Versioning](http://semver.org/). + + +## [9.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v9.0.0) - 2022-01-10 + +* Enable Muxed Accounts ([SEP-23](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0023.md)) by default ([#4169](https://github.com/stellar/go/pull/4169)): + * Remove `TransactionParams.EnableMuxedAccounts` + * Remove `TransactionFromXDROptionEnableMuxedAccounts` + * Remove `FeeBumpTransactionParams.EnableMuxedAccounts` + * Remove parameter `withMuxedAccounts bool` from all methods/functions. + * Remove `options ...TransactionFromXDROption` parameter from `TransactionFromXDR()` + * Rename `SetOpSourceMuxedAccount()` to (pre-existing) `SetOpSourceAccount()` which now accepts + both `G` and `M` (muxed) account strkeys. +* Use xdr.Price to represent prices in txnbuild instead of strings ([#4167](https://github.com/stellar/go/pull/4167)). + +## [8.0.0-beta.0](https://github.com/stellar/go/releases/tag/horizonclient-v8.0.0-beta.0) - 2021-10-04 + +**This release adds support for Protocol 18.** + +### New features +* `GenericTransaction`, `Transaction`, and `FeeBumpTransaction` now implement +`encoding.TextMarshaler` and `encoding.TextUnmarshaler`. +* New asset structures that conform to the new ChangeTrust and New assets: +* Support for the core liquidity pool XDR types: `LiquidityPoolId`, `LiquidityPoolParameters`, `LiquidityPoolDeposit`, and `LiquidityPoolWithdraw`. +* Support for the new asset structures: `ChangeTrustAsset` and `TrustLineAsset`. + +### Changes +* There's now a 5-minute grace period to `transaction.ReadChallengeTx`'s minimum time bound constraint ([#3824](https://github.com/stellar/go/pull/3824)). +* Assets can now be liquidity pool shares (`AssetTypePoolShare`). +* All asset objects can now be converted to the new `ChangeTrustAsset` and `TrustLineAsset` objects. +* Assets can now be compared in accordance with the protocol, see their respective `LessThan()` implementations. + +### Breaking changes +* `ChangeTrust` requires a `ChangeTrustAsset`. +* `RevokeSponsorship` requires a `TrustLineAsset` when revoking trustlines. +* `RemoveTrustlineOp` helper now requires a `ChangeTrustAsset` +* `validate*Asset` helpers now require more-specific asset types. 
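
The 9.0.0 price change listed above is easiest to see with a small sketch. The snippet below is not part of this diff; it is a minimal illustration that assumes `ManageSellOffer` keeps `Selling`/`Buying`/`Amount` fields (those field names are assumptions here) and that, per the 9.0.0 entry, its price is now expressed as an `xdr.Price` fraction rather than a decimal string:

```go
package main

import (
	"fmt"

	"github.com/stellar/go/txnbuild"
	"github.com/stellar/go/xdr"
)

func main() {
	// Issuer address used only for illustration (borrowed from the test fixtures).
	issuer := "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"

	op := txnbuild.ManageSellOffer{
		Selling: txnbuild.CreditAsset{Code: "ABCD", Issuer: issuer},
		Buying:  txnbuild.NativeAsset{},
		Amount:  "100",
		// Since 9.0.0 the price is an exact rational (xdr.Price) rather
		// than a string such as "0.1": numerator 1, denominator 10.
		Price: xdr.Price{N: 1, D: 10},
	}
	fmt.Printf("%#v\n", op)
}
```
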
+ + +## [v7.1.1](https://github.com/stellar/go/releases/tag/horizonclient-v7.1.1) - 2021-06-25 + +### Bug Fixes + +* Claimable balance IDs are now precomputed correctly (`Transaction.ClaimableBalanceID(int)`) even when the transaction's source account is a fully-muxed account ([#3678](https://github.com/stellar/go/pull/3678)). +* Fix muxed account address parsing for account merge operation ([#3722](https://github.com/stellar/go/pull/3722)). + +## [v7.1.0](https://github.com/stellar/go/releases/tag/horizonclient-v7.1.0) - 2021-06-01 + +### New features + +* Add `Transaction.SequenceNumber()` helper function to make retrieving the underlying sequence number easier ([#3616](https://github.com/stellar/go/pull/3616)). + +* Add `Transaction.AddSignatureDecorated()` helper function to make attaching decorated signatures to existing transactions easier ([#3640](https://github.com/stellar/go/pull/3640)). + +### Bug Fix + +* `BaseFee` in `TransactionParams` when calling `NewTransaction` is allowed to be zero because the fee can be paid by wrapping a `Transaction` in a `FeeBumpTransaction` ([#3622](https://github.com/stellar/go/pull/3622)). + + +## [v7.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v7.0.0) - 2021-05-15 + +### Breaking changes + +* `AllowTrustOpAsset` was renamed to `AssetCode`, `{Must}NewAllowTrustAsset` was renamed to `{Must}NewAssetCodeFromString`. +* Some methods from the `Operation` interface (`BuildXDR()`,`FromXDR()` and `Validate()`) now take an additional `bool` parameter (`withMuxedAccounts`) + to indicate whether [SEP23](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0023.md) M-strkeys should be enabled. + +### New features + +* Add support for Stellar Protocol 17 (CAP35): `Clawback`, `ClawbackClaimableBalance` and `SetTrustlineFlags` operations. +* Add opt-in support for [SEP23](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0023.md) M-strkeys for `MuxedAccount`s: + * Some methods from the `Operation` interface (`BuildXDR()`,`FromXDR()` and `Validate()`) now take an additional `bool` parameter (`withMuxedAccounts`) + * The parameters from `NewFeeBumpTransaction()` and `NewTransaction()` now include a new field (`EnableMuxedAccounts`) to enable M-strekeys. + * `TransactionFromXDR()` now allows passing a `TransactionFromXDROptionEnableMuxedAccounts` option, to enable M-strkey parsing. + +## [v6.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v6.0.0) - 2021-02-22 + +### Breaking changes + +* Updates the SEP-10 helper function parameters to support [SEP-10 v3.1](https://github.com/stellar/stellar-protocol/commit/6c8c9cf6685c85509835188a136ffb8cd6b9c11c). + * The following functions added the `webAuthDomain` parameter: + * `BuildChallengeTx()` + * `ReadChallengeTx()` + * `VerifyChallengeTxThreshold()` + * `VerifyChallengeTxSigners()` + * The webAuthDomain parameter is verified in the `Read*` and `Verify*` functions if it is contained in the challenge transaction, and is ignored if the challenge transaction was generated by an older implementation that does not support the webAuthDomain. + * The webAuthDomain parameter is included in challenge transactions generated in the `Build*` function, and the resulting challenge transaction is compatible with SEP-10 v2.1 or greater. 
+* Use strings to represent source accounts in Operation structs ([#3393](https://github.com/stellar/go/pull/3393)) see example below: + ```go + bumpSequenceOp := txnbuild.BumpSequence{BumpTo: 100, SourceAccount: "GB56OJGSA6VHEUFZDX6AL2YDVG2TS5JDZYQJHDYHBDH7PCD5NIQKLSDO"} + ``` +* Remove `TxEnvelope()` functions from `Transaction` and `FeeBumpTransaction` to simplify the API. `ToXDR()` should be used instead of `TxEnvelope()` ([#3377](https://github.com/stellar/go/pull/3377)) + +## [v5.0.1](https://github.com/stellar/go/releases/tag/horizonclient-v5.0.1) - 2021-02-16 + +* Fix a bug in `ClaimableBalanceID()` where the wrong account was used to derive the claimable balance id ([#3406](https://github.com/stellar/go/pull/3406)) + +## [v5.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v5.0.0) - 2020-11-12 + +### Breaking changes + +* Updates the SEP-10 helper function parameters and return values to support [SEP-10 v3.0](https://github.com/stellar/stellar-protocol/commit/9d121f98fd2201a5edfe0ed2befe92f4bf88bfe4) + * The following functions replaced the `homeDomain` parameter with `homeDomains` (note: plural): + * `ReadChallengeTx()` + * `VerifyChallengeTxThreshold()` + * `VerifyChallengeTxSigners()` + * `ReadChallengeTx()` now returns a third non-error value: `matchedHomeDomain` + +## [v4.2.0](https://github.com/stellar/go/releases/tag/horizonclient-v4.2.0) - 2020-11-11 + +* Add `HashHex()`, `SignWithKeyString()`, `SignHashX()`, and `AddSignatureBase64()` functions back to `FeeBumpTransaction` ([#3199](https://github.com/stellar/go/pull/3199)). + +## [v4.1.0](https://github.com/stellar/go/releases/tag/horizonclient-v4.1.0) - 2020-10-16 + +* Add helper function `ParseAssetString()`, making it easier to build an `Asset` structure from a string in [canonical form](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0011.md#asset) and check its various properties ([#3105](https://github.com/stellar/go/pull/3105)). + +* Add helper function `Transaction.ClaimableBalanceID()`, making it easier to calculate balance IDs for [claimable balances](https://developers.stellar.org/docs/glossary/claimable-balance/) without actually submitting the transaction ([#3122](https://github.com/stellar/go/pull/3122)). + +* Add support for SEP-10 v2.1.0. + * Remove verification of home domain. (Will be reintroduced and changed in a future release.) + * Allow additional manage data operations that have source account as the server key. + +## [v4.0.1](https://github.com/stellar/go/releases/tag/horizonclient-v4.0.1) - 2020-10-02 + +* Fixed bug in `TransactionFromXDR()` which occurs when parsing transaction XDR envelopes which contain Protocol 14 operations. + +## [v4.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v4.0.0) - 2020-09-29 + +Added support for the new operations in [Protocol 14](https://github.com/stellar/go/issues/3035). Now it is possible to: +* Create and claim claimable balance operations (see [CAP-23](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0023.md)) with the `[Create|Claim]ClaimableBalance` structures and their associated helpers +* Begin/end sponsoring future reserves for other accounts (see [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md)) with the `[Begin|End]SponsoringFutureReserves` operations +* Revoke sponsorships of various objects with the `RevokeSponsorship` operation (see [CAP-33](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0033.md)). 
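
The Protocol 14 sponsorship operations listed above are used as a begin/end "sandwich" around the operations whose reserves are being sponsored. The sketch below is not taken from this diff; it only illustrates the shape of such a sandwich, and the `SponsoredID` field name is an assumption:

```go
package main

import (
	"fmt"

	"github.com/stellar/go/txnbuild"
)

func main() {
	// Hypothetical addresses, used only for illustration.
	sponsor := "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"
	sponsored := "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z"

	// CAP-33 sponsorship: the sponsored operations sit between a Begin and
	// an End operation, and the End must be sourced by the sponsored account.
	ops := []txnbuild.Operation{
		&txnbuild.BeginSponsoringFutureReserves{SourceAccount: sponsor, SponsoredID: sponsored},
		&txnbuild.CreateAccount{Destination: sponsored, Amount: "0"},
		&txnbuild.EndSponsoringFutureReserves{SourceAccount: sponsored},
	}
	fmt.Println("operations in the sponsorship sandwich:", len(ops))
}
```
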
+ +Also: +* Added support for Go 1.15. +### Breaking changes + +* Dropped support for Go 1.13. +* Add support for SEP-10 v2.0.0. + * Replace `BuildChallengeTx`'s `anchorName` parameter with `homeDomain`. + * Add `homeDomain` parameter to `ReadChallengeTx`, `VerifyChallengeTxThreshold`, and `VerifyChallengeTxSigners`. + +## [v3.2.0](https://github.com/stellar/go/releases/tag/horizonclient-v3.2.0) - 2020-06-18 + +* `txnbuild` now generates V1 transaction envelopes which are only supported by Protocol 13 ([#2640](https://github.com/stellar/go/pull/2640)) +* Add `ToXDR()` functions for `Transaction` and `FeeBumpTransaction` instances which return xdr transaction envelopes without errors ([#2651](https://github.com/stellar/go/pull/2651)) + +## [v3.1.0](https://github.com/stellar/go/releases/tag/horizonclient-v3.1.0) - 2020-05-14 + +* Fix bug which occurs when parsing xdr offers with prices that require more than 7 decimals of precision ([#2588](https://github.com/stellar/go/pull/2588)) +* Add `AddSignatureBase64` function to both `Transaction` and `FeeBumpTransaction` objects for adding a base64-encoded signature. [#2586](https://github.com/stellar/go/pull/2586) + +## [v3.0.1](https://github.com/stellar/go/releases/tag/horizonclient-v3.0.1) - 2020-05-11 + +* Fix bug which occurs when parsing transactions with manage data operations containing nil values ([#2573](https://github.com/stellar/go/pull/2573)) + +## [v3.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v3.0.0) - 2020-04-28 + +### Breaking changes + +* The `Account` interface has been extended to include `GetSequenceNumber() (int64, error)`. Also, `IncrementSequenceNumber()` now returns an `(int64, error)` pair instead of a `(xdr.SequenceNumber, error)` pair. +* Refactor workflow for creating and signing transactions. Previously, you could create a transaction envelope by populating a `Transaction` instance and calling the `Build()` function on the `Transaction` instance. + +`Transaction` is now an opaque type which has accessor functions like `SourceAccount() SimpleAccount`, `Memo() Memo`, etc. The motivation behind this change is to make `Transaction` more immutable. Here is an example of how to use the new transaction type: +```go + kp := keypair.MustParse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := txnbuild.Payment{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + Asset: NativeAsset{}, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + // If IncrementSequenceNum is true, NewTransaction() will call `sourceAccount.IncrementSequenceNumber()` + // to obtain the sequence number for the transaction. + // If IncrementSequenceNum is false, NewTransaction() will call `sourceAccount.GetSequenceNumber()` + // to obtain the sequence number for the transaction. + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) +``` + +* `TransactionFromXDR` now has the following signature `TransactionFromXDR(txeB64 string) (*GenericTransaction, error)`. A `GenericTransaction` is a container which can be unpacked into either a `Transaction` or a `FeeBumpTransaction`. 
+* `BuildChallengeTx` now returns a `Transaction` instance instead of the base 64 string encoding of the SEP 10 challenge transaction. +* `VerifyChallengeTx` has been removed. Use `VerifyChallengeTxThreshold` or `VerifyChallengeTxSigners` instead. + +### Add + +* Add `NewFeeBumpTransaction(params FeeBumpTransactionParams) (*FeeBumpTransaction, error)` function for creating [fee bump transactions](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0015.md). Note that fee bump transactions will only be accepted by Stellar Core once Protocol 13 is enabled. + +### Updates + +* `AllowTrust` supports [CAP0018](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0018.md) Fine-Grained Control of Authorization by exposing a `AuthorizeToMaintainLiabilities` boolean field. +* `ReadChallengeTx` will reject any challenge transactions which are fee bump transactions. +* `ReadChallengeTx` will reject any challenge transactions which contain a [MuxedAccount](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0027.md) with a memo ID. + +### Remove + +* Dropped support for Go 1.12. + +## [v1.5.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.5.0) - 2019-10-09 + +* Dropped support for Go 1.10, 1.11. +* Add support for stellar-core [protocol 12](https://github.com/stellar/stellar-core/releases/tag/v12.0.0), which implements [CAP-0024](https://github.com/stellar/stellar-protocol/blob/master/core/cap-0024.md) ("Make PathPayment Symmetrical"). ([#1737](https://github.com/stellar/go/issues/1737)). +* **Deprecated:** Following CAP-0024, the operation `txnbuild.PathPayment` is now deprecated in favour of [`txnbuild.PathPaymentStrictReceive`](https://godoc.org/github.com/stellar/go/txnbuild#PathPaymentStrictReceive), and will be removed in a future release. This is a rename - the new operation behaves identically to the old one. Client code should be updated to use the new operation. +* **Add:** New operation [`txnbuild.PathPaymentStrictSend`](https://godoc.org/github.com/stellar/go/txnbuild#PathPaymentStrictSend) allows a path payment to be made where the amount sent is specified, and the amount received can vary. + +## [v1.4.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.4.0) - 2019-08-09 + +* Add `BuildChallengeTx` function for building [SEP-10](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0010.md) challenge transaction([#1466](https://github.com/stellar/go/issues/1466)). +* Add `VerifyChallengeTx` method for verifying [SEP-10](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0010.md) challenge transaction([#1530](https://github.com/stellar/go/issues/1530)). +* Add `TransactionFromXDR` function for building `txnbuild.Transaction` struct from a base64 XDR transaction envelope[#1329](https://github.com/stellar/go/issues/1329). +* Fix bug that allowed multiple calls to `Transaction.Build` increment the number of operations in a transaction [#1448](https://github.com/stellar/go/issues/1448). +* Add `Transaction.SignWithKeyString` helper method for signing transactions using secret keys as strings.([#1564](https://github.com/stellar/go/issues/1564)) + + +## [v1.3.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.3.0) - 2019-07-08 + +* Add support for getting the hex-encoded transaction hash with `Transaction.HashHex` method. +* `TransactionEnvelope` is now available after building a transaction(`Transaction.Build`). Previously, this was only available after signing a transaction. 
([#1376](https://github.com/stellar/go/pull/1376)) +* Add support for getting the `TransactionEnvelope` struct with `Transaction.TxEnvelope` method ([#1415](https://github.com/stellar/go/issues/1415)). +* `AllowTrust` operations no longer requires the asset issuer, only asset code is required ([#1330](https://github.com/stellar/go/issues/1330)). +* `Transaction.SetDefaultFee` method is deprecated and will be removed in the next major release ([#1221](https://github.com/stellar/go/issues/1221)). +* `Transaction.TransactionFee` method has been added to get the fee that will be paid for a transaction. +* `Transaction.SignHashX` method adds support for signing transactions with hash(x) signature types. + +## [v1.2.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.2.0) - 2019-05-16 + +* In addition to account responses from horizon, transactions and operations can now be built with txnbuild.SimpleAccount structs constructed locally ([#1266](https://github.com/stellar/go/issues/1266)). +* Added `MaxTrustlineLimit` which represents the maximum value for a trustline limit ([#1265](https://github.com/stellar/go/issues/1265)). +* ChangeTrust operation with no `Limit` field set now defaults to `MaxTrustlineLimit` ([#1265](https://github.com/stellar/go/issues/1265)). +* Add support for building `ManageBuyOffer` operation ([#1165](https://github.com/stellar/go/issues/1165)). +* Fix bug in ChangeTrust operation builder ([1296](https://github.com/stellar/go/issues/1296)). + +## [v1.1.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.1.0) - 2019-02-02 + +* Support for multiple signatures ([#1198](https://github.com/stellar/go/pull/1198)) + +## [v1.0.0](https://github.com/stellar/go/releases/tag/horizonclient-v1.0) - 2019-04-26 + +* Initial release diff --git a/txnbuild/README.md b/txnbuild/README.md new file mode 100644 index 0000000000..e339a1f2e7 --- /dev/null +++ b/txnbuild/README.md @@ -0,0 +1,98 @@ +# txnbuild + +`txnbuild` is a [Stellar SDK](https://developers.stellar.org/docs/software-and-sdks/), implemented in [Go](https://golang.org/). It provides a reference implementation of the complete [set of operations](https://developers.stellar.org/docs/start/list-of-operations/) that compose [transactions](https://developers.stellar.org/docs/glossary/transactions/) for the Stellar distributed ledger. + +This project is maintained by the Stellar Development Foundation. 
+ +```golang + import ( + "log" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/txnbuild" + ) + + // Make a keypair for a known account from a secret seed + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + + // Get the current state of the account from the network + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + if err != nil { + log.Fatalln(err) + } + + // Build an operation to create and fund a new account + op := txnbuild.CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + } + + // Construct the transaction that holds the operations to execute on the network + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&op}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimeout(300), + }, + ) + if err != nil { + log.Fatalln(err) + ) + + // Sign the transaction + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + if err != nil { + log.Fatalln(err) + ) + + // Get the base 64 encoded transaction envelope + txe, err := tx.Base64() + if err != nil { + log.Fatalln(err) + } + + // Send the transaction to the network + resp, err := client.SubmitTransactionXDR(txe) + if err != nil { + log.Fatalln(err) + } +``` + +## Getting Started +This library is aimed at developers building Go applications on top of the [Stellar network](https://www.stellar.org/). Transactions constructed by this library may be submitted to any Horizon instance for processing onto the ledger, using any Stellar SDK client. The recommended client for Go programmers is [horizonclient](https://github.com/stellar/go/tree/master/clients/horizonclient). Together, these two libraries provide a complete Stellar SDK. + +* The [txnbuild API reference](https://godoc.org/github.com/stellar/go/txnbuild). +* The [horizonclient API reference](https://godoc.org/github.com/stellar/go/clients/horizonclient). + +An easy-to-follow demonstration that exercises this SDK on the TestNet with actual accounts is also included! See the [Demo](#demo) section below. + +### Prerequisites +* Go (this repository is officially supported on the last two releases of Go) +* [Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies + +### Installing +* `go get github.com/stellar/go/txnbuild` + +## Running the tests +Run the unit tests from the package directory: `go test` + +## Demo +To see the SDK in action, build and run the demo: +* Enter the demo directory: `cd $GOPATH/src/github.com/stellar/go/txnbuild/cmd/demo` +* Build the demo: `go build` +* Run the demo: `./demo init` + + +## Contributing +Please read [Code of Conduct](https://github.com/stellar/.github/blob/master/CODE_OF_CONDUCT.md) to understand this project's communication rules. + +To submit improvements and fixes to this library, please see [CONTRIBUTING](../CONTRIBUTING.md). + +## License +This project is licensed under the Apache License - see the [LICENSE](../../LICENSE-APACHE.txt) file for details. 
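
The README example above builds and signs a transaction from scratch; going the other way, an existing base64 envelope can be unpacked with `TransactionFromXDR`, which returns a `GenericTransaction` holding either a regular or a fee-bump transaction (this mirrors how `tools/stellar-sign` uses the API elsewhere in this diff). A minimal sketch, assuming you substitute a real envelope for the placeholder string:

```go
package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/txnbuild"
)

func main() {
	// Placeholder: replace with a real base64-encoded transaction envelope.
	envelopeB64 := "AAAA..."

	parsed, err := txnbuild.TransactionFromXDR(envelopeB64)
	if err != nil {
		log.Fatal(err)
	}

	// A GenericTransaction unpacks into either a Transaction or a
	// FeeBumpTransaction; exactly one of the two accessors succeeds.
	if tx, ok := parsed.Transaction(); ok {
		fmt.Println("source:", tx.SourceAccount().AccountID)
		fmt.Println("ops:   ", len(tx.Operations()))
	} else if fb, ok := parsed.FeeBump(); ok {
		b64, _ := fb.Base64()
		fmt.Println("fee bump envelope length:", len(b64))
	}
}
```
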
diff --git a/txnbuild/account_merge.go b/txnbuild/account_merge.go new file mode 100644 index 0000000000..5310b4c7e5 --- /dev/null +++ b/txnbuild/account_merge.go @@ -0,0 +1,62 @@ +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// AccountMerge represents the Stellar merge account operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type AccountMerge struct { + Destination string + SourceAccount string +} + +// BuildXDR for AccountMerge returns a fully configured XDR Operation. +func (am *AccountMerge) BuildXDR() (xdr.Operation, error) { + var xdrOp xdr.MuxedAccount + err := xdrOp.SetAddress(am.Destination) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set destination address") + } + + opType := xdr.OperationTypeAccountMerge + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, am.SourceAccount) + return op, nil +} + +// FromXDR for AccountMerge initialises the txnbuild struct from the corresponding xdr Operation. +func (am *AccountMerge) FromXDR(xdrOp xdr.Operation) error { + if xdrOp.Body.Type != xdr.OperationTypeAccountMerge { + return errors.New("error parsing account_merge operation from xdr") + } + + am.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + if xdrOp.Body.Destination != nil { + am.Destination = xdrOp.Body.Destination.Address() + } + + return nil +} + +// Validate for AccountMerge validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (am *AccountMerge) Validate() error { + var err error + _, err = xdr.AddressToMuxedAccount(am.Destination) + if err != nil { + return NewValidationError("Destination", err.Error()) + } + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. 
+func (am *AccountMerge) GetSourceAccount() string { + return am.SourceAccount +} diff --git a/txnbuild/account_merge_test.go b/txnbuild/account_merge_test.go new file mode 100644 index 0000000000..72cf9027ce --- /dev/null +++ b/txnbuild/account_merge_test.go @@ -0,0 +1,43 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAccountMergeValidate(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484298)) + + accountMerge := AccountMerge{ + Destination: "GBAV", + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&accountMerge}, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + }, + ) + if assert.Error(t, err) { + expected := "invalid address length" + assert.Contains(t, err.Error(), expected) + } +} + +func TestAccountMergeRoundtrip(t *testing.T) { + accountMerge := AccountMerge{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + } + testOperationsMarshallingRoundtrip(t, []Operation{&accountMerge}, false) + + accountMerge = AccountMerge{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + } + testOperationsMarshallingRoundtrip(t, []Operation{&accountMerge}, true) +} diff --git a/txnbuild/allow_trust.go b/txnbuild/allow_trust.go new file mode 100644 index 0000000000..399fa3f945 --- /dev/null +++ b/txnbuild/allow_trust.go @@ -0,0 +1,117 @@ +package txnbuild + +import ( + "bytes" + "fmt" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// Deprecated: use SetTrustLineFlags instead. +// AllowTrust represents the Stellar allow trust operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type AllowTrust struct { + Trustor string + Type Asset + Authorize bool + AuthorizeToMaintainLiabilities bool + ClawbackEnabled bool + SourceAccount string +} + +// BuildXDR for AllowTrust returns a fully configured XDR Operation. 
+func (at *AllowTrust) BuildXDR() (xdr.Operation, error) { + var xdrOp xdr.AllowTrustOp + + // Set XDR address associated with the trustline + err := xdrOp.Trustor.SetAddress(at.Trustor) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set trustor address") + } + + // Validate this is an issued asset + if at.Type.IsNative() { + return xdr.Operation{}, errors.New("trustline doesn't exist for a native (XLM) asset") + } + + // AllowTrust has a special asset type - map to it + xdrAsset := xdr.Asset{} + + xdrOp.Asset, err = xdrAsset.ToAssetCode(at.Type.GetCode()) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "can't convert asset for trustline to allow trust asset type") + } + + // Set XDR auth flag + if at.Authorize { + xdrOp.Authorize = xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag) + } else if at.AuthorizeToMaintainLiabilities { + xdrOp.Authorize = xdr.Uint32(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag) + } + + opType := xdr.OperationTypeAllowTrust + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, at.SourceAccount) + return op, nil +} + +func assetCodeToCreditAsset(assetCode xdr.AssetCode) (CreditAsset, error) { + switch assetCode.Type { + case xdr.AssetTypeAssetTypeCreditAlphanum4: + code := bytes.Trim(assetCode.AssetCode4[:], "\x00") + return CreditAsset{Code: string(code[:])}, nil + case xdr.AssetTypeAssetTypeCreditAlphanum12: + code := bytes.Trim(assetCode.AssetCode12[:], "\x00") + return CreditAsset{Code: string(code[:])}, nil + default: + return CreditAsset{}, fmt.Errorf("unknown asset type: %d", assetCode.Type) + } + +} + +// FromXDR for AllowTrust initialises the txnbuild struct from the corresponding xdr Operation. +func (at *AllowTrust) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetAllowTrustOp() + if !ok { + return errors.New("error parsing allow_trust operation from xdr") + } + + at.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + at.Trustor = result.Trustor.Address() + flag := xdr.TrustLineFlags(result.Authorize) + at.Authorize = flag.IsAuthorized() + at.AuthorizeToMaintainLiabilities = flag.IsAuthorizedToMaintainLiabilitiesFlag() + t, err := assetCodeToCreditAsset(result.Asset) + if err != nil { + return errors.Wrap(err, "error parsing allow_trust operation from xdr") + } + at.Type = t + + return nil +} + +// Validate for AllowTrust validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (at *AllowTrust) Validate() error { + err := validateStellarPublicKey(at.Trustor) + if err != nil { + return NewValidationError("Trustor", err.Error()) + } + + err = validateAssetCode(at.Type) + if err != nil { + return NewValidationError("Type", err.Error()) + } + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. 
+func (at *AllowTrust) GetSourceAccount() string { + return at.SourceAccount +} diff --git a/txnbuild/allow_trust_test.go b/txnbuild/allow_trust_test.go new file mode 100644 index 0000000000..607e58a81b --- /dev/null +++ b/txnbuild/allow_trust_test.go @@ -0,0 +1,78 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAllowTrustValidateAsset(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484366)) + + issuedAsset := CreditAsset{"", kp1.Address()} + allowTrust := AllowTrust{ + Trustor: kp1.Address(), + Type: issuedAsset, + Authorize: true, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&allowTrust}, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.AllowTrust operation: Field: Type, Error: asset code length must be between 1 and 12 characters" + assert.Contains(t, err.Error(), expected) + } +} + +func TestAllowTrustValidateTrustor(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484366)) + + issuedAsset := CreditAsset{"ABCD", kp1.Address()} + allowTrust := AllowTrust{ + Trustor: "", + Type: issuedAsset, + Authorize: true, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&allowTrust}, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.AllowTrust operation: Field: Trustor, Error: public key is undefined" + assert.Contains(t, err.Error(), expected) + } +} + +func TestAllowTrustRoundtrip(t *testing.T) { + allowTrust := AllowTrust{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Trustor: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Type: CreditAsset{"USD", ""}, + Authorize: true, + } + testOperationsMarshallingRoundtrip(t, []Operation{&allowTrust}, false) + + // with muxed accounts + allowTrust = AllowTrust{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Trustor: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Type: CreditAsset{"USD", ""}, + Authorize: true, + } + testOperationsMarshallingRoundtrip(t, []Operation{&allowTrust}, true) +} diff --git a/txnbuild/asset.go b/txnbuild/asset.go new file mode 100644 index 0000000000..a0683da5cf --- /dev/null +++ b/txnbuild/asset.go @@ -0,0 +1,215 @@ +package txnbuild + +import ( + "bytes" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// AssetType represents the type of a Stellar asset. +type AssetType xdr.AssetType + +// AssetTypeNative, AssetTypeCreditAlphanum4, AssetTypeCreditAlphanum12 enumerate the different +// types of asset on the Stellar network. 
+const ( + AssetTypeNative AssetType = AssetType(xdr.AssetTypeAssetTypeNative) + AssetTypeCreditAlphanum4 AssetType = AssetType(xdr.AssetTypeAssetTypeCreditAlphanum4) + AssetTypeCreditAlphanum12 AssetType = AssetType(xdr.AssetTypeAssetTypeCreditAlphanum12) + AssetTypePoolShare AssetType = AssetType(xdr.AssetTypeAssetTypePoolShare) +) + +// Breaks out some stuff common to all assets +type BasicAsset interface { + GetType() (AssetType, error) + IsNative() bool + GetCode() string + GetIssuer() string + + // Conversions to the 3 asset types + ToAsset() (Asset, error) + MustToAsset() Asset + + ToChangeTrustAsset() (ChangeTrustAsset, error) + MustToChangeTrustAsset() ChangeTrustAsset + + ToTrustLineAsset() (TrustLineAsset, error) + MustToTrustLineAsset() TrustLineAsset +} + +// Asset represents a Stellar asset. +type Asset interface { + BasicAsset + LessThan(other Asset) bool + ToXDR() (xdr.Asset, error) +} + +// NativeAsset represents the native XLM asset. +type NativeAsset struct{} + +// GetType for NativeAsset returns the enum type of the asset. +func (na NativeAsset) GetType() (AssetType, error) { + return AssetTypeNative, nil +} + +// IsNative for NativeAsset returns true (this is an XLM asset). +func (na NativeAsset) IsNative() bool { return true } + +// GetCode for NativeAsset returns an empty string (XLM doesn't have a code). +func (na NativeAsset) GetCode() string { return "" } + +// GetIssuer for NativeAsset returns an empty string (XLM doesn't have an issuer). +func (na NativeAsset) GetIssuer() string { return "" } + +// LessThan returns true if this asset sorts before some other asset. +func (na NativeAsset) LessThan(other Asset) bool { return true } + +// ToXDR for NativeAsset produces a corresponding XDR asset. +func (na NativeAsset) ToXDR() (xdr.Asset, error) { + xdrAsset := xdr.Asset{} + err := xdrAsset.SetNative() + if err != nil { + return xdr.Asset{}, err + } + return xdrAsset, nil +} + +// ToAsset for NativeAsset returns itself unchanged. +func (na NativeAsset) ToAsset() (Asset, error) { + return na, nil +} + +// MustToAsset for NativeAsset returns itself unchanged. +func (na NativeAsset) MustToAsset() Asset { + return na +} + +// ToChangeTrustAsset for NativeAsset produces a corresponding ChangeTrustAsset. +func (na NativeAsset) ToChangeTrustAsset() (ChangeTrustAsset, error) { + return ChangeTrustAssetWrapper{na}, nil +} + +// MustToChangeTrustAsset for NativeAsset produces a corresponding ChangeTrustAsset. +func (na NativeAsset) MustToChangeTrustAsset() ChangeTrustAsset { + return ChangeTrustAssetWrapper{na} +} + +// ToTrustLineAsset for NativeAsset produces a corresponding TrustLineAsset. +func (na NativeAsset) ToTrustLineAsset() (TrustLineAsset, error) { + return TrustLineAssetWrapper{na}, nil +} + +// MustToTrustLineAsset for NativeAsset produces a corresponding TrustLineAsset. +func (na NativeAsset) MustToTrustLineAsset() TrustLineAsset { + return TrustLineAssetWrapper{na} +} + +// CreditAsset represents non-XLM assets on the Stellar network. +type CreditAsset struct { + Code string + Issuer string +} + +// GetType for CreditAsset returns the enum type of the asset, based on its code length. 
+func (ca CreditAsset) GetType() (AssetType, error) { + switch { + case len(ca.Code) >= 1 && len(ca.Code) <= 4: + return AssetTypeCreditAlphanum4, nil + case len(ca.Code) >= 5 && len(ca.Code) <= 12: + return AssetTypeCreditAlphanum12, nil + default: + return AssetTypeCreditAlphanum4, errors.New("asset code length must be between 1 and 12 characters") + } +} + +// IsNative for CreditAsset returns false (this is not an XLM asset). +func (ca CreditAsset) IsNative() bool { return false } + +// GetCode for CreditAsset returns the asset code. +func (ca CreditAsset) GetCode() string { return ca.Code } + +// GetIssuer for CreditAsset returns the address of the issuing account. +func (ca CreditAsset) GetIssuer() string { return ca.Issuer } + +// LessThan returns true if this asset sorts before some other asset. +func (ca CreditAsset) LessThan(other Asset) bool { + caXDR, err := ca.ToXDR() + if err != nil { + return false + } + otherXDR, err := other.ToXDR() + if err != nil { + return false + } + return caXDR.LessThan(otherXDR) +} + +// ToXDR for CreditAsset produces a corresponding XDR asset. +func (ca CreditAsset) ToXDR() (xdr.Asset, error) { + xdrAsset := xdr.Asset{} + var issuer xdr.AccountId + + err := issuer.SetAddress(ca.Issuer) + if err != nil { + return xdr.Asset{}, err + } + + err = xdrAsset.SetCredit(ca.Code, issuer) + if err != nil { + return xdr.Asset{}, errors.Wrap(err, "asset code length must be between 1 and 12 characters") + } + + return xdrAsset, nil +} + +// ToAsset for CreditAsset returns itself unchanged. +func (ca CreditAsset) ToAsset() (Asset, error) { + return ca, nil +} + +// MustToAsset for CreditAsset returns itself unchanged. +func (ca CreditAsset) MustToAsset() Asset { + return ca +} + +// ToChangeTrustAsset for CreditAsset produces a corresponding XDR asset. +func (ca CreditAsset) ToChangeTrustAsset() (ChangeTrustAsset, error) { + return ChangeTrustAssetWrapper{ca}, nil +} + +// MustToChangeTrustAsset for CreditAsset produces a corresponding XDR asset. +func (ca CreditAsset) MustToChangeTrustAsset() ChangeTrustAsset { + return ChangeTrustAssetWrapper{ca} +} + +// ToTrustLineAsset for CreditAsset produces a corresponding XDR asset. +func (ca CreditAsset) ToTrustLineAsset() (TrustLineAsset, error) { + return TrustLineAssetWrapper{ca}, nil +} + +// MustToTrustLineAsset for CreditAsset produces a corresponding XDR asset. +func (ca CreditAsset) MustToTrustLineAsset() TrustLineAsset { + return TrustLineAssetWrapper{ca} +} + +// to do: consider exposing function or adding it to asset interface +func assetFromXDR(xAsset xdr.Asset) (Asset, error) { + switch xAsset.Type { + case xdr.AssetTypeAssetTypeNative: + return NativeAsset{}, nil + case xdr.AssetTypeAssetTypeCreditAlphanum4: + code := bytes.Trim(xAsset.AlphaNum4.AssetCode[:], "\x00") + return CreditAsset{ + Code: string(code[:]), + Issuer: xAsset.AlphaNum4.Issuer.Address(), + }, nil + case xdr.AssetTypeAssetTypeCreditAlphanum12: + code := bytes.Trim(xAsset.AlphaNum12.AssetCode[:], "\x00") + return CreditAsset{ + Code: string(code[:]), + Issuer: xAsset.AlphaNum12.Issuer.Address(), + }, nil + } + + return nil, errors.New("invalid asset") +} diff --git a/txnbuild/asset_amount.go b/txnbuild/asset_amount.go new file mode 100644 index 0000000000..6a2ec88b32 --- /dev/null +++ b/txnbuild/asset_amount.go @@ -0,0 +1,8 @@ +package txnbuild + +// AssetAmount is a "tuple", pairing an asset with an amount. Used for +// LiquidityPoolDeposit and LiquidityPoolWithdraw operations. 
+type AssetAmount struct { + Asset Asset + Amount string +} diff --git a/txnbuild/asset_test.go b/txnbuild/asset_test.go new file mode 100644 index 0000000000..5ac1f8fd3d --- /dev/null +++ b/txnbuild/asset_test.go @@ -0,0 +1,104 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/stellar/go/xdr" +) + +func TestNativeAssetToXDR(t *testing.T) { + asset := NativeAsset{} + + received, err := asset.ToXDR() + assert.Nil(t, err) + + expected := xdr.Asset{Type: xdr.AssetTypeAssetTypeNative} + assert.Equal(t, expected, received, "Empty asset converts to native XDR") +} + +func TestAlphaNum4AssetToXDR(t *testing.T) { + asset := CreditAsset{ + Code: "USD", + Issuer: newKeypair0().Address(), + } + var xdrAssetCode [4]byte + copy(xdrAssetCode[:], asset.Code) + var xdrIssuer xdr.AccountId + require.NoError(t, xdrIssuer.SetAddress(asset.Issuer)) + + received, err := asset.ToXDR() + assert.Nil(t, err) + + expected := xdr.Asset{Type: xdr.AssetTypeAssetTypeCreditAlphanum4, + AlphaNum4: &xdr.AlphaNum4{ + AssetCode: xdrAssetCode, + Issuer: xdrIssuer, + }} + assert.Equal(t, expected, received, "4 digit codes ok") +} + +func TestAlphaNum12AssetToXDR(t *testing.T) { + asset := CreditAsset{ + Code: "MEGAUSD", + Issuer: newKeypair0().Address(), + } + var xdrAssetCode [12]byte + copy(xdrAssetCode[:], asset.Code) + var xdrIssuer xdr.AccountId + require.NoError(t, xdrIssuer.SetAddress(asset.Issuer)) + + received, err := asset.ToXDR() + assert.Nil(t, err) + + expected := xdr.Asset{Type: xdr.AssetTypeAssetTypeCreditAlphanum12, + AlphaNum12: &xdr.AlphaNum12{ + AssetCode: xdrAssetCode, + Issuer: xdrIssuer, + }} + assert.Equal(t, expected, received, "12 digit codes ok") +} + +func TestCodeTooShort(t *testing.T) { + asset := CreditAsset{ + Code: "", + Issuer: newKeypair0().Address(), + } + var xdrAssetCode [12]byte + copy(xdrAssetCode[:], asset.Code) + var xdrIssuer xdr.AccountId + require.NoError(t, xdrIssuer.SetAddress(asset.Issuer)) + + _, err := asset.ToXDR() + expectedErrMsg := "asset code length must be between 1 and 12 characters: Asset code length is invalid" + require.EqualError(t, err, expectedErrMsg, "Minimum code length should be enforced") +} + +func TestCodeTooLong(t *testing.T) { + asset := CreditAsset{ + Code: "THIRTEENCHARS", + Issuer: newKeypair0().Address(), + } + var xdrAssetCode [12]byte + copy(xdrAssetCode[:], asset.Code) + var xdrIssuer xdr.AccountId + require.NoError(t, xdrIssuer.SetAddress(asset.Issuer)) + + _, err := asset.ToXDR() + expectedErrMsg := "asset code length must be between 1 and 12 characters: Asset code length is invalid" + require.EqualError(t, err, expectedErrMsg, "Maximum code length should be enforced") +} + +func TestBadIssuer(t *testing.T) { + asset := CreditAsset{ + Code: "USD", + Issuer: "DOESNTLOOKLIKEANADDRESS", + } + var xdrAssetCode [4]byte + copy(xdrAssetCode[:], asset.Code) + var xdrIssuer xdr.AccountId + expectedErrMsg := "non-canonical strkey; unused bits should be set to 0" + require.EqualError(t, xdrIssuer.SetAddress(asset.Issuer), expectedErrMsg, "Issuer address should be validated") +} diff --git a/txnbuild/assets.go b/txnbuild/assets.go new file mode 100644 index 0000000000..f2c5c055e6 --- /dev/null +++ b/txnbuild/assets.go @@ -0,0 +1,8 @@ +package txnbuild + +// Assets represents a list of Stellar assets. Importantly, it is sortable. 
+type Assets []Asset + +func (s Assets) Len() int { return len(s) } +func (s Assets) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Assets) Less(i, j int) bool { return s[i].LessThan(s[j]) } diff --git a/txnbuild/assets_test.go b/txnbuild/assets_test.go new file mode 100644 index 0000000000..fb9281d050 --- /dev/null +++ b/txnbuild/assets_test.go @@ -0,0 +1,40 @@ +package txnbuild + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAssetsSorting(t *testing.T) { + // Native is always first + a := NativeAsset{} + + // Type is Alphanum4 + b := CreditAsset{Code: "BCDE", Issuer: "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO"} + + // Type is Alphanum12 + c := CreditAsset{Code: "ABCD1", Issuer: "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO"} + + // Code is > + d := CreditAsset{Code: "ABCD2", Issuer: "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO"} + + // Issuer is > + e := CreditAsset{Code: "ABCD2", Issuer: "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ"} + + expected := Assets([]Asset{a, b, c, d, e}) + + t.Run("basic check it doesn't change stuff", func(t *testing.T) { + assets := Assets([]Asset{a, b, c, d, e}) + sort.Sort(assets) + assert.Equal(t, expected, assets) + }) + + // Reverse it and check it still sorts to the same + t.Run("reverse it and check it sorts the same", func(t *testing.T) { + assets := Assets([]Asset{e, d, c, b, a}) + sort.Sort(assets) + assert.Equal(t, expected, assets) + }) +} diff --git a/txnbuild/begin_sponsoring_future_reserves.go b/txnbuild/begin_sponsoring_future_reserves.go new file mode 100644 index 0000000000..fcfb497870 --- /dev/null +++ b/txnbuild/begin_sponsoring_future_reserves.go @@ -0,0 +1,61 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// BeginSponsoringFutureReserves represents the Stellar begin sponsoring future reserves operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type BeginSponsoringFutureReserves struct { + SponsoredID string + SourceAccount string +} + +// BuildXDR for BeginSponsoringFutureReserves returns a fully configured XDR Operation. +func (bs *BeginSponsoringFutureReserves) BuildXDR() (xdr.Operation, error) { + xdrOp := xdr.BeginSponsoringFutureReservesOp{} + err := xdrOp.SponsoredId.SetAddress(bs.SponsoredID) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set sponsored id address") + } + opType := xdr.OperationTypeBeginSponsoringFutureReserves + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, bs.SourceAccount) + return op, nil +} + +// FromXDR for BeginSponsoringFutureReserves initializes the txnbuild struct from the corresponding xdr Operation. +func (bs *BeginSponsoringFutureReserves) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetBeginSponsoringFutureReservesOp() + if !ok { + return errors.New("error parsing begin_sponsoring_future_reserves operation from xdr") + } + bs.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + bs.SponsoredID = result.SponsoredId.Address() + + return nil +} + +// Validate for BeginSponsoringFutureReserves validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. 
+func (bs *BeginSponsoringFutureReserves) Validate() error { + err := validateStellarPublicKey(bs.SponsoredID) + if err != nil { + return NewValidationError("SponsoredID", err.Error()) + } + + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. +func (bs *BeginSponsoringFutureReserves) GetSourceAccount() string { + return bs.SourceAccount +} diff --git a/txnbuild/begin_sponsoring_future_reserves_test.go b/txnbuild/begin_sponsoring_future_reserves_test.go new file mode 100644 index 0000000000..bcacb96c1d --- /dev/null +++ b/txnbuild/begin_sponsoring_future_reserves_test.go @@ -0,0 +1,13 @@ +package txnbuild + +import ( + "testing" +) + +func TestBeginSponsoringFutureReservesRoundTrip(t *testing.T) { + beginSponsoring := &BeginSponsoringFutureReserves{ + SponsoredID: newKeypair1().Address(), + } + + testOperationsMarshallingRoundtrip(t, []Operation{beginSponsoring}, false) +} diff --git a/txnbuild/bump_sequence.go b/txnbuild/bump_sequence.go new file mode 100644 index 0000000000..a93aa2c525 --- /dev/null +++ b/txnbuild/bump_sequence.go @@ -0,0 +1,54 @@ +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// BumpSequence represents the Stellar bump sequence operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type BumpSequence struct { + BumpTo int64 + SourceAccount string +} + +// BuildXDR for BumpSequence returns a fully configured XDR Operation. +func (bs *BumpSequence) BuildXDR() (xdr.Operation, error) { + opType := xdr.OperationTypeBumpSequence + xdrOp := xdr.BumpSequenceOp{BumpTo: xdr.SequenceNumber(bs.BumpTo)} + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, bs.SourceAccount) + return op, nil +} + +// FromXDR for BumpSequence initialises the txnbuild struct from the corresponding xdr Operation. +func (bs *BumpSequence) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetBumpSequenceOp() + if !ok { + return errors.New("error parsing bump_sequence operation from xdr") + } + + bs.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + bs.BumpTo = int64(result.BumpTo) + return nil +} + +// Validate for BumpSequence validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (bs *BumpSequence) Validate() error { + err := validateAmount(bs.BumpTo) + if err != nil { + return NewValidationError("BumpTo", err.Error()) + } + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. 
+func (bs *BumpSequence) GetSourceAccount() string { + return bs.SourceAccount +} diff --git a/txnbuild/bump_sequence_test.go b/txnbuild/bump_sequence_test.go new file mode 100644 index 0000000000..067cdd0b8f --- /dev/null +++ b/txnbuild/bump_sequence_test.go @@ -0,0 +1,43 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestBumpSequenceValidate(t *testing.T) { + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(9606132444168199)) + + bumpSequence := BumpSequence{ + BumpTo: -10, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&bumpSequence}, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.BumpSequence operation: Field: BumpTo, Error: amount can not be negative" + assert.Contains(t, err.Error(), expected) + } +} + +func TestBumpSequenceRountrip(t *testing.T) { + bumpSequence := BumpSequence{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + BumpTo: 10, + } + testOperationsMarshallingRoundtrip(t, []Operation{&bumpSequence}, false) + + bumpSequence = BumpSequence{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + BumpTo: 10, + } + testOperationsMarshallingRoundtrip(t, []Operation{&bumpSequence}, true) +} diff --git a/txnbuild/change_trust.go b/txnbuild/change_trust.go new file mode 100644 index 0000000000..598e2588a0 --- /dev/null +++ b/txnbuild/change_trust.go @@ -0,0 +1,104 @@ +package txnbuild + +import ( + "math" + + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// ChangeTrust represents the Stellar change trust operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +// If Limit is omitted, it defaults to txnbuild.MaxTrustlineLimit. +type ChangeTrust struct { + Line ChangeTrustAsset + Limit string + SourceAccount string +} + +// MaxTrustlineLimit represents the maximum value that can be set as a trustline limit. +var MaxTrustlineLimit = amount.StringFromInt64(math.MaxInt64) + +// RemoveTrustlineOp returns a ChangeTrust operation to remove the trustline of the described asset, +// by setting the limit to "0". +func RemoveTrustlineOp(issuedAsset ChangeTrustAsset) ChangeTrust { + return ChangeTrust{ + Line: issuedAsset, + Limit: "0", + } +} + +// BuildXDR for ChangeTrust returns a fully configured XDR Operation. +func (ct *ChangeTrust) BuildXDR() (xdr.Operation, error) { + if ct.Line.IsNative() { + return xdr.Operation{}, errors.New("trustline cannot be extended to a native (XLM) asset") + } + xdrLine, err := ct.Line.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "can't convert trustline asset to XDR") + } + + if ct.Limit == "" { + ct.Limit = MaxTrustlineLimit + } + + xdrLimit, err := amount.Parse(ct.Limit) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse limit amount") + } + + opType := xdr.OperationTypeChangeTrust + xdrOp := xdr.ChangeTrustOp{ + Line: xdrLine, + Limit: xdrLimit, + } + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, ct.SourceAccount) + return op, nil +} + +// FromXDR for ChangeTrust initialises the txnbuild struct from the corresponding xdr Operation. 
+func (ct *ChangeTrust) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetChangeTrustOp() + if !ok { + return errors.New("error parsing change_trust operation from xdr") + } + + ct.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + ct.Limit = amount.String(result.Limit) + asset, err := assetFromChangeTrustAssetXDR(result.Line) + if err != nil { + return errors.Wrap(err, "error parsing asset in change_trust operation") + } + ct.Line = asset + return nil +} + +// Validate for ChangeTrust validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (ct *ChangeTrust) Validate() error { + // only validate limit if it has a value. Empty limit is set to the max trustline limit. + if ct.Limit != "" { + err := validateAmount(ct.Limit) + if err != nil { + return NewValidationError("Limit", err.Error()) + } + } + + err := validateChangeTrustAsset(ct.Line) + if err != nil { + return NewValidationError("Line", err.Error()) + } + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. +func (ct *ChangeTrust) GetSourceAccount() string { + return ct.SourceAccount +} diff --git a/txnbuild/change_trust_asset.go b/txnbuild/change_trust_asset.go new file mode 100644 index 0000000000..6085921e59 --- /dev/null +++ b/txnbuild/change_trust_asset.go @@ -0,0 +1,134 @@ +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// ChangeTrustAsset represents a Stellar change trust asset. +type ChangeTrustAsset interface { + BasicAsset + GetLiquidityPoolID() (LiquidityPoolId, bool) + GetLiquidityPoolParameters() (LiquidityPoolParameters, bool) + ToXDR() (xdr.ChangeTrustAsset, error) + ToChangeTrustAsset() (ChangeTrustAsset, error) + ToTrustLineAsset() (TrustLineAsset, error) +} + +// LiquidityPoolShareChangeTrustAsset represents non-XLM assets on the Stellar network. +type LiquidityPoolShareChangeTrustAsset struct { + LiquidityPoolParameters LiquidityPoolParameters +} + +// GetType for LiquidityPoolShareChangeTrustAsset returns the enum type of the asset, based on its code length. +func (lpsa LiquidityPoolShareChangeTrustAsset) GetType() (AssetType, error) { + return AssetTypePoolShare, nil +} + +// IsNative for LiquidityPoolShareChangeTrustAsset returns false (this is not an XLM asset). +func (lpsa LiquidityPoolShareChangeTrustAsset) IsNative() bool { return false } + +// GetCode for LiquidityPoolShareChangeTrustAsset returns blank string +func (lpsa LiquidityPoolShareChangeTrustAsset) GetCode() string { return "" } + +// GetIssuer for LiquidityPoolShareChangeTrustAsset returns blank string +func (lpsa LiquidityPoolShareChangeTrustAsset) GetIssuer() string { return "" } + +// GetLiquidityPoolID for LiquidityPoolShareChangeTrustAsset returns the pool id computed from the parameters. +func (lpsa LiquidityPoolShareChangeTrustAsset) GetLiquidityPoolID() (LiquidityPoolId, bool) { + poolId, err := NewLiquidityPoolId(lpsa.LiquidityPoolParameters.AssetA, lpsa.LiquidityPoolParameters.AssetB) + return poolId, err == nil +} + +// GetLiquidityPoolParameters for LiquidityPoolShareChangeTrustAsset returns the pool parameters. +func (lpsa LiquidityPoolShareChangeTrustAsset) GetLiquidityPoolParameters() (LiquidityPoolParameters, bool) { + return lpsa.LiquidityPoolParameters, true +} + +// ToXDR for LiquidityPoolShareChangeTrustAsset produces a corresponding XDR change trust asset. 
+func (lpsa LiquidityPoolShareChangeTrustAsset) ToXDR() (xdr.ChangeTrustAsset, error) { + xdrPoolParams, err := lpsa.LiquidityPoolParameters.ToXDR() + if err != nil { + return xdr.ChangeTrustAsset{}, errors.Wrap(err, "failed to build XDR liquidity pool parameters") + } + return xdr.ChangeTrustAsset{Type: xdr.AssetTypeAssetTypePoolShare, LiquidityPool: &xdrPoolParams}, nil +} + +// ToAsset for LiquidityPoolShareChangeTrustAsset returns an error. +func (lpsa LiquidityPoolShareChangeTrustAsset) ToAsset() (Asset, error) { + return nil, errors.New("Cannot transform LiquidityPoolShare into Asset") +} + +// MustToAsset for LiquidityPoolShareChangeTrustAsset panics. +func (lpsa LiquidityPoolShareChangeTrustAsset) MustToAsset() Asset { + panic("Cannot transform LiquidityPoolShare into Asset") +} + +// ToChangeTrustAsset for LiquidityPoolShareChangeTrustAsset returns itself unchanged. +func (lpsa LiquidityPoolShareChangeTrustAsset) ToChangeTrustAsset() (ChangeTrustAsset, error) { + return lpsa, nil +} + +// MustToChangeTrustAsset for LiquidityPoolShareChangeTrustAsset returns itself unchanged. +func (lpsa LiquidityPoolShareChangeTrustAsset) MustToChangeTrustAsset() ChangeTrustAsset { + return lpsa +} + +// ToTrustLineAsset for LiquidityPoolShareChangeTrustAsset hashes the pool parameters to get the pool id, and converts this to a TrustLineAsset. +func (lpsa LiquidityPoolShareChangeTrustAsset) ToTrustLineAsset() (TrustLineAsset, error) { + poolId, err := NewLiquidityPoolId(lpsa.LiquidityPoolParameters.AssetA, lpsa.LiquidityPoolParameters.AssetB) + if err != nil { + return nil, err + } + return LiquidityPoolShareTrustLineAsset{ + LiquidityPoolID: poolId, + }, nil +} + +// MustToTrustLineAsset for LiquidityPoolShareChangeTrustAsset hashes the pool parameters to get the pool id, and converts this to a TrustLineAsset. It panics on failure. +func (lpsa LiquidityPoolShareChangeTrustAsset) MustToTrustLineAsset() TrustLineAsset { + tla, err := lpsa.ToTrustLineAsset() + if err != nil { + panic(err) + } + return tla +} + +func assetFromChangeTrustAssetXDR(xAsset xdr.ChangeTrustAsset) (ChangeTrustAsset, error) { + if xAsset.Type == xdr.AssetTypeAssetTypePoolShare { + params, err := liquidityPoolParametersFromXDR(*xAsset.LiquidityPool) + if err != nil { + return nil, errors.Wrap(err, "invalid XDR liquidity pool parameters") + } + return LiquidityPoolShareChangeTrustAsset{LiquidityPoolParameters: params}, nil + } + a, err := assetFromXDR(xAsset.ToAsset()) + if err != nil { + return nil, err + } + return ChangeTrustAssetWrapper{a}, nil +} + +// ChangeTrustAssetWrapper wraps a native/credit Asset so it generates xdr to be used in a change trust operation. +type ChangeTrustAssetWrapper struct { + Asset +} + +// GetLiquidityPoolID for ChangeTrustAssetWrapper returns false. +func (ctaw ChangeTrustAssetWrapper) GetLiquidityPoolID() (LiquidityPoolId, bool) { + return LiquidityPoolId{}, false +} + +// GetLiquidityPoolParameters for ChangeTrustAssetWrapper returns false. +func (ctaw ChangeTrustAssetWrapper) GetLiquidityPoolParameters() (LiquidityPoolParameters, bool) { + return LiquidityPoolParameters{}, false +} + +// ToXDR for ChangeTrustAssetWrapper generates the xdr.TrustLineAsset. 
+func (ctaw ChangeTrustAssetWrapper) ToXDR() (xdr.ChangeTrustAsset, error) { + x, err := ctaw.Asset.ToXDR() + if err != nil { + return xdr.ChangeTrustAsset{}, err + } + return x.ToChangeTrustAsset(), nil +} diff --git a/txnbuild/change_trust_test.go b/txnbuild/change_trust_test.go new file mode 100644 index 0000000000..9ba0c38ec4 --- /dev/null +++ b/txnbuild/change_trust_test.go @@ -0,0 +1,97 @@ +package txnbuild + +import ( + "testing" + + "github.com/stellar/go/network" + "github.com/stretchr/testify/assert" +) + +func TestChangeTrustMaxLimit(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + changeTrust := ChangeTrust{ + Line: CreditAsset{"ABCD", kp0.Address()}.MustToChangeTrustAsset(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&changeTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABgAAAAFBQkNEAAAAAODcbeFyXKxmUWK1L6znNbKKIkPkHRJNbLktcKPqLnLFf/////////8AAAAAAAAAAeoucsUAAABAXp/gGvNtaqn2/gEh4QoNO+LpT3AmyLFDb81INsfdkf70USiBheUc7bzxgZJLVpFy2qw3ucqpQQPi986XFbPsAQ==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestChangeTrustValidateInvalidAsset(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + changeTrust := ChangeTrust{ + Line: NativeAsset{}.MustToChangeTrustAsset(), + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&changeTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ChangeTrust operation: Field: Line, Error: native (XLM) asset type is not allowed" + assert.Contains(t, err.Error(), expected) + } +} + +func TestChangeTrustValidateInvalidLimit(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + changeTrust := ChangeTrust{ + Line: CreditAsset{"ABCD", kp0.Address()}.MustToChangeTrustAsset(), + Limit: "-1", + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&changeTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ChangeTrust operation: Field: Limit, Error: amount can not be negative" + assert.Contains(t, err.Error(), expected) + } +} + +func TestChangeTrustRoundtrip(t *testing.T) { + changeTrust := ChangeTrust{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Line: CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}.MustToChangeTrustAsset(), + Limit: "1.0000000", + } + testOperationsMarshallingRoundtrip(t, []Operation{&changeTrust}, false) + + // with muxed accounts + changeTrust = ChangeTrust{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Line: CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}.MustToChangeTrustAsset(), + Limit: "1.0000000", + } + testOperationsMarshallingRoundtrip(t, 
[]Operation{&changeTrust}, true) +} diff --git a/txnbuild/claim_claimable_balance.go b/txnbuild/claim_claimable_balance.go new file mode 100644 index 0000000000..ace6acecbe --- /dev/null +++ b/txnbuild/claim_claimable_balance.go @@ -0,0 +1,72 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// ClaimClaimableBalance represents the Stellar claim claimable balance operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type ClaimClaimableBalance struct { + BalanceID string + SourceAccount string +} + +// BuildXDR for ClaimClaimableBalance returns a fully configured XDR Operation. +func (cb *ClaimClaimableBalance) BuildXDR() (xdr.Operation, error) { + var xdrBalanceID xdr.ClaimableBalanceId + err := xdr.SafeUnmarshalHex(cb.BalanceID, &xdrBalanceID) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set XDR 'ClaimableBalanceId' field") + } + xdrOp := xdr.ClaimClaimableBalanceOp{ + BalanceId: xdrBalanceID, + } + + opType := xdr.OperationTypeClaimClaimableBalance + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, cb.SourceAccount) + + return op, nil +} + +// FromXDR for ClaimClaimableBalance initializes the txnbuild struct from the corresponding xdr Operation. +func (cb *ClaimClaimableBalance) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetClaimClaimableBalanceOp() + if !ok { + return errors.New("error parsing claim_claimable_balance operation from xdr") + } + + cb.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + balanceID, err := xdr.MarshalHex(result.BalanceId) + if err != nil { + return errors.New("error parsing BalanceID in claim_claimable_balance operation from xdr") + } + cb.BalanceID = balanceID + + return nil +} + +// Validate for ClaimClaimableBalance validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (cb *ClaimClaimableBalance) Validate() error { + var xdrBalanceID xdr.ClaimableBalanceId + err := xdr.SafeUnmarshalHex(cb.BalanceID, &xdrBalanceID) + if err != nil { + return NewValidationError("BalanceID", err.Error()) + } + + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. 
+func (cb *ClaimClaimableBalance) GetSourceAccount() string { + return cb.SourceAccount +} diff --git a/txnbuild/claim_claimable_balance_test.go b/txnbuild/claim_claimable_balance_test.go new file mode 100644 index 0000000000..1a1e285292 --- /dev/null +++ b/txnbuild/claim_claimable_balance_test.go @@ -0,0 +1,22 @@ +package txnbuild + +import ( + "testing" +) + +func TestClaimClaimableBalanceRoundTrip(t *testing.T) { + claimClaimableBalance := &ClaimClaimableBalance{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + BalanceID: "00000000929b20b72e5890ab51c24f1cc46fa01c4f318d8d33367d24dd614cfdf5491072", + } + + testOperationsMarshallingRoundtrip(t, []Operation{claimClaimableBalance}, false) + + // with muxed accounts + claimClaimableBalance = &ClaimClaimableBalance{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + BalanceID: "00000000929b20b72e5890ab51c24f1cc46fa01c4f318d8d33367d24dd614cfdf5491072", + } + + testOperationsMarshallingRoundtrip(t, []Operation{claimClaimableBalance}, true) +} diff --git a/txnbuild/clawback.go b/txnbuild/clawback.go new file mode 100644 index 0000000000..6413051b1f --- /dev/null +++ b/txnbuild/clawback.go @@ -0,0 +1,107 @@ +package txnbuild + +import ( + "github.com/pkg/errors" + + "github.com/stellar/go/amount" + "github.com/stellar/go/xdr" +) + +// Clawback represents the Stellar clawback operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type Clawback struct { + From string + Amount string + Asset Asset + SourceAccount string +} + +// BuildXDR for Clawback returns a fully configured XDR Operation. +func (cb *Clawback) BuildXDR() (xdr.Operation, error) { + var fromMuxedAccount xdr.MuxedAccount + err := fromMuxedAccount.SetAddress(cb.From) + + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set from address") + } + + if cb.Asset == nil { + return xdr.Operation{}, errors.New("you must specify an asset for the clawback") + } + // Validate this is an issued asset + if cb.Asset.IsNative() { + return xdr.Operation{}, errors.New("clawbacks don't support the native (XLM) asset") + } + + xdrAmount, err := amount.Parse(cb.Amount) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse amount") + } + + xdrAsset, err := cb.Asset.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "can't convert asset to XDR") + } + + opType := xdr.OperationTypeClawback + xdrOp := xdr.ClawbackOp{ + From: fromMuxedAccount, + Amount: xdrAmount, + Asset: xdrAsset, + } + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR Operation") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, cb.SourceAccount) + return op, nil +} + +// FromXDR for Clawback initialises the txnbuild struct from the corresponding xdr Operation. +func (cb *Clawback) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetClawbackOp() + if !ok { + return errors.New("error parsing clawback operation from xdr") + } + + cb.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + cb.From = accountFromXDR(&result.From) + cb.Amount = amount.String(result.Amount) + asset, err := assetFromXDR(result.Asset) + if err != nil { + return errors.Wrap(err, "error parsing clawback operation from XDR") + } + cb.Asset = asset + + return nil +} + +// Validate for Clawback validates the required struct fields. It returns an error if any +// of the fields are invalid. 
Otherwise, it returns nil. +func (cb *Clawback) Validate() error { + var err error + _, err = xdr.AddressToMuxedAccount(cb.From) + + if err != nil { + return NewValidationError("From", err.Error()) + } + + err = validateAmount(cb.Amount) + if err != nil { + return NewValidationError("Amount", err.Error()) + } + + err = validateAssetCode(cb.Asset) + if err != nil { + return NewValidationError("Asset", err.Error()) + } + + return nil +} + +// GetSourceAccount returns the source account of the operation, or nil if not +// set. +func (cb *Clawback) GetSourceAccount() string { + return cb.SourceAccount +} diff --git a/txnbuild/clawback_claimable_balance.go b/txnbuild/clawback_claimable_balance.go new file mode 100644 index 0000000000..f0237aebc1 --- /dev/null +++ b/txnbuild/clawback_claimable_balance.go @@ -0,0 +1,71 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// ClawbackClaimableBalance represents the Stellar clawback claimable balance operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type ClawbackClaimableBalance struct { + BalanceID string + SourceAccount string +} + +// BuildXDR for ClawbackClaimableBalance returns a fully configured XDR Operation. +func (cb *ClawbackClaimableBalance) BuildXDR() (xdr.Operation, error) { + var xdrBalanceID xdr.ClaimableBalanceId + err := xdr.SafeUnmarshalHex(cb.BalanceID, &xdrBalanceID) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set XDR 'ClaimableBalanceId' field") + } + xdrOp := xdr.ClawbackClaimableBalanceOp{ + BalanceId: xdrBalanceID, + } + + opType := xdr.OperationTypeClawbackClaimableBalance + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, cb.SourceAccount) + + return op, nil +} + +// FromXDR for ClawbackClaimableBalance initializes the txnbuild struct from the corresponding xdr Operation. +func (cb *ClawbackClaimableBalance) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetClawbackClaimableBalanceOp() + if !ok { + return errors.New("error parsing clawback_claimable_balance operation from xdr") + } + + cb.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + balanceID, err := xdr.MarshalHex(result.BalanceId) + if err != nil { + return errors.New("error parsing BalanceID in claim_claimable_balance operation from xdr") + } + cb.BalanceID = balanceID + + return nil +} + +// Validate for ClawbackClaimableBalance validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (cb *ClawbackClaimableBalance) Validate() error { + var xdrBalanceID xdr.ClaimableBalanceId + err := xdr.SafeUnmarshalHex(cb.BalanceID, &xdrBalanceID) + if err != nil { + return NewValidationError("BalanceID", err.Error()) + } + + return nil +} + +// GetSourceAccount returns the source account of the operation, or nil if not +// set. 
+func (cb *ClawbackClaimableBalance) GetSourceAccount() string {
+	return cb.SourceAccount
+}
diff --git a/txnbuild/clawback_claimable_balance_test.go b/txnbuild/clawback_claimable_balance_test.go
new file mode 100644
index 0000000000..9ff2914a0e
--- /dev/null
+++ b/txnbuild/clawback_claimable_balance_test.go
@@ -0,0 +1,22 @@
+package txnbuild
+
+import (
+	"testing"
+)
+
+func TestClawbackClaimableBalanceRoundTrip(t *testing.T) {
+	claimClaimableBalance := &ClawbackClaimableBalance{
+		SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H",
+		BalanceID:     "00000000929b20b72e5890ab51c24f1cc46fa01c4f318d8d33367d24dd614cfdf5491072",
+	}
+
+	testOperationsMarshallingRoundtrip(t, []Operation{claimClaimableBalance}, false)
+
+	// with muxed accounts
+	claimClaimableBalance = &ClawbackClaimableBalance{
+		SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK",
+		BalanceID:     "00000000929b20b72e5890ab51c24f1cc46fa01c4f318d8d33367d24dd614cfdf5491072",
+	}
+
+	testOperationsMarshallingRoundtrip(t, []Operation{claimClaimableBalance}, true)
+}
diff --git a/txnbuild/clawback_test.go b/txnbuild/clawback_test.go
new file mode 100644
index 0000000000..dab757c5a3
--- /dev/null
+++ b/txnbuild/clawback_test.go
@@ -0,0 +1,101 @@
+package txnbuild
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestClawbackValidateFrom(t *testing.T) {
+	kp0 := newKeypair0()
+	sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898))
+
+	clawback := Clawback{
+		From:   "",
+		Amount: "10",
+		Asset:  CreditAsset{"", kp0.Address()},
+	}
+
+	_, err := NewTransaction(
+		TransactionParams{
+			SourceAccount:        &sourceAccount,
+			IncrementSequenceNum: false,
+			Operations:           []Operation{&clawback},
+			BaseFee:              MinBaseFee,
+			Timebounds:           NewInfiniteTimeout(),
+		},
+	)
+	if assert.Error(t, err) {
+		expected := "validation failed for *txnbuild.Clawback operation: Field: From"
+		assert.Contains(t, err.Error(), expected)
+	}
+}
+
+func TestClawbackValidateAmount(t *testing.T) {
+	kp0 := newKeypair0()
+	sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898))
+
+	clawback := Clawback{
+		From:   "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H",
+		Amount: "ten",
+		Asset:  CreditAsset{"", kp0.Address()},
+	}
+
+	_, err := NewTransaction(
+		TransactionParams{
+			SourceAccount:        &sourceAccount,
+			IncrementSequenceNum: false,
+			Operations:           []Operation{&clawback},
+			BaseFee:              MinBaseFee,
+			Timebounds:           NewInfiniteTimeout(),
+		},
+	)
+	if assert.Error(t, err) {
+		expected := "validation failed for *txnbuild.Clawback operation: Field: Amount, Error: invalid amount format: ten"
+		assert.Contains(t, err.Error(), expected)
+	}
+}
+
+func TestClawbackValidateAsset(t *testing.T) {
+	kp0 := newKeypair0()
+	sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898))
+
+	clawback := Clawback{
+		From:   "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H",
+		Amount: "10",
+		Asset:  CreditAsset{},
+	}
+
+	_, err := NewTransaction(
+		TransactionParams{
+			SourceAccount:        &sourceAccount,
+			IncrementSequenceNum: false,
+			Operations:           []Operation{&clawback},
+			BaseFee:              MinBaseFee,
+			Timebounds:           NewInfiniteTimeout(),
+		},
+	)
+	if assert.Error(t, err) {
+		expected := "validation failed for *txnbuild.Clawback operation: Field: Asset, Error: asset code length must be between 1 and 12 characters"
+		assert.Contains(t, err.Error(), expected)
+	}
+}
+
+func TestClawbackRoundTrip(t *testing.T) {
+	clawback := Clawback{
+		SourceAccount:
"GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + From: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Amount: "10.0000000", + Asset: CreditAsset{"USD", "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}, + } + testOperationsMarshallingRoundtrip(t, []Operation{&clawback}, false) + + // with muxed accounts + clawback = Clawback{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + From: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Amount: "10.0000000", + Asset: CreditAsset{"USD", "GCXKG6RN4ONIEPCMNFB732A436Z5PNDSRLGWK7GBLCMQLIFO4S7EYWVU"}, + } + testOperationsMarshallingRoundtrip(t, []Operation{&clawback}, true) +} diff --git a/txnbuild/cmd/demo/cmd/init.go b/txnbuild/cmd/demo/cmd/init.go new file mode 100644 index 0000000000..382fcf1d5c --- /dev/null +++ b/txnbuild/cmd/demo/cmd/init.go @@ -0,0 +1,39 @@ +package cmd + +import ( + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/stellar/go/clients/horizonclient" + demo "github.com/stellar/go/txnbuild/cmd/demo/operations" +) + +// initCmd represents the init command +var initCmd = &cobra.Command{ + Use: "init", + Short: "Create and fund some demo accounts on the TestNet", + Long: `This command creates four test accounts for use with further operations.`, + Run: func(cmd *cobra.Command, args []string) { + log.Info("Initialising TestNet accounts...") + keys := demo.InitKeys(4) + client := horizonclient.DefaultTestNetClient + + demo.Initialise(client, keys) + fmt.Println("Initialisation complete.") + }, +} + +func init() { + rootCmd.AddCommand(initCmd) + + // Here you will define your flags and configuration settings. + + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // initCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // initCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} diff --git a/txnbuild/cmd/demo/cmd/reset.go b/txnbuild/cmd/demo/cmd/reset.go new file mode 100644 index 0000000000..8e68a23c0c --- /dev/null +++ b/txnbuild/cmd/demo/cmd/reset.go @@ -0,0 +1,40 @@ +package cmd + +import ( + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "github.com/stellar/go/clients/horizonclient" + demo "github.com/stellar/go/txnbuild/cmd/demo/operations" +) + +// resetCmd represents the reset command +var resetCmd = &cobra.Command{ + Use: "reset", + Short: "Reset the state of all demo accounts on the TestNet", + Long: `Run this command before trying other commands in order to have a clean slate +for testing.`, + Run: func(cmd *cobra.Command, args []string) { + log.Info("Resetting TestNet state...") + keys := demo.InitKeys(4) + client := horizonclient.DefaultTestNetClient + + demo.Reset(client, keys) + fmt.Println("Reset complete.") + }, +} + +func init() { + rootCmd.AddCommand(resetCmd) + + // Here you will define your flags and configuration settings. 
+ + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // resetCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // resetCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} diff --git a/txnbuild/cmd/demo/cmd/root.go b/txnbuild/cmd/demo/cmd/root.go new file mode 100644 index 0000000000..81846f78fa --- /dev/null +++ b/txnbuild/cmd/demo/cmd/root.go @@ -0,0 +1,83 @@ +package cmd + +import ( + "fmt" + "os" + + log "github.com/sirupsen/logrus" + + homedir "github.com/mitchellh/go-homedir" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var cfgFile string +var verbose bool + +// rootCmd represents the base command when called without any subcommands +var rootCmd = &cobra.Command{ + Use: "demo", + Short: "Exercise the Stellar Go SDK (horizonclient + txnbuild)", + Long: `This CLI is a working demonstration of the Stellar Go SDK. The SDK is used to interact +with the Stellar network. It can retrieve information about the state of the network, and can +submit _transactions_, made up of _operations_, to change the state of the network. + +Try out the different commands here to see the SDK in action, then browse the source code in +/operations/demo.go to see how the SDK is used.`, + // Uncomment the following line if your bare application + // has an action associated with it: + // Run: func(cmd *cobra.Command, args []string) { }, +} + +// Execute adds all child commands to the root command and sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func init() { + cobra.OnInitialize(initConfig) + + // Here you will define your flags and configuration settings. + // Cobra supports persistent flags, which, if defined here, + // will be global for your application. + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.demo.yaml)") + rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging") + + // Cobra also supports local flags, which will only run + // when this action is called directly. + // rootCmd.Flags().BoolP("verbose", "v", false, "Enable verbose logging") +} + +// initConfig reads in config file and ENV variables if set. +func initConfig() { + if cfgFile != "" { + // Use config file from the flag. + viper.SetConfigFile(cfgFile) + } else { + // Find home directory. + home, err := homedir.Dir() + if err != nil { + fmt.Println(err) + os.Exit(1) + } + + // Search config in home directory with name ".demo" (without extension). + viper.AddConfigPath(home) + viper.SetConfigName(".demo") + } + + viper.AutomaticEnv() // read in environment variables that match + + // If a config file is found, read it in. 
+ if err := viper.ReadInConfig(); err == nil { + fmt.Println("Using config file:", viper.ConfigFileUsed()) + } + + if verbose { + log.SetLevel(log.DebugLevel) + } +} diff --git a/txnbuild/cmd/demo/cmd/txerror.go b/txnbuild/cmd/demo/cmd/txerror.go new file mode 100644 index 0000000000..7db945967d --- /dev/null +++ b/txnbuild/cmd/demo/cmd/txerror.go @@ -0,0 +1,40 @@ +package cmd + +import ( + "fmt" + + log "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + + "github.com/stellar/go/clients/horizonclient" + demo "github.com/stellar/go/txnbuild/cmd/demo/operations" +) + +// txerrorCmd represents the txerror command +var txerrorCmd = &cobra.Command{ + Use: "txerror", + Short: "Submit a purposefully invalid transaction", + Long: `This command submits an invalid transaction, in order to demonstrate a Horizon error return.`, + Run: func(cmd *cobra.Command, args []string) { + log.Info("Demonstrating a bad transaction response...") + keys := demo.InitKeys(4) + client := horizonclient.DefaultTestNetClient + + demo.TXError(client, keys) + fmt.Println("Transaction complete.") + }, +} + +func init() { + rootCmd.AddCommand(txerrorCmd) + + // Here you will define your flags and configuration settings. + + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // txerrorCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // txerrorCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} diff --git a/txnbuild/cmd/demo/main.go b/txnbuild/cmd/demo/main.go new file mode 100644 index 0000000000..01a87df777 --- /dev/null +++ b/txnbuild/cmd/demo/main.go @@ -0,0 +1,10 @@ +// Demo is an interactive demonstration of the Go SDK using the Stellar TestNet. +package main + +import ( + "github.com/stellar/go/txnbuild/cmd/demo/cmd" +) + +func main() { + cmd.Execute() +} diff --git a/txnbuild/cmd/demo/operations/demo.go b/txnbuild/cmd/demo/operations/demo.go new file mode 100644 index 0000000000..9621773603 --- /dev/null +++ b/txnbuild/cmd/demo/operations/demo.go @@ -0,0 +1,524 @@ +// Package demo is an interactive demonstration of the Go SDK using the Stellar TestNet. +package demo + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + + log "github.com/sirupsen/logrus" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/network" + hProtocol "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/txnbuild" + "github.com/stellar/go/xdr" + + "github.com/stellar/go/keypair" +) + +// The account address of the TestNet "friendbot" +const friendbotAddress = "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR" + +// The local file where your generated demo account keys will be stored +// For convenience, the address is also stored so you can look up accounts on the network +const accountsFile = "demo.keys" + +// Account represents a Stellar account for this demo. +type Account struct { + Seed string `json:"name"` + Address string `json:"address"` + HAccount *hProtocol.Account `json:"account"` + Keypair *keypair.Full `json:"keypair"` + Exists bool `json:"exists"` +} + +// InitKeys creates n random new keypairs, storing them in a local file. If the file exists, +// InitKeys reads the file instead to construct the keypairs (and ignores n). 
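Before the implementation below, a minimal sketch of how the Cobra commands above drive this package end to end, using the same TestNet client and four-keypair set as the init, reset and txerror commands (this program is illustrative only):

```go
package main

import (
	"github.com/stellar/go/clients/horizonclient"
	demo "github.com/stellar/go/txnbuild/cmd/demo/operations"
)

func main() {
	keys := demo.InitKeys(4)                     // create or reload demo.keys
	client := horizonclient.DefaultTestNetClient // all demo traffic targets TestNet

	demo.Initialise(client, keys) // "init":    fund the accounts
	demo.TXError(client, keys)    // "txerror": submit a deliberately bad transaction
	demo.Reset(client, keys)      // "reset":   merge everything back to friendbot
}
```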
+func InitKeys(n int) []Account { + accounts := make([]Account, n) + + fh, err := os.Open(accountsFile) + if os.IsNotExist(err) { + // Create the accounts and record them in a file + log.Info("Accounts file not found - creating new keypairs...") + for i := 0; i < n; i++ { + accounts[i] = createKeypair() + } + + jsonAccounts, err2 := json.MarshalIndent(accounts, "", " ") + dieIfError("problem marshalling json accounts", err2) + err = ioutil.WriteFile(accountsFile, jsonAccounts, 0644) + dieIfError("problem writing json accounts file", err) + log.Info("Wrote keypairs to local file ", accountsFile) + + return accounts + } + + // Read the file and create keypairs + log.Infof("Found accounts file %s...", accountsFile) + defer fh.Close() + bytes, err := ioutil.ReadAll(fh) + dieIfError("problem converting json file to bytes", err) + json.Unmarshal(bytes, &accounts) + + // Create the keypair objects + for i, k := range accounts { + kp, err := keypair.Parse(k.Seed) + dieIfError("keypair didn't parse!", err) + accounts[i].Keypair = kp.(*keypair.Full) + } + + return accounts +} + +// Reset is a command that removes all test accounts created by this demo. All funds are +// transferred back to Friendbot using the account merge operation. +func Reset(client *horizonclient.Client, keys []Account) { + keys = loadAccounts(client, keys) + for _, k := range keys { + if !k.Exists { + log.Infof("Account %s not found - skipping further operations on it...", k.Address) + continue + } + + // It exists - so we will proceed to deconstruct any existing account entries, and then merge it + // See https://developers.stellar.org/docs/glossary/ledger/#ledger-entries + log.Info("Found testnet account with ID:", k.HAccount.ID) + + // Find any offers that need deleting... + offerRequest := horizonclient.OfferRequest{ + ForAccount: k.Address, + Order: horizonclient.OrderDesc, + } + offers, err := client.Offers(offerRequest) + dieIfError("error while getting offers", err) + log.Infof("Account %s has %v offers...", k.Address, len(offers.Embedded.Records)) + + // ...and delete them + for _, o := range offers.Embedded.Records { + log.Info(" ", o) + txe, err := deleteOffer(k.HAccount, o.ID, k) + dieIfError("problem building deleteOffer op", err) + log.Infof("Deleting offer %d...", o.ID) + resp := submit(client, txe) + log.Debug(resp) + } + + // Find any authorised trustlines on this account... + log.Infof("Account %s has %d balances...", k.Address, len(k.HAccount.Balances)) + + // ...and delete them + for _, b := range k.HAccount.Balances { + // Native balances don't have trustlines + if b.Type == "native" { + continue + } + asset := txnbuild.CreditAsset{ + Code: b.Code, + Issuer: b.Issuer, + } + + // Send the asset back to the issuer... + log.Infof("Sending %v of surplus asset %s:%s back to issuer...", b.Balance, asset.Code, asset.Issuer) + txe, err := payment(k.HAccount, asset.Issuer, b.Balance, asset, k) + dieIfError("problem building payment op", err) + resp := submit(client, txe) + log.Debug(resp) + + // Delete the now-empty trustline... + log.Infof("Deleting trustline for asset %s:%s...", b.Code, b.Issuer) + ctAsset, err := asset.ToChangeTrustAsset() + dieIfError("problem building deleteTrustline op", err) + txe, err = deleteTrustline(k.HAccount, ctAsset, k) + dieIfError("problem building deleteTrustline op", err) + resp = submit(client, txe) + log.Debug(resp) + } + + // Find any data entries on this account... 
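+		// (Each entry is removed by submitting a ManageData op with no Value for its key; see deleteData below.)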
+		log.Infof("Account %s has %d data entries...", k.Address, len(k.HAccount.Data))
+		for dataKey := range k.HAccount.Data {
+			decodedV, _ := k.HAccount.GetData(dataKey)
+			log.Infof("Deleting data entry '%s' -> '%s'...", dataKey, decodedV)
+			txe, err := deleteData(k.HAccount, dataKey, k)
+			dieIfError("problem building manageData op", err)
+			resp := submit(client, txe)
+			log.Debug(resp)
+		}
+	}
+
+	// Finally, the accounts may be merged
+	for _, k := range keys {
+		if !k.Exists {
+			continue
+		}
+		log.Infof("Merging account %s back to friendbot (%s)...", k.Address, friendbotAddress)
+		txe, err := mergeAccount(k.HAccount, friendbotAddress, k)
+		dieIfError("problem building mergeAccount op", err)
+		resp := submit(client, txe)
+		log.Debug(resp)
+	}
+}
+
+// Initialise is a command that funds an initial set of accounts for use with other demo operations.
+// The first account is funded from Friendbot; subsequent accounts are created and funded from this
+// first account.
+func Initialise(client *horizonclient.Client, keys []Account) {
+	// Fund the first account from friendbot
+	log.Infof("Funding account %s from friendbot...", keys[0].Address)
+	_, err := client.Fund(keys[0].Address)
+	dieIfError(fmt.Sprintf("couldn't fund account %s from friendbot", keys[0].Address), err)
+	keys[0].HAccount = loadAccount(client, keys[0].Address)
+	keys[0].Exists = true
+
+	// Fund the others using the create account operation
+	for i := 1; i < len(keys); i++ {
+		log.Infof("Funding account %s from account %s...", keys[i].Address, keys[0].Address)
+		txe, err := createAccount(keys[0].HAccount, keys[i].Address, keys[0])
+		dieIfError("problem building createAccount op", err)
+		resp := submit(client, txe)
+		log.Debug(resp)
+	}
+}
+
+// TXError is a command that deliberately creates a bad transaction to trigger an error response
+// from Horizon. This code demonstrates how to retrieve and inspect the error.
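A minimal sketch of the inspection pattern this command demonstrates, assuming submission fails and the error is the horizonclient error type that the submit helper further below asserts on (printFailure is a hypothetical helper, not part of this change):

```go
// printFailure shows the same inspection steps as printHorizonError later in this file.
func printFailure(err error) {
	if hErr, ok := err.(*horizonclient.Error); ok {
		log.Println("problem:", hErr.Problem.Title, "-", hErr.Problem.Detail)
		if codes, cErr := hErr.ResultCodes(); cErr == nil {
			log.Println("transaction code:", codes.TransactionCode)
			log.Println("operation codes:", codes.OperationCodes)
		}
	}
}
```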
+func TXError(client *horizonclient.Client, keys []Account) { + keys = loadAccounts(client, keys) + // Create a bump seq operation + // Set the seq number to -1 (invalid) + // Create the transaction + txe, err := bumpSequence(keys[0].HAccount, -1, keys[0]) + dieIfError("problem building createAccount op", err) + + // Submit + resp := submit(client, txe) + + // Inspect and print error + log.Info(resp) +} + +/***** Examples of operation building follow *****/ + +func bumpSequence(source *hProtocol.Account, seqNum int64, signer Account) (string, error) { + bumpSequenceOp := txnbuild.BumpSequence{ + BumpTo: seqNum, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: source, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&bumpSequenceOp}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimeout(300), + }, + ) + if err != nil { + return "", errors.Wrap(err, "couldn't build transaction") + } + tx, err = tx.Sign(network.TestNetworkPassphrase, signer.Keypair) + if err != nil { + return "", errors.Wrap(err, "couldn't sign transaction") + } + + txeBase64, err := tx.Base64() + if err != nil { + return txeBase64, errors.Wrap(err, "couldn't serialise transaction") + } + return txeBase64, nil +} + +func createAccount(source *hProtocol.Account, dest string, signer Account) (string, error) { + createAccountOp := txnbuild.CreateAccount{ + Destination: dest, + Amount: "100", + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: source, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&createAccountOp}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimeout(300), + }, + ) + if err != nil { + return "", errors.Wrap(err, "couldn't build transaction") + } + tx, err = tx.Sign(network.TestNetworkPassphrase, signer.Keypair) + if err != nil { + return "", errors.Wrap(err, "couldn't sign transaction") + } + + txeBase64, err := tx.Base64() + if err != nil { + return txeBase64, errors.Wrap(err, "couldn't serialise transaction") + } + return txeBase64, nil +} + +func deleteData(source *hProtocol.Account, dataKey string, signer Account) (string, error) { + manageDataOp := txnbuild.ManageData{ + Name: dataKey, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: source, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&manageDataOp}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimeout(300), + }, + ) + if err != nil { + return "", errors.Wrap(err, "couldn't build transaction") + } + tx, err = tx.Sign(network.TestNetworkPassphrase, signer.Keypair) + if err != nil { + return "", errors.Wrap(err, "couldn't sign transaction") + } + + txeBase64, err := tx.Base64() + if err != nil { + return txeBase64, errors.Wrap(err, "couldn't serialise transaction") + } + return txeBase64, nil +} + +func payment(source *hProtocol.Account, dest, amount string, asset txnbuild.Asset, signer Account) (string, error) { + paymentOp := txnbuild.Payment{ + Destination: dest, + Amount: amount, + Asset: asset, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: source, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&paymentOp}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimeout(300), + }, + ) + if err != nil { + return "", errors.Wrap(err, "couldn't build transaction") + } + tx, err = tx.Sign(network.TestNetworkPassphrase, signer.Keypair) + if err != nil { + return "", errors.Wrap(err, 
"couldn't sign transaction") + } + + txeBase64, err := tx.Base64() + if err != nil { + return txeBase64, errors.Wrap(err, "couldn't serialise transaction") + } + return txeBase64, nil +} + +func deleteTrustline(source *hProtocol.Account, asset txnbuild.ChangeTrustAsset, signer Account) (string, error) { + deleteTrustline := txnbuild.RemoveTrustlineOp(asset) + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: source, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&deleteTrustline}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimeout(300), + }, + ) + if err != nil { + return "", errors.Wrap(err, "couldn't build transaction") + } + tx, err = tx.Sign(network.TestNetworkPassphrase, signer.Keypair) + if err != nil { + return "", errors.Wrap(err, "couldn't sign transaction") + } + + txeBase64, err := tx.Base64() + if err != nil { + return txeBase64, errors.Wrap(err, "couldn't serialise transaction") + } + return txeBase64, nil +} + +func deleteOffer(source *hProtocol.Account, offerID int64, signer Account) (string, error) { + deleteOffer, err := txnbuild.DeleteOfferOp(offerID) + if err != nil { + return "", errors.Wrap(err, "building offer") + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: source, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&deleteOffer}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimeout(300), + }, + ) + if err != nil { + return "", errors.Wrap(err, "couldn't build transaction") + } + tx, err = tx.Sign(network.TestNetworkPassphrase, signer.Keypair) + if err != nil { + return "", errors.Wrap(err, "couldn't sign transaction") + } + + txeBase64, err := tx.Base64() + if err != nil { + return txeBase64, errors.Wrap(err, "couldn't serialise transaction") + } + return txeBase64, nil +} + +func mergeAccount(source *hProtocol.Account, destAddress string, signer Account) (string, error) { + accountMerge := txnbuild.AccountMerge{ + Destination: destAddress, + } + + tx, err := txnbuild.NewTransaction( + txnbuild.TransactionParams{ + SourceAccount: source, + IncrementSequenceNum: true, + Operations: []txnbuild.Operation{&accountMerge}, + BaseFee: txnbuild.MinBaseFee, + Timebounds: txnbuild.NewTimeout(300), + }, + ) + if err != nil { + return "", errors.Wrap(err, "couldn't build transaction") + } + tx, err = tx.Sign(network.TestNetworkPassphrase, signer.Keypair) + if err != nil { + return "", errors.Wrap(err, "couldn't sign transaction") + } + + txeBase64, err := tx.Base64() + if err != nil { + return txeBase64, errors.Wrap(err, "couldn't serialise transaction") + } + return txeBase64, nil +} + +// createKeypair constructs a new random keypair, and returns it in a DemoAccount. +func createKeypair() Account { + pair, err := keypair.Random() + if err != nil { + log.Fatal(err) + } + log.Info("Seed:", pair.Seed()) + log.Info("Address:", pair.Address()) + + return Account{ + Seed: pair.Seed(), + Address: pair.Address(), + Keypair: pair, + } +} + +// loadAccounts looks up each account in the provided list and stores the returned information. +func loadAccounts(client *horizonclient.Client, accounts []Account) []Account { + for i, a := range accounts { + accounts[i].HAccount = loadAccount(client, a.Address) + accounts[i].Exists = true + } + + return accounts +} + +// loadAccount is an example of how to get an account's details from Horizon. 
+func loadAccount(client *horizonclient.Client, address string) *hProtocol.Account { + accountRequest := horizonclient.AccountRequest{AccountID: address} + horizonSourceAccount, err := client.AccountDetail(accountRequest) + if err != nil { + dieIfError(fmt.Sprintf("couldn't get account detail for %s", address), err) + } + + return &horizonSourceAccount +} + +func submit(client *horizonclient.Client, txeBase64 string) (resp hProtocol.Transaction) { + resp, err := client.SubmitTransactionXDR(txeBase64) + if err != nil { + hError := err.(*horizonclient.Error) + err = printHorizonError(hError) + dieIfError("couldn't print Horizon eror", err) + os.Exit(1) + } + + return +} + +func dieIfError(desc string, err error) { + if err != nil { + log.Fatalf("Fatal error (%s): %s", desc, err) + } +} + +// printHorizonError is an example of how to inspect the error returned from Horizon. +func printHorizonError(hError *horizonclient.Error) error { + problem := hError.Problem + log.Println("Error type:", problem.Type) + log.Println("Error title:", problem.Title) + log.Println("Error status:", problem.Status) + log.Println("Error detail:", problem.Detail) + + resultCodes, err := hError.ResultCodes() + if err != nil { + return errors.Wrap(err, "Couldn't read ResultCodes") + } + log.Println("TransactionCode:", resultCodes.TransactionCode) + log.Println("OperationCodes:") + for _, code := range resultCodes.OperationCodes { + log.Println(" ", code) + } + + resultString, err := hError.ResultString() + if err != nil { + return errors.Wrap(err, "Couldn't read ResultString") + } + log.Println("TransactionResult XDR (base 64):", resultString) + + envelope, err := hError.Envelope() + if err != nil { + return errors.Wrap(err, "Couldn't read Envelope") + } + + aid := envelope.SourceAccount().MustEd25519() + decodedAID, err := strkey.Encode(strkey.VersionByteAccountID, aid[:]) + if err != nil { + log.Println("Couldn't decode account ID:", err) + } else { + log.Printf("SourceAccount (%s): %s", envelope.SourceAccount().Type, decodedAID) + } + log.Println("Fee:", envelope.Fee()) + log.Println("SequenceNumber:", envelope.SeqNum()) + log.Println("TimeBounds:", envelope.TimeBounds()) + log.Println("Memo:", envelope.Memo()) + log.Println("Memo.Type:", envelope.Memo().Type) + if envelope.Memo().Type != xdr.MemoTypeMemoNone { + log.Println("Memo.Text:", envelope.Memo().Text) + log.Println("Memo.Id:", envelope.Memo().Id) + log.Println("Memo.Hash:", envelope.Memo().Hash) + log.Println("Memo.RetHash:", envelope.Memo().RetHash) + } + log.Println("Operations:", envelope.Operations()) + + for _, op := range envelope.Operations() { + log.Println("Operations.SourceAccount:", op.SourceAccount) + log.Println("Operations.Body.Type:", op.Body.Type) + } + // TODO is Ext a useful field which we should print? + // log.Println("Ext:", txe.Ext) + + return nil +} diff --git a/txnbuild/create_account.go b/txnbuild/create_account.go new file mode 100644 index 0000000000..899402b40b --- /dev/null +++ b/txnbuild/create_account.go @@ -0,0 +1,76 @@ +package txnbuild + +import ( + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// CreateAccount represents the Stellar create account operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type CreateAccount struct { + Destination string + Amount string + SourceAccount string +} + +// BuildXDR for CreateAccount returns a fully configured XDR Operation. 
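Like every operation in this package, CreateAccount implements BuildXDR, FromXDR and Validate; a minimal sketch of the XDR round trip the tests below rely on (the destination address is reused from the examples later in this change):

```go
op := CreateAccount{
	Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z",
	Amount:      "10",
}

xdrOp, err := op.BuildXDR() // txnbuild -> xdr.Operation
if err != nil {
	panic(err)
}

var parsed CreateAccount
if err := parsed.FromXDR(xdrOp); err != nil { // xdr.Operation -> txnbuild
	panic(err)
}
// parsed.Destination == op.Destination; parsed.Amount == "10.0000000" (normalised by amount.String)
```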
+func (ca *CreateAccount) BuildXDR() (xdr.Operation, error) { + var xdrOp xdr.CreateAccountOp + + err := xdrOp.Destination.SetAddress(ca.Destination) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set destination address") + } + + xdrOp.StartingBalance, err = amount.Parse(ca.Amount) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse amount") + } + + opType := xdr.OperationTypeCreateAccount + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, ca.SourceAccount) + + return op, nil +} + +// FromXDR for CreateAccount initialises the txnbuild struct from the corresponding xdr Operation. +func (ca *CreateAccount) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetCreateAccountOp() + if !ok { + return errors.New("error parsing create_account operation from xdr") + } + + ca.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + ca.Destination = result.Destination.Address() + ca.Amount = amount.String(result.StartingBalance) + + return nil +} + +// Validate for CreateAccount validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (ca *CreateAccount) Validate() error { + err := validateStellarPublicKey(ca.Destination) + if err != nil { + return NewValidationError("Destination", err.Error()) + } + + err = validateAmount(ca.Amount) + if err != nil { + return NewValidationError("Amount", err.Error()) + } + + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. +func (ca *CreateAccount) GetSourceAccount() string { + return ca.SourceAccount +} diff --git a/txnbuild/create_account_test.go b/txnbuild/create_account_test.go new file mode 100644 index 0000000000..b715dca70d --- /dev/null +++ b/txnbuild/create_account_test.go @@ -0,0 +1,74 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCreateAccountValidateDestination(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + + createAccount := CreateAccount{ + Destination: "", + Amount: "43", + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.CreateAccount operation: Field: Destination, Error: public key is undefined" + assert.Contains(t, err.Error(), expected) + } +} + +func TestCreateAccountValidateAmount(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + + createAccount := CreateAccount{ + Destination: "GDYNXQFHU6W5RBW2CCCDDAAU3TMTSU2RMGIBM6HGHAR4NJJKY3IJETHT", + Amount: "", + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.CreateAccount operation: Field: Amount, Error: invalid amount format" + assert.Contains(t, err.Error(), expected) + } +} + +func TestCreateAccountRoundtrip(t *testing.T) { + createAccount := 
CreateAccount{ + SourceAccount: "GDYNXQFHU6W5RBW2CCCDDAAU3TMTSU2RMGIBM6HGHAR4NJJKY3IJETHT", + Destination: "GDYNXQFHU6W5RBW2CCCDDAAU3TMTSU2RMGIBM6HGHAR4NJJKY3IJETHT", + Amount: "1.0000000", + } + testOperationsMarshallingRoundtrip(t, []Operation{&createAccount}, false) + + // with muxed accounts + createAccount = CreateAccount{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Destination: "GDYNXQFHU6W5RBW2CCCDDAAU3TMTSU2RMGIBM6HGHAR4NJJKY3IJETHT", + Amount: "1.0000000", + } + testOperationsMarshallingRoundtrip(t, []Operation{&createAccount}, true) + +} diff --git a/txnbuild/create_claimable_balance.go b/txnbuild/create_claimable_balance.go new file mode 100644 index 0000000000..6fdf3c5fd7 --- /dev/null +++ b/txnbuild/create_claimable_balance.go @@ -0,0 +1,194 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite + +package txnbuild + +import ( + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// CreateClaimableBalance represents the Stellar create claimable balance operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type CreateClaimableBalance struct { + Amount string + Asset Asset + Destinations []Claimant + SourceAccount string +} + +// Claimant represents a claimable balance claimant +type Claimant struct { + Destination string + Predicate xdr.ClaimPredicate +} + +var ( + UnconditionalPredicate = xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + } +) + +// NewClaimant returns a new Claimant, if predicate is nil then a Claimant with +// unconditional predicate is returned. +func NewClaimant(destination string, predicate *xdr.ClaimPredicate) Claimant { + if predicate == nil { + predicate = &UnconditionalPredicate + } + + return Claimant{ + Destination: destination, + Predicate: *predicate, + } +} + +// AndPredicate returns a xdr.ClaimPredicate +func AndPredicate(left xdr.ClaimPredicate, right xdr.ClaimPredicate) xdr.ClaimPredicate { + predicates := []xdr.ClaimPredicate{left, right} + return xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateAnd, + AndPredicates: &predicates, + } +} + +// OrPredicate returns a xdr.ClaimPredicate +func OrPredicate(left xdr.ClaimPredicate, right xdr.ClaimPredicate) xdr.ClaimPredicate { + predicates := []xdr.ClaimPredicate{left, right} + return xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateOr, + OrPredicates: &predicates, + } +} + +// NotPredicate returns a new predicate inverting the passed in predicate +func NotPredicate(pred xdr.ClaimPredicate) xdr.ClaimPredicate { + innerPred := &pred // workaround to keep API the same as Or/And + return xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateNot, + NotPredicate: &innerPred, + } +} + +// BeforeAbsoluteTimePredicate returns a Before Absolute Time xdr.ClaimPredicate +// +// This predicate will be fulfilled if the closing time of the ledger that +// includes the CreateClaimableBalance operation is less than this (absolute) +// Unix timestamp. 
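The predicate constructors above compose freely; a minimal sketch (the one-hour window and the two address variables are illustrative placeholders):

```go
window := BeforeRelativeTimePredicate(3600) // claimable for one hour after creation
reclaim := NotPredicate(window)             // afterwards only the creator may reclaim

claimants := []Claimant{
	NewClaimant(recipientAddress, &window),
	NewClaimant(creatorAddress, &reclaim),
}
```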
+func BeforeAbsoluteTimePredicate(epochSeconds int64) xdr.ClaimPredicate { + absBefore := xdr.Int64(epochSeconds) + return xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime, + AbsBefore: &absBefore, + } +} + +// BeforeRelativeTimePredicate returns a Before Relative Time xdr.ClaimPredicate +// +// This predicate will be fulfilled if the closing time of the ledger that +// includes the CreateClaimableBalance operation plus this relative time delta +// (in seconds) is less than the current time. +func BeforeRelativeTimePredicate(secondsBefore int64) xdr.ClaimPredicate { + relBefore := xdr.Int64(secondsBefore) + return xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateBeforeRelativeTime, + RelBefore: &relBefore, + } +} + +// BuildXDR for CreateClaimableBalance returns a fully configured XDR Operation. +func (cb *CreateClaimableBalance) BuildXDR() (xdr.Operation, error) { + xdrAsset, err := cb.Asset.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set XDR 'Asset' field") + } + xdrAmount, err := amount.Parse(cb.Amount) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse 'Amount'") + } + + claimants := []xdr.Claimant{} + + for _, d := range cb.Destinations { + c := xdr.Claimant{ + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Predicate: d.Predicate, + }, + } + err = c.V0.Destination.SetAddress(d.Destination) + if err != nil { + return xdr.Operation{}, errors.Wrapf(err, "failed to set destination address: %s", d.Destination) + } + claimants = append(claimants, c) + } + + xdrOp := xdr.CreateClaimableBalanceOp{ + Asset: xdrAsset, + Amount: xdrAmount, + Claimants: claimants, + } + + opType := xdr.OperationTypeCreateClaimableBalance + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, cb.SourceAccount) + return op, nil +} + +// FromXDR for CreateClaimableBalance initializes the txnbuild struct from the corresponding xdr Operation. +func (cb *CreateClaimableBalance) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetCreateClaimableBalanceOp() + if !ok { + return errors.New("error parsing create_claimable_balance operation from xdr") + } + + cb.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + for _, c := range result.Claimants { + claimant := c.MustV0() + cb.Destinations = append(cb.Destinations, Claimant{ + Destination: claimant.Destination.Address(), + Predicate: claimant.Predicate, + }) + } + + asset, err := assetFromXDR(result.Asset) + if err != nil { + return errors.Wrap(err, "error parsing asset in create_claimable_balance operation") + } + cb.Asset = asset + cb.Amount = amount.String(result.Amount) + + return nil +} + +// Validate for CreateClaimableBalance validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. 
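Once a transaction containing a CreateClaimableBalance has been built, the resulting balance ID can be computed locally; a minimal sketch mirroring the ClaimableBalanceID test later in this change (tx is the already-built transaction, and index 0 assumes the operation is the first in it):

```go
balanceID, err := tx.ClaimableBalanceID(0)
if err != nil {
	panic(err)
}
fmt.Println("claimable balance ID:", balanceID) // hex string, as asserted in the test below
```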
+func (cb *CreateClaimableBalance) Validate() error { + for _, d := range cb.Destinations { + err := validateStellarPublicKey(d.Destination) + if err != nil { + return NewValidationError("Destinations", err.Error()) + } + } + + err := validateAmount(cb.Amount) + if err != nil { + return NewValidationError("Amount", err.Error()) + } + + err = validateStellarAsset(cb.Asset) + if err != nil { + return NewValidationError("Asset", err.Error()) + } + + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. +func (cb *CreateClaimableBalance) GetSourceAccount() string { + return cb.SourceAccount +} diff --git a/txnbuild/create_claimable_balance_test.go b/txnbuild/create_claimable_balance_test.go new file mode 100644 index 0000000000..d6516c7983 --- /dev/null +++ b/txnbuild/create_claimable_balance_test.go @@ -0,0 +1,92 @@ +package txnbuild + +import ( + "testing" + "time" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stretchr/testify/assert" +) + +func TestCreateClaimableBalanceRoundTrip(t *testing.T) { + and := AndPredicate(BeforeAbsoluteTimePredicate(100), BeforeRelativeTimePredicate(10)) + createNativeBalance := &CreateClaimableBalance{ + Amount: "1234.0000000", + Asset: NativeAsset{}, + Destinations: []Claimant{ + NewClaimant(newKeypair1().Address(), &UnconditionalPredicate), + NewClaimant(newKeypair1().Address(), &and), + }, + } + + not := NotPredicate(UnconditionalPredicate) + or := OrPredicate(BeforeAbsoluteTimePredicate(100), BeforeRelativeTimePredicate(10)) + createAssetBalance := &CreateClaimableBalance{ + Amount: "99.0000000", + Asset: CreditAsset{ + Code: "COP", + Issuer: "GB56OJGSA6VHEUFZDX6AL2YDVG2TS5JDZYQJHDYHBDH7PCD5NIQKLSDO", + }, + Destinations: []Claimant{ + NewClaimant(newKeypair1().Address(), ¬), + NewClaimant(newKeypair1().Address(), &or), + }, + } + + testOperationsMarshallingRoundtrip(t, []Operation{createNativeBalance, createAssetBalance}, false) + + createNativeBalanceWithMuxedAcounts := &CreateClaimableBalance{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Amount: "1234.0000000", + Asset: NativeAsset{}, + Destinations: []Claimant{ + NewClaimant(newKeypair1().Address(), &UnconditionalPredicate), + NewClaimant(newKeypair1().Address(), &and), + }, + } + + testOperationsMarshallingRoundtrip(t, []Operation{createNativeBalanceWithMuxedAcounts}, true) +} + +func TestClaimableBalanceID(t *testing.T) { + A := "SCZANGBA5YHTNYVVV4C3U252E2B6P6F5T3U6MM63WBSBZATAQI3EBTQ4" + B := "GA2C5RFPE6GCKMY3US5PAB6UZLKIGSPIUKSLRB6Q723BM2OARMDUYEJ5" + + aKeys := keypair.MustParseFull(A) + aAccount := SimpleAccount{AccountID: aKeys.Address(), Sequence: 123} + + soon := time.Now().Add(time.Second * 60) + bCanClaim := BeforeRelativeTimePredicate(60) + aCanReclaim := NotPredicate(BeforeAbsoluteTimePredicate(soon.Unix())) + + claimants := []Claimant{ + NewClaimant(B, &bCanClaim), + NewClaimant(aKeys.Address(), &aCanReclaim), + } + + claimableBalanceEntry := CreateClaimableBalance{ + Destinations: claimants, + Asset: NativeAsset{}, + Amount: "420", + SourceAccount: "GB56OJGSA6VHEUFZDX6AL2YDVG2TS5JDZYQJHDYHBDH7PCD5NIQKLSDO", + } + + // Build and sign the transaction + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &aAccount, + IncrementSequenceNum: true, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + Operations: []Operation{&claimableBalanceEntry}, + }, + ) + assert.NoError(t, err) + tx, err = 
tx.Sign(network.TestNetworkPassphrase, aKeys) + assert.NoError(t, err) + + balanceId, err := tx.ClaimableBalanceID(0) + assert.NoError(t, err) + assert.Equal(t, "0000000095001252ab3b4d16adbfa5364ce526dfcda03cb2258b827edbb2e0450087be51", balanceId) +} diff --git a/txnbuild/create_passive_offer.go b/txnbuild/create_passive_offer.go new file mode 100644 index 0000000000..02ff86313f --- /dev/null +++ b/txnbuild/create_passive_offer.go @@ -0,0 +1,88 @@ +package txnbuild + +import ( + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// CreatePassiveSellOffer represents the Stellar create passive offer operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type CreatePassiveSellOffer struct { + Selling Asset + Buying Asset + Amount string + Price xdr.Price + SourceAccount string +} + +// BuildXDR for CreatePassiveSellOffer returns a fully configured XDR Operation. +func (cpo *CreatePassiveSellOffer) BuildXDR() (xdr.Operation, error) { + xdrSelling, err := cpo.Selling.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set XDR 'Selling' field") + } + + xdrBuying, err := cpo.Buying.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set XDR 'Buying' field") + } + + xdrAmount, err := amount.Parse(cpo.Amount) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse 'Amount'") + } + + xdrOp := xdr.CreatePassiveSellOfferOp{ + Selling: xdrSelling, + Buying: xdrBuying, + Amount: xdrAmount, + Price: cpo.Price, + } + + opType := xdr.OperationTypeCreatePassiveSellOffer + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, cpo.SourceAccount) + + return op, nil +} + +// FromXDR for CreatePassiveSellOffer initialises the txnbuild struct from the corresponding xdr Operation. +func (cpo *CreatePassiveSellOffer) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetCreatePassiveSellOfferOp() + if !ok { + return errors.New("error parsing create_passive_sell_offer operation from xdr") + } + + cpo.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + cpo.Amount = amount.String(result.Amount) + cpo.Price = result.Price + buyingAsset, err := assetFromXDR(result.Buying) + if err != nil { + return errors.Wrap(err, "error parsing buying_asset in create_passive_sell_offer operation") + } + cpo.Buying = buyingAsset + + sellingAsset, err := assetFromXDR(result.Selling) + if err != nil { + return errors.Wrap(err, "error parsing selling_asset in create_passive_sell_offer operation") + } + cpo.Selling = sellingAsset + return nil +} + +// Validate for CreatePassiveSellOffer validates the required struct fields. It returns an error if any +// of the fields are invalid. Otherwise, it returns nil. +func (cpo *CreatePassiveSellOffer) Validate() error { + return validatePassiveOffer(cpo.Buying, cpo.Selling, cpo.Amount, cpo.Price) +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. 
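The Price field is a raw XDR fraction rather than a decimal string; a minimal sketch of the two equivalent ways it is populated elsewhere in this change (price.MustParse appears in the manage-offer examples below):

```go
p1 := xdr.Price{N: 1, D: 100} // 0.01 expressed directly as numerator/denominator
p2 := price.MustParse("0.01") // same value via the price package
fmt.Println(p1 == p2)         // true
```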
+func (cpo *CreatePassiveSellOffer) GetSourceAccount() string { + return cpo.SourceAccount +} diff --git a/txnbuild/create_passive_offer_test.go b/txnbuild/create_passive_offer_test.go new file mode 100644 index 0000000000..d4d0efbea4 --- /dev/null +++ b/txnbuild/create_passive_offer_test.go @@ -0,0 +1,136 @@ +package txnbuild + +import ( + "github.com/stellar/go/xdr" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCreatePassiveSellOfferValidateBuyingAsset(t *testing.T) { + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761100)) + + createPassiveOffer := CreatePassiveSellOffer{ + Selling: NativeAsset{}, + Buying: CreditAsset{"ABCD", ""}, + Amount: "10", + Price: xdr.Price{1, 1}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createPassiveOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.CreatePassiveSellOffer operation: Field: Buying, Error: asset issuer: public key is undefined" + assert.Contains(t, err.Error(), expected) + } +} + +func TestCreatePassiveSellOfferValidateSellingAsset(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761100)) + + createPassiveOffer := CreatePassiveSellOffer{ + Selling: CreditAsset{"ABCD0123456789", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "10", + Price: xdr.Price{1, 1}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createPassiveOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := `validation failed for *txnbuild.CreatePassiveSellOffer operation: Field: Selling, Error: asset code length must be between 1 and 12 characters` + assert.Contains(t, err.Error(), expected) + } +} + +func TestCreatePassiveSellOfferValidateAmount(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761100)) + + createPassiveOffer := CreatePassiveSellOffer{ + Selling: CreditAsset{"ABCD", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "-3", + Price: xdr.Price{1, 1}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createPassiveOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := `validation failed for *txnbuild.CreatePassiveSellOffer operation: Field: Amount, Error: amount can not be negative` + assert.Contains(t, err.Error(), expected) + } +} + +func TestCreatePassiveSellOfferValidatePrice(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761100)) + + createPassiveOffer := CreatePassiveSellOffer{ + Selling: CreditAsset{"ABCD", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "3", + Price: xdr.Price{-1, 0}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createPassiveOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := `validation failed for *txnbuild.CreatePassiveSellOffer operation: Field: Price, Error: price 
denominator cannot be 0: -1/0`
+		assert.Contains(t, err.Error(), expected)
+	}
+}
+
+func TestCreatePassiveSellOfferPrice(t *testing.T) {
+	kp0 := newKeypair0()
+
+	offer := CreatePassiveSellOffer{
+		Selling:       CreditAsset{"ABCD", kp0.Address()},
+		Buying:        NativeAsset{},
+		Amount:        "1",
+		Price:         xdr.Price{1, 1000000000},
+		SourceAccount: kp0.Address(),
+	}
+
+	xdrOp, err := offer.BuildXDR()
+	assert.NoError(t, err)
+	expectedPrice := xdr.Price{N: 1, D: 1000000000}
+	assert.Equal(t, expectedPrice, xdrOp.Body.CreatePassiveSellOfferOp.Price)
+
+	parsed := CreatePassiveSellOffer{}
+	assert.NoError(t, parsed.FromXDR(xdrOp))
+	assert.Equal(t, offer.Price, parsed.Price)
+}
diff --git a/txnbuild/end_sponsoring_future_reserves.go b/txnbuild/end_sponsoring_future_reserves.go
new file mode 100644
index 0000000000..1ac4b9a2d7
--- /dev/null
+++ b/txnbuild/end_sponsoring_future_reserves.go
@@ -0,0 +1,49 @@
+//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite
+
+package txnbuild
+
+import (
+	"github.com/stellar/go/support/errors"
+	"github.com/stellar/go/xdr"
+)
+
+// EndSponsoringFutureReserves represents the Stellar end sponsoring future reserves operation. See
+// https://developers.stellar.org/docs/start/list-of-operations/
+type EndSponsoringFutureReserves struct {
+	SourceAccount string
+}
+
+// BuildXDR for EndSponsoringFutureReserves returns a fully configured XDR Operation.
+func (es *EndSponsoringFutureReserves) BuildXDR() (xdr.Operation, error) {
+	opType := xdr.OperationTypeEndSponsoringFutureReserves
+	body, err := xdr.NewOperationBody(opType, nil)
+	if err != nil {
+		return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody")
+	}
+	op := xdr.Operation{Body: body}
+	SetOpSourceAccount(&op, es.SourceAccount)
+
+	return op, nil
+}
+
+// FromXDR for EndSponsoringFutureReserves initializes the txnbuild struct from the corresponding xdr Operation.
+func (es *EndSponsoringFutureReserves) FromXDR(xdrOp xdr.Operation) error {
+	if xdrOp.Body.Type != xdr.OperationTypeEndSponsoringFutureReserves {
+		return errors.New("error parsing end_sponsoring_future_reserves operation from xdr")
+	}
+
+	es.SourceAccount = accountFromXDR(xdrOp.SourceAccount)
+	return nil
+}
+
+// Validate for EndSponsoringFutureReserves validates the required struct fields. It returns an error if any of the fields are
+// invalid. Otherwise, it returns nil.
+func (es *EndSponsoringFutureReserves) Validate() error {
+	return nil
+}
+
+// GetSourceAccount returns the source account of the operation, or the empty string if not
+// set.
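EndSponsoringFutureReserves is only meaningful as the closing half of a sponsorship "sandwich"; a minimal sketch, assuming the companion BeginSponsoringFutureReserves operation from this package (the sponsored address is a placeholder):

```go
ops := []Operation{
	// the sponsor's account is the transaction source
	&BeginSponsoringFutureReserves{SponsoredID: sponsoredAddress},
	&CreateAccount{Destination: sponsoredAddress, Amount: "0"},
	// the sponsored account closes the sandwich, so it must also sign the transaction
	&EndSponsoringFutureReserves{SourceAccount: sponsoredAddress},
}
```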
+func (es *EndSponsoringFutureReserves) GetSourceAccount() string { + return es.SourceAccount +} diff --git a/txnbuild/end_sponsoring_future_reserves_test.go b/txnbuild/end_sponsoring_future_reserves_test.go new file mode 100644 index 0000000000..8b5ab5cdde --- /dev/null +++ b/txnbuild/end_sponsoring_future_reserves_test.go @@ -0,0 +1,10 @@ +package txnbuild + +import "testing" + +func TestEndSponsoringFutureReservesRoundTrip(t *testing.T) { + withoutMuxedAccounts := &EndSponsoringFutureReserves{SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"} + testOperationsMarshallingRoundtrip(t, []Operation{withoutMuxedAccounts}, false) + withMuxedAccounts := &EndSponsoringFutureReserves{SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK"} + testOperationsMarshallingRoundtrip(t, []Operation{withMuxedAccounts}, true) +} diff --git a/txnbuild/example_test.go b/txnbuild/example_test.go new file mode 100644 index 0000000000..eea43fb3e0 --- /dev/null +++ b/txnbuild/example_test.go @@ -0,0 +1,1099 @@ +package txnbuild + +import ( + "fmt" + "github.com/stellar/go/price" + "github.com/stellar/go/xdr" + "time" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + horizonclient "github.com/stellar/go/txnbuild/examplehorizonclient" +) + +func ExampleInflation() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := Inflation{} + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAB6i5yxQAAAED9zR1l78yiBwd/o44RyE3XP7QT57VmI90qE46TjfncYyqlOaIRWpkh3qouTjV5IRPVGo6+bFWV40H1HE087FgA +} + +func ExampleCreateAccount() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
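+			// For example, NewTimeout(300) (as used in the demo package above) limits the
+			// transaction to a five-minute validity window instead of an infinite one.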
+ }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAAHqLnLFAAAAQKsrlxt6Ri/WuDGcK1+Tk1hdYHdPeK7KMIds10mcwzw6BpQFZYxP8o6O6ejJFGO06TAGt2PolwuWnpeiVQ9Kcg0= +} + +func ExamplePayment() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := Payment{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + Asset: NativeAsset{}, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAAAAAABfXhAAAAAAAAAAAB6i5yxQAAAEB2/C066OEFac3Bszk6FtvKd+NKOeCl+f8caHQATPos8HkJW1Sm/WyEkVDrvrDX4udMHl3gHhlS/qE0EuWEeJYC +} + +func ExamplePayment_setBaseFee() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op1 := Payment{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + Asset: NativeAsset{}, + } + + op2 := Payment{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "100", + Asset: NativeAsset{}, + } + + // get fees from network + feeStats, err := client.FeeStats() + check(err) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2}, + BaseFee: feeStats.MaxFee.P50, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
+ }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAABLAADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAQAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAAAAAABfXhAAAAAAAAAAABAAAAAITg3tq8G0kvnvoIhZPMYJsY+9KVV8xAA6NxhtKxIXZUAAAAAAAAAAA7msoAAAAAAAAAAAHqLnLFAAAAQMmOXP+k93ENYtu7evNTu2h63UkNrQnF6ci49Oh1XufQ3rhzS4Dd1+6AXqgWa4FbcvlTVRjxCurkflI4Rov2xgQ= +} + +func ExampleBumpSequence() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := BumpSequence{ + BumpTo: 9606132444168300, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACwAiILoAAABsAAAAAAAAAAHqLnLFAAAAQEIvyOHdPn82ckKXISGF6sR4YU5ox735ivKrC/wS4615j1AA42vbXSLqShJA5/7/DX56UUv+Lt7vlcu9M7jsRw4= +} + +func ExampleAccountMerge() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := AccountMerge{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAAAAAB6i5yxQAAAEAvOx3WHzmaTsf4rK+yRDsvXn9xh+dU6CkpAum+FCXQ5LZQqhxQg9HErbSfxeTFMdknEpMKXgJRFUfAetl+jf4O +} + +func ExampleManageData() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := ManageData{ + Name: "Fruit preference", + Value: []byte("Apple"), + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
+ }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACgAAABBGcnVpdCBwcmVmZXJlbmNlAAAAAQAAAAVBcHBsZQAAAAAAAAAAAAAB6i5yxQAAAEDtRCyQRKKgQ8iLEu7kicHtSzoplfxPtPTMhdRv/sq8UoIBVTxIw+S13Jv+jzs3tyLDLiGCVNXreUNlbfX+980K +} + +func ExampleManageData_removeDataEntry() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := ManageData{ + Name: "Fruit preference", + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACgAAABBGcnVpdCBwcmVmZXJlbmNlAAAAAAAAAAAAAAAB6i5yxQAAAEDFpI1vphzG8Dny4aVDA7tyOlP579d9kWO0U/vmq6pWTrNocd6+xTiU753W50ksEscA6f1WNwUsQf+DCwmZfqIA +} + +func ExampleSetOptions() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := SetOptions{ + InflationDestination: NewInflationDestination("GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z"), + ClearFlags: []AccountFlag{AuthRevocable}, + SetFlags: []AccountFlag{AuthRequired, AuthImmutable}, + MasterWeight: NewThreshold(10), + LowThreshold: NewThreshold(1), + MediumThreshold: NewThreshold(2), + HighThreshold: NewThreshold(2), + HomeDomain: NewHomeDomain("LovelyLumensLookLuminous.com"), + Signer: &Signer{Address: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", Weight: Threshold(4)}, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
+ }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAEAAAAAhODe2rwbSS+e+giFk8xgmxj70pVXzEADo3GG0rEhdlQAAAABAAAAAgAAAAEAAAAFAAAAAQAAAAoAAAABAAAAAQAAAAEAAAACAAAAAQAAAAIAAAABAAAAHExvdmVseUx1bWVuc0xvb2tMdW1pbm91cy5jb20AAAABAAAAAITg3tq8G0kvnvoIhZPMYJsY+9KVV8xAA6NxhtKxIXZUAAAABAAAAAAAAAAB6i5yxQAAAEBxncRuLogeNQ8sG9TojUMB6QmKDWYmhF00Wz43UX90pAQnSNcJAQxur0RA7Fn6LjJLObqyjcdIc4P2DC02u08G +} + +func ExampleChangeTrust() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + asset, err := CreditAsset{"ABCD", "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z"}.ToChangeTrustAsset() + check(err) + + op := ChangeTrust{ + Line: asset, + Limit: "10", + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABgAAAAFBQkNEAAAAAITg3tq8G0kvnvoIhZPMYJsY+9KVV8xAA6NxhtKxIXZUAAAAAAX14QAAAAAAAAAAAeoucsUAAABAqqUuIlFMrlElYnGSLHlaI/A41oGA3rdtc1EHhza9bXk35ZwlEvmsBUOZTasZfgBzwd+CczekWKBCEqBCHzaSBw== +} + +func ExampleChangeTrust_removeTrustline() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := RemoveTrustlineOp(CreditAsset{"ABCD", "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z"}.MustToChangeTrustAsset()) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
+ }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABgAAAAFBQkNEAAAAAITg3tq8G0kvnvoIhZPMYJsY+9KVV8xAA6NxhtKxIXZUAAAAAAAAAAAAAAAAAAAAAeoucsUAAABAKLmUWcLjxeY+vG8jEXMNprU6EupxbMRiXGYzuKBptnVlbFUtTBqhYa/ibyCZTEVCinT8bWQKDvZI0m6VLKVHAg== +} + +func ExampleAllowTrust() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := AllowTrust{ + Trustor: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Type: CreditAsset{"ABCD", "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z"}, + Authorize: true, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABwAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAFBQkNEAAAAAQAAAAAAAAAB6i5yxQAAAEAY3MnWiMcL18SxRITSuI5tZSXmEo0Q38UZg0jiJGU2U6kSnsCNTTJiGACGQlIrPfAMYt9koarrX11w7HLBosQN +} + +func ExampleManageSellOffer() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + selling := NativeAsset{} + buying := CreditAsset{"ABCD", "GAS4V4O2B7DW5T7IQRPEEVCRXMDZESKISR7DVIGKZQYYV3OSQ5SH5LVP"} + sellAmount := "100" + op, err := CreateOfferOp(selling, buying, sellAmount, price.MustParse("0.01")) + check(err) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
+ }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAwAAAAAAAAABQUJDRAAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAA7msoAAAAAAQAAAGQAAAAAAAAAAAAAAAAAAAAB6i5yxQAAAEBtfrN+VUE7iCwBk0+rmg0/Ua4DItMWEy6naGWxoDBi4ksCIJSZPzkv79Q65rIaFyIcC/zuyJcnIcv73AP+HQEK +} + +func ExampleManageSellOffer_deleteOffer() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + offerID := int64(2921622) + op, err := DeleteOfferOp(offerID) + check(err) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAwAAAAAAAAABRkFLRQAAAABBB4BkxJWGYvNgJBoiXUo2tjgWlNmhHMMKdwGN7RSdsQAAAAAAAAAAAAAAAQAAAAEAAAAAACyUlgAAAAAAAAAB6i5yxQAAAEBnE+oILauqt6m8fj7DIBNW/XBmKJ34SLvHdxP04vb26aI8q9i/2p9/pJMnWPeOoIw0f6jreR306qPJFhjMtl4G +} + +func ExampleManageSellOffer_updateOffer() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + selling := NativeAsset{} + buying := CreditAsset{"ABCD", "GAS4V4O2B7DW5T7IQRPEEVCRXMDZESKISR7DVIGKZQYYV3OSQ5SH5LVP"} + sellAmount := "50" + offerID := int64(2497628) + op, err := UpdateOfferOp(selling, buying, sellAmount, price.MustParse("0.02"), offerID) + check(err) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
+ }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAwAAAAAAAAABQUJDRAAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAAdzWUAAAAAAQAAADIAAAAAACYcXAAAAAAAAAAB6i5yxQAAAECmO+4yukAuLRtR4IRWPVtoyZ2LJeaipPuec+/M1JGDoTFPULDl3kgugPwV3mr0jvMNArBdR8S3NUw31gtT5TcO +} + +func ExampleCreatePassiveSellOffer() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := CreatePassiveSellOffer{ + Selling: NativeAsset{}, + Buying: CreditAsset{"ABCD", "GAS4V4O2B7DW5T7IQRPEEVCRXMDZESKISR7DVIGKZQYYV3OSQ5SH5LVP"}, + Amount: "10", + Price: xdr.Price{1, 1}, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABAAAAAAAAAABQUJDRAAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAAF9eEAAAAAAQAAAAEAAAAAAAAAAeoucsUAAABAE4XbLdDVz1MwC9Bs84nkqK8hyHheVbYznNSiAP0hiP8auvcKAMnYz3HJvzM8H0q/K5MPvgBaehHZ/tQtaPSGBg== +} + +func ExamplePathPayment() { + kp, _ := keypair.Parse("SBZVMB74Z76QZ3ZOY7UTDFYKMEGKW5XFJEB6PFKBF4UYSSWHG4EDH7PY") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + abcdAsset := CreditAsset{"ABCD", "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"} + op := PathPayment{ + SendAsset: NativeAsset{}, + SendMax: "10", + Destination: kp.Address(), + DestAsset: NativeAsset{}, + DestAmount: "1", + Path: []Asset{abcdAsset}, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
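+ // PathPayment predates the strict-receive/strict-send split and builds the
+ // same operation as PathPaymentStrictReceive; this example and the next one
+ // encode to the same transaction.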
+ }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAgAAAAAAAAAABfXhAAAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAAAAAAAAAJiWgAAAAAEAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAAAAABLhVZmAAAAEDhhPsNm7yKfCUCDyBV1pOZDu+3DVDpT2cJSLQOVevP6pmU2yVqvMKnWbYxC5GbTXEEF+MfBE6EoW5+Z4rRt0QO +} + +func ExamplePathPaymentStrictReceive() { + kp, _ := keypair.Parse("SBZVMB74Z76QZ3ZOY7UTDFYKMEGKW5XFJEB6PFKBF4UYSSWHG4EDH7PY") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + abcdAsset := CreditAsset{"ABCD", "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"} + op := PathPaymentStrictReceive{ + SendAsset: NativeAsset{}, + SendMax: "10", + Destination: kp.Address(), + DestAsset: NativeAsset{}, + DestAmount: "1", + Path: []Asset{abcdAsset}, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAgAAAAAAAAAABfXhAAAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAAAAAAAAAJiWgAAAAAEAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAAAAABLhVZmAAAAEDhhPsNm7yKfCUCDyBV1pOZDu+3DVDpT2cJSLQOVevP6pmU2yVqvMKnWbYxC5GbTXEEF+MfBE6EoW5+Z4rRt0QO +} + +func ExamplePathPaymentStrictSend() { + kp, _ := keypair.Parse("SBZVMB74Z76QZ3ZOY7UTDFYKMEGKW5XFJEB6PFKBF4UYSSWHG4EDH7PY") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + abcdAsset := CreditAsset{"ABCD", "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"} + op := PathPaymentStrictSend{ + SendAsset: NativeAsset{}, + SendAmount: "1", + Destination: kp.Address(), + DestAsset: NativeAsset{}, + DestMin: "10", + Path: []Asset{abcdAsset}, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
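+ // Strict send fixes the amount leaving the source (SendAmount) and guards the
+ // destination with a minimum (DestMin); strict receive above is the mirror
+ // image, fixing DestAmount and capping the spend with SendMax.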
+ }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAADQAAAAAAAAAAAJiWgAAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAAAAAAAABfXhAAAAAAEAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAAAAABLhVZmAAAAEDV6CmR4ATvtm2qBzHE9UqqS95ZnIIHgpuU7hTZO38DHhf+oeZQ02DGvst4vYMMAIPGkMAsLlfAN/AFinz74DAD +} + +func ExampleManageBuyOffer() { + kp, _ := keypair.Parse("SBZVMB74Z76QZ3ZOY7UTDFYKMEGKW5XFJEB6PFKBF4UYSSWHG4EDH7PY") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + buyOffer := ManageBuyOffer{ + Selling: NativeAsset{}, + Buying: CreditAsset{"ABCD", "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"}, + Amount: "100", + Price: price.MustParse("0.01"), + OfferID: 0, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&buyOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! + }, + ) + check(err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + txe, err := tx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAADAAAAAAAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAA7msoAAAAAAQAAAGQAAAAAAAAAAAAAAAAAAAABLhVZmAAAAED4fIdU68w6XIMwf1RPFdF9qRRlfPycrmK8dCOW0XwSbiya9JfMi9YrD9cGY7zHV+3zYpLcEi7lLo++PZ1gOsAK + +} + +func ExampleFeeBumpTransaction() { + kp, _ := keypair.Parse("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + client := horizonclient.DefaultTestNetClient + ar := horizonclient.AccountRequest{AccountID: kp.Address()} + sourceAccount, err := client.AccountDetail(ar) + check(err) + + op := BumpSequence{ + BumpTo: 9606132444168300, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), // Use a real timeout in production! 
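+ // The inner transaction is built and signed as usual; the fee-bump wrapper
+ // created below only replaces the fee source and the fee.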
+ }, + ) + check(err) + tx, err = tx.Sign(network.TestNetworkPassphrase, kp.(*keypair.Full)) + check(err) + + feeBumpKP, _ := keypair.Parse("SBZVMB74Z76QZ3ZOY7UTDFYKMEGKW5XFJEB6PFKBF4UYSSWHG4EDH7PY") + feeBumpTx, err := NewFeeBumpTransaction( + FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: feeBumpKP.Address(), + BaseFee: MinBaseFee, + }, + ) + check(err) + feeBumpTx, err = feeBumpTx.Sign(network.TestNetworkPassphrase, feeBumpKP.(*keypair.Full)) + check(err) + + txe, err := feeBumpTx.Base64() + check(err) + fmt.Println(txe) + + // Output: AAAABQAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAAAAAADIAAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACwAiILoAAABsAAAAAAAAAAHqLnLFAAAAQEIvyOHdPn82ckKXISGF6sR4YU5ox735ivKrC/wS4615j1AA42vbXSLqShJA5/7/DX56UUv+Lt7vlcu9M7jsRw4AAAAAAAAAAS4VWZgAAABAeD0gL6WpzSdGTzWd4c9yUu3r+W21hOTLT4ItHGBTHYPT20Wk3dytuqfP89EzlkZXvtG8/N0HH4w+oJCLOL/5Aw== +} + +func ExampleBuildChallengeTx() { + // Generate random nonce + serverSignerSeed := "SBZVMB74Z76QZ3ZOY7UTDFYKMEGKW5XFJEB6PFKBF4UYSSWHG4EDH7PY" + clientAccountID := "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3" + anchorName := "SDF" + webAuthDomain := "webauthdomain.example.org" + timebound := time.Duration(5 * time.Minute) + + tx, err := BuildChallengeTx(serverSignerSeed, clientAccountID, webAuthDomain, anchorName, network.TestNetworkPassphrase, timebound) + check(err) + + txeBase64, err := tx.Base64() + check(err) + ok, err := checkChallengeTx(txeBase64, anchorName) + check(err) + + fmt.Println(ok) + // Output: true +} + +func ExampleCreateClaimableBalance() { + A := "SCZANGBA5YHTNYVVV4C3U252E2B6P6F5T3U6MM63WBSBZATAQI3EBTQ4" + B := "GA2C5RFPE6GCKMY3US5PAB6UZLKIGSPIUKSLRB6Q723BM2OARMDUYEJ5" + + aKeys := keypair.MustParseFull(A) + aAccount := SimpleAccount{AccountID: aKeys.Address()} + + soon := time.Now().Add(time.Second * 60) + bCanClaim := BeforeRelativeTimePredicate(60) + aCanReclaim := NotPredicate(BeforeAbsoluteTimePredicate(soon.Unix())) + + claimants := []Claimant{ + NewClaimant(B, &bCanClaim), + NewClaimant(aKeys.Address(), &aCanReclaim), + } + + claimableBalanceEntry := CreateClaimableBalance{ + Destinations: claimants, + Asset: NativeAsset{}, + Amount: "420", + } + + // Build and sign the transaction + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &aAccount, + IncrementSequenceNum: true, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + Operations: []Operation{&claimableBalanceEntry}, + }, + ) + check(err) + tx, err = tx.Sign(network.TestNetworkPassphrase, aKeys) + check(err) + + balanceId, err := tx.ClaimableBalanceID(0) + check(err) + fmt.Println(balanceId) + + // Output: 000000000bf0a78c7ca2a980768b66980ba97934f3b3b45a05ce7a5195a44b64b7dedadb +} + +func ExampleClaimClaimableBalance() { + A := "SCZANGBA5YHTNYVVV4C3U252E2B6P6F5T3U6MM63WBSBZATAQI3EBTQ4" + aKeys := keypair.MustParseFull(A) + aAccount := SimpleAccount{AccountID: aKeys.Address()} + + balanceId := "000000000bf0a78c7ca2a980768b66980ba97934f3b3b45a05ce7a5195a44b64b7dedadb" + claimBalance := ClaimClaimableBalance{BalanceID: balanceId} + + txb64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &aAccount, // or Account B, depending on the condition! 
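+ // Per the predicates set when the balance was created: B can claim only
+ // while the 60-second relative window is open, and A can reclaim only after
+ // the absolute deadline has passed.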
+ IncrementSequenceNum: true, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + Operations: []Operation{&claimBalance}, + }, + network.TestNetworkPassphrase, + aKeys, + ) + check(err) + fmt.Println(txb64) + + // Output: AAAAAgAAAAC0FS8Odh4yFSpaseK1sYMMVdTpVCJmylGJpMeYu9LOKAAAAGQAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAADwAAAAAL8KeMfKKpgHaLZpgLqXk087O0WgXOelGVpEtkt97a2wAAAAAAAAABu9LOKAAAAEAesnN9L5oVpoZloBoUYfafhhuGSXAsJL2q15zyyWysc7fOADPdiQXQTEuySp12/ciGYWbZhw/fvyzLJlTgqmsI +} + +type SponsorshipTestConfig struct { + A *keypair.Full + S1 *keypair.Full + S2 *keypair.Full + + Aaccount SimpleAccount + S1account SimpleAccount + S2account SimpleAccount + + Assets []CreditAsset +} + +func InitSponsorshipTestConfig() SponsorshipTestConfig { + A := keypair.MustParseFull("SCZANGBA5YHTNYVVV4C3U252E2B6P6F5T3U6MM63WBSBZATAQI3EBTQ4") + S1 := keypair.MustParseFull("SBZVMB74Z76QZ3ZOY7UTDFYKMEGKW5XFJEB6PFKBF4UYSSWHG4EDH7PY") + S2 := keypair.MustParseFull("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") + + return SponsorshipTestConfig{ + A: A, S1: S1, S2: S2, + Aaccount: SimpleAccount{AccountID: A.Address()}, + S1account: SimpleAccount{AccountID: S1.Address()}, + S2account: SimpleAccount{AccountID: S2.Address()}, + Assets: []CreditAsset{ + {Code: "ABCD", Issuer: S1.Address()}, + {Code: "EFGH", Issuer: S1.Address()}, + {Code: "IJKL", Issuer: S2.Address()}, + }, + } +} + +func ExampleBeginSponsoringFutureReserves() { + test := InitSponsorshipTestConfig() + + asset, err := test.Assets[0].ToChangeTrustAsset() + check(err) + + // If the sponsoree submits the transaction, the `SourceAccount` fields can + // be omitted for the "sponsor sandwich" operations. + sponsorTrustline := []Operation{ + &BeginSponsoringFutureReserves{SponsoredID: test.A.Address()}, + &ChangeTrust{ + SourceAccount: test.Aaccount.AccountID, + Line: asset, + Limit: MaxTrustlineLimit, + }, + &EndSponsoringFutureReserves{}, + } + + // The sponsorer obviously must sign the tx, but so does the sponsoree, to + // consent to the sponsored operation. + txb64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &test.Aaccount, + Operations: sponsorTrustline, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + IncrementSequenceNum: true, + }, + network.TestNetworkPassphrase, + test.S1, + test.A, + ) + check(err) + fmt.Println(txb64) + + // Output: AAAAAgAAAAC0FS8Odh4yFSpaseK1sYMMVdTpVCJmylGJpMeYu9LOKAAAASwAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAEAAAAAC0FS8Odh4yFSpaseK1sYMMVdTpVCJmylGJpMeYu9LOKAAAAAEAAAAAtBUvDnYeMhUqWrHitbGDDFXU6VQiZspRiaTHmLvSzigAAAAGAAAAAUFCQ0QAAAAAfhHLNNY19eGrAtSgLD3VpaRm2AjNjxIBWQg9zS4VWZh//////////wAAAAAAAAARAAAAAAAAAAIuFVmYAAAAQARLe8wjGKq6WwdOPGkw2jo4eltp6dAHXEum4kYKzIjYx9fs4kdNJAaJE0s3Fy6JAIo1ttrGWp8zq6VX6P5CcAW70s4oAAAAQNpzu6NxKgcYd70mJl6EHyRPdjNTfxGm1w4XIIyIfZElRpmuZ6aWpXA0wwS6BimT3UQizK55T1kt1B2Pi3KyPAw= +} + +func ExampleBeginSponsoringFutureReserves_transfer() { + test := InitSponsorshipTestConfig() + + asset, err := test.Assets[1].ToTrustLineAsset() + check(err) + + transferOps := []Operation{ + &BeginSponsoringFutureReserves{ + SourceAccount: test.S2account.AccountID, + SponsoredID: test.S1.Address(), + }, + &RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeTrustLine, + Account: &test.Aaccount.AccountID, + TrustLine: &TrustLineID{ + Account: test.A.Address(), + Asset: asset, + }, + }, + &EndSponsoringFutureReserves{}, + } + + // For transfers, both the old and new sponsor need to sign. 
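+ // In the sandwich above, S2 (the new sponsor) begins sponsoring S1, the
+ // RevokeSponsorship in the middle hands over sponsorship of A's trustline,
+ // and EndSponsoringFutureReserves closes the sandwich.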
+ txb64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &test.S1account, + Operations: transferOps, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + IncrementSequenceNum: true, + }, + network.TestNetworkPassphrase, + test.S1, + test.S2, + ) + check(err) + fmt.Println(txb64) + + // Output: AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAASwAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAABAAAAAODcbeFyXKxmUWK1L6znNbKKIkPkHRJNbLktcKPqLnLFAAAAEAAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAAAAAAASAAAAAAAAAAEAAAAAtBUvDnYeMhUqWrHitbGDDFXU6VQiZspRiaTHmLvSzigAAAABRUZHSAAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAAAAAAARAAAAAAAAAAIuFVmYAAAAQDx6tSWzDT5MCVpolKLvhBwM/PpV9d/Om8PlJ4GZekp+DY6H2XAZ+Rldlfa0DqK8KNuMF921Vha6fpmK7FY4/QrqLnLFAAAAQCxxzLrpHFwd+CS6xmAoytq+ORtrkxUy2k6B7wIuASrlJDnYAHZptf7bBKXPn5ImcpJIcB3E5Xl98s/lEA0+YAA= +} + +func ExampleRevokeSponsorship() { + test := InitSponsorshipTestConfig() + + asset1, err := test.Assets[1].ToTrustLineAsset() + check(err) + + asset2, err := test.Assets[2].ToTrustLineAsset() + check(err) + + revokeOps := []Operation{ + &RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeTrustLine, + Account: &test.Aaccount.AccountID, + TrustLine: &TrustLineID{ + Account: test.A.Address(), + Asset: asset1, + }, + }, + &RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeTrustLine, + Account: &test.Aaccount.AccountID, + TrustLine: &TrustLineID{ + Account: test.A.Address(), + Asset: asset2, + }, + }, + } + + // With revocation, only the new sponsor needs to sign. + txb64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &test.S2account, + Operations: revokeOps, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + IncrementSequenceNum: true, + }, + network.TestNetworkPassphrase, + test.S2, + ) + check(err) + fmt.Println(txb64) + + // Output: AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAMgAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAEgAAAAAAAAABAAAAALQVLw52HjIVKlqx4rWxgwxV1OlUImbKUYmkx5i70s4oAAAAAUVGR0gAAAAAfhHLNNY19eGrAtSgLD3VpaRm2AjNjxIBWQg9zS4VWZgAAAAAAAAAEgAAAAAAAAABAAAAALQVLw52HjIVKlqx4rWxgwxV1OlUImbKUYmkx5i70s4oAAAAAUlKS0wAAAAA4Nxt4XJcrGZRYrUvrOc1sooiQ+QdEk1suS1wo+oucsUAAAAAAAAAAeoucsUAAABA9YO+xRc5Vb8ueP1U8go7ka+u/gZJd2z075c2pdFxYb+4AvQUQGvg+N4wvtNll43lPwXq5XAz74BfP99wugplDQ== +} + +type LiquidityPoolTestConfig struct { + A *keypair.Full + AAccount SimpleAccount + Assets []CreditAsset +} + +func InitLiquidityPoolTestConfig() LiquidityPoolTestConfig { + A := keypair.MustParseFull("SCZANGBA5YHTNYVVV4C3U252E2B6P6F5T3U6MM63WBSBZATAQI3EBTQ4") + + return LiquidityPoolTestConfig{ + A: A, + AAccount: SimpleAccount{AccountID: A.Address()}, + Assets: []CreditAsset{ + {Code: "ABCD", Issuer: A.Address()}, + {Code: "EFGH", Issuer: A.Address()}, + {Code: "IJKL", Issuer: A.Address()}, + }, + } +} + +func ExampleLiquidityPoolDeposit() { + test := InitLiquidityPoolTestConfig() + + poolId, err := NewLiquidityPoolId(test.Assets[0], test.Assets[1]) + check(err) + + depositOps := []Operation{ + // Change of trust the first time ensures that the pool exists. 
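+ // The trustline is to the pool share itself; the pool is identified by its
+ // ordered asset pair plus the pool fee (LiquidityPoolFeeV18 below).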
+ &ChangeTrust{ + Line: LiquidityPoolShareChangeTrustAsset{ + LiquidityPoolParameters: LiquidityPoolParameters{ + AssetA: test.Assets[0], + AssetB: test.Assets[1], + Fee: LiquidityPoolFeeV18, + }, + }, + SourceAccount: test.AAccount.AccountID, + Limit: MaxTrustlineLimit, + }, + + // Add our deposit to the pool + &LiquidityPoolDeposit{ + SourceAccount: test.A.Address(), + LiquidityPoolID: poolId, + MaxAmountA: "0.1000000", + MaxAmountB: "0.1000000", + MinPrice: price.MustParse("0.1000000"), + MaxPrice: price.MustParse("0.1000000"), + }, + } + + // With revocation, only the new sponsor needs to sign. + txb64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &test.AAccount, + Operations: depositOps, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + IncrementSequenceNum: true, + }, + network.TestNetworkPassphrase, + test.A, + ) + check(err) + fmt.Println(txb64) + + // Output: AAAAAgAAAAC0FS8Odh4yFSpaseK1sYMMVdTpVCJmylGJpMeYu9LOKAAAAMgAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAABAAAAALQVLw52HjIVKlqx4rWxgwxV1OlUImbKUYmkx5i70s4oAAAABgAAAAMAAAAAAAAAAUFCQ0QAAAAAtBUvDnYeMhUqWrHitbGDDFXU6VQiZspRiaTHmLvSzigAAAABRUZHSAAAAAC0FS8Odh4yFSpaseK1sYMMVdTpVCJmylGJpMeYu9LOKAAAAB5//////////wAAAAEAAAAAtBUvDnYeMhUqWrHitbGDDFXU6VQiZspRiaTHmLvSzigAAAAWeDFASRchw0mM6/TK0AbZw13MsKye384/nzGgEZhWzzsAAAAAAA9CQAAAAAAAD0JAAAAAAQAAAAoAAAABAAAACgAAAAAAAAABu9LOKAAAAECsEeCUf0w62cgGpgaZxR2cb47Ln3jvfUOvTXl2sJkmEM3CIHIZzkFkMz7RZCRGn70DUjl5TXeow0zxipPL1K0H + +} + +func ExampleLiquidityPoolWithdraw() { + test := InitLiquidityPoolTestConfig() + + poolId, err := NewLiquidityPoolId(test.Assets[0], test.Assets[1]) + check(err) + + withdrawOps := []Operation{ + &LiquidityPoolWithdraw{ + SourceAccount: test.A.Address(), + LiquidityPoolID: poolId, + Amount: "0.1000000", + MinAmountA: "0.1000000", + MinAmountB: "0.1000000", + }, + } + + // With revocation, only the new sponsor needs to sign. + txb64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &test.AAccount, + Operations: withdrawOps, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + IncrementSequenceNum: true, + }, + network.TestNetworkPassphrase, + test.A, + ) + check(err) + fmt.Println(txb64) + + // Output: AAAAAgAAAAC0FS8Odh4yFSpaseK1sYMMVdTpVCJmylGJpMeYu9LOKAAAAGQAAAAAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAALQVLw52HjIVKlqx4rWxgwxV1OlUImbKUYmkx5i70s4oAAAAF3gxQEkXIcNJjOv0ytAG2cNdzLCsnt/OP58xoBGYVs87AAAAAAAPQkAAAAAAAA9CQAAAAAAAD0JAAAAAAAAAAAG70s4oAAAAQHc2K5XVrm6+ICFt3xOrJFbXZXV4jhCZ2kruYJnJ/JJatgRZerVjiCp6BI37hcrd9CM3yTnb8McOSNXHmtgEMQM= +} diff --git a/txnbuild/examplehorizonclient/examplehorizonclient.go b/txnbuild/examplehorizonclient/examplehorizonclient.go new file mode 100644 index 0000000000..12abe3e2e7 --- /dev/null +++ b/txnbuild/examplehorizonclient/examplehorizonclient.go @@ -0,0 +1,67 @@ +// Package examplehorizonclient provides a dummy client for use with the GoDoc examples. 
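+//
+// It implements only the calls the examples need (AccountDetail and FeeStats)
+// and returns fixed data, so the examples run deterministically without any
+// network access. A rough usage sketch, mirroring the real horizonclient API:
+//
+//	client := examplehorizonclient.DefaultTestNetClient
+//	account, err := client.AccountDetail(examplehorizonclient.AccountRequest{
+//		AccountID: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3",
+//	})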
+package examplehorizonclient + +import ( + hProtocol "github.com/stellar/go/protocols/horizon" +) + +// AccountRequest is a simple mock +type AccountRequest struct { + AccountID string +} + +// Client is a simple mock +type Client struct { +} + +// DefaultTestNetClient is a simple mock +var DefaultTestNetClient = Client{} + +// AccountDetail returns a minimal, static Account object +func (client *Client) AccountDetail(req AccountRequest) (hProtocol.Account, error) { + return hProtocol.Account{ + AccountID: req.AccountID, + Sequence: "3556091187167235", + }, nil +} + +// FeeStats returns mock network fee information +func (client *Client) FeeStats() (hProtocol.FeeStats, error) { + return hProtocol.FeeStats{ + LastLedger: 22606298, + LastLedgerBaseFee: 100, + LedgerCapacityUsage: 0.97, + MaxFee: hProtocol.FeeDistribution{ + Max: 100, + Min: 100, + Mode: 200, + P10: 250, + P20: 300, + P30: 350, + P40: 500, + P50: 600, + P60: 700, + P70: 800, + P80: 900, + P90: 2000, + P95: 3000, + P99: 5000, + }, + FeeCharged: hProtocol.FeeDistribution{ + Max: 100, + Min: 100, + Mode: 100, + P10: 100, + P20: 100, + P30: 100, + P40: 100, + P50: 100, + P60: 100, + P70: 100, + P80: 100, + P90: 100, + P95: 100, + P99: 100, + }, + }, nil +} diff --git a/txnbuild/fee_bump_test.go b/txnbuild/fee_bump_test.go new file mode 100644 index 0000000000..1a4915041a --- /dev/null +++ b/txnbuild/fee_bump_test.go @@ -0,0 +1,391 @@ +package txnbuild + +import ( + "crypto/sha256" + "encoding/base64" + "testing" + + "github.com/stellar/go/network" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestFeeBumpMissingInner(t *testing.T) { + _, err := NewFeeBumpTransaction(FeeBumpTransactionParams{}) + assert.EqualError(t, err, "inner transaction is missing") +} + +func TestFeeBumpInvalidFeeSource(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: "/.','ml", + BaseFee: MinBaseFee, + Inner: tx, + }, + ) + assert.Contains(t, err.Error(), "fee account is not a valid address") +} + +func TestFeeBumpUpgradesV0Transaction(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&Inflation{}}, + BaseFee: 2 * MinBaseFee, + Memo: MemoText("test-memo"), + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + convertToV0(tx) + + feeBump, err := NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: newKeypair1().Address(), + BaseFee: 3 * MinBaseFee, + Inner: tx, + }, + ) + assert.NoError(t, err) + + assert.Equal(t, xdr.EnvelopeTypeEnvelopeTypeTx, feeBump.InnerTransaction().envelope.Type) + assert.Equal(t, xdr.EnvelopeTypeEnvelopeTypeTxV0, tx.envelope.Type) + + innerHash, err := feeBump.InnerTransaction().HashHex(network.TestNetworkPassphrase) + assert.NoError(t, err) + originalHash, err := tx.HashHex(network.TestNetworkPassphrase) + assert.NoError(t, err) + assert.Equal(t, originalHash, innerHash) + + assert.Equal(t, tx.Signatures(), feeBump.InnerTransaction().Signatures()) + assert.Equal(t, 
tx.Operations(), feeBump.InnerTransaction().Operations()) + assert.Equal(t, tx.MaxFee(), feeBump.InnerTransaction().MaxFee()) + assert.Equal(t, tx.BaseFee(), feeBump.InnerTransaction().BaseFee()) + assert.Equal(t, tx.SourceAccount(), feeBump.InnerTransaction().SourceAccount()) + assert.Equal(t, tx.Memo(), feeBump.InnerTransaction().Memo()) + assert.Equal(t, tx.Timebounds(), feeBump.InnerTransaction().Timebounds()) + + innerBase64, err := feeBump.InnerTransaction().Base64() + assert.NoError(t, err) + originalBase64, err := tx.Base64() + assert.NoError(t, err) + assert.NotEqual(t, innerBase64, originalBase64) +} + +func TestFeeBumpInvalidInnerTransactionType(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&Inflation{}}, + BaseFee: 2 * MinBaseFee, + Memo: MemoText("test-memo"), + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + aid := xdr.MustAddress(kp0.Address()) + tx.envelope.Type = xdr.EnvelopeTypeEnvelopeTypeTxFeeBump + tx.envelope.FeeBump = &xdr.FeeBumpTransactionEnvelope{ + Tx: xdr.FeeBumpTransaction{ + FeeSource: aid.ToMuxedAccount(), + InnerTx: xdr.FeeBumpTransactionInnerTx{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: tx.envelope.V1, + }, + }, + Signatures: nil, + } + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: newKeypair1().Address(), + BaseFee: 3 * MinBaseFee, + Inner: tx, + }, + ) + assert.EqualError(t, err, "EnvelopeTypeEnvelopeTypeTxFeeBump transactions cannot be fee bumped") +} + +// There is a use case for having a fee bump tx where the fee account is equal to the +// source account of the inner transaction. Consider the case where the signers of the +// inner transaction could be different (which is the case when dealing with operations +// on different source accounts). 
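+// For example, an account can bump the fee on a transaction it already built
+// and had signed by the other operation sources, rather than rebuilding the
+// transaction with a higher fee and collecting all of those signatures again.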
+func TestFeeBumpAllowsFeeAccountToEqualInnerSourceAccount(t *testing.T) { + sourceAccount := NewSimpleAccount("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", 1) + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: sourceAccount.AccountID, + BaseFee: MinBaseFee, + Inner: tx, + }, + ) + assert.NoError(t, err) + + muxedAccount := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0, + Ed25519: xdr.Uint256{1, 2, 3}, + }, + } + tx.envelope.V1.Tx.SourceAccount = muxedAccount + + otherAccount := xdr.AccountId{ + Type: xdr.PublicKeyTypePublicKeyTypeEd25519, + Ed25519: &xdr.Uint256{1, 2, 3}, + } + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: otherAccount.Address(), + BaseFee: MinBaseFee, + Inner: tx, + }, + ) + assert.NoError(t, err) + + otherAccount = xdr.AccountId{ + Type: xdr.PublicKeyTypePublicKeyTypeEd25519, + Ed25519: &xdr.Uint256{1, 2, 3}, + } + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: otherAccount.Address(), + BaseFee: MinBaseFee, + Inner: tx, + }, + ) + assert.NoError(t, err) +} + +func TestFeeBumpSignWithKeyString(t *testing.T) { + kp0, kp1 := newKeypair0(), newKeypair1() + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + feeBumpTx, err := NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: kp1.Address(), + BaseFee: 2 * MinBaseFee, + Inner: tx, + }, + ) + assert.NoError(t, err) + feeBumpTx, err = feeBumpTx.Sign(network.TestNetworkPassphrase, kp1) + assert.NoError(t, err) + expectedBase64, err := feeBumpTx.Base64() + assert.NoError(t, err) + + feeBumpTx, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: kp1.Address(), + BaseFee: 2 * MinBaseFee, + Inner: tx, + }, + ) + assert.NoError(t, err) + feeBumpTx, err = feeBumpTx.SignWithKeyString(network.TestNetworkPassphrase, kp1.Seed()) + assert.NoError(t, err) + base64, err := feeBumpTx.Base64() + assert.NoError(t, err) + + assert.Equal(t, expectedBase64, base64) +} + +func TestFeeBumpSignHashX(t *testing.T) { + // 256 bit preimage + preimage := "this is a preimage for hashx transactions on the stellar network" + preimageHash := sha256.Sum256([]byte(preimage)) + + kp0, kp1 := newKeypair0(), newKeypair1() + payment := Payment{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + Asset: NativeAsset{}, + } + sourceAccount := NewSimpleAccount(kp0.Address(), int64(4353383146192899)) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&payment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + feeBumpTx, err := NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: kp1.Address(), + BaseFee: 2 * MinBaseFee, + Inner: tx, + }, + ) + assert.NoError(t, err) + feeBumpTx, err = 
feeBumpTx.SignHashX([]byte(preimage)) + assert.NoError(t, err) + + signatures := feeBumpTx.Signatures() + assert.Len(t, signatures, 1) + assert.Equal(t, xdr.Signature(preimage), signatures[0].Signature) + var expectedHint [4]byte + copy(expectedHint[:], preimageHash[28:]) + assert.Equal(t, xdr.SignatureHint(expectedHint), signatures[0].Hint) +} + +func TestFeeBumpAddSignatureBase64(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + kp2 := newKeypair2() + txSource := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + } + + inner, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + inner, err = inner.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + tx, err := NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: kp1.Address(), + BaseFee: 2 * MinBaseFee, + Inner: inner, + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, kp1, kp2) + assert.NoError(t, err) + expected, err := tx.Base64() + assert.NoError(t, err) + signatures := tx.Signatures() + + otherTx, err := NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: kp1.Address(), + BaseFee: 2 * MinBaseFee, + Inner: inner, + }, + ) + assert.NoError(t, err) + otherTx, err = otherTx.AddSignatureBase64( + network.TestNetworkPassphrase, + kp1.Address(), + base64.StdEncoding.EncodeToString(signatures[0].Signature), + ) + assert.NoError(t, err) + _, err = otherTx.AddSignatureBase64( + network.TestNetworkPassphrase, + kp2.Address(), + base64.StdEncoding.EncodeToString(signatures[1].Signature), + ) + assert.NoError(t, err) + b64, err := tx.Base64() + assert.NoError(t, err) + + assert.Equal(t, expected, b64) +} + +func TestFeeBumpMuxedAccounts(t *testing.T) { + kp0, kp1 := newKeypair0(), newKeypair1() + accountID0 := xdr.MustAddress(kp0.Address()) + mx0 := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabe, + Ed25519: *accountID0.Ed25519, + }, + } + sourceAccount := NewSimpleAccount(mx0.Address(), 1) + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + accountID1 := xdr.MustAddress(kp1.Address()) + mx1 := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xdeadbeef, + Ed25519: *accountID1.Ed25519, + }, + } + feeBumpTx, err := NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: mx1.Address(), + BaseFee: 2 * MinBaseFee, + Inner: tx, + }, + ) + assert.NoError(t, err) + assert.Equal(t, mx0.Address(), feeBumpTx.InnerTransaction().sourceAccount.AccountID) + assert.Equal(t, mx1.Address(), feeBumpTx.FeeAccount()) +} diff --git a/txnbuild/helpers.go b/txnbuild/helpers.go new file mode 100644 index 0000000000..87f548c58d --- /dev/null +++ b/txnbuild/helpers.go @@ -0,0 +1,237 @@ +package txnbuild + +import ( + "fmt" + + "github.com/stellar/go/amount" + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/errors" + 
"github.com/stellar/go/xdr" +) + +// validateStellarPublicKey returns an error if a public key is invalid. Otherwise, it returns nil. +// It is a wrapper around the IsValidEd25519PublicKey method of the strkey package. +func validateStellarPublicKey(publicKey string) error { + if publicKey == "" { + return errors.New("public key is undefined") + } + + if !strkey.IsValidEd25519PublicKey(publicKey) { + return errors.Errorf("%s is not a valid stellar public key", publicKey) + } + return nil +} + +// validateStellarSignerKey returns an error if a signerkey is invalid. Otherwise, it returns nil. +func validateStellarSignerKey(signerKey string) error { + if signerKey == "" { + return errors.New("signer key is undefined") + } + + var xdrKey xdr.SignerKey + if err := xdrKey.SetAddress(signerKey); err != nil { + return errors.Errorf("%s is not a valid stellar signer key", signerKey) + } + return nil +} + +// validateStellarAsset checks if the asset supplied is a valid stellar Asset. It returns an error if the asset is +// nil, has an invalid asset code or issuer. +func validateStellarAsset(asset BasicAsset) error { + if asset == nil { + return errors.New("asset is undefined") + } + + if asset.IsNative() { + return nil + } + + _, err := asset.GetType() + if err != nil { + return err + } + + err = validateStellarPublicKey(asset.GetIssuer()) + if err != nil { + return errors.Errorf("asset issuer: %s", err.Error()) + } + + return nil +} + +// validateAmount checks if the provided value is a valid stellar amount, it returns an error if not. +// This is used to validate price and amount fields in structs. +func validateAmount(n interface{}) error { + var stellarAmount int64 + // type switch can be extended to handle other types. Currently, the types for number values in the txnbuild + // package are string or int64. + switch value := n.(type) { + case int64: + stellarAmount = value + case string: + v, err := amount.ParseInt64(value) + if err != nil { + return err + } + stellarAmount = v + default: + return errors.Errorf("could not parse expected numeric value %v", n) + } + + if stellarAmount < 0 { + return errors.New("amount can not be negative") + } + return nil +} + +func validatePrice(p xdr.Price) error { + if p.N == 0 { + return errors.Errorf("price cannot be 0: %d/%d", p.N, p.D) + } + if p.D == 0 { + return errors.Errorf("price denominator cannot be 0: %d/%d", p.N, p.D) + } + if p.N < 0 || p.D < 0 { + return errors.Errorf("price cannot be negative: %d/%d", p.N, p.D) + } + return nil +} + +// validateAssetCode checks if the provided asset is valid as an asset code. +// It returns an error if the asset is invalid. +// The asset must be non native (XLM) with a valid asset code. +func validateAssetCode(asset BasicAsset) error { + // Note: we are not using validateStellarAsset() function for AllowTrust operations because it requires the + // following : + // - asset is non-native + // - asset code is valid + // - asset issuer is not required. This is actually ignored by the operation + if asset == nil { + return errors.New("asset is undefined") + } + + if asset.IsNative() { + return errors.New("native (XLM) asset type is not allowed") + } + + _, err := asset.GetType() + if err != nil { + return err + } + return nil +} + +// validateChangeTrustAsset checks if the provided asset is valid for use in ChangeTrust operation. +// It returns an error if the asset is invalid. +// The asset must be non native (XLM) with a valid asset code and issuer. 
+func validateChangeTrustAsset(asset ChangeTrustAsset) error { + // Note: we are not using validateStellarAsset() function for ChangeTrust operations because it requires the + // following : + // - asset is non-native + // - asset code is valid + // - asset issuer is valid + err := validateAssetCode(asset) + if err != nil { + return err + } + + assetType, err := asset.GetType() + if err != nil { + return err + } else if assetType == AssetTypePoolShare { + // No issuer for these to validate. + return nil + } + + err = validateStellarPublicKey(asset.GetIssuer()) + if err != nil { + return errors.Errorf("asset issuer: %s", err.Error()) + } + + return nil +} + +// validatePassiveOffer checks if the fields of a CreatePassiveOffer struct are valid. +// It checks that the buying and selling assets are valid stellar assets, and that amount and price are valid. +// It returns an error if any field is invalid. +func validatePassiveOffer(buying, selling Asset, offerAmount string, price xdr.Price) error { + // Note: see discussion on how this can be improved: + // https://github.com/stellar/go/pull/1707#discussion_r321508440 + err := validateStellarAsset(buying) + if err != nil { + return NewValidationError("Buying", err.Error()) + } + + err = validateStellarAsset(selling) + if err != nil { + return NewValidationError("Selling", err.Error()) + } + + err = validateAmount(offerAmount) + if err != nil { + return NewValidationError("Amount", err.Error()) + } + + err = validatePrice(price) + if err != nil { + return NewValidationError("Price", err.Error()) + } + + return nil +} + +// validateOffer checks if the fields of ManageBuyOffer or ManageSellOffer struct are valid. +// It checks that the buying and selling assets are valid stellar assets, and that amount, price and offerID +// are valid. It returns an error if any field is invalid. +func validateOffer(buying, selling Asset, offerAmount string, price xdr.Price, offerID int64) error { + err := validatePassiveOffer(buying, selling, offerAmount, price) + if err != nil { + return err + } + + err = validateAmount(offerID) + if err != nil { + return NewValidationError("OfferID", err.Error()) + } + return nil +} + +// ValidationError is a custom error struct that holds validation errors of txnbuild's operation structs. +type ValidationError struct { + Field string // Field is the struct field on which the validation error occured. + Message string // Message is the validation error message. +} + +// Error for ValidationError struct implements the error interface. +func (opError *ValidationError) Error() string { + return fmt.Sprintf("Field: %s, Error: %s", opError.Field, opError.Message) +} + +// NewValidationError creates a ValidationError struct with the provided field and message values. +func NewValidationError(field, message string) *ValidationError { + return &ValidationError{ + Field: field, + Message: message, + } +} + +// ParseAssetString parses an asset string in canonical form (SEP-11) into an Asset structure. +// https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0011.md#asset +func ParseAssetString(canonical string) (Asset, error) { + assets, err := xdr.BuildAssets(canonical) + if err != nil { + return nil, errors.Wrap(err, "error parsing asset string") + } + + if len(assets) != 1 { + return nil, errors.New("error parsing out a single asset") + } + + // The above returned a list, so we'll need to grab the first element. 
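+ // assetFromXDR converts the xdr.Asset back into the matching txnbuild type
+ // (NativeAsset or CreditAsset).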
+ asset, err := assetFromXDR(assets[0]) + if err != nil { + return nil, errors.Wrap(err, "error parsing asset string via XDR types") + } + + return asset, nil +} diff --git a/txnbuild/helpers_test.go b/txnbuild/helpers_test.go new file mode 100644 index 0000000000..daff17b034 --- /dev/null +++ b/txnbuild/helpers_test.go @@ -0,0 +1,411 @@ +package txnbuild + +import ( + "fmt" + "testing" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newKeypair0() *keypair.Full { + // Address: GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3 + return newKeypair("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") +} + +func newKeypair1() *keypair.Full { + // Address: GAS4V4O2B7DW5T7IQRPEEVCRXMDZESKISR7DVIGKZQYYV3OSQ5SH5LVP + return newKeypair("SBMSVD4KKELKGZXHBUQTIROWUAPQASDX7KEJITARP4VMZ6KLUHOGPTYW") +} + +func newKeypair2() *keypair.Full { + // Address: GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H + return newKeypair("SBZVMB74Z76QZ3ZOY7UTDFYKMEGKW5XFJEB6PFKBF4UYSSWHG4EDH7PY") +} + +func newKeypair(seed string) *keypair.Full { + myKeypair, _ := keypair.Parse(seed) + return myKeypair.(*keypair.Full) +} + +func check(err error) { + if err != nil { + panic(err) + } +} + +func checkChallengeTx(txeBase64, anchorName string) (bool, error) { + var txXDR xdr.TransactionEnvelope + err := xdr.SafeUnmarshalBase64(txeBase64, &txXDR) + if err != nil { + return false, err + } + op := txXDR.Operations()[0] + if (xdr.OperationTypeManageData == op.Body.Type) && (op.Body.ManageDataOp.DataName == xdr.String64(anchorName+" auth")) { + return true, nil + } + return false, errors.New("invalid challenge tx") +} + +func unmarshalBase64(txeB64 string) (xdr.TransactionEnvelope, error) { + var xdrEnv xdr.TransactionEnvelope + err := xdr.SafeUnmarshalBase64(txeB64, &xdrEnv) + return xdrEnv, err +} + +func newSignedTransaction( + params TransactionParams, + network string, + keypairs ...*keypair.Full, +) (string, error) { + tx, err := NewTransaction(params) + if err != nil { + return "", errors.Wrap(err, "couldn't create transaction") + } + + tx, err = tx.Sign(network, keypairs...) + if err != nil { + return "", errors.Wrap(err, "couldn't sign transaction") + } + + txeBase64, err := tx.Base64() + if err != nil { + return "", errors.Wrap(err, "couldn't encode transaction") + } + + return txeBase64, err +} + +func newSignedFeeBumpTransaction( + params FeeBumpTransactionParams, + network string, + keypairs ...*keypair.Full, +) (string, error) { + tx, err := NewFeeBumpTransaction(params) + if err != nil { + return "", errors.Wrap(err, "couldn't create transaction") + } + + tx, err = tx.Sign(network, keypairs...) 
+ if err != nil { + return "", errors.Wrap(err, "couldn't sign transaction") + } + + txeBase64, err := tx.Base64() + if err != nil { + return "", errors.Wrap(err, "couldn't encode transaction") + } + + return txeBase64, err +} + +func convertToV0(tx *Transaction) { + signatures := tx.Signatures() + tx.envelope.V0 = &xdr.TransactionV0Envelope{ + Tx: xdr.TransactionV0{ + SourceAccountEd25519: *tx.envelope.SourceAccount().Ed25519, + Fee: xdr.Uint32(tx.envelope.Fee()), + SeqNum: xdr.SequenceNumber(tx.envelope.SeqNum()), + TimeBounds: tx.envelope.V1.Tx.TimeBounds, + Memo: tx.envelope.Memo(), + Operations: tx.envelope.Operations(), + }, + Signatures: signatures, + } + tx.envelope.V1 = nil + tx.envelope.Type = xdr.EnvelopeTypeEnvelopeTypeTxV0 +} + +func TestValidateStellarPublicKey(t *testing.T) { + validKey := "GDWZCOEQRODFCH6ISYQPWY67L3ULLWS5ISXYYL5GH43W7YFMTLB65PYM" + err := validateStellarPublicKey(validKey) + assert.NoError(t, err, "public key should be valid") + + invalidKey := "GDWZCOEQRODFCH6ISYQPWY67L3ULLWS5ISXYYL5GH43W7Y" + err = validateStellarPublicKey(invalidKey) + expectedErrMsg := "GDWZCOEQRODFCH6ISYQPWY67L3ULLWS5ISXYYL5GH43W7Y is not a valid stellar public key" + require.EqualError(t, err, expectedErrMsg, "public key should be invalid") + + invalidKey = "" + err = validateStellarPublicKey(invalidKey) + expectedErrMsg = "public key is undefined" + require.EqualError(t, err, expectedErrMsg, "public key should be invalid") + + invalidKey = "SBCVMMCBEDB64TVJZFYJOJAERZC4YVVUOE6SYR2Y76CBTENGUSGWRRVO" + err = validateStellarPublicKey(invalidKey) + expectedErrMsg = "SBCVMMCBEDB64TVJZFYJOJAERZC4YVVUOE6SYR2Y76CBTENGUSGWRRVO is not a valid stellar public key" + require.EqualError(t, err, expectedErrMsg, "public key should be invalid") +} + +func TestValidateStellarAssetWithValidAsset(t *testing.T) { + nativeAsset := NativeAsset{} + err := validateStellarAsset(nativeAsset) + assert.NoError(t, err) + + kp0 := newKeypair0() + creditAsset := CreditAsset{"XYZ", kp0.Address()} + err = validateStellarAsset(creditAsset) + assert.NoError(t, err) +} + +func TestValidateStellarAssetWithInValidAsset(t *testing.T) { + err := validateStellarAsset(nil) + assert.Error(t, err) + expectedErrMsg := "asset is undefined" + require.EqualError(t, err, expectedErrMsg, "An asset is required") + + kp0 := newKeypair0() + creditAssetNoCode := CreditAsset{Code: "", Issuer: kp0.Address()} + err = validateStellarAsset(creditAssetNoCode) + assert.Error(t, err) + expectedErrMsg = "asset code length must be between 1 and 12 characters" + require.EqualError(t, err, expectedErrMsg, "An asset code is required") + + creditAssetNoIssuer := CreditAsset{Code: "ABC", Issuer: ""} + err = validateStellarAsset(creditAssetNoIssuer) + assert.Error(t, err) + expectedErrMsg = "asset issuer: public key is undefined" + require.EqualError(t, err, expectedErrMsg, "An asset issuer is required") +} + +func TestValidateAmount(t *testing.T) { + err := validateAmount(int64(10)) + assert.NoError(t, err) + + err = validateAmount("10") + assert.NoError(t, err) + + err = validateAmount(int64(0)) + assert.NoError(t, err) + + err = validateAmount("0") + assert.NoError(t, err) +} + +func TestValidateAmountInvalidValue(t *testing.T) { + err := validateAmount(int64(-10)) + assert.Error(t, err) + expectedErrMsg := "amount can not be negative" + require.EqualError(t, err, expectedErrMsg, "should be a valid stellar amount") + + err = validateAmount("-10") + assert.Error(t, err) + expectedErrMsg = "amount can not be negative" + require.EqualError(t, err, 
expectedErrMsg, "should be a valid stellar amount") + + err = validateAmount(10) + assert.Error(t, err) + expectedErrMsg = "could not parse expected numeric value 10" + require.EqualError(t, err, expectedErrMsg, "should be a valid stellar amount") + + err = validateAmount("abc") + assert.Error(t, err) + expectedErrMsg = "invalid amount format: abc" + require.EqualError(t, err, expectedErrMsg, "should be a valid stellar amount") +} + +func TestValidateAllowTrustAsset(t *testing.T) { + err := validateAssetCode(nil) + assert.Error(t, err) + expectedErrMsg := "asset is undefined" + require.EqualError(t, err, expectedErrMsg, "An asset is required") + + err = validateAssetCode(NativeAsset{}) + assert.Error(t, err) + expectedErrMsg = "native (XLM) asset type is not allowed" + require.EqualError(t, err, expectedErrMsg, "An asset is required") + + // allow trust asset does not require asset issuer + atAsset := CreditAsset{Code: "ABCD"} + err = validateAssetCode(atAsset) + assert.NoError(t, err) +} + +func TestValidateChangeTrustAsset(t *testing.T) { + err := validateChangeTrustAsset(nil) + assert.Error(t, err) + expectedErrMsg := "asset is undefined" + require.EqualError(t, err, expectedErrMsg, "An asset is required") + + err = validateChangeTrustAsset(NativeAsset{}.MustToChangeTrustAsset()) + assert.Error(t, err) + expectedErrMsg = "native (XLM) asset type is not allowed" + require.EqualError(t, err, expectedErrMsg, "A custom asset is required") + + kp0 := newKeypair0() + ctAsset0 := CreditAsset{Issuer: kp0.Address()} + err = validateChangeTrustAsset(ctAsset0.MustToChangeTrustAsset()) + assert.Error(t, err) + expectedErrMsg = "asset code length must be between 1 and 12 characters" + require.EqualError(t, err, expectedErrMsg, "asset code is required") + + ctAsset1 := CreditAsset{Code: "ABCD"} + err = validateChangeTrustAsset(ctAsset1.MustToChangeTrustAsset()) + assert.Error(t, err) + expectedErrMsg = "asset issuer: public key is undefined" + require.EqualError(t, err, expectedErrMsg, "asset issuer is required") + + ctAsset2 := CreditAsset{Code: "ABCD", Issuer: kp0.Address()} + err = validateChangeTrustAsset(ctAsset2.MustToChangeTrustAsset()) + assert.NoError(t, err) +} + +func TestValidatePassiveOfferZeroValues(t *testing.T) { + cpo := CreatePassiveSellOffer{} + err := validatePassiveOffer(cpo.Buying, cpo.Selling, cpo.Amount, cpo.Price) + assert.Error(t, err) + expectedErrMsg := "Field: Buying, Error: asset is undefined" + require.EqualError(t, err, expectedErrMsg, "Buying asset is required") +} + +func TestValidatePassiveOfferInvalidAmount(t *testing.T) { + kp0 := newKeypair0() + buying := CreditAsset{Code: "ABCD", Issuer: kp0.Address()} + selling := NativeAsset{} + cpo := CreatePassiveSellOffer{ + Buying: buying, + Selling: selling, + Price: xdr.Price{1, 1}, + Amount: "-1", + } + err := validatePassiveOffer(cpo.Buying, cpo.Selling, cpo.Amount, cpo.Price) + assert.Error(t, err) + expectedErrMsg := "Field: Amount, Error: amount can not be negative" + require.EqualError(t, err, expectedErrMsg, "valid amount is required") +} + +func TestValidatePassiveOfferInvalidPrice(t *testing.T) { + kp0 := newKeypair0() + buying := CreditAsset{Code: "ABCD", Issuer: kp0.Address()} + selling := NativeAsset{} + cpo := CreatePassiveSellOffer{ + Buying: buying, + Selling: selling, + Price: xdr.Price{-1, 1}, + Amount: "10", + } + err := validatePassiveOffer(cpo.Buying, cpo.Selling, cpo.Amount, cpo.Price) + assert.Error(t, err) + expectedErrMsg := "Field: Price, Error: price cannot be negative: -1/1" + 
require.EqualError(t, err, expectedErrMsg, "valid price is required") +} + +func TestValidatePassiveOfferInvalidAsset(t *testing.T) { + buying := NativeAsset{} + selling := CreditAsset{Code: "ABCD"} + cpo := CreatePassiveSellOffer{ + Buying: buying, + Selling: selling, + Price: xdr.Price{1, 1}, + Amount: "10", + } + err := validatePassiveOffer(cpo.Buying, cpo.Selling, cpo.Amount, cpo.Price) + assert.Error(t, err) + expectedErrMsg := "Field: Selling, Error: asset issuer: public key is undefined" + require.EqualError(t, err, expectedErrMsg, "Selling asset is required") + + kp0 := newKeypair0() + buying1 := CreditAsset{Issuer: kp0.Address()} + selling1 := NativeAsset{} + cpo1 := CreatePassiveSellOffer{ + Buying: buying1, + Selling: selling1, + Price: xdr.Price{1, 1}, + Amount: "10", + } + err = validatePassiveOffer(cpo1.Buying, cpo1.Selling, cpo1.Amount, cpo1.Price) + assert.Error(t, err) + expectedErrMsg = "Field: Buying, Error: asset code length must be between 1 and 12 characters" + require.EqualError(t, err, expectedErrMsg, "Selling asset is required") +} + +func TestValidateOfferManageBuyOffer(t *testing.T) { + kp0 := newKeypair0() + buying := CreditAsset{Code: "ABCD", Issuer: kp0.Address()} + selling := NativeAsset{} + mbo := ManageBuyOffer{ + Buying: buying, + Selling: selling, + Price: xdr.Price{1, 1}, + Amount: "10", + OfferID: -1, + } + err := validateOffer(mbo.Buying, mbo.Selling, mbo.Amount, mbo.Price, mbo.OfferID) + assert.Error(t, err) + expectedErrMsg := "Field: OfferID, Error: amount can not be negative" + require.EqualError(t, err, expectedErrMsg, "valid offerID is required") +} + +func TestValidateOfferManageSellOffer(t *testing.T) { + kp0 := newKeypair0() + buying := CreditAsset{Code: "ABCD", Issuer: kp0.Address()} + selling := NativeAsset{} + mso := ManageSellOffer{ + Buying: buying, + Selling: selling, + Price: xdr.Price{1, 1}, + Amount: "10", + OfferID: -1, + } + err := validateOffer(mso.Buying, mso.Selling, mso.Amount, mso.Price, mso.OfferID) + assert.Error(t, err) + expectedErrMsg := "Field: OfferID, Error: amount can not be negative" + require.EqualError(t, err, expectedErrMsg, "valid offerID is required") +} + +func TestAssetStringParsing(t *testing.T) { + kp0 := newKeypair0() + cred4 := CreditAsset{Code: "ABCD", Issuer: kp0.Address()} + xdr, err := cred4.ToXDR() + assert.NoError(t, err) + cred4String := xdr.StringCanonical() + + kp1 := newKeypair1() + cred12 := CreditAsset{Code: "ABCD1234EFGH", Issuer: kp1.Address()} + xdr, err = cred12.ToXDR() + assert.NoError(t, err) + cred12String := xdr.StringCanonical() + + native := NativeAsset{} + xdr, err = native.ToXDR() + assert.NoError(t, err) + nativeString := xdr.StringCanonical() + + assets := make([]Asset, 3) + for i, input := range []string{nativeString, cred4String, cred12String} { + actual, innerErr := ParseAssetString(input) + assert.NoError(t, innerErr) + assets[i] = actual + } + + compareAssets := func(expected Asset, actual Asset) bool { + expXdr, innerErr := expected.ToXDR() + if innerErr != nil { + return false + } + + actXdr, innerErr := actual.ToXDR() + if innerErr != nil { + return false + } + + return expXdr.Equals(actXdr) + } + + assert.True(t, compareAssets(native, assets[0])) + assert.True(t, compareAssets(cred4, assets[1])) + assert.True(t, compareAssets(cred12, assets[2])) + + // Now sanity-check some basic error cases + + result, err := ParseAssetString("erroneous:maximus") + assert.Error(t, err) + assert.Equal(t, nil, result) + + result, err = ParseAssetString(fmt.Sprintf("ABCD:%s,EFGH:%s", 
kp0.Address(), kp1.Address())) + assert.Error(t, err) + assert.Equal(t, nil, result) +} diff --git a/txnbuild/inflation.go b/txnbuild/inflation.go new file mode 100644 index 0000000000..e775891396 --- /dev/null +++ b/txnbuild/inflation.go @@ -0,0 +1,46 @@ +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// Inflation represents the Stellar inflation operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type Inflation struct { + SourceAccount string +} + +// BuildXDR for Inflation returns a fully configured XDR Operation. +func (inf *Inflation) BuildXDR() (xdr.Operation, error) { + opType := xdr.OperationTypeInflation + body, err := xdr.NewOperationBody(opType, nil) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, inf.SourceAccount) + return op, nil +} + +// FromXDR for Inflation initialises the txnbuild struct from the corresponding xdr Operation. +func (inf *Inflation) FromXDR(xdrOp xdr.Operation) error { + if xdrOp.Body.Type != xdr.OperationTypeInflation { + return errors.New("error parsing inflation operation from xdr") + } + inf.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + return nil +} + +// Validate for Inflation is just a method that implements the Operation interface. No logic is actually performed +// because the inflation operation does not have any required field. Nil is always returned. +func (inf *Inflation) Validate() error { + // no required fields, return nil. + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. +func (inf *Inflation) GetSourceAccount() string { + return inf.SourceAccount +} diff --git a/txnbuild/inflation_test.go b/txnbuild/inflation_test.go new file mode 100644 index 0000000000..038dc10d08 --- /dev/null +++ b/txnbuild/inflation_test.go @@ -0,0 +1,16 @@ +package txnbuild + +import "testing" + +func TestInflationRoundtrip(t *testing.T) { + inflation := Inflation{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + } + testOperationsMarshallingRoundtrip(t, []Operation{&inflation}, false) + + // with muxed accounts + inflation = Inflation{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + } + testOperationsMarshallingRoundtrip(t, []Operation{&inflation}, true) +} diff --git a/txnbuild/liquidity_pool_deposit.go b/txnbuild/liquidity_pool_deposit.go new file mode 100644 index 0000000000..479f7d1a33 --- /dev/null +++ b/txnbuild/liquidity_pool_deposit.go @@ -0,0 +1,141 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite +package txnbuild + +import ( + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// LiquidityPoolDeposit represents the Stellar liquidity pool deposit operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type LiquidityPoolDeposit struct { + SourceAccount string + LiquidityPoolID LiquidityPoolId + MaxAmountA string + MaxAmountB string + MinPrice xdr.Price + MaxPrice xdr.Price +} + +// NewLiquidityPoolDeposit creates a new LiquidityPoolDeposit operation, +// checking the ordering assets so we generate the correct pool id. minPrice, +// and maxPrice are in terms of a/b. Each AssetAmount is a pair of the asset +// with the maximum amount of that asset to deposit. 
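+//
+// A minimal sketch, mirroring liquidity_pool_deposit_test.go (same addresses,
+// amounts and prices as the tests):
+//
+//	op, err := NewLiquidityPoolDeposit(
+//		"GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H",
+//		AssetAmount{NativeAsset{}, "0.1000000"},
+//		AssetAmount{CreditAsset{Code: "EUR", Issuer: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}, "0.2000000"},
+//		price.MustParse("0.3"),
+//		price.MustParse("0.4"),
+//	)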
+func NewLiquidityPoolDeposit( + sourceAccount string, + a, b AssetAmount, + minPrice, + maxPrice xdr.Price, +) (LiquidityPoolDeposit, error) { + if b.Asset.LessThan(a.Asset) { + return LiquidityPoolDeposit{}, errors.New("AssetA must be <= AssetB") + } + + poolId, err := NewLiquidityPoolId(a.Asset, b.Asset) + if err != nil { + return LiquidityPoolDeposit{}, err + } + + return LiquidityPoolDeposit{ + SourceAccount: sourceAccount, + LiquidityPoolID: poolId, + MaxAmountA: a.Amount, + MaxAmountB: b.Amount, + MinPrice: minPrice, + MaxPrice: maxPrice, + }, nil +} + +// BuildXDR for LiquidityPoolDeposit returns a fully configured XDR Operation. +func (lpd *LiquidityPoolDeposit) BuildXDR() (xdr.Operation, error) { + xdrLiquidityPoolId, err := lpd.LiquidityPoolID.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "couldn't build liquidity pool ID XDR") + } + + xdrMaxAmountA, err := amount.Parse(lpd.MaxAmountA) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse 'MaxAmountA'") + } + + xdrMaxAmountB, err := amount.Parse(lpd.MaxAmountB) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse 'MaxAmountB'") + } + + xdrOp := xdr.LiquidityPoolDepositOp{ + LiquidityPoolId: xdrLiquidityPoolId, + MaxAmountA: xdrMaxAmountA, + MaxAmountB: xdrMaxAmountB, + MinPrice: lpd.MinPrice, + MaxPrice: lpd.MaxPrice, + } + + opType := xdr.OperationTypeLiquidityPoolDeposit + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, lpd.SourceAccount) + return op, nil +} + +// FromXDR for LiquidityPoolDeposit initializes the txnbuild struct from the corresponding xdr Operation. +func (lpd *LiquidityPoolDeposit) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetLiquidityPoolDepositOp() + if !ok { + return errors.New("error parsing liquidity_pool_deposit operation from xdr") + } + + liquidityPoolID, err := liquidityPoolIdFromXDR(result.LiquidityPoolId) + if err != nil { + return errors.New("error parsing LiquidityPoolId in liquidity_pool_deposit operation from xdr") + } + lpd.LiquidityPoolID = liquidityPoolID + + lpd.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + lpd.MaxAmountA = amount.String(result.MaxAmountA) + lpd.MaxAmountB = amount.String(result.MaxAmountB) + if result.MinPrice != (xdr.Price{}) { + lpd.MinPrice = result.MinPrice + } + if result.MaxPrice != (xdr.Price{}) { + lpd.MaxPrice = result.MaxPrice + } + + return nil +} + +// Validate for LiquidityPoolDeposit validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (lpd *LiquidityPoolDeposit) Validate() error { + err := validateAmount(lpd.MaxAmountA) + if err != nil { + return NewValidationError("MaxAmountA", err.Error()) + } + + err = validateAmount(lpd.MaxAmountB) + if err != nil { + return NewValidationError("MaxAmountB", err.Error()) + } + + err = validatePrice(lpd.MinPrice) + if err != nil { + return NewValidationError("MinPrice", err.Error()) + } + + err = validatePrice(lpd.MaxPrice) + if err != nil { + return NewValidationError("MaxPrice", err.Error()) + } + + return nil +} + +// GetSourceAccount returns the source account of the operation, or nil if not +// set. 
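The Validate method above is what rejects malformed input before a transaction is assembled; a minimal sketch of triggering it by hand (the bad amount string is deliberate, and the exact error text may differ slightly):

package main

import (
	"fmt"

	"github.com/stellar/go/price"
	"github.com/stellar/go/txnbuild"
)

func main() {
	op := txnbuild.LiquidityPoolDeposit{
		MaxAmountA: "abc", // not a valid amount string
		MaxAmountB: "200",
		MinPrice:   price.MustParse("0.45"),
		MaxPrice:   price.MustParse("0.55"),
	}
	if err := op.Validate(); err != nil {
		fmt.Println(err) // e.g. "Field: MaxAmountA, Error: invalid amount format: ..."
	}
}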
+func (lpd *LiquidityPoolDeposit) GetSourceAccount() string { + return lpd.SourceAccount +} diff --git a/txnbuild/liquidity_pool_deposit_test.go b/txnbuild/liquidity_pool_deposit_test.go new file mode 100644 index 0000000000..15f53ad67a --- /dev/null +++ b/txnbuild/liquidity_pool_deposit_test.go @@ -0,0 +1,83 @@ +package txnbuild + +import ( + "github.com/stellar/go/price" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewLiquidityPoolDeposit(t *testing.T) { + assetA := NativeAsset{} + assetB := CreditAsset{ + Code: "EUR", + Issuer: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + } + + poolId, err := NewLiquidityPoolId(assetA, assetB) + require.NoError(t, err) + + t.Run("basic", func(t *testing.T) { + lpd, err := NewLiquidityPoolDeposit( + "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + AssetAmount{assetA, "0.1000000"}, + AssetAmount{assetB, "0.2000000"}, + price.MustParse("0.3"), + price.MustParse("0.4"), + ) + require.NoError(t, err) + assert.Equal(t, LiquidityPoolDeposit{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + LiquidityPoolID: poolId, + MaxAmountA: "0.1000000", + MaxAmountB: "0.2000000", + MinPrice: price.MustParse("0.3"), + MaxPrice: price.MustParse("0.4"), + }, lpd) + }) + + t.Run("reversed assets", func(t *testing.T) { + _, err := NewLiquidityPoolDeposit( + "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + AssetAmount{assetB, "0.1000000"}, + AssetAmount{assetA, "0.2000000"}, + price.MustParse("0.3"), + price.MustParse("0.4"), + ) + require.EqualError(t, err, "AssetA must be <= AssetB") + }) +} + +func TestLiquidityPoolDepositRoundTrip(t *testing.T) { + assetA := NativeAsset{} + assetB := CreditAsset{ + Code: "EUR", + Issuer: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + } + poolId, err := NewLiquidityPoolId(assetA, assetB) + require.NoError(t, err) + + lpd := &LiquidityPoolDeposit{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + LiquidityPoolID: poolId, + MaxAmountA: "0.1000000", + MaxAmountB: "0.2000000", + MinPrice: price.MustParse("0.3"), + MaxPrice: price.MustParse("0.4"), + } + + testOperationsMarshallingRoundtrip(t, []Operation{lpd}, false) + + // with muxed accounts + lpd = &LiquidityPoolDeposit{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + LiquidityPoolID: poolId, + MaxAmountA: "0.1000000", + MaxAmountB: "0.2000000", + MinPrice: price.MustParse("0.3"), + MaxPrice: price.MustParse("0.4"), + } + + testOperationsMarshallingRoundtrip(t, []Operation{lpd}, true) +} diff --git a/txnbuild/liquidity_pool_id.go b/txnbuild/liquidity_pool_id.go new file mode 100644 index 0000000000..ec32939020 --- /dev/null +++ b/txnbuild/liquidity_pool_id.go @@ -0,0 +1,42 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite +package txnbuild + +import ( + "fmt" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// LiquidityPoolId represents the Stellar liquidity pool id. 
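Because the identifier is derived purely from the ordered asset pair and the constant-product fee, it can be computed ahead of time; a short sketch reusing the asset pair from the test further down:

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/txnbuild"
)

func main() {
	a := txnbuild.NativeAsset{}
	b := txnbuild.CreditAsset{Code: "ABC", Issuer: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"}

	// Asset order matters: here the native asset sorts first, so it must be
	// passed as the first argument or an error is returned.
	id, err := txnbuild.NewLiquidityPoolId(a, b)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pool id: %x\n", id)

	// The same value converts losslessly to the XDR representation.
	poolID, err := id.ToXDR()
	if err != nil {
		log.Fatal(err)
	}
	_ = poolID // xdr.PoolId
}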
+type LiquidityPoolId [32]byte + +func NewLiquidityPoolId(a, b Asset) (LiquidityPoolId, error) { + if b.LessThan(a) { + return LiquidityPoolId{}, fmt.Errorf("AssetA must be <= AssetB") + } + + xdrAssetA, err := a.ToXDR() + if err != nil { + return LiquidityPoolId{}, errors.Wrap(err, "failed to build XDR AssetA ID") + } + + xdrAssetB, err := b.ToXDR() + if err != nil { + return LiquidityPoolId{}, errors.Wrap(err, "failed to build XDR AssetB ID") + } + + id, err := xdr.NewPoolId(xdrAssetA, xdrAssetB, xdr.LiquidityPoolFeeV18) + if err != nil { + return LiquidityPoolId{}, errors.Wrap(err, "failed to build XDR liquidity pool id") + } + return LiquidityPoolId(id), nil +} + +func (lpi LiquidityPoolId) ToXDR() (xdr.PoolId, error) { + return xdr.PoolId(lpi), nil +} + +func liquidityPoolIdFromXDR(poolId xdr.PoolId) (LiquidityPoolId, error) { + return LiquidityPoolId(poolId), nil +} diff --git a/txnbuild/liquidity_pool_id_test.go b/txnbuild/liquidity_pool_id_test.go new file mode 100644 index 0000000000..377fd2104b --- /dev/null +++ b/txnbuild/liquidity_pool_id_test.go @@ -0,0 +1,27 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewLiquidityPoolId(t *testing.T) { + a := NativeAsset{} + b := CreditAsset{Code: "ABC", Issuer: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"} + + id, err := NewLiquidityPoolId(a, b) + if assert.NoError(t, err) { + assert.Equal(t, LiquidityPoolId([32]byte{ + 0xcc, 0x22, 0x41, 0x49, 0x97, 0xd7, 0xe3, 0xd9, + 0xa9, 0xac, 0x3b, 0x1d, 0x65, 0xca, 0x9c, 0xc3, + 0xe5, 0xf3, 0x5c, 0xe3, 0x3e, 0x0b, 0xd6, 0xa8, + 0x85, 0x64, 0x8b, 0x11, 0xaa, 0xa3, 0xb7, 0x2d, + }), id, "pool id should match") + } + + // Wrong asset id order should fail. If users mess this up, and we were to + // silently fix it they could set the wrong MaxAmounts when depositing. 
+ _, err = NewLiquidityPoolId(b, a) + assert.EqualError(t, err, "AssetA must be <= AssetB") +} diff --git a/txnbuild/liquidity_pool_parameters.go b/txnbuild/liquidity_pool_parameters.go new file mode 100644 index 0000000000..624ec0a2b7 --- /dev/null +++ b/txnbuild/liquidity_pool_parameters.go @@ -0,0 +1,58 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite +package txnbuild + +import ( + "fmt" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +const LiquidityPoolFeeV18 = xdr.LiquidityPoolFeeV18 + +// LiquidityPoolParameters represents the Stellar liquidity pool parameters +type LiquidityPoolParameters struct { + AssetA Asset + AssetB Asset + Fee int32 +} + +func (lpi LiquidityPoolParameters) ToXDR() (xdr.LiquidityPoolParameters, error) { + xdrAssetA, err := lpi.AssetA.ToXDR() + if err != nil { + return xdr.LiquidityPoolParameters{}, errors.Wrap(err, "failed to build XDR AssetA ID") + } + + xdrAssetB, err := lpi.AssetB.ToXDR() + if err != nil { + return xdr.LiquidityPoolParameters{}, errors.Wrap(err, "failed to build XDR AssetB ID") + } + + return xdr.LiquidityPoolParameters{ + Type: xdr.LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &xdr.LiquidityPoolConstantProductParameters{ + AssetA: xdrAssetA, + AssetB: xdrAssetB, + Fee: xdr.Int32(lpi.Fee), + }, + }, nil +} + +func liquidityPoolParametersFromXDR(params xdr.LiquidityPoolParameters) (LiquidityPoolParameters, error) { + if params.Type != xdr.LiquidityPoolTypeLiquidityPoolConstantProduct { + return LiquidityPoolParameters{}, fmt.Errorf("failed to parse XDR type") + } + assetA, err := assetFromXDR(params.ConstantProduct.AssetA) + if err != nil { + return LiquidityPoolParameters{}, errors.Wrap(err, "failed to parse XDR AssetA") + } + assetB, err := assetFromXDR(params.ConstantProduct.AssetB) + if err != nil { + return LiquidityPoolParameters{}, errors.Wrap(err, "failed to parse XDR AssetB") + } + return LiquidityPoolParameters{ + AssetA: assetA, + AssetB: assetB, + Fee: int32(params.ConstantProduct.Fee), + }, nil +} diff --git a/txnbuild/liquidity_pool_withdraw.go b/txnbuild/liquidity_pool_withdraw.go new file mode 100644 index 0000000000..d543263b45 --- /dev/null +++ b/txnbuild/liquidity_pool_withdraw.go @@ -0,0 +1,133 @@ +//lint:file-ignore U1001 Ignore all unused code, staticcheck doesn't understand testify/suite +package txnbuild + +import ( + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// LiquidityPoolWithdraw represents the Stellar liquidity pool withdraw operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type LiquidityPoolWithdraw struct { + SourceAccount string + LiquidityPoolID LiquidityPoolId + Amount string + MinAmountA string + MinAmountB string +} + +// NewLiquidityPoolWithdraw creates a new LiquidityPoolWithdraw operation, +// checking the ordering assets so we generate the correct pool id. Each +// AssetAmount is a pair of the asset with the minimum amount of that asset to +// withdraw. 
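A usage sketch mirroring the deposit constructor: the separate amount argument is the number of pool shares to redeem, and each AssetAmount carries the minimum acceptable payout for that asset (values and addresses are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/txnbuild"
)

func main() {
	native := txnbuild.NativeAsset{}
	eur := txnbuild.CreditAsset{Code: "EUR", Issuer: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}

	// Redeem 52.5 pool shares, requiring at least 10 XLM and 20 EUR back.
	op, err := txnbuild.NewLiquidityPoolWithdraw(
		"GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H",
		txnbuild.AssetAmount{Asset: native, Amount: "10"},
		txnbuild.AssetAmount{Asset: eur, Amount: "20"},
		"52.5",
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%x min A: %s min B: %s\n", op.LiquidityPoolID, op.MinAmountA, op.MinAmountB)
}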
+func NewLiquidityPoolWithdraw( + sourceAccount string, + a, b AssetAmount, + amount string, +) (LiquidityPoolWithdraw, error) { + if b.Asset.LessThan(a.Asset) { + return LiquidityPoolWithdraw{}, errors.New("AssetA must be <= AssetB") + } + + poolId, err := NewLiquidityPoolId(a.Asset, b.Asset) + if err != nil { + return LiquidityPoolWithdraw{}, err + } + + return LiquidityPoolWithdraw{ + SourceAccount: sourceAccount, + LiquidityPoolID: poolId, + Amount: amount, + MinAmountA: a.Amount, + MinAmountB: b.Amount, + }, nil +} + +// BuildXDR for LiquidityPoolWithdraw returns a fully configured XDR Operation. +func (lpd *LiquidityPoolWithdraw) BuildXDR() (xdr.Operation, error) { + xdrLiquidityPoolId, err := lpd.LiquidityPoolID.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "couldn't build liquidity pool ID XDR") + } + + xdrAmount, err := amount.Parse(lpd.Amount) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse 'Amount'") + } + + xdrMinAmountA, err := amount.Parse(lpd.MinAmountA) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse 'MinAmountA'") + } + + xdrMinAmountB, err := amount.Parse(lpd.MinAmountB) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse 'MinAmountB'") + } + + xdrOp := xdr.LiquidityPoolWithdrawOp{ + LiquidityPoolId: xdrLiquidityPoolId, + Amount: xdrAmount, + MinAmountA: xdrMinAmountA, + MinAmountB: xdrMinAmountB, + } + + opType := xdr.OperationTypeLiquidityPoolWithdraw + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, lpd.SourceAccount) + return op, nil +} + +// FromXDR for LiquidityPoolWithdraw initializes the txnbuild struct from the corresponding xdr Operation. +func (lpd *LiquidityPoolWithdraw) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetLiquidityPoolWithdrawOp() + if !ok { + return errors.New("error parsing liquidity_pool_withdraw operation from xdr") + } + + liquidityPoolID, err := liquidityPoolIdFromXDR(result.LiquidityPoolId) + if err != nil { + return errors.New("error parsing LiquidityPoolId in liquidity_pool_withdraw operation from xdr") + } + lpd.LiquidityPoolID = liquidityPoolID + + lpd.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + lpd.Amount = amount.String(result.Amount) + lpd.MinAmountA = amount.String(result.MinAmountA) + lpd.MinAmountB = amount.String(result.MinAmountB) + + return nil +} + +// Validate for LiquidityPoolWithdraw validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (lpd *LiquidityPoolWithdraw) Validate() error { + err := validateAmount(lpd.Amount) + if err != nil { + return NewValidationError("Amount", err.Error()) + } + + err = validateAmount(lpd.MinAmountA) + if err != nil { + return NewValidationError("MinAmountA", err.Error()) + } + + err = validateAmount(lpd.MinAmountB) + if err != nil { + return NewValidationError("MinAmountB", err.Error()) + } + + return nil + +} + +// GetSourceAccount returns the source account of the operation, or nil if not +// set. 
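The BuildXDR/FromXDR pair is symmetric, which is what the roundtrip tests below rely on; a compact sketch of going out to XDR and back (assets and amounts reused from the earlier sketch):

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/txnbuild"
)

func main() {
	native := txnbuild.NativeAsset{}
	eur := txnbuild.CreditAsset{Code: "EUR", Issuer: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}

	original, err := txnbuild.NewLiquidityPoolWithdraw(
		"GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H",
		txnbuild.AssetAmount{Asset: native, Amount: "10"},
		txnbuild.AssetAmount{Asset: eur, Amount: "20"},
		"52.5",
	)
	if err != nil {
		log.Fatal(err)
	}

	// Compile to XDR and decode back into a fresh struct.
	xdrOp, err := original.BuildXDR()
	if err != nil {
		log.Fatal(err)
	}
	var decoded txnbuild.LiquidityPoolWithdraw
	if err := decoded.FromXDR(xdrOp); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.Amount, decoded.MinAmountA, decoded.MinAmountB) // 52.5000000 10.0000000 20.0000000
}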
+func (lpd *LiquidityPoolWithdraw) GetSourceAccount() string { + return lpd.SourceAccount +} diff --git a/txnbuild/liquidity_pool_withdraw_test.go b/txnbuild/liquidity_pool_withdraw_test.go new file mode 100644 index 0000000000..e503f36853 --- /dev/null +++ b/txnbuild/liquidity_pool_withdraw_test.go @@ -0,0 +1,77 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewLiquidityPoolWithdraw(t *testing.T) { + assetA := NativeAsset{} + assetB := CreditAsset{ + Code: "EUR", + Issuer: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + } + + poolId, err := NewLiquidityPoolId(assetA, assetB) + require.NoError(t, err) + + t.Run("basic", func(t *testing.T) { + lpd, err := NewLiquidityPoolWithdraw( + "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + AssetAmount{assetA, "0.1000000"}, + AssetAmount{assetB, "0.2000000"}, + "52.5", + ) + require.NoError(t, err) + assert.Equal(t, LiquidityPoolWithdraw{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + LiquidityPoolID: poolId, + Amount: "52.5", + MinAmountA: "0.1000000", + MinAmountB: "0.2000000", + }, lpd) + }) + + t.Run("reversed assets", func(t *testing.T) { + _, err := NewLiquidityPoolWithdraw( + "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + AssetAmount{assetB, "0.1000000"}, + AssetAmount{assetA, "0.2000000"}, + "52.5", + ) + require.EqualError(t, err, "AssetA must be <= AssetB") + }) +} + +func TestLiquidityPoolWithdrawRoundTrip(t *testing.T) { + assetA := NativeAsset{} + assetB := CreditAsset{ + Code: "EUR", + Issuer: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + } + poolId, err := NewLiquidityPoolId(assetA, assetB) + require.NoError(t, err) + + lpd := &LiquidityPoolWithdraw{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + LiquidityPoolID: poolId, + Amount: "0.1000000", + MinAmountA: "0.1000000", + MinAmountB: "0.2000000", + } + + testOperationsMarshallingRoundtrip(t, []Operation{lpd}, false) + + // with muxed accounts + lpd = &LiquidityPoolWithdraw{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + LiquidityPoolID: poolId, + Amount: "0.1000000", + MinAmountA: "0.1000000", + MinAmountB: "0.2000000", + } + + testOperationsMarshallingRoundtrip(t, []Operation{lpd}, true) +} diff --git a/txnbuild/manage_buy_offer.go b/txnbuild/manage_buy_offer.go new file mode 100644 index 0000000000..18f2e0d792 --- /dev/null +++ b/txnbuild/manage_buy_offer.go @@ -0,0 +1,90 @@ +package txnbuild + +import ( + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// ManageBuyOffer represents the Stellar manage buy offer operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type ManageBuyOffer struct { + Selling Asset + Buying Asset + Amount string + Price xdr.Price + OfferID int64 + SourceAccount string +} + +// BuildXDR for ManageBuyOffer returns a fully configured XDR Operation. 
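Price is an exact rational (xdr.Price), and price.MustParse keeps small decimal strings lossless when the operation is compiled to XDR; a short sketch mirroring TestManageBuyOfferPrice further down (issuer address reused from the tests):

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/price"
	"github.com/stellar/go/txnbuild"
)

func main() {
	op := txnbuild.ManageBuyOffer{
		Selling: txnbuild.CreditAsset{Code: "ABCD", Issuer: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"},
		Buying:  txnbuild.NativeAsset{},
		Amount:  "1",
		Price:   price.MustParse("0.000000001"),
		OfferID: 0, // 0 creates a new offer
	}

	xdrOp, err := op.BuildXDR()
	if err != nil {
		log.Fatal(err)
	}
	p := xdrOp.Body.ManageBuyOfferOp.Price
	fmt.Printf("%d/%d\n", p.N, p.D) // 1/1000000000
}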
+func (mo *ManageBuyOffer) BuildXDR() (xdr.Operation, error) { + xdrSelling, err := mo.Selling.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set XDR 'Selling' field") + } + + xdrBuying, err := mo.Buying.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set XDR 'Buying' field") + } + + xdrAmount, err := amount.Parse(mo.Amount) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse 'Amount'") + } + + opType := xdr.OperationTypeManageBuyOffer + xdrOp := xdr.ManageBuyOfferOp{ + Selling: xdrSelling, + Buying: xdrBuying, + BuyAmount: xdrAmount, + Price: mo.Price, + OfferId: xdr.Int64(mo.OfferID), + } + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, mo.SourceAccount) + return op, nil +} + +// FromXDR for ManageBuyOffer initialises the txnbuild struct from the corresponding xdr Operation. +func (mo *ManageBuyOffer) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetManageBuyOfferOp() + if !ok { + return errors.New("error parsing manage_buy_offer operation from xdr") + } + + mo.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + mo.OfferID = int64(result.OfferId) + mo.Amount = amount.String(result.BuyAmount) + mo.Price = result.Price + buyingAsset, err := assetFromXDR(result.Buying) + if err != nil { + return errors.Wrap(err, "error parsing buying_asset in manage_buy_offer operation") + } + mo.Buying = buyingAsset + + sellingAsset, err := assetFromXDR(result.Selling) + if err != nil { + return errors.Wrap(err, "error parsing selling_asset in manage_buy_offer operation") + } + mo.Selling = sellingAsset + return nil +} + +// Validate for ManageBuyOffer validates the required struct fields. It returns an error if any +// of the fields are invalid. Otherwise, it returns nil. +func (mo *ManageBuyOffer) Validate() error { + return validateOffer(mo.Buying, mo.Selling, mo.Amount, mo.Price, mo.OfferID) +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. 
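In practice these checks surface through NewTransaction, which runs Validate on each operation and prefixes failures with the operation type, as the tests further down assert; a condensed sketch (the sequence number, address, and empty asset code are placeholders taken from those tests):

package main

import (
	"fmt"

	"github.com/stellar/go/price"
	"github.com/stellar/go/txnbuild"
)

func main() {
	source := txnbuild.NewSimpleAccount("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", 41137196761092)

	badOffer := txnbuild.ManageBuyOffer{
		Selling: txnbuild.CreditAsset{Code: "", Issuer: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"}, // empty code is invalid
		Buying:  txnbuild.NativeAsset{},
		Amount:  "100",
		Price:   price.MustParse("0.01"),
	}

	_, err := txnbuild.NewTransaction(txnbuild.TransactionParams{
		SourceAccount:        &source,
		IncrementSequenceNum: false,
		Operations:           []txnbuild.Operation{&badOffer},
		BaseFee:              txnbuild.MinBaseFee,
		Timebounds:           txnbuild.NewInfiniteTimeout(),
	})
	fmt.Println(err) // validation failed for *txnbuild.ManageBuyOffer operation: Field: Selling, Error: ...
}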
+func (mo *ManageBuyOffer) GetSourceAccount() string { + return mo.SourceAccount +} diff --git a/txnbuild/manage_buy_offer_test.go b/txnbuild/manage_buy_offer_test.go new file mode 100644 index 0000000000..4bd49f5a86 --- /dev/null +++ b/txnbuild/manage_buy_offer_test.go @@ -0,0 +1,193 @@ +package txnbuild + +import ( + "github.com/stellar/go/price" + "github.com/stellar/go/xdr" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestManageBuyOfferValidateSellingAsset(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + buyOffer := ManageBuyOffer{ + Selling: CreditAsset{"", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "100", + Price: price.MustParse("0.01"), + OfferID: 0, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&buyOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageBuyOffer operation: Field: Selling, Error: asset code length must be between 1 and 12 characters" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageBuyOfferValidateBuyingAsset(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + buyOffer := ManageBuyOffer{ + Selling: CreditAsset{"ABC", kp0.Address()}, + Buying: CreditAsset{"XYZ", ""}, + Amount: "100", + Price: price.MustParse("0.01"), + OfferID: 0, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&buyOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageBuyOffer operation: Field: Buying, Error: asset issuer: public key is undefined" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageBuyOfferValidateAmount(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + buyOffer := ManageBuyOffer{ + Selling: CreditAsset{"ABCD", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "", + Price: price.MustParse("0.01"), + OfferID: 0, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&buyOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageBuyOffer operation: Field: Amount, Error: invalid amount format:" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageBuyOfferValidatePrice(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + buyOffer := ManageBuyOffer{ + Selling: CreditAsset{"ABCD", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "0", + Price: xdr.Price{-1, 100}, + OfferID: 0, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&buyOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageBuyOffer operation: Field: Price, Error: price cannot be negative: -1/100" + assert.Contains(t, 
err.Error(), expected) + } +} + +func TestManageBuyOfferValidateOfferID(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + buyOffer := ManageBuyOffer{ + Selling: CreditAsset{"ABCD", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "0", + Price: price.MustParse("0.01"), + OfferID: -1, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&buyOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageBuyOffer operation: Field: OfferID, Error: amount can not be negative" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageBuyOfferPrice(t *testing.T) { + kp0 := newKeypair0() + + mbo := ManageBuyOffer{ + Selling: CreditAsset{"ABCD", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "1", + Price: price.MustParse("0.000000001"), + OfferID: 1, + } + + xdrOp, err := mbo.BuildXDR() + assert.NoError(t, err) + expectedPrice := xdr.Price{N: 1, D: 1000000000} + assert.Equal(t, expectedPrice, xdrOp.Body.ManageBuyOfferOp.Price) + + parsed := ManageBuyOffer{} + assert.NoError(t, parsed.FromXDR(xdrOp)) + assert.Equal(t, mbo.Price, parsed.Price) +} + +func TestManageBuyOfferRoundtrip(t *testing.T) { + manageBuyOffer := ManageBuyOffer{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Selling: CreditAsset{"USD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}, + Buying: NativeAsset{}, + Amount: "100.0000000", + Price: price.MustParse("0.01"), + OfferID: 0, + } + testOperationsMarshallingRoundtrip(t, []Operation{&manageBuyOffer}, false) + + // with muxed accounts + manageBuyOffer = ManageBuyOffer{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Selling: CreditAsset{"USD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}, + Buying: NativeAsset{}, + Amount: "100.0000000", + Price: price.MustParse("0.01"), + OfferID: 0, + } + testOperationsMarshallingRoundtrip(t, []Operation{&manageBuyOffer}, true) +} diff --git a/txnbuild/manage_data.go b/txnbuild/manage_data.go new file mode 100644 index 0000000000..86bcc03fb7 --- /dev/null +++ b/txnbuild/manage_data.go @@ -0,0 +1,72 @@ +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// ManageData represents the Stellar manage data operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type ManageData struct { + Name string + Value []byte + SourceAccount string +} + +// BuildXDR for ManageData returns a fully configured XDR Operation. +func (md *ManageData) BuildXDR() (xdr.Operation, error) { + xdrOp := xdr.ManageDataOp{DataName: xdr.String64(md.Name)} + + // No data value clears the named data entry on the account + if md.Value == nil { + xdrOp.DataValue = nil + } else { + xdrDV := xdr.DataValue(md.Value) + xdrOp.DataValue = &xdrDV + } + + opType := xdr.OperationTypeManageData + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, md.SourceAccount) + return op, nil +} + +// FromXDR for ManageData initialises the txnbuild struct from the corresponding xdr Operation. 
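As the BuildXDR and Validate code above spells out, entry names are capped at 64 characters, values at 64 bytes, and a nil Value clears the named entry rather than setting it to empty; a short sketch of both forms (the entry name is just an illustrative string):

package main

import (
	"log"

	"github.com/stellar/go/txnbuild"
)

func main() {
	// Set (or overwrite) an entry on the account.
	set := txnbuild.ManageData{Name: "config.memo_required", Value: []byte("1")}

	// Delete the same entry: a nil Value clears it on the network.
	del := txnbuild.ManageData{Name: "config.memo_required", Value: nil}

	for _, op := range []txnbuild.Operation{&set, &del} {
		if err := op.Validate(); err != nil {
			log.Fatal(err)
		}
	}
}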
+func (md *ManageData) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetManageDataOp() + if !ok { + return errors.New("error parsing create_account operation from xdr") + } + + md.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + md.Name = string(result.DataName) + if result.DataValue != nil { + md.Value = *result.DataValue + } else { + md.Value = nil + } + return nil +} + +// Validate for ManageData validates the required struct fields. It returns an error if any +// of the fields are invalid. Otherwise, it returns nil. +func (md *ManageData) Validate() error { + if len(md.Name) > 64 { + return NewValidationError("Name", "maximum length is 64 characters") + } + + if len(md.Value) > 64 { + return NewValidationError("Value", "maximum length is 64 bytes") + } + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. +func (md *ManageData) GetSourceAccount() string { + return md.SourceAccount +} diff --git a/txnbuild/manage_data_test.go b/txnbuild/manage_data_test.go new file mode 100644 index 0000000000..9c8074371a --- /dev/null +++ b/txnbuild/manage_data_test.go @@ -0,0 +1,145 @@ +package txnbuild + +import ( + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestManageDataValidateName(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(3556091187167235)) + + manageData := ManageData{ + Name: "This is a very long name for a field that only accepts 64 characters", + Value: []byte(""), + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&manageData}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageData operation: Field: Name, Error: maximum length is 64 characters" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageDataValidateValue(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(3556091187167235)) + + manageData := ManageData{ + Name: "cars", + Value: []byte("toyota, ford, porsche, lamborghini, hyundai, volkswagen, gmc, kia"), + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&manageData}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageData operation: Field: Value, Error: maximum length is 64 bytes" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageDataRoundTrip(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(3556091187167235)) + + for _, testCase := range []struct { + name string + value []byte + }{ + { + "nil data", + nil, + }, + { + "empty data slice", + []byte{}, + }, + { + "non-empty data slice", + []byte{1, 2, 3}, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + manageData := ManageData{ + Name: "key", + Value: testCase.value, + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&manageData}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + envelope := tx.ToXDR() + assert.NoError(t, err) + assert.Len(t, envelope.Operations(), 1) + assert.Equal(t, 
xdr.String64(manageData.Name), envelope.Operations()[0].Body.ManageDataOp.DataName) + if testCase.value == nil { + assert.Nil(t, envelope.Operations()[0].Body.ManageDataOp.DataValue) + } else { + assert.Len(t, []byte(*envelope.Operations()[0].Body.ManageDataOp.DataValue), len(testCase.value)) + if len(testCase.value) > 0 { + assert.Equal(t, testCase.value, []byte(*envelope.Operations()[0].Body.ManageDataOp.DataValue)) + } + } + + txe, err := tx.Base64() + if err != nil { + assert.NoError(t, err) + } + + parsed, err := TransactionFromXDR(txe) + assert.NoError(t, err) + + tx, _ = parsed.Transaction() + + assert.Len(t, tx.Operations(), 1) + op := tx.Operations()[0].(*ManageData) + assert.Equal(t, manageData.Name, op.Name) + assert.Len(t, op.Value, len(manageData.Value)) + if len(manageData.Value) > 0 { + assert.Equal(t, manageData.Value, op.Value) + } + }) + } +} + +func TestManageDataRoundtrip(t *testing.T) { + manageData := ManageData{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Name: "foo", + Value: []byte("bar"), + } + testOperationsMarshallingRoundtrip(t, []Operation{&manageData}, false) + + // with muxed accounts + manageData = ManageData{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Name: "foo", + Value: []byte("bar"), + } + testOperationsMarshallingRoundtrip(t, []Operation{&manageData}, true) +} diff --git a/txnbuild/manage_offer.go b/txnbuild/manage_offer.go new file mode 100644 index 0000000000..810eba3d67 --- /dev/null +++ b/txnbuild/manage_offer.go @@ -0,0 +1,157 @@ +package txnbuild + +import ( + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// CreateOfferOp returns a ManageSellOffer operation to create a new offer, by +// setting the OfferID to "0". The sourceAccount is optional, and if not provided, +// will be that of the surrounding transaction. +func CreateOfferOp(selling, buying Asset, amount string, price xdr.Price, sourceAccount ...string) (ManageSellOffer, error) { + if len(sourceAccount) > 1 { + return ManageSellOffer{}, errors.New("offer can't have multiple source accounts") + } + offer := ManageSellOffer{ + Selling: selling, + Buying: buying, + Amount: amount, + Price: price, + OfferID: 0, + } + if len(sourceAccount) == 1 { + offer.SourceAccount = sourceAccount[0] + } + return offer, nil +} + +// UpdateOfferOp returns a ManageSellOffer operation to update an offer. +// The sourceAccount is optional, and if not provided, will be that of +// the surrounding transaction. +func UpdateOfferOp(selling, buying Asset, amount string, price xdr.Price, offerID int64, sourceAccount ...string) (ManageSellOffer, error) { + if len(sourceAccount) > 1 { + return ManageSellOffer{}, errors.New("offer can't have multiple source accounts") + } + offer := ManageSellOffer{ + Selling: selling, + Buying: buying, + Amount: amount, + Price: price, + OfferID: offerID, + } + if len(sourceAccount) == 1 { + offer.SourceAccount = sourceAccount[0] + } + return offer, nil +} + +// DeleteOfferOp returns a ManageSellOffer operation to delete an offer, by +// setting the Amount to "0". The sourceAccount is optional, and if not provided, +// will be that of the surrounding transaction. +func DeleteOfferOp(offerID int64, sourceAccount ...string) (ManageSellOffer, error) { + // It turns out Stellar core doesn't care about any of these fields except the amount. + // However, Horizon will reject ManageSellOffer if it is missing fields. 
+ // Horizon will also reject if Buying == Selling. + // Therefore unfortunately we have to make up some dummy values here. + if len(sourceAccount) > 1 { + return ManageSellOffer{}, errors.New("offer can't have multiple source accounts") + } + offer := ManageSellOffer{ + Selling: NativeAsset{}, + Buying: CreditAsset{Code: "FAKE", Issuer: "GBAQPADEYSKYMYXTMASBUIS5JI3LMOAWSTM2CHGDBJ3QDDPNCSO3DVAA"}, + Amount: "0", + Price: xdr.Price{ + N: 1, + D: 1, + }, + OfferID: offerID, + } + if len(sourceAccount) == 1 { + offer.SourceAccount = sourceAccount[0] + } + return offer, nil +} + +// ManageSellOffer represents the Stellar manage offer operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type ManageSellOffer struct { + Selling Asset + Buying Asset + Amount string + Price xdr.Price + OfferID int64 + SourceAccount string +} + +// BuildXDR for ManageSellOffer returns a fully configured XDR Operation. +func (mo *ManageSellOffer) BuildXDR() (xdr.Operation, error) { + xdrSelling, err := mo.Selling.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set XDR 'Selling' field") + } + + xdrBuying, err := mo.Buying.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set XDR 'Buying' field") + } + + xdrAmount, err := amount.Parse(mo.Amount) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse 'Amount'") + } + + opType := xdr.OperationTypeManageSellOffer + xdrOp := xdr.ManageSellOfferOp{ + Selling: xdrSelling, + Buying: xdrBuying, + Amount: xdrAmount, + Price: mo.Price, + OfferId: xdr.Int64(mo.OfferID), + } + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, mo.SourceAccount) + return op, nil +} + +// FromXDR for ManageSellOffer initialises the txnbuild struct from the corresponding xdr Operation. +func (mo *ManageSellOffer) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetManageSellOfferOp() + if !ok { + return errors.New("error parsing manage_sell_offer operation from xdr") + } + + mo.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + mo.OfferID = int64(result.OfferId) + mo.Amount = amount.String(result.Amount) + mo.Price = result.Price + buyingAsset, err := assetFromXDR(result.Buying) + if err != nil { + return errors.Wrap(err, "error parsing buying_asset in manage_sell_offer operation") + } + mo.Buying = buyingAsset + + sellingAsset, err := assetFromXDR(result.Selling) + if err != nil { + return errors.Wrap(err, "error parsing selling_asset in manage_sell_offer operation") + } + mo.Selling = sellingAsset + return nil +} + +// Validate for ManageSellOffer validates the required struct fields. It returns an error if any +// of the fields are invalid. Otherwise, it returns nil. +func (mo *ManageSellOffer) Validate() error { + return validateOffer(mo.Buying, mo.Selling, mo.Amount, mo.Price, mo.OfferID) +} + +// GetSourceAccount returns the source account of the operation, or nil if not +// set. 
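The three helpers above cover the lifecycle of a sell offer: CreateOfferOp starts from OfferID 0, UpdateOfferOp targets an existing ID, and DeleteOfferOp zeroes the amount using the dummy fields noted above. A sketch of the sequence (the offer ID 4242 is made up, and the issuer address is reused from the tests):

package main

import (
	"log"

	"github.com/stellar/go/price"
	"github.com/stellar/go/txnbuild"
)

func main() {
	selling := txnbuild.NativeAsset{}
	buying := txnbuild.CreditAsset{Code: "ABCD", Issuer: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"}

	create, err := txnbuild.CreateOfferOp(selling, buying, "100", price.MustParse("0.01"))
	if err != nil {
		log.Fatal(err)
	}

	// Once the offer exists on the network, its ID is known and can be updated.
	update, err := txnbuild.UpdateOfferOp(selling, buying, "150", price.MustParse("0.02"), 4242)
	if err != nil {
		log.Fatal(err)
	}

	remove, err := txnbuild.DeleteOfferOp(4242)
	if err != nil {
		log.Fatal(err)
	}

	_ = []txnbuild.Operation{&create, &update, &remove}
}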
+func (mo *ManageSellOffer) GetSourceAccount() string { + return mo.SourceAccount +} diff --git a/txnbuild/manage_offer_test.go b/txnbuild/manage_offer_test.go new file mode 100644 index 0000000000..d8b7504209 --- /dev/null +++ b/txnbuild/manage_offer_test.go @@ -0,0 +1,185 @@ +package txnbuild + +import ( + "github.com/stellar/go/price" + "github.com/stellar/go/xdr" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestManageSellOfferValidateSellingAsset(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + selling := CreditAsset{"", kp0.Address()} + buying := NativeAsset{} + sellAmount := "100" + createOffer, err := CreateOfferOp(selling, buying, sellAmount, price.MustParse("0.01")) + check(err) + + _, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageSellOffer operation: Field: Selling, Error: asset code length must be between 1 and 12 characters" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageSellOfferValidateBuyingAsset(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + selling := NativeAsset{} + buying := CreditAsset{"", kp0.Address()} + sellAmount := "100" + createOffer, err := CreateOfferOp(selling, buying, sellAmount, price.MustParse("0.01")) + check(err) + + _, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageSellOffer operation: Field: Buying, Error: asset code length must be between 1 and 12 characters" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageSellOfferValidateAmount(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + selling := NativeAsset{} + buying := CreditAsset{"ABCD", kp0.Address()} + sellAmount := "-1" + createOffer, err := CreateOfferOp(selling, buying, sellAmount, price.MustParse("0.01")) + check(err) + + _, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageSellOffer operation: Field: Amount, Error: amount can not be negative" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageSellOfferValidatePrice(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + selling := NativeAsset{} + buying := CreditAsset{"ABCD", kp0.Address()} + sellAmount := "0" + createOffer, err := CreateOfferOp(selling, buying, sellAmount, xdr.Price{-1, 100}) + check(err) + + _, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&createOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed 
for *txnbuild.ManageSellOffer operation: Field: Price, Error: price cannot be negative: -1/100" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageSellOfferValidateOfferID(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + mso := ManageSellOffer{ + Selling: CreditAsset{"ABCD", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "0", + Price: price.MustParse("0.01"), + OfferID: -1, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&mso}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.ManageSellOffer operation: Field: OfferID, Error: amount can not be negative" + assert.Contains(t, err.Error(), expected) + } +} + +func TestManageSellOfferPrice(t *testing.T) { + kp0 := newKeypair0() + + mso := ManageSellOffer{ + Selling: CreditAsset{"ABCD", kp0.Address()}, + Buying: NativeAsset{}, + Amount: "1", + Price: price.MustParse("0.000000001"), + OfferID: 1, + } + + xdrOp, err := mso.BuildXDR() + assert.NoError(t, err) + expectedPrice := xdr.Price{N: 1, D: 1000000000} + assert.Equal(t, expectedPrice, xdrOp.Body.ManageSellOfferOp.Price) + + parsed := ManageSellOffer{} + assert.NoError(t, parsed.FromXDR(xdrOp)) + assert.Equal(t, mso.Price, parsed.Price) +} + +func TestManageSellOfferRoundtrip(t *testing.T) { + manageSellOffer := ManageSellOffer{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Selling: CreditAsset{"USD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}, + Buying: NativeAsset{}, + Amount: "100.0000000", + Price: price.MustParse("0.01"), + OfferID: 0, + } + testOperationsMarshallingRoundtrip(t, []Operation{&manageSellOffer}, false) + + // with muxed accounts + manageSellOffer = ManageSellOffer{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Selling: CreditAsset{"USD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}, + Buying: NativeAsset{}, + Amount: "100.0000000", + Price: price.MustParse("0.01"), + OfferID: 0, + } + testOperationsMarshallingRoundtrip(t, []Operation{&manageSellOffer}, true) +} diff --git a/txnbuild/memo.go b/txnbuild/memo.go new file mode 100644 index 0000000000..11bfe939ad --- /dev/null +++ b/txnbuild/memo.go @@ -0,0 +1,85 @@ +package txnbuild + +import ( + "fmt" + + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// MemoText is used to send human messages of up to 28 bytes of ASCII/UTF-8. +type MemoText string + +// MemoID is an identifier representing the transaction originator. +type MemoID uint64 + +// MemoHash is a hash representing a reference to another transaction. +type MemoHash [32]byte + +// MemoReturn is a hash representing the hash of the transaction the sender is refunding. +type MemoReturn [32]byte + +// MemoTextMaxLength is the maximum number of bytes allowed for a text memo. +const MemoTextMaxLength = 28 + +// Memo represents the superset of all memo types. +type Memo interface { + ToXDR() (xdr.Memo, error) +} + +// ToXDR for MemoText returns an XDR object representation of a Memo of the same type. 
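Each concrete memo type converts to the wire representation via ToXDR; text memos are limited to 28 bytes, while the other types wrap fixed-size values. A short sketch (the hash bytes are arbitrary):

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/txnbuild"
)

func main() {
	memos := []txnbuild.Memo{
		txnbuild.MemoText("thanks for the coffee"), // must be <= 28 bytes
		txnbuild.MemoID(1234),
		txnbuild.MemoHash([32]byte{0x10}),
	}
	for _, m := range memos {
		xdrMemo, err := m.ToXDR()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(xdrMemo.Type)
	}

	// Over-long text memos are rejected at conversion time.
	_, err := txnbuild.MemoText("this string is definitely longer than twenty-eight bytes").ToXDR()
	fmt.Println(err) // Memo text can't be longer than 28 bytes
}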
+func (mt MemoText) ToXDR() (xdr.Memo, error) { + if len(mt) > MemoTextMaxLength { + return xdr.Memo{}, fmt.Errorf("Memo text can't be longer than %d bytes", MemoTextMaxLength) + } + + return xdr.NewMemo(xdr.MemoTypeMemoText, string(mt)) +} + +// ToXDR for MemoID returns an XDR object representation of a Memo of the same type. +func (mid MemoID) ToXDR() (xdr.Memo, error) { + return xdr.NewMemo(xdr.MemoTypeMemoId, xdr.Uint64(mid)) +} + +// ToXDR for MemoHash returns an XDR object representation of a Memo of the same type. +func (mh MemoHash) ToXDR() (xdr.Memo, error) { + return xdr.NewMemo(xdr.MemoTypeMemoHash, xdr.Hash(mh)) +} + +// ToXDR for MemoReturn returns an XDR object representation of a Memo of the same type. +func (mr MemoReturn) ToXDR() (xdr.Memo, error) { + return xdr.NewMemo(xdr.MemoTypeMemoReturn, xdr.Hash(mr)) +} + +// memoFromXDR returns a Memo from XDR +func memoFromXDR(memo xdr.Memo) (Memo, error) { + var newMemo Memo + var memoCreated bool + + switch memo.Type { + case xdr.MemoTypeMemoText: + value, ok := memo.GetText() + newMemo = MemoText(value) + memoCreated = ok + case xdr.MemoTypeMemoId: + value, ok := memo.GetId() + newMemo = MemoID(uint64(value)) + memoCreated = ok + case xdr.MemoTypeMemoHash: + value, ok := memo.GetHash() + newMemo = MemoHash(value) + memoCreated = ok + case xdr.MemoTypeMemoReturn: + value, ok := memo.GetRetHash() + newMemo = MemoReturn(value) + memoCreated = ok + case xdr.MemoTypeMemoNone: + memoCreated = true + } + + if !memoCreated { + return nil, errors.New("invalid memo") + } + + return newMemo, nil +} diff --git a/txnbuild/memo_test.go b/txnbuild/memo_test.go new file mode 100644 index 0000000000..f4a9d9b6ce --- /dev/null +++ b/txnbuild/memo_test.go @@ -0,0 +1,51 @@ +package txnbuild + +import ( + "testing" + + "github.com/stellar/go/xdr" + + "github.com/stretchr/testify/assert" +) + +func TestMemoFromXDR(t *testing.T) { + // memo text + xdrMemo, err := xdr.NewMemo(xdr.MemoTypeMemoText, "abc123") + assert.NoError(t, err) + memo, err := memoFromXDR(xdrMemo) + if assert.NoError(t, err) { + assert.Equal(t, MemoText("abc123"), memo, "memo text should match") + } + + // memo id + xdrMemo, err = xdr.NewMemo(xdr.MemoTypeMemoId, xdr.Uint64(1234)) + assert.NoError(t, err) + memo, err = memoFromXDR(xdrMemo) + if assert.NoError(t, err) { + assert.Equal(t, MemoID(1234), memo, "memo id should match") + } + + // memo hash + xdrMemo, err = xdr.NewMemo(xdr.MemoTypeMemoHash, xdr.Hash([32]byte{0x10})) + assert.NoError(t, err) + memo, err = memoFromXDR(xdrMemo) + if assert.NoError(t, err) { + assert.Equal(t, MemoHash([32]byte{0x10}), memo, "memo hash should match") + } + + // memo return + xdrMemo, err = xdr.NewMemo(xdr.MemoTypeMemoReturn, xdr.Hash([32]byte{0x01})) + assert.NoError(t, err) + memo, err = memoFromXDR(xdrMemo) + if assert.NoError(t, err) { + assert.Equal(t, MemoReturn([32]byte{0x01}), memo, "memo return should match") + } + + // memo none + xdrMemo, err = xdr.NewMemo(xdr.MemoTypeMemoNone, "") + assert.NoError(t, err) + memo, err = memoFromXDR(xdrMemo) + if assert.NoError(t, err) { + assert.Equal(t, nil, memo, "memo should be nil") + } +} diff --git a/txnbuild/operation.go b/txnbuild/operation.go new file mode 100644 index 0000000000..d0e8ce1b70 --- /dev/null +++ b/txnbuild/operation.go @@ -0,0 +1,92 @@ +package txnbuild + +import ( + "fmt" + + "github.com/stellar/go/xdr" +) + +// Operation represents the operation types of the Stellar network. 
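Every operation satisfies this interface, which is what lets a transaction envelope be decoded back into concrete txnbuild types and inspected with a type switch; a condensed sketch of the roundtrip the tests below perform (account address and sequence number are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/txnbuild"
)

func main() {
	source := txnbuild.NewSimpleAccount("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", 1)

	tx, err := txnbuild.NewTransaction(txnbuild.TransactionParams{
		SourceAccount:        &source,
		IncrementSequenceNum: true,
		Operations:           []txnbuild.Operation{&txnbuild.ManageData{Name: "foo", Value: []byte("bar")}},
		BaseFee:              txnbuild.MinBaseFee,
		Timebounds:           txnbuild.NewInfiniteTimeout(),
	})
	if err != nil {
		log.Fatal(err)
	}

	b64, err := tx.Base64()
	if err != nil {
		log.Fatal(err)
	}

	parsed, err := txnbuild.TransactionFromXDR(b64)
	if err != nil {
		log.Fatal(err)
	}
	decoded, _ := parsed.Transaction()

	switch op := decoded.Operations()[0].(type) {
	case *txnbuild.ManageData:
		fmt.Println(op.Name, string(op.Value)) // foo bar
	default:
		fmt.Println("unexpected operation type")
	}
}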
+type Operation interface { + BuildXDR() (xdr.Operation, error) + FromXDR(xdrOp xdr.Operation) error + Validate() error + GetSourceAccount() string +} + +// SetOpSourceAccount sets the source account ID on an Operation, allowing M-strkeys (as defined in SEP23). +func SetOpSourceAccount(op *xdr.Operation, sourceAccount string) { + if sourceAccount == "" { + return + } + var opSourceAccountID xdr.MuxedAccount + opSourceAccountID.SetAddress(sourceAccount) + op.SourceAccount = &opSourceAccountID +} + +// operationFromXDR returns a txnbuild Operation from its corresponding XDR operation +func operationFromXDR(xdrOp xdr.Operation) (Operation, error) { + var newOp Operation + switch xdrOp.Body.Type { + case xdr.OperationTypeCreateAccount: + newOp = &CreateAccount{} + case xdr.OperationTypePayment: + newOp = &Payment{} + case xdr.OperationTypePathPaymentStrictReceive: + newOp = &PathPayment{} + case xdr.OperationTypeManageSellOffer: + newOp = &ManageSellOffer{} + case xdr.OperationTypeCreatePassiveSellOffer: + newOp = &CreatePassiveSellOffer{} + case xdr.OperationTypeSetOptions: + newOp = &SetOptions{} + case xdr.OperationTypeChangeTrust: + newOp = &ChangeTrust{} + case xdr.OperationTypeAllowTrust: + newOp = &AllowTrust{} + case xdr.OperationTypeAccountMerge: + newOp = &AccountMerge{} + case xdr.OperationTypeInflation: + newOp = &Inflation{} + case xdr.OperationTypeManageData: + newOp = &ManageData{} + case xdr.OperationTypeBumpSequence: + newOp = &BumpSequence{} + case xdr.OperationTypeManageBuyOffer: + newOp = &ManageBuyOffer{} + case xdr.OperationTypePathPaymentStrictSend: + newOp = &PathPaymentStrictSend{} + case xdr.OperationTypeBeginSponsoringFutureReserves: + newOp = &BeginSponsoringFutureReserves{} + case xdr.OperationTypeEndSponsoringFutureReserves: + newOp = &EndSponsoringFutureReserves{} + case xdr.OperationTypeCreateClaimableBalance: + newOp = &CreateClaimableBalance{} + case xdr.OperationTypeClaimClaimableBalance: + newOp = &ClaimClaimableBalance{} + case xdr.OperationTypeRevokeSponsorship: + newOp = &RevokeSponsorship{} + case xdr.OperationTypeClawback: + newOp = &Clawback{} + case xdr.OperationTypeClawbackClaimableBalance: + newOp = &ClawbackClaimableBalance{} + case xdr.OperationTypeSetTrustLineFlags: + newOp = &SetTrustLineFlags{} + case xdr.OperationTypeLiquidityPoolDeposit: + newOp = &LiquidityPoolDeposit{} + case xdr.OperationTypeLiquidityPoolWithdraw: + newOp = &LiquidityPoolWithdraw{} + default: + return nil, fmt.Errorf("unknown operation type: %d", xdrOp.Body.Type) + } + + err := newOp.FromXDR(xdrOp) + return newOp, err +} + +func accountFromXDR(account *xdr.MuxedAccount) string { + if account != nil { + return account.Address() + } + return "" +} diff --git a/txnbuild/operation_test.go b/txnbuild/operation_test.go new file mode 100644 index 0000000000..d462e3a9c5 --- /dev/null +++ b/txnbuild/operation_test.go @@ -0,0 +1,493 @@ +package txnbuild + +import ( + "testing" + + "github.com/stellar/go/amount" + "github.com/stellar/go/xdr" + + "github.com/stretchr/testify/assert" +) + +func TestCreateAccountFromXDR(t *testing.T) { + txeB64 := "AAAAAMOrP0B2tL9IUn5QL8nn8q88kkFui1x3oW9omCj6hLhfAAAAZAAAAMcAAAAWAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAEAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwAAAAAAAAAAJ5yfHhgKAxylgecjAymWqNzLWRk/MqSYt+X9duZ2DfyAAAAF0h26AAAAAAAAAAAAvqEuF8AAABAZ5q2N2BHRylT28T1DbUVU7QKTbKZ+6DLefzJoCjHo2T8vcI/PjF8gsRu/r2M60Uzcw3WmqRFerA6DnJILIEdDoZW4JwAAABAsFL3WXr+tDK5tjR/0ZBVuNyzyqSa8Li2tUMUmB23PWuPG71ObUPTShkhlc7ydNN/qYRaA/Mafm+vsIQWDbCRDA==" + + xdrEnv, err := 
unmarshalBase64(txeB64) + if assert.NoError(t, err) { + var ca CreateAccount + err = ca.FromXDR(xdrEnv.Operations()[0]) + if assert.NoError(t, err) { + assert.Equal(t, "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", ca.SourceAccount, "source accounts should match") + assert.Equal(t, "GCPHE7DYMAUAY4UWA6OIYDFGLKRXGLLEMT6MVETC36L7LW4Z3A37EJW5", ca.Destination, "destination should match") + assert.Equal(t, "10000.0000000", ca.Amount, "starting balance should match") + } + } + + txeB64NoSource := "AAAAAGigiN2q4qBXAERImNEncpaADylyBRtzdqpEsku6CN0xAAAAyAAADXYAAAABAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwAAAAAdgRnAAAAAAAAAAAA" + xdrEnv, err = unmarshalBase64(txeB64NoSource) + if assert.NoError(t, err) { + var ca CreateAccount + err = ca.FromXDR(xdrEnv.Operations()[0]) + if assert.NoError(t, err) { + assert.Equal(t, "", ca.SourceAccount, "source accounts should match") + assert.Equal(t, "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", ca.Destination, "destination should match") + assert.Equal(t, "198.0000000", ca.Amount, "starting balance should match") + } + } +} + +func TestZeroBalanceAccount(t *testing.T) { + sponsor, sponsee := newKeypair0(), newKeypair1() + ops := []Operation{ + &BeginSponsoringFutureReserves{SponsoredID: sponsee.Address()}, + &CreateAccount{ + Destination: sponsee.Address(), + Amount: "0", + }, + &EndSponsoringFutureReserves{ + SourceAccount: sponsee.Address(), + }, + } + + sourceAccount := SimpleAccount{AccountID: sponsor.Address()} + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: ops, + IncrementSequenceNum: true, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + + assert.NoErrorf(t, err, "zero-balance account creation should work") +} + +func TestPaymentFromXDR(t *testing.T) { + txeB64 := "AAAAAGigiN2q4qBXAERImNEncpaADylyBRtzdqpEsku6CN0xAAABkAAADXYAAAABAAAAAAAAAAAAAAACAAAAAQAAAABooIjdquKgVwBESJjRJ3KWgA8pcgUbc3aqRLJLugjdMQAAAAEAAAAAEH3Rayw4M0iCLoEe96rPFNGYim8AVHJU0z4ebYZW4JwAAAAAAAAAAAX14QAAAAAAAAAAAQAAAAAQfdFrLDgzSIIugR73qs8U0ZiKbwBUclTTPh5thlbgnAAAAAFYWQAAAAAAAGigiN2q4qBXAERImNEncpaADylyBRtzdqpEsku6CN0xAAAAAE/exwAAAAAAAAAAAA==" + + xdrEnv, err := unmarshalBase64(txeB64) + if assert.NoError(t, err) { + var p Payment + err = p.FromXDR(xdrEnv.Operations()[0]) + if assert.NoError(t, err) { + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", p.SourceAccount, "source accounts should match") + assert.Equal(t, "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", p.Destination, "destination should match") + assert.Equal(t, "10.0000000", p.Amount, "amount should match") + assert.Equal(t, true, p.Asset.IsNative(), "Asset should be native") + } + + err = p.FromXDR(xdrEnv.Operations()[1]) + if assert.NoError(t, err) { + assert.Equal(t, "", p.SourceAccount, "source accounts should match") + assert.Equal(t, "GAIH3ULLFQ4DGSECF2AR555KZ4KNDGEKN4AFI4SU2M7B43MGK3QJZNSR", p.Destination, "destination should match") + assert.Equal(t, "134.0000000", p.Amount, "amount should match") + assetType, e := p.Asset.GetType() + assert.NoError(t, e) + assert.Equal(t, AssetTypeCreditAlphanum4, assetType, "Asset type should match") + assert.Equal(t, "XY", p.Asset.GetCode(), "Asset code should match") + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", p.Asset.GetIssuer(), "Asset issuer should match") + } + } +} + +func TestPathPaymentFromXDR(t *testing.T) { + txeB64 := 
"AAAAAH4RyzTWNfXhqwLUoCw91aWkZtgIzY8SAVkIPc0uFVmYAAAAZAAAql0AAAADAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAACAAAAAAAAAAAF9eEAAAAAAH4RyzTWNfXhqwLUoCw91aWkZtgIzY8SAVkIPc0uFVmYAAAAAAAAAAAAmJaAAAAAAQAAAAFBQkNEAAAAAODcbeFyXKxmUWK1L6znNbKKIkPkHRJNbLktcKPqLnLFAAAAAAAAAAEuFVmYAAAAQF2kLUL/RoFIy1cmt+GXdWn2tDUjJYV3YwF4A82zIBhqYSO6ogOoLPNRt3w+IGCAgfR4Q9lpax+wCXWoQERHSw4=" + + xdrEnv, err := unmarshalBase64(txeB64) + if assert.NoError(t, err) { + var pp PathPayment + err = pp.FromXDR(xdrEnv.Operations()[0]) + if assert.NoError(t, err) { + assert.Equal(t, "", pp.SourceAccount, "source accounts should match") + assert.Equal(t, "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", pp.Destination, "destination should match") + assert.Equal(t, "1.0000000", pp.DestAmount, "DestAmount should match") + assert.Equal(t, "10.0000000", pp.SendMax, "SendMax should match") + assert.Equal(t, true, pp.DestAsset.IsNative(), "DestAsset should be native") + assert.Equal(t, 1, len(pp.Path), "Number of paths should be 1") + assetType, e := pp.Path[0].GetType() + assert.NoError(t, e) + assert.Equal(t, AssetTypeCreditAlphanum4, assetType, "Asset type should match") + assert.Equal(t, "ABCD", pp.Path[0].GetCode(), "Asset code should match") + assert.Equal(t, "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", pp.Path[0].GetIssuer(), "Asset issuer should match") + } + } +} + +func TestManageSellOfferFromXDR(t *testing.T) { + txeB64 := "AAAAAGigiN2q4qBXAERImNEncpaADylyBRtzdqpEsku6CN0xAAABkAAADXYAAAABAAAAAAAAAAAAAAACAAAAAQAAAABooIjdquKgVwBESJjRJ3KWgA8pcgUbc3aqRLJLugjdMQAAAAMAAAAAAAAAAkFCQ1hZWgAAAAAAAAAAAABooIjdquKgVwBESJjRJ3KWgA8pcgUbc3aqRLJLugjdMQAAAACy0F4AAAAABQAAAAEAAAAAAAAAAAAAAAAAAAADAAAAAUFCQwAAAAAAaKCI3arioFcAREiY0SdyloAPKXIFG3N2qkSyS7oI3TEAAAAAAAAAAO5rKAAAAAAFAAAAAQAAAAAAAAAAAAAAAAAAAAA=" + + xdrEnv, err := unmarshalBase64(txeB64) + if assert.NoError(t, err) { + var mso ManageSellOffer + err = mso.FromXDR(xdrEnv.Operations()[0]) + if assert.NoError(t, err) { + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", mso.SourceAccount, "source accounts should match") + assert.Equal(t, int64(0), mso.OfferID, "OfferID should match") + assert.Equal(t, "300.0000000", mso.Amount, "Amount should match") + assert.Equal(t, xdr.Price{5, 1}, mso.Price, "Price should match") + assert.Equal(t, true, mso.Selling.IsNative(), "Selling should be native") + assetType, e := mso.Buying.GetType() + assert.NoError(t, e) + assert.Equal(t, AssetTypeCreditAlphanum12, assetType, "Asset type should match") + assert.Equal(t, "ABCXYZ", mso.Buying.GetCode(), "Asset code should match") + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", mso.Buying.GetIssuer(), "Asset issuer should match") + } + + err = mso.FromXDR(xdrEnv.Operations()[1]) + if assert.NoError(t, err) { + assert.Equal(t, "", mso.SourceAccount, "source accounts should match") + assert.Equal(t, int64(0), mso.OfferID, "OfferID should match") + assert.Equal(t, "400.0000000", mso.Amount, "Amount should match") + assert.Equal(t, xdr.Price{5, 1}, mso.Price, "Price should match") + assert.Equal(t, true, mso.Buying.IsNative(), "Buying should be native") + assetType, e := mso.Selling.GetType() + assert.NoError(t, e) + assert.Equal(t, AssetTypeCreditAlphanum4, assetType, "Asset type should match") + assert.Equal(t, "ABC", mso.Selling.GetCode(), "Asset code should match") + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", mso.Selling.GetIssuer(), "Asset issuer should match") + } + + } +} + +func 
TestManageBuyOfferFromXDR(t *testing.T) { + txeB64 := "AAAAAGigiN2q4qBXAERImNEncpaADylyBRtzdqpEsku6CN0xAAABkAAADXYAAAABAAAAAAAAAAAAAAACAAAAAQAAAABooIjdquKgVwBESJjRJ3KWgA8pcgUbc3aqRLJLugjdMQAAAAwAAAAAAAAAAkFCQ1hZWgAAAAAAAAAAAABooIjdquKgVwBESJjRJ3KWgA8pcgUbc3aqRLJLugjdMQAAAAA7msoAAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAAMAAAAAUFCQwAAAAAAaKCI3arioFcAREiY0SdyloAPKXIFG3N2qkSyS7oI3TEAAAAAAAAAALLQXgAAAAADAAAABQAAAAAAAAAAAAAAAAAAAAA=" + + xdrEnv, err := unmarshalBase64(txeB64) + if assert.NoError(t, err) { + var mbo ManageBuyOffer + err = mbo.FromXDR(xdrEnv.Operations()[0]) + if assert.NoError(t, err) { + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", mbo.SourceAccount, "source accounts should match") + assert.Equal(t, int64(0), mbo.OfferID, "OfferID should match") + assert.Equal(t, "100.0000000", mbo.Amount, "Amount should match") + assert.Equal(t, xdr.Price{1, 2}, mbo.Price, "Price should match") + assert.Equal(t, true, mbo.Selling.IsNative(), "Selling should be native") + assetType, e := mbo.Buying.GetType() + assert.NoError(t, e) + assert.Equal(t, AssetTypeCreditAlphanum12, assetType, "Asset type should match") + assert.Equal(t, "ABCXYZ", mbo.Buying.GetCode(), "Asset code should match") + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", mbo.Buying.GetIssuer(), "Asset issuer should match") + } + + err = mbo.FromXDR(xdrEnv.Operations()[1]) + if assert.NoError(t, err) { + assert.Equal(t, "", mbo.SourceAccount, "source accounts should match") + assert.Equal(t, int64(0), mbo.OfferID, "OfferID should match") + assert.Equal(t, "300.0000000", mbo.Amount, "Amount should match") + assert.Equal(t, xdr.Price{3, 5}, mbo.Price, "Price should match") + assert.Equal(t, true, mbo.Buying.IsNative(), "Buying should be native") + assetType, e := mbo.Selling.GetType() + assert.NoError(t, e) + assert.Equal(t, AssetTypeCreditAlphanum4, assetType, "Asset type should match") + assert.Equal(t, "ABC", mbo.Selling.GetCode(), "Asset code should match") + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", mbo.Selling.GetIssuer(), "Asset issuer should match") + } + + } +} + +func TestCreatePassiveSellOfferFromXDR(t *testing.T) { + txeB64 := "AAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAAZAAAJWoAAAANAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAEAAAAAAAAAAFBQkNEAAAAAODcbeFyXKxmUWK1L6znNbKKIkPkHRJNbLktcKPqLnLFAAAAAAX14QAAAAABAAAAAQAAAAAAAAAB0odkfgAAAEAgUD7M1UL7x2m2m26ySzcSHxIneOT7/r+s/HLsgWDj6CmpSi1GZrlvtBH+CNuegCwvW09TRZJhp7bLywkaFCoK" + + xdrEnv, err := unmarshalBase64(txeB64) + if assert.NoError(t, err) { + var cpo CreatePassiveSellOffer + err = cpo.FromXDR(xdrEnv.Operations()[0]) + if assert.NoError(t, err) { + assert.Equal(t, "", cpo.SourceAccount, "source accounts should match") + assert.Equal(t, "10.0000000", cpo.Amount, "Amount should match") + assert.Equal(t, xdr.Price{1, 1}, cpo.Price, "Price should match") + assert.Equal(t, true, cpo.Selling.IsNative(), "Selling should be native") + assetType, e := cpo.Buying.GetType() + assert.NoError(t, e) + assert.Equal(t, AssetTypeCreditAlphanum4, assetType, "Asset type should match") + assert.Equal(t, "ABCD", cpo.Buying.GetCode(), "Asset code should match") + assert.Equal(t, "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", cpo.Buying.GetIssuer(), "Asset issuer should match") + } + } +} + +func TestSetOptionsFromXDR(t *testing.T) { + + var opSource xdr.AccountId + err := opSource.SetAddress("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H") + assert.NoError(t, err) + cFlags := 
xdr.Uint32(0b1101)
+    sFlags := xdr.Uint32(0b1111)
+    mw := xdr.Uint32(7)
+    lt := xdr.Uint32(2)
+    mt := xdr.Uint32(4)
+    ht := xdr.Uint32(6)
+    hDomain := xdr.String32("stellar.org")
+    var skey xdr.SignerKey
+    err = skey.SetAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3")
+    assert.NoError(t, err)
+    signer := xdr.Signer{
+        Key:    skey,
+        Weight: xdr.Uint32(4),
+    }
+
+    xdrSetOptions := xdr.SetOptionsOp{
+        InflationDest: &opSource,
+        ClearFlags:    &cFlags,
+        SetFlags:      &sFlags,
+        MasterWeight:  &mw,
+        LowThreshold:  &lt,
+        MedThreshold:  &mt,
+        HighThreshold: &ht,
+        HomeDomain:    &hDomain,
+        Signer:        &signer,
+    }
+
+    muxSource := opSource.ToMuxedAccount()
+    xdrOp := xdr.Operation{
+        SourceAccount: &muxSource,
+        Body: xdr.OperationBody{
+            Type:         xdr.OperationTypeSetOptions,
+            SetOptionsOp: &xdrSetOptions,
+        },
+    }
+
+    var so SetOptions
+    err = so.FromXDR(xdrOp)
+    if assert.NoError(t, err) {
+        assert.Equal(t, "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", so.SourceAccount, "source accounts should match")
+        assert.Equal(t, Threshold(7), *so.MasterWeight, "master weight should match")
+        assert.Equal(t, Threshold(2), *so.LowThreshold, "low threshold should match")
+        assert.Equal(t, Threshold(4), *so.MediumThreshold, "medium threshold should match")
+        assert.Equal(t, Threshold(6), *so.HighThreshold, "high threshold should match")
+        assert.Equal(t, "stellar.org", *so.HomeDomain, "Home domain should match")
+        assert.Equal(t, "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", so.Signer.Address, "Signer address should match")
+        assert.Equal(t, Threshold(4), so.Signer.Weight, "Signer weight should match")
+        assert.Equal(t, int(AuthRequired), int(so.SetFlags[0]), "Set AuthRequired flags should match")
+        assert.Equal(t, int(AuthRevocable), int(so.SetFlags[1]), "Set AuthRevocable flags should match")
+        assert.Equal(t, int(AuthImmutable), int(so.SetFlags[2]), "Set AuthImmutable flags should match")
+        assert.Equal(t, int(AuthClawbackEnabled), int(so.SetFlags[3]), "Set AuthClawbackEnabled flags should match")
+        assert.Equal(t, int(AuthRequired), int(so.ClearFlags[0]), "Clear AuthRequired flags should match")
+        assert.Equal(t, int(AuthImmutable), int(so.ClearFlags[1]), "Clear AuthImmutable flags should match")
+        assert.Equal(t, int(AuthClawbackEnabled), int(so.ClearFlags[2]), "Clear AuthClawbackEnabled flags should match")
+    }
+
+}
+
+func TestChangeTrustFromXDR(t *testing.T) {
+    asset := CreditAsset{Code: "ABC", Issuer: "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3"}
+    xdrAsset, err := asset.ToXDR()
+    assert.NoError(t, err)
+    xdrLimit, err := amount.Parse("5000")
+    assert.NoError(t, err)
+    changeTrustOp := xdr.ChangeTrustOp{
+        Line:  xdrAsset.ToChangeTrustAsset(),
+        Limit: xdrLimit,
+    }
+
+    var opSource xdr.MuxedAccount
+    err = opSource.SetEd25519Address("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H")
+    assert.NoError(t, err)
+    xdrOp := xdr.Operation{
+        SourceAccount: &opSource,
+        Body: xdr.OperationBody{
+            Type:          xdr.OperationTypeChangeTrust,
+            ChangeTrustOp: &changeTrustOp,
+        },
+    }
+
+    var ct ChangeTrust
+    err = ct.FromXDR(xdrOp)
+    if assert.NoError(t, err) {
+        assert.Equal(t, "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", ct.SourceAccount, "source accounts should match")
+        assetType, e := ct.Line.GetType()
+        assert.NoError(t, e)
+
+        assert.Equal(t, AssetTypeCreditAlphanum4, assetType, "Asset type should match")
+        assert.Equal(t, "ABC", ct.Line.GetCode(), "Asset code should match")
+        assert.Equal(t, 
"GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", ct.Line.GetIssuer(), "Asset issuer should match") + assert.Equal(t, "5000.0000000", ct.Limit, "Trustline limit should match") + } +} + +func TestAllowTrustFromXDR(t *testing.T) { + xdrAsset := xdr.Asset{} + allowTrustAsset, err := xdrAsset.ToAssetCode("ABCXYZ") + assert.NoError(t, err) + + var opSource xdr.MuxedAccount + err = opSource.SetEd25519Address("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H") + assert.NoError(t, err) + + var trustor xdr.AccountId + err = trustor.SetAddress("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3") + assert.NoError(t, err) + + allowTrustOp := xdr.AllowTrustOp{ + Trustor: trustor, + Asset: allowTrustAsset, + Authorize: xdr.Uint32(xdr.TrustLineFlagsAuthorizedFlag), + } + + xdrOp := xdr.Operation{ + SourceAccount: &opSource, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeAllowTrust, + AllowTrustOp: &allowTrustOp, + }, + } + + var at AllowTrust + err = at.FromXDR(xdrOp) + if assert.NoError(t, err) { + assert.Equal(t, "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", at.SourceAccount, "source accounts should match") + + assetType, e := at.Type.GetType() + assert.NoError(t, e) + assert.Equal(t, AssetTypeCreditAlphanum12, assetType, "Asset type should match") + assert.Equal(t, "ABCXYZ", at.Type.GetCode(), "Asset code should match") + assert.Equal(t, "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", at.Trustor, "Trustor should match") + assert.Equal(t, true, at.Authorize, "Authorize value should match") + } +} + +func TestAccountMergeFromXDR(t *testing.T) { + var opSource xdr.MuxedAccount + err := opSource.SetEd25519Address("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H") + assert.NoError(t, err) + + var destination xdr.MuxedAccount + err = destination.SetEd25519Address("GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3") + assert.NoError(t, err) + + xdrOp := xdr.Operation{ + SourceAccount: &opSource, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeAccountMerge, + Destination: &destination, + }, + } + + var am AccountMerge + err = am.FromXDR(xdrOp) + if assert.NoError(t, err) { + assert.Equal(t, "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", am.SourceAccount, "source accounts should match") + assert.Equal(t, "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", am.Destination, "destination accounts should match") + } +} + +func TestInflationFromXDR(t *testing.T) { + var opSource xdr.MuxedAccount + err := opSource.SetEd25519Address("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H") + assert.NoError(t, err) + + xdrOp := xdr.Operation{ + SourceAccount: &opSource, + Body: xdr.OperationBody{Type: xdr.OperationTypeInflation}, + } + + var inf Inflation + err = inf.FromXDR(xdrOp) + if assert.NoError(t, err) { + assert.Equal(t, "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", inf.SourceAccount, "source accounts should match") + } +} + +func TestManageDataFromXDR(t *testing.T) { + var opSource xdr.MuxedAccount + err := opSource.SetEd25519Address("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H") + assert.NoError(t, err) + + dv := []byte("value") + xdrdv := xdr.DataValue(dv) + manageDataOp := xdr.ManageDataOp{ + DataName: xdr.String64("data"), + DataValue: &xdrdv, + } + + xdrOp := xdr.Operation{ + SourceAccount: &opSource, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeManageData, + ManageDataOp: &manageDataOp, + }, + } + + var md ManageData + err = md.FromXDR(xdrOp) + if assert.NoError(t, err) { + 
assert.Equal(t, "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", md.SourceAccount, "source accounts should match") + assert.Equal(t, "data", md.Name, "Name should match") + assert.Equal(t, "value", string(md.Value), "Value should match") + } +} + +func TestBumpSequenceFromXDR(t *testing.T) { + var opSource xdr.MuxedAccount + err := opSource.SetEd25519Address("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H") + assert.NoError(t, err) + + bsOp := xdr.BumpSequenceOp{ + BumpTo: xdr.SequenceNumber(45), + } + + xdrOp := xdr.Operation{ + SourceAccount: &opSource, + Body: xdr.OperationBody{ + Type: xdr.OperationTypeBumpSequence, + BumpSequenceOp: &bsOp, + }, + } + + var bs BumpSequence + err = bs.FromXDR(xdrOp) + if assert.NoError(t, err) { + assert.Equal(t, "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", bs.SourceAccount, "source accounts should match") + assert.Equal(t, int64(45), bs.BumpTo, "BumpTo should match") + } +} + +func testOperationsMarshallingRoundtrip(t *testing.T, operations []Operation, withMuxedAccounts bool) { + kp1 := newKeypair1() + accountID := xdr.MustAddress(kp1.Address()) + mx := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabe, + Ed25519: *accountID.Ed25519, + }, + } + var sourceAccount SimpleAccount + if withMuxedAccounts { + sourceAccount = NewSimpleAccount(mx.Address(), int64(9605939170639898)) + } else { + sourceAccount = NewSimpleAccount(kp1.Address(), int64(9606132444168199)) + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: operations, + Timebounds: NewInfiniteTimeout(), + BaseFee: MinBaseFee, + }, + ) + assert.NoError(t, err) + + var b64 string + b64, err = tx.Base64() + assert.NoError(t, err) + + var parsedTx *GenericTransaction + if withMuxedAccounts { + parsedTx, err = TransactionFromXDR(b64) + } else { + parsedTx, err = TransactionFromXDR(b64) + } + assert.NoError(t, err) + var ok bool + tx, ok = parsedTx.Transaction() + assert.True(t, ok) + + for i := 0; i < len(operations); i++ { + assert.Equal(t, operations[i], tx.Operations()[i]) + } +} diff --git a/txnbuild/path_payment.go b/txnbuild/path_payment.go new file mode 100644 index 0000000000..3a43e6e580 --- /dev/null +++ b/txnbuild/path_payment.go @@ -0,0 +1,167 @@ +package txnbuild + +import ( + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// PathPayment represents the Stellar path_payment operation. This operation was removed +// in Stellar Protocol 12 and replaced by PathPaymentStrictReceive. +// Deprecated: This operation was renamed to PathPaymentStrictReceive, +// which functions identically. +type PathPayment = PathPaymentStrictReceive + +// PathPaymentStrictReceive represents the Stellar path_payment_strict_receive operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type PathPaymentStrictReceive struct { + SendAsset Asset + SendMax string + Destination string + DestAsset Asset + DestAmount string + Path []Asset + SourceAccount string +} + +// BuildXDR for PathPaymentStrictReceive returns a fully configured XDR Operation. 
+func (pp *PathPaymentStrictReceive) BuildXDR() (xdr.Operation, error) { + // Set XDR send asset + if pp.SendAsset == nil { + return xdr.Operation{}, errors.New("you must specify an asset to send for payment") + } + xdrSendAsset, err := pp.SendAsset.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set asset type") + } + + // Set XDR send max + xdrSendMax, err := amount.Parse(pp.SendMax) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse maximum amount to send") + } + + // Set XDR destination + var xdrDestination xdr.MuxedAccount + err = xdrDestination.SetAddress(pp.Destination) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set destination address") + } + + // Set XDR destination asset + if pp.DestAsset == nil { + return xdr.Operation{}, errors.New("you must specify an asset for destination account to receive") + } + xdrDestAsset, err := pp.DestAsset.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set asset type") + } + + // Set XDR destination amount + xdrDestAmount, err := amount.Parse(pp.DestAmount) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse amount of asset destination account receives") + } + + // Set XDR path + var xdrPath []xdr.Asset + var xdrPathAsset xdr.Asset + for _, asset := range pp.Path { + xdrPathAsset, err = asset.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set asset type") + } + xdrPath = append(xdrPath, xdrPathAsset) + } + + opType := xdr.OperationTypePathPaymentStrictReceive + xdrOp := xdr.PathPaymentStrictReceiveOp{ + SendAsset: xdrSendAsset, + SendMax: xdrSendMax, + Destination: xdrDestination, + DestAsset: xdrDestAsset, + DestAmount: xdrDestAmount, + Path: xdrPath, + } + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, pp.SourceAccount) + return op, nil +} + +// FromXDR for PathPaymentStrictReceive initialises the txnbuild struct from the corresponding xdr Operation. +func (pp *PathPaymentStrictReceive) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetPathPaymentStrictReceiveOp() + if !ok { + return errors.New("error parsing path_payment operation from xdr") + } + + pp.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + pp.Destination = result.Destination.Address() + + pp.DestAmount = amount.String(result.DestAmount) + pp.SendMax = amount.String(result.SendMax) + + destAsset, err := assetFromXDR(result.DestAsset) + if err != nil { + return errors.Wrap(err, "error parsing dest_asset in path_payment operation") + } + pp.DestAsset = destAsset + + sendAsset, err := assetFromXDR(result.SendAsset) + if err != nil { + return errors.Wrap(err, "error parsing send_asset in path_payment operation") + } + pp.SendAsset = sendAsset + + pp.Path = []Asset{} + for _, p := range result.Path { + pathAsset, err := assetFromXDR(p) + if err != nil { + return errors.Wrap(err, "error parsing paths in path_payment operation") + } + pp.Path = append(pp.Path, pathAsset) + } + + return nil +} + +// Validate for PathPaymentStrictReceive validates the required struct fields. It returns an error if any +// of the fields are invalid. Otherwise, it returns nil. 
+func (pp *PathPaymentStrictReceive) Validate() error {
+    _, err := xdr.AddressToMuxedAccount(pp.Destination)
+    if err != nil {
+        return NewValidationError("Destination", err.Error())
+    }
+
+    err = validateStellarAsset(pp.SendAsset)
+    if err != nil {
+        return NewValidationError("SendAsset", err.Error())
+    }
+
+    err = validateStellarAsset(pp.DestAsset)
+    if err != nil {
+        return NewValidationError("DestAsset", err.Error())
+    }
+
+    err = validateAmount(pp.SendMax)
+    if err != nil {
+        return NewValidationError("SendMax", err.Error())
+    }
+
+    err = validateAmount(pp.DestAmount)
+    if err != nil {
+        return NewValidationError("DestAmount", err.Error())
+    }
+
+    return nil
+}
+
+// GetSourceAccount returns the source account of the operation, or the empty string if not
+// set.
+func (pp *PathPaymentStrictReceive) GetSourceAccount() string {
+    return pp.SourceAccount
+}
diff --git a/txnbuild/path_payment_strict_send.go b/txnbuild/path_payment_strict_send.go
new file mode 100644
index 0000000000..91b9ebfd77
--- /dev/null
+++ b/txnbuild/path_payment_strict_send.go
@@ -0,0 +1,160 @@
+package txnbuild
+
+import (
+    "github.com/stellar/go/amount"
+    "github.com/stellar/go/support/errors"
+    "github.com/stellar/go/xdr"
+)
+
+// PathPaymentStrictSend represents the Stellar path_payment_strict_send operation. See
+// https://developers.stellar.org/docs/start/list-of-operations/
+type PathPaymentStrictSend struct {
+    SendAsset     Asset
+    SendAmount    string
+    Destination   string
+    DestAsset     Asset
+    DestMin       string
+    Path          []Asset
+    SourceAccount string
+}
+
+// BuildXDR for PathPaymentStrictSend returns a fully configured XDR Operation.
+func (pp *PathPaymentStrictSend) BuildXDR() (xdr.Operation, error) {
+    // Set XDR send asset
+    if pp.SendAsset == nil {
+        return xdr.Operation{}, errors.New("you must specify an asset to send for payment")
+    }
+    xdrSendAsset, err := pp.SendAsset.ToXDR()
+    if err != nil {
+        return xdr.Operation{}, errors.Wrap(err, "failed to set asset type")
+    }
+
+    // Set XDR dest min
+    xdrDestMin, err := amount.Parse(pp.DestMin)
+    if err != nil {
+        return xdr.Operation{}, errors.Wrap(err, "failed to parse minimum amount to receive")
+    }
+
+    // Set XDR destination
+    var xdrDestination xdr.MuxedAccount
+    err = xdrDestination.SetAddress(pp.Destination)
+    if err != nil {
+        return xdr.Operation{}, errors.Wrap(err, "failed to set destination address")
+    }
+
+    // Set XDR destination asset
+    if pp.DestAsset == nil {
+        return xdr.Operation{}, errors.New("you must specify an asset for destination account to receive")
+    }
+    xdrDestAsset, err := pp.DestAsset.ToXDR()
+    if err != nil {
+        return xdr.Operation{}, errors.Wrap(err, "failed to set asset type")
+    }
+
+    // Set XDR send amount
+    xdrSendAmount, err := amount.Parse(pp.SendAmount)
+    if err != nil {
+        return xdr.Operation{}, errors.Wrap(err, "failed to parse amount of asset source account sends")
+    }
+
+    // Set XDR path
+    var xdrPath []xdr.Asset
+    var xdrPathAsset xdr.Asset
+    for _, asset := range pp.Path {
+        xdrPathAsset, err = asset.ToXDR()
+        if err != nil {
+            return xdr.Operation{}, errors.Wrap(err, "failed to set asset type")
+        }
+        xdrPath = append(xdrPath, xdrPathAsset)
+    }
+
+    opType := xdr.OperationTypePathPaymentStrictSend
+    xdrOp := xdr.PathPaymentStrictSendOp{
+        SendAsset:   xdrSendAsset,
+        SendAmount:  xdrSendAmount,
+        Destination: xdrDestination,
+        DestAsset:   xdrDestAsset,
+        DestMin:     xdrDestMin,
+        Path:        xdrPath,
+    }
+    body, err := xdr.NewOperationBody(opType, xdrOp)
+    if err != nil {
+        return xdr.Operation{}, errors.Wrap(err, "failed to build XDR 
OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, pp.SourceAccount) + return op, nil +} + +// FromXDR for PathPaymentStrictSend initialises the txnbuild struct from the corresponding xdr Operation. +func (pp *PathPaymentStrictSend) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetPathPaymentStrictSendOp() + if !ok { + return errors.New("error parsing path_payment operation from xdr") + } + + pp.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + pp.Destination = result.Destination.Address() + pp.SendAmount = amount.String(result.SendAmount) + pp.DestMin = amount.String(result.DestMin) + + destAsset, err := assetFromXDR(result.DestAsset) + if err != nil { + return errors.Wrap(err, "error parsing dest_asset in path_payment operation") + } + pp.DestAsset = destAsset + + sendAsset, err := assetFromXDR(result.SendAsset) + if err != nil { + return errors.Wrap(err, "error parsing send_asset in path_payment operation") + } + pp.SendAsset = sendAsset + + pp.Path = []Asset{} + for _, p := range result.Path { + pathAsset, err := assetFromXDR(p) + if err != nil { + return errors.Wrap(err, "error parsing paths in path_payment operation") + } + pp.Path = append(pp.Path, pathAsset) + } + + return nil +} + +// Validate for PathPaymentStrictSend validates the required struct fields. It returns an error if any +// of the fields are invalid. Otherwise, it returns nil. +func (pp *PathPaymentStrictSend) Validate() error { + _, err := xdr.AddressToMuxedAccount(pp.Destination) + if err != nil { + return NewValidationError("Destination", err.Error()) + } + + err = validateStellarAsset(pp.SendAsset) + if err != nil { + return NewValidationError("SendAsset", err.Error()) + } + + err = validateStellarAsset(pp.DestAsset) + if err != nil { + return NewValidationError("DestAsset", err.Error()) + } + + err = validateAmount(pp.SendAmount) + if err != nil { + return NewValidationError("SendAmount", err.Error()) + } + + err = validateAmount(pp.DestMin) + if err != nil { + return NewValidationError("DestMin", err.Error()) + } + + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. 
+func (pp *PathPaymentStrictSend) GetSourceAccount() string { + return pp.SourceAccount +} diff --git a/txnbuild/path_payment_strict_send_test.go b/txnbuild/path_payment_strict_send_test.go new file mode 100644 index 0000000000..3e3d73a01a --- /dev/null +++ b/txnbuild/path_payment_strict_send_test.go @@ -0,0 +1,183 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPathPaymentStrictSendValidateSendAsset(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPaymentStrictSend{ + SendAsset: CreditAsset{"ABCD", ""}, + SendAmount: "10", + Destination: kp2.Address(), + DestAsset: NativeAsset{}, + DestMin: "1", + Path: []Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.PathPaymentStrictSend operation: Field: SendAsset, Error: asset issuer: public key is undefined" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentStrictSendValidateDestAsset(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPaymentStrictSend{ + SendAsset: NativeAsset{}, + SendAmount: "10", + Destination: kp2.Address(), + DestAsset: CreditAsset{"", kp0.Address()}, + DestMin: "1", + Path: []Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.PathPaymentStrictSend operation: Field: DestAsset, Error: asset code length must be between 1 and 12 characters" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentStrictSendValidateDestination(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPaymentStrictSend{ + SendAsset: NativeAsset{}, + SendAmount: "10", + Destination: "SASND3NRUY5K43PN3H3HOP5JNTIDXJFLOKKNSCZQQAFBRSEIRD5OJKXZ", + DestAsset: CreditAsset{"ABCD", kp0.Address()}, + DestMin: "1", + Path: []Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.PathPaymentStrictSend operation: Field: Destination" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentStrictSendValidateSendMax(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPaymentStrictSend{ + SendAsset: NativeAsset{}, + SendAmount: "abc", + Destination: kp2.Address(), + DestAsset: CreditAsset{"ABCD", kp0.Address()}, + DestMin: "1", + Path: 
[]Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.PathPaymentStrictSend operation: Field: SendAmount, Error: invalid amount format: abc" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentStrictSendValidateDestAmount(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPaymentStrictSend{ + SendAsset: NativeAsset{}, + SendAmount: "10", + Destination: kp2.Address(), + DestAsset: CreditAsset{"ABCD", kp0.Address()}, + DestMin: "-1", + Path: []Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.PathPaymentStrictSend operation: Field: DestMin, Error: amount can not be negative" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentStrictSendRoundtrip(t *testing.T) { + pathPaymentStrictSend := PathPaymentStrictSend{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + SendAsset: NativeAsset{}, + SendAmount: "10.0000000", + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + DestAsset: CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}, + DestMin: "1.0000000", + Path: []Asset{CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}}, + } + testOperationsMarshallingRoundtrip(t, []Operation{&pathPaymentStrictSend}, false) + + // with muxed accounts + pathPaymentStrictSend = PathPaymentStrictSend{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + SendAsset: NativeAsset{}, + SendAmount: "10.0000000", + Destination: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + DestAsset: CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}, + DestMin: "1.0000000", + Path: []Asset{CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}}, + } + testOperationsMarshallingRoundtrip(t, []Operation{&pathPaymentStrictSend}, true) +} diff --git a/txnbuild/path_payment_test.go b/txnbuild/path_payment_test.go new file mode 100644 index 0000000000..f5f78fafa7 --- /dev/null +++ b/txnbuild/path_payment_test.go @@ -0,0 +1,183 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPathPaymentValidateSendAsset(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPayment{ + SendAsset: CreditAsset{"ABCD", ""}, + SendMax: "10", + Destination: kp2.Address(), + DestAsset: NativeAsset{}, + DestAmount: "1", + Path: []Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + + if assert.Error(t, err) { + 
expected := "validation failed for *txnbuild.PathPaymentStrictReceive operation: Field: SendAsset, Error: asset issuer: public key is undefined" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentValidateDestAsset(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPayment{ + SendAsset: NativeAsset{}, + SendMax: "10", + Destination: kp2.Address(), + DestAsset: CreditAsset{"", kp0.Address()}, + DestAmount: "1", + Path: []Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.PathPaymentStrictReceive operation: Field: DestAsset, Error: asset code length must be between 1 and 12 characters" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentValidateDestination(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPayment{ + SendAsset: NativeAsset{}, + SendMax: "10", + Destination: "SASND3NRUY5K43PN3H3HOP5JNTIDXJFLOKKNSCZQQAFBRSEIRD5OJKXZ", + DestAsset: CreditAsset{"ABCD", kp0.Address()}, + DestAmount: "1", + Path: []Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.PathPaymentStrictReceive operation: Field: Destination" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentValidateSendMax(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPayment{ + SendAsset: NativeAsset{}, + SendMax: "abc", + Destination: kp2.Address(), + DestAsset: CreditAsset{"ABCD", kp0.Address()}, + DestAmount: "1", + Path: []Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.PathPaymentStrictReceive operation: Field: SendMax, Error: invalid amount format: abc" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentValidateDestAmount(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPayment{ + SendAsset: NativeAsset{}, + SendMax: "10", + Destination: kp2.Address(), + DestAsset: CreditAsset{"ABCD", kp0.Address()}, + DestAmount: "-1", + Path: []Asset{abcdAsset}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := 
"validation failed for *txnbuild.PathPaymentStrictReceive operation: Field: DestAmount, Error: amount can not be negative" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPathPaymentRoundtrip(t *testing.T) { + pathPayment := PathPayment{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + SendAsset: NativeAsset{}, + SendMax: "10.0000000", + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + DestAsset: CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}, + DestAmount: "1.0000000", + Path: []Asset{CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}}, + } + testOperationsMarshallingRoundtrip(t, []Operation{&pathPayment}, false) + + // with muxed accounts + pathPayment = PathPayment{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + SendAsset: NativeAsset{}, + SendMax: "10.0000000", + Destination: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + DestAsset: CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}, + DestAmount: "1.0000000", + Path: []Asset{CreditAsset{"ABCD", "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H"}}, + } + testOperationsMarshallingRoundtrip(t, []Operation{&pathPayment}, true) +} diff --git a/txnbuild/payment.go b/txnbuild/payment.go new file mode 100644 index 0000000000..ac06bf59f3 --- /dev/null +++ b/txnbuild/payment.go @@ -0,0 +1,103 @@ +package txnbuild + +import ( + "github.com/stellar/go/amount" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// Payment represents the Stellar payment operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type Payment struct { + Destination string + Amount string + Asset Asset + SourceAccount string +} + +// BuildXDR for Payment returns a fully configured XDR Operation. + +func (p *Payment) BuildXDR() (xdr.Operation, error) { + var destMuxedAccount xdr.MuxedAccount + + err := destMuxedAccount.SetAddress(p.Destination) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set destination address") + } + + xdrAmount, err := amount.Parse(p.Amount) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to parse amount") + } + + if p.Asset == nil { + return xdr.Operation{}, errors.New("you must specify an asset for payment") + } + xdrAsset, err := p.Asset.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set asset type") + } + + opType := xdr.OperationTypePayment + xdrOp := xdr.PaymentOp{ + Destination: destMuxedAccount, + Amount: xdrAmount, + Asset: xdrAsset, + } + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR Operation") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, p.SourceAccount) + return op, nil +} + +// FromXDR for Payment initialises the txnbuild struct from the corresponding xdr Operation. 
+func (p *Payment) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetPaymentOp() + if !ok { + return errors.New("error parsing payment operation from xdr") + } + + p.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + p.Destination = result.Destination.Address() + + p.Amount = amount.String(result.Amount) + + asset, err := assetFromXDR(result.Asset) + if err != nil { + return errors.Wrap(err, "error parsing asset in payment operation") + } + p.Asset = asset + + return nil +} + +// Validate for Payment validates the required struct fields. It returns an error if any +// of the fields are invalid. Otherwise, it returns nil. +func (p *Payment) Validate() error { + _, err := xdr.AddressToMuxedAccount(p.Destination) + + if err != nil { + return NewValidationError("Destination", err.Error()) + } + + err = validateStellarAsset(p.Asset) + if err != nil { + return NewValidationError("Asset", err.Error()) + } + + err = validateAmount(p.Amount) + if err != nil { + return NewValidationError("Amount", err.Error()) + } + + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. +func (p *Payment) GetSourceAccount() string { + return p.SourceAccount +} diff --git a/txnbuild/payment_test.go b/txnbuild/payment_test.go new file mode 100644 index 0000000000..e82cd7b499 --- /dev/null +++ b/txnbuild/payment_test.go @@ -0,0 +1,101 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPaymentValidateDestination(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + payment := Payment{ + Destination: "", + Amount: "10", + Asset: NativeAsset{}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&payment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.Payment operation: Field: Destination" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPaymentValidateAmount(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + payment := Payment{ + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Amount: "ten", + Asset: NativeAsset{}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&payment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.Payment operation: Field: Amount, Error: invalid amount format: ten" + assert.Contains(t, err.Error(), expected) + } +} + +func TestPaymentValidateAsset(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + payment := Payment{ + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Amount: "10", + Asset: CreditAsset{}, + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&payment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + if assert.Error(t, err) { + expected := "validation failed for *txnbuild.Payment operation: Field: Asset, Error: asset code length must be between 1 and 12 characters" + assert.Contains(t, err.Error(), 
expected) + } +} + +func TestPaymentRoundtrip(t *testing.T) { + payment := Payment{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Amount: "10.0000000", + Asset: NativeAsset{}, + } + testOperationsMarshallingRoundtrip(t, []Operation{&payment}, false) + + // with muxed accounts + payment = Payment{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Destination: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Amount: "10.0000000", + Asset: NativeAsset{}, + } + testOperationsMarshallingRoundtrip(t, []Operation{&payment}, true) +} diff --git a/txnbuild/revoke_sponsorship.go b/txnbuild/revoke_sponsorship.go new file mode 100644 index 0000000000..cee5139efe --- /dev/null +++ b/txnbuild/revoke_sponsorship.go @@ -0,0 +1,284 @@ +package txnbuild + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/stellar/go/xdr" +) + +type RevokeSponsorshipType int + +const ( + RevokeSponsorshipTypeAccount RevokeSponsorshipType = iota + 1 + RevokeSponsorshipTypeTrustLine + RevokeSponsorshipTypeOffer + RevokeSponsorshipTypeData + RevokeSponsorshipTypeClaimableBalance + RevokeSponsorshipTypeSigner +) + +// RevokeSponsorship is a union type representing a RevokeSponsorship Operation. +// SponsorshipType stablishes which sponsorship is being revoked. +// The other fields should be ignored. +type RevokeSponsorship struct { + SourceAccount string + SponsorshipType RevokeSponsorshipType + // Account ID (strkey) + Account *string + TrustLine *TrustLineID + Offer *OfferID + Data *DataID + // Claimable Balance ID + ClaimableBalance *string + Signer *SignerID +} + +type TrustLineID struct { + Account string + Asset TrustLineAsset +} + +type OfferID struct { + SellerAccountAddress string + OfferID int64 +} + +type DataID struct { + Account string + DataName string +} + +type SignerID struct { + AccountID string + SignerAddress string +} + +func (r *RevokeSponsorship) BuildXDR() (xdr.Operation, error) { + xdrOp := xdr.RevokeSponsorshipOp{} + switch r.SponsorshipType { + case RevokeSponsorshipTypeAccount: + var key xdr.LedgerKeyAccount + if r.Account == nil { + return xdr.Operation{}, errors.New("Account can't be nil") + } + if err := key.AccountId.SetAddress(*r.Account); err != nil { + return xdr.Operation{}, errors.Wrap(err, "incorrect Account address") + } + xdrOp.Type = xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry + xdrOp.LedgerKey = &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeAccount, + Account: &key, + } + case RevokeSponsorshipTypeTrustLine: + var key xdr.LedgerKeyTrustLine + if r.TrustLine == nil { + return xdr.Operation{}, errors.New("TrustLine can't be nil") + } + if err := key.AccountId.SetAddress(r.TrustLine.Account); err != nil { + return xdr.Operation{}, errors.Wrap(err, "incorrect Account address") + } + asset, err := r.TrustLine.Asset.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "incorrect TrustLine asset") + } + key.Asset = asset + xdrOp.Type = xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry + xdrOp.LedgerKey = &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeTrustline, + TrustLine: &key, + } + case RevokeSponsorshipTypeOffer: + var key xdr.LedgerKeyOffer + if r.Offer == nil { + return xdr.Operation{}, errors.New("Offer can't be nil") + } + if err := key.SellerId.SetAddress(r.Offer.SellerAccountAddress); err != nil { + return xdr.Operation{}, errors.Wrap(err, "incorrect Seller account address") + 
} + key.OfferId = xdr.Int64(r.Offer.OfferID) + xdrOp.Type = xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry + xdrOp.LedgerKey = &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeOffer, + Offer: &key, + } + case RevokeSponsorshipTypeData: + var key xdr.LedgerKeyData + if r.Data == nil { + return xdr.Operation{}, errors.New("Data can't be nil") + } + if err := key.AccountId.SetAddress(r.Data.Account); err != nil { + return xdr.Operation{}, errors.Wrap(err, "incorrect Account address") + } + key.DataName = xdr.String64(r.Data.DataName) + xdrOp.Type = xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry + xdrOp.LedgerKey = &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeData, + Data: &key, + } + case RevokeSponsorshipTypeClaimableBalance: + var key xdr.LedgerKeyClaimableBalance + + if r.ClaimableBalance == nil { + return xdr.Operation{}, errors.New("ClaimableBalance can't be nil") + } + if err := xdr.SafeUnmarshalHex(*r.ClaimableBalance, &key.BalanceId); err != nil { + return xdr.Operation{}, errors.Wrap(err, "cannot parse ClaimableBalance") + } + xdrOp.Type = xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry + xdrOp.LedgerKey = &xdr.LedgerKey{ + Type: xdr.LedgerEntryTypeClaimableBalance, + ClaimableBalance: &key, + } + case RevokeSponsorshipTypeSigner: + var signer xdr.RevokeSponsorshipOpSigner + if r.Signer == nil { + return xdr.Operation{}, errors.New("Signer can't be nil") + } + if err := signer.AccountId.SetAddress(r.Signer.AccountID); err != nil { + return xdr.Operation{}, errors.New("incorrect Account address") + } + if err := signer.SignerKey.SetAddress(r.Signer.SignerAddress); err != nil { + return xdr.Operation{}, errors.New("incorrect Signer account address") + } + xdrOp.Type = xdr.RevokeSponsorshipTypeRevokeSponsorshipSigner + xdrOp.Signer = &signer + default: + return xdr.Operation{}, fmt.Errorf("unknown SponsorshipType: %d", r.SponsorshipType) + } + opType := xdr.OperationTypeRevokeSponsorship + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, r.SourceAccount) + + return op, nil +} + +func (r *RevokeSponsorship) FromXDR(xdrOp xdr.Operation) error { + r.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + op, ok := xdrOp.Body.GetRevokeSponsorshipOp() + if !ok { + return errors.New("error parsing revoke_sponsorhip operation from xdr") + } + switch op.Type { + case xdr.RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry: + lkey := op.LedgerKey + switch lkey.Type { + case xdr.LedgerEntryTypeAccount: + var sponsorshipId string + sponsorshipId = lkey.Account.AccountId.Address() + r.SponsorshipType = RevokeSponsorshipTypeAccount + r.Account = &sponsorshipId + case xdr.LedgerEntryTypeTrustline: + var sponsorshipId TrustLineID + sponsorshipId.Account = lkey.TrustLine.AccountId.Address() + asset, err := assetFromTrustLineAssetXDR(lkey.TrustLine.Asset) + if err != nil { + return errors.Wrap(err, "error parsing Trustline Asset") + } + sponsorshipId.Asset = asset + r.SponsorshipType = RevokeSponsorshipTypeTrustLine + r.TrustLine = &sponsorshipId + case xdr.LedgerEntryTypeOffer: + var sponsorshipId OfferID + sponsorshipId.SellerAccountAddress = lkey.Offer.SellerId.Address() + sponsorshipId.OfferID = int64(lkey.Offer.OfferId) + r.SponsorshipType = RevokeSponsorshipTypeOffer + r.Offer = &sponsorshipId + case xdr.LedgerEntryTypeData: + var sponsorshipId DataID + sponsorshipId.Account = lkey.Data.AccountId.Address() + sponsorshipId.DataName 
= string(lkey.Data.DataName) + r.SponsorshipType = RevokeSponsorshipTypeData + r.Data = &sponsorshipId + case xdr.LedgerEntryTypeClaimableBalance: + if lkey.ClaimableBalance.BalanceId.Type != 0 { + return fmt.Errorf( + "unexpected ClaimableBalance Id Type: %d", + lkey.ClaimableBalance.BalanceId.Type, + ) + } + claimableBalanceId, err := xdr.MarshalHex(&lkey.ClaimableBalance.BalanceId) + if err != nil { + return errors.Wrap(err, "cannot generate Claimable Balance Id") + } + r.SponsorshipType = RevokeSponsorshipTypeClaimableBalance + r.ClaimableBalance = &claimableBalanceId + default: + return fmt.Errorf("unexpected LedgerEntryType: %d", lkey.Type) + } + case xdr.RevokeSponsorshipTypeRevokeSponsorshipSigner: + var sponsorshipId SignerID + sponsorshipId.AccountID = op.Signer.AccountId.Address() + sponsorshipId.SignerAddress = op.Signer.SignerKey.Address() + r.SponsorshipType = RevokeSponsorshipTypeSigner + r.Signer = &sponsorshipId + default: + return fmt.Errorf("unexpected RevokeSponsorshipType: %d", op.Type) + } + return nil +} + +func (r *RevokeSponsorship) Validate() error { + switch r.SponsorshipType { + case RevokeSponsorshipTypeAccount: + if r.Account == nil { + return errors.New("Account can't be nil") + } + return validateStellarPublicKey(*r.Account) + case RevokeSponsorshipTypeTrustLine: + if r.TrustLine == nil { + return errors.New("Trustline can't be nil") + } + if err := validateStellarPublicKey(r.TrustLine.Account); err != nil { + return errors.Wrap(err, "invalid Account address") + } + if err := validateStellarAsset(r.TrustLine.Asset); err != nil { + return errors.Wrap(err, "invalid TrustLine asset") + } + case RevokeSponsorshipTypeOffer: + if r.Offer == nil { + return errors.New("Offer can't be nil") + } + if err := validateStellarPublicKey(r.Offer.SellerAccountAddress); err != nil { + return errors.Wrap(err, "invalid Seller account address") + } + return validateStellarPublicKey(r.Offer.SellerAccountAddress) + case RevokeSponsorshipTypeData: + if r.Data == nil { + return errors.New("Data can't be nil") + } + if err := validateStellarPublicKey(r.Data.Account); err != nil { + return errors.Wrap(err, "invalid Account address") + } + case RevokeSponsorshipTypeClaimableBalance: + if r.ClaimableBalance == nil { + return errors.New("ClaimableBalance can't be nil") + } + var unused xdr.ClaimableBalanceId + if err := xdr.SafeUnmarshalHex(*r.ClaimableBalance, &unused); err != nil { + return errors.Wrap(err, "cannot parse ClaimableBalance") + } + case RevokeSponsorshipTypeSigner: + if r.Signer == nil { + return errors.New("Signer can't be nil") + } + if err := validateStellarPublicKey(r.Signer.AccountID); err != nil { + return errors.New("invalid Account address") + } + if err := validateStellarSignerKey(r.Signer.SignerAddress); err != nil { + return errors.New("invalid Signer account address") + } + default: + return fmt.Errorf("unknown SponsorshipType: %d", r.SponsorshipType) + } + return nil +} + +func (r *RevokeSponsorship) GetSourceAccount() string { + return r.SourceAccount +} diff --git a/txnbuild/revoke_sponsorship_test.go b/txnbuild/revoke_sponsorship_test.go new file mode 100644 index 0000000000..83632e8682 --- /dev/null +++ b/txnbuild/revoke_sponsorship_test.go @@ -0,0 +1,120 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/xdr" +) + +func TestRevokeSponsorship(t *testing.T) { + accountAddress := "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z" + accountAddress2 := 
"GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU" + claimableBalanceId, err := xdr.MarshalHex(xdr.ClaimableBalanceId{ + Type: xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + V0: &xdr.Hash{0xca, 0xfe, 0xba, 0xbe, 0xde, 0xad, 0xbe, 0xef}, + }) + assert.NoError(t, err) + for _, testcase := range []struct { + name string + op RevokeSponsorship + }{ + { + name: "Account", + op: RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeAccount, + Account: &accountAddress, + }, + }, + { + name: "Account with source", + op: RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeAccount, + Account: &accountAddress, + SourceAccount: accountAddress2, + }, + }, + { + name: "TrustLine", + op: RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeTrustLine, + TrustLine: &TrustLineID{ + Account: accountAddress, + Asset: CreditAsset{ + Code: "USD", + Issuer: newKeypair0().Address(), + }.MustToTrustLineAsset(), + }, + }, + }, + { + name: "Offer", + op: RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeOffer, + Offer: &OfferID{ + SellerAccountAddress: accountAddress, + OfferID: 0xdeadbeef, + }, + }, + }, + { + name: "Data", + op: RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeData, + Data: &DataID{ + Account: accountAddress, + DataName: "foobar", + }, + }, + }, + { + name: "Data", + op: RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeClaimableBalance, + ClaimableBalance: &claimableBalanceId, + }, + }, + { + name: "Signer", + op: RevokeSponsorship{ + SponsorshipType: RevokeSponsorshipTypeSigner, + Signer: &SignerID{ + AccountID: accountAddress, + SignerAddress: "XBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWGTOG", + }, + }, + }, + } { + t.Run(testcase.name, func(t *testing.T) { + op := testcase.op + assert.NoError(t, op.Validate()) + xdrOp, err := op.BuildXDR() + assert.NoError(t, err) + xdrBin, err := xdrOp.MarshalBinary() + assert.NoError(t, err) + var xdrOp2 xdr.Operation + assert.NoError(t, xdr.SafeUnmarshal(xdrBin, &xdrOp2)) + var op2 RevokeSponsorship + assert.NoError(t, op2.FromXDR(xdrOp2)) + assert.Equal(t, op, op2) + testOperationsMarshallingRoundtrip(t, []Operation{&testcase.op}, false) + }) + } + + // without muxed accounts + revokeOp := RevokeSponsorship{ + SourceAccount: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + SponsorshipType: RevokeSponsorshipTypeAccount, + Account: &accountAddress, + } + testOperationsMarshallingRoundtrip(t, []Operation{&revokeOp}, false) + + // with muxed accounts + revokeOp = RevokeSponsorship{ + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + SponsorshipType: RevokeSponsorshipTypeAccount, + Account: &accountAddress, + } + testOperationsMarshallingRoundtrip(t, []Operation{&revokeOp}, true) +} diff --git a/txnbuild/set_options.go b/txnbuild/set_options.go new file mode 100644 index 0000000000..f61aa7f2c2 --- /dev/null +++ b/txnbuild/set_options.go @@ -0,0 +1,328 @@ +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// AccountFlag represents the bitmask flags used to set and clear account authorization options. +type AccountFlag uint32 + +// AuthRequired is a flag that requires the issuing account to give other accounts +// permission before they can hold the issuing account's credit. +const AuthRequired = AccountFlag(xdr.AccountFlagsAuthRequiredFlag) + +// AuthRevocable is a flag that allows the issuing account to revoke its credit +// held by other accounts. 
+const AuthRevocable = AccountFlag(xdr.AccountFlagsAuthRevocableFlag) + +// AuthImmutable is a flag that if set prevents any authorization flags from being +// set, and prevents the account from ever being merged (deleted). +const AuthImmutable = AccountFlag(xdr.AccountFlagsAuthImmutableFlag) + +// AuthClawbackEnabled is a flag that if set allows clawing back assets. +const AuthClawbackEnabled = AccountFlag(xdr.AccountFlagsAuthClawbackEnabledFlag) + +// Threshold is the datatype for MasterWeight, Signer.Weight, and Thresholds. Each is a number +// between 0-255 inclusive. +type Threshold uint8 + +// Signer represents the Signer in a SetOptions operation. +// If the signer already exists, it is updated. +// If the weight is 0, the signer is deleted. +type Signer struct { + Address string + Weight Threshold +} + +// NewHomeDomain is syntactic sugar that makes instantiating SetOptions more convenient. +func NewHomeDomain(hd string) *string { + return &hd +} + +// NewThreshold is syntactic sugar that makes instantiating SetOptions more convenient. +func NewThreshold(t Threshold) *Threshold { + return &t +} + +// NewInflationDestination is syntactic sugar that makes instantiating SetOptions more convenient. +func NewInflationDestination(ai string) *string { + return &ai +} + +// SetOptions represents the Stellar set options operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type SetOptions struct { + InflationDestination *string + SetFlags []AccountFlag + ClearFlags []AccountFlag + MasterWeight *Threshold + LowThreshold *Threshold + MediumThreshold *Threshold + HighThreshold *Threshold + HomeDomain *string + Signer *Signer + xdrOp xdr.SetOptionsOp + SourceAccount string +} + +// BuildXDR for SetOptions returns a fully configured XDR Operation. +func (so *SetOptions) BuildXDR() (xdr.Operation, error) { + err := so.handleInflation() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set inflation destination address") + } + + so.handleClearFlags() + so.handleSetFlags() + so.handleMasterWeight() + so.handleLowThreshold() + so.handleMediumThreshold() + so.handleHighThreshold() + err = so.handleHomeDomain() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set home domain") + } + err = so.handleSigner() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set signer") + } + + opType := xdr.OperationTypeSetOptions + body, err := xdr.NewOperationBody(opType, so.xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, so.SourceAccount) + return op, nil +} + +// handleInflation for SetOptions sets the XDR inflation destination. +// Once set, a new address can be set, but there's no way to ever unset. +func (so *SetOptions) handleInflation() (err error) { + if so.InflationDestination != nil { + var xdrAccountID xdr.AccountId + err = xdrAccountID.SetAddress(*so.InflationDestination) + if err != nil { + return + } + so.xdrOp.InflationDest = &xdrAccountID + } + return +} + +// handleInflationXDR for SetOptions sets the inflation destination from a XDR object. +func (so *SetOptions) handleInflationXDR(account *xdr.AccountId) { + if account != nil { + address := account.Address() + so.InflationDestination = &address + } +} + +// handleSetFlags for SetOptions sets XDR account flags (represented as a bitmask). 
+// See https://developers.stellar.org/docs/glossary/accounts/#flags
+func (so *SetOptions) handleSetFlags() {
+	var flags xdr.Uint32
+	for _, flag := range so.SetFlags {
+		flags = flags | xdr.Uint32(flag)
+	}
+	if len(so.SetFlags) > 0 {
+		so.xdrOp.SetFlags = &flags
+	}
+}
+
+// handleSetFlagsXDR for SetOptions sets account flags from XDR object (represented as a bitmask).
+// See https://developers.stellar.org/docs/glossary/accounts/#flags
+func (so *SetOptions) handleSetFlagsXDR(flags *xdr.Uint32) {
+	if flags != nil {
+		for _, f := range []AccountFlag{AuthRequired, AuthRevocable, AuthImmutable, AuthClawbackEnabled} {
+			if f&AccountFlag(*flags) != 0 {
+				so.SetFlags = append(so.SetFlags, f)
+			}
+		}
+	}
+}
+
+// handleClearFlags for SetOptions unsets XDR account flags (represented as a bitmask).
+// See https://developers.stellar.org/docs/glossary/accounts/#flags
+func (so *SetOptions) handleClearFlags() {
+	var flags xdr.Uint32
+	for _, flag := range so.ClearFlags {
+		flags = flags | xdr.Uint32(flag)
+	}
+	if len(so.ClearFlags) > 0 {
+		so.xdrOp.ClearFlags = &flags
+	}
+}
+
+// handleClearFlagsXDR for SetOptions unsets account flags (represented as a bitmask).
+// See https://developers.stellar.org/docs/glossary/accounts/#flags
+func (so *SetOptions) handleClearFlagsXDR(flags *xdr.Uint32) {
+	if flags != nil {
+		for _, f := range []AccountFlag{AuthRequired, AuthRevocable, AuthImmutable, AuthClawbackEnabled} {
+			if f&AccountFlag(*flags) != 0 {
+				so.ClearFlags = append(so.ClearFlags, f)
+			}
+		}
+	}
+}
+
+// handleMasterWeight for SetOptions sets the XDR weight of the master signing key.
+// See https://developers.stellar.org/docs/glossary/multisig/
+func (so *SetOptions) handleMasterWeight() {
+	if so.MasterWeight != nil {
+		xdrWeight := xdr.Uint32(*so.MasterWeight)
+		so.xdrOp.MasterWeight = &xdrWeight
+	}
+}
+
+// handleMasterWeightXDR for SetOptions sets the weight of the master signing key.
+// See https://developers.stellar.org/docs/glossary/multisig/
+func (so *SetOptions) handleMasterWeightXDR(weight *xdr.Uint32) {
+	if weight != nil {
+		mw := Threshold(uint32(*weight))
+		so.MasterWeight = &mw
+	}
+}
+
+// handleLowThreshold for SetOptions sets the XDR value of the account's "low" threshold.
+// See https://developers.stellar.org/docs/glossary/multisig/
+func (so *SetOptions) handleLowThreshold() {
+	if so.LowThreshold != nil {
+		xdrThreshold := xdr.Uint32(*so.LowThreshold)
+		so.xdrOp.LowThreshold = &xdrThreshold
+	}
+}
+
+// handleLowThresholdXDR for SetOptions sets value of the account's "low" threshold.
+// See https://developers.stellar.org/docs/glossary/multisig/
+func (so *SetOptions) handleLowThresholdXDR(weight *xdr.Uint32) {
+	if weight != nil {
+		lt := Threshold(uint32(*weight))
+		so.LowThreshold = &lt
+	}
+}
+
+// handleMediumThreshold for SetOptions sets the XDR value of the account's "medium" threshold.
+// See https://developers.stellar.org/docs/glossary/multisig/
+func (so *SetOptions) handleMediumThreshold() {
+	if so.MediumThreshold != nil {
+		xdrThreshold := xdr.Uint32(*so.MediumThreshold)
+		so.xdrOp.MedThreshold = &xdrThreshold
+	}
+}
+
+// handleMediumThresholdXDR for SetOptions sets value of the account's "medium" threshold.
+// See https://developers.stellar.org/docs/glossary/multisig/
+func (so *SetOptions) handleMediumThresholdXDR(weight *xdr.Uint32) {
+	if weight != nil {
+		mt := Threshold(uint32(*weight))
+		so.MediumThreshold = &mt
+	}
+}
+
+// handleHighThreshold for SetOptions sets the XDR value of the account's "high" threshold.
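As a rough illustration of the flag handling above (assuming the standard XDR account flag values of 1, 2, 4 and 8), the requested flags are simply OR-ed into one bitmask before being written to the operation:

	flags := []AccountFlag{AuthRequired, AuthRevocable} // 1 | 2
	var mask xdr.Uint32
	for _, f := range flags {
		mask |= xdr.Uint32(f)
	}
	// mask == 3; the tests below exercise the same behaviour with flags 1, 2 and 4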
+// See https://developers.stellar.org/docs/glossary/multisig/ +func (so *SetOptions) handleHighThreshold() { + if so.HighThreshold != nil { + xdrThreshold := xdr.Uint32(*so.HighThreshold) + so.xdrOp.HighThreshold = &xdrThreshold + } +} + +// handleHighThresholdXDR for SetOptions sets value of the account's "high" threshold. +// See https://developers.stellar.org/docs/glossary/multisig/ +func (so *SetOptions) handleHighThresholdXDR(weight *xdr.Uint32) { + if weight != nil { + ht := Threshold(uint32(*weight)) + so.HighThreshold = &ht + } +} + +// handleHomeDomain for SetOptions sets the XDR value of the account's home domain. +// https://developers.stellar.org/docs/glossary/federation/ +func (so *SetOptions) handleHomeDomain() error { + if so.HomeDomain != nil { + if len(*so.HomeDomain) > 32 { + return errors.New("homeDomain must be 32 characters or less") + } + xdrHomeDomain := xdr.String32(*so.HomeDomain) + so.xdrOp.HomeDomain = &xdrHomeDomain + } + + return nil +} + +// handleHomeDomainXDR for SetOptions sets the value of the account's home domain. +// https://developers.stellar.org/docs/glossary/federation/ +func (so *SetOptions) handleHomeDomainXDR(xDomain *xdr.String32) { + if xDomain != nil { + domain := string(*xDomain) + so.HomeDomain = &domain + } +} + +// handleSigner for SetOptions sets the XDR value of a signer for the account. +// See https://developers.stellar.org/docs/glossary/multisig/ +func (so *SetOptions) handleSigner() (err error) { + if so.Signer != nil { + var xdrSigner xdr.Signer + xdrWeight := xdr.Uint32(so.Signer.Weight) + xdrSigner.Weight = xdrWeight + err = xdrSigner.Key.SetAddress(so.Signer.Address) + if err != nil { + return + } + + so.xdrOp.Signer = &xdrSigner + } + return nil +} + +// handleSignerXDR for SetOptions sets the value of a signer for the account. +// See https://developers.stellar.org/docs/glossary/multisig/ +func (so *SetOptions) handleSignerXDR(xSigner *xdr.Signer) { + if xSigner != nil { + newSigner := Signer{} + newSigner.Address = xSigner.Key.Address() + newSigner.Weight = Threshold(uint32(xSigner.Weight)) + so.Signer = &newSigner + } +} + +// FromXDR for SetOptions initialises the txnbuild struct from the corresponding xdr Operation. +func (so *SetOptions) FromXDR(xdrOp xdr.Operation) error { + result, ok := xdrOp.Body.GetSetOptionsOp() + if !ok { + return errors.New("error parsing set_options operation from xdr") + } + + so.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + so.handleInflationXDR(result.InflationDest) + so.handleClearFlagsXDR(result.ClearFlags) + so.handleSetFlagsXDR(result.SetFlags) + so.handleMasterWeightXDR(result.MasterWeight) + so.handleLowThresholdXDR(result.LowThreshold) + so.handleMediumThresholdXDR(result.MedThreshold) + so.handleHighThresholdXDR(result.HighThreshold) + so.handleHomeDomainXDR(result.HomeDomain) + so.handleSignerXDR(result.Signer) + + return nil +} + +// Validate for SetOptions validates the required struct fields. It returns an error if any +// of the fields are invalid. Otherwise, it returns nil. +func (so *SetOptions) Validate() error { + // skipping checks here because the individual methods above already check for required fields. + // Refactoring is out of the scope of this issue(https://github.com/stellar/go/issues/1041) so will leave as is for now. + return nil +} + +// GetSourceAccount returns the source account of the operation, or the empty string if not +// set. 
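The handle*XDR helpers above are what keep BuildXDR and FromXDR symmetric; a hedged sketch of the round trip, using a placeholder home domain:

	original := SetOptions{HomeDomain: NewHomeDomain("example.com")}
	xdrOp, err := original.BuildXDR()
	if err != nil {
		// handle the error
	}
	var restored SetOptions
	if err := restored.FromXDR(xdrOp); err != nil {
		// handle the error
	}
	// *restored.HomeDomain == "example.com"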
+func (so *SetOptions) GetSourceAccount() string { + return so.SourceAccount +} diff --git a/txnbuild/set_options_test.go b/txnbuild/set_options_test.go new file mode 100644 index 0000000000..0398f37257 --- /dev/null +++ b/txnbuild/set_options_test.go @@ -0,0 +1,138 @@ +package txnbuild + +import ( + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestHandleSetFlagsThreeDifferent(t *testing.T) { + options := SetOptions{} + options.SetFlags = []AccountFlag{1, 2, 4} + + options.handleSetFlags() + + expected := xdr.Uint32(7) + assert.Equal(t, expected, *options.xdrOp.SetFlags, "three different valid flags are ok") +} + +func TestHandleSetFlagsThreeSame(t *testing.T) { + options := SetOptions{} + options.SetFlags = []AccountFlag{1, 1, 1} + + options.handleSetFlags() + + expected := xdr.Uint32(1) + assert.Equal(t, expected, *options.xdrOp.SetFlags, "three of the same valid flags are ok") +} + +func TestHandleSetFlagsRedundantFlagsAllowed(t *testing.T) { + options := SetOptions{} + options.SetFlags = []AccountFlag{1, 2, 4, 2, 4, 1} + + options.handleSetFlags() + + expected := xdr.Uint32(7) + assert.Equal(t, expected, *options.xdrOp.SetFlags, "additional redundant flags are allowed") +} + +func TestHandleSetFlagsLessThanThreeAreOK(t *testing.T) { + options := SetOptions{} + options.SetFlags = []AccountFlag{1, 2} + + options.handleSetFlags() + + expected := xdr.Uint32(3) + assert.Equal(t, expected, *options.xdrOp.SetFlags, "less than three flags are ok") +} + +func TestHandleSetFlagsInvalidFlagsAllowed(t *testing.T) { + options := SetOptions{} + options.SetFlags = []AccountFlag{3, 3, 3} + + options.handleSetFlags() + + expected := xdr.Uint32(3) + assert.Equal(t, expected, *options.xdrOp.SetFlags, "invalid flags are allowed") +} + +func TestHandleSetFlagsZeroFlagsAreOK(t *testing.T) { + options := SetOptions{} + options.SetFlags = []AccountFlag{0, 2, 0} + + options.handleSetFlags() + + expected := xdr.Uint32(2) + assert.Equal(t, expected, *options.xdrOp.SetFlags, "zero flags are ok") +} + +func TestHandleClearFlagsThreeDifferent(t *testing.T) { + options := SetOptions{} + options.ClearFlags = []AccountFlag{1, 2, 4} + + options.handleClearFlags() + + expected := xdr.Uint32(7) + assert.Equal(t, expected, *options.xdrOp.ClearFlags, "three different valid flags are ok") +} + +func TestHandleClearFlagsThreeSame(t *testing.T) { + options := SetOptions{} + options.ClearFlags = []AccountFlag{1, 1, 1} + + options.handleClearFlags() + + expected := xdr.Uint32(1) + assert.Equal(t, expected, *options.xdrOp.ClearFlags, "three of the same valid flags are ok") +} + +func TestHandleClearFlagsRedundantFlagsAllowed(t *testing.T) { + options := SetOptions{} + options.ClearFlags = []AccountFlag{1, 2, 4, 2, 4, 1} + + options.handleClearFlags() + + expected := xdr.Uint32(7) + assert.Equal(t, expected, *options.xdrOp.ClearFlags, "additional redundant flags are allowed") +} + +func TestHandleClearFlagsLessThanThreeAreOK(t *testing.T) { + options := SetOptions{} + options.ClearFlags = []AccountFlag{1, 2} + + options.handleClearFlags() + + expected := xdr.Uint32(3) + assert.Equal(t, expected, *options.xdrOp.ClearFlags, "less than three flags are ok") +} + +func TestHandleClearFlagsInvalidFlagsAllowed(t *testing.T) { + options := SetOptions{} + options.ClearFlags = []AccountFlag{3, 3, 3} + + options.handleClearFlags() + + expected := xdr.Uint32(3) + assert.Equal(t, expected, *options.xdrOp.ClearFlags, "invalid flags are allowed") +} + +func TestHandleClearFlagsZeroFlagsAreOK(t 
*testing.T) { + options := SetOptions{} + options.ClearFlags = []AccountFlag{0, 2, 0} + + options.handleClearFlags() + + expected := xdr.Uint32(2) + assert.Equal(t, expected, *options.xdrOp.ClearFlags, "zero flags are ok") +} + +func TestEmptyHomeDomainOK(t *testing.T) { + options := SetOptions{ + HomeDomain: NewHomeDomain(""), + } + options.BuildXDR() + + assert.Equal(t, string(*options.xdrOp.HomeDomain), "", "empty string home domain is set") + +} diff --git a/txnbuild/set_trust_line_flags.go b/txnbuild/set_trust_line_flags.go new file mode 100644 index 0000000000..627464e9df --- /dev/null +++ b/txnbuild/set_trust_line_flags.go @@ -0,0 +1,126 @@ +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// TrustLineFlag represents the bitmask flags used to set and clear account authorization options. +type TrustLineFlag uint32 + +// TrustLineAuthorized is a flag that indicates whether the trustline is authorized. +const TrustLineAuthorized = TrustLineFlag(xdr.TrustLineFlagsAuthorizedFlag) + +// TrustLineAuthorizedToMaintainLiabilities is a flag that if set, will allow a trustline to maintain liabilities +// without permitting any other operations. +const TrustLineAuthorizedToMaintainLiabilities = TrustLineFlag(xdr.TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag) + +// TrustLineClawbackEnabled is a flag that if set allows clawing back assets. +const TrustLineClawbackEnabled = TrustLineFlag(xdr.TrustLineFlagsTrustlineClawbackEnabledFlag) + +// SetTrustLineFlags represents the Stellar set trust line flags operation. See +// https://developers.stellar.org/docs/start/list-of-operations/ +type SetTrustLineFlags struct { + Trustor string + Asset Asset + SetFlags []TrustLineFlag + ClearFlags []TrustLineFlag + SourceAccount string +} + +// BuildXDR for SetTrustLineFlags returns a fully configured XDR Operation. +func (stf *SetTrustLineFlags) BuildXDR() (xdr.Operation, error) { + var xdrOp xdr.SetTrustLineFlagsOp + + // Set XDR address associated with the trustline + err := xdrOp.Trustor.SetAddress(stf.Trustor) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to set trustor address") + } + + // Validate this is an issued asset + if stf.Asset.IsNative() { + return xdr.Operation{}, errors.New("trustline doesn't exist for a native (XLM) asset") + } + + xdrOp.Asset, err = stf.Asset.ToXDR() + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "can't convert asset to XDR") + } + + xdrOp.ClearFlags = trustLineFlagsToXDR(stf.ClearFlags) + xdrOp.SetFlags = trustLineFlagsToXDR(stf.SetFlags) + + opType := xdr.OperationTypeSetTrustLineFlags + body, err := xdr.NewOperationBody(opType, xdrOp) + if err != nil { + return xdr.Operation{}, errors.Wrap(err, "failed to build XDR OperationBody") + } + op := xdr.Operation{Body: body} + SetOpSourceAccount(&op, stf.SourceAccount) + return op, nil +} + +func trustLineFlagsToXDR(flags []TrustLineFlag) xdr.Uint32 { + var result xdr.Uint32 + for _, flag := range flags { + result = result | xdr.Uint32(flag) + } + return result +} + +// FromXDR for SetTrustLineFlags initialises the txnbuild struct from the corresponding xdr Operation. 
+func (stf *SetTrustLineFlags) FromXDR(xdrOp xdr.Operation) error { + op, ok := xdrOp.Body.GetSetTrustLineFlagsOp() + if !ok { + return errors.New("error parsing allow_trust operation from xdr") + } + + stf.SourceAccount = accountFromXDR(xdrOp.SourceAccount) + stf.Trustor = op.Trustor.Address() + asset, err := assetFromXDR(op.Asset) + if err != nil { + return errors.Wrap(err, "error parsing asset from xdr") + } + stf.Asset = asset + stf.ClearFlags = fromXDRTrustlineFlag(op.ClearFlags) + stf.SetFlags = fromXDRTrustlineFlag(op.SetFlags) + + return nil +} + +func fromXDRTrustlineFlag(flags xdr.Uint32) []TrustLineFlag { + flagsValue := xdr.TrustLineFlags(flags) + var result []TrustLineFlag + if flagsValue.IsAuthorized() { + result = append(result, TrustLineAuthorized) + } + if flagsValue.IsAuthorizedToMaintainLiabilitiesFlag() { + result = append(result, TrustLineAuthorizedToMaintainLiabilities) + } + if flagsValue.IsClawbackEnabledFlag() { + result = append(result, TrustLineClawbackEnabled) + } + return result +} + +// Validate for SetTrustLineFlags validates the required struct fields. It returns an error if any of the fields are +// invalid. Otherwise, it returns nil. +func (stf *SetTrustLineFlags) Validate() error { + err := validateStellarPublicKey(stf.Trustor) + if err != nil { + return NewValidationError("Trustor", err.Error()) + } + + err = validateAssetCode(stf.Asset) + if err != nil { + return NewValidationError("Asset", err.Error()) + } + return nil +} + +// GetSourceAccount returns the source account of the operation, or nil if not +// set. +func (stf *SetTrustLineFlags) GetSourceAccount() string { + return stf.SourceAccount +} diff --git a/txnbuild/set_trustline_flags_test.go b/txnbuild/set_trustline_flags_test.go new file mode 100644 index 0000000000..ee56c618b0 --- /dev/null +++ b/txnbuild/set_trustline_flags_test.go @@ -0,0 +1,104 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/xdr" +) + +func TestSetTrustLineFlags(t *testing.T) { + asset := CreditAsset{"ABCD", "GAEJJMDDCRYF752PKIJICUVL7MROJBNXDV2ZB455T7BAFHU2LCLSE2LW"} + source := "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU" + trustor := "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z" + for _, testcase := range []struct { + name string + op SetTrustLineFlags + }{ + { + name: "Both set and clear", + op: SetTrustLineFlags{ + Trustor: trustor, + Asset: asset, + SetFlags: []TrustLineFlag{TrustLineClawbackEnabled}, + ClearFlags: []TrustLineFlag{TrustLineAuthorized, TrustLineAuthorizedToMaintainLiabilities}, + SourceAccount: source, + }, + }, + { + name: "Both set and clear 2", + op: SetTrustLineFlags{ + Trustor: trustor, + Asset: asset, + SetFlags: []TrustLineFlag{TrustLineAuthorized, TrustLineAuthorizedToMaintainLiabilities}, + ClearFlags: []TrustLineFlag{TrustLineClawbackEnabled}, + SourceAccount: source, + }, + }, + { + name: "Only set", + op: SetTrustLineFlags{ + Trustor: trustor, + Asset: asset, + SetFlags: []TrustLineFlag{TrustLineClawbackEnabled}, + ClearFlags: nil, + SourceAccount: source, + }, + }, + { + name: "Only clear", + op: SetTrustLineFlags{ + Trustor: trustor, + Asset: asset, + SetFlags: nil, + ClearFlags: []TrustLineFlag{TrustLineClawbackEnabled}, + SourceAccount: source, + }, + }, + { + name: "No set nor clear", + op: SetTrustLineFlags{ + Trustor: trustor, + Asset: asset, + SetFlags: nil, + ClearFlags: nil, + SourceAccount: source, + }, + }, + { + name: "No source", + op: SetTrustLineFlags{ + Trustor: trustor, + Asset: 
asset, + SetFlags: []TrustLineFlag{TrustLineClawbackEnabled}, + ClearFlags: []TrustLineFlag{TrustLineAuthorized, TrustLineAuthorizedToMaintainLiabilities}, + }, + }, + } { + t.Run(testcase.name, func(t *testing.T) { + op := testcase.op + assert.NoError(t, op.Validate()) + xdrOp, err := op.BuildXDR() + assert.NoError(t, err) + xdrBin, err := xdrOp.MarshalBinary() + assert.NoError(t, err) + var xdrOp2 xdr.Operation + assert.NoError(t, xdr.SafeUnmarshal(xdrBin, &xdrOp2)) + var op2 SetTrustLineFlags + assert.NoError(t, op2.FromXDR(xdrOp2)) + assert.Equal(t, op, op2) + testOperationsMarshallingRoundtrip(t, []Operation{&testcase.op}, false) + }) + } + + // with muxed accounts + setTrustLineFlags := SetTrustLineFlags{ + Trustor: trustor, + Asset: asset, + SetFlags: []TrustLineFlag{TrustLineClawbackEnabled}, + ClearFlags: []TrustLineFlag{TrustLineAuthorized, TrustLineAuthorizedToMaintainLiabilities}, + SourceAccount: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + } + testOperationsMarshallingRoundtrip(t, []Operation{&setTrustLineFlags}, true) +} diff --git a/txnbuild/signer_summary.go b/txnbuild/signer_summary.go new file mode 100644 index 0000000000..269e65a8f7 --- /dev/null +++ b/txnbuild/signer_summary.go @@ -0,0 +1,4 @@ +package txnbuild + +// SignerSummary is a map of signers to their weights. +type SignerSummary map[string]int32 diff --git a/txnbuild/signers_test.go b/txnbuild/signers_test.go new file mode 100644 index 0000000000..6dd98792cf --- /dev/null +++ b/txnbuild/signers_test.go @@ -0,0 +1,559 @@ +package txnbuild + +import ( + "github.com/stellar/go/price" + "testing" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/xdr" + + "github.com/stellar/go/network" + "github.com/stretchr/testify/assert" +) + +func TestAccountMergeMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + accountMerge := AccountMerge{ + Destination: "GAS4V4O2B7DW5T7IQRPEEVCRXMDZESKISR7DVIGKZQYYV3OSQ5SH5LVP", + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&accountMerge}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAACAAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAAAAAAC6i5yxQAAAEABEvDME7nz+5dkZW4OPtZJcQHhoEsk2/r3RiOzq/y6ecRxmcEPyr1qNFtaLeIcvlpHSQQg9VRed7JAeGWEzxQJ0odkfgAAAEBj72ZPE9hg6dgaWBnkvOVQFdlBis8oxqMLfmDnycCm1uX46Phi3uO6G1xBGMQkA2SLJsBuLubSfRVG47r6ov4N" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestAllowTrustMultSigners(t *testing.T) { + kp0 := newKeypair0() + + kp1 := newKeypair1() + txSourceAccount := NewSimpleAccount(kp1.Address(), int64(9606132444168199)) + + issuedAsset := CreditAsset{"ABCD", kp1.Address()} + allowTrust := AllowTrust{ + Trustor: kp1.Address(), + Type: issuedAsset, + Authorize: true, + SourceAccount: kp0.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&allowTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + 
assert.NoError(t, err) + + expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAIiC6AAAACAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAAODcbeFyXKxmUWK1L6znNbKKIkPkHRJNbLktcKPqLnLFAAAABwAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAFBQkNEAAAAAQAAAAAAAAAC6i5yxQAAAEB5vvJHErjjFX7YWzUbuSLc6JwNAAry+fIeJQuitCRujgkkeYEWy1DjKlbtcaUGbvurfaR8CjfUKBD6F74k964A0odkfgAAAEAq9Ks21/ca6HhTs5YiYG+/nWSRI8mTKZhd2/dDcJRFrZuCj7vlNi76/dSJnjmLbdf1BpLA5Rgvt2hatxbGygYP" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestBumpSequenceMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + bumpSequence := BumpSequence{ + BumpTo: 9606132444168300, + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&bumpSequence}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAACwAiILoAAABsAAAAAAAAAALqLnLFAAAAQOcGy1wxUHU5CdDqN5pFula3BXspTmoNLq4+pSl2kFd5hnRUAOCfTnswoceQ8p1vhcULbsl20gWE3IF1AA2qUgnSh2R+AAAAQLrmJprrsJDARgt6F+EQOmZDOT32K3VLrgIRLzp7mp38sp6zoA/0T7NETjqXezwDrmYkpFpSWT1AmiUwqPEGXQ4=" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestChangeTrustMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + changeTrust := ChangeTrust{ + Line: CreditAsset{"ABCD", kp0.Address()}.MustToChangeTrustAsset(), + Limit: "10", + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&changeTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAABgAAAAFBQkNEAAAAAODcbeFyXKxmUWK1L6znNbKKIkPkHRJNbLktcKPqLnLFAAAAAAX14QAAAAAAAAAAAuoucsUAAABA3nSc20C4tFs7nUZp/P4kTzpmPEHYaATNtzGcU4mOwOrxrCPJr1TpVnASi/8d3M0AhRXLa2c5tI9s79hc4/w+BNKHZH4AAABAtPLvu8OPMiaXEfDCZivyynR5Q/sFfMWwqOBIEq4wJSbzl24Dz4uqVdjlxyqKAOkdsefKINfrkcaETZrDYRU8BQ==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestCreateAccountMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := 
"AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAALqLnLFAAAAQDV8bLiIbfvgV6NtYoipI9Ja4VQmDXWw/7gT2y+wFyqJXk9XMp2ke5bgO+J6bDH8xPQFRa/lXJTmPnc0AaiFmQzSh2R+AAAAQNBEP2v1OPVYFzepAB58TCH8v+6wExgpPrLasptj2un3GyCiBcqE0VYvrj05CHEtLtcC9Rb5FrlOGG327VDyeQM=" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestCreatePassiveSellOfferMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + createPassiveOffer := CreatePassiveSellOffer{ + Selling: NativeAsset{}, + Buying: CreditAsset{"ABCD", kp0.Address()}, + Amount: "10", + Price: xdr.Price{1, 1}, + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createPassiveOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAABAAAAAAAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAF9eEAAAAAAQAAAAEAAAAAAAAAAuoucsUAAABA0APb892L3NYP8YyXZMonoBOYMOMtUZhpjOnfSnfouxQ/otFnRss5MX/Ro6w6a1EI9f4gxRhNh6WDm+WXeVFHD9KHZH4AAABAqvvW4IA+53gcWg2DuJMUf5bS46gbnKqgG2HCGO28Jxst9gmv477IJcJ1NlIF96oQhB0rITdtW7BiP4eX/sXFBw==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestInflationMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + inflation := Inflation{ + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&inflation}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAACQAAAAAAAAAC6i5yxQAAAEANdI2UgZ566jUekR+rW4r3ya6KQcV2tinB9sjfSd5gRqCMYAUsgQmBHPailp5K5mVBr5m0zvizTnfj3UOGPAgD0odkfgAAAECf29QWzDc7FzBqhhC61x/G3BDOZ12vo6tOsazJyG4DETUbI/jYUsion81j9D0ELx0OAtssOsvhwX1r8MwBT4UB" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageDataMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + manageData := ManageData{ + Name: "Fruit preference", + Value: []byte("Apple"), + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&manageData}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := 
"AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAACgAAABBGcnVpdCBwcmVmZXJlbmNlAAAAAQAAAAVBcHBsZQAAAAAAAAAAAAAC6i5yxQAAAECwrVa4S7aX0RqxYYohiavPdXsBbuo7ut6aNn4I52B4ANjIEhSea0aNx9PbiMlqXJhHngcF4oZ8egIYfUf6Q54O0odkfgAAAEDkq5kiNBo0g0oKdPkRcK2WAYKo1bRBOWngnm2dykdCQhGF8MyBv6vbdVhs+f88nfAZpqiNfqz9EekEqdZA8ocK" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageOfferCreateMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + selling := NativeAsset{} + buying := CreditAsset{"ABCD", kp0.Address()} + sellAmount := "100" + createOffer, err := CreateOfferOp(selling, buying, sellAmount, price.MustParse("0.01"), kp1.Address()) + check(err) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAAAwAAAAAAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAA7msoAAAAAAQAAAGQAAAAAAAAAAAAAAAAAAAAC6i5yxQAAAEAaOoWXzyhFoJqVov0wmaJ47EM/8N0wgoNkHJ9tfG/7wqujo03s07pAicyWboRCO5P0k6df3RKbaJT/crBrKnoI0odkfgAAAECwHJ6t67JJOKe7Icr30S7jZytV4Dp1bb4aNuFFuqan5b/sEWlViYO1afOPBouWwRQfJjyUWDGt5Wy+/J+MGCQN" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageOfferDeleteMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + offerID := int64(2921622) + deleteOffer, err := DeleteOfferOp(offerID, kp1.Address()) + check(err) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&deleteOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAAAwAAAAAAAAABRkFLRQAAAABBB4BkxJWGYvNgJBoiXUo2tjgWlNmhHMMKdwGN7RSdsQAAAAAAAAAAAAAAAQAAAAEAAAAAACyUlgAAAAAAAAAC6i5yxQAAAEBaditn57uAGNhrBW+QS/G/Lg8AqB73HR4vnu6HnRKeduLCQsLOJz8BFixbuQyXDKiwrxZK+VIMLUMBazSZjKsG0odkfgAAAEC7UNgojiThuTrJlsnRVhVGnbOkCY+dUXCWyW9Jgsg3sFgaWUS5oeOSDMjEZTCaMZPMCiSuFEdkn6Jc+2jJo68O" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageOfferUpdateMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + selling := NativeAsset{} + buying := CreditAsset{"ABCD", kp0.Address()} + sellAmount := "50" + offerID := int64(2497628) + updateOffer, err := UpdateOfferOp(selling, buying, sellAmount, price.MustParse("0.02"), offerID, kp1.Address()) + check(err) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&updateOffer}, + BaseFee: MinBaseFee, + Timebounds: 
NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAAAwAAAAAAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAdzWUAAAAAAQAAADIAAAAAACYcXAAAAAAAAAAC6i5yxQAAAEDhL3pD9+Veot1821y3cQuQRxYNaUJIQt+SlxySg2HV8Bm+WIx4eWpmC+/CS7a5rMLuzW6Vs9zGP628RZ/vCN4B0odkfgAAAEC1PuV3ntuZ0k20SZ1secwrZCEOysw52/1f6/Z4sx7Is53oraNuiUKnhCgR/6s/PHd5EMVlguC39Od7Tw+nfkgN" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestPathPaymentMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPayment{ + SendAsset: NativeAsset{}, + SendMax: "10", + Destination: kp0.Address(), + DestAsset: NativeAsset{}, + DestAmount: "1", + Path: []Asset{abcdAsset}, + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAAAgAAAAAAAAAABfXhAAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAAAAAAAJiWgAAAAAEAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAAAAAC6i5yxQAAAECmKj83TAGKOza6zjhNh510cwiAYsSE/Y1rXjcrI7tO1lXBqSYaCyVufe1KzJbEVViwf0CZOnuo8Oksy0Q18OcC0odkfgAAAEDM/Wano1U5PSolmQr9Hv4aFvheLmtpjOrR1f5LswgfR6lRoJWyvcTdGjhp60ML8JafNuHFTmJ1JFfPh38LJ0ID" + + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestPaymentMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + payment := Payment{ + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Amount: "10", + Asset: NativeAsset{}, + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&payment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAAAQAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAAAAAAAABfXhAAAAAAAAAAAC6i5yxQAAAEB82JGXqIIh87Wp6kb6118YjUoR/2X+RFI4Gm62+sMIF9XjlAUY6eSfdqqvLP6NQdbMazDYj6VYgKuNLQ/8hn8I0odkfgAAAEDVQumCyGwJxbNxv63X+yMa1mBTsYzilEmbDdKtQZvzF5Pu8nYXAm2AYKvlRmunmX/AXJICHQLQyPFTVj6E8oQD" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestSetOptionsMultSigners(t *testing.T) { + kp0 := newKeypair0() + txSourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + kp1 := newKeypair1() + + setOptions := SetOptions{ + SetFlags: []AccountFlag{AuthRequired, AuthRevocable}, + SourceAccount: kp1.Address(), + } + + received, err := newSignedTransaction( + TransactionParams{ + 
SourceAccount: &txSourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAABAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAABQAAAAAAAAAAAAAAAQAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAuoucsUAAABAO1oK5K+qtaNQn/a836KapCFEFg/Unt02oFNhoTJ/Toxk++X5RgGjnUPpBywxkI04QyjDHQfIwiRnvCBnP3SED9KHZH4AAABA54vLHhDV5sodEIB5C4zOBJoR5ga+Tb1OlaSWlQX7+t9cmmhz+5TjX4PcfA8h48/LodN0u4qUoRyK0AxTfi/nDA==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +type transactionCommon interface { + Signatures() []xdr.DecoratedSignature + Hash(networkStr string) ([32]byte, error) + Base64() (string, error) + ToXDR() xdr.TransactionEnvelope +} + +func verifySignatures(t *testing.T, tx transactionCommon, signers ...*keypair.Full) { + assert.Len(t, tx.Signatures(), len(signers)) + + hash, err := tx.Hash(network.TestNetworkPassphrase) + assert.NoError(t, err) + signatures := tx.Signatures() + for i, kp := range signers { + assert.NoError(t, kp.Verify(hash[:], signatures[i].Signature)) + } +} + +func assertBase64(t *testing.T, tx transactionCommon) string { + base64, err := tx.Base64() + assert.NoError(t, err) + + envRef := tx.ToXDR() + envRefBase64, err := xdr.MarshalBase64(envRef) + assert.NoError(t, err) + + assert.Equal(t, base64, envRefBase64) + + return base64 +} + +func TestSigningImmutability(t *testing.T) { + kp0, kp1, kp2 := newKeypair0(), newKeypair1(), newKeypair2() + + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + params := TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + } + root, err := NewTransaction(params) + assert.NoError(t, err) + root, err = root.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + rootB64 := assertBase64(t, root) + rootXDR := root.ToXDR() + + left, err := root.Sign(network.TestNetworkPassphrase, kp1) + assert.NoError(t, err) + leftB64 := assertBase64(t, left) + + right, err := root.Sign(network.TestNetworkPassphrase, kp2) + assert.NoError(t, err) + rightB64 := assertBase64(t, right) + + expectedRootB64, err := newSignedTransaction( + params, network.TestNetworkPassphrase, kp0, + ) + assert.NoError(t, err) + expectedLeftB64, err := newSignedTransaction( + params, network.TestNetworkPassphrase, kp0, kp1, + ) + assert.NoError(t, err) + expectedRightB64, err := newSignedTransaction( + params, network.TestNetworkPassphrase, kp0, kp2, + ) + assert.NoError(t, err) + + assert.Equal(t, expectedRootB64, rootB64) + verifySignatures(t, root, kp0) + assert.Equal(t, expectedLeftB64, leftB64) + verifySignatures(t, left, kp0, kp1) + assert.Equal(t, expectedRightB64, rightB64) + verifySignatures(t, right, kp0, kp2) + + rootXDRB64, err := xdr.MarshalBase64(rootXDR) + if assert.NoError(t, err) { + assert.Equal(t, expectedRootB64, rootXDRB64) + } +} + +func TestFeeBumpSigningImmutability(t *testing.T) { + kp0, kp1, kp2 := newKeypair0(), newKeypair1(), newKeypair2() + + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + innerParams := TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + } + inner, err := NewTransaction(innerParams) + assert.NoError(t, err) + + params 
:= FeeBumpTransactionParams{ + Inner: inner, + FeeAccount: kp1.Address(), + BaseFee: MinBaseFee, + } + root, err := NewFeeBumpTransaction(params) + assert.NoError(t, err) + root, err = root.Sign(network.TestNetworkPassphrase, kp1) + assert.NoError(t, err) + rootB64 := assertBase64(t, root) + + left, err := root.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + leftB64 := assertBase64(t, left) + + right, err := root.Sign(network.TestNetworkPassphrase, kp2) + assert.NoError(t, err) + rightB64 := assertBase64(t, right) + + expectedRootB64, err := newSignedFeeBumpTransaction( + params, network.TestNetworkPassphrase, kp1, + ) + assert.NoError(t, err) + + expectedLeftB64, err := newSignedFeeBumpTransaction( + params, network.TestNetworkPassphrase, kp1, kp0, + ) + assert.NoError(t, err) + expectedRightB64, err := newSignedFeeBumpTransaction( + params, network.TestNetworkPassphrase, kp1, kp2, + ) + assert.NoError(t, err) + + assert.Equal(t, expectedRootB64, rootB64) + verifySignatures(t, root, kp1) + assert.Equal(t, expectedLeftB64, leftB64) + verifySignatures(t, left, kp1, kp0) + assert.Equal(t, expectedRightB64, rightB64) + verifySignatures(t, right, kp1, kp2) +} diff --git a/txnbuild/simple_account.go b/txnbuild/simple_account.go new file mode 100644 index 0000000000..0c44b03ee6 --- /dev/null +++ b/txnbuild/simple_account.go @@ -0,0 +1,32 @@ +package txnbuild + +// SimpleAccount is a minimal implementation of an Account. +type SimpleAccount struct { + AccountID string + Sequence int64 +} + +// GetAccountID returns the Account ID. +func (sa *SimpleAccount) GetAccountID() string { + return sa.AccountID +} + +// IncrementSequenceNumber increments the internal record of the +// account's sequence number by 1. +func (sa *SimpleAccount) IncrementSequenceNumber() (int64, error) { + sa.Sequence++ + return sa.Sequence, nil +} + +// GetSequenceNumber returns the sequence number of the account. +func (sa *SimpleAccount) GetSequenceNumber() (int64, error) { + return sa.Sequence, nil +} + +// NewSimpleAccount is a factory method that creates a SimpleAccount from "accountID" and "sequence". +func NewSimpleAccount(accountID string, sequence int64) SimpleAccount { + return SimpleAccount{accountID, sequence} +} + +// ensure that SimpleAccount implements Account interface. +var _ Account = &SimpleAccount{} diff --git a/txnbuild/timebounds.go b/txnbuild/timebounds.go new file mode 100644 index 0000000000..ce1a0b66e7 --- /dev/null +++ b/txnbuild/timebounds.go @@ -0,0 +1,71 @@ +package txnbuild + +import ( + "errors" + "time" +) + +// TimeoutInfinite allows an indefinite upper bound to be set for Transaction.MaxTime. This is usually not +// what you want. +const TimeoutInfinite = int64(0) + +// Timebounds represents the time window during which a Stellar transaction is considered valid. +// +// MinTime and MaxTime represent Stellar timebounds - a window of time over which the Transaction will be +// considered valid. In general, almost all Transactions benefit from setting an upper timebound, because once submitted, +// the status of a pending Transaction may remain unresolved for a long time if the network is congested. +// With an upper timebound, the submitter has a guaranteed time at which the Transaction is known to have either +// succeeded or failed, and can then take appropriate action (e.g. to resubmit or mark as resolved). +// +// Create a Timebounds struct using one of NewTimebounds(), NewTimeout(), or NewInfiniteTimeout(). 
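SimpleAccount, defined just above, is the minimal Account implementation the tests in this diff use as a transaction source; a small usage sketch with an illustrative address and sequence number:

	account := NewSimpleAccount("GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", 9605939170639897)
	seq, _ := account.GetSequenceNumber()        // 9605939170639897
	next, _ := account.IncrementSequenceNumber() // 9605939170639898
	_, _ = seq, next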
+type Timebounds struct { + MinTime int64 + MaxTime int64 + wasBuilt bool +} + +// Validate for Timebounds sanity-checks the configured Timebound limits, and confirms the object was built +// using a factory method. This is done to ensure that default Timebound structs (which have no limits) are not +// valid - you must explicitly specifiy the Timebound you require. +func (tb *Timebounds) Validate() error { + if !tb.wasBuilt { + return errors.New("timebounds must be constructed using NewTimebounds(), NewTimeout(), or NewInfiniteTimeout()") + } + if tb.MinTime < 0 { + return errors.New("invalid timebound: minTime cannot be negative") + } + + if tb.MaxTime < 0 { + return errors.New("invalid timebound: maxTime cannot be negative") + } + + if tb.MaxTime != TimeoutInfinite { + if tb.MaxTime < tb.MinTime { + return errors.New("invalid timebound: maxTime < minTime") + } + } + + return nil +} + +// NewTimebounds is a factory method that constructs a Timebounds object from a min and max time. +// A Transaction cannot be built unless a Timebounds object is provided through a factory method. +func NewTimebounds(minTime, maxTime int64) Timebounds { + return Timebounds{minTime, maxTime, true} +} + +// NewTimeout is a factory method that sets the MaxTime to be the duration in seconds in the +// future specified by 'timeout'. +// A Transaction cannot be built unless a Timebounds object is provided through a factory method. +// This method uses the provided system time - make sure it is accurate. +func NewTimeout(timeout int64) Timebounds { + return Timebounds{0, time.Now().UTC().Unix() + timeout, true} +} + +// NewInfiniteTimeout is a factory method that sets the MaxTime to a value representing an indefinite +// upper time bound. This is rarely needed, but is helpful for certain smart contracts, and for +// deterministic testing. A Transaction cannot be built unless a Timebounds object is provided through +// a factory method. 
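A short sketch of the three Timebounds constructors and of the guarantee Validate enforces: the zero value is rejected because it was not built through a factory method. The explicit upper bound below is illustrative.

	window := NewTimebounds(0, 1700000000) // explicit window
	soon := NewTimeout(300)                // valid for roughly the next five minutes
	forever := NewInfiniteTimeout()        // MaxTime == TimeoutInfinite

	var zero Timebounds
	_ = window.Validate()  // nil
	_ = soon.Validate()    // nil
	_ = forever.Validate() // nil
	_ = zero.Validate()    // error: must be built via a factory method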
+func NewInfiniteTimeout() Timebounds { + return Timebounds{0, TimeoutInfinite, true} +} diff --git a/txnbuild/timebounds_test.go b/txnbuild/timebounds_test.go new file mode 100644 index 0000000000..b852242e66 --- /dev/null +++ b/txnbuild/timebounds_test.go @@ -0,0 +1,51 @@ +package txnbuild + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTimeboundsRequireConstructor(t *testing.T) { + tb := Timebounds{MinTime: -1, MaxTime: 300} + err := tb.Validate() + expectedErrMsg := "timebounds must be constructed using NewTimebounds(), NewTimeout(), or NewInfiniteTimeout()" + + require.EqualError(t, err, expectedErrMsg, "Default timebounds not allowed") +} + +func TestSetTimeboundsNegativeMinTime(t *testing.T) { + tb := NewTimebounds(-1, 300) + err := tb.Validate() + expectedErrMsg := "invalid timebound: minTime cannot be negative" + + require.EqualError(t, err, expectedErrMsg, "No negative minTime allowed") +} + +func TestSetTimeboundsNegativeMaxTime(t *testing.T) { + tb := NewTimebounds(1, -300) + err := tb.Validate() + expectedErrMsg := "invalid timebound: maxTime cannot be negative" + + require.EqualError(t, err, expectedErrMsg, "No negative maxTime allowed") +} + +func TestSetTimeoutNegativeWidth(t *testing.T) { + tb := NewTimeout(300) + tb.MinTime = 5555624032 // Sometime in 2146 + err := tb.Validate() + expectedErrMsg := "invalid timebound: maxTime < minTime" + + require.EqualError(t, err, expectedErrMsg, "No negative width windows") +} + +func TestSetTimeout(t *testing.T) { + tb := NewTimeout(300) + tb.MinTime = 1 + err := tb.Validate() + if assert.NoError(t, err) { + assert.Equal(t, int64(1), tb.MinTime) + assert.NotNil(t, tb.MaxTime) + } +} diff --git a/txnbuild/transaction.go b/txnbuild/transaction.go new file mode 100644 index 0000000000..f73a8f3d56 --- /dev/null +++ b/txnbuild/transaction.go @@ -0,0 +1,1403 @@ +/* +Package txnbuild implements transactions and operations on the Stellar network. +This library provides an interface to the Stellar transaction model. It supports the building of Go applications on +top of the Stellar network (https://www.stellar.org/). Transactions constructed by this library may be submitted +to any Horizon instance for processing onto the ledger, using any Stellar SDK client. The recommended client for Go +programmers is horizonclient (https://github.com/stellar/go/tree/master/clients/horizonclient). Together, these two +libraries provide a complete Stellar SDK. +For more information and further examples, see https://github.com/stellar/go/blob/master/docs/reference/readme.md +*/ +package txnbuild + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "math" + "math/bits" + "strings" + "time" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// MinBaseFee is the minimum transaction fee for the Stellar network of 100 stroops (0.00001 XLM). +const MinBaseFee = 100 + +// Account represents the aspects of a Stellar account necessary to construct transactions. 
See
+// https://developers.stellar.org/docs/glossary/accounts/
+type Account interface {
+	GetAccountID() string
+	IncrementSequenceNumber() (int64, error)
+	GetSequenceNumber() (int64, error)
+}
+
+func hashHex(e xdr.TransactionEnvelope, networkStr string) (string, error) {
+	h, err := network.HashTransactionInEnvelope(e, networkStr)
+	if err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(h[:]), nil
+}
+
+func concatSignatures(
+	e xdr.TransactionEnvelope,
+	networkStr string,
+	signatures []xdr.DecoratedSignature,
+	kps ...*keypair.Full,
+) ([]xdr.DecoratedSignature, error) {
+	// Hash the transaction
+	h, err := network.HashTransactionInEnvelope(e, networkStr)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to hash transaction")
+	}
+
+	extended := make(
+		[]xdr.DecoratedSignature,
+		len(signatures),
+		len(signatures)+len(kps),
+	)
+	copy(extended, signatures)
+	// Sign the hash
+	for _, kp := range kps {
+		sig, err := kp.SignDecorated(h[:])
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to sign transaction")
+		}
+		extended = append(extended, sig)
+	}
+	return extended, nil
+}
+
+func concatSignatureDecorated(e xdr.TransactionEnvelope, signatures []xdr.DecoratedSignature, newSignatures []xdr.DecoratedSignature) ([]xdr.DecoratedSignature, error) {
+	extended := make([]xdr.DecoratedSignature, len(signatures)+len(newSignatures))
+	copy(extended, signatures)
+	copy(extended[len(signatures):], newSignatures)
+	return extended, nil
+}
+
+func concatSignatureBase64(e xdr.TransactionEnvelope, signatures []xdr.DecoratedSignature, networkStr, publicKey, signature string) ([]xdr.DecoratedSignature, error) {
+	if signature == "" {
+		return nil, errors.New("signature not presented")
+	}
+
+	kp, err := keypair.ParseAddress(publicKey)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to parse the public key %s", publicKey)
+	}
+
+	sigBytes, err := base64.StdEncoding.DecodeString(signature)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to base64-decode the signature %s", signature)
+	}
+
+	h, err := network.HashTransactionInEnvelope(e, networkStr)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to hash transaction")
+	}
+
+	err = kp.Verify(h[:], sigBytes)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to verify the signature")
+	}
+
+	extended := make([]xdr.DecoratedSignature, len(signatures), len(signatures)+1)
+	copy(extended, signatures)
+	extended = append(extended, xdr.DecoratedSignature{
+		Hint:      xdr.SignatureHint(kp.Hint()),
+		Signature: xdr.Signature(sigBytes),
+	})
+
+	return extended, nil
+}
+
+func stringsToKP(keys ...string) ([]*keypair.Full, error) {
+	var signers []*keypair.Full
+	for _, k := range keys {
+		kp, err := keypair.Parse(k)
+		if err != nil {
+			return nil, errors.Wrapf(err, "provided string %s is not a valid Stellar key", k)
+		}
+		kpf, ok := kp.(*keypair.Full)
+		if !ok {
+			return nil, errors.Errorf("provided string %s is not a valid Stellar secret key", k)
+		}
+		signers = append(signers, kpf)
+	}
+
+	return signers, nil
+}
+
+func concatHashX(signatures []xdr.DecoratedSignature, preimage []byte) ([]xdr.DecoratedSignature, error) {
+	if maxSize := xdr.Signature(preimage).XDRMaxSize(); len(preimage) > maxSize {
+		return nil, errors.Errorf(
+			"preimage cannot be more than %d bytes", maxSize,
+		)
+	}
+	extended := make(
+		[]xdr.DecoratedSignature,
+		len(signatures),
+		len(signatures)+1,
+	)
+	copy(extended, signatures)
+
+	preimageHash := sha256.Sum256(preimage)
+	var hint [4]byte
+	// copy the last 4-bytes of the signer
public key to be used as hint + copy(hint[:], preimageHash[28:]) + + sig := xdr.DecoratedSignature{ + Hint: xdr.SignatureHint(hint), + Signature: xdr.Signature(preimage), + } + return append(extended, sig), nil +} + +func marshallBinary(e xdr.TransactionEnvelope, signatures []xdr.DecoratedSignature) ([]byte, error) { + switch e.Type { + case xdr.EnvelopeTypeEnvelopeTypeTx: + e.V1.Signatures = signatures + case xdr.EnvelopeTypeEnvelopeTypeTxV0: + e.V0.Signatures = signatures + case xdr.EnvelopeTypeEnvelopeTypeTxFeeBump: + e.FeeBump.Signatures = signatures + default: + panic("invalid transaction type: " + e.Type.String()) + } + + var txBytes bytes.Buffer + _, err := xdr.Marshal(&txBytes, e) + if err != nil { + return nil, err + } + return txBytes.Bytes(), nil +} + +func marshallBase64(e xdr.TransactionEnvelope, signatures []xdr.DecoratedSignature) (string, error) { + binary, err := marshallBinary(e, signatures) + if err != nil { + return "", errors.Wrap(err, "failed to get XDR bytestring") + } + + return base64.StdEncoding.EncodeToString(binary), nil +} + +func marshallBase64Bytes(e xdr.TransactionEnvelope, signatures []xdr.DecoratedSignature) ([]byte, error) { + binary, err := marshallBinary(e, signatures) + if err != nil { + return nil, errors.Wrap(err, "failed to get XDR bytestring") + } + + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(binary))) + base64.StdEncoding.Encode(encoded, binary) + return encoded, nil +} + +// Transaction represents a Stellar transaction. See +// https://developers.stellar.org/docs/glossary/transactions/ +// A Transaction may be wrapped by a FeeBumpTransaction in which case +// the account authorizing the FeeBumpTransaction will pay for the transaction fees +// instead of the Transaction's source account. +type Transaction struct { + envelope xdr.TransactionEnvelope + baseFee int64 + maxFee int64 + sourceAccount SimpleAccount + operations []Operation + memo Memo + timebounds Timebounds +} + +// BaseFee returns the per operation fee for this transaction. +func (t *Transaction) BaseFee() int64 { + return t.baseFee +} + +// MaxFee returns the total fees which can be spent to submit this transaction. +func (t *Transaction) MaxFee() int64 { + return t.maxFee +} + +// SourceAccount returns the account which is originating this account. +func (t *Transaction) SourceAccount() SimpleAccount { + return t.sourceAccount +} + +// SequenceNumber returns the sequence number of the transaction. +func (t *Transaction) SequenceNumber() int64 { + return t.sourceAccount.Sequence +} + +// Memo returns the memo configured for this transaction. +func (t *Transaction) Memo() Memo { + return t.memo +} + +// Timebounds returns the Timebounds configured for this transaction. +func (t *Transaction) Timebounds() Timebounds { + return t.timebounds +} + +// Operations returns the list of operations included in this transaction. +// The contents of the returned slice should not be modified. +func (t *Transaction) Operations() []Operation { + return t.operations +} + +// Signatures returns the list of signatures attached to this transaction. +// The contents of the returned slice should not be modified. +func (t *Transaction) Signatures() []xdr.DecoratedSignature { + return t.envelope.Signatures() +} + +// Hash returns the network specific hash of this transaction +// encoded as a byte array. 
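Given the accessors above, a hedged sketch of inspecting a freshly built transaction; TransactionParams, NewTransaction and Inflation are defined elsewhere in this package, and newKeypair0 is the keypair helper the tests use:

	kp := newKeypair0()
	source := NewSimpleAccount(kp.Address(), 1)
	tx, err := NewTransaction(TransactionParams{
		SourceAccount:        &source,
		IncrementSequenceNum: true,
		Operations:           []Operation{&Inflation{}},
		BaseFee:              MinBaseFee,
		Timebounds:           NewInfiniteTimeout(),
	})
	if err == nil {
		_ = tx.BaseFee()        // 100
		_ = tx.MaxFee()         // base fee multiplied by the number of operations
		_ = tx.SequenceNumber() // 2, after the increment
	}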
+func (t *Transaction) Hash(networkStr string) ([32]byte, error) { + return network.HashTransactionInEnvelope(t.envelope, networkStr) +} + +// HashHex returns the network specific hash of this transaction +// encoded as a hexadecimal string. +func (t *Transaction) HashHex(network string) (string, error) { + return hashHex(t.envelope, network) +} + +func (t *Transaction) clone(signatures []xdr.DecoratedSignature) *Transaction { + newTx := new(Transaction) + *newTx = *t + newTx.envelope = t.envelope + + switch newTx.envelope.Type { + case xdr.EnvelopeTypeEnvelopeTypeTx: + newTx.envelope.V1 = new(xdr.TransactionV1Envelope) + *newTx.envelope.V1 = *t.envelope.V1 + newTx.envelope.V1.Signatures = signatures + case xdr.EnvelopeTypeEnvelopeTypeTxV0: + newTx.envelope.V0 = new(xdr.TransactionV0Envelope) + *newTx.envelope.V0 = *t.envelope.V0 + newTx.envelope.V0.Signatures = signatures + default: + panic("invalid transaction type: " + newTx.envelope.Type.String()) + } + + return newTx +} + +// Sign returns a new Transaction instance which extends the current instance +// with additional signatures derived from the given list of keypair instances. +func (t *Transaction) Sign(network string, kps ...*keypair.Full) (*Transaction, error) { + extendedSignatures, err := concatSignatures(t.envelope, network, t.Signatures(), kps...) + if err != nil { + return nil, err + } + + return t.clone(extendedSignatures), nil +} + +// SignWithKeyString returns a new Transaction instance which extends the current instance +// with additional signatures derived from the given list of private key strings. +func (t *Transaction) SignWithKeyString(network string, keys ...string) (*Transaction, error) { + kps, err := stringsToKP(keys...) + if err != nil { + return nil, err + } + return t.Sign(network, kps...) +} + +// SignHashX returns a new Transaction instance which extends the current instance +// with HashX signature type. +// See description here: https://developers.stellar.org/docs/glossary/multisig/#hashx +func (t *Transaction) SignHashX(preimage []byte) (*Transaction, error) { + extendedSignatures, err := concatHashX(t.Signatures(), preimage) + if err != nil { + return nil, err + } + + return t.clone(extendedSignatures), nil +} + +// ClearSignatures returns a new Transaction instance which extends the current instance +// with signatures removed. +func (t *Transaction) ClearSignatures() (*Transaction, error) { + return t.clone(nil), nil +} + +// AddSignatureDecorated returns a new Transaction instance which extends the current instance +// with an additional decorated signature(s). +func (t *Transaction) AddSignatureDecorated(signature ...xdr.DecoratedSignature) (*Transaction, error) { + extendedSignatures, err := concatSignatureDecorated(t.envelope, t.Signatures(), signature) + if err != nil { + return nil, err + } + + return t.clone(extendedSignatures), nil +} + +// AddSignatureBase64 returns a new Transaction instance which extends the current instance +// with an additional signature derived from the given base64-encoded signature. +func (t *Transaction) AddSignatureBase64(network, publicKey, signature string) (*Transaction, error) { + extendedSignatures, err := concatSignatureBase64(t.envelope, t.Signatures(), network, publicKey, signature) + if err != nil { + return nil, err + } + + return t.clone(extendedSignatures), nil +} + +// ToXDR returns the a xdr.TransactionEnvelope which is equivalent to this transaction. 
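Because Sign and its variants clone the envelope instead of mutating it, each call yields an independent transaction, which is what TestSigningImmutability earlier in this diff exercises. A sketch, where tx is a built transaction and kp0/kp1 are signing keypairs (placeholders):

	once, err := tx.Sign(network.TestNetworkPassphrase, kp0)
	if err != nil {
		// handle the error
	}
	twice, err := once.Sign(network.TestNetworkPassphrase, kp1)
	if err != nil {
		// handle the error
	}
	_ = len(tx.Signatures())    // 0, the original is untouched
	_ = len(once.Signatures())  // 1
	_ = len(twice.Signatures()) // 2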
+// The envelope should not be modified because any changes applied may +// affect the internals of the Transaction instance. +func (t *Transaction) ToXDR() xdr.TransactionEnvelope { + return t.envelope +} + +// MarshalBinary returns the binary XDR representation of the transaction envelope. +func (t *Transaction) MarshalBinary() ([]byte, error) { + return marshallBinary(t.envelope, t.Signatures()) +} + +// MarshalText returns the base64 XDR representation of the transaction envelope. +func (t *Transaction) MarshalText() ([]byte, error) { + return marshallBase64Bytes(t.envelope, t.Signatures()) +} + +// UnmarshalText consumes into the value the base64 XDR representation of the +// transaction envelope. +func (t *Transaction) UnmarshalText(b []byte) error { + gtx, err := TransactionFromXDR(string(b)) + if err != nil { + return err + } + tx, ok := gtx.Transaction() + if !ok { + return errors.New("transaction envelope unmarshaled into FeeBumpTransaction is not a fee bump transaction") + } + *t = *tx + return nil +} + +// Base64 returns the base 64 XDR representation of the transaction envelope. +func (t *Transaction) Base64() (string, error) { + return marshallBase64(t.envelope, t.Signatures()) +} + +// ToGenericTransaction creates a GenericTransaction containing the Transaction. +func (t *Transaction) ToGenericTransaction() *GenericTransaction { + return &GenericTransaction{simple: t} +} + +// ClaimableBalanceID returns the claimable balance ID for the operation at the given index within the transaction. +// given index (which should be a `CreateClaimableBalance` operation). +func (t *Transaction) ClaimableBalanceID(operationIndex int) (string, error) { + if operationIndex < 0 || operationIndex >= len(t.operations) { + return "", errors.New("invalid operation index") + } + + if _, ok := t.operations[operationIndex].(*CreateClaimableBalance); !ok { + return "", errors.New("operation is not CreateClaimableBalance") + } + + // We mimic the relevant code from Stellar Core + // https://github.com/stellar/stellar-core/blob/9f3cc04e6ec02c38974c42545a86cdc79809252b/src/test/TestAccount.cpp#L285 + // + // Note that the source account must be *unmuxed* for this to work. + muxedAccountId := xdr.MustMuxedAddress(t.sourceAccount.AccountID).ToAccountId() + operationId := xdr.HashIdPreimage{ + Type: xdr.EnvelopeTypeEnvelopeTypeOpId, + OperationId: &xdr.HashIdPreimageOperationId{ + SourceAccount: muxedAccountId, + SeqNum: xdr.SequenceNumber(t.sourceAccount.Sequence), + OpNum: xdr.Uint32(operationIndex), + }, + } + + binaryDump, err := operationId.MarshalBinary() + if err != nil { + return "", errors.Wrap(err, "invalid claimable balance operation") + } + + hash := sha256.Sum256(binaryDump) + balanceIdXdr, err := xdr.NewClaimableBalanceId( + // TODO: look into whether this be determined programmatically from the operation structure. + xdr.ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, + xdr.Hash(hash)) + if err != nil { + return "", errors.Wrap(err, "unable to parse balance ID as XDR") + } + + balanceIdHex, err := xdr.MarshalHex(balanceIdXdr) + if err != nil { + return "", errors.Wrap(err, "unable to encode balance ID as hex") + } + + return balanceIdHex, nil +} + +// FeeBumpTransaction represents a CAP 15 fee bump transaction. +// Fee bump transactions allow an arbitrary account to pay the fee for a transaction. 
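A sketch of deriving the balance ID of a claimable balance created by a transaction, assuming tx was built with a CreateClaimableBalance operation at index 0 and an unmuxed source account, as the method requires; the helper name is illustrative.

package txnbuild_test

import (
	"fmt"

	"github.com/stellar/go/txnbuild"
)

func claimableBalanceIDSketch(tx *txnbuild.Transaction) {
	balanceID, err := tx.ClaimableBalanceID(0)
	if err != nil {
		fmt.Println("Error:", err)
		return
	}
	// The hex-encoded ID can be used to look the balance up or to claim it later.
	fmt.Println("claimable balance ID:", balanceID)
}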
+type FeeBumpTransaction struct { + envelope xdr.TransactionEnvelope + baseFee int64 + maxFee int64 + feeAccount string + inner *Transaction +} + +// BaseFee returns the per operation fee for this transaction. +func (t *FeeBumpTransaction) BaseFee() int64 { + return t.baseFee +} + +// MaxFee returns the total fees which can be spent to submit this transaction. +func (t *FeeBumpTransaction) MaxFee() int64 { + return t.maxFee +} + +// FeeAccount returns the address of the account which will be paying for the inner transaction. +func (t *FeeBumpTransaction) FeeAccount() string { + return t.feeAccount +} + +// Signatures returns the list of signatures attached to this transaction. +// The contents of the returned slice should not be modified. +func (t *FeeBumpTransaction) Signatures() []xdr.DecoratedSignature { + return t.envelope.FeeBumpSignatures() +} + +// Hash returns the network specific hash of this transaction +// encoded as a byte array. +func (t *FeeBumpTransaction) Hash(networkStr string) ([32]byte, error) { + return network.HashTransactionInEnvelope(t.envelope, networkStr) +} + +// HashHex returns the network specific hash of this transaction +// encoded as a hexadecimal string. +func (t *FeeBumpTransaction) HashHex(network string) (string, error) { + return hashHex(t.envelope, network) +} + +func (t *FeeBumpTransaction) clone(signatures []xdr.DecoratedSignature) *FeeBumpTransaction { + newTx := new(FeeBumpTransaction) + *newTx = *t + newTx.envelope.FeeBump = new(xdr.FeeBumpTransactionEnvelope) + *newTx.envelope.FeeBump = *t.envelope.FeeBump + newTx.envelope.FeeBump.Signatures = signatures + return newTx +} + +// Sign returns a new FeeBumpTransaction instance which extends the current instance +// with additional signatures derived from the given list of keypair instances. +func (t *FeeBumpTransaction) Sign(network string, kps ...*keypair.Full) (*FeeBumpTransaction, error) { + extendedSignatures, err := concatSignatures(t.envelope, network, t.Signatures(), kps...) + if err != nil { + return nil, err + } + + return t.clone(extendedSignatures), nil +} + +// SignWithKeyString returns a new FeeBumpTransaction instance which extends the current instance +// with additional signatures derived from the given list of private key strings. +func (t *FeeBumpTransaction) SignWithKeyString(network string, keys ...string) (*FeeBumpTransaction, error) { + kps, err := stringsToKP(keys...) + if err != nil { + return nil, err + } + return t.Sign(network, kps...) +} + +// SignHashX returns a new FeeBumpTransaction instance which extends the current instance +// with HashX signature type. +// See description here: https://developers.stellar.org/docs/glossary/multisig/#hashx +func (t *FeeBumpTransaction) SignHashX(preimage []byte) (*FeeBumpTransaction, error) { + extendedSignatures, err := concatHashX(t.Signatures(), preimage) + if err != nil { + return nil, err + } + + return t.clone(extendedSignatures), nil +} + +// ClearSignatures returns a new Transaction instance which extends the current instance +// with signatures removed. +func (t *FeeBumpTransaction) ClearSignatures() (*FeeBumpTransaction, error) { + return t.clone(nil), nil +} + +// AddSignatureDecorated returns a new FeeBumpTransaction instance which extends the current instance +// with an additional decorated signature(s). 
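A signing sketch for the fee-bump type defined below, assuming fbtx was produced by NewFeeBumpTransaction (shown later in this patch) and feeKP is the fee account's full keypair; as with Transaction, Sign returns a new instance rather than mutating fbtx.

package txnbuild_test

import (
	"fmt"

	"github.com/stellar/go/keypair"
	"github.com/stellar/go/network"
	"github.com/stellar/go/txnbuild"
)

func signFeeBumpSketch(fbtx *txnbuild.FeeBumpTransaction, feeKP *keypair.Full) {
	signed, err := fbtx.Sign(network.TestNetworkPassphrase, feeKP)
	if err != nil {
		fmt.Println("Error:", err)
		return
	}
	b64, err := signed.Base64()
	if err != nil {
		fmt.Println("Error:", err)
		return
	}
	// The inner transaction keeps its own signatures; only the outer
	// fee-bump envelope's signature list is extended here.
	fmt.Println("fee bump envelope:", b64)
}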
+func (t *FeeBumpTransaction) AddSignatureDecorated(signature ...xdr.DecoratedSignature) (*FeeBumpTransaction, error) { + extendedSignatures, err := concatSignatureDecorated(t.envelope, t.Signatures(), signature) + if err != nil { + return nil, err + } + + return t.clone(extendedSignatures), nil +} + +// AddSignatureBase64 returns a new FeeBumpTransaction instance which extends the current instance +// with an additional signature derived from the given base64-encoded signature. +func (t *FeeBumpTransaction) AddSignatureBase64(network, publicKey, signature string) (*FeeBumpTransaction, error) { + extendedSignatures, err := concatSignatureBase64(t.envelope, t.Signatures(), network, publicKey, signature) + if err != nil { + return nil, err + } + + return t.clone(extendedSignatures), nil +} + +// ToXDR returns the a xdr.TransactionEnvelope which is equivalent to this transaction. +// The envelope should not be modified because any changes applied may +// affect the internals of the FeeBumpTransaction instance. +func (t *FeeBumpTransaction) ToXDR() xdr.TransactionEnvelope { + return t.envelope +} + +// MarshalBinary returns the binary XDR representation of the transaction envelope. +func (t *FeeBumpTransaction) MarshalBinary() ([]byte, error) { + return marshallBinary(t.envelope, t.Signatures()) +} + +// MarshalText returns the base64 XDR representation of the transaction +// envelope. +func (t *FeeBumpTransaction) MarshalText() ([]byte, error) { + return marshallBase64Bytes(t.envelope, t.Signatures()) +} + +// UnmarshalText consumes into the value the base64 XDR representation of the +// transaction envelope. +func (t *FeeBumpTransaction) UnmarshalText(b []byte) error { + gtx, err := TransactionFromXDR(string(b)) + if err != nil { + return err + } + fbtx, ok := gtx.FeeBump() + if !ok { + return errors.New("transaction envelope unmarshaled into Transaction is not a transaction") + } + *t = *fbtx + return nil +} + +// Base64 returns the base 64 XDR representation of the transaction envelope. +func (t *FeeBumpTransaction) Base64() (string, error) { + return marshallBase64(t.envelope, t.Signatures()) +} + +// ToGenericTransaction creates a GenericTransaction containing the +// FeeBumpTransaction. +func (t *FeeBumpTransaction) ToGenericTransaction() *GenericTransaction { + return &GenericTransaction{feeBump: t} +} + +// InnerTransaction returns the Transaction which is wrapped by +// this FeeBumpTransaction instance. +func (t *FeeBumpTransaction) InnerTransaction() *Transaction { + innerCopy := new(Transaction) + *innerCopy = *t.inner + return innerCopy +} + +// GenericTransaction represents a parsed transaction envelope returned by TransactionFromXDR. +// A GenericTransaction can be either a Transaction or a FeeBumpTransaction. +type GenericTransaction struct { + simple *Transaction + feeBump *FeeBumpTransaction +} + +// Transaction unpacks the GenericTransaction instance into a Transaction. +// The function also returns a boolean which is true if the GenericTransaction can be +// unpacked into a Transaction. +func (t GenericTransaction) Transaction() (*Transaction, bool) { + return t.simple, t.simple != nil +} + +// FeeBump unpacks the GenericTransaction instance into a FeeBumpTransaction. +// The function also returns a boolean which is true if the GenericTransaction +// can be unpacked into a FeeBumpTransaction. 
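A sketch of unpacking a GenericTransaction (as returned by TransactionFromXDR, defined below) into whichever concrete type it wraps; for a parsed envelope exactly one of the two accessors reports ok.

package txnbuild_test

import (
	"fmt"

	"github.com/stellar/go/txnbuild"
)

func unpackSketch(gtx *txnbuild.GenericTransaction) {
	if tx, ok := gtx.Transaction(); ok {
		fmt.Println("simple transaction with", len(tx.Operations()), "operations")
		return
	}
	if fbtx, ok := gtx.FeeBump(); ok {
		inner := fbtx.InnerTransaction()
		fmt.Println("fee bump paying for", len(inner.Operations()), "inner operations")
		return
	}
	fmt.Println("empty GenericTransaction")
}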
+func (t GenericTransaction) FeeBump() (*FeeBumpTransaction, bool) { + return t.feeBump, t.feeBump != nil +} + +// ToXDR returns the a xdr.TransactionEnvelope which is equivalent to this +// transaction. The envelope should not be modified because any changes applied +// may affect the internals of the GenericTransaction. +func (t *GenericTransaction) ToXDR() (xdr.TransactionEnvelope, error) { + if tx, ok := t.Transaction(); ok { + return tx.envelope, nil + } + if fbtx, ok := t.FeeBump(); ok { + return fbtx.envelope, nil + } + return xdr.TransactionEnvelope{}, fmt.Errorf("unable to get xdr of empty GenericTransaction") +} + +// Hash returns the network specific hash of this transaction +// encoded as a byte array. +func (t GenericTransaction) Hash(networkStr string) ([32]byte, error) { + if tx, ok := t.Transaction(); ok { + return tx.Hash(networkStr) + } + if fbtx, ok := t.FeeBump(); ok { + return fbtx.Hash(networkStr) + } + return [32]byte{}, fmt.Errorf("unable to get hash of empty GenericTransaction") +} + +// HashHex returns the network specific hash of this transaction +// encoded as a hexadecimal string. +func (t GenericTransaction) HashHex(network string) (string, error) { + if tx, ok := t.Transaction(); ok { + return tx.HashHex(network) + } + if fbtx, ok := t.FeeBump(); ok { + return fbtx.HashHex(network) + } + return "", fmt.Errorf("unable to get hash of empty GenericTransaction") +} + +// MarshalText returns the base64 XDR representation of the transaction +// envelope. +func (t *GenericTransaction) MarshalText() ([]byte, error) { + if tx, ok := t.Transaction(); ok { + return tx.MarshalText() + } + if fbtx, ok := t.FeeBump(); ok { + return fbtx.MarshalText() + } + return nil, errors.New("unable to marshal empty GenericTransaction") +} + +// UnmarshalText consumes into the value the base64 XDR representation of the +// transaction envelope. +func (t *GenericTransaction) UnmarshalText(b []byte) error { + gtx, err := TransactionFromXDR(string(b)) + if err != nil { + return err + } + *t = *gtx + return nil +} + +// TransactionFromXDR parses the supplied transaction envelope in base64 XDR +// and returns a GenericTransaction instance. +func TransactionFromXDR(txeB64 string) (*GenericTransaction, error) { + var xdrEnv xdr.TransactionEnvelope + err := xdr.SafeUnmarshalBase64(txeB64, &xdrEnv) + if err != nil { + return nil, errors.Wrap(err, "unable to unmarshal transaction envelope") + } + return transactionFromParsedXDR(xdrEnv) +} + +func transactionFromParsedXDR(xdrEnv xdr.TransactionEnvelope) (*GenericTransaction, error) { + var err error + newTx := &GenericTransaction{} + + if xdrEnv.IsFeeBump() { + var innerTx *GenericTransaction + innerTx, err = transactionFromParsedXDR(xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: xdrEnv.FeeBump.Tx.InnerTx.V1, + }) + if err != nil { + return newTx, errors.New("could not parse inner transaction") + } + feeBumpAccount := xdrEnv.FeeBumpAccount() + feeAccount := feeBumpAccount.Address() + + newTx.feeBump = &FeeBumpTransaction{ + envelope: xdrEnv, + // A fee-bump transaction has an effective number of operations equal to one plus the + // number of operations in the inner transaction. Correspondingly, the minimum fee for + // the fee-bump transaction is one base fee more than the minimum fee for the inner + // transaction. 
+ baseFee: xdrEnv.FeeBumpFee() / int64(len(innerTx.simple.operations)+1), + maxFee: xdrEnv.FeeBumpFee(), + inner: innerTx.simple, + feeAccount: feeAccount, + } + + return newTx, nil + } + sourceAccount := xdrEnv.SourceAccount() + accountID := sourceAccount.Address() + + totalFee := int64(xdrEnv.Fee()) + baseFee := totalFee + if count := int64(len(xdrEnv.Operations())); count > 0 { + baseFee = baseFee / count + } + + newTx.simple = &Transaction{ + envelope: xdrEnv, + baseFee: baseFee, + maxFee: totalFee, + sourceAccount: SimpleAccount{ + AccountID: accountID, + Sequence: xdrEnv.SeqNum(), + }, + operations: nil, + memo: nil, + timebounds: Timebounds{}, + } + + if timeBounds := xdrEnv.TimeBounds(); timeBounds != nil { + newTx.simple.timebounds = NewTimebounds(int64(timeBounds.MinTime), int64(timeBounds.MaxTime)) + } + + newTx.simple.memo, err = memoFromXDR(xdrEnv.Memo()) + if err != nil { + return nil, errors.Wrap(err, "unable to parse memo") + } + + operations := xdrEnv.Operations() + for _, op := range operations { + newOp, err := operationFromXDR(op) + if err != nil { + return nil, err + } + newTx.simple.operations = append(newTx.simple.operations, newOp) + } + + return newTx, nil +} + +// TransactionParams is a container for parameters +// which are used to construct new Transaction instances +type TransactionParams struct { + SourceAccount Account + IncrementSequenceNum bool + Operations []Operation + BaseFee int64 + Memo Memo + Timebounds Timebounds +} + +// NewTransaction returns a new Transaction instance +func NewTransaction(params TransactionParams) (*Transaction, error) { + var sequence int64 + var err error + + if params.SourceAccount == nil { + return nil, errors.New("transaction has no source account") + } + + if params.IncrementSequenceNum { + sequence, err = params.SourceAccount.IncrementSequenceNumber() + } else { + sequence, err = params.SourceAccount.GetSequenceNumber() + } + if err != nil { + return nil, errors.Wrap(err, "could not obtain account sequence") + } + + tx := &Transaction{ + baseFee: params.BaseFee, + sourceAccount: SimpleAccount{ + AccountID: params.SourceAccount.GetAccountID(), + Sequence: sequence, + }, + operations: params.Operations, + memo: params.Memo, + timebounds: params.Timebounds, + } + var sourceAccount xdr.MuxedAccount + if err = sourceAccount.SetAddress(tx.sourceAccount.AccountID); err != nil { + return nil, errors.Wrap(err, "account id is not valid") + } + if tx.baseFee < 0 { + return nil, errors.Errorf("base fee cannot be negative") + } + + if len(tx.operations) == 0 { + return nil, errors.New("transaction has no operations") + } + + // check if maxFee fits in a uint32 + // 64 bit fees are only available in fee bump transactions + // if maxFee is negative then there must have been an int overflow + hi, lo := bits.Mul64(uint64(params.BaseFee), uint64(len(params.Operations))) + if hi > 0 || lo > math.MaxUint32 { + return nil, errors.Errorf("base fee %d results in an overflow of max fee", params.BaseFee) + } + tx.maxFee = int64(lo) + + // Check and set the timebounds + err = tx.timebounds.Validate() + if err != nil { + return nil, errors.Wrap(err, "invalid time bounds") + } + + envelope := xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: sourceAccount, + Fee: xdr.Uint32(tx.maxFee), + SeqNum: xdr.SequenceNumber(sequence), + TimeBounds: &xdr.TimeBounds{ + MinTime: xdr.TimePoint(tx.timebounds.MinTime), + MaxTime: xdr.TimePoint(tx.timebounds.MaxTime), + }, + }, + 
Signatures: nil, + }, + } + + // Handle the memo, if one is present + if tx.memo != nil { + xdrMemo, err := tx.memo.ToXDR() + if err != nil { + return nil, errors.Wrap(err, "couldn't build memo XDR") + } + envelope.V1.Tx.Memo = xdrMemo + } + + for _, op := range tx.operations { + if verr := op.Validate(); verr != nil { + return nil, errors.Wrap(verr, fmt.Sprintf("validation failed for %T operation", op)) + } + xdrOperation, err2 := op.BuildXDR() + if err2 != nil { + return nil, errors.Wrap(err2, fmt.Sprintf("failed to build operation %T", op)) + } + envelope.V1.Tx.Operations = append(envelope.V1.Tx.Operations, xdrOperation) + } + + tx.envelope = envelope + return tx, nil +} + +// FeeBumpTransactionParams is a container for parameters +// which are used to construct new FeeBumpTransaction instances +type FeeBumpTransactionParams struct { + Inner *Transaction + FeeAccount string + BaseFee int64 +} + +func convertToV1(tx *Transaction) (*Transaction, error) { + sourceAccount := tx.SourceAccount() + signatures := tx.Signatures() + tx, err := NewTransaction(TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: tx.Operations(), + BaseFee: tx.BaseFee(), + Memo: tx.Memo(), + Timebounds: tx.Timebounds(), + }) + if err != nil { + return tx, err + } + tx.envelope.V1.Signatures = signatures + return tx, nil +} + +// NewFeeBumpTransaction returns a new FeeBumpTransaction instance +func NewFeeBumpTransaction(params FeeBumpTransactionParams) (*FeeBumpTransaction, error) { + inner := params.Inner + if inner == nil { + return nil, errors.New("inner transaction is missing") + } + switch inner.envelope.Type { + case xdr.EnvelopeTypeEnvelopeTypeTx, xdr.EnvelopeTypeEnvelopeTypeTxV0: + default: + return nil, errors.Errorf("%s transactions cannot be fee bumped", inner.envelope.Type) + } + + innerEnv := inner.ToXDR() + if innerEnv.Type == xdr.EnvelopeTypeEnvelopeTypeTxV0 { + var err error + inner, err = convertToV1(inner) + if err != nil { + return nil, errors.Wrap(err, "could not upgrade transaction from v0 to v1") + } + } else if innerEnv.Type != xdr.EnvelopeTypeEnvelopeTypeTx { + return nil, errors.Errorf("%v transactions cannot be fee bumped", innerEnv.Type.String()) + } + + tx := &FeeBumpTransaction{ + baseFee: params.BaseFee, + // A fee-bump transaction has an effective number of operations equal to one plus the + // number of operations in the inner transaction. Correspondingly, the minimum fee for + // the fee-bump transaction is one base fee more than the minimum fee for the inner + // transaction. 
+ maxFee: params.BaseFee * int64(len(inner.operations)+1), + feeAccount: params.FeeAccount, + inner: new(Transaction), + } + *tx.inner = *inner + + hi, lo := bits.Mul64(uint64(params.BaseFee), uint64(len(inner.operations)+1)) + if hi > 0 || lo > math.MaxInt64 { + return nil, errors.Errorf("base fee %d results in an overflow of max fee", params.BaseFee) + } + tx.maxFee = int64(lo) + + if tx.baseFee < tx.inner.baseFee { + return tx, errors.New("base fee cannot be lower than provided inner transaction fee") + } + if tx.baseFee < MinBaseFee { + return tx, errors.Errorf( + "base fee cannot be lower than network minimum of %d", MinBaseFee, + ) + } + + var feeSource xdr.MuxedAccount + if err := feeSource.SetAddress(tx.feeAccount); err != nil { + return tx, errors.Wrap(err, "fee account is not a valid address") + } + + tx.envelope = xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTxFeeBump, + FeeBump: &xdr.FeeBumpTransactionEnvelope{ + Tx: xdr.FeeBumpTransaction{ + FeeSource: feeSource, + Fee: xdr.Int64(tx.maxFee), + InnerTx: xdr.FeeBumpTransactionInnerTx{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: innerEnv.V1, + }, + }, + }, + } + + return tx, nil +} + +// BuildChallengeTx is a factory method that creates a valid SEP 10 challenge, for use in web authentication. +// "timebound" is the time duration the transaction should be valid for, and must be greater than 1s (300s is recommended). +// More details on SEP 10: https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0010.md +func BuildChallengeTx(serverSignerSecret, clientAccountID, webAuthDomain, homeDomain, network string, timebound time.Duration) (*Transaction, error) { + if timebound < time.Second { + return nil, errors.New("provided timebound must be at least 1s (300s is recommended)") + } + + serverKP, err := keypair.Parse(serverSignerSecret) + if err != nil { + return nil, err + } + + // SEP10 spec requires 48 byte cryptographic-quality random string + randomNonce, err := generateRandomNonce(48) + if err != nil { + return nil, err + } + // Encode 48-byte nonce to base64 for a total of 64-bytes + randomNonceToString := base64.StdEncoding.EncodeToString(randomNonce) + if len(randomNonceToString) != 64 { + return nil, errors.New("64 byte long random nonce required") + } + + if _, err = xdr.AddressToAccountId(clientAccountID); err != nil { + return nil, errors.Wrapf(err, "%s is not a valid account id", clientAccountID) + } + + // represent server signing account as SimpleAccount + sa := SimpleAccount{ + AccountID: serverKP.Address(), + Sequence: 0, + } + + currentTime := time.Now().UTC() + maxTime := currentTime.Add(timebound) + + // Create a SEP 10 compatible response. See + // https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0010.md#response + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sa, + IncrementSequenceNum: false, + Operations: []Operation{ + &ManageData{ + SourceAccount: clientAccountID, + Name: homeDomain + " auth", + Value: []byte(randomNonceToString), + }, + &ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte(webAuthDomain), + }, + }, + BaseFee: MinBaseFee, + Memo: nil, + Timebounds: NewTimebounds(currentTime.Unix(), maxTime.Unix()), + }, + ) + if err != nil { + return nil, err + } + tx, err = tx.Sign(network, serverKP.(*keypair.Full)) + if err != nil { + return nil, err + } + + return tx, nil +} + +// generateRandomNonce creates a cryptographically secure random slice of `n` bytes. 
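A construction sketch for the fee-bump path above, assuming innerTx is an already signed *txnbuild.Transaction whose base fee does not exceed the value chosen here, and feeKP is the keypair of the account that will pay the higher fee.

package txnbuild_test

import (
	"fmt"

	"github.com/stellar/go/keypair"
	"github.com/stellar/go/network"
	"github.com/stellar/go/txnbuild"
)

func feeBumpSketch(innerTx *txnbuild.Transaction, feeKP *keypair.Full) {
	fbtx, err := txnbuild.NewFeeBumpTransaction(txnbuild.FeeBumpTransactionParams{
		Inner:      innerTx,
		FeeAccount: feeKP.Address(),
		// The base fee must be at least MinBaseFee and not lower than the inner
		// transaction's base fee; the resulting MaxFee covers the inner
		// operations plus one extra for the fee bump itself.
		BaseFee: 2 * txnbuild.MinBaseFee,
	})
	if err != nil {
		fmt.Println("Error:", err)
		return
	}
	fbtx, err = fbtx.Sign(network.TestNetworkPassphrase, feeKP)
	if err != nil {
		fmt.Println("Error:", err)
		return
	}
	fmt.Println("max fee:", fbtx.MaxFee())
}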
+func generateRandomNonce(n int) ([]byte, error) { + binary := make([]byte, n) + _, err := rand.Read(binary) + + if err != nil { + return []byte{}, err + } + + return binary, err +} + +// ReadChallengeTx reads a SEP 10 challenge transaction and returns the decoded +// transaction and client account ID contained within. +// +// Before calling this function, retrieve the SIGNING_KEY included in the TOML file +// hosted on the service's homeDomain and ensure it matches the serverAccountID you +// intend to pass. +// +// This function verifies the serverAccountID signed the challenge. If the +// serverAccountID also matches the SIGNING_KEY included in the TOML file hosted the +// service's homeDomain passed, malicious web services will not be able to use the +// challenge transaction POSTed back to the authentication endpoint. +// +// The challenge's first Manage Data operation is expected to contain the homeDomain +// parameter passed to the function. +// +// If the challenge contains a subsequent Manage Data operation with key +// web_auth_domain the value will be checked to match the webAuthDomain +// provided. If it does not match the function will return an error. +// +// It does not verify that the transaction has been signed by the client or +// that any signatures other than the servers on the transaction are valid. Use +// one of the following functions to completely verify the transaction: +// - VerifyChallengeTxThreshold +// - VerifyChallengeTxSigners +func ReadChallengeTx(challengeTx, serverAccountID, network, webAuthDomain string, homeDomains []string) (tx *Transaction, clientAccountID string, matchedHomeDomain string, err error) { + parsed, err := TransactionFromXDR(challengeTx) + if err != nil { + return tx, clientAccountID, matchedHomeDomain, errors.Wrap(err, "could not parse challenge") + } + + var isSimple bool + tx, isSimple = parsed.Transaction() + if !isSimple { + return tx, clientAccountID, matchedHomeDomain, errors.New("challenge cannot be a fee bump transaction") + } + + // Enforce no muxed accounts (at least until we understand their impact) + if tx.envelope.SourceAccount().Type == xdr.CryptoKeyTypeKeyTypeMuxedEd25519 { + err = errors.New("invalid source account: only valid Ed25519 accounts are allowed in challenge transactions") + return tx, clientAccountID, matchedHomeDomain, err + } + + // verify transaction source + if tx.SourceAccount().AccountID != serverAccountID { + return tx, clientAccountID, matchedHomeDomain, errors.New("transaction source account is not equal to server's account") + } + + // verify sequence number + if tx.SourceAccount().Sequence != 0 { + return tx, clientAccountID, matchedHomeDomain, errors.New("transaction sequence number must be 0") + } + + // verify timebounds + if tx.Timebounds().MaxTime == TimeoutInfinite { + return tx, clientAccountID, matchedHomeDomain, errors.New("transaction requires non-infinite timebounds") + } + // Apply a grace period to the challenge MinTime to account for clock drift between the server and client + var gracePeriod int64 = 5 * 60 // seconds + currentTime := time.Now().UTC().Unix() + if currentTime+gracePeriod < tx.Timebounds().MinTime || currentTime > tx.Timebounds().MaxTime { + return tx, clientAccountID, matchedHomeDomain, errors.Errorf("transaction is not within range of the specified timebounds (currentTime=%d, MinTime=%d, MaxTime=%d)", + currentTime, tx.Timebounds().MinTime, tx.Timebounds().MaxTime) + } + + // verify operation + operations := tx.Operations() + if len(operations) < 1 { + return tx, 
clientAccountID, matchedHomeDomain, errors.New("transaction requires at least one manage_data operation") + } + op, ok := operations[0].(*ManageData) + if !ok { + return tx, clientAccountID, matchedHomeDomain, errors.New("operation type should be manage_data") + } + if op.SourceAccount == "" { + return tx, clientAccountID, matchedHomeDomain, errors.New("operation should have a source account") + } + for _, homeDomain := range homeDomains { + if op.Name == homeDomain+" auth" { + matchedHomeDomain = homeDomain + break + } + } + if matchedHomeDomain == "" { + return tx, clientAccountID, matchedHomeDomain, errors.Errorf("operation key does not match any homeDomains passed (key=%q, homeDomains=%v)", op.Name, homeDomains) + } + + clientAccountID = op.SourceAccount + rawOperations := tx.envelope.Operations() + if len(rawOperations) > 0 && rawOperations[0].SourceAccount.Type == xdr.CryptoKeyTypeKeyTypeMuxedEd25519 { + err = errors.New("invalid operation source account: only valid Ed25519 accounts are allowed in challenge transactions") + return tx, clientAccountID, matchedHomeDomain, err + } + + // verify manage data value + nonceB64 := string(op.Value) + if len(nonceB64) != 64 { + return tx, clientAccountID, matchedHomeDomain, errors.New("random nonce encoded as base64 should be 64 bytes long") + } + nonceBytes, err := base64.StdEncoding.DecodeString(nonceB64) + if err != nil { + return tx, clientAccountID, matchedHomeDomain, errors.Wrap(err, "failed to decode random nonce provided in manage_data operation") + } + if len(nonceBytes) != 48 { + return tx, clientAccountID, matchedHomeDomain, errors.New("random nonce before encoding as base64 should be 48 bytes long") + } + + // verify subsequent operations are manage data ops and known, or unknown with source account set to server account + for _, op := range operations[1:] { + op, ok := op.(*ManageData) + if !ok { + return tx, clientAccountID, matchedHomeDomain, errors.New("operation type should be manage_data") + } + if op.SourceAccount == "" { + return tx, clientAccountID, matchedHomeDomain, errors.New("operation should have a source account") + } + switch op.Name { + case "web_auth_domain": + if op.SourceAccount != serverAccountID { + return tx, clientAccountID, matchedHomeDomain, errors.New("web auth domain operation must have server source account") + } + if !bytes.Equal(op.Value, []byte(webAuthDomain)) { + return tx, clientAccountID, matchedHomeDomain, errors.Errorf("web auth domain operation value is %q but expect %q", string(op.Value), webAuthDomain) + } + default: + // verify unknown subsequent operations are manage data ops with source account set to server account + if op.SourceAccount != serverAccountID { + return tx, clientAccountID, matchedHomeDomain, errors.New("subsequent operations are unrecognized") + } + } + } + + err = verifyTxSignature(tx, network, serverAccountID) + if err != nil { + return tx, clientAccountID, matchedHomeDomain, err + } + + return tx, clientAccountID, matchedHomeDomain, nil +} + +// VerifyChallengeTxThreshold verifies that for a SEP 10 challenge transaction +// all signatures on the transaction are accounted for and that the signatures +// meet a threshold on an account. A transaction is verified if it is signed by +// the server account, and all other signatures match a signer that has been +// provided as an argument, and those signatures meet a threshold on the +// account. +// +// Signers that are not prefixed as an address/account ID strkey (G...) will be +// ignored. 
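A client-side sketch of validating a challenge before signing it, assuming challengeB64 came from the server and serverAccountID, homeDomain, and webAuthDomain were obtained from the service's TOML file as described above; the helper name and parameters are illustrative.

package txnbuild_test

import (
	"fmt"

	"github.com/stellar/go/keypair"
	"github.com/stellar/go/network"
	"github.com/stellar/go/txnbuild"
)

func clientChallengeSketch(challengeB64, serverAccountID, homeDomain, webAuthDomain string, clientKP *keypair.Full) (string, error) {
	tx, clientAccountID, _, err := txnbuild.ReadChallengeTx(
		challengeB64, serverAccountID, network.TestNetworkPassphrase, webAuthDomain, []string{homeDomain},
	)
	if err != nil {
		return "", err
	}
	if clientAccountID != clientKP.Address() {
		return "", fmt.Errorf("challenge is for %s, not this client", clientAccountID)
	}
	tx, err = tx.Sign(network.TestNetworkPassphrase, clientKP)
	if err != nil {
		return "", err
	}
	return tx.Base64()
}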
+// +// The homeDomain field is reserved for future use and not used. +// +// If the challenge contains a subsequent Manage Data operation with key +// web_auth_domain the value will be checked to match the webAuthDomain +// provided. If it does not match the function will return an error. +// +// Errors will be raised if: +// - The transaction is invalid according to ReadChallengeTx. +// - No client signatures are found on the transaction. +// - One or more signatures in the transaction are not identifiable as the +// server account or one of the signers provided in the arguments. +// - The signatures are all valid but do not meet the threshold. +func VerifyChallengeTxThreshold(challengeTx, serverAccountID, network, webAuthDomain string, homeDomains []string, threshold Threshold, signerSummary SignerSummary) (signersFound []string, err error) { + signers := make([]string, 0, len(signerSummary)) + for s := range signerSummary { + signers = append(signers, s) + } + + signersFound, err = VerifyChallengeTxSigners(challengeTx, serverAccountID, network, webAuthDomain, homeDomains, signers...) + if err != nil { + return nil, err + } + + weight := int32(0) + for _, s := range signersFound { + weight += signerSummary[s] + } + + if weight < int32(threshold) { + return nil, errors.Errorf("signers with weight %d do not meet threshold %d", weight, threshold) + } + + return signersFound, nil +} + +// VerifyChallengeTxSigners verifies that for a SEP 10 challenge transaction +// all signatures on the transaction are accounted for. A transaction is +// verified if it is signed by the server account, and all other signatures +// match a signer that has been provided as an argument. Additional signers can +// be provided that do not have a signature, but all signatures must be matched +// to a signer for verification to succeed. If verification succeeds a list of +// signers that were found is returned, excluding the server account ID. +// +// Signers that are not prefixed as an address/account ID strkey (G...) will be +// ignored. +// +// The homeDomain field is reserved for future use and not used. +// +// If the challenge contains a subsequent Manage Data operation with key +// web_auth_domain the value will be checked to match the webAuthDomain +// provided. If it does not match the function will return an error. +// +// Errors will be raised if: +// - The transaction is invalid according to ReadChallengeTx. +// - No client signatures are found on the transaction. +// - One or more signatures in the transaction are not identifiable as the +// server account or one of the signers provided in the arguments. +func VerifyChallengeTxSigners(challengeTx, serverAccountID, network, webAuthDomain string, homeDomains []string, signers ...string) ([]string, error) { + // Read the transaction which validates its structure. + tx, _, _, err := ReadChallengeTx(challengeTx, serverAccountID, network, webAuthDomain, homeDomains) + if err != nil { + return nil, err + } + + // Ensure the server account ID is an address and not a seed. + serverKP, err := keypair.ParseAddress(serverAccountID) + if err != nil { + return nil, err + } + + // Deduplicate the client signers and ensure the server is not included + // anywhere we check or output the list of signers. + clientSigners := []string{} + clientSignersSeen := map[string]struct{}{} + for _, signer := range signers { + // Ignore the server signer if it is in the signers list. 
It's + // important when verifying signers of a challenge transaction that we + // only verify and return client signers. If an account has the server + // as a signer the server should not play a part in the authentication + // of the client. + if signer == serverKP.Address() { + continue + } + // Deduplicate. + if _, seen := clientSignersSeen[signer]; seen { + continue + } + // Ignore non-G... account/address signers. + strkeyVersionByte, strkeyErr := strkey.Version(signer) + if strkeyErr != nil { + continue + } + if strkeyVersionByte != strkey.VersionByteAccountID { + continue + } + clientSigners = append(clientSigners, signer) + clientSignersSeen[signer] = struct{}{} + } + + // Don't continue if none of the signers provided are in the final list. + if len(clientSigners) == 0 { + return nil, errors.New("no verifiable signers provided, at least one G... address must be provided") + } + + // Verify all the transaction's signers (server and client) in one + // hit. We do this in one hit here even though the server signature was + // checked in the ReadChallengeTx to ensure that every signature and signer + // are consumed only once on the transaction. + allSigners := append([]string{serverKP.Address()}, clientSigners...) + allSignersFound, err := verifyTxSignatures(tx, network, allSigners...) + if err != nil { + return nil, err + } + + // Confirm the server is in the list of signers found and remove it. + serverSignerFound := false + signersFound := make([]string, 0, len(allSignersFound)-1) + for _, signer := range allSignersFound { + if signer == serverKP.Address() { + serverSignerFound = true + continue + } + signersFound = append(signersFound, signer) + } + + // Confirm we matched a signature to the server signer. + if !serverSignerFound { + return nil, errors.Errorf("transaction not signed by %s", serverKP.Address()) + } + + // Confirm we matched signatures to the client signers. + if len(signersFound) == 0 { + return nil, errors.Errorf("transaction not signed by %s", strings.Join(clientSigners, ", ")) + } + + // Confirm all signatures were consumed by a signer. + if len(allSignersFound) != len(tx.Signatures()) { + return signersFound, errors.Errorf("transaction has unrecognized signatures") + } + + return signersFound, nil +} + +// verifyTxSignature checks if a transaction has been signed by the provided Stellar account. +func verifyTxSignature(tx *Transaction, network string, signer string) error { + signersFound, err := verifyTxSignatures(tx, network, signer) + if len(signersFound) == 0 { + return errors.Errorf("transaction not signed by %s", signer) + } + return err +} + +// verifyTxSignature checks if a transaction has been signed by one or more of +// the signers, returning a list of signers that were found to have signed the +// transaction. 
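A server-side sketch using VerifyChallengeTxSigners for the common case where the client account does not yet exist on the network, so its master key is the only acceptable signer; signedChallengeB64 and the domain values are assumed to come from the surrounding SEP-10 flow.

package txnbuild_test

import (
	"fmt"

	"github.com/stellar/go/network"
	"github.com/stellar/go/txnbuild"
)

func verifyMasterKeySketch(signedChallengeB64, serverAccountID, clientAccountID, homeDomain, webAuthDomain string) error {
	signersFound, err := txnbuild.VerifyChallengeTxSigners(
		signedChallengeB64, serverAccountID, network.TestNetworkPassphrase,
		webAuthDomain, []string{homeDomain}, clientAccountID,
	)
	if err != nil {
		return err
	}
	// On success the server's own signature is never reported back; only the
	// matched client signers are returned.
	fmt.Println("verified signers:", signersFound)
	return nil
}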
+func verifyTxSignatures(tx *Transaction, network string, signers ...string) ([]string, error) { + txHash, err := tx.Hash(network) + if err != nil { + return nil, err + } + + // find and verify signatures + signatureUsed := map[int]bool{} + signersFound := map[string]struct{}{} + for _, signer := range signers { + kp, err := keypair.ParseAddress(signer) + if err != nil { + return nil, errors.Wrap(err, "signer not address") + } + + for i, decSig := range tx.Signatures() { + if signatureUsed[i] { + continue + } + if decSig.Hint != kp.Hint() { + continue + } + err := kp.Verify(txHash[:], decSig.Signature) + if err == nil { + signatureUsed[i] = true + signersFound[signer] = struct{}{} + break + } + } + } + + signersFoundList := make([]string, 0, len(signersFound)) + for _, signer := range signers { + if _, ok := signersFound[signer]; ok { + signersFoundList = append(signersFoundList, signer) + delete(signersFound, signer) + } + } + return signersFoundList, nil +} diff --git a/txnbuild/transaction_challenge_example_test.go b/txnbuild/transaction_challenge_example_test.go new file mode 100644 index 0000000000..a183aa44f4 --- /dev/null +++ b/txnbuild/transaction_challenge_example_test.go @@ -0,0 +1,133 @@ +package txnbuild_test + +import ( + "fmt" + "sort" + "time" + + "github.com/stellar/go/clients/horizonclient" + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/protocols/horizon" + "github.com/stellar/go/txnbuild" +) + +var serverAccount, _ = keypair.ParseFull("SCDXPYDGKV5HOAGVZN3FQSS5FKUPP5BAVBWH4FXKTAWAC24AE4757JSI") +var clientAccount, _ = keypair.ParseFull("SANVNCABRBVISCV7KH4SZVBKPJWWTT4424OVWUHUHPH2MVSF6RC7HPGN") +var clientSigner1, _ = keypair.ParseFull("SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R") +var clientSigner2, _ = keypair.ParseFull("SBMSVD4KKELKGZXHBUQTIROWUAPQASDX7KEJITARP4VMZ6KLUHOGPTYW") +var horizonClient = func() horizonclient.ClientInterface { + client := &horizonclient.MockClient{} + client. + On("AccountDetail", horizonclient.AccountRequest{AccountID: clientAccount.Address()}). 
+ Return( + horizon.Account{ + Thresholds: horizon.AccountThresholds{LowThreshold: 1, MedThreshold: 10, HighThreshold: 100}, + Signers: []horizon.Signer{ + {Key: clientSigner1.Address(), Weight: 40}, + {Key: clientSigner2.Address(), Weight: 60}, + }, + }, + nil, + ) + return client +}() + +func ExampleVerifyChallengeTxThreshold() { + // Server builds challenge transaction + var challengeTx string + { + tx, err := txnbuild.BuildChallengeTx(serverAccount.Seed(), clientAccount.Address(), "webauthdomain.stellar.org", "test", network.TestNetworkPassphrase, time.Minute) + if err != nil { + fmt.Println("Error:", err) + return + } + challengeTx, err = tx.Base64() + if err != nil { + fmt.Println("Error:", err) + return + } + } + + // Client reads and signs challenge transaction + var signedChallengeTx string + { + tx, txClientAccountID, _, err := txnbuild.ReadChallengeTx(challengeTx, serverAccount.Address(), network.TestNetworkPassphrase, "webauthdomain.stellar.org", []string{"test"}) + if err != nil { + fmt.Println("Error:", err) + return + } + if txClientAccountID != clientAccount.Address() { + fmt.Println("Error: challenge tx is not for expected client account") + return + } + tx, err = tx.Sign(network.TestNetworkPassphrase, clientSigner1, clientSigner2) + if err != nil { + fmt.Println("Error:", err) + return + } + signedChallengeTx, err = tx.Base64() + if err != nil { + fmt.Println("Error:", err) + return + } + } + + // Server verifies signed challenge transaction + { + _, txClientAccountID, _, err := txnbuild.ReadChallengeTx(challengeTx, serverAccount.Address(), network.TestNetworkPassphrase, "webauthdomain.stellar.org", []string{"test"}) + if err != nil { + fmt.Println("Error:", err) + return + } + + // Server gets account + clientAccountExists := false + horizonClientAccount, err := horizonClient.AccountDetail(horizonclient.AccountRequest{AccountID: txClientAccountID}) + if horizonclient.IsNotFoundError(err) { + clientAccountExists = false + fmt.Println("Account does not exist, use master key to verify") + } else if err == nil { + clientAccountExists = true + } else { + fmt.Println("Error:", err) + return + } + + if clientAccountExists { + // Server gets list of signers from account + signerSummary := horizonClientAccount.SignerSummary() + + // Server chooses the threshold to require: low, med or high + threshold := txnbuild.Threshold(horizonClientAccount.Thresholds.MedThreshold) + + // Server verifies threshold is met + signers, err := txnbuild.VerifyChallengeTxThreshold(signedChallengeTx, serverAccount.Address(), network.TestNetworkPassphrase, "webauthdomain.stellar.org", []string{"test"}, threshold, signerSummary) + if err != nil { + fmt.Println("Error:", err) + return + } + fmt.Println("Client Signers Verified:") + sort.Strings(signers) + for _, signer := range signers { + fmt.Println(signer, "weight:", signerSummary[signer]) + } + } else { + // Server verifies that master key has signed challenge transaction + signersFound, err := txnbuild.VerifyChallengeTxSigners(signedChallengeTx, serverAccount.Address(), network.TestNetworkPassphrase, "webauthdomain.stellar.org", []string{"test"}, txClientAccountID) + if err != nil { + fmt.Println("Error:", err) + return + } + fmt.Println("Client Master Key Verified:") + for _, signerFound := range signersFound { + fmt.Println(signerFound) + } + } + } + + // Output: + // Client Signers Verified: + // GAS4V4O2B7DW5T7IQRPEEVCRXMDZESKISR7DVIGKZQYYV3OSQ5SH5LVP weight: 60 + // GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3 weight: 40 +} diff 
--git a/txnbuild/transaction_fee_test.go b/txnbuild/transaction_fee_test.go new file mode 100644 index 0000000000..888f8a2483 --- /dev/null +++ b/txnbuild/transaction_fee_test.go @@ -0,0 +1,153 @@ +package txnbuild + +import ( + "math" + "testing" + + "github.com/stellar/go/keypair" + "github.com/stretchr/testify/assert" +) + +func TestBaseFeeCanBeZeroOrPositive(t *testing.T) { + validBaseFees := []int64{0, MinBaseFee} + for _, bf := range validBaseFees { + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &SimpleAccount{keypair.MustRandom().Address(), 1}, + Operations: []Operation{&Inflation{}}, + BaseFee: bf, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + assert.Equal(t, bf, tx.baseFee) + } +} + +func TestBaseFeeErrorWhenNegative(t *testing.T) { + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &SimpleAccount{keypair.MustRandom().Address(), 1}, + Operations: []Operation{&Inflation{}}, + BaseFee: -1, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.EqualError(t, err, "base fee cannot be negative") +} + +func TestFeeBumpMinBaseFee(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + tx.baseFee -= 2 + + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: newKeypair1().Address(), + BaseFee: MinBaseFee - 1, + Inner: tx, + }, + ) + assert.EqualError(t, err, "base fee cannot be lower than network minimum of 100") +} + +func TestFeeOverflow(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}, &Inflation{}}, + BaseFee: math.MaxUint32 / 2, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + _, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}, &Inflation{}, &Inflation{}}, + BaseFee: math.MaxUint32 / 2, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.EqualError(t, err, "base fee 2147483647 results in an overflow of max fee") +} + +func TestFeeBumpOverflow(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: newKeypair1().Address(), + BaseFee: math.MaxInt64 / 2, + Inner: tx, + }, + ) + assert.NoError(t, err) + + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: newKeypair1().Address(), + BaseFee: math.MaxInt64, + Inner: tx, + }, + ) + assert.EqualError(t, err, "base fee 9223372036854775807 results in an overflow of max fee") +} + +func TestFeeBumpFeeGreaterThanOrEqualInner(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{&Inflation{}}, + BaseFee: 2 * MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + 
FeeAccount: newKeypair1().Address(), + BaseFee: 2 * MinBaseFee, + Inner: tx, + }, + ) + assert.NoError(t, err) + + _, err = NewFeeBumpTransaction( + FeeBumpTransactionParams{ + FeeAccount: newKeypair1().Address(), + BaseFee: 2*MinBaseFee - 1, + Inner: tx, + }, + ) + assert.EqualError(t, err, "base fee cannot be lower than provided inner transaction fee") +} diff --git a/txnbuild/transaction_test.go b/txnbuild/transaction_test.go new file mode 100644 index 0000000000..e92d76a4de --- /dev/null +++ b/txnbuild/transaction_test.go @@ -0,0 +1,4754 @@ +package txnbuild + +import ( + "crypto/sha256" + "encoding/base64" + "strings" + "testing" + "time" + + "github.com/stellar/go/keypair" + "github.com/stellar/go/network" + "github.com/stellar/go/price" + "github.com/stellar/go/strkey" + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestMissingTimebounds(t *testing.T) { + kp0 := newKeypair0() + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &SimpleAccount{AccountID: kp0.Address(), Sequence: 1}, + Operations: []Operation{&BumpSequence{BumpTo: 0}}, + BaseFee: MinBaseFee, + }, + ) + assert.EqualError(t, err, "invalid time bounds: timebounds must be constructed using NewTimebounds(), NewTimeout(), or NewInfiniteTimeout()") +} + +func TestTimebounds(t *testing.T) { + kp0 := newKeypair0() + + tb := NewTimeout(300) + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &SimpleAccount{AccountID: kp0.Address(), Sequence: 1}, + Operations: []Operation{&BumpSequence{BumpTo: 0}}, + BaseFee: MinBaseFee, + Timebounds: tb, + }, + ) + assert.NoError(t, err) + assert.Equal(t, tb, tx.timebounds) + assert.Equal(t, xdr.TimePoint(tb.MinTime), tx.envelope.V1.Tx.TimeBounds.MinTime) + assert.Equal(t, xdr.TimePoint(tb.MaxTime), tx.envelope.V1.Tx.TimeBounds.MaxTime) +} + +func TestMissingSourceAccount(t *testing.T) { + _, err := NewTransaction(TransactionParams{}) + assert.EqualError(t, err, "transaction has no source account") +} + +func TestIncrementSequenceNum(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), 1) + inflation := Inflation{} + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&inflation}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + assert.Equal(t, int64(2), sourceAccount.Sequence) + + _, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&inflation}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + assert.Equal(t, int64(3), sourceAccount.Sequence) + + _, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&inflation}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + assert.Equal(t, int64(3), sourceAccount.Sequence) + + _, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: false, + Operations: []Operation{&inflation}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + assert.Equal(t, int64(3), sourceAccount.Sequence) +} + +func TestFeeNoOperations(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), 5938436531814403) + + _, err := NewTransaction( + 
TransactionParams{ + SourceAccount: &sourceAccount, + Operations: []Operation{}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.EqualError(t, err, "transaction has no operations") +} + +func TestInflation(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(3556091187167235)) + + inflation := Inflation{} + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&inflation}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACQAAAAAAAAAB6i5yxQAAAED9zR1l78yiBwd/o44RyE3XP7QT57VmI90qE46TjfncYyqlOaIRWpkh3qouTjV5IRPVGo6+bFWV40H1HE087FgA" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestCreateAccount(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAAHqLnLFAAAAQB7MjKIwNEOTIjbEeV+QIjaQp/ZpV5qpbkbDaU54gkfdTOFOUxZq66lTS5FOfP5fmPIVD8InQ00Usy2SmzFC/wc=" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestPayment(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + payment := Payment{ + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Amount: "10", + Asset: NativeAsset{}, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&payment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAAAAAAAABfXhAAAAAAAAAAAB6i5yxQAAAEDXBkKYzThQi3/XhJqGzfh/EjaAx/4zK3xBT1/JDNtdkk/kxn4qxHVx++xiV72lqZXxiphNwflA8C7mC8Dvim0E" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestPaymentMuxedAccounts(t *testing.T) { + kp0 := newKeypair0() + accountID := xdr.MustAddress(kp0.Address()) + mx := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabe, + Ed25519: *accountID.Ed25519, + }, + } + sourceAccount := NewSimpleAccount(mx.Address(), int64(9605939170639898)) + + payment := Payment{ + Destination: "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", + Amount: "10", + Asset: NativeAsset{}, + SourceAccount: sourceAccount.AccountID, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + 
IncrementSequenceNum: true, + Operations: []Operation{&payment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAQAAAAAAyv66vuDcbeFyXKxmUWK1L6znNbKKIkPkHRJNbLktcKPqLnLFAAAAZAAiII0AAAAbAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAEAAAEAAAAAAMr+ur7g3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAEAAAEAgAAAAAAAAAA/DDS/k60NmXHQTMyQ9wVRHIOKrZc0pKL7DXoD/H/omgAAAAAAAAAABfXhAAAAAAAAAAAB6i5yxQAAAED4Wkvwf/BJV+fqa6Kvi+T/7ZL82pOinN68GlvEi9qK4klH+qITyvN3jRj5Nfz0+VrE2xBJPVc8sS/qN9LlznoC" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestPaymentFailsIfNoAssetSpecified(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639898)) + + payment := Payment{ + Destination: "GB7BDSZU2Y27LYNLALKKALB52WS2IZWYBDGY6EQBLEED3TJOCVMZRH7H", + Amount: "10", + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&payment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + expectedErrMsg := "validation failed for *txnbuild.Payment operation: Field: Asset, Error: asset is undefined" + require.EqualError(t, err, expectedErrMsg, "An asset is required") +} + +func TestBumpSequence(t *testing.T) { + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(9606132444168199)) + + bumpSequence := BumpSequence{ + BumpTo: 9606132444168300, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&bumpSequence}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAIiC6AAAACAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACwAiILoAAABsAAAAAAAAAAHSh2R+AAAAQJ3Y0klngAqW69ETgBCuo8OQsx4i/6wg6WugDtOfq2hw6MElCQXJJMJRLgo2waDvwNOrWTUU9T3q95Yk0K3PHwo=" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestAccountMerge(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484298)) + + accountMerge := AccountMerge{ + Destination: "GAS4V4O2B7DW5T7IQRPEEVCRXMDZESKISR7DVIGKZQYYV3OSQ5SH5LVP", + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&accountMerge}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAACwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACAAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAAAAAAB6i5yxQAAAECf1HDoBOuPhkKcL9Ll12to6yrRXZg7MmemWf7nca8j0vHDQpti+/OIsT2DOF0YJKEAncQt2CvJ+cefgly8668A" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageData(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(3556091187167235)) + + manageData := ManageData{ + Name: "Fruit preference", + Value: []byte("Apple"), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&manageData}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + 
network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQADKI/AAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACgAAABBGcnVpdCBwcmVmZXJlbmNlAAAAAQAAAAVBcHBsZQAAAAAAAAAAAAAB6i5yxQAAAEDtRCyQRKKgQ8iLEu7kicHtSzoplfxPtPTMhdRv/sq8UoIBVTxIw+S13Jv+jzs3tyLDLiGCVNXreUNlbfX+980K" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageDataRemoveDataEntry(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484309)) + + manageData := ManageData{ + Name: "Fruit preference", + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&manageData}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAFgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAACgAAABBGcnVpdCBwcmVmZXJlbmNlAAAAAAAAAAAAAAAB6i5yxQAAAEAfK5BWYLX31E3QgEs8Cd40XDAsx6VW27hW8nuyotnS2qOruXdmks89zNroDSYzRTH0rt4qPWnQqsFSio5NFCUA" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestSetOptionsInflationDestination(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484315)) + + setOptions := SetOptions{ + InflationDestination: NewInflationDestination(kp1.Address()), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAHAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAEAAAAAJcrx2g/Hbs/ohF5CVFG7B5JJSJR+OqDKzDGK7dKHZH4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6i5yxQAAAEAdES3vQ43R8yzNtsIRY2t2U/ey//NfJb1qZORDkxE6/ZZgx+/wNPxAM3gpEwc2TAotwuqVdT6xga9DSXUaz6MI" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestSetOptionsSetFlags(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484318)) + + setOptions := SetOptions{ + SetFlags: []AccountFlag{AuthRequired, AuthRevocable}, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAHwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAAAAAAAAAAAAQAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeoucsUAAABAn2E6acbadQNs0m2+lc5DpMpPQ/+8Y2l0cUfmSKoHSt5VpB0EZI8lQY9smiOtSd7a3aewrMCJqbY5Iy6a7dFiDg==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestSetOptionsClearFlags(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484319)) + + setOptions := SetOptions{ + ClearFlags: []AccountFlag{AuthRequired, AuthRevocable}, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: 
[]Operation{&setOptions}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAIAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAAAAAABAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeoucsUAAABADVzwDfkYL6oxhdJCejMjU4jJ1mhC8Ob2DcMYb/PpotyphljM6IwsXJjAKp4tMwTLBI5fc+x/CU/cdOTpUPZ7Aw==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestSetOptionsMasterWeight(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484320)) + + setOptions := SetOptions{ + MasterWeight: NewThreshold(10), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAIQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAEAAAAKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAeoucsUAAABAiMR9luF2eXzLBuufIXSBMrNp5VUgCtRRI0+RgAxerFhE4RhXPlq5pcOhsCp+mTQJsVVCxIIq3I0MePGmEoBWAw==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestSetOptionsThresholds(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484322)) + + setOptions := SetOptions{ + LowThreshold: NewThreshold(1), + MediumThreshold: NewThreshold(2), + HighThreshold: NewThreshold(2), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAIwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAQAAAAEAAAACAAAAAQAAAAIAAAAAAAAAAAAAAAAAAAAB6i5yxQAAAEBcEXBW8xLcaMWTrVpTkJXd51ER2boDY+X2hJ3Kb9F/3XK34kFVO5N35E2A7JIlRMRYqu/AgbGAK9Lrr3x+tSEL" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestSetOptionsHomeDomain(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484325)) + + setOptions := SetOptions{ + HomeDomain: NewHomeDomain("LovelyLumensLookLuminous.com"), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAHExvdmVseUx1bWVuc0xvb2tMdW1pbm91cy5jb20AAAAAAAAAAAAAAAHqLnLFAAAAQLQuB2c70X8qYUYOY45s+Y8wZ/OkgDVwmUufRno0RPC9bgjsYF0hFaIdW/lHrVBIuyTf59RAgRFSa14I9HN+HgY=" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestSetOptionsHomeDomainTooLong(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484323)) + + setOptions := SetOptions{ + HomeDomain: 
NewHomeDomain("LovelyLumensLookLuminousLately.com"), + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + + assert.Error(t, err, "A validation error was expected (home domain > 32 chars)") +} + +func TestSetOptionsSigner(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484325)) + + setOptions := SetOptions{ + Signer: &Signer{Address: kp1.Address(), Weight: Threshold(4)}, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAJgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAQAAAAAAAAAAeoucsUAAABAX4JlCvsDY/ETs+/EoNK0NrO5ZrbwOK+XqR5KnPcqMSw6/xkpJoFp3laqCjcVhdCQfS/hqpdfn/DPKdTHBeDLAQ==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestMultipleOperations(t *testing.T) { + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(9606132444168199)) + + inflation := Inflation{} + bumpSequence := BumpSequence{ + BumpTo: 9606132444168300, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&inflation, &bumpSequence}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAMgAIiC6AAAACAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAACQAAAAAAAAALACIgugAAAGwAAAAAAAAAAdKHZH4AAABA5n9wINh8OTXZb8yaaYeCpvmjSsvJH80tRAISFXSicFJzFVoTqX3V0of2npBFXaMV4dvoqKHK8XbZFgGX0t7DBQ==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestChangeTrust(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484348)) + + changeTrust := ChangeTrust{ + Line: CreditAsset{"ABCD", kp1.Address()}.MustToChangeTrustAsset(), + Limit: "10", + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&changeTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAPQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABgAAAAFBQkNEAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAAAAX14QAAAAAAAAAAAeoucsUAAABA+2EndVXXsBHbRFEQGLsgsvHVm8wCxH9byZ/PP4AhEeAjXSL6IzhGnyRIWIc2SYXRu6GvveVI3yPbzCTvKnVjCg==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestChangeTrustNativeAssetNotAllowed(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484348)) + + changeTrust := ChangeTrust{ + Line: NativeAsset{}.MustToChangeTrustAsset(), + Limit: "10", + } + + _, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + 
IncrementSequenceNum: true, + Operations: []Operation{&changeTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + + expectedErrMsg := "validation failed for *txnbuild.ChangeTrust operation: Field: Line, Error: native (XLM) asset type is not allowed" + require.EqualError(t, err, expectedErrMsg, "No trustlines for native assets") +} + +func TestChangeTrustDeleteTrustline(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484354)) + + issuedAsset := CreditAsset{"ABCD", kp1.Address()} + removeTrust := RemoveTrustlineOp(issuedAsset.MustToChangeTrustAsset()) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&removeTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAAQwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABgAAAAFBQkNEAAAAACXK8doPx27P6IReQlRRuweSSUiUfjqgyswxiu3Sh2R+AAAAAAAAAAAAAAAAAAAAAeoucsUAAABAoHdsJCt+XIr73+jSqbEhQ8iqXcqP3LO8C/kWH2dgQj+3hq1FKbthn0BbX/x5umgcE+pyfnTjU0j158qew6tfCw==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestAllowTrust(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484366)) + + issuedAsset := CreditAsset{"ABCD", kp1.Address()} + allowTrust := AllowTrust{ + Trustor: kp1.Address(), + Type: issuedAsset, + Authorize: true, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&allowTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAATwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABwAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAFBQkNEAAAAAQAAAAAAAAAB6i5yxQAAAEBhgUiorWMaRzTGlVThNgiMpVhSYMKsY4cJyL1mrkkpC2qZ7Q9fBtaTGoS27PC6nK9/nBLOVoyyOHgYculoiYQJ" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestAllowTrustNoIssuer(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(40385577484366)) + + issuedAsset := CreditAsset{Code: "XYZ"} + allowTrust := AllowTrust{ + Trustor: kp1.Address(), + Type: issuedAsset, + Authorize: true, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&allowTrust}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAACS7AAAATwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABwAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAFYWVoAAAAAAQAAAAAAAAAB6i5yxQAAAEDvJnLIv/kTm6yraPLQAbTfEcFIutdNRagQ08KjEKeITbro8PkhhBWgQmCzcP7uNAxxUUKATYus3ASmwUoPoFcB" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageSellOfferNewOffer(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + selling := NativeAsset{} + buying := CreditAsset{"ABCD", 
kp0.Address()} + sellAmount := "100" + createOffer, err := CreateOfferOp(selling, buying, sellAmount, price.MustParse("0.01")) + check(err) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAACVqAAAABQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAwAAAAAAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAA7msoAAAAAAQAAAGQAAAAAAAAAAAAAAAAAAAAB0odkfgAAAEAJl3+AZx/G1ocvk58X/u84LIo+6VdG+1wuK6n2FovWSFVGonVj26xYWlo4kG12AdTSncdF44nc5HAIDCJy6g4L" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageSellOfferDeleteOffer(t *testing.T) { + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761105)) + + offerID := int64(2921622) + deleteOffer, err := DeleteOfferOp(offerID) + check(err) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&deleteOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAACVqAAAAEgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAwAAAAAAAAABRkFLRQAAAABBB4BkxJWGYvNgJBoiXUo2tjgWlNmhHMMKdwGN7RSdsQAAAAAAAAAAAAAAAQAAAAEAAAAAACyUlgAAAAAAAAAB0odkfgAAAEAUo0X6chACDJ0UDj39QQTsfBxQui5um8cXZY2noJ1LbPEpliRkG2TeWvD0Bszk8BnQSgZPV/XfgSKwVXN5MskO" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageSellOfferUpdateOffer(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761097)) + + selling := NativeAsset{} + buying := CreditAsset{"ABCD", kp0.Address()} + sellAmount := "50" + offerID := int64(2497628) + updateOffer, err := UpdateOfferOp(selling, buying, sellAmount, price.MustParse("0.02"), offerID) + check(err) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&updateOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAACVqAAAACgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAwAAAAAAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAdzWUAAAAAAQAAADIAAAAAACYcXAAAAAAAAAAB0odkfgAAAEAMKloNgv6Hv8x+A92O/8oOUpR6hbxegN4+hkGfTT4d0TqrraLy8gBOtvq718TO4akjc9UbceH6yWjoTmm4egwI" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestCreatePassiveSellOffer(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761100)) + + createPassiveOffer := CreatePassiveSellOffer{ + Selling: NativeAsset{}, + Buying: CreditAsset{"ABCD", kp0.Address()}, + Amount: "10", + Price: xdr.Price{1, 1}, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createPassiveOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp1, + ) + 
assert.NoError(t, err) + + expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAACVqAAAADQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABAAAAAAAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAF9eEAAAAAAQAAAAEAAAAAAAAAAdKHZH4AAABAIFA+zNVC+8dptptusks3Eh8SJ3jk+/6/rPxy7IFg4+gpqUotRma5b7QR/gjbnoAsL1tPU0WSYae2y8sJGhQqCg==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestPathPayment(t *testing.T) { + kp0 := newKeypair0() + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(187316408680450)) + + abcdAsset := CreditAsset{"ABCD", kp0.Address()} + pathPayment := PathPayment{ + SendAsset: NativeAsset{}, + SendMax: "10", + Destination: kp2.Address(), + DestAsset: NativeAsset{}, + DestAmount: "1", + Path: []Asset{abcdAsset}, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&pathPayment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp2, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAGQAAKpdAAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAgAAAAAAAAAABfXhAAAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAAAAAAAAAJiWgAAAAAEAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAAAAAABLhVZmAAAAEBdpC1C/0aBSMtXJrfhl3Vp9rQ1IyWFd2MBeAPNsyAYamEjuqIDqCzzUbd8PiBggIH0eEPZaWsfsAl1qEBER0sO" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestMemoText(t *testing.T) { + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(3556099777101824)) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&BumpSequence{BumpTo: 1}}, + Memo: MemoText("Twas brillig"), + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp2, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAGQADKJBAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAxUd2FzIGJyaWxsaWcAAAABAAAAAAAAAAsAAAAAAAAAAQAAAAAAAAABLhVZmAAAAECC0/P+zBk5lpH4zIumNt59nFVrPiDGOu8TrJE4r0mXoae8Fmg1yyHQm3Yo5huuPjc/nzwU/R2DKkkQ3C4mWA0N" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestMemoID(t *testing.T) { + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(3428320205078528)) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&BumpSequence{BumpTo: 1}}, + Memo: MemoID(314159), + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp2, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAGQADC4KAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAABMsvAAAAAQAAAAAAAAALAAAAAAAAAAEAAAAAAAAAAS4VWZgAAABAOT/1f1XoeqY14+wp6rVgwE4fCCPnItc9/85jZN++Fy7lS88e40b3ufQCpzzMCD8AyfHF8BCs/Pn2DiJHxCPQCQ==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestMemoHash(t *testing.T) { + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(3428320205078528)) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&BumpSequence{BumpTo: 1}}, + Memo: 
MemoHash([32]byte{0x01}), + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp2, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAGQADC4KAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAwEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAEAAAAAAAAAAS4VWZgAAABAIGrmlKahBhdVXl2LZGINCNfUAtxiVawjzqgxzyHV7xpEPTft1besnyiDdLBP1+Tbg+hYQK0N2ncL2XmjQ4pcDQ==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestMemoReturn(t *testing.T) { + kp2 := newKeypair2() + sourceAccount := NewSimpleAccount(kp2.Address(), int64(3428320205078528)) + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&BumpSequence{BumpTo: 1}}, + Memo: MemoReturn([32]byte{0x01}), + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp2, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAB+Ecs01jX14asC1KAsPdWlpGbYCM2PEgFZCD3NLhVZmAAAAGQADC4KAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAABAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAALAAAAAAAAAAEAAAAAAAAAAS4VWZgAAABALixU7p2NPKW1iqJqaHqR3Wsy5q+7nj1EjswOD99/klUSlorvodrZ4DrD/IYGvsKSyV0/Zf9LjEN4s4kVVK4dCg==" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageBuyOfferNewOffer(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761092)) + + buyOffer := ManageBuyOffer{ + Selling: NativeAsset{}, + Buying: CreditAsset{"ABCD", kp0.Address()}, + Amount: "100", + Price: price.MustParse("0.01"), + OfferID: 0, + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&buyOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAACVqAAAABQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAADAAAAAAAAAABQUJDRAAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAA7msoAAAAAAQAAAGQAAAAAAAAAAAAAAAAAAAAB0odkfgAAAEB8LqK1uwbwcCQM/hE0rXng2fVCoaMdctQaiS72iJFkq+azWzqYpo1kMa1DUKMvvsJrWPLYjEr9yW8/A3eEE2kF" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageBuyOfferDeleteOffer(t *testing.T) { + kp1 := newKeypair1() + sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761105)) + + buyOffer := ManageBuyOffer{ + Selling: NativeAsset{}, + Buying: CreditAsset{"ABCD", kp1.Address()}, + Amount: "0", + Price: price.MustParse("0.01"), + OfferID: int64(2921622), + } + + received, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&buyOffer}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp1, + ) + assert.NoError(t, err) + + expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAACVqAAAAEgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAADAAAAAAAAAABQUJDRAAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAAAAAAAAAAAAQAAAGQAAAAAACyUlgAAAAAAAAAB0odkfgAAAECLZ6PnKZlGBb8S3GFWg6J01d3Zr88/tki8yka2KFzqivMAmY3D/5IMzzJl4U7RdrYEPam9KwCGKR/f647WTwYG" + assert.Equal(t, expected, received, "Base 64 XDR should match") +} + +func TestManageBuyOfferUpdateOffer(t *testing.T) { + kp1 := newKeypair1() + 
sourceAccount := NewSimpleAccount(kp1.Address(), int64(41137196761097))
+
+	buyOffer := ManageBuyOffer{
+		Selling: NativeAsset{},
+		Buying: CreditAsset{"ABCD", kp1.Address()},
+		Amount: "50",
+		Price: price.MustParse("0.02"),
+		OfferID: int64(2921622),
+	}
+
+	received, err := newSignedTransaction(
+		TransactionParams{
+			SourceAccount: &sourceAccount,
+			IncrementSequenceNum: true,
+			Operations: []Operation{&buyOffer},
+			BaseFee: MinBaseFee,
+			Timebounds: NewInfiniteTimeout(),
+		},
+		network.TestNetworkPassphrase,
+		kp1,
+	)
+	assert.NoError(t, err)
+
+	expected := "AAAAAgAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAGQAACVqAAAACgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAADAAAAAAAAAABQUJDRAAAAAAlyvHaD8duz+iEXkJUUbsHkklIlH46oMrMMYrt0odkfgAAAAAdzWUAAAAAAQAAADIAAAAAACyUlgAAAAAAAAAB0odkfgAAAECv7GrE8YDar5M93RmgzslIH2vVAAJlAZoIsmkFNXTJTTb01R9Q+z0Cl5E6KFpm+qiuxHvL2kwhVOoBpkoYQPcB"
+	assert.Equal(t, expected, received, "Base 64 XDR should match")
+}
+
+func TestBuildChallengeTx(t *testing.T) {
+	kp0 := newKeypair0()
+
+	{
+		// 1 minute timebound
+		tx, err := BuildChallengeTx(kp0.Seed(), kp0.Address(), "testwebauth.stellar.org", "testanchor.stellar.org", network.TestNetworkPassphrase, time.Minute)
+		assert.NoError(t, err)
+		txeBase64, err := tx.Base64()
+		assert.NoError(t, err)
+		var txXDR xdr.TransactionEnvelope
+		err = xdr.SafeUnmarshalBase64(txeBase64, &txXDR)
+		assert.NoError(t, err)
+		assert.Equal(t, int64(0), txXDR.SeqNum(), "sequence number should be 0")
+		assert.Equal(t, uint32(200), txXDR.Fee(), "Fee should be 200")
+		assert.Equal(t, 2, len(txXDR.Operations()), "number operations should be 2")
+		timeDiff := txXDR.TimeBounds().MaxTime - txXDR.TimeBounds().MinTime
+		assert.Equal(t, int64(60), int64(timeDiff), "time difference should be 60 seconds")
+		op := txXDR.Operations()[0]
+		assert.Equal(t, xdr.OperationTypeManageData, op.Body.Type, "operation type should be manage data")
+		assert.Equal(t, xdr.String64("testanchor.stellar.org auth"), op.Body.ManageDataOp.DataName, "DataName should be 'testanchor.stellar.org auth'")
+		assert.Equal(t, 64, len(*op.Body.ManageDataOp.DataValue), "DataValue should be 64 bytes")
+		webAuthOp := txXDR.Operations()[1]
+		assert.Equal(t, xdr.OperationTypeManageData, webAuthOp.Body.Type, "operation type should be manage data")
+		assert.Equal(t, xdr.String64("web_auth_domain"), webAuthOp.Body.ManageDataOp.DataName, "DataName should be 'web_auth_domain'")
+		assert.Equal(t, "testwebauth.stellar.org", string(*webAuthOp.Body.ManageDataOp.DataValue), "DataValue should be 'testwebauth.stellar.org'")
+	}
+
+	{
+		// 5 minutes timebound
+		tx, err := BuildChallengeTx(kp0.Seed(), kp0.Address(), "testwebauth.stellar.org", "testanchor.stellar.org", network.TestNetworkPassphrase, time.Duration(5*time.Minute))
+		assert.NoError(t, err)
+		txeBase64, err := tx.Base64()
+		assert.NoError(t, err)
+		var txXDR1 xdr.TransactionEnvelope
+		err = xdr.SafeUnmarshalBase64(txeBase64, &txXDR1)
+		assert.NoError(t, err)
+		assert.Equal(t, int64(0), txXDR1.SeqNum(), "sequence number should be 0")
+		assert.Equal(t, uint32(200), txXDR1.Fee(), "Fee should be 200")
+		assert.Equal(t, 2, len(txXDR1.Operations()), "number operations should be 2")
+
+		timeDiff := txXDR1.TimeBounds().MaxTime - txXDR1.TimeBounds().MinTime
+		assert.Equal(t, int64(300), int64(timeDiff), "time difference should be 300 seconds")
+		op1 := txXDR1.Operations()[0]
+		assert.Equal(t, xdr.OperationTypeManageData, op1.Body.Type, "operation type should be manage data")
+		assert.Equal(t, xdr.String64("testanchor.stellar.org auth"),
op1.Body.ManageDataOp.DataName, "DataName should be 'testanchor.stellar.org auth'") + assert.Equal(t, 64, len(*op1.Body.ManageDataOp.DataValue), "DataValue should be 64 bytes") + webAuthOp := txXDR1.Operations()[1] + assert.Equal(t, xdr.OperationTypeManageData, webAuthOp.Body.Type, "operation type should be manage data") + assert.Equal(t, xdr.String64("web_auth_domain"), webAuthOp.Body.ManageDataOp.DataName, "DataName should be 'web_auth_domain'") + assert.Equal(t, "testwebauth.stellar.org", string(*webAuthOp.Body.ManageDataOp.DataValue), "DataValue should be 'testwebauth.stellar.org'") + } + + //transaction with infinite timebound + _, err := BuildChallengeTx(kp0.Seed(), kp0.Address(), "webauthdomain", "sdf", network.TestNetworkPassphrase, 0) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "provided timebound must be at least 1s (300s is recommended)") + } +} + +func TestHashHex(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + txeB64, err := tx.Base64() + assert.NoError(t, err) + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAAHqLnLFAAAAQB7MjKIwNEOTIjbEeV+QIjaQp/ZpV5qpbkbDaU54gkfdTOFOUxZq66lTS5FOfP5fmPIVD8InQ00Usy2SmzFC/wc=" + assert.Equal(t, expected, txeB64, "Base 64 XDR should match") + + hashHex, err := tx.HashHex(network.TestNetworkPassphrase) + assert.NoError(t, err) + expected = "1b3905ba8c3c0ecc68ae812f2d77f27c697195e8daf568740fc0f5662f65f759" + assert.Equal(t, expected, hashHex, "hex encoded hash should match") + + txEnv := tx.ToXDR() + assert.NoError(t, err) + assert.NotNil(t, txEnv, "transaction xdr envelope should not be nil") + sourceAccountFromEnv := txEnv.SourceAccount().ToAccountId() + assert.Equal(t, sourceAccount.AccountID, sourceAccountFromEnv.Address()) + assert.Equal(t, uint32(100), txEnv.Fee()) + assert.Equal(t, sourceAccount.Sequence, int64(txEnv.SeqNum())) + assert.Equal(t, xdr.MemoTypeMemoNone, txEnv.Memo().Type) + assert.Len(t, txEnv.Operations(), 1) +} + +func TestTransactionFee(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + assert.Equal(t, int64(100), tx.BaseFee(), "Transaction base fee should match") + assert.Equal(t, int64(100), tx.MaxFee(), "Transaction fee should match") + + tx, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: 500, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + 
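+	// Note: the transaction's maximum fee is derived as BaseFee multiplied by
+	// the number of operations, so with a single operation BaseFee() and
+	// MaxFee() are expected to report the same value (500 stroops here).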
assert.Equal(t, int64(500), tx.BaseFee(), "Transaction base fee should match") + assert.Equal(t, int64(500), tx.MaxFee(), "Transaction fee should match") + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + txeB64, err := tx.Base64() + assert.NoError(t, err) + expected := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAfQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAAHqLnLFAAAAQJ3OvWisOnYNS5R8ZCHrSmbvDrvIYG4+JiAldLYjiXroqvA74r0pQJ4Jw/hZVSGqLZoPIt3RMwYPi3C5xvVLbQU=" + assert.Equal(t, expected, txeB64, "Base 64 XDR should match") +} + +func TestPreAuthTransaction(t *testing.T) { + // Address: GDK3YEHGI3ORGVO7ZEV2XF4SV5JU3BOKHMHPP4QFJ74ZRIIRROZ7ITOJ + kp0 := newKeypair("SDY4PF6F6OWWERZT6OL2LVNREHUGHKALUI5W4U2JK4GAKPAC2RM43OAU") + sourceAccount := NewSimpleAccount(kp0.Address(), int64(4353383146192898)) // sequence number is in the future + + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + } + + // build transaction to be submitted in the future. + txFuture, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + // save the hash of the future transaction. + txFutureHash, err := txFuture.Hash(network.TestNetworkPassphrase) + assert.NoError(t, err) + + // sign transaction without keypairs, the hash of the future transaction on the account + // will be used for authorisation. + txFuture, err = txFuture.Sign(network.TestNetworkPassphrase) + assert.NoError(t, err) + + txeFutureB64, err := txFuture.Base64() + assert.NoError(t, err) + expected := "AAAAAgAAAADVvBDmRt0TVd/JK6uXkq9TTYXKOw738gVP+ZihEYuz9AAAAGQAD3dhAAAAAwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAAA=" + assert.Equal(t, expected, txeFutureB64, "Base 64 XDR should match") + + //encode the txFutureHash as a stellar HashTx signer key. + preAuth, err := strkey.Encode(strkey.VersionByteHashTx, txFutureHash[:]) + assert.NoError(t, err) + + // set sequence number to the current number. + sourceAccount.Sequence = int64(4353383146192897) + + // add hash of future transaction as signer to account + setOptions := SetOptions{ + Signer: &Signer{Address: preAuth, Weight: Threshold(2)}, + } + + // build a transaction to add the hash of the future transaction as a signer on the account. 
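+	// (A pre-authorized transaction signer is one-time use: it authorises only
+	// the exact transaction whose hash was added and is removed from the
+	// account once that transaction is applied.)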
+ txNow, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: 500, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + assert.Equal(t, int64(500), txNow.MaxFee(), "Transaction fee should match") + txNow, err = txNow.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + txeNowB64, err := txNow.Base64() + assert.NoError(t, err) + expected = "AAAAAgAAAADVvBDmRt0TVd/JK6uXkq9TTYXKOw738gVP+ZihEYuz9AAAAfQAD3dhAAAAAgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAGPXOumKQj5/MjKKSmjQXe4G4g9nK/mkyzmmROMIZnjtQAAAAIAAAAAAAAAARGLs/QAAABAutrV0Cg03KwfFbzkCGiNxAldLsqQZKRjbsqHZyy2Nu4ouEDHQeIOKLWCLymOp21kKmGGqTYekPXVbGHyujh0DA==" + assert.Equal(t, expected, txeNowB64, "Base 64 XDR should match") + // Note: txeFutureB64 can be submitted to the network after txeNowB64 has been applied to the account +} + +func TestHashXTransaction(t *testing.T) { + // 256 bit preimage + preimage := "this is a preimage for hashx transactions on the stellar network" + + preimageHash := sha256.Sum256([]byte(preimage)) + + //encode preimageHash as a stellar HashX signer key + hashx, err := strkey.Encode(strkey.VersionByteHashX, preimageHash[:]) + assert.NoError(t, err) + + // add hashx as signer to the account + setOptions := SetOptions{ + Signer: &Signer{Address: hashx, Weight: Threshold(1)}, + } + + // Address: GDK3YEHGI3ORGVO7ZEV2XF4SV5JU3BOKHMHPP4QFJ74ZRIIRROZ7ITOJ + kp0 := newKeypair("SDY4PF6F6OWWERZT6OL2LVNREHUGHKALUI5W4U2JK4GAKPAC2RM43OAU") + sourceAccount := NewSimpleAccount(kp0.Address(), int64(4353383146192899)) + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&setOptions}, + BaseFee: 500, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + assert.Equal(t, int64(500), tx.MaxFee(), "Transaction fee should match") + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + txeB64, err := tx.Base64() + assert.NoError(t, err) + + expected := "AAAAAgAAAADVvBDmRt0TVd/JK6uXkq9TTYXKOw738gVP+ZihEYuz9AAAAfQAD3dhAAAABAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAL7JYG+aCH7iEhT/BWL06rHIhtYklHqyQdwLuk9li6jBQAAAAEAAAAAAAAAARGLs/QAAABAhwcHwm3DsBcqCCy1uzmXo73W7FTxMAes+qHABuHERruvb1ygqwRWA9pjHSUQnoJYCYH4GhY9qrIQYC/MkNeFBw==" + assert.Equal(t, expected, txeB64, "Base 64 XDR should match") + + // build a transaction to test hashx signer + payment := Payment{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + Asset: NativeAsset{}, + } + + sourceAccount.Sequence = int64(4353383146192902) + + tx, err = NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&payment}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + // sign transaction with the preimage + tx, err = tx.SignHashX([]byte(preimage)) + assert.NoError(t, err) + + txeB64, err = tx.Base64() + assert.NoError(t, err) + expected = "AAAAAgAAAADVvBDmRt0TVd/JK6uXkq9TTYXKOw738gVP+ZihEYuz9AAAAGQAD3dhAAAABwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAQAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAAAAAABfXhAAAAAAAAAAABli6jBQAAAEB0aGlzIGlzIGEgcHJlaW1hZ2UgZm9yIGhhc2h4IHRyYW5zYWN0aW9ucyBvbiB0aGUgc3RlbGxhciBuZXR3b3Jr" + assert.Equal(t, expected, 
txeB64, "Base 64 XDR should match") + +} + +func TestFromXDR(t *testing.T) { + txeB64 := "AAAAACYWIvM98KlTMs0IlQBZ06WkYpZ+gILsQN6ega0++I/sAAAAZAAXeEkAAAABAAAAAAAAAAEAAAAQMkExVjZKNTcwM0c0N1hIWQAAAAEAAAABAAAAACYWIvM98KlTMs0IlQBZ06WkYpZ+gILsQN6ega0++I/sAAAAAQAAAADMSEvcRKXsaUNna++Hy7gWm/CfqTjEA7xoGypfrFGUHAAAAAAAAAACCPHRAAAAAAAAAAABPviP7AAAAEBu6BCKf4WZHPum5+29Nxf6SsJNN8bgjp1+e1uNBaHjRg3rdFZYgUqEqbHxVEs7eze3IeRbjMZxS3zPf/xwJCEI" + + tx, err := TransactionFromXDR(txeB64) + assert.NoError(t, err) + newTx, ok := tx.Transaction() + assert.True(t, ok) + _, ok = tx.FeeBump() + assert.False(t, ok) + + assert.Equal(t, "GATBMIXTHXYKSUZSZUEJKACZ2OS2IYUWP2AIF3CA32PIDLJ67CH6Y5UY", newTx.SourceAccount().AccountID, "source accounts should match") + assert.Equal(t, int64(100), newTx.BaseFee(), "Base fee should match") + sa := newTx.SourceAccount() + assert.Equal(t, int64(6606179392290817), sa.Sequence, "Sequence number should match") + assert.Equal(t, int64(6606179392290817), newTx.SequenceNumber(), "Sequence number should match") + assert.Equal(t, 1, len(newTx.Operations()), "Operations length should match") + assert.IsType(t, newTx.Operations()[0], &Payment{}, "Operation types should match") + paymentOp, ok1 := newTx.Operations()[0].(*Payment) + assert.Equal(t, true, ok1) + assert.Equal(t, "GATBMIXTHXYKSUZSZUEJKACZ2OS2IYUWP2AIF3CA32PIDLJ67CH6Y5UY", paymentOp.SourceAccount, "Operation source should match") + assert.Equal(t, "GDGEQS64ISS6Y2KDM5V67B6LXALJX4E7VE4MIA54NANSUX5MKGKBZM5G", paymentOp.Destination, "Operation destination should match") + assert.Equal(t, "874.0000000", paymentOp.Amount, "Operation amount should match") + + txeB64 = "AAAAAGigiN2q4qBXAERImNEncpaADylyBRtzdqpEsku6CN0xAAABkAAADXYAAAABAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAABAAAABm5ldyB0eAAAAAAAAgAAAAEAAAAA+Q2efEMLNGF4i+aYfutUXGMSlf8tNevKeS1Jl/oCVGkAAAAGAAAAAVVTRAAAAAAAaKCI3arioFcAREiY0SdyloAPKXIFG3N2qkSyS7oI3TF//////////wAAAAAAAAAKAAAABHRlc3QAAAABAAAABXZhbHVlAAAAAAAAAAAAAAA=" + + tx2, err := TransactionFromXDR(txeB64) + assert.NoError(t, err) + newTx2, ok := tx2.Transaction() + assert.True(t, ok) + _, ok = tx2.FeeBump() + assert.False(t, ok) + + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", newTx2.SourceAccount().AccountID, "source accounts should match") + assert.Equal(t, int64(200), newTx2.BaseFee(), "Base fee should match") + assert.Equal(t, int64(14800457302017), newTx2.SourceAccount().Sequence, "Sequence number should match") + assert.Equal(t, int64(14800457302017), newTx2.SequenceNumber(), "Sequence number should match") + + memo, ok := newTx2.Memo().(MemoText) + assert.Equal(t, true, ok) + assert.Equal(t, MemoText("new tx"), memo, "memo should match") + assert.Equal(t, 2, len(newTx2.Operations()), "Operations length should match") + assert.IsType(t, newTx2.Operations()[0], &ChangeTrust{}, "Operation types should match") + assert.IsType(t, newTx2.Operations()[1], &ManageData{}, "Operation types should match") + op1, ok1 := newTx2.Operations()[0].(*ChangeTrust) + assert.Equal(t, true, ok1) + assert.Equal(t, "GD4Q3HT4IMFTIYLYRPTJQ7XLKROGGEUV74WTL26KPEWUTF72AJKGSJS7", op1.SourceAccount, "Operation source should match") + assetType, err := op1.Line.GetType() + assert.NoError(t, err) + + assert.Equal(t, AssetTypeCreditAlphanum4, assetType, "Asset type should match") + assert.Equal(t, "USD", op1.Line.GetCode(), "Asset code should match") + assert.Equal(t, "GBUKBCG5VLRKAVYAIREJRUJHOKLIADZJOICRW43WVJCLES52BDOTCQZU", op1.Line.GetIssuer(), "Asset issuer should match") + assert.Equal(t, "922337203685.4775807", op1.Limit, 
"trustline limit should match") + + op2, ok2 := newTx2.Operations()[1].(*ManageData) + assert.Equal(t, true, ok2) + assert.Equal(t, "", op2.SourceAccount, "Operation source should match") + assert.Equal(t, "test", op2.Name, "Name should match") + assert.Equal(t, "value", string(op2.Value), "Value should match") + + // Muxed accounts + txB64WithMuxedAccounts := "AAAAAgAAAQAAAAAAyv66vuDcbeFyXKxmUWK1L6znNbKKIkPkHRJNbLktcKPqLnLFAAAAZAAiII0AAAAbAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAEAAAEAAAAAAMr+ur7g3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAAEAAAEAgAAAAAAAAAA/DDS/k60NmXHQTMyQ9wVRHIOKrZc0pKL7DXoD/H/omgAAAAAAAAAABfXhAAAAAAAAAAAB6i5yxQAAAED4Wkvwf/BJV+fqa6Kvi+T/7ZL82pOinN68GlvEi9qK4klH+qITyvN3jRj5Nfz0+VrE2xBJPVc8sS/qN9LlznoC" + + // It provides M-addreses when enabling muxed accounts + tx3, err := TransactionFromXDR(txB64WithMuxedAccounts) + assert.NoError(t, err) + newTx3, ok := tx3.Transaction() + assert.True(t, ok) + assert.Equal(t, "MDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKAAAAAAMV7V2XYGQO", newTx3.sourceAccount.AccountID) + op3, ok3 := newTx3.Operations()[0].(*Payment) + assert.True(t, ok3) + assert.Equal(t, "MDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKAAAAAAMV7V2XYGQO", op3.SourceAccount) + assert.Equal(t, "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK", op3.Destination) + +} + +func TestBuild(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + expectedUnsigned := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAAA=" + + expectedSigned := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGgAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAAHqLnLFAAAAQB7MjKIwNEOTIjbEeV+QIjaQp/ZpV5qpbkbDaU54gkfdTOFOUxZq66lTS5FOfP5fmPIVD8InQ00Usy2SmzFC/wc=" + + txeB64, err := tx.Base64() + assert.NoError(t, err) + assert.Equal(t, expectedUnsigned, txeB64, "tx envelope should match") + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + txeB64, err = tx.Base64() + assert.NoError(t, err) + assert.Equal(t, expectedSigned, txeB64, "tx envelope should match") +} + +func TestFromXDRBuildSignEncode(t *testing.T) { + expectedUnsigned := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAVuZXd0eAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAAHqLnLFAAAAQAz221zc6QuNPFsmBkLMzd1QPXuNbDabMmdh3EutkV71A7DdAPiFzD0TGgm/loJ9TjOiJGpvaJdDCWDXitAT8Qo=" + + expectedSigned := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAGwAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAVuZXd0eAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAALqLnLFAAAAQAz221zc6QuNPFsmBkLMzd1QPXuNbDabMmdh3EutkV71A7DdAPiFzD0TGgm/loJ9TjOiJGpvaJdDCWDXitAT8QrqLnLFAAAAQAz221zc6QuNPFsmBkLMzd1QPXuNbDabMmdh3EutkV71A7DdAPiFzD0TGgm/loJ9TjOiJGpvaJdDCWDXitAT8Qo=" + + kp0 := newKeypair0() + + // test 
signing transaction without modification + tx, err := TransactionFromXDR(expectedUnsigned) + assert.NoError(t, err) + newTx, ok := tx.Transaction() + assert.True(t, ok) + _, ok = tx.FeeBump() + assert.False(t, ok) + + //passphrase is needed for signing + newTx, err = newTx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + txeB64, err := newTx.Base64() + assert.NoError(t, err) + assert.Equal(t, expectedSigned, txeB64, "tx envelope should match") + + // test signing transaction with modification + expectedSigned2 := "AAAAAgAAAADg3G3hclysZlFitS+s5zWyiiJD5B0STWy5LXCj6i5yxQAAAGQAIiCNAAAAHAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAVuZXd0eAAAAAAAAAEAAAAAAAAAAAAAAACE4N7avBtJL576CIWTzGCbGPvSlVfMQAOjcYbSsSF2VAAAAAAF9eEAAAAAAAAAAAHqLnLFAAAAQFu7obEnMmrp+1Pnz/8o3IUIOWJ6rVTsJO1dAYapN3/zVjCNW3/JzgewGrKNWjPelF7BNRhk5lx93CFGdHDJ/Ac=" + tx, err = TransactionFromXDR(expectedUnsigned) + assert.NoError(t, err) + newTx, ok = tx.Transaction() + assert.True(t, ok) + _, ok = tx.FeeBump() + assert.False(t, ok) + + txeB64, err = newSignedTransaction( + TransactionParams{ + SourceAccount: &SimpleAccount{ + AccountID: newTx.SourceAccount().AccountID, + Sequence: newTx.SourceAccount().Sequence + 1, + }, + IncrementSequenceNum: false, + Operations: newTx.Operations(), + BaseFee: newTx.BaseFee(), + Memo: MemoText("newtx"), + Timebounds: newTx.Timebounds(), + }, + network.TestNetworkPassphrase, + kp0, + ) + assert.NoError(t, err) + assert.Equal(t, expectedSigned2, txeB64, "tx envelope should match") +} + +func TestSignWithSecretKey(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + txSource := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + tx1Source := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + } + + expected, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + tx1, err := NewTransaction( + TransactionParams{ + SourceAccount: &tx1Source, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + tx1, err = tx1.SignWithKeyString( + network.TestNetworkPassphrase, + "SBPQUZ6G4FZNWFHKUWC5BEYWF6R52E3SEP7R3GWYSM2XTKGF5LNTWW4R", ""+ + "SBMSVD4KKELKGZXHBUQTIROWUAPQASDX7KEJITARP4VMZ6KLUHOGPTYW", + ) + assert.NoError(t, err) + + actual, err := tx1.Base64() + assert.NoError(t, err) + assert.Equal(t, expected, actual, "base64 xdr should match") +} + +func TestAddSignatureDecorated(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + txSource := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + tx1Source := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + } + + expected, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + tx1, err := NewTransaction( + 
TransactionParams{ + SourceAccount: &tx1Source, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + // Same if signatures added separately. + { + var tx1sigs1 *Transaction + tx1sigs1, err = tx1.AddSignatureDecorated( + xdr.DecoratedSignature{ + Hint: kp0.Hint(), + Signature: func() xdr.Signature { + var sigBytes []byte + sigBytes, err = base64.StdEncoding.DecodeString("TVogR6tbrWLnOc1BsP/j+Qrxpja2NWNgeRIwujECYscRdMG7AMtnb3dkCT7sqlbSM0TTzlRh7G+BcVocYBtqBw==") + if err != nil { + require.NoError(t, err) + } + return xdr.Signature(sigBytes) + }(), + }, + ) + assert.NoError(t, err) + tx1sigs1, err = tx1sigs1.AddSignatureDecorated( + xdr.DecoratedSignature{ + Hint: kp1.Hint(), + Signature: func() xdr.Signature { + var sigBytes []byte + sigBytes, err = base64.StdEncoding.DecodeString("Iy77JteoW/FbeiuViZpgTyvrHP4BnBOeyVOjrdb5O/MpEMwcSlYXAkCBqPt4tBDil4jIcDDLhm7TsN6aUBkIBg==") + if err != nil { + require.NoError(t, err) + } + return xdr.Signature(sigBytes) + }(), + }, + ) + assert.NoError(t, err) + var actual string + actual, err = tx1sigs1.Base64() + assert.NoError(t, err) + assert.Equal(t, expected, actual, "base64 xdr should match") + } + + // Same if signatures added together. + { + var tx1sigs2 *Transaction + tx1sigs2, err = tx1.AddSignatureDecorated( + xdr.DecoratedSignature{ + Hint: kp0.Hint(), + Signature: func() xdr.Signature { + var sigBytes []byte + sigBytes, err = base64.StdEncoding.DecodeString("TVogR6tbrWLnOc1BsP/j+Qrxpja2NWNgeRIwujECYscRdMG7AMtnb3dkCT7sqlbSM0TTzlRh7G+BcVocYBtqBw==") + if err != nil { + require.NoError(t, err) + } + return xdr.Signature(sigBytes) + }(), + }, + xdr.DecoratedSignature{ + Hint: kp1.Hint(), + Signature: func() xdr.Signature { + var sigBytes []byte + sigBytes, err = base64.StdEncoding.DecodeString("Iy77JteoW/FbeiuViZpgTyvrHP4BnBOeyVOjrdb5O/MpEMwcSlYXAkCBqPt4tBDil4jIcDDLhm7TsN6aUBkIBg==") + if err != nil { + require.NoError(t, err) + } + return xdr.Signature(sigBytes) + }(), + }, + ) + assert.NoError(t, err) + var actual string + actual, err = tx1sigs2.Base64() + assert.NoError(t, err) + assert.Equal(t, expected, actual, "base64 xdr should match") + } +} + +func TestFeeBumpTransaction_AddSignatureDecorated(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + + tx, err := NewTransaction(TransactionParams{ + SourceAccount: &SimpleAccount{kp0.Address(), int64(9605939170639897)}, + Operations: []Operation{&CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + }}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }) + require.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0, kp1) + require.NoError(t, err) + + fbtx, err := NewFeeBumpTransaction(FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: kp0.Address(), + BaseFee: MinBaseFee, + }) + require.NoError(t, err) + require.Len(t, fbtx.Signatures(), 0) + + fbtxHash, err := fbtx.Hash(network.TestNetworkPassphrase) + require.NoError(t, err) + + sig, err := kp0.SignDecorated(fbtxHash[:]) + require.NoError(t, err) + fbtxWithSig, err := fbtx.AddSignatureDecorated(sig) + require.NoError(t, err) + require.Len(t, fbtx.Signatures(), 0) + require.Len(t, fbtxWithSig.Signatures(), 1) + + sig, err = kp1.SignDecorated(fbtxHash[:]) + require.NoError(t, err) + fbtxWithTwoSigs, err := fbtxWithSig.AddSignatureDecorated(sig) + require.NoError(t, err) + require.Len(t, fbtx.Signatures(), 0) 
+ require.Len(t, fbtxWithSig.Signatures(), 1) + require.Len(t, fbtxWithTwoSigs.Signatures(), 2) +} + +func TestAddSignatureBase64(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + txSource := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + tx1Source := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + } + + expected, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + network.TestNetworkPassphrase, + kp0, kp1, + ) + assert.NoError(t, err) + + tx1, err := NewTransaction( + TransactionParams{ + SourceAccount: &tx1Source, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + tx1, err = tx1.AddSignatureBase64( + network.TestNetworkPassphrase, + "GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3", + "TVogR6tbrWLnOc1BsP/j+Qrxpja2NWNgeRIwujECYscRdMG7AMtnb3dkCT7sqlbSM0TTzlRh7G+BcVocYBtqBw==", + ) + assert.NoError(t, err) + + tx1, err = tx1.AddSignatureBase64( + network.TestNetworkPassphrase, + "GAS4V4O2B7DW5T7IQRPEEVCRXMDZESKISR7DVIGKZQYYV3OSQ5SH5LVP", + "Iy77JteoW/FbeiuViZpgTyvrHP4BnBOeyVOjrdb5O/MpEMwcSlYXAkCBqPt4tBDil4jIcDDLhm7TsN6aUBkIBg==", + ) + assert.NoError(t, err) + + actual, err := tx1.Base64() + assert.NoError(t, err) + assert.Equal(t, expected, actual, "base64 xdr should match") +} + +func TestTransaction_ClearSignatures(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &SimpleAccount{AccountID: kp0.Address(), Sequence: int64(9605939170639898)}, + Operations: []Operation{&CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + }}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + require.NoError(t, err) + require.Len(t, tx.Signatures(), 0) + + txWithSig, err := tx.Sign(network.TestNetworkPassphrase, kp0) + require.NoError(t, err) + require.Len(t, txWithSig.Signatures(), 1) + + txSigCleared, err := txWithSig.ClearSignatures() + require.NoError(t, err) + require.Len(t, txSigCleared.Signatures(), 0) + + expected, err := tx.Base64() + assert.NoError(t, err) + actual, err := txSigCleared.Base64() + assert.NoError(t, err) + assert.Equal(t, expected, actual) +} + +func TestFeeBumpTransaction_ClearSignatures(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + + tx, err := NewTransaction(TransactionParams{ + SourceAccount: &SimpleAccount{AccountID: kp0.Address(), Sequence: int64(9605939170639898)}, + Operations: []Operation{&CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + }}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }) + require.NoError(t, err) + require.Len(t, tx.Signatures(), 0) + txWithSig, err := tx.Sign(network.TestNetworkPassphrase, kp0) + require.NoError(t, err) + require.Len(t, txWithSig.Signatures(), 1) + + fbtx, err := NewFeeBumpTransaction(FeeBumpTransactionParams{ + Inner: txWithSig, + FeeAccount: kp0.Address(), + BaseFee: MinBaseFee, + }) + require.NoError(t, err) + require.Len(t, fbtx.Signatures(), 0) + + 
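+	// Sign and ClearSignatures return new transaction values rather than
+	// mutating the receiver, so the original fbtx keeps zero signatures while
+	// each derived copy carries its own signature set, as the Len checks here verify.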
fbtxWithSig, err := fbtx.Sign(network.TestNetworkPassphrase, kp0) + require.NoError(t, err) + require.Len(t, fbtxWithSig.Signatures(), 1) + + fbtxSigCleared, err := fbtxWithSig.ClearSignatures() + require.NoError(t, err) + require.Len(t, fbtxSigCleared.Signatures(), 0) + + expected, err := fbtx.Base64() + assert.NoError(t, err) + actual, err := fbtxSigCleared.Base64() + assert.NoError(t, err) + assert.Equal(t, expected, actual) +} + +func TestReadChallengeTx_validSignedByServerAndClient(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP, clientKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, clientKP.Address(), readClientAccountID) + assert.NoError(t, err) +} + +func TestReadChallengeTx_validSignedByServer(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, clientKP.Address(), readClientAccountID) + assert.NoError(t, err) +} + +func TestReadChallengeTx_invalidNotSignedByServer(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx64, 
err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, clientKP.Address(), readClientAccountID) + assert.EqualError(t, err, "transaction not signed by "+serverKP.Address()) +} + +func TestReadChallengeTx_invalidCorrupted(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + tx64 = strings.ReplaceAll(tx64, "A", "B") + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Nil(t, readTx) + assert.Equal(t, "", readClientAccountID) + assert.EqualError( + t, + err, + "could not parse challenge: unable to unmarshal transaction envelope: "+ + "decoding EnvelopeType: '68174086' is not a valid EnvelopeType enum value", + ) +} + +func TestReadChallengeTx_invalidServerAccountIDMismatch(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(newKeypair2().Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, "", readClientAccountID) + assert.EqualError(t, err, "transaction source account is not equal to server's account") +} + +func TestReadChallengeTx_invalidSeqNoNotZero(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), 1234) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, 
+ IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, "", readClientAccountID) + assert.EqualError(t, err, "transaction sequence number must be 0") +} + +func TestReadChallengeTx_invalidTimeboundsInfinite(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, "", readClientAccountID) + assert.EqualError(t, err, "transaction requires non-infinite timebounds") +} + +func TestReadChallengeTx_invalidTimeboundsOutsideRange(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimebounds(0, 100), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, "", readClientAccountID) + assert.Error(t, err) + assert.Regexp(t, "transaction is not within range of the specified timebounds", err.Error()) +} + +func TestReadChallengeTx_validTimeboundsWithGracePeriod(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: 
"web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + unixNow := time.Now().UTC().Unix() + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimebounds(unixNow+5*59, unixNow+60*60), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, clientKP.Address(), readClientAccountID) + assert.NoError(t, err) +} + +func TestReadChallengeTx_invalidTimeboundsWithGracePeriod(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + unixNow := time.Now().UTC().Unix() + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimebounds(unixNow+5*61, unixNow+60*60), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, "", readClientAccountID) + assert.Error(t, err) + assert.Regexp(t, "transaction is not within range of the specified timebounds", err.Error()) +} + +func TestReadChallengeTx_invalidOperationWrongType(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := BumpSequence{ + SourceAccount: clientKP.Address(), + BumpTo: 0, + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, "", readClientAccountID) + assert.EqualError(t, err, "operation type should be manage_data") +} + +func TestReadChallengeTx_invalidOperationNoSourceAccount(t *testing.T) { + serverKP := newKeypair0() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: 
NewTimeout(300), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + _, _, _, err = ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.EqualError(t, err, "operation should have a source account") +} + +func TestReadChallengeTx_invalidDataValueWrongEncodedLength(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 45))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, clientKP.Address(), readClientAccountID) + assert.EqualError(t, err, "random nonce encoded as base64 should be 64 bytes long") +} + +func TestReadChallengeTx_invalidDataValueCorruptBase64(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA?AAAAAAAAAAAAAAAAAAAAAAAAAA"), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, clientKP.Address(), readClientAccountID) + assert.EqualError(t, err, "failed to decode random nonce provided in manage_data operation: illegal base64 data at input byte 37") +} + +func TestReadChallengeTx_invalidDataValueWrongByteLength(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 47))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + 
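+			// Starting the account at sequence -1 and incrementing yields the sequence
+			// number 0 that challenge transactions require.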
IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + assert.NoError(t, err) + + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, clientKP.Address(), readClientAccountID) + assert.EqualError(t, err, "random nonce before encoding as base64 should be 48 bytes long") +} + +func TestReadChallengeTx_acceptsV0AndV1Transactions(t *testing.T) { + kp0 := newKeypair0() + tx, err := BuildChallengeTx( + kp0.Seed(), + kp0.Address(), + "testwebauth.stellar.org", + "testanchor.stellar.org", + network.TestNetworkPassphrase, + time.Hour, + ) + assert.NoError(t, err) + + originalHash, err := tx.HashHex(network.TestNetworkPassphrase) + assert.NoError(t, err) + + v1Challenge, err := marshallBase64(tx.envelope, tx.Signatures()) + assert.NoError(t, err) + + convertToV0(tx) + v0Challenge, err := marshallBase64(tx.envelope, tx.Signatures()) + assert.NoError(t, err) + + for _, challenge := range []string{v1Challenge, v0Challenge} { + parsedTx, clientAccountID, _, err := ReadChallengeTx( + challenge, + kp0.Address(), + network.TestNetworkPassphrase, + "testwebauth.stellar.org", + []string{"testanchor.stellar.org"}, + ) + assert.NoError(t, err) + + assert.Equal(t, kp0.Address(), clientAccountID) + + hash, err := parsedTx.HashHex(network.TestNetworkPassphrase) + assert.NoError(t, err) + assert.Equal(t, originalHash, hash) + } +} + +func TestReadChallengeTx_forbidsFeeBumpTransactions(t *testing.T) { + kp0 := newKeypair0() + tx, err := BuildChallengeTx( + kp0.Seed(), + kp0.Address(), + "testwebauth.stellar.org", + "testanchor.stellar.org", + network.TestNetworkPassphrase, + time.Hour, + ) + assert.NoError(t, err) + + kp1 := newKeypair1() + feeBumpTx, err := NewFeeBumpTransaction( + FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: kp1.Address(), + BaseFee: 3 * MinBaseFee, + }, + ) + assert.NoError(t, err) + + feeBumpTx, err = feeBumpTx.Sign(network.TestNetworkPassphrase, kp1) + assert.NoError(t, err) + + challenge, err := feeBumpTx.Base64() + assert.NoError(t, err) + _, _, _, err = ReadChallengeTx( + challenge, + kp0.Address(), + network.TestNetworkPassphrase, + "testwebauth.stellar.org", + []string{"testanchor.stellar.org"}, + ) + assert.EqualError(t, err, "challenge cannot be a fee bump transaction") +} + +func TestReadChallengeTx_forbidsMuxedAccounts(t *testing.T) { + kp0 := newKeypair0() + tx, err := BuildChallengeTx( + kp0.Seed(), + kp0.Address(), + "testwebauth.stellar.org", + "testanchor.stellar.org", + network.TestNetworkPassphrase, + time.Hour, + ) + + env := tx.ToXDR() + assert.NoError(t, err) + aid := xdr.MustAddress(kp0.Address()) + muxedAccount := xdr.MuxedAccount{ + Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519, + Med25519: &xdr.MuxedAccountMed25519{ + Id: 0xcafebabe, + Ed25519: *aid.Ed25519, + }, + } + *env.V1.Tx.Operations[0].SourceAccount = muxedAccount + + challenge, err := marshallBase64(env, env.Signatures()) + assert.NoError(t, err) + + _, _, _, err = ReadChallengeTx( + challenge, + kp0.Address(), + network.TestNetworkPassphrase, + "testwebauth.stellar.org", + []string{"testanchor.stellar.org"}, + ) + errorMessage := "only valid Ed25519 accounts are allowed in challenge transactions" + 
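+	// The returned error may be wrapped, so only assert that the message contains the expected text.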
assert.Contains(t, err.Error(), errorMessage) +} + +func TestReadChallengeTx_doesVerifyHomeDomainFailure(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + _, _, _, err = ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"willfail"}) + assert.EqualError(t, err, "operation key does not match any homeDomains passed (key=\"testanchor.stellar.org auth\", homeDomains=[willfail])") +} + +func TestReadChallengeTx_doesVerifyHomeDomainSuccess(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + _, _, _, err = ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, nil, err) +} + +func TestReadChallengeTx_allowsAdditionalManageDataOpsWithSourceAccountSetToServerAccount(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + op2 := ManageData{ + SourceAccount: txSource.AccountID, + Name: "testanchor.stellar.org auth", + Value: []byte("a value"), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, 
readTx) + assert.Equal(t, clientKP.Address(), readClientAccountID) + assert.NoError(t, err) +} + +func TestReadChallengeTx_disallowsAdditionalManageDataOpsWithoutSourceAccountSetToServerAccount(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + op2 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "a key", + Value: []byte("a value"), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + _, _, _, err = ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.EqualError(t, err, "subsequent operations are unrecognized") +} + +func TestReadChallengeTx_disallowsAdditionalOpsOfOtherTypes(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + op2 := BumpSequence{ + SourceAccount: txSource.AccountID, + BumpTo: 0, + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + readTx, readClientAccountID, _, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.Equal(t, tx, readTx) + assert.Equal(t, clientKP.Address(), readClientAccountID) + assert.EqualError(t, err, "operation type should be manage_data") +} + +func TestReadChallengeTx_matchesHomeDomain(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) 
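+	// The third return value reports which of the provided home domains matched the challenge's operation key.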
+ _, _, matchedHomeDomain, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + require.NoError(t, err) + assert.Equal(t, matchedHomeDomain, "testanchor.stellar.org") +} + +func TestReadChallengeTx_doesNotMatchHomeDomain(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + _, _, matchedHomeDomain, err := ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"not", "going", "to", "match"}) + assert.Equal(t, matchedHomeDomain, "") + assert.EqualError(t, err, "operation key does not match any homeDomains passed (key=\"testanchor.stellar.org auth\", homeDomains=[not going to match])") +} + +func TestReadChallengeTx_validWhenWebAuthDomainMissing(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + _, _, _, err = ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.NoError(t, err) +} + +func TestReadChallengeTx_invalidWebAuthDomainSourceAccount(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: clientKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + _, _, _, err = ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.EqualError(t, err, `web auth domain operation must have server source account`) 
+} + +func TestReadChallengeTx_invalidWebAuthDomain(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.example.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + _, _, _, err = ReadChallengeTx(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.EqualError(t, err, `web auth domain operation value is "testwebauth.example.org" but expect "testwebauth.stellar.org"`) +} + +func TestVerifyChallengeTxThreshold_invalidServer(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "transaction not signed by "+serverKP.Address()) +} + +func TestVerifyChallengeTxThreshold_validServerAndClientKeyMeetingThreshold(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + wantSigners := []string{ + clientKP.Address(), + } + + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + 
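+	// ElementsMatch compares the signer sets irrespective of order.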
assert.ElementsMatch(t, wantSigners, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxThreshold_validServerAndMultipleClientKeyMeetingThreshold(t *testing.T) { + serverKP := newKeypair0() + clientKP1 := newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP1.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP1, clientKP2, + ) + assert.NoError(t, err) + + threshold := Threshold(3) + signerSummary := map[string]int32{ + clientKP1.Address(): 1, + clientKP2.Address(): 2, + } + wantSigners := []string{ + clientKP1.Address(), + clientKP2.Address(), + } + + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.ElementsMatch(t, wantSigners, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxThreshold_validServerAndMultipleClientKeyMeetingThresholdSomeUnused(t *testing.T) { + serverKP := newKeypair0() + clientKP1 := newKeypair1() + clientKP2 := newKeypair2() + clientKP3 := keypair.MustRandom() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP1.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + threshold := Threshold(3) + signerSummary := SignerSummary{ + clientKP1.Address(): 1, + clientKP2.Address(): 2, + clientKP3.Address(): 2, + } + wantSigners := []string{ + clientKP1.Address(), + clientKP2.Address(), + } + + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP1, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.ElementsMatch(t, wantSigners, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxThreshold_validServerAndMultipleClientKeyMeetingThresholdSomeUnusedIgnorePreauthTxHashAndXHash(t *testing.T) { + serverKP := newKeypair0() + clientKP1 := newKeypair1() + clientKP2 := newKeypair2() + clientKP3 := keypair.MustRandom() + preauthTxHash := "TAQCSRX2RIDJNHFIFHWD63X7D7D6TRT5Y2S6E3TEMXTG5W3OECHZ2OG4" + xHash := "XDRPF6NZRR7EEVO7ESIWUDXHAOMM2QSKIQQBJK6I2FB7YKDZES5UCLWD" + unknownSignerType := "?ARPF6NZRR7EEVO7ESIWUDXHAOMM2QSKIQQBJK6I2FB7YKDZES5UCLWD" + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP1.Address(), + Name: "testanchor.stellar.org auth", + Value: 
[]byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + threshold := Threshold(3) + signerSummary := SignerSummary{ + clientKP1.Address(): 1, + clientKP2.Address(): 2, + clientKP3.Address(): 2, + preauthTxHash: 10, + xHash: 10, + unknownSignerType: 10, + } + wantSigners := []string{ + clientKP1.Address(), + clientKP2.Address(), + } + + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP1, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.ElementsMatch(t, wantSigners, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxThreshold_invalidServerAndMultipleClientKeyNotMeetingThreshold(t *testing.T) { + serverKP := newKeypair0() + clientKP1 := newKeypair1() + clientKP2 := newKeypair2() + clientKP3 := keypair.MustRandom() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP1.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + threshold := Threshold(10) + signerSummary := SignerSummary{ + clientKP1.Address(): 1, + clientKP2.Address(): 2, + clientKP3.Address(): 2, + } + + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP1, clientKP2, + ) + assert.NoError(t, err) + + _, err = VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.EqualError(t, err, "signers with weight 3 do not meet threshold 10") +} + +func TestVerifyChallengeTxThreshold_invalidClientKeyUnrecognized(t *testing.T) { + serverKP := newKeypair0() + clientKP1 := newKeypair1() + clientKP2 := newKeypair2() + clientKP3 := keypair.MustRandom() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP1.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + threshold := Threshold(10) + signerSummary := map[string]int32{ + clientKP1.Address(): 1, + clientKP2.Address(): 2, + } + + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP1, clientKP2, clientKP3, + ) + assert.NoError(t, err) + + _, err = VerifyChallengeTxThreshold(tx64, serverKP.Address(), 
network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.EqualError(t, err, "transaction has unrecognized signatures") +} + +func TestVerifyChallengeTxThreshold_invalidNoSigners(t *testing.T) { + serverKP := newKeypair0() + clientKP1 := newKeypair1() + clientKP2 := newKeypair2() + clientKP3 := keypair.MustRandom() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP1.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + threshold := Threshold(10) + signerSummary := SignerSummary{} + + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP1, clientKP2, clientKP3, + ) + assert.NoError(t, err) + + _, err = VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.EqualError(t, err, "no verifiable signers provided, at least one G... address must be provided") +} + +func TestVerifyChallengeTxThreshold_weightsAddToMoreThan8Bits(t *testing.T) { + serverKP := newKeypair0() + clientKP1 := newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP1.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP1, clientKP2, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP1.Address(): 255, + clientKP2.Address(): 1, + } + wantSigners := []string{ + clientKP1.Address(), + clientKP2.Address(), + } + + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.ElementsMatch(t, wantSigners, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxThreshold_matchesHomeDomain(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + tx, err = 
tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + _, err = tx.Base64() + require.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + + tx, err = tx.Sign(network.TestNetworkPassphrase, clientKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + + _, err = VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + require.NoError(t, err) +} + +func TestVerifyChallengeTxThreshold_doesNotMatchHomeDomain(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + + _, err = VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"not", "going", "to", "match"}, threshold, signerSummary) + assert.EqualError(t, err, "operation key does not match any homeDomains passed (key=\"testanchor.stellar.org auth\", homeDomains=[not going to match])") +} + +func TestVerifyChallengeTxThreshold_doesVerifyHomeDomainFailure(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "will fail auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + + _, err = VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.EqualError(t, err, "operation key does not match any homeDomains passed (key=\"will fail auth\", homeDomains=[testanchor.stellar.org])") +} + +func TestVerifyChallengeTxThreshold_doesVerifyHomeDomainSuccess(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: 
serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + wantSigners := []string{ + clientKP.Address(), + } + + signers, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.Equal(t, nil, err) + assert.Equal(t, signers, wantSigners) +} + +func TestVerifyChallengeTxThreshold_allowsAdditionalManageDataOpsWithSourceAccountSetToServerAccount(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + op2 := ManageData{ + SourceAccount: txSource.AccountID, + Name: "a key", + Value: []byte("a value"), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + wantSigners := []string{ + clientKP.Address(), + } + + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.ElementsMatch(t, wantSigners, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxThreshold_disallowsAdditionalManageDataOpsWithoutSourceAccountSetToServerAccount(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + op2 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "a key", + Value: []byte("a value"), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "subsequent 
operations are unrecognized") +} + +func TestVerifyChallengeTxThreshold_disallowsAdditionalOpsOfOtherTypes(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + op2 := BumpSequence{ + SourceAccount: txSource.AccountID, + BumpTo: 0, + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "operation type should be manage_data") +} + +func TestVerifyChallengeTxThreshold_validWhenWebAuthDomainMissing(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + wantSigners := []string{ + clientKP.Address(), + } + + signersFound, err := VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.ElementsMatch(t, wantSigners, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxThreshold_invalidWebAuthDomainSourceAccount(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: clientKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + + _, err = VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.EqualError(t, 
err, `web auth domain operation must have server source account`) +} + +func TestVerifyChallengeTxThreshold_invalidWebAuthDomain(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.example.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + threshold := Threshold(1) + signerSummary := SignerSummary{ + clientKP.Address(): 1, + } + + _, err = VerifyChallengeTxThreshold(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, threshold, signerSummary) + assert.EqualError(t, err, `web auth domain operation value is "testwebauth.example.org" but expect "testwebauth.stellar.org"`) +} + +func TestVerifyChallengeTxSigners_invalidServer(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + clientKP, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "transaction not signed by "+serverKP.Address()) +} + +func TestVerifyChallengeTxSigners_validServerAndClientMasterKey(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.Equal(t, []string{clientKP.Address()}, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_invalidServerAndNoClient(t 
*testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "transaction not signed by "+clientKP.Address()) +} + +func TestVerifyChallengeTxSigners_invalidServerAndUnrecognizedClient(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + unrecognizedKP := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, unrecognizedKP, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "transaction not signed by "+clientKP.Address()) +} + +func TestVerifyChallengeTxSigners_validServerAndMultipleClientSigners(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address(), clientKP2.Address()) + assert.Equal(t, []string{clientKP.Address(), clientKP2.Address()}, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_validServerAndMultipleClientSignersReverseOrder(t *testing.T) { + serverKP := newKeypair0() + clientKP := 
newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP2, clientKP, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address(), clientKP2.Address()) + assert.Equal(t, []string{clientKP.Address(), clientKP2.Address()}, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_validServerAndClientSignersNotMasterKey(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP2.Address()) + assert.Equal(t, []string{clientKP2.Address()}, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_validServerAndClientSignersIgnoresServerSigner(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, serverKP.Address(), clientKP2.Address()) + assert.Equal(t, []string{clientKP2.Address()}, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_invalidServerNoClientSignersIgnoresServerSigner(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + clientKP2 := 
newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, serverKP.Address(), clientKP2.Address()) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "transaction not signed by "+clientKP2.Address()) +} + +func TestVerifyChallengeTxSigners_validServerAndClientSignersIgnoresDuplicateSigner(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP2.Address(), clientKP2.Address()) + assert.Equal(t, []string{clientKP2.Address()}, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_validIgnorePreauthTxHashAndXHash(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + clientKP2 := newKeypair2() + preauthTxHash := "TAQCSRX2RIDJNHFIFHWD63X7D7D6TRT5Y2S6E3TEMXTG5W3OECHZ2OG4" + xHash := "XDRPF6NZRR7EEVO7ESIWUDXHAOMM2QSKIQQBJK6I2FB7YKDZES5UCLWD" + unknownSignerType := "?ARPF6NZRR7EEVO7ESIWUDXHAOMM2QSKIQQBJK6I2FB7YKDZES5UCLWD" + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP2.Address(), preauthTxHash, xHash, unknownSignerType) + assert.Equal(t, []string{clientKP2.Address()}, 
signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_invalidServerAndClientSignersIgnoresDuplicateSignerInError(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address(), clientKP.Address()) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "transaction not signed by "+clientKP.Address()) +} + +func TestVerifyChallengeTxSigners_invalidServerAndClientSignersFailsDuplicateSignatures(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP2, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP2.Address()) + assert.Equal(t, []string{clientKP2.Address()}, signersFound) + assert.EqualError(t, err, "transaction has unrecognized signatures") +} + +func TestVerifyChallengeTxSigners_invalidServerAndClientSignersFailsSignerSeed(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + clientKP2 := newKeypair2() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP2, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP2.Seed()) + assert.Empty(t, signersFound) + 
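+ // A secret seed (S...) is not a usable signer address, so no verifiable signers are matched and an error is returned.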
assert.EqualError(t, err, "no verifiable signers provided, at least one G... address must be provided") +} + +func TestVerifyChallengeTxSigners_invalidNoSigners(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + _, err = VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}) + assert.EqualError(t, err, "no verifiable signers provided, at least one G... address must be provided") +} + +func TestVerifyChallengeTxSigners_doesVerifyHomeDomainFailure(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + _, err = VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"validation failed"}, clientKP.Address()) + assert.EqualError(t, err, "operation key does not match any homeDomains passed (key=\"testanchor.stellar.org auth\", homeDomains=[validation failed])") +} + +func TestVerifyChallengeTxSigners_matchesHomeDomain(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + _, err = tx.Base64() + require.NoError(t, err) + + signers := []string{clientKP.Address()} + tx, err = tx.Sign(network.TestNetworkPassphrase, clientKP) + require.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + + _, err = VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, 
signers...) + require.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_doesNotMatchHomeDomain(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(300), + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, serverKP) + assert.NoError(t, err) + _, err = tx.Base64() + require.NoError(t, err) + + signers := []string{clientKP.Address()} + tx, err = tx.Sign(network.TestNetworkPassphrase, clientKP) + require.NoError(t, err) + tx64, err := tx.Base64() + require.NoError(t, err) + + _, err = VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"not", "going", "to", "match"}, signers...) + assert.EqualError(t, err, "operation key does not match any homeDomains passed (key=\"testanchor.stellar.org auth\", homeDomains=[not going to match])") +} + +func TestVerifyChallengeTxSigners_doesVerifyHomeDomainSuccess(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + _, err = VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.Equal(t, nil, err) +} + +func TestVerifyChallengeTxSigners_allowsAdditionalManageDataOpsWithSourceAccountSetToServerAccount(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + op2 := ManageData{ + SourceAccount: txSource.AccountID, + Name: "a key", + Value: []byte("a value"), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, 
"testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.Equal(t, []string{clientKP.Address()}, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_disallowsAdditionalManageDataOpsWithoutSourceAccountSetToServerAccount(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + op2 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "a key", + Value: []byte("a value"), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "subsequent operations are unrecognized") +} + +func TestVerifyChallengeTxSigners_disallowsAdditionalOpsOfOtherTypes(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + + op1 := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + op2 := BumpSequence{ + SourceAccount: txSource.AccountID, + BumpTo: 0, + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op1, &op2, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.Empty(t, signersFound) + assert.EqualError(t, err, "operation type should be manage_data") +} + +func TestVerifyChallengeTxSigners_validWhenWebAuthDomainMissing(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + wantSigners := []string{clientKP.Address()} + + signersFound, err := VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + 
assert.ElementsMatch(t, wantSigners, signersFound) + assert.NoError(t, err) +} + +func TestVerifyChallengeTxSigners_invalidWebAuthDomainSourceAccount(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: clientKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.stellar.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + _, err = VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.EqualError(t, err, `web auth domain operation must have server source account`) +} + +func TestVerifyChallengeTxSigners_invalidWebAuthDomain(t *testing.T) { + serverKP := newKeypair0() + clientKP := newKeypair1() + txSource := NewSimpleAccount(serverKP.Address(), -1) + op := ManageData{ + SourceAccount: clientKP.Address(), + Name: "testanchor.stellar.org auth", + Value: []byte(base64.StdEncoding.EncodeToString(make([]byte, 48))), + } + webAuthDomainOp := ManageData{ + SourceAccount: serverKP.Address(), + Name: "web_auth_domain", + Value: []byte("testwebauth.example.org"), + } + tx64, err := newSignedTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&op, &webAuthDomainOp}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + network.TestNetworkPassphrase, + serverKP, clientKP, + ) + assert.NoError(t, err) + + _, err = VerifyChallengeTxSigners(tx64, serverKP.Address(), network.TestNetworkPassphrase, "testwebauth.stellar.org", []string{"testanchor.stellar.org"}, clientKP.Address()) + assert.EqualError(t, err, `web auth domain operation value is "testwebauth.example.org" but expect "testwebauth.stellar.org"`) +} + +func TestVerifyTxSignatureUnsignedTx(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + txSource := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewTimeout(1000), + }, + ) + assert.NoError(t, err) + + err = verifyTxSignature(tx, network.TestNetworkPassphrase, kp0.Address()) + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "transaction not signed by GDQNY3PBOJOKYZSRMK2S7LHHGWZIUISD4QORETLMXEWXBI7KFZZMKTL3") + } +} + +func TestVerifyTxSignatureSingle(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + txSource := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: 
[]Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + err = verifyTxSignature(tx, network.TestNetworkPassphrase, kp0.Address()) + assert.NoError(t, err) +} + +func TestVerifyTxSignatureMultiple(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + txSource := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + // verify tx with multiple signatures + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0, kp1) + assert.NoError(t, err) + err = verifyTxSignature(tx, network.TestNetworkPassphrase, kp0.Address()) + assert.NoError(t, err) + err = verifyTxSignature(tx, network.TestNetworkPassphrase, kp1.Address()) + assert.NoError(t, err) +} + +func TestVerifyTxSignatureInvalid(t *testing.T) { + kp0 := newKeypair0() + kp1 := newKeypair1() + txSource := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + SourceAccount: kp1.Address(), + } + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &txSource, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + // verify invalid signer + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0, kp1) + assert.NoError(t, err) + err = verifyTxSignature(tx, network.TestNetworkPassphrase, "GATBMIXTHXYKSUZSZUEJKACZ2OS2IYUWP2AIF3CA32PIDLJ67CH6Y5UY") + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "transaction not signed by GATBMIXTHXYKSUZSZUEJKACZ2OS2IYUWP2AIF3CA32PIDLJ67CH6Y5UY") + } +} + +func TestClaimableBalanceIds(t *testing.T) { + aKeys := keypair.MustParseFull("SC4REDCJNPFAYW4SMH44KNGO5JRDQ72G4HE6GILRBSICI3M2IUOC7AAL") + A := "MDUJNO4HVE4YCQHV7LINPWVDQJFSAPHHUNSTT64YRBCCRZ5UYUXAWAAAAAAAAAAE2IUOE" + B := "GCACCFMIWJAHUUASSE2WC7V6VVDLYRLSJYZ3DJEXCG523FSHTNII6KOG" + aMuxedAccount := NewSimpleAccount(A, int64(5894915628204034)) + + // Create the claimable balance operation and build it into a transaction. 
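+ // The balance ID is derived locally via tx.ClaimableBalanceID(0) below and + // cross-checked against the ID reported in a decoded operation result, so + // no network submission is needed.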
+ claimableBalanceEntry := CreateClaimableBalance{ + Destinations: []Claimant{ + NewClaimant(B, &UnconditionalPredicate), + }, + Asset: NativeAsset{}, + Amount: "420", + } + + // Build, sign, and submit the transaction + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &aMuxedAccount, + IncrementSequenceNum: true, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + Operations: []Operation{&claimableBalanceEntry}, + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, aKeys) + assert.NoError(t, err) + calculatedBalanceId, err := tx.ClaimableBalanceID(0) + assert.NoError(t, err) + + var txResult xdr.TransactionResult + resultXdr := "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAOAAAAAAAAAABw2JZZYIt4n/WXKcnDow3mbTBMPrOnldetgvGUlpTSEQAAAAA=" + err = xdr.SafeUnmarshalBase64(resultXdr, &txResult) + assert.NoError(t, err) + + results, ok := txResult.OperationResults() + assert.True(t, ok) + + // We look at the first result since our first (and only) operation in the + // transaction was the CreateClaimableBalanceOp. + operationResult := results[0].MustTr().CreateClaimableBalanceResult + actualBalanceId, err := xdr.MarshalHex(operationResult.BalanceId) + assert.NoError(t, err) + + assert.Equal(t, actualBalanceId, calculatedBalanceId) +} + +func TestTransaction_marshalUnmarshalText(t *testing.T) { + k := keypair.MustRandom() + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &SimpleAccount{AccountID: k.Address(), Sequence: 1}, + IncrementSequenceNum: false, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + Operations: []Operation{&BumpSequence{BumpTo: 2}}, + }, + ) + assert.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, k) + assert.NoError(t, err) + + b64, err := tx.Base64() + require.NoError(t, err) + t.Log("tx base64:", b64) + + marshaled, err := tx.MarshalText() + require.NoError(t, err) + t.Log("tx marshaled text:", string(marshaled)) + assert.Equal(t, b64, string(marshaled)) + + tx2 := &Transaction{} + err = tx2.UnmarshalText(marshaled) + require.NoError(t, err) + assert.Equal(t, tx, tx2) + + err = (&FeeBumpTransaction{}).UnmarshalText(marshaled) + assert.EqualError(t, err, "transaction envelope unmarshaled into Transaction is not a transaction") +} + +func TestFeeBumpTransaction_marshalUnmarshalText(t *testing.T) { + k := keypair.MustRandom() + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &SimpleAccount{AccountID: k.Address(), Sequence: 1}, + IncrementSequenceNum: false, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + Operations: []Operation{&BumpSequence{BumpTo: 2}}, + }, + ) + require.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, k) + require.NoError(t, err) + + fbtx, err := NewFeeBumpTransaction(FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: k.Address(), + BaseFee: MinBaseFee, + }) + require.NoError(t, err) + + b64, err := fbtx.Base64() + require.NoError(t, err) + t.Log("tx base64:", b64) + + marshaled, err := fbtx.MarshalText() + require.NoError(t, err) + t.Log("tx marshaled text:", string(marshaled)) + assert.Equal(t, b64, string(marshaled)) + + fbtx2 := &FeeBumpTransaction{} + err = fbtx2.UnmarshalText(marshaled) + require.NoError(t, err) + assert.Equal(t, fbtx, fbtx2) + + err = (&Transaction{}).UnmarshalText(marshaled) + assert.EqualError(t, err, "transaction envelope unmarshaled into FeeBumpTransaction is not a fee bump transaction") +} + +func TestGenericTransaction_marshalUnmarshalText(t *testing.T) { + k := keypair.MustRandom() + + 
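+ // A GenericTransaction may wrap a Transaction, a FeeBumpTransaction, or nothing at all; each case is exercised below.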
// GenericTransaction containing nothing. + gtx := &GenericTransaction{} + _, err := gtx.MarshalText() + assert.EqualError(t, err, "unable to marshal empty GenericTransaction") + + // GenericTransaction containing a Transaction. + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &SimpleAccount{AccountID: k.Address(), Sequence: 1}, + IncrementSequenceNum: false, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + Operations: []Operation{&BumpSequence{BumpTo: 2}}, + }, + ) + require.NoError(t, err) + tx, err = tx.Sign(network.TestNetworkPassphrase, k) + require.NoError(t, err) + txb64, err := tx.Base64() + require.NoError(t, err) + t.Log("tx base64:", txb64) + + gtx = tx.ToGenericTransaction() + marshaled, err := gtx.MarshalText() + require.NoError(t, err) + t.Log("tx marshaled text:", string(marshaled)) + assert.Equal(t, txb64, string(marshaled)) + gtx2 := &GenericTransaction{} + err = gtx2.UnmarshalText(marshaled) + require.NoError(t, err) + assert.Equal(t, gtx, gtx2) + + // GenericTransaction containing a FeeBumpTransaction. + fbtx, err := NewFeeBumpTransaction(FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: k.Address(), + BaseFee: MinBaseFee, + }) + require.NoError(t, err) + fbtxb64, err := fbtx.Base64() + require.NoError(t, err) + t.Log("fbtx base64:", fbtxb64) + + fbgtx := fbtx.ToGenericTransaction() + marshaled, err = fbgtx.MarshalText() + require.NoError(t, err) + t.Log("fbtx marshaled text:", string(marshaled)) + assert.Equal(t, fbtxb64, string(marshaled)) + fbgtx2 := &GenericTransaction{} + err = fbgtx2.UnmarshalText(marshaled) + require.NoError(t, err) + assert.Equal(t, fbgtx, fbgtx2) +} + +func TestGenericTransaction_HashHex(t *testing.T) { + kp0 := newKeypair0() + sourceAccount := NewSimpleAccount(kp0.Address(), int64(9605939170639897)) + + createAccount := CreateAccount{ + Destination: "GCCOBXW2XQNUSL467IEILE6MMCNRR66SSVL4YQADUNYYNUVREF3FIV2Z", + Amount: "10", + } + + tx, err := NewTransaction( + TransactionParams{ + SourceAccount: &sourceAccount, + IncrementSequenceNum: true, + Operations: []Operation{&createAccount}, + BaseFee: MinBaseFee, + Timebounds: NewInfiniteTimeout(), + }, + ) + assert.NoError(t, err) + + tx, err = tx.Sign(network.TestNetworkPassphrase, kp0) + assert.NoError(t, err) + + gtx := tx.ToGenericTransaction() + + expected := "1b3905ba8c3c0ecc68ae812f2d77f27c697195e8daf568740fc0f5662f65f759" + hashHex, err := tx.HashHex(network.TestNetworkPassphrase) + require.NoError(t, err) + assert.Equal(t, expected, hashHex) + hashHex, err = gtx.HashHex(network.TestNetworkPassphrase) + require.NoError(t, err) + assert.Equal(t, expected, hashHex) + + fbtx, err := NewFeeBumpTransaction(FeeBumpTransactionParams{ + Inner: tx, + FeeAccount: kp0.Address(), + BaseFee: MinBaseFee, + }) + require.NoError(t, err) + + gtx = fbtx.ToGenericTransaction() + + expected = "9a194faa93e4b6efcd8da1c4b25797c666d596f3262d7db43b794b6f2db8d767" + hashHex, err = fbtx.HashHex(network.TestNetworkPassphrase) + require.NoError(t, err) + assert.Equal(t, expected, hashHex) + hashHex, err = gtx.HashHex(network.TestNetworkPassphrase) + require.NoError(t, err) + assert.Equal(t, expected, hashHex) +} diff --git a/txnbuild/trust_line_asset.go b/txnbuild/trust_line_asset.go new file mode 100644 index 0000000000..c415918c1a --- /dev/null +++ b/txnbuild/trust_line_asset.go @@ -0,0 +1,113 @@ +package txnbuild + +import ( + "github.com/stellar/go/support/errors" + "github.com/stellar/go/xdr" +) + +// TrustLineAsset represents a Stellar trust line asset. 
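+// A trust line asset is either a regular asset (wrapped by TrustLineAssetWrapper) +// or a share in a liquidity pool, identified by its pool ID.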
+type TrustLineAsset interface { + BasicAsset + GetLiquidityPoolID() (LiquidityPoolId, bool) + ToXDR() (xdr.TrustLineAsset, error) +} + +// LiquidityPoolShareTrustLineAsset represents shares in a liquidity pool on the Stellar network. +type LiquidityPoolShareTrustLineAsset struct { + LiquidityPoolID LiquidityPoolId +} + +// GetType for LiquidityPoolShareTrustLineAsset returns AssetTypePoolShare. +func (lpsa LiquidityPoolShareTrustLineAsset) GetType() (AssetType, error) { + return AssetTypePoolShare, nil +} + +// IsNative for LiquidityPoolShareTrustLineAsset returns false (this is not an XLM asset). +func (lpsa LiquidityPoolShareTrustLineAsset) IsNative() bool { return false } + +// GetCode for LiquidityPoolShareTrustLineAsset returns an empty string (pool shares have no asset code). +func (lpsa LiquidityPoolShareTrustLineAsset) GetCode() string { return "" } + +// GetIssuer for LiquidityPoolShareTrustLineAsset returns an empty string (pool shares have no issuer). +func (lpsa LiquidityPoolShareTrustLineAsset) GetIssuer() string { return "" } + +// GetLiquidityPoolID for LiquidityPoolShareTrustLineAsset returns the pool id. +func (lpsa LiquidityPoolShareTrustLineAsset) GetLiquidityPoolID() (LiquidityPoolId, bool) { + return lpsa.LiquidityPoolID, true +} + +// ToXDR for LiquidityPoolShareTrustLineAsset produces the corresponding xdr.TrustLineAsset. +func (lpsa LiquidityPoolShareTrustLineAsset) ToXDR() (xdr.TrustLineAsset, error) { + xdrPoolId, err := lpsa.LiquidityPoolID.ToXDR() + if err != nil { + return xdr.TrustLineAsset{}, errors.Wrap(err, "failed to build XDR liquidity pool id") + } + return xdr.TrustLineAsset{LiquidityPoolId: &xdrPoolId}, nil +} + +// ToAsset for LiquidityPoolShareTrustLineAsset returns an error. +func (lpsa LiquidityPoolShareTrustLineAsset) ToAsset() (Asset, error) { + return nil, errors.New("Cannot transform LiquidityPoolShare into Asset") +} + +// MustToAsset for LiquidityPoolShareTrustLineAsset panics. +func (lpsa LiquidityPoolShareTrustLineAsset) MustToAsset() Asset { + panic("Cannot transform LiquidityPoolShare into Asset") +} + +// ToChangeTrustAsset for LiquidityPoolShareTrustLineAsset returns an error. +func (lpsa LiquidityPoolShareTrustLineAsset) ToChangeTrustAsset() (ChangeTrustAsset, error) { + return nil, errors.New("Cannot transform LiquidityPoolShare into ChangeTrustAsset") +} + +// MustToChangeTrustAsset for LiquidityPoolShareTrustLineAsset panics. +func (lpsa LiquidityPoolShareTrustLineAsset) MustToChangeTrustAsset() ChangeTrustAsset { + panic("Cannot transform LiquidityPoolShare into ChangeTrustAsset") +} + +// ToTrustLineAsset for LiquidityPoolShareTrustLineAsset returns itself unchanged. +func (lpsa LiquidityPoolShareTrustLineAsset) ToTrustLineAsset() (TrustLineAsset, error) { + return lpsa, nil +} + +// MustToTrustLineAsset for LiquidityPoolShareTrustLineAsset returns itself unchanged. 
+func (lpsa LiquidityPoolShareTrustLineAsset) MustToTrustLineAsset() TrustLineAsset { + return lpsa +} + +func assetFromTrustLineAssetXDR(xAsset xdr.TrustLineAsset) (TrustLineAsset, error) { + if xAsset.Type == xdr.AssetTypeAssetTypePoolShare { + if xAsset.LiquidityPoolId == nil { + return nil, errors.New("invalid XDR liquidity pool id") + } + poolId, err := liquidityPoolIdFromXDR(*xAsset.LiquidityPoolId) + if err != nil { + return nil, errors.Wrap(err, "invalid XDR liquidity pool id") + } + return LiquidityPoolShareTrustLineAsset{LiquidityPoolID: poolId}, nil + } + a, err := assetFromXDR(xAsset.ToAsset()) + if err != nil { + return nil, err + } + return TrustLineAssetWrapper{a}, nil +} + +// TrustLineAssetWrapper wraps a native/credit Asset so it generates xdr to be used in a trust line operation. +type TrustLineAssetWrapper struct { + Asset +} + +// GetLiquidityPoolID for TrustLineAssetWrapper returns false. +func (tlaw TrustLineAssetWrapper) GetLiquidityPoolID() (LiquidityPoolId, bool) { + return LiquidityPoolId{}, false +} + +// ToXDR for TrustLineAssetWrapper generates the xdr.TrustLineAsset. +func (tlaw TrustLineAssetWrapper) ToXDR() (xdr.TrustLineAsset, error) { + x, err := tlaw.Asset.ToXDR() + if err != nil { + return xdr.TrustLineAsset{}, err + } + return x.ToTrustLineAsset(), nil +} diff --git a/xdr/Stellar-SCP.x b/xdr/Stellar-SCP.x index 1234776f1b..7ec99216f8 100644 --- a/xdr/Stellar-SCP.x +++ b/xdr/Stellar-SCP.x @@ -80,7 +80,7 @@ struct SCPEnvelope struct SCPQuorumSet { uint32 threshold; - PublicKey validators<>; + NodeID validators<>; SCPQuorumSet innerSets<>; }; } diff --git a/xdr/Stellar-ledger-entries.x b/xdr/Stellar-ledger-entries.x index ef6a169ac8..885cf2d473 100644 --- a/xdr/Stellar-ledger-entries.x +++ b/xdr/Stellar-ledger-entries.x @@ -11,14 +11,46 @@ typedef PublicKey AccountID; typedef opaque Thresholds[4]; typedef string string32<32>; typedef string string64<64>; -typedef uint64 SequenceNumber; -typedef opaque DataValue<64>; +typedef int64 SequenceNumber; +typedef uint64 TimePoint; +typedef opaque DataValue<64>; +typedef Hash PoolID; // SHA256(LiquidityPoolParameters) + +// 1-4 alphanumeric characters right-padded with 0 bytes +typedef opaque AssetCode4[4]; + +// 5-12 alphanumeric characters right-padded with 0 bytes +typedef opaque AssetCode12[12]; enum AssetType { ASSET_TYPE_NATIVE = 0, ASSET_TYPE_CREDIT_ALPHANUM4 = 1, - ASSET_TYPE_CREDIT_ALPHANUM12 = 2 + ASSET_TYPE_CREDIT_ALPHANUM12 = 2, + ASSET_TYPE_POOL_SHARE = 3 +}; + +union AssetCode switch (AssetType type) +{ +case ASSET_TYPE_CREDIT_ALPHANUM4: + AssetCode4 assetCode4; + +case ASSET_TYPE_CREDIT_ALPHANUM12: + AssetCode12 assetCode12; + + // add other asset types here in the future +}; + +struct AlphaNum4 +{ + AssetCode4 assetCode; + AccountID issuer; +}; + +struct AlphaNum12 +{ + AssetCode12 assetCode; + AccountID issuer; }; union Asset switch (AssetType type) @@ -27,18 +59,10 @@ case ASSET_TYPE_NATIVE: // Not credit void; case ASSET_TYPE_CREDIT_ALPHANUM4: - struct - { - opaque assetCode[4]; // 1 to 4 characters - AccountID issuer; - } alphaNum4; + AlphaNum4 alphaNum4; case ASSET_TYPE_CREDIT_ALPHANUM12: - struct - { - opaque assetCode[12]; // 5 to 12 characters - AccountID issuer; - } alphaNum12; + AlphaNum12 alphaNum12; // add other asset types here in the future }; @@ -50,6 +74,12 @@ struct Price int32 d; // denominator }; +struct Liabilities +{ + int64 buying; + int64 selling; +}; + // the 'Thresholds' type is packed uint8_t values // defined by these indexes enum ThresholdIndexes @@ -65,13 +95,15 @@ enum 
LedgerEntryType ACCOUNT = 0, TRUSTLINE = 1, OFFER = 2, - DATA = 3 + DATA = 3, + CLAIMABLE_BALANCE = 4, + LIQUIDITY_POOL = 5 }; struct Signer { - AccountID pubKey; - uint32 weight; // really only need 1byte + SignerKey key; + uint32 weight; // really only need 1 byte }; enum AccountFlags @@ -85,7 +117,48 @@ enum AccountFlags // otherwise, authorization cannot be revoked AUTH_REVOCABLE_FLAG = 0x2, // Once set, causes all AUTH_* flags to be read-only - AUTH_IMMUTABLE_FLAG = 0x4 + AUTH_IMMUTABLE_FLAG = 0x4, + // Trustlines are created with clawback enabled set to "true", + // and claimable balances created from those trustlines are created + // with clawback enabled set to "true" + AUTH_CLAWBACK_ENABLED_FLAG = 0x8 +}; + +// mask for all valid flags +const MASK_ACCOUNT_FLAGS = 0x7; +const MASK_ACCOUNT_FLAGS_V17 = 0xF; + +// maximum number of signers +const MAX_SIGNERS = 20; + +typedef AccountID* SponsorshipDescriptor; + +struct AccountEntryExtensionV2 +{ + uint32 numSponsored; + uint32 numSponsoring; + SponsorshipDescriptor signerSponsoringIDs; + + union switch (int v) + { + case 0: + void; + } + ext; +}; + +struct AccountEntryExtensionV1 +{ + Liabilities liabilities; + + union switch (int v) + { + case 0: + void; + case 2: + AccountEntryExtensionV2 v2; + } + ext; }; /* AccountEntry @@ -96,7 +169,6 @@ enum AccountFlags Other ledger entries created require an account. */ - struct AccountEntry { AccountID accountID; // master public key for this account @@ -113,13 +185,15 @@ struct AccountEntry // thresholds stores unsigned bytes: [weight of master|low|medium|high] Thresholds thresholds; - Signer signers<20>; // possible signers for this account + Signer signers; // possible signers for this account // reserved for future use union switch (int v) { case 0: void; + case 1: + AccountEntryExtensionV1 v1; } ext; }; @@ -133,15 +207,60 @@ struct AccountEntry enum TrustLineFlags { // issuer has authorized account to perform transactions with its credit - AUTHORIZED_FLAG = 1 + AUTHORIZED_FLAG = 1, + // issuer has authorized account to maintain and reduce liabilities for its + // credit + AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG = 2, + // issuer has specified that it may clawback its credit, and that claimable + // balances created with its credit may also be clawed back + TRUSTLINE_CLAWBACK_ENABLED_FLAG = 4 +}; + +// mask for all trustline flags +const MASK_TRUSTLINE_FLAGS = 1; +const MASK_TRUSTLINE_FLAGS_V13 = 3; +const MASK_TRUSTLINE_FLAGS_V17 = 7; + +enum LiquidityPoolType +{ + LIQUIDITY_POOL_CONSTANT_PRODUCT = 0 +}; + +union TrustLineAsset switch (AssetType type) +{ +case ASSET_TYPE_NATIVE: // Not credit + void; + +case ASSET_TYPE_CREDIT_ALPHANUM4: + AlphaNum4 alphaNum4; + +case ASSET_TYPE_CREDIT_ALPHANUM12: + AlphaNum12 alphaNum12; + +case ASSET_TYPE_POOL_SHARE: + PoolID liquidityPoolID; + + // add other asset types here in the future +}; + +struct TrustLineEntryExtensionV2 +{ + int32 liquidityPoolUseCount; + + union switch (int v) + { + case 0: + void; + } + ext; }; struct TrustLineEntry { - AccountID accountID; // account this trustline belongs to - Asset asset; // type of asset (with issuer) - int64 balance; // how much of this asset the user has. - // Asset defines the unit for this; + AccountID accountID; // account this trustline belongs to + TrustLineAsset asset; // type of asset (with issuer) + int64 balance; // how much of this asset the user has. 
+ // Asset defines the unit for this; int64 limit; // balance cannot be above this uint32 flags; // see TrustLineFlags @@ -151,6 +270,20 @@ struct TrustLineEntry { case 0: void; + case 1: + struct + { + Liabilities liabilities; + + union switch (int v) + { + case 0: + void; + case 2: + TrustLineEntryExtensionV2 v2; + } + ext; + } v1; } ext; }; @@ -161,6 +294,9 @@ enum OfferEntryFlags PASSIVE_FLAG = 1 }; +// Mask for OfferEntry flags +const MASK_OFFERENTRY_FLAGS = 1; + /* OfferEntry An offer is the building block of the offer book, they are automatically claimed by payments when the price set by the owner is met. @@ -171,7 +307,7 @@ enum OfferEntryFlags struct OfferEntry { AccountID sellerID; - uint64 offerID; + int64 offerID; Asset selling; // A Asset buying; // B int64 amount; // amount of A @@ -211,6 +347,143 @@ struct DataEntry ext; }; +enum ClaimPredicateType +{ + CLAIM_PREDICATE_UNCONDITIONAL = 0, + CLAIM_PREDICATE_AND = 1, + CLAIM_PREDICATE_OR = 2, + CLAIM_PREDICATE_NOT = 3, + CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME = 4, + CLAIM_PREDICATE_BEFORE_RELATIVE_TIME = 5 +}; + +union ClaimPredicate switch (ClaimPredicateType type) +{ +case CLAIM_PREDICATE_UNCONDITIONAL: + void; +case CLAIM_PREDICATE_AND: + ClaimPredicate andPredicates<2>; +case CLAIM_PREDICATE_OR: + ClaimPredicate orPredicates<2>; +case CLAIM_PREDICATE_NOT: + ClaimPredicate* notPredicate; +case CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: + int64 absBefore; // Predicate will be true if closeTime < absBefore +case CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: + int64 relBefore; // Seconds since closeTime of the ledger in which the + // ClaimableBalanceEntry was created +}; + +enum ClaimantType +{ + CLAIMANT_TYPE_V0 = 0 +}; + +union Claimant switch (ClaimantType type) +{ +case CLAIMANT_TYPE_V0: + struct + { + AccountID destination; // The account that can use this condition + ClaimPredicate predicate; // Claimable if predicate is true + } v0; +}; + +enum ClaimableBalanceIDType +{ + CLAIMABLE_BALANCE_ID_TYPE_V0 = 0 +}; + +union ClaimableBalanceID switch (ClaimableBalanceIDType type) +{ +case CLAIMABLE_BALANCE_ID_TYPE_V0: + Hash v0; +}; + +enum ClaimableBalanceFlags +{ + // If set, the issuer account of the asset held by the claimable balance may + // clawback the claimable balance + CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG = 0x1 +}; + +const MASK_CLAIMABLE_BALANCE_FLAGS = 0x1; + +struct ClaimableBalanceEntryExtensionV1 +{ + union switch (int v) + { + case 0: + void; + } + ext; + + uint32 flags; // see ClaimableBalanceFlags +}; + +struct ClaimableBalanceEntry +{ + // Unique identifier for this ClaimableBalanceEntry + ClaimableBalanceID balanceID; + + // List of claimants with associated predicate + Claimant claimants<10>; + + // Any asset including native + Asset asset; + + // Amount of asset + int64 amount; + + // reserved for future use + union switch (int v) + { + case 0: + void; + case 1: + ClaimableBalanceEntryExtensionV1 v1; + } + ext; +}; + +struct LiquidityPoolConstantProductParameters +{ + Asset assetA; // assetA < assetB + Asset assetB; + int32 fee; // Fee is in basis points, so the actual rate is (fee/100)% +}; + +struct LiquidityPoolEntry +{ + PoolID liquidityPoolID; + + union switch (LiquidityPoolType type) + { + case LIQUIDITY_POOL_CONSTANT_PRODUCT: + struct + { + LiquidityPoolConstantProductParameters params; + + int64 reserveA; // amount of A in the pool + int64 reserveB; // amount of B in the pool + int64 totalPoolShares; // total number of pool shares issued + int64 poolSharesTrustLineCount; // number of trust lines for the 
associated pool shares + } constantProduct; + } + body; +}; + +struct LedgerEntryExtensionV1 +{ + SponsorshipDescriptor sponsoringID; + + union switch (int v) + { + case 0: + void; + } + ext; +}; struct LedgerEntry { @@ -226,6 +499,10 @@ struct LedgerEntry OfferEntry offer; case DATA: DataEntry data; + case CLAIMABLE_BALANCE: + ClaimableBalanceEntry claimableBalance; + case LIQUIDITY_POOL: + LiquidityPoolEntry liquidityPool; } data; @@ -234,17 +511,66 @@ struct LedgerEntry { case 0: void; + case 1: + LedgerEntryExtensionV1 v1; } ext; }; +union LedgerKey switch (LedgerEntryType type) +{ +case ACCOUNT: + struct + { + AccountID accountID; + } account; + +case TRUSTLINE: + struct + { + AccountID accountID; + TrustLineAsset asset; + } trustLine; + +case OFFER: + struct + { + AccountID sellerID; + int64 offerID; + } offer; + +case DATA: + struct + { + AccountID accountID; + string64 dataName; + } data; + +case CLAIMABLE_BALANCE: + struct + { + ClaimableBalanceID balanceID; + } claimableBalance; + +case LIQUIDITY_POOL: + struct + { + PoolID liquidityPoolID; + } liquidityPool; +}; + // list of all envelope types used in the application // those are prefixes used when building signatures for // the respective envelopes enum EnvelopeType { + ENVELOPE_TYPE_TX_V0 = 0, ENVELOPE_TYPE_SCP = 1, ENVELOPE_TYPE_TX = 2, - ENVELOPE_TYPE_AUTH = 3 + ENVELOPE_TYPE_AUTH = 3, + ENVELOPE_TYPE_SCPVALUE = 4, + ENVELOPE_TYPE_TX_FEE_BUMP = 5, + ENVELOPE_TYPE_OP_ID = 6, + ENVELOPE_TYPE_POOL_REVOKE_OP_ID = 7 }; } diff --git a/xdr/Stellar-ledger.x b/xdr/Stellar-ledger.x index 42ce99f4f5..84b84cbf77 100644 --- a/xdr/Stellar-ledger.x +++ b/xdr/Stellar-ledger.x @@ -2,6 +2,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +%#include "xdr/Stellar-SCP.h" %#include "xdr/Stellar-transaction.h" namespace stellar @@ -9,12 +10,24 @@ namespace stellar typedef opaque UpgradeType<128>; +enum StellarValueType +{ + STELLAR_VALUE_BASIC = 0, + STELLAR_VALUE_SIGNED = 1 +}; + +struct LedgerCloseValueSignature +{ + NodeID nodeID; // which node introduced the value + Signature signature; // nodeID's signature +}; + /* StellarValue is the value used by SCP to reach consensus on a given ledger -*/ + */ struct StellarValue { - Hash txSetHash; // transaction set to apply to previous ledger - uint64 closeTime; // network close time + Hash txSetHash; // transaction set to apply to previous ledger + TimePoint closeTime; // network close time // upgrades to apply to the previous ledger (usually empty) // this is a vector of encoded 'LedgerUpgrade' so that nodes can drop @@ -24,6 +37,29 @@ struct StellarValue UpgradeType upgrades<6>; // reserved for future use + union switch (StellarValueType v) + { + case STELLAR_VALUE_BASIC: + void; + case STELLAR_VALUE_SIGNED: + LedgerCloseValueSignature lcValueSignature; + } + ext; +}; + +const MASK_LEDGER_HEADER_FLAGS = 0x7; + +enum LedgerHeaderFlags +{ + DISABLE_LIQUIDITY_POOL_TRADING_FLAG = 0x1, + DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG = 0x2, + DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG = 0x4 +}; + +struct LedgerHeaderExtensionV1 +{ + uint32 flags; // LedgerHeaderFlags + union switch (int v) { case 0: @@ -34,7 +70,7 @@ struct StellarValue /* The LedgerHeader is the highest level structure representing the * state of a ledger, cryptographically linked to previous ledgers. 
-*/ + */ struct LedgerHeader { uint32 ledgerVersion; // the protocol version of the ledger @@ -69,6 +105,8 @@ struct LedgerHeader { case 0: void; + case 1: + LedgerHeaderExtensionV1 v1; } ext; }; @@ -82,7 +120,9 @@ enum LedgerUpgradeType { LEDGER_UPGRADE_VERSION = 1, LEDGER_UPGRADE_BASE_FEE = 2, - LEDGER_UPGRADE_MAX_TX_SET_SIZE = 3 + LEDGER_UPGRADE_MAX_TX_SET_SIZE = 3, + LEDGER_UPGRADE_BASE_RESERVE = 4, + LEDGER_UPGRADE_FLAGS = 5 }; union LedgerUpgrade switch (LedgerUpgradeType type) @@ -93,62 +133,55 @@ case LEDGER_UPGRADE_BASE_FEE: uint32 newBaseFee; // update baseFee case LEDGER_UPGRADE_MAX_TX_SET_SIZE: uint32 newMaxTxSetSize; // update maxTxSetSize +case LEDGER_UPGRADE_BASE_RESERVE: + uint32 newBaseReserve; // update baseReserve +case LEDGER_UPGRADE_FLAGS: + uint32 newFlags; // update flags }; /* Entries used to define the bucket list */ - -union LedgerKey switch (LedgerEntryType type) +enum BucketEntryType { -case ACCOUNT: - struct - { - AccountID accountID; - } account; - -case TRUSTLINE: - struct - { - AccountID accountID; - Asset asset; - } trustLine; - -case OFFER: - struct - { - AccountID sellerID; - uint64 offerID; - } offer; - -case DATA: - struct - { - AccountID accountID; - string64 dataName; - } data; + METAENTRY = + -1, // At-and-after protocol 11: bucket metadata, should come first. + LIVEENTRY = 0, // Before protocol 11: created-or-updated; + // At-and-after protocol 11: only updated. + DEADENTRY = 1, + INITENTRY = 2 // At-and-after protocol 11: only created. }; -enum BucketEntryType +struct BucketMetadata { - LIVEENTRY = 0, - DEADENTRY = 1 + // Indicates the protocol version used to create / merge this bucket. + uint32 ledgerVersion; + + // reserved for future use + union switch (int v) + { + case 0: + void; + } + ext; }; union BucketEntry switch (BucketEntryType type) { case LIVEENTRY: +case INITENTRY: LedgerEntry liveEntry; case DEADENTRY: LedgerKey deadEntry; +case METAENTRY: + BucketMetadata metaEntry; }; // Transaction sets are the unit used by SCP to decide on transitions // between ledgers -const MAX_TX_PER_LEDGER = 5000; struct TransactionSet { Hash previousLedgerHash; - TransactionEnvelope txs; + TransactionEnvelope txs<>; }; struct TransactionResultPair @@ -160,7 +193,7 @@ struct TransactionResultPair // TransactionResultSet is used to recover results between ledgers struct TransactionResultSet { - TransactionResultPair results; + TransactionResultPair results<>; }; // Entries below are used in the historical subsystem @@ -262,9 +295,72 @@ struct OperationMeta LedgerEntryChanges changes; }; +struct TransactionMetaV1 +{ + LedgerEntryChanges txChanges; // tx level changes if any + OperationMeta operations<>; // meta for each operation +}; + +struct TransactionMetaV2 +{ + LedgerEntryChanges txChangesBefore; // tx level changes before operations + // are applied if any + OperationMeta operations<>; // meta for each operation + LedgerEntryChanges txChangesAfter; // tx level changes after operations are + // applied if any +}; + +// this is the meta produced when applying transactions +// it does not include pre-apply updates such as fees union TransactionMeta switch (int v) { case 0: OperationMeta operations<>; +case 1: + TransactionMetaV1 v1; +case 2: + TransactionMetaV2 v2; +}; + +// This struct groups together changes on a per transaction basis +// note however that fees and transaction application are done in separate +// phases +struct TransactionResultMeta +{ + TransactionResultPair result; + LedgerEntryChanges feeProcessing; + TransactionMeta 
txApplyProcessing; +}; + +// this represents a single upgrade that was performed as part of a ledger +// upgrade +struct UpgradeEntryMeta +{ + LedgerUpgrade upgrade; + LedgerEntryChanges changes; +}; + +struct LedgerCloseMetaV0 +{ + LedgerHeaderHistoryEntry ledgerHeader; + // NB: txSet is sorted in "Hash order" + TransactionSet txSet; + + // NB: transactions are sorted in apply order here + // fees for all transactions are processed first + // followed by applying transactions + TransactionResultMeta txProcessing<>; + + // upgrades are applied last + UpgradeEntryMeta upgradesProcessing<>; + + // other misc information attached to the ledger close + SCPHistoryEntry scpInfo<>; +}; + +union LedgerCloseMeta switch (int v) +{ +case 0: + LedgerCloseMetaV0 v0; }; } diff --git a/xdr/Stellar-overlay.x b/xdr/Stellar-overlay.x index 79ae734138..a4ab6b0779 100644 --- a/xdr/Stellar-overlay.x +++ b/xdr/Stellar-overlay.x @@ -90,7 +90,10 @@ enum MessageType GET_SCP_STATE = 12, // new messages - HELLO = 13 + HELLO = 13, + + SURVEY_REQUEST = 14, + SURVEY_RESPONSE = 15 }; struct DontHave @@ -99,6 +102,80 @@ struct DontHave uint256 reqHash; }; +enum SurveyMessageCommandType +{ + SURVEY_TOPOLOGY = 0 +}; + +struct SurveyRequestMessage +{ + NodeID surveyorPeerID; + NodeID surveyedPeerID; + uint32 ledgerNum; + Curve25519Public encryptionKey; + SurveyMessageCommandType commandType; +}; + +struct SignedSurveyRequestMessage +{ + Signature requestSignature; + SurveyRequestMessage request; +}; + +typedef opaque EncryptedBody<64000>; +struct SurveyResponseMessage +{ + NodeID surveyorPeerID; + NodeID surveyedPeerID; + uint32 ledgerNum; + SurveyMessageCommandType commandType; + EncryptedBody encryptedBody; +}; + +struct SignedSurveyResponseMessage +{ + Signature responseSignature; + SurveyResponseMessage response; +}; + +struct PeerStats +{ + NodeID id; + string versionStr<100>; + uint64 messagesRead; + uint64 messagesWritten; + uint64 bytesRead; + uint64 bytesWritten; + uint64 secondsConnected; + + uint64 uniqueFloodBytesRecv; + uint64 duplicateFloodBytesRecv; + uint64 uniqueFetchBytesRecv; + uint64 duplicateFetchBytesRecv; + + uint64 uniqueFloodMessageRecv; + uint64 duplicateFloodMessageRecv; + uint64 uniqueFetchMessageRecv; + uint64 duplicateFetchMessageRecv; +}; + +typedef PeerStats PeerStatList<25>; + +struct TopologyResponseBody +{ + PeerStatList inboundPeers; + PeerStatList outboundPeers; + + uint32 totalInboundPeerCount; + uint32 totalOutboundPeerCount; +}; + +union SurveyResponseBody switch (SurveyMessageCommandType type) +{ +case SURVEY_TOPOLOGY: + TopologyResponseBody topologyResponseBody; +}; + union StellarMessage switch (MessageType type) { case ERROR_MSG: @@ -112,7 +189,7 @@ case DONT_HAVE: case GET_PEERS: void; case PEERS: - PeerAddress peers<>; + PeerAddress peers<100>; case GET_TX_SET: uint256 txSetHash; @@ -122,6 +199,12 @@ case TX_SET: case TRANSACTION: TransactionEnvelope transaction; +case SURVEY_REQUEST: + SignedSurveyRequestMessage signedSurveyRequestMessage; + +case SURVEY_RESPONSE: + SignedSurveyResponseMessage signedSurveyResponseMessage; + // SCP case GET_SCP_QUORUMSET: uint256 qSetHash; @@ -137,10 +220,10 @@ union AuthenticatedMessage switch (uint32 v) { case 0: struct -{ - uint64 sequence; - StellarMessage message; - HmacSha256Mac mac; + { + uint64 sequence; + StellarMessage message; + HmacSha256Mac mac; } v0; }; } diff --git a/xdr/Stellar-transaction.x b/xdr/Stellar-transaction.x index a268a89288..1a4e491a16 100644 --- a/xdr/Stellar-transaction.x +++ b/xdr/Stellar-transaction.x @@ -7,6 
+7,25 @@ namespace stellar { +union LiquidityPoolParameters switch (LiquidityPoolType type) +{ +case LIQUIDITY_POOL_CONSTANT_PRODUCT: + LiquidityPoolConstantProductParameters constantProduct; +}; + +// Source or destination of a payment operation +union MuxedAccount switch (CryptoKeyType type) +{ +case KEY_TYPE_ED25519: + uint256 ed25519; +case KEY_TYPE_MUXED_ED25519: + struct + { + uint64 id; + uint256 ed25519; + } med25519; +}; + struct DecoratedSignature { SignatureHint hint; // last 4 bytes of the public key, used as a hint @@ -17,15 +36,28 @@ enum OperationType { CREATE_ACCOUNT = 0, PAYMENT = 1, - PATH_PAYMENT = 2, - MANAGE_OFFER = 3, - CREATE_PASSIVE_OFFER = 4, + PATH_PAYMENT_STRICT_RECEIVE = 2, + MANAGE_SELL_OFFER = 3, + CREATE_PASSIVE_SELL_OFFER = 4, SET_OPTIONS = 5, CHANGE_TRUST = 6, ALLOW_TRUST = 7, ACCOUNT_MERGE = 8, INFLATION = 9, - MANAGE_DATA = 10 + MANAGE_DATA = 10, + BUMP_SEQUENCE = 11, + MANAGE_BUY_OFFER = 12, + PATH_PAYMENT_STRICT_SEND = 13, + CREATE_CLAIMABLE_BALANCE = 14, + CLAIM_CLAIMABLE_BALANCE = 15, + BEGIN_SPONSORING_FUTURE_RESERVES = 16, + END_SPONSORING_FUTURE_RESERVES = 17, + REVOKE_SPONSORSHIP = 18, + CLAWBACK = 19, + CLAWBACK_CLAIMABLE_BALANCE = 20, + SET_TRUST_LINE_FLAGS = 21, + LIQUIDITY_POOL_DEPOSIT = 22, + LIQUIDITY_POOL_WITHDRAW = 23 }; /* CreateAccount @@ -36,7 +68,6 @@ Threshold: med Result: CreateAccountResult */ - struct CreateAccountOp { AccountID destination; // account to create @@ -53,12 +84,12 @@ struct CreateAccountOp */ struct PaymentOp { - AccountID destination; // recipient of the payment - Asset asset; // what they end up with - int64 amount; // amount they end up with + MuxedAccount destination; // recipient of the payment + Asset asset; // what they end up with + int64 amount; // amount they end up with }; -/* PathPayment +/* PathPaymentStrictReceive send an amount to a destination account through a path. (up to sendMax, sendAsset) @@ -67,18 +98,43 @@ send an amount to a destination account through a path. Threshold: med -Result: PathPaymentResult +Result: PathPaymentStrictReceiveResult */ -struct PathPaymentOp +struct PathPaymentStrictReceiveOp { Asset sendAsset; // asset we pay with int64 sendMax; // the maximum amount of sendAsset to // send (excluding fees). // The operation will fail if can't be met - AccountID destination; // recipient of the payment - Asset destAsset; // what they end up with - int64 destAmount; // amount they end up with + MuxedAccount destination; // recipient of the payment + Asset destAsset; // what they end up with + int64 destAmount; // amount they end up with + + Asset path<5>; // additional hops it must go through to get there +}; + +/* PathPaymentStrictSend + +send an amount to a destination account through a path. +(sendMax, sendAsset) +(X0, Path[0]) .. 
(Xn, Path[n]) +(at least destAmount, destAsset) + +Threshold: med + +Result: PathPaymentStrictSendResult +*/ +struct PathPaymentStrictSendOp +{ + Asset sendAsset; // asset we pay with + int64 sendAmount; // amount of sendAsset to send (excluding fees) + + MuxedAccount destination; // recipient of the payment + Asset destAsset; // what they end up with + int64 destMin; // the minimum amount of dest asset to + // be received + // The operation will fail if it can't be met Asset path<5>; // additional hops it must go through to get there }; @@ -87,10 +143,10 @@ struct PathPaymentOp Threshold: med -Result: ManageOfferResult +Result: ManageSellOfferResult */ -struct ManageOfferOp +struct ManageSellOfferOp { Asset selling; Asset buying; @@ -98,21 +154,40 @@ struct ManageOfferOp Price price; // price of thing being sold in terms of what you are buying // 0=create a new offer, otherwise edit an existing offer - uint64 offerID; + int64 offerID; +}; + +/* Creates, updates or deletes an offer with amount in terms of buying asset + +Threshold: med + +Result: ManageBuyOfferResult + +*/ +struct ManageBuyOfferOp +{ + Asset selling; + Asset buying; + int64 buyAmount; // amount being bought. if set to 0, delete the offer + Price price; // price of thing being bought in terms of what you are + // selling + + // 0=create a new offer, otherwise edit an existing offer + int64 offerID; }; /* Creates an offer that doesn't take offers of the same price Threshold: med -Result: CreatePassiveOfferResult +Result: CreatePassiveSellOfferResult */ -struct CreatePassiveOfferOp +struct CreatePassiveSellOfferOp { Asset selling; // A Asset buying; // B - int64 amount; // amount taker gets. if set to 0, delete the offer + int64 amount; // amount taker gets Price price; // cost of A in terms of B }; @@ -125,7 +200,6 @@ struct CreatePassiveOfferOp Result: SetOptionsResult */ - struct SetOptionsOp { AccountID* inflationDest; // sets the inflation destination @@ -146,6 +220,23 @@ struct SetOptionsOp Signer* signer; }; +union ChangeTrustAsset switch (AssetType type) +{ +case ASSET_TYPE_NATIVE: // Not credit + void; + +case ASSET_TYPE_CREDIT_ALPHANUM4: + AlphaNum4 alphaNum4; + +case ASSET_TYPE_CREDIT_ALPHANUM12: + AlphaNum12 alphaNum12; + +case ASSET_TYPE_POOL_SHARE: + LiquidityPoolParameters liquidityPool; + + // add other asset types here in the future +}; + /* Creates, updates or deletes a trust line Threshold: med @@ -155,7 +246,7 @@ struct SetOptionsOp */ struct ChangeTrustOp { - Asset line; + ChangeTrustAsset line; // if limit is set to 0, deletes the trust line int64 limit; @@ -173,20 +264,10 @@ struct ChangeTrustOp struct AllowTrustOp { AccountID trustor; - union switch (AssetType type) - { - // ASSET_TYPE_NATIVE is not allowed - case ASSET_TYPE_CREDIT_ALPHANUM4: - opaque assetCode4[4]; - - case ASSET_TYPE_CREDIT_ALPHANUM12: - opaque assetCode12[12]; + AssetCode asset; - // add other asset types here in the future - } - asset; - - bool authorize; + // One of 0, AUTHORIZED_FLAG, or AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG + uint32 authorize; }; /* Inflation @@ -207,18 +288,181 @@ Result: InflationResult */ /* ManageData - Adds, Updates, or Deletes a key value pair associated with a particular - account. + Adds, Updates, or Deletes a key value pair associated with a particular + account. 
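For the renamed and newly added offer operations above (ManageSellOfferOp, ManageBuyOfferOp, CreatePassiveSellOfferOp), a minimal Go sketch of filling in a ManageBuyOfferOp may help. The field names (Selling, Buying, BuyAmount, Price, OfferId) and xdr.Price follow the usual xdrgen mapping of the XDR above and should be treated as assumptions; MustNewCreditAsset and MustNewNativeAsset come from xdr/asset.go later in this diff.

package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	// Offer to buy 100.0000000 USD (amounts are in stroops, 7 decimal places),
	// paying with XLM at a price of 2 XLM per 1 USD.
	issuer := "GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH"
	op := xdr.ManageBuyOfferOp{
		Selling:   xdr.MustNewNativeAsset(),
		Buying:    xdr.MustNewCreditAsset("USD", issuer),
		BuyAmount: xdr.Int64(1000000000),
		Price:     xdr.Price{N: 2, D: 1}, // price of what is bought in terms of what is sold
		OfferId:   0,                     // 0 = create a new offer, otherwise edit an existing one
	}
	fmt.Println(op.Buying.StringCanonical(), op.BuyAmount)
}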
Threshold: med Result: ManageDataResult */ - struct ManageDataOp { - string64 dataName; - DataValue* dataValue; // set to null to clear + string64 dataName; + DataValue* dataValue; // set to null to clear +}; + +/* Bump Sequence + + increases the sequence to a given level + + Threshold: low + + Result: BumpSequenceResult +*/ +struct BumpSequenceOp +{ + SequenceNumber bumpTo; +}; + +/* Creates a claimable balance entry + + Threshold: med + + Result: CreateClaimableBalanceResult +*/ +struct CreateClaimableBalanceOp +{ + Asset asset; + int64 amount; + Claimant claimants<10>; +}; + +/* Claims a claimable balance entry + + Threshold: low + + Result: ClaimClaimableBalanceResult +*/ +struct ClaimClaimableBalanceOp +{ + ClaimableBalanceID balanceID; +}; + +/* BeginSponsoringFutureReserves + + Establishes the is-sponsoring-future-reserves-for relationship between + the source account and sponsoredID + + Threshold: med + + Result: BeginSponsoringFutureReservesResult +*/ +struct BeginSponsoringFutureReservesOp +{ + AccountID sponsoredID; +}; + +/* EndSponsoringFutureReserves + + Terminates the current is-sponsoring-future-reserves-for relationship in + which source account is sponsored + + Threshold: med + + Result: EndSponsoringFutureReservesResult +*/ +// EndSponsoringFutureReserves is empty + +/* RevokeSponsorship + + If source account is not sponsored or is sponsored by the owner of the + specified entry or sub-entry, then attempt to revoke the sponsorship. + If source account is sponsored, then attempt to transfer the sponsorship + to the sponsor of source account. + + Threshold: med + + Result: RevokeSponsorshipResult +*/ +enum RevokeSponsorshipType +{ + REVOKE_SPONSORSHIP_LEDGER_ENTRY = 0, + REVOKE_SPONSORSHIP_SIGNER = 1 +}; + +union RevokeSponsorshipOp switch (RevokeSponsorshipType type) +{ +case REVOKE_SPONSORSHIP_LEDGER_ENTRY: + LedgerKey ledgerKey; +case REVOKE_SPONSORSHIP_SIGNER: + struct + { + AccountID accountID; + SignerKey signerKey; + } signer; +}; + +/* Claws back an amount of an asset from an account + + Threshold: med + + Result: ClawbackResult +*/ +struct ClawbackOp +{ + Asset asset; + MuxedAccount from; + int64 amount; +}; + +/* Claws back a claimable balance + + Threshold: med + + Result: ClawbackClaimableBalanceResult +*/ +struct ClawbackClaimableBalanceOp +{ + ClaimableBalanceID balanceID; +}; + +/* SetTrustLineFlagsOp + + Updates the flags of an existing trust line. + This is called by the issuer of the related asset. 
+ + Threshold: low + + Result: SetTrustLineFlagsResult +*/ +struct SetTrustLineFlagsOp +{ + AccountID trustor; + Asset asset; + + uint32 clearFlags; // which flags to clear + uint32 setFlags; // which flags to set +}; + +const LIQUIDITY_POOL_FEE_V18 = 30; + +/* Deposit assets into a liquidity pool + + Threshold: med + + Result: LiquidityPoolDepositResult +*/ +struct LiquidityPoolDepositOp +{ + PoolID liquidityPoolID; + int64 maxAmountA; // maximum amount of first asset to deposit + int64 maxAmountB; // maximum amount of second asset to deposit + Price minPrice; // minimum depositA/depositB + Price maxPrice; // maximum depositA/depositB +}; + +/* Withdraw assets from a liquidity pool + + Threshold: med + + Result: LiquidityPoolWithdrawResult +*/ +struct LiquidityPoolWithdrawOp +{ + PoolID liquidityPoolID; + int64 amount; // amount of pool shares to withdraw + int64 minAmountA; // minimum amount of first asset to withdraw + int64 minAmountB; // minimum amount of second asset to withdraw }; /* An operation is the lowest unit of work that a transaction does */ @@ -227,7 +471,7 @@ struct Operation // sourceAccount is the account used to run the operation // if not set, the runtime defaults to "sourceAccount" specified at // the transaction level - AccountID* sourceAccount; + MuxedAccount* sourceAccount; union switch (OperationType type) { @@ -235,12 +479,12 @@ struct Operation CreateAccountOp createAccountOp; case PAYMENT: PaymentOp paymentOp; - case PATH_PAYMENT: - PathPaymentOp pathPaymentOp; - case MANAGE_OFFER: - ManageOfferOp manageOfferOp; - case CREATE_PASSIVE_OFFER: - CreatePassiveOfferOp createPassiveOfferOp; + case PATH_PAYMENT_STRICT_RECEIVE: + PathPaymentStrictReceiveOp pathPaymentStrictReceiveOp; + case MANAGE_SELL_OFFER: + ManageSellOfferOp manageSellOfferOp; + case CREATE_PASSIVE_SELL_OFFER: + CreatePassiveSellOfferOp createPassiveSellOfferOp; case SET_OPTIONS: SetOptionsOp setOptionsOp; case CHANGE_TRUST: @@ -248,15 +492,61 @@ struct Operation case ALLOW_TRUST: AllowTrustOp allowTrustOp; case ACCOUNT_MERGE: - AccountID destination; + MuxedAccount destination; case INFLATION: void; case MANAGE_DATA: ManageDataOp manageDataOp; + case BUMP_SEQUENCE: + BumpSequenceOp bumpSequenceOp; + case MANAGE_BUY_OFFER: + ManageBuyOfferOp manageBuyOfferOp; + case PATH_PAYMENT_STRICT_SEND: + PathPaymentStrictSendOp pathPaymentStrictSendOp; + case CREATE_CLAIMABLE_BALANCE: + CreateClaimableBalanceOp createClaimableBalanceOp; + case CLAIM_CLAIMABLE_BALANCE: + ClaimClaimableBalanceOp claimClaimableBalanceOp; + case BEGIN_SPONSORING_FUTURE_RESERVES: + BeginSponsoringFutureReservesOp beginSponsoringFutureReservesOp; + case END_SPONSORING_FUTURE_RESERVES: + void; + case REVOKE_SPONSORSHIP: + RevokeSponsorshipOp revokeSponsorshipOp; + case CLAWBACK: + ClawbackOp clawbackOp; + case CLAWBACK_CLAIMABLE_BALANCE: + ClawbackClaimableBalanceOp clawbackClaimableBalanceOp; + case SET_TRUST_LINE_FLAGS: + SetTrustLineFlagsOp setTrustLineFlagsOp; + case LIQUIDITY_POOL_DEPOSIT: + LiquidityPoolDepositOp liquidityPoolDepositOp; + case LIQUIDITY_POOL_WITHDRAW: + LiquidityPoolWithdrawOp liquidityPoolWithdrawOp; } body; }; +union HashIDPreimage switch (EnvelopeType type) +{ +case ENVELOPE_TYPE_OP_ID: + struct + { + AccountID sourceAccount; + SequenceNumber seqNum; + uint32 opNum; + } operationID; +case ENVELOPE_TYPE_POOL_REVOKE_OP_ID: + struct + { + AccountID sourceAccount; + SequenceNumber seqNum; + uint32 opNum; + PoolID liquidityPoolID; + Asset asset; + } revokeID; +}; + enum MemoType { MEMO_NONE = 0, @@ -282,8 
+572,41 @@ case MEMO_RETURN: struct TimeBounds { - uint64 minTime; - uint64 maxTime; + TimePoint minTime; + TimePoint maxTime; // 0 here means no maxTime +}; + +// maximum number of operations per transaction +const MAX_OPS_PER_TX = 100; + +// TransactionV0 is a transaction with the AccountID discriminant stripped off, +// leaving a raw ed25519 public key to identify the source account. This is used +// for backwards compatibility starting from the protocol 12/13 boundary. If an +// "old-style" TransactionEnvelope containing a Transaction is parsed with this +// XDR definition, it will be parsed as a "new-style" TransactionEnvelope +// containing a TransactionV0. +struct TransactionV0 +{ + uint256 sourceAccountEd25519; + uint32 fee; + SequenceNumber seqNum; + TimeBounds* timeBounds; + Memo memo; + Operation operations; + union switch (int v) + { + case 0: + void; + } + ext; +}; + +struct TransactionV0Envelope +{ + TransactionV0 tx; + /* Each decorated signature is a signature over the SHA256 hash of + * a TransactionSignaturePayload */ + DecoratedSignature signatures<20>; }; /* a transaction is a container for a set of operations @@ -293,11 +616,10 @@ struct TimeBounds either all operations are applied or none are if any returns a failing code */ - struct Transaction { // account used to run the transaction - AccountID sourceAccount; + MuxedAccount sourceAccount; // the fee the sourceAccount will pay uint32 fee; @@ -310,7 +632,7 @@ struct Transaction Memo memo; - Operation operations<100>; + Operation operations; // reserved for future use union switch (int v) @@ -321,21 +643,99 @@ struct Transaction ext; }; -/* A TransactionEnvelope wraps a transaction with signatures. */ -struct TransactionEnvelope +struct TransactionV1Envelope { Transaction tx; + /* Each decorated signature is a signature over the SHA256 hash of + * a TransactionSignaturePayload */ + DecoratedSignature signatures<20>; +}; + +struct FeeBumpTransaction +{ + MuxedAccount feeSource; + int64 fee; + union switch (EnvelopeType type) + { + case ENVELOPE_TYPE_TX: + TransactionV1Envelope v1; + } + innerTx; + union switch (int v) + { + case 0: + void; + } + ext; +}; + +struct FeeBumpTransactionEnvelope +{ + FeeBumpTransaction tx; + /* Each decorated signature is a signature over the SHA256 hash of + * a TransactionSignaturePayload */ DecoratedSignature signatures<20>; }; +/* A TransactionEnvelope wraps a transaction with signatures. */ +union TransactionEnvelope switch (EnvelopeType type) +{ +case ENVELOPE_TYPE_TX_V0: + TransactionV0Envelope v0; +case ENVELOPE_TYPE_TX: + TransactionV1Envelope v1; +case ENVELOPE_TYPE_TX_FEE_BUMP: + FeeBumpTransactionEnvelope feeBump; +}; + +struct TransactionSignaturePayload +{ + Hash networkId; + union switch (EnvelopeType type) + { + // Backwards Compatibility: Use ENVELOPE_TYPE_TX to sign ENVELOPE_TYPE_TX_V0 + case ENVELOPE_TYPE_TX: + Transaction tx; + case ENVELOPE_TYPE_TX_FEE_BUMP: + FeeBumpTransaction feeBump; + } + taggedTransaction; +}; + /* Operation Results section */ -/* This result is used when offers are taken during an operation */ +enum ClaimAtomType +{ + CLAIM_ATOM_TYPE_V0 = 0, + CLAIM_ATOM_TYPE_ORDER_BOOK = 1, + CLAIM_ATOM_TYPE_LIQUIDITY_POOL = 2 +}; + +// ClaimOfferAtomV0 is a ClaimOfferAtom with the AccountID discriminant stripped +// off, leaving a raw ed25519 public key to identify the source account. This is +// used for backwards compatibility starting from the protocol 17/18 boundary. 
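Because TransactionEnvelope above becomes a union over v0, v1 and fee-bump envelopes, consumers generally have to branch on the envelope type. A hedged sketch follows, assuming the generated Go bindings expose the EnvelopeType constants and per-arm pointer fields (V0, V1, FeeBump) in the same style as the Get*/Must* accessors used elsewhere in this diff.

package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

// countSignatures branches on the envelope type, mirroring the union above.
func countSignatures(env xdr.TransactionEnvelope) int {
	switch env.Type {
	case xdr.EnvelopeTypeEnvelopeTypeTxV0:
		return len(env.V0.Signatures)
	case xdr.EnvelopeTypeEnvelopeTypeTx:
		return len(env.V1.Signatures)
	case xdr.EnvelopeTypeEnvelopeTypeTxFeeBump:
		// Only the outer (fee bump) signatures; the inner v1 envelope carries
		// its own signature list under the fee bump transaction's innerTx.
		return len(env.FeeBump.Signatures)
	default:
		return 0
	}
}

func main() {
	env := xdr.TransactionEnvelope{
		Type: xdr.EnvelopeTypeEnvelopeTypeTx,
		V1:   &xdr.TransactionV1Envelope{},
	}
	fmt.Println(countSignatures(env)) // 0
}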
+// If an "old-style" ClaimOfferAtom is parsed with this XDR definition, it will +// be parsed as a "new-style" ClaimAtom containing a ClaimOfferAtomV0. +struct ClaimOfferAtomV0 +{ + // emitted to identify the offer + uint256 sellerEd25519; // Account that owns the offer + int64 offerID; + + // amount and asset taken from the owner + Asset assetSold; + int64 amountSold; + + // amount and asset sent to the owner + Asset assetBought; + int64 amountBought; +}; + struct ClaimOfferAtom { // emitted to identify the offer AccountID sellerID; // Account that owns the offer - uint64 offerID; + int64 offerID; // amount and asset taken from the owner Asset assetSold; @@ -346,6 +746,32 @@ struct ClaimOfferAtom int64 amountBought; }; +struct ClaimLiquidityAtom +{ + PoolID liquidityPoolID; + + // amount and asset taken from the pool + Asset assetSold; + int64 amountSold; + + // amount and asset sent to the pool + Asset assetBought; + int64 amountBought; +}; + +/* This result is used when offers are taken or liquidity is exchanged with a + liquidity pool during an operation +*/ +union ClaimAtom switch (ClaimAtomType type) +{ +case CLAIM_ATOM_TYPE_V0: + ClaimOfferAtomV0 v0; +case CLAIM_ATOM_TYPE_ORDER_BOOK: + ClaimOfferAtom orderBook; +case CLAIM_ATOM_TYPE_LIQUIDITY_POOL: + ClaimLiquidityAtom liquidityPool; +}; + /******* CreateAccount Result ********/ enum CreateAccountResultCode @@ -374,7 +800,7 @@ default: enum PaymentResultCode { // codes considered as "success" for the operation - PAYMENT_SUCCESS = 0, // payment successfuly completed + PAYMENT_SUCCESS = 0, // payment successfully completed // codes considered as "failure" for the operation PAYMENT_MALFORMED = -1, // bad input @@ -396,26 +822,35 @@ default: void; }; -/******* Payment Result ********/ +/******* PathPaymentStrictReceive Result ********/ -enum PathPaymentResultCode +enum PathPaymentStrictReceiveResultCode { // codes considered as "success" for the operation - PATH_PAYMENT_SUCCESS = 0, // success + PATH_PAYMENT_STRICT_RECEIVE_SUCCESS = 0, // success // codes considered as "failure" for the operation - PATH_PAYMENT_MALFORMED = -1, // bad input - PATH_PAYMENT_UNDERFUNDED = -2, // not enough funds in source account - PATH_PAYMENT_SRC_NO_TRUST = -3, // no trust line on source account - PATH_PAYMENT_SRC_NOT_AUTHORIZED = -4, // source not authorized to transfer - PATH_PAYMENT_NO_DESTINATION = -5, // destination account does not exist - PATH_PAYMENT_NO_TRUST = -6, // dest missing a trust line for asset - PATH_PAYMENT_NOT_AUTHORIZED = -7, // dest not authorized to hold asset - PATH_PAYMENT_LINE_FULL = -8, // dest would go above their limit - PATH_PAYMENT_NO_ISSUER = -9, // missing issuer on one asset - PATH_PAYMENT_TOO_FEW_OFFERS = -10, // not enough offers to satisfy path - PATH_PAYMENT_OFFER_CROSS_SELF = -11, // would cross one of its own offers - PATH_PAYMENT_OVER_SENDMAX = -12 // could not satisfy sendmax + PATH_PAYMENT_STRICT_RECEIVE_MALFORMED = -1, // bad input + PATH_PAYMENT_STRICT_RECEIVE_UNDERFUNDED = + -2, // not enough funds in source account + PATH_PAYMENT_STRICT_RECEIVE_SRC_NO_TRUST = + -3, // no trust line on source account + PATH_PAYMENT_STRICT_RECEIVE_SRC_NOT_AUTHORIZED = + -4, // source not authorized to transfer + PATH_PAYMENT_STRICT_RECEIVE_NO_DESTINATION = + -5, // destination account does not exist + PATH_PAYMENT_STRICT_RECEIVE_NO_TRUST = + -6, // dest missing a trust line for asset + PATH_PAYMENT_STRICT_RECEIVE_NOT_AUTHORIZED = + -7, // dest not authorized to hold asset + PATH_PAYMENT_STRICT_RECEIVE_LINE_FULL = + -8, // dest 
would go above their limit + PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER = -9, // missing issuer on one asset + PATH_PAYMENT_STRICT_RECEIVE_TOO_FEW_OFFERS = + -10, // not enough offers to satisfy path + PATH_PAYMENT_STRICT_RECEIVE_OFFER_CROSS_SELF = + -11, // would cross one of its own offers + PATH_PAYMENT_STRICT_RECEIVE_OVER_SENDMAX = -12 // could not satisfy sendmax }; struct SimplePaymentResult @@ -425,43 +860,92 @@ struct SimplePaymentResult int64 amount; }; -union PathPaymentResult switch (PathPaymentResultCode code) +union PathPaymentStrictReceiveResult switch ( + PathPaymentStrictReceiveResultCode code) +{ +case PATH_PAYMENT_STRICT_RECEIVE_SUCCESS: + struct + { + ClaimAtom offers<>; + SimplePaymentResult last; + } success; +case PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER: + Asset noIssuer; // the asset that caused the error +default: + void; +}; + +/******* PathPaymentStrictSend Result ********/ + +enum PathPaymentStrictSendResultCode +{ + // codes considered as "success" for the operation + PATH_PAYMENT_STRICT_SEND_SUCCESS = 0, // success + + // codes considered as "failure" for the operation + PATH_PAYMENT_STRICT_SEND_MALFORMED = -1, // bad input + PATH_PAYMENT_STRICT_SEND_UNDERFUNDED = + -2, // not enough funds in source account + PATH_PAYMENT_STRICT_SEND_SRC_NO_TRUST = + -3, // no trust line on source account + PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED = + -4, // source not authorized to transfer + PATH_PAYMENT_STRICT_SEND_NO_DESTINATION = + -5, // destination account does not exist + PATH_PAYMENT_STRICT_SEND_NO_TRUST = + -6, // dest missing a trust line for asset + PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED = + -7, // dest not authorized to hold asset + PATH_PAYMENT_STRICT_SEND_LINE_FULL = -8, // dest would go above their limit + PATH_PAYMENT_STRICT_SEND_NO_ISSUER = -9, // missing issuer on one asset + PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS = + -10, // not enough offers to satisfy path + PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF = + -11, // would cross one of its own offers + PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN = -12 // could not satisfy destMin +}; + +union PathPaymentStrictSendResult switch (PathPaymentStrictSendResultCode code) { -case PATH_PAYMENT_SUCCESS: +case PATH_PAYMENT_STRICT_SEND_SUCCESS: struct { - ClaimOfferAtom offers<>; + ClaimAtom offers<>; SimplePaymentResult last; } success; -case PATH_PAYMENT_NO_ISSUER: +case PATH_PAYMENT_STRICT_SEND_NO_ISSUER: Asset noIssuer; // the asset that caused the error default: void; }; -/******* ManageOffer Result ********/ +/******* ManageSellOffer Result ********/ -enum ManageOfferResultCode +enum ManageSellOfferResultCode { // codes considered as "success" for the operation - MANAGE_OFFER_SUCCESS = 0, + MANAGE_SELL_OFFER_SUCCESS = 0, // codes considered as "failure" for the operation - MANAGE_OFFER_MALFORMED = -1, // generated offer would be invalid - MANAGE_OFFER_SELL_NO_TRUST = -2, // no trust line for what we're selling - MANAGE_OFFER_BUY_NO_TRUST = -3, // no trust line for what we're buying - MANAGE_OFFER_SELL_NOT_AUTHORIZED = -4, // not authorized to sell - MANAGE_OFFER_BUY_NOT_AUTHORIZED = -5, // not authorized to buy - MANAGE_OFFER_LINE_FULL = -6, // can't receive more of what it's buying - MANAGE_OFFER_UNDERFUNDED = -7, // doesn't hold what it's trying to sell - MANAGE_OFFER_CROSS_SELF = -8, // would cross an offer from the same user - MANAGE_OFFER_SELL_NO_ISSUER = -9, // no issuer for what we're selling - MANAGE_OFFER_BUY_NO_ISSUER = -10, // no issuer for what we're buying + MANAGE_SELL_OFFER_MALFORMED = -1, // generated offer 
would be invalid + MANAGE_SELL_OFFER_SELL_NO_TRUST = + -2, // no trust line for what we're selling + MANAGE_SELL_OFFER_BUY_NO_TRUST = -3, // no trust line for what we're buying + MANAGE_SELL_OFFER_SELL_NOT_AUTHORIZED = -4, // not authorized to sell + MANAGE_SELL_OFFER_BUY_NOT_AUTHORIZED = -5, // not authorized to buy + MANAGE_SELL_OFFER_LINE_FULL = -6, // can't receive more of what it's buying + MANAGE_SELL_OFFER_UNDERFUNDED = -7, // doesn't hold what it's trying to sell + MANAGE_SELL_OFFER_CROSS_SELF = + -8, // would cross an offer from the same user + MANAGE_SELL_OFFER_SELL_NO_ISSUER = -9, // no issuer for what we're selling + MANAGE_SELL_OFFER_BUY_NO_ISSUER = -10, // no issuer for what we're buying // update errors - MANAGE_OFFER_NOT_FOUND = -11, // offerID does not match an existing offer + MANAGE_SELL_OFFER_NOT_FOUND = + -11, // offerID does not match an existing offer - MANAGE_OFFER_LOW_RESERVE = -12 // not enough funds to create a new Offer + MANAGE_SELL_OFFER_LOW_RESERVE = + -12 // not enough funds to create a new Offer }; enum ManageOfferEffect @@ -474,7 +958,7 @@ enum ManageOfferEffect struct ManageOfferSuccessResult { // offers that got claimed while creating this offer - ClaimOfferAtom offersClaimed<>; + ClaimAtom offersClaimed<>; union switch (ManageOfferEffect effect) { @@ -487,9 +971,43 @@ struct ManageOfferSuccessResult offer; }; -union ManageOfferResult switch (ManageOfferResultCode code) +union ManageSellOfferResult switch (ManageSellOfferResultCode code) +{ +case MANAGE_SELL_OFFER_SUCCESS: + ManageOfferSuccessResult success; +default: + void; +}; + +/******* ManageBuyOffer Result ********/ + +enum ManageBuyOfferResultCode +{ + // codes considered as "success" for the operation + MANAGE_BUY_OFFER_SUCCESS = 0, + + // codes considered as "failure" for the operation + MANAGE_BUY_OFFER_MALFORMED = -1, // generated offer would be invalid + MANAGE_BUY_OFFER_SELL_NO_TRUST = -2, // no trust line for what we're selling + MANAGE_BUY_OFFER_BUY_NO_TRUST = -3, // no trust line for what we're buying + MANAGE_BUY_OFFER_SELL_NOT_AUTHORIZED = -4, // not authorized to sell + MANAGE_BUY_OFFER_BUY_NOT_AUTHORIZED = -5, // not authorized to buy + MANAGE_BUY_OFFER_LINE_FULL = -6, // can't receive more of what it's buying + MANAGE_BUY_OFFER_UNDERFUNDED = -7, // doesn't hold what it's trying to sell + MANAGE_BUY_OFFER_CROSS_SELF = -8, // would cross an offer from the same user + MANAGE_BUY_OFFER_SELL_NO_ISSUER = -9, // no issuer for what we're selling + MANAGE_BUY_OFFER_BUY_NO_ISSUER = -10, // no issuer for what we're buying + + // update errors + MANAGE_BUY_OFFER_NOT_FOUND = + -11, // offerID does not match an existing offer + + MANAGE_BUY_OFFER_LOW_RESERVE = -12 // not enough funds to create a new Offer +}; + +union ManageBuyOfferResult switch (ManageBuyOfferResultCode code) { -case MANAGE_OFFER_SUCCESS: +case MANAGE_BUY_OFFER_SUCCESS: ManageOfferSuccessResult success; default: void; @@ -510,7 +1028,9 @@ enum SetOptionsResultCode SET_OPTIONS_UNKNOWN_FLAG = -6, // can't set an unknown flag SET_OPTIONS_THRESHOLD_OUT_OF_RANGE = -7, // bad value for weight/threshold SET_OPTIONS_BAD_SIGNER = -8, // signer cannot be masterkey - SET_OPTIONS_INVALID_HOME_DOMAIN = -9 // malformed home domain + SET_OPTIONS_INVALID_HOME_DOMAIN = -9, // malformed home domain + SET_OPTIONS_AUTH_REVOCABLE_REQUIRED = + -10 // auth revocable is required for clawback }; union SetOptionsResult switch (SetOptionsResultCode code) @@ -532,7 +1052,12 @@ enum ChangeTrustResultCode CHANGE_TRUST_NO_ISSUER = -2, // could not find issuer 
CHANGE_TRUST_INVALID_LIMIT = -3, // cannot drop limit below balance // cannot create with a limit of 0 - CHANGE_TRUST_LOW_RESERVE = -4 // not enough funds to create a new trust line + CHANGE_TRUST_LOW_RESERVE = + -4, // not enough funds to create a new trust line, + CHANGE_TRUST_SELF_NOT_ALLOWED = -5, // trusting self is not allowed + CHANGE_TRUST_TRUST_LINE_MISSING = -6, // Asset trustline is missing for pool + CHANGE_TRUST_CANNOT_DELETE = -7, // Asset trustline is still referenced in a pool + CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES = -8 // Asset trustline is deauthorized }; union ChangeTrustResult switch (ChangeTrustResultCode code) @@ -554,7 +1079,10 @@ enum AllowTrustResultCode ALLOW_TRUST_NO_TRUST_LINE = -2, // trustor does not have a trustline // source account does not require trust ALLOW_TRUST_TRUST_NOT_REQUIRED = -3, - ALLOW_TRUST_CANT_REVOKE = -4 // source account can't revoke trust + ALLOW_TRUST_CANT_REVOKE = -4, // source account can't revoke trust, + ALLOW_TRUST_SELF_NOT_ALLOWED = -5, // trusting self is not allowed + ALLOW_TRUST_LOW_RESERVE = -6 // claimable balances can't be created + // on revoke due to low reserves }; union AllowTrustResult switch (AllowTrustResultCode code) @@ -572,16 +1100,20 @@ enum AccountMergeResultCode // codes considered as "success" for the operation ACCOUNT_MERGE_SUCCESS = 0, // codes considered as "failure" for the operation - ACCOUNT_MERGE_MALFORMED = -1, // can't merge onto itself - ACCOUNT_MERGE_NO_ACCOUNT = -2, // destination does not exist - ACCOUNT_MERGE_IMMUTABLE_SET = -3, // source account has AUTH_IMMUTABLE set - ACCOUNT_MERGE_HAS_SUB_ENTRIES = -4 // account has trust lines/offers + ACCOUNT_MERGE_MALFORMED = -1, // can't merge onto itself + ACCOUNT_MERGE_NO_ACCOUNT = -2, // destination does not exist + ACCOUNT_MERGE_IMMUTABLE_SET = -3, // source account has AUTH_IMMUTABLE set + ACCOUNT_MERGE_HAS_SUB_ENTRIES = -4, // account has trust lines/offers + ACCOUNT_MERGE_SEQNUM_TOO_FAR = -5, // sequence number is over max allowed + ACCOUNT_MERGE_DEST_FULL = -6, // can't add source balance to + // destination balance + ACCOUNT_MERGE_IS_SPONSOR = -7 // can't merge account that is a sponsor }; union AccountMergeResult switch (AccountMergeResultCode code) { case ACCOUNT_MERGE_SUCCESS: - int64 sourceAccountBalance; // how much got transfered from source account + int64 sourceAccountBalance; // how much got transferred from source account default: void; }; @@ -617,10 +1149,12 @@ enum ManageDataResultCode // codes considered as "success" for the operation MANAGE_DATA_SUCCESS = 0, // codes considered as "failure" for the operation - MANAGE_DATA_NOT_SUPPORTED_YET = -1, // The network hasn't moved to this protocol change yet - MANAGE_DATA_NAME_NOT_FOUND = -2, // Trying to remove a Data Entry that isn't there - MANAGE_DATA_LOW_RESERVE = -3, // not enough funds to create a new Data Entry - MANAGE_DATA_INVALID_NAME = -4 // Name not a valid string + MANAGE_DATA_NOT_SUPPORTED_YET = + -1, // The network hasn't moved to this protocol change yet + MANAGE_DATA_NAME_NOT_FOUND = + -2, // Trying to remove a Data Entry that isn't there + MANAGE_DATA_LOW_RESERVE = -3, // not enough funds to create a new Data Entry + MANAGE_DATA_INVALID_NAME = -4 // Name not a valid string }; union ManageDataResult switch (ManageDataResultCode code) @@ -631,14 +1165,267 @@ default: void; }; -/* High level Operation Result */ +/******* BumpSequence Result ********/ + +enum BumpSequenceResultCode +{ + // codes considered as "success" for the operation + BUMP_SEQUENCE_SUCCESS = 0, + // 
codes considered as "failure" for the operation + BUMP_SEQUENCE_BAD_SEQ = -1 // `bumpTo` is not within bounds +}; + +union BumpSequenceResult switch (BumpSequenceResultCode code) +{ +case BUMP_SEQUENCE_SUCCESS: + void; +default: + void; +}; + +/******* CreateClaimableBalance Result ********/ + +enum CreateClaimableBalanceResultCode +{ + CREATE_CLAIMABLE_BALANCE_SUCCESS = 0, + CREATE_CLAIMABLE_BALANCE_MALFORMED = -1, + CREATE_CLAIMABLE_BALANCE_LOW_RESERVE = -2, + CREATE_CLAIMABLE_BALANCE_NO_TRUST = -3, + CREATE_CLAIMABLE_BALANCE_NOT_AUTHORIZED = -4, + CREATE_CLAIMABLE_BALANCE_UNDERFUNDED = -5 +}; + +union CreateClaimableBalanceResult switch ( + CreateClaimableBalanceResultCode code) +{ +case CREATE_CLAIMABLE_BALANCE_SUCCESS: + ClaimableBalanceID balanceID; +default: + void; +}; + +/******* ClaimClaimableBalance Result ********/ + +enum ClaimClaimableBalanceResultCode +{ + CLAIM_CLAIMABLE_BALANCE_SUCCESS = 0, + CLAIM_CLAIMABLE_BALANCE_DOES_NOT_EXIST = -1, + CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM = -2, + CLAIM_CLAIMABLE_BALANCE_LINE_FULL = -3, + CLAIM_CLAIMABLE_BALANCE_NO_TRUST = -4, + CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED = -5 + +}; + +union ClaimClaimableBalanceResult switch (ClaimClaimableBalanceResultCode code) +{ +case CLAIM_CLAIMABLE_BALANCE_SUCCESS: + void; +default: + void; +}; + +/******* BeginSponsoringFutureReserves Result ********/ + +enum BeginSponsoringFutureReservesResultCode +{ + // codes considered as "success" for the operation + BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS = 0, + + // codes considered as "failure" for the operation + BEGIN_SPONSORING_FUTURE_RESERVES_MALFORMED = -1, + BEGIN_SPONSORING_FUTURE_RESERVES_ALREADY_SPONSORED = -2, + BEGIN_SPONSORING_FUTURE_RESERVES_RECURSIVE = -3 +}; + +union BeginSponsoringFutureReservesResult switch ( + BeginSponsoringFutureReservesResultCode code) +{ +case BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS: + void; +default: + void; +}; + +/******* EndSponsoringFutureReserves Result ********/ + +enum EndSponsoringFutureReservesResultCode +{ + // codes considered as "success" for the operation + END_SPONSORING_FUTURE_RESERVES_SUCCESS = 0, + + // codes considered as "failure" for the operation + END_SPONSORING_FUTURE_RESERVES_NOT_SPONSORED = -1 +}; + +union EndSponsoringFutureReservesResult switch ( + EndSponsoringFutureReservesResultCode code) +{ +case END_SPONSORING_FUTURE_RESERVES_SUCCESS: + void; +default: + void; +}; +/******* RevokeSponsorship Result ********/ + +enum RevokeSponsorshipResultCode +{ + // codes considered as "success" for the operation + REVOKE_SPONSORSHIP_SUCCESS = 0, + + // codes considered as "failure" for the operation + REVOKE_SPONSORSHIP_DOES_NOT_EXIST = -1, + REVOKE_SPONSORSHIP_NOT_SPONSOR = -2, + REVOKE_SPONSORSHIP_LOW_RESERVE = -3, + REVOKE_SPONSORSHIP_ONLY_TRANSFERABLE = -4, + REVOKE_SPONSORSHIP_MALFORMED = -5 +}; + +union RevokeSponsorshipResult switch (RevokeSponsorshipResultCode code) +{ +case REVOKE_SPONSORSHIP_SUCCESS: + void; +default: + void; +}; + +/******* Clawback Result ********/ + +enum ClawbackResultCode +{ + // codes considered as "success" for the operation + CLAWBACK_SUCCESS = 0, + + // codes considered as "failure" for the operation + CLAWBACK_MALFORMED = -1, + CLAWBACK_NOT_CLAWBACK_ENABLED = -2, + CLAWBACK_NO_TRUST = -3, + CLAWBACK_UNDERFUNDED = -4 +}; + +union ClawbackResult switch (ClawbackResultCode code) +{ +case CLAWBACK_SUCCESS: + void; +default: + void; +}; + +/******* ClawbackClaimableBalance Result ********/ + +enum ClawbackClaimableBalanceResultCode +{ + // codes considered as "success" 
for the operation + CLAWBACK_CLAIMABLE_BALANCE_SUCCESS = 0, + + // codes considered as "failure" for the operation + CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST = -1, + CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER = -2, + CLAWBACK_CLAIMABLE_BALANCE_NOT_CLAWBACK_ENABLED = -3 +}; + +union ClawbackClaimableBalanceResult switch ( + ClawbackClaimableBalanceResultCode code) +{ +case CLAWBACK_CLAIMABLE_BALANCE_SUCCESS: + void; +default: + void; +}; + +/******* SetTrustLineFlags Result ********/ + +enum SetTrustLineFlagsResultCode +{ + // codes considered as "success" for the operation + SET_TRUST_LINE_FLAGS_SUCCESS = 0, + + // codes considered as "failure" for the operation + SET_TRUST_LINE_FLAGS_MALFORMED = -1, + SET_TRUST_LINE_FLAGS_NO_TRUST_LINE = -2, + SET_TRUST_LINE_FLAGS_CANT_REVOKE = -3, + SET_TRUST_LINE_FLAGS_INVALID_STATE = -4, + SET_TRUST_LINE_FLAGS_LOW_RESERVE = -5 // claimable balances can't be created + // on revoke due to low reserves +}; + +union SetTrustLineFlagsResult switch (SetTrustLineFlagsResultCode code) +{ +case SET_TRUST_LINE_FLAGS_SUCCESS: + void; +default: + void; +}; + +/******* LiquidityPoolDeposit Result ********/ + +enum LiquidityPoolDepositResultCode +{ + // codes considered as "success" for the operation + LIQUIDITY_POOL_DEPOSIT_SUCCESS = 0, + + // codes considered as "failure" for the operation + LIQUIDITY_POOL_DEPOSIT_MALFORMED = -1, // bad input + LIQUIDITY_POOL_DEPOSIT_NO_TRUST = -2, // no trust line for one of the + // assets + LIQUIDITY_POOL_DEPOSIT_NOT_AUTHORIZED = -3, // not authorized for one of the + // assets + LIQUIDITY_POOL_DEPOSIT_UNDERFUNDED = -4, // not enough balance for one of + // the assets + LIQUIDITY_POOL_DEPOSIT_LINE_FULL = -5, // pool share trust line doesn't + // have sufficient limit + LIQUIDITY_POOL_DEPOSIT_BAD_PRICE = -6, // deposit price outside bounds + LIQUIDITY_POOL_DEPOSIT_POOL_FULL = -7 // pool reserves are full +}; + +union LiquidityPoolDepositResult switch ( + LiquidityPoolDepositResultCode code) +{ +case LIQUIDITY_POOL_DEPOSIT_SUCCESS: + void; +default: + void; +}; + +/******* LiquidityPoolWithdraw Result ********/ + +enum LiquidityPoolWithdrawResultCode +{ + // codes considered as "success" for the operation + LIQUIDITY_POOL_WITHDRAW_SUCCESS = 0, + + // codes considered as "failure" for the operation + LIQUIDITY_POOL_WITHDRAW_MALFORMED = -1, // bad input + LIQUIDITY_POOL_WITHDRAW_NO_TRUST = -2, // no trust line for one of the + // assets + LIQUIDITY_POOL_WITHDRAW_UNDERFUNDED = -3, // not enough balance of the + // pool share + LIQUIDITY_POOL_WITHDRAW_LINE_FULL = -4, // would go above limit for one + // of the assets + LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM = -5 // didn't withdraw enough +}; + +union LiquidityPoolWithdrawResult switch ( + LiquidityPoolWithdrawResultCode code) +{ +case LIQUIDITY_POOL_WITHDRAW_SUCCESS: + void; +default: + void; +}; + +/* High level Operation Result */ enum OperationResultCode { opINNER = 0, // inner object result is valid - opBAD_AUTH = -1, // too few valid signatures / wrong network - opNO_ACCOUNT = -2 // source account was not found + opBAD_AUTH = -1, // too few valid signatures / wrong network + opNO_ACCOUNT = -2, // source account was not found + opNOT_SUPPORTED = -3, // operation not supported at this time + opTOO_MANY_SUBENTRIES = -4, // max number of subentries already reached + opEXCEEDED_WORK_LIMIT = -5, // operation did too much work + opTOO_MANY_SPONSORING = -6 // account is sponsoring too many entries }; union OperationResult switch (OperationResultCode code) @@ -650,12 +1437,12 @@ case opINNER: 
CreateAccountResult createAccountResult; case PAYMENT: PaymentResult paymentResult; - case PATH_PAYMENT: - PathPaymentResult pathPaymentResult; - case MANAGE_OFFER: - ManageOfferResult manageOfferResult; - case CREATE_PASSIVE_OFFER: - ManageOfferResult createPassiveOfferResult; + case PATH_PAYMENT_STRICT_RECEIVE: + PathPaymentStrictReceiveResult pathPaymentStrictReceiveResult; + case MANAGE_SELL_OFFER: + ManageSellOfferResult manageSellOfferResult; + case CREATE_PASSIVE_SELL_OFFER: + ManageSellOfferResult createPassiveSellOfferResult; case SET_OPTIONS: SetOptionsResult setOptionsResult; case CHANGE_TRUST: @@ -668,6 +1455,32 @@ case opINNER: InflationResult inflationResult; case MANAGE_DATA: ManageDataResult manageDataResult; + case BUMP_SEQUENCE: + BumpSequenceResult bumpSeqResult; + case MANAGE_BUY_OFFER: + ManageBuyOfferResult manageBuyOfferResult; + case PATH_PAYMENT_STRICT_SEND: + PathPaymentStrictSendResult pathPaymentStrictSendResult; + case CREATE_CLAIMABLE_BALANCE: + CreateClaimableBalanceResult createClaimableBalanceResult; + case CLAIM_CLAIMABLE_BALANCE: + ClaimClaimableBalanceResult claimClaimableBalanceResult; + case BEGIN_SPONSORING_FUTURE_RESERVES: + BeginSponsoringFutureReservesResult beginSponsoringFutureReservesResult; + case END_SPONSORING_FUTURE_RESERVES: + EndSponsoringFutureReservesResult endSponsoringFutureReservesResult; + case REVOKE_SPONSORSHIP: + RevokeSponsorshipResult revokeSponsorshipResult; + case CLAWBACK: + ClawbackResult clawbackResult; + case CLAWBACK_CLAIMABLE_BALANCE: + ClawbackClaimableBalanceResult clawbackClaimableBalanceResult; + case SET_TRUST_LINE_FLAGS: + SetTrustLineFlagsResult setTrustLineFlagsResult; + case LIQUIDITY_POOL_DEPOSIT: + LiquidityPoolDepositResult liquidityPoolDepositResult; + case LIQUIDITY_POOL_WITHDRAW: + LiquidityPoolWithdrawResult liquidityPoolWithdrawResult; } tr; default: @@ -676,7 +1489,8 @@ default: enum TransactionResultCode { - txSUCCESS = 0, // all operations succeeded + txFEE_BUMP_INNER_SUCCESS = 1, // fee bump inner transaction succeeded + txSUCCESS = 0, // all operations succeeded txFAILED = -1, // one of the operations failed (none were applied) @@ -690,7 +1504,56 @@ enum TransactionResultCode txNO_ACCOUNT = -8, // source account not found txINSUFFICIENT_FEE = -9, // fee is too small txBAD_AUTH_EXTRA = -10, // unused signatures attached to transaction - txINTERNAL_ERROR = -11 // an unknown error occured + txINTERNAL_ERROR = -11, // an unknown error occurred + + txNOT_SUPPORTED = -12, // transaction type not supported + txFEE_BUMP_INNER_FAILED = -13, // fee bump inner transaction failed + txBAD_SPONSORSHIP = -14 // sponsorship not confirmed +}; + +// InnerTransactionResult must be binary compatible with TransactionResult +// because it is be used to represent the result of a Transaction. +struct InnerTransactionResult +{ + // Always 0. Here for binary compatibility. 
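Given the expanded OperationResult union above, a common task is digging an operation-specific result back out of it. Here is a hedged sketch, assuming the generated bindings provide OperationResultCodeOpInner, OperationTypePayment and MustTr/MustPaymentResult in the same Must* accessor style as MustEd25519 and MustAlphaNum4 used elsewhere in this diff.

package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

// paymentResultCode extracts the payment-specific code from a generic
// OperationResult, returning false if the result is not an inner payment result.
func paymentResultCode(res xdr.OperationResult) (xdr.PaymentResultCode, bool) {
	if res.Code != xdr.OperationResultCodeOpInner {
		return 0, false
	}
	tr := res.MustTr()
	if tr.Type != xdr.OperationTypePayment {
		return 0, false
	}
	return tr.MustPaymentResult().Code, true
}

func main() {
	res := xdr.OperationResult{
		Code: xdr.OperationResultCodeOpInner,
		Tr: &xdr.OperationResultTr{
			Type:          xdr.OperationTypePayment,
			PaymentResult: &xdr.PaymentResult{Code: xdr.PaymentResultCodePaymentSuccess},
		},
	}
	code, ok := paymentResultCode(res)
	fmt.Println(code, ok)
}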
+ int64 feeCharged; + + union switch (TransactionResultCode code) + { + // txFEE_BUMP_INNER_SUCCESS is not included + case txSUCCESS: + case txFAILED: + OperationResult results<>; + case txTOO_EARLY: + case txTOO_LATE: + case txMISSING_OPERATION: + case txBAD_SEQ: + case txBAD_AUTH: + case txINSUFFICIENT_BALANCE: + case txNO_ACCOUNT: + case txINSUFFICIENT_FEE: + case txBAD_AUTH_EXTRA: + case txINTERNAL_ERROR: + case txNOT_SUPPORTED: + // txFEE_BUMP_INNER_FAILED is not included + case txBAD_SPONSORSHIP: + void; + } + result; + + // reserved for future use + union switch (int v) + { + case 0: + void; + } + ext; +}; + +struct InnerTransactionResultPair +{ + Hash transactionHash; // hash of the inner transaction + InnerTransactionResult result; // result for the inner transaction }; struct TransactionResult @@ -699,6 +1562,9 @@ struct TransactionResult union switch (TransactionResultCode code) { + case txFEE_BUMP_INNER_SUCCESS: + case txFEE_BUMP_INNER_FAILED: + InnerTransactionResultPair innerResultPair; case txSUCCESS: case txFAILED: OperationResult results<>; diff --git a/xdr/Stellar-types.x b/xdr/Stellar-types.x index 0f5adaad0c..8f7d5c2060 100644 --- a/xdr/Stellar-types.x +++ b/xdr/Stellar-types.x @@ -16,15 +16,44 @@ typedef hyper int64; enum CryptoKeyType { - KEY_TYPE_ED25519 = 0 + KEY_TYPE_ED25519 = 0, + KEY_TYPE_PRE_AUTH_TX = 1, + KEY_TYPE_HASH_X = 2, + // MUXED enum values for supported type are derived from the enum values + // above by ORing them with 0x100 + KEY_TYPE_MUXED_ED25519 = 0x100 }; -union PublicKey switch (CryptoKeyType type) +enum PublicKeyType { -case KEY_TYPE_ED25519: + PUBLIC_KEY_TYPE_ED25519 = KEY_TYPE_ED25519 +}; + +enum SignerKeyType +{ + SIGNER_KEY_TYPE_ED25519 = KEY_TYPE_ED25519, + SIGNER_KEY_TYPE_PRE_AUTH_TX = KEY_TYPE_PRE_AUTH_TX, + SIGNER_KEY_TYPE_HASH_X = KEY_TYPE_HASH_X +}; + +union PublicKey switch (PublicKeyType type) +{ +case PUBLIC_KEY_TYPE_ED25519: uint256 ed25519; }; +union SignerKey switch (SignerKeyType type) +{ +case SIGNER_KEY_TYPE_ED25519: + uint256 ed25519; +case SIGNER_KEY_TYPE_PRE_AUTH_TX: + /* SHA-256 Hash of TransactionSignaturePayload structure */ + uint256 preAuthTx; +case SIGNER_KEY_TYPE_HASH_X: + /* Hash of random 256 bit preimage X */ + uint256 hashX; +}; + // variable size as the size depends on the signature scheme used typedef opaque Signature<64>; @@ -34,22 +63,21 @@ typedef PublicKey NodeID; struct Curve25519Secret { - opaque key[32]; + opaque key[32]; }; struct Curve25519Public { - opaque key[32]; + opaque key[32]; }; struct HmacSha256Key { - opaque key[32]; + opaque key[32]; }; struct HmacSha256Mac { - opaque mac[32]; + opaque mac[32]; }; - } diff --git a/xdr/account_entry.go b/xdr/account_entry.go index f42449ce64..41a0dd70bf 100644 --- a/xdr/account_entry.go +++ b/xdr/account_entry.go @@ -1,14 +1,87 @@ package xdr -func (a *AccountEntry) SignerSummary() map[string]int32 { +func (account *AccountEntry) SignerSummary() map[string]int32 { ret := map[string]int32{} - if a.Thresholds[0] > 0 { - ret[a.AccountId.Address()] = int32(a.Thresholds[0]) + if account.MasterKeyWeight() > 0 { + ret[account.AccountId.Address()] = int32(account.Thresholds[0]) } - for _, signer := range a.Signers { - ret[signer.PubKey.Address()] = int32(signer.Weight) + for _, signer := range account.Signers { + ret[signer.Key.Address()] = int32(signer.Weight) } return ret } + +func (account *AccountEntry) MasterKeyWeight() byte { + return account.Thresholds.MasterKeyWeight() +} + +func (account *AccountEntry) ThresholdLow() byte { + return 
account.Thresholds.ThresholdLow() +} + +func (account *AccountEntry) ThresholdMedium() byte { + return account.Thresholds.ThresholdMedium() +} + +func (account *AccountEntry) ThresholdHigh() byte { + return account.Thresholds.ThresholdHigh() +} + +// Liabilities returns AccountEntry's liabilities +func (account *AccountEntry) Liabilities() Liabilities { + var liabilities Liabilities + if account.Ext.V1 != nil { + liabilities = account.Ext.V1.Liabilities + } + return liabilities +} + +// NumSponsored returns NumSponsored value for account. +func (account *AccountEntry) NumSponsored() Uint32 { + var numSponsored Uint32 + if account.Ext.V1 != nil && + account.Ext.V1.Ext.V2 != nil { + numSponsored = account.Ext.V1.Ext.V2.NumSponsored + } + return numSponsored +} + +// NumSponsoring returns NumSponsoring value for account. +func (account *AccountEntry) NumSponsoring() Uint32 { + var numSponsoring Uint32 + if account.Ext.V1 != nil && + account.Ext.V1.Ext.V2 != nil { + numSponsoring = account.Ext.V1.Ext.V2.NumSponsoring + } + return numSponsoring +} + +// SignerSponsoringIDs returns SignerSponsoringIDs value for account. +// This will return a slice of nil values if V2 extension does not exist. +func (account *AccountEntry) SignerSponsoringIDs() []SponsorshipDescriptor { + var ids []SponsorshipDescriptor + if account.Ext.V1 != nil && + account.Ext.V1.Ext.V2 != nil { + ids = account.Ext.V1.Ext.V2.SignerSponsoringIDs + } else { + ids = make([]SponsorshipDescriptor, len(account.Signers)) + } + return ids +} + +// SponsorPerSigner returns a mapping of signer to its sponsor +func (account *AccountEntry) SponsorPerSigner() map[string]AccountId { + ids := account.SignerSponsoringIDs() + + signerToSponsor := map[string]AccountId{} + + for i, signerEntry := range account.Signers { + if ids[i] != nil { + signerToSponsor[signerEntry.Key.Address()] = *ids[i] + } + } + + return signerToSponsor +} diff --git a/xdr/account_entry_test.go b/xdr/account_entry_test.go index affd190686..20c7220ce7 100644 --- a/xdr/account_entry_test.go +++ b/xdr/account_entry_test.go @@ -1,8 +1,11 @@ package xdr_test import ( + "testing" + . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" . 
"github.com/stellar/go/xdr" ) @@ -35,7 +38,7 @@ var _ = Describe("xdr.AccountEntry#SignerSummary()", func() { } summary := account.SignerSummary() for _, signer := range account.Signers { - addy := signer.PubKey.Address() + addy := signer.Key.Address() Expect(summary).To(HaveKey(addy)) Expect(summary[addy]).To(Equal(int32(signer.Weight))) } @@ -43,7 +46,70 @@ var _ = Describe("xdr.AccountEntry#SignerSummary()", func() { }) func signer(address string, weight int) (ret Signer) { - ret.PubKey.SetAddress(address) + + ret.Key.SetAddress(address) ret.Weight = Uint32(weight) return } + +func TestAccountEntryLiabilities(t *testing.T) { + account := AccountEntry{} + liabilities := account.Liabilities() + assert.Equal(t, Int64(0), liabilities.Buying) + assert.Equal(t, Int64(0), liabilities.Selling) + + account = AccountEntry{ + Ext: AccountEntryExt{ + V1: &AccountEntryExtensionV1{ + Liabilities: Liabilities{ + Buying: 100, + Selling: 101, + }, + }, + }, + } + liabilities = account.Liabilities() + assert.Equal(t, Int64(100), liabilities.Buying) + assert.Equal(t, Int64(101), liabilities.Selling) +} + +func TestAccountEntrySponsorships(t *testing.T) { + account := AccountEntry{} + sponsored := account.NumSponsored() + sponsoring := account.NumSponsoring() + signerIDs := account.SignerSponsoringIDs() + assert.Equal(t, Uint32(0), sponsored) + assert.Equal(t, Uint32(0), sponsoring) + assert.Empty(t, signerIDs) + + signer := MustSigner("GCA4M7QXVBVEVRBU53PJZPXANRNPESGKGOT7UZ4RR4CBVBMQHMFKLZ4W") + sponsor := MustAddress("GCO26ZSBD63TKYX45H2C7D2WOFWOUSG5BMTNC3BG4QMXM3PAYI6WHKVZ") + desc := SponsorshipDescriptor(&sponsor) + account = AccountEntry{ + Signers: []Signer{ + {Key: signer}, + }, + Ext: AccountEntryExt{ + V1: &AccountEntryExtensionV1{ + Ext: AccountEntryExtensionV1Ext{ + V2: &AccountEntryExtensionV2{ + NumSponsored: 1, + NumSponsoring: 2, + SignerSponsoringIDs: []SponsorshipDescriptor{desc}, + }, + }, + }, + }, + } + sponsored = account.NumSponsored() + sponsoring = account.NumSponsoring() + signerIDs = account.SignerSponsoringIDs() + expectedSponsorsForSigners := map[string]AccountId{ + signer.Address(): sponsor, + } + assert.Equal(t, Uint32(1), sponsored) + assert.Equal(t, Uint32(2), sponsoring) + assert.Len(t, signerIDs, 1) + assert.Equal(t, desc, signerIDs[0]) + assert.Equal(t, expectedSponsorsForSigners, account.SponsorPerSigner()) +} diff --git a/xdr/account_flags.go b/xdr/account_flags.go new file mode 100644 index 0000000000..8989af9801 --- /dev/null +++ b/xdr/account_flags.go @@ -0,0 +1,25 @@ +package xdr + +// IsAuthRequired returns true if the account has the "AUTH_REQUIRED" option +// turned on. +func (accountFlags AccountFlags) IsAuthRequired() bool { + return (accountFlags & AccountFlagsAuthRequiredFlag) != 0 +} + +// IsAuthRevocable returns true if the account has the "AUTH_REVOCABLE" option +// turned on. +func (accountFlags AccountFlags) IsAuthRevocable() bool { + return (accountFlags & AccountFlagsAuthRevocableFlag) != 0 +} + +// IsAuthImmutable returns true if the account has the "AUTH_IMMUTABLE" option +// turned on. +func (accountFlags AccountFlags) IsAuthImmutable() bool { + return (accountFlags & AccountFlagsAuthImmutableFlag) != 0 +} + +// IsAuthClawbackEnabled returns true if the account has the "AUTH_CLAWBACK_ENABLED" option +// turned on. 
+func (accountFlags AccountFlags) IsAuthClawbackEnabled() bool { + return (accountFlags & AccountFlagsAuthClawbackEnabledFlag) != 0 +} diff --git a/xdr/account_flags_test.go b/xdr/account_flags_test.go new file mode 100644 index 0000000000..40a860c6f7 --- /dev/null +++ b/xdr/account_flags_test.go @@ -0,0 +1,73 @@ +package xdr_test + +import ( + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestIsAuthRequired(t *testing.T) { + tt := assert.New(t) + + flag := xdr.AccountFlags(1) + tt.True(flag.IsAuthRequired()) + + flag = xdr.AccountFlags(0) + tt.False(flag.IsAuthRequired()) + + flag = xdr.AccountFlags(2) + tt.False(flag.IsAuthRequired()) + + flag = xdr.AccountFlags(4) + tt.False(flag.IsAuthRequired()) + +} + +func TestIsAuthRevocable(t *testing.T) { + tt := assert.New(t) + + flag := xdr.AccountFlags(2) + tt.True(flag.IsAuthRevocable()) + + flag = xdr.AccountFlags(0) + tt.False(flag.IsAuthRevocable()) + + flag = xdr.AccountFlags(1) + tt.False(flag.IsAuthRevocable()) + + flag = xdr.AccountFlags(4) + tt.False(flag.IsAuthRevocable()) + +} +func TestIsAuthImmutable(t *testing.T) { + tt := assert.New(t) + + flag := xdr.AccountFlags(4) + tt.True(flag.IsAuthImmutable()) + + flag = xdr.AccountFlags(0) + tt.False(flag.IsAuthImmutable()) + + flag = xdr.AccountFlags(1) + tt.False(flag.IsAuthImmutable()) + + flag = xdr.AccountFlags(2) + tt.False(flag.IsAuthImmutable()) +} + +func TestIsAuthClawbackEnabled(t *testing.T) { + tt := assert.New(t) + + flag := xdr.AccountFlags(8) + tt.True(flag.IsAuthClawbackEnabled()) + + flag = xdr.AccountFlags(0) + tt.False(flag.IsAuthClawbackEnabled()) + + flag = xdr.AccountFlags(1) + tt.False(flag.IsAuthClawbackEnabled()) + + flag = xdr.AccountFlags(2) + tt.False(flag.IsAuthClawbackEnabled()) +} diff --git a/xdr/account_id.go b/xdr/account_id.go index 79beda2fd4..9b9e231a8c 100644 --- a/xdr/account_id.go +++ b/xdr/account_id.go @@ -9,19 +9,32 @@ import ( // Address returns the strkey encoded form of this AccountId. This method will // panic if the accountid is backed by a public key of an unknown type. -func (aid *AccountId) Address() string { +func (aid AccountId) Address() string { + address, err := aid.GetAddress() + if err != nil { + panic(err) + } + return address +} + +// GetAddress returns the strkey encoded form of this AccountId, and an error +// if the AccountId is backed by a public key of an unknown type. 
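A hedged sketch tying together the AccountId helpers added in xdr/account_id.go (MustAddress, GetAddress, ToMuxedAccount), using only signatures visible in this diff and an address borrowed from its tests.

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/xdr"
)

func main() {
	aid := xdr.MustAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH")

	// GetAddress is the error-returning counterpart of the panicking Address().
	addr, err := aid.GetAddress()
	if err != nil {
		log.Fatal(err)
	}

	// ToMuxedAccount wraps the same ed25519 key in a MuxedAccount with a zero memo id.
	muxed := aid.ToMuxedAccount()

	fmt.Println(addr, muxed.Type == xdr.CryptoKeyTypeKeyTypeEd25519) // address, true
}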
+func (aid *AccountId) GetAddress() (string, error) { if aid == nil { - return "" + return "", nil } switch aid.Type { - case CryptoKeyTypeKeyTypeEd25519: - ed := aid.MustEd25519() + case PublicKeyTypePublicKeyTypeEd25519: + ed, ok := aid.GetEd25519() + if !ok { + return "", fmt.Errorf("Could not get Ed25519") + } raw := make([]byte, 32) copy(raw, ed[:]) - return strkey.MustEncode(strkey.VersionByteAccountID, raw) + return strkey.Encode(strkey.VersionByteAccountID, raw) default: - panic(fmt.Errorf("Unknown account id type: %v", aid.Type)) + return "", fmt.Errorf("Unknown account id type: %v", aid.Type) } } @@ -32,7 +45,7 @@ func (aid *AccountId) Equals(other AccountId) bool { } switch aid.Type { - case CryptoKeyTypeKeyTypeEd25519: + case PublicKeyTypePublicKeyTypeEd25519: l := aid.MustEd25519() r := other.MustEd25519() return l == r @@ -51,6 +64,42 @@ func (aid *AccountId) LedgerKey() (ret LedgerKey) { return } +func (e *EncodingBuffer) accountIdCompressEncodeTo(aid AccountId) error { + if err := e.xdrEncoderBuf.WriteByte(byte(aid.Type)); err != nil { + return err + } + switch aid.Type { + case PublicKeyTypePublicKeyTypeEd25519: + _, err := e.xdrEncoderBuf.Write(aid.Ed25519[:]) + return err + default: + panic("Unknown type") + } +} + +func MustAddress(address string) AccountId { + aid := AccountId{} + err := aid.SetAddress(address) + if err != nil { + panic(err) + } + return aid +} + +func MustAddressPtr(address string) *AccountId { + aid := MustAddress(address) + return &aid +} + +// AddressToAccountId returns an AccountId for a given address string. +// If the address is not valid the error returned will not be nil +func AddressToAccountId(address string) (AccountId, error) { + result := AccountId{} + err := result.SetAddress(address) + + return result, err +} + // SetAddress modifies the receiver, setting it's value to the AccountId form // of the provided address. func (aid *AccountId) SetAddress(address string) error { @@ -70,7 +119,20 @@ func (aid *AccountId) SetAddress(address string) error { var ui Uint256 copy(ui[:], raw) - *aid, err = NewAccountId(CryptoKeyTypeKeyTypeEd25519, ui) + *aid, err = NewAccountId(PublicKeyTypePublicKeyTypeEd25519, ui) return err } + +// ToMuxedAccount transforms an AccountId into a MuxedAccount with +// a zero memo id +func (aid *AccountId) ToMuxedAccount() MuxedAccount { + result := MuxedAccount{Type: CryptoKeyTypeKeyTypeEd25519} + switch aid.Type { + case PublicKeyTypePublicKeyTypeEd25519: + result.Ed25519 = aid.Ed25519 + default: + panic(fmt.Errorf("Unknown account id type: %v", aid.Type)) + } + return result +} diff --git a/xdr/account_id_test.go b/xdr/account_id_test.go index 0b845e6a66..5211b45873 100644 --- a/xdr/account_id_test.go +++ b/xdr/account_id_test.go @@ -1,21 +1,17 @@ package xdr_test import ( - . "github.com/stellar/go/xdr" - . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + + . 
"github.com/stellar/go/xdr" ) var _ = Describe("xdr.AccountId#Address()", func() { - It("returns an empty string when account id is nil", func() { - addy := (*AccountId)(nil).Address() - Expect(addy).To(Equal("")) - }) - It("returns a strkey string when account id is valid", func() { var aid AccountId - aid.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") + err := aid.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") + Expect(err).ShouldNot(HaveOccurred()) addy := aid.Address() Expect(addy).To(Equal("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH")) }) @@ -24,15 +20,19 @@ var _ = Describe("xdr.AccountId#Address()", func() { var _ = Describe("xdr.AccountId#Equals()", func() { It("returns true when the account ids have equivalent values", func() { var l, r AccountId - l.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") - r.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") + err := l.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") + Expect(err).ShouldNot(HaveOccurred()) + err = r.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") + Expect(err).ShouldNot(HaveOccurred()) Expect(l.Equals(r)).To(BeTrue()) }) It("returns false when the account ids have different values", func() { var l, r AccountId - l.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") - r.SetAddress("GBTBXQEVDNVUEESCTPUT3CHJDVNG44EMPMBELH5F7H3YPHXPZXOTEWB4") + err := l.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") + Expect(err).ShouldNot(HaveOccurred()) + err = r.SetAddress("GBTBXQEVDNVUEESCTPUT3CHJDVNG44EMPMBELH5F7H3YPHXPZXOTEWB4") + Expect(err).ShouldNot(HaveOccurred()) Expect(l.Equals(r)).To(BeFalse()) }) }) @@ -40,10 +40,37 @@ var _ = Describe("xdr.AccountId#Equals()", func() { var _ = Describe("xdr.AccountId#LedgerKey()", func() { It("works", func() { var aid AccountId - aid.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") + err := aid.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") + Expect(err).ShouldNot(HaveOccurred()) key := aid.LedgerKey() packed := key.MustAccount().AccountId Expect(packed.Equals(aid)).To(BeTrue()) }) }) + +var _ = Describe("xdr.AddressToAccountID()", func() { + It("works", func() { + address := "GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH" + accountID, err := AddressToAccountId(address) + + Expect(accountID.Address()).To(Equal("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH")) + Expect(err).ShouldNot(HaveOccurred()) + + _, err = AddressToAccountId("GCR22L3") + + Expect(err).Should(HaveOccurred()) + }) +}) + +var _ = Describe("xdr.AccountId#ToMuxedAccount()", func() { + It("works", func() { + address := "GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH" + accountID, err := AddressToAccountId(address) + Expect(err).ShouldNot(HaveOccurred()) + muxed := accountID.ToMuxedAccount() + + Expect(muxed.Type).To(Equal(CryptoKeyTypeKeyTypeEd25519)) + Expect(*muxed.Ed25519).To(Equal(*accountID.Ed25519)) + }) +}) diff --git a/xdr/account_thresholds.go b/xdr/account_thresholds.go new file mode 100644 index 0000000000..d9e0db35aa --- /dev/null +++ b/xdr/account_thresholds.go @@ -0,0 +1,21 @@ +package xdr + +func (t Thresholds) MasterKeyWeight() byte { + return t[0] +} + +func (t Thresholds) ThresholdLow() byte { + return t[1] +} + +func (t Thresholds) ThresholdMedium() byte { + return t[2] +} + +func (t Thresholds) ThresholdHigh() byte { + return t[3] +} + +func 
NewThreshold(masterKey, low, medium, high byte) Thresholds { + return Thresholds{masterKey, low, medium, high} +} diff --git a/xdr/accounts_thesholds_test.go b/xdr/accounts_thesholds_test.go new file mode 100644 index 0000000000..c6f445b37f --- /dev/null +++ b/xdr/accounts_thesholds_test.go @@ -0,0 +1,17 @@ +package xdr_test + +import ( + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestNewThreshold(t *testing.T) { + threshold := xdr.NewThreshold(1, 2, 3, 4) + + assert.Equal(t, byte(1), threshold.MasterKeyWeight()) + assert.Equal(t, byte(2), threshold.ThresholdLow()) + assert.Equal(t, byte(3), threshold.ThresholdMedium()) + assert.Equal(t, byte(4), threshold.ThresholdHigh()) +} diff --git a/xdr/asset.go b/xdr/asset.go index a98415199e..9eea1ece9c 100644 --- a/xdr/asset.go +++ b/xdr/asset.go @@ -3,6 +3,7 @@ package xdr import ( "errors" "fmt" + "regexp" "strings" "github.com/stellar/go/strkey" @@ -10,6 +11,174 @@ import ( // This file contains helpers for working with xdr.Asset structs +// AssetTypeToString maps an xdr.AssetType to its string representation +var AssetTypeToString = map[AssetType]string{ + AssetTypeAssetTypeNative: "native", + AssetTypeAssetTypeCreditAlphanum4: "credit_alphanum4", + AssetTypeAssetTypeCreditAlphanum12: "credit_alphanum12", +} + +// StringToAssetType maps an strings to its xdr.AssetType representation +var StringToAssetType = map[string]AssetType{ + "native": AssetTypeAssetTypeNative, + "credit_alphanum4": AssetTypeAssetTypeCreditAlphanum4, + "credit_alphanum12": AssetTypeAssetTypeCreditAlphanum12, +} + +// MustNewNativeAsset returns a new native asset, panicking if it can't. +func MustNewNativeAsset() Asset { + a := Asset{} + err := a.SetNative() + if err != nil { + panic(err) + } + return a +} + +// MustNewCreditAsset returns a new general asset, panicking if it can't. +func MustNewCreditAsset(code string, issuer string) Asset { + a, err := NewCreditAsset(code, issuer) + if err != nil { + panic(err) + } + return a +} + +// NewAssetCodeFromString returns a new allow trust asset, panicking if it can't. +func NewAssetCodeFromString(code string) (AssetCode, error) { + a := AssetCode{} + length := len(code) + switch { + case length >= 1 && length <= 4: + var newCode AssetCode4 + copy(newCode[:], []byte(code)[:length]) + a.Type = AssetTypeAssetTypeCreditAlphanum4 + a.AssetCode4 = &newCode + case length >= 5 && length <= 12: + var newCode AssetCode12 + copy(newCode[:], []byte(code)[:length]) + a.Type = AssetTypeAssetTypeCreditAlphanum12 + a.AssetCode12 = &newCode + default: + return a, errors.New("Asset code length is invalid") + } + + return a, nil +} + +// MustNewAssetCodeFromString returns a new allow trust asset, panicking if it can't. +func MustNewAssetCodeFromString(code string) AssetCode { + a, err := NewAssetCodeFromString(code) + if err != nil { + panic(err) + } + + return a +} + +// NewCreditAsset returns a new general asset, returning an error if it can't. +func NewCreditAsset(code string, issuer string) (Asset, error) { + a := Asset{} + accountID := AccountId{} + if err := accountID.SetAddress(issuer); err != nil { + return Asset{}, err + } + if err := a.SetCredit(code, accountID); err != nil { + return Asset{}, err + } + return a, nil +} + +// BuildAsset creates a new asset from a given `assetType`, `code`, and `issuer`. 
+// +// Valid assetTypes are: +// - `native` +// - `credit_alphanum4` +// - `credit_alphanum12` +func BuildAsset(assetType, issuer, code string) (Asset, error) { + t, ok := StringToAssetType[assetType] + + if !ok { + return Asset{}, errors.New("invalid asset type: was not one of 'native', 'credit_alphanum4', 'credit_alphanum12'") + } + + var asset Asset + switch t { + case AssetTypeAssetTypeNative: + if err := asset.SetNative(); err != nil { + return Asset{}, err + } + default: + issuerAccountID := AccountId{} + if err := issuerAccountID.SetAddress(issuer); err != nil { + return Asset{}, err + } + + if err := asset.SetCredit(code, issuerAccountID); err != nil { + return Asset{}, err + } + } + + return asset, nil +} + +var ValidAssetCode = regexp.MustCompile("^[[:alnum:]]{1,12}$") + +// BuildAssets parses a list of assets from a given string. +// The string is expected to be a comma separated list of assets +// encoded in the format (Code:Issuer or "native") defined by SEP-0011 +// https://github.com/stellar/stellar-protocol/pull/313 +// If the string is empty, BuildAssets will return an empty list of assets +func BuildAssets(s string) ([]Asset, error) { + var assets []Asset + if s == "" { + return assets, nil + } + + assetStrings := strings.Split(s, ",") + for _, assetString := range assetStrings { + var asset Asset + + // Technically https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0011.md allows + // any string up to 12 characters not containing an unescaped colon to represent XLM + // however, this function only accepts the string "native" to represent XLM + if strings.ToLower(assetString) == "native" { + if err := asset.SetNative(); err != nil { + return nil, err + } + } else { + parts := strings.Split(assetString, ":") + if len(parts) != 2 { + return nil, fmt.Errorf("%s is not a valid asset", assetString) + } + + code := parts[0] + if !ValidAssetCode.MatchString(code) { + return nil, fmt.Errorf( + "%s is not a valid asset, it contains an invalid asset code", + assetString, + ) + } + + issuer, err := AddressToAccountId(parts[1]) + if err != nil { + return nil, fmt.Errorf( + "%s is not a valid asset, it contains an invalid issuer", + assetString, + ) + } + + if err := asset.SetCredit(code, issuer); err != nil { + return nil, fmt.Errorf("%s is not a valid asset", assetString) + } + } + + assets = append(assets, asset) + } + + return assets, nil +} + // SetCredit overwrites `a` with a credit asset using `code` and `issuer`. The // asset type (CreditAlphanum4 or CreditAlphanum12) is chosen automatically // based upon the length of `code`. @@ -20,14 +189,14 @@ func (a *Asset) SetCredit(code string, issuer AccountId) error { switch { case length >= 1 && length <= 4: - newbody := AssetAlphaNum4{Issuer: issuer} + newbody := AlphaNum4{Issuer: issuer} copy(newbody.AssetCode[:], []byte(code)[:length]) typ = AssetTypeAssetTypeCreditAlphanum4 body = newbody case length >= 5 && length <= 12: - newbody := AssetAlphaNum12{Issuer: issuer} + newbody := AlphaNum12{Issuer: issuer} copy(newbody.AssetCode[:], []byte(code)[:length]) - typ = AssetTypeAssetTypeCreditAlphanum4 + typ = AssetTypeAssetTypeCreditAlphanum12 body = newbody default: return errors.New("Asset code length is invalid") @@ -51,6 +220,27 @@ func (a *Asset) SetNative() error { return nil } +// ToAssetCode for Asset converts the Asset to a corresponding XDR +// "allow trust" asset, used by the XDR allow trust operation. 
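+//
+// A minimal usage sketch (illustrative only; the codes "USD" and "STELLARCODE"
+// are example values, not part of this change). Only the `code` argument is
+// read; the receiver's own code and issuer are ignored:
+//
+//	var a Asset
+//	if ac, err := a.ToAssetCode("USD"); err == nil {
+//		_ = ac.MustAssetCode4() // 1-4 characters yield an AssetCode4
+//	}
+//	if ac, err := a.ToAssetCode("STELLARCODE"); err == nil {
+//		_ = ac.MustAssetCode12() // 5-12 characters yield an AssetCode12
+//	}
+//	// An empty code, or one longer than 12 characters, returns an error.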
+func (a *Asset) ToAssetCode(code string) (AssetCode, error) { + length := len(code) + + switch { + case length >= 1 && length <= 4: + var bytecode AssetCode4 + byteArray := []byte(code) + copy(bytecode[:], byteArray[0:length]) + return NewAssetCode(AssetTypeAssetTypeCreditAlphanum4, bytecode) + case length >= 5 && length <= 12: + var bytecode AssetCode12 + byteArray := []byte(code) + copy(bytecode[:], byteArray[0:length]) + return NewAssetCode(AssetTypeAssetTypeCreditAlphanum12, bytecode) + default: + return AssetCode{}, errors.New("Asset code length is invalid") + } +} + // String returns a display friendly form of the asset func (a Asset) String() string { var t, c, i string @@ -61,7 +251,21 @@ func (a Asset) String() string { return t } - return fmt.Sprintf("%s/%s/%s", t, c, i) + return t + "/" + c + "/" + i +} + +// StringCanonical returns a display friendly form of the asset following its +// canonical representation +func (a Asset) StringCanonical() string { + var t, c, i string + + a.MustExtract(&t, &c, &i) + + if a.Type == AssetTypeAssetTypeNative { + return t + } + + return c + ":" + i } // Equals returns true if `other` is equivalent to `a` @@ -95,14 +299,7 @@ func (a Asset) Extract(typ interface{}, code interface{}, issuer interface{}) er case *AssetType: *typ = a.Type case *string: - switch a.Type { - case AssetTypeAssetTypeNative: - *typ = "native" - case AssetTypeAssetTypeCreditAlphanum4: - *typ = "credit_alphanum4" - case AssetTypeAssetTypeCreditAlphanum12: - *typ = "credit_alphanum12" - } + *typ = AssetTypeToString[a.Type] default: return errors.New("can't extract type") } @@ -113,10 +310,10 @@ func (a Asset) Extract(typ interface{}, code interface{}, issuer interface{}) er switch a.Type { case AssetTypeAssetTypeCreditAlphanum4: an := a.MustAlphaNum4() - *code = strings.TrimRight(string(an.AssetCode[:]), "\x00") + *code = string(trimRightZeros(an.AssetCode[:])) case AssetTypeAssetTypeCreditAlphanum12: an := a.MustAlphaNum12() - *code = strings.TrimRight(string(an.AssetCode[:]), "\x00") + *code = string(trimRightZeros(an.AssetCode[:])) } default: return errors.New("can't extract code") @@ -152,3 +349,87 @@ func (a Asset) MustExtract(typ interface{}, code interface{}, issuer interface{} panic(err) } } + +// ToChangeTrustAsset converts Asset to ChangeTrustAsset. +func (a Asset) ToChangeTrustAsset() ChangeTrustAsset { + var cta ChangeTrustAsset + + cta.Type = a.Type + + switch a.Type { + case AssetTypeAssetTypeNative: + // Empty branch + case AssetTypeAssetTypeCreditAlphanum4: + assetCode4 := *a.AlphaNum4 + cta.AlphaNum4 = &assetCode4 + case AssetTypeAssetTypeCreditAlphanum12: + assetCode12 := *a.AlphaNum12 + cta.AlphaNum12 = &assetCode12 + default: + panic(fmt.Errorf("Cannot transform type %v to Asset", a.Type)) + } + + return cta +} + +// ToTrustLineAsset converts Asset to TrustLineAsset. 
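+//
+// A minimal sketch, assuming a credit asset built with MustNewCreditAsset;
+// the code and issuer below are the values already used in this package's
+// tests:
+//
+//	a := MustNewCreditAsset("USD", "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")
+//	tla := a.ToTrustLineAsset()
+//	if tla.Type == AssetTypeAssetTypeCreditAlphanum4 {
+//		// tla.AlphaNum4 holds a copy of a.AlphaNum4, so the original Asset
+//		// is left untouched.
+//	}
+//
+// A native Asset converts to a native TrustLineAsset; any other asset type panics.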
+func (a Asset) ToTrustLineAsset() TrustLineAsset { + var tla TrustLineAsset + + tla.Type = a.Type + + switch a.Type { + case AssetTypeAssetTypeNative: + // Empty branch + case AssetTypeAssetTypeCreditAlphanum4: + assetCode4 := *a.AlphaNum4 + tla.AlphaNum4 = &assetCode4 + case AssetTypeAssetTypeCreditAlphanum12: + assetCode12 := *a.AlphaNum12 + tla.AlphaNum12 = &assetCode12 + default: + panic(fmt.Errorf("Cannot transform type %v to Asset", a.Type)) + } + + return tla +} + +func (a *Asset) GetCode() string { + switch a.Type { + case AssetTypeAssetTypeNative: + return "" + case AssetTypeAssetTypeCreditAlphanum4: + return string((*a.AlphaNum4).AssetCode[:]) + case AssetTypeAssetTypeCreditAlphanum12: + return string((*a.AlphaNum12).AssetCode[:]) + default: + return "" + } +} + +func (a *Asset) GetIssuer() string { + switch a.Type { + case AssetTypeAssetTypeNative: + return "" + case AssetTypeAssetTypeCreditAlphanum4: + addr, _ := (*a.AlphaNum4).Issuer.GetAddress() + return addr + case AssetTypeAssetTypeCreditAlphanum12: + addr, _ := (*a.AlphaNum12).Issuer.GetAddress() + return addr + default: + return "" + } +} + +func (a *Asset) LessThan(b Asset) bool { + if a.Type != b.Type { + return int32(a.Type) < int32(b.Type) + } + + if a.GetCode() != b.GetCode() { + return a.GetCode() < b.GetCode() + } + + return a.GetIssuer() < b.GetIssuer() +} diff --git a/xdr/allow_trust_op_asset.go b/xdr/asset_code.go similarity index 58% rename from xdr/allow_trust_op_asset.go rename to xdr/asset_code.go index e2f818cc8c..8ef86d92f0 100644 --- a/xdr/allow_trust_op_asset.go +++ b/xdr/asset_code.go @@ -4,19 +4,18 @@ import ( "fmt" ) -// ToAsset converts `a` to a proper xdr.Asset -func (a AllowTrustOpAsset) ToAsset(issuer AccountId) (ret Asset) { +// ToAsset for AssetCode converts the xdr.AssetCode to a standard xdr.Asset. 
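+//
+// A minimal sketch mirroring the accompanying test; the code and issuer
+// values here are illustrative:
+//
+//	var issuer AccountId
+//	if err := issuer.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH"); err != nil {
+//		panic(err)
+//	}
+//	ac := MustNewAssetCodeFromString("USD")
+//	asset := ac.ToAsset(issuer)
+//	if asset.Type == AssetTypeAssetTypeCreditAlphanum4 {
+//		// asset.MustAlphaNum4().Issuer now carries the supplied issuer.
+//	}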
+func (a AssetCode) ToAsset(issuer AccountId) (asset Asset) { var err error switch a.Type { case AssetTypeAssetTypeCreditAlphanum4: - - ret, err = NewAsset(AssetTypeAssetTypeCreditAlphanum4, AssetAlphaNum4{ + asset, err = NewAsset(AssetTypeAssetTypeCreditAlphanum4, AlphaNum4{ AssetCode: a.MustAssetCode4(), Issuer: issuer, }) case AssetTypeAssetTypeCreditAlphanum12: - ret, err = NewAsset(AssetTypeAssetTypeCreditAlphanum12, AssetAlphaNum12{ + asset, err = NewAsset(AssetTypeAssetTypeCreditAlphanum12, AlphaNum12{ AssetCode: a.MustAssetCode12(), Issuer: issuer, }) diff --git a/xdr/allow_trust_op_asset_test.go b/xdr/asset_code_test.go similarity index 78% rename from xdr/allow_trust_op_asset_test.go rename to xdr/asset_code_test.go index 6459e4b666..446e288a6b 100644 --- a/xdr/allow_trust_op_asset_test.go +++ b/xdr/asset_code_test.go @@ -11,12 +11,12 @@ var _ = Describe("xdr.AllowTrustOpAsset#ToAsset()", func() { It("works", func() { var aid AccountId aid.SetAddress("GCR22L3WS7TP72S4Z27YTO6JIQYDJK2KLS2TQNHK6Y7XYPA3AGT3X4FH") - ata, _ := NewAllowTrustOpAsset(AssetTypeAssetTypeCreditAlphanum4, [4]byte{0x01}) + ata, _ := NewAssetCode(AssetTypeAssetTypeCreditAlphanum4, AssetCode4{0x01}) a := ata.ToAsset(aid) Expect(a.Type).To(Equal(AssetTypeAssetTypeCreditAlphanum4)) Expect(a.MustAlphaNum4().AssetCode[0]).To(Equal(uint8(0x01))) - ata, _ = NewAllowTrustOpAsset(AssetTypeAssetTypeCreditAlphanum12, [12]byte{0x02}) + ata, _ = NewAssetCode(AssetTypeAssetTypeCreditAlphanum12, AssetCode12{0x02}) a = ata.ToAsset(aid) Expect(a.Type).To(Equal(AssetTypeAssetTypeCreditAlphanum12)) Expect(a.MustAlphaNum12().AssetCode[0]).To(Equal(uint8(0x02))) diff --git a/xdr/asset_test.go b/xdr/asset_test.go index fb25e99af1..e1d99d9454 100644 --- a/xdr/asset_test.go +++ b/xdr/asset_test.go @@ -1,10 +1,14 @@ package xdr_test import ( + "testing" + . "github.com/stellar/go/xdr" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var _ = Describe("xdr.Asset#Extract()", func() { @@ -35,7 +39,7 @@ var _ = Describe("xdr.Asset#Extract()", func() { Context("asset is credit_alphanum4", func() { BeforeEach(func() { var err error - an := AssetAlphaNum4{} + an := AlphaNum4{} err = an.Issuer.SetAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") Expect(err).To(BeNil()) copy(an.AssetCode[:], []byte("USD")) @@ -86,7 +90,7 @@ var _ = Describe("xdr.Asset#String()", func() { Context("asset is credit_alphanum4", func() { BeforeEach(func() { var err error - an := AssetAlphaNum4{} + an := AlphaNum4{} err = an.Issuer.SetAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") Expect(err).To(BeNil()) copy(an.AssetCode[:], []byte("USD")) @@ -101,6 +105,14 @@ var _ = Describe("xdr.Asset#String()", func() { }) }) +func TestStringCanonical(t *testing.T) { + asset := MustNewNativeAsset() + require.Equal(t, "native", asset.StringCanonical()) + + asset = MustNewCreditAsset("USD", "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + require.Equal(t, "USD:GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", asset.StringCanonical()) +} + var _ = Describe("xdr.Asset#Equals()", func() { var ( issuer1 AccountId @@ -128,25 +140,25 @@ var _ = Describe("xdr.Asset#Equals()", func() { native, err = NewAsset(AssetTypeAssetTypeNative, nil) Expect(err).To(BeNil()) - usd4_issuer1, err = NewAsset(AssetTypeAssetTypeCreditAlphanum4, AssetAlphaNum4{ + usd4_issuer1, err = NewAsset(AssetTypeAssetTypeCreditAlphanum4, AlphaNum4{ Issuer: issuer1, AssetCode: usd4, }) Expect(err).To(BeNil()) - usd4_issuer2, err = NewAsset(AssetTypeAssetTypeCreditAlphanum4, AssetAlphaNum4{ + usd4_issuer2, err = NewAsset(AssetTypeAssetTypeCreditAlphanum4, AlphaNum4{ Issuer: issuer2, AssetCode: usd4, }) Expect(err).To(BeNil()) - usd12_issuer1, err = NewAsset(AssetTypeAssetTypeCreditAlphanum12, AssetAlphaNum12{ + usd12_issuer1, err = NewAsset(AssetTypeAssetTypeCreditAlphanum12, AlphaNum12{ Issuer: issuer1, AssetCode: usd12, }) Expect(err).To(BeNil()) - eur4_issuer1, err = NewAsset(AssetTypeAssetTypeCreditAlphanum4, AssetAlphaNum4{ + eur4_issuer1, err = NewAsset(AssetTypeAssetTypeCreditAlphanum4, AlphaNum4{ Issuer: issuer1, AssetCode: eur4, }) @@ -175,3 +187,320 @@ var _ = Describe("xdr.Asset#Equals()", func() { }) }) + +func TestAssetSetCredit(t *testing.T) { + issuer := AccountId{} + issuer.SetAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + + a := &Asset{} + a.SetCredit("USD", issuer) + assert.Nil(t, a.AlphaNum12) + assert.NotNil(t, a.AlphaNum4) + assert.Equal(t, AssetTypeAssetTypeCreditAlphanum4, a.Type) + assert.Equal(t, issuer, a.AlphaNum4.Issuer) + assert.Equal(t, AssetCode4{'U', 'S', 'D', 0}, a.AlphaNum4.AssetCode) + + a = &Asset{} + a.SetCredit("USDUSD", issuer) + assert.Nil(t, a.AlphaNum4) + assert.NotNil(t, a.AlphaNum12) + assert.Equal(t, AssetTypeAssetTypeCreditAlphanum12, a.Type) + assert.Equal(t, issuer, a.AlphaNum12.Issuer) + assert.Equal(t, AssetCode12{'U', 'S', 'D', 'U', 'S', 'D', 0, 0, 0, 0, 0, 0}, a.AlphaNum12.AssetCode) +} + +func TestToAllowTrustOpAsset_AlphaNum4(t *testing.T) { + a := &Asset{} + at, err := a.ToAssetCode("ABCD") + if assert.NoError(t, err) { + code, ok := at.GetAssetCode4() + assert.True(t, ok) + var expected AssetCode4 + copy(expected[:], "ABCD") + assert.Equal(t, expected, code) + } +} + +func TestToAllowTrustOpAsset_AlphaNum12(t *testing.T) { + a := &Asset{} + at, err := 
a.ToAssetCode("ABCDEFGHIJKL") + if assert.NoError(t, err) { + code, ok := at.GetAssetCode12() + assert.True(t, ok) + var expected AssetCode12 + copy(expected[:], "ABCDEFGHIJKL") + assert.Equal(t, expected, code) + } +} + +func TestToAllowTrustOpAsset_Error(t *testing.T) { + a := &Asset{} + _, err := a.ToAssetCode("") + assert.EqualError(t, err, "Asset code length is invalid") +} + +func TestBuildAssets(t *testing.T) { + for _, testCase := range []struct { + name string + value string + expectedAssets []Asset + expectedError string + }{ + { + "empty list", + "", + []Asset{}, + "", + }, + { + "native", + "native", + []Asset{MustNewNativeAsset()}, + "", + }, + { + "asset does not contain :", + "invalid-asset", + []Asset{}, + "invalid-asset is not a valid asset", + }, + { + "asset contains more than one :", + "usd:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V:", + []Asset{}, + "is not a valid asset", + }, + { + "unicode asset code", + "ΓΌsd:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V", + []Asset{}, + "contains an invalid asset code", + }, + { + "asset code must be alpha numeric", + "!usd:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V", + []Asset{}, + "contains an invalid asset code", + }, + { + "asset code contains backslash", + "usd\\x23:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V", + []Asset{}, + "contains an invalid asset code", + }, + { + "contains null characters", + "abcde\\x00:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V", + []Asset{}, + "contains an invalid asset code", + }, + { + "asset code is too short", + ":GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V", + []Asset{}, + "is not a valid asset", + }, + { + "asset code is too long", + "0123456789abc:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V", + []Asset{}, + "is not a valid asset", + }, + { + "issuer is empty", + "usd:", + []Asset{}, + "contains an invalid issuer", + }, + { + "issuer is invalid", + "usd:kkj9808;l", + []Asset{}, + "contains an invalid issuer", + }, + { + "validation succeeds", + "usd:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V,usdabc:GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V", + []Asset{ + MustNewCreditAsset("usd", "GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V"), + MustNewCreditAsset("usdabc", "GAEDTJ4PPEFVW5XV2S7LUXBEHNQMX5Q2GM562RJGOQG7GVCE5H3HIB4V"), + }, + "", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + tt := assert.New(t) + assets, err := BuildAssets(testCase.value) + if testCase.expectedError == "" { + tt.NoError(err) + tt.Len(assets, len(testCase.expectedAssets)) + for i := range assets { + tt.Equal(testCase.expectedAssets[i], assets[i]) + } + } else { + tt.Error(err) + tt.Contains(err.Error(), testCase.expectedError) + } + }) + } +} + +func TestBuildAsset(t *testing.T) { + testCases := []struct { + assetType string + code string + issuer string + valid bool + }{ + { + assetType: "native", + valid: true, + }, + { + assetType: "credit_alphanum4", + code: "USD", + issuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + valid: true, + }, + { + assetType: "credit_alphanum12", + code: "SPOOON", + issuer: "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", + valid: true, + }, + { + assetType: "invalid", + }, + } + for _, tc := range testCases { + t.Run(tc.assetType, func(t *testing.T) { + asset, err := BuildAsset(tc.assetType, tc.issuer, tc.code) + + if tc.valid { + assert.NoError(t, err) + var assetType, code, issuer string + asset.Extract(&assetType, &code, 
&issuer) + assert.Equal(t, tc.assetType, assetType) + assert.Equal(t, tc.code, code) + assert.Equal(t, tc.issuer, issuer) + } else { + assert.Error(t, err) + } + }) + } +} + +func TestAssetLessThan(t *testing.T) { + xlm := MustNewNativeAsset() + + t.Run("returns false if assets are equal", func(t *testing.T) { + assetA, err := NewCreditAsset( + "ARST", + "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO", + ) + require.NoError(t, err) + + assetB, err := NewCreditAsset( + "USD", + "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + ) + require.NoError(t, err) + + assert.False(t, xlm.LessThan(xlm)) + assert.False(t, assetA.LessThan(assetA)) + assert.False(t, assetB.LessThan(assetB)) + }) + + t.Run("test if asset types are being validated as native < anum4 < anum12", func(t *testing.T) { + anum4, err := NewCreditAsset( + "ARST", + "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO", + ) + require.NoError(t, err) + anum12, err := NewCreditAsset( + "ARSTANUM12", + "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO", + ) + require.NoError(t, err) + + assert.False(t, xlm.LessThan(xlm)) + assert.True(t, xlm.LessThan(anum4)) + assert.True(t, xlm.LessThan(anum12)) + + assert.False(t, anum4.LessThan(xlm)) + assert.False(t, anum4.LessThan(anum4)) + assert.True(t, anum4.LessThan(anum12)) + + assert.False(t, anum12.LessThan(xlm)) + assert.False(t, anum12.LessThan(anum4)) + assert.False(t, anum12.LessThan(anum12)) + }) + + t.Run("test if asset codes are being validated as assetCodeA < assetCodeB", func(t *testing.T) { + assetARST, err := NewCreditAsset( + "ARST", + "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO", + ) + require.NoError(t, err) + assetUSDX, err := NewCreditAsset( + "USDX", + "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO", + ) + require.NoError(t, err) + + assert.False(t, assetARST.LessThan(assetARST)) + assert.True(t, assetARST.LessThan(assetUSDX)) + + assert.False(t, assetUSDX.LessThan(assetARST)) + assert.False(t, assetUSDX.LessThan(assetUSDX)) + }) + + t.Run("test if asset issuers are being validated as assetIssuerA < assetIssuerB", func(t *testing.T) { + assetIssuerA, err := NewCreditAsset( + "ARST", + "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO", + ) + require.NoError(t, err) + assetIssuerB, err := NewCreditAsset( + "ARST", + "GCEZWKCA5VLDNRLN3RPRJMRZOX3Z6G5CHCGSNFHEYVXM3XOJMDS674JZ", + ) + require.NoError(t, err) + + assert.True(t, assetIssuerA.LessThan(assetIssuerB)) + assert.False(t, assetIssuerA.LessThan(assetIssuerA)) + + assert.False(t, assetIssuerB.LessThan(assetIssuerA)) + assert.False(t, assetIssuerB.LessThan(assetIssuerB)) + }) +} + +func BenchmarkAssetString(b *testing.B) { + n := MustNewNativeAsset() + a, err := NewCreditAsset( + "ARST", + "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO", + ) + require.NoError(b, err) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = n.String() + _ = a.String() + } +} + +func BenchmarkAssetStringCanonical(b *testing.B) { + n := MustNewNativeAsset() + a, err := NewCreditAsset( + "ARST", + "GB7TAYRUZGE6TVT7NHP5SMIZRNQA6PLM423EYISAOAP3MKYIQMVYP2JO", + ) + require.NoError(b, err) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = n.StringCanonical() + _ = a.StringCanonical() + } +} diff --git a/xdr/change_trust_asset.go b/xdr/change_trust_asset.go new file mode 100644 index 0000000000..451632c415 --- /dev/null +++ b/xdr/change_trust_asset.go @@ -0,0 +1,29 @@ +package xdr + +import ( + "fmt" +) + +// ToAsset converts 
ChangeTrustAsset to Asset. Panics on type other than +// AssetTypeAssetTypeNative, AssetTypeAssetTypeCreditAlphanum4 or +// AssetTypeAssetTypeCreditAlphanum12. +func (tla ChangeTrustAsset) ToAsset() Asset { + var a Asset + + a.Type = tla.Type + + switch a.Type { + case AssetTypeAssetTypeNative: + // Empty branch + case AssetTypeAssetTypeCreditAlphanum4: + assetCode4 := *tla.AlphaNum4 + a.AlphaNum4 = &assetCode4 + case AssetTypeAssetTypeCreditAlphanum12: + assetCode12 := *tla.AlphaNum12 + a.AlphaNum12 = &assetCode12 + default: + panic(fmt.Errorf("Cannot transform type %v to Asset", a.Type)) + } + + return a +} diff --git a/xdr/claim_atom.go b/xdr/claim_atom.go new file mode 100644 index 0000000000..7adacfb922 --- /dev/null +++ b/xdr/claim_atom.go @@ -0,0 +1,88 @@ +package xdr + +import ( + "fmt" + + "github.com/stellar/go/support/errors" +) + +func (a ClaimAtom) OfferId() Int64 { + switch a.Type { + case ClaimAtomTypeClaimAtomTypeV0: + return a.V0.OfferId + case ClaimAtomTypeClaimAtomTypeOrderBook: + return a.OrderBook.OfferId + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + panic(errors.New("liquidity pools don't have offers")) + default: + panic(fmt.Errorf("Unknown ClaimAtom type: %v", a.Type)) + } +} + +func (a ClaimAtom) SellerId() AccountId { + switch a.Type { + case ClaimAtomTypeClaimAtomTypeV0: + return AccountId{ + Type: PublicKeyTypePublicKeyTypeEd25519, + Ed25519: &a.V0.SellerEd25519, + } + case ClaimAtomTypeClaimAtomTypeOrderBook: + return a.OrderBook.SellerId + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + panic(errors.New("liquidity pools don't have a seller")) + default: + panic(fmt.Errorf("Unknown ClaimAtom type: %v", a.Type)) + } +} + +func (a ClaimAtom) AssetBought() Asset { + switch a.Type { + case ClaimAtomTypeClaimAtomTypeV0: + return a.V0.AssetBought + case ClaimAtomTypeClaimAtomTypeOrderBook: + return a.OrderBook.AssetBought + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + return a.LiquidityPool.AssetBought + default: + panic(fmt.Errorf("Unknown ClaimAtom type: %v", a.Type)) + } +} + +func (a ClaimAtom) AmountBought() Int64 { + switch a.Type { + case ClaimAtomTypeClaimAtomTypeV0: + return a.V0.AmountBought + case ClaimAtomTypeClaimAtomTypeOrderBook: + return a.OrderBook.AmountBought + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + return a.LiquidityPool.AmountBought + default: + panic(fmt.Errorf("Unknown ClaimAtom type: %v", a.Type)) + } +} + +func (a ClaimAtom) AssetSold() Asset { + switch a.Type { + case ClaimAtomTypeClaimAtomTypeV0: + return a.V0.AssetSold + case ClaimAtomTypeClaimAtomTypeOrderBook: + return a.OrderBook.AssetSold + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + return a.LiquidityPool.AssetSold + default: + panic(fmt.Errorf("Unknown ClaimAtom type: %v", a.Type)) + } +} + +func (a ClaimAtom) AmountSold() Int64 { + switch a.Type { + case ClaimAtomTypeClaimAtomTypeV0: + return a.V0.AmountSold + case ClaimAtomTypeClaimAtomTypeOrderBook: + return a.OrderBook.AmountSold + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + return a.LiquidityPool.AmountSold + default: + panic(fmt.Errorf("Unknown ClaimAtom type: %v", a.Type)) + } +} diff --git a/xdr/claimable_balance_entry.go b/xdr/claimable_balance_entry.go new file mode 100644 index 0000000000..9ad66f456c --- /dev/null +++ b/xdr/claimable_balance_entry.go @@ -0,0 +1,9 @@ +package xdr + +func (entry *ClaimableBalanceEntry) Flags() ClaimableBalanceFlags { + switch entry.Ext.V { + case 1: + return ClaimableBalanceFlags(entry.Ext.V1.Flags) + } + return 0 +} diff --git a/xdr/claimable_balance_entry_test.go 
b/xdr/claimable_balance_entry_test.go new file mode 100644 index 0000000000..6524bba597 --- /dev/null +++ b/xdr/claimable_balance_entry_test.go @@ -0,0 +1,30 @@ +package xdr_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/xdr" +) + +func TestClaimableBalanceEntry_Flags(t *testing.T) { + entry := xdr.ClaimableBalanceEntry{ + Ext: xdr.ClaimableBalanceEntryExt{ + V: 0, + }, + } + + assert.Equal(t, xdr.ClaimableBalanceFlags(0), entry.Flags()) + + entry = xdr.ClaimableBalanceEntry{ + Ext: xdr.ClaimableBalanceEntryExt{ + V: 1, + V1: &xdr.ClaimableBalanceEntryExtensionV1{ + Flags: xdr.Uint32(xdr.ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag), + }, + }, + } + + assert.Equal(t, xdr.ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag, entry.Flags()) +} diff --git a/xdr/claimable_balance_flags.go b/xdr/claimable_balance_flags.go new file mode 100644 index 0000000000..012ba0b63a --- /dev/null +++ b/xdr/claimable_balance_flags.go @@ -0,0 +1,7 @@ +package xdr + +// IsClawbackEnabled returns true if the claimable balance has the "CLAWBACK_ENABLED" option +// turned on. +func (cbFlags ClaimableBalanceFlags) IsClawbackEnabled() bool { + return (cbFlags & ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag) != 0 +} diff --git a/xdr/claimable_balance_flags_test.go b/xdr/claimable_balance_flags_test.go new file mode 100644 index 0000000000..abecd05c5b --- /dev/null +++ b/xdr/claimable_balance_flags_test.go @@ -0,0 +1,26 @@ +package xdr_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/xdr" +) + +func TestIsClawbackEnabled(t *testing.T) { + tt := assert.New(t) + + flag := xdr.ClaimableBalanceFlags(1) + tt.True(flag.IsClawbackEnabled()) + + flag = xdr.ClaimableBalanceFlags(0) + tt.False(flag.IsClawbackEnabled()) + + flag = xdr.ClaimableBalanceFlags(2) + tt.False(flag.IsClawbackEnabled()) + + flag = xdr.ClaimableBalanceFlags(4) + tt.False(flag.IsClawbackEnabled()) + +} diff --git a/xdr/claimable_balance_id.go b/xdr/claimable_balance_id.go new file mode 100644 index 0000000000..43b07d084c --- /dev/null +++ b/xdr/claimable_balance_id.go @@ -0,0 +1,14 @@ +package xdr + +func (e *EncodingBuffer) claimableBalanceCompressEncodeTo(cb ClaimableBalanceId) error { + if err := e.xdrEncoderBuf.WriteByte(byte(cb.Type)); err != nil { + return err + } + switch cb.Type { + case ClaimableBalanceIdTypeClaimableBalanceIdTypeV0: + _, err := e.xdrEncoderBuf.Write(cb.V0[:]) + return err + default: + panic("Unknown type") + } +} diff --git a/xdr/claimant.go b/xdr/claimant.go new file mode 100644 index 0000000000..e84a37a3a0 --- /dev/null +++ b/xdr/claimant.go @@ -0,0 +1,27 @@ +package xdr + +import ( + "sort" +) + +// SortClaimantsByDestination returns a new []Claimant array sorted by destination. 
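+//
+// A minimal sketch reusing the addresses from this package's tests. Sorting is
+// by the claimants' strkey addresses in ascending order, and every claimant is
+// expected to be of type ClaimantTypeClaimantTypeV0 (MustV0 panics otherwise):
+//
+//	a := MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")
+//	b := MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML")
+//	claimants := []Claimant{
+//		{Type: ClaimantTypeClaimantTypeV0, V0: &ClaimantV0{Destination: b}},
+//		{Type: ClaimantTypeClaimantTypeV0, V0: &ClaimantV0{Destination: a}},
+//	}
+//	sorted := SortClaimantsByDestination(claimants)
+//	if sorted[0].MustV0().Destination.Equals(a) {
+//		// claimants come back ordered a, then b
+//	}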
+func SortClaimantsByDestination(claimants []Claimant) []Claimant { + keys := make([]string, 0, len(claimants)) + keysMap := make(map[string]Claimant) + newClaimants := make([]Claimant, 0, len(claimants)) + + for _, claimant := range claimants { + v0 := claimant.MustV0() + key := v0.Destination.Address() + keys = append(keys, key) + keysMap[key] = claimant + } + + sort.Strings(keys) + + for _, key := range keys { + newClaimants = append(newClaimants, keysMap[key]) + } + + return newClaimants +} diff --git a/xdr/claimant_test.go b/xdr/claimant_test.go new file mode 100644 index 0000000000..36f2118aac --- /dev/null +++ b/xdr/claimant_test.go @@ -0,0 +1,43 @@ +package xdr_test + +import ( + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestSortClaimantsByDestination(t *testing.T) { + tt := assert.New(t) + + a := xdr.MustAddress("GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H") + b := xdr.MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML") + + claimants := []xdr.Claimant{ + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: b, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + { + Type: xdr.ClaimantTypeClaimantTypeV0, + V0: &xdr.ClaimantV0{ + Destination: a, + Predicate: xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, + }, + }, + } + + expected := []xdr.AccountId{a, b} + + sorted := xdr.SortClaimantsByDestination(claimants) + for i, c := range sorted { + tt.Equal(expected[i], c.MustV0().Destination) + } +} diff --git a/xdr/db.go b/xdr/db.go index dd0ae4ccb5..04fae28752 100644 --- a/xdr/db.go +++ b/xdr/db.go @@ -1,6 +1,8 @@ package xdr import ( + "database/sql/driver" + "encoding/hex" "errors" "fmt" ) @@ -29,6 +31,26 @@ func (t *AssetType) Scan(src interface{}) error { return nil } +// Scan reads from src into an Asset +func (t *Asset) Scan(src interface{}) error { + return safeBase64Scan(src, t) +} + +// Value implements the database/sql/driver Valuer interface. +func (t Asset) Value() (driver.Value, error) { + return MarshalBase64(t) +} + +// Scan reads from src into a ClaimPredicate +func (c *ClaimPredicate) Scan(src interface{}) error { + return safeBase64Scan(src, c) +} + +// Value implements the database/sql/driver Valuer interface. +func (c ClaimPredicate) Value() (driver.Value, error) { + return MarshalBase64(c) +} + // Scan reads from src into an Int64 func (t *Int64) Scan(src interface{}) error { val, ok := src.(int64) @@ -40,6 +62,26 @@ func (t *Int64) Scan(src interface{}) error { return nil } +// Scan reads from a src into an xdr.Hash +func (t *Hash) Scan(src interface{}) error { + decodedBytes, err := hex.DecodeString(string(src.([]uint8))) + if err != nil { + return err + } + + var decodedHash Hash + copy(decodedHash[:], decodedBytes) + + *t = decodedHash + + return nil +} + +// Scan reads from src into an LedgerUpgrade struct +func (t *LedgerUpgrade) Scan(src interface{}) error { + return safeBase64Scan(src, t) +} + // Scan reads from src into an LedgerEntryChanges struct func (t *LedgerEntryChanges) Scan(src interface{}) error { return safeBase64Scan(src, t) diff --git a/xdr/db_test.go b/xdr/db_test.go index b22537748b..5ab4e93c53 100644 --- a/xdr/db_test.go +++ b/xdr/db_test.go @@ -3,6 +3,7 @@ package xdr_test import ( "database/sql" + "github.com/stellar/go/xdr" . "github.com/stellar/go/xdr" . 
"github.com/onsi/ginkgo" @@ -89,11 +90,31 @@ var _ = Describe("sql.Scanner implementations", func() { Entry("number", 0, Thresholds{}, false), ) + DescribeTable("ClaimPredicate", + func(in interface{}, val ClaimPredicate, shouldSucceed bool) { + var scanned ClaimPredicate + err := scanned.Scan(in) + + if shouldSucceed { + Expect(err).To(BeNil()) + } else { + Expect(err).ToNot(BeNil()) + } + + Expect(scanned).To(Equal(val)) + }, + Entry("default", "AAAAAA==", xdr.ClaimPredicate{ + Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional, + }, true), + ) + DescribeTable("Scanning base64 strings (happy paths only)", func(dest interface{}, in string) { err := dest.(sql.Scanner).Scan(in) Expect(err).To(BeNil()) }, + Entry("ClaimablePredicate", &ClaimPredicate{}, + "AAAAAA=="), Entry("LedgerEntryChanges", &LedgerEntryChanges{}, "AAAAAgAAAAMAAAABAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9w3gtrOnY/+cAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAA=="), Entry("LedgerHeader", &LedgerHeader{}, diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/0 b/xdr/fuzz/jsonclaimpredicate/corpus/0 new file mode 100644 index 0000000000..bb88e36ce9 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/0 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/059ce73797da3779f67d67b27fbbe1586fb5af4c-11 b/xdr/fuzz/jsonclaimpredicate/corpus/059ce73797da3779f67d67b27fbbe1586fb5af4c-11 new file mode 100644 index 0000000000..f1dd7c4056 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/059ce73797da3779f67d67b27fbbe1586fb5af4c-11 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/06b5aa65dd57296c5d7d71c34e4458704c9d8504-24 b/xdr/fuzz/jsonclaimpredicate/corpus/06b5aa65dd57296c5d7d71c34e4458704c9d8504-24 new file mode 100644 index 0000000000..56db1d84fa Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/06b5aa65dd57296c5d7d71c34e4458704c9d8504-24 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/070fdb451a68170438d23544e162edbe0c53831c-24 b/xdr/fuzz/jsonclaimpredicate/corpus/070fdb451a68170438d23544e162edbe0c53831c-24 new file mode 100644 index 0000000000..3d58ee8f47 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/070fdb451a68170438d23544e162edbe0c53831c-24 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/0738efae9b6af6dd2f5b47ee3670e79e120bf8ff-5 b/xdr/fuzz/jsonclaimpredicate/corpus/0738efae9b6af6dd2f5b47ee3670e79e120bf8ff-5 new file mode 100644 index 0000000000..548b46fe5f Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/0738efae9b6af6dd2f5b47ee3670e79e120bf8ff-5 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/0c11486742bca82a11751928390f3c143964bae5-4 b/xdr/fuzz/jsonclaimpredicate/corpus/0c11486742bca82a11751928390f3c143964bae5-4 new file mode 100644 index 0000000000..13e719eaa1 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/0c11486742bca82a11751928390f3c143964bae5-4 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/0c52a7776c7b6699bbb73b57fcd48c92d5861814-1 b/xdr/fuzz/jsonclaimpredicate/corpus/0c52a7776c7b6699bbb73b57fcd48c92d5861814-1 new file mode 100644 index 0000000000..8cb51d937a Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/0c52a7776c7b6699bbb73b57fcd48c92d5861814-1 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/0e7786143ce4b5034c1df4f0bf398198b7880a06-1 b/xdr/fuzz/jsonclaimpredicate/corpus/0e7786143ce4b5034c1df4f0bf398198b7880a06-1 new file 
mode 100644 index 0000000000..4b0e07e156 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/0e7786143ce4b5034c1df4f0bf398198b7880a06-1 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/0e92db53270df55314b1629bb2362d00be9ae485-27 b/xdr/fuzz/jsonclaimpredicate/corpus/0e92db53270df55314b1629bb2362d00be9ae485-27 new file mode 100644 index 0000000000..9e27f26690 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/0e92db53270df55314b1629bb2362d00be9ae485-27 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/17b83232cd0cb2405c3e44f22c1a449dd321bab1-14 b/xdr/fuzz/jsonclaimpredicate/corpus/17b83232cd0cb2405c3e44f22c1a449dd321bab1-14 new file mode 100644 index 0000000000..64a6c7196f Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/17b83232cd0cb2405c3e44f22c1a449dd321bab1-14 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/18d6cfc1fee128a9ed89b45c644fefb1a67c557f-4 b/xdr/fuzz/jsonclaimpredicate/corpus/18d6cfc1fee128a9ed89b45c644fefb1a67c557f-4 new file mode 100644 index 0000000000..dace4a0479 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/18d6cfc1fee128a9ed89b45c644fefb1a67c557f-4 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/1dd0e81c3f135bbfcdf5e306727f1fdaa954c4b5-20 b/xdr/fuzz/jsonclaimpredicate/corpus/1dd0e81c3f135bbfcdf5e306727f1fdaa954c4b5-20 new file mode 100644 index 0000000000..ec23ccad0f Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/1dd0e81c3f135bbfcdf5e306727f1fdaa954c4b5-20 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/1ffd4bb29dbaf9a8b23a80410b7dcf6d160f20f5-5 b/xdr/fuzz/jsonclaimpredicate/corpus/1ffd4bb29dbaf9a8b23a80410b7dcf6d160f20f5-5 new file mode 100644 index 0000000000..3ccc167d62 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/1ffd4bb29dbaf9a8b23a80410b7dcf6d160f20f5-5 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/22059050a2075418d91ab3d94cb5b5202dcb5449-10 b/xdr/fuzz/jsonclaimpredicate/corpus/22059050a2075418d91ab3d94cb5b5202dcb5449-10 new file mode 100644 index 0000000000..ffd301d738 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/22059050a2075418d91ab3d94cb5b5202dcb5449-10 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/22afe6d1107dc8421df2d27cf51c1e644c02c61c-18 b/xdr/fuzz/jsonclaimpredicate/corpus/22afe6d1107dc8421df2d27cf51c1e644c02c61c-18 new file mode 100644 index 0000000000..b31ab954fe Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/22afe6d1107dc8421df2d27cf51c1e644c02c61c-18 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/23d671738a3514e6addb20280a0bd45d04991f35-9 b/xdr/fuzz/jsonclaimpredicate/corpus/23d671738a3514e6addb20280a0bd45d04991f35-9 new file mode 100644 index 0000000000..cd5dd099de Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/23d671738a3514e6addb20280a0bd45d04991f35-9 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/24196ca658a9c921b531ee7000b268a21a630b9e-9 b/xdr/fuzz/jsonclaimpredicate/corpus/24196ca658a9c921b531ee7000b268a21a630b9e-9 new file mode 100644 index 0000000000..25afbbb7f6 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/24196ca658a9c921b531ee7000b268a21a630b9e-9 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/2ad5aa06b6a21b80102b70f792a6f5182b1e795e-26 b/xdr/fuzz/jsonclaimpredicate/corpus/2ad5aa06b6a21b80102b70f792a6f5182b1e795e-26 new file mode 100644 index 0000000000..9c8bf09a06 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/2ad5aa06b6a21b80102b70f792a6f5182b1e795e-26 differ diff --git 
a/xdr/fuzz/jsonclaimpredicate/corpus/2c4456e3a9606ff70b52b741d60629fc0aff6670-24 b/xdr/fuzz/jsonclaimpredicate/corpus/2c4456e3a9606ff70b52b741d60629fc0aff6670-24 new file mode 100644 index 0000000000..5552f1e478 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/2c4456e3a9606ff70b52b741d60629fc0aff6670-24 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/33081e04522f2adea4af265daeb32b9be88972ba-16 b/xdr/fuzz/jsonclaimpredicate/corpus/33081e04522f2adea4af265daeb32b9be88972ba-16 new file mode 100644 index 0000000000..5a92d31cdb Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/33081e04522f2adea4af265daeb32b9be88972ba-16 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/33ede038ddd6a6b2f2db89f2ead0338defbd68bb-26 b/xdr/fuzz/jsonclaimpredicate/corpus/33ede038ddd6a6b2f2db89f2ead0338defbd68bb-26 new file mode 100644 index 0000000000..8ed864fa3c Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/33ede038ddd6a6b2f2db89f2ead0338defbd68bb-26 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/34dabb7d96c82c2808d791c0267e28e79bfac2bf-19 b/xdr/fuzz/jsonclaimpredicate/corpus/34dabb7d96c82c2808d791c0267e28e79bfac2bf-19 new file mode 100644 index 0000000000..d84aea87d7 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/34dabb7d96c82c2808d791c0267e28e79bfac2bf-19 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/3506b22eae67d429090fc709a72819b833c6d96f-25 b/xdr/fuzz/jsonclaimpredicate/corpus/3506b22eae67d429090fc709a72819b833c6d96f-25 new file mode 100644 index 0000000000..36e7032513 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/3506b22eae67d429090fc709a72819b833c6d96f-25 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/35e0ed07d721971cb22d5957dee80f25ead2cb42-22 b/xdr/fuzz/jsonclaimpredicate/corpus/35e0ed07d721971cb22d5957dee80f25ead2cb42-22 new file mode 100644 index 0000000000..af2154938b Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/35e0ed07d721971cb22d5957dee80f25ead2cb42-22 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/377545c22c2ee8e37810716409b46536e840f56b-20 b/xdr/fuzz/jsonclaimpredicate/corpus/377545c22c2ee8e37810716409b46536e840f56b-20 new file mode 100644 index 0000000000..f5d13dbf80 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/377545c22c2ee8e37810716409b46536e840f56b-20 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/3e9f514f89306765a02fba331f057ca66961dd71-17 b/xdr/fuzz/jsonclaimpredicate/corpus/3e9f514f89306765a02fba331f057ca66961dd71-17 new file mode 100644 index 0000000000..e171fdd7b8 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/3e9f514f89306765a02fba331f057ca66961dd71-17 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/3f2b6cbcae547826b84ba42a9e6ea0b1a3339aa4-2 b/xdr/fuzz/jsonclaimpredicate/corpus/3f2b6cbcae547826b84ba42a9e6ea0b1a3339aa4-2 new file mode 100644 index 0000000000..c1f7f95208 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/3f2b6cbcae547826b84ba42a9e6ea0b1a3339aa4-2 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/3f6582b3dcb15d99dab3789e997aa26f721ecbfe-16 b/xdr/fuzz/jsonclaimpredicate/corpus/3f6582b3dcb15d99dab3789e997aa26f721ecbfe-16 new file mode 100644 index 0000000000..4e497abaac Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/3f6582b3dcb15d99dab3789e997aa26f721ecbfe-16 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/4079505f0bc61228e001bc6471699238dde274f4-4 b/xdr/fuzz/jsonclaimpredicate/corpus/4079505f0bc61228e001bc6471699238dde274f4-4 new file mode 
100644 index 0000000000..70cfadb379 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/4079505f0bc61228e001bc6471699238dde274f4-4 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/48afcfe2230cea5d2d3ee438fa0b7a74e372d158-12 b/xdr/fuzz/jsonclaimpredicate/corpus/48afcfe2230cea5d2d3ee438fa0b7a74e372d158-12 new file mode 100644 index 0000000000..9563359924 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/48afcfe2230cea5d2d3ee438fa0b7a74e372d158-12 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/4f3bde9486d6faa2790e56aedf58b4ac44aec8b5-20 b/xdr/fuzz/jsonclaimpredicate/corpus/4f3bde9486d6faa2790e56aedf58b4ac44aec8b5-20 new file mode 100644 index 0000000000..4638fd63e2 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/4f3bde9486d6faa2790e56aedf58b4ac44aec8b5-20 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/4fe56699612c2cf2ff3f386e7c555f560deadf9a-15 b/xdr/fuzz/jsonclaimpredicate/corpus/4fe56699612c2cf2ff3f386e7c555f560deadf9a-15 new file mode 100644 index 0000000000..f8d99dbf77 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/4fe56699612c2cf2ff3f386e7c555f560deadf9a-15 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/60b8e1cbf32b5ca3b1ec60572dab334951058d0b-6 b/xdr/fuzz/jsonclaimpredicate/corpus/60b8e1cbf32b5ca3b1ec60572dab334951058d0b-6 new file mode 100644 index 0000000000..7731723987 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/60b8e1cbf32b5ca3b1ec60572dab334951058d0b-6 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/61cb9be335a9ed20ab522fc29a6e6dfbcb33a601-1 b/xdr/fuzz/jsonclaimpredicate/corpus/61cb9be335a9ed20ab522fc29a6e6dfbcb33a601-1 new file mode 100644 index 0000000000..f60222a147 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/61cb9be335a9ed20ab522fc29a6e6dfbcb33a601-1 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/63ab224880353a2b49f80077e9da722c18329ac1-10 b/xdr/fuzz/jsonclaimpredicate/corpus/63ab224880353a2b49f80077e9da722c18329ac1-10 new file mode 100644 index 0000000000..7fe7873507 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/63ab224880353a2b49f80077e9da722c18329ac1-10 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/658c4a6cc7e274ed79d0ce7ed12a2c7b8c57e601-5 b/xdr/fuzz/jsonclaimpredicate/corpus/658c4a6cc7e274ed79d0ce7ed12a2c7b8c57e601-5 new file mode 100644 index 0000000000..495cc80395 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/658c4a6cc7e274ed79d0ce7ed12a2c7b8c57e601-5 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/6adde074d4fe03d42ba8caab1516c4ceb97da570-11 b/xdr/fuzz/jsonclaimpredicate/corpus/6adde074d4fe03d42ba8caab1516c4ceb97da570-11 new file mode 100644 index 0000000000..fab0e8e9f9 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/6adde074d4fe03d42ba8caab1516c4ceb97da570-11 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/6b12dad633f25444a712e5fd0bb47ec718950a18-20 b/xdr/fuzz/jsonclaimpredicate/corpus/6b12dad633f25444a712e5fd0bb47ec718950a18-20 new file mode 100644 index 0000000000..1841f69e97 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/6b12dad633f25444a712e5fd0bb47ec718950a18-20 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/6cd24a62b01b2c0e4dcc1304817ca6700054b339-21 b/xdr/fuzz/jsonclaimpredicate/corpus/6cd24a62b01b2c0e4dcc1304817ca6700054b339-21 new file mode 100644 index 0000000000..f2858453a0 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/6cd24a62b01b2c0e4dcc1304817ca6700054b339-21 differ diff --git 
a/xdr/fuzz/jsonclaimpredicate/corpus/76ac82e6a9d81c206c98f8305978f3ceb003ef29-2 b/xdr/fuzz/jsonclaimpredicate/corpus/76ac82e6a9d81c206c98f8305978f3ceb003ef29-2 new file mode 100644 index 0000000000..cac2bf112a Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/76ac82e6a9d81c206c98f8305978f3ceb003ef29-2 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/7704823d2993cac4543740727aadf0da46f0b187-19 b/xdr/fuzz/jsonclaimpredicate/corpus/7704823d2993cac4543740727aadf0da46f0b187-19 new file mode 100644 index 0000000000..cb233f5d8b Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/7704823d2993cac4543740727aadf0da46f0b187-19 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/78eb7d11840d3521ebdf34aa850c275d1f48bae8-12 b/xdr/fuzz/jsonclaimpredicate/corpus/78eb7d11840d3521ebdf34aa850c275d1f48bae8-12 new file mode 100644 index 0000000000..814635d507 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/78eb7d11840d3521ebdf34aa850c275d1f48bae8-12 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/7ff2c71aacf7b64291948dfb49369e5d1fee5e1c-2 b/xdr/fuzz/jsonclaimpredicate/corpus/7ff2c71aacf7b64291948dfb49369e5d1fee5e1c-2 new file mode 100644 index 0000000000..8ea482c307 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/7ff2c71aacf7b64291948dfb49369e5d1fee5e1c-2 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/802c4d37a41acc0a65a47dd9260578db8a91c7b0-2 b/xdr/fuzz/jsonclaimpredicate/corpus/802c4d37a41acc0a65a47dd9260578db8a91c7b0-2 new file mode 100644 index 0000000000..399bb8e207 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/802c4d37a41acc0a65a47dd9260578db8a91c7b0-2 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/80bce1575cf091d64032193bf508ef39b9a2c929-13 b/xdr/fuzz/jsonclaimpredicate/corpus/80bce1575cf091d64032193bf508ef39b9a2c929-13 new file mode 100644 index 0000000000..14c08f8909 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/80bce1575cf091d64032193bf508ef39b9a2c929-13 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/8167706299d763d2680aafb67be0c1de500bd577-1 b/xdr/fuzz/jsonclaimpredicate/corpus/8167706299d763d2680aafb67be0c1de500bd577-1 new file mode 100644 index 0000000000..5b74f7383d Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/8167706299d763d2680aafb67be0c1de500bd577-1 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/836ccf6afc61a555e6c77db43e9972e14e018db3-17 b/xdr/fuzz/jsonclaimpredicate/corpus/836ccf6afc61a555e6c77db43e9972e14e018db3-17 new file mode 100644 index 0000000000..b5c038f843 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/836ccf6afc61a555e6c77db43e9972e14e018db3-17 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/8415b311a622e944074e28ca77d538f483201a43-3 b/xdr/fuzz/jsonclaimpredicate/corpus/8415b311a622e944074e28ca77d538f483201a43-3 new file mode 100644 index 0000000000..7e0055f2ad Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/8415b311a622e944074e28ca77d538f483201a43-3 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/8700a406939d1a113038c778808c81c32dc3bc28-1 b/xdr/fuzz/jsonclaimpredicate/corpus/8700a406939d1a113038c778808c81c32dc3bc28-1 new file mode 100644 index 0000000000..9c2d4229cb Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/8700a406939d1a113038c778808c81c32dc3bc28-1 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/8997d68a77b10c3c9ec4196b33e64224b3241266-2 b/xdr/fuzz/jsonclaimpredicate/corpus/8997d68a77b10c3c9ec4196b33e64224b3241266-2 new file mode 100644 index 
0000000000..22c47dd7f1 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/8997d68a77b10c3c9ec4196b33e64224b3241266-2 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/8b3e6618aa03e9c7959cfd5a68095cb4d6a203df-20 b/xdr/fuzz/jsonclaimpredicate/corpus/8b3e6618aa03e9c7959cfd5a68095cb4d6a203df-20 new file mode 100644 index 0000000000..45c2452c48 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/8b3e6618aa03e9c7959cfd5a68095cb4d6a203df-20 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/8b9538724414e6d819239a7c01199398fab43ffc-3 b/xdr/fuzz/jsonclaimpredicate/corpus/8b9538724414e6d819239a7c01199398fab43ffc-3 new file mode 100644 index 0000000000..2db517f4aa Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/8b9538724414e6d819239a7c01199398fab43ffc-3 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/9069ca78e7450a285173431b3e52c5c25299e473-1 b/xdr/fuzz/jsonclaimpredicate/corpus/9069ca78e7450a285173431b3e52c5c25299e473-1 new file mode 100644 index 0000000000..593f4708db Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/9069ca78e7450a285173431b3e52c5c25299e473-1 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/958cc6437a39b63f7396b0a71d04007b88c6379b-7 b/xdr/fuzz/jsonclaimpredicate/corpus/958cc6437a39b63f7396b0a71d04007b88c6379b-7 new file mode 100644 index 0000000000..ff8825265c Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/958cc6437a39b63f7396b0a71d04007b88c6379b-7 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/959ed41ac6a90cd2ebc4795df6ddd40ea0d75b0f-2 b/xdr/fuzz/jsonclaimpredicate/corpus/959ed41ac6a90cd2ebc4795df6ddd40ea0d75b0f-2 new file mode 100644 index 0000000000..a168835f35 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/959ed41ac6a90cd2ebc4795df6ddd40ea0d75b0f-2 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/9a18ed489961b6bd472665166ae1b92fadb4a30d-25 b/xdr/fuzz/jsonclaimpredicate/corpus/9a18ed489961b6bd472665166ae1b92fadb4a30d-25 new file mode 100644 index 0000000000..0efa966828 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/9a18ed489961b6bd472665166ae1b92fadb4a30d-25 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/a01a7ff06434fa1552a81f30078b392e6d2bfad1-4 b/xdr/fuzz/jsonclaimpredicate/corpus/a01a7ff06434fa1552a81f30078b392e6d2bfad1-4 new file mode 100644 index 0000000000..62c6e0803a Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/a01a7ff06434fa1552a81f30078b392e6d2bfad1-4 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/a46d5ed7d68b26e1c5d9c1b88d657dafd2205150-23 b/xdr/fuzz/jsonclaimpredicate/corpus/a46d5ed7d68b26e1c5d9c1b88d657dafd2205150-23 new file mode 100644 index 0000000000..136d7f692c Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/a46d5ed7d68b26e1c5d9c1b88d657dafd2205150-23 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/a7473f2a9f6d6c330457bf2de9d5969a55d0aee3-16 b/xdr/fuzz/jsonclaimpredicate/corpus/a7473f2a9f6d6c330457bf2de9d5969a55d0aee3-16 new file mode 100644 index 0000000000..177c3c7bc4 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/a7473f2a9f6d6c330457bf2de9d5969a55d0aee3-16 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/a751ccd38c35877231ba5493bfc2616870c2988e-24 b/xdr/fuzz/jsonclaimpredicate/corpus/a751ccd38c35877231ba5493bfc2616870c2988e-24 new file mode 100644 index 0000000000..150fcbda0c Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/a751ccd38c35877231ba5493bfc2616870c2988e-24 differ diff --git 
a/xdr/fuzz/jsonclaimpredicate/corpus/a9d519b73acfc6693baa94caf8d3775d1b5c2e4d-3 b/xdr/fuzz/jsonclaimpredicate/corpus/a9d519b73acfc6693baa94caf8d3775d1b5c2e4d-3 new file mode 100644 index 0000000000..3c2023cc08 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/a9d519b73acfc6693baa94caf8d3775d1b5c2e4d-3 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/aaa626f320812eb78a7ef9b957db9e00792c2ae6-25 b/xdr/fuzz/jsonclaimpredicate/corpus/aaa626f320812eb78a7ef9b957db9e00792c2ae6-25 new file mode 100644 index 0000000000..168e49556d Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/aaa626f320812eb78a7ef9b957db9e00792c2ae6-25 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/ab8767081fee98bd3c608822c63599d9bb4cd7f0-15 b/xdr/fuzz/jsonclaimpredicate/corpus/ab8767081fee98bd3c608822c63599d9bb4cd7f0-15 new file mode 100644 index 0000000000..20300787bf Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/ab8767081fee98bd3c608822c63599d9bb4cd7f0-15 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/abdc9928a9e1d7dc324eba5e7bd9be3d3f648a58-14 b/xdr/fuzz/jsonclaimpredicate/corpus/abdc9928a9e1d7dc324eba5e7bd9be3d3f648a58-14 new file mode 100644 index 0000000000..5e6134bc21 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/abdc9928a9e1d7dc324eba5e7bd9be3d3f648a58-14 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/ac8f325e3a178437989e9fb6d577d8b3ec57cfd8-9 b/xdr/fuzz/jsonclaimpredicate/corpus/ac8f325e3a178437989e9fb6d577d8b3ec57cfd8-9 new file mode 100644 index 0000000000..525f4dc9d2 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/ac8f325e3a178437989e9fb6d577d8b3ec57cfd8-9 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/ae48c762a25dc2b03889d5830768edd235ec5b9e-10 b/xdr/fuzz/jsonclaimpredicate/corpus/ae48c762a25dc2b03889d5830768edd235ec5b9e-10 new file mode 100644 index 0000000000..ddd5bf3d20 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/ae48c762a25dc2b03889d5830768edd235ec5b9e-10 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/b1a3550eaa19cc0fee2e869c5e126f433910d1d3-12 b/xdr/fuzz/jsonclaimpredicate/corpus/b1a3550eaa19cc0fee2e869c5e126f433910d1d3-12 new file mode 100644 index 0000000000..7b11cc03a9 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/b1a3550eaa19cc0fee2e869c5e126f433910d1d3-12 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/b20ec8a5c170be3a45ebb6d7736ef7e2450bcfc8-19 b/xdr/fuzz/jsonclaimpredicate/corpus/b20ec8a5c170be3a45ebb6d7736ef7e2450bcfc8-19 new file mode 100644 index 0000000000..8e19867218 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/b20ec8a5c170be3a45ebb6d7736ef7e2450bcfc8-19 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/b283d6eafb57f10081d075a1e513297d0f003dda-4 b/xdr/fuzz/jsonclaimpredicate/corpus/b283d6eafb57f10081d075a1e513297d0f003dda-4 new file mode 100644 index 0000000000..c2453b5f72 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/b283d6eafb57f10081d075a1e513297d0f003dda-4 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/b2af21ff16013174e8b940aeea46a5d2b7ebe959-3 b/xdr/fuzz/jsonclaimpredicate/corpus/b2af21ff16013174e8b940aeea46a5d2b7ebe959-3 new file mode 100644 index 0000000000..819d62b110 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/b2af21ff16013174e8b940aeea46a5d2b7ebe959-3 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/b43d6499a479a144d48743aceab226ae04b084b3-27 b/xdr/fuzz/jsonclaimpredicate/corpus/b43d6499a479a144d48743aceab226ae04b084b3-27 new file mode 100644 
index 0000000000..2a020235aa Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/b43d6499a479a144d48743aceab226ae04b084b3-27 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/b975870c79205eb51bb870470dc43a2247a840b4-2 b/xdr/fuzz/jsonclaimpredicate/corpus/b975870c79205eb51bb870470dc43a2247a840b4-2 new file mode 100644 index 0000000000..575eb9b9c8 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/b975870c79205eb51bb870470dc43a2247a840b4-2 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/ba38988d5c5dcc1ac3a71cd384361208fe07b52b-3 b/xdr/fuzz/jsonclaimpredicate/corpus/ba38988d5c5dcc1ac3a71cd384361208fe07b52b-3 new file mode 100644 index 0000000000..5edd0216a1 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/ba38988d5c5dcc1ac3a71cd384361208fe07b52b-3 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/ba66c7f545d45a2f9e3a0ccdf3c890d599be0c04-23 b/xdr/fuzz/jsonclaimpredicate/corpus/ba66c7f545d45a2f9e3a0ccdf3c890d599be0c04-23 new file mode 100644 index 0000000000..0a9ca9fa19 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/ba66c7f545d45a2f9e3a0ccdf3c890d599be0c04-23 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/bdc41f8d992fb63574a3502310f8da8fa2f46ecb-6 b/xdr/fuzz/jsonclaimpredicate/corpus/bdc41f8d992fb63574a3502310f8da8fa2f46ecb-6 new file mode 100644 index 0000000000..36a957bc4d Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/bdc41f8d992fb63574a3502310f8da8fa2f46ecb-6 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/c0113ad49c1ab5cb58850b98f057361b51a07de9-19 b/xdr/fuzz/jsonclaimpredicate/corpus/c0113ad49c1ab5cb58850b98f057361b51a07de9-19 new file mode 100644 index 0000000000..92f5d6aa3a Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/c0113ad49c1ab5cb58850b98f057361b51a07de9-19 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/c04c37d97e7d5f40e778a9f4f2798e79d67b2894-13 b/xdr/fuzz/jsonclaimpredicate/corpus/c04c37d97e7d5f40e778a9f4f2798e79d67b2894-13 new file mode 100644 index 0000000000..197c7aa522 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/c04c37d97e7d5f40e778a9f4f2798e79d67b2894-13 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/c3e528e0864bdeabc3a493347d081458107d6d02-8 b/xdr/fuzz/jsonclaimpredicate/corpus/c3e528e0864bdeabc3a493347d081458107d6d02-8 new file mode 100644 index 0000000000..464695f4c3 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/c3e528e0864bdeabc3a493347d081458107d6d02-8 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/c8e6898a08dcd50eb9d4277f52b5a1316363b1ce-8 b/xdr/fuzz/jsonclaimpredicate/corpus/c8e6898a08dcd50eb9d4277f52b5a1316363b1ce-8 new file mode 100644 index 0000000000..74e8fd5d05 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/c8e6898a08dcd50eb9d4277f52b5a1316363b1ce-8 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/cd9e58d3d455ad5a8dbe295c15bcc497ad0a5b3f-6 b/xdr/fuzz/jsonclaimpredicate/corpus/cd9e58d3d455ad5a8dbe295c15bcc497ad0a5b3f-6 new file mode 100644 index 0000000000..1a443f8f77 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/cd9e58d3d455ad5a8dbe295c15bcc497ad0a5b3f-6 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/d1c5c02b37cee617709474c716cff28931a83f0a-1 b/xdr/fuzz/jsonclaimpredicate/corpus/d1c5c02b37cee617709474c716cff28931a83f0a-1 new file mode 100644 index 0000000000..088b3d380a Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/d1c5c02b37cee617709474c716cff28931a83f0a-1 differ diff --git 
a/xdr/fuzz/jsonclaimpredicate/corpus/d1c92868d562107bef8080a1e36abb5118cd6c1f-1 b/xdr/fuzz/jsonclaimpredicate/corpus/d1c92868d562107bef8080a1e36abb5118cd6c1f-1 new file mode 100644 index 0000000000..d22a26ae5f Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/d1c92868d562107bef8080a1e36abb5118cd6c1f-1 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/d47aadd63b71c1edf2560bd7790b6ece114ac150-4 b/xdr/fuzz/jsonclaimpredicate/corpus/d47aadd63b71c1edf2560bd7790b6ece114ac150-4 new file mode 100644 index 0000000000..8bb9ffd303 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/d47aadd63b71c1edf2560bd7790b6ece114ac150-4 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/d4e6caed58ca3f001ee3f9b719e72f48af941e69-8 b/xdr/fuzz/jsonclaimpredicate/corpus/d4e6caed58ca3f001ee3f9b719e72f48af941e69-8 new file mode 100644 index 0000000000..c7aabe67bf Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/d4e6caed58ca3f001ee3f9b719e72f48af941e69-8 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/dd2292f040e7417fd33bffef779136045e3e42e9-8 b/xdr/fuzz/jsonclaimpredicate/corpus/dd2292f040e7417fd33bffef779136045e3e42e9-8 new file mode 100644 index 0000000000..f19b3303a2 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/dd2292f040e7417fd33bffef779136045e3e42e9-8 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/e1e0b62f78224847064adfb1db88db5c2ad7314f-22 b/xdr/fuzz/jsonclaimpredicate/corpus/e1e0b62f78224847064adfb1db88db5c2ad7314f-22 new file mode 100644 index 0000000000..4e0e245cc8 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/e1e0b62f78224847064adfb1db88db5c2ad7314f-22 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/e496e6bcb958104db803f6987f7463e64dd03c36-3 b/xdr/fuzz/jsonclaimpredicate/corpus/e496e6bcb958104db803f6987f7463e64dd03c36-3 new file mode 100644 index 0000000000..6221fd128a Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/e496e6bcb958104db803f6987f7463e64dd03c36-3 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/e6d28f2def888dd7b4e3e7bb0602a9a54032fc25-14 b/xdr/fuzz/jsonclaimpredicate/corpus/e6d28f2def888dd7b4e3e7bb0602a9a54032fc25-14 new file mode 100644 index 0000000000..a93132f551 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/e6d28f2def888dd7b4e3e7bb0602a9a54032fc25-14 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/e9ae6ca39d51bd4198dc8b4f2ed429dd7464a1b7-15 b/xdr/fuzz/jsonclaimpredicate/corpus/e9ae6ca39d51bd4198dc8b4f2ed429dd7464a1b7-15 new file mode 100644 index 0000000000..dab0aa11f6 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/e9ae6ca39d51bd4198dc8b4f2ed429dd7464a1b7-15 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/ea5ef46f0ad1b2f1477437d6a8fea4a05bf589f9-21 b/xdr/fuzz/jsonclaimpredicate/corpus/ea5ef46f0ad1b2f1477437d6a8fea4a05bf589f9-21 new file mode 100644 index 0000000000..eaecbc2863 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/ea5ef46f0ad1b2f1477437d6a8fea4a05bf589f9-21 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/ec7ef371ebfc7964bd80d7783b5e4eb0f614b9f2-18 b/xdr/fuzz/jsonclaimpredicate/corpus/ec7ef371ebfc7964bd80d7783b5e4eb0f614b9f2-18 new file mode 100644 index 0000000000..e9ee6c308e Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/ec7ef371ebfc7964bd80d7783b5e4eb0f614b9f2-18 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/ed23eb5f747659b11a5505b08e56624399351eb3-16 b/xdr/fuzz/jsonclaimpredicate/corpus/ed23eb5f747659b11a5505b08e56624399351eb3-16 new file mode 100644 index 
0000000000..591e8e82d2 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/ed23eb5f747659b11a5505b08e56624399351eb3-16 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/efad3ce838161e85a374929a9c0fd4d51812cc5f-2 b/xdr/fuzz/jsonclaimpredicate/corpus/efad3ce838161e85a374929a9c0fd4d51812cc5f-2 new file mode 100644 index 0000000000..f484f0764e Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/efad3ce838161e85a374929a9c0fd4d51812cc5f-2 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/f4e16e4477ecc1f88f51319373564404a1e9b03d-7 b/xdr/fuzz/jsonclaimpredicate/corpus/f4e16e4477ecc1f88f51319373564404a1e9b03d-7 new file mode 100644 index 0000000000..b42e7d2d91 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/f4e16e4477ecc1f88f51319373564404a1e9b03d-7 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/f54b8d6090a285885734d804129213715c3c5b55-10 b/xdr/fuzz/jsonclaimpredicate/corpus/f54b8d6090a285885734d804129213715c3c5b55-10 new file mode 100644 index 0000000000..63895b6a2f Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/f54b8d6090a285885734d804129213715c3c5b55-10 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/f54d55cac405aed2c99ca12e5f630db204f46b67-2 b/xdr/fuzz/jsonclaimpredicate/corpus/f54d55cac405aed2c99ca12e5f630db204f46b67-2 new file mode 100644 index 0000000000..d80921c1ef Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/f54d55cac405aed2c99ca12e5f630db204f46b67-2 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/f76bac376810acce74267a853b664a0a29d47d2b-5 b/xdr/fuzz/jsonclaimpredicate/corpus/f76bac376810acce74267a853b664a0a29d47d2b-5 new file mode 100644 index 0000000000..43ed439c5a Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/f76bac376810acce74267a853b664a0a29d47d2b-5 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/f816b4e064d14656872cadca92ecb1f5179a6319-5 b/xdr/fuzz/jsonclaimpredicate/corpus/f816b4e064d14656872cadca92ecb1f5179a6319-5 new file mode 100644 index 0000000000..160fc0ff13 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/f816b4e064d14656872cadca92ecb1f5179a6319-5 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/fb1c8df7fa2faac955b0e03fbc7423030d6ab8ae-1 b/xdr/fuzz/jsonclaimpredicate/corpus/fb1c8df7fa2faac955b0e03fbc7423030d6ab8ae-1 new file mode 100644 index 0000000000..21755fd82f Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/fb1c8df7fa2faac955b0e03fbc7423030d6ab8ae-1 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/fb4933de8b968e1cda99b1d7d8dfb600d60123ce-7 b/xdr/fuzz/jsonclaimpredicate/corpus/fb4933de8b968e1cda99b1d7d8dfb600d60123ce-7 new file mode 100644 index 0000000000..c1b2b95302 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/fb4933de8b968e1cda99b1d7d8dfb600d60123ce-7 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/fbfab2089beccd80677b39f984d4a2e1151000e4-18 b/xdr/fuzz/jsonclaimpredicate/corpus/fbfab2089beccd80677b39f984d4a2e1151000e4-18 new file mode 100644 index 0000000000..aa999c7ccd Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/fbfab2089beccd80677b39f984d4a2e1151000e4-18 differ diff --git a/xdr/fuzz/jsonclaimpredicate/corpus/fd447488be54b05d69fa33e39e3754eb7b34a5ba-5 b/xdr/fuzz/jsonclaimpredicate/corpus/fd447488be54b05d69fa33e39e3754eb7b34a5ba-5 new file mode 100644 index 0000000000..b2f86e241d Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/fd447488be54b05d69fa33e39e3754eb7b34a5ba-5 differ diff --git 
a/xdr/fuzz/jsonclaimpredicate/corpus/fef930fcbbe7b532642eb9bed98dd04d6bbff761-11 b/xdr/fuzz/jsonclaimpredicate/corpus/fef930fcbbe7b532642eb9bed98dd04d6bbff761-11 new file mode 100644 index 0000000000..7746501bb0 Binary files /dev/null and b/xdr/fuzz/jsonclaimpredicate/corpus/fef930fcbbe7b532642eb9bed98dd04d6bbff761-11 differ diff --git a/xdr/fuzz/jsonclaimpredicate/fuzz.go b/xdr/fuzz/jsonclaimpredicate/fuzz.go new file mode 100644 index 0000000000..4f861d7954 --- /dev/null +++ b/xdr/fuzz/jsonclaimpredicate/fuzz.go @@ -0,0 +1,79 @@ +//go:build gofuzz +// +build gofuzz + +package jsonclaimpredicate + +import ( + "bytes" + "encoding/json" + + "github.com/stellar/go/xdr" +) + +// Fuzz is go-fuzz function for fuzzing xdr.ClaimPredicate JSON +// marshaller and unmarshaller. +func Fuzz(data []byte) int { + // Ignore malformed ClaimPredicate + var p xdr.ClaimPredicate + err := xdr.SafeUnmarshal(data, &p) + if err != nil { + return -1 + } + + // Ignore invalid predicates: (and/or length != 2, nil not) + if !validate(p) { + return -1 + } + + j, err := json.Marshal(p) + if err != nil { + panic(err) + } + + var p2 xdr.ClaimPredicate + err = json.Unmarshal(j, &p2) + if err != nil { + panic(err) + } + + j2, err := json.Marshal(p2) + if err != nil { + panic(err) + } + + if !bytes.Equal(j, j2) { + panic("not equal " + string(j) + " " + string(j2)) + } + + return 1 +} + +func validate(p xdr.ClaimPredicate) bool { + switch p.Type { + case xdr.ClaimPredicateTypeClaimPredicateUnconditional: + return true + case xdr.ClaimPredicateTypeClaimPredicateAnd: + and := *p.AndPredicates + if len(and) != 2 { + return false + } + return validate(and[0]) && validate(and[1]) + case xdr.ClaimPredicateTypeClaimPredicateOr: + or := *p.OrPredicates + if len(or) != 2 { + return false + } + return validate(or[0]) && validate(or[1]) + case xdr.ClaimPredicateTypeClaimPredicateNot: + if *p.NotPredicate == nil { + return false + } + return validate(**p.NotPredicate) + case xdr.ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime: + return *p.AbsBefore >= 0 + case xdr.ClaimPredicateTypeClaimPredicateBeforeRelativeTime: + return *p.RelBefore >= 0 + } + + panic("Invalid type") +} diff --git a/xdr/go_string.go b/xdr/go_string.go new file mode 100644 index 0000000000..397a756acd --- /dev/null +++ b/xdr/go_string.go @@ -0,0 +1,231 @@ +package xdr + +import ( + "fmt" + "strconv" + "strings" +) + +// GoString prints Uint32 as decimal instead of hexadecimal numbers. +func (u Uint32) GoString() string { + return strconv.FormatInt(int64(u), 10) +} + +// GoString implements fmt.GoStringer. +func (e TransactionEnvelope) GoString() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("xdr.TransactionEnvelope{Type: xdr.%s,", envelopeTypeMap[int32(e.Type)])) + switch e.Type { + case EnvelopeTypeEnvelopeTypeTxV0: + sb.WriteString(fmt.Sprintf("V0: &%#v", *e.V0)) + case EnvelopeTypeEnvelopeTypeTx: + sb.WriteString(fmt.Sprintf("V1: &%#v", *e.V1)) + case EnvelopeTypeEnvelopeTypeTxFeeBump: + sb.WriteString(fmt.Sprintf("FeeBump: &%#v", *e.FeeBump)) + default: + panic("Unknown type") + } + sb.WriteString("}") + return sb.String() +} + +// GoString implements fmt.GoStringer. 
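The go-fuzz harness above checks a round-trip property: any ClaimPredicate that decodes from XDR must serialize to JSON, parse back, and serialize to the same bytes again. A minimal sketch of that same property as an ordinary (non-fuzzed) test is shown below; the test name and the specific predicate are illustrative and not part of this patch.

```go
package xdr_test

import (
	"bytes"
	"encoding/json"
	"testing"

	"github.com/stellar/go/xdr"
)

// TestClaimPredicateJSONStable mirrors the Fuzz function above for a single,
// hand-picked predicate: the JSON encoding must be stable across a
// decode/encode cycle.
func TestClaimPredicateJSONStable(t *testing.T) {
	p := xdr.ClaimPredicate{Type: xdr.ClaimPredicateTypeClaimPredicateUnconditional}

	j1, err := json.Marshal(p)
	if err != nil {
		t.Fatal(err)
	}

	var parsed xdr.ClaimPredicate
	if err := json.Unmarshal(j1, &parsed); err != nil {
		t.Fatal(err)
	}

	j2, err := json.Marshal(parsed)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(j1, j2) {
		t.Fatalf("JSON round trip is not stable: %s vs %s", j1, j2)
	}
}
```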
+func (e FeeBumpTransactionInnerTx) GoString() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("xdr.FeeBumpTransactionInnerTx{Type: xdr.%s,", envelopeTypeMap[int32(e.Type)])) + switch e.Type { + case EnvelopeTypeEnvelopeTypeTx: + sb.WriteString(fmt.Sprintf("V1: &%#v", *e.V1)) + default: + panic("Unknown type") + } + sb.WriteString("}") + return sb.String() +} + +// GoString implements fmt.GoStringer. +func (a AccountId) GoString() string { + return fmt.Sprintf("xdr.MustAddress(%#v)", a.Address()) +} + +// GoString implements fmt.GoStringer. +func (a Asset) GoString() string { + if a.Type == AssetTypeAssetTypeNative { + return "xdr.MustNewNativeAsset()" + } + + var typ, code, issuer string + a.MustExtract(&typ, &code, &issuer) + return fmt.Sprintf("xdr.MustNewCreditAsset(%#v, %#v)", code, issuer) +} + +// GoString implements fmt.GoStringer. +func (m Memo) GoString() string { + switch m.Type { + case MemoTypeMemoNone: + return fmt.Sprintf("xdr.Memo{Type: xdr.%s}", memoTypeMap[int32(m.Type)]) + case MemoTypeMemoText: + return fmt.Sprintf(`xdr.MemoText(%#v)`, *m.Text) + case MemoTypeMemoId: + return fmt.Sprintf(`xdr.MemoID(%d)`, *m.Id) + case MemoTypeMemoHash: + return fmt.Sprintf(`xdr.MemoHash(%#v)`, *m.Hash) + case MemoTypeMemoReturn: + return fmt.Sprintf(`xdr.MemoRetHash(%#v)`, *m.RetHash) + default: + panic("Unknown type") + } +} + +// GoString implements fmt.GoStringer. +func (m MuxedAccount) GoString() string { + switch m.Type { + case CryptoKeyTypeKeyTypeEd25519: + accountID := m.ToAccountId() + return fmt.Sprintf("xdr.MustMuxedAddress(%#v)", accountID.Address()) + case CryptoKeyTypeKeyTypeMuxedEd25519: + var sb strings.Builder + sb.WriteString("xdr.MuxedAccount{Type: xdr.CryptoKeyTypeKeyTypeMuxedEd25519,") + sb.WriteString(fmt.Sprintf("Med25519: &%#v", *m.Med25519)) + sb.WriteString("}") + return sb.String() + default: + panic("Unknown type") + } +} + +// GoString implements fmt.GoStringer. +func (o Operation) GoString() string { + var sb strings.Builder + sb.WriteString("xdr.Operation{") + if o.SourceAccount != nil { + if o.SourceAccount.Type == CryptoKeyTypeKeyTypeEd25519 { + accountID := o.SourceAccount.ToAccountId() + sb.WriteString(fmt.Sprintf("SourceAccount: xdr.MustMuxedAddressPtr(%#v),", accountID.Address())) + } else { + sb.WriteString(fmt.Sprintf("SourceAccount: &%#v,", *o.SourceAccount)) + } + } + sb.WriteString(fmt.Sprintf("Body: %#v", o.Body)) + sb.WriteString("}") + return sb.String() +} + +// GoString implements fmt.GoStringer. 
+func (o OperationBody) GoString() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("xdr.OperationBody{Type: xdr.%s,", operationTypeMap[int32(o.Type)])) + switch { + case o.CreateAccountOp != nil: + sb.WriteString(fmt.Sprintf("CreateAccountOp: &%#v", *o.CreateAccountOp)) + case o.PaymentOp != nil: + sb.WriteString(fmt.Sprintf("PaymentOp: &%#v", *o.PaymentOp)) + case o.PathPaymentStrictReceiveOp != nil: + sb.WriteString(fmt.Sprintf("PathPaymentStrictReceiveOp: &%#v", *o.PathPaymentStrictReceiveOp)) + case o.ManageSellOfferOp != nil: + sb.WriteString(fmt.Sprintf("ManageSellOfferOp: &%#v", *o.ManageSellOfferOp)) + case o.CreatePassiveSellOfferOp != nil: + sb.WriteString(fmt.Sprintf("CreatePassiveSellOfferOp: &%#v", *o.CreatePassiveSellOfferOp)) + case o.SetOptionsOp != nil: + sb.WriteString(fmt.Sprintf("SetOptionsOp: &%#v", *o.SetOptionsOp)) + case o.ChangeTrustOp != nil: + sb.WriteString(fmt.Sprintf("ChangeTrustOp: &%#v", *o.ChangeTrustOp)) + case o.AllowTrustOp != nil: + sb.WriteString(fmt.Sprintf("AllowTrustOp: &%#v", *o.AllowTrustOp)) + case o.Destination != nil: + sb.WriteString(fmt.Sprintf("Destination: %#v", *o.Destination)) + case o.ManageDataOp != nil: + sb.WriteString(fmt.Sprintf("ManageDataOp: &%#v", *o.ManageDataOp)) + case o.BumpSequenceOp != nil: + sb.WriteString(fmt.Sprintf("BumpSequenceOp: &%#v", *o.BumpSequenceOp)) + case o.ManageBuyOfferOp != nil: + sb.WriteString(fmt.Sprintf("ManageBuyOfferOp: &%#v", *o.ManageBuyOfferOp)) + case o.PathPaymentStrictSendOp != nil: + sb.WriteString(fmt.Sprintf("PathPaymentStrictSendOp: &%#v", *o.PathPaymentStrictSendOp)) + default: + panic("Unknown type") + } + sb.WriteString("}") + return sb.String() +} + +// GoString implements fmt.GoStringer. +func (s SetOptionsOp) GoString() string { + var sb strings.Builder + sb.WriteString("xdr.SetOptionsOp{") + if s.InflationDest != nil { + sb.WriteString(fmt.Sprintf("InflationDest: xdr.MustAddressPtr(%#v),", s.InflationDest.Address())) + } + if s.ClearFlags != nil { + sb.WriteString(fmt.Sprintf("ClearFlags: xdr.Uint32Ptr(%#v),", s.ClearFlags)) + } + if s.SetFlags != nil { + sb.WriteString(fmt.Sprintf("SetFlags: xdr.Uint32Ptr(%#v),", s.SetFlags)) + } + if s.MasterWeight != nil { + sb.WriteString(fmt.Sprintf("MasterWeight: xdr.Uint32Ptr(%#v),", s.MasterWeight)) + } + if s.LowThreshold != nil { + sb.WriteString(fmt.Sprintf("LowThreshold: xdr.Uint32Ptr(%#v),", s.LowThreshold)) + } + if s.MedThreshold != nil { + sb.WriteString(fmt.Sprintf("MedThreshold: xdr.Uint32Ptr(%#v),", s.MedThreshold)) + } + if s.HighThreshold != nil { + sb.WriteString(fmt.Sprintf("HighThreshold: xdr.Uint32Ptr(%#v),", s.HighThreshold)) + } + if s.HomeDomain != nil { + sb.WriteString(fmt.Sprintf("HomeDomain: xdr.String32Ptr(%#v),", *s.HomeDomain)) + } + if s.Signer != nil { + sb.WriteString(fmt.Sprintf("Signer: &%#v,", *s.Signer)) + } + sb.WriteString("}") + return sb.String() +} + +// GoString implements fmt.GoStringer. +func (s ManageDataOp) GoString() string { + var sb strings.Builder + sb.WriteString("xdr.ManageDataOp{") + sb.WriteString(fmt.Sprintf("DataName: %#v,", s.DataName)) + if s.DataValue == nil { + sb.WriteString("DataValue: nil") + } else { + sb.WriteString(fmt.Sprintf("DataValue: &%#v", *s.DataValue)) + } + sb.WriteString("}") + return sb.String() +} + +// GoString implements fmt.GoStringer. 
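The GoString methods above exist so that printing XDR values with the %#v verb yields Go source that compiles, which makes it easy to turn captured ledger data into test fixtures. A small sketch of that workflow; the operation literal is illustrative and the address is reused from the tests below.

```go
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	op := xdr.Operation{
		Body: xdr.OperationBody{
			Type: xdr.OperationTypeCreateAccount,
			CreateAccountOp: &xdr.CreateAccountOp{
				Destination:     xdr.MustAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"),
				StartingBalance: 19995825,
			},
		},
	}

	// With the GoString implementations in place, this prints compilable Go
	// such as xdr.Operation{Body: xdr.OperationBody{Type:
	// xdr.OperationTypeCreateAccount, CreateAccountOp:
	// &xdr.CreateAccountOp{...}}} instead of raw pointers and hex values.
	fmt.Printf("%#v\n", op)
}
```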
+func (s AssetCode) GoString() string { + var code string + switch s.Type { + case AssetTypeAssetTypeCreditAlphanum4: + code = string(s.AssetCode4[:]) + case AssetTypeAssetTypeCreditAlphanum12: + code = string(s.AssetCode12[:]) + default: + panic("Unknown type") + } + return fmt.Sprintf("xdr.MustNewAssetCodeFromString(%#v)", strings.TrimRight(code, string([]byte{0}))) +} + +// GoString implements fmt.GoStringer. +func (s Signer) GoString() string { + var sb strings.Builder + sb.WriteString("xdr.Signer{") + sb.WriteString(fmt.Sprintf("Key: xdr.MustSigner(%#v),", s.Key.Address())) + sb.WriteString(fmt.Sprintf("Weight: %#v", s.Weight)) + sb.WriteString("}") + return sb.String() +} + +// GoString implements fmt.GoStringer. +func (t *TimeBounds) GoString() string { + if t == nil { + return "nil" + } + return fmt.Sprintf("&xdr.TimeBounds{MinTime: xdr.TimePoint(%d), MaxTime: xdr.TimePoint(%d)}", t.MinTime, t.MaxTime) +} diff --git a/xdr/go_string_test.go b/xdr/go_string_test.go new file mode 100644 index 0000000000..d484b301f4 --- /dev/null +++ b/xdr/go_string_test.go @@ -0,0 +1,282 @@ +package xdr_test + +import ( + "fmt" + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func Uint32Ptr(val uint32) *xdr.Uint32 { + pval := xdr.Uint32(val) + return &pval +} + +func TestAssetGoStringerNative(t *testing.T) { + asset, err := xdr.NewAsset(xdr.AssetTypeAssetTypeNative, nil) + assert.NoError(t, err) + assert.Equal(t, "xdr.MustNewNativeAsset()", fmt.Sprintf("%#v", asset)) +} + +func TestAssetGoStringerCredit(t *testing.T) { + asset, err := xdr.BuildAsset("credit_alphanum4", "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H", "USD") + assert.NoError(t, err) + assert.Equal( + t, + `xdr.MustNewCreditAsset("USD", "GBRPYHIL2CI3FNQ4BXLFMNDLFJUNPU2HY3ZMFSHONUCEOASW7QC7OX2H")`, + fmt.Sprintf("%#v", asset), + ) +} + +func TestMemoGoStringerNone(t *testing.T) { + memo := xdr.Memo{Type: xdr.MemoTypeMemoNone} + assert.Equal( + t, + `xdr.Memo{Type: xdr.MemoTypeMemoNone}`, + fmt.Sprintf("%#v", memo), + ) +} + +func TestMemoGoStringerText(t *testing.T) { + text := "abc" + memo := xdr.Memo{Type: xdr.MemoTypeMemoText, Text: &text} + assert.Equal(t, `xdr.MemoText("abc")`, fmt.Sprintf("%#v", memo)) +} + +func TestMemoGoStringerID(t *testing.T) { + id := xdr.Uint64(123) + memo := xdr.Memo{Type: xdr.MemoTypeMemoId, Id: &id} + assert.Equal(t, `xdr.MemoID(123)`, fmt.Sprintf("%#v", memo)) +} + +func TestMemoGoStringerHash(t *testing.T) { + hash := xdr.Hash{0x7b} + memo := xdr.Memo{Type: xdr.MemoTypeMemoHash, Hash: &hash} + assert.Equal( + t, + `xdr.MemoHash(xdr.Hash{0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})`, + fmt.Sprintf("%#v", memo), + ) +} + +func TestMemoGoStringerRetHash(t *testing.T) { + hash := xdr.Hash{0x7b} + memo := xdr.Memo{Type: xdr.MemoTypeMemoReturn, RetHash: &hash} + assert.Equal( + t, + `xdr.MemoRetHash(xdr.Hash{0x7b, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0})`, + fmt.Sprintf("%#v", memo), + ) +} + +func TestOperationGoStringerSource(t *testing.T) { + operation := xdr.Operation{ + SourceAccount: xdr.MustMuxedAddressPtr("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), + Body: xdr.OperationBody{ + Type: xdr.OperationTypeManageBuyOffer, + ManageBuyOfferOp: &xdr.ManageBuyOfferOp{ + Selling: xdr.MustNewNativeAsset(), + Buying: 
xdr.MustNewCreditAsset("USD", "GB2O5PBQJDAFCNM2U2DIMVAEI7ISOYL4UJDTLN42JYYXAENKBWY6OBKZ"), + BuyAmount: 19995825, + Price: xdr.Price{N: 524087, D: 5000000}, + OfferId: 258020376, + }, + }, + } + assert.Equal( + t, + `xdr.Operation{SourceAccount: xdr.MustMuxedAddressPtr("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"),Body: xdr.OperationBody{Type: xdr.OperationTypeManageBuyOffer,ManageBuyOfferOp: &xdr.ManageBuyOfferOp{Selling:xdr.MustNewNativeAsset(), Buying:xdr.MustNewCreditAsset("USD", "GB2O5PBQJDAFCNM2U2DIMVAEI7ISOYL4UJDTLN42JYYXAENKBWY6OBKZ"), BuyAmount:19995825, Price:xdr.Price{N:524087, D:5000000}, OfferId:258020376}}}`, + fmt.Sprintf("%#v", operation), + ) +} + +func TestOperationBodyGoStringerCreateAccount(t *testing.T) { + operationBody := xdr.OperationBody{ + Type: xdr.OperationTypeCreateAccount, + CreateAccountOp: &xdr.CreateAccountOp{ + Destination: xdr.MustAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), + StartingBalance: 19995825, + }, + } + + assert.Equal( + t, + `xdr.OperationBody{Type: xdr.OperationTypeCreateAccount,CreateAccountOp: &xdr.CreateAccountOp{Destination:xdr.MustAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), StartingBalance:19995825}}`, + fmt.Sprintf("%#v", operationBody), + ) +} + +func TestOperationBodyGoStringerSetOptions(t *testing.T) { + operationBody := xdr.OperationBody{ + Type: xdr.OperationTypeSetOptions, + SetOptionsOp: &xdr.SetOptionsOp{ + InflationDest: xdr.MustAddressPtr("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), + ClearFlags: Uint32Ptr(0), + SetFlags: Uint32Ptr(1), + MasterWeight: Uint32Ptr(2), + LowThreshold: Uint32Ptr(3), + MedThreshold: Uint32Ptr(4), + HighThreshold: Uint32Ptr(5), + HomeDomain: xdr.String32Ptr("stellar.org"), + Signer: &xdr.Signer{ + Key: xdr.MustSigner("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), + Weight: 6, + }, + }, + } + + assert.Equal( + t, + `xdr.OperationBody{Type: xdr.OperationTypeSetOptions,SetOptionsOp: &xdr.SetOptionsOp{InflationDest: xdr.MustAddressPtr("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"),ClearFlags: xdr.Uint32Ptr(0),SetFlags: xdr.Uint32Ptr(1),MasterWeight: xdr.Uint32Ptr(2),LowThreshold: xdr.Uint32Ptr(3),MedThreshold: xdr.Uint32Ptr(4),HighThreshold: xdr.Uint32Ptr(5),HomeDomain: xdr.String32Ptr("stellar.org"),Signer: &xdr.Signer{Key: xdr.MustSigner("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"),Weight: 6},}}`, + fmt.Sprintf("%#v", operationBody), + ) +} + +func TestOperationBodyGoStringerManageData(t *testing.T) { + operationBody := xdr.OperationBody{ + Type: xdr.OperationTypeManageData, + ManageDataOp: &xdr.ManageDataOp{ + DataName: "abc", + DataValue: &xdr.DataValue{0xa, 0xb}, + }, + } + + assert.Equal( + t, + `xdr.OperationBody{Type: xdr.OperationTypeManageData,ManageDataOp: &xdr.ManageDataOp{DataName: "abc",DataValue: &xdr.DataValue{0xa, 0xb}}}`, + fmt.Sprintf("%#v", operationBody), + ) +} + +func TestOperationBodyGoStringerAccountMerge(t *testing.T) { + operationBody := xdr.OperationBody{ + Type: xdr.OperationTypeAccountMerge, + Destination: xdr.MustMuxedAddressPtr("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), + } + + assert.Equal( + t, + `xdr.OperationBody{Type: xdr.OperationTypeAccountMerge,Destination: xdr.MustMuxedAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4")}`, + fmt.Sprintf("%#v", operationBody), + ) +} + +func TestOperationBodyGoStringerAllowTrust(t *testing.T) { + operationBody := xdr.OperationBody{ + Type: xdr.OperationTypeAllowTrust, + 
AllowTrustOp: &xdr.AllowTrustOp{ + Trustor: xdr.MustAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), + Asset: xdr.MustNewAssetCodeFromString("USD"), + Authorize: 1, + }, + } + + assert.Equal( + t, + `xdr.OperationBody{Type: xdr.OperationTypeAllowTrust,AllowTrustOp: &xdr.AllowTrustOp{Trustor:xdr.MustAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), Asset:xdr.MustNewAssetCodeFromString("USD"), Authorize:1}}`, + fmt.Sprintf("%#v", operationBody), + ) +} + +func TestTransactionEnvelopeGoStringerV1(t *testing.T) { + envelope := xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: xdr.MustMuxedAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), + Fee: 100, + SeqNum: 99284448289310326, + TimeBounds: &xdr.TimeBounds{ + MinTime: xdr.TimePoint(0), + MaxTime: xdr.TimePoint(0), + }, + Memo: xdr.Memo{Type: xdr.MemoTypeMemoNone}, + Operations: []xdr.Operation{ + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypeManageSellOffer, + ManageSellOfferOp: &xdr.ManageSellOfferOp{ + Selling: xdr.MustNewNativeAsset(), + Buying: xdr.MustNewCreditAsset("USD", "GB2O5PBQJDAFCNM2U2DIMVAEI7ISOYL4UJDTLN42JYYXAENKBWY6OBKZ"), + Amount: 19995825, + Price: xdr.Price{N: 524087, D: 5000000}, + OfferId: 258020376, + }, + }, + }, + }, + Ext: xdr.TransactionExt{V: 0}}, + Signatures: []xdr.DecoratedSignature{ + { + Hint: xdr.SignatureHint{0x23, 0x73, 0x1c, 0x9f}, + Signature: xdr.Signature{0x71, 0xd3, 0xfa, 0x9, 0xd9, 0x12, 0xd3, 0xcf, 0x2c, 0x6f, 0xd9, 0x29, 0x9a, 0xdd, 0xfd, 0x77, 0x84, 0xe1, 0x6, 0x4f, 0xe, 0xed, 0x9, 0x77, 0xe, 0x46, 0x9a, 0xa3, 0x59, 0xf3, 0x7, 0x16, 0xb3, 0x28, 0x4a, 0x40, 0x40, 0x98, 0x1e, 0xe1, 0xea, 0xc6, 0xa4, 0xc, 0x6e, 0x96, 0xc3, 0x1e, 0x46, 0x71, 0x4f, 0x54, 0x32, 0xc5, 0x93, 0x81, 0x7d, 0xb1, 0xa4, 0xf9, 0xa5, 0x3e, 0x33, 0x4}, + }, + }, + }, + } + + assert.Equal( + t, + `xdr.TransactionEnvelope{Type: xdr.EnvelopeTypeEnvelopeTypeTx,V1: &xdr.TransactionV1Envelope{Tx:xdr.Transaction{SourceAccount:xdr.MustMuxedAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4"), Fee:100, SeqNum:99284448289310326, TimeBounds:&xdr.TimeBounds{MinTime: xdr.TimePoint(0), MaxTime: xdr.TimePoint(0)}, Memo:xdr.Memo{Type: xdr.MemoTypeMemoNone}, Operations:[]xdr.Operation{xdr.Operation{Body: xdr.OperationBody{Type: xdr.OperationTypeManageSellOffer,ManageSellOfferOp: &xdr.ManageSellOfferOp{Selling:xdr.MustNewNativeAsset(), Buying:xdr.MustNewCreditAsset("USD", "GB2O5PBQJDAFCNM2U2DIMVAEI7ISOYL4UJDTLN42JYYXAENKBWY6OBKZ"), Amount:19995825, Price:xdr.Price{N:524087, D:5000000}, OfferId:258020376}}}}, Ext:xdr.TransactionExt{V:0}}, Signatures:[]xdr.DecoratedSignature{xdr.DecoratedSignature{Hint:xdr.SignatureHint{0x23, 0x73, 0x1c, 0x9f}, Signature:xdr.Signature{0x71, 0xd3, 0xfa, 0x9, 0xd9, 0x12, 0xd3, 0xcf, 0x2c, 0x6f, 0xd9, 0x29, 0x9a, 0xdd, 0xfd, 0x77, 0x84, 0xe1, 0x6, 0x4f, 0xe, 0xed, 0x9, 0x77, 0xe, 0x46, 0x9a, 0xa3, 0x59, 0xf3, 0x7, 0x16, 0xb3, 0x28, 0x4a, 0x40, 0x40, 0x98, 0x1e, 0xe1, 0xea, 0xc6, 0xa4, 0xc, 0x6e, 0x96, 0xc3, 0x1e, 0x46, 0x71, 0x4f, 0x54, 0x32, 0xc5, 0x93, 0x81, 0x7d, 0xb1, 0xa4, 0xf9, 0xa5, 0x3e, 0x33, 0x4}}}}}`, + fmt.Sprintf("%#v", envelope), + ) +} + +func TestTransactionEnvelopeGoStringerFeeBump(t *testing.T) { + envelope := xdr.TransactionEnvelope{ + Type: xdr.EnvelopeTypeEnvelopeTypeTxFeeBump, + FeeBump: &xdr.FeeBumpTransactionEnvelope{ + Tx: xdr.FeeBumpTransaction{ + FeeSource: 
xdr.MustMuxedAddress("GD6WNNTW664WH7FXC5RUMUTF7P5QSURC2IT36VOQEEGFZ4UWUEQGECAL"), + Fee: 4000, + InnerTx: xdr.FeeBumpTransactionInnerTx{ + Type: xdr.EnvelopeTypeEnvelopeTypeTx, + V1: &xdr.TransactionV1Envelope{ + Tx: xdr.Transaction{ + SourceAccount: xdr.MustMuxedAddress("GD6WNNTW664WH7FXC5RUMUTF7P5QSURC2IT36VOQEEGFZ4UWUEQGECAL"), + Fee: 0, + SeqNum: 566862668627969, + TimeBounds: &xdr.TimeBounds{ + MinTime: xdr.TimePoint(0), + MaxTime: xdr.TimePoint(0), + }, + Memo: xdr.MemoText("My 1st fee bump! Woohoo!"), + Operations: []xdr.Operation{ + { + Body: xdr.OperationBody{ + Type: xdr.OperationTypePayment, + PaymentOp: &xdr.PaymentOp{ + Destination: xdr.MustMuxedAddress("GD6WNNTW664WH7FXC5RUMUTF7P5QSURC2IT36VOQEEGFZ4UWUEQGECAL"), + Asset: xdr.MustNewNativeAsset(), + Amount: 1000000000, + }, + }, + }, + }, + Ext: xdr.TransactionExt{V: 0}, + }, + Signatures: []xdr.DecoratedSignature{ + { + Hint: xdr.SignatureHint{0x96, 0xa1, 0x20, 0x62}, + Signature: xdr.Signature{0x5e, 0x36, 0x9, 0x6c, 0x7a, 0xa4, 0x73, 0xde, 0x20, 0xf9, 0x4f, 0x2, 0xf4, 0x9c, 0x66, 0x10, 0x42, 0x1f, 0xa1, 0x34, 0x68, 0x6b, 0xe4, 0xbf, 0xce, 0x67, 0x71, 0x3b, 0x61, 0x2c, 0x78, 0xae, 0x25, 0x66, 0xe, 0x28, 0xad, 0xe9, 0xe7, 0xb8, 0x8c, 0xf8, 0x46, 0xba, 0x98, 0x43, 0xde, 0x40, 0x27, 0xb8, 0xb4, 0x52, 0xf3, 0x70, 0xab, 0x80, 0x8b, 0xac, 0x45, 0xb, 0x1, 0xee, 0xbe, 0x6}, + }, + }, + }, + }, + Ext: xdr.FeeBumpTransactionExt{V: 0}}, + Signatures: []xdr.DecoratedSignature{ + { + Hint: xdr.SignatureHint{0x96, 0xa1, 0x20, 0x62}, + Signature: xdr.Signature{0xb2, 0xcc, 0x82, 0x6e, 0x9c, 0xa4, 0x3a, 0x11, 0x75, 0x33, 0xd1, 0xfd, 0xa2, 0x49, 0xc0, 0x50, 0xf1, 0xd8, 0x62, 0x7, 0xf6, 0xdf, 0x2, 0x9a, 0x46, 0xa5, 0xe8, 0x3a, 0xb7, 0xbf, 0x4b, 0xc7, 0xcb, 0xd4, 0x4f, 0xe0, 0xe5, 0x25, 0xb8, 0xe, 0xbe, 0xdc, 0x53, 0x68, 0x69, 0x19, 0xdc, 0x57, 0xf3, 0x39, 0x77, 0x71, 0xca, 0x73, 0x89, 0xa4, 0xdc, 0x2c, 0xca, 0xd4, 0x1d, 0x5f, 0x9d, 0x4}, + }, + }, + }, + } + + assert.Equal( + t, + `xdr.TransactionEnvelope{Type: xdr.EnvelopeTypeEnvelopeTypeTxFeeBump,FeeBump: &xdr.FeeBumpTransactionEnvelope{Tx:xdr.FeeBumpTransaction{FeeSource:xdr.MustMuxedAddress("GD6WNNTW664WH7FXC5RUMUTF7P5QSURC2IT36VOQEEGFZ4UWUEQGECAL"), Fee:4000, InnerTx:xdr.FeeBumpTransactionInnerTx{Type: xdr.EnvelopeTypeEnvelopeTypeTx,V1: &xdr.TransactionV1Envelope{Tx:xdr.Transaction{SourceAccount:xdr.MustMuxedAddress("GD6WNNTW664WH7FXC5RUMUTF7P5QSURC2IT36VOQEEGFZ4UWUEQGECAL"), Fee:0, SeqNum:566862668627969, TimeBounds:&xdr.TimeBounds{MinTime: xdr.TimePoint(0), MaxTime: xdr.TimePoint(0)}, Memo:xdr.MemoText("My 1st fee bump! 
Woohoo!"), Operations:[]xdr.Operation{xdr.Operation{Body: xdr.OperationBody{Type: xdr.OperationTypePayment,PaymentOp: &xdr.PaymentOp{Destination:xdr.MustMuxedAddress("GD6WNNTW664WH7FXC5RUMUTF7P5QSURC2IT36VOQEEGFZ4UWUEQGECAL"), Asset:xdr.MustNewNativeAsset(), Amount:1000000000}}}}, Ext:xdr.TransactionExt{V:0}}, Signatures:[]xdr.DecoratedSignature{xdr.DecoratedSignature{Hint:xdr.SignatureHint{0x96, 0xa1, 0x20, 0x62}, Signature:xdr.Signature{0x5e, 0x36, 0x9, 0x6c, 0x7a, 0xa4, 0x73, 0xde, 0x20, 0xf9, 0x4f, 0x2, 0xf4, 0x9c, 0x66, 0x10, 0x42, 0x1f, 0xa1, 0x34, 0x68, 0x6b, 0xe4, 0xbf, 0xce, 0x67, 0x71, 0x3b, 0x61, 0x2c, 0x78, 0xae, 0x25, 0x66, 0xe, 0x28, 0xad, 0xe9, 0xe7, 0xb8, 0x8c, 0xf8, 0x46, 0xba, 0x98, 0x43, 0xde, 0x40, 0x27, 0xb8, 0xb4, 0x52, 0xf3, 0x70, 0xab, 0x80, 0x8b, 0xac, 0x45, 0xb, 0x1, 0xee, 0xbe, 0x6}}}}}, Ext:xdr.FeeBumpTransactionExt{V:0}}, Signatures:[]xdr.DecoratedSignature{xdr.DecoratedSignature{Hint:xdr.SignatureHint{0x96, 0xa1, 0x20, 0x62}, Signature:xdr.Signature{0xb2, 0xcc, 0x82, 0x6e, 0x9c, 0xa4, 0x3a, 0x11, 0x75, 0x33, 0xd1, 0xfd, 0xa2, 0x49, 0xc0, 0x50, 0xf1, 0xd8, 0x62, 0x7, 0xf6, 0xdf, 0x2, 0x9a, 0x46, 0xa5, 0xe8, 0x3a, 0xb7, 0xbf, 0x4b, 0xc7, 0xcb, 0xd4, 0x4f, 0xe0, 0xe5, 0x25, 0xb8, 0xe, 0xbe, 0xdc, 0x53, 0x68, 0x69, 0x19, 0xdc, 0x57, 0xf3, 0x39, 0x77, 0x71, 0xca, 0x73, 0x89, 0xa4, 0xdc, 0x2c, 0xca, 0xd4, 0x1d, 0x5f, 0x9d, 0x4}}}}}`, + fmt.Sprintf("%#v", envelope), + ) +} diff --git a/xdr/hash.go b/xdr/hash.go new file mode 100644 index 0000000000..80a1800464 --- /dev/null +++ b/xdr/hash.go @@ -0,0 +1,7 @@ +package xdr + +import "encoding/hex" + +func (h Hash) HexString() string { + return hex.EncodeToString(h[:]) +} diff --git a/xdr/json.go b/xdr/json.go new file mode 100644 index 0000000000..a22fbed0b6 --- /dev/null +++ b/xdr/json.go @@ -0,0 +1,192 @@ +package xdr + +import ( + "encoding/json" + "fmt" + "regexp" + "strconv" + "time" + + "github.com/stellar/go/support/errors" +) + +// iso8601Time is a timestamp which supports parsing dates which have a year outside the 0000..9999 range +type iso8601Time struct { + time.Time +} + +// reISO8601 is the regular expression used to parse date strings in the +// ISO 8601 extended format, with or without an expanded year representation. +var reISO8601 = regexp.MustCompile(`^([-+]?\d{4,})-(\d{2})-(\d{2})`) + +// MarshalJSON serializes the timestamp to a string +func (t iso8601Time) MarshalJSON() ([]byte, error) { + ts := t.Format(time.RFC3339) + if t.Year() > 9999 { + ts = "+" + ts + } + + return json.Marshal(ts) +} + +// UnmarshalJSON parses a JSON string into a iso8601Time instance. 
+func (t *iso8601Time) UnmarshalJSON(b []byte) error { + var s *string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if s == nil { + return nil + } + + text := *s + m := reISO8601.FindStringSubmatch(text) + + if len(m) != 4 { + return fmt.Errorf("UnmarshalJSON: cannot parse %s", text) + } + // No need to check for errors since the regexp guarantees the matches + // are valid integers + year, _ := strconv.Atoi(m[1]) + month, _ := strconv.Atoi(m[2]) + day, _ := strconv.Atoi(m[3]) + + ts, err := time.Parse(time.RFC3339, "2006-01-02"+text[len(m[0]):]) + if err != nil { + return errors.Wrap(err, "Could not extract time") + } + + t.Time = time.Date(year, time.Month(month), day, ts.Hour(), ts.Minute(), ts.Second(), ts.Nanosecond(), ts.Location()) + return nil +} + +func newiso8601Time(epoch int64) *iso8601Time { + return &iso8601Time{time.Unix(epoch, 0).UTC()} +} + +type claimPredicateJSON struct { + And *[]claimPredicateJSON `json:"and,omitempty"` + Or *[]claimPredicateJSON `json:"or,omitempty"` + Not *claimPredicateJSON `json:"not,omitempty"` + Unconditional bool `json:"unconditional,omitempty"` + AbsBefore *iso8601Time `json:"abs_before,omitempty"` + AbsBeforeEpoch *int64 `json:"abs_before_epoch,string,omitempty"` + RelBefore *int64 `json:"rel_before,string,omitempty"` +} + +func convertPredicatesToXDR(input []claimPredicateJSON) ([]ClaimPredicate, error) { + parts := make([]ClaimPredicate, len(input)) + for i, pred := range input { + converted, err := pred.toXDR() + if err != nil { + return parts, err + } + parts[i] = converted + } + return parts, nil +} + +func (c claimPredicateJSON) toXDR() (ClaimPredicate, error) { + var result ClaimPredicate + var err error + + switch { + case c.Unconditional: + result.Type = ClaimPredicateTypeClaimPredicateUnconditional + case c.RelBefore != nil: + relBefore := Int64(*c.RelBefore) + result.Type = ClaimPredicateTypeClaimPredicateBeforeRelativeTime + result.RelBefore = &relBefore + case c.AbsBefore != nil: + absBefore := Int64(c.AbsBefore.UTC().Unix()) + result.Type = ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime + result.AbsBefore = &absBefore + case c.Not != nil: + if inner, innerErr := c.Not.toXDR(); innerErr != nil { + err = innerErr + } else { + result.Type = ClaimPredicateTypeClaimPredicateNot + result.NotPredicate = new(*ClaimPredicate) + *result.NotPredicate = &inner + } + case c.And != nil: + if inner, innerErr := convertPredicatesToXDR(*c.And); innerErr != nil { + err = innerErr + } else { + result.Type = ClaimPredicateTypeClaimPredicateAnd + result.AndPredicates = &inner + } + case c.Or != nil: + if inner, innerErr := convertPredicatesToXDR(*c.Or); innerErr != nil { + err = innerErr + } else { + result.Type = ClaimPredicateTypeClaimPredicateOr + result.OrPredicates = &inner + } + } + + return result, err +} + +func convertPredicatesToJSON(input []ClaimPredicate) ([]claimPredicateJSON, error) { + parts := make([]claimPredicateJSON, len(input)) + for i, pred := range input { + converted, err := pred.toJSON() + if err != nil { + return parts, err + } + parts[i] = converted + } + return parts, nil +} + +func (c ClaimPredicate) toJSON() (claimPredicateJSON, error) { + var payload claimPredicateJSON + var err error + + switch c.Type { + case ClaimPredicateTypeClaimPredicateAnd: + payload.And = new([]claimPredicateJSON) + *payload.And, err = convertPredicatesToJSON(c.MustAndPredicates()) + case ClaimPredicateTypeClaimPredicateOr: + payload.Or = new([]claimPredicateJSON) + *payload.Or, err = 
convertPredicatesToJSON(c.MustOrPredicates()) + case ClaimPredicateTypeClaimPredicateUnconditional: + payload.Unconditional = true + case ClaimPredicateTypeClaimPredicateNot: + payload.Not = new(claimPredicateJSON) + *payload.Not, err = c.MustNotPredicate().toJSON() + case ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime: + absBeforeEpoch := int64(c.MustAbsBefore()) + payload.AbsBefore = newiso8601Time(absBeforeEpoch) + payload.AbsBeforeEpoch = &absBeforeEpoch + case ClaimPredicateTypeClaimPredicateBeforeRelativeTime: + relBefore := int64(c.MustRelBefore()) + payload.RelBefore = &relBefore + default: + err = fmt.Errorf("invalid predicate type: " + c.Type.String()) + } + return payload, err +} + +func (c ClaimPredicate) MarshalJSON() ([]byte, error) { + payload, err := c.toJSON() + if err != nil { + return nil, err + } + return json.Marshal(payload) +} + +func (c *ClaimPredicate) UnmarshalJSON(b []byte) error { + var payload claimPredicateJSON + if err := json.Unmarshal(b, &payload); err != nil { + return err + } + + parsed, err := payload.toXDR() + if err != nil { + return err + } + *c = parsed + return nil +} diff --git a/xdr/json_test.go b/xdr/json_test.go new file mode 100644 index 0000000000..1e72912ee7 --- /dev/null +++ b/xdr/json_test.go @@ -0,0 +1,197 @@ +package xdr + +import ( + "bytes" + "encoding/json" + "math" + "testing" + + "github.com/stellar/go/gxdr" + "github.com/stellar/go/randxdr" + + "github.com/stretchr/testify/assert" +) + +func TestClaimPredicateJSON(t *testing.T) { + unconditional := &ClaimPredicate{ + Type: ClaimPredicateTypeClaimPredicateUnconditional, + } + relBefore := Int64(12) + absBefore := Int64(1598440539) + + source := ClaimPredicate{ + Type: ClaimPredicateTypeClaimPredicateAnd, + AndPredicates: &[]ClaimPredicate{ + { + Type: ClaimPredicateTypeClaimPredicateOr, + OrPredicates: &[]ClaimPredicate{ + { + Type: ClaimPredicateTypeClaimPredicateBeforeRelativeTime, + RelBefore: &relBefore, + }, + { + Type: ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime, + AbsBefore: &absBefore, + }, + }, + }, + { + Type: ClaimPredicateTypeClaimPredicateNot, + NotPredicate: &unconditional, + }, + }, + } + + serialized, err := json.Marshal(source) + assert.NoError(t, err) + assert.JSONEq( + t, + `{"and":[{"or":[{"rel_before":"12"},{"abs_before":"2020-08-26T11:15:39Z","abs_before_epoch":"1598440539"}]},{"not":{"unconditional":true}}]}`, + string(serialized), + ) + + var parsed ClaimPredicate + assert.NoError(t, json.Unmarshal(serialized, &parsed)) + + var serializedBase64, parsedBase64 string + serializedBase64, err = MarshalBase64(source) + assert.NoError(t, err) + + parsedBase64, err = MarshalBase64(parsed) + assert.NoError(t, err) + + assert.Equal(t, serializedBase64, parsedBase64) +} + +func TestRandClaimPredicateJSON(t *testing.T) { + gen := randxdr.NewGenerator() + for i := 0; i < 10000; i++ { + cp := &ClaimPredicate{} + shape := &gxdr.ClaimPredicate{} + gen.Next( + shape, + []randxdr.Preset{ + {randxdr.IsPtr, randxdr.SetPtr(true)}, + }, + ) + assert.NoError(t, gxdr.Convert(shape, cp)) + + serializedJSON, err := json.Marshal(cp) + assert.NoError(t, err) + + serializedBytes, err := cp.MarshalBinary() + assert.NoError(t, err) + + var parsed ClaimPredicate + assert.NoError(t, json.Unmarshal(serializedJSON, &parsed)) + parsedBin, err := parsed.MarshalBinary() + assert.NoError(t, err) + + assert.True(t, bytes.Equal(serializedBytes, parsedBin)) + } +} + +func TestAbsBeforeTimestamps(t *testing.T) { + const year = 365 * 24 * 60 * 60 + for _, testCase := range []struct { + unix 
int64 + expected string + }{ + { + 0, + `{"abs_before":"1970-01-01T00:00:00Z","abs_before_epoch":"0"}`, + }, + { + 900 * year, + `{"abs_before":"2869-05-27T00:00:00Z","abs_before_epoch":"28382400000"}`, + }, + { + math.MaxInt64, + `{"abs_before":"+292277026596-12-04T15:30:07Z","abs_before_epoch":"9223372036854775807"}`, + }, + { + -10, + `{"abs_before":"1969-12-31T23:59:50Z","abs_before_epoch":"-10"}`, + }, + { + -9000 * year, + `{"abs_before":"-7025-12-23T00:00:00Z","abs_before_epoch":"-283824000000"}`, + }, + { + math.MinInt64, + // this serialization doesn't make sense but at least it doesn't crash the marshaller + `{"abs_before":"+292277026596-12-04T15:30:08Z","abs_before_epoch":"-9223372036854775808"}`, + }, + } { + xdrSec := Int64(testCase.unix) + source := ClaimPredicate{ + Type: ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime, + AbsBefore: &xdrSec, + } + + serialized, err := json.Marshal(source) + assert.NoError(t, err) + assert.JSONEq(t, testCase.expected, string(serialized)) + + var parsed ClaimPredicate + assert.NoError(t, json.Unmarshal(serialized, &parsed)) + assert.Equal(t, *parsed.AbsBefore, *source.AbsBefore) + } +} + +func TestISO8601Time_UnmarshalJSON(t *testing.T) { + for _, testCase := range []struct { + name string + timestamp string + expectedParsed iso8601Time + expectedError string + }{ + { + "null timestamp", + "null", + iso8601Time{}, + "", + }, + { + "empty string", + "", + iso8601Time{}, + "unexpected end of JSON input", + }, + { + "not string", + "1", + iso8601Time{}, + "json: cannot unmarshal number into Go value of type string", + }, + { + "does not begin with double quotes", + "'1\"", + iso8601Time{}, + "invalid character '\\'' looking for beginning of value", + }, + { + "does not end with double quotes", + "\"1", + iso8601Time{}, + "unexpected end of JSON input", + }, + { + "could not extract time", + "\"2006-01-02aldfd\"", + iso8601Time{}, + "Could not extract time: parsing time \"2006-01-02aldfd\" as \"2006-01-02T15:04:05Z07:00\": cannot parse \"aldfd\" as \"T\"", + }, + } { + t.Run(testCase.name, func(t *testing.T) { + ts := &iso8601Time{} + err := ts.UnmarshalJSON([]byte(testCase.timestamp)) + if len(testCase.expectedError) == 0 { + assert.NoError(t, err) + assert.Equal(t, *ts, testCase.expectedParsed) + } else { + assert.EqualError(t, err, testCase.expectedError) + } + }) + } +} diff --git a/xdr/ledger_close_meta.go b/xdr/ledger_close_meta.go new file mode 100644 index 0000000000..4d3248f394 --- /dev/null +++ b/xdr/ledger_close_meta.go @@ -0,0 +1,21 @@ +package xdr + +func (l LedgerCloseMeta) LedgerSequence() uint32 { + return uint32(l.MustV0().LedgerHeader.Header.LedgerSeq) +} + +func (l LedgerCloseMeta) LedgerHash() Hash { + return l.MustV0().LedgerHeader.Hash +} + +func (l LedgerCloseMeta) PreviousLedgerHash() Hash { + return l.MustV0().LedgerHeader.Header.PreviousLedgerHash +} + +func (l LedgerCloseMeta) ProtocolVersion() uint32 { + return uint32(l.MustV0().LedgerHeader.Header.LedgerVersion) +} + +func (l LedgerCloseMeta) BucketListHash() Hash { + return l.MustV0().LedgerHeader.Header.BucketListHash +} diff --git a/xdr/ledger_close_meta_test.go b/xdr/ledger_close_meta_test.go new file mode 100644 index 0000000000..ac978bfae8 --- /dev/null +++ b/xdr/ledger_close_meta_test.go @@ -0,0 +1,17 @@ +package xdr + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestLedgerSequence(t *testing.T) { + l := LedgerCloseMeta{ + V0: &LedgerCloseMetaV0{ + LedgerHeader: LedgerHeaderHistoryEntry{ + Header: LedgerHeader{LedgerSeq: 23}, + }, + 
}, + } + assert.Equal(t, uint32(23), l.LedgerSequence()) +} diff --git a/xdr/ledger_entry.go b/xdr/ledger_entry.go index cc3f6d9fc0..bcf65c7189 100644 --- a/xdr/ledger_entry.go +++ b/xdr/ledger_entry.go @@ -30,6 +30,16 @@ func (entry *LedgerEntry) LedgerKey() LedgerKey { AccountId: tline.AccountId, Asset: tline.Asset, } + case LedgerEntryTypeClaimableBalance: + cBalance := entry.Data.MustClaimableBalance() + body = LedgerKeyClaimableBalance{ + BalanceId: cBalance.BalanceId, + } + case LedgerEntryTypeLiquidityPool: + lPool := entry.Data.MustLiquidityPool() + body = LedgerKeyLiquidityPool{ + LiquidityPoolId: lPool.LiquidityPoolId, + } default: panic(fmt.Errorf("Unknown entry type: %v", entry.Data.Type)) } @@ -41,3 +51,102 @@ func (entry *LedgerEntry) LedgerKey() LedgerKey { return ret } + +// SponsoringID return SponsorshipDescriptor for a given ledger entry +func (entry *LedgerEntry) SponsoringID() SponsorshipDescriptor { + var sponsor SponsorshipDescriptor + if entry.Ext.V == 1 && entry.Ext.V1 != nil { + sponsor = entry.Ext.V1.SponsoringId + } + return sponsor +} + +// Normalize overwrites LedgerEntry with all the extensions set to default values +// (if extension is not present). +// This is helpful to compare two ledger entries that are the same but for one of +// them extensions are not set. +// Returns the same entry. +func (entry *LedgerEntry) Normalize() *LedgerEntry { + // If ledgerEntry is V0, create ext=1 and add a nil sponsor + if entry.Ext.V == 0 { + entry.Ext = LedgerEntryExt{ + V: 1, + V1: &LedgerEntryExtensionV1{ + SponsoringId: nil, + }, + } + } + + switch entry.Data.Type { + case LedgerEntryTypeAccount: + accountEntry := entry.Data.Account + // Account can have ext=0. For those, create ext=1 with 0 liabilities. + if accountEntry.Ext.V == 0 { + accountEntry.Ext.V = 1 + accountEntry.Ext.V1 = &AccountEntryExtensionV1{ + Liabilities: Liabilities{ + Buying: 0, + Selling: 0, + }, + } + } + // if AccountEntryExtensionV1Ext is v=0, then create v2 with 0 values + if accountEntry.Ext.V1.Ext.V == 0 { + accountEntry.Ext.V1.Ext.V = 2 + accountEntry.Ext.V1.Ext.V2 = &AccountEntryExtensionV2{ + NumSponsored: Uint32(0), + NumSponsoring: Uint32(0), + SignerSponsoringIDs: make([]SponsorshipDescriptor, len(accountEntry.Signers)), + } + } + + signerSponsoringIDs := accountEntry.Ext.V1.Ext.V2.SignerSponsoringIDs + + // Map sponsors (signer => SponsorshipDescriptor) + sponsorsMap := make(map[string]SponsorshipDescriptor) + for i, signer := range accountEntry.Signers { + sponsorsMap[signer.Key.Address()] = signerSponsoringIDs[i] + } + + // Sort signers + accountEntry.Signers = SortSignersByKey(accountEntry.Signers) + + // Recreate sponsors for sorted signers + signerSponsoringIDs = make([]SponsorshipDescriptor, len(accountEntry.Signers)) + for i, signer := range accountEntry.Signers { + signerSponsoringIDs[i] = sponsorsMap[signer.Key.Address()] + } + + accountEntry.Ext.V1.Ext.V2.SignerSponsoringIDs = signerSponsoringIDs + case LedgerEntryTypeTrustline: + // Trust line can have ext=0. For those, create ext=1 + // with 0 liabilities. 
+ trustLineEntry := entry.Data.TrustLine + if trustLineEntry.Ext.V == 0 { + trustLineEntry.Ext.V = 1 + trustLineEntry.Ext.V1 = &TrustLineEntryV1{ + Liabilities: Liabilities{ + Buying: 0, + Selling: 0, + }, + } + } else if trustLineEntry.Ext.V == 1 { + // horizon doesn't make use of TrustLineEntryExtensionV2.liquidityPoolUseCount + // so clear out those fields to make state verifier pass + trustLineEntry.Ext.V1.Ext.V = 0 + trustLineEntry.Ext.V1.Ext.V2 = nil + } + case LedgerEntryTypeClaimableBalance: + claimableBalanceEntry := entry.Data.ClaimableBalance + claimableBalanceEntry.Claimants = SortClaimantsByDestination(claimableBalanceEntry.Claimants) + + if claimableBalanceEntry.Ext.V == 0 { + claimableBalanceEntry.Ext.V = 1 + claimableBalanceEntry.Ext.V1 = &ClaimableBalanceEntryExtensionV1{ + Flags: 0, + } + } + } + + return entry +} diff --git a/xdr/ledger_entry_change.go b/xdr/ledger_entry_change.go index 551d4543dc..6330385871 100644 --- a/xdr/ledger_entry_change.go +++ b/xdr/ledger_entry_change.go @@ -1,6 +1,9 @@ package xdr -import "fmt" +import ( + "encoding/base64" + "fmt" +) // EntryType is a helper to get at the entry type for a change. func (change *LedgerEntryChange) EntryType() LedgerEntryType { @@ -26,3 +29,31 @@ func (change *LedgerEntryChange) LedgerKey() LedgerKey { panic(fmt.Errorf("Unknown change type: %v", change.Type)) } } + +// MarshalBinaryBase64 marshals XDR into a binary form and then encodes it +// using base64. +func (change LedgerEntryChange) MarshalBinaryBase64() (string, error) { + b, err := change.MarshalBinary() + if err != nil { + return "", err + } + + return base64.StdEncoding.EncodeToString(b), nil +} + +// GetLedgerEntry returns the ledger entry that was changed in `change`, along +// with a boolean indicating whether the entry value was valid. 
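Normalize above fills in default extensions and sorts signers and claimants so that two encodings of the same logical entry compare equal. A sketch of how a caller might rely on that; the entriesEquivalent helper and the bare account entry are illustrative, not part of the patch.

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/stellar/go/xdr"
)

// entriesEquivalent reports whether two ledger entries describe the same state
// once missing extensions are replaced with their defaults. Note that
// Normalize mutates the entries in place.
func entriesEquivalent(a, b xdr.LedgerEntry) bool {
	return reflect.DeepEqual(*a.Normalize(), *b.Normalize())
}

func main() {
	accountID := xdr.MustAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4")

	// The same account entry, once without any extension and once with an
	// explicit (empty) V1 ledger-entry extension.
	bare := xdr.LedgerEntry{
		Data: xdr.LedgerEntryData{
			Type:    xdr.LedgerEntryTypeAccount,
			Account: &xdr.AccountEntry{AccountId: accountID},
		},
	}
	withExt := bare
	accountCopy := *bare.Data.Account
	withExt.Data.Account = &accountCopy
	withExt.Ext = xdr.LedgerEntryExt{V: 1, V1: &xdr.LedgerEntryExtensionV1{}}

	// Without Normalize these would never compare equal; with it, they do.
	fmt.Println(entriesEquivalent(bare, withExt)) // true
}
```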
+func (change *LedgerEntryChange) GetLedgerEntry() (LedgerEntry, bool) { + switch change.Type { + case LedgerEntryChangeTypeLedgerEntryCreated: + return change.GetCreated() + case LedgerEntryChangeTypeLedgerEntryState: + return change.GetState() + case LedgerEntryChangeTypeLedgerEntryUpdated: + return change.GetUpdated() + case LedgerEntryChangeTypeLedgerEntryRemoved: + return LedgerEntry{}, false + default: + return LedgerEntry{}, false + } +} diff --git a/xdr/ledger_entry_test.go b/xdr/ledger_entry_test.go new file mode 100644 index 0000000000..765fedef0f --- /dev/null +++ b/xdr/ledger_entry_test.go @@ -0,0 +1,91 @@ +package xdr + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLedgerEntrySponsorship(t *testing.T) { + entry := LedgerEntry{} + desc := entry.SponsoringID() + assert.Nil(t, desc) + + sponsor := MustAddress("GCO26ZSBD63TKYX45H2C7D2WOFWOUSG5BMTNC3BG4QMXM3PAYI6WHKVZ") + desc = SponsorshipDescriptor(&sponsor) + + entry = LedgerEntry{ + Ext: LedgerEntryExt{ + V: 1, + V1: &LedgerEntryExtensionV1{ + SponsoringId: desc, + }, + }, + } + actualDesc := entry.SponsoringID() + assert.Equal(t, desc, actualDesc) +} + +func TestNormalizedClaimableBalance(t *testing.T) { + input := LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: LedgerEntryData{ + Type: LedgerEntryTypeClaimableBalance, + ClaimableBalance: &ClaimableBalanceEntry{ + Claimants: []Claimant{ + { + Type: ClaimantTypeClaimantTypeV0, + V0: &ClaimantV0{ + Destination: MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + { + Type: ClaimantTypeClaimantTypeV0, + V0: &ClaimantV0{ + Destination: MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + }, + }, + }, + Ext: ClaimableBalanceEntryExt{ + V: 0, + }, + }, + }, + } + + expectedOutput := LedgerEntry{ + LastModifiedLedgerSeq: 20, + Data: LedgerEntryData{ + Type: LedgerEntryTypeClaimableBalance, + ClaimableBalance: &ClaimableBalanceEntry{ + Claimants: []Claimant{ + { + Type: ClaimantTypeClaimantTypeV0, + V0: &ClaimantV0{ + Destination: MustAddress("GAHK7EEG2WWHVKDNT4CEQFZGKF2LGDSW2IVM4S5DP42RBW3K6BTODB4A"), + }, + }, + { + Type: ClaimantTypeClaimantTypeV0, + V0: &ClaimantV0{ + Destination: MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML"), + }, + }, + }, + Ext: ClaimableBalanceEntryExt{ + V: 1, + V1: &ClaimableBalanceEntryExtensionV1{ + Flags: 0, + }, + }, + }, + }, + Ext: LedgerEntryExt{ + V: 1, + V1: &LedgerEntryExtensionV1{}, + }, + } + + input.Normalize() + assert.Equal(t, expectedOutput, input) +} diff --git a/xdr/ledger_key.go b/xdr/ledger_key.go index 734100ece2..125c4c20b9 100644 --- a/xdr/ledger_key.go +++ b/xdr/ledger_key.go @@ -1,6 +1,9 @@ package xdr -import "fmt" +import ( + "encoding/base64" + "fmt" +) // LedgerKey implements the `Keyer` interface func (key *LedgerKey) LedgerKey() LedgerKey { @@ -30,6 +33,10 @@ func (key *LedgerKey) Equals(other LedgerKey) bool { l := key.MustTrustLine() r := other.MustTrustLine() return l.AccountId.Equals(r.AccountId) && l.Asset.Equals(r.Asset) + case LedgerEntryTypeLiquidityPool: + l := key.MustLiquidityPool() + r := other.MustLiquidityPool() + return l.LiquidityPoolId == r.LiquidityPoolId default: panic(fmt.Errorf("Unknown ledger key type: %v", key.Type)) } @@ -63,7 +70,7 @@ func (key *LedgerKey) SetData(account AccountId, name string) error { // SetOffer mutates `key` such that it represents the identity of the // data entry owned by `account` and for offer `id`. 
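GetLedgerEntry above gives callers a uniform way to pull the entry out of created, updated, and state changes while skipping removals, which only carry a key. A hypothetical helper built on top of it; the function name and loop are illustrative.

```go
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

// collectEntries returns the ledger entries carried by a slice of changes,
// silently dropping removals and any change whose union arm is not set.
func collectEntries(changes []xdr.LedgerEntryChange) []xdr.LedgerEntry {
	var entries []xdr.LedgerEntry
	for _, change := range changes {
		if entry, ok := change.GetLedgerEntry(); ok {
			entries = append(entries, entry)
		}
	}
	return entries
}

func main() {
	removed := xdr.LedgerEntryChange{Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved}
	fmt.Println(len(collectEntries([]xdr.LedgerEntryChange{removed}))) // 0
}
```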
func (key *LedgerKey) SetOffer(account AccountId, id uint64) error { - data := LedgerKeyOffer{account, Uint64(id)} + data := LedgerKeyOffer{account, Int64(id)} nkey, err := NewLedgerKey(LedgerEntryTypeOffer, data) if err != nil { return err @@ -75,7 +82,7 @@ func (key *LedgerKey) SetOffer(account AccountId, id uint64) error { // SetTrustline mutates `key` such that it represents the identity of the // trustline owned by `account` and for `asset`. -func (key *LedgerKey) SetTrustline(account AccountId, line Asset) error { +func (key *LedgerKey) SetTrustline(account AccountId, line TrustLineAsset) error { data := LedgerKeyTrustLine{account, line} nkey, err := NewLedgerKey(LedgerEntryTypeTrustline, data) if err != nil { @@ -85,3 +92,75 @@ func (key *LedgerKey) SetTrustline(account AccountId, line Asset) error { *key = nkey return nil } + +// SetClaimableBalance mutates `key` such that it represents the identity of a +// claimable balance. +func (key *LedgerKey) SetClaimableBalance(balanceID ClaimableBalanceId) error { + data := LedgerKeyClaimableBalance{balanceID} + nkey, err := NewLedgerKey(LedgerEntryTypeClaimableBalance, data) + if err != nil { + return err + } + + *key = nkey + return nil +} + +// SetLiquidityPool mutates `key` such that it represents the identity of a +// liquidity pool. +func (key *LedgerKey) SetLiquidityPool(poolID PoolId) error { + data := LedgerKeyLiquidityPool{poolID} + nkey, err := NewLedgerKey(LedgerEntryTypeLiquidityPool, data) + if err != nil { + return err + } + + *key = nkey + return nil +} + +func (e *EncodingBuffer) ledgerKeyCompressEncodeTo(key LedgerKey) error { + if err := e.xdrEncoderBuf.WriteByte(byte(key.Type)); err != nil { + return err + } + + switch key.Type { + case LedgerEntryTypeAccount: + return e.accountIdCompressEncodeTo(key.Account.AccountId) + case LedgerEntryTypeTrustline: + if err := e.accountIdCompressEncodeTo(key.TrustLine.AccountId); err != nil { + return err + } + return e.assetTrustlineCompressEncodeTo(key.TrustLine.Asset) + case LedgerEntryTypeOffer: + // We intentionally don't encode the SellerID since the OfferID is enough + // (it's unique to the network) + return key.Offer.OfferId.EncodeTo(e.encoder) + case LedgerEntryTypeData: + if err := e.accountIdCompressEncodeTo(key.Data.AccountId); err != nil { + return err + } + dataName := trimRightZeros([]byte(key.Data.DataName)) + _, err := e.xdrEncoderBuf.Write(dataName) + return err + case LedgerEntryTypeClaimableBalance: + return e.claimableBalanceCompressEncodeTo(key.ClaimableBalance.BalanceId) + case LedgerEntryTypeLiquidityPool: + _, err := e.xdrEncoderBuf.Write(key.LiquidityPool.LiquidityPoolId[:]) + return err + default: + panic("Unknown type") + } + +} + +// MarshalBinaryBase64 marshals XDR into a binary form and then encodes it +// using base64. 
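SetClaimableBalance and SetLiquidityPool above extend LedgerKey to the two new entry types, and MarshalBinaryBase64 (just below) gives the canonical base64 form of any key. A hedged sketch combining the two; the pool ID is fabricated for illustration and assumes PoolId is the usual 32-byte hash type.

```go
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	// A fabricated pool ID, used only to show the API shape; real IDs come
	// from liquidity pool entries in the ledger.
	poolID := xdr.PoolId{0x01}

	var key xdr.LedgerKey
	if err := key.SetLiquidityPool(poolID); err != nil {
		panic(err)
	}

	// Standard XDR encoding of the key, base64-encoded.
	b64, err := key.MarshalBinaryBase64()
	if err != nil {
		panic(err)
	}
	fmt.Println(b64)
}
```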
+func (key LedgerKey) MarshalBinaryBase64() (string, error) { + b, err := key.MarshalBinary() + if err != nil { + return "", err + } + + return base64.StdEncoding.EncodeToString(b), nil +} diff --git a/xdr/ledger_key_test.go b/xdr/ledger_key_test.go new file mode 100644 index 0000000000..33f52f90b4 --- /dev/null +++ b/xdr/ledger_key_test.go @@ -0,0 +1,40 @@ +package xdr + +import ( + "encoding/base64" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLedgerKeyTrustLineBinaryMaxLength(t *testing.T) { + key := &LedgerKey{} + err := key.SetTrustline( + MustAddress("GBFLTCDLOE6YQ74B66RH3S2UW5I2MKZ5VLTM75F4YMIWUIXRIFVNRNIF"), + MustNewCreditAsset("123456789012", "GBFLTCDLOE6YQ74B66RH3S2UW5I2MKZ5VLTM75F4YMIWUIXRIFVNRNIF").ToTrustLineAsset(), + ) + assert.NoError(t, err) + + compressed, err := key.MarshalBinary() + assert.NoError(t, err) + assert.Equal(t, len(compressed), 92) + bcompressed := base64.StdEncoding.EncodeToString(compressed) + assert.Equal(t, len(bcompressed), 124) +} + +func TestTrimRightZeros(t *testing.T) { + require.Equal(t, []byte(nil), trimRightZeros(nil)) + require.Equal(t, []byte{}, trimRightZeros([]byte{})) + require.Equal(t, []byte{}, trimRightZeros([]byte{0x0})) + require.Equal(t, []byte{}, trimRightZeros([]byte{0x0, 0x0})) + require.Equal(t, []byte{0x1}, trimRightZeros([]byte{0x1})) + require.Equal(t, []byte{0x1}, trimRightZeros([]byte{0x1, 0x0})) + require.Equal(t, []byte{0x1}, trimRightZeros([]byte{0x1, 0x0, 0x0})) + require.Equal(t, []byte{0x1}, trimRightZeros([]byte{0x1, 0x0, 0x0, 0x0})) + require.Equal(t, []byte{0x1, 0x2}, trimRightZeros([]byte{0x1, 0x2})) + require.Equal(t, []byte{0x1, 0x2}, trimRightZeros([]byte{0x1, 0x2, 0x0})) + require.Equal(t, []byte{0x1, 0x2}, trimRightZeros([]byte{0x1, 0x2, 0x0, 0x0})) + require.Equal(t, []byte{0x0, 0x2}, trimRightZeros([]byte{0x0, 0x2, 0x0, 0x0})) + require.Equal(t, []byte{0x0, 0x2, 0x0, 0x1}, trimRightZeros([]byte{0x0, 0x2, 0x0, 0x1, 0x0})) +} diff --git a/xdr/liquidity_pool.go b/xdr/liquidity_pool.go new file mode 100644 index 0000000000..841700ecc8 --- /dev/null +++ b/xdr/liquidity_pool.go @@ -0,0 +1,6 @@ +package xdr + +// LiquidityPoolTypeToString maps an xdr.LiquidityPoolType to its string representation +var LiquidityPoolTypeToString = map[LiquidityPoolType]string{ + LiquidityPoolTypeLiquidityPoolConstantProduct: "constant_product", +} diff --git a/xdr/main.go b/xdr/main.go index c236854110..77618af842 100644 --- a/xdr/main.go +++ b/xdr/main.go @@ -5,9 +5,14 @@ package xdr import ( "bytes" "encoding/base64" + "encoding/binary" + "encoding/hex" "fmt" "io" "strings" + + xdr "github.com/stellar/go-xdr/xdr3" + "github.com/stellar/go/support/errors" ) // Keyer represents a type that can be converted into a LedgerKey @@ -18,17 +23,13 @@ type Keyer interface { var _ = LedgerEntry{} var _ = LedgerKey{} -// SafeUnmarshalBase64 first decodes the provided reader from base64 before -// decoding the xdr into the provided destination. Also ensures that the reader -// is fully consumed. 
-func SafeUnmarshalBase64(data string, dest interface{}) error { +var OperationTypeToStringMap = operationTypeMap + +func safeUnmarshalString(decoder func(reader io.Reader) io.Reader, data string, dest interface{}) error { count := &countWriter{} l := len(data) - b64 := io.TeeReader(strings.NewReader(data), count) - raw := base64.NewDecoder(base64.StdEncoding, b64) - _, err := Unmarshal(raw, dest) - + _, err := Unmarshal(decoder(io.TeeReader(strings.NewReader(data), count)), dest) if err != nil { return err } @@ -40,6 +41,26 @@ func SafeUnmarshalBase64(data string, dest interface{}) error { return nil } +// SafeUnmarshalBase64 first decodes the provided reader from base64 before +// decoding the xdr into the provided destination. Also ensures that the reader +// is fully consumed. +func SafeUnmarshalBase64(data string, dest interface{}) error { + return safeUnmarshalString( + func(r io.Reader) io.Reader { + return base64.NewDecoder(base64.StdEncoding, r) + }, + data, + dest, + ) +} + +// SafeUnmarshalHex first decodes the provided reader from hex before +// decoding the xdr into the provided destination. Also ensures that the reader +// is fully consumed. +func SafeUnmarshalHex(data string, dest interface{}) error { + return safeUnmarshalString(hex.NewDecoder, data, dest) +} + // SafeUnmarshal decodes the provided reader into the destination and verifies // that provided bytes are all consumed by the unmarshalling process. func SafeUnmarshal(data []byte, dest interface{}) error { @@ -57,7 +78,33 @@ func SafeUnmarshal(data []byte, dest interface{}) error { return nil } -func MarshalBase64(v interface{}) (string, error) { +type DecoderFrom interface { + decoderFrom +} + +// BytesDecoder efficiently manages a byte reader and an +// xdr decoder so that they don't need to be allocated in +// every decoding call. +type BytesDecoder struct { + decoder *xdr.Decoder + reader *bytes.Reader +} + +func NewBytesDecoder() *BytesDecoder { + reader := bytes.NewReader(nil) + decoder := xdr.NewDecoder(reader) + return &BytesDecoder{ + decoder: decoder, + reader: reader, + } +} + +func (d *BytesDecoder) DecodeBytes(v DecoderFrom, b []byte) (int, error) { + d.reader.Reset(b) + return v.DecodeFrom(d.decoder) +} + +func marshalString(encoder func([]byte) string, v interface{}) (string, error) { var raw bytes.Buffer _, err := Marshal(&raw, v) @@ -66,7 +113,166 @@ func MarshalBase64(v interface{}) (string, error) { return "", err } - return base64.StdEncoding.EncodeToString(raw.Bytes()), nil + return encoder(raw.Bytes()), nil +} + +func MarshalBase64(v interface{}) (string, error) { + return marshalString(base64.StdEncoding.EncodeToString, v) +} + +func MarshalHex(v interface{}) (string, error) { + return marshalString(hex.EncodeToString, v) +} + +// EncodingBuffer reuses internal buffers between invocations to minimize allocations. +// For that reason, it is not thread-safe. +// It intentionally only allows EncodeTo method arguments, to guarantee high performance encoding. 
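SafeUnmarshalHex and MarshalHex above are the hex counterparts of the existing base64 helpers, built on the same safeUnmarshalString/marshalString plumbing. A small round-trip sketch; the account address is illustrative.

```go
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	account := xdr.MustAddress("GC7ERFCD7QLDFRSEPLYB3GYSWX6GYMCHLDL45N4S5Q2N5EJDOMOJ63V4")

	// Encode the XDR value as a hex string...
	encoded, err := xdr.MarshalHex(account)
	if err != nil {
		panic(err)
	}

	// ...and decode it back. SafeUnmarshalHex fails if any bytes are left
	// over, which catches truncated or padded input.
	var decoded xdr.AccountId
	if err := xdr.SafeUnmarshalHex(encoded, &decoded); err != nil {
		panic(err)
	}

	fmt.Println(decoded.Address() == account.Address()) // true
}
```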
+type EncodingBuffer struct {
+	encoder       *xdr.Encoder
+	xdrEncoderBuf bytes.Buffer
+	scratchBuf    []byte
+}
+
+func growSlice(old []byte, newSize int) []byte {
+	oldCap := cap(old)
+	if newSize <= oldCap {
+		return old[:newSize]
+	}
+	// the array doesn't fit, let's return a new one with double the capacity
+	// to avoid further resizing
+	return make([]byte, newSize, 2*newSize)
+}
+
+type EncoderTo interface {
+	EncodeTo(e *xdr.Encoder) error
+}
+
+func NewEncodingBuffer() *EncodingBuffer {
+	var ret EncodingBuffer
+	ret.encoder = xdr.NewEncoder(&ret.xdrEncoderBuf)
+	return &ret
+}
+
+// UnsafeMarshalBinary marshals the input XDR binary, returning
+// a slice pointing to the internal buffer. Handled with care, this improves
+// performance since copying is not required.
+// Subsequent calls to marshalling methods will overwrite the returned buffer.
+func (e *EncodingBuffer) UnsafeMarshalBinary(encodable EncoderTo) ([]byte, error) {
+	e.xdrEncoderBuf.Reset()
+	if err := encodable.EncodeTo(e.encoder); err != nil {
+		return nil, err
+	}
+	return e.xdrEncoderBuf.Bytes(), nil
+}
+
+// UnsafeMarshalBase64 is the base64 version of UnsafeMarshalBinary
+func (e *EncodingBuffer) UnsafeMarshalBase64(encodable EncoderTo) ([]byte, error) {
+	xdrEncoded, err := e.UnsafeMarshalBinary(encodable)
+	if err != nil {
+		return nil, err
+	}
+	neededLen := base64.StdEncoding.EncodedLen(len(xdrEncoded))
+	e.scratchBuf = growSlice(e.scratchBuf, neededLen)
+	base64.StdEncoding.Encode(e.scratchBuf, xdrEncoded)
+	return e.scratchBuf, nil
+}
+
+// UnsafeMarshalHex is the hex version of UnsafeMarshalBinary
+func (e *EncodingBuffer) UnsafeMarshalHex(encodable EncoderTo) ([]byte, error) {
+	xdrEncoded, err := e.UnsafeMarshalBinary(encodable)
+	if err != nil {
+		return nil, err
+	}
+	neededLen := hex.EncodedLen(len(xdrEncoded))
+	e.scratchBuf = growSlice(e.scratchBuf, neededLen)
+	hex.Encode(e.scratchBuf, xdrEncoded)
+	return e.scratchBuf, nil
+}
+
+func (e *EncodingBuffer) MarshalBinary(encodable EncoderTo) ([]byte, error) {
+	xdrEncoded, err := e.UnsafeMarshalBinary(encodable)
+	if err != nil {
+		return nil, err
+	}
+	ret := make([]byte, len(xdrEncoded))
+	copy(ret, xdrEncoded)
+	return ret, nil
+}
+
+// LedgerKeyUnsafeMarshalBinaryCompress marshals LedgerKey to []byte but, unlike
+// MarshalBinary(), it removes all unnecessary bytes, exploiting the fact
+// that XDR pads data to 4 bytes in union discriminants etc.
+// Its primary use is in ingest/io.StateReader, which keeps LedgerKeys in
+// memory, so this function decreases memory requirements.
+//
+// Warning: do not use UnmarshalBinary() on data encoded using this method!
+//
+// Optimizations:
+// - Writes a single byte for union discriminants vs 4 bytes.
+// - Removes type and code padding for Asset.
+// - Removes padding for AccountIds
+func (e *EncodingBuffer) LedgerKeyUnsafeMarshalBinaryCompress(key LedgerKey) ([]byte, error) {
+	e.xdrEncoderBuf.Reset()
+	err := e.ledgerKeyCompressEncodeTo(key)
+	if err != nil {
+		return nil, err
+	}
+	return e.xdrEncoderBuf.Bytes(), nil
+}
+
+func (e *EncodingBuffer) MarshalBase64(encodable EncoderTo) (string, error) {
+	b, err := e.UnsafeMarshalBase64(encodable)
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
+func (e *EncodingBuffer) MarshalHex(encodable EncoderTo) (string, error) {
+	b, err := e.UnsafeMarshalHex(encodable)
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
+
+func MarshalFramed(w io.Writer, v interface{}) error {
+	var tmp bytes.Buffer
+	n, err := Marshal(&tmp, v)
+	if err != nil {
+		return err
+	}
+	un := uint32(n)
+	if un > 0x7fffffff {
+		return fmt.Errorf("Overlong write: %d bytes", n)
+	}
+
+	un = un | 0x80000000
+	err = binary.Write(w, binary.BigEndian, &un)
+	if err != nil {
+		return errors.Wrap(err, "error in binary.Write")
+	}
+	k, err := tmp.WriteTo(w)
+	if int64(n) != k {
+		return fmt.Errorf("Mismatched write length: %d vs. %d", n, k)
+	}
+	return err
+}
+
+// ReadFrameLength returns the length of a framed XDR object.
+func ReadFrameLength(d *xdr.Decoder) (uint32, error) {
+	frameLen, n, e := d.DecodeUint()
+	if e != nil {
+		return 0, errors.Wrap(e, "unmarshalling XDR frame header")
+	}
+	if n != 4 {
+		return 0, errors.New("bad length of XDR frame header")
+	}
+	if (frameLen & 0x80000000) != 0x80000000 {
+		return 0, errors.New("malformed XDR frame header")
+	}
+	frameLen &= 0x7fffffff
+	return frameLen, nil
 }
 
 type countWriter struct {
diff --git a/xdr/main_test.go b/xdr/main_test.go
index dd1ebb48cb..ab7c21195e 100644
--- a/xdr/main_test.go
+++ b/xdr/main_test.go
@@ -1,12 +1,51 @@
-package xdr_test
+package xdr
 
 import (
-	. "github.com/stellar/go/xdr"
+	"encoding/base64"
+	"fmt"
+	"log"
+	"strings"
+	"testing"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	"github.com/stretchr/testify/assert"
 )
+
+// ExampleUnmarshal shows the lowest-level process to decode a
+// base64-encoded transaction envelope.
+func ExampleUnmarshal() { + data := "AAAAAgAAAABi/B0L0JGythwN1lY0aypo19NHxvLCyO5tBEcCVvwF9wAAAAoAAAAAAAAAAQAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAK6jei3jmoI8TGlD/egc37PXtHKKzWV8wViZBaCu5L5MAAAAADuaygAAAAAAAAAAAVb8BfcAAABACmeyD4/+Oj7llOmTrcjKLHLTQJF0TV/VggCOUZ30ZPgMsQy6A2T//Zdzb7MULVo/Y7kDrqAZRS51rvIp7YMUAA==" + + rawr := strings.NewReader(data) + b64r := base64.NewDecoder(base64.StdEncoding, rawr) + + var tx TransactionEnvelope + bytesRead, err := Unmarshal(b64r, &tx) + + fmt.Printf("read %d bytes\n", bytesRead) + + if err != nil { + log.Fatal(err) + } + + operations := tx.Operations() + fmt.Printf("This tx has %d operations\n", len(operations)) + // Output: read 196 bytes + // This tx has 1 operations +} + +func TestSafeUnmarshalHex(t *testing.T) { + accountID := MustAddress("GC3C4AKRBQLHOJ45U4XG35ESVWRDECWO5XLDGYADO6DPR3L7KIDVUMML") + hex, err := MarshalHex(accountID) + assert.NoError(t, err) + assert.Equal(t, "00000000b62e01510c1677279da72e6df492ada2320aceedd63360037786f8ed7f52075a", hex) + var parsed AccountId + err = SafeUnmarshalHex(hex, &parsed) + assert.NoError(t, err) + assert.True(t, accountID.Equals(parsed)) +} + var _ = Describe("xdr.SafeUnmarshal", func() { var ( result int32 @@ -79,3 +118,76 @@ var _ = Describe("xdr.SafeUnmarshalBase64", func() { }) }) }) + +func TestLedgerKeyBinaryCompress(t *testing.T) { + e := NewEncodingBuffer() + for _, tc := range []struct { + key LedgerKey + expectedOut []byte + }{ + { + key: LedgerKey{Type: LedgerEntryTypeAccount, + Account: &LedgerKeyAccount{ + AccountId: MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + }, + }, + expectedOut: []byte{0x0, 0x0, 0x1d, 0x4, 0x9a, 0x80, 0xf, 0xda, 0x8f, 0xab, 0xe8, 0xf6, 0x9d, 0x10, 0xdd, 0x8d, 0xda, 0x79, 0x29, 0x5a, 0x14, 0x87, 0xca, 0xe2, 0x3e, 0x43, 0x4e, 0xf5, 0xab, 0x68, 0xec, 0x13, 0x6c, 0xf3}, + }, + { + key: LedgerKey{ + Type: LedgerEntryTypeTrustline, + TrustLine: &LedgerKeyTrustLine{ + AccountId: MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + Asset: MustNewCreditAsset("EUR", "GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB").ToTrustLineAsset(), + }, + }, + expectedOut: []byte{0x1, 0x0, 0x1d, 0x4, 0x9a, 0x80, 0xf, 0xda, 0x8f, 0xab, 0xe8, 0xf6, 0x9d, 0x10, 0xdd, 0x8d, 0xda, 0x79, 0x29, 0x5a, 0x14, 0x87, 0xca, 0xe2, 0x3e, 0x43, 0x4e, 0xf5, 0xab, 0x68, 0xec, 0x13, 0x6c, 0xf3, 0x1, 0x45, 0x55, 0x52, 0x0, 0x1d, 0x4, 0x9a, 0x80, 0xf, 0xda, 0x8f, 0xab, 0xe8, 0xf6, 0x9d, 0x10, 0xdd, 0x8d, 0xda, 0x79, 0x29, 0x5a, 0x14, 0x87, 0xca, 0xe2, 0x3e, 0x43, 0x4e, 0xf5, 0xab, 0x68, 0xec, 0x13, 0x6c, 0xf3}, + }, + { + key: LedgerKey{ + Type: LedgerEntryTypeOffer, + Offer: &LedgerKeyOffer{ + SellerId: MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + OfferId: Int64(3), + }, + }, + expectedOut: []byte{0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, + { + key: LedgerKey{ + Type: LedgerEntryTypeData, + Data: &LedgerKeyData{ + AccountId: MustAddress("GAOQJGUAB7NI7K7I62ORBXMN3J4SSWQUQ7FOEPSDJ322W2HMCNWPHXFB"), + DataName: "foobar", + }, + }, + expectedOut: []byte{0x3, 0x0, 0x1d, 0x4, 0x9a, 0x80, 0xf, 0xda, 0x8f, 0xab, 0xe8, 0xf6, 0x9d, 0x10, 0xdd, 0x8d, 0xda, 0x79, 0x29, 0x5a, 0x14, 0x87, 0xca, 0xe2, 0x3e, 0x43, 0x4e, 0xf5, 0xab, 0x68, 0xec, 0x13, 0x6c, 0xf3, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72}, + }, + { + key: LedgerKey{ + Type: LedgerEntryTypeClaimableBalance, + ClaimableBalance: &LedgerKeyClaimableBalance{ + BalanceId: ClaimableBalanceId{ + Type: 0, + V0: &Hash{0xca, 0xfe, 0xba, 0xbe}, + }, + }, + }, + expectedOut: []byte{0x4, 0x0, 
0xca, 0xfe, 0xba, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + }, + { + key: LedgerKey{ + Type: LedgerEntryTypeLiquidityPool, + LiquidityPool: &LedgerKeyLiquidityPool{ + LiquidityPoolId: PoolId{0xca, 0xfe, 0xba, 0xbe}, + }, + }, + expectedOut: []byte{0x5, 0xca, 0xfe, 0xba, 0xbe, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + }, + } { + b, err := e.LedgerKeyUnsafeMarshalBinaryCompress(tc.key) + assert.NoError(t, err) + assert.Equal(t, tc.expectedOut, b) + } + +} diff --git a/xdr/memo.go b/xdr/memo.go new file mode 100644 index 0000000000..a412b73b2f --- /dev/null +++ b/xdr/memo.go @@ -0,0 +1,18 @@ +package xdr + +func MemoText(text string) Memo { + return Memo{Type: MemoTypeMemoText, Text: &text} +} + +func MemoID(id uint64) Memo { + idObj := Uint64(id) + return Memo{Type: MemoTypeMemoId, Id: &idObj} +} + +func MemoHash(hash Hash) Memo { + return Memo{Type: MemoTypeMemoHash, Hash: &hash} +} + +func MemoRetHash(hash Hash) Memo { + return Memo{Type: MemoTypeMemoReturn, RetHash: &hash} +} diff --git a/xdr/muxed_account.go b/xdr/muxed_account.go new file mode 100644 index 0000000000..a6d8b5b7c2 --- /dev/null +++ b/xdr/muxed_account.go @@ -0,0 +1,192 @@ +package xdr + +import ( + "fmt" + + "github.com/pkg/errors" + "github.com/stellar/go/strkey" +) + +func MustMuxedAddress(address string) MuxedAccount { + muxed := MuxedAccount{} + err := muxed.SetAddress(address) + if err != nil { + panic(err) + } + return muxed +} + +func MustMuxedAddressPtr(address string) *MuxedAccount { + muxed := MustMuxedAddress(address) + return &muxed +} + +func MuxedAccountFromAccountId(gAddress string, id uint64) (MuxedAccount, error) { + accountId, err := AddressToAccountId(gAddress) + if err != nil { + return MuxedAccount{}, err + } + + return NewMuxedAccount( + CryptoKeyTypeKeyTypeMuxedEd25519, + MuxedAccountMed25519{ + Id: Uint64(id), + Ed25519: accountId.MustEd25519(), + }, + ) +} + +// SetEd25519Address modifies the receiver, setting it's value to the MuxedAccount form +// of the provided G-address. Unlike SetAddress(), it only supports G-addresses. +func (m *MuxedAccount) SetEd25519Address(address string) error { + if m == nil { + return nil + } + + switch len(address) { + case 56: + raw, err := strkey.Decode(strkey.VersionByteAccountID, address) + if err != nil { + return err + } + if len(raw) != 32 { + return fmt.Errorf("invalid binary length: %d", len(raw)) + } + var ui Uint256 + copy(ui[:], raw) + *m, err = NewMuxedAccount(CryptoKeyTypeKeyTypeEd25519, ui) + return err + default: + return errors.New("invalid address length") + } + +} + +// SetAddress modifies the receiver, setting it's value to the MuxedAccount form +// of the provided strkey G-address or M-address, as described in SEP23. 
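+//
+// A minimal sketch (addr is a hypothetical strkey string):
+//
+//	var m MuxedAccount
+//	if err := m.SetAddress(addr); err != nil {
+//		// addr was neither a valid G... nor M... address
+//	}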
+func (m *MuxedAccount) SetAddress(address string) error { + if m == nil { + return nil + } + + switch len(address) { + case 56: + return m.SetEd25519Address(address) + case 69: + raw, err := strkey.Decode(strkey.VersionByteMuxedAccount, address) + if err != nil { + return err + } + if len(raw) != 40 { + return fmt.Errorf("invalid binary length: %d", len(raw)) + } + var muxed MuxedAccountMed25519 + copy(muxed.Ed25519[:], raw[:32]) + if err = muxed.Id.UnmarshalBinary(raw[32:]); err != nil { + return err + } + *m, err = NewMuxedAccount(CryptoKeyTypeKeyTypeMuxedEd25519, muxed) + return err + default: + return errors.New("invalid address length") + } + +} + +// AddressToMuxedAccount returns an MuxedAccount for a given address string +// or SEP23 M-address. +// If the address is not valid the error returned will not be nil +func AddressToMuxedAccount(address string) (MuxedAccount, error) { + result := MuxedAccount{} + err := result.SetAddress(address) + + return result, err +} + +// Address returns the strkey-encoded form of this MuxedAccount. In particular, it will +// return an M- strkey representation for CryptoKeyTypeKeyTypeMuxedEd25519 variants of the account +// (according to SEP23). This method will panic if the MuxedAccount is backed by a public key of an +// unknown type. +func (m *MuxedAccount) Address() string { + address, err := m.GetAddress() + if err != nil { + panic(err) + } + return address +} + +// GetAddress returns the strkey-encoded form of this MuxedAccount. In particular, it will +// return an M-strkey representation for CryptoKeyTypeKeyTypeMuxedEd25519 variants of the account +// (according to SEP23). In addition it will return an error if the MuxedAccount is backed by a +// public key of an unknown type. +func (m *MuxedAccount) GetAddress() (string, error) { + if m == nil { + return "", nil + } + + raw := make([]byte, 0, 40) + switch m.Type { + case CryptoKeyTypeKeyTypeEd25519: + ed, ok := m.GetEd25519() + if !ok { + return "", errors.New("could not get Ed25519") + } + raw = append(raw, ed[:]...) + return strkey.Encode(strkey.VersionByteAccountID, raw) + case CryptoKeyTypeKeyTypeMuxedEd25519: + ed, ok := m.GetMed25519() + if !ok { + return "", errors.New("could not get Med25519") + } + idBytes, err := ed.Id.MarshalBinary() + if err != nil { + return "", errors.Wrap(err, "could not marshal ID") + } + raw = append(raw, ed.Ed25519[:]...) + raw = append(raw, idBytes...) + return strkey.Encode(strkey.VersionByteMuxedAccount, raw) + default: + return "", fmt.Errorf("Unknown muxed account type: %v", m.Type) + } +} + +// GetId retrieves the underlying memo ID if this is a fully muxed account. It +// will return an error if the muxed account does not have a memo ID (i.e it's +// of the key type Ed25519). 
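+//
+// Illustrative sketch (m is a hypothetical *MuxedAccount):
+//
+//	id, err := m.GetId()
+//	// err is non-nil when m wraps a plain ed25519 (G...) key.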
+func (m *MuxedAccount) GetId() (uint64, error) { + if m == nil { + return 0, nil + } + + switch m.Type { + case CryptoKeyTypeKeyTypeEd25519: + return 0, errors.New("muxed account has no ID") + + case CryptoKeyTypeKeyTypeMuxedEd25519: + ed, ok := m.GetMed25519() + if !ok { + return 0, errors.New("could not get Med25519") + } + return uint64(ed.Id), nil + + default: + return 0, fmt.Errorf("Unknown muxed account type: %v", m.Type) + } +} + +// ToAccountId transforms a MuxedAccount to an AccountId, dropping the +// memo Id if necessary +func (m MuxedAccount) ToAccountId() AccountId { + result := AccountId{Type: PublicKeyTypePublicKeyTypeEd25519} + switch m.Type { + case CryptoKeyTypeKeyTypeEd25519: + ed := m.MustEd25519() + result.Ed25519 = &ed + case CryptoKeyTypeKeyTypeMuxedEd25519: + ed := m.MustMed25519().Ed25519 + result.Ed25519 = &ed + default: + panic(fmt.Errorf("Unknown muxed account type: %v", m.Type)) + } + return result +} diff --git a/xdr/muxed_account_test.go b/xdr/muxed_account_test.go new file mode 100644 index 0000000000..0436ec2316 --- /dev/null +++ b/xdr/muxed_account_test.go @@ -0,0 +1,124 @@ +package xdr_test + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + . "github.com/stellar/go/xdr" +) + +var _ = Describe("xdr.MuxedAccount#Get/SetAddress()", func() { + It("returns an empty string when muxed account is nil", func() { + addy := (*MuxedAccount)(nil).Address() + Expect(addy).To(Equal("")) + }) + + It("returns a strkey string when muxed account is valid", func() { + var unmuxed MuxedAccount + err := unmuxed.SetEd25519Address("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ") + Expect(err).ShouldNot(HaveOccurred()) + Expect(unmuxed.Type).To(Equal(CryptoKeyTypeKeyTypeEd25519)) + Expect(*unmuxed.Ed25519).To(Equal(Uint256{63, 12, 52, 191, 147, 173, 13, 153, 113, 208, 76, 204, 144, 247, 5, 81, 28, 131, 138, 173, 151, 52, 164, 162, 251, 13, 122, 3, 252, 127, 232, 154})) + _, err = unmuxed.GetId() + Expect(err).Should(HaveOccurred()) + muxedy := unmuxed.Address() + Expect(muxedy).To(Equal("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ")) + + var muxed MuxedAccount + err = muxed.SetAddress("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK") + Expect(err).ShouldNot(HaveOccurred()) + Expect(muxed.Type).To(Equal(CryptoKeyTypeKeyTypeMuxedEd25519)) + Expect(muxed.Med25519.Id).To(Equal(Uint64(9223372036854775808))) + Expect(muxed.Med25519.Ed25519).To(Equal(*unmuxed.Ed25519)) + muxedy = muxed.Address() + id, err := muxed.GetId() + Expect(id).To(Equal(uint64(9223372036854775808))) + Expect(muxedy).To(Equal("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK")) + + err = muxed.SetAddress("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ") + Expect(err).ShouldNot(HaveOccurred()) + Expect(muxed.Type).To(Equal(CryptoKeyTypeKeyTypeMuxedEd25519)) + Expect(muxed.Med25519.Id).To(Equal(Uint64(0))) + Expect(muxed.Med25519.Ed25519).To(Equal(*unmuxed.Ed25519)) + id, err = muxed.GetId() + Expect(id).To(Equal(uint64(0))) + muxedy = muxed.Address() + Expect(muxedy).To(Equal("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ")) + }) + + It("returns a muxed account from an account ID and memo", func() { + accountId := "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ" + expectedMuxedId := "MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAABUTGI4" + + muxedAccount, err := MuxedAccountFromAccountId(accountId, uint64(420)) + Expect(err).ShouldNot(HaveOccurred()) + 
Expect(muxedAccount.GetAddress()).To(Equal(expectedMuxedId)) + Expect(muxedAccount.GetId()).To(Equal(uint64(420))) + }) + + It("returns an error when the strkey is invalid", func() { + var muxed MuxedAccount + + // Test cases from SEP23 + + err := muxed.SetEd25519Address("GAAAAAAAACGC6") + Expect(err).Should(HaveOccurred()) + + err = muxed.SetEd25519Address("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUR") + Expect(err).Should(HaveOccurred()) + + err = muxed.SetEd25519Address("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZA") + Expect(err).Should(HaveOccurred()) + + err = muxed.SetEd25519Address("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUACUSI") + Expect(err).Should(HaveOccurred()) + + err = muxed.SetEd25519Address("G47QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVP2I") + Expect(err).Should(HaveOccurred()) + + err = muxed.SetAddress("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLKA") + Expect(err).Should(HaveOccurred()) + + err = muxed.SetAddress("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAAV75I") + Expect(err).Should(HaveOccurred()) + + err = muxed.SetAddress("M47QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUQ") + Expect(err).Should(HaveOccurred()) + + err = muxed.SetAddress("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUK===") + Expect(err).Should(HaveOccurred()) + + err = muxed.SetAddress("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJUAAAAAAAAAAAACJUO") + Expect(err).Should(HaveOccurred()) + }) +}) + +var _ = Describe("xdr.AddressToMuxedAccount()", func() { + It("works", func() { + address := "GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ" + muxedAccount, err := AddressToMuxedAccount(address) + + Expect(muxedAccount.Address()).To(Equal("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ")) + Expect(err).ShouldNot(HaveOccurred()) + + _, err = AddressToAccountId("GCR22L3") + + Expect(err).Should(HaveOccurred()) + }) +}) + +var _ = Describe("xdr.MuxedAccount.ToAccountId()", func() { + It("works", func() { + var muxed MuxedAccount + + err := muxed.SetEd25519Address("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ") + Expect(err).ShouldNot(HaveOccurred()) + aid := muxed.ToAccountId() + Expect(aid.Address()).To(Equal("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ")) + + err = muxed.SetAddress("MA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVAAAAAAAAAAAAAJLK") + Expect(err).ShouldNot(HaveOccurred()) + aid = muxed.ToAccountId() + Expect(aid.Address()).To(Equal("GA7QYNF7SOWQ3GLR2BGMZEHXAVIRZA4KVWLTJJFC7MGXUA74P7UJVSGZ")) + }) +}) diff --git a/xdr/path_payment_result.go b/xdr/path_payment_result.go index ced6cc0fbe..9ef5a745ca 100644 --- a/xdr/path_payment_result.go +++ b/xdr/path_payment_result.go @@ -2,7 +2,7 @@ package xdr // SendAmount returns the amount spent, denominated in the source asset, in the // course of this path payment -func (pr *PathPaymentResult) SendAmount() Int64 { +func (pr *PathPaymentStrictReceiveResult) SendAmount() Int64 { s, ok := pr.GetSuccess() if !ok { return 0 @@ -12,15 +12,26 @@ func (pr *PathPaymentResult) SendAmount() Int64 { return s.Last.Amount } - sa := s.Offers[0].AssetBought + sa := s.Offers[0].AssetBought() var ret Int64 for _, o := range s.Offers { - if o.AssetBought.String() != sa.String() { + if o.AssetBought().String() != sa.String() { break } - ret += o.AmountBought + ret += o.AmountBought() } return ret } + +// DestAmount returns the amount received, denominated in the 
destination asset, in the +// course of this path payment +func (pr *PathPaymentStrictSendResult) DestAmount() Int64 { + s, ok := pr.GetSuccess() + if !ok { + return 0 + } + + return s.Last.Amount +} diff --git a/xdr/pool_id.go b/xdr/pool_id.go new file mode 100644 index 0000000000..605c6edaa0 --- /dev/null +++ b/xdr/pool_id.go @@ -0,0 +1,30 @@ +package xdr + +import ( + "bytes" + "crypto/sha256" + + "github.com/stellar/go/support/errors" +) + +func NewPoolId(a, b Asset, fee Int32) (PoolId, error) { + if b.LessThan(a) { + return PoolId{}, errors.New("AssetA must be < AssetB") + } + + // Assume the assets are already sorted. + params := LiquidityPoolParameters{ + Type: LiquidityPoolTypeLiquidityPoolConstantProduct, + ConstantProduct: &LiquidityPoolConstantProductParameters{ + AssetA: a, + AssetB: b, + Fee: fee, + }, + } + + buf := &bytes.Buffer{} + if _, err := Marshal(buf, params); err != nil { + return PoolId{}, errors.Wrap(err, "failed to build liquidity pool id") + } + return sha256.Sum256(buf.Bytes()), nil +} diff --git a/xdr/pool_id_test.go b/xdr/pool_id_test.go new file mode 100644 index 0000000000..fa3524ffc3 --- /dev/null +++ b/xdr/pool_id_test.go @@ -0,0 +1,136 @@ +package xdr + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func makeAccount(t *testing.T, hexKey string) string { + b, err := hex.DecodeString(hexKey) + require.NoError(t, err) + var key Uint256 + copy(key[:], b) + a, err := NewAccountId(PublicKeyTypePublicKeyTypeEd25519, key) + require.NoError(t, err) + addr, err := a.GetAddress() + require.NoError(t, err) + return addr +} + +func TestNewPoolId(t *testing.T) { + testGetPoolID := func(x, y Asset, expectedHex string) { + // TODO: Require x y to be sorted. 
+ // require.Less(t, x, y); + + id, err := NewPoolId(x, y, LiquidityPoolFeeV18) + if assert.NoError(t, err) { + idHex := hex.EncodeToString(id[:]) + assert.Equal(t, expectedHex, idHex) + } + } + + acc1 := makeAccount(t, "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") + acc2 := makeAccount(t, "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789") + + t.Run("NATIVE and ALPHANUM4 (short and full length)", func(t *testing.T) { + testGetPoolID( + MustNewNativeAsset(), MustNewCreditAsset("AbC", acc1), + "c17f36fbd210e43dca1cda8edc5b6c0f825fcb72b39f0392fd6309844d77ff7d") + testGetPoolID( + MustNewNativeAsset(), MustNewCreditAsset("AbCd", acc1), + "80e0c5dc79ed76bb7e63681f6456136762f0d01ede94bb379dbc793e66db35e6") + }) + + t.Run("NATIVE and ALPHANUM12 (short and full length)", func(t *testing.T) { + testGetPoolID( + MustNewNativeAsset(), MustNewCreditAsset("AbCdEfGhIjK", acc1), + "d2306c6e8532f99418e9d38520865e1c1059cddb6793da3cc634224f2ffb5bd4") + testGetPoolID( + MustNewNativeAsset(), MustNewCreditAsset("AbCdEfGhIjKl", acc1), + "807e9e66653b5fda4dd4e672ff64a929fc5fdafe152eeadc07bb460c4849d711") + }) + + t.Run("ALPHANUM4 and ALPHANUM4 (short and full length)", func(t *testing.T) { + testGetPoolID( + MustNewCreditAsset("AbC", acc1), MustNewCreditAsset("aBc", acc1), + "0239013ab016985fc3ab077d165a9b21b822efa013fdd422381659e76aec505b") + testGetPoolID( + MustNewCreditAsset("AbCd", acc1), MustNewCreditAsset("aBc", acc1), + "cadb490d15b4333890377cd17400acf7681e14d6d949869ffa1fbbad7a6d2fde") + testGetPoolID( + MustNewCreditAsset("AbC", acc1), MustNewCreditAsset("aBcD", acc1), + "a938f8f346f3aff41d2e03b05137ef1955a723861802a4042f51f0f816e0db36") + testGetPoolID( + MustNewCreditAsset("AbCd", acc1), MustNewCreditAsset("aBcD", acc1), + "c89646bb6db726bfae784ab66041abbf54747cf4b6b16dff2a5c05830ad9c16b") + }) + + t.Run("ALPHANUM12 and ALPHANUM12 (short and full length)", func(t *testing.T) { + testGetPoolID( + MustNewCreditAsset("AbCdEfGhIjK", acc1), MustNewCreditAsset("aBcDeFgHiJk", acc1), + "88dc054dd0f8146bac0e691095ce2b90cd902b499761d22b1c94df120ca0b060") + testGetPoolID( + MustNewCreditAsset("AbCdEfGhIjKl", acc1), MustNewCreditAsset("aBcDeFgHiJk", acc1), + "09672910d891e658219d2f33a8885a542b2a5a09e9f486461201bd278a3e92a4") + testGetPoolID( + MustNewCreditAsset("AbCdEfGhIjK", acc1), MustNewCreditAsset("aBcDeFgHiJkl", acc1), + "63501addf8a5a6522eac996226069190b5226c71cfdda22347022418af1948a0") + testGetPoolID( + MustNewCreditAsset("AbCdEfGhIjKl", acc1), MustNewCreditAsset("aBcDeFgHiJkl", acc1), + "e851197a0148e949bdc03d52c53821b9afccc0fadfdc41ae01058c14c252e03b") + }) + + t.Run("ALPHANUM4 same code different issuer (short and full length)", func(t *testing.T) { + testGetPoolID( + MustNewCreditAsset("aBc", acc1), MustNewCreditAsset("aBc", acc2), + "5d7188454299529856586e81ea385d2c131c6afdd9d58c82e9aa558c16522fea") + testGetPoolID( + MustNewCreditAsset("aBcD", acc1), MustNewCreditAsset("aBcD", acc2), + "00d152f5f6b7e46eaf558576512207ea835a332f17ca777fba3cb835ef7dc1ef") + }) + + t.Run("ALPHANUM12 same code different issuer (short and full length)", func(t *testing.T) { + testGetPoolID( + MustNewCreditAsset("aBcDeFgHiJk", acc1), MustNewCreditAsset("aBcDeFgHiJk", acc2), + "cad65154300f087e652981fa5f76aa469b43ad53e9a5d348f1f93da57193d022") + testGetPoolID( + MustNewCreditAsset("aBcDeFgHiJkL", acc1), MustNewCreditAsset("aBcDeFgHiJkL", acc2), + "93fa82ecaabe987461d1e3c8e0fd6510558b86ac82a41f7c70b112281be90c71") + }) + + t.Run("ALPHANUM4 before ALPHANUM12 (short and full length) 
doesn't depend on issuer or code", func(t *testing.T) { + testGetPoolID( + MustNewCreditAsset("aBc", acc1), MustNewCreditAsset("aBcDeFgHiJk", acc2), + "c0d4c87bbaade53764b904fde2901a0353af437e9d3a976f1252670b85a36895") + testGetPoolID( + MustNewCreditAsset("aBcD", acc1), MustNewCreditAsset("aBcDeFgHiJk", acc2), + "1ee5aa0f0e6b8123c2da6592389481f64d816bfe3c3c06be282b0cdb0971f840") + testGetPoolID( + MustNewCreditAsset("aBc", acc1), MustNewCreditAsset("aBcDeFgHiJkL", acc2), + "a87bc151b119c1ea289905f0cb3cf95be7b0f096a0b6685bf2dcae70f9515d53") + testGetPoolID( + MustNewCreditAsset("aBcD", acc1), MustNewCreditAsset("aBcDeFgHiJkL", acc2), + "3caf78118d6cabd42618eef47bbc2da8abe7fe42539b4b502f08766485592a81") + testGetPoolID( + MustNewCreditAsset("aBc", acc2), MustNewCreditAsset("aBcDeFgHiJk", acc1), + "befb7f966ae63adcfde6a6670478bb7d936c29849e25e3387bb9e74566e3a29f") + testGetPoolID( + MustNewCreditAsset("aBcD", acc2), MustNewCreditAsset("aBcDeFgHiJk", acc1), + "593cc996c3f0d32e165fcbee9fdc5dba6ab05140a4a9254e08ad8cb67fe657a1") + testGetPoolID( + MustNewCreditAsset("aBc", acc2), MustNewCreditAsset("aBcDeFgHiJkL", acc1), + "d66af9b7417547c3dc000617533405349d1f622015daf3e9bad703ea34ee1d17") + testGetPoolID( + MustNewCreditAsset("aBcD", acc2), MustNewCreditAsset("aBcDeFgHiJkL", acc1), + "c1c7a4b9db6e3754cae3017f72b6b7c93198f593182c541bcab3795c6413a677") + }) +} + +func TestNewPoolIdRejectsIncorrectOrder(t *testing.T) { + acc1 := makeAccount(t, "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") + _, err := NewPoolId(MustNewCreditAsset("AbC", acc1), MustNewNativeAsset(), LiquidityPoolFeeV18) + assert.EqualError(t, err, "AssetA must be < AssetB") +} diff --git a/xdr/price.go b/xdr/price.go index fa362c79a4..bfafb8bfc0 100644 --- a/xdr/price.go +++ b/xdr/price.go @@ -5,6 +5,43 @@ import ( ) // String returns a string represenation of `p` -func (p *Price) String() string { +func (p Price) String() string { return big.NewRat(int64(p.N), int64(p.D)).FloatString(7) } + +// Equal returns whether the price's value is the same, +// taking into account denormalized representation +// (e.g. Price{1, 2}.EqualValue(Price{2,4}) == true ) +func (p Price) Equal(q Price) bool { + // See the Cheaper() method for the reasoning behind this: + return uint64(p.N)*uint64(q.D) == uint64(q.N)*uint64(p.D) +} + +// Cheaper indicates if the Price's value is lower, +// taking into account denormalized representation +// (e.g. Price{1, 2}.Cheaper(Price{2,4}) == false ) +func (p Price) Cheaper(q Price) bool { + // To avoid float precision issues when naively comparing Price.N/Price.D, + // we use the cross product instead: + // + // Price of p < Price of q + // <==> + // (p.N / p.D) < (q.N / q.D) + // <==> + // (p.N / p.D) * (p.D * q.D) < (q.N / q.D) * (p.D * q.D) + // <==> + // p.N * q.D < q.N * p.D + return uint64(p.N)*uint64(q.D) < uint64(q.N)*uint64(p.D) +} + +// Normalize sets Price to its rational canonical form +func (p *Price) Normalize() { + r := big.NewRat(int64(p.N), int64(p.D)) + p.N = Int32(r.Num().Int64()) + p.D = Int32(r.Denom().Int64()) +} + +// Invert inverts Price. 
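+// For example, Price{N: 1, D: 2} becomes Price{N: 2, D: 1}.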
+func (p *Price) Invert() { + p.N, p.D = p.D, p.N +} diff --git a/xdr/price_test.go b/xdr/price_test.go new file mode 100644 index 0000000000..c9c069ee73 --- /dev/null +++ b/xdr/price_test.go @@ -0,0 +1,53 @@ +package xdr_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/stellar/go/xdr" +) + +func TestPriceInvert(t *testing.T) { + p := xdr.Price{N: 1, D: 2} + p.Invert() + assert.Equal(t, xdr.Price{N: 2, D: 1}, p) +} + +func TestPriceEqual(t *testing.T) { + // canonical + assert.True(t, xdr.Price{N: 1, D: 2}.Equal(xdr.Price{N: 1, D: 2})) + assert.False(t, xdr.Price{N: 1, D: 2}.Equal(xdr.Price{N: 2, D: 3})) + + // not canonical + assert.True(t, xdr.Price{N: 1, D: 2}.Equal(xdr.Price{N: 5, D: 10})) + assert.True(t, xdr.Price{N: 5, D: 10}.Equal(xdr.Price{N: 1, D: 2})) + assert.True(t, xdr.Price{N: 5, D: 10}.Equal(xdr.Price{N: 50, D: 100})) + assert.False(t, xdr.Price{N: 1, D: 3}.Equal(xdr.Price{N: 5, D: 10})) + assert.False(t, xdr.Price{N: 5, D: 10}.Equal(xdr.Price{N: 1, D: 3})) + assert.False(t, xdr.Price{N: 5, D: 15}.Equal(xdr.Price{N: 50, D: 100})) +} + +func TestPriceCheaper(t *testing.T) { + // canonical + assert.True(t, xdr.Price{N: 1, D: 4}.Cheaper(xdr.Price{N: 1, D: 3})) + assert.False(t, xdr.Price{N: 1, D: 3}.Cheaper(xdr.Price{N: 1, D: 4})) + assert.False(t, xdr.Price{N: 1, D: 4}.Cheaper(xdr.Price{N: 1, D: 4})) + + // not canonical + assert.True(t, xdr.Price{N: 10, D: 40}.Cheaper(xdr.Price{N: 3, D: 9})) + assert.False(t, xdr.Price{N: 3, D: 9}.Cheaper(xdr.Price{N: 10, D: 40})) + assert.False(t, xdr.Price{N: 10, D: 40}.Cheaper(xdr.Price{N: 10, D: 40})) +} + +func TestNormalize(t *testing.T) { + // canonical + p := xdr.Price{N: 1, D: 4} + p.Normalize() + assert.Equal(t, xdr.Price{N: 1, D: 4}, p) + + // not canonical + p = xdr.Price{N: 500, D: 2000} + p.Normalize() + assert.Equal(t, xdr.Price{N: 1, D: 4}, p) +} diff --git a/xdr/signer_key.go b/xdr/signer_key.go new file mode 100644 index 0000000000..f308c61157 --- /dev/null +++ b/xdr/signer_key.go @@ -0,0 +1,123 @@ +package xdr + +import ( + "fmt" + + "github.com/stellar/go/strkey" + "github.com/stellar/go/support/errors" +) + +// Address returns the strkey encoded form of this signer key. This method will +// panic if the SignerKey is of an unknown type. +func (skey *SignerKey) Address() string { + address, err := skey.GetAddress() + if err != nil { + panic(err) + } + return address +} + +// GetAddress returns the strkey encoded form of this signer key, and an error if the +// SignerKey is of an unknown type. 
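+//
+// Illustrative sketch (skey is a hypothetical *SignerKey):
+//
+//	addr, err := skey.GetAddress()
+//	// addr starts with G, T or X depending on the signer key type.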
+func (skey *SignerKey) GetAddress() (string, error) { + if skey == nil { + return "", nil + } + + vb := strkey.VersionByte(0) + raw := make([]byte, 32) + + switch skey.Type { + case SignerKeyTypeSignerKeyTypeEd25519: + vb = strkey.VersionByteAccountID + key := skey.MustEd25519() + copy(raw, key[:]) + case SignerKeyTypeSignerKeyTypeHashX: + vb = strkey.VersionByteHashX + key := skey.MustHashX() + copy(raw, key[:]) + case SignerKeyTypeSignerKeyTypePreAuthTx: + vb = strkey.VersionByteHashTx + key := skey.MustPreAuthTx() + copy(raw, key[:]) + default: + return "", fmt.Errorf("unknown signer key type: %v", skey.Type) + } + + return strkey.Encode(vb, raw) +} + +// Equals returns true if `other` is equivalent to `skey` +func (skey *SignerKey) Equals(other SignerKey) bool { + if skey.Type != other.Type { + return false + } + + switch skey.Type { + case SignerKeyTypeSignerKeyTypeEd25519: + l := skey.MustEd25519() + r := other.MustEd25519() + return l == r + case SignerKeyTypeSignerKeyTypeHashX: + l := skey.MustHashX() + r := other.MustHashX() + return l == r + case SignerKeyTypeSignerKeyTypePreAuthTx: + l := skey.MustPreAuthTx() + r := other.MustPreAuthTx() + return l == r + default: + panic(fmt.Errorf("Unknown signer key type: %v", skey.Type)) + } +} + +func MustSigner(address string) SignerKey { + aid := SignerKey{} + err := aid.SetAddress(address) + if err != nil { + panic(err) + } + return aid +} + +// SetAddress modifies the receiver, setting it's value to the SignerKey form +// of the provided address. +func (skey *SignerKey) SetAddress(address string) error { + if skey == nil { + return nil + } + + vb, err := strkey.Version(address) + if err != nil { + return errors.Wrap(err, "failed to extract address version") + } + + var keytype SignerKeyType + + switch vb { + case strkey.VersionByteAccountID: + keytype = SignerKeyTypeSignerKeyTypeEd25519 + case strkey.VersionByteHashX: + keytype = SignerKeyTypeSignerKeyTypeHashX + case strkey.VersionByteHashTx: + keytype = SignerKeyTypeSignerKeyTypePreAuthTx + default: + return errors.Errorf("invalid version byte: %v", vb) + } + + raw, err := strkey.Decode(vb, address) + if err != nil { + return err + } + + if len(raw) != 32 { + return errors.New("invalid address") + } + + var ui Uint256 + copy(ui[:], raw) + + *skey, err = NewSignerKey(keytype, ui) + + return err +} diff --git a/xdr/signer_key_test.go b/xdr/signer_key_test.go new file mode 100644 index 0000000000..4a32e38044 --- /dev/null +++ b/xdr/signer_key_test.go @@ -0,0 +1,82 @@ +package xdr_test + +import ( + "testing" + + . 
"github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestSignerKey_GetAddress(t *testing.T) { + tests := []struct { + name string + wantAddress string + }{ + { + "NilKey", + "", + }, + { + "AccountID", + "GA3D5KRYM6CB7OWQ6TWYRR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHES5", + }, + { + "HashxX", + "TBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHXL7", + }, + { + "HashX", + "XBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWGTOG", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + key := &SignerKey{} + if tt.wantAddress != "" { + err := key.SetAddress(tt.wantAddress) + assert.NoError(t, err) + } else { + key = nil + } + + gotAddress, err := key.GetAddress() + assert.Equal(t, tt.wantAddress, gotAddress) + assert.NoError(t, err) + }) + } +} + +func TestSignerKey_SetAddress(t *testing.T) { + cases := []struct { + Name string + Address string + }{ + + { + Name: "AccountID", + Address: "GA3D5KRYM6CB7OWQ6TWYRR3Z4T7GNZLKERYNZGGA5SOAOPIFY6YQHES5", + }, + { + Name: "HashTx", + Address: "TBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHXL7", + }, + { + Name: "HashX", + Address: "XBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWGTOG", + }, + } + + for _, kase := range cases { + var dest SignerKey + + err := dest.SetAddress(kase.Address) + if assert.NoError(t, err, "error in case: %s", kase.Name) { + assert.Equal(t, kase.Address, dest.Address(), "address set incorrectly") + } + } + + // setting a seed causes an error + var dest SignerKey + err := dest.SetAddress("SBU2RRGLXH3E5CQHTD3ODLDF2BWDCYUSSBLLZ5GNW7JXHDIYKXZWHOKR") + assert.Error(t, err) +} diff --git a/xdr/signers.go b/xdr/signers.go new file mode 100644 index 0000000000..2afbc35b88 --- /dev/null +++ b/xdr/signers.go @@ -0,0 +1,26 @@ +package xdr + +import ( + "sort" +) + +// SortSignersByKey returns a new []Signer array sorted by signer key. 
+func SortSignersByKey(signers []Signer) []Signer { + keys := make([]string, 0, len(signers)) + keysMap := make(map[string]Signer) + newSigners := make([]Signer, 0, len(signers)) + + for _, signer := range signers { + key := signer.Key.Address() + keys = append(keys, key) + keysMap[key] = signer + } + + sort.Strings(keys) + + for _, key := range keys { + newSigners = append(newSigners, keysMap[key]) + } + + return newSigners +} diff --git a/xdr/string.go b/xdr/string.go new file mode 100644 index 0000000000..1ba09728ca --- /dev/null +++ b/xdr/string.go @@ -0,0 +1,6 @@ +package xdr + +func String32Ptr(val string) *String32 { + pval := String32(val) + return &pval +} diff --git a/xdr/transaction_envelope.go b/xdr/transaction_envelope.go new file mode 100644 index 0000000000..7dbdde45c2 --- /dev/null +++ b/xdr/transaction_envelope.go @@ -0,0 +1,136 @@ +package xdr + +// IsFeeBump returns true if the transaction envelope is a fee bump transaction +func (e TransactionEnvelope) IsFeeBump() bool { + return e.Type == EnvelopeTypeEnvelopeTypeTxFeeBump +} + +// FeeBumpAccount returns the account paying for the fee bump transaction +func (e TransactionEnvelope) FeeBumpAccount() MuxedAccount { + return e.MustFeeBump().Tx.FeeSource +} + +// FeeBumpFee returns the fee defined for the fee bump transaction +func (e TransactionEnvelope) FeeBumpFee() int64 { + return int64(e.MustFeeBump().Tx.Fee) +} + +// FeeBumpSignatures returns the list of signatures for the fee bump transaction +func (e TransactionEnvelope) FeeBumpSignatures() []DecoratedSignature { + return e.MustFeeBump().Signatures +} + +// SourceAccount returns the source account for the transaction +// If the transaction envelope is for a fee bump transaction, SourceAccount() +// returns the source account of the inner transaction +func (e TransactionEnvelope) SourceAccount() MuxedAccount { + switch e.Type { + case EnvelopeTypeEnvelopeTypeTxFeeBump: + return e.FeeBump.Tx.InnerTx.V1.Tx.SourceAccount + case EnvelopeTypeEnvelopeTypeTx: + return e.V1.Tx.SourceAccount + case EnvelopeTypeEnvelopeTypeTxV0: + return MuxedAccount{ + Type: CryptoKeyTypeKeyTypeEd25519, + Ed25519: &e.V0.Tx.SourceAccountEd25519, + } + default: + panic("unsupported transaction type: " + e.Type.String()) + } +} + +// Fee returns the fee defined for the transaction envelope +// If the transaction envelope is for a fee bump transaction, Fee() +// returns the fee defined in the inner transaction +func (e TransactionEnvelope) Fee() uint32 { + switch e.Type { + case EnvelopeTypeEnvelopeTypeTxFeeBump: + return uint32(e.FeeBump.Tx.InnerTx.V1.Tx.Fee) + case EnvelopeTypeEnvelopeTypeTx: + return uint32(e.V1.Tx.Fee) + case EnvelopeTypeEnvelopeTypeTxV0: + return uint32(e.V0.Tx.Fee) + default: + panic("unsupported transaction type: " + e.Type.String()) + } +} + +// Signatures returns the list of signatures included in the transaction envelope +// If the transaction envelope is for a fee bump transaction, Signatures() +// returns the signatures for the inner transaction +func (e TransactionEnvelope) Signatures() []DecoratedSignature { + switch e.Type { + case EnvelopeTypeEnvelopeTypeTxFeeBump: + return e.FeeBump.Tx.InnerTx.V1.Signatures + case EnvelopeTypeEnvelopeTypeTx: + return e.V1.Signatures + case EnvelopeTypeEnvelopeTypeTxV0: + return e.V0.Signatures + default: + panic("unsupported transaction type: " + e.Type.String()) + } +} + +// SeqNum returns the sequence number set in the transaction envelope +// Note for fee bump transactions, SeqNum() returns the sequence number +// of the inner 
transaction +func (e TransactionEnvelope) SeqNum() int64 { + switch e.Type { + case EnvelopeTypeEnvelopeTypeTxFeeBump: + return int64(e.FeeBump.Tx.InnerTx.V1.Tx.SeqNum) + case EnvelopeTypeEnvelopeTypeTx: + return int64(e.V1.Tx.SeqNum) + case EnvelopeTypeEnvelopeTypeTxV0: + return int64(e.V0.Tx.SeqNum) + default: + panic("unsupported transaction type: " + e.Type.String()) + } +} + +// TimeBounds returns the time bounds set in the transaction envelope +// Note for fee bump transactions, TimeBounds() returns the time bounds +// of the inner transaction +func (e TransactionEnvelope) TimeBounds() *TimeBounds { + switch e.Type { + case EnvelopeTypeEnvelopeTypeTxFeeBump: + return e.FeeBump.Tx.InnerTx.V1.Tx.TimeBounds + case EnvelopeTypeEnvelopeTypeTx: + return e.V1.Tx.TimeBounds + case EnvelopeTypeEnvelopeTypeTxV0: + return e.V0.Tx.TimeBounds + default: + panic("unsupported transaction type: " + e.Type.String()) + } +} + +// Operations returns the operations set in the transaction envelope +// Note for fee bump transactions, Operations() returns the operations +// of the inner transaction +func (e TransactionEnvelope) Operations() []Operation { + switch e.Type { + case EnvelopeTypeEnvelopeTypeTxFeeBump: + return e.FeeBump.Tx.InnerTx.V1.Tx.Operations + case EnvelopeTypeEnvelopeTypeTx: + return e.V1.Tx.Operations + case EnvelopeTypeEnvelopeTypeTxV0: + return e.V0.Tx.Operations + default: + panic("unsupported transaction type: " + e.Type.String()) + } +} + +// Memo returns the memo set in the transaction envelope +// Note for fee bump transactions, Memo() returns the memo +// of the inner transaction +func (e TransactionEnvelope) Memo() Memo { + switch e.Type { + case EnvelopeTypeEnvelopeTypeTxFeeBump: + return e.FeeBump.Tx.InnerTx.V1.Tx.Memo + case EnvelopeTypeEnvelopeTypeTx: + return e.V1.Tx.Memo + case EnvelopeTypeEnvelopeTypeTxV0: + return e.V0.Tx.Memo + default: + panic("unsupported transaction type: " + e.Type.String()) + } +} diff --git a/xdr/transaction_envelope_test.go b/xdr/transaction_envelope_test.go new file mode 100644 index 0000000000..01669ffb0f --- /dev/null +++ b/xdr/transaction_envelope_test.go @@ -0,0 +1,340 @@ +package xdr + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func createLegacyTx() TransactionEnvelope { + return TransactionEnvelope{ + Type: EnvelopeTypeEnvelopeTypeTxV0, + V0: &TransactionV0Envelope{ + Tx: TransactionV0{ + SourceAccountEd25519: Uint256{1, 2, 3}, + Fee: 99, + Memo: Memo{ + Type: MemoTypeMemoNone, + }, + SeqNum: 33, + TimeBounds: &TimeBounds{ + MinTime: 1, + MaxTime: 2, + }, + Operations: []Operation{ + { + Body: OperationBody{ + BumpSequenceOp: &BumpSequenceOp{ + BumpTo: 34, + }, + }, + }, + }, + }, + Signatures: []DecoratedSignature{ + { + Hint: SignatureHint{1, 1, 1, 1}, + Signature: Signature{10, 10, 10}, + }, + }, + }, + } +} + +func createTx() TransactionEnvelope { + return TransactionEnvelope{ + Type: EnvelopeTypeEnvelopeTypeTx, + V1: &TransactionV1Envelope{ + Tx: Transaction{ + SourceAccount: MuxedAccount{ + Type: CryptoKeyTypeKeyTypeEd25519, + Ed25519: &Uint256{ + 3, 3, 3, + }, + }, + Fee: 99, + Memo: Memo{ + Type: MemoTypeMemoHash, + Hash: &Hash{1, 1, 1}, + }, + SeqNum: 97, + TimeBounds: &TimeBounds{ + MinTime: 2, + MaxTime: 4, + }, + Operations: []Operation{ + { + Body: OperationBody{ + BumpSequenceOp: &BumpSequenceOp{ + BumpTo: 98, + }, + }, + }, + }, + }, + Signatures: []DecoratedSignature{ + { + Hint: SignatureHint{2, 2, 2, 2}, + Signature: Signature{20, 20, 20}, + }, + }, + }, + } +} + +func createFeeBumpTx() 
TransactionEnvelope { + return TransactionEnvelope{ + Type: EnvelopeTypeEnvelopeTypeTxFeeBump, + FeeBump: &FeeBumpTransactionEnvelope{ + Tx: FeeBumpTransaction{ + FeeSource: MuxedAccount{ + Type: CryptoKeyTypeKeyTypeEd25519, + Ed25519: &Uint256{2, 2, 2}, + }, + Fee: 776, + InnerTx: FeeBumpTransactionInnerTx{ + Type: EnvelopeTypeEnvelopeTypeTx, + V1: createTx().V1, + }, + }, + Signatures: []DecoratedSignature{ + { + Hint: SignatureHint{3, 3, 3, 3}, + Signature: Signature{30, 30, 30}, + }, + }, + }, + } +} + +func TestIsFeeBump(t *testing.T) { + legacyTx := createLegacyTx() + tx := createTx() + feeBumpTx := createFeeBumpTx() + + assert.False(t, legacyTx.IsFeeBump()) + assert.False(t, tx.IsFeeBump()) + + assert.True(t, feeBumpTx.IsFeeBump()) +} + +func TestFeeBumpAccount(t *testing.T) { + legacyTx := createLegacyTx() + tx := createTx() + feeBumpTx := createFeeBumpTx() + + assert.Panics(t, func() { + tx.FeeBumpAccount() + }) + assert.Panics(t, func() { + legacyTx.FeeBumpAccount() + }) + + account := feeBumpTx.FeeBumpAccount() + assert.Equal(t, feeBumpTx.FeeBump.Tx.FeeSource, account) +} + +func TestFeeBumpFee(t *testing.T) { + legacyTx := createLegacyTx() + tx := createTx() + feeBumpTx := createFeeBumpTx() + + assert.Panics(t, func() { + tx.FeeBumpFee() + }) + assert.Panics(t, func() { + legacyTx.FeeBumpFee() + }) + + fee := feeBumpTx.FeeBumpFee() + assert.Equal(t, int64(feeBumpTx.FeeBump.Tx.Fee), fee) +} + +func TestFeeBumpSignatures(t *testing.T) { + legacyTx := createLegacyTx() + tx := createTx() + feeBumpTx := createFeeBumpTx() + + assert.Panics(t, func() { + tx.FeeBumpSignatures() + }) + assert.Panics(t, func() { + legacyTx.FeeBumpSignatures() + }) + + sigs := feeBumpTx.FeeBumpSignatures() + assert.Equal(t, feeBumpTx.FeeBump.Signatures, sigs) +} + +func TestSourceAccount(t *testing.T) { + legacyTx := createLegacyTx() + tx := createTx() + feeBumpTx := createFeeBumpTx() + + assert.Equal( + t, + CryptoKeyTypeKeyTypeEd25519, + legacyTx.SourceAccount().Type, + ) + assert.Equal( + t, + legacyTx.V0.Tx.SourceAccountEd25519, + *legacyTx.SourceAccount().Ed25519, + ) + + assert.Equal( + t, + tx.V1.Tx.SourceAccount, + tx.SourceAccount(), + ) + + assert.Equal( + t, + feeBumpTx.FeeBump.Tx.InnerTx.V1.Tx.SourceAccount, + feeBumpTx.SourceAccount(), + ) +} + +func TestFee(t *testing.T) { + legacyTx := createLegacyTx() + tx := createTx() + feeBumpTx := createFeeBumpTx() + + assert.Equal( + t, + uint32(legacyTx.V0.Tx.Fee), + legacyTx.Fee(), + ) + + assert.Equal( + t, + uint32(tx.V1.Tx.Fee), + tx.Fee(), + ) + + assert.Equal( + t, + uint32(feeBumpTx.FeeBump.Tx.InnerTx.V1.Tx.Fee), + feeBumpTx.Fee(), + ) +} + +func TestSignatures(t *testing.T) { + legacyTx := createLegacyTx() + tx := createTx() + feeBumpTx := createFeeBumpTx() + + assert.Equal( + t, + legacyTx.V0.Signatures, + legacyTx.Signatures(), + ) + + assert.Equal( + t, + tx.V1.Signatures, + tx.Signatures(), + ) + + assert.Equal( + t, + feeBumpTx.FeeBump.Tx.InnerTx.V1.Signatures, + feeBumpTx.Signatures(), + ) +} + +func TestSeqNum(t *testing.T) { + legacyTx := createLegacyTx() + tx := createTx() + feeBumpTx := createFeeBumpTx() + + assert.Equal( + t, + int64(legacyTx.V0.Tx.SeqNum), + legacyTx.SeqNum(), + ) + + assert.Equal( + t, + int64(tx.V1.Tx.SeqNum), + tx.SeqNum(), + ) + + assert.Equal( + t, + int64(feeBumpTx.FeeBump.Tx.InnerTx.V1.Tx.SeqNum), + feeBumpTx.SeqNum(), + ) +} + +func TestTimeBounds(t *testing.T) { + legacyTx := createLegacyTx() + tx := createTx() + feeBumpTx := createFeeBumpTx() + + assert.Equal( + t, + legacyTx.V0.Tx.TimeBounds, + 
legacyTx.TimeBounds(),
+	)
+
+	assert.Equal(
+		t,
+		tx.V1.Tx.TimeBounds,
+		tx.TimeBounds(),
+	)
+
+	assert.Equal(
+		t,
+		feeBumpTx.FeeBump.Tx.InnerTx.V1.Tx.TimeBounds,
+		feeBumpTx.TimeBounds(),
+	)
+}
+
+func TestOperations(t *testing.T) {
+	legacyTx := createLegacyTx()
+	tx := createTx()
+	feeBumpTx := createFeeBumpTx()
+
+	assert.Equal(
+		t,
+		legacyTx.V0.Tx.Operations,
+		legacyTx.Operations(),
+	)
+
+	assert.Equal(
+		t,
+		tx.V1.Tx.Operations,
+		tx.Operations(),
+	)
+
+	assert.Equal(
+		t,
+		feeBumpTx.FeeBump.Tx.InnerTx.V1.Tx.Operations,
+		feeBumpTx.Operations(),
+	)
+}
+
+func TestMemo(t *testing.T) {
+	legacyTx := createLegacyTx()
+	tx := createTx()
+	feeBumpTx := createFeeBumpTx()
+
+	assert.Equal(
+		t,
+		legacyTx.V0.Tx.Memo,
+		legacyTx.Memo(),
+	)
+
+	assert.Equal(
+		t,
+		tx.V1.Tx.Memo,
+		tx.Memo(),
+	)
+
+	assert.Equal(
+		t,
+		feeBumpTx.FeeBump.Tx.InnerTx.V1.Tx.Memo,
+		feeBumpTx.Memo(),
+	)
+}
diff --git a/xdr/transaction_meta.go b/xdr/transaction_meta.go
new file mode 100644
index 0000000000..0f0c130c63
--- /dev/null
+++ b/xdr/transaction_meta.go
@@ -0,0 +1,16 @@
+package xdr
+
+// OperationsMeta is a helper on TransactionMeta that returns the operations
+// meta from `TransactionMeta.Operations`, `TransactionMeta.V1.Operations` or
+// `TransactionMeta.V2.Operations`, depending on the meta version.
+func (transactionMeta *TransactionMeta) OperationsMeta() []OperationMeta {
+	switch transactionMeta.V {
+	case 0:
+		return *transactionMeta.Operations
+	case 1:
+		return transactionMeta.MustV1().Operations
+	case 2:
+		return transactionMeta.MustV2().Operations
+	default:
+		panic("Unsupported TransactionMeta version")
+	}
+}
diff --git a/xdr/transaction_result.go b/xdr/transaction_result.go
new file mode 100644
index 0000000000..77abed4ddf
--- /dev/null
+++ b/xdr/transaction_result.go
@@ -0,0 +1,70 @@
+package xdr
+
+import "github.com/stellar/go/support/errors"
+
+// Successful returns true if the transaction succeeded
+func (r TransactionResult) Successful() bool {
+	return r.Result.Code == TransactionResultCodeTxSuccess ||
+		r.Result.Code == TransactionResultCodeTxFeeBumpInnerSuccess
+}
+
+// OperationResults returns the operation results for the transaction
+func (r TransactionResult) OperationResults() ([]OperationResult, bool) {
+	innerResults, ok := r.Result.GetInnerResultPair()
+	if ok {
+		return innerResults.Result.Result.GetResults()
+	}
+	return r.Result.GetResults()
+}
+
+// Successful returns true if the transaction succeeded
+func (r TransactionResultPair) Successful() bool {
+	return r.Result.Successful()
+}
+
+// OperationResults returns the operation results for the transaction
+func (r TransactionResultPair) OperationResults() ([]OperationResult, bool) {
+	return r.Result.OperationResults()
+}
+
+// InnerHash returns the hash of the inner transaction.
+// This function can only be called on fee bump transactions.
+func (r TransactionResultPair) InnerHash() Hash {
+	return r.Result.Result.MustInnerResultPair().TransactionHash
+}
+
+// ExtractBalanceID parses the operation result at `opIndex` within the
+// transaction result, returning the internal XDR structure for the claimable
+// balance ID.
+//
+// If the specified operation index does not point to a successful
+// `CreateClaimableBalance` operation result, this function panics.
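+//
+// Illustrative sketch (txResult is a hypothetical TransactionResult):
+//
+//	balanceID, err := txResult.ExtractBalanceID(0)
+//	// balanceID is a *ClaimableBalanceId when operation 0 created a claimable balance.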
+func (r TransactionResult) ExtractBalanceID(opIndex int) (*ClaimableBalanceId, error) { + opResults, ok := r.OperationResults() + if !ok { + return nil, errors.New("Failed to retrieve transaction's operation results") + } + + if opIndex < 0 || opIndex >= len(opResults) { + return nil, errors.New("Invalid operation index") + } + + result := opResults[opIndex] + return result.MustTr().MustCreateClaimableBalanceResult().BalanceId, nil +} + +// ExtractBalanceIDHex works like `ExtractBalanceID`, but will return the hex +// encoding of the resulting value. +func (r TransactionResult) ExtractBalanceIDHex(opIndex int) (string, error) { + balanceId, err := r.ExtractBalanceID(opIndex) + if err != nil { + return "", err + } + + hex, err := MarshalHex(balanceId) + if err != nil { + return "", errors.Wrap(err, "Failed to determine balance ID") + } + + return hex, nil +} diff --git a/xdr/transaction_result_test.go b/xdr/transaction_result_test.go new file mode 100644 index 0000000000..bfdfd3a758 --- /dev/null +++ b/xdr/transaction_result_test.go @@ -0,0 +1,208 @@ +package xdr + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func createTxResult(code TransactionResultCode) TransactionResult { + return TransactionResult{ + FeeCharged: 123, + Result: TransactionResultResult{ + Code: code, + Results: &[]OperationResult{}, + }, + } +} + +func TestSuccessful(t *testing.T) { + for _, testCase := range []struct { + code TransactionResultCode + expected bool + }{ + {TransactionResultCodeTxSuccess, true}, + {TransactionResultCodeTxFeeBumpInnerSuccess, true}, + {TransactionResultCodeTxFailed, false}, + {TransactionResultCodeTxFeeBumpInnerFailed, false}, + {TransactionResultCodeTxBadSeq, false}, + } { + + result := createTxResult(testCase.code) + assert.Equal(t, testCase.expected, result.Successful()) + resultPair := TransactionResultPair{ + Result: result, + } + assert.Equal(t, testCase.expected, resultPair.Successful()) + } +} + +func TestOperationResults(t *testing.T) { + successfulEmptyTx := createTxResult(TransactionResultCodeTxSuccess) + successfulEmptyTx.Result.Results = &[]OperationResult{} + + successfulEmptyFeeBumpTx := createTxResult(TransactionResultCodeTxFeeBumpInnerSuccess) + successfulEmptyFeeBumpTx.Result.InnerResultPair = &InnerTransactionResultPair{ + Result: InnerTransactionResult{ + Result: InnerTransactionResultResult{ + Code: TransactionResultCodeTxSuccess, + Results: &[]OperationResult{}, + }, + }, + } + + failedEmptyTx := createTxResult(TransactionResultCodeTxFailed) + failedEmptyTx.Result.Results = &[]OperationResult{} + + failedEmptyFeeBumpTx := createTxResult(TransactionResultCodeTxFeeBumpInnerFailed) + failedEmptyFeeBumpTx.Result.InnerResultPair = &InnerTransactionResultPair{ + Result: InnerTransactionResult{ + Result: InnerTransactionResultResult{ + Code: TransactionResultCodeTxFailed, + Results: &[]OperationResult{}, + }, + }, + } + + bumpSeqOp := OperationResult{ + Tr: &OperationResultTr{ + Type: OperationTypeBumpSequence, + BumpSeqResult: &BumpSequenceResult{ + Code: BumpSequenceResultCodeBumpSequenceSuccess, + }, + }, + } + inflationOp := OperationResult{ + Tr: &OperationResultTr{ + Type: OperationTypeInflation, + InflationResult: &InflationResult{ + Code: InflationResultCodeInflationNotTime, + }, + }, + } + + successfulTx := createTxResult(TransactionResultCodeTxSuccess) + successfulTx.Result.Results = &[]OperationResult{bumpSeqOp} + + successfulFeeBumpTx := createTxResult(TransactionResultCodeTxFeeBumpInnerSuccess) + successfulFeeBumpTx.Result.InnerResultPair 
= &InnerTransactionResultPair{ + Result: InnerTransactionResult{ + Result: InnerTransactionResultResult{ + Code: TransactionResultCodeTxSuccess, + Results: &[]OperationResult{inflationOp}, + }, + }, + } + + failedBumpSeqOp := OperationResult{ + Tr: &OperationResultTr{ + Type: OperationTypeBumpSequence, + BumpSeqResult: &BumpSequenceResult{ + Code: BumpSequenceResultCodeBumpSequenceBadSeq, + }, + }, + } + failedPaymentOp := OperationResult{ + Tr: &OperationResultTr{ + Type: OperationTypePayment, + PaymentResult: &PaymentResult{ + Code: PaymentResultCodePaymentMalformed, + }, + }, + } + + failedTx := createTxResult(TransactionResultCodeTxFailed) + failedTx.Result.Results = &[]OperationResult{failedBumpSeqOp} + + failedFeeBumpTx := createTxResult(TransactionResultCodeTxFeeBumpInnerFailed) + failedFeeBumpTx.Result.InnerResultPair = &InnerTransactionResultPair{ + Result: InnerTransactionResult{ + Result: InnerTransactionResultResult{ + Code: TransactionResultCodeTxFailed, + Results: &[]OperationResult{failedPaymentOp}, + }, + }, + } + + for _, testCase := range []struct { + result TransactionResult + expectedOk bool + expectedOperations []OperationResult + }{ + { + createTxResult(TransactionResultCodeTxBadSeq), + false, + nil, + }, + { + successfulEmptyTx, + true, + []OperationResult{}, + }, + { + successfulEmptyFeeBumpTx, + true, + []OperationResult{}, + }, + { + failedEmptyTx, + true, + []OperationResult{}, + }, + { + failedEmptyFeeBumpTx, + true, + []OperationResult{}, + }, + { + successfulTx, + true, + []OperationResult{bumpSeqOp}, + }, + { + successfulFeeBumpTx, + true, + []OperationResult{inflationOp}, + }, + { + failedTx, + true, + []OperationResult{failedBumpSeqOp}, + }, + { + failedFeeBumpTx, + true, + []OperationResult{failedPaymentOp}, + }, + } { + opResults, ok := testCase.result.OperationResults() + assert.Equal(t, testCase.expectedOk, ok) + assert.Equal(t, testCase.expectedOperations, opResults) + + resultPair := TransactionResultPair{ + Result: testCase.result, + } + opResults, ok = resultPair.OperationResults() + assert.Equal(t, testCase.expectedOk, ok) + assert.Equal(t, testCase.expectedOperations, opResults) + } +} + +func TestInnerHash(t *testing.T) { + tx := TransactionResultPair{ + TransactionHash: Hash{1, 1, 1}, + Result: createTxResult(TransactionResultCodeTxSuccess), + } + assert.Panics(t, func() { + tx.InnerHash() + }) + + feeBumpTx := TransactionResultPair{ + TransactionHash: Hash{1, 1, 1}, + Result: createTxResult(TransactionResultCodeTxFeeBumpInnerSuccess), + } + feeBumpTx.Result.Result.InnerResultPair = &InnerTransactionResultPair{ + TransactionHash: Hash{1, 2, 3}, + } + assert.Equal(t, Hash{1, 2, 3}, feeBumpTx.InnerHash()) +} diff --git a/xdr/trust_line_asset.go b/xdr/trust_line_asset.go new file mode 100644 index 0000000000..d9eb9d4f6a --- /dev/null +++ b/xdr/trust_line_asset.go @@ -0,0 +1,101 @@ +package xdr + +import ( + "fmt" +) + +// ToAsset converts TrustLineAsset to Asset. Panics on type other than +// AssetTypeAssetTypeNative, AssetTypeAssetTypeCreditAlphanum4 or +// AssetTypeAssetTypeCreditAlphanum12. 
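+//
+// Illustrative sketch (tla is a hypothetical TrustLineAsset):
+//
+//	asset := tla.ToAsset() // panics if tla is a pool share (AssetTypeAssetTypePoolShare)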
+func (tla TrustLineAsset) ToAsset() Asset { + var a Asset + + a.Type = tla.Type + + switch a.Type { + case AssetTypeAssetTypeNative: + // Empty branch + case AssetTypeAssetTypeCreditAlphanum4: + assetCode4 := *tla.AlphaNum4 + a.AlphaNum4 = &assetCode4 + case AssetTypeAssetTypeCreditAlphanum12: + assetCode12 := *tla.AlphaNum12 + a.AlphaNum12 = &assetCode12 + default: + panic(fmt.Errorf("Cannot transform type %v to Asset", a.Type)) + } + + return a +} + +// MustExtract behaves as Extract, but panics if an error occurs. +func (a TrustLineAsset) Extract(typ interface{}, code interface{}, issuer interface{}) error { + return a.ToAsset().Extract(typ, code, issuer) +} + +// MustExtract behaves as Extract, but panics if an error occurs. +func (a TrustLineAsset) MustExtract(typ interface{}, code interface{}, issuer interface{}) { + err := a.ToAsset().Extract(typ, code, issuer) + + if err != nil { + panic(err) + } +} + +func trimRightZeros(b []byte) []byte { + if len(b) == 0 { + return b + } + i := len(b) + for ; i > 0; i-- { + if b[i-1] != 0 { + break + } + } + return b[:i] +} + +func (e *EncodingBuffer) assetTrustlineCompressEncodeTo(a TrustLineAsset) error { + if err := e.xdrEncoderBuf.WriteByte(byte(a.Type)); err != nil { + return err + } + + switch a.Type { + case AssetTypeAssetTypeNative: + return nil + case AssetTypeAssetTypeCreditAlphanum4: + code := trimRightZeros(a.AlphaNum4.AssetCode[:]) + if _, err := e.xdrEncoderBuf.Write(code); err != nil { + return err + } + return e.accountIdCompressEncodeTo(a.AlphaNum4.Issuer) + case AssetTypeAssetTypeCreditAlphanum12: + code := trimRightZeros(a.AlphaNum12.AssetCode[:]) + if _, err := e.xdrEncoderBuf.Write(code); err != nil { + return err + } + return e.accountIdCompressEncodeTo(a.AlphaNum12.Issuer) + case AssetTypeAssetTypePoolShare: + _, err := e.xdrEncoderBuf.Write(a.LiquidityPoolId[:]) + return err + default: + panic(fmt.Errorf("Unknown asset type: %v", a.Type)) + } +} + +func (a TrustLineAsset) Equals(other TrustLineAsset) bool { + if a.Type != other.Type { + return false + } + switch a.Type { + case AssetTypeAssetTypeNative, + AssetTypeAssetTypeCreditAlphanum4, + AssetTypeAssetTypeCreditAlphanum12: + // Safe because a.Type == other.Type + return a.ToAsset().Equals(other.ToAsset()) + case AssetTypeAssetTypePoolShare: + return *a.LiquidityPoolId == *other.LiquidityPoolId + default: + panic(fmt.Errorf("Unknown asset type: %v", a.Type)) + } +} diff --git a/xdr/trust_line_entry.go b/xdr/trust_line_entry.go new file mode 100644 index 0000000000..4436156a8c --- /dev/null +++ b/xdr/trust_line_entry.go @@ -0,0 +1,10 @@ +package xdr + +// Liabilities returns TrustLineEntry's liabilities +func (trustLine *TrustLineEntry) Liabilities() Liabilities { + var liabilities Liabilities + if trustLine.Ext.V1 != nil { + liabilities = trustLine.Ext.V1.Liabilities + } + return liabilities +} diff --git a/xdr/trust_line_flags.go b/xdr/trust_line_flags.go new file mode 100644 index 0000000000..6f2edb8a66 --- /dev/null +++ b/xdr/trust_line_flags.go @@ -0,0 +1,19 @@ +package xdr + +// IsAuthorized returns true if issuer has authorized account to perform +// transactions with its credit +func (e TrustLineFlags) IsAuthorized() bool { + return (e & TrustLineFlagsAuthorizedFlag) != 0 +} + +// IsAuthorizedToMaintainLiabilitiesFlag returns true if the issuer has authorized +// the account to maintain and reduce liabilities for its credit +func (e TrustLineFlags) IsAuthorizedToMaintainLiabilitiesFlag() bool { + return (e & TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag) != 
0 +} + +// IsClawbackEnabledFlag returns true if the issuer has authorized +// the account to claw assets back +func (e TrustLineFlags) IsClawbackEnabledFlag() bool { + return (e & TrustLineFlagsTrustlineClawbackEnabledFlag) != 0 +} diff --git a/xdr/trust_line_flags_test.go b/xdr/trust_line_flags_test.go new file mode 100644 index 0000000000..1fc42a6cc8 --- /dev/null +++ b/xdr/trust_line_flags_test.go @@ -0,0 +1,47 @@ +package xdr_test + +import ( + "testing" + + "github.com/stellar/go/xdr" + "github.com/stretchr/testify/assert" +) + +func TestIsAuthorized(t *testing.T) { + tt := assert.New(t) + + flag := xdr.TrustLineFlags(1) + tt.True(flag.IsAuthorized()) + + flag = xdr.TrustLineFlags(0) + tt.False(flag.IsAuthorized()) + + flag = xdr.TrustLineFlags(2) + tt.False(flag.IsAuthorized()) +} + +func TestIsAuthorizedToMaintainLiabilitiesFlag(t *testing.T) { + tt := assert.New(t) + + flag := xdr.TrustLineFlags(1) + tt.False(flag.IsAuthorizedToMaintainLiabilitiesFlag()) + + flag = xdr.TrustLineFlags(0) + tt.False(flag.IsAuthorizedToMaintainLiabilitiesFlag()) + + flag = xdr.TrustLineFlags(2) + tt.True(flag.IsAuthorizedToMaintainLiabilitiesFlag()) +} + +func TestIsClawbackEnabledFlag(t *testing.T) { + tt := assert.New(t) + + flag := xdr.TrustLineFlags(1) + tt.False(flag.IsClawbackEnabledFlag()) + + flag = xdr.TrustLineFlags(0) + tt.False(flag.IsClawbackEnabledFlag()) + + flag = xdr.TrustLineFlags(4) + tt.True(flag.IsClawbackEnabledFlag()) +} diff --git a/xdr/xdr_generated.go b/xdr/xdr_generated.go index 23ba0c0f9b..491dff3330 100644 --- a/xdr/xdr_generated.go +++ b/xdr/xdr_generated.go @@ -1,518 +1,18652 @@ +//lint:file-ignore S1005 The issue should be fixed in xdrgen. Unfortunately, there's no way to ignore a single file in staticcheck. +//lint:file-ignore U1000 fmtTest is not needed anywhere, should be removed in xdrgen. // Package xdr is generated from: // -// xdr/Stellar-types.x +// xdr/Stellar-SCP.x // xdr/Stellar-ledger-entries.x -// xdr/Stellar-transaction.x // xdr/Stellar-ledger.x // xdr/Stellar-overlay.x -// xdr/Stellar-SCP.x +// xdr/Stellar-transaction.x +// xdr/Stellar-types.x // // DO NOT EDIT or your changes may be overwritten package xdr import ( + "bytes" + "encoding" "fmt" "io" - "github.com/nullstyle/go-xdr/xdr3" + "github.com/stellar/go-xdr/xdr3" ) +type xdrType interface { + xdrType() +} + +type decoderFrom interface { + DecodeFrom(d *xdr.Decoder) (int, error) +} + // Unmarshal reads an xdr element from `r` into `v`. func Unmarshal(r io.Reader, v interface{}) (int, error) { + if decodable, ok := v.(decoderFrom); ok { + d := xdr.NewDecoder(r) + return decodable.DecodeFrom(d) + } // delegate to xdr package's Unmarshal return xdr.Unmarshal(r, v) } // Marshal writes an xdr element `v` into `w`. func Marshal(w io.Writer, v interface{}) (int, error) { + if _, ok := v.(xdrType); ok { + if bm, ok := v.(encoding.BinaryMarshaler); ok { + b, err := bm.MarshalBinary() + if err != nil { + return 0, err + } + return w.Write(b) + } + } // delegate to xdr package's Marshal return xdr.Marshal(w, v) } -// Hash is an XDR Typedef defines as: -// -// typedef opaque Hash[32]; -// -type Hash [32]byte - -// Uint256 is an XDR Typedef defines as: +// Value is an XDR Typedef defines as: // -// typedef opaque uint256[32]; +// typedef opaque Value<>; // -type Uint256 [32]byte +type Value []byte -// Uint32 is an XDR Typedef defines as: -// -// typedef unsigned int uint32; -// -type Uint32 uint32 +// EncodeTo encodes this value using the Encoder. 
+func (s Value) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeOpaque(s[:]); err != nil { + return err + } + return nil +} -// Int32 is an XDR Typedef defines as: -// -// typedef int int32; -// -type Int32 int32 +var _ decoderFrom = (*Value)(nil) -// Uint64 is an XDR Typedef defines as: -// -// typedef unsigned hyper uint64; -// -type Uint64 uint64 +// DecodeFrom decodes this value using the Decoder. +func (s *Value) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + (*s), nTmp, err = d.DecodeOpaque(0) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Value: %s", err) + } + return n, nil +} -// Int64 is an XDR Typedef defines as: -// -// typedef hyper int64; -// -type Int64 int64 +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Value) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} -// CryptoKeyType is an XDR Enum defines as: -// -// enum CryptoKeyType -// { -// KEY_TYPE_ED25519 = 0 -// }; -// -type CryptoKeyType int32 +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Value) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} -const ( - CryptoKeyTypeKeyTypeEd25519 CryptoKeyType = 0 +var ( + _ encoding.BinaryMarshaler = (*Value)(nil) + _ encoding.BinaryUnmarshaler = (*Value)(nil) ) -var cryptoKeyTypeMap = map[int32]string{ - 0: "CryptoKeyTypeKeyTypeEd25519", -} - -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for CryptoKeyType -func (e CryptoKeyType) ValidEnum(v int32) bool { - _, ok := cryptoKeyTypeMap[v] - return ok -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Value) xdrType() {} -// String returns the name of `e` -func (e CryptoKeyType) String() string { - name, _ := cryptoKeyTypeMap[int32(e)] - return name -} +var _ xdrType = (*Value)(nil) -// PublicKey is an XDR Union defines as: +// ScpBallot is an XDR Struct defines as: // -// union PublicKey switch (CryptoKeyType type) +// struct SCPBallot // { -// case KEY_TYPE_ED25519: -// uint256 ed25519; +// uint32 counter; // n +// Value value; // x // }; // -type PublicKey struct { - Type CryptoKeyType - Ed25519 *Uint256 -} - -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u PublicKey) SwitchFieldName() string { - return "Type" +type ScpBallot struct { + Counter Uint32 + Value Value } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of PublicKey -func (u PublicKey) ArmForSwitch(sw int32) (string, bool) { - switch CryptoKeyType(sw) { - case CryptoKeyTypeKeyTypeEd25519: - return "Ed25519", true +// EncodeTo encodes this value using the Encoder. +func (s *ScpBallot) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Counter.EncodeTo(e); err != nil { + return err } - return "-", false -} - -// NewPublicKey creates a new PublicKey. 
-func NewPublicKey(aType CryptoKeyType, value interface{}) (result PublicKey, err error) { - result.Type = aType - switch CryptoKeyType(aType) { - case CryptoKeyTypeKeyTypeEd25519: - tv, ok := value.(Uint256) - if !ok { - err = fmt.Errorf("invalid value, must be Uint256") - return - } - result.Ed25519 = &tv + if err = s.Value.EncodeTo(e); err != nil { + return err } - return + return nil } -// MustEd25519 retrieves the Ed25519 value from the union, -// panicing if the value is not set. -func (u PublicKey) MustEd25519() Uint256 { - val, ok := u.GetEd25519() +var _ decoderFrom = (*ScpBallot)(nil) - if !ok { - panic("arm Ed25519 is not set") +// DecodeFrom decodes this value using the Decoder. +func (s *ScpBallot) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Counter.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.Value.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Value: %s", err) } + return n, nil +} - return val +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpBallot) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetEd25519 retrieves the Ed25519 value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u PublicKey) GetEd25519() (result Uint256, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ScpBallot) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if armName == "Ed25519" { - result = *u.Ed25519 - ok = true - } +var ( + _ encoding.BinaryMarshaler = (*ScpBallot)(nil) + _ encoding.BinaryUnmarshaler = (*ScpBallot)(nil) +) - return -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ScpBallot) xdrType() {} -// Signature is an XDR Typedef defines as: -// -// typedef opaque Signature<64>; -// -type Signature []byte +var _ xdrType = (*ScpBallot)(nil) -// SignatureHint is an XDR Typedef defines as: +// ScpStatementType is an XDR Enum defines as: // -// typedef opaque SignatureHint[4]; +// enum SCPStatementType +// { +// SCP_ST_PREPARE = 0, +// SCP_ST_CONFIRM = 1, +// SCP_ST_EXTERNALIZE = 2, +// SCP_ST_NOMINATE = 3 +// }; // -type SignatureHint [4]byte +type ScpStatementType int32 -// NodeId is an XDR Typedef defines as: -// -// typedef PublicKey NodeID; -// -type NodeId PublicKey +const ( + ScpStatementTypeScpStPrepare ScpStatementType = 0 + ScpStatementTypeScpStConfirm ScpStatementType = 1 + ScpStatementTypeScpStExternalize ScpStatementType = 2 + ScpStatementTypeScpStNominate ScpStatementType = 3 +) -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u NodeId) SwitchFieldName() string { - return PublicKey(u).SwitchFieldName() +var scpStatementTypeMap = map[int32]string{ + 0: "ScpStatementTypeScpStPrepare", + 1: "ScpStatementTypeScpStConfirm", + 2: "ScpStatementTypeScpStExternalize", + 3: "ScpStatementTypeScpStNominate", } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of PublicKey -func (u NodeId) ArmForSwitch(sw int32) (string, bool) { - return PublicKey(u).ArmForSwitch(sw) +// ValidEnum validates a proposed value for this enum. 
Implements +// the Enum interface for ScpStatementType +func (e ScpStatementType) ValidEnum(v int32) bool { + _, ok := scpStatementTypeMap[v] + return ok } -// NewNodeId creates a new NodeId. -func NewNodeId(aType CryptoKeyType, value interface{}) (result NodeId, err error) { - u, err := NewPublicKey(aType, value) - result = NodeId(u) - return +// String returns the name of `e` +func (e ScpStatementType) String() string { + name, _ := scpStatementTypeMap[int32(e)] + return name } -// MustEd25519 retrieves the Ed25519 value from the union, -// panicing if the value is not set. -func (u NodeId) MustEd25519() Uint256 { - return PublicKey(u).MustEd25519() +// EncodeTo encodes this value using the Encoder. +func (e ScpStatementType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := scpStatementTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ScpStatementType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// GetEd25519 retrieves the Ed25519 value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u NodeId) GetEd25519() (result Uint256, ok bool) { - return PublicKey(u).GetEd25519() -} +var _ decoderFrom = (*ScpStatementType)(nil) -// Curve25519Secret is an XDR Struct defines as: -// -// struct Curve25519Secret -// { -// opaque key[32]; -// }; -// -type Curve25519Secret struct { - Key [32]byte +// DecodeFrom decodes this value using the Decoder. +func (e *ScpStatementType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ScpStatementType: %s", err) + } + if _, ok := scpStatementTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ScpStatementType enum value", v) + } + *e = ScpStatementType(v) + return n, nil } -// Curve25519Public is an XDR Struct defines as: -// -// struct Curve25519Public -// { -// opaque key[32]; -// }; -// -type Curve25519Public struct { - Key [32]byte +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpStatementType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// HmacSha256Key is an XDR Struct defines as: -// -// struct HmacSha256Key -// { -// opaque key[32]; -// }; -// -type HmacSha256Key struct { - Key [32]byte +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ScpStatementType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// HmacSha256Mac is an XDR Struct defines as: +var ( + _ encoding.BinaryMarshaler = (*ScpStatementType)(nil) + _ encoding.BinaryUnmarshaler = (*ScpStatementType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ScpStatementType) xdrType() {} + +var _ xdrType = (*ScpStatementType)(nil) + +// ScpNomination is an XDR Struct defines as: // -// struct HmacSha256Mac +// struct SCPNomination // { -// opaque mac[32]; +// Hash quorumSetHash; // D +// Value votes<>; // X +// Value accepted<>; // Y // }; // -type HmacSha256Mac struct { - Mac [32]byte +type ScpNomination struct { + QuorumSetHash Hash + Votes []Value + Accepted []Value } -// AccountId is an XDR Typedef defines as: -// -// typedef PublicKey AccountID; -// -type AccountId PublicKey - -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u AccountId) SwitchFieldName() string { - return PublicKey(u).SwitchFieldName() +// EncodeTo encodes this value using the Encoder. +func (s *ScpNomination) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.QuorumSetHash.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Votes))); err != nil { + return err + } + for i := 0; i < len(s.Votes); i++ { + if err = s.Votes[i].EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeUint(uint32(len(s.Accepted))); err != nil { + return err + } + for i := 0; i < len(s.Accepted); i++ { + if err = s.Accepted[i].EncodeTo(e); err != nil { + return err + } + } + return nil } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of PublicKey -func (u AccountId) ArmForSwitch(sw int32) (string, bool) { - return PublicKey(u).ArmForSwitch(sw) -} +var _ decoderFrom = (*ScpNomination)(nil) -// NewAccountId creates a new AccountId. -func NewAccountId(aType CryptoKeyType, value interface{}) (result AccountId, err error) { - u, err := NewPublicKey(aType, value) - result = AccountId(u) - return +// DecodeFrom decodes this value using the Decoder. +func (s *ScpNomination) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.QuorumSetHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Value: %s", err) + } + s.Votes = nil + if l > 0 { + s.Votes = make([]Value, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Votes[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Value: %s", err) + } + } + } + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Value: %s", err) + } + s.Accepted = nil + if l > 0 { + s.Accepted = make([]Value, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Accepted[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Value: %s", err) + } + } + } + return n, nil } -// MustEd25519 retrieves the Ed25519 value from the union, -// panicing if the value is not set. -func (u AccountId) MustEd25519() Uint256 { - return PublicKey(u).MustEd25519() +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpNomination) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetEd25519 retrieves the Ed25519 value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u AccountId) GetEd25519() (result Uint256, ok bool) { - return PublicKey(u).GetEd25519() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *ScpNomination) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// Thresholds is an XDR Typedef defines as: -// -// typedef opaque Thresholds[4]; -// -type Thresholds [4]byte +var ( + _ encoding.BinaryMarshaler = (*ScpNomination)(nil) + _ encoding.BinaryUnmarshaler = (*ScpNomination)(nil) +) -// String32 is an XDR Typedef defines as: -// -// typedef string string32<32>; -// -type String32 string +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ScpNomination) xdrType() {} -// XDRMaxSize implements the Sized interface for String32 -func (e String32) XDRMaxSize() int { - return 32 -} +var _ xdrType = (*ScpNomination)(nil) -// String64 is an XDR Typedef defines as: +// ScpStatementPrepare is an XDR NestedStruct defines as: // -// typedef string string64<64>; +// struct +// { +// Hash quorumSetHash; // D +// SCPBallot ballot; // b +// SCPBallot* prepared; // p +// SCPBallot* preparedPrime; // p' +// uint32 nC; // c.n +// uint32 nH; // h.n +// } // -type String64 string - -// XDRMaxSize implements the Sized interface for String64 -func (e String64) XDRMaxSize() int { - return 64 +type ScpStatementPrepare struct { + QuorumSetHash Hash + Ballot ScpBallot + Prepared *ScpBallot + PreparedPrime *ScpBallot + NC Uint32 + NH Uint32 } -// SequenceNumber is an XDR Typedef defines as: -// -// typedef uint64 SequenceNumber; -// -type SequenceNumber Uint64 - -// DataValue is an XDR Typedef defines as: -// -// typedef opaque DataValue<64>; -// -type DataValue []byte - -// AssetType is an XDR Enum defines as: -// -// enum AssetType -// { -// ASSET_TYPE_NATIVE = 0, -// ASSET_TYPE_CREDIT_ALPHANUM4 = 1, -// ASSET_TYPE_CREDIT_ALPHANUM12 = 2 -// }; -// -type AssetType int32 +// EncodeTo encodes this value using the Encoder. +func (s *ScpStatementPrepare) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.QuorumSetHash.EncodeTo(e); err != nil { + return err + } + if err = s.Ballot.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeBool(s.Prepared != nil); err != nil { + return err + } + if s.Prepared != nil { + if err = (*s.Prepared).EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeBool(s.PreparedPrime != nil); err != nil { + return err + } + if s.PreparedPrime != nil { + if err = (*s.PreparedPrime).EncodeTo(e); err != nil { + return err + } + } + if err = s.NC.EncodeTo(e); err != nil { + return err + } + if err = s.NH.EncodeTo(e); err != nil { + return err + } + return nil +} -const ( - AssetTypeAssetTypeNative AssetType = 0 - AssetTypeAssetTypeCreditAlphanum4 AssetType = 1 - AssetTypeAssetTypeCreditAlphanum12 AssetType = 2 -) +var _ decoderFrom = (*ScpStatementPrepare)(nil) -var assetTypeMap = map[int32]string{ - 0: "AssetTypeAssetTypeNative", - 1: "AssetTypeAssetTypeCreditAlphanum4", - 2: "AssetTypeAssetTypeCreditAlphanum12", +// DecodeFrom decodes this value using the Decoder. 
+func (s *ScpStatementPrepare) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.QuorumSetHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + nTmp, err = s.Ballot.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpBallot: %s", err) + } + var b bool + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpBallot: %s", err) + } + s.Prepared = nil + if b { + s.Prepared = new(ScpBallot) + nTmp, err = s.Prepared.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpBallot: %s", err) + } + } + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpBallot: %s", err) + } + s.PreparedPrime = nil + if b { + s.PreparedPrime = new(ScpBallot) + nTmp, err = s.PreparedPrime.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpBallot: %s", err) + } + } + nTmp, err = s.NC.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.NH.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for AssetType -func (e AssetType) ValidEnum(v int32) bool { - _, ok := assetTypeMap[v] - return ok +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpStatementPrepare) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// String returns the name of `e` -func (e AssetType) String() string { - name, _ := assetTypeMap[int32(e)] - return name +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ScpStatementPrepare) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// AssetAlphaNum4 is an XDR NestedStruct defines as: -// -// struct -// { -// opaque assetCode[4]; // 1 to 4 characters -// AccountID issuer; -// } -// -type AssetAlphaNum4 struct { - AssetCode [4]byte - Issuer AccountId -} +var ( + _ encoding.BinaryMarshaler = (*ScpStatementPrepare)(nil) + _ encoding.BinaryUnmarshaler = (*ScpStatementPrepare)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ScpStatementPrepare) xdrType() {} -// AssetAlphaNum12 is an XDR NestedStruct defines as: +var _ xdrType = (*ScpStatementPrepare)(nil) + +// ScpStatementConfirm is an XDR NestedStruct defines as: // // struct -// { -// opaque assetCode[12]; // 5 to 12 characters -// AccountID issuer; -// } +// { +// SCPBallot ballot; // b +// uint32 nPrepared; // p.n +// uint32 nCommit; // c.n +// uint32 nH; // h.n +// Hash quorumSetHash; // D +// } // -type AssetAlphaNum12 struct { - AssetCode [12]byte - Issuer AccountId +type ScpStatementConfirm struct { + Ballot ScpBallot + NPrepared Uint32 + NCommit Uint32 + NH Uint32 + QuorumSetHash Hash } -// Asset is an XDR Union defines as: -// -// union Asset switch (AssetType type) -// { -// case ASSET_TYPE_NATIVE: // Not credit -// void; -// -// case ASSET_TYPE_CREDIT_ALPHANUM4: -// struct -// { -// opaque assetCode[4]; // 1 to 4 characters -// AccountID issuer; -// } alphaNum4; -// -// case ASSET_TYPE_CREDIT_ALPHANUM12: -// struct -// { -// opaque assetCode[12]; // 5 to 12 characters -// AccountID issuer; -// } alphaNum12; -// -// // add other asset types here in the future -// }; -// -type Asset struct { - Type AssetType - AlphaNum4 *AssetAlphaNum4 - AlphaNum12 *AssetAlphaNum12 +// EncodeTo encodes this value using the Encoder. +func (s *ScpStatementConfirm) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Ballot.EncodeTo(e); err != nil { + return err + } + if err = s.NPrepared.EncodeTo(e); err != nil { + return err + } + if err = s.NCommit.EncodeTo(e); err != nil { + return err + } + if err = s.NH.EncodeTo(e); err != nil { + return err + } + if err = s.QuorumSetHash.EncodeTo(e); err != nil { + return err + } + return nil } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u Asset) SwitchFieldName() string { - return "Type" -} +var _ decoderFrom = (*ScpStatementConfirm)(nil) -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of Asset -func (u Asset) ArmForSwitch(sw int32) (string, bool) { - switch AssetType(sw) { - case AssetTypeAssetTypeNative: - return "", true - case AssetTypeAssetTypeCreditAlphanum4: - return "AlphaNum4", true - case AssetTypeAssetTypeCreditAlphanum12: - return "AlphaNum12", true +// DecodeFrom decodes this value using the Decoder. +func (s *ScpStatementConfirm) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Ballot.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpBallot: %s", err) } - return "-", false + nTmp, err = s.NPrepared.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.NCommit.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.NH.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.QuorumSetHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + return n, nil } -// NewAsset creates a new Asset. 
-func NewAsset(aType AssetType, value interface{}) (result Asset, err error) { - result.Type = aType - switch AssetType(aType) { - case AssetTypeAssetTypeNative: - // void - case AssetTypeAssetTypeCreditAlphanum4: - tv, ok := value.(AssetAlphaNum4) - if !ok { - err = fmt.Errorf("invalid value, must be AssetAlphaNum4") - return - } - result.AlphaNum4 = &tv - case AssetTypeAssetTypeCreditAlphanum12: - tv, ok := value.(AssetAlphaNum12) - if !ok { - err = fmt.Errorf("invalid value, must be AssetAlphaNum12") - return - } - result.AlphaNum12 = &tv - } - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpStatementConfirm) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustAlphaNum4 retrieves the AlphaNum4 value from the union, -// panicing if the value is not set. -func (u Asset) MustAlphaNum4() AssetAlphaNum4 { - val, ok := u.GetAlphaNum4() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ScpStatementConfirm) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm AlphaNum4 is not set") - } +var ( + _ encoding.BinaryMarshaler = (*ScpStatementConfirm)(nil) + _ encoding.BinaryUnmarshaler = (*ScpStatementConfirm)(nil) +) - return val -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ScpStatementConfirm) xdrType() {} -// GetAlphaNum4 retrieves the AlphaNum4 value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u Asset) GetAlphaNum4() (result AssetAlphaNum4, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ xdrType = (*ScpStatementConfirm)(nil) - if armName == "AlphaNum4" { - result = *u.AlphaNum4 - ok = true - } +// ScpStatementExternalize is an XDR NestedStruct defines as: +// +// struct +// { +// SCPBallot commit; // c +// uint32 nH; // h.n +// Hash commitQuorumSetHash; // D used before EXTERNALIZE +// } +// +type ScpStatementExternalize struct { + Commit ScpBallot + NH Uint32 + CommitQuorumSetHash Hash +} - return +// EncodeTo encodes this value using the Encoder. +func (s *ScpStatementExternalize) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Commit.EncodeTo(e); err != nil { + return err + } + if err = s.NH.EncodeTo(e); err != nil { + return err + } + if err = s.CommitQuorumSetHash.EncodeTo(e); err != nil { + return err + } + return nil } -// MustAlphaNum12 retrieves the AlphaNum12 value from the union, -// panicing if the value is not set. -func (u Asset) MustAlphaNum12() AssetAlphaNum12 { - val, ok := u.GetAlphaNum12() +var _ decoderFrom = (*ScpStatementExternalize)(nil) - if !ok { - panic("arm AlphaNum12 is not set") +// DecodeFrom decodes this value using the Decoder. +func (s *ScpStatementExternalize) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Commit.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpBallot: %s", err) + } + nTmp, err = s.NH.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) } + nTmp, err = s.CommitQuorumSetHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + return n, nil +} - return val +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s ScpStatementExternalize) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetAlphaNum12 retrieves the AlphaNum12 value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u Asset) GetAlphaNum12() (result AssetAlphaNum12, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ScpStatementExternalize) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if armName == "AlphaNum12" { +var ( + _ encoding.BinaryMarshaler = (*ScpStatementExternalize)(nil) + _ encoding.BinaryUnmarshaler = (*ScpStatementExternalize)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ScpStatementExternalize) xdrType() {} + +var _ xdrType = (*ScpStatementExternalize)(nil) + +// ScpStatementPledges is an XDR NestedUnion defines as: +// +// union switch (SCPStatementType type) +// { +// case SCP_ST_PREPARE: +// struct +// { +// Hash quorumSetHash; // D +// SCPBallot ballot; // b +// SCPBallot* prepared; // p +// SCPBallot* preparedPrime; // p' +// uint32 nC; // c.n +// uint32 nH; // h.n +// } prepare; +// case SCP_ST_CONFIRM: +// struct +// { +// SCPBallot ballot; // b +// uint32 nPrepared; // p.n +// uint32 nCommit; // c.n +// uint32 nH; // h.n +// Hash quorumSetHash; // D +// } confirm; +// case SCP_ST_EXTERNALIZE: +// struct +// { +// SCPBallot commit; // c +// uint32 nH; // h.n +// Hash commitQuorumSetHash; // D used before EXTERNALIZE +// } externalize; +// case SCP_ST_NOMINATE: +// SCPNomination nominate; +// } +// +type ScpStatementPledges struct { + Type ScpStatementType + Prepare *ScpStatementPrepare + Confirm *ScpStatementConfirm + Externalize *ScpStatementExternalize + Nominate *ScpNomination +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u ScpStatementPledges) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of ScpStatementPledges +func (u ScpStatementPledges) ArmForSwitch(sw int32) (string, bool) { + switch ScpStatementType(sw) { + case ScpStatementTypeScpStPrepare: + return "Prepare", true + case ScpStatementTypeScpStConfirm: + return "Confirm", true + case ScpStatementTypeScpStExternalize: + return "Externalize", true + case ScpStatementTypeScpStNominate: + return "Nominate", true + } + return "-", false +} + +// NewScpStatementPledges creates a new ScpStatementPledges. 
+func NewScpStatementPledges(aType ScpStatementType, value interface{}) (result ScpStatementPledges, err error) { + result.Type = aType + switch ScpStatementType(aType) { + case ScpStatementTypeScpStPrepare: + tv, ok := value.(ScpStatementPrepare) + if !ok { + err = fmt.Errorf("invalid value, must be ScpStatementPrepare") + return + } + result.Prepare = &tv + case ScpStatementTypeScpStConfirm: + tv, ok := value.(ScpStatementConfirm) + if !ok { + err = fmt.Errorf("invalid value, must be ScpStatementConfirm") + return + } + result.Confirm = &tv + case ScpStatementTypeScpStExternalize: + tv, ok := value.(ScpStatementExternalize) + if !ok { + err = fmt.Errorf("invalid value, must be ScpStatementExternalize") + return + } + result.Externalize = &tv + case ScpStatementTypeScpStNominate: + tv, ok := value.(ScpNomination) + if !ok { + err = fmt.Errorf("invalid value, must be ScpNomination") + return + } + result.Nominate = &tv + } + return +} + +// MustPrepare retrieves the Prepare value from the union, +// panicing if the value is not set. +func (u ScpStatementPledges) MustPrepare() ScpStatementPrepare { + val, ok := u.GetPrepare() + + if !ok { + panic("arm Prepare is not set") + } + + return val +} + +// GetPrepare retrieves the Prepare value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ScpStatementPledges) GetPrepare() (result ScpStatementPrepare, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Prepare" { + result = *u.Prepare + ok = true + } + + return +} + +// MustConfirm retrieves the Confirm value from the union, +// panicing if the value is not set. +func (u ScpStatementPledges) MustConfirm() ScpStatementConfirm { + val, ok := u.GetConfirm() + + if !ok { + panic("arm Confirm is not set") + } + + return val +} + +// GetConfirm retrieves the Confirm value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ScpStatementPledges) GetConfirm() (result ScpStatementConfirm, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Confirm" { + result = *u.Confirm + ok = true + } + + return +} + +// MustExternalize retrieves the Externalize value from the union, +// panicing if the value is not set. +func (u ScpStatementPledges) MustExternalize() ScpStatementExternalize { + val, ok := u.GetExternalize() + + if !ok { + panic("arm Externalize is not set") + } + + return val +} + +// GetExternalize retrieves the Externalize value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ScpStatementPledges) GetExternalize() (result ScpStatementExternalize, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Externalize" { + result = *u.Externalize + ok = true + } + + return +} + +// MustNominate retrieves the Nominate value from the union, +// panicing if the value is not set. +func (u ScpStatementPledges) MustNominate() ScpNomination { + val, ok := u.GetNominate() + + if !ok { + panic("arm Nominate is not set") + } + + return val +} + +// GetNominate retrieves the Nominate value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ScpStatementPledges) GetNominate() (result ScpNomination, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Nominate" { + result = *u.Nominate + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
+func (u ScpStatementPledges) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch ScpStatementType(u.Type) { + case ScpStatementTypeScpStPrepare: + if err = (*u.Prepare).EncodeTo(e); err != nil { + return err + } + return nil + case ScpStatementTypeScpStConfirm: + if err = (*u.Confirm).EncodeTo(e); err != nil { + return err + } + return nil + case ScpStatementTypeScpStExternalize: + if err = (*u.Externalize).EncodeTo(e); err != nil { + return err + } + return nil + case ScpStatementTypeScpStNominate: + if err = (*u.Nominate).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (ScpStatementType) switch value '%d' is not valid for union ScpStatementPledges", u.Type) +} + +var _ decoderFrom = (*ScpStatementPledges)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *ScpStatementPledges) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpStatementType: %s", err) + } + switch ScpStatementType(u.Type) { + case ScpStatementTypeScpStPrepare: + u.Prepare = new(ScpStatementPrepare) + nTmp, err = (*u.Prepare).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpStatementPrepare: %s", err) + } + return n, nil + case ScpStatementTypeScpStConfirm: + u.Confirm = new(ScpStatementConfirm) + nTmp, err = (*u.Confirm).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpStatementConfirm: %s", err) + } + return n, nil + case ScpStatementTypeScpStExternalize: + u.Externalize = new(ScpStatementExternalize) + nTmp, err = (*u.Externalize).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpStatementExternalize: %s", err) + } + return n, nil + case ScpStatementTypeScpStNominate: + u.Nominate = new(ScpNomination) + nTmp, err = (*u.Nominate).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpNomination: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union ScpStatementPledges has invalid Type (ScpStatementType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpStatementPledges) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ScpStatementPledges) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ScpStatementPledges)(nil) + _ encoding.BinaryUnmarshaler = (*ScpStatementPledges)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ScpStatementPledges) xdrType() {} + +var _ xdrType = (*ScpStatementPledges)(nil) + +// ScpStatement is an XDR Struct defines as: +// +// struct SCPStatement +// { +// NodeID nodeID; // v +// uint64 slotIndex; // i +// +// union switch (SCPStatementType type) +// { +// case SCP_ST_PREPARE: +// struct +// { +// Hash quorumSetHash; // D +// SCPBallot ballot; // b +// SCPBallot* prepared; // p +// SCPBallot* preparedPrime; // p' +// uint32 nC; // c.n +// uint32 nH; // h.n +// } prepare; +// case SCP_ST_CONFIRM: +// struct +// { +// SCPBallot ballot; // b +// uint32 nPrepared; // p.n +// uint32 nCommit; // c.n +// uint32 nH; // h.n +// Hash quorumSetHash; // D +// } confirm; +// case SCP_ST_EXTERNALIZE: +// struct +// { +// SCPBallot commit; // c +// uint32 nH; // h.n +// Hash commitQuorumSetHash; // D used before EXTERNALIZE +// } externalize; +// case SCP_ST_NOMINATE: +// SCPNomination nominate; +// } +// pledges; +// }; +// +type ScpStatement struct { + NodeId NodeId + SlotIndex Uint64 + Pledges ScpStatementPledges +} + +// EncodeTo encodes this value using the Encoder. +func (s *ScpStatement) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.NodeId.EncodeTo(e); err != nil { + return err + } + if err = s.SlotIndex.EncodeTo(e); err != nil { + return err + } + if err = s.Pledges.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ScpStatement)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ScpStatement) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.NodeId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + nTmp, err = s.SlotIndex.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.Pledges.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpStatementPledges: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpStatement) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ScpStatement) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ScpStatement)(nil) + _ encoding.BinaryUnmarshaler = (*ScpStatement)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ScpStatement) xdrType() {} + +var _ xdrType = (*ScpStatement)(nil) + +// ScpEnvelope is an XDR Struct defines as: +// +// struct SCPEnvelope +// { +// SCPStatement statement; +// Signature signature; +// }; +// +type ScpEnvelope struct { + Statement ScpStatement + Signature Signature +} + +// EncodeTo encodes this value using the Encoder. +func (s *ScpEnvelope) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Statement.EncodeTo(e); err != nil { + return err + } + if err = s.Signature.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ScpEnvelope)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *ScpEnvelope) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Statement.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpStatement: %s", err) + } + nTmp, err = s.Signature.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signature: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpEnvelope) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ScpEnvelope) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ScpEnvelope)(nil) + _ encoding.BinaryUnmarshaler = (*ScpEnvelope)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ScpEnvelope) xdrType() {} + +var _ xdrType = (*ScpEnvelope)(nil) + +// ScpQuorumSet is an XDR Struct defines as: +// +// struct SCPQuorumSet +// { +// uint32 threshold; +// NodeID validators<>; +// SCPQuorumSet innerSets<>; +// }; +// +type ScpQuorumSet struct { + Threshold Uint32 + Validators []NodeId + InnerSets []ScpQuorumSet +} + +// EncodeTo encodes this value using the Encoder. +func (s *ScpQuorumSet) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Threshold.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Validators))); err != nil { + return err + } + for i := 0; i < len(s.Validators); i++ { + if err = s.Validators[i].EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeUint(uint32(len(s.InnerSets))); err != nil { + return err + } + for i := 0; i < len(s.InnerSets); i++ { + if err = s.InnerSets[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*ScpQuorumSet)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ScpQuorumSet) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Threshold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + s.Validators = nil + if l > 0 { + s.Validators = make([]NodeId, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Validators[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + } + } + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpQuorumSet: %s", err) + } + s.InnerSets = nil + if l > 0 { + s.InnerSets = make([]ScpQuorumSet, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.InnerSets[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpQuorumSet: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpQuorumSet) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *ScpQuorumSet) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ScpQuorumSet)(nil) + _ encoding.BinaryUnmarshaler = (*ScpQuorumSet)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ScpQuorumSet) xdrType() {} + +var _ xdrType = (*ScpQuorumSet)(nil) + +// AccountId is an XDR Typedef defines as: +// +// typedef PublicKey AccountID; +// +type AccountId PublicKey + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u AccountId) SwitchFieldName() string { + return PublicKey(u).SwitchFieldName() +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of PublicKey +func (u AccountId) ArmForSwitch(sw int32) (string, bool) { + return PublicKey(u).ArmForSwitch(sw) +} + +// NewAccountId creates a new AccountId. +func NewAccountId(aType PublicKeyType, value interface{}) (result AccountId, err error) { + u, err := NewPublicKey(aType, value) + result = AccountId(u) + return +} + +// MustEd25519 retrieves the Ed25519 value from the union, +// panicing if the value is not set. +func (u AccountId) MustEd25519() Uint256 { + return PublicKey(u).MustEd25519() +} + +// GetEd25519 retrieves the Ed25519 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u AccountId) GetEd25519() (result Uint256, ok bool) { + return PublicKey(u).GetEd25519() +} + +// EncodeTo encodes this value using the Encoder. +func (s AccountId) EncodeTo(e *xdr.Encoder) error { + var err error + if err = PublicKey(s).EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AccountId)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *AccountId) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = (*PublicKey)(s).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PublicKey: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AccountId) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountId) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AccountId)(nil) + _ encoding.BinaryUnmarshaler = (*AccountId)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AccountId) xdrType() {} + +var _ xdrType = (*AccountId)(nil) + +// Thresholds is an XDR Typedef defines as: +// +// typedef opaque Thresholds[4]; +// +type Thresholds [4]byte + +// XDRMaxSize implements the Sized interface for Thresholds +func (e Thresholds) XDRMaxSize() int { + return 4 +} + +// EncodeTo encodes this value using the Encoder. +func (s *Thresholds) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s[:]); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Thresholds)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *Thresholds) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Thresholds: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Thresholds) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Thresholds) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Thresholds)(nil) + _ encoding.BinaryUnmarshaler = (*Thresholds)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Thresholds) xdrType() {} + +var _ xdrType = (*Thresholds)(nil) + +// String32 is an XDR Typedef defines as: +// +// typedef string string32<32>; +// +type String32 string + +// XDRMaxSize implements the Sized interface for String32 +func (e String32) XDRMaxSize() int { + return 32 +} + +// EncodeTo encodes this value using the Encoder. +func (s String32) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeString(string(s)); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*String32)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *String32) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var v string + v, nTmp, err = d.DecodeString(32) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding String32: %s", err) + } + *s = String32(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s String32) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *String32) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*String32)(nil) + _ encoding.BinaryUnmarshaler = (*String32)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s String32) xdrType() {} + +var _ xdrType = (*String32)(nil) + +// String64 is an XDR Typedef defines as: +// +// typedef string string64<64>; +// +type String64 string + +// XDRMaxSize implements the Sized interface for String64 +func (e String64) XDRMaxSize() int { + return 64 +} + +// EncodeTo encodes this value using the Encoder. +func (s String64) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeString(string(s)); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*String64)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *String64) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var v string + v, nTmp, err = d.DecodeString(64) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding String64: %s", err) + } + *s = String64(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s String64) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *String64) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*String64)(nil) + _ encoding.BinaryUnmarshaler = (*String64)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s String64) xdrType() {} + +var _ xdrType = (*String64)(nil) + +// SequenceNumber is an XDR Typedef defines as: +// +// typedef int64 SequenceNumber; +// +type SequenceNumber Int64 + +// EncodeTo encodes this value using the Encoder. +func (s SequenceNumber) EncodeTo(e *xdr.Encoder) error { + var err error + if err = Int64(s).EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*SequenceNumber)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *SequenceNumber) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = (*Int64)(s).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SequenceNumber) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SequenceNumber) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SequenceNumber)(nil) + _ encoding.BinaryUnmarshaler = (*SequenceNumber)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SequenceNumber) xdrType() {} + +var _ xdrType = (*SequenceNumber)(nil) + +// TimePoint is an XDR Typedef defines as: +// +// typedef uint64 TimePoint; +// +type TimePoint Uint64 + +// EncodeTo encodes this value using the Encoder. +func (s TimePoint) EncodeTo(e *xdr.Encoder) error { + var err error + if err = Uint64(s).EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TimePoint)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TimePoint) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = (*Uint64)(s).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TimePoint) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TimePoint) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TimePoint)(nil) + _ encoding.BinaryUnmarshaler = (*TimePoint)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s TimePoint) xdrType() {} + +var _ xdrType = (*TimePoint)(nil) + +// DataValue is an XDR Typedef defines as: +// +// typedef opaque DataValue<64>; +// +type DataValue []byte + +// XDRMaxSize implements the Sized interface for DataValue +func (e DataValue) XDRMaxSize() int { + return 64 +} + +// EncodeTo encodes this value using the Encoder. +func (s DataValue) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeOpaque(s[:]); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*DataValue)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *DataValue) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + (*s), nTmp, err = d.DecodeOpaque(64) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DataValue: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s DataValue) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *DataValue) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*DataValue)(nil) + _ encoding.BinaryUnmarshaler = (*DataValue)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s DataValue) xdrType() {} + +var _ xdrType = (*DataValue)(nil) + +// PoolId is an XDR Typedef defines as: +// +// typedef Hash PoolID; +// +type PoolId Hash + +// EncodeTo encodes this value using the Encoder. +func (s *PoolId) EncodeTo(e *xdr.Encoder) error { + var err error + if err = (*Hash)(s).EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*PoolId)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *PoolId) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = (*Hash)(s).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PoolId) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PoolId) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*PoolId)(nil) + _ encoding.BinaryUnmarshaler = (*PoolId)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PoolId) xdrType() {} + +var _ xdrType = (*PoolId)(nil) + +// AssetCode4 is an XDR Typedef defines as: +// +// typedef opaque AssetCode4[4]; +// +type AssetCode4 [4]byte + +// XDRMaxSize implements the Sized interface for AssetCode4 +func (e AssetCode4) XDRMaxSize() int { + return 4 +} + +// EncodeTo encodes this value using the Encoder. +func (s *AssetCode4) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s[:]); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AssetCode4)(nil) + +// DecodeFrom decodes this value using the Decoder. 
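+//
+// Editorial note (illustrative sketch, not part of the generated bindings):
+// AssetCode4 is a fixed-size [4]byte array, so it is encoded as fixed opaque
+// data with no length prefix, and shorter codes carry trailing zero bytes:
+//
+//	var code AssetCode4
+//	copy(code[:], "USD") // code == AssetCode4{'U', 'S', 'D', 0}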
+func (s *AssetCode4) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetCode4: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AssetCode4) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AssetCode4) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AssetCode4)(nil) + _ encoding.BinaryUnmarshaler = (*AssetCode4)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AssetCode4) xdrType() {} + +var _ xdrType = (*AssetCode4)(nil) + +// AssetCode12 is an XDR Typedef defines as: +// +// typedef opaque AssetCode12[12]; +// +type AssetCode12 [12]byte + +// XDRMaxSize implements the Sized interface for AssetCode12 +func (e AssetCode12) XDRMaxSize() int { + return 12 +} + +// EncodeTo encodes this value using the Encoder. +func (s *AssetCode12) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s[:]); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AssetCode12)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *AssetCode12) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetCode12: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AssetCode12) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AssetCode12) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AssetCode12)(nil) + _ encoding.BinaryUnmarshaler = (*AssetCode12)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AssetCode12) xdrType() {} + +var _ xdrType = (*AssetCode12)(nil) + +// AssetType is an XDR Enum defines as: +// +// enum AssetType +// { +// ASSET_TYPE_NATIVE = 0, +// ASSET_TYPE_CREDIT_ALPHANUM4 = 1, +// ASSET_TYPE_CREDIT_ALPHANUM12 = 2, +// ASSET_TYPE_POOL_SHARE = 3 +// }; +// +type AssetType int32 + +const ( + AssetTypeAssetTypeNative AssetType = 0 + AssetTypeAssetTypeCreditAlphanum4 AssetType = 1 + AssetTypeAssetTypeCreditAlphanum12 AssetType = 2 + AssetTypeAssetTypePoolShare AssetType = 3 +) + +var assetTypeMap = map[int32]string{ + 0: "AssetTypeAssetTypeNative", + 1: "AssetTypeAssetTypeCreditAlphanum4", + 2: "AssetTypeAssetTypeCreditAlphanum12", + 3: "AssetTypeAssetTypePoolShare", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for AssetType +func (e AssetType) ValidEnum(v int32) bool { + _, ok := assetTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e AssetType) String() string { + name, _ := assetTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. 
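+//
+// Editorial note (illustrative sketch, not part of the generated bindings):
+// both EncodeTo and DecodeFrom reject discriminants that are missing from
+// assetTypeMap, and ValidEnum can be used to pre-check a raw value:
+//
+//	raw := int32(3)
+//	if AssetType(raw).ValidEnum(raw) {
+//		// 3 is ASSET_TYPE_POOL_SHARE
+//	}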
+func (e AssetType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := assetTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid AssetType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*AssetType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *AssetType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding AssetType: %s", err) + } + if _, ok := assetTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid AssetType enum value", v) + } + *e = AssetType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AssetType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AssetType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AssetType)(nil) + _ encoding.BinaryUnmarshaler = (*AssetType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AssetType) xdrType() {} + +var _ xdrType = (*AssetType)(nil) + +// AssetCode is an XDR Union defines as: +// +// union AssetCode switch (AssetType type) +// { +// case ASSET_TYPE_CREDIT_ALPHANUM4: +// AssetCode4 assetCode4; +// +// case ASSET_TYPE_CREDIT_ALPHANUM12: +// AssetCode12 assetCode12; +// +// // add other asset types here in the future +// }; +// +type AssetCode struct { + Type AssetType + AssetCode4 *AssetCode4 + AssetCode12 *AssetCode12 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u AssetCode) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of AssetCode +func (u AssetCode) ArmForSwitch(sw int32) (string, bool) { + switch AssetType(sw) { + case AssetTypeAssetTypeCreditAlphanum4: + return "AssetCode4", true + case AssetTypeAssetTypeCreditAlphanum12: + return "AssetCode12", true + } + return "-", false +} + +// NewAssetCode creates a new AssetCode. +func NewAssetCode(aType AssetType, value interface{}) (result AssetCode, err error) { + result.Type = aType + switch AssetType(aType) { + case AssetTypeAssetTypeCreditAlphanum4: + tv, ok := value.(AssetCode4) + if !ok { + err = fmt.Errorf("invalid value, must be AssetCode4") + return + } + result.AssetCode4 = &tv + case AssetTypeAssetTypeCreditAlphanum12: + tv, ok := value.(AssetCode12) + if !ok { + err = fmt.Errorf("invalid value, must be AssetCode12") + return + } + result.AssetCode12 = &tv + } + return +} + +// MustAssetCode4 retrieves the AssetCode4 value from the union, +// panicing if the value is not set. +func (u AssetCode) MustAssetCode4() AssetCode4 { + val, ok := u.GetAssetCode4() + + if !ok { + panic("arm AssetCode4 is not set") + } + + return val +} + +// GetAssetCode4 retrieves the AssetCode4 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u AssetCode) GetAssetCode4() (result AssetCode4, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AssetCode4" { + result = *u.AssetCode4 + ok = true + } + + return +} + +// MustAssetCode12 retrieves the AssetCode12 value from the union, +// panicing if the value is not set. 
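+//
+// Editorial note (illustrative sketch, not part of the generated bindings):
+// as with MustAssetCode4 above, the Must* accessors panic when the requested
+// arm is not set; the Get* accessors are the non-panicking alternative:
+//
+//	code, _ := NewAssetCode(AssetTypeAssetTypeCreditAlphanum4, AssetCode4{'U', 'S', 'D', 0})
+//	if c4, ok := code.GetAssetCode4(); ok {
+//		_ = c4 // safe: the ALPHANUM4 arm is set
+//	}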
+func (u AssetCode) MustAssetCode12() AssetCode12 { + val, ok := u.GetAssetCode12() + + if !ok { + panic("arm AssetCode12 is not set") + } + + return val +} + +// GetAssetCode12 retrieves the AssetCode12 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u AssetCode) GetAssetCode12() (result AssetCode12, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AssetCode12" { + result = *u.AssetCode12 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u AssetCode) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch AssetType(u.Type) { + case AssetTypeAssetTypeCreditAlphanum4: + if err = (*u.AssetCode4).EncodeTo(e); err != nil { + return err + } + return nil + case AssetTypeAssetTypeCreditAlphanum12: + if err = (*u.AssetCode12).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (AssetType) switch value '%d' is not valid for union AssetCode", u.Type) +} + +var _ decoderFrom = (*AssetCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *AssetCode) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetType: %s", err) + } + switch AssetType(u.Type) { + case AssetTypeAssetTypeCreditAlphanum4: + u.AssetCode4 = new(AssetCode4) + nTmp, err = (*u.AssetCode4).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetCode4: %s", err) + } + return n, nil + case AssetTypeAssetTypeCreditAlphanum12: + u.AssetCode12 = new(AssetCode12) + nTmp, err = (*u.AssetCode12).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetCode12: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union AssetCode has invalid Type (AssetType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AssetCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AssetCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AssetCode)(nil) + _ encoding.BinaryUnmarshaler = (*AssetCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AssetCode) xdrType() {} + +var _ xdrType = (*AssetCode)(nil) + +// AlphaNum4 is an XDR Struct defines as: +// +// struct AlphaNum4 +// { +// AssetCode4 assetCode; +// AccountID issuer; +// }; +// +type AlphaNum4 struct { + AssetCode AssetCode4 + Issuer AccountId +} + +// EncodeTo encodes this value using the Encoder. +func (s *AlphaNum4) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AssetCode.EncodeTo(e); err != nil { + return err + } + if err = s.Issuer.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AlphaNum4)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *AlphaNum4) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AssetCode.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetCode4: %s", err) + } + nTmp, err = s.Issuer.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AlphaNum4) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AlphaNum4) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AlphaNum4)(nil) + _ encoding.BinaryUnmarshaler = (*AlphaNum4)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AlphaNum4) xdrType() {} + +var _ xdrType = (*AlphaNum4)(nil) + +// AlphaNum12 is an XDR Struct defines as: +// +// struct AlphaNum12 +// { +// AssetCode12 assetCode; +// AccountID issuer; +// }; +// +type AlphaNum12 struct { + AssetCode AssetCode12 + Issuer AccountId +} + +// EncodeTo encodes this value using the Encoder. +func (s *AlphaNum12) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AssetCode.EncodeTo(e); err != nil { + return err + } + if err = s.Issuer.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AlphaNum12)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *AlphaNum12) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AssetCode.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetCode12: %s", err) + } + nTmp, err = s.Issuer.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AlphaNum12) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AlphaNum12) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AlphaNum12)(nil) + _ encoding.BinaryUnmarshaler = (*AlphaNum12)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s AlphaNum12) xdrType() {} + +var _ xdrType = (*AlphaNum12)(nil) + +// Asset is an XDR Union defines as: +// +// union Asset switch (AssetType type) +// { +// case ASSET_TYPE_NATIVE: // Not credit +// void; +// +// case ASSET_TYPE_CREDIT_ALPHANUM4: +// AlphaNum4 alphaNum4; +// +// case ASSET_TYPE_CREDIT_ALPHANUM12: +// AlphaNum12 alphaNum12; +// +// // add other asset types here in the future +// }; +// +type Asset struct { + Type AssetType + AlphaNum4 *AlphaNum4 + AlphaNum12 *AlphaNum12 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u Asset) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of Asset +func (u Asset) ArmForSwitch(sw int32) (string, bool) { + switch AssetType(sw) { + case AssetTypeAssetTypeNative: + return "", true + case AssetTypeAssetTypeCreditAlphanum4: + return "AlphaNum4", true + case AssetTypeAssetTypeCreditAlphanum12: + return "AlphaNum12", true + } + return "-", false +} + +// NewAsset creates a new Asset. +func NewAsset(aType AssetType, value interface{}) (result Asset, err error) { + result.Type = aType + switch AssetType(aType) { + case AssetTypeAssetTypeNative: + // void + case AssetTypeAssetTypeCreditAlphanum4: + tv, ok := value.(AlphaNum4) + if !ok { + err = fmt.Errorf("invalid value, must be AlphaNum4") + return + } + result.AlphaNum4 = &tv + case AssetTypeAssetTypeCreditAlphanum12: + tv, ok := value.(AlphaNum12) + if !ok { + err = fmt.Errorf("invalid value, must be AlphaNum12") + return + } + result.AlphaNum12 = &tv + } + return +} + +// MustAlphaNum4 retrieves the AlphaNum4 value from the union, +// panicing if the value is not set. +func (u Asset) MustAlphaNum4() AlphaNum4 { + val, ok := u.GetAlphaNum4() + + if !ok { + panic("arm AlphaNum4 is not set") + } + + return val +} + +// GetAlphaNum4 retrieves the AlphaNum4 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u Asset) GetAlphaNum4() (result AlphaNum4, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AlphaNum4" { + result = *u.AlphaNum4 + ok = true + } + + return +} + +// MustAlphaNum12 retrieves the AlphaNum12 value from the union, +// panicing if the value is not set. +func (u Asset) MustAlphaNum12() AlphaNum12 { + val, ok := u.GetAlphaNum12() + + if !ok { + panic("arm AlphaNum12 is not set") + } + + return val +} + +// GetAlphaNum12 retrieves the AlphaNum12 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u Asset) GetAlphaNum12() (result AlphaNum12, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AlphaNum12" { + result = *u.AlphaNum12 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u Asset) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch AssetType(u.Type) { + case AssetTypeAssetTypeNative: + // Void + return nil + case AssetTypeAssetTypeCreditAlphanum4: + if err = (*u.AlphaNum4).EncodeTo(e); err != nil { + return err + } + return nil + case AssetTypeAssetTypeCreditAlphanum12: + if err = (*u.AlphaNum12).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (AssetType) switch value '%d' is not valid for union Asset", u.Type) +} + +var _ decoderFrom = (*Asset)(nil) + +// DecodeFrom decodes this value using the Decoder. 
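+//
+// Editorial note (illustrative sketch, not part of the generated bindings):
+// the native arm of the Asset union is void, so NewAsset accepts a nil value
+// for it, while the credit arms take the matching AlphaNum struct:
+//
+//	native, _ := NewAsset(AssetTypeAssetTypeNative, nil)
+//	credit, _ := NewAsset(AssetTypeAssetTypeCreditAlphanum4, AlphaNum4{
+//		AssetCode: AssetCode4{'U', 'S', 'D', 0},
+//		Issuer:    issuer, // issuer is an AccountId obtained elsewhere
+//	})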
+func (u *Asset) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetType: %s", err) + } + switch AssetType(u.Type) { + case AssetTypeAssetTypeNative: + // Void + return n, nil + case AssetTypeAssetTypeCreditAlphanum4: + u.AlphaNum4 = new(AlphaNum4) + nTmp, err = (*u.AlphaNum4).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AlphaNum4: %s", err) + } + return n, nil + case AssetTypeAssetTypeCreditAlphanum12: + u.AlphaNum12 = new(AlphaNum12) + nTmp, err = (*u.AlphaNum12).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AlphaNum12: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union Asset has invalid Type (AssetType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Asset) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Asset) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Asset)(nil) + _ encoding.BinaryUnmarshaler = (*Asset)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Asset) xdrType() {} + +var _ xdrType = (*Asset)(nil) + +// Price is an XDR Struct defines as: +// +// struct Price +// { +// int32 n; // numerator +// int32 d; // denominator +// }; +// +type Price struct { + N Int32 + D Int32 +} + +// EncodeTo encodes this value using the Encoder. +func (s *Price) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.N.EncodeTo(e); err != nil { + return err + } + if err = s.D.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Price)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Price) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.N.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int32: %s", err) + } + nTmp, err = s.D.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int32: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Price) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Price) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Price)(nil) + _ encoding.BinaryUnmarshaler = (*Price)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Price) xdrType() {} + +var _ xdrType = (*Price)(nil) + +// Liabilities is an XDR Struct defines as: +// +// struct Liabilities +// { +// int64 buying; +// int64 selling; +// }; +// +type Liabilities struct { + Buying Int64 + Selling Int64 +} + +// EncodeTo encodes this value using the Encoder. 
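+//
+// Editorial aside (illustrative sketch, not part of the generated bindings):
+// Price above is an exact rational n/d and should only be converted to a
+// float for display purposes:
+//
+//	p := Price{N: 1, D: 4}
+//	approx := float64(p.N) / float64(p.D) // 0.25, display only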
+func (s *Liabilities) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Buying.EncodeTo(e); err != nil { + return err + } + if err = s.Selling.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Liabilities)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Liabilities) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Buying.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Selling.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Liabilities) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Liabilities) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Liabilities)(nil) + _ encoding.BinaryUnmarshaler = (*Liabilities)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Liabilities) xdrType() {} + +var _ xdrType = (*Liabilities)(nil) + +// ThresholdIndexes is an XDR Enum defines as: +// +// enum ThresholdIndexes +// { +// THRESHOLD_MASTER_WEIGHT = 0, +// THRESHOLD_LOW = 1, +// THRESHOLD_MED = 2, +// THRESHOLD_HIGH = 3 +// }; +// +type ThresholdIndexes int32 + +const ( + ThresholdIndexesThresholdMasterWeight ThresholdIndexes = 0 + ThresholdIndexesThresholdLow ThresholdIndexes = 1 + ThresholdIndexesThresholdMed ThresholdIndexes = 2 + ThresholdIndexesThresholdHigh ThresholdIndexes = 3 +) + +var thresholdIndexesMap = map[int32]string{ + 0: "ThresholdIndexesThresholdMasterWeight", + 1: "ThresholdIndexesThresholdLow", + 2: "ThresholdIndexesThresholdMed", + 3: "ThresholdIndexesThresholdHigh", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for ThresholdIndexes +func (e ThresholdIndexes) ValidEnum(v int32) bool { + _, ok := thresholdIndexesMap[v] + return ok +} + +// String returns the name of `e` +func (e ThresholdIndexes) String() string { + name, _ := thresholdIndexesMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e ThresholdIndexes) EncodeTo(enc *xdr.Encoder) error { + if _, ok := thresholdIndexesMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ThresholdIndexes enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ThresholdIndexes)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *ThresholdIndexes) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ThresholdIndexes: %s", err) + } + if _, ok := thresholdIndexesMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ThresholdIndexes enum value", v) + } + *e = ThresholdIndexes(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ThresholdIndexes) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *ThresholdIndexes) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ThresholdIndexes)(nil) + _ encoding.BinaryUnmarshaler = (*ThresholdIndexes)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ThresholdIndexes) xdrType() {} + +var _ xdrType = (*ThresholdIndexes)(nil) + +// LedgerEntryType is an XDR Enum defines as: +// +// enum LedgerEntryType +// { +// ACCOUNT = 0, +// TRUSTLINE = 1, +// OFFER = 2, +// DATA = 3, +// CLAIMABLE_BALANCE = 4, +// LIQUIDITY_POOL = 5 +// }; +// +type LedgerEntryType int32 + +const ( + LedgerEntryTypeAccount LedgerEntryType = 0 + LedgerEntryTypeTrustline LedgerEntryType = 1 + LedgerEntryTypeOffer LedgerEntryType = 2 + LedgerEntryTypeData LedgerEntryType = 3 + LedgerEntryTypeClaimableBalance LedgerEntryType = 4 + LedgerEntryTypeLiquidityPool LedgerEntryType = 5 +) + +var ledgerEntryTypeMap = map[int32]string{ + 0: "LedgerEntryTypeAccount", + 1: "LedgerEntryTypeTrustline", + 2: "LedgerEntryTypeOffer", + 3: "LedgerEntryTypeData", + 4: "LedgerEntryTypeClaimableBalance", + 5: "LedgerEntryTypeLiquidityPool", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for LedgerEntryType +func (e LedgerEntryType) ValidEnum(v int32) bool { + _, ok := ledgerEntryTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e LedgerEntryType) String() string { + name, _ := ledgerEntryTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e LedgerEntryType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := ledgerEntryTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid LedgerEntryType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*LedgerEntryType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *LedgerEntryType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryType: %s", err) + } + if _, ok := ledgerEntryTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid LedgerEntryType enum value", v) + } + *e = LedgerEntryType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerEntryType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerEntryType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerEntryType)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerEntryType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerEntryType) xdrType() {} + +var _ xdrType = (*LedgerEntryType)(nil) + +// Signer is an XDR Struct defines as: +// +// struct Signer +// { +// SignerKey key; +// uint32 weight; // really only need 1 byte +// }; +// +type Signer struct { + Key SignerKey + Weight Uint32 +} + +// EncodeTo encodes this value using the Encoder. 
+func (s *Signer) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Key.EncodeTo(e); err != nil { + return err + } + if err = s.Weight.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Signer)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Signer) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Key.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SignerKey: %s", err) + } + nTmp, err = s.Weight.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Signer) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Signer) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Signer)(nil) + _ encoding.BinaryUnmarshaler = (*Signer)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Signer) xdrType() {} + +var _ xdrType = (*Signer)(nil) + +// AccountFlags is an XDR Enum defines as: +// +// enum AccountFlags +// { // masks for each flag +// +// // Flags set on issuer accounts +// // TrustLines are created with authorized set to "false" requiring +// // the issuer to set it for each TrustLine +// AUTH_REQUIRED_FLAG = 0x1, +// // If set, the authorized flag in TrustLines can be cleared +// // otherwise, authorization cannot be revoked +// AUTH_REVOCABLE_FLAG = 0x2, +// // Once set, causes all AUTH_* flags to be read-only +// AUTH_IMMUTABLE_FLAG = 0x4, +// // Trustlines are created with clawback enabled set to "true", +// // and claimable balances created from those trustlines are created +// // with clawback enabled set to "true" +// AUTH_CLAWBACK_ENABLED_FLAG = 0x8 +// }; +// +type AccountFlags int32 + +const ( + AccountFlagsAuthRequiredFlag AccountFlags = 1 + AccountFlagsAuthRevocableFlag AccountFlags = 2 + AccountFlagsAuthImmutableFlag AccountFlags = 4 + AccountFlagsAuthClawbackEnabledFlag AccountFlags = 8 +) + +var accountFlagsMap = map[int32]string{ + 1: "AccountFlagsAuthRequiredFlag", + 2: "AccountFlagsAuthRevocableFlag", + 4: "AccountFlagsAuthImmutableFlag", + 8: "AccountFlagsAuthClawbackEnabledFlag", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for AccountFlags +func (e AccountFlags) ValidEnum(v int32) bool { + _, ok := accountFlagsMap[v] + return ok +} + +// String returns the name of `e` +func (e AccountFlags) String() string { + name, _ := accountFlagsMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e AccountFlags) EncodeTo(enc *xdr.Encoder) error { + if _, ok := accountFlagsMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid AccountFlags enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*AccountFlags)(nil) + +// DecodeFrom decodes this value using the Decoder. 
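+//
+// Editorial note (illustrative sketch, not part of the generated bindings):
+// AccountFlags values are single-bit masks; an account's Flags field (see
+// AccountEntry below) is assumed to hold their bitwise OR as a Uint32, so an
+// individual flag is tested with a mask:
+//
+//	// entry is an AccountEntry decoded elsewhere
+//	authRequired := entry.Flags&Uint32(AccountFlagsAuthRequiredFlag) != 0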
+func (e *AccountFlags) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding AccountFlags: %s", err) + } + if _, ok := accountFlagsMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid AccountFlags enum value", v) + } + *e = AccountFlags(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AccountFlags) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountFlags) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AccountFlags)(nil) + _ encoding.BinaryUnmarshaler = (*AccountFlags)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AccountFlags) xdrType() {} + +var _ xdrType = (*AccountFlags)(nil) + +// MaskAccountFlags is an XDR Const defines as: +// +// const MASK_ACCOUNT_FLAGS = 0x7; +// +const MaskAccountFlags = 0x7 + +// MaskAccountFlagsV17 is an XDR Const defines as: +// +// const MASK_ACCOUNT_FLAGS_V17 = 0xF; +// +const MaskAccountFlagsV17 = 0xF + +// MaxSigners is an XDR Const defines as: +// +// const MAX_SIGNERS = 20; +// +const MaxSigners = 20 + +// SponsorshipDescriptor is an XDR Typedef defines as: +// +// typedef AccountID* SponsorshipDescriptor; +// +type SponsorshipDescriptor = *AccountId + +// AccountEntryExtensionV2Ext is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type AccountEntryExtensionV2Ext struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u AccountEntryExtensionV2Ext) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of AccountEntryExtensionV2Ext +func (u AccountEntryExtensionV2Ext) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewAccountEntryExtensionV2Ext creates a new AccountEntryExtensionV2Ext. +func NewAccountEntryExtensionV2Ext(v int32, value interface{}) (result AccountEntryExtensionV2Ext, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u AccountEntryExtensionV2Ext) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union AccountEntryExtensionV2Ext", u.V) +} + +var _ decoderFrom = (*AccountEntryExtensionV2Ext)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *AccountEntryExtensionV2Ext) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union AccountEntryExtensionV2Ext has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s AccountEntryExtensionV2Ext) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountEntryExtensionV2Ext) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AccountEntryExtensionV2Ext)(nil) + _ encoding.BinaryUnmarshaler = (*AccountEntryExtensionV2Ext)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AccountEntryExtensionV2Ext) xdrType() {} + +var _ xdrType = (*AccountEntryExtensionV2Ext)(nil) + +// AccountEntryExtensionV2 is an XDR Struct defines as: +// +// struct AccountEntryExtensionV2 +// { +// uint32 numSponsored; +// uint32 numSponsoring; +// SponsorshipDescriptor signerSponsoringIDs; +// +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type AccountEntryExtensionV2 struct { + NumSponsored Uint32 + NumSponsoring Uint32 + SignerSponsoringIDs []SponsorshipDescriptor `xdrmaxsize:"20"` + Ext AccountEntryExtensionV2Ext +} + +// EncodeTo encodes this value using the Encoder. +func (s *AccountEntryExtensionV2) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.NumSponsored.EncodeTo(e); err != nil { + return err + } + if err = s.NumSponsoring.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.SignerSponsoringIDs))); err != nil { + return err + } + for i := 0; i < len(s.SignerSponsoringIDs); i++ { + if _, err = e.EncodeBool(s.SignerSponsoringIDs[i] != nil); err != nil { + return err + } + if s.SignerSponsoringIDs[i] != nil { + if err = s.SignerSponsoringIDs[i].EncodeTo(e); err != nil { + return err + } + } + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AccountEntryExtensionV2)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *AccountEntryExtensionV2) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.NumSponsored.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.NumSponsoring.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SponsorshipDescriptor: %s", err) + } + if l > 20 { + return n, fmt.Errorf("decoding SponsorshipDescriptor: data size (%d) exceeds size limit (20)", l) + } + s.SignerSponsoringIDs = nil + if l > 0 { + s.SignerSponsoringIDs = make([]SponsorshipDescriptor, l) + for i := uint32(0); i < l; i++ { + var eb bool + eb, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SponsorshipDescriptor: %s", err) + } + s.SignerSponsoringIDs[i] = nil + if eb { + s.SignerSponsoringIDs[i] = new(AccountId) + nTmp, err = s.SignerSponsoringIDs[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SponsorshipDescriptor: %s", err) + } + } + } + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountEntryExtensionV2Ext: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
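+//
+// Editorial note (illustrative sketch, not part of the generated bindings):
+// each element of SignerSponsoringIDs is an XDR optional, so EncodeTo and
+// DecodeFrom above wrap every entry in a presence flag, and a nil element
+// simply encodes as "absent":
+//
+//	// sponsor is an AccountId obtained elsewhere
+//	ext := AccountEntryExtensionV2{
+//		NumSponsored:        1,
+//		SignerSponsoringIDs: []SponsorshipDescriptor{nil, &sponsor},
+//	}
+//	raw, err := ext.MarshalBinary()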
+func (s AccountEntryExtensionV2) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountEntryExtensionV2) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AccountEntryExtensionV2)(nil) + _ encoding.BinaryUnmarshaler = (*AccountEntryExtensionV2)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AccountEntryExtensionV2) xdrType() {} + +var _ xdrType = (*AccountEntryExtensionV2)(nil) + +// AccountEntryExtensionV1Ext is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// case 2: +// AccountEntryExtensionV2 v2; +// } +// +type AccountEntryExtensionV1Ext struct { + V int32 + V2 *AccountEntryExtensionV2 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u AccountEntryExtensionV1Ext) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of AccountEntryExtensionV1Ext +func (u AccountEntryExtensionV1Ext) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + case 2: + return "V2", true + } + return "-", false +} + +// NewAccountEntryExtensionV1Ext creates a new AccountEntryExtensionV1Ext. +func NewAccountEntryExtensionV1Ext(v int32, value interface{}) (result AccountEntryExtensionV1Ext, err error) { + result.V = v + switch int32(v) { + case 0: + // void + case 2: + tv, ok := value.(AccountEntryExtensionV2) + if !ok { + err = fmt.Errorf("invalid value, must be AccountEntryExtensionV2") + return + } + result.V2 = &tv + } + return +} + +// MustV2 retrieves the V2 value from the union, +// panicing if the value is not set. +func (u AccountEntryExtensionV1Ext) MustV2() AccountEntryExtensionV2 { + val, ok := u.GetV2() + + if !ok { + panic("arm V2 is not set") + } + + return val +} + +// GetV2 retrieves the V2 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u AccountEntryExtensionV1Ext) GetV2() (result AccountEntryExtensionV2, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V2" { + result = *u.V2 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u AccountEntryExtensionV1Ext) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + case 2: + if err = (*u.V2).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union AccountEntryExtensionV1Ext", u.V) +} + +var _ decoderFrom = (*AccountEntryExtensionV1Ext)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (u *AccountEntryExtensionV1Ext) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + case 2: + u.V2 = new(AccountEntryExtensionV2) + nTmp, err = (*u.V2).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountEntryExtensionV2: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union AccountEntryExtensionV1Ext has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AccountEntryExtensionV1Ext) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountEntryExtensionV1Ext) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AccountEntryExtensionV1Ext)(nil) + _ encoding.BinaryUnmarshaler = (*AccountEntryExtensionV1Ext)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AccountEntryExtensionV1Ext) xdrType() {} + +var _ xdrType = (*AccountEntryExtensionV1Ext)(nil) + +// AccountEntryExtensionV1 is an XDR Struct defines as: +// +// struct AccountEntryExtensionV1 +// { +// Liabilities liabilities; +// +// union switch (int v) +// { +// case 0: +// void; +// case 2: +// AccountEntryExtensionV2 v2; +// } +// ext; +// }; +// +type AccountEntryExtensionV1 struct { + Liabilities Liabilities + Ext AccountEntryExtensionV1Ext +} + +// EncodeTo encodes this value using the Encoder. +func (s *AccountEntryExtensionV1) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Liabilities.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AccountEntryExtensionV1)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *AccountEntryExtensionV1) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Liabilities.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Liabilities: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountEntryExtensionV1Ext: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AccountEntryExtensionV1) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountEntryExtensionV1) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AccountEntryExtensionV1)(nil) + _ encoding.BinaryUnmarshaler = (*AccountEntryExtensionV1)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s AccountEntryExtensionV1) xdrType() {} + +var _ xdrType = (*AccountEntryExtensionV1)(nil) + +// AccountEntryExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// AccountEntryExtensionV1 v1; +// } +// +type AccountEntryExt struct { + V int32 + V1 *AccountEntryExtensionV1 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u AccountEntryExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of AccountEntryExt +func (u AccountEntryExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + case 1: + return "V1", true + } + return "-", false +} + +// NewAccountEntryExt creates a new AccountEntryExt. +func NewAccountEntryExt(v int32, value interface{}) (result AccountEntryExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + case 1: + tv, ok := value.(AccountEntryExtensionV1) + if !ok { + err = fmt.Errorf("invalid value, must be AccountEntryExtensionV1") + return + } + result.V1 = &tv + } + return +} + +// MustV1 retrieves the V1 value from the union, +// panicing if the value is not set. +func (u AccountEntryExt) MustV1() AccountEntryExtensionV1 { + val, ok := u.GetV1() + + if !ok { + panic("arm V1 is not set") + } + + return val +} + +// GetV1 retrieves the V1 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u AccountEntryExt) GetV1() (result AccountEntryExtensionV1, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V1" { + result = *u.V1 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u AccountEntryExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + case 1: + if err = (*u.V1).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union AccountEntryExt", u.V) +} + +var _ decoderFrom = (*AccountEntryExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *AccountEntryExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + case 1: + u.V1 = new(AccountEntryExtensionV1) + nTmp, err = (*u.V1).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountEntryExtensionV1: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union AccountEntryExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AccountEntryExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountEntryExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AccountEntryExt)(nil) + _ encoding.BinaryUnmarshaler = (*AccountEntryExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s AccountEntryExt) xdrType() {} + +var _ xdrType = (*AccountEntryExt)(nil) + +// AccountEntry is an XDR Struct defines as: +// +// struct AccountEntry +// { +// AccountID accountID; // master public key for this account +// int64 balance; // in stroops +// SequenceNumber seqNum; // last sequence number used for this account +// uint32 numSubEntries; // number of sub-entries this account has +// // drives the reserve +// AccountID* inflationDest; // Account to vote for during inflation +// uint32 flags; // see AccountFlags +// +// string32 homeDomain; // can be used for reverse federation and memo lookup +// +// // fields used for signatures +// // thresholds stores unsigned bytes: [weight of master|low|medium|high] +// Thresholds thresholds; +// +// Signer signers; // possible signers for this account +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// AccountEntryExtensionV1 v1; +// } +// ext; +// }; +// +type AccountEntry struct { + AccountId AccountId + Balance Int64 + SeqNum SequenceNumber + NumSubEntries Uint32 + InflationDest *AccountId + Flags Uint32 + HomeDomain String32 + Thresholds Thresholds + Signers []Signer `xdrmaxsize:"20"` + Ext AccountEntryExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *AccountEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AccountId.EncodeTo(e); err != nil { + return err + } + if err = s.Balance.EncodeTo(e); err != nil { + return err + } + if err = s.SeqNum.EncodeTo(e); err != nil { + return err + } + if err = s.NumSubEntries.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeBool(s.InflationDest != nil); err != nil { + return err + } + if s.InflationDest != nil { + if err = (*s.InflationDest).EncodeTo(e); err != nil { + return err + } + } + if err = s.Flags.EncodeTo(e); err != nil { + return err + } + if err = s.HomeDomain.EncodeTo(e); err != nil { + return err + } + if err = s.Thresholds.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Signers))); err != nil { + return err + } + for i := 0; i < len(s.Signers); i++ { + if err = s.Signers[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AccountEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *AccountEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AccountId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.Balance.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.SeqNum.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SequenceNumber: %s", err) + } + nTmp, err = s.NumSubEntries.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + var b bool + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + s.InflationDest = nil + if b { + s.InflationDest = new(AccountId) + nTmp, err = s.InflationDest.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + } + nTmp, err = s.Flags.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.HomeDomain.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding String32: %s", err) + } + nTmp, err = s.Thresholds.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Thresholds: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signer: %s", err) + } + if l > 20 { + return n, fmt.Errorf("decoding Signer: data size (%d) exceeds size limit (20)", l) + } + s.Signers = nil + if l > 0 { + s.Signers = make([]Signer, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Signers[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signer: %s", err) + } + } + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountEntryExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AccountEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AccountEntry)(nil) + _ encoding.BinaryUnmarshaler = (*AccountEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
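+//
+// Editorial note (illustrative sketch, not part of the generated bindings):
+// after decoding an AccountEntry, optional and extension fields still need
+// explicit checks:
+//
+//	// raw holds XDR bytes obtained elsewhere
+//	var entry AccountEntry
+//	if err := entry.UnmarshalBinary(raw); err != nil {
+//		// handle error
+//	}
+//	if entry.InflationDest != nil {
+//		// the optional inflation destination was present on the wire
+//	}
+//	if v1, ok := entry.Ext.GetV1(); ok {
+//		if v2, ok := v1.Ext.GetV2(); ok {
+//			_ = v2.NumSponsored // extension-v2 sponsorship counters
+//		}
+//	}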
+func (s AccountEntry) xdrType() {} + +var _ xdrType = (*AccountEntry)(nil) + +// TrustLineFlags is an XDR Enum defines as: +// +// enum TrustLineFlags +// { +// // issuer has authorized account to perform transactions with its credit +// AUTHORIZED_FLAG = 1, +// // issuer has authorized account to maintain and reduce liabilities for its +// // credit +// AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG = 2, +// // issuer has specified that it may clawback its credit, and that claimable +// // balances created with its credit may also be clawed back +// TRUSTLINE_CLAWBACK_ENABLED_FLAG = 4 +// }; +// +type TrustLineFlags int32 + +const ( + TrustLineFlagsAuthorizedFlag TrustLineFlags = 1 + TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag TrustLineFlags = 2 + TrustLineFlagsTrustlineClawbackEnabledFlag TrustLineFlags = 4 +) + +var trustLineFlagsMap = map[int32]string{ + 1: "TrustLineFlagsAuthorizedFlag", + 2: "TrustLineFlagsAuthorizedToMaintainLiabilitiesFlag", + 4: "TrustLineFlagsTrustlineClawbackEnabledFlag", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for TrustLineFlags +func (e TrustLineFlags) ValidEnum(v int32) bool { + _, ok := trustLineFlagsMap[v] + return ok +} + +// String returns the name of `e` +func (e TrustLineFlags) String() string { + name, _ := trustLineFlagsMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e TrustLineFlags) EncodeTo(enc *xdr.Encoder) error { + if _, ok := trustLineFlagsMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid TrustLineFlags enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*TrustLineFlags)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *TrustLineFlags) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding TrustLineFlags: %s", err) + } + if _, ok := trustLineFlagsMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid TrustLineFlags enum value", v) + } + *e = TrustLineFlags(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TrustLineFlags) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TrustLineFlags) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TrustLineFlags)(nil) + _ encoding.BinaryUnmarshaler = (*TrustLineFlags)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
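+//
+// Editorial note (illustrative sketch, not part of the generated bindings):
+// like AccountFlags, TrustLineFlags values are single-bit masks, and the
+// MASK_TRUSTLINE_FLAGS* constants defined below appear to bound which bits
+// are valid per protocol version:
+//
+//	flags := Uint32(TrustLineFlagsAuthorizedFlag | TrustLineFlagsTrustlineClawbackEnabledFlag)
+//	withinV17 := flags&^Uint32(MaskTrustlineFlagsV17) == 0 // no bits outside the v17 mask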
+func (s TrustLineFlags) xdrType() {} + +var _ xdrType = (*TrustLineFlags)(nil) + +// MaskTrustlineFlags is an XDR Const defines as: +// +// const MASK_TRUSTLINE_FLAGS = 1; +// +const MaskTrustlineFlags = 1 + +// MaskTrustlineFlagsV13 is an XDR Const defines as: +// +// const MASK_TRUSTLINE_FLAGS_V13 = 3; +// +const MaskTrustlineFlagsV13 = 3 + +// MaskTrustlineFlagsV17 is an XDR Const defines as: +// +// const MASK_TRUSTLINE_FLAGS_V17 = 7; +// +const MaskTrustlineFlagsV17 = 7 + +// LiquidityPoolType is an XDR Enum defines as: +// +// enum LiquidityPoolType +// { +// LIQUIDITY_POOL_CONSTANT_PRODUCT = 0 +// }; +// +type LiquidityPoolType int32 + +const ( + LiquidityPoolTypeLiquidityPoolConstantProduct LiquidityPoolType = 0 +) + +var liquidityPoolTypeMap = map[int32]string{ + 0: "LiquidityPoolTypeLiquidityPoolConstantProduct", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for LiquidityPoolType +func (e LiquidityPoolType) ValidEnum(v int32) bool { + _, ok := liquidityPoolTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e LiquidityPoolType) String() string { + name, _ := liquidityPoolTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e LiquidityPoolType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := liquidityPoolTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid LiquidityPoolType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*LiquidityPoolType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *LiquidityPoolType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolType: %s", err) + } + if _, ok := liquidityPoolTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid LiquidityPoolType enum value", v) + } + *e = LiquidityPoolType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolType)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
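The MASK_TRUSTLINE_FLAGS* constants above enumerate which flag bits are defined as of a given protocol version; any other bit is invalid. A hedged sketch of that validation, with an illustrative helper name:

// hasOnlyKnownTrustLineFlags reports whether flags uses only bits defined as
// of protocol 17 (MASK_TRUSTLINE_FLAGS_V17 = 7).
func hasOnlyKnownTrustLineFlags(flags Uint32) bool {
	return flags&^Uint32(MaskTrustlineFlagsV17) == 0
}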
+func (s LiquidityPoolType) xdrType() {} + +var _ xdrType = (*LiquidityPoolType)(nil) + +// TrustLineAsset is an XDR Union defines as: +// +// union TrustLineAsset switch (AssetType type) +// { +// case ASSET_TYPE_NATIVE: // Not credit +// void; +// +// case ASSET_TYPE_CREDIT_ALPHANUM4: +// AlphaNum4 alphaNum4; +// +// case ASSET_TYPE_CREDIT_ALPHANUM12: +// AlphaNum12 alphaNum12; +// +// case ASSET_TYPE_POOL_SHARE: +// PoolID liquidityPoolID; +// +// // add other asset types here in the future +// }; +// +type TrustLineAsset struct { + Type AssetType + AlphaNum4 *AlphaNum4 + AlphaNum12 *AlphaNum12 + LiquidityPoolId *PoolId +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TrustLineAsset) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TrustLineAsset +func (u TrustLineAsset) ArmForSwitch(sw int32) (string, bool) { + switch AssetType(sw) { + case AssetTypeAssetTypeNative: + return "", true + case AssetTypeAssetTypeCreditAlphanum4: + return "AlphaNum4", true + case AssetTypeAssetTypeCreditAlphanum12: + return "AlphaNum12", true + case AssetTypeAssetTypePoolShare: + return "LiquidityPoolId", true + } + return "-", false +} + +// NewTrustLineAsset creates a new TrustLineAsset. +func NewTrustLineAsset(aType AssetType, value interface{}) (result TrustLineAsset, err error) { + result.Type = aType + switch AssetType(aType) { + case AssetTypeAssetTypeNative: + // void + case AssetTypeAssetTypeCreditAlphanum4: + tv, ok := value.(AlphaNum4) + if !ok { + err = fmt.Errorf("invalid value, must be AlphaNum4") + return + } + result.AlphaNum4 = &tv + case AssetTypeAssetTypeCreditAlphanum12: + tv, ok := value.(AlphaNum12) + if !ok { + err = fmt.Errorf("invalid value, must be AlphaNum12") + return + } + result.AlphaNum12 = &tv + case AssetTypeAssetTypePoolShare: + tv, ok := value.(PoolId) + if !ok { + err = fmt.Errorf("invalid value, must be PoolId") + return + } + result.LiquidityPoolId = &tv + } + return +} + +// MustAlphaNum4 retrieves the AlphaNum4 value from the union, +// panicing if the value is not set. +func (u TrustLineAsset) MustAlphaNum4() AlphaNum4 { + val, ok := u.GetAlphaNum4() + + if !ok { + panic("arm AlphaNum4 is not set") + } + + return val +} + +// GetAlphaNum4 retrieves the AlphaNum4 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TrustLineAsset) GetAlphaNum4() (result AlphaNum4, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AlphaNum4" { + result = *u.AlphaNum4 + ok = true + } + + return +} + +// MustAlphaNum12 retrieves the AlphaNum12 value from the union, +// panicing if the value is not set. +func (u TrustLineAsset) MustAlphaNum12() AlphaNum12 { + val, ok := u.GetAlphaNum12() + + if !ok { + panic("arm AlphaNum12 is not set") + } + + return val +} + +// GetAlphaNum12 retrieves the AlphaNum12 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TrustLineAsset) GetAlphaNum12() (result AlphaNum12, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AlphaNum12" { + result = *u.AlphaNum12 + ok = true + } + + return +} + +// MustLiquidityPoolId retrieves the LiquidityPoolId value from the union, +// panicing if the value is not set. 
+func (u TrustLineAsset) MustLiquidityPoolId() PoolId { + val, ok := u.GetLiquidityPoolId() + + if !ok { + panic("arm LiquidityPoolId is not set") + } + + return val +} + +// GetLiquidityPoolId retrieves the LiquidityPoolId value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TrustLineAsset) GetLiquidityPoolId() (result PoolId, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "LiquidityPoolId" { + result = *u.LiquidityPoolId + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u TrustLineAsset) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch AssetType(u.Type) { + case AssetTypeAssetTypeNative: + // Void + return nil + case AssetTypeAssetTypeCreditAlphanum4: + if err = (*u.AlphaNum4).EncodeTo(e); err != nil { + return err + } + return nil + case AssetTypeAssetTypeCreditAlphanum12: + if err = (*u.AlphaNum12).EncodeTo(e); err != nil { + return err + } + return nil + case AssetTypeAssetTypePoolShare: + if err = (*u.LiquidityPoolId).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (AssetType) switch value '%d' is not valid for union TrustLineAsset", u.Type) +} + +var _ decoderFrom = (*TrustLineAsset)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TrustLineAsset) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetType: %s", err) + } + switch AssetType(u.Type) { + case AssetTypeAssetTypeNative: + // Void + return n, nil + case AssetTypeAssetTypeCreditAlphanum4: + u.AlphaNum4 = new(AlphaNum4) + nTmp, err = (*u.AlphaNum4).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AlphaNum4: %s", err) + } + return n, nil + case AssetTypeAssetTypeCreditAlphanum12: + u.AlphaNum12 = new(AlphaNum12) + nTmp, err = (*u.AlphaNum12).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AlphaNum12: %s", err) + } + return n, nil + case AssetTypeAssetTypePoolShare: + u.LiquidityPoolId = new(PoolId) + nTmp, err = (*u.LiquidityPoolId).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PoolId: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union TrustLineAsset has invalid Type (AssetType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TrustLineAsset) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TrustLineAsset) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TrustLineAsset)(nil) + _ encoding.BinaryUnmarshaler = (*TrustLineAsset)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
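NewTrustLineAsset picks the union arm from the AssetType discriminant and expects a value of the matching Go type; the Must*/Get* accessors then read it back. A hedged sketch for the pool-share arm, assuming it lives in the same package; the helper name is illustrative and the PoolId is a placeholder:

// poolShareTrustLineAsset wraps a liquidity pool ID in the
// ASSET_TYPE_POOL_SHARE arm; credit assets use the AlphaNum4/AlphaNum12 arms
// of the same constructor instead.
func poolShareTrustLineAsset(poolID PoolId) (TrustLineAsset, error) {
	asset, err := NewTrustLineAsset(AssetTypeAssetTypePoolShare, poolID)
	if err != nil {
		return TrustLineAsset{}, err
	}
	// GetLiquidityPoolId is the non-panicking accessor; MustLiquidityPoolId
	// panics when a different arm is selected.
	if _, ok := asset.GetLiquidityPoolId(); !ok {
		return TrustLineAsset{}, fmt.Errorf("pool share arm not set")
	}
	return asset, nil
}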
+func (s TrustLineAsset) xdrType() {} + +var _ xdrType = (*TrustLineAsset)(nil) + +// TrustLineEntryExtensionV2Ext is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type TrustLineEntryExtensionV2Ext struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TrustLineEntryExtensionV2Ext) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TrustLineEntryExtensionV2Ext +func (u TrustLineEntryExtensionV2Ext) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewTrustLineEntryExtensionV2Ext creates a new TrustLineEntryExtensionV2Ext. +func NewTrustLineEntryExtensionV2Ext(v int32, value interface{}) (result TrustLineEntryExtensionV2Ext, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u TrustLineEntryExtensionV2Ext) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union TrustLineEntryExtensionV2Ext", u.V) +} + +var _ decoderFrom = (*TrustLineEntryExtensionV2Ext)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TrustLineEntryExtensionV2Ext) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union TrustLineEntryExtensionV2Ext has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TrustLineEntryExtensionV2Ext) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TrustLineEntryExtensionV2Ext) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TrustLineEntryExtensionV2Ext)(nil) + _ encoding.BinaryUnmarshaler = (*TrustLineEntryExtensionV2Ext)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TrustLineEntryExtensionV2Ext) xdrType() {} + +var _ xdrType = (*TrustLineEntryExtensionV2Ext)(nil) + +// TrustLineEntryExtensionV2 is an XDR Struct defines as: +// +// struct TrustLineEntryExtensionV2 +// { +// int32 liquidityPoolUseCount; +// +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type TrustLineEntryExtensionV2 struct { + LiquidityPoolUseCount Int32 + Ext TrustLineEntryExtensionV2Ext +} + +// EncodeTo encodes this value using the Encoder. +func (s *TrustLineEntryExtensionV2) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LiquidityPoolUseCount.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TrustLineEntryExtensionV2)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *TrustLineEntryExtensionV2) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LiquidityPoolUseCount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int32: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TrustLineEntryExtensionV2Ext: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TrustLineEntryExtensionV2) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TrustLineEntryExtensionV2) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TrustLineEntryExtensionV2)(nil) + _ encoding.BinaryUnmarshaler = (*TrustLineEntryExtensionV2)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TrustLineEntryExtensionV2) xdrType() {} + +var _ xdrType = (*TrustLineEntryExtensionV2)(nil) + +// TrustLineEntryV1Ext is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// case 2: +// TrustLineEntryExtensionV2 v2; +// } +// +type TrustLineEntryV1Ext struct { + V int32 + V2 *TrustLineEntryExtensionV2 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TrustLineEntryV1Ext) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TrustLineEntryV1Ext +func (u TrustLineEntryV1Ext) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + case 2: + return "V2", true + } + return "-", false +} + +// NewTrustLineEntryV1Ext creates a new TrustLineEntryV1Ext. +func NewTrustLineEntryV1Ext(v int32, value interface{}) (result TrustLineEntryV1Ext, err error) { + result.V = v + switch int32(v) { + case 0: + // void + case 2: + tv, ok := value.(TrustLineEntryExtensionV2) + if !ok { + err = fmt.Errorf("invalid value, must be TrustLineEntryExtensionV2") + return + } + result.V2 = &tv + } + return +} + +// MustV2 retrieves the V2 value from the union, +// panicing if the value is not set. +func (u TrustLineEntryV1Ext) MustV2() TrustLineEntryExtensionV2 { + val, ok := u.GetV2() + + if !ok { + panic("arm V2 is not set") + } + + return val +} + +// GetV2 retrieves the V2 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TrustLineEntryV1Ext) GetV2() (result TrustLineEntryExtensionV2, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V2" { + result = *u.V2 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u TrustLineEntryV1Ext) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + case 2: + if err = (*u.V2).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union TrustLineEntryV1Ext", u.V) +} + +var _ decoderFrom = (*TrustLineEntryV1Ext)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (u *TrustLineEntryV1Ext) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + case 2: + u.V2 = new(TrustLineEntryExtensionV2) + nTmp, err = (*u.V2).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TrustLineEntryExtensionV2: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union TrustLineEntryV1Ext has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TrustLineEntryV1Ext) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TrustLineEntryV1Ext) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TrustLineEntryV1Ext)(nil) + _ encoding.BinaryUnmarshaler = (*TrustLineEntryV1Ext)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TrustLineEntryV1Ext) xdrType() {} + +var _ xdrType = (*TrustLineEntryV1Ext)(nil) + +// TrustLineEntryV1 is an XDR NestedStruct defines as: +// +// struct +// { +// Liabilities liabilities; +// +// union switch (int v) +// { +// case 0: +// void; +// case 2: +// TrustLineEntryExtensionV2 v2; +// } +// ext; +// } +// +type TrustLineEntryV1 struct { + Liabilities Liabilities + Ext TrustLineEntryV1Ext +} + +// EncodeTo encodes this value using the Encoder. +func (s *TrustLineEntryV1) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Liabilities.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TrustLineEntryV1)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TrustLineEntryV1) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Liabilities.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Liabilities: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TrustLineEntryV1Ext: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TrustLineEntryV1) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TrustLineEntryV1) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TrustLineEntryV1)(nil) + _ encoding.BinaryUnmarshaler = (*TrustLineEntryV1)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
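The nested ext unions follow the usual XDR extension idiom: each level's constructor takes the discriminant plus a value whose Go type matches the selected arm. A hedged sketch assembling a TrustLineEntryV1 whose ext carries the v2 liquidity-pool-use counter; the helper name is illustrative:

// trustLineV1WithPoolCount builds the v1 extension with liabilities and a v2
// extension recording how many liquidity pools use the trust line.
func trustLineV1WithPoolCount(liab Liabilities, poolUses Int32) (TrustLineEntryV1, error) {
	ext, err := NewTrustLineEntryV1Ext(2, TrustLineEntryExtensionV2{
		LiquidityPoolUseCount: poolUses,
		// Ext is left at its zero value, which is the only valid arm (v = 0).
	})
	if err != nil {
		return TrustLineEntryV1{}, err
	}
	return TrustLineEntryV1{Liabilities: liab, Ext: ext}, nil
}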
+func (s TrustLineEntryV1) xdrType() {} + +var _ xdrType = (*TrustLineEntryV1)(nil) + +// TrustLineEntryExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// struct +// { +// Liabilities liabilities; +// +// union switch (int v) +// { +// case 0: +// void; +// case 2: +// TrustLineEntryExtensionV2 v2; +// } +// ext; +// } v1; +// } +// +type TrustLineEntryExt struct { + V int32 + V1 *TrustLineEntryV1 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TrustLineEntryExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TrustLineEntryExt +func (u TrustLineEntryExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + case 1: + return "V1", true + } + return "-", false +} + +// NewTrustLineEntryExt creates a new TrustLineEntryExt. +func NewTrustLineEntryExt(v int32, value interface{}) (result TrustLineEntryExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + case 1: + tv, ok := value.(TrustLineEntryV1) + if !ok { + err = fmt.Errorf("invalid value, must be TrustLineEntryV1") + return + } + result.V1 = &tv + } + return +} + +// MustV1 retrieves the V1 value from the union, +// panicing if the value is not set. +func (u TrustLineEntryExt) MustV1() TrustLineEntryV1 { + val, ok := u.GetV1() + + if !ok { + panic("arm V1 is not set") + } + + return val +} + +// GetV1 retrieves the V1 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TrustLineEntryExt) GetV1() (result TrustLineEntryV1, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V1" { + result = *u.V1 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u TrustLineEntryExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + case 1: + if err = (*u.V1).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union TrustLineEntryExt", u.V) +} + +var _ decoderFrom = (*TrustLineEntryExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TrustLineEntryExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + case 1: + u.V1 = new(TrustLineEntryV1) + nTmp, err = (*u.V1).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TrustLineEntryV1: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union TrustLineEntryExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TrustLineEntryExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *TrustLineEntryExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TrustLineEntryExt)(nil) + _ encoding.BinaryUnmarshaler = (*TrustLineEntryExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TrustLineEntryExt) xdrType() {} + +var _ xdrType = (*TrustLineEntryExt)(nil) + +// TrustLineEntry is an XDR Struct defines as: +// +// struct TrustLineEntry +// { +// AccountID accountID; // account this trustline belongs to +// TrustLineAsset asset; // type of asset (with issuer) +// int64 balance; // how much of this asset the user has. +// // Asset defines the unit for this; +// +// int64 limit; // balance cannot be above this +// uint32 flags; // see TrustLineFlags +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// struct +// { +// Liabilities liabilities; +// +// union switch (int v) +// { +// case 0: +// void; +// case 2: +// TrustLineEntryExtensionV2 v2; +// } +// ext; +// } v1; +// } +// ext; +// }; +// +type TrustLineEntry struct { + AccountId AccountId + Asset TrustLineAsset + Balance Int64 + Limit Int64 + Flags Uint32 + Ext TrustLineEntryExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *TrustLineEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AccountId.EncodeTo(e); err != nil { + return err + } + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + if err = s.Balance.EncodeTo(e); err != nil { + return err + } + if err = s.Limit.EncodeTo(e); err != nil { + return err + } + if err = s.Flags.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TrustLineEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TrustLineEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AccountId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TrustLineAsset: %s", err) + } + nTmp, err = s.Balance.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Limit.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Flags.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TrustLineEntryExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TrustLineEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TrustLineEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TrustLineEntry)(nil) + _ encoding.BinaryUnmarshaler = (*TrustLineEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
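Putting the pieces together: a TrustLineEntry that tracks liabilities wraps the v1 struct in the outer ext with discriminant 1, and the flags field reuses the TrustLineFlags bits shown earlier. A hedged sketch; the helper name and field values are illustrative:

// trustLineWithLiabilities returns an authorized TrustLineEntry whose ext
// carries the v1 liabilities extension.
func trustLineWithLiabilities(account AccountId, asset TrustLineAsset, balance, limit Int64, liab Liabilities) (TrustLineEntry, error) {
	ext, err := NewTrustLineEntryExt(1, TrustLineEntryV1{Liabilities: liab})
	if err != nil {
		return TrustLineEntry{}, err
	}
	return TrustLineEntry{
		AccountId: account,
		Asset:     asset,
		Balance:   balance,
		Limit:     limit,
		Flags:     Uint32(TrustLineFlagsAuthorizedFlag),
		Ext:       ext,
	}, nil
}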
+func (s TrustLineEntry) xdrType() {} + +var _ xdrType = (*TrustLineEntry)(nil) + +// OfferEntryFlags is an XDR Enum defines as: +// +// enum OfferEntryFlags +// { +// // issuer has authorized account to perform transactions with its credit +// PASSIVE_FLAG = 1 +// }; +// +type OfferEntryFlags int32 + +const ( + OfferEntryFlagsPassiveFlag OfferEntryFlags = 1 +) + +var offerEntryFlagsMap = map[int32]string{ + 1: "OfferEntryFlagsPassiveFlag", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for OfferEntryFlags +func (e OfferEntryFlags) ValidEnum(v int32) bool { + _, ok := offerEntryFlagsMap[v] + return ok +} + +// String returns the name of `e` +func (e OfferEntryFlags) String() string { + name, _ := offerEntryFlagsMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e OfferEntryFlags) EncodeTo(enc *xdr.Encoder) error { + if _, ok := offerEntryFlagsMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid OfferEntryFlags enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*OfferEntryFlags)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *OfferEntryFlags) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding OfferEntryFlags: %s", err) + } + if _, ok := offerEntryFlagsMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid OfferEntryFlags enum value", v) + } + *e = OfferEntryFlags(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s OfferEntryFlags) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *OfferEntryFlags) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*OfferEntryFlags)(nil) + _ encoding.BinaryUnmarshaler = (*OfferEntryFlags)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s OfferEntryFlags) xdrType() {} + +var _ xdrType = (*OfferEntryFlags)(nil) + +// MaskOfferentryFlags is an XDR Const defines as: +// +// const MASK_OFFERENTRY_FLAGS = 1; +// +const MaskOfferentryFlags = 1 + +// OfferEntryExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type OfferEntryExt struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u OfferEntryExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of OfferEntryExt +func (u OfferEntryExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewOfferEntryExt creates a new OfferEntryExt. +func NewOfferEntryExt(v int32, value interface{}) (result OfferEntryExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. 
+func (u OfferEntryExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union OfferEntryExt", u.V) +} + +var _ decoderFrom = (*OfferEntryExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *OfferEntryExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union OfferEntryExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s OfferEntryExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *OfferEntryExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*OfferEntryExt)(nil) + _ encoding.BinaryUnmarshaler = (*OfferEntryExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s OfferEntryExt) xdrType() {} + +var _ xdrType = (*OfferEntryExt)(nil) + +// OfferEntry is an XDR Struct defines as: +// +// struct OfferEntry +// { +// AccountID sellerID; +// int64 offerID; +// Asset selling; // A +// Asset buying; // B +// int64 amount; // amount of A +// +// /* price for this offer: +// price of A in terms of B +// price=AmountB/AmountA=priceNumerator/priceDenominator +// price is after fees +// */ +// Price price; +// uint32 flags; // see OfferEntryFlags +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type OfferEntry struct { + SellerId AccountId + OfferId Int64 + Selling Asset + Buying Asset + Amount Int64 + Price Price + Flags Uint32 + Ext OfferEntryExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *OfferEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SellerId.EncodeTo(e); err != nil { + return err + } + if err = s.OfferId.EncodeTo(e); err != nil { + return err + } + if err = s.Selling.EncodeTo(e); err != nil { + return err + } + if err = s.Buying.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + if err = s.Price.EncodeTo(e); err != nil { + return err + } + if err = s.Flags.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*OfferEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *OfferEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SellerId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.OfferId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Selling.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Buying.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Price.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Price: %s", err) + } + nTmp, err = s.Flags.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OfferEntryExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s OfferEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *OfferEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*OfferEntry)(nil) + _ encoding.BinaryUnmarshaler = (*OfferEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s OfferEntry) xdrType() {} + +var _ xdrType = (*OfferEntry)(nil) + +// DataEntryExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type DataEntryExt struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u DataEntryExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of DataEntryExt +func (u DataEntryExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewDataEntryExt creates a new DataEntryExt. +func NewDataEntryExt(v int32, value interface{}) (result DataEntryExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u DataEntryExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union DataEntryExt", u.V) +} + +var _ decoderFrom = (*DataEntryExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *DataEntryExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union DataEntryExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s DataEntryExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *DataEntryExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*DataEntryExt)(nil) + _ encoding.BinaryUnmarshaler = (*DataEntryExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s DataEntryExt) xdrType() {} + +var _ xdrType = (*DataEntryExt)(nil) + +// DataEntry is an XDR Struct defines as: +// +// struct DataEntry +// { +// AccountID accountID; // account this data belongs to +// string64 dataName; +// DataValue dataValue; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type DataEntry struct { + AccountId AccountId + DataName String64 + DataValue DataValue + Ext DataEntryExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *DataEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AccountId.EncodeTo(e); err != nil { + return err + } + if err = s.DataName.EncodeTo(e); err != nil { + return err + } + if err = s.DataValue.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*DataEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *DataEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AccountId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.DataName.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding String64: %s", err) + } + nTmp, err = s.DataValue.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DataValue: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DataEntryExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s DataEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *DataEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*DataEntry)(nil) + _ encoding.BinaryUnmarshaler = (*DataEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
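A DataEntry is just a named value attached to an account; only the reserved ext union needs a constructor, and 0 is its sole valid arm. A hedged sketch with an illustrative helper name:

// newDataEntry builds a DataEntry for account with the reserved ext left at
// its only valid arm (v = 0).
func newDataEntry(account AccountId, name String64, value DataValue) (DataEntry, error) {
	ext, err := NewDataEntryExt(0, nil)
	if err != nil {
		return DataEntry{}, err
	}
	return DataEntry{AccountId: account, DataName: name, DataValue: value, Ext: ext}, nil
}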
+func (s DataEntry) xdrType() {} + +var _ xdrType = (*DataEntry)(nil) + +// ClaimPredicateType is an XDR Enum defines as: +// +// enum ClaimPredicateType +// { +// CLAIM_PREDICATE_UNCONDITIONAL = 0, +// CLAIM_PREDICATE_AND = 1, +// CLAIM_PREDICATE_OR = 2, +// CLAIM_PREDICATE_NOT = 3, +// CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME = 4, +// CLAIM_PREDICATE_BEFORE_RELATIVE_TIME = 5 +// }; +// +type ClaimPredicateType int32 + +const ( + ClaimPredicateTypeClaimPredicateUnconditional ClaimPredicateType = 0 + ClaimPredicateTypeClaimPredicateAnd ClaimPredicateType = 1 + ClaimPredicateTypeClaimPredicateOr ClaimPredicateType = 2 + ClaimPredicateTypeClaimPredicateNot ClaimPredicateType = 3 + ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime ClaimPredicateType = 4 + ClaimPredicateTypeClaimPredicateBeforeRelativeTime ClaimPredicateType = 5 +) + +var claimPredicateTypeMap = map[int32]string{ + 0: "ClaimPredicateTypeClaimPredicateUnconditional", + 1: "ClaimPredicateTypeClaimPredicateAnd", + 2: "ClaimPredicateTypeClaimPredicateOr", + 3: "ClaimPredicateTypeClaimPredicateNot", + 4: "ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime", + 5: "ClaimPredicateTypeClaimPredicateBeforeRelativeTime", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for ClaimPredicateType +func (e ClaimPredicateType) ValidEnum(v int32) bool { + _, ok := claimPredicateTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e ClaimPredicateType) String() string { + name, _ := claimPredicateTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e ClaimPredicateType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := claimPredicateTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ClaimPredicateType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ClaimPredicateType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *ClaimPredicateType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ClaimPredicateType: %s", err) + } + if _, ok := claimPredicateTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ClaimPredicateType enum value", v) + } + *e = ClaimPredicateType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimPredicateType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimPredicateType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimPredicateType)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimPredicateType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ClaimPredicateType) xdrType() {} + +var _ xdrType = (*ClaimPredicateType)(nil) + +// ClaimPredicate is an XDR Union defines as: +// +// union ClaimPredicate switch (ClaimPredicateType type) +// { +// case CLAIM_PREDICATE_UNCONDITIONAL: +// void; +// case CLAIM_PREDICATE_AND: +// ClaimPredicate andPredicates<2>; +// case CLAIM_PREDICATE_OR: +// ClaimPredicate orPredicates<2>; +// case CLAIM_PREDICATE_NOT: +// ClaimPredicate* notPredicate; +// case CLAIM_PREDICATE_BEFORE_ABSOLUTE_TIME: +// int64 absBefore; // Predicate will be true if closeTime < absBefore +// case CLAIM_PREDICATE_BEFORE_RELATIVE_TIME: +// int64 relBefore; // Seconds since closeTime of the ledger in which the +// // ClaimableBalanceEntry was created +// }; +// +type ClaimPredicate struct { + Type ClaimPredicateType + AndPredicates *[]ClaimPredicate `xdrmaxsize:"2"` + OrPredicates *[]ClaimPredicate `xdrmaxsize:"2"` + NotPredicate **ClaimPredicate + AbsBefore *Int64 + RelBefore *Int64 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u ClaimPredicate) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of ClaimPredicate +func (u ClaimPredicate) ArmForSwitch(sw int32) (string, bool) { + switch ClaimPredicateType(sw) { + case ClaimPredicateTypeClaimPredicateUnconditional: + return "", true + case ClaimPredicateTypeClaimPredicateAnd: + return "AndPredicates", true + case ClaimPredicateTypeClaimPredicateOr: + return "OrPredicates", true + case ClaimPredicateTypeClaimPredicateNot: + return "NotPredicate", true + case ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime: + return "AbsBefore", true + case ClaimPredicateTypeClaimPredicateBeforeRelativeTime: + return "RelBefore", true + } + return "-", false +} + +// NewClaimPredicate creates a new ClaimPredicate. +func NewClaimPredicate(aType ClaimPredicateType, value interface{}) (result ClaimPredicate, err error) { + result.Type = aType + switch ClaimPredicateType(aType) { + case ClaimPredicateTypeClaimPredicateUnconditional: + // void + case ClaimPredicateTypeClaimPredicateAnd: + tv, ok := value.([]ClaimPredicate) + if !ok { + err = fmt.Errorf("invalid value, must be []ClaimPredicate") + return + } + result.AndPredicates = &tv + case ClaimPredicateTypeClaimPredicateOr: + tv, ok := value.([]ClaimPredicate) + if !ok { + err = fmt.Errorf("invalid value, must be []ClaimPredicate") + return + } + result.OrPredicates = &tv + case ClaimPredicateTypeClaimPredicateNot: + tv, ok := value.(*ClaimPredicate) + if !ok { + err = fmt.Errorf("invalid value, must be *ClaimPredicate") + return + } + result.NotPredicate = &tv + case ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime: + tv, ok := value.(Int64) + if !ok { + err = fmt.Errorf("invalid value, must be Int64") + return + } + result.AbsBefore = &tv + case ClaimPredicateTypeClaimPredicateBeforeRelativeTime: + tv, ok := value.(Int64) + if !ok { + err = fmt.Errorf("invalid value, must be Int64") + return + } + result.RelBefore = &tv + } + return +} + +// MustAndPredicates retrieves the AndPredicates value from the union, +// panicing if the value is not set. +func (u ClaimPredicate) MustAndPredicates() []ClaimPredicate { + val, ok := u.GetAndPredicates() + + if !ok { + panic("arm AndPredicates is not set") + } + + return val +} + +// GetAndPredicates retrieves the AndPredicates value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u ClaimPredicate) GetAndPredicates() (result []ClaimPredicate, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AndPredicates" { + result = *u.AndPredicates + ok = true + } + + return +} + +// MustOrPredicates retrieves the OrPredicates value from the union, +// panicing if the value is not set. +func (u ClaimPredicate) MustOrPredicates() []ClaimPredicate { + val, ok := u.GetOrPredicates() + + if !ok { + panic("arm OrPredicates is not set") + } + + return val +} + +// GetOrPredicates retrieves the OrPredicates value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ClaimPredicate) GetOrPredicates() (result []ClaimPredicate, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "OrPredicates" { + result = *u.OrPredicates + ok = true + } + + return +} + +// MustNotPredicate retrieves the NotPredicate value from the union, +// panicing if the value is not set. +func (u ClaimPredicate) MustNotPredicate() *ClaimPredicate { + val, ok := u.GetNotPredicate() + + if !ok { + panic("arm NotPredicate is not set") + } + + return val +} + +// GetNotPredicate retrieves the NotPredicate value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ClaimPredicate) GetNotPredicate() (result *ClaimPredicate, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "NotPredicate" { + result = *u.NotPredicate + ok = true + } + + return +} + +// MustAbsBefore retrieves the AbsBefore value from the union, +// panicing if the value is not set. +func (u ClaimPredicate) MustAbsBefore() Int64 { + val, ok := u.GetAbsBefore() + + if !ok { + panic("arm AbsBefore is not set") + } + + return val +} + +// GetAbsBefore retrieves the AbsBefore value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ClaimPredicate) GetAbsBefore() (result Int64, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AbsBefore" { + result = *u.AbsBefore + ok = true + } + + return +} + +// MustRelBefore retrieves the RelBefore value from the union, +// panicing if the value is not set. +func (u ClaimPredicate) MustRelBefore() Int64 { + val, ok := u.GetRelBefore() + + if !ok { + panic("arm RelBefore is not set") + } + + return val +} + +// GetRelBefore retrieves the RelBefore value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ClaimPredicate) GetRelBefore() (result Int64, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "RelBefore" { + result = *u.RelBefore + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
+func (u ClaimPredicate) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch ClaimPredicateType(u.Type) { + case ClaimPredicateTypeClaimPredicateUnconditional: + // Void + return nil + case ClaimPredicateTypeClaimPredicateAnd: + if _, err = e.EncodeUint(uint32(len((*u.AndPredicates)))); err != nil { + return err + } + for i := 0; i < len((*u.AndPredicates)); i++ { + if err = (*u.AndPredicates)[i].EncodeTo(e); err != nil { + return err + } + } + return nil + case ClaimPredicateTypeClaimPredicateOr: + if _, err = e.EncodeUint(uint32(len((*u.OrPredicates)))); err != nil { + return err + } + for i := 0; i < len((*u.OrPredicates)); i++ { + if err = (*u.OrPredicates)[i].EncodeTo(e); err != nil { + return err + } + } + return nil + case ClaimPredicateTypeClaimPredicateNot: + if _, err = e.EncodeBool((*u.NotPredicate) != nil); err != nil { + return err + } + if (*u.NotPredicate) != nil { + if err = (*(*u.NotPredicate)).EncodeTo(e); err != nil { + return err + } + } + return nil + case ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime: + if err = (*u.AbsBefore).EncodeTo(e); err != nil { + return err + } + return nil + case ClaimPredicateTypeClaimPredicateBeforeRelativeTime: + if err = (*u.RelBefore).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (ClaimPredicateType) switch value '%d' is not valid for union ClaimPredicate", u.Type) +} + +var _ decoderFrom = (*ClaimPredicate)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *ClaimPredicate) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimPredicateType: %s", err) + } + switch ClaimPredicateType(u.Type) { + case ClaimPredicateTypeClaimPredicateUnconditional: + // Void + return n, nil + case ClaimPredicateTypeClaimPredicateAnd: + u.AndPredicates = new([]ClaimPredicate) + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimPredicate: %s", err) + } + if l > 2 { + return n, fmt.Errorf("decoding ClaimPredicate: data size (%d) exceeds size limit (2)", l) + } + (*u.AndPredicates) = nil + if l > 0 { + (*u.AndPredicates) = make([]ClaimPredicate, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*u.AndPredicates)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimPredicate: %s", err) + } + } + } + return n, nil + case ClaimPredicateTypeClaimPredicateOr: + u.OrPredicates = new([]ClaimPredicate) + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimPredicate: %s", err) + } + if l > 2 { + return n, fmt.Errorf("decoding ClaimPredicate: data size (%d) exceeds size limit (2)", l) + } + (*u.OrPredicates) = nil + if l > 0 { + (*u.OrPredicates) = make([]ClaimPredicate, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*u.OrPredicates)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimPredicate: %s", err) + } + } + } + return n, nil + case ClaimPredicateTypeClaimPredicateNot: + u.NotPredicate = new(*ClaimPredicate) + var b bool + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimPredicate: %s", err) + } + (*u.NotPredicate) = nil + if b { + (*u.NotPredicate) = new(ClaimPredicate) + nTmp, err = (*u.NotPredicate).DecodeFrom(d) + n += nTmp + if err != nil { + return n, 
fmt.Errorf("decoding ClaimPredicate: %s", err) + } + } + return n, nil + case ClaimPredicateTypeClaimPredicateBeforeAbsoluteTime: + u.AbsBefore = new(Int64) + nTmp, err = (*u.AbsBefore).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil + case ClaimPredicateTypeClaimPredicateBeforeRelativeTime: + u.RelBefore = new(Int64) + nTmp, err = (*u.RelBefore).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union ClaimPredicate has invalid Type (ClaimPredicateType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimPredicate) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimPredicate) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimPredicate)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimPredicate)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimPredicate) xdrType() {} + +var _ xdrType = (*ClaimPredicate)(nil) + +// ClaimantType is an XDR Enum defines as: +// +// enum ClaimantType +// { +// CLAIMANT_TYPE_V0 = 0 +// }; +// +type ClaimantType int32 + +const ( + ClaimantTypeClaimantTypeV0 ClaimantType = 0 +) + +var claimantTypeMap = map[int32]string{ + 0: "ClaimantTypeClaimantTypeV0", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for ClaimantType +func (e ClaimantType) ValidEnum(v int32) bool { + _, ok := claimantTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e ClaimantType) String() string { + name, _ := claimantTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e ClaimantType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := claimantTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ClaimantType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ClaimantType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *ClaimantType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ClaimantType: %s", err) + } + if _, ok := claimantTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ClaimantType enum value", v) + } + *e = ClaimantType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimantType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimantType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimantType)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimantType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ClaimantType) xdrType() {} + +var _ xdrType = (*ClaimantType)(nil) + +// ClaimantV0 is an XDR NestedStruct defines as: +// +// struct +// { +// AccountID destination; // The account that can use this condition +// ClaimPredicate predicate; // Claimable if predicate is true +// } +// +type ClaimantV0 struct { + Destination AccountId + Predicate ClaimPredicate +} + +// EncodeTo encodes this value using the Encoder. +func (s *ClaimantV0) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Destination.EncodeTo(e); err != nil { + return err + } + if err = s.Predicate.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ClaimantV0)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ClaimantV0) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Destination.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.Predicate.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimPredicate: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimantV0) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimantV0) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimantV0)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimantV0)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimantV0) xdrType() {} + +var _ xdrType = (*ClaimantV0)(nil) + +// Claimant is an XDR Union defines as: +// +// union Claimant switch (ClaimantType type) +// { +// case CLAIMANT_TYPE_V0: +// struct +// { +// AccountID destination; // The account that can use this condition +// ClaimPredicate predicate; // Claimable if predicate is true +// } v0; +// }; +// +type Claimant struct { + Type ClaimantType + V0 *ClaimantV0 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u Claimant) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of Claimant +func (u Claimant) ArmForSwitch(sw int32) (string, bool) { + switch ClaimantType(sw) { + case ClaimantTypeClaimantTypeV0: + return "V0", true + } + return "-", false +} + +// NewClaimant creates a new Claimant. +func NewClaimant(aType ClaimantType, value interface{}) (result Claimant, err error) { + result.Type = aType + switch ClaimantType(aType) { + case ClaimantTypeClaimantTypeV0: + tv, ok := value.(ClaimantV0) + if !ok { + err = fmt.Errorf("invalid value, must be ClaimantV0") + return + } + result.V0 = &tv + } + return +} + +// MustV0 retrieves the V0 value from the union, +// panicing if the value is not set. +func (u Claimant) MustV0() ClaimantV0 { + val, ok := u.GetV0() + + if !ok { + panic("arm V0 is not set") + } + + return val +} + +// GetV0 retrieves the V0 value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u Claimant) GetV0() (result ClaimantV0, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "V0" { + result = *u.V0 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u Claimant) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch ClaimantType(u.Type) { + case ClaimantTypeClaimantTypeV0: + if err = (*u.V0).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (ClaimantType) switch value '%d' is not valid for union Claimant", u.Type) +} + +var _ decoderFrom = (*Claimant)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *Claimant) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimantType: %s", err) + } + switch ClaimantType(u.Type) { + case ClaimantTypeClaimantTypeV0: + u.V0 = new(ClaimantV0) + nTmp, err = (*u.V0).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimantV0: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union Claimant has invalid Type (ClaimantType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Claimant) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Claimant) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Claimant)(nil) + _ encoding.BinaryUnmarshaler = (*Claimant)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Claimant) xdrType() {} + +var _ xdrType = (*Claimant)(nil) + +// ClaimableBalanceIdType is an XDR Enum defines as: +// +// enum ClaimableBalanceIDType +// { +// CLAIMABLE_BALANCE_ID_TYPE_V0 = 0 +// }; +// +type ClaimableBalanceIdType int32 + +const ( + ClaimableBalanceIdTypeClaimableBalanceIdTypeV0 ClaimableBalanceIdType = 0 +) + +var claimableBalanceIdTypeMap = map[int32]string{ + 0: "ClaimableBalanceIdTypeClaimableBalanceIdTypeV0", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for ClaimableBalanceIdType +func (e ClaimableBalanceIdType) ValidEnum(v int32) bool { + _, ok := claimableBalanceIdTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e ClaimableBalanceIdType) String() string { + name, _ := claimableBalanceIdTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e ClaimableBalanceIdType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := claimableBalanceIdTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ClaimableBalanceIdType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ClaimableBalanceIdType)(nil) + +// DecodeFrom decodes this value using the Decoder. 
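// Illustrative sketch (not part of the generated file): constructing the
// Claimant union through its generated constructor and reading the arm back
// via the Get/Must accessors. Assumes the same package as the types above.
func exampleClaimantUnion(dest AccountId, pred ClaimPredicate) {
	claimant, err := NewClaimant(ClaimantTypeClaimantTypeV0, ClaimantV0{
		Destination: dest,
		Predicate:   pred,
	})
	if err != nil {
		// NewClaimant only fails when the value does not match the switch arm.
		panic(err)
	}
	if v0, ok := claimant.GetV0(); ok {
		_ = v0.Destination // safe: ok reports whether the V0 arm is set
	}
	_ = claimant.MustV0() // panics instead of returning ok=false
}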
+func (e *ClaimableBalanceIdType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceIdType: %s", err) + } + if _, ok := claimableBalanceIdTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ClaimableBalanceIdType enum value", v) + } + *e = ClaimableBalanceIdType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimableBalanceIdType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimableBalanceIdType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimableBalanceIdType)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimableBalanceIdType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimableBalanceIdType) xdrType() {} + +var _ xdrType = (*ClaimableBalanceIdType)(nil) + +// ClaimableBalanceId is an XDR Union defines as: +// +// union ClaimableBalanceID switch (ClaimableBalanceIDType type) +// { +// case CLAIMABLE_BALANCE_ID_TYPE_V0: +// Hash v0; +// }; +// +type ClaimableBalanceId struct { + Type ClaimableBalanceIdType + V0 *Hash +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u ClaimableBalanceId) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of ClaimableBalanceId +func (u ClaimableBalanceId) ArmForSwitch(sw int32) (string, bool) { + switch ClaimableBalanceIdType(sw) { + case ClaimableBalanceIdTypeClaimableBalanceIdTypeV0: + return "V0", true + } + return "-", false +} + +// NewClaimableBalanceId creates a new ClaimableBalanceId. +func NewClaimableBalanceId(aType ClaimableBalanceIdType, value interface{}) (result ClaimableBalanceId, err error) { + result.Type = aType + switch ClaimableBalanceIdType(aType) { + case ClaimableBalanceIdTypeClaimableBalanceIdTypeV0: + tv, ok := value.(Hash) + if !ok { + err = fmt.Errorf("invalid value, must be Hash") + return + } + result.V0 = &tv + } + return +} + +// MustV0 retrieves the V0 value from the union, +// panicing if the value is not set. +func (u ClaimableBalanceId) MustV0() Hash { + val, ok := u.GetV0() + + if !ok { + panic("arm V0 is not set") + } + + return val +} + +// GetV0 retrieves the V0 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ClaimableBalanceId) GetV0() (result Hash, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "V0" { + result = *u.V0 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u ClaimableBalanceId) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch ClaimableBalanceIdType(u.Type) { + case ClaimableBalanceIdTypeClaimableBalanceIdTypeV0: + if err = (*u.V0).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (ClaimableBalanceIdType) switch value '%d' is not valid for union ClaimableBalanceId", u.Type) +} + +var _ decoderFrom = (*ClaimableBalanceId)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (u *ClaimableBalanceId) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceIdType: %s", err) + } + switch ClaimableBalanceIdType(u.Type) { + case ClaimableBalanceIdTypeClaimableBalanceIdTypeV0: + u.V0 = new(Hash) + nTmp, err = (*u.V0).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union ClaimableBalanceId has invalid Type (ClaimableBalanceIdType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimableBalanceId) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimableBalanceId) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimableBalanceId)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimableBalanceId)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimableBalanceId) xdrType() {} + +var _ xdrType = (*ClaimableBalanceId)(nil) + +// ClaimableBalanceFlags is an XDR Enum defines as: +// +// enum ClaimableBalanceFlags +// { +// // If set, the issuer account of the asset held by the claimable balance may +// // clawback the claimable balance +// CLAIMABLE_BALANCE_CLAWBACK_ENABLED_FLAG = 0x1 +// }; +// +type ClaimableBalanceFlags int32 + +const ( + ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag ClaimableBalanceFlags = 1 +) + +var claimableBalanceFlagsMap = map[int32]string{ + 1: "ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for ClaimableBalanceFlags +func (e ClaimableBalanceFlags) ValidEnum(v int32) bool { + _, ok := claimableBalanceFlagsMap[v] + return ok +} + +// String returns the name of `e` +func (e ClaimableBalanceFlags) String() string { + name, _ := claimableBalanceFlagsMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e ClaimableBalanceFlags) EncodeTo(enc *xdr.Encoder) error { + if _, ok := claimableBalanceFlagsMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ClaimableBalanceFlags enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ClaimableBalanceFlags)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *ClaimableBalanceFlags) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceFlags: %s", err) + } + if _, ok := claimableBalanceFlagsMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ClaimableBalanceFlags enum value", v) + } + *e = ClaimableBalanceFlags(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimableBalanceFlags) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
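// Illustrative sketch (not part of the generated file): ClaimableBalanceId
// currently has a single V0 arm wrapping a Hash, so building one is a single
// constructor call. Assumes the same package as the types above.
func exampleClaimableBalanceId(h Hash) (ClaimableBalanceId, error) {
	return NewClaimableBalanceId(ClaimableBalanceIdTypeClaimableBalanceIdTypeV0, h)
}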
+func (s *ClaimableBalanceFlags) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimableBalanceFlags)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimableBalanceFlags)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimableBalanceFlags) xdrType() {} + +var _ xdrType = (*ClaimableBalanceFlags)(nil) + +// MaskClaimableBalanceFlags is an XDR Const defines as: +// +// const MASK_CLAIMABLE_BALANCE_FLAGS = 0x1; +// +const MaskClaimableBalanceFlags = 0x1 + +// ClaimableBalanceEntryExtensionV1Ext is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type ClaimableBalanceEntryExtensionV1Ext struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u ClaimableBalanceEntryExtensionV1Ext) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of ClaimableBalanceEntryExtensionV1Ext +func (u ClaimableBalanceEntryExtensionV1Ext) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewClaimableBalanceEntryExtensionV1Ext creates a new ClaimableBalanceEntryExtensionV1Ext. +func NewClaimableBalanceEntryExtensionV1Ext(v int32, value interface{}) (result ClaimableBalanceEntryExtensionV1Ext, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u ClaimableBalanceEntryExtensionV1Ext) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union ClaimableBalanceEntryExtensionV1Ext", u.V) +} + +var _ decoderFrom = (*ClaimableBalanceEntryExtensionV1Ext)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *ClaimableBalanceEntryExtensionV1Ext) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union ClaimableBalanceEntryExtensionV1Ext has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimableBalanceEntryExtensionV1Ext) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimableBalanceEntryExtensionV1Ext) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimableBalanceEntryExtensionV1Ext)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimableBalanceEntryExtensionV1Ext)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ClaimableBalanceEntryExtensionV1Ext) xdrType() {} + +var _ xdrType = (*ClaimableBalanceEntryExtensionV1Ext)(nil) + +// ClaimableBalanceEntryExtensionV1 is an XDR Struct defines as: +// +// struct ClaimableBalanceEntryExtensionV1 +// { +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// +// uint32 flags; // see ClaimableBalanceFlags +// }; +// +type ClaimableBalanceEntryExtensionV1 struct { + Ext ClaimableBalanceEntryExtensionV1Ext + Flags Uint32 +} + +// EncodeTo encodes this value using the Encoder. +func (s *ClaimableBalanceEntryExtensionV1) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + if err = s.Flags.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ClaimableBalanceEntryExtensionV1)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ClaimableBalanceEntryExtensionV1) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceEntryExtensionV1Ext: %s", err) + } + nTmp, err = s.Flags.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimableBalanceEntryExtensionV1) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimableBalanceEntryExtensionV1) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimableBalanceEntryExtensionV1)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimableBalanceEntryExtensionV1)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimableBalanceEntryExtensionV1) xdrType() {} + +var _ xdrType = (*ClaimableBalanceEntryExtensionV1)(nil) + +// ClaimableBalanceEntryExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// ClaimableBalanceEntryExtensionV1 v1; +// } +// +type ClaimableBalanceEntryExt struct { + V int32 + V1 *ClaimableBalanceEntryExtensionV1 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u ClaimableBalanceEntryExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of ClaimableBalanceEntryExt +func (u ClaimableBalanceEntryExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + case 1: + return "V1", true + } + return "-", false +} + +// NewClaimableBalanceEntryExt creates a new ClaimableBalanceEntryExt. +func NewClaimableBalanceEntryExt(v int32, value interface{}) (result ClaimableBalanceEntryExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + case 1: + tv, ok := value.(ClaimableBalanceEntryExtensionV1) + if !ok { + err = fmt.Errorf("invalid value, must be ClaimableBalanceEntryExtensionV1") + return + } + result.V1 = &tv + } + return +} + +// MustV1 retrieves the V1 value from the union, +// panicing if the value is not set. 
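// Illustrative sketch (not part of the generated file): the Flags field of
// ClaimableBalanceEntryExtensionV1 is a plain bit field interpreted through
// ClaimableBalanceFlags. The Uint32 conversion below is assumed to be a
// simple integer cast, as elsewhere in this package.
func exampleClawbackEnabled(ext ClaimableBalanceEntryExtensionV1) bool {
	return ext.Flags&Uint32(ClaimableBalanceFlagsClaimableBalanceClawbackEnabledFlag) != 0
}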
+func (u ClaimableBalanceEntryExt) MustV1() ClaimableBalanceEntryExtensionV1 { + val, ok := u.GetV1() + + if !ok { + panic("arm V1 is not set") + } + + return val +} + +// GetV1 retrieves the V1 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ClaimableBalanceEntryExt) GetV1() (result ClaimableBalanceEntryExtensionV1, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V1" { + result = *u.V1 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u ClaimableBalanceEntryExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + case 1: + if err = (*u.V1).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union ClaimableBalanceEntryExt", u.V) +} + +var _ decoderFrom = (*ClaimableBalanceEntryExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *ClaimableBalanceEntryExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + case 1: + u.V1 = new(ClaimableBalanceEntryExtensionV1) + nTmp, err = (*u.V1).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceEntryExtensionV1: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union ClaimableBalanceEntryExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimableBalanceEntryExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimableBalanceEntryExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimableBalanceEntryExt)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimableBalanceEntryExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimableBalanceEntryExt) xdrType() {} + +var _ xdrType = (*ClaimableBalanceEntryExt)(nil) + +// ClaimableBalanceEntry is an XDR Struct defines as: +// +// struct ClaimableBalanceEntry +// { +// // Unique identifier for this ClaimableBalanceEntry +// ClaimableBalanceID balanceID; +// +// // List of claimants with associated predicate +// Claimant claimants<10>; +// +// // Any asset including native +// Asset asset; +// +// // Amount of asset +// int64 amount; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// ClaimableBalanceEntryExtensionV1 v1; +// } +// ext; +// }; +// +type ClaimableBalanceEntry struct { + BalanceId ClaimableBalanceId + Claimants []Claimant `xdrmaxsize:"10"` + Asset Asset + Amount Int64 + Ext ClaimableBalanceEntryExt +} + +// EncodeTo encodes this value using the Encoder. 
+func (s *ClaimableBalanceEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.BalanceId.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Claimants))); err != nil { + return err + } + for i := 0; i < len(s.Claimants); i++ { + if err = s.Claimants[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ClaimableBalanceEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ClaimableBalanceEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.BalanceId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceId: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Claimant: %s", err) + } + if l > 10 { + return n, fmt.Errorf("decoding Claimant: data size (%d) exceeds size limit (10)", l) + } + s.Claimants = nil + if l > 0 { + s.Claimants = make([]Claimant, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Claimants[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Claimant: %s", err) + } + } + } + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceEntryExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimableBalanceEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimableBalanceEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimableBalanceEntry)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimableBalanceEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimableBalanceEntry) xdrType() {} + +var _ xdrType = (*ClaimableBalanceEntry)(nil) + +// LiquidityPoolConstantProductParameters is an XDR Struct defines as: +// +// struct LiquidityPoolConstantProductParameters +// { +// Asset assetA; // assetA < assetB +// Asset assetB; +// int32 fee; // Fee is in basis points, so the actual rate is (fee/100)% +// }; +// +type LiquidityPoolConstantProductParameters struct { + AssetA Asset + AssetB Asset + Fee Int32 +} + +// EncodeTo encodes this value using the Encoder. +func (s *LiquidityPoolConstantProductParameters) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AssetA.EncodeTo(e); err != nil { + return err + } + if err = s.AssetB.EncodeTo(e); err != nil { + return err + } + if err = s.Fee.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LiquidityPoolConstantProductParameters)(nil) + +// DecodeFrom decodes this value using the Decoder. 
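// Illustrative sketch (not part of the generated file): ClaimableBalanceEntry
// carries at most 10 claimants, and DecodeFrom above rejects XDR input whose
// claimant count exceeds that limit. Constructing a valid entry looks like
// this; the Asset value is taken as a parameter since that type is defined
// elsewhere in this package.
func exampleClaimableBalanceEntry(id ClaimableBalanceId, asset Asset, claimants []Claimant) (ClaimableBalanceEntry, error) {
	ext, err := NewClaimableBalanceEntryExt(0, nil) // v=0 arm is void
	if err != nil {
		return ClaimableBalanceEntry{}, err
	}
	return ClaimableBalanceEntry{
		BalanceId: id,
		Claimants: claimants, // the XDR schema caps this slice at 10 entries
		Asset:     asset,
		Amount:    Int64(100_0000000), // 100 units at 7 decimal places (convention assumed)
		Ext:       ext,
	}, nil
}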
+func (s *LiquidityPoolConstantProductParameters) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AssetA.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.AssetB.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Fee.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int32: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolConstantProductParameters) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolConstantProductParameters) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolConstantProductParameters)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolConstantProductParameters)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolConstantProductParameters) xdrType() {} + +var _ xdrType = (*LiquidityPoolConstantProductParameters)(nil) + +// LiquidityPoolEntryConstantProduct is an XDR NestedStruct defines as: +// +// struct +// { +// LiquidityPoolConstantProductParameters params; +// +// int64 reserveA; // amount of A in the pool +// int64 reserveB; // amount of B in the pool +// int64 totalPoolShares; // total number of pool shares issued +// int64 poolSharesTrustLineCount; // number of trust lines for the associated pool shares +// } +// +type LiquidityPoolEntryConstantProduct struct { + Params LiquidityPoolConstantProductParameters + ReserveA Int64 + ReserveB Int64 + TotalPoolShares Int64 + PoolSharesTrustLineCount Int64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *LiquidityPoolEntryConstantProduct) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Params.EncodeTo(e); err != nil { + return err + } + if err = s.ReserveA.EncodeTo(e); err != nil { + return err + } + if err = s.ReserveB.EncodeTo(e); err != nil { + return err + } + if err = s.TotalPoolShares.EncodeTo(e); err != nil { + return err + } + if err = s.PoolSharesTrustLineCount.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LiquidityPoolEntryConstantProduct)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LiquidityPoolEntryConstantProduct) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Params.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolConstantProductParameters: %s", err) + } + nTmp, err = s.ReserveA.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.ReserveB.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.TotalPoolShares.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.PoolSharesTrustLineCount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s LiquidityPoolEntryConstantProduct) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolEntryConstantProduct) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolEntryConstantProduct)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolEntryConstantProduct)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolEntryConstantProduct) xdrType() {} + +var _ xdrType = (*LiquidityPoolEntryConstantProduct)(nil) + +// LiquidityPoolEntryBody is an XDR NestedUnion defines as: +// +// union switch (LiquidityPoolType type) +// { +// case LIQUIDITY_POOL_CONSTANT_PRODUCT: +// struct +// { +// LiquidityPoolConstantProductParameters params; +// +// int64 reserveA; // amount of A in the pool +// int64 reserveB; // amount of B in the pool +// int64 totalPoolShares; // total number of pool shares issued +// int64 poolSharesTrustLineCount; // number of trust lines for the associated pool shares +// } constantProduct; +// } +// +type LiquidityPoolEntryBody struct { + Type LiquidityPoolType + ConstantProduct *LiquidityPoolEntryConstantProduct +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LiquidityPoolEntryBody) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LiquidityPoolEntryBody +func (u LiquidityPoolEntryBody) ArmForSwitch(sw int32) (string, bool) { + switch LiquidityPoolType(sw) { + case LiquidityPoolTypeLiquidityPoolConstantProduct: + return "ConstantProduct", true + } + return "-", false +} + +// NewLiquidityPoolEntryBody creates a new LiquidityPoolEntryBody. +func NewLiquidityPoolEntryBody(aType LiquidityPoolType, value interface{}) (result LiquidityPoolEntryBody, err error) { + result.Type = aType + switch LiquidityPoolType(aType) { + case LiquidityPoolTypeLiquidityPoolConstantProduct: + tv, ok := value.(LiquidityPoolEntryConstantProduct) + if !ok { + err = fmt.Errorf("invalid value, must be LiquidityPoolEntryConstantProduct") + return + } + result.ConstantProduct = &tv + } + return +} + +// MustConstantProduct retrieves the ConstantProduct value from the union, +// panicing if the value is not set. +func (u LiquidityPoolEntryBody) MustConstantProduct() LiquidityPoolEntryConstantProduct { + val, ok := u.GetConstantProduct() + + if !ok { + panic("arm ConstantProduct is not set") + } + + return val +} + +// GetConstantProduct retrieves the ConstantProduct value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LiquidityPoolEntryBody) GetConstantProduct() (result LiquidityPoolEntryConstantProduct, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ConstantProduct" { + result = *u.ConstantProduct + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
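// Illustrative sketch (not part of the generated file): per the XDR comment
// on LiquidityPoolConstantProductParameters, fee is expressed in basis
// points, so fee = 30 corresponds to a 0.30% (0.003) rate.
func examplePoolFeeRate(params LiquidityPoolConstantProductParameters) float64 {
	return float64(params.Fee) / 10000.0 // 30 -> 0.003
}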
+func (u LiquidityPoolEntryBody) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch LiquidityPoolType(u.Type) { + case LiquidityPoolTypeLiquidityPoolConstantProduct: + if err = (*u.ConstantProduct).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (LiquidityPoolType) switch value '%d' is not valid for union LiquidityPoolEntryBody", u.Type) +} + +var _ decoderFrom = (*LiquidityPoolEntryBody)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *LiquidityPoolEntryBody) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolType: %s", err) + } + switch LiquidityPoolType(u.Type) { + case LiquidityPoolTypeLiquidityPoolConstantProduct: + u.ConstantProduct = new(LiquidityPoolEntryConstantProduct) + nTmp, err = (*u.ConstantProduct).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolEntryConstantProduct: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union LiquidityPoolEntryBody has invalid Type (LiquidityPoolType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolEntryBody) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolEntryBody) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolEntryBody)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolEntryBody)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolEntryBody) xdrType() {} + +var _ xdrType = (*LiquidityPoolEntryBody)(nil) + +// LiquidityPoolEntry is an XDR Struct defines as: +// +// struct LiquidityPoolEntry +// { +// PoolID liquidityPoolID; +// +// union switch (LiquidityPoolType type) +// { +// case LIQUIDITY_POOL_CONSTANT_PRODUCT: +// struct +// { +// LiquidityPoolConstantProductParameters params; +// +// int64 reserveA; // amount of A in the pool +// int64 reserveB; // amount of B in the pool +// int64 totalPoolShares; // total number of pool shares issued +// int64 poolSharesTrustLineCount; // number of trust lines for the associated pool shares +// } constantProduct; +// } +// body; +// }; +// +type LiquidityPoolEntry struct { + LiquidityPoolId PoolId + Body LiquidityPoolEntryBody +} + +// EncodeTo encodes this value using the Encoder. +func (s *LiquidityPoolEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LiquidityPoolId.EncodeTo(e); err != nil { + return err + } + if err = s.Body.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LiquidityPoolEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *LiquidityPoolEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LiquidityPoolId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PoolId: %s", err) + } + nTmp, err = s.Body.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolEntryBody: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolEntry)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolEntry) xdrType() {} + +var _ xdrType = (*LiquidityPoolEntry)(nil) + +// LedgerEntryExtensionV1Ext is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type LedgerEntryExtensionV1Ext struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerEntryExtensionV1Ext) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerEntryExtensionV1Ext +func (u LedgerEntryExtensionV1Ext) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewLedgerEntryExtensionV1Ext creates a new LedgerEntryExtensionV1Ext. +func NewLedgerEntryExtensionV1Ext(v int32, value interface{}) (result LedgerEntryExtensionV1Ext, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u LedgerEntryExtensionV1Ext) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union LedgerEntryExtensionV1Ext", u.V) +} + +var _ decoderFrom = (*LedgerEntryExtensionV1Ext)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *LedgerEntryExtensionV1Ext) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union LedgerEntryExtensionV1Ext has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerEntryExtensionV1Ext) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *LedgerEntryExtensionV1Ext) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerEntryExtensionV1Ext)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerEntryExtensionV1Ext)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerEntryExtensionV1Ext) xdrType() {} + +var _ xdrType = (*LedgerEntryExtensionV1Ext)(nil) + +// LedgerEntryExtensionV1 is an XDR Struct defines as: +// +// struct LedgerEntryExtensionV1 +// { +// SponsorshipDescriptor sponsoringID; +// +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type LedgerEntryExtensionV1 struct { + SponsoringId SponsorshipDescriptor + Ext LedgerEntryExtensionV1Ext +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerEntryExtensionV1) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeBool(s.SponsoringId != nil); err != nil { + return err + } + if s.SponsoringId != nil { + if err = (*s.SponsoringId).EncodeTo(e); err != nil { + return err + } + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerEntryExtensionV1)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerEntryExtensionV1) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var b bool + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SponsorshipDescriptor: %s", err) + } + s.SponsoringId = nil + if b { + s.SponsoringId = new(AccountId) + nTmp, err = s.SponsoringId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SponsorshipDescriptor: %s", err) + } + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryExtensionV1Ext: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerEntryExtensionV1) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerEntryExtensionV1) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerEntryExtensionV1)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerEntryExtensionV1)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
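// Illustrative sketch (not part of the generated file): SponsoringId is an
// optional value. EncodeTo above writes a presence bool first and the
// AccountID only when the pointer is non-nil, matching XDR's optional
// encoding. Assumes SponsorshipDescriptor's underlying type is *AccountId,
// as implied by the DecodeFrom body above.
func exampleSponsorship(sponsor AccountId) LedgerEntryExtensionV1 {
	ext := LedgerEntryExtensionV1{} // SponsoringId nil: encoded as "not present"
	ext.SponsoringId = &sponsor     // now encoded as present, followed by the AccountID
	return ext
}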
+func (s LedgerEntryExtensionV1) xdrType() {} + +var _ xdrType = (*LedgerEntryExtensionV1)(nil) + +// LedgerEntryData is an XDR NestedUnion defines as: +// +// union switch (LedgerEntryType type) +// { +// case ACCOUNT: +// AccountEntry account; +// case TRUSTLINE: +// TrustLineEntry trustLine; +// case OFFER: +// OfferEntry offer; +// case DATA: +// DataEntry data; +// case CLAIMABLE_BALANCE: +// ClaimableBalanceEntry claimableBalance; +// case LIQUIDITY_POOL: +// LiquidityPoolEntry liquidityPool; +// } +// +type LedgerEntryData struct { + Type LedgerEntryType + Account *AccountEntry + TrustLine *TrustLineEntry + Offer *OfferEntry + Data *DataEntry + ClaimableBalance *ClaimableBalanceEntry + LiquidityPool *LiquidityPoolEntry +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerEntryData) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerEntryData +func (u LedgerEntryData) ArmForSwitch(sw int32) (string, bool) { + switch LedgerEntryType(sw) { + case LedgerEntryTypeAccount: + return "Account", true + case LedgerEntryTypeTrustline: + return "TrustLine", true + case LedgerEntryTypeOffer: + return "Offer", true + case LedgerEntryTypeData: + return "Data", true + case LedgerEntryTypeClaimableBalance: + return "ClaimableBalance", true + case LedgerEntryTypeLiquidityPool: + return "LiquidityPool", true + } + return "-", false +} + +// NewLedgerEntryData creates a new LedgerEntryData. +func NewLedgerEntryData(aType LedgerEntryType, value interface{}) (result LedgerEntryData, err error) { + result.Type = aType + switch LedgerEntryType(aType) { + case LedgerEntryTypeAccount: + tv, ok := value.(AccountEntry) + if !ok { + err = fmt.Errorf("invalid value, must be AccountEntry") + return + } + result.Account = &tv + case LedgerEntryTypeTrustline: + tv, ok := value.(TrustLineEntry) + if !ok { + err = fmt.Errorf("invalid value, must be TrustLineEntry") + return + } + result.TrustLine = &tv + case LedgerEntryTypeOffer: + tv, ok := value.(OfferEntry) + if !ok { + err = fmt.Errorf("invalid value, must be OfferEntry") + return + } + result.Offer = &tv + case LedgerEntryTypeData: + tv, ok := value.(DataEntry) + if !ok { + err = fmt.Errorf("invalid value, must be DataEntry") + return + } + result.Data = &tv + case LedgerEntryTypeClaimableBalance: + tv, ok := value.(ClaimableBalanceEntry) + if !ok { + err = fmt.Errorf("invalid value, must be ClaimableBalanceEntry") + return + } + result.ClaimableBalance = &tv + case LedgerEntryTypeLiquidityPool: + tv, ok := value.(LiquidityPoolEntry) + if !ok { + err = fmt.Errorf("invalid value, must be LiquidityPoolEntry") + return + } + result.LiquidityPool = &tv + } + return +} + +// MustAccount retrieves the Account value from the union, +// panicing if the value is not set. +func (u LedgerEntryData) MustAccount() AccountEntry { + val, ok := u.GetAccount() + + if !ok { + panic("arm Account is not set") + } + + return val +} + +// GetAccount retrieves the Account value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryData) GetAccount() (result AccountEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Account" { + result = *u.Account + ok = true + } + + return +} + +// MustTrustLine retrieves the TrustLine value from the union, +// panicing if the value is not set. 
+func (u LedgerEntryData) MustTrustLine() TrustLineEntry { + val, ok := u.GetTrustLine() + + if !ok { + panic("arm TrustLine is not set") + } + + return val +} + +// GetTrustLine retrieves the TrustLine value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryData) GetTrustLine() (result TrustLineEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "TrustLine" { + result = *u.TrustLine + ok = true + } + + return +} + +// MustOffer retrieves the Offer value from the union, +// panicing if the value is not set. +func (u LedgerEntryData) MustOffer() OfferEntry { + val, ok := u.GetOffer() + + if !ok { + panic("arm Offer is not set") + } + + return val +} + +// GetOffer retrieves the Offer value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryData) GetOffer() (result OfferEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Offer" { + result = *u.Offer + ok = true + } + + return +} + +// MustData retrieves the Data value from the union, +// panicing if the value is not set. +func (u LedgerEntryData) MustData() DataEntry { + val, ok := u.GetData() + + if !ok { + panic("arm Data is not set") + } + + return val +} + +// GetData retrieves the Data value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryData) GetData() (result DataEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Data" { + result = *u.Data + ok = true + } + + return +} + +// MustClaimableBalance retrieves the ClaimableBalance value from the union, +// panicing if the value is not set. +func (u LedgerEntryData) MustClaimableBalance() ClaimableBalanceEntry { + val, ok := u.GetClaimableBalance() + + if !ok { + panic("arm ClaimableBalance is not set") + } + + return val +} + +// GetClaimableBalance retrieves the ClaimableBalance value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryData) GetClaimableBalance() (result ClaimableBalanceEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ClaimableBalance" { + result = *u.ClaimableBalance + ok = true + } + + return +} + +// MustLiquidityPool retrieves the LiquidityPool value from the union, +// panicing if the value is not set. +func (u LedgerEntryData) MustLiquidityPool() LiquidityPoolEntry { + val, ok := u.GetLiquidityPool() + + if !ok { + panic("arm LiquidityPool is not set") + } + + return val +} + +// GetLiquidityPool retrieves the LiquidityPool value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryData) GetLiquidityPool() (result LiquidityPoolEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "LiquidityPool" { + result = *u.LiquidityPool + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
+func (u LedgerEntryData) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch LedgerEntryType(u.Type) { + case LedgerEntryTypeAccount: + if err = (*u.Account).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeTrustline: + if err = (*u.TrustLine).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeOffer: + if err = (*u.Offer).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeData: + if err = (*u.Data).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeClaimableBalance: + if err = (*u.ClaimableBalance).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeLiquidityPool: + if err = (*u.LiquidityPool).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (LedgerEntryType) switch value '%d' is not valid for union LedgerEntryData", u.Type) +} + +var _ decoderFrom = (*LedgerEntryData)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *LedgerEntryData) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryType: %s", err) + } + switch LedgerEntryType(u.Type) { + case LedgerEntryTypeAccount: + u.Account = new(AccountEntry) + nTmp, err = (*u.Account).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountEntry: %s", err) + } + return n, nil + case LedgerEntryTypeTrustline: + u.TrustLine = new(TrustLineEntry) + nTmp, err = (*u.TrustLine).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TrustLineEntry: %s", err) + } + return n, nil + case LedgerEntryTypeOffer: + u.Offer = new(OfferEntry) + nTmp, err = (*u.Offer).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OfferEntry: %s", err) + } + return n, nil + case LedgerEntryTypeData: + u.Data = new(DataEntry) + nTmp, err = (*u.Data).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DataEntry: %s", err) + } + return n, nil + case LedgerEntryTypeClaimableBalance: + u.ClaimableBalance = new(ClaimableBalanceEntry) + nTmp, err = (*u.ClaimableBalance).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceEntry: %s", err) + } + return n, nil + case LedgerEntryTypeLiquidityPool: + u.LiquidityPool = new(LiquidityPoolEntry) + nTmp, err = (*u.LiquidityPool).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolEntry: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union LedgerEntryData has invalid Type (LedgerEntryType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerEntryData) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerEntryData) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerEntryData)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerEntryData)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
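// Illustrative sketch (not part of the generated file): consuming a decoded
// LedgerEntryData by switching on its discriminant and using the typed
// accessors generated above.
func exampleInspectEntryData(data LedgerEntryData) string {
	switch data.Type {
	case LedgerEntryTypeAccount:
		acc := data.MustAccount() // panics if the Account arm is not set
		_ = acc
		return "account"
	case LedgerEntryTypeTrustline:
		if tl, ok := data.GetTrustLine(); ok {
			_ = tl
		}
		return "trustline"
	case LedgerEntryTypeLiquidityPool:
		return "liquidity pool"
	default:
		return "other"
	}
}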
+func (s LedgerEntryData) xdrType() {} + +var _ xdrType = (*LedgerEntryData)(nil) + +// LedgerEntryExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// LedgerEntryExtensionV1 v1; +// } +// +type LedgerEntryExt struct { + V int32 + V1 *LedgerEntryExtensionV1 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerEntryExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerEntryExt +func (u LedgerEntryExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + case 1: + return "V1", true + } + return "-", false +} + +// NewLedgerEntryExt creates a new LedgerEntryExt. +func NewLedgerEntryExt(v int32, value interface{}) (result LedgerEntryExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + case 1: + tv, ok := value.(LedgerEntryExtensionV1) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerEntryExtensionV1") + return + } + result.V1 = &tv + } + return +} + +// MustV1 retrieves the V1 value from the union, +// panicing if the value is not set. +func (u LedgerEntryExt) MustV1() LedgerEntryExtensionV1 { + val, ok := u.GetV1() + + if !ok { + panic("arm V1 is not set") + } + + return val +} + +// GetV1 retrieves the V1 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryExt) GetV1() (result LedgerEntryExtensionV1, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V1" { + result = *u.V1 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u LedgerEntryExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + case 1: + if err = (*u.V1).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union LedgerEntryExt", u.V) +} + +var _ decoderFrom = (*LedgerEntryExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *LedgerEntryExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + case 1: + u.V1 = new(LedgerEntryExtensionV1) + nTmp, err = (*u.V1).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryExtensionV1: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union LedgerEntryExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerEntryExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerEntryExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerEntryExt)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerEntryExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s LedgerEntryExt) xdrType() {} + +var _ xdrType = (*LedgerEntryExt)(nil) + +// LedgerEntry is an XDR Struct defines as: +// +// struct LedgerEntry +// { +// uint32 lastModifiedLedgerSeq; // ledger the LedgerEntry was last changed +// +// union switch (LedgerEntryType type) +// { +// case ACCOUNT: +// AccountEntry account; +// case TRUSTLINE: +// TrustLineEntry trustLine; +// case OFFER: +// OfferEntry offer; +// case DATA: +// DataEntry data; +// case CLAIMABLE_BALANCE: +// ClaimableBalanceEntry claimableBalance; +// case LIQUIDITY_POOL: +// LiquidityPoolEntry liquidityPool; +// } +// data; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// LedgerEntryExtensionV1 v1; +// } +// ext; +// }; +// +type LedgerEntry struct { + LastModifiedLedgerSeq Uint32 + Data LedgerEntryData + Ext LedgerEntryExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LastModifiedLedgerSeq.EncodeTo(e); err != nil { + return err + } + if err = s.Data.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LastModifiedLedgerSeq.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.Data.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryData: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerEntry)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerEntry) xdrType() {} + +var _ xdrType = (*LedgerEntry)(nil) + +// LedgerKeyAccount is an XDR NestedStruct defines as: +// +// struct +// { +// AccountID accountID; +// } +// +type LedgerKeyAccount struct { + AccountId AccountId +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerKeyAccount) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AccountId.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerKeyAccount)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerKeyAccount) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AccountId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
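// Illustrative sketch (not part of the generated file): decoding a raw XDR
// LedgerEntry and reading its last-modified ledger sequence; the Data and Ext
// arms are populated by the same call.
func exampleDecodeLedgerEntry(raw []byte) (Uint32, error) {
	var entry LedgerEntry
	if err := entry.UnmarshalBinary(raw); err != nil {
		return 0, err
	}
	return entry.LastModifiedLedgerSeq, nil
}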
+func (s LedgerKeyAccount) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerKeyAccount) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerKeyAccount)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerKeyAccount)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerKeyAccount) xdrType() {} + +var _ xdrType = (*LedgerKeyAccount)(nil) + +// LedgerKeyTrustLine is an XDR NestedStruct defines as: +// +// struct +// { +// AccountID accountID; +// TrustLineAsset asset; +// } +// +type LedgerKeyTrustLine struct { + AccountId AccountId + Asset TrustLineAsset +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerKeyTrustLine) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AccountId.EncodeTo(e); err != nil { + return err + } + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerKeyTrustLine)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerKeyTrustLine) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AccountId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TrustLineAsset: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerKeyTrustLine) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerKeyTrustLine) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerKeyTrustLine)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerKeyTrustLine)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerKeyTrustLine) xdrType() {} + +var _ xdrType = (*LedgerKeyTrustLine)(nil) + +// LedgerKeyOffer is an XDR NestedStruct defines as: +// +// struct +// { +// AccountID sellerID; +// int64 offerID; +// } +// +type LedgerKeyOffer struct { + SellerId AccountId + OfferId Int64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerKeyOffer) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SellerId.EncodeTo(e); err != nil { + return err + } + if err = s.OfferId.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerKeyOffer)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerKeyOffer) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SellerId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.OfferId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s LedgerKeyOffer) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerKeyOffer) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerKeyOffer)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerKeyOffer)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerKeyOffer) xdrType() {} + +var _ xdrType = (*LedgerKeyOffer)(nil) + +// LedgerKeyData is an XDR NestedStruct defines as: +// +// struct +// { +// AccountID accountID; +// string64 dataName; +// } +// +type LedgerKeyData struct { + AccountId AccountId + DataName String64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerKeyData) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AccountId.EncodeTo(e); err != nil { + return err + } + if err = s.DataName.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerKeyData)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerKeyData) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AccountId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.DataName.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding String64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerKeyData) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerKeyData) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerKeyData)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerKeyData)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerKeyData) xdrType() {} + +var _ xdrType = (*LedgerKeyData)(nil) + +// LedgerKeyClaimableBalance is an XDR NestedStruct defines as: +// +// struct +// { +// ClaimableBalanceID balanceID; +// } +// +type LedgerKeyClaimableBalance struct { + BalanceId ClaimableBalanceId +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerKeyClaimableBalance) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.BalanceId.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerKeyClaimableBalance)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerKeyClaimableBalance) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.BalanceId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceId: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerKeyClaimableBalance) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *LedgerKeyClaimableBalance) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerKeyClaimableBalance)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerKeyClaimableBalance)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerKeyClaimableBalance) xdrType() {} + +var _ xdrType = (*LedgerKeyClaimableBalance)(nil) + +// LedgerKeyLiquidityPool is an XDR NestedStruct defines as: +// +// struct +// { +// PoolID liquidityPoolID; +// } +// +type LedgerKeyLiquidityPool struct { + LiquidityPoolId PoolId +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerKeyLiquidityPool) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LiquidityPoolId.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerKeyLiquidityPool)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerKeyLiquidityPool) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LiquidityPoolId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PoolId: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerKeyLiquidityPool) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerKeyLiquidityPool) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerKeyLiquidityPool)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerKeyLiquidityPool)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
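// Illustrative sketch (not part of the generated file): the LedgerKey* nested
// structs encode only their own fields, while wrapping one in the LedgerKey
// union (defined just below) prepends the LedgerEntryType discriminant.
// PoolId is assumed to be the 32-byte hash typedef declared earlier in this file.
func exampleLiquidityPoolKeyBytes(poolID PoolId) ([]byte, []byte, error) {
	bare := LedgerKeyLiquidityPool{LiquidityPoolId: poolID}
	bareBytes, err := bare.MarshalBinary() // just the pool ID bytes
	if err != nil {
		return nil, nil, err
	}
	wrapped, err := NewLedgerKey(LedgerEntryTypeLiquidityPool, bare)
	if err != nil {
		return nil, nil, err
	}
	wrappedBytes, err := wrapped.MarshalBinary() // 4-byte type discriminant + pool ID
	return bareBytes, wrappedBytes, err
}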
+func (s LedgerKeyLiquidityPool) xdrType() {} + +var _ xdrType = (*LedgerKeyLiquidityPool)(nil) + +// LedgerKey is an XDR Union defines as: +// +// union LedgerKey switch (LedgerEntryType type) +// { +// case ACCOUNT: +// struct +// { +// AccountID accountID; +// } account; +// +// case TRUSTLINE: +// struct +// { +// AccountID accountID; +// TrustLineAsset asset; +// } trustLine; +// +// case OFFER: +// struct +// { +// AccountID sellerID; +// int64 offerID; +// } offer; +// +// case DATA: +// struct +// { +// AccountID accountID; +// string64 dataName; +// } data; +// +// case CLAIMABLE_BALANCE: +// struct +// { +// ClaimableBalanceID balanceID; +// } claimableBalance; +// +// case LIQUIDITY_POOL: +// struct +// { +// PoolID liquidityPoolID; +// } liquidityPool; +// }; +// +type LedgerKey struct { + Type LedgerEntryType + Account *LedgerKeyAccount + TrustLine *LedgerKeyTrustLine + Offer *LedgerKeyOffer + Data *LedgerKeyData + ClaimableBalance *LedgerKeyClaimableBalance + LiquidityPool *LedgerKeyLiquidityPool +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerKey) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerKey +func (u LedgerKey) ArmForSwitch(sw int32) (string, bool) { + switch LedgerEntryType(sw) { + case LedgerEntryTypeAccount: + return "Account", true + case LedgerEntryTypeTrustline: + return "TrustLine", true + case LedgerEntryTypeOffer: + return "Offer", true + case LedgerEntryTypeData: + return "Data", true + case LedgerEntryTypeClaimableBalance: + return "ClaimableBalance", true + case LedgerEntryTypeLiquidityPool: + return "LiquidityPool", true + } + return "-", false +} + +// NewLedgerKey creates a new LedgerKey. +func NewLedgerKey(aType LedgerEntryType, value interface{}) (result LedgerKey, err error) { + result.Type = aType + switch LedgerEntryType(aType) { + case LedgerEntryTypeAccount: + tv, ok := value.(LedgerKeyAccount) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerKeyAccount") + return + } + result.Account = &tv + case LedgerEntryTypeTrustline: + tv, ok := value.(LedgerKeyTrustLine) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerKeyTrustLine") + return + } + result.TrustLine = &tv + case LedgerEntryTypeOffer: + tv, ok := value.(LedgerKeyOffer) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerKeyOffer") + return + } + result.Offer = &tv + case LedgerEntryTypeData: + tv, ok := value.(LedgerKeyData) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerKeyData") + return + } + result.Data = &tv + case LedgerEntryTypeClaimableBalance: + tv, ok := value.(LedgerKeyClaimableBalance) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerKeyClaimableBalance") + return + } + result.ClaimableBalance = &tv + case LedgerEntryTypeLiquidityPool: + tv, ok := value.(LedgerKeyLiquidityPool) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerKeyLiquidityPool") + return + } + result.LiquidityPool = &tv + } + return +} + +// MustAccount retrieves the Account value from the union, +// panicing if the value is not set. +func (u LedgerKey) MustAccount() LedgerKeyAccount { + val, ok := u.GetAccount() + + if !ok { + panic("arm Account is not set") + } + + return val +} + +// GetAccount retrieves the Account value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u LedgerKey) GetAccount() (result LedgerKeyAccount, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Account" { + result = *u.Account + ok = true + } + + return +} + +// MustTrustLine retrieves the TrustLine value from the union, +// panicing if the value is not set. +func (u LedgerKey) MustTrustLine() LedgerKeyTrustLine { + val, ok := u.GetTrustLine() + + if !ok { + panic("arm TrustLine is not set") + } + + return val +} + +// GetTrustLine retrieves the TrustLine value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerKey) GetTrustLine() (result LedgerKeyTrustLine, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "TrustLine" { + result = *u.TrustLine + ok = true + } + + return +} + +// MustOffer retrieves the Offer value from the union, +// panicing if the value is not set. +func (u LedgerKey) MustOffer() LedgerKeyOffer { + val, ok := u.GetOffer() + + if !ok { + panic("arm Offer is not set") + } + + return val +} + +// GetOffer retrieves the Offer value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerKey) GetOffer() (result LedgerKeyOffer, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Offer" { + result = *u.Offer + ok = true + } + + return +} + +// MustData retrieves the Data value from the union, +// panicing if the value is not set. +func (u LedgerKey) MustData() LedgerKeyData { + val, ok := u.GetData() + + if !ok { + panic("arm Data is not set") + } + + return val +} + +// GetData retrieves the Data value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerKey) GetData() (result LedgerKeyData, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Data" { + result = *u.Data + ok = true + } + + return +} + +// MustClaimableBalance retrieves the ClaimableBalance value from the union, +// panicing if the value is not set. +func (u LedgerKey) MustClaimableBalance() LedgerKeyClaimableBalance { + val, ok := u.GetClaimableBalance() + + if !ok { + panic("arm ClaimableBalance is not set") + } + + return val +} + +// GetClaimableBalance retrieves the ClaimableBalance value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerKey) GetClaimableBalance() (result LedgerKeyClaimableBalance, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ClaimableBalance" { + result = *u.ClaimableBalance + ok = true + } + + return +} + +// MustLiquidityPool retrieves the LiquidityPool value from the union, +// panicing if the value is not set. +func (u LedgerKey) MustLiquidityPool() LedgerKeyLiquidityPool { + val, ok := u.GetLiquidityPool() + + if !ok { + panic("arm LiquidityPool is not set") + } + + return val +} + +// GetLiquidityPool retrieves the LiquidityPool value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerKey) GetLiquidityPool() (result LedgerKeyLiquidityPool, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "LiquidityPool" { + result = *u.LiquidityPool + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
+func (u LedgerKey) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch LedgerEntryType(u.Type) { + case LedgerEntryTypeAccount: + if err = (*u.Account).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeTrustline: + if err = (*u.TrustLine).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeOffer: + if err = (*u.Offer).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeData: + if err = (*u.Data).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeClaimableBalance: + if err = (*u.ClaimableBalance).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryTypeLiquidityPool: + if err = (*u.LiquidityPool).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (LedgerEntryType) switch value '%d' is not valid for union LedgerKey", u.Type) +} + +var _ decoderFrom = (*LedgerKey)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *LedgerKey) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryType: %s", err) + } + switch LedgerEntryType(u.Type) { + case LedgerEntryTypeAccount: + u.Account = new(LedgerKeyAccount) + nTmp, err = (*u.Account).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerKeyAccount: %s", err) + } + return n, nil + case LedgerEntryTypeTrustline: + u.TrustLine = new(LedgerKeyTrustLine) + nTmp, err = (*u.TrustLine).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerKeyTrustLine: %s", err) + } + return n, nil + case LedgerEntryTypeOffer: + u.Offer = new(LedgerKeyOffer) + nTmp, err = (*u.Offer).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerKeyOffer: %s", err) + } + return n, nil + case LedgerEntryTypeData: + u.Data = new(LedgerKeyData) + nTmp, err = (*u.Data).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerKeyData: %s", err) + } + return n, nil + case LedgerEntryTypeClaimableBalance: + u.ClaimableBalance = new(LedgerKeyClaimableBalance) + nTmp, err = (*u.ClaimableBalance).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerKeyClaimableBalance: %s", err) + } + return n, nil + case LedgerEntryTypeLiquidityPool: + u.LiquidityPool = new(LedgerKeyLiquidityPool) + nTmp, err = (*u.LiquidityPool).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerKeyLiquidityPool: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union LedgerKey has invalid Type (LedgerEntryType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerKey) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerKey) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerKey)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerKey)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
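// Illustrative sketch (not part of the generated file): building a LedgerKey
// for an offer via the generated constructor and round-tripping it through the
// binary marshalers. AccountId, PublicKeyTypePublicKeyTypeEd25519 and Uint256
// are assumed to be declared earlier in this generated file; the zero key is
// purely for illustration.
func exampleOfferKeyRoundTrip() (LedgerKey, error) {
	var ed25519 Uint256 // all-zero key, illustration only
	seller := AccountId{Type: PublicKeyTypePublicKeyTypeEd25519, Ed25519: &ed25519}

	key, err := NewLedgerKey(LedgerEntryTypeOffer, LedgerKeyOffer{
		SellerId: seller,
		OfferId:  12345,
	})
	if err != nil {
		return LedgerKey{}, err
	}
	raw, err := key.MarshalBinary()
	if err != nil {
		return LedgerKey{}, err
	}
	var decoded LedgerKey
	if err := decoded.UnmarshalBinary(raw); err != nil {
		return LedgerKey{}, err
	}
	// GetOffer reports ok=true only when the discriminant selects that arm;
	// MustOffer would panic for any other ledger entry type.
	if offer, ok := decoded.GetOffer(); ok {
		_ = offer.OfferId // 12345 again after the round trip
	}
	return decoded, nil
}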
+func (s LedgerKey) xdrType() {} + +var _ xdrType = (*LedgerKey)(nil) + +// EnvelopeType is an XDR Enum defines as: +// +// enum EnvelopeType +// { +// ENVELOPE_TYPE_TX_V0 = 0, +// ENVELOPE_TYPE_SCP = 1, +// ENVELOPE_TYPE_TX = 2, +// ENVELOPE_TYPE_AUTH = 3, +// ENVELOPE_TYPE_SCPVALUE = 4, +// ENVELOPE_TYPE_TX_FEE_BUMP = 5, +// ENVELOPE_TYPE_OP_ID = 6, +// ENVELOPE_TYPE_POOL_REVOKE_OP_ID = 7 +// }; +// +type EnvelopeType int32 + +const ( + EnvelopeTypeEnvelopeTypeTxV0 EnvelopeType = 0 + EnvelopeTypeEnvelopeTypeScp EnvelopeType = 1 + EnvelopeTypeEnvelopeTypeTx EnvelopeType = 2 + EnvelopeTypeEnvelopeTypeAuth EnvelopeType = 3 + EnvelopeTypeEnvelopeTypeScpvalue EnvelopeType = 4 + EnvelopeTypeEnvelopeTypeTxFeeBump EnvelopeType = 5 + EnvelopeTypeEnvelopeTypeOpId EnvelopeType = 6 + EnvelopeTypeEnvelopeTypePoolRevokeOpId EnvelopeType = 7 +) + +var envelopeTypeMap = map[int32]string{ + 0: "EnvelopeTypeEnvelopeTypeTxV0", + 1: "EnvelopeTypeEnvelopeTypeScp", + 2: "EnvelopeTypeEnvelopeTypeTx", + 3: "EnvelopeTypeEnvelopeTypeAuth", + 4: "EnvelopeTypeEnvelopeTypeScpvalue", + 5: "EnvelopeTypeEnvelopeTypeTxFeeBump", + 6: "EnvelopeTypeEnvelopeTypeOpId", + 7: "EnvelopeTypeEnvelopeTypePoolRevokeOpId", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for EnvelopeType +func (e EnvelopeType) ValidEnum(v int32) bool { + _, ok := envelopeTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e EnvelopeType) String() string { + name, _ := envelopeTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e EnvelopeType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := envelopeTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid EnvelopeType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*EnvelopeType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *EnvelopeType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding EnvelopeType: %s", err) + } + if _, ok := envelopeTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid EnvelopeType enum value", v) + } + *e = EnvelopeType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s EnvelopeType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *EnvelopeType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*EnvelopeType)(nil) + _ encoding.BinaryUnmarshaler = (*EnvelopeType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s EnvelopeType) xdrType() {} + +var _ xdrType = (*EnvelopeType)(nil) + +// UpgradeType is an XDR Typedef defines as: +// +// typedef opaque UpgradeType<128>; +// +type UpgradeType []byte + +// XDRMaxSize implements the Sized interface for UpgradeType +func (e UpgradeType) XDRMaxSize() int { + return 128 +} + +// EncodeTo encodes this value using the Encoder. +func (s UpgradeType) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeOpaque(s[:]); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*UpgradeType)(nil) + +// DecodeFrom decodes this value using the Decoder. 
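// Illustrative sketch (not part of the generated file): enum values are
// validated on both encode and decode, so an out-of-range EnvelopeType is
// rejected rather than silently written to the wire.
func exampleEnvelopeTypeValidation() (valid []byte, invalidErr error) {
	// A defined constant encodes as a 4-byte big-endian XDR int (value 2 here).
	valid, _ = EnvelopeTypeEnvelopeTypeTx.MarshalBinary()
	// An undefined value is rejected by EncodeTo before anything is written.
	_, invalidErr = EnvelopeType(42).MarshalBinary()
	return valid, invalidErr
}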
+func (s *UpgradeType) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + (*s), nTmp, err = d.DecodeOpaque(128) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding UpgradeType: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s UpgradeType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *UpgradeType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*UpgradeType)(nil) + _ encoding.BinaryUnmarshaler = (*UpgradeType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s UpgradeType) xdrType() {} + +var _ xdrType = (*UpgradeType)(nil) + +// StellarValueType is an XDR Enum defines as: +// +// enum StellarValueType +// { +// STELLAR_VALUE_BASIC = 0, +// STELLAR_VALUE_SIGNED = 1 +// }; +// +type StellarValueType int32 + +const ( + StellarValueTypeStellarValueBasic StellarValueType = 0 + StellarValueTypeStellarValueSigned StellarValueType = 1 +) + +var stellarValueTypeMap = map[int32]string{ + 0: "StellarValueTypeStellarValueBasic", + 1: "StellarValueTypeStellarValueSigned", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for StellarValueType +func (e StellarValueType) ValidEnum(v int32) bool { + _, ok := stellarValueTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e StellarValueType) String() string { + name, _ := stellarValueTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e StellarValueType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := stellarValueTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid StellarValueType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*StellarValueType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *StellarValueType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding StellarValueType: %s", err) + } + if _, ok := stellarValueTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid StellarValueType enum value", v) + } + *e = StellarValueType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s StellarValueType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *StellarValueType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*StellarValueType)(nil) + _ encoding.BinaryUnmarshaler = (*StellarValueType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
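// Illustrative sketch (not part of the generated file): UpgradeType is a
// variable-length opaque with a 128-byte ceiling. Encoding writes a 4-byte
// length followed by the payload (padded to a multiple of four), and
// DecodeFrom rejects any input advertising more than 128 bytes.
func exampleUpgradeTypeRoundTrip() (UpgradeType, error) {
	in := UpgradeType([]byte{0x01, 0x02, 0x03})
	raw, err := in.MarshalBinary()
	if err != nil {
		return nil, err
	}
	var out UpgradeType
	if err := out.UnmarshalBinary(raw); err != nil {
		return nil, err
	}
	return out, nil // same three bytes as `in`
}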
+func (s StellarValueType) xdrType() {} + +var _ xdrType = (*StellarValueType)(nil) + +// LedgerCloseValueSignature is an XDR Struct defines as: +// +// struct LedgerCloseValueSignature +// { +// NodeID nodeID; // which node introduced the value +// Signature signature; // nodeID's signature +// }; +// +type LedgerCloseValueSignature struct { + NodeId NodeId + Signature Signature +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerCloseValueSignature) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.NodeId.EncodeTo(e); err != nil { + return err + } + if err = s.Signature.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerCloseValueSignature)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerCloseValueSignature) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.NodeId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + nTmp, err = s.Signature.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signature: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerCloseValueSignature) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerCloseValueSignature) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerCloseValueSignature)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerCloseValueSignature)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerCloseValueSignature) xdrType() {} + +var _ xdrType = (*LedgerCloseValueSignature)(nil) + +// StellarValueExt is an XDR NestedUnion defines as: +// +// union switch (StellarValueType v) +// { +// case STELLAR_VALUE_BASIC: +// void; +// case STELLAR_VALUE_SIGNED: +// LedgerCloseValueSignature lcValueSignature; +// } +// +type StellarValueExt struct { + V StellarValueType + LcValueSignature *LedgerCloseValueSignature +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u StellarValueExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of StellarValueExt +func (u StellarValueExt) ArmForSwitch(sw int32) (string, bool) { + switch StellarValueType(sw) { + case StellarValueTypeStellarValueBasic: + return "", true + case StellarValueTypeStellarValueSigned: + return "LcValueSignature", true + } + return "-", false +} + +// NewStellarValueExt creates a new StellarValueExt. +func NewStellarValueExt(v StellarValueType, value interface{}) (result StellarValueExt, err error) { + result.V = v + switch StellarValueType(v) { + case StellarValueTypeStellarValueBasic: + // void + case StellarValueTypeStellarValueSigned: + tv, ok := value.(LedgerCloseValueSignature) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerCloseValueSignature") + return + } + result.LcValueSignature = &tv + } + return +} + +// MustLcValueSignature retrieves the LcValueSignature value from the union, +// panicing if the value is not set. 
+func (u StellarValueExt) MustLcValueSignature() LedgerCloseValueSignature { + val, ok := u.GetLcValueSignature() + + if !ok { + panic("arm LcValueSignature is not set") + } + + return val +} + +// GetLcValueSignature retrieves the LcValueSignature value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarValueExt) GetLcValueSignature() (result LedgerCloseValueSignature, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "LcValueSignature" { + result = *u.LcValueSignature + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u StellarValueExt) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.V.EncodeTo(e); err != nil { + return err + } + switch StellarValueType(u.V) { + case StellarValueTypeStellarValueBasic: + // Void + return nil + case StellarValueTypeStellarValueSigned: + if err = (*u.LcValueSignature).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (StellarValueType) switch value '%d' is not valid for union StellarValueExt", u.V) +} + +var _ decoderFrom = (*StellarValueExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *StellarValueExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.V.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding StellarValueType: %s", err) + } + switch StellarValueType(u.V) { + case StellarValueTypeStellarValueBasic: + // Void + return n, nil + case StellarValueTypeStellarValueSigned: + u.LcValueSignature = new(LedgerCloseValueSignature) + nTmp, err = (*u.LcValueSignature).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerCloseValueSignature: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union StellarValueExt has invalid V (StellarValueType) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s StellarValueExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *StellarValueExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*StellarValueExt)(nil) + _ encoding.BinaryUnmarshaler = (*StellarValueExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s StellarValueExt) xdrType() {} + +var _ xdrType = (*StellarValueExt)(nil) + +// StellarValue is an XDR Struct defines as: +// +// struct StellarValue +// { +// Hash txSetHash; // transaction set to apply to previous ledger +// TimePoint closeTime; // network close time +// +// // upgrades to apply to the previous ledger (usually empty) +// // this is a vector of encoded 'LedgerUpgrade' so that nodes can drop +// // unknown steps during consensus if needed. 
+// // see notes below on 'LedgerUpgrade' for more detail +// // max size is dictated by number of upgrade types (+ room for future) +// UpgradeType upgrades<6>; +// +// // reserved for future use +// union switch (StellarValueType v) +// { +// case STELLAR_VALUE_BASIC: +// void; +// case STELLAR_VALUE_SIGNED: +// LedgerCloseValueSignature lcValueSignature; +// } +// ext; +// }; +// +type StellarValue struct { + TxSetHash Hash + CloseTime TimePoint + Upgrades []UpgradeType `xdrmaxsize:"6"` + Ext StellarValueExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *StellarValue) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.TxSetHash.EncodeTo(e); err != nil { + return err + } + if err = s.CloseTime.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Upgrades))); err != nil { + return err + } + for i := 0; i < len(s.Upgrades); i++ { + if err = s.Upgrades[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*StellarValue)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *StellarValue) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.TxSetHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + nTmp, err = s.CloseTime.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TimePoint: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding UpgradeType: %s", err) + } + if l > 6 { + return n, fmt.Errorf("decoding UpgradeType: data size (%d) exceeds size limit (6)", l) + } + s.Upgrades = nil + if l > 0 { + s.Upgrades = make([]UpgradeType, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Upgrades[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding UpgradeType: %s", err) + } + } + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding StellarValueExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s StellarValue) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *StellarValue) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*StellarValue)(nil) + _ encoding.BinaryUnmarshaler = (*StellarValue)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
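// Illustrative sketch (not part of the generated file): assembling a minimal
// StellarValue with the STELLAR_VALUE_BASIC (void) extension. Hash and
// TimePoint are assumed to be the typedefs declared earlier in this file.
// Note that DecodeFrom enforces the upgrades<6> bound, so a decoded value can
// never carry more than six upgrade steps.
func exampleBasicStellarValue(txSetHash Hash, closeTime TimePoint) ([]byte, error) {
	ext, err := NewStellarValueExt(StellarValueTypeStellarValueBasic, nil)
	if err != nil {
		return nil, err
	}
	sv := StellarValue{
		TxSetHash: txSetHash,
		CloseTime: closeTime,
		Upgrades:  nil, // encoded as a zero-length vector
		Ext:       ext,
	}
	return sv.MarshalBinary()
}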
+func (s StellarValue) xdrType() {} + +var _ xdrType = (*StellarValue)(nil) + +// MaskLedgerHeaderFlags is an XDR Const defines as: +// +// const MASK_LEDGER_HEADER_FLAGS = 0x7; +// +const MaskLedgerHeaderFlags = 0x7 + +// LedgerHeaderFlags is an XDR Enum defines as: +// +// enum LedgerHeaderFlags +// { +// DISABLE_LIQUIDITY_POOL_TRADING_FLAG = 0x1, +// DISABLE_LIQUIDITY_POOL_DEPOSIT_FLAG = 0x2, +// DISABLE_LIQUIDITY_POOL_WITHDRAWAL_FLAG = 0x4 +// }; +// +type LedgerHeaderFlags int32 + +const ( + LedgerHeaderFlagsDisableLiquidityPoolTradingFlag LedgerHeaderFlags = 1 + LedgerHeaderFlagsDisableLiquidityPoolDepositFlag LedgerHeaderFlags = 2 + LedgerHeaderFlagsDisableLiquidityPoolWithdrawalFlag LedgerHeaderFlags = 4 +) + +var ledgerHeaderFlagsMap = map[int32]string{ + 1: "LedgerHeaderFlagsDisableLiquidityPoolTradingFlag", + 2: "LedgerHeaderFlagsDisableLiquidityPoolDepositFlag", + 4: "LedgerHeaderFlagsDisableLiquidityPoolWithdrawalFlag", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for LedgerHeaderFlags +func (e LedgerHeaderFlags) ValidEnum(v int32) bool { + _, ok := ledgerHeaderFlagsMap[v] + return ok +} + +// String returns the name of `e` +func (e LedgerHeaderFlags) String() string { + name, _ := ledgerHeaderFlagsMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e LedgerHeaderFlags) EncodeTo(enc *xdr.Encoder) error { + if _, ok := ledgerHeaderFlagsMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid LedgerHeaderFlags enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*LedgerHeaderFlags)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *LedgerHeaderFlags) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding LedgerHeaderFlags: %s", err) + } + if _, ok := ledgerHeaderFlagsMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid LedgerHeaderFlags enum value", v) + } + *e = LedgerHeaderFlags(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerHeaderFlags) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerHeaderFlags) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerHeaderFlags)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerHeaderFlags)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
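// Illustrative sketch (not part of the generated file): although
// LedgerHeaderFlags is generated as an enum, its members are single bits that
// get OR-ed together into the uint32 flags field of LedgerHeaderExtensionV1
// (defined below), with MaskLedgerHeaderFlags covering all currently defined bits.
func examplePoolTradingDisabled(flags Uint32) bool {
	if flags&^Uint32(MaskLedgerHeaderFlags) != 0 {
		// Unknown bits are set; a stricter caller might reject this header.
		return false
	}
	return flags&Uint32(LedgerHeaderFlagsDisableLiquidityPoolTradingFlag) != 0
}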
+func (s LedgerHeaderFlags) xdrType() {} + +var _ xdrType = (*LedgerHeaderFlags)(nil) + +// LedgerHeaderExtensionV1Ext is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type LedgerHeaderExtensionV1Ext struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerHeaderExtensionV1Ext) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerHeaderExtensionV1Ext +func (u LedgerHeaderExtensionV1Ext) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewLedgerHeaderExtensionV1Ext creates a new LedgerHeaderExtensionV1Ext. +func NewLedgerHeaderExtensionV1Ext(v int32, value interface{}) (result LedgerHeaderExtensionV1Ext, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u LedgerHeaderExtensionV1Ext) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union LedgerHeaderExtensionV1Ext", u.V) +} + +var _ decoderFrom = (*LedgerHeaderExtensionV1Ext)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *LedgerHeaderExtensionV1Ext) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union LedgerHeaderExtensionV1Ext has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerHeaderExtensionV1Ext) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerHeaderExtensionV1Ext) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerHeaderExtensionV1Ext)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerHeaderExtensionV1Ext)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerHeaderExtensionV1Ext) xdrType() {} + +var _ xdrType = (*LedgerHeaderExtensionV1Ext)(nil) + +// LedgerHeaderExtensionV1 is an XDR Struct defines as: +// +// struct LedgerHeaderExtensionV1 +// { +// uint32 flags; // LedgerHeaderFlags +// +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type LedgerHeaderExtensionV1 struct { + Flags Uint32 + Ext LedgerHeaderExtensionV1Ext +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerHeaderExtensionV1) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Flags.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerHeaderExtensionV1)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *LedgerHeaderExtensionV1) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Flags.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerHeaderExtensionV1Ext: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerHeaderExtensionV1) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerHeaderExtensionV1) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerHeaderExtensionV1)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerHeaderExtensionV1)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerHeaderExtensionV1) xdrType() {} + +var _ xdrType = (*LedgerHeaderExtensionV1)(nil) + +// LedgerHeaderExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// LedgerHeaderExtensionV1 v1; +// } +// +type LedgerHeaderExt struct { + V int32 + V1 *LedgerHeaderExtensionV1 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerHeaderExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerHeaderExt +func (u LedgerHeaderExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + case 1: + return "V1", true + } + return "-", false +} + +// NewLedgerHeaderExt creates a new LedgerHeaderExt. +func NewLedgerHeaderExt(v int32, value interface{}) (result LedgerHeaderExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + case 1: + tv, ok := value.(LedgerHeaderExtensionV1) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerHeaderExtensionV1") + return + } + result.V1 = &tv + } + return +} + +// MustV1 retrieves the V1 value from the union, +// panicing if the value is not set. +func (u LedgerHeaderExt) MustV1() LedgerHeaderExtensionV1 { + val, ok := u.GetV1() + + if !ok { + panic("arm V1 is not set") + } + + return val +} + +// GetV1 retrieves the V1 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerHeaderExt) GetV1() (result LedgerHeaderExtensionV1, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V1" { + result = *u.V1 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u LedgerHeaderExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + case 1: + if err = (*u.V1).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union LedgerHeaderExt", u.V) +} + +var _ decoderFrom = (*LedgerHeaderExt)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (u *LedgerHeaderExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + case 1: + u.V1 = new(LedgerHeaderExtensionV1) + nTmp, err = (*u.V1).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerHeaderExtensionV1: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union LedgerHeaderExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerHeaderExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerHeaderExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerHeaderExt)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerHeaderExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerHeaderExt) xdrType() {} + +var _ xdrType = (*LedgerHeaderExt)(nil) + +// LedgerHeader is an XDR Struct defines as: +// +// struct LedgerHeader +// { +// uint32 ledgerVersion; // the protocol version of the ledger +// Hash previousLedgerHash; // hash of the previous ledger header +// StellarValue scpValue; // what consensus agreed to +// Hash txSetResultHash; // the TransactionResultSet that led to this ledger +// Hash bucketListHash; // hash of the ledger state +// +// uint32 ledgerSeq; // sequence number of this ledger +// +// int64 totalCoins; // total number of stroops in existence. +// // 10,000,000 stroops in 1 XLM +// +// int64 feePool; // fees burned since last inflation run +// uint32 inflationSeq; // inflation sequence number +// +// uint64 idPool; // last used global ID, used for generating objects +// +// uint32 baseFee; // base fee per operation in stroops +// uint32 baseReserve; // account base reserve in stroops +// +// uint32 maxTxSetSize; // maximum size a transaction set can be +// +// Hash skipList[4]; // hashes of ledgers in the past. allows you to jump back +// // in time without walking the chain back ledger by ledger +// // each slot contains the oldest ledger that is mod of +// // either 50 5000 50000 or 500000 depending on index +// // skipList[0] mod(50), skipList[1] mod(5000), etc +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// case 1: +// LedgerHeaderExtensionV1 v1; +// } +// ext; +// }; +// +type LedgerHeader struct { + LedgerVersion Uint32 + PreviousLedgerHash Hash + ScpValue StellarValue + TxSetResultHash Hash + BucketListHash Hash + LedgerSeq Uint32 + TotalCoins Int64 + FeePool Int64 + InflationSeq Uint32 + IdPool Uint64 + BaseFee Uint32 + BaseReserve Uint32 + MaxTxSetSize Uint32 + SkipList [4]Hash + Ext LedgerHeaderExt +} + +// EncodeTo encodes this value using the Encoder. 
+func (s *LedgerHeader) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LedgerVersion.EncodeTo(e); err != nil { + return err + } + if err = s.PreviousLedgerHash.EncodeTo(e); err != nil { + return err + } + if err = s.ScpValue.EncodeTo(e); err != nil { + return err + } + if err = s.TxSetResultHash.EncodeTo(e); err != nil { + return err + } + if err = s.BucketListHash.EncodeTo(e); err != nil { + return err + } + if err = s.LedgerSeq.EncodeTo(e); err != nil { + return err + } + if err = s.TotalCoins.EncodeTo(e); err != nil { + return err + } + if err = s.FeePool.EncodeTo(e); err != nil { + return err + } + if err = s.InflationSeq.EncodeTo(e); err != nil { + return err + } + if err = s.IdPool.EncodeTo(e); err != nil { + return err + } + if err = s.BaseFee.EncodeTo(e); err != nil { + return err + } + if err = s.BaseReserve.EncodeTo(e); err != nil { + return err + } + if err = s.MaxTxSetSize.EncodeTo(e); err != nil { + return err + } + for i := 0; i < len(s.SkipList); i++ { + if err = s.SkipList[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerHeader)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerHeader) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LedgerVersion.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.PreviousLedgerHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + nTmp, err = s.ScpValue.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding StellarValue: %s", err) + } + nTmp, err = s.TxSetResultHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + nTmp, err = s.BucketListHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + nTmp, err = s.LedgerSeq.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.TotalCoins.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.FeePool.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.InflationSeq.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.IdPool.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.BaseFee.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.BaseReserve.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.MaxTxSetSize.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + for i := 0; i < len(s.SkipList); i++ { + nTmp, err = s.SkipList[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerHeaderExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
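// Illustrative sketch (not part of the generated file): decoding a LedgerHeader
// from raw XDR (for example, bytes taken from history-archive metadata) and
// reading the optional v1 extension that carries the header flags checked in
// the sketch above.
func exampleReadHeader(raw []byte) (Uint32, error) {
	var header LedgerHeader
	if err := header.UnmarshalBinary(raw); err != nil {
		return 0, err
	}
	_ = header.LedgerSeq // sequence number of the decoded ledger
	if v1, ok := header.Ext.GetV1(); ok {
		return v1.Flags, nil // LedgerHeaderFlags bits, see MaskLedgerHeaderFlags
	}
	return 0, nil // ext v=0: no flags present
}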
+func (s LedgerHeader) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerHeader) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerHeader)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerHeader)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerHeader) xdrType() {} + +var _ xdrType = (*LedgerHeader)(nil) + +// LedgerUpgradeType is an XDR Enum defines as: +// +// enum LedgerUpgradeType +// { +// LEDGER_UPGRADE_VERSION = 1, +// LEDGER_UPGRADE_BASE_FEE = 2, +// LEDGER_UPGRADE_MAX_TX_SET_SIZE = 3, +// LEDGER_UPGRADE_BASE_RESERVE = 4, +// LEDGER_UPGRADE_FLAGS = 5 +// }; +// +type LedgerUpgradeType int32 + +const ( + LedgerUpgradeTypeLedgerUpgradeVersion LedgerUpgradeType = 1 + LedgerUpgradeTypeLedgerUpgradeBaseFee LedgerUpgradeType = 2 + LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize LedgerUpgradeType = 3 + LedgerUpgradeTypeLedgerUpgradeBaseReserve LedgerUpgradeType = 4 + LedgerUpgradeTypeLedgerUpgradeFlags LedgerUpgradeType = 5 +) + +var ledgerUpgradeTypeMap = map[int32]string{ + 1: "LedgerUpgradeTypeLedgerUpgradeVersion", + 2: "LedgerUpgradeTypeLedgerUpgradeBaseFee", + 3: "LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize", + 4: "LedgerUpgradeTypeLedgerUpgradeBaseReserve", + 5: "LedgerUpgradeTypeLedgerUpgradeFlags", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for LedgerUpgradeType +func (e LedgerUpgradeType) ValidEnum(v int32) bool { + _, ok := ledgerUpgradeTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e LedgerUpgradeType) String() string { + name, _ := ledgerUpgradeTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e LedgerUpgradeType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := ledgerUpgradeTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid LedgerUpgradeType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*LedgerUpgradeType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *LedgerUpgradeType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding LedgerUpgradeType: %s", err) + } + if _, ok := ledgerUpgradeTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid LedgerUpgradeType enum value", v) + } + *e = LedgerUpgradeType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerUpgradeType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerUpgradeType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerUpgradeType)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerUpgradeType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s LedgerUpgradeType) xdrType() {} + +var _ xdrType = (*LedgerUpgradeType)(nil) + +// LedgerUpgrade is an XDR Union defines as: +// +// union LedgerUpgrade switch (LedgerUpgradeType type) +// { +// case LEDGER_UPGRADE_VERSION: +// uint32 newLedgerVersion; // update ledgerVersion +// case LEDGER_UPGRADE_BASE_FEE: +// uint32 newBaseFee; // update baseFee +// case LEDGER_UPGRADE_MAX_TX_SET_SIZE: +// uint32 newMaxTxSetSize; // update maxTxSetSize +// case LEDGER_UPGRADE_BASE_RESERVE: +// uint32 newBaseReserve; // update baseReserve +// case LEDGER_UPGRADE_FLAGS: +// uint32 newFlags; // update flags +// }; +// +type LedgerUpgrade struct { + Type LedgerUpgradeType + NewLedgerVersion *Uint32 + NewBaseFee *Uint32 + NewMaxTxSetSize *Uint32 + NewBaseReserve *Uint32 + NewFlags *Uint32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerUpgrade) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerUpgrade +func (u LedgerUpgrade) ArmForSwitch(sw int32) (string, bool) { + switch LedgerUpgradeType(sw) { + case LedgerUpgradeTypeLedgerUpgradeVersion: + return "NewLedgerVersion", true + case LedgerUpgradeTypeLedgerUpgradeBaseFee: + return "NewBaseFee", true + case LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize: + return "NewMaxTxSetSize", true + case LedgerUpgradeTypeLedgerUpgradeBaseReserve: + return "NewBaseReserve", true + case LedgerUpgradeTypeLedgerUpgradeFlags: + return "NewFlags", true + } + return "-", false +} + +// NewLedgerUpgrade creates a new LedgerUpgrade. +func NewLedgerUpgrade(aType LedgerUpgradeType, value interface{}) (result LedgerUpgrade, err error) { + result.Type = aType + switch LedgerUpgradeType(aType) { + case LedgerUpgradeTypeLedgerUpgradeVersion: + tv, ok := value.(Uint32) + if !ok { + err = fmt.Errorf("invalid value, must be Uint32") + return + } + result.NewLedgerVersion = &tv + case LedgerUpgradeTypeLedgerUpgradeBaseFee: + tv, ok := value.(Uint32) + if !ok { + err = fmt.Errorf("invalid value, must be Uint32") + return + } + result.NewBaseFee = &tv + case LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize: + tv, ok := value.(Uint32) + if !ok { + err = fmt.Errorf("invalid value, must be Uint32") + return + } + result.NewMaxTxSetSize = &tv + case LedgerUpgradeTypeLedgerUpgradeBaseReserve: + tv, ok := value.(Uint32) + if !ok { + err = fmt.Errorf("invalid value, must be Uint32") + return + } + result.NewBaseReserve = &tv + case LedgerUpgradeTypeLedgerUpgradeFlags: + tv, ok := value.(Uint32) + if !ok { + err = fmt.Errorf("invalid value, must be Uint32") + return + } + result.NewFlags = &tv + } + return +} + +// MustNewLedgerVersion retrieves the NewLedgerVersion value from the union, +// panicing if the value is not set. +func (u LedgerUpgrade) MustNewLedgerVersion() Uint32 { + val, ok := u.GetNewLedgerVersion() + + if !ok { + panic("arm NewLedgerVersion is not set") + } + + return val +} + +// GetNewLedgerVersion retrieves the NewLedgerVersion value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerUpgrade) GetNewLedgerVersion() (result Uint32, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "NewLedgerVersion" { + result = *u.NewLedgerVersion + ok = true + } + + return +} + +// MustNewBaseFee retrieves the NewBaseFee value from the union, +// panicing if the value is not set. 
+func (u LedgerUpgrade) MustNewBaseFee() Uint32 { + val, ok := u.GetNewBaseFee() + + if !ok { + panic("arm NewBaseFee is not set") + } + + return val +} + +// GetNewBaseFee retrieves the NewBaseFee value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerUpgrade) GetNewBaseFee() (result Uint32, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "NewBaseFee" { + result = *u.NewBaseFee + ok = true + } + + return +} + +// MustNewMaxTxSetSize retrieves the NewMaxTxSetSize value from the union, +// panicing if the value is not set. +func (u LedgerUpgrade) MustNewMaxTxSetSize() Uint32 { + val, ok := u.GetNewMaxTxSetSize() + + if !ok { + panic("arm NewMaxTxSetSize is not set") + } + + return val +} + +// GetNewMaxTxSetSize retrieves the NewMaxTxSetSize value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerUpgrade) GetNewMaxTxSetSize() (result Uint32, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "NewMaxTxSetSize" { + result = *u.NewMaxTxSetSize + ok = true + } + + return +} + +// MustNewBaseReserve retrieves the NewBaseReserve value from the union, +// panicing if the value is not set. +func (u LedgerUpgrade) MustNewBaseReserve() Uint32 { + val, ok := u.GetNewBaseReserve() + + if !ok { + panic("arm NewBaseReserve is not set") + } + + return val +} + +// GetNewBaseReserve retrieves the NewBaseReserve value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerUpgrade) GetNewBaseReserve() (result Uint32, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "NewBaseReserve" { + result = *u.NewBaseReserve + ok = true + } + + return +} + +// MustNewFlags retrieves the NewFlags value from the union, +// panicing if the value is not set. +func (u LedgerUpgrade) MustNewFlags() Uint32 { + val, ok := u.GetNewFlags() + + if !ok { + panic("arm NewFlags is not set") + } + + return val +} + +// GetNewFlags retrieves the NewFlags value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerUpgrade) GetNewFlags() (result Uint32, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "NewFlags" { + result = *u.NewFlags + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u LedgerUpgrade) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch LedgerUpgradeType(u.Type) { + case LedgerUpgradeTypeLedgerUpgradeVersion: + if err = (*u.NewLedgerVersion).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerUpgradeTypeLedgerUpgradeBaseFee: + if err = (*u.NewBaseFee).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize: + if err = (*u.NewMaxTxSetSize).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerUpgradeTypeLedgerUpgradeBaseReserve: + if err = (*u.NewBaseReserve).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerUpgradeTypeLedgerUpgradeFlags: + if err = (*u.NewFlags).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (LedgerUpgradeType) switch value '%d' is not valid for union LedgerUpgrade", u.Type) +} + +var _ decoderFrom = (*LedgerUpgrade)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (u *LedgerUpgrade) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerUpgradeType: %s", err) + } + switch LedgerUpgradeType(u.Type) { + case LedgerUpgradeTypeLedgerUpgradeVersion: + u.NewLedgerVersion = new(Uint32) + nTmp, err = (*u.NewLedgerVersion).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil + case LedgerUpgradeTypeLedgerUpgradeBaseFee: + u.NewBaseFee = new(Uint32) + nTmp, err = (*u.NewBaseFee).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil + case LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize: + u.NewMaxTxSetSize = new(Uint32) + nTmp, err = (*u.NewMaxTxSetSize).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil + case LedgerUpgradeTypeLedgerUpgradeBaseReserve: + u.NewBaseReserve = new(Uint32) + nTmp, err = (*u.NewBaseReserve).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil + case LedgerUpgradeTypeLedgerUpgradeFlags: + u.NewFlags = new(Uint32) + nTmp, err = (*u.NewFlags).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union LedgerUpgrade has invalid Type (LedgerUpgradeType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerUpgrade) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerUpgrade) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerUpgrade)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerUpgrade)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerUpgrade) xdrType() {} + +var _ xdrType = (*LedgerUpgrade)(nil) + +// BucketEntryType is an XDR Enum defines as: +// +// enum BucketEntryType +// { +// METAENTRY = +// -1, // At-and-after protocol 11: bucket metadata, should come first. +// LIVEENTRY = 0, // Before protocol 11: created-or-updated; +// // At-and-after protocol 11: only updated. +// DEADENTRY = 1, +// INITENTRY = 2 // At-and-after protocol 11: only created. +// }; +// +type BucketEntryType int32 + +const ( + BucketEntryTypeMetaentry BucketEntryType = -1 + BucketEntryTypeLiveentry BucketEntryType = 0 + BucketEntryTypeDeadentry BucketEntryType = 1 + BucketEntryTypeInitentry BucketEntryType = 2 +) + +var bucketEntryTypeMap = map[int32]string{ + -1: "BucketEntryTypeMetaentry", + 0: "BucketEntryTypeLiveentry", + 1: "BucketEntryTypeDeadentry", + 2: "BucketEntryTypeInitentry", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for BucketEntryType +func (e BucketEntryType) ValidEnum(v int32) bool { + _, ok := bucketEntryTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e BucketEntryType) String() string { + name, _ := bucketEntryTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. 
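Editorial note: a short sketch of the generated enum helpers above (same assumed import path). ValidEnum guards against values outside the enum map, String returns the empty string for unknown values, and EncodeTo rejects non-members.

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	fmt.Println(xdr.BucketEntryTypeMetaentry.String())     // BucketEntryTypeMetaentry
	fmt.Println(xdr.BucketEntryTypeInitentry.ValidEnum(2)) // true
	fmt.Println(xdr.BucketEntryTypeInitentry.ValidEnum(7)) // false: not in bucketEntryTypeMap

	// Encoding a value that is not a member of the enum fails.
	bad := xdr.BucketEntryType(42)
	if _, err := bad.MarshalBinary(); err != nil {
		fmt.Println("marshal failed:", err)
	}
}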
+func (e BucketEntryType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := bucketEntryTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid BucketEntryType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*BucketEntryType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *BucketEntryType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding BucketEntryType: %s", err) + } + if _, ok := bucketEntryTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid BucketEntryType enum value", v) + } + *e = BucketEntryType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s BucketEntryType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *BucketEntryType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*BucketEntryType)(nil) + _ encoding.BinaryUnmarshaler = (*BucketEntryType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s BucketEntryType) xdrType() {} + +var _ xdrType = (*BucketEntryType)(nil) + +// BucketMetadataExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type BucketMetadataExt struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u BucketMetadataExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of BucketMetadataExt +func (u BucketMetadataExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewBucketMetadataExt creates a new BucketMetadataExt. +func NewBucketMetadataExt(v int32, value interface{}) (result BucketMetadataExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u BucketMetadataExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union BucketMetadataExt", u.V) +} + +var _ decoderFrom = (*BucketMetadataExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *BucketMetadataExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union BucketMetadataExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s BucketMetadataExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *BucketMetadataExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*BucketMetadataExt)(nil) + _ encoding.BinaryUnmarshaler = (*BucketMetadataExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s BucketMetadataExt) xdrType() {} + +var _ xdrType = (*BucketMetadataExt)(nil) + +// BucketMetadata is an XDR Struct defines as: +// +// struct BucketMetadata +// { +// // Indicates the protocol version used to create / merge this bucket. +// uint32 ledgerVersion; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type BucketMetadata struct { + LedgerVersion Uint32 + Ext BucketMetadataExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *BucketMetadata) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LedgerVersion.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*BucketMetadata)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *BucketMetadata) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LedgerVersion.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding BucketMetadataExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s BucketMetadata) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *BucketMetadata) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*BucketMetadata)(nil) + _ encoding.BinaryUnmarshaler = (*BucketMetadata)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s BucketMetadata) xdrType() {} + +var _ xdrType = (*BucketMetadata)(nil) + +// BucketEntry is an XDR Union defines as: +// +// union BucketEntry switch (BucketEntryType type) +// { +// case LIVEENTRY: +// case INITENTRY: +// LedgerEntry liveEntry; +// +// case DEADENTRY: +// LedgerKey deadEntry; +// case METAENTRY: +// BucketMetadata metaEntry; +// }; +// +type BucketEntry struct { + Type BucketEntryType + LiveEntry *LedgerEntry + DeadEntry *LedgerKey + MetaEntry *BucketMetadata +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u BucketEntry) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of BucketEntry +func (u BucketEntry) ArmForSwitch(sw int32) (string, bool) { + switch BucketEntryType(sw) { + case BucketEntryTypeLiveentry: + return "LiveEntry", true + case BucketEntryTypeInitentry: + return "LiveEntry", true + case BucketEntryTypeDeadentry: + return "DeadEntry", true + case BucketEntryTypeMetaentry: + return "MetaEntry", true + } + return "-", false +} + +// NewBucketEntry creates a new BucketEntry. 
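Editorial note: LIVEENTRY and INITENTRY both map to the LiveEntry arm above, so ArmForSwitch returns the same field name for either discriminant. A hedged construction sketch using the METAENTRY arm (same assumed import path; BucketMetadata's zero-value Ext is the valid v=0 case):

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	// LIVEENTRY and INITENTRY share the LiveEntry arm.
	fmt.Println(xdr.BucketEntry{}.ArmForSwitch(int32(xdr.BucketEntryTypeLiveentry))) // LiveEntry true
	fmt.Println(xdr.BucketEntry{}.ArmForSwitch(int32(xdr.BucketEntryTypeInitentry))) // LiveEntry true

	// A METAENTRY carries BucketMetadata; the zero-value Ext is the valid v=0 case.
	meta := xdr.BucketMetadata{LedgerVersion: 17}
	entry, err := xdr.NewBucketEntry(xdr.BucketEntryTypeMetaentry, meta)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(entry.MustMetaEntry().LedgerVersion) // 17
}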
+func NewBucketEntry(aType BucketEntryType, value interface{}) (result BucketEntry, err error) { + result.Type = aType + switch BucketEntryType(aType) { + case BucketEntryTypeLiveentry: + tv, ok := value.(LedgerEntry) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerEntry") + return + } + result.LiveEntry = &tv + case BucketEntryTypeInitentry: + tv, ok := value.(LedgerEntry) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerEntry") + return + } + result.LiveEntry = &tv + case BucketEntryTypeDeadentry: + tv, ok := value.(LedgerKey) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerKey") + return + } + result.DeadEntry = &tv + case BucketEntryTypeMetaentry: + tv, ok := value.(BucketMetadata) + if !ok { + err = fmt.Errorf("invalid value, must be BucketMetadata") + return + } + result.MetaEntry = &tv + } + return +} + +// MustLiveEntry retrieves the LiveEntry value from the union, +// panicing if the value is not set. +func (u BucketEntry) MustLiveEntry() LedgerEntry { + val, ok := u.GetLiveEntry() + + if !ok { + panic("arm LiveEntry is not set") + } + + return val +} + +// GetLiveEntry retrieves the LiveEntry value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u BucketEntry) GetLiveEntry() (result LedgerEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "LiveEntry" { + result = *u.LiveEntry + ok = true + } + + return +} + +// MustDeadEntry retrieves the DeadEntry value from the union, +// panicing if the value is not set. +func (u BucketEntry) MustDeadEntry() LedgerKey { + val, ok := u.GetDeadEntry() + + if !ok { + panic("arm DeadEntry is not set") + } + + return val +} + +// GetDeadEntry retrieves the DeadEntry value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u BucketEntry) GetDeadEntry() (result LedgerKey, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "DeadEntry" { + result = *u.DeadEntry + ok = true + } + + return +} + +// MustMetaEntry retrieves the MetaEntry value from the union, +// panicing if the value is not set. +func (u BucketEntry) MustMetaEntry() BucketMetadata { + val, ok := u.GetMetaEntry() + + if !ok { + panic("arm MetaEntry is not set") + } + + return val +} + +// GetMetaEntry retrieves the MetaEntry value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u BucketEntry) GetMetaEntry() (result BucketMetadata, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "MetaEntry" { + result = *u.MetaEntry + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u BucketEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch BucketEntryType(u.Type) { + case BucketEntryTypeLiveentry: + if err = (*u.LiveEntry).EncodeTo(e); err != nil { + return err + } + return nil + case BucketEntryTypeInitentry: + if err = (*u.LiveEntry).EncodeTo(e); err != nil { + return err + } + return nil + case BucketEntryTypeDeadentry: + if err = (*u.DeadEntry).EncodeTo(e); err != nil { + return err + } + return nil + case BucketEntryTypeMetaentry: + if err = (*u.MetaEntry).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (BucketEntryType) switch value '%d' is not valid for union BucketEntry", u.Type) +} + +var _ decoderFrom = (*BucketEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (u *BucketEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding BucketEntryType: %s", err) + } + switch BucketEntryType(u.Type) { + case BucketEntryTypeLiveentry: + u.LiveEntry = new(LedgerEntry) + nTmp, err = (*u.LiveEntry).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntry: %s", err) + } + return n, nil + case BucketEntryTypeInitentry: + u.LiveEntry = new(LedgerEntry) + nTmp, err = (*u.LiveEntry).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntry: %s", err) + } + return n, nil + case BucketEntryTypeDeadentry: + u.DeadEntry = new(LedgerKey) + nTmp, err = (*u.DeadEntry).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerKey: %s", err) + } + return n, nil + case BucketEntryTypeMetaentry: + u.MetaEntry = new(BucketMetadata) + nTmp, err = (*u.MetaEntry).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding BucketMetadata: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union BucketEntry has invalid Type (BucketEntryType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s BucketEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *BucketEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*BucketEntry)(nil) + _ encoding.BinaryUnmarshaler = (*BucketEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s BucketEntry) xdrType() {} + +var _ xdrType = (*BucketEntry)(nil) + +// TransactionSet is an XDR Struct defines as: +// +// struct TransactionSet +// { +// Hash previousLedgerHash; +// TransactionEnvelope txs<>; +// }; +// +type TransactionSet struct { + PreviousLedgerHash Hash + Txs []TransactionEnvelope +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionSet) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.PreviousLedgerHash.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Txs))); err != nil { + return err + } + for i := 0; i < len(s.Txs); i++ { + if err = s.Txs[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*TransactionSet)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionSet) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.PreviousLedgerHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionEnvelope: %s", err) + } + s.Txs = nil + if l > 0 { + s.Txs = make([]TransactionEnvelope, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Txs[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionEnvelope: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
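Editorial note: the TransactionSet EncodeTo/DecodeFrom pair above shows how the generator handles XDR variable-length arrays: a 4-byte element count followed by the elements. A minimal round-trip sketch with an empty txs<> (same assumed import path; the 32-byte Hash type is defined elsewhere in this file):

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	set := xdr.TransactionSet{
		PreviousLedgerHash: xdr.Hash{0x01}, // fixed-size 32-byte hash
		Txs:                nil,            // encoded as a zero element count
	}

	raw, err := set.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(raw)) // 36: 32-byte hash + 4-byte length prefix

	var decoded xdr.TransactionSet
	if err := decoded.UnmarshalBinary(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(decoded.Txs)) // 0
}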
+func (s TransactionSet) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionSet) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionSet)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionSet)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionSet) xdrType() {} + +var _ xdrType = (*TransactionSet)(nil) + +// TransactionResultPair is an XDR Struct defines as: +// +// struct TransactionResultPair +// { +// Hash transactionHash; +// TransactionResult result; // result for the transaction +// }; +// +type TransactionResultPair struct { + TransactionHash Hash + Result TransactionResult +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionResultPair) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.TransactionHash.EncodeTo(e); err != nil { + return err + } + if err = s.Result.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TransactionResultPair)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionResultPair) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.TransactionHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + nTmp, err = s.Result.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResult: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionResultPair) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionResultPair) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionResultPair)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionResultPair)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionResultPair) xdrType() {} + +var _ xdrType = (*TransactionResultPair)(nil) + +// TransactionResultSet is an XDR Struct defines as: +// +// struct TransactionResultSet +// { +// TransactionResultPair results<>; +// }; +// +type TransactionResultSet struct { + Results []TransactionResultPair +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionResultSet) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeUint(uint32(len(s.Results))); err != nil { + return err + } + for i := 0; i < len(s.Results); i++ { + if err = s.Results[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*TransactionResultSet)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *TransactionResultSet) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultPair: %s", err) + } + s.Results = nil + if l > 0 { + s.Results = make([]TransactionResultPair, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Results[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultPair: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionResultSet) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionResultSet) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionResultSet)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionResultSet)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionResultSet) xdrType() {} + +var _ xdrType = (*TransactionResultSet)(nil) + +// TransactionHistoryEntryExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type TransactionHistoryEntryExt struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TransactionHistoryEntryExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TransactionHistoryEntryExt +func (u TransactionHistoryEntryExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewTransactionHistoryEntryExt creates a new TransactionHistoryEntryExt. +func NewTransactionHistoryEntryExt(v int32, value interface{}) (result TransactionHistoryEntryExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u TransactionHistoryEntryExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union TransactionHistoryEntryExt", u.V) +} + +var _ decoderFrom = (*TransactionHistoryEntryExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TransactionHistoryEntryExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union TransactionHistoryEntryExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionHistoryEntryExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
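Editorial note: a small sketch of the TransactionResultSet helpers above (same assumed import path). An empty results<> array encodes to nothing but its length prefix, and truncated input surfaces the wrapped decoding errors shown in DecodeFrom.

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	// An empty results<> encodes to just the 4-byte element count.
	empty := xdr.TransactionResultSet{}
	raw, err := empty.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(raw)) // 4

	// Truncated input is reported through the wrapped decoding errors above.
	var decoded xdr.TransactionResultSet
	if err := decoded.UnmarshalBinary(raw[:2]); err != nil {
		fmt.Println("decode failed:", err)
	}
}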
+func (s *TransactionHistoryEntryExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionHistoryEntryExt)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionHistoryEntryExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionHistoryEntryExt) xdrType() {} + +var _ xdrType = (*TransactionHistoryEntryExt)(nil) + +// TransactionHistoryEntry is an XDR Struct defines as: +// +// struct TransactionHistoryEntry +// { +// uint32 ledgerSeq; +// TransactionSet txSet; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type TransactionHistoryEntry struct { + LedgerSeq Uint32 + TxSet TransactionSet + Ext TransactionHistoryEntryExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionHistoryEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LedgerSeq.EncodeTo(e); err != nil { + return err + } + if err = s.TxSet.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TransactionHistoryEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionHistoryEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LedgerSeq.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.TxSet.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionSet: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionHistoryEntryExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionHistoryEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionHistoryEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionHistoryEntry)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionHistoryEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionHistoryEntry) xdrType() {} + +var _ xdrType = (*TransactionHistoryEntry)(nil) + +// TransactionHistoryResultEntryExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type TransactionHistoryResultEntryExt struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TransactionHistoryResultEntryExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TransactionHistoryResultEntryExt +func (u TransactionHistoryResultEntryExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewTransactionHistoryResultEntryExt creates a new TransactionHistoryResultEntryExt. 
+func NewTransactionHistoryResultEntryExt(v int32, value interface{}) (result TransactionHistoryResultEntryExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u TransactionHistoryResultEntryExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union TransactionHistoryResultEntryExt", u.V) +} + +var _ decoderFrom = (*TransactionHistoryResultEntryExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TransactionHistoryResultEntryExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union TransactionHistoryResultEntryExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionHistoryResultEntryExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionHistoryResultEntryExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionHistoryResultEntryExt)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionHistoryResultEntryExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionHistoryResultEntryExt) xdrType() {} + +var _ xdrType = (*TransactionHistoryResultEntryExt)(nil) + +// TransactionHistoryResultEntry is an XDR Struct defines as: +// +// struct TransactionHistoryResultEntry +// { +// uint32 ledgerSeq; +// TransactionResultSet txResultSet; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type TransactionHistoryResultEntry struct { + LedgerSeq Uint32 + TxResultSet TransactionResultSet + Ext TransactionHistoryResultEntryExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionHistoryResultEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LedgerSeq.EncodeTo(e); err != nil { + return err + } + if err = s.TxResultSet.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TransactionHistoryResultEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionHistoryResultEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LedgerSeq.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.TxResultSet.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultSet: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionHistoryResultEntryExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
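Editorial note: the reserved "ext" unions above carry no payload for v=0, so the generated constructor ignores the value argument in that case. A hedged sketch building and round-tripping a TransactionHistoryEntry (same assumed import path):

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	// For the v=0 (void) case the value argument is ignored; nil is fine.
	ext, err := xdr.NewTransactionHistoryEntryExt(0, nil)
	if err != nil {
		log.Fatal(err)
	}

	entry := xdr.TransactionHistoryEntry{
		LedgerSeq: 123,
		TxSet:     xdr.TransactionSet{}, // empty tx set; previous hash all zeroes
		Ext:       ext,
	}

	raw, err := entry.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}

	var decoded xdr.TransactionHistoryEntry
	if err := decoded.UnmarshalBinary(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.LedgerSeq) // 123
}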
+func (s TransactionHistoryResultEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionHistoryResultEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionHistoryResultEntry)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionHistoryResultEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionHistoryResultEntry) xdrType() {} + +var _ xdrType = (*TransactionHistoryResultEntry)(nil) + +// LedgerHeaderHistoryEntryExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type LedgerHeaderHistoryEntryExt struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerHeaderHistoryEntryExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerHeaderHistoryEntryExt +func (u LedgerHeaderHistoryEntryExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewLedgerHeaderHistoryEntryExt creates a new LedgerHeaderHistoryEntryExt. +func NewLedgerHeaderHistoryEntryExt(v int32, value interface{}) (result LedgerHeaderHistoryEntryExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u LedgerHeaderHistoryEntryExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union LedgerHeaderHistoryEntryExt", u.V) +} + +var _ decoderFrom = (*LedgerHeaderHistoryEntryExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *LedgerHeaderHistoryEntryExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union LedgerHeaderHistoryEntryExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerHeaderHistoryEntryExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerHeaderHistoryEntryExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerHeaderHistoryEntryExt)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerHeaderHistoryEntryExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s LedgerHeaderHistoryEntryExt) xdrType() {} + +var _ xdrType = (*LedgerHeaderHistoryEntryExt)(nil) + +// LedgerHeaderHistoryEntry is an XDR Struct defines as: +// +// struct LedgerHeaderHistoryEntry +// { +// Hash hash; +// LedgerHeader header; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type LedgerHeaderHistoryEntry struct { + Hash Hash + Header LedgerHeader + Ext LedgerHeaderHistoryEntryExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerHeaderHistoryEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Hash.EncodeTo(e); err != nil { + return err + } + if err = s.Header.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LedgerHeaderHistoryEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerHeaderHistoryEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Hash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + nTmp, err = s.Header.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerHeader: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerHeaderHistoryEntryExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerHeaderHistoryEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerHeaderHistoryEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerHeaderHistoryEntry)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerHeaderHistoryEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerHeaderHistoryEntry) xdrType() {} + +var _ xdrType = (*LedgerHeaderHistoryEntry)(nil) + +// LedgerScpMessages is an XDR Struct defines as: +// +// struct LedgerSCPMessages +// { +// uint32 ledgerSeq; +// SCPEnvelope messages<>; +// }; +// +type LedgerScpMessages struct { + LedgerSeq Uint32 + Messages []ScpEnvelope +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerScpMessages) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LedgerSeq.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Messages))); err != nil { + return err + } + for i := 0; i < len(s.Messages); i++ { + if err = s.Messages[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*LedgerScpMessages)(nil) + +// DecodeFrom decodes this value using the Decoder. 
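Editorial note: history-archive consumers typically only decode LedgerHeaderHistoryEntry values. A hypothetical helper sketch; the function name ledgerSeqOf, the raw input bytes, and the LedgerSeq field of LedgerHeader (defined earlier in this generated file) are assumptions of this example, not part of the diff.

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path
)

// ledgerSeqOf decodes one LedgerHeaderHistoryEntry and returns its sequence number.
// raw would normally come from a checkpoint's ledger headers file.
func ledgerSeqOf(raw []byte) (uint32, error) {
	var entry xdr.LedgerHeaderHistoryEntry
	if err := entry.UnmarshalBinary(raw); err != nil {
		return 0, err
	}
	return uint32(entry.Header.LedgerSeq), nil
}

func main() {
	// With no real archive bytes at hand, empty input simply surfaces the
	// wrapped "decoding Hash" error from DecodeFrom.
	if _, err := ledgerSeqOf(nil); err != nil {
		fmt.Println("decode failed:", err)
	}
}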
+func (s *LedgerScpMessages) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LedgerSeq.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpEnvelope: %s", err) + } + s.Messages = nil + if l > 0 { + s.Messages = make([]ScpEnvelope, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Messages[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpEnvelope: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerScpMessages) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerScpMessages) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerScpMessages)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerScpMessages)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerScpMessages) xdrType() {} + +var _ xdrType = (*LedgerScpMessages)(nil) + +// ScpHistoryEntryV0 is an XDR Struct defines as: +// +// struct SCPHistoryEntryV0 +// { +// SCPQuorumSet quorumSets<>; // additional quorum sets used by ledgerMessages +// LedgerSCPMessages ledgerMessages; +// }; +// +type ScpHistoryEntryV0 struct { + QuorumSets []ScpQuorumSet + LedgerMessages LedgerScpMessages +} + +// EncodeTo encodes this value using the Encoder. +func (s *ScpHistoryEntryV0) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeUint(uint32(len(s.QuorumSets))); err != nil { + return err + } + for i := 0; i < len(s.QuorumSets); i++ { + if err = s.QuorumSets[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.LedgerMessages.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ScpHistoryEntryV0)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ScpHistoryEntryV0) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpQuorumSet: %s", err) + } + s.QuorumSets = nil + if l > 0 { + s.QuorumSets = make([]ScpQuorumSet, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.QuorumSets[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpQuorumSet: %s", err) + } + } + } + nTmp, err = s.LedgerMessages.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerScpMessages: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpHistoryEntryV0) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *ScpHistoryEntryV0) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ScpHistoryEntryV0)(nil) + _ encoding.BinaryUnmarshaler = (*ScpHistoryEntryV0)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ScpHistoryEntryV0) xdrType() {} + +var _ xdrType = (*ScpHistoryEntryV0)(nil) + +// ScpHistoryEntry is an XDR Union defines as: +// +// union SCPHistoryEntry switch (int v) +// { +// case 0: +// SCPHistoryEntryV0 v0; +// }; +// +type ScpHistoryEntry struct { + V int32 + V0 *ScpHistoryEntryV0 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u ScpHistoryEntry) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of ScpHistoryEntry +func (u ScpHistoryEntry) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "V0", true + } + return "-", false +} + +// NewScpHistoryEntry creates a new ScpHistoryEntry. +func NewScpHistoryEntry(v int32, value interface{}) (result ScpHistoryEntry, err error) { + result.V = v + switch int32(v) { + case 0: + tv, ok := value.(ScpHistoryEntryV0) + if !ok { + err = fmt.Errorf("invalid value, must be ScpHistoryEntryV0") + return + } + result.V0 = &tv + } + return +} + +// MustV0 retrieves the V0 value from the union, +// panicing if the value is not set. +func (u ScpHistoryEntry) MustV0() ScpHistoryEntryV0 { + val, ok := u.GetV0() + + if !ok { + panic("arm V0 is not set") + } + + return val +} + +// GetV0 retrieves the V0 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ScpHistoryEntry) GetV0() (result ScpHistoryEntryV0, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V0" { + result = *u.V0 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u ScpHistoryEntry) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + if err = (*u.V0).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union ScpHistoryEntry", u.V) +} + +var _ decoderFrom = (*ScpHistoryEntry)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *ScpHistoryEntry) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + u.V0 = new(ScpHistoryEntryV0) + nTmp, err = (*u.V0).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpHistoryEntryV0: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union ScpHistoryEntry has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ScpHistoryEntry) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
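Editorial note: a hedged sketch of the versioned SCP history container above (same assumed import path). The only valid discriminant is v=0, and GetV0/MustV0 mirror the other generated unions.

package main

import (
	"fmt"
	"log"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	v0 := xdr.ScpHistoryEntryV0{
		// No extra quorum sets or messages; both arrays encode as zero counts.
		LedgerMessages: xdr.LedgerScpMessages{LedgerSeq: 456},
	}

	entry, err := xdr.NewScpHistoryEntry(0, v0)
	if err != nil {
		log.Fatal(err)
	}

	raw, err := entry.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}

	var decoded xdr.ScpHistoryEntry
	if err := decoded.UnmarshalBinary(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.MustV0().LedgerMessages.LedgerSeq) // 456
}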
+func (s *ScpHistoryEntry) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ScpHistoryEntry)(nil) + _ encoding.BinaryUnmarshaler = (*ScpHistoryEntry)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ScpHistoryEntry) xdrType() {} + +var _ xdrType = (*ScpHistoryEntry)(nil) + +// LedgerEntryChangeType is an XDR Enum defines as: +// +// enum LedgerEntryChangeType +// { +// LEDGER_ENTRY_CREATED = 0, // entry was added to the ledger +// LEDGER_ENTRY_UPDATED = 1, // entry was modified in the ledger +// LEDGER_ENTRY_REMOVED = 2, // entry was removed from the ledger +// LEDGER_ENTRY_STATE = 3 // value of the entry +// }; +// +type LedgerEntryChangeType int32 + +const ( + LedgerEntryChangeTypeLedgerEntryCreated LedgerEntryChangeType = 0 + LedgerEntryChangeTypeLedgerEntryUpdated LedgerEntryChangeType = 1 + LedgerEntryChangeTypeLedgerEntryRemoved LedgerEntryChangeType = 2 + LedgerEntryChangeTypeLedgerEntryState LedgerEntryChangeType = 3 +) + +var ledgerEntryChangeTypeMap = map[int32]string{ + 0: "LedgerEntryChangeTypeLedgerEntryCreated", + 1: "LedgerEntryChangeTypeLedgerEntryUpdated", + 2: "LedgerEntryChangeTypeLedgerEntryRemoved", + 3: "LedgerEntryChangeTypeLedgerEntryState", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for LedgerEntryChangeType +func (e LedgerEntryChangeType) ValidEnum(v int32) bool { + _, ok := ledgerEntryChangeTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e LedgerEntryChangeType) String() string { + name, _ := ledgerEntryChangeTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e LedgerEntryChangeType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := ledgerEntryChangeTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid LedgerEntryChangeType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*LedgerEntryChangeType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *LedgerEntryChangeType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChangeType: %s", err) + } + if _, ok := ledgerEntryChangeTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid LedgerEntryChangeType enum value", v) + } + *e = LedgerEntryChangeType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerEntryChangeType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerEntryChangeType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerEntryChangeType)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerEntryChangeType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s LedgerEntryChangeType) xdrType() {} + +var _ xdrType = (*LedgerEntryChangeType)(nil) + +// LedgerEntryChange is an XDR Union defines as: +// +// union LedgerEntryChange switch (LedgerEntryChangeType type) +// { +// case LEDGER_ENTRY_CREATED: +// LedgerEntry created; +// case LEDGER_ENTRY_UPDATED: +// LedgerEntry updated; +// case LEDGER_ENTRY_REMOVED: +// LedgerKey removed; +// case LEDGER_ENTRY_STATE: +// LedgerEntry state; +// }; +// +type LedgerEntryChange struct { + Type LedgerEntryChangeType + Created *LedgerEntry + Updated *LedgerEntry + Removed *LedgerKey + State *LedgerEntry +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerEntryChange) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerEntryChange +func (u LedgerEntryChange) ArmForSwitch(sw int32) (string, bool) { + switch LedgerEntryChangeType(sw) { + case LedgerEntryChangeTypeLedgerEntryCreated: + return "Created", true + case LedgerEntryChangeTypeLedgerEntryUpdated: + return "Updated", true + case LedgerEntryChangeTypeLedgerEntryRemoved: + return "Removed", true + case LedgerEntryChangeTypeLedgerEntryState: + return "State", true + } + return "-", false +} + +// NewLedgerEntryChange creates a new LedgerEntryChange. +func NewLedgerEntryChange(aType LedgerEntryChangeType, value interface{}) (result LedgerEntryChange, err error) { + result.Type = aType + switch LedgerEntryChangeType(aType) { + case LedgerEntryChangeTypeLedgerEntryCreated: + tv, ok := value.(LedgerEntry) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerEntry") + return + } + result.Created = &tv + case LedgerEntryChangeTypeLedgerEntryUpdated: + tv, ok := value.(LedgerEntry) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerEntry") + return + } + result.Updated = &tv + case LedgerEntryChangeTypeLedgerEntryRemoved: + tv, ok := value.(LedgerKey) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerKey") + return + } + result.Removed = &tv + case LedgerEntryChangeTypeLedgerEntryState: + tv, ok := value.(LedgerEntry) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerEntry") + return + } + result.State = &tv + } + return +} + +// MustCreated retrieves the Created value from the union, +// panicing if the value is not set. +func (u LedgerEntryChange) MustCreated() LedgerEntry { + val, ok := u.GetCreated() + + if !ok { + panic("arm Created is not set") + } + + return val +} + +// GetCreated retrieves the Created value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryChange) GetCreated() (result LedgerEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Created" { + result = *u.Created + ok = true + } + + return +} + +// MustUpdated retrieves the Updated value from the union, +// panicing if the value is not set. +func (u LedgerEntryChange) MustUpdated() LedgerEntry { + val, ok := u.GetUpdated() + + if !ok { + panic("arm Updated is not set") + } + + return val +} + +// GetUpdated retrieves the Updated value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u LedgerEntryChange) GetUpdated() (result LedgerEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Updated" { + result = *u.Updated + ok = true + } + + return +} + +// MustRemoved retrieves the Removed value from the union, +// panicing if the value is not set. +func (u LedgerEntryChange) MustRemoved() LedgerKey { + val, ok := u.GetRemoved() + + if !ok { + panic("arm Removed is not set") + } + + return val +} + +// GetRemoved retrieves the Removed value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryChange) GetRemoved() (result LedgerKey, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Removed" { + result = *u.Removed + ok = true + } + + return +} + +// MustState retrieves the State value from the union, +// panicing if the value is not set. +func (u LedgerEntryChange) MustState() LedgerEntry { + val, ok := u.GetState() + + if !ok { + panic("arm State is not set") + } + + return val +} + +// GetState retrieves the State value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerEntryChange) GetState() (result LedgerEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "State" { + result = *u.State + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u LedgerEntryChange) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch LedgerEntryChangeType(u.Type) { + case LedgerEntryChangeTypeLedgerEntryCreated: + if err = (*u.Created).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryChangeTypeLedgerEntryUpdated: + if err = (*u.Updated).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryChangeTypeLedgerEntryRemoved: + if err = (*u.Removed).EncodeTo(e); err != nil { + return err + } + return nil + case LedgerEntryChangeTypeLedgerEntryState: + if err = (*u.State).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (LedgerEntryChangeType) switch value '%d' is not valid for union LedgerEntryChange", u.Type) +} + +var _ decoderFrom = (*LedgerEntryChange)(nil) + +// DecodeFrom decodes this value using the Decoder. 
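Editorial note: a typical consumer of the LedgerEntryChange union switches on Type and then uses the matching Get*/Must* accessor. The sketch below (same assumed import path; describeChange is a hypothetical helper) only reports which arm the discriminant selects, since building real LedgerEntry/LedgerKey values needs types defined elsewhere in this file.

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path
)

// describeChange reports which arm of the union is selected by the discriminant.
func describeChange(change xdr.LedgerEntryChange) string {
	switch change.Type {
	case xdr.LedgerEntryChangeTypeLedgerEntryCreated:
		return "created"
	case xdr.LedgerEntryChangeTypeLedgerEntryUpdated:
		return "updated"
	case xdr.LedgerEntryChangeTypeLedgerEntryRemoved:
		return "removed (LedgerKey arm)"
	case xdr.LedgerEntryChangeTypeLedgerEntryState:
		return "state snapshot"
	default:
		return "unknown"
	}
}

func main() {
	change := xdr.LedgerEntryChange{Type: xdr.LedgerEntryChangeTypeLedgerEntryRemoved}
	fmt.Println(describeChange(change)) // removed (LedgerKey arm)

	// GetCreated reports ok=false here because the discriminant selects Removed.
	if _, ok := change.GetCreated(); !ok {
		fmt.Println("Created arm is not selected for this change")
	}
}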
+func (u *LedgerEntryChange) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChangeType: %s", err) + } + switch LedgerEntryChangeType(u.Type) { + case LedgerEntryChangeTypeLedgerEntryCreated: + u.Created = new(LedgerEntry) + nTmp, err = (*u.Created).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntry: %s", err) + } + return n, nil + case LedgerEntryChangeTypeLedgerEntryUpdated: + u.Updated = new(LedgerEntry) + nTmp, err = (*u.Updated).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntry: %s", err) + } + return n, nil + case LedgerEntryChangeTypeLedgerEntryRemoved: + u.Removed = new(LedgerKey) + nTmp, err = (*u.Removed).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerKey: %s", err) + } + return n, nil + case LedgerEntryChangeTypeLedgerEntryState: + u.State = new(LedgerEntry) + nTmp, err = (*u.State).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntry: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union LedgerEntryChange has invalid Type (LedgerEntryChangeType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerEntryChange) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerEntryChange) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerEntryChange)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerEntryChange)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerEntryChange) xdrType() {} + +var _ xdrType = (*LedgerEntryChange)(nil) + +// LedgerEntryChanges is an XDR Typedef defines as: +// +// typedef LedgerEntryChange LedgerEntryChanges<>; +// +type LedgerEntryChanges []LedgerEntryChange + +// EncodeTo encodes this value using the Encoder. +func (s LedgerEntryChanges) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeUint(uint32(len(s))); err != nil { + return err + } + for i := 0; i < len(s); i++ { + if err = s[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*LedgerEntryChanges)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerEntryChanges) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChange: %s", err) + } + (*s) = nil + if l > 0 { + (*s) = make([]LedgerEntryChange, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*s)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChange: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LedgerEntryChanges) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *LedgerEntryChanges) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerEntryChanges)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerEntryChanges)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerEntryChanges) xdrType() {} + +var _ xdrType = (*LedgerEntryChanges)(nil) + +// OperationMeta is an XDR Struct defines as: +// +// struct OperationMeta +// { +// LedgerEntryChanges changes; +// }; +// +type OperationMeta struct { + Changes LedgerEntryChanges +} + +// EncodeTo encodes this value using the Encoder. +func (s *OperationMeta) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Changes.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*OperationMeta)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *OperationMeta) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Changes.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChanges: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s OperationMeta) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *OperationMeta) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*OperationMeta)(nil) + _ encoding.BinaryUnmarshaler = (*OperationMeta)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s OperationMeta) xdrType() {} + +var _ xdrType = (*OperationMeta)(nil) + +// TransactionMetaV1 is an XDR Struct defines as: +// +// struct TransactionMetaV1 +// { +// LedgerEntryChanges txChanges; // tx level changes if any +// OperationMeta operations<>; // meta for each operation +// }; +// +type TransactionMetaV1 struct { + TxChanges LedgerEntryChanges + Operations []OperationMeta +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionMetaV1) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.TxChanges.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Operations))); err != nil { + return err + } + for i := 0; i < len(s.Operations); i++ { + if err = s.Operations[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*TransactionMetaV1)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionMetaV1) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.TxChanges.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChanges: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationMeta: %s", err) + } + s.Operations = nil + if l > 0 { + s.Operations = make([]OperationMeta, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Operations[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationMeta: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
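Editorial note: since LedgerEntryChanges is just a named slice, per-operation metadata can be aggregated with ordinary slice iteration. A small sketch (same assumed import path; countChanges is a hypothetical helper):

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path
)

// countChanges totals the ledger entry changes across all operation metas.
func countChanges(ops []xdr.OperationMeta) int {
	total := 0
	for _, op := range ops {
		total += len(op.Changes) // Changes is a LedgerEntryChanges slice
	}
	return total
}

func main() {
	meta := xdr.TransactionMetaV1{
		Operations: []xdr.OperationMeta{
			{Changes: xdr.LedgerEntryChanges{}},
			{Changes: xdr.LedgerEntryChanges{}},
		},
	}
	fmt.Println(countChanges(meta.Operations)) // 0
}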
+func (s TransactionMetaV1) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionMetaV1) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionMetaV1)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionMetaV1)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionMetaV1) xdrType() {} + +var _ xdrType = (*TransactionMetaV1)(nil) + +// TransactionMetaV2 is an XDR Struct defines as: +// +// struct TransactionMetaV2 +// { +// LedgerEntryChanges txChangesBefore; // tx level changes before operations +// // are applied if any +// OperationMeta operations<>; // meta for each operation +// LedgerEntryChanges txChangesAfter; // tx level changes after operations are +// // applied if any +// }; +// +type TransactionMetaV2 struct { + TxChangesBefore LedgerEntryChanges + Operations []OperationMeta + TxChangesAfter LedgerEntryChanges +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionMetaV2) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.TxChangesBefore.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Operations))); err != nil { + return err + } + for i := 0; i < len(s.Operations); i++ { + if err = s.Operations[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.TxChangesAfter.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TransactionMetaV2)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionMetaV2) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.TxChangesBefore.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChanges: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationMeta: %s", err) + } + s.Operations = nil + if l > 0 { + s.Operations = make([]OperationMeta, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Operations[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationMeta: %s", err) + } + } + } + nTmp, err = s.TxChangesAfter.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChanges: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionMetaV2) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionMetaV2) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionMetaV2)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionMetaV2)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s TransactionMetaV2) xdrType() {} + +var _ xdrType = (*TransactionMetaV2)(nil) + +// TransactionMeta is an XDR Union defines as: +// +// union TransactionMeta switch (int v) +// { +// case 0: +// OperationMeta operations<>; +// case 1: +// TransactionMetaV1 v1; +// case 2: +// TransactionMetaV2 v2; +// }; +// +type TransactionMeta struct { + V int32 + Operations *[]OperationMeta + V1 *TransactionMetaV1 + V2 *TransactionMetaV2 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TransactionMeta) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TransactionMeta +func (u TransactionMeta) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "Operations", true + case 1: + return "V1", true + case 2: + return "V2", true + } + return "-", false +} + +// NewTransactionMeta creates a new TransactionMeta. +func NewTransactionMeta(v int32, value interface{}) (result TransactionMeta, err error) { + result.V = v + switch int32(v) { + case 0: + tv, ok := value.([]OperationMeta) + if !ok { + err = fmt.Errorf("invalid value, must be []OperationMeta") + return + } + result.Operations = &tv + case 1: + tv, ok := value.(TransactionMetaV1) + if !ok { + err = fmt.Errorf("invalid value, must be TransactionMetaV1") + return + } + result.V1 = &tv + case 2: + tv, ok := value.(TransactionMetaV2) + if !ok { + err = fmt.Errorf("invalid value, must be TransactionMetaV2") + return + } + result.V2 = &tv + } + return +} + +// MustOperations retrieves the Operations value from the union, +// panicing if the value is not set. +func (u TransactionMeta) MustOperations() []OperationMeta { + val, ok := u.GetOperations() + + if !ok { + panic("arm Operations is not set") + } + + return val +} + +// GetOperations retrieves the Operations value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TransactionMeta) GetOperations() (result []OperationMeta, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "Operations" { + result = *u.Operations + ok = true + } + + return +} + +// MustV1 retrieves the V1 value from the union, +// panicing if the value is not set. +func (u TransactionMeta) MustV1() TransactionMetaV1 { + val, ok := u.GetV1() + + if !ok { + panic("arm V1 is not set") + } + + return val +} + +// GetV1 retrieves the V1 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TransactionMeta) GetV1() (result TransactionMetaV1, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V1" { + result = *u.V1 + ok = true + } + + return +} + +// MustV2 retrieves the V2 value from the union, +// panicing if the value is not set. +func (u TransactionMeta) MustV2() TransactionMetaV2 { + val, ok := u.GetV2() + + if !ok { + panic("arm V2 is not set") + } + + return val +} + +// GetV2 retrieves the V2 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TransactionMeta) GetV2() (result TransactionMetaV2, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V2" { + result = *u.V2 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
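+//
+// Editor's note (illustrative sketch, not generator output): the union is
+// normally built with NewTransactionMeta and then streamed through an
+// xdr.Encoder, for example:
+//
+//	meta, err := NewTransactionMeta(2, TransactionMetaV2{})
+//	if err != nil {
+//		// invalid arm/value combination
+//	}
+//	var buf bytes.Buffer
+//	if err := meta.EncodeTo(xdr.NewEncoder(&buf)); err != nil {
+//		// handle encoding failure
+//	}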
+func (u TransactionMeta) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + if _, err = e.EncodeUint(uint32(len((*u.Operations)))); err != nil { + return err + } + for i := 0; i < len((*u.Operations)); i++ { + if err = (*u.Operations)[i].EncodeTo(e); err != nil { + return err + } + } + return nil + case 1: + if err = (*u.V1).EncodeTo(e); err != nil { + return err + } + return nil + case 2: + if err = (*u.V2).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union TransactionMeta", u.V) +} + +var _ decoderFrom = (*TransactionMeta)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TransactionMeta) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + u.Operations = new([]OperationMeta) + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationMeta: %s", err) + } + (*u.Operations) = nil + if l > 0 { + (*u.Operations) = make([]OperationMeta, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*u.Operations)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationMeta: %s", err) + } + } + } + return n, nil + case 1: + u.V1 = new(TransactionMetaV1) + nTmp, err = (*u.V1).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionMetaV1: %s", err) + } + return n, nil + case 2: + u.V2 = new(TransactionMetaV2) + nTmp, err = (*u.V2).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionMetaV2: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union TransactionMeta has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionMeta) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionMeta) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionMeta)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionMeta)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionMeta) xdrType() {} + +var _ xdrType = (*TransactionMeta)(nil) + +// TransactionResultMeta is an XDR Struct defines as: +// +// struct TransactionResultMeta +// { +// TransactionResultPair result; +// LedgerEntryChanges feeProcessing; +// TransactionMeta txApplyProcessing; +// }; +// +type TransactionResultMeta struct { + Result TransactionResultPair + FeeProcessing LedgerEntryChanges + TxApplyProcessing TransactionMeta +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionResultMeta) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Result.EncodeTo(e); err != nil { + return err + } + if err = s.FeeProcessing.EncodeTo(e); err != nil { + return err + } + if err = s.TxApplyProcessing.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TransactionResultMeta)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *TransactionResultMeta) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Result.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultPair: %s", err) + } + nTmp, err = s.FeeProcessing.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChanges: %s", err) + } + nTmp, err = s.TxApplyProcessing.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionMeta: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionResultMeta) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionResultMeta) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionResultMeta)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionResultMeta)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionResultMeta) xdrType() {} + +var _ xdrType = (*TransactionResultMeta)(nil) + +// UpgradeEntryMeta is an XDR Struct defines as: +// +// struct UpgradeEntryMeta +// { +// LedgerUpgrade upgrade; +// LedgerEntryChanges changes; +// }; +// +type UpgradeEntryMeta struct { + Upgrade LedgerUpgrade + Changes LedgerEntryChanges +} + +// EncodeTo encodes this value using the Encoder. +func (s *UpgradeEntryMeta) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Upgrade.EncodeTo(e); err != nil { + return err + } + if err = s.Changes.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*UpgradeEntryMeta)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *UpgradeEntryMeta) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Upgrade.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerUpgrade: %s", err) + } + nTmp, err = s.Changes.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerEntryChanges: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s UpgradeEntryMeta) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *UpgradeEntryMeta) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*UpgradeEntryMeta)(nil) + _ encoding.BinaryUnmarshaler = (*UpgradeEntryMeta)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s UpgradeEntryMeta) xdrType() {} + +var _ xdrType = (*UpgradeEntryMeta)(nil) + +// LedgerCloseMetaV0 is an XDR Struct defines as: +// +// struct LedgerCloseMetaV0 +// { +// LedgerHeaderHistoryEntry ledgerHeader; +// // NB: txSet is sorted in "Hash order" +// TransactionSet txSet; +// +// // NB: transactions are sorted in apply order here +// // fees for all transactions are processed first +// // followed by applying transactions +// TransactionResultMeta txProcessing<>; +// +// // upgrades are applied last +// UpgradeEntryMeta upgradesProcessing<>; +// +// // other misc information attached to the ledger close +// SCPHistoryEntry scpInfo<>; +// }; +// +type LedgerCloseMetaV0 struct { + LedgerHeader LedgerHeaderHistoryEntry + TxSet TransactionSet + TxProcessing []TransactionResultMeta + UpgradesProcessing []UpgradeEntryMeta + ScpInfo []ScpHistoryEntry +} + +// EncodeTo encodes this value using the Encoder. +func (s *LedgerCloseMetaV0) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LedgerHeader.EncodeTo(e); err != nil { + return err + } + if err = s.TxSet.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.TxProcessing))); err != nil { + return err + } + for i := 0; i < len(s.TxProcessing); i++ { + if err = s.TxProcessing[i].EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeUint(uint32(len(s.UpgradesProcessing))); err != nil { + return err + } + for i := 0; i < len(s.UpgradesProcessing); i++ { + if err = s.UpgradesProcessing[i].EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeUint(uint32(len(s.ScpInfo))); err != nil { + return err + } + for i := 0; i < len(s.ScpInfo); i++ { + if err = s.ScpInfo[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*LedgerCloseMetaV0)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LedgerCloseMetaV0) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LedgerHeader.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerHeaderHistoryEntry: %s", err) + } + nTmp, err = s.TxSet.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionSet: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultMeta: %s", err) + } + s.TxProcessing = nil + if l > 0 { + s.TxProcessing = make([]TransactionResultMeta, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.TxProcessing[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultMeta: %s", err) + } + } + } + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding UpgradeEntryMeta: %s", err) + } + s.UpgradesProcessing = nil + if l > 0 { + s.UpgradesProcessing = make([]UpgradeEntryMeta, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.UpgradesProcessing[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding UpgradeEntryMeta: %s", err) + } + } + } + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpHistoryEntry: %s", err) + } + s.ScpInfo = nil + if l > 0 { + s.ScpInfo = make([]ScpHistoryEntry, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.ScpInfo[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpHistoryEntry: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s LedgerCloseMetaV0) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerCloseMetaV0) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerCloseMetaV0)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerCloseMetaV0)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerCloseMetaV0) xdrType() {} + +var _ xdrType = (*LedgerCloseMetaV0)(nil) + +// LedgerCloseMeta is an XDR Union defines as: +// +// union LedgerCloseMeta switch (int v) +// { +// case 0: +// LedgerCloseMetaV0 v0; +// }; +// +type LedgerCloseMeta struct { + V int32 + V0 *LedgerCloseMetaV0 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LedgerCloseMeta) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LedgerCloseMeta +func (u LedgerCloseMeta) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "V0", true + } + return "-", false +} + +// NewLedgerCloseMeta creates a new LedgerCloseMeta. +func NewLedgerCloseMeta(v int32, value interface{}) (result LedgerCloseMeta, err error) { + result.V = v + switch int32(v) { + case 0: + tv, ok := value.(LedgerCloseMetaV0) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerCloseMetaV0") + return + } + result.V0 = &tv + } + return +} + +// MustV0 retrieves the V0 value from the union, +// panicing if the value is not set. +func (u LedgerCloseMeta) MustV0() LedgerCloseMetaV0 { + val, ok := u.GetV0() + + if !ok { + panic("arm V0 is not set") + } + + return val +} + +// GetV0 retrieves the V0 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LedgerCloseMeta) GetV0() (result LedgerCloseMetaV0, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V0" { + result = *u.V0 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u LedgerCloseMeta) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + if err = (*u.V0).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union LedgerCloseMeta", u.V) +} + +var _ decoderFrom = (*LedgerCloseMeta)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *LedgerCloseMeta) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + u.V0 = new(LedgerCloseMetaV0) + nTmp, err = (*u.V0).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerCloseMetaV0: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union LedgerCloseMeta has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
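+//
+// Editor's note (illustrative sketch, not generator output): callers
+// usually unmarshal a ledger close meta and then select the versioned
+// payload through the generated accessors, for example:
+//
+//	var lcm LedgerCloseMeta
+//	if err := lcm.UnmarshalBinary(rawXdr); err != nil {
+//		// handle malformed input
+//	}
+//	if v0, ok := lcm.GetV0(); ok {
+//		fmt.Println(len(v0.TxProcessing))
+//	}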
+func (s LedgerCloseMeta) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LedgerCloseMeta) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LedgerCloseMeta)(nil) + _ encoding.BinaryUnmarshaler = (*LedgerCloseMeta)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LedgerCloseMeta) xdrType() {} + +var _ xdrType = (*LedgerCloseMeta)(nil) + +// ErrorCode is an XDR Enum defines as: +// +// enum ErrorCode +// { +// ERR_MISC = 0, // Unspecific error +// ERR_DATA = 1, // Malformed data +// ERR_CONF = 2, // Misconfiguration error +// ERR_AUTH = 3, // Authentication failure +// ERR_LOAD = 4 // System overloaded +// }; +// +type ErrorCode int32 + +const ( + ErrorCodeErrMisc ErrorCode = 0 + ErrorCodeErrData ErrorCode = 1 + ErrorCodeErrConf ErrorCode = 2 + ErrorCodeErrAuth ErrorCode = 3 + ErrorCodeErrLoad ErrorCode = 4 +) + +var errorCodeMap = map[int32]string{ + 0: "ErrorCodeErrMisc", + 1: "ErrorCodeErrData", + 2: "ErrorCodeErrConf", + 3: "ErrorCodeErrAuth", + 4: "ErrorCodeErrLoad", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for ErrorCode +func (e ErrorCode) ValidEnum(v int32) bool { + _, ok := errorCodeMap[v] + return ok +} + +// String returns the name of `e` +func (e ErrorCode) String() string { + name, _ := errorCodeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e ErrorCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := errorCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ErrorCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ErrorCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *ErrorCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ErrorCode: %s", err) + } + if _, ok := errorCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ErrorCode enum value", v) + } + *e = ErrorCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ErrorCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ErrorCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ErrorCode)(nil) + _ encoding.BinaryUnmarshaler = (*ErrorCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ErrorCode) xdrType() {} + +var _ xdrType = (*ErrorCode)(nil) + +// Error is an XDR Struct defines as: +// +// struct Error +// { +// ErrorCode code; +// string msg<100>; +// }; +// +type Error struct { + Code ErrorCode + Msg string `xdrmaxsize:"100"` +} + +// EncodeTo encodes this value using the Encoder. 
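+//
+// Editor's note (illustrative sketch, not generator output): the struct can
+// be populated directly; encoding rejects out-of-range ErrorCode values and
+// decoding enforces the 100-byte msg limit:
+//
+//	msg := Error{Code: ErrorCodeErrAuth, Msg: "bad MAC"}
+//	raw, err := msg.MarshalBinary() // 4-byte enum, then XDR string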
+func (s *Error) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Code.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeString(string(s.Msg)); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Error)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Error) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ErrorCode: %s", err) + } + s.Msg, nTmp, err = d.DecodeString(100) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Msg: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Error) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Error) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Error)(nil) + _ encoding.BinaryUnmarshaler = (*Error)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Error) xdrType() {} + +var _ xdrType = (*Error)(nil) + +// AuthCert is an XDR Struct defines as: +// +// struct AuthCert +// { +// Curve25519Public pubkey; +// uint64 expiration; +// Signature sig; +// }; +// +type AuthCert struct { + Pubkey Curve25519Public + Expiration Uint64 + Sig Signature +} + +// EncodeTo encodes this value using the Encoder. +func (s *AuthCert) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Pubkey.EncodeTo(e); err != nil { + return err + } + if err = s.Expiration.EncodeTo(e); err != nil { + return err + } + if err = s.Sig.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AuthCert)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *AuthCert) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Pubkey.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Curve25519Public: %s", err) + } + nTmp, err = s.Expiration.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.Sig.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signature: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AuthCert) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AuthCert) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AuthCert)(nil) + _ encoding.BinaryUnmarshaler = (*AuthCert)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s AuthCert) xdrType() {} + +var _ xdrType = (*AuthCert)(nil) + +// Hello is an XDR Struct defines as: +// +// struct Hello +// { +// uint32 ledgerVersion; +// uint32 overlayVersion; +// uint32 overlayMinVersion; +// Hash networkID; +// string versionStr<100>; +// int listeningPort; +// NodeID peerID; +// AuthCert cert; +// uint256 nonce; +// }; +// +type Hello struct { + LedgerVersion Uint32 + OverlayVersion Uint32 + OverlayMinVersion Uint32 + NetworkId Hash + VersionStr string `xdrmaxsize:"100"` + ListeningPort int32 + PeerId NodeId + Cert AuthCert + Nonce Uint256 +} + +// EncodeTo encodes this value using the Encoder. +func (s *Hello) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LedgerVersion.EncodeTo(e); err != nil { + return err + } + if err = s.OverlayVersion.EncodeTo(e); err != nil { + return err + } + if err = s.OverlayMinVersion.EncodeTo(e); err != nil { + return err + } + if err = s.NetworkId.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeString(string(s.VersionStr)); err != nil { + return err + } + if _, err = e.EncodeInt(int32(s.ListeningPort)); err != nil { + return err + } + if err = s.PeerId.EncodeTo(e); err != nil { + return err + } + if err = s.Cert.EncodeTo(e); err != nil { + return err + } + if err = s.Nonce.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Hello)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Hello) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LedgerVersion.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.OverlayVersion.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.OverlayMinVersion.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.NetworkId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + s.VersionStr, nTmp, err = d.DecodeString(100) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding VersionStr: %s", err) + } + s.ListeningPort, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + nTmp, err = s.PeerId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + nTmp, err = s.Cert.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AuthCert: %s", err) + } + nTmp, err = s.Nonce.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Hello) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Hello) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Hello)(nil) + _ encoding.BinaryUnmarshaler = (*Hello)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Hello) xdrType() {} + +var _ xdrType = (*Hello)(nil) + +// Auth is an XDR Struct defines as: +// +// struct Auth +// { +// // Empty message, just to confirm +// // establishment of MAC keys. 
+// int unused; +// }; +// +type Auth struct { + Unused int32 +} + +// EncodeTo encodes this value using the Encoder. +func (s *Auth) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(s.Unused)); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Auth)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Auth) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + s.Unused, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Auth) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Auth) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Auth)(nil) + _ encoding.BinaryUnmarshaler = (*Auth)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Auth) xdrType() {} + +var _ xdrType = (*Auth)(nil) + +// IpAddrType is an XDR Enum defines as: +// +// enum IPAddrType +// { +// IPv4 = 0, +// IPv6 = 1 +// }; +// +type IpAddrType int32 + +const ( + IpAddrTypeIPv4 IpAddrType = 0 + IpAddrTypeIPv6 IpAddrType = 1 +) + +var ipAddrTypeMap = map[int32]string{ + 0: "IpAddrTypeIPv4", + 1: "IpAddrTypeIPv6", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for IpAddrType +func (e IpAddrType) ValidEnum(v int32) bool { + _, ok := ipAddrTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e IpAddrType) String() string { + name, _ := ipAddrTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e IpAddrType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := ipAddrTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid IpAddrType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*IpAddrType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *IpAddrType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding IpAddrType: %s", err) + } + if _, ok := ipAddrTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid IpAddrType enum value", v) + } + *e = IpAddrType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s IpAddrType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *IpAddrType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*IpAddrType)(nil) + _ encoding.BinaryUnmarshaler = (*IpAddrType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s IpAddrType) xdrType() {} + +var _ xdrType = (*IpAddrType)(nil) + +// PeerAddressIp is an XDR NestedUnion defines as: +// +// union switch (IPAddrType type) +// { +// case IPv4: +// opaque ipv4[4]; +// case IPv6: +// opaque ipv6[16]; +// } +// +type PeerAddressIp struct { + Type IpAddrType + Ipv4 *[4]byte `xdrmaxsize:"4"` + Ipv6 *[16]byte `xdrmaxsize:"16"` +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u PeerAddressIp) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of PeerAddressIp +func (u PeerAddressIp) ArmForSwitch(sw int32) (string, bool) { + switch IpAddrType(sw) { + case IpAddrTypeIPv4: + return "Ipv4", true + case IpAddrTypeIPv6: + return "Ipv6", true + } + return "-", false +} + +// NewPeerAddressIp creates a new PeerAddressIp. +func NewPeerAddressIp(aType IpAddrType, value interface{}) (result PeerAddressIp, err error) { + result.Type = aType + switch IpAddrType(aType) { + case IpAddrTypeIPv4: + tv, ok := value.([4]byte) + if !ok { + err = fmt.Errorf("invalid value, must be [4]byte") + return + } + result.Ipv4 = &tv + case IpAddrTypeIPv6: + tv, ok := value.([16]byte) + if !ok { + err = fmt.Errorf("invalid value, must be [16]byte") + return + } + result.Ipv6 = &tv + } + return +} + +// MustIpv4 retrieves the Ipv4 value from the union, +// panicing if the value is not set. +func (u PeerAddressIp) MustIpv4() [4]byte { + val, ok := u.GetIpv4() + + if !ok { + panic("arm Ipv4 is not set") + } + + return val +} + +// GetIpv4 retrieves the Ipv4 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u PeerAddressIp) GetIpv4() (result [4]byte, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Ipv4" { + result = *u.Ipv4 + ok = true + } + + return +} + +// MustIpv6 retrieves the Ipv6 value from the union, +// panicing if the value is not set. +func (u PeerAddressIp) MustIpv6() [16]byte { + val, ok := u.GetIpv6() + + if !ok { + panic("arm Ipv6 is not set") + } + + return val +} + +// GetIpv6 retrieves the Ipv6 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u PeerAddressIp) GetIpv6() (result [16]byte, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Ipv6" { + result = *u.Ipv6 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u PeerAddressIp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch IpAddrType(u.Type) { + case IpAddrTypeIPv4: + if _, err = e.EncodeFixedOpaque((*u.Ipv4)[:]); err != nil { + return err + } + return nil + case IpAddrTypeIPv6: + if _, err = e.EncodeFixedOpaque((*u.Ipv6)[:]); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (IpAddrType) switch value '%d' is not valid for union PeerAddressIp", u.Type) +} + +var _ decoderFrom = (*PeerAddressIp)(nil) + +// DecodeFrom decodes this value using the Decoder. 
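+//
+// Editor's note (illustrative sketch, not generator output): the nested
+// union carries its IPv4/IPv6 arm as a fixed-size opaque, so construction
+// goes through NewPeerAddressIp with the matching array type:
+//
+//	ip, err := NewPeerAddressIp(IpAddrTypeIPv4, [4]byte{127, 0, 0, 1})
+//	if err == nil {
+//		fmt.Println(ip.MustIpv4())
+//	}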
+func (u *PeerAddressIp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding IpAddrType: %s", err) + } + switch IpAddrType(u.Type) { + case IpAddrTypeIPv4: + u.Ipv4 = new([4]byte) + nTmp, err = d.DecodeFixedOpaqueInplace((*u.Ipv4)[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Ipv4: %s", err) + } + return n, nil + case IpAddrTypeIPv6: + u.Ipv6 = new([16]byte) + nTmp, err = d.DecodeFixedOpaqueInplace((*u.Ipv6)[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Ipv6: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union PeerAddressIp has invalid Type (IpAddrType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PeerAddressIp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PeerAddressIp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*PeerAddressIp)(nil) + _ encoding.BinaryUnmarshaler = (*PeerAddressIp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PeerAddressIp) xdrType() {} + +var _ xdrType = (*PeerAddressIp)(nil) + +// PeerAddress is an XDR Struct defines as: +// +// struct PeerAddress +// { +// union switch (IPAddrType type) +// { +// case IPv4: +// opaque ipv4[4]; +// case IPv6: +// opaque ipv6[16]; +// } +// ip; +// uint32 port; +// uint32 numFailures; +// }; +// +type PeerAddress struct { + Ip PeerAddressIp + Port Uint32 + NumFailures Uint32 +} + +// EncodeTo encodes this value using the Encoder. +func (s *PeerAddress) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Ip.EncodeTo(e); err != nil { + return err + } + if err = s.Port.EncodeTo(e); err != nil { + return err + } + if err = s.NumFailures.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*PeerAddress)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *PeerAddress) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Ip.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PeerAddressIp: %s", err) + } + nTmp, err = s.Port.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.NumFailures.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PeerAddress) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PeerAddress) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*PeerAddress)(nil) + _ encoding.BinaryUnmarshaler = (*PeerAddress)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s PeerAddress) xdrType() {} + +var _ xdrType = (*PeerAddress)(nil) + +// MessageType is an XDR Enum defines as: +// +// enum MessageType +// { +// ERROR_MSG = 0, +// AUTH = 2, +// DONT_HAVE = 3, +// +// GET_PEERS = 4, // gets a list of peers this guy knows about +// PEERS = 5, +// +// GET_TX_SET = 6, // gets a particular txset by hash +// TX_SET = 7, +// +// TRANSACTION = 8, // pass on a tx you have heard about +// +// // SCP +// GET_SCP_QUORUMSET = 9, +// SCP_QUORUMSET = 10, +// SCP_MESSAGE = 11, +// GET_SCP_STATE = 12, +// +// // new messages +// HELLO = 13, +// +// SURVEY_REQUEST = 14, +// SURVEY_RESPONSE = 15 +// }; +// +type MessageType int32 + +const ( + MessageTypeErrorMsg MessageType = 0 + MessageTypeAuth MessageType = 2 + MessageTypeDontHave MessageType = 3 + MessageTypeGetPeers MessageType = 4 + MessageTypePeers MessageType = 5 + MessageTypeGetTxSet MessageType = 6 + MessageTypeTxSet MessageType = 7 + MessageTypeTransaction MessageType = 8 + MessageTypeGetScpQuorumset MessageType = 9 + MessageTypeScpQuorumset MessageType = 10 + MessageTypeScpMessage MessageType = 11 + MessageTypeGetScpState MessageType = 12 + MessageTypeHello MessageType = 13 + MessageTypeSurveyRequest MessageType = 14 + MessageTypeSurveyResponse MessageType = 15 +) + +var messageTypeMap = map[int32]string{ + 0: "MessageTypeErrorMsg", + 2: "MessageTypeAuth", + 3: "MessageTypeDontHave", + 4: "MessageTypeGetPeers", + 5: "MessageTypePeers", + 6: "MessageTypeGetTxSet", + 7: "MessageTypeTxSet", + 8: "MessageTypeTransaction", + 9: "MessageTypeGetScpQuorumset", + 10: "MessageTypeScpQuorumset", + 11: "MessageTypeScpMessage", + 12: "MessageTypeGetScpState", + 13: "MessageTypeHello", + 14: "MessageTypeSurveyRequest", + 15: "MessageTypeSurveyResponse", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for MessageType +func (e MessageType) ValidEnum(v int32) bool { + _, ok := messageTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e MessageType) String() string { + name, _ := messageTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e MessageType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := messageTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid MessageType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*MessageType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *MessageType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding MessageType: %s", err) + } + if _, ok := messageTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid MessageType enum value", v) + } + *e = MessageType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s MessageType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *MessageType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*MessageType)(nil) + _ encoding.BinaryUnmarshaler = (*MessageType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s MessageType) xdrType() {} + +var _ xdrType = (*MessageType)(nil) + +// DontHave is an XDR Struct defines as: +// +// struct DontHave +// { +// MessageType type; +// uint256 reqHash; +// }; +// +type DontHave struct { + Type MessageType + ReqHash Uint256 +} + +// EncodeTo encodes this value using the Encoder. +func (s *DontHave) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Type.EncodeTo(e); err != nil { + return err + } + if err = s.ReqHash.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*DontHave)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *DontHave) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MessageType: %s", err) + } + nTmp, err = s.ReqHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s DontHave) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *DontHave) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*DontHave)(nil) + _ encoding.BinaryUnmarshaler = (*DontHave)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s DontHave) xdrType() {} + +var _ xdrType = (*DontHave)(nil) + +// SurveyMessageCommandType is an XDR Enum defines as: +// +// enum SurveyMessageCommandType +// { +// SURVEY_TOPOLOGY = 0 +// }; +// +type SurveyMessageCommandType int32 + +const ( + SurveyMessageCommandTypeSurveyTopology SurveyMessageCommandType = 0 +) + +var surveyMessageCommandTypeMap = map[int32]string{ + 0: "SurveyMessageCommandTypeSurveyTopology", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for SurveyMessageCommandType +func (e SurveyMessageCommandType) ValidEnum(v int32) bool { + _, ok := surveyMessageCommandTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e SurveyMessageCommandType) String() string { + name, _ := surveyMessageCommandTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e SurveyMessageCommandType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := surveyMessageCommandTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid SurveyMessageCommandType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*SurveyMessageCommandType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *SurveyMessageCommandType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding SurveyMessageCommandType: %s", err) + } + if _, ok := surveyMessageCommandTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid SurveyMessageCommandType enum value", v) + } + *e = SurveyMessageCommandType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s SurveyMessageCommandType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SurveyMessageCommandType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SurveyMessageCommandType)(nil) + _ encoding.BinaryUnmarshaler = (*SurveyMessageCommandType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SurveyMessageCommandType) xdrType() {} + +var _ xdrType = (*SurveyMessageCommandType)(nil) + +// SurveyRequestMessage is an XDR Struct defines as: +// +// struct SurveyRequestMessage +// { +// NodeID surveyorPeerID; +// NodeID surveyedPeerID; +// uint32 ledgerNum; +// Curve25519Public encryptionKey; +// SurveyMessageCommandType commandType; +// }; +// +type SurveyRequestMessage struct { + SurveyorPeerId NodeId + SurveyedPeerId NodeId + LedgerNum Uint32 + EncryptionKey Curve25519Public + CommandType SurveyMessageCommandType +} + +// EncodeTo encodes this value using the Encoder. +func (s *SurveyRequestMessage) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SurveyorPeerId.EncodeTo(e); err != nil { + return err + } + if err = s.SurveyedPeerId.EncodeTo(e); err != nil { + return err + } + if err = s.LedgerNum.EncodeTo(e); err != nil { + return err + } + if err = s.EncryptionKey.EncodeTo(e); err != nil { + return err + } + if err = s.CommandType.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*SurveyRequestMessage)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *SurveyRequestMessage) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SurveyorPeerId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + nTmp, err = s.SurveyedPeerId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + nTmp, err = s.LedgerNum.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.EncryptionKey.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Curve25519Public: %s", err) + } + nTmp, err = s.CommandType.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SurveyMessageCommandType: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SurveyRequestMessage) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SurveyRequestMessage) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SurveyRequestMessage)(nil) + _ encoding.BinaryUnmarshaler = (*SurveyRequestMessage)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s SurveyRequestMessage) xdrType() {} + +var _ xdrType = (*SurveyRequestMessage)(nil) + +// SignedSurveyRequestMessage is an XDR Struct defines as: +// +// struct SignedSurveyRequestMessage +// { +// Signature requestSignature; +// SurveyRequestMessage request; +// }; +// +type SignedSurveyRequestMessage struct { + RequestSignature Signature + Request SurveyRequestMessage +} + +// EncodeTo encodes this value using the Encoder. +func (s *SignedSurveyRequestMessage) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.RequestSignature.EncodeTo(e); err != nil { + return err + } + if err = s.Request.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*SignedSurveyRequestMessage)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *SignedSurveyRequestMessage) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.RequestSignature.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signature: %s", err) + } + nTmp, err = s.Request.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SurveyRequestMessage: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SignedSurveyRequestMessage) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SignedSurveyRequestMessage) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SignedSurveyRequestMessage)(nil) + _ encoding.BinaryUnmarshaler = (*SignedSurveyRequestMessage)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SignedSurveyRequestMessage) xdrType() {} + +var _ xdrType = (*SignedSurveyRequestMessage)(nil) + +// EncryptedBody is an XDR Typedef defines as: +// +// typedef opaque EncryptedBody<64000>; +// +type EncryptedBody []byte + +// XDRMaxSize implements the Sized interface for EncryptedBody +func (e EncryptedBody) XDRMaxSize() int { + return 64000 +} + +// EncodeTo encodes this value using the Encoder. +func (s EncryptedBody) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeOpaque(s[:]); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*EncryptedBody)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *EncryptedBody) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + (*s), nTmp, err = d.DecodeOpaque(64000) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding EncryptedBody: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s EncryptedBody) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *EncryptedBody) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*EncryptedBody)(nil) + _ encoding.BinaryUnmarshaler = (*EncryptedBody)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s EncryptedBody) xdrType() {} + +var _ xdrType = (*EncryptedBody)(nil) + +// SurveyResponseMessage is an XDR Struct defines as: +// +// struct SurveyResponseMessage +// { +// NodeID surveyorPeerID; +// NodeID surveyedPeerID; +// uint32 ledgerNum; +// SurveyMessageCommandType commandType; +// EncryptedBody encryptedBody; +// }; +// +type SurveyResponseMessage struct { + SurveyorPeerId NodeId + SurveyedPeerId NodeId + LedgerNum Uint32 + CommandType SurveyMessageCommandType + EncryptedBody EncryptedBody +} + +// EncodeTo encodes this value using the Encoder. +func (s *SurveyResponseMessage) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SurveyorPeerId.EncodeTo(e); err != nil { + return err + } + if err = s.SurveyedPeerId.EncodeTo(e); err != nil { + return err + } + if err = s.LedgerNum.EncodeTo(e); err != nil { + return err + } + if err = s.CommandType.EncodeTo(e); err != nil { + return err + } + if err = s.EncryptedBody.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*SurveyResponseMessage)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *SurveyResponseMessage) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SurveyorPeerId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + nTmp, err = s.SurveyedPeerId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + nTmp, err = s.LedgerNum.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.CommandType.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SurveyMessageCommandType: %s", err) + } + nTmp, err = s.EncryptedBody.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding EncryptedBody: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SurveyResponseMessage) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SurveyResponseMessage) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SurveyResponseMessage)(nil) + _ encoding.BinaryUnmarshaler = (*SurveyResponseMessage)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SurveyResponseMessage) xdrType() {} + +var _ xdrType = (*SurveyResponseMessage)(nil) + +// SignedSurveyResponseMessage is an XDR Struct defines as: +// +// struct SignedSurveyResponseMessage +// { +// Signature responseSignature; +// SurveyResponseMessage response; +// }; +// +type SignedSurveyResponseMessage struct { + ResponseSignature Signature + Response SurveyResponseMessage +} + +// EncodeTo encodes this value using the Encoder. +func (s *SignedSurveyResponseMessage) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.ResponseSignature.EncodeTo(e); err != nil { + return err + } + if err = s.Response.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*SignedSurveyResponseMessage)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *SignedSurveyResponseMessage) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.ResponseSignature.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signature: %s", err) + } + nTmp, err = s.Response.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SurveyResponseMessage: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SignedSurveyResponseMessage) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SignedSurveyResponseMessage) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SignedSurveyResponseMessage)(nil) + _ encoding.BinaryUnmarshaler = (*SignedSurveyResponseMessage)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SignedSurveyResponseMessage) xdrType() {} + +var _ xdrType = (*SignedSurveyResponseMessage)(nil) + +// PeerStats is an XDR Struct defines as: +// +// struct PeerStats +// { +// NodeID id; +// string versionStr<100>; +// uint64 messagesRead; +// uint64 messagesWritten; +// uint64 bytesRead; +// uint64 bytesWritten; +// uint64 secondsConnected; +// +// uint64 uniqueFloodBytesRecv; +// uint64 duplicateFloodBytesRecv; +// uint64 uniqueFetchBytesRecv; +// uint64 duplicateFetchBytesRecv; +// +// uint64 uniqueFloodMessageRecv; +// uint64 duplicateFloodMessageRecv; +// uint64 uniqueFetchMessageRecv; +// uint64 duplicateFetchMessageRecv; +// }; +// +type PeerStats struct { + Id NodeId + VersionStr string `xdrmaxsize:"100"` + MessagesRead Uint64 + MessagesWritten Uint64 + BytesRead Uint64 + BytesWritten Uint64 + SecondsConnected Uint64 + UniqueFloodBytesRecv Uint64 + DuplicateFloodBytesRecv Uint64 + UniqueFetchBytesRecv Uint64 + DuplicateFetchBytesRecv Uint64 + UniqueFloodMessageRecv Uint64 + DuplicateFloodMessageRecv Uint64 + UniqueFetchMessageRecv Uint64 + DuplicateFetchMessageRecv Uint64 +} + +// EncodeTo encodes this value using the Encoder. 
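+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; the
+// version string and counter are placeholders): EncodeTo streams the struct field by
+// field into an xdr.Encoder, which is convenient when several values share one buffer.
+//
+//	var stats PeerStats
+//	stats.VersionStr = "stellar-core 18.0.3" // limited to 100 bytes by the XDR definition
+//	stats.MessagesRead = Uint64(42)
+//	buf := &bytes.Buffer{}
+//	if err := stats.EncodeTo(xdr.NewEncoder(buf)); err != nil {
+//		// handle the encoding error
+//	}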
+func (s *PeerStats) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Id.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeString(string(s.VersionStr)); err != nil { + return err + } + if err = s.MessagesRead.EncodeTo(e); err != nil { + return err + } + if err = s.MessagesWritten.EncodeTo(e); err != nil { + return err + } + if err = s.BytesRead.EncodeTo(e); err != nil { + return err + } + if err = s.BytesWritten.EncodeTo(e); err != nil { + return err + } + if err = s.SecondsConnected.EncodeTo(e); err != nil { + return err + } + if err = s.UniqueFloodBytesRecv.EncodeTo(e); err != nil { + return err + } + if err = s.DuplicateFloodBytesRecv.EncodeTo(e); err != nil { + return err + } + if err = s.UniqueFetchBytesRecv.EncodeTo(e); err != nil { + return err + } + if err = s.DuplicateFetchBytesRecv.EncodeTo(e); err != nil { + return err + } + if err = s.UniqueFloodMessageRecv.EncodeTo(e); err != nil { + return err + } + if err = s.DuplicateFloodMessageRecv.EncodeTo(e); err != nil { + return err + } + if err = s.UniqueFetchMessageRecv.EncodeTo(e); err != nil { + return err + } + if err = s.DuplicateFetchMessageRecv.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*PeerStats)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *PeerStats) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Id.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding NodeId: %s", err) + } + s.VersionStr, nTmp, err = d.DecodeString(100) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding VersionStr: %s", err) + } + nTmp, err = s.MessagesRead.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.MessagesWritten.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.BytesRead.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.BytesWritten.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.SecondsConnected.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.UniqueFloodBytesRecv.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.DuplicateFloodBytesRecv.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.UniqueFetchBytesRecv.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.DuplicateFetchBytesRecv.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.UniqueFloodMessageRecv.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.DuplicateFloodMessageRecv.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.UniqueFetchMessageRecv.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.DuplicateFetchMessageRecv.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s PeerStats) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PeerStats) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*PeerStats)(nil) + _ encoding.BinaryUnmarshaler = (*PeerStats)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PeerStats) xdrType() {} + +var _ xdrType = (*PeerStats)(nil) + +// PeerStatList is an XDR Typedef defines as: +// +// typedef PeerStats PeerStatList<25>; +// +type PeerStatList []PeerStats + +// XDRMaxSize implements the Sized interface for PeerStatList +func (e PeerStatList) XDRMaxSize() int { + return 25 +} + +// EncodeTo encodes this value using the Encoder. +func (s PeerStatList) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeUint(uint32(len(s))); err != nil { + return err + } + for i := 0; i < len(s); i++ { + if err = s[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*PeerStatList)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *PeerStatList) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PeerStats: %s", err) + } + if l > 25 { + return n, fmt.Errorf("decoding PeerStats: data size (%d) exceeds size limit (25)", l) + } + (*s) = nil + if l > 0 { + (*s) = make([]PeerStats, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*s)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PeerStats: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PeerStatList) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PeerStatList) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*PeerStatList)(nil) + _ encoding.BinaryUnmarshaler = (*PeerStatList)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PeerStatList) xdrType() {} + +var _ xdrType = (*PeerStatList)(nil) + +// TopologyResponseBody is an XDR Struct defines as: +// +// struct TopologyResponseBody +// { +// PeerStatList inboundPeers; +// PeerStatList outboundPeers; +// +// uint32 totalInboundPeerCount; +// uint32 totalOutboundPeerCount; +// }; +// +type TopologyResponseBody struct { + InboundPeers PeerStatList + OutboundPeers PeerStatList + TotalInboundPeerCount Uint32 + TotalOutboundPeerCount Uint32 +} + +// EncodeTo encodes this value using the Encoder. 
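+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; the
+// peer entries and counts are placeholders): a TopologyResponseBody carries at most 25
+// inbound and 25 outbound PeerStats entries, a limit enforced when decoding PeerStatList.
+//
+//	body := TopologyResponseBody{
+//		InboundPeers:           PeerStatList{PeerStats{VersionStr: "placeholder"}},
+//		TotalInboundPeerCount:  Uint32(1),
+//		TotalOutboundPeerCount: Uint32(0),
+//	}
+//	buf := &bytes.Buffer{}
+//	if err := body.EncodeTo(xdr.NewEncoder(buf)); err != nil {
+//		// handle the encoding error
+//	}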
+func (s *TopologyResponseBody) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.InboundPeers.EncodeTo(e); err != nil { + return err + } + if err = s.OutboundPeers.EncodeTo(e); err != nil { + return err + } + if err = s.TotalInboundPeerCount.EncodeTo(e); err != nil { + return err + } + if err = s.TotalOutboundPeerCount.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TopologyResponseBody)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TopologyResponseBody) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.InboundPeers.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PeerStatList: %s", err) + } + nTmp, err = s.OutboundPeers.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PeerStatList: %s", err) + } + nTmp, err = s.TotalInboundPeerCount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.TotalOutboundPeerCount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TopologyResponseBody) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TopologyResponseBody) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TopologyResponseBody)(nil) + _ encoding.BinaryUnmarshaler = (*TopologyResponseBody)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TopologyResponseBody) xdrType() {} + +var _ xdrType = (*TopologyResponseBody)(nil) + +// SurveyResponseBody is an XDR Union defines as: +// +// union SurveyResponseBody switch (SurveyMessageCommandType type) +// { +// case SURVEY_TOPOLOGY: +// TopologyResponseBody topologyResponseBody; +// }; +// +type SurveyResponseBody struct { + Type SurveyMessageCommandType + TopologyResponseBody *TopologyResponseBody +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u SurveyResponseBody) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of SurveyResponseBody +func (u SurveyResponseBody) ArmForSwitch(sw int32) (string, bool) { + switch SurveyMessageCommandType(sw) { + case SurveyMessageCommandTypeSurveyTopology: + return "TopologyResponseBody", true + } + return "-", false +} + +// NewSurveyResponseBody creates a new SurveyResponseBody. +func NewSurveyResponseBody(aType SurveyMessageCommandType, value interface{}) (result SurveyResponseBody, err error) { + result.Type = aType + switch SurveyMessageCommandType(aType) { + case SurveyMessageCommandTypeSurveyTopology: + tv, ok := value.(TopologyResponseBody) + if !ok { + err = fmt.Errorf("invalid value, must be TopologyResponseBody") + return + } + result.TopologyResponseBody = &tv + } + return +} + +// MustTopologyResponseBody retrieves the TopologyResponseBody value from the union, +// panicing if the value is not set. 
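+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; the
+// topology body is a zero value used as a placeholder): NewSurveyResponseBody sets the
+// discriminant and the matching arm together, and the Get accessor is the non-panicking
+// way to read the arm back.
+//
+//	body, err := NewSurveyResponseBody(SurveyMessageCommandTypeSurveyTopology, TopologyResponseBody{})
+//	if err != nil {
+//		// handle the construction error
+//	}
+//	if topology, ok := body.GetTopologyResponseBody(); ok {
+//		_ = topology
+//	}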
+func (u SurveyResponseBody) MustTopologyResponseBody() TopologyResponseBody { + val, ok := u.GetTopologyResponseBody() + + if !ok { + panic("arm TopologyResponseBody is not set") + } + + return val +} + +// GetTopologyResponseBody retrieves the TopologyResponseBody value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u SurveyResponseBody) GetTopologyResponseBody() (result TopologyResponseBody, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "TopologyResponseBody" { + result = *u.TopologyResponseBody + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u SurveyResponseBody) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch SurveyMessageCommandType(u.Type) { + case SurveyMessageCommandTypeSurveyTopology: + if err = (*u.TopologyResponseBody).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (SurveyMessageCommandType) switch value '%d' is not valid for union SurveyResponseBody", u.Type) +} + +var _ decoderFrom = (*SurveyResponseBody)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *SurveyResponseBody) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SurveyMessageCommandType: %s", err) + } + switch SurveyMessageCommandType(u.Type) { + case SurveyMessageCommandTypeSurveyTopology: + u.TopologyResponseBody = new(TopologyResponseBody) + nTmp, err = (*u.TopologyResponseBody).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TopologyResponseBody: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union SurveyResponseBody has invalid Type (SurveyMessageCommandType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SurveyResponseBody) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SurveyResponseBody) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SurveyResponseBody)(nil) + _ encoding.BinaryUnmarshaler = (*SurveyResponseBody)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s SurveyResponseBody) xdrType() {} + +var _ xdrType = (*SurveyResponseBody)(nil) + +// StellarMessage is an XDR Union defines as: +// +// union StellarMessage switch (MessageType type) +// { +// case ERROR_MSG: +// Error error; +// case HELLO: +// Hello hello; +// case AUTH: +// Auth auth; +// case DONT_HAVE: +// DontHave dontHave; +// case GET_PEERS: +// void; +// case PEERS: +// PeerAddress peers<100>; +// +// case GET_TX_SET: +// uint256 txSetHash; +// case TX_SET: +// TransactionSet txSet; +// +// case TRANSACTION: +// TransactionEnvelope transaction; +// +// case SURVEY_REQUEST: +// SignedSurveyRequestMessage signedSurveyRequestMessage; +// +// case SURVEY_RESPONSE: +// SignedSurveyResponseMessage signedSurveyResponseMessage; +// +// // SCP +// case GET_SCP_QUORUMSET: +// uint256 qSetHash; +// case SCP_QUORUMSET: +// SCPQuorumSet qSet; +// case SCP_MESSAGE: +// SCPEnvelope envelope; +// case GET_SCP_STATE: +// uint32 getSCPLedgerSeq; // ledger seq requested ; if 0, requests the latest +// }; +// +type StellarMessage struct { + Type MessageType + Error *Error + Hello *Hello + Auth *Auth + DontHave *DontHave + Peers *[]PeerAddress `xdrmaxsize:"100"` + TxSetHash *Uint256 + TxSet *TransactionSet + Transaction *TransactionEnvelope + SignedSurveyRequestMessage *SignedSurveyRequestMessage + SignedSurveyResponseMessage *SignedSurveyResponseMessage + QSetHash *Uint256 + QSet *ScpQuorumSet + Envelope *ScpEnvelope + GetScpLedgerSeq *Uint32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u StellarMessage) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of StellarMessage +func (u StellarMessage) ArmForSwitch(sw int32) (string, bool) { + switch MessageType(sw) { + case MessageTypeErrorMsg: + return "Error", true + case MessageTypeHello: + return "Hello", true + case MessageTypeAuth: + return "Auth", true + case MessageTypeDontHave: + return "DontHave", true + case MessageTypeGetPeers: + return "", true + case MessageTypePeers: + return "Peers", true + case MessageTypeGetTxSet: + return "TxSetHash", true + case MessageTypeTxSet: + return "TxSet", true + case MessageTypeTransaction: + return "Transaction", true + case MessageTypeSurveyRequest: + return "SignedSurveyRequestMessage", true + case MessageTypeSurveyResponse: + return "SignedSurveyResponseMessage", true + case MessageTypeGetScpQuorumset: + return "QSetHash", true + case MessageTypeScpQuorumset: + return "QSet", true + case MessageTypeScpMessage: + return "Envelope", true + case MessageTypeGetScpState: + return "GetScpLedgerSeq", true + } + return "-", false +} + +// NewStellarMessage creates a new StellarMessage. 
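+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; the
+// transaction envelope is a zero value used as a placeholder): the value passed in must
+// match the arm selected by the MessageType, otherwise an error is returned.
+//
+//	var env TransactionEnvelope
+//	msg, err := NewStellarMessage(MessageTypeTransaction, env)
+//	if err != nil {
+//		// handle the construction error
+//	}
+//	_ = msg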
+func NewStellarMessage(aType MessageType, value interface{}) (result StellarMessage, err error) { + result.Type = aType + switch MessageType(aType) { + case MessageTypeErrorMsg: + tv, ok := value.(Error) + if !ok { + err = fmt.Errorf("invalid value, must be Error") + return + } + result.Error = &tv + case MessageTypeHello: + tv, ok := value.(Hello) + if !ok { + err = fmt.Errorf("invalid value, must be Hello") + return + } + result.Hello = &tv + case MessageTypeAuth: + tv, ok := value.(Auth) + if !ok { + err = fmt.Errorf("invalid value, must be Auth") + return + } + result.Auth = &tv + case MessageTypeDontHave: + tv, ok := value.(DontHave) + if !ok { + err = fmt.Errorf("invalid value, must be DontHave") + return + } + result.DontHave = &tv + case MessageTypeGetPeers: + // void + case MessageTypePeers: + tv, ok := value.([]PeerAddress) + if !ok { + err = fmt.Errorf("invalid value, must be []PeerAddress") + return + } + result.Peers = &tv + case MessageTypeGetTxSet: + tv, ok := value.(Uint256) + if !ok { + err = fmt.Errorf("invalid value, must be Uint256") + return + } + result.TxSetHash = &tv + case MessageTypeTxSet: + tv, ok := value.(TransactionSet) + if !ok { + err = fmt.Errorf("invalid value, must be TransactionSet") + return + } + result.TxSet = &tv + case MessageTypeTransaction: + tv, ok := value.(TransactionEnvelope) + if !ok { + err = fmt.Errorf("invalid value, must be TransactionEnvelope") + return + } + result.Transaction = &tv + case MessageTypeSurveyRequest: + tv, ok := value.(SignedSurveyRequestMessage) + if !ok { + err = fmt.Errorf("invalid value, must be SignedSurveyRequestMessage") + return + } + result.SignedSurveyRequestMessage = &tv + case MessageTypeSurveyResponse: + tv, ok := value.(SignedSurveyResponseMessage) + if !ok { + err = fmt.Errorf("invalid value, must be SignedSurveyResponseMessage") + return + } + result.SignedSurveyResponseMessage = &tv + case MessageTypeGetScpQuorumset: + tv, ok := value.(Uint256) + if !ok { + err = fmt.Errorf("invalid value, must be Uint256") + return + } + result.QSetHash = &tv + case MessageTypeScpQuorumset: + tv, ok := value.(ScpQuorumSet) + if !ok { + err = fmt.Errorf("invalid value, must be ScpQuorumSet") + return + } + result.QSet = &tv + case MessageTypeScpMessage: + tv, ok := value.(ScpEnvelope) + if !ok { + err = fmt.Errorf("invalid value, must be ScpEnvelope") + return + } + result.Envelope = &tv + case MessageTypeGetScpState: + tv, ok := value.(Uint32) + if !ok { + err = fmt.Errorf("invalid value, must be Uint32") + return + } + result.GetScpLedgerSeq = &tv + } + return +} + +// MustError retrieves the Error value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustError() Error { + val, ok := u.GetError() + + if !ok { + panic("arm Error is not set") + } + + return val +} + +// GetError retrieves the Error value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetError() (result Error, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Error" { + result = *u.Error + ok = true + } + + return +} + +// MustHello retrieves the Hello value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustHello() Hello { + val, ok := u.GetHello() + + if !ok { + panic("arm Hello is not set") + } + + return val +} + +// GetHello retrieves the Hello value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u StellarMessage) GetHello() (result Hello, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Hello" { + result = *u.Hello + ok = true + } + + return +} + +// MustAuth retrieves the Auth value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustAuth() Auth { + val, ok := u.GetAuth() + + if !ok { + panic("arm Auth is not set") + } + + return val +} + +// GetAuth retrieves the Auth value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetAuth() (result Auth, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Auth" { + result = *u.Auth + ok = true + } + + return +} + +// MustDontHave retrieves the DontHave value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustDontHave() DontHave { + val, ok := u.GetDontHave() + + if !ok { + panic("arm DontHave is not set") + } + + return val +} + +// GetDontHave retrieves the DontHave value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetDontHave() (result DontHave, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "DontHave" { + result = *u.DontHave + ok = true + } + + return +} + +// MustPeers retrieves the Peers value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustPeers() []PeerAddress { + val, ok := u.GetPeers() + + if !ok { + panic("arm Peers is not set") + } + + return val +} + +// GetPeers retrieves the Peers value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetPeers() (result []PeerAddress, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Peers" { + result = *u.Peers + ok = true + } + + return +} + +// MustTxSetHash retrieves the TxSetHash value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustTxSetHash() Uint256 { + val, ok := u.GetTxSetHash() + + if !ok { + panic("arm TxSetHash is not set") + } + + return val +} + +// GetTxSetHash retrieves the TxSetHash value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetTxSetHash() (result Uint256, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "TxSetHash" { + result = *u.TxSetHash + ok = true + } + + return +} + +// MustTxSet retrieves the TxSet value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustTxSet() TransactionSet { + val, ok := u.GetTxSet() + + if !ok { + panic("arm TxSet is not set") + } + + return val +} + +// GetTxSet retrieves the TxSet value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetTxSet() (result TransactionSet, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "TxSet" { + result = *u.TxSet + ok = true + } + + return +} + +// MustTransaction retrieves the Transaction value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustTransaction() TransactionEnvelope { + val, ok := u.GetTransaction() + + if !ok { + panic("arm Transaction is not set") + } + + return val +} + +// GetTransaction retrieves the Transaction value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u StellarMessage) GetTransaction() (result TransactionEnvelope, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Transaction" { + result = *u.Transaction + ok = true + } + + return +} + +// MustSignedSurveyRequestMessage retrieves the SignedSurveyRequestMessage value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustSignedSurveyRequestMessage() SignedSurveyRequestMessage { + val, ok := u.GetSignedSurveyRequestMessage() + + if !ok { + panic("arm SignedSurveyRequestMessage is not set") + } + + return val +} + +// GetSignedSurveyRequestMessage retrieves the SignedSurveyRequestMessage value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetSignedSurveyRequestMessage() (result SignedSurveyRequestMessage, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "SignedSurveyRequestMessage" { + result = *u.SignedSurveyRequestMessage + ok = true + } + + return +} + +// MustSignedSurveyResponseMessage retrieves the SignedSurveyResponseMessage value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustSignedSurveyResponseMessage() SignedSurveyResponseMessage { + val, ok := u.GetSignedSurveyResponseMessage() + + if !ok { + panic("arm SignedSurveyResponseMessage is not set") + } + + return val +} + +// GetSignedSurveyResponseMessage retrieves the SignedSurveyResponseMessage value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetSignedSurveyResponseMessage() (result SignedSurveyResponseMessage, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "SignedSurveyResponseMessage" { + result = *u.SignedSurveyResponseMessage + ok = true + } + + return +} + +// MustQSetHash retrieves the QSetHash value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustQSetHash() Uint256 { + val, ok := u.GetQSetHash() + + if !ok { + panic("arm QSetHash is not set") + } + + return val +} + +// GetQSetHash retrieves the QSetHash value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetQSetHash() (result Uint256, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "QSetHash" { + result = *u.QSetHash + ok = true + } + + return +} + +// MustQSet retrieves the QSet value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustQSet() ScpQuorumSet { + val, ok := u.GetQSet() + + if !ok { + panic("arm QSet is not set") + } + + return val +} + +// GetQSet retrieves the QSet value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetQSet() (result ScpQuorumSet, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "QSet" { + result = *u.QSet + ok = true + } + + return +} + +// MustEnvelope retrieves the Envelope value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustEnvelope() ScpEnvelope { + val, ok := u.GetEnvelope() + + if !ok { + panic("arm Envelope is not set") + } + + return val +} + +// GetEnvelope retrieves the Envelope value from the union, +// returning ok if the union's switch indicated the value is valid. 
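+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; msg is
+// assumed to be a StellarMessage decoded elsewhere): prefer the Get accessor when the
+// message type is not known in advance, since MustEnvelope panics when the SCP_MESSAGE
+// arm is not the one that is set.
+//
+//	if envelope, ok := msg.GetEnvelope(); ok {
+//		_ = envelope // only present when msg.Type == MessageTypeScpMessage
+//	}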
+func (u StellarMessage) GetEnvelope() (result ScpEnvelope, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Envelope" { + result = *u.Envelope + ok = true + } + + return +} + +// MustGetScpLedgerSeq retrieves the GetScpLedgerSeq value from the union, +// panicing if the value is not set. +func (u StellarMessage) MustGetScpLedgerSeq() Uint32 { + val, ok := u.GetGetScpLedgerSeq() + + if !ok { + panic("arm GetScpLedgerSeq is not set") + } + + return val +} + +// GetGetScpLedgerSeq retrieves the GetScpLedgerSeq value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u StellarMessage) GetGetScpLedgerSeq() (result Uint32, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "GetScpLedgerSeq" { + result = *u.GetScpLedgerSeq + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u StellarMessage) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch MessageType(u.Type) { + case MessageTypeErrorMsg: + if err = (*u.Error).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeHello: + if err = (*u.Hello).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeAuth: + if err = (*u.Auth).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeDontHave: + if err = (*u.DontHave).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeGetPeers: + // Void + return nil + case MessageTypePeers: + if _, err = e.EncodeUint(uint32(len((*u.Peers)))); err != nil { + return err + } + for i := 0; i < len((*u.Peers)); i++ { + if err = (*u.Peers)[i].EncodeTo(e); err != nil { + return err + } + } + return nil + case MessageTypeGetTxSet: + if err = (*u.TxSetHash).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeTxSet: + if err = (*u.TxSet).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeTransaction: + if err = (*u.Transaction).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeSurveyRequest: + if err = (*u.SignedSurveyRequestMessage).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeSurveyResponse: + if err = (*u.SignedSurveyResponseMessage).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeGetScpQuorumset: + if err = (*u.QSetHash).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeScpQuorumset: + if err = (*u.QSet).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeScpMessage: + if err = (*u.Envelope).EncodeTo(e); err != nil { + return err + } + return nil + case MessageTypeGetScpState: + if err = (*u.GetScpLedgerSeq).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (MessageType) switch value '%d' is not valid for union StellarMessage", u.Type) +} + +var _ decoderFrom = (*StellarMessage)(nil) + +// DecodeFrom decodes this value using the Decoder. 
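+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; raw is
+// assumed to hold XDR bytes received from a peer): after decoding, the discriminant tells
+// the caller which arm accessor is safe to use.
+//
+//	var msg StellarMessage
+//	if _, err := msg.DecodeFrom(xdr.NewDecoder(bytes.NewReader(raw))); err != nil {
+//		// handle the decoding error
+//	}
+//	switch msg.Type {
+//	case MessageTypeTransaction:
+//		_ = msg.MustTransaction()
+//	case MessageTypeScpMessage:
+//		_ = msg.MustEnvelope()
+//	}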
+func (u *StellarMessage) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MessageType: %s", err) + } + switch MessageType(u.Type) { + case MessageTypeErrorMsg: + u.Error = new(Error) + nTmp, err = (*u.Error).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Error: %s", err) + } + return n, nil + case MessageTypeHello: + u.Hello = new(Hello) + nTmp, err = (*u.Hello).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hello: %s", err) + } + return n, nil + case MessageTypeAuth: + u.Auth = new(Auth) + nTmp, err = (*u.Auth).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Auth: %s", err) + } + return n, nil + case MessageTypeDontHave: + u.DontHave = new(DontHave) + nTmp, err = (*u.DontHave).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DontHave: %s", err) + } + return n, nil + case MessageTypeGetPeers: + // Void + return n, nil + case MessageTypePeers: + u.Peers = new([]PeerAddress) + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PeerAddress: %s", err) + } + if l > 100 { + return n, fmt.Errorf("decoding PeerAddress: data size (%d) exceeds size limit (100)", l) + } + (*u.Peers) = nil + if l > 0 { + (*u.Peers) = make([]PeerAddress, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*u.Peers)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PeerAddress: %s", err) + } + } + } + return n, nil + case MessageTypeGetTxSet: + u.TxSetHash = new(Uint256) + nTmp, err = (*u.TxSetHash).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + return n, nil + case MessageTypeTxSet: + u.TxSet = new(TransactionSet) + nTmp, err = (*u.TxSet).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionSet: %s", err) + } + return n, nil + case MessageTypeTransaction: + u.Transaction = new(TransactionEnvelope) + nTmp, err = (*u.Transaction).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionEnvelope: %s", err) + } + return n, nil + case MessageTypeSurveyRequest: + u.SignedSurveyRequestMessage = new(SignedSurveyRequestMessage) + nTmp, err = (*u.SignedSurveyRequestMessage).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SignedSurveyRequestMessage: %s", err) + } + return n, nil + case MessageTypeSurveyResponse: + u.SignedSurveyResponseMessage = new(SignedSurveyResponseMessage) + nTmp, err = (*u.SignedSurveyResponseMessage).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SignedSurveyResponseMessage: %s", err) + } + return n, nil + case MessageTypeGetScpQuorumset: + u.QSetHash = new(Uint256) + nTmp, err = (*u.QSetHash).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + return n, nil + case MessageTypeScpQuorumset: + u.QSet = new(ScpQuorumSet) + nTmp, err = (*u.QSet).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpQuorumSet: %s", err) + } + return n, nil + case MessageTypeScpMessage: + u.Envelope = new(ScpEnvelope) + nTmp, err = (*u.Envelope).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ScpEnvelope: %s", err) + } + return n, nil + case MessageTypeGetScpState: + u.GetScpLedgerSeq = new(Uint32) + nTmp, err = 
(*u.GetScpLedgerSeq).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union StellarMessage has invalid Type (MessageType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s StellarMessage) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *StellarMessage) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*StellarMessage)(nil) + _ encoding.BinaryUnmarshaler = (*StellarMessage)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s StellarMessage) xdrType() {} + +var _ xdrType = (*StellarMessage)(nil) + +// AuthenticatedMessageV0 is an XDR NestedStruct defines as: +// +// struct +// { +// uint64 sequence; +// StellarMessage message; +// HmacSha256Mac mac; +// } +// +type AuthenticatedMessageV0 struct { + Sequence Uint64 + Message StellarMessage + Mac HmacSha256Mac +} + +// EncodeTo encodes this value using the Encoder. +func (s *AuthenticatedMessageV0) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Sequence.EncodeTo(e); err != nil { + return err + } + if err = s.Message.EncodeTo(e); err != nil { + return err + } + if err = s.Mac.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AuthenticatedMessageV0)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *AuthenticatedMessageV0) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Sequence.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.Message.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding StellarMessage: %s", err) + } + nTmp, err = s.Mac.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding HmacSha256Mac: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AuthenticatedMessageV0) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AuthenticatedMessageV0) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AuthenticatedMessageV0)(nil) + _ encoding.BinaryUnmarshaler = (*AuthenticatedMessageV0)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
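+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; the
+// sequence number, message and MAC below are placeholders): V0 bundles a per-connection
+// sequence number, the wrapped StellarMessage and its MAC.
+//
+//	v0 := AuthenticatedMessageV0{
+//		Sequence: Uint64(1),
+//		Message:  StellarMessage{Type: MessageTypeGetPeers},
+//		Mac:      HmacSha256Mac{},
+//	}
+//	_ = v0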
+func (s AuthenticatedMessageV0) xdrType() {} + +var _ xdrType = (*AuthenticatedMessageV0)(nil) + +// AuthenticatedMessage is an XDR Union defines as: +// +// union AuthenticatedMessage switch (uint32 v) +// { +// case 0: +// struct +// { +// uint64 sequence; +// StellarMessage message; +// HmacSha256Mac mac; +// } v0; +// }; +// +type AuthenticatedMessage struct { + V Uint32 + V0 *AuthenticatedMessageV0 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u AuthenticatedMessage) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of AuthenticatedMessage +func (u AuthenticatedMessage) ArmForSwitch(sw int32) (string, bool) { + switch Uint32(sw) { + case 0: + return "V0", true + } + return "-", false +} + +// NewAuthenticatedMessage creates a new AuthenticatedMessage. +func NewAuthenticatedMessage(v Uint32, value interface{}) (result AuthenticatedMessage, err error) { + result.V = v + switch Uint32(v) { + case 0: + tv, ok := value.(AuthenticatedMessageV0) + if !ok { + err = fmt.Errorf("invalid value, must be AuthenticatedMessageV0") + return + } + result.V0 = &tv + } + return +} + +// MustV0 retrieves the V0 value from the union, +// panicing if the value is not set. +func (u AuthenticatedMessage) MustV0() AuthenticatedMessageV0 { + val, ok := u.GetV0() + + if !ok { + panic("arm V0 is not set") + } + + return val +} + +// GetV0 retrieves the V0 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u AuthenticatedMessage) GetV0() (result AuthenticatedMessageV0, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.V)) + + if armName == "V0" { + result = *u.V0 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u AuthenticatedMessage) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.V.EncodeTo(e); err != nil { + return err + } + switch Uint32(u.V) { + case 0: + if err = (*u.V0).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("V (Uint32) switch value '%d' is not valid for union AuthenticatedMessage", u.V) +} + +var _ decoderFrom = (*AuthenticatedMessage)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *AuthenticatedMessage) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.V.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + switch Uint32(u.V) { + case 0: + u.V0 = new(AuthenticatedMessageV0) + nTmp, err = (*u.V0).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AuthenticatedMessageV0: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union AuthenticatedMessage has invalid V (Uint32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AuthenticatedMessage) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
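+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; v0 is
+// assumed to be an AuthenticatedMessageV0 built as in the example above): the union only
+// defines case 0, so the constructor is called with Uint32(0).
+//
+//	authMsg, err := NewAuthenticatedMessage(Uint32(0), v0)
+//	if err != nil {
+//		// handle the construction error
+//	}
+//	wire, err := authMsg.MarshalBinary()
+//	if err != nil {
+//		// handle the encoding error
+//	}
+//	var received AuthenticatedMessage
+//	err = received.UnmarshalBinary(wire)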
+func (s *AuthenticatedMessage) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AuthenticatedMessage)(nil) + _ encoding.BinaryUnmarshaler = (*AuthenticatedMessage)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AuthenticatedMessage) xdrType() {} + +var _ xdrType = (*AuthenticatedMessage)(nil) + +// LiquidityPoolParameters is an XDR Union defines as: +// +// union LiquidityPoolParameters switch (LiquidityPoolType type) +// { +// case LIQUIDITY_POOL_CONSTANT_PRODUCT: +// LiquidityPoolConstantProductParameters constantProduct; +// }; +// +type LiquidityPoolParameters struct { + Type LiquidityPoolType + ConstantProduct *LiquidityPoolConstantProductParameters +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LiquidityPoolParameters) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LiquidityPoolParameters +func (u LiquidityPoolParameters) ArmForSwitch(sw int32) (string, bool) { + switch LiquidityPoolType(sw) { + case LiquidityPoolTypeLiquidityPoolConstantProduct: + return "ConstantProduct", true + } + return "-", false +} + +// NewLiquidityPoolParameters creates a new LiquidityPoolParameters. +func NewLiquidityPoolParameters(aType LiquidityPoolType, value interface{}) (result LiquidityPoolParameters, err error) { + result.Type = aType + switch LiquidityPoolType(aType) { + case LiquidityPoolTypeLiquidityPoolConstantProduct: + tv, ok := value.(LiquidityPoolConstantProductParameters) + if !ok { + err = fmt.Errorf("invalid value, must be LiquidityPoolConstantProductParameters") + return + } + result.ConstantProduct = &tv + } + return +} + +// MustConstantProduct retrieves the ConstantProduct value from the union, +// panicing if the value is not set. +func (u LiquidityPoolParameters) MustConstantProduct() LiquidityPoolConstantProductParameters { + val, ok := u.GetConstantProduct() + + if !ok { + panic("arm ConstantProduct is not set") + } + + return val +} + +// GetConstantProduct retrieves the ConstantProduct value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u LiquidityPoolParameters) GetConstantProduct() (result LiquidityPoolConstantProductParameters, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ConstantProduct" { + result = *u.ConstantProduct + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u LiquidityPoolParameters) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch LiquidityPoolType(u.Type) { + case LiquidityPoolTypeLiquidityPoolConstantProduct: + if err = (*u.ConstantProduct).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (LiquidityPoolType) switch value '%d' is not valid for union LiquidityPoolParameters", u.Type) +} + +var _ decoderFrom = (*LiquidityPoolParameters)(nil) + +// DecodeFrom decodes this value using the Decoder. 
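+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; the
+// constant-product parameters are a zero value used as a placeholder): the union above
+// only defines the LIQUIDITY_POOL_CONSTANT_PRODUCT arm.
+//
+//	var cp LiquidityPoolConstantProductParameters
+//	params, err := NewLiquidityPoolParameters(LiquidityPoolTypeLiquidityPoolConstantProduct, cp)
+//	if err != nil {
+//		// handle the construction error
+//	}
+//	_ = params.MustConstantProduct()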
+func (u *LiquidityPoolParameters) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolType: %s", err) + } + switch LiquidityPoolType(u.Type) { + case LiquidityPoolTypeLiquidityPoolConstantProduct: + u.ConstantProduct = new(LiquidityPoolConstantProductParameters) + nTmp, err = (*u.ConstantProduct).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolConstantProductParameters: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union LiquidityPoolParameters has invalid Type (LiquidityPoolType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolParameters) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolParameters) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolParameters)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolParameters)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolParameters) xdrType() {} + +var _ xdrType = (*LiquidityPoolParameters)(nil) + +// MuxedAccountMed25519 is an XDR NestedStruct defines as: +// +// struct +// { +// uint64 id; +// uint256 ed25519; +// } +// +type MuxedAccountMed25519 struct { + Id Uint64 + Ed25519 Uint256 +} + +// EncodeTo encodes this value using the Encoder. +func (s *MuxedAccountMed25519) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Id.EncodeTo(e); err != nil { + return err + } + if err = s.Ed25519.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*MuxedAccountMed25519)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *MuxedAccountMed25519) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Id.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + nTmp, err = s.Ed25519.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s MuxedAccountMed25519) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *MuxedAccountMed25519) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*MuxedAccountMed25519)(nil) + _ encoding.BinaryUnmarshaler = (*MuxedAccountMed25519)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s MuxedAccountMed25519) xdrType() {} + +var _ xdrType = (*MuxedAccountMed25519)(nil) + +// MuxedAccount is an XDR Union defines as: +// +// union MuxedAccount switch (CryptoKeyType type) +// { +// case KEY_TYPE_ED25519: +// uint256 ed25519; +// case KEY_TYPE_MUXED_ED25519: +// struct +// { +// uint64 id; +// uint256 ed25519; +// } med25519; +// }; +// +type MuxedAccount struct { + Type CryptoKeyType + Ed25519 *Uint256 + Med25519 *MuxedAccountMed25519 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u MuxedAccount) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of MuxedAccount +func (u MuxedAccount) ArmForSwitch(sw int32) (string, bool) { + switch CryptoKeyType(sw) { + case CryptoKeyTypeKeyTypeEd25519: + return "Ed25519", true + case CryptoKeyTypeKeyTypeMuxedEd25519: + return "Med25519", true + } + return "-", false +} + +// NewMuxedAccount creates a new MuxedAccount. +func NewMuxedAccount(aType CryptoKeyType, value interface{}) (result MuxedAccount, err error) { + result.Type = aType + switch CryptoKeyType(aType) { + case CryptoKeyTypeKeyTypeEd25519: + tv, ok := value.(Uint256) + if !ok { + err = fmt.Errorf("invalid value, must be Uint256") + return + } + result.Ed25519 = &tv + case CryptoKeyTypeKeyTypeMuxedEd25519: + tv, ok := value.(MuxedAccountMed25519) + if !ok { + err = fmt.Errorf("invalid value, must be MuxedAccountMed25519") + return + } + result.Med25519 = &tv + } + return +} + +// MustEd25519 retrieves the Ed25519 value from the union, +// panicing if the value is not set. +func (u MuxedAccount) MustEd25519() Uint256 { + val, ok := u.GetEd25519() + + if !ok { + panic("arm Ed25519 is not set") + } + + return val +} + +// GetEd25519 retrieves the Ed25519 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u MuxedAccount) GetEd25519() (result Uint256, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Ed25519" { + result = *u.Ed25519 + ok = true + } + + return +} + +// MustMed25519 retrieves the Med25519 value from the union, +// panicing if the value is not set. +func (u MuxedAccount) MustMed25519() MuxedAccountMed25519 { + val, ok := u.GetMed25519() + + if !ok { + panic("arm Med25519 is not set") + } + + return val +} + +// GetMed25519 retrieves the Med25519 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u MuxedAccount) GetMed25519() (result MuxedAccountMed25519, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Med25519" { + result = *u.Med25519 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u MuxedAccount) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch CryptoKeyType(u.Type) { + case CryptoKeyTypeKeyTypeEd25519: + if err = (*u.Ed25519).EncodeTo(e); err != nil { + return err + } + return nil + case CryptoKeyTypeKeyTypeMuxedEd25519: + if err = (*u.Med25519).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (CryptoKeyType) switch value '%d' is not valid for union MuxedAccount", u.Type) +} + +var _ decoderFrom = (*MuxedAccount)(nil) + +// DecodeFrom decodes this value using the Decoder. 
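+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator; the id
+// and ed25519 key bytes are placeholders): a multiplexed account pairs a uint64 id with
+// an ed25519 public key under the KEY_TYPE_MUXED_ED25519 arm.
+//
+//	acct, err := NewMuxedAccount(CryptoKeyTypeKeyTypeMuxedEd25519, MuxedAccountMed25519{
+//		Id:      Uint64(7),
+//		Ed25519: Uint256{}, // placeholder public key bytes
+//	})
+//	if err != nil {
+//		// handle the construction error
+//	}
+//	_ = acct.MustMed25519().Id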
+func (u *MuxedAccount) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding CryptoKeyType: %s", err) + } + switch CryptoKeyType(u.Type) { + case CryptoKeyTypeKeyTypeEd25519: + u.Ed25519 = new(Uint256) + nTmp, err = (*u.Ed25519).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + return n, nil + case CryptoKeyTypeKeyTypeMuxedEd25519: + u.Med25519 = new(MuxedAccountMed25519) + nTmp, err = (*u.Med25519).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccountMed25519: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union MuxedAccount has invalid Type (CryptoKeyType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s MuxedAccount) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *MuxedAccount) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*MuxedAccount)(nil) + _ encoding.BinaryUnmarshaler = (*MuxedAccount)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s MuxedAccount) xdrType() {} + +var _ xdrType = (*MuxedAccount)(nil) + +// DecoratedSignature is an XDR Struct defines as: +// +// struct DecoratedSignature +// { +// SignatureHint hint; // last 4 bytes of the public key, used as a hint +// Signature signature; // actual signature +// }; +// +type DecoratedSignature struct { + Hint SignatureHint + Signature Signature +} + +// EncodeTo encodes this value using the Encoder. +func (s *DecoratedSignature) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Hint.EncodeTo(e); err != nil { + return err + } + if err = s.Signature.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*DecoratedSignature)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *DecoratedSignature) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Hint.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SignatureHint: %s", err) + } + nTmp, err = s.Signature.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signature: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s DecoratedSignature) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *DecoratedSignature) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*DecoratedSignature)(nil) + _ encoding.BinaryUnmarshaler = (*DecoratedSignature)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s DecoratedSignature) xdrType() {} + +var _ xdrType = (*DecoratedSignature)(nil) + +// OperationType is an XDR Enum defines as: +// +// enum OperationType +// { +// CREATE_ACCOUNT = 0, +// PAYMENT = 1, +// PATH_PAYMENT_STRICT_RECEIVE = 2, +// MANAGE_SELL_OFFER = 3, +// CREATE_PASSIVE_SELL_OFFER = 4, +// SET_OPTIONS = 5, +// CHANGE_TRUST = 6, +// ALLOW_TRUST = 7, +// ACCOUNT_MERGE = 8, +// INFLATION = 9, +// MANAGE_DATA = 10, +// BUMP_SEQUENCE = 11, +// MANAGE_BUY_OFFER = 12, +// PATH_PAYMENT_STRICT_SEND = 13, +// CREATE_CLAIMABLE_BALANCE = 14, +// CLAIM_CLAIMABLE_BALANCE = 15, +// BEGIN_SPONSORING_FUTURE_RESERVES = 16, +// END_SPONSORING_FUTURE_RESERVES = 17, +// REVOKE_SPONSORSHIP = 18, +// CLAWBACK = 19, +// CLAWBACK_CLAIMABLE_BALANCE = 20, +// SET_TRUST_LINE_FLAGS = 21, +// LIQUIDITY_POOL_DEPOSIT = 22, +// LIQUIDITY_POOL_WITHDRAW = 23 +// }; +// +type OperationType int32 + +const ( + OperationTypeCreateAccount OperationType = 0 + OperationTypePayment OperationType = 1 + OperationTypePathPaymentStrictReceive OperationType = 2 + OperationTypeManageSellOffer OperationType = 3 + OperationTypeCreatePassiveSellOffer OperationType = 4 + OperationTypeSetOptions OperationType = 5 + OperationTypeChangeTrust OperationType = 6 + OperationTypeAllowTrust OperationType = 7 + OperationTypeAccountMerge OperationType = 8 + OperationTypeInflation OperationType = 9 + OperationTypeManageData OperationType = 10 + OperationTypeBumpSequence OperationType = 11 + OperationTypeManageBuyOffer OperationType = 12 + OperationTypePathPaymentStrictSend OperationType = 13 + OperationTypeCreateClaimableBalance OperationType = 14 + OperationTypeClaimClaimableBalance OperationType = 15 + OperationTypeBeginSponsoringFutureReserves OperationType = 16 + OperationTypeEndSponsoringFutureReserves OperationType = 17 + OperationTypeRevokeSponsorship OperationType = 18 + OperationTypeClawback OperationType = 19 + OperationTypeClawbackClaimableBalance OperationType = 20 + OperationTypeSetTrustLineFlags OperationType = 21 + OperationTypeLiquidityPoolDeposit OperationType = 22 + OperationTypeLiquidityPoolWithdraw OperationType = 23 +) + +var operationTypeMap = map[int32]string{ + 0: "OperationTypeCreateAccount", + 1: "OperationTypePayment", + 2: "OperationTypePathPaymentStrictReceive", + 3: "OperationTypeManageSellOffer", + 4: "OperationTypeCreatePassiveSellOffer", + 5: "OperationTypeSetOptions", + 6: "OperationTypeChangeTrust", + 7: "OperationTypeAllowTrust", + 8: "OperationTypeAccountMerge", + 9: "OperationTypeInflation", + 10: "OperationTypeManageData", + 11: "OperationTypeBumpSequence", + 12: "OperationTypeManageBuyOffer", + 13: "OperationTypePathPaymentStrictSend", + 14: "OperationTypeCreateClaimableBalance", + 15: "OperationTypeClaimClaimableBalance", + 16: "OperationTypeBeginSponsoringFutureReserves", + 17: "OperationTypeEndSponsoringFutureReserves", + 18: "OperationTypeRevokeSponsorship", + 19: "OperationTypeClawback", + 20: "OperationTypeClawbackClaimableBalance", + 21: "OperationTypeSetTrustLineFlags", + 22: "OperationTypeLiquidityPoolDeposit", + 23: "OperationTypeLiquidityPoolWithdraw", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for OperationType +func (e OperationType) ValidEnum(v int32) bool { + _, ok := operationTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e OperationType) String() string { + name, _ := operationTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. 
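+//
+// Illustrative usage (an editorial sketch, not emitted by the XDR code generator):
+// String returns the generated constant name, and EncodeTo refuses values that are not
+// part of the enum.
+//
+//	op := OperationTypePayment
+//	fmt.Println(op.String()) // "OperationTypePayment"
+//	buf := &bytes.Buffer{}
+//	if err := op.EncodeTo(xdr.NewEncoder(buf)); err != nil {
+//		// only reachable for values outside the enum
+//	}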
+func (e OperationType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := operationTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid OperationType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*OperationType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *OperationType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding OperationType: %s", err) + } + if _, ok := operationTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid OperationType enum value", v) + } + *e = OperationType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s OperationType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *OperationType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*OperationType)(nil) + _ encoding.BinaryUnmarshaler = (*OperationType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s OperationType) xdrType() {} + +var _ xdrType = (*OperationType)(nil) + +// CreateAccountOp is an XDR Struct defines as: +// +// struct CreateAccountOp +// { +// AccountID destination; // account to create +// int64 startingBalance; // amount they end up with +// }; +// +type CreateAccountOp struct { + Destination AccountId + StartingBalance Int64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *CreateAccountOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Destination.EncodeTo(e); err != nil { + return err + } + if err = s.StartingBalance.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*CreateAccountOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *CreateAccountOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Destination.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.StartingBalance.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s CreateAccountOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *CreateAccountOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*CreateAccountOp)(nil) + _ encoding.BinaryUnmarshaler = (*CreateAccountOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s CreateAccountOp) xdrType() {} + +var _ xdrType = (*CreateAccountOp)(nil) + +// PaymentOp is an XDR Struct defines as: +// +// struct PaymentOp +// { +// MuxedAccount destination; // recipient of the payment +// Asset asset; // what they end up with +// int64 amount; // amount they end up with +// }; +// +type PaymentOp struct { + Destination MuxedAccount + Asset Asset + Amount Int64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *PaymentOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Destination.EncodeTo(e); err != nil { + return err + } + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*PaymentOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *PaymentOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Destination.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccount: %s", err) + } + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PaymentOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PaymentOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*PaymentOp)(nil) + _ encoding.BinaryUnmarshaler = (*PaymentOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PaymentOp) xdrType() {} + +var _ xdrType = (*PaymentOp)(nil) + +// PathPaymentStrictReceiveOp is an XDR Struct defines as: +// +// struct PathPaymentStrictReceiveOp +// { +// Asset sendAsset; // asset we pay with +// int64 sendMax; // the maximum amount of sendAsset to +// // send (excluding fees). +// // The operation will fail if can't be met +// +// MuxedAccount destination; // recipient of the payment +// Asset destAsset; // what they end up with +// int64 destAmount; // amount they end up with +// +// Asset path<5>; // additional hops it must go through to get there +// }; +// +type PathPaymentStrictReceiveOp struct { + SendAsset Asset + SendMax Int64 + Destination MuxedAccount + DestAsset Asset + DestAmount Int64 + Path []Asset `xdrmaxsize:"5"` +} + +// EncodeTo encodes this value using the Encoder. +func (s *PathPaymentStrictReceiveOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SendAsset.EncodeTo(e); err != nil { + return err + } + if err = s.SendMax.EncodeTo(e); err != nil { + return err + } + if err = s.Destination.EncodeTo(e); err != nil { + return err + } + if err = s.DestAsset.EncodeTo(e); err != nil { + return err + } + if err = s.DestAmount.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Path))); err != nil { + return err + } + for i := 0; i < len(s.Path); i++ { + if err = s.Path[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*PathPaymentStrictReceiveOp)(nil) + +// DecodeFrom decodes this value using the Decoder. 
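+//
+// Usage sketch (not generator output): the fixed fields are read in
+// declaration order, followed by a 4-byte length prefix for path; a length
+// greater than 5 is rejected before the slice is allocated. raw below is an
+// assumed XDR-encoded input.
+//
+//   var op PathPaymentStrictReceiveOp
+//   n, err := op.DecodeFrom(xdr.NewDecoder(bytes.NewReader(raw)))
+//   // n is the total number of bytes consumed from raw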
+func (s *PathPaymentStrictReceiveOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SendAsset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.SendMax.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Destination.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccount: %s", err) + } + nTmp, err = s.DestAsset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.DestAmount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + if l > 5 { + return n, fmt.Errorf("decoding Asset: data size (%d) exceeds size limit (5)", l) + } + s.Path = nil + if l > 0 { + s.Path = make([]Asset, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Path[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PathPaymentStrictReceiveOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PathPaymentStrictReceiveOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*PathPaymentStrictReceiveOp)(nil) + _ encoding.BinaryUnmarshaler = (*PathPaymentStrictReceiveOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PathPaymentStrictReceiveOp) xdrType() {} + +var _ xdrType = (*PathPaymentStrictReceiveOp)(nil) + +// PathPaymentStrictSendOp is an XDR Struct defines as: +// +// struct PathPaymentStrictSendOp +// { +// Asset sendAsset; // asset we pay with +// int64 sendAmount; // amount of sendAsset to send (excluding fees) +// +// MuxedAccount destination; // recipient of the payment +// Asset destAsset; // what they end up with +// int64 destMin; // the minimum amount of dest asset to +// // be received +// // The operation will fail if it can't be met +// +// Asset path<5>; // additional hops it must go through to get there +// }; +// +type PathPaymentStrictSendOp struct { + SendAsset Asset + SendAmount Int64 + Destination MuxedAccount + DestAsset Asset + DestMin Int64 + Path []Asset `xdrmaxsize:"5"` +} + +// EncodeTo encodes this value using the Encoder. +func (s *PathPaymentStrictSendOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SendAsset.EncodeTo(e); err != nil { + return err + } + if err = s.SendAmount.EncodeTo(e); err != nil { + return err + } + if err = s.Destination.EncodeTo(e); err != nil { + return err + } + if err = s.DestAsset.EncodeTo(e); err != nil { + return err + } + if err = s.DestMin.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Path))); err != nil { + return err + } + for i := 0; i < len(s.Path); i++ { + if err = s.Path[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*PathPaymentStrictSendOp)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *PathPaymentStrictSendOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SendAsset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.SendAmount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Destination.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccount: %s", err) + } + nTmp, err = s.DestAsset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.DestMin.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + if l > 5 { + return n, fmt.Errorf("decoding Asset: data size (%d) exceeds size limit (5)", l) + } + s.Path = nil + if l > 0 { + s.Path = make([]Asset, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Path[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PathPaymentStrictSendOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PathPaymentStrictSendOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*PathPaymentStrictSendOp)(nil) + _ encoding.BinaryUnmarshaler = (*PathPaymentStrictSendOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PathPaymentStrictSendOp) xdrType() {} + +var _ xdrType = (*PathPaymentStrictSendOp)(nil) + +// ManageSellOfferOp is an XDR Struct defines as: +// +// struct ManageSellOfferOp +// { +// Asset selling; +// Asset buying; +// int64 amount; // amount being sold. if set to 0, delete the offer +// Price price; // price of thing being sold in terms of what you are buying +// +// // 0=create a new offer, otherwise edit an existing offer +// int64 offerID; +// }; +// +type ManageSellOfferOp struct { + Selling Asset + Buying Asset + Amount Int64 + Price Price + OfferId Int64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *ManageSellOfferOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Selling.EncodeTo(e); err != nil { + return err + } + if err = s.Buying.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + if err = s.Price.EncodeTo(e); err != nil { + return err + } + if err = s.OfferId.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ManageSellOfferOp)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *ManageSellOfferOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Selling.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Buying.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Price.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Price: %s", err) + } + nTmp, err = s.OfferId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageSellOfferOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageSellOfferOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageSellOfferOp)(nil) + _ encoding.BinaryUnmarshaler = (*ManageSellOfferOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageSellOfferOp) xdrType() {} + +var _ xdrType = (*ManageSellOfferOp)(nil) + +// ManageBuyOfferOp is an XDR Struct defines as: +// +// struct ManageBuyOfferOp +// { +// Asset selling; +// Asset buying; +// int64 buyAmount; // amount being bought. if set to 0, delete the offer +// Price price; // price of thing being bought in terms of what you are +// // selling +// +// // 0=create a new offer, otherwise edit an existing offer +// int64 offerID; +// }; +// +type ManageBuyOfferOp struct { + Selling Asset + Buying Asset + BuyAmount Int64 + Price Price + OfferId Int64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *ManageBuyOfferOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Selling.EncodeTo(e); err != nil { + return err + } + if err = s.Buying.EncodeTo(e); err != nil { + return err + } + if err = s.BuyAmount.EncodeTo(e); err != nil { + return err + } + if err = s.Price.EncodeTo(e); err != nil { + return err + } + if err = s.OfferId.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ManageBuyOfferOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ManageBuyOfferOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Selling.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Buying.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.BuyAmount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Price.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Price: %s", err) + } + nTmp, err = s.OfferId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
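+//
+// Usage sketch (not generator output): MarshalBinary and UnmarshalBinary
+// round-trip a value through its XDR form. offer below is an assumed,
+// fully-populated ManageBuyOfferOp.
+//
+//   raw, err := offer.MarshalBinary()
+//   if err != nil {
+//       // handle encoding error
+//   }
+//   var decoded ManageBuyOfferOp
+//   err = decoded.UnmarshalBinary(raw)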
+func (s ManageBuyOfferOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageBuyOfferOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageBuyOfferOp)(nil) + _ encoding.BinaryUnmarshaler = (*ManageBuyOfferOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageBuyOfferOp) xdrType() {} + +var _ xdrType = (*ManageBuyOfferOp)(nil) + +// CreatePassiveSellOfferOp is an XDR Struct defines as: +// +// struct CreatePassiveSellOfferOp +// { +// Asset selling; // A +// Asset buying; // B +// int64 amount; // amount taker gets +// Price price; // cost of A in terms of B +// }; +// +type CreatePassiveSellOfferOp struct { + Selling Asset + Buying Asset + Amount Int64 + Price Price +} + +// EncodeTo encodes this value using the Encoder. +func (s *CreatePassiveSellOfferOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Selling.EncodeTo(e); err != nil { + return err + } + if err = s.Buying.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + if err = s.Price.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*CreatePassiveSellOfferOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *CreatePassiveSellOfferOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Selling.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Buying.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Price.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Price: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s CreatePassiveSellOfferOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *CreatePassiveSellOfferOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*CreatePassiveSellOfferOp)(nil) + _ encoding.BinaryUnmarshaler = (*CreatePassiveSellOfferOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s CreatePassiveSellOfferOp) xdrType() {} + +var _ xdrType = (*CreatePassiveSellOfferOp)(nil) + +// SetOptionsOp is an XDR Struct defines as: +// +// struct SetOptionsOp +// { +// AccountID* inflationDest; // sets the inflation destination +// +// uint32* clearFlags; // which flags to clear +// uint32* setFlags; // which flags to set +// +// // account threshold manipulation +// uint32* masterWeight; // weight of the master account +// uint32* lowThreshold; +// uint32* medThreshold; +// uint32* highThreshold; +// +// string32* homeDomain; // sets the home domain +// +// // Add, update or remove a signer for the account +// // signer is deleted if the weight is 0 +// Signer* signer; +// }; +// +type SetOptionsOp struct { + InflationDest *AccountId + ClearFlags *Uint32 + SetFlags *Uint32 + MasterWeight *Uint32 + LowThreshold *Uint32 + MedThreshold *Uint32 + HighThreshold *Uint32 + HomeDomain *String32 + Signer *Signer +} + +// EncodeTo encodes this value using the Encoder. +func (s *SetOptionsOp) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeBool(s.InflationDest != nil); err != nil { + return err + } + if s.InflationDest != nil { + if err = (*s.InflationDest).EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeBool(s.ClearFlags != nil); err != nil { + return err + } + if s.ClearFlags != nil { + if err = (*s.ClearFlags).EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeBool(s.SetFlags != nil); err != nil { + return err + } + if s.SetFlags != nil { + if err = (*s.SetFlags).EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeBool(s.MasterWeight != nil); err != nil { + return err + } + if s.MasterWeight != nil { + if err = (*s.MasterWeight).EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeBool(s.LowThreshold != nil); err != nil { + return err + } + if s.LowThreshold != nil { + if err = (*s.LowThreshold).EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeBool(s.MedThreshold != nil); err != nil { + return err + } + if s.MedThreshold != nil { + if err = (*s.MedThreshold).EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeBool(s.HighThreshold != nil); err != nil { + return err + } + if s.HighThreshold != nil { + if err = (*s.HighThreshold).EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeBool(s.HomeDomain != nil); err != nil { + return err + } + if s.HomeDomain != nil { + if err = (*s.HomeDomain).EncodeTo(e); err != nil { + return err + } + } + if _, err = e.EncodeBool(s.Signer != nil); err != nil { + return err + } + if s.Signer != nil { + if err = (*s.Signer).EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*SetOptionsOp)(nil) + +// DecodeFrom decodes this value using the Decoder. 
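+//
+// Each optional (pointer) field is preceded on the wire by an XDR boolean:
+// when it decodes as true a new value is allocated and filled, otherwise the
+// pointer is reset to nil. Usage sketch (not generator output), with raw an
+// assumed XDR-encoded input:
+//
+//   var op SetOptionsOp
+//   _, err := op.DecodeFrom(xdr.NewDecoder(bytes.NewReader(raw)))
+//   if err == nil && op.HomeDomain != nil {
+//       // a home domain was present on the wire
+//   }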
+func (s *SetOptionsOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var b bool + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + s.InflationDest = nil + if b { + s.InflationDest = new(AccountId) + nTmp, err = s.InflationDest.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + } + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + s.ClearFlags = nil + if b { + s.ClearFlags = new(Uint32) + nTmp, err = s.ClearFlags.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + } + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + s.SetFlags = nil + if b { + s.SetFlags = new(Uint32) + nTmp, err = s.SetFlags.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + } + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + s.MasterWeight = nil + if b { + s.MasterWeight = new(Uint32) + nTmp, err = s.MasterWeight.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + } + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + s.LowThreshold = nil + if b { + s.LowThreshold = new(Uint32) + nTmp, err = s.LowThreshold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + } + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + s.MedThreshold = nil + if b { + s.MedThreshold = new(Uint32) + nTmp, err = s.MedThreshold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + } + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + s.HighThreshold = nil + if b { + s.HighThreshold = new(Uint32) + nTmp, err = s.HighThreshold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + } + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding String32: %s", err) + } + s.HomeDomain = nil + if b { + s.HomeDomain = new(String32) + nTmp, err = s.HomeDomain.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding String32: %s", err) + } + } + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signer: %s", err) + } + s.Signer = nil + if b { + s.Signer = new(Signer) + nTmp, err = s.Signer.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signer: %s", err) + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SetOptionsOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
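+//
+// Because the optional fields are plain Go pointers, callers typically take
+// the address of local values when building the operation. Usage sketch with
+// assumed example values (not generator output):
+//
+//   weight := Uint32(1)
+//   domain := String32("example.com")
+//   op := SetOptionsOp{MasterWeight: &weight, HomeDomain: &domain}
+//   raw, _ := op.MarshalBinary()
+//   var decoded SetOptionsOp
+//   err := decoded.UnmarshalBinary(raw)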
+func (s *SetOptionsOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SetOptionsOp)(nil) + _ encoding.BinaryUnmarshaler = (*SetOptionsOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SetOptionsOp) xdrType() {} + +var _ xdrType = (*SetOptionsOp)(nil) + +// ChangeTrustAsset is an XDR Union defines as: +// +// union ChangeTrustAsset switch (AssetType type) +// { +// case ASSET_TYPE_NATIVE: // Not credit +// void; +// +// case ASSET_TYPE_CREDIT_ALPHANUM4: +// AlphaNum4 alphaNum4; +// +// case ASSET_TYPE_CREDIT_ALPHANUM12: +// AlphaNum12 alphaNum12; +// +// case ASSET_TYPE_POOL_SHARE: +// LiquidityPoolParameters liquidityPool; +// +// // add other asset types here in the future +// }; +// +type ChangeTrustAsset struct { + Type AssetType + AlphaNum4 *AlphaNum4 + AlphaNum12 *AlphaNum12 + LiquidityPool *LiquidityPoolParameters +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u ChangeTrustAsset) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of ChangeTrustAsset +func (u ChangeTrustAsset) ArmForSwitch(sw int32) (string, bool) { + switch AssetType(sw) { + case AssetTypeAssetTypeNative: + return "", true + case AssetTypeAssetTypeCreditAlphanum4: + return "AlphaNum4", true + case AssetTypeAssetTypeCreditAlphanum12: + return "AlphaNum12", true + case AssetTypeAssetTypePoolShare: + return "LiquidityPool", true + } + return "-", false +} + +// NewChangeTrustAsset creates a new ChangeTrustAsset. +func NewChangeTrustAsset(aType AssetType, value interface{}) (result ChangeTrustAsset, err error) { + result.Type = aType + switch AssetType(aType) { + case AssetTypeAssetTypeNative: + // void + case AssetTypeAssetTypeCreditAlphanum4: + tv, ok := value.(AlphaNum4) + if !ok { + err = fmt.Errorf("invalid value, must be AlphaNum4") + return + } + result.AlphaNum4 = &tv + case AssetTypeAssetTypeCreditAlphanum12: + tv, ok := value.(AlphaNum12) + if !ok { + err = fmt.Errorf("invalid value, must be AlphaNum12") + return + } + result.AlphaNum12 = &tv + case AssetTypeAssetTypePoolShare: + tv, ok := value.(LiquidityPoolParameters) + if !ok { + err = fmt.Errorf("invalid value, must be LiquidityPoolParameters") + return + } + result.LiquidityPool = &tv + } + return +} + +// MustAlphaNum4 retrieves the AlphaNum4 value from the union, +// panicing if the value is not set. +func (u ChangeTrustAsset) MustAlphaNum4() AlphaNum4 { + val, ok := u.GetAlphaNum4() + + if !ok { + panic("arm AlphaNum4 is not set") + } + + return val +} + +// GetAlphaNum4 retrieves the AlphaNum4 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ChangeTrustAsset) GetAlphaNum4() (result AlphaNum4, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AlphaNum4" { + result = *u.AlphaNum4 + ok = true + } + + return +} + +// MustAlphaNum12 retrieves the AlphaNum12 value from the union, +// panicing if the value is not set. +func (u ChangeTrustAsset) MustAlphaNum12() AlphaNum12 { + val, ok := u.GetAlphaNum12() + + if !ok { + panic("arm AlphaNum12 is not set") + } + + return val +} + +// GetAlphaNum12 retrieves the AlphaNum12 value from the union, +// returning ok if the union's switch indicated the value is valid. 
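+//
+// A common pattern (sketch, not generator output) is to build the union with
+// NewChangeTrustAsset and read it back through the Get* accessors, which only
+// report ok when the stored Type selects that arm. a12 below is an assumed
+// AlphaNum12 value.
+//
+//   cta, err := NewChangeTrustAsset(AssetTypeAssetTypeCreditAlphanum12, a12)
+//   if a, ok := cta.GetAlphaNum12(); err == nil && ok {
+//       _ = a // the alphanum-12 arm is set
+//   }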
+func (u ChangeTrustAsset) GetAlphaNum12() (result AlphaNum12, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AlphaNum12" { result = *u.AlphaNum12 ok = true } @@ -520,2144 +18654,8316 @@ func (u Asset) GetAlphaNum12() (result AssetAlphaNum12, ok bool) { return } -// Price is an XDR Struct defines as: +// MustLiquidityPool retrieves the LiquidityPool value from the union, +// panicing if the value is not set. +func (u ChangeTrustAsset) MustLiquidityPool() LiquidityPoolParameters { + val, ok := u.GetLiquidityPool() + + if !ok { + panic("arm LiquidityPool is not set") + } + + return val +} + +// GetLiquidityPool retrieves the LiquidityPool value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ChangeTrustAsset) GetLiquidityPool() (result LiquidityPoolParameters, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "LiquidityPool" { + result = *u.LiquidityPool + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u ChangeTrustAsset) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch AssetType(u.Type) { + case AssetTypeAssetTypeNative: + // Void + return nil + case AssetTypeAssetTypeCreditAlphanum4: + if err = (*u.AlphaNum4).EncodeTo(e); err != nil { + return err + } + return nil + case AssetTypeAssetTypeCreditAlphanum12: + if err = (*u.AlphaNum12).EncodeTo(e); err != nil { + return err + } + return nil + case AssetTypeAssetTypePoolShare: + if err = (*u.LiquidityPool).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (AssetType) switch value '%d' is not valid for union ChangeTrustAsset", u.Type) +} + +var _ decoderFrom = (*ChangeTrustAsset)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *ChangeTrustAsset) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetType: %s", err) + } + switch AssetType(u.Type) { + case AssetTypeAssetTypeNative: + // Void + return n, nil + case AssetTypeAssetTypeCreditAlphanum4: + u.AlphaNum4 = new(AlphaNum4) + nTmp, err = (*u.AlphaNum4).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AlphaNum4: %s", err) + } + return n, nil + case AssetTypeAssetTypeCreditAlphanum12: + u.AlphaNum12 = new(AlphaNum12) + nTmp, err = (*u.AlphaNum12).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AlphaNum12: %s", err) + } + return n, nil + case AssetTypeAssetTypePoolShare: + u.LiquidityPool = new(LiquidityPoolParameters) + nTmp, err = (*u.LiquidityPool).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolParameters: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union ChangeTrustAsset has invalid Type (AssetType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ChangeTrustAsset) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *ChangeTrustAsset) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ChangeTrustAsset)(nil) + _ encoding.BinaryUnmarshaler = (*ChangeTrustAsset)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ChangeTrustAsset) xdrType() {} + +var _ xdrType = (*ChangeTrustAsset)(nil) + +// ChangeTrustOp is an XDR Struct defines as: +// +// struct ChangeTrustOp +// { +// ChangeTrustAsset line; +// +// // if limit is set to 0, deletes the trust line +// int64 limit; +// }; +// +type ChangeTrustOp struct { + Line ChangeTrustAsset + Limit Int64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *ChangeTrustOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Line.EncodeTo(e); err != nil { + return err + } + if err = s.Limit.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ChangeTrustOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ChangeTrustOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Line.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ChangeTrustAsset: %s", err) + } + nTmp, err = s.Limit.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ChangeTrustOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ChangeTrustOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ChangeTrustOp)(nil) + _ encoding.BinaryUnmarshaler = (*ChangeTrustOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ChangeTrustOp) xdrType() {} + +var _ xdrType = (*ChangeTrustOp)(nil) + +// AllowTrustOp is an XDR Struct defines as: +// +// struct AllowTrustOp +// { +// AccountID trustor; +// AssetCode asset; +// +// // One of 0, AUTHORIZED_FLAG, or AUTHORIZED_TO_MAINTAIN_LIABILITIES_FLAG +// uint32 authorize; +// }; +// +type AllowTrustOp struct { + Trustor AccountId + Asset AssetCode + Authorize Uint32 +} + +// EncodeTo encodes this value using the Encoder. +func (s *AllowTrustOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Trustor.EncodeTo(e); err != nil { + return err + } + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + if err = s.Authorize.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*AllowTrustOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *AllowTrustOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Trustor.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AssetCode: %s", err) + } + nTmp, err = s.Authorize.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s AllowTrustOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AllowTrustOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AllowTrustOp)(nil) + _ encoding.BinaryUnmarshaler = (*AllowTrustOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AllowTrustOp) xdrType() {} + +var _ xdrType = (*AllowTrustOp)(nil) + +// ManageDataOp is an XDR Struct defines as: +// +// struct ManageDataOp +// { +// string64 dataName; +// DataValue* dataValue; // set to null to clear +// }; +// +type ManageDataOp struct { + DataName String64 + DataValue *DataValue +} + +// EncodeTo encodes this value using the Encoder. +func (s *ManageDataOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.DataName.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeBool(s.DataValue != nil); err != nil { + return err + } + if s.DataValue != nil { + if err = (*s.DataValue).EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*ManageDataOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ManageDataOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.DataName.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding String64: %s", err) + } + var b bool + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DataValue: %s", err) + } + s.DataValue = nil + if b { + s.DataValue = new(DataValue) + nTmp, err = s.DataValue.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DataValue: %s", err) + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageDataOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageDataOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageDataOp)(nil) + _ encoding.BinaryUnmarshaler = (*ManageDataOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageDataOp) xdrType() {} + +var _ xdrType = (*ManageDataOp)(nil) + +// BumpSequenceOp is an XDR Struct defines as: +// +// struct BumpSequenceOp +// { +// SequenceNumber bumpTo; +// }; +// +type BumpSequenceOp struct { + BumpTo SequenceNumber +} + +// EncodeTo encodes this value using the Encoder. +func (s *BumpSequenceOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.BumpTo.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*BumpSequenceOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *BumpSequenceOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.BumpTo.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SequenceNumber: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s BumpSequenceOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *BumpSequenceOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*BumpSequenceOp)(nil) + _ encoding.BinaryUnmarshaler = (*BumpSequenceOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s BumpSequenceOp) xdrType() {} + +var _ xdrType = (*BumpSequenceOp)(nil) + +// CreateClaimableBalanceOp is an XDR Struct defines as: +// +// struct CreateClaimableBalanceOp +// { +// Asset asset; +// int64 amount; +// Claimant claimants<10>; +// }; +// +type CreateClaimableBalanceOp struct { + Asset Asset + Amount Int64 + Claimants []Claimant `xdrmaxsize:"10"` +} + +// EncodeTo encodes this value using the Encoder. +func (s *CreateClaimableBalanceOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Claimants))); err != nil { + return err + } + for i := 0; i < len(s.Claimants); i++ { + if err = s.Claimants[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*CreateClaimableBalanceOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *CreateClaimableBalanceOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Claimant: %s", err) + } + if l > 10 { + return n, fmt.Errorf("decoding Claimant: data size (%d) exceeds size limit (10)", l) + } + s.Claimants = nil + if l > 0 { + s.Claimants = make([]Claimant, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Claimants[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Claimant: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s CreateClaimableBalanceOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *CreateClaimableBalanceOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*CreateClaimableBalanceOp)(nil) + _ encoding.BinaryUnmarshaler = (*CreateClaimableBalanceOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s CreateClaimableBalanceOp) xdrType() {} + +var _ xdrType = (*CreateClaimableBalanceOp)(nil) + +// ClaimClaimableBalanceOp is an XDR Struct defines as: +// +// struct ClaimClaimableBalanceOp +// { +// ClaimableBalanceID balanceID; +// }; +// +type ClaimClaimableBalanceOp struct { + BalanceId ClaimableBalanceId +} + +// EncodeTo encodes this value using the Encoder. 
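+//
+// Unlike MarshalBinary, EncodeTo writes into an existing Encoder, so several
+// values can share one buffer. Usage sketch (not generator output), with op an
+// assumed ClaimClaimableBalanceOp:
+//
+//   var buf bytes.Buffer
+//   enc := xdr.NewEncoder(&buf)
+//   if err := op.EncodeTo(enc); err != nil {
+//       // handle encoding error
+//   }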
+func (s *ClaimClaimableBalanceOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.BalanceId.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ClaimClaimableBalanceOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ClaimClaimableBalanceOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.BalanceId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceId: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimClaimableBalanceOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimClaimableBalanceOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimClaimableBalanceOp)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimClaimableBalanceOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimClaimableBalanceOp) xdrType() {} + +var _ xdrType = (*ClaimClaimableBalanceOp)(nil) + +// BeginSponsoringFutureReservesOp is an XDR Struct defines as: +// +// struct BeginSponsoringFutureReservesOp +// { +// AccountID sponsoredID; +// }; +// +type BeginSponsoringFutureReservesOp struct { + SponsoredId AccountId +} + +// EncodeTo encodes this value using the Encoder. +func (s *BeginSponsoringFutureReservesOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SponsoredId.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*BeginSponsoringFutureReservesOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *BeginSponsoringFutureReservesOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SponsoredId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s BeginSponsoringFutureReservesOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *BeginSponsoringFutureReservesOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*BeginSponsoringFutureReservesOp)(nil) + _ encoding.BinaryUnmarshaler = (*BeginSponsoringFutureReservesOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s BeginSponsoringFutureReservesOp) xdrType() {} + +var _ xdrType = (*BeginSponsoringFutureReservesOp)(nil) + +// RevokeSponsorshipType is an XDR Enum defines as: +// +// enum RevokeSponsorshipType +// { +// REVOKE_SPONSORSHIP_LEDGER_ENTRY = 0, +// REVOKE_SPONSORSHIP_SIGNER = 1 +// }; +// +type RevokeSponsorshipType int32 + +const ( + RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry RevokeSponsorshipType = 0 + RevokeSponsorshipTypeRevokeSponsorshipSigner RevokeSponsorshipType = 1 +) + +var revokeSponsorshipTypeMap = map[int32]string{ + 0: "RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry", + 1: "RevokeSponsorshipTypeRevokeSponsorshipSigner", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for RevokeSponsorshipType +func (e RevokeSponsorshipType) ValidEnum(v int32) bool { + _, ok := revokeSponsorshipTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e RevokeSponsorshipType) String() string { + name, _ := revokeSponsorshipTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e RevokeSponsorshipType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := revokeSponsorshipTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid RevokeSponsorshipType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*RevokeSponsorshipType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *RevokeSponsorshipType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding RevokeSponsorshipType: %s", err) + } + if _, ok := revokeSponsorshipTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid RevokeSponsorshipType enum value", v) + } + *e = RevokeSponsorshipType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s RevokeSponsorshipType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *RevokeSponsorshipType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*RevokeSponsorshipType)(nil) + _ encoding.BinaryUnmarshaler = (*RevokeSponsorshipType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s RevokeSponsorshipType) xdrType() {} + +var _ xdrType = (*RevokeSponsorshipType)(nil) + +// RevokeSponsorshipOpSigner is an XDR NestedStruct defines as: +// +// struct +// { +// AccountID accountID; +// SignerKey signerKey; +// } +// +type RevokeSponsorshipOpSigner struct { + AccountId AccountId + SignerKey SignerKey +} + +// EncodeTo encodes this value using the Encoder. +func (s *RevokeSponsorshipOpSigner) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.AccountId.EncodeTo(e); err != nil { + return err + } + if err = s.SignerKey.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*RevokeSponsorshipOpSigner)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *RevokeSponsorshipOpSigner) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.AccountId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.SignerKey.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SignerKey: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s RevokeSponsorshipOpSigner) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *RevokeSponsorshipOpSigner) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*RevokeSponsorshipOpSigner)(nil) + _ encoding.BinaryUnmarshaler = (*RevokeSponsorshipOpSigner)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s RevokeSponsorshipOpSigner) xdrType() {} + +var _ xdrType = (*RevokeSponsorshipOpSigner)(nil) + +// RevokeSponsorshipOp is an XDR Union defines as: +// +// union RevokeSponsorshipOp switch (RevokeSponsorshipType type) +// { +// case REVOKE_SPONSORSHIP_LEDGER_ENTRY: +// LedgerKey ledgerKey; +// case REVOKE_SPONSORSHIP_SIGNER: +// struct +// { +// AccountID accountID; +// SignerKey signerKey; +// } signer; +// }; +// +type RevokeSponsorshipOp struct { + Type RevokeSponsorshipType + LedgerKey *LedgerKey + Signer *RevokeSponsorshipOpSigner +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u RevokeSponsorshipOp) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of RevokeSponsorshipOp +func (u RevokeSponsorshipOp) ArmForSwitch(sw int32) (string, bool) { + switch RevokeSponsorshipType(sw) { + case RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry: + return "LedgerKey", true + case RevokeSponsorshipTypeRevokeSponsorshipSigner: + return "Signer", true + } + return "-", false +} + +// NewRevokeSponsorshipOp creates a new RevokeSponsorshipOp. +func NewRevokeSponsorshipOp(aType RevokeSponsorshipType, value interface{}) (result RevokeSponsorshipOp, err error) { + result.Type = aType + switch RevokeSponsorshipType(aType) { + case RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry: + tv, ok := value.(LedgerKey) + if !ok { + err = fmt.Errorf("invalid value, must be LedgerKey") + return + } + result.LedgerKey = &tv + case RevokeSponsorshipTypeRevokeSponsorshipSigner: + tv, ok := value.(RevokeSponsorshipOpSigner) + if !ok { + err = fmt.Errorf("invalid value, must be RevokeSponsorshipOpSigner") + return + } + result.Signer = &tv + } + return +} + +// MustLedgerKey retrieves the LedgerKey value from the union, +// panicing if the value is not set. +func (u RevokeSponsorshipOp) MustLedgerKey() LedgerKey { + val, ok := u.GetLedgerKey() + + if !ok { + panic("arm LedgerKey is not set") + } + + return val +} + +// GetLedgerKey retrieves the LedgerKey value from the union, +// returning ok if the union's switch indicated the value is valid. 
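+//
+// After decoding, callers usually branch on the discriminant and use the
+// accessor for the selected arm (sketch, not generator output):
+//
+//   switch op.Type {
+//   case RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry:
+//       key, _ := op.GetLedgerKey()
+//       _ = key
+//   case RevokeSponsorshipTypeRevokeSponsorshipSigner:
+//       signer, _ := op.GetSigner()
+//       _ = signer
+//   }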
+func (u RevokeSponsorshipOp) GetLedgerKey() (result LedgerKey, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "LedgerKey" { + result = *u.LedgerKey + ok = true + } + + return +} + +// MustSigner retrieves the Signer value from the union, +// panicing if the value is not set. +func (u RevokeSponsorshipOp) MustSigner() RevokeSponsorshipOpSigner { + val, ok := u.GetSigner() + + if !ok { + panic("arm Signer is not set") + } + + return val +} + +// GetSigner retrieves the Signer value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u RevokeSponsorshipOp) GetSigner() (result RevokeSponsorshipOpSigner, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Signer" { + result = *u.Signer + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u RevokeSponsorshipOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch RevokeSponsorshipType(u.Type) { + case RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry: + if err = (*u.LedgerKey).EncodeTo(e); err != nil { + return err + } + return nil + case RevokeSponsorshipTypeRevokeSponsorshipSigner: + if err = (*u.Signer).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (RevokeSponsorshipType) switch value '%d' is not valid for union RevokeSponsorshipOp", u.Type) +} + +var _ decoderFrom = (*RevokeSponsorshipOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *RevokeSponsorshipOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding RevokeSponsorshipType: %s", err) + } + switch RevokeSponsorshipType(u.Type) { + case RevokeSponsorshipTypeRevokeSponsorshipLedgerEntry: + u.LedgerKey = new(LedgerKey) + nTmp, err = (*u.LedgerKey).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LedgerKey: %s", err) + } + return n, nil + case RevokeSponsorshipTypeRevokeSponsorshipSigner: + u.Signer = new(RevokeSponsorshipOpSigner) + nTmp, err = (*u.Signer).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding RevokeSponsorshipOpSigner: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union RevokeSponsorshipOp has invalid Type (RevokeSponsorshipType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s RevokeSponsorshipOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *RevokeSponsorshipOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*RevokeSponsorshipOp)(nil) + _ encoding.BinaryUnmarshaler = (*RevokeSponsorshipOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s RevokeSponsorshipOp) xdrType() {} + +var _ xdrType = (*RevokeSponsorshipOp)(nil) + +// ClawbackOp is an XDR Struct defines as: +// +// struct ClawbackOp +// { +// Asset asset; +// MuxedAccount from; +// int64 amount; +// }; +// +type ClawbackOp struct { + Asset Asset + From MuxedAccount + Amount Int64 +} + +// EncodeTo encodes this value using the Encoder. 
+func (s *ClawbackOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + if err = s.From.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ClawbackOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ClawbackOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.From.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccount: %s", err) + } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClawbackOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClawbackOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClawbackOp)(nil) + _ encoding.BinaryUnmarshaler = (*ClawbackOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClawbackOp) xdrType() {} + +var _ xdrType = (*ClawbackOp)(nil) + +// ClawbackClaimableBalanceOp is an XDR Struct defines as: +// +// struct ClawbackClaimableBalanceOp +// { +// ClaimableBalanceID balanceID; +// }; +// +type ClawbackClaimableBalanceOp struct { + BalanceId ClaimableBalanceId +} + +// EncodeTo encodes this value using the Encoder. +func (s *ClawbackClaimableBalanceOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.BalanceId.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ClawbackClaimableBalanceOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ClawbackClaimableBalanceOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.BalanceId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceId: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClawbackClaimableBalanceOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClawbackClaimableBalanceOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClawbackClaimableBalanceOp)(nil) + _ encoding.BinaryUnmarshaler = (*ClawbackClaimableBalanceOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ClawbackClaimableBalanceOp) xdrType() {} + +var _ xdrType = (*ClawbackClaimableBalanceOp)(nil) + +// SetTrustLineFlagsOp is an XDR Struct defines as: +// +// struct SetTrustLineFlagsOp +// { +// AccountID trustor; +// Asset asset; +// +// uint32 clearFlags; // which flags to clear +// uint32 setFlags; // which flags to set +// }; +// +type SetTrustLineFlagsOp struct { + Trustor AccountId + Asset Asset + ClearFlags Uint32 + SetFlags Uint32 +} + +// EncodeTo encodes this value using the Encoder. +func (s *SetTrustLineFlagsOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Trustor.EncodeTo(e); err != nil { + return err + } + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + if err = s.ClearFlags.EncodeTo(e); err != nil { + return err + } + if err = s.SetFlags.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*SetTrustLineFlagsOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *SetTrustLineFlagsOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Trustor.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.ClearFlags.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.SetFlags.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SetTrustLineFlagsOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SetTrustLineFlagsOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SetTrustLineFlagsOp)(nil) + _ encoding.BinaryUnmarshaler = (*SetTrustLineFlagsOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SetTrustLineFlagsOp) xdrType() {} + +var _ xdrType = (*SetTrustLineFlagsOp)(nil) + +// LiquidityPoolFeeV18 is an XDR Const defines as: +// +// const LIQUIDITY_POOL_FEE_V18 = 30; +// +const LiquidityPoolFeeV18 = 30 + +// LiquidityPoolDepositOp is an XDR Struct defines as: +// +// struct LiquidityPoolDepositOp +// { +// PoolID liquidityPoolID; +// int64 maxAmountA; // maximum amount of first asset to deposit +// int64 maxAmountB; // maximum amount of second asset to deposit +// Price minPrice; // minimum depositA/depositB +// Price maxPrice; // maximum depositA/depositB +// }; +// +type LiquidityPoolDepositOp struct { + LiquidityPoolId PoolId + MaxAmountA Int64 + MaxAmountB Int64 + MinPrice Price + MaxPrice Price +} + +// EncodeTo encodes this value using the Encoder. 
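+//
+// Illustrative sketch (not generated code): building a deposit operation.
+// poolID, minPrice and maxPrice are assumed to be existing PoolId and Price
+// values built elsewhere.
+//
+//     deposit := LiquidityPoolDepositOp{
+//         LiquidityPoolId: poolID,
+//         MaxAmountA:      Int64(1000),
+//         MaxAmountB:      Int64(2000),
+//         MinPrice:        minPrice, // minimum depositA/depositB
+//         MaxPrice:        maxPrice, // maximum depositA/depositB
+//     }
+//     raw, err := deposit.MarshalBinary()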
+func (s *LiquidityPoolDepositOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LiquidityPoolId.EncodeTo(e); err != nil { + return err + } + if err = s.MaxAmountA.EncodeTo(e); err != nil { + return err + } + if err = s.MaxAmountB.EncodeTo(e); err != nil { + return err + } + if err = s.MinPrice.EncodeTo(e); err != nil { + return err + } + if err = s.MaxPrice.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LiquidityPoolDepositOp)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *LiquidityPoolDepositOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LiquidityPoolId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PoolId: %s", err) + } + nTmp, err = s.MaxAmountA.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.MaxAmountB.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.MinPrice.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Price: %s", err) + } + nTmp, err = s.MaxPrice.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Price: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolDepositOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolDepositOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolDepositOp)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolDepositOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolDepositOp) xdrType() {} + +var _ xdrType = (*LiquidityPoolDepositOp)(nil) + +// LiquidityPoolWithdrawOp is an XDR Struct defines as: +// +// struct LiquidityPoolWithdrawOp +// { +// PoolID liquidityPoolID; +// int64 amount; // amount of pool shares to withdraw +// int64 minAmountA; // minimum amount of first asset to withdraw +// int64 minAmountB; // minimum amount of second asset to withdraw +// }; +// +type LiquidityPoolWithdrawOp struct { + LiquidityPoolId PoolId + Amount Int64 + MinAmountA Int64 + MinAmountB Int64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *LiquidityPoolWithdrawOp) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LiquidityPoolId.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + if err = s.MinAmountA.EncodeTo(e); err != nil { + return err + } + if err = s.MinAmountB.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*LiquidityPoolWithdrawOp)(nil) + +// DecodeFrom decodes this value using the Decoder. 
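+//
+// Illustrative sketch (not generated code): DecodeFrom can be called directly
+// when reading from a stream; it returns the number of bytes read. The raw
+// byte slice below is assumed to hold a previously encoded value.
+//
+//     var op LiquidityPoolWithdrawOp
+//     d := xdr.NewDecoder(bytes.NewReader(raw))
+//     nRead, err := op.DecodeFrom(d)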
+func (s *LiquidityPoolWithdrawOp) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LiquidityPoolId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PoolId: %s", err) + } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.MinAmountA.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.MinAmountB.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolWithdrawOp) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolWithdrawOp) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolWithdrawOp)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolWithdrawOp)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolWithdrawOp) xdrType() {} + +var _ xdrType = (*LiquidityPoolWithdrawOp)(nil) + +// OperationBody is an XDR NestedUnion defines as: +// +// union switch (OperationType type) +// { +// case CREATE_ACCOUNT: +// CreateAccountOp createAccountOp; +// case PAYMENT: +// PaymentOp paymentOp; +// case PATH_PAYMENT_STRICT_RECEIVE: +// PathPaymentStrictReceiveOp pathPaymentStrictReceiveOp; +// case MANAGE_SELL_OFFER: +// ManageSellOfferOp manageSellOfferOp; +// case CREATE_PASSIVE_SELL_OFFER: +// CreatePassiveSellOfferOp createPassiveSellOfferOp; +// case SET_OPTIONS: +// SetOptionsOp setOptionsOp; +// case CHANGE_TRUST: +// ChangeTrustOp changeTrustOp; +// case ALLOW_TRUST: +// AllowTrustOp allowTrustOp; +// case ACCOUNT_MERGE: +// MuxedAccount destination; +// case INFLATION: +// void; +// case MANAGE_DATA: +// ManageDataOp manageDataOp; +// case BUMP_SEQUENCE: +// BumpSequenceOp bumpSequenceOp; +// case MANAGE_BUY_OFFER: +// ManageBuyOfferOp manageBuyOfferOp; +// case PATH_PAYMENT_STRICT_SEND: +// PathPaymentStrictSendOp pathPaymentStrictSendOp; +// case CREATE_CLAIMABLE_BALANCE: +// CreateClaimableBalanceOp createClaimableBalanceOp; +// case CLAIM_CLAIMABLE_BALANCE: +// ClaimClaimableBalanceOp claimClaimableBalanceOp; +// case BEGIN_SPONSORING_FUTURE_RESERVES: +// BeginSponsoringFutureReservesOp beginSponsoringFutureReservesOp; +// case END_SPONSORING_FUTURE_RESERVES: +// void; +// case REVOKE_SPONSORSHIP: +// RevokeSponsorshipOp revokeSponsorshipOp; +// case CLAWBACK: +// ClawbackOp clawbackOp; +// case CLAWBACK_CLAIMABLE_BALANCE: +// ClawbackClaimableBalanceOp clawbackClaimableBalanceOp; +// case SET_TRUST_LINE_FLAGS: +// SetTrustLineFlagsOp setTrustLineFlagsOp; +// case LIQUIDITY_POOL_DEPOSIT: +// LiquidityPoolDepositOp liquidityPoolDepositOp; +// case LIQUIDITY_POOL_WITHDRAW: +// LiquidityPoolWithdrawOp liquidityPoolWithdrawOp; +// } +// +type OperationBody struct { + Type OperationType + CreateAccountOp *CreateAccountOp + PaymentOp *PaymentOp + PathPaymentStrictReceiveOp *PathPaymentStrictReceiveOp + ManageSellOfferOp *ManageSellOfferOp + CreatePassiveSellOfferOp *CreatePassiveSellOfferOp + SetOptionsOp *SetOptionsOp + ChangeTrustOp *ChangeTrustOp + 
AllowTrustOp *AllowTrustOp + Destination *MuxedAccount + ManageDataOp *ManageDataOp + BumpSequenceOp *BumpSequenceOp + ManageBuyOfferOp *ManageBuyOfferOp + PathPaymentStrictSendOp *PathPaymentStrictSendOp + CreateClaimableBalanceOp *CreateClaimableBalanceOp + ClaimClaimableBalanceOp *ClaimClaimableBalanceOp + BeginSponsoringFutureReservesOp *BeginSponsoringFutureReservesOp + RevokeSponsorshipOp *RevokeSponsorshipOp + ClawbackOp *ClawbackOp + ClawbackClaimableBalanceOp *ClawbackClaimableBalanceOp + SetTrustLineFlagsOp *SetTrustLineFlagsOp + LiquidityPoolDepositOp *LiquidityPoolDepositOp + LiquidityPoolWithdrawOp *LiquidityPoolWithdrawOp +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u OperationBody) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of OperationBody +func (u OperationBody) ArmForSwitch(sw int32) (string, bool) { + switch OperationType(sw) { + case OperationTypeCreateAccount: + return "CreateAccountOp", true + case OperationTypePayment: + return "PaymentOp", true + case OperationTypePathPaymentStrictReceive: + return "PathPaymentStrictReceiveOp", true + case OperationTypeManageSellOffer: + return "ManageSellOfferOp", true + case OperationTypeCreatePassiveSellOffer: + return "CreatePassiveSellOfferOp", true + case OperationTypeSetOptions: + return "SetOptionsOp", true + case OperationTypeChangeTrust: + return "ChangeTrustOp", true + case OperationTypeAllowTrust: + return "AllowTrustOp", true + case OperationTypeAccountMerge: + return "Destination", true + case OperationTypeInflation: + return "", true + case OperationTypeManageData: + return "ManageDataOp", true + case OperationTypeBumpSequence: + return "BumpSequenceOp", true + case OperationTypeManageBuyOffer: + return "ManageBuyOfferOp", true + case OperationTypePathPaymentStrictSend: + return "PathPaymentStrictSendOp", true + case OperationTypeCreateClaimableBalance: + return "CreateClaimableBalanceOp", true + case OperationTypeClaimClaimableBalance: + return "ClaimClaimableBalanceOp", true + case OperationTypeBeginSponsoringFutureReserves: + return "BeginSponsoringFutureReservesOp", true + case OperationTypeEndSponsoringFutureReserves: + return "", true + case OperationTypeRevokeSponsorship: + return "RevokeSponsorshipOp", true + case OperationTypeClawback: + return "ClawbackOp", true + case OperationTypeClawbackClaimableBalance: + return "ClawbackClaimableBalanceOp", true + case OperationTypeSetTrustLineFlags: + return "SetTrustLineFlagsOp", true + case OperationTypeLiquidityPoolDeposit: + return "LiquidityPoolDepositOp", true + case OperationTypeLiquidityPoolWithdraw: + return "LiquidityPoolWithdrawOp", true + } + return "-", false +} + +// NewOperationBody creates a new OperationBody. 
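+//
+// Illustrative sketch (not generated code): NewOperationBody pairs a
+// discriminant with a matching arm value and returns an error on a mismatch.
+// The paymentOp value below is assumed to be a PaymentOp built elsewhere.
+//
+//     body, err := NewOperationBody(OperationTypePayment, paymentOp)
+//     if err != nil {
+//         // value did not match the OperationType discriminant
+//     }
+//     _ = body.MustPaymentOp() // or body.GetPaymentOp() to avoid the panic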
+func NewOperationBody(aType OperationType, value interface{}) (result OperationBody, err error) { + result.Type = aType + switch OperationType(aType) { + case OperationTypeCreateAccount: + tv, ok := value.(CreateAccountOp) + if !ok { + err = fmt.Errorf("invalid value, must be CreateAccountOp") + return + } + result.CreateAccountOp = &tv + case OperationTypePayment: + tv, ok := value.(PaymentOp) + if !ok { + err = fmt.Errorf("invalid value, must be PaymentOp") + return + } + result.PaymentOp = &tv + case OperationTypePathPaymentStrictReceive: + tv, ok := value.(PathPaymentStrictReceiveOp) + if !ok { + err = fmt.Errorf("invalid value, must be PathPaymentStrictReceiveOp") + return + } + result.PathPaymentStrictReceiveOp = &tv + case OperationTypeManageSellOffer: + tv, ok := value.(ManageSellOfferOp) + if !ok { + err = fmt.Errorf("invalid value, must be ManageSellOfferOp") + return + } + result.ManageSellOfferOp = &tv + case OperationTypeCreatePassiveSellOffer: + tv, ok := value.(CreatePassiveSellOfferOp) + if !ok { + err = fmt.Errorf("invalid value, must be CreatePassiveSellOfferOp") + return + } + result.CreatePassiveSellOfferOp = &tv + case OperationTypeSetOptions: + tv, ok := value.(SetOptionsOp) + if !ok { + err = fmt.Errorf("invalid value, must be SetOptionsOp") + return + } + result.SetOptionsOp = &tv + case OperationTypeChangeTrust: + tv, ok := value.(ChangeTrustOp) + if !ok { + err = fmt.Errorf("invalid value, must be ChangeTrustOp") + return + } + result.ChangeTrustOp = &tv + case OperationTypeAllowTrust: + tv, ok := value.(AllowTrustOp) + if !ok { + err = fmt.Errorf("invalid value, must be AllowTrustOp") + return + } + result.AllowTrustOp = &tv + case OperationTypeAccountMerge: + tv, ok := value.(MuxedAccount) + if !ok { + err = fmt.Errorf("invalid value, must be MuxedAccount") + return + } + result.Destination = &tv + case OperationTypeInflation: + // void + case OperationTypeManageData: + tv, ok := value.(ManageDataOp) + if !ok { + err = fmt.Errorf("invalid value, must be ManageDataOp") + return + } + result.ManageDataOp = &tv + case OperationTypeBumpSequence: + tv, ok := value.(BumpSequenceOp) + if !ok { + err = fmt.Errorf("invalid value, must be BumpSequenceOp") + return + } + result.BumpSequenceOp = &tv + case OperationTypeManageBuyOffer: + tv, ok := value.(ManageBuyOfferOp) + if !ok { + err = fmt.Errorf("invalid value, must be ManageBuyOfferOp") + return + } + result.ManageBuyOfferOp = &tv + case OperationTypePathPaymentStrictSend: + tv, ok := value.(PathPaymentStrictSendOp) + if !ok { + err = fmt.Errorf("invalid value, must be PathPaymentStrictSendOp") + return + } + result.PathPaymentStrictSendOp = &tv + case OperationTypeCreateClaimableBalance: + tv, ok := value.(CreateClaimableBalanceOp) + if !ok { + err = fmt.Errorf("invalid value, must be CreateClaimableBalanceOp") + return + } + result.CreateClaimableBalanceOp = &tv + case OperationTypeClaimClaimableBalance: + tv, ok := value.(ClaimClaimableBalanceOp) + if !ok { + err = fmt.Errorf("invalid value, must be ClaimClaimableBalanceOp") + return + } + result.ClaimClaimableBalanceOp = &tv + case OperationTypeBeginSponsoringFutureReserves: + tv, ok := value.(BeginSponsoringFutureReservesOp) + if !ok { + err = fmt.Errorf("invalid value, must be BeginSponsoringFutureReservesOp") + return + } + result.BeginSponsoringFutureReservesOp = &tv + case OperationTypeEndSponsoringFutureReserves: + // void + case OperationTypeRevokeSponsorship: + tv, ok := value.(RevokeSponsorshipOp) + if !ok { + err = fmt.Errorf("invalid value, must be 
RevokeSponsorshipOp") + return + } + result.RevokeSponsorshipOp = &tv + case OperationTypeClawback: + tv, ok := value.(ClawbackOp) + if !ok { + err = fmt.Errorf("invalid value, must be ClawbackOp") + return + } + result.ClawbackOp = &tv + case OperationTypeClawbackClaimableBalance: + tv, ok := value.(ClawbackClaimableBalanceOp) + if !ok { + err = fmt.Errorf("invalid value, must be ClawbackClaimableBalanceOp") + return + } + result.ClawbackClaimableBalanceOp = &tv + case OperationTypeSetTrustLineFlags: + tv, ok := value.(SetTrustLineFlagsOp) + if !ok { + err = fmt.Errorf("invalid value, must be SetTrustLineFlagsOp") + return + } + result.SetTrustLineFlagsOp = &tv + case OperationTypeLiquidityPoolDeposit: + tv, ok := value.(LiquidityPoolDepositOp) + if !ok { + err = fmt.Errorf("invalid value, must be LiquidityPoolDepositOp") + return + } + result.LiquidityPoolDepositOp = &tv + case OperationTypeLiquidityPoolWithdraw: + tv, ok := value.(LiquidityPoolWithdrawOp) + if !ok { + err = fmt.Errorf("invalid value, must be LiquidityPoolWithdrawOp") + return + } + result.LiquidityPoolWithdrawOp = &tv + } + return +} + +// MustCreateAccountOp retrieves the CreateAccountOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustCreateAccountOp() CreateAccountOp { + val, ok := u.GetCreateAccountOp() + + if !ok { + panic("arm CreateAccountOp is not set") + } + + return val +} + +// GetCreateAccountOp retrieves the CreateAccountOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetCreateAccountOp() (result CreateAccountOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "CreateAccountOp" { + result = *u.CreateAccountOp + ok = true + } + + return +} + +// MustPaymentOp retrieves the PaymentOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustPaymentOp() PaymentOp { + val, ok := u.GetPaymentOp() + + if !ok { + panic("arm PaymentOp is not set") + } + + return val +} + +// GetPaymentOp retrieves the PaymentOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetPaymentOp() (result PaymentOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "PaymentOp" { + result = *u.PaymentOp + ok = true + } + + return +} + +// MustPathPaymentStrictReceiveOp retrieves the PathPaymentStrictReceiveOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustPathPaymentStrictReceiveOp() PathPaymentStrictReceiveOp { + val, ok := u.GetPathPaymentStrictReceiveOp() + + if !ok { + panic("arm PathPaymentStrictReceiveOp is not set") + } + + return val +} + +// GetPathPaymentStrictReceiveOp retrieves the PathPaymentStrictReceiveOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetPathPaymentStrictReceiveOp() (result PathPaymentStrictReceiveOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "PathPaymentStrictReceiveOp" { + result = *u.PathPaymentStrictReceiveOp + ok = true + } + + return +} + +// MustManageSellOfferOp retrieves the ManageSellOfferOp value from the union, +// panicing if the value is not set. 
+func (u OperationBody) MustManageSellOfferOp() ManageSellOfferOp { + val, ok := u.GetManageSellOfferOp() + + if !ok { + panic("arm ManageSellOfferOp is not set") + } + + return val +} + +// GetManageSellOfferOp retrieves the ManageSellOfferOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetManageSellOfferOp() (result ManageSellOfferOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ManageSellOfferOp" { + result = *u.ManageSellOfferOp + ok = true + } + + return +} + +// MustCreatePassiveSellOfferOp retrieves the CreatePassiveSellOfferOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustCreatePassiveSellOfferOp() CreatePassiveSellOfferOp { + val, ok := u.GetCreatePassiveSellOfferOp() + + if !ok { + panic("arm CreatePassiveSellOfferOp is not set") + } + + return val +} + +// GetCreatePassiveSellOfferOp retrieves the CreatePassiveSellOfferOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetCreatePassiveSellOfferOp() (result CreatePassiveSellOfferOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "CreatePassiveSellOfferOp" { + result = *u.CreatePassiveSellOfferOp + ok = true + } + + return +} + +// MustSetOptionsOp retrieves the SetOptionsOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustSetOptionsOp() SetOptionsOp { + val, ok := u.GetSetOptionsOp() + + if !ok { + panic("arm SetOptionsOp is not set") + } + + return val +} + +// GetSetOptionsOp retrieves the SetOptionsOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetSetOptionsOp() (result SetOptionsOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "SetOptionsOp" { + result = *u.SetOptionsOp + ok = true + } + + return +} + +// MustChangeTrustOp retrieves the ChangeTrustOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustChangeTrustOp() ChangeTrustOp { + val, ok := u.GetChangeTrustOp() + + if !ok { + panic("arm ChangeTrustOp is not set") + } + + return val +} + +// GetChangeTrustOp retrieves the ChangeTrustOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetChangeTrustOp() (result ChangeTrustOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ChangeTrustOp" { + result = *u.ChangeTrustOp + ok = true + } + + return +} + +// MustAllowTrustOp retrieves the AllowTrustOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustAllowTrustOp() AllowTrustOp { + val, ok := u.GetAllowTrustOp() + + if !ok { + panic("arm AllowTrustOp is not set") + } + + return val +} + +// GetAllowTrustOp retrieves the AllowTrustOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetAllowTrustOp() (result AllowTrustOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AllowTrustOp" { + result = *u.AllowTrustOp + ok = true + } + + return +} + +// MustDestination retrieves the Destination value from the union, +// panicing if the value is not set. 
+func (u OperationBody) MustDestination() MuxedAccount { + val, ok := u.GetDestination() + + if !ok { + panic("arm Destination is not set") + } + + return val +} + +// GetDestination retrieves the Destination value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetDestination() (result MuxedAccount, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Destination" { + result = *u.Destination + ok = true + } + + return +} + +// MustManageDataOp retrieves the ManageDataOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustManageDataOp() ManageDataOp { + val, ok := u.GetManageDataOp() + + if !ok { + panic("arm ManageDataOp is not set") + } + + return val +} + +// GetManageDataOp retrieves the ManageDataOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetManageDataOp() (result ManageDataOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ManageDataOp" { + result = *u.ManageDataOp + ok = true + } + + return +} + +// MustBumpSequenceOp retrieves the BumpSequenceOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustBumpSequenceOp() BumpSequenceOp { + val, ok := u.GetBumpSequenceOp() + + if !ok { + panic("arm BumpSequenceOp is not set") + } + + return val +} + +// GetBumpSequenceOp retrieves the BumpSequenceOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetBumpSequenceOp() (result BumpSequenceOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "BumpSequenceOp" { + result = *u.BumpSequenceOp + ok = true + } + + return +} + +// MustManageBuyOfferOp retrieves the ManageBuyOfferOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustManageBuyOfferOp() ManageBuyOfferOp { + val, ok := u.GetManageBuyOfferOp() + + if !ok { + panic("arm ManageBuyOfferOp is not set") + } + + return val +} + +// GetManageBuyOfferOp retrieves the ManageBuyOfferOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetManageBuyOfferOp() (result ManageBuyOfferOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ManageBuyOfferOp" { + result = *u.ManageBuyOfferOp + ok = true + } + + return +} + +// MustPathPaymentStrictSendOp retrieves the PathPaymentStrictSendOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustPathPaymentStrictSendOp() PathPaymentStrictSendOp { + val, ok := u.GetPathPaymentStrictSendOp() + + if !ok { + panic("arm PathPaymentStrictSendOp is not set") + } + + return val +} + +// GetPathPaymentStrictSendOp retrieves the PathPaymentStrictSendOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetPathPaymentStrictSendOp() (result PathPaymentStrictSendOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "PathPaymentStrictSendOp" { + result = *u.PathPaymentStrictSendOp + ok = true + } + + return +} + +// MustCreateClaimableBalanceOp retrieves the CreateClaimableBalanceOp value from the union, +// panicing if the value is not set. 
+func (u OperationBody) MustCreateClaimableBalanceOp() CreateClaimableBalanceOp { + val, ok := u.GetCreateClaimableBalanceOp() + + if !ok { + panic("arm CreateClaimableBalanceOp is not set") + } + + return val +} + +// GetCreateClaimableBalanceOp retrieves the CreateClaimableBalanceOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetCreateClaimableBalanceOp() (result CreateClaimableBalanceOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "CreateClaimableBalanceOp" { + result = *u.CreateClaimableBalanceOp + ok = true + } + + return +} + +// MustClaimClaimableBalanceOp retrieves the ClaimClaimableBalanceOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustClaimClaimableBalanceOp() ClaimClaimableBalanceOp { + val, ok := u.GetClaimClaimableBalanceOp() + + if !ok { + panic("arm ClaimClaimableBalanceOp is not set") + } + + return val +} + +// GetClaimClaimableBalanceOp retrieves the ClaimClaimableBalanceOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetClaimClaimableBalanceOp() (result ClaimClaimableBalanceOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ClaimClaimableBalanceOp" { + result = *u.ClaimClaimableBalanceOp + ok = true + } + + return +} + +// MustBeginSponsoringFutureReservesOp retrieves the BeginSponsoringFutureReservesOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustBeginSponsoringFutureReservesOp() BeginSponsoringFutureReservesOp { + val, ok := u.GetBeginSponsoringFutureReservesOp() + + if !ok { + panic("arm BeginSponsoringFutureReservesOp is not set") + } + + return val +} + +// GetBeginSponsoringFutureReservesOp retrieves the BeginSponsoringFutureReservesOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetBeginSponsoringFutureReservesOp() (result BeginSponsoringFutureReservesOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "BeginSponsoringFutureReservesOp" { + result = *u.BeginSponsoringFutureReservesOp + ok = true + } + + return +} + +// MustRevokeSponsorshipOp retrieves the RevokeSponsorshipOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustRevokeSponsorshipOp() RevokeSponsorshipOp { + val, ok := u.GetRevokeSponsorshipOp() + + if !ok { + panic("arm RevokeSponsorshipOp is not set") + } + + return val +} + +// GetRevokeSponsorshipOp retrieves the RevokeSponsorshipOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetRevokeSponsorshipOp() (result RevokeSponsorshipOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "RevokeSponsorshipOp" { + result = *u.RevokeSponsorshipOp + ok = true + } + + return +} + +// MustClawbackOp retrieves the ClawbackOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustClawbackOp() ClawbackOp { + val, ok := u.GetClawbackOp() + + if !ok { + panic("arm ClawbackOp is not set") + } + + return val +} + +// GetClawbackOp retrieves the ClawbackOp value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u OperationBody) GetClawbackOp() (result ClawbackOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ClawbackOp" { + result = *u.ClawbackOp + ok = true + } + + return +} + +// MustClawbackClaimableBalanceOp retrieves the ClawbackClaimableBalanceOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustClawbackClaimableBalanceOp() ClawbackClaimableBalanceOp { + val, ok := u.GetClawbackClaimableBalanceOp() + + if !ok { + panic("arm ClawbackClaimableBalanceOp is not set") + } + + return val +} + +// GetClawbackClaimableBalanceOp retrieves the ClawbackClaimableBalanceOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetClawbackClaimableBalanceOp() (result ClawbackClaimableBalanceOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ClawbackClaimableBalanceOp" { + result = *u.ClawbackClaimableBalanceOp + ok = true + } + + return +} + +// MustSetTrustLineFlagsOp retrieves the SetTrustLineFlagsOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustSetTrustLineFlagsOp() SetTrustLineFlagsOp { + val, ok := u.GetSetTrustLineFlagsOp() + + if !ok { + panic("arm SetTrustLineFlagsOp is not set") + } + + return val +} + +// GetSetTrustLineFlagsOp retrieves the SetTrustLineFlagsOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetSetTrustLineFlagsOp() (result SetTrustLineFlagsOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "SetTrustLineFlagsOp" { + result = *u.SetTrustLineFlagsOp + ok = true + } + + return +} + +// MustLiquidityPoolDepositOp retrieves the LiquidityPoolDepositOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustLiquidityPoolDepositOp() LiquidityPoolDepositOp { + val, ok := u.GetLiquidityPoolDepositOp() + + if !ok { + panic("arm LiquidityPoolDepositOp is not set") + } + + return val +} + +// GetLiquidityPoolDepositOp retrieves the LiquidityPoolDepositOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetLiquidityPoolDepositOp() (result LiquidityPoolDepositOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "LiquidityPoolDepositOp" { + result = *u.LiquidityPoolDepositOp + ok = true + } + + return +} + +// MustLiquidityPoolWithdrawOp retrieves the LiquidityPoolWithdrawOp value from the union, +// panicing if the value is not set. +func (u OperationBody) MustLiquidityPoolWithdrawOp() LiquidityPoolWithdrawOp { + val, ok := u.GetLiquidityPoolWithdrawOp() + + if !ok { + panic("arm LiquidityPoolWithdrawOp is not set") + } + + return val +} + +// GetLiquidityPoolWithdrawOp retrieves the LiquidityPoolWithdrawOp value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationBody) GetLiquidityPoolWithdrawOp() (result LiquidityPoolWithdrawOp, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "LiquidityPoolWithdrawOp" { + result = *u.LiquidityPoolWithdrawOp + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
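+//
+// Illustrative sketch (not generated code): EncodeTo writes the OperationType
+// discriminant first and then the selected arm (nothing for the void arms
+// INFLATION and END_SPONSORING_FUTURE_RESERVES). body is assumed to be a
+// populated OperationBody.
+//
+//     var buf bytes.Buffer
+//     enc := xdr.NewEncoder(&buf)
+//     err := body.EncodeTo(enc)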
+func (u OperationBody) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch OperationType(u.Type) { + case OperationTypeCreateAccount: + if err = (*u.CreateAccountOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypePayment: + if err = (*u.PaymentOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypePathPaymentStrictReceive: + if err = (*u.PathPaymentStrictReceiveOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeManageSellOffer: + if err = (*u.ManageSellOfferOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeCreatePassiveSellOffer: + if err = (*u.CreatePassiveSellOfferOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeSetOptions: + if err = (*u.SetOptionsOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeChangeTrust: + if err = (*u.ChangeTrustOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeAllowTrust: + if err = (*u.AllowTrustOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeAccountMerge: + if err = (*u.Destination).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeInflation: + // Void + return nil + case OperationTypeManageData: + if err = (*u.ManageDataOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeBumpSequence: + if err = (*u.BumpSequenceOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeManageBuyOffer: + if err = (*u.ManageBuyOfferOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypePathPaymentStrictSend: + if err = (*u.PathPaymentStrictSendOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeCreateClaimableBalance: + if err = (*u.CreateClaimableBalanceOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeClaimClaimableBalance: + if err = (*u.ClaimClaimableBalanceOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeBeginSponsoringFutureReserves: + if err = (*u.BeginSponsoringFutureReservesOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeEndSponsoringFutureReserves: + // Void + return nil + case OperationTypeRevokeSponsorship: + if err = (*u.RevokeSponsorshipOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeClawback: + if err = (*u.ClawbackOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeClawbackClaimableBalance: + if err = (*u.ClawbackClaimableBalanceOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeSetTrustLineFlags: + if err = (*u.SetTrustLineFlagsOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeLiquidityPoolDeposit: + if err = (*u.LiquidityPoolDepositOp).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeLiquidityPoolWithdraw: + if err = (*u.LiquidityPoolWithdrawOp).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (OperationType) switch value '%d' is not valid for union OperationBody", u.Type) +} + +var _ decoderFrom = (*OperationBody)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (u *OperationBody) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationType: %s", err) + } + switch OperationType(u.Type) { + case OperationTypeCreateAccount: + u.CreateAccountOp = new(CreateAccountOp) + nTmp, err = (*u.CreateAccountOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding CreateAccountOp: %s", err) + } + return n, nil + case OperationTypePayment: + u.PaymentOp = new(PaymentOp) + nTmp, err = (*u.PaymentOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PaymentOp: %s", err) + } + return n, nil + case OperationTypePathPaymentStrictReceive: + u.PathPaymentStrictReceiveOp = new(PathPaymentStrictReceiveOp) + nTmp, err = (*u.PathPaymentStrictReceiveOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictReceiveOp: %s", err) + } + return n, nil + case OperationTypeManageSellOffer: + u.ManageSellOfferOp = new(ManageSellOfferOp) + nTmp, err = (*u.ManageSellOfferOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageSellOfferOp: %s", err) + } + return n, nil + case OperationTypeCreatePassiveSellOffer: + u.CreatePassiveSellOfferOp = new(CreatePassiveSellOfferOp) + nTmp, err = (*u.CreatePassiveSellOfferOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding CreatePassiveSellOfferOp: %s", err) + } + return n, nil + case OperationTypeSetOptions: + u.SetOptionsOp = new(SetOptionsOp) + nTmp, err = (*u.SetOptionsOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SetOptionsOp: %s", err) + } + return n, nil + case OperationTypeChangeTrust: + u.ChangeTrustOp = new(ChangeTrustOp) + nTmp, err = (*u.ChangeTrustOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ChangeTrustOp: %s", err) + } + return n, nil + case OperationTypeAllowTrust: + u.AllowTrustOp = new(AllowTrustOp) + nTmp, err = (*u.AllowTrustOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AllowTrustOp: %s", err) + } + return n, nil + case OperationTypeAccountMerge: + u.Destination = new(MuxedAccount) + nTmp, err = (*u.Destination).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccount: %s", err) + } + return n, nil + case OperationTypeInflation: + // Void + return n, nil + case OperationTypeManageData: + u.ManageDataOp = new(ManageDataOp) + nTmp, err = (*u.ManageDataOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageDataOp: %s", err) + } + return n, nil + case OperationTypeBumpSequence: + u.BumpSequenceOp = new(BumpSequenceOp) + nTmp, err = (*u.BumpSequenceOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding BumpSequenceOp: %s", err) + } + return n, nil + case OperationTypeManageBuyOffer: + u.ManageBuyOfferOp = new(ManageBuyOfferOp) + nTmp, err = (*u.ManageBuyOfferOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageBuyOfferOp: %s", err) + } + return n, nil + case OperationTypePathPaymentStrictSend: + u.PathPaymentStrictSendOp = new(PathPaymentStrictSendOp) + nTmp, err = (*u.PathPaymentStrictSendOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictSendOp: %s", err) + } + return n, nil + case OperationTypeCreateClaimableBalance: + u.CreateClaimableBalanceOp = new(CreateClaimableBalanceOp) 
+ nTmp, err = (*u.CreateClaimableBalanceOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding CreateClaimableBalanceOp: %s", err) + } + return n, nil + case OperationTypeClaimClaimableBalance: + u.ClaimClaimableBalanceOp = new(ClaimClaimableBalanceOp) + nTmp, err = (*u.ClaimClaimableBalanceOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimClaimableBalanceOp: %s", err) + } + return n, nil + case OperationTypeBeginSponsoringFutureReserves: + u.BeginSponsoringFutureReservesOp = new(BeginSponsoringFutureReservesOp) + nTmp, err = (*u.BeginSponsoringFutureReservesOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding BeginSponsoringFutureReservesOp: %s", err) + } + return n, nil + case OperationTypeEndSponsoringFutureReserves: + // Void + return n, nil + case OperationTypeRevokeSponsorship: + u.RevokeSponsorshipOp = new(RevokeSponsorshipOp) + nTmp, err = (*u.RevokeSponsorshipOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding RevokeSponsorshipOp: %s", err) + } + return n, nil + case OperationTypeClawback: + u.ClawbackOp = new(ClawbackOp) + nTmp, err = (*u.ClawbackOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClawbackOp: %s", err) + } + return n, nil + case OperationTypeClawbackClaimableBalance: + u.ClawbackClaimableBalanceOp = new(ClawbackClaimableBalanceOp) + nTmp, err = (*u.ClawbackClaimableBalanceOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClawbackClaimableBalanceOp: %s", err) + } + return n, nil + case OperationTypeSetTrustLineFlags: + u.SetTrustLineFlagsOp = new(SetTrustLineFlagsOp) + nTmp, err = (*u.SetTrustLineFlagsOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SetTrustLineFlagsOp: %s", err) + } + return n, nil + case OperationTypeLiquidityPoolDeposit: + u.LiquidityPoolDepositOp = new(LiquidityPoolDepositOp) + nTmp, err = (*u.LiquidityPoolDepositOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolDepositOp: %s", err) + } + return n, nil + case OperationTypeLiquidityPoolWithdraw: + u.LiquidityPoolWithdrawOp = new(LiquidityPoolWithdrawOp) + nTmp, err = (*u.LiquidityPoolWithdrawOp).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolWithdrawOp: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union OperationBody has invalid Type (OperationType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s OperationBody) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *OperationBody) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*OperationBody)(nil) + _ encoding.BinaryUnmarshaler = (*OperationBody)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s OperationBody) xdrType() {} + +var _ xdrType = (*OperationBody)(nil) + +// Operation is an XDR Struct defines as: +// +// struct Operation +// { +// // sourceAccount is the account used to run the operation +// // if not set, the runtime defaults to "sourceAccount" specified at +// // the transaction level +// MuxedAccount* sourceAccount; +// +// union switch (OperationType type) +// { +// case CREATE_ACCOUNT: +// CreateAccountOp createAccountOp; +// case PAYMENT: +// PaymentOp paymentOp; +// case PATH_PAYMENT_STRICT_RECEIVE: +// PathPaymentStrictReceiveOp pathPaymentStrictReceiveOp; +// case MANAGE_SELL_OFFER: +// ManageSellOfferOp manageSellOfferOp; +// case CREATE_PASSIVE_SELL_OFFER: +// CreatePassiveSellOfferOp createPassiveSellOfferOp; +// case SET_OPTIONS: +// SetOptionsOp setOptionsOp; +// case CHANGE_TRUST: +// ChangeTrustOp changeTrustOp; +// case ALLOW_TRUST: +// AllowTrustOp allowTrustOp; +// case ACCOUNT_MERGE: +// MuxedAccount destination; +// case INFLATION: +// void; +// case MANAGE_DATA: +// ManageDataOp manageDataOp; +// case BUMP_SEQUENCE: +// BumpSequenceOp bumpSequenceOp; +// case MANAGE_BUY_OFFER: +// ManageBuyOfferOp manageBuyOfferOp; +// case PATH_PAYMENT_STRICT_SEND: +// PathPaymentStrictSendOp pathPaymentStrictSendOp; +// case CREATE_CLAIMABLE_BALANCE: +// CreateClaimableBalanceOp createClaimableBalanceOp; +// case CLAIM_CLAIMABLE_BALANCE: +// ClaimClaimableBalanceOp claimClaimableBalanceOp; +// case BEGIN_SPONSORING_FUTURE_RESERVES: +// BeginSponsoringFutureReservesOp beginSponsoringFutureReservesOp; +// case END_SPONSORING_FUTURE_RESERVES: +// void; +// case REVOKE_SPONSORSHIP: +// RevokeSponsorshipOp revokeSponsorshipOp; +// case CLAWBACK: +// ClawbackOp clawbackOp; +// case CLAWBACK_CLAIMABLE_BALANCE: +// ClawbackClaimableBalanceOp clawbackClaimableBalanceOp; +// case SET_TRUST_LINE_FLAGS: +// SetTrustLineFlagsOp setTrustLineFlagsOp; +// case LIQUIDITY_POOL_DEPOSIT: +// LiquidityPoolDepositOp liquidityPoolDepositOp; +// case LIQUIDITY_POOL_WITHDRAW: +// LiquidityPoolWithdrawOp liquidityPoolWithdrawOp; +// } +// body; +// }; +// +type Operation struct { + SourceAccount *MuxedAccount + Body OperationBody +} + +// EncodeTo encodes this value using the Encoder. +func (s *Operation) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeBool(s.SourceAccount != nil); err != nil { + return err + } + if s.SourceAccount != nil { + if err = (*s.SourceAccount).EncodeTo(e); err != nil { + return err + } + } + if err = s.Body.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Operation)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Operation) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var b bool + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccount: %s", err) + } + s.SourceAccount = nil + if b { + s.SourceAccount = new(MuxedAccount) + nTmp, err = s.SourceAccount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccount: %s", err) + } + } + nTmp, err = s.Body.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationBody: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Operation) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
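+//
+// Illustrative sketch (not generated code): the optional SourceAccount pointer
+// is encoded as an XDR bool flag followed by the MuxedAccount only when it is
+// non-nil, so a nil pointer (operation inherits the transaction source)
+// survives a round trip. body is assumed to be a populated OperationBody.
+//
+//     op := Operation{SourceAccount: nil, Body: body}
+//     raw, err := op.MarshalBinary()
+//     var decoded Operation
+//     if err == nil {
+//         err = decoded.UnmarshalBinary(raw) // decoded.SourceAccount == nil
+//     }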
+func (s *Operation) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Operation)(nil) + _ encoding.BinaryUnmarshaler = (*Operation)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Operation) xdrType() {} + +var _ xdrType = (*Operation)(nil) + +// HashIdPreimageOperationId is an XDR NestedStruct defines as: +// +// struct +// { +// AccountID sourceAccount; +// SequenceNumber seqNum; +// uint32 opNum; +// } +// +type HashIdPreimageOperationId struct { + SourceAccount AccountId + SeqNum SequenceNumber + OpNum Uint32 +} + +// EncodeTo encodes this value using the Encoder. +func (s *HashIdPreimageOperationId) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SourceAccount.EncodeTo(e); err != nil { + return err + } + if err = s.SeqNum.EncodeTo(e); err != nil { + return err + } + if err = s.OpNum.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*HashIdPreimageOperationId)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *HashIdPreimageOperationId) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SourceAccount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.SeqNum.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SequenceNumber: %s", err) + } + nTmp, err = s.OpNum.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s HashIdPreimageOperationId) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *HashIdPreimageOperationId) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*HashIdPreimageOperationId)(nil) + _ encoding.BinaryUnmarshaler = (*HashIdPreimageOperationId)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s HashIdPreimageOperationId) xdrType() {} + +var _ xdrType = (*HashIdPreimageOperationId)(nil) + +// HashIdPreimageRevokeId is an XDR NestedStruct defines as: +// +// struct +// { +// AccountID sourceAccount; +// SequenceNumber seqNum; +// uint32 opNum; +// PoolID liquidityPoolID; +// Asset asset; +// } +// +type HashIdPreimageRevokeId struct { + SourceAccount AccountId + SeqNum SequenceNumber + OpNum Uint32 + LiquidityPoolId PoolId + Asset Asset +} + +// EncodeTo encodes this value using the Encoder. +func (s *HashIdPreimageRevokeId) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SourceAccount.EncodeTo(e); err != nil { + return err + } + if err = s.SeqNum.EncodeTo(e); err != nil { + return err + } + if err = s.OpNum.EncodeTo(e); err != nil { + return err + } + if err = s.LiquidityPoolId.EncodeTo(e); err != nil { + return err + } + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*HashIdPreimageRevokeId)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *HashIdPreimageRevokeId) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SourceAccount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.SeqNum.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SequenceNumber: %s", err) + } + nTmp, err = s.OpNum.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.LiquidityPoolId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PoolId: %s", err) + } + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s HashIdPreimageRevokeId) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *HashIdPreimageRevokeId) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*HashIdPreimageRevokeId)(nil) + _ encoding.BinaryUnmarshaler = (*HashIdPreimageRevokeId)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s HashIdPreimageRevokeId) xdrType() {} + +var _ xdrType = (*HashIdPreimageRevokeId)(nil) + +// HashIdPreimage is an XDR Union defines as: +// +// union HashIDPreimage switch (EnvelopeType type) +// { +// case ENVELOPE_TYPE_OP_ID: +// struct +// { +// AccountID sourceAccount; +// SequenceNumber seqNum; +// uint32 opNum; +// } operationID; +// case ENVELOPE_TYPE_POOL_REVOKE_OP_ID: +// struct +// { +// AccountID sourceAccount; +// SequenceNumber seqNum; +// uint32 opNum; +// PoolID liquidityPoolID; +// Asset asset; +// } revokeID; +// }; +// +type HashIdPreimage struct { + Type EnvelopeType + OperationId *HashIdPreimageOperationId + RevokeId *HashIdPreimageRevokeId +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u HashIdPreimage) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of HashIdPreimage +func (u HashIdPreimage) ArmForSwitch(sw int32) (string, bool) { + switch EnvelopeType(sw) { + case EnvelopeTypeEnvelopeTypeOpId: + return "OperationId", true + case EnvelopeTypeEnvelopeTypePoolRevokeOpId: + return "RevokeId", true + } + return "-", false +} + +// NewHashIdPreimage creates a new HashIdPreimage. +func NewHashIdPreimage(aType EnvelopeType, value interface{}) (result HashIdPreimage, err error) { + result.Type = aType + switch EnvelopeType(aType) { + case EnvelopeTypeEnvelopeTypeOpId: + tv, ok := value.(HashIdPreimageOperationId) + if !ok { + err = fmt.Errorf("invalid value, must be HashIdPreimageOperationId") + return + } + result.OperationId = &tv + case EnvelopeTypeEnvelopeTypePoolRevokeOpId: + tv, ok := value.(HashIdPreimageRevokeId) + if !ok { + err = fmt.Errorf("invalid value, must be HashIdPreimageRevokeId") + return + } + result.RevokeId = &tv + } + return +} + +// MustOperationId retrieves the OperationId value from the union, +// panicing if the value is not set. 
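+//
+// Illustrative sketch (not generated code): constructing the
+// ENVELOPE_TYPE_OP_ID arm of HashIdPreimage. sourceAccount and seqNum are
+// assumed to be existing AccountId and SequenceNumber values.
+//
+//     preimage, err := NewHashIdPreimage(
+//         EnvelopeTypeEnvelopeTypeOpId,
+//         HashIdPreimageOperationId{
+//             SourceAccount: sourceAccount,
+//             SeqNum:        seqNum,
+//             OpNum:         Uint32(1),
+//         },
+//     )
+//     _ = preimage.MustOperationId()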
+func (u HashIdPreimage) MustOperationId() HashIdPreimageOperationId { + val, ok := u.GetOperationId() + + if !ok { + panic("arm OperationId is not set") + } + + return val +} + +// GetOperationId retrieves the OperationId value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u HashIdPreimage) GetOperationId() (result HashIdPreimageOperationId, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "OperationId" { + result = *u.OperationId + ok = true + } + + return +} + +// MustRevokeId retrieves the RevokeId value from the union, +// panicing if the value is not set. +func (u HashIdPreimage) MustRevokeId() HashIdPreimageRevokeId { + val, ok := u.GetRevokeId() + + if !ok { + panic("arm RevokeId is not set") + } + + return val +} + +// GetRevokeId retrieves the RevokeId value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u HashIdPreimage) GetRevokeId() (result HashIdPreimageRevokeId, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "RevokeId" { + result = *u.RevokeId + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u HashIdPreimage) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch EnvelopeType(u.Type) { + case EnvelopeTypeEnvelopeTypeOpId: + if err = (*u.OperationId).EncodeTo(e); err != nil { + return err + } + return nil + case EnvelopeTypeEnvelopeTypePoolRevokeOpId: + if err = (*u.RevokeId).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (EnvelopeType) switch value '%d' is not valid for union HashIdPreimage", u.Type) +} + +var _ decoderFrom = (*HashIdPreimage)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *HashIdPreimage) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding EnvelopeType: %s", err) + } + switch EnvelopeType(u.Type) { + case EnvelopeTypeEnvelopeTypeOpId: + u.OperationId = new(HashIdPreimageOperationId) + nTmp, err = (*u.OperationId).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding HashIdPreimageOperationId: %s", err) + } + return n, nil + case EnvelopeTypeEnvelopeTypePoolRevokeOpId: + u.RevokeId = new(HashIdPreimageRevokeId) + nTmp, err = (*u.RevokeId).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding HashIdPreimageRevokeId: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union HashIdPreimage has invalid Type (EnvelopeType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s HashIdPreimage) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *HashIdPreimage) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*HashIdPreimage)(nil) + _ encoding.BinaryUnmarshaler = (*HashIdPreimage)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s HashIdPreimage) xdrType() {} + +var _ xdrType = (*HashIdPreimage)(nil) + +// MemoType is an XDR Enum defines as: +// +// enum MemoType +// { +// MEMO_NONE = 0, +// MEMO_TEXT = 1, +// MEMO_ID = 2, +// MEMO_HASH = 3, +// MEMO_RETURN = 4 +// }; +// +type MemoType int32 + +const ( + MemoTypeMemoNone MemoType = 0 + MemoTypeMemoText MemoType = 1 + MemoTypeMemoId MemoType = 2 + MemoTypeMemoHash MemoType = 3 + MemoTypeMemoReturn MemoType = 4 +) + +var memoTypeMap = map[int32]string{ + 0: "MemoTypeMemoNone", + 1: "MemoTypeMemoText", + 2: "MemoTypeMemoId", + 3: "MemoTypeMemoHash", + 4: "MemoTypeMemoReturn", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for MemoType +func (e MemoType) ValidEnum(v int32) bool { + _, ok := memoTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e MemoType) String() string { + name, _ := memoTypeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e MemoType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := memoTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid MemoType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*MemoType)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *MemoType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding MemoType: %s", err) + } + if _, ok := memoTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid MemoType enum value", v) + } + *e = MemoType(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s MemoType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *MemoType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*MemoType)(nil) + _ encoding.BinaryUnmarshaler = (*MemoType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s MemoType) xdrType() {} + +var _ xdrType = (*MemoType)(nil) + +// Memo is an XDR Union defines as: +// +// union Memo switch (MemoType type) +// { +// case MEMO_NONE: +// void; +// case MEMO_TEXT: +// string text<28>; +// case MEMO_ID: +// uint64 id; +// case MEMO_HASH: +// Hash hash; // the hash of what to pull from the content server +// case MEMO_RETURN: +// Hash retHash; // the hash of the tx you are rejecting +// }; +// +type Memo struct { + Type MemoType + Text *string `xdrmaxsize:"28"` + Id *Uint64 + Hash *Hash + RetHash *Hash +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u Memo) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of Memo +func (u Memo) ArmForSwitch(sw int32) (string, bool) { + switch MemoType(sw) { + case MemoTypeMemoNone: + return "", true + case MemoTypeMemoText: + return "Text", true + case MemoTypeMemoId: + return "Id", true + case MemoTypeMemoHash: + return "Hash", true + case MemoTypeMemoReturn: + return "RetHash", true + } + return "-", false +} + +// NewMemo creates a new Memo. 
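+//
+// Illustrative sketch (not generated code): the value passed to NewMemo must
+// match the discriminant, e.g. a Go string for MEMO_TEXT or a Uint64 for
+// MEMO_ID; otherwise an error is returned.
+//
+//     memo, err := NewMemo(MemoTypeMemoText, "hello")
+//     if err == nil {
+//         text, ok := memo.GetText() // "hello", true
+//         _, _ = text, ok
+//     }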
+func NewMemo(aType MemoType, value interface{}) (result Memo, err error) { + result.Type = aType + switch MemoType(aType) { + case MemoTypeMemoNone: + // void + case MemoTypeMemoText: + tv, ok := value.(string) + if !ok { + err = fmt.Errorf("invalid value, must be string") + return + } + result.Text = &tv + case MemoTypeMemoId: + tv, ok := value.(Uint64) + if !ok { + err = fmt.Errorf("invalid value, must be Uint64") + return + } + result.Id = &tv + case MemoTypeMemoHash: + tv, ok := value.(Hash) + if !ok { + err = fmt.Errorf("invalid value, must be Hash") + return + } + result.Hash = &tv + case MemoTypeMemoReturn: + tv, ok := value.(Hash) + if !ok { + err = fmt.Errorf("invalid value, must be Hash") + return + } + result.RetHash = &tv + } + return +} + +// MustText retrieves the Text value from the union, +// panicing if the value is not set. +func (u Memo) MustText() string { + val, ok := u.GetText() + + if !ok { + panic("arm Text is not set") + } + + return val +} + +// GetText retrieves the Text value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u Memo) GetText() (result string, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Text" { + result = *u.Text + ok = true + } + + return +} + +// MustId retrieves the Id value from the union, +// panicing if the value is not set. +func (u Memo) MustId() Uint64 { + val, ok := u.GetId() + + if !ok { + panic("arm Id is not set") + } + + return val +} + +// GetId retrieves the Id value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u Memo) GetId() (result Uint64, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Id" { + result = *u.Id + ok = true + } + + return +} + +// MustHash retrieves the Hash value from the union, +// panicing if the value is not set. +func (u Memo) MustHash() Hash { + val, ok := u.GetHash() + + if !ok { + panic("arm Hash is not set") + } + + return val +} + +// GetHash retrieves the Hash value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u Memo) GetHash() (result Hash, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "Hash" { + result = *u.Hash + ok = true + } + + return +} + +// MustRetHash retrieves the RetHash value from the union, +// panicing if the value is not set. +func (u Memo) MustRetHash() Hash { + val, ok := u.GetRetHash() + + if !ok { + panic("arm RetHash is not set") + } + + return val +} + +// GetRetHash retrieves the RetHash value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u Memo) GetRetHash() (result Hash, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "RetHash" { + result = *u.RetHash + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
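+//
+// Only the arm selected by Type is written after the four-byte MemoType tag.
+// Callers normally reach EncodeTo indirectly through MarshalBinary; a sketch
+// (sizes assume the standard XDR encoding of enums and uint64):
+//
+//	id := xdr.Uint64(42)
+//	m := xdr.Memo{Type: xdr.MemoTypeMemoId, Id: &id}
+//	raw, err := m.MarshalBinary() // 12 bytes: 4-byte type tag + 8-byte id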
+func (u Memo) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch MemoType(u.Type) { + case MemoTypeMemoNone: + // Void + return nil + case MemoTypeMemoText: + if _, err = e.EncodeString(string((*u.Text))); err != nil { + return err + } + return nil + case MemoTypeMemoId: + if err = (*u.Id).EncodeTo(e); err != nil { + return err + } + return nil + case MemoTypeMemoHash: + if err = (*u.Hash).EncodeTo(e); err != nil { + return err + } + return nil + case MemoTypeMemoReturn: + if err = (*u.RetHash).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (MemoType) switch value '%d' is not valid for union Memo", u.Type) +} + +var _ decoderFrom = (*Memo)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *Memo) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MemoType: %s", err) + } + switch MemoType(u.Type) { + case MemoTypeMemoNone: + // Void + return n, nil + case MemoTypeMemoText: + u.Text = new(string) + (*u.Text), nTmp, err = d.DecodeString(28) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Text: %s", err) + } + return n, nil + case MemoTypeMemoId: + u.Id = new(Uint64) + nTmp, err = (*u.Id).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint64: %s", err) + } + return n, nil + case MemoTypeMemoHash: + u.Hash = new(Hash) + nTmp, err = (*u.Hash).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + return n, nil + case MemoTypeMemoReturn: + u.RetHash = new(Hash) + nTmp, err = (*u.RetHash).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union Memo has invalid Type (MemoType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Memo) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Memo) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Memo)(nil) + _ encoding.BinaryUnmarshaler = (*Memo)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Memo) xdrType() {} + +var _ xdrType = (*Memo)(nil) + +// TimeBounds is an XDR Struct defines as: +// +// struct TimeBounds +// { +// TimePoint minTime; +// TimePoint maxTime; // 0 here means no maxTime +// }; +// +type TimeBounds struct { + MinTime TimePoint + MaxTime TimePoint +} + +// EncodeTo encodes this value using the Encoder. +func (s *TimeBounds) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.MinTime.EncodeTo(e); err != nil { + return err + } + if err = s.MaxTime.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TimeBounds)(nil) + +// DecodeFrom decodes this value using the Decoder. 
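+//
+// DecodeFrom is usually reached through UnmarshalBinary. A round-trip sketch
+// (assuming TimePoint is the 64-bit time type defined elsewhere in this
+// package, so the struct encodes to 16 bytes):
+//
+//	src := xdr.TimeBounds{MinTime: 0, MaxTime: 0} // MaxTime 0 means no upper bound
+//	raw, _ := src.MarshalBinary()
+//	var dst xdr.TimeBounds
+//	_ = dst.UnmarshalBinary(raw) // dst == src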
+func (s *TimeBounds) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.MinTime.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TimePoint: %s", err) + } + nTmp, err = s.MaxTime.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TimePoint: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TimeBounds) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TimeBounds) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TimeBounds)(nil) + _ encoding.BinaryUnmarshaler = (*TimeBounds)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TimeBounds) xdrType() {} + +var _ xdrType = (*TimeBounds)(nil) + +// MaxOpsPerTx is an XDR Const defines as: +// +// const MAX_OPS_PER_TX = 100; +// +const MaxOpsPerTx = 100 + +// TransactionV0Ext is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type TransactionV0Ext struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TransactionV0Ext) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TransactionV0Ext +func (u TransactionV0Ext) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewTransactionV0Ext creates a new TransactionV0Ext. +func NewTransactionV0Ext(v int32, value interface{}) (result TransactionV0Ext, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u TransactionV0Ext) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union TransactionV0Ext", u.V) +} + +var _ decoderFrom = (*TransactionV0Ext)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TransactionV0Ext) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union TransactionV0Ext has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionV0Ext) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionV0Ext) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionV0Ext)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionV0Ext)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s TransactionV0Ext) xdrType() {} + +var _ xdrType = (*TransactionV0Ext)(nil) + +// TransactionV0 is an XDR Struct defines as: +// +// struct TransactionV0 +// { +// uint256 sourceAccountEd25519; +// uint32 fee; +// SequenceNumber seqNum; +// TimeBounds* timeBounds; +// Memo memo; +// Operation operations; +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type TransactionV0 struct { + SourceAccountEd25519 Uint256 + Fee Uint32 + SeqNum SequenceNumber + TimeBounds *TimeBounds + Memo Memo + Operations []Operation `xdrmaxsize:"100"` + Ext TransactionV0Ext +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionV0) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SourceAccountEd25519.EncodeTo(e); err != nil { + return err + } + if err = s.Fee.EncodeTo(e); err != nil { + return err + } + if err = s.SeqNum.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeBool(s.TimeBounds != nil); err != nil { + return err + } + if s.TimeBounds != nil { + if err = (*s.TimeBounds).EncodeTo(e); err != nil { + return err + } + } + if err = s.Memo.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Operations))); err != nil { + return err + } + for i := 0; i < len(s.Operations); i++ { + if err = s.Operations[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TransactionV0)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionV0) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SourceAccountEd25519.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + nTmp, err = s.Fee.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.SeqNum.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SequenceNumber: %s", err) + } + var b bool + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TimeBounds: %s", err) + } + s.TimeBounds = nil + if b { + s.TimeBounds = new(TimeBounds) + nTmp, err = s.TimeBounds.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TimeBounds: %s", err) + } + } + nTmp, err = s.Memo.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Memo: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Operation: %s", err) + } + if l > 100 { + return n, fmt.Errorf("decoding Operation: data size (%d) exceeds size limit (100)", l) + } + s.Operations = nil + if l > 0 { + s.Operations = make([]Operation, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Operations[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Operation: %s", err) + } + } + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionV0Ext: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionV0) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
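+//
+// A round-trip sketch using the zero value (MEMO_NONE, no time bounds, no
+// operations); note that decoding rejects payloads whose operation count
+// exceeds MaxOpsPerTx (100):
+//
+//	var src xdr.TransactionV0
+//	raw, err := src.MarshalBinary()
+//	var dst xdr.TransactionV0
+//	err = dst.UnmarshalBinary(raw)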
+func (s *TransactionV0) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionV0)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionV0)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionV0) xdrType() {} + +var _ xdrType = (*TransactionV0)(nil) + +// TransactionV0Envelope is an XDR Struct defines as: +// +// struct TransactionV0Envelope +// { +// TransactionV0 tx; +// /* Each decorated signature is a signature over the SHA256 hash of +// * a TransactionSignaturePayload */ +// DecoratedSignature signatures<20>; +// }; +// +type TransactionV0Envelope struct { + Tx TransactionV0 + Signatures []DecoratedSignature `xdrmaxsize:"20"` +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionV0Envelope) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Tx.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Signatures))); err != nil { + return err + } + for i := 0; i < len(s.Signatures); i++ { + if err = s.Signatures[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*TransactionV0Envelope)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionV0Envelope) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Tx.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionV0: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DecoratedSignature: %s", err) + } + if l > 20 { + return n, fmt.Errorf("decoding DecoratedSignature: data size (%d) exceeds size limit (20)", l) + } + s.Signatures = nil + if l > 0 { + s.Signatures = make([]DecoratedSignature, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Signatures[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DecoratedSignature: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionV0Envelope) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionV0Envelope) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionV0Envelope)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionV0Envelope)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s TransactionV0Envelope) xdrType() {} + +var _ xdrType = (*TransactionV0Envelope)(nil) + +// TransactionExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type TransactionExt struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TransactionExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TransactionExt +func (u TransactionExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewTransactionExt creates a new TransactionExt. +func NewTransactionExt(v int32, value interface{}) (result TransactionExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u TransactionExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union TransactionExt", u.V) +} + +var _ decoderFrom = (*TransactionExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TransactionExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union TransactionExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionExt)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionExt) xdrType() {} + +var _ xdrType = (*TransactionExt)(nil) + +// Transaction is an XDR Struct defines as: +// +// struct Transaction +// { +// // account used to run the transaction +// MuxedAccount sourceAccount; +// +// // the fee the sourceAccount will pay +// uint32 fee; +// +// // sequence number to consume in the account +// SequenceNumber seqNum; +// +// // validity range (inclusive) for the last ledger close time +// TimeBounds* timeBounds; +// +// Memo memo; +// +// Operation operations; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type Transaction struct { + SourceAccount MuxedAccount + Fee Uint32 + SeqNum SequenceNumber + TimeBounds *TimeBounds + Memo Memo + Operations []Operation `xdrmaxsize:"100"` + Ext TransactionExt +} + +// EncodeTo encodes this value using the Encoder. 
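+//
+// A population sketch; memo (an xdr.Memo), tb (an xdr.TimeBounds) and ops
+// (a []xdr.Operation) are assumed to be prepared elsewhere, and SourceAccount
+// must be set to a valid MuxedAccount (defined elsewhere in this package)
+// before MarshalBinary is called:
+//
+//	tx := xdr.Transaction{
+//		Fee:        100,
+//		SeqNum:     xdr.SequenceNumber(1),
+//		Memo:       memo,
+//		TimeBounds: &tb, // optional; nil is encoded as "not present"
+//		Operations: ops, // at most MaxOpsPerTx entries
+//	}
+//	raw, err := tx.MarshalBinary()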
+func (s *Transaction) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SourceAccount.EncodeTo(e); err != nil { + return err + } + if err = s.Fee.EncodeTo(e); err != nil { + return err + } + if err = s.SeqNum.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeBool(s.TimeBounds != nil); err != nil { + return err + } + if s.TimeBounds != nil { + if err = (*s.TimeBounds).EncodeTo(e); err != nil { + return err + } + } + if err = s.Memo.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Operations))); err != nil { + return err + } + for i := 0; i < len(s.Operations); i++ { + if err = s.Operations[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Transaction)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Transaction) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SourceAccount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccount: %s", err) + } + nTmp, err = s.Fee.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint32: %s", err) + } + nTmp, err = s.SeqNum.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SequenceNumber: %s", err) + } + var b bool + b, nTmp, err = d.DecodeBool() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TimeBounds: %s", err) + } + s.TimeBounds = nil + if b { + s.TimeBounds = new(TimeBounds) + nTmp, err = s.TimeBounds.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TimeBounds: %s", err) + } + } + nTmp, err = s.Memo.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Memo: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Operation: %s", err) + } + if l > 100 { + return n, fmt.Errorf("decoding Operation: data size (%d) exceeds size limit (100)", l) + } + s.Operations = nil + if l > 0 { + s.Operations = make([]Operation, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Operations[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Operation: %s", err) + } + } + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Transaction) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Transaction) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Transaction)(nil) + _ encoding.BinaryUnmarshaler = (*Transaction)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s Transaction) xdrType() {} + +var _ xdrType = (*Transaction)(nil) + +// TransactionV1Envelope is an XDR Struct defines as: +// +// struct TransactionV1Envelope +// { +// Transaction tx; +// /* Each decorated signature is a signature over the SHA256 hash of +// * a TransactionSignaturePayload */ +// DecoratedSignature signatures<20>; +// }; +// +type TransactionV1Envelope struct { + Tx Transaction + Signatures []DecoratedSignature `xdrmaxsize:"20"` +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionV1Envelope) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Tx.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Signatures))); err != nil { + return err + } + for i := 0; i < len(s.Signatures); i++ { + if err = s.Signatures[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*TransactionV1Envelope)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionV1Envelope) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Tx.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Transaction: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DecoratedSignature: %s", err) + } + if l > 20 { + return n, fmt.Errorf("decoding DecoratedSignature: data size (%d) exceeds size limit (20)", l) + } + s.Signatures = nil + if l > 0 { + s.Signatures = make([]DecoratedSignature, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Signatures[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DecoratedSignature: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionV1Envelope) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionV1Envelope) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionV1Envelope)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionV1Envelope)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionV1Envelope) xdrType() {} + +var _ xdrType = (*TransactionV1Envelope)(nil) + +// FeeBumpTransactionInnerTx is an XDR NestedUnion defines as: +// +// union switch (EnvelopeType type) +// { +// case ENVELOPE_TYPE_TX: +// TransactionV1Envelope v1; +// } +// +type FeeBumpTransactionInnerTx struct { + Type EnvelopeType + V1 *TransactionV1Envelope +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u FeeBumpTransactionInnerTx) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of FeeBumpTransactionInnerTx +func (u FeeBumpTransactionInnerTx) ArmForSwitch(sw int32) (string, bool) { + switch EnvelopeType(sw) { + case EnvelopeTypeEnvelopeTypeTx: + return "V1", true + } + return "-", false +} + +// NewFeeBumpTransactionInnerTx creates a new FeeBumpTransactionInnerTx. 
+func NewFeeBumpTransactionInnerTx(aType EnvelopeType, value interface{}) (result FeeBumpTransactionInnerTx, err error) { + result.Type = aType + switch EnvelopeType(aType) { + case EnvelopeTypeEnvelopeTypeTx: + tv, ok := value.(TransactionV1Envelope) + if !ok { + err = fmt.Errorf("invalid value, must be TransactionV1Envelope") + return + } + result.V1 = &tv + } + return +} + +// MustV1 retrieves the V1 value from the union, +// panicing if the value is not set. +func (u FeeBumpTransactionInnerTx) MustV1() TransactionV1Envelope { + val, ok := u.GetV1() + + if !ok { + panic("arm V1 is not set") + } + + return val +} + +// GetV1 retrieves the V1 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u FeeBumpTransactionInnerTx) GetV1() (result TransactionV1Envelope, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "V1" { + result = *u.V1 + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u FeeBumpTransactionInnerTx) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch EnvelopeType(u.Type) { + case EnvelopeTypeEnvelopeTypeTx: + if err = (*u.V1).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (EnvelopeType) switch value '%d' is not valid for union FeeBumpTransactionInnerTx", u.Type) +} + +var _ decoderFrom = (*FeeBumpTransactionInnerTx)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *FeeBumpTransactionInnerTx) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding EnvelopeType: %s", err) + } + switch EnvelopeType(u.Type) { + case EnvelopeTypeEnvelopeTypeTx: + u.V1 = new(TransactionV1Envelope) + nTmp, err = (*u.V1).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionV1Envelope: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union FeeBumpTransactionInnerTx has invalid Type (EnvelopeType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s FeeBumpTransactionInnerTx) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *FeeBumpTransactionInnerTx) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*FeeBumpTransactionInnerTx)(nil) + _ encoding.BinaryUnmarshaler = (*FeeBumpTransactionInnerTx)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s FeeBumpTransactionInnerTx) xdrType() {} + +var _ xdrType = (*FeeBumpTransactionInnerTx)(nil) + +// FeeBumpTransactionExt is an XDR NestedUnion defines as: // -// struct Price +// union switch (int v) +// { +// case 0: +// void; +// } +// +type FeeBumpTransactionExt struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u FeeBumpTransactionExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of FeeBumpTransactionExt +func (u FeeBumpTransactionExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewFeeBumpTransactionExt creates a new FeeBumpTransactionExt. +func NewFeeBumpTransactionExt(v int32, value interface{}) (result FeeBumpTransactionExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. +func (u FeeBumpTransactionExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union FeeBumpTransactionExt", u.V) +} + +var _ decoderFrom = (*FeeBumpTransactionExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *FeeBumpTransactionExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union FeeBumpTransactionExt has invalid V (int32) switch value '%d'", u.V) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s FeeBumpTransactionExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *FeeBumpTransactionExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*FeeBumpTransactionExt)(nil) + _ encoding.BinaryUnmarshaler = (*FeeBumpTransactionExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s FeeBumpTransactionExt) xdrType() {} + +var _ xdrType = (*FeeBumpTransactionExt)(nil) + +// FeeBumpTransaction is an XDR Struct defines as: +// +// struct FeeBumpTransaction // { -// int32 n; // numerator -// int32 d; // denominator +// MuxedAccount feeSource; +// int64 fee; +// union switch (EnvelopeType type) +// { +// case ENVELOPE_TYPE_TX: +// TransactionV1Envelope v1; +// } +// innerTx; +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; // }; // -type Price struct { - N Int32 - D Int32 +type FeeBumpTransaction struct { + FeeSource MuxedAccount + Fee Int64 + InnerTx FeeBumpTransactionInnerTx + Ext FeeBumpTransactionExt } -// ThresholdIndexes is an XDR Enum defines as: +// EncodeTo encodes this value using the Encoder. 
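+//
+// A wrapping sketch: v1Envelope (a TransactionV1Envelope) and feeSource (a
+// MuxedAccount) are assumed to exist already; error handling is elided.
+//
+//	inner, err := xdr.NewFeeBumpTransactionInnerTx(xdr.EnvelopeTypeEnvelopeTypeTx, v1Envelope)
+//	fb := xdr.FeeBumpTransaction{
+//		FeeSource: feeSource,
+//		Fee:       xdr.Int64(400),
+//		InnerTx:   inner,
+//	}
+//	raw, err := fb.MarshalBinary()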
+func (s *FeeBumpTransaction) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.FeeSource.EncodeTo(e); err != nil { + return err + } + if err = s.Fee.EncodeTo(e); err != nil { + return err + } + if err = s.InnerTx.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*FeeBumpTransaction)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *FeeBumpTransaction) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.FeeSource.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding MuxedAccount: %s", err) + } + nTmp, err = s.Fee.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.InnerTx.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding FeeBumpTransactionInnerTx: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding FeeBumpTransactionExt: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s FeeBumpTransaction) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *FeeBumpTransaction) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*FeeBumpTransaction)(nil) + _ encoding.BinaryUnmarshaler = (*FeeBumpTransaction)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s FeeBumpTransaction) xdrType() {} + +var _ xdrType = (*FeeBumpTransaction)(nil) + +// FeeBumpTransactionEnvelope is an XDR Struct defines as: // -// enum ThresholdIndexes +// struct FeeBumpTransactionEnvelope // { -// THRESHOLD_MASTER_WEIGHT = 0, -// THRESHOLD_LOW = 1, -// THRESHOLD_MED = 2, -// THRESHOLD_HIGH = 3 +// FeeBumpTransaction tx; +// /* Each decorated signature is a signature over the SHA256 hash of +// * a TransactionSignaturePayload */ +// DecoratedSignature signatures<20>; // }; // -type ThresholdIndexes int32 +type FeeBumpTransactionEnvelope struct { + Tx FeeBumpTransaction + Signatures []DecoratedSignature `xdrmaxsize:"20"` +} -const ( - ThresholdIndexesThresholdMasterWeight ThresholdIndexes = 0 - ThresholdIndexesThresholdLow ThresholdIndexes = 1 - ThresholdIndexesThresholdMed ThresholdIndexes = 2 - ThresholdIndexesThresholdHigh ThresholdIndexes = 3 +// EncodeTo encodes this value using the Encoder. +func (s *FeeBumpTransactionEnvelope) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Tx.EncodeTo(e); err != nil { + return err + } + if _, err = e.EncodeUint(uint32(len(s.Signatures))); err != nil { + return err + } + for i := 0; i < len(s.Signatures); i++ { + if err = s.Signatures[i].EncodeTo(e); err != nil { + return err + } + } + return nil +} + +var _ decoderFrom = (*FeeBumpTransactionEnvelope)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *FeeBumpTransactionEnvelope) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Tx.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding FeeBumpTransaction: %s", err) + } + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DecoratedSignature: %s", err) + } + if l > 20 { + return n, fmt.Errorf("decoding DecoratedSignature: data size (%d) exceeds size limit (20)", l) + } + s.Signatures = nil + if l > 0 { + s.Signatures = make([]DecoratedSignature, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Signatures[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding DecoratedSignature: %s", err) + } + } + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s FeeBumpTransactionEnvelope) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *FeeBumpTransactionEnvelope) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*FeeBumpTransactionEnvelope)(nil) + _ encoding.BinaryUnmarshaler = (*FeeBumpTransactionEnvelope)(nil) ) -var thresholdIndexesMap = map[int32]string{ - 0: "ThresholdIndexesThresholdMasterWeight", - 1: "ThresholdIndexesThresholdLow", - 2: "ThresholdIndexesThresholdMed", - 3: "ThresholdIndexesThresholdHigh", +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s FeeBumpTransactionEnvelope) xdrType() {} + +var _ xdrType = (*FeeBumpTransactionEnvelope)(nil) + +// TransactionEnvelope is an XDR Union defines as: +// +// union TransactionEnvelope switch (EnvelopeType type) +// { +// case ENVELOPE_TYPE_TX_V0: +// TransactionV0Envelope v0; +// case ENVELOPE_TYPE_TX: +// TransactionV1Envelope v1; +// case ENVELOPE_TYPE_TX_FEE_BUMP: +// FeeBumpTransactionEnvelope feeBump; +// }; +// +type TransactionEnvelope struct { + Type EnvelopeType + V0 *TransactionV0Envelope + V1 *TransactionV1Envelope + FeeBump *FeeBumpTransactionEnvelope } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for ThresholdIndexes -func (e ThresholdIndexes) ValidEnum(v int32) bool { - _, ok := thresholdIndexesMap[v] - return ok +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TransactionEnvelope) SwitchFieldName() string { + return "Type" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TransactionEnvelope +func (u TransactionEnvelope) ArmForSwitch(sw int32) (string, bool) { + switch EnvelopeType(sw) { + case EnvelopeTypeEnvelopeTypeTxV0: + return "V0", true + case EnvelopeTypeEnvelopeTypeTx: + return "V1", true + case EnvelopeTypeEnvelopeTypeTxFeeBump: + return "FeeBump", true + } + return "-", false +} + +// NewTransactionEnvelope creates a new TransactionEnvelope. 
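+//
+// A construction and dispatch sketch; v1Envelope is an existing
+// TransactionV1Envelope and error handling is elided:
+//
+//	env, err := xdr.NewTransactionEnvelope(xdr.EnvelopeTypeEnvelopeTypeTx, v1Envelope)
+//	switch env.Type {
+//	case xdr.EnvelopeTypeEnvelopeTypeTxV0:
+//		// env.MustV0() / env.GetV0()
+//	case xdr.EnvelopeTypeEnvelopeTypeTx:
+//		// env.MustV1() / env.GetV1()
+//	case xdr.EnvelopeTypeEnvelopeTypeTxFeeBump:
+//		// env.MustFeeBump() / env.GetFeeBump()
+//	}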
+func NewTransactionEnvelope(aType EnvelopeType, value interface{}) (result TransactionEnvelope, err error) { + result.Type = aType + switch EnvelopeType(aType) { + case EnvelopeTypeEnvelopeTypeTxV0: + tv, ok := value.(TransactionV0Envelope) + if !ok { + err = fmt.Errorf("invalid value, must be TransactionV0Envelope") + return + } + result.V0 = &tv + case EnvelopeTypeEnvelopeTypeTx: + tv, ok := value.(TransactionV1Envelope) + if !ok { + err = fmt.Errorf("invalid value, must be TransactionV1Envelope") + return + } + result.V1 = &tv + case EnvelopeTypeEnvelopeTypeTxFeeBump: + tv, ok := value.(FeeBumpTransactionEnvelope) + if !ok { + err = fmt.Errorf("invalid value, must be FeeBumpTransactionEnvelope") + return + } + result.FeeBump = &tv + } + return +} + +// MustV0 retrieves the V0 value from the union, +// panicing if the value is not set. +func (u TransactionEnvelope) MustV0() TransactionV0Envelope { + val, ok := u.GetV0() + + if !ok { + panic("arm V0 is not set") + } + + return val +} + +// GetV0 retrieves the V0 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TransactionEnvelope) GetV0() (result TransactionV0Envelope, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "V0" { + result = *u.V0 + ok = true + } + + return +} + +// MustV1 retrieves the V1 value from the union, +// panicing if the value is not set. +func (u TransactionEnvelope) MustV1() TransactionV1Envelope { + val, ok := u.GetV1() + + if !ok { + panic("arm V1 is not set") + } + + return val } -// String returns the name of `e` -func (e ThresholdIndexes) String() string { - name, _ := thresholdIndexesMap[int32(e)] - return name +// GetV1 retrieves the V1 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TransactionEnvelope) GetV1() (result TransactionV1Envelope, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "V1" { + result = *u.V1 + ok = true + } + + return } -// LedgerEntryType is an XDR Enum defines as: -// -// enum LedgerEntryType -// { -// ACCOUNT = 0, -// TRUSTLINE = 1, -// OFFER = 2, -// DATA = 3 -// }; -// -type LedgerEntryType int32 +// MustFeeBump retrieves the FeeBump value from the union, +// panicing if the value is not set. +func (u TransactionEnvelope) MustFeeBump() FeeBumpTransactionEnvelope { + val, ok := u.GetFeeBump() -const ( - LedgerEntryTypeAccount LedgerEntryType = 0 - LedgerEntryTypeTrustline LedgerEntryType = 1 - LedgerEntryTypeOffer LedgerEntryType = 2 - LedgerEntryTypeData LedgerEntryType = 3 -) + if !ok { + panic("arm FeeBump is not set") + } -var ledgerEntryTypeMap = map[int32]string{ - 0: "LedgerEntryTypeAccount", - 1: "LedgerEntryTypeTrustline", - 2: "LedgerEntryTypeOffer", - 3: "LedgerEntryTypeData", + return val } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for LedgerEntryType -func (e LedgerEntryType) ValidEnum(v int32) bool { - _, ok := ledgerEntryTypeMap[v] - return ok -} +// GetFeeBump retrieves the FeeBump value from the union, +// returning ok if the union's switch indicated the value is valid. 
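+//
+// A usage sketch contrasting the two accessors:
+//
+//	if fb, ok := env.GetFeeBump(); ok {
+//		_ = fb.Tx.InnerTx // only set when env.Type is ENVELOPE_TYPE_TX_FEE_BUMP
+//	}
+//	// env.MustFeeBump() panics instead of returning ok == false when the arm is unset.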
+func (u TransactionEnvelope) GetFeeBump() (result FeeBumpTransactionEnvelope, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) -// String returns the name of `e` -func (e LedgerEntryType) String() string { - name, _ := ledgerEntryTypeMap[int32(e)] - return name -} + if armName == "FeeBump" { + result = *u.FeeBump + ok = true + } -// Signer is an XDR Struct defines as: -// -// struct Signer -// { -// AccountID pubKey; -// uint32 weight; // really only need 1byte -// }; -// -type Signer struct { - PubKey AccountId - Weight Uint32 + return } -// AccountFlags is an XDR Enum defines as: -// -// enum AccountFlags -// { // masks for each flag -// -// // Flags set on issuer accounts -// // TrustLines are created with authorized set to "false" requiring -// // the issuer to set it for each TrustLine -// AUTH_REQUIRED_FLAG = 0x1, -// // If set, the authorized flag in TrustLines can be cleared -// // otherwise, authorization cannot be revoked -// AUTH_REVOCABLE_FLAG = 0x2, -// // Once set, causes all AUTH_* flags to be read-only -// AUTH_IMMUTABLE_FLAG = 0x4 -// }; -// -type AccountFlags int32 +// EncodeTo encodes this value using the Encoder. +func (u TransactionEnvelope) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch EnvelopeType(u.Type) { + case EnvelopeTypeEnvelopeTypeTxV0: + if err = (*u.V0).EncodeTo(e); err != nil { + return err + } + return nil + case EnvelopeTypeEnvelopeTypeTx: + if err = (*u.V1).EncodeTo(e); err != nil { + return err + } + return nil + case EnvelopeTypeEnvelopeTypeTxFeeBump: + if err = (*u.FeeBump).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (EnvelopeType) switch value '%d' is not valid for union TransactionEnvelope", u.Type) +} -const ( - AccountFlagsAuthRequiredFlag AccountFlags = 1 - AccountFlagsAuthRevocableFlag AccountFlags = 2 - AccountFlagsAuthImmutableFlag AccountFlags = 4 -) +var _ decoderFrom = (*TransactionEnvelope)(nil) -var accountFlagsMap = map[int32]string{ - 1: "AccountFlagsAuthRequiredFlag", - 2: "AccountFlagsAuthRevocableFlag", - 4: "AccountFlagsAuthImmutableFlag", +// DecodeFrom decodes this value using the Decoder. +func (u *TransactionEnvelope) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding EnvelopeType: %s", err) + } + switch EnvelopeType(u.Type) { + case EnvelopeTypeEnvelopeTypeTxV0: + u.V0 = new(TransactionV0Envelope) + nTmp, err = (*u.V0).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionV0Envelope: %s", err) + } + return n, nil + case EnvelopeTypeEnvelopeTypeTx: + u.V1 = new(TransactionV1Envelope) + nTmp, err = (*u.V1).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionV1Envelope: %s", err) + } + return n, nil + case EnvelopeTypeEnvelopeTypeTxFeeBump: + u.FeeBump = new(FeeBumpTransactionEnvelope) + nTmp, err = (*u.FeeBump).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding FeeBumpTransactionEnvelope: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union TransactionEnvelope has invalid Type (EnvelopeType) switch value '%d'", u.Type) } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for AccountFlags -func (e AccountFlags) ValidEnum(v int32) bool { - _, ok := accountFlagsMap[v] - return ok +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s TransactionEnvelope) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// String returns the name of `e` -func (e AccountFlags) String() string { - name, _ := accountFlagsMap[int32(e)] - return name +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionEnvelope) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// AccountEntryExt is an XDR NestedUnion defines as: +var ( + _ encoding.BinaryMarshaler = (*TransactionEnvelope)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionEnvelope)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionEnvelope) xdrType() {} + +var _ xdrType = (*TransactionEnvelope)(nil) + +// TransactionSignaturePayloadTaggedTransaction is an XDR NestedUnion defines as: // -// union switch (int v) +// union switch (EnvelopeType type) // { -// case 0: -// void; +// // Backwards Compatibility: Use ENVELOPE_TYPE_TX to sign ENVELOPE_TYPE_TX_V0 +// case ENVELOPE_TYPE_TX: +// Transaction tx; +// case ENVELOPE_TYPE_TX_FEE_BUMP: +// FeeBumpTransaction feeBump; // } // -type AccountEntryExt struct { - V int32 +type TransactionSignaturePayloadTaggedTransaction struct { + Type EnvelopeType + Tx *Transaction + FeeBump *FeeBumpTransaction } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u AccountEntryExt) SwitchFieldName() string { - return "V" +func (u TransactionSignaturePayloadTaggedTransaction) SwitchFieldName() string { + return "Type" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of AccountEntryExt -func (u AccountEntryExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "", true +// the value for an instance of TransactionSignaturePayloadTaggedTransaction +func (u TransactionSignaturePayloadTaggedTransaction) ArmForSwitch(sw int32) (string, bool) { + switch EnvelopeType(sw) { + case EnvelopeTypeEnvelopeTypeTx: + return "Tx", true + case EnvelopeTypeEnvelopeTypeTxFeeBump: + return "FeeBump", true } return "-", false } -// NewAccountEntryExt creates a new AccountEntryExt. -func NewAccountEntryExt(v int32, value interface{}) (result AccountEntryExt, err error) { - result.V = v - switch int32(v) { - case 0: - // void +// NewTransactionSignaturePayloadTaggedTransaction creates a new TransactionSignaturePayloadTaggedTransaction. 
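+//
+// A signing-payload sketch: tx is assumed to be a fully populated Transaction
+// and networkId an xdr.Hash identifying the target network (both obtained
+// elsewhere); per the XDR comments above, decorated signatures sign the
+// SHA-256 hash of the encoded TransactionSignaturePayload (crypto/sha256):
+//
+//	tagged, err := xdr.NewTransactionSignaturePayloadTaggedTransaction(xdr.EnvelopeTypeEnvelopeTypeTx, tx)
+//	payload := xdr.TransactionSignaturePayload{NetworkId: networkId, TaggedTransaction: tagged}
+//	raw, err := payload.MarshalBinary()
+//	txHash := sha256.Sum256(raw)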
+func NewTransactionSignaturePayloadTaggedTransaction(aType EnvelopeType, value interface{}) (result TransactionSignaturePayloadTaggedTransaction, err error) { + result.Type = aType + switch EnvelopeType(aType) { + case EnvelopeTypeEnvelopeTypeTx: + tv, ok := value.(Transaction) + if !ok { + err = fmt.Errorf("invalid value, must be Transaction") + return + } + result.Tx = &tv + case EnvelopeTypeEnvelopeTypeTxFeeBump: + tv, ok := value.(FeeBumpTransaction) + if !ok { + err = fmt.Errorf("invalid value, must be FeeBumpTransaction") + return + } + result.FeeBump = &tv } return } -// AccountEntry is an XDR Struct defines as: -// -// struct AccountEntry -// { -// AccountID accountID; // master public key for this account -// int64 balance; // in stroops -// SequenceNumber seqNum; // last sequence number used for this account -// uint32 numSubEntries; // number of sub-entries this account has -// // drives the reserve -// AccountID* inflationDest; // Account to vote for during inflation -// uint32 flags; // see AccountFlags -// -// string32 homeDomain; // can be used for reverse federation and memo lookup -// -// // fields used for signatures -// // thresholds stores unsigned bytes: [weight of master|low|medium|high] -// Thresholds thresholds; -// -// Signer signers<20>; // possible signers for this account -// -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; -// }; -// -type AccountEntry struct { - AccountId AccountId - Balance Int64 - SeqNum SequenceNumber - NumSubEntries Uint32 - InflationDest *AccountId - Flags Uint32 - HomeDomain String32 - Thresholds Thresholds - Signers []Signer `xdrmaxsize:"20"` - Ext AccountEntryExt +// MustTx retrieves the Tx value from the union, +// panicing if the value is not set. +func (u TransactionSignaturePayloadTaggedTransaction) MustTx() Transaction { + val, ok := u.GetTx() + + if !ok { + panic("arm Tx is not set") + } + + return val } -// TrustLineFlags is an XDR Enum defines as: -// -// enum TrustLineFlags -// { -// // issuer has authorized account to perform transactions with its credit -// AUTHORIZED_FLAG = 1 -// }; -// -type TrustLineFlags int32 +// GetTx retrieves the Tx value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TransactionSignaturePayloadTaggedTransaction) GetTx() (result Transaction, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) -const ( - TrustLineFlagsAuthorizedFlag TrustLineFlags = 1 -) + if armName == "Tx" { + result = *u.Tx + ok = true + } -var trustLineFlagsMap = map[int32]string{ - 1: "TrustLineFlagsAuthorizedFlag", + return } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for TrustLineFlags -func (e TrustLineFlags) ValidEnum(v int32) bool { - _, ok := trustLineFlagsMap[v] - return ok -} +// MustFeeBump retrieves the FeeBump value from the union, +// panicing if the value is not set. 
+func (u TransactionSignaturePayloadTaggedTransaction) MustFeeBump() FeeBumpTransaction { + val, ok := u.GetFeeBump() -// String returns the name of `e` -func (e TrustLineFlags) String() string { - name, _ := trustLineFlagsMap[int32(e)] - return name -} + if !ok { + panic("arm FeeBump is not set") + } -// TrustLineEntryExt is an XDR NestedUnion defines as: -// -// union switch (int v) -// { -// case 0: -// void; -// } -// -type TrustLineEntryExt struct { - V int32 + return val } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u TrustLineEntryExt) SwitchFieldName() string { - return "V" +// GetFeeBump retrieves the FeeBump value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TransactionSignaturePayloadTaggedTransaction) GetFeeBump() (result FeeBumpTransaction, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "FeeBump" { + result = *u.FeeBump + ok = true + } + + return } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of TrustLineEntryExt -func (u TrustLineEntryExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "", true +// EncodeTo encodes this value using the Encoder. +func (u TransactionSignaturePayloadTaggedTransaction) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err } - return "-", false + switch EnvelopeType(u.Type) { + case EnvelopeTypeEnvelopeTypeTx: + if err = (*u.Tx).EncodeTo(e); err != nil { + return err + } + return nil + case EnvelopeTypeEnvelopeTypeTxFeeBump: + if err = (*u.FeeBump).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (EnvelopeType) switch value '%d' is not valid for union TransactionSignaturePayloadTaggedTransaction", u.Type) } -// NewTrustLineEntryExt creates a new TrustLineEntryExt. -func NewTrustLineEntryExt(v int32, value interface{}) (result TrustLineEntryExt, err error) { - result.V = v - switch int32(v) { - case 0: - // void +var _ decoderFrom = (*TransactionSignaturePayloadTaggedTransaction)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TransactionSignaturePayloadTaggedTransaction) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding EnvelopeType: %s", err) } - return + switch EnvelopeType(u.Type) { + case EnvelopeTypeEnvelopeTypeTx: + u.Tx = new(Transaction) + nTmp, err = (*u.Tx).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Transaction: %s", err) + } + return n, nil + case EnvelopeTypeEnvelopeTypeTxFeeBump: + u.FeeBump = new(FeeBumpTransaction) + nTmp, err = (*u.FeeBump).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding FeeBumpTransaction: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union TransactionSignaturePayloadTaggedTransaction has invalid Type (EnvelopeType) switch value '%d'", u.Type) } -// TrustLineEntry is an XDR Struct defines as: +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionSignaturePayloadTaggedTransaction) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *TransactionSignaturePayloadTaggedTransaction) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionSignaturePayloadTaggedTransaction)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionSignaturePayloadTaggedTransaction)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionSignaturePayloadTaggedTransaction) xdrType() {} + +var _ xdrType = (*TransactionSignaturePayloadTaggedTransaction)(nil) + +// TransactionSignaturePayload is an XDR Struct defines as: // -// struct TrustLineEntry +// struct TransactionSignaturePayload // { -// AccountID accountID; // account this trustline belongs to -// Asset asset; // type of asset (with issuer) -// int64 balance; // how much of this asset the user has. -// // Asset defines the unit for this; -// -// int64 limit; // balance cannot be above this -// uint32 flags; // see TrustLineFlags -// -// // reserved for future use -// union switch (int v) +// Hash networkId; +// union switch (EnvelopeType type) // { -// case 0: -// void; +// // Backwards Compatibility: Use ENVELOPE_TYPE_TX to sign ENVELOPE_TYPE_TX_V0 +// case ENVELOPE_TYPE_TX: +// Transaction tx; +// case ENVELOPE_TYPE_TX_FEE_BUMP: +// FeeBumpTransaction feeBump; // } -// ext; +// taggedTransaction; // }; // -type TrustLineEntry struct { - AccountId AccountId - Asset Asset - Balance Int64 - Limit Int64 - Flags Uint32 - Ext TrustLineEntryExt +type TransactionSignaturePayload struct { + NetworkId Hash + TaggedTransaction TransactionSignaturePayloadTaggedTransaction } -// OfferEntryFlags is an XDR Enum defines as: +// EncodeTo encodes this value using the Encoder. +func (s *TransactionSignaturePayload) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.NetworkId.EncodeTo(e); err != nil { + return err + } + if err = s.TaggedTransaction.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*TransactionSignaturePayload)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *TransactionSignaturePayload) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.NetworkId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + nTmp, err = s.TaggedTransaction.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionSignaturePayloadTaggedTransaction: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionSignaturePayload) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionSignaturePayload) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionSignaturePayload)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionSignaturePayload)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s TransactionSignaturePayload) xdrType() {} + +var _ xdrType = (*TransactionSignaturePayload)(nil) + +// ClaimAtomType is an XDR Enum defines as: // -// enum OfferEntryFlags +// enum ClaimAtomType // { -// // issuer has authorized account to perform transactions with its credit -// PASSIVE_FLAG = 1 +// CLAIM_ATOM_TYPE_V0 = 0, +// CLAIM_ATOM_TYPE_ORDER_BOOK = 1, +// CLAIM_ATOM_TYPE_LIQUIDITY_POOL = 2 // }; // -type OfferEntryFlags int32 +type ClaimAtomType int32 const ( - OfferEntryFlagsPassiveFlag OfferEntryFlags = 1 + ClaimAtomTypeClaimAtomTypeV0 ClaimAtomType = 0 + ClaimAtomTypeClaimAtomTypeOrderBook ClaimAtomType = 1 + ClaimAtomTypeClaimAtomTypeLiquidityPool ClaimAtomType = 2 ) -var offerEntryFlagsMap = map[int32]string{ - 1: "OfferEntryFlagsPassiveFlag", +var claimAtomTypeMap = map[int32]string{ + 0: "ClaimAtomTypeClaimAtomTypeV0", + 1: "ClaimAtomTypeClaimAtomTypeOrderBook", + 2: "ClaimAtomTypeClaimAtomTypeLiquidityPool", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for OfferEntryFlags -func (e OfferEntryFlags) ValidEnum(v int32) bool { - _, ok := offerEntryFlagsMap[v] +// the Enum interface for ClaimAtomType +func (e ClaimAtomType) ValidEnum(v int32) bool { + _, ok := claimAtomTypeMap[v] return ok } // String returns the name of `e` -func (e OfferEntryFlags) String() string { - name, _ := offerEntryFlagsMap[int32(e)] +func (e ClaimAtomType) String() string { + name, _ := claimAtomTypeMap[int32(e)] return name } -// OfferEntryExt is an XDR NestedUnion defines as: -// -// union switch (int v) -// { -// case 0: -// void; -// } -// -type OfferEntryExt struct { - V int32 +// EncodeTo encodes this value using the Encoder. +func (e ClaimAtomType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := claimAtomTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ClaimAtomType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u OfferEntryExt) SwitchFieldName() string { - return "V" -} +var _ decoderFrom = (*ClaimAtomType)(nil) -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of OfferEntryExt -func (u OfferEntryExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "", true +// DecodeFrom decodes this value using the Decoder. +func (e *ClaimAtomType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ClaimAtomType: %s", err) } - return "-", false + if _, ok := claimAtomTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ClaimAtomType enum value", v) + } + *e = ClaimAtomType(v) + return n, nil } -// NewOfferEntryExt creates a new OfferEntryExt. -func NewOfferEntryExt(v int32, value interface{}) (result OfferEntryExt, err error) { - result.V = v - switch int32(v) { - case 0: - // void - } - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimAtomType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// OfferEntry is an XDR Struct defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *ClaimAtomType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimAtomType)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimAtomType)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimAtomType) xdrType() {} + +var _ xdrType = (*ClaimAtomType)(nil) + +// ClaimOfferAtomV0 is an XDR Struct defines as: // -// struct OfferEntry +// struct ClaimOfferAtomV0 // { -// AccountID sellerID; -// uint64 offerID; -// Asset selling; // A -// Asset buying; // B -// int64 amount; // amount of A +// // emitted to identify the offer +// uint256 sellerEd25519; // Account that owns the offer +// int64 offerID; // -// /* price for this offer: -// price of A in terms of B -// price=AmountB/AmountA=priceNumerator/priceDenominator -// price is after fees -// */ -// Price price; -// uint32 flags; // see OfferEntryFlags +// // amount and asset taken from the owner +// Asset assetSold; +// int64 amountSold; // -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; +// // amount and asset sent to the owner +// Asset assetBought; +// int64 amountBought; // }; // -type OfferEntry struct { - SellerId AccountId - OfferId Uint64 - Selling Asset - Buying Asset - Amount Int64 - Price Price - Flags Uint32 - Ext OfferEntryExt +type ClaimOfferAtomV0 struct { + SellerEd25519 Uint256 + OfferId Int64 + AssetSold Asset + AmountSold Int64 + AssetBought Asset + AmountBought Int64 } -// DataEntryExt is an XDR NestedUnion defines as: +// EncodeTo encodes this value using the Encoder. +func (s *ClaimOfferAtomV0) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SellerEd25519.EncodeTo(e); err != nil { + return err + } + if err = s.OfferId.EncodeTo(e); err != nil { + return err + } + if err = s.AssetSold.EncodeTo(e); err != nil { + return err + } + if err = s.AmountSold.EncodeTo(e); err != nil { + return err + } + if err = s.AssetBought.EncodeTo(e); err != nil { + return err + } + if err = s.AmountBought.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ClaimOfferAtomV0)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ClaimOfferAtomV0) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SellerEd25519.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + nTmp, err = s.OfferId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.AssetSold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.AmountSold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.AssetBought.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.AmountBought.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimOfferAtomV0) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
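// Illustrative sketch only (not generator output): a MarshalBinary/UnmarshalBinary round
// trip for the struct above. The helper name is hypothetical; the omitted asset fields
// stay at their zero value, which corresponds to the native-asset arm.
func exampleClaimOfferAtomV0RoundTrip() error {
	atom := ClaimOfferAtomV0{
		OfferId:      12345,
		AmountSold:   100,
		AmountBought: 250,
	}
	raw, err := atom.MarshalBinary()
	if err != nil {
		return err
	}
	// After a successful decode, decoded equals atom field for field.
	var decoded ClaimOfferAtomV0
	return decoded.UnmarshalBinary(raw)
}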
+func (s *ClaimOfferAtomV0) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimOfferAtomV0)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimOfferAtomV0)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimOfferAtomV0) xdrType() {} + +var _ xdrType = (*ClaimOfferAtomV0)(nil) + +// ClaimOfferAtom is an XDR Struct defines as: // -// union switch (int v) -// { -// case 0: -// void; -// } +// struct ClaimOfferAtom +// { +// // emitted to identify the offer +// AccountID sellerID; // Account that owns the offer +// int64 offerID; // -type DataEntryExt struct { - V int32 +// // amount and asset taken from the owner +// Asset assetSold; +// int64 amountSold; +// +// // amount and asset sent to the owner +// Asset assetBought; +// int64 amountBought; +// }; +// +type ClaimOfferAtom struct { + SellerId AccountId + OfferId Int64 + AssetSold Asset + AmountSold Int64 + AssetBought Asset + AmountBought Int64 } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u DataEntryExt) SwitchFieldName() string { - return "V" +// EncodeTo encodes this value using the Encoder. +func (s *ClaimOfferAtom) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.SellerId.EncodeTo(e); err != nil { + return err + } + if err = s.OfferId.EncodeTo(e); err != nil { + return err + } + if err = s.AssetSold.EncodeTo(e); err != nil { + return err + } + if err = s.AmountSold.EncodeTo(e); err != nil { + return err + } + if err = s.AssetBought.EncodeTo(e); err != nil { + return err + } + if err = s.AmountBought.EncodeTo(e); err != nil { + return err + } + return nil } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of DataEntryExt -func (u DataEntryExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "", true +var _ decoderFrom = (*ClaimOfferAtom)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ClaimOfferAtom) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.SellerId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) } - return "-", false + nTmp, err = s.OfferId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.AssetSold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.AmountSold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.AssetBought.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.AmountBought.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil } -// NewDataEntryExt creates a new DataEntryExt. -func NewDataEntryExt(v int32, value interface{}) (result DataEntryExt, err error) { - result.V = v - switch int32(v) { - case 0: - // void - } - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimOfferAtom) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// DataEntry is an XDR Struct defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *ClaimOfferAtom) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimOfferAtom)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimOfferAtom)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimOfferAtom) xdrType() {} + +var _ xdrType = (*ClaimOfferAtom)(nil) + +// ClaimLiquidityAtom is an XDR Struct defines as: // -// struct DataEntry +// struct ClaimLiquidityAtom // { -// AccountID accountID; // account this data belongs to -// string64 dataName; -// DataValue dataValue; +// PoolID liquidityPoolID; // -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; +// // amount and asset taken from the pool +// Asset assetSold; +// int64 amountSold; +// +// // amount and asset sent to the pool +// Asset assetBought; +// int64 amountBought; // }; // -type DataEntry struct { - AccountId AccountId - DataName String64 - DataValue DataValue - Ext DataEntryExt +type ClaimLiquidityAtom struct { + LiquidityPoolId PoolId + AssetSold Asset + AmountSold Int64 + AssetBought Asset + AmountBought Int64 +} + +// EncodeTo encodes this value using the Encoder. +func (s *ClaimLiquidityAtom) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.LiquidityPoolId.EncodeTo(e); err != nil { + return err + } + if err = s.AssetSold.EncodeTo(e); err != nil { + return err + } + if err = s.AmountSold.EncodeTo(e); err != nil { + return err + } + if err = s.AssetBought.EncodeTo(e); err != nil { + return err + } + if err = s.AmountBought.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ClaimLiquidityAtom)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ClaimLiquidityAtom) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.LiquidityPoolId.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PoolId: %s", err) + } + nTmp, err = s.AssetSold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.AmountSold.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.AssetBought.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + nTmp, err = s.AmountBought.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimLiquidityAtom) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimLiquidityAtom) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// LedgerEntryData is an XDR NestedUnion defines as: +var ( + _ encoding.BinaryMarshaler = (*ClaimLiquidityAtom)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimLiquidityAtom)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ClaimLiquidityAtom) xdrType() {} + +var _ xdrType = (*ClaimLiquidityAtom)(nil) + +// ClaimAtom is an XDR Union defines as: // -// union switch (LedgerEntryType type) -// { -// case ACCOUNT: -// AccountEntry account; -// case TRUSTLINE: -// TrustLineEntry trustLine; -// case OFFER: -// OfferEntry offer; -// case DATA: -// DataEntry data; -// } +// union ClaimAtom switch (ClaimAtomType type) +// { +// case CLAIM_ATOM_TYPE_V0: +// ClaimOfferAtomV0 v0; +// case CLAIM_ATOM_TYPE_ORDER_BOOK: +// ClaimOfferAtom orderBook; +// case CLAIM_ATOM_TYPE_LIQUIDITY_POOL: +// ClaimLiquidityAtom liquidityPool; +// }; // -type LedgerEntryData struct { - Type LedgerEntryType - Account *AccountEntry - TrustLine *TrustLineEntry - Offer *OfferEntry - Data *DataEntry +type ClaimAtom struct { + Type ClaimAtomType + V0 *ClaimOfferAtomV0 + OrderBook *ClaimOfferAtom + LiquidityPool *ClaimLiquidityAtom } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u LedgerEntryData) SwitchFieldName() string { +func (u ClaimAtom) SwitchFieldName() string { return "Type" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of LedgerEntryData -func (u LedgerEntryData) ArmForSwitch(sw int32) (string, bool) { - switch LedgerEntryType(sw) { - case LedgerEntryTypeAccount: - return "Account", true - case LedgerEntryTypeTrustline: - return "TrustLine", true - case LedgerEntryTypeOffer: - return "Offer", true - case LedgerEntryTypeData: - return "Data", true +// the value for an instance of ClaimAtom +func (u ClaimAtom) ArmForSwitch(sw int32) (string, bool) { + switch ClaimAtomType(sw) { + case ClaimAtomTypeClaimAtomTypeV0: + return "V0", true + case ClaimAtomTypeClaimAtomTypeOrderBook: + return "OrderBook", true + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + return "LiquidityPool", true } return "-", false } -// NewLedgerEntryData creates a new LedgerEntryData. -func NewLedgerEntryData(aType LedgerEntryType, value interface{}) (result LedgerEntryData, err error) { +// NewClaimAtom creates a new ClaimAtom. 
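// Illustrative sketch only (not generator output): the constructor below checks that the
// supplied value matches the arm selected by the discriminant and returns an error on a
// mismatch. The helper name is hypothetical.
func exampleNewClaimAtom() (ClaimAtom, error) {
	atom, err := NewClaimAtom(ClaimAtomTypeClaimAtomTypeV0, ClaimOfferAtomV0{OfferId: 1})
	if err != nil {
		return ClaimAtom{}, err
	}
	// Passing a value of the wrong arm type is rejected rather than panicking.
	if _, err := NewClaimAtom(ClaimAtomTypeClaimAtomTypeOrderBook, ClaimOfferAtomV0{}); err == nil {
		return ClaimAtom{}, fmt.Errorf("expected a type mismatch error")
	}
	return atom, nil
}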
+func NewClaimAtom(aType ClaimAtomType, value interface{}) (result ClaimAtom, err error) { result.Type = aType - switch LedgerEntryType(aType) { - case LedgerEntryTypeAccount: - tv, ok := value.(AccountEntry) - if !ok { - err = fmt.Errorf("invalid value, must be AccountEntry") - return - } - result.Account = &tv - case LedgerEntryTypeTrustline: - tv, ok := value.(TrustLineEntry) + switch ClaimAtomType(aType) { + case ClaimAtomTypeClaimAtomTypeV0: + tv, ok := value.(ClaimOfferAtomV0) if !ok { - err = fmt.Errorf("invalid value, must be TrustLineEntry") + err = fmt.Errorf("invalid value, must be ClaimOfferAtomV0") return } - result.TrustLine = &tv - case LedgerEntryTypeOffer: - tv, ok := value.(OfferEntry) + result.V0 = &tv + case ClaimAtomTypeClaimAtomTypeOrderBook: + tv, ok := value.(ClaimOfferAtom) if !ok { - err = fmt.Errorf("invalid value, must be OfferEntry") + err = fmt.Errorf("invalid value, must be ClaimOfferAtom") return } - result.Offer = &tv - case LedgerEntryTypeData: - tv, ok := value.(DataEntry) + result.OrderBook = &tv + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + tv, ok := value.(ClaimLiquidityAtom) if !ok { - err = fmt.Errorf("invalid value, must be DataEntry") + err = fmt.Errorf("invalid value, must be ClaimLiquidityAtom") return } - result.Data = &tv + result.LiquidityPool = &tv } return } -// MustAccount retrieves the Account value from the union, +// MustV0 retrieves the V0 value from the union, // panicing if the value is not set. -func (u LedgerEntryData) MustAccount() AccountEntry { - val, ok := u.GetAccount() +func (u ClaimAtom) MustV0() ClaimOfferAtomV0 { + val, ok := u.GetV0() if !ok { - panic("arm Account is not set") + panic("arm V0 is not set") } return val } -// GetAccount retrieves the Account value from the union, +// GetV0 retrieves the V0 value from the union, // returning ok if the union's switch indicated the value is valid. -func (u LedgerEntryData) GetAccount() (result AccountEntry, ok bool) { +func (u ClaimAtom) GetV0() (result ClaimOfferAtomV0, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "Account" { - result = *u.Account + if armName == "V0" { + result = *u.V0 ok = true } return } -// MustTrustLine retrieves the TrustLine value from the union, +// MustOrderBook retrieves the OrderBook value from the union, // panicing if the value is not set. -func (u LedgerEntryData) MustTrustLine() TrustLineEntry { - val, ok := u.GetTrustLine() +func (u ClaimAtom) MustOrderBook() ClaimOfferAtom { + val, ok := u.GetOrderBook() if !ok { - panic("arm TrustLine is not set") + panic("arm OrderBook is not set") } return val } -// GetTrustLine retrieves the TrustLine value from the union, +// GetOrderBook retrieves the OrderBook value from the union, // returning ok if the union's switch indicated the value is valid. -func (u LedgerEntryData) GetTrustLine() (result TrustLineEntry, ok bool) { +func (u ClaimAtom) GetOrderBook() (result ClaimOfferAtom, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "TrustLine" { - result = *u.TrustLine + if armName == "OrderBook" { + result = *u.OrderBook ok = true } return } -// MustOffer retrieves the Offer value from the union, +// MustLiquidityPool retrieves the LiquidityPool value from the union, // panicing if the value is not set. 
-func (u LedgerEntryData) MustOffer() OfferEntry { - val, ok := u.GetOffer() +func (u ClaimAtom) MustLiquidityPool() ClaimLiquidityAtom { + val, ok := u.GetLiquidityPool() if !ok { - panic("arm Offer is not set") + panic("arm LiquidityPool is not set") } return val } -// GetOffer retrieves the Offer value from the union, +// GetLiquidityPool retrieves the LiquidityPool value from the union, // returning ok if the union's switch indicated the value is valid. -func (u LedgerEntryData) GetOffer() (result OfferEntry, ok bool) { +func (u ClaimAtom) GetLiquidityPool() (result ClaimLiquidityAtom, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "Offer" { - result = *u.Offer + if armName == "LiquidityPool" { + result = *u.LiquidityPool ok = true } return } -// MustData retrieves the Data value from the union, -// panicing if the value is not set. -func (u LedgerEntryData) MustData() DataEntry { - val, ok := u.GetData() +// EncodeTo encodes this value using the Encoder. +func (u ClaimAtom) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch ClaimAtomType(u.Type) { + case ClaimAtomTypeClaimAtomTypeV0: + if err = (*u.V0).EncodeTo(e); err != nil { + return err + } + return nil + case ClaimAtomTypeClaimAtomTypeOrderBook: + if err = (*u.OrderBook).EncodeTo(e); err != nil { + return err + } + return nil + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + if err = (*u.LiquidityPool).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (ClaimAtomType) switch value '%d' is not valid for union ClaimAtom", u.Type) +} - if !ok { - panic("arm Data is not set") +var _ decoderFrom = (*ClaimAtom)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *ClaimAtom) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimAtomType: %s", err) + } + switch ClaimAtomType(u.Type) { + case ClaimAtomTypeClaimAtomTypeV0: + u.V0 = new(ClaimOfferAtomV0) + nTmp, err = (*u.V0).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimOfferAtomV0: %s", err) + } + return n, nil + case ClaimAtomTypeClaimAtomTypeOrderBook: + u.OrderBook = new(ClaimOfferAtom) + nTmp, err = (*u.OrderBook).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimOfferAtom: %s", err) + } + return n, nil + case ClaimAtomTypeClaimAtomTypeLiquidityPool: + u.LiquidityPool = new(ClaimLiquidityAtom) + nTmp, err = (*u.LiquidityPool).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimLiquidityAtom: %s", err) + } + return n, nil } + return n, fmt.Errorf("union ClaimAtom has invalid Type (ClaimAtomType) switch value '%d'", u.Type) +} - return val +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimAtom) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetData retrieves the Data value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u LedgerEntryData) GetData() (result DataEntry, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
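// Illustrative sketch only (not generator output): decoding a ClaimAtom from raw XDR bytes
// and dispatching on its discriminant with the Must* accessors defined above. The helper
// name is hypothetical and raw is assumed to hold an XDR-encoded ClaimAtom.
func claimAtomAmountSold(raw []byte) (Int64, error) {
	var atom ClaimAtom
	if err := atom.UnmarshalBinary(raw); err != nil {
		return 0, err
	}
	switch atom.Type {
	case ClaimAtomTypeClaimAtomTypeV0:
		return atom.MustV0().AmountSold, nil
	case ClaimAtomTypeClaimAtomTypeOrderBook:
		return atom.MustOrderBook().AmountSold, nil
	case ClaimAtomTypeClaimAtomTypeLiquidityPool:
		return atom.MustLiquidityPool().AmountSold, nil
	default:
		return 0, fmt.Errorf("unexpected ClaimAtomType %d", atom.Type)
	}
}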
+func (s *ClaimAtom) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if armName == "Data" { - result = *u.Data - ok = true +var ( + _ encoding.BinaryMarshaler = (*ClaimAtom)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimAtom)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimAtom) xdrType() {} + +var _ xdrType = (*ClaimAtom)(nil) + +// CreateAccountResultCode is an XDR Enum defines as: +// +// enum CreateAccountResultCode +// { +// // codes considered as "success" for the operation +// CREATE_ACCOUNT_SUCCESS = 0, // account was created +// +// // codes considered as "failure" for the operation +// CREATE_ACCOUNT_MALFORMED = -1, // invalid destination +// CREATE_ACCOUNT_UNDERFUNDED = -2, // not enough funds in source account +// CREATE_ACCOUNT_LOW_RESERVE = +// -3, // would create an account below the min reserve +// CREATE_ACCOUNT_ALREADY_EXIST = -4 // account already exists +// }; +// +type CreateAccountResultCode int32 + +const ( + CreateAccountResultCodeCreateAccountSuccess CreateAccountResultCode = 0 + CreateAccountResultCodeCreateAccountMalformed CreateAccountResultCode = -1 + CreateAccountResultCodeCreateAccountUnderfunded CreateAccountResultCode = -2 + CreateAccountResultCodeCreateAccountLowReserve CreateAccountResultCode = -3 + CreateAccountResultCodeCreateAccountAlreadyExist CreateAccountResultCode = -4 +) + +var createAccountResultCodeMap = map[int32]string{ + 0: "CreateAccountResultCodeCreateAccountSuccess", + -1: "CreateAccountResultCodeCreateAccountMalformed", + -2: "CreateAccountResultCodeCreateAccountUnderfunded", + -3: "CreateAccountResultCodeCreateAccountLowReserve", + -4: "CreateAccountResultCodeCreateAccountAlreadyExist", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for CreateAccountResultCode +func (e CreateAccountResultCode) ValidEnum(v int32) bool { + _, ok := createAccountResultCodeMap[v] + return ok +} + +// String returns the name of `e` +func (e CreateAccountResultCode) String() string { + name, _ := createAccountResultCodeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e CreateAccountResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := createAccountResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid CreateAccountResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*CreateAccountResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *CreateAccountResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding CreateAccountResultCode: %s", err) + } + if _, ok := createAccountResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid CreateAccountResultCode enum value", v) } + *e = CreateAccountResultCode(v) + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s CreateAccountResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// LedgerEntryExt is an XDR NestedUnion defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *CreateAccountResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*CreateAccountResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*CreateAccountResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s CreateAccountResultCode) xdrType() {} + +var _ xdrType = (*CreateAccountResultCode)(nil) + +// CreateAccountResult is an XDR Union defines as: // -// union switch (int v) -// { -// case 0: -// void; -// } +// union CreateAccountResult switch (CreateAccountResultCode code) +// { +// case CREATE_ACCOUNT_SUCCESS: +// void; +// default: +// void; +// }; // -type LedgerEntryExt struct { - V int32 +type CreateAccountResult struct { + Code CreateAccountResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u LedgerEntryExt) SwitchFieldName() string { - return "V" +func (u CreateAccountResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of LedgerEntryExt -func (u LedgerEntryExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: +// the value for an instance of CreateAccountResult +func (u CreateAccountResult) ArmForSwitch(sw int32) (string, bool) { + switch CreateAccountResultCode(sw) { + case CreateAccountResultCodeCreateAccountSuccess: + return "", true + default: return "", true } - return "-", false } -// NewLedgerEntryExt creates a new LedgerEntryExt. -func NewLedgerEntryExt(v int32, value interface{}) (result LedgerEntryExt, err error) { - result.V = v - switch int32(v) { - case 0: +// NewCreateAccountResult creates a new CreateAccountResult. +func NewCreateAccountResult(code CreateAccountResultCode, value interface{}) (result CreateAccountResult, err error) { + result.Code = code + switch CreateAccountResultCode(code) { + case CreateAccountResultCodeCreateAccountSuccess: + // void + default: // void } return } -// LedgerEntry is an XDR Struct defines as: -// -// struct LedgerEntry -// { -// uint32 lastModifiedLedgerSeq; // ledger the LedgerEntry was last changed -// -// union switch (LedgerEntryType type) -// { -// case ACCOUNT: -// AccountEntry account; -// case TRUSTLINE: -// TrustLineEntry trustLine; -// case OFFER: -// OfferEntry offer; -// case DATA: -// DataEntry data; -// } -// data; -// -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; -// }; -// -type LedgerEntry struct { - LastModifiedLedgerSeq Uint32 - Data LedgerEntryData - Ext LedgerEntryExt +// EncodeTo encodes this value using the Encoder. +func (u CreateAccountResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch CreateAccountResultCode(u.Code) { + case CreateAccountResultCodeCreateAccountSuccess: + // Void + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*CreateAccountResult)(nil) + +// DecodeFrom decodes this value using the Decoder. 
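// Illustrative sketch only (not generator output): when every arm of a result union is
// void, the union carries nothing but its code, so callers just compare Code after
// decoding. The helper name is hypothetical and raw is assumed to hold an XDR-encoded
// CreateAccountResult.
func checkCreateAccountResult(raw []byte) error {
	var res CreateAccountResult
	if err := res.UnmarshalBinary(raw); err != nil {
		return err
	}
	if res.Code != CreateAccountResultCodeCreateAccountSuccess {
		// The code's String method returns the generated constant name.
		return fmt.Errorf("create account failed with code %s", res.Code)
	}
	return nil
}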
+func (u *CreateAccountResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding CreateAccountResultCode: %s", err) + } + switch CreateAccountResultCode(u.Code) { + case CreateAccountResultCodeCreateAccountSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } } -// EnvelopeType is an XDR Enum defines as: +// MarshalBinary implements encoding.BinaryMarshaler. +func (s CreateAccountResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *CreateAccountResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*CreateAccountResult)(nil) + _ encoding.BinaryUnmarshaler = (*CreateAccountResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s CreateAccountResult) xdrType() {} + +var _ xdrType = (*CreateAccountResult)(nil) + +// PaymentResultCode is an XDR Enum defines as: // -// enum EnvelopeType +// enum PaymentResultCode // { -// ENVELOPE_TYPE_SCP = 1, -// ENVELOPE_TYPE_TX = 2, -// ENVELOPE_TYPE_AUTH = 3 +// // codes considered as "success" for the operation +// PAYMENT_SUCCESS = 0, // payment successfully completed +// +// // codes considered as "failure" for the operation +// PAYMENT_MALFORMED = -1, // bad input +// PAYMENT_UNDERFUNDED = -2, // not enough funds in source account +// PAYMENT_SRC_NO_TRUST = -3, // no trust line on source account +// PAYMENT_SRC_NOT_AUTHORIZED = -4, // source not authorized to transfer +// PAYMENT_NO_DESTINATION = -5, // destination account does not exist +// PAYMENT_NO_TRUST = -6, // destination missing a trust line for asset +// PAYMENT_NOT_AUTHORIZED = -7, // destination not authorized to hold asset +// PAYMENT_LINE_FULL = -8, // destination would go above their limit +// PAYMENT_NO_ISSUER = -9 // missing issuer on asset // }; // -type EnvelopeType int32 +type PaymentResultCode int32 const ( - EnvelopeTypeEnvelopeTypeScp EnvelopeType = 1 - EnvelopeTypeEnvelopeTypeTx EnvelopeType = 2 - EnvelopeTypeEnvelopeTypeAuth EnvelopeType = 3 + PaymentResultCodePaymentSuccess PaymentResultCode = 0 + PaymentResultCodePaymentMalformed PaymentResultCode = -1 + PaymentResultCodePaymentUnderfunded PaymentResultCode = -2 + PaymentResultCodePaymentSrcNoTrust PaymentResultCode = -3 + PaymentResultCodePaymentSrcNotAuthorized PaymentResultCode = -4 + PaymentResultCodePaymentNoDestination PaymentResultCode = -5 + PaymentResultCodePaymentNoTrust PaymentResultCode = -6 + PaymentResultCodePaymentNotAuthorized PaymentResultCode = -7 + PaymentResultCodePaymentLineFull PaymentResultCode = -8 + PaymentResultCodePaymentNoIssuer PaymentResultCode = -9 ) -var envelopeTypeMap = map[int32]string{ - 1: "EnvelopeTypeEnvelopeTypeScp", - 2: "EnvelopeTypeEnvelopeTypeTx", - 3: "EnvelopeTypeEnvelopeTypeAuth", +var paymentResultCodeMap = map[int32]string{ + 0: "PaymentResultCodePaymentSuccess", + -1: "PaymentResultCodePaymentMalformed", + -2: "PaymentResultCodePaymentUnderfunded", + -3: "PaymentResultCodePaymentSrcNoTrust", + -4: "PaymentResultCodePaymentSrcNotAuthorized", + -5: "PaymentResultCodePaymentNoDestination", + -6: "PaymentResultCodePaymentNoTrust", + -7: "PaymentResultCodePaymentNotAuthorized", + 
-8: "PaymentResultCodePaymentLineFull", + -9: "PaymentResultCodePaymentNoIssuer", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for EnvelopeType -func (e EnvelopeType) ValidEnum(v int32) bool { - _, ok := envelopeTypeMap[v] +// the Enum interface for PaymentResultCode +func (e PaymentResultCode) ValidEnum(v int32) bool { + _, ok := paymentResultCodeMap[v] return ok } // String returns the name of `e` -func (e EnvelopeType) String() string { - name, _ := envelopeTypeMap[int32(e)] +func (e PaymentResultCode) String() string { + name, _ := paymentResultCodeMap[int32(e)] return name } -// DecoratedSignature is an XDR Struct defines as: -// -// struct DecoratedSignature -// { -// SignatureHint hint; // last 4 bytes of the public key, used as a hint -// Signature signature; // actual signature -// }; -// -type DecoratedSignature struct { - Hint SignatureHint - Signature Signature +// EncodeTo encodes this value using the Encoder. +func (e PaymentResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := paymentResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid PaymentResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// OperationType is an XDR Enum defines as: -// -// enum OperationType -// { -// CREATE_ACCOUNT = 0, -// PAYMENT = 1, -// PATH_PAYMENT = 2, -// MANAGE_OFFER = 3, -// CREATE_PASSIVE_OFFER = 4, -// SET_OPTIONS = 5, -// CHANGE_TRUST = 6, -// ALLOW_TRUST = 7, -// ACCOUNT_MERGE = 8, -// INFLATION = 9, -// MANAGE_DATA = 10 -// }; -// -type OperationType int32 - -const ( - OperationTypeCreateAccount OperationType = 0 - OperationTypePayment OperationType = 1 - OperationTypePathPayment OperationType = 2 - OperationTypeManageOffer OperationType = 3 - OperationTypeCreatePassiveOffer OperationType = 4 - OperationTypeSetOptions OperationType = 5 - OperationTypeChangeTrust OperationType = 6 - OperationTypeAllowTrust OperationType = 7 - OperationTypeAccountMerge OperationType = 8 - OperationTypeInflation OperationType = 9 - OperationTypeManageData OperationType = 10 -) +var _ decoderFrom = (*PaymentResultCode)(nil) -var operationTypeMap = map[int32]string{ - 0: "OperationTypeCreateAccount", - 1: "OperationTypePayment", - 2: "OperationTypePathPayment", - 3: "OperationTypeManageOffer", - 4: "OperationTypeCreatePassiveOffer", - 5: "OperationTypeSetOptions", - 6: "OperationTypeChangeTrust", - 7: "OperationTypeAllowTrust", - 8: "OperationTypeAccountMerge", - 9: "OperationTypeInflation", - 10: "OperationTypeManageData", +// DecodeFrom decodes this value using the Decoder. +func (e *PaymentResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding PaymentResultCode: %s", err) + } + if _, ok := paymentResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid PaymentResultCode enum value", v) + } + *e = PaymentResultCode(v) + return n, nil } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for OperationType -func (e OperationType) ValidEnum(v int32) bool { - _, ok := operationTypeMap[v] - return ok +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s PaymentResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// String returns the name of `e` -func (e OperationType) String() string { - name, _ := operationTypeMap[int32(e)] - return name +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PaymentResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// CreateAccountOp is an XDR Struct defines as: +var ( + _ encoding.BinaryMarshaler = (*PaymentResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*PaymentResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PaymentResultCode) xdrType() {} + +var _ xdrType = (*PaymentResultCode)(nil) + +// PaymentResult is an XDR Union defines as: // -// struct CreateAccountOp +// union PaymentResult switch (PaymentResultCode code) // { -// AccountID destination; // account to create -// int64 startingBalance; // amount they end up with +// case PAYMENT_SUCCESS: +// void; +// default: +// void; // }; // -type CreateAccountOp struct { - Destination AccountId - StartingBalance Int64 +type PaymentResult struct { + Code PaymentResultCode } -// PaymentOp is an XDR Struct defines as: -// -// struct PaymentOp -// { -// AccountID destination; // recipient of the payment -// Asset asset; // what they end up with -// int64 amount; // amount they end up with -// }; -// -type PaymentOp struct { - Destination AccountId - Asset Asset - Amount Int64 +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u PaymentResult) SwitchFieldName() string { + return "Code" } -// PathPaymentOp is an XDR Struct defines as: -// -// struct PathPaymentOp -// { -// Asset sendAsset; // asset we pay with -// int64 sendMax; // the maximum amount of sendAsset to -// // send (excluding fees). -// // The operation will fail if can't be met -// -// AccountID destination; // recipient of the payment -// Asset destAsset; // what they end up with -// int64 destAmount; // amount they end up with -// -// Asset path<5>; // additional hops it must go through to get there -// }; -// -type PathPaymentOp struct { - SendAsset Asset - SendMax Int64 - Destination AccountId - DestAsset Asset - DestAmount Int64 - Path []Asset `xdrmaxsize:"5"` +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of PaymentResult +func (u PaymentResult) ArmForSwitch(sw int32) (string, bool) { + switch PaymentResultCode(sw) { + case PaymentResultCodePaymentSuccess: + return "", true + default: + return "", true + } } -// ManageOfferOp is an XDR Struct defines as: -// -// struct ManageOfferOp -// { -// Asset selling; -// Asset buying; -// int64 amount; // amount being sold. if set to 0, delete the offer -// Price price; // price of thing being sold in terms of what you are buying -// -// // 0=create a new offer, otherwise edit an existing offer -// uint64 offerID; -// }; -// -type ManageOfferOp struct { - Selling Asset - Buying Asset - Amount Int64 - Price Price - OfferId Uint64 +// NewPaymentResult creates a new PaymentResult. 
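// Illustrative sketch only (not generator output): because both arms of PaymentResult are
// void, the constructor below ignores the value argument and nil can be passed. The
// helper and variable names are hypothetical.
func examplePaymentResults() (PaymentResult, PaymentResult, error) {
	succeeded, err := NewPaymentResult(PaymentResultCodePaymentSuccess, nil)
	if err != nil {
		return PaymentResult{}, PaymentResult{}, err
	}
	failed, err := NewPaymentResult(PaymentResultCodePaymentUnderfunded, nil)
	if err != nil {
		return PaymentResult{}, PaymentResult{}, err
	}
	// failed.Code.String() yields "PaymentResultCodePaymentUnderfunded".
	return succeeded, failed, nil
}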
+func NewPaymentResult(code PaymentResultCode, value interface{}) (result PaymentResult, err error) { + result.Code = code + switch PaymentResultCode(code) { + case PaymentResultCodePaymentSuccess: + // void + default: + // void + } + return } -// CreatePassiveOfferOp is an XDR Struct defines as: -// -// struct CreatePassiveOfferOp -// { -// Asset selling; // A -// Asset buying; // B -// int64 amount; // amount taker gets. if set to 0, delete the offer -// Price price; // cost of A in terms of B -// }; -// -type CreatePassiveOfferOp struct { - Selling Asset - Buying Asset - Amount Int64 - Price Price +// EncodeTo encodes this value using the Encoder. +func (u PaymentResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch PaymentResultCode(u.Code) { + case PaymentResultCodePaymentSuccess: + // Void + return nil + default: + // Void + return nil + } } -// SetOptionsOp is an XDR Struct defines as: -// -// struct SetOptionsOp -// { -// AccountID* inflationDest; // sets the inflation destination -// -// uint32* clearFlags; // which flags to clear -// uint32* setFlags; // which flags to set -// -// // account threshold manipulation -// uint32* masterWeight; // weight of the master account -// uint32* lowThreshold; -// uint32* medThreshold; -// uint32* highThreshold; -// -// string32* homeDomain; // sets the home domain -// -// // Add, update or remove a signer for the account -// // signer is deleted if the weight is 0 -// Signer* signer; -// }; -// -type SetOptionsOp struct { - InflationDest *AccountId - ClearFlags *Uint32 - SetFlags *Uint32 - MasterWeight *Uint32 - LowThreshold *Uint32 - MedThreshold *Uint32 - HighThreshold *Uint32 - HomeDomain *String32 - Signer *Signer +var _ decoderFrom = (*PaymentResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *PaymentResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PaymentResultCode: %s", err) + } + switch PaymentResultCode(u.Code) { + case PaymentResultCodePaymentSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PaymentResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PaymentResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// ChangeTrustOp is an XDR Struct defines as: +var ( + _ encoding.BinaryMarshaler = (*PaymentResult)(nil) + _ encoding.BinaryUnmarshaler = (*PaymentResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s PaymentResult) xdrType() {} + +var _ xdrType = (*PaymentResult)(nil) + +// PathPaymentStrictReceiveResultCode is an XDR Enum defines as: // -// struct ChangeTrustOp +// enum PathPaymentStrictReceiveResultCode // { -// Asset line; +// // codes considered as "success" for the operation +// PATH_PAYMENT_STRICT_RECEIVE_SUCCESS = 0, // success // -// // if limit is set to 0, deletes the trust line -// int64 limit; +// // codes considered as "failure" for the operation +// PATH_PAYMENT_STRICT_RECEIVE_MALFORMED = -1, // bad input +// PATH_PAYMENT_STRICT_RECEIVE_UNDERFUNDED = +// -2, // not enough funds in source account +// PATH_PAYMENT_STRICT_RECEIVE_SRC_NO_TRUST = +// -3, // no trust line on source account +// PATH_PAYMENT_STRICT_RECEIVE_SRC_NOT_AUTHORIZED = +// -4, // source not authorized to transfer +// PATH_PAYMENT_STRICT_RECEIVE_NO_DESTINATION = +// -5, // destination account does not exist +// PATH_PAYMENT_STRICT_RECEIVE_NO_TRUST = +// -6, // dest missing a trust line for asset +// PATH_PAYMENT_STRICT_RECEIVE_NOT_AUTHORIZED = +// -7, // dest not authorized to hold asset +// PATH_PAYMENT_STRICT_RECEIVE_LINE_FULL = +// -8, // dest would go above their limit +// PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER = -9, // missing issuer on one asset +// PATH_PAYMENT_STRICT_RECEIVE_TOO_FEW_OFFERS = +// -10, // not enough offers to satisfy path +// PATH_PAYMENT_STRICT_RECEIVE_OFFER_CROSS_SELF = +// -11, // would cross one of its own offers +// PATH_PAYMENT_STRICT_RECEIVE_OVER_SENDMAX = -12 // could not satisfy sendmax // }; // -type ChangeTrustOp struct { - Line Asset - Limit Int64 -} +type PathPaymentStrictReceiveResultCode int32 -// AllowTrustOpAsset is an XDR NestedUnion defines as: -// -// union switch (AssetType type) -// { -// // ASSET_TYPE_NATIVE is not allowed -// case ASSET_TYPE_CREDIT_ALPHANUM4: -// opaque assetCode4[4]; -// -// case ASSET_TYPE_CREDIT_ALPHANUM12: -// opaque assetCode12[12]; -// -// // add other asset types here in the future -// } -// -type AllowTrustOpAsset struct { - Type AssetType - AssetCode4 *[4]byte - AssetCode12 *[12]byte +const ( + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess PathPaymentStrictReceiveResultCode = 0 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveMalformed PathPaymentStrictReceiveResultCode = -1 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveUnderfunded PathPaymentStrictReceiveResultCode = -2 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSrcNoTrust PathPaymentStrictReceiveResultCode = -3 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSrcNotAuthorized PathPaymentStrictReceiveResultCode = -4 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoDestination PathPaymentStrictReceiveResultCode = -5 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoTrust PathPaymentStrictReceiveResultCode = -6 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNotAuthorized PathPaymentStrictReceiveResultCode = -7 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveLineFull PathPaymentStrictReceiveResultCode = -8 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoIssuer PathPaymentStrictReceiveResultCode = -9 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveTooFewOffers PathPaymentStrictReceiveResultCode = -10 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveOfferCrossSelf PathPaymentStrictReceiveResultCode = -11 + PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveOverSendmax PathPaymentStrictReceiveResultCode = -12 +) + 
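// Illustrative sketch only (not generator output): PATH_PAYMENT_STRICT_RECEIVE_SUCCESS is
// the only non-negative code, so a single comparison separates success from the failure
// codes; the helper name is hypothetical.
func isPathPaymentStrictReceiveSuccess(code PathPaymentStrictReceiveResultCode) bool {
	return code == PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess
}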
+var pathPaymentStrictReceiveResultCodeMap = map[int32]string{ + 0: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess", + -1: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveMalformed", + -2: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveUnderfunded", + -3: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSrcNoTrust", + -4: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSrcNotAuthorized", + -5: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoDestination", + -6: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoTrust", + -7: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNotAuthorized", + -8: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveLineFull", + -9: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoIssuer", + -10: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveTooFewOffers", + -11: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveOfferCrossSelf", + -12: "PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveOverSendmax", } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u AllowTrustOpAsset) SwitchFieldName() string { - return "Type" +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for PathPaymentStrictReceiveResultCode +func (e PathPaymentStrictReceiveResultCode) ValidEnum(v int32) bool { + _, ok := pathPaymentStrictReceiveResultCodeMap[v] + return ok } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of AllowTrustOpAsset -func (u AllowTrustOpAsset) ArmForSwitch(sw int32) (string, bool) { - switch AssetType(sw) { - case AssetTypeAssetTypeCreditAlphanum4: - return "AssetCode4", true - case AssetTypeAssetTypeCreditAlphanum12: - return "AssetCode12", true - } - return "-", false +// String returns the name of `e` +func (e PathPaymentStrictReceiveResultCode) String() string { + name, _ := pathPaymentStrictReceiveResultCodeMap[int32(e)] + return name } -// NewAllowTrustOpAsset creates a new AllowTrustOpAsset. -func NewAllowTrustOpAsset(aType AssetType, value interface{}) (result AllowTrustOpAsset, err error) { - result.Type = aType - switch AssetType(aType) { - case AssetTypeAssetTypeCreditAlphanum4: - tv, ok := value.([4]byte) - if !ok { - err = fmt.Errorf("invalid value, must be [4]byte") - return - } - result.AssetCode4 = &tv - case AssetTypeAssetTypeCreditAlphanum12: - tv, ok := value.([12]byte) - if !ok { - err = fmt.Errorf("invalid value, must be [12]byte") - return - } - result.AssetCode12 = &tv +// EncodeTo encodes this value using the Encoder. +func (e PathPaymentStrictReceiveResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := pathPaymentStrictReceiveResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid PathPaymentStrictReceiveResultCode enum value", e) } - return + _, err := enc.EncodeInt(int32(e)) + return err } -// MustAssetCode4 retrieves the AssetCode4 value from the union, -// panicing if the value is not set. -func (u AllowTrustOpAsset) MustAssetCode4() [4]byte { - val, ok := u.GetAssetCode4() +var _ decoderFrom = (*PathPaymentStrictReceiveResultCode)(nil) - if !ok { - panic("arm AssetCode4 is not set") +// DecodeFrom decodes this value using the Decoder. 
+func (e *PathPaymentStrictReceiveResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictReceiveResultCode: %s", err) + } + if _, ok := pathPaymentStrictReceiveResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid PathPaymentStrictReceiveResultCode enum value", v) } + *e = PathPaymentStrictReceiveResultCode(v) + return n, nil +} - return val +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PathPaymentStrictReceiveResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetAssetCode4 retrieves the AssetCode4 value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u AllowTrustOpAsset) GetAssetCode4() (result [4]byte, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PathPaymentStrictReceiveResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if armName == "AssetCode4" { - result = *u.AssetCode4 - ok = true - } +var ( + _ encoding.BinaryMarshaler = (*PathPaymentStrictReceiveResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*PathPaymentStrictReceiveResultCode)(nil) +) - return -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PathPaymentStrictReceiveResultCode) xdrType() {} -// MustAssetCode12 retrieves the AssetCode12 value from the union, -// panicing if the value is not set. -func (u AllowTrustOpAsset) MustAssetCode12() [12]byte { - val, ok := u.GetAssetCode12() +var _ xdrType = (*PathPaymentStrictReceiveResultCode)(nil) - if !ok { - panic("arm AssetCode12 is not set") - } +// SimplePaymentResult is an XDR Struct defines as: +// +// struct SimplePaymentResult +// { +// AccountID destination; +// Asset asset; +// int64 amount; +// }; +// +type SimplePaymentResult struct { + Destination AccountId + Asset Asset + Amount Int64 +} - return val +// EncodeTo encodes this value using the Encoder. +func (s *SimplePaymentResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Destination.EncodeTo(e); err != nil { + return err + } + if err = s.Asset.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + return nil } -// GetAssetCode12 retrieves the AssetCode12 value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u AllowTrustOpAsset) GetAssetCode12() (result [12]byte, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*SimplePaymentResult)(nil) - if armName == "AssetCode12" { - result = *u.AssetCode12 - ok = true +// DecodeFrom decodes this value using the Decoder. +func (s *SimplePaymentResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Destination.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.Asset.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s SimplePaymentResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// AllowTrustOp is an XDR Struct defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SimplePaymentResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SimplePaymentResult)(nil) + _ encoding.BinaryUnmarshaler = (*SimplePaymentResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SimplePaymentResult) xdrType() {} + +var _ xdrType = (*SimplePaymentResult)(nil) + +// PathPaymentStrictReceiveResultSuccess is an XDR NestedStruct defines as: // -// struct AllowTrustOp -// { -// AccountID trustor; -// union switch (AssetType type) +// struct // { -// // ASSET_TYPE_NATIVE is not allowed -// case ASSET_TYPE_CREDIT_ALPHANUM4: -// opaque assetCode4[4]; -// -// case ASSET_TYPE_CREDIT_ALPHANUM12: -// opaque assetCode12[12]; -// -// // add other asset types here in the future +// ClaimAtom offers<>; +// SimplePaymentResult last; // } -// asset; -// -// bool authorize; -// }; // -type AllowTrustOp struct { - Trustor AccountId - Asset AllowTrustOpAsset - Authorize bool +type PathPaymentStrictReceiveResultSuccess struct { + Offers []ClaimAtom + Last SimplePaymentResult } -// ManageDataOp is an XDR Struct defines as: -// -// struct ManageDataOp -// { -// string64 dataName; -// DataValue* dataValue; // set to null to clear -// }; -// -type ManageDataOp struct { - DataName String64 - DataValue *DataValue +// EncodeTo encodes this value using the Encoder. +func (s *PathPaymentStrictReceiveResultSuccess) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeUint(uint32(len(s.Offers))); err != nil { + return err + } + for i := 0; i < len(s.Offers); i++ { + if err = s.Offers[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.Last.EncodeTo(e); err != nil { + return err + } + return nil } -// OperationBody is an XDR NestedUnion defines as: +var _ decoderFrom = (*PathPaymentStrictReceiveResultSuccess)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *PathPaymentStrictReceiveResultSuccess) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimAtom: %s", err) + } + s.Offers = nil + if l > 0 { + s.Offers = make([]ClaimAtom, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Offers[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimAtom: %s", err) + } + } + } + nTmp, err = s.Last.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SimplePaymentResult: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PathPaymentStrictReceiveResultSuccess) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
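// Illustrative sketch only (not generator output): Offers maps the XDR variable-length
// array (ClaimAtom offers<>), which the EncodeTo/DecodeFrom methods above handle with a
// 4-byte length prefix. The helper name is hypothetical.
func claimedLiquidityPools(success PathPaymentStrictReceiveResultSuccess) []PoolId {
	var pools []PoolId
	for _, offer := range success.Offers {
		if lp, ok := offer.GetLiquidityPool(); ok {
			pools = append(pools, lp.LiquidityPoolId)
		}
	}
	return pools
}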
+func (s *PathPaymentStrictReceiveResultSuccess) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*PathPaymentStrictReceiveResultSuccess)(nil) + _ encoding.BinaryUnmarshaler = (*PathPaymentStrictReceiveResultSuccess)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PathPaymentStrictReceiveResultSuccess) xdrType() {} + +var _ xdrType = (*PathPaymentStrictReceiveResultSuccess)(nil) + +// PathPaymentStrictReceiveResult is an XDR Union defines as: // -// union switch (OperationType type) +// union PathPaymentStrictReceiveResult switch ( +// PathPaymentStrictReceiveResultCode code) +// { +// case PATH_PAYMENT_STRICT_RECEIVE_SUCCESS: +// struct // { -// case CREATE_ACCOUNT: -// CreateAccountOp createAccountOp; -// case PAYMENT: -// PaymentOp paymentOp; -// case PATH_PAYMENT: -// PathPaymentOp pathPaymentOp; -// case MANAGE_OFFER: -// ManageOfferOp manageOfferOp; -// case CREATE_PASSIVE_OFFER: -// CreatePassiveOfferOp createPassiveOfferOp; -// case SET_OPTIONS: -// SetOptionsOp setOptionsOp; -// case CHANGE_TRUST: -// ChangeTrustOp changeTrustOp; -// case ALLOW_TRUST: -// AllowTrustOp allowTrustOp; -// case ACCOUNT_MERGE: -// AccountID destination; -// case INFLATION: -// void; -// case MANAGE_DATA: -// ManageDataOp manageDataOp; -// } +// ClaimAtom offers<>; +// SimplePaymentResult last; +// } success; +// case PATH_PAYMENT_STRICT_RECEIVE_NO_ISSUER: +// Asset noIssuer; // the asset that caused the error +// default: +// void; +// }; // -type OperationBody struct { - Type OperationType - CreateAccountOp *CreateAccountOp - PaymentOp *PaymentOp - PathPaymentOp *PathPaymentOp - ManageOfferOp *ManageOfferOp - CreatePassiveOfferOp *CreatePassiveOfferOp - SetOptionsOp *SetOptionsOp - ChangeTrustOp *ChangeTrustOp - AllowTrustOp *AllowTrustOp - Destination *AccountId - ManageDataOp *ManageDataOp +type PathPaymentStrictReceiveResult struct { + Code PathPaymentStrictReceiveResultCode + Success *PathPaymentStrictReceiveResultSuccess + NoIssuer *Asset } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u OperationBody) SwitchFieldName() string { - return "Type" +func (u PathPaymentStrictReceiveResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of OperationBody -func (u OperationBody) ArmForSwitch(sw int32) (string, bool) { - switch OperationType(sw) { - case OperationTypeCreateAccount: - return "CreateAccountOp", true - case OperationTypePayment: - return "PaymentOp", true - case OperationTypePathPayment: - return "PathPaymentOp", true - case OperationTypeManageOffer: - return "ManageOfferOp", true - case OperationTypeCreatePassiveOffer: - return "CreatePassiveOfferOp", true - case OperationTypeSetOptions: - return "SetOptionsOp", true - case OperationTypeChangeTrust: - return "ChangeTrustOp", true - case OperationTypeAllowTrust: - return "AllowTrustOp", true - case OperationTypeAccountMerge: - return "Destination", true - case OperationTypeInflation: +// the value for an instance of PathPaymentStrictReceiveResult +func (u PathPaymentStrictReceiveResult) ArmForSwitch(sw int32) (string, bool) { + switch PathPaymentStrictReceiveResultCode(sw) { + case PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess: + return "Success", true + case 
PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoIssuer: + return "NoIssuer", true + default: return "", true - case OperationTypeManageData: - return "ManageDataOp", true } - return "-", false } - -// NewOperationBody creates a new OperationBody. -func NewOperationBody(aType OperationType, value interface{}) (result OperationBody, err error) { - result.Type = aType - switch OperationType(aType) { - case OperationTypeCreateAccount: - tv, ok := value.(CreateAccountOp) - if !ok { - err = fmt.Errorf("invalid value, must be CreateAccountOp") - return - } - result.CreateAccountOp = &tv - case OperationTypePayment: - tv, ok := value.(PaymentOp) - if !ok { - err = fmt.Errorf("invalid value, must be PaymentOp") - return - } - result.PaymentOp = &tv - case OperationTypePathPayment: - tv, ok := value.(PathPaymentOp) - if !ok { - err = fmt.Errorf("invalid value, must be PathPaymentOp") - return - } - result.PathPaymentOp = &tv - case OperationTypeManageOffer: - tv, ok := value.(ManageOfferOp) - if !ok { - err = fmt.Errorf("invalid value, must be ManageOfferOp") - return - } - result.ManageOfferOp = &tv - case OperationTypeCreatePassiveOffer: - tv, ok := value.(CreatePassiveOfferOp) - if !ok { - err = fmt.Errorf("invalid value, must be CreatePassiveOfferOp") - return - } - result.CreatePassiveOfferOp = &tv - case OperationTypeSetOptions: - tv, ok := value.(SetOptionsOp) - if !ok { - err = fmt.Errorf("invalid value, must be SetOptionsOp") - return - } - result.SetOptionsOp = &tv - case OperationTypeChangeTrust: - tv, ok := value.(ChangeTrustOp) - if !ok { - err = fmt.Errorf("invalid value, must be ChangeTrustOp") - return - } - result.ChangeTrustOp = &tv - case OperationTypeAllowTrust: - tv, ok := value.(AllowTrustOp) - if !ok { - err = fmt.Errorf("invalid value, must be AllowTrustOp") - return - } - result.AllowTrustOp = &tv - case OperationTypeAccountMerge: - tv, ok := value.(AccountId) + +// NewPathPaymentStrictReceiveResult creates a new PathPaymentStrictReceiveResult. +func NewPathPaymentStrictReceiveResult(code PathPaymentStrictReceiveResultCode, value interface{}) (result PathPaymentStrictReceiveResult, err error) { + result.Code = code + switch PathPaymentStrictReceiveResultCode(code) { + case PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess: + tv, ok := value.(PathPaymentStrictReceiveResultSuccess) if !ok { - err = fmt.Errorf("invalid value, must be AccountId") + err = fmt.Errorf("invalid value, must be PathPaymentStrictReceiveResultSuccess") return } - result.Destination = &tv - case OperationTypeInflation: - // void - case OperationTypeManageData: - tv, ok := value.(ManageDataOp) + result.Success = &tv + case PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoIssuer: + tv, ok := value.(Asset) if !ok { - err = fmt.Errorf("invalid value, must be ManageDataOp") + err = fmt.Errorf("invalid value, must be Asset") return } - result.ManageDataOp = &tv + result.NoIssuer = &tv + default: + // void } return } -// MustCreateAccountOp retrieves the CreateAccountOp value from the union, +// MustSuccess retrieves the Success value from the union, // panicing if the value is not set. 
-func (u OperationBody) MustCreateAccountOp() CreateAccountOp { - val, ok := u.GetCreateAccountOp() +func (u PathPaymentStrictReceiveResult) MustSuccess() PathPaymentStrictReceiveResultSuccess { + val, ok := u.GetSuccess() if !ok { - panic("arm CreateAccountOp is not set") + panic("arm Success is not set") } return val } -// GetCreateAccountOp retrieves the CreateAccountOp value from the union, +// GetSuccess retrieves the Success value from the union, // returning ok if the union's switch indicated the value is valid. -func (u OperationBody) GetCreateAccountOp() (result CreateAccountOp, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +func (u PathPaymentStrictReceiveResult) GetSuccess() (result PathPaymentStrictReceiveResultSuccess, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) - if armName == "CreateAccountOp" { - result = *u.CreateAccountOp + if armName == "Success" { + result = *u.Success ok = true } return } -// MustPaymentOp retrieves the PaymentOp value from the union, +// MustNoIssuer retrieves the NoIssuer value from the union, // panicing if the value is not set. -func (u OperationBody) MustPaymentOp() PaymentOp { - val, ok := u.GetPaymentOp() +func (u PathPaymentStrictReceiveResult) MustNoIssuer() Asset { + val, ok := u.GetNoIssuer() if !ok { - panic("arm PaymentOp is not set") + panic("arm NoIssuer is not set") } return val } -// GetPaymentOp retrieves the PaymentOp value from the union, +// GetNoIssuer retrieves the NoIssuer value from the union, // returning ok if the union's switch indicated the value is valid. -func (u OperationBody) GetPaymentOp() (result PaymentOp, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +func (u PathPaymentStrictReceiveResult) GetNoIssuer() (result Asset, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) - if armName == "PaymentOp" { - result = *u.PaymentOp + if armName == "NoIssuer" { + result = *u.NoIssuer ok = true } return } -// MustPathPaymentOp retrieves the PathPaymentOp value from the union, -// panicing if the value is not set. -func (u OperationBody) MustPathPaymentOp() PathPaymentOp { - val, ok := u.GetPathPaymentOp() - - if !ok { - panic("arm PathPaymentOp is not set") +// EncodeTo encodes this value using the Encoder. +func (u PathPaymentStrictReceiveResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch PathPaymentStrictReceiveResultCode(u.Code) { + case PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess: + if err = (*u.Success).EncodeTo(e); err != nil { + return err + } + return nil + case PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoIssuer: + if err = (*u.NoIssuer).EncodeTo(e); err != nil { + return err + } + return nil + default: + // Void + return nil } - - return val } -// GetPathPaymentOp retrieves the PathPaymentOp value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationBody) GetPathPaymentOp() (result PathPaymentOp, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*PathPaymentStrictReceiveResult)(nil) - if armName == "PathPaymentOp" { - result = *u.PathPaymentOp - ok = true +// DecodeFrom decodes this value using the Decoder. 
+func (u *PathPaymentStrictReceiveResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictReceiveResultCode: %s", err) + } + switch PathPaymentStrictReceiveResultCode(u.Code) { + case PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess: + u.Success = new(PathPaymentStrictReceiveResultSuccess) + nTmp, err = (*u.Success).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictReceiveResultSuccess: %s", err) + } + return n, nil + case PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoIssuer: + u.NoIssuer = new(Asset) + nTmp, err = (*u.NoIssuer).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + return n, nil + default: + // Void + return n, nil } +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PathPaymentStrictReceiveResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustManageOfferOp retrieves the ManageOfferOp value from the union, -// panicing if the value is not set. -func (u OperationBody) MustManageOfferOp() ManageOfferOp { - val, ok := u.GetManageOfferOp() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PathPaymentStrictReceiveResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm ManageOfferOp is not set") - } +var ( + _ encoding.BinaryMarshaler = (*PathPaymentStrictReceiveResult)(nil) + _ encoding.BinaryUnmarshaler = (*PathPaymentStrictReceiveResult)(nil) +) - return val -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PathPaymentStrictReceiveResult) xdrType() {} -// GetManageOfferOp retrieves the ManageOfferOp value from the union, -// returning ok if the union's switch indicated the value is valid. 
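For readers skimming the generated boilerplate, here is a minimal sketch of how the PathPaymentStrictReceiveResult union above is meant to be used: the constructor type-checks the arm value against the result code, and the Get/Must accessors consult ArmForSwitch. This assumes the generated file lives in the repository's xdr package, imported as github.com/stellar/go/xdr; the zero-value success struct and Asset are placeholders, not realistic data.

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path for this generated package
)

func main() {
	// NewPathPaymentStrictReceiveResult type-checks the value against the arm
	// selected by the code, so passing the wrong Go type returns an error.
	res, err := xdr.NewPathPaymentStrictReceiveResult(
		xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveSuccess,
		xdr.PathPaymentStrictReceiveResultSuccess{},
	)
	if err != nil {
		panic(err)
	}

	// GetSuccess consults ArmForSwitch, so ok is true only when the
	// discriminant actually selects the Success arm.
	if success, ok := res.GetSuccess(); ok {
		fmt.Println("offers claimed:", len(success.Offers))
	}

	// The NO_ISSUER arm carries the offending Asset instead; MustNoIssuer is
	// safe here because that arm is known to be set.
	noIssuer, err := xdr.NewPathPaymentStrictReceiveResult(
		xdr.PathPaymentStrictReceiveResultCodePathPaymentStrictReceiveNoIssuer,
		xdr.Asset{}, // zero value stands in for a real asset in this sketch
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("no-issuer asset: %#v\n", noIssuer.MustNoIssuer())
}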
-func (u OperationBody) GetManageOfferOp() (result ManageOfferOp, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ xdrType = (*PathPaymentStrictReceiveResult)(nil) - if armName == "ManageOfferOp" { - result = *u.ManageOfferOp - ok = true - } +// PathPaymentStrictSendResultCode is an XDR Enum defines as: +// +// enum PathPaymentStrictSendResultCode +// { +// // codes considered as "success" for the operation +// PATH_PAYMENT_STRICT_SEND_SUCCESS = 0, // success +// +// // codes considered as "failure" for the operation +// PATH_PAYMENT_STRICT_SEND_MALFORMED = -1, // bad input +// PATH_PAYMENT_STRICT_SEND_UNDERFUNDED = +// -2, // not enough funds in source account +// PATH_PAYMENT_STRICT_SEND_SRC_NO_TRUST = +// -3, // no trust line on source account +// PATH_PAYMENT_STRICT_SEND_SRC_NOT_AUTHORIZED = +// -4, // source not authorized to transfer +// PATH_PAYMENT_STRICT_SEND_NO_DESTINATION = +// -5, // destination account does not exist +// PATH_PAYMENT_STRICT_SEND_NO_TRUST = +// -6, // dest missing a trust line for asset +// PATH_PAYMENT_STRICT_SEND_NOT_AUTHORIZED = +// -7, // dest not authorized to hold asset +// PATH_PAYMENT_STRICT_SEND_LINE_FULL = -8, // dest would go above their limit +// PATH_PAYMENT_STRICT_SEND_NO_ISSUER = -9, // missing issuer on one asset +// PATH_PAYMENT_STRICT_SEND_TOO_FEW_OFFERS = +// -10, // not enough offers to satisfy path +// PATH_PAYMENT_STRICT_SEND_OFFER_CROSS_SELF = +// -11, // would cross one of its own offers +// PATH_PAYMENT_STRICT_SEND_UNDER_DESTMIN = -12 // could not satisfy destMin +// }; +// +type PathPaymentStrictSendResultCode int32 - return +const ( + PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess PathPaymentStrictSendResultCode = 0 + PathPaymentStrictSendResultCodePathPaymentStrictSendMalformed PathPaymentStrictSendResultCode = -1 + PathPaymentStrictSendResultCodePathPaymentStrictSendUnderfunded PathPaymentStrictSendResultCode = -2 + PathPaymentStrictSendResultCodePathPaymentStrictSendSrcNoTrust PathPaymentStrictSendResultCode = -3 + PathPaymentStrictSendResultCodePathPaymentStrictSendSrcNotAuthorized PathPaymentStrictSendResultCode = -4 + PathPaymentStrictSendResultCodePathPaymentStrictSendNoDestination PathPaymentStrictSendResultCode = -5 + PathPaymentStrictSendResultCodePathPaymentStrictSendNoTrust PathPaymentStrictSendResultCode = -6 + PathPaymentStrictSendResultCodePathPaymentStrictSendNotAuthorized PathPaymentStrictSendResultCode = -7 + PathPaymentStrictSendResultCodePathPaymentStrictSendLineFull PathPaymentStrictSendResultCode = -8 + PathPaymentStrictSendResultCodePathPaymentStrictSendNoIssuer PathPaymentStrictSendResultCode = -9 + PathPaymentStrictSendResultCodePathPaymentStrictSendTooFewOffers PathPaymentStrictSendResultCode = -10 + PathPaymentStrictSendResultCodePathPaymentStrictSendOfferCrossSelf PathPaymentStrictSendResultCode = -11 + PathPaymentStrictSendResultCodePathPaymentStrictSendUnderDestmin PathPaymentStrictSendResultCode = -12 +) + +var pathPaymentStrictSendResultCodeMap = map[int32]string{ + 0: "PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess", + -1: "PathPaymentStrictSendResultCodePathPaymentStrictSendMalformed", + -2: "PathPaymentStrictSendResultCodePathPaymentStrictSendUnderfunded", + -3: "PathPaymentStrictSendResultCodePathPaymentStrictSendSrcNoTrust", + -4: "PathPaymentStrictSendResultCodePathPaymentStrictSendSrcNotAuthorized", + -5: "PathPaymentStrictSendResultCodePathPaymentStrictSendNoDestination", + -6: "PathPaymentStrictSendResultCodePathPaymentStrictSendNoTrust", + -7: 
"PathPaymentStrictSendResultCodePathPaymentStrictSendNotAuthorized", + -8: "PathPaymentStrictSendResultCodePathPaymentStrictSendLineFull", + -9: "PathPaymentStrictSendResultCodePathPaymentStrictSendNoIssuer", + -10: "PathPaymentStrictSendResultCodePathPaymentStrictSendTooFewOffers", + -11: "PathPaymentStrictSendResultCodePathPaymentStrictSendOfferCrossSelf", + -12: "PathPaymentStrictSendResultCodePathPaymentStrictSendUnderDestmin", } -// MustCreatePassiveOfferOp retrieves the CreatePassiveOfferOp value from the union, -// panicing if the value is not set. -func (u OperationBody) MustCreatePassiveOfferOp() CreatePassiveOfferOp { - val, ok := u.GetCreatePassiveOfferOp() +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for PathPaymentStrictSendResultCode +func (e PathPaymentStrictSendResultCode) ValidEnum(v int32) bool { + _, ok := pathPaymentStrictSendResultCodeMap[v] + return ok +} - if !ok { - panic("arm CreatePassiveOfferOp is not set") - } +// String returns the name of `e` +func (e PathPaymentStrictSendResultCode) String() string { + name, _ := pathPaymentStrictSendResultCodeMap[int32(e)] + return name +} - return val +// EncodeTo encodes this value using the Encoder. +func (e PathPaymentStrictSendResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := pathPaymentStrictSendResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid PathPaymentStrictSendResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// GetCreatePassiveOfferOp retrieves the CreatePassiveOfferOp value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationBody) GetCreatePassiveOfferOp() (result CreatePassiveOfferOp, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*PathPaymentStrictSendResultCode)(nil) - if armName == "CreatePassiveOfferOp" { - result = *u.CreatePassiveOfferOp - ok = true +// DecodeFrom decodes this value using the Decoder. +func (e *PathPaymentStrictSendResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictSendResultCode: %s", err) + } + if _, ok := pathPaymentStrictSendResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid PathPaymentStrictSendResultCode enum value", v) } + *e = PathPaymentStrictSendResultCode(v) + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PathPaymentStrictSendResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustSetOptionsOp retrieves the SetOptionsOp value from the union, -// panicing if the value is not set. -func (u OperationBody) MustSetOptionsOp() SetOptionsOp { - val, ok := u.GetSetOptionsOp() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PathPaymentStrictSendResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm SetOptionsOp is not set") - } +var ( + _ encoding.BinaryMarshaler = (*PathPaymentStrictSendResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*PathPaymentStrictSendResultCode)(nil) +) - return val +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s PathPaymentStrictSendResultCode) xdrType() {} + +var _ xdrType = (*PathPaymentStrictSendResultCode)(nil) + +// PathPaymentStrictSendResultSuccess is an XDR NestedStruct defines as: +// +// struct +// { +// ClaimAtom offers<>; +// SimplePaymentResult last; +// } +// +type PathPaymentStrictSendResultSuccess struct { + Offers []ClaimAtom + Last SimplePaymentResult } -// GetSetOptionsOp retrieves the SetOptionsOp value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationBody) GetSetOptionsOp() (result SetOptionsOp, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// EncodeTo encodes this value using the Encoder. +func (s *PathPaymentStrictSendResultSuccess) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeUint(uint32(len(s.Offers))); err != nil { + return err + } + for i := 0; i < len(s.Offers); i++ { + if err = s.Offers[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.Last.EncodeTo(e); err != nil { + return err + } + return nil +} - if armName == "SetOptionsOp" { - result = *u.SetOptionsOp - ok = true +var _ decoderFrom = (*PathPaymentStrictSendResultSuccess)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *PathPaymentStrictSendResultSuccess) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimAtom: %s", err) } + s.Offers = nil + if l > 0 { + s.Offers = make([]ClaimAtom, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.Offers[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimAtom: %s", err) + } + } + } + nTmp, err = s.Last.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SimplePaymentResult: %s", err) + } + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PathPaymentStrictSendResultSuccess) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustChangeTrustOp retrieves the ChangeTrustOp value from the union, -// panicing if the value is not set. -func (u OperationBody) MustChangeTrustOp() ChangeTrustOp { - val, ok := u.GetChangeTrustOp() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PathPaymentStrictSendResultSuccess) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm ChangeTrustOp is not set") - } +var ( + _ encoding.BinaryMarshaler = (*PathPaymentStrictSendResultSuccess)(nil) + _ encoding.BinaryUnmarshaler = (*PathPaymentStrictSendResultSuccess)(nil) +) - return val +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
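The PathPaymentStrictSendResultCode helpers above (String, ValidEnum, EncodeTo/DecodeFrom and the BinaryMarshaler pair) can be exercised directly. A small sketch, assuming the same github.com/stellar/go/xdr import path and that the encoder follows standard XDR (4-byte big-endian signed integers):

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	code := xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendUnderDestmin

	// String and ValidEnum are both backed by the enum's name map.
	fmt.Println(code.String())               // ...PathPaymentStrictSendUnderDestmin
	fmt.Println(code.ValidEnum(int32(code))) // true
	fmt.Println(code.ValidEnum(-13))         // false: not a defined code

	// MarshalBinary writes the code as an XDR signed 32-bit integer
	// (big-endian per the XDR spec); UnmarshalBinary rejects values
	// that are not in the enum.
	raw, err := code.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", raw) // ff ff ff f4 (-12)

	var decoded xdr.PathPaymentStrictSendResultCode
	if err := decoded.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Println(decoded == code) // true
}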
+func (s PathPaymentStrictSendResultSuccess) xdrType() {} + +var _ xdrType = (*PathPaymentStrictSendResultSuccess)(nil) + +// PathPaymentStrictSendResult is an XDR Union defines as: +// +// union PathPaymentStrictSendResult switch (PathPaymentStrictSendResultCode code) +// { +// case PATH_PAYMENT_STRICT_SEND_SUCCESS: +// struct +// { +// ClaimAtom offers<>; +// SimplePaymentResult last; +// } success; +// case PATH_PAYMENT_STRICT_SEND_NO_ISSUER: +// Asset noIssuer; // the asset that caused the error +// default: +// void; +// }; +// +type PathPaymentStrictSendResult struct { + Code PathPaymentStrictSendResultCode + Success *PathPaymentStrictSendResultSuccess + NoIssuer *Asset } -// GetChangeTrustOp retrieves the ChangeTrustOp value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationBody) GetChangeTrustOp() (result ChangeTrustOp, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u PathPaymentStrictSendResult) SwitchFieldName() string { + return "Code" +} - if armName == "ChangeTrustOp" { - result = *u.ChangeTrustOp - ok = true +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of PathPaymentStrictSendResult +func (u PathPaymentStrictSendResult) ArmForSwitch(sw int32) (string, bool) { + switch PathPaymentStrictSendResultCode(sw) { + case PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess: + return "Success", true + case PathPaymentStrictSendResultCodePathPaymentStrictSendNoIssuer: + return "NoIssuer", true + default: + return "", true } +} +// NewPathPaymentStrictSendResult creates a new PathPaymentStrictSendResult. +func NewPathPaymentStrictSendResult(code PathPaymentStrictSendResultCode, value interface{}) (result PathPaymentStrictSendResult, err error) { + result.Code = code + switch PathPaymentStrictSendResultCode(code) { + case PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess: + tv, ok := value.(PathPaymentStrictSendResultSuccess) + if !ok { + err = fmt.Errorf("invalid value, must be PathPaymentStrictSendResultSuccess") + return + } + result.Success = &tv + case PathPaymentStrictSendResultCodePathPaymentStrictSendNoIssuer: + tv, ok := value.(Asset) + if !ok { + err = fmt.Errorf("invalid value, must be Asset") + return + } + result.NoIssuer = &tv + default: + // void + } return } -// MustAllowTrustOp retrieves the AllowTrustOp value from the union, +// MustSuccess retrieves the Success value from the union, // panicing if the value is not set. -func (u OperationBody) MustAllowTrustOp() AllowTrustOp { - val, ok := u.GetAllowTrustOp() +func (u PathPaymentStrictSendResult) MustSuccess() PathPaymentStrictSendResultSuccess { + val, ok := u.GetSuccess() if !ok { - panic("arm AllowTrustOp is not set") + panic("arm Success is not set") } return val } -// GetAllowTrustOp retrieves the AllowTrustOp value from the union, +// GetSuccess retrieves the Success value from the union, // returning ok if the union's switch indicated the value is valid. 
-func (u OperationBody) GetAllowTrustOp() (result AllowTrustOp, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +func (u PathPaymentStrictSendResult) GetSuccess() (result PathPaymentStrictSendResultSuccess, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) - if armName == "AllowTrustOp" { - result = *u.AllowTrustOp + if armName == "Success" { + result = *u.Success ok = true } return } -// MustDestination retrieves the Destination value from the union, +// MustNoIssuer retrieves the NoIssuer value from the union, // panicing if the value is not set. -func (u OperationBody) MustDestination() AccountId { - val, ok := u.GetDestination() +func (u PathPaymentStrictSendResult) MustNoIssuer() Asset { + val, ok := u.GetNoIssuer() if !ok { - panic("arm Destination is not set") + panic("arm NoIssuer is not set") } return val } -// GetDestination retrieves the Destination value from the union, +// GetNoIssuer retrieves the NoIssuer value from the union, // returning ok if the union's switch indicated the value is valid. -func (u OperationBody) GetDestination() (result AccountId, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +func (u PathPaymentStrictSendResult) GetNoIssuer() (result Asset, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) - if armName == "Destination" { - result = *u.Destination + if armName == "NoIssuer" { + result = *u.NoIssuer ok = true } return } -// MustManageDataOp retrieves the ManageDataOp value from the union, -// panicing if the value is not set. -func (u OperationBody) MustManageDataOp() ManageDataOp { - val, ok := u.GetManageDataOp() +// EncodeTo encodes this value using the Encoder. +func (u PathPaymentStrictSendResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch PathPaymentStrictSendResultCode(u.Code) { + case PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess: + if err = (*u.Success).EncodeTo(e); err != nil { + return err + } + return nil + case PathPaymentStrictSendResultCodePathPaymentStrictSendNoIssuer: + if err = (*u.NoIssuer).EncodeTo(e); err != nil { + return err + } + return nil + default: + // Void + return nil + } +} - if !ok { - panic("arm ManageDataOp is not set") +var _ decoderFrom = (*PathPaymentStrictSendResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *PathPaymentStrictSendResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictSendResultCode: %s", err) + } + switch PathPaymentStrictSendResultCode(u.Code) { + case PathPaymentStrictSendResultCodePathPaymentStrictSendSuccess: + u.Success = new(PathPaymentStrictSendResultSuccess) + nTmp, err = (*u.Success).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictSendResultSuccess: %s", err) + } + return n, nil + case PathPaymentStrictSendResultCodePathPaymentStrictSendNoIssuer: + u.NoIssuer = new(Asset) + nTmp, err = (*u.NoIssuer).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Asset: %s", err) + } + return n, nil + default: + // Void + return n, nil } +} - return val +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s PathPaymentStrictSendResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetManageDataOp retrieves the ManageDataOp value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationBody) GetManageDataOp() (result ManageDataOp, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PathPaymentStrictSendResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if armName == "ManageDataOp" { - result = *u.ManageDataOp - ok = true - } +var ( + _ encoding.BinaryMarshaler = (*PathPaymentStrictSendResult)(nil) + _ encoding.BinaryUnmarshaler = (*PathPaymentStrictSendResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PathPaymentStrictSendResult) xdrType() {} - return -} +var _ xdrType = (*PathPaymentStrictSendResult)(nil) -// Operation is an XDR Struct defines as: +// ManageSellOfferResultCode is an XDR Enum defines as: // -// struct Operation +// enum ManageSellOfferResultCode // { -// // sourceAccount is the account used to run the operation -// // if not set, the runtime defaults to "sourceAccount" specified at -// // the transaction level -// AccountID* sourceAccount; +// // codes considered as "success" for the operation +// MANAGE_SELL_OFFER_SUCCESS = 0, // -// union switch (OperationType type) -// { -// case CREATE_ACCOUNT: -// CreateAccountOp createAccountOp; -// case PAYMENT: -// PaymentOp paymentOp; -// case PATH_PAYMENT: -// PathPaymentOp pathPaymentOp; -// case MANAGE_OFFER: -// ManageOfferOp manageOfferOp; -// case CREATE_PASSIVE_OFFER: -// CreatePassiveOfferOp createPassiveOfferOp; -// case SET_OPTIONS: -// SetOptionsOp setOptionsOp; -// case CHANGE_TRUST: -// ChangeTrustOp changeTrustOp; -// case ALLOW_TRUST: -// AllowTrustOp allowTrustOp; -// case ACCOUNT_MERGE: -// AccountID destination; -// case INFLATION: -// void; -// case MANAGE_DATA: -// ManageDataOp manageDataOp; -// } -// body; -// }; +// // codes considered as "failure" for the operation +// MANAGE_SELL_OFFER_MALFORMED = -1, // generated offer would be invalid +// MANAGE_SELL_OFFER_SELL_NO_TRUST = +// -2, // no trust line for what we're selling +// MANAGE_SELL_OFFER_BUY_NO_TRUST = -3, // no trust line for what we're buying +// MANAGE_SELL_OFFER_SELL_NOT_AUTHORIZED = -4, // not authorized to sell +// MANAGE_SELL_OFFER_BUY_NOT_AUTHORIZED = -5, // not authorized to buy +// MANAGE_SELL_OFFER_LINE_FULL = -6, // can't receive more of what it's buying +// MANAGE_SELL_OFFER_UNDERFUNDED = -7, // doesn't hold what it's trying to sell +// MANAGE_SELL_OFFER_CROSS_SELF = +// -8, // would cross an offer from the same user +// MANAGE_SELL_OFFER_SELL_NO_ISSUER = -9, // no issuer for what we're selling +// MANAGE_SELL_OFFER_BUY_NO_ISSUER = -10, // no issuer for what we're buying // -type Operation struct { - SourceAccount *AccountId - Body OperationBody -} - -// MemoType is an XDR Enum defines as: +// // update errors +// MANAGE_SELL_OFFER_NOT_FOUND = +// -11, // offerID does not match an existing offer // -// enum MemoType -// { -// MEMO_NONE = 0, -// MEMO_TEXT = 1, -// MEMO_ID = 2, -// MEMO_HASH = 3, -// MEMO_RETURN = 4 +// MANAGE_SELL_OFFER_LOW_RESERVE = +// -12 // not enough funds to create a new Offer // }; // -type MemoType int32 +type 
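Codes other than SUCCESS and NO_ISSUER land on the union's void default, which is easiest to see in a round trip: the wire encoding is just the result code. A sketch under the same import-path and XDR-encoding assumptions as the earlier examples:

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	// Every code other than SUCCESS and NO_ISSUER selects the void arm, so the
	// constructor accepts nil and the encoding is just the 4-byte code.
	res, err := xdr.NewPathPaymentStrictSendResult(
		xdr.PathPaymentStrictSendResultCodePathPaymentStrictSendTooFewOffers,
		nil,
	)
	if err != nil {
		panic(err)
	}

	raw, err := res.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", raw) // ff ff ff f6 (-10)

	var decoded xdr.PathPaymentStrictSendResult
	if err := decoded.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	_, ok := decoded.GetSuccess()
	fmt.Println(decoded.Code.String(), ok) // ...TooFewOffers false
}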
ManageSellOfferResultCode int32 const ( - MemoTypeMemoNone MemoType = 0 - MemoTypeMemoText MemoType = 1 - MemoTypeMemoId MemoType = 2 - MemoTypeMemoHash MemoType = 3 - MemoTypeMemoReturn MemoType = 4 + ManageSellOfferResultCodeManageSellOfferSuccess ManageSellOfferResultCode = 0 + ManageSellOfferResultCodeManageSellOfferMalformed ManageSellOfferResultCode = -1 + ManageSellOfferResultCodeManageSellOfferSellNoTrust ManageSellOfferResultCode = -2 + ManageSellOfferResultCodeManageSellOfferBuyNoTrust ManageSellOfferResultCode = -3 + ManageSellOfferResultCodeManageSellOfferSellNotAuthorized ManageSellOfferResultCode = -4 + ManageSellOfferResultCodeManageSellOfferBuyNotAuthorized ManageSellOfferResultCode = -5 + ManageSellOfferResultCodeManageSellOfferLineFull ManageSellOfferResultCode = -6 + ManageSellOfferResultCodeManageSellOfferUnderfunded ManageSellOfferResultCode = -7 + ManageSellOfferResultCodeManageSellOfferCrossSelf ManageSellOfferResultCode = -8 + ManageSellOfferResultCodeManageSellOfferSellNoIssuer ManageSellOfferResultCode = -9 + ManageSellOfferResultCodeManageSellOfferBuyNoIssuer ManageSellOfferResultCode = -10 + ManageSellOfferResultCodeManageSellOfferNotFound ManageSellOfferResultCode = -11 + ManageSellOfferResultCodeManageSellOfferLowReserve ManageSellOfferResultCode = -12 ) -var memoTypeMap = map[int32]string{ - 0: "MemoTypeMemoNone", - 1: "MemoTypeMemoText", - 2: "MemoTypeMemoId", - 3: "MemoTypeMemoHash", - 4: "MemoTypeMemoReturn", +var manageSellOfferResultCodeMap = map[int32]string{ + 0: "ManageSellOfferResultCodeManageSellOfferSuccess", + -1: "ManageSellOfferResultCodeManageSellOfferMalformed", + -2: "ManageSellOfferResultCodeManageSellOfferSellNoTrust", + -3: "ManageSellOfferResultCodeManageSellOfferBuyNoTrust", + -4: "ManageSellOfferResultCodeManageSellOfferSellNotAuthorized", + -5: "ManageSellOfferResultCodeManageSellOfferBuyNotAuthorized", + -6: "ManageSellOfferResultCodeManageSellOfferLineFull", + -7: "ManageSellOfferResultCodeManageSellOfferUnderfunded", + -8: "ManageSellOfferResultCodeManageSellOfferCrossSelf", + -9: "ManageSellOfferResultCodeManageSellOfferSellNoIssuer", + -10: "ManageSellOfferResultCodeManageSellOfferBuyNoIssuer", + -11: "ManageSellOfferResultCodeManageSellOfferNotFound", + -12: "ManageSellOfferResultCodeManageSellOfferLowReserve", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for MemoType -func (e MemoType) ValidEnum(v int32) bool { - _, ok := memoTypeMap[v] +// the Enum interface for ManageSellOfferResultCode +func (e ManageSellOfferResultCode) ValidEnum(v int32) bool { + _, ok := manageSellOfferResultCodeMap[v] return ok } // String returns the name of `e` -func (e MemoType) String() string { - name, _ := memoTypeMap[int32(e)] +func (e ManageSellOfferResultCode) String() string { + name, _ := manageSellOfferResultCodeMap[int32(e)] return name } -// Memo is an XDR Union defines as: +// EncodeTo encodes this value using the Encoder. +func (e ManageSellOfferResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := manageSellOfferResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ManageSellOfferResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ManageSellOfferResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (e *ManageSellOfferResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ManageSellOfferResultCode: %s", err) + } + if _, ok := manageSellOfferResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ManageSellOfferResultCode enum value", v) + } + *e = ManageSellOfferResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageSellOfferResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageSellOfferResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageSellOfferResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*ManageSellOfferResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageSellOfferResultCode) xdrType() {} + +var _ xdrType = (*ManageSellOfferResultCode)(nil) + +// ManageOfferEffect is an XDR Enum defines as: // -// union Memo switch (MemoType type) +// enum ManageOfferEffect // { -// case MEMO_NONE: -// void; -// case MEMO_TEXT: -// string text<28>; -// case MEMO_ID: -// uint64 id; -// case MEMO_HASH: -// Hash hash; // the hash of what to pull from the content server -// case MEMO_RETURN: -// Hash retHash; // the hash of the tx you are rejecting +// MANAGE_OFFER_CREATED = 0, +// MANAGE_OFFER_UPDATED = 1, +// MANAGE_OFFER_DELETED = 2 // }; // -type Memo struct { - Type MemoType - Text *string - Id *Uint64 - Hash *Hash - RetHash *Hash +type ManageOfferEffect int32 + +const ( + ManageOfferEffectManageOfferCreated ManageOfferEffect = 0 + ManageOfferEffectManageOfferUpdated ManageOfferEffect = 1 + ManageOfferEffectManageOfferDeleted ManageOfferEffect = 2 +) + +var manageOfferEffectMap = map[int32]string{ + 0: "ManageOfferEffectManageOfferCreated", + 1: "ManageOfferEffectManageOfferUpdated", + 2: "ManageOfferEffectManageOfferDeleted", } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u Memo) SwitchFieldName() string { - return "Type" +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for ManageOfferEffect +func (e ManageOfferEffect) ValidEnum(v int32) bool { + _, ok := manageOfferEffectMap[v] + return ok } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of Memo -func (u Memo) ArmForSwitch(sw int32) (string, bool) { - switch MemoType(sw) { - case MemoTypeMemoNone: - return "", true - case MemoTypeMemoText: - return "Text", true - case MemoTypeMemoId: - return "Id", true - case MemoTypeMemoHash: - return "Hash", true - case MemoTypeMemoReturn: - return "RetHash", true - } - return "-", false +// String returns the name of `e` +func (e ManageOfferEffect) String() string { + name, _ := manageOfferEffectMap[int32(e)] + return name } -// NewMemo creates a new Memo. 
-func NewMemo(aType MemoType, value interface{}) (result Memo, err error) { - result.Type = aType - switch MemoType(aType) { - case MemoTypeMemoNone: - // void - case MemoTypeMemoText: - tv, ok := value.(string) - if !ok { - err = fmt.Errorf("invalid value, must be string") - return - } - result.Text = &tv - case MemoTypeMemoId: - tv, ok := value.(Uint64) - if !ok { - err = fmt.Errorf("invalid value, must be Uint64") - return - } - result.Id = &tv - case MemoTypeMemoHash: - tv, ok := value.(Hash) - if !ok { - err = fmt.Errorf("invalid value, must be Hash") - return - } - result.Hash = &tv - case MemoTypeMemoReturn: - tv, ok := value.(Hash) - if !ok { - err = fmt.Errorf("invalid value, must be Hash") - return - } - result.RetHash = &tv +// EncodeTo encodes this value using the Encoder. +func (e ManageOfferEffect) EncodeTo(enc *xdr.Encoder) error { + if _, ok := manageOfferEffectMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ManageOfferEffect enum value", e) } - return + _, err := enc.EncodeInt(int32(e)) + return err } -// MustText retrieves the Text value from the union, -// panicing if the value is not set. -func (u Memo) MustText() string { - val, ok := u.GetText() +var _ decoderFrom = (*ManageOfferEffect)(nil) - if !ok { - panic("arm Text is not set") +// DecodeFrom decodes this value using the Decoder. +func (e *ManageOfferEffect) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ManageOfferEffect: %s", err) + } + if _, ok := manageOfferEffectMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ManageOfferEffect enum value", v) } + *e = ManageOfferEffect(v) + return n, nil +} - return val +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageOfferEffect) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetText retrieves the Text value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u Memo) GetText() (result string, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageOfferEffect) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if armName == "Text" { - result = *u.Text - ok = true - } +var ( + _ encoding.BinaryMarshaler = (*ManageOfferEffect)(nil) + _ encoding.BinaryUnmarshaler = (*ManageOfferEffect)(nil) +) - return -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageOfferEffect) xdrType() {} -// MustId retrieves the Id value from the union, -// panicing if the value is not set. 
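Because ManageOfferEffect validates against its name map in both EncodeTo and DecodeFrom, out-of-range values fail loudly on decode rather than producing a bogus effect. A short sketch (same assumed import path):

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	// The three effects round-trip through the map-backed helpers.
	for _, e := range []xdr.ManageOfferEffect{
		xdr.ManageOfferEffectManageOfferCreated,
		xdr.ManageOfferEffectManageOfferUpdated,
		xdr.ManageOfferEffectManageOfferDeleted,
	} {
		fmt.Println(int32(e), e.String(), e.ValidEnum(int32(e)))
	}

	// DecodeFrom (used by UnmarshalBinary) rejects values outside the enum, so
	// a stray 3 on the wire surfaces as an error.
	var e xdr.ManageOfferEffect
	err := e.UnmarshalBinary([]byte{0x00, 0x00, 0x00, 0x03})
	fmt.Println(err) // '3' is not a valid ManageOfferEffect enum value
}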
-func (u Memo) MustId() Uint64 { - val, ok := u.GetId() +var _ xdrType = (*ManageOfferEffect)(nil) - if !ok { - panic("arm Id is not set") - } +// ManageOfferSuccessResultOffer is an XDR NestedUnion defines as: +// +// union switch (ManageOfferEffect effect) +// { +// case MANAGE_OFFER_CREATED: +// case MANAGE_OFFER_UPDATED: +// OfferEntry offer; +// default: +// void; +// } +// +type ManageOfferSuccessResultOffer struct { + Effect ManageOfferEffect + Offer *OfferEntry +} - return val +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u ManageOfferSuccessResultOffer) SwitchFieldName() string { + return "Effect" } -// GetId retrieves the Id value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u Memo) GetId() (result Uint64, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of ManageOfferSuccessResultOffer +func (u ManageOfferSuccessResultOffer) ArmForSwitch(sw int32) (string, bool) { + switch ManageOfferEffect(sw) { + case ManageOfferEffectManageOfferCreated: + return "Offer", true + case ManageOfferEffectManageOfferUpdated: + return "Offer", true + default: + return "", true + } +} - if armName == "Id" { - result = *u.Id - ok = true +// NewManageOfferSuccessResultOffer creates a new ManageOfferSuccessResultOffer. +func NewManageOfferSuccessResultOffer(effect ManageOfferEffect, value interface{}) (result ManageOfferSuccessResultOffer, err error) { + result.Effect = effect + switch ManageOfferEffect(effect) { + case ManageOfferEffectManageOfferCreated: + tv, ok := value.(OfferEntry) + if !ok { + err = fmt.Errorf("invalid value, must be OfferEntry") + return + } + result.Offer = &tv + case ManageOfferEffectManageOfferUpdated: + tv, ok := value.(OfferEntry) + if !ok { + err = fmt.Errorf("invalid value, must be OfferEntry") + return + } + result.Offer = &tv + default: + // void } - return } -// MustHash retrieves the Hash value from the union, +// MustOffer retrieves the Offer value from the union, // panicing if the value is not set. -func (u Memo) MustHash() Hash { - val, ok := u.GetHash() +func (u ManageOfferSuccessResultOffer) MustOffer() OfferEntry { + val, ok := u.GetOffer() if !ok { - panic("arm Hash is not set") + panic("arm Offer is not set") } return val } -// GetHash retrieves the Hash value from the union, +// GetOffer retrieves the Offer value from the union, // returning ok if the union's switch indicated the value is valid. -func (u Memo) GetHash() (result Hash, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +func (u ManageOfferSuccessResultOffer) GetOffer() (result OfferEntry, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Effect)) - if armName == "Hash" { - result = *u.Hash + if armName == "Offer" { + result = *u.Offer ok = true } return } -// MustRetHash retrieves the RetHash value from the union, -// panicing if the value is not set. -func (u Memo) MustRetHash() Hash { - val, ok := u.GetRetHash() - - if !ok { - panic("arm RetHash is not set") +// EncodeTo encodes this value using the Encoder. 
+func (u ManageOfferSuccessResultOffer) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Effect.EncodeTo(e); err != nil { + return err + } + switch ManageOfferEffect(u.Effect) { + case ManageOfferEffectManageOfferCreated: + if err = (*u.Offer).EncodeTo(e); err != nil { + return err + } + return nil + case ManageOfferEffectManageOfferUpdated: + if err = (*u.Offer).EncodeTo(e); err != nil { + return err + } + return nil + default: + // Void + return nil } - - return val } -// GetRetHash retrieves the RetHash value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u Memo) GetRetHash() (result Hash, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*ManageOfferSuccessResultOffer)(nil) - if armName == "RetHash" { - result = *u.RetHash - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *ManageOfferSuccessResultOffer) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Effect.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageOfferEffect: %s", err) + } + switch ManageOfferEffect(u.Effect) { + case ManageOfferEffectManageOfferCreated: + u.Offer = new(OfferEntry) + nTmp, err = (*u.Offer).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OfferEntry: %s", err) + } + return n, nil + case ManageOfferEffectManageOfferUpdated: + u.Offer = new(OfferEntry) + nTmp, err = (*u.Offer).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OfferEntry: %s", err) + } + return n, nil + default: + // Void + return n, nil } +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageOfferSuccessResultOffer) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// TimeBounds is an XDR Struct defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageOfferSuccessResultOffer) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageOfferSuccessResultOffer)(nil) + _ encoding.BinaryUnmarshaler = (*ManageOfferSuccessResultOffer)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageOfferSuccessResultOffer) xdrType() {} + +var _ xdrType = (*ManageOfferSuccessResultOffer)(nil) + +// ManageOfferSuccessResult is an XDR Struct defines as: // -// struct TimeBounds +// struct ManageOfferSuccessResult // { -// uint64 minTime; -// uint64 maxTime; +// // offers that got claimed while creating this offer +// ClaimAtom offersClaimed<>; +// +// union switch (ManageOfferEffect effect) +// { +// case MANAGE_OFFER_CREATED: +// case MANAGE_OFFER_UPDATED: +// OfferEntry offer; +// default: +// void; +// } +// offer; // }; // -type TimeBounds struct { - MinTime Uint64 - MaxTime Uint64 +type ManageOfferSuccessResult struct { + OffersClaimed []ClaimAtom + Offer ManageOfferSuccessResultOffer } -// TransactionExt is an XDR NestedUnion defines as: +// EncodeTo encodes this value using the Encoder. 
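The nested ManageOfferSuccessResultOffer union maps both MANAGE_OFFER_CREATED and MANAGE_OFFER_UPDATED to the Offer arm and everything else to void, and the accessors reflect that. A sketch with placeholder zero values, assuming the usual github.com/stellar/go/xdr import path:

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	// CREATED and UPDATED share the Offer arm.
	created, err := xdr.NewManageOfferSuccessResultOffer(
		xdr.ManageOfferEffectManageOfferCreated,
		xdr.OfferEntry{}, // zero value stands in for a real offer in this sketch
	)
	if err != nil {
		panic(err)
	}
	_, ok := created.GetOffer()
	fmt.Println("offer arm set:", ok) // true

	// DELETED falls through to the void default: there is no OfferEntry to
	// read, so GetOffer reports ok == false (MustOffer would panic).
	deleted, err := xdr.NewManageOfferSuccessResultOffer(
		xdr.ManageOfferEffectManageOfferDeleted,
		nil,
	)
	if err != nil {
		panic(err)
	}
	_, ok = deleted.GetOffer()
	fmt.Println("offer arm set:", ok) // false
}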
+func (s *ManageOfferSuccessResult) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeUint(uint32(len(s.OffersClaimed))); err != nil { + return err + } + for i := 0; i < len(s.OffersClaimed); i++ { + if err = s.OffersClaimed[i].EncodeTo(e); err != nil { + return err + } + } + if err = s.Offer.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*ManageOfferSuccessResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *ManageOfferSuccessResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimAtom: %s", err) + } + s.OffersClaimed = nil + if l > 0 { + s.OffersClaimed = make([]ClaimAtom, l) + for i := uint32(0); i < l; i++ { + nTmp, err = s.OffersClaimed[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimAtom: %s", err) + } + } + } + nTmp, err = s.Offer.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageOfferSuccessResultOffer: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageOfferSuccessResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageOfferSuccessResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageOfferSuccessResult)(nil) + _ encoding.BinaryUnmarshaler = (*ManageOfferSuccessResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageOfferSuccessResult) xdrType() {} + +var _ xdrType = (*ManageOfferSuccessResult)(nil) + +// ManageSellOfferResult is an XDR Union defines as: // -// union switch (int v) -// { -// case 0: -// void; -// } +// union ManageSellOfferResult switch (ManageSellOfferResultCode code) +// { +// case MANAGE_SELL_OFFER_SUCCESS: +// ManageOfferSuccessResult success; +// default: +// void; +// }; // -type TransactionExt struct { - V int32 +type ManageSellOfferResult struct { + Code ManageSellOfferResultCode + Success *ManageOfferSuccessResult } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u TransactionExt) SwitchFieldName() string { - return "V" +func (u ManageSellOfferResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of TransactionExt -func (u TransactionExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: +// the value for an instance of ManageSellOfferResult +func (u ManageSellOfferResult) ArmForSwitch(sw int32) (string, bool) { + switch ManageSellOfferResultCode(sw) { + case ManageSellOfferResultCodeManageSellOfferSuccess: + return "Success", true + default: return "", true } - return "-", false } -// NewTransactionExt creates a new TransactionExt. -func NewTransactionExt(v int32, value interface{}) (result TransactionExt, err error) { - result.V = v - switch int32(v) { - case 0: +// NewManageSellOfferResult creates a new ManageSellOfferResult. 
+func NewManageSellOfferResult(code ManageSellOfferResultCode, value interface{}) (result ManageSellOfferResult, err error) { + result.Code = code + switch ManageSellOfferResultCode(code) { + case ManageSellOfferResultCodeManageSellOfferSuccess: + tv, ok := value.(ManageOfferSuccessResult) + if !ok { + err = fmt.Errorf("invalid value, must be ManageOfferSuccessResult") + return + } + result.Success = &tv + default: // void } return } -// Transaction is an XDR Struct defines as: -// -// struct Transaction -// { -// // account used to run the transaction -// AccountID sourceAccount; -// -// // the fee the sourceAccount will pay -// uint32 fee; -// -// // sequence number to consume in the account -// SequenceNumber seqNum; -// -// // validity range (inclusive) for the last ledger close time -// TimeBounds* timeBounds; -// -// Memo memo; -// -// Operation operations<100>; -// -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; -// }; -// -type Transaction struct { - SourceAccount AccountId - Fee Uint32 - SeqNum SequenceNumber - TimeBounds *TimeBounds - Memo Memo - Operations []Operation `xdrmaxsize:"100"` - Ext TransactionExt +// MustSuccess retrieves the Success value from the union, +// panicing if the value is not set. +func (u ManageSellOfferResult) MustSuccess() ManageOfferSuccessResult { + val, ok := u.GetSuccess() + + if !ok { + panic("arm Success is not set") + } + + return val } -// TransactionEnvelope is an XDR Struct defines as: -// -// struct TransactionEnvelope -// { -// Transaction tx; -// DecoratedSignature signatures<20>; -// }; -// -type TransactionEnvelope struct { - Tx Transaction - Signatures []DecoratedSignature `xdrmaxsize:"20"` +// GetSuccess retrieves the Success value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ManageSellOfferResult) GetSuccess() (result ManageOfferSuccessResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) + + if armName == "Success" { + result = *u.Success + ok = true + } + + return } -// ClaimOfferAtom is an XDR Struct defines as: -// -// struct ClaimOfferAtom -// { -// // emitted to identify the offer -// AccountID sellerID; // Account that owns the offer -// uint64 offerID; -// -// // amount and asset taken from the owner -// Asset assetSold; -// int64 amountSold; -// -// // amount and asset sent to the owner -// Asset assetBought; -// int64 amountBought; -// }; -// -type ClaimOfferAtom struct { - SellerId AccountId - OfferId Uint64 - AssetSold Asset - AmountSold Int64 - AssetBought Asset - AmountBought Int64 +// EncodeTo encodes this value using the Encoder. +func (u ManageSellOfferResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch ManageSellOfferResultCode(u.Code) { + case ManageSellOfferResultCodeManageSellOfferSuccess: + if err = (*u.Success).EncodeTo(e); err != nil { + return err + } + return nil + default: + // Void + return nil + } } -// CreateAccountResultCode is an XDR Enum defines as: +var _ decoderFrom = (*ManageSellOfferResult)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (u *ManageSellOfferResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageSellOfferResultCode: %s", err) + } + switch ManageSellOfferResultCode(u.Code) { + case ManageSellOfferResultCodeManageSellOfferSuccess: + u.Success = new(ManageOfferSuccessResult) + nTmp, err = (*u.Success).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageOfferSuccessResult: %s", err) + } + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageSellOfferResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageSellOfferResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageSellOfferResult)(nil) + _ encoding.BinaryUnmarshaler = (*ManageSellOfferResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageSellOfferResult) xdrType() {} + +var _ xdrType = (*ManageSellOfferResult)(nil) + +// ManageBuyOfferResultCode is an XDR Enum defines as: // -// enum CreateAccountResultCode +// enum ManageBuyOfferResultCode // { // // codes considered as "success" for the operation -// CREATE_ACCOUNT_SUCCESS = 0, // account was created +// MANAGE_BUY_OFFER_SUCCESS = 0, // // // codes considered as "failure" for the operation -// CREATE_ACCOUNT_MALFORMED = -1, // invalid destination -// CREATE_ACCOUNT_UNDERFUNDED = -2, // not enough funds in source account -// CREATE_ACCOUNT_LOW_RESERVE = -// -3, // would create an account below the min reserve -// CREATE_ACCOUNT_ALREADY_EXIST = -4 // account already exists +// MANAGE_BUY_OFFER_MALFORMED = -1, // generated offer would be invalid +// MANAGE_BUY_OFFER_SELL_NO_TRUST = -2, // no trust line for what we're selling +// MANAGE_BUY_OFFER_BUY_NO_TRUST = -3, // no trust line for what we're buying +// MANAGE_BUY_OFFER_SELL_NOT_AUTHORIZED = -4, // not authorized to sell +// MANAGE_BUY_OFFER_BUY_NOT_AUTHORIZED = -5, // not authorized to buy +// MANAGE_BUY_OFFER_LINE_FULL = -6, // can't receive more of what it's buying +// MANAGE_BUY_OFFER_UNDERFUNDED = -7, // doesn't hold what it's trying to sell +// MANAGE_BUY_OFFER_CROSS_SELF = -8, // would cross an offer from the same user +// MANAGE_BUY_OFFER_SELL_NO_ISSUER = -9, // no issuer for what we're selling +// MANAGE_BUY_OFFER_BUY_NO_ISSUER = -10, // no issuer for what we're buying +// +// // update errors +// MANAGE_BUY_OFFER_NOT_FOUND = +// -11, // offerID does not match an existing offer +// +// MANAGE_BUY_OFFER_LOW_RESERVE = -12 // not enough funds to create a new Offer // }; // -type CreateAccountResultCode int32 +type ManageBuyOfferResultCode int32 const ( - CreateAccountResultCodeCreateAccountSuccess CreateAccountResultCode = 0 - CreateAccountResultCodeCreateAccountMalformed CreateAccountResultCode = -1 - CreateAccountResultCodeCreateAccountUnderfunded CreateAccountResultCode = -2 - CreateAccountResultCodeCreateAccountLowReserve CreateAccountResultCode = -3 - CreateAccountResultCodeCreateAccountAlreadyExist CreateAccountResultCode = -4 + ManageBuyOfferResultCodeManageBuyOfferSuccess ManageBuyOfferResultCode = 0 + 
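MustSuccess versus GetSuccess on ManageSellOfferResult follows the usual contract of these generated unions: Must panics when the arm is not set, Get reports ok. A sketch under the same assumptions as the earlier examples, with zero values as placeholders:

package main

import (
	"fmt"

	"github.com/stellar/go/xdr" // assumed import path
)

func main() {
	// A successful result carries a ManageOfferSuccessResult in the Success
	// arm, so MustSuccess is safe to call.
	res, err := xdr.NewManageSellOfferResult(
		xdr.ManageSellOfferResultCodeManageSellOfferSuccess,
		xdr.ManageOfferSuccessResult{}, // placeholder value
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("offers claimed:", len(res.MustSuccess().OffersClaimed))

	// Failure codes select the void default, so only GetSuccess (which
	// reports ok) is safe; MustSuccess would panic here.
	failed, err := xdr.NewManageSellOfferResult(
		xdr.ManageSellOfferResultCodeManageSellOfferLineFull,
		nil,
	)
	if err != nil {
		panic(err)
	}
	_, ok := failed.GetSuccess()
	fmt.Println("success arm set:", ok) // false
}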
ManageBuyOfferResultCodeManageBuyOfferMalformed ManageBuyOfferResultCode = -1 + ManageBuyOfferResultCodeManageBuyOfferSellNoTrust ManageBuyOfferResultCode = -2 + ManageBuyOfferResultCodeManageBuyOfferBuyNoTrust ManageBuyOfferResultCode = -3 + ManageBuyOfferResultCodeManageBuyOfferSellNotAuthorized ManageBuyOfferResultCode = -4 + ManageBuyOfferResultCodeManageBuyOfferBuyNotAuthorized ManageBuyOfferResultCode = -5 + ManageBuyOfferResultCodeManageBuyOfferLineFull ManageBuyOfferResultCode = -6 + ManageBuyOfferResultCodeManageBuyOfferUnderfunded ManageBuyOfferResultCode = -7 + ManageBuyOfferResultCodeManageBuyOfferCrossSelf ManageBuyOfferResultCode = -8 + ManageBuyOfferResultCodeManageBuyOfferSellNoIssuer ManageBuyOfferResultCode = -9 + ManageBuyOfferResultCodeManageBuyOfferBuyNoIssuer ManageBuyOfferResultCode = -10 + ManageBuyOfferResultCodeManageBuyOfferNotFound ManageBuyOfferResultCode = -11 + ManageBuyOfferResultCodeManageBuyOfferLowReserve ManageBuyOfferResultCode = -12 ) -var createAccountResultCodeMap = map[int32]string{ - 0: "CreateAccountResultCodeCreateAccountSuccess", - -1: "CreateAccountResultCodeCreateAccountMalformed", - -2: "CreateAccountResultCodeCreateAccountUnderfunded", - -3: "CreateAccountResultCodeCreateAccountLowReserve", - -4: "CreateAccountResultCodeCreateAccountAlreadyExist", +var manageBuyOfferResultCodeMap = map[int32]string{ + 0: "ManageBuyOfferResultCodeManageBuyOfferSuccess", + -1: "ManageBuyOfferResultCodeManageBuyOfferMalformed", + -2: "ManageBuyOfferResultCodeManageBuyOfferSellNoTrust", + -3: "ManageBuyOfferResultCodeManageBuyOfferBuyNoTrust", + -4: "ManageBuyOfferResultCodeManageBuyOfferSellNotAuthorized", + -5: "ManageBuyOfferResultCodeManageBuyOfferBuyNotAuthorized", + -6: "ManageBuyOfferResultCodeManageBuyOfferLineFull", + -7: "ManageBuyOfferResultCodeManageBuyOfferUnderfunded", + -8: "ManageBuyOfferResultCodeManageBuyOfferCrossSelf", + -9: "ManageBuyOfferResultCodeManageBuyOfferSellNoIssuer", + -10: "ManageBuyOfferResultCodeManageBuyOfferBuyNoIssuer", + -11: "ManageBuyOfferResultCodeManageBuyOfferNotFound", + -12: "ManageBuyOfferResultCodeManageBuyOfferLowReserve", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for CreateAccountResultCode -func (e CreateAccountResultCode) ValidEnum(v int32) bool { - _, ok := createAccountResultCodeMap[v] +// the Enum interface for ManageBuyOfferResultCode +func (e ManageBuyOfferResultCode) ValidEnum(v int32) bool { + _, ok := manageBuyOfferResultCodeMap[v] return ok } // String returns the name of `e` -func (e CreateAccountResultCode) String() string { - name, _ := createAccountResultCodeMap[int32(e)] +func (e ManageBuyOfferResultCode) String() string { + name, _ := manageBuyOfferResultCodeMap[int32(e)] return name } -// CreateAccountResult is an XDR Union defines as: +// EncodeTo encodes this value using the Encoder. +func (e ManageBuyOfferResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := manageBuyOfferResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ManageBuyOfferResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ManageBuyOfferResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (e *ManageBuyOfferResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ManageBuyOfferResultCode: %s", err) + } + if _, ok := manageBuyOfferResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ManageBuyOfferResultCode enum value", v) + } + *e = ManageBuyOfferResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageBuyOfferResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageBuyOfferResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageBuyOfferResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*ManageBuyOfferResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageBuyOfferResultCode) xdrType() {} + +var _ xdrType = (*ManageBuyOfferResultCode)(nil) + +// ManageBuyOfferResult is an XDR Union defines as: // -// union CreateAccountResult switch (CreateAccountResultCode code) +// union ManageBuyOfferResult switch (ManageBuyOfferResultCode code) // { -// case CREATE_ACCOUNT_SUCCESS: -// void; +// case MANAGE_BUY_OFFER_SUCCESS: +// ManageOfferSuccessResult success; // default: // void; // }; // -type CreateAccountResult struct { - Code CreateAccountResultCode +type ManageBuyOfferResult struct { + Code ManageBuyOfferResultCode + Success *ManageOfferSuccessResult } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u CreateAccountResult) SwitchFieldName() string { +func (u ManageBuyOfferResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of CreateAccountResult -func (u CreateAccountResult) ArmForSwitch(sw int32) (string, bool) { - switch CreateAccountResultCode(sw) { - case CreateAccountResultCodeCreateAccountSuccess: - return "", true +// the value for an instance of ManageBuyOfferResult +func (u ManageBuyOfferResult) ArmForSwitch(sw int32) (string, bool) { + switch ManageBuyOfferResultCode(sw) { + case ManageBuyOfferResultCodeManageBuyOfferSuccess: + return "Success", true default: return "", true } } -// NewCreateAccountResult creates a new CreateAccountResult. -func NewCreateAccountResult(code CreateAccountResultCode, value interface{}) (result CreateAccountResult, err error) { +// NewManageBuyOfferResult creates a new ManageBuyOfferResult. +func NewManageBuyOfferResult(code ManageBuyOfferResultCode, value interface{}) (result ManageBuyOfferResult, err error) { result.Code = code - switch CreateAccountResultCode(code) { - case CreateAccountResultCodeCreateAccountSuccess: - // void + switch ManageBuyOfferResultCode(code) { + case ManageBuyOfferResultCodeManageBuyOfferSuccess: + tv, ok := value.(ManageOfferSuccessResult) + if !ok { + err = fmt.Errorf("invalid value, must be ManageOfferSuccessResult") + return + } + result.Success = &tv default: // void } return } -// PaymentResultCode is an XDR Enum defines as: +// MustSuccess retrieves the Success value from the union, +// panicing if the value is not set. 
+func (u ManageBuyOfferResult) MustSuccess() ManageOfferSuccessResult { + val, ok := u.GetSuccess() + + if !ok { + panic("arm Success is not set") + } + + return val +} + +// GetSuccess retrieves the Success value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u ManageBuyOfferResult) GetSuccess() (result ManageOfferSuccessResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) + + if armName == "Success" { + result = *u.Success + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. +func (u ManageBuyOfferResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch ManageBuyOfferResultCode(u.Code) { + case ManageBuyOfferResultCodeManageBuyOfferSuccess: + if err = (*u.Success).EncodeTo(e); err != nil { + return err + } + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*ManageBuyOfferResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *ManageBuyOfferResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageBuyOfferResultCode: %s", err) + } + switch ManageBuyOfferResultCode(u.Code) { + case ManageBuyOfferResultCodeManageBuyOfferSuccess: + u.Success = new(ManageOfferSuccessResult) + nTmp, err = (*u.Success).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageOfferSuccessResult: %s", err) + } + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageBuyOfferResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageBuyOfferResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageBuyOfferResult)(nil) + _ encoding.BinaryUnmarshaler = (*ManageBuyOfferResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
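For readers skimming this generated diff, the pattern above (constructor, Get/Must accessors, EncodeTo/DecodeFrom, MarshalBinary/UnmarshalBinary) repeats for every result union that follows. A minimal usage sketch for ManageBuyOfferResult, assuming the generated code lives in the stellar/go xdr package (the import path below is an assumption, not part of this diff):

// Illustrative only; the import path is an assumption and not part of this diff.
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	// Any code other than MANAGE_BUY_OFFER_SUCCESS selects the void arm,
	// so the constructor accepts nil and only the 4-byte code is encoded.
	res, err := xdr.NewManageBuyOfferResult(xdr.ManageBuyOfferResultCodeManageBuyOfferLineFull, nil)
	if err != nil {
		panic(err)
	}

	raw, err := res.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", raw) // fffffffa (-6 as a big-endian XDR int)

	var decoded xdr.ManageBuyOfferResult
	if err := decoded.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Code.String()) // ManageBuyOfferResultCodeManageBuyOfferLineFull

	// The Success arm is only populated for MANAGE_BUY_OFFER_SUCCESS.
	_, ok := decoded.GetSuccess()
	fmt.Println(ok) // false
}

The same shape applies to ManageSellOfferResult above, whose success arm is also a ManageOfferSuccessResult.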
+func (s ManageBuyOfferResult) xdrType() {} + +var _ xdrType = (*ManageBuyOfferResult)(nil) + +// SetOptionsResultCode is an XDR Enum defines as: // -// enum PaymentResultCode +// enum SetOptionsResultCode // { // // codes considered as "success" for the operation -// PAYMENT_SUCCESS = 0, // payment successfuly completed -// +// SET_OPTIONS_SUCCESS = 0, // // codes considered as "failure" for the operation -// PAYMENT_MALFORMED = -1, // bad input -// PAYMENT_UNDERFUNDED = -2, // not enough funds in source account -// PAYMENT_SRC_NO_TRUST = -3, // no trust line on source account -// PAYMENT_SRC_NOT_AUTHORIZED = -4, // source not authorized to transfer -// PAYMENT_NO_DESTINATION = -5, // destination account does not exist -// PAYMENT_NO_TRUST = -6, // destination missing a trust line for asset -// PAYMENT_NOT_AUTHORIZED = -7, // destination not authorized to hold asset -// PAYMENT_LINE_FULL = -8, // destination would go above their limit -// PAYMENT_NO_ISSUER = -9 // missing issuer on asset +// SET_OPTIONS_LOW_RESERVE = -1, // not enough funds to add a signer +// SET_OPTIONS_TOO_MANY_SIGNERS = -2, // max number of signers already reached +// SET_OPTIONS_BAD_FLAGS = -3, // invalid combination of clear/set flags +// SET_OPTIONS_INVALID_INFLATION = -4, // inflation account does not exist +// SET_OPTIONS_CANT_CHANGE = -5, // can no longer change this option +// SET_OPTIONS_UNKNOWN_FLAG = -6, // can't set an unknown flag +// SET_OPTIONS_THRESHOLD_OUT_OF_RANGE = -7, // bad value for weight/threshold +// SET_OPTIONS_BAD_SIGNER = -8, // signer cannot be masterkey +// SET_OPTIONS_INVALID_HOME_DOMAIN = -9, // malformed home domain +// SET_OPTIONS_AUTH_REVOCABLE_REQUIRED = +// -10 // auth revocable is required for clawback // }; // -type PaymentResultCode int32 +type SetOptionsResultCode int32 const ( - PaymentResultCodePaymentSuccess PaymentResultCode = 0 - PaymentResultCodePaymentMalformed PaymentResultCode = -1 - PaymentResultCodePaymentUnderfunded PaymentResultCode = -2 - PaymentResultCodePaymentSrcNoTrust PaymentResultCode = -3 - PaymentResultCodePaymentSrcNotAuthorized PaymentResultCode = -4 - PaymentResultCodePaymentNoDestination PaymentResultCode = -5 - PaymentResultCodePaymentNoTrust PaymentResultCode = -6 - PaymentResultCodePaymentNotAuthorized PaymentResultCode = -7 - PaymentResultCodePaymentLineFull PaymentResultCode = -8 - PaymentResultCodePaymentNoIssuer PaymentResultCode = -9 + SetOptionsResultCodeSetOptionsSuccess SetOptionsResultCode = 0 + SetOptionsResultCodeSetOptionsLowReserve SetOptionsResultCode = -1 + SetOptionsResultCodeSetOptionsTooManySigners SetOptionsResultCode = -2 + SetOptionsResultCodeSetOptionsBadFlags SetOptionsResultCode = -3 + SetOptionsResultCodeSetOptionsInvalidInflation SetOptionsResultCode = -4 + SetOptionsResultCodeSetOptionsCantChange SetOptionsResultCode = -5 + SetOptionsResultCodeSetOptionsUnknownFlag SetOptionsResultCode = -6 + SetOptionsResultCodeSetOptionsThresholdOutOfRange SetOptionsResultCode = -7 + SetOptionsResultCodeSetOptionsBadSigner SetOptionsResultCode = -8 + SetOptionsResultCodeSetOptionsInvalidHomeDomain SetOptionsResultCode = -9 + SetOptionsResultCodeSetOptionsAuthRevocableRequired SetOptionsResultCode = -10 ) -var paymentResultCodeMap = map[int32]string{ - 0: "PaymentResultCodePaymentSuccess", - -1: "PaymentResultCodePaymentMalformed", - -2: "PaymentResultCodePaymentUnderfunded", - -3: "PaymentResultCodePaymentSrcNoTrust", - -4: "PaymentResultCodePaymentSrcNotAuthorized", - -5: "PaymentResultCodePaymentNoDestination", - -6: 
"PaymentResultCodePaymentNoTrust", - -7: "PaymentResultCodePaymentNotAuthorized", - -8: "PaymentResultCodePaymentLineFull", - -9: "PaymentResultCodePaymentNoIssuer", +var setOptionsResultCodeMap = map[int32]string{ + 0: "SetOptionsResultCodeSetOptionsSuccess", + -1: "SetOptionsResultCodeSetOptionsLowReserve", + -2: "SetOptionsResultCodeSetOptionsTooManySigners", + -3: "SetOptionsResultCodeSetOptionsBadFlags", + -4: "SetOptionsResultCodeSetOptionsInvalidInflation", + -5: "SetOptionsResultCodeSetOptionsCantChange", + -6: "SetOptionsResultCodeSetOptionsUnknownFlag", + -7: "SetOptionsResultCodeSetOptionsThresholdOutOfRange", + -8: "SetOptionsResultCodeSetOptionsBadSigner", + -9: "SetOptionsResultCodeSetOptionsInvalidHomeDomain", + -10: "SetOptionsResultCodeSetOptionsAuthRevocableRequired", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for PaymentResultCode -func (e PaymentResultCode) ValidEnum(v int32) bool { - _, ok := paymentResultCodeMap[v] +// the Enum interface for SetOptionsResultCode +func (e SetOptionsResultCode) ValidEnum(v int32) bool { + _, ok := setOptionsResultCodeMap[v] return ok } // String returns the name of `e` -func (e PaymentResultCode) String() string { - name, _ := paymentResultCodeMap[int32(e)] +func (e SetOptionsResultCode) String() string { + name, _ := setOptionsResultCodeMap[int32(e)] return name } -// PaymentResult is an XDR Union defines as: +// EncodeTo encodes this value using the Encoder. +func (e SetOptionsResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := setOptionsResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid SetOptionsResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*SetOptionsResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *SetOptionsResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding SetOptionsResultCode: %s", err) + } + if _, ok := setOptionsResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid SetOptionsResultCode enum value", v) + } + *e = SetOptionsResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SetOptionsResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SetOptionsResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SetOptionsResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*SetOptionsResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s SetOptionsResultCode) xdrType() {} + +var _ xdrType = (*SetOptionsResultCode)(nil) + +// SetOptionsResult is an XDR Union defines as: // -// union PaymentResult switch (PaymentResultCode code) +// union SetOptionsResult switch (SetOptionsResultCode code) // { -// case PAYMENT_SUCCESS: +// case SET_OPTIONS_SUCCESS: // void; // default: // void; // }; // -type PaymentResult struct { - Code PaymentResultCode +type SetOptionsResult struct { + Code SetOptionsResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u PaymentResult) SwitchFieldName() string { +func (u SetOptionsResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of PaymentResult -func (u PaymentResult) ArmForSwitch(sw int32) (string, bool) { - switch PaymentResultCode(sw) { - case PaymentResultCodePaymentSuccess: +// the value for an instance of SetOptionsResult +func (u SetOptionsResult) ArmForSwitch(sw int32) (string, bool) { + switch SetOptionsResultCode(sw) { + case SetOptionsResultCodeSetOptionsSuccess: return "", true default: return "", true } } -// NewPaymentResult creates a new PaymentResult. -func NewPaymentResult(code PaymentResultCode, value interface{}) (result PaymentResult, err error) { +// NewSetOptionsResult creates a new SetOptionsResult. +func NewSetOptionsResult(code SetOptionsResultCode, value interface{}) (result SetOptionsResult, err error) { result.Code = code - switch PaymentResultCode(code) { - case PaymentResultCodePaymentSuccess: + switch SetOptionsResultCode(code) { + case SetOptionsResultCodeSetOptionsSuccess: // void default: // void @@ -2665,778 +26971,1198 @@ func NewPaymentResult(code PaymentResultCode, value interface{}) (result Payment return } -// PathPaymentResultCode is an XDR Enum defines as: +// EncodeTo encodes this value using the Encoder. +func (u SetOptionsResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch SetOptionsResultCode(u.Code) { + case SetOptionsResultCodeSetOptionsSuccess: + // Void + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*SetOptionsResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *SetOptionsResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SetOptionsResultCode: %s", err) + } + switch SetOptionsResultCode(u.Code) { + case SetOptionsResultCodeSetOptionsSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SetOptionsResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SetOptionsResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SetOptionsResult)(nil) + _ encoding.BinaryUnmarshaler = (*SetOptionsResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
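Because every arm of SetOptionsResult is void, its XDR encoding is nothing more than the 4-byte big-endian result code. A small decoding sketch, under the same package assumption as the earlier example:

// Assumes the same package and imports as the sketch above.
func decodeSetOptionsResult(raw []byte) (xdr.SetOptionsResultCode, error) {
	var res xdr.SetOptionsResult
	if err := res.UnmarshalBinary(raw); err != nil {
		return 0, err
	}
	return res.Code, nil
}

// decodeSetOptionsResult([]byte{0xff, 0xff, 0xff, 0xf9}) returns
// SetOptionsResultCodeSetOptionsThresholdOutOfRange (-7).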
+func (s SetOptionsResult) xdrType() {} + +var _ xdrType = (*SetOptionsResult)(nil) + +// ChangeTrustResultCode is an XDR Enum defines as: // -// enum PathPaymentResultCode +// enum ChangeTrustResultCode // { // // codes considered as "success" for the operation -// PATH_PAYMENT_SUCCESS = 0, // success -// +// CHANGE_TRUST_SUCCESS = 0, // // codes considered as "failure" for the operation -// PATH_PAYMENT_MALFORMED = -1, // bad input -// PATH_PAYMENT_UNDERFUNDED = -2, // not enough funds in source account -// PATH_PAYMENT_SRC_NO_TRUST = -3, // no trust line on source account -// PATH_PAYMENT_SRC_NOT_AUTHORIZED = -4, // source not authorized to transfer -// PATH_PAYMENT_NO_DESTINATION = -5, // destination account does not exist -// PATH_PAYMENT_NO_TRUST = -6, // dest missing a trust line for asset -// PATH_PAYMENT_NOT_AUTHORIZED = -7, // dest not authorized to hold asset -// PATH_PAYMENT_LINE_FULL = -8, // dest would go above their limit -// PATH_PAYMENT_NO_ISSUER = -9, // missing issuer on one asset -// PATH_PAYMENT_TOO_FEW_OFFERS = -10, // not enough offers to satisfy path -// PATH_PAYMENT_OFFER_CROSS_SELF = -11, // would cross one of its own offers -// PATH_PAYMENT_OVER_SENDMAX = -12 // could not satisfy sendmax -// }; -// -type PathPaymentResultCode int32 +// CHANGE_TRUST_MALFORMED = -1, // bad input +// CHANGE_TRUST_NO_ISSUER = -2, // could not find issuer +// CHANGE_TRUST_INVALID_LIMIT = -3, // cannot drop limit below balance +// // cannot create with a limit of 0 +// CHANGE_TRUST_LOW_RESERVE = +// -4, // not enough funds to create a new trust line, +// CHANGE_TRUST_SELF_NOT_ALLOWED = -5, // trusting self is not allowed +// CHANGE_TRUST_TRUST_LINE_MISSING = -6, // Asset trustline is missing for pool +// CHANGE_TRUST_CANNOT_DELETE = -7, // Asset trustline is still referenced in a pool +// CHANGE_TRUST_NOT_AUTH_MAINTAIN_LIABILITIES = -8 // Asset trustline is deauthorized +// }; +// +type ChangeTrustResultCode int32 const ( - PathPaymentResultCodePathPaymentSuccess PathPaymentResultCode = 0 - PathPaymentResultCodePathPaymentMalformed PathPaymentResultCode = -1 - PathPaymentResultCodePathPaymentUnderfunded PathPaymentResultCode = -2 - PathPaymentResultCodePathPaymentSrcNoTrust PathPaymentResultCode = -3 - PathPaymentResultCodePathPaymentSrcNotAuthorized PathPaymentResultCode = -4 - PathPaymentResultCodePathPaymentNoDestination PathPaymentResultCode = -5 - PathPaymentResultCodePathPaymentNoTrust PathPaymentResultCode = -6 - PathPaymentResultCodePathPaymentNotAuthorized PathPaymentResultCode = -7 - PathPaymentResultCodePathPaymentLineFull PathPaymentResultCode = -8 - PathPaymentResultCodePathPaymentNoIssuer PathPaymentResultCode = -9 - PathPaymentResultCodePathPaymentTooFewOffers PathPaymentResultCode = -10 - PathPaymentResultCodePathPaymentOfferCrossSelf PathPaymentResultCode = -11 - PathPaymentResultCodePathPaymentOverSendmax PathPaymentResultCode = -12 -) - -var pathPaymentResultCodeMap = map[int32]string{ - 0: "PathPaymentResultCodePathPaymentSuccess", - -1: "PathPaymentResultCodePathPaymentMalformed", - -2: "PathPaymentResultCodePathPaymentUnderfunded", - -3: "PathPaymentResultCodePathPaymentSrcNoTrust", - -4: "PathPaymentResultCodePathPaymentSrcNotAuthorized", - -5: "PathPaymentResultCodePathPaymentNoDestination", - -6: "PathPaymentResultCodePathPaymentNoTrust", - -7: "PathPaymentResultCodePathPaymentNotAuthorized", - -8: "PathPaymentResultCodePathPaymentLineFull", - -9: "PathPaymentResultCodePathPaymentNoIssuer", - -10: "PathPaymentResultCodePathPaymentTooFewOffers", - -11: 
"PathPaymentResultCodePathPaymentOfferCrossSelf", - -12: "PathPaymentResultCodePathPaymentOverSendmax", + ChangeTrustResultCodeChangeTrustSuccess ChangeTrustResultCode = 0 + ChangeTrustResultCodeChangeTrustMalformed ChangeTrustResultCode = -1 + ChangeTrustResultCodeChangeTrustNoIssuer ChangeTrustResultCode = -2 + ChangeTrustResultCodeChangeTrustInvalidLimit ChangeTrustResultCode = -3 + ChangeTrustResultCodeChangeTrustLowReserve ChangeTrustResultCode = -4 + ChangeTrustResultCodeChangeTrustSelfNotAllowed ChangeTrustResultCode = -5 + ChangeTrustResultCodeChangeTrustTrustLineMissing ChangeTrustResultCode = -6 + ChangeTrustResultCodeChangeTrustCannotDelete ChangeTrustResultCode = -7 + ChangeTrustResultCodeChangeTrustNotAuthMaintainLiabilities ChangeTrustResultCode = -8 +) + +var changeTrustResultCodeMap = map[int32]string{ + 0: "ChangeTrustResultCodeChangeTrustSuccess", + -1: "ChangeTrustResultCodeChangeTrustMalformed", + -2: "ChangeTrustResultCodeChangeTrustNoIssuer", + -3: "ChangeTrustResultCodeChangeTrustInvalidLimit", + -4: "ChangeTrustResultCodeChangeTrustLowReserve", + -5: "ChangeTrustResultCodeChangeTrustSelfNotAllowed", + -6: "ChangeTrustResultCodeChangeTrustTrustLineMissing", + -7: "ChangeTrustResultCodeChangeTrustCannotDelete", + -8: "ChangeTrustResultCodeChangeTrustNotAuthMaintainLiabilities", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for PathPaymentResultCode -func (e PathPaymentResultCode) ValidEnum(v int32) bool { - _, ok := pathPaymentResultCodeMap[v] +// the Enum interface for ChangeTrustResultCode +func (e ChangeTrustResultCode) ValidEnum(v int32) bool { + _, ok := changeTrustResultCodeMap[v] return ok } // String returns the name of `e` -func (e PathPaymentResultCode) String() string { - name, _ := pathPaymentResultCodeMap[int32(e)] +func (e ChangeTrustResultCode) String() string { + name, _ := changeTrustResultCodeMap[int32(e)] return name } -// SimplePaymentResult is an XDR Struct defines as: -// -// struct SimplePaymentResult -// { -// AccountID destination; -// Asset asset; -// int64 amount; -// }; -// -type SimplePaymentResult struct { - Destination AccountId - Asset Asset - Amount Int64 +// EncodeTo encodes this value using the Encoder. +func (e ChangeTrustResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := changeTrustResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ChangeTrustResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// PathPaymentResultSuccess is an XDR NestedStruct defines as: -// -// struct -// { -// ClaimOfferAtom offers<>; -// SimplePaymentResult last; -// } -// -type PathPaymentResultSuccess struct { - Offers []ClaimOfferAtom - Last SimplePaymentResult +var _ decoderFrom = (*ChangeTrustResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *ChangeTrustResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ChangeTrustResultCode: %s", err) + } + if _, ok := changeTrustResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ChangeTrustResultCode enum value", v) + } + *e = ChangeTrustResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ChangeTrustResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *ChangeTrustResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// PathPaymentResult is an XDR Union defines as: +var ( + _ encoding.BinaryMarshaler = (*ChangeTrustResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*ChangeTrustResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ChangeTrustResultCode) xdrType() {} + +var _ xdrType = (*ChangeTrustResultCode)(nil) + +// ChangeTrustResult is an XDR Union defines as: // -// union PathPaymentResult switch (PathPaymentResultCode code) +// union ChangeTrustResult switch (ChangeTrustResultCode code) // { -// case PATH_PAYMENT_SUCCESS: -// struct -// { -// ClaimOfferAtom offers<>; -// SimplePaymentResult last; -// } success; -// case PATH_PAYMENT_NO_ISSUER: -// Asset noIssuer; // the asset that caused the error +// case CHANGE_TRUST_SUCCESS: +// void; // default: // void; // }; // -type PathPaymentResult struct { - Code PathPaymentResultCode - Success *PathPaymentResultSuccess - NoIssuer *Asset +type ChangeTrustResult struct { + Code ChangeTrustResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u PathPaymentResult) SwitchFieldName() string { +func (u ChangeTrustResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of PathPaymentResult -func (u PathPaymentResult) ArmForSwitch(sw int32) (string, bool) { - switch PathPaymentResultCode(sw) { - case PathPaymentResultCodePathPaymentSuccess: - return "Success", true - case PathPaymentResultCodePathPaymentNoIssuer: - return "NoIssuer", true +// the value for an instance of ChangeTrustResult +func (u ChangeTrustResult) ArmForSwitch(sw int32) (string, bool) { + switch ChangeTrustResultCode(sw) { + case ChangeTrustResultCodeChangeTrustSuccess: + return "", true default: return "", true } } -// NewPathPaymentResult creates a new PathPaymentResult. -func NewPathPaymentResult(code PathPaymentResultCode, value interface{}) (result PathPaymentResult, err error) { +// NewChangeTrustResult creates a new ChangeTrustResult. +func NewChangeTrustResult(code ChangeTrustResultCode, value interface{}) (result ChangeTrustResult, err error) { result.Code = code - switch PathPaymentResultCode(code) { - case PathPaymentResultCodePathPaymentSuccess: - tv, ok := value.(PathPaymentResultSuccess) - if !ok { - err = fmt.Errorf("invalid value, must be PathPaymentResultSuccess") - return - } - result.Success = &tv - case PathPaymentResultCodePathPaymentNoIssuer: - tv, ok := value.(Asset) - if !ok { - err = fmt.Errorf("invalid value, must be Asset") - return - } - result.NoIssuer = &tv + switch ChangeTrustResultCode(code) { + case ChangeTrustResultCodeChangeTrustSuccess: + // void default: // void } return } -// MustSuccess retrieves the Success value from the union, -// panicing if the value is not set. -func (u PathPaymentResult) MustSuccess() PathPaymentResultSuccess { - val, ok := u.GetSuccess() - - if !ok { - panic("arm Success is not set") +// EncodeTo encodes this value using the Encoder. 
+func (u ChangeTrustResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch ChangeTrustResultCode(u.Code) { + case ChangeTrustResultCodeChangeTrustSuccess: + // Void + return nil + default: + // Void + return nil } - - return val } -// GetSuccess retrieves the Success value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u PathPaymentResult) GetSuccess() (result PathPaymentResultSuccess, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Code)) +var _ decoderFrom = (*ChangeTrustResult)(nil) - if armName == "Success" { - result = *u.Success - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *ChangeTrustResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ChangeTrustResultCode: %s", err) + } + switch ChangeTrustResultCode(u.Code) { + case ChangeTrustResultCodeChangeTrustSuccess: + // Void + return n, nil + default: + // Void + return n, nil } - - return } -// MustNoIssuer retrieves the NoIssuer value from the union, -// panicing if the value is not set. -func (u PathPaymentResult) MustNoIssuer() Asset { - val, ok := u.GetNoIssuer() - - if !ok { - panic("arm NoIssuer is not set") - } +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ChangeTrustResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - return val +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ChangeTrustResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// GetNoIssuer retrieves the NoIssuer value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u PathPaymentResult) GetNoIssuer() (result Asset, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Code)) +var ( + _ encoding.BinaryMarshaler = (*ChangeTrustResult)(nil) + _ encoding.BinaryUnmarshaler = (*ChangeTrustResult)(nil) +) - if armName == "NoIssuer" { - result = *u.NoIssuer - ok = true - } +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
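Note that the generated DecodeFrom validates the decoded value against changeTrustResultCodeMap, so unknown codes fail loudly instead of producing an out-of-range enum. A sketch of that behaviour, using only methods defined in this diff and the same assumed package as above:

// Assumes the same package and imports as the first sketch.
func changeTrustCodeExample() {
	var code xdr.ChangeTrustResultCode

	// -4 is CHANGE_TRUST_LOW_RESERVE on the wire (big-endian XDR int).
	if err := code.UnmarshalBinary([]byte{0xff, 0xff, 0xff, 0xfc}); err != nil {
		panic(err)
	}
	fmt.Println(code.String()) // ChangeTrustResultCodeChangeTrustLowReserve

	// Values outside changeTrustResultCodeMap are rejected by DecodeFrom.
	err := code.UnmarshalBinary([]byte{0xff, 0xff, 0xff, 0x9d}) // -99
	fmt.Println(err != nil)                                     // true
}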
+func (s ChangeTrustResult) xdrType() {} - return -} +var _ xdrType = (*ChangeTrustResult)(nil) -// ManageOfferResultCode is an XDR Enum defines as: +// AllowTrustResultCode is an XDR Enum defines as: // -// enum ManageOfferResultCode +// enum AllowTrustResultCode // { // // codes considered as "success" for the operation -// MANAGE_OFFER_SUCCESS = 0, -// +// ALLOW_TRUST_SUCCESS = 0, // // codes considered as "failure" for the operation -// MANAGE_OFFER_MALFORMED = -1, // generated offer would be invalid -// MANAGE_OFFER_SELL_NO_TRUST = -2, // no trust line for what we're selling -// MANAGE_OFFER_BUY_NO_TRUST = -3, // no trust line for what we're buying -// MANAGE_OFFER_SELL_NOT_AUTHORIZED = -4, // not authorized to sell -// MANAGE_OFFER_BUY_NOT_AUTHORIZED = -5, // not authorized to buy -// MANAGE_OFFER_LINE_FULL = -6, // can't receive more of what it's buying -// MANAGE_OFFER_UNDERFUNDED = -7, // doesn't hold what it's trying to sell -// MANAGE_OFFER_CROSS_SELF = -8, // would cross an offer from the same user -// MANAGE_OFFER_SELL_NO_ISSUER = -9, // no issuer for what we're selling -// MANAGE_OFFER_BUY_NO_ISSUER = -10, // no issuer for what we're buying -// -// // update errors -// MANAGE_OFFER_NOT_FOUND = -11, // offerID does not match an existing offer -// -// MANAGE_OFFER_LOW_RESERVE = -12 // not enough funds to create a new Offer +// ALLOW_TRUST_MALFORMED = -1, // asset is not ASSET_TYPE_ALPHANUM +// ALLOW_TRUST_NO_TRUST_LINE = -2, // trustor does not have a trustline +// // source account does not require trust +// ALLOW_TRUST_TRUST_NOT_REQUIRED = -3, +// ALLOW_TRUST_CANT_REVOKE = -4, // source account can't revoke trust, +// ALLOW_TRUST_SELF_NOT_ALLOWED = -5, // trusting self is not allowed +// ALLOW_TRUST_LOW_RESERVE = -6 // claimable balances can't be created +// // on revoke due to low reserves // }; // -type ManageOfferResultCode int32 +type AllowTrustResultCode int32 const ( - ManageOfferResultCodeManageOfferSuccess ManageOfferResultCode = 0 - ManageOfferResultCodeManageOfferMalformed ManageOfferResultCode = -1 - ManageOfferResultCodeManageOfferSellNoTrust ManageOfferResultCode = -2 - ManageOfferResultCodeManageOfferBuyNoTrust ManageOfferResultCode = -3 - ManageOfferResultCodeManageOfferSellNotAuthorized ManageOfferResultCode = -4 - ManageOfferResultCodeManageOfferBuyNotAuthorized ManageOfferResultCode = -5 - ManageOfferResultCodeManageOfferLineFull ManageOfferResultCode = -6 - ManageOfferResultCodeManageOfferUnderfunded ManageOfferResultCode = -7 - ManageOfferResultCodeManageOfferCrossSelf ManageOfferResultCode = -8 - ManageOfferResultCodeManageOfferSellNoIssuer ManageOfferResultCode = -9 - ManageOfferResultCodeManageOfferBuyNoIssuer ManageOfferResultCode = -10 - ManageOfferResultCodeManageOfferNotFound ManageOfferResultCode = -11 - ManageOfferResultCodeManageOfferLowReserve ManageOfferResultCode = -12 -) - -var manageOfferResultCodeMap = map[int32]string{ - 0: "ManageOfferResultCodeManageOfferSuccess", - -1: "ManageOfferResultCodeManageOfferMalformed", - -2: "ManageOfferResultCodeManageOfferSellNoTrust", - -3: "ManageOfferResultCodeManageOfferBuyNoTrust", - -4: "ManageOfferResultCodeManageOfferSellNotAuthorized", - -5: "ManageOfferResultCodeManageOfferBuyNotAuthorized", - -6: "ManageOfferResultCodeManageOfferLineFull", - -7: "ManageOfferResultCodeManageOfferUnderfunded", - -8: "ManageOfferResultCodeManageOfferCrossSelf", - -9: "ManageOfferResultCodeManageOfferSellNoIssuer", - -10: "ManageOfferResultCodeManageOfferBuyNoIssuer", - -11: 
"ManageOfferResultCodeManageOfferNotFound", - -12: "ManageOfferResultCodeManageOfferLowReserve", + AllowTrustResultCodeAllowTrustSuccess AllowTrustResultCode = 0 + AllowTrustResultCodeAllowTrustMalformed AllowTrustResultCode = -1 + AllowTrustResultCodeAllowTrustNoTrustLine AllowTrustResultCode = -2 + AllowTrustResultCodeAllowTrustTrustNotRequired AllowTrustResultCode = -3 + AllowTrustResultCodeAllowTrustCantRevoke AllowTrustResultCode = -4 + AllowTrustResultCodeAllowTrustSelfNotAllowed AllowTrustResultCode = -5 + AllowTrustResultCodeAllowTrustLowReserve AllowTrustResultCode = -6 +) + +var allowTrustResultCodeMap = map[int32]string{ + 0: "AllowTrustResultCodeAllowTrustSuccess", + -1: "AllowTrustResultCodeAllowTrustMalformed", + -2: "AllowTrustResultCodeAllowTrustNoTrustLine", + -3: "AllowTrustResultCodeAllowTrustTrustNotRequired", + -4: "AllowTrustResultCodeAllowTrustCantRevoke", + -5: "AllowTrustResultCodeAllowTrustSelfNotAllowed", + -6: "AllowTrustResultCodeAllowTrustLowReserve", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for ManageOfferResultCode -func (e ManageOfferResultCode) ValidEnum(v int32) bool { - _, ok := manageOfferResultCodeMap[v] +// the Enum interface for AllowTrustResultCode +func (e AllowTrustResultCode) ValidEnum(v int32) bool { + _, ok := allowTrustResultCodeMap[v] return ok } // String returns the name of `e` -func (e ManageOfferResultCode) String() string { - name, _ := manageOfferResultCodeMap[int32(e)] +func (e AllowTrustResultCode) String() string { + name, _ := allowTrustResultCodeMap[int32(e)] return name } -// ManageOfferEffect is an XDR Enum defines as: -// -// enum ManageOfferEffect -// { -// MANAGE_OFFER_CREATED = 0, -// MANAGE_OFFER_UPDATED = 1, -// MANAGE_OFFER_DELETED = 2 -// }; -// -type ManageOfferEffect int32 +// EncodeTo encodes this value using the Encoder. +func (e AllowTrustResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := allowTrustResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid AllowTrustResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} -const ( - ManageOfferEffectManageOfferCreated ManageOfferEffect = 0 - ManageOfferEffectManageOfferUpdated ManageOfferEffect = 1 - ManageOfferEffectManageOfferDeleted ManageOfferEffect = 2 -) +var _ decoderFrom = (*AllowTrustResultCode)(nil) -var manageOfferEffectMap = map[int32]string{ - 0: "ManageOfferEffectManageOfferCreated", - 1: "ManageOfferEffectManageOfferUpdated", - 2: "ManageOfferEffectManageOfferDeleted", +// DecodeFrom decodes this value using the Decoder. +func (e *AllowTrustResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding AllowTrustResultCode: %s", err) + } + if _, ok := allowTrustResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid AllowTrustResultCode enum value", v) + } + *e = AllowTrustResultCode(v) + return n, nil } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for ManageOfferEffect -func (e ManageOfferEffect) ValidEnum(v int32) bool { - _, ok := manageOfferEffectMap[v] - return ok +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s AllowTrustResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// String returns the name of `e` -func (e ManageOfferEffect) String() string { - name, _ := manageOfferEffectMap[int32(e)] - return name +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AllowTrustResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// ManageOfferSuccessResultOffer is an XDR NestedUnion defines as: +var ( + _ encoding.BinaryMarshaler = (*AllowTrustResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*AllowTrustResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AllowTrustResultCode) xdrType() {} + +var _ xdrType = (*AllowTrustResultCode)(nil) + +// AllowTrustResult is an XDR Union defines as: // -// union switch (ManageOfferEffect effect) -// { -// case MANAGE_OFFER_CREATED: -// case MANAGE_OFFER_UPDATED: -// OfferEntry offer; -// default: -// void; -// } +// union AllowTrustResult switch (AllowTrustResultCode code) +// { +// case ALLOW_TRUST_SUCCESS: +// void; +// default: +// void; +// }; // -type ManageOfferSuccessResultOffer struct { - Effect ManageOfferEffect - Offer *OfferEntry +type AllowTrustResult struct { + Code AllowTrustResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u ManageOfferSuccessResultOffer) SwitchFieldName() string { - return "Effect" +func (u AllowTrustResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of ManageOfferSuccessResultOffer -func (u ManageOfferSuccessResultOffer) ArmForSwitch(sw int32) (string, bool) { - switch ManageOfferEffect(sw) { - case ManageOfferEffectManageOfferCreated: - return "Offer", true - case ManageOfferEffectManageOfferUpdated: - return "Offer", true +// the value for an instance of AllowTrustResult +func (u AllowTrustResult) ArmForSwitch(sw int32) (string, bool) { + switch AllowTrustResultCode(sw) { + case AllowTrustResultCodeAllowTrustSuccess: + return "", true default: return "", true } } -// NewManageOfferSuccessResultOffer creates a new ManageOfferSuccessResultOffer. -func NewManageOfferSuccessResultOffer(effect ManageOfferEffect, value interface{}) (result ManageOfferSuccessResultOffer, err error) { - result.Effect = effect - switch ManageOfferEffect(effect) { - case ManageOfferEffectManageOfferCreated: - tv, ok := value.(OfferEntry) - if !ok { - err = fmt.Errorf("invalid value, must be OfferEntry") - return - } - result.Offer = &tv - case ManageOfferEffectManageOfferUpdated: - tv, ok := value.(OfferEntry) - if !ok { - err = fmt.Errorf("invalid value, must be OfferEntry") - return - } - result.Offer = &tv - default: - // void - } - return +// NewAllowTrustResult creates a new AllowTrustResult. +func NewAllowTrustResult(code AllowTrustResultCode, value interface{}) (result AllowTrustResult, err error) { + result.Code = code + switch AllowTrustResultCode(code) { + case AllowTrustResultCodeAllowTrustSuccess: + // void + default: + // void + } + return +} + +// EncodeTo encodes this value using the Encoder. 
+func (u AllowTrustResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch AllowTrustResultCode(u.Code) { + case AllowTrustResultCodeAllowTrustSuccess: + // Void + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*AllowTrustResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *AllowTrustResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AllowTrustResultCode: %s", err) + } + switch AllowTrustResultCode(u.Code) { + case AllowTrustResultCodeAllowTrustSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AllowTrustResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AllowTrustResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AllowTrustResult)(nil) + _ encoding.BinaryUnmarshaler = (*AllowTrustResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AllowTrustResult) xdrType() {} + +var _ xdrType = (*AllowTrustResult)(nil) + +// AccountMergeResultCode is an XDR Enum defines as: +// +// enum AccountMergeResultCode +// { +// // codes considered as "success" for the operation +// ACCOUNT_MERGE_SUCCESS = 0, +// // codes considered as "failure" for the operation +// ACCOUNT_MERGE_MALFORMED = -1, // can't merge onto itself +// ACCOUNT_MERGE_NO_ACCOUNT = -2, // destination does not exist +// ACCOUNT_MERGE_IMMUTABLE_SET = -3, // source account has AUTH_IMMUTABLE set +// ACCOUNT_MERGE_HAS_SUB_ENTRIES = -4, // account has trust lines/offers +// ACCOUNT_MERGE_SEQNUM_TOO_FAR = -5, // sequence number is over max allowed +// ACCOUNT_MERGE_DEST_FULL = -6, // can't add source balance to +// // destination balance +// ACCOUNT_MERGE_IS_SPONSOR = -7 // can't merge account that is a sponsor +// }; +// +type AccountMergeResultCode int32 + +const ( + AccountMergeResultCodeAccountMergeSuccess AccountMergeResultCode = 0 + AccountMergeResultCodeAccountMergeMalformed AccountMergeResultCode = -1 + AccountMergeResultCodeAccountMergeNoAccount AccountMergeResultCode = -2 + AccountMergeResultCodeAccountMergeImmutableSet AccountMergeResultCode = -3 + AccountMergeResultCodeAccountMergeHasSubEntries AccountMergeResultCode = -4 + AccountMergeResultCodeAccountMergeSeqnumTooFar AccountMergeResultCode = -5 + AccountMergeResultCodeAccountMergeDestFull AccountMergeResultCode = -6 + AccountMergeResultCodeAccountMergeIsSponsor AccountMergeResultCode = -7 +) + +var accountMergeResultCodeMap = map[int32]string{ + 0: "AccountMergeResultCodeAccountMergeSuccess", + -1: "AccountMergeResultCodeAccountMergeMalformed", + -2: "AccountMergeResultCodeAccountMergeNoAccount", + -3: "AccountMergeResultCodeAccountMergeImmutableSet", + -4: "AccountMergeResultCodeAccountMergeHasSubEntries", + -5: "AccountMergeResultCodeAccountMergeSeqnumTooFar", + -6: "AccountMergeResultCodeAccountMergeDestFull", + -7: "AccountMergeResultCodeAccountMergeIsSponsor", +} + +// ValidEnum validates a proposed value for this enum. 
Implements +// the Enum interface for AccountMergeResultCode +func (e AccountMergeResultCode) ValidEnum(v int32) bool { + _, ok := accountMergeResultCodeMap[v] + return ok } -// MustOffer retrieves the Offer value from the union, -// panicing if the value is not set. -func (u ManageOfferSuccessResultOffer) MustOffer() OfferEntry { - val, ok := u.GetOffer() +// String returns the name of `e` +func (e AccountMergeResultCode) String() string { + name, _ := accountMergeResultCodeMap[int32(e)] + return name +} - if !ok { - panic("arm Offer is not set") +// EncodeTo encodes this value using the Encoder. +func (e AccountMergeResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := accountMergeResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid AccountMergeResultCode enum value", e) } - - return val + _, err := enc.EncodeInt(int32(e)) + return err } -// GetOffer retrieves the Offer value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u ManageOfferSuccessResultOffer) GetOffer() (result OfferEntry, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Effect)) +var _ decoderFrom = (*AccountMergeResultCode)(nil) - if armName == "Offer" { - result = *u.Offer - ok = true +// DecodeFrom decodes this value using the Decoder. +func (e *AccountMergeResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding AccountMergeResultCode: %s", err) } + if _, ok := accountMergeResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid AccountMergeResultCode enum value", v) + } + *e = AccountMergeResultCode(v) + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AccountMergeResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// ManageOfferSuccessResult is an XDR Struct defines as: -// -// struct ManageOfferSuccessResult -// { -// // offers that got claimed while creating this offer -// ClaimOfferAtom offersClaimed<>; -// -// union switch (ManageOfferEffect effect) -// { -// case MANAGE_OFFER_CREATED: -// case MANAGE_OFFER_UPDATED: -// OfferEntry offer; -// default: -// void; -// } -// offer; -// }; -// -type ManageOfferSuccessResult struct { - OffersClaimed []ClaimOfferAtom - Offer ManageOfferSuccessResultOffer +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountMergeResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// ManageOfferResult is an XDR Union defines as: +var ( + _ encoding.BinaryMarshaler = (*AccountMergeResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*AccountMergeResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s AccountMergeResultCode) xdrType() {} + +var _ xdrType = (*AccountMergeResultCode)(nil) + +// AccountMergeResult is an XDR Union defines as: // -// union ManageOfferResult switch (ManageOfferResultCode code) +// union AccountMergeResult switch (AccountMergeResultCode code) // { -// case MANAGE_OFFER_SUCCESS: -// ManageOfferSuccessResult success; +// case ACCOUNT_MERGE_SUCCESS: +// int64 sourceAccountBalance; // how much got transferred from source account // default: // void; // }; // -type ManageOfferResult struct { - Code ManageOfferResultCode - Success *ManageOfferSuccessResult +type AccountMergeResult struct { + Code AccountMergeResultCode + SourceAccountBalance *Int64 } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u ManageOfferResult) SwitchFieldName() string { +func (u AccountMergeResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of ManageOfferResult -func (u ManageOfferResult) ArmForSwitch(sw int32) (string, bool) { - switch ManageOfferResultCode(sw) { - case ManageOfferResultCodeManageOfferSuccess: - return "Success", true +// the value for an instance of AccountMergeResult +func (u AccountMergeResult) ArmForSwitch(sw int32) (string, bool) { + switch AccountMergeResultCode(sw) { + case AccountMergeResultCodeAccountMergeSuccess: + return "SourceAccountBalance", true default: return "", true } } -// NewManageOfferResult creates a new ManageOfferResult. -func NewManageOfferResult(code ManageOfferResultCode, value interface{}) (result ManageOfferResult, err error) { +// NewAccountMergeResult creates a new AccountMergeResult. +func NewAccountMergeResult(code AccountMergeResultCode, value interface{}) (result AccountMergeResult, err error) { result.Code = code - switch ManageOfferResultCode(code) { - case ManageOfferResultCodeManageOfferSuccess: - tv, ok := value.(ManageOfferSuccessResult) + switch AccountMergeResultCode(code) { + case AccountMergeResultCodeAccountMergeSuccess: + tv, ok := value.(Int64) if !ok { - err = fmt.Errorf("invalid value, must be ManageOfferSuccessResult") + err = fmt.Errorf("invalid value, must be Int64") return } - result.Success = &tv + result.SourceAccountBalance = &tv default: // void } return } -// MustSuccess retrieves the Success value from the union, +// MustSourceAccountBalance retrieves the SourceAccountBalance value from the union, // panicing if the value is not set. -func (u ManageOfferResult) MustSuccess() ManageOfferSuccessResult { - val, ok := u.GetSuccess() +func (u AccountMergeResult) MustSourceAccountBalance() Int64 { + val, ok := u.GetSourceAccountBalance() if !ok { - panic("arm Success is not set") + panic("arm SourceAccountBalance is not set") } return val } -// GetSuccess retrieves the Success value from the union, +// GetSourceAccountBalance retrieves the SourceAccountBalance value from the union, // returning ok if the union's switch indicated the value is valid. -func (u ManageOfferResult) GetSuccess() (result ManageOfferSuccessResult, ok bool) { +func (u AccountMergeResult) GetSourceAccountBalance() (result Int64, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Code)) - if armName == "Success" { - result = *u.Success + if armName == "SourceAccountBalance" { + result = *u.SourceAccountBalance ok = true } return } -// SetOptionsResultCode is an XDR Enum defines as: +// EncodeTo encodes this value using the Encoder. 
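Unlike the void-armed unions above, a successful AccountMergeResult carries the transferred balance, so the constructor must be given an Int64 and the Get/MustSourceAccountBalance accessors read it back after decoding. A sketch under the same package assumption as the earlier examples:

// Assumes the same package and imports as the first sketch.
func accountMergeExample() {
	res, err := xdr.NewAccountMergeResult(
		xdr.AccountMergeResultCodeAccountMergeSuccess,
		xdr.Int64(9986000000), // example balance; the value must be an Int64 or the constructor errors
	)
	if err != nil {
		panic(err)
	}

	// Encoding cannot fail here: the code is valid and the arm is set,
	// so raw is the 4-byte code followed by the 8-byte balance.
	raw, _ := res.MarshalBinary()

	var decoded xdr.AccountMergeResult
	if err := decoded.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	if balance, ok := decoded.GetSourceAccountBalance(); ok {
		fmt.Println(balance) // 9986000000
	}
}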
+func (u AccountMergeResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch AccountMergeResultCode(u.Code) { + case AccountMergeResultCodeAccountMergeSuccess: + if err = (*u.SourceAccountBalance).EncodeTo(e); err != nil { + return err + } + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*AccountMergeResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *AccountMergeResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountMergeResultCode: %s", err) + } + switch AccountMergeResultCode(u.Code) { + case AccountMergeResultCodeAccountMergeSuccess: + u.SourceAccountBalance = new(Int64) + nTmp, err = (*u.SourceAccountBalance).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s AccountMergeResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *AccountMergeResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*AccountMergeResult)(nil) + _ encoding.BinaryUnmarshaler = (*AccountMergeResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s AccountMergeResult) xdrType() {} + +var _ xdrType = (*AccountMergeResult)(nil) + +// InflationResultCode is an XDR Enum defines as: // -// enum SetOptionsResultCode +// enum InflationResultCode // { // // codes considered as "success" for the operation -// SET_OPTIONS_SUCCESS = 0, +// INFLATION_SUCCESS = 0, // // codes considered as "failure" for the operation -// SET_OPTIONS_LOW_RESERVE = -1, // not enough funds to add a signer -// SET_OPTIONS_TOO_MANY_SIGNERS = -2, // max number of signers already reached -// SET_OPTIONS_BAD_FLAGS = -3, // invalid combination of clear/set flags -// SET_OPTIONS_INVALID_INFLATION = -4, // inflation account does not exist -// SET_OPTIONS_CANT_CHANGE = -5, // can no longer change this option -// SET_OPTIONS_UNKNOWN_FLAG = -6, // can't set an unknown flag -// SET_OPTIONS_THRESHOLD_OUT_OF_RANGE = -7, // bad value for weight/threshold -// SET_OPTIONS_BAD_SIGNER = -8, // signer cannot be masterkey -// SET_OPTIONS_INVALID_HOME_DOMAIN = -9 // malformed home domain +// INFLATION_NOT_TIME = -1 // }; // -type SetOptionsResultCode int32 +type InflationResultCode int32 const ( - SetOptionsResultCodeSetOptionsSuccess SetOptionsResultCode = 0 - SetOptionsResultCodeSetOptionsLowReserve SetOptionsResultCode = -1 - SetOptionsResultCodeSetOptionsTooManySigners SetOptionsResultCode = -2 - SetOptionsResultCodeSetOptionsBadFlags SetOptionsResultCode = -3 - SetOptionsResultCodeSetOptionsInvalidInflation SetOptionsResultCode = -4 - SetOptionsResultCodeSetOptionsCantChange SetOptionsResultCode = -5 - SetOptionsResultCodeSetOptionsUnknownFlag SetOptionsResultCode = -6 - SetOptionsResultCodeSetOptionsThresholdOutOfRange SetOptionsResultCode = -7 - SetOptionsResultCodeSetOptionsBadSigner SetOptionsResultCode = -8 - SetOptionsResultCodeSetOptionsInvalidHomeDomain SetOptionsResultCode 
= -9 + InflationResultCodeInflationSuccess InflationResultCode = 0 + InflationResultCodeInflationNotTime InflationResultCode = -1 ) -var setOptionsResultCodeMap = map[int32]string{ - 0: "SetOptionsResultCodeSetOptionsSuccess", - -1: "SetOptionsResultCodeSetOptionsLowReserve", - -2: "SetOptionsResultCodeSetOptionsTooManySigners", - -3: "SetOptionsResultCodeSetOptionsBadFlags", - -4: "SetOptionsResultCodeSetOptionsInvalidInflation", - -5: "SetOptionsResultCodeSetOptionsCantChange", - -6: "SetOptionsResultCodeSetOptionsUnknownFlag", - -7: "SetOptionsResultCodeSetOptionsThresholdOutOfRange", - -8: "SetOptionsResultCodeSetOptionsBadSigner", - -9: "SetOptionsResultCodeSetOptionsInvalidHomeDomain", +var inflationResultCodeMap = map[int32]string{ + 0: "InflationResultCodeInflationSuccess", + -1: "InflationResultCodeInflationNotTime", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for SetOptionsResultCode -func (e SetOptionsResultCode) ValidEnum(v int32) bool { - _, ok := setOptionsResultCodeMap[v] +// the Enum interface for InflationResultCode +func (e InflationResultCode) ValidEnum(v int32) bool { + _, ok := inflationResultCodeMap[v] return ok } // String returns the name of `e` -func (e SetOptionsResultCode) String() string { - name, _ := setOptionsResultCodeMap[int32(e)] +func (e InflationResultCode) String() string { + name, _ := inflationResultCodeMap[int32(e)] return name } -// SetOptionsResult is an XDR Union defines as: -// -// union SetOptionsResult switch (SetOptionsResultCode code) -// { -// case SET_OPTIONS_SUCCESS: -// void; -// default: -// void; -// }; -// -type SetOptionsResult struct { - Code SetOptionsResultCode +// EncodeTo encodes this value using the Encoder. +func (e InflationResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := inflationResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid InflationResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u SetOptionsResult) SwitchFieldName() string { - return "Code" -} +var _ decoderFrom = (*InflationResultCode)(nil) -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of SetOptionsResult -func (u SetOptionsResult) ArmForSwitch(sw int32) (string, bool) { - switch SetOptionsResultCode(sw) { - case SetOptionsResultCodeSetOptionsSuccess: - return "", true - default: - return "", true +// DecodeFrom decodes this value using the Decoder. +func (e *InflationResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding InflationResultCode: %s", err) + } + if _, ok := inflationResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid InflationResultCode enum value", v) } + *e = InflationResultCode(v) + return n, nil } -// NewSetOptionsResult creates a new SetOptionsResult. -func NewSetOptionsResult(code SetOptionsResultCode, value interface{}) (result SetOptionsResult, err error) { - result.Code = code - switch SetOptionsResultCode(code) { - case SetOptionsResultCodeSetOptionsSuccess: - // void - default: - // void - } - return +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s InflationResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// ChangeTrustResultCode is an XDR Enum defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *InflationResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*InflationResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*InflationResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s InflationResultCode) xdrType() {} + +var _ xdrType = (*InflationResultCode)(nil) + +// InflationPayout is an XDR Struct defines as: // -// enum ChangeTrustResultCode +// struct InflationPayout // or use PaymentResultAtom to limit types? // { -// // codes considered as "success" for the operation -// CHANGE_TRUST_SUCCESS = 0, -// // codes considered as "failure" for the operation -// CHANGE_TRUST_MALFORMED = -1, // bad input -// CHANGE_TRUST_NO_ISSUER = -2, // could not find issuer -// CHANGE_TRUST_INVALID_LIMIT = -3, // cannot drop limit below balance -// // cannot create with a limit of 0 -// CHANGE_TRUST_LOW_RESERVE = -4 // not enough funds to create a new trust line +// AccountID destination; +// int64 amount; // }; // -type ChangeTrustResultCode int32 - -const ( - ChangeTrustResultCodeChangeTrustSuccess ChangeTrustResultCode = 0 - ChangeTrustResultCodeChangeTrustMalformed ChangeTrustResultCode = -1 - ChangeTrustResultCodeChangeTrustNoIssuer ChangeTrustResultCode = -2 - ChangeTrustResultCodeChangeTrustInvalidLimit ChangeTrustResultCode = -3 - ChangeTrustResultCodeChangeTrustLowReserve ChangeTrustResultCode = -4 -) +type InflationPayout struct { + Destination AccountId + Amount Int64 +} -var changeTrustResultCodeMap = map[int32]string{ - 0: "ChangeTrustResultCodeChangeTrustSuccess", - -1: "ChangeTrustResultCodeChangeTrustMalformed", - -2: "ChangeTrustResultCodeChangeTrustNoIssuer", - -3: "ChangeTrustResultCodeChangeTrustInvalidLimit", - -4: "ChangeTrustResultCodeChangeTrustLowReserve", +// EncodeTo encodes this value using the Encoder. +func (s *InflationPayout) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.Destination.EncodeTo(e); err != nil { + return err + } + if err = s.Amount.EncodeTo(e); err != nil { + return err + } + return nil } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for ChangeTrustResultCode -func (e ChangeTrustResultCode) ValidEnum(v int32) bool { - _, ok := changeTrustResultCodeMap[v] - return ok +var _ decoderFrom = (*InflationPayout)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *InflationPayout) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.Destination.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountId: %s", err) + } + nTmp, err = s.Amount.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + return n, nil } -// String returns the name of `e` -func (e ChangeTrustResultCode) String() string { - name, _ := changeTrustResultCodeMap[int32(e)] - return name +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s InflationPayout) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *InflationPayout) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// ChangeTrustResult is an XDR Union defines as: +var ( + _ encoding.BinaryMarshaler = (*InflationPayout)(nil) + _ encoding.BinaryUnmarshaler = (*InflationPayout)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s InflationPayout) xdrType() {} + +var _ xdrType = (*InflationPayout)(nil) + +// InflationResult is an XDR Union defines as: // -// union ChangeTrustResult switch (ChangeTrustResultCode code) +// union InflationResult switch (InflationResultCode code) // { -// case CHANGE_TRUST_SUCCESS: -// void; +// case INFLATION_SUCCESS: +// InflationPayout payouts<>; // default: // void; // }; // -type ChangeTrustResult struct { - Code ChangeTrustResultCode +type InflationResult struct { + Code InflationResultCode + Payouts *[]InflationPayout } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u ChangeTrustResult) SwitchFieldName() string { +func (u InflationResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of ChangeTrustResult -func (u ChangeTrustResult) ArmForSwitch(sw int32) (string, bool) { - switch ChangeTrustResultCode(sw) { - case ChangeTrustResultCodeChangeTrustSuccess: - return "", true +// the value for an instance of InflationResult +func (u InflationResult) ArmForSwitch(sw int32) (string, bool) { + switch InflationResultCode(sw) { + case InflationResultCodeInflationSuccess: + return "Payouts", true default: return "", true } } -// NewChangeTrustResult creates a new ChangeTrustResult. -func NewChangeTrustResult(code ChangeTrustResultCode, value interface{}) (result ChangeTrustResult, err error) { +// NewInflationResult creates a new InflationResult. +func NewInflationResult(code InflationResultCode, value interface{}) (result InflationResult, err error) { result.Code = code - switch ChangeTrustResultCode(code) { - case ChangeTrustResultCodeChangeTrustSuccess: - // void + switch InflationResultCode(code) { + case InflationResultCodeInflationSuccess: + tv, ok := value.([]InflationPayout) + if !ok { + err = fmt.Errorf("invalid value, must be []InflationPayout") + return + } + result.Payouts = &tv default: // void } return } -// AllowTrustResultCode is an XDR Enum defines as: +// MustPayouts retrieves the Payouts value from the union, +// panicing if the value is not set. +func (u InflationResult) MustPayouts() []InflationPayout { + val, ok := u.GetPayouts() + + if !ok { + panic("arm Payouts is not set") + } + + return val +} + +// GetPayouts retrieves the Payouts value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u InflationResult) GetPayouts() (result []InflationPayout, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) + + if armName == "Payouts" { + result = *u.Payouts + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
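The union constructor and accessors generated above are the intended entry points: NewInflationResult checks that the value matches the arm selected by the code, GetPayouts reports whether the arm is set, and MustPayouts panics when it is not. A small usage sketch (same-package assumption, hypothetical helper):

func buildInflationResult(payouts []InflationPayout) (InflationResult, error) {
	res, err := NewInflationResult(InflationResultCodeInflationSuccess, payouts)
	if err != nil {
		return InflationResult{}, err
	}
	// Prefer GetPayouts over MustPayouts unless an unset arm is a programming bug.
	if got, ok := res.GetPayouts(); ok {
		_ = got // payouts carried by the success arm
	}
	return res, nil
}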
+func (u InflationResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch InflationResultCode(u.Code) { + case InflationResultCodeInflationSuccess: + if _, err = e.EncodeUint(uint32(len((*u.Payouts)))); err != nil { + return err + } + for i := 0; i < len((*u.Payouts)); i++ { + if err = (*u.Payouts)[i].EncodeTo(e); err != nil { + return err + } + } + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*InflationResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *InflationResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding InflationResultCode: %s", err) + } + switch InflationResultCode(u.Code) { + case InflationResultCodeInflationSuccess: + u.Payouts = new([]InflationPayout) + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding InflationPayout: %s", err) + } + (*u.Payouts) = nil + if l > 0 { + (*u.Payouts) = make([]InflationPayout, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*u.Payouts)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding InflationPayout: %s", err) + } + } + } + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s InflationResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *InflationResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*InflationResult)(nil) + _ encoding.BinaryUnmarshaler = (*InflationResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
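As the EncodeTo/DecodeFrom pair above shows, the payouts arm is a variable-length XDR array: a 4-byte element count followed by each InflationPayout. A hedged sketch decoding the smallest possible success result, assuming standard big-endian XDR integers and the same-package placement used in the other sketches:

func decodeEmptyInflationSuccess() (InflationResult, error) {
	// 4-byte code (INFLATION_SUCCESS = 0) followed by a 4-byte array length of 0.
	raw := []byte{0, 0, 0, 0, 0, 0, 0, 0}
	var res InflationResult
	if err := res.UnmarshalBinary(raw); err != nil {
		return InflationResult{}, err
	}
	payouts, ok := res.GetPayouts() // ok is true; no payouts were encoded
	_, _ = payouts, ok
	return res, nil
}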
+func (s InflationResult) xdrType() {} + +var _ xdrType = (*InflationResult)(nil) + +// ManageDataResultCode is an XDR Enum defines as: // -// enum AllowTrustResultCode +// enum ManageDataResultCode // { // // codes considered as "success" for the operation -// ALLOW_TRUST_SUCCESS = 0, +// MANAGE_DATA_SUCCESS = 0, // // codes considered as "failure" for the operation -// ALLOW_TRUST_MALFORMED = -1, // asset is not ASSET_TYPE_ALPHANUM -// ALLOW_TRUST_NO_TRUST_LINE = -2, // trustor does not have a trustline -// // source account does not require trust -// ALLOW_TRUST_TRUST_NOT_REQUIRED = -3, -// ALLOW_TRUST_CANT_REVOKE = -4 // source account can't revoke trust +// MANAGE_DATA_NOT_SUPPORTED_YET = +// -1, // The network hasn't moved to this protocol change yet +// MANAGE_DATA_NAME_NOT_FOUND = +// -2, // Trying to remove a Data Entry that isn't there +// MANAGE_DATA_LOW_RESERVE = -3, // not enough funds to create a new Data Entry +// MANAGE_DATA_INVALID_NAME = -4 // Name not a valid string // }; // -type AllowTrustResultCode int32 +type ManageDataResultCode int32 const ( - AllowTrustResultCodeAllowTrustSuccess AllowTrustResultCode = 0 - AllowTrustResultCodeAllowTrustMalformed AllowTrustResultCode = -1 - AllowTrustResultCodeAllowTrustNoTrustLine AllowTrustResultCode = -2 - AllowTrustResultCodeAllowTrustTrustNotRequired AllowTrustResultCode = -3 - AllowTrustResultCodeAllowTrustCantRevoke AllowTrustResultCode = -4 + ManageDataResultCodeManageDataSuccess ManageDataResultCode = 0 + ManageDataResultCodeManageDataNotSupportedYet ManageDataResultCode = -1 + ManageDataResultCodeManageDataNameNotFound ManageDataResultCode = -2 + ManageDataResultCodeManageDataLowReserve ManageDataResultCode = -3 + ManageDataResultCodeManageDataInvalidName ManageDataResultCode = -4 ) -var allowTrustResultCodeMap = map[int32]string{ - 0: "AllowTrustResultCodeAllowTrustSuccess", - -1: "AllowTrustResultCodeAllowTrustMalformed", - -2: "AllowTrustResultCodeAllowTrustNoTrustLine", - -3: "AllowTrustResultCodeAllowTrustTrustNotRequired", - -4: "AllowTrustResultCodeAllowTrustCantRevoke", +var manageDataResultCodeMap = map[int32]string{ + 0: "ManageDataResultCodeManageDataSuccess", + -1: "ManageDataResultCodeManageDataNotSupportedYet", + -2: "ManageDataResultCodeManageDataNameNotFound", + -3: "ManageDataResultCodeManageDataLowReserve", + -4: "ManageDataResultCodeManageDataInvalidName", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for AllowTrustResultCode -func (e AllowTrustResultCode) ValidEnum(v int32) bool { - _, ok := allowTrustResultCodeMap[v] +// the Enum interface for ManageDataResultCode +func (e ManageDataResultCode) ValidEnum(v int32) bool { + _, ok := manageDataResultCodeMap[v] return ok } // String returns the name of `e` -func (e AllowTrustResultCode) String() string { - name, _ := allowTrustResultCodeMap[int32(e)] +func (e ManageDataResultCode) String() string { + name, _ := manageDataResultCodeMap[int32(e)] return name } -// AllowTrustResult is an XDR Union defines as: +// EncodeTo encodes this value using the Encoder. +func (e ManageDataResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := manageDataResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ManageDataResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ManageDataResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. 
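Same pattern as the other result-code enums: ValidEnum and String are lookups into manageDataResultCodeMap, with String returning "" for unknown values. A small logging-oriented sketch (hypothetical helper, same-package assumption):

func manageDataCodeName(c ManageDataResultCode) string {
	if !c.ValidEnum(int32(c)) {
		// Fall back to the numeric value rather than an empty string.
		return fmt.Sprintf("ManageDataResultCode(%d)", int32(c))
	}
	return c.String()
}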
+func (e *ManageDataResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ManageDataResultCode: %s", err) + } + if _, ok := manageDataResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ManageDataResultCode enum value", v) + } + *e = ManageDataResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageDataResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageDataResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageDataResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*ManageDataResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageDataResultCode) xdrType() {} + +var _ xdrType = (*ManageDataResultCode)(nil) + +// ManageDataResult is an XDR Union defines as: // -// union AllowTrustResult switch (AllowTrustResultCode code) +// union ManageDataResult switch (ManageDataResultCode code) // { -// case ALLOW_TRUST_SUCCESS: +// case MANAGE_DATA_SUCCESS: // void; // default: // void; // }; // -type AllowTrustResult struct { - Code AllowTrustResultCode +type ManageDataResult struct { + Code ManageDataResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u AllowTrustResult) SwitchFieldName() string { +func (u ManageDataResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of AllowTrustResult -func (u AllowTrustResult) ArmForSwitch(sw int32) (string, bool) { - switch AllowTrustResultCode(sw) { - case AllowTrustResultCodeAllowTrustSuccess: +// the value for an instance of ManageDataResult +func (u ManageDataResult) ArmForSwitch(sw int32) (string, bool) { + switch ManageDataResultCode(sw) { + case ManageDataResultCodeManageDataSuccess: return "", true default: return "", true } } -// NewAllowTrustResult creates a new AllowTrustResult. -func NewAllowTrustResult(code AllowTrustResultCode, value interface{}) (result AllowTrustResult, err error) { +// NewManageDataResult creates a new ManageDataResult. +func NewManageDataResult(code ManageDataResultCode, value interface{}) (result ManageDataResult, err error) { result.Code = code - switch AllowTrustResultCode(code) { - case AllowTrustResultCodeAllowTrustSuccess: + switch ManageDataResultCode(code) { + case ManageDataResultCodeManageDataSuccess: // void default: // void @@ -3444,326 +28170,638 @@ func NewAllowTrustResult(code AllowTrustResultCode, value interface{}) (result A return } -// AccountMergeResultCode is an XDR Enum defines as: +// EncodeTo encodes this value using the Encoder. +func (u ManageDataResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch ManageDataResultCode(u.Code) { + case ManageDataResultCodeManageDataSuccess: + // Void + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*ManageDataResult)(nil) + +// DecodeFrom decodes this value using the Decoder. 
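DecodeFrom rejects discriminants that are not in the generated map, which is what keeps corrupted or future-protocol values from silently becoming zero values. A sketch, assuming standard big-endian XDR int32 encoding (hypothetical helper):

func rejectUnknownManageDataCode() error {
	// int32(-5) is outside manageDataResultCodeMap, so decoding should fail.
	raw := []byte{0xff, 0xff, 0xff, 0xfb}
	var code ManageDataResultCode
	if err := code.UnmarshalBinary(raw); err == nil {
		return fmt.Errorf("expected an error for unknown code -5")
	}
	return nil
}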
+func (u *ManageDataResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageDataResultCode: %s", err) + } + switch ManageDataResultCode(u.Code) { + case ManageDataResultCodeManageDataSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ManageDataResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ManageDataResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ManageDataResult)(nil) + _ encoding.BinaryUnmarshaler = (*ManageDataResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ManageDataResult) xdrType() {} + +var _ xdrType = (*ManageDataResult)(nil) + +// BumpSequenceResultCode is an XDR Enum defines as: // -// enum AccountMergeResultCode +// enum BumpSequenceResultCode // { // // codes considered as "success" for the operation -// ACCOUNT_MERGE_SUCCESS = 0, +// BUMP_SEQUENCE_SUCCESS = 0, // // codes considered as "failure" for the operation -// ACCOUNT_MERGE_MALFORMED = -1, // can't merge onto itself -// ACCOUNT_MERGE_NO_ACCOUNT = -2, // destination does not exist -// ACCOUNT_MERGE_IMMUTABLE_SET = -3, // source account has AUTH_IMMUTABLE set -// ACCOUNT_MERGE_HAS_SUB_ENTRIES = -4 // account has trust lines/offers +// BUMP_SEQUENCE_BAD_SEQ = -1 // `bumpTo` is not within bounds // }; // -type AccountMergeResultCode int32 +type BumpSequenceResultCode int32 const ( - AccountMergeResultCodeAccountMergeSuccess AccountMergeResultCode = 0 - AccountMergeResultCodeAccountMergeMalformed AccountMergeResultCode = -1 - AccountMergeResultCodeAccountMergeNoAccount AccountMergeResultCode = -2 - AccountMergeResultCodeAccountMergeImmutableSet AccountMergeResultCode = -3 - AccountMergeResultCodeAccountMergeHasSubEntries AccountMergeResultCode = -4 + BumpSequenceResultCodeBumpSequenceSuccess BumpSequenceResultCode = 0 + BumpSequenceResultCodeBumpSequenceBadSeq BumpSequenceResultCode = -1 ) -var accountMergeResultCodeMap = map[int32]string{ - 0: "AccountMergeResultCodeAccountMergeSuccess", - -1: "AccountMergeResultCodeAccountMergeMalformed", - -2: "AccountMergeResultCodeAccountMergeNoAccount", - -3: "AccountMergeResultCodeAccountMergeImmutableSet", - -4: "AccountMergeResultCodeAccountMergeHasSubEntries", +var bumpSequenceResultCodeMap = map[int32]string{ + 0: "BumpSequenceResultCodeBumpSequenceSuccess", + -1: "BumpSequenceResultCodeBumpSequenceBadSeq", } // ValidEnum validates a proposed value for this enum. 
Implements -// the Enum interface for AccountMergeResultCode -func (e AccountMergeResultCode) ValidEnum(v int32) bool { - _, ok := accountMergeResultCodeMap[v] +// the Enum interface for BumpSequenceResultCode +func (e BumpSequenceResultCode) ValidEnum(v int32) bool { + _, ok := bumpSequenceResultCodeMap[v] return ok } // String returns the name of `e` -func (e AccountMergeResultCode) String() string { - name, _ := accountMergeResultCodeMap[int32(e)] +func (e BumpSequenceResultCode) String() string { + name, _ := bumpSequenceResultCodeMap[int32(e)] return name } -// AccountMergeResult is an XDR Union defines as: +// EncodeTo encodes this value using the Encoder. +func (e BumpSequenceResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := bumpSequenceResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid BumpSequenceResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*BumpSequenceResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *BumpSequenceResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding BumpSequenceResultCode: %s", err) + } + if _, ok := bumpSequenceResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid BumpSequenceResultCode enum value", v) + } + *e = BumpSequenceResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s BumpSequenceResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *BumpSequenceResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*BumpSequenceResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*BumpSequenceResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
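The BumpSequence code follows the same template; the only wire payload for a bare code is the 4-byte signed integer itself. A sketch under the usual same-package assumption (hypothetical helper name):

func encodeBumpSequenceBadSeq() ([]byte, error) {
	// BUMP_SEQUENCE_BAD_SEQ (-1) is a known map entry, so EncodeTo accepts it.
	raw, err := BumpSequenceResultCodeBumpSequenceBadSeq.MarshalBinary()
	if err != nil {
		return nil, err
	}
	if len(raw) != 4 {
		return nil, fmt.Errorf("unexpected encoded length %d", len(raw))
	}
	return raw, nil
}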
+func (s BumpSequenceResultCode) xdrType() {} + +var _ xdrType = (*BumpSequenceResultCode)(nil) + +// BumpSequenceResult is an XDR Union defines as: // -// union AccountMergeResult switch (AccountMergeResultCode code) +// union BumpSequenceResult switch (BumpSequenceResultCode code) // { -// case ACCOUNT_MERGE_SUCCESS: -// int64 sourceAccountBalance; // how much got transfered from source account +// case BUMP_SEQUENCE_SUCCESS: +// void; // default: // void; // }; // -type AccountMergeResult struct { - Code AccountMergeResultCode - SourceAccountBalance *Int64 +type BumpSequenceResult struct { + Code BumpSequenceResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u AccountMergeResult) SwitchFieldName() string { +func (u BumpSequenceResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of AccountMergeResult -func (u AccountMergeResult) ArmForSwitch(sw int32) (string, bool) { - switch AccountMergeResultCode(sw) { - case AccountMergeResultCodeAccountMergeSuccess: - return "SourceAccountBalance", true +// the value for an instance of BumpSequenceResult +func (u BumpSequenceResult) ArmForSwitch(sw int32) (string, bool) { + switch BumpSequenceResultCode(sw) { + case BumpSequenceResultCodeBumpSequenceSuccess: + return "", true default: return "", true } } -// NewAccountMergeResult creates a new AccountMergeResult. -func NewAccountMergeResult(code AccountMergeResultCode, value interface{}) (result AccountMergeResult, err error) { +// NewBumpSequenceResult creates a new BumpSequenceResult. +func NewBumpSequenceResult(code BumpSequenceResultCode, value interface{}) (result BumpSequenceResult, err error) { result.Code = code - switch AccountMergeResultCode(code) { - case AccountMergeResultCodeAccountMergeSuccess: - tv, ok := value.(Int64) - if !ok { - err = fmt.Errorf("invalid value, must be Int64") - return - } - result.SourceAccountBalance = &tv + switch BumpSequenceResultCode(code) { + case BumpSequenceResultCodeBumpSequenceSuccess: + // void default: // void } return } -// MustSourceAccountBalance retrieves the SourceAccountBalance value from the union, -// panicing if the value is not set. -func (u AccountMergeResult) MustSourceAccountBalance() Int64 { - val, ok := u.GetSourceAccountBalance() - - if !ok { - panic("arm SourceAccountBalance is not set") +// EncodeTo encodes this value using the Encoder. +func (u BumpSequenceResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch BumpSequenceResultCode(u.Code) { + case BumpSequenceResultCodeBumpSequenceSuccess: + // Void + return nil + default: + // Void + return nil } - - return val } -// GetSourceAccountBalance retrieves the SourceAccountBalance value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u AccountMergeResult) GetSourceAccountBalance() (result Int64, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Code)) +var _ decoderFrom = (*BumpSequenceResult)(nil) - if armName == "SourceAccountBalance" { - result = *u.SourceAccountBalance - ok = true +// DecodeFrom decodes this value using the Decoder. 
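Because both arms of BumpSequenceResult are void, the whole union should serialize to nothing more than its 4-byte code. A sketch (same-package assumption; MarshalBinary is the generated wrapper that appears just below):

func bumpSequenceResultWireSize() (int, error) {
	res, err := NewBumpSequenceResult(BumpSequenceResultCodeBumpSequenceSuccess, nil)
	if err != nil {
		return 0, err
	}
	raw, err := res.MarshalBinary()
	if err != nil {
		return 0, err
	}
	return len(raw), nil // expected: 4, the code alone
}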
+func (u *BumpSequenceResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding BumpSequenceResultCode: %s", err) + } + switch BumpSequenceResultCode(u.Code) { + case BumpSequenceResultCodeBumpSequenceSuccess: + // Void + return n, nil + default: + // Void + return n, nil } +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s BumpSequenceResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// InflationResultCode is an XDR Enum defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *BumpSequenceResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*BumpSequenceResult)(nil) + _ encoding.BinaryUnmarshaler = (*BumpSequenceResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s BumpSequenceResult) xdrType() {} + +var _ xdrType = (*BumpSequenceResult)(nil) + +// CreateClaimableBalanceResultCode is an XDR Enum defines as: // -// enum InflationResultCode +// enum CreateClaimableBalanceResultCode // { -// // codes considered as "success" for the operation -// INFLATION_SUCCESS = 0, -// // codes considered as "failure" for the operation -// INFLATION_NOT_TIME = -1 +// CREATE_CLAIMABLE_BALANCE_SUCCESS = 0, +// CREATE_CLAIMABLE_BALANCE_MALFORMED = -1, +// CREATE_CLAIMABLE_BALANCE_LOW_RESERVE = -2, +// CREATE_CLAIMABLE_BALANCE_NO_TRUST = -3, +// CREATE_CLAIMABLE_BALANCE_NOT_AUTHORIZED = -4, +// CREATE_CLAIMABLE_BALANCE_UNDERFUNDED = -5 // }; // -type InflationResultCode int32 +type CreateClaimableBalanceResultCode int32 const ( - InflationResultCodeInflationSuccess InflationResultCode = 0 - InflationResultCodeInflationNotTime InflationResultCode = -1 + CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess CreateClaimableBalanceResultCode = 0 + CreateClaimableBalanceResultCodeCreateClaimableBalanceMalformed CreateClaimableBalanceResultCode = -1 + CreateClaimableBalanceResultCodeCreateClaimableBalanceLowReserve CreateClaimableBalanceResultCode = -2 + CreateClaimableBalanceResultCodeCreateClaimableBalanceNoTrust CreateClaimableBalanceResultCode = -3 + CreateClaimableBalanceResultCodeCreateClaimableBalanceNotAuthorized CreateClaimableBalanceResultCode = -4 + CreateClaimableBalanceResultCodeCreateClaimableBalanceUnderfunded CreateClaimableBalanceResultCode = -5 ) -var inflationResultCodeMap = map[int32]string{ - 0: "InflationResultCodeInflationSuccess", - -1: "InflationResultCodeInflationNotTime", +var createClaimableBalanceResultCodeMap = map[int32]string{ + 0: "CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess", + -1: "CreateClaimableBalanceResultCodeCreateClaimableBalanceMalformed", + -2: "CreateClaimableBalanceResultCodeCreateClaimableBalanceLowReserve", + -3: "CreateClaimableBalanceResultCodeCreateClaimableBalanceNoTrust", + -4: "CreateClaimableBalanceResultCodeCreateClaimableBalanceNotAuthorized", + -5: "CreateClaimableBalanceResultCodeCreateClaimableBalanceUnderfunded", } // ValidEnum validates a proposed value for this enum. 
Implements -// the Enum interface for InflationResultCode -func (e InflationResultCode) ValidEnum(v int32) bool { - _, ok := inflationResultCodeMap[v] +// the Enum interface for CreateClaimableBalanceResultCode +func (e CreateClaimableBalanceResultCode) ValidEnum(v int32) bool { + _, ok := createClaimableBalanceResultCodeMap[v] return ok } // String returns the name of `e` -func (e InflationResultCode) String() string { - name, _ := inflationResultCodeMap[int32(e)] +func (e CreateClaimableBalanceResultCode) String() string { + name, _ := createClaimableBalanceResultCodeMap[int32(e)] return name } -// InflationPayout is an XDR Struct defines as: -// -// struct InflationPayout // or use PaymentResultAtom to limit types? -// { -// AccountID destination; -// int64 amount; -// }; -// -type InflationPayout struct { - Destination AccountId - Amount Int64 +// EncodeTo encodes this value using the Encoder. +func (e CreateClaimableBalanceResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := createClaimableBalanceResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid CreateClaimableBalanceResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// InflationResult is an XDR Union defines as: +var _ decoderFrom = (*CreateClaimableBalanceResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *CreateClaimableBalanceResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding CreateClaimableBalanceResultCode: %s", err) + } + if _, ok := createClaimableBalanceResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid CreateClaimableBalanceResultCode enum value", v) + } + *e = CreateClaimableBalanceResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s CreateClaimableBalanceResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *CreateClaimableBalanceResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*CreateClaimableBalanceResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*CreateClaimableBalanceResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s CreateClaimableBalanceResultCode) xdrType() {} + +var _ xdrType = (*CreateClaimableBalanceResultCode)(nil) + +// CreateClaimableBalanceResult is an XDR Union defines as: // -// union InflationResult switch (InflationResultCode code) +// union CreateClaimableBalanceResult switch ( +// CreateClaimableBalanceResultCode code) // { -// case INFLATION_SUCCESS: -// InflationPayout payouts<>; +// case CREATE_CLAIMABLE_BALANCE_SUCCESS: +// ClaimableBalanceID balanceID; // default: // void; // }; // -type InflationResult struct { - Code InflationResultCode - Payouts *[]InflationPayout +type CreateClaimableBalanceResult struct { + Code CreateClaimableBalanceResultCode + BalanceId *ClaimableBalanceId } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u InflationResult) SwitchFieldName() string { +func (u CreateClaimableBalanceResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of InflationResult -func (u InflationResult) ArmForSwitch(sw int32) (string, bool) { - switch InflationResultCode(sw) { - case InflationResultCodeInflationSuccess: - return "Payouts", true +// the value for an instance of CreateClaimableBalanceResult +func (u CreateClaimableBalanceResult) ArmForSwitch(sw int32) (string, bool) { + switch CreateClaimableBalanceResultCode(sw) { + case CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess: + return "BalanceId", true default: return "", true } } -// NewInflationResult creates a new InflationResult. -func NewInflationResult(code InflationResultCode, value interface{}) (result InflationResult, err error) { +// NewCreateClaimableBalanceResult creates a new CreateClaimableBalanceResult. +func NewCreateClaimableBalanceResult(code CreateClaimableBalanceResultCode, value interface{}) (result CreateClaimableBalanceResult, err error) { result.Code = code - switch InflationResultCode(code) { - case InflationResultCodeInflationSuccess: - tv, ok := value.([]InflationPayout) + switch CreateClaimableBalanceResultCode(code) { + case CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess: + tv, ok := value.(ClaimableBalanceId) if !ok { - err = fmt.Errorf("invalid value, must be []InflationPayout") + err = fmt.Errorf("invalid value, must be ClaimableBalanceId") return } - result.Payouts = &tv + result.BalanceId = &tv default: // void } return } -// MustPayouts retrieves the Payouts value from the union, +// MustBalanceId retrieves the BalanceId value from the union, // panicing if the value is not set. -func (u InflationResult) MustPayouts() []InflationPayout { - val, ok := u.GetPayouts() +func (u CreateClaimableBalanceResult) MustBalanceId() ClaimableBalanceId { + val, ok := u.GetBalanceId() if !ok { - panic("arm Payouts is not set") + panic("arm BalanceId is not set") } return val } -// GetPayouts retrieves the Payouts value from the union, +// GetBalanceId retrieves the BalanceId value from the union, // returning ok if the union's switch indicated the value is valid. -func (u InflationResult) GetPayouts() (result []InflationPayout, ok bool) { +func (u CreateClaimableBalanceResult) GetBalanceId() (result ClaimableBalanceId, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Code)) - if armName == "Payouts" { - result = *u.Payouts + if armName == "BalanceId" { + result = *u.BalanceId ok = true } return } -// ManageDataResultCode is an XDR Enum defines as: +// EncodeTo encodes this value using the Encoder. 
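Unlike the void unions above, CreateClaimableBalanceResult carries a ClaimableBalanceId on its success arm, so the constructor enforces the value type and the accessors expose it. A usage sketch (hypothetical helper; the balance ID is passed in rather than constructed here):

func buildCreateClaimableBalanceSuccess(id ClaimableBalanceId) (CreateClaimableBalanceResult, error) {
	res, err := NewCreateClaimableBalanceResult(
		CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess, id)
	if err != nil {
		return CreateClaimableBalanceResult{}, err
	}
	// GetBalanceId is the non-panicking accessor; MustBalanceId panics if unset.
	if got, ok := res.GetBalanceId(); ok {
		_ = got
	}
	return res, nil
}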
+func (u CreateClaimableBalanceResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch CreateClaimableBalanceResultCode(u.Code) { + case CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess: + if err = (*u.BalanceId).EncodeTo(e); err != nil { + return err + } + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*CreateClaimableBalanceResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *CreateClaimableBalanceResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding CreateClaimableBalanceResultCode: %s", err) + } + switch CreateClaimableBalanceResultCode(u.Code) { + case CreateClaimableBalanceResultCodeCreateClaimableBalanceSuccess: + u.BalanceId = new(ClaimableBalanceId) + nTmp, err = (*u.BalanceId).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimableBalanceId: %s", err) + } + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s CreateClaimableBalanceResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *CreateClaimableBalanceResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*CreateClaimableBalanceResult)(nil) + _ encoding.BinaryUnmarshaler = (*CreateClaimableBalanceResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s CreateClaimableBalanceResult) xdrType() {} + +var _ xdrType = (*CreateClaimableBalanceResult)(nil) + +// ClaimClaimableBalanceResultCode is an XDR Enum defines as: // -// enum ManageDataResultCode +// enum ClaimClaimableBalanceResultCode // { -// // codes considered as "success" for the operation -// MANAGE_DATA_SUCCESS = 0, -// // codes considered as "failure" for the operation -// MANAGE_DATA_NOT_SUPPORTED_YET = -1, // The network hasn't moved to this protocol change yet -// MANAGE_DATA_NAME_NOT_FOUND = -2, // Trying to remove a Data Entry that isn't there -// MANAGE_DATA_LOW_RESERVE = -3, // not enough funds to create a new Data Entry -// MANAGE_DATA_INVALID_NAME = -4 // Name not a valid string +// CLAIM_CLAIMABLE_BALANCE_SUCCESS = 0, +// CLAIM_CLAIMABLE_BALANCE_DOES_NOT_EXIST = -1, +// CLAIM_CLAIMABLE_BALANCE_CANNOT_CLAIM = -2, +// CLAIM_CLAIMABLE_BALANCE_LINE_FULL = -3, +// CLAIM_CLAIMABLE_BALANCE_NO_TRUST = -4, +// CLAIM_CLAIMABLE_BALANCE_NOT_AUTHORIZED = -5 +// // }; // -type ManageDataResultCode int32 +type ClaimClaimableBalanceResultCode int32 const ( - ManageDataResultCodeManageDataSuccess ManageDataResultCode = 0 - ManageDataResultCodeManageDataNotSupportedYet ManageDataResultCode = -1 - ManageDataResultCodeManageDataNameNotFound ManageDataResultCode = -2 - ManageDataResultCodeManageDataLowReserve ManageDataResultCode = -3 - ManageDataResultCodeManageDataInvalidName ManageDataResultCode = -4 + ClaimClaimableBalanceResultCodeClaimClaimableBalanceSuccess ClaimClaimableBalanceResultCode = 0 + ClaimClaimableBalanceResultCodeClaimClaimableBalanceDoesNotExist ClaimClaimableBalanceResultCode = -1 + ClaimClaimableBalanceResultCodeClaimClaimableBalanceCannotClaim ClaimClaimableBalanceResultCode = -2 + ClaimClaimableBalanceResultCodeClaimClaimableBalanceLineFull ClaimClaimableBalanceResultCode = -3 + ClaimClaimableBalanceResultCodeClaimClaimableBalanceNoTrust ClaimClaimableBalanceResultCode = -4 + ClaimClaimableBalanceResultCodeClaimClaimableBalanceNotAuthorized ClaimClaimableBalanceResultCode = -5 ) -var manageDataResultCodeMap = map[int32]string{ - 0: "ManageDataResultCodeManageDataSuccess", - -1: "ManageDataResultCodeManageDataNotSupportedYet", - -2: "ManageDataResultCodeManageDataNameNotFound", - -3: "ManageDataResultCodeManageDataLowReserve", - -4: "ManageDataResultCodeManageDataInvalidName", +var claimClaimableBalanceResultCodeMap = map[int32]string{ + 0: "ClaimClaimableBalanceResultCodeClaimClaimableBalanceSuccess", + -1: "ClaimClaimableBalanceResultCodeClaimClaimableBalanceDoesNotExist", + -2: "ClaimClaimableBalanceResultCodeClaimClaimableBalanceCannotClaim", + -3: "ClaimClaimableBalanceResultCodeClaimClaimableBalanceLineFull", + -4: "ClaimClaimableBalanceResultCodeClaimClaimableBalanceNoTrust", + -5: "ClaimClaimableBalanceResultCodeClaimClaimableBalanceNotAuthorized", } // ValidEnum validates a proposed value for this enum. 
Implements -// the Enum interface for ManageDataResultCode -func (e ManageDataResultCode) ValidEnum(v int32) bool { - _, ok := manageDataResultCodeMap[v] +// the Enum interface for ClaimClaimableBalanceResultCode +func (e ClaimClaimableBalanceResultCode) ValidEnum(v int32) bool { + _, ok := claimClaimableBalanceResultCodeMap[v] return ok } // String returns the name of `e` -func (e ManageDataResultCode) String() string { - name, _ := manageDataResultCodeMap[int32(e)] +func (e ClaimClaimableBalanceResultCode) String() string { + name, _ := claimClaimableBalanceResultCodeMap[int32(e)] return name } -// ManageDataResult is an XDR Union defines as: +// EncodeTo encodes this value using the Encoder. +func (e ClaimClaimableBalanceResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := claimClaimableBalanceResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ClaimClaimableBalanceResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ClaimClaimableBalanceResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *ClaimClaimableBalanceResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ClaimClaimableBalanceResultCode: %s", err) + } + if _, ok := claimClaimableBalanceResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ClaimClaimableBalanceResultCode enum value", v) + } + *e = ClaimClaimableBalanceResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimClaimableBalanceResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClaimClaimableBalanceResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimClaimableBalanceResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimClaimableBalanceResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ClaimClaimableBalanceResultCode) xdrType() {} + +var _ xdrType = (*ClaimClaimableBalanceResultCode)(nil) + +// ClaimClaimableBalanceResult is an XDR Union defines as: // -// union ManageDataResult switch (ManageDataResultCode code) +// union ClaimClaimableBalanceResult switch (ClaimClaimableBalanceResultCode code) // { -// case MANAGE_DATA_SUCCESS: +// case CLAIM_CLAIMABLE_BALANCE_SUCCESS: // void; // default: // void; // }; // -type ManageDataResult struct { - Code ManageDataResultCode +type ClaimClaimableBalanceResult struct { + Code ClaimClaimableBalanceResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u ManageDataResult) SwitchFieldName() string { +func (u ClaimClaimableBalanceResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of ManageDataResult -func (u ManageDataResult) ArmForSwitch(sw int32) (string, bool) { - switch ManageDataResultCode(sw) { - case ManageDataResultCodeManageDataSuccess: +// the value for an instance of ClaimClaimableBalanceResult +func (u ClaimClaimableBalanceResult) ArmForSwitch(sw int32) (string, bool) { + switch ClaimClaimableBalanceResultCode(sw) { + case ClaimClaimableBalanceResultCodeClaimClaimableBalanceSuccess: return "", true default: return "", true } } -// NewManageDataResult creates a new ManageDataResult. -func NewManageDataResult(code ManageDataResultCode, value interface{}) (result ManageDataResult, err error) { +// NewClaimClaimableBalanceResult creates a new ClaimClaimableBalanceResult. +func NewClaimClaimableBalanceResult(code ClaimClaimableBalanceResultCode, value interface{}) (result ClaimClaimableBalanceResult, err error) { result.Code = code - switch ManageDataResultCode(code) { - case ManageDataResultCodeManageDataSuccess: + switch ClaimClaimableBalanceResultCode(code) { + case ClaimClaimableBalanceResultCodeClaimClaimableBalanceSuccess: // void default: // void @@ -3771,3493 +28809,5942 @@ func NewManageDataResult(code ManageDataResultCode, value interface{}) (result M return } -// OperationResultCode is an XDR Enum defines as: +// EncodeTo encodes this value using the Encoder. +func (u ClaimClaimableBalanceResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch ClaimClaimableBalanceResultCode(u.Code) { + case ClaimClaimableBalanceResultCodeClaimClaimableBalanceSuccess: + // Void + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*ClaimClaimableBalanceResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *ClaimClaimableBalanceResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimClaimableBalanceResultCode: %s", err) + } + switch ClaimClaimableBalanceResultCode(u.Code) { + case ClaimClaimableBalanceResultCodeClaimClaimableBalanceSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClaimClaimableBalanceResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
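Decoding a ClaimClaimableBalanceResult amounts to decoding its code, since every arm is void. A sketch decoding a failure code from raw bytes, assuming standard big-endian XDR int32 encoding (hypothetical helper):

func decodeClaimFailure() (ClaimClaimableBalanceResult, error) {
	// 0xFFFFFFFF is int32(-1): CLAIM_CLAIMABLE_BALANCE_DOES_NOT_EXIST.
	raw := []byte{0xff, 0xff, 0xff, 0xff}
	var res ClaimClaimableBalanceResult
	if err := res.UnmarshalBinary(raw); err != nil {
		return ClaimClaimableBalanceResult{}, err
	}
	return res, nil // res.Code.String() names the failure for error messages
}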
+func (s *ClaimClaimableBalanceResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClaimClaimableBalanceResult)(nil) + _ encoding.BinaryUnmarshaler = (*ClaimClaimableBalanceResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClaimClaimableBalanceResult) xdrType() {} + +var _ xdrType = (*ClaimClaimableBalanceResult)(nil) + +// BeginSponsoringFutureReservesResultCode is an XDR Enum defines as: // -// enum OperationResultCode +// enum BeginSponsoringFutureReservesResultCode // { -// opINNER = 0, // inner object result is valid +// // codes considered as "success" for the operation +// BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS = 0, // -// opBAD_AUTH = -1, // too few valid signatures / wrong network -// opNO_ACCOUNT = -2 // source account was not found +// // codes considered as "failure" for the operation +// BEGIN_SPONSORING_FUTURE_RESERVES_MALFORMED = -1, +// BEGIN_SPONSORING_FUTURE_RESERVES_ALREADY_SPONSORED = -2, +// BEGIN_SPONSORING_FUTURE_RESERVES_RECURSIVE = -3 // }; // -type OperationResultCode int32 +type BeginSponsoringFutureReservesResultCode int32 const ( - OperationResultCodeOpInner OperationResultCode = 0 - OperationResultCodeOpBadAuth OperationResultCode = -1 - OperationResultCodeOpNoAccount OperationResultCode = -2 + BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesSuccess BeginSponsoringFutureReservesResultCode = 0 + BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesMalformed BeginSponsoringFutureReservesResultCode = -1 + BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesAlreadySponsored BeginSponsoringFutureReservesResultCode = -2 + BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesRecursive BeginSponsoringFutureReservesResultCode = -3 ) -var operationResultCodeMap = map[int32]string{ - 0: "OperationResultCodeOpInner", - -1: "OperationResultCodeOpBadAuth", - -2: "OperationResultCodeOpNoAccount", +var beginSponsoringFutureReservesResultCodeMap = map[int32]string{ + 0: "BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesSuccess", + -1: "BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesMalformed", + -2: "BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesAlreadySponsored", + -3: "BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesRecursive", } // ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for OperationResultCode -func (e OperationResultCode) ValidEnum(v int32) bool { - _, ok := operationResultCodeMap[v] +// the Enum interface for BeginSponsoringFutureReservesResultCode +func (e BeginSponsoringFutureReservesResultCode) ValidEnum(v int32) bool { + _, ok := beginSponsoringFutureReservesResultCodeMap[v] return ok } // String returns the name of `e` -func (e OperationResultCode) String() string { - name, _ := operationResultCodeMap[int32(e)] +func (e BeginSponsoringFutureReservesResultCode) String() string { + name, _ := beginSponsoringFutureReservesResultCodeMap[int32(e)] return name } -// OperationResultTr is an XDR NestedUnion defines as: +// EncodeTo encodes this value using the Encoder. 
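A common consumer-side pattern for these result codes is translating anything other than the success value into an error, leaning on the generated String method. A minimal sketch (hypothetical helper, same-package assumption):

func beginSponsoringError(code BeginSponsoringFutureReservesResultCode) error {
	if code == BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesSuccess {
		return nil
	}
	return fmt.Errorf("begin_sponsoring_future_reserves failed: %s", code.String())
}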
+func (e BeginSponsoringFutureReservesResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := beginSponsoringFutureReservesResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid BeginSponsoringFutureReservesResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*BeginSponsoringFutureReservesResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *BeginSponsoringFutureReservesResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding BeginSponsoringFutureReservesResultCode: %s", err) + } + if _, ok := beginSponsoringFutureReservesResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid BeginSponsoringFutureReservesResultCode enum value", v) + } + *e = BeginSponsoringFutureReservesResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s BeginSponsoringFutureReservesResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *BeginSponsoringFutureReservesResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*BeginSponsoringFutureReservesResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*BeginSponsoringFutureReservesResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s BeginSponsoringFutureReservesResultCode) xdrType() {} + +var _ xdrType = (*BeginSponsoringFutureReservesResultCode)(nil) + +// BeginSponsoringFutureReservesResult is an XDR Union defines as: // -// union switch (OperationType type) -// { -// case CREATE_ACCOUNT: -// CreateAccountResult createAccountResult; -// case PAYMENT: -// PaymentResult paymentResult; -// case PATH_PAYMENT: -// PathPaymentResult pathPaymentResult; -// case MANAGE_OFFER: -// ManageOfferResult manageOfferResult; -// case CREATE_PASSIVE_OFFER: -// ManageOfferResult createPassiveOfferResult; -// case SET_OPTIONS: -// SetOptionsResult setOptionsResult; -// case CHANGE_TRUST: -// ChangeTrustResult changeTrustResult; -// case ALLOW_TRUST: -// AllowTrustResult allowTrustResult; -// case ACCOUNT_MERGE: -// AccountMergeResult accountMergeResult; -// case INFLATION: -// InflationResult inflationResult; -// case MANAGE_DATA: -// ManageDataResult manageDataResult; -// } +// union BeginSponsoringFutureReservesResult switch ( +// BeginSponsoringFutureReservesResultCode code) +// { +// case BEGIN_SPONSORING_FUTURE_RESERVES_SUCCESS: +// void; +// default: +// void; +// }; // -type OperationResultTr struct { - Type OperationType - CreateAccountResult *CreateAccountResult - PaymentResult *PaymentResult - PathPaymentResult *PathPaymentResult - ManageOfferResult *ManageOfferResult - CreatePassiveOfferResult *ManageOfferResult - SetOptionsResult *SetOptionsResult - ChangeTrustResult *ChangeTrustResult - AllowTrustResult *AllowTrustResult - AccountMergeResult *AccountMergeResult - InflationResult *InflationResult - ManageDataResult *ManageDataResult +type BeginSponsoringFutureReservesResult struct { + Code BeginSponsoringFutureReservesResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u OperationResultTr) SwitchFieldName() string 
{ - return "Type" +func (u BeginSponsoringFutureReservesResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of OperationResultTr -func (u OperationResultTr) ArmForSwitch(sw int32) (string, bool) { - switch OperationType(sw) { - case OperationTypeCreateAccount: - return "CreateAccountResult", true - case OperationTypePayment: - return "PaymentResult", true - case OperationTypePathPayment: - return "PathPaymentResult", true - case OperationTypeManageOffer: - return "ManageOfferResult", true - case OperationTypeCreatePassiveOffer: - return "CreatePassiveOfferResult", true - case OperationTypeSetOptions: - return "SetOptionsResult", true - case OperationTypeChangeTrust: - return "ChangeTrustResult", true - case OperationTypeAllowTrust: - return "AllowTrustResult", true - case OperationTypeAccountMerge: - return "AccountMergeResult", true - case OperationTypeInflation: - return "InflationResult", true - case OperationTypeManageData: - return "ManageDataResult", true +// the value for an instance of BeginSponsoringFutureReservesResult +func (u BeginSponsoringFutureReservesResult) ArmForSwitch(sw int32) (string, bool) { + switch BeginSponsoringFutureReservesResultCode(sw) { + case BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesSuccess: + return "", true + default: + return "", true } - return "-", false } -// NewOperationResultTr creates a new OperationResultTr. -func NewOperationResultTr(aType OperationType, value interface{}) (result OperationResultTr, err error) { - result.Type = aType - switch OperationType(aType) { - case OperationTypeCreateAccount: - tv, ok := value.(CreateAccountResult) - if !ok { - err = fmt.Errorf("invalid value, must be CreateAccountResult") - return - } - result.CreateAccountResult = &tv - case OperationTypePayment: - tv, ok := value.(PaymentResult) - if !ok { - err = fmt.Errorf("invalid value, must be PaymentResult") - return - } - result.PaymentResult = &tv - case OperationTypePathPayment: - tv, ok := value.(PathPaymentResult) - if !ok { - err = fmt.Errorf("invalid value, must be PathPaymentResult") - return - } - result.PathPaymentResult = &tv - case OperationTypeManageOffer: - tv, ok := value.(ManageOfferResult) - if !ok { - err = fmt.Errorf("invalid value, must be ManageOfferResult") - return - } - result.ManageOfferResult = &tv - case OperationTypeCreatePassiveOffer: - tv, ok := value.(ManageOfferResult) - if !ok { - err = fmt.Errorf("invalid value, must be ManageOfferResult") - return - } - result.CreatePassiveOfferResult = &tv - case OperationTypeSetOptions: - tv, ok := value.(SetOptionsResult) - if !ok { - err = fmt.Errorf("invalid value, must be SetOptionsResult") - return - } - result.SetOptionsResult = &tv - case OperationTypeChangeTrust: - tv, ok := value.(ChangeTrustResult) - if !ok { - err = fmt.Errorf("invalid value, must be ChangeTrustResult") - return - } - result.ChangeTrustResult = &tv - case OperationTypeAllowTrust: - tv, ok := value.(AllowTrustResult) - if !ok { - err = fmt.Errorf("invalid value, must be AllowTrustResult") - return - } - result.AllowTrustResult = &tv - case OperationTypeAccountMerge: - tv, ok := value.(AccountMergeResult) - if !ok { - err = fmt.Errorf("invalid value, must be AccountMergeResult") - return - } - result.AccountMergeResult = &tv - case OperationTypeInflation: - tv, ok := value.(InflationResult) - if !ok { - err = fmt.Errorf("invalid value, must be InflationResult") - return - } - 
result.InflationResult = &tv - case OperationTypeManageData: - tv, ok := value.(ManageDataResult) - if !ok { - err = fmt.Errorf("invalid value, must be ManageDataResult") - return - } - result.ManageDataResult = &tv +// NewBeginSponsoringFutureReservesResult creates a new BeginSponsoringFutureReservesResult. +func NewBeginSponsoringFutureReservesResult(code BeginSponsoringFutureReservesResultCode, value interface{}) (result BeginSponsoringFutureReservesResult, err error) { + result.Code = code + switch BeginSponsoringFutureReservesResultCode(code) { + case BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesSuccess: + // void + default: + // void } return } -// MustCreateAccountResult retrieves the CreateAccountResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustCreateAccountResult() CreateAccountResult { - val, ok := u.GetCreateAccountResult() - - if !ok { - panic("arm CreateAccountResult is not set") +// EncodeTo encodes this value using the Encoder. +func (u BeginSponsoringFutureReservesResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch BeginSponsoringFutureReservesResultCode(u.Code) { + case BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesSuccess: + // Void + return nil + default: + // Void + return nil } - - return val } -// GetCreateAccountResult retrieves the CreateAccountResult value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResultTr) GetCreateAccountResult() (result CreateAccountResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*BeginSponsoringFutureReservesResult)(nil) - if armName == "CreateAccountResult" { - result = *u.CreateAccountResult - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *BeginSponsoringFutureReservesResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding BeginSponsoringFutureReservesResultCode: %s", err) + } + switch BeginSponsoringFutureReservesResultCode(u.Code) { + case BeginSponsoringFutureReservesResultCodeBeginSponsoringFutureReservesSuccess: + // Void + return n, nil + default: + // Void + return n, nil } +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s BeginSponsoringFutureReservesResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustPaymentResult retrieves the PaymentResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustPaymentResult() PaymentResult { - val, ok := u.GetPaymentResult() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *BeginSponsoringFutureReservesResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm PaymentResult is not set") - } +var ( + _ encoding.BinaryMarshaler = (*BeginSponsoringFutureReservesResult)(nil) + _ encoding.BinaryUnmarshaler = (*BeginSponsoringFutureReservesResult)(nil) +) - return val -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s BeginSponsoringFutureReservesResult) xdrType() {} -// GetPaymentResult retrieves the PaymentResult value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResultTr) GetPaymentResult() (result PaymentResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ xdrType = (*BeginSponsoringFutureReservesResult)(nil) - if armName == "PaymentResult" { - result = *u.PaymentResult - ok = true - } +// EndSponsoringFutureReservesResultCode is an XDR Enum defines as: +// +// enum EndSponsoringFutureReservesResultCode +// { +// // codes considered as "success" for the operation +// END_SPONSORING_FUTURE_RESERVES_SUCCESS = 0, +// +// // codes considered as "failure" for the operation +// END_SPONSORING_FUTURE_RESERVES_NOT_SPONSORED = -1 +// }; +// +type EndSponsoringFutureReservesResultCode int32 - return +const ( + EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesSuccess EndSponsoringFutureReservesResultCode = 0 + EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesNotSponsored EndSponsoringFutureReservesResultCode = -1 +) + +var endSponsoringFutureReservesResultCodeMap = map[int32]string{ + 0: "EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesSuccess", + -1: "EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesNotSponsored", } -// MustPathPaymentResult retrieves the PathPaymentResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustPathPaymentResult() PathPaymentResult { - val, ok := u.GetPathPaymentResult() +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for EndSponsoringFutureReservesResultCode +func (e EndSponsoringFutureReservesResultCode) ValidEnum(v int32) bool { + _, ok := endSponsoringFutureReservesResultCodeMap[v] + return ok +} - if !ok { - panic("arm PathPaymentResult is not set") - } +// String returns the name of `e` +func (e EndSponsoringFutureReservesResultCode) String() string { + name, _ := endSponsoringFutureReservesResultCodeMap[int32(e)] + return name +} - return val +// EncodeTo encodes this value using the Encoder. +func (e EndSponsoringFutureReservesResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := endSponsoringFutureReservesResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid EndSponsoringFutureReservesResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// GetPathPaymentResult retrieves the PathPaymentResult value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResultTr) GetPathPaymentResult() (result PathPaymentResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*EndSponsoringFutureReservesResultCode)(nil) - if armName == "PathPaymentResult" { - result = *u.PathPaymentResult - ok = true +// DecodeFrom decodes this value using the Decoder. +func (e *EndSponsoringFutureReservesResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding EndSponsoringFutureReservesResultCode: %s", err) } + if _, ok := endSponsoringFutureReservesResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid EndSponsoringFutureReservesResultCode enum value", v) + } + *e = EndSponsoringFutureReservesResultCode(v) + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s EndSponsoringFutureReservesResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustManageOfferResult retrieves the ManageOfferResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustManageOfferResult() ManageOfferResult { - val, ok := u.GetManageOfferResult() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *EndSponsoringFutureReservesResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm ManageOfferResult is not set") - } +var ( + _ encoding.BinaryMarshaler = (*EndSponsoringFutureReservesResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*EndSponsoringFutureReservesResultCode)(nil) +) - return val +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s EndSponsoringFutureReservesResultCode) xdrType() {} + +var _ xdrType = (*EndSponsoringFutureReservesResultCode)(nil) + +// EndSponsoringFutureReservesResult is an XDR Union defines as: +// +// union EndSponsoringFutureReservesResult switch ( +// EndSponsoringFutureReservesResultCode code) +// { +// case END_SPONSORING_FUTURE_RESERVES_SUCCESS: +// void; +// default: +// void; +// }; +// +type EndSponsoringFutureReservesResult struct { + Code EndSponsoringFutureReservesResultCode } -// GetManageOfferResult retrieves the ManageOfferResult value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResultTr) GetManageOfferResult() (result ManageOfferResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u EndSponsoringFutureReservesResult) SwitchFieldName() string { + return "Code" +} - if armName == "ManageOfferResult" { - result = *u.ManageOfferResult - ok = true +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of EndSponsoringFutureReservesResult +func (u EndSponsoringFutureReservesResult) ArmForSwitch(sw int32) (string, bool) { + switch EndSponsoringFutureReservesResultCode(sw) { + case EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesSuccess: + return "", true + default: + return "", true } +} +// NewEndSponsoringFutureReservesResult creates a new EndSponsoringFutureReservesResult. +func NewEndSponsoringFutureReservesResult(code EndSponsoringFutureReservesResultCode, value interface{}) (result EndSponsoringFutureReservesResult, err error) { + result.Code = code + switch EndSponsoringFutureReservesResultCode(code) { + case EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesSuccess: + // void + default: + // void + } return } -// MustCreatePassiveOfferResult retrieves the CreatePassiveOfferResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustCreatePassiveOfferResult() ManageOfferResult { - val, ok := u.GetCreatePassiveOfferResult() - - if !ok { - panic("arm CreatePassiveOfferResult is not set") +// EncodeTo encodes this value using the Encoder. 
+func (u EndSponsoringFutureReservesResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch EndSponsoringFutureReservesResultCode(u.Code) { + case EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesSuccess: + // Void + return nil + default: + // Void + return nil } - - return val } -// GetCreatePassiveOfferResult retrieves the CreatePassiveOfferResult value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResultTr) GetCreatePassiveOfferResult() (result ManageOfferResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*EndSponsoringFutureReservesResult)(nil) - if armName == "CreatePassiveOfferResult" { - result = *u.CreatePassiveOfferResult - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *EndSponsoringFutureReservesResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding EndSponsoringFutureReservesResultCode: %s", err) + } + switch EndSponsoringFutureReservesResultCode(u.Code) { + case EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesSuccess: + // Void + return n, nil + default: + // Void + return n, nil } +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s EndSponsoringFutureReservesResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustSetOptionsResult retrieves the SetOptionsResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustSetOptionsResult() SetOptionsResult { - val, ok := u.GetSetOptionsResult() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *EndSponsoringFutureReservesResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm SetOptionsResult is not set") - } +var ( + _ encoding.BinaryMarshaler = (*EndSponsoringFutureReservesResult)(nil) + _ encoding.BinaryUnmarshaler = (*EndSponsoringFutureReservesResult)(nil) +) - return val -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s EndSponsoringFutureReservesResult) xdrType() {} -// GetSetOptionsResult retrieves the SetOptionsResult value from the union, -// returning ok if the union's switch indicated the value is valid. 
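// Illustrative sketch (editor's addition, not part of the generated patch):
// the interface assertions above mean these results can be passed around via
// the standard library's encoding.BinaryMarshaler. A small helper in that
// style, assuming the generated file's package is `xdr`; marshalAny is
// hypothetical.

package xdr

import "encoding"

func marshalAny(v encoding.BinaryMarshaler) ([]byte, error) {
	return v.MarshalBinary()
}

func exampleMarshalViaInterface() ([]byte, error) {
	res, err := NewEndSponsoringFutureReservesResult(
		EndSponsoringFutureReservesResultCodeEndSponsoringFutureReservesNotSponsored, nil)
	if err != nil {
		return nil, err
	}
	return marshalAny(res) // 0xFF 0xFF 0xFF 0xFF, the XDR encoding of -1
}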
-func (u OperationResultTr) GetSetOptionsResult() (result SetOptionsResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ xdrType = (*EndSponsoringFutureReservesResult)(nil) - if armName == "SetOptionsResult" { - result = *u.SetOptionsResult - ok = true - } +// RevokeSponsorshipResultCode is an XDR Enum defines as: +// +// enum RevokeSponsorshipResultCode +// { +// // codes considered as "success" for the operation +// REVOKE_SPONSORSHIP_SUCCESS = 0, +// +// // codes considered as "failure" for the operation +// REVOKE_SPONSORSHIP_DOES_NOT_EXIST = -1, +// REVOKE_SPONSORSHIP_NOT_SPONSOR = -2, +// REVOKE_SPONSORSHIP_LOW_RESERVE = -3, +// REVOKE_SPONSORSHIP_ONLY_TRANSFERABLE = -4, +// REVOKE_SPONSORSHIP_MALFORMED = -5 +// }; +// +type RevokeSponsorshipResultCode int32 - return +const ( + RevokeSponsorshipResultCodeRevokeSponsorshipSuccess RevokeSponsorshipResultCode = 0 + RevokeSponsorshipResultCodeRevokeSponsorshipDoesNotExist RevokeSponsorshipResultCode = -1 + RevokeSponsorshipResultCodeRevokeSponsorshipNotSponsor RevokeSponsorshipResultCode = -2 + RevokeSponsorshipResultCodeRevokeSponsorshipLowReserve RevokeSponsorshipResultCode = -3 + RevokeSponsorshipResultCodeRevokeSponsorshipOnlyTransferable RevokeSponsorshipResultCode = -4 + RevokeSponsorshipResultCodeRevokeSponsorshipMalformed RevokeSponsorshipResultCode = -5 +) + +var revokeSponsorshipResultCodeMap = map[int32]string{ + 0: "RevokeSponsorshipResultCodeRevokeSponsorshipSuccess", + -1: "RevokeSponsorshipResultCodeRevokeSponsorshipDoesNotExist", + -2: "RevokeSponsorshipResultCodeRevokeSponsorshipNotSponsor", + -3: "RevokeSponsorshipResultCodeRevokeSponsorshipLowReserve", + -4: "RevokeSponsorshipResultCodeRevokeSponsorshipOnlyTransferable", + -5: "RevokeSponsorshipResultCodeRevokeSponsorshipMalformed", } -// MustChangeTrustResult retrieves the ChangeTrustResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustChangeTrustResult() ChangeTrustResult { - val, ok := u.GetChangeTrustResult() +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for RevokeSponsorshipResultCode +func (e RevokeSponsorshipResultCode) ValidEnum(v int32) bool { + _, ok := revokeSponsorshipResultCodeMap[v] + return ok +} + +// String returns the name of `e` +func (e RevokeSponsorshipResultCode) String() string { + name, _ := revokeSponsorshipResultCodeMap[int32(e)] + return name +} - if !ok { - panic("arm ChangeTrustResult is not set") +// EncodeTo encodes this value using the Encoder. +func (e RevokeSponsorshipResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := revokeSponsorshipResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid RevokeSponsorshipResultCode enum value", e) } - - return val + _, err := enc.EncodeInt(int32(e)) + return err } -// GetChangeTrustResult retrieves the ChangeTrustResult value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResultTr) GetChangeTrustResult() (result ChangeTrustResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*RevokeSponsorshipResultCode)(nil) - if armName == "ChangeTrustResult" { - result = *u.ChangeTrustResult - ok = true +// DecodeFrom decodes this value using the Decoder. 
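// Illustrative sketch (editor's addition, not part of the generated patch):
// on the wire an XDR enum is a 4-byte big-endian signed integer, so failure
// codes decode from their two's-complement form. Assumes the generated file's
// package is `xdr`.

package xdr

func exampleDecodeRevokeSponsorshipCode() (RevokeSponsorshipResultCode, error) {
	var code RevokeSponsorshipResultCode
	// 0xFFFFFFFE is -2, i.e. REVOKE_SPONSORSHIP_NOT_SPONSOR.
	err := code.UnmarshalBinary([]byte{0xFF, 0xFF, 0xFF, 0xFE})
	return code, err // RevokeSponsorshipResultCodeRevokeSponsorshipNotSponsor, nil
}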
+func (e *RevokeSponsorshipResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding RevokeSponsorshipResultCode: %s", err) + } + if _, ok := revokeSponsorshipResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid RevokeSponsorshipResultCode enum value", v) } + *e = RevokeSponsorshipResultCode(v) + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s RevokeSponsorshipResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustAllowTrustResult retrieves the AllowTrustResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustAllowTrustResult() AllowTrustResult { - val, ok := u.GetAllowTrustResult() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *RevokeSponsorshipResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm AllowTrustResult is not set") - } +var ( + _ encoding.BinaryMarshaler = (*RevokeSponsorshipResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*RevokeSponsorshipResultCode)(nil) +) - return val +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s RevokeSponsorshipResultCode) xdrType() {} + +var _ xdrType = (*RevokeSponsorshipResultCode)(nil) + +// RevokeSponsorshipResult is an XDR Union defines as: +// +// union RevokeSponsorshipResult switch (RevokeSponsorshipResultCode code) +// { +// case REVOKE_SPONSORSHIP_SUCCESS: +// void; +// default: +// void; +// }; +// +type RevokeSponsorshipResult struct { + Code RevokeSponsorshipResultCode } -// GetAllowTrustResult retrieves the AllowTrustResult value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResultTr) GetAllowTrustResult() (result AllowTrustResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u RevokeSponsorshipResult) SwitchFieldName() string { + return "Code" +} - if armName == "AllowTrustResult" { - result = *u.AllowTrustResult - ok = true +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of RevokeSponsorshipResult +func (u RevokeSponsorshipResult) ArmForSwitch(sw int32) (string, bool) { + switch RevokeSponsorshipResultCode(sw) { + case RevokeSponsorshipResultCodeRevokeSponsorshipSuccess: + return "", true + default: + return "", true } +} +// NewRevokeSponsorshipResult creates a new RevokeSponsorshipResult. +func NewRevokeSponsorshipResult(code RevokeSponsorshipResultCode, value interface{}) (result RevokeSponsorshipResult, err error) { + result.Code = code + switch RevokeSponsorshipResultCode(code) { + case RevokeSponsorshipResultCodeRevokeSponsorshipSuccess: + // void + default: + // void + } return } -// MustAccountMergeResult retrieves the AccountMergeResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustAccountMergeResult() AccountMergeResult { - val, ok := u.GetAccountMergeResult() - - if !ok { - panic("arm AccountMergeResult is not set") +// EncodeTo encodes this value using the Encoder. 
+func (u RevokeSponsorshipResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch RevokeSponsorshipResultCode(u.Code) { + case RevokeSponsorshipResultCodeRevokeSponsorshipSuccess: + // Void + return nil + default: + // Void + return nil } - - return val } -// GetAccountMergeResult retrieves the AccountMergeResult value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResultTr) GetAccountMergeResult() (result AccountMergeResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*RevokeSponsorshipResult)(nil) - if armName == "AccountMergeResult" { - result = *u.AccountMergeResult - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *RevokeSponsorshipResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding RevokeSponsorshipResultCode: %s", err) } + switch RevokeSponsorshipResultCode(u.Code) { + case RevokeSponsorshipResultCodeRevokeSponsorshipSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s RevokeSponsorshipResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustInflationResult retrieves the InflationResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustInflationResult() InflationResult { - val, ok := u.GetInflationResult() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *RevokeSponsorshipResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm InflationResult is not set") - } +var ( + _ encoding.BinaryMarshaler = (*RevokeSponsorshipResult)(nil) + _ encoding.BinaryUnmarshaler = (*RevokeSponsorshipResult)(nil) +) - return val -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s RevokeSponsorshipResult) xdrType() {} -// GetInflationResult retrieves the InflationResult value from the union, -// returning ok if the union's switch indicated the value is valid. 
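// Illustrative sketch (editor's addition, not part of the generated patch):
// since every arm of RevokeSponsorshipResult is void, callers branch only on
// the decoded Code. A consumer-style helper, assuming the generated file's
// package is `xdr`; revokeSponsorshipSucceeded is hypothetical.

package xdr

func revokeSponsorshipSucceeded(raw []byte) (bool, error) {
	var res RevokeSponsorshipResult
	if err := res.UnmarshalBinary(raw); err != nil {
		return false, err
	}
	switch res.Code {
	case RevokeSponsorshipResultCodeRevokeSponsorshipSuccess:
		return true, nil
	default:
		return false, nil // res.Code.String() names the failure reason
	}
}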
-func (u OperationResultTr) GetInflationResult() (result InflationResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ xdrType = (*RevokeSponsorshipResult)(nil) - if armName == "InflationResult" { - result = *u.InflationResult - ok = true - } +// ClawbackResultCode is an XDR Enum defines as: +// +// enum ClawbackResultCode +// { +// // codes considered as "success" for the operation +// CLAWBACK_SUCCESS = 0, +// +// // codes considered as "failure" for the operation +// CLAWBACK_MALFORMED = -1, +// CLAWBACK_NOT_CLAWBACK_ENABLED = -2, +// CLAWBACK_NO_TRUST = -3, +// CLAWBACK_UNDERFUNDED = -4 +// }; +// +type ClawbackResultCode int32 - return +const ( + ClawbackResultCodeClawbackSuccess ClawbackResultCode = 0 + ClawbackResultCodeClawbackMalformed ClawbackResultCode = -1 + ClawbackResultCodeClawbackNotClawbackEnabled ClawbackResultCode = -2 + ClawbackResultCodeClawbackNoTrust ClawbackResultCode = -3 + ClawbackResultCodeClawbackUnderfunded ClawbackResultCode = -4 +) + +var clawbackResultCodeMap = map[int32]string{ + 0: "ClawbackResultCodeClawbackSuccess", + -1: "ClawbackResultCodeClawbackMalformed", + -2: "ClawbackResultCodeClawbackNotClawbackEnabled", + -3: "ClawbackResultCodeClawbackNoTrust", + -4: "ClawbackResultCodeClawbackUnderfunded", } -// MustManageDataResult retrieves the ManageDataResult value from the union, -// panicing if the value is not set. -func (u OperationResultTr) MustManageDataResult() ManageDataResult { - val, ok := u.GetManageDataResult() +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for ClawbackResultCode +func (e ClawbackResultCode) ValidEnum(v int32) bool { + _, ok := clawbackResultCodeMap[v] + return ok +} - if !ok { - panic("arm ManageDataResult is not set") - } +// String returns the name of `e` +func (e ClawbackResultCode) String() string { + name, _ := clawbackResultCodeMap[int32(e)] + return name +} - return val +// EncodeTo encodes this value using the Encoder. +func (e ClawbackResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := clawbackResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ClawbackResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// GetManageDataResult retrieves the ManageDataResult value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResultTr) GetManageDataResult() (result ManageDataResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*ClawbackResultCode)(nil) - if armName == "ManageDataResult" { - result = *u.ManageDataResult - ok = true +// DecodeFrom decodes this value using the Decoder. +func (e *ClawbackResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ClawbackResultCode: %s", err) } + if _, ok := clawbackResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ClawbackResultCode enum value", v) + } + *e = ClawbackResultCode(v) + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClawbackResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// OperationResult is an XDR Union defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *ClawbackResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClawbackResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*ClawbackResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClawbackResultCode) xdrType() {} + +var _ xdrType = (*ClawbackResultCode)(nil) + +// ClawbackResult is an XDR Union defines as: // -// union OperationResult switch (OperationResultCode code) +// union ClawbackResult switch (ClawbackResultCode code) // { -// case opINNER: -// union switch (OperationType type) -// { -// case CREATE_ACCOUNT: -// CreateAccountResult createAccountResult; -// case PAYMENT: -// PaymentResult paymentResult; -// case PATH_PAYMENT: -// PathPaymentResult pathPaymentResult; -// case MANAGE_OFFER: -// ManageOfferResult manageOfferResult; -// case CREATE_PASSIVE_OFFER: -// ManageOfferResult createPassiveOfferResult; -// case SET_OPTIONS: -// SetOptionsResult setOptionsResult; -// case CHANGE_TRUST: -// ChangeTrustResult changeTrustResult; -// case ALLOW_TRUST: -// AllowTrustResult allowTrustResult; -// case ACCOUNT_MERGE: -// AccountMergeResult accountMergeResult; -// case INFLATION: -// InflationResult inflationResult; -// case MANAGE_DATA: -// ManageDataResult manageDataResult; -// } -// tr; +// case CLAWBACK_SUCCESS: +// void; // default: // void; // }; // -type OperationResult struct { - Code OperationResultCode - Tr *OperationResultTr +type ClawbackResult struct { + Code ClawbackResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u OperationResult) SwitchFieldName() string { +func (u ClawbackResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of OperationResult -func (u OperationResult) ArmForSwitch(sw int32) (string, bool) { - switch OperationResultCode(sw) { - case OperationResultCodeOpInner: - return "Tr", true +// the value for an instance of ClawbackResult +func (u ClawbackResult) ArmForSwitch(sw int32) (string, bool) { + switch ClawbackResultCode(sw) { + case ClawbackResultCodeClawbackSuccess: + return "", true default: return "", true } } -// NewOperationResult creates a new OperationResult. -func NewOperationResult(code OperationResultCode, value interface{}) (result OperationResult, err error) { +// NewClawbackResult creates a new ClawbackResult. +func NewClawbackResult(code ClawbackResultCode, value interface{}) (result ClawbackResult, err error) { result.Code = code - switch OperationResultCode(code) { - case OperationResultCodeOpInner: - tv, ok := value.(OperationResultTr) - if !ok { - err = fmt.Errorf("invalid value, must be OperationResultTr") - return - } - result.Tr = &tv + switch ClawbackResultCode(code) { + case ClawbackResultCodeClawbackSuccess: + // void default: // void } return } -// MustTr retrieves the Tr value from the union, -// panicing if the value is not set. -func (u OperationResult) MustTr() OperationResultTr { - val, ok := u.GetTr() - - if !ok { - panic("arm Tr is not set") +// EncodeTo encodes this value using the Encoder. 
+func (u ClawbackResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch ClawbackResultCode(u.Code) { + case ClawbackResultCodeClawbackSuccess: + // Void + return nil + default: + // Void + return nil } - - return val } -// GetTr retrieves the Tr value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u OperationResult) GetTr() (result OperationResultTr, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Code)) +var _ decoderFrom = (*ClawbackResult)(nil) - if armName == "Tr" { - result = *u.Tr - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *ClawbackResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClawbackResultCode: %s", err) } + switch ClawbackResultCode(u.Code) { + case ClawbackResultCodeClawbackSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClawbackResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// TransactionResultCode is an XDR Enum defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClawbackResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClawbackResult)(nil) + _ encoding.BinaryUnmarshaler = (*ClawbackResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
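// Illustrative sketch (editor's addition, not part of the generated patch):
// encoding a ClawbackResult writes only its code; for CLAWBACK_UNDERFUNDED
// (-4) that is the big-endian value 0xFFFFFFFC. Assumes the generated file's
// package is `xdr`.

package xdr

import "encoding/hex"

func exampleEncodeClawbackFailure() (string, error) {
	res, err := NewClawbackResult(ClawbackResultCodeClawbackUnderfunded, nil)
	if err != nil {
		return "", err
	}
	raw, err := res.MarshalBinary()
	if err != nil {
		return "", err
	}
	return hex.EncodeToString(raw), nil // "fffffffc"
}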
+func (s ClawbackResult) xdrType() {} + +var _ xdrType = (*ClawbackResult)(nil) + +// ClawbackClaimableBalanceResultCode is an XDR Enum defines as: // -// enum TransactionResultCode +// enum ClawbackClaimableBalanceResultCode // { -// txSUCCESS = 0, // all operations succeeded -// -// txFAILED = -1, // one of the operations failed (none were applied) -// -// txTOO_EARLY = -2, // ledger closeTime before minTime -// txTOO_LATE = -3, // ledger closeTime after maxTime -// txMISSING_OPERATION = -4, // no operation was specified -// txBAD_SEQ = -5, // sequence number does not match source account +// // codes considered as "success" for the operation +// CLAWBACK_CLAIMABLE_BALANCE_SUCCESS = 0, // -// txBAD_AUTH = -6, // too few valid signatures / wrong network -// txINSUFFICIENT_BALANCE = -7, // fee would bring account below reserve -// txNO_ACCOUNT = -8, // source account not found -// txINSUFFICIENT_FEE = -9, // fee is too small -// txBAD_AUTH_EXTRA = -10, // unused signatures attached to transaction -// txINTERNAL_ERROR = -11 // an unknown error occured +// // codes considered as "failure" for the operation +// CLAWBACK_CLAIMABLE_BALANCE_DOES_NOT_EXIST = -1, +// CLAWBACK_CLAIMABLE_BALANCE_NOT_ISSUER = -2, +// CLAWBACK_CLAIMABLE_BALANCE_NOT_CLAWBACK_ENABLED = -3 // }; // -type TransactionResultCode int32 +type ClawbackClaimableBalanceResultCode int32 const ( - TransactionResultCodeTxSuccess TransactionResultCode = 0 - TransactionResultCodeTxFailed TransactionResultCode = -1 - TransactionResultCodeTxTooEarly TransactionResultCode = -2 - TransactionResultCodeTxTooLate TransactionResultCode = -3 - TransactionResultCodeTxMissingOperation TransactionResultCode = -4 - TransactionResultCodeTxBadSeq TransactionResultCode = -5 - TransactionResultCodeTxBadAuth TransactionResultCode = -6 - TransactionResultCodeTxInsufficientBalance TransactionResultCode = -7 - TransactionResultCodeTxNoAccount TransactionResultCode = -8 - TransactionResultCodeTxInsufficientFee TransactionResultCode = -9 - TransactionResultCodeTxBadAuthExtra TransactionResultCode = -10 - TransactionResultCodeTxInternalError TransactionResultCode = -11 + ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceSuccess ClawbackClaimableBalanceResultCode = 0 + ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceDoesNotExist ClawbackClaimableBalanceResultCode = -1 + ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceNotIssuer ClawbackClaimableBalanceResultCode = -2 + ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceNotClawbackEnabled ClawbackClaimableBalanceResultCode = -3 ) -var transactionResultCodeMap = map[int32]string{ - 0: "TransactionResultCodeTxSuccess", - -1: "TransactionResultCodeTxFailed", - -2: "TransactionResultCodeTxTooEarly", - -3: "TransactionResultCodeTxTooLate", - -4: "TransactionResultCodeTxMissingOperation", - -5: "TransactionResultCodeTxBadSeq", - -6: "TransactionResultCodeTxBadAuth", - -7: "TransactionResultCodeTxInsufficientBalance", - -8: "TransactionResultCodeTxNoAccount", - -9: "TransactionResultCodeTxInsufficientFee", - -10: "TransactionResultCodeTxBadAuthExtra", - -11: "TransactionResultCodeTxInternalError", +var clawbackClaimableBalanceResultCodeMap = map[int32]string{ + 0: "ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceSuccess", + -1: "ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceDoesNotExist", + -2: "ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceNotIssuer", + -3: "ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceNotClawbackEnabled", } -// 
ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for TransactionResultCode -func (e TransactionResultCode) ValidEnum(v int32) bool { - _, ok := transactionResultCodeMap[v] +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for ClawbackClaimableBalanceResultCode +func (e ClawbackClaimableBalanceResultCode) ValidEnum(v int32) bool { + _, ok := clawbackClaimableBalanceResultCodeMap[v] return ok } // String returns the name of `e` -func (e TransactionResultCode) String() string { - name, _ := transactionResultCodeMap[int32(e)] +func (e ClawbackClaimableBalanceResultCode) String() string { + name, _ := clawbackClaimableBalanceResultCodeMap[int32(e)] return name } -// TransactionResultResult is an XDR NestedUnion defines as: +// EncodeTo encodes this value using the Encoder. +func (e ClawbackClaimableBalanceResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := clawbackClaimableBalanceResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid ClawbackClaimableBalanceResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*ClawbackClaimableBalanceResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *ClawbackClaimableBalanceResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding ClawbackClaimableBalanceResultCode: %s", err) + } + if _, ok := clawbackClaimableBalanceResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid ClawbackClaimableBalanceResultCode enum value", v) + } + *e = ClawbackClaimableBalanceResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClawbackClaimableBalanceResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClawbackClaimableBalanceResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClawbackClaimableBalanceResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*ClawbackClaimableBalanceResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s ClawbackClaimableBalanceResultCode) xdrType() {} + +var _ xdrType = (*ClawbackClaimableBalanceResultCode)(nil) + +// ClawbackClaimableBalanceResult is an XDR Union defines as: // -// union switch (TransactionResultCode code) -// { -// case txSUCCESS: -// case txFAILED: -// OperationResult results<>; -// default: -// void; -// } +// union ClawbackClaimableBalanceResult switch ( +// ClawbackClaimableBalanceResultCode code) +// { +// case CLAWBACK_CLAIMABLE_BALANCE_SUCCESS: +// void; +// default: +// void; +// }; // -type TransactionResultResult struct { - Code TransactionResultCode - Results *[]OperationResult +type ClawbackClaimableBalanceResult struct { + Code ClawbackClaimableBalanceResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u TransactionResultResult) SwitchFieldName() string { +func (u ClawbackClaimableBalanceResult) SwitchFieldName() string { return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of TransactionResultResult -func (u TransactionResultResult) ArmForSwitch(sw int32) (string, bool) { - switch TransactionResultCode(sw) { - case TransactionResultCodeTxSuccess: - return "Results", true - case TransactionResultCodeTxFailed: - return "Results", true +// the value for an instance of ClawbackClaimableBalanceResult +func (u ClawbackClaimableBalanceResult) ArmForSwitch(sw int32) (string, bool) { + switch ClawbackClaimableBalanceResultCode(sw) { + case ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceSuccess: + return "", true default: return "", true } } -// NewTransactionResultResult creates a new TransactionResultResult. -func NewTransactionResultResult(code TransactionResultCode, value interface{}) (result TransactionResultResult, err error) { +// NewClawbackClaimableBalanceResult creates a new ClawbackClaimableBalanceResult. +func NewClawbackClaimableBalanceResult(code ClawbackClaimableBalanceResultCode, value interface{}) (result ClawbackClaimableBalanceResult, err error) { result.Code = code - switch TransactionResultCode(code) { - case TransactionResultCodeTxSuccess: - tv, ok := value.([]OperationResult) - if !ok { - err = fmt.Errorf("invalid value, must be []OperationResult") - return - } - result.Results = &tv - case TransactionResultCodeTxFailed: - tv, ok := value.([]OperationResult) - if !ok { - err = fmt.Errorf("invalid value, must be []OperationResult") - return - } - result.Results = &tv + switch ClawbackClaimableBalanceResultCode(code) { + case ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceSuccess: + // void default: // void } return } -// MustResults retrieves the Results value from the union, -// panicing if the value is not set. -func (u TransactionResultResult) MustResults() []OperationResult { - val, ok := u.GetResults() - - if !ok { - panic("arm Results is not set") +// EncodeTo encodes this value using the Encoder. +func (u ClawbackClaimableBalanceResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch ClawbackClaimableBalanceResultCode(u.Code) { + case ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceSuccess: + // Void + return nil + default: + // Void + return nil } - - return val } -// GetResults retrieves the Results value from the union, -// returning ok if the union's switch indicated the value is valid. 
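// Illustrative sketch (editor's addition, not part of the generated patch):
// for unions whose arms are all void, ArmForSwitch reports an empty arm name
// for every discriminant and SwitchFieldName points at the Code field that
// callers inspect instead. Assumes the generated file's package is `xdr`.

package xdr

func exampleClawbackClaimableBalanceArms() (string, string, bool) {
	var res ClawbackClaimableBalanceResult

	field := res.SwitchFieldName() // "Code"

	// The same answer comes back for success and for any failure code:
	// no arm carries data.
	arm, ok := res.ArmForSwitch(int32(ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceNotIssuer))
	return field, arm, ok // "Code", "", true
}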
-func (u TransactionResultResult) GetResults() (result []OperationResult, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Code)) +var _ decoderFrom = (*ClawbackClaimableBalanceResult)(nil) - if armName == "Results" { - result = *u.Results - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *ClawbackClaimableBalanceResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClawbackClaimableBalanceResultCode: %s", err) + } + switch ClawbackClaimableBalanceResultCode(u.Code) { + case ClawbackClaimableBalanceResultCodeClawbackClaimableBalanceSuccess: + // Void + return n, nil + default: + // Void + return n, nil } +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s ClawbackClaimableBalanceResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// TransactionResultExt is an XDR NestedUnion defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *ClawbackClaimableBalanceResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*ClawbackClaimableBalanceResult)(nil) + _ encoding.BinaryUnmarshaler = (*ClawbackClaimableBalanceResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s ClawbackClaimableBalanceResult) xdrType() {} + +var _ xdrType = (*ClawbackClaimableBalanceResult)(nil) + +// SetTrustLineFlagsResultCode is an XDR Enum defines as: // -// union switch (int v) -// { -// case 0: -// void; -// } +// enum SetTrustLineFlagsResultCode +// { +// // codes considered as "success" for the operation +// SET_TRUST_LINE_FLAGS_SUCCESS = 0, // -type TransactionResultExt struct { - V int32 +// // codes considered as "failure" for the operation +// SET_TRUST_LINE_FLAGS_MALFORMED = -1, +// SET_TRUST_LINE_FLAGS_NO_TRUST_LINE = -2, +// SET_TRUST_LINE_FLAGS_CANT_REVOKE = -3, +// SET_TRUST_LINE_FLAGS_INVALID_STATE = -4, +// SET_TRUST_LINE_FLAGS_LOW_RESERVE = -5 // claimable balances can't be created +// // on revoke due to low reserves +// }; +// +type SetTrustLineFlagsResultCode int32 + +const ( + SetTrustLineFlagsResultCodeSetTrustLineFlagsSuccess SetTrustLineFlagsResultCode = 0 + SetTrustLineFlagsResultCodeSetTrustLineFlagsMalformed SetTrustLineFlagsResultCode = -1 + SetTrustLineFlagsResultCodeSetTrustLineFlagsNoTrustLine SetTrustLineFlagsResultCode = -2 + SetTrustLineFlagsResultCodeSetTrustLineFlagsCantRevoke SetTrustLineFlagsResultCode = -3 + SetTrustLineFlagsResultCodeSetTrustLineFlagsInvalidState SetTrustLineFlagsResultCode = -4 + SetTrustLineFlagsResultCodeSetTrustLineFlagsLowReserve SetTrustLineFlagsResultCode = -5 +) + +var setTrustLineFlagsResultCodeMap = map[int32]string{ + 0: "SetTrustLineFlagsResultCodeSetTrustLineFlagsSuccess", + -1: "SetTrustLineFlagsResultCodeSetTrustLineFlagsMalformed", + -2: "SetTrustLineFlagsResultCodeSetTrustLineFlagsNoTrustLine", + -3: "SetTrustLineFlagsResultCodeSetTrustLineFlagsCantRevoke", + -4: "SetTrustLineFlagsResultCodeSetTrustLineFlagsInvalidState", + -5: "SetTrustLineFlagsResultCodeSetTrustLineFlagsLowReserve", } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u TransactionResultExt) SwitchFieldName() string { - return "V" +// ValidEnum 
validates a proposed value for this enum. Implements +// the Enum interface for SetTrustLineFlagsResultCode +func (e SetTrustLineFlagsResultCode) ValidEnum(v int32) bool { + _, ok := setTrustLineFlagsResultCodeMap[v] + return ok } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of TransactionResultExt -func (u TransactionResultExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "", true +// String returns the name of `e` +func (e SetTrustLineFlagsResultCode) String() string { + name, _ := setTrustLineFlagsResultCodeMap[int32(e)] + return name +} + +// EncodeTo encodes this value using the Encoder. +func (e SetTrustLineFlagsResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := setTrustLineFlagsResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid SetTrustLineFlagsResultCode enum value", e) } - return "-", false + _, err := enc.EncodeInt(int32(e)) + return err } -// NewTransactionResultExt creates a new TransactionResultExt. -func NewTransactionResultExt(v int32, value interface{}) (result TransactionResultExt, err error) { - result.V = v - switch int32(v) { - case 0: - // void +var _ decoderFrom = (*SetTrustLineFlagsResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *SetTrustLineFlagsResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding SetTrustLineFlagsResultCode: %s", err) } - return + if _, ok := setTrustLineFlagsResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid SetTrustLineFlagsResultCode enum value", v) + } + *e = SetTrustLineFlagsResultCode(v) + return n, nil } -// TransactionResult is an XDR Struct defines as: -// -// struct TransactionResult -// { -// int64 feeCharged; // actual fee charged for the transaction -// -// union switch (TransactionResultCode code) -// { -// case txSUCCESS: -// case txFAILED: -// OperationResult results<>; -// default: -// void; -// } -// result; -// -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; -// }; -// -type TransactionResult struct { - FeeCharged Int64 - Result TransactionResultResult - Ext TransactionResultExt +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SetTrustLineFlagsResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// UpgradeType is an XDR Typedef defines as: -// -// typedef opaque UpgradeType<128>; -// -type UpgradeType []byte +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SetTrustLineFlagsResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} -// StellarValueExt is an XDR NestedUnion defines as: +var ( + _ encoding.BinaryMarshaler = (*SetTrustLineFlagsResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*SetTrustLineFlagsResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s SetTrustLineFlagsResultCode) xdrType() {} + +var _ xdrType = (*SetTrustLineFlagsResultCode)(nil) + +// SetTrustLineFlagsResult is an XDR Union defines as: // -// union switch (int v) -// { -// case 0: -// void; -// } +// union SetTrustLineFlagsResult switch (SetTrustLineFlagsResultCode code) +// { +// case SET_TRUST_LINE_FLAGS_SUCCESS: +// void; +// default: +// void; +// }; // -type StellarValueExt struct { - V int32 +type SetTrustLineFlagsResult struct { + Code SetTrustLineFlagsResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u StellarValueExt) SwitchFieldName() string { - return "V" +func (u SetTrustLineFlagsResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of StellarValueExt -func (u StellarValueExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: +// the value for an instance of SetTrustLineFlagsResult +func (u SetTrustLineFlagsResult) ArmForSwitch(sw int32) (string, bool) { + switch SetTrustLineFlagsResultCode(sw) { + case SetTrustLineFlagsResultCodeSetTrustLineFlagsSuccess: + return "", true + default: return "", true } - return "-", false } -// NewStellarValueExt creates a new StellarValueExt. -func NewStellarValueExt(v int32, value interface{}) (result StellarValueExt, err error) { - result.V = v - switch int32(v) { - case 0: +// NewSetTrustLineFlagsResult creates a new SetTrustLineFlagsResult. +func NewSetTrustLineFlagsResult(code SetTrustLineFlagsResultCode, value interface{}) (result SetTrustLineFlagsResult, err error) { + result.Code = code + switch SetTrustLineFlagsResultCode(code) { + case SetTrustLineFlagsResultCodeSetTrustLineFlagsSuccess: + // void + default: // void } return } -// StellarValue is an XDR Struct defines as: +// EncodeTo encodes this value using the Encoder. +func (u SetTrustLineFlagsResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch SetTrustLineFlagsResultCode(u.Code) { + case SetTrustLineFlagsResultCodeSetTrustLineFlagsSuccess: + // Void + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*SetTrustLineFlagsResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *SetTrustLineFlagsResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SetTrustLineFlagsResultCode: %s", err) + } + switch SetTrustLineFlagsResultCode(u.Code) { + case SetTrustLineFlagsResultCodeSetTrustLineFlagsSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SetTrustLineFlagsResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SetTrustLineFlagsResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SetTrustLineFlagsResult)(nil) + _ encoding.BinaryUnmarshaler = (*SetTrustLineFlagsResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
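// Illustrative sketch (editor's addition, not part of the generated patch):
// a consumer-side helper that turns a decoded SetTrustLineFlagsResult into a
// Go error, reusing the generated String name of the failure code. Assumes
// the generated file's package is `xdr`; setTrustLineFlagsError is hypothetical.

package xdr

import "fmt"

func setTrustLineFlagsError(res SetTrustLineFlagsResult) error {
	if res.Code == SetTrustLineFlagsResultCodeSetTrustLineFlagsSuccess {
		return nil
	}
	// e.g. "set_trust_line_flags failed: SetTrustLineFlagsResultCodeSetTrustLineFlagsCantRevoke (-3)"
	return fmt.Errorf("set_trust_line_flags failed: %s (%d)", res.Code, int32(res.Code))
}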
+func (s SetTrustLineFlagsResult) xdrType() {} + +var _ xdrType = (*SetTrustLineFlagsResult)(nil) + +// LiquidityPoolDepositResultCode is an XDR Enum defines as: // -// struct StellarValue +// enum LiquidityPoolDepositResultCode // { -// Hash txSetHash; // transaction set to apply to previous ledger -// uint64 closeTime; // network close time -// -// // upgrades to apply to the previous ledger (usually empty) -// // this is a vector of encoded 'LedgerUpgrade' so that nodes can drop -// // unknown steps during consensus if needed. -// // see notes below on 'LedgerUpgrade' for more detail -// // max size is dictated by number of upgrade types (+ room for future) -// UpgradeType upgrades<6>; +// // codes considered as "success" for the operation +// LIQUIDITY_POOL_DEPOSIT_SUCCESS = 0, // -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; +// // codes considered as "failure" for the operation +// LIQUIDITY_POOL_DEPOSIT_MALFORMED = -1, // bad input +// LIQUIDITY_POOL_DEPOSIT_NO_TRUST = -2, // no trust line for one of the +// // assets +// LIQUIDITY_POOL_DEPOSIT_NOT_AUTHORIZED = -3, // not authorized for one of the +// // assets +// LIQUIDITY_POOL_DEPOSIT_UNDERFUNDED = -4, // not enough balance for one of +// // the assets +// LIQUIDITY_POOL_DEPOSIT_LINE_FULL = -5, // pool share trust line doesn't +// // have sufficient limit +// LIQUIDITY_POOL_DEPOSIT_BAD_PRICE = -6, // deposit price outside bounds +// LIQUIDITY_POOL_DEPOSIT_POOL_FULL = -7 // pool reserves are full // }; // -type StellarValue struct { - TxSetHash Hash - CloseTime Uint64 - Upgrades []UpgradeType `xdrmaxsize:"6"` - Ext StellarValueExt +type LiquidityPoolDepositResultCode int32 + +const ( + LiquidityPoolDepositResultCodeLiquidityPoolDepositSuccess LiquidityPoolDepositResultCode = 0 + LiquidityPoolDepositResultCodeLiquidityPoolDepositMalformed LiquidityPoolDepositResultCode = -1 + LiquidityPoolDepositResultCodeLiquidityPoolDepositNoTrust LiquidityPoolDepositResultCode = -2 + LiquidityPoolDepositResultCodeLiquidityPoolDepositNotAuthorized LiquidityPoolDepositResultCode = -3 + LiquidityPoolDepositResultCodeLiquidityPoolDepositUnderfunded LiquidityPoolDepositResultCode = -4 + LiquidityPoolDepositResultCodeLiquidityPoolDepositLineFull LiquidityPoolDepositResultCode = -5 + LiquidityPoolDepositResultCodeLiquidityPoolDepositBadPrice LiquidityPoolDepositResultCode = -6 + LiquidityPoolDepositResultCodeLiquidityPoolDepositPoolFull LiquidityPoolDepositResultCode = -7 +) + +var liquidityPoolDepositResultCodeMap = map[int32]string{ + 0: "LiquidityPoolDepositResultCodeLiquidityPoolDepositSuccess", + -1: "LiquidityPoolDepositResultCodeLiquidityPoolDepositMalformed", + -2: "LiquidityPoolDepositResultCodeLiquidityPoolDepositNoTrust", + -3: "LiquidityPoolDepositResultCodeLiquidityPoolDepositNotAuthorized", + -4: "LiquidityPoolDepositResultCodeLiquidityPoolDepositUnderfunded", + -5: "LiquidityPoolDepositResultCodeLiquidityPoolDepositLineFull", + -6: "LiquidityPoolDepositResultCodeLiquidityPoolDepositBadPrice", + -7: "LiquidityPoolDepositResultCodeLiquidityPoolDepositPoolFull", +} + +// ValidEnum validates a proposed value for this enum. 
Implements +// the Enum interface for LiquidityPoolDepositResultCode +func (e LiquidityPoolDepositResultCode) ValidEnum(v int32) bool { + _, ok := liquidityPoolDepositResultCodeMap[v] + return ok +} + +// String returns the name of `e` +func (e LiquidityPoolDepositResultCode) String() string { + name, _ := liquidityPoolDepositResultCodeMap[int32(e)] + return name } -// LedgerHeaderExt is an XDR NestedUnion defines as: +// EncodeTo encodes this value using the Encoder. +func (e LiquidityPoolDepositResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := liquidityPoolDepositResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid LiquidityPoolDepositResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*LiquidityPoolDepositResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *LiquidityPoolDepositResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolDepositResultCode: %s", err) + } + if _, ok := liquidityPoolDepositResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid LiquidityPoolDepositResultCode enum value", v) + } + *e = LiquidityPoolDepositResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolDepositResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolDepositResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolDepositResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolDepositResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolDepositResultCode) xdrType() {} + +var _ xdrType = (*LiquidityPoolDepositResultCode)(nil) + +// LiquidityPoolDepositResult is an XDR Union defines as: // -// union switch (int v) -// { -// case 0: -// void; -// } +// union LiquidityPoolDepositResult switch ( +// LiquidityPoolDepositResultCode code) +// { +// case LIQUIDITY_POOL_DEPOSIT_SUCCESS: +// void; +// default: +// void; +// }; // -type LedgerHeaderExt struct { - V int32 +type LiquidityPoolDepositResult struct { + Code LiquidityPoolDepositResultCode } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u LedgerHeaderExt) SwitchFieldName() string { - return "V" +func (u LiquidityPoolDepositResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of LedgerHeaderExt -func (u LedgerHeaderExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: +// the value for an instance of LiquidityPoolDepositResult +func (u LiquidityPoolDepositResult) ArmForSwitch(sw int32) (string, bool) { + switch LiquidityPoolDepositResultCode(sw) { + case LiquidityPoolDepositResultCodeLiquidityPoolDepositSuccess: + return "", true + default: return "", true } - return "-", false } -// NewLedgerHeaderExt creates a new LedgerHeaderExt. 
-func NewLedgerHeaderExt(v int32, value interface{}) (result LedgerHeaderExt, err error) { - result.V = v - switch int32(v) { - case 0: +// NewLiquidityPoolDepositResult creates a new LiquidityPoolDepositResult. +func NewLiquidityPoolDepositResult(code LiquidityPoolDepositResultCode, value interface{}) (result LiquidityPoolDepositResult, err error) { + result.Code = code + switch LiquidityPoolDepositResultCode(code) { + case LiquidityPoolDepositResultCodeLiquidityPoolDepositSuccess: + // void + default: // void } return } -// LedgerHeader is an XDR Struct defines as: -// -// struct LedgerHeader -// { -// uint32 ledgerVersion; // the protocol version of the ledger -// Hash previousLedgerHash; // hash of the previous ledger header -// StellarValue scpValue; // what consensus agreed to -// Hash txSetResultHash; // the TransactionResultSet that led to this ledger -// Hash bucketListHash; // hash of the ledger state -// -// uint32 ledgerSeq; // sequence number of this ledger -// -// int64 totalCoins; // total number of stroops in existence. -// // 10,000,000 stroops in 1 XLM -// -// int64 feePool; // fees burned since last inflation run -// uint32 inflationSeq; // inflation sequence number -// -// uint64 idPool; // last used global ID, used for generating objects -// -// uint32 baseFee; // base fee per operation in stroops -// uint32 baseReserve; // account base reserve in stroops -// -// uint32 maxTxSetSize; // maximum size a transaction set can be -// -// Hash skipList[4]; // hashes of ledgers in the past. allows you to jump back -// // in time without walking the chain back ledger by ledger -// // each slot contains the oldest ledger that is mod of -// // either 50 5000 50000 or 500000 depending on index -// // skipList[0] mod(50), skipList[1] mod(5000), etc -// -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; -// }; -// -type LedgerHeader struct { - LedgerVersion Uint32 - PreviousLedgerHash Hash - ScpValue StellarValue - TxSetResultHash Hash - BucketListHash Hash - LedgerSeq Uint32 - TotalCoins Int64 - FeePool Int64 - InflationSeq Uint32 - IdPool Uint64 - BaseFee Uint32 - BaseReserve Uint32 - MaxTxSetSize Uint32 - SkipList [4]Hash - Ext LedgerHeaderExt +// EncodeTo encodes this value using the Encoder. +func (u LiquidityPoolDepositResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch LiquidityPoolDepositResultCode(u.Code) { + case LiquidityPoolDepositResultCodeLiquidityPoolDepositSuccess: + // Void + return nil + default: + // Void + return nil + } } -// LedgerUpgradeType is an XDR Enum defines as: +var _ decoderFrom = (*LiquidityPoolDepositResult)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *LiquidityPoolDepositResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolDepositResultCode: %s", err) + } + switch LiquidityPoolDepositResultCode(u.Code) { + case LiquidityPoolDepositResultCodeLiquidityPoolDepositSuccess: + // Void + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolDepositResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
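// Illustrative sketch (editor's addition, not part of the generated patch):
// round-tripping a LiquidityPoolDepositResult, plus the decode-time rejection
// of a code that is outside the enum. Assumes the generated file's package is
// `xdr`.

package xdr

func exampleLiquidityPoolDepositRoundTrip() (bool, error) {
	res, err := NewLiquidityPoolDepositResult(LiquidityPoolDepositResultCodeLiquidityPoolDepositBadPrice, nil)
	if err != nil {
		return false, err
	}
	raw, err := res.MarshalBinary()
	if err != nil {
		return false, err
	}

	var decoded LiquidityPoolDepositResult
	if err := decoded.UnmarshalBinary(raw); err != nil {
		return false, err
	}
	sameCode := decoded.Code == LiquidityPoolDepositResultCodeLiquidityPoolDepositBadPrice // true

	// -42 is not a LiquidityPoolDepositResultCode, so decoding it fails.
	var bogus LiquidityPoolDepositResult
	errBogus := bogus.UnmarshalBinary([]byte{0xFF, 0xFF, 0xFF, 0xD6})
	return sameCode, errBogus // errBogus != nil
}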
+func (s *LiquidityPoolDepositResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolDepositResult)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolDepositResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolDepositResult) xdrType() {} + +var _ xdrType = (*LiquidityPoolDepositResult)(nil) + +// LiquidityPoolWithdrawResultCode is an XDR Enum defines as: // -// enum LedgerUpgradeType +// enum LiquidityPoolWithdrawResultCode // { -// LEDGER_UPGRADE_VERSION = 1, -// LEDGER_UPGRADE_BASE_FEE = 2, -// LEDGER_UPGRADE_MAX_TX_SET_SIZE = 3 +// // codes considered as "success" for the operation +// LIQUIDITY_POOL_WITHDRAW_SUCCESS = 0, +// +// // codes considered as "failure" for the operation +// LIQUIDITY_POOL_WITHDRAW_MALFORMED = -1, // bad input +// LIQUIDITY_POOL_WITHDRAW_NO_TRUST = -2, // no trust line for one of the +// // assets +// LIQUIDITY_POOL_WITHDRAW_UNDERFUNDED = -3, // not enough balance of the +// // pool share +// LIQUIDITY_POOL_WITHDRAW_LINE_FULL = -4, // would go above limit for one +// // of the assets +// LIQUIDITY_POOL_WITHDRAW_UNDER_MINIMUM = -5 // didn't withdraw enough // }; // -type LedgerUpgradeType int32 +type LiquidityPoolWithdrawResultCode int32 const ( - LedgerUpgradeTypeLedgerUpgradeVersion LedgerUpgradeType = 1 - LedgerUpgradeTypeLedgerUpgradeBaseFee LedgerUpgradeType = 2 - LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize LedgerUpgradeType = 3 + LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawSuccess LiquidityPoolWithdrawResultCode = 0 + LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawMalformed LiquidityPoolWithdrawResultCode = -1 + LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawNoTrust LiquidityPoolWithdrawResultCode = -2 + LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawUnderfunded LiquidityPoolWithdrawResultCode = -3 + LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawLineFull LiquidityPoolWithdrawResultCode = -4 + LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawUnderMinimum LiquidityPoolWithdrawResultCode = -5 ) -var ledgerUpgradeTypeMap = map[int32]string{ - 1: "LedgerUpgradeTypeLedgerUpgradeVersion", - 2: "LedgerUpgradeTypeLedgerUpgradeBaseFee", - 3: "LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize", +var liquidityPoolWithdrawResultCodeMap = map[int32]string{ + 0: "LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawSuccess", + -1: "LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawMalformed", + -2: "LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawNoTrust", + -3: "LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawUnderfunded", + -4: "LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawLineFull", + -5: "LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawUnderMinimum", } // ValidEnum validates a proposed value for this enum. 
Implements -// the Enum interface for LedgerUpgradeType -func (e LedgerUpgradeType) ValidEnum(v int32) bool { - _, ok := ledgerUpgradeTypeMap[v] +// the Enum interface for LiquidityPoolWithdrawResultCode +func (e LiquidityPoolWithdrawResultCode) ValidEnum(v int32) bool { + _, ok := liquidityPoolWithdrawResultCodeMap[v] return ok } // String returns the name of `e` -func (e LedgerUpgradeType) String() string { - name, _ := ledgerUpgradeTypeMap[int32(e)] +func (e LiquidityPoolWithdrawResultCode) String() string { + name, _ := liquidityPoolWithdrawResultCodeMap[int32(e)] return name } -// LedgerUpgrade is an XDR Union defines as: -// -// union LedgerUpgrade switch (LedgerUpgradeType type) -// { -// case LEDGER_UPGRADE_VERSION: -// uint32 newLedgerVersion; // update ledgerVersion -// case LEDGER_UPGRADE_BASE_FEE: -// uint32 newBaseFee; // update baseFee -// case LEDGER_UPGRADE_MAX_TX_SET_SIZE: -// uint32 newMaxTxSetSize; // update maxTxSetSize -// }; -// -type LedgerUpgrade struct { - Type LedgerUpgradeType - NewLedgerVersion *Uint32 - NewBaseFee *Uint32 - NewMaxTxSetSize *Uint32 +// EncodeTo encodes this value using the Encoder. +func (e LiquidityPoolWithdrawResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := liquidityPoolWithdrawResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid LiquidityPoolWithdrawResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u LedgerUpgrade) SwitchFieldName() string { - return "Type" -} +var _ decoderFrom = (*LiquidityPoolWithdrawResultCode)(nil) -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of LedgerUpgrade -func (u LedgerUpgrade) ArmForSwitch(sw int32) (string, bool) { - switch LedgerUpgradeType(sw) { - case LedgerUpgradeTypeLedgerUpgradeVersion: - return "NewLedgerVersion", true - case LedgerUpgradeTypeLedgerUpgradeBaseFee: - return "NewBaseFee", true - case LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize: - return "NewMaxTxSetSize", true +// DecodeFrom decodes this value using the Decoder. +func (e *LiquidityPoolWithdrawResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolWithdrawResultCode: %s", err) } - return "-", false -} - -// NewLedgerUpgrade creates a new LedgerUpgrade. -func NewLedgerUpgrade(aType LedgerUpgradeType, value interface{}) (result LedgerUpgrade, err error) { - result.Type = aType - switch LedgerUpgradeType(aType) { - case LedgerUpgradeTypeLedgerUpgradeVersion: - tv, ok := value.(Uint32) - if !ok { - err = fmt.Errorf("invalid value, must be Uint32") - return - } - result.NewLedgerVersion = &tv - case LedgerUpgradeTypeLedgerUpgradeBaseFee: - tv, ok := value.(Uint32) - if !ok { - err = fmt.Errorf("invalid value, must be Uint32") - return - } - result.NewBaseFee = &tv - case LedgerUpgradeTypeLedgerUpgradeMaxTxSetSize: - tv, ok := value.(Uint32) - if !ok { - err = fmt.Errorf("invalid value, must be Uint32") - return - } - result.NewMaxTxSetSize = &tv + if _, ok := liquidityPoolWithdrawResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid LiquidityPoolWithdrawResultCode enum value", v) } - return + *e = LiquidityPoolWithdrawResultCode(v) + return n, nil } -// MustNewLedgerVersion retrieves the NewLedgerVersion value from the union, -// panicing if the value is not set. 
-func (u LedgerUpgrade) MustNewLedgerVersion() Uint32 { - val, ok := u.GetNewLedgerVersion() - - if !ok { - panic("arm NewLedgerVersion is not set") - } +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolWithdrawResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - return val +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolWithdrawResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// GetNewLedgerVersion retrieves the NewLedgerVersion value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u LedgerUpgrade) GetNewLedgerVersion() (result Uint32, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolWithdrawResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolWithdrawResultCode)(nil) +) - if armName == "NewLedgerVersion" { - result = *u.NewLedgerVersion - ok = true - } +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s LiquidityPoolWithdrawResultCode) xdrType() {} - return +var _ xdrType = (*LiquidityPoolWithdrawResultCode)(nil) + +// LiquidityPoolWithdrawResult is an XDR Union defines as: +// +// union LiquidityPoolWithdrawResult switch ( +// LiquidityPoolWithdrawResultCode code) +// { +// case LIQUIDITY_POOL_WITHDRAW_SUCCESS: +// void; +// default: +// void; +// }; +// +type LiquidityPoolWithdrawResult struct { + Code LiquidityPoolWithdrawResultCode } -// MustNewBaseFee retrieves the NewBaseFee value from the union, -// panicing if the value is not set. -func (u LedgerUpgrade) MustNewBaseFee() Uint32 { - val, ok := u.GetNewBaseFee() +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u LiquidityPoolWithdrawResult) SwitchFieldName() string { + return "Code" +} - if !ok { - panic("arm NewBaseFee is not set") +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of LiquidityPoolWithdrawResult +func (u LiquidityPoolWithdrawResult) ArmForSwitch(sw int32) (string, bool) { + switch LiquidityPoolWithdrawResultCode(sw) { + case LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawSuccess: + return "", true + default: + return "", true } - - return val } -// GetNewBaseFee retrieves the NewBaseFee value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u LedgerUpgrade) GetNewBaseFee() (result Uint32, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) - - if armName == "NewBaseFee" { - result = *u.NewBaseFee - ok = true +// NewLiquidityPoolWithdrawResult creates a new LiquidityPoolWithdrawResult. +func NewLiquidityPoolWithdrawResult(code LiquidityPoolWithdrawResultCode, value interface{}) (result LiquidityPoolWithdrawResult, err error) { + result.Code = code + switch LiquidityPoolWithdrawResultCode(code) { + case LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawSuccess: + // void + default: + // void } - return } -// MustNewMaxTxSetSize retrieves the NewMaxTxSetSize value from the union, -// panicing if the value is not set. -func (u LedgerUpgrade) MustNewMaxTxSetSize() Uint32 { - val, ok := u.GetNewMaxTxSetSize() - - if !ok { - panic("arm NewMaxTxSetSize is not set") +// EncodeTo encodes this value using the Encoder. 
+func (u LiquidityPoolWithdrawResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch LiquidityPoolWithdrawResultCode(u.Code) { + case LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawSuccess: + // Void + return nil + default: + // Void + return nil } - - return val } -// GetNewMaxTxSetSize retrieves the NewMaxTxSetSize value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u LedgerUpgrade) GetNewMaxTxSetSize() (result Uint32, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*LiquidityPoolWithdrawResult)(nil) - if armName == "NewMaxTxSetSize" { - result = *u.NewMaxTxSetSize - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *LiquidityPoolWithdrawResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolWithdrawResultCode: %s", err) + } + switch LiquidityPoolWithdrawResultCode(u.Code) { + case LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawSuccess: + // Void + return n, nil + default: + // Void + return n, nil } +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s LiquidityPoolWithdrawResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// LedgerKeyAccount is an XDR NestedStruct defines as: -// -// struct -// { -// AccountID accountID; -// } -// -type LedgerKeyAccount struct { - AccountId AccountId +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *LiquidityPoolWithdrawResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// LedgerKeyTrustLine is an XDR NestedStruct defines as: +var ( + _ encoding.BinaryMarshaler = (*LiquidityPoolWithdrawResult)(nil) + _ encoding.BinaryUnmarshaler = (*LiquidityPoolWithdrawResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s LiquidityPoolWithdrawResult) xdrType() {} + +var _ xdrType = (*LiquidityPoolWithdrawResult)(nil) + +// OperationResultCode is an XDR Enum defines as: // -// struct -// { -// AccountID accountID; -// Asset asset; -// } +// enum OperationResultCode +// { +// opINNER = 0, // inner object result is valid // -type LedgerKeyTrustLine struct { - AccountId AccountId - Asset Asset +// opBAD_AUTH = -1, // too few valid signatures / wrong network +// opNO_ACCOUNT = -2, // source account was not found +// opNOT_SUPPORTED = -3, // operation not supported at this time +// opTOO_MANY_SUBENTRIES = -4, // max number of subentries already reached +// opEXCEEDED_WORK_LIMIT = -5, // operation did too much work +// opTOO_MANY_SPONSORING = -6 // account is sponsoring too many entries +// }; +// +type OperationResultCode int32 + +const ( + OperationResultCodeOpInner OperationResultCode = 0 + OperationResultCodeOpBadAuth OperationResultCode = -1 + OperationResultCodeOpNoAccount OperationResultCode = -2 + OperationResultCodeOpNotSupported OperationResultCode = -3 + OperationResultCodeOpTooManySubentries OperationResultCode = -4 + OperationResultCodeOpExceededWorkLimit OperationResultCode = -5 + OperationResultCodeOpTooManySponsoring OperationResultCode = -6 +) + +var operationResultCodeMap = map[int32]string{ + 0: "OperationResultCodeOpInner", + -1: "OperationResultCodeOpBadAuth", + -2: "OperationResultCodeOpNoAccount", + -3: "OperationResultCodeOpNotSupported", + -4: "OperationResultCodeOpTooManySubentries", + -5: "OperationResultCodeOpExceededWorkLimit", + -6: "OperationResultCodeOpTooManySponsoring", } -// LedgerKeyOffer is an XDR NestedStruct defines as: -// -// struct -// { -// AccountID sellerID; -// uint64 offerID; -// } -// -type LedgerKeyOffer struct { - SellerId AccountId - OfferId Uint64 +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for OperationResultCode +func (e OperationResultCode) ValidEnum(v int32) bool { + _, ok := operationResultCodeMap[v] + return ok } -// LedgerKeyData is an XDR NestedStruct defines as: -// -// struct -// { -// AccountID accountID; -// string64 dataName; -// } -// -type LedgerKeyData struct { - AccountId AccountId - DataName String64 +// String returns the name of `e` +func (e OperationResultCode) String() string { + name, _ := operationResultCodeMap[int32(e)] + return name } -// LedgerKey is an XDR Union defines as: -// -// union LedgerKey switch (LedgerEntryType type) -// { -// case ACCOUNT: -// struct -// { -// AccountID accountID; -// } account; -// -// case TRUSTLINE: -// struct -// { -// AccountID accountID; -// Asset asset; -// } trustLine; -// -// case OFFER: -// struct -// { -// AccountID sellerID; -// uint64 offerID; -// } offer; +// EncodeTo encodes this value using the Encoder. +func (e OperationResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := operationResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid OperationResultCode enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err +} + +var _ decoderFrom = (*OperationResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (e *OperationResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding OperationResultCode: %s", err) + } + if _, ok := operationResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid OperationResultCode enum value", v) + } + *e = OperationResultCode(v) + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s OperationResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *OperationResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*OperationResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*OperationResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s OperationResultCode) xdrType() {} + +var _ xdrType = (*OperationResultCode)(nil) + +// OperationResultTr is an XDR NestedUnion defines as: // -// case DATA: -// struct +// union switch (OperationType type) // { -// AccountID accountID; -// string64 dataName; -// } data; -// }; +// case CREATE_ACCOUNT: +// CreateAccountResult createAccountResult; +// case PAYMENT: +// PaymentResult paymentResult; +// case PATH_PAYMENT_STRICT_RECEIVE: +// PathPaymentStrictReceiveResult pathPaymentStrictReceiveResult; +// case MANAGE_SELL_OFFER: +// ManageSellOfferResult manageSellOfferResult; +// case CREATE_PASSIVE_SELL_OFFER: +// ManageSellOfferResult createPassiveSellOfferResult; +// case SET_OPTIONS: +// SetOptionsResult setOptionsResult; +// case CHANGE_TRUST: +// ChangeTrustResult changeTrustResult; +// case ALLOW_TRUST: +// AllowTrustResult allowTrustResult; +// case ACCOUNT_MERGE: +// AccountMergeResult accountMergeResult; +// case INFLATION: +// InflationResult inflationResult; +// case MANAGE_DATA: +// ManageDataResult manageDataResult; +// case BUMP_SEQUENCE: +// BumpSequenceResult bumpSeqResult; +// case MANAGE_BUY_OFFER: +// ManageBuyOfferResult manageBuyOfferResult; +// case PATH_PAYMENT_STRICT_SEND: +// PathPaymentStrictSendResult pathPaymentStrictSendResult; +// case CREATE_CLAIMABLE_BALANCE: +// CreateClaimableBalanceResult createClaimableBalanceResult; +// case CLAIM_CLAIMABLE_BALANCE: +// ClaimClaimableBalanceResult claimClaimableBalanceResult; +// case BEGIN_SPONSORING_FUTURE_RESERVES: +// BeginSponsoringFutureReservesResult beginSponsoringFutureReservesResult; +// case END_SPONSORING_FUTURE_RESERVES: +// EndSponsoringFutureReservesResult endSponsoringFutureReservesResult; +// case REVOKE_SPONSORSHIP: +// RevokeSponsorshipResult revokeSponsorshipResult; +// case CLAWBACK: +// ClawbackResult clawbackResult; +// case CLAWBACK_CLAIMABLE_BALANCE: +// ClawbackClaimableBalanceResult clawbackClaimableBalanceResult; +// case SET_TRUST_LINE_FLAGS: +// SetTrustLineFlagsResult setTrustLineFlagsResult; +// case LIQUIDITY_POOL_DEPOSIT: +// LiquidityPoolDepositResult liquidityPoolDepositResult; +// case LIQUIDITY_POOL_WITHDRAW: +// LiquidityPoolWithdrawResult liquidityPoolWithdrawResult; +// } // -type LedgerKey struct { - Type LedgerEntryType - Account *LedgerKeyAccount - TrustLine *LedgerKeyTrustLine - Offer *LedgerKeyOffer - Data *LedgerKeyData +type OperationResultTr struct { + Type OperationType + CreateAccountResult *CreateAccountResult + 
PaymentResult *PaymentResult + PathPaymentStrictReceiveResult *PathPaymentStrictReceiveResult + ManageSellOfferResult *ManageSellOfferResult + CreatePassiveSellOfferResult *ManageSellOfferResult + SetOptionsResult *SetOptionsResult + ChangeTrustResult *ChangeTrustResult + AllowTrustResult *AllowTrustResult + AccountMergeResult *AccountMergeResult + InflationResult *InflationResult + ManageDataResult *ManageDataResult + BumpSeqResult *BumpSequenceResult + ManageBuyOfferResult *ManageBuyOfferResult + PathPaymentStrictSendResult *PathPaymentStrictSendResult + CreateClaimableBalanceResult *CreateClaimableBalanceResult + ClaimClaimableBalanceResult *ClaimClaimableBalanceResult + BeginSponsoringFutureReservesResult *BeginSponsoringFutureReservesResult + EndSponsoringFutureReservesResult *EndSponsoringFutureReservesResult + RevokeSponsorshipResult *RevokeSponsorshipResult + ClawbackResult *ClawbackResult + ClawbackClaimableBalanceResult *ClawbackClaimableBalanceResult + SetTrustLineFlagsResult *SetTrustLineFlagsResult + LiquidityPoolDepositResult *LiquidityPoolDepositResult + LiquidityPoolWithdrawResult *LiquidityPoolWithdrawResult } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u LedgerKey) SwitchFieldName() string { +func (u OperationResultTr) SwitchFieldName() string { return "Type" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of LedgerKey -func (u LedgerKey) ArmForSwitch(sw int32) (string, bool) { - switch LedgerEntryType(sw) { - case LedgerEntryTypeAccount: - return "Account", true - case LedgerEntryTypeTrustline: - return "TrustLine", true - case LedgerEntryTypeOffer: - return "Offer", true - case LedgerEntryTypeData: - return "Data", true +// the value for an instance of OperationResultTr +func (u OperationResultTr) ArmForSwitch(sw int32) (string, bool) { + switch OperationType(sw) { + case OperationTypeCreateAccount: + return "CreateAccountResult", true + case OperationTypePayment: + return "PaymentResult", true + case OperationTypePathPaymentStrictReceive: + return "PathPaymentStrictReceiveResult", true + case OperationTypeManageSellOffer: + return "ManageSellOfferResult", true + case OperationTypeCreatePassiveSellOffer: + return "CreatePassiveSellOfferResult", true + case OperationTypeSetOptions: + return "SetOptionsResult", true + case OperationTypeChangeTrust: + return "ChangeTrustResult", true + case OperationTypeAllowTrust: + return "AllowTrustResult", true + case OperationTypeAccountMerge: + return "AccountMergeResult", true + case OperationTypeInflation: + return "InflationResult", true + case OperationTypeManageData: + return "ManageDataResult", true + case OperationTypeBumpSequence: + return "BumpSeqResult", true + case OperationTypeManageBuyOffer: + return "ManageBuyOfferResult", true + case OperationTypePathPaymentStrictSend: + return "PathPaymentStrictSendResult", true + case OperationTypeCreateClaimableBalance: + return "CreateClaimableBalanceResult", true + case OperationTypeClaimClaimableBalance: + return "ClaimClaimableBalanceResult", true + case OperationTypeBeginSponsoringFutureReserves: + return "BeginSponsoringFutureReservesResult", true + case OperationTypeEndSponsoringFutureReserves: + return "EndSponsoringFutureReservesResult", true + case OperationTypeRevokeSponsorship: + return "RevokeSponsorshipResult", true + case OperationTypeClawback: + return "ClawbackResult", true + case OperationTypeClawbackClaimableBalance: + return "ClawbackClaimableBalanceResult", 
true + case OperationTypeSetTrustLineFlags: + return "SetTrustLineFlagsResult", true + case OperationTypeLiquidityPoolDeposit: + return "LiquidityPoolDepositResult", true + case OperationTypeLiquidityPoolWithdraw: + return "LiquidityPoolWithdrawResult", true } return "-", false } -// NewLedgerKey creates a new LedgerKey. -func NewLedgerKey(aType LedgerEntryType, value interface{}) (result LedgerKey, err error) { +// NewOperationResultTr creates a new OperationResultTr. +func NewOperationResultTr(aType OperationType, value interface{}) (result OperationResultTr, err error) { result.Type = aType - switch LedgerEntryType(aType) { - case LedgerEntryTypeAccount: - tv, ok := value.(LedgerKeyAccount) + switch OperationType(aType) { + case OperationTypeCreateAccount: + tv, ok := value.(CreateAccountResult) + if !ok { + err = fmt.Errorf("invalid value, must be CreateAccountResult") + return + } + result.CreateAccountResult = &tv + case OperationTypePayment: + tv, ok := value.(PaymentResult) + if !ok { + err = fmt.Errorf("invalid value, must be PaymentResult") + return + } + result.PaymentResult = &tv + case OperationTypePathPaymentStrictReceive: + tv, ok := value.(PathPaymentStrictReceiveResult) + if !ok { + err = fmt.Errorf("invalid value, must be PathPaymentStrictReceiveResult") + return + } + result.PathPaymentStrictReceiveResult = &tv + case OperationTypeManageSellOffer: + tv, ok := value.(ManageSellOfferResult) + if !ok { + err = fmt.Errorf("invalid value, must be ManageSellOfferResult") + return + } + result.ManageSellOfferResult = &tv + case OperationTypeCreatePassiveSellOffer: + tv, ok := value.(ManageSellOfferResult) + if !ok { + err = fmt.Errorf("invalid value, must be ManageSellOfferResult") + return + } + result.CreatePassiveSellOfferResult = &tv + case OperationTypeSetOptions: + tv, ok := value.(SetOptionsResult) + if !ok { + err = fmt.Errorf("invalid value, must be SetOptionsResult") + return + } + result.SetOptionsResult = &tv + case OperationTypeChangeTrust: + tv, ok := value.(ChangeTrustResult) + if !ok { + err = fmt.Errorf("invalid value, must be ChangeTrustResult") + return + } + result.ChangeTrustResult = &tv + case OperationTypeAllowTrust: + tv, ok := value.(AllowTrustResult) + if !ok { + err = fmt.Errorf("invalid value, must be AllowTrustResult") + return + } + result.AllowTrustResult = &tv + case OperationTypeAccountMerge: + tv, ok := value.(AccountMergeResult) + if !ok { + err = fmt.Errorf("invalid value, must be AccountMergeResult") + return + } + result.AccountMergeResult = &tv + case OperationTypeInflation: + tv, ok := value.(InflationResult) + if !ok { + err = fmt.Errorf("invalid value, must be InflationResult") + return + } + result.InflationResult = &tv + case OperationTypeManageData: + tv, ok := value.(ManageDataResult) if !ok { - err = fmt.Errorf("invalid value, must be LedgerKeyAccount") + err = fmt.Errorf("invalid value, must be ManageDataResult") return } - result.Account = &tv - case LedgerEntryTypeTrustline: - tv, ok := value.(LedgerKeyTrustLine) + result.ManageDataResult = &tv + case OperationTypeBumpSequence: + tv, ok := value.(BumpSequenceResult) if !ok { - err = fmt.Errorf("invalid value, must be LedgerKeyTrustLine") + err = fmt.Errorf("invalid value, must be BumpSequenceResult") return } - result.TrustLine = &tv - case LedgerEntryTypeOffer: - tv, ok := value.(LedgerKeyOffer) + result.BumpSeqResult = &tv + case OperationTypeManageBuyOffer: + tv, ok := value.(ManageBuyOfferResult) if !ok { - err = fmt.Errorf("invalid value, must be LedgerKeyOffer") + err = 
fmt.Errorf("invalid value, must be ManageBuyOfferResult") return } - result.Offer = &tv - case LedgerEntryTypeData: - tv, ok := value.(LedgerKeyData) + result.ManageBuyOfferResult = &tv + case OperationTypePathPaymentStrictSend: + tv, ok := value.(PathPaymentStrictSendResult) if !ok { - err = fmt.Errorf("invalid value, must be LedgerKeyData") + err = fmt.Errorf("invalid value, must be PathPaymentStrictSendResult") return } - result.Data = &tv + result.PathPaymentStrictSendResult = &tv + case OperationTypeCreateClaimableBalance: + tv, ok := value.(CreateClaimableBalanceResult) + if !ok { + err = fmt.Errorf("invalid value, must be CreateClaimableBalanceResult") + return + } + result.CreateClaimableBalanceResult = &tv + case OperationTypeClaimClaimableBalance: + tv, ok := value.(ClaimClaimableBalanceResult) + if !ok { + err = fmt.Errorf("invalid value, must be ClaimClaimableBalanceResult") + return + } + result.ClaimClaimableBalanceResult = &tv + case OperationTypeBeginSponsoringFutureReserves: + tv, ok := value.(BeginSponsoringFutureReservesResult) + if !ok { + err = fmt.Errorf("invalid value, must be BeginSponsoringFutureReservesResult") + return + } + result.BeginSponsoringFutureReservesResult = &tv + case OperationTypeEndSponsoringFutureReserves: + tv, ok := value.(EndSponsoringFutureReservesResult) + if !ok { + err = fmt.Errorf("invalid value, must be EndSponsoringFutureReservesResult") + return + } + result.EndSponsoringFutureReservesResult = &tv + case OperationTypeRevokeSponsorship: + tv, ok := value.(RevokeSponsorshipResult) + if !ok { + err = fmt.Errorf("invalid value, must be RevokeSponsorshipResult") + return + } + result.RevokeSponsorshipResult = &tv + case OperationTypeClawback: + tv, ok := value.(ClawbackResult) + if !ok { + err = fmt.Errorf("invalid value, must be ClawbackResult") + return + } + result.ClawbackResult = &tv + case OperationTypeClawbackClaimableBalance: + tv, ok := value.(ClawbackClaimableBalanceResult) + if !ok { + err = fmt.Errorf("invalid value, must be ClawbackClaimableBalanceResult") + return + } + result.ClawbackClaimableBalanceResult = &tv + case OperationTypeSetTrustLineFlags: + tv, ok := value.(SetTrustLineFlagsResult) + if !ok { + err = fmt.Errorf("invalid value, must be SetTrustLineFlagsResult") + return + } + result.SetTrustLineFlagsResult = &tv + case OperationTypeLiquidityPoolDeposit: + tv, ok := value.(LiquidityPoolDepositResult) + if !ok { + err = fmt.Errorf("invalid value, must be LiquidityPoolDepositResult") + return + } + result.LiquidityPoolDepositResult = &tv + case OperationTypeLiquidityPoolWithdraw: + tv, ok := value.(LiquidityPoolWithdrawResult) + if !ok { + err = fmt.Errorf("invalid value, must be LiquidityPoolWithdrawResult") + return + } + result.LiquidityPoolWithdrawResult = &tv } return } -// MustAccount retrieves the Account value from the union, +// MustCreateAccountResult retrieves the CreateAccountResult value from the union, // panicing if the value is not set. -func (u LedgerKey) MustAccount() LedgerKeyAccount { - val, ok := u.GetAccount() +func (u OperationResultTr) MustCreateAccountResult() CreateAccountResult { + val, ok := u.GetCreateAccountResult() if !ok { - panic("arm Account is not set") + panic("arm CreateAccountResult is not set") } return val } -// GetAccount retrieves the Account value from the union, +// GetCreateAccountResult retrieves the CreateAccountResult value from the union, // returning ok if the union's switch indicated the value is valid. 
-func (u LedgerKey) GetAccount() (result LedgerKeyAccount, ok bool) { +func (u OperationResultTr) GetCreateAccountResult() (result CreateAccountResult, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "Account" { - result = *u.Account + if armName == "CreateAccountResult" { + result = *u.CreateAccountResult ok = true } return } -// MustTrustLine retrieves the TrustLine value from the union, +// MustPaymentResult retrieves the PaymentResult value from the union, // panicing if the value is not set. -func (u LedgerKey) MustTrustLine() LedgerKeyTrustLine { - val, ok := u.GetTrustLine() +func (u OperationResultTr) MustPaymentResult() PaymentResult { + val, ok := u.GetPaymentResult() if !ok { - panic("arm TrustLine is not set") + panic("arm PaymentResult is not set") } return val } -// GetTrustLine retrieves the TrustLine value from the union, +// GetPaymentResult retrieves the PaymentResult value from the union, // returning ok if the union's switch indicated the value is valid. -func (u LedgerKey) GetTrustLine() (result LedgerKeyTrustLine, ok bool) { +func (u OperationResultTr) GetPaymentResult() (result PaymentResult, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "TrustLine" { - result = *u.TrustLine + if armName == "PaymentResult" { + result = *u.PaymentResult ok = true } return } -// MustOffer retrieves the Offer value from the union, +// MustPathPaymentStrictReceiveResult retrieves the PathPaymentStrictReceiveResult value from the union, // panicing if the value is not set. -func (u LedgerKey) MustOffer() LedgerKeyOffer { - val, ok := u.GetOffer() +func (u OperationResultTr) MustPathPaymentStrictReceiveResult() PathPaymentStrictReceiveResult { + val, ok := u.GetPathPaymentStrictReceiveResult() if !ok { - panic("arm Offer is not set") + panic("arm PathPaymentStrictReceiveResult is not set") } return val } -// GetOffer retrieves the Offer value from the union, +// GetPathPaymentStrictReceiveResult retrieves the PathPaymentStrictReceiveResult value from the union, // returning ok if the union's switch indicated the value is valid. -func (u LedgerKey) GetOffer() (result LedgerKeyOffer, ok bool) { +func (u OperationResultTr) GetPathPaymentStrictReceiveResult() (result PathPaymentStrictReceiveResult, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "Offer" { - result = *u.Offer + if armName == "PathPaymentStrictReceiveResult" { + result = *u.PathPaymentStrictReceiveResult ok = true } return } -// MustData retrieves the Data value from the union, +// MustManageSellOfferResult retrieves the ManageSellOfferResult value from the union, // panicing if the value is not set. -func (u LedgerKey) MustData() LedgerKeyData { - val, ok := u.GetData() +func (u OperationResultTr) MustManageSellOfferResult() ManageSellOfferResult { + val, ok := u.GetManageSellOfferResult() if !ok { - panic("arm Data is not set") + panic("arm ManageSellOfferResult is not set") } return val } -// GetData retrieves the Data value from the union, +// GetManageSellOfferResult retrieves the ManageSellOfferResult value from the union, // returning ok if the union's switch indicated the value is valid. 
-func (u LedgerKey) GetData() (result LedgerKeyData, ok bool) { +func (u OperationResultTr) GetManageSellOfferResult() (result ManageSellOfferResult, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "Data" { - result = *u.Data + if armName == "ManageSellOfferResult" { + result = *u.ManageSellOfferResult ok = true } return } -// BucketEntryType is an XDR Enum defines as: -// -// enum BucketEntryType -// { -// LIVEENTRY = 0, -// DEADENTRY = 1 -// }; -// -type BucketEntryType int32 +// MustCreatePassiveSellOfferResult retrieves the CreatePassiveSellOfferResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustCreatePassiveSellOfferResult() ManageSellOfferResult { + val, ok := u.GetCreatePassiveSellOfferResult() -const ( - BucketEntryTypeLiveentry BucketEntryType = 0 - BucketEntryTypeDeadentry BucketEntryType = 1 -) + if !ok { + panic("arm CreatePassiveSellOfferResult is not set") + } -var bucketEntryTypeMap = map[int32]string{ - 0: "BucketEntryTypeLiveentry", - 1: "BucketEntryTypeDeadentry", + return val } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for BucketEntryType -func (e BucketEntryType) ValidEnum(v int32) bool { - _, ok := bucketEntryTypeMap[v] - return ok +// GetCreatePassiveSellOfferResult retrieves the CreatePassiveSellOfferResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetCreatePassiveSellOfferResult() (result ManageSellOfferResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "CreatePassiveSellOfferResult" { + result = *u.CreatePassiveSellOfferResult + ok = true + } + + return } -// String returns the name of `e` -func (e BucketEntryType) String() string { - name, _ := bucketEntryTypeMap[int32(e)] - return name +// MustSetOptionsResult retrieves the SetOptionsResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustSetOptionsResult() SetOptionsResult { + val, ok := u.GetSetOptionsResult() + + if !ok { + panic("arm SetOptionsResult is not set") + } + + return val } -// BucketEntry is an XDR Union defines as: -// -// union BucketEntry switch (BucketEntryType type) -// { -// case LIVEENTRY: -// LedgerEntry liveEntry; -// -// case DEADENTRY: -// LedgerKey deadEntry; -// }; -// -type BucketEntry struct { - Type BucketEntryType - LiveEntry *LedgerEntry - DeadEntry *LedgerKey +// GetSetOptionsResult retrieves the SetOptionsResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetSetOptionsResult() (result SetOptionsResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "SetOptionsResult" { + result = *u.SetOptionsResult + ok = true + } + + return +} + +// MustChangeTrustResult retrieves the ChangeTrustResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustChangeTrustResult() ChangeTrustResult { + val, ok := u.GetChangeTrustResult() + + if !ok { + panic("arm ChangeTrustResult is not set") + } + + return val +} + +// GetChangeTrustResult retrieves the ChangeTrustResult value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u OperationResultTr) GetChangeTrustResult() (result ChangeTrustResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ChangeTrustResult" { + result = *u.ChangeTrustResult + ok = true + } + + return +} + +// MustAllowTrustResult retrieves the AllowTrustResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustAllowTrustResult() AllowTrustResult { + val, ok := u.GetAllowTrustResult() + + if !ok { + panic("arm AllowTrustResult is not set") + } + + return val +} + +// GetAllowTrustResult retrieves the AllowTrustResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetAllowTrustResult() (result AllowTrustResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AllowTrustResult" { + result = *u.AllowTrustResult + ok = true + } + + return +} + +// MustAccountMergeResult retrieves the AccountMergeResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustAccountMergeResult() AccountMergeResult { + val, ok := u.GetAccountMergeResult() + + if !ok { + panic("arm AccountMergeResult is not set") + } + + return val } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u BucketEntry) SwitchFieldName() string { - return "Type" +// GetAccountMergeResult retrieves the AccountMergeResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetAccountMergeResult() (result AccountMergeResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "AccountMergeResult" { + result = *u.AccountMergeResult + ok = true + } + + return +} + +// MustInflationResult retrieves the InflationResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustInflationResult() InflationResult { + val, ok := u.GetInflationResult() + + if !ok { + panic("arm InflationResult is not set") + } + + return val +} + +// GetInflationResult retrieves the InflationResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetInflationResult() (result InflationResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "InflationResult" { + result = *u.InflationResult + ok = true + } + + return } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of BucketEntry -func (u BucketEntry) ArmForSwitch(sw int32) (string, bool) { - switch BucketEntryType(sw) { - case BucketEntryTypeLiveentry: - return "LiveEntry", true - case BucketEntryTypeDeadentry: - return "DeadEntry", true +// MustManageDataResult retrieves the ManageDataResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustManageDataResult() ManageDataResult { + val, ok := u.GetManageDataResult() + + if !ok { + panic("arm ManageDataResult is not set") } - return "-", false + + return val } -// NewBucketEntry creates a new BucketEntry. 
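+
+// Illustrative sketch (not generator output; the helper name is arbitrary).
+// The Must*/Get* accessors only return an arm when ArmForSwitch maps the
+// union's current Type to that arm, so reading a mismatched arm reports
+// ok=false instead of dereferencing a nil pointer.
+func exampleOperationResultTrAccessors() {
+	tr, err := NewOperationResultTr(
+		OperationTypeLiquidityPoolWithdraw,
+		LiquidityPoolWithdrawResult{Code: LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawSuccess},
+	)
+	if err != nil {
+		fmt.Println("constructor rejected value:", err)
+		return
+	}
+
+	if res, ok := tr.GetLiquidityPoolWithdrawResult(); ok {
+		fmt.Println("withdraw result code:", res.Code)
+	}
+
+	// Asking for an arm that does not match tr.Type is safe: ok is false.
+	if _, ok := tr.GetPaymentResult(); !ok {
+		fmt.Println("PaymentResult arm is not set for LIQUIDITY_POOL_WITHDRAW")
+	}
+}
+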
-func NewBucketEntry(aType BucketEntryType, value interface{}) (result BucketEntry, err error) { - result.Type = aType - switch BucketEntryType(aType) { - case BucketEntryTypeLiveentry: - tv, ok := value.(LedgerEntry) - if !ok { - err = fmt.Errorf("invalid value, must be LedgerEntry") - return - } - result.LiveEntry = &tv - case BucketEntryTypeDeadentry: - tv, ok := value.(LedgerKey) - if !ok { - err = fmt.Errorf("invalid value, must be LedgerKey") - return - } - result.DeadEntry = &tv +// GetManageDataResult retrieves the ManageDataResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetManageDataResult() (result ManageDataResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ManageDataResult" { + result = *u.ManageDataResult + ok = true } + return } -// MustLiveEntry retrieves the LiveEntry value from the union, +// MustBumpSeqResult retrieves the BumpSeqResult value from the union, // panicing if the value is not set. -func (u BucketEntry) MustLiveEntry() LedgerEntry { - val, ok := u.GetLiveEntry() +func (u OperationResultTr) MustBumpSeqResult() BumpSequenceResult { + val, ok := u.GetBumpSeqResult() if !ok { - panic("arm LiveEntry is not set") + panic("arm BumpSeqResult is not set") } return val } -// GetLiveEntry retrieves the LiveEntry value from the union, +// GetBumpSeqResult retrieves the BumpSeqResult value from the union, // returning ok if the union's switch indicated the value is valid. -func (u BucketEntry) GetLiveEntry() (result LedgerEntry, ok bool) { +func (u OperationResultTr) GetBumpSeqResult() (result BumpSequenceResult, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "LiveEntry" { - result = *u.LiveEntry + if armName == "BumpSeqResult" { + result = *u.BumpSeqResult ok = true } return } -// MustDeadEntry retrieves the DeadEntry value from the union, +// MustManageBuyOfferResult retrieves the ManageBuyOfferResult value from the union, // panicing if the value is not set. -func (u BucketEntry) MustDeadEntry() LedgerKey { - val, ok := u.GetDeadEntry() +func (u OperationResultTr) MustManageBuyOfferResult() ManageBuyOfferResult { + val, ok := u.GetManageBuyOfferResult() if !ok { - panic("arm DeadEntry is not set") + panic("arm ManageBuyOfferResult is not set") } return val } -// GetDeadEntry retrieves the DeadEntry value from the union, +// GetManageBuyOfferResult retrieves the ManageBuyOfferResult value from the union, // returning ok if the union's switch indicated the value is valid. -func (u BucketEntry) GetDeadEntry() (result LedgerKey, ok bool) { +func (u OperationResultTr) GetManageBuyOfferResult() (result ManageBuyOfferResult, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "DeadEntry" { - result = *u.DeadEntry + if armName == "ManageBuyOfferResult" { + result = *u.ManageBuyOfferResult ok = true } return } -// MaxTxPerLedger is an XDR Const defines as: -// -// const MAX_TX_PER_LEDGER = 5000; -// -const MaxTxPerLedger = 5000 +// MustPathPaymentStrictSendResult retrieves the PathPaymentStrictSendResult value from the union, +// panicing if the value is not set. 
+func (u OperationResultTr) MustPathPaymentStrictSendResult() PathPaymentStrictSendResult { + val, ok := u.GetPathPaymentStrictSendResult() -// TransactionSet is an XDR Struct defines as: -// -// struct TransactionSet -// { -// Hash previousLedgerHash; -// TransactionEnvelope txs; -// }; -// -type TransactionSet struct { - PreviousLedgerHash Hash - Txs []TransactionEnvelope `xdrmaxsize:"5000"` -} + if !ok { + panic("arm PathPaymentStrictSendResult is not set") + } -// TransactionResultPair is an XDR Struct defines as: -// -// struct TransactionResultPair -// { -// Hash transactionHash; -// TransactionResult result; // result for the transaction -// }; -// -type TransactionResultPair struct { - TransactionHash Hash - Result TransactionResult + return val } -// TransactionResultSet is an XDR Struct defines as: -// -// struct TransactionResultSet -// { -// TransactionResultPair results; -// }; -// -type TransactionResultSet struct { - Results []TransactionResultPair `xdrmaxsize:"5000"` +// GetPathPaymentStrictSendResult retrieves the PathPaymentStrictSendResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetPathPaymentStrictSendResult() (result PathPaymentStrictSendResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "PathPaymentStrictSendResult" { + result = *u.PathPaymentStrictSendResult + ok = true + } + + return } -// TransactionHistoryEntryExt is an XDR NestedUnion defines as: -// -// union switch (int v) -// { -// case 0: -// void; -// } -// -type TransactionHistoryEntryExt struct { - V int32 +// MustCreateClaimableBalanceResult retrieves the CreateClaimableBalanceResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustCreateClaimableBalanceResult() CreateClaimableBalanceResult { + val, ok := u.GetCreateClaimableBalanceResult() + + if !ok { + panic("arm CreateClaimableBalanceResult is not set") + } + + return val } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u TransactionHistoryEntryExt) SwitchFieldName() string { - return "V" +// GetCreateClaimableBalanceResult retrieves the CreateClaimableBalanceResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetCreateClaimableBalanceResult() (result CreateClaimableBalanceResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "CreateClaimableBalanceResult" { + result = *u.CreateClaimableBalanceResult + ok = true + } + + return } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of TransactionHistoryEntryExt -func (u TransactionHistoryEntryExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "", true +// MustClaimClaimableBalanceResult retrieves the ClaimClaimableBalanceResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustClaimClaimableBalanceResult() ClaimClaimableBalanceResult { + val, ok := u.GetClaimClaimableBalanceResult() + + if !ok { + panic("arm ClaimClaimableBalanceResult is not set") } - return "-", false + + return val } -// NewTransactionHistoryEntryExt creates a new TransactionHistoryEntryExt. 
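+
+// Illustrative sketch (not generator output; the helper name is arbitrary).
+// MarshalBinary writes the OperationType discriminant followed by the
+// selected arm and UnmarshalBinary reverses it, so the union survives a
+// binary round trip with Type and arm intact (8 bytes here: 4 for the XDR
+// int discriminant, 4 for the withdraw result code).
+func exampleOperationResultTrRoundTrip() {
+	withdraw := LiquidityPoolWithdrawResult{Code: LiquidityPoolWithdrawResultCodeLiquidityPoolWithdrawSuccess}
+	src := OperationResultTr{
+		Type:                        OperationTypeLiquidityPoolWithdraw,
+		LiquidityPoolWithdrawResult: &withdraw,
+	}
+
+	raw, err := src.MarshalBinary()
+	if err != nil {
+		fmt.Println("encode failed:", err)
+		return
+	}
+
+	var dst OperationResultTr
+	if err := dst.UnmarshalBinary(raw); err != nil {
+		fmt.Println("decode failed:", err)
+		return
+	}
+	fmt.Println(dst.Type == src.Type, len(raw)) // true 8
+}
+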
-func NewTransactionHistoryEntryExt(v int32, value interface{}) (result TransactionHistoryEntryExt, err error) { - result.V = v - switch int32(v) { - case 0: - // void +// GetClaimClaimableBalanceResult retrieves the ClaimClaimableBalanceResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetClaimClaimableBalanceResult() (result ClaimClaimableBalanceResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ClaimClaimableBalanceResult" { + result = *u.ClaimClaimableBalanceResult + ok = true } + return } -// TransactionHistoryEntry is an XDR Struct defines as: -// -// struct TransactionHistoryEntry -// { -// uint32 ledgerSeq; -// TransactionSet txSet; -// -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; -// }; -// -type TransactionHistoryEntry struct { - LedgerSeq Uint32 - TxSet TransactionSet - Ext TransactionHistoryEntryExt -} +// MustBeginSponsoringFutureReservesResult retrieves the BeginSponsoringFutureReservesResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustBeginSponsoringFutureReservesResult() BeginSponsoringFutureReservesResult { + val, ok := u.GetBeginSponsoringFutureReservesResult() -// TransactionHistoryResultEntryExt is an XDR NestedUnion defines as: -// -// union switch (int v) -// { -// case 0: -// void; -// } -// -type TransactionHistoryResultEntryExt struct { - V int32 + if !ok { + panic("arm BeginSponsoringFutureReservesResult is not set") + } + + return val } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u TransactionHistoryResultEntryExt) SwitchFieldName() string { - return "V" +// GetBeginSponsoringFutureReservesResult retrieves the BeginSponsoringFutureReservesResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetBeginSponsoringFutureReservesResult() (result BeginSponsoringFutureReservesResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "BeginSponsoringFutureReservesResult" { + result = *u.BeginSponsoringFutureReservesResult + ok = true + } + + return } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of TransactionHistoryResultEntryExt -func (u TransactionHistoryResultEntryExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "", true +// MustEndSponsoringFutureReservesResult retrieves the EndSponsoringFutureReservesResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustEndSponsoringFutureReservesResult() EndSponsoringFutureReservesResult { + val, ok := u.GetEndSponsoringFutureReservesResult() + + if !ok { + panic("arm EndSponsoringFutureReservesResult is not set") } - return "-", false + + return val } -// NewTransactionHistoryResultEntryExt creates a new TransactionHistoryResultEntryExt. -func NewTransactionHistoryResultEntryExt(v int32, value interface{}) (result TransactionHistoryResultEntryExt, err error) { - result.V = v - switch int32(v) { - case 0: - // void +// GetEndSponsoringFutureReservesResult retrieves the EndSponsoringFutureReservesResult value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u OperationResultTr) GetEndSponsoringFutureReservesResult() (result EndSponsoringFutureReservesResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "EndSponsoringFutureReservesResult" { + result = *u.EndSponsoringFutureReservesResult + ok = true } + return } -// TransactionHistoryResultEntry is an XDR Struct defines as: -// -// struct TransactionHistoryResultEntry -// { -// uint32 ledgerSeq; -// TransactionResultSet txResultSet; -// -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; -// }; -// -type TransactionHistoryResultEntry struct { - LedgerSeq Uint32 - TxResultSet TransactionResultSet - Ext TransactionHistoryResultEntryExt +// MustRevokeSponsorshipResult retrieves the RevokeSponsorshipResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustRevokeSponsorshipResult() RevokeSponsorshipResult { + val, ok := u.GetRevokeSponsorshipResult() + + if !ok { + panic("arm RevokeSponsorshipResult is not set") + } + + return val } -// LedgerHeaderHistoryEntryExt is an XDR NestedUnion defines as: -// -// union switch (int v) -// { -// case 0: -// void; -// } -// -type LedgerHeaderHistoryEntryExt struct { - V int32 -} +// GetRevokeSponsorshipResult retrieves the RevokeSponsorshipResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetRevokeSponsorshipResult() (result RevokeSponsorshipResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u LedgerHeaderHistoryEntryExt) SwitchFieldName() string { - return "V" + if armName == "RevokeSponsorshipResult" { + result = *u.RevokeSponsorshipResult + ok = true + } + + return } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of LedgerHeaderHistoryEntryExt -func (u LedgerHeaderHistoryEntryExt) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "", true +// MustClawbackResult retrieves the ClawbackResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustClawbackResult() ClawbackResult { + val, ok := u.GetClawbackResult() + + if !ok { + panic("arm ClawbackResult is not set") } - return "-", false + + return val } -// NewLedgerHeaderHistoryEntryExt creates a new LedgerHeaderHistoryEntryExt. -func NewLedgerHeaderHistoryEntryExt(v int32, value interface{}) (result LedgerHeaderHistoryEntryExt, err error) { - result.V = v - switch int32(v) { - case 0: - // void +// GetClawbackResult retrieves the ClawbackResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetClawbackResult() (result ClawbackResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ClawbackResult" { + result = *u.ClawbackResult + ok = true } + return } -// LedgerHeaderHistoryEntry is an XDR Struct defines as: -// -// struct LedgerHeaderHistoryEntry -// { -// Hash hash; -// LedgerHeader header; -// -// // reserved for future use -// union switch (int v) -// { -// case 0: -// void; -// } -// ext; -// }; -// -type LedgerHeaderHistoryEntry struct { - Hash Hash - Header LedgerHeader - Ext LedgerHeaderHistoryEntryExt -} +// MustClawbackClaimableBalanceResult retrieves the ClawbackClaimableBalanceResult value from the union, +// panicing if the value is not set. 
+func (u OperationResultTr) MustClawbackClaimableBalanceResult() ClawbackClaimableBalanceResult { + val, ok := u.GetClawbackClaimableBalanceResult() -// LedgerScpMessages is an XDR Struct defines as: -// -// struct LedgerSCPMessages -// { -// uint32 ledgerSeq; -// SCPEnvelope messages<>; -// }; -// -type LedgerScpMessages struct { - LedgerSeq Uint32 - Messages []ScpEnvelope + if !ok { + panic("arm ClawbackClaimableBalanceResult is not set") + } + + return val } -// ScpHistoryEntryV0 is an XDR Struct defines as: -// -// struct SCPHistoryEntryV0 -// { -// SCPQuorumSet quorumSets<>; // additional quorum sets used by ledgerMessages -// LedgerSCPMessages ledgerMessages; -// }; -// -type ScpHistoryEntryV0 struct { - QuorumSets []ScpQuorumSet - LedgerMessages LedgerScpMessages +// GetClawbackClaimableBalanceResult retrieves the ClawbackClaimableBalanceResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetClawbackClaimableBalanceResult() (result ClawbackClaimableBalanceResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "ClawbackClaimableBalanceResult" { + result = *u.ClawbackClaimableBalanceResult + ok = true + } + + return } -// ScpHistoryEntry is an XDR Union defines as: -// -// union SCPHistoryEntry switch (int v) -// { -// case 0: -// SCPHistoryEntryV0 v0; -// }; -// -type ScpHistoryEntry struct { - V int32 - V0 *ScpHistoryEntryV0 +// MustSetTrustLineFlagsResult retrieves the SetTrustLineFlagsResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustSetTrustLineFlagsResult() SetTrustLineFlagsResult { + val, ok := u.GetSetTrustLineFlagsResult() + + if !ok { + panic("arm SetTrustLineFlagsResult is not set") + } + + return val } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u ScpHistoryEntry) SwitchFieldName() string { - return "V" +// GetSetTrustLineFlagsResult retrieves the SetTrustLineFlagsResult value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u OperationResultTr) GetSetTrustLineFlagsResult() (result SetTrustLineFlagsResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "SetTrustLineFlagsResult" { + result = *u.SetTrustLineFlagsResult + ok = true + } + + return } -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of ScpHistoryEntry -func (u ScpHistoryEntry) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "V0", true +// MustLiquidityPoolDepositResult retrieves the LiquidityPoolDepositResult value from the union, +// panicing if the value is not set. +func (u OperationResultTr) MustLiquidityPoolDepositResult() LiquidityPoolDepositResult { + val, ok := u.GetLiquidityPoolDepositResult() + + if !ok { + panic("arm LiquidityPoolDepositResult is not set") } - return "-", false + + return val } -// NewScpHistoryEntry creates a new ScpHistoryEntry. -func NewScpHistoryEntry(v int32, value interface{}) (result ScpHistoryEntry, err error) { - result.V = v - switch int32(v) { - case 0: - tv, ok := value.(ScpHistoryEntryV0) - if !ok { - err = fmt.Errorf("invalid value, must be ScpHistoryEntryV0") - return - } - result.V0 = &tv +// GetLiquidityPoolDepositResult retrieves the LiquidityPoolDepositResult value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u OperationResultTr) GetLiquidityPoolDepositResult() (result LiquidityPoolDepositResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) + + if armName == "LiquidityPoolDepositResult" { + result = *u.LiquidityPoolDepositResult + ok = true } + return } -// MustV0 retrieves the V0 value from the union, +// MustLiquidityPoolWithdrawResult retrieves the LiquidityPoolWithdrawResult value from the union, // panicing if the value is not set. -func (u ScpHistoryEntry) MustV0() ScpHistoryEntryV0 { - val, ok := u.GetV0() +func (u OperationResultTr) MustLiquidityPoolWithdrawResult() LiquidityPoolWithdrawResult { + val, ok := u.GetLiquidityPoolWithdrawResult() if !ok { - panic("arm V0 is not set") + panic("arm LiquidityPoolWithdrawResult is not set") } return val } -// GetV0 retrieves the V0 value from the union, +// GetLiquidityPoolWithdrawResult retrieves the LiquidityPoolWithdrawResult value from the union, // returning ok if the union's switch indicated the value is valid. -func (u ScpHistoryEntry) GetV0() (result ScpHistoryEntryV0, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.V)) +func (u OperationResultTr) GetLiquidityPoolWithdrawResult() (result LiquidityPoolWithdrawResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "V0" { - result = *u.V0 + if armName == "LiquidityPoolWithdrawResult" { + result = *u.LiquidityPoolWithdrawResult ok = true } return } -// LedgerEntryChangeType is an XDR Enum defines as: -// -// enum LedgerEntryChangeType -// { -// LEDGER_ENTRY_CREATED = 0, // entry was added to the ledger -// LEDGER_ENTRY_UPDATED = 1, // entry was modified in the ledger -// LEDGER_ENTRY_REMOVED = 2, // entry was removed from the ledger -// LEDGER_ENTRY_STATE = 3 // value of the entry -// }; -// -type LedgerEntryChangeType int32 +// EncodeTo encodes this value using the Encoder. 
+func (u OperationResultTr) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch OperationType(u.Type) { + case OperationTypeCreateAccount: + if err = (*u.CreateAccountResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypePayment: + if err = (*u.PaymentResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypePathPaymentStrictReceive: + if err = (*u.PathPaymentStrictReceiveResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeManageSellOffer: + if err = (*u.ManageSellOfferResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeCreatePassiveSellOffer: + if err = (*u.CreatePassiveSellOfferResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeSetOptions: + if err = (*u.SetOptionsResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeChangeTrust: + if err = (*u.ChangeTrustResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeAllowTrust: + if err = (*u.AllowTrustResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeAccountMerge: + if err = (*u.AccountMergeResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeInflation: + if err = (*u.InflationResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeManageData: + if err = (*u.ManageDataResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeBumpSequence: + if err = (*u.BumpSeqResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeManageBuyOffer: + if err = (*u.ManageBuyOfferResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypePathPaymentStrictSend: + if err = (*u.PathPaymentStrictSendResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeCreateClaimableBalance: + if err = (*u.CreateClaimableBalanceResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeClaimClaimableBalance: + if err = (*u.ClaimClaimableBalanceResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeBeginSponsoringFutureReserves: + if err = (*u.BeginSponsoringFutureReservesResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeEndSponsoringFutureReserves: + if err = (*u.EndSponsoringFutureReservesResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeRevokeSponsorship: + if err = (*u.RevokeSponsorshipResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeClawback: + if err = (*u.ClawbackResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeClawbackClaimableBalance: + if err = (*u.ClawbackClaimableBalanceResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeSetTrustLineFlags: + if err = (*u.SetTrustLineFlagsResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeLiquidityPoolDeposit: + if err = (*u.LiquidityPoolDepositResult).EncodeTo(e); err != nil { + return err + } + return nil + case OperationTypeLiquidityPoolWithdraw: + if err = (*u.LiquidityPoolWithdrawResult).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (OperationType) switch value '%d' is not valid for union OperationResultTr", u.Type) +} -const ( - LedgerEntryChangeTypeLedgerEntryCreated 
LedgerEntryChangeType = 0 - LedgerEntryChangeTypeLedgerEntryUpdated LedgerEntryChangeType = 1 - LedgerEntryChangeTypeLedgerEntryRemoved LedgerEntryChangeType = 2 - LedgerEntryChangeTypeLedgerEntryState LedgerEntryChangeType = 3 -) +var _ decoderFrom = (*OperationResultTr)(nil) -var ledgerEntryChangeTypeMap = map[int32]string{ - 0: "LedgerEntryChangeTypeLedgerEntryCreated", - 1: "LedgerEntryChangeTypeLedgerEntryUpdated", - 2: "LedgerEntryChangeTypeLedgerEntryRemoved", - 3: "LedgerEntryChangeTypeLedgerEntryState", +// DecodeFrom decodes this value using the Decoder. +func (u *OperationResultTr) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationType: %s", err) + } + switch OperationType(u.Type) { + case OperationTypeCreateAccount: + u.CreateAccountResult = new(CreateAccountResult) + nTmp, err = (*u.CreateAccountResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding CreateAccountResult: %s", err) + } + return n, nil + case OperationTypePayment: + u.PaymentResult = new(PaymentResult) + nTmp, err = (*u.PaymentResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PaymentResult: %s", err) + } + return n, nil + case OperationTypePathPaymentStrictReceive: + u.PathPaymentStrictReceiveResult = new(PathPaymentStrictReceiveResult) + nTmp, err = (*u.PathPaymentStrictReceiveResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictReceiveResult: %s", err) + } + return n, nil + case OperationTypeManageSellOffer: + u.ManageSellOfferResult = new(ManageSellOfferResult) + nTmp, err = (*u.ManageSellOfferResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageSellOfferResult: %s", err) + } + return n, nil + case OperationTypeCreatePassiveSellOffer: + u.CreatePassiveSellOfferResult = new(ManageSellOfferResult) + nTmp, err = (*u.CreatePassiveSellOfferResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageSellOfferResult: %s", err) + } + return n, nil + case OperationTypeSetOptions: + u.SetOptionsResult = new(SetOptionsResult) + nTmp, err = (*u.SetOptionsResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SetOptionsResult: %s", err) + } + return n, nil + case OperationTypeChangeTrust: + u.ChangeTrustResult = new(ChangeTrustResult) + nTmp, err = (*u.ChangeTrustResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ChangeTrustResult: %s", err) + } + return n, nil + case OperationTypeAllowTrust: + u.AllowTrustResult = new(AllowTrustResult) + nTmp, err = (*u.AllowTrustResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AllowTrustResult: %s", err) + } + return n, nil + case OperationTypeAccountMerge: + u.AccountMergeResult = new(AccountMergeResult) + nTmp, err = (*u.AccountMergeResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding AccountMergeResult: %s", err) + } + return n, nil + case OperationTypeInflation: + u.InflationResult = new(InflationResult) + nTmp, err = (*u.InflationResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding InflationResult: %s", err) + } + return n, nil + case OperationTypeManageData: + u.ManageDataResult = new(ManageDataResult) + nTmp, err = (*u.ManageDataResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, 
fmt.Errorf("decoding ManageDataResult: %s", err) + } + return n, nil + case OperationTypeBumpSequence: + u.BumpSeqResult = new(BumpSequenceResult) + nTmp, err = (*u.BumpSeqResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding BumpSequenceResult: %s", err) + } + return n, nil + case OperationTypeManageBuyOffer: + u.ManageBuyOfferResult = new(ManageBuyOfferResult) + nTmp, err = (*u.ManageBuyOfferResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ManageBuyOfferResult: %s", err) + } + return n, nil + case OperationTypePathPaymentStrictSend: + u.PathPaymentStrictSendResult = new(PathPaymentStrictSendResult) + nTmp, err = (*u.PathPaymentStrictSendResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PathPaymentStrictSendResult: %s", err) + } + return n, nil + case OperationTypeCreateClaimableBalance: + u.CreateClaimableBalanceResult = new(CreateClaimableBalanceResult) + nTmp, err = (*u.CreateClaimableBalanceResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding CreateClaimableBalanceResult: %s", err) + } + return n, nil + case OperationTypeClaimClaimableBalance: + u.ClaimClaimableBalanceResult = new(ClaimClaimableBalanceResult) + nTmp, err = (*u.ClaimClaimableBalanceResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClaimClaimableBalanceResult: %s", err) + } + return n, nil + case OperationTypeBeginSponsoringFutureReserves: + u.BeginSponsoringFutureReservesResult = new(BeginSponsoringFutureReservesResult) + nTmp, err = (*u.BeginSponsoringFutureReservesResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding BeginSponsoringFutureReservesResult: %s", err) + } + return n, nil + case OperationTypeEndSponsoringFutureReserves: + u.EndSponsoringFutureReservesResult = new(EndSponsoringFutureReservesResult) + nTmp, err = (*u.EndSponsoringFutureReservesResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding EndSponsoringFutureReservesResult: %s", err) + } + return n, nil + case OperationTypeRevokeSponsorship: + u.RevokeSponsorshipResult = new(RevokeSponsorshipResult) + nTmp, err = (*u.RevokeSponsorshipResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding RevokeSponsorshipResult: %s", err) + } + return n, nil + case OperationTypeClawback: + u.ClawbackResult = new(ClawbackResult) + nTmp, err = (*u.ClawbackResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClawbackResult: %s", err) + } + return n, nil + case OperationTypeClawbackClaimableBalance: + u.ClawbackClaimableBalanceResult = new(ClawbackClaimableBalanceResult) + nTmp, err = (*u.ClawbackClaimableBalanceResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding ClawbackClaimableBalanceResult: %s", err) + } + return n, nil + case OperationTypeSetTrustLineFlags: + u.SetTrustLineFlagsResult = new(SetTrustLineFlagsResult) + nTmp, err = (*u.SetTrustLineFlagsResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SetTrustLineFlagsResult: %s", err) + } + return n, nil + case OperationTypeLiquidityPoolDeposit: + u.LiquidityPoolDepositResult = new(LiquidityPoolDepositResult) + nTmp, err = (*u.LiquidityPoolDepositResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolDepositResult: %s", err) + } + return n, nil + case OperationTypeLiquidityPoolWithdraw: + u.LiquidityPoolWithdrawResult = 
new(LiquidityPoolWithdrawResult) + nTmp, err = (*u.LiquidityPoolWithdrawResult).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding LiquidityPoolWithdrawResult: %s", err) + } + return n, nil + } + return n, fmt.Errorf("union OperationResultTr has invalid Type (OperationType) switch value '%d'", u.Type) } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for LedgerEntryChangeType -func (e LedgerEntryChangeType) ValidEnum(v int32) bool { - _, ok := ledgerEntryChangeTypeMap[v] - return ok +// MarshalBinary implements encoding.BinaryMarshaler. +func (s OperationResultTr) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// String returns the name of `e` -func (e LedgerEntryChangeType) String() string { - name, _ := ledgerEntryChangeTypeMap[int32(e)] - return name +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *OperationResultTr) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// LedgerEntryChange is an XDR Union defines as: +var ( + _ encoding.BinaryMarshaler = (*OperationResultTr)(nil) + _ encoding.BinaryUnmarshaler = (*OperationResultTr)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s OperationResultTr) xdrType() {} + +var _ xdrType = (*OperationResultTr)(nil) + +// OperationResult is an XDR Union defines as: // -// union LedgerEntryChange switch (LedgerEntryChangeType type) +// union OperationResult switch (OperationResultCode code) // { -// case LEDGER_ENTRY_CREATED: -// LedgerEntry created; -// case LEDGER_ENTRY_UPDATED: -// LedgerEntry updated; -// case LEDGER_ENTRY_REMOVED: -// LedgerKey removed; -// case LEDGER_ENTRY_STATE: -// LedgerEntry state; +// case opINNER: +// union switch (OperationType type) +// { +// case CREATE_ACCOUNT: +// CreateAccountResult createAccountResult; +// case PAYMENT: +// PaymentResult paymentResult; +// case PATH_PAYMENT_STRICT_RECEIVE: +// PathPaymentStrictReceiveResult pathPaymentStrictReceiveResult; +// case MANAGE_SELL_OFFER: +// ManageSellOfferResult manageSellOfferResult; +// case CREATE_PASSIVE_SELL_OFFER: +// ManageSellOfferResult createPassiveSellOfferResult; +// case SET_OPTIONS: +// SetOptionsResult setOptionsResult; +// case CHANGE_TRUST: +// ChangeTrustResult changeTrustResult; +// case ALLOW_TRUST: +// AllowTrustResult allowTrustResult; +// case ACCOUNT_MERGE: +// AccountMergeResult accountMergeResult; +// case INFLATION: +// InflationResult inflationResult; +// case MANAGE_DATA: +// ManageDataResult manageDataResult; +// case BUMP_SEQUENCE: +// BumpSequenceResult bumpSeqResult; +// case MANAGE_BUY_OFFER: +// ManageBuyOfferResult manageBuyOfferResult; +// case PATH_PAYMENT_STRICT_SEND: +// PathPaymentStrictSendResult pathPaymentStrictSendResult; +// case CREATE_CLAIMABLE_BALANCE: +// CreateClaimableBalanceResult createClaimableBalanceResult; +// case CLAIM_CLAIMABLE_BALANCE: +// ClaimClaimableBalanceResult claimClaimableBalanceResult; +// case BEGIN_SPONSORING_FUTURE_RESERVES: +// BeginSponsoringFutureReservesResult beginSponsoringFutureReservesResult; +// case END_SPONSORING_FUTURE_RESERVES: +// EndSponsoringFutureReservesResult endSponsoringFutureReservesResult; +// case REVOKE_SPONSORSHIP: +// RevokeSponsorshipResult revokeSponsorshipResult; +// case CLAWBACK: +// ClawbackResult clawbackResult; +// case 
CLAWBACK_CLAIMABLE_BALANCE: +// ClawbackClaimableBalanceResult clawbackClaimableBalanceResult; +// case SET_TRUST_LINE_FLAGS: +// SetTrustLineFlagsResult setTrustLineFlagsResult; +// case LIQUIDITY_POOL_DEPOSIT: +// LiquidityPoolDepositResult liquidityPoolDepositResult; +// case LIQUIDITY_POOL_WITHDRAW: +// LiquidityPoolWithdrawResult liquidityPoolWithdrawResult; +// } +// tr; +// default: +// void; // }; // -type LedgerEntryChange struct { - Type LedgerEntryChangeType - Created *LedgerEntry - Updated *LedgerEntry - Removed *LedgerKey - State *LedgerEntry +type OperationResult struct { + Code OperationResultCode + Tr *OperationResultTr } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u LedgerEntryChange) SwitchFieldName() string { - return "Type" +func (u OperationResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of LedgerEntryChange -func (u LedgerEntryChange) ArmForSwitch(sw int32) (string, bool) { - switch LedgerEntryChangeType(sw) { - case LedgerEntryChangeTypeLedgerEntryCreated: - return "Created", true - case LedgerEntryChangeTypeLedgerEntryUpdated: - return "Updated", true - case LedgerEntryChangeTypeLedgerEntryRemoved: - return "Removed", true - case LedgerEntryChangeTypeLedgerEntryState: - return "State", true +// the value for an instance of OperationResult +func (u OperationResult) ArmForSwitch(sw int32) (string, bool) { + switch OperationResultCode(sw) { + case OperationResultCodeOpInner: + return "Tr", true + default: + return "", true } - return "-", false } -// NewLedgerEntryChange creates a new LedgerEntryChange. -func NewLedgerEntryChange(aType LedgerEntryChangeType, value interface{}) (result LedgerEntryChange, err error) { - result.Type = aType - switch LedgerEntryChangeType(aType) { - case LedgerEntryChangeTypeLedgerEntryCreated: - tv, ok := value.(LedgerEntry) - if !ok { - err = fmt.Errorf("invalid value, must be LedgerEntry") - return - } - result.Created = &tv - case LedgerEntryChangeTypeLedgerEntryUpdated: - tv, ok := value.(LedgerEntry) - if !ok { - err = fmt.Errorf("invalid value, must be LedgerEntry") - return - } - result.Updated = &tv - case LedgerEntryChangeTypeLedgerEntryRemoved: - tv, ok := value.(LedgerKey) - if !ok { - err = fmt.Errorf("invalid value, must be LedgerKey") - return - } - result.Removed = &tv - case LedgerEntryChangeTypeLedgerEntryState: - tv, ok := value.(LedgerEntry) +// NewOperationResult creates a new OperationResult. +func NewOperationResult(code OperationResultCode, value interface{}) (result OperationResult, err error) { + result.Code = code + switch OperationResultCode(code) { + case OperationResultCodeOpInner: + tv, ok := value.(OperationResultTr) if !ok { - err = fmt.Errorf("invalid value, must be LedgerEntry") + err = fmt.Errorf("invalid value, must be OperationResultTr") return } - result.State = &tv + result.Tr = &tv + default: + // void } return } -// MustCreated retrieves the Created value from the union, +// MustTr retrieves the Tr value from the union, // panicing if the value is not set. 
-func (u LedgerEntryChange) MustCreated() LedgerEntry { - val, ok := u.GetCreated() +func (u OperationResult) MustTr() OperationResultTr { + val, ok := u.GetTr() if !ok { - panic("arm Created is not set") + panic("arm Tr is not set") } return val } -// GetCreated retrieves the Created value from the union, +// GetTr retrieves the Tr value from the union, // returning ok if the union's switch indicated the value is valid. -func (u LedgerEntryChange) GetCreated() (result LedgerEntry, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +func (u OperationResult) GetTr() (result OperationResultTr, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) - if armName == "Created" { - result = *u.Created + if armName == "Tr" { + result = *u.Tr ok = true } return } -// MustUpdated retrieves the Updated value from the union, -// panicing if the value is not set. -func (u LedgerEntryChange) MustUpdated() LedgerEntry { - val, ok := u.GetUpdated() - - if !ok { - panic("arm Updated is not set") +// EncodeTo encodes this value using the Encoder. +func (u OperationResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch OperationResultCode(u.Code) { + case OperationResultCodeOpInner: + if err = (*u.Tr).EncodeTo(e); err != nil { + return err + } + return nil + default: + // Void + return nil } - - return val } -// GetUpdated retrieves the Updated value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u LedgerEntryChange) GetUpdated() (result LedgerEntry, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*OperationResult)(nil) - if armName == "Updated" { - result = *u.Updated - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *OperationResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResultCode: %s", err) + } + switch OperationResultCode(u.Code) { + case OperationResultCodeOpInner: + u.Tr = new(OperationResultTr) + nTmp, err = (*u.Tr).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResultTr: %s", err) + } + return n, nil + default: + // Void + return n, nil } - - return } -// MustRemoved retrieves the Removed value from the union, -// panicing if the value is not set. -func (u LedgerEntryChange) MustRemoved() LedgerKey { - val, ok := u.GetRemoved() - - if !ok { - panic("arm Removed is not set") - } +// MarshalBinary implements encoding.BinaryMarshaler. +func (s OperationResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - return val +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *OperationResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// GetRemoved retrieves the Removed value from the union, -// returning ok if the union's switch indicated the value is valid. 
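As an illustrative aside rather than generated output: the OperationResult union above is keyed on Code, and GetTr is the non-panicking way to reach the inner OperationResultTr arm. A minimal sketch, assuming the usual github.com/stellar/go/xdr import path and the String() helper that the generated OperationType enum provides:

// Sketch only: report which operation produced a given result.
package example

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func operationResultSummary(res xdr.OperationResult) string {
	if tr, ok := res.GetTr(); ok {
		// opINNER: the nested union's Type field names the operation.
		return "opINNER/" + tr.Type.String()
	}
	// Every other code is a void arm; only the code itself carries information.
	return fmt.Sprintf("code %d", res.Code)
}

MustTr gives the same access but panics when Code is not opINNER, so it is only appropriate after the switch value has already been checked.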
-func (u LedgerEntryChange) GetRemoved() (result LedgerKey, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var ( + _ encoding.BinaryMarshaler = (*OperationResult)(nil) + _ encoding.BinaryUnmarshaler = (*OperationResult)(nil) +) - if armName == "Removed" { - result = *u.Removed - ok = true - } +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s OperationResult) xdrType() {} - return -} +var _ xdrType = (*OperationResult)(nil) -// MustState retrieves the State value from the union, -// panicing if the value is not set. -func (u LedgerEntryChange) MustState() LedgerEntry { - val, ok := u.GetState() +// TransactionResultCode is an XDR Enum defines as: +// +// enum TransactionResultCode +// { +// txFEE_BUMP_INNER_SUCCESS = 1, // fee bump inner transaction succeeded +// txSUCCESS = 0, // all operations succeeded +// +// txFAILED = -1, // one of the operations failed (none were applied) +// +// txTOO_EARLY = -2, // ledger closeTime before minTime +// txTOO_LATE = -3, // ledger closeTime after maxTime +// txMISSING_OPERATION = -4, // no operation was specified +// txBAD_SEQ = -5, // sequence number does not match source account +// +// txBAD_AUTH = -6, // too few valid signatures / wrong network +// txINSUFFICIENT_BALANCE = -7, // fee would bring account below reserve +// txNO_ACCOUNT = -8, // source account not found +// txINSUFFICIENT_FEE = -9, // fee is too small +// txBAD_AUTH_EXTRA = -10, // unused signatures attached to transaction +// txINTERNAL_ERROR = -11, // an unknown error occurred +// +// txNOT_SUPPORTED = -12, // transaction type not supported +// txFEE_BUMP_INNER_FAILED = -13, // fee bump inner transaction failed +// txBAD_SPONSORSHIP = -14 // sponsorship not confirmed +// }; +// +type TransactionResultCode int32 - if !ok { - panic("arm State is not set") - } +const ( + TransactionResultCodeTxFeeBumpInnerSuccess TransactionResultCode = 1 + TransactionResultCodeTxSuccess TransactionResultCode = 0 + TransactionResultCodeTxFailed TransactionResultCode = -1 + TransactionResultCodeTxTooEarly TransactionResultCode = -2 + TransactionResultCodeTxTooLate TransactionResultCode = -3 + TransactionResultCodeTxMissingOperation TransactionResultCode = -4 + TransactionResultCodeTxBadSeq TransactionResultCode = -5 + TransactionResultCodeTxBadAuth TransactionResultCode = -6 + TransactionResultCodeTxInsufficientBalance TransactionResultCode = -7 + TransactionResultCodeTxNoAccount TransactionResultCode = -8 + TransactionResultCodeTxInsufficientFee TransactionResultCode = -9 + TransactionResultCodeTxBadAuthExtra TransactionResultCode = -10 + TransactionResultCodeTxInternalError TransactionResultCode = -11 + TransactionResultCodeTxNotSupported TransactionResultCode = -12 + TransactionResultCodeTxFeeBumpInnerFailed TransactionResultCode = -13 + TransactionResultCodeTxBadSponsorship TransactionResultCode = -14 +) - return val +var transactionResultCodeMap = map[int32]string{ + 1: "TransactionResultCodeTxFeeBumpInnerSuccess", + 0: "TransactionResultCodeTxSuccess", + -1: "TransactionResultCodeTxFailed", + -2: "TransactionResultCodeTxTooEarly", + -3: "TransactionResultCodeTxTooLate", + -4: "TransactionResultCodeTxMissingOperation", + -5: "TransactionResultCodeTxBadSeq", + -6: "TransactionResultCodeTxBadAuth", + -7: "TransactionResultCodeTxInsufficientBalance", + -8: "TransactionResultCodeTxNoAccount", + -9: "TransactionResultCodeTxInsufficientFee", + -10: "TransactionResultCodeTxBadAuthExtra", + -11: 
"TransactionResultCodeTxInternalError", + -12: "TransactionResultCodeTxNotSupported", + -13: "TransactionResultCodeTxFeeBumpInnerFailed", + -14: "TransactionResultCodeTxBadSponsorship", } -// GetState retrieves the State value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u LedgerEntryChange) GetState() (result LedgerEntry, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for TransactionResultCode +func (e TransactionResultCode) ValidEnum(v int32) bool { + _, ok := transactionResultCodeMap[v] + return ok +} + +// String returns the name of `e` +func (e TransactionResultCode) String() string { + name, _ := transactionResultCodeMap[int32(e)] + return name +} - if armName == "State" { - result = *u.State - ok = true +// EncodeTo encodes this value using the Encoder. +func (e TransactionResultCode) EncodeTo(enc *xdr.Encoder) error { + if _, ok := transactionResultCodeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid TransactionResultCode enum value", e) } + _, err := enc.EncodeInt(int32(e)) + return err +} - return +var _ decoderFrom = (*TransactionResultCode)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (e *TransactionResultCode) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding TransactionResultCode: %s", err) + } + if _, ok := transactionResultCodeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid TransactionResultCode enum value", v) + } + *e = TransactionResultCode(v) + return n, nil } -// LedgerEntryChanges is an XDR Typedef defines as: -// -// typedef LedgerEntryChange LedgerEntryChanges<>; -// -type LedgerEntryChanges []LedgerEntryChange +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionResultCode) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} -// OperationMeta is an XDR Struct defines as: -// -// struct OperationMeta -// { -// LedgerEntryChanges changes; -// }; -// -type OperationMeta struct { - Changes LedgerEntryChanges +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionResultCode) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// TransactionMeta is an XDR Union defines as: +var ( + _ encoding.BinaryMarshaler = (*TransactionResultCode)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionResultCode)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s TransactionResultCode) xdrType() {} + +var _ xdrType = (*TransactionResultCode)(nil) + +// InnerTransactionResultResult is an XDR NestedUnion defines as: // -// union TransactionMeta switch (int v) -// { -// case 0: -// OperationMeta operations<>; -// }; +// union switch (TransactionResultCode code) +// { +// // txFEE_BUMP_INNER_SUCCESS is not included +// case txSUCCESS: +// case txFAILED: +// OperationResult results<>; +// case txTOO_EARLY: +// case txTOO_LATE: +// case txMISSING_OPERATION: +// case txBAD_SEQ: +// case txBAD_AUTH: +// case txINSUFFICIENT_BALANCE: +// case txNO_ACCOUNT: +// case txINSUFFICIENT_FEE: +// case txBAD_AUTH_EXTRA: +// case txINTERNAL_ERROR: +// case txNOT_SUPPORTED: +// // txFEE_BUMP_INNER_FAILED is not included +// case txBAD_SPONSORSHIP: +// void; +// } // -type TransactionMeta struct { - V int32 - Operations *[]OperationMeta +type InnerTransactionResultResult struct { + Code TransactionResultCode + Results *[]OperationResult } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u TransactionMeta) SwitchFieldName() string { - return "V" +func (u InnerTransactionResultResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of TransactionMeta -func (u TransactionMeta) ArmForSwitch(sw int32) (string, bool) { - switch int32(sw) { - case 0: - return "Operations", true +// the value for an instance of InnerTransactionResultResult +func (u InnerTransactionResultResult) ArmForSwitch(sw int32) (string, bool) { + switch TransactionResultCode(sw) { + case TransactionResultCodeTxSuccess: + return "Results", true + case TransactionResultCodeTxFailed: + return "Results", true + case TransactionResultCodeTxTooEarly: + return "", true + case TransactionResultCodeTxTooLate: + return "", true + case TransactionResultCodeTxMissingOperation: + return "", true + case TransactionResultCodeTxBadSeq: + return "", true + case TransactionResultCodeTxBadAuth: + return "", true + case TransactionResultCodeTxInsufficientBalance: + return "", true + case TransactionResultCodeTxNoAccount: + return "", true + case TransactionResultCodeTxInsufficientFee: + return "", true + case TransactionResultCodeTxBadAuthExtra: + return "", true + case TransactionResultCodeTxInternalError: + return "", true + case TransactionResultCodeTxNotSupported: + return "", true + case TransactionResultCodeTxBadSponsorship: + return "", true } return "-", false } -// NewTransactionMeta creates a new TransactionMeta. -func NewTransactionMeta(v int32, value interface{}) (result TransactionMeta, err error) { - result.V = v - switch int32(v) { - case 0: - tv, ok := value.([]OperationMeta) +// NewInnerTransactionResultResult creates a new InnerTransactionResultResult. 
+func NewInnerTransactionResultResult(code TransactionResultCode, value interface{}) (result InnerTransactionResultResult, err error) { + result.Code = code + switch TransactionResultCode(code) { + case TransactionResultCodeTxSuccess: + tv, ok := value.([]OperationResult) if !ok { - err = fmt.Errorf("invalid value, must be []OperationMeta") + err = fmt.Errorf("invalid value, must be []OperationResult") return } - result.Operations = &tv + result.Results = &tv + case TransactionResultCodeTxFailed: + tv, ok := value.([]OperationResult) + if !ok { + err = fmt.Errorf("invalid value, must be []OperationResult") + return + } + result.Results = &tv + case TransactionResultCodeTxTooEarly: + // void + case TransactionResultCodeTxTooLate: + // void + case TransactionResultCodeTxMissingOperation: + // void + case TransactionResultCodeTxBadSeq: + // void + case TransactionResultCodeTxBadAuth: + // void + case TransactionResultCodeTxInsufficientBalance: + // void + case TransactionResultCodeTxNoAccount: + // void + case TransactionResultCodeTxInsufficientFee: + // void + case TransactionResultCodeTxBadAuthExtra: + // void + case TransactionResultCodeTxInternalError: + // void + case TransactionResultCodeTxNotSupported: + // void + case TransactionResultCodeTxBadSponsorship: + // void } return } -// MustOperations retrieves the Operations value from the union, +// MustResults retrieves the Results value from the union, // panicing if the value is not set. -func (u TransactionMeta) MustOperations() []OperationMeta { - val, ok := u.GetOperations() +func (u InnerTransactionResultResult) MustResults() []OperationResult { + val, ok := u.GetResults() if !ok { - panic("arm Operations is not set") + panic("arm Results is not set") } return val } -// GetOperations retrieves the Operations value from the union, +// GetResults retrieves the Results value from the union, // returning ok if the union's switch indicated the value is valid. -func (u TransactionMeta) GetOperations() (result []OperationMeta, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.V)) +func (u InnerTransactionResultResult) GetResults() (result []OperationResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) - if armName == "Operations" { - result = *u.Operations + if armName == "Results" { + result = *u.Results ok = true } return } -// ErrorCode is an XDR Enum defines as: -// -// enum ErrorCode -// { -// ERR_MISC = 0, // Unspecific error -// ERR_DATA = 1, // Malformed data -// ERR_CONF = 2, // Misconfiguration error -// ERR_AUTH = 3, // Authentication failure -// ERR_LOAD = 4 // System overloaded -// }; -// -type ErrorCode int32 - -const ( - ErrorCodeErrMisc ErrorCode = 0 - ErrorCodeErrData ErrorCode = 1 - ErrorCodeErrConf ErrorCode = 2 - ErrorCodeErrAuth ErrorCode = 3 - ErrorCodeErrLoad ErrorCode = 4 -) - -var errorCodeMap = map[int32]string{ - 0: "ErrorCodeErrMisc", - 1: "ErrorCodeErrData", - 2: "ErrorCodeErrConf", - 3: "ErrorCodeErrAuth", - 4: "ErrorCodeErrLoad", -} - -// ValidEnum validates a proposed value for this enum. 
Implements -// the Enum interface for ErrorCode -func (e ErrorCode) ValidEnum(v int32) bool { - _, ok := errorCodeMap[v] - return ok -} - -// String returns the name of `e` -func (e ErrorCode) String() string { - name, _ := errorCodeMap[int32(e)] - return name -} - -// Error is an XDR Struct defines as: -// -// struct Error -// { -// ErrorCode code; -// string msg<100>; -// }; -// -type Error struct { - Code ErrorCode - Msg string `xdrmaxsize:"100"` -} - -// AuthCert is an XDR Struct defines as: -// -// struct AuthCert -// { -// Curve25519Public pubkey; -// uint64 expiration; -// Signature sig; -// }; -// -type AuthCert struct { - Pubkey Curve25519Public - Expiration Uint64 - Sig Signature -} - -// Hello is an XDR Struct defines as: -// -// struct Hello -// { -// uint32 ledgerVersion; -// uint32 overlayVersion; -// uint32 overlayMinVersion; -// Hash networkID; -// string versionStr<100>; -// int listeningPort; -// NodeID peerID; -// AuthCert cert; -// uint256 nonce; -// }; -// -type Hello struct { - LedgerVersion Uint32 - OverlayVersion Uint32 - OverlayMinVersion Uint32 - NetworkId Hash - VersionStr string `xdrmaxsize:"100"` - ListeningPort int32 - PeerId NodeId - Cert AuthCert - Nonce Uint256 -} - -// Auth is an XDR Struct defines as: -// -// struct Auth -// { -// // Empty message, just to confirm -// // establishment of MAC keys. -// int unused; -// }; -// -type Auth struct { - Unused int32 +// EncodeTo encodes this value using the Encoder. +func (u InnerTransactionResultResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch TransactionResultCode(u.Code) { + case TransactionResultCodeTxSuccess: + if _, err = e.EncodeUint(uint32(len((*u.Results)))); err != nil { + return err + } + for i := 0; i < len((*u.Results)); i++ { + if err = (*u.Results)[i].EncodeTo(e); err != nil { + return err + } + } + return nil + case TransactionResultCodeTxFailed: + if _, err = e.EncodeUint(uint32(len((*u.Results)))); err != nil { + return err + } + for i := 0; i < len((*u.Results)); i++ { + if err = (*u.Results)[i].EncodeTo(e); err != nil { + return err + } + } + return nil + case TransactionResultCodeTxTooEarly: + // Void + return nil + case TransactionResultCodeTxTooLate: + // Void + return nil + case TransactionResultCodeTxMissingOperation: + // Void + return nil + case TransactionResultCodeTxBadSeq: + // Void + return nil + case TransactionResultCodeTxBadAuth: + // Void + return nil + case TransactionResultCodeTxInsufficientBalance: + // Void + return nil + case TransactionResultCodeTxNoAccount: + // Void + return nil + case TransactionResultCodeTxInsufficientFee: + // Void + return nil + case TransactionResultCodeTxBadAuthExtra: + // Void + return nil + case TransactionResultCodeTxInternalError: + // Void + return nil + case TransactionResultCodeTxNotSupported: + // Void + return nil + case TransactionResultCodeTxBadSponsorship: + // Void + return nil + } + return fmt.Errorf("Code (TransactionResultCode) switch value '%d' is not valid for union InnerTransactionResultResult", u.Code) } -// IpAddrType is an XDR Enum defines as: -// -// enum IPAddrType -// { -// IPv4 = 0, -// IPv6 = 1 -// }; -// -type IpAddrType int32 - -const ( - IpAddrTypeIPv4 IpAddrType = 0 - IpAddrTypeIPv6 IpAddrType = 1 -) +var _ decoderFrom = (*InnerTransactionResultResult)(nil) -var ipAddrTypeMap = map[int32]string{ - 0: "IpAddrTypeIPv4", - 1: "IpAddrTypeIPv6", +// DecodeFrom decodes this value using the Decoder. 
+func (u *InnerTransactionResultResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultCode: %s", err) + } + switch TransactionResultCode(u.Code) { + case TransactionResultCodeTxSuccess: + u.Results = new([]OperationResult) + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResult: %s", err) + } + (*u.Results) = nil + if l > 0 { + (*u.Results) = make([]OperationResult, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*u.Results)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResult: %s", err) + } + } + } + return n, nil + case TransactionResultCodeTxFailed: + u.Results = new([]OperationResult) + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResult: %s", err) + } + (*u.Results) = nil + if l > 0 { + (*u.Results) = make([]OperationResult, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*u.Results)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResult: %s", err) + } + } + } + return n, nil + case TransactionResultCodeTxTooEarly: + // Void + return n, nil + case TransactionResultCodeTxTooLate: + // Void + return n, nil + case TransactionResultCodeTxMissingOperation: + // Void + return n, nil + case TransactionResultCodeTxBadSeq: + // Void + return n, nil + case TransactionResultCodeTxBadAuth: + // Void + return n, nil + case TransactionResultCodeTxInsufficientBalance: + // Void + return n, nil + case TransactionResultCodeTxNoAccount: + // Void + return n, nil + case TransactionResultCodeTxInsufficientFee: + // Void + return n, nil + case TransactionResultCodeTxBadAuthExtra: + // Void + return n, nil + case TransactionResultCodeTxInternalError: + // Void + return n, nil + case TransactionResultCodeTxNotSupported: + // Void + return n, nil + case TransactionResultCodeTxBadSponsorship: + // Void + return n, nil + } + return n, fmt.Errorf("union InnerTransactionResultResult has invalid Code (TransactionResultCode) switch value '%d'", u.Code) } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for IpAddrType -func (e IpAddrType) ValidEnum(v int32) bool { - _, ok := ipAddrTypeMap[v] - return ok +// MarshalBinary implements encoding.BinaryMarshaler. +func (s InnerTransactionResultResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// String returns the name of `e` -func (e IpAddrType) String() string { - name, _ := ipAddrTypeMap[int32(e)] - return name +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *InnerTransactionResultResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// PeerAddressIp is an XDR NestedUnion defines as: +var ( + _ encoding.BinaryMarshaler = (*InnerTransactionResultResult)(nil) + _ encoding.BinaryUnmarshaler = (*InnerTransactionResultResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s InnerTransactionResultResult) xdrType() {} + +var _ xdrType = (*InnerTransactionResultResult)(nil) + +// InnerTransactionResultExt is an XDR NestedUnion defines as: // -// union switch (IPAddrType type) +// union switch (int v) // { -// case IPv4: -// opaque ipv4[4]; -// case IPv6: -// opaque ipv6[16]; +// case 0: +// void; // } // -type PeerAddressIp struct { - Type IpAddrType - Ipv4 *[4]byte - Ipv6 *[16]byte +type InnerTransactionResultExt struct { + V int32 } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u PeerAddressIp) SwitchFieldName() string { - return "Type" +func (u InnerTransactionResultExt) SwitchFieldName() string { + return "V" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of PeerAddressIp -func (u PeerAddressIp) ArmForSwitch(sw int32) (string, bool) { - switch IpAddrType(sw) { - case IpAddrTypeIPv4: - return "Ipv4", true - case IpAddrTypeIPv6: - return "Ipv6", true +// the value for an instance of InnerTransactionResultExt +func (u InnerTransactionResultExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true } return "-", false } -// NewPeerAddressIp creates a new PeerAddressIp. -func NewPeerAddressIp(aType IpAddrType, value interface{}) (result PeerAddressIp, err error) { - result.Type = aType - switch IpAddrType(aType) { - case IpAddrTypeIPv4: - tv, ok := value.([4]byte) - if !ok { - err = fmt.Errorf("invalid value, must be [4]byte") - return - } - result.Ipv4 = &tv - case IpAddrTypeIPv6: - tv, ok := value.([16]byte) - if !ok { - err = fmt.Errorf("invalid value, must be [16]byte") - return - } - result.Ipv6 = &tv +// NewInnerTransactionResultExt creates a new InnerTransactionResultExt. +func NewInnerTransactionResultExt(v int32, value interface{}) (result InnerTransactionResultExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void } return } -// MustIpv4 retrieves the Ipv4 value from the union, -// panicing if the value is not set. -func (u PeerAddressIp) MustIpv4() [4]byte { - val, ok := u.GetIpv4() - - if !ok { - panic("arm Ipv4 is not set") +// EncodeTo encodes this value using the Encoder. +func (u InnerTransactionResultExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err } - - return val + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union InnerTransactionResultExt", u.V) } -// GetIpv4 retrieves the Ipv4 value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u PeerAddressIp) GetIpv4() (result [4]byte, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*InnerTransactionResultExt)(nil) - if armName == "Ipv4" { - result = *u.Ipv4 - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *InnerTransactionResultExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) } - - return + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union InnerTransactionResultExt has invalid V (int32) switch value '%d'", u.V) } -// MustIpv6 retrieves the Ipv6 value from the union, -// panicing if the value is not set. 
-func (u PeerAddressIp) MustIpv6() [16]byte { - val, ok := u.GetIpv6() - - if !ok { - panic("arm Ipv6 is not set") - } +// MarshalBinary implements encoding.BinaryMarshaler. +func (s InnerTransactionResultExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - return val +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *InnerTransactionResultExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// GetIpv6 retrieves the Ipv6 value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u PeerAddressIp) GetIpv6() (result [16]byte, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var ( + _ encoding.BinaryMarshaler = (*InnerTransactionResultExt)(nil) + _ encoding.BinaryUnmarshaler = (*InnerTransactionResultExt)(nil) +) - if armName == "Ipv6" { - result = *u.Ipv6 - ok = true - } +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s InnerTransactionResultExt) xdrType() {} - return -} +var _ xdrType = (*InnerTransactionResultExt)(nil) -// PeerAddress is an XDR Struct defines as: +// InnerTransactionResult is an XDR Struct defines as: // -// struct PeerAddress +// struct InnerTransactionResult // { -// union switch (IPAddrType type) +// // Always 0. Here for binary compatibility. +// int64 feeCharged; +// +// union switch (TransactionResultCode code) // { -// case IPv4: -// opaque ipv4[4]; -// case IPv6: -// opaque ipv6[16]; +// // txFEE_BUMP_INNER_SUCCESS is not included +// case txSUCCESS: +// case txFAILED: +// OperationResult results<>; +// case txTOO_EARLY: +// case txTOO_LATE: +// case txMISSING_OPERATION: +// case txBAD_SEQ: +// case txBAD_AUTH: +// case txINSUFFICIENT_BALANCE: +// case txNO_ACCOUNT: +// case txINSUFFICIENT_FEE: +// case txBAD_AUTH_EXTRA: +// case txINTERNAL_ERROR: +// case txNOT_SUPPORTED: +// // txFEE_BUMP_INNER_FAILED is not included +// case txBAD_SPONSORSHIP: +// void; // } -// ip; -// uint32 port; -// uint32 numFailures; +// result; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; // }; // -type PeerAddress struct { - Ip PeerAddressIp - Port Uint32 - NumFailures Uint32 +type InnerTransactionResult struct { + FeeCharged Int64 + Result InnerTransactionResultResult + Ext InnerTransactionResultExt } -// MessageType is an XDR Enum defines as: -// -// enum MessageType -// { -// ERROR_MSG = 0, -// AUTH = 2, -// DONT_HAVE = 3, -// -// GET_PEERS = 4, // gets a list of peers this guy knows about -// PEERS = 5, -// -// GET_TX_SET = 6, // gets a particular txset by hash -// TX_SET = 7, -// -// TRANSACTION = 8, // pass on a tx you have heard about -// -// // SCP -// GET_SCP_QUORUMSET = 9, -// SCP_QUORUMSET = 10, -// SCP_MESSAGE = 11, -// GET_SCP_STATE = 12, -// -// // new messages -// HELLO = 13 -// }; -// -type MessageType int32 +// EncodeTo encodes this value using the Encoder. 
+func (s *InnerTransactionResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.FeeCharged.EncodeTo(e); err != nil { + return err + } + if err = s.Result.EncodeTo(e); err != nil { + return err + } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} -const ( - MessageTypeErrorMsg MessageType = 0 - MessageTypeAuth MessageType = 2 - MessageTypeDontHave MessageType = 3 - MessageTypeGetPeers MessageType = 4 - MessageTypePeers MessageType = 5 - MessageTypeGetTxSet MessageType = 6 - MessageTypeTxSet MessageType = 7 - MessageTypeTransaction MessageType = 8 - MessageTypeGetScpQuorumset MessageType = 9 - MessageTypeScpQuorumset MessageType = 10 - MessageTypeScpMessage MessageType = 11 - MessageTypeGetScpState MessageType = 12 - MessageTypeHello MessageType = 13 -) +var _ decoderFrom = (*InnerTransactionResult)(nil) -var messageTypeMap = map[int32]string{ - 0: "MessageTypeErrorMsg", - 2: "MessageTypeAuth", - 3: "MessageTypeDontHave", - 4: "MessageTypeGetPeers", - 5: "MessageTypePeers", - 6: "MessageTypeGetTxSet", - 7: "MessageTypeTxSet", - 8: "MessageTypeTransaction", - 9: "MessageTypeGetScpQuorumset", - 10: "MessageTypeScpQuorumset", - 11: "MessageTypeScpMessage", - 12: "MessageTypeGetScpState", - 13: "MessageTypeHello", +// DecodeFrom decodes this value using the Decoder. +func (s *InnerTransactionResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.FeeCharged.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Result.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding InnerTransactionResultResult: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding InnerTransactionResultExt: %s", err) + } + return n, nil } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for MessageType -func (e MessageType) ValidEnum(v int32) bool { - _, ok := messageTypeMap[v] - return ok +// MarshalBinary implements encoding.BinaryMarshaler. +func (s InnerTransactionResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// String returns the name of `e` -func (e MessageType) String() string { - name, _ := messageTypeMap[int32(e)] - return name +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *InnerTransactionResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// DontHave is an XDR Struct defines as: -// -// struct DontHave -// { -// MessageType type; -// uint256 reqHash; -// }; -// -type DontHave struct { - Type MessageType - ReqHash Uint256 -} +var ( + _ encoding.BinaryMarshaler = (*InnerTransactionResult)(nil) + _ encoding.BinaryUnmarshaler = (*InnerTransactionResult)(nil) +) -// StellarMessage is an XDR Union defines as: -// -// union StellarMessage switch (MessageType type) -// { -// case ERROR_MSG: -// Error error; -// case HELLO: -// Hello hello; -// case AUTH: -// Auth auth; -// case DONT_HAVE: -// DontHave dontHave; -// case GET_PEERS: -// void; -// case PEERS: -// PeerAddress peers<>; -// -// case GET_TX_SET: -// uint256 txSetHash; -// case TX_SET: -// TransactionSet txSet; -// -// case TRANSACTION: -// TransactionEnvelope transaction; -// -// // SCP -// case GET_SCP_QUORUMSET: -// uint256 qSetHash; -// case SCP_QUORUMSET: -// SCPQuorumSet qSet; -// case SCP_MESSAGE: -// SCPEnvelope envelope; -// case GET_SCP_STATE: -// uint32 getSCPLedgerSeq; // ledger seq requested ; if 0, requests the latest +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s InnerTransactionResult) xdrType() {} + +var _ xdrType = (*InnerTransactionResult)(nil) + +// InnerTransactionResultPair is an XDR Struct defines as: +// +// struct InnerTransactionResultPair +// { +// Hash transactionHash; // hash of the inner transaction +// InnerTransactionResult result; // result for the inner transaction // }; // -type StellarMessage struct { - Type MessageType - Error *Error - Hello *Hello - Auth *Auth - DontHave *DontHave - Peers *[]PeerAddress - TxSetHash *Uint256 - TxSet *TransactionSet - Transaction *TransactionEnvelope - QSetHash *Uint256 - QSet *ScpQuorumSet - Envelope *ScpEnvelope - GetScpLedgerSeq *Uint32 +type InnerTransactionResultPair struct { + TransactionHash Hash + Result InnerTransactionResult +} + +// EncodeTo encodes this value using the Encoder. +func (s *InnerTransactionResultPair) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.TransactionHash.EncodeTo(e); err != nil { + return err + } + if err = s.Result.EncodeTo(e); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*InnerTransactionResultPair)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *InnerTransactionResultPair) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.TransactionHash.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + nTmp, err = s.Result.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding InnerTransactionResult: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s InnerTransactionResultPair) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
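The EncodeTo methods above fix the wire layout of InnerTransactionResultPair: the 32-byte transaction hash, the feeCharged field, the result code (with no payload for the void arms), and the ext discriminant. A sketch of that arithmetic, assuming the same import path and that Int64 encodes as the usual 8-byte XDR hyper:

// Sketch only: a void-arm inner result marshals to exactly 48 bytes.
package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	pair := xdr.InnerTransactionResultPair{
		TransactionHash: xdr.Hash{0x01}, // remaining 31 bytes stay zero
		Result: xdr.InnerTransactionResult{
			FeeCharged: 0, // always 0 here, per the XDR comment above
			Result:     xdr.InnerTransactionResultResult{Code: xdr.TransactionResultCodeTxTooEarly}, // void arm
			Ext:        xdr.InnerTransactionResultExt{V: 0},
		},
	}
	b, err := pair.MarshalBinary()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b)) // 48 = 32 (hash) + 8 (int64 fee) + 4 (result code) + 4 (ext discriminant)
}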
+func (s *InnerTransactionResultPair) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*InnerTransactionResultPair)(nil) + _ encoding.BinaryUnmarshaler = (*InnerTransactionResultPair)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s InnerTransactionResultPair) xdrType() {} + +var _ xdrType = (*InnerTransactionResultPair)(nil) + +// TransactionResultResult is an XDR NestedUnion defines as: +// +// union switch (TransactionResultCode code) +// { +// case txFEE_BUMP_INNER_SUCCESS: +// case txFEE_BUMP_INNER_FAILED: +// InnerTransactionResultPair innerResultPair; +// case txSUCCESS: +// case txFAILED: +// OperationResult results<>; +// default: +// void; +// } +// +type TransactionResultResult struct { + Code TransactionResultCode + InnerResultPair *InnerTransactionResultPair + Results *[]OperationResult } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u StellarMessage) SwitchFieldName() string { - return "Type" +func (u TransactionResultResult) SwitchFieldName() string { + return "Code" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of StellarMessage -func (u StellarMessage) ArmForSwitch(sw int32) (string, bool) { - switch MessageType(sw) { - case MessageTypeErrorMsg: - return "Error", true - case MessageTypeHello: - return "Hello", true - case MessageTypeAuth: - return "Auth", true - case MessageTypeDontHave: - return "DontHave", true - case MessageTypeGetPeers: +// the value for an instance of TransactionResultResult +func (u TransactionResultResult) ArmForSwitch(sw int32) (string, bool) { + switch TransactionResultCode(sw) { + case TransactionResultCodeTxFeeBumpInnerSuccess: + return "InnerResultPair", true + case TransactionResultCodeTxFeeBumpInnerFailed: + return "InnerResultPair", true + case TransactionResultCodeTxSuccess: + return "Results", true + case TransactionResultCodeTxFailed: + return "Results", true + default: return "", true - case MessageTypePeers: - return "Peers", true - case MessageTypeGetTxSet: - return "TxSetHash", true - case MessageTypeTxSet: - return "TxSet", true - case MessageTypeTransaction: - return "Transaction", true - case MessageTypeGetScpQuorumset: - return "QSetHash", true - case MessageTypeScpQuorumset: - return "QSet", true - case MessageTypeScpMessage: - return "Envelope", true - case MessageTypeGetScpState: - return "GetScpLedgerSeq", true } - return "-", false } -// NewStellarMessage creates a new StellarMessage. -func NewStellarMessage(aType MessageType, value interface{}) (result StellarMessage, err error) { - result.Type = aType - switch MessageType(aType) { - case MessageTypeErrorMsg: - tv, ok := value.(Error) +// NewTransactionResultResult creates a new TransactionResultResult. 
+func NewTransactionResultResult(code TransactionResultCode, value interface{}) (result TransactionResultResult, err error) { + result.Code = code + switch TransactionResultCode(code) { + case TransactionResultCodeTxFeeBumpInnerSuccess: + tv, ok := value.(InnerTransactionResultPair) if !ok { - err = fmt.Errorf("invalid value, must be Error") + err = fmt.Errorf("invalid value, must be InnerTransactionResultPair") return } - result.Error = &tv - case MessageTypeHello: - tv, ok := value.(Hello) + result.InnerResultPair = &tv + case TransactionResultCodeTxFeeBumpInnerFailed: + tv, ok := value.(InnerTransactionResultPair) if !ok { - err = fmt.Errorf("invalid value, must be Hello") + err = fmt.Errorf("invalid value, must be InnerTransactionResultPair") return } - result.Hello = &tv - case MessageTypeAuth: - tv, ok := value.(Auth) + result.InnerResultPair = &tv + case TransactionResultCodeTxSuccess: + tv, ok := value.([]OperationResult) if !ok { - err = fmt.Errorf("invalid value, must be Auth") + err = fmt.Errorf("invalid value, must be []OperationResult") return } - result.Auth = &tv - case MessageTypeDontHave: - tv, ok := value.(DontHave) + result.Results = &tv + case TransactionResultCodeTxFailed: + tv, ok := value.([]OperationResult) if !ok { - err = fmt.Errorf("invalid value, must be DontHave") + err = fmt.Errorf("invalid value, must be []OperationResult") return } - result.DontHave = &tv - case MessageTypeGetPeers: + result.Results = &tv + default: // void - case MessageTypePeers: - tv, ok := value.([]PeerAddress) - if !ok { - err = fmt.Errorf("invalid value, must be []PeerAddress") - return + } + return +} + +// MustInnerResultPair retrieves the InnerResultPair value from the union, +// panicing if the value is not set. +func (u TransactionResultResult) MustInnerResultPair() InnerTransactionResultPair { + val, ok := u.GetInnerResultPair() + + if !ok { + panic("arm InnerResultPair is not set") + } + + return val +} + +// GetInnerResultPair retrieves the InnerResultPair value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TransactionResultResult) GetInnerResultPair() (result InnerTransactionResultPair, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) + + if armName == "InnerResultPair" { + result = *u.InnerResultPair + ok = true + } + + return +} + +// MustResults retrieves the Results value from the union, +// panicing if the value is not set. +func (u TransactionResultResult) MustResults() []OperationResult { + val, ok := u.GetResults() + + if !ok { + panic("arm Results is not set") + } + + return val +} + +// GetResults retrieves the Results value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u TransactionResultResult) GetResults() (result []OperationResult, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Code)) + + if armName == "Results" { + result = *u.Results + ok = true + } + + return +} + +// EncodeTo encodes this value using the Encoder. 
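For completeness, a sketch of the constructor pattern shown above (same assumed import path): NewTransactionResultResult only accepts the value type that matches the chosen code, and the void codes ignore the value entirely.

package main

import (
	"fmt"

	"github.com/stellar/go/xdr"
)

func main() {
	// txSUCCESS and txFAILED expect a []OperationResult; the other codes are void.
	res, err := xdr.NewTransactionResultResult(xdr.TransactionResultCodeTxSuccess, []xdr.OperationResult{})
	fmt.Println(err, res.Results != nil) // <nil> true

	_, err = xdr.NewTransactionResultResult(xdr.TransactionResultCodeTxSuccess, 42)
	fmt.Println(err) // invalid value, must be []OperationResult
}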
+func (u TransactionResultResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Code.EncodeTo(e); err != nil { + return err + } + switch TransactionResultCode(u.Code) { + case TransactionResultCodeTxFeeBumpInnerSuccess: + if err = (*u.InnerResultPair).EncodeTo(e); err != nil { + return err } - result.Peers = &tv - case MessageTypeGetTxSet: - tv, ok := value.(Uint256) - if !ok { - err = fmt.Errorf("invalid value, must be Uint256") - return + return nil + case TransactionResultCodeTxFeeBumpInnerFailed: + if err = (*u.InnerResultPair).EncodeTo(e); err != nil { + return err } - result.TxSetHash = &tv - case MessageTypeTxSet: - tv, ok := value.(TransactionSet) - if !ok { - err = fmt.Errorf("invalid value, must be TransactionSet") - return + return nil + case TransactionResultCodeTxSuccess: + if _, err = e.EncodeUint(uint32(len((*u.Results)))); err != nil { + return err } - result.TxSet = &tv - case MessageTypeTransaction: - tv, ok := value.(TransactionEnvelope) - if !ok { - err = fmt.Errorf("invalid value, must be TransactionEnvelope") - return + for i := 0; i < len((*u.Results)); i++ { + if err = (*u.Results)[i].EncodeTo(e); err != nil { + return err + } } - result.Transaction = &tv - case MessageTypeGetScpQuorumset: - tv, ok := value.(Uint256) - if !ok { - err = fmt.Errorf("invalid value, must be Uint256") - return + return nil + case TransactionResultCodeTxFailed: + if _, err = e.EncodeUint(uint32(len((*u.Results)))); err != nil { + return err } - result.QSetHash = &tv - case MessageTypeScpQuorumset: - tv, ok := value.(ScpQuorumSet) - if !ok { - err = fmt.Errorf("invalid value, must be ScpQuorumSet") - return + for i := 0; i < len((*u.Results)); i++ { + if err = (*u.Results)[i].EncodeTo(e); err != nil { + return err + } } - result.QSet = &tv - case MessageTypeScpMessage: - tv, ok := value.(ScpEnvelope) - if !ok { - err = fmt.Errorf("invalid value, must be ScpEnvelope") - return + return nil + default: + // Void + return nil + } +} + +var _ decoderFrom = (*TransactionResultResult)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (u *TransactionResultResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Code.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultCode: %s", err) + } + switch TransactionResultCode(u.Code) { + case TransactionResultCodeTxFeeBumpInnerSuccess: + u.InnerResultPair = new(InnerTransactionResultPair) + nTmp, err = (*u.InnerResultPair).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding InnerTransactionResultPair: %s", err) } - result.Envelope = &tv - case MessageTypeGetScpState: - tv, ok := value.(Uint32) - if !ok { - err = fmt.Errorf("invalid value, must be Uint32") - return + return n, nil + case TransactionResultCodeTxFeeBumpInnerFailed: + u.InnerResultPair = new(InnerTransactionResultPair) + nTmp, err = (*u.InnerResultPair).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding InnerTransactionResultPair: %s", err) } - result.GetScpLedgerSeq = &tv + return n, nil + case TransactionResultCodeTxSuccess: + u.Results = new([]OperationResult) + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResult: %s", err) + } + (*u.Results) = nil + if l > 0 { + (*u.Results) = make([]OperationResult, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*u.Results)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResult: %s", err) + } + } + } + return n, nil + case TransactionResultCodeTxFailed: + u.Results = new([]OperationResult) + var l uint32 + l, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResult: %s", err) + } + (*u.Results) = nil + if l > 0 { + (*u.Results) = make([]OperationResult, l) + for i := uint32(0); i < l; i++ { + nTmp, err = (*u.Results)[i].DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding OperationResult: %s", err) + } + } + } + return n, nil + default: + // Void + return n, nil + } +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionResultResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionResultResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionResultResult)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionResultResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionResultResult) xdrType() {} + +var _ xdrType = (*TransactionResultResult)(nil) + +// TransactionResultExt is an XDR NestedUnion defines as: +// +// union switch (int v) +// { +// case 0: +// void; +// } +// +type TransactionResultExt struct { + V int32 +} + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u TransactionResultExt) SwitchFieldName() string { + return "V" +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of TransactionResultExt +func (u TransactionResultExt) ArmForSwitch(sw int32) (string, bool) { + switch int32(sw) { + case 0: + return "", true + } + return "-", false +} + +// NewTransactionResultExt creates a new TransactionResultExt. 
+func NewTransactionResultExt(v int32, value interface{}) (result TransactionResultExt, err error) { + result.V = v + switch int32(v) { + case 0: + // void } return } -// MustError retrieves the Error value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustError() Error { - val, ok := u.GetError() +// EncodeTo encodes this value using the Encoder. +func (u TransactionResultExt) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(u.V)); err != nil { + return err + } + switch int32(u.V) { + case 0: + // Void + return nil + } + return fmt.Errorf("V (int32) switch value '%d' is not valid for union TransactionResultExt", u.V) +} - if !ok { - panic("arm Error is not set") +var _ decoderFrom = (*TransactionResultExt)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (u *TransactionResultExt) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + u.V, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) } + switch int32(u.V) { + case 0: + // Void + return n, nil + } + return n, fmt.Errorf("union TransactionResultExt has invalid V (int32) switch value '%d'", u.V) +} - return val +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionResultExt) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetError retrieves the Error value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetError() (result Error, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionResultExt) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if armName == "Error" { - result = *u.Error - ok = true +var ( + _ encoding.BinaryMarshaler = (*TransactionResultExt)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionResultExt)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionResultExt) xdrType() {} + +var _ xdrType = (*TransactionResultExt)(nil) + +// TransactionResult is an XDR Struct defines as: +// +// struct TransactionResult +// { +// int64 feeCharged; // actual fee charged for the transaction +// +// union switch (TransactionResultCode code) +// { +// case txFEE_BUMP_INNER_SUCCESS: +// case txFEE_BUMP_INNER_FAILED: +// InnerTransactionResultPair innerResultPair; +// case txSUCCESS: +// case txFAILED: +// OperationResult results<>; +// default: +// void; +// } +// result; +// +// // reserved for future use +// union switch (int v) +// { +// case 0: +// void; +// } +// ext; +// }; +// +type TransactionResult struct { + FeeCharged Int64 + Result TransactionResultResult + Ext TransactionResultExt +} + +// EncodeTo encodes this value using the Encoder. +func (s *TransactionResult) EncodeTo(e *xdr.Encoder) error { + var err error + if err = s.FeeCharged.EncodeTo(e); err != nil { + return err + } + if err = s.Result.EncodeTo(e); err != nil { + return err } + if err = s.Ext.EncodeTo(e); err != nil { + return err + } + return nil +} - return +var _ decoderFrom = (*TransactionResult)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *TransactionResult) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = s.FeeCharged.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int64: %s", err) + } + nTmp, err = s.Result.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultResult: %s", err) + } + nTmp, err = s.Ext.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding TransactionResultExt: %s", err) + } + return n, nil } -// MustHello retrieves the Hello value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustHello() Hello { - val, ok := u.GetHello() +// MarshalBinary implements encoding.BinaryMarshaler. +func (s TransactionResult) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - if !ok { - panic("arm Hello is not set") +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *TransactionResult) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*TransactionResult)(nil) + _ encoding.BinaryUnmarshaler = (*TransactionResult)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s TransactionResult) xdrType() {} + +var _ xdrType = (*TransactionResult)(nil) + +// Hash is an XDR Typedef defines as: +// +// typedef opaque Hash[32]; +// +type Hash [32]byte + +// XDRMaxSize implements the Sized interface for Hash +func (e Hash) XDRMaxSize() int { + return 32 +} + +// EncodeTo encodes this value using the Encoder. +func (s *Hash) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s[:]); err != nil { + return err } + return nil +} - return val +var _ decoderFrom = (*Hash)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Hash) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hash: %s", err) + } + return n, nil } -// GetHello retrieves the Hello value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetHello() (result Hello, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Hash) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - if armName == "Hello" { - result = *u.Hello - ok = true +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Hash) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Hash)(nil) + _ encoding.BinaryUnmarshaler = (*Hash)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Hash) xdrType() {} + +var _ xdrType = (*Hash)(nil) + +// Uint256 is an XDR Typedef defines as: +// +// typedef opaque uint256[32]; +// +type Uint256 [32]byte + +// XDRMaxSize implements the Sized interface for Uint256 +func (e Uint256) XDRMaxSize() int { + return 32 +} + +// EncodeTo encodes this value using the Encoder. 
+func (s *Uint256) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s[:]); err != nil { + return err } + return nil +} - return +var _ decoderFrom = (*Uint256)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Uint256) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + return n, nil } -// MustAuth retrieves the Auth value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustAuth() Auth { - val, ok := u.GetAuth() +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Uint256) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - if !ok { - panic("arm Auth is not set") +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Uint256) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Uint256)(nil) + _ encoding.BinaryUnmarshaler = (*Uint256)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Uint256) xdrType() {} + +var _ xdrType = (*Uint256)(nil) + +// Uint32 is an XDR Typedef defines as: +// +// typedef unsigned int uint32; +// +type Uint32 uint32 + +// EncodeTo encodes this value using the Encoder. +func (s Uint32) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeUint(uint32(s)); err != nil { + return err } + return nil +} - return val +var _ decoderFrom = (*Uint32)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Uint32) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var v uint32 + v, nTmp, err = d.DecodeUint() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Unsigned int: %s", err) + } + *s = Uint32(v) + return n, nil } -// GetAuth retrieves the Auth value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetAuth() (result Auth, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Uint32) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - if armName == "Auth" { - result = *u.Auth - ok = true +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Uint32) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Uint32)(nil) + _ encoding.BinaryUnmarshaler = (*Uint32)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Uint32) xdrType() {} + +var _ xdrType = (*Uint32)(nil) + +// Int32 is an XDR Typedef defines as: +// +// typedef int int32; +// +type Int32 int32 + +// EncodeTo encodes this value using the Encoder. +func (s Int32) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeInt(int32(s)); err != nil { + return err } + return nil +} - return +var _ decoderFrom = (*Int32)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *Int32) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var v int32 + v, nTmp, err = d.DecodeInt() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Int: %s", err) + } + *s = Int32(v) + return n, nil } -// MustDontHave retrieves the DontHave value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustDontHave() DontHave { - val, ok := u.GetDontHave() +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Int32) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - if !ok { - panic("arm DontHave is not set") +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Int32) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Int32)(nil) + _ encoding.BinaryUnmarshaler = (*Int32)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Int32) xdrType() {} + +var _ xdrType = (*Int32)(nil) + +// Uint64 is an XDR Typedef defines as: +// +// typedef unsigned hyper uint64; +// +type Uint64 uint64 + +// EncodeTo encodes this value using the Encoder. +func (s Uint64) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeUhyper(uint64(s)); err != nil { + return err } + return nil +} - return val +var _ decoderFrom = (*Uint64)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Uint64) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var v uint64 + v, nTmp, err = d.DecodeUhyper() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Unsigned hyper: %s", err) + } + *s = Uint64(v) + return n, nil } -// GetDontHave retrieves the DontHave value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetDontHave() (result DontHave, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Uint64) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} - if armName == "DontHave" { - result = *u.DontHave - ok = true +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Uint64) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Uint64)(nil) + _ encoding.BinaryUnmarshaler = (*Uint64)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Uint64) xdrType() {} + +var _ xdrType = (*Uint64)(nil) + +// Int64 is an XDR Typedef defines as: +// +// typedef hyper int64; +// +type Int64 int64 + +// EncodeTo encodes this value using the Encoder. +func (s Int64) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeHyper(int64(s)); err != nil { + return err } + return nil +} - return +var _ decoderFrom = (*Int64)(nil) + +// DecodeFrom decodes this value using the Decoder. 
+func (s *Int64) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + var v int64 + v, nTmp, err = d.DecodeHyper() + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Hyper: %s", err) + } + *s = Int64(v) + return n, nil } -// MustPeers retrieves the Peers value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustPeers() []PeerAddress { - val, ok := u.GetPeers() +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Int64) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Int64) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Int64)(nil) + _ encoding.BinaryUnmarshaler = (*Int64)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Int64) xdrType() {} + +var _ xdrType = (*Int64)(nil) + +// CryptoKeyType is an XDR Enum defines as: +// +// enum CryptoKeyType +// { +// KEY_TYPE_ED25519 = 0, +// KEY_TYPE_PRE_AUTH_TX = 1, +// KEY_TYPE_HASH_X = 2, +// // MUXED enum values for supported type are derived from the enum values +// // above by ORing them with 0x100 +// KEY_TYPE_MUXED_ED25519 = 0x100 +// }; +// +type CryptoKeyType int32 + +const ( + CryptoKeyTypeKeyTypeEd25519 CryptoKeyType = 0 + CryptoKeyTypeKeyTypePreAuthTx CryptoKeyType = 1 + CryptoKeyTypeKeyTypeHashX CryptoKeyType = 2 + CryptoKeyTypeKeyTypeMuxedEd25519 CryptoKeyType = 256 +) + +var cryptoKeyTypeMap = map[int32]string{ + 0: "CryptoKeyTypeKeyTypeEd25519", + 1: "CryptoKeyTypeKeyTypePreAuthTx", + 2: "CryptoKeyTypeKeyTypeHashX", + 256: "CryptoKeyTypeKeyTypeMuxedEd25519", +} + +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for CryptoKeyType +func (e CryptoKeyType) ValidEnum(v int32) bool { + _, ok := cryptoKeyTypeMap[v] + return ok +} + +// String returns the name of `e` +func (e CryptoKeyType) String() string { + name, _ := cryptoKeyTypeMap[int32(e)] + return name +} - if !ok { - panic("arm Peers is not set") +// EncodeTo encodes this value using the Encoder. +func (e CryptoKeyType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := cryptoKeyTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid CryptoKeyType enum value", e) } - - return val + _, err := enc.EncodeInt(int32(e)) + return err } -// GetPeers retrieves the Peers value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetPeers() (result []PeerAddress, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*CryptoKeyType)(nil) - if armName == "Peers" { - result = *u.Peers - ok = true +// DecodeFrom decodes this value using the Decoder. +func (e *CryptoKeyType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding CryptoKeyType: %s", err) + } + if _, ok := cryptoKeyTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid CryptoKeyType enum value", v) } + *e = CryptoKeyType(v) + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s CryptoKeyType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustTxSetHash retrieves the TxSetHash value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustTxSetHash() Uint256 { - val, ok := u.GetTxSetHash() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *CryptoKeyType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm TxSetHash is not set") - } +var ( + _ encoding.BinaryMarshaler = (*CryptoKeyType)(nil) + _ encoding.BinaryUnmarshaler = (*CryptoKeyType)(nil) +) - return val -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s CryptoKeyType) xdrType() {} -// GetTxSetHash retrieves the TxSetHash value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetTxSetHash() (result Uint256, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ xdrType = (*CryptoKeyType)(nil) - if armName == "TxSetHash" { - result = *u.TxSetHash - ok = true - } +// PublicKeyType is an XDR Enum defines as: +// +// enum PublicKeyType +// { +// PUBLIC_KEY_TYPE_ED25519 = KEY_TYPE_ED25519 +// }; +// +type PublicKeyType int32 - return +const ( + PublicKeyTypePublicKeyTypeEd25519 PublicKeyType = 0 +) + +var publicKeyTypeMap = map[int32]string{ + 0: "PublicKeyTypePublicKeyTypeEd25519", } -// MustTxSet retrieves the TxSet value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustTxSet() TransactionSet { - val, ok := u.GetTxSet() +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for PublicKeyType +func (e PublicKeyType) ValidEnum(v int32) bool { + _, ok := publicKeyTypeMap[v] + return ok +} - if !ok { - panic("arm TxSet is not set") - } +// String returns the name of `e` +func (e PublicKeyType) String() string { + name, _ := publicKeyTypeMap[int32(e)] + return name +} - return val +// EncodeTo encodes this value using the Encoder. +func (e PublicKeyType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := publicKeyTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid PublicKeyType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// GetTxSet retrieves the TxSet value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetTxSet() (result TransactionSet, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*PublicKeyType)(nil) - if armName == "TxSet" { - result = *u.TxSet - ok = true +// DecodeFrom decodes this value using the Decoder. +func (e *PublicKeyType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding PublicKeyType: %s", err) + } + if _, ok := publicKeyTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid PublicKeyType enum value", v) } + *e = PublicKeyType(v) + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PublicKeyType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustTransaction retrieves the Transaction value from the union, -// panicing if the value is not set. 
-func (u StellarMessage) MustTransaction() TransactionEnvelope { - val, ok := u.GetTransaction() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PublicKeyType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm Transaction is not set") - } +var ( + _ encoding.BinaryMarshaler = (*PublicKeyType)(nil) + _ encoding.BinaryUnmarshaler = (*PublicKeyType)(nil) +) - return val -} +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s PublicKeyType) xdrType() {} -// GetTransaction retrieves the Transaction value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetTransaction() (result TransactionEnvelope, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ xdrType = (*PublicKeyType)(nil) - if armName == "Transaction" { - result = *u.Transaction - ok = true - } +// SignerKeyType is an XDR Enum defines as: +// +// enum SignerKeyType +// { +// SIGNER_KEY_TYPE_ED25519 = KEY_TYPE_ED25519, +// SIGNER_KEY_TYPE_PRE_AUTH_TX = KEY_TYPE_PRE_AUTH_TX, +// SIGNER_KEY_TYPE_HASH_X = KEY_TYPE_HASH_X +// }; +// +type SignerKeyType int32 - return +const ( + SignerKeyTypeSignerKeyTypeEd25519 SignerKeyType = 0 + SignerKeyTypeSignerKeyTypePreAuthTx SignerKeyType = 1 + SignerKeyTypeSignerKeyTypeHashX SignerKeyType = 2 +) + +var signerKeyTypeMap = map[int32]string{ + 0: "SignerKeyTypeSignerKeyTypeEd25519", + 1: "SignerKeyTypeSignerKeyTypePreAuthTx", + 2: "SignerKeyTypeSignerKeyTypeHashX", } -// MustQSetHash retrieves the QSetHash value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustQSetHash() Uint256 { - val, ok := u.GetQSetHash() +// ValidEnum validates a proposed value for this enum. Implements +// the Enum interface for SignerKeyType +func (e SignerKeyType) ValidEnum(v int32) bool { + _, ok := signerKeyTypeMap[v] + return ok +} - if !ok { - panic("arm QSetHash is not set") - } +// String returns the name of `e` +func (e SignerKeyType) String() string { + name, _ := signerKeyTypeMap[int32(e)] + return name +} - return val +// EncodeTo encodes this value using the Encoder. +func (e SignerKeyType) EncodeTo(enc *xdr.Encoder) error { + if _, ok := signerKeyTypeMap[int32(e)]; !ok { + return fmt.Errorf("'%d' is not a valid SignerKeyType enum value", e) + } + _, err := enc.EncodeInt(int32(e)) + return err } -// GetQSetHash retrieves the QSetHash value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetQSetHash() (result Uint256, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*SignerKeyType)(nil) - if armName == "QSetHash" { - result = *u.QSetHash - ok = true +// DecodeFrom decodes this value using the Decoder. +func (e *SignerKeyType) DecodeFrom(d *xdr.Decoder) (int, error) { + v, n, err := d.DecodeInt() + if err != nil { + return n, fmt.Errorf("decoding SignerKeyType: %s", err) + } + if _, ok := signerKeyTypeMap[v]; !ok { + return n, fmt.Errorf("'%d' is not a valid SignerKeyType enum value", v) } + *e = SignerKeyType(v) + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s SignerKeyType) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustQSet retrieves the QSet value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustQSet() ScpQuorumSet { - val, ok := u.GetQSet() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SignerKeyType) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm QSet is not set") - } +var ( + _ encoding.BinaryMarshaler = (*SignerKeyType)(nil) + _ encoding.BinaryUnmarshaler = (*SignerKeyType)(nil) +) - return val +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SignerKeyType) xdrType() {} + +var _ xdrType = (*SignerKeyType)(nil) + +// PublicKey is an XDR Union defines as: +// +// union PublicKey switch (PublicKeyType type) +// { +// case PUBLIC_KEY_TYPE_ED25519: +// uint256 ed25519; +// }; +// +type PublicKey struct { + Type PublicKeyType + Ed25519 *Uint256 } -// GetQSet retrieves the QSet value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetQSet() (result ScpQuorumSet, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u PublicKey) SwitchFieldName() string { + return "Type" +} - if armName == "QSet" { - result = *u.QSet - ok = true +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of PublicKey +func (u PublicKey) ArmForSwitch(sw int32) (string, bool) { + switch PublicKeyType(sw) { + case PublicKeyTypePublicKeyTypeEd25519: + return "Ed25519", true } + return "-", false +} +// NewPublicKey creates a new PublicKey. +func NewPublicKey(aType PublicKeyType, value interface{}) (result PublicKey, err error) { + result.Type = aType + switch PublicKeyType(aType) { + case PublicKeyTypePublicKeyTypeEd25519: + tv, ok := value.(Uint256) + if !ok { + err = fmt.Errorf("invalid value, must be Uint256") + return + } + result.Ed25519 = &tv + } return } -// MustEnvelope retrieves the Envelope value from the union, +// MustEd25519 retrieves the Ed25519 value from the union, // panicing if the value is not set. -func (u StellarMessage) MustEnvelope() ScpEnvelope { - val, ok := u.GetEnvelope() +func (u PublicKey) MustEd25519() Uint256 { + val, ok := u.GetEd25519() if !ok { - panic("arm Envelope is not set") + panic("arm Ed25519 is not set") } return val } -// GetEnvelope retrieves the Envelope value from the union, +// GetEd25519 retrieves the Ed25519 value from the union, // returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetEnvelope() (result ScpEnvelope, ok bool) { +func (u PublicKey) GetEd25519() (result Uint256, ok bool) { armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "Envelope" { - result = *u.Envelope + if armName == "Ed25519" { + result = *u.Ed25519 ok = true } return } -// MustGetScpLedgerSeq retrieves the GetScpLedgerSeq value from the union, -// panicing if the value is not set. -func (u StellarMessage) MustGetScpLedgerSeq() Uint32 { - val, ok := u.GetGetScpLedgerSeq() - - if !ok { - panic("arm GetScpLedgerSeq is not set") +// EncodeTo encodes this value using the Encoder. 
+func (u PublicKey) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err } - - return val + switch PublicKeyType(u.Type) { + case PublicKeyTypePublicKeyTypeEd25519: + if err = (*u.Ed25519).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (PublicKeyType) switch value '%d' is not valid for union PublicKey", u.Type) } -// GetGetScpLedgerSeq retrieves the GetScpLedgerSeq value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u StellarMessage) GetGetScpLedgerSeq() (result Uint32, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*PublicKey)(nil) - if armName == "GetScpLedgerSeq" { - result = *u.GetScpLedgerSeq - ok = true +// DecodeFrom decodes this value using the Decoder. +func (u *PublicKey) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PublicKeyType: %s", err) + } + switch PublicKeyType(u.Type) { + case PublicKeyTypePublicKeyTypeEd25519: + u.Ed25519 = new(Uint256) + nTmp, err = (*u.Ed25519).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) + } + return n, nil } + return n, fmt.Errorf("union PublicKey has invalid Type (PublicKeyType) switch value '%d'", u.Type) +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s PublicKey) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// AuthenticatedMessageV0 is an XDR NestedStruct defines as: -// -// struct -// { -// uint64 sequence; -// StellarMessage message; -// HmacSha256Mac mac; -// } -// -type AuthenticatedMessageV0 struct { - Sequence Uint64 - Message StellarMessage - Mac HmacSha256Mac +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *PublicKey) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err } -// AuthenticatedMessage is an XDR Union defines as: +var ( + _ encoding.BinaryMarshaler = (*PublicKey)(nil) + _ encoding.BinaryUnmarshaler = (*PublicKey)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s PublicKey) xdrType() {} + +var _ xdrType = (*PublicKey)(nil) + +// SignerKey is an XDR Union defines as: // -// union AuthenticatedMessage switch (uint32 v) -// { -// case 0: -// struct +// union SignerKey switch (SignerKeyType type) // { -// uint64 sequence; -// StellarMessage message; -// HmacSha256Mac mac; -// } v0; +// case SIGNER_KEY_TYPE_ED25519: +// uint256 ed25519; +// case SIGNER_KEY_TYPE_PRE_AUTH_TX: +// /* SHA-256 Hash of TransactionSignaturePayload structure */ +// uint256 preAuthTx; +// case SIGNER_KEY_TYPE_HASH_X: +// /* Hash of random 256 bit preimage X */ +// uint256 hashX; // }; // -type AuthenticatedMessage struct { - V Uint32 - V0 *AuthenticatedMessageV0 +type SignerKey struct { + Type SignerKeyType + Ed25519 *Uint256 + PreAuthTx *Uint256 + HashX *Uint256 } // SwitchFieldName returns the field name in which this union's // discriminant is stored -func (u AuthenticatedMessage) SwitchFieldName() string { - return "V" +func (u SignerKey) SwitchFieldName() string { + return "Type" } // ArmForSwitch returns which field name should be used for storing -// the value for an instance of AuthenticatedMessage -func (u AuthenticatedMessage) ArmForSwitch(sw int32) (string, bool) { - switch Uint32(sw) { - case 0: - return "V0", true +// the value for an instance of SignerKey +func (u SignerKey) ArmForSwitch(sw int32) (string, bool) { + switch SignerKeyType(sw) { + case SignerKeyTypeSignerKeyTypeEd25519: + return "Ed25519", true + case SignerKeyTypeSignerKeyTypePreAuthTx: + return "PreAuthTx", true + case SignerKeyTypeSignerKeyTypeHashX: + return "HashX", true } return "-", false } -// NewAuthenticatedMessage creates a new AuthenticatedMessage. -func NewAuthenticatedMessage(v Uint32, value interface{}) (result AuthenticatedMessage, err error) { - result.V = v - switch Uint32(v) { - case 0: - tv, ok := value.(AuthenticatedMessageV0) +// NewSignerKey creates a new SignerKey. +func NewSignerKey(aType SignerKeyType, value interface{}) (result SignerKey, err error) { + result.Type = aType + switch SignerKeyType(aType) { + case SignerKeyTypeSignerKeyTypeEd25519: + tv, ok := value.(Uint256) + if !ok { + err = fmt.Errorf("invalid value, must be Uint256") + return + } + result.Ed25519 = &tv + case SignerKeyTypeSignerKeyTypePreAuthTx: + tv, ok := value.(Uint256) + if !ok { + err = fmt.Errorf("invalid value, must be Uint256") + return + } + result.PreAuthTx = &tv + case SignerKeyTypeSignerKeyTypeHashX: + tv, ok := value.(Uint256) if !ok { - err = fmt.Errorf("invalid value, must be AuthenticatedMessageV0") + err = fmt.Errorf("invalid value, must be Uint256") return } - result.V0 = &tv + result.HashX = &tv } return } -// MustV0 retrieves the V0 value from the union, +// MustEd25519 retrieves the Ed25519 value from the union, // panicing if the value is not set. -func (u AuthenticatedMessage) MustV0() AuthenticatedMessageV0 { - val, ok := u.GetV0() +func (u SignerKey) MustEd25519() Uint256 { + val, ok := u.GetEd25519() if !ok { - panic("arm V0 is not set") + panic("arm Ed25519 is not set") } return val } -// GetV0 retrieves the V0 value from the union, +// GetEd25519 retrieves the Ed25519 value from the union, // returning ok if the union's switch indicated the value is valid. 
-func (u AuthenticatedMessage) GetV0() (result AuthenticatedMessageV0, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.V)) +func (u SignerKey) GetEd25519() (result Uint256, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) - if armName == "V0" { - result = *u.V0 + if armName == "Ed25519" { + result = *u.Ed25519 ok = true } return } -// Value is an XDR Typedef defines as: -// -// typedef opaque Value<>; -// -type Value []byte +// MustPreAuthTx retrieves the PreAuthTx value from the union, +// panicing if the value is not set. +func (u SignerKey) MustPreAuthTx() Uint256 { + val, ok := u.GetPreAuthTx() -// ScpBallot is an XDR Struct defines as: -// -// struct SCPBallot -// { -// uint32 counter; // n -// Value value; // x -// }; -// -type ScpBallot struct { - Counter Uint32 - Value Value + if !ok { + panic("arm PreAuthTx is not set") + } + + return val } -// ScpStatementType is an XDR Enum defines as: -// -// enum SCPStatementType -// { -// SCP_ST_PREPARE = 0, -// SCP_ST_CONFIRM = 1, -// SCP_ST_EXTERNALIZE = 2, -// SCP_ST_NOMINATE = 3 -// }; -// -type ScpStatementType int32 +// GetPreAuthTx retrieves the PreAuthTx value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u SignerKey) GetPreAuthTx() (result Uint256, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) -const ( - ScpStatementTypeScpStPrepare ScpStatementType = 0 - ScpStatementTypeScpStConfirm ScpStatementType = 1 - ScpStatementTypeScpStExternalize ScpStatementType = 2 - ScpStatementTypeScpStNominate ScpStatementType = 3 -) + if armName == "PreAuthTx" { + result = *u.PreAuthTx + ok = true + } -var scpStatementTypeMap = map[int32]string{ - 0: "ScpStatementTypeScpStPrepare", - 1: "ScpStatementTypeScpStConfirm", - 2: "ScpStatementTypeScpStExternalize", - 3: "ScpStatementTypeScpStNominate", + return } -// ValidEnum validates a proposed value for this enum. Implements -// the Enum interface for ScpStatementType -func (e ScpStatementType) ValidEnum(v int32) bool { - _, ok := scpStatementTypeMap[v] - return ok -} +// MustHashX retrieves the HashX value from the union, +// panicing if the value is not set. +func (u SignerKey) MustHashX() Uint256 { + val, ok := u.GetHashX() -// String returns the name of `e` -func (e ScpStatementType) String() string { - name, _ := scpStatementTypeMap[int32(e)] - return name -} + if !ok { + panic("arm HashX is not set") + } -// ScpNomination is an XDR Struct defines as: -// -// struct SCPNomination -// { -// Hash quorumSetHash; // D -// Value votes<>; // X -// Value accepted<>; // Y -// }; -// -type ScpNomination struct { - QuorumSetHash Hash - Votes []Value - Accepted []Value + return val } -// ScpStatementPrepare is an XDR NestedStruct defines as: -// -// struct -// { -// Hash quorumSetHash; // D -// SCPBallot ballot; // b -// SCPBallot* prepared; // p -// SCPBallot* preparedPrime; // p' -// uint32 nC; // c.n -// uint32 nH; // h.n -// } -// -type ScpStatementPrepare struct { - QuorumSetHash Hash - Ballot ScpBallot - Prepared *ScpBallot - PreparedPrime *ScpBallot - NC Uint32 - NH Uint32 -} +// GetHashX retrieves the HashX value from the union, +// returning ok if the union's switch indicated the value is valid. 
+func (u SignerKey) GetHashX() (result Uint256, ok bool) { + armName, _ := u.ArmForSwitch(int32(u.Type)) -// ScpStatementConfirm is an XDR NestedStruct defines as: -// -// struct -// { -// SCPBallot ballot; // b -// uint32 nPrepared; // p.n -// uint32 nCommit; // c.n -// uint32 nH; // h.n -// Hash quorumSetHash; // D -// } -// -type ScpStatementConfirm struct { - Ballot ScpBallot - NPrepared Uint32 - NCommit Uint32 - NH Uint32 - QuorumSetHash Hash -} + if armName == "HashX" { + result = *u.HashX + ok = true + } -// ScpStatementExternalize is an XDR NestedStruct defines as: -// -// struct -// { -// SCPBallot commit; // c -// uint32 nH; // h.n -// Hash commitQuorumSetHash; // D used before EXTERNALIZE -// } -// -type ScpStatementExternalize struct { - Commit ScpBallot - NH Uint32 - CommitQuorumSetHash Hash + return } -// ScpStatementPledges is an XDR NestedUnion defines as: -// -// union switch (SCPStatementType type) -// { -// case SCP_ST_PREPARE: -// struct -// { -// Hash quorumSetHash; // D -// SCPBallot ballot; // b -// SCPBallot* prepared; // p -// SCPBallot* preparedPrime; // p' -// uint32 nC; // c.n -// uint32 nH; // h.n -// } prepare; -// case SCP_ST_CONFIRM: -// struct -// { -// SCPBallot ballot; // b -// uint32 nPrepared; // p.n -// uint32 nCommit; // c.n -// uint32 nH; // h.n -// Hash quorumSetHash; // D -// } confirm; -// case SCP_ST_EXTERNALIZE: -// struct -// { -// SCPBallot commit; // c -// uint32 nH; // h.n -// Hash commitQuorumSetHash; // D used before EXTERNALIZE -// } externalize; -// case SCP_ST_NOMINATE: -// SCPNomination nominate; -// } -// -type ScpStatementPledges struct { - Type ScpStatementType - Prepare *ScpStatementPrepare - Confirm *ScpStatementConfirm - Externalize *ScpStatementExternalize - Nominate *ScpNomination +// EncodeTo encodes this value using the Encoder. +func (u SignerKey) EncodeTo(e *xdr.Encoder) error { + var err error + if err = u.Type.EncodeTo(e); err != nil { + return err + } + switch SignerKeyType(u.Type) { + case SignerKeyTypeSignerKeyTypeEd25519: + if err = (*u.Ed25519).EncodeTo(e); err != nil { + return err + } + return nil + case SignerKeyTypeSignerKeyTypePreAuthTx: + if err = (*u.PreAuthTx).EncodeTo(e); err != nil { + return err + } + return nil + case SignerKeyTypeSignerKeyTypeHashX: + if err = (*u.HashX).EncodeTo(e); err != nil { + return err + } + return nil + } + return fmt.Errorf("Type (SignerKeyType) switch value '%d' is not valid for union SignerKey", u.Type) } -// SwitchFieldName returns the field name in which this union's -// discriminant is stored -func (u ScpStatementPledges) SwitchFieldName() string { - return "Type" -} +var _ decoderFrom = (*SignerKey)(nil) -// ArmForSwitch returns which field name should be used for storing -// the value for an instance of ScpStatementPledges -func (u ScpStatementPledges) ArmForSwitch(sw int32) (string, bool) { - switch ScpStatementType(sw) { - case ScpStatementTypeScpStPrepare: - return "Prepare", true - case ScpStatementTypeScpStConfirm: - return "Confirm", true - case ScpStatementTypeScpStExternalize: - return "Externalize", true - case ScpStatementTypeScpStNominate: - return "Nominate", true +// DecodeFrom decodes this value using the Decoder. +func (u *SignerKey) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = u.Type.DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SignerKeyType: %s", err) } - return "-", false -} - -// NewScpStatementPledges creates a new ScpStatementPledges. 
-func NewScpStatementPledges(aType ScpStatementType, value interface{}) (result ScpStatementPledges, err error) { - result.Type = aType - switch ScpStatementType(aType) { - case ScpStatementTypeScpStPrepare: - tv, ok := value.(ScpStatementPrepare) - if !ok { - err = fmt.Errorf("invalid value, must be ScpStatementPrepare") - return - } - result.Prepare = &tv - case ScpStatementTypeScpStConfirm: - tv, ok := value.(ScpStatementConfirm) - if !ok { - err = fmt.Errorf("invalid value, must be ScpStatementConfirm") - return + switch SignerKeyType(u.Type) { + case SignerKeyTypeSignerKeyTypeEd25519: + u.Ed25519 = new(Uint256) + nTmp, err = (*u.Ed25519).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) } - result.Confirm = &tv - case ScpStatementTypeScpStExternalize: - tv, ok := value.(ScpStatementExternalize) - if !ok { - err = fmt.Errorf("invalid value, must be ScpStatementExternalize") - return + return n, nil + case SignerKeyTypeSignerKeyTypePreAuthTx: + u.PreAuthTx = new(Uint256) + nTmp, err = (*u.PreAuthTx).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) } - result.Externalize = &tv - case ScpStatementTypeScpStNominate: - tv, ok := value.(ScpNomination) - if !ok { - err = fmt.Errorf("invalid value, must be ScpNomination") - return + return n, nil + case SignerKeyTypeSignerKeyTypeHashX: + u.HashX = new(Uint256) + nTmp, err = (*u.HashX).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Uint256: %s", err) } - result.Nominate = &tv + return n, nil + } + return n, fmt.Errorf("union SignerKey has invalid Type (SignerKeyType) switch value '%d'", u.Type) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SignerKey) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SignerKey) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*SignerKey)(nil) + _ encoding.BinaryUnmarshaler = (*SignerKey)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s SignerKey) xdrType() {} + +var _ xdrType = (*SignerKey)(nil) + +// Signature is an XDR Typedef defines as: +// +// typedef opaque Signature<64>; +// +type Signature []byte + +// XDRMaxSize implements the Sized interface for Signature +func (e Signature) XDRMaxSize() int { + return 64 +} + +// EncodeTo encodes this value using the Encoder. +func (s Signature) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeOpaque(s[:]); err != nil { + return err } - return + return nil } -// MustPrepare retrieves the Prepare value from the union, -// panicing if the value is not set. -func (u ScpStatementPledges) MustPrepare() ScpStatementPrepare { - val, ok := u.GetPrepare() +var _ decoderFrom = (*Signature)(nil) - if !ok { - panic("arm Prepare is not set") +// DecodeFrom decodes this value using the Decoder. +func (s *Signature) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + (*s), nTmp, err = d.DecodeOpaque(64) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Signature: %s", err) } + return n, nil +} - return val +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (s Signature) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetPrepare retrieves the Prepare value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u ScpStatementPledges) GetPrepare() (result ScpStatementPrepare, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Signature) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if armName == "Prepare" { - result = *u.Prepare - ok = true - } +var ( + _ encoding.BinaryMarshaler = (*Signature)(nil) + _ encoding.BinaryUnmarshaler = (*Signature)(nil) +) - return +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Signature) xdrType() {} + +var _ xdrType = (*Signature)(nil) + +// SignatureHint is an XDR Typedef defines as: +// +// typedef opaque SignatureHint[4]; +// +type SignatureHint [4]byte + +// XDRMaxSize implements the Sized interface for SignatureHint +func (e SignatureHint) XDRMaxSize() int { + return 4 } -// MustConfirm retrieves the Confirm value from the union, -// panicing if the value is not set. -func (u ScpStatementPledges) MustConfirm() ScpStatementConfirm { - val, ok := u.GetConfirm() +// EncodeTo encodes this value using the Encoder. +func (s *SignatureHint) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s[:]); err != nil { + return err + } + return nil +} - if !ok { - panic("arm Confirm is not set") +var _ decoderFrom = (*SignatureHint)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *SignatureHint) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding SignatureHint: %s", err) } + return n, nil +} - return val +// MarshalBinary implements encoding.BinaryMarshaler. +func (s SignatureHint) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// GetConfirm retrieves the Confirm value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u ScpStatementPledges) GetConfirm() (result ScpStatementConfirm, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *SignatureHint) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if armName == "Confirm" { - result = *u.Confirm - ok = true - } +var ( + _ encoding.BinaryMarshaler = (*SignatureHint)(nil) + _ encoding.BinaryUnmarshaler = (*SignatureHint)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. 
+func (s SignatureHint) xdrType() {} + +var _ xdrType = (*SignatureHint)(nil) + +// NodeId is an XDR Typedef defines as: +// +// typedef PublicKey NodeID; +// +type NodeId PublicKey + +// SwitchFieldName returns the field name in which this union's +// discriminant is stored +func (u NodeId) SwitchFieldName() string { + return PublicKey(u).SwitchFieldName() +} + +// ArmForSwitch returns which field name should be used for storing +// the value for an instance of PublicKey +func (u NodeId) ArmForSwitch(sw int32) (string, bool) { + return PublicKey(u).ArmForSwitch(sw) +} +// NewNodeId creates a new NodeId. +func NewNodeId(aType PublicKeyType, value interface{}) (result NodeId, err error) { + u, err := NewPublicKey(aType, value) + result = NodeId(u) return } -// MustExternalize retrieves the Externalize value from the union, +// MustEd25519 retrieves the Ed25519 value from the union, // panicing if the value is not set. -func (u ScpStatementPledges) MustExternalize() ScpStatementExternalize { - val, ok := u.GetExternalize() +func (u NodeId) MustEd25519() Uint256 { + return PublicKey(u).MustEd25519() +} - if !ok { - panic("arm Externalize is not set") - } +// GetEd25519 retrieves the Ed25519 value from the union, +// returning ok if the union's switch indicated the value is valid. +func (u NodeId) GetEd25519() (result Uint256, ok bool) { + return PublicKey(u).GetEd25519() +} - return val +// EncodeTo encodes this value using the Encoder. +func (s NodeId) EncodeTo(e *xdr.Encoder) error { + var err error + if err = PublicKey(s).EncodeTo(e); err != nil { + return err + } + return nil } -// GetExternalize retrieves the Externalize value from the union, -// returning ok if the union's switch indicated the value is valid. -func (u ScpStatementPledges) GetExternalize() (result ScpStatementExternalize, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +var _ decoderFrom = (*NodeId)(nil) - if armName == "Externalize" { - result = *u.Externalize - ok = true +// DecodeFrom decodes this value using the Decoder. +func (s *NodeId) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = (*PublicKey)(s).DecodeFrom(d) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding PublicKey: %s", err) } + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s NodeId) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// MustNominate retrieves the Nominate value from the union, -// panicing if the value is not set. -func (u ScpStatementPledges) MustNominate() ScpNomination { - val, ok := u.GetNominate() +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *NodeId) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} - if !ok { - panic("arm Nominate is not set") - } +var ( + _ encoding.BinaryMarshaler = (*NodeId)(nil) + _ encoding.BinaryUnmarshaler = (*NodeId)(nil) +) - return val +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s NodeId) xdrType() {} + +var _ xdrType = (*NodeId)(nil) + +// Curve25519Secret is an XDR Struct defines as: +// +// struct Curve25519Secret +// { +// opaque key[32]; +// }; +// +type Curve25519Secret struct { + Key [32]byte `xdrmaxsize:"32"` } -// GetNominate retrieves the Nominate value from the union, -// returning ok if the union's switch indicated the value is valid. 
-func (u ScpStatementPledges) GetNominate() (result ScpNomination, ok bool) { - armName, _ := u.ArmForSwitch(int32(u.Type)) +// EncodeTo encodes this value using the Encoder. +func (s *Curve25519Secret) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s.Key[:]); err != nil { + return err + } + return nil +} - if armName == "Nominate" { - result = *u.Nominate - ok = true +var _ decoderFrom = (*Curve25519Secret)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Curve25519Secret) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s.Key[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Key: %s", err) } + return n, nil +} - return +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Curve25519Secret) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err } -// ScpStatement is an XDR Struct defines as: +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *Curve25519Secret) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Curve25519Secret)(nil) + _ encoding.BinaryUnmarshaler = (*Curve25519Secret)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Curve25519Secret) xdrType() {} + +var _ xdrType = (*Curve25519Secret)(nil) + +// Curve25519Public is an XDR Struct defines as: // -// struct SCPStatement +// struct Curve25519Public // { -// NodeID nodeID; // v -// uint64 slotIndex; // i -// -// union switch (SCPStatementType type) -// { -// case SCP_ST_PREPARE: -// struct -// { -// Hash quorumSetHash; // D -// SCPBallot ballot; // b -// SCPBallot* prepared; // p -// SCPBallot* preparedPrime; // p' -// uint32 nC; // c.n -// uint32 nH; // h.n -// } prepare; -// case SCP_ST_CONFIRM: -// struct -// { -// SCPBallot ballot; // b -// uint32 nPrepared; // p.n -// uint32 nCommit; // c.n -// uint32 nH; // h.n -// Hash quorumSetHash; // D -// } confirm; -// case SCP_ST_EXTERNALIZE: -// struct -// { -// SCPBallot commit; // c -// uint32 nH; // h.n -// Hash commitQuorumSetHash; // D used before EXTERNALIZE -// } externalize; -// case SCP_ST_NOMINATE: -// SCPNomination nominate; -// } -// pledges; +// opaque key[32]; // }; // -type ScpStatement struct { - NodeId NodeId - SlotIndex Uint64 - Pledges ScpStatementPledges +type Curve25519Public struct { + Key [32]byte `xdrmaxsize:"32"` } -// ScpEnvelope is an XDR Struct defines as: +// EncodeTo encodes this value using the Encoder. +func (s *Curve25519Public) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s.Key[:]); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*Curve25519Public)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *Curve25519Public) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s.Key[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Key: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s Curve25519Public) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *Curve25519Public) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*Curve25519Public)(nil) + _ encoding.BinaryUnmarshaler = (*Curve25519Public)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s Curve25519Public) xdrType() {} + +var _ xdrType = (*Curve25519Public)(nil) + +// HmacSha256Key is an XDR Struct defines as: // -// struct SCPEnvelope +// struct HmacSha256Key // { -// SCPStatement statement; -// Signature signature; +// opaque key[32]; // }; // -type ScpEnvelope struct { - Statement ScpStatement - Signature Signature +type HmacSha256Key struct { + Key [32]byte `xdrmaxsize:"32"` } -// ScpQuorumSet is an XDR Struct defines as: +// EncodeTo encodes this value using the Encoder. +func (s *HmacSha256Key) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s.Key[:]); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*HmacSha256Key)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *HmacSha256Key) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s.Key[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Key: %s", err) + } + return n, nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (s HmacSha256Key) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (s *HmacSha256Key) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*HmacSha256Key)(nil) + _ encoding.BinaryUnmarshaler = (*HmacSha256Key)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s HmacSha256Key) xdrType() {} + +var _ xdrType = (*HmacSha256Key)(nil) + +// HmacSha256Mac is an XDR Struct defines as: // -// struct SCPQuorumSet +// struct HmacSha256Mac // { -// uint32 threshold; -// PublicKey validators<>; -// SCPQuorumSet innerSets<>; +// opaque mac[32]; // }; // -type ScpQuorumSet struct { - Threshold Uint32 - Validators []PublicKey - InnerSets []ScpQuorumSet +type HmacSha256Mac struct { + Mac [32]byte `xdrmaxsize:"32"` +} + +// EncodeTo encodes this value using the Encoder. +func (s *HmacSha256Mac) EncodeTo(e *xdr.Encoder) error { + var err error + if _, err = e.EncodeFixedOpaque(s.Mac[:]); err != nil { + return err + } + return nil +} + +var _ decoderFrom = (*HmacSha256Mac)(nil) + +// DecodeFrom decodes this value using the Decoder. +func (s *HmacSha256Mac) DecodeFrom(d *xdr.Decoder) (int, error) { + var err error + var n, nTmp int + nTmp, err = d.DecodeFixedOpaqueInplace(s.Mac[:]) + n += nTmp + if err != nil { + return n, fmt.Errorf("decoding Mac: %s", err) + } + return n, nil } +// MarshalBinary implements encoding.BinaryMarshaler. +func (s HmacSha256Mac) MarshalBinary() ([]byte, error) { + b := bytes.Buffer{} + e := xdr.NewEncoder(&b) + err := s.EncodeTo(e) + return b.Bytes(), err +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
+func (s *HmacSha256Mac) UnmarshalBinary(inp []byte) error { + r := bytes.NewReader(inp) + d := xdr.NewDecoder(r) + _, err := s.DecodeFrom(d) + return err +} + +var ( + _ encoding.BinaryMarshaler = (*HmacSha256Mac)(nil) + _ encoding.BinaryUnmarshaler = (*HmacSha256Mac)(nil) +) + +// xdrType signals that this type is an type representing +// representing XDR values defined by this package. +func (s HmacSha256Mac) xdrType() {} + +var _ xdrType = (*HmacSha256Mac)(nil) + var fmtTest = fmt.Sprint("this is a dummy usage of fmt")
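For reference, here is a minimal sketch of how the generated marshalling methods above are typically exercised, round-tripping a SignerKey through MarshalBinary and UnmarshalBinary. The import path and the standalone main function are illustrative assumptions layered on top of this change, not part of the generated file; the same pattern applies to any of the generated types (TransactionResult, PublicKey, Hash, and so on), since their BinaryMarshaler/BinaryUnmarshaler implementations all wrap the EncodeTo/DecodeFrom methods introduced in this diff.

// Editorial sketch (assumed import path); not part of xdr_generated.go.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/stellar/go/xdr"
)

func main() {
	// Build a SignerKey union holding an ed25519 key; the zero value is used
	// purely for illustration.
	var raw xdr.Uint256
	sk, err := xdr.NewSignerKey(xdr.SignerKeyTypeSignerKeyTypeEd25519, raw)
	if err != nil {
		log.Fatal(err)
	}

	// Encode to XDR: a 4-byte discriminant followed by the 32-byte key.
	encoded, err := sk.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(encoded)) // 36

	// Decode back and verify the round trip.
	var decoded xdr.SignerKey
	if err := decoded.UnmarshalBinary(encoded); err != nil {
		log.Fatal(err)
	}
	got := decoded.MustEd25519()
	fmt.Println(decoded.Type == sk.Type && bytes.Equal(got[:], raw[:])) // true
}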